author    Lee Garrett <lgarrett@rocketjump.eu>  2020-12-19 18:12:24 +0100
committer Lee Garrett <lgarrett@rocketjump.eu>  2020-12-19 18:12:24 +0100
commit    b0c840f0b8eabf61dee4b8be2e00a8747ed0f2f0 (patch)
tree      425c0ec94f93ae7cebc8ecfbe99d2ad7f0180904
download  debian-ansible-core-b0c840f0b8eabf61dee4b8be2e00a8747ed0f2f0.zip
New upstream version 2.10.4
Diffstat (mode, path, lines changed or binary size):
-rw-r--r-- COPYING | 675
-rw-r--r-- MANIFEST.in | 36
-rw-r--r-- Makefile | 288
-rw-r--r-- PKG-INFO | 150
-rw-r--r-- README.rst | 115
-rw-r--r-- SYMLINK_CACHE.json | 1
l--------- bin/ansible | 1
l--------- bin/ansible-config | 1
l--------- bin/ansible-connection | 1
l--------- bin/ansible-console | 1
l--------- bin/ansible-doc | 1
l--------- bin/ansible-galaxy | 1
l--------- bin/ansible-inventory | 1
l--------- bin/ansible-playbook | 1
l--------- bin/ansible-pull | 1
l--------- bin/ansible-test | 1
l--------- bin/ansible-vault | 1
-rw-r--r-- changelogs/CHANGELOG-v2.10.rst | 870
-rw-r--r-- changelogs/CHANGELOG.rst | 6
-rw-r--r-- changelogs/changelog.yaml | 2284
-rwxr-xr-x docs/bin/find-plugin-refs.py | 83
-rwxr-xr-x docs/bin/testing_formatter.sh | 40
-rw-r--r-- docs/docsite/.gitignore | 19
-rw-r--r-- docs/docsite/.nojekyll | 0
-rw-r--r-- docs/docsite/Makefile | 139
-rw-r--r-- docs/docsite/Makefile.sphinx | 24
-rw-r--r-- docs/docsite/README.md | 26
-rw-r--r-- docs/docsite/_extensions/pygments_lexer.py | 187
-rw-r--r-- docs/docsite/_static/ansible.css | 59
-rw-r--r-- docs/docsite/_static/pygments.css | 76
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/__init__.py | 20
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_banner.html | 45
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_eol_banner.html | 4
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_extrabody.html | 25
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_extrafooter.html | 18
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_extrahead.html | 12
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_extranav.html | 9
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_searchbox.html | 10
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/ansible_versions.html | 29
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/breadcrumbs.html | 96
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/footer.html | 62
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/layout.html | 250
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/search.html | 50
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/searchbox.html | 9
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/css/badge_only.css | 1
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/css/theme.css | 6
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/fonts/FontAwesome.otf | bin 0 -> 134808 bytes
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.eot | bin 0 -> 165742 bytes
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.svg | 2671
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.ttf | bin 0 -> 165548 bytes
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff | bin 0 -> 98024 bytes
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff2 | bin 0 -> 77160 bytes
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/images/logo_invert.png | bin 0 -> 8342 bytes
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/js/modernizr.min.js | 4
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/static/js/theme.js | 3
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/theme.conf | 18
-rw-r--r-- docs/docsite/_themes/sphinx_rtd_theme/versions.html | 36
-rw-r--r-- docs/docsite/ansible_2_10.inv | bin 0 -> 218830 bytes
-rw-r--r-- docs/docsite/ansible_2_5.inv | bin 0 -> 123432 bytes
-rw-r--r-- docs/docsite/ansible_2_6.inv | bin 0 -> 134346 bytes
-rw-r--r-- docs/docsite/ansible_2_7.inv | bin 0 -> 149642 bytes
-rw-r--r-- docs/docsite/ansible_2_8.inv | bin 0 -> 198670 bytes
-rw-r--r-- docs/docsite/ansible_2_9.inv | bin 0 -> 242519 bytes
-rw-r--r-- docs/docsite/collection-plugins.yml | 17
-rw-r--r-- docs/docsite/jinja2.inv | bin 0 -> 3278 bytes
-rw-r--r-- docs/docsite/js/ansible/application.js | 106
-rw-r--r-- docs/docsite/keyword_desc.yml | 79
-rw-r--r-- docs/docsite/modules.js | 5
-rw-r--r-- docs/docsite/python2.inv | bin 0 -> 85381 bytes
-rw-r--r-- docs/docsite/python3.inv | bin 0 -> 107011 bytes
-rw-r--r-- docs/docsite/requirements.txt | 9
-rw-r--r-- docs/docsite/rst/404.rst | 12
-rw-r--r-- docs/docsite/rst/api/index.rst | 107
-rw-r--r-- docs/docsite/rst/community/code_of_conduct.rst | 146
-rw-r--r-- docs/docsite/rst/community/committer_guidelines.rst | 156
-rw-r--r-- docs/docsite/rst/community/communication.rst | 103
-rw-r--r-- docs/docsite/rst/community/community.rst | 6
-rw-r--r-- docs/docsite/rst/community/contributing_maintained_collections.rst | 271
-rw-r--r-- docs/docsite/rst/community/contributor_license_agreement.rst | 7
-rw-r--r-- docs/docsite/rst/community/development_process.rst | 277
-rw-r--r-- docs/docsite/rst/community/documentation_contributions.rst | 214
-rw-r--r-- docs/docsite/rst/community/github_admins.rst | 32
-rw-r--r-- docs/docsite/rst/community/how_can_I_help.rst | 86
-rw-r--r-- docs/docsite/rst/community/index.rst | 88
-rw-r--r-- docs/docsite/rst/community/maintainers.rst | 34
-rw-r--r-- docs/docsite/rst/community/other_tools_and_programs.rst | 123
-rw-r--r-- docs/docsite/rst/community/release_managers.rst | 82
-rw-r--r-- docs/docsite/rst/community/reporting_bugs_and_features.rst | 56
-rw-r--r-- docs/docsite/rst/community/triage_process.rst | 8
-rw-r--r-- docs/docsite/rst/conf.py | 293
-rw-r--r-- docs/docsite/rst/dev_guide/debugging.rst | 112
-rw-r--r-- docs/docsite/rst/dev_guide/developing_api.rst | 47
-rw-r--r-- docs/docsite/rst/dev_guide/developing_collections.rst | 801
-rw-r--r-- docs/docsite/rst/dev_guide/developing_core.rst | 21
-rw-r--r-- docs/docsite/rst/dev_guide/developing_inventory.rst | 422
-rw-r--r-- docs/docsite/rst/dev_guide/developing_locally.rst | 105
-rw-r--r-- docs/docsite/rst/dev_guide/developing_module_utilities.rst | 69
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules.rst | 51
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_best_practices.rst | 177
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_checklist.rst | 46
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_documenting.rst | 438
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_general.rst | 221
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_general_aci.rst | 443
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_general_windows.rst | 696
-rw-r--r-- docs/docsite/rst/dev_guide/developing_modules_in_groups.rst | 80
-rw-r--r-- docs/docsite/rst/dev_guide/developing_plugins.rst | 495
-rw-r--r-- docs/docsite/rst/dev_guide/developing_program_flow_modules.rst | 880
-rw-r--r-- docs/docsite/rst/dev_guide/developing_python_3.rst | 404
-rw-r--r-- docs/docsite/rst/dev_guide/developing_rebasing.rst | 83
-rw-r--r-- docs/docsite/rst/dev_guide/index.rst | 92
-rw-r--r-- docs/docsite/rst/dev_guide/migrating_roles.rst | 410
-rw-r--r-- docs/docsite/rst/dev_guide/module_lifecycle.rst | 50
-rw-r--r-- docs/docsite/rst/dev_guide/overview_architecture.rst | 149
-rw-r--r-- docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst | 754
-rw-r--r-- docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst | 57
-rw-r--r-- docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst | 220
-rw-r--r-- docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst | 270
-rw-r--r-- docs/docsite/rst/dev_guide/shared_snippets/licensing.txt | 9
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/basic_rules.rst | 69
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst | 201
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg | bin 0 -> 53403 bytes
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg | bin 0 -> 85373 bytes
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg | bin 0 -> 49628 bytes
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg | bin 0 -> 36500 bytes
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/index.rst | 244
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/resources.rst | 10
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/search_hints.rst | 48
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst | 327
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/trademarks.rst | 96
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/voice_style.rst | 20
-rw-r--r-- docs/docsite/rst/dev_guide/style_guide/why_use.rst | 23
-rw-r--r-- docs/docsite/rst/dev_guide/testing.rst | 243
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst | 6
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst | 10
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst | 11
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst | 11
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/changelog.rst | 17
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/compile.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst | 5
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst | 6
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst | 10
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst | 51
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/ignores.rst | 99
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/import.rst | 5
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst | 182
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst | 23
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst | 16
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst | 11
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst | 16
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst | 9
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst | 16
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst | 28
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst | 61
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst | 12
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst | 12
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst | 30
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst | 16
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst | 13
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst | 31
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst | 14
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/package-data.rst | 5
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/pep8.rst | 6
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/pslint.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst | 8
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/pylint.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/release-names.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst | 5
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst | 7
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/shebang.rst | 16
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst | 6
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst | 31
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst | 10
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst | 6
-rw-r--r-- docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst | 4
-rw-r--r-- docs/docsite/rst/dev_guide/testing_compile.rst | 76
-rw-r--r-- docs/docsite/rst/dev_guide/testing_documentation.rst | 36
-rw-r--r-- docs/docsite/rst/dev_guide/testing_httptester.rst | 27
-rw-r--r-- docs/docsite/rst/dev_guide/testing_integration.rst | 236
-rw-r--r-- docs/docsite/rst/dev_guide/testing_integration_legacy.rst | 108
-rw-r--r-- docs/docsite/rst/dev_guide/testing_pep8.rst | 24
-rw-r--r-- docs/docsite/rst/dev_guide/testing_running_locally.rst | 89
-rw-r--r-- docs/docsite/rst/dev_guide/testing_sanity.rst | 53
-rw-r--r-- docs/docsite/rst/dev_guide/testing_units.rst | 213
-rw-r--r-- docs/docsite/rst/dev_guide/testing_units_modules.rst | 563
-rw-r--r-- docs/docsite/rst/dev_guide/testing_validate-modules.rst | 165
-rw-r--r-- docs/docsite/rst/galaxy/dev_guide.rst | 246
-rw-r--r-- docs/docsite/rst/galaxy/user_guide.rst | 493
-rw-r--r-- docs/docsite/rst/images/cow.png | bin 0 -> 5777 bytes
-rw-r--r-- docs/docsite/rst/index.rst | 105
-rw-r--r-- docs/docsite/rst/installation_guide/index.rst | 13
-rw-r--r-- docs/docsite/rst/installation_guide/intro_configuration.rst | 59
-rw-r--r-- docs/docsite/rst/installation_guide/intro_installation.rst | 608
-rw-r--r-- docs/docsite/rst/inventory/implicit_localhost.rst | 35
-rw-r--r-- docs/docsite/rst/network/dev_guide/developing_plugins_network.rst | 265
-rw-r--r-- docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst | 819
-rw-r--r-- docs/docsite/rst/network/dev_guide/documenting_modules_network.rst | 52
-rw-r--r-- docs/docsite/rst/network/dev_guide/index.rst | 32
-rw-r--r-- docs/docsite/rst/network/getting_started/basic_concepts.rst | 10
-rw-r--r-- docs/docsite/rst/network/getting_started/first_inventory.rst | 431
-rw-r--r-- docs/docsite/rst/network/getting_started/first_playbook.rst | 212
-rw-r--r-- docs/docsite/rst/network/getting_started/index.rst | 34
-rw-r--r-- docs/docsite/rst/network/getting_started/intermediate_concepts.rst | 39
-rw-r--r-- docs/docsite/rst/network/getting_started/network_connection_options.rst | 48
-rw-r--r-- docs/docsite/rst/network/getting_started/network_differences.rst | 68
-rw-r--r-- docs/docsite/rst/network/getting_started/network_resources.rst | 46
-rw-r--r-- docs/docsite/rst/network/getting_started/network_roles.rst | 267
-rw-r--r-- docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml | 15
-rw-r--r-- docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml | 29
-rw-r--r-- docs/docsite/rst/network/index.rst | 20
-rw-r--r-- docs/docsite/rst/network/user_guide/cli_parsing.rst | 719
-rw-r--r-- docs/docsite/rst/network/user_guide/faq.rst | 76
-rw-r--r-- docs/docsite/rst/network/user_guide/index.rst | 25
-rw-r--r-- docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst | 483
-rw-r--r-- docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst | 828
-rw-r--r-- docs/docsite/rst/network/user_guide/network_resource_modules.rst | 196
-rw-r--r-- docs/docsite/rst/network/user_guide/network_working_with_command_output.rst | 122
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_ce.rst | 213
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_cnos.rst | 78
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_dellos10.rst | 80
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_dellos6.rst | 79
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_dellos9.rst | 79
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_enos.rst | 80
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_eos.rst | 140
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_eric_eccli.rst | 73
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_exos.rst | 108
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_frr.rst | 73
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_icx.rst | 77
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_index.rst | 121
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_ios.rst | 79
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_iosxr.rst | 130
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_ironware.rst | 80
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_junos.rst | 129
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_meraki.rst | 44
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst | 133
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_netvisor.rst | 78
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_nos.rst | 76
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_nxos.rst | 164
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_routeros.rst | 80
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_slxos.rst | 77
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_voss.rst | 78
-rw-r--r-- docs/docsite/rst/network/user_guide/platform_vyos.rst | 74
-rw-r--r-- docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt | 2
-rw-r--r-- docs/docsite/rst/plugins/action.rst | 56
-rw-r--r-- docs/docsite/rst/plugins/become.rst | 67
-rw-r--r-- docs/docsite/rst/plugins/cache.rst | 140
-rw-r--r-- docs/docsite/rst/plugins/callback.rst | 101
-rw-r--r-- docs/docsite/rst/plugins/cliconf.rst | 47
-rw-r--r-- docs/docsite/rst/plugins/connection.rst | 78
-rw-r--r-- docs/docsite/rst/plugins/httpapi.rst | 72
-rw-r--r-- docs/docsite/rst/plugins/index.html | 4
-rw-r--r-- docs/docsite/rst/plugins/inventory.rst | 162
-rw-r--r-- docs/docsite/rst/plugins/lookup.rst | 158
-rw-r--r-- docs/docsite/rst/plugins/netconf.rst | 47
-rw-r--r-- docs/docsite/rst/plugins/plugins.rst | 44
-rw-r--r-- docs/docsite/rst/plugins/shell.rst | 53
-rw-r--r-- docs/docsite/rst/plugins/strategy.rst | 79
-rw-r--r-- docs/docsite/rst/plugins/vars.rst | 79
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.0.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.10.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.3.rst | 12
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.4.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.5.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.6.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.7.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.8.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_2.9.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst | 13
-rw-r--r-- docs/docsite/rst/porting_guides/porting_guides.rst | 11
-rw-r--r-- docs/docsite/rst/reference_appendices/.rstcheck.cfg | 2
-rw-r--r-- docs/docsite/rst/reference_appendices/YAMLSyntax.rst | 242
-rw-r--r-- docs/docsite/rst/reference_appendices/automationhub.rst | 10
-rw-r--r-- docs/docsite/rst/reference_appendices/common_return_values.rst | 251
-rw-r--r-- docs/docsite/rst/reference_appendices/faq.rst | 766
-rw-r--r-- docs/docsite/rst/reference_appendices/general_precedence.rst | 140
-rw-r--r-- docs/docsite/rst/reference_appendices/glossary.rst | 501
-rw-r--r-- docs/docsite/rst/reference_appendices/interpreter_discovery.rst | 51
-rw-r--r-- docs/docsite/rst/reference_appendices/logging.rst | 14
-rw-r--r-- docs/docsite/rst/reference_appendices/module_utils.rst | 27
-rw-r--r-- docs/docsite/rst/reference_appendices/python_3_support.rst | 95
-rw-r--r-- docs/docsite/rst/reference_appendices/release_and_maintenance.rst | 33
-rw-r--r-- docs/docsite/rst/reference_appendices/special_variables.rst | 167
-rw-r--r-- docs/docsite/rst/reference_appendices/test_strategies.rst | 275
-rw-r--r-- docs/docsite/rst/reference_appendices/tower.rst | 13
-rw-r--r-- docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst | 46
-rw-r--r-- docs/docsite/rst/roadmap/ROADMAP_2_10.rst | 51
-rw-r--r-- docs/docsite/rst/roadmap/ROADMAP_2_5.rst | 142
-rw-r--r-- docs/docsite/rst/roadmap/ROADMAP_2_6.rst | 82
-rw-r--r-- docs/docsite/rst/roadmap/ROADMAP_2_7.rst | 109
-rw-r--r-- docs/docsite/rst/roadmap/ROADMAP_2_8.rst | 38
-rw-r--r-- docs/docsite/rst/roadmap/ROADMAP_2_9.rst | 39
-rw-r--r-- docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst | 27
-rw-r--r-- docs/docsite/rst/roadmap/ansible_roadmap_index.rst | 26
-rw-r--r-- docs/docsite/rst/roadmap/index.rst | 29
-rw-r--r-- docs/docsite/rst/roadmap/old_roadmap_index.rst | 19
-rw-r--r-- docs/docsite/rst/scenario_guides/cloud_guides.rst | 22
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_aci.rst | 661
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_alicloud.rst | 125
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_aws.rst | 281
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_azure.rst | 480
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_cloudstack.rst | 377
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_docker.rst | 330
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_gce.rst | 302
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_infoblox.rst | 288
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_kubernetes.rst | 63
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_meraki.rst | 193
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_online.rst | 41
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_oracle.rst | 103
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_packet.rst | 311
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_rax.rst | 810
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_scaleway.rst | 293
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_vagrant.rst | 136
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_vmware.rst | 33
-rw-r--r-- docs/docsite/rst/scenario_guides/guide_vultr.rst | 171
-rw-r--r-- docs/docsite/rst/scenario_guides/guides.rst | 43
-rw-r--r-- docs/docsite/rst/scenario_guides/network_guides.rst | 16
-rw-r--r-- docs/docsite/rst/scenario_guides/scenario_template.rst | 53
-rw-r--r-- docs/docsite/rst/scenario_guides/virt_guides.rst | 15
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst | 26
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst | 222
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst | 120
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst | 126
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst | 173
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst | 161
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst | 45
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst | 11
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst | 9
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst | 53
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst | 90
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst | 216
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst | 128
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst | 1183
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst | 9
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst | 44
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst | 16
-rw-r--r-- docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst | 102
-rw-r--r-- docs/docsite/rst/shared_snippets/basic_concepts.txt | 34
-rw-r--r-- docs/docsite/rst/shared_snippets/download_tarball_collections.txt | 8
-rw-r--r-- docs/docsite/rst/shared_snippets/galaxy_server_list.txt | 80
-rw-r--r-- docs/docsite/rst/shared_snippets/installing_collections.txt | 42
-rw-r--r-- docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt | 84
-rw-r--r-- docs/docsite/rst/shared_snippets/installing_multiple_collections.txt | 51
-rw-r--r-- docs/docsite/rst/shared_snippets/installing_older_collection.txt | 25
-rw-r--r-- docs/docsite/rst/user_guide/basic_concepts.rst | 12
-rw-r--r-- docs/docsite/rst/user_guide/become.rst | 702
-rw-r--r-- docs/docsite/rst/user_guide/collections_using.rst | 324
-rw-r--r-- docs/docsite/rst/user_guide/command_line_tools.rst | 20
-rw-r--r-- docs/docsite/rst/user_guide/complex_data_manipulation.rst | 243
-rw-r--r-- docs/docsite/rst/user_guide/connection_details.rst | 116
-rw-r--r-- docs/docsite/rst/user_guide/guide_rolling_upgrade.rst | 324
-rw-r--r-- docs/docsite/rst/user_guide/index.rst | 133
-rw-r--r-- docs/docsite/rst/user_guide/intro.rst | 15
-rw-r--r-- docs/docsite/rst/user_guide/intro_adhoc.rst | 206
-rw-r--r-- docs/docsite/rst/user_guide/intro_bsd.rst | 106
-rw-r--r-- docs/docsite/rst/user_guide/intro_dynamic_inventory.rst | 249
-rw-r--r-- docs/docsite/rst/user_guide/intro_getting_started.rst | 139
-rw-r--r-- docs/docsite/rst/user_guide/intro_inventory.rst | 788
-rw-r--r-- docs/docsite/rst/user_guide/intro_patterns.rst | 171
-rw-r--r-- docs/docsite/rst/user_guide/intro_windows.rst | 4
-rw-r--r-- docs/docsite/rst/user_guide/modules.rst | 36
-rw-r--r-- docs/docsite/rst/user_guide/modules_intro.rst | 52
-rw-r--r-- docs/docsite/rst/user_guide/modules_support.rst | 70
-rw-r--r-- docs/docsite/rst/user_guide/playbook_pathing.rst | 42
-rw-r--r-- docs/docsite/rst/user_guide/playbooks.rst | 21
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst | 112
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_async.rst | 161
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_best_practices.rst | 167
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_blocks.rst | 189
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_checkmode.rst | 97
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_conditionals.rst | 508
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_debugger.rst | 329
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_delegation.rst | 136
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_environment.rst | 141
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_error_handling.rst | 245
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_filters.rst | 1696
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst | 744
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_handlers.rst | 148
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_intro.rst | 151
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_lookups.rst | 37
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_loops.rst | 445
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_module_defaults.rst | 143
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_prompts.rst | 116
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_python_version.rst | 64
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_reuse.rst | 201
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_reuse_includes.rst | 32
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_reuse_roles.rst | 490
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_roles.rst | 19
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_special_topics.rst | 8
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_startnstep.rst | 40
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_strategies.rst | 216
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_tags.rst | 428
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_templating.rst | 55
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_tests.rst | 395
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_variables.rst | 466
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_vars_facts.rst | 680
-rw-r--r-- docs/docsite/rst/user_guide/playbooks_vault.rst | 6
-rw-r--r-- docs/docsite/rst/user_guide/plugin_filtering_config.rst | 26
-rw-r--r-- docs/docsite/rst/user_guide/quickstart.rst | 20
-rw-r--r-- docs/docsite/rst/user_guide/sample_setup.rst | 285
-rw-r--r-- docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt | 2
-rw-r--r-- docs/docsite/rst/user_guide/shared_snippets/with2loop.txt | 205
-rw-r--r-- docs/docsite/rst/user_guide/vault.rst | 660
-rw-r--r-- docs/docsite/rst/user_guide/windows.rst | 21
-rw-r--r-- docs/docsite/rst/user_guide/windows_dsc.rst | 505
-rw-r--r-- docs/docsite/rst/user_guide/windows_faq.rst | 236
-rw-r--r-- docs/docsite/rst/user_guide/windows_performance.rst | 61
-rw-r--r-- docs/docsite/rst/user_guide/windows_setup.rst | 573
-rw-r--r-- docs/docsite/rst/user_guide/windows_usage.rst | 513
-rw-r--r-- docs/docsite/rst/user_guide/windows_winrm.rst | 913
-rw-r--r-- docs/docsite/variables.dot | 38
-rw-r--r-- docs/man/.gitignore | 2
-rw-r--r-- docs/man/man1/ansible-config.1 | 135
-rw-r--r-- docs/man/man1/ansible-console.1 | 299
-rw-r--r-- docs/man/man1/ansible-doc.1 | 165
-rw-r--r-- docs/man/man1/ansible-galaxy.1 | 105
-rw-r--r-- docs/man/man1/ansible-inventory.1 | 213
-rw-r--r-- docs/man/man1/ansible-playbook.1 | 350
-rw-r--r-- docs/man/man1/ansible-pull.1 | 371
-rw-r--r-- docs/man/man1/ansible-vault.1 | 371
-rw-r--r-- docs/man/man1/ansible.1 | 341
-rw-r--r-- docs/man/man3/.gitdir | 0
-rw-r--r-- docs/templates/cli_rst.j2 | 161
-rw-r--r-- docs/templates/collections_galaxy_meta.rst.j2 | 98
-rw-r--r-- docs/templates/config.rst.j2 | 227
-rw-r--r-- docs/templates/man.j2 | 128
-rw-r--r-- docs/templates/modules_by_category.rst.j2 | 17
-rw-r--r-- docs/templates/playbooks_keywords.rst.j2 | 33
-rw-r--r-- examples/ansible.cfg | 525
-rw-r--r-- examples/hosts | 44
-rw-r--r-- examples/scripts/ConfigureRemotingForAnsible.ps1 | 453
-rw-r--r-- examples/scripts/upgrade_to_ps3.ps1 | 93
-rwxr-xr-x hacking/build-ansible.py | 103
-rw-r--r-- hacking/build_library/__init__.py | 0
-rw-r--r-- hacking/build_library/build_ansible/__init__.py | 0
-rw-r--r-- hacking/build_library/build_ansible/announce.py | 293
-rw-r--r-- hacking/build_library/build_ansible/change_detection.py | 33
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/collection_meta.py | 72
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/docs_build.py | 168
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/dump_config.py | 76
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/dump_keywords.py | 121
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/file_deprecated_issues.py | 153
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/generate_man.py | 303
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/porting_guide.py | 138
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/release_announcement.py | 78
-rw-r--r-- hacking/build_library/build_ansible/command_plugins/update_intersphinx.py | 101
-rw-r--r-- hacking/build_library/build_ansible/commands.py | 50
-rw-r--r-- hacking/build_library/build_ansible/errors.py | 19
-rw-r--r-- lib/ansible/__init__.py | 31
-rw-r--r-- lib/ansible/_vendor/__init__.py | 46
-rw-r--r-- lib/ansible/cli/__init__.py | 491
-rw-r--r-- lib/ansible/cli/adhoc.py | 175
-rw-r--r-- lib/ansible/cli/arguments/__init__.py | 5
-rw-r--r-- lib/ansible/cli/arguments/option_helpers.py | 369
-rw-r--r-- lib/ansible/cli/config.py | 188
-rw-r--r-- lib/ansible/cli/console.py | 454
-rw-r--r-- lib/ansible/cli/doc.py | 740
-rw-r--r-- lib/ansible/cli/galaxy.py | 1492
-rw-r--r-- lib/ansible/cli/inventory.py | 391
-rw-r--r-- lib/ansible/cli/playbook.py | 203
-rw-r--r-- lib/ansible/cli/pull.py | 336
-rw-r--r-- lib/ansible/cli/scripts/__init__.py | 0
-rwxr-xr-x lib/ansible/cli/scripts/ansible_cli_stub.py | 165
-rwxr-xr-x lib/ansible/cli/scripts/ansible_connection_cli_stub.py | 342
-rw-r--r-- lib/ansible/cli/vault.py | 457
-rw-r--r-- lib/ansible/collections/__init__.py | 29
-rw-r--r-- lib/ansible/collections/list.py | 101
-rw-r--r-- lib/ansible/compat/__init__.py | 26
-rw-r--r-- lib/ansible/compat/selectors/__init__.py | 30
-rw-r--r-- lib/ansible/config/__init__.py | 0
-rw-r--r-- lib/ansible/config/ansible_builtin_runtime.yml | 9662
-rw-r--r-- lib/ansible/config/base.yml | 2002
-rw-r--r-- lib/ansible/config/data.py | 43
-rw-r--r-- lib/ansible/config/manager.py | 588
-rw-r--r-- lib/ansible/constants.py | 227
-rw-r--r-- lib/ansible/context.py | 56
-rw-r--r-- lib/ansible/errors/__init__.py | 341
-rw-r--r-- lib/ansible/errors/yaml_strings.py | 140
-rw-r--r-- lib/ansible/executor/__init__.py | 20
-rw-r--r-- lib/ansible/executor/action_write_locks.py | 44
-rw-r--r-- lib/ansible/executor/discovery/__init__.py | 0
-rw-r--r-- lib/ansible/executor/discovery/python_target.py | 48
-rw-r--r-- lib/ansible/executor/interpreter_discovery.py | 203
-rw-r--r-- lib/ansible/executor/module_common.py | 1390
-rw-r--r-- lib/ansible/executor/play_iterator.py | 567
-rw-r--r-- lib/ansible/executor/playbook_executor.py | 311
-rw-r--r-- lib/ansible/executor/powershell/__init__.py | 0
-rw-r--r-- lib/ansible/executor/powershell/async_watchdog.ps1 | 110
-rw-r--r-- lib/ansible/executor/powershell/async_wrapper.ps1 | 172
-rw-r--r-- lib/ansible/executor/powershell/become_wrapper.ps1 | 155
-rw-r--r-- lib/ansible/executor/powershell/bootstrap_wrapper.ps1 | 13
-rw-r--r-- lib/ansible/executor/powershell/coverage_wrapper.ps1 | 196
-rw-r--r-- lib/ansible/executor/powershell/exec_wrapper.ps1 | 229
-rw-r--r-- lib/ansible/executor/powershell/module_manifest.py | 389
-rw-r--r-- lib/ansible/executor/powershell/module_powershell_wrapper.ps1 | 73
-rw-r--r-- lib/ansible/executor/powershell/module_script_wrapper.ps1 | 22
-rw-r--r-- lib/ansible/executor/powershell/module_wrapper.ps1 | 221
-rw-r--r-- lib/ansible/executor/process/__init__.py | 20
-rw-r--r-- lib/ansible/executor/process/worker.py | 223
-rw-r--r-- lib/ansible/executor/stats.py | 99
-rw-r--r-- lib/ansible/executor/task_executor.py | 1178
-rw-r--r-- lib/ansible/executor/task_queue_manager.py | 395
-rw-r--r-- lib/ansible/executor/task_result.py | 154
-rw-r--r-- lib/ansible/galaxy/__init__.py | 72
-rw-r--r-- lib/ansible/galaxy/api.py | 581
-rw-r--r-- lib/ansible/galaxy/collection.py | 1551
-rw-r--r-- lib/ansible/galaxy/data/apb/.travis.yml | 25
-rw-r--r-- lib/ansible/galaxy/data/apb/Dockerfile.j2 | 9
-rw-r--r-- lib/ansible/galaxy/data/apb/Makefile.j2 | 21
-rw-r--r-- lib/ansible/galaxy/data/apb/README.md | 38
-rw-r--r-- lib/ansible/galaxy/data/apb/apb.yml.j2 | 13
-rw-r--r-- lib/ansible/galaxy/data/apb/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/apb/handlers/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/meta/main.yml.j2 | 44
-rw-r--r-- lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 | 8
-rw-r--r-- lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 | 8
-rw-r--r-- lib/ansible/galaxy/data/apb/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/apb/tests/ansible.cfg | 2
-rw-r--r-- lib/ansible/galaxy/data/apb/tests/inventory | 3
-rw-r--r-- lib/ansible/galaxy/data/apb/tests/test.yml.j2 | 7
-rw-r--r-- lib/ansible/galaxy/data/apb/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/collections_galaxy_meta.yml | 110
-rw-r--r-- lib/ansible/galaxy/data/container/.travis.yml | 45
-rw-r--r-- lib/ansible/galaxy/data/container/README.md | 49
-rw-r--r-- lib/ansible/galaxy/data/container/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/container/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/container/handlers/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/container/meta/container.yml.j2 | 11
-rw-r--r-- lib/ansible/galaxy/data/container/meta/main.yml.j2 | 52
-rw-r--r-- lib/ansible/galaxy/data/container/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/container/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/container/tests/ansible.cfg | 2
-rw-r--r-- lib/ansible/galaxy/data/container/tests/inventory | 3
-rw-r--r-- lib/ansible/galaxy/data/container/tests/test.yml.j2 | 7
-rw-r--r-- lib/ansible/galaxy/data/container/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/collection/README.md.j2 | 3
-rw-r--r-- lib/ansible/galaxy/data/default/collection/docs/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 | 11
-rw-r--r-- lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 | 31
-rw-r--r-- lib/ansible/galaxy/data/default/collection/roles/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/role/.travis.yml | 29
-rw-r--r-- lib/ansible/galaxy/data/default/role/README.md | 38
-rw-r--r-- lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/meta/main.yml.j2 | 55
-rw-r--r-- lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/default/role/tests/inventory | 2
-rw-r--r-- lib/ansible/galaxy/data/default/role/tests/test.yml.j2 | 5
-rw-r--r-- lib/ansible/galaxy/data/default/role/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/network/.travis.yml | 29
-rw-r--r-- lib/ansible/galaxy/data/network/README.md | 38
-rw-r--r-- lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/defaults/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/network/files/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/network/library/example_command.py.j2 | 66
-rw-r--r-- lib/ansible/galaxy/data/network/library/example_config.py.j2 | 66
-rw-r--r-- lib/ansible/galaxy/data/network/library/example_facts.py.j2 | 66
-rw-r--r-- lib/ansible/galaxy/data/network/meta/main.yml.j2 | 52
-rw-r--r-- lib/ansible/galaxy/data/network/module_utils/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/tasks/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/data/network/templates/.git_keep | 0
-rw-r--r-- lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 | 40
-rw-r--r-- lib/ansible/galaxy/data/network/tests/inventory | 2
-rw-r--r-- lib/ansible/galaxy/data/network/tests/test.yml.j2 | 14
-rw-r--r-- lib/ansible/galaxy/data/network/vars/main.yml.j2 | 2
-rw-r--r-- lib/ansible/galaxy/role.py | 399
-rw-r--r-- lib/ansible/galaxy/token.py | 180
-rw-r--r-- lib/ansible/galaxy/user_agent.py | 23
-rw-r--r-- lib/ansible/inventory/__init__.py | 0
-rw-r--r-- lib/ansible/inventory/data.py | 280
-rw-r--r-- lib/ansible/inventory/group.py | 289
-rw-r--r-- lib/ansible/inventory/helpers.py | 40
-rw-r--r-- lib/ansible/inventory/host.py | 162
-rw-r--r-- lib/ansible/inventory/manager.py | 641
-rw-r--r-- lib/ansible/module_utils/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/_text.py | 15
l--------- lib/ansible/module_utils/ansible_release.py | 1
-rw-r--r-- lib/ansible/module_utils/api.py | 116
-rw-r--r-- lib/ansible/module_utils/basic.py | 2853
-rw-r--r-- lib/ansible/module_utils/common/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/common/_collections_compat.py | 46
-rw-r--r-- lib/ansible/module_utils/common/_json_compat.py | 16
-rw-r--r-- lib/ansible/module_utils/common/_utils.py | 40
-rw-r--r-- lib/ansible/module_utils/common/collections.py | 112
-rw-r--r-- lib/ansible/module_utils/common/dict_transformations.py | 140
-rw-r--r-- lib/ansible/module_utils/common/file.py | 202
-rw-r--r-- lib/ansible/module_utils/common/json.py | 82
-rw-r--r-- lib/ansible/module_utils/common/network.py | 161
-rw-r--r-- lib/ansible/module_utils/common/parameters.py | 197
-rw-r--r-- lib/ansible/module_utils/common/process.py | 44
-rw-r--r-- lib/ansible/module_utils/common/removed.py | 63
-rw-r--r-- lib/ansible/module_utils/common/sys_info.py | 159
-rw-r--r-- lib/ansible/module_utils/common/text/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/common/text/converters.py | 322
-rw-r--r-- lib/ansible/module_utils/common/text/formatters.py | 114
-rw-r--r-- lib/ansible/module_utils/common/validation.py | 547
-rw-r--r-- lib/ansible/module_utils/common/warnings.py | 40
-rw-r--r-- lib/ansible/module_utils/compat/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/compat/_selectors2.py | 655
-rw-r--r-- lib/ansible/module_utils/compat/importlib.py | 18
-rw-r--r-- lib/ansible/module_utils/compat/paramiko.py | 17
-rw-r--r-- lib/ansible/module_utils/compat/selectors.py | 56
-rw-r--r-- lib/ansible/module_utils/connection.py | 217
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.AccessToken.cs | 460
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Basic.cs | 1476
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Become.cs | 655
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Privilege.cs | 443
-rw-r--r-- lib/ansible/module_utils/csharp/Ansible.Process.cs | 461
-rw-r--r-- lib/ansible/module_utils/csharp/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/distro/__init__.py | 46
-rw-r--r-- lib/ansible/module_utils/distro/_distro.py | 1271
-rw-r--r-- lib/ansible/module_utils/facts/__init__.py | 34
-rw-r--r-- lib/ansible/module_utils/facts/ansible_collector.py | 142
-rw-r--r-- lib/ansible/module_utils/facts/collector.py | 400
-rw-r--r-- lib/ansible/module_utils/facts/compat.py | 87
-rw-r--r-- lib/ansible/module_utils/facts/default_collectors.py | 172
-rw-r--r-- lib/ansible/module_utils/facts/hardware/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/hardware/aix.py | 252
-rw-r--r-- lib/ansible/module_utils/facts/hardware/base.py | 66
-rw-r--r-- lib/ansible/module_utils/facts/hardware/darwin.py | 131
-rw-r--r-- lib/ansible/module_utils/facts/hardware/dragonfly.py | 26
-rw-r--r-- lib/ansible/module_utils/facts/hardware/freebsd.py | 214
-rw-r--r-- lib/ansible/module_utils/facts/hardware/hpux.py | 165
-rw-r--r-- lib/ansible/module_utils/facts/hardware/hurd.py | 53
-rw-r--r-- lib/ansible/module_utils/facts/hardware/linux.py | 847
-rw-r--r-- lib/ansible/module_utils/facts/hardware/netbsd.py | 162
-rw-r--r-- lib/ansible/module_utils/facts/hardware/openbsd.py | 170
-rw-r--r-- lib/ansible/module_utils/facts/hardware/sunos.py | 287
-rw-r--r-- lib/ansible/module_utils/facts/namespace.py | 51
-rw-r--r-- lib/ansible/module_utils/facts/network/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/network/aix.py | 145
-rw-r--r-- lib/ansible/module_utils/facts/network/base.py | 70
-rw-r--r-- lib/ansible/module_utils/facts/network/darwin.py | 49
-rw-r--r-- lib/ansible/module_utils/facts/network/dragonfly.py | 33
-rw-r--r-- lib/ansible/module_utils/facts/network/fc_wwn.py | 83
-rw-r--r-- lib/ansible/module_utils/facts/network/freebsd.py | 33
-rw-r--r-- lib/ansible/module_utils/facts/network/generic_bsd.py | 310
-rw-r--r-- lib/ansible/module_utils/facts/network/hpux.py | 82
-rw-r--r-- lib/ansible/module_utils/facts/network/hurd.py | 87
-rw-r--r-- lib/ansible/module_utils/facts/network/iscsi.py | 113
-rw-r--r-- lib/ansible/module_utils/facts/network/linux.py | 322
-rw-r--r-- lib/ansible/module_utils/facts/network/netbsd.py | 48
-rw-r--r-- lib/ansible/module_utils/facts/network/nvme.py | 55
-rw-r--r-- lib/ansible/module_utils/facts/network/openbsd.py | 42
-rw-r--r-- lib/ansible/module_utils/facts/network/sunos.py | 116
-rw-r--r-- lib/ansible/module_utils/facts/other/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/other/facter.py | 85
-rw-r--r-- lib/ansible/module_utils/facts/other/ohai.py | 72
-rw-r--r-- lib/ansible/module_utils/facts/packages.py | 86
-rw-r--r-- lib/ansible/module_utils/facts/sysctl.py | 38
-rw-r--r-- lib/ansible/module_utils/facts/system/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/system/apparmor.py | 39
-rw-r--r-- lib/ansible/module_utils/facts/system/caps.py | 55
-rw-r--r-- lib/ansible/module_utils/facts/system/chroot.py | 47
-rw-r--r-- lib/ansible/module_utils/facts/system/cmdline.py | 79
-rw-r--r-- lib/ansible/module_utils/facts/system/date_time.py | 62
-rw-r--r-- lib/ansible/module_utils/facts/system/distribution.py | 681
-rw-r--r-- lib/ansible/module_utils/facts/system/dns.py | 67
-rw-r--r-- lib/ansible/module_utils/facts/system/env.py | 37
-rw-r--r-- lib/ansible/module_utils/facts/system/fips.py | 37
-rw-r--r-- lib/ansible/module_utils/facts/system/local.py | 92
-rw-r--r-- lib/ansible/module_utils/facts/system/lsb.py | 106
-rw-r--r-- lib/ansible/module_utils/facts/system/pkg_mgr.py | 141
-rw-r--r-- lib/ansible/module_utils/facts/system/platform.py | 97
-rw-r--r-- lib/ansible/module_utils/facts/system/python.py | 60
-rw-r--r-- lib/ansible/module_utils/facts/system/selinux.py | 91
-rw-r--r-- lib/ansible/module_utils/facts/system/service_mgr.py | 152
-rw-r--r-- lib/ansible/module_utils/facts/system/ssh_pub_keys.py | 54
-rw-r--r-- lib/ansible/module_utils/facts/system/user.py | 50
-rw-r--r-- lib/ansible/module_utils/facts/timeout.py | 68
-rw-r--r-- lib/ansible/module_utils/facts/utils.py | 79
-rw-r--r-- lib/ansible/module_utils/facts/virtual/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/facts/virtual/base.py | 70
-rw-r--r-- lib/ansible/module_utils/facts/virtual/dragonfly.py | 25
-rw-r--r-- lib/ansible/module_utils/facts/virtual/freebsd.py | 57
-rw-r--r-- lib/ansible/module_utils/facts/virtual/hpux.py | 62
-rw-r--r-- lib/ansible/module_utils/facts/virtual/linux.py | 251
-rw-r--r-- lib/ansible/module_utils/facts/virtual/netbsd.py | 50
-rw-r--r-- lib/ansible/module_utils/facts/virtual/openbsd.py | 64
-rw-r--r-- lib/ansible/module_utils/facts/virtual/sunos.py | 120
-rw-r--r-- lib/ansible/module_utils/facts/virtual/sysctl.py | 69
-rw-r--r-- lib/ansible/module_utils/json_utils.py | 79
-rw-r--r-- lib/ansible/module_utils/parsing/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/parsing/convert_bool.py | 29
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 | 370
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 | 75
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1 | 33
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 | 65
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 | 122
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 | 60
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 | 377
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 | 454
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1 | 99
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1 | 93
-rw-r--r-- lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 | 514
-rw-r--r-- lib/ansible/module_utils/powershell/__init__.py | 0
-rw-r--r-- lib/ansible/module_utils/pycompat24.py | 91
-rw-r--r-- lib/ansible/module_utils/service.py | 274
-rw-r--r-- lib/ansible/module_utils/six/__init__.py | 962
-rw-r--r-- lib/ansible/module_utils/splitter.py | 219
-rw-r--r-- lib/ansible/module_utils/urls.py | 1721
-rw-r--r-- lib/ansible/module_utils/yumdnf.py | 178
-rw-r--r-- lib/ansible/modules/__init__.py | 0
-rw-r--r-- lib/ansible/modules/add_host.py | 87
-rw-r--r-- lib/ansible/modules/apt.py | 1229
-rw-r--r-- lib/ansible/modules/apt_key.py | 356
-rw-r--r-- lib/ansible/modules/apt_repository.py | 600
-rw-r--r-- lib/ansible/modules/assemble.py | 258
-rw-r--r-- lib/ansible/modules/assert.py | 83
-rw-r--r-- lib/ansible/modules/async_status.py | 140
-rw-r--r-- lib/ansible/modules/async_wrapper.py | 351
-rw-r--r-- lib/ansible/modules/blockinfile.py | 352
-rw-r--r-- lib/ansible/modules/command.py | 373
-rw-r--r-- lib/ansible/modules/copy.py | 799
-rw-r--r-- lib/ansible/modules/cron.py | 773
-rw-r--r-- lib/ansible/modules/debconf.py | 201
-rw-r--r-- lib/ansible/modules/debug.py | 78
-rw-r--r-- lib/ansible/modules/dnf.py | 1330
-rw-r--r-- lib/ansible/modules/dpkg_selections.py | 74
-rw-r--r-- lib/ansible/modules/expect.py | 241
-rw-r--r-- lib/ansible/modules/fail.py | 42
-rw-r--r-- lib/ansible/modules/fetch.py | 105
-rw-r--r-- lib/ansible/modules/file.py | 926
-rw-r--r-- lib/ansible/modules/find.py | 467
-rw-r--r-- lib/ansible/modules/gather_facts.py | 43
-rw-r--r-- lib/ansible/modules/get_url.py | 650
-rw-r--r-- lib/ansible/modules/getent.py | 157
-rw-r--r-- lib/ansible/modules/git.py | 1277
-rw-r--r-- lib/ansible/modules/group.py | 656
-rw-r--r-- lib/ansible/modules/group_by.py | 58
-rw-r--r-- lib/ansible/modules/hostname.py | 863
-rw-r--r-- lib/ansible/modules/import_playbook.py | 58
-rw-r--r-- lib/ansible/modules/import_role.py | 93
-rw-r--r-- lib/ansible/modules/import_tasks.py | 60
-rw-r--r-- lib/ansible/modules/include.py | 83
-rw-r--r-- lib/ansible/modules/include_role.py | 125
-rw-r--r-- lib/ansible/modules/include_tasks.py | 90
-rw-r--r-- lib/ansible/modules/include_vars.py | 164
-rw-r--r-- lib/ansible/modules/iptables.py | 794
-rw-r--r-- lib/ansible/modules/known_hosts.py | 351
-rw-r--r-- lib/ansible/modules/lineinfile.py | 577
-rw-r--r-- lib/ansible/modules/meta.py | 98
-rw-r--r-- lib/ansible/modules/package.py | 64
-rw-r--r-- lib/ansible/modules/package_facts.py | 449
-rw-r--r-- lib/ansible/modules/pause.py | 96
-rw-r--r-- lib/ansible/modules/ping.py | 81
-rw-r--r-- lib/ansible/modules/pip.py | 782
-rw-r--r-- lib/ansible/modules/raw.py | 76
-rw-r--r-- lib/ansible/modules/reboot.py | 105
-rw-r--r-- lib/ansible/modules/replace.py | 298
-rw-r--r-- lib/ansible/modules/rpm_key.py | 240
-rw-r--r-- lib/ansible/modules/script.py | 87
-rw-r--r-- lib/ansible/modules/service.py | 1671
-rw-r--r-- lib/ansible/modules/service_facts.py | 256
-rw-r--r-- lib/ansible/modules/set_fact.py | 80
-rw-r--r-- lib/ansible/modules/set_stats.py | 62
-rw-r--r-- lib/ansible/modules/setup.py | 182
-rw-r--r-- lib/ansible/modules/shell.py | 208
-rw-r--r-- lib/ansible/modules/slurp.py | 87
-rw-r--r-- lib/ansible/modules/stat.py | 540
-rw-r--r-- lib/ansible/modules/subversion.py | 342
-rw-r--r-- lib/ansible/modules/systemd.py | 561
-rw-r--r-- lib/ansible/modules/sysvinit.py | 349
-rw-r--r-- lib/ansible/modules/tempfile.py | 117
-rw-r--r-- lib/ansible/modules/template.py | 90
-rw-r--r-- lib/ansible/modules/unarchive.py | 910
-rw-r--r-- lib/ansible/modules/uri.py | 774
-rw-r--r-- lib/ansible/modules/user.py | 3062
-rw-r--r-- lib/ansible/modules/wait_for.py | 669
-rw-r--r-- lib/ansible/modules/wait_for_connection.py | 107
-rw-r--r-- lib/ansible/modules/yum.py | 1698
-rw-r--r-- lib/ansible/modules/yum_repository.py | 679
-rw-r--r-- lib/ansible/parsing/__init__.py | 20
-rw-r--r-- lib/ansible/parsing/ajson.py | 42
-rw-r--r-- lib/ansible/parsing/dataloader.py | 454
-rw-r--r-- lib/ansible/parsing/mod_args.py | 347
-rw-r--r-- lib/ansible/parsing/plugin_docs.py | 113
-rw-r--r-- lib/ansible/parsing/quoting.py | 31
-rw-r--r-- lib/ansible/parsing/splitter.py | 287
-rw-r--r-- lib/ansible/parsing/utils/__init__.py | 20
-rw-r--r-- lib/ansible/parsing/utils/addresses.py | 216
-rw-r--r-- lib/ansible/parsing/utils/jsonify.py | 38
-rw-r--r-- lib/ansible/parsing/utils/yaml.py | 83
-rw-r--r-- lib/ansible/parsing/vault/__init__.py | 1380
-rw-r--r-- lib/ansible/parsing/yaml/__init__.py | 20
-rw-r--r-- lib/ansible/parsing/yaml/constructor.py | 169
-rw-r--r-- lib/ansible/parsing/yaml/dumper.py | 92
-rw-r--r-- lib/ansible/parsing/yaml/loader.py | 52
-rw-r--r-- lib/ansible/parsing/yaml/objects.py | 379
-rw-r--r-- lib/ansible/playbook/__init__.py | 119
-rw-r--r-- lib/ansible/playbook/attribute.py | 119
-rw-r--r-- lib/ansible/playbook/base.py | 630
-rw-r--r-- lib/ansible/playbook/block.py | 424
-rw-r--r-- lib/ansible/playbook/collectionsearch.py | 61
-rw-r--r-- lib/ansible/playbook/conditional.py | 224
-rw-r--r-- lib/ansible/playbook/handler.py | 59
-rw-r--r-- lib/ansible/playbook/handler_task_include.py | 39
-rw-r--r-- lib/ansible/playbook/helpers.py | 396
-rw-r--r-- lib/ansible/playbook/included_file.py | 211
-rw-r--r-- lib/ansible/playbook/loop_control.py | 40
-rw-r--r-- lib/ansible/playbook/play.py | 343
-rw-r--r-- lib/ansible/playbook/play_context.py | 407
-rw-r--r-- lib/ansible/playbook/playbook_include.py | 161
-rw-r--r-- lib/ansible/playbook/role/__init__.py | 528
-rw-r--r-- lib/ansible/playbook/role/definition.py | 240
-rw-r--r-- lib/ansible/playbook/role/include.py | 60
-rw-r--r-- lib/ansible/playbook/role/metadata.py | 128
-rw-r--r-- lib/ansible/playbook/role/requirement.py | 130
-rw-r--r-- lib/ansible/playbook/role_include.py | 176
-rw-r--r-- lib/ansible/playbook/taggable.py | 89
-rw-r--r-- lib/ansible/playbook/task.py | 543
-rw-r--r-- lib/ansible/playbook/task_include.py | 156
-rw-r--r-- lib/ansible/plugins/__init__.py | 89
-rw-r--r-- lib/ansible/plugins/action/__init__.py | 1232
-rw-r--r-- lib/ansible/plugins/action/add_host.py | 97
-rw-r--r-- lib/ansible/plugins/action/assemble.py | 166
-rw-r--r-- lib/ansible/plugins/action/assert.py | 94
-rw-r--r-- lib/ansible/plugins/action/async_status.py | 46
-rw-r--r-- lib/ansible/plugins/action/command.py | 32
-rw-r--r-- lib/ansible/plugins/action/copy.py | 599
-rw-r--r-- lib/ansible/plugins/action/debug.py | 80
-rw-r--r-- lib/ansible/plugins/action/fail.py | 43
-rw-r--r-- lib/ansible/plugins/action/fetch.py | 199
-rw-r--r-- lib/ansible/plugins/action/gather_facts.py | 138
-rw-r--r-- lib/ansible/plugins/action/group_by.py | 51
-rw-r--r-- lib/ansible/plugins/action/include_vars.py | 278
-rw-r--r-- lib/ansible/plugins/action/normal.py | 59
-rw-r--r-- lib/ansible/plugins/action/package.py | 94
-rw-r--r-- lib/ansible/plugins/action/pause.py | 301
-rw-r--r-- lib/ansible/plugins/action/raw.py | 50
-rw-r--r-- lib/ansible/plugins/action/reboot.py | 446
-rw-r--r-- lib/ansible/plugins/action/script.py | 152
-rw-r--r-- lib/ansible/plugins/action/service.py | 101
-rw-r--r-- lib/ansible/plugins/action/set_fact.py | 61
-rw-r--r-- lib/ansible/plugins/action/set_stats.py | 77
-rw-r--r-- lib/ansible/plugins/action/shell.py | 27
-rw-r--r-- lib/ansible/plugins/action/template.py | 188
-rw-r--r-- lib/ansible/plugins/action/unarchive.py | 111
-rw-r--r-- lib/ansible/plugins/action/uri.py | 95
-rw-r--r-- lib/ansible/plugins/action/wait_for_connection.py | 120
-rw-r--r-- lib/ansible/plugins/action/yum.py | 103
-rw-r--r-- lib/ansible/plugins/become/__init__.py | 107
-rw-r--r-- lib/ansible/plugins/become/runas.py | 70
-rw-r--r-- lib/ansible/plugins/become/su.py | 158
-rw-r--r-- lib/ansible/plugins/become/sudo.py | 104
-rw-r--r-- lib/ansible/plugins/cache/__init__.py | 376
-rw-r--r-- lib/ansible/plugins/cache/base.py | 21
-rw-r--r-- lib/ansible/plugins/cache/jsonfile.py | 63
-rw-r--r-- lib/ansible/plugins/cache/memory.py | 53
-rw-r--r-- lib/ansible/plugins/callback/__init__.py | 441
-rw-r--r-- lib/ansible/plugins/callback/default.py | 426
-rw-r--r-- lib/ansible/plugins/callback/junit.py | 382
-rw-r--r-- lib/ansible/plugins/callback/minimal.py | 78
-rw-r--r-- lib/ansible/plugins/callback/oneline.py | 77
-rw-r--r-- lib/ansible/plugins/callback/tree.py | 69
-rw-r--r-- lib/ansible/plugins/cliconf/__init__.py | 477
-rw-r--r-- lib/ansible/plugins/connection/__init__.py | 383
-rw-r--r-- lib/ansible/plugins/connection/local.py | 162
-rw-r--r-- lib/ansible/plugins/connection/paramiko_ssh.py | 607
-rw-r--r-- lib/ansible/plugins/connection/psrp.py | 954
-rw-r--r-- lib/ansible/plugins/connection/ssh.py | 1285
-rw-r--r-- lib/ansible/plugins/connection/winrm.py | 712
-rw-r--r-- lib/ansible/plugins/doc_fragments/__init__.py | 0
-rw-r--r-- lib/ansible/plugins/doc_fragments/backup.py | 20
-rw-r--r-- lib/ansible/plugins/doc_fragments/constructed.py | 32
-rw-r--r-- lib/ansible/plugins/doc_fragments/decrypt.py | 20
-rw-r--r-- lib/ansible/plugins/doc_fragments/default_callback.py | 85
-rw-r--r-- lib/ansible/plugins/doc_fragments/files.py | 80
-rw-r--r-- lib/ansible/plugins/doc_fragments/inventory_cache.py | 74
-rw-r--r-- lib/ansible/plugins/doc_fragments/return_common.py | 42
-rw-r--r-- lib/ansible/plugins/doc_fragments/shell_common.py | 76
-rw-r--r-- lib/ansible/plugins/doc_fragments/shell_windows.py | 49
-rw-r--r-- lib/ansible/plugins/doc_fragments/template_common.py | 114
-rw-r--r-- lib/ansible/plugins/doc_fragments/url.py | 66
-rw-r--r-- lib/ansible/plugins/doc_fragments/url_windows.py | 150
-rw-r--r-- lib/ansible/plugins/doc_fragments/validate.py | 19
-rw-r--r-- lib/ansible/plugins/doc_fragments/vars_plugin_staging.py | 24
-rw-r--r-- lib/ansible/plugins/filter/__init__.py | 3
-rw-r--r-- lib/ansible/plugins/filter/core.py | 663
-rw-r--r-- lib/ansible/plugins/filter/mathstuff.py | 267
-rw-r--r-- lib/ansible/plugins/filter/urls.py | 69
-rw-r--r-- lib/ansible/plugins/filter/urlsplit.py | 35
-rw-r--r-- lib/ansible/plugins/httpapi/__init__.py | 87
-rw-r--r-- lib/ansible/plugins/inventory/__init__.py | 449
-rw-r--r-- lib/ansible/plugins/inventory/advanced_host_list.py | 63
-rw-r--r-- lib/ansible/plugins/inventory/auto.py | 63
-rw-r--r-- lib/ansible/plugins/inventory/constructed.py | 137
-rw-r--r-- lib/ansible/plugins/inventory/generator.py | 136
-rw-r--r-- lib/ansible/plugins/inventory/host_list.py | 66
-rw-r--r-- lib/ansible/plugins/inventory/ini.py | 394
-rw-r--r-- lib/ansible/plugins/inventory/script.py | 215
-rw-r--r-- lib/ansible/plugins/inventory/toml.py | 262
-rw-r--r-- lib/ansible/plugins/inventory/yaml.py | 177
-rw-r--r-- lib/ansible/plugins/loader.py | 1275
-rw-r--r-- lib/ansible/plugins/lookup/__init__.py | 125
-rw-r--r-- lib/ansible/plugins/lookup/config.py | 87
-rw-r--r-- lib/ansible/plugins/lookup/csvfile.py | 166
-rw-r--r-- lib/ansible/plugins/lookup/dict.py | 76
-rw-r--r-- lib/ansible/plugins/lookup/env.py | 60
-rw-r--r-- lib/ansible/plugins/lookup/file.py | 86
-rw-r--r-- lib/ansible/plugins/lookup/fileglob.py | 82
-rw-r--r-- lib/ansible/plugins/lookup/first_found.py | 176
-rw-r--r-- lib/ansible/plugins/lookup/indexed_items.py | 52
-rw-r--r-- lib/ansible/plugins/lookup/ini.py | 165
-rw-r--r-- lib/ansible/plugins/lookup/inventory_hostnames.py | 74
-rw-r--r-- lib/ansible/plugins/lookup/items.py | 73
-rw-r--r-- lib/ansible/plugins/lookup/lines.py | 62
-rw-r--r-- lib/ansible/plugins/lookup/list.py | 44
-rw-r--r-- lib/ansible/plugins/lookup/nested.py | 85
-rw-r--r-- lib/ansible/plugins/lookup/password.py | 343
-rw-r--r-- lib/ansible/plugins/lookup/pipe.py | 76
-rw-r--r-- lib/ansible/plugins/lookup/random_choice.py | 53
-rw-r--r-- lib/ansible/plugins/lookup/sequence.py | 268
-rw-r--r-- lib/ansible/plugins/lookup/subelements.py | 169
-rw-r--r-- lib/ansible/plugins/lookup/template.py | 114
-rw-r--r-- lib/ansible/plugins/lookup/together.py | 67
-rw-r--r-- lib/ansible/plugins/lookup/unvault.py | 63
-rw-r--r-- lib/ansible/plugins/lookup/url.py | 221
-rw-r--r-- lib/ansible/plugins/lookup/varnames.py | 80
-rw-r--r-- lib/ansible/plugins/lookup/vars.py | 106
-rw-r--r-- lib/ansible/plugins/netconf/__init__.py | 373
-rw-r--r-- lib/ansible/plugins/shell/__init__.py | 227
-rw-r--r-- lib/ansible/plugins/shell/cmd.py | 58
-rw-r--r-- lib/ansible/plugins/shell/powershell.py | 288
-rw-r--r-- lib/ansible/plugins/shell/sh.py | 79
-rw-r--r-- lib/ansible/plugins/strategy/__init__.py | 1384
-rw-r--r-- lib/ansible/plugins/strategy/debug.py | 37
-rw-r--r-- lib/ansible/plugins/strategy/free.py | 284
-rw-r--r-- lib/ansible/plugins/strategy/host_pinned.py | 45
-rw-r--r-- lib/ansible/plugins/strategy/linear.py | 461
-rw-r--r-- lib/ansible/plugins/terminal/__init__.py | 134
-rw-r--r-- lib/ansible/plugins/test/__init__.py | 3
-rw-r--r-- lib/ansible/plugins/test/core.py | 250
-rw-r--r-- lib/ansible/plugins/test/files.py | 48
-rw-r--r-- lib/ansible/plugins/test/mathstuff.py | 62
-rw-r--r-- lib/ansible/plugins/vars/__init__.py | 41
-rw-r--r-- lib/ansible/plugins/vars/host_group_vars.py | 115
-rw-r--r-- lib/ansible/release.py | 24
-rw-r--r-- lib/ansible/template/__init__.py | 1096
-rw-r--r-- lib/ansible/template/native_helpers.py | 91
-rw-r--r-- lib/ansible/template/safe_eval.py | 166
-rw-r--r-- lib/ansible/template/template.py | 43
-rw-r--r-- lib/ansible/template/vars.py | 130
-rw-r--r-- lib/ansible/utils/__init__.py | 20
-rw-r--r-- lib/ansible/utils/cmd_functions.py | 82
-rw-r--r-- lib/ansible/utils/collection_loader/__init__.py | 23
-rw-r--r-- lib/ansible/utils/collection_loader/_collection_config.py | 101
-rw-r--r-- lib/ansible/utils/collection_loader/_collection_finder.py | 979
-rw-r--r-- lib/ansible/utils/collection_loader/_collection_meta.py | 33
-rw-r--r-- lib/ansible/utils/color.py | 127
-rw-r--r-- lib/ansible/utils/context_objects.py | 92
-rw-r--r-- lib/ansible/utils/display.py | 438
-rw-r--r-- lib/ansible/utils/encrypt.py | 197
-rw-r--r-- lib/ansible/utils/fqcn.py | 33
-rw-r--r-- lib/ansible/utils/galaxy.py | 94
-rw-r--r-- lib/ansible/utils/hashing.py | 98
-rw-r--r-- lib/ansible/utils/helpers.py | 51
-rw-r--r-- lib/ansible/utils/jsonrpc.py | 113
-rw-r--r-- lib/ansible/utils/listify.py | 40
-rw-r--r-- lib/ansible/utils/multiprocessing.py | 21
-rw-r--r-- lib/ansible/utils/path.py | 157
-rw-r--r-- lib/ansible/utils/plugin_docs.py | 258
-rw-r--r-- lib/ansible/utils/py3compat.py | 69
-rw-r--r-- lib/ansible/utils/sentinel.py | 68
-rw-r--r-- lib/ansible/utils/shlex.py | 34
-rw-r--r-- lib/ansible/utils/singleton.py | 29
-rw-r--r-- lib/ansible/utils/ssh_functions.py | 65
-rw-r--r-- lib/ansible/utils/unicode.py | 33
-rw-r--r-- lib/ansible/utils/unsafe_proxy.py | 139
-rw-r--r-- lib/ansible/utils/vars.py | 295
-rw-r--r-- lib/ansible/utils/version.py | 272
-rw-r--r-- lib/ansible/vars/__init__.py | 0
-rw-r--r-- lib/ansible/vars/clean.py | 176
-rw-r--r-- lib/ansible/vars/fact_cache.py | 111
-rw-r--r-- lib/ansible/vars/hostvars.py | 154
-rw-r--r-- lib/ansible/vars/manager.py | 719
-rw-r--r-- lib/ansible/vars/plugins.py | 95
-rw-r--r-- lib/ansible/vars/reserved.py | 81
-rw-r--r-- licenses/Apache-License.txt | 202
-rw-r--r-- licenses/MIT-license.txt | 14
-rw-r--r-- licenses/PSF-license.txt | 48
-rw-r--r-- licenses/simplified_bsd.txt | 8
-rw-r--r-- packaging/arch/README.md | 8
-rw-r--r-- packaging/debian/Dockerfile | 20
-rw-r--r-- packaging/debian/README.md | 39
-rw-r--r-- packaging/debian/ansible-base.dirs | 4
-rw-r--r-- packaging/debian/ansible-base.install | 15
-rw-r--r-- packaging/debian/ansible-test.install | 2
-rw-r--r-- packaging/debian/changelog | 5
-rw-r--r-- packaging/debian/compat | 1
-rw-r--r-- packaging/debian/control | 29
-rw-r--r-- packaging/debian/copyright | 26
-rw-r--r-- packaging/debian/docs | 1
-rw-r--r-- packaging/debian/pycompat | 1
-rwxr-xr-x packaging/debian/rules | 17
-rw-r--r-- packaging/gentoo/README.md | 3
-rw-r--r-- packaging/macports/.gitignore | 2
-rw-r--r-- packaging/macports/README.md | 39
-rw-r--r-- packaging/macports/sysutils/ansible/Portfile | 67
-rw-r--r-- packaging/release/Makefile | 61
-rw-r--r-- packaging/release/tests/__init__.py | 0
-rw-r--r-- packaging/release/tests/version_helper_test.py | 47
-rw-r--r-- packaging/release/versionhelper/__init__.py | 0
-rw-r--r-- packaging/release/versionhelper/version_helper.py | 195
-rwxr-xr-x packaging/sdist/check-link-behavior.py | 51
-rw-r--r-- requirements.txt | 9
-rw-r--r-- setup.py | 428
-rw-r--r-- test/ansible_test/Makefile | 13
-rw-r--r-- test/ansible_test/unit/test_diff.py | 105
-rw-r--r-- test/ansible_test/validate-modules-unit/test_validate_modules_regex.py | 43
-rw-r--r-- test/integration/network-integration.cfg | 14
-rw-r--r-- test/integration/network-integration.requirements.txt | 1
-rw-r--r-- test/integration/targets/add_host/aliases | 1
-rw-r--r-- test/integration/targets/add_host/tasks/main.yml | 159
-rw-r--r-- test/integration/targets/ansiballz_python/aliases | 2
-rw-r--r-- test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py | 31
-rw-r--r-- test/integration/targets/ansiballz_python/library/custom_module.py | 19
-rw-r--r-- test/integration/targets/ansiballz_python/library/sys_check.py | 23
-rw-r--r-- test/integration/targets/ansiballz_python/module_utils/custom_util.py | 6
-rw-r--r-- test/integration/targets/ansiballz_python/tasks/main.yml | 68
-rw-r--r-- test/integration/targets/ansible-doc/aliases | 1
-rw-r--r-- test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json | 30
-rw-r--r-- test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py | 49
-rw-r--r--test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py35
-rw-r--r--test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py37
-rw-r--r--test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py26
-rw-r--r--test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py13
-rw-r--r--test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py27
-rw-r--r--test/integration/targets/ansible-doc/fakemodule.output15
-rw-r--r--test/integration/targets/ansible-doc/inventory1
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs.py39
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_missing_description.py40
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_no_metadata.py35
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_no_status.py38
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py39
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py40
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_removed_status.py39
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_returns.py56
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_returns_broken.py40
-rw-r--r--test/integration/targets/ansible-doc/library/test_docs_suboptions.py70
-rw-r--r--test/integration/targets/ansible-doc/library/test_empty.py0
-rw-r--r--test/integration/targets/ansible-doc/library/test_no_docs.py23
-rw-r--r--test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py18
-rw-r--r--test/integration/targets/ansible-doc/library/test_no_docs_no_status.py22
-rw-r--r--test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py23
-rwxr-xr-xtest/integration/targets/ansible-doc/runme.sh42
-rw-r--r--test/integration/targets/ansible-doc/test.yml138
-rw-r--r--test/integration/targets/ansible-doc/test_docs_returns.output37
-rw-r--r--test/integration/targets/ansible-doc/test_docs_suboptions.output43
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/aliases3
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml3
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml47
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml7
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml20
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml41
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml14
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml15
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml31
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml14
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml54
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml19
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml27
-rw-r--r--test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml33
-rw-r--r--test/integration/targets/ansible-galaxy-collection/aliases3
-rw-r--r--test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py84
-rw-r--r--test/integration/targets/ansible-galaxy-collection/library/setup_collections.py169
-rw-r--r--test/integration/targets/ansible-galaxy-collection/meta/main.yml3
-rw-r--r--test/integration/targets/ansible-galaxy-collection/tasks/build.yml53
-rw-r--r--test/integration/targets/ansible-galaxy-collection/tasks/download.yml142
-rw-r--r--test/integration/targets/ansible-galaxy-collection/tasks/init.yml44
-rw-r--r--test/integration/targets/ansible-galaxy-collection/tasks/install.yml330
-rw-r--r--test/integration/targets/ansible-galaxy-collection/tasks/main.yml175
-rw-r--r--test/integration/targets/ansible-galaxy-collection/tasks/publish.yml46
-rw-r--r--test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j210
-rw-r--r--test/integration/targets/ansible-galaxy-collection/vars/main.yml1
-rw-r--r--test/integration/targets/ansible-galaxy/aliases4
-rw-r--r--test/integration/targets/ansible-galaxy/cleanup-default.yml5
-rw-r--r--test/integration/targets/ansible-galaxy/cleanup-freebsd.yml6
-rw-r--r--test/integration/targets/ansible-galaxy/cleanup.yml19
-rwxr-xr-xtest/integration/targets/ansible-galaxy/runme.sh425
-rw-r--r--test/integration/targets/ansible-galaxy/setup.yml11
-rw-r--r--test/integration/targets/ansible-runner/aliases6
-rw-r--r--test/integration/targets/ansible-runner/files/adhoc_example1.py26
-rw-r--r--test/integration/targets/ansible-runner/files/constraints.txt5
-rw-r--r--test/integration/targets/ansible-runner/files/playbook_example1.py38
-rw-r--r--test/integration/targets/ansible-runner/filter_plugins/parse.py17
-rw-r--r--test/integration/targets/ansible-runner/inventory1
-rwxr-xr-xtest/integration/targets/ansible-runner/runme.sh5
-rw-r--r--test/integration/targets/ansible-runner/tasks/adhoc_example1.yml16
-rw-r--r--test/integration/targets/ansible-runner/tasks/main.yml5
-rw-r--r--test/integration/targets/ansible-runner/tasks/playbook_example1.yml16
-rw-r--r--test/integration/targets/ansible-runner/tasks/setup.yml19
-rw-r--r--test/integration/targets/ansible-runner/test.yml3
-rw-r--r--test/integration/targets/ansible-test-docker/aliases1
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml6
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py21
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm116
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py6
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py46
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps116
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py39
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml7
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py8
-rw-r--r--test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py8
-rwxr-xr-xtest/integration/targets/ansible-test-docker/collection-tests/docker.sh18
-rwxr-xr-xtest/integration/targets/ansible-test-docker/runme.sh24
-rw-r--r--test/integration/targets/ansible-test/aliases2
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst3
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml6
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml4
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py0
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py6
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py46
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml7
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt0
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py8
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py8
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt1
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt1
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml7
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt1
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py8
-rw-r--r--test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt1
-rwxr-xr-xtest/integration/targets/ansible-test/collection-tests/constraints.sh20
-rwxr-xr-xtest/integration/targets/ansible-test/collection-tests/coverage.sh22
-rwxr-xr-xtest/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh10
-rwxr-xr-xtest/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh10
-rwxr-xr-xtest/integration/targets/ansible-test/collection-tests/git-common.bash43
-rw-r--r--test/integration/targets/ansible-test/collection-tests/install-git.yml5
-rwxr-xr-xtest/integration/targets/ansible-test/collection-tests/venv.sh39
-rwxr-xr-xtest/integration/targets/ansible-test/runme.sh24
-rw-r--r--test/integration/targets/ansible/adhoc-callback.stdout12
-rw-r--r--test/integration/targets/ansible/aliases2
-rw-r--r--test/integration/targets/ansible/ansible-testé.cfg2
-rw-r--r--test/integration/targets/ansible/callback_plugins/callback_debug.py24
-rw-r--r--test/integration/targets/ansible/no-extension2
-rw-r--r--test/integration/targets/ansible/playbook.yml5
-rw-r--r--test/integration/targets/ansible/playbookdir_cfg.ini2
-rwxr-xr-xtest/integration/targets/ansible/runme.sh64
-rw-r--r--test/integration/targets/ansible/vars.yml1
-rw-r--r--test/integration/targets/any_errors_fatal/18602.yml21
-rw-r--r--test/integration/targets/any_errors_fatal/aliases1
-rw-r--r--test/integration/targets/any_errors_fatal/always_block.yml27
-rw-r--r--test/integration/targets/any_errors_fatal/inventory6
-rw-r--r--test/integration/targets/any_errors_fatal/on_includes.yml7
-rw-r--r--test/integration/targets/any_errors_fatal/play_level.yml15
-rwxr-xr-xtest/integration/targets/any_errors_fatal/runme.sh23
-rw-r--r--test/integration/targets/any_errors_fatal/test_fatal.yml12
-rw-r--r--test/integration/targets/apt/aliases7
-rw-r--r--test/integration/targets/apt/defaults/main.yml1
-rw-r--r--test/integration/targets/apt/meta/main.yml3
-rw-r--r--test/integration/targets/apt/tasks/apt-builddep.yml55
-rw-r--r--test/integration/targets/apt/tasks/apt-multiarch.yml34
-rw-r--r--test/integration/targets/apt/tasks/apt.yml413
-rw-r--r--test/integration/targets/apt/tasks/main.yml40
-rw-r--r--test/integration/targets/apt/tasks/repo.yml253
-rw-r--r--test/integration/targets/apt/tasks/upgrade.yml64
-rw-r--r--test/integration/targets/apt/tasks/url-with-deps.yml56
-rw-r--r--test/integration/targets/apt_key/aliases6
-rw-r--r--test/integration/targets/apt_key/meta/main.yml2
-rw-r--r--test/integration/targets/apt_key/tasks/apt_key.yml19
-rw-r--r--test/integration/targets/apt_key/tasks/main.yml28
-rw-r--r--test/integration/targets/apt_repository/aliases7
-rw-r--r--test/integration/targets/apt_repository/meta/main.yml2
-rw-r--r--test/integration/targets/apt_repository/tasks/apt.yml243
-rw-r--r--test/integration/targets/apt_repository/tasks/cleanup.yml17
-rw-r--r--test/integration/targets/apt_repository/tasks/main.yml25
-rw-r--r--test/integration/targets/apt_repository/tasks/mode.yaml130
-rw-r--r--test/integration/targets/apt_repository/tasks/mode_cleanup.yaml7
-rw-r--r--test/integration/targets/args/aliases1
-rwxr-xr-xtest/integration/targets/args/runme.sh12
-rw-r--r--test/integration/targets/argspec/aliases1
-rw-r--r--test/integration/targets/argspec/library/argspec.py153
-rw-r--r--test/integration/targets/argspec/tasks/main.yml419
-rw-r--r--test/integration/targets/argspec/tasks/password_no_log.yml14
-rw-r--r--test/integration/targets/assemble/aliases1
-rw-r--r--test/integration/targets/assemble/files/fragment11
-rw-r--r--test/integration/targets/assemble/files/fragment21
-rw-r--r--test/integration/targets/assemble/files/fragment31
-rw-r--r--test/integration/targets/assemble/files/fragment41
-rw-r--r--test/integration/targets/assemble/files/fragment51
-rw-r--r--test/integration/targets/assemble/meta/main.yml20
-rw-r--r--test/integration/targets/assemble/tasks/main.yml163
-rw-r--r--test/integration/targets/assert/aliases2
-rw-r--r--test/integration/targets/assert/assert_quiet.out.quiet.stderr2
-rw-r--r--test/integration/targets/assert/assert_quiet.out.quiet.stdout17
-rw-r--r--test/integration/targets/assert/inventory3
-rw-r--r--test/integration/targets/assert/quiet.yml16
-rwxr-xr-xtest/integration/targets/assert/runme.sh71
-rw-r--r--test/integration/targets/async/aliases4
-rw-r--r--test/integration/targets/async/library/async_test.py49
-rw-r--r--test/integration/targets/async/meta/main.yml2
-rw-r--r--test/integration/targets/async/tasks/main.yml300
-rw-r--r--test/integration/targets/async_extra_data/aliases1
-rw-r--r--test/integration/targets/async_extra_data/library/junkping.py15
-rwxr-xr-xtest/integration/targets/async_extra_data/runme.sh7
-rw-r--r--test/integration/targets/async_extra_data/test_async.yml10
-rw-r--r--test/integration/targets/async_fail/action_plugins/normal.py62
-rw-r--r--test/integration/targets/async_fail/aliases3
-rw-r--r--test/integration/targets/async_fail/library/async_test.py50
-rw-r--r--test/integration/targets/async_fail/meta/main.yml2
-rw-r--r--test/integration/targets/async_fail/tasks/main.yml36
-rw-r--r--test/integration/targets/become/aliases3
-rw-r--r--test/integration/targets/become/files/baz.txt1
-rw-r--r--test/integration/targets/become/tasks/default.yml82
-rw-r--r--test/integration/targets/become/tasks/main.yml5
-rw-r--r--test/integration/targets/become/tasks/su.yml91
-rw-r--r--test/integration/targets/become/tasks/sudo.yml91
-rw-r--r--test/integration/targets/become/templates/bar.j21
-rw-r--r--test/integration/targets/become/vars/default.yml1
-rw-r--r--test/integration/targets/binary/aliases1
-rw-r--r--test/integration/targets/binary/files/b64_latin11
-rw-r--r--test/integration/targets/binary/files/b64_utf81
-rw-r--r--test/integration/targets/binary/files/from_playbook1
-rw-r--r--test/integration/targets/binary/meta/main.yml2
-rw-r--r--test/integration/targets/binary/tasks/main.yml131
-rw-r--r--test/integration/targets/binary/templates/b64_latin1_template.j21
-rw-r--r--test/integration/targets/binary/templates/b64_utf8_template.j21
-rw-r--r--test/integration/targets/binary/templates/from_playbook_template.j21
-rw-r--r--test/integration/targets/binary/vars/main.yml3
-rw-r--r--test/integration/targets/binary_modules/Makefile16
-rw-r--r--test/integration/targets/binary_modules/aliases1
-rw-r--r--test/integration/targets/binary_modules/download_binary_modules.yml9
-rw-r--r--test/integration/targets/binary_modules/group_vars/all3
-rw-r--r--test/integration/targets/binary_modules/library/.gitignore1
-rw-r--r--test/integration/targets/binary_modules/library/helloworld.go89
-rw-r--r--test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml53
-rwxr-xr-xtest/integration/targets/binary_modules/test.sh8
-rw-r--r--test/integration/targets/binary_modules/test_binary_modules.yml5
-rw-r--r--test/integration/targets/binary_modules_posix/aliases2
-rwxr-xr-xtest/integration/targets/binary_modules_posix/runme.sh6
-rw-r--r--test/integration/targets/binary_modules_winrm/aliases4
-rwxr-xr-xtest/integration/targets/binary_modules_winrm/runme.sh6
-rw-r--r--test/integration/targets/blockinfile/aliases1
-rw-r--r--test/integration/targets/blockinfile/files/sshd_config135
-rw-r--r--test/integration/targets/blockinfile/meta/main.yml2
-rw-r--r--test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml47
-rw-r--r--test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml30
-rw-r--r--test/integration/targets/blockinfile/tasks/create_file.yml32
-rw-r--r--test/integration/targets/blockinfile/tasks/diff.yml18
-rw-r--r--test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml36
-rw-r--r--test/integration/targets/blockinfile/tasks/insertafter.yml37
-rw-r--r--test/integration/targets/blockinfile/tasks/insertbefore.yml39
-rw-r--r--test/integration/targets/blockinfile/tasks/main.yml40
-rw-r--r--test/integration/targets/blockinfile/tasks/preserve_line_endings.yml24
-rw-r--r--test/integration/targets/blockinfile/tasks/validate.yml28
-rw-r--r--test/integration/targets/blocks/aliases1
-rw-r--r--test/integration/targets/blocks/always_failure_no_rescue_rc.yml13
-rw-r--r--test/integration/targets/blocks/always_failure_with_rescue_rc.yml16
-rw-r--r--test/integration/targets/blocks/always_no_rescue_rc.yml12
-rw-r--r--test/integration/targets/blocks/block_fail.yml5
-rw-r--r--test/integration/targets/blocks/block_fail_tasks.yml9
-rw-r--r--test/integration/targets/blocks/block_in_rescue.yml33
-rw-r--r--test/integration/targets/blocks/block_rescue_vars.yml16
-rw-r--r--test/integration/targets/blocks/fail.yml2
-rw-r--r--test/integration/targets/blocks/issue29047.yml4
-rw-r--r--test/integration/targets/blocks/issue29047_tasks.yml13
-rw-r--r--test/integration/targets/blocks/issue71306.yml16
-rw-r--r--test/integration/targets/blocks/main.yml128
-rw-r--r--test/integration/targets/blocks/nested_fail.yml3
-rw-r--r--test/integration/targets/blocks/nested_nested_fail.yml3
-rwxr-xr-xtest/integration/targets/blocks/runme.sh95
-rw-r--r--test/integration/targets/builtin_vars_prompt/aliases3
-rwxr-xr-xtest/integration/targets/builtin_vars_prompt/runme.sh6
-rw-r--r--test/integration/targets/builtin_vars_prompt/test-vars_prompt.py127
-rw-r--r--test/integration/targets/builtin_vars_prompt/unsafe.yml20
-rw-r--r--test/integration/targets/builtin_vars_prompt/unsupported.yml18
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml15
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml16
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml17
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml16
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml14
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml20
-rw-r--r--test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml12
-rw-r--r--test/integration/targets/callback_default/aliases2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout78
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout74
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout74
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout74
-rw-r--r--test/integration/targets/callback_default/callback_default.out.default.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.default.stdout72
-rw-r--r--test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr5
-rw-r--r--test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout69
-rw-r--r--test/integration/targets/callback_default/callback_default.out.hide_ok.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.hide_ok.stdout56
-rw-r--r--test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout66
-rw-r--r--test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr2
-rw-r--r--test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout52
-rw-r--r--test/integration/targets/callback_default/include_me.yml2
-rw-r--r--test/integration/targets/callback_default/inventory5
-rwxr-xr-xtest/integration/targets/callback_default/runme.sh186
-rw-r--r--test/integration/targets/callback_default/test.yml88
-rw-r--r--test/integration/targets/callback_default/test_2.yml6
-rw-r--r--test/integration/targets/callback_default/test_dryrun.yml93
-rw-r--r--test/integration/targets/changed_when/aliases1
-rw-r--r--test/integration/targets/changed_when/meta/main.yml2
-rw-r--r--test/integration/targets/changed_when/tasks/main.yml61
-rw-r--r--test/integration/targets/check_mode/aliases1
-rw-r--r--test/integration/targets/check_mode/check_mode-not-on-cli.yml37
-rw-r--r--test/integration/targets/check_mode/check_mode-on-cli.yml36
-rw-r--r--test/integration/targets/check_mode/check_mode.yml7
-rw-r--r--test/integration/targets/check_mode/roles/test_always_run/meta/main.yml17
-rw-r--r--test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml29
-rw-r--r--test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt1
-rw-r--r--test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml50
-rw-r--r--test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j21
-rw-r--r--test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml1
-rwxr-xr-xtest/integration/targets/check_mode/runme.sh7
-rw-r--r--test/integration/targets/cli/aliases5
-rwxr-xr-xtest/integration/targets/cli/runme.sh7
-rw-r--r--test/integration/targets/cli/setup.yml42
-rw-r--r--test/integration/targets/cli/test-cli.py21
-rw-r--r--test/integration/targets/cli/test_k_and_K.py27
-rw-r--r--test/integration/targets/collections/a.statichost.yml3
-rw-r--r--test/integration/targets/collections/aliases4
-rw-r--r--test/integration/targets/collections/cache.statichost.yml7
-rw-r--r--test/integration/targets/collections/check_populated_inventory.yml11
-rw-r--r--test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py13
-rw-r--r--test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py13
-rw-r--r--test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py13
-rw-r--r--test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml2
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py1
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py1
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml52
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml49
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml29
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml7
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py17
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py33
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py11
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py20
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py27
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py41
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py18
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py14
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py14
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py14
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py11
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py11
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py12
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs12
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py12
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py0
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py0
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py0
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py11
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py10
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py14
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py21
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py25
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py21
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak3
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml3
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py31
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py16
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py16
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py16
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps122
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps19
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py1
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps126
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps125
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py13
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py44
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml2
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml7
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml6
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml10
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml2
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml7
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml4
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml39
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml4
-rw-r--r--test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml33
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py29
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py24
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py43
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py63
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py68
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py0
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py0
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py6
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py13
-rw-r--r--test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py45
-rw-r--r--test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py37
-rw-r--r--test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py45
-rw-r--r--test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py46
-rw-r--r--test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py13
-rw-r--r--test/integration/targets/collections/includeme.yml6
-rw-r--r--test/integration/targets/collections/inventory_test.yml26
-rw-r--r--test/integration/targets/collections/invocation_tests.yml5
-rw-r--r--test/integration/targets/collections/library/ping.py13
-rw-r--r--test/integration/targets/collections/noop.yml4
-rw-r--r--test/integration/targets/collections/posix.yml443
-rw-r--r--test/integration/targets/collections/redirected.statichost.yml3
-rw-r--r--test/integration/targets/collections/roles/standalone/tasks/main.yml2
-rw-r--r--test/integration/targets/collections/roles/testrole/tasks/main.yml28
-rwxr-xr-xtest/integration/targets/collections/runme.sh111
-rw-r--r--test/integration/targets/collections/test_bypass_host_loop.yml22
-rw-r--r--test/integration/targets/collections/test_collection_meta.yml46
-rw-r--r--test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py16
-rw-r--r--test/integration/targets/collections/testcoll2/MANIFEST.json0
-rw-r--r--test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py33
-rwxr-xr-xtest/integration/targets/collections/vars_plugin_tests.sh91
-rw-r--r--test/integration/targets/collections/windows.yml28
-rw-r--r--test/integration/targets/collections_plugin_namespace/aliases1
-rw-r--r--test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py15
-rw-r--r--test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py9
-rw-r--r--test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py10
-rw-r--r--test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py13
-rw-r--r--test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml12
-rwxr-xr-xtest/integration/targets/collections_plugin_namespace/runme.sh5
-rw-r--r--test/integration/targets/collections_plugin_namespace/test.yml3
-rw-r--r--test/integration/targets/collections_relative_imports/aliases4
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm111
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py6
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py8
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py8
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm111
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py24
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps110
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml4
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm111
-rw-r--r--test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs14
-rwxr-xr-xtest/integration/targets/collections_relative_imports/runme.sh13
-rw-r--r--test/integration/targets/collections_relative_imports/test.yml3
-rw-r--r--test/integration/targets/collections_relative_imports/windows.yml11
-rw-r--r--test/integration/targets/collections_runtime_pythonpath/aliases3
-rw-r--r--test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py28
-rw-r--r--test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml6
-rw-r--r--test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg15
-rw-r--r--test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py28
-rwxr-xr-xtest/integration/targets/collections_runtime_pythonpath/runme.sh60
-rw-r--r--test/integration/targets/command_shell/aliases4
-rwxr-xr-xtest/integration/targets/command_shell/files/create_afile.sh3
-rwxr-xr-xtest/integration/targets/command_shell/files/remove_afile.sh3
-rwxr-xr-xtest/integration/targets/command_shell/files/test.sh3
-rw-r--r--test/integration/targets/command_shell/meta/main.yml2
-rw-r--r--test/integration/targets/command_shell/tasks/main.yml446
-rw-r--r--test/integration/targets/common_network/aliases1
-rw-r--r--test/integration/targets/common_network/tasks/main.yml4
-rw-r--r--test/integration/targets/common_network/test_plugins/is_mac.py14
-rw-r--r--test/integration/targets/conditionals/aliases1
-rw-r--r--test/integration/targets/conditionals/play.yml551
-rwxr-xr-xtest/integration/targets/conditionals/runme.sh15
-rw-r--r--test/integration/targets/conditionals/test_no_warnings.yml18
-rw-r--r--test/integration/targets/conditionals/test_warnings.yml14
-rw-r--r--test/integration/targets/conditionals/vars/main.yml22
-rw-r--r--test/integration/targets/config/aliases1
-rwxr-xr-xtest/integration/targets/config/runme.sh17
-rw-r--r--test/integration/targets/connection/aliases1
-rwxr-xr-xtest/integration/targets/connection/test.sh23
-rw-r--r--test/integration/targets/connection/test_connection.yml43
-rw-r--r--test/integration/targets/connection_delegation/action_plugins/delegation_action.py12
-rw-r--r--test/integration/targets/connection_delegation/aliases5
-rw-r--r--test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py45
-rw-r--r--test/integration/targets/connection_delegation/inventory.ini1
-rwxr-xr-xtest/integration/targets/connection_delegation/runme.sh9
-rw-r--r--test/integration/targets/connection_delegation/test.yml23
-rw-r--r--test/integration/targets/connection_local/aliases1
l---------test/integration/targets/connection_local/runme.sh1
-rw-r--r--test/integration/targets/connection_local/test_connection.inventory7
-rw-r--r--test/integration/targets/connection_paramiko_ssh/aliases5
-rwxr-xr-xtest/integration/targets/connection_paramiko_ssh/runme.sh7
l---------test/integration/targets/connection_paramiko_ssh/test.sh1
-rw-r--r--test/integration/targets/connection_paramiko_ssh/test_connection.inventory7
-rw-r--r--test/integration/targets/connection_posix/aliases2
-rwxr-xr-xtest/integration/targets/connection_posix/test.sh18
-rw-r--r--test/integration/targets/connection_psrp/aliases4
-rw-r--r--test/integration/targets/connection_psrp/files/empty.txt0
-rwxr-xr-xtest/integration/targets/connection_psrp/runme.sh24
-rw-r--r--test/integration/targets/connection_psrp/test_connection.inventory.j29
-rw-r--r--test/integration/targets/connection_psrp/tests.yml133
-rw-r--r--test/integration/targets/connection_ssh/aliases3
l---------test/integration/targets/connection_ssh/posix.sh1
-rwxr-xr-xtest/integration/targets/connection_ssh/runme.sh65
-rw-r--r--test/integration/targets/connection_ssh/test_connection.inventory7
-rw-r--r--test/integration/targets/connection_windows_ssh/aliases6
-rwxr-xr-xtest/integration/targets/connection_windows_ssh/runme.sh54
-rw-r--r--test/integration/targets/connection_windows_ssh/test_connection.inventory.j212
-rw-r--r--test/integration/targets/connection_windows_ssh/tests.yml32
-rw-r--r--test/integration/targets/connection_windows_ssh/tests_fetch.yml41
-rwxr-xr-xtest/integration/targets/connection_windows_ssh/windows.sh25
-rw-r--r--test/integration/targets/connection_winrm/aliases4
-rwxr-xr-xtest/integration/targets/connection_winrm/runme.sh18
-rw-r--r--test/integration/targets/connection_winrm/test_connection.inventory.j210
-rw-r--r--test/integration/targets/copy/aliases4
-rw-r--r--test/integration/targets/copy/defaults/main.yml2
-rw-r--r--test/integration/targets/copy/files/foo.txt1
-rw-r--r--test/integration/targets/copy/files/subdir/bar.txt1
l---------test/integration/targets/copy/files/subdir/subdir1/bar.txt1
-rw-r--r--test/integration/targets/copy/files/subdir/subdir2/baz.txt1
-rw-r--r--test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt1
-rw-r--r--test/integration/targets/copy/meta/main.yml3
-rw-r--r--test/integration/targets/copy/tasks/acls.yml33
-rw-r--r--test/integration/targets/copy/tasks/check_mode.yml126
-rw-r--r--test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml29
-rw-r--r--test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml43
-rw-r--r--test/integration/targets/copy/tasks/main.yml117
-rw-r--r--test/integration/targets/copy/tasks/no_log.yml82
-rw-r--r--test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml26
-rw-r--r--test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml32
-rw-r--r--test/integration/targets/copy/tasks/tests.yml2261
-rw-r--r--test/integration/targets/cron/aliases5
-rw-r--r--test/integration/targets/cron/defaults/main.yml1
-rw-r--r--test/integration/targets/cron/meta/main.yml2
-rw-r--r--test/integration/targets/cron/tasks/main.yml213
-rw-r--r--test/integration/targets/dataloader/aliases1
-rw-r--r--test/integration/targets/dataloader/attempt_to_load_invalid_json.yml4
-rwxr-xr-xtest/integration/targets/dataloader/runme.sh6
-rw-r--r--test/integration/targets/dataloader/vars/invalid.json1
-rw-r--r--test/integration/targets/debconf/aliases2
-rw-r--r--test/integration/targets/debconf/meta/main.yml2
-rw-r--r--test/integration/targets/debconf/tasks/main.yml36
-rw-r--r--test/integration/targets/debug/aliases1
-rw-r--r--test/integration/targets/debug/main.yml6
-rw-r--r--test/integration/targets/debug/main_fqcn.yml6
-rwxr-xr-xtest/integration/targets/debug/runme.sh17
-rw-r--r--test/integration/targets/delegate_to/aliases4
-rw-r--r--test/integration/targets/delegate_to/connection_plugins/fakelocal.py76
-rw-r--r--test/integration/targets/delegate_to/delegate_and_nolog.yml8
-rw-r--r--test/integration/targets/delegate_to/delegate_facts_block.yml25
-rw-r--r--test/integration/targets/delegate_to/delegate_local_from_root.yml10
-rw-r--r--test/integration/targets/delegate_to/delegate_vars_hanldling.yml58
-rw-r--r--test/integration/targets/delegate_to/discovery_applied.yml8
-rw-r--r--test/integration/targets/delegate_to/files/testfile1
-rw-r--r--test/integration/targets/delegate_to/has_hostvars.yml64
-rw-r--r--test/integration/targets/delegate_to/inventory9
-rw-r--r--test/integration/targets/delegate_to/inventory_interpreters5
-rw-r--r--test/integration/targets/delegate_to/library/detect_interpreter.py18
-rw-r--r--test/integration/targets/delegate_to/roles/test_template/templates/foo.j23
-rwxr-xr-xtest/integration/targets/delegate_to/runme.sh74
-rw-r--r--test/integration/targets/delegate_to/test_delegate_to.yml58
-rw-r--r--test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml45
-rw-r--r--test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml73
-rw-r--r--test/integration/targets/delegate_to/test_loop_control.yml16
-rw-r--r--test/integration/targets/delegate_to/verify_interpreter.yml47
-rw-r--r--test/integration/targets/dict_transformations/aliases1
-rw-r--r--test/integration/targets/dict_transformations/library/convert_camelCase.py48
-rw-r--r--test/integration/targets/dict_transformations/library/convert_snake_case.py55
-rw-r--r--test/integration/targets/dict_transformations/tasks/main.yml3
-rw-r--r--test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml33
-rw-r--r--test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml26
-rw-r--r--test/integration/targets/dnf/aliases7
-rw-r--r--test/integration/targets/dnf/meta/main.yml4
-rw-r--r--test/integration/targets/dnf/tasks/dnf.yml774
-rw-r--r--test/integration/targets/dnf/tasks/dnfinstallroot.yml47
-rw-r--r--test/integration/targets/dnf/tasks/dnfreleasever.yml47
-rw-r--r--test/integration/targets/dnf/tasks/filters.yml134
-rw-r--r--test/integration/targets/dnf/tasks/filters_check_mode.yml118
-rw-r--r--test/integration/targets/dnf/tasks/gpg.yml72
-rw-r--r--test/integration/targets/dnf/tasks/logging.yml47
-rw-r--r--test/integration/targets/dnf/tasks/main.yml62
-rw-r--r--test/integration/targets/dnf/tasks/modularity.yml99
-rw-r--r--test/integration/targets/dnf/tasks/repo.yml309
-rw-r--r--test/integration/targets/dnf/vars/CentOS.yml2
-rw-r--r--test/integration/targets/dnf/vars/Fedora.yml6
-rw-r--r--test/integration/targets/dnf/vars/RedHat.yml2
-rw-r--r--test/integration/targets/dnf/vars/main.yml4
-rw-r--r--test/integration/targets/dpkg_selections/aliases7
-rw-r--r--test/integration/targets/dpkg_selections/defaults/main.yaml1
-rw-r--r--test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml89
-rw-r--r--test/integration/targets/dpkg_selections/tasks/main.yaml3
-rw-r--r--test/integration/targets/egg-info/aliases1
-rw-r--r--test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py11
-rw-r--r--test/integration/targets/egg-info/tasks/main.yml3
-rw-r--r--test/integration/targets/embedded_module/aliases1
-rw-r--r--test/integration/targets/embedded_module/library/test_integration_module3
-rw-r--r--test/integration/targets/embedded_module/tasks/main.yml9
-rw-r--r--test/integration/targets/environment/aliases1
-rwxr-xr-xtest/integration/targets/environment/runme.sh5
-rw-r--r--test/integration/targets/environment/test_environment.yml173
-rw-r--r--test/integration/targets/error_from_connection/aliases1
-rw-r--r--test/integration/targets/error_from_connection/connection_plugins/dummy.py45
-rw-r--r--test/integration/targets/error_from_connection/inventory2
-rw-r--r--test/integration/targets/error_from_connection/play.yml20
-rwxr-xr-xtest/integration/targets/error_from_connection/runme.sh5
-rw-r--r--test/integration/targets/expect/aliases2
-rw-r--r--test/integration/targets/expect/files/foo.txt1
-rw-r--r--test/integration/targets/expect/files/test_command.py12
-rw-r--r--test/integration/targets/expect/tasks/main.yml205
-rw-r--r--test/integration/targets/facts_d/aliases1
-rw-r--r--test/integration/targets/facts_d/meta/main.yml2
-rw-r--r--test/integration/targets/facts_d/tasks/main.yml41
-rw-r--r--test/integration/targets/facts_linux_network/aliases5
-rw-r--r--test/integration/targets/facts_linux_network/meta/main.yml2
-rw-r--r--test/integration/targets/facts_linux_network/tasks/main.yml18
-rw-r--r--test/integration/targets/failed_when/aliases1
-rw-r--r--test/integration/targets/failed_when/tasks/main.yml68
-rw-r--r--test/integration/targets/fetch/aliases2
-rw-r--r--test/integration/targets/fetch/injection/avoid_slurp_return.yml26
-rw-r--r--test/integration/targets/fetch/injection/here.txt1
-rw-r--r--test/integration/targets/fetch/injection/library/slurp.py29
-rw-r--r--test/integration/targets/fetch/roles/fetch_tests/meta/main.yml2
-rw-r--r--test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml141
-rw-r--r--test/integration/targets/fetch/run_fetch_tests.yml5
-rwxr-xr-xtest/integration/targets/fetch/runme.sh12
-rw-r--r--test/integration/targets/file/aliases3
-rw-r--r--test/integration/targets/file/defaults/main.yml2
-rw-r--r--test/integration/targets/file/files/foo.txt1
-rw-r--r--test/integration/targets/file/files/foobar/directory/fileC0
-rw-r--r--test/integration/targets/file/files/foobar/directory/fileD0
-rw-r--r--test/integration/targets/file/files/foobar/fileA0
-rw-r--r--test/integration/targets/file/files/foobar/fileB0
-rw-r--r--test/integration/targets/file/meta/main.yml3
-rw-r--r--test/integration/targets/file/tasks/diff_peek.yml10
-rw-r--r--test/integration/targets/file/tasks/directory_as_dest.yml345
-rw-r--r--test/integration/targets/file/tasks/initialize.yml15
-rw-r--r--test/integration/targets/file/tasks/main.yml752
-rw-r--r--test/integration/targets/file/tasks/selinux_tests.yml33
-rw-r--r--test/integration/targets/file/tasks/state_link.yml487
-rw-r--r--test/integration/targets/file/tasks/unicode_path.yml10
-rw-r--r--test/integration/targets/filter_core/aliases3
-rw-r--r--test/integration/targets/filter_core/files/9851.txt3
-rw-r--r--test/integration/targets/filter_core/files/fileglob/one.txt0
-rw-r--r--test/integration/targets/filter_core/files/fileglob/two.txt0
-rw-r--r--test/integration/targets/filter_core/files/foo.txt69
-rw-r--r--test/integration/targets/filter_core/handle_undefined_type_errors.yml29
-rw-r--r--test/integration/targets/filter_core/host_vars/localhost1
-rw-r--r--test/integration/targets/filter_core/meta/main.yml3
-rwxr-xr-xtest/integration/targets/filter_core/runme.sh6
-rw-r--r--test/integration/targets/filter_core/runme.yml3
-rw-r--r--test/integration/targets/filter_core/tasks/main.yml576
-rw-r--r--test/integration/targets/filter_core/templates/foo.j262
-rw-r--r--test/integration/targets/filter_core/templates/py26json.j22
-rw-r--r--test/integration/targets/filter_core/vars/main.yml106
-rw-r--r--test/integration/targets/filter_mathstuff/aliases3
-rw-r--r--test/integration/targets/filter_mathstuff/tasks/main.yml288
-rw-r--r--test/integration/targets/filter_urls/aliases3
-rwxr-xr-xtest/integration/targets/filter_urls/runme.sh22
-rw-r--r--test/integration/targets/filter_urls/runme.yml4
-rw-r--r--test/integration/targets/filter_urls/tasks/main.yml31
-rw-r--r--test/integration/targets/filter_urlsplit/aliases3
-rw-r--r--test/integration/targets/filter_urlsplit/tasks/main.yml30
-rw-r--r--test/integration/targets/find/aliases1
-rw-r--r--test/integration/targets/find/meta/main.yml2
-rw-r--r--test/integration/targets/find/tasks/main.yml97
-rw-r--r--test/integration/targets/gathering/aliases1
-rw-r--r--test/integration/targets/gathering/explicit.yml14
-rw-r--r--test/integration/targets/gathering/implicit.yml23
-rwxr-xr-xtest/integration/targets/gathering/runme.sh7
-rw-r--r--test/integration/targets/gathering/smart.yml23
-rw-r--r--test/integration/targets/gathering/uuid.fact10
-rw-r--r--test/integration/targets/gathering_facts/aliases2
-rw-r--r--test/integration/targets/gathering_facts/cache_plugins/none.py50
-rw-r--r--test/integration/targets/gathering_facts/inventory2
-rw-r--r--test/integration/targets/gathering_facts/library/bogus_facts12
-rw-r--r--test/integration/targets/gathering_facts/library/facts_one25
-rw-r--r--test/integration/targets/gathering_facts/library/facts_two24
-rw-r--r--test/integration/targets/gathering_facts/library/file_utils.py54
-rw-r--r--test/integration/targets/gathering_facts/one_two.json27
-rw-r--r--test/integration/targets/gathering_facts/prevent_clobbering.yml8
-rwxr-xr-xtest/integration/targets/gathering_facts/runme.sh18
-rw-r--r--test/integration/targets/gathering_facts/test_gathering_facts.yml474
-rw-r--r--test/integration/targets/gathering_facts/test_prevent_injection.yml14
-rw-r--r--test/integration/targets/gathering_facts/test_run_once.yml32
-rw-r--r--test/integration/targets/gathering_facts/two_one.json27
-rw-r--r--test/integration/targets/gathering_facts/uuid.fact10
-rw-r--r--test/integration/targets/gathering_facts/verify_merge_facts.yml41
-rw-r--r--test/integration/targets/get_url/aliases4
-rw-r--r--test/integration/targets/get_url/files/testserver.py20
-rw-r--r--test/integration/targets/get_url/meta/main.yml4
-rw-r--r--test/integration/targets/get_url/tasks/main.yml463
-rw-r--r--test/integration/targets/getent/aliases2
-rw-r--r--test/integration/targets/getent/meta/main.yml2
-rw-r--r--test/integration/targets/getent/tasks/main.yml46
-rw-r--r--test/integration/targets/git/aliases2
-rw-r--r--test/integration/targets/git/handlers/cleanup-default.yml6
-rw-r--r--test/integration/targets/git/handlers/cleanup-freebsd.yml5
-rw-r--r--test/integration/targets/git/handlers/main.yml7
-rw-r--r--test/integration/targets/git/meta/main.yml3
-rw-r--r--test/integration/targets/git/tasks/ambiguous-ref.yml37
-rw-r--r--test/integration/targets/git/tasks/archive.yml135
-rw-r--r--test/integration/targets/git/tasks/change-repo-url.yml132
-rw-r--r--test/integration/targets/git/tasks/checkout-new-tag.yml54
-rw-r--r--test/integration/targets/git/tasks/depth.yml229
-rw-r--r--test/integration/targets/git/tasks/forcefully-fetch-tag.yml38
-rw-r--r--test/integration/targets/git/tasks/formats.yml40
-rw-r--r--test/integration/targets/git/tasks/gpg-verification.yml192
-rw-r--r--test/integration/targets/git/tasks/localmods.yml112
-rw-r--r--test/integration/targets/git/tasks/main.yml40
-rw-r--r--test/integration/targets/git/tasks/missing_hostkey.yml48
-rw-r--r--test/integration/targets/git/tasks/no-destination.yml13
-rw-r--r--test/integration/targets/git/tasks/reset-origin.yml25
-rw-r--r--test/integration/targets/git/tasks/separate-git-dir.yml132
-rw-r--r--test/integration/targets/git/tasks/setup-local-repos.yml45
-rw-r--r--test/integration/targets/git/tasks/setup.yml43
-rw-r--r--test/integration/targets/git/tasks/specific-revision.yml238
-rw-r--r--test/integration/targets/git/tasks/submodules.yml124
-rw-r--r--test/integration/targets/git/vars/main.yml97
-rw-r--r--test/integration/targets/group/aliases2
-rw-r--r--test/integration/targets/group/files/gidget.py12
-rw-r--r--test/integration/targets/group/files/grouplist.sh20
-rw-r--r--test/integration/targets/group/meta/main.yml2
-rw-r--r--test/integration/targets/group/tasks/main.yml40
-rw-r--r--test/integration/targets/group/tasks/tests.yml329
-rw-r--r--test/integration/targets/group_by/aliases1
-rw-r--r--test/integration/targets/group_by/create_groups.yml39
-rw-r--r--test/integration/targets/group_by/group_vars/all3
-rw-r--r--test/integration/targets/group_by/group_vars/camelus1
-rw-r--r--test/integration/targets/group_by/group_vars/vicugna1
-rw-r--r--test/integration/targets/group_by/inventory.group_by9
-rwxr-xr-xtest/integration/targets/group_by/runme.sh6
-rw-r--r--test/integration/targets/group_by/test_group_by.yml187
-rw-r--r--test/integration/targets/group_by/test_group_by_skipped.yml30
-rw-r--r--test/integration/targets/groupby_filter/aliases1
-rwxr-xr-xtest/integration/targets/groupby_filter/runme.sh13
-rw-r--r--test/integration/targets/groupby_filter/test_jinja2_groupby.yml29
-rw-r--r--test/integration/targets/handler_race/aliases3
-rw-r--r--test/integration/targets/handler_race/inventory30
-rw-r--r--test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml4
-rw-r--r--test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml9
-rw-r--r--test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml8
-rw-r--r--test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml8
-rwxr-xr-xtest/integration/targets/handler_race/runme.sh6
-rw-r--r--test/integration/targets/handler_race/test_handler_race.yml10
-rw-r--r--test/integration/targets/handlers/aliases3
-rw-r--r--test/integration/targets/handlers/from_handlers.yml39
-rw-r--r--test/integration/targets/handlers/handlers.yml2
-rw-r--r--test/integration/targets/handlers/inventory.handlers10
-rw-r--r--test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml2
-rw-r--r--test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml26
-rw-r--r--test/integration/targets/handlers/roles/test_handlers/handlers/main.yml5
-rw-r--r--test/integration/targets/handlers/roles/test_handlers/meta/main.yml1
-rw-r--r--test/integration/targets/handlers/roles/test_handlers/tasks/main.yml52
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml1
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml4
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml5
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml1
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml47
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml10
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml6
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml12
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml10
-rw-r--r--test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml75
-rw-r--r--test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml21
-rw-r--r--test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml26
-rwxr-xr-xtest/integration/targets/handlers/runme.sh95
-rw-r--r--test/integration/targets/handlers/test_force_handlers.yml27
-rw-r--r--test/integration/targets/handlers/test_handlers.yml47
-rw-r--r--test/integration/targets/handlers/test_handlers_any_errors_fatal.yml24
-rw-r--r--test/integration/targets/handlers/test_handlers_include.yml14
-rw-r--r--test/integration/targets/handlers/test_handlers_include_role.yml8
-rw-r--r--test/integration/targets/handlers/test_handlers_including_task.yml16
-rw-r--r--test/integration/targets/handlers/test_handlers_inexistent_notify.yml10
-rw-r--r--test/integration/targets/handlers/test_handlers_listen.yml128
-rw-r--r--test/integration/targets/handlers/test_handlers_template_run_once.yml12
-rw-r--r--test/integration/targets/handlers/test_listening_handlers.yml24
-rw-r--r--test/integration/targets/handlers/test_templating_in_handlers.yml62
-rw-r--r--test/integration/targets/hash/aliases1
-rw-r--r--test/integration/targets/hash/group_vars/all3
-rw-r--r--test/integration/targets/hash/host_vars/testhost2
-rw-r--r--test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml21
-rw-r--r--test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml17
-rw-r--r--test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml37
-rw-r--r--test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml21
-rwxr-xr-xtest/integration/targets/hash/runme.sh8
-rw-r--r--test/integration/targets/hash/test_hash.yml21
-rw-r--r--test/integration/targets/hash/vars/test_hash_vars.yml3
-rw-r--r--test/integration/targets/hosts_field/aliases1
-rw-r--r--test/integration/targets/hosts_field/inventory.hosts_field1
-rwxr-xr-xtest/integration/targets/hosts_field/runme.sh49
-rw-r--r--test/integration/targets/hosts_field/test_hosts_field.json1
-rw-r--r--test/integration/targets/hosts_field/test_hosts_field.yml62
-rw-r--r--test/integration/targets/ignore_errors/aliases1
-rw-r--r--test/integration/targets/ignore_errors/meta/main.yml2
-rw-r--r--test/integration/targets/ignore_errors/tasks/main.yml22
-rw-r--r--test/integration/targets/ignore_unreachable/aliases1
-rw-r--r--test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py11
-rw-r--r--test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py11
-rw-r--r--test/integration/targets/ignore_unreachable/inventory3
-rw-r--r--test/integration/targets/ignore_unreachable/meta/main.yml2
-rwxr-xr-xtest/integration/targets/ignore_unreachable/runme.sh16
-rw-r--r--test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml5
-rw-r--r--test/integration/targets/ignore_unreachable/test_cannot_connect.yml29
-rw-r--r--test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml24
-rw-r--r--test/integration/targets/incidental_azure_rm_mariadbserver/aliases3
-rw-r--r--test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml640
-rw-r--r--test/integration/targets/incidental_azure_rm_resource/aliases3
-rw-r--r--test/integration/targets/incidental_azure_rm_resource/tasks/main.yml158
-rw-r--r--test/integration/targets/incidental_cloud_init_data_facts/aliases6
-rw-r--r--test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml50
-rw-r--r--test/integration/targets/incidental_cloudformation/aliases2
-rw-r--r--test/integration/targets/incidental_cloudformation/defaults/main.yml8
-rw-r--r--test/integration/targets/incidental_cloudformation/files/cf_template.json37
-rw-r--r--test/integration/targets/incidental_cloudformation/tasks/main.yml476
-rw-r--r--test/integration/targets/incidental_cs_common/aliases1
-rw-r--r--test/integration/targets/incidental_cs_common/defaults/main.yml6
-rw-r--r--test/integration/targets/incidental_deploy_helper/aliases1
-rw-r--r--test/integration/targets/incidental_deploy_helper/tasks/main.yml149
-rw-r--r--test/integration/targets/incidental_flatpak_remote/aliases8
-rw-r--r--test/integration/targets/incidental_flatpak_remote/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml101
-rw-r--r--test/integration/targets/incidental_flatpak_remote/tasks/main.yml57
-rw-r--r--test/integration/targets/incidental_flatpak_remote/tasks/setup.yml27
-rw-r--r--test/integration/targets/incidental_flatpak_remote/tasks/test.yml72
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/aliases2
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml11
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml9
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml64
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml62
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml39
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml9
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml18
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml91
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml79
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml74
-rwxr-xr-xtest/integration/targets/incidental_inventory_aws_ec2/runme.sh35
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml12
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml12
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml20
-rw-r--r--test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml0
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/aliases13
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml3
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml5
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml3
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml19
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml15
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml58
-rw-r--r--test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml35
-rwxr-xr-xtest/integration/targets/incidental_inventory_docker_swarm/runme.sh23
-rw-r--r--test/integration/targets/incidental_inventory_foreman/aliases3
-rw-r--r--test/integration/targets/incidental_inventory_foreman/ansible.cfg5
-rw-r--r--test/integration/targets/incidental_inventory_foreman/inspect_cache.yml31
-rwxr-xr-xtest/integration/targets/incidental_inventory_foreman/runme.sh50
-rw-r--r--test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml59
-rw-r--r--test/integration/targets/incidental_ios_file/aliases2
-rw-r--r--test/integration/targets/incidental_ios_file/defaults/main.yaml2
-rw-r--r--test/integration/targets/incidental_ios_file/ios1.cfg3
-rw-r--r--test/integration/targets/incidental_ios_file/nonascii.binbin0 -> 32768 bytes
-rw-r--r--test/integration/targets/incidental_ios_file/tasks/cli.yaml17
-rw-r--r--test/integration/targets/incidental_ios_file/tasks/main.yaml2
-rw-r--r--test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml52
-rw-r--r--test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml73
-rw-r--r--test/integration/targets/incidental_lookup_rabbitmq/aliases6
-rw-r--r--test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml5
-rw-r--r--test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml138
-rw-r--r--test/integration/targets/incidental_lvg/aliases6
-rw-r--r--test/integration/targets/incidental_lvg/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_lvg/tasks/main.yml15
-rw-r--r--test/integration/targets/incidental_lvg/tasks/setup.yml13
-rw-r--r--test/integration/targets/incidental_lvg/tasks/teardown.yml17
-rw-r--r--test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml33
-rw-r--r--test/integration/targets/incidental_lvg/tasks/test_indempotency.yml15
-rw-r--r--test/integration/targets/incidental_mongodb_parameter/aliases8
-rw-r--r--test/integration/targets/incidental_mongodb_parameter/defaults/main.yml21
-rw-r--r--test/integration/targets/incidental_mongodb_parameter/meta/main.yml3
-rw-r--r--test/integration/targets/incidental_mongodb_parameter/tasks/main.yml143
-rw-r--r--test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml55
-rw-r--r--test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml25
-rw-r--r--test/integration/targets/incidental_postgresql_user/aliases4
-rw-r--r--test/integration/targets/incidental_postgresql_user/defaults/main.yml3
-rw-r--r--test/integration/targets/incidental_postgresql_user/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_postgresql_user/tasks/main.yml7
-rw-r--r--test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml741
-rw-r--r--test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml153
-rw-r--r--test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml167
-rw-r--r--test/integration/targets/incidental_postgresql_user/tasks/test_password.yml336
-rw-r--r--test/integration/targets/incidental_setup_docker/aliases2
-rw-r--r--test/integration/targets/incidental_setup_docker/defaults/main.yml18
-rw-r--r--test/integration/targets/incidental_setup_docker/handlers/main.yml14
-rw-r--r--test/integration/targets/incidental_setup_docker/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_setup_docker/tasks/Debian.yml43
-rw-r--r--test/integration/targets/incidental_setup_docker/tasks/Fedora.yml21
-rw-r--r--test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml44
-rw-r--r--test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml33
-rw-r--r--test/integration/targets/incidental_setup_docker/tasks/Suse.yml7
-rw-r--r--test/integration/targets/incidental_setup_docker/tasks/main.yml113
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/Debian.yml9
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/Fedora.yml5
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml18
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml9
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/Suse.yml2
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml5
-rw-r--r--test/integration/targets/incidental_setup_docker/vars/default.yml0
-rw-r--r--test/integration/targets/incidental_setup_ec2/aliases1
-rw-r--r--test/integration/targets/incidental_setup_ec2/defaults/main.yml2
-rw-r--r--test/integration/targets/incidental_setup_ec2/tasks/common.yml119
-rw-r--r--test/integration/targets/incidental_setup_ec2/vars/main.yml3
-rw-r--r--test/integration/targets/incidental_setup_flatpak_remote/README.md138
-rw-r--r--test/integration/targets/incidental_setup_flatpak_remote/aliases1
-rw-r--r--test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xzbin0 -> 15496 bytes
-rw-r--r--test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml4
-rw-r--r--test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml2
-rw-r--r--test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml22
-rw-r--r--test/integration/targets/incidental_setup_mongodb/aliases1
-rw-r--r--test/integration/targets/incidental_setup_mongodb/defaults/main.yml46
-rw-r--r--test/integration/targets/incidental_setup_mongodb/handlers/main.yml24
-rw-r--r--test/integration/targets/incidental_setup_mongodb/tasks/main.yml166
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/aliases1
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml17
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql2
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql2
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql2
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/files/dummy.control3
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf10
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml222
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml81
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml12
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml12
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml12
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml12
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml12
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml12
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml7
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml8
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml6
-rw-r--r--test/integration/targets/incidental_setup_postgresql_db/vars/default.yml6
-rw-r--r--test/integration/targets/incidental_setup_rabbitmq/aliases1
-rw-r--r--test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf8
-rw-r--r--test/integration/targets/incidental_setup_rabbitmq/meta/main.yml3
-rw-r--r--test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml3
-rw-r--r--test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml63
-rw-r--r--test/integration/targets/incidental_setup_tls/aliases1
-rw-r--r--test/integration/targets/incidental_setup_tls/files/ca_certificate.pem19
-rw-r--r--test/integration/targets/incidental_setup_tls/files/ca_key.pem28
-rw-r--r--test/integration/targets/incidental_setup_tls/files/client_certificate.pem20
-rw-r--r--test/integration/targets/incidental_setup_tls/files/client_key.pem27
-rw-r--r--test/integration/targets/incidental_setup_tls/files/server_certificate.pem20
-rw-r--r--test/integration/targets/incidental_setup_tls/files/server_key.pem27
-rw-r--r--test/integration/targets/incidental_setup_tls/tasks/main.yml21
-rw-r--r--test/integration/targets/incidental_synchronize/aliases1
-rw-r--r--test/integration/targets/incidental_synchronize/files/bar.txt1
-rw-r--r--test/integration/targets/incidental_synchronize/files/foo.txt1
-rw-r--r--test/integration/targets/incidental_synchronize/tasks/main.yml273
-rw-r--r--test/integration/targets/incidental_timezone/aliases5
-rw-r--r--test/integration/targets/incidental_timezone/tasks/main.yml57
-rw-r--r--test/integration/targets/incidental_timezone/tasks/test.yml607
-rw-r--r--test/integration/targets/incidental_vyos_config/aliases2
-rw-r--r--test/integration/targets/incidental_vyos_config/defaults/main.yaml3
-rw-r--r--test/integration/targets/incidental_vyos_config/tasks/cli.yaml22
-rw-r--r--test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml16
-rw-r--r--test/integration/targets/incidental_vyos_config/tasks/main.yaml3
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml113
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml63
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml34
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli/config.cfg3
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli/save.yaml54
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml53
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml114
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml28
-rw-r--r--test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml30
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/aliases2
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml3
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml3
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml19
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml2
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml14
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml10
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml8
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml46
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml36
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml58
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml49
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml63
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml57
-rw-r--r--test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml130
-rw-r--r--test/integration/targets/incidental_vyos_prepare_tests/aliases1
-rw-r--r--test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml13
-rw-r--r--test/integration/targets/incidental_win_copy/aliases2
-rw-r--r--test/integration/targets/incidental_win_copy/defaults/main.yml1
-rw-r--r--test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file6
-rw-r--r--test/integration/targets/incidental_win_copy/files-different/vault/readme.txt5
-rw-r--r--test/integration/targets/incidental_win_copy/files-different/vault/vault-file6
-rw-r--r--test/integration/targets/incidental_win_copy/files/empty.txt0
-rw-r--r--test/integration/targets/incidental_win_copy/files/foo.txt1
-rw-r--r--test/integration/targets/incidental_win_copy/files/subdir/bar.txt1
-rw-r--r--test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt1
-rw-r--r--test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt1
-rw-r--r--test/integration/targets/incidental_win_copy/tasks/main.yml34
-rw-r--r--test/integration/targets/incidental_win_copy/tasks/remote_tests.yml471
-rw-r--r--test/integration/targets/incidental_win_copy/tasks/tests.yml535
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/aliases5
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/tasks/main.yml2
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml40
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml47
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j211
-rw-r--r--test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j23
-rw-r--r--test/integration/targets/incidental_win_dsc/aliases6
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm141
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof7
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1214
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof60
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd113
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1214
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof63
-rw-r--r--test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd113
-rw-r--r--test/integration/targets/incidental_win_dsc/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_win_dsc/tasks/main.yml39
-rw-r--r--test/integration/targets/incidental_win_dsc/tasks/tests.yml544
-rw-r--r--test/integration/targets/incidental_win_lineinfile/aliases3
-rw-r--r--test/integration/targets/incidental_win_lineinfile/files/test.txt5
-rw-r--r--test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt0
-rw-r--r--test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt0
-rw-r--r--test/integration/targets/incidental_win_lineinfile/files/testempty.txt0
-rw-r--r--test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt2
-rw-r--r--test/integration/targets/incidental_win_lineinfile/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_win_lineinfile/tasks/main.yml708
-rw-r--r--test/integration/targets/incidental_win_ping/aliases2
-rw-r--r--test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps131
-rw-r--r--test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps130
-rw-r--r--test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps130
-rw-r--r--test/integration/targets/incidental_win_ping/library/win_ping_throw.ps130
-rw-r--r--test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps130
-rw-r--r--test/integration/targets/incidental_win_ping/tasks/main.yml67
-rw-r--r--test/integration/targets/incidental_win_prepare_tests/aliases1
-rw-r--r--test/integration/targets/incidental_win_prepare_tests/meta/main.yml3
-rw-r--r--test/integration/targets/incidental_win_prepare_tests/tasks/main.yml29
-rw-r--r--test/integration/targets/incidental_win_psexec/aliases2
-rw-r--r--test/integration/targets/incidental_win_psexec/meta/main.yml2
-rw-r--r--test/integration/targets/incidental_win_psexec/tasks/main.yml80
-rw-r--r--test/integration/targets/incidental_win_reboot/aliases2
-rw-r--r--test/integration/targets/incidental_win_reboot/tasks/main.yml70
-rw-r--r--test/integration/targets/incidental_win_reboot/templates/post_reboot.ps18
-rw-r--r--test/integration/targets/incidental_win_security_policy/aliases2
-rw-r--r--test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps153
-rw-r--r--test/integration/targets/incidental_win_security_policy/tasks/main.yml41
-rw-r--r--test/integration/targets/incidental_win_security_policy/tasks/tests.yml186
-rw-r--r--test/integration/targets/incidental_xml/aliases4
-rw-r--r--test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml13
-rw-r--r--test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml14
-rw-r--r--test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-elements.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml17
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml17
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml32
-rw-r--r--test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-pretty-print-only.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-pretty-print.xml15
-rw-r--r--test/integration/targets/incidental_xml/results/test-remove-attribute.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-remove-element.xml13
-rw-r--r--test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml13
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-attribute-value.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml11
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml11
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-children-elements.xml11
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-element-value.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml14
-rw-r--r--test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml14
-rw-r--r--test/integration/targets/incidental_xml/tasks/main.yml67
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml29
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml29
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml28
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml32
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml32
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml31
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml35
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml237
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml32
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml30
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-count-unicode.yml19
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-count.yml19
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml32
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-get-element-content.yml52
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml22
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml29
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-pretty-print.yml30
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml28
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-remove-element.yml28
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml33
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml33
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml29
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml29
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml74
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml46
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml53
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml28
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml43
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-element-value.yml43
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml34
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml57
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml46
-rw-r--r--test/integration/targets/incidental_xml/tasks/test-xmlstring.yml81
-rw-r--r--test/integration/targets/incidental_xml/vars/main.yml6
-rw-r--r--test/integration/targets/include_import/aliases2
-rw-r--r--test/integration/targets/include_import/apply/import_apply.yml31
-rw-r--r--test/integration/targets/include_import/apply/include_apply.yml50
-rw-r--r--test/integration/targets/include_import/apply/include_tasks.yml2
-rw-r--r--test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/empty_group_warning/playbook.yml13
-rw-r--r--test/integration/targets/include_import/empty_group_warning/tasks.yml3
-rw-r--r--test/integration/targets/include_import/grandchild/block_include_tasks.yml2
-rw-r--r--test/integration/targets/include_import/grandchild/import.yml1
-rw-r--r--test/integration/targets/include_import/grandchild/import_include_include_tasks.yml2
-rw-r--r--test/integration/targets/include_import/grandchild/include_level_1.yml1
-rw-r--r--test/integration/targets/include_import/handler_addressing/playbook.yml11
-rw-r--r--test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml2
-rw-r--r--test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml2
-rw-r--r--test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml2
-rw-r--r--test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml2
-rw-r--r--test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/inventory6
-rw-r--r--test/integration/targets/include_import/nestedtasks/nested/nested.yml2
-rw-r--r--test/integration/targets/include_import/parent_templating/playbook.yml11
-rw-r--r--test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml1
-rw-r--r--test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml1
-rw-r--r--test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml2
-rw-r--r--test/integration/targets/include_import/playbook/group_vars/all.yml1
-rw-r--r--test/integration/targets/include_import/playbook/playbook1.yml9
-rw-r--r--test/integration/targets/include_import/playbook/playbook2.yml9
-rw-r--r--test/integration/targets/include_import/playbook/playbook3.yml10
-rw-r--r--test/integration/targets/include_import/playbook/playbook4.yml9
-rw-r--r--test/integration/targets/include_import/playbook/playbook_needing_vars.yml6
-rw-r--r--test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py30
-rw-r--r--test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml4
-rw-r--r--test/integration/targets/include_import/playbook/test_import_playbook.yml26
-rw-r--r--test/integration/targets/include_import/playbook/test_import_playbook_tags.yml10
-rw-r--r--test/integration/targets/include_import/playbook/validate1.yml10
-rw-r--r--test/integration/targets/include_import/playbook/validate2.yml10
-rw-r--r--test/integration/targets/include_import/playbook/validate34.yml11
-rw-r--r--test/integration/targets/include_import/playbook/validate_tags.yml11
-rw-r--r--test/integration/targets/include_import/public_exposure/no_bleeding.yml25
-rw-r--r--test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml4
-rw-r--r--test/integration/targets/include_import/public_exposure/playbook.yml56
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml6
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml5
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml5
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml5
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml5
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml1
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml5
-rw-r--r--test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml1
-rw-r--r--test/integration/targets/include_import/role/test_import_role.yml139
-rw-r--r--test/integration/targets/include_import/role/test_include_role.yml166
-rw-r--r--test/integration/targets/include_import/role/test_include_role_vars_from.yml10
-rw-r--r--test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml4
-rw-r--r--test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml4
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml1
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml4
-rw-r--r--test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested_include_task/meta/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml3
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/canary1.yml2
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/canary2.yml2
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/canary3.yml2
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/fail.yml3
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t01.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t02.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t03.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t04.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t05.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t06.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t07.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t08.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t09.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t10.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t11.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/r1t12.yml2
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/tasks.yml2
-rw-r--r--test/integration/targets/include_import/roles/role1/tasks/vartest.yml2
-rw-r--r--test/integration/targets/include_import/roles/role1/vars/main.yml1
-rw-r--r--test/integration/targets/include_import/roles/role1/vars/role1vars.yml1
-rw-r--r--test/integration/targets/include_import/roles/role2/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/role3/defaults/main.yml2
-rw-r--r--test/integration/targets/include_import/roles/role3/handlers/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/role3/tasks/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/role3/tasks/tasks.yml2
-rw-r--r--test/integration/targets/include_import/roles/role3/tasks/vartest.yml2
-rw-r--r--test/integration/targets/include_import/roles/role3/vars/main.yml1
-rw-r--r--test/integration/targets/include_import/roles/role3/vars/role3vars.yml2
-rw-r--r--test/integration/targets/include_import/roles/role_with_deps/meta/main.yml3
-rw-r--r--test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml2
-rw-r--r--test/integration/targets/include_import/run_once/include_me.yml2
-rw-r--r--test/integration/targets/include_import/run_once/playbook.yml61
-rwxr-xr-xtest/integration/targets/include_import/runme.sh124
-rw-r--r--test/integration/targets/include_import/tasks/debug_item.yml2
-rw-r--r--test/integration/targets/include_import/tasks/hello/.gitignore1
-rw-r--r--test/integration/targets/include_import/tasks/hello/keep0
-rw-r--r--test/integration/targets/include_import/tasks/nested/nested.yml2
-rw-r--r--test/integration/targets/include_import/tasks/tasks1.yml5
-rw-r--r--test/integration/targets/include_import/tasks/tasks2.yml5
-rw-r--r--test/integration/targets/include_import/tasks/tasks3.yml5
-rw-r--r--test/integration/targets/include_import/tasks/tasks4.yml5
-rw-r--r--test/integration/targets/include_import/tasks/tasks5.yml6
-rw-r--r--test/integration/targets/include_import/tasks/tasks6.yml5
-rw-r--r--test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml8
-rw-r--r--test/integration/targets/include_import/tasks/test_import_tasks.yml41
-rw-r--r--test/integration/targets/include_import/tasks/test_import_tasks_tags.yml23
-rw-r--r--test/integration/targets/include_import/tasks/test_include_dupe_loop.yml8
-rw-r--r--test/integration/targets/include_import/tasks/test_include_tasks.yml44
-rw-r--r--test/integration/targets/include_import/tasks/test_include_tasks_tags.yml25
-rw-r--r--test/integration/targets/include_import/tasks/test_recursion.yml6
-rw-r--r--test/integration/targets/include_import/tasks/validate3.yml4
-rw-r--r--test/integration/targets/include_import/tasks/validate_tags.yml8
-rw-r--r--test/integration/targets/include_import/test_copious_include_tasks.yml44
-rw-r--r--test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml44
-rw-r--r--test/integration/targets/include_import/test_grandparent_inheritance.yml29
-rw-r--r--test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml29
-rw-r--r--test/integration/targets/include_import/test_include_loop_fqcn.yml17
-rw-r--r--test/integration/targets/include_import/test_loop_var_bleed.yaml9
-rw-r--r--test/integration/targets/include_import/test_nested_tasks.yml6
-rw-r--r--test/integration/targets/include_import/test_nested_tasks_fqcn.yml6
-rw-r--r--test/integration/targets/include_import/test_role_recursion.yml7
-rw-r--r--test/integration/targets/include_import/test_role_recursion_fqcn.yml7
-rw-r--r--test/integration/targets/include_import/undefined_var/include_tasks.yml5
-rw-r--r--test/integration/targets/include_import/undefined_var/include_that_defines_var.yml5
-rw-r--r--test/integration/targets/include_import/undefined_var/playbook.yml36
-rw-r--r--test/integration/targets/include_import/valid_include_keywords/include_me.yml6
-rw-r--r--test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml2
-rw-r--r--test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml2
-rw-r--r--test/integration/targets/include_import/valid_include_keywords/playbook.yml40
-rw-r--r--test/integration/targets/include_parent_role_vars/aliases2
-rw-r--r--test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml37
-rw-r--r--test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml14
-rw-r--r--test/integration/targets/include_parent_role_vars/tasks/main.yml21
-rw-r--r--test/integration/targets/include_vars-ad-hoc/aliases1
-rw-r--r--test/integration/targets/include_vars-ad-hoc/dir/inc.yml1
-rwxr-xr-xtest/integration/targets/include_vars-ad-hoc/runme.sh6
-rw-r--r--test/integration/targets/include_vars/aliases1
-rw-r--r--test/integration/targets/include_vars/defaults/main.yml3
-rw-r--r--test/integration/targets/include_vars/tasks/main.yml164
-rw-r--r--test/integration/targets/include_vars/vars/all/all.yml3
-rw-r--r--test/integration/targets/include_vars/vars/environments/development/all.yml3
-rw-r--r--test/integration/targets/include_vars/vars/environments/development/services/webapp.yml4
-rw-r--r--test/integration/targets/include_vars/vars/services/service_vars.yml2
-rw-r--r--test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml3
-rw-r--r--test/integration/targets/include_vars/vars/services/webapp.yml4
-rw-r--r--test/integration/targets/include_vars/vars/webapp/file_without_extension2
-rw-r--r--test/integration/targets/include_when_parent_is_dynamic/aliases2
-rw-r--r--test/integration/targets/include_when_parent_is_dynamic/playbook.yml4
-rwxr-xr-xtest/integration/targets/include_when_parent_is_dynamic/runme.sh13
-rw-r--r--test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml1
-rw-r--r--test/integration/targets/include_when_parent_is_dynamic/tasks.yml12
-rw-r--r--test/integration/targets/include_when_parent_is_static/aliases2
-rw-r--r--test/integration/targets/include_when_parent_is_static/playbook.yml4
-rwxr-xr-xtest/integration/targets/include_when_parent_is_static/runme.sh13
-rw-r--r--test/integration/targets/include_when_parent_is_static/syntax_error.yml1
-rw-r--r--test/integration/targets/include_when_parent_is_static/tasks.yml12
-rw-r--r--test/integration/targets/includes/aliases1
-rw-r--r--test/integration/targets/includes/roles/test_includes/handlers/main.yml1
-rw-r--r--test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml12
-rw-r--r--test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml9
-rw-r--r--test/integration/targets/includes/roles/test_includes/tasks/empty.yml0
-rw-r--r--test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml9
-rw-r--r--test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml2
-rw-r--r--test/integration/targets/includes/roles/test_includes/tasks/main.yml106
-rw-r--r--test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml4
-rw-r--r--test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml2
-rw-r--r--test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml2
-rw-r--r--test/integration/targets/includes/roles/test_includes_free/tasks/main.yml9
-rw-r--r--test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml2
-rw-r--r--test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml6
-rwxr-xr-xtest/integration/targets/includes/runme.sh5
-rw-r--r--test/integration/targets/includes/test_include_free.yml10
-rw-r--r--test/integration/targets/includes/test_include_host_pinned.yml9
-rw-r--r--test/integration/targets/includes/test_includes.yml7
-rw-r--r--test/integration/targets/includes/test_includes2.yml22
-rw-r--r--test/integration/targets/includes/test_includes3.yml6
-rw-r--r--test/integration/targets/includes/test_includes4.yml2
-rw-r--r--test/integration/targets/includes_race/aliases2
-rw-r--r--test/integration/targets/includes_race/inventory30
-rw-r--r--test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml8
-rw-r--r--test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml4
-rw-r--r--test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml4
-rwxr-xr-xtest/integration/targets/includes_race/runme.sh5
-rw-r--r--test/integration/targets/includes_race/test_includes_race.yml19
-rw-r--r--test/integration/targets/infra/aliases3
-rw-r--r--test/integration/targets/infra/inventory.local2
-rw-r--r--test/integration/targets/infra/library/test.py21
-rwxr-xr-xtest/integration/targets/infra/runme.sh39
-rw-r--r--test/integration/targets/infra/test_test_infra.yml25
-rw-r--r--test/integration/targets/interpreter_discovery_python/aliases2
-rw-r--r--test/integration/targets/interpreter_discovery_python/library/test_echo_module.py29
-rw-r--r--test/integration/targets/interpreter_discovery_python/tasks/main.yml177
-rw-r--r--test/integration/targets/interpreter_discovery_python_delegate_facts/aliases2
-rw-r--r--test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml10
-rw-r--r--test/integration/targets/interpreter_discovery_python_delegate_facts/inventory2
-rwxr-xr-xtest/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh5
-rw-r--r--test/integration/targets/inventory/aliases1
-rw-r--r--test/integration/targets/inventory/playbook.yml4
-rwxr-xr-xtest/integration/targets/inventory/runme.sh36
-rw-r--r--test/integration/targets/inventory/strategy.yml12
-rw-r--r--test/integration/targets/inventory_ini/aliases1
-rw-r--r--test/integration/targets/inventory_ini/inventory.ini5
-rwxr-xr-xtest/integration/targets/inventory_ini/runme.sh5
-rw-r--r--test/integration/targets/inventory_ini/test_ansible_become.yml11
-rw-r--r--test/integration/targets/inventory_script/aliases1
-rw-r--r--test/integration/targets/inventory_script/inventory.json1045
-rwxr-xr-xtest/integration/targets/inventory_script/inventory.sh7
-rwxr-xr-xtest/integration/targets/inventory_script/runme.sh5
-rw-r--r--test/integration/targets/inventory_yaml/aliases2
-rw-r--r--test/integration/targets/inventory_yaml/empty.json10
-rwxr-xr-xtest/integration/targets/inventory_yaml/runme.sh4
-rw-r--r--test/integration/targets/inventory_yaml/success.json61
-rw-r--r--test/integration/targets/inventory_yaml/test.yml27
-rw-r--r--test/integration/targets/jinja2_native_types/aliases1
-rw-r--r--test/integration/targets/jinja2_native_types/nested_undefined.yml24
-rwxr-xr-xtest/integration/targets/jinja2_native_types/runme.sh10
-rw-r--r--test/integration/targets/jinja2_native_types/runtests.yml50
-rw-r--r--test/integration/targets/jinja2_native_types/test_bool.yml53
-rw-r--r--test/integration/targets/jinja2_native_types/test_casting.yml31
-rw-r--r--test/integration/targets/jinja2_native_types/test_concatentation.yml88
-rw-r--r--test/integration/targets/jinja2_native_types/test_dunder.yml23
-rw-r--r--test/integration/targets/jinja2_native_types/test_hostvars.yml10
-rw-r--r--test/integration/targets/jinja2_native_types/test_none.yml11
-rw-r--r--test/integration/targets/jinja2_native_types/test_template.yml27
-rw-r--r--test/integration/targets/jinja2_native_types/test_template_newlines.j24
-rw-r--r--test/integration/targets/jinja2_native_types/test_types.yml20
-rw-r--r--test/integration/targets/jinja2_native_types/test_vault.yml16
-rw-r--r--test/integration/targets/jinja2_native_types/test_vault_pass1
-rw-r--r--test/integration/targets/known_hosts/aliases1
-rw-r--r--test/integration/targets/known_hosts/defaults/main.yml3
-rw-r--r--test/integration/targets/known_hosts/files/existing_known_hosts5
-rw-r--r--test/integration/targets/known_hosts/meta/main.yml2
-rw-r--r--test/integration/targets/known_hosts/tasks/main.yml377
-rw-r--r--test/integration/targets/limit_inventory/aliases1
-rw-r--r--test/integration/targets/limit_inventory/hosts.yml5
-rwxr-xr-xtest/integration/targets/limit_inventory/runme.sh31
-rw-r--r--test/integration/targets/lineinfile/aliases1
-rw-r--r--test/integration/targets/lineinfile/files/firstmatch.txt5
-rw-r--r--test/integration/targets/lineinfile/files/test.conf5
-rw-r--r--test/integration/targets/lineinfile/files/test.txt5
-rw-r--r--test/integration/targets/lineinfile/files/test_58923.txt4
-rw-r--r--test/integration/targets/lineinfile/files/testempty.txt0
-rw-r--r--test/integration/targets/lineinfile/files/testmultiple.txt7
-rw-r--r--test/integration/targets/lineinfile/files/testnoeof.txt2
-rw-r--r--test/integration/targets/lineinfile/meta/main.yml20
-rw-r--r--test/integration/targets/lineinfile/tasks/main.yml1157
-rw-r--r--test/integration/targets/lineinfile/vars/main.yml29
-rw-r--r--test/integration/targets/lookup_config/aliases3
-rw-r--r--test/integration/targets/lookup_config/tasks/main.yml59
-rw-r--r--test/integration/targets/lookup_dict/aliases3
-rw-r--r--test/integration/targets/lookup_dict/tasks/main.yml54
-rw-r--r--test/integration/targets/lookup_env/aliases3
-rw-r--r--test/integration/targets/lookup_env/tasks/main.yml15
-rw-r--r--test/integration/targets/lookup_file/aliases3
-rw-r--r--test/integration/targets/lookup_file/tasks/main.yml13
-rw-r--r--test/integration/targets/lookup_fileglob/aliases1
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt1
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt1
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/play.yml13
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/play_adj.txt1
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt1
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt1
-rw-r--r--test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml10
-rw-r--r--test/integration/targets/lookup_fileglob/non_existent/play.yml6
-rwxr-xr-xtest/integration/targets/lookup_fileglob/runme.sh15
-rw-r--r--test/integration/targets/lookup_first_found/aliases3
-rw-r--r--test/integration/targets/lookup_first_found/files/bar11
-rw-r--r--test/integration/targets/lookup_first_found/files/foo11
-rw-r--r--test/integration/targets/lookup_first_found/tasks/main.yml73
-rw-r--r--test/integration/targets/lookup_indexed_items/aliases3
-rw-r--r--test/integration/targets/lookup_indexed_items/tasks/main.yml16
-rw-r--r--test/integration/targets/lookup_ini/aliases2
-rw-r--r--test/integration/targets/lookup_ini/lookup-8859-15.ini7
-rw-r--r--test/integration/targets/lookup_ini/lookup.ini25
-rw-r--r--test/integration/targets/lookup_ini/lookup.properties6
-rwxr-xr-xtest/integration/targets/lookup_ini/runme.sh5
-rw-r--r--test/integration/targets/lookup_ini/test_lookup_properties.yml71
-rw-r--r--test/integration/targets/lookup_inventory_hostnames/aliases2
-rw-r--r--test/integration/targets/lookup_inventory_hostnames/inventory6
-rw-r--r--test/integration/targets/lookup_inventory_hostnames/main.yml13
-rwxr-xr-xtest/integration/targets/lookup_inventory_hostnames/runme.sh5
-rw-r--r--test/integration/targets/lookup_items/aliases3
-rw-r--r--test/integration/targets/lookup_items/tasks/main.yml14
-rw-r--r--test/integration/targets/lookup_lines/aliases3
-rw-r--r--test/integration/targets/lookup_lines/tasks/main.yml13
-rw-r--r--test/integration/targets/lookup_list/aliases3
-rw-r--r--test/integration/targets/lookup_list/tasks/main.yml19
-rw-r--r--test/integration/targets/lookup_nested/aliases3
-rw-r--r--test/integration/targets/lookup_nested/tasks/main.yml18
-rw-r--r--test/integration/targets/lookup_password/aliases3
-rwxr-xr-xtest/integration/targets/lookup_password/runme.sh11
-rw-r--r--test/integration/targets/lookup_password/runme.yml4
-rw-r--r--test/integration/targets/lookup_password/tasks/main.yml104
-rw-r--r--test/integration/targets/lookup_pipe/aliases3
-rw-r--r--test/integration/targets/lookup_pipe/tasks/main.yml9
-rw-r--r--test/integration/targets/lookup_random_choice/aliases3
-rw-r--r--test/integration/targets/lookup_random_choice/tasks/main.yml10
-rw-r--r--test/integration/targets/lookup_sequence/aliases3
-rw-r--r--test/integration/targets/lookup_sequence/tasks/main.yml63
-rw-r--r--test/integration/targets/lookup_subelements/aliases3
-rw-r--r--test/integration/targets/lookup_subelements/tasks/main.yml45
-rw-r--r--test/integration/targets/lookup_subelements/vars/main.yml43
-rw-r--r--test/integration/targets/lookup_template/aliases3
-rw-r--r--test/integration/targets/lookup_template/tasks/main.yml19
-rw-r--r--test/integration/targets/lookup_template/templates/hello.txt1
-rw-r--r--test/integration/targets/lookup_template/templates/hello_string.txt1
-rw-r--r--test/integration/targets/lookup_template/templates/world.txt1
-rw-r--r--test/integration/targets/lookup_together/aliases3
-rw-r--r--test/integration/targets/lookup_together/tasks/main.yml14
-rw-r--r--test/integration/targets/lookup_unvault/aliases3
-rw-r--r--test/integration/targets/lookup_unvault/files/foot.txt1
-rw-r--r--test/integration/targets/lookup_unvault/files/foot.txt.vault6
-rwxr-xr-xtest/integration/targets/lookup_unvault/runme.sh6
-rw-r--r--test/integration/targets/lookup_unvault/secret1
-rw-r--r--test/integration/targets/lookup_unvault/unvault.yml9
-rw-r--r--test/integration/targets/lookup_url/aliases5
-rw-r--r--test/integration/targets/lookup_url/meta/main.yml2
-rw-r--r--test/integration/targets/lookup_url/tasks/main.yml28
-rw-r--r--test/integration/targets/lookup_vars/aliases3
-rw-r--r--test/integration/targets/lookup_vars/tasks/main.yml16
-rw-r--r--test/integration/targets/loop_control/aliases1
-rw-r--r--test/integration/targets/loop_control/extended.yml12
-rw-r--r--test/integration/targets/loop_control/inner.yml9
-rw-r--r--test/integration/targets/loop_control/label.yml23
-rwxr-xr-xtest/integration/targets/loop_control/runme.sh12
-rw-r--r--test/integration/targets/loops/aliases2
-rw-r--r--test/integration/targets/loops/files/data1.txt1
-rw-r--r--test/integration/targets/loops/files/data2.txt1
-rw-r--r--test/integration/targets/loops/tasks/index_var_tasks.yml3
-rw-r--r--test/integration/targets/loops/tasks/main.yml391
-rw-r--r--test/integration/targets/loops/tasks/templated_loop_var_tasks.yml4
-rw-r--r--test/integration/targets/loops/vars/64169.yml2
-rw-r--r--test/integration/targets/loops/vars/main.yml8
-rw-r--r--test/integration/targets/meta_tasks/aliases1
-rw-r--r--test/integration/targets/meta_tasks/inventory.yml9
-rwxr-xr-xtest/integration/targets/meta_tasks/runme.sh50
-rw-r--r--test/integration/targets/meta_tasks/test_end_host.yml14
-rw-r--r--test/integration/targets/meta_tasks/test_end_host_all.yml13
-rw-r--r--test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml13
-rw-r--r--test/integration/targets/meta_tasks/test_end_host_fqcn.yml14
-rw-r--r--test/integration/targets/meta_tasks/test_end_play.yml12
-rw-r--r--test/integration/targets/meta_tasks/test_end_play_fqcn.yml12
-rw-r--r--test/integration/targets/missing_required_lib/aliases1
-rw-r--r--test/integration/targets/missing_required_lib/library/missing_required_lib.py37
-rwxr-xr-xtest/integration/targets/missing_required_lib/runme.sh5
-rw-r--r--test/integration/targets/missing_required_lib/runme.yml57
-rw-r--r--test/integration/targets/missing_required_lib/tasks/main.yml3
-rw-r--r--test/integration/targets/module_defaults/aliases1
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py8
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py13
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml9
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py19
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py15
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py13
-rw-r--r--test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py13
-rw-r--r--test/integration/targets/module_defaults/library/test_module_defaults.py30
-rwxr-xr-xtest/integration/targets/module_defaults/runme.sh5
-rw-r--r--test/integration/targets/module_defaults/tasks/main.yml89
-rw-r--r--test/integration/targets/module_defaults/test_defaults.yml60
-rw-r--r--test/integration/targets/module_no_log/aliases5
-rw-r--r--test/integration/targets/module_no_log/library/module_that_logs.py18
-rw-r--r--test/integration/targets/module_no_log/tasks/main.yml61
-rw-r--r--test/integration/targets/module_precedence/aliases1
-rw-r--r--test/integration/targets/module_precedence/lib_no_extension/ping69
-rw-r--r--test/integration/targets/module_precedence/lib_with_extension/a.ini13
-rw-r--r--test/integration/targets/module_precedence/lib_with_extension/a.py13
-rw-r--r--test/integration/targets/module_precedence/lib_with_extension/ping.ini13
-rw-r--r--test/integration/targets/module_precedence/lib_with_extension/ping.py69
-rw-r--r--test/integration/targets/module_precedence/modules_test.yml10
-rw-r--r--test/integration/targets/module_precedence/modules_test_envvar.yml11
-rw-r--r--test/integration/targets/module_precedence/modules_test_envvar_ext.yml16
-rw-r--r--test/integration/targets/module_precedence/modules_test_multiple_roles.yml17
-rw-r--r--test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml16
-rw-r--r--test/integration/targets/module_precedence/modules_test_role.yml13
-rw-r--r--test/integration/targets/module_precedence/modules_test_role_ext.yml18
-rw-r--r--test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py69
-rw-r--r--test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml10
-rw-r--r--test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py69
-rw-r--r--test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml10
-rw-r--r--test/integration/targets/module_precedence/roles_no_extension/foo/library/ping69
-rw-r--r--test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml10
-rw-r--r--test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini13
-rw-r--r--test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py13
-rw-r--r--test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini13
-rw-r--r--test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py69
-rw-r--r--test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml10
-rwxr-xr-xtest/integration/targets/module_precedence/runme.sh49
-rw-r--r--test/integration/targets/module_tracebacks/aliases3
-rw-r--r--test/integration/targets/module_tracebacks/inventory5
-rwxr-xr-xtest/integration/targets/module_tracebacks/runme.sh5
-rw-r--r--test/integration/targets/module_tracebacks/traceback.yml21
-rw-r--r--test/integration/targets/module_utils/aliases3
-rw-r--r--test/integration/targets/module_utils/library/test.py85
-rw-r--r--test/integration/targets/module_utils/library/test_alias_deprecation.py15
-rw-r--r--test/integration/targets/module_utils/library/test_cwd_missing.py33
-rw-r--r--test/integration/targets/module_utils/library/test_cwd_unreadable.py28
-rw-r--r--test/integration/targets/module_utils/library/test_env_override.py11
-rw-r--r--test/integration/targets/module_utils/library/test_failure.py14
-rw-r--r--test/integration/targets/module_utils/library/test_override.py7
-rw-r--r--test/integration/targets/module_utils/library/test_recursive_diff.py29
-rw-r--r--test/integration/targets/module_utils/module_utils/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/c/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/bar0/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/bar0/foo.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/bar1/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/bar2/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/baz1/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/baz1/one.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/baz2/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/baz2/one.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/facts.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/foo.py3
-rw-r--r--test/integration/targets/module_utils/module_utils/foo0.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/foo1.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/foo2.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/qux1/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/qux1/quux.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/qux2/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/qux2/quux.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/qux2/quuz.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/service.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam1/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam2/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam3/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam4/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam5/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam6/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py2
-rw-r--r--test/integration/targets/module_utils/module_utils/spam7/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam8/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py1
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/bam.py3
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/bam/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/bam/bam.py3
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/bar/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/bar/bam.py3
-rw-r--r--test/integration/targets/module_utils/module_utils/sub/bar/bar.py3
-rw-r--r--test/integration/targets/module_utils/module_utils/yak/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py0
-rw-r--r--test/integration/targets/module_utils/module_utils/yak/zebra/foo.py1
-rw-r--r--test/integration/targets/module_utils/module_utils_basic_setcwd.yml22
-rw-r--r--test/integration/targets/module_utils/module_utils_common_dict_transformation.yml34
-rw-r--r--test/integration/targets/module_utils/module_utils_envvar.yml51
-rw-r--r--test/integration/targets/module_utils/module_utils_test.yml62
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py0
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py1
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/facts.py1
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/json_utils.py1
-rw-r--r--test/integration/targets/module_utils/other_mu_dir/mork.py1
-rwxr-xr-xtest/integration/targets/module_utils/runme.sh10
-rw-r--r--test/integration/targets/module_utils_Ansible.AccessToken/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1378
-rw-r--r--test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml29
-rw-r--r--test/integration/targets/module_utils_Ansible.Basic/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps13098
-rw-r--r--test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml9
-rw-r--r--test/integration/targets/module_utils_Ansible.Become/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps11009
-rw-r--r--test/integration/targets/module_utils_Ansible.Become/tasks/main.yml28
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1299
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml10
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps193
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml9
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps189
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml10
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps174
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml8
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1135
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml9
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1108
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml8
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps112
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps19
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml41
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1170
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml8
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1112
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml8
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps193
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml22
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases4
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1467
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml3
-rw-r--r--test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml10
-rw-r--r--test/integration/targets/module_utils_Ansible.Privilege/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1324
-rw-r--r--test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml9
-rw-r--r--test/integration/targets/module_utils_Ansible.Process/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1236
-rw-r--r--test/integration/targets/module_utils_Ansible.Process/tasks/main.yml9
-rw-r--r--test/integration/targets/module_utils_Ansible.Service/aliases3
-rw-r--r--test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1937
-rw-r--r--test/integration/targets/module_utils_Ansible.Service/tasks/main.yml9
-rw-r--r--test/integration/targets/network_cli/aliases3
-rw-r--r--test/integration/targets/network_cli/passworded_user.yml14
-rwxr-xr-xtest/integration/targets/network_cli/runme.sh27
-rw-r--r--test/integration/targets/network_cli/setup.yml14
-rw-r--r--test/integration/targets/network_cli/teardown.yml14
-rw-r--r--test/integration/targets/no_log/aliases1
-rw-r--r--test/integration/targets/no_log/dynamic.yml27
-rw-r--r--test/integration/targets/no_log/library/module.py45
-rw-r--r--test/integration/targets/no_log/no_log_local.yml92
-rw-r--r--test/integration/targets/no_log/no_log_suboptions.yml24
-rw-r--r--test/integration/targets/no_log/no_log_suboptions_invalid.yml45
-rwxr-xr-xtest/integration/targets/no_log/runme.sh21
-rw-r--r--test/integration/targets/old_style_cache_plugins/aliases4
-rw-r--r--test/integration/targets/old_style_cache_plugins/inventory_config1
-rw-r--r--test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py141
-rw-r--r--test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py59
-rwxr-xr-xtest/integration/targets/old_style_cache_plugins/runme.sh80
-rw-r--r--test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml6
-rw-r--r--test/integration/targets/old_style_modules_posix/aliases1
-rw-r--r--test/integration/targets/old_style_modules_posix/library/helloworld.sh29
-rw-r--r--test/integration/targets/old_style_modules_posix/meta/main.yml2
-rw-r--r--test/integration/targets/old_style_modules_posix/tasks/main.yml44
-rw-r--r--test/integration/targets/omit/48673.yml4
-rw-r--r--test/integration/targets/omit/aliases1
-rwxr-xr-xtest/integration/targets/omit/runme.sh5
-rw-r--r--test/integration/targets/order/aliases1
-rw-r--r--test/integration/targets/order/inventory9
-rw-r--r--test/integration/targets/order/order.yml39
-rwxr-xr-xtest/integration/targets/order/runme.sh24
-rw-r--r--test/integration/targets/package/aliases3
-rw-r--r--test/integration/targets/package/meta/main.yml2
-rw-r--r--test/integration/targets/package/tasks/main.yml114
-rw-r--r--test/integration/targets/package_facts/aliases4
-rw-r--r--test/integration/targets/package_facts/tasks/main.yml115
-rw-r--r--test/integration/targets/parsing/aliases1
-rw-r--r--test/integration/targets/parsing/bad_parsing.yml12
-rw-r--r--test/integration/targets/parsing/good_parsing.yml9
-rw-r--r--test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml60
-rw-r--r--test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml4
-rw-r--r--test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml4
-rw-r--r--test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml4
-rw-r--r--test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml4
-rw-r--r--test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml2
-rw-r--r--test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml204
-rw-r--r--test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml1
-rw-r--r--test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml1
-rw-r--r--test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml2
-rw-r--r--test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml2
-rwxr-xr-xtest/integration/targets/parsing/runme.sh6
-rw-r--r--test/integration/targets/path_lookups/aliases1
-rw-r--r--test/integration/targets/path_lookups/play.yml49
-rw-r--r--test/integration/targets/path_lookups/roles/showfile/tasks/main.yml2
-rwxr-xr-xtest/integration/targets/path_lookups/runme.sh5
-rw-r--r--test/integration/targets/path_lookups/testplay.yml20
-rw-r--r--test/integration/targets/path_with_comma_in_inventory/aliases1
-rw-r--r--test/integration/targets/path_with_comma_in_inventory/playbook.yml9
-rwxr-xr-xtest/integration/targets/path_with_comma_in_inventory/runme.sh5
-rw-r--r--test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml1
-rw-r--r--test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts1
-rw-r--r--test/integration/targets/pause/aliases3
-rw-r--r--test/integration/targets/pause/pause-1.yml11
-rw-r--r--test/integration/targets/pause/pause-2.yml12
-rw-r--r--test/integration/targets/pause/pause-3.yml12
-rw-r--r--test/integration/targets/pause/pause-4.yml13
-rw-r--r--test/integration/targets/pause/pause-5.yml35
-rwxr-xr-xtest/integration/targets/pause/runme.sh30
-rw-r--r--test/integration/targets/pause/setup.yml4
-rw-r--r--test/integration/targets/pause/test-pause-background.yml10
-rw-r--r--test/integration/targets/pause/test-pause-no-tty.yml7
-rwxr-xr-xtest/integration/targets/pause/test-pause.py273
-rw-r--r--test/integration/targets/pause/test-pause.yml51
-rw-r--r--test/integration/targets/ping/aliases1
-rw-r--r--test/integration/targets/ping/tasks/main.yml53
-rw-r--r--test/integration/targets/pip/aliases3
-rw-r--r--test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py2
-rwxr-xr-xtest/integration/targets/pip/files/setup.py14
-rw-r--r--test/integration/targets/pip/meta/main.yml2
-rw-r--r--test/integration/targets/pip/tasks/default_cleanup.yml5
-rw-r--r--test/integration/targets/pip/tasks/freebsd_cleanup.yml6
-rw-r--r--test/integration/targets/pip/tasks/main.yml43
-rw-r--r--test/integration/targets/pip/tasks/pip.yml580
-rw-r--r--test/integration/targets/pip/vars/main.yml13
-rw-r--r--test/integration/targets/play_iterator/aliases1
-rw-r--r--test/integration/targets/play_iterator/playbook.yml10
-rwxr-xr-xtest/integration/targets/play_iterator/runme.sh5
-rw-r--r--test/integration/targets/playbook/aliases1
-rwxr-xr-xtest/integration/targets/playbook/runme.sh9
-rw-r--r--test/integration/targets/playbook/timeout.yml12
-rw-r--r--test/integration/targets/playbook/types.yml21
-rw-r--r--test/integration/targets/plugin_config_for_inventory/aliases1
-rw-r--r--test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml3
-rw-r--r--test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml1
-rwxr-xr-xtest/integration/targets/plugin_config_for_inventory/runme.sh16
-rw-r--r--test/integration/targets/plugin_config_for_inventory/test_inventory.py52
-rw-r--r--test/integration/targets/plugin_filtering/aliases1
-rw-r--r--test/integration/targets/plugin_filtering/copy.yml10
-rw-r--r--test/integration/targets/plugin_filtering/filter_lookup.ini4
-rw-r--r--test/integration/targets/plugin_filtering/filter_lookup.yml6
-rw-r--r--test/integration/targets/plugin_filtering/filter_modules.ini4
-rw-r--r--test/integration/targets/plugin_filtering/filter_modules.yml9
-rw-r--r--test/integration/targets/plugin_filtering/filter_ping.ini4
-rw-r--r--test/integration/targets/plugin_filtering/filter_ping.yml5
-rw-r--r--test/integration/targets/plugin_filtering/filter_stat.ini4
-rw-r--r--test/integration/targets/plugin_filtering/filter_stat.yml5
-rw-r--r--test/integration/targets/plugin_filtering/lookup.yml14
-rw-r--r--test/integration/targets/plugin_filtering/no_blacklist_module.ini3
-rw-r--r--test/integration/targets/plugin_filtering/no_blacklist_module.yml3
-rw-r--r--test/integration/targets/plugin_filtering/no_filters.ini4
-rw-r--r--test/integration/targets/plugin_filtering/pause.yml6
-rw-r--r--test/integration/targets/plugin_filtering/ping.yml6
-rwxr-xr-xtest/integration/targets/plugin_filtering/runme.sh137
-rw-r--r--test/integration/targets/plugin_filtering/stat.yml6
-rw-r--r--test/integration/targets/plugin_filtering/tempfile.yml9
-rw-r--r--test/integration/targets/plugin_loader/aliases1
-rw-r--r--test/integration/targets/plugin_loader/normal/filters.yml13
l---------test/integration/targets/plugin_loader/normal/library/_symlink.py1
-rw-r--r--test/integration/targets/plugin_loader/normal/library/_underscore.py13
-rw-r--r--test/integration/targets/plugin_loader/normal/underscore.yml15
-rw-r--r--test/integration/targets/plugin_loader/override/filter_plugins/core.py18
-rw-r--r--test/integration/targets/plugin_loader/override/filters.yml15
-rwxr-xr-xtest/integration/targets/plugin_loader/runme.sh24
-rw-r--r--test/integration/targets/plugin_namespace/aliases1
-rw-r--r--test/integration/targets/plugin_namespace/filter_plugins/test_filter.py15
-rw-r--r--test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py9
-rw-r--r--test/integration/targets/plugin_namespace/tasks/main.yml11
-rw-r--r--test/integration/targets/plugin_namespace/test_plugins/test_test.py16
-rw-r--r--test/integration/targets/prepare_http_tests/defaults/main.yml4
-rw-r--r--test/integration/targets/prepare_http_tests/meta/main.yml2
-rw-r--r--test/integration/targets/prepare_http_tests/tasks/default.yml64
-rw-r--r--test/integration/targets/prepare_http_tests/tasks/main.yml24
-rw-r--r--test/integration/targets/prepare_http_tests/tasks/windows.yml33
-rw-r--r--test/integration/targets/prepare_http_tests/vars/httptester.yml5
-rw-r--r--test/integration/targets/prepare_tests/tasks/main.yml0
-rw-r--r--test/integration/targets/pull/aliases2
-rw-r--r--test/integration/targets/pull/cleanup.yml16
-rw-r--r--test/integration/targets/pull/pull-integration-test/ansible.cfg2
-rw-r--r--test/integration/targets/pull/pull-integration-test/inventory2
-rw-r--r--test/integration/targets/pull/pull-integration-test/local.yml20
-rwxr-xr-xtest/integration/targets/pull/runme.sh69
-rw-r--r--test/integration/targets/pull/setup.yml11
-rw-r--r--test/integration/targets/raw/aliases1
-rw-r--r--test/integration/targets/raw/meta/main.yml2
-rwxr-xr-xtest/integration/targets/raw/runme.sh6
-rw-r--r--test/integration/targets/raw/runme.yml4
-rw-r--r--test/integration/targets/raw/tasks/main.yml107
-rw-r--r--test/integration/targets/reboot/aliases2
-rw-r--r--test/integration/targets/reboot/tasks/check_reboot.yml10
-rw-r--r--test/integration/targets/reboot/tasks/get_boot_time.yml3
-rw-r--r--test/integration/targets/reboot/tasks/main.yml111
-rw-r--r--test/integration/targets/reboot/vars/main.yml9
-rw-r--r--test/integration/targets/rel_plugin_loading/aliases1
-rw-r--r--test/integration/targets/rel_plugin_loading/notyaml.yml5
-rwxr-xr-xtest/integration/targets/rel_plugin_loading/runme.sh5
-rw-r--r--test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py168
-rw-r--r--test/integration/targets/rel_plugin_loading/subdir/play.yml6
-rw-r--r--test/integration/targets/remote_tmp/aliases2
-rw-r--r--test/integration/targets/remote_tmp/playbook.yml57
-rwxr-xr-xtest/integration/targets/remote_tmp/runme.sh5
-rw-r--r--test/integration/targets/replace/aliases1
-rw-r--r--test/integration/targets/replace/meta/main.yml2
-rw-r--r--test/integration/targets/replace/tasks/main.yml265
-rw-r--r--test/integration/targets/retry_task_name_in_callback/aliases1
-rwxr-xr-xtest/integration/targets/retry_task_name_in_callback/runme.sh13
-rw-r--r--test/integration/targets/retry_task_name_in_callback/test.yml28
-rw-r--r--test/integration/targets/roles/aliases1
-rw-r--r--test/integration/targets/roles/allowed_dupes.yml18
-rw-r--r--test/integration/targets/roles/no_dupes.yml19
-rw-r--r--test/integration/targets/roles/roles/a/tasks/main.yml1
-rw-r--r--test/integration/targets/roles/roles/b/meta/main.yml2
-rw-r--r--test/integration/targets/roles/roles/b/tasks/main.yml1
-rw-r--r--test/integration/targets/roles/roles/c/meta/main.yml2
-rw-r--r--test/integration/targets/roles/roles/c/tasks/main.yml1
-rwxr-xr-xtest/integration/targets/roles/runme.sh14
-rw-r--r--test/integration/targets/rpm_key/aliases3
-rw-r--r--test/integration/targets/rpm_key/defaults/main.yaml0
-rw-r--r--test/integration/targets/rpm_key/tasks/main.yaml2
-rw-r--r--test/integration/targets/rpm_key/tasks/rpm_key.yaml195
-rw-r--r--test/integration/targets/run_modules/aliases1
-rw-r--r--test/integration/targets/run_modules/args.json1
-rw-r--r--test/integration/targets/run_modules/library/test.py7
-rwxr-xr-xtest/integration/targets/run_modules/runme.sh6
-rw-r--r--test/integration/targets/script/aliases1
-rwxr-xr-xtest/integration/targets/script/files/create_afile.sh3
-rw-r--r--test/integration/targets/script/files/no_shebang.py3
-rwxr-xr-xtest/integration/targets/script/files/remove_afile.sh3
-rwxr-xr-xtest/integration/targets/script/files/space path/test.sh3
-rwxr-xr-xtest/integration/targets/script/files/test.sh3
-rwxr-xr-xtest/integration/targets/script/files/test_with_args.sh5
-rw-r--r--test/integration/targets/script/meta/main.yml2
-rw-r--r--test/integration/targets/script/tasks/main.yml240
-rw-r--r--test/integration/targets/service/aliases5
-rw-r--r--test/integration/targets/service/files/ansible-broken.upstart10
-rw-r--r--test/integration/targets/service/files/ansible.rc16
-rw-r--r--test/integration/targets/service/files/ansible.systemd11
-rwxr-xr-xtest/integration/targets/service/files/ansible.sysv134
-rw-r--r--test/integration/targets/service/files/ansible.upstart9
-rw-r--r--test/integration/targets/service/files/ansible_test_service.py71
-rw-r--r--test/integration/targets/service/meta/main.yml20
-rw-r--r--test/integration/targets/service/tasks/main.yml58
-rw-r--r--test/integration/targets/service/tasks/rc_cleanup.yml9
-rw-r--r--test/integration/targets/service/tasks/rc_setup.yml21
-rw-r--r--test/integration/targets/service/tasks/systemd_cleanup.yml25
-rw-r--r--test/integration/targets/service/tasks/systemd_setup.yml17
-rw-r--r--test/integration/targets/service/tasks/sysv_cleanup.yml9
-rw-r--r--test/integration/targets/service/tasks/sysv_setup.yml11
-rw-r--r--test/integration/targets/service/tasks/tests.yml225
-rw-r--r--test/integration/targets/service/tasks/upstart_cleanup.yml17
-rw-r--r--test/integration/targets/service/tasks/upstart_setup.yml19
-rw-r--r--test/integration/targets/service/templates/main.yml0
-rw-r--r--test/integration/targets/service_facts/aliases5
-rw-r--r--test/integration/targets/service_facts/files/ansible.systemd11
-rw-r--r--test/integration/targets/service_facts/files/ansible_test_service.py73
-rw-r--r--test/integration/targets/service_facts/tasks/main.yml25
-rw-r--r--test/integration/targets/service_facts/tasks/systemd_cleanup.yml32
-rw-r--r--test/integration/targets/service_facts/tasks/systemd_setup.yml26
-rw-r--r--test/integration/targets/service_facts/tasks/tests.yml36
-rw-r--r--test/integration/targets/set_fact/aliases2
-rw-r--r--test/integration/targets/set_fact/incremental.yml35
-rw-r--r--test/integration/targets/set_fact/inventory3
-rw-r--r--test/integration/targets/set_fact/nowarn_clean_facts.yml10
-rwxr-xr-xtest/integration/targets/set_fact/runme.sh30
-rw-r--r--test/integration/targets/set_fact/set_fact_bool_conv.yml35
-rw-r--r--test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml35
-rw-r--r--test/integration/targets/set_fact/set_fact_cached_1.yml324
-rw-r--r--test/integration/targets/set_fact/set_fact_cached_2.yml57
-rw-r--r--test/integration/targets/set_fact/set_fact_no_cache.yml39
-rw-r--r--test/integration/targets/setup_cron/defaults/main.yml1
-rw-r--r--test/integration/targets/setup_cron/tasks/main.yml70
-rw-r--r--test/integration/targets/setup_cron/vars/debian.yml3
-rw-r--r--test/integration/targets/setup_cron/vars/default.yml0
-rw-r--r--test/integration/targets/setup_cron/vars/fedora.yml3
-rw-r--r--test/integration/targets/setup_cron/vars/freebsd.yml3
-rw-r--r--test/integration/targets/setup_cron/vars/redhat.yml4
-rw-r--r--test/integration/targets/setup_cron/vars/suse.yml3
-rw-r--r--test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.010
-rw-r--r--test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.110
-rw-r--r--test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.011
-rw-r--r--test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.110
-rw-r--r--test/integration/targets/setup_deb_repo/meta/main.yml2
-rw-r--r--test/integration/targets/setup_deb_repo/tasks/main.yml56
-rw-r--r--test/integration/targets/setup_epel/tasks/main.yml5
-rw-r--r--test/integration/targets/setup_gnutar/handlers/main.yml6
-rw-r--r--test/integration/targets/setup_gnutar/tasks/main.yml18
-rw-r--r--test/integration/targets/setup_nobody/handlers/main.yml5
-rw-r--r--test/integration/targets/setup_nobody/tasks/main.yml7
-rw-r--r--test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml9
-rw-r--r--test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-MacOSX-10-python-3.yml6
-rw-r--r--test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-fail.yml7
-rw-r--r--test/integration/targets/setup_paramiko/install-python-2.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install-python-3.yml3
-rw-r--r--test/integration/targets/setup_paramiko/install.yml17
-rw-r--r--test/integration/targets/setup_paramiko/inventory1
-rw-r--r--test/integration/targets/setup_paramiko/library/detect_paramiko.py31
-rw-r--r--test/integration/targets/setup_paramiko/setup.sh8
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-MacOSX-10-python-3.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml5
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml5
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-dnf.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-fail.yml7
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-yum.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml4
-rw-r--r--test/integration/targets/setup_paramiko/uninstall.yml19
-rw-r--r--test/integration/targets/setup_passlib/tasks/main.yml4
-rw-r--r--test/integration/targets/setup_pexpect/tasks/main.yml4
-rw-r--r--test/integration/targets/setup_remote_constraints/aliases1
-rw-r--r--test/integration/targets/setup_remote_constraints/meta/main.yml2
-rw-r--r--test/integration/targets/setup_remote_constraints/tasks/main.yml8
-rw-r--r--test/integration/targets/setup_remote_tmp_dir/handlers/main.yml5
-rw-r--r--test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml5
-rw-r--r--test/integration/targets/setup_remote_tmp_dir/tasks/default.yml11
-rw-r--r--test/integration/targets/setup_remote_tmp_dir/tasks/main.yml10
-rw-r--r--test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml4
-rw-r--r--test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml11
-rw-r--r--test/integration/targets/setup_rpm_repo/aliases1
-rw-r--r--test/integration/targets/setup_rpm_repo/defaults/main.yml0
-rw-r--r--test/integration/targets/setup_rpm_repo/files/comps.xml36
-rw-r--r--test/integration/targets/setup_rpm_repo/files/create-repo.py69
-rw-r--r--test/integration/targets/setup_rpm_repo/tasks/main.yml97
-rw-r--r--test/integration/targets/setup_rpm_repo/vars/Fedora.yml3
-rw-r--r--test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml4
-rw-r--r--test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml4
-rw-r--r--test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml4
-rw-r--r--test/integration/targets/setup_rpm_repo/vars/main.yml1
-rw-r--r--test/integration/targets/setup_win_printargv/files/PrintArgv.cs13
-rw-r--r--test/integration/targets/setup_win_printargv/meta/main.yml3
-rw-r--r--test/integration/targets/setup_win_printargv/tasks/main.yml9
-rw-r--r--test/integration/targets/shell/action_plugins/test_shell.py19
-rw-r--r--test/integration/targets/shell/aliases1
-rw-r--r--test/integration/targets/shell/connection_plugins/test_connection_default.py44
-rw-r--r--test/integration/targets/shell/connection_plugins/test_connection_override.py45
-rw-r--r--test/integration/targets/shell/tasks/main.yml36
-rw-r--r--test/integration/targets/slurp/aliases1
-rw-r--r--test/integration/targets/slurp/files/bar.binbin0 -> 256 bytes
-rw-r--r--test/integration/targets/slurp/tasks/main.yml98
-rw-r--r--test/integration/targets/special_vars/aliases2
-rw-r--r--test/integration/targets/special_vars/meta/main.yml2
-rw-r--r--test/integration/targets/special_vars/tasks/main.yml100
-rw-r--r--test/integration/targets/special_vars/templates/foo.j27
-rw-r--r--test/integration/targets/special_vars/vars/main.yml0
-rw-r--r--test/integration/targets/stat/aliases1
-rw-r--r--test/integration/targets/stat/files/foo.txt1
-rw-r--r--test/integration/targets/stat/meta/main.yml2
-rw-r--r--test/integration/targets/stat/tasks/main.yml157
-rw-r--r--test/integration/targets/strategy_linear/aliases1
-rw-r--r--test/integration/targets/strategy_linear/inventory3
-rw-r--r--test/integration/targets/strategy_linear/roles/role1/tasks/main.yml6
-rw-r--r--test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml7
-rw-r--r--test/integration/targets/strategy_linear/roles/role2/tasks/main.yml7
-rwxr-xr-xtest/integration/targets/strategy_linear/runme.sh5
-rw-r--r--test/integration/targets/strategy_linear/test_include_file_noop.yml16
-rw-r--r--test/integration/targets/subversion/aliases7
-rw-r--r--test/integration/targets/subversion/roles/subversion/defaults/main.yml10
-rw-r--r--test/integration/targets/subversion/roles/subversion/files/create_repo.sh6
-rw-r--r--test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml8
-rw-r--r--test/integration/targets/subversion/roles/subversion/tasks/main.yml20
-rw-r--r--test/integration/targets/subversion/roles/subversion/tasks/setup.yml63
-rw-r--r--test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml11
-rw-r--r--test/integration/targets/subversion/roles/subversion/tasks/tests.yml133
-rw-r--r--test/integration/targets/subversion/roles/subversion/tasks/warnings.yml7
-rw-r--r--test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j267
-rwxr-xr-xtest/integration/targets/subversion/runme.sh32
-rw-r--r--test/integration/targets/subversion/runme.yml15
-rw-r--r--test/integration/targets/subversion/vars/Debian.yml6
-rw-r--r--test/integration/targets/subversion/vars/FreeBSD.yml7
-rw-r--r--test/integration/targets/subversion/vars/RedHat.yml10
-rw-r--r--test/integration/targets/subversion/vars/Suse.yml6
-rw-r--r--test/integration/targets/subversion/vars/Ubuntu-18.yml6
-rw-r--r--test/integration/targets/systemd/aliases2
-rw-r--r--test/integration/targets/systemd/defaults/main.yml1
-rw-r--r--test/integration/targets/systemd/meta/main.yml2
-rw-r--r--test/integration/targets/systemd/tasks/main.yml116
-rw-r--r--test/integration/targets/tags/aliases2
-rwxr-xr-xtest/integration/targets/tags/runme.sh49
-rw-r--r--test/integration/targets/tags/test_tags.yml33
-rw-r--r--test/integration/targets/task_ordering/aliases1
-rw-r--r--test/integration/targets/task_ordering/meta/main.yml2
-rw-r--r--test/integration/targets/task_ordering/tasks/main.yml15
-rw-r--r--test/integration/targets/task_ordering/tasks/taskorder-include.yml10
-rw-r--r--test/integration/targets/tasks/aliases1
-rw-r--r--test/integration/targets/tasks/tasks/main.yml4
-rw-r--r--test/integration/targets/template/aliases3
-rw-r--r--test/integration/targets/template/ansible_managed.cfg2
-rw-r--r--test/integration/targets/template/ansible_managed.yml14
-rw-r--r--test/integration/targets/template/corner_cases.yml51
-rw-r--r--test/integration/targets/template/custom_tasks/tasks/main.yml15
-rw-r--r--test/integration/targets/template/custom_tasks/templates/test1
-rw-r--r--test/integration/targets/template/custom_template.yml4
-rw-r--r--test/integration/targets/template/files/encoding_1252_utf-8.expected1
-rw-r--r--test/integration/targets/template/files/encoding_1252_windows-1252.expected1
-rw-r--r--test/integration/targets/template/files/foo-py26.txt9
-rw-r--r--test/integration/targets/template/files/foo.dos.txt3
-rw-r--r--test/integration/targets/template/files/foo.txt9
-rw-r--r--test/integration/targets/template/files/foo.unix.txt3
-rw-r--r--test/integration/targets/template/files/import_as.expected3
-rw-r--r--test/integration/targets/template/files/import_as_with_context.expected2
-rw-r--r--test/integration/targets/template/files/import_with_context.expected3
-rw-r--r--test/integration/targets/template/files/lstrip_blocks_false.expected4
-rw-r--r--test/integration/targets/template/files/lstrip_blocks_true.expected3
-rw-r--r--test/integration/targets/template/files/trim_blocks_false.expected4
-rw-r--r--test/integration/targets/template/files/trim_blocks_true.expected2
-rw-r--r--test/integration/targets/template/filter_plugins.yml9
-rw-r--r--test/integration/targets/template/meta/main.yml3
-rw-r--r--test/integration/targets/template/role_filter/filter_plugins/myplugin.py9
-rw-r--r--test/integration/targets/template/role_filter/tasks/main.yml3
-rwxr-xr-xtest/integration/targets/template/runme.sh27
-rw-r--r--test/integration/targets/template/tasks/backup_test.yml60
-rw-r--r--test/integration/targets/template/tasks/main.yml719
-rw-r--r--test/integration/targets/template/template.yml4
-rw-r--r--test/integration/targets/template/templates/bar1
-rw-r--r--test/integration/targets/template/templates/café.j21
-rw-r--r--test/integration/targets/template/templates/encoding_1252.j21
-rw-r--r--test/integration/targets/template/templates/foo.j23
-rw-r--r--test/integration/targets/template/templates/foo2.j23
-rw-r--r--test/integration/targets/template/templates/foo3.j23
-rw-r--r--test/integration/targets/template/templates/for_loop.j24
-rw-r--r--test/integration/targets/template/templates/for_loop_include.j23
-rw-r--r--test/integration/targets/template/templates/for_loop_include_nested.j21
-rw-r--r--test/integration/targets/template/templates/import_as.j24
-rw-r--r--test/integration/targets/template/templates/import_as_with_context.j23
-rw-r--r--test/integration/targets/template/templates/import_with_context.j24
-rw-r--r--test/integration/targets/template/templates/lstrip_blocks.j28
-rw-r--r--test/integration/targets/template/templates/parent.j23
-rw-r--r--test/integration/targets/template/templates/qux1
-rw-r--r--test/integration/targets/template/templates/short.j21
-rw-r--r--test/integration/targets/template/templates/subtemplate.j22
-rw-r--r--test/integration/targets/template/templates/template_destpath_test.j21
-rw-r--r--test/integration/targets/template/templates/trim_blocks.j24
-rw-r--r--test/integration/targets/template/templates/unused_vars_include.j21
-rw-r--r--test/integration/targets/template/templates/unused_vars_template.j22
-rw-r--r--test/integration/targets/template/undefined_var_info.yml15
-rw-r--r--test/integration/targets/template/unused_vars_include.yml8
-rw-r--r--test/integration/targets/template/vars/main.yml20
-rw-r--r--test/integration/targets/template_jinja2_latest/aliases4
-rw-r--r--test/integration/targets/template_jinja2_latest/main.yml4
-rw-r--r--test/integration/targets/template_jinja2_latest/requirements.txt2
-rwxr-xr-xtest/integration/targets/template_jinja2_latest/runme.sh12
-rw-r--r--test/integration/targets/templating_lookups/aliases2
-rwxr-xr-xtest/integration/targets/templating_lookups/runme.sh12
-rw-r--r--test/integration/targets/templating_lookups/runme.yml4
-rw-r--r--test/integration/targets/templating_lookups/template_deepcopy/hosts1
-rw-r--r--test/integration/targets/templating_lookups/template_deepcopy/playbook.yml10
-rw-r--r--test/integration/targets/templating_lookups/template_deepcopy/template.in1
-rw-r--r--test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml8
-rw-r--r--test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j24
-rw-r--r--test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml13
-rw-r--r--test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j26
-rw-r--r--test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass1
-rw-r--r--test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml31
-rw-r--r--test/integration/targets/templating_lookups/template_lookups/tasks/main.yml90
-rw-r--r--test/integration/targets/templating_lookups/template_lookups/vars/main.yml9
-rw-r--r--test/integration/targets/templating_settings/aliases1
-rw-r--r--test/integration/targets/templating_settings/dont_warn_register.yml6
-rwxr-xr-xtest/integration/targets/templating_settings/runme.sh6
-rw-r--r--test/integration/targets/templating_settings/test_templating_settings.yml14
-rw-r--r--test/integration/targets/test_core/aliases2
-rw-r--r--test/integration/targets/test_core/inventory1
-rwxr-xr-xtest/integration/targets/test_core/runme.sh5
-rw-r--r--test/integration/targets/test_core/runme.yml4
-rw-r--r--test/integration/targets/test_core/tasks/main.yml303
-rw-r--r--test/integration/targets/test_core/vault-password1
-rw-r--r--test/integration/targets/test_files/aliases2
-rw-r--r--test/integration/targets/test_files/tasks/main.yml60
-rw-r--r--test/integration/targets/test_mathstuff/aliases2
-rw-r--r--test/integration/targets/test_mathstuff/tasks/main.yml38
-rw-r--r--test/integration/targets/throttle/aliases1
-rw-r--r--test/integration/targets/throttle/group_vars/all.yml4
-rw-r--r--test/integration/targets/throttle/inventory6
-rwxr-xr-xtest/integration/targets/throttle/runme.sh7
-rwxr-xr-xtest/integration/targets/throttle/test_throttle.py34
-rw-r--r--test/integration/targets/throttle/test_throttle.yml84
-rw-r--r--test/integration/targets/unarchive/aliases4
-rw-r--r--test/integration/targets/unarchive/files/foo.txt1
-rw-r--r--test/integration/targets/unarchive/files/test-unarchive-nonascii-くらとみ.tar.gzbin0 -> 4947 bytes
-rw-r--r--test/integration/targets/unarchive/meta/main.yml4
-rw-r--r--test/integration/targets/unarchive/tasks/main.yml16
-rw-r--r--test/integration/targets/unarchive/tasks/prepare_tests.yml92
-rw-r--r--test/integration/targets/unarchive/tasks/test_download.yml34
-rw-r--r--test/integration/targets/unarchive/tasks/test_exclude.yml48
-rw-r--r--test/integration/targets/unarchive/tasks/test_missing_files.yml47
-rw-r--r--test/integration/targets/unarchive/tasks/test_mode.yml151
-rw-r--r--test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml66
-rw-r--r--test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml32
-rw-r--r--test/integration/targets/unarchive/tasks/test_quotable_characters.yml38
-rw-r--r--test/integration/targets/unarchive/tasks/test_symlink.yml64
-rw-r--r--test/integration/targets/unarchive/tasks/test_tar.yml26
-rw-r--r--test/integration/targets/unarchive/tasks/test_tar_gz.yml28
-rw-r--r--test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml53
-rw-r--r--test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml57
-rw-r--r--test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml48
-rw-r--r--test/integration/targets/unarchive/tasks/test_unprivileged_user.yml86
-rw-r--r--test/integration/targets/unarchive/tasks/test_zip.yml45
-rw-r--r--test/integration/targets/undefined/aliases1
-rw-r--r--test/integration/targets/undefined/tasks/main.yml18
-rw-r--r--test/integration/targets/unicode/aliases1
-rw-r--r--test/integration/targets/unicode/inventory5
-rw-r--r--test/integration/targets/unicode/křížek-ansible-project/ansible.cfg2
-rwxr-xr-xtest/integration/targets/unicode/runme.sh13
-rwxr-xr-xtest/integration/targets/unicode/unicode-test-script7
-rw-r--r--test/integration/targets/unicode/unicode.yml149
-rw-r--r--test/integration/targets/until/aliases1
-rw-r--r--test/integration/targets/until/tasks/main.yml71
-rw-r--r--test/integration/targets/uri/aliases4
-rw-r--r--test/integration/targets/uri/files/README9
-rw-r--r--test/integration/targets/uri/files/fail0.json1
-rw-r--r--test/integration/targets/uri/files/fail1.json1
-rw-r--r--test/integration/targets/uri/files/fail10.json1
-rw-r--r--test/integration/targets/uri/files/fail11.json1
-rw-r--r--test/integration/targets/uri/files/fail12.json1
-rw-r--r--test/integration/targets/uri/files/fail13.json1
-rw-r--r--test/integration/targets/uri/files/fail14.json1
-rw-r--r--test/integration/targets/uri/files/fail15.json1
-rw-r--r--test/integration/targets/uri/files/fail16.json1
-rw-r--r--test/integration/targets/uri/files/fail17.json1
-rw-r--r--test/integration/targets/uri/files/fail18.json1
-rw-r--r--test/integration/targets/uri/files/fail19.json1
-rw-r--r--test/integration/targets/uri/files/fail2.json1
-rw-r--r--test/integration/targets/uri/files/fail20.json1
-rw-r--r--test/integration/targets/uri/files/fail21.json1
-rw-r--r--test/integration/targets/uri/files/fail22.json1
-rw-r--r--test/integration/targets/uri/files/fail23.json1
-rw-r--r--test/integration/targets/uri/files/fail24.json1
-rw-r--r--test/integration/targets/uri/files/fail25.json1
-rw-r--r--test/integration/targets/uri/files/fail26.json2
-rw-r--r--test/integration/targets/uri/files/fail27.json2
-rw-r--r--test/integration/targets/uri/files/fail28.json1
-rw-r--r--test/integration/targets/uri/files/fail29.json1
-rw-r--r--test/integration/targets/uri/files/fail3.json1
-rw-r--r--test/integration/targets/uri/files/fail30.json1
-rw-r--r--test/integration/targets/uri/files/fail4.json1
-rw-r--r--test/integration/targets/uri/files/fail5.json1
-rw-r--r--test/integration/targets/uri/files/fail6.json1
-rw-r--r--test/integration/targets/uri/files/fail7.json1
-rw-r--r--test/integration/targets/uri/files/fail8.json1
-rw-r--r--test/integration/targets/uri/files/fail9.json1
-rw-r--r--test/integration/targets/uri/files/formdata.txt1
-rw-r--r--test/integration/targets/uri/files/pass0.json58
-rw-r--r--test/integration/targets/uri/files/pass1.json1
-rw-r--r--test/integration/targets/uri/files/pass2.json6
-rw-r--r--test/integration/targets/uri/files/pass3.json1
-rw-r--r--test/integration/targets/uri/files/pass4.json1
-rw-r--r--test/integration/targets/uri/files/testserver.py20
-rw-r--r--test/integration/targets/uri/meta/main.yml5
-rw-r--r--test/integration/targets/uri/tasks/main.yml600
-rw-r--r--test/integration/targets/uri/tasks/redirect-all.yml272
-rw-r--r--test/integration/targets/uri/tasks/redirect-none.yml296
-rw-r--r--test/integration/targets/uri/tasks/redirect-safe.yml274
-rw-r--r--test/integration/targets/uri/tasks/redirect-urllib2.yml294
-rw-r--r--test/integration/targets/uri/tasks/return-content.yml49
-rw-r--r--test/integration/targets/uri/tasks/unexpected-failures.yml27
-rw-r--r--test/integration/targets/uri/templates/netrc.j23
-rw-r--r--test/integration/targets/uri/vars/main.yml20
-rw-r--r--test/integration/targets/user/aliases3
-rw-r--r--test/integration/targets/user/files/userlist.sh20
-rw-r--r--test/integration/targets/user/meta/main.yml2
-rw-r--r--test/integration/targets/user/tasks/expires_local.yml333
-rw-r--r--test/integration/targets/user/tasks/main.yml1136
-rw-r--r--test/integration/targets/user/vars/main.yml13
-rw-r--r--test/integration/targets/var_blending/aliases1
-rw-r--r--test/integration/targets/var_blending/group_vars/all9
-rw-r--r--test/integration/targets/var_blending/group_vars/local1
-rw-r--r--test/integration/targets/var_blending/host_vars/testhost4
-rw-r--r--test/integration/targets/var_blending/inventory26
-rw-r--r--test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml4
-rw-r--r--test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt77
-rw-r--r--test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml57
-rw-r--r--test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j277
-rw-r--r--test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml4
-rw-r--r--test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml3
-rwxr-xr-xtest/integration/targets/var_blending/runme.sh5
-rw-r--r--test/integration/targets/var_blending/test_var_blending.yml8
-rw-r--r--test/integration/targets/var_blending/test_vars.yml1
-rw-r--r--test/integration/targets/var_blending/vars_file.yml12
-rw-r--r--test/integration/targets/var_precedence/aliases1
-rwxr-xr-xtest/integration/targets/var_precedence/ansible-var-precedence-check.py541
-rw-r--r--test/integration/targets/var_precedence/host_vars/testhost2
-rw-r--r--test/integration/targets/var_precedence/inventory13
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml4
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml10
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml5
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml14
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml4
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml5
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml5
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml2
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml14
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml4
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml5
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml14
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml5
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml7
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml14
-rw-r--r--test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml3
-rwxr-xr-xtest/integration/targets/var_precedence/runme.sh9
-rw-r--r--test/integration/targets/var_precedence/test_var_precedence.yml44
-rw-r--r--test/integration/targets/var_precedence/vars/test_var_precedence.yml5
-rw-r--r--test/integration/targets/var_templating/aliases1
-rw-r--r--test/integration/targets/var_templating/group_vars/all.yml7
-rwxr-xr-xtest/integration/targets/var_templating/runme.sh17
-rw-r--r--test/integration/targets/var_templating/task_vars_templating.yml58
-rw-r--r--test/integration/targets/var_templating/test_connection_vars.yml26
-rw-r--r--test/integration/targets/var_templating/undall.yml6
-rw-r--r--test/integration/targets/var_templating/undefined.yml13
-rw-r--r--test/integration/targets/var_templating/vars/connection.yml3
-rw-r--r--test/integration/targets/vault/aliases2
-rw-r--r--test/integration/targets/vault/empty-password0
-rw-r--r--test/integration/targets/vault/encrypted-vault-password6
-rw-r--r--test/integration/targets/vault/encrypted_file_encrypted_var_password1
-rw-r--r--test/integration/targets/vault/example1_password1
-rw-r--r--test/integration/targets/vault/example2_password1
-rw-r--r--test/integration/targets/vault/example3_password1
-rwxr-xr-xtest/integration/targets/vault/faux-editor.py44
-rw-r--r--test/integration/targets/vault/files/test_assemble/nonsecret.txt1
-rw-r--r--test/integration/targets/vault/files/test_assemble/secret.vault7
-rw-r--r--test/integration/targets/vault/format_1_1_AES256.yml6
-rw-r--r--test/integration/targets/vault/format_1_2_AES256.yml6
-rw-r--r--test/integration/targets/vault/host_vars/myhost.yml7
-rw-r--r--test/integration/targets/vault/host_vars/testhost.yml7
-rw-r--r--test/integration/targets/vault/invalid_format/README.md1
-rw-r--r--test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml23
-rw-r--r--test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml7
-rw-r--r--test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml8
-rw-r--r--test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars11
-rw-r--r--test/integration/targets/vault/invalid_format/inventory5
-rw-r--r--test/integration/targets/vault/invalid_format/original-broken-host-vars6
-rw-r--r--test/integration/targets/vault/invalid_format/original-group-vars.yml2
-rw-r--r--test/integration/targets/vault/invalid_format/some-vars6
-rw-r--r--test/integration/targets/vault/invalid_format/vault-secret1
-rw-r--r--test/integration/targets/vault/inventory.toml5
-rwxr-xr-xtest/integration/targets/vault/password-script.py33
-rw-r--r--test/integration/targets/vault/roles/test_vault/tasks/main.yml9
-rw-r--r--test/integration/targets/vault/roles/test_vault/vars/main.yml9
-rw-r--r--test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml13
-rw-r--r--test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml17
-rw-r--r--test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml29
-rw-r--r--test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml194
-rw-r--r--test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md1
-rw-r--r--test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml13
-rw-r--r--test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml76
-rw-r--r--test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml19
-rw-r--r--test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j26
-rwxr-xr-xtest/integration/targets/vault/runme.sh524
-rwxr-xr-xtest/integration/targets/vault/runme_change_pip_installed.sh27
-rw-r--r--test/integration/targets/vault/single_vault_as_string.yml123
-rwxr-xr-xtest/integration/targets/vault/test-vault-client.py63
-rw-r--r--test/integration/targets/vault/test_dangling_temp.yml34
-rw-r--r--test/integration/targets/vault/test_utf8_value_in_filename.yml16
-rw-r--r--test/integration/targets/vault/test_vault.yml6
-rw-r--r--test/integration/targets/vault/test_vault_embedded.yml4
-rw-r--r--test/integration/targets/vault/test_vault_embedded_ids.yml4
-rw-r--r--test/integration/targets/vault/test_vault_file_encrypted_embedded.yml4
-rw-r--r--test/integration/targets/vault/test_vaulted_inventory.yml5
-rw-r--r--test/integration/targets/vault/test_vaulted_inventory_toml.yml9
-rw-r--r--test/integration/targets/vault/test_vaulted_template.yml6
-rw-r--r--test/integration/targets/vault/test_vaulted_utf8_value.yml15
-rw-r--r--test/integration/targets/vault/vault-café.yml6
-rw-r--r--test/integration/targets/vault/vault-password1
-rw-r--r--test/integration/targets/vault/vault-password-ansible1
-rw-r--r--test/integration/targets/vault/vault-password-wrong1
-rw-r--r--test/integration/targets/vault/vault-secret.txt6
-rw-r--r--test/integration/targets/vault/vaulted.inventory8
-rw-r--r--test/integration/targets/wait_for/aliases2
-rw-r--r--test/integration/targets/wait_for/files/testserver.py16
-rw-r--r--test/integration/targets/wait_for/files/zombie.py13
-rw-r--r--test/integration/targets/wait_for/meta/main.yml2
-rw-r--r--test/integration/targets/wait_for/tasks/main.yml177
-rw-r--r--test/integration/targets/wait_for/vars/main.yml4
-rw-r--r--test/integration/targets/wait_for_connection/aliases2
-rw-r--r--test/integration/targets/wait_for_connection/tasks/main.yml30
-rw-r--r--test/integration/targets/want_json_modules_posix/aliases1
-rw-r--r--test/integration/targets/want_json_modules_posix/library/helloworld.py31
-rw-r--r--test/integration/targets/want_json_modules_posix/meta/main.yml2
-rw-r--r--test/integration/targets/want_json_modules_posix/tasks/main.yml43
-rw-r--r--test/integration/targets/win_async_wrapper/aliases3
-rw-r--r--test/integration/targets/win_async_wrapper/library/async_test.ps148
-rw-r--r--test/integration/targets/win_async_wrapper/tasks/main.yml257
-rw-r--r--test/integration/targets/win_become/aliases2
-rw-r--r--test/integration/targets/win_become/tasks/main.yml251
-rw-r--r--test/integration/targets/win_exec_wrapper/aliases2
-rw-r--r--test/integration/targets/win_exec_wrapper/library/test_all_options.ps112
-rw-r--r--test/integration/targets/win_exec_wrapper/library/test_common_functions.ps140
-rw-r--r--test/integration/targets/win_exec_wrapper/library/test_fail.ps158
-rw-r--r--test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps19
-rw-r--r--test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps18
-rw-r--r--test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps18
-rw-r--r--test/integration/targets/win_exec_wrapper/tasks/main.yml274
-rw-r--r--test/integration/targets/win_fetch/aliases1
-rw-r--r--test/integration/targets/win_fetch/meta/main.yml2
-rw-r--r--test/integration/targets/win_fetch/tasks/main.yml212
-rw-r--r--test/integration/targets/win_module_utils/aliases2
-rw-r--r--test/integration/targets/win_module_utils/library/csharp_util.ps112
-rw-r--r--test/integration/targets/win_module_utils/library/legacy_only_new_way.ps15
-rw-r--r--test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps16
-rw-r--r--test/integration/targets/win_module_utils/library/legacy_only_old_way.ps15
-rw-r--r--test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps14
-rw-r--r--test/integration/targets/win_module_utils/library/recursive_requires.ps113
-rw-r--r--test/integration/targets/win_module_utils/library/uses_bogus_utils.ps16
-rw-r--r--test/integration/targets/win_module_utils/library/uses_local_utils.ps19
-rw-r--r--test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm19
-rw-r--r--test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm112
-rw-r--r--test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm120
-rw-r--r--test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm13
-rw-r--r--test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs26
-rw-r--r--test/integration/targets/win_module_utils/tasks/main.yml71
-rw-r--r--test/integration/targets/win_raw/aliases2
-rw-r--r--test/integration/targets/win_raw/tasks/main.yml143
-rw-r--r--test/integration/targets/win_script/aliases2
-rw-r--r--test/integration/targets/win_script/defaults/main.yml5
-rw-r--r--test/integration/targets/win_script/files/fail.bat1
-rw-r--r--test/integration/targets/win_script/files/space path/test_script.ps11
-rw-r--r--test/integration/targets/win_script/files/test_script.bat2
-rw-r--r--test/integration/targets/win_script/files/test_script.cmd2
-rw-r--r--test/integration/targets/win_script/files/test_script.ps12
-rw-r--r--test/integration/targets/win_script/files/test_script_bool.ps16
-rw-r--r--test/integration/targets/win_script/files/test_script_creates_file.ps13
-rw-r--r--test/integration/targets/win_script/files/test_script_removes_file.ps13
-rw-r--r--test/integration/targets/win_script/files/test_script_whoami.ps12
-rw-r--r--test/integration/targets/win_script/files/test_script_with_args.ps17
-rw-r--r--test/integration/targets/win_script/files/test_script_with_env.ps11
-rw-r--r--test/integration/targets/win_script/files/test_script_with_errors.ps19
-rw-r--r--test/integration/targets/win_script/files/test_script_with_splatting.ps16
-rw-r--r--test/integration/targets/win_script/tasks/main.yml316
-rw-r--r--test/integration/targets/windows-minimal/aliases4
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping.ps121
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping.py55
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping_set_attr.ps131
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps130
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps130
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping_throw.ps130
-rw-r--r--test/integration/targets/windows-minimal/library/win_ping_throw_string.ps130
-rw-r--r--test/integration/targets/windows-minimal/tasks/main.yml67
-rw-r--r--test/integration/targets/windows-paths/aliases3
-rw-r--r--test/integration/targets/windows-paths/tasks/main.yml191
-rw-r--r--test/integration/targets/yum/aliases6
-rw-r--r--test/integration/targets/yum/files/yum.conf5
-rw-r--r--test/integration/targets/yum/meta/main.yml4
-rw-r--r--test/integration/targets/yum/tasks/check_mode_consistency.yml61
-rw-r--r--test/integration/targets/yum/tasks/lock.yml28
-rw-r--r--test/integration/targets/yum/tasks/main.yml71
-rw-r--r--test/integration/targets/yum/tasks/proxy.yml186
-rw-r--r--test/integration/targets/yum/tasks/repo.yml705
-rw-r--r--test/integration/targets/yum/tasks/yum.yml873
-rw-r--r--test/integration/targets/yum/tasks/yum_group_remove.yml152
-rw-r--r--test/integration/targets/yum/tasks/yuminstallroot.yml122
-rw-r--r--test/integration/targets/yum_repository/aliases3
-rw-r--r--test/integration/targets/yum_repository/tasks/main.yml218
-rw-r--r--test/integration/targets/yum_repository/vars/CentOS-8.yml10
-rw-r--r--test/integration/targets/yum_repository/vars/CentOS.yml10
-rw-r--r--test/integration/targets/yum_repository/vars/Fedora.yml5
-rw-r--r--test/integration/targets/yum_repository/vars/default.yml0
-rw-r--r--test/lib/ansible_test/__init__.py0
-rw-r--r--test/lib/ansible_test/_data/ansible.cfg0
-rwxr-xr-xtest/lib/ansible_test/_data/cli/ansible_test_cli_stub.py28
-rw-r--r--test/lib/ansible_test/_data/collection_detail.py95
-rw-r--r--test/lib/ansible_test/_data/completion/docker.txt12
-rw-r--r--test/lib/ansible_test/_data/completion/network.txt2
-rw-r--r--test/lib/ansible_test/_data/completion/remote.txt11
-rw-r--r--test/lib/ansible_test/_data/completion/windows.txt6
-rw-r--r--test/lib/ansible_test/_data/coveragerc0
-rw-r--r--test/lib/ansible_test/_data/cryptography-constraints.txt3
l---------test/lib/ansible_test/_data/injector/ansible1
l---------test/lib/ansible_test/_data/injector/ansible-config1
l---------test/lib/ansible_test/_data/injector/ansible-connection1
l---------test/lib/ansible_test/_data/injector/ansible-console1
l---------test/lib/ansible_test/_data/injector/ansible-doc1
l---------test/lib/ansible_test/_data/injector/ansible-galaxy1
l---------test/lib/ansible_test/_data/injector/ansible-inventory1
l---------test/lib/ansible_test/_data/injector/ansible-playbook1
l---------test/lib/ansible_test/_data/injector/ansible-pull1
l---------test/lib/ansible_test/_data/injector/ansible-test1
l---------test/lib/ansible_test/_data/injector/ansible-vault1
l---------test/lib/ansible_test/_data/injector/importer.py1
l---------test/lib/ansible_test/_data/injector/pytest1
-rwxr-xr-xtest/lib/ansible_test/_data/injector/python.py80
-rw-r--r--test/lib/ansible_test/_data/injector/virtualenv-isolated.sh12
-rw-r--r--test/lib/ansible_test/_data/injector/virtualenv.sh8
-rw-r--r--test/lib/ansible_test/_data/inventory6
-rw-r--r--test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml19
-rw-r--r--test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml77
-rw-r--r--test/lib/ansible_test/_data/pytest.ini9
-rw-r--r--test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py67
-rw-r--r--test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py68
-rw-r--r--test/lib/ansible_test/_data/quiet_pip.py70
-rw-r--r--test/lib/ansible_test/_data/requirements/ansible-test.txt6
-rw-r--r--test/lib/ansible_test/_data/requirements/constraints.txt64
-rw-r--r--test/lib/ansible_test/_data/requirements/coverage.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt3
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt39
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt2
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt2
-rw-r--r--test/lib/ansible_test/_data/requirements/integration.txt6
-rw-r--r--test/lib/ansible_test/_data/requirements/network-integration.txt7
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt2
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.changelog.txt2
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.import.txt2
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.pep8.txt1
-rwxr-xr-xtest/lib/ansible_test/_data/requirements/sanity.ps145
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.pylint.txt3
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt2
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt3
-rw-r--r--test/lib/ansible_test/_data/requirements/sanity.yamllint.txt1
-rw-r--r--test/lib/ansible_test/_data/requirements/units.txt7
-rw-r--r--test/lib/ansible_test/_data/requirements/windows-integration.txt11
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json13
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py68
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/changelog.json9
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/changelog.py49
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/empty-init.json14
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/empty-init.py16
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json6
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py46
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/line-endings.json4
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/line-endings.py18
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json6
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py44
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-assert.json10
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-assert.py24
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-basestring.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py28
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json5
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py82
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json10
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-main-display.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json5
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py28
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json7
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json11
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py150
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/shebang.json4
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/shebang.py120
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/symlinks.json5
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/symlinks.py32
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json10
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py21
-rw-r--r--test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json6
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py21
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/compile/compile.py41
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/import/importer.py467
-rw-r--r--test/lib/ansible_test/_data/sanity/import/yaml_to_json.py27
-rw-r--r--test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py15
-rw-r--r--test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt4
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/pslint/pslint.ps143
-rw-r--r--test/lib/ansible_test/_data/sanity/pslint/settings.psd113
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg39
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg135
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/config/default.cfg135
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg42
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/plugins/blacklist.py242
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py250
-rw-r--r--test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py90
-rw-r--r--test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt5
-rw-r--r--test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt3
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/validate-modules/main.py8
l---------test/lib/ansible_test/_data/sanity/validate-modules/validate-modules1
-rw-r--r--test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py20
-rw-r--r--test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py2442
-rw-r--r--test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py170
-rwxr-xr-xtest/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1110
-rw-r--r--test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py488
-rw-r--r--test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py218
-rw-r--r--test/lib/ansible_test/_data/sanity/yamllint/config/default.yml19
-rw-r--r--test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml19
-rw-r--r--test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml19
-rw-r--r--test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py249
-rw-r--r--test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1453
-rw-r--r--test/lib/ansible_test/_data/setup/docker.sh14
-rw-r--r--test/lib/ansible_test/_data/setup/remote.sh162
-rw-r--r--test/lib/ansible_test/_data/setup/windows-httptester.ps1228
-rwxr-xr-xtest/lib/ansible_test/_data/sslcheck.py24
-rwxr-xr-xtest/lib/ansible_test/_data/versions.py20
-rwxr-xr-xtest/lib/ansible_test/_data/virtualenvcheck.py16
-rwxr-xr-xtest/lib/ansible_test/_data/yamlcheck.py21
-rw-r--r--test/lib/ansible_test/_internal/__init__.py3
-rw-r--r--test/lib/ansible_test/_internal/ansible_util.py260
-rw-r--r--test/lib/ansible_test/_internal/cache.py35
-rw-r--r--test/lib/ansible_test/_internal/ci/__init__.py227
-rw-r--r--test/lib/ansible_test/_internal/ci/azp.py268
-rw-r--r--test/lib/ansible_test/_internal/ci/local.py217
-rw-r--r--test/lib/ansible_test/_internal/ci/shippable.py269
-rw-r--r--test/lib/ansible_test/_internal/classification.py975
-rw-r--r--test/lib/ansible_test/_internal/cli.py1217
-rw-r--r--test/lib/ansible_test/_internal/cloud/__init__.py429
-rw-r--r--test/lib/ansible_test/_internal/cloud/acme.py193
-rw-r--r--test/lib/ansible_test/_internal/cloud/aws.py124
-rw-r--r--test/lib/ansible_test/_internal/cloud/azure.py213
-rw-r--r--test/lib/ansible_test/_internal/cloud/cloudscale.py80
-rw-r--r--test/lib/ansible_test/_internal/cloud/cs.py300
-rw-r--r--test/lib/ansible_test/_internal/cloud/fallaxy.py177
-rw-r--r--test/lib/ansible_test/_internal/cloud/foreman.py191
-rw-r--r--test/lib/ansible_test/_internal/cloud/gcp.py62
-rw-r--r--test/lib/ansible_test/_internal/cloud/hcloud.py116
-rw-r--r--test/lib/ansible_test/_internal/cloud/nios.py193
-rw-r--r--test/lib/ansible_test/_internal/cloud/opennebula.py66
-rw-r--r--test/lib/ansible_test/_internal/cloud/openshift.py236
-rw-r--r--test/lib/ansible_test/_internal/cloud/scaleway.py72
-rw-r--r--test/lib/ansible_test/_internal/cloud/tower.py255
-rw-r--r--test/lib/ansible_test/_internal/cloud/vcenter.py232
-rw-r--r--test/lib/ansible_test/_internal/cloud/vultr.py71
-rw-r--r--test/lib/ansible_test/_internal/config.py356
-rw-r--r--test/lib/ansible_test/_internal/constants.py10
-rw-r--r--test/lib/ansible_test/_internal/core_ci.py680
-rw-r--r--test/lib/ansible_test/_internal/coverage/__init__.py323
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/__init__.py19
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py154
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py64
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py39
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py104
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py146
-rw-r--r--test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py109
-rw-r--r--test/lib/ansible_test/_internal/coverage/combine.py297
-rw-r--r--test/lib/ansible_test/_internal/coverage/erase.py27
-rw-r--r--test/lib/ansible_test/_internal/coverage/html.py45
-rw-r--r--test/lib/ansible_test/_internal/coverage/report.py156
-rw-r--r--test/lib/ansible_test/_internal/coverage/xml.py191
-rw-r--r--test/lib/ansible_test/_internal/coverage_util.py125
-rw-r--r--test/lib/ansible_test/_internal/csharp_import_analysis.py106
-rw-r--r--test/lib/ansible_test/_internal/data.py200
-rw-r--r--test/lib/ansible_test/_internal/delegation.py667
-rw-r--r--test/lib/ansible_test/_internal/diff.py256
-rw-r--r--test/lib/ansible_test/_internal/docker_util.py399
-rw-r--r--test/lib/ansible_test/_internal/encoding.py41
-rw-r--r--test/lib/ansible_test/_internal/env.py293
-rw-r--r--test/lib/ansible_test/_internal/executor.py2186
-rw-r--r--test/lib/ansible_test/_internal/git.py137
-rw-r--r--test/lib/ansible_test/_internal/http.py181
-rw-r--r--test/lib/ansible_test/_internal/import_analysis.py362
-rw-r--r--test/lib/ansible_test/_internal/init.py16
-rw-r--r--test/lib/ansible_test/_internal/integration/__init__.py349
-rw-r--r--test/lib/ansible_test/_internal/io.py94
-rw-r--r--test/lib/ansible_test/_internal/manage_ci.py335
-rw-r--r--test/lib/ansible_test/_internal/metadata.py151
-rw-r--r--test/lib/ansible_test/_internal/payload.py146
-rw-r--r--test/lib/ansible_test/_internal/powershell_import_analysis.py105
-rw-r--r--test/lib/ansible_test/_internal/provider/__init__.py78
-rw-r--r--test/lib/ansible_test/_internal/provider/layout/__init__.py232
-rw-r--r--test/lib/ansible_test/_internal/provider/layout/ansible.py47
-rw-r--r--test/lib/ansible_test/_internal/provider/layout/collection.py123
-rw-r--r--test/lib/ansible_test/_internal/provider/source/__init__.py18
-rw-r--r--test/lib/ansible_test/_internal/provider/source/git.py72
-rw-r--r--test/lib/ansible_test/_internal/provider/source/installed.py43
-rw-r--r--test/lib/ansible_test/_internal/provider/source/unversioned.py87
-rw-r--r--test/lib/ansible_test/_internal/sanity/__init__.py946
-rw-r--r--test/lib/ansible_test/_internal/sanity/ansible_doc.py144
-rw-r--r--test/lib/ansible_test/_internal/sanity/bin_symlinks.py110
-rw-r--r--test/lib/ansible_test/_internal/sanity/compile.py92
-rw-r--r--test/lib/ansible_test/_internal/sanity/ignores.py89
-rw-r--r--test/lib/ansible_test/_internal/sanity/import.py184
-rw-r--r--test/lib/ansible_test/_internal/sanity/integration_aliases.py399
-rw-r--r--test/lib/ansible_test/_internal/sanity/pep8.py109
-rw-r--r--test/lib/ansible_test/_internal/sanity/pslint.py121
-rw-r--r--test/lib/ansible_test/_internal/sanity/pylint.py281
-rw-r--r--test/lib/ansible_test/_internal/sanity/rstcheck.py95
-rw-r--r--test/lib/ansible_test/_internal/sanity/sanity_docs.py62
-rw-r--r--test/lib/ansible_test/_internal/sanity/shellcheck.py110
-rw-r--r--test/lib/ansible_test/_internal/sanity/validate_modules.py149
-rw-r--r--test/lib/ansible_test/_internal/sanity/yamllint.py136
-rw-r--r--test/lib/ansible_test/_internal/target.py694
-rw-r--r--test/lib/ansible_test/_internal/test.py524
-rw-r--r--test/lib/ansible_test/_internal/thread.py57
-rw-r--r--test/lib/ansible_test/_internal/types.py32
-rw-r--r--test/lib/ansible_test/_internal/units/__init__.py159
-rw-r--r--test/lib/ansible_test/_internal/util.py853
-rw-r--r--test/lib/ansible_test/_internal/util_common.py487
-rw-r--r--test/lib/ansible_test/_internal/venv.py227
-rw-r--r--test/lib/ansible_test/config/cloud-config-aws.ini.template26
-rw-r--r--test/lib/ansible_test/config/cloud-config-azure.ini.template32
-rw-r--r--test/lib/ansible_test/config/cloud-config-cloudscale.ini.template9
-rw-r--r--test/lib/ansible_test/config/cloud-config-cs.ini.template18
-rw-r--r--test/lib/ansible_test/config/cloud-config-gcp.ini.template18
-rw-r--r--test/lib/ansible_test/config/cloud-config-hcloud.ini.template15
-rw-r--r--test/lib/ansible_test/config/cloud-config-opennebula.ini.template20
-rw-r--r--test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template12
-rw-r--r--test/lib/ansible_test/config/cloud-config-scaleway.ini.template13
-rw-r--r--test/lib/ansible_test/config/cloud-config-tower.ini.template18
-rw-r--r--test/lib/ansible_test/config/cloud-config-vcenter.ini.template26
-rw-r--r--test/lib/ansible_test/config/cloud-config-vultr.ini.template12
-rw-r--r--test/lib/ansible_test/config/inventory.networking.template42
-rw-r--r--test/lib/ansible_test/config/inventory.winrm.template28
-rw-r--r--test/sanity/code-smell/configure-remoting-ps1.json4
-rwxr-xr-xtest/sanity/code-smell/configure-remoting-ps1.py54
-rw-r--r--test/sanity/code-smell/deprecated-config.json10
-rwxr-xr-xtest/sanity/code-smell/deprecated-config.py102
-rw-r--r--test/sanity/code-smell/deprecated-config.requirements.txt2
-rw-r--r--test/sanity/code-smell/docs-build.json6
-rwxr-xr-xtest/sanity/code-smell/docs-build.py135
-rw-r--r--test/sanity/code-smell/docs-build.requirements.txt6
-rw-r--r--test/sanity/code-smell/no-unwanted-files.json7
-rwxr-xr-xtest/sanity/code-smell/no-unwanted-files.py47
-rw-r--r--test/sanity/code-smell/obsolete-files.json17
-rwxr-xr-xtest/sanity/code-smell/obsolete-files.py19
-rw-r--r--test/sanity/code-smell/package-data.json6
-rwxr-xr-xtest/sanity/code-smell/package-data.py376
-rw-r--r--test/sanity/code-smell/package-data.requirements.txt10
-rw-r--r--test/sanity/code-smell/release-names.json4
-rwxr-xr-xtest/sanity/code-smell/release-names.py50
-rw-r--r--test/sanity/code-smell/release-names.requirements.txt1
-rw-r--r--test/sanity/code-smell/required-and-default-attributes.json9
-rwxr-xr-xtest/sanity/code-smell/required-and-default-attributes.py21
-rw-r--r--test/sanity/code-smell/skip.txt2
-rw-r--r--test/sanity/code-smell/test-constraints.json9
-rwxr-xr-xtest/sanity/code-smell/test-constraints.py21
-rw-r--r--test/sanity/code-smell/update-bundled.json8
-rwxr-xr-xtest/sanity/code-smell/update-bundled.py165
-rw-r--r--test/sanity/code-smell/update-bundled.requirements.txt1
-rw-r--r--test/sanity/ignore.txt426
-rw-r--r--test/support/integration/plugins/cache/jsonfile.py63
-rw-r--r--test/support/integration/plugins/filter/json_query.py53
-rw-r--r--test/support/integration/plugins/inventory/aws_ec2.py760
-rw-r--r--test/support/integration/plugins/inventory/docker_swarm.py351
-rw-r--r--test/support/integration/plugins/inventory/foreman.py295
-rw-r--r--test/support/integration/plugins/lookup/rabbitmq.py190
-rw-r--r--test/support/integration/plugins/module_utils/aws/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/aws/core.py335
-rw-r--r--test/support/integration/plugins/module_utils/aws/iam.py49
-rw-r--r--test/support/integration/plugins/module_utils/aws/s3.py50
-rw-r--r--test/support/integration/plugins/module_utils/aws/waiters.py405
-rw-r--r--test/support/integration/plugins/module_utils/azure_rm_common.py1473
-rw-r--r--test/support/integration/plugins/module_utils/azure_rm_common_rest.py97
-rw-r--r--test/support/integration/plugins/module_utils/cloud.py217
-rw-r--r--test/support/integration/plugins/module_utils/compat/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/compat/ipaddress.py2476
-rw-r--r--test/support/integration/plugins/module_utils/crypto.py2125
-rw-r--r--test/support/integration/plugins/module_utils/database.py142
-rw-r--r--test/support/integration/plugins/module_utils/docker/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/docker/common.py1022
-rw-r--r--test/support/integration/plugins/module_utils/docker/swarm.py280
-rw-r--r--test/support/integration/plugins/module_utils/ec2.py758
-rw-r--r--test/support/integration/plugins/module_utils/ecs/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/ecs/api.py364
-rw-r--r--test/support/integration/plugins/module_utils/mysql.py106
-rw-r--r--test/support/integration/plugins/module_utils/net_tools/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/network/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/network/common/__init__.py0
-rw-r--r--test/support/integration/plugins/module_utils/network/common/utils.py643
-rw-r--r--test/support/integration/plugins/module_utils/postgres.py330
-rw-r--r--test/support/integration/plugins/module_utils/rabbitmq.py220
l---------test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_resource_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_webapp_facts.py1
-rw-r--r--test/support/integration/plugins/modules/aws_az_info.py111
-rw-r--r--test/support/integration/plugins/modules/aws_s3.py925
-rw-r--r--test/support/integration/plugins/modules/azure_rm_appserviceplan.py379
-rw-r--r--test/support/integration/plugins/modules/azure_rm_functionapp.py421
-rw-r--r--test/support/integration/plugins/modules/azure_rm_functionapp_info.py207
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py241
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py217
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py304
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py212
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py277
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py208
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbserver.py388
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py265
-rw-r--r--test/support/integration/plugins/modules/azure_rm_resource.py427
-rw-r--r--test/support/integration/plugins/modules/azure_rm_resource_info.py432
-rw-r--r--test/support/integration/plugins/modules/azure_rm_storageaccount.py684
-rw-r--r--test/support/integration/plugins/modules/azure_rm_webapp.py1070
-rw-r--r--test/support/integration/plugins/modules/azure_rm_webapp_info.py489
-rw-r--r--test/support/integration/plugins/modules/azure_rm_webappslot.py1058
-rw-r--r--test/support/integration/plugins/modules/cloud_init_data_facts.py134
-rw-r--r--test/support/integration/plugins/modules/cloudformation.py837
-rw-r--r--test/support/integration/plugins/modules/cloudformation_info.py355
-rw-r--r--test/support/integration/plugins/modules/deploy_helper.py521
-rw-r--r--test/support/integration/plugins/modules/docker_swarm.py681
-rw-r--r--test/support/integration/plugins/modules/ec2.py1766
-rw-r--r--test/support/integration/plugins/modules/ec2_ami_info.py282
-rw-r--r--test/support/integration/plugins/modules/ec2_group.py1345
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_net.py524
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_subnet.py604
-rw-r--r--test/support/integration/plugins/modules/flatpak_remote.py243
-rw-r--r--test/support/integration/plugins/modules/htpasswd.py275
-rw-r--r--test/support/integration/plugins/modules/locale_gen.py237
-rw-r--r--test/support/integration/plugins/modules/lvg.py295
-rw-r--r--test/support/integration/plugins/modules/mongodb_parameter.py223
-rw-r--r--test/support/integration/plugins/modules/mongodb_user.py474
-rw-r--r--test/support/integration/plugins/modules/pids.py89
-rw-r--r--test/support/integration/plugins/modules/pkgng.py406
-rw-r--r--test/support/integration/plugins/modules/postgresql_db.py657
-rw-r--r--test/support/integration/plugins/modules/postgresql_privs.py1097
-rw-r--r--test/support/integration/plugins/modules/postgresql_query.py364
-rw-r--r--test/support/integration/plugins/modules/postgresql_set.py434
-rw-r--r--test/support/integration/plugins/modules/postgresql_table.py601
-rw-r--r--test/support/integration/plugins/modules/postgresql_user.py927
-rw-r--r--test/support/integration/plugins/modules/rabbitmq_plugin.py180
-rw-r--r--test/support/integration/plugins/modules/rabbitmq_queue.py257
-rw-r--r--test/support/integration/plugins/modules/s3_bucket.py740
-rw-r--r--test/support/integration/plugins/modules/sefcontext.py298
-rw-r--r--test/support/integration/plugins/modules/selogin.py260
-rw-r--r--test/support/integration/plugins/modules/synchronize.py618
-rw-r--r--test/support/integration/plugins/modules/timezone.py909
-rw-r--r--test/support/integration/plugins/modules/x509_crl.py783
-rw-r--r--test/support/integration/plugins/modules/x509_crl_info.py281
-rw-r--r--test/support/integration/plugins/modules/xml.py966
-rw-r--r--test/support/integration/plugins/modules/zypper.py540
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py40
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py90
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py199
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py235
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py209
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py42
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py324
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py404
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py924
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py97
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py66
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py14
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py1186
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py531
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py91
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py2578
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py27
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py473
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py162
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py179
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py275
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py316
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py686
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py147
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py61
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py444
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py71
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py82
-rw-r--r--test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py70
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py133
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py465
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py81
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py197
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py229
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py596
-rw-r--r--test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py115
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py129
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py342
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py63
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py22
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py263
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py69
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py81
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py80
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py56
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py89
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py99
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py438
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py83
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py380
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py134
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py143
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py152
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py162
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py116
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py155
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py181
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py231
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py124
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py223
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py354
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py174
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py513
-rw-r--r--test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py53
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py1
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps11
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps11
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py1
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps11
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py1
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps11
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py1
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps11
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py1
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps11
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py1
l---------test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps11
l--------- test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py 1
-rw-r--r-- test/support/windows-integration/plugins/action/win_copy.py 522
-rw-r--r-- test/support/windows-integration/plugins/action/win_reboot.py 96
-rw-r--r-- test/support/windows-integration/plugins/action/win_template.py 29
-rw-r--r-- test/support/windows-integration/plugins/become/runas.py 70
-rw-r--r-- test/support/windows-integration/plugins/module_utils/Ansible.Service.cs 1341
-rw-r--r-- test/support/windows-integration/plugins/modules/async_status.ps1 58
-rw-r--r-- test/support/windows-integration/plugins/modules/setup.ps1 516
-rw-r--r-- test/support/windows-integration/plugins/modules/slurp.ps1 28
-rw-r--r-- test/support/windows-integration/plugins/modules/win_acl.ps1 225
-rw-r--r-- test/support/windows-integration/plugins/modules/win_acl.py 132
-rw-r--r-- test/support/windows-integration/plugins/modules/win_certificate_store.ps1 260
-rw-r--r-- test/support/windows-integration/plugins/modules/win_certificate_store.py 208
-rw-r--r-- test/support/windows-integration/plugins/modules/win_command.ps1 78
-rw-r--r-- test/support/windows-integration/plugins/modules/win_command.py 136
-rw-r--r-- test/support/windows-integration/plugins/modules/win_copy.ps1 403
-rw-r--r-- test/support/windows-integration/plugins/modules/win_copy.py 207
-rw-r--r-- test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 129
-rw-r--r-- test/support/windows-integration/plugins/modules/win_data_deduplication.py 87
-rw-r--r-- test/support/windows-integration/plugins/modules/win_dsc.ps1 398
-rw-r--r-- test/support/windows-integration/plugins/modules/win_dsc.py 183
-rw-r--r-- test/support/windows-integration/plugins/modules/win_feature.ps1 111
-rw-r--r-- test/support/windows-integration/plugins/modules/win_feature.py 149
-rw-r--r-- test/support/windows-integration/plugins/modules/win_file.ps1 152
-rw-r--r-- test/support/windows-integration/plugins/modules/win_file.py 70
-rw-r--r-- test/support/windows-integration/plugins/modules/win_find.ps1 416
-rw-r--r-- test/support/windows-integration/plugins/modules/win_find.py 345
-rw-r--r-- test/support/windows-integration/plugins/modules/win_format.ps1 200
-rw-r--r-- test/support/windows-integration/plugins/modules/win_format.py 103
-rw-r--r-- test/support/windows-integration/plugins/modules/win_get_url.ps1 274
-rw-r--r-- test/support/windows-integration/plugins/modules/win_get_url.py 215
-rw-r--r-- test/support/windows-integration/plugins/modules/win_lineinfile.ps1 450
-rw-r--r-- test/support/windows-integration/plugins/modules/win_lineinfile.py 180
-rw-r--r-- test/support/windows-integration/plugins/modules/win_path.ps1 145
-rw-r--r-- test/support/windows-integration/plugins/modules/win_path.py 79
-rw-r--r-- test/support/windows-integration/plugins/modules/win_ping.ps1 21
-rw-r--r-- test/support/windows-integration/plugins/modules/win_ping.py 55
-rw-r--r-- test/support/windows-integration/plugins/modules/win_psexec.ps1 152
-rw-r--r-- test/support/windows-integration/plugins/modules/win_psexec.py 172
-rw-r--r-- test/support/windows-integration/plugins/modules/win_reboot.py 131
-rw-r--r-- test/support/windows-integration/plugins/modules/win_regedit.ps1 495
-rw-r--r-- test/support/windows-integration/plugins/modules/win_regedit.py 210
-rw-r--r-- test/support/windows-integration/plugins/modules/win_security_policy.ps1 196
-rw-r--r-- test/support/windows-integration/plugins/modules/win_security_policy.py 126
-rw-r--r-- test/support/windows-integration/plugins/modules/win_shell.ps1 138
-rw-r--r-- test/support/windows-integration/plugins/modules/win_shell.py 167
-rw-r--r-- test/support/windows-integration/plugins/modules/win_stat.ps1 186
-rw-r--r-- test/support/windows-integration/plugins/modules/win_stat.py 236
-rw-r--r-- test/support/windows-integration/plugins/modules/win_tempfile.ps1 72
-rw-r--r-- test/support/windows-integration/plugins/modules/win_tempfile.py 67
-rw-r--r-- test/support/windows-integration/plugins/modules/win_template.py 66
-rw-r--r-- test/support/windows-integration/plugins/modules/win_user.ps1 273
-rw-r--r-- test/support/windows-integration/plugins/modules/win_user.py 194
-rw-r--r-- test/support/windows-integration/plugins/modules/win_user_right.ps1 349
-rw-r--r-- test/support/windows-integration/plugins/modules/win_user_right.py 108
-rw-r--r-- test/support/windows-integration/plugins/modules/win_wait_for.ps1 259
-rw-r--r-- test/support/windows-integration/plugins/modules/win_wait_for.py 155
-rw-r--r-- test/support/windows-integration/plugins/modules/win_whoami.ps1 837
-rw-r--r-- test/support/windows-integration/plugins/modules/win_whoami.py 203
-rw-r--r-- test/units/__init__.py 0
-rw-r--r-- test/units/_vendor/test_vendor.py 65
-rw-r--r-- test/units/ansible_test/__init__.py 0
-rw-r--r-- test/units/ansible_test/ci/__init__.py 0
-rw-r--r-- test/units/ansible_test/ci/test_azp.py 31
-rw-r--r-- test/units/ansible_test/ci/test_shippable.py 31
-rw-r--r-- test/units/ansible_test/ci/util.py 53
-rw-r--r-- test/units/ansible_test/conftest.py 14
-rw-r--r-- test/units/cli/__init__.py 0
-rw-r--r-- test/units/cli/arguments/test_optparse_helpers.py 37
-rw-r--r-- test/units/cli/galaxy/test_collection_extract_tar.py 61
-rw-r--r-- test/units/cli/galaxy/test_display_collection.py 47
-rw-r--r-- test/units/cli/galaxy/test_display_header.py 41
-rw-r--r-- test/units/cli/galaxy/test_display_role.py 28
-rw-r--r-- test/units/cli/galaxy/test_execute_list.py 40
-rw-r--r-- test/units/cli/galaxy/test_execute_list_collection.py 278
-rw-r--r-- test/units/cli/galaxy/test_get_collection_widths.py 37
-rw-r--r-- test/units/cli/test_adhoc.py 113
-rw-r--r-- test/units/cli/test_cli.py 381
-rw-r--r-- test/units/cli/test_console.py 51
-rw-r--r-- test/units/cli/test_data/collection_skeleton/README.md 1
-rw-r--r-- test/units/cli/test_data/collection_skeleton/docs/My Collection.md 1
-rw-r--r-- test/units/cli/test_data/collection_skeleton/galaxy.yml.j2 7
-rw-r--r-- test/units/cli/test_data/collection_skeleton/playbooks/main.yml 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2 2
-rw-r--r-- test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2 2
-rw-r--r-- test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep 0
-rw-r--r-- test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2 3
-rw-r--r-- test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2 2
-rw-r--r-- test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2 2
-rw-r--r-- test/units/cli/test_data/role_skeleton/.travis.yml 29
-rw-r--r-- test/units/cli/test_data/role_skeleton/README.md 38
-rw-r--r-- test/units/cli/test_data/role_skeleton/defaults/main.yml.j2 2
-rw-r--r-- test/units/cli/test_data/role_skeleton/files/.git_keep 0
-rw-r--r-- test/units/cli/test_data/role_skeleton/handlers/main.yml.j2 2
-rw-r--r-- test/units/cli/test_data/role_skeleton/inventory 1
-rw-r--r-- test/units/cli/test_data/role_skeleton/meta/main.yml.j2 62
-rw-r--r-- test/units/cli/test_data/role_skeleton/tasks/main.yml.j2 2
-rw-r--r-- test/units/cli/test_data/role_skeleton/templates/.git_keep 0
-rw-r--r-- test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2 2
-rw-r--r-- test/units/cli/test_data/role_skeleton/templates/test.conf.j2 2
-rw-r--r-- test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2 1
-rw-r--r-- test/units/cli/test_data/role_skeleton/tests/test.yml.j2 5
-rw-r--r-- test/units/cli/test_data/role_skeleton/vars/main.yml.j2 2
-rw-r--r-- test/units/cli/test_doc.py 35
-rw-r--r-- test/units/cli/test_galaxy.py 1341
-rw-r--r-- test/units/cli/test_playbook.py 46
-rw-r--r-- test/units/cli/test_vault.py 217
-rw-r--r-- test/units/compat/__init__.py 0
-rw-r--r-- test/units/compat/builtins.py 33
-rw-r--r-- test/units/compat/mock.py 122
-rw-r--r-- test/units/compat/unittest.py 38
-rw-r--r-- test/units/config/manager/__init__.py 0
-rw-r--r-- test/units/config/manager/test_find_ini_config_file.py 253
-rw-r--r-- test/units/config/test.cfg 4
-rw-r--r-- test/units/config/test.yml 55
-rw-r--r-- test/units/config/test2.cfg 4
-rw-r--r-- test/units/config/test_data.py 41
-rw-r--r-- test/units/config/test_manager.py 145
-rw-r--r-- test/units/errors/__init__.py 0
-rw-r--r-- test/units/errors/test_errors.py 121
-rw-r--r-- test/units/executor/__init__.py 0
-rw-r--r-- test/units/executor/module_common/test_modify_module.py 43
-rw-r--r-- test/units/executor/module_common/test_module_common.py 197
-rw-r--r-- test/units/executor/module_common/test_recursive_finder.py 127
-rw-r--r-- test/units/executor/test_interpreter_discovery.py 87
-rw-r--r-- test/units/executor/test_play_iterator.py 458
-rw-r--r-- test/units/executor/test_playbook_executor.py 148
-rw-r--r-- test/units/executor/test_task_executor.py 656
-rw-r--r-- test/units/executor/test_task_queue_manager_callbacks.py 121
-rw-r--r-- test/units/executor/test_task_result.py 171
-rw-r--r-- test/units/galaxy/__init__.py 0
-rw-r--r-- test/units/galaxy/test_api.py 912
-rw-r--r-- test/units/galaxy/test_collection.py 1326
-rw-r--r-- test/units/galaxy/test_collection_install.py 816
-rw-r--r-- test/units/galaxy/test_token.py 55
-rw-r--r-- test/units/galaxy/test_user_agent.py 18
-rw-r--r-- test/units/inventory/test_group.py 155
-rw-r--r-- test/units/inventory/test_host.py 112
-rw-r--r-- test/units/inventory_test_data/group_vars/noparse/all.yml~ 2
-rw-r--r-- test/units/inventory_test_data/group_vars/noparse/file.txt 2
-rw-r--r-- test/units/inventory_test_data/group_vars/parse/all.yml 2
-rw-r--r-- test/units/mock/__init__.py 0
-rw-r--r-- test/units/mock/loader.py 116
-rw-r--r-- test/units/mock/path.py 8
-rw-r--r-- test/units/mock/procenv.py 90
-rw-r--r-- test/units/mock/vault_helper.py 39
-rw-r--r-- test/units/mock/yaml_helper.py 124
-rw-r--r-- test/units/module_utils/__init__.py 0
-rw-r--r-- test/units/module_utils/basic/__init__.py 0
-rw-r--r-- test/units/module_utils/basic/test__log_invocation.py 55
-rw-r--r-- test/units/module_utils/basic/test__symbolic_mode_to_octal.py 103
-rw-r--r-- test/units/module_utils/basic/test_argument_spec.py 706
-rw-r--r-- test/units/module_utils/basic/test_atomic_move.py 222
-rw-r--r-- test/units/module_utils/basic/test_deprecate_warn.py 73
-rw-r--r-- test/units/module_utils/basic/test_dict_converters.py 31
-rw-r--r-- test/units/module_utils/basic/test_exit_json.py 154
-rw-r--r-- test/units/module_utils/basic/test_filesystem.py 136
-rw-r--r-- test/units/module_utils/basic/test_get_file_attributes.py 50
-rw-r--r-- test/units/module_utils/basic/test_get_module_path.py 22
-rw-r--r-- test/units/module_utils/basic/test_heuristic_log_sanitize.py 89
-rw-r--r-- test/units/module_utils/basic/test_imports.py 128
-rw-r--r-- test/units/module_utils/basic/test_log.py 152
-rw-r--r-- test/units/module_utils/basic/test_no_log.py 160
-rw-r--r-- test/units/module_utils/basic/test_platform_distribution.py 199
-rw-r--r-- test/units/module_utils/basic/test_run_command.py 283
-rw-r--r-- test/units/module_utils/basic/test_safe_eval.py 70
-rw-r--r-- test/units/module_utils/basic/test_sanitize_keys.py 98
-rw-r--r-- test/units/module_utils/basic/test_selinux.py 254
-rw-r--r-- test/units/module_utils/basic/test_set_cwd.py 195
-rw-r--r-- test/units/module_utils/basic/test_set_mode_if_different.py 183
-rw-r--r-- test/units/module_utils/basic/test_tmpdir.py 119
-rw-r--r-- test/units/module_utils/common/__init__.py 0
-rw-r--r-- test/units/module_utils/common/parameters/test_handle_aliases.py 102
-rw-r--r-- test/units/module_utils/common/parameters/test_list_deprecations.py 44
-rw-r--r-- test/units/module_utils/common/parameters/test_list_no_log_values.py 228
-rw-r--r-- test/units/module_utils/common/process/test_get_bin_path.py 39
-rw-r--r-- test/units/module_utils/common/test_collections.py 175
-rw-r--r-- test/units/module_utils/common/test_dict_transformations.py 135
-rw-r--r-- test/units/module_utils/common/test_network.py 68
-rw-r--r-- test/units/module_utils/common/test_removed.py 62
-rw-r--r-- test/units/module_utils/common/test_sys_info.py 150
-rw-r--r-- test/units/module_utils/common/test_utils.py 46
-rw-r--r-- test/units/module_utils/common/text/converters/test_container_to_bytes.py 95
-rw-r--r-- test/units/module_utils/common/text/converters/test_container_to_text.py 78
-rw-r--r-- test/units/module_utils/common/text/converters/test_json_encode_fallback.py 55
-rw-r--r-- test/units/module_utils/common/text/converters/test_jsonify.py 27
-rw-r--r-- test/units/module_utils/common/text/converters/test_to_str.py 61
-rw-r--r-- test/units/module_utils/common/text/formatters/test_bytes_to_human.py 116
-rw-r--r-- test/units/module_utils/common/text/formatters/test_human_to_bytes.py 185
-rw-r--r-- test/units/module_utils/common/text/formatters/test_lenient_lowercase.py 68
-rw-r--r-- test/units/module_utils/common/validation/test_check_mutually_exclusive.py 57
-rw-r--r-- test/units/module_utils/common/validation/test_check_required_arguments.py 88
-rw-r--r-- test/units/module_utils/common/validation/test_check_required_together.py 57
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_bits.py 43
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_bool.py 49
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_bytes.py 50
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_dict.py 34
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_float.py 38
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_int.py 34
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_jsonarg.py 36
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_list.py 32
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_path.py 28
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_raw.py 23
-rw-r--r-- test/units/module_utils/common/validation/test_check_type_str.py 33
-rw-r--r-- test/units/module_utils/common/validation/test_count_terms.py 40
-rw-r--r-- test/units/module_utils/common/warnings/test_deprecate.py 96
-rw-r--r-- test/units/module_utils/common/warnings/test_warn.py 61
-rw-r--r-- test/units/module_utils/conftest.py 72
-rw-r--r-- test/units/module_utils/facts/__init__.py 0
-rw-r--r-- test/units/module_utils/facts/base.py 65
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo 40
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo 32
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo 12
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo 75
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo 39
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo 44
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo 125
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu 61
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo 56
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo 104
-rw-r--r-- test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo 216
-rw-r--r-- test/units/module_utils/facts/fixtures/distribution_files/ClearLinux 10
-rw-r--r-- test/units/module_utils/facts/fixtures/distribution_files/CoreOS 10
-rw-r--r-- test/units/module_utils/facts/fixtures/distribution_files/LinuxMint 12
-rw-r--r-- test/units/module_utils/facts/fixtures/distribution_files/Slackware 1
-rw-r--r-- test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent 1
-rw-r--r-- test/units/module_utils/facts/fixtures/findmount_output.txt 40
-rw-r--r-- test/units/module_utils/facts/hardware/__init__.py 0
-rw-r--r-- test/units/module_utils/facts/hardware/linux_data.py 585
-rw-r--r-- test/units/module_utils/facts/hardware/test_linux.py 175
-rw-r--r-- test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py 62
-rw-r--r-- test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py 20
-rw-r--r-- test/units/module_utils/facts/network/__init__.py 0
-rw-r--r-- test/units/module_utils/facts/network/test_fc_wwn.py 94
-rw-r--r-- test/units/module_utils/facts/network/test_generic_bsd.py 175
-rw-r--r-- test/units/module_utils/facts/network/test_iscsi_get_initiator.py 54
-rw-r--r-- test/units/module_utils/facts/other/__init__.py 0
-rw-r--r-- test/units/module_utils/facts/other/test_facter.py 228
-rw-r--r-- test/units/module_utils/facts/other/test_ohai.py 6768
-rw-r--r-- test/units/module_utils/facts/system/__init__.py 0
-rw-r--r-- test/units/module_utils/facts/system/distribution/__init__.py 0
-rw-r--r-- test/units/module_utils/facts/system/distribution/conftest.py 21
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json 31
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json 31
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/debian_10.json 40
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json 39
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json 36
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/devuan.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json 55
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json 42
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/omnios.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json 28
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/openindiana.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json 36
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json 29
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json 43
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json 35
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json 26
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json 40
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json 23
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json 24
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json 39
-rw-r--r-- test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json 25
-rw-r--r-- test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py 33
-rw-r--r-- test/units/module_utils/facts/system/distribution/test_distribution_version.py 143
-rw-r--r-- test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py 51
-rw-r--r-- test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py 37
-rw-r--r-- test/units/module_utils/facts/system/test_cmdline.py 67
-rw-r--r-- test/units/module_utils/facts/system/test_lsb.py 108
-rw-r--r-- test/units/module_utils/facts/test_ansible_collector.py 504
-rw-r--r-- test/units/module_utils/facts/test_collector.py 563
-rw-r--r-- test/units/module_utils/facts/test_collectors.py 430
-rw-r--r-- test/units/module_utils/facts/test_date_time.py 103
-rw-r--r-- test/units/module_utils/facts/test_facts.py 644
-rw-r--r-- test/units/module_utils/facts/test_timeout.py 171
-rw-r--r-- test/units/module_utils/facts/test_utils.py 39
-rw-r--r-- test/units/module_utils/json_utils/__init__.py 0
-rw-r--r-- test/units/module_utils/json_utils/test_filter_non_json_lines.py 88
-rw-r--r-- test/units/module_utils/parsing/test_convert_bool.py 60
-rw-r--r-- test/units/module_utils/test_api.py 48
-rw-r--r-- test/units/module_utils/test_distro.py 38
-rw-r--r-- test/units/module_utils/urls/__init__.py 0
-rw-r--r-- test/units/module_utils/urls/fixtures/client.key 28
-rw-r--r-- test/units/module_utils/urls/fixtures/client.pem 81
-rw-r--r-- test/units/module_utils/urls/fixtures/client.txt 3
-rw-r--r-- test/units/module_utils/urls/fixtures/multipart.txt 166
-rw-r--r-- test/units/module_utils/urls/fixtures/netrc 3
-rw-r--r-- test/units/module_utils/urls/test_RedirectHandlerFactory.py 138
-rw-r--r-- test/units/module_utils/urls/test_Request.py 456
-rw-r--r-- test/units/module_utils/urls/test_RequestWithMethod.py 22
-rw-r--r-- test/units/module_utils/urls/test_fetch_url.py 220
-rw-r--r-- test/units/module_utils/urls/test_generic_urlparse.py 57
-rw-r--r-- test/units/module_utils/urls/test_prepare_multipart.py 102
-rw-r--r-- test/units/module_utils/urls/test_urls.py 109
-rw-r--r-- test/units/modules/__init__.py 0
-rw-r--r-- test/units/modules/conftest.py 31
-rw-r--r-- test/units/modules/test_apt.py 53
-rw-r--r-- test/units/modules/test_async_wrapper.py 57
-rw-r--r-- test/units/modules/test_copy.py 215
-rw-r--r-- test/units/modules/test_iptables.py 919
-rw-r--r-- test/units/modules/test_known_hosts.py 110
-rw-r--r-- test/units/modules/test_pip.py 38
-rw-r--r-- test/units/modules/test_systemd.py 52
-rw-r--r-- test/units/modules/test_yum.py 207
-rw-r--r-- test/units/modules/utils.py 50
-rw-r--r-- test/units/parsing/__init__.py 0
-rw-r--r-- test/units/parsing/fixtures/ajson.json 19
-rw-r--r-- test/units/parsing/fixtures/vault.yml 6
-rw-r--r-- test/units/parsing/test_ajson.py 187
-rw-r--r-- test/units/parsing/test_dataloader.py 239
-rw-r--r-- test/units/parsing/test_mod_args.py 137
-rw-r--r-- test/units/parsing/test_splitter.py 110
-rw-r--r-- test/units/parsing/test_unquote.py 51
-rw-r--r-- test/units/parsing/utils/__init__.py 0
-rw-r--r-- test/units/parsing/utils/test_addresses.py 98
-rw-r--r-- test/units/parsing/utils/test_jsonify.py 39
-rw-r--r-- test/units/parsing/utils/test_yaml.py 34
-rw-r--r-- test/units/parsing/vault/__init__.py 0
-rw-r--r-- test/units/parsing/vault/test_vault.py 941
-rw-r--r-- test/units/parsing/vault/test_vault_editor.py 517
-rw-r--r-- test/units/parsing/yaml/__init__.py 0
-rw-r--r-- test/units/parsing/yaml/test_dumper.py 103
-rw-r--r-- test/units/parsing/yaml/test_loader.py 436
-rw-r--r-- test/units/parsing/yaml/test_objects.py 164
-rw-r--r-- test/units/playbook/__init__.py 0
-rw-r--r-- test/units/playbook/role/__init__.py 0
-rw-r--r-- test/units/playbook/role/test_include_role.py 248
-rw-r--r-- test/units/playbook/role/test_role.py 422
-rw-r--r-- test/units/playbook/test_attribute.py 57
-rw-r--r-- test/units/playbook/test_base.py 630
-rw-r--r-- test/units/playbook/test_block.py 82
-rw-r--r-- test/units/playbook/test_collectionsearch.py 78
-rw-r--r-- test/units/playbook/test_conditional.py 240
-rw-r--r-- test/units/playbook/test_helpers.py 405
-rw-r--r-- test/units/playbook/test_included_file.py 332
-rw-r--r-- test/units/playbook/test_play.py 132
-rw-r--r-- test/units/playbook/test_play_context.py 111
-rw-r--r-- test/units/playbook/test_playbook.py 61
-rw-r--r-- test/units/playbook/test_taggable.py 102
-rw-r--r-- test/units/playbook/test_task.py 114
-rw-r--r-- test/units/plugins/__init__.py 0
-rw-r--r-- test/units/plugins/action/__init__.py 0
-rw-r--r-- test/units/plugins/action/test_action.py 683
-rw-r--r-- test/units/plugins/action/test_gather_facts.py 87
-rw-r--r-- test/units/plugins/action/test_raw.py 105
-rw-r--r-- test/units/plugins/become/__init__.py 0
-rw-r--r-- test/units/plugins/become/conftest.py 37
-rw-r--r-- test/units/plugins/become/test_su.py 40
-rw-r--r-- test/units/plugins/become/test_sudo.py 45
-rw-r--r-- test/units/plugins/cache/__init__.py 0
-rw-r--r-- test/units/plugins/cache/test_cache.py 167
-rw-r--r-- test/units/plugins/callback/__init__.py 0
-rw-r--r-- test/units/plugins/callback/test_callback.py 412
-rw-r--r-- test/units/plugins/connection/__init__.py 0
-rw-r--r-- test/units/plugins/connection/test_connection.py 169
-rw-r--r-- test/units/plugins/connection/test_local.py 40
-rw-r--r-- test/units/plugins/connection/test_paramiko.py 42
-rw-r--r-- test/units/plugins/connection/test_psrp.py 233
-rw-r--r-- test/units/plugins/connection/test_ssh.py 688
-rw-r--r-- test/units/plugins/connection/test_winrm.py 431
-rw-r--r-- test/units/plugins/filter/__init__.py 0
-rw-r--r-- test/units/plugins/filter/test_core.py 41
-rw-r--r-- test/units/plugins/filter/test_mathstuff.py 176
-rw-r--r-- test/units/plugins/inventory/__init__.py 0
-rw-r--r-- test/units/plugins/inventory/test_constructed.py 206
-rw-r--r-- test/units/plugins/inventory/test_inventory.py 207
-rw-r--r-- test/units/plugins/inventory/test_script.py 105
-rw-r--r-- test/units/plugins/loader_fixtures/__init__.py 0
-rw-r--r-- test/units/plugins/loader_fixtures/import_fixture.py 9
-rw-r--r-- test/units/plugins/lookup/__init__.py 0
-rw-r--r-- test/units/plugins/lookup/test_env.py 35
-rw-r--r-- test/units/plugins/lookup/test_ini.py 63
-rw-r--r-- test/units/plugins/lookup/test_password.py 501
-rw-r--r-- test/units/plugins/shell/__init__.py 0
-rw-r--r-- test/units/plugins/shell/test_cmd.py 19
-rw-r--r-- test/units/plugins/shell/test_powershell.py 83
-rw-r--r-- test/units/plugins/strategy/__init__.py 0
-rw-r--r-- test/units/plugins/strategy/test_linear.py 177
-rw-r--r-- test/units/plugins/strategy/test_strategy.py 546
-rw-r--r-- test/units/plugins/test_plugins.py 134
-rw-r--r-- test/units/regex/test_invalid_var_names.py 27
-rw-r--r-- test/units/requirements.txt 6
-rw-r--r-- test/units/template/__init__.py 0
-rw-r--r-- test/units/template/test_native_concat.py 28
-rw-r--r-- test/units/template/test_safe_eval.py 44
-rw-r--r-- test/units/template/test_templar.py 446
-rw-r--r-- test/units/template/test_template_utilities.py 117
-rw-r--r-- test/units/template/test_vars.py 81
-rw-r--r-- test/units/test_constants.py 122
-rw-r--r-- test/units/test_context.py 27
-rw-r--r-- test/units/utils/__init__.py 0
-rw-r--r-- test/units/utils/collection_loader/__init__.py 0
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py 4
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml 4
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py 8
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py 0
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py 4
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py 6
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py 5
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py 6
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep 0
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py 5
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py 5
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py 5
-rw-r--r-- test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py 5
-rw-r--r-- test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep 0
-rw-r--r-- test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep 0
-rw-r--r-- test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep 0
-rw-r--r-- test/units/utils/collection_loader/test_collection_loader.py 834
-rw-r--r-- test/units/utils/display/test_display.py 20
-rw-r--r-- test/units/utils/display/test_logger.py 31
-rw-r--r-- test/units/utils/display/test_warning.py 42
-rw-r--r-- test/units/utils/test_cleanup_tmp_file.py 48
-rw-r--r-- test/units/utils/test_context_objects.py 70
-rw-r--r-- test/units/utils/test_encrypt.py 168
-rw-r--r-- test/units/utils/test_helpers.py 34
-rw-r--r-- test/units/utils/test_isidentifier.py 49
-rw-r--r-- test/units/utils/test_plugin_docs.py 333
-rw-r--r-- test/units/utils/test_shlex.py 41
-rw-r--r-- test/units/utils/test_unsafe_proxy.py 110
-rw-r--r-- test/units/utils/test_vars.py 282
-rw-r--r-- test/units/utils/test_version.py 335
-rw-r--r-- test/units/vars/__init__.py 0
-rw-r--r-- test/units/vars/test_module_response_deepcopy.py 60
-rw-r--r-- test/units/vars/test_variable_manager.py 307
4324 files changed, 425789 insertions, 0 deletions
diff --git a/COPYING b/COPYING
new file mode 100644
index 00000000..10926e87
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..0eb9d004
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,36 @@
+include README.rst
+include COPYING
+include SYMLINK_CACHE.json
+include requirements.txt
+recursive-include docs *
+exclude docs/docsite/rst_warnings
+recursive-exclude docs/docsite/_build *
+recursive-exclude docs/docsite/_extensions *.pyc *.pyo
+include examples/hosts
+include examples/ansible.cfg
+include examples/scripts/ConfigureRemotingForAnsible.ps1
+include examples/scripts/upgrade_to_ps3.ps1
+recursive-include lib/ansible/executor/powershell *.ps1
+recursive-include lib/ansible/module_utils/csharp *.cs
+recursive-include lib/ansible/module_utils/powershell *.psm1
+recursive-include lib/ansible/modules/windows *.ps1
+recursive-include lib/ansible/galaxy/data *.yml *.j2 README.md ansible.cfg inventory .git_keep
+recursive-include lib/ansible/config *.yml
+recursive-include licenses *.txt
+recursive-include packaging *
+recursive-include test/ansible_test *.py Makefile
+recursive-include test/integration *
+recursive-include test/lib/ansible_test/config *.template
+recursive-include test/lib/ansible_test/_data *.cfg *.ini *.json *.ps1 *.psd1 *.py *.sh *.txt *.yml coveragerc inventory
+recursive-include test/lib/ansible_test/_data/injector ansible ansible-config ansible-connection ansible-console ansible-doc ansible-galaxy ansible-playbook ansible-pull ansible-test ansible-vault pytest
+recursive-include test/lib/ansible_test/_data/sanity/validate-modules validate-modules
+recursive-include test/sanity *.json *.py *.txt
+recursive-include test/support *.py *.ps1 *.psm1 *.cs
+exclude test/sanity/code-smell/botmeta.*
+recursive-include test/units *
+include Makefile
+include MANIFEST.in
+include changelogs/CHANGELOG*.rst
+include changelogs/changelog.yaml
+recursive-include hacking/build_library *.py
+include hacking/build-ansible.py
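
A quick way to confirm that the include/exclude rules above behave as
intended is to list what actually lands in a built sdist. A minimal
sketch, assuming a tarball produced by ``make sdist`` is sitting in
``dist/`` (the filename is illustrative)::

    import tarfile

    # Illustrative path; the real name depends on the built version.
    with tarfile.open("dist/ansible-base-2.10.4.tar.gz") as tar:
        names = tar.getnames()

    # Spot-check a few of the MANIFEST.in rules.
    assert any(n.endswith("/COPYING") for n in names)           # include COPYING
    assert any("/packaging/" in n for n in names)               # recursive-include packaging *
    assert not any("docs/docsite/_build" in n for n in names)   # recursive-exclude docs/docsite/_build *
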
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..f4e95348
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,288 @@
+# WARN: gmake syntax
+########################################################
+# Makefile for Ansible
+#
+# useful targets:
+# make clean ---------------- clean up
+# make webdocs -------------- produce ansible doc at docs/docsite/_build/html
+# make sdist ---------------- produce a tarball
+# make deb-src -------------- produce a DEB source
+# make deb ------------------ produce a DEB
+# make docs ----------------- rebuild the manpages (results are checked in)
+# make tests ---------------- run the tests (see https://docs.ansible.com/ansible/devel/dev_guide/testing_units.html for requirements)
+
+########################################################
+# variable section
+
+NAME = ansible-base
+OS = $(shell uname -s)
+PREFIX ?= '/usr/local'
+SDIST_DIR ?= 'dist'
+
+# Derive the manpage targets by stripping the .rst.in suffix from the
+# docs/man/man1/ansible*.1.rst.in sources; rst2man (selected below) renders them.
+MANPAGES ?= $(patsubst %.rst.in,%,$(wildcard ./docs/man/man1/ansible*.1.rst.in))
+ifneq ($(shell which rst2man 2>/dev/null),)
+ASCII2MAN = rst2man $< $@
+else ifneq ($(shell which rst2man.py 2>/dev/null),)
+ASCII2MAN = rst2man.py $< $@
+else
+ASCII2MAN = @echo "ERROR: rst2man from docutils command is not installed but is required to build $(MANPAGES)" && exit 1
+endif
+
+PYTHON=python
+GENERATE_CLI = hacking/build-ansible.py generate-man
+
+# fetch version from project release.py as single source-of-truth
+VERSION := $(shell $(PYTHON) packaging/release/versionhelper/version_helper.py --raw || echo error)
+ifeq ($(findstring error,$(VERSION)), error)
+$(error "version_helper failed")
+endif
+
+# Get the branch information from git
+ifneq ($(shell which git),)
+GIT_DATE := $(shell git log -n 1 --format="%ci")
+GIT_HASH := $(shell git log -n 1 --format="%h")
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.\/]//g')
+GITINFO = .$(GIT_HASH).$(GIT_BRANCH)
+else
+GITINFO = ""
+endif
+
+ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD|DragonFly'),1)
+DATE := $(shell date -j -r $(shell git log -n 1 --format="%ct") +%Y%m%d%H%M)
+CPUS ?= $(shell sysctl hw.ncpu|awk '{print $$2}')
+else
+DATE := $(shell date --utc --date="$(GIT_DATE)" +%Y%m%d%H%M)
+CPUS ?= $(shell nproc)
+endif
+
+# DEB build parameters
+DEBUILD_BIN ?= debuild
+DEBUILD_OPTS = --source-option="-I"
+DPUT_BIN ?= dput
+DPUT_OPTS ?=
+DEB_DATE := $(shell LC_TIME=C date +"%a, %d %b %Y %T %z")
+DEB_VERSION ?= $(shell $(PYTHON) packaging/release/versionhelper/version_helper.py --debversion)
+ifeq ($(OFFICIAL),yes)
+ DEB_RELEASE ?= $(shell $(PYTHON) packaging/release/versionhelper/version_helper.py --debrelease)ppa
+ # Sign OFFICIAL builds using 'DEBSIGN_KEYID'
+ # DEBSIGN_KEYID is required when signing
+ ifneq ($(DEBSIGN_KEYID),)
+ DEBUILD_OPTS += -k$(DEBSIGN_KEYID)
+ endif
+else
+ DEB_RELEASE ?= 100.git$(DATE)$(GITINFO)
+ # Do not sign unofficial builds
+ DEBUILD_OPTS += -uc -us
+ DPUT_OPTS += -u
+endif
+DEBUILD = $(DEBUILD_BIN) $(DEBUILD_OPTS)
+DEB_PPA ?= ppa
+# Choose the Debian/Ubuntu release(s) to build for; DEB_DIST takes space-separated codenames
+DEB_DIST ?= unstable
+
+# pbuilder parameters
+PBUILDER_ARCH ?= amd64
+PBUILDER_CACHE_DIR = /var/cache/pbuilder
+PBUILDER_BIN ?= pbuilder
+PBUILDER_OPTS ?= --debootstrapopts --variant=buildd --architecture $(PBUILDER_ARCH) --debbuildopts -b
+
+# ansible-test parameters
+ANSIBLE_TEST ?= bin/ansible-test
+TEST_FLAGS ?=
+
+# ansible-test units parameters (make test / make test-py3)
+PYTHON_VERSION ?= $(shell python2 -c 'import sys; print("%s.%s" % sys.version_info[:2])')
+PYTHON3_VERSION ?= $(shell python3 -c 'import sys; print("%s.%s" % sys.version_info[:2])')
+
+# ansible-test integration parameters (make integration)
+IMAGE ?= centos7
+TARGET ?=
+
+########################################################
+
+.PHONY: all
+all: clean python
+
+.PHONY: tests
+tests:
+ $(ANSIBLE_TEST) units -v --python $(PYTHON_VERSION) $(TEST_FLAGS)
+
+.PHONY: tests-py3
+tests-py3:
+ $(ANSIBLE_TEST) units -v --python $(PYTHON3_VERSION) $(TEST_FLAGS)
+
+.PHONY: integration
+integration:
+ $(ANSIBLE_TEST) integration -v --docker $(IMAGE) $(TARGET) $(TEST_FLAGS)
+
+# Regenerate %.1.rst if %.1.rst.in has been modified more
+# recently than %.1.rst.
+%.1.rst: %.1.rst.in
+ sed "s/%VERSION%/$(VERSION)/" $< > $@
+ rm $<
+
+# Regenerate %.1 if %.1.rst or release.py has been modified more
+# recently than %.1. (Implicitly runs the %.1.rst recipe)
+%.1: %.1.rst lib/ansible/release.py
+ $(ASCII2MAN)
+
+.PHONY: clean
+clean:
+ @echo "Cleaning up distutils stuff"
+ rm -rf build
+ rm -rf dist
+ rm -rf lib/ansible*.egg-info/
+ @echo "Cleaning up byte compiled python stuff"
+ find . -type f -regex ".*\.py[co]$$" -delete
+ find . -type d -name "__pycache__" -delete
+ @echo "Cleaning up editor backup files"
+ find . -type f -not -path ./test/units/inventory_test_data/group_vars/noparse/all.yml~ \( -name "*~" -or -name "#*" \) -delete
+ find . -type f \( -name "*.swp" \) -delete
+ @echo "Cleaning up manpage stuff"
+ find ./docs/man -type f -name "*.xml" -delete
+ find ./docs/man -type f -name "*.rst" -delete
+ find ./docs/man/man3 -type f -name "*.3" -delete
+ rm -f ./docs/man/man1/*
+ @echo "Cleaning up output from test runs"
+ rm -rf test/test_data
+ rm -rf shippable/
+ rm -rf logs/
+ rm -rf .cache/
+ rm -f test/units/.coverage*
+ rm -rf test/results/*/*
+ find test/ -type f -name '*.retry' -delete
+ @echo "Cleaning up Debian building stuff"
+ rm -rf debian
+ rm -rf deb-build
+ rm -rf docs/json
+ rm -rf docs/js
+ @echo "Cleaning up docsite"
+ $(MAKE) -C docs/docsite clean
+
+.PHONY: python
+python:
+ $(PYTHON) setup.py build
+
+.PHONY: install
+install:
+ $(PYTHON) setup.py install
+
+install_manpages:
+ gzip -9 $(wildcard ./docs/man/man1/ansible*.1)
+ cp $(wildcard ./docs/man/man1/ansible*.1.gz) $(PREFIX)/man/man1/
+
+.PHONY: sdist_check
+sdist_check:
+ $(PYTHON) -c 'import setuptools, sys; sys.exit(int(not (tuple(map(int, setuptools.__version__.split("."))) > (39, 2, 0))))'
+ $(PYTHON) packaging/sdist/check-link-behavior.py
+
+.PHONY: sdist
+sdist: sdist_check clean docs
+ _ANSIBLE_SDIST_FROM_MAKEFILE=1 $(PYTHON) setup.py sdist --dist-dir=$(SDIST_DIR)
+
+# Official releases generate the changelog as the last commit before the release.
+# Snapshots shouldn't result in new checkins so the changelog is generated as
+# part of creating the tarball.
+.PHONY: snapshot
+snapshot: sdist_check clean docs changelog
+ _ANSIBLE_SDIST_FROM_MAKEFILE=1 $(PYTHON) setup.py sdist --dist-dir=$(SDIST_DIR)
+
+.PHONY: sdist_upload
+sdist_upload: clean docs
+ $(PYTHON) setup.py sdist upload 2>&1 |tee upload.log
+
+.PHONY: changelog
+changelog:
+ PYTHONPATH=./lib antsibull-changelog release -vv --use-ansible-doc && PYTHONPATH=./lib antsibull-changelog generate -vv --use-ansible-doc
+
+.PHONY: debian
+debian: sdist
+ @for DIST in $(DEB_DIST) ; do \
+ mkdir -p deb-build/$${DIST} ; \
+ tar -C deb-build/$${DIST} -xvf dist/$(NAME)-$(VERSION).tar.gz ; \
+ cp -a packaging/debian deb-build/$${DIST}/$(NAME)-$(VERSION)/ ; \
+ sed -ie "s|%VERSION%|$(DEB_VERSION)|g;s|%RELEASE%|$(DEB_RELEASE)|;s|%DIST%|$${DIST}|g;s|%DATE%|$(DEB_DATE)|g" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \
+ done
+
+.PHONY: deb
+deb: deb-src
+ @for DIST in $(DEB_DIST) ; do \
+ PBUILDER_OPTS="$(PBUILDER_OPTS) --distribution $${DIST} --basetgz $(PBUILDER_CACHE_DIR)/$${DIST}-$(PBUILDER_ARCH)-base.tgz --buildresult $(CURDIR)/deb-build/$${DIST}" ; \
+ $(PBUILDER_BIN) create $${PBUILDER_OPTS} --othermirror "deb http://archive.ubuntu.com/ubuntu $${DIST} universe" ; \
+ $(PBUILDER_BIN) update $${PBUILDER_OPTS} ; \
+ $(PBUILDER_BIN) build $${PBUILDER_OPTS} deb-build/$${DIST}/$(NAME)_$(DEB_VERSION)-$(DEB_RELEASE)~$${DIST}.dsc ; \
+ done
+ @echo "#############################################"
+ @echo "Ansible DEB artifacts:"
+ @for DIST in $(DEB_DIST) ; do \
+ echo deb-build/$${DIST}/$(NAME)_$(DEB_VERSION)-$(DEB_RELEASE)~$${DIST}_amd64.changes ; \
+ done
+ @echo "#############################################"
+
+# Build package outside of pbuilder, with locally installed dependencies.
+# Install BuildRequires as noted in packaging/debian/control.
+.PHONY: local_deb
+local_deb: debian
+ @for DIST in $(DEB_DIST) ; do \
+ (cd deb-build/$${DIST}/$(NAME)-$(VERSION)/ && $(DEBUILD) -b) ; \
+ done
+ @echo "#############################################"
+ @echo "Ansible DEB artifacts:"
+ @for DIST in $(DEB_DIST) ; do \
+ echo deb-build/$${DIST}/$(NAME)_$(DEB_VERSION)-$(DEB_RELEASE)~$${DIST}_amd64.changes ; \
+ done
+ @echo "#############################################"
+
+.PHONY: deb-src
+deb-src: debian
+ @for DIST in $(DEB_DIST) ; do \
+ (cd deb-build/$${DIST}/$(NAME)-$(VERSION)/ && $(DEBUILD) -S) ; \
+ done
+ @echo "#############################################"
+ @echo "Ansible DEB artifacts:"
+ @for DIST in $(DEB_DIST) ; do \
+ echo deb-build/$${DIST}/$(NAME)_$(DEB_VERSION)-$(DEB_RELEASE)~$${DIST}_source.changes ; \
+ done
+ @echo "#############################################"
+
+.PHONY: deb-upload
+deb-upload: deb
+ @for DIST in $(DEB_DIST) ; do \
+ $(DPUT_BIN) $(DPUT_OPTS) $(DEB_PPA) deb-build/$${DIST}/$(NAME)_$(DEB_VERSION)-$(DEB_RELEASE)~$${DIST}_amd64.changes ; \
+ done
+
+.PHONY: deb-src-upload
+deb-src-upload: deb-src
+ @for DIST in $(DEB_DIST) ; do \
+ $(DPUT_BIN) $(DPUT_OPTS) $(DEB_PPA) deb-build/$${DIST}/$(NAME)_$(DEB_VERSION)-$(DEB_RELEASE)~$${DIST}_source.changes ; \
+ done
+
+.PHONY: epub
+epub:
+ (cd docs/docsite/; CPUS=$(CPUS) $(MAKE) epub)
+
+# for arch or gentoo, read the instructions in the appropriate 'packaging' subdirectory
+.PHONY: webdocs
+webdocs:
+ (cd docs/docsite/; CPUS=$(CPUS) $(MAKE) docs)
+
+.PHONY: linkcheckdocs
+linkcheckdocs:
+ (cd docs/docsite/; CPUS=$(CPUS) $(MAKE) linkcheckdocs)
+
+.PHONY: generate_rst
+generate_rst: lib/ansible/cli/*.py
+ mkdir -p ./docs/man/man1/ ; \
+ $(GENERATE_CLI) --template-file=docs/templates/man.j2 --output-dir=docs/man/man1/ --output-format man lib/ansible/cli/*.py
+
+
+docs: generate_rst
+ $(MAKE) $(MANPAGES)
+
+.PHONY: alldocs
+alldocs: docs webdocs
+
+version:
+ @echo $(VERSION)
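
The ``sdist_check`` target above compresses its setuptools version gate
into a single ``python -c`` line. Unpacked, the same comparison reads as
follows (a minimal sketch of the version check only, not of
``check-link-behavior.py``)::

    import sys

    import setuptools

    # sdist builds require setuptools newer than 39.2.0; exit non-zero
    # otherwise, exactly like the one-liner in the sdist_check target.
    installed = tuple(map(int, setuptools.__version__.split(".")))
    sys.exit(int(not (installed > (39, 2, 0))))
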
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 00000000..af9262e3
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,150 @@
+Metadata-Version: 1.2
+Name: ansible-base
+Version: 2.10.4
+Summary: Radically simple IT automation
+Home-page: https://ansible.com/
+Author: Ansible, Inc.
+Author-email: info@ansible.com
+License: GPLv3+
+Project-URL: Bug Tracker, https://github.com/ansible/ansible/issues
+Project-URL: CI: Shippable, https://app.shippable.com/github/ansible/ansible
+Project-URL: Code of Conduct, https://docs.ansible.com/ansible/latest/community/code_of_conduct.html
+Project-URL: Documentation, https://docs.ansible.com/ansible/
+Project-URL: Mailing lists, https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information
+Project-URL: Source Code, https://github.com/ansible/ansible
+Description: |PyPI version| |Docs badge| |Chat badge| |Build Status| |Code Of Conduct| |Mailing Lists| |License|
+
+ *******
+ Ansible
+ *******
+
+ Ansible is a radically simple IT automation system. It handles
+ configuration management, application deployment, cloud provisioning,
+ ad-hoc task execution, network automation, and multi-node orchestration. Ansible makes complex
+        changes like zero-downtime rolling updates with load balancers easy. More information is available on `the Ansible website <https://ansible.com/>`_.
+
+ Design Principles
+ =================
+
+ * Have a dead simple setup process and a minimal learning curve.
+ * Manage machines very quickly and in parallel.
+        * Avoid custom agents and additional open ports; be agentless by
+ leveraging the existing SSH daemon.
+ * Describe infrastructure in a language that is both machine and human
+ friendly.
+ * Focus on security and easy auditability/review/rewriting of content.
+ * Manage new remote machines instantly, without bootstrapping any
+ software.
+ * Allow module development in any dynamic language, not just Python.
+ * Be usable as non-root.
+ * Be the easiest IT automation system to use, ever.
+
+ Use Ansible
+ ===========
+
+ You can install a released version of Ansible via ``pip``, a package manager, or
+ our `release repository <https://releases.ansible.com/ansible/>`_. See our
+ `installation guide <https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`_ for details on installing Ansible
+ on a variety of platforms.
+
+ Red Hat offers supported builds of `Ansible Engine <https://www.ansible.com/ansible-engine>`_.
+
+ Power users and developers can run the ``devel`` branch, which has the latest
+ features and fixes, directly. Although it is reasonably stable, you are more likely to encounter
+ breaking changes when running the ``devel`` branch. We recommend getting involved
+ in the Ansible community if you want to run the ``devel`` branch.
+
+ Get Involved
+ ============
+
+ * Read `Community
+ Information <https://docs.ansible.com/ansible/latest/community>`_ for all
+ kinds of ways to contribute to and interact with the project,
+ including mailing list information and how to submit bug reports and
+ code to Ansible.
+ * Join a `Working Group
+ <https://github.com/ansible/community/wiki>`_, an organized community devoted to a specific technology domain or platform.
+ * Submit a proposed code update through a pull request to the ``devel`` branch.
+ * Talk to us before making larger changes
+ to avoid duplicate efforts. This not only helps everyone
+ know what is going on, it also helps save time and effort if we decide
+ some changes are needed.
+ * For a list of email lists, IRC channels and Working Groups, see the
+ `Communication page <https://docs.ansible.com/ansible/latest/community/communication.html>`_
+
+ Coding Guidelines
+ =================
+
+ We document our Coding Guidelines in the `Developer Guide <https://docs.ansible.com/ansible/devel/dev_guide/>`_. We particularly suggest you review:
+
+ * `Contributing your module to Ansible <https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_checklist.html>`_
+ * `Conventions, tips and pitfalls <https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html>`_
+
+ Branch Info
+ ===========
+
+ * The ``devel`` branch corresponds to the release actively under development.
+ * The ``stable-2.X`` branches correspond to stable releases.
+ * Create a branch based on ``devel`` and set up a `dev environment <https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html#common-environment-setup>`_ if you want to open a PR.
+ * See the `Ansible release and maintenance <https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html>`_ page for information about active branches.
+
+ Roadmap
+ =======
+
+        Based on team and community feedback, an initial roadmap will be published for a major or minor version (for example, 2.7, 2.8).
+ The `Ansible Roadmap page <https://docs.ansible.com/ansible/devel/roadmap/>`_ details what is planned and how to influence the roadmap.
+
+ Authors
+ =======
+
+ Ansible was created by `Michael DeHaan <https://github.com/mpdehaan>`_
+ and has contributions from over 4700 users (and growing). Thanks everyone!
+
+ `Ansible <https://www.ansible.com>`_ is sponsored by `Red Hat, Inc.
+ <https://www.redhat.com>`_
+
+ License
+ =======
+
+ GNU General Public License v3.0 or later
+
+ See `COPYING <COPYING>`_ to see the full text.
+
+ .. |PyPI version| image:: https://img.shields.io/pypi/v/ansible-base.svg
+ :target: https://pypi.org/project/ansible-base
+ .. |Docs badge| image:: https://img.shields.io/badge/docs-latest-brightgreen.svg
+ :target: https://docs.ansible.com/ansible/latest/
+ .. |Build Status| image:: https://dev.azure.com/ansible/ansible/_apis/build/status/CI?branchName=stable-2.10
+ :target: https://dev.azure.com/ansible/ansible/_build/latest?definitionId=20&branchName=stable-2.10
+ .. |Chat badge| image:: https://img.shields.io/badge/chat-IRC-brightgreen.svg
+ :target: https://docs.ansible.com/ansible/latest/community/communication.html
+ .. |Code Of Conduct| image:: https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg
+ :target: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html
+ :alt: Ansible Code of Conduct
+ .. |Mailing Lists| image:: https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg
+ :target: https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information
+ :alt: Ansible mailing lists
+ .. |License| image:: https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg
+ :target: COPYING
+ :alt: Repository License
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)
+Classifier: Natural Language :: English
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Topic :: System :: Installation/Setup
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*
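
The ``Requires-Python`` line above is what pip evaluates before agreeing
to install the package. A minimal sketch of how that specifier behaves,
assuming the third-party ``packaging`` library is installed::

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*")

    # 2.7 and 3.5+ are accepted; 3.0 through 3.4 are excluded.
    for version in ("2.7", "3.4", "3.5", "3.8"):
        print(version, version in spec)
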
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..44f61270
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,115 @@
+|PyPI version| |Docs badge| |Chat badge| |Build Status| |Code Of Conduct| |Mailing Lists| |License|
+
+*******
+Ansible
+*******
+
+Ansible is a radically simple IT automation system. It handles
+configuration management, application deployment, cloud provisioning,
+ad-hoc task execution, network automation, and multi-node orchestration. Ansible makes complex
+changes like zero-downtime rolling updates with load balancers easy. More information is available on `the Ansible website <https://ansible.com/>`_.
+
+Design Principles
+=================
+
+* Have a dead simple setup process and a minimal learning curve.
+* Manage machines very quickly and in parallel.
+* Avoid custom agents and additional open ports; be agentless by
+ leveraging the existing SSH daemon.
+* Describe infrastructure in a language that is both machine and human
+ friendly.
+* Focus on security and easy auditability/review/rewriting of content.
+* Manage new remote machines instantly, without bootstrapping any
+ software.
+* Allow module development in any dynamic language, not just Python.
+* Be usable as non-root.
+* Be the easiest IT automation system to use, ever.
+
+Use Ansible
+===========
+
+You can install a released version of Ansible via ``pip``, a package manager, or
+our `release repository <https://releases.ansible.com/ansible/>`_. See our
+`installation guide <https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`_ for details on installing Ansible
+on a variety of platforms.
+
+Red Hat offers supported builds of `Ansible Engine <https://www.ansible.com/ansible-engine>`_.
+
+Power users and developers can run the ``devel`` branch, which has the latest
+features and fixes, directly. Although it is reasonably stable, you are more likely to encounter
+breaking changes when running the ``devel`` branch. We recommend getting involved
+in the Ansible community if you want to run the ``devel`` branch.
+
+Get Involved
+============
+
+* Read `Community
+ Information <https://docs.ansible.com/ansible/latest/community>`_ for all
+ kinds of ways to contribute to and interact with the project,
+ including mailing list information and how to submit bug reports and
+ code to Ansible.
+* Join a `Working Group
+ <https://github.com/ansible/community/wiki>`_, an organized community devoted to a specific technology domain or platform.
+* Submit a proposed code update through a pull request to the ``devel`` branch.
+* Talk to us before making larger changes
+ to avoid duplicate efforts. This not only helps everyone
+ know what is going on, it also helps save time and effort if we decide
+ some changes are needed.
+* For a list of email lists, IRC channels and Working Groups, see the
+ `Communication page <https://docs.ansible.com/ansible/latest/community/communication.html>`_
+
+Coding Guidelines
+=================
+
+We document our Coding Guidelines in the `Developer Guide <https://docs.ansible.com/ansible/devel/dev_guide/>`_. We particularly suggest you review:
+
+* `Contributing your module to Ansible <https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_checklist.html>`_
+* `Conventions, tips and pitfalls <https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html>`_
+
+Branch Info
+===========
+
+* The ``devel`` branch corresponds to the release actively under development.
+* The ``stable-2.X`` branches correspond to stable releases.
+* Create a branch based on ``devel`` and set up a `dev environment <https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_general.html#common-environment-setup>`_ if you want to open a PR.
+* See the `Ansible release and maintenance <https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html>`_ page for information about active branches.
+
+Roadmap
+=======
+
+Based on team and community feedback, an initial roadmap will be published for a major or minor version (for example, 2.7, 2.8).
+The `Ansible Roadmap page <https://docs.ansible.com/ansible/devel/roadmap/>`_ details what is planned and how to influence the roadmap.
+
+Authors
+=======
+
+Ansible was created by `Michael DeHaan <https://github.com/mpdehaan>`_
+and has contributions from over 4700 users (and growing). Thanks everyone!
+
+`Ansible <https://www.ansible.com>`_ is sponsored by `Red Hat, Inc.
+<https://www.redhat.com>`_
+
+License
+=======
+
+GNU General Public License v3.0 or later
+
+See `COPYING <COPYING>`_ to see the full text.
+
+.. |PyPI version| image:: https://img.shields.io/pypi/v/ansible-base.svg
+ :target: https://pypi.org/project/ansible-base
+.. |Docs badge| image:: https://img.shields.io/badge/docs-latest-brightgreen.svg
+ :target: https://docs.ansible.com/ansible/latest/
+.. |Build Status| image:: https://dev.azure.com/ansible/ansible/_apis/build/status/CI?branchName=stable-2.10
+ :target: https://dev.azure.com/ansible/ansible/_build/latest?definitionId=20&branchName=stable-2.10
+.. |Chat badge| image:: https://img.shields.io/badge/chat-IRC-brightgreen.svg
+ :target: https://docs.ansible.com/ansible/latest/community/communication.html
+.. |Code Of Conduct| image:: https://img.shields.io/badge/code%20of%20conduct-Ansible-silver.svg
+ :target: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html
+ :alt: Ansible Code of Conduct
+.. |Mailing Lists| image:: https://img.shields.io/badge/mailing%20lists-Ansible-orange.svg
+ :target: https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information
+ :alt: Ansible mailing lists
+.. |License| image:: https://img.shields.io/badge/license-GPL%20v3.0-brightgreen.svg
+ :target: COPYING
+ :alt: Repository License
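
After installing a release as described under "Use Ansible" above, a
two-line check confirms which version ended up on the import path
(``__version__`` is re-exported by the ``ansible`` package)::

    import ansible

    # Prints the installed ansible-base version, e.g. 2.10.4.
    print(ansible.__version__)
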
diff --git a/SYMLINK_CACHE.json b/SYMLINK_CACHE.json
new file mode 100644
index 00000000..9e8c969b
--- /dev/null
+++ b/SYMLINK_CACHE.json
@@ -0,0 +1 @@
+{"script": {"ansible": ["ansible-config", "ansible-console", "ansible-doc", "ansible-galaxy", "ansible-inventory", "ansible-playbook", "ansible-pull", "ansible-vault"]}, "library": {"../release.py": ["ansible/module_utils/ansible_release.py"], "python.py": ["ansible_test/_data/injector/ansible", "ansible_test/_data/injector/ansible-config", "ansible_test/_data/injector/ansible-connection", "ansible_test/_data/injector/ansible-console", "ansible_test/_data/injector/ansible-doc", "ansible_test/_data/injector/ansible-galaxy", "ansible_test/_data/injector/ansible-inventory", "ansible_test/_data/injector/ansible-playbook", "ansible_test/_data/injector/ansible-pull", "ansible_test/_data/injector/ansible-test", "ansible_test/_data/injector/ansible-vault", "ansible_test/_data/injector/importer.py", "ansible_test/_data/injector/pytest"], "main.py": ["ansible_test/_data/sanity/validate-modules/validate-modules"]}} \ No newline at end of file
diff --git a/bin/ansible b/bin/ansible
new file mode 120000
index 00000000..1acbe230
--- /dev/null
+++ b/bin/ansible
@@ -0,0 +1 @@
+../lib/ansible/cli/scripts/ansible_cli_stub.py \ No newline at end of file
diff --git a/bin/ansible-config b/bin/ansible-config
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-config
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-connection b/bin/ansible-connection
new file mode 120000
index 00000000..a20affdb
--- /dev/null
+++ b/bin/ansible-connection
@@ -0,0 +1 @@
+../lib/ansible/cli/scripts/ansible_connection_cli_stub.py \ No newline at end of file
diff --git a/bin/ansible-console b/bin/ansible-console
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-console
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-doc b/bin/ansible-doc
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-doc
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-galaxy
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-inventory b/bin/ansible-inventory
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-inventory
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-playbook
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-pull b/bin/ansible-pull
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-pull
@@ -0,0 +1 @@
+ansible \ No newline at end of file
diff --git a/bin/ansible-test b/bin/ansible-test
new file mode 120000
index 00000000..14f009af
--- /dev/null
+++ b/bin/ansible-test
@@ -0,0 +1 @@
+../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py \ No newline at end of file
diff --git a/bin/ansible-vault b/bin/ansible-vault
new file mode 120000
index 00000000..cabb1f51
--- /dev/null
+++ b/bin/ansible-vault
@@ -0,0 +1 @@
+ansible \ No newline at end of file
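
Most of the ``bin/`` entries above are symlinks to the same CLI stub,
which decides which command to run from the name it was invoked under.
A minimal sketch of that argv[0] dispatch pattern (illustrative only,
not the stub's actual code)::

    import os
    import sys

    def main():
        # "ansible-playbook" -> "playbook"; a bare "ansible" invocation
        # falls through to the ad-hoc CLI.
        name = os.path.basename(sys.argv[0])
        if name.startswith("ansible-"):
            subcommand = name[len("ansible-"):]
        else:
            subcommand = "adhoc"
        print("would dispatch to the %s CLI" % subcommand)

    if __name__ == "__main__":
        main()
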
diff --git a/changelogs/CHANGELOG-v2.10.rst b/changelogs/CHANGELOG-v2.10.rst
new file mode 100644
index 00000000..0eac3697
--- /dev/null
+++ b/changelogs/CHANGELOG-v2.10.rst
@@ -0,0 +1,870 @@
+=======================================================
+Ansible Base 2.10 "When the Levee Breaks" Release Notes
+=======================================================
+
+.. contents:: Topics
+
+
+v2.10.4
+=======
+
+Release Summary
+---------------
+
+| Release Date: 2020-12-14
+| `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+
+Minor Changes
+-------------
+
+- ansible-doc - provide ``has_action`` field in JSON output for modules. That information is currently only available in the text view (https://github.com/ansible/ansible/pull/72359).
+- ansible-galaxy - find any collection dependencies in the globally configured Galaxy servers and not just the server the parent collection is from.
+- ansible-test - Added a ``--export`` option to the ``ansible-test coverage combine`` command to facilitate multi-stage aggregation of coverage in CI pipelines.
+- ansible-test - Added the ``--remote rhel/7.9`` option to run tests on RHEL 7.9.
+- ansible-test - CentOS 8 container is now 8.2.2004 (https://github.com/ansible/distro-test-containers/pull/45).
+- ansible-test - Fix container hostname/IP discovery for the ``acme`` test plugin.
+- ansible-test - OpenSuse container now uses Leap 15.2 (https://github.com/ansible/distro-test-containers/pull/48).
+- ansible-test - Ubuntu containers as well as ``default-test-container`` and ``ansible-base-test-container`` are now slightly smaller due to apt cleanup (https://github.com/ansible/distro-test-containers/pull/46).
+- ansible-test - ``default-test-container`` and ``ansible-base-test-container`` now use Python 3.9.0 instead of 3.9.0rc1.
+- ansible-test - centos6 end of life - container image updated to point to vault base repository (https://github.com/ansible/distro-test-containers/pull/54)
+- ansible-test validate-modules - no longer assume that ``default`` for ``type=bool`` options is ``false``, as the default is ``none`` and for some modules, ``none`` and ``false`` mean different things (https://github.com/ansible/ansible/issues/69561).
+- iptables - reorder comment position to be at the end (https://github.com/ansible/ansible/issues/71444).
+
+Bugfixes
+--------
+
+- Adjust various hard-coded action names to also include their ``ansible.builtin.`` and ``ansible.legacy.`` prefixed version (https://github.com/ansible/ansible/issues/71817, https://github.com/ansible/ansible/issues/71818, https://github.com/ansible/ansible/pull/71824).
+- AnsibleModule - added arg ``ignore_invalid_cwd`` to ``AnsibleModule.run_command()``, to control its behaviour when ``cwd`` is invalid (a sketch follows this list). (https://github.com/ansible/ansible/pull/72390)
+- Fixed an issue where a missing or non-executable `netstat` led to an incorrect command being executed.
+- Improve Ansible config deprecations to show the source of the deprecation (ansible-base). Also remove space before a comma in config deprecations (https://github.com/ansible/ansible/pull/72697).
+- Skip invalid collection names when listing in ansible-doc instead of throwing an exception (https://github.com/ansible/ansible/issues/72257).
+- The ``docker`` and ``k8s`` action groups / module default groups now also support the moved modules in `community.docker <https://galaxy.ansible.com/community/docker>`_, `community.kubevirt <https://github.com/ansible-collections/community.kubevirt>`_, `community.okd <https://galaxy.ansible.com/community/okd>`_, and `kubernetes.core <https://galaxy.ansible.com/kubernetes/core>`_ (https://github.com/ansible/ansible/pull/72428).
+- account for a Python 2.6 bug that occurs during interpreter shutdown, to avoid a stack trace
+- ansible-test - Correctly detect changes in a GitHub pull request when running on Azure Pipelines.
+- ansible-test - Skip installing requirements if they are already installed.
+- ansible-test - ``cryptography`` is now limited to versions prior to 3.2 only when an incompatible OpenSSL version (earlier than 1.1.0) is detected
+- ansible-test - add constraint for ``cffi`` to prevent failure on systems with older versions of ``gcc`` (https://foss.heptapod.net/pypy/cffi/-/issues/480)
+- ansible-test - convert target paths to unicode on Python 2 to avoid ``UnicodeDecodeError`` (https://github.com/ansible/ansible/issues/68398, https://github.com/ansible/ansible/pull/72623).
+- ansible-test - improve classification of changes to ``.gitignore``, ``COPYING``, ``LICENSE``, ``Makefile``, and all files ending with one of ``.in``, ``.md``, ``.rst``, ``.toml``, ``.txt`` in the collection root directory (https://github.com/ansible/ansible/pull/72353).
+- ansible-test validate-modules - when a module uses ``add_file_common_args=True`` and does not use a keyword argument for ``argument_spec`` in ``AnsibleModule()``, the common file arguments were not considered added during validation (https://github.com/ansible/ansible/pull/72334).
+- basic.AnsibleModule - AnsibleModule.run_command silently ignores a non-existent directory in the ``cwd`` argument (https://github.com/ansible/ansible/pull/72390).
+- blockinfile - properly insert a block at the end of a file that does not have a trailing newline character (https://github.com/ansible/ansible/issues/72055)
+- dnf - fix filtering to avoid dependency conflicts (https://github.com/ansible/ansible/issues/72316)
+- ensure 'local' connection always has the correct default user for actions to consume.
+- pause - Fix indefinite hang when using a pause task on a background process (https://github.com/ansible/ansible/issues/32142)
+- remove redundant remote_user setting in play_context for local, as the plugin already does it; this also removes a fork/thread issue from use of the pwd library.
+- set_mode_if_different - handle symlink if it is inside a directory with sticky bit set (https://github.com/ansible/ansible/pull/45198)
+- systemd - account for templated unit files using ``@`` when searching for the unit file (https://github.com/ansible/ansible/pull/72347#issuecomment-730626228)
+- systemd - follow up fix to https://github.com/ansible/ansible/issues/72338 to use ``list-unit-files`` rather than ``list-units`` in order to show all units files on the system.
+- systemd - work around bug with ``systemd`` 245 and 5.8 kernel that does not correctly report service state (https://github.com/ansible/ansible/issues/71528)
+- wait_for - catch and ignore errors when getting active connections with psutil (https://github.com/ansible/ansible/issues/72322)
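
A sketch of the ``run_command`` change noted above: with the new
``ignore_invalid_cwd`` argument set to ``False``, an unusable ``cwd``
fails the task instead of being silently ignored (the surrounding
argument_spec is illustrative)::

    from ansible.module_utils.basic import AnsibleModule

    def main():
        module = AnsibleModule(argument_spec=dict(
            path=dict(type='str', required=True)))
        # Fail loudly if cwd does not exist or is not usable.
        rc, out, err = module.run_command(
            ['ls', '-l'], cwd=module.params['path'],
            ignore_invalid_cwd=False)
        module.exit_json(changed=False, rc=rc, stdout=out)

    if __name__ == '__main__':
        main()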
+
+v2.10.3
+=======
+
+Release Summary
+---------------
+
+| Release Date: 2020-11-02
+| `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+
+Minor Changes
+-------------
+
+- ansible-test - Add a ``--docker-network`` option to choose the network for running containers when using the ``--docker`` option.
+- ansible-test - Collections can now specify pip constraints for unit and integration test requirements using ``tests/unit/constraints.txt`` and ``tests/integration/constraints.txt`` respectively.
+- ansible-test - python-cryptography is now bounded at <3.2, as 3.2 drops support for OpenSSL 1.0.2 upon which some of our CI infrastructure still depends.
+- dnf - now shows specific package changes (installations/removals) under ``results`` in check_mode. (https://github.com/ansible/ansible/issues/66132)
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- ansible-galaxy login command has been removed (see https://github.com/ansible/ansible/issues/71560)
+
+Bugfixes
+--------
+
+- Collection callbacks were ignoring options and rules for stdout and adhoc cases.
+- Collections - Ensure ``action_loader.get`` is called with ``collection_list`` to properly find collections when ``collections:`` search is specified (https://github.com/ansible/ansible/issues/72170)
+- Fix ``RecursionError`` when templating large vars structures (https://github.com/ansible/ansible/issues/71920)
+- ansible-doc - plugin option deprecations now also get ``collection_name`` added (https://github.com/ansible/ansible/pull/71735).
+- ansible-test - Always connect additional Docker containers to the network used by the current container (if any).
+- ansible-test - Always map ``/var/run/docker.sock`` into test containers created by the ``--docker`` option if the docker host is not ``localhost``.
+- ansible-test - Attempt to detect the Docker hostname instead of assuming ``localhost``.
+- ansible-test - Correctly detect running in a Docker container on Azure Pipelines.
+- ansible-test - Prefer container IP at ``.NetworkSettings.Networks.{NetworkName}.IPAddress`` over ``.NetworkSettings.IPAddress``.
+- ansible-test - The ``cs`` and ``openshift`` test plugins now search for containers on the current network instead of assuming the ``bridge`` network.
+- ansible-test - Using the ``--remote`` option on Azure Pipelines now works from a job running in a container.
+- async_wrapper - Fix race condition when ``~/.ansible_async`` folder tries to be created by multiple async tasks at the same time - https://github.com/ansible/ansible/issues/59306
+- dnf - it is now possible to specify both ``security: true`` and ``bugfix: true`` to install updates of both types. Previously, only security would get installed if both were true. (https://github.com/ansible/ansible/issues/70854)
+- facts - fix distribution fact for SLES4SAP (https://github.com/ansible/ansible/pull/71559).
+- is_string/vault - Ensure the is_string helper properly identifies AnsibleVaultEncryptedUnicode as a string (https://github.com/ansible/ansible/pull/71609)
+- powershell - remove getting the PowerShell version from the env var ``POWERSHELL_VERSION``. This feature never worked properly and can cause conflicts with other libraries that use this var
+- url lookup - make sure that options supplied in ansible.cfg are actually used (https://github.com/ansible/ansible/pull/71736).
+- user - AnsibleModule.run_command returns a tuple of return code, stdout and stderr. The module main function of the user module expects user.create_user to return a tuple of return code, stdout and stderr. Fix the locations where stdout and stderr got reversed.
+- user - Local users with an expiry date cannot be created as the ``luseradd`` / ``lusermod`` commands do not support the ``-e`` option. Set the expiry time in this case via ``lchage`` after the user was created / modified. (https://github.com/ansible/ansible/issues/71942)
+
+v2.10.2
+=======
+
+Release Summary
+---------------
+
+| Release Date: 2020-10-05
+| `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+
+Minor Changes
+-------------
+
+- ansible-test - Raise the number of bytes scanned by ansible-test to determine if a file is binary to 4096.
+
+Bugfixes
+--------
+
+- Pass the connection's timeout to connection plugins instead of the task's timeout.
+- Provide more information in AnsibleUndefinedVariable (https://github.com/ansible/ansible/issues/55152)
+- ansible-doc - properly show plugin name when ``name:`` is used instead of ``<plugin_type>:`` (https://github.com/ansible/ansible/pull/71966).
+- ansible-test - Change classification using ``--changed`` now consistently handles common configuration files for supported CI providers.
+- ansible-test - The ``resource_prefix`` variable provided to tests running on Azure Pipelines is now converted to lowercase to match other CI providers.
+- collection loader - fix bogus code coverage entries for synthetic packages
+- psrp - Fix hang when copying an empty file to the remote target
+- runas - create a new token when running as ``SYSTEM`` to ensure it has the full privileges assigned to that account
+
+v2.10.1
+=======
+
+Release Summary
+---------------
+
+| Release Date: 2020-09-14
+| `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+
+Minor Changes
+-------------
+
+- Fixed ansible-doc to not substitute for words followed by parenthesis. For instance, ``IBM(International Business Machines)`` will no longer be substituted with a link to a non-existent module. https://github.com/ansible/ansible/pull/71070
+- Updated network integration auth timeout to 90 secs.
+- ansible-doc will now format ``L()``, ``R()``, and ``HORIZONTALLINE`` in plugin docs just as the website docs do. https://github.com/ansible/ansible/pull/71070
+- ansible-test - Add ``macos/10.15`` as a supported value for the ``--remote`` option.
+- ansible-test - Allow custom ``--remote-stage`` options for development and testing.
+- ansible-test - Fix ``ansible-test coverage`` reporting sub-commands (``report``, ``html``, ``xml``) on Python 2.6.
+- ansible-test - Remove ``pytest < 6.0.0`` constraint for managed installations on Python 3.x now that pytest 6 is supported.
+- ansible-test - Remove the discontinued ``us-east-2`` choice from the ``--remote-aws-region`` option.
+- ansible-test - Request remote resources by provider name for all provider types.
+- ansible-test - Show a warning when the obsolete ``--remote-aws-region`` option is used.
+- ansible-test - Support custom remote endpoints with the ``--remote-endpoint`` option.
+- ansible-test - Update built-in service endpoints for the ``--remote`` option.
+- ansible-test - Use new endpoint for Parallels based instances with the ``--remote`` option.
+- ansible-test - default container now uses default-test-container 2.7.0 and ansible-base-test-container 1.6.0. This brings in Python 3.9.0rc1 for testing.
+- ansible-test - the ACME test container was updated, it now supports external account creation and has a basic OCSP responder (https://github.com/ansible/ansible/pull/71097, https://github.com/ansible/acme-test-container/releases/tag/2.0.0).
+- galaxy - add documentation about galaxy parameters in examples/ansible.cfg (https://github.com/ansible/ansible/issues/68402).
+- iptables - add a note about ipv6-icmp in protocol parameter (https://github.com/ansible/ansible/issues/70905).
+- setup.py - Skip doing conflict checks for ``sdist`` and ``egg_info`` commands (https://github.com/ansible/ansible/pull/71310)
+- subelements - clarify the lookup plugin documentation for parameter handling (https://github.com/ansible/ansible/issues/38182).
+
+Security Fixes
+--------------
+
+- **security issue** - copy - Redact the value of the no_log 'content' parameter in the result's invocation.module_args in check mode. Previously when used with check mode and with '-vvv', the module would not censor the content if a change would be made to the destination path. (CVE-2020-14332)
+- The fix for CVE-2020-1736 has been reverted. Users are encouraged to specify a ``mode`` parameter in their file-based tasks when the files being manipulated contain sensitive data.
+- dnf - Previously, regardless of the ``disable_gpg_check`` option, packages were not GPG validated. They are now. (CVE-2020-14365)
+
+Bugfixes
+--------
+
+- ANSIBLE_COLLECTIONS_PATHS - remove deprecation so that users of Ansible 2.9 and 2.10+ can use the same var when specifying a collection path without a warning.
+- Fixed confirmed commit failing with TypeError in the IOS XR netconf plugin (https://github.com/ansible-collections/cisco.iosxr/issues/74)
+- Ensure password passed in by -k is used on delegated hosts that do not have ansible_password set
+- Fix an exit code for a non-failing playbook (https://github.com/ansible/ansible/issues/71306)
+- Fix execution of the meta tasks 'clear_facts', 'clear_host_errors', 'end_play', 'end_host', and 'reset_connection' when the CLI flag '--flush-cache' is provided.
+- Fix statistics reporting when rescue block contains another block (issue https://github.com/ansible/ansible/issues/61253).
+- Fixed Ansible reporting validate not supported by netconf server when enabled in netconf - (https://github.com/ansible-collections/ansible.netcommon/issues/119).
+- Skip literal_eval for string filters results in native jinja. (https://github.com/ansible/ansible/issues/70831)
+- Strategy - Ensure we only process expected types from the results queue and produce warnings for any object we receive from the queue that doesn't match our expectations. (https://github.com/ansible/ansible/issues/70023)
+- TOML inventory - Ensure we register dump functions for ``AnsibleUnsafe`` to support dumping unsafe values. Note that the TOML format has no functionality to mark that the data is unsafe for re-consumption. (https://github.com/ansible/ansible/issues/71307)
+- ansible-galaxy download - fix bug when downloading a collection in a SCM subdirectory
+- ansible-test units - fixed collection location code to work under pytest >= 6.0.0
+- avoid clobbering existing facts inside loop when task also returns ansible_facts.
+- cron - cron file should not be empty after adding var (https://github.com/ansible/ansible/pull/71207)
+- fortimanager httpapi plugin - fix redirect to point to the ``fortinet.fortimanager`` collection (https://github.com/ansible/ansible/pull/71073).
+- gluster modules - fix redirect to point to the ``gluster.gluster`` collection (https://github.com/ansible/ansible/pull/71240).
+- linux network facts - get the correct value for broadcast address (https://github.com/ansible/ansible/issues/64384)
+- native jinja2 types - properly handle Undefined in nested data.
+- powershell - fix escaping of strings that broke modules like fetch when dealing with special chars - https://github.com/ansible/ansible/issues/62781
+- powershell - fix the CLIXML parser when it contains nested CLIXML objects - https://github.com/ansible/ansible/issues/69550
+- psrp - Use native PSRP mechanism when copying files to support custom endpoints
+- strftime filter - Input epoch is allowed to be a float (https://github.com/ansible/ansible/issues/71257)
+- systemd - fixed chroot usage on new versions of systemd, that broke because of upstream changes in systemctl output
+- systemd - made the systemd module work correctly when the SYSTEMD_OFFLINE environment variable is set
+- templating - fix error message for ``x in y`` when y is undefined (https://github.com/ansible/ansible/issues/70984)
+- unarchive - check ``fut_gid`` against ``run_gid`` in addition to supplemental groups (https://github.com/ansible/ansible/issues/49284)
+
+v2.10.0
+=======
+
+Release Summary
+---------------
+
+| Release Date: 2020-08-13
+| `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+
+Major Changes
+-------------
+
+- Both ansible-doc and ansible-console's help command will error for modules and plugins whose return documentation cannot be parsed as YAML. All modules and plugins passing ``ansible-test sanity --test yamllint`` will not be affected by this.
+- Collections may declare a list of supported/tested Ansible versions for the collection. A warning is issued if a collection does not support the Ansible version that loads it (can also be configured as silent or a fatal error). Collections that do not declare supported Ansible versions do not issue a warning/error.
+- Plugin routing allows collections to declare deprecation, redirection targets, and removals for all plugin types.
+- Plugins that import module_utils and other ansible namespaces that have moved to collections should continue to work unmodified.
+- Routing data built into Ansible 2.10 ensures that 2.9 content should work unmodified on 2.10. Formerly included modules and plugins that were moved to collections are still accessible by their original unqualified names, so long as their destination collections are installed.
+- When deprecations are done in code, they need to specify a ``collection_name`` so that deprecation warnings can mention which collection - or ansible-base - is deprecating a feature. This affects all ``Display.deprecated()`` or ``AnsibleModule.deprecate()`` or ``Ansible.Basic.Deprecate()`` calls, and ``removed_in_version``/``removed_at_date`` or ``deprecated_aliases`` in module argument specs (a sketch follows this list).
+- ansible-test now uses a different ``default`` test container for Ansible Collections
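
A sketch of the ``collection_name`` requirement noted above, from a
module's perspective (the message and version are illustrative)::

    from ansible.module_utils.basic import AnsibleModule

    def main():
        module = AnsibleModule(argument_spec={})
        # The resulting warning names the collection (or ansible-base)
        # doing the deprecating.
        module.deprecate(
            'The foo option does nothing and will be removed',
            version='2.14', collection_name='ansible.builtin')
        module.exit_json(changed=False)

    if __name__ == '__main__':
        main()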
+
+Minor Changes
+-------------
+
+- The 'Edit on GitHub' link for plugin and CLI documentation now navigates to the correct plugin or CLI source.
+- Add 'auth_url' field to galaxy server config stanzas in ansible.cfg. The URL should point to the token_endpoint of a Keycloak server.
+- Add --ask-vault-password and --vault-pass-file options to ansible cli commands
+- Add ``--pre`` flag to ``ansible-galaxy collection install`` to allow pulling in the most recent pre-release version of a collection (https://github.com/ansible/ansible/issues/64905)
+- Add a global toggle to control when vars plugins are executed (per task by default for backward compatibility or after importing inventory).
+- Add a new config parameter, WIN_ASYNC_STARTUP_TIMEOUT, which allows configuration of the named pipe connection timeout under Windows when launching async tasks.
+- Add a per-plugin stage option to override the global toggle to control the execution of individual vars plugins (per task, after inventory, or both).
+- Add an additional check for importing journal from systemd-python module (https://github.com/ansible/ansible/issues/60595).
+- Add an example for using var in with_sequence (https://github.com/ansible/ansible/issues/68836).
+- Add new magic variable ``ansible_collection`` that contains the collection name
+- Add new magic variable ``ansible_role_name`` that contains the FQCN of the role
+- Add standard Python 2/3 compatibility boilerplate to setup script, module_utils and docs_fragments which were missing them.
+- Added PopOS as part of the Debian OS distribution family (https://github.com/ansible/ansible/issues/69286).
+- Added hostname support for PopOS in the hostname module.
+- Added openEuler OS to the RedHat OS family.
+- Added the ability to set ``DEFAULT_NO_TARGET_SYSLOG`` through the ``ansible_no_target_syslog`` variable on a task
+- Ansible CLI now fails with a warning if the extra_vars parameter is used with a filename missing the @ sign (https://github.com/ansible/ansible/issues/51857).
+- Ansible modules created with ``add_file_common_args=True`` added a number of undocumented arguments which were mostly there to ease implementing certain action plugins. The undocumented arguments ``src``, ``follow``, ``force``, ``content``, ``backup``, ``remote_src``, ``regexp``, ``delimiter``, and ``directory_mode`` are now no longer added. Modules relying on these options to be added need to specify them by themselves. Also, action plugins relying on these extra elements in ``FILE_COMMON_ARGUMENTS`` need to be adjusted.
+- Ansible now allows deprecation by date instead of deprecation by version. This is possible for plugins and modules (``meta/runtime.yml`` and ``deprecated.removed_at_date`` in ``DOCUMENTATION``, instead of ``deprecated.removed_in``), for plugin options (``deprecated.date`` instead of ``deprecated.version`` in ``DOCUMENTATION``), for module options (``removed_at_date`` instead of ``removed_in_version`` in argument spec), and for module option aliases (``deprecated_aliases.date`` instead of ``deprecated_aliases.version`` in argument spec).
+- Ansible now fails with an error when a non-existent limit file is provided on the command line.
+- Ansible.Basic - Added the ability to specify multiple fragments to load in a generic way for modules that use a module_util with fragment options
+- Ansible.Basic.cs - Added support for ``deprecated_aliases`` to deprecate aliases in a standard way
+- Ansible.ModuleUtils.WebRequest - Move username and password aliases out of util to avoid option name collision
+- Change order of arguments in ansible cli to use --ask-vault-password and --vault-password-file by default
+- CollectionRequirement - Add a metadata property to update and retrieve the _metadata attribute.
+- Command module: Removed suggestions to use modules which have moved to collections and out of ansible-base
+- Enable Ansible Collections loader to discover and import collections from ``site-packages`` dir and ``PYTHONPATH``-added locations.
+- Enable testing the AIX platform as a remote OS in ansible-test
+- Flatten the directory hierarchy of modules
+- Ignore plesk-release file while parsing distribution release (https://github.com/ansible/ansible/issues/64101).
+- Openstack inventory script is migrated to ansible-openstack-collection, adjusted the link in documentation accordingly.
+- Openstack inventory script is moved to openstack.cloud from community.general.
+- PowerShell Add-Type - Add an easier way to reference extra types when compiling C# code on PowerShell Core
+- PowerShell Add-Type - Added the ``X86`` and ``AMD64`` preprocessor symbols for conditional compiling
+- Prevent losing useful error information by including both the loop and the conditional error messages (https://github.com/ansible/ansible/issues/66529)
+- Provides additional information about collection namespace name restrictions (https://github.com/ansible/ansible/issues/65151).
+- Raise error when no task file is provided to import_tasks (https://github.com/ansible/ansible/issues/54095).
+- Refactor test_distribution_version testcases.
+- Remove the deprecation message for the ``TRANSFORM_INVALID_GROUP_CHARS`` setting. (https://github.com/ansible/ansible/issues/61889)
+- Removed extras_require support from setup.py (and [azure] extra). Requirements will float with the collections, so it's not appropriate for ansible-base to host requirements for them any longer.
+- Simplify dict2items filter example in loop documentation (https://github.com/ansible/ansible/issues/65505).
+- Templating - Add globals to the jinja2 environment at ``Templar`` instantiation, instead of customizing the template object. The template object is now customized only to disable lookups. (https://github.com/ansible/ansible/pull/69278)
+- Templating - Add support to auto unroll generators produced by jinja2 filters, to prevent the need of explicit use of ``|list`` (https://github.com/ansible/ansible/pull/68014)
+- The plugin loader now keeps track of the collection where a plugin was resolved to, in particular whether the plugin was loaded from ansible-base's internal paths (``ansible.builtin``) or from user-supplied paths (no collection name).
+- The results queue and counter for results are now split for standard / handler results. This allows the governing strategy to be truly independent from the handler strategy, which basically follows the linear methodology.
+- Update required library message with correct grammar in basic.py.
+- Updated inventory script location for EC2, Openstack, and Cobbler after collection migration (https://github.com/ansible/ansible/issues/68897).
+- Updated inventory script location for infoblox, ec2 and other after collection migration (https://github.com/ansible/ansible/issues/69139).
+- Updates ``ansible_role_names``, ``ansible_play_role_names``, and ``ansible_dependent_role_names`` to include the FQCN
+- Use OrderedDict by default when importing mappings from YAML.
+- Windows - Add a check for the minimum PowerShell version so we can create a friendly error message on older hosts
+- Windows - add deprecation notice in the Windows setup module when running on Server 2008, 2008 R2, and Windows 7
+- ``AnsibleModule.fail_json()`` has always required that a message be passed in which informs the end user why the module failed. In the past this message had to be passed as the ``msg`` keyword argument, but it can now be passed as the first positional argument instead (see the sketch after this list).
+- ``AnsibleModule.load_file_common_arguments`` now allows simply overriding ``path``.
+- add mechanism for storing warnings and deprecations globally and not attached to an ``AnsibleModule`` object (https://github.com/ansible/ansible/pull/58993)
+- added more ways to configure new uri options in 2.10.
+- ansible-doc - improve suboptions formatting (https://github.com/ansible/ansible/pull/69795).
+- ansible-doc - now indicates if an option is added by a doc fragment from another collection by prepending the collection name, or ``ansible.builtin`` for ansible-base, to the version number.
+- ansible-doc - return values will be properly formatted (https://github.com/ansible/ansible/pull/69796).
+- ansible-galaxy - Add ``download`` option for ``ansible-galaxy collection`` to download collections and their dependencies for an offline install
+- ansible-galaxy - Add a ``verify`` subcommand to ``ansible-galaxy collection``. The collection found on the galaxy server is downloaded to a tempfile to compare the checksums of the files listed in the MANIFEST.json and the FILES.json with the contents of the installed collection.
+- ansible-galaxy - Add installation successful message
+- ansible-galaxy - Added the ability to display the progress wheel through the C.GALAXY_DISPLAY_PROGRESS config option. Also this now defaults to displaying the progress wheel if stdout has a tty.
+- ansible-galaxy - Added the ability to ignore further files and folders using a pattern with the ``build_ignore`` key in a collection's ``galaxy.yml`` (https://github.com/ansible/ansible/issues/59228).
+- ansible-galaxy - Allow installing collections from git repositories.
+- ansible-galaxy - Always ignore the ``tests/output`` directory when building a collection as it is used by ``ansible-test`` for test output (https://github.com/ansible/ansible/issues/59228).
+- ansible-galaxy - Change the output verbosity level of the download message from 3 to 0 (https://github.com/ansible/ansible/issues/70010)
+- ansible-galaxy - Display message if both collections and roles are specified in a requirements file but can't be installed together.
+- ansible-galaxy - Install both collections and roles with ``ansible-galaxy install -r requirements.yml`` in certain scenarios.
+- ansible-galaxy - Requirement entries for collections now support a 'type' key to indicate whether the collection is a galaxy artifact, file, url, or git repo.
+- ansible-galaxy - add ``--token`` argument which is the same as ``--api-key`` (https://github.com/ansible/ansible/issues/65955)
+- ansible-galaxy - add ``collection list`` command for listing installed collections (https://github.com/ansible/ansible/pull/65022)
+- ansible-galaxy - add ``validate_collection_path()`` utility function
+- ansible-galaxy - add collections path argument
+- ansible-galaxy - allow role to define dependency requirements that will be only installed by defining them in ``meta/requirements.yml`` (https://github.com/ansible/proposals/issues/57)
+- ansible-test - --docker flag now has an associated --docker-terminate flag which controls if and when the docker container is removed following tests
+- ansible-test - Add a test to prevent ``state=get``
+- ansible-test - Add a test to prevent ``state=list`` and ``state=info``
+- ansible-test - Add a verbosity option for displaying warnings.
+- ansible-test - Add support for Python 3.9.
+- ansible-test - Added CI provider support for Azure Pipelines.
+- ansible-test - Added a ``ansible-test coverage analyze targets filter`` command to filter aggregated coverage reports by path and/or target name.
+- ansible-test - Added a ``ansible-test coverage analyze targets`` command to analyze integration test code coverage by test target.
+- ansible-test - Added support for Ansible Core CI request signing for Shippable.
+- ansible-test - Added support for testing on Fedora 32.
+- ansible-test - General code cleanup.
+- ansible-test - Now includes testing support for RHEL 8.2
+- ansible-test - Provisioning of RHEL instances now includes installation of pinned versions of ``packaging`` and ``pyparsing`` to match the downstream vendored versions.
+- ansible-test - Refactor code to consolidate filesystem access and improve handling of encoding.
+- ansible-test - Refactored CI related logic into a basic provider abstraction.
+- ansible-test - Remove obsolete support for provisioning remote vCenter instances. The supporting services are no longer available.
+- ansible-test - Report the correct line number in the ``yamllint`` sanity test when reporting ``libyaml`` parse errors in module documentation.
+- ansible-test - Support writing compact JSON files instead of formatting and indenting the output.
+- ansible-test - Update Ubuntu 18.04 test container to version 1.13 which includes ``venv``
+- ansible-test - Update ``default-test-container`` to version 1.11, which includes Python 3.9.0a4.
+- ansible-test - Updated the default test containers to include Python 3.9.0b3.
+- ansible-test - Upgrade OpenSUSE containers to use Leap 15.1.
+- ansible-test - Upgrade distro test containers from 1.16.0 to 1.17.0
+- ansible-test - Upgrade from ansible-base-test-container 1.1 to 2.2
+- ansible-test - Upgrade from default-test-container 2.1 to 2.2
+- ansible-test - ``mutually_exclusive``, ``required_if``, ``required_by``, ``required_together`` and ``required_one_of`` in modules are now validated.
+- ansible-test - ``validate-modules`` now also accepts an ISO 8601 formatted date as ``deprecated.removed_at_date``, instead of requiring a version number in ``deprecated.removed_in``.
+- ansible-test - ``validate-modules`` now makes sure that module documentation deprecation removal version and/or date matches with removal version and/or date in meta/runtime.yml.
+- ansible-test - ``validate-modules`` now validates all version numbers in documentation and argument spec. Version numbers for collections are checked for being valid semantic versioning version number strings.
+- ansible-test - add ``validate-modules`` tests for ``removed_in_version`` and ``deprecated_aliases`` (https://github.com/ansible/ansible/pull/66920/).
+- ansible-test - add check for ``print()`` calls in modules and module_utils.
+- ansible-test - added a ``--no-pip-check`` option
+- ansible-test - added a ``--venv-system-site-packages`` option for use with the ``--venv`` option
+- ansible-test - added new ``changelog`` test, which runs if a `antsibull-changelog <https://pypi.org/project/antsibull-changelog/>`_ configuration or files in ``changelogs/fragments/`` are found (https://github.com/ansible/ansible/pull/69313).
+- ansible-test - allow delegation config to specify equivalents to the ``--no-pip-check``, ``--disable-httptester`` and ``--no-temp-unicode`` options
+- ansible-test - allow sanity tests to check for optional errors by specifying ``--enable-optional-errors`` (https://github.com/ansible/ansible/pull/66920/).
+- ansible-test - also run the ``ansible-doc`` sanity test with ``--json`` to ensure that the documentation does not contain something that cannot be exported as JSON (https://github.com/ansible/ansible/issues/69238).
+- ansible-test - enable deprecated version testing for modules and ``module.deprecate()`` calls (https://github.com/ansible/ansible/pull/66920/).
+- ansible-test - extend alias validation.
+- ansible-test - fixed ``units`` command with ``--docker`` to (mostly) work under podman
+- ansible-test - improve module validation so that ``default``, ``sample`` and ``example`` contain JSON values and not arbitrary YAML values, like ``datetime`` objects or dictionaries with non-string keys.
+- ansible-test - module validation will now consider arguments added by ``add_file_common_arguments=True`` correctly.
+- ansible-test - switch from testing RHEL 8.0 and RHEL 8.1 Beta to RHEL 8.1
+- ansible-test - the argument spec of modules is now validated by a YAML schema.
+- ansible-test - the module validation code now checks whether ``elements`` documentation for options matches the argument_spec.
+- ansible-test - the module validation code now checks whether ``elements`` is defined when ``type=list``
+- ansible-test - the module validation code now checks whether ``requirement`` for options is documented correctly.
+- ansible-test - add pyparsing constraint for Python 2.x to avoid compatibility issues with the upcoming pyparsing 3 release
+- ansible-test defaults to redacting sensitive values (disable with the ``--no-redact`` option)
+- ansible-test has been updated to use ``default-test-container:1.13`` which includes fewer Python requirements now that most modules and tests have been migrated to collections.
+- ansible-test no longer detects ``git`` submodule directories as files.
+- ansible-test no longer provides a ``--tox`` option. Use the ``--venv`` option instead. This only affects testing the Ansible source. The feature was never available for Ansible Collections or when running from an Ansible install.
+- ansible-test no longer tries to install sanity test dependencies on unsupported Python versions
+- ansible-test now checks for the minimum and maximum supported versions when importing ``coverage``
+- ansible-test now filters out unnecessary warnings and messages from pip when installing its own requirements
+- ansible-test now has a ``--list-files`` option to list files using the ``env`` command.
+- ansible-test now includes the ``pylint`` plugin ``mccabe`` in optional sanity tests enabled with ``--enable-optional-errors``
+- ansible-test now places the ansible source and collections content in separate directories when using the ``--docker`` or ``--remote`` options.
+- ansible-test now provides a more helpful error when loading coverage files created by ``coverage`` version 5 or later
+- ansible-test now supports provisioning of network resources when testing network collections
+- ansible-test now supports skip aliases in the format ``skip/{arch}/{platform}`` and ``skip/{arch}/{platform}/{version}`` where ``arch`` can be ``power``. These aliases are only effective for the ``--remote`` option.
+- ansible-test now supports skip aliases in the format ``skip/{platform}/{version}`` for the ``--remote`` option. This is preferred over the older ``skip/{platform}{version}`` format which included no ``/`` between the platform and version.
+- ansible-test now supports testing against RHEL 7.8 when using the ``--remote`` option.
+- ansible-test now supports the ``--remote power/centos/7`` platform option.
+- ansible-test now validates the schema of ansible_builtin_runtime.yml and a collections meta/runtime.yml file.
+- ansible-test provides clearer error messages when failing to detect the provider to use with the ``--remote`` option.
+- ansible-test provisioning of network devices for ``network-integration`` has been updated to use collections.
+- ansible_native_concat() - use ``to_text`` function rather than Jinja2's ``text_type`` which has been removed in Jinja2 master branch.
+- apt - Implemented an exponential backoff behaviour when retrying to update the cache with new params ``update_cache_retry_max_delay`` and ``update_cache_retries`` to control the behavior.
+- apt_repository - Implemented an exponential backoff behaviour when retrying to update the apt cache with new params ``update_cache_retry_max_delay`` and ``update_cache_retries`` to control the behavior.
+- blockinfile - Update module documentation to clarify insertbefore/insertafter usage.
+- callbacks - Allow modules to return ``None`` as before/after entries for diff. This should make it easier for modules to report the "not existing" state of the entity they touched.
+- combine filter - now accepts a ``list_merge`` argument which modifies its behaviour when the hashes to merge contain arrays/lists.
+- conditionals - change the default of CONDITIONAL_BARE_VARS to False (https://github.com/ansible/ansible/issues/70682).
+- config - accept singular version of ``collections_path`` ini setting and ``ANSIBLE_COLLECTIONS_PATH`` environment variable setting
+- core filters - Adding ``path_join`` filter to the core filters list
+- debconf - add a note about no_log=True since module might expose sensitive information to logs (https://github.com/ansible/ansible/issues/32386).
+- default_callback - moving 'check_mode_markers' documentation in default_callback doc_fragment (https://github.com/ansible-collections/community.general/issues/565).
+- distro - Update bundled version of distro from 1.4.0 to 1.5.0
+- dnf - Properly handle idempotent transactions with package name wildcard globs (https://github.com/ansible/ansible/issues/62809)
+- dnf - Properly handle module AppStreams that don't define stream (https://github.com/ansible/ansible/issues/63683)
+- dnf - add a param to pass ``allowerasing`` to dnf
+- downstream packagers may install packages under ansible._vendor, which will be added to the head of sys.path when the ansible package loads
+- file - specifying ``src`` without ``state`` is now an error
+- get_bin_path() - change the interface to always raise ``ValueError`` if the command is not found (https://github.com/ansible/ansible/pull/56813); see the sketch after this list
+- get_url - Remove deprecated string format support for the headers option (https://github.com/ansible/ansible/issues/61891)
+- git - added an ``archive_prefix`` option to set a prefix to add to each file path in archive
+- host_group_vars plugin - Require whitelisting and whitelist by default.
+- new magic variable - ``ansible_config_file`` - full path of used Ansible config file
+- package_facts.py - Add support for Pacman package manager.
+- pipe lookup - update docs for Popen with shell=True usages (https://github.com/ansible/ansible/issues/70159).
+- plugin loader - Add MODULE_IGNORE_EXTS config option to skip over certain extensions when looking for script and binary modules.
+- powershell (shell plugin) - Fix ``join_path`` to support UNC paths (https://github.com/ansible/ansible/issues/66341)
+- regex_replace filter - add multiline support (https://github.com/ansible/ansible/issues/61985)
+- rename ``_find_existing_collections()`` to ``find_existing_collections()`` to reflect its use across multiple files
+- reorganized code for the ``ansible-test coverage`` command for easier maintenance and feature additions
+- service_facts - Added undocumented 'indirect' and 'static' as service status (https://github.com/ansible/ansible/issues/69752).
+- ssh - connection plugin now supports a new variable ``sshpass_prompt`` which gets passed to ``sshpass`` allowing the user to set a custom substring to search for a password prompt (requires sshpass 1.06+)
+- systemd - default scope is now explicitly "system"
+- tests - Add new ``truthy`` and ``falsy`` jinja2 tests to evaluate the truthiness or falsiness of a value
+- to_nice_json filter - Removed now-useless exception handler
+- to_uuid - add a named parameter to let the user optionally set a custom namespace
+- update ansible-test default-test-container from version 1.13 to 1.14, which includes an update from Python 3.9.0a6 to Python 3.9.0b1
+- update ansible-test default-test-container from version 1.9.1 to 1.9.2
+- update ansible-test default-test-container from version 1.9.2 to 1.9.3
+- update ansible-test default-test-container from version 1.9.3 to 1.10.1
+- update ansible-test images to 1.16.0, which includes system updates and pins CentOS versions
+- uri/galaxy - Add new ``prepare_multipart`` helper function for creating a ``multipart/form-data`` body (https://github.com/ansible/ansible/pull/69376)
+- url_lookup_plugin - add parameters to match what is available in ``module_utils/urls.py``
+- user - allow the ``groups`` and ``append`` parameters to be used with ``local``
+- user - using ``append: True`` without setting a list of groups is currently a no-op with a warning, and will change to an error in 2.14. (https://github.com/ansible/ansible/pull/65795)
+- validate-modules now checks deprecations in collections against meta/runtime.yml
+- validation - Sort missing parameters in exception message thrown by check_required_arguments
+- vars plugins - Support vars plugins in collections by adding the ability to whitelist plugins.
+- vars_prompt - throw error when encountering unsupported key
+- win_package - Added proxy support for retrieving packages from a URL - https://github.com/ansible/ansible/issues/43818
+- win_package - Added support for ``.appx``, ``.msix``, ``.appxbundle``, and ``.msixbundle`` package - https://github.com/ansible/ansible/issues/50765
+- win_package - Added support for ``.msp`` packages - https://github.com/ansible/ansible/issues/22789
+- win_package - Added support for specifying the HTTP method when getting files from a URL - https://github.com/ansible/ansible/issues/35377
+- win_package - Read uninstall strings from the ``QuietUninstallString`` if present to better support argumentless uninstalls of registry based packages.
+- win_package - Scan packages in the current user's registry hive - https://github.com/ansible/ansible/issues/45950
+- windows collections - Support relative module util imports in PowerShell modules and module_utils
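+
+A minimal sketch tying together the ``fail_json()`` and ``get_bin_path()`` changes above (the module body and binary name are illustrative, not from the release itself):
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+    from ansible.module_utils.common.process import get_bin_path
+
+    def main():
+        module = AnsibleModule(argument_spec={})
+        try:
+            # get_bin_path() now always raises ValueError when the command
+            # cannot be found, instead of silently returning None.
+            git_path = get_bin_path('git')
+        except ValueError as e:
+            # The failure message may now be passed as the first positional
+            # argument instead of the msg keyword argument.
+            module.fail_json('Unable to find git: %s' % e)
+        module.exit_json(changed=False, git_path=git_path)
+
+    if __name__ == '__main__':
+        main()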
+
+Deprecated Features
+-------------------
+
+- Using the DefaultCallback without the corresponding doc_fragment or copying the documentation.
+- hash_behaviour - Deprecate ``hash_behaviour`` for future removal.
+- script inventory plugin - The 'cache' option is deprecated and will be removed in 2.12. Its use has been removed from the plugin since it has never had any effect.
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- core - remove support for ``check_invalid_arguments`` in ``AnsibleModule``, ``AzureModule`` and ``UTMModule`` (see the sketch below).
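+
+A minimal before/after sketch (the argument spec is illustrative) of what this removal means for module code:
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+
+    # Previously a module could opt out of argument checking with
+    #   AnsibleModule(argument_spec=..., check_invalid_arguments=False)
+    # The keyword has been removed and unknown arguments now always fail,
+    # so the option must simply be dropped:
+    module = AnsibleModule(argument_spec=dict(name=dict(type='str')))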
+
+Security Fixes
+--------------
+
+- **security issue** - Convert CLI provided passwords to text initially, to prevent unsafe context being lost when converting from bytes->text during post processing of PlayContext. This prevents CLI provided passwords from being incorrectly templated (CVE-2019-14856)
+- **security issue** - Redact cloud plugin secrets in ansible-test when running integration tests using cloud plugins. Only present in 2.9.0b1.
+- **security issue** - TaskExecutor - Ensure we don't erase unsafe context in TaskExecutor.run on bytes. Only present in 2.9.0beta1 (https://github.com/ansible/ansible/issues/62237)
+- **security issue** - The ``subversion`` module provided the password via the svn command line option ``--password`` and can be retrieved from the host's /proc/<pid>/cmdline file. Update the module to use the secure ``--password-from-stdin`` option instead, and add a warning in the module and in the documentation if svn version is too old to support it. (CVE-2020-1739)
+- **security issue** - Update ``AnsibleUnsafeText`` and ``AnsibleUnsafeBytes`` to maintain unsafe context by overriding ``.encode`` and ``.decode``. This prevents future issues with ``to_text``, ``to_bytes``, or ``to_native`` removing the unsafe wrapper when converting between string types (CVE-2019-14856)
+- **security issue** - properly hide parameters marked with ``no_log`` in suboptions when invalid parameters are passed to the module (CVE-2019-14858); see the sketch after this list
+- **security issue** - atomic_move - change default permissions when creating temporary files so they are not world readable (https://github.com/ansible/ansible/issues/67794) (CVE-2020-1736)
+- **security issue** - win_unzip - normalize paths in archive to ensure extracted files do not escape from the target directory (CVE-2020-1737)
+- **security issue** - create temporary vault file with strict permissions when editing and prevent race condition (CVE-2020-1740)
+- Ensure we get an error when creating a remote tmp if it already exists (CVE-2020-1733).
+- In fetch action, avoid using slurp return to set up dest, and ensure no directory traversal (CVE-2020-1735).
+- Sanitize no_log values from any response keys that might be returned from the uri module (CVE-2020-14330).
+- ansible-galaxy - Error when install finds a tar with a file that will be extracted outside the collection install directory - CVE-2020-10691
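+
+A sketch of the suboption pattern the ``no_log`` fix (CVE-2019-14858) applies to; the option names are illustrative. Values marked ``no_log`` in nested options are now redacted even when invalid parameters are passed:
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+
+    module = AnsibleModule(argument_spec=dict(
+        credentials=dict(type='dict', options=dict(
+            username=dict(type='str'),
+            # no_log ensures this value is redacted from module output,
+            # including errors reported for invalid suboptions.
+            password=dict(type='str', no_log=True),
+        )),
+    ))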
+
+Bugfixes
+--------
+
+- ActionBase - Add new ``cleanup`` method that is explicitly run by the ``TaskExecutor`` to ensure that the shell plugins ``tmpdir`` is always removed. This change means that individual action plugins need not be responsible for removing the temporary directory, which ensures that we don't have code paths that accidentally leave behind the temporary directory.
+- Add example setting for ``collections_paths`` parameter to ``examples/ansible.cfg``
+- Add missing gcp modules to gcp module defaults group
+- Added support for Flatcar Container Linux in distribution and hostname modules. (https://github.com/ansible/ansible/pull/69627)
+- Added support for OSMC distro in hostname module (https://github.com/ansible/ansible/issues/66189).
+- Address compat with rpmfluff-0.6 for integration tests
+- Address the deprecation of the use of stdlib distutils in packaging. It's a short-term hotfix for the problem (https://github.com/ansible/ansible/issues/70456, https://github.com/pypa/setuptools/issues/2230, https://github.com/pypa/setuptools/commit/bd110264)
+- Allow TypeErrors on Undefined variables in filters to be handled or deferred when processing for loops.
+- Allow tasks to notify an FQCN handler name (https://github.com/ansible/ansible/issues/68181)
+- An invalid value is hard to track down if you don't know where it came from; return the field name instead.
+- Ansible output now uses stdout to determine column width instead of stdin
+- Ansible.Basic - Fix issue when setting a ``no_log`` parameter to an empty string - https://github.com/ansible/ansible/issues/62613
+- Ansible.ModuleUtils.WebRequest - actually set no proxy when ``use_proxy: no`` is set on a Windows module - https://github.com/ansible/ansible/issues/68528
+- AnsibleDumper - Add a representer for AnsibleUnsafeBytes (https://github.com/ansible/ansible/issues/62562).
+- AnsibleModule.run_command() - set ``close_fds`` to ``False`` on Python 2 if ``pass_fds`` are passed to ``run_command()``. Since ``subprocess.Popen()`` on Python 2 does not have the ``pass_fds`` option, there is no way to exclude a specific list of file descriptors from being closed.
+- Avoid bare select() when running commands to prevent failures caused by too-large file descriptor numbers
+- Avoid running subfunctions passed to the show_vars function when doing so would be a no-op.
+- Pass module_tmpdir as a parameter to the write_ssh_wrapper function instead of initializing module_tmpdir via get_module_path()
+- CLI - the ``ANSIBLE_PLAYBOOK_DIR`` envvar or ``playbook_dir`` config can now substitute for the --playbook-dir arg on CLIs that support it (https://github.com/ansible/ansible/issues/59464)
+- Check NoneType for raw_params before proceeding in include_vars (https://github.com/ansible/ansible/issues/64939).
+- Collections - Allow a collection role to call a stand alone role, without needing to explicitly add ``ansible.legacy`` to the collection search order within the collection role. (https://github.com/ansible/ansible/issues/69101)
+- Correctly process raw_params in add_hosts.
+- Create an ``import_module`` compat util, for use across the codebase, to allow collection loading to work properly on Python26
+- DUPLICATE_YAML_DICT_KEY - Fix error output when configuration option DUPLICATE_YAML_DICT_KEY is set to error (https://github.com/ansible/ansible/issues/65366)
+- Do not keep empty blocks in PlayIterator after skipping tasks with tags.
+- Ensure DataLoader temp files are removed at appropriate times and that we observe the LOCAL_TMP setting.
+- Ensure that ``--version`` works with non-ascii ansible project paths (https://github.com/ansible/ansible/issues/66617)
+- Ensure that keywords defined as booleans correctly interpret their input; before this patch any random string would be interpreted as False
+- Ensure we don't allow ansible_facts subkey of ansible_facts to override top level, also fix 'deprefixing' to prevent key transforms.
+- Fact Delegation - Add ability to indicate which facts must always be delegated. Primarily for ``discovered_interpreter_python`` right now, but extensible later. (https://github.com/ansible/ansible/issues/61002)
+- Fix ``delegate_facts: true`` when ``ansible_python_interpreter`` is not set. (https://github.com/ansible/ansible/issues/70168)
+- Fix a bug when a host was not removed from a play after ``meta: end_host`` and as a result the host was still present in ``ansible_play_hosts`` and ``ansible_play_batch`` variables.
+- Fix an issue with the ``fileglob`` plugin where passing a subdirectory of a non-existent directory would cause it to fail - https://github.com/ansible/ansible/issues/69450
+- Fix case sensitivity for ``lookup()`` (https://github.com/ansible/ansible/issues/66464)
+- Fix collection install error that happened if a dependency specified dependencies to be null (https://github.com/ansible/ansible/issues/67574).
+- Fix https://github.com/ansible/galaxy-dev/issues/96 - add support for automation-hub authentication to ansible-galaxy
+- Fix incorrect "Could not match supplied host pattern" warning (https://github.com/ansible/ansible/issues/66764)
+- Fix issue where the git module cannot use a custom ``key_file`` or ``ssh_opts`` as a non-root user on a system with noexec ``/tmp`` (https://github.com/ansible/ansible/issues/30064).
+- Fix issue where the git module ignores remote_tmp (https://github.com/ansible/ansible/issues/33947).
+- Fix issue where the collection loader tracebacks if ``collections_paths = ./`` is set in the config
+- Fix issue where the callbacks' ``set_options`` method was not called for collections
+- Fix label lookup in the default callback for includes (https://github.com/ansible/ansible/issues/65904)
+- Fix regression when ``ansible_failed_task`` and ``ansible_failed_result`` are not defined in the rescue block (https://github.com/ansible/ansible/issues/64789)
+- Fix string parsing of inline vault strings for plugin config variable sources
+- Fix traceback when printing ``HostVars`` on native Jinja2 (https://github.com/ansible/ansible/issues/65365)
+- Fix warning for default permission change when no mode is specified. Follow up to https://github.com/ansible/ansible/issues/67794. (CVE-2020-1736)
+- Fixed a bug with the copy action plugin where mode=preserve was being passed on symlink files and causing a traceback (https://github.com/ansible/ansible/issues/68471).
+- Fixed the equality check for IncludedFiles to ensure they are not accidentally merged when process_include_results runs.
+- Fixes ansible-test traceback when plugin author is not a string or a list of strings (https://github.com/ansible/ansible/pull/70507)
+- Fixes network action plugins to load from collections using the module prefix (https://github.com/ansible/ansible/issues/65071)
+- Force collection names to be static so that a warning is generated because templating currently does not work (see https://github.com/ansible/ansible/issues/68704).
+- Handle empty extra vars in ansible cli (https://github.com/ansible/ansible/issues/61497).
+- Handle empty roles and empty collections in requirements.yml in ansible-galaxy install command (https://github.com/ansible/ansible/issues/68186).
+- Handle exception encountered while parsing the argument description in module when invoked via ansible-doc command (https://github.com/ansible/ansible/issues/60587).
+- Handle exception when the /etc/shadow file is missing or not found while performing user operations in the user module (https://github.com/ansible/ansible/issues/63490).
+- HostVarsVars - Template the __repr__ value (https://github.com/ansible/ansible/issues/64128).
+- JSON Encoder - Ensure we treat single vault encrypted values as strings (https://github.com/ansible/ansible/issues/70784)
+- Make netconf plugin configurable to set ncclient device handler name in netconf plugin (https://github.com/ansible/ansible/pull/65718)
+- Make sure if a collection is supplied as a string that we transform it into a list.
+- Misc typo fixes in various documentation pages.
+- Module arguments in suboptions which were marked as deprecated with ``removed_in_version`` did not result in a warning.
+- On HTTP status code 304, return status_code
+- Plugin Metadata is supposed to have default values. When the metadata was missing entirely, we were properly setting the defaults. Fixed the metadata parsing so that the defaults are also set when we were missing just a few fields.
+- Prevent a race condition when running handlers using a combination of the free strategy and include_role.
+- Prevent rewriting nested Block's data in filter_tagged_tasks
+- Prevent templating unused variables for {% include %} (https://github.com/ansible/ansible/issues/68699)
+- Properly handle unicode in ``safe_eval``. (https://github.com/ansible/ansible/issues/66943)
+- Python module_utils finder - refactor logic to eliminate many corner cases, remove recursion, fix base module_utils redirections
+- Remove a temp directory created by wait_for_connection action plugin (https://github.com/ansible/ansible/issues/62407).
+- Remove the unnecessary warning about aptitude not being installed (https://github.com/ansible/ansible/issues/56832).
+- Remove unused Python imports in ``ansible-inventory``.
+- Restore the ability for changed_when/failed_when to function with group_by (#70844).
+- Role Installation - Ensure that a role containing files with non-ascii characters can be installed (https://github.com/ansible/ansible/issues/69133)
+- RoleRequirement - include stderr in the error message if a scm command fails (https://github.com/ansible/ansible/issues/41336)
+- SSH plugin - Improve error message when ssh client is not found on the host
+- Skipping of become for ``network_cli`` connections now works when ``network_cli`` is sourced from a collection.
+- Stop adding the connection variables to the output results
+- Strictly check string datatype for 'tasks_from', 'vars_from', 'defaults_from', and 'handlers_from' in include_role (https://github.com/ansible/ansible/issues/68515).
+- Strip no log values from module response keys (https://github.com/ansible/ansible/issues/68400)
+- TaskExecutor - Handle unexpected errors as failed while post validating loops (https://github.com/ansible/ansible/issues/70050).
+- TaskQueueManager - Explicitly set the multiprocessing start method to ``fork`` to avoid issues with the default on macOS now being ``spawn``.
+- Template connection variables before using them (https://github.com/ansible/ansible/issues/70598).
+- Templating - Ansible was caching results of Jinja2 expressions in some cases where these expressions could have dynamic results, like password generation (https://github.com/ansible/ansible/issues/34144).
+- Terminal plugins - add "\e[m" to the list of ANSI sequences stripped from device output
+- The ``ansible_become`` value was not being treated as a boolean value when set in an INI format inventory file (fixes bug https://github.com/ansible/ansible/issues/70476).
+- The ansible-galaxy publish command was using an incorrect URL for v3 servers. The configuration for v3 servers includes part of the path fragment that was added in the new test.
+- The machine-readable changelog ``changelogs/changelog.yaml`` is now contained in the release.
+- Update ActionBase._low_level_execute_command to honor executable (https://github.com/ansible/ansible/issues/68054)
+- Update the warning message for ``CONDITIONAL_BARE_VARS`` to list the original conditional not the value of the original conditional (https://github.com/ansible/ansible/issues/67735)
+- Use ``sys.exit`` instead of ``exit`` in ``ansible-inventory``.
+- Use the FQCR for command module invocations via the shell module. Fixes https://github.com/ansible/ansible/issues/69788
+- Use hostnamectl command to get current hostname for host while using systemd strategy (https://github.com/ansible/ansible/issues/59438).
+- Using --start-at-task would fail when it attempted to skip over tasks with no name.
+- Validate include args in handlers.
+- Vault - Allow single vault encrypted values to be used directly as module parameters. (https://github.com/ansible/ansible/issues/68275)
+- Vault - Make the single vaulted value ``AnsibleVaultEncryptedUnicode`` class work more like a string by replicating the behavior of ``collections.UserString`` from Python. These changes don't allow it to be considered a string, but most common python string actions will now work as expected. (https://github.com/ansible/ansible/pull/67823)
+- ``AnsibleUnsafe``/``AnsibleContext``/``Templar`` - Do not treat ``AnsibleUndefined`` as being "unsafe" (https://github.com/ansible/ansible/issues/65198)
+- account for empty strings when splitting the host pattern (https://github.com/ansible/ansible/issues/61964)
+- action plugins - change all action/module delegations to use FQ names while allowing overrides (https://github.com/ansible/ansible/issues/69788)
+- add constraints file for ``ansible_runner`` test since an update to ``psutil`` is now causing test failures
+- add magic/connection vars updates from delegated host info.
+- add parameter name to warning message when values are converted to strings (https://github.com/ansible/ansible/pull/57145)
+- add_host action now correctly shows idempotency/changed status
+- added 'unimplemented' prefix to file based caching
+- added a new compat option to the default callback to keep old 3rd party plugins from erroring out.
+- adhoc CLI - when playbook-dir is specified and inside a collection, use default collection logic to resolve modules/actions
+- allow external collections to be created in the 'ansible' collection namespace (https://github.com/ansible/ansible/issues/59988)
+- also strip spaces around config values in pathlist as we do in list types
+- ansiballz - remove '' and '.' from sys.path to fix a permissions issue on OpenBSD with pipelining (#69320)
+- ansible command now correctly sends v2_playbook_on_start to callbacks
+- ansible-connection no longer persists after the playbook run is completed (https://github.com/ansible/ansible/pull/61591)
+- ansible-doc - Allow and give precedence to ``removed_at_date`` for deprecated modules.
+- ansible-doc - collection name for plugin top-level deprecation was not inserted when deprecating by version (https://github.com/ansible/ansible/pull/70344).
+- ansible-doc - improve error message in text formatter when ``description`` is missing for a (sub-)option or a return value or its ``contains`` (https://github.com/ansible/ansible/pull/70046).
+- ansible-doc - improve man page formatting to avoid problems when YAML anchors are used (https://github.com/ansible/ansible/pull/70045).
+- ansible-doc - include the collection name in the text output (https://github.com/ansible/ansible/pull/70401).
+- ansible-doc now properly handles removed modules/plugins
+- ansible-galaxy - Default collection install path to first path in COLLECTIONS_PATHS (https://github.com/ansible/ansible/pull/62870)
+- ansible-galaxy - Display proper error when invalid token is used for Galaxy servers
+- ansible-galaxy - Ensure we preserve the new URL when appending ``/api`` for the case where the GET succeeds on galaxy.ansible.com
+- ansible-galaxy - Expand the ``User-Agent`` to include more information and add it to more calls to Galaxy endpoints.
+- ansible-galaxy - Fix ``collection install`` when installing from a URL or a file - https://github.com/ansible/ansible/issues/65109
+- ansible-galaxy - Fix ``multipart/form-data`` body to include extra CRLF (https://github.com/ansible/ansible/pull/67942)
+- ansible-galaxy - Fix issue when comparing installed dependencies with a collection having no ``MANIFEST.json`` or an empty version string in the json
+- ansible-galaxy - Fix pagination issue when retrieving role versions for install - https://github.com/ansible/ansible/issues/64355
+- ansible-galaxy - Fix up pagination searcher for collection versions on Automation Hub
+- ansible-galaxy - Fix url building to not truncate the URL (https://github.com/ansible/ansible/issues/61624)
+- ansible-galaxy - Handle the different task resource urls in API responses from publishing collection artifacts to galaxy servers using v2 and v3 APIs.
+- ansible-galaxy - Preserve symlinks when building and installing a collection
+- ansible-galaxy - Remove unneeded verbose messages when accessing local token file
+- ansible-galaxy - Return the HTTP code reason if no error msg was returned by the server - https://github.com/ansible/ansible/issues/64850
+- ansible-galaxy - Send SHA256 hashes when publishing a collection
+- ansible-galaxy - Set ``User-Agent`` to Ansible version when interacting with Galaxy or Automation Hub
+- ansible-galaxy - Treat the ``GALAXY_SERVER_LIST`` config entry that is defined but with no values as an empty list
+- ansible-galaxy - Utilize ``Templar`` for templating skeleton files, so that they have access to Ansible filters/tests/lookups (https://github.com/ansible/ansible/issues/69104)
+- ansible-galaxy - fix a bug where listing a specific role if it was not in the first path failed to find the role
+- ansible-galaxy - fix regression that prevented roles from being listed
+- ansible-galaxy - hide warning during collection installation if other installed collections do not contain a ``MANIFEST.json`` (https://github.com/ansible/ansible/issues/67490)
+- ansible-galaxy - properly list roles when the role name also happens to be in the role path (https://github.com/ansible/ansible/issues/67365)
+- ansible-galaxy - properly show the role description when running offline (https://github.com/ansible/ansible/issues/60167)
+- ansible-galaxy cli - fixed ``--version`` argument
+- ansible-galaxy collection - Preserve executable bit on build and preserve mode on install from what tar member is set to - https://github.com/ansible/ansible/issues/68415
+- ansible-galaxy collection download - fix downloading tar.gz files and collections in git repositories (https://github.com/ansible/ansible/issues/70429)
+- ansible-galaxy collection install - fix fallback mechanism if the AH server did not have the collection requested - https://github.com/ansible/ansible/issues/70940
+- ansible-galaxy role - Fix issue where ``--server`` was not being used for certain ``ansible-galaxy role`` actions - https://github.com/ansible/ansible/issues/61609
+- ansible-galaxy - On giving an invalid subcommand to ansible-galaxy, the help was shown only for the role subcommand (collection subcommand help was not shown). With this change, the entire help for ansible-galaxy (same as ansible-galaxy --help) is displayed along with the help for the role subcommand. (https://github.com/ansible/ansible/issues/69009)
+- ansible-inventory - Fix long standing bug not loading vars plugins for group vars relative to the playbook dir when the '--playbook-dir' and '--export' flags are used together.
+- ansible-inventory - Fix regression loading vars plugins. (https://github.com/ansible/ansible/issues/65064)
+- ansible-inventory - Properly hide arguments that should not be shown (https://github.com/ansible/ansible/issues/61604)
+- ansible-inventory - Restore functionality to allow ``--graph`` to be limited by a host pattern
+- ansible-test - Add ``pytest < 6.0.0`` constraint for managed installations on Python 3.x to avoid issues with relative imports.
+- ansible-test - Change detection now properly resolves relative imports instead of treating them as absolute imports.
+- ansible-test - Code cleanup.
+- ansible-test - Disabled the ``duplicate-code`` and ``cyclic-import`` checks for the ``pylint`` sanity test due to inconsistent results.
+- ansible-test - Do not try to validate PowerShell modules ``setup.ps1``, ``slurp.ps1``, and ``async_status.ps1``
+- ansible-test - Do not warn on missing PowerShell or C# util that are in other collections
+- ansible-test - Fix PowerShell module util analysis to properly detect the names of a util when running in a collection
+- ansible-test - Fix regression introduced in https://github.com/ansible/ansible/pull/67063 which caused module_utils analysis to fail on Python 2.x.
+- ansible-test - Fix traceback in validate-modules test when argument_spec is None.
+- ansible-test - Make sure import sanity test virtual environments also remove ``pkg-resources`` if it is not removed by uninstalling ``setuptools``.
+- ansible-test - Remove out-of-date constraint on installing paramiko versions 2.5.0 or later in tests.
+- ansible-test - The ``ansible-doc`` sanity test now works for ``netconf`` plugins.
+- ansible-test - The ``import`` sanity test now correctly blocks access to python modules, not just packages, in the ``ansible`` package.
+- ansible-test - The ``import`` sanity test now correctly provides an empty ``ansible`` package.
+- ansible-test - The shebang sanity test now correctly identifies modules in subdirectories in collections.
+- ansible-test - Updated Python constraints for installing ``coverage`` to resolve issues on multiple Python versions when using the ``--coverage`` option.
+- ansible-test - Updated requirements to limit ``boto3`` and ``botocore`` versions on Python 2.6 to supported versions.
+- ansible-test - Use ``sys.exit`` instead of ``exit``.
+- ansible-test - Use ``virtualenv`` versions before 20 on provisioned macOS instances to remain compatible with an older pip install.
+- ansible-test - avoid use of deprecated junit_xml method
+- ansible-test - bump version of ACME test container. The new version includes updated dependencies.
+- ansible-test - during module validation, handle add_file_common_args only for top-level arguments.
+- ansible-test - during module validation, improve alias handling.
+- ansible-test - for local change detection, allow specifying the branch to compare to with ``--base-branch`` for all types of tests (https://github.com/ansible/ansible/pull/69508).
+- ansible-test - improve ``deprecate()`` call checker.
+- ansible-test - integration and unit test change detection now works for filter, lookup and test plugins
+- ansible-test can now install argparse with ``--requirements`` or delegation when the pip version in use is older than version 7.1
+- ansible-test change detection - Run only sanity tests on ``docs/`` and ``changelogs/`` in collections, to avoid triggering full CI runs of integration and unit tests when files in these directories change.
+- ansible-test coverage - Fix the ``--all`` argument when generating coverage reports - https://github.com/ansible/ansible/issues/62096
+- ansible-test import sanity test now consistently reports errors against the file being tested.
+- ansible-test import sanity test now consistently reports warnings as errors.
+- ansible-test import sanity test now properly handles relative imports.
+- ansible-test import sanity test now properly invokes Ansible modules as scripts.
+- ansible-test is now able to find its ``egg-info`` directory when it contains the Ansible version number
+- ansible-test no longer errors reporting coverage when no Python coverage exists. This fixes issues reporting on PowerShell only coverage from collections.
+- ansible-test no longer fails when downloading test results for a collection without a ``tests`` directory when using the ``--docker`` option.
+- ansible-test no longer optimizes setting ``PATH`` by prepending the directory containing the selected Python interpreter when it is named ``python``. This avoids unintentionally making other programs available on ``PATH``, including an already installed version of Ansible.
+- ansible-test no longer tracebacks during change analysis due to processing an empty python file
+- ansible-test no longer tries to install ``coverage`` 5.0+ since those versions are unsupported
+- ansible-test no longer tries to install ``setuptools`` 45+ on Python 2.x since those versions are unsupported
+- ansible-test now always uses the ``--python`` option for ``virtualenv`` to select the correct interpreter when creating environments with the ``--venv`` option
+- ansible-test now correctly collects code coverage on the last task in a play. This should resolve issues with missing code coverage, empty coverage files and corrupted coverage files resulting from early worker termination.
+- ansible-test now correctly enumerates submodules when a collection resides below the repository root
+- ansible-test now correctly excludes the test results temporary directory when copying files from the remote test system to the local system
+- ansible-test now correctly includes inventory files ignored by git when running tests with the ``--docker`` option
+- ansible-test now correctly installs the requirements specified by the collection's unit and integration tests instead of the requirements specified for Ansible's own unit and integration tests
+- ansible-test now correctly recognizes imports in collections when using the ``--changed`` option.
+- ansible-test now correctly rewrites coverage paths for PowerShell files when testing collections
+- ansible-test now creates its integration test temporary directory within the collection so ansible-playbook can properly detect the default collection
+- ansible-test now enables color ``ls`` on a remote host only if the host supports the feature
+- ansible-test now ignores empty ``*.py`` files when analyzing module_utils imports for change detection
+- ansible-test now ignores version control within subdirectories of collections. Previously this condition was an error.
+- ansible-test now ignores warnings when comparing pip versions before and after integration tests run
+- ansible-test now installs sanity test requirements specific to each test instead of installing requirements for all sanity tests
+- ansible-test now installs the correct version of ``cryptography`` with ``--requirements`` or delegation when setuptools is older than version 18.5
+- ansible-test now limits Jinja2 installs to version 2.10 and earlier on Python 2.6
+- ansible-test now limits ``pathspec`` to versions prior to 0.6.0 on Python 2.6 to avoid installation errors
+- ansible-test now limits installation of ``hcloud`` to Python 2.7 and 3.5 - 3.8 since other versions are unsupported
+- ansible-test now limits the version of ``setuptools`` on Python 2.6 to versions older than 37
+- ansible-test now loads the collection loader plugin early enough for ansible_collections imports to work in unit test conftest.py modules
+- ansible-test now preserves existing SSH authorized keys when provisioning a remote host
+- ansible-test now properly activates the vcenter plugin for vcenter tests when docker is available
+- ansible-test now properly activates virtual environments created using the --venv option
+- ansible-test now properly creates a virtual environment using ``venv`` when running in a ``virtualenv`` created virtual environment
+- ansible-test now properly excludes the ``tests/output/`` directory from code coverage
+- ansible-test now properly handles creation of Python execv wrappers when the selected interpreter is a script
+- ansible-test now properly handles enumeration of git submodules. Enumeration is now done with ``git submodule status --recursive`` without specifying ``.`` for the path, since that could cause the command to fail. Instead, relative paths outside the current directory are filtered out of the results. Errors from ``git`` commands will now once again be reported as errors instead of warnings.
+- ansible-test now properly handles warnings for removed modules/plugins
+- ansible-test now properly ignores the ``tests/output//`` directory when not using git
+- ansible-test now properly installs requirements for multiple Python versions when running sanity tests
+- ansible-test now properly recognizes modules and module_utils in collections when using the ``blacklist`` plugin for the ``pylint`` sanity test
+- ansible-test now properly registers its own code in a virtual environment when running from an install
+- ansible-test now properly reports import errors for collections when running the import sanity test
+- ansible-test now properly searches for ``pythonX.Y`` instead of ``python`` when looking for the real python that created a ``virtualenv``
+- ansible-test now properly sets PYTHONPATH for tests when running from an Ansible installation
+- ansible-test now properly sets ``ANSIBLE_PLAYBOOK_DIR`` for integration tests so unqualified collection references work for adhoc ``ansible`` usage
+- ansible-test now properly uses a fresh copy of environment variables for each command invocation to avoid mixing vars between commands
+- ansible-test now shows sanity test doc links when installed (previously the links were only visible when running from source)
+- ansible-test now shows the correct source path instead of ``%s`` for collection role based test targets when the ``-v`` option is used
+- ansible-test now supports submodules using older ``git`` versions which require querying status from the top level directory of the repo.
+- ansible-test now updates SSH keys it generates with newer versions of ssh-keygen to function with Paramiko
+- ansible-test now upgrades ``pip`` with ``--requirements`` or delegation as needed when the pip version in use is older than version 7.1
+- ansible-test now uses GNU tar format instead of the Python default when creating payloads for remote systems
+- ansible-test now uses ``pycodestyle`` frozen at version 2.6.0 for consistent test results.
+- ansible-test now uses modules from the ``ansible.windows`` collection for setup and teardown of ``windows-integration`` tests and code coverage
+- ansible-test once again properly collects code coverage for ``ansible-connection``
+- ansible-test validate-modules - Fix arg spec collector for PowerShell to find utils in both a collection and base.
+- ansible-test validate-modules - ``version_added`` on module level was not validated for modules in collections (https://github.com/ansible/ansible/pull/70869).
+- ansible-test validate-modules - return correct error codes ``option-invalid-version-added`` resp. ``return-invalid-version-added`` instead of the wrong error ``deprecation-either-date-or-version`` when an invalid value of ``version_added`` is specified for an option or a return value (https://github.com/ansible/ansible/pull/70869).
+- ansible-test validate-modules sanity test code ``missing-module-utils-import-c#-requirements`` is now ``missing-module-utils-import-csharp-requirements`` (fixes ignore bug).
+- ansible-test validate-modules sanity test code ``multiple-c#-utils-per-requires`` is now ``multiple-csharp-utils-per-requires`` (fixes ignore bug).
+- ansible-test validate-modules sanity test now checks for AnsibleModule initialization instead of module_utils imports, which did not work in many cases.
+- ansible-test validate-modules sanity test now properly handles collections imports using the Ansible collection loader.
+- ansible-test validate-modules sanity test now properly handles relative imports.
+- ansible-test validate-modules sanity test now properly handles sys.exit in modules.
+- ansible-test validate-modules sanity test now properly invokes Ansible modules as scripts.
+- ansible-test windows coverage - Ensure coverage reports are UTF-8 encoded without a BOM
+- ansible-test windows coverage - Output temp files as UTF-8 with BOM to standardise against non-coverage runs
+- ansible-vault - Fix ``encrypt_string`` output in a tty when using the ``--stdin-name`` option (https://github.com/ansible/ansible/issues/65121)
+- ansible-vault create - Fix exception on no arguments given
+- api - time.clock is removed in Python 3.8, add backward compatible code (https://github.com/ansible/ansible/issues/70649).
+- apt - Fixed an issue where the cache was being updated while auto-installing its dependencies even when ``update_cache`` is set to false.
+- apt - include exception message from apt python library in error output
+- assemble - fix decrypt argument in the module (https://github.com/ansible/ansible/issues/65450).
+- assemble module - fix documentation - the remote_src property specified a default value of no but it's actually yes.
+- avoid fatal traceback when a bad FQCN for a callback is supplied in the whitelist (#69401).
+- basic - use PollSelector implementation when DefaultSelector fails (https://github.com/ansible/ansible/issues/70238).
+- become - Fix various plugins that still used play_context to get the become password instead of through the plugin - https://github.com/ansible/ansible/issues/62367
+- blockinfile - fix regression that results in incorrect block in file when the block to be inserted does not end in a line separator (https://github.com/ansible/ansible/pull/69734)
+- blockinfile - preserve line endings on update (https://github.com/ansible/ansible/issues/64966)
+- clean_facts - use correct variable to avoid unnecessary handling of ``AttributeError``
+- code - removes some Python compatibility code for dealing with socket timeouts in ``wait_for``
+- collection loader - ensure Jinja function cache is fully-populated before lookup
+- collection loader - fixed relative imports on Python 2.7, ensure pluginloader caches use full name to prevent names from being clobbered (https://github.com/ansible/ansible/pull/60317)
+- collection metadata - ensure collection loader uses libyaml/CSafeLoader to parse collection metadata if available
+- collection_loader - sort Windows modules below other plugin types so the correct builtin plugin inside a role is selected (https://github.com/ansible/ansible/issues/65298)
+- collections - Handle errors better for filters and tests in collections, where a non-existent collection is specified, or importing the plugin results in an exception (https://github.com/ansible/ansible/issues/66721)
+- combine filter - ``[dict1, [dict2]] | combine`` now raises an error; previously ``combine`` had an undocumented behaviour of flattening the list before combining it (https://github.com/ansible/ansible/pull/57894#discussion_r339517518).
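+
+  A minimal sketch of the distinction, assuming hypothetical dictionary
+  variables ``dict1`` and ``dict2``::
+
+      - debug:
+          msg: "{{ [dict1, dict2] | combine }}"    # a flat list of dicts still combines
+      - debug:
+          msg: "{{ [dict1, [dict2]] | combine }}"  # nested list - now an error, not a silent flatten
+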
+- config - encoding failures on config values should be non-fatal (https://github.com/ansible/ansible/issues/63310)
+- copy - Fix copy modes when using remote_src=yes and src is a directory with trailing slash.
+- copy - Fixed the copy module not working when remote_src is enabled and dest ends in a / (https://github.com/ansible/ansible/pull/47238)
+- copy - recursive copy with ``remote_src=yes`` now recurses beyond first level. (Fixes https://github.com/ansible/ansible/issues/58284)
+- core - remove unneeded Python version checks.
+- core - replace a compatibility import of pycompat24.literal_eval with ast.literal_eval.
+- core filters - fix ``extract()`` filter when key does not exist in container (https://github.com/ansible/ansible/issues/64957)
+- cron - encode and decode crontab files in UTF-8 explicitly to allow non-ascii chars in cron filepath and job (https://github.com/ansible/ansible/issues/69492)
+- cron and cronvar - use get_bin_path utility to locate the default crontab executable instead of the hardcoded /usr/bin/crontab. (https://github.com/ansible/ansible/pull/59765)
+- cron cronvar - only run ``get_bin_path()`` once
+- cronvar - use correct binary name (https://github.com/ansible/ansible/issues/63274)
+- deal with cases in which just a file is passed and not a path with directories; now fileglob correctly searches in 'files/' subdirs.
+- debug - fixed an issue introduced in Ansible 2.4 where a loop of debug tasks would lose the "changed" status on each item.
+- discovery will NOT update the incorrect host anymore when in a delegate_to task.
+- display - Improve method of removing extra new line after warnings so it does not break Tower/Runner (https://github.com/ansible/ansible/pull/68517)
+- display - remove extra new line after warnings (https://github.com/ansible/ansible/pull/65199)
+- display - remove leading space when displaying WARNING messages
+- display logging - Fix issue where 3rd party modules will print tracebacks when attempting to log information when ``ANSIBLE_LOG_PATH`` is set - https://github.com/ansible/ansible/issues/65249
+- display logging - Fixed up the logging formatter to use the proper prefixes for ``u=user`` and ``p=process``
+- display logging - Re-added the ``name`` attribute to the log formatter so that the source of the log can be seen
+- dnf - Fix idempotence of `state: installed` (https://github.com/ansible/ansible/issues/64963)
+- dnf - Unified error messages when trying to install a nonexistent package with newer dnf (4.2.18) vs older dnf (4.2.9)
+- dnf - Unified error messages when trying to remove a wildcard name that is not currently installed, with newer dnf (4.2.18) vs older dnf (4.2.9)
+- dnf - enable logging using setup_loggers() API in dnf-4.2.17-6 or later
+- dnf - remove custom ``fetch_rpm_from_url`` method in favor of more general ``ansible.module_utils.urls.fetch_file``.
+- dnf module - Ensure the module's exit_json['msg'] response is always a string, not sometimes a tuple.
+- ensure delegated vars can resolve hostvars object and access vars from hostvars[inventory_hostname].
+- ensure we pass on interpreter discovery values to delegated host.
+- env lookup plugin - Fix handling of environment variables values containing utf-8 characters. (https://github.com/ansible/ansible/issues/65298)
+- fact gathering - Display warnings and deprecation messages that are created during the fact gathering phase
+- facts - account for Slackware OS with ``+`` in the name (https://github.com/ansible/ansible/issues/38760)
+- facts - fix detection of virtualization type when dmi product name is KVM Server
+- facts - fix incorrect UTC timestamp in ``iso8601_micro`` and ``iso8601``
+- facts - introduce fact "ansible_processor_nproc" which reflects the number of vcpus available to processes (falls back to the number of vcpus available to the scheduler)
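+
+  A minimal usage sketch reading the new fact::
+
+      - debug:
+          var: ansible_processor_nproc
+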
+- file - Removed unreachable code in module
+- file - change ``_diff_peek`` in argument spec to be the correct type, which is ``bool`` (https://github.com/ansible/ansible/issues/59433)
+- file - return ``'state': 'absent'`` when a file does not exist (https://github.com/ansible/ansible/issues/66171)
+- find - clarify description of ``contains`` (https://github.com/ansible/ansible/issues/61983)
+- fix issue in which symlinked collection cannot be listed, though the docs/plugins can be loaded if referenced directly.
+- fix issue with inventory_hostname and delegated host vars mixing on connection settings.
+- fix wrong command line length calculation in ``ansible-console`` when a long command is entered
+- for those running under uids without a valid user (containers), fall back to uid=<uid> when logging (fixes #68007)
+- free strategy - Include failed hosts when filtering notified hosts for handlers. The strategy base should determine whether or not to run handlers on those hosts depending on whether forcing handlers is enabled (https://github.com/ansible/ansible/issues/65254).
+- galaxy - Fix an AttributeError on ansible-galaxy install with an empty requirements.yml (https://github.com/ansible/ansible/issues/66725).
+- get_url - Don't treat no checksum as a checksum match (https://github.com/ansible/ansible/issues/61978)
+- get_url - fix passing an incorrect If-Modified-Since header (https://github.com/ansible/ansible/issues/67417)
+- git - when force=True, apply --force flag to git fetches as well
+- group - The group module was not correctly detecting whether a local group exists when local is set to yes and a group with the same name exists in a non-local group repository, e.g. LDAP. (https://github.com/ansible/ansible/issues/58619)
+- group_by should now correctly reflect changed status.
+- hostname - Fixed an issue where the hostname on the cloudlinux 6 server could not be set.
+- hostname - make module work on Manjaro Linux (https://github.com/ansible/ansible/issues/61382)
+- hurd - Address FIXMEs. Extract functionality and exit early.
+- if the ``type`` for a module parameter in the argument spec is callable, do not pass ``kwargs`` to avoid errors (https://github.com/ansible/ansible/issues/70017)
+- include_vars - fix stack trace when passing ``dirs`` in an ad-hoc command (https://github.com/ansible/ansible/issues/62633)
+- interpreter discovery will now use correct vars (from delegated host) when in delegate_to task.
+- junit callback - avoid use of deprecated junit_xml method
+- lineinfile - add example of using alternative backrefs syntax (https://github.com/ansible/ansible/issues/42794)
+- lineinfile - don't attempt mkdirs when path doesn't contain directory path
+- lineinfile - fix bug that caused multiple line insertions (https://github.com/ansible/ansible/issues/58923).
+- lineinfile - fix not subscriptable error in exception handling around file creation
+- lineinfile - properly handle inserting a line when backrefs are enabled and the line already exists in the file (https://github.com/ansible/ansible/issues/63756)
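+
+  A hedged sketch of the now-idempotent pattern; the path and regexp are
+  hypothetical::
+
+      - lineinfile:
+          path: /etc/app.conf
+          regexp: '^(max_workers=).*'
+          line: '\g<1>8'
+          backrefs: yes
+
+  Re-running the task once the line already reads ``max_workers=8`` no longer
+  inserts a duplicate line.
+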
+- lineinfile - use ``module.tmpdir`` to allow configuration of the remote temp directory (https://github.com/ansible/ansible/issues/68218)
+- lineinfile - use correct index value when inserting a line at the end of a file (https://github.com/ansible/ansible/issues/63684)
+- loops - Do not indiscriminately mark loop items as unsafe, only apply unsafe to ``with_`` style loops. The items from ``loop`` should not be explicitly wrapped in unsafe. The underlying templating mechanism should dictate this. (https://github.com/ansible/ansible/issues/64379)
+- make ``no_log=False`` on a module option silence the ``no_log`` warning (https://github.com/ansible/ansible/issues/49465 https://github.com/ansible/ansible/issues/64656)
+- match docs for ssh and ensure pipelining is configurable per connection plugin.
+- module executor - Address issue where changes to Ansiballz module code changed the behavior of module execution as it pertains to ``__file__`` and ``sys.modules`` (https://github.com/ansible/ansible/issues/64664)
+- module_defaults - support candidate action names for relocated content
+- module_defaults - support short group names for content relocated to collections
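+
+  For example (a sketch; ``group/aws`` is one of the action groups whose
+  content was relocated to collections)::
+
+      - hosts: all
+        module_defaults:
+          group/aws:
+            region: us-east-1
+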
+- now correctly merge, and not just overwrite, facts when gathering using multiple modules.
+- objects - Remove FIXME comment because no fix is needed.
+- optimize 'smart' detection so it is not run over and over, preferably doing it at config time.
+- package_facts - fix value of ``vital`` attribute which is returned when ``pkg`` manager is used
+- package_facts - use module warnings rather than a custom implementation for reporting warnings
+- packaging_yum - replace legacy file handling with a file manager.
+- paramiko - catch and handle exception to prevent stack trace when running in FIPS mode
+- paramiko_ssh - Removed redundant conditional statement in ``_parse_proxy_command`` that always evaluated to True.
+- paramiko_ssh - improve authentication error message so it is less confusing
+- paramiko_ssh - optimized file handling by using a context manager.
+- pause - handle exception when there is no stdout (https://github.com/ansible/ansible/pull/47851)
+- pip - The virtualenv_command option can now include arguments without requiring the full path to the binary. (https://github.com/ansible/ansible/issues/52275)
+- pip - check_mode with ``state: present`` now returns the correct state for pre-release versioned packages
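+
+  Illustrative only; the package name and pre-release pin are hypothetical::
+
+      - pip:
+          name: example-pkg==2.0.0rc1
+          state: present
+        check_mode: yes
+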
+- playbooks - detect and propagate failures in ``always`` blocks after ``rescue`` (https://github.com/ansible/ansible/issues/70000)
+- plugins - Allow ensure_type to decrypt the value for string types (and implicit string types) when value is an inline vault.
+- psexec - Fix issue where the Kerberos package was not detected as being available.
+- psexec - Fix issue where the ``interactive`` option was not being passed down to the library.
+- reboot - Add support for the runit init system, used on Void Linux, that does not support the normal Linux syntax.
+- reboot, win_reboot - add ``boot_time_command`` parameter to override the default command used to determine whether or not a system was rebooted (https://github.com/ansible/ansible/issues/58868)
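+
+  A sketch that mirrors the documented Linux default command::
+
+      - reboot:
+          boot_time_command: cat /proc/sys/kernel/random/boot_id
+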
+- remove update/restore of vars from play_context as it is now redundant.
+- replace use of deprecated functions from ``ansible.module_utils.basic``.
+- reset logging level to INFO due to CVE-2019-14846.
+- roles - Ensure that ``allow_duplicates: true`` enables to run single role multiple times (https://github.com/ansible/ansible/issues/64902)
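+
+  A minimal sketch, assuming a hypothetical role ``myrole`` whose
+  ``meta/main.yml`` sets ``allow_duplicates: true``::
+
+      - hosts: all
+        roles:
+          - myrole
+          - myrole   # runs a second time because duplicates are allowed
+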
+- runas - Fix the ``runas`` ``become_pass`` variable fallback from ``ansible_runas_runas`` to ``ansible_runas_pass``
+- service_facts - Now correctly parses systemd list-unit-files for systemd >=245
+- setup - properly detect yum package manager for IBM i.
+- setup - service_mgr - detect systemd even if it isn't running, such as during a container build
+- shell - fix quoting of mkdir command in creation of remote_tmp in order to allow spaces and other special characters (https://github.com/ansible/ansible/issues/69577).
+- shell cmd - Properly escape double quotes in the command argument
+- splunk httpapi plugin - switch from splunk.enterprise_security to splunk.es in runtime.yml to reflect upstream change of Collection Name
+- ssh connection plugin - use ``get_option()`` rather than ``_play_context`` to ensure ``ANSIBLE_SSH_ARGS`` are applied properly (https://github.com/ansible/ansible/issues/70437)
+- synchronize - allow data to be passed between two managed nodes when using the docker connection plugin (https://github.com/ansible/ansible/pull/65698)
+- synchronize - fix password authentication on Python 2 (https://github.com/ansible/ansible/issues/56629)
+- sysctl - Remove FIXME comments to avoid confusion
+- systemd - don't require systemd to be running to enable/disable or mask/unmask units
+- systemd - the module should fail in check_mode when the service is not found on the host (https://github.com/ansible/ansible/pull/68136).
+- sysvinit - Add missing parameter ``module`` in call to ``daemonize()``.
+- template lookup - ensure changes to the templar in the lookup do not affect the templar context outside of the lookup (https://github.com/ansible/ansible/issues/60106)
+- template lookup - fix regression when templating hostvars (https://github.com/ansible/ansible/issues/63940)
+- the default parsing will now show existing JSON errors and not just the last attempted YAML error; we also avoid YAML parsing when we know we only want JSON
+- throttle: the linear strategy didn't always stick to the throttle limit
+- unarchive - Remove incorrect and unused function arguments.
+- unsafe_proxy - Ensure that data within a tuple is marked as unsafe (https://github.com/ansible/ansible/issues/65722)
+- update ``user`` module to support silencing ``no_log`` warnings in the future (see: https://github.com/ansible/ansible/pull/64733)
+- uri - Don't return the body even if it failed (https://github.com/ansible/ansible/issues/21003)
+- user - allow 13 asterisk characters in password field without warning
+- user - don't create home directory and missing parents when create_home == false (https://github.com/ansible/ansible/pull/70600).
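+
+  Illustrative task; the account name is hypothetical::
+
+      - user:
+          name: svc_example
+          create_home: false   # no home directory or missing parent dirs are created
+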
+- user - fix comparison on macOS so module does not improperly report a change (https://github.com/ansible/ansible/issues/62969)
+- user - fix stack trace on AIX when attempting to parse shadow file that does not exist (https://github.com/ansible/ansible/issues/62510)
+- user - on systems using busybox, honor the ``on_changed`` parameter to prevent unnecessary password changing (https://github.com/ansible/ansible/issues/65711)
+- user - update docs to reflect proper way to remove account from all groups
+- validate-modules - Fix hang when inspecting module with a delegate args spec type
+- virtual facts - detect generic container environment based on non-empty "container" env var
+- wait_for_connection - with pipelining enabled, interpreter discovery would fail if the first connection attempt was not successful
+- win setup - Fix redirection path for the windows setup module
+- win_exec_wrapper - Be more defensive when it comes to getting unhandled exceptions
+- win_package - Handle quoted and unquoted strings in the registry ``UninstallString`` value - https://github.com/ansible/ansible/issues/40973
+- win_uri win_get_url - Fix the behaviour of ``follow_redirects: safe`` to actually redirect on ``GET`` and ``HEAD`` requests - https://github.com/ansible/ansible/issues/65556
+- windows async - use full path when calling PowerShell to reduce reliance on environment vars being correct - https://github.com/ansible/ansible/issues/70655
+- windows environment - Support env vars that contain the unicode variant of single quotes - https://github.com/ansible-collections/ansible.windows/issues/45
+- winrm - preserve winrm forensic data on put_file failures
+- yum - fix bug that caused ``enablerepo`` to not be honored when used with disablerepo all wildcard/glob (https://github.com/ansible/ansible/issues/66549)
+- yum - fixed the handling of releasever parameter
+- yum - performance bugfix: the YumBase object was being instantiated multiple times unnecessarily, which led to considerable overhead when operating against large sets of packages.
+- yum - yum tasks can no longer end up running non-yum modules
+- yum/dnf - check the type of elements in ``name``
+
+New Plugins
+-----------
+
+Lookup
+~~~~~~
+
+- unvault - read vaulted file(s) contents
diff --git a/changelogs/CHANGELOG.rst b/changelogs/CHANGELOG.rst
new file mode 100644
index 00000000..6668a398
--- /dev/null
+++ b/changelogs/CHANGELOG.rst
@@ -0,0 +1,6 @@
+Placeholder changelog
+=====================
+
+This file is a placeholder; a version-specific ``CHANGELOG-vX.Y.rst`` will be generated during releases from fragments
+under changelogs/fragments. On release branches, once a release has been created, consult the branch's version-specific
+file for the changes that have occurred in that branch.
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
new file mode 100644
index 00000000..73aa9bb1
--- /dev/null
+++ b/changelogs/changelog.yaml
@@ -0,0 +1,2284 @@
+ancestor: 2.9.0
+releases:
+ 2.10.0:
+ changes:
+ bugfixes:
+ - Address compat with rpmfluff-0.6 for integration tests
+ release_summary: '| Release Date: 2020-08-13
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - rpmfluff-compat-fixes.yml
+ - v2.10.0_summary.yaml
+ release_date: '2020-08-13'
+ 2.10.0b1:
+ changes:
+ bugfixes:
+ - ActionBase - Add new ``cleanup`` method that is explicitly run by the ``TaskExecutor``
+ to ensure that the shell plugins ``tmpdir`` is always removed. This change
+ means that individual action plugins need not be responsible for removing
+ the temporary directory, which ensures that we don't have code paths that
+ accidentally leave behind the temporary directory.
+ - Add example setting for ``collections_paths`` parameter to ``examples/ansible.cfg``
+ - Add missing gcp modules to gcp module defaults group
+ - Added support for Flatcar Container Linux in distribution and hostname modules.
+ (https://github.com/ansible/ansible/pull/69627)
+ - Added support for OSMC distro in hostname module (https://github.com/ansible/ansible/issues/66189).
+ - Allow tasks to notify a fqcn handler name (https://github.com/ansible/ansible/issues/68181)
+      - An invalid value is hard to track down if you don't know where it came from;
+        return the field name instead.
+ - Ansible.Basic - Fix issue when setting a ``no_log`` parameter to an empty
+ string - https://github.com/ansible/ansible/issues/62613
+ - 'Ansible.ModuleUtils.WebRequest - actually set no proxy when ``use_proxy:
+ no`` is set on a Windows module - https://github.com/ansible/ansible/issues/68528'
+ - AnsibleDumper - Add a representer for AnsibleUnsafeBytes (https://github.com/ansible/ansible/issues/62562).
+ - 'AnsibleModule.run_command() - set ``close_fds`` to ``False`` on Python 2
+ if ``pass_fds`` are passed to ``run_command()``. Since ``subprocess.Popen()``
+ on Python 2 does not have the ``pass_fds`` option, there is no way to exclude
+ a specific list of file descriptors from being closed.
+
+ '
+      - Avoid bare select() for running commands, to avoid tasks failing due to
+        too-large file descriptor numbers
+ - Avoid running subfunctions that are passed to show_vars function when it will
+ be a noop.
+      - Pass the module_tmpdir as a parameter to the write_ssh_wrapper function
+        instead of initializing module_tmpdir via get_module_path()
+ - CLI - the `ANSIBLE_PLAYBOOK_DIR` envvar or `playbook_dir` config can now substitute
+ for the --playbook-dir arg on CLIs that support it (https://github.com/ansible/ansible/issues/59464)
+ - Check NoneType for raw_params before proceeding in include_vars (https://github.com/ansible/ansible/issues/64939).
+ - Collections - Allow a collection role to call a stand alone role, without
+ needing to explicitly add ``ansible.legacy`` to the collection search order
+ within the collection role. (https://github.com/ansible/ansible/issues/69101)
+ - Correctly process raw_params in add_hosts.
+ - Create an ``import_module`` compat util, for use across the codebase, to allow
+ collection loading to work properly on Python26
+ - DUPLICATE_YAML_DICT_KEY - Fix error output when configuration option DUPLICATE_YAML_DICT_KEY
+ is set to error (https://github.com/ansible/ansible/issues/65366)
+ - Do not keep empty blocks in PlayIterator after skipping tasks with tags.
+ - Ensure DataLoader temp files are removed at appropriate times and that we
+ observe the LOCAL_TMP setting.
+ - Ensure that ``--version`` works with non-ascii ansible project paths (https://github.com/ansible/ansible/issues/66617)
+      - Ensure that keywords defined as booleans correctly interpret their input;
+        before this patch, any random string would be interpreted as False
+ - Ensure we don't allow ansible_facts subkey of ansible_facts to override top
+ level, also fix 'deprefixing' to prevent key transforms.
+ - Fact Delegation - Add ability to indicate which facts must always be delegated.
+ Primarily for ``discovered_interpreter_python`` right now, but extensible
+ later. (https://github.com/ansible/ansible/issues/61002)
+ - 'Fix a bug when a host was not removed from a play after ``meta: end_host``
+ and as a result the host was still present in ``ansible_play_hosts`` and ``ansible_play_batch``
+ variables.'
+ - Fix an issue with the ``fileglob`` plugin where passing a subdirectory of
+ non-existent directory would cause it to fail - https://github.com/ansible/ansible/issues/69450
+ - Fix case sensitivity for ``lookup()`` (https://github.com/ansible/ansible/issues/66464)
+ - Fix collection install error that happened if a dependency specified dependencies
+ to be null (https://github.com/ansible/ansible/issues/67574).
+ - Fix https://github.com/ansible/galaxy-dev/issues/96 Add support for automation-hub
+ authentication to ansible-galaxy
+ - Fix incorrect "Could not match supplied host pattern" warning (https://github.com/ansible/ansible/issues/66764)
+ - Fix issue git module cannot use custom `key_file` or `ssh_opts` as non-root
+ user on system with noexec `/tmp` (https://github.com/ansible/ansible/issues/30064).
+ - Fix issue git module ignores remote_tmp (https://github.com/ansible/ansible/issues/33947).
+ - Fix issue where the collection loader tracebacks if ``collections_paths =
+ ./`` is set in the config
+ - Fix issue with callbacks ``set_options`` method that was not called with collections
+ - Fix label lookup in the default callback for includes (https://github.com/ansible/ansible/issues/65904)
+ - Fix regression when ``ansible_failed_task`` and ``ansible_failed_result``
+ are not defined in the rescue block (https://github.com/ansible/ansible/issues/64789)
+ - Fix string parsing of inline vault strings for plugin config variable sources
+ - Fix traceback when printing ``HostVars`` on native Jinja2 (https://github.com/ansible/ansible/issues/65365)
+ - Fixed a bug with the copy action plugin where mode=preserve was being passed
+ on symlink files and causing a traceback (https://github.com/ansible/ansible/issues/68471).
+      - Fixed the equality check for IncludedFiles to ensure they are not accidentally
+ merged when process_include_results runs.
+ - Fixes in network action plugins load from collections using module prefix
+ (https://github.com/ansible/ansible/issues/65071)
+ - Force collection names to be static so that a warning is generated because
+ templating currently does not work (see https://github.com/ansible/ansible/issues/68704).
+ - Handle empty extra vars in ansible cli (https://github.com/ansible/ansible/issues/61497).
+ - Handle empty roles and empty collections in requirements.yml in ansible-galaxy
+ install command (https://github.com/ansible/ansible/issues/68186).
+ - Handle exception encountered while parsing the argument description in module
+ when invoked via ansible-doc command (https://github.com/ansible/ansible/issues/60587).
+ - Handle exception when /etc/shadow file is missing or not found, while operating
+ user operation in user module (https://github.com/ansible/ansible/issues/63490).
+ - HostVarsVars - Template the __repr__ value (https://github.com/ansible/ansible/issues/64128).
+ - Make netconf plugin configurable to set ncclient device handler name in netconf
+ plugin (https://github.com/ansible/ansible/pull/65718)
+ - Make sure if a collection is supplied as a string that we transform it into
+ a list.
+ - Misc typo fixes in various documentation pages.
+ - Module arguments in suboptions which were marked as deprecated with ``removed_in_version``
+ did not result in a warning.
+ - On HTTP status code 304, return status_code
+ - Plugin Metadata is supposed to have default values. When the metadata was
+ missing entirely, we were properly setting the defaults. Fixed the metadata
+ parsing so that the defaults are also set when we were missing just a few
+ fields.
+ - Prevent a race condition when running handlers using a combination of the
+ free strategy and include_role.
+ - Prevent rewriting nested Block's data in filter_tagged_tasks
+ - Prevent templating unused variables for {% include %} (https://github.com/ansible/ansible/issues/68699)
+ - Properly handle unicode in ``safe_eval``. (https://github.com/ansible/ansible/issues/66943)
+ - Remove a temp directory created by wait_for_connection action plugin (https://github.com/ansible/ansible/issues/62407).
+ - Remove the unnecessary warning about aptitude not being installed (https://github.com/ansible/ansible/issues/56832).
+ - Remove unused Python imports in ``ansible-inventory``.
+ - Role Installation - Ensure that a role containing files with non-ascii characters
+ can be installed (https://github.com/ansible/ansible/issues/69133)
+ - RoleRequirement - include stderr in the error message if a scm command fails
+ (https://github.com/ansible/ansible/issues/41336)
+ - Skipping of become for ``network_cli`` connections now works when ``network_cli``
+ is sourced from a collection.
+ - Strictly check string datatype for 'tasks_from', 'vars_from', 'defaults_from',
+ and 'handlers_from' in include_role (https://github.com/ansible/ansible/issues/68515).
+ - Strip no log values from module response keys (https://github.com/ansible/ansible/issues/68400)
+      - TaskQueueManager - Explicitly set the multiprocessing start method to ``fork``
+ to avoid issues with the default on macOS now being ``spawn``.
+ - Templating - Ansible was caching results of Jinja2 expressions in some cases
+ where these expressions could have dynamic results, like password generation
+ (https://github.com/ansible/ansible/issues/34144).
+ - The ansible-galaxy publish command was using an incorrect URL for v3 servers.
+ The configuration for v3 servers includes part of the path fragment that was
+ added in the new test.
+ - Update ActionBase._low_level_execute_command to honor executable (https://github.com/ansible/ansible/issues/68054)
+ - Update the warning message for ``CONDITIONAL_BARE_VARS`` to list the original
+ conditional not the value of the original conditional (https://github.com/ansible/ansible/issues/67735)
+ - Use ``sys.exit`` instead of ``exit`` in ``ansible-inventory``.
+ - Use fqcr from command module invocation using shell module. Fixes https://github.com/ansible/ansible/issues/69788
+ - Use hostnamectl command to get current hostname for host while using systemd
+ strategy (https://github.com/ansible/ansible/issues/59438).
+ - Using --start-at-task would fail when it attempted to skip over tasks with
+ no name.
+ - Validate include args in handlers.
+ - Vault - Make the single vaulted value ``AnsibleVaultEncryptedUnicode`` class
+ work more like a string by replicating the behavior of ``collections.UserString``
+ from Python. These changes don't allow it to be considered a string, but most
+ common python string actions will now work as expected. (https://github.com/ansible/ansible/pull/67823)
+ - '``AnsibleUnsafe``/``AnsibleContext``/``Templar`` - Do not treat ``AnsibleUndefined``
+ as being "unsafe" (https://github.com/ansible/ansible/issues/65198)'
+      - account for empty strings when splitting the host pattern (https://github.com/ansible/ansible/issues/61964)
+ - add parameter name to warning message when values are converted to strings
+ (https://github.com/ansible/ansible/pull/57145)
+ - add_host action now correctly shows idempotency/changed status
+ - added 'unimplemented' prefix to file based caching
+ - added new option for default callback to compat variable to avoid old 3rd
+ party plugins from erroring out.
+ - adhoc CLI - when playbook-dir is specified and inside a collection, use default
+ collection logic to resolve modules/actions
+ - allow external collections to be created in the 'ansible' collection namespace
+ (https://github.com/ansible/ansible/issues/59988)
+ - also strip spaces around config values in pathlist as we do in list types
+ - ansiballz - remove '' and '.' from sys.path to fix a permissions issue on
+ OpenBSD with pipelining (#69320)
+ - ansible command now correctly sends v2_playbook_on_start to callbacks
+ - ansible-connection persists even after playbook run is completed (https://github.com/ansible/ansible/pull/61591)
+ - ansible-doc - Allow and give precedence to `removed_at_date` for deprecated
+ modules.
+ - ansible-doc now properly handles removed modules/plugins
+ - ansible-galaxy - Default collection install path to first path in COLLECTIONS_PATHS
+ (https://github.com/ansible/ansible/pull/62870)
+ - ansible-galaxy - Display proper error when invalid token is used for Galaxy
+ servers
+ - ansible-galaxy - Ensure we preserve the new URL when appending ``/api`` for
+ the case where the GET succeeds on galaxy.ansible.com
+ - ansible-galaxy - Expand the ``User-Agent`` to include more information and
+ add it to more calls to Galaxy endpoints.
+ - ansible-galaxy - Fix ``collection install`` when installing from a URL or
+ a file - https://github.com/ansible/ansible/issues/65109
+ - ansible-galaxy - Fix ``multipart/form-data`` body to include extra CRLF (https://github.com/ansible/ansible/pull/67942)
+ - ansible-galaxy - Fix issue when compared installed dependencies with a collection
+ having no ``MANIFEST.json`` or an empty version string in the json
+ - ansible-galaxy - Fix pagination issue when retrieving role versions for install
+ - https://github.com/ansible/ansible/issues/64355
+ - ansible-galaxy - Fix up pagination searcher for collection versions on Automation
+ Hub
+ - ansible-galaxy - Fix url building to not truncate the URL (https://github.com/ansible/ansible/issues/61624)
+ - ansible-galaxy - Handle the different task resource urls in API responses
+ from publishing collection artifacts to galaxy servers using v2 and v3 APIs.
+ - ansible-galaxy - Preserve symlinks when building and installing a collection
+      - ansible-galaxy - Remove unneeded verbose messages when accessing local token
+ file
+ - ansible-galaxy - Return the HTTP code reason if no error msg was returned
+ by the server - https://github.com/ansible/ansible/issues/64850
+ - ansible-galaxy - Send SHA256 hashes when publishing a collection
+ - ansible-galaxy - Set ``User-Agent`` to Ansible version when interacting with
+ Galaxy or Automation Hub
+ - ansible-galaxy - Treat the ``GALAXY_SERVER_LIST`` config entry that is defined
+ but with no values as an empty list
+ - ansible-galaxy - Utilize ``Templar`` for templating skeleton files, so that
+ they have access to Ansible filters/tests/lookups (https://github.com/ansible/ansible/issues/69104)
+ - 'ansible-galaxy - fix a bug where listing a specific role if it was not in
+ the first path failed to find the role
+
+ '
+      - ansible-galaxy - fix regression that prevented roles from being listed
+ - 'ansible-galaxy - hide warning during collection installation if other installed
+ collections do not contain a ``MANIFEST.json`` (https://github.com/ansible/ansible/issues/67490)
+
+ '
+ - ansible-galaxy - properly list roles when the role name also happens to be
+ in the role path (https://github.com/ansible/ansible/issues/67365)
+ - ansible-galaxy - properly show the role description when running offline (https://github.com/ansible/ansible/issues/60167)
+ - ansible-galaxy cli - fixed ``--version`` argument
+ - ansible-galaxy collection - Preserve executable bit on build and preserve
+ mode on install from what tar member is set to - https://github.com/ansible/ansible/issues/68415
+ - ansible-galaxy role - Fix issue where ``--server`` was not being used for
+ certain ``ansible-galaxy role`` actions - https://github.com/ansible/ansible/issues/61609
+      - ansible-galaxy - On giving an invalid subcommand to ansible-galaxy, the help
+        would be shown only for the role subcommand (collection subcommand help was
+        not shown). With this change, the entire help for ansible-galaxy (same as
+        ansible-galaxy --help) is displayed along with the help for the role subcommand.
+        (https://github.com/ansible/ansible/issues/69009)
+ - ansible-inventory - Fix long standing bug not loading vars plugins for group
+ vars relative to the playbook dir when the '--playbook-dir' and '--export'
+ flags are used together.
+ - ansible-inventory - Fix regression loading vars plugins. (https://github.com/ansible/ansible/issues/65064)
+ - ansible-inventory - Properly hide arguments that should not be shown (https://github.com/ansible/ansible/issues/61604)
+ - ansible-inventory - Restore functionality to allow ``--graph`` to be limited
+ by a host pattern
+ - ansible-test - Code cleanup.
+ - ansible-test - Disabled the ``duplicate-code`` and ``cyclic-import`` checks
+ for the ``pylint`` sanity test due to inconsistent results.
+ - ansible-test - Do not warn on missing PowerShell or C# util that are in other
+ collections
+ - ansible-test - Fix PowerShell module util analysis to properly detect the
+ names of a util when running in a collection
+ - ansible-test - Fix regression introduced in https://github.com/ansible/ansible/pull/67063
+ which caused module_utils analysis to fail on Python 2.x.
+ - ansible-test - Fix traceback in validate-modules test when argument_spec is
+ None.
+ - ansible-test - Make sure import sanity test virtual environments also remove
+ ``pkg-resources`` if it is not removed by uninstalling ``setuptools``.
+ - ansible-test - Remove out-of-date constraint on installing paramiko versions
+ 2.5.0 or later in tests.
+ - ansible-test - The ``import`` sanity test now correctly blocks access to python
+ modules, not just packages, in the ``ansible`` package.
+ - ansible-test - The ``import`` sanity test now correctly provides an empty
+ ``ansible`` package.
+ - ansible-test - The shebang sanity test now correctly identifies modules in
+ subdirectories in collections.
+ - ansible-test - Updated Python constraints for installing ``coverage`` to resolve
+ issues on multiple Python versions when using the ``--coverage`` option.
+ - ansible-test - Updated requirements to limit ``boto3`` and ``botocore`` versions
+ on Python 2.6 to supported versions.
+ - ansible-test - Use ``sys.exit`` instead of ``exit``.
+ - ansible-test - Use ``virtualenv`` versions before 20 on provisioned macOS
+ instances to remain compatible with an older pip install.
+ - ansible-test - avoid use of deprecated junit_xml method
+ - ansible-test - bump version of ACME test container. The new version includes
+ updated dependencies.
+ - ansible-test - during module validation, handle add_file_common_args only
+ for top-level arguments.
+ - ansible-test - during module validation, improve alias handling.
+ - ansible-test - for local change detection, allow to specify branch to compare
+ to with ``--base-branch`` for all types of tests (https://github.com/ansible/ansible/pull/69508).
+ - ansible-test - improve ``deprecate()`` call checker.
+ - ansible-test can now install argparse with ``--requirements`` or delegation
+ when the pip version in use is older than version 7.1
+ - ansible-test change detection - Run only sanity tests on ``docs/`` and ``changelogs/``
+ in collections, to avoid triggering full CI runs of integration and unit tests
+ when files in these directories change.
+ - ansible-test coverage - Fix the ``--all`` argument when generating coverage
+ reports - https://github.com/ansible/ansible/issues/62096
+ - ansible-test import sanity test now consistently reports errors against the
+ file being tested.
+ - ansible-test import sanity test now consistently reports warnings as errors.
+ - ansible-test import sanity test now properly handles relative imports.
+ - ansible-test import sanity test now properly invokes Ansible modules as scripts.
+ - ansible-test is now able to find its ``egg-info`` directory when it contains
+ the Ansible version number
+ - ansible-test no longer errors reporting coverage when no Python coverage exists.
+ This fixes issues reporting on PowerShell only coverage from collections.
+ - ansible-test no longer fails when downloading test results for a collection
+ without a ``tests`` directory when using the ``--docker`` option.
+ - ansible-test no longer optimizes setting ``PATH`` by prepending the directory
+ containing the selected Python interpreter when it is named ``python``. This
+ avoids unintentionally making other programs available on ``PATH``, including
+ an already installed version of Ansible.
+ - ansible-test no longer tracebacks during change analysis due to processing
+ an empty python file
+ - ansible-test no longer tries to install ``coverage`` 5.0+ since those versions
+ are unsupported
+ - ansible-test no longer tries to install ``setuptools`` 45+ on Python 2.x since
+ those versions are unsupported
+ - ansible-test now correctly collects code coverage on the last task in a play.
+ This should resolve issues with missing code coverage, empty coverage files
+ and corrupted coverage files resulting from early worker termination.
+ - ansible-test now correctly enumerates submodules when a collection resides
+ below the repository root
+ - ansible-test now correctly excludes the test results temporary directory when
+ copying files from the remote test system to the local system
+ - ansible-test now correctly includes inventory files ignored by git when running
+ tests with the ``--docker`` option
+ - ansible-test now correctly installs the requirements specified by the collection's
+ unit and integration tests instead of the requirements specified for Ansible's
+ own unit and integration tests
+ - ansible-test now correctly recognizes imports in collections when using the
+ ``--changed`` option.
+ - ansible-test now correctly rewrites coverage paths for PowerShell files when
+ testing collections
+ - ansible-test now creates its integration test temporary directory within the
+ collection so ansible-playbook can properly detect the default collection
+ - ansible-test now enables color ``ls`` on a remote host only if the host supports
+ the feature
+ - ansible-test now ignores empty ``*.py`` files when analyzing module_utils
+ imports for change detection
+ - ansible-test now ignores version control within subdirectories of collections.
+ Previously this condition was an error.
+ - ansible-test now ignores warnings when comparing pip versions before and after
+ integration tests run
+ - ansible-test now installs sanity test requirements specific to each test instead
+ of installing requirements for all sanity tests
+ - ansible-test now installs the correct version of ``cryptography`` with ``--requirements``
+ or delegation when setuptools is older than version 18.5
+ - ansible-test now limits Jinja2 installs to version 2.10 and earlier on Python
+ 2.6
+ - ansible-test now limits ``pathspec`` to versions prior to 0.6.0 on Python
+ 2.6 to avoid installation errors
+ - ansible-test now limits installation of ``hcloud`` to Python 2.7 and 3.5 -
+ 3.8 since other versions are unsupported
+ - ansible-test now limits the version of ``setuptools`` on Python 2.6 to versions
+ older than 37
+ - ansible-test now loads the collection loader plugin early enough for ansible_collections
+ imports to work in unit test conftest.py modules
+ - ansible-test now preserves existing SSH authorized keys when provisioning
+ a remote host
+ - ansible-test now properly activates the vcenter plugin for vcenter tests when
+ docker is available
+ - ansible-test now properly activates virtual environments created using the
+ --venv option
+ - ansible-test now properly creates a virtual environment using ``venv`` when
+ running in a ``virtualenv`` created virtual environment
+ - ansible-test now properly excludes the ``tests/output/`` directory from code
+ coverage
+ - ansible-test now properly handles creation of Python execv wrappers when the
+ selected interpreter is a script
+ - ansible-test now properly handles enumeration of git submodules. Enumeration
+ is now done with ``git submodule status --recursive`` without specifying ``.``
+ for the path, since that could cause the command to fail. Instead, relative
+ paths outside the current directory are filtered out of the results. Errors
+ from ``git`` commands will now once again be reported as errors instead of
+ warnings.
+ - ansible-test now properly handles warnings for removed modules/plugins
+ - ansible-test now properly ignores the ``tests/output//`` directory when not
+ using git
+ - ansible-test now properly installs requirements for multiple Python versions
+ when running sanity tests
+ - ansible-test now properly recognizes modules and module_utils in collections
+ when using the ``blacklist`` plugin for the ``pylint`` sanity test
+ - ansible-test now properly registers its own code in a virtual environment
+ when running from an install
+ - ansible-test now properly reports import errors for collections when running
+ the import sanity test
+ - ansible-test now properly searches for ``pythonX.Y`` instead of ``python``
+ when looking for the real python that created a ``virtualenv``
+ - ansible-test now properly sets PYTHONPATH for tests when running from an Ansible
+ installation
+ - ansible-test now properly sets ``ANSIBLE_PLAYBOOK_DIR`` for integration tests
+ so unqualified collection references work for adhoc ``ansible`` usage
+ - ansible-test now properly uses a fresh copy of environment variables for each
+ command invocation to avoid mixing vars between commands
+ - ansible-test now shows sanity test doc links when installed (previously the
+ links were only visible when running from source)
+ - ansible-test now shows the correct source path instead of ``%s`` for collection
+ role based test targets when the ``-v`` option is used
+ - ansible-test now supports submodules using older ``git`` versions which require
+ querying status from the top level directory of the repo.
+ - ansible-test now updates SSH keys it generates with newer versions of ssh-keygen
+ to function with Paramiko
+      - ansible-test now upgrades ``pip`` with ``--requirements`` or delegation as
+ needed when the pip version in use is older than version 7.1
+ - ansible-test now uses GNU tar format instead of the Python default when creating
+ payloads for remote systems
+ - ansible-test now uses ``pycodestyle`` frozen at version 2.6.0 for consistent
+ test results.
+ - ansible-test now uses modules from the ``ansible.windows`` collection for
+ setup and teardown of ``windows-integration`` tests and code coverage
+ - ansible-test once again properly collects code coverage for ``ansible-connection``
+ - ansible-test validate-modules - Fix arg spec collector for PowerShell to find
+ utils in both a collection and base.
+ - ansible-test validate-modules sanity test code ``missing-module-utils-import-c#-requirements``
+ is now ``missing-module-utils-import-csharp-requirements`` (fixes ignore bug).
+ - ansible-test validate-modules sanity test code ``multiple-c#-utils-per-requires``
+ is now ``multiple-csharp-utils-per-requires`` (fixes ignore bug).
+ - ansible-test validate-modules sanity test now checks for AnsibleModule initialization
+ instead of module_utils imports, which did not work in many cases.
+ - ansible-test validate-modules sanity test now properly handles collections
+ imports using the Ansible collection loader.
+ - ansible-test validate-modules sanity test now properly handles relative imports.
+ - ansible-test validate-modules sanity test now properly handles sys.exit in
+ modules.
+ - ansible-test validate-modules sanity test now properly invokes Ansible modules
+ as scripts.
+ - ansible-test windows coverage - Ensure coverage reports are UTF-8 encoded
+ without a BOM
+      - ansible-test windows coverage - Output temp files as UTF-8 with BOM to standardise
+        against non-coverage runs
+      - ansible-vault - Fix ``encrypt_string`` output in a tty when using the ``--stdin-name``
+        option (https://github.com/ansible/ansible/issues/65121)
+ - ansible-vault create - Fix exception on no arguments given
+      - apt - Fixed an issue where the cache was being updated while auto-installing
+        its dependencies even when ``update_cache`` is set to false.
+ - assemble module - fix documentation - the remote_src property specified a
+ default value of no but it's actually yes.
+ - avoid fatal traceback when a bad FQCN for a callback is supplied in the whitelist
+ (#69401).
+ - become - Fix various plugins that still used play_context to get the become
+ password instead of through the plugin - https://github.com/ansible/ansible/issues/62367
+ - 'blockinfile - fix regression that results in incorrect block in file when
+ the block to be inserted does not end in a line separator (https://github.com/ansible/ansible/pull/69734)
+
+ '
+ - blockinfile - preserve line endings on update (https://github.com/ansible/ansible/issues/64966)
+ - clean_facts - use correct variable to avoid unnecessary handling of ``AttributeError``
+ - code - removes some Python compatibility code for dealing with socket timeouts
+ in ``wait_for``
+ - collection loader - ensure Jinja function cache is fully-populated before
+ lookup
+ - collection loader - fixed relative imports on Python 2.7, ensure pluginloader
+ caches use full name to prevent names from being clobbered (https://github.com/ansible/ansible/pull/60317)
+ - collection_loader - sort Windows modules below other plugin types so the correct
+ builtin plugin inside a role is selected (https://github.com/ansible/ansible/issues/65298)
+ - collections - Handle errors better for filters and tests in collections, where
+ a non-existent collection is specified, or importing the plugin results in
+ an exception (https://github.com/ansible/ansible/issues/66721)
+      - combine filter - ``[dict1, [dict2]] | combine`` now raises an error; previously
+        ``combine`` had an undocumented behaviour of flattening the list before combining
+        it (https://github.com/ansible/ansible/pull/57894#discussion_r339517518).
+ - config - encoding failures on config values should be non-fatal (https://github.com/ansible/ansible/issues/63310)
+ - copy - Fix copy modes when using remote_src=yes and src is a directory with
+ trailing slash.
+      - copy - Fixed the copy module not working when remote_src is enabled and
+        dest ends in a / (https://github.com/ansible/ansible/pull/47238)
+ - copy - recursive copy with ``remote_src=yes`` now recurses beyond first level.
+ (Fixes https://github.com/ansible/ansible/issues/58284)
+ - core - remove unneeded Python version checks.
+ - core - replace a compatibility import of pycompat24.literal_eval with ast.literal_eval.
+ - core filters - fix ``extract()`` filter when key does not exist in container
+ (https://github.com/ansible/ansible/issues/64957)
+ - cron and cronvar - use get_bin_path utility to locate the default crontab
+ executable instead of the hardcoded /usr/bin/crontab. (https://github.com/ansible/ansible/pull/59765)
+ - cron cronvar - only run ``get_bin_path()`` once
+ - cronvar - use correct binary name (https://github.com/ansible/ansible/issues/63274)
+      - deal with cases in which just a file is passed and not a path with directories;
+        now fileglob correctly searches in 'files/' subdirs.
+ - debug - fixed an issue introduced in Ansible 2.4 where a loop of debug tasks
+ would lose the "changed" status on each item.
+      - discovery will NOT update the incorrect host anymore when in a delegate_to task.
+ - display - Improve method of removing extra new line after warnings so it does
+ not break Tower/Runner (https://github.com/ansible/ansible/pull/68517)
+ - display - remove extra new line after warnings (https://github.com/ansible/ansible/pull/65199)
+ - display - remove leading space when displaying WARNING messages
+ - display logging - Fix issue where 3rd party modules will print tracebacks
+ when attempting to log information when ``ANSIBLE_LOG_PATH`` is set - https://github.com/ansible/ansible/issues/65249
+ - display logging - Fixed up the logging formatter to use the proper prefixes
+ for ``u=user`` and ``p=process``
+ - display logging - Re-added the ``name`` attribute to the log formatter so
+ that the source of the log can be seen
+ - 'dnf - Fix idempotence of `state: installed` (https://github.com/ansible/ansible/issues/64963)'
+ - dnf - Unified error messages when trying to install a nonexistent package
+ with newer dnf (4.2.18) vs older dnf (4.2.9)
+ - dnf - Unified error messages when trying to remove a wildcard name that is
+ not currently installed, with newer dnf (4.2.18) vs older dnf (4.2.9)
+ - dnf - enable logging using setup_loggers() API in dnf-4.2.17-6 or later
+ - dnf - remove custom ``fetch_rpm_from_url`` method in favor of more general
+ ``ansible.module_utils.urls.fetch_file``.
+      - dnf module - Ensure the module's exit_json['msg'] response is always a string,
+ not sometimes a tuple.
+ - ensure we pass on interpreter discovery values to delegated host.
+ - env lookup plugin - Fix handling of environment variables values containing
+ utf-8 characters. (https://github.com/ansible/ansible/issues/65298)
+ - fact gathering - Display warnings and deprecation messages that are created
+ during the fact gathering phase
+ - facts - fix detection of virtualization type when dmi product name is KVM
+ Server
+ - facts - introduce fact "ansible_processor_nproc" which reflects the number
+ of vcpus available to processes (falls back to the number of vcpus available
+ to the scheduler)
+ - file - Removed unreachable code in module
+ - file - change ``_diff_peek`` in argument spec to be the correct type, which
+ is ``bool`` (https://github.com/ansible/ansible/issues/59433)
+ - 'file - return ``''state'': ''absent''`` when a file does not exist (https://github.com/ansible/ansible/issues/66171)'
+ - find - clarify description of ``contains`` (https://github.com/ansible/ansible/issues/61983)
+ - fix issue in which symlinked collection cannot be listed, though the docs/plugins
+ can be loaded if referenced directly.
+      - fix wrong command line length calculation in ``ansible-console`` when a
+        long command is entered
+      - 'for those running under uids without a valid user (containers), fall back
+        to uid=<uid> when logging (fixes #68007)'
+ - free strategy - Include failed hosts when filtering notified hosts for handlers.
+ The strategy base should determine whether or not to run handlers on those
+ hosts depending on whether forcing handlers is enabled (https://github.com/ansible/ansible/issues/65254).
+ - galaxy - Fix an AttributeError on ansible-galaxy install with an empty requirements.yml
+ (https://github.com/ansible/ansible/issues/66725).
+ - get_url - Don't treat no checksum as a checksum match (https://github.com/ansible/ansible/issues/61978)
+      - get_url - fix passing an incorrect If-Modified-Since header (https://github.com/ansible/ansible/issues/67417)
+ - git - when force=True, apply --force flag to git fetches as well
+      - 'group - The group module was not correctly detecting whether a local group
+        exists when local is set to yes and a group with the same name exists in
+        a non-local group repository, e.g. LDAP. (https://github.com/ansible/ansible/issues/58619)
+
+        '
+      - group_by should now correctly reflect changed status.
+ - hostname - Fixed an issue where the hostname on the cloudlinux 6 server could
+ not be set.
+ - hostname - make module work on Manjaro Linux (https://github.com/ansible/ansible/issues/61382)
+ - hurd - Address FIXMEs. Extract functionality and exit early.
+ - include_vars - fix stack trace when passing ``dirs`` in an ad-hoc command
+ (https://github.com/ansible/ansible/issues/62633)
+ - interpreter discovery will now use correct vars (from delegated host) when
+ in delegate_to task.
+ - junit callback - avoid use of deprecated junit_xml method
+ - lineinfile - add example of using alternative backrefs syntax (https://github.com/ansible/ansible/issues/42794)
+ - lineinfile - don't attempt mkdirs when path doesn't contain directory path
+ - lineinfile - fix bug that caused multiple line insertions (https://github.com/ansible/ansible/issues/58923).
+ - lineinfile - properly handle inserting a line when backrefs are enabled and
+ the line already exists in the file (https://github.com/ansible/ansible/issues/63756)
+ - lineinfile - use ``module.tmpdir`` to allow configuration of the remote temp
+ directory (https://github.com/ansible/ansible/issues/68218)
+ - lineinfile - use correct index value when inserting a line at the end of a
+ file (https://github.com/ansible/ansible/issues/63684)
+ - loops - Do not indiscriminately mark loop items as unsafe, only apply unsafe
+ to ``with_`` style loops. The items from ``loop`` should not be explicitly
+ wrapped in unsafe. The underlying templating mechanism should dictate this.
+ (https://github.com/ansible/ansible/issues/64379)
+ - make ``no_log=False`` on a module option silence the ``no_log`` warning (https://github.com/ansible/ansible/issues/49465
+ https://github.com/ansible/ansible/issues/64656)
+ - match docs for ssh and ensure pipelining is configurable per connection plugin.
+      - module executor - Address issue where changes to Ansiballz module code changed
+ the behavior of module execution as it pertains to ``__file__`` and ``sys.modules``
+ (https://github.com/ansible/ansible/issues/64664)
+ - module_defaults - support candidate action names for relocated content
+ - module_defaults - support short group names for content relocated to collections
+      - now correctly merge, and not just overwrite, facts when gathering using multiple
+ modules.
+ - objects - Remove FIXME comment because no fix is needed.
+      - optimize 'smart' detection so it is not run over and over, preferably doing
+ it at config time.
+ - package_facts - fix value of ``vital`` attribute which is returned when ``pkg``
+ manager is used
+ - package_facts - use module warnings rather than a custom implementation for
+ reporting warnings
+ - packaging_yum - replace legacy file handling with a file manager.
+ - paramiko - catch and handle exception to prevent stack trace when running
+ in FIPS mode
+ - paramiko_ssh - Removed redundant conditional statement in ``_parse_proxy_command``
+ that always evaluated to True.
+ - paramiko_ssh - improve authentication error message so it is less confusing
+ - paramiko_ssh - optimized file handling by using a context manager.
+ - pip - The virtualenv_command option can now include arguments without requiring
+ the full path to the binary. (https://github.com/ansible/ansible/issues/52275)
+ - 'pip - check_mode with ``state: present`` now returns the correct state for
+ pre-release versioned packages'
+ - plugins - Allow ensure_type to decrypt the value for string types (and implicit
+ string types) when value is an inline vault.
+ - psexec - Fix issue where the Kerberos package was not detected as being available.
+ - psexec - Fix issue where the ``interactive`` option was not being passed down
+ to the library.
+ - reboot, win_reboot - add ``boot_time_command`` parameter to override the default
+ command used to determine whether or not a system was rebooted (https://github.com/ansible/ansible/issues/58868)
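+ # A minimal usage sketch of the new parameter above; the probe command shown
+ # is hypothetical and host-specific:
+ # - name: Reboot using a custom probe for the last boot time
+ #   reboot:
+ #     boot_time_command: cat /proc/sys/kernel/random/boot_id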
+ - remove update/restore of vars from play_context as it is now redundant.
+ - replace use of deprecated functions from ``ansible.module_utils.basic``.
+ - 'roles - Ensure that ``allow_duplicates: true`` enables running a single role
+ multiple times (https://github.com/ansible/ansible/issues/64902)'
+ - runas - Fix the ``runas`` ``become_pass`` variable fallback from ``ansible_runas_runas``
+ to ``ansible_runas_pass``
+ - service_facts - Now correctly parses systemd list-unit-files for systemd >=245
+ - setup - properly detect yum package manager for IBM i.
+ - setup - service_mgr - detect systemd even if it isn't running, such as during
+ a container build
+ - shell cmd - Properly escape double quotes in the command argument
+ - synchronize - allow data to be passed between two managed nodes when using
+ the docker connection plugin (https://github.com/ansible/ansible/pull/65698)
+ - synchronize - fix password authentication on Python 2 (https://github.com/ansible/ansible/issues/56629)
+ - sysctl - Remove FIXME comments to avoid confusion
+ - systemd - don't require systemd to be running to enable/disable or mask/unmask
+ units
+ - systemd - the module should fail in check_mode when service not found on host
+ (https://github.com/ansible/ansible/pull/68136).
+ - sysvinit - Add missing parameter ``module`` in call to ``daemonize()``.
+ - template lookup - ensure changes to the templar in the lookup do not affect
+ the templar context outside of the lookup (https://github.com/ansible/ansible/issues/60106)
+ - template lookup - fix regression when templating hostvars (https://github.com/ansible/ansible/issues/63940)
+ - the default parsing will now show existing JSON errors and not just the last
+ attempted YAML error; we also avoid YAML parsing when we know we only want JSON
+ - 'throttle: the linear strategy didn''t always stick to the throttle limit'
+ - unarchive - Remove incorrect and unused function arguments.
+ - unsafe_proxy - Ensure that data within a tuple is marked as unsafe (https://github.com/ansible/ansible/issues/65722)
+ - 'update ``user`` module to support silencing ``no_log`` warnings in the future
+ (see: https://github.com/ansible/ansible/pull/64733)'
+ - uri - Don't return the body even if it failed (https://github.com/ansible/ansible/issues/21003)
+ - user - allow 13 asterisk characters in password field without warning
+ - user - fix comparison on macOS so the module does not improperly report a change
+ (https://github.com/ansible/ansible/issues/62969)
+ - user - fix stack trace on AIX when attempting to parse shadow file that does
+ not exist (https://github.com/ansible/ansible/issues/62510)
+ - user - on systems using busybox, honor the ``on_changed`` parameter to prevent
+ unnecessary password changing (https://github.com/ansible/ansible/issues/65711)
+ - user - update docs to reflect proper way to remove account from all groups
+ - validate-modules - Fix hang when inspecting module with a delegate args spec
+ type
+ - virtual facts - detect generic container environment based on non-empty "container"
+ env var
+ - wait_for_connection - with pipelining enabled, interpreter discovery would
+ fail if the first connection attempt was not successful
+ - win_exec_wrapper - Be more defensive when it comes to getting unhandled exceptions
+ - win_package - Handle quoted and unquoted strings in the registry ``UninstallString``
+ value - https://github.com/ansible/ansible/issues/40973
+ - 'win_uri win_get_url - Fix the behaviour of ``follow_redirects: safe`` to
+ actually redirect on ``GET`` and ``HEAD`` requests - https://github.com/ansible/ansible/issues/65556'
+ - windows environment - Support env vars that contain the unicode variant of
+ single quotes - https://github.com/ansible-collections/ansible.windows/issues/45
+ - yum - fix bug that caused ``enablerepo`` to not be honored when used with
+ disablerepo all wildcard/glob (https://github.com/ansible/ansible/issues/66549)
+ - yum - fixed the handling of releasever parameter
+ - yum - performance bugfix, the YumBase object was being instantiated multiple
+ times unnecessarily, which led to considerable overhead when operating against
+ large sets of packages.
+ - yum - yum tasks can no longer end up running non-yum modules
+ - yum/dnf - check the type of elements in the ``name`` list
+ deprecated_features:
+ - Using the DefaultCallback without the corresponding doc_fragment or copying
+ the documentation.
+ - hash_behaviour - Deprecate ``hash_behaviour`` for future removal.
+ - script inventory plugin - The 'cache' option is deprecated and will be removed
+ in 2.12. Its use has been removed from the plugin since it has never had any
+ effect.
+ major_changes:
+ - Both ansible-doc and ansible-console's help command will error for modules
+ and plugins whose return documentation cannot be parsed as YAML. All modules
+ and plugins passing ``ansible-test sanity --test yamllint`` will not be affected
+ by this.
+ - Collections may declare a list of supported/tested Ansible versions for the
+ collection. A warning is issued if a collection does not support the Ansible
+ version that loads it (can also be configured as silent or a fatal error).
+ Collections that do not declare supported Ansible versions do not issue a
+ warning/error.
+ - Plugin routing allows collections to declare deprecation, redirection targets,
+ and removals for all plugin types.
+ - Plugins that import module_utils and other ansible namespaces that have moved
+ to collections should continue to work unmodified.
+ - Routing data built into Ansible 2.10 ensures that 2.9 content should work
+ unmodified on 2.10. Formerly included modules and plugins that were moved
+ to collections are still accessible by their original unqualified names, so
+ long as their destination collections are installed.
+ - When deprecations are done in code, they need to specify a ``collection_name``
+ so that deprecation warnings can mention which collection - or ansible-base
+ - is deprecating a feature. This affects all ``Display.deprecated()`` or ``AnsibleModule.deprecate()``
+ or ``Ansible.Basic.Deprecate()`` calls, and ``removed_in_version``/``removed_at_date``
+ or ``deprecated_aliases`` in module argument specs.
+ - ansible-test now uses a different ``default`` test container for Ansible Collections
+ minor_changes:
+ - '''Edit on GitHub'' link for plugin and CLI documentation fixed to navigate
+ to the correct plugin or CLI source.'
+ - Add 'auth_url' field to galaxy server config stanzas in ansible.cfg. The URL
+ should point to the token_endpoint of a Keycloak server.
+ - Add --ask-vault-password and --vault-pass-file options to ansible cli commands
+ - Add ``--pre`` flag to ``ansible-galaxy collection install`` to allow pulling
+ in the most recent pre-release version of a collection (https://github.com/ansible/ansible/issues/64905)
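+ # A hedged usage sketch of the flag above; the collection name is hypothetical:
+ #   ansible-galaxy collection install my_namespace.my_collection --pre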
+ - Add a global toggle to control when vars plugins are executed (per task by
+ default for backward compatibility or after importing inventory).
+ - Add a new config parameter, WIN_ASYNC_STARTUP_TIMEOUT, which allows configuration
+ of the named pipe connection timeout under Windows when launching async tasks.
+ - Add a per-plugin stage option to override the global toggle to control the
+ execution of individual vars plugins (per task, after inventory, or both).
+ - Add an additional check for importing journal from systemd-python module (https://github.com/ansible/ansible/issues/60595).
+ - Add new magic variable ``ansible_collection`` that contains the collection
+ name
+ - Add new magic variable ``ansible_role_name`` that contains the FQCN of the
+ role
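+ # A minimal sketch showing both new magic variables above from inside a role task:
+ # - debug:
+ #     msg: "role {{ ansible_role_name }} resolved from collection {{ ansible_collection }}"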
+ - Added PopOS as a part of Debian OS distribution family (https://github.com/ansible/ansible/issues/69286).
+ - Added hostname support for PopOS in hostname module.
+ - Added openEuler OS in RedHat OS Family.
+ - Added the ability to set ``DEFAULT_NO_TARGET_SYSLOG`` through the ``ansible_no_target_syslog``
+ variable on a task
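+ # A hedged sketch of the per-task variable described above:
+ # - command: whoami
+ #   vars:
+ #     ansible_no_target_syslog: true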
+ - Ansible CLI fails with a warning if the extra_vars parameter is used with a
+ filename missing the leading @ sign (https://github.com/ansible/ansible/issues/51857).
+ - Ansible modules created with ``add_file_common_args=True`` added a number
+ of undocumented arguments which were mostly there to ease implementing certain
+ action plugins. The undocumented arguments ``src``, ``follow``, ``force``,
+ ``content``, ``backup``, ``remote_src``, ``regexp``, ``delimiter``, and ``directory_mode``
+ are now no longer added. Modules relying on these options to be added need
+ to specify them by themselves. Also, action plugins relying on these extra
+ elements in ``FILE_COMMON_ARGUMENTS`` need to be adjusted.
+ - Ansible now allows deprecation by date instead of deprecation by version.
+ This is possible for plugins and modules (``meta/runtime.yml`` and ``deprecated.removed_at_date``
+ in ``DOCUMENTATION``, instead of ``deprecated.removed_in``), for plugin options
+ (``deprecated.date`` instead of ``deprecated.version`` in ``DOCUMENTATION``),
+ for module options (``removed_at_date`` instead of ``removed_in_version``
+ in argument spec), and for module option aliases (``deprecated_aliases.date``
+ instead of ``deprecated_aliases.version`` in argument spec).
+ - Ansible now fails with an error when a non-existent limit file is provided
+ on the command line.
+ - Ansible.Basic - Added the ability to specify multiple fragments to load in
+ a generic way for modules that use a module_util with fragment options
+ - Ansible.Basic.cs - Added support for ``deprecated_aliases`` to deprecate
+ aliases in a standard way
+ - Ansible.ModuleUtils.WebRequest - Move username and password aliases out of
+ util to avoid option name collision
+ - Change order of arguments in ansible cli to use --ask-vault-password and --vault-password-file
+ by default
+ - CollectionRequirement - Add a metadata property to update and retrieve the
+ _metadata attribute.
+ - Enable Ansible Collections loader to discover and import collections from
+ ``site-packages`` dir and ``PYTHONPATH``-added locations.
+ - Enable testing the AIX platform as a remote OS in ansible-test
+ - Flatten the directory hierarchy of modules
+ - Ignore plesk-release file while parsing distribution release (https://github.com/ansible/ansible/issues/64101).
+ - Openstack inventory script is migrated to ansible-openstack-collection, adjusted
+ the link in documentation accordingly.
+ - Openstack inventory script is moved to openstack.cloud from community.general.
+ - PowerShell Add-Type - Add an easier way to reference extra types when compiling
+ C# code on PowerShell Core
+ - PowerShell Add-Type - Added the ``X86`` and ``AMD64`` preprocessor symbols
+ for conditional compiling
+ - Prevent losing useful error information by including both the loop and the
+ conditional error messages (https://github.com/ansible/ansible/issues/66529)
+ - Provides additional information about collection namespace name restrictions
+ (https://github.com/ansible/ansible/issues/65151).
+ - Raise error when no task file is provided to import_tasks (https://github.com/ansible/ansible/issues/54095).
+ - Refactor test_distribution_version testcases.
+ - Remove the deprecation message for the ``TRANSFORM_INVALID_GROUP_CHARS`` setting.
+ (https://github.com/ansible/ansible/issues/61889)
+ - Removed extras_require support from setup.py (and [azure] extra). Requirements
+ will float with the collections, so it's not appropriate for ansible-base
+ to host requirements for them any longer.
+ - Simplify dict2items filter example in loop documentation (https://github.com/ansible/ansible/issues/65505).
+ - Templating - Add globals to the jinja2 environment at ``Templar`` instantiation,
+ instead of customizing the template object. Only customize the template object,
+ to disable lookups. (https://github.com/ansible/ansible/pull/69278)
+ - Templating - Add support to auto unroll generators produced by jinja2 filters,
+ to prevent the need of explicit use of ``|list`` (https://github.com/ansible/ansible/pull/68014)
+ - The results queue and counter for results are now split for standard / handler
+ results. This allows the governing strategy to be truly independent from the
+ handler strategy, which basically follows the linear methodology.
+ - Update the required-library message in basic.py with correct grammar.
+ - Updated inventory script location for EC2, Openstack, and Cobbler after collection
+ migration (https://github.com/ansible/ansible/issues/68897).
+ - Updated inventory script location for infoblox, ec2 and other after collection
+ migration (https://github.com/ansible/ansible/issues/69139).
+ - Updates ``ansible_role_names``, ``ansible_play_role_names``, and ``ansible_dependent_role_names``
+ to include the FQCN
+ - Use OrderedDict by default when importing mappings from YAML.
+ - Windows - Add a check for the minimum PowerShell version so we can create
+ a friendly error message on older hosts
+ - Windows - add deprecation notice in the Windows setup module when running
+ on Server 2008, 2008 R2, and Windows 7
+ - '`AnsibleModule.fail_json()` has always required that a message be passed
+ in which informs the end user why the module failed. In the past this message
+ had to be passed as the `msg` keyword argument but it can now be passed as
+ the first positional argument instead.'
+ - '``AnsibleModule.load_file_common_arguments`` now allows simply overriding
+ ``path``.'
+ - add mechanism for storing warnings and deprecations globally and not attached
+ to an ``AnsibleModule`` object (https://github.com/ansible/ansible/pull/58993)
+ - added more ways to configure new uri options in 2.10.
+ - ansible-doc - improve suboptions formatting (https://github.com/ansible/ansible/pull/69795).
+ - ansible-doc - now indicates if an option is added by a doc fragment from another
+ collection by prepending the collection name, or ``ansible.builtin`` for ansible-base,
+ to the version number.
+ - ansible-doc - return values will be properly formatted (https://github.com/ansible/ansible/pull/69796).
+ - ansible-galaxy - Add ``download`` option for ``ansible-galaxy collection``
+ to download collections and their dependencies for an offline install
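+ # A hedged usage sketch of the new subcommand; the requirements file and the
+ # download path are hypothetical:
+ #   ansible-galaxy collection download -r requirements.yml -p ./offline-collections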
+ - ansible-galaxy - Add a `verify` subcommand to `ansible-galaxy collection`.
+ The collection found on the galaxy server is downloaded to a tempfile to compare
+ the checksums of the files listed in the MANIFEST.json and the FILES.json
+ with the contents of the installed collection.
+ - ansible-galaxy - Added the ability to display the progress wheel through the
+ C.GALAXY_DISPLAY_PROGRESS config option. Also this now defaults to displaying
+ the progress wheel if stdout has a tty.
+ - ansible-galaxy - Added the ability to ignore further files and folders using
+ a pattern with the ``build_ignore`` key in a collection's ``galaxy.yml`` (https://github.com/ansible/ansible/issues/59228).
+ - ansible-galaxy - Allow installing collections from git repositories.
+ - ansible-galaxy - Always ignore the ``tests/output`` directory when building
+ a collection as it is used by ``ansible-test`` for test output (https://github.com/ansible/ansible/issues/59228).
+ - ansible-galaxy - Display message if both collections and roles are specified
+ in a requirements file but can't be installed together.
+ - ansible-galaxy - Install both collections and roles with ``ansible-galaxy
+ install -r requirements.yml`` in certain scenarios.
+ - ansible-galaxy - Requirement entries for collections now support a 'type'
+ key to indicate whether the collection is a galaxy artifact, file, url, or
+ git repo.
+ - ansible-galaxy - add ``--token`` argument which is the same as ``--api-key``
+ (https://github.com/ansible/ansible/issues/65955)
+ - ansible-galaxy - add ``collection list`` command for listing installed collections
+ (https://github.com/ansible/ansible/pull/65022)
+ - ansible-galaxy - add ``validate_collection_path()`` utility function
+ - ansible-galaxy - add collections path argument
+ - ansible-galaxy - allow a role to define dependency requirements that will be
+ installed by defining them in ``meta/requirements.yml`` (https://github.com/ansible/proposals/issues/57)
+ - ansible-test - --docker flag now has an associated --docker-terminate flag
+ which controls if and when the docker container is removed following tests
+ - ansible-test - Add a test to prevent ``state=get``
+ - ansible-test - Add a test to prevent ``state=list`` and ``state=info``
+ - ansible-test - Add a verbosity option for displaying warnings.
+ - ansible-test - Add support for Python 3.9.
+ - ansible-test - Added CI provider support for Azure Pipelines.
+ - ansible-test - Added an ``ansible-test coverage analyze targets filter`` command
+ to filter aggregated coverage reports by path and/or target name.
+ - ansible-test - Added an ``ansible-test coverage analyze targets`` command to
+ analyze integration test code coverage by test target.
+ - ansible-test - Added support for Ansible Core CI request signing for Shippable.
+ - ansible-test - Added support for testing on Fedora 32.
+ - ansible-test - General code cleanup.
+ - ansible-test - Now includes testing support for RHEL 8.2
+ - ansible-test - Refactor code to consolidate filesystem access and improve
+ handling of encoding.
+ - ansible-test - Refactored CI related logic into a basic provider abstraction.
+ - ansible-test - Remove obsolete support for provisioning remote vCenter instances.
+ The supporting services are no longer available.
+ - ansible-test - Support writing compact JSON files instead of formatting and
+ indenting the output.
+ - ansible-test - Update Ubuntu 18.04 test container to version 1.13 which includes
+ ``venv``
+ - ansible-test - Update ``default-test-container`` to version 1.11, which includes
+ Python 3.9.0a4.
+ - ansible-test - Updated the default test containers to include Python 3.9.0b3.
+ - ansible-test - Upgrade OpenSUSE containers to use Leap 15.1.
+ - ansible-test - Upgrade distro test containers from 1.16.0 to 1.17.0
+ - ansible-test - Upgrade from ansible-base-test-container 1.1 to 2.2
+ - ansible-test - Upgrade from default-test-container 2.1 to 2.2
+ - ansible-test - ``mutually_exclusive``, ``required_if``, ``required_by``, ``required_together``
+ and ``required_one_of`` in modules are now validated.
+ - ansible-test - ``validate-modules`` now also accepts an ISO 8601 formatted
+ date as ``deprecated.removed_at_date``, instead of requiring a version number
+ in ``deprecated.removed_in``.
+ - ansible-test - ``validate-modules`` now makes sure that module documentation
+ deprecation removal version and/or date matches with removal version and/or
+ date in meta/runtime.yml.
+ - ansible-test - ``validate-modules`` now validates all version numbers in documentation
+ and argument spec. Version numbers for collections are checked for being valid
+ semantic versioning version number strings.
+ - ansible-test - add ``validate-modules`` tests for ``removed_in_version`` and
+ ``deprecated_aliases`` (https://github.com/ansible/ansible/pull/66920/).
+ - ansible-test - add check for ``print()`` calls in modules and module_utils.
+ - ansible-test - added a ``--no-pip-check`` option
+ - ansible-test - added a ``--venv-system-site-packages`` option for use with
+ the ``--venv`` option
+ - ansible-test - added new ``changelog`` test, which runs if an `antsibull-changelog
+ <https://pypi.org/project/antsibull-changelog/>`_ configuration or files in
+ ``changelogs/fragments/`` are found (https://github.com/ansible/ansible/pull/69313).
+ - ansible-test - allow delegation config to specify equivalents to the ``--no-pip-check``,
+ ``--disable-httptester`` and ``--no-temp-unicode`` options
+ - ansible-test - allow sanity tests to check for optional errors by specifying
+ ``--enable-optional-errors`` (https://github.com/ansible/ansible/pull/66920/).
+ - ansible-test - also run the ``ansible-doc`` sanity test with ``--json`` to
+ ensure that the documentation does not contain something that cannot be exported
+ as JSON (https://github.com/ansible/ansible/issues/69238).
+ - ansible-test - enable deprecated version testing for modules and ``module.deprecate()``
+ calls (https://github.com/ansible/ansible/pull/66920/).
+ - ansible-test - extend alias validation.
+ - ansible-test - fixed ``units`` command with ``--docker`` to (mostly) work
+ under podman
+ - ansible-test - improve module validation so that ``default``, ``sample`` and
+ ``example`` contain JSON values and not arbitrary YAML values, like ``datetime``
+ objects or dictionaries with non-string keys.
+ - ansible-test - module validation will now consider arguments added by ``add_file_common_arguments=True``
+ correctly.
+ - ansible-test - switch from testing RHEL 8.0 and RHEL 8.1 Beta to RHEL 8.1
+ - ansible-test - the argument spec of modules is now validated by a YAML schema.
+ - ansible-test - the module validation code now checks whether ``elements``
+ documentation for options matches the argument_spec.
+ - ansible-test - the module validation code now checks whether ``elements``
+ is defined when ``type=list``
+ - ansible-test - the module validation code now checks whether ``requirement``
+ for options is documented correctly.
+ - ansible-test - add pyparsing constraint for Python 2.x to avoid compatibility
+ issues with the upcoming pyparsing 3 release
+ - ansible-test defaults to redacting sensitive values (disable with the ``--no-redact``
+ option)
+ - ansible-test has been updated to use ``default-test-container:1.13`` which
+ includes fewer Python requirements now that most modules and tests have been
+ migrated to collections.
+ - ansible-test no longer detects ``git`` submodule directories as files.
+ - ansible-test no longer provides a ``--tox`` option. Use the ``--venv`` option
+ instead. This only affects testing the Ansible source. The feature was never
+ available for Ansible Collections or when running from an Ansible install.
+ - ansible-test no longer tries to install sanity test dependencies on unsupported
+ Python versions
+ - ansible-test now checks for the minimum and maximum supported versions when
+ importing ``coverage``
+ - ansible-test now filters out unnecessary warnings and messages from pip when
+ installing its own requirements
+ - ansible-test now has a ``--list-files`` option to list files using the ``env``
+ command.
+ - ansible-test now includes the ``pylint`` plugin ``mccabe`` in optional sanity
+ tests enabled with ``--enable-optional-errors``
+ - ansible-test now places the ansible source and collections content in separate
+ directories when using the ``--docker`` or ``--remote`` options.
+ - ansible-test now provides a more helpful error when loading coverage files
+ created by ``coverage`` version 5 or later
+ - ansible-test now supports provisioning of network resources when testing network
+ collections
+ - ansible-test now supports skip aliases in the format ``skip/{arch}/{platform}``
+ and ``skip/{arch}/{platform}/{version}`` where ``arch`` can be ``power``.
+ These aliases are only effective for the ``--remote`` option.
+ - ansible-test now supports skip aliases in the format ``skip/{platform}/{version}``
+ for the ``--remote`` option. This is preferred over the older ``skip/{platform}{version}``
+ format which included no ``/`` between the platform and version.
+ - ansible-test now supports testing against RHEL 7.8 when using the ``--remote``
+ option.
+ - ansible-test now supports the ``--remote power/centos/7`` platform option.
+ - ansible-test now validates the schema of ansible_builtin_runtime.yml and a
+ collection's meta/runtime.yml file.
+ - ansible-test provides clearer error messages when failing to detect the provider
+ to use with the ``--remote`` option.
+ - ansible-test provisioning of network devices for ``network-integration`` has
+ been updated to use collections.
+ - ansible_native_concat() - use ``to_text`` function rather than Jinja2's ``text_type``
+ which has been removed in Jinja2 master branch.
+ - apt - Implemented an exponential backoff behaviour when retrying to update
+ the cache with new params ``update_cache_retry_max_delay`` and ``update_cache_retries``
+ to control the behavior.
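+ # A minimal sketch of the new apt retry parameters above; the values shown are
+ # hypothetical:
+ # - apt:
+ #     update_cache: yes
+ #     update_cache_retries: 10
+ #     update_cache_retry_max_delay: 30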
+ - apt_repository - Implemented an exponential backoff behaviour when retrying
+ to update the apt cache with new params ``update_cache_retry_max_delay`` and
+ ``update_cache_retries`` to control the behavior.
+ - blockinfile - Update module documentation to clarify insertbefore/insertafter
+ usage.
+ - callbacks - Allow modules to return `None` as before/after entries for diff.
+ This should make it easier for modules to report the "not existing" state
+ of the entity they touched.
+ - combine filter - now accepts a ``list_merge`` argument which modifies its behaviour
+ when the hashes to merge contain arrays/lists.
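+ # A hedged sketch of the new argument; with ``list_merge='append'`` the lists
+ # are concatenated instead of replaced (the default behaviour):
+ #   {{ {'a': [1]} | combine({'a': [2]}, list_merge='append') }}  ->  {'a': [1, 2]}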
+ - config - accept singular version of ``collections_path`` ini setting and ``ANSIBLE_COLLECTIONS_PATH``
+ environment variable setting
+ - core filters - Adding ``path_join`` filter to the core filters list
+ - distro - Update bundled version of distro from 1.4.0 to 1.5.0
+ - dnf - Properly handle idempotent transactions with package name wildcard globs
+ (https://github.com/ansible/ansible/issues/62809)
+ - dnf - Properly handle module AppStreams that don't define stream (https://github.com/ansible/ansible/issues/63683)
+ - dnf - add param to pass ``allowerasing``
+ - downstream packagers may install packages under ansible._vendor, which will
+ be added to the head of sys.path at ansible package load
+ - file - specifying ``src`` without ``state`` is now an error
+ - get_bin_path() - change the interface to always raise ``ValueError`` if the
+ command is not found (https://github.com/ansible/ansible/pull/56813)
+ - get_url - Remove deprecated string format support for the headers option (https://github.com/ansible/ansible/issues/61891)
+ - git - added an ``archive_prefix`` option to set a prefix to add to each file
+ path in archive
+ - host_group_vars plugin - Require whitelisting and whitelist by default.
+ - new magic variable - ``ansible_config_file`` - full path of used Ansible config
+ file
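+ # A minimal sketch of the new magic variable above:
+ # - debug:
+ #     msg: "loaded config from {{ ansible_config_file }}"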
+ - package_facts.py - Add support for Pacman package manager.
+ - plugin loader - Add MODULE_IGNORE_EXTS config option to skip over certain
+ extensions when looking for script and binary modules.
+ - powershell (shell plugin) - Fix `join_path` to support UNC paths (https://github.com/ansible/ansible/issues/66341)
+ - regexp_replace filter - add multiline support for regex_replace filter (https://github.com/ansible/ansible/issues/61985)
+ - 'rename ``_find_existing_collections()`` to ``find_existing_collections()``
+ to reflect its use across multiple files'
+ - reorganized code for the ``ansible-test coverage`` command for easier maintenance
+ and feature additions
+ - service_facts - Added undocumented 'indirect' and 'static' as service statuses
+ (https://github.com/ansible/ansible/issues/69752).
+ - ssh - connection plugin now supports a new variable ``sshpass_prompt`` which
+ gets passed to ``sshpass`` allowing the user to set a custom substring to
+ search for a password prompt (requires sshpass 1.06+)
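+ # A hedged sketch of the variable above; the prompt substring is hypothetical,
+ # and the ``ansible_``-prefixed inventory form is assumed:
+ #   ansible_sshpass_prompt: 'Enter PIN:'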
+ - systemd - default scope is now explicitly "system"
+ - tests - Add new ``truthy`` and ``falsy`` jinja2 tests to evaluate the truthiness
+ or falsiness of a value
+ - to_nice_json filter - Removed now-useless exception handler
+ - to_uuid - add a named parameter to let the user optionally set a custom namespace
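+ # A hedged sketch of the new named parameter; the namespace UUID is hypothetical:
+ #   {{ 'www.example.com' | to_uuid(namespace='11111111-2222-3333-4444-555555555555') }}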
+ - update ansible-test default-test-container from version 1.13 to 1.14, which
+ includes an update from Python 3.9.0a6 to Python 3.9.0b1
+ - update ansible-test default-test-container from version 1.9.1 to 1.9.2
+ - update ansible-test default-test-container from version 1.9.2 to 1.9.3
+ - update ansible-test default-test-container from version 1.9.3 to 1.10.1
+ - update ansible-test images to 1.16.0, which includes system updates and pins
+ CentOS versions
+ - uri/galaxy - Add new ``prepare_multipart`` helper function for creating a
+ ``multipart/form-data`` body (https://github.com/ansible/ansible/pull/69376)
+ - url_lookup_plugin - add parameters to match what is available in ``module_utils/urls.py``
+ - user - allow the ``groups`` and ``append`` parameters to be used with ``local``
+ - 'user - usage of ``append: True`` without setting a list of groups is currently
+ a no-op with a warning, and will change to an error in 2.14. (https://github.com/ansible/ansible/pull/65795)'
+ - validate-modules - check deprecations in collections against meta/runtime.yml
+ - validation - Sort missing parameters in exception message thrown by check_required_arguments
+ - vars plugins - Support vars plugins in collections by adding the ability to
+ whitelist plugins.
+ - vars_prompt - throw error when encountering unsupported key
+ - win_package - Added proxy support for retrieving packages from a URL - https://github.com/ansible/ansible/issues/43818
+ - win_package - Added support for ``.appx``, ``.msix``, ``.appxbundle``, and
+ ``.msixbundle`` packages - https://github.com/ansible/ansible/issues/50765
+ - win_package - Added support for ``.msp`` packages - https://github.com/ansible/ansible/issues/22789
+ - win_package - Added support for specifying the HTTP method when getting files
+ from a URL - https://github.com/ansible/ansible/issues/35377
+ - win_package - Read uninstall strings from the ``QuietUninstallString`` if
+ present to better support argumentless uninstalls of registry based packages.
+ - win_package - Scan packages in the current user's registry hive - https://github.com/ansible/ansible/issues/45950
+ - windows collections - Support relative module util imports in PowerShell modules
+ and module_utils
+ release_summary: '| Release Date: 2020-06-17
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ removed_features:
+ - core - remove support for ``check_invalid_arguments`` in ``AnsibleModule``,
+ ``AzureModule`` and ``UTMModule``.
+ security_fixes:
+ - '**security issue** - Convert CLI provided passwords to text initially, to
+ prevent unsafe context being lost when converting from bytes->text during
+ post processing of PlayContext. This prevents CLI provided passwords from
+ being incorrectly templated (CVE-2019-14856)
+
+ '
+ - '**security issue** - Redact cloud plugin secrets in ansible-test when running
+ integration tests using cloud plugins. Only present in 2.9.0b1.
+
+ '
+ - '**security issue** - TaskExecutor - Ensure we don''t erase unsafe context
+ in TaskExecutor.run on bytes. Only present in 2.9.0beta1 (https://github.com/ansible/ansible/issues/62237)
+
+ '
+ - '**security issue** - The ``subversion`` module provided the password via
+ the svn command line option ``--password`` and can be retrieved from the host''s
+ /proc/<pid>/cmdline file. Update the module to use the secure ``--password-from-stdin``
+ option instead, and add a warning in the module and in the documentation if
+ svn version is too old to support it. (CVE-2020-1739)
+
+ '
+ - '**security issue** - Update ``AnsibleUnsafeText`` and ``AnsibleUnsafeBytes``
+ to maintain unsafe context by overriding ``.encode`` and ``.decode``. This
+ prevents future issues with ``to_text``, ``to_bytes``, or ``to_native`` removing
+ the unsafe wrapper when converting between string types (CVE-2019-14856)
+
+ '
+ - '**security issue** - properly hide parameters marked with ``no_log`` in suboptions
+ when invalid parameters are passed to the module (CVE-2019-14858)'
+ - '**security issue** - win_unzip - normalize paths in archive to ensure extracted
+ files do not escape from the target directory (CVE-2020-1737)'
+ - '**security issue** - create temporary vault file with strict permissions
+ when editing and prevent race condition (CVE-2020-1740)'
+ - Ensure we get an error when creating a remote tmp if it already exists (CVE-2020-1733).
+ - In fetch action, avoid using slurp return to set up dest; also ensure no dir
+ traversal (CVE-2020-1735).
+ - ansible-galaxy - Error when install finds a tar with a file that will be extracted
+ outside the collection install directory - CVE-2020-10691
+ codename: When the Levee Breaks
+ fragments:
+ - 21003-uri-return-content.yml
+ - 34722-ssh-sshpass-prompt-variable.yml
+ - 47050-copy_ensure-_original_basename-is-set.yaml
+ - 51489-apt-not-honor-update-cache.yml
+ - 54095-import_tasks-fix_no_task.yml
+ - 56629-synchronize-password-auth.yaml
+ - 56832-remove-aptitude-warning.yml
+ - 57266-apt_repository-update-cache-retrying.yml
+ - 57779-module_defaults_groups_catchup_gcp.yml
+ - 57894-combine-filter-rework.yml
+ - 58323-copy-deep-recursive-with-remote_src.yaml
+ - 59060-validate-modules-aliases.yml
+ - 59438-hostname-use-hostnamectl.yml
+ - 59464-playbook-dir-envvar.yml
+ - 59765-cron-cronvar-use-get-bin-path.yaml
+ - 59772-fix_ansible_issue_58619.yaml
+ - 60106-templar-contextmanager.yml
+ - 60527-apt_exponential_backoff_cache_update_retry.yml
+ - 60587-doc_parsing.yml
+ - 60595-systemd_import.yml
+ - 61078-vars-plugin-whitelist-and-execution-settings.yaml
+ - 61604-ansible-inventory-hide-args.yaml
+ - 61624-fix-galaxy-url-building.yml
+ - 61659-load_file_common_arguments-override-path.yml
+ - 61889-change-transform_invalid_group_chars-default.yml
+ - 61891-get_url-remove-deprecated-string-headers.yml
+ - 61978-get-url-no-checksum.yml
+ - 62096-test-coverage-all.yml
+ - 62134-user-allow-groups-and-append-with-local.yml
+ - 62237-keep-unsafe-context.yml
+ - 62407-wait_for_connection.yml
+ - 62582-allow_diff_before_after_to_be_None.yml
+ - 62598-AnsibleDumper-representer.yaml
+ - 62713-add-path_join-filter.yaml
+ - 62766-package_facts-pkg-manager-fix-vital-value.yml
+ - 62809-dnf-wildcard-absent-failure.yml
+ - 62870-collection-install-default-path.yml
+ - 63194-lineinfile_insertafter_duplicate.yaml
+ - 63551-yum-single-YumBase-instantiation.yaml
+ - 63628-ansible-galaxy-fix-version.yml
+ - 63683-dnf-handle-empty-appstream-stream.yml
+ - 63782-add-ansible-ask-vault-password-and-vault-password-file-options.yaml
+ - 63919-lineinfile-create-no-dir-path.yml
+ - 63940-template-lookup-hostvars-regression.yml
+ - 63988-removes-python_compat_fallback.yml
+ - 63990-replace-deprecated-basic-functions.yml
+ - 64057-Add_named_parameter_to_the_to_uuid_filter.yaml
+ - 64076-urls-timeout-parameter.yml
+ - 64088-ast-literal.yml
+ - 64151-remove-unsed-inventory-script-option.yaml
+ - 64282-hostvarsvars-templating.yaml
+ - 64379-no-loop-unsafe.yml
+ - 64424-ansible-test-acme-container.yml
+ - 64664-fix-sys-modules-file.yml
+ - 64733-make-no_log-false-override-no_log-warnings.yml
+ - 64751-fix-wrong-promt-len-calc-in-ansible-console.yaml
+ - 64789-regression-rescue-vars-not-defined.yml
+ - 64810-hostname-add-manjaro-linux-distribution.yml
+ - 64892-add-parameters-to-url_lookup_plugin.yaml
+ - 64902-fix-allow-duplicates-in-single-role.yml
+ - 64905-semver.yml
+ - 64906-always-delegate-fact-prefixes.yml
+ - 64959-extract-filter-when-key-does-not-exist.yml
+ - 64963-dnf_idempotence.yml
+ - 65001-allow_configuring_async_startup_timeout.yml
+ - 65051-regex-replace-multiline.yaml
+ - 65058-fix-fd-out-of-range-in-select.yml
+ - 65073-fix-inventory-cli-loading-vars-plugins.yaml
+ - 65122-fix-encrypt_string-stdin-name-ouput-tty.yml
+ - 65198-ansibleundefined-is-not-unsafe.yml
+ - 65219-sanity-tests-print.yml
+ - 65302-dnf-msg-return.yml
+ - 65307-get_url-return-status-code-on-http-304.yaml
+ - 65365-fix-tb-printing-hostvars.yml
+ - 65376.yaml
+ - 65422-fix-throttle-with-linear-strategy.yml
+ - 65437-ansible-test-module-validation-required.yml
+ - 65541-fix-utf8-issue-env-lookup.yml
+ - 65576-fix-free-strategy-handler-filtering.yaml
+ - 65618-ansible-galaxy-collection-verify.yaml
+ - 65624-paramiko-ctx-man.yml
+ - 65698-synchronize-docker-controller-managed.yml
+ - 65722-unsafe-tuples.yml
+ - 65795-warn-if-user-has-set-append-but-not-set-groups.yaml
+ - 65904-fix-loop-label.yml
+ - 66006-RoleRequirement-include-stderr-error-msg.yaml
+ - 66067-git-archive_prefix-option.yaml
+ - 66085-ansible_config_file.yml
+ - 66128-fix-callback-set-options.yml
+ - 66189-hostname-osmc.yml
+ - 66219-update-user-module-for-64733.yml
+ - 66370-galaxy-add-metadata-property.yaml
+ - 66385-ansible-test-module-validation-elements.yml
+ - 66386-ansible-test-module-validation-list-elements.yml
+ - 66389-file-common-arguments.yml
+ - 66461-blockinfile_preserve_line_endings.yaml
+ - 66464-lookup-case-sensitivity-fix.yml
+ - 66529-display-both-loop-and-cond-errors.yml
+ - 66549-enablerepo-not-honored-when-used-with-disablerepo-all.yml
+ - 66569-introduce-fact-ansible_processor_nproc.yml
+ - 66596-package_facts-add-pacman-support.yaml
+ - 66604-powershell-unc-paths.yml
+ - 66617-version-unicode-fix.yml
+ - 66721-better-jinja2-collection-error-handling.yml
+ - 66726-galaxy-fix-attribute-error.yml
+ - 66762-fix-git-module-ignores-remote_tmp.yml
+ - 66764-host-pattern-warning.yml
+ - 66780-facts-detect-kvm-server.yml
+ - 66786-fix-duplicate-yaml-key-error.yaml
+ - 66898-sanity-state-list.yaml
+ - 66911-fix-cloudlinux6-hostname.yaml
+ - 66918-removed_in_version-fix.yml
+ - 66920-ansible-test-removed_in_version-deprecated_aliases.yml
+ - 66921-sanity-state-get.yaml
+ - 66943-handle-unicode-in-safe_eval.yml
+ - 66961-ansible-test-required-mutually.yml
+ - 67006-systemd-scope-default.yaml
+ - 67050-yum-releasever.yaml
+ - 67093-site-packages-pythonpath-collections-loader.yml
+ - 67243-file_common_arguments-defaults-sanity.yml
+ - 67365-role-list-role-name-in-path.yml
+ - 67407-pip-virtualenv_command-args.yml
+ - 67417-get_url-incorrect-if-modified-since.yaml
+ - 67429-jinja2-caching.yml
+ - 67492-fix-decrypting-str-types-for-plugins.yaml
+ - 67574-null_collection_dependency_list.yml
+ - 67735-warning-cleanup.yml
+ - 67771-validation-error.yml
+ - 67823-vault-unicode-string.yml
+ - 67942-fix-galaxy-multipart.yml
+ - 67972-git-fetch-force.yml
+ - 68014-auto-unroll-jinja2-generators.yml
+ - 68136-systemd_should_fail_in_check_mode_when_service_not_found.yml
+ - 68181-fqcn-handler-notification.yml
+ - 68186_collection_index_err.yml
+ - 68211-systemd-list-unit-files-parsing.yml
+ - 68247-file-unreachable-code.yaml
+ - 68288-galaxy-requirements-install-only.yml
+ - 68310-low_level_execute_command-honor-executable.yml
+ - 68400-strip-no-log-values-from-keys.yml
+ - 68471-copy-with-preserve.yaml
+ - 68482-remove-function-update-calls.yaml
+ - 68515_include_role_vars_from.yml
+ - 68518-to_nice_json-cleanup.yaml
+ - 68550-ansible-test-docs-changelogs.yml
+ - 68569-start-at-fix.yaml
+ - 68592-pip-check_mode-prereleases.yml
+ - 68667-dont-crash-ansible-vault-create-when-no-arguments.yaml
+ - 68699-prevent-templating-all-vars-when-copying-context.yml
+ - 68723-force-static-collections.yaml
+ - 69029-module-ignore-exts.yml
+ - 69054-collection-as-str.yaml
+ - 69101-collection-role-to-standalone-role.yml
+ - 69104-galaxy-cli-templar.yml
+ - 69133-role-install-non-ascii.yml
+ - 69139_inventory_doc_fix.yml
+ - 69154-install-collection-from-git-repo.yml
+ - 69160-add-missing-parameter.yaml
+ - 69164-remove-redundant-conditional.yaml
+ - 69175-address-fixmes-sysctl.yaml
+ - 69226-hurd-extract-functionality.yaml
+ - 69278-early-customize-jinja2.yml
+ - 69286_pop_os_distribution.yml
+ - 69286_popos_hostname.yml
+ - 69287-ansible-test-validate-default-sample-example.yml
+ - 69288-ansible-test-ansible-doc-json.yml
+ - 69313-sanity-antsibull-changelog.yml
+ - 69320-sys-path-cwd.yml
+ - 69357_optimize_inventory_graph_wo_vars.yml
+ - 69396-blockinfile-docs.yaml
+ - 69451-fix-fileglob-nonexistent-subdirs.yaml
+ - 69457-free-strategy-handler-race.yml
+ - 69458-updated-galaxy-cli-help.yaml
+ - 69465-remove-fixme-comment.yaml
+ - 69484-add-path-to-yum-for-ibmi.yml
+ - 69508-ansible-test-local-changes-detection.yml
+ - 69516_flatcar_distribution.yml
+ - 69521-free-strategy-include-fix.yml
+ - 69752_service_facts.yml
+ - 69788_fqcr_command_shell.yml
+ - 69795-ansible-doc-suboptions.yml
+ - 69796-ansible-doc-return-values.yml
+ - 69845-doc-assemble-remote_src.yaml
+ - 69881-add_host-show-changed.yml
+ - 69919-module_defaults-groups-collections.yml
+ - 69993-copy-remote-src-perms.yml
+ - action-plugin-always-cleanup.yml
+ - add-global-warnings-container.yaml
+ - add-type-typename.yaml
+ - add_hosts_fix.yml
+ - add_prefix_to_cache.yml
+ - adhoc_default_collection.yml
+ - af_clean.yml
+ - allow-fail-json-msg-to-be-positional.yaml
+ - allow_ansible_ns.yml
+ - ansible-adhoc-cb-playbook_start.yaml
+ - ansible-basic-util-fragment.yaml
+ - ansible-connection_persist_issue.yaml
+ - ansible-doc-remove_at_date.yaml
+ - ansible-doc-removed-traceback.yml
+ - ansible-doc-version_added-collection.yml
+ - ansible-galaxy-agent.yaml
+ - ansible-galaxy-cli-add-token-alias.yaml
+ - ansible-galaxy-collections.yaml
+ - ansible-galaxy-handle-import-task-url-changes.yml
+ - ansible-galaxy-ignore.yaml
+ - ansible-galaxy-install-manifest-warning.yaml
+ - ansible-galaxy-install.yaml
+ - ansible-galaxy-progress.yaml
+ - ansible-galaxy-role-list-specific-fix.yml
+ - ansible-galaxy-role-server.yaml
+ - ansible-galaxy-support-for-automation-hub.yml
+ - ansible-inventory-code-cleanup.yml
+ - ansible-test-ansible-doc.yml
+ - ansible-test-ast-parse-bytes.yml
+ - ansible-test-boto-constraints.yml
+ - ansible-test-change-detection-empty-python.yml
+ - ansible-test-change-detection-fix.yml
+ - ansible-test-ci-support-azure.yml
+ - ansible-test-ci-support-shippable-auth.yml
+ - ansible-test-ci-support.yml
+ - ansible-test-cloud-secrets.yml
+ - ansible-test-code-cleanup.yml
+ - ansible-test-collections-ansible-adhoc.yml
+ - ansible-test-collections-coverage-noise.yml
+ - ansible-test-collections-import-sanity-test.yml
+ - ansible-test-collections-requirements.yml
+ - ansible-test-color-ls.yml
+ - ansible-test-constraints-virtualenv.yml
+ - ansible-test-container-update.yml
+ - ansible-test-coverage-analyze-targets-filter.yml
+ - ansible-test-coverage-analyze-targets.yml
+ - ansible-test-coverage-ansible-connection.yml
+ - ansible-test-coverage-constraint.yml
+ - ansible-test-coverage-constraints.yml
+ - ansible-test-coverage-incomplete.yml
+ - ansible-test-coverage-reorg.yml
+ - ansible-test-coverage-version-check.yml
+ - ansible-test-default-containers-update.yml
+ - ansible-test-default-test-container-1.10.1.yml
+ - ansible-test-default-test-container-1.13.yml
+ - ansible-test-default-test-container-1.14.yml
+ - ansible-test-default-test-container-1.9.2.yml
+ - ansible-test-default-test-container-1.9.3.yml
+ - ansible-test-delegation-inventory.yml
+ - ansible-test-delegation-options.yml
+ - ansible-test-delegation-paths.yml
+ - ansible-test-delegation-tmp-dir.yml
+ - ansible-test-distro-container-venv.yml
+ - ansible-test-doc-links.yml
+ - ansible-test-docker-context.yml
+ - ansible-test-docker-terminate.yml
+ - ansible-test-egg-info-version.yml
+ - ansible-test-empty-coverage.yml
+ - ansible-test-enable-aix-os-testing.yml
+ - ansible-test-env-alteration.yml
+ - ansible-test-env-list-files.yml
+ - ansible-test-execv-wrapper-shebang.yml
+ - ansible-test-fix-import-sanity-test.yml
+ - ansible-test-fix-python-path.yml
+ - ansible-test-git-submodule-top-level.yml
+ - ansible-test-git-submodule.yml
+ - ansible-test-hcloud-constraint.yml
+ - ansible-test-ignore-pip-warnings.yml
+ - ansible-test-ignore-tests-output.yml
+ - ansible-test-integration-temp-dir.yml
+ - ansible-test-jinja2-python-2.6.yml
+ - ansible-test-nested-source-control.yml
+ - ansible-test-network-collections.yml
+ - ansible-test-network-testing.yml
+ - ansible-test-no-pip-check.yml
+ - ansible-test-obsolete-vcenter-proivisioning.yml
+ - ansible-test-opensuse-15.1.yml
+ - ansible-test-paramiko-constraint.yml
+ - ansible-test-path-to-python.yml
+ - ansible-test-pathspec-constraint.yml
+ - ansible-test-pip-filtering.yml
+ - ansible-test-podman-units.yml
+ - ansible-test-powershell-coverage-paths.yml
+ - ansible-test-preserve-remote-authorized-keys.yml
+ - ansible-test-pycodestyle-freeze.yml
+ - ansible-test-pylint-plugin-paths.yml
+ - ansible-test-pyparsing-constraint.yml
+ - ansible-test-pytest-mccabe.yml
+ - ansible-test-pytest-plugin-loading.yml
+ - ansible-test-python-3.9.yaml
+ - ansible-test-python-import-analysis.yml
+ - ansible-test-redact.yml
+ - ansible-test-refactor.yml
+ - ansible-test-remote-power.yml
+ - ansible-test-remote-tar-format.yml
+ - ansible-test-remove-tox-option.yml
+ - ansible-test-requirements-install.yml
+ - ansible-test-rhel-7.8.yml
+ - ansible-test-rhel-8.1-testing.yml
+ - ansible-test-rhel-82.yml
+ - ansible-test-sanity-constraints.yml
+ - ansible-test-sanity-import-fixes.yml
+ - ansible-test-sanity-pylint-config-fix.yml
+ - ansible-test-sanity-requirements.yml
+ - ansible-test-sanity-separate-requirements.yml
+ - ansible-test-setuptools-constraint.yml
+ - ansible-test-shebang-sanity.yml
+ - ansible-test-ssh-keygen-fix.yml
+ - ansible-test-submodule-dir-as-file.yml
+ - ansible-test-submodules.yml
+ - ansible-test-test-no-tests.yml
+ - ansible-test-test-source-message.yml
+ - ansible-test-update-images-1.16.0.yml
+ - ansible-test-validate-modules-deprecated-removed_at.yml
+ - ansible-test-validate-modules-fixes.yml
+ - ansible-test-validate-runtime-file.yml
+ - ansible-test-vcenter-plugin.yml
+ - ansible-test-venv-activation.yml
+ - ansible-test-venv-pkg-resources.yaml
+ - ansible-test-venv-pythonpath.yml
+ - ansible-test-venv-system-site-packages.yml
+ - ansible-test-version-validation.yml
+ - ansible-test-virtualenv-python-search.yml
+ - ansible-test-virtualenv-venv.yml
+ - ansible-test-windows-integration.yml
+ - ansible_basic_no_log_empty_string.yaml
+ - ansible_native_concat-use-to_text-rather-than-text_type.yml
+ - ansile-galaxy-preserve-api-append.yml
+ - become-pass-precedence.yaml
+ - blockinfile-line-ending-fix.yaml
+ - change-get_bin_path-always-raise-exception.yaml
+ - clean_facts-use-correct-variable-for-startswith.yaml
+ - collection-install-mode.yaml
+ - collection-install-url.yaml
+ - collection-prefix-basedir.yaml
+ - collection_error_fix.yml
+ - collection_jinja_cache_fix.yml
+ - collection_loader-sort-plugins.yaml
+ - collection_loader_import_fixes.yml
+ - collection_routing.yml
+ - config-manager-vault-str.yaml
+ - config_encoding_resilience.yml
+ - configurable_pipelining.yml
+ - cron-only-get-bin-path-once.yaml
+ - cronvar-correct-binary-name.yaml
+ - debug_loop_changed.yaml
+ - deprecate-by-date.yml
+ - deprecate-hash-behaviour.yml
+ - deprecate_default_call_fragmentless.yml
+ - deprecation-collection-name.yml
+ - detect-generic-container.yml
+ - dict2items.yml
+ - discovery_delegation_fix.yml
+ - display-warning-remove-erroneous-space.yaml
+ - distribution_release.yml
+ - distribution_test_refactor.yml
+ - distro-update-version.yml
+ - dnf-4-2-18.yml
+ - dnf-allowerasing.yaml
+ - dnf_setup_loggers.yml
+ - dont-template-cli-passwords.yml
+ - dont_hide_json_error.yml
+ - downstream_vendoring.yml
+ - end_host-remove_host_from_play.yml
+ - ensure_discovery_delegate.yml
+ - examples_add_collections_paths_to_config_file.yml
+ - extra-vars.yml
+ - extra_vars_with_at_sign.yml
+ - f32.yml
+ - fallback_uid.yml
+ - fetch_no_slurp.yml
+ - file-change-src-without-state-to-error.yaml
+ - file-fix-diff-peek-arg-spec.yaml
+ - file-return-state-when-file-does-not-exist.yaml
+ - fileglob_fixes.yml
+ - find-contains-docs.yaml
+ - fips-paramiko-import-error.yaml
+ - fix-ansible-galaxy-server.yml
+ - fix-metadata-defaults.yml
+ - fix_doc_symlinks.yml
+ - flatten-modules.yml
+ - galaxy-add-path-validation-utility-function.yaml
+ - galaxy-cli-add-collection-path-parser-arg.yaml
+ - galaxy-collection-install-version.yaml
+ - galaxy-collection-rename-private-function.yaml
+ - galaxy-collections-add-list.yml
+ - galaxy-collections.yaml
+ - galaxy-download.yaml
+ - galaxy-error-reason.yaml
+ - galaxy-install-tar-path-traversal.yaml
+ - galaxy-role-list-fix.yml
+ - galaxy-role-version.yaml
+ - galaxy-server-list.yaml
+ - galaxy-symlinks.yaml
+ - gather_facts-warnings.yaml
+ - gf_fix.yml
+ - group_by_changed.yml
+ - include_vars-ad-hoc-stack-trace-fix.yaml
+ - include_vars_fix_none.yml
+ - inventory_doc_fix.yml
+ - junit-compat.yml
+ - limit-file-exception.yml
+ - lineinfile-backrefs-match-object-type.yaml
+ - lineinfile-backrefs-syntax-example.yaml
+ - lineinfile-use-correct-index-value.yaml
+ - lineinfile-use-module-tempdir.yaml
+ - logging-traceback.yaml
+ - misc_typo_fix.yml
+ - module-validation-argument_spec-schema.yml
+ - multipart.yml
+ - netconf_plugin_device_handler.yml
+ - network-cli-become-collections.yml
+ - network_action_plugin_load.yml
+ - no-log-sub-options-invalid-parameter.yaml
+ - no_fatal_bad_cb.yml
+ - no_target_syslog-var.yaml
+ - only_be_smart_once.yml
+ - openbsd-disabled-account-no-warning-for-passwd.yaml
+ - openeuler_distribution_support.yml
+ - openstack_botmeta.yml
+ - openstack_inventory_migrated.yml
+ - package-facts-use-module-warnings.yaml
+ - paramiko_ssh-improve-error-message.yaml
+ - pathlist_strip.yml
+ - play_bools_strict.yml
+ - plugin_doc_link_fix.yml
+ - prevent-rewriting-nested-block-data-in-filter_tagged_tasks.yml
+ - ps-argspec-type.yaml
+ - ps_web_request-aliases.yaml
+ - ps_wrapper-deprecated_aliases.yaml
+ - psexec-kerb-and-interactive.yaml
+ - purge-empty-block.yml
+ - pwsh-minimum.yaml
+ - py26-collection-loader.yml
+ - python38-macos.yaml
+ - reboot-add-last-boot-time-parameter.yaml
+ - remote_mkdir_fix.yml
+ - remove-2.9-deprecations.yml
+ - remove_pc_vars_round.yml
+ - removed_extras_require.yml
+ - required_lib_message.yml
+ - server2008-dep.yaml
+ - service-mgr-systemd-offline.yml
+ - show_field_instead_of_value.yml
+ - singular-collection-path.yml
+ - split-host-pattern-empty-strings.yaml
+ - string-conversion-warning-add-parameter-name.yaml
+ - subversion_password.yaml
+ - systemd-offline.yml
+ - test-ps-utils.yaml
+ - truthiness-tests.yaml
+ - unarchive-code-cleanup.yml
+ - uri_options.yml
+ - user-aix-shadow-unbound-local.yaml
+ - user-alpine-on-changed-fix.yaml
+ - user-docs-group-fix.yaml
+ - user-fix-value-comparison-on-macos.yaml
+ - user_missing_etc_shadow.yml
+ - v2.10.0-initial-commit.yaml
+ - v2.10.0b1_summary.yaml
+ - valdate-modules-ps-arg-util.yaml
+ - validate-include-args-in-handlers.yml
+ - validate-modules-argument-spec.yml
+ - validate-modules-deprecated-collections.yml
+ - vars_prompt_error_on_unsupported_key.yaml
+ - vault_tmp_file.yml
+ - vault_tmp_race_fix.yml
+ - wait_for_connection-interpreter-discovery-retry.yaml
+ - warnings-remove-extra-newline-better.yaml
+ - warnings-remove-extra-newline.yaml
+ - win-coverage-out-encoding.yaml
+ - win-unzip-check-extraction-path.yml
+ - win-web-request-no_proxy.yaml
+ - win_collection_relative.yaml
+ - win_env_var.yaml
+ - win_exec-error.yaml
+ - win_get_url-redirection.yaml
+ - win_package-revamp.yaml
+ - windows-coverage-encoding.yaml
+ - yaml_orderd_mappings.yml
+ - yum-dnf-elements-type-in-name-list.yml
+ - yum_backend_validation.yml
+ plugins:
+ lookup:
+ - description: read vaulted file(s) contents
+ name: unvault
+ namespace: null
+ release_date: '2020-06-17'
+ 2.10.0rc1:
+ changes:
+ bugfixes:
+ - 'Address the deprecation of the use of stdlib distutils in packaging. It''s
+ a short-term hotfix for the problem (https://github.com/ansible/ansible/issues/70456,
+ https://github.com/pypa/setuptools/issues/2230, https://github.com/pypa/setuptools/commit/bd110264)
+
+ '
+ - Allow TypeErrors on Undefined variables in filters to be handled or deferred
+ when processing for loops.
+ - Ansible output now uses stdout to determine column width instead of stdin
+ - 'Fix ``delegate_facts: true`` when ``ansible_python_interpreter`` is not set.
+ (https://github.com/ansible/ansible/issues/70168)'
+ - JSON Encoder - Ensure we treat single vault encrypted values as strings (https://github.com/ansible/ansible/issues/70784)
+ - Python module_utils finder - refactor logic to eliminate many corner cases,
+ remove recursion, fix base module_utils redirections
+ - SSH plugin - Improve error message when ssh client is not found on the host
+ - TaskExecutor - Handle unexpected errors as failed while post validating loops
+ (https://github.com/ansible/ansible/issues/70050).
+ - Template connection variables before using them (https://github.com/ansible/ansible/issues/70598).
+ - Terminal plugins - add "\e[m" to the list of ANSI sequences stripped from
+ device output
+ - The `ansible_become` value was not being treated as a boolean value when set
+ in an INI format inventory file (fixes bug https://github.com/ansible/ansible/issues/70476).
+ - The machine-readable changelog ``changelogs/changelog.yaml`` is now contained
+ in the release.
+ - Vault - Allow single vault encrypted values to be used directly as module
+ parameters. (https://github.com/ansible/ansible/issues/68275)
+ - action plugins - change all action/module delegations to use FQ names while
+ allowing overrides (https://github.com/ansible/ansible/issues/69788)
+ - add constraints file for ``ansible_runner`` test since an update to ``psutil``
+ is now causing test failures
+ - add magic/connection vars updates from delegated host info.
+ - ansible-doc - collection name for plugin top-level deprecation was not inserted
+ when deprecating by version (https://github.com/ansible/ansible/pull/70344).
+ - ansible-doc - improve error message in text formatter when ``description``
+ is missing for a (sub-)option or a return value or its ``contains`` (https://github.com/ansible/ansible/pull/70046).
+ - ansible-doc - improve man page formatting to avoid problems when YAML anchors
+ are used (https://github.com/ansible/ansible/pull/70045).
+ - ansible-doc - include the collection name in the text output (https://github.com/ansible/ansible/pull/70401).
+ - ansible-test - Do not try to validate PowerShell modules ``setup.ps1``, ``slurp.ps1``,
+ and ``async_status.ps1``
+ - ansible-test - The ``ansible-doc`` sanity test now works for ``netconf`` plugins.
+ - ansible-test - integration and unit test change detection now works for filter,
+ lookup and test plugins
+ - ansible-test now always uses the ``--python`` option for ``virtualenv`` to
+ select the correct interpreter when creating environments with the ``--venv``
+ option
+ - api - time.clock is removed in Python 3.8, add backward compatible code (https://github.com/ansible/ansible/issues/70649).
+ - apt - include exception message from apt python library in error output
+ - assemble - fix decrypt argument in the module (https://github.com/ansible/ansible/issues/65450).
+ - basic - use PollSelector implementation when DefaultSelector fails (https://github.com/ansible/ansible/issues/70238).
+ - collection metadata - ensure collection loader uses libyaml/CSafeLoader to
+ parse collection metadata if available
+ - cron - encode and decode crontab files in UTF-8 explicitly to allow non-ascii
+ chars in cron filepath and job (https://github.com/ansible/ansible/issues/69492)
+ - ensure delegated vars can resolve hostvars object and access vars from hostvars[inventory_hostname].
+ - facts - account for Slackware OS with ``+`` in the name (https://github.com/ansible/ansible/issues/38760)
+ - fix issue with inventory_hostname and delegated host vars mixing on connection
+ settings.
+ - if the ``type`` for a module parameter in the argument spec is callable, do
+ not pass ``kwargs`` to avoid errors (https://github.com/ansible/ansible/issues/70017)
+ - pause - handle exception when there is no stdout (https://github.com/ansible/ansible/pull/47851)
+ - playbooks - detect and propagate failures in ``always`` blocks after ``rescue``
+ (https://github.com/ansible/ansible/issues/70000)
+ - shell - fix quoting of mkdir command in creation of remote_tmp in order to
+ allow spaces and other special characters (https://github.com/ansible/ansible/issues/69577).
+ - splunk httpapi plugin - switch from splunk.enterprise_security to splunk.es
+ in runtime.yml to reflect upstream change of Collection Name
+ - 'ssh connection plugin - use ``get_option()`` rather than ``_play_context``
+ to ensure ``ANSIBLE_SSH_ARGS`` are applied properly (https://github.com/ansible/ansible/issues/70437)
+
+ '
+ - user - don't create home directory and missing parents when create_home ==
+ false (https://github.com/ansible/ansible/pull/70600).
+ - win setup - Fix redirection path for the windows setup module
+ - windows async - use full path when calling PowerShell to reduce reliance on
+ environment vars being correct - https://github.com/ansible/ansible/issues/70655
+ - winrm - preserve winrm forensic data on put_file failures
+ minor_changes:
+ - Add an example for using var in with_sequence (https://github.com/ansible/ansible/issues/68836).
+ - Add standard Python 2/3 compatibility boilerplate to setup script, module_utils
+ and docs_fragments which were missing them.
+ - 'Command module: Removed suggestions to use modules which have moved to collections
+ and out of ansible-base'
+ - The plugin loader now keeps track of the collection where a plugin was resolved
+ to, in particular whether the plugin was loaded from ansible-base's internal
+ paths (``ansible.builtin``) or from user-supplied paths (no collection name).
+ - ansible-galaxy - Add installation successful message
+ - ansible-galaxy - Change the output verbosity level of the download message
+ from 3 to 0 (https://github.com/ansible/ansible/issues/70010)
+ - ansible-test - Provisioning of RHEL instances now includes installation of
+ pinned versions of ``packaging`` and ``pyparsing`` to match the downstream
+ vendored versions.
+ - ansible-test - Report the correct line number in the ``yamllint`` sanity test
+ when reporting ``libyaml`` parse errors in module documentation.
+ - conditionals - change the default of CONDITIONAL_BARE_VARS to False (https://github.com/ansible/ansible/issues/70682).
+ - debconf - add a note about no_log=True since module might expose sensitive
+ information to logs (https://github.com/ansible/ansible/issues/32386).
+ - pipe lookup - update docs for Popen with shell=True usages (https://github.com/ansible/ansible/issues/70159).
+ release_summary: '| Release Date: 2020-07-23
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ security_fixes:
+ - '**security issue** atomic_move - change default permissions when creating
+ temporary files so they are not world readable (https://github.com/ansible/ansible/issues/67794)
+ (CVE-2020-1736)
+
+ '
+ - Sanitize no_log values from any response keys that might be returned from
+ the uri module (CVE-2020-14330).
+ codename: When the Levee Breaks
+ fragments:
+ - 32386_debconf_password.yml
+ - 38760-slackware-os-dist.yml
+ - 67794-atomic_move-default-perms.yml
+ - 68275-vault-module-args.yml
+ - 69578-shell-remote_tmp-quoting.yaml
+ - 70000-playbook-detect-failure-in-always.yml
+ - 70017-avoid-params-to-callable-checkers.yml
+ - 70045-ansible-doc-yaml-anchors.yml
+ - 70046-ansible-doc-description-crash.yml
+ - 70099-make-apt-errors-more-transparent.yaml
+ - 70122-improve-error-message-ssh-client-is-not-found.yml
+ - 70168-fix-delegate_facts-without-interpreter-set.yml
+ - 70238_selector.yml
+ - 70240-fix-fatal-post_validate-error.yml
+ - 70261_pipe_lookup.yml
+ - 70344-plugin-deprecation-collection-name.yml
+ - 70426-allow-non-ascii-chars-in-cron.yml
+ - 70437-ssh-args.yml
+ - 70465-assemble-fix-decrypt-argument.yaml
+ - 70484-bool-ansible-become.yaml
+ - 70525-setuptools-disutils-reorder.yml
+ - 70600-user-module-dont-create-home-when-create_home-is-false.yml
+ - 70649_time_clock.yml
+ - 70657-template-connection-vars.yaml
+ - 70683-terminal-ansi-re.yaml
+ - 70762-sanitize-uri-keys.yml
+ - 70784-vault-is-string.yml
+ - ansible-boilerplate.yml
+ - ansible-doc-collection-name.yml
+ - ansible-galaxy-stdout.yml
+ - ansible-test-plugin-classification.yml
+ - ansible-test-rhel-requirements.yml
+ - ansible-test-sanity-ansible-doc.yml
+ - ansible-test-sanity-yamllint-lineno.yml
+ - ansible-test-virtualenv-fix.yml
+ - better_winrm_putfile_error.yml
+ - changelog-yaml.yml
+ - collection_meta_use_libyaml.yml
+ - command-module-warnings.yml
+ - delegate_has_hostvars.yml
+ - display-stdout-column-width.yml
+ - fq_action_module_resolution.yml
+ - handle_undefined_in_type_errors_filters.yml
+ - module_utils_finder_refactor.yml
+ - pause-catch-error-when-no-std-exists.yml
+ - plugin-loader-collection-name.yml
+ - runtime-splunk-redirect.yml
+ - test-ansible-runner-pin-psutil.yml
+ - update-conditionals-bare-vars-default.yml
+ - v2.10.0rc1_summary.yaml
+ - validate-modules-ps-doc-blacklist.yaml
+ - win_async_full_path.yml
+ - win_setup-redirection.yaml
+ - with_seq_example.yml
+ release_date: '2020-07-23'
+ 2.10.0rc2:
+ changes:
+ bugfixes:
+ - Stop adding the connection variables to the output results
+ release_summary: '| Release Date: 2020-07-23
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - set_fact-connection_vars.yml
+ - v2.10.0rc2_summary.yaml
+ release_date: '2020-07-23'
+ 2.10.0rc3:
+ changes:
+ bugfixes:
+ - reset logging level to INFO due to CVE-2019-14846.
+ release_summary: '| Release Date: 2020-07-24
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - keep_log_at_info.yml
+ - v2.10.0rc3_summary.yaml
+ release_date: '2020-07-24'
+ 2.10.0rc4:
+ changes:
+ bugfixes:
+ - 'Fix warning for default permission change when no mode is specified. Follow
+ up to https://github.com/ansible/ansible/issues/67794. (CVE-2020-1736)
+
+ '
+ - Fixes ansible-test traceback when plugin author is not a string or a list
+ of strings (https://github.com/ansible/ansible/pull/70507)
+ - Restore the ability for changed_when/failed_when to function with group_by
+ (#70844).
+ - ansible-galaxy collection download - fix downloading tar.gz files and collections
+ in git repositories (https://github.com/ansible/ansible/issues/70429)
+ - ansible-galaxy collection install - fix fallback mechanism if the AH server
+ did not have the collection requested - https://github.com/ansible/ansible/issues/70940
+ - ansible-test - Add ``pytest < 6.0.0`` constraint for managed installations
+ on Python 3.x to avoid issues with relative imports.
+ - ansible-test - Change detection now properly resolves relative imports instead
+ of treating them as absolute imports.
+ - ansible-test validate-modules - ``version_added`` on module level was not
+ validated for modules in collections (https://github.com/ansible/ansible/pull/70869).
+ - ansible-test validate-modules - return the correct error codes ``option-invalid-version-added``
+ and ``return-invalid-version-added``, respectively, instead of the wrong error
+ ``deprecation-either-date-or-version`` when an invalid value of ``version_added``
+ is specified for an option or a return value (https://github.com/ansible/ansible/pull/70869).
+ - facts - fix incorrect UTC timestamp in ``iso8601_micro`` and ``iso8601``
+ - lineinfile - fix not subscriptable error in exception handling around file
+ creation
+ - reboot - Add support for the runit init system, used on Void Linux, that does
+ not support the normal Linux syntax.
+ minor_changes:
+ - default_callback - moved the 'check_mode_markers' documentation into the
+ default_callback doc_fragment (https://github.com/ansible-collections/community.general/issues/565).
+ release_summary: '| Release Date: 2020-07-30
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - 565_default_callback.yml
+ - 67794-default-permissions-warning-fix.yml
+ - 70507-validate-null-author.yaml
+ - 70524-fix-download-collections.yaml
+ - 70704-void-linux-reboot.yml
+ - 70869-ansible-test-validate-modules-version-added.yml
+ - ansible-test-pytest-cap.yml
+ - ansible-test-relative-import-analysis.yml
+ - changed_when_group_by.yml
+ - date-time-facts-fix-utctime.yml
+ - galaxy-collection-fallback.yml
+ - lineinfile_exc_fix.yml
+ - v2.10.0rc4_summary.yaml
+ release_date: '2020-07-30'
+ 2.10.1:
+ changes:
+ release_summary: '| Release Date: 2020-09-14
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - v2.10.1_summary.yaml
+ release_date: '2020-09-14'
+ 2.10.1rc1:
+ changes:
+ bugfixes:
+ - ANSIBLE_COLLECTIONS_PATHS - remove deprecation so that users of Ansible 2.9
+ and 2.10+ can use the same var when specifying a collection path without a
+ warning.
+ - Confirmed commit fails with TypeError in IOS XR netconf plugin (https://github.com/ansible-collections/cisco.iosxr/issues/74)
+ - Ensure password passed in by -k is used on delegated hosts that do not have
+ ansible_password set
+ - Fix an exit code for a non-failing playbook (https://github.com/ansible/ansible/issues/71306)
+ - Fix execution of the meta tasks 'clear_facts', 'clear_host_errors', 'end_play',
+ 'end_host', and 'reset_connection' when the CLI flag '--flush-cache' is provided.
+ - Fix statistics reporting when rescue block contains another block (issue https://github.com/ansible/ansible/issues/61253).
+ - Fixed Ansible reporting validate not supported by netconf server when enabled
+ in netconf - (https://github.com/ansible-collections/ansible.netcommon/issues/119).
+ - Skip literal_eval for string filter results in native jinja. (https://github.com/ansible/ansible/issues/70831)
+ - Strategy - Ensure we only process expected types from the results queue and
+ produce warnings for any object we receive from the queue that doesn't match
+ our expectations. (https://github.com/ansible/ansible/issues/70023)
+ - TOML inventory - Ensure we register dump functions for ``AnsibleUnsafe`` to
+ support dumping unsafe values. Note that the TOML format has no functionality
+ to mark that the data is unsafe for re-consumption. (https://github.com/ansible/ansible/issues/71307)
+ - ansible-galaxy download - fix bug when downloading a collection in an SCM subdirectory
+ - ansible-test units - fixed collection location code to work under pytest >=
+ 6.0.0
+ - avoid clobbering existing facts inside loop when task also returns ansible_facts.
+ - cron - cron file should not be empty after adding var (https://github.com/ansible/ansible/pull/71207)
+ - fortimanager httpapi plugin - fix redirect to point to the ``fortinet.fortimanager``
+ collection (https://github.com/ansible/ansible/pull/71073).
+ - gluster modules - fix redirect to point to the ``gluster.gluster`` collection
+ (https://github.com/ansible/ansible/pull/71240).
+ - linux network facts - get the correct value for broadcast address (https://github.com/ansible/ansible/issues/64384)
+ - native jinja2 types - properly handle Undefined in nested data.
+ - powershell - fix escaping of strings that broke modules like fetch when dealing
+ with special chars - https://github.com/ansible/ansible/issues/62781
+ - powershell - fix the CLIXML parser when it contains nested CLIXML objects
+ - https://github.com/ansible/ansible/issues/69550
+ - psrp - Use native PSRP mechanism when copying files to support custom endpoints
+ - strftime filter - Input epoch is allowed to be a float (https://github.com/ansible/ansible/issues/71257)
+ - systemd - fixed chroot usage on new versions of systemd that broke because
+ of upstream changes in systemctl output
+ - systemd - made the systemd module work correctly when the SYSTEMD_OFFLINE
+ environment variable is set
+ - templating - fix error message for ``x in y`` when y is undefined (https://github.com/ansible/ansible/issues/70984)
+ - unarchive - check ``fut_gid`` against ``run_gid`` in addition to supplemental
+ groups (https://github.com/ansible/ansible/issues/49284)
+ minor_changes:
+ - Fixed ansible-doc to not substitute for words followed by parentheses. For
+ instance, ``IBM(International Business Machines)`` will no longer be substituted
+ with a link to a non-existent module. https://github.com/ansible/ansible/pull/71070
+ - Updated network integration auth timeout to 90 secs.
+ - ansible-doc will now format, ``L()``, ``R()``, and ``HORIZONTALLINE`` in plugin
+ docs just as the website docs do. https://github.com/ansible/ansible/pull/71070
+ - ansible-test - Remove ``pytest < 6.0.0`` constraint for managed installations
+ on Python 3.x now that pytest 6 is supported.
+ - ansible-test - the ACME test container was updated, it now supports external
+ account creation and has a basic OCSP responder (https://github.com/ansible/ansible/pull/71097,
+ https://github.com/ansible/acme-test-container/releases/tag/2.0.0).
+ - galaxy - add documentation about galaxy parameters in examples/ansible.cfg
+ (https://github.com/ansible/ansible/issues/68402).
+ - iptables - add a note about ipv6-icmp in protocol parameter (https://github.com/ansible/ansible/issues/70905).
+ - setup.py - Skip doing conflict checks for ``sdist`` and ``egg_info`` commands
+ (https://github.com/ansible/ansible/pull/71310)
+ - subelements - clarify the lookup plugin documentation for parameter handling
+ (https://github.com/ansible/ansible/issues/38182).
+ release_summary: '| Release Date: 2020-08-31
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ security_fixes:
+ - '**security issue** - copy - Redact the value of the no_log ''content'' parameter
+ in the result''s invocation.module_args in check mode. Previously when used
+ with check mode and with ''-vvv'', the module would not censor the content
+ if a change would be made to the destination path. (CVE-2020-14332)
+
+ '
+ - dnf - Previously, regardless of the ``disable_gpg_check`` option, packages
+ were not GPG validated. They are now. (CVE-2020-14365)
+ codename: When the Levee Breaks
+ fragments:
+ - 38182_subelements.yml
+ - 68402_galaxy.yml
+ - 70023-results-type-filtering.yml
+ - 70831-skip-literal_eval-string-filter-native-jinja.yml
+ - 70905_iptables_ipv6.yml
+ - 70922-fix-block-in-rescue.yml
+ - 70984-templating-ansibleundefined-in-operator.yml
+ - 71073-fortimanager-httpapi-redirect.yml
+ - 71097-ansible-test-acme-container.yml
+ - 71195-netconf_config_validate_issue.yaml
+ - 71197-systemctl-ignore-message.yaml
+ - 71238-update-auth-timeout.yml
+ - 71240-gluster-modules-redirect.yml
+ - 71257-strftime-float.yml
+ - 71279-skip-conflict-check.yml
+ - 71306-fix-exit-code-no-failure.yml
+ - 71307-toml-dumps-unsafe.yml
+ - ansible-doc-formats.yml
+ - ansible-test-pytest-cap-revert.yml
+ - copy-sanitize-check-mode-invocation-args.yaml
+ - delegation_password.yml
+ - dnf_gpg.yml
+ - fix-cron-file-regression.yaml
+ - fix_meta_tasks_with_flush_cache.yml
+ - galaxy-download-scm.yaml
+ - galaxy_collections_paths-remove-dep.yml
+ - iosxr_netconf_config_commit_fix.yaml
+ - linux-network-facts-broadcast-address.yaml
+ - native-jinja2-types-properly-handle-nested-undefined.yml
+ - no_fact_loop_loss.yml
+ - powershell-fix-quoting.yaml
+ - powershell-nested-clixml.yml
+ - psrp-copy.yaml
+ - pytest-collections-fix.yml
+ - unarchive-check-future-gid-against-run-gid.yml
+ - v2.10.1rc1_summary.yaml
+ release_date: '2020-08-31'
+ 2.10.1rc2:
+ changes:
+ release_summary: '| Release Date: 2020-09-01
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ security_fixes:
+ - The fix for CVE-2020-1736 has been reverted. Users are encouraged to specify
+ a ``mode`` parameter in their file-based tasks when the files being manipulated
+ contain sensitive data.
+ codename: When the Levee Breaks
+ fragments:
+ - cve-2020-1736-revert.yml
+ - v2.10.1rc2_summary.yaml
+ release_date: '2020-09-01'
+ 2.10.1rc3:
+ changes:
+ minor_changes:
+ - ansible-test - Add ``macos/10.15`` as a supported value for the ``--remote``
+ option.
+ - ansible-test - Allow custom ``--remote-stage`` options for development and
+ testing.
+ - ansible-test - Fix ``ansible-test coverage`` reporting sub-commands (``report``,
+ ``html``, ``xml``) on Python 2.6.
+ - ansible-test - Remove the discontinued ``us-east-2`` choice from the ``--remote-aws-region``
+ option.
+ - ansible-test - Request remote resources by provider name for all provider
+ types.
+ - ansible-test - Show a warning when the obsolete ``--remote-aws-region`` option
+ is used.
+ - ansible-test - Support custom remote endpoints with the ``--remote-endpoint``
+ option.
+ - ansible-test - Update built-in service endpoints for the ``--remote`` option.
+ - ansible-test - Use new endpoint for Parallels based instances with the ``--remote``
+ option.
+ - ansible-test - default container now uses default-test-container 2.7.0 and
+ ansible-base-test-container 1.6.0. This brings in Python 3.9.0rc1 for testing.
+ release_summary: '| Release Date: 2020-09-07
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - ansible-test-coverage-py26.yml
+ - ansible-test-endpoint-update.yml
+ - ansible-test-macos-10.15.yml
+ - ansible-test-parallels-endpoint.yml
+ - default-test-container160.yml
+ - v2.10.1rc3_summary.yaml
+ release_date: '2020-09-07'
+ 2.10.2:
+ changes:
+ release_summary: '| Release Date: 2020-10-05
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - v2.10.2_summary.yaml
+ release_date: '2020-10-05'
+ 2.10.2rc1:
+ changes:
+ bugfixes:
+ - Pass the connection's timeout to connection plugins instead of the task's
+ timeout.
+ - Provide more information in AnsibleUndefinedVariable (https://github.com/ansible/ansible/issues/55152)
+ - ansible-doc - properly show plugin name when ``name:`` is used instead of
+ ``<plugin_type>:`` (https://github.com/ansible/ansible/pull/71966).
+ - ansible-test - Change classification using ``--changed`` now consistently
+ handles common configuration files for supported CI providers.
+ - ansible-test - The ``resource_prefix`` variable provided to tests running
+ on Azure Pipelines is now converted to lowercase to match other CI providers.
+ - collection loader - fix bogus code coverage entries for synthetic packages
+ - psrp - Fix hang when copying an empty file to the remote target
+ - runas - create a new token when running as ``SYSTEM`` to ensure it has the
+ full privileges assigned to that account
+ minor_changes:
+ - ansible-test - Raise to 4096 the number of bytes scanned by ansible-test
+ to determine whether a file is binary.
+ release_summary: '| Release Date: 2020-09-28
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - 55152-add-more-info-to-AnsibleUndefinedVariable.yml
+ - 71722-fix-default-connection-timeout.yaml
+ - 71921-raise-bytes-for-binary-test.yml
+ - 71966-ansible-doc-plugin-name.yml
+ - ansible-test-azp-resource-prefix.yml
+ - ansible-test-change-classification.yml
+ - fix_bogus_coverage.yml
+ - psrp-copy-empty-file.yml
+ - runas-become-system-privileges.yml
+ - v2.10.2rc1_summary.yaml
+ release_date: '2020-09-28'
+ 2.10.3:
+ changes:
+ release_summary: '| Release Date: 2020-11-02
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - v2.10.3_summary.yaml
+ release_date: '2020-11-02'
+ 2.10.3rc1:
+ changes:
+ breaking_changes:
+ - ansible-galaxy login command has been removed (see https://github.com/ansible/ansible/issues/71560)
+ bugfixes:
+ - Collection callbacks were ignoring options and rules for stdout and adhoc
+ cases.
+ - Collections - Ensure ``action_loader.get`` is called with ``collection_list``
+ to properly find collections when ``collections:`` search is specified (https://github.com/ansible/ansible/issues/72170)
+ - Fix ``RecursionError`` when templating large vars structures (https://github.com/ansible/ansible/issues/71920)
+ - ansible-doc - plugin option deprecations now also get ``collection_name``
+ added (https://github.com/ansible/ansible/pull/71735).
+ - ansible-test - Always connect additional Docker containers to the network
+ used by the current container (if any).
+ - ansible-test - Always map ``/var/run/docker.sock`` into test containers created
+ by the ``--docker`` option if the docker host is not ``localhost``.
+ - ansible-test - Attempt to detect the Docker hostname instead of assuming ``localhost``.
+ - ansible-test - Correctly detect running in a Docker container on Azure Pipelines.
+ - ansible-test - Prefer container IP at ``.NetworkSettings.Networks.{NetworkName}.IPAddress``
+ over ``.NetworkSettings.IPAddress``.
+ - ansible-test - The ``cs`` and ``openshift`` test plugins now search for containers
+ on the current network instead of assuming the ``bridge`` network.
+ - ansible-test - Using the ``--remote`` option on Azure Pipelines now works
+ from a job running in a container.
+ - async_wrapper - Fix race condition when multiple async tasks try to create
+ the ``~/.ansible_async`` folder at the same time - https://github.com/ansible/ansible/issues/59306
+ - 'dnf - it is now possible to specify both ``security: true`` and ``bugfix:
+ true`` to install updates of both types. Previously, only security would get
+ installed if both were true. (https://github.com/ansible/ansible/issues/70854)'
+ - facts - fix distribution fact for SLES4SAP (https://github.com/ansible/ansible/pull/71559).
+ - is_string/vault - Ensure the is_string helper properly identifies AnsibleVaultEncryptedUnicode
+ as a string (https://github.com/ansible/ansible/pull/71609)
+ - powershell - stop reading the PowerShell version from the env var ``POWERSHELL_VERSION``.
+ This feature never worked properly and can cause conflicts with other libraries
+ that use this var
+ - url lookup - make sure that options supplied in ansible.cfg are actually used
+ (https://github.com/ansible/ansible/pull/71736).
+ - 'user - AnsibleModule.run_command returns a tuple of return code, stdout and
+ stderr. The module main function of the user module expects user.create_user
+ to return a tuple of return code, stdout and stderr. Fix the locations where
+ stdout and stderr got reversed.
+
+ '
+ - 'user - Local users with an expiry date cannot be created as the ``luseradd``
+ / ``lusermod`` commands do not support the ``-e`` option. Set the expiry time
+ in this case via ``lchage`` after the user was created / modified. (https://github.com/ansible/ansible/issues/71942)
+
+ '
+ minor_changes:
+ - ansible-test - Add a ``--docker-network`` option to choose the network for
+ running containers when using the ``--docker`` option.
+ - ansible-test - Collections can now specify pip constraints for unit and integration
+ test requirements using ``tests/unit/constraints.txt`` and ``tests/integration/constraints.txt``
+ respectively.
+ - ansible-test - python-cryptography is now bounded at <3.2, as 3.2 drops support
+ for OpenSSL 1.0.2 upon which some of our CI infrastructure still depends.
+ - dnf - now shows specific package changes (installations/removals) under ``results``
+ in check_mode. (https://github.com/ansible/ansible/issues/66132)
+ release_summary: '| Release Date: 2020-10-26
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - 66132_dnf_show_pkgs_in_check_mode.yml
+ - 70854-dnf-mutually-exclusive-filters.yml
+ - 71559-fix-distribution-fact-sles4sap.yaml
+ - 71609-is_string-vault.yml
+ - 71735-deprecation-tagging.yml
+ - 71920-fix-templating-recursion-error.yml
+ - 72170-action-loader-collection-list.yml
+ - ansible-test-azp-agent-temp-dir.yml
+ - ansible-test-collection-constraints.yml
+ - ansible-test-constraints-cryptography-old-openssl.yml
+ - ansible-test-container-ip-lookup.yml
+ - ansible-test-docker-default-network.yml
+ - ansible-test-docker-detection-fix.yml
+ - ansible-test-docker-not-localhost.yml
+ - ansible-test-docker-socket.yml
+ - ansible-test-network-container-search.yml
+ - async-race-condition.yml
+ - collections_cb_fix.yml
+ - fix_ansible_issue_71942.yaml
+ - fix_reversed_return_value_order_72088.yaml
+ - galaxy_login_bye.yml
+ - powershell-version-env.yml
+ - url-lookup-ini.yml
+ - v2.10.3rc1_summary.yaml
+ release_date: '2020-10-26'
+ 2.10.4:
+ changes:
+ bugfixes:
+ - ansible-test - ``cryptography`` is now limited to versions prior to 3.2 only
+ when an incompatible OpenSSL version (earlier than 1.1.0) is detected
+ release_summary: '| Release Date: 2020-12-14
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - ansible-test-constraints-cryptography.yml
+ - v2.10.4_summary.yaml
+ release_date: '2020-12-14'
+ 2.10.4rc1:
+ changes:
+ bugfixes:
+ - Adjust various hard-coded action names to also include their ``ansible.builtin.``
+ and ``ansible.legacy.`` prefixed version (https://github.com/ansible/ansible/issues/71817,
+ https://github.com/ansible/ansible/issues/71818, https://github.com/ansible/ansible/pull/71824).
+ - AnsibleModule - added arg ``ignore_invalid_cwd`` to ``AnsibleModule.run_command()``,
+ to control its behaviour when ``cwd`` is invalid. (https://github.com/ansible/ansible/pull/72390)
+ - Fixed an issue where a missing or non-executable `netstat` binary led to
+ an incorrect command being executed.
+ - Improve Ansible config deprecations to show the source of the deprecation
+ (ansible-base). Also remove space before a comma in config deprecations (https://github.com/ansible/ansible/pull/72697).
+ - Skip invalid collection names when listing in ansible-doc instead of throwing
+ an exception. Issue#72257
+ - The ``docker`` and ``k8s`` action groups / module default groups now also
+ support the moved modules in `community.docker <https://galaxy.ansible.com/community/docker>`_,
+ `community.kubevirt <https://github.com/ansible-collections/community.kubevirt>`_,
+ `community.okd <https://galaxy.ansible.com/community/okd>`_, and `kubernetes.core
+ <https://galaxy.ansible.com/kubernetes/core>`_ (https://github.com/ansible/ansible/pull/72428).
+ - account for bug in Python 2.6 that occurs during interpreter shutdown to avoid
+ stack trace
+ - ansible-test - Correctly detect changes in a GitHub pull request when running
+ on Azure Pipelines.
+ - ansible-test - Skip installing requirements if they are already installed.
+ - 'ansible-test - add constraint for ``cffi`` to prevent failure on systems
+ with older versions of ``gcc`` (https://foss.heptapod.net/pypy/cffi/-/issues/480)
+
+ '
+ - ansible-test - convert target paths to unicode on Python 2 to avoid ``UnicodeDecodeError``
+ (https://github.com/ansible/ansible/issues/68398, https://github.com/ansible/ansible/pull/72623).
+ - ansible-test - improve classification of changes to ``.gitignore``, ``COPYING``,
+ ``LICENSE``, ``Makefile``, and all files ending with one of ``.in``, ``.md``,
+ ``.rst``, ``.toml``, ``.txt`` in the collection root directory (https://github.com/ansible/ansible/pull/72353).
+ - ansible-test validate-modules - when a module uses ``add_file_common_args=True``
+ and does not use a keyword argument for ``argument_spec`` in ``AnsibleModule()``,
+ the common file arguments were not considered added during validation (https://github.com/ansible/ansible/pull/72334).
+ - basic.AnsibleModule - AnsibleModule.run_command silently ignores a non-existent
+ directory in the ``cwd`` argument (https://github.com/ansible/ansible/pull/72390).
+ - 'blockinfile - properly insert a block at the end of a file that does not
+ have a trailing newline character (https://github.com/ansible/ansible/issues/72055)
+
+ '
+ - dnf - fix filtering to avoid dependency conflicts (https://github.com/ansible/ansible/issues/72316)
+ - ensure 'local' connection always has the correct default user for actions
+ to consume.
+ - 'pause - Fix indefinite hang when using a pause task on a background process
+ (https://github.com/ansible/ansible/issues/32142)
+
+ '
+ - remove the redundant remote_user setting in play_context for the local connection,
+ since the plugin already sets it; this also removes a fork/thread issue caused
+ by use of the pwd library.
+ - 'set_mode_if_different - handle symlink if it is inside a directory with sticky
+ bit set (https://github.com/ansible/ansible/pull/45198)
+
+ '
+ - 'systemd - account for templated unit files using ``@`` when searching for
+ the unit file (https://github.com/ansible/ansible/pull/72347#issuecomment-730626228)
+
+ '
+ - 'systemd - follow up fix to https://github.com/ansible/ansible/issues/72338
+ to use ``list-unit-files`` rather than ``list-units`` in order to show all
+ units files on the system.
+
+ '
+ - 'systemd - work around bug with ``systemd`` 245 and 5.8 kernel that does not
+ correctly report service state (https://github.com/ansible/ansible/issues/71528)
+
+ '
+ - wait_for - catch and ignore errors when getting active connections with psutil
+ (https://github.com/ansible/ansible/issues/72322)
+ minor_changes:
+ - ansible-doc - provide ``has_action`` field in JSON output for modules. That
+ information is currently only available in the text view (https://github.com/ansible/ansible/pull/72359).
+ - 'ansible-galaxy - find any collection dependencies in the globally configured
+ Galaxy servers and not just the server the parent collection is from.
+
+ '
+ - ansible-test - Added a ``--export`` option to the ``ansible-test coverage
+ combine`` command to facilitate multi-stage aggregation of coverage in CI
+ pipelines.
+ - ansible-test - Added the ``--remote rhel/7.9`` option to run tests on RHEL
+ 7.9
+ - ansible-test - CentOS 8 container is now 8.2.2004 (https://github.com/ansible/distro-test-containers/pull/45).
+ - ansible-test - Fix container hostname/IP discovery for the ``acme`` test plugin.
+ - ansible-test - OpenSuse container now uses Leap 15.2 (https://github.com/ansible/distro-test-containers/pull/48).
+ - ansible-test - Ubuntu containers as well as ``default-test-container`` and
+ ``ansible-base-test-container`` are now slightly smaller due to apt cleanup
+ (https://github.com/ansible/distro-test-containers/pull/46).
+ - ansible-test - ``default-test-container`` and ``ansible-base-test-container``
+ now use Python 3.9.0 instead of 3.9.0rc1.
+ - ansible-test - centos6 end of life - container image updated to point to vault
+ base repository (https://github.com/ansible/distro-test-containers/pull/54)
+ - ansible-test validate-modules - no longer assume that ``default`` for ``type=bool``
+ options is ``false``, as the default is ``none`` and for some modules, ``none``
+ and ``false`` mean different things (https://github.com/ansible/ansible/issues/69561).
+ - iptables - reorder comment position to be at the end (https://github.com/ansible/ansible/issues/71444).
+ release_summary: '| Release Date: 2020-12-07
+
+ | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__
+
+ '
+ codename: When the Levee Breaks
+ fragments:
+ - 32143-pause-background-hangs.yml
+ - 71496-iptables-reorder-comment-position.yml
+ - 71528-systemd-capbpf-workaround.yml
+ - 71528-systemd-improve-unit-searching.yml
+ - 71528-systemd-list-unit-files.yml
+ - 71824-action-fqcns.yml
+ - 72055-blockinfile-fix-insert-after-line-no-linesep.yml
+ - 72316-dnf-filtering.yml
+ - 72322-wait-for-handle-errors.yml
+ - 72390-return-error-if-cwd-directory-does-not-exist.yml
+ - 72428-action-groups-docker-k8s.yml
+ - 72516-fix-aix-network-facts.yml
+ - 72623-ansible-test-unicode-paths.yml
+ - 72697-improve-config-deprecations.yml
+ - 72699-validate-modules-default-for-bools.yml
+ - ansible-base-update-containers.yml
+ - ansible-doc-has_action.yml
+ - ansible-test-acme-test-plugin.yml
+ - ansible-test-azp-change-detection.yml
+ - ansible-test-centos6-eol.yml
+ - ansible-test-collection-classification.yml
+ - ansible-test-coverage-combine-export.yml
+ - ansible-test-validate-modules-file-common-args.yml
+ - cffi-constraint.yml
+ - ensure_local_user_correctness.yml
+ - galaxy-servers.yml
+ - play_context_remove_redundant_pwd.yml
+ - ps-sanity-requirements.yml
+ - py26-multiprocess-queue-bug.yml
+ - rhel-7.9.yml
+ - run-command-cwd.yml
+ - set_mode_if_different-symlink-sticky-dir.yml
+ - skip_invalid_coll_name_when_listing.yml
+ - v2.10.4rc1_summary.yaml
+ release_date: '2020-12-07'
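
One bugfix entry above records that the machine-readable ``changelogs/changelog.yaml`` now ships with the release. A minimal sketch of consuming it, assuming PyYAML is installed and the top-level ``releases`` mapping visible in this diff (this is an illustration, not an official API):

    import yaml

    # Structure assumed from the diff above:
    # releases -> <version> -> changes / codename / fragments / release_date
    with open('changelogs/changelog.yaml') as f:
        data = yaml.safe_load(f)

    # Lexical version sort is good enough for a quick look.
    for version, release in sorted(data['releases'].items()):
        changes = release.get('changes') or {}
        print('%s (%s): %d bugfixes' % (
            version,
            release.get('release_date', 'unknown'),
            len(changes.get('bugfixes', []))))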
diff --git a/docs/bin/find-plugin-refs.py b/docs/bin/find-plugin-refs.py
new file mode 100755
index 00000000..dee8ce68
--- /dev/null
+++ b/docs/bin/find-plugin-refs.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# To run this script, first make webdocs in the toplevel of the checkout. This will generate all
+# rst files from their sources. Then run this script ./docs/bin/find-plugin-refs.py
+#
+# No output means that there are no longer any bare module and plugin names referenced via :ref:
+#
+# For my listing of what needs to be changed after running this script, see the comment at the end
+# of the file
+
+
+import glob
+import os
+import re
+
+from ansible.module_utils._text import to_text
+
+
+TOPDIR = os.path.join(os.path.dirname(__file__), '..', 'docsite', 'rst')
+
+
+def plugin_names(topdir):
+ plugins = set()
+
+ # Modules are in a separate directory
+ for module_filename in glob.glob(os.path.join(topdir, 'modules', '*_module.rst')):
+ module_filename = os.path.basename(module_filename)
+ module_name = module_filename[:module_filename.index('_module.rst')]
+ plugins.add(module_name)
+
+ for plugin_filename in glob.glob(os.path.join(topdir, 'plugins', '*', '*.rst')):
+ plugin_filename = os.path.basename(plugin_filename)
+ plugin_name = plugin_filename[:plugin_filename.index('.rst')]
+ plugins.add(plugin_name)
+
+ return plugins
+
+
+def process_refs(topdir, plugin_names):
+ REF_RE = re.compile(':ref:`([^`]*)`')
+ LABEL_RE = re.compile('<([^>]*)>$')
+
+ # Walk the whole docs tree looking for :ref:. Anywhere those are found, search for `([^`]*)`
+ for dirpath, dirnames, filenames in os.walk(topdir):
+ for filename in filenames:
+ with open(os.path.join(dirpath, filename), 'rb') as f:
+ data = f.read()
+ data = to_text(data)
+ for ref_match in re.finditer(REF_RE, data):
+ label = ref_match.group(1)
+
+ # If the ref label includes "<", then search for the label inside of the "<>"
+ label_match = re.search(LABEL_RE, label)
+ if label_match:
+ label = label_match.group(1)
+
+ # If the ref label is listed in plugins, then print that the file contains an unported ref
+ if label in plugin_names:
+ print(':ref:`{0}` matching plugin {1} was found in {2}'.format(ref_match.group(1), label, os.path.join(dirpath, filename)))
+
+
+if __name__ == '__main__':
+
+ plugins = plugin_names(TOPDIR)
+
+ process_refs(TOPDIR, plugins)
+
+ # Fixes needed: docs/bin/plugin_formatter.py
+ # - t = _MODULE.sub(r":ref:`\1 <\1>`", t)
+ # + t = _MODULE.sub(r":ref:`\1 <module_\1>`", t)
+ #
+ # These have @{module}@ in the template and need to have something like module_@{module}@
+ # If any of these list plugins as well as modules, they will need to have a conditional or extra
+ # data passed in to handle that in a generic fashion:
+ #
+ # docs/templates/list_of_CATEGORY_modules.rst.j2
+ # docs/templates/list_of_CATEGORY_plugins.rst.j2
+ # docs/templates/modules_by_support.rst.j2
+ #
+ # These are just a simple manual fix:
+ # :ref:`command` matching plugin command was found in ./../docsite/rst/user_guide/intro_adhoc.rst
+ # :ref:`shell` matching plugin shell was found in ./../docsite/rst/user_guide/intro_adhoc.rst
+ # :ref:`config` matching plugin config was found in ./../docsite/rst/installation_guide/intro_configuration.rst
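
A short standalone demonstration of how the two regular expressions above interact (mirroring, not importing, the script's patterns): when a reference carries an explicit label in angle brackets, the text inside ``<...>`` is what gets checked against the plugin names.

    import re

    REF_RE = re.compile(':ref:`([^`]*)`')
    LABEL_RE = re.compile('<([^>]*)>$')

    sample = 'See :ref:`the command module <command>` and :ref:`shell`.'
    for match in REF_RE.finditer(sample):
        label = match.group(1)
        explicit = LABEL_RE.search(label)
        # Prints 'command' then 'shell' - both would be flagged as bare
        # plugin references by the script above.
        print(explicit.group(1) if explicit else label)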
diff --git a/docs/bin/testing_formatter.sh b/docs/bin/testing_formatter.sh
new file mode 100755
index 00000000..5e3781b4
--- /dev/null
+++ b/docs/bin/testing_formatter.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+FILENAME=../docsite/rst/dev_guide/testing/sanity/index.rst
+
+cat <<- EOF >$FILENAME.new
+.. _all_sanity_tests:
+
+Sanity Tests
+============
+
+The following sanity tests are available as \`\`--test\`\` options for \`\`ansible-test sanity\`\`.
+This list is also available using \`\`ansible-test sanity --list-tests --allow-disabled\`\`.
+
+For information on how to run these tests, see :ref:\`sanity testing guide <testing_sanity>\`.
+
+.. toctree::
+ :maxdepth: 1
+
+$(for test in $(../../bin/ansible-test sanity --list-tests --allow-disabled); do echo " ${test}"; done)
+
+EOF
+
+# By default use sha1sum which exists on Linux, if not present select the correct binary
+# based on platform defaults
+SHA_CMD="sha1sum"
+if ! which ${SHA_CMD} > /dev/null 2>&1; then
+ if which sha1 > /dev/null 2>&1; then
+ SHA_CMD="sha1"
+ elif which shasum > /dev/null 2>&1; then
+ SHA_CMD="shasum"
+ else
+ # exit early with an error if no hashing binary can be found since it is required later
+ exit 1
+ fi
+fi
+
+# Put file into place if it has changed
+if [ "$(${SHA_CMD} <$FILENAME)" != "$(${SHA_CMD} <$FILENAME.new)" ]; then
+ mv -f $FILENAME.new $FILENAME
+fi
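
The script's final step - regenerate into a temporary file, then only move it into place when the checksum differs - keeps the target's mtime stable so downstream build tools don't rebuild unnecessarily. A rough Python equivalent of that pattern (function and path names here are illustrative, not taken from the script):

    import hashlib
    import os

    def sha1_of(path):
        with open(path, 'rb') as f:
            return hashlib.sha1(f.read()).hexdigest()

    def replace_if_changed(target, candidate):
        # Put the freshly generated file in place only if its content
        # differs from the existing one; otherwise discard it.
        if not os.path.exists(target) or sha1_of(target) != sha1_of(candidate):
            os.replace(candidate, target)
        else:
            os.remove(candidate)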
diff --git a/docs/docsite/.gitignore b/docs/docsite/.gitignore
new file mode 100644
index 00000000..8fade815
--- /dev/null
+++ b/docs/docsite/.gitignore
@@ -0,0 +1,19 @@
+# Old compiled python stuff
+*.py[co]
+# package building stuff
+build
+# Emacs backup files...
+*~
+.\#*
+.doctrees
+# Generated docs stuff
+ansible*.xml
+.buildinfo
+objects.inv
+.doctrees
+rst/dev_guide/testing/sanity/index.rst
+rst/modules/*.rst
+rst/playbooks_keywords.rst
+rst/collections/
+
+*.min.css
diff --git a/docs/docsite/.nojekyll b/docs/docsite/.nojekyll
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/docs/docsite/.nojekyll
diff --git a/docs/docsite/Makefile b/docs/docsite/Makefile
new file mode 100644
index 00000000..c3c2d4c7
--- /dev/null
+++ b/docs/docsite/Makefile
@@ -0,0 +1,139 @@
+OS := $(shell uname -s)
+SITELIB = $(shell python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
+PLUGIN_FORMATTER=../../hacking/build-ansible.py docs-build
+TESTING_FORMATTER=../bin/testing_formatter.sh
+KEYWORD_DUMPER=../../hacking/build-ansible.py document-keywords
+CONFIG_DUMPER=../../hacking/build-ansible.py document-config
+GENERATE_CLI=../../hacking/build-ansible.py generate-man
+COLLECTION_DUMPER=../../hacking/build-ansible.py collection-meta
+ifeq ($(shell echo $(OS) | egrep -ic 'Darwin|FreeBSD|OpenBSD|DragonFly'),1)
+CPUS ?= $(shell sysctl hw.ncpu|awk '{print $$2}')
+else
+CPUS ?= $(shell nproc)
+endif
+
+# Sets the build output directory for the main docsite if it's not already specified
+ifndef BUILDDIR
+ BUILDDIR = _build
+endif
+
+# Backwards compat for separate VARS
+PLUGIN_ARGS=
+ifdef MODULES
+ifndef PLUGINS
+ PLUGIN_ARGS = -l $(MODULES)
+else
+ PLUGIN_ARGS = -l $(MODULES),$(PLUGINS)
+endif
+else
+ifdef PLUGINS
+ PLUGIN_ARGS = -l $(PLUGINS)
+endif
+endif
+
+
+DOC_PLUGINS ?= become cache callback cliconf connection httpapi inventory lookup netconf shell strategy vars
+
+PYTHON=python
+# fetch version from project release.py as single source-of-truth
+VERSION := $(shell $(PYTHON) ../../packaging/release/versionhelper/version_helper.py --raw || echo error)
+ifeq ($(findstring error,$(VERSION)), error)
+$(error "version_helper failed")
+endif
+
+assertrst:
+ifndef rst
+ $(error specify document or pattern with rst=somefile.rst)
+endif
+
+all: docs
+
+docs: htmldocs
+
+generate_rst: collections_meta config cli keywords plugins testing
+base_generate_rst: collections_meta config cli keywords base_plugins testing
+
+htmldocs: generate_rst
+ CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx html
+
+base_htmldocs: base_generate_rst
+ CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx html
+
+singlehtmldocs: generate_rst
+ CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx singlehtml
+
+base_singlehtmldocs: base_generate_rst
+ CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx singlehtml
+
+linkcheckdocs: generate_rst
+ CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx linkcheck
+
+webdocs: docs
+
+#TODO: leaving htmlout removal for those having older versions, should eventually be removed also
+clean:
+ @echo "Cleaning $(BUILDDIR)"
+ -rm -rf $(BUILDDIR)/doctrees
+ -rm -rf $(BUILDDIR)/html
+ -rm -rf htmlout
+ -rm -rf module_docs
+ -rm -rf $(BUILDDIR)
+ -rm -f .buildinfo
+ -rm -f objects.inv
+ -rm -rf *.doctrees
+ @echo "Cleaning up minified css files"
+ find . -type f -name "*.min.css" -delete
+ @echo "Cleaning up byte compiled python stuff"
+ find . -regex ".*\.py[co]$$" -delete
+ @echo "Cleaning up editor backup files"
+ find . -type f \( -name "*~" -or -name "#*" \) -delete
+ find . -type f \( -name "*.swp" \) -delete
+ @echo "Cleaning up generated rst"
+ rm -f rst/playbooks_directives.rst
+ rm -f rst/reference_appendices/config.rst
+ rm -f rst/reference_appendices/playbooks_keywords.rst
+ rm -f rst/dev_guide/collections_galaxy_meta.rst
+ rm -f rst/cli/*.rst
+ rm -rf rst/collections/*
+ @echo "Cleaning up legacy generated rst locations"
+ rm -rf rst/modules
+ rm -f rst/plugins/*/*.rst
+
+.PHONY: docs clean
+
+collections_meta: ../templates/collections_galaxy_meta.rst.j2
+ $(COLLECTION_DUMPER) --template-file=../templates/collections_galaxy_meta.rst.j2 --output-dir=rst/dev_guide/ ../../lib/ansible/galaxy/data/collections_galaxy_meta.yml
+
+# TODO: make generate_man output dir cli option
+cli:
+ mkdir -p rst/cli
+ $(GENERATE_CLI) --template-file=../templates/cli_rst.j2 --output-dir=rst/cli/ --output-format rst ../../lib/ansible/cli/*.py
+
+keywords: ../templates/playbooks_keywords.rst.j2
+ $(KEYWORD_DUMPER) --template-dir=../templates --output-dir=rst/reference_appendices/ ./keyword_desc.yml
+
+config: ../templates/config.rst.j2
+ $(CONFIG_DUMPER) --template-file=../templates/config.rst.j2 --output-dir=rst/reference_appendices/ ../../lib/ansible/config/base.yml
+
+# For now, if we're building on devel, just build base docs. In the future we'll want to build docs that
+# are the latest versions on galaxy (using a different antsibull-docs subcommand)
+plugins:
+ if expr "$(VERSION)" : '.*[.]dev[0-9]\{1,\}$$' &> /dev/null; then \
+ $(PLUGIN_FORMATTER) base -o rst $(PLUGIN_ARGS);\
+ else \
+ $(PLUGIN_FORMATTER) full -o rst $(PLUGIN_ARGS);\
+ fi
+
+# This only builds the plugin docs included with ansible-base
+base_plugins:
+	$(PLUGIN_FORMATTER) base -o rst $(PLUGIN_ARGS)
+
+testing:
+ $(TESTING_FORMATTER)
+
+epub:
+ (CPUS=$(CPUS) $(MAKE) -f Makefile.sphinx epub)
+
+htmlsingle: assertrst
+ sphinx-build -j $(CPUS) -b html -d $(BUILDDIR)/doctrees ./rst $(BUILDDIR)/html rst/$(rst)
+ @echo "Output is in $(BUILDDIR)/html/$(rst:.rst=.html)"
diff --git a/docs/docsite/Makefile.sphinx b/docs/docsite/Makefile.sphinx
new file mode 100644
index 00000000..d8435064
--- /dev/null
+++ b/docs/docsite/Makefile.sphinx
@@ -0,0 +1,24 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS = -j $(CPUS) -n -w rst_warnings
+SPHINXBUILD = sphinx-build
+SPHINXPROJ = sdfsdf
+SOURCEDIR = rst
+
+# Sets the build output directory if it's not specified on the command line
+ifndef BUILDDIR
+ BUILDDIR = _build
+endif
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ $(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile.sphinx
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile.sphinx
+ $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/docsite/README.md b/docs/docsite/README.md
new file mode 100644
index 00000000..d643792e
--- /dev/null
+++ b/docs/docsite/README.md
@@ -0,0 +1,26 @@
+Ansible documentation
+=====================
+
+This project hosts the source behind the general pages of [docs.ansible.com](https://docs.ansible.com/). Module-specific documentation is hosted in the various collections repositories. See [Ansible Galaxy](https://galaxy.ansible.com/), the list of [Ansible-maintained collections](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html), and the [ansible-collections organization](https://github.com/ansible-collections) for collections sources.
+
+To create clear, concise, and consistent contributions to Ansible documentation, please refer to the following information.
+
+Contributions
+=============
+Contributions to the documentation are welcome.
+
+The Ansible community produces guidance on contributions, building documentation, and submitting pull requests, which you can find in [Contributing to the Ansible Documentation](https://docs.ansible.com/ansible/latest/community/documentation_contributions.html).
+
+You can also join the [Docs Working Group](https://github.com/ansible/community/wiki/Docs) and/or the ``#ansible-docs`` channel on freenode IRC.
+
+Ansible style guide
+===================
+Ansible documentation is written in ReStructuredText(RST). The [Ansible style guide](https://docs.ansible.com/ansible/latest/dev_guide/style_guide/index.html#linguistic-guidelines) provides linguistic direction and technical guidelines for working with reStructuredText, in addition to other resources.
+
+Tools
+=====
+The Ansible community uses a range of tools and programs for working with Ansible documentation. Learn more about [Other Tools and Programs](https://docs.ansible.com/ansible/latest/community/other_tools_and_programs.html#popular-editors) in the Ansible Community Guide.
+
+GitHub
+======
+[Ansible documentation](https://github.com/ansible/ansible/tree/devel/docs/docsite) is hosted on the Ansible GitHub project and various collection repositories, especially those in the [ansible-collections organization](https://github.com/ansible-collections). For general GitHub workflows and other information, see the [GitHub Guides](https://guides.github.com/).
diff --git a/docs/docsite/_extensions/pygments_lexer.py b/docs/docsite/_extensions/pygments_lexer.py
new file mode 100644
index 00000000..62c7fdfd
--- /dev/null
+++ b/docs/docsite/_extensions/pygments_lexer.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=no-self-argument
+#
+# Copyright 2006-2017 by the Pygments team, see AUTHORS at
+# https://bitbucket.org/birkenfeld/pygments-main/raw/7941677dc77d4f2bf0bbd6140ade85a9454b8b80/AUTHORS
+# Copyright by Norman Richards (original author of JSON lexer).
+#
+# Licensed under BSD license:
+#
+# Copyright (c) 2006-2017 by the respective authors (see AUTHORS file).
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, print_function
+
+from pygments.lexer import LexerContext, ExtendedRegexLexer, DelegatingLexer, RegexLexer, bygroups, include
+from pygments.lexers import DiffLexer
+from pygments import token
+
+import re
+
+
+class AnsibleOutputPrimaryLexer(RegexLexer):
+ name = 'Ansible-output-primary'
+
+ # The following definitions are borrowed from Pygment's JSON lexer.
+ # It has been originally authored by Norman Richards.
+
+ # integer part of a number
+ int_part = r'-?(0|[1-9]\d*)'
+
+ # fractional part of a number
+ frac_part = r'\.\d+'
+
+ # exponential part of a number
+ exp_part = r'[eE](\+|-)?\d+'
+
+ tokens = {
+ # #########################################
+ # # BEGIN: states from JSON lexer #########
+ # #########################################
+ 'whitespace': [
+ (r'\s+', token.Text),
+ ],
+
+ # represents a simple terminal value
+ 'simplevalue': [
+ (r'(true|false|null)\b', token.Keyword.Constant),
+ (('%(int_part)s(%(frac_part)s%(exp_part)s|'
+ '%(exp_part)s|%(frac_part)s)') % vars(),
+ token.Number.Float),
+ (int_part, token.Number.Integer),
+ (r'"(\\\\|\\"|[^"])*"', token.String),
+ ],
+
+
+ # the right hand side of an object, after the attribute name
+ 'objectattribute': [
+ include('value'),
+ (r':', token.Punctuation),
+ # comma terminates the attribute but expects more
+ (r',', token.Punctuation, '#pop'),
+ # a closing bracket terminates the entire object, so pop twice
+ (r'\}', token.Punctuation, '#pop:2'),
+ ],
+
+ # a json object - { attr, attr, ... }
+ 'objectvalue': [
+ include('whitespace'),
+ (r'"(\\\\|\\"|[^"])*"', token.Name.Tag, 'objectattribute'),
+ (r'\}', token.Punctuation, '#pop'),
+ ],
+
+ # json array - [ value, value, ... }
+ 'arrayvalue': [
+ include('whitespace'),
+ include('value'),
+ (r',', token.Punctuation),
+ (r'\]', token.Punctuation, '#pop'),
+ ],
+
+ # a json value - either a simple value or a complex value (object or array)
+ 'value': [
+ include('whitespace'),
+ include('simplevalue'),
+ (r'\{', token.Punctuation, 'objectvalue'),
+ (r'\[', token.Punctuation, 'arrayvalue'),
+ ],
+ # #########################################
+ # # END: states from JSON lexer ###########
+ # #########################################
+
+ 'host-postfix': [
+ (r'\n', token.Text, '#pop:3'),
+ (r'( )(=>)( )(\{)',
+ bygroups(token.Text, token.Punctuation, token.Text, token.Punctuation),
+ 'objectvalue'),
+ ],
+
+ 'host-error': [
+ (r'(?:(:)( )(UNREACHABLE|FAILED)(!))?',
+ bygroups(token.Punctuation, token.Text, token.Keyword, token.Punctuation),
+ 'host-postfix'),
+ (r'', token.Text, 'host-postfix'),
+ ],
+
+ 'host-name': [
+ (r'(\[)([^ \]]+)(?:( )(=>)( )([^\]]+))?(\])',
+ bygroups(token.Punctuation, token.Name.Variable, token.Text, token.Punctuation, token.Text, token.Name.Variable, token.Punctuation),
+ 'host-error')
+ ],
+
+ 'host-result': [
+ (r'\n', token.Text, '#pop'),
+ (r'( +)(ok|changed|failed|skipped|unreachable)(=)([0-9]+)',
+ bygroups(token.Text, token.Keyword, token.Punctuation, token.Number.Integer)),
+ ],
+
+ 'root': [
+ (r'(PLAY|TASK|PLAY RECAP)(?:( )(\[)([^\]]+)(\]))?( )(\*+)(\n)',
+ bygroups(token.Keyword, token.Text, token.Punctuation, token.Literal, token.Punctuation, token.Text, token.Name.Variable, token.Text)),
+ (r'(fatal|ok|changed|skipping)(:)( )',
+ bygroups(token.Keyword, token.Punctuation, token.Text),
+ 'host-name'),
+ (r'(\[)(WARNING)(\]:)([^\n]+)',
+ bygroups(token.Punctuation, token.Keyword, token.Punctuation, token.Text)),
+ (r'([^ ]+)( +)(:)',
+ bygroups(token.Name, token.Text, token.Punctuation),
+ 'host-result'),
+ (r'(\tto retry, use: )(.*)(\n)', bygroups(token.Text, token.Literal.String, token.Text)),
+ (r'.*\n', token.Other),
+ ],
+ }
+
+
+class AnsibleOutputLexer(DelegatingLexer):
+ name = 'Ansible-output'
+ aliases = ['ansible-output']
+
+ def __init__(self, **options):
+ super(AnsibleOutputLexer, self).__init__(DiffLexer, AnsibleOutputPrimaryLexer, **options)
+
+
+# ####################################################################################################
+# # Sphinx plugin ####################################################################################
+# ####################################################################################################
+
+__version__ = "0.1.0"
+__license__ = "BSD license"
+__author__ = "Felix Fontein"
+__author_email__ = "felix@fontein.de"
+
+
+def setup(app):
+ """ Initializer for Sphinx extension API.
+ See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
+ """
+ for lexer in [
+ AnsibleOutputLexer(startinline=True)
+ ]:
+ app.add_lexer(lexer.name, lexer)
+ for alias in lexer.aliases:
+ app.add_lexer(alias, lexer)
+
+ return dict(version=__version__, parallel_read_safe=True)
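
Outside of Sphinx, the lexer can be exercised directly with Pygments. A hedged usage sketch, assuming this file is importable as ``pygments_lexer`` (as it would be from ``docs/docsite/_extensions``):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    from pygments_lexer import AnsibleOutputLexer

    sample = (
        'PLAY [all] *********\n'
        'ok: [localhost]\n'
        'PLAY RECAP *********\n'
        'localhost : ok=1 changed=0 failed=0\n'
    )
    # Renders the captured play output with ANSI colors on a terminal.
    print(highlight(sample, AnsibleOutputLexer(), TerminalFormatter()))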
diff --git a/docs/docsite/_static/ansible.css b/docs/docsite/_static/ansible.css
new file mode 100644
index 00000000..f9d0b1a4
--- /dev/null
+++ b/docs/docsite/_static/ansible.css
@@ -0,0 +1,59 @@
+/*! minified with http://css-minify.online-domain-tools.com/ - all comments
+ * must have ! to preserve during minifying with that tool *//*! Fix for read the docs theme:
+ * https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
+ *//*! override table width restrictions */@media screen and (min-width:767px){/*! If we ever publish to read the docs, we need to use !important for these
+ * two styles as read the docs itself loads their theme in a way that we
+ * can't otherwise override it.
+ */.wy-table-responsive table td{white-space:normal}.wy-table-responsive{overflow:visible}}/*!
+ * We use the class documentation-table for attribute tables where the first
+ * column is the name of an attribute and the second column is the description.
+ *//*! These tables look like this:
+ *
+ * Attribute Name Description
+ * -------------- -----------
+ * **NAME** This is a multi-line description
+ * str/required that can span multiple lines
+ * added in x.y
+ * With multiple paragraphs
+ * -------------- -----------
+ *
+ * **NAME** is given the class .value-name
+ * str is given the class .value-type
+ * / is given the class .value-separator
+ * required is given the class .value-required
+ * added in x.y is given the class .value-added-in
+ *//*! The extra .rst-content is so this will override rtd theme */.rst-content table.documentation-table td{vertical-align:top}table.documentation-table td:first-child{white-space:nowrap;vertical-align:top}table.documentation-table td:first-child p:first-child{font-weight:700;display:inline}/*! This is now redundant with above position-based styling *//*!
+table.documentation-table .value-name {
+ font-weight: bold;
+ display: inline;
+}
+*/table.documentation-table .value-type{font-size:x-small;color:purple;display:inline}table.documentation-table .value-separator{font-size:x-small;display:inline}table.documentation-table .value-required{font-size:x-small;color:red;display:inline}.value-added-in{font-size:x-small;font-style:italic;color:green;display:inline}/*! Ansible-specific CSS pulled out of rtd theme for 2.9 */.DocSiteProduct-header{flex:1;-webkit-flex:1;padding:10px 20px 20px;display:flex;display:-webkit-flex;flex-direction:column;-webkit-flex-direction:column;align-items:center;-webkit-align-items:center;justify-content:flex-start;-webkit-justify-content:flex-start;margin-left:20px;margin-right:20px;text-decoration:none;font-weight:400;font-family:'Open Sans',sans-serif}.DocSiteProduct-header:active,.DocSiteProduct-header:focus,.DocSiteProduct-header:visited{color:#fff}.DocSiteProduct-header--core{font-size:25px;background-color:#5bbdbf;border:2px solid #5bbdbf;border-top-left-radius:4px;border-top-right-radius:4px;color:#fff;padding-left:2px;margin-left:2px}.DocSiteProduct-headerAlign{width:100%}.DocSiteProduct-logo{width:60px;height:60px;margin-bottom:-9px}.DocSiteProduct-logoText{margin-top:6px;font-size:25px;text-align:left}.DocSiteProduct-CheckVersionPara{margin-left:2px;padding-bottom:4px;margin-right:2px;margin-bottom:10px}/*! Ansible color scheme */.wy-nav-top,.wy-side-nav-search{background-color:#5bbdbf}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#5bbdbf}.wy-menu-vertical a{padding:0}.wy-menu-vertical a.reference.internal{padding:.4045em 1.618em}/*! Override sphinx rtd theme max-with of 800px */.wy-nav-content{max-width:100%}/*! Override sphinx_rtd_theme - keeps left-nav from overwriting Documentation title */.wy-nav-side{top:45px}/*! Ansible - changed absolute to relative to remove extraneous side scroll bar */.wy-grid-for-nav{position:relative}/*! Ansible narrow the search box */.wy-side-nav-search input[type=text]{width:90%;padding-left:24px}/*! Ansible - remove so highlight indenting is correct */.rst-content .highlighted{padding:0}.DocSiteBanner{display:flex;display:-webkit-flex;justify-content:center;-webkit-justify-content:center;flex-wrap:wrap;-webkit-flex-wrap:wrap;margin-bottom:25px}.DocSiteBanner-imgWrapper{max-width:100%}td,th{min-width:100px}table{overflow-x:auto;display:block;max-width:100%}.documentation-table td.elbow-placeholder{border-left:1px solid #000;border-top:0;width:30px;min-width:30px}.documentation-table td,.documentation-table th{padding:4px;border-left:1px solid #000;border-top:1px solid #000}.documentation-table{border-right:1px solid #000;border-bottom:1px solid #000}@media print{*{background:0 0!important;color:#000!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}#nav,a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}/*! Don't show links for images, or javascript/internal links */pre,blockquote{border:0 solid #999;page-break-inside:avoid}thead{display:table-header-group}/*! 
h5bp.com/t */tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}#google_image_div,.DocSiteBanner{display:none!important}}#sideBanner,.DocSite-globalNav{display:none}.DocSite-sideNav{display:block;margin-bottom:40px}.DocSite-nav{display:none}.ansibleNav{background:#000;padding:0 20px;width:auto;border-bottom:1px solid #444;font-size:14px;z-index:1}.ansibleNav ul{list-style:none;padding-left:0;margin-top:0}.ansibleNav ul li{padding:7px 0;border-bottom:1px solid #444}.ansibleNav ul li:last-child{border:none}.ansibleNav ul li a{color:#fff;text-decoration:none;text-transform:uppercase;padding:6px 0}.ansibleNav ul li a:hover{color:#5bbdbf;background:0 0}h4{font-size:105%}h5{font-size:90%}h6{font-size:80%}@media screen and (min-width:768px){.DocSite-globalNav{display:block;position:fixed}#sideBanner{display:block}.DocSite-sideNav{display:none}.DocSite-nav{flex:initial;-webkit-flex:initial;display:flex;display:-webkit-flex;flex-direction:row;-webkit-flex-direction:row;justify-content:flex-start;-webkit-justify-content:flex-start;padding:15px;background-color:#000;text-decoration:none;font-family:'Open Sans',sans-serif}.DocSiteNav-logo{width:28px;height:28px;margin-right:8px;margin-top:-6px;position:fixed;z-index:1}.DocSiteNav-title{color:#fff;font-size:20px;position:fixed;margin-left:40px;margin-top:-4px;z-index:1}.ansibleNav{height:45px;width:100%;font-size:13px;padding:0 60px 0 0}.ansibleNav ul{float:right;display:flex;flex-wrap:nowrap;margin-top:13px}.ansibleNav ul li{padding:0;border-bottom:none}.ansibleNav ul li a{color:#fff;text-decoration:none;text-transform:uppercase;padding:8px 13px}h4{font-size:105%}h5{font-size:90%}h6{font-size:80%}}@media screen and (min-width:768px){#sideBanner,.DocSite-globalNav{display:block}.DocSite-sideNav{display:none}.DocSite-nav{flex:initial;-webkit-flex:initial;display:flex;display:-webkit-flex;flex-direction:row;-webkit-flex-direction:row;justify-content:flex-start;-webkit-justify-content:flex-start;padding:15px;background-color:#000;text-decoration:none;font-family:'Open Sans',sans-serif}.DocSiteNav-logo{width:28px;height:28px;margin-right:8px;margin-top:-6px;position:fixed}.DocSiteNav-title{color:#fff;font-size:20px;position:fixed;margin-left:40px;margin-top:-4px}.ansibleNav{height:45px;font-size:13px;padding:0 60px 0 0}.ansibleNav ul{float:right;display:flex;flex-wrap:nowrap;margin-top:13px}.ansibleNav ul li{padding:0;border-bottom:none}.ansibleNav ul li a{color:#fff;text-decoration:none;text-transform:uppercase;padding:8px 13px}h4{font-size:105%}h5{font-size:90%}h6{font-size:80%}}
+/* ansibleOptionLink is adapted from h1 .headerlink in sphinx_rtd_theme */
+tr:hover .ansibleOptionLink::after {
+ visibility: visible;
+}
+tr .ansibleOptionLink::after {
+ content: "\f0c1"; /* FontAwesome link icon */
+ font-family: FontAwesome;
+}
+tr .ansibleOptionLink {
+ visibility: hidden;
+ display: inline-block;
+ font: normal normal normal 14px/1 FontAwesome;
+ text-rendering: auto;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale;
+}
+
+@media screen and (min-width:767px){
+ /* Move anchors a bit up so that they aren't hidden by the header bar */
+ section [id] {
+ padding-top: 45px;
+ margin-top: -45px;
+ }
+ /* Without this, for example most links in the page's TOC aren't usable anymore */
+ section a[id] {
+ padding-top: 0;
+ margin-top: 0;
+ }
+} \ No newline at end of file
diff --git a/docs/docsite/_static/pygments.css b/docs/docsite/_static/pygments.css
new file mode 100644
index 00000000..8774dd3b
--- /dev/null
+++ b/docs/docsite/_static/pygments.css
@@ -0,0 +1,75 @@
+.highlight { background: #f8f8f8 }
+.highlight .hll { background-color: #ffffcc; border: 1px solid #edff00; padding-top: 2px; border-radius: 3px; display: block }
+.highlight .c { color: #6a737d; font-style: italic } /* Comment */
+.highlight .err { color: #a61717; background-color: #e3d2d2; border: 1px solid #FF0000 } /* Error */
+.highlight .k { color: #007020; font-weight: bold } /* Keyword */
+.highlight .o { color: #666666; font-weight: bold } /* Operator */
+.highlight .ch { color: #6a737d; font-style: italic } /* Comment.Hashbang */
+.highlight .cm { color: #6a737d; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #007020 } /* Comment.Preproc */
+.highlight .cpf { color: #6a737d; font-style: italic } /* Comment.PreprocFile */
+.highlight .c1 { color: #6a737d; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #999999; font-weight: bold; font-style: italic; background-color: #fff0f0 } /* Comment.Special */
+.highlight .gd { color: #A00000; background-color: #ffdddd } /* Generic.Deleted */
+.highlight .gd .x { color: #A00000; background-color: #ffaaaa }
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .gr { color: #aa0000 } /* Generic.Error */
+.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #00A000; background-color: #ddffdd } /* Generic.Inserted */
+.highlight .gi .x { color: #00A000; background-color: #aaffaa; }
+.highlight .go { color: #333333 } /* Generic.Output */
+.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.highlight .gt { color: #0040D0 } /* Generic.Traceback */
+.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
+.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { color: #007020 } /* Keyword.Pseudo */
+.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #902000 } /* Keyword.Type */
+.highlight .l { color: #032f62 } /* Literal */
+.highlight .m { color: #208050 } /* Literal.Number */
+.highlight .s { color: #4070a0 } /* Literal.String */
+.highlight .n { color: #333333 } /* Name */
+.highlight .p { font-weight: bold } /* Punctuation */
+.highlight .na { color: teal } /* Name.Attribute */
+.highlight .nb { color: #0086b3 } /* Name.Builtin */
+.highlight .nc { color: #445588; font-weight: bold } /* Name.Class */
+.highlight .no { color: teal; } /* Name.Constant */
+.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
+.highlight .ni { color: purple; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #990000; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #990000; font-weight: bold } /* Name.Function */
+.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
+.highlight .nn { color: #555555; font-weight: bold } /* Name.Namespace */
+.highlight .nt { color: #22863a } /* Name.Tag */
+.highlight .nv { color: #9960b5; font-weight: bold } /* Name.Variable */
+.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
+.highlight .w { color: #bbbbbb } /* Text.Whitespace */
+.highlight .mb { color: #009999 } /* Literal.Number.Bin */
+.highlight .mf { color: #009999 } /* Literal.Number.Float */
+.highlight .mh { color: #009999 } /* Literal.Number.Hex */
+.highlight .mi { color: #009999 } /* Literal.Number.Integer */
+.highlight .mo { color: #009999 } /* Literal.Number.Oct */
+.highlight .sa { color: #dd1144 } /* Literal.String.Affix */
+.highlight .sb { color: #dd1144 } /* Literal.String.Backtick */
+.highlight .sc { color: #dd1144 } /* Literal.String.Char */
+.highlight .dl { color: #dd1144 } /* Literal.String.Delimiter */
+.highlight .sd { color: #dd1144; font-style: italic } /* Literal.String.Doc */
+.highlight .s2 { color: #dd1144 } /* Literal.String.Double */
+.highlight .se { color: #dd1144; font-weight: bold } /* Literal.String.Escape */
+.highlight .sh { color: #dd1144 } /* Literal.String.Heredoc */
+.highlight .si { color: #dd1144; font-style: italic } /* Literal.String.Interpol */
+.highlight .sx { color: #dd1144 } /* Literal.String.Other */
+.highlight .sr { color: #009926 } /* Literal.String.Regex */
+.highlight .s1 { color: #dd1144 } /* Literal.String.Single */
+.highlight .ss { color: #990073 } /* Literal.String.Symbol */
+.highlight .bp { color: #999999 } /* Name.Builtin.Pseudo */
+.highlight .fm { color: #06287e } /* Name.Function.Magic */
+.highlight .vc { color: teal } /* Name.Variable.Class */
+.highlight .vg { color: teal } /* Name.Variable.Global */
+.highlight .vi { color: teal } /* Name.Variable.Instance */
+.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
+.highlight .il { color: #009999 } /* Literal.Number.Integer.Long */
+.highlight .gc { color: #909090; background-color: #eaf2f5 }
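The file above is a standard Pygments token-class stylesheet: one rule per token type, with a comment naming the token. For reference, Pygments can emit such a sheet directly; a minimal sketch, assuming the stock "default" style (the colors in this diff are clearly hand-tuned, so a regenerated sheet would only be a starting point, not a byte-for-byte match):

    # Sketch: generate a .highlight-scoped stylesheet with Pygments.
    # The "default" style is an assumption; this diff's colors are hand-tuned.
    from pygments.formatters import HtmlFormatter

    css = HtmlFormatter(style='default').get_style_defs('.highlight')
    with open('pygments.css', 'w') as fh:
        fh.write(css)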
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/__init__.py b/docs/docsite/_themes/sphinx_rtd_theme/__init__.py
new file mode 100644
index 00000000..f449982e
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/__init__.py
@@ -0,0 +1,20 @@
+"""Sphinx ReadTheDocs theme.
+
+From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
+
+"""
+from os import path
+
+__version__ = '0.2.5b2'
+__version_full__ = __version__
+
+
+def get_html_theme_path():
+ """Return list of HTML theme paths."""
+ cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
+ return cur_dir
+
+
+# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
+def setup(app):
+ app.add_html_theme('sphinx_rtd_theme', path.abspath(path.dirname(__file__)))
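For reference, a project consumes a theme packaged like this either through the `setup(app)` entry point above (Sphinx >= 1.6, when the package is installed) or by pointing Sphinx at the theme directory; a minimal conf.py sketch, assuming the package is importable:

    # Hypothetical conf.py excerpt using the theme defined above.
    import sphinx_rtd_theme

    html_theme = 'sphinx_rtd_theme'
    # Older mechanism; redundant once setup(app) has registered the theme.
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]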
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_banner.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_banner.html
new file mode 100644
index 00000000..e3b6a575
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_banner.html
@@ -0,0 +1,45 @@
+<!--- Based on sphinx versionwarning extension. Extension currently only works on READTHEDOCS -->
+ <script>
+ startsWith = function(str, needle) {
+ return str.slice(0, needle.length) == needle
+ }
+ // Create a banner if we're on the testing site rather than the official docs site
+ if (location.host == "docs.testing.ansible.com") {
+ document.write('<div id="testing_banner_id" class="admonition important">');
+ document.write('<p>This is the testing site for Ansible Documentation. Unless you are reviewing pre-production changes, please visit the <a href="https://docs.ansible.com/ansible/latest/">official documentation website</a>.</p> <p></p>');
+ document.write('</div>');
+ }
+ {% if (not READTHEDOCS) and (available_versions is defined) %}
+ // Create a banner if we're not the latest version
+ current_url_path = window.location.pathname;
+ if (startsWith(current_url_path, "/ansible/latest/") || startsWith(current_url_path, "/ansible/{{ latest_version }}/")) {
+ /* temp banner to advertise survey */
+ document.write('<div id="banner_id" class="admonition important">');
+ document.write('<br><p>Please take our <a href="https://www.surveymonkey.co.uk/r/B9V3CDY">Docs survey</a> before December 31 to help us improve Ansible documentation.</p>');
+
+ document.write('<div id="banner_id" class="admonition caution">');
+ document.write('<p>You are reading the latest community version of the Ansible documentation. Red Hat subscribers, select <b>2.9</b> in the version selection to the left for the most recent Red Hat release.</p>');
+ document.write('</div>');
+
+ document.write('</div>');
+ } else if (startsWith(current_url_path, "/ansible/2.9/")) {
+ document.write('<div id="banner_id" class="admonition caution">');
+ document.write('<p>You are reading the latest Red Hat released version of the Ansible documentation. Community users can use this, or select any version in the version selection to the left, including <b>latest</b> for the most recent community version.</p>');
+ document.write('</div>');
+ } else if (startsWith(current_url_path, "/ansible/devel/")) {
+ /* temp banner to advertise survey */
+ document.write('<div id="banner_id" class="admonition important">');
+ document.write('<br><p>Please take our <a href="https://www.surveymonkey.co.uk/r/B9V3CDY">Docs survey</a> before December 31 to help us improve Ansible documentation.</p><br>');
+ document.write('</div>');
+
+ document.write('<div id="banner_id" class="admonition caution">');
+ document.write('<p>You are reading the <b>devel</b> version of the Ansible documentation - this version is not guaranteed stable. Use the version selection to the left if you want the latest stable released version.</p>');
+ document.write('</div>');
+
+ } else {
+ document.write('<div id="banner_id" class="admonition caution">');
+ document.write('<p>You are reading an older version of the Ansible documentation. Use the version selection to the left if you want the latest stable released version.</p>');
+ document.write('</div>');
+ }
+ {% endif %}
+ </script>
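The template above picks a banner purely from the URL path prefix. The same branching restated as plain Python, for clarity (paths and labels mirror the template; "2.10" stands in for the latest_version template variable):

    # Illustration of the banner selection above; not part of the theme.
    def banner_kind(path, latest_version="2.10"):
        if path.startswith(("/ansible/latest/", "/ansible/" + latest_version + "/")):
            return "latest"    # community latest; Red Hat users pointed at 2.9
        if path.startswith("/ansible/2.9/"):
            return "red-hat"   # latest Red Hat released version
        if path.startswith("/ansible/devel/"):
            return "devel"     # devel, not guaranteed stable
        return "older"         # older docs; use the version selector

    assert banner_kind("/ansible/devel/index.html") == "devel"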
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_eol_banner.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_eol_banner.html
new file mode 100644
index 00000000..6f19d2c2
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_eol_banner.html
@@ -0,0 +1,4 @@
+{# Creates a banner at the top of the page for EOL versions. #}
+<div id="banner" class="admonition caution">
+ <p>You are reading an unmaintained version of the Ansible documentation. Unmaintained Ansible versions can contain unfixed security vulnerabilities (CVE). Please upgrade to a maintained version. See <a href="https://docs.ansible.com/ansible/latest/index.html">the latest Ansible documentation</a>.</p>
+</div>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrabody.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrabody.html
new file mode 100644
index 00000000..ed0d9981
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrabody.html
@@ -0,0 +1,25 @@
+<!-- extra body elements for Ansible beyond RTD Sphinx Theme -->
+<!-- Google Tag Manager -->
+<noscript><iframe src="//www.googletagmanager.com/ns.html?id=GTM-PSB293" height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
+<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src='//www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); })(window,document,'script','dataLayer','GTM-PSB293');</script>
+<!-- End Google Tag Manager -->
+
+ <div class="DocSite-globalNav ansibleNav">
+ <ul>
+ <li><a href="https://www.ansible.com/ansiblefest" target="_blank">AnsibleFest</a></li>
+ <li><a href="https://www.ansible.com/tower" target="_blank">Products</a></li>
+ <li><a href="https://www.ansible.com/community" target="_blank">Community</a></li>
+ <li><a href="https://www.ansible.com/webinars-training" target="_blank">Webinars & Training</a></li>
+ <li><a href="https://www.ansible.com/blog" target="_blank">Blog</a></li>
+ </ul>
+ </div>
+
+<a class="DocSite-nav" href="/" style="padding-bottom: 30px;">
+
+ <img class="DocSiteNav-logo"
+ src="{{ pathto('_static/', 1) }}images/logo_invert.png"
+ alt="Ansible Logo">
+ <div class="DocSiteNav-title">
+ Documentation
+ </div>
+</a>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrafooter.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrafooter.html
new file mode 100644
index 00000000..06a8a074
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrafooter.html
@@ -0,0 +1,18 @@
+<!-- extra footer elements for Ansible beyond RTD Sphinx Theme -->
+<!-- begin analytics -->
+<script type="text/javascript">
+var _hsq = _hsq || [];
+_hsq.push(["setContentType", "standard-page"]);
+ (function(d,s,i,r) {
+ if (d.getElementById(i)){return;}
+ var n = d.createElement(s),e = document.getElementsByTagName(s)[0];
+ n.id=i;n.src = '//js.hs-analytics.net/analytics/'+(Math.ceil(new Date()/r)*r)+'/330046.js';
+ e.parentNode.insertBefore(n, e);
+ })(document, "script", "hs-analytics",300000);
+</script>
+<!-- end analytics -->
+<script type="text/javascript">
+if (("undefined" !== typeof _satellite) && ("function" === typeof _satellite.pageBottom)) {
+ _satellite.pageBottom();
+}
+</script>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrahead.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrahead.html
new file mode 100644
index 00000000..1b7afd49
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extrahead.html
@@ -0,0 +1,12 @@
+<!-- extra head elements for Ansible beyond RTD Sphinx Theme -->
+<script type="text/javascript" src="//www.redhat.com/dtm.js"></script>
+<!-- <meta class="swiftype" name="published_at" data-type="date" content="2017-12-13" /> -->
+<meta class="swiftype" name="version" data-type="string" content="{{ version }}">
+
+<!-- Google Tag Manager Data Layer -->
+<script>
+ dataLayer = [];
+</script>
+<!-- End Google Tag Manager Data Layer -->
+
+<script src="//cdnjs.cloudflare.com/ajax/libs/modernizr/2.6.2/modernizr.min.js"></script>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_extranav.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extranav.html
new file mode 100644
index 00000000..e89c0f6b
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_extranav.html
@@ -0,0 +1,9 @@
+<!-- extra nav elements for Ansible beyond RTD Sphinx Theme -->
+<!-- Swappable widget linking to Tower - do not change it here, as the image is controlled by Ansible -->
+<div id="sideBanner">
+ <br/>
+ <a href="https://www.ansible.com/docs-left?utm_source=docs">
+ <img style="border-width:0px;" src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-left-rail.png" />
+ </a>
+ <br/><br/><br/>
+</div>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_searchbox.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_searchbox.html
new file mode 100644
index 00000000..2f280996
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_searchbox.html
@@ -0,0 +1,10 @@
+{%- if builder != 'singlehtml' %}
+<div role="search">
+<!-- <form id="rtd-search-form" class="wy-form" action="{{ pathto('search') }}" -->
+ <form id="rtd-search-form" class="wy-form" method="get">
+ <input type="text" class="st-default-search-input" name="q" placeholder="{{ _('Search docs') }}" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+</div>
+{%- endif %}
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/ansible_versions.html b/docs/docsite/_themes/sphinx_rtd_theme/ansible_versions.html
new file mode 100644
index 00000000..3d3d7fc6
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/ansible_versions.html
@@ -0,0 +1,29 @@
+<!--- Based on https://github.com/rtfd/sphinx_rtd_theme/pull/438/files -->
+{# Creates dropdown version selection in the top-left navigation. #}
+<div class="version">
+ {% if (not READTHEDOCS) and (available_versions is defined) %}
+ <div class="version-dropdown">
+ <select class="version-list" id="version-list" onchange="javascript:location.href = this.value;">
+ <script> x = document.getElementById("version-list"); </script>
+ {% for slug in available_versions %}
+ <script>
+ current_url = window.location.href;
+ option = document.createElement("option");
+ option.text = "{{ slug }}";
+ if ( "{{ slug }}" == "{{ current_version }}" ) {
+ option.selected = true;
+ }
+ if (current_url.search("{{ current_version }}") > -1) {
+ option.value = current_url.replace("{{ current_version }}","{{ slug }}");
+ } else {
+ option.value = current_url.replace("latest","{{ slug }}");
+ }
+ x.add(option);
+ </script>
+ {% endfor %}
+ </select>
+ </div>
+ {% else %}
+ {{ nav_version }}
+ {% endif %}
+</div>
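Each option element above gets its URL by substituting the chosen version slug into the current page URL, falling back to replacing "latest" when the current version string is absent from the URL. Restated in Python for clarity (note: JavaScript's replace() only rewrites the first match, while Python's str.replace rewrites all; close enough for illustration):

    # Illustration of the per-slug URL rewrite in the template above.
    def option_url(current_url, current_version, slug):
        if current_version in current_url:
            return current_url.replace(current_version, slug)
        return current_url.replace("latest", slug)

    new = option_url("https://docs.ansible.com/ansible/2.10/index.html",
                     "2.10", "devel")
    assert new == "https://docs.ansible.com/ansible/devel/index.html"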
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/breadcrumbs.html b/docs/docsite/_themes/sphinx_rtd_theme/breadcrumbs.html
new file mode 100644
index 00000000..16342c17
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/breadcrumbs.html
@@ -0,0 +1,96 @@
+
+{# Support for Sphinx 1.3+ page_source_suffix, but don't break old builds. #}
+
+{% if page_source_suffix %}
+{% set suffix = page_source_suffix %}
+{% else %}
+{% set suffix = source_suffix %}
+{% endif %}
+
+{% if meta is defined and meta is not none %}
+{% set check_meta = True %}
+{% else %}
+{% set check_meta = False %}
+{% endif %}
+
+{% if check_meta and 'github_url' in meta %}
+{% set display_github = True %}
+{% endif %}
+
+{% if check_meta and 'bitbucket_url' in meta %}
+{% set display_bitbucket = True %}
+{% endif %}
+
+{% if check_meta and 'gitlab_url' in meta %}
+{% set display_gitlab = True %}
+{% endif %}
+
+<div role="navigation" aria-label="breadcrumbs navigation">
+
+ <ul class="wy-breadcrumbs">
+ {% block breadcrumbs %}
+ <li><a href="{{ pathto(master_doc) }}">{{ _('Docs') }}</a> &raquo;</li>
+ {% for doc in parents %}
+ <li><a href="{{ doc.link|e }}">{{ doc.title }}</a> &raquo;</li>
+ {% endfor %}
+ <li>{{ title }}</li>
+ {% endblock %}
+ {% block breadcrumbs_aside %}
+ <li class="wy-breadcrumbs-aside">
+ {% if hasdoc(pagename) %}
+ {% if display_github %}
+ {% if check_meta and 'github_url' in meta %}
+ <!-- User defined GitHub URL -->
+ <a href="{{ meta['github_url'] }}" class="fa fa-github"> {{ _('Edit on GitHub') }}</a>
+ {% else %}
+ <!-- Ansible-specific additions for modules etc -->
+ {% if (pagename.endswith('_module')) or (pagename.endswith('_become'))
+ or (pagename.endswith('_cache')) or (pagename.endswith('_callback'))
+ or (pagename.endswith('_connection')) or (pagename.endswith('_inventory'))
+ or (pagename.endswith('_lookup')) or (pagename.endswith('_shell'))
+ or (pagename.endswith('_strategy')) or (pagename.endswith('_vars'))
+ %}
+ <!-- <a href="https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/{{ theme_vcs_pageview_mode|default("blob") }}/{{ github_module_version }}{{ meta.get('source', '') }}?description=%23%23%23%23%23%20SUMMARY%0A%3C!---%20Your%20description%20here%20--%3E%0A%0A%0A%23%23%23%23%23%20ISSUE%20TYPE%0A-%20Docs%20Pull%20Request%0A%0A%2Blabel:%20docsite_pr" class="fa fa-github"> {{ _('Edit on GitHub') }}</a> -->
+ <br>
+ {% elif pagename.startswith('cli') and meta.get('source', None) %}
+ <a href="https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/{{ theme_vcs_pageview_mode|default("blob") }}/{{ github_cli_version }}{{ meta.get('source', '') }}?description=%23%23%23%23%23%20SUMMARY%0A%3C!---%20Your%20description%20here%20--%3E%0A%0A%0A%23%23%23%23%23%20ISSUE%20TYPE%0A-%20Docs%20Pull%20Request%0A%0A%2Blabel:%20docsite_pr" class="fa fa-github"> {{ _('Edit on GitHub') }}</a>
+ {% elif (not 'list_of' in pagename) and (not 'category' in pagename) %}
+ <a href="https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/{{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }}?description=%23%23%23%23%23%20SUMMARY%0A%3C!---%20Your%20description%20here%20--%3E%0A%0A%0A%23%23%23%23%23%20ISSUE%20TYPE%0A-%20Docs%20Pull%20Request%0A%0A%2Blabel:%20docsite_pr" class="fa fa-github"> {{ _('Edit on GitHub') }}</a>
+ {% endif %}
+ {% endif %}
+ {% elif display_bitbucket %}
+ {% if check_meta and 'bitbucket_url' in meta %}
+ <!-- User defined Bitbucket URL -->
+ <a href="{{ meta['bitbucket_url'] }}" class="fa fa-bitbucket"> {{ _('Edit on Bitbucket') }}</a>
+ {% else %}
+ <a href="https://bitbucket.org/{{ bitbucket_user }}/{{ bitbucket_repo }}/src/{{ bitbucket_version}}{{ conf_py_path }}{{ pagename }}{{ suffix }}?mode={{ theme_vcs_pageview_mode|default("view") }}" class="fa fa-bitbucket"> {{ _('Edit on Bitbucket') }}</a>
+ {% endif %}
+ {% elif display_gitlab %}
+ {% if check_meta and 'gitlab_url' in meta %}
+ <!-- User defined GitLab URL -->
+ <a href="{{ meta['gitlab_url'] }}" class="fa fa-gitlab"> {{ _('Edit on GitLab') }}</a>
+ {% else %}
+ <a href="https://{{ gitlab_host|default("gitlab.com") }}/{{ gitlab_user }}/{{ gitlab_repo }}/{{ theme_vcs_pageview_mode|default("blob") }}/{{ gitlab_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }}" class="fa fa-gitlab"> {{ _('Edit on GitLab') }}</a>
+ {% endif %}
+ {% elif show_source and source_url_prefix %}
+ <a href="{{ source_url_prefix }}{{ pagename }}{{ suffix }}">{{ _('View page source') }}</a>
+ {% elif show_source and has_source and sourcename %}
+ <a href="{{ pathto('_sources/' + sourcename, true)|e }}" rel="nofollow"> {{ _('View page source') }}</a>
+ {% endif %}
+ {% endif %}
+ </li>
+ {% endblock %}
+ </ul>
+
+ {% if (theme_prev_next_buttons_location == 'top' or theme_prev_next_buttons_location == 'both') and (next or prev) %}
+ <div class="rst-breadcrumbs-buttons" role="navigation" aria-label="breadcrumb navigation">
+ {% if next %}
+ <a href="{{ next.link|e }}" class="btn btn-neutral float-right" title="{{ next.title|striptags|e }}" accesskey="n">Next <span class="fa fa-arrow-circle-right"></span></a>
+ {% endif %}
+ {% if prev %}
+ <a href="{{ prev.link|e }}" class="btn btn-neutral float-left" title="{{ prev.title|striptags|e }}" accesskey="p"><span class="fa fa-arrow-circle-left"></span> Previous</a>
+ {% endif %}
+ </div>
+ {% endif %}
+ <hr/>
+</div>
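The long "Edit on GitHub" hrefs above are assembled from theme variables plus a pre-filled, URL-encoded pull-request description. A sketch of that assembly (parameter names mirror the template variables; the defaults match the template's |default(...) filters; illustrative only):

    # Illustration of the edit-link assembly in breadcrumbs.html above.
    from urllib.parse import quote

    PR_DESCRIPTION = ("##### SUMMARY\n<!--- Your description here -->\n\n\n"
                      "##### ISSUE TYPE\n- Docs Pull Request\n\n+label: docsite_pr")

    def edit_on_github_url(github_user, github_repo, github_version,
                           conf_py_path, pagename, suffix,
                           github_host="github.com", mode="blob"):
        base = "https://%s/%s/%s/%s/%s%s%s%s" % (
            github_host, github_user, github_repo, mode,
            github_version, conf_py_path, pagename, suffix)
        return base + "?description=" + quote(PR_DESCRIPTION)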
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/footer.html b/docs/docsite/_themes/sphinx_rtd_theme/footer.html
new file mode 100644
index 00000000..a1bc26d6
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/footer.html
@@ -0,0 +1,62 @@
+<footer>
+ {% if next or prev %}
+ <div class="rst-footer-buttons">
+ {% if next %}
+ <a href="{{ next.link|e }}" class="btn btn-neutral float-right" title="{{ next.title|striptags|e }}"/>Next <span class="icon icon-circle-arrow-right"></span></a>
+ {% endif %}
+ {% if prev %}
+ <a href="{{ prev.link|e }}" class="btn btn-neutral" title="{{ prev.title|striptags|e }}"><span class="icon icon-circle-arrow-left"></span> Previous</a>
+ {% endif %}
+ </div>
+ {% endif %}
+
+ <hr/>
+
+{# Ansible search with Swift - do not remove #}
+<script type="text/javascript">
+ (function(w,d,t,u,n,s,e){w['SwiftypeObject']=n;w[n]=w[n]||function(){
+ (w[n].q=w[n].q||[]).push(arguments);};s=d.createElement(t);
+ e=d.getElementsByTagName(t)[0];s.async=1;s.src=u;e.parentNode.insertBefore(s,e);
+ })(window,document,'script','//s.swiftypecdn.com/install/v2/st.js','_st');
+
+ _st('install','yABGvz2N8PwcwBxyfzUc','2.0.0');
+</script>
+
+ <div role="contentinfo">
+ <p>
+ {%- if show_copyright %}
+ {%- if hasdoc('copyright') %}
+ {% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}{% endtrans %}
+ {%- else %}
+ {% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}{% endtrans %}
+ {%- endif %}
+ {%- endif %}
+
+ {%- if build_id and build_url %}
+ {% trans build_url=build_url, build_id=build_id %}
+ <span class="build">
+ Build
+ <a href="{{ build_url }}">{{ build_id }}</a>.
+ </span>
+ {% endtrans %}
+ {%- elif commit %}
+ {% trans commit=commit %}
+ <span class="commit">
+ Revision <code>{{ commit }}</code>.
+ </span>
+ {% endtrans %}
+ {%- elif last_updated %}
+ <span class="lastupdated">
+ {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
+ </span>
+ {%- endif %}
+
+ </p>
+ </div>
+
+ {%- if show_sphinx %}
+ {% trans %}Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>{% endtrans %}.
+ {%- endif %}
+
+ {%- block extrafooter %} {% endblock %}
+</footer>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/layout.html b/docs/docsite/_themes/sphinx_rtd_theme/layout.html
new file mode 100644
index 00000000..d41da997
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/layout.html
@@ -0,0 +1,250 @@
+{# TEMPLATE VAR SETTINGS #}
+{%- set url_root = pathto('', 1) %}
+{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
+{%- if not embedded and docstitle %}
+ {%- set titlesuffix = " &mdash; "|safe + docstitle|e %}
+{%- else %}
+ {%- set titlesuffix = "" %}
+{%- endif %}
+{%- set lang_attr = 'en' if language == None else (language | replace('_', '-')) %}
+
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="{{ lang_attr }}" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="{{ lang_attr }}" > <!--<![endif]-->
+<head>
+ <meta charset="utf-8">
+ {{ metatags }}
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ {% block htmltitle %}
+ <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
+ {% endblock %}
+
+ {# FAVICON #}
+ {% if favicon %}
+ <link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
+ {% endif %}
+ {# CANONICAL URL #}
+ {% if theme_canonical_url %}
+ <link rel="canonical" href="{{ theme_canonical_url }}{{ pagename }}.html"/>
+ {% endif %}
+
+ {# JAVASCRIPTS #}
+ {%- block scripts %}
+ <script type="text/javascript" src="{{ pathto('_static/js/modernizr.min.js', 1) }}"></script>
+ {%- if not embedded %}
+ {# XXX Sphinx 1.8.0 made this an external js-file; quick fix until we refactor the template to inherit more blocks directly from sphinx #}
+ {% if sphinx_version >= "1.8.0" %}
+ <script type="text/javascript" id="documentation_options" data-url_root="{{ pathto('', 1) }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
+ {%- for scriptfile in script_files %}
+ {{ js_tag(scriptfile) }}
+ {%- endfor %}
+ {% else %}
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT:'{{ url_root }}',
+ VERSION:'{{ release|e }}',
+ LANGUAGE:'{{ language }}',
+ COLLAPSE_INDEX:false,
+ FILE_SUFFIX:'{{ '' if no_search_suffix else file_suffix }}',
+ HAS_SOURCE: {{ has_source|lower }},
+ SOURCELINK_SUFFIX: '{{ sourcelink_suffix }}'
+ };
+ </script>
+ {%- for scriptfile in script_files %}
+ <script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
+ {%- endfor %}
+ {% endif %}
+ <script type="text/javascript" src="{{ pathto('_static/js/theme.js', 1) }}"></script>
+
+ {# OPENSEARCH #}
+ {%- if use_opensearch %}
+ <link rel="search" type="application/opensearchdescription+xml"
+ title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
+ href="{{ pathto('_static/opensearch.xml', 1) }}"/>
+ {%- endif %}
+ {%- endif %}
+ {%- endblock %}
+
+ {# Ansible CSS additions #}
+ <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
+ <link href='https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.0.3/css/font-awesome.min.css' rel='stylesheet' type='text/css'>
+
+ {# CSS #}
+ <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
+ <link rel="stylesheet" href="{{ pathto('_static/ansible.css', 1) }}" type="text/css" />
+ <link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
+ {%- for css in css_files %}
+ {%- if css|attr("rel") %}
+ <link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
+ {%- else %}
+ <link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
+ {%- endif %}
+ {%- endfor %}
+
+ {%- for cssfile in extra_css_files %}
+ <link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
+ {%- endfor %}
+
+ {%- block linktags %}
+ {%- if hasdoc('about') %}
+ <link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
+ {%- endif %}
+ {%- if hasdoc('genindex') %}
+ <link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
+ {%- endif %}
+ {%- if hasdoc('search') %}
+ <link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
+ {%- endif %}
+ {%- if hasdoc('copyright') %}
+ <link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
+ {%- endif %}
+ {%- if next %}
+ <link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
+ {%- endif %}
+ {%- if prev %}
+ <link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
+ {%- endif %}
+ {%- endblock %}
+ {%- block extrahead %} {% include "ansible_extrahead.html" %} {% endblock %}
+</head>
+
+<body class="wy-body-for-nav">
+ {% block extrabody %} {% include "ansible_extrabody.html" %} {% endblock %}
+ <div class="wy-grid-for-nav">
+ {# SIDE NAV, TOGGLES ON MOBILE #}
+ <nav data-toggle="wy-nav-shift" class="wy-nav-side">
+ <div class="wy-side-scroll">
+ <div class="wy-side-nav-search" {% if theme_style_nav_header_background %} style="background: {{theme_style_nav_header_background}}" {% endif %}>
+ {% block sidebartitle %}
+
+ {% if logo and theme_logo_only %}
+ <a href="{{ pathto(master_doc) }}">
+ {% else %}
+ <a href="{{ pathto(master_doc) }}" class="icon icon-home"> {{ project }}
+ {% endif %}
+
+ {% if logo %}
+ {# Not strictly valid HTML, but it's the only way to display/scale
+ it properly, without weird scripting or heaps of work
+ #}
+ <img src="{{ pathto('_static/' + logo, 1) }}" class="logo" alt="Logo"/>
+ {% endif %}
+ </a>
+
+ {% if theme_display_version %}
+ {%- set nav_version = version %}
+ {% if READTHEDOCS and current_version %}
+ {%- set nav_version = current_version %}
+ {% endif %}
+ {% if nav_version %}
+ <div class="version">
+ {{ nav_version }}
+ </div>
+ {% endif %}
+ {% endif %}
+
+ {% include "ansible_versions.html" %}
+ {% include "ansible_searchbox.html" %}
+
+ {% endblock %}
+ </div>
+
+ {% block navigation %}
+ <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
+ {% block menu %}
+ {#
+ The singlehtml builder doesn't handle this toctree call when the
+ toctree is empty. Skip building this for now.
+ #}
+ {% if 'singlehtml' not in builder %}
+ {% set global_toc = toctree(maxdepth=theme_navigation_depth|int,
+ collapse=theme_collapse_navigation|tobool,
+ includehidden=theme_includehidden|tobool,
+ titles_only=theme_titles_only|tobool) %}
+ {% endif %}
+ {% if global_toc %}
+ {{ global_toc }}
+ {% else %}
+ <!-- Local TOC -->
+ <div class="local-toc">{{ toc }}</div>
+ {% endif %}
+ {% endblock %}
+ </div>
+ {% endblock %}
+ {% block extranav %}{% include "ansible_extranav.html" %}{% endblock %}
+ </div>
+ </nav>
+ </div>
+
+ <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+ {# MOBILE NAV, TRIGGERS SIDE NAV ON TOGGLE #}
+ <nav class="wy-nav-top" aria-label="top navigation">
+ {% block mobile_nav %}
+ <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+ <a href="{{ pathto(master_doc) }}">{{ project }}</a>
+ {% endblock %}
+ </nav>
+
+
+ <div class="wy-nav-content">
+ {%- block content %}
+ {% if theme_style_external_links|tobool %}
+ <div class="rst-content style-external-links">
+ {% else %}
+ <div class="rst-content">
+ {% endif %}
+ {% include "breadcrumbs.html" %}
+ {% include "ansible_banner.html" %}
+ <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
+ {%- block document %}
+ <div itemprop="articleBody">
+ {% block body %}{% endblock %}
+ </div>
+ {% if self.comments()|trim %}
+ <div class="articleComments">
+ {% block comments %}{% endblock %}
+ </div>
+ {% endif %}
+ </div>
+ {%- endblock %}
+ {% include "footer.html" %}
+ </div>
+ {%- endblock %}
+ </div>
+
+ </section>
+
+ </div>
+
+ <script type="text/javascript">
+ jQuery(function () {
+ {% if theme_sticky_navigation|tobool %}
+ SphinxRtdTheme.Navigation.enableSticky();
+ {% else %}
+ SphinxRtdTheme.Navigation.enable();
+ {% endif %}
+ });
+ </script>
+
+ {# Do not conflict with RTD insertion of analytics script #}
+ {% if not READTHEDOCS %}
+ {% if theme_analytics_id %}
+ <!-- Theme Analytics -->
+ <script>
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
+
+ ga('create', '{{ theme_analytics_id }}', 'auto');
+ ga('send', 'pageview');
+ </script>
+
+ {% endif %}
+ {% endif %}
+
+ {%- block footer %} {% include "ansible_extrafooter.html" %} {% endblock %}
+
+</body>
+</html>
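One caveat worth noting in the template above: {% if sphinx_version >= "1.8.0" %} compares version strings lexicographically. That happens to hold for the releases Sphinx actually shipped (1.8.x, then 2.x), but it is not a general version comparison:

    # String-comparison pitfall behind the sphinx_version check above.
    assert "2.0.0" >= "1.8.0"           # works: the path Sphinx releases took
    assert not ("1.10.0" >= "1.8.0")    # would misfire if a 1.10 had existed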
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/search.html b/docs/docsite/_themes/sphinx_rtd_theme/search.html
new file mode 100644
index 00000000..e3aa9b5c
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/search.html
@@ -0,0 +1,50 @@
+{#
+ basic/search.html
+ ~~~~~~~~~~~~~~~~~
+
+ Template for the search page.
+
+ :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+#}
+{%- extends "layout.html" %}
+{% set title = _('Search') %}
+{% set script_files = script_files + ['_static/searchtools.js'] %}
+{% block footer %}
+ <script type="text/javascript">
+ jQuery(function() { Search.loadIndex("{{ pathto('searchindex.js', 1) }}"); });
+ </script>
+ {# this is used when loading the search index using $.ajax fails,
+ such as on Chrome for documents on localhost #}
+ <script type="text/javascript" id="searchindexloader"></script>
+ {{ super() }}
+{% endblock %}
+{% block body %}
+ <noscript>
+ <div id="fallback" class="admonition warning">
+ <p class="last">
+ {% trans %}Please activate JavaScript to enable the search
+ functionality.{% endtrans %}
+ </p>
+ </div>
+ </noscript>
+
+ {% if search_performed %}
+ <h2>{{ _('Search Results') }}</h2>
+ {% if not search_results %}
+ <p>{{ _('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.') }}</p>
+ {% endif %}
+ {% endif %}
+ <div id="search-results">
+ {% if search_results %}
+ <ul>
+ {% for href, caption, context in search_results %}
+ <li>
+ <a href="{{ pathto(item.href) }}">{{ caption }}</a>
+ <p class="context">{{ context|e }}</p>
+ </li>
+ {% endfor %}
+ </ul>
+ {% endif %}
+ </div>
+{% endblock %}
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/searchbox.html b/docs/docsite/_themes/sphinx_rtd_theme/searchbox.html
new file mode 100644
index 00000000..26ef850e
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/searchbox.html
@@ -0,0 +1,9 @@
+{%- if builder != 'singlehtml' %}
+<div role="search">
+ <form id="rtd-search-form" class="wy-form" action="{{ pathto('search') }}"
+ <input type="text" name="q" placeholder="{{ _(' Search') }}" />
+ <input type="hidden" name="check_keywords" value="yes" />
+ <input type="hidden" name="area" value="default" />
+ </form>
+</div>
+{%- endif %}
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/css/badge_only.css b/docs/docsite/_themes/sphinx_rtd_theme/static/css/badge_only.css
new file mode 100644
index 00000000..a01ebc6e
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/css/badge_only.css
@@ -0,0 +1 @@
+.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/css/theme.css b/docs/docsite/_themes/sphinx_rtd_theme/static/css/theme.css
new file mode 100644
index 00000000..aed8cef0
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/css/theme.css
@@ -0,0 +1,6 @@
+/* sphinx_rtd_theme version 0.4.3 | MIT license */
+/* Built 20190212 16:02 */
+*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical 
li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content .code-block-caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*!
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.7.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"),url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857em;text-align:center}.fa-ul{padding-left:0;margin-left:2.1428571429em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.1428571429em;width:2.1428571429em;top:.1428571429em;text-align:center}.fa-li.fa-lg{left:-1.8571428571em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.wy-menu-vertical li span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.rst-content .fa-pull-left.admonition-title,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content dl dt .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.rst-content code.download span.fa-pull-left:first-child,.fa-pull-left.icon{margin-right:.3em}.fa.fa-pull-right,.wy-menu-vertical li span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.rst-content .fa-pull-right.admonition-title,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content dl dt .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.rst-content code.download 
span.fa-pull-right:first-child,.fa-pull-right.icon{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content .code-block-caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content .code-block-caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before,.icon-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before,.icon-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:"\f057"}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:"\f058"}
.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content .admonition-title:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before,.icon-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before,.icon-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}
.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:"\f0a8"}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before,.icon-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before,.wy-dropdown .caret:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}
.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}
.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before,.icon-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:"\f196"}
.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}
.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-sticky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}
.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo:before{content:"\f27d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}.fa-gitlab:before,.icon-gitlab:before{content:"\f296"}.fa-wpbeginner:before{content:"\f297"}.fa-wpforms:before{content:"\f298"}.fa-envira:before{content:"\f299"}.fa-universal-access:before{content:"\f29a"}.fa-wheelchair-alt:before{content:"\f29b"}.fa-question-circle-o:before{content:"\f29c"}.fa-blind:before{content:"\f29d"}.fa-audio-description:before{content:"\f29e"}.fa-volume-control-phone:before{content:"\f2a0"}.fa-braille:before{content:"\f2a1"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:"\f2a4"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-signing:before,.fa-sign-language:before{content:"\f2a7"}.fa-low-vision:before{content:"\f2a8"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-pied-piper:before{content:"\f2ae"}.fa-first-order:before{content:"\f2b0"}.fa-yoast:before{content:"\f2b1"}.fa-themeisle:before{content:"\f2b2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\f2b3"}.fa-fa:before,.fa-font-awesome:before{content:"\f2b4"}.fa-handshake-o:before{content:"\f2b5"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-o:before{content:"\f2b7"}.fa-linode:before{content:"\f2b8"}.fa-address-book:before{content:"\f2b9"}.fa-address-book-o:before{content:"\f2ba"}.fa-vcard:before,.fa-address-card:before{content:"\f2bb"}.fa-vcard-o:before,.fa-address-card-o:before{content:"\f2bc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-circle-o:before{content:"\f2be"}.fa-user-o:before{content:"\f2c0"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\f2c3"}.fa-quora:before{content:"\f2c4"}.fa-free-code-camp:before{content:"\f2c5"}.fa-telegram:before{content:"\f2c6"}
.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-shower:before{content:"\f2cc"}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:"\f2cd"}.fa-podcast:before{content:"\f2ce"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\f2d3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\f2d4"}.fa-bandcamp:before{content:"\f2d5"}.fa-grav:before{content:"\f2d6"}.fa-etsy:before{content:"\f2d7"}.fa-imdb:before{content:"\f2d8"}.fa-ravelry:before{content:"\f2d9"}.fa-eercast:before{content:"\f2da"}.fa-microchip:before{content:"\f2db"}.fa-snowflake-o:before{content:"\f2dc"}.fa-superpowers:before{content:"\f2dd"}.fa-wpexplorer:before{content:"\f2de"}.fa-meetup:before{content:"\f2e0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content .code-block-caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a
span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content table>caption .headerlink,.rst-content table>caption a .headerlink,a .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.btn .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content table>caption .headerlink,.rst-content table>caption .btn .headerlink,.btn .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 .headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content table>caption .headerlink,.rst-content table>caption .nav .headerlink,.nav .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav 
.rst-content code.download span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.btn .rst-content .code-block-caption .fa-large.headerlink,.rst-content .code-block-caption .btn .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.nav .rst-content .code-block-caption .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 
.fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.btn .rst-content .code-block-caption .fa-spin.headerlink,.rst-content .code-block-caption .btn .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.nav .rst-content .code-block-caption .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content 
code.download span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.admonition{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger 
.admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content .wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content .admonition-todo,.rst-content .wy-alert-warning.admonition{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title,.rst-content .wy-alert-warning.admonition .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.admonition{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note 
.admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.warning,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.admonition{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.admonition{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning 
.wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) 
inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f !important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 
12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 .3125em 0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group .wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group 
.wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group .wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group .wy-form-full{float:left;display:block;margin-right:2.3576515979%;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.3576515979%;width:48.821174201%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.3576515979%;width:31.7615656014%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type="datetime-local"]{padding:.34375em 
.625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted \9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto #129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid #E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{position:absolute;content:"";display:block;left:0;top:0;width:36px;height:12px;border-radius:4px;background:#ccc;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s 
ease-in-out}.wy-switch:after{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27AE60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form 
input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:.3em;display:block}.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form .wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid #e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils 
td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol 
li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:before,.wy-breadcrumbs:after{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#3a7ca8;height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin:12px 0 0 0;display:block;font-weight:bold;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a 
span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a{color:#404040}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{display:block;background:#c9c9c9;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980B9;text-align:center;padding:.809em;display:block;color:#fcfcfc;margin-bottom:.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:gray}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:1em;background:none;border:none;color:gray}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{width:100%}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:before,.rst-breadcrumbs-buttons:after{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width: 768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1100px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions 
.rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge 
.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0px}.rst-content div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;display:block;overflow:auto}.rst-content pre.literal-block,.rst-content div[class^='highlight']{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px 0}.rst-content pre.literal-block div[class^='highlight'],.rst-content div[class^='highlight'] div[class^='highlight']{padding:0px;border:none;margin:0}.rst-content div[class^='highlight'] td.code{width:100%}.rst-content .linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;display:block;overflow:auto}.rst-content div[class^='highlight'] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content pre.literal-block,.rst-content div[class^='highlight'] pre,.rst-content .linenodiv pre{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:12px;line-height:1.4}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^='highlight'],.rst-content div[class^='highlight'] pre{white-space:pre-wrap}}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint .last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last,.rst-content .admonition .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .section ol p:last-child,.rst-content .section ul p:last-child{margin-bottom:24px}.rst-content .line-block{margin-left:0px;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px
24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink{visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after,.rst-content .code-block-caption .headerlink:after{content:"\f0c1";font-family:FontAwesome}.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content .toctree-wrapper p.caption:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after,.rst-content .code-block-caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:baseline;position:relative;top:-0.4em;line-height:0;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:gray}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent !important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}.rst-content table.docutils td .last,.rst-content table.docutils td .last
:last-child{margin-bottom:0}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content tt,.rst-content tt,.rst-content code{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content pre,.rst-content kbd,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:bold;margin-bottom:12px}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content code.download{background:inherit;padding:inherit;font-weight:normal;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child,.rst-content code.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content tt.download span:first-child:before,.rst-content code.download 
span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width: 480px){.rst-content .sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-regular.eot");src:url("../fonts/Lato/lato-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-regular.woff2") format("woff2"),url("../fonts/Lato/lato-regular.woff") format("woff"),url("../fonts/Lato/lato-regular.ttf") format("truetype");font-weight:400;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bold.eot");src:url("../fonts/Lato/lato-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bold.woff2") format("woff2"),url("../fonts/Lato/lato-bold.woff") format("woff"),url("../fonts/Lato/lato-bold.ttf") format("truetype");font-weight:700;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bolditalic.eot");src:url("../fonts/Lato/lato-bolditalic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bolditalic.woff2") format("woff2"),url("../fonts/Lato/lato-bolditalic.woff") format("woff"),url("../fonts/Lato/lato-bolditalic.ttf") format("truetype");font-weight:700;font-style:italic}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-italic.eot");src:url("../fonts/Lato/lato-italic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-italic.woff2") format("woff2"),url("../fonts/Lato/lato-italic.woff") format("woff"),url("../fonts/Lato/lato-italic.ttf") format("truetype");font-weight:400;font-style:italic}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:url("../fonts/RobotoSlab/roboto-slab.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.ttf") format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.ttf") format("truetype")}
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/FontAwesome.otf b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/FontAwesome.otf
new file mode 100644
index 00000000..401ec0f3
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/FontAwesome.otf
Binary files differ
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.eot b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.eot
new file mode 100644
index 00000000..e9f60ca9
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.eot
Binary files differ
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.svg b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.svg
new file mode 100644
index 00000000..855c845e
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.svg
@@ -0,0 +1,2671 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg>
+<metadata>
+Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016
+ By ,,,
+Copyright Dave Gandy 2016. All rights reserved.
+</metadata>
+<defs>
+<font id="FontAwesome" horiz-adv-x="1536" >
+ <font-face
+ font-family="FontAwesome"
+ font-weight="400"
+ font-stretch="normal"
+ units-per-em="1792"
+ panose-1="0 0 0 0 0 0 0 0 0 0"
+ ascent="1536"
+ descent="-256"
+ bbox="-1.02083 -256.962 2304.6 1537.02"
+ underline-thickness="0"
+ underline-position="0"
+ unicode-range="U+0020-F500"
+ />
+<missing-glyph horiz-adv-x="896"
+d="M224 112h448v1312h-448v-1312zM112 0v1536h672v-1536h-672z" />
+ <glyph glyph-name=".notdef" horiz-adv-x="896"
+d="M224 112h448v1312h-448v-1312zM112 0v1536h672v-1536h-672z" />
+ <glyph glyph-name=".null" horiz-adv-x="0"
+ />
+ <glyph glyph-name="nonmarkingreturn" horiz-adv-x="597"
+ />
+ <glyph glyph-name="space" unicode=" " horiz-adv-x="448"
+ />
+ <glyph glyph-name="dieresis" unicode="&#xa8;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="copyright" unicode="&#xa9;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="registered" unicode="&#xae;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="acute" unicode="&#xb4;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="AE" unicode="&#xc6;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="Oslash" unicode="&#xd8;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="trademark" unicode="&#x2122;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="infinity" unicode="&#x221e;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="notequal" unicode="&#x2260;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="glass" unicode="&#xf000;" horiz-adv-x="1792"
+d="M1699 1350q0 -35 -43 -78l-632 -632v-768h320q26 0 45 -19t19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45t45 19h320v768l-632 632q-43 43 -43 78q0 23 18 36.5t38 17.5t43 4h1408q23 0 43 -4t38 -17.5t18 -36.5z" />
+ <glyph glyph-name="music" unicode="&#xf001;"
+d="M1536 1312v-1120q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89t34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v537l-768 -237v-709q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89
+t34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v967q0 31 19 56.5t49 35.5l832 256q12 4 28 4q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="search" unicode="&#xf002;" horiz-adv-x="1664"
+d="M1152 704q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5zM1664 -128q0 -52 -38 -90t-90 -38q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5
+t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90z" />
+ <glyph glyph-name="envelope" unicode="&#xf003;" horiz-adv-x="1792"
+d="M1664 32v768q-32 -36 -69 -66q-268 -206 -426 -338q-51 -43 -83 -67t-86.5 -48.5t-102.5 -24.5h-1h-1q-48 0 -102.5 24.5t-86.5 48.5t-83 67q-158 132 -426 338q-37 30 -69 66v-768q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5zM1664 1083v11v13.5t-0.5 13
+t-3 12.5t-5.5 9t-9 7.5t-14 2.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5q0 -168 147 -284q193 -152 401 -317q6 -5 35 -29.5t46 -37.5t44.5 -31.5t50.5 -27.5t43 -9h1h1q20 0 43 9t50.5 27.5t44.5 31.5t46 37.5t35 29.5q208 165 401 317q54 43 100.5 115.5t46.5 131.5z
+M1792 1120v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="heart" unicode="&#xf004;" horiz-adv-x="1792"
+d="M896 -128q-26 0 -44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600
+q-18 -18 -44 -18z" />
+ <glyph glyph-name="star" unicode="&#xf005;" horiz-adv-x="1664"
+d="M1664 889q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -21 -10.5 -35.5t-30.5 -14.5q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455
+l502 -73q56 -9 56 -46z" />
+ <glyph glyph-name="star_empty" unicode="&#xf006;" horiz-adv-x="1664"
+d="M1137 532l306 297l-422 62l-189 382l-189 -382l-422 -62l306 -297l-73 -421l378 199l377 -199zM1664 889q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -50 -41 -50q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500
+l-364 354q-25 27 -25 48q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46z" />
+ <glyph glyph-name="user" unicode="&#xf007;" horiz-adv-x="1280"
+d="M1280 137q0 -109 -62.5 -187t-150.5 -78h-854q-88 0 -150.5 78t-62.5 187q0 85 8.5 160.5t31.5 152t58.5 131t94 89t134.5 34.5q131 -128 313 -128t313 128q76 0 134.5 -34.5t94 -89t58.5 -131t31.5 -152t8.5 -160.5zM1024 1024q0 -159 -112.5 -271.5t-271.5 -112.5
+t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5z" />
+ <glyph glyph-name="film" unicode="&#xf008;" horiz-adv-x="1920"
+d="M384 -64v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM384 320v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM384 704v128q0 26 -19 45t-45 19h-128
+q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1408 -64v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512q0 -26 19 -45t45 -19h768q26 0 45 19t19 45zM384 1088v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45
+t45 -19h128q26 0 45 19t19 45zM1792 -64v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1408 704v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512q0 -26 19 -45t45 -19h768q26 0 45 19t19 45zM1792 320v128
+q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1792 704v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1792 1088v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19
+t-19 -45v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1920 1248v-1344q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1344q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="th_large" unicode="&#xf009;" horiz-adv-x="1664"
+d="M768 512v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90zM768 1280v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90zM1664 512v-384q0 -52 -38 -90t-90 -38
+h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90zM1664 1280v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="th" unicode="&#xf00a;" horiz-adv-x="1792"
+d="M512 288v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM512 800v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1152 288v-192q0 -40 -28 -68t-68 -28h-320
+q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM512 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1152 800v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28
+h320q40 0 68 -28t28 -68zM1792 288v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1152 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 800v-192
+q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="th_list" unicode="&#xf00b;" horiz-adv-x="1792"
+d="M512 288v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM512 800v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 288v-192q0 -40 -28 -68t-68 -28h-960
+q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68zM512 1312v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68zM1792 800v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v192q0 40 28 68t68 28
+h960q40 0 68 -28t28 -68zM1792 1312v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="ok" unicode="&#xf00c;" horiz-adv-x="1792"
+d="M1671 970q0 -40 -28 -68l-724 -724l-136 -136q-28 -28 -68 -28t-68 28l-136 136l-362 362q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -295l656 657q28 28 68 28t68 -28l136 -136q28 -28 28 -68z" />
+ <glyph glyph-name="remove" unicode="&#xf00d;" horiz-adv-x="1408"
+d="M1298 214q0 -40 -28 -68l-136 -136q-28 -28 -68 -28t-68 28l-294 294l-294 -294q-28 -28 -68 -28t-68 28l-136 136q-28 28 -28 68t28 68l294 294l-294 294q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -294l294 294q28 28 68 28t68 -28l136 -136q28 -28 28 -68
+t-28 -68l-294 -294l294 -294q28 -28 28 -68z" />
+ <glyph glyph-name="zoom_in" unicode="&#xf00e;" horiz-adv-x="1664"
+d="M1024 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-224q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v224h-224q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h224v224q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-224h224
+q13 0 22.5 -9.5t9.5 -22.5zM1152 704q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5zM1664 -128q0 -53 -37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5
+t-225 150t-150 225t-55.5 273.5t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90z" />
+ <glyph glyph-name="zoom_out" unicode="&#xf010;" horiz-adv-x="1664"
+d="M1024 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-576q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h576q13 0 22.5 -9.5t9.5 -22.5zM1152 704q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5z
+M1664 -128q0 -53 -37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90z
+" />
+ <glyph glyph-name="off" unicode="&#xf011;"
+d="M1536 640q0 -156 -61 -298t-164 -245t-245 -164t-298 -61t-298 61t-245 164t-164 245t-61 298q0 182 80.5 343t226.5 270q43 32 95.5 25t83.5 -50q32 -42 24.5 -94.5t-49.5 -84.5q-98 -74 -151.5 -181t-53.5 -228q0 -104 40.5 -198.5t109.5 -163.5t163.5 -109.5
+t198.5 -40.5t198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5q0 121 -53.5 228t-151.5 181q-42 32 -49.5 84.5t24.5 94.5q31 43 84 50t95 -25q146 -109 226.5 -270t80.5 -343zM896 1408v-640q0 -52 -38 -90t-90 -38t-90 38t-38 90v640q0 52 38 90t90 38t90 -38t38 -90z" />
+ <glyph glyph-name="signal" unicode="&#xf012;" horiz-adv-x="1792"
+d="M256 96v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM640 224v-320q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v320q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1024 480v-576q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23
+v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1408 864v-960q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v960q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1792 1376v-1472q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1472q0 14 9 23t23 9h192q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="cog" unicode="&#xf013;"
+d="M1024 640q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1536 749v-222q0 -12 -8 -23t-20 -13l-185 -28q-19 -54 -39 -91q35 -50 107 -138q10 -12 10 -25t-9 -23q-27 -37 -99 -108t-94 -71q-12 0 -26 9l-138 108q-44 -23 -91 -38
+q-16 -136 -29 -186q-7 -28 -36 -28h-222q-14 0 -24.5 8.5t-11.5 21.5l-28 184q-49 16 -90 37l-141 -107q-10 -9 -25 -9q-14 0 -25 11q-126 114 -165 168q-7 10 -7 23q0 12 8 23q15 21 51 66.5t54 70.5q-27 50 -41 99l-183 27q-13 2 -21 12.5t-8 23.5v222q0 12 8 23t19 13
+l186 28q14 46 39 92q-40 57 -107 138q-10 12 -10 24q0 10 9 23q26 36 98.5 107.5t94.5 71.5q13 0 26 -10l138 -107q44 23 91 38q16 136 29 186q7 28 36 28h222q14 0 24.5 -8.5t11.5 -21.5l28 -184q49 -16 90 -37l142 107q9 9 24 9q13 0 25 -10q129 -119 165 -170q7 -8 7 -22
+q0 -12 -8 -23q-15 -21 -51 -66.5t-54 -70.5q26 -50 41 -98l183 -28q13 -2 21 -12.5t8 -23.5z" />
+ <glyph glyph-name="trash" unicode="&#xf014;" horiz-adv-x="1408"
+d="M512 800v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM768 800v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1024 800v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576
+q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1152 76v948h-896v-948q0 -22 7 -40.5t14.5 -27t10.5 -8.5h832q3 0 10.5 8.5t14.5 27t7 40.5zM480 1152h448l-48 117q-7 9 -17 11h-317q-10 -2 -17 -11zM1408 1120v-64q0 -14 -9 -23t-23 -9h-96v-948q0 -83 -47 -143.5t-113 -60.5h-832
+q-66 0 -113 58.5t-47 141.5v952h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h309l70 167q15 37 54 63t79 26h320q40 0 79 -26t54 -63l70 -167h309q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="home" unicode="&#xf015;" horiz-adv-x="1664"
+d="M1408 544v-480q0 -26 -19 -45t-45 -19h-384v384h-256v-384h-384q-26 0 -45 19t-19 45v480q0 1 0.5 3t0.5 3l575 474l575 -474q1 -2 1 -6zM1631 613l-62 -74q-8 -9 -21 -11h-3q-13 0 -21 7l-692 577l-692 -577q-12 -8 -24 -7q-13 2 -21 11l-62 74q-8 10 -7 23.5t11 21.5
+l719 599q32 26 76 26t76 -26l244 -204v195q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-408l219 -182q10 -8 11 -21.5t-7 -23.5z" />
+ <glyph glyph-name="file_alt" unicode="&#xf016;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+" />
+ <glyph glyph-name="time" unicode="&#xf017;"
+d="M896 992v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h224v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="road" unicode="&#xf018;" horiz-adv-x="1920"
+d="M1111 540v4l-24 320q-1 13 -11 22.5t-23 9.5h-186q-13 0 -23 -9.5t-11 -22.5l-24 -320v-4q-1 -12 8 -20t21 -8h244q12 0 21 8t8 20zM1870 73q0 -73 -46 -73h-704q13 0 22 9.5t8 22.5l-20 256q-1 13 -11 22.5t-23 9.5h-272q-13 0 -23 -9.5t-11 -22.5l-20 -256
+q-1 -13 8 -22.5t22 -9.5h-704q-46 0 -46 73q0 54 26 116l417 1044q8 19 26 33t38 14h339q-13 0 -23 -9.5t-11 -22.5l-15 -192q-1 -14 8 -23t22 -9h166q13 0 22 9t8 23l-15 192q-1 13 -11 22.5t-23 9.5h339q20 0 38 -14t26 -33l417 -1044q26 -62 26 -116z" />
+ <glyph glyph-name="download_alt" unicode="&#xf019;" horiz-adv-x="1664"
+d="M1280 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1536 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 416v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h465l135 -136
+q58 -56 136 -56t136 56l136 136h464q40 0 68 -28t28 -68zM1339 985q17 -41 -14 -70l-448 -448q-18 -19 -45 -19t-45 19l-448 448q-31 29 -14 70q17 39 59 39h256v448q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-448h256q42 0 59 -39z" />
+ <glyph glyph-name="download" unicode="&#xf01a;"
+d="M1120 608q0 -12 -10 -24l-319 -319q-11 -9 -23 -9t-23 9l-320 320q-15 16 -7 35q8 20 30 20h192v352q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-352h192q14 0 23 -9t9 -23zM768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273
+t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="upload" unicode="&#xf01b;"
+d="M1118 660q-8 -20 -30 -20h-192v-352q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v352h-192q-14 0 -23 9t-9 23q0 12 10 24l319 319q11 9 23 9t23 -9l320 -320q15 -16 7 -35zM768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198
+t73 273t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="inbox" unicode="&#xf01c;"
+d="M1023 576h316q-1 3 -2.5 8.5t-2.5 7.5l-212 496h-708l-212 -496q-1 -3 -2.5 -8.5t-2.5 -7.5h316l95 -192h320zM1536 546v-482q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v482q0 62 25 123l238 552q10 25 36.5 42t52.5 17h832q26 0 52.5 -17t36.5 -42l238 -552
+q25 -61 25 -123z" />
+ <glyph glyph-name="play_circle" unicode="&#xf01d;"
+d="M1184 640q0 -37 -32 -55l-544 -320q-15 -9 -32 -9q-16 0 -32 8q-32 19 -32 56v640q0 37 32 56q33 18 64 -1l544 -320q32 -18 32 -55zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="repeat" unicode="&#xf01e;"
+d="M1536 1280v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l138 138q-148 137 -349 137q-104 0 -198.5 -40.5t-163.5 -109.5t-109.5 -163.5t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5q119 0 225 52t179 147q7 10 23 12q15 0 25 -9
+l137 -138q9 -8 9.5 -20.5t-7.5 -22.5q-109 -132 -264 -204.5t-327 -72.5q-156 0 -298 61t-245 164t-164 245t-61 298t61 298t164 245t245 164t298 61q147 0 284.5 -55.5t244.5 -156.5l130 129q29 31 70 14q39 -17 39 -59z" />
+ <glyph glyph-name="refresh" unicode="&#xf021;"
+d="M1511 480q0 -5 -1 -7q-64 -268 -268 -434.5t-478 -166.5q-146 0 -282.5 55t-243.5 157l-129 -129q-19 -19 -45 -19t-45 19t-19 45v448q0 26 19 45t45 19h448q26 0 45 -19t19 -45t-19 -45l-137 -137q71 -66 161 -102t187 -36q134 0 250 65t186 179q11 17 53 117
+q8 23 30 23h192q13 0 22.5 -9.5t9.5 -22.5zM1536 1280v-448q0 -26 -19 -45t-45 -19h-448q-26 0 -45 19t-19 45t19 45l138 138q-148 137 -349 137q-134 0 -250 -65t-186 -179q-11 -17 -53 -117q-8 -23 -30 -23h-199q-13 0 -22.5 9.5t-9.5 22.5v7q65 268 270 434.5t480 166.5
+q146 0 284 -55.5t245 -156.5l130 129q19 19 45 19t45 -19t19 -45z" />
+ <glyph glyph-name="list_alt" unicode="&#xf022;" horiz-adv-x="1792"
+d="M384 352v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 608v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M384 864v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1536 352v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5z
+M1536 608v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5zM1536 864v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5
+t9.5 -22.5zM1664 160v832q0 13 -9.5 22.5t-22.5 9.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5v-832q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5zM1792 1248v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1472q66 0 113 -47
+t47 -113z" />
+ <glyph glyph-name="lock" unicode="&#xf023;" horiz-adv-x="1152"
+d="M320 768h512v192q0 106 -75 181t-181 75t-181 -75t-75 -181v-192zM1152 672v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v576q0 40 28 68t68 28h32v192q0 184 132 316t316 132t316 -132t132 -316v-192h32q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="flag" unicode="&#xf024;" horiz-adv-x="1792"
+d="M320 1280q0 -72 -64 -110v-1266q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v1266q-64 38 -64 110q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1792 1216v-763q0 -25 -12.5 -38.5t-39.5 -27.5q-215 -116 -369 -116q-61 0 -123.5 22t-108.5 48
+t-115.5 48t-142.5 22q-192 0 -464 -146q-17 -9 -33 -9q-26 0 -45 19t-19 45v742q0 32 31 55q21 14 79 43q236 120 421 120q107 0 200 -29t219 -88q38 -19 88 -19q54 0 117.5 21t110 47t88 47t54.5 21q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="headphones" unicode="&#xf025;" horiz-adv-x="1664"
+d="M1664 650q0 -166 -60 -314l-20 -49l-185 -33q-22 -83 -90.5 -136.5t-156.5 -53.5v-32q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-32q71 0 130 -35.5t93 -95.5l68 12q29 95 29 193q0 148 -88 279t-236.5 209t-315.5 78
+t-315.5 -78t-236.5 -209t-88 -279q0 -98 29 -193l68 -12q34 60 93 95.5t130 35.5v32q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v32q-88 0 -156.5 53.5t-90.5 136.5l-185 33l-20 49q-60 148 -60 314q0 151 67 291t179 242.5
+t266 163.5t320 61t320 -61t266 -163.5t179 -242.5t67 -291z" />
+ <glyph glyph-name="volume_off" unicode="&#xf026;" horiz-adv-x="768"
+d="M768 1184v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45z" />
+ <glyph glyph-name="volume_down" unicode="&#xf027;" horiz-adv-x="1152"
+d="M768 1184v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45zM1152 640q0 -76 -42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5q0 21 12 35.5t29 25t34 23t29 36
+t12 56.5t-12 56.5t-29 36t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5q15 0 25 -5q70 -27 112.5 -93t42.5 -142z" />
+ <glyph glyph-name="volume_up" unicode="&#xf028;" horiz-adv-x="1664"
+d="M768 1184v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45zM1152 640q0 -76 -42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5q0 21 12 35.5t29 25t34 23t29 36
+t12 56.5t-12 56.5t-29 36t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5q15 0 25 -5q70 -27 112.5 -93t42.5 -142zM1408 640q0 -153 -85 -282.5t-225 -188.5q-13 -5 -25 -5q-27 0 -46 19t-19 45q0 39 39 59q56 29 76 44q74 54 115.5 135.5t41.5 173.5t-41.5 173.5
+t-115.5 135.5q-20 15 -76 44q-39 20 -39 59q0 26 19 45t45 19q13 0 26 -5q140 -59 225 -188.5t85 -282.5zM1664 640q0 -230 -127 -422.5t-338 -283.5q-13 -5 -26 -5q-26 0 -45 19t-19 45q0 36 39 59q7 4 22.5 10.5t22.5 10.5q46 25 82 51q123 91 192 227t69 289t-69 289
+t-192 227q-36 26 -82 51q-7 4 -22.5 10.5t-22.5 10.5q-39 23 -39 59q0 26 19 45t45 19q13 0 26 -5q211 -91 338 -283.5t127 -422.5z" />
+ <glyph glyph-name="qrcode" unicode="&#xf029;" horiz-adv-x="1408"
+d="M384 384v-128h-128v128h128zM384 1152v-128h-128v128h128zM1152 1152v-128h-128v128h128zM128 129h384v383h-384v-383zM128 896h384v384h-384v-384zM896 896h384v384h-384v-384zM640 640v-640h-640v640h640zM1152 128v-128h-128v128h128zM1408 128v-128h-128v128h128z
+M1408 640v-384h-384v128h-128v-384h-128v640h384v-128h128v128h128zM640 1408v-640h-640v640h640zM1408 1408v-640h-640v640h640z" />
+ <glyph glyph-name="barcode" unicode="&#xf02a;" horiz-adv-x="1792"
+d="M63 0h-63v1408h63v-1408zM126 1h-32v1407h32v-1407zM220 1h-31v1407h31v-1407zM377 1h-31v1407h31v-1407zM534 1h-62v1407h62v-1407zM660 1h-31v1407h31v-1407zM723 1h-31v1407h31v-1407zM786 1h-31v1407h31v-1407zM943 1h-63v1407h63v-1407zM1100 1h-63v1407h63v-1407z
+M1226 1h-63v1407h63v-1407zM1352 1h-63v1407h63v-1407zM1446 1h-63v1407h63v-1407zM1635 1h-94v1407h94v-1407zM1698 1h-32v1407h32v-1407zM1792 0h-63v1408h63v-1408z" />
+ <glyph glyph-name="tag" unicode="&#xf02b;"
+d="M448 1088q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1515 512q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5
+l715 -714q37 -39 37 -91z" />
+ <glyph glyph-name="tags" unicode="&#xf02c;" horiz-adv-x="1920"
+d="M448 1088q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1515 512q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5
+l715 -714q37 -39 37 -91zM1899 512q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-36 0 -59 14t-53 45l470 470q37 37 37 90q0 52 -37 91l-715 714q-38 38 -102 64.5t-117 26.5h224q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91z" />
+ <glyph glyph-name="book" unicode="&#xf02d;" horiz-adv-x="1664"
+d="M1639 1058q40 -57 18 -129l-275 -906q-19 -64 -76.5 -107.5t-122.5 -43.5h-923q-77 0 -148.5 53.5t-99.5 131.5q-24 67 -2 127q0 4 3 27t4 37q1 8 -3 21.5t-3 19.5q2 11 8 21t16.5 23.5t16.5 23.5q23 38 45 91.5t30 91.5q3 10 0.5 30t-0.5 28q3 11 17 28t17 23
+q21 36 42 92t25 90q1 9 -2.5 32t0.5 28q4 13 22 30.5t22 22.5q19 26 42.5 84.5t27.5 96.5q1 8 -3 25.5t-2 26.5q2 8 9 18t18 23t17 21q8 12 16.5 30.5t15 35t16 36t19.5 32t26.5 23.5t36 11.5t47.5 -5.5l-1 -3q38 9 51 9h761q74 0 114 -56t18 -130l-274 -906
+q-36 -119 -71.5 -153.5t-128.5 -34.5h-869q-27 0 -38 -15q-11 -16 -1 -43q24 -70 144 -70h923q29 0 56 15.5t35 41.5l300 987q7 22 5 57q38 -15 59 -43zM575 1056q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5
+t-16.5 -22.5zM492 800q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5z" />
+ <glyph glyph-name="bookmark" unicode="&#xf02e;" horiz-adv-x="1280"
+d="M1164 1408q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048z" />
+ <glyph glyph-name="print" unicode="&#xf02f;" horiz-adv-x="1664"
+d="M384 0h896v256h-896v-256zM384 640h896v384h-160q-40 0 -68 28t-28 68v160h-640v-640zM1536 576q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 576v-416q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-160q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68
+v160h-224q-13 0 -22.5 9.5t-9.5 22.5v416q0 79 56.5 135.5t135.5 56.5h64v544q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-256h64q79 0 135.5 -56.5t56.5 -135.5z" />
+ <glyph glyph-name="camera" unicode="&#xf030;" horiz-adv-x="1920"
+d="M960 864q119 0 203.5 -84.5t84.5 -203.5t-84.5 -203.5t-203.5 -84.5t-203.5 84.5t-84.5 203.5t84.5 203.5t203.5 84.5zM1664 1280q106 0 181 -75t75 -181v-896q0 -106 -75 -181t-181 -75h-1408q-106 0 -181 75t-75 181v896q0 106 75 181t181 75h224l51 136
+q19 49 69.5 84.5t103.5 35.5h512q53 0 103.5 -35.5t69.5 -84.5l51 -136h224zM960 128q185 0 316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="font" unicode="&#xf031;" horiz-adv-x="1664"
+d="M725 977l-170 -450q33 0 136.5 -2t160.5 -2q19 0 57 2q-87 253 -184 452zM0 -128l2 79q23 7 56 12.5t57 10.5t49.5 14.5t44.5 29t31 50.5l237 616l280 724h75h53q8 -14 11 -21l205 -480q33 -78 106 -257.5t114 -274.5q15 -34 58 -144.5t72 -168.5q20 -45 35 -57
+q19 -15 88 -29.5t84 -20.5q6 -38 6 -57q0 -5 -0.5 -13.5t-0.5 -12.5q-63 0 -190 8t-191 8q-76 0 -215 -7t-178 -8q0 43 4 78l131 28q1 0 12.5 2.5t15.5 3.5t14.5 4.5t15 6.5t11 8t9 11t2.5 14q0 16 -31 96.5t-72 177.5t-42 100l-450 2q-26 -58 -76.5 -195.5t-50.5 -162.5
+q0 -22 14 -37.5t43.5 -24.5t48.5 -13.5t57 -8.5t41 -4q1 -19 1 -58q0 -9 -2 -27q-58 0 -174.5 10t-174.5 10q-8 0 -26.5 -4t-21.5 -4q-80 -14 -188 -14z" />
+ <glyph glyph-name="bold" unicode="&#xf032;" horiz-adv-x="1408"
+d="M555 15q74 -32 140 -32q376 0 376 335q0 114 -41 180q-27 44 -61.5 74t-67.5 46.5t-80.5 25t-84 10.5t-94.5 2q-73 0 -101 -10q0 -53 -0.5 -159t-0.5 -158q0 -8 -1 -67.5t-0.5 -96.5t4.5 -83.5t12 -66.5zM541 761q42 -7 109 -7q82 0 143 13t110 44.5t74.5 89.5t25.5 142
+q0 70 -29 122.5t-79 82t-108 43.5t-124 14q-50 0 -130 -13q0 -50 4 -151t4 -152q0 -27 -0.5 -80t-0.5 -79q0 -46 1 -69zM0 -128l2 94q15 4 85 16t106 27q7 12 12.5 27t8.5 33.5t5.5 32.5t3 37.5t0.5 34v35.5v30q0 982 -22 1025q-4 8 -22 14.5t-44.5 11t-49.5 7t-48.5 4.5
+t-30.5 3l-4 83q98 2 340 11.5t373 9.5q23 0 68 -0.5t68 -0.5q70 0 136.5 -13t128.5 -42t108 -71t74 -104.5t28 -137.5q0 -52 -16.5 -95.5t-39 -72t-64.5 -57.5t-73 -45t-84 -40q154 -35 256.5 -134t102.5 -248q0 -100 -35 -179.5t-93.5 -130.5t-138 -85.5t-163.5 -48.5
+t-176 -14q-44 0 -132 3t-132 3q-106 0 -307 -11t-231 -12z" />
+ <glyph glyph-name="italic" unicode="&#xf033;" horiz-adv-x="1024"
+d="M0 -126l17 85q22 7 61.5 16.5t72 19t59.5 23.5q28 35 41 101q1 7 62 289t114 543.5t52 296.5v25q-24 13 -54.5 18.5t-69.5 8t-58 5.5l19 103q33 -2 120 -6.5t149.5 -7t120.5 -2.5q48 0 98.5 2.5t121 7t98.5 6.5q-5 -39 -19 -89q-30 -10 -101.5 -28.5t-108.5 -33.5
+q-8 -19 -14 -42.5t-9 -40t-7.5 -45.5t-6.5 -42q-27 -148 -87.5 -419.5t-77.5 -355.5q-2 -9 -13 -58t-20 -90t-16 -83.5t-6 -57.5l1 -18q17 -4 185 -31q-3 -44 -16 -99q-11 0 -32.5 -1.5t-32.5 -1.5q-29 0 -87 10t-86 10q-138 2 -206 2q-51 0 -143 -9t-121 -11z" />
+ <glyph glyph-name="text_height" unicode="&#xf034;" horiz-adv-x="1792"
+d="M1744 128q33 0 42 -18.5t-11 -44.5l-126 -162q-20 -26 -49 -26t-49 26l-126 162q-20 26 -11 44.5t42 18.5h80v1024h-80q-33 0 -42 18.5t11 44.5l126 162q20 26 49 26t49 -26l126 -162q20 -26 11 -44.5t-42 -18.5h-80v-1024h80zM81 1407l54 -27q12 -5 211 -5q44 0 132 2
+t132 2q36 0 107.5 -0.5t107.5 -0.5h293q6 0 21 -0.5t20.5 0t16 3t17.5 9t15 17.5l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 48t-14.5 73.5t-7.5 35.5q-6 8 -12 12.5t-15.5 6t-13 2.5t-18 0.5t-16.5 -0.5
+q-17 0 -66.5 0.5t-74.5 0.5t-64 -2t-71 -6q-9 -81 -8 -136q0 -94 2 -388t2 -455q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29t78 27
+q19 42 19 383q0 101 -3 303t-3 303v117q0 2 0.5 15.5t0.5 25t-1 25.5t-3 24t-5 14q-11 12 -162 12q-33 0 -93 -12t-80 -26q-19 -13 -34 -72.5t-31.5 -111t-42.5 -53.5q-42 26 -56 44v383z" />
+ <glyph glyph-name="text_width" unicode="&#xf035;"
+d="M81 1407l54 -27q12 -5 211 -5q44 0 132 2t132 2q70 0 246.5 1t304.5 0.5t247 -4.5q33 -1 56 31l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 47.5t-15 73.5t-7 36q-10 13 -27 19q-5 2 -66 2q-30 0 -93 1t-103 1
+t-94 -2t-96 -7q-9 -81 -8 -136l1 -152v52q0 -55 1 -154t1.5 -180t0.5 -153q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29t78 27
+q7 16 11.5 74t6 145.5t1.5 155t-0.5 153.5t-0.5 89q0 7 -2.5 21.5t-2.5 22.5q0 7 0.5 44t1 73t0 76.5t-3 67.5t-6.5 32q-11 12 -162 12q-41 0 -163 -13.5t-138 -24.5q-19 -12 -34 -71.5t-31.5 -111.5t-42.5 -54q-42 26 -56 44v383zM1310 125q12 0 42 -19.5t57.5 -41.5
+t59.5 -49t36 -30q26 -21 26 -49t-26 -49q-4 -3 -36 -30t-59.5 -49t-57.5 -41.5t-42 -19.5q-13 0 -20.5 10.5t-10 28.5t-2.5 33.5t1.5 33t1.5 19.5h-1024q0 -2 1.5 -19.5t1.5 -33t-2.5 -33.5t-10 -28.5t-20.5 -10.5q-12 0 -42 19.5t-57.5 41.5t-59.5 49t-36 30q-26 21 -26 49
+t26 49q4 3 36 30t59.5 49t57.5 41.5t42 19.5q13 0 20.5 -10.5t10 -28.5t2.5 -33.5t-1.5 -33t-1.5 -19.5h1024q0 2 -1.5 19.5t-1.5 33t2.5 33.5t10 28.5t20.5 10.5z" />
+ <glyph glyph-name="align_left" unicode="&#xf036;" horiz-adv-x="1792"
+d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1408 576v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45zM1664 960v-128q0 -26 -19 -45
+t-45 -19h-1536q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45zM1280 1344v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="align_center" unicode="&#xf037;" horiz-adv-x="1792"
+d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1408 576v-128q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h896q26 0 45 -19t19 -45zM1664 960v-128q0 -26 -19 -45t-45 -19
+h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1280 1344v-128q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h640q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="align_right" unicode="&#xf038;" horiz-adv-x="1792"
+d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 576v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45zM1792 960v-128q0 -26 -19 -45
+t-45 -19h-1536q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45zM1792 1344v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="align_justify" unicode="&#xf039;" horiz-adv-x="1792"
+d="M1792 192v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 576v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 960v-128q0 -26 -19 -45
+t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 1344v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="list" unicode="&#xf03a;" horiz-adv-x="1792"
+d="M256 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5zM256 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5
+t9.5 -22.5zM256 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1344
+q13 0 22.5 -9.5t9.5 -22.5zM256 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5zM1792 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5
+t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5zM1792 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5zM1792 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v192
+q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5z" />
+ <glyph glyph-name="indent_left" unicode="&#xf03b;" horiz-adv-x="1792"
+d="M384 992v-576q0 -13 -9.5 -22.5t-22.5 -9.5q-14 0 -23 9l-288 288q-9 9 -9 23t9 23l288 288q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5
+t9.5 -22.5zM1792 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5zM1792 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088
+q13 0 22.5 -9.5t9.5 -22.5zM1792 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5z" />
+ <glyph glyph-name="indent_right" unicode="&#xf03c;" horiz-adv-x="1792"
+d="M352 704q0 -14 -9 -23l-288 -288q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v576q0 13 9.5 22.5t22.5 9.5q14 0 23 -9l288 -288q9 -9 9 -23zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5
+t9.5 -22.5zM1792 608v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5zM1792 992v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1088
+q13 0 22.5 -9.5t9.5 -22.5zM1792 1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5z" />
+ <glyph glyph-name="facetime_video" unicode="&#xf03d;" horiz-adv-x="1792"
+d="M1792 1184v-1088q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-403 403v-166q0 -119 -84.5 -203.5t-203.5 -84.5h-704q-119 0 -203.5 84.5t-84.5 203.5v704q0 119 84.5 203.5t203.5 84.5h704q119 0 203.5 -84.5t84.5 -203.5v-165l403 402q18 19 45 19q12 0 25 -5
+q39 -17 39 -59z" />
+ <glyph glyph-name="picture" unicode="&#xf03e;" horiz-adv-x="1920"
+d="M640 960q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1664 576v-448h-1408v192l320 320l160 -160l512 512zM1760 1280h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216
+q0 13 -9.5 22.5t-22.5 9.5zM1920 1248v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="pencil" unicode="&#xf040;"
+d="M363 0l91 91l-235 235l-91 -91v-107h128v-128h107zM886 928q0 22 -22 22q-10 0 -17 -7l-542 -542q-7 -7 -7 -17q0 -22 22 -22q10 0 17 7l542 542q7 7 7 17zM832 1120l416 -416l-832 -832h-416v416zM1515 1024q0 -53 -37 -90l-166 -166l-416 416l166 165q36 38 90 38
+q53 0 91 -38l235 -234q37 -39 37 -91z" />
+ <glyph glyph-name="map_marker" unicode="&#xf041;" horiz-adv-x="1024"
+d="M768 896q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1024 896q0 -109 -33 -179l-364 -774q-16 -33 -47.5 -52t-67.5 -19t-67.5 19t-46.5 52l-365 774q-33 70 -33 179q0 212 150 362t362 150t362 -150t150 -362z" />
+ <glyph glyph-name="adjust" unicode="&#xf042;"
+d="M768 96v1088q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="tint" unicode="&#xf043;" horiz-adv-x="1024"
+d="M512 384q0 36 -20 69q-1 1 -15.5 22.5t-25.5 38t-25 44t-21 50.5q-4 16 -21 16t-21 -16q-7 -23 -21 -50.5t-25 -44t-25.5 -38t-15.5 -22.5q-20 -33 -20 -69q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1024 512q0 -212 -150 -362t-362 -150t-362 150t-150 362
+q0 145 81 275q6 9 62.5 90.5t101 151t99.5 178t83 201.5q9 30 34 47t51 17t51.5 -17t33.5 -47q28 -93 83 -201.5t99.5 -178t101 -151t62.5 -90.5q81 -127 81 -275z" />
+ <glyph glyph-name="edit" unicode="&#xf044;" horiz-adv-x="1792"
+d="M888 352l116 116l-152 152l-116 -116v-56h96v-96h56zM1328 1072q-16 16 -33 -1l-350 -350q-17 -17 -1 -33t33 1l350 350q17 17 1 33zM1408 478v-190q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832
+q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-14 -14 -32 -8q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v126q0 13 9 22l64 64q15 15 35 7t20 -29zM1312 1216l288 -288l-672 -672h-288v288zM1756 1084l-92 -92
+l-288 288l92 92q28 28 68 28t68 -28l152 -152q28 -28 28 -68t-28 -68z" />
+ <glyph glyph-name="share" unicode="&#xf045;" horiz-adv-x="1664"
+d="M1408 547v-259q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h255v0q13 0 22.5 -9.5t9.5 -22.5q0 -27 -26 -32q-77 -26 -133 -60q-10 -4 -16 -4h-112q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832
+q66 0 113 47t47 113v214q0 19 18 29q28 13 54 37q16 16 35 8q21 -9 21 -29zM1645 1043l-384 -384q-18 -19 -45 -19q-12 0 -25 5q-39 17 -39 59v192h-160q-323 0 -438 -131q-119 -137 -74 -473q3 -23 -20 -34q-8 -2 -12 -2q-16 0 -26 13q-10 14 -21 31t-39.5 68.5t-49.5 99.5
+t-38.5 114t-17.5 122q0 49 3.5 91t14 90t28 88t47 81.5t68.5 74t94.5 61.5t124.5 48.5t159.5 30.5t196.5 11h160v192q0 42 39 59q13 5 25 5q26 0 45 -19l384 -384q19 -19 19 -45t-19 -45z" />
+ <glyph glyph-name="check" unicode="&#xf046;" horiz-adv-x="1664"
+d="M1408 606v-318q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-10 -10 -23 -10q-3 0 -9 2q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832
+q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v254q0 13 9 22l64 64q10 10 23 10q6 0 12 -3q20 -8 20 -29zM1639 1095l-814 -814q-24 -24 -57 -24t-57 24l-430 430q-24 24 -24 57t24 57l110 110q24 24 57 24t57 -24l263 -263l647 647q24 24 57 24t57 -24l110 -110
+q24 -24 24 -57t-24 -57z" />
+ <glyph glyph-name="move" unicode="&#xf047;" horiz-adv-x="1792"
+d="M1792 640q0 -26 -19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-384v-384h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v384h-384v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45
+t19 45l256 256q19 19 45 19t45 -19t19 -45v-128h384v384h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-384h384v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45z" />
+ <glyph glyph-name="step_backward" unicode="&#xf048;" horiz-adv-x="1024"
+d="M979 1395q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 10 13 19z" />
+ <glyph glyph-name="fast_backward" unicode="&#xf049;" horiz-adv-x="1792"
+d="M1747 1395q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 10 13 19l710 710
+q19 19 32 13t13 -32v-710q4 10 13 19z" />
+ <glyph glyph-name="backward" unicode="&#xf04a;" horiz-adv-x="1664"
+d="M1619 1395q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-19 19 -19 45t19 45l710 710q19 19 32 13t13 -32v-710q4 10 13 19z" />
+ <glyph glyph-name="play" unicode="&#xf04b;" horiz-adv-x="1408"
+d="M1384 609l-1328 -738q-23 -13 -39.5 -3t-16.5 36v1472q0 26 16.5 36t39.5 -3l1328 -738q23 -13 23 -31t-23 -31z" />
+ <glyph glyph-name="pause" unicode="&#xf04c;"
+d="M1536 1344v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45zM640 1344v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="stop" unicode="&#xf04d;"
+d="M1536 1344v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="forward" unicode="&#xf04e;" horiz-adv-x="1664"
+d="M45 -115q-19 -19 -32 -13t-13 32v1472q0 26 13 32t32 -13l710 -710q9 -9 13 -19v710q0 26 13 32t32 -13l710 -710q19 -19 19 -45t-19 -45l-710 -710q-19 -19 -32 -13t-13 32v710q-4 -10 -13 -19z" />
+ <glyph glyph-name="fast_forward" unicode="&#xf050;" horiz-adv-x="1792"
+d="M45 -115q-19 -19 -32 -13t-13 32v1472q0 26 13 32t32 -13l710 -710q9 -9 13 -19v710q0 26 13 32t32 -13l710 -710q9 -9 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-4 -10 -13 -19l-710 -710
+q-19 -19 -32 -13t-13 32v710q-4 -10 -13 -19z" />
+ <glyph glyph-name="step_forward" unicode="&#xf051;" horiz-adv-x="1024"
+d="M45 -115q-19 -19 -32 -13t-13 32v1472q0 26 13 32t32 -13l710 -710q9 -9 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-4 -10 -13 -19z" />
+ <glyph glyph-name="eject" unicode="&#xf052;" horiz-adv-x="1538"
+d="M14 557l710 710q19 19 45 19t45 -19l710 -710q19 -19 13 -32t-32 -13h-1472q-26 0 -32 13t13 32zM1473 0h-1408q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19z" />
+ <glyph glyph-name="chevron_left" unicode="&#xf053;" horiz-adv-x="1280"
+d="M1171 1235l-531 -531l531 -531q19 -19 19 -45t-19 -45l-166 -166q-19 -19 -45 -19t-45 19l-742 742q-19 19 -19 45t19 45l742 742q19 19 45 19t45 -19l166 -166q19 -19 19 -45t-19 -45z" />
+ <glyph glyph-name="chevron_right" unicode="&#xf054;" horiz-adv-x="1280"
+d="M1107 659l-742 -742q-19 -19 -45 -19t-45 19l-166 166q-19 19 -19 45t19 45l531 531l-531 531q-19 19 -19 45t19 45l166 166q19 19 45 19t45 -19l742 -742q19 -19 19 -45t-19 -45z" />
+ <glyph glyph-name="plus_sign" unicode="&#xf055;"
+d="M1216 576v128q0 26 -19 45t-45 19h-256v256q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-256h-256q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h256v-256q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v256h256q26 0 45 19t19 45zM1536 640q0 -209 -103 -385.5
+t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="minus_sign" unicode="&#xf056;"
+d="M1216 576v128q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h768q26 0 45 19t19 45zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5
+t103 -385.5z" />
+ <glyph glyph-name="remove_sign" unicode="&#xf057;"
+d="M1149 414q0 26 -19 45l-181 181l181 181q19 19 19 45q0 27 -19 46l-90 90q-19 19 -46 19q-26 0 -45 -19l-181 -181l-181 181q-19 19 -45 19q-27 0 -46 -19l-90 -90q-19 -19 -19 -46q0 -26 19 -45l181 -181l-181 -181q-19 -19 -19 -45q0 -27 19 -46l90 -90q19 -19 46 -19
+q26 0 45 19l181 181l181 -181q19 -19 45 -19q27 0 46 19l90 90q19 19 19 46zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="ok_sign" unicode="&#xf058;"
+d="M1284 802q0 28 -18 46l-91 90q-19 19 -45 19t-45 -19l-408 -407l-226 226q-19 19 -45 19t-45 -19l-91 -90q-18 -18 -18 -46q0 -27 18 -45l362 -362q19 -19 45 -19q27 0 46 19l543 543q18 18 18 45zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103
+t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="question_sign" unicode="&#xf059;"
+d="M896 160v192q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h192q14 0 23 9t9 23zM1152 832q0 88 -55.5 163t-138.5 116t-170 41q-243 0 -371 -213q-15 -24 8 -42l132 -100q7 -6 19 -6q16 0 25 12q53 68 86 92q34 24 86 24q48 0 85.5 -26t37.5 -59
+q0 -38 -20 -61t-68 -45q-63 -28 -115.5 -86.5t-52.5 -125.5v-36q0 -14 9 -23t23 -9h192q14 0 23 9t9 23q0 19 21.5 49.5t54.5 49.5q32 18 49 28.5t46 35t44.5 48t28 60.5t12.5 81zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5
+t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="info_sign" unicode="&#xf05a;"
+d="M1024 160v160q0 14 -9 23t-23 9h-96v512q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h96v-320h-96q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h448q14 0 23 9t9 23zM896 1056v160q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23
+t23 -9h192q14 0 23 9t9 23zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="screenshot" unicode="&#xf05b;"
+d="M1197 512h-109q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h109q-32 108 -112.5 188.5t-188.5 112.5v-109q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v109q-108 -32 -188.5 -112.5t-112.5 -188.5h109q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-109
+q32 -108 112.5 -188.5t188.5 -112.5v109q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-109q108 32 188.5 112.5t112.5 188.5zM1536 704v-128q0 -26 -19 -45t-45 -19h-143q-37 -161 -154.5 -278.5t-278.5 -154.5v-143q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v143
+q-161 37 -278.5 154.5t-154.5 278.5h-143q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h143q37 161 154.5 278.5t278.5 154.5v143q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-143q161 -37 278.5 -154.5t154.5 -278.5h143q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="remove_circle" unicode="&#xf05c;"
+d="M1097 457l-146 -146q-10 -10 -23 -10t-23 10l-137 137l-137 -137q-10 -10 -23 -10t-23 10l-146 146q-10 10 -10 23t10 23l137 137l-137 137q-10 10 -10 23t10 23l146 146q10 10 23 10t23 -10l137 -137l137 137q10 10 23 10t23 -10l146 -146q10 -10 10 -23t-10 -23
+l-137 -137l137 -137q10 -10 10 -23t-10 -23zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5
+t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="ok_circle" unicode="&#xf05d;"
+d="M1171 723l-422 -422q-19 -19 -45 -19t-45 19l-294 294q-19 19 -19 45t19 45l102 102q19 19 45 19t45 -19l147 -147l275 275q19 19 45 19t45 -19l102 -102q19 -19 19 -45t-19 -45zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198
+t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="ban_circle" unicode="&#xf05e;"
+d="M1312 643q0 161 -87 295l-754 -753q137 -89 297 -89q111 0 211.5 43.5t173.5 116.5t116 174.5t43 212.5zM313 344l755 754q-135 91 -300 91q-148 0 -273 -73t-198 -199t-73 -274q0 -162 89 -299zM1536 643q0 -157 -61 -300t-163.5 -246t-245 -164t-298.5 -61t-298.5 61
+t-245 164t-163.5 246t-61 300t61 299.5t163.5 245.5t245 164t298.5 61t298.5 -61t245 -164t163.5 -245.5t61 -299.5z" />
+ <glyph glyph-name="arrow_left" unicode="&#xf060;"
+d="M1536 640v-128q0 -53 -32.5 -90.5t-84.5 -37.5h-704l293 -294q38 -36 38 -90t-38 -90l-75 -76q-37 -37 -90 -37q-52 0 -91 37l-651 652q-37 37 -37 90q0 52 37 91l651 650q38 38 91 38q52 0 90 -38l75 -74q38 -38 38 -91t-38 -91l-293 -293h704q52 0 84.5 -37.5
+t32.5 -90.5z" />
+ <glyph glyph-name="arrow_right" unicode="&#xf061;"
+d="M1472 576q0 -54 -37 -91l-651 -651q-39 -37 -91 -37q-51 0 -90 37l-75 75q-38 38 -38 91t38 91l293 293h-704q-52 0 -84.5 37.5t-32.5 90.5v128q0 53 32.5 90.5t84.5 37.5h704l-293 294q-38 36 -38 90t38 90l75 75q38 38 90 38q53 0 91 -38l651 -651q37 -35 37 -90z" />
+ <glyph glyph-name="arrow_up" unicode="&#xf062;" horiz-adv-x="1664"
+d="M1611 565q0 -51 -37 -90l-75 -75q-38 -38 -91 -38q-54 0 -90 38l-294 293v-704q0 -52 -37.5 -84.5t-90.5 -32.5h-128q-53 0 -90.5 32.5t-37.5 84.5v704l-294 -293q-36 -38 -90 -38t-90 38l-75 75q-38 38 -38 90q0 53 38 91l651 651q35 37 90 37q54 0 91 -37l651 -651
+q37 -39 37 -91z" />
+ <glyph glyph-name="arrow_down" unicode="&#xf063;" horiz-adv-x="1664"
+d="M1611 704q0 -53 -37 -90l-651 -652q-39 -37 -91 -37q-53 0 -90 37l-651 652q-38 36 -38 90q0 53 38 91l74 75q39 37 91 37q53 0 90 -37l294 -294v704q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-704l294 294q37 37 90 37q52 0 91 -37l75 -75q37 -39 37 -91z" />
+ <glyph glyph-name="share_alt" unicode="&#xf064;" horiz-adv-x="1792"
+d="M1792 896q0 -26 -19 -45l-512 -512q-19 -19 -45 -19t-45 19t-19 45v256h-224q-98 0 -175.5 -6t-154 -21.5t-133 -42.5t-105.5 -69.5t-80 -101t-48.5 -138.5t-17.5 -181q0 -55 5 -123q0 -6 2.5 -23.5t2.5 -26.5q0 -15 -8.5 -25t-23.5 -10q-16 0 -28 17q-7 9 -13 22
+t-13.5 30t-10.5 24q-127 285 -127 451q0 199 53 333q162 403 875 403h224v256q0 26 19 45t45 19t45 -19l512 -512q19 -19 19 -45z" />
+ <glyph glyph-name="resize_full" unicode="&#xf065;"
+d="M755 480q0 -13 -10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23zM1536 1344v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332
+q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23t10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="resize_small" unicode="&#xf066;"
+d="M768 576v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23t10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45zM1523 1248q0 -13 -10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45
+t-45 -19h-448q-26 0 -45 19t-19 45v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23z" />
+ <glyph glyph-name="plus" unicode="&#xf067;" horiz-adv-x="1408"
+d="M1408 800v-192q0 -40 -28 -68t-68 -28h-416v-416q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v416h-416q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h416v416q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-416h416q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="minus" unicode="&#xf068;" horiz-adv-x="1408"
+d="M1408 800v-192q0 -40 -28 -68t-68 -28h-1216q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h1216q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="asterisk" unicode="&#xf069;" horiz-adv-x="1664"
+d="M1482 486q46 -26 59.5 -77.5t-12.5 -97.5l-64 -110q-26 -46 -77.5 -59.5t-97.5 12.5l-266 153v-307q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v307l-266 -153q-46 -26 -97.5 -12.5t-77.5 59.5l-64 110q-26 46 -12.5 97.5t59.5 77.5l266 154l-266 154
+q-46 26 -59.5 77.5t12.5 97.5l64 110q26 46 77.5 59.5t97.5 -12.5l266 -153v307q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-307l266 153q46 26 97.5 12.5t77.5 -59.5l64 -110q26 -46 12.5 -97.5t-59.5 -77.5l-266 -154z" />
+ <glyph glyph-name="exclamation_sign" unicode="&#xf06a;"
+d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM896 161v190q0 14 -9 23.5t-22 9.5h-192q-13 0 -23 -10t-10 -23v-190q0 -13 10 -23t23 -10h192
+q13 0 22 9.5t9 23.5zM894 505l18 621q0 12 -10 18q-10 8 -24 8h-220q-14 0 -24 -8q-10 -6 -10 -18l17 -621q0 -10 10 -17.5t24 -7.5h185q14 0 23.5 7.5t10.5 17.5z" />
+ <glyph glyph-name="gift" unicode="&#xf06b;"
+d="M928 180v56v468v192h-320v-192v-468v-56q0 -25 18 -38.5t46 -13.5h192q28 0 46 13.5t18 38.5zM472 1024h195l-126 161q-26 31 -69 31q-40 0 -68 -28t-28 -68t28 -68t68 -28zM1160 1120q0 40 -28 68t-68 28q-43 0 -69 -31l-125 -161h194q40 0 68 28t28 68zM1536 864v-320
+q0 -14 -9 -23t-23 -9h-96v-416q0 -40 -28 -68t-68 -28h-1088q-40 0 -68 28t-28 68v416h-96q-14 0 -23 9t-9 23v320q0 14 9 23t23 9h440q-93 0 -158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5q107 0 168 -77l128 -165l128 165q61 77 168 77q93 0 158.5 -65.5t65.5 -158.5
+t-65.5 -158.5t-158.5 -65.5h440q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="leaf" unicode="&#xf06c;" horiz-adv-x="1792"
+d="M1280 832q0 26 -19 45t-45 19q-172 0 -318 -49.5t-259.5 -134t-235.5 -219.5q-19 -21 -19 -45q0 -26 19 -45t45 -19q24 0 45 19q27 24 74 71t67 66q137 124 268.5 176t313.5 52q26 0 45 19t19 45zM1792 1030q0 -95 -20 -193q-46 -224 -184.5 -383t-357.5 -268
+q-214 -108 -438 -108q-148 0 -286 47q-15 5 -88 42t-96 37q-16 0 -39.5 -32t-45 -70t-52.5 -70t-60 -32q-43 0 -63.5 17.5t-45.5 59.5q-2 4 -6 11t-5.5 10t-3 9.5t-1.5 13.5q0 35 31 73.5t68 65.5t68 56t31 48q0 4 -14 38t-16 44q-9 51 -9 104q0 115 43.5 220t119 184.5
+t170.5 139t204 95.5q55 18 145 25.5t179.5 9t178.5 6t163.5 24t113.5 56.5l29.5 29.5t29.5 28t27 20t36.5 16t43.5 4.5q39 0 70.5 -46t47.5 -112t24 -124t8 -96z" />
+ <glyph glyph-name="fire" unicode="&#xf06d;" horiz-adv-x="1408"
+d="M1408 -160v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5zM1152 896q0 -78 -24.5 -144t-64 -112.5t-87.5 -88t-96 -77.5t-87.5 -72t-64 -81.5t-24.5 -96.5q0 -96 67 -224l-4 1l1 -1
+q-90 41 -160 83t-138.5 100t-113.5 122.5t-72.5 150.5t-27.5 184q0 78 24.5 144t64 112.5t87.5 88t96 77.5t87.5 72t64 81.5t24.5 96.5q0 94 -66 224l3 -1l-1 1q90 -41 160 -83t138.5 -100t113.5 -122.5t72.5 -150.5t27.5 -184z" />
+ <glyph glyph-name="eye_open" unicode="&#xf06e;" horiz-adv-x="1792"
+d="M1664 576q-152 236 -381 353q61 -104 61 -225q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 121 61 225q-229 -117 -381 -353q133 -205 333.5 -326.5t434.5 -121.5t434.5 121.5t333.5 326.5zM944 960q0 20 -14 34t-34 14q-125 0 -214.5 -89.5
+t-89.5 -214.5q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34zM1792 576q0 -34 -20 -69q-140 -230 -376.5 -368.5t-499.5 -138.5t-499.5 139t-376.5 368q-20 35 -20 69t20 69q140 229 376.5 368t499.5 139t499.5 -139t376.5 -368q20 -35 20 -69z" />
+ <glyph glyph-name="eye_close" unicode="&#xf070;" horiz-adv-x="1792"
+d="M555 201l78 141q-87 63 -136 159t-49 203q0 121 61 225q-229 -117 -381 -353q167 -258 427 -375zM944 960q0 20 -14 34t-34 14q-125 0 -214.5 -89.5t-89.5 -214.5q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34zM1307 1151q0 -7 -1 -9
+q-106 -189 -316 -567t-315 -566l-49 -89q-10 -16 -28 -16q-12 0 -134 70q-16 10 -16 28q0 12 44 87q-143 65 -263.5 173t-208.5 245q-20 31 -20 69t20 69q153 235 380 371t496 136q89 0 180 -17l54 97q10 16 28 16q5 0 18 -6t31 -15.5t33 -18.5t31.5 -18.5t19.5 -11.5
+q16 -10 16 -27zM1344 704q0 -139 -79 -253.5t-209 -164.5l280 502q8 -45 8 -84zM1792 576q0 -35 -20 -69q-39 -64 -109 -145q-150 -172 -347.5 -267t-419.5 -95l74 132q212 18 392.5 137t301.5 307q-115 179 -282 294l63 112q95 -64 182.5 -153t144.5 -184q20 -34 20 -69z
+" />
+ <glyph glyph-name="warning_sign" unicode="&#xf071;" horiz-adv-x="1792"
+d="M1024 161v190q0 14 -9.5 23.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -23.5v-190q0 -14 9.5 -23.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 23.5zM1022 535l18 459q0 12 -10 19q-13 11 -24 11h-220q-11 0 -24 -11q-10 -7 -10 -21l17 -457q0 -10 10 -16.5t24 -6.5h185
+q14 0 23.5 6.5t10.5 16.5zM1008 1469l768 -1408q35 -63 -2 -126q-17 -29 -46.5 -46t-63.5 -17h-1536q-34 0 -63.5 17t-46.5 46q-37 63 -2 126l768 1408q17 31 47 49t65 18t65 -18t47 -49z" />
+ <glyph glyph-name="plane" unicode="&#xf072;" horiz-adv-x="1408"
+d="M1376 1376q44 -52 12 -148t-108 -172l-161 -161l160 -696q5 -19 -12 -33l-128 -96q-7 -6 -19 -6q-4 0 -7 1q-15 3 -21 16l-279 508l-259 -259l53 -194q5 -17 -8 -31l-96 -96q-9 -9 -23 -9h-2q-15 2 -24 13l-189 252l-252 189q-11 7 -13 23q-1 13 9 25l96 97q9 9 23 9
+q6 0 8 -1l194 -53l259 259l-508 279q-14 8 -17 24q-2 16 9 27l128 128q14 13 30 8l665 -159l160 160q76 76 172 108t148 -12z" />
+ <glyph glyph-name="calendar" unicode="&#xf073;" horiz-adv-x="1664"
+d="M128 -128h288v288h-288v-288zM480 -128h320v288h-320v-288zM128 224h288v320h-288v-320zM480 224h320v320h-320v-320zM128 608h288v288h-288v-288zM864 -128h320v288h-320v-288zM480 608h320v288h-320v-288zM1248 -128h288v288h-288v-288zM864 224h320v320h-320v-320z
+M512 1088v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5zM1248 224h288v320h-288v-320zM864 608h320v288h-320v-288zM1248 608h288v288h-288v-288zM1280 1088v288q0 13 -9.5 22.5t-22.5 9.5h-64
+q-13 0 -22.5 -9.5t-9.5 -22.5v-288q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5zM1664 1152v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47
+h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="random" unicode="&#xf074;" horiz-adv-x="1792"
+d="M666 1055q-60 -92 -137 -273q-22 45 -37 72.5t-40.5 63.5t-51 56.5t-63 35t-81.5 14.5h-224q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h224q250 0 410 -225zM1792 256q0 -14 -9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192q-32 0 -85 -0.5t-81 -1t-73 1
+t-71 5t-64 10.5t-63 18.5t-58 28.5t-59 40t-55 53.5t-56 69.5q59 93 136 273q22 -45 37 -72.5t40.5 -63.5t51 -56.5t63 -35t81.5 -14.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23zM1792 1152q0 -14 -9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5
+v192h-256q-48 0 -87 -15t-69 -45t-51 -61.5t-45 -77.5q-32 -62 -78 -171q-29 -66 -49.5 -111t-54 -105t-64 -100t-74 -83t-90 -68.5t-106.5 -42t-128 -16.5h-224q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h224q48 0 87 15t69 45t51 61.5t45 77.5q32 62 78 171q29 66 49.5 111
+t54 105t64 100t74 83t90 68.5t106.5 42t128 16.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23z" />
+ <glyph glyph-name="comment" unicode="&#xf075;" horiz-adv-x="1792"
+d="M1792 640q0 -174 -120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22q-17 -2 -30.5 9t-17.5 29v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281
+q0 130 71 248.5t191 204.5t286 136.5t348 50.5q244 0 450 -85.5t326 -233t120 -321.5z" />
+ <glyph glyph-name="magnet" unicode="&#xf076;"
+d="M1536 704v-128q0 -201 -98.5 -362t-274 -251.5t-395.5 -90.5t-395.5 90.5t-274 251.5t-98.5 362v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -52 23.5 -90t53.5 -57t71 -30t64 -13t44 -2t44 2t64 13t71 30t53.5 57t23.5 90v128q0 26 19 45t45 19h384
+q26 0 45 -19t19 -45zM512 1344v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45zM1536 1344v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="chevron_up" unicode="&#xf077;" horiz-adv-x="1792"
+d="M1683 205l-166 -165q-19 -19 -45 -19t-45 19l-531 531l-531 -531q-19 -19 -45 -19t-45 19l-166 165q-19 19 -19 45.5t19 45.5l742 741q19 19 45 19t45 -19l742 -741q19 -19 19 -45.5t-19 -45.5z" />
+ <glyph glyph-name="chevron_down" unicode="&#xf078;" horiz-adv-x="1792"
+d="M1683 728l-742 -741q-19 -19 -45 -19t-45 19l-742 741q-19 19 -19 45.5t19 45.5l166 165q19 19 45 19t45 -19l531 -531l531 531q19 19 45 19t45 -19l166 -165q19 -19 19 -45.5t-19 -45.5z" />
+ <glyph glyph-name="retweet" unicode="&#xf079;" horiz-adv-x="1920"
+d="M1280 32q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-8 0 -13.5 2t-9 7t-5.5 8t-3 11.5t-1 11.5v13v11v160v416h-192q-26 0 -45 19t-19 45q0 24 15 41l320 384q19 22 49 22t49 -22l320 -384q15 -17 15 -41q0 -26 -19 -45t-45 -19h-192v-384h576q16 0 25 -11l160 -192q7 -10 7 -21
+zM1920 448q0 -24 -15 -41l-320 -384q-20 -23 -49 -23t-49 23l-320 384q-15 17 -15 41q0 26 19 45t45 19h192v384h-576q-16 0 -25 12l-160 192q-7 9 -7 20q0 13 9.5 22.5t22.5 9.5h960q8 0 13.5 -2t9 -7t5.5 -8t3 -11.5t1 -11.5v-13v-11v-160v-416h192q26 0 45 -19t19 -45z
+" />
+ <glyph glyph-name="shopping_cart" unicode="&#xf07a;" horiz-adv-x="1664"
+d="M640 0q0 -52 -38 -90t-90 -38t-90 38t-38 90t38 90t90 38t90 -38t38 -90zM1536 0q0 -52 -38 -90t-90 -38t-90 38t-38 90t38 90t90 38t90 -38t38 -90zM1664 1088v-512q0 -24 -16.5 -42.5t-40.5 -21.5l-1044 -122q13 -60 13 -70q0 -16 -24 -64h920q26 0 45 -19t19 -45
+t-19 -45t-45 -19h-1024q-26 0 -45 19t-19 45q0 11 8 31.5t16 36t21.5 40t15.5 29.5l-177 823h-204q-26 0 -45 19t-19 45t19 45t45 19h256q16 0 28.5 -6.5t19.5 -15.5t13 -24.5t8 -26t5.5 -29.5t4.5 -26h1201q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="folder_close" unicode="&#xf07b;" horiz-adv-x="1664"
+d="M1664 928v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158z" />
+ <glyph glyph-name="folder_open" unicode="&#xf07c;" horiz-adv-x="1920"
+d="M1879 584q0 -31 -31 -66l-336 -396q-43 -51 -120.5 -86.5t-143.5 -35.5h-1088q-34 0 -60.5 13t-26.5 43q0 31 31 66l336 396q43 51 120.5 86.5t143.5 35.5h1088q34 0 60.5 -13t26.5 -43zM1536 928v-160h-832q-94 0 -197 -47.5t-164 -119.5l-337 -396l-5 -6q0 4 -0.5 12.5
+t-0.5 12.5v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158z" />
+ <glyph glyph-name="resize_vertical" unicode="&#xf07d;" horiz-adv-x="768"
+d="M704 1216q0 -26 -19 -45t-45 -19h-128v-1024h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v1024h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45z" />
+ <glyph glyph-name="resize_horizontal" unicode="&#xf07e;" horiz-adv-x="1792"
+d="M1792 640q0 -26 -19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-1024v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45t19 45l256 256q19 19 45 19t45 -19t19 -45v-128h1024v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45z" />
+ <glyph glyph-name="bar_chart" unicode="&#xf080;" horiz-adv-x="2048"
+d="M640 640v-512h-256v512h256zM1024 1152v-1024h-256v1024h256zM2048 0v-128h-2048v1536h128v-1408h1920zM1408 896v-768h-256v768h256zM1792 1280v-1152h-256v1152h256z" />
+ <glyph glyph-name="twitter_sign" unicode="&#xf081;"
+d="M1280 926q-56 -25 -121 -34q68 40 93 117q-65 -38 -134 -51q-61 66 -153 66q-87 0 -148.5 -61.5t-61.5 -148.5q0 -29 5 -48q-129 7 -242 65t-192 155q-29 -50 -29 -106q0 -114 91 -175q-47 1 -100 26v-2q0 -75 50 -133.5t123 -72.5q-29 -8 -51 -8q-13 0 -39 4
+q21 -63 74.5 -104t121.5 -42q-116 -90 -261 -90q-26 0 -50 3q148 -94 322 -94q112 0 210 35.5t168 95t120.5 137t75 162t24.5 168.5q0 18 -1 27q63 45 105 109zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5
+t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="facebook_sign" unicode="&#xf082;"
+d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-188v595h199l30 232h-229v148q0 56 23.5 84t91.5 28l122 1v207q-63 9 -178 9q-136 0 -217.5 -80t-81.5 -226v-171h-200v-232h200v-595h-532q-119 0 -203.5 84.5t-84.5 203.5v960
+q0 119 84.5 203.5t203.5 84.5h960z" />
+ <glyph glyph-name="camera_retro" unicode="&#xf083;" horiz-adv-x="1792"
+d="M928 704q0 14 -9 23t-23 9q-66 0 -113 -47t-47 -113q0 -14 9 -23t23 -9t23 9t9 23q0 40 28 68t68 28q14 0 23 9t9 23zM1152 574q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181zM128 0h1536v128h-1536v-128zM1280 574q0 159 -112.5 271.5
+t-271.5 112.5t-271.5 -112.5t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5zM256 1216h384v128h-384v-128zM128 1024h1536v118v138h-828l-64 -128h-644v-128zM1792 1280v-1280q0 -53 -37.5 -90.5t-90.5 -37.5h-1536q-53 0 -90.5 37.5t-37.5 90.5v1280
+q0 53 37.5 90.5t90.5 37.5h1536q53 0 90.5 -37.5t37.5 -90.5z" />
+ <glyph glyph-name="key" unicode="&#xf084;" horiz-adv-x="1792"
+d="M832 1024q0 80 -56 136t-136 56t-136 -56t-56 -136q0 -42 19 -83q-41 19 -83 19q-80 0 -136 -56t-56 -136t56 -136t136 -56t136 56t56 136q0 42 -19 83q41 -19 83 -19q80 0 136 56t56 136zM1683 320q0 -17 -49 -66t-66 -49q-9 0 -28.5 16t-36.5 33t-38.5 40t-24.5 26
+l-96 -96l220 -220q28 -28 28 -68q0 -42 -39 -81t-81 -39q-40 0 -68 28l-671 671q-176 -131 -365 -131q-163 0 -265.5 102.5t-102.5 265.5q0 160 95 313t248 248t313 95q163 0 265.5 -102.5t102.5 -265.5q0 -189 -131 -365l355 -355l96 96q-3 3 -26 24.5t-40 38.5t-33 36.5
+t-16 28.5q0 17 49 66t66 49q13 0 23 -10q6 -6 46 -44.5t82 -79.5t86.5 -86t73 -78t28.5 -41z" />
+ <glyph glyph-name="cogs" unicode="&#xf085;" horiz-adv-x="1920"
+d="M896 640q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1664 128q0 52 -38 90t-90 38t-90 -38t-38 -90q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1664 1152q0 52 -38 90t-90 38t-90 -38t-38 -90q0 -53 37.5 -90.5t90.5 -37.5
+t90.5 37.5t37.5 90.5zM1280 731v-185q0 -10 -7 -19.5t-16 -10.5l-155 -24q-11 -35 -32 -76q34 -48 90 -115q7 -11 7 -20q0 -12 -7 -19q-23 -30 -82.5 -89.5t-78.5 -59.5q-11 0 -21 7l-115 90q-37 -19 -77 -31q-11 -108 -23 -155q-7 -24 -30 -24h-186q-11 0 -20 7.5t-10 17.5
+l-23 153q-34 10 -75 31l-118 -89q-7 -7 -20 -7q-11 0 -21 8q-144 133 -144 160q0 9 7 19q10 14 41 53t47 61q-23 44 -35 82l-152 24q-10 1 -17 9.5t-7 19.5v185q0 10 7 19.5t16 10.5l155 24q11 35 32 76q-34 48 -90 115q-7 11 -7 20q0 12 7 20q22 30 82 89t79 59q11 0 21 -7
+l115 -90q34 18 77 32q11 108 23 154q7 24 30 24h186q11 0 20 -7.5t10 -17.5l23 -153q34 -10 75 -31l118 89q8 7 20 7q11 0 21 -8q144 -133 144 -160q0 -8 -7 -19q-12 -16 -42 -54t-45 -60q23 -48 34 -82l152 -23q10 -2 17 -10.5t7 -19.5zM1920 198v-140q0 -16 -149 -31
+q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20
+t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31zM1920 1222v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68
+q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70
+q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31z" />
+ <glyph glyph-name="comments" unicode="&#xf086;" horiz-adv-x="1792"
+d="M1408 768q0 -139 -94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224
+q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257zM1792 512q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7
+q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230z" />
+ <glyph glyph-name="thumbs_up_alt" unicode="&#xf087;"
+d="M256 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 768q0 51 -39 89.5t-89 38.5h-352q0 58 48 159.5t48 160.5q0 98 -32 145t-128 47q-26 -26 -38 -85t-30.5 -125.5t-59.5 -109.5q-22 -23 -77 -91q-4 -5 -23 -30t-31.5 -41t-34.5 -42.5
+t-40 -44t-38.5 -35.5t-40 -27t-35.5 -9h-32v-640h32q13 0 31.5 -3t33 -6.5t38 -11t35 -11.5t35.5 -12.5t29 -10.5q211 -73 342 -73h121q192 0 192 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5q32 1 53.5 47t21.5 81zM1536 769
+q0 -89 -49 -163q9 -33 9 -69q0 -77 -38 -144q3 -21 3 -43q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5h-36h-93q-96 0 -189.5 22.5t-216.5 65.5q-116 40 -138 40h-288q-53 0 -90.5 37.5t-37.5 90.5v640q0 53 37.5 90.5t90.5 37.5h274q36 24 137 155q58 75 107 128
+q24 25 35.5 85.5t30.5 126.5t62 108q39 37 90 37q84 0 151 -32.5t102 -101.5t35 -186q0 -93 -48 -192h176q104 0 180 -76t76 -179z" />
+ <glyph glyph-name="thumbs_down_alt" unicode="&#xf088;"
+d="M256 1088q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 512q0 35 -21.5 81t-53.5 47q15 17 25 47.5t10 55.5q0 69 -53 119q18 31 18 69q0 37 -17.5 73.5t-47.5 52.5q5 30 5 56q0 85 -49 126t-136 41h-128q-131 0 -342 -73q-5 -2 -29 -10.5
+t-35.5 -12.5t-35 -11.5t-38 -11t-33 -6.5t-31.5 -3h-32v-640h32q16 0 35.5 -9t40 -27t38.5 -35.5t40 -44t34.5 -42.5t31.5 -41t23 -30q55 -68 77 -91q41 -43 59.5 -109.5t30.5 -125.5t38 -85q96 0 128 47t32 145q0 59 -48 160.5t-48 159.5h352q50 0 89 38.5t39 89.5z
+M1536 511q0 -103 -76 -179t-180 -76h-176q48 -99 48 -192q0 -118 -35 -186q-35 -69 -102 -101.5t-151 -32.5q-51 0 -90 37q-34 33 -54 82t-25.5 90.5t-17.5 84.5t-31 64q-48 50 -107 127q-101 131 -137 155h-274q-53 0 -90.5 37.5t-37.5 90.5v640q0 53 37.5 90.5t90.5 37.5
+h288q22 0 138 40q128 44 223 66t200 22h112q140 0 226.5 -79t85.5 -216v-5q60 -77 60 -178q0 -22 -3 -43q38 -67 38 -144q0 -36 -9 -69q49 -73 49 -163z" />
+ <glyph glyph-name="star_half" unicode="&#xf089;" horiz-adv-x="896"
+d="M832 1504v-1339l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48q0 37 56 46l502 73l225 455q19 41 49 41z" />
+ <glyph glyph-name="heart_empty" unicode="&#xf08a;" horiz-adv-x="1792"
+d="M1664 940q0 81 -21.5 143t-55 98.5t-81.5 59.5t-94 31t-98 8t-112 -25.5t-110.5 -64t-86.5 -72t-60 -61.5q-18 -22 -49 -22t-49 22q-24 28 -60 61.5t-86.5 72t-110.5 64t-112 25.5t-98 -8t-94 -31t-81.5 -59.5t-55 -98.5t-21.5 -143q0 -168 187 -355l581 -560l580 559
+q188 188 188 356zM1792 940q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5
+q224 0 351 -124t127 -344z" />
+ <glyph glyph-name="signout" unicode="&#xf08b;" horiz-adv-x="1664"
+d="M640 96q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-119 0 -203.5 84.5t-84.5 203.5v704q0 119 84.5 203.5t203.5 84.5h320q13 0 22.5 -9.5t9.5 -22.5q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-66 0 -113 -47t-47 -113v-704
+q0 -66 47 -113t113 -47h288h11h13t11.5 -1t11.5 -3t8 -5.5t7 -9t2 -13.5zM1568 640q0 -26 -19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45z" />
+ <glyph glyph-name="linkedin_sign" unicode="&#xf08c;"
+d="M237 122h231v694h-231v-694zM483 1030q-1 52 -36 86t-93 34t-94.5 -34t-36.5 -86q0 -51 35.5 -85.5t92.5 -34.5h1q59 0 95 34.5t36 85.5zM1068 122h231v398q0 154 -73 233t-193 79q-136 0 -209 -117h2v101h-231q3 -66 0 -694h231v388q0 38 7 56q15 35 45 59.5t74 24.5
+q116 0 116 -157v-371zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="pushpin" unicode="&#xf08d;" horiz-adv-x="1152"
+d="M480 672v448q0 14 -9 23t-23 9t-23 -9t-9 -23v-448q0 -14 9 -23t23 -9t23 9t9 23zM1152 320q0 -26 -19 -45t-45 -19h-429l-51 -483q-2 -12 -10.5 -20.5t-20.5 -8.5h-1q-27 0 -32 27l-76 485h-404q-26 0 -45 19t-19 45q0 123 78.5 221.5t177.5 98.5v512q-52 0 -90 38
+t-38 90t38 90t90 38h640q52 0 90 -38t38 -90t-38 -90t-90 -38v-512q99 0 177.5 -98.5t78.5 -221.5z" />
+ <glyph glyph-name="external_link" unicode="&#xf08e;" horiz-adv-x="1792"
+d="M1408 608v-320q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v320
+q0 14 9 23t23 9h64q14 0 23 -9t9 -23zM1792 1472v-512q0 -26 -19 -45t-45 -19t-45 19l-176 176l-652 -652q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23t10 23l652 652l-176 176q-19 19 -19 45t19 45t45 19h512q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="signin" unicode="&#xf090;"
+d="M1184 640q0 -26 -19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45zM1536 992v-704q0 -119 -84.5 -203.5t-203.5 -84.5h-320q-13 0 -22.5 9.5t-9.5 22.5
+q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q66 0 113 47t47 113v704q0 66 -47 113t-113 47h-288h-11h-13t-11.5 1t-11.5 3t-8 5.5t-7 9t-2 13.5q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="trophy" unicode="&#xf091;" horiz-adv-x="1664"
+d="M458 653q-74 162 -74 371h-256v-96q0 -78 94.5 -162t235.5 -113zM1536 928v96h-256q0 -209 -74 -371q141 29 235.5 113t94.5 162zM1664 1056v-128q0 -71 -41.5 -143t-112 -130t-173 -97.5t-215.5 -44.5q-42 -54 -95 -95q-38 -34 -52.5 -72.5t-14.5 -89.5q0 -54 30.5 -91
+t97.5 -37q75 0 133.5 -45.5t58.5 -114.5v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 69 58.5 114.5t133.5 45.5q67 0 97.5 37t30.5 91q0 51 -14.5 89.5t-52.5 72.5q-53 41 -95 95q-113 5 -215.5 44.5t-173 97.5t-112 130t-41.5 143v128q0 40 28 68t68 28h288v96
+q0 66 47 113t113 47h576q66 0 113 -47t47 -113v-96h288q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="github_sign" unicode="&#xf092;"
+d="M519 336q4 6 -3 13q-9 7 -14 2q-4 -6 3 -13q9 -7 14 -2zM491 377q-5 7 -12 4q-6 -4 0 -12q7 -8 12 -5q6 4 0 13zM450 417q2 4 -5 8q-7 2 -8 -2q-3 -5 4 -8q8 -2 9 2zM471 394q2 1 1.5 4.5t-3.5 5.5q-6 7 -10 3t1 -11q6 -6 11 -2zM557 319q2 7 -9 11q-9 3 -13 -4
+q-2 -7 9 -11q9 -3 13 4zM599 316q0 8 -12 8q-10 0 -10 -8t11 -8t11 8zM638 323q-2 7 -13 5t-9 -9q2 -8 12 -6t10 10zM1280 640q0 212 -150 362t-362 150t-362 -150t-150 -362q0 -167 98 -300.5t252 -185.5q18 -3 26.5 5t8.5 20q0 52 -1 95q-6 -1 -15.5 -2.5t-35.5 -2t-48 4
+t-43.5 20t-29.5 41.5q-23 59 -57 74q-2 1 -4.5 3.5l-8 8t-7 9.5t4 7.5t19.5 3.5q6 0 15 -2t30 -15.5t33 -35.5q16 -28 37.5 -42t43.5 -14t38 3.5t30 9.5q7 47 33 69q-49 6 -86 18.5t-73 39t-55.5 76t-19.5 119.5q0 79 53 137q-24 62 5 136q19 6 54.5 -7.5t60.5 -29.5l26 -16
+q58 17 128 17t128 -17q11 7 28.5 18t55.5 26t57 9q29 -74 5 -136q53 -58 53 -137q0 -57 -14 -100.5t-35.5 -70t-53.5 -44.5t-62.5 -26t-68.5 -12q35 -31 35 -95q0 -40 -0.5 -89t-0.5 -51q0 -12 8.5 -20t26.5 -5q154 52 252 185.5t98 300.5zM1536 1120v-960
+q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="upload_alt" unicode="&#xf093;" horiz-adv-x="1664"
+d="M1280 64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1536 64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 288v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h427q21 -56 70.5 -92
+t110.5 -36h256q61 0 110.5 36t70.5 92h427q40 0 68 -28t28 -68zM1339 936q-17 -40 -59 -40h-256v-448q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v448h-256q-42 0 -59 40q-17 39 14 69l448 448q18 19 45 19t45 -19l448 -448q31 -30 14 -69z" />
+ <glyph glyph-name="lemon" unicode="&#xf094;"
+d="M1407 710q0 44 -7 113.5t-18 96.5q-12 30 -17 44t-9 36.5t-4 48.5q0 23 5 68.5t5 67.5q0 37 -10 55q-4 1 -13 1q-19 0 -58 -4.5t-59 -4.5q-60 0 -176 24t-175 24q-43 0 -94.5 -11.5t-85 -23.5t-89.5 -34q-137 -54 -202 -103q-96 -73 -159.5 -189.5t-88 -236t-24.5 -248.5
+q0 -40 12.5 -120t12.5 -121q0 -23 -11 -66.5t-11 -65.5t12 -36.5t34 -14.5q24 0 72.5 11t73.5 11q57 0 169.5 -15.5t169.5 -15.5q181 0 284 36q129 45 235.5 152.5t166 245.5t59.5 275zM1535 712q0 -165 -70 -327.5t-196 -288t-281 -180.5q-124 -44 -326 -44
+q-57 0 -170 14.5t-169 14.5q-24 0 -72.5 -14.5t-73.5 -14.5q-73 0 -123.5 55.5t-50.5 128.5q0 24 11 68t11 67q0 40 -12.5 120.5t-12.5 121.5q0 111 18 217.5t54.5 209.5t100.5 194t150 156q78 59 232 120q194 78 316 78q60 0 175.5 -24t173.5 -24q19 0 57 5t58 5
+q81 0 118 -50.5t37 -134.5q0 -23 -5 -68t-5 -68q0 -13 2 -25t3.5 -16.5t7.5 -20.5t8 -20q16 -40 25 -118.5t9 -136.5z" />
+ <glyph glyph-name="phone" unicode="&#xf095;" horiz-adv-x="1408"
+d="M1408 296q0 -27 -10 -70.5t-21 -68.5q-21 -50 -122 -106q-94 -51 -186 -51q-27 0 -53 3.5t-57.5 12.5t-47 14.5t-55.5 20.5t-49 18q-98 35 -175 83q-127 79 -264 216t-216 264q-48 77 -83 175q-3 9 -18 49t-20.5 55.5t-14.5 47t-12.5 57.5t-3.5 53q0 92 51 186
+q56 101 106 122q25 11 68.5 21t70.5 10q14 0 21 -3q18 -6 53 -76q11 -19 30 -54t35 -63.5t31 -53.5q3 -4 17.5 -25t21.5 -35.5t7 -28.5q0 -20 -28.5 -50t-62 -55t-62 -53t-28.5 -46q0 -9 5 -22.5t8.5 -20.5t14 -24t11.5 -19q76 -137 174 -235t235 -174q2 -1 19 -11.5t24 -14
+t20.5 -8.5t22.5 -5q18 0 46 28.5t53 62t55 62t50 28.5q14 0 28.5 -7t35.5 -21.5t25 -17.5q25 -15 53.5 -31t63.5 -35t54 -30q70 -35 76 -53q3 -7 3 -21z" />
+ <glyph glyph-name="check_empty" unicode="&#xf096;" horiz-adv-x="1408"
+d="M1120 1280h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47zM1408 1120v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832
+q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="bookmark_empty" unicode="&#xf097;" horiz-adv-x="1280"
+d="M1152 1280h-1024v-1242l423 406l89 85l89 -85l423 -406v1242zM1164 1408q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62v1289
+q0 34 19.5 62t52.5 41q21 9 44 9h1048z" />
+ <glyph glyph-name="phone_sign" unicode="&#xf098;"
+d="M1280 343q0 11 -2 16t-18 16.5t-40.5 25t-47.5 26.5t-45.5 25t-28.5 15q-5 3 -19 13t-25 15t-21 5q-15 0 -36.5 -20.5t-39.5 -45t-38.5 -45t-33.5 -20.5q-7 0 -16.5 3.5t-15.5 6.5t-17 9.5t-14 8.5q-99 55 -170 126.5t-127 170.5q-2 3 -8.5 14t-9.5 17t-6.5 15.5
+t-3.5 16.5q0 13 20.5 33.5t45 38.5t45 39.5t20.5 36.5q0 10 -5 21t-15 25t-13 19q-3 6 -15 28.5t-25 45.5t-26.5 47.5t-25 40.5t-16.5 18t-16 2q-48 0 -101 -22q-46 -21 -80 -94.5t-34 -130.5q0 -16 2.5 -34t5 -30.5t9 -33t10 -29.5t12.5 -33t11 -30q60 -164 216.5 -320.5
+t320.5 -216.5q6 -2 30 -11t33 -12.5t29.5 -10t33 -9t30.5 -5t34 -2.5q57 0 130.5 34t94.5 80q22 53 22 101zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z
+" />
+ <glyph glyph-name="twitter" unicode="&#xf099;" horiz-adv-x="1664"
+d="M1620 1128q-67 -98 -162 -167q1 -14 1 -42q0 -130 -38 -259.5t-115.5 -248.5t-184.5 -210.5t-258 -146t-323 -54.5q-271 0 -496 145q35 -4 78 -4q225 0 401 138q-105 2 -188 64.5t-114 159.5q33 -5 61 -5q43 0 85 11q-112 23 -185.5 111.5t-73.5 205.5v4q68 -38 146 -41
+q-66 44 -105 115t-39 154q0 88 44 163q121 -149 294.5 -238.5t371.5 -99.5q-8 38 -8 74q0 134 94.5 228.5t228.5 94.5q140 0 236 -102q109 21 205 78q-37 -115 -142 -178q93 10 186 50z" />
+ <glyph glyph-name="facebook" unicode="&#xf09a;" horiz-adv-x="1024"
+d="M959 1524v-264h-157q-86 0 -116 -36t-30 -108v-189h293l-39 -296h-254v-759h-306v759h-255v296h255v218q0 186 104 288.5t277 102.5q147 0 228 -12z" />
+ <glyph glyph-name="github" unicode="&#xf09b;"
+d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5q0 -251 -146.5 -451.5t-378.5 -277.5q-27 -5 -40 7t-13 30q0 3 0.5 76.5t0.5 134.5q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 119 -79 206q37 91 -8 204q-28 9 -81 -11t-92 -44l-38 -24
+q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-85 13.5q-45 -113 -8 -204q-79 -87 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-39 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52t-49.5 24l-20 3q-21 0 -29 -4.5
+t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -88.5t0.5 -54.5q0 -18 -13 -30t-40 -7q-232 77 -378.5 277.5t-146.5 451.5q0 209 103 385.5t279.5 279.5t385.5 103zM291 305q3 7 -7 12
+q-10 3 -13 -2q-3 -7 7 -12q9 -6 13 2zM322 271q7 5 -2 16q-10 9 -16 3q-7 -5 2 -16q10 -10 16 -3zM352 226q9 7 0 19q-8 13 -17 6q-9 -5 0 -18t17 -7zM394 184q8 8 -4 19q-12 12 -20 3q-9 -8 4 -19q12 -12 20 -3zM451 159q3 11 -13 16q-15 4 -19 -7t13 -15q15 -6 19 6z
+M514 154q0 13 -17 11q-16 0 -16 -11q0 -13 17 -11q16 0 16 11zM572 164q-2 11 -18 9q-16 -3 -14 -15t18 -8t14 14z" />
+ <glyph glyph-name="unlock" unicode="&#xf09c;" horiz-adv-x="1664"
+d="M1664 960v-256q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45v256q0 106 -75 181t-181 75t-181 -75t-75 -181v-192h96q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v576q0 40 28 68t68 28h672v192q0 185 131.5 316.5t316.5 131.5
+t316.5 -131.5t131.5 -316.5z" />
+ <glyph glyph-name="credit_card" unicode="&#xf09d;" horiz-adv-x="1920"
+d="M1760 1408q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1600zM160 1280q-13 0 -22.5 -9.5t-9.5 -22.5v-224h1664v224q0 13 -9.5 22.5t-22.5 9.5h-1600zM1760 0q13 0 22.5 9.5t9.5 22.5v608h-1664v-608
+q0 -13 9.5 -22.5t22.5 -9.5h1600zM256 128v128h256v-128h-256zM640 128v128h384v-128h-384z" />
+ <glyph glyph-name="rss" unicode="&#xf09e;" horiz-adv-x="1408"
+d="M384 192q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM896 69q2 -28 -17 -48q-18 -21 -47 -21h-135q-25 0 -43 16.5t-20 41.5q-22 229 -184.5 391.5t-391.5 184.5q-25 2 -41.5 20t-16.5 43v135q0 29 21 47q17 17 43 17h5q160 -13 306 -80.5
+t259 -181.5q114 -113 181.5 -259t80.5 -306zM1408 67q2 -27 -18 -47q-18 -20 -46 -20h-143q-26 0 -44.5 17.5t-19.5 42.5q-12 215 -101 408.5t-231.5 336t-336 231.5t-408.5 102q-25 1 -42.5 19.5t-17.5 43.5v143q0 28 20 46q18 18 44 18h3q262 -13 501.5 -120t425.5 -294
+q187 -186 294 -425.5t120 -501.5z" />
+ <glyph glyph-name="hdd" unicode="&#xf0a0;"
+d="M1040 320q0 -33 -23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5t23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5zM1296 320q0 -33 -23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5t23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5zM1408 160v320q0 13 -9.5 22.5t-22.5 9.5
+h-1216q-13 0 -22.5 -9.5t-9.5 -22.5v-320q0 -13 9.5 -22.5t22.5 -9.5h1216q13 0 22.5 9.5t9.5 22.5zM178 640h1180l-157 482q-4 13 -16 21.5t-26 8.5h-782q-14 0 -26 -8.5t-16 -21.5zM1536 480v-320q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113v320q0 25 16 75
+l197 606q17 53 63 86t101 33h782q55 0 101 -33t63 -86l197 -606q16 -50 16 -75z" />
+ <glyph glyph-name="bullhorn" unicode="&#xf0a1;" horiz-adv-x="1792"
+d="M1664 896q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5v-384q0 -52 -38 -90t-90 -38q-417 347 -812 380q-58 -19 -91 -66t-31 -100.5t40 -92.5q-20 -33 -23 -65.5t6 -58t33.5 -55t48 -50t61.5 -50.5q-29 -58 -111.5 -83t-168.5 -11.5t-132 55.5q-7 23 -29.5 87.5
+t-32 94.5t-23 89t-15 101t3.5 98.5t22 110.5h-122q-66 0 -113 47t-47 113v192q0 66 47 113t113 47h480q435 0 896 384q52 0 90 -38t38 -90v-384zM1536 292v954q-394 -302 -768 -343v-270q377 -42 768 -341z" />
+ <glyph glyph-name="bell" unicode="&#xf0a2;" horiz-adv-x="1792"
+d="M912 -160q0 16 -16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16zM246 128h1300q-266 300 -266 832q0 51 -24 105t-69 103t-121.5 80.5t-169.5 31.5t-169.5 -31.5t-121.5 -80.5t-69 -103t-24 -105q0 -532 -266 -832z
+M1728 128q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38t-38 90q50 42 91 88t85 119.5t74.5 158.5t50 206t19.5 260q0 152 117 282.5t307 158.5q-8 19 -8 39q0 40 28 68t68 28t68 -28t28 -68q0 -20 -8 -39q190 -28 307 -158.5
+t117 -282.5q0 -139 19.5 -260t50 -206t74.5 -158.5t85 -119.5t91 -88z" />
+ <glyph glyph-name="certificate" unicode="&#xf0a3;"
+d="M1376 640l138 -135q30 -28 20 -70q-12 -41 -52 -51l-188 -48l53 -186q12 -41 -19 -70q-29 -31 -70 -19l-186 53l-48 -188q-10 -40 -51 -52q-12 -2 -19 -2q-31 0 -51 22l-135 138l-135 -138q-28 -30 -70 -20q-41 11 -51 52l-48 188l-186 -53q-41 -12 -70 19q-31 29 -19 70
+l53 186l-188 48q-40 10 -52 51q-10 42 20 70l138 135l-138 135q-30 28 -20 70q12 41 52 51l188 48l-53 186q-12 41 19 70q29 31 70 19l186 -53l48 188q10 41 51 51q41 12 70 -19l135 -139l135 139q29 30 70 19q41 -10 51 -51l48 -188l186 53q41 12 70 -19q31 -29 19 -70
+l-53 -186l188 -48q40 -10 52 -51q10 -42 -20 -70z" />
+ <glyph glyph-name="hand_right" unicode="&#xf0a4;" horiz-adv-x="1792"
+d="M256 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1664 768q0 51 -39 89.5t-89 38.5h-576q0 20 15 48.5t33 55t33 68t15 84.5q0 67 -44.5 97.5t-115.5 30.5q-24 0 -90 -139q-24 -44 -37 -65q-40 -64 -112 -145q-71 -81 -101 -106
+q-69 -57 -140 -57h-32v-640h32q72 0 167 -32t193.5 -64t179.5 -32q189 0 189 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5h331q52 0 90 38t38 90zM1792 769q0 -105 -75.5 -181t-180.5 -76h-169q-4 -62 -37 -119q3 -21 3 -43
+q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5q-133 0 -322 69q-164 59 -223 59h-288q-53 0 -90.5 37.5t-37.5 90.5v640q0 53 37.5 90.5t90.5 37.5h288q10 0 21.5 4.5t23.5 14t22.5 18t24 22.5t20.5 21.5t19 21.5t14 17q65 74 100 129q13 21 33 62t37 72t40.5 63t55 49.5
+t69.5 17.5q125 0 206.5 -67t81.5 -189q0 -68 -22 -128h374q104 0 180 -76t76 -179z" />
+ <glyph glyph-name="hand_left" unicode="&#xf0a5;" horiz-adv-x="1792"
+d="M1376 128h32v640h-32q-35 0 -67.5 12t-62.5 37t-50 46t-49 54q-8 9 -12 14q-72 81 -112 145q-14 22 -38 68q-1 3 -10.5 22.5t-18.5 36t-20 35.5t-21.5 30.5t-18.5 11.5q-71 0 -115.5 -30.5t-44.5 -97.5q0 -43 15 -84.5t33 -68t33 -55t15 -48.5h-576q-50 0 -89 -38.5
+t-39 -89.5q0 -52 38 -90t90 -38h331q-15 -17 -25 -47.5t-10 -55.5q0 -69 53 -119q-18 -32 -18 -69t17.5 -73.5t47.5 -52.5q-4 -24 -4 -56q0 -85 48.5 -126t135.5 -41q84 0 183 32t194 64t167 32zM1664 192q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45z
+M1792 768v-640q0 -53 -37.5 -90.5t-90.5 -37.5h-288q-59 0 -223 -59q-190 -69 -317 -69q-142 0 -230 77.5t-87 217.5l1 5q-61 76 -61 178q0 22 3 43q-33 57 -37 119h-169q-105 0 -180.5 76t-75.5 181q0 103 76 179t180 76h374q-22 60 -22 128q0 122 81.5 189t206.5 67
+q38 0 69.5 -17.5t55 -49.5t40.5 -63t37 -72t33 -62q35 -55 100 -129q2 -3 14 -17t19 -21.5t20.5 -21.5t24 -22.5t22.5 -18t23.5 -14t21.5 -4.5h288q53 0 90.5 -37.5t37.5 -90.5z" />
+ <glyph glyph-name="hand_up" unicode="&#xf0a6;"
+d="M1280 -64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 700q0 189 -167 189q-26 0 -56 -5q-16 30 -52.5 47.5t-73.5 17.5t-69 -18q-50 53 -119 53q-25 0 -55.5 -10t-47.5 -25v331q0 52 -38 90t-90 38q-51 0 -89.5 -39t-38.5 -89v-576
+q-20 0 -48.5 15t-55 33t-68 33t-84.5 15q-67 0 -97.5 -44.5t-30.5 -115.5q0 -24 139 -90q44 -24 65 -37q64 -40 145 -112q81 -71 106 -101q57 -69 57 -140v-32h640v32q0 72 32 167t64 193.5t32 179.5zM1536 705q0 -133 -69 -322q-59 -164 -59 -223v-288q0 -53 -37.5 -90.5
+t-90.5 -37.5h-640q-53 0 -90.5 37.5t-37.5 90.5v288q0 10 -4.5 21.5t-14 23.5t-18 22.5t-22.5 24t-21.5 20.5t-21.5 19t-17 14q-74 65 -129 100q-21 13 -62 33t-72 37t-63 40.5t-49.5 55t-17.5 69.5q0 125 67 206.5t189 81.5q68 0 128 -22v374q0 104 76 180t179 76
+q105 0 181 -75.5t76 -180.5v-169q62 -4 119 -37q21 3 43 3q101 0 178 -60q139 1 219.5 -85t80.5 -227z" />
+ <glyph glyph-name="hand_down" unicode="&#xf0a7;"
+d="M1408 576q0 84 -32 183t-64 194t-32 167v32h-640v-32q0 -35 -12 -67.5t-37 -62.5t-46 -50t-54 -49q-9 -8 -14 -12q-81 -72 -145 -112q-22 -14 -68 -38q-3 -1 -22.5 -10.5t-36 -18.5t-35.5 -20t-30.5 -21.5t-11.5 -18.5q0 -71 30.5 -115.5t97.5 -44.5q43 0 84.5 15t68 33
+t55 33t48.5 15v-576q0 -50 38.5 -89t89.5 -39q52 0 90 38t38 90v331q46 -35 103 -35q69 0 119 53q32 -18 69 -18t73.5 17.5t52.5 47.5q24 -4 56 -4q85 0 126 48.5t41 135.5zM1280 1344q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1536 580
+q0 -142 -77.5 -230t-217.5 -87l-5 1q-76 -61 -178 -61q-22 0 -43 3q-54 -30 -119 -37v-169q0 -105 -76 -180.5t-181 -75.5q-103 0 -179 76t-76 180v374q-54 -22 -128 -22q-121 0 -188.5 81.5t-67.5 206.5q0 38 17.5 69.5t49.5 55t63 40.5t72 37t62 33q55 35 129 100
+q3 2 17 14t21.5 19t21.5 20.5t22.5 24t18 22.5t14 23.5t4.5 21.5v288q0 53 37.5 90.5t90.5 37.5h640q53 0 90.5 -37.5t37.5 -90.5v-288q0 -59 59 -223q69 -190 69 -317z" />
+ <glyph glyph-name="circle_arrow_left" unicode="&#xf0a8;"
+d="M1280 576v128q0 26 -19 45t-45 19h-502l189 189q19 19 19 45t-19 45l-91 91q-18 18 -45 18t-45 -18l-362 -362l-91 -91q-18 -18 -18 -45t18 -45l91 -91l362 -362q18 -18 45 -18t45 18l91 91q18 18 18 45t-18 45l-189 189h502q26 0 45 19t19 45zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="circle_arrow_right" unicode="&#xf0a9;"
+d="M1285 640q0 27 -18 45l-91 91l-362 362q-18 18 -45 18t-45 -18l-91 -91q-18 -18 -18 -45t18 -45l189 -189h-502q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h502l-189 -189q-19 -19 -19 -45t19 -45l91 -91q18 -18 45 -18t45 18l362 362l91 91q18 18 18 45zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="circle_arrow_up" unicode="&#xf0aa;"
+d="M1284 641q0 27 -18 45l-362 362l-91 91q-18 18 -45 18t-45 -18l-91 -91l-362 -362q-18 -18 -18 -45t18 -45l91 -91q18 -18 45 -18t45 18l189 189v-502q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v502l189 -189q19 -19 45 -19t45 19l91 91q18 18 18 45zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="circle_arrow_down" unicode="&#xf0ab;"
+d="M1284 639q0 27 -18 45l-91 91q-18 18 -45 18t-45 -18l-189 -189v502q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-502l-189 189q-19 19 -45 19t-45 -19l-91 -91q-18 -18 -18 -45t18 -45l362 -362l91 -91q18 -18 45 -18t45 18l91 91l362 362q18 18 18 45zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="globe" unicode="&#xf0ac;"
+d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM1042 887q-2 -1 -9.5 -9.5t-13.5 -9.5q2 0 4.5 5t5 11t3.5 7q6 7 22 15q14 6 52 12q34 8 51 -11
+q-2 2 9.5 13t14.5 12q3 2 15 4.5t15 7.5l2 22q-12 -1 -17.5 7t-6.5 21q0 -2 -6 -8q0 7 -4.5 8t-11.5 -1t-9 -1q-10 3 -15 7.5t-8 16.5t-4 15q-2 5 -9.5 11t-9.5 10q-1 2 -2.5 5.5t-3 6.5t-4 5.5t-5.5 2.5t-7 -5t-7.5 -10t-4.5 -5q-3 2 -6 1.5t-4.5 -1t-4.5 -3t-5 -3.5
+q-3 -2 -8.5 -3t-8.5 -2q15 5 -1 11q-10 4 -16 3q9 4 7.5 12t-8.5 14h5q-1 4 -8.5 8.5t-17.5 8.5t-13 6q-8 5 -34 9.5t-33 0.5q-5 -6 -4.5 -10.5t4 -14t3.5 -12.5q1 -6 -5.5 -13t-6.5 -12q0 -7 14 -15.5t10 -21.5q-3 -8 -16 -16t-16 -12q-5 -8 -1.5 -18.5t10.5 -16.5
+q2 -2 1.5 -4t-3.5 -4.5t-5.5 -4t-6.5 -3.5l-3 -2q-11 -5 -20.5 6t-13.5 26q-7 25 -16 30q-23 8 -29 -1q-5 13 -41 26q-25 9 -58 4q6 1 0 15q-7 15 -19 12q3 6 4 17.5t1 13.5q3 13 12 23q1 1 7 8.5t9.5 13.5t0.5 6q35 -4 50 11q5 5 11.5 17t10.5 17q9 6 14 5.5t14.5 -5.5
+t14.5 -5q14 -1 15.5 11t-7.5 20q12 -1 3 17q-4 7 -8 9q-12 4 -27 -5q-8 -4 2 -8q-1 1 -9.5 -10.5t-16.5 -17.5t-16 5q-1 1 -5.5 13.5t-9.5 13.5q-8 0 -16 -15q3 8 -11 15t-24 8q19 12 -8 27q-7 4 -20.5 5t-19.5 -4q-5 -7 -5.5 -11.5t5 -8t10.5 -5.5t11.5 -4t8.5 -3
+q14 -10 8 -14q-2 -1 -8.5 -3.5t-11.5 -4.5t-6 -4q-3 -4 0 -14t-2 -14q-5 5 -9 17.5t-7 16.5q7 -9 -25 -6l-10 1q-4 0 -16 -2t-20.5 -1t-13.5 8q-4 8 0 20q1 4 4 2q-4 3 -11 9.5t-10 8.5q-46 -15 -94 -41q6 -1 12 1q5 2 13 6.5t10 5.5q34 14 42 7l5 5q14 -16 20 -25
+q-7 4 -30 1q-20 -6 -22 -12q7 -12 5 -18q-4 3 -11.5 10t-14.5 11t-15 5q-16 0 -22 -1q-146 -80 -235 -222q7 -7 12 -8q4 -1 5 -9t2.5 -11t11.5 3q9 -8 3 -19q1 1 44 -27q19 -17 21 -21q3 -11 -10 -18q-1 2 -9 9t-9 4q-3 -5 0.5 -18.5t10.5 -12.5q-7 0 -9.5 -16t-2.5 -35.5
+t-1 -23.5l2 -1q-3 -12 5.5 -34.5t21.5 -19.5q-13 -3 20 -43q6 -8 8 -9q3 -2 12 -7.5t15 -10t10 -10.5q4 -5 10 -22.5t14 -23.5q-2 -6 9.5 -20t10.5 -23q-1 0 -2.5 -1t-2.5 -1q3 -7 15.5 -14t15.5 -13q1 -3 2 -10t3 -11t8 -2q2 20 -24 62q-15 25 -17 29q-3 5 -5.5 15.5
+t-4.5 14.5q2 0 6 -1.5t8.5 -3.5t7.5 -4t2 -3q-3 -7 2 -17.5t12 -18.5t17 -19t12 -13q6 -6 14 -19.5t0 -13.5q9 0 20 -10.5t17 -19.5q5 -8 8 -26t5 -24q2 -7 8.5 -13.5t12.5 -9.5l16 -8t13 -7q5 -2 18.5 -10.5t21.5 -11.5q10 -4 16 -4t14.5 2.5t13.5 3.5q15 2 29 -15t21 -21
+q36 -19 55 -11q-2 -1 0.5 -7.5t8 -15.5t9 -14.5t5.5 -8.5q5 -6 18 -15t18 -15q6 4 7 9q-3 -8 7 -20t18 -10q14 3 14 32q-31 -15 -49 18q0 1 -2.5 5.5t-4 8.5t-2.5 8.5t0 7.5t5 3q9 0 10 3.5t-2 12.5t-4 13q-1 8 -11 20t-12 15q-5 -9 -16 -8t-16 9q0 -1 -1.5 -5.5t-1.5 -6.5
+q-13 0 -15 1q1 3 2.5 17.5t3.5 22.5q1 4 5.5 12t7.5 14.5t4 12.5t-4.5 9.5t-17.5 2.5q-19 -1 -26 -20q-1 -3 -3 -10.5t-5 -11.5t-9 -7q-7 -3 -24 -2t-24 5q-13 8 -22.5 29t-9.5 37q0 10 2.5 26.5t3 25t-5.5 24.5q3 2 9 9.5t10 10.5q2 1 4.5 1.5t4.5 0t4 1.5t3 6q-1 1 -4 3
+q-3 3 -4 3q7 -3 28.5 1.5t27.5 -1.5q15 -11 22 2q0 1 -2.5 9.5t-0.5 13.5q5 -27 29 -9q3 -3 15.5 -5t17.5 -5q3 -2 7 -5.5t5.5 -4.5t5 0.5t8.5 6.5q10 -14 12 -24q11 -40 19 -44q7 -3 11 -2t4.5 9.5t0 14t-1.5 12.5l-1 8v18l-1 8q-15 3 -18.5 12t1.5 18.5t15 18.5q1 1 8 3.5
+t15.5 6.5t12.5 8q21 19 15 35q7 0 11 9q-1 0 -5 3t-7.5 5t-4.5 2q9 5 2 16q5 3 7.5 11t7.5 10q9 -12 21 -2q8 8 1 16q5 7 20.5 10.5t18.5 9.5q7 -2 8 2t1 12t3 12q4 5 15 9t13 5l17 11q3 4 0 4q18 -2 31 11q10 11 -6 20q3 6 -3 9.5t-15 5.5q3 1 11.5 0.5t10.5 1.5
+q15 10 -7 16q-17 5 -43 -12zM879 10q206 36 351 189q-3 3 -12.5 4.5t-12.5 3.5q-18 7 -24 8q1 7 -2.5 13t-8 9t-12.5 8t-11 7q-2 2 -7 6t-7 5.5t-7.5 4.5t-8.5 2t-10 -1l-3 -1q-3 -1 -5.5 -2.5t-5.5 -3t-4 -3t0 -2.5q-21 17 -36 22q-5 1 -11 5.5t-10.5 7t-10 1.5t-11.5 -7
+q-5 -5 -6 -15t-2 -13q-7 5 0 17.5t2 18.5q-3 6 -10.5 4.5t-12 -4.5t-11.5 -8.5t-9 -6.5t-8.5 -5.5t-8.5 -7.5q-3 -4 -6 -12t-5 -11q-2 4 -11.5 6.5t-9.5 5.5q2 -10 4 -35t5 -38q7 -31 -12 -48q-27 -25 -29 -40q-4 -22 12 -26q0 -7 -8 -20.5t-7 -21.5q0 -6 2 -16z" />
+ <glyph glyph-name="wrench" unicode="&#xf0ad;" horiz-adv-x="1664"
+d="M384 64q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1028 484l-682 -682q-37 -37 -90 -37q-52 0 -91 37l-106 108q-38 36 -38 90q0 53 38 91l681 681q39 -98 114.5 -173.5t173.5 -114.5zM1662 919q0 -39 -23 -106q-47 -134 -164.5 -217.5
+t-258.5 -83.5q-185 0 -316.5 131.5t-131.5 316.5t131.5 316.5t316.5 131.5q58 0 121.5 -16.5t107.5 -46.5q16 -11 16 -28t-16 -28l-293 -169v-224l193 -107q5 3 79 48.5t135.5 81t70.5 35.5q15 0 23.5 -10t8.5 -25z" />
+ <glyph glyph-name="tasks" unicode="&#xf0ae;" horiz-adv-x="1792"
+d="M1024 128h640v128h-640v-128zM640 640h1024v128h-1024v-128zM1280 1152h384v128h-384v-128zM1792 320v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 832v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19
+t-19 45v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45zM1792 1344v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="filter" unicode="&#xf0b0;" horiz-adv-x="1408"
+d="M1403 1241q17 -41 -14 -70l-493 -493v-742q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-256 256q-19 19 -19 45v486l-493 493q-31 29 -14 70q17 39 59 39h1280q42 0 59 -39z" />
+ <glyph glyph-name="briefcase" unicode="&#xf0b1;" horiz-adv-x="1792"
+d="M640 1280h512v128h-512v-128zM1792 640v-480q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v480h672v-160q0 -26 19 -45t45 -19h320q26 0 45 19t19 45v160h672zM1024 640v-128h-256v128h256zM1792 1120v-384h-1792v384q0 66 47 113t113 47h352v160q0 40 28 68
+t68 28h576q40 0 68 -28t28 -68v-160h352q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="fullscreen" unicode="&#xf0b2;"
+d="M1283 995l-355 -355l355 -355l144 144q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l144 144l-355 355l-355 -355l144 -144q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45v448q0 42 40 59q39 17 69 -14l144 -144
+l355 355l-355 355l-144 -144q-19 -19 -45 -19q-12 0 -24 5q-40 17 -40 59v448q0 26 19 45t45 19h448q42 0 59 -40q17 -39 -14 -69l-144 -144l355 -355l355 355l-144 144q-31 30 -14 69q17 40 59 40h448q26 0 45 -19t19 -45v-448q0 -42 -39 -59q-13 -5 -25 -5q-26 0 -45 19z
+" />
+ <glyph glyph-name="group" unicode="&#xf0c0;" horiz-adv-x="1920"
+d="M593 640q-162 -5 -265 -128h-134q-82 0 -138 40.5t-56 118.5q0 353 124 353q6 0 43.5 -21t97.5 -42.5t119 -21.5q67 0 133 23q-5 -37 -5 -66q0 -139 81 -256zM1664 3q0 -120 -73 -189.5t-194 -69.5h-874q-121 0 -194 69.5t-73 189.5q0 53 3.5 103.5t14 109t26.5 108.5
+t43 97.5t62 81t85.5 53.5t111.5 20q10 0 43 -21.5t73 -48t107 -48t135 -21.5t135 21.5t107 48t73 48t43 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5zM640 1280q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75
+t75 -181zM1344 896q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5zM1920 671q0 -78 -56 -118.5t-138 -40.5h-134q-103 123 -265 128q81 117 81 256q0 29 -5 66q66 -23 133 -23q59 0 119 21.5t97.5 42.5
+t43.5 21q124 0 124 -353zM1792 1280q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181z" />
+ <glyph glyph-name="link" unicode="&#xf0c1;" horiz-adv-x="1664"
+d="M1456 320q0 40 -28 68l-208 208q-28 28 -68 28q-42 0 -72 -32q3 -3 19 -18.5t21.5 -21.5t15 -19t13 -25.5t3.5 -27.5q0 -40 -28 -68t-68 -28q-15 0 -27.5 3.5t-25.5 13t-19 15t-21.5 21.5t-18.5 19q-33 -31 -33 -73q0 -40 28 -68l206 -207q27 -27 68 -27q40 0 68 26
+l147 146q28 28 28 67zM753 1025q0 40 -28 68l-206 207q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68l208 -208q27 -27 68 -27q42 0 72 31q-3 3 -19 18.5t-21.5 21.5t-15 19t-13 25.5t-3.5 27.5q0 40 28 68t68 28q15 0 27.5 -3.5t25.5 -13t19 -15
+t21.5 -21.5t18.5 -19q33 31 33 73zM1648 320q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-206 207q-83 83 -83 203q0 123 88 209l-88 88q-86 -88 -208 -88q-120 0 -204 84l-208 208q-84 84 -84 204t85 203l147 146q83 83 203 83q121 0 204 -85l206 -207
+q83 -83 83 -203q0 -123 -88 -209l88 -88q86 88 208 88q120 0 204 -84l208 -208q84 -84 84 -204z" />
+ <glyph glyph-name="cloud" unicode="&#xf0c2;" horiz-adv-x="1920"
+d="M1920 384q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5q0 132 71 241.5t187 163.5q-2 28 -2 43q0 212 150 362t362 150q158 0 286.5 -88t187.5 -230q70 62 166 62q106 0 181 -75t75 -181q0 -75 -41 -138q129 -30 213 -134.5t84 -239.5z
+" />
+ <glyph glyph-name="beaker" unicode="&#xf0c3;" horiz-adv-x="1664"
+d="M1527 88q56 -89 21.5 -152.5t-140.5 -63.5h-1152q-106 0 -140.5 63.5t21.5 152.5l503 793v399h-64q-26 0 -45 19t-19 45t19 45t45 19h512q26 0 45 -19t19 -45t-19 -45t-45 -19h-64v-399zM748 813l-272 -429h712l-272 429l-20 31v37v399h-128v-399v-37z" />
+ <glyph glyph-name="cut" unicode="&#xf0c4;" horiz-adv-x="1792"
+d="M960 640q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1260 576l507 -398q28 -20 25 -56q-5 -35 -35 -51l-128 -64q-13 -7 -29 -7q-17 0 -31 8l-690 387l-110 -66q-8 -4 -12 -5q14 -49 10 -97q-7 -77 -56 -147.5t-132 -123.5q-132 -84 -277 -84
+q-136 0 -222 78q-90 84 -79 207q7 76 56 147t131 124q132 84 278 84q83 0 151 -31q9 13 22 22l122 73l-122 73q-13 9 -22 22q-68 -31 -151 -31q-146 0 -278 84q-82 53 -131 124t-56 147q-5 59 15.5 113t63.5 93q85 79 222 79q145 0 277 -84q83 -52 132 -123t56 -148
+q4 -48 -10 -97q4 -1 12 -5l110 -66l690 387q14 8 31 8q16 0 29 -7l128 -64q30 -16 35 -51q3 -36 -25 -56zM579 836q46 42 21 108t-106 117q-92 59 -192 59q-74 0 -113 -36q-46 -42 -21 -108t106 -117q92 -59 192 -59q74 0 113 36zM494 91q81 51 106 117t-21 108
+q-39 36 -113 36q-100 0 -192 -59q-81 -51 -106 -117t21 -108q39 -36 113 -36q100 0 192 59zM672 704l96 -58v11q0 36 33 56l14 8l-79 47l-26 -26q-3 -3 -10 -11t-12 -12q-2 -2 -4 -3.5t-3 -2.5zM896 480l96 -32l736 576l-128 64l-768 -431v-113l-160 -96l9 -8q2 -2 7 -6
+q4 -4 11 -12t11 -12l26 -26zM1600 64l128 64l-520 408l-177 -138q-2 -3 -13 -7z" />
+ <glyph glyph-name="copy" unicode="&#xf0c5;" horiz-adv-x="1792"
+d="M1696 1152q40 0 68 -28t28 -68v-1216q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v288h-544q-40 0 -68 28t-28 68v672q0 40 20 88t48 76l408 408q28 28 76 48t88 20h416q40 0 68 -28t28 -68v-328q68 40 128 40h416zM1152 939l-299 -299h299v299zM512 1323l-299 -299
+h299v299zM708 676l316 316v416h-384v-416q0 -40 -28 -68t-68 -28h-416v-640h512v256q0 40 20 88t48 76zM1664 -128v1152h-384v-416q0 -40 -28 -68t-68 -28h-416v-640h896z" />
+ <glyph glyph-name="paper_clip" unicode="&#xf0c6;" horiz-adv-x="1408"
+d="M1404 151q0 -117 -79 -196t-196 -79q-135 0 -235 100l-777 776q-113 115 -113 271q0 159 110 270t269 111q158 0 273 -113l605 -606q10 -10 10 -22q0 -16 -30.5 -46.5t-46.5 -30.5q-13 0 -23 10l-606 607q-79 77 -181 77q-106 0 -179 -75t-73 -181q0 -105 76 -181
+l776 -777q63 -63 145 -63q64 0 106 42t42 106q0 82 -63 145l-581 581q-26 24 -60 24q-29 0 -48 -19t-19 -48q0 -32 25 -59l410 -410q10 -10 10 -22q0 -16 -31 -47t-47 -31q-12 0 -22 10l-410 410q-63 61 -63 149q0 82 57 139t139 57q88 0 149 -63l581 -581q100 -98 100 -235
+z" />
+ <glyph glyph-name="save" unicode="&#xf0c7;"
+d="M384 0h768v384h-768v-384zM1280 0h128v896q0 14 -10 38.5t-20 34.5l-281 281q-10 10 -34 20t-39 10v-416q0 -40 -28 -68t-68 -28h-576q-40 0 -68 28t-28 68v416h-128v-1280h128v416q0 40 28 68t68 28h832q40 0 68 -28t28 -68v-416zM896 928v320q0 13 -9.5 22.5t-22.5 9.5
+h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-320q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5zM1536 896v-928q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h928q40 0 88 -20t76 -48l280 -280q28 -28 48 -76t20 -88z" />
+ <glyph glyph-name="sign_blank" unicode="&#xf0c8;"
+d="M1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="reorder" unicode="&#xf0c9;"
+d="M1536 192v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1536 704v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1536 1216v-128q0 -26 -19 -45
+t-45 -19h-1408q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="ul" unicode="&#xf0ca;" horiz-adv-x="1792"
+d="M384 128q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM384 640q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1792 224v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5
+t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5zM384 1152q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1792 736v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5z
+M1792 1248v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5z" />
+ <glyph glyph-name="ol" unicode="&#xf0cb;" horiz-adv-x="1792"
+d="M381 -84q0 -80 -54.5 -126t-135.5 -46q-106 0 -172 66l57 88q49 -45 106 -45q29 0 50.5 14.5t21.5 42.5q0 64 -105 56l-26 56q8 10 32.5 43.5t42.5 54t37 38.5v1q-16 0 -48.5 -1t-48.5 -1v-53h-106v152h333v-88l-95 -115q51 -12 81 -49t30 -88zM383 543v-159h-362
+q-6 36 -6 54q0 51 23.5 93t56.5 68t66 47.5t56.5 43.5t23.5 45q0 25 -14.5 38.5t-39.5 13.5q-46 0 -81 -58l-85 59q24 51 71.5 79.5t105.5 28.5q73 0 123 -41.5t50 -112.5q0 -50 -34 -91.5t-75 -64.5t-75.5 -50.5t-35.5 -52.5h127v60h105zM1792 224v-192q0 -13 -9.5 -22.5
+t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5zM384 1123v-99h-335v99h107q0 41 0.5 121.5t0.5 121.5v12h-2q-8 -17 -50 -54l-71 76l136 127h106v-404h108zM1792 736v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216
+q-13 0 -22.5 9.5t-9.5 22.5v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5zM1792 1248v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5z" />
+ <glyph glyph-name="strikethrough" unicode="&#xf0cc;" horiz-adv-x="1792"
+d="M1760 640q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1728q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h1728zM483 704q-28 35 -51 80q-48 98 -48 188q0 181 134 309q133 127 393 127q50 0 167 -19q66 -12 177 -48q10 -38 21 -118q14 -123 14 -183q0 -18 -5 -45l-12 -3l-84 6
+l-14 2q-50 149 -103 205q-88 91 -210 91q-114 0 -182 -59q-67 -58 -67 -146q0 -73 66 -140t279 -129q69 -20 173 -66q58 -28 95 -52h-743zM990 448h411q7 -39 7 -92q0 -111 -41 -212q-23 -56 -71 -104q-37 -35 -109 -81q-80 -48 -153 -66q-80 -21 -203 -21q-114 0 -195 23
+l-140 40q-57 16 -72 28q-8 8 -8 22v13q0 108 -2 156q-1 30 0 68l2 37v44l102 2q15 -34 30 -71t22.5 -56t12.5 -27q35 -57 80 -94q43 -36 105 -57q59 -22 132 -22q64 0 139 27q77 26 122 86q47 61 47 129q0 84 -81 157q-34 29 -137 71z" />
+ <glyph glyph-name="underline" unicode="&#xf0cd;"
+d="M48 1313q-37 2 -45 4l-3 88q13 1 40 1q60 0 112 -4q132 -7 166 -7q86 0 168 3q116 4 146 5q56 0 86 2l-1 -14l2 -64v-9q-60 -9 -124 -9q-60 0 -79 -25q-13 -14 -13 -132q0 -13 0.5 -32.5t0.5 -25.5l1 -229l14 -280q6 -124 51 -202q35 -59 96 -92q88 -47 177 -47
+q104 0 191 28q56 18 99 51q48 36 65 64q36 56 53 114q21 73 21 229q0 79 -3.5 128t-11 122.5t-13.5 159.5l-4 59q-5 67 -24 88q-34 35 -77 34l-100 -2l-14 3l2 86h84l205 -10q76 -3 196 10l18 -2q6 -38 6 -51q0 -7 -4 -31q-45 -12 -84 -13q-73 -11 -79 -17q-15 -15 -15 -41
+q0 -7 1.5 -27t1.5 -31q8 -19 22 -396q6 -195 -15 -304q-15 -76 -41 -122q-38 -65 -112 -123q-75 -57 -182 -89q-109 -33 -255 -33q-167 0 -284 46q-119 47 -179 122q-61 76 -83 195q-16 80 -16 237v333q0 188 -17 213q-25 36 -147 39zM1536 -96v64q0 14 -9 23t-23 9h-1472
+q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h1472q14 0 23 9t9 23z" />
+ <glyph glyph-name="table" unicode="&#xf0ce;" horiz-adv-x="1664"
+d="M512 160v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM512 544v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1024 160v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23
+v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM512 928v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1024 544v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1536 160v192
+q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1024 928v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1536 544v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192
+q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1536 928v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1664 1248v-1088q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1344q66 0 113 -47t47 -113
+z" />
+ <glyph glyph-name="magic" unicode="&#xf0d0;" horiz-adv-x="1664"
+d="M1190 955l293 293l-107 107l-293 -293zM1637 1248q0 -27 -18 -45l-1286 -1286q-18 -18 -45 -18t-45 18l-198 198q-18 18 -18 45t18 45l1286 1286q18 18 45 18t45 -18l198 -198q18 -18 18 -45zM286 1438l98 -30l-98 -30l-30 -98l-30 98l-98 30l98 30l30 98zM636 1276
+l196 -60l-196 -60l-60 -196l-60 196l-196 60l196 60l60 196zM1566 798l98 -30l-98 -30l-30 -98l-30 98l-98 30l98 30l30 98zM926 1438l98 -30l-98 -30l-30 -98l-30 98l-98 30l98 30l30 98z" />
+ <glyph glyph-name="truck" unicode="&#xf0d1;" horiz-adv-x="1792"
+d="M640 128q0 52 -38 90t-90 38t-90 -38t-38 -90t38 -90t90 -38t90 38t38 90zM256 640h384v256h-158q-13 0 -22 -9l-195 -195q-9 -9 -9 -22v-30zM1536 128q0 52 -38 90t-90 38t-90 -38t-38 -90t38 -90t90 -38t90 38t38 90zM1792 1216v-1024q0 -15 -4 -26.5t-13.5 -18.5
+t-16.5 -11.5t-23.5 -6t-22.5 -2t-25.5 0t-22.5 0.5q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-64q-3 0 -22.5 -0.5t-25.5 0t-22.5 2t-23.5 6t-16.5 11.5t-13.5 18.5t-4 26.5q0 26 19 45t45 19v320q0 8 -0.5 35t0 38
+t2.5 34.5t6.5 37t14 30.5t22.5 30l198 198q19 19 50.5 32t58.5 13h160v192q0 26 19 45t45 19h1024q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="pinterest" unicode="&#xf0d2;"
+d="M1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103q-111 0 -218 32q59 93 78 164q9 34 54 211q20 -39 73 -67.5t114 -28.5q121 0 216 68.5t147 188.5t52 270q0 114 -59.5 214t-172.5 163t-255 63q-105 0 -196 -29t-154.5 -77t-109 -110.5t-67 -129.5t-21.5 -134
+q0 -104 40 -183t117 -111q30 -12 38 20q2 7 8 31t8 30q6 23 -11 43q-51 61 -51 151q0 151 104.5 259.5t273.5 108.5q151 0 235.5 -82t84.5 -213q0 -170 -68.5 -289t-175.5 -119q-61 0 -98 43.5t-23 104.5q8 35 26.5 93.5t30 103t11.5 75.5q0 50 -27 83t-77 33
+q-62 0 -105 -57t-43 -142q0 -73 25 -122l-99 -418q-17 -70 -13 -177q-206 91 -333 281t-127 423q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="pinterest_sign" unicode="&#xf0d3;"
+d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-725q85 122 108 210q9 34 53 209q21 -39 73.5 -67t112.5 -28q181 0 295.5 147.5t114.5 373.5q0 84 -35 162.5t-96.5 139t-152.5 97t-197 36.5q-104 0 -194.5 -28.5t-153 -76.5
+t-107.5 -109.5t-66.5 -128t-21.5 -132.5q0 -102 39.5 -180t116.5 -110q13 -5 23.5 0t14.5 19q10 44 15 61q6 23 -11 42q-50 62 -50 150q0 150 103.5 256.5t270.5 106.5q149 0 232.5 -81t83.5 -210q0 -168 -67.5 -286t-173.5 -118q-60 0 -97 43.5t-23 103.5q8 34 26.5 92.5
+t29.5 102t11 74.5q0 49 -26.5 81.5t-75.5 32.5q-61 0 -103.5 -56.5t-42.5 -139.5q0 -72 24 -121l-98 -414q-24 -100 -7 -254h-183q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960z" />
+ <glyph glyph-name="google_plus_sign" unicode="&#xf0d4;"
+d="M917 631q0 26 -6 64h-362v-132h217q-3 -24 -16.5 -50t-37.5 -53t-66.5 -44.5t-96.5 -17.5q-99 0 -169 71t-70 171t70 171t169 71q92 0 153 -59l104 101q-108 100 -257 100q-160 0 -272 -112.5t-112 -271.5t112 -271.5t272 -112.5q165 0 266.5 105t101.5 270zM1262 585
+h109v110h-109v110h-110v-110h-110v-110h110v-110h110v110zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="google_plus" unicode="&#xf0d5;" horiz-adv-x="2304"
+d="M1437 623q0 -208 -87 -370.5t-248 -254t-369 -91.5q-149 0 -285 58t-234 156t-156 234t-58 285t58 285t156 234t234 156t285 58q286 0 491 -192l-199 -191q-117 113 -292 113q-123 0 -227.5 -62t-165.5 -168.5t-61 -232.5t61 -232.5t165.5 -168.5t227.5 -62
+q83 0 152.5 23t114.5 57.5t78.5 78.5t49 83t21.5 74h-416v252h692q12 -63 12 -122zM2304 745v-210h-209v-209h-210v209h-209v210h209v209h210v-209h209z" />
+ <glyph glyph-name="money" unicode="&#xf0d6;" horiz-adv-x="1920"
+d="M768 384h384v96h-128v448h-114l-148 -137l77 -80q42 37 55 57h2v-288h-128v-96zM1280 640q0 -70 -21 -142t-59.5 -134t-101.5 -101t-138 -39t-138 39t-101.5 101t-59.5 134t-21 142t21 142t59.5 134t101.5 101t138 39t138 -39t101.5 -101t59.5 -134t21 -142zM1792 384
+v512q-106 0 -181 75t-75 181h-1152q0 -106 -75 -181t-181 -75v-512q106 0 181 -75t75 -181h1152q0 106 75 181t181 75zM1920 1216v-1152q0 -26 -19 -45t-45 -19h-1792q-26 0 -45 19t-19 45v1152q0 26 19 45t45 19h1792q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="caret_down" unicode="&#xf0d7;" horiz-adv-x="1024"
+d="M1024 832q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45t19 45t45 19h896q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="caret_up" unicode="&#xf0d8;" horiz-adv-x="1024"
+d="M1024 320q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45z" />
+ <glyph glyph-name="caret_left" unicode="&#xf0d9;" horiz-adv-x="640"
+d="M640 1088v-896q0 -26 -19 -45t-45 -19t-45 19l-448 448q-19 19 -19 45t19 45l448 448q19 19 45 19t45 -19t19 -45z" />
+ <glyph glyph-name="caret_right" unicode="&#xf0da;" horiz-adv-x="640"
+d="M576 640q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19t-19 45v896q0 26 19 45t45 19t45 -19l448 -448q19 -19 19 -45z" />
+ <glyph glyph-name="columns" unicode="&#xf0db;" horiz-adv-x="1664"
+d="M160 0h608v1152h-640v-1120q0 -13 9.5 -22.5t22.5 -9.5zM1536 32v1120h-640v-1152h608q13 0 22.5 9.5t9.5 22.5zM1664 1248v-1216q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1344q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="sort" unicode="&#xf0dc;" horiz-adv-x="1024"
+d="M1024 448q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45t19 45t45 19h896q26 0 45 -19t19 -45zM1024 832q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45z" />
+ <glyph glyph-name="sort_down" unicode="&#xf0dd;" horiz-adv-x="1024"
+d="M1024 448q0 -26 -19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45t19 45t45 19h896q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="sort_up" unicode="&#xf0de;" horiz-adv-x="1024"
+d="M1024 832q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45z" />
+ <glyph glyph-name="envelope_alt" unicode="&#xf0e0;" horiz-adv-x="1792"
+d="M1792 826v-794q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v794q44 -49 101 -87q362 -246 497 -345q57 -42 92.5 -65.5t94.5 -48t110 -24.5h1h1q51 0 110 24.5t94.5 48t92.5 65.5q170 123 498 345q57 39 100 87zM1792 1120q0 -79 -49 -151t-122 -123
+q-376 -261 -468 -325q-10 -7 -42.5 -30.5t-54 -38t-52 -32.5t-57.5 -27t-50 -9h-1h-1q-23 0 -50 9t-57.5 27t-52 32.5t-54 38t-42.5 30.5q-91 64 -262 182.5t-205 142.5q-62 42 -117 115.5t-55 136.5q0 78 41.5 130t118.5 52h1472q65 0 112.5 -47t47.5 -113z" />
+ <glyph glyph-name="linkedin" unicode="&#xf0e1;"
+d="M349 911v-991h-330v991h330zM370 1217q1 -73 -50.5 -122t-135.5 -49h-2q-82 0 -132 49t-50 122q0 74 51.5 122.5t134.5 48.5t133 -48.5t51 -122.5zM1536 488v-568h-329v530q0 105 -40.5 164.5t-126.5 59.5q-63 0 -105.5 -34.5t-63.5 -85.5q-11 -30 -11 -81v-553h-329
+q2 399 2 647t-1 296l-1 48h329v-144h-2q20 32 41 56t56.5 52t87 43.5t114.5 15.5q171 0 275 -113.5t104 -332.5z" />
+ <glyph glyph-name="undo" unicode="&#xf0e2;"
+d="M1536 640q0 -156 -61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5
+t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298z" />
+ <glyph glyph-name="legal" unicode="&#xf0e3;" horiz-adv-x="1792"
+d="M1771 0q0 -53 -37 -90l-107 -108q-39 -37 -91 -37q-53 0 -90 37l-363 364q-38 36 -38 90q0 53 43 96l-256 256l-126 -126q-14 -14 -34 -14t-34 14q2 -2 12.5 -12t12.5 -13t10 -11.5t10 -13.5t6 -13.5t5.5 -16.5t1.5 -18q0 -38 -28 -68q-3 -3 -16.5 -18t-19 -20.5
+t-18.5 -16.5t-22 -15.5t-22 -9t-26 -4.5q-40 0 -68 28l-408 408q-28 28 -28 68q0 13 4.5 26t9 22t15.5 22t16.5 18.5t20.5 19t18 16.5q30 28 68 28q10 0 18 -1.5t16.5 -5.5t13.5 -6t13.5 -10t11.5 -10t13 -12.5t12 -12.5q-14 14 -14 34t14 34l348 348q14 14 34 14t34 -14
+q-2 2 -12.5 12t-12.5 13t-10 11.5t-10 13.5t-6 13.5t-5.5 16.5t-1.5 18q0 38 28 68q3 3 16.5 18t19 20.5t18.5 16.5t22 15.5t22 9t26 4.5q40 0 68 -28l408 -408q28 -28 28 -68q0 -13 -4.5 -26t-9 -22t-15.5 -22t-16.5 -18.5t-20.5 -19t-18 -16.5q-30 -28 -68 -28
+q-10 0 -18 1.5t-16.5 5.5t-13.5 6t-13.5 10t-11.5 10t-13 12.5t-12 12.5q14 -14 14 -34t-14 -34l-126 -126l256 -256q43 43 96 43q52 0 91 -37l363 -363q37 -39 37 -91z" />
+ <glyph glyph-name="dashboard" unicode="&#xf0e4;" horiz-adv-x="1792"
+d="M384 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM576 832q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1004 351l101 382q6 26 -7.5 48.5t-38.5 29.5
+t-48 -6.5t-30 -39.5l-101 -382q-60 -5 -107 -43.5t-63 -98.5q-20 -77 20 -146t117 -89t146 20t89 117q16 60 -6 117t-72 91zM1664 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1024 1024q0 53 -37.5 90.5
+t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1472 832q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1792 384q0 -261 -141 -483q-19 -29 -54 -29h-1402q-35 0 -54 29
+q-141 221 -141 483q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="comment_alt" unicode="&#xf0e5;" horiz-adv-x="1792"
+d="M896 1152q-204 0 -381.5 -69.5t-282 -187.5t-104.5 -255q0 -112 71.5 -213.5t201.5 -175.5l87 -50l-27 -96q-24 -91 -70 -172q152 63 275 171l43 38l57 -6q69 -8 130 -8q204 0 381.5 69.5t282 187.5t104.5 255t-104.5 255t-282 187.5t-381.5 69.5zM1792 640
+q0 -174 -120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22h-5q-15 0 -27 10.5t-16 27.5v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281q0 174 120 321.5
+t326 233t450 85.5t450 -85.5t326 -233t120 -321.5z" />
+ <glyph glyph-name="comments_alt" unicode="&#xf0e6;" horiz-adv-x="1792"
+d="M704 1152q-153 0 -286 -52t-211.5 -141t-78.5 -191q0 -82 53 -158t149 -132l97 -56l-35 -84q34 20 62 39l44 31l53 -10q78 -14 153 -14q153 0 286 52t211.5 141t78.5 191t-78.5 191t-211.5 141t-286 52zM704 1280q191 0 353.5 -68.5t256.5 -186.5t94 -257t-94 -257
+t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224q0 139 94 257t256.5 186.5
+t353.5 68.5zM1526 111q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132q58 -4 88 -4q161 0 309 45t264 129
+q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5z" />
+ <glyph glyph-name="bolt" unicode="&#xf0e7;" horiz-adv-x="896"
+d="M885 970q18 -20 7 -44l-540 -1157q-13 -25 -42 -25q-4 0 -14 2q-17 5 -25.5 19t-4.5 30l197 808l-406 -101q-4 -1 -12 -1q-18 0 -31 11q-18 15 -13 39l201 825q4 14 16 23t28 9h328q19 0 32 -12.5t13 -29.5q0 -8 -5 -18l-171 -463l396 98q8 2 12 2q19 0 34 -15z" />
+ <glyph glyph-name="sitemap" unicode="&#xf0e8;" horiz-adv-x="1792"
+d="M1792 288v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320
+q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192q0 52 38 90t90 38h512v192h-96q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-96v-192h512q52 0 90 -38t38 -90v-192h96q40 0 68 -28t28 -68
+z" />
+ <glyph glyph-name="umbrella" unicode="&#xf0e9;" horiz-adv-x="1664"
+d="M896 708v-580q0 -104 -76 -180t-180 -76t-180 76t-76 180q0 26 19 45t45 19t45 -19t19 -45q0 -50 39 -89t89 -39t89 39t39 89v580q33 11 64 11t64 -11zM1664 681q0 -13 -9.5 -22.5t-22.5 -9.5q-11 0 -23 10q-49 46 -93 69t-102 23q-68 0 -128 -37t-103 -97
+q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -28 -17q-18 0 -29 17q-4 6 -14.5 24t-17.5 28q-43 60 -102.5 97t-127.5 37t-127.5 -37t-102.5 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -29 -17q-17 0 -28 17q-4 6 -14.5 24t-17.5 28q-43 60 -103 97t-128 37q-58 0 -102 -23t-93 -69
+q-12 -10 -23 -10q-13 0 -22.5 9.5t-9.5 22.5q0 5 1 7q45 183 172.5 319.5t298 204.5t360.5 68q140 0 274.5 -40t246.5 -113.5t194.5 -187t115.5 -251.5q1 -2 1 -7zM896 1408v-98q-42 2 -64 2t-64 -2v98q0 26 19 45t45 19t45 -19t19 -45z" />
+ <glyph glyph-name="paste" unicode="&#xf0ea;" horiz-adv-x="1792"
+d="M768 -128h896v640h-416q-40 0 -68 28t-28 68v416h-384v-1152zM1024 1312v64q0 13 -9.5 22.5t-22.5 9.5h-704q-13 0 -22.5 -9.5t-9.5 -22.5v-64q0 -13 9.5 -22.5t22.5 -9.5h704q13 0 22.5 9.5t9.5 22.5zM1280 640h299l-299 299v-299zM1792 512v-672q0 -40 -28 -68t-68 -28
+h-960q-40 0 -68 28t-28 68v160h-544q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h1088q40 0 68 -28t28 -68v-328q21 -13 36 -28l408 -408q28 -28 48 -76t20 -88z" />
+ <glyph glyph-name="light_bulb" unicode="&#xf0eb;" horiz-adv-x="1024"
+d="M736 960q0 -13 -9.5 -22.5t-22.5 -9.5t-22.5 9.5t-9.5 22.5q0 46 -54 71t-106 25q-13 0 -22.5 9.5t-9.5 22.5t9.5 22.5t22.5 9.5q50 0 99.5 -16t87 -54t37.5 -90zM896 960q0 72 -34.5 134t-90 101.5t-123 62t-136.5 22.5t-136.5 -22.5t-123 -62t-90 -101.5t-34.5 -134
+q0 -101 68 -180q10 -11 30.5 -33t30.5 -33q128 -153 141 -298h228q13 145 141 298q10 11 30.5 33t30.5 33q68 79 68 180zM1024 960q0 -155 -103 -268q-45 -49 -74.5 -87t-59.5 -95.5t-34 -107.5q47 -28 47 -82q0 -37 -25 -64q25 -27 25 -64q0 -52 -45 -81q13 -23 13 -47
+q0 -46 -31.5 -71t-77.5 -25q-20 -44 -60 -70t-87 -26t-87 26t-60 70q-46 0 -77.5 25t-31.5 71q0 24 13 47q-45 29 -45 81q0 37 25 64q-25 27 -25 64q0 54 47 82q-4 50 -34 107.5t-59.5 95.5t-74.5 87q-103 113 -103 268q0 99 44.5 184.5t117 142t164 89t186.5 32.5
+t186.5 -32.5t164 -89t117 -142t44.5 -184.5z" />
+ <glyph glyph-name="exchange" unicode="&#xf0ec;" horiz-adv-x="1792"
+d="M1792 352v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5q-12 0 -24 10l-319 320q-9 9 -9 22q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h1376q13 0 22.5 -9.5t9.5 -22.5zM1792 896q0 -14 -9 -23l-320 -320q-9 -9 -23 -9
+q-13 0 -22.5 9.5t-9.5 22.5v192h-1376q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h1376v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23z" />
+ <glyph glyph-name="cloud_download" unicode="&#xf0ed;" horiz-adv-x="1920"
+d="M1280 608q0 14 -9 23t-23 9h-224v352q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-352h-224q-13 0 -22.5 -9.5t-9.5 -22.5q0 -14 9 -23l352 -352q9 -9 23 -9t23 9l351 351q10 12 10 24zM1920 384q0 -159 -112.5 -271.5t-271.5 -112.5h-1088
+q-185 0 -316.5 131.5t-131.5 316.5q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5z" />
+ <glyph glyph-name="cloud_upload" unicode="&#xf0ee;" horiz-adv-x="1920"
+d="M1280 672q0 14 -9 23l-352 352q-9 9 -23 9t-23 -9l-351 -351q-10 -12 -10 -24q0 -14 9 -23t23 -9h224v-352q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v352h224q13 0 22.5 9.5t9.5 22.5zM1920 384q0 -159 -112.5 -271.5t-271.5 -112.5h-1088
+q-185 0 -316.5 131.5t-131.5 316.5q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5z" />
+ <glyph glyph-name="user_md" unicode="&#xf0f0;" horiz-adv-x="1408"
+d="M384 192q0 -26 -19 -45t-45 -19t-45 19t-19 45t19 45t45 19t45 -19t19 -45zM1408 131q0 -121 -73 -190t-194 -69h-874q-121 0 -194 69t-73 190q0 68 5.5 131t24 138t47.5 132.5t81 103t120 60.5q-22 -52 -22 -120v-203q-58 -20 -93 -70t-35 -111q0 -80 56 -136t136 -56
+t136 56t56 136q0 61 -35.5 111t-92.5 70v203q0 62 25 93q132 -104 295 -104t295 104q25 -31 25 -93v-64q-106 0 -181 -75t-75 -181v-89q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 52 38 90t90 38t90 -38t38 -90v-89q-32 -29 -32 -71q0 -40 28 -68
+t68 -28t68 28t28 68q0 42 -32 71v89q0 68 -34.5 127.5t-93.5 93.5q0 10 0.5 42.5t0 48t-2.5 41.5t-7 47t-13 40q68 -15 120 -60.5t81 -103t47.5 -132.5t24 -138t5.5 -131zM1088 1024q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5
+t271.5 -112.5t112.5 -271.5z" />
+ <glyph glyph-name="stethoscope" unicode="&#xf0f1;" horiz-adv-x="1408"
+d="M1280 832q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 832q0 -62 -35.5 -111t-92.5 -70v-395q0 -159 -131.5 -271.5t-316.5 -112.5t-316.5 112.5t-131.5 271.5v132q-164 20 -274 128t-110 252v512q0 26 19 45t45 19q6 0 16 -2q17 30 47 48
+t65 18q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5q-33 0 -64 18v-402q0 -106 94 -181t226 -75t226 75t94 181v402q-31 -18 -64 -18q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5q35 0 65 -18t47 -48q10 2 16 2q26 0 45 -19t19 -45v-512q0 -144 -110 -252
+t-274 -128v-132q0 -106 94 -181t226 -75t226 75t94 181v395q-57 21 -92.5 70t-35.5 111q0 80 56 136t136 56t136 -56t56 -136z" />
+ <glyph glyph-name="suitcase" unicode="&#xf0f2;" horiz-adv-x="1792"
+d="M640 1152h512v128h-512v-128zM288 1152v-1280h-64q-92 0 -158 66t-66 158v832q0 92 66 158t158 66h64zM1408 1152v-1280h-1024v1280h128v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h128zM1792 928v-832q0 -92 -66 -158t-158 -66h-64v1280h64q92 0 158 -66
+t66 -158z" />
+ <glyph glyph-name="bell_alt" unicode="&#xf0f3;" horiz-adv-x="1792"
+d="M912 -160q0 16 -16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16zM1728 128q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38t-38 90q50 42 91 88t85 119.5t74.5 158.5
+t50 206t19.5 260q0 152 117 282.5t307 158.5q-8 19 -8 39q0 40 28 68t68 28t68 -28t28 -68q0 -20 -8 -39q190 -28 307 -158.5t117 -282.5q0 -139 19.5 -260t50 -206t74.5 -158.5t85 -119.5t91 -88z" />
+ <glyph glyph-name="coffee" unicode="&#xf0f4;" horiz-adv-x="1920"
+d="M1664 896q0 80 -56 136t-136 56h-64v-384h64q80 0 136 56t56 136zM0 128h1792q0 -106 -75 -181t-181 -75h-1280q-106 0 -181 75t-75 181zM1856 896q0 -159 -112.5 -271.5t-271.5 -112.5h-64v-32q0 -92 -66 -158t-158 -66h-704q-92 0 -158 66t-66 158v736q0 26 19 45
+t45 19h1152q159 0 271.5 -112.5t112.5 -271.5z" />
+ <glyph glyph-name="food" unicode="&#xf0f5;" horiz-adv-x="1408"
+d="M640 1472v-640q0 -61 -35.5 -111t-92.5 -70v-779q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v779q-57 20 -92.5 70t-35.5 111v640q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45
+t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45zM1408 1472v-1600q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v512h-224q-13 0 -22.5 9.5t-9.5 22.5v800q0 132 94 226t226 94h256q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="file_text_alt" unicode="&#xf0f6;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M384 736q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23v64zM1120 512q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h704zM1120 256q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704
+q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h704z" />
+ <glyph glyph-name="building" unicode="&#xf0f7;" horiz-adv-x="1408"
+d="M384 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M640 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M1152 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M640 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M1152 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M640 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M1152 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M640 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 992v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M896 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 1248v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M896 -128h384v1536h-1152v-1536h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224zM1408 1472v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="hospital" unicode="&#xf0f8;" horiz-adv-x="1408"
+d="M384 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M640 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM384 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M1152 224v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM896 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M640 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 480v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M896 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5zM1152 736v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5z
+M896 -128h384v1152h-256v-32q0 -40 -28 -68t-68 -28h-448q-40 0 -68 28t-28 68v32h-256v-1152h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224zM896 1056v320q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-96h-128v96q0 13 -9.5 22.5
+t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-320q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v96h128v-96q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5zM1408 1088v-1280q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v1280q0 26 19 45t45 19h320
+v288q0 40 28 68t68 28h448q40 0 68 -28t28 -68v-288h320q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="ambulance" unicode="&#xf0f9;" horiz-adv-x="1920"
+d="M640 128q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM256 640h384v256h-158q-14 -2 -22 -9l-195 -195q-7 -12 -9 -22v-30zM1536 128q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5
+t90.5 37.5t37.5 90.5zM1664 800v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23zM1920 1344v-1152
+q0 -26 -19 -45t-45 -19h-192q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-128q-26 0 -45 19t-19 45t19 45t45 19v416q0 26 13 58t32 51l198 198q19 19 51 32t58 13h160v320q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="medkit" unicode="&#xf0fa;" horiz-adv-x="1792"
+d="M1280 416v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23zM640 1152h512v128h-512v-128zM256 1152v-1280h-32
+q-92 0 -158 66t-66 158v832q0 92 66 158t158 66h32zM1440 1152v-1280h-1088v1280h160v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h160zM1792 928v-832q0 -92 -66 -158t-158 -66h-32v1280h32q92 0 158 -66t66 -158z" />
+ <glyph glyph-name="fighter_jet" unicode="&#xf0fb;" horiz-adv-x="1920"
+d="M1920 576q-1 -32 -288 -96l-352 -32l-224 -64h-64l-293 -352h69q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-96h-160h-64v32h64v416h-160l-192 -224h-96l-32 32v192h32v32h128v8l-192 24v128l192 24v8h-128v32h-32v192l32 32h96l192 -224h160v416h-64v32h64h160h96
+q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-69l293 -352h64l224 -64l352 -32q128 -28 200 -52t80 -34z" />
+ <glyph glyph-name="beer" unicode="&#xf0fc;" horiz-adv-x="1664"
+d="M640 640v384h-256v-256q0 -53 37.5 -90.5t90.5 -37.5h128zM1664 192v-192h-1152v192l128 192h-128q-159 0 -271.5 112.5t-112.5 271.5v320l-64 64l32 128h480l32 128h960l32 -192l-64 -32v-800z" />
+ <glyph glyph-name="h_sign" unicode="&#xf0fd;"
+d="M1280 192v896q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-512v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-896q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h512v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45zM1536 1120v-960
+q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="f0fe" unicode="&#xf0fe;"
+d="M1280 576v128q0 26 -19 45t-45 19h-320v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-320q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h320v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h320q26 0 45 19t19 45zM1536 1120v-960
+q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="double_angle_left" unicode="&#xf100;" horiz-adv-x="1024"
+d="M627 160q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23zM1011 160q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23
+t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23z" />
+ <glyph glyph-name="double_angle_right" unicode="&#xf101;" horiz-adv-x="1024"
+d="M595 576q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23zM979 576q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23
+l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+ <glyph glyph-name="double_angle_up" unicode="&#xf102;" horiz-adv-x="1152"
+d="M1075 224q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23zM1075 608q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393
+q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+ <glyph glyph-name="double_angle_down" unicode="&#xf103;" horiz-adv-x="1152"
+d="M1075 672q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23zM1075 1056q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23
+t10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23z" />
+ <glyph glyph-name="angle_left" unicode="&#xf104;" horiz-adv-x="640"
+d="M627 992q0 -13 -10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23z" />
+ <glyph glyph-name="angle_right" unicode="&#xf105;" horiz-adv-x="640"
+d="M595 576q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+ <glyph glyph-name="angle_up" unicode="&#xf106;" horiz-adv-x="1152"
+d="M1075 352q0 -13 -10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23z" />
+ <glyph glyph-name="angle_down" unicode="&#xf107;" horiz-adv-x="1152"
+d="M1075 800q0 -13 -10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23z" />
+ <glyph glyph-name="desktop" unicode="&#xf108;" horiz-adv-x="1920"
+d="M1792 544v832q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-832q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5zM1920 1376v-1088q0 -66 -47 -113t-113 -47h-544q0 -37 16 -77.5t32 -71t16 -43.5q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19
+t-19 45q0 14 16 44t32 70t16 78h-544q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="laptop" unicode="&#xf109;" horiz-adv-x="1920"
+d="M416 256q-66 0 -113 47t-47 113v704q0 66 47 113t113 47h1088q66 0 113 -47t47 -113v-704q0 -66 -47 -113t-113 -47h-1088zM384 1120v-704q0 -13 9.5 -22.5t22.5 -9.5h1088q13 0 22.5 9.5t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5z
+M1760 192h160v-96q0 -40 -47 -68t-113 -28h-1600q-66 0 -113 28t-47 68v96h160h1600zM1040 96q16 0 16 16t-16 16h-160q-16 0 -16 -16t16 -16h160z" />
+ <glyph glyph-name="tablet" unicode="&#xf10a;" horiz-adv-x="1152"
+d="M640 128q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1024 288v960q0 13 -9.5 22.5t-22.5 9.5h-832q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h832q13 0 22.5 9.5t9.5 22.5zM1152 1248v-1088q0 -66 -47 -113t-113 -47h-832
+q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h832q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="mobile_phone" unicode="&#xf10b;" horiz-adv-x="768"
+d="M464 128q0 33 -23.5 56.5t-56.5 23.5t-56.5 -23.5t-23.5 -56.5t23.5 -56.5t56.5 -23.5t56.5 23.5t23.5 56.5zM672 288v704q0 13 -9.5 22.5t-22.5 9.5h-512q-13 0 -22.5 -9.5t-9.5 -22.5v-704q0 -13 9.5 -22.5t22.5 -9.5h512q13 0 22.5 9.5t9.5 22.5zM480 1136
+q0 16 -16 16h-160q-16 0 -16 -16t16 -16h160q16 0 16 16zM768 1152v-1024q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90v1024q0 52 38 90t90 38h512q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="circle_blank" unicode="&#xf10c;"
+d="M768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103
+t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="quote_left" unicode="&#xf10d;" horiz-adv-x="1664"
+d="M768 576v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136z
+M1664 576v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136z" />
+ <glyph glyph-name="quote_right" unicode="&#xf10e;" horiz-adv-x="1664"
+d="M768 1216v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136zM1664 1216
+v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136z" />
+ <glyph glyph-name="spinner" unicode="&#xf110;" horiz-adv-x="1792"
+d="M526 142q0 -53 -37.5 -90.5t-90.5 -37.5q-52 0 -90 38t-38 90q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1024 -64q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM320 640q0 -53 -37.5 -90.5t-90.5 -37.5
+t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1522 142q0 -52 -38 -90t-90 -38q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM558 1138q0 -66 -47 -113t-113 -47t-113 47t-47 113t47 113t113 47t113 -47t47 -113z
+M1728 640q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1088 1344q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1618 1138q0 -93 -66 -158.5t-158 -65.5q-93 0 -158.5 65.5t-65.5 158.5
+q0 92 65.5 158t158.5 66q92 0 158 -66t66 -158z" />
+ <glyph glyph-name="circle" unicode="&#xf111;"
+d="M1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="reply" unicode="&#xf112;" horiz-adv-x="1792"
+d="M1792 416q0 -166 -127 -451q-3 -7 -10.5 -24t-13.5 -30t-13 -22q-12 -17 -28 -17q-15 0 -23.5 10t-8.5 25q0 9 2.5 26.5t2.5 23.5q5 68 5 123q0 101 -17.5 181t-48.5 138.5t-80 101t-105.5 69.5t-133 42.5t-154 21.5t-175.5 6h-224v-256q0 -26 -19 -45t-45 -19t-45 19
+l-512 512q-19 19 -19 45t19 45l512 512q19 19 45 19t45 -19t19 -45v-256h224q713 0 875 -403q53 -134 53 -333z" />
+ <glyph glyph-name="github_alt" unicode="&#xf113;" horiz-adv-x="1664"
+d="M640 320q0 -40 -12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82t12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82zM1280 320q0 -40 -12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82t12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82zM1440 320
+q0 120 -69 204t-187 84q-41 0 -195 -21q-71 -11 -157 -11t-157 11q-152 21 -195 21q-118 0 -187 -84t-69 -204q0 -88 32 -153.5t81 -103t122 -60t140 -29.5t149 -7h168q82 0 149 7t140 29.5t122 60t81 103t32 153.5zM1664 496q0 -207 -61 -331q-38 -77 -105.5 -133t-141 -86
+t-170 -47.5t-171.5 -22t-167 -4.5q-78 0 -142 3t-147.5 12.5t-152.5 30t-137 51.5t-121 81t-86 115q-62 123 -62 331q0 237 136 396q-27 82 -27 170q0 116 51 218q108 0 190 -39.5t189 -123.5q147 35 309 35q148 0 280 -32q105 82 187 121t189 39q51 -102 51 -218
+q0 -87 -27 -168q136 -160 136 -398z" />
+ <glyph glyph-name="folder_close_alt" unicode="&#xf114;" horiz-adv-x="1664"
+d="M1536 224v704q0 40 -28 68t-68 28h-704q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-960q0 -40 28 -68t68 -28h1216q40 0 68 28t28 68zM1664 928v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158v960q0 92 66 158t158 66h320
+q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158z" />
+ <glyph glyph-name="folder_open_alt" unicode="&#xf115;" horiz-adv-x="1920"
+d="M1781 605q0 35 -53 35h-1088q-40 0 -85.5 -21.5t-71.5 -52.5l-294 -363q-18 -24 -18 -40q0 -35 53 -35h1088q40 0 86 22t71 53l294 363q18 22 18 39zM640 768h768v160q0 40 -28 68t-68 28h-576q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68
+v-853l256 315q44 53 116 87.5t140 34.5zM1909 605q0 -62 -46 -120l-295 -363q-43 -53 -116 -87.5t-140 -34.5h-1088q-92 0 -158 66t-66 158v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h192q54 0 99 -24.5t67 -70.5q15 -32 15 -68z
+" />
+ <glyph glyph-name="expand_alt" unicode="&#xf116;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="collapse_alt" unicode="&#xf117;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="smile" unicode="&#xf118;"
+d="M1134 461q-37 -121 -138 -195t-228 -74t-228 74t-138 195q-8 25 4 48.5t38 31.5q25 8 48.5 -4t31.5 -38q25 -80 92.5 -129.5t151.5 -49.5t151.5 49.5t92.5 129.5q8 26 32 38t49 4t37 -31.5t4 -48.5zM640 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5
+t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1152 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5
+t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="frown" unicode="&#xf119;"
+d="M1134 307q8 -25 -4 -48.5t-37 -31.5t-49 4t-32 38q-25 80 -92.5 129.5t-151.5 49.5t-151.5 -49.5t-92.5 -129.5q-8 -26 -31.5 -38t-48.5 -4q-26 8 -38 31.5t-4 48.5q37 121 138 195t228 74t228 -74t138 -195zM640 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5
+t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1152 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204
+t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="meh" unicode="&#xf11a;"
+d="M1152 448q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h640q26 0 45 -19t19 -45zM640 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1152 896q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5
+t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="gamepad" unicode="&#xf11b;" horiz-adv-x="1920"
+d="M832 448v128q0 14 -9 23t-23 9h-192v192q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-192h-192q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h192v-192q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v192h192q14 0 23 9t9 23zM1408 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5
+t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1664 640q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1920 512q0 -212 -150 -362t-362 -150q-192 0 -338 128h-220q-146 -128 -338 -128q-212 0 -362 150
+t-150 362t150 362t362 150h896q212 0 362 -150t150 -362z" />
+ <glyph glyph-name="keyboard" unicode="&#xf11c;" horiz-adv-x="1920"
+d="M384 368v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM512 624v-96q0 -16 -16 -16h-224q-16 0 -16 16v96q0 16 16 16h224q16 0 16 -16zM384 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1408 368v-96q0 -16 -16 -16
+h-864q-16 0 -16 16v96q0 16 16 16h864q16 0 16 -16zM768 624v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM640 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1024 624v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16
+h96q16 0 16 -16zM896 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1280 624v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1664 368v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1152 880v-96
+q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1408 880v-96q0 -16 -16 -16h-96q-16 0 -16 16v96q0 16 16 16h96q16 0 16 -16zM1664 880v-352q0 -16 -16 -16h-224q-16 0 -16 16v96q0 16 16 16h112v240q0 16 16 16h96q16 0 16 -16zM1792 128v896h-1664v-896
+h1664zM1920 1024v-896q0 -53 -37.5 -90.5t-90.5 -37.5h-1664q-53 0 -90.5 37.5t-37.5 90.5v896q0 53 37.5 90.5t90.5 37.5h1664q53 0 90.5 -37.5t37.5 -90.5z" />
+ <glyph glyph-name="flag_alt" unicode="&#xf11d;" horiz-adv-x="1792"
+d="M1664 491v616q-169 -91 -306 -91q-82 0 -145 32q-100 49 -184 76.5t-178 27.5q-173 0 -403 -127v-599q245 113 433 113q55 0 103.5 -7.5t98 -26t77 -31t82.5 -39.5l28 -14q44 -22 101 -22q120 0 293 92zM320 1280q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9
+h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1792 1216v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102
+q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55z" />
+ <glyph glyph-name="flag_checkered" unicode="&#xf11e;" horiz-adv-x="1792"
+d="M832 536v192q-181 -16 -384 -117v-185q205 96 384 110zM832 954v197q-172 -8 -384 -126v-189q215 111 384 118zM1664 491v184q-235 -116 -384 -71v224q-20 6 -39 15q-5 3 -33 17t-34.5 17t-31.5 15t-34.5 15.5t-32.5 13t-36 12.5t-35 8.5t-39.5 7.5t-39.5 4t-44 2
+q-23 0 -49 -3v-222h19q102 0 192.5 -29t197.5 -82q19 -9 39 -15v-188q42 -17 91 -17q120 0 293 92zM1664 918v189q-169 -91 -306 -91q-45 0 -78 8v-196q148 -42 384 90zM320 1280q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266
+q-29 17 -46.5 46t-17.5 64q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1792 1216v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8
+q-32 19 -32 56v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55z" />
+ <glyph glyph-name="terminal" unicode="&#xf120;" horiz-adv-x="1664"
+d="M585 553l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23zM1664 96v-64q0 -14 -9 -23t-23 -9h-960q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h960q14 0 23 -9
+t9 -23z" />
+ <glyph glyph-name="code" unicode="&#xf121;" horiz-adv-x="1920"
+d="M617 137l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23t10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23zM1208 1204l-373 -1291q-4 -13 -15.5 -19.5t-23.5 -2.5l-62 17q-13 4 -19.5 15.5t-2.5 24.5
+l373 1291q4 13 15.5 19.5t23.5 2.5l62 -17q13 -4 19.5 -15.5t2.5 -24.5zM1865 553l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23t10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23z" />
+ <glyph glyph-name="reply_all" unicode="&#xf122;" horiz-adv-x="1792"
+d="M640 454v-70q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45t19 45l512 512q29 31 70 14q39 -17 39 -59v-69l-397 -398q-19 -19 -19 -45t19 -45zM1792 416q0 -58 -17 -133.5t-38.5 -138t-48 -125t-40.5 -90.5l-20 -40q-8 -17 -28 -17q-6 0 -9 1
+q-25 8 -23 34q43 400 -106 565q-64 71 -170.5 110.5t-267.5 52.5v-251q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45t19 45l512 512q29 31 70 14q39 -17 39 -59v-262q411 -28 599 -221q169 -173 169 -509z" />
+ <glyph glyph-name="star_half_empty" unicode="&#xf123;" horiz-adv-x="1664"
+d="M1186 579l257 250l-356 52l-66 10l-30 60l-159 322v-963l59 -31l318 -168l-60 355l-12 66zM1638 841l-363 -354l86 -500q5 -33 -6 -51.5t-34 -18.5q-17 0 -40 12l-449 236l-449 -236q-23 -12 -40 -12q-23 0 -34 18.5t-6 51.5l86 500l-364 354q-32 32 -23 59.5t54 34.5
+l502 73l225 455q20 41 49 41q28 0 49 -41l225 -455l502 -73q45 -7 54 -34.5t-24 -59.5z" />
+ <glyph glyph-name="location_arrow" unicode="&#xf124;" horiz-adv-x="1408"
+d="M1401 1187l-640 -1280q-17 -35 -57 -35q-5 0 -15 2q-22 5 -35.5 22.5t-13.5 39.5v576h-576q-22 0 -39.5 13.5t-22.5 35.5t4 42t29 30l1280 640q13 7 29 7q27 0 45 -19q15 -14 18.5 -34.5t-6.5 -39.5z" />
+ <glyph glyph-name="crop" unicode="&#xf125;" horiz-adv-x="1664"
+d="M557 256h595v595zM512 301l595 595h-595v-595zM1664 224v-192q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v224h-864q-14 0 -23 9t-9 23v864h-224q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h224v224q0 14 9 23t23 9h192q14 0 23 -9t9 -23
+v-224h851l246 247q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-247 -246v-851h224q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="code_fork" unicode="&#xf126;" horiz-adv-x="1024"
+d="M288 64q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM288 1216q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM928 1088q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM1024 1088q0 -52 -26 -96.5t-70 -69.5
+q-2 -287 -226 -414q-67 -38 -203 -81q-128 -40 -169.5 -71t-41.5 -100v-26q44 -25 70 -69.5t26 -96.5q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 52 26 96.5t70 69.5v820q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136q0 -52 -26 -96.5t-70 -69.5v-497
+q54 26 154 57q55 17 87.5 29.5t70.5 31t59 39.5t40.5 51t28 69.5t8.5 91.5q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136z" />
+ <glyph glyph-name="unlink" unicode="&#xf127;" horiz-adv-x="1664"
+d="M439 265l-256 -256q-11 -9 -23 -9t-23 9q-9 10 -9 23t9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23zM608 224v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23v320q0 14 9 23t23 9t23 -9t9 -23zM384 448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23t9 23t23 9h320
+q14 0 23 -9t9 -23zM1648 320q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-334 335q-21 21 -42 56l239 18l273 -274q27 -27 68 -27.5t68 26.5l147 146q28 28 28 67q0 40 -28 68l-274 275l18 239q35 -21 56 -42l336 -336q84 -86 84 -204zM1031 1044l-239 -18
+l-273 274q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68l274 -274l-18 -240q-35 21 -56 42l-336 336q-84 86 -84 204q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l334 -335q21 -21 42 -56zM1664 960q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9
+t-9 23t9 23t23 9h320q14 0 23 -9t9 -23zM1120 1504v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23v320q0 14 9 23t23 9t23 -9t9 -23zM1527 1353l-256 -256q-11 -9 -23 -9t-23 9q-9 10 -9 23t9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23z" />
+ <glyph glyph-name="question" unicode="&#xf128;" horiz-adv-x="1024"
+d="M704 280v-240q0 -16 -12 -28t-28 -12h-240q-16 0 -28 12t-12 28v240q0 16 12 28t28 12h240q16 0 28 -12t12 -28zM1020 880q0 -54 -15.5 -101t-35 -76.5t-55 -59.5t-57.5 -43.5t-61 -35.5q-41 -23 -68.5 -65t-27.5 -67q0 -17 -12 -32.5t-28 -15.5h-240q-15 0 -25.5 18.5
+t-10.5 37.5v45q0 83 65 156.5t143 108.5q59 27 84 56t25 76q0 42 -46.5 74t-107.5 32q-65 0 -108 -29q-35 -25 -107 -115q-13 -16 -31 -16q-12 0 -25 8l-164 125q-13 10 -15.5 25t5.5 28q160 266 464 266q80 0 161 -31t146 -83t106 -127.5t41 -158.5z" />
+ <glyph glyph-name="_279" unicode="&#xf129;" horiz-adv-x="640"
+d="M640 192v-128q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64v384h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-576h64q26 0 45 -19t19 -45zM512 1344v-192q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v192
+q0 26 19 45t45 19h256q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="exclamation" unicode="&#xf12a;" horiz-adv-x="640"
+d="M512 288v-224q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v224q0 26 19 45t45 19h256q26 0 45 -19t19 -45zM542 1344l-28 -768q-1 -26 -20.5 -45t-45.5 -19h-256q-26 0 -45.5 19t-20.5 45l-28 768q-1 26 17.5 45t44.5 19h320q26 0 44.5 -19t17.5 -45z" />
+ <glyph glyph-name="superscript" unicode="&#xf12b;"
+d="M897 167v-167h-248l-159 252l-24 42q-8 9 -11 21h-3q-1 -3 -2.5 -6.5t-3.5 -8t-3 -6.5q-10 -20 -25 -44l-155 -250h-258v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109z
+M1534 846v-206h-514l-3 27q-4 28 -4 46q0 64 26 117t65 86.5t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q83 65 188 65q110 0 178 -59.5t68 -158.5q0 -56 -24.5 -103t-62 -76.5t-81.5 -58.5t-82 -50.5
+t-65.5 -51.5t-30.5 -63h232v80h126z" />
+ <glyph glyph-name="subscript" unicode="&#xf12c;"
+d="M897 167v-167h-248l-159 252l-24 42q-8 9 -11 21h-3q-1 -3 -2.5 -6.5t-3.5 -8t-3 -6.5q-10 -20 -25 -44l-155 -250h-258v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109z
+M1536 -50v-206h-514l-4 27q-3 45 -3 46q0 64 26 117t65 86.5t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q80 65 188 65q110 0 178 -59.5t68 -158.5q0 -66 -34.5 -118.5t-84 -86t-99.5 -62.5t-87 -63t-41 -73
+h232v80h126z" />
+ <glyph glyph-name="_283" unicode="&#xf12d;" horiz-adv-x="1920"
+d="M896 128l336 384h-768l-336 -384h768zM1909 1205q15 -34 9.5 -71.5t-30.5 -65.5l-896 -1024q-38 -44 -96 -44h-768q-38 0 -69.5 20.5t-47.5 54.5q-15 34 -9.5 71.5t30.5 65.5l896 1024q38 44 96 44h768q38 0 69.5 -20.5t47.5 -54.5z" />
+ <glyph glyph-name="puzzle_piece" unicode="&#xf12e;" horiz-adv-x="1664"
+d="M1664 438q0 -81 -44.5 -135t-123.5 -54q-41 0 -77.5 17.5t-59 38t-56.5 38t-71 17.5q-110 0 -110 -124q0 -39 16 -115t15 -115v-5q-22 0 -33 -1q-34 -3 -97.5 -11.5t-115.5 -13.5t-98 -5q-61 0 -103 26.5t-42 83.5q0 37 17.5 71t38 56.5t38 59t17.5 77.5q0 79 -54 123.5
+t-135 44.5q-84 0 -143 -45.5t-59 -127.5q0 -43 15 -83t33.5 -64.5t33.5 -53t15 -50.5q0 -45 -46 -89q-37 -35 -117 -35q-95 0 -245 24q-9 2 -27.5 4t-27.5 4l-13 2q-1 0 -3 1q-2 0 -2 1v1024q2 -1 17.5 -3.5t34 -5t21.5 -3.5q150 -24 245 -24q80 0 117 35q46 44 46 89
+q0 22 -15 50.5t-33.5 53t-33.5 64.5t-15 83q0 82 59 127.5t144 45.5q80 0 134 -44.5t54 -123.5q0 -41 -17.5 -77.5t-38 -59t-38 -56.5t-17.5 -71q0 -57 42 -83.5t103 -26.5q64 0 180 15t163 17v-2q-1 -2 -3.5 -17.5t-5 -34t-3.5 -21.5q-24 -150 -24 -245q0 -80 35 -117
+q44 -46 89 -46q22 0 50.5 15t53 33.5t64.5 33.5t83 15q82 0 127.5 -59t45.5 -143z" />
+ <glyph glyph-name="microphone" unicode="&#xf130;" horiz-adv-x="1152"
+d="M1152 832v-128q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-217 24 -364.5 187.5t-147.5 384.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -185 131.5 -316.5t316.5 -131.5
+t316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45zM896 1216v-512q0 -132 -94 -226t-226 -94t-226 94t-94 226v512q0 132 94 226t226 94t226 -94t94 -226z" />
+ <glyph glyph-name="microphone_off" unicode="&#xf131;" horiz-adv-x="1408"
+d="M271 591l-101 -101q-42 103 -42 214v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -53 15 -113zM1385 1193l-361 -361v-128q0 -132 -94 -226t-226 -94q-55 0 -109 19l-96 -96q97 -51 205 -51q185 0 316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128
+q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-125 13 -235 81l-254 -254q-10 -10 -23 -10t-23 10l-82 82q-10 10 -10 23t10 23l1234 1234q10 10 23 10t23 -10l82 -82q10 -10 10 -23
+t-10 -23zM1005 1325l-621 -621v512q0 132 94 226t226 94q102 0 184.5 -59t116.5 -152z" />
+ <glyph glyph-name="shield" unicode="&#xf132;" horiz-adv-x="1280"
+d="M1088 576v640h-448v-1137q119 63 213 137q235 184 235 360zM1280 1344v-768q0 -86 -33.5 -170.5t-83 -150t-118 -127.5t-126.5 -103t-121 -77.5t-89.5 -49.5t-42.5 -20q-12 -6 -26 -6t-26 6q-16 7 -42.5 20t-89.5 49.5t-121 77.5t-126.5 103t-118 127.5t-83 150
+t-33.5 170.5v768q0 26 19 45t45 19h1152q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="calendar_empty" unicode="&#xf133;" horiz-adv-x="1664"
+d="M128 -128h1408v1024h-1408v-1024zM512 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1280 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1664 1152v-1280
+q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="fire_extinguisher" unicode="&#xf134;" horiz-adv-x="1408"
+d="M512 1344q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1408 1376v-320q0 -16 -12 -25q-8 -7 -20 -7q-4 0 -7 1l-448 96q-11 2 -18 11t-7 20h-256v-102q111 -23 183.5 -111t72.5 -203v-800q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v800
+q0 106 62.5 190.5t161.5 114.5v111h-32q-59 0 -115 -23.5t-91.5 -53t-66 -66.5t-40.5 -53.5t-14 -24.5q-17 -35 -57 -35q-16 0 -29 7q-23 12 -31.5 37t3.5 49q5 10 14.5 26t37.5 53.5t60.5 70t85 67t108.5 52.5q-25 42 -25 86q0 66 47 113t113 47t113 -47t47 -113
+q0 -33 -14 -64h302q0 11 7 20t18 11l448 96q3 1 7 1q12 0 20 -7q12 -9 12 -25z" />
+ <glyph glyph-name="rocket" unicode="&#xf135;" horiz-adv-x="1664"
+d="M1440 1088q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM1664 1376q0 -249 -75.5 -430.5t-253.5 -360.5q-81 -80 -195 -176l-20 -379q-2 -16 -16 -26l-384 -224q-7 -4 -16 -4q-12 0 -23 9l-64 64q-13 14 -8 32l85 276l-281 281l-276 -85q-3 -1 -9 -1
+q-14 0 -23 9l-64 64q-17 19 -5 39l224 384q10 14 26 16l379 20q96 114 176 195q188 187 358 258t431 71q14 0 24 -9.5t10 -22.5z" />
+ <glyph glyph-name="maxcdn" unicode="&#xf136;" horiz-adv-x="1792"
+d="M1745 763l-164 -763h-334l178 832q13 56 -15 88q-27 33 -83 33h-169l-204 -953h-334l204 953h-286l-204 -953h-334l204 953l-153 327h1276q101 0 189.5 -40.5t147.5 -113.5q60 -73 81 -168.5t0 -194.5z" />
+ <glyph glyph-name="chevron_sign_left" unicode="&#xf137;"
+d="M909 141l102 102q19 19 19 45t-19 45l-307 307l307 307q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45t19 -45l454 -454q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5
+t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="chevron_sign_right" unicode="&#xf138;"
+d="M717 141l454 454q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l307 -307l-307 -307q-19 -19 -19 -45t19 -45l102 -102q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5
+t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="chevron_sign_up" unicode="&#xf139;"
+d="M1165 397l102 102q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45t19 -45l102 -102q19 -19 45 -19t45 19l307 307l307 -307q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5
+t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="chevron_sign_down" unicode="&#xf13a;"
+d="M813 237l454 454q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-307 -307l-307 307q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l454 -454q19 -19 45 -19t45 19zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5
+t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="html5" unicode="&#xf13b;" horiz-adv-x="1408"
+d="M1130 939l16 175h-884l47 -534h612l-22 -228l-197 -53l-196 53l-13 140h-175l22 -278l362 -100h4v1l359 99l50 544h-644l-15 181h674zM0 1408h1408l-128 -1438l-578 -162l-574 162z" />
+ <glyph glyph-name="css3" unicode="&#xf13c;" horiz-adv-x="1792"
+d="M275 1408h1505l-266 -1333l-804 -267l-698 267l71 356h297l-29 -147l422 -161l486 161l68 339h-1208l58 297h1209l38 191h-1208z" />
+ <glyph glyph-name="anchor" unicode="&#xf13d;" horiz-adv-x="1792"
+d="M960 1280q0 26 -19 45t-45 19t-45 -19t-19 -45t19 -45t45 -19t45 19t19 45zM1792 352v-352q0 -22 -20 -30q-8 -2 -12 -2q-12 0 -23 9l-93 93q-119 -143 -318.5 -226.5t-429.5 -83.5t-429.5 83.5t-318.5 226.5l-93 -93q-9 -9 -23 -9q-4 0 -12 2q-20 8 -20 30v352
+q0 14 9 23t23 9h352q22 0 30 -20q8 -19 -7 -35l-100 -100q67 -91 189.5 -153.5t271.5 -82.5v647h-192q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h192v163q-58 34 -93 92.5t-35 128.5q0 106 75 181t181 75t181 -75t75 -181q0 -70 -35 -128.5t-93 -92.5v-163h192q26 0 45 -19
+t19 -45v-128q0 -26 -19 -45t-45 -19h-192v-647q149 20 271.5 82.5t189.5 153.5l-100 100q-15 16 -7 35q8 20 30 20h352q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="unlock_alt" unicode="&#xf13e;" horiz-adv-x="1152"
+d="M1056 768q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v576q0 40 28 68t68 28h32v320q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45q0 106 -75 181t-181 75t-181 -75t-75 -181
+v-320h736z" />
+ <glyph glyph-name="bullseye" unicode="&#xf140;"
+d="M1024 640q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181zM1152 640q0 159 -112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5zM1280 640q0 -212 -150 -362t-362 -150t-362 150
+t-150 362t150 362t362 150t362 -150t150 -362zM1408 640q0 130 -51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5zM1536 640
+q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="ellipsis_horizontal" unicode="&#xf141;" horiz-adv-x="1408"
+d="M384 800v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM896 800v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM1408 800v-192q0 -40 -28 -68t-68 -28h-192
+q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="ellipsis_vertical" unicode="&#xf142;" horiz-adv-x="384"
+d="M384 288v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM384 800v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68zM384 1312v-192q0 -40 -28 -68t-68 -28h-192
+q-40 0 -68 28t-28 68v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68z" />
+ <glyph glyph-name="_303" unicode="&#xf143;"
+d="M512 256q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM863 162q-13 233 -176.5 396.5t-396.5 176.5q-14 1 -24 -9t-10 -23v-128q0 -13 8.5 -22t21.5 -10q154 -11 264 -121t121 -264q1 -13 10 -21.5t22 -8.5h128
+q13 0 23 10t9 24zM1247 161q-5 154 -56 297.5t-139.5 260t-205 205t-260 139.5t-297.5 56q-14 1 -23 -9q-10 -10 -10 -23v-128q0 -13 9 -22t22 -10q204 -7 378 -111.5t278.5 -278.5t111.5 -378q1 -13 10 -22t22 -9h128q13 0 23 10q11 9 9 23zM1536 1120v-960
+q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="play_sign" unicode="&#xf144;"
+d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM1152 585q32 18 32 55t-32 55l-544 320q-31 19 -64 1q-32 -19 -32 -56v-640q0 -37 32 -56
+q16 -8 32 -8q17 0 32 9z" />
+ <glyph glyph-name="ticket" unicode="&#xf145;" horiz-adv-x="1792"
+d="M1024 1084l316 -316l-572 -572l-316 316zM813 105l618 618q19 19 19 45t-19 45l-362 362q-18 18 -45 18t-45 -18l-618 -618q-19 -19 -19 -45t19 -45l362 -362q18 -18 45 -18t45 18zM1702 742l-907 -908q-37 -37 -90.5 -37t-90.5 37l-126 126q56 56 56 136t-56 136
+t-136 56t-136 -56l-125 126q-37 37 -37 90.5t37 90.5l907 906q37 37 90.5 37t90.5 -37l125 -125q-56 -56 -56 -136t56 -136t136 -56t136 56l126 -125q37 -37 37 -90.5t-37 -90.5z" />
+ <glyph glyph-name="minus_sign_alt" unicode="&#xf146;"
+d="M1280 576v128q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-128q0 -26 19 -45t45 -19h896q26 0 45 19t19 45zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5
+t84.5 -203.5z" />
+ <glyph glyph-name="check_minus" unicode="&#xf147;" horiz-adv-x="1408"
+d="M1152 736v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h832q14 0 23 -9t9 -23zM1280 288v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113zM1408 1120v-832q0 -119 -84.5 -203.5
+t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="level_up" unicode="&#xf148;" horiz-adv-x="1024"
+d="M1018 933q-18 -37 -58 -37h-192v-864q0 -14 -9 -23t-23 -9h-704q-21 0 -29 18q-8 20 4 35l160 192q9 11 25 11h320v640h-192q-40 0 -58 37q-17 37 9 68l320 384q18 22 49 22t49 -22l320 -384q27 -32 9 -68z" />
+ <glyph glyph-name="level_down" unicode="&#xf149;" horiz-adv-x="1024"
+d="M32 1280h704q13 0 22.5 -9.5t9.5 -23.5v-863h192q40 0 58 -37t-9 -69l-320 -384q-18 -22 -49 -22t-49 22l-320 384q-26 31 -9 69q18 37 58 37h192v640h-320q-14 0 -25 11l-160 192q-13 14 -4 34q9 19 29 19z" />
+ <glyph glyph-name="check_sign" unicode="&#xf14a;"
+d="M685 237l614 614q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-467 -467l-211 211q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l358 -358q19 -19 45 -19t45 19zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5
+t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="edit_sign" unicode="&#xf14b;"
+d="M404 428l152 -152l-52 -52h-56v96h-96v56zM818 818q14 -13 -3 -30l-291 -291q-17 -17 -30 -3q-14 13 3 30l291 291q17 17 30 3zM544 128l544 544l-288 288l-544 -544v-288h288zM1152 736l92 92q28 28 28 68t-28 68l-152 152q-28 28 -68 28t-68 -28l-92 -92zM1536 1120
+v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_312" unicode="&#xf14c;"
+d="M1280 608v480q0 26 -19 45t-45 19h-480q-42 0 -59 -39q-17 -41 14 -70l144 -144l-534 -534q-19 -19 -19 -45t19 -45l102 -102q19 -19 45 -19t45 19l534 534l144 -144q18 -19 45 -19q12 0 25 5q39 17 39 59zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960
+q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="share_sign" unicode="&#xf14d;"
+d="M1005 435l352 352q19 19 19 45t-19 45l-352 352q-30 31 -69 14q-40 -17 -40 -59v-160q-119 0 -216 -19.5t-162.5 -51t-114 -79t-76.5 -95.5t-44.5 -109t-21.5 -111.5t-5 -110.5q0 -181 167 -404q11 -12 25 -12q7 0 13 3q22 9 19 33q-44 354 62 473q46 52 130 75.5
+t224 23.5v-160q0 -42 40 -59q12 -5 24 -5q26 0 45 19zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="compass" unicode="&#xf14e;"
+d="M640 448l256 128l-256 128v-256zM1024 1039v-542l-512 -256v542zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103
+t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="collapse" unicode="&#xf150;"
+d="M1145 861q18 -35 -5 -66l-320 -448q-19 -27 -52 -27t-52 27l-320 448q-23 31 -5 66q17 35 57 35h640q40 0 57 -35zM1280 160v960q0 13 -9.5 22.5t-22.5 9.5h-960q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5zM1536 1120
+v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="collapse_top" unicode="&#xf151;"
+d="M1145 419q-17 -35 -57 -35h-640q-40 0 -57 35q-18 35 5 66l320 448q19 27 52 27t52 -27l320 -448q23 -31 5 -66zM1280 160v960q0 13 -9.5 22.5t-22.5 9.5h-960q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5zM1536 1120v-960
+q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_317" unicode="&#xf152;"
+d="M1088 640q0 -33 -27 -52l-448 -320q-31 -23 -66 -5q-35 17 -35 57v640q0 40 35 57q35 18 66 -5l448 -320q27 -19 27 -52zM1280 160v960q0 14 -9 23t-23 9h-960q-14 0 -23 -9t-9 -23v-960q0 -14 9 -23t23 -9h960q14 0 23 9t9 23zM1536 1120v-960q0 -119 -84.5 -203.5
+t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="eur" unicode="&#xf153;" horiz-adv-x="1024"
+d="M976 229l35 -159q3 -12 -3 -22.5t-17 -14.5l-5 -1q-4 -2 -10.5 -3.5t-16 -4.5t-21.5 -5.5t-25.5 -5t-30 -5t-33.5 -4.5t-36.5 -3t-38.5 -1q-234 0 -409 130.5t-238 351.5h-95q-13 0 -22.5 9.5t-9.5 22.5v113q0 13 9.5 22.5t22.5 9.5h66q-2 57 1 105h-67q-14 0 -23 9
+t-9 23v114q0 14 9 23t23 9h98q67 210 243.5 338t400.5 128q102 0 194 -23q11 -3 20 -15q6 -11 3 -24l-43 -159q-3 -13 -14 -19.5t-24 -2.5l-4 1q-4 1 -11.5 2.5l-17.5 3.5t-22.5 3.5t-26 3t-29 2.5t-29.5 1q-126 0 -226 -64t-150 -176h468q16 0 25 -12q10 -12 7 -26
+l-24 -114q-5 -26 -32 -26h-488q-3 -37 0 -105h459q15 0 25 -12q9 -12 6 -27l-24 -112q-2 -11 -11 -18.5t-20 -7.5h-387q48 -117 149.5 -185.5t228.5 -68.5q18 0 36 1.5t33.5 3.5t29.5 4.5t24.5 5t18.5 4.5l12 3l5 2q13 5 26 -2q12 -7 15 -21z" />
+ <glyph glyph-name="gbp" unicode="&#xf154;" horiz-adv-x="1024"
+d="M1020 399v-367q0 -14 -9 -23t-23 -9h-956q-14 0 -23 9t-9 23v150q0 13 9.5 22.5t22.5 9.5h97v383h-95q-14 0 -23 9.5t-9 22.5v131q0 14 9 23t23 9h95v223q0 171 123.5 282t314.5 111q185 0 335 -125q9 -8 10 -20.5t-7 -22.5l-103 -127q-9 -11 -22 -12q-13 -2 -23 7
+q-5 5 -26 19t-69 32t-93 18q-85 0 -137 -47t-52 -123v-215h305q13 0 22.5 -9t9.5 -23v-131q0 -13 -9.5 -22.5t-22.5 -9.5h-305v-379h414v181q0 13 9 22.5t23 9.5h162q14 0 23 -9.5t9 -22.5z" />
+ <glyph glyph-name="usd" unicode="&#xf155;" horiz-adv-x="1024"
+d="M978 351q0 -153 -99.5 -263.5t-258.5 -136.5v-175q0 -14 -9 -23t-23 -9h-135q-13 0 -22.5 9.5t-9.5 22.5v175q-66 9 -127.5 31t-101.5 44.5t-74 48t-46.5 37.5t-17.5 18q-17 21 -2 41l103 135q7 10 23 12q15 2 24 -9l2 -2q113 -99 243 -125q37 -8 74 -8q81 0 142.5 43
+t61.5 122q0 28 -15 53t-33.5 42t-58.5 37.5t-66 32t-80 32.5q-39 16 -61.5 25t-61.5 26.5t-62.5 31t-56.5 35.5t-53.5 42.5t-43.5 49t-35.5 58t-21 66.5t-8.5 78q0 138 98 242t255 134v180q0 13 9.5 22.5t22.5 9.5h135q14 0 23 -9t9 -23v-176q57 -6 110.5 -23t87 -33.5
+t63.5 -37.5t39 -29t15 -14q17 -18 5 -38l-81 -146q-8 -15 -23 -16q-14 -3 -27 7q-3 3 -14.5 12t-39 26.5t-58.5 32t-74.5 26t-85.5 11.5q-95 0 -155 -43t-60 -111q0 -26 8.5 -48t29.5 -41.5t39.5 -33t56 -31t60.5 -27t70 -27.5q53 -20 81 -31.5t76 -35t75.5 -42.5t62 -50
+t53 -63.5t31.5 -76.5t13 -94z" />
+ <glyph glyph-name="inr" unicode="&#xf156;" horiz-adv-x="898"
+d="M898 1066v-102q0 -14 -9 -23t-23 -9h-168q-23 -144 -129 -234t-276 -110q167 -178 459 -536q14 -16 4 -34q-8 -18 -29 -18h-195q-16 0 -25 12q-306 367 -498 571q-9 9 -9 22v127q0 13 9.5 22.5t22.5 9.5h112q132 0 212.5 43t102.5 125h-427q-14 0 -23 9t-9 23v102
+q0 14 9 23t23 9h413q-57 113 -268 113h-145q-13 0 -22.5 9.5t-9.5 22.5v133q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-233q47 -61 64 -144h171q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="jpy" unicode="&#xf157;" horiz-adv-x="1027"
+d="M603 0h-172q-13 0 -22.5 9t-9.5 23v330h-288q-13 0 -22.5 9t-9.5 23v103q0 13 9.5 22.5t22.5 9.5h288v85h-288q-13 0 -22.5 9t-9.5 23v104q0 13 9.5 22.5t22.5 9.5h214l-321 578q-8 16 0 32q10 16 28 16h194q19 0 29 -18l215 -425q19 -38 56 -125q10 24 30.5 68t27.5 61
+l191 420q8 19 29 19h191q17 0 27 -16q9 -14 1 -31l-313 -579h215q13 0 22.5 -9.5t9.5 -22.5v-104q0 -14 -9.5 -23t-22.5 -9h-290v-85h290q13 0 22.5 -9.5t9.5 -22.5v-103q0 -14 -9.5 -23t-22.5 -9h-290v-330q0 -13 -9.5 -22.5t-22.5 -9.5z" />
+ <glyph glyph-name="rub" unicode="&#xf158;" horiz-adv-x="1280"
+d="M1043 971q0 100 -65 162t-171 62h-320v-448h320q106 0 171 62t65 162zM1280 971q0 -193 -126.5 -315t-326.5 -122h-340v-118h505q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9h-505v-192q0 -14 -9.5 -23t-22.5 -9h-167q-14 0 -23 9t-9 23v192h-224q-14 0 -23 9t-9 23v128
+q0 14 9 23t23 9h224v118h-224q-14 0 -23 9t-9 23v149q0 13 9 22.5t23 9.5h224v629q0 14 9 23t23 9h539q200 0 326.5 -122t126.5 -315z" />
+ <glyph glyph-name="krw" unicode="&#xf159;" horiz-adv-x="1792"
+d="M514 341l81 299h-159l75 -300q1 -1 1 -3t1 -3q0 1 0.5 3.5t0.5 3.5zM630 768l35 128h-292l32 -128h225zM822 768h139l-35 128h-70zM1271 340l78 300h-162l81 -299q0 -1 0.5 -3.5t1.5 -3.5q0 1 0.5 3t0.5 3zM1382 768l33 128h-297l34 -128h230zM1792 736v-64q0 -14 -9 -23
+t-23 -9h-213l-164 -616q-7 -24 -31 -24h-159q-24 0 -31 24l-166 616h-209l-167 -616q-7 -24 -31 -24h-159q-11 0 -19.5 7t-10.5 17l-160 616h-208q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h175l-33 128h-142q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h109l-89 344q-5 15 5 28
+q10 12 26 12h137q26 0 31 -24l90 -360h359l97 360q7 24 31 24h126q24 0 31 -24l98 -360h365l93 360q5 24 31 24h137q16 0 26 -12q10 -13 5 -28l-91 -344h111q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-145l-34 -128h179q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="btc" unicode="&#xf15a;" horiz-adv-x="1280"
+d="M1167 896q18 -182 -131 -258q117 -28 175 -103t45 -214q-7 -71 -32.5 -125t-64.5 -89t-97 -58.5t-121.5 -34.5t-145.5 -15v-255h-154v251q-80 0 -122 1v-252h-154v255q-18 0 -54 0.5t-55 0.5h-200l31 183h111q50 0 58 51v402h16q-6 1 -16 1v287q-13 68 -89 68h-111v164
+l212 -1q64 0 97 1v252h154v-247q82 2 122 2v245h154v-252q79 -7 140 -22.5t113 -45t82.5 -78t36.5 -114.5zM952 351q0 36 -15 64t-37 46t-57.5 30.5t-65.5 18.5t-74 9t-69 3t-64.5 -1t-47.5 -1v-338q8 0 37 -0.5t48 -0.5t53 1.5t58.5 4t57 8.5t55.5 14t47.5 21t39.5 30
+t24.5 40t9.5 51zM881 827q0 33 -12.5 58.5t-30.5 42t-48 28t-55 16.5t-61.5 8t-58 2.5t-54 -1t-39.5 -0.5v-307q5 0 34.5 -0.5t46.5 0t50 2t55 5.5t51.5 11t48.5 18.5t37 27t27 38.5t9 51z" />
+ <glyph glyph-name="file" unicode="&#xf15b;"
+d="M1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472zM896 992q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h800v-544z" />
+ <glyph glyph-name="file_text" unicode="&#xf15c;"
+d="M1468 1060q14 -14 28 -36h-472v472q22 -14 36 -28zM992 896h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28zM1152 160v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704
+q14 0 23 9t9 23zM1152 416v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704q14 0 23 9t9 23zM1152 672v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704q14 0 23 9t9 23z" />
+ <glyph glyph-name="sort_by_alphabet" unicode="&#xf15d;" horiz-adv-x="1664"
+d="M1191 1128h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23zM1572 -23
+v-233h-584v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -11v-2l14 2q9 2 30 2h248v119h121zM1661 874v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287v106h70l230 662h162
+l230 -662h70z" />
+ <glyph glyph-name="_329" unicode="&#xf15e;" horiz-adv-x="1664"
+d="M1191 104h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23zM1661 -150
+v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287v106h70l230 662h162l230 -662h70zM1572 1001v-233h-584v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -10v-3l14 3q9 1 30 1h248
+v119h121z" />
+ <glyph glyph-name="sort_by_attributes" unicode="&#xf160;" horiz-adv-x="1792"
+d="M736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23zM1792 -32v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h832
+q14 0 23 -9t9 -23zM1600 480v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23zM1408 992v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23zM1216 1504v-192q0 -14 -9 -23t-23 -9h-256
+q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="sort_by_attributes_alt" unicode="&#xf161;" horiz-adv-x="1792"
+d="M1216 -32v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192
+q14 0 23 -9t9 -23zM1408 480v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23zM1600 992v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23zM1792 1504v-192q0 -14 -9 -23t-23 -9h-832
+q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h832q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="sort_by_order" unicode="&#xf162;"
+d="M1346 223q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94t36.5 -95t104.5 -38q50 0 85 27t35 68zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23
+zM1486 165q0 -62 -13 -121.5t-41 -114t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5q0 105 72 178t181 73q123 0 205 -94.5
+t82 -252.5zM1456 882v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16h-2l-7 -12q-8 -13 -26 -31l-62 -58l-82 86l192 185h123v-654h165z" />
+ <glyph glyph-name="sort_by_order_alt" unicode="&#xf163;"
+d="M1346 1247q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94t36.5 -95t104.5 -38q50 0 85 27t35 68zM736 96q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9
+t9 -23zM1456 -142v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16h-2l-7 -12q-8 -13 -26 -31l-62 -58l-82 86l192 185h123v-654h165zM1486 1189q0 -62 -13 -121.5t-41 -114t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13
+q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5z" />
+ <glyph glyph-name="_334" unicode="&#xf164;" horiz-adv-x="1664"
+d="M256 192q0 26 -19 45t-45 19q-27 0 -45.5 -19t-18.5 -45q0 -27 18.5 -45.5t45.5 -18.5q26 0 45 18.5t19 45.5zM416 704v-640q0 -26 -19 -45t-45 -19h-288q-26 0 -45 19t-19 45v640q0 26 19 45t45 19h288q26 0 45 -19t19 -45zM1600 704q0 -86 -55 -149q15 -44 15 -76
+q3 -76 -43 -137q17 -56 0 -117q-15 -57 -54 -94q9 -112 -49 -181q-64 -76 -197 -78h-36h-76h-17q-66 0 -144 15.5t-121.5 29t-120.5 39.5q-123 43 -158 44q-26 1 -45 19.5t-19 44.5v641q0 25 18 43.5t43 20.5q24 2 76 59t101 121q68 87 101 120q18 18 31 48t17.5 48.5
+t13.5 60.5q7 39 12.5 61t19.5 52t34 50q19 19 45 19q46 0 82.5 -10.5t60 -26t40 -40.5t24 -45t12 -50t5 -45t0.5 -39q0 -38 -9.5 -76t-19 -60t-27.5 -56q-3 -6 -10 -18t-11 -22t-8 -24h277q78 0 135 -57t57 -135z" />
+ <glyph glyph-name="_335" unicode="&#xf165;" horiz-adv-x="1664"
+d="M256 960q0 -26 -19 -45t-45 -19q-27 0 -45.5 19t-18.5 45q0 27 18.5 45.5t45.5 18.5q26 0 45 -18.5t19 -45.5zM416 448v640q0 26 -19 45t-45 19h-288q-26 0 -45 -19t-19 -45v-640q0 -26 19 -45t45 -19h288q26 0 45 19t19 45zM1545 597q55 -61 55 -149q-1 -78 -57.5 -135
+t-134.5 -57h-277q4 -14 8 -24t11 -22t10 -18q18 -37 27 -57t19 -58.5t10 -76.5q0 -24 -0.5 -39t-5 -45t-12 -50t-24 -45t-40 -40.5t-60 -26t-82.5 -10.5q-26 0 -45 19q-20 20 -34 50t-19.5 52t-12.5 61q-9 42 -13.5 60.5t-17.5 48.5t-31 48q-33 33 -101 120q-49 64 -101 121
+t-76 59q-25 2 -43 20.5t-18 43.5v641q0 26 19 44.5t45 19.5q35 1 158 44q77 26 120.5 39.5t121.5 29t144 15.5h17h76h36q133 -2 197 -78q58 -69 49 -181q39 -37 54 -94q17 -61 0 -117q46 -61 43 -137q0 -32 -15 -76z" />
+ <glyph glyph-name="youtube_sign" unicode="&#xf166;"
+d="M919 233v157q0 50 -29 50q-17 0 -33 -16v-224q16 -16 33 -16q29 0 29 49zM1103 355h66v34q0 51 -33 51t-33 -51v-34zM532 621v-70h-80v-423h-74v423h-78v70h232zM733 495v-367h-67v40q-39 -45 -76 -45q-33 0 -42 28q-6 17 -6 54v290h66v-270q0 -24 1 -26q1 -15 15 -15
+q20 0 42 31v280h67zM985 384v-146q0 -52 -7 -73q-12 -42 -53 -42q-35 0 -68 41v-36h-67v493h67v-161q32 40 68 40q41 0 53 -42q7 -21 7 -74zM1236 255v-9q0 -29 -2 -43q-3 -22 -15 -40q-27 -40 -80 -40q-52 0 -81 38q-21 27 -21 86v129q0 59 20 86q29 38 80 38t78 -38
+q21 -29 21 -86v-76h-133v-65q0 -51 34 -51q24 0 30 26q0 1 0.5 7t0.5 16.5v21.5h68zM785 1079v-156q0 -51 -32 -51t-32 51v156q0 52 32 52t32 -52zM1318 366q0 177 -19 260q-10 44 -43 73.5t-76 34.5q-136 15 -412 15q-275 0 -411 -15q-44 -5 -76.5 -34.5t-42.5 -73.5
+q-20 -87 -20 -260q0 -176 20 -260q10 -43 42.5 -73t75.5 -35q137 -15 412 -15t412 15q43 5 75.5 35t42.5 73q20 84 20 260zM563 1017l90 296h-75l-51 -195l-53 195h-78q7 -23 23 -69l24 -69q35 -103 46 -158v-201h74v201zM852 936v130q0 58 -21 87q-29 38 -78 38
+q-51 0 -78 -38q-21 -29 -21 -87v-130q0 -58 21 -87q27 -38 78 -38q49 0 78 38q21 27 21 87zM1033 816h67v370h-67v-283q-22 -31 -42 -31q-15 0 -16 16q-1 2 -1 26v272h-67v-293q0 -37 6 -55q11 -27 43 -27q36 0 77 45v-40zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5
+h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="youtube" unicode="&#xf167;"
+d="M971 292v-211q0 -67 -39 -67q-23 0 -45 22v301q22 22 45 22q39 0 39 -67zM1309 291v-46h-90v46q0 68 45 68t45 -68zM343 509h107v94h-312v-94h105v-569h100v569zM631 -60h89v494h-89v-378q-30 -42 -57 -42q-18 0 -21 21q-1 3 -1 35v364h-89v-391q0 -49 8 -73
+q12 -37 58 -37q48 0 102 61v-54zM1060 88v197q0 73 -9 99q-17 56 -71 56q-50 0 -93 -54v217h-89v-663h89v48q45 -55 93 -55q54 0 71 55q9 27 9 100zM1398 98v13h-91q0 -51 -2 -61q-7 -36 -40 -36q-46 0 -46 69v87h179v103q0 79 -27 116q-39 51 -106 51q-68 0 -107 -51
+q-28 -37 -28 -116v-173q0 -79 29 -116q39 -51 108 -51q72 0 108 53q18 27 21 54q2 9 2 58zM790 1011v210q0 69 -43 69t-43 -69v-210q0 -70 43 -70t43 70zM1509 260q0 -234 -26 -350q-14 -59 -58 -99t-102 -46q-184 -21 -555 -21t-555 21q-58 6 -102.5 46t-57.5 99
+q-26 112 -26 350q0 234 26 350q14 59 58 99t103 47q183 20 554 20t555 -20q58 -7 102.5 -47t57.5 -99q26 -112 26 -350zM511 1536h102l-121 -399v-271h-100v271q-14 74 -61 212q-37 103 -65 187h106l71 -263zM881 1203v-175q0 -81 -28 -118q-38 -51 -106 -51q-67 0 -105 51
+q-28 38 -28 118v175q0 80 28 117q38 51 105 51q68 0 106 -51q28 -37 28 -117zM1216 1365v-499h-91v55q-53 -62 -103 -62q-46 0 -59 37q-8 24 -8 75v394h91v-367q0 -33 1 -35q3 -22 21 -22q27 0 57 43v381h91z" />
+ <glyph glyph-name="xing" unicode="&#xf168;" horiz-adv-x="1408"
+d="M597 869q-10 -18 -257 -456q-27 -46 -65 -46h-239q-21 0 -31 17t0 36l253 448q1 0 0 1l-161 279q-12 22 -1 37q9 15 32 15h239q40 0 66 -45zM1403 1511q11 -16 0 -37l-528 -934v-1l336 -615q11 -20 1 -37q-10 -15 -32 -15h-239q-42 0 -66 45l-339 622q18 32 531 942
+q25 45 64 45h241q22 0 31 -15z" />
+ <glyph glyph-name="xing_sign" unicode="&#xf169;"
+d="M685 771q0 1 -126 222q-21 34 -52 34h-184q-18 0 -26 -11q-7 -12 1 -29l125 -216v-1l-196 -346q-9 -14 0 -28q8 -13 24 -13h185q31 0 50 36zM1309 1268q-7 12 -24 12h-187q-30 0 -49 -35l-411 -729q1 -2 262 -481q20 -35 52 -35h184q18 0 25 12q8 13 -1 28l-260 476v1
+l409 723q8 16 0 28zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="youtube_play" unicode="&#xf16a;" horiz-adv-x="1792"
+d="M711 408l484 250l-484 253v-503zM896 1270q168 0 324.5 -4.5t229.5 -9.5l73 -4q1 0 17 -1.5t23 -3t23.5 -4.5t28.5 -8t28 -13t31 -19.5t29 -26.5q6 -6 15.5 -18.5t29 -58.5t26.5 -101q8 -64 12.5 -136.5t5.5 -113.5v-40v-136q1 -145 -18 -290q-7 -55 -25 -99.5t-32 -61.5
+l-14 -17q-14 -15 -29 -26.5t-31 -19t-28 -12.5t-28.5 -8t-24 -4.5t-23 -3t-16.5 -1.5q-251 -19 -627 -19q-207 2 -359.5 6.5t-200.5 7.5l-49 4l-36 4q-36 5 -54.5 10t-51 21t-56.5 41q-6 6 -15.5 18.5t-29 58.5t-26.5 101q-8 64 -12.5 136.5t-5.5 113.5v40v136
+q-1 145 18 290q7 55 25 99.5t32 61.5l14 17q14 15 29 26.5t31 19.5t28 13t28.5 8t23.5 4.5t23 3t17 1.5q251 18 627 18z" />
+ <glyph glyph-name="dropbox" unicode="&#xf16b;" horiz-adv-x="1792"
+d="M402 829l494 -305l-342 -285l-490 319zM1388 274v-108l-490 -293v-1l-1 1l-1 -1v1l-489 293v108l147 -96l342 284v2l1 -1l1 1v-2l343 -284zM554 1418l342 -285l-494 -304l-338 270zM1390 829l338 -271l-489 -319l-343 285zM1239 1418l489 -319l-338 -270l-494 304z" />
+ <glyph glyph-name="stackexchange" unicode="&#xf16c;"
+d="M1289 -96h-1118v480h-160v-640h1438v640h-160v-480zM347 428l33 157l783 -165l-33 -156zM450 802l67 146l725 -339l-67 -145zM651 1158l102 123l614 -513l-102 -123zM1048 1536l477 -641l-128 -96l-477 641zM330 65v159h800v-159h-800z" />
+ <glyph glyph-name="instagram" unicode="&#xf16d;"
+d="M1024 640q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM1162 640q0 -164 -115 -279t-279 -115t-279 115t-115 279t115 279t279 115t279 -115t115 -279zM1270 1050q0 -38 -27 -65t-65 -27t-65 27t-27 65t27 65t65 27t65 -27t27 -65zM768 1270
+q-7 0 -76.5 0.5t-105.5 0t-96.5 -3t-103 -10t-71.5 -18.5q-50 -20 -88 -58t-58 -88q-11 -29 -18.5 -71.5t-10 -103t-3 -96.5t0 -105.5t0.5 -76.5t-0.5 -76.5t0 -105.5t3 -96.5t10 -103t18.5 -71.5q20 -50 58 -88t88 -58q29 -11 71.5 -18.5t103 -10t96.5 -3t105.5 0t76.5 0.5
+t76.5 -0.5t105.5 0t96.5 3t103 10t71.5 18.5q50 20 88 58t58 88q11 29 18.5 71.5t10 103t3 96.5t0 105.5t-0.5 76.5t0.5 76.5t0 105.5t-3 96.5t-10 103t-18.5 71.5q-20 50 -58 88t-88 58q-29 11 -71.5 18.5t-103 10t-96.5 3t-105.5 0t-76.5 -0.5zM1536 640q0 -229 -5 -317
+q-10 -208 -124 -322t-322 -124q-88 -5 -317 -5t-317 5q-208 10 -322 124t-124 322q-5 88 -5 317t5 317q10 208 124 322t322 124q88 5 317 5t317 -5q208 -10 322 -124t124 -322q5 -88 5 -317z" />
+ <glyph glyph-name="flickr" unicode="&#xf16e;"
+d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960zM698 640q0 88 -62 150t-150 62t-150 -62t-62 -150t62 -150t150 -62t150 62t62 150zM1262 640q0 88 -62 150
+t-150 62t-150 -62t-62 -150t62 -150t150 -62t150 62t62 150z" />
+ <glyph glyph-name="adn" unicode="&#xf170;"
+d="M768 914l201 -306h-402zM1133 384h94l-459 691l-459 -691h94l104 160h522zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="f171" unicode="&#xf171;" horiz-adv-x="1408"
+d="M815 677q8 -63 -50.5 -101t-111.5 -6q-39 17 -53.5 58t-0.5 82t52 58q36 18 72.5 12t64 -35.5t27.5 -67.5zM926 698q-14 107 -113 164t-197 13q-63 -28 -100.5 -88.5t-34.5 -129.5q4 -91 77.5 -155t165.5 -56q91 8 152 84t50 168zM1165 1240q-20 27 -56 44.5t-58 22
+t-71 12.5q-291 47 -566 -2q-43 -7 -66 -12t-55 -22t-50 -43q30 -28 76 -45.5t73.5 -22t87.5 -11.5q228 -29 448 -1q63 8 89.5 12t72.5 21.5t75 46.5zM1222 205q-8 -26 -15.5 -76.5t-14 -84t-28.5 -70t-58 -56.5q-86 -48 -189.5 -71.5t-202 -22t-201.5 18.5q-46 8 -81.5 18
+t-76.5 27t-73 43.5t-52 61.5q-25 96 -57 292l6 16l18 9q223 -148 506.5 -148t507.5 148q21 -6 24 -23t-5 -45t-8 -37zM1403 1166q-26 -167 -111 -655q-5 -30 -27 -56t-43.5 -40t-54.5 -31q-252 -126 -610 -88q-248 27 -394 139q-15 12 -25.5 26.5t-17 35t-9 34t-6 39.5
+t-5.5 35q-9 50 -26.5 150t-28 161.5t-23.5 147.5t-22 158q3 26 17.5 48.5t31.5 37.5t45 30t46 22.5t48 18.5q125 46 313 64q379 37 676 -50q155 -46 215 -122q16 -20 16.5 -51t-5.5 -54z" />
+ <glyph glyph-name="bitbucket_sign" unicode="&#xf172;"
+d="M848 666q0 43 -41 66t-77 1q-43 -20 -42.5 -72.5t43.5 -70.5q39 -23 81 4t36 72zM928 682q8 -66 -36 -121t-110 -61t-119 40t-56 113q-2 49 25.5 93t72.5 64q70 31 141.5 -10t81.5 -118zM1100 1073q-20 -21 -53.5 -34t-53 -16t-63.5 -8q-155 -20 -324 0q-44 6 -63 9.5
+t-52.5 16t-54.5 32.5q13 19 36 31t40 15.5t47 8.5q198 35 408 1q33 -5 51 -8.5t43 -16t39 -31.5zM1142 327q0 7 5.5 26.5t3 32t-17.5 16.5q-161 -106 -365 -106t-366 106l-12 -6l-5 -12q26 -154 41 -210q47 -81 204 -108q249 -46 428 53q34 19 49 51.5t22.5 85.5t12.5 71z
+M1272 1020q9 53 -8 75q-43 55 -155 88q-216 63 -487 36q-132 -12 -226 -46q-38 -15 -59.5 -25t-47 -34t-29.5 -54q8 -68 19 -138t29 -171t24 -137q1 -5 5 -31t7 -36t12 -27t22 -28q105 -80 284 -100q259 -28 440 63q24 13 39.5 23t31 29t19.5 40q48 267 80 473zM1536 1120
+v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="tumblr" unicode="&#xf173;" horiz-adv-x="1024"
+d="M944 207l80 -237q-23 -35 -111 -66t-177 -32q-104 -2 -190.5 26t-142.5 74t-95 106t-55.5 120t-16.5 118v544h-168v215q72 26 129 69.5t91 90t58 102t34 99t15 88.5q1 5 4.5 8.5t7.5 3.5h244v-424h333v-252h-334v-518q0 -30 6.5 -56t22.5 -52.5t49.5 -41.5t81.5 -14
+q78 2 134 29z" />
+ <glyph glyph-name="tumblr_sign" unicode="&#xf174;"
+d="M1136 75l-62 183q-44 -22 -103 -22q-36 -1 -62 10.5t-38.5 31.5t-17.5 40.5t-5 43.5v398h257v194h-256v326h-188q-8 0 -9 -10q-5 -44 -17.5 -87t-39 -95t-77 -95t-118.5 -68v-165h130v-418q0 -57 21.5 -115t65 -111t121 -85.5t176.5 -30.5q69 1 136.5 25t85.5 50z
+M1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="long_arrow_down" unicode="&#xf175;" horiz-adv-x="768"
+d="M765 237q8 -19 -5 -35l-350 -384q-10 -10 -23 -10q-14 0 -24 10l-355 384q-13 16 -5 35q9 19 29 19h224v1248q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1248h224q21 0 29 -19z" />
+ <glyph glyph-name="long_arrow_up" unicode="&#xf176;" horiz-adv-x="768"
+d="M765 1043q-9 -19 -29 -19h-224v-1248q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1248h-224q-21 0 -29 19t5 35l350 384q10 10 23 10q14 0 24 -10l355 -384q13 -16 5 -35z" />
+ <glyph glyph-name="long_arrow_left" unicode="&#xf177;" horiz-adv-x="1792"
+d="M1792 736v-192q0 -14 -9 -23t-23 -9h-1248v-224q0 -21 -19 -29t-35 5l-384 350q-10 10 -10 23q0 14 10 24l384 354q16 14 35 6q19 -9 19 -29v-224h1248q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="long_arrow_right" unicode="&#xf178;" horiz-adv-x="1792"
+d="M1728 643q0 -14 -10 -24l-384 -354q-16 -14 -35 -6q-19 9 -19 29v224h-1248q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h1248v224q0 21 19 29t35 -5l384 -350q10 -10 10 -23z" />
+ <glyph glyph-name="apple" unicode="&#xf179;" horiz-adv-x="1408"
+d="M1393 321q-39 -125 -123 -250q-129 -196 -257 -196q-49 0 -140 32q-86 32 -151 32q-61 0 -142 -33q-81 -34 -132 -34q-152 0 -301 259q-147 261 -147 503q0 228 113 374q113 144 284 144q72 0 177 -30q104 -30 138 -30q45 0 143 34q102 34 173 34q119 0 213 -65
+q52 -36 104 -100q-79 -67 -114 -118q-65 -94 -65 -207q0 -124 69 -223t158 -126zM1017 1494q0 -61 -29 -136q-30 -75 -93 -138q-54 -54 -108 -72q-37 -11 -104 -17q3 149 78 257q74 107 250 148q1 -3 2.5 -11t2.5 -11q0 -4 0.5 -10t0.5 -10z" />
+ <glyph glyph-name="windows" unicode="&#xf17a;" horiz-adv-x="1664"
+d="M682 530v-651l-682 94v557h682zM682 1273v-659h-682v565zM1664 530v-786l-907 125v661h907zM1664 1408v-794h-907v669z" />
+ <glyph glyph-name="android" unicode="&#xf17b;" horiz-adv-x="1408"
+d="M493 1053q16 0 27.5 11.5t11.5 27.5t-11.5 27.5t-27.5 11.5t-27 -11.5t-11 -27.5t11 -27.5t27 -11.5zM915 1053q16 0 27 11.5t11 27.5t-11 27.5t-27 11.5t-27.5 -11.5t-11.5 -27.5t11.5 -27.5t27.5 -11.5zM103 869q42 0 72 -30t30 -72v-430q0 -43 -29.5 -73t-72.5 -30
+t-73 30t-30 73v430q0 42 30 72t73 30zM1163 850v-666q0 -46 -32 -78t-77 -32h-75v-227q0 -43 -30 -73t-73 -30t-73 30t-30 73v227h-138v-227q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73l-1 227h-74q-46 0 -78 32t-32 78v666h918zM931 1255q107 -55 171 -153.5t64 -215.5
+h-925q0 117 64 215.5t172 153.5l-71 131q-7 13 5 20q13 6 20 -6l72 -132q95 42 201 42t201 -42l72 132q7 12 20 6q12 -7 5 -20zM1408 767v-430q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73v430q0 43 30 72.5t72 29.5q43 0 73 -29.5t30 -72.5z" />
+ <glyph glyph-name="linux" unicode="&#xf17c;"
+d="M663 1125q-11 -1 -15.5 -10.5t-8.5 -9.5q-5 -1 -5 5q0 12 19 15h10zM750 1111q-4 -1 -11.5 6.5t-17.5 4.5q24 11 32 -2q3 -6 -3 -9zM399 684q-4 1 -6 -3t-4.5 -12.5t-5.5 -13.5t-10 -13q-10 -11 -1 -12q4 -1 12.5 7t12.5 18q1 3 2 7t2 6t1.5 4.5t0.5 4v3t-1 2.5t-3 2z
+M1254 325q0 18 -55 42q4 15 7.5 27.5t5 26t3 21.5t0.5 22.5t-1 19.5t-3.5 22t-4 20.5t-5 25t-5.5 26.5q-10 48 -47 103t-72 75q24 -20 57 -83q87 -162 54 -278q-11 -40 -50 -42q-31 -4 -38.5 18.5t-8 83.5t-11.5 107q-9 39 -19.5 69t-19.5 45.5t-15.5 24.5t-13 15t-7.5 7
+q-14 62 -31 103t-29.5 56t-23.5 33t-15 40q-4 21 6 53.5t4.5 49.5t-44.5 25q-15 3 -44.5 18t-35.5 16q-8 1 -11 26t8 51t36 27q37 3 51 -30t4 -58q-11 -19 -2 -26.5t30 -0.5q13 4 13 36v37q-5 30 -13.5 50t-21 30.5t-23.5 15t-27 7.5q-107 -8 -89 -134q0 -15 -1 -15
+q-9 9 -29.5 10.5t-33 -0.5t-15.5 5q1 57 -16 90t-45 34q-27 1 -41.5 -27.5t-16.5 -59.5q-1 -15 3.5 -37t13 -37.5t15.5 -13.5q10 3 16 14q4 9 -7 8q-7 0 -15.5 14.5t-9.5 33.5q-1 22 9 37t34 14q17 0 27 -21t9.5 -39t-1.5 -22q-22 -15 -31 -29q-8 -12 -27.5 -23.5
+t-20.5 -12.5q-13 -14 -15.5 -27t7.5 -18q14 -8 25 -19.5t16 -19t18.5 -13t35.5 -6.5q47 -2 102 15q2 1 23 7t34.5 10.5t29.5 13t21 17.5q9 14 20 8q5 -3 6.5 -8.5t-3 -12t-16.5 -9.5q-20 -6 -56.5 -21.5t-45.5 -19.5q-44 -19 -70 -23q-25 -5 -79 2q-10 2 -9 -2t17 -19
+q25 -23 67 -22q17 1 36 7t36 14t33.5 17.5t30 17t24.5 12t17.5 2.5t8.5 -11q0 -2 -1 -4.5t-4 -5t-6 -4.5t-8.5 -5t-9 -4.5t-10 -5t-9.5 -4.5q-28 -14 -67.5 -44t-66.5 -43t-49 -1q-21 11 -63 73q-22 31 -25 22q-1 -3 -1 -10q0 -25 -15 -56.5t-29.5 -55.5t-21 -58t11.5 -63
+q-23 -6 -62.5 -90t-47.5 -141q-2 -18 -1.5 -69t-5.5 -59q-8 -24 -29 -3q-32 31 -36 94q-2 28 4 56q4 19 -1 18q-2 -1 -4 -5q-36 -65 10 -166q5 -12 25 -28t24 -20q20 -23 104 -90.5t93 -76.5q16 -15 17.5 -38t-14 -43t-45.5 -23q8 -15 29 -44.5t28 -54t7 -70.5q46 24 7 92
+q-4 8 -10.5 16t-9.5 12t-2 6q3 5 13 9.5t20 -2.5q46 -52 166 -36q133 15 177 87q23 38 34 30q12 -6 10 -52q-1 -25 -23 -92q-9 -23 -6 -37.5t24 -15.5q3 19 14.5 77t13.5 90q2 21 -6.5 73.5t-7.5 97t23 70.5q15 18 51 18q1 37 34.5 53t72.5 10.5t60 -22.5zM626 1152
+q3 17 -2.5 30t-11.5 15q-9 2 -9 -7q2 -5 5 -6q10 0 7 -15q-3 -20 8 -20q3 0 3 3zM1045 955q-2 8 -6.5 11.5t-13 5t-14.5 5.5q-5 3 -9.5 8t-7 8t-5.5 6.5t-4 4t-4 -1.5q-14 -16 7 -43.5t39 -31.5q9 -1 14.5 8t3.5 20zM867 1168q0 11 -5 19.5t-11 12.5t-9 3q-6 0 -8 -2t0 -4
+t5 -3q14 -4 18 -31q0 -3 8 2q2 2 2 3zM921 1401q0 2 -2.5 5t-9 7t-9.5 6q-15 15 -24 15q-9 -1 -11.5 -7.5t-1 -13t-0.5 -12.5q-1 -4 -6 -10.5t-6 -9t3 -8.5q4 -3 8 0t11 9t15 9q1 1 9 1t15 2t9 7zM1486 60q20 -12 31 -24.5t12 -24t-2.5 -22.5t-15.5 -22t-23.5 -19.5
+t-30 -18.5t-31.5 -16.5t-32 -15.5t-27 -13q-38 -19 -85.5 -56t-75.5 -64q-17 -16 -68 -19.5t-89 14.5q-18 9 -29.5 23.5t-16.5 25.5t-22 19.5t-47 9.5q-44 1 -130 1q-19 0 -57 -1.5t-58 -2.5q-44 -1 -79.5 -15t-53.5 -30t-43.5 -28.5t-53.5 -11.5q-29 1 -111 31t-146 43
+q-19 4 -51 9.5t-50 9t-39.5 9.5t-33.5 14.5t-17 19.5q-10 23 7 66.5t18 54.5q1 16 -4 40t-10 42.5t-4.5 36.5t10.5 27q14 12 57 14t60 12q30 18 42 35t12 51q21 -73 -32 -106q-32 -20 -83 -15q-34 3 -43 -10q-13 -15 5 -57q2 -6 8 -18t8.5 -18t4.5 -17t1 -22q0 -15 -17 -49
+t-14 -48q3 -17 37 -26q20 -6 84.5 -18.5t99.5 -20.5q24 -6 74 -22t82.5 -23t55.5 -4q43 6 64.5 28t23 48t-7.5 58.5t-19 52t-20 36.5q-121 190 -169 242q-68 74 -113 40q-11 -9 -15 15q-3 16 -2 38q1 29 10 52t24 47t22 42q8 21 26.5 72t29.5 78t30 61t39 54
+q110 143 124 195q-12 112 -16 310q-2 90 24 151.5t106 104.5q39 21 104 21q53 1 106 -13.5t89 -41.5q57 -42 91.5 -121.5t29.5 -147.5q-5 -95 30 -214q34 -113 133 -218q55 -59 99.5 -163t59.5 -191q8 -49 5 -84.5t-12 -55.5t-20 -22q-10 -2 -23.5 -19t-27 -35.5
+t-40.5 -33.5t-61 -14q-18 1 -31.5 5t-22.5 13.5t-13.5 15.5t-11.5 20.5t-9 19.5q-22 37 -41 30t-28 -49t7 -97q20 -70 1 -195q-10 -65 18 -100.5t73 -33t85 35.5q59 49 89.5 66.5t103.5 42.5q53 18 77 36.5t18.5 34.5t-25 28.5t-51.5 23.5q-33 11 -49.5 48t-15 72.5
+t15.5 47.5q1 -31 8 -56.5t14.5 -40.5t20.5 -28.5t21 -19t21.5 -13t16.5 -9.5z" />
+ <glyph glyph-name="dribble" unicode="&#xf17d;"
+d="M1024 36q-42 241 -140 498h-2l-2 -1q-16 -6 -43 -16.5t-101 -49t-137 -82t-131 -114.5t-103 -148l-15 11q184 -150 418 -150q132 0 256 52zM839 643q-21 49 -53 111q-311 -93 -673 -93q-1 -7 -1 -21q0 -124 44 -236.5t124 -201.5q50 89 123.5 166.5t142.5 124.5t130.5 81
+t99.5 48l37 13q4 1 13 3.5t13 4.5zM732 855q-120 213 -244 378q-138 -65 -234 -186t-128 -272q302 0 606 80zM1416 536q-210 60 -409 29q87 -239 128 -469q111 75 185 189.5t96 250.5zM611 1277q-1 0 -2 -1q1 1 2 1zM1201 1132q-185 164 -433 164q-76 0 -155 -19
+q131 -170 246 -382q69 26 130 60.5t96.5 61.5t65.5 57t37.5 40.5zM1424 647q-3 232 -149 410l-1 -1q-9 -12 -19 -24.5t-43.5 -44.5t-71 -60.5t-100 -65t-131.5 -64.5q25 -53 44 -95q2 -5 6.5 -17t7.5 -17q36 5 74.5 7t73.5 2t69 -1.5t64 -4t56.5 -5.5t48 -6.5t36.5 -6
+t25 -4.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="skype" unicode="&#xf17e;"
+d="M1173 473q0 50 -19.5 91.5t-48.5 68.5t-73 49t-82.5 34t-87.5 23l-104 24q-30 7 -44 10.5t-35 11.5t-30 16t-16.5 21t-7.5 30q0 77 144 77q43 0 77 -12t54 -28.5t38 -33.5t40 -29t48 -12q47 0 75.5 32t28.5 77q0 55 -56 99.5t-142 67.5t-182 23q-68 0 -132 -15.5
+t-119.5 -47t-89 -87t-33.5 -128.5q0 -61 19 -106.5t56 -75.5t80 -48.5t103 -32.5l146 -36q90 -22 112 -36q32 -20 32 -60q0 -39 -40 -64.5t-105 -25.5q-51 0 -91.5 16t-65 38.5t-45.5 45t-46 38.5t-54 16q-50 0 -75.5 -30t-25.5 -75q0 -92 122 -157.5t291 -65.5
+q73 0 140 18.5t122.5 53.5t88.5 93.5t33 131.5zM1536 256q0 -159 -112.5 -271.5t-271.5 -112.5q-130 0 -234 80q-77 -16 -150 -16q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5q0 73 16 150q-80 104 -80 234q0 159 112.5 271.5t271.5 112.5q130 0 234 -80
+q77 16 150 16q143 0 273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -73 -16 -150q80 -104 80 -234z" />
+ <glyph glyph-name="foursquare" unicode="&#xf180;" horiz-adv-x="1280"
+d="M1000 1102l37 194q5 23 -9 40t-35 17h-712q-23 0 -38.5 -17t-15.5 -37v-1101q0 -7 6 -1l291 352q23 26 38 33.5t48 7.5h239q22 0 37 14.5t18 29.5q24 130 37 191q4 21 -11.5 40t-36.5 19h-294q-29 0 -48 19t-19 48v42q0 29 19 47.5t48 18.5h346q18 0 35 13.5t20 29.5z
+M1227 1324q-15 -73 -53.5 -266.5t-69.5 -350t-35 -173.5q-6 -22 -9 -32.5t-14 -32.5t-24.5 -33t-38.5 -21t-58 -10h-271q-13 0 -22 -10q-8 -9 -426 -494q-22 -25 -58.5 -28.5t-48.5 5.5q-55 22 -55 98v1410q0 55 38 102.5t120 47.5h888q95 0 127 -53t10 -159zM1227 1324
+l-158 -790q4 17 35 173.5t69.5 350t53.5 266.5z" />
+ <glyph glyph-name="trello" unicode="&#xf181;"
+d="M704 192v1024q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-1024q0 -14 9 -23t23 -9h480q14 0 23 9t9 23zM1376 576v640q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-640q0 -14 9 -23t23 -9h480q14 0 23 9t9 23zM1536 1344v-1408q0 -26 -19 -45t-45 -19h-1408
+q-26 0 -45 19t-19 45v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="female" unicode="&#xf182;" horiz-adv-x="1280"
+d="M1280 480q0 -40 -28 -68t-68 -28q-51 0 -80 43l-227 341h-45v-132l247 -411q9 -15 9 -33q0 -26 -19 -45t-45 -19h-192v-272q0 -46 -33 -79t-79 -33h-160q-46 0 -79 33t-33 79v272h-192q-26 0 -45 19t-19 45q0 18 9 33l247 411v132h-45l-227 -341q-29 -43 -80 -43
+q-40 0 -68 28t-28 68q0 29 16 53l256 384q73 107 176 107h384q103 0 176 -107l256 -384q16 -24 16 -53zM864 1280q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5z" />
+ <glyph glyph-name="male" unicode="&#xf183;" horiz-adv-x="1024"
+d="M1024 832v-416q0 -40 -28 -68t-68 -28t-68 28t-28 68v352h-64v-912q0 -46 -33 -79t-79 -33t-79 33t-33 79v464h-64v-464q0 -46 -33 -79t-79 -33t-79 33t-33 79v912h-64v-352q0 -40 -28 -68t-68 -28t-68 28t-28 68v416q0 80 56 136t136 56h640q80 0 136 -56t56 -136z
+M736 1280q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5z" />
+ <glyph glyph-name="gittip" unicode="&#xf184;"
+d="M773 234l350 473q16 22 24.5 59t-6 85t-61.5 79q-40 26 -83 25.5t-73.5 -17.5t-54.5 -45q-36 -40 -96 -40q-59 0 -95 40q-24 28 -54.5 45t-73.5 17.5t-84 -25.5q-46 -31 -60.5 -79t-6 -85t24.5 -59zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103
+t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="sun" unicode="&#xf185;" horiz-adv-x="1792"
+d="M1472 640q0 117 -45.5 223.5t-123 184t-184 123t-223.5 45.5t-223.5 -45.5t-184 -123t-123 -184t-45.5 -223.5t45.5 -223.5t123 -184t184 -123t223.5 -45.5t223.5 45.5t184 123t123 184t45.5 223.5zM1748 363q-4 -15 -20 -20l-292 -96v-306q0 -16 -13 -26q-15 -10 -29 -4
+l-292 94l-180 -248q-10 -13 -26 -13t-26 13l-180 248l-292 -94q-14 -6 -29 4q-13 10 -13 26v306l-292 96q-16 5 -20 20q-5 17 4 29l180 248l-180 248q-9 13 -4 29q4 15 20 20l292 96v306q0 16 13 26q15 10 29 4l292 -94l180 248q9 12 26 12t26 -12l180 -248l292 94
+q14 6 29 -4q13 -10 13 -26v-306l292 -96q16 -5 20 -20q5 -16 -4 -29l-180 -248l180 -248q9 -12 4 -29z" />
+ <glyph glyph-name="_366" unicode="&#xf186;"
+d="M1262 233q-54 -9 -110 -9q-182 0 -337 90t-245 245t-90 337q0 192 104 357q-201 -60 -328.5 -229t-127.5 -384q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51q144 0 273.5 61.5t220.5 171.5zM1465 318q-94 -203 -283.5 -324.5t-413.5 -121.5q-156 0 -298 61
+t-245 164t-164 245t-61 298q0 153 57.5 292.5t156 241.5t235.5 164.5t290 68.5q44 2 61 -39q18 -41 -15 -72q-86 -78 -131.5 -181.5t-45.5 -218.5q0 -148 73 -273t198 -198t273 -73q118 0 228 51q41 18 72 -13q14 -14 17.5 -34t-4.5 -38z" />
+ <glyph glyph-name="archive" unicode="&#xf187;" horiz-adv-x="1792"
+d="M1088 704q0 26 -19 45t-45 19h-256q-26 0 -45 -19t-19 -45t19 -45t45 -19h256q26 0 45 19t19 45zM1664 896v-960q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45v960q0 26 19 45t45 19h1408q26 0 45 -19t19 -45zM1728 1344v-256q0 -26 -19 -45t-45 -19h-1536
+q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h1536q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="bug" unicode="&#xf188;" horiz-adv-x="1664"
+d="M1632 576q0 -26 -19 -45t-45 -19h-224q0 -171 -67 -290l208 -209q19 -19 19 -45t-19 -45q-18 -19 -45 -19t-45 19l-198 197q-5 -5 -15 -13t-42 -28.5t-65 -36.5t-82 -29t-97 -13v896h-128v-896q-51 0 -101.5 13.5t-87 33t-66 39t-43.5 32.5l-15 14l-183 -207
+q-20 -21 -48 -21q-24 0 -43 16q-19 18 -20.5 44.5t15.5 46.5l202 227q-58 114 -58 274h-224q-26 0 -45 19t-19 45t19 45t45 19h224v294l-173 173q-19 19 -19 45t19 45t45 19t45 -19l173 -173h844l173 173q19 19 45 19t45 -19t19 -45t-19 -45l-173 -173v-294h224q26 0 45 -19
+t19 -45zM1152 1152h-640q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5z" />
+ <glyph glyph-name="vk" unicode="&#xf189;" horiz-adv-x="1920"
+d="M1917 1016q23 -64 -150 -294q-24 -32 -65 -85q-40 -51 -55 -72t-30.5 -49.5t-12 -42t13 -34.5t32.5 -43t57 -53q4 -2 5 -4q141 -131 191 -221q3 -5 6.5 -12.5t7 -26.5t-0.5 -34t-25 -27.5t-59 -12.5l-256 -4q-24 -5 -56 5t-52 22l-20 12q-30 21 -70 64t-68.5 77.5t-61 58
+t-56.5 15.5q-3 -1 -8 -3.5t-17 -14.5t-21.5 -29.5t-17 -52t-6.5 -77.5q0 -15 -3.5 -27.5t-7.5 -18.5l-4 -5q-18 -19 -53 -22h-115q-71 -4 -146 16.5t-131.5 53t-103 66t-70.5 57.5l-25 24q-10 10 -27.5 30t-71.5 91t-106 151t-122.5 211t-130.5 272q-6 16 -6 27t3 16l4 6
+q15 19 57 19l274 2q12 -2 23 -6.5t16 -8.5l5 -3q16 -11 24 -32q20 -50 46 -103.5t41 -81.5l16 -29q29 -60 56 -104t48.5 -68.5t41.5 -38.5t34 -14t27 5q2 1 5 5t12 22t13.5 47t9.5 81t0 125q-2 40 -9 73t-14 46l-6 12q-25 34 -85 43q-13 2 5 24q16 19 38 30q53 26 239 24
+q82 -1 135 -13q20 -5 33.5 -13.5t20.5 -24t10.5 -32t3.5 -45.5t-1 -55t-2.5 -70.5t-1.5 -82.5q0 -11 -1 -42t-0.5 -48t3.5 -40.5t11.5 -39t22.5 -24.5q8 -2 17 -4t26 11t38 34.5t52 67t68 107.5q60 104 107 225q4 10 10 17.5t11 10.5l4 3l5 2.5t13 3t20 0.5l288 2
+q39 5 64 -2.5t31 -16.5z" />
+ <glyph glyph-name="weibo" unicode="&#xf18a;" horiz-adv-x="1792"
+d="M675 252q21 34 11 69t-45 50q-34 14 -73 1t-60 -46q-22 -34 -13 -68.5t43 -50.5t74.5 -2.5t62.5 47.5zM769 373q8 13 3.5 26.5t-17.5 18.5q-14 5 -28.5 -0.5t-21.5 -18.5q-17 -31 13 -45q14 -5 29 0.5t22 18.5zM943 266q-45 -102 -158 -150t-224 -12
+q-107 34 -147.5 126.5t6.5 187.5q47 93 151.5 139t210.5 19q111 -29 158.5 -119.5t2.5 -190.5zM1255 426q-9 96 -89 170t-208.5 109t-274.5 21q-223 -23 -369.5 -141.5t-132.5 -264.5q9 -96 89 -170t208.5 -109t274.5 -21q223 23 369.5 141.5t132.5 264.5zM1563 422
+q0 -68 -37 -139.5t-109 -137t-168.5 -117.5t-226 -83t-270.5 -31t-275 33.5t-240.5 93t-171.5 151t-65 199.5q0 115 69.5 245t197.5 258q169 169 341.5 236t246.5 -7q65 -64 20 -209q-4 -14 -1 -20t10 -7t14.5 0.5t13.5 3.5l6 2q139 59 246 59t153 -61q45 -63 0 -178
+q-2 -13 -4.5 -20t4.5 -12.5t12 -7.5t17 -6q57 -18 103 -47t80 -81.5t34 -116.5zM1489 1046q42 -47 54.5 -108.5t-6.5 -117.5q-8 -23 -29.5 -34t-44.5 -4q-23 8 -34 29.5t-4 44.5q20 63 -24 111t-107 35q-24 -5 -45 8t-25 37q-5 24 8 44.5t37 25.5q60 13 119 -5.5t101 -65.5z
+M1670 1209q87 -96 112.5 -222.5t-13.5 -241.5q-9 -27 -34 -40t-52 -4t-40 34t-5 52q28 82 10 172t-80 158q-62 69 -148 95.5t-173 8.5q-28 -6 -52 9.5t-30 43.5t9.5 51.5t43.5 29.5q123 26 244 -11.5t208 -134.5z" />
+ <glyph glyph-name="renren" unicode="&#xf18b;"
+d="M1133 -34q-171 -94 -368 -94q-196 0 -367 94q138 87 235.5 211t131.5 268q35 -144 132.5 -268t235.5 -211zM638 1394v-485q0 -252 -126.5 -459.5t-330.5 -306.5q-181 215 -181 495q0 187 83.5 349.5t229.5 269.5t325 137zM1536 638q0 -280 -181 -495
+q-204 99 -330.5 306.5t-126.5 459.5v485q179 -30 325 -137t229.5 -269.5t83.5 -349.5z" />
+ <glyph glyph-name="_372" unicode="&#xf18c;" horiz-adv-x="1408"
+d="M1402 433q-32 -80 -76 -138t-91 -88.5t-99 -46.5t-101.5 -14.5t-96.5 8.5t-86.5 22t-69.5 27.5t-46 22.5l-17 10q-113 -228 -289.5 -359.5t-384.5 -132.5q-19 0 -32 13t-13 32t13 31.5t32 12.5q173 1 322.5 107.5t251.5 294.5q-36 -14 -72 -23t-83 -13t-91 2.5t-93 28.5
+t-92 59t-84.5 100t-74.5 146q114 47 214 57t167.5 -7.5t124.5 -56.5t88.5 -77t56.5 -82q53 131 79 291q-7 -1 -18 -2.5t-46.5 -2.5t-69.5 0.5t-81.5 10t-88.5 23t-84 42.5t-75 65t-54.5 94.5t-28.5 127.5q70 28 133.5 36.5t112.5 -1t92 -30t73.5 -50t56 -61t42 -63t27.5 -56
+t16 -39.5l4 -16q12 122 12 195q-8 6 -21.5 16t-49 44.5t-63.5 71.5t-54 93t-33 112.5t12 127t70 138.5q73 -25 127.5 -61.5t84.5 -76.5t48 -85t20.5 -89t-0.5 -85.5t-13 -76.5t-19 -62t-17 -42l-7 -15q1 -4 1 -50t-1 -72q3 7 10 18.5t30.5 43t50.5 58t71 55.5t91.5 44.5
+t112 14.5t132.5 -24q-2 -78 -21.5 -141.5t-50 -104.5t-69.5 -71.5t-81.5 -45.5t-84.5 -24t-80 -9.5t-67.5 1t-46.5 4.5l-17 3q-23 -147 -73 -283q6 7 18 18.5t49.5 41t77.5 52.5t99.5 42t117.5 20t129 -23.5t137 -77.5z" />
+ <glyph glyph-name="stack_exchange" unicode="&#xf18d;" horiz-adv-x="1280"
+d="M1259 283v-66q0 -85 -57.5 -144.5t-138.5 -59.5h-57l-260 -269v269h-529q-81 0 -138.5 59.5t-57.5 144.5v66h1238zM1259 609v-255h-1238v255h1238zM1259 937v-255h-1238v255h1238zM1259 1077v-67h-1238v67q0 84 57.5 143.5t138.5 59.5h846q81 0 138.5 -59.5t57.5 -143.5z
+" />
+ <glyph glyph-name="_374" unicode="&#xf18e;"
+d="M1152 640q0 -14 -9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-352q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h352v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198
+t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="arrow_circle_alt_left" unicode="&#xf190;"
+d="M1152 736v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-352v-192q0 -14 -9 -23t-23 -9q-12 0 -24 10l-319 319q-9 9 -9 23t9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h352q13 0 22.5 -9.5t9.5 -22.5zM1312 640q0 148 -73 273t-198 198t-273 73t-273 -73t-198 -198
+t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_376" unicode="&#xf191;"
+d="M1024 960v-640q0 -26 -19 -45t-45 -19q-20 0 -37 12l-448 320q-27 19 -27 52t27 52l448 320q17 12 37 12q26 0 45 -19t19 -45zM1280 160v960q0 13 -9.5 22.5t-22.5 9.5h-960q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5z
+M1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="dot_circle_alt" unicode="&#xf192;"
+d="M1024 640q0 -106 -75 -181t-181 -75t-181 75t-75 181t75 181t181 75t181 -75t75 -181zM768 1184q-148 0 -273 -73t-198 -198t-73 -273t73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73zM1536 640q0 -209 -103 -385.5t-279.5 -279.5
+t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_378" unicode="&#xf193;" horiz-adv-x="1664"
+d="M1023 349l102 -204q-58 -179 -210 -290t-339 -111q-156 0 -288.5 77.5t-210 210t-77.5 288.5q0 181 104.5 330t274.5 211l17 -131q-122 -54 -195 -165.5t-73 -244.5q0 -185 131.5 -316.5t316.5 -131.5q126 0 232.5 65t165 175.5t49.5 236.5zM1571 249l58 -114l-256 -128
+q-13 -7 -29 -7q-40 0 -57 35l-239 477h-472q-24 0 -42.5 16.5t-21.5 40.5l-96 779q-2 17 6 42q14 51 57 82.5t97 31.5q66 0 113 -47t47 -113q0 -69 -52 -117.5t-120 -41.5l37 -289h423v-128h-407l16 -128h455q40 0 57 -35l228 -455z" />
+ <glyph glyph-name="vimeo_square" unicode="&#xf194;"
+d="M1292 898q10 216 -161 222q-231 8 -312 -261q44 19 82 19q85 0 74 -96q-4 -57 -74 -167t-105 -110q-43 0 -82 169q-13 54 -45 255q-30 189 -160 177q-59 -7 -164 -100l-81 -72l-81 -72l52 -67q76 52 87 52q57 0 107 -179q15 -55 45 -164.5t45 -164.5q68 -179 164 -179
+q157 0 383 294q220 283 226 444zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_380" unicode="&#xf195;" horiz-adv-x="1152"
+d="M1152 704q0 -191 -94.5 -353t-256.5 -256.5t-353 -94.5h-160q-14 0 -23 9t-9 23v611l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v93l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v250q0 14 9 23t23 9h160
+q14 0 23 -9t9 -23v-181l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-93l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-487q188 13 318 151t130 328q0 14 9 23t23 9h160q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="plus_square_o" unicode="&#xf196;" horiz-adv-x="1408"
+d="M1152 736v-64q0 -14 -9 -23t-23 -9h-352v-352q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v352h-352q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h352v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-352h352q14 0 23 -9t9 -23zM1280 288v832q0 66 -47 113t-113 47h-832
+q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113zM1408 1120v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_382" unicode="&#xf197;" horiz-adv-x="2176"
+d="M620 416q-110 -64 -268 -64h-128v64h-64q-13 0 -22.5 23.5t-9.5 56.5q0 24 7 49q-58 2 -96.5 10.5t-38.5 20.5t38.5 20.5t96.5 10.5q-7 25 -7 49q0 33 9.5 56.5t22.5 23.5h64v64h128q158 0 268 -64h1113q42 -7 106.5 -18t80.5 -14q89 -15 150 -40.5t83.5 -47.5t22.5 -40
+t-22.5 -40t-83.5 -47.5t-150 -40.5q-16 -3 -80.5 -14t-106.5 -18h-1113zM1739 668q53 -36 53 -92t-53 -92l81 -30q68 48 68 122t-68 122zM625 400h1015q-217 -38 -456 -80q-57 0 -113 -24t-83 -48l-28 -24l-288 -288q-26 -26 -70.5 -45t-89.5 -19h-96l-93 464h29
+q157 0 273 64zM352 816h-29l93 464h96q46 0 90 -19t70 -45l288 -288q4 -4 11 -10.5t30.5 -23t48.5 -29t61.5 -23t72.5 -10.5l456 -80h-1015q-116 64 -273 64z" />
+ <glyph glyph-name="_383" unicode="&#xf198;" horiz-adv-x="1664"
+d="M1519 760q62 0 103.5 -40.5t41.5 -101.5q0 -97 -93 -130l-172 -59l56 -167q7 -21 7 -47q0 -59 -42 -102t-101 -43q-47 0 -85.5 27t-53.5 72l-55 165l-310 -106l55 -164q8 -24 8 -47q0 -59 -42 -102t-102 -43q-47 0 -85 27t-53 72l-55 163l-153 -53q-29 -9 -50 -9
+q-61 0 -101.5 40t-40.5 101q0 47 27.5 85t71.5 53l156 53l-105 313l-156 -54q-26 -8 -48 -8q-60 0 -101 40.5t-41 100.5q0 47 27.5 85t71.5 53l157 53l-53 159q-8 24 -8 47q0 60 42 102.5t102 42.5q47 0 85 -27t53 -72l54 -160l310 105l-54 160q-8 24 -8 47q0 59 42.5 102
+t101.5 43q47 0 85.5 -27.5t53.5 -71.5l53 -161l162 55q21 6 43 6q60 0 102.5 -39.5t42.5 -98.5q0 -45 -30 -81.5t-74 -51.5l-157 -54l105 -316l164 56q24 8 46 8zM725 498l310 105l-105 315l-310 -107z" />
+ <glyph glyph-name="_384" unicode="&#xf199;"
+d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960zM1280 352v436q-31 -35 -64 -55q-34 -22 -132.5 -85t-151.5 -99q-98 -69 -164 -69v0v0q-66 0 -164 69
+q-47 32 -142 92.5t-142 92.5q-12 8 -33 27t-31 27v-436q0 -40 28 -68t68 -28h832q40 0 68 28t28 68zM1280 925q0 41 -27.5 70t-68.5 29h-832q-40 0 -68 -28t-28 -68q0 -37 30.5 -76.5t67.5 -64.5q47 -32 137.5 -89t129.5 -83q3 -2 17 -11.5t21 -14t21 -13t23.5 -13
+t21.5 -9.5t22.5 -7.5t20.5 -2.5t20.5 2.5t22.5 7.5t21.5 9.5t23.5 13t21 13t21 14t17 11.5l267 174q35 23 66.5 62.5t31.5 73.5z" />
+ <glyph glyph-name="_385" unicode="&#xf19a;" horiz-adv-x="1792"
+d="M127 640q0 163 67 313l367 -1005q-196 95 -315 281t-119 411zM1415 679q0 -19 -2.5 -38.5t-10 -49.5t-11.5 -44t-17.5 -59t-17.5 -58l-76 -256l-278 826q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-75 1 -202 10q-12 1 -20.5 -5t-11.5 -15t-1.5 -18.5t9 -16.5
+t19.5 -8l80 -8l120 -328l-168 -504l-280 832q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-7 0 -23 0.5t-26 0.5q105 160 274.5 253.5t367.5 93.5q147 0 280.5 -53t238.5 -149h-10q-55 0 -92 -40.5t-37 -95.5q0 -12 2 -24t4 -21.5t8 -23t9 -21t12 -22.5t12.5 -21
+t14.5 -24t14 -23q63 -107 63 -212zM909 573l237 -647q1 -6 5 -11q-126 -44 -255 -44q-112 0 -217 32zM1570 1009q95 -174 95 -369q0 -209 -104 -385.5t-279 -278.5l235 678q59 169 59 276q0 42 -6 79zM896 1536q182 0 348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286
+t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71zM896 -215q173 0 331.5 68t273 182.5t182.5 273t68 331.5t-68 331.5t-182.5 273t-273 182.5t-331.5 68t-331.5 -68t-273 -182.5t-182.5 -273t-68 -331.5t68 -331.5t182.5 -273
+t273 -182.5t331.5 -68z" />
+ <glyph glyph-name="_386" unicode="&#xf19b;" horiz-adv-x="1792"
+d="M1086 1536v-1536l-272 -128q-228 20 -414 102t-293 208.5t-107 272.5q0 140 100.5 263.5t275 205.5t391.5 108v-172q-217 -38 -356.5 -150t-139.5 -255q0 -152 154.5 -267t388.5 -145v1360zM1755 954l37 -390l-525 114l147 83q-119 70 -280 99v172q277 -33 481 -157z" />
+ <glyph glyph-name="_387" unicode="&#xf19c;" horiz-adv-x="2048"
+d="M960 1536l960 -384v-128h-128q0 -26 -20.5 -45t-48.5 -19h-1526q-28 0 -48.5 19t-20.5 45h-128v128zM256 896h256v-768h128v768h256v-768h128v768h256v-768h128v768h256v-768h59q28 0 48.5 -19t20.5 -45v-64h-1664v64q0 26 20.5 45t48.5 19h59v768zM1851 -64
+q28 0 48.5 -19t20.5 -45v-128h-1920v128q0 26 20.5 45t48.5 19h1782z" />
+ <glyph glyph-name="_388" unicode="&#xf19d;" horiz-adv-x="2304"
+d="M1774 700l18 -316q4 -69 -82 -128t-235 -93.5t-323 -34.5t-323 34.5t-235 93.5t-82 128l18 316l574 -181q22 -7 48 -7t48 7zM2304 1024q0 -23 -22 -31l-1120 -352q-4 -1 -10 -1t-10 1l-652 206q-43 -34 -71 -111.5t-34 -178.5q63 -36 63 -109q0 -69 -58 -107l58 -433
+q2 -14 -8 -25q-9 -11 -24 -11h-192q-15 0 -24 11q-10 11 -8 25l58 433q-58 38 -58 107q0 73 65 111q11 207 98 330l-333 104q-22 8 -22 31t22 31l1120 352q4 1 10 1t10 -1l1120 -352q22 -8 22 -31z" />
+ <glyph glyph-name="_389" unicode="&#xf19e;"
+d="M859 579l13 -707q-62 11 -105 11q-41 0 -105 -11l13 707q-40 69 -168.5 295.5t-216.5 374.5t-181 287q58 -15 108 -15q44 0 111 15q63 -111 133.5 -229.5t167 -276.5t138.5 -227q37 61 109.5 177.5t117.5 190t105 176t107 189.5q54 -14 107 -14q56 0 114 14v0
+q-28 -39 -60 -88.5t-49.5 -78.5t-56.5 -96t-49 -84q-146 -248 -353 -610z" />
+ <glyph glyph-name="uniF1A0" unicode="&#xf1a0;"
+d="M768 750h725q12 -67 12 -128q0 -217 -91 -387.5t-259.5 -266.5t-386.5 -96q-157 0 -299 60.5t-245 163.5t-163.5 245t-60.5 299t60.5 299t163.5 245t245 163.5t299 60.5q300 0 515 -201l-209 -201q-123 119 -306 119q-129 0 -238.5 -65t-173.5 -176.5t-64 -243.5
+t64 -243.5t173.5 -176.5t238.5 -65q87 0 160 24t120 60t82 82t51.5 87t22.5 78h-436v264z" />
+ <glyph glyph-name="f1a1" unicode="&#xf1a1;" horiz-adv-x="1792"
+d="M1095 369q16 -16 0 -31q-62 -62 -199 -62t-199 62q-16 15 0 31q6 6 15 6t15 -6q48 -49 169 -49q120 0 169 49q6 6 15 6t15 -6zM788 550q0 -37 -26 -63t-63 -26t-63.5 26t-26.5 63q0 38 26.5 64t63.5 26t63 -26.5t26 -63.5zM1183 550q0 -37 -26.5 -63t-63.5 -26t-63 26
+t-26 63t26 63.5t63 26.5t63.5 -26t26.5 -64zM1434 670q0 49 -35 84t-85 35t-86 -36q-130 90 -311 96l63 283l200 -45q0 -37 26 -63t63 -26t63.5 26.5t26.5 63.5t-26.5 63.5t-63.5 26.5q-54 0 -80 -50l-221 49q-19 5 -25 -16l-69 -312q-180 -7 -309 -97q-35 37 -87 37
+q-50 0 -85 -35t-35 -84q0 -35 18.5 -64t49.5 -44q-6 -27 -6 -56q0 -142 140 -243t337 -101q198 0 338 101t140 243q0 32 -7 57q30 15 48 43.5t18 63.5zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191
+t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="_392" unicode="&#xf1a2;"
+d="M939 407q13 -13 0 -26q-53 -53 -171 -53t-171 53q-13 13 0 26q5 6 13 6t13 -6q42 -42 145 -42t145 42q5 6 13 6t13 -6zM676 563q0 -31 -23 -54t-54 -23t-54 23t-23 54q0 32 22.5 54.5t54.5 22.5t54.5 -22.5t22.5 -54.5zM1014 563q0 -31 -23 -54t-54 -23t-54 23t-23 54
+q0 32 22.5 54.5t54.5 22.5t54.5 -22.5t22.5 -54.5zM1229 666q0 42 -30 72t-73 30q-42 0 -73 -31q-113 78 -267 82l54 243l171 -39q1 -32 23.5 -54t53.5 -22q32 0 54.5 22.5t22.5 54.5t-22.5 54.5t-54.5 22.5q-48 0 -69 -43l-189 42q-17 5 -21 -13l-60 -268q-154 -6 -265 -83
+q-30 32 -74 32q-43 0 -73 -30t-30 -72q0 -30 16 -55t42 -38q-5 -25 -5 -48q0 -122 120 -208.5t289 -86.5q170 0 290 86.5t120 208.5q0 25 -6 49q25 13 40.5 37.5t15.5 54.5zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960
+q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_393" unicode="&#xf1a3;"
+d="M866 697l90 27v62q0 79 -58 135t-138 56t-138 -55.5t-58 -134.5v-283q0 -20 -14 -33.5t-33 -13.5t-32.5 13.5t-13.5 33.5v120h-151v-122q0 -82 57.5 -139t139.5 -57q81 0 138.5 56.5t57.5 136.5v280q0 19 13.5 33t33.5 14q19 0 32.5 -14t13.5 -33v-54zM1199 502v122h-150
+v-126q0 -20 -13.5 -33.5t-33.5 -13.5q-19 0 -32.5 14t-13.5 33v123l-90 -26l-60 28v-123q0 -80 58 -137t139 -57t138.5 57t57.5 139zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103
+t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="f1a4" unicode="&#xf1a4;" horiz-adv-x="1920"
+d="M1062 824v118q0 42 -30 72t-72 30t-72 -30t-30 -72v-612q0 -175 -126 -299t-303 -124q-178 0 -303.5 125.5t-125.5 303.5v266h328v-262q0 -43 30 -72.5t72 -29.5t72 29.5t30 72.5v620q0 171 126.5 292t301.5 121q176 0 302 -122t126 -294v-136l-195 -58zM1592 602h328
+v-266q0 -178 -125.5 -303.5t-303.5 -125.5q-177 0 -303 124.5t-126 300.5v268l131 -61l195 58v-270q0 -42 30 -71.5t72 -29.5t72 29.5t30 71.5v275z" />
+ <glyph glyph-name="_395" unicode="&#xf1a5;"
+d="M1472 160v480h-704v704h-480q-93 0 -158.5 -65.5t-65.5 -158.5v-480h704v-704h480q93 0 158.5 65.5t65.5 158.5zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5
+t84.5 -203.5z" />
+ <glyph glyph-name="_396" unicode="&#xf1a6;" horiz-adv-x="2048"
+d="M328 1254h204v-983h-532v697h328v286zM328 435v369h-123v-369h123zM614 968v-697h205v697h-205zM614 1254v-204h205v204h-205zM901 968h533v-942h-533v163h328v82h-328v697zM1229 435v369h-123v-369h123zM1516 968h532v-942h-532v163h327v82h-327v697zM1843 435v369h-123
+v-369h123z" />
+ <glyph glyph-name="_397" unicode="&#xf1a7;"
+d="M1046 516q0 -64 -38 -109t-91 -45q-43 0 -70 15v277q28 17 70 17q53 0 91 -45.5t38 -109.5zM703 944q0 -64 -38 -109.5t-91 -45.5q-43 0 -70 15v277q28 17 70 17q53 0 91 -45t38 -109zM1265 513q0 134 -88 229t-213 95q-20 0 -39 -3q-23 -78 -78 -136q-87 -95 -211 -101
+v-636l211 41v206q51 -19 117 -19q125 0 213 95t88 229zM922 940q0 134 -88.5 229t-213.5 95q-74 0 -141 -36h-186v-840l211 41v206q55 -19 116 -19q125 0 213.5 95t88.5 229zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960
+q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_398" unicode="&#xf1a8;" horiz-adv-x="2038"
+d="M1222 607q75 3 143.5 -20.5t118 -58.5t101 -94.5t84 -108t75.5 -120.5q33 -56 78.5 -109t75.5 -80.5t99 -88.5q-48 -30 -108.5 -57.5t-138.5 -59t-114 -47.5q-44 37 -74 115t-43.5 164.5t-33 180.5t-42.5 168.5t-72.5 123t-122.5 48.5l-10 -2l-6 -4q4 -5 13 -14
+q6 -5 28 -23.5t25.5 -22t19 -18t18 -20.5t11.5 -21t10.5 -27.5t4.5 -31t4 -40.5l1 -33q1 -26 -2.5 -57.5t-7.5 -52t-12.5 -58.5t-11.5 -53q-35 1 -101 -9.5t-98 -10.5q-39 0 -72 10q-2 16 -2 47q0 74 3 96q2 13 31.5 41.5t57 59t26.5 51.5q-24 2 -43 -24
+q-36 -53 -111.5 -99.5t-136.5 -46.5q-25 0 -75.5 63t-106.5 139.5t-84 96.5q-6 4 -27 30q-482 -112 -513 -112q-16 0 -28 11t-12 27q0 15 8.5 26.5t22.5 14.5l486 106q-8 14 -8 25t5.5 17.5t16 11.5t20 7t23 4.5t18.5 4.5q4 1 15.5 7.5t17.5 6.5q15 0 28 -16t20 -33
+q163 37 172 37q17 0 29.5 -11t12.5 -28q0 -15 -8.5 -26t-23.5 -14l-182 -40l-1 -16q-1 -26 81.5 -117.5t104.5 -91.5q47 0 119 80t72 129q0 36 -23.5 53t-51 18.5t-51 11.5t-23.5 34q0 16 10 34l-68 19q43 44 43 117q0 26 -5 58q82 16 144 16q44 0 71.5 -1.5t48.5 -8.5
+t31 -13.5t20.5 -24.5t15.5 -33.5t17 -47.5t24 -60l50 25q-3 -40 -23 -60t-42.5 -21t-40 -6.5t-16.5 -20.5zM1282 842q-5 5 -13.5 15.5t-12 14.5t-10.5 11.5t-10 10.5l-8 8t-8.5 7.5t-8 5t-8.5 4.5q-7 3 -14.5 5t-20.5 2.5t-22 0.5h-32.5h-37.5q-126 0 -217 -43
+q16 30 36 46.5t54 29.5t65.5 36t46 36.5t50 55t43.5 50.5q12 -9 28 -31.5t32 -36.5t38 -13l12 1v-76l22 -1q247 95 371 190q28 21 50 39t42.5 37.5t33 31t29.5 34t24 31t24.5 37t23 38t27 47.5t29.5 53l7 9q-2 -53 -43 -139q-79 -165 -205 -264t-306 -142q-14 -3 -42 -7.5
+t-50 -9.5t-39 -14q3 -19 24.5 -46t21.5 -34q0 -11 -26 -30zM1061 -79q39 26 131.5 47.5t146.5 21.5q9 0 22.5 -15.5t28 -42.5t26 -50t24 -51t14.5 -33q-121 -45 -244 -45q-61 0 -125 11zM822 568l48 12l109 -177l-73 -48zM1323 51q3 -15 3 -16q0 -7 -17.5 -14.5t-46 -13
+t-54 -9.5t-53.5 -7.5t-32 -4.5l-7 43q21 2 60.5 8.5t72 10t60.5 3.5h14zM866 679l-96 -20l-6 17q10 1 32.5 7t34.5 6q19 0 35 -10zM1061 45h31l10 -83l-41 -12v95zM1950 1535v1v-1zM1950 1535l-1 -5l-2 -2l1 3zM1950 1535l1 1z" />
+ <glyph glyph-name="_399" unicode="&#xf1a9;"
+d="M1167 -50q-5 19 -24 5q-30 -22 -87 -39t-131 -17q-129 0 -193 49q-5 4 -13 4q-11 0 -26 -12q-7 -6 -7.5 -16t7.5 -20q34 -32 87.5 -46t102.5 -12.5t99 4.5q41 4 84.5 20.5t65 30t28.5 20.5q12 12 7 29zM1128 65q-19 47 -39 61q-23 15 -76 15q-47 0 -71 -10
+q-29 -12 -78 -56q-26 -24 -12 -44q9 -8 17.5 -4.5t31.5 23.5q3 2 10.5 8.5t10.5 8.5t10 7t11.5 7t12.5 5t15 4.5t16.5 2.5t20.5 1q27 0 44.5 -7.5t23 -14.5t13.5 -22q10 -17 12.5 -20t12.5 1q23 12 14 34zM1483 346q0 22 -5 44.5t-16.5 45t-34 36.5t-52.5 14
+q-33 0 -97 -41.5t-129 -83.5t-101 -42q-27 -1 -63.5 19t-76 49t-83.5 58t-100 49t-111 19q-115 -1 -197 -78.5t-84 -178.5q-2 -112 74 -164q29 -20 62.5 -28.5t103.5 -8.5q57 0 132 32.5t134 71t120 70.5t93 31q26 -1 65 -31.5t71.5 -67t68 -67.5t55.5 -32q35 -3 58.5 14
+t55.5 63q28 41 42.5 101t14.5 106zM1536 506q0 -164 -62 -304.5t-166 -236t-242.5 -149.5t-290.5 -54t-293 57.5t-247.5 157t-170.5 241.5t-64 302q0 89 19.5 172.5t49 145.5t70.5 118.5t78.5 94t78.5 69.5t64.5 46.5t42.5 24.5q14 8 51 26.5t54.5 28.5t48 30t60.5 44
+q36 28 58 72.5t30 125.5q129 -155 186 -193q44 -29 130 -68t129 -66q21 -13 39 -25t60.5 -46.5t76 -70.5t75 -95t69 -122t47 -148.5t19.5 -177.5z" />
+ <glyph glyph-name="_400" unicode="&#xf1aa;"
+d="M1070 463l-160 -160l-151 -152l-30 -30q-65 -64 -151.5 -87t-171.5 -2q-16 -70 -72 -115t-129 -45q-85 0 -145 60.5t-60 145.5q0 72 44.5 128t113.5 72q-22 86 1 173t88 152l12 12l151 -152l-11 -11q-37 -37 -37 -89t37 -90q37 -37 89 -37t89 37l30 30l151 152l161 160z
+M729 1145l12 -12l-152 -152l-12 12q-37 37 -89 37t-89 -37t-37 -89.5t37 -89.5l29 -29l152 -152l160 -160l-151 -152l-161 160l-151 152l-30 30q-68 67 -90 159.5t5 179.5q-70 15 -115 71t-45 129q0 85 60 145.5t145 60.5q76 0 133.5 -49t69.5 -123q84 20 169.5 -3.5
+t149.5 -87.5zM1536 78q0 -85 -60 -145.5t-145 -60.5q-74 0 -131 47t-71 118q-86 -28 -179.5 -6t-161.5 90l-11 12l151 152l12 -12q37 -37 89 -37t89 37t37 89t-37 89l-30 30l-152 152l-160 160l152 152l160 -160l152 -152l29 -30q64 -64 87.5 -150.5t2.5 -171.5
+q76 -11 126.5 -68.5t50.5 -134.5zM1534 1202q0 -77 -51 -135t-127 -69q26 -85 3 -176.5t-90 -158.5l-12 -12l-151 152l12 12q37 37 37 89t-37 89t-89 37t-89 -37l-30 -30l-152 -152l-160 -160l-152 152l161 160l152 152l29 30q67 67 159 89.5t178 -3.5q11 75 68.5 126
+t135.5 51q85 0 145 -60.5t60 -145.5z" />
+ <glyph glyph-name="f1ab" unicode="&#xf1ab;"
+d="M654 458q-1 -3 -12.5 0.5t-31.5 11.5l-20 9q-44 20 -87 49q-7 5 -41 31.5t-38 28.5q-67 -103 -134 -181q-81 -95 -105 -110q-4 -2 -19.5 -4t-18.5 0q6 4 82 92q21 24 85.5 115t78.5 118q17 30 51 98.5t36 77.5q-8 1 -110 -33q-8 -2 -27.5 -7.5t-34.5 -9.5t-17 -5
+q-2 -2 -2 -10.5t-1 -9.5q-5 -10 -31 -15q-23 -7 -47 0q-18 4 -28 21q-4 6 -5 23q6 2 24.5 5t29.5 6q58 16 105 32q100 35 102 35q10 2 43 19.5t44 21.5q9 3 21.5 8t14.5 5.5t6 -0.5q2 -12 -1 -33q0 -2 -12.5 -27t-26.5 -53.5t-17 -33.5q-25 -50 -77 -131l64 -28
+q12 -6 74.5 -32t67.5 -28q4 -1 10.5 -25.5t4.5 -30.5zM449 944q3 -15 -4 -28q-12 -23 -50 -38q-30 -12 -60 -12q-26 3 -49 26q-14 15 -18 41l1 3q3 -3 19.5 -5t26.5 0t58 16q36 12 55 14q17 0 21 -17zM1147 815l63 -227l-139 42zM39 15l694 232v1032l-694 -233v-1031z
+M1280 332l102 -31l-181 657l-100 31l-216 -536l102 -31l45 110l211 -65zM777 1294l573 -184v380zM1088 -29l158 -13l-54 -160l-40 66q-130 -83 -276 -108q-58 -12 -91 -12h-84q-79 0 -199.5 39t-183.5 85q-8 7 -8 16q0 8 5 13.5t13 5.5q4 0 18 -7.5t30.5 -16.5t20.5 -11
+q73 -37 159.5 -61.5t157.5 -24.5q95 0 167 14.5t157 50.5q15 7 30.5 15.5t34 19t28.5 16.5zM1536 1050v-1079l-774 246q-14 -6 -375 -127.5t-368 -121.5q-13 0 -18 13q0 1 -1 3v1078q3 9 4 10q5 6 20 11q107 36 149 50v384l558 -198q2 0 160.5 55t316 108.5t161.5 53.5
+q20 0 20 -21v-418z" />
+ <glyph glyph-name="_402" unicode="&#xf1ac;" horiz-adv-x="1792"
+d="M288 1152q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-128q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h128zM1664 989q58 -34 93 -93t35 -128v-768q0 -106 -75 -181t-181 -75h-864q-66 0 -113 47t-47 113v1536q0 40 28 68t68 28h672q40 0 88 -20t76 -48
+l152 -152q28 -28 48 -76t20 -88v-163zM928 0v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM928 256v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM928 512v128q0 14 -9 23
+t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM1184 0v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM1184 256v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128
+q14 0 23 9t9 23zM1184 512v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM1440 0v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM1440 256v128q0 14 -9 23t-23 9h-128
+q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM1440 512v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128q0 -14 9 -23t23 -9h128q14 0 23 9t9 23zM1536 896v256h-160q-40 0 -68 28t-28 68v160h-640v-512h896z" />
+ <glyph glyph-name="_403" unicode="&#xf1ad;"
+d="M1344 1536q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45v1664q0 26 19 45t45 19h1280zM512 1248v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23zM512 992v-64q0 -14 9 -23t23 -9h64q14 0 23 9
+t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23zM512 736v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23zM512 480v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23zM384 160v64
+q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM384 416v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM384 672v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64
+q14 0 23 9t9 23zM384 928v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM384 1184v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM896 -96v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9
+t-9 -23v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM896 416v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM896 672v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM896 928v64
+q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM896 1184v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1152 160v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64
+q14 0 23 9t9 23zM1152 416v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1152 672v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1152 928v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9
+t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1152 1184v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h64q14 0 23 9t9 23z" />
+ <glyph glyph-name="_404" unicode="&#xf1ae;" horiz-adv-x="1280"
+d="M1188 988l-292 -292v-824q0 -46 -33 -79t-79 -33t-79 33t-33 79v384h-64v-384q0 -46 -33 -79t-79 -33t-79 33t-33 79v824l-292 292q-28 28 -28 68t28 68q29 28 68.5 28t67.5 -28l228 -228h368l228 228q28 28 68 28t68 -28q28 -29 28 -68.5t-28 -67.5zM864 1152
+q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5z" />
+ <glyph glyph-name="uniF1B1" unicode="&#xf1b0;" horiz-adv-x="1664"
+d="M780 1064q0 -60 -19 -113.5t-63 -92.5t-105 -39q-76 0 -138 57.5t-92 135.5t-30 151q0 60 19 113.5t63 92.5t105 39q77 0 138.5 -57.5t91.5 -135t30 -151.5zM438 581q0 -80 -42 -139t-119 -59q-76 0 -141.5 55.5t-100.5 133.5t-35 152q0 80 42 139.5t119 59.5
+q76 0 141.5 -55.5t100.5 -134t35 -152.5zM832 608q118 0 255 -97.5t229 -237t92 -254.5q0 -46 -17 -76.5t-48.5 -45t-64.5 -20t-76 -5.5q-68 0 -187.5 45t-182.5 45q-66 0 -192.5 -44.5t-200.5 -44.5q-183 0 -183 146q0 86 56 191.5t139.5 192.5t187.5 146t193 59zM1071 819
+q-61 0 -105 39t-63 92.5t-19 113.5q0 74 30 151.5t91.5 135t138.5 57.5q61 0 105 -39t63 -92.5t19 -113.5q0 -73 -30 -151t-92 -135.5t-138 -57.5zM1503 923q77 0 119 -59.5t42 -139.5q0 -74 -35 -152t-100.5 -133.5t-141.5 -55.5q-77 0 -119 59t-42 139q0 74 35 152.5
+t100.5 134t141.5 55.5z" />
+ <glyph glyph-name="_406" unicode="&#xf1b1;" horiz-adv-x="768"
+d="M704 1008q0 -145 -57 -243.5t-152 -135.5l45 -821q2 -26 -16 -45t-44 -19h-192q-26 0 -44 19t-16 45l45 821q-95 37 -152 135.5t-57 243.5q0 128 42.5 249.5t117.5 200t160 78.5t160 -78.5t117.5 -200t42.5 -249.5z" />
+ <glyph glyph-name="_407" unicode="&#xf1b2;" horiz-adv-x="1792"
+d="M896 -93l640 349v636l-640 -233v-752zM832 772l698 254l-698 254l-698 -254zM1664 1024v-768q0 -35 -18 -65t-49 -47l-704 -384q-28 -16 -61 -16t-61 16l-704 384q-31 17 -49 47t-18 65v768q0 40 23 73t61 47l704 256q22 8 44 8t44 -8l704 -256q38 -14 61 -47t23 -73z
+" />
+ <glyph glyph-name="_408" unicode="&#xf1b3;" horiz-adv-x="2304"
+d="M640 -96l384 192v314l-384 -164v-342zM576 358l404 173l-404 173l-404 -173zM1664 -96l384 192v314l-384 -164v-342zM1600 358l404 173l-404 173l-404 -173zM1152 651l384 165v266l-384 -164v-267zM1088 1030l441 189l-441 189l-441 -189zM2176 512v-416q0 -36 -19 -67
+t-52 -47l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-4 2 -7 4q-2 -2 -7 -4l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-33 16 -52 47t-19 67v416q0 38 21.5 70t56.5 48l434 186v400q0 38 21.5 70t56.5 48l448 192q23 10 50 10t50 -10l448 -192q35 -16 56.5 -48t21.5 -70
+v-400l434 -186q36 -16 57 -48t21 -70z" />
+ <glyph glyph-name="_409" unicode="&#xf1b4;" horiz-adv-x="2048"
+d="M1848 1197h-511v-124h511v124zM1596 771q-90 0 -146 -52.5t-62 -142.5h408q-18 195 -200 195zM1612 186q63 0 122 32t76 87h221q-100 -307 -427 -307q-214 0 -340.5 132t-126.5 347q0 208 130.5 345.5t336.5 137.5q138 0 240.5 -68t153 -179t50.5 -248q0 -17 -2 -47h-658
+q0 -111 57.5 -171.5t166.5 -60.5zM277 236h296q205 0 205 167q0 180 -199 180h-302v-347zM277 773h281q78 0 123.5 36.5t45.5 113.5q0 144 -190 144h-260v-294zM0 1282h594q87 0 155 -14t126.5 -47.5t90 -96.5t31.5 -154q0 -181 -172 -263q114 -32 172 -115t58 -204
+q0 -75 -24.5 -136.5t-66 -103.5t-98.5 -71t-121 -42t-134 -13h-611v1260z" />
+ <glyph glyph-name="_410" unicode="&#xf1b5;"
+d="M1248 1408q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960zM499 1041h-371v-787h382q117 0 197 57.5t80 170.5q0 158 -143 200q107 52 107 164q0 57 -19.5 96.5
+t-56.5 60.5t-79 29.5t-97 8.5zM477 723h-176v184h163q119 0 119 -90q0 -94 -106 -94zM486 388h-185v217h189q124 0 124 -113q0 -104 -128 -104zM1136 356q-68 0 -104 38t-36 107h411q1 10 1 30q0 132 -74.5 220.5t-203.5 88.5q-128 0 -210 -86t-82 -216q0 -135 79 -217
+t213 -82q205 0 267 191h-138q-11 -34 -47.5 -54t-75.5 -20zM1126 722q113 0 124 -122h-254q4 56 39 89t91 33zM964 988h319v-77h-319v77z" />
+ <glyph glyph-name="_411" unicode="&#xf1b6;" horiz-adv-x="1792"
+d="M1582 954q0 -101 -71.5 -172.5t-172.5 -71.5t-172.5 71.5t-71.5 172.5t71.5 172.5t172.5 71.5t172.5 -71.5t71.5 -172.5zM812 212q0 104 -73 177t-177 73q-27 0 -54 -6l104 -42q77 -31 109.5 -106.5t1.5 -151.5q-31 -77 -107 -109t-152 -1q-21 8 -62 24.5t-61 24.5
+q32 -60 91 -96.5t130 -36.5q104 0 177 73t73 177zM1642 953q0 126 -89.5 215.5t-215.5 89.5q-127 0 -216.5 -89.5t-89.5 -215.5q0 -127 89.5 -216t216.5 -89q126 0 215.5 89t89.5 216zM1792 953q0 -189 -133.5 -322t-321.5 -133l-437 -319q-12 -129 -109 -218t-229 -89
+q-121 0 -214 76t-118 192l-230 92v429l389 -157q79 48 173 48q13 0 35 -2l284 407q2 187 135.5 319t320.5 132q188 0 321.5 -133.5t133.5 -321.5z" />
+ <glyph glyph-name="_412" unicode="&#xf1b7;"
+d="M1242 889q0 80 -57 136.5t-137 56.5t-136.5 -57t-56.5 -136q0 -80 56.5 -136.5t136.5 -56.5t137 56.5t57 136.5zM632 301q0 -83 -58 -140.5t-140 -57.5q-56 0 -103 29t-72 77q52 -20 98 -40q60 -24 120 1.5t85 86.5q24 60 -1.5 120t-86.5 84l-82 33q22 5 42 5
+q82 0 140 -57.5t58 -140.5zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v153l172 -69q20 -92 93.5 -152t168.5 -60q104 0 181 70t87 173l345 252q150 0 255.5 105.5t105.5 254.5q0 150 -105.5 255.5t-255.5 105.5
+q-148 0 -253 -104.5t-107 -252.5l-225 -322q-9 1 -28 1q-75 0 -137 -37l-297 119v468q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5zM1289 887q0 -100 -71 -170.5t-171 -70.5t-170.5 70.5t-70.5 170.5t70.5 171t170.5 71q101 0 171.5 -70.5t70.5 -171.5z
+" />
+ <glyph glyph-name="_413" unicode="&#xf1b8;" horiz-adv-x="1792"
+d="M836 367l-15 -368l-2 -22l-420 29q-36 3 -67 31.5t-47 65.5q-11 27 -14.5 55t4 65t12 55t21.5 64t19 53q78 -12 509 -28zM449 953l180 -379l-147 92q-63 -72 -111.5 -144.5t-72.5 -125t-39.5 -94.5t-18.5 -63l-4 -21l-190 357q-17 26 -18 56t6 47l8 18q35 63 114 188
+l-140 86zM1680 436l-188 -359q-12 -29 -36.5 -46.5t-43.5 -20.5l-18 -4q-71 -7 -219 -12l8 -164l-230 367l211 362l7 -173q170 -16 283 -5t170 33zM895 1360q-47 -63 -265 -435l-317 187l-19 12l225 356q20 31 60 45t80 10q24 -2 48.5 -12t42 -21t41.5 -33t36 -34.5
+t36 -39.5t32 -35zM1550 1053l212 -363q18 -37 12.5 -76t-27.5 -74q-13 -20 -33 -37t-38 -28t-48.5 -22t-47 -16t-51.5 -14t-46 -12q-34 72 -265 436l313 195zM1407 1279l142 83l-220 -373l-419 20l151 86q-34 89 -75 166t-75.5 123.5t-64.5 80t-47 46.5l-17 13l405 -1
+q31 3 58 -10.5t39 -28.5l11 -15q39 -61 112 -190z" />
+ <glyph glyph-name="_414" unicode="&#xf1b9;" horiz-adv-x="2048"
+d="M480 448q0 66 -47 113t-113 47t-113 -47t-47 -113t47 -113t113 -47t113 47t47 113zM516 768h1016l-89 357q-2 8 -14 17.5t-21 9.5h-768q-9 0 -21 -9.5t-14 -17.5zM1888 448q0 66 -47 113t-113 47t-113 -47t-47 -113t47 -113t113 -47t113 47t47 113zM2048 544v-384
+q0 -14 -9 -23t-23 -9h-96v-128q0 -80 -56 -136t-136 -56t-136 56t-56 136v128h-1024v-128q0 -80 -56 -136t-136 -56t-136 56t-56 136v128h-96q-14 0 -23 9t-9 23v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h768q98 0 179 -63.5t104 -157.5
+l105 -419h28q93 0 158.5 -65.5t65.5 -158.5z" />
+ <glyph glyph-name="_415" unicode="&#xf1ba;" horiz-adv-x="2048"
+d="M1824 640q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-96v-64q0 -80 -56 -136t-136 -56t-136 56t-56 136v64h-1024v-64q0 -80 -56 -136t-136 -56t-136 56t-56 136v64h-96q-14 0 -23 9t-9 23v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5
+t179 63.5h128v224q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-224h128q98 0 179 -63.5t104 -157.5l105 -419h28zM320 160q66 0 113 47t47 113t-47 113t-113 47t-113 -47t-47 -113t47 -113t113 -47zM516 640h1016l-89 357q-2 8 -14 17.5t-21 9.5h-768q-9 0 -21 -9.5t-14 -17.5z
+M1728 160q66 0 113 47t47 113t-47 113t-113 47t-113 -47t-47 -113t47 -113t113 -47z" />
+ <glyph glyph-name="_416" unicode="&#xf1bb;"
+d="M1504 64q0 -26 -19 -45t-45 -19h-462q1 -17 6 -87.5t5 -108.5q0 -25 -18 -42.5t-43 -17.5h-320q-25 0 -43 17.5t-18 42.5q0 38 5 108.5t6 87.5h-462q-26 0 -45 19t-19 45t19 45l402 403h-229q-26 0 -45 19t-19 45t19 45l402 403h-197q-26 0 -45 19t-19 45t19 45l384 384
+q19 19 45 19t45 -19l384 -384q19 -19 19 -45t-19 -45t-45 -19h-197l402 -403q19 -19 19 -45t-19 -45t-45 -19h-229l402 -403q19 -19 19 -45z" />
+ <glyph glyph-name="_417" unicode="&#xf1bc;"
+d="M1127 326q0 32 -30 51q-193 115 -447 115q-133 0 -287 -34q-42 -9 -42 -52q0 -20 13.5 -34.5t35.5 -14.5q5 0 37 8q132 27 243 27q226 0 397 -103q19 -11 33 -11q19 0 33 13.5t14 34.5zM1223 541q0 40 -35 61q-237 141 -548 141q-153 0 -303 -42q-48 -13 -48 -64
+q0 -25 17.5 -42.5t42.5 -17.5q7 0 37 8q122 33 251 33q279 0 488 -124q24 -13 38 -13q25 0 42.5 17.5t17.5 42.5zM1331 789q0 47 -40 70q-126 73 -293 110.5t-343 37.5q-204 0 -364 -47q-23 -7 -38.5 -25.5t-15.5 -48.5q0 -31 20.5 -52t51.5 -21q11 0 40 8q133 37 307 37
+q159 0 309.5 -34t253.5 -95q21 -12 40 -12q29 0 50.5 20.5t21.5 51.5zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_418" unicode="&#xf1bd;" horiz-adv-x="1024"
+d="M1024 1233l-303 -582l24 -31h279v-415h-507l-44 -30l-142 -273l-30 -30h-301v303l303 583l-24 30h-279v415h507l44 30l142 273l30 30h301v-303z" />
+ <glyph glyph-name="_419" unicode="&#xf1be;" horiz-adv-x="2304"
+d="M784 164l16 241l-16 523q-1 10 -7.5 17t-16.5 7q-9 0 -16 -7t-7 -17l-14 -523l14 -241q1 -10 7.5 -16.5t15.5 -6.5q22 0 24 23zM1080 193l11 211l-12 586q0 16 -13 24q-8 5 -16 5t-16 -5q-13 -8 -13 -24l-1 -6l-10 -579q0 -1 11 -236v-1q0 -10 6 -17q9 -11 23 -11
+q11 0 20 9q9 7 9 20zM35 533l20 -128l-20 -126q-2 -9 -9 -9t-9 9l-17 126l17 128q2 9 9 9t9 -9zM121 612l26 -207l-26 -203q-2 -9 -10 -9q-9 0 -9 10l-23 202l23 207q0 9 9 9q8 0 10 -9zM401 159zM213 650l25 -245l-25 -237q0 -11 -11 -11q-10 0 -12 11l-21 237l21 245
+q2 12 12 12q11 0 11 -12zM307 657l23 -252l-23 -244q-2 -13 -14 -13q-13 0 -13 13l-21 244l21 252q0 13 13 13q12 0 14 -13zM401 639l21 -234l-21 -246q-2 -16 -16 -16q-6 0 -10.5 4.5t-4.5 11.5l-20 246l20 234q0 6 4.5 10.5t10.5 4.5q14 0 16 -15zM784 164zM495 785
+l21 -380l-21 -246q0 -7 -5 -12.5t-12 -5.5q-16 0 -18 18l-18 246l18 380q2 18 18 18q7 0 12 -5.5t5 -12.5zM589 871l19 -468l-19 -244q0 -8 -5.5 -13.5t-13.5 -5.5q-18 0 -20 19l-16 244l16 468q2 19 20 19q8 0 13.5 -5.5t5.5 -13.5zM687 911l18 -506l-18 -242
+q-2 -21 -22 -21q-19 0 -21 21l-16 242l16 506q0 9 6.5 15.5t14.5 6.5q9 0 15 -6.5t7 -15.5zM1079 169v0v0v0zM881 915l15 -510l-15 -239q0 -10 -7.5 -17.5t-17.5 -7.5t-17 7t-8 18l-14 239l14 510q0 11 7.5 18t17.5 7t17.5 -7t7.5 -18zM980 896l14 -492l-14 -236
+q0 -11 -8 -19t-19 -8t-19 8t-9 19l-12 236l12 492q1 12 9 20t19 8t18.5 -8t8.5 -20zM1192 404l-14 -231v0q0 -13 -9 -22t-22 -9t-22 9t-10 22l-6 114l-6 117l12 636v3q2 15 12 24q9 7 20 7q8 0 15 -5q14 -8 16 -26zM2304 423q0 -117 -83 -199.5t-200 -82.5h-786
+q-13 2 -22 11t-9 22v899q0 23 28 33q85 34 181 34q195 0 338 -131.5t160 -323.5q53 22 110 22q117 0 200 -83t83 -201z" />
+ <glyph glyph-name="uniF1C0" unicode="&#xf1c0;"
+d="M768 768q237 0 443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128v170q119 -84 325 -127t443 -43zM768 0q237 0 443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128v170q119 -84 325 -127
+t443 -43zM768 384q237 0 443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128v170q119 -84 325 -127t443 -43zM768 1536q208 0 385 -34.5t280 -93.5t103 -128v-128q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5
+t-103 128v128q0 69 103 128t280 93.5t385 34.5z" />
+ <glyph glyph-name="uniF1C1" unicode="&#xf1c1;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M894 465q33 -26 84 -56q59 7 117 7q147 0 177 -49q16 -22 2 -52q0 -1 -1 -2l-2 -2v-1q-6 -38 -71 -38q-48 0 -115 20t-130 53q-221 -24 -392 -83q-153 -262 -242 -262q-15 0 -28 7l-24 12q-1 1 -6 5q-10 10 -6 36q9 40 56 91.5t132 96.5q14 9 23 -6q2 -2 2 -4q52 85 107 197
+q68 136 104 262q-24 82 -30.5 159.5t6.5 127.5q11 40 42 40h21h1q23 0 35 -15q18 -21 9 -68q-2 -6 -4 -8q1 -3 1 -8v-30q-2 -123 -14 -192q55 -164 146 -238zM318 54q52 24 137 158q-51 -40 -87.5 -84t-49.5 -74zM716 974q-15 -42 -2 -132q1 7 7 44q0 3 7 43q1 4 4 8
+q-1 1 -1 2q-1 2 -1 3q-1 22 -13 36q0 -1 -1 -2v-2zM592 313q135 54 284 81q-2 1 -13 9.5t-16 13.5q-76 67 -127 176q-27 -86 -83 -197q-30 -56 -45 -83zM1238 329q-24 24 -140 24q76 -28 124 -28q14 0 18 1q0 1 -2 3z" />
+ <glyph glyph-name="_422" unicode="&#xf1c2;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M233 768v-107h70l164 -661h159l128 485q7 20 10 46q2 16 2 24h4l3 -24q1 -3 3.5 -20t5.5 -26l128 -485h159l164 661h70v107h-300v-107h90l-99 -438q-5 -20 -7 -46l-2 -21h-4q0 3 -0.5 6.5t-1.5 8t-1 6.5q-1 5 -4 21t-5 25l-144 545h-114l-144 -545q-2 -9 -4.5 -24.5
+t-3.5 -21.5l-4 -21h-4l-2 21q-2 26 -7 46l-99 438h90v107h-300z" />
+ <glyph glyph-name="_423" unicode="&#xf1c3;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M429 106v-106h281v106h-75l103 161q5 7 10 16.5t7.5 13.5t3.5 4h2q1 -4 5 -10q2 -4 4.5 -7.5t6 -8t6.5 -8.5l107 -161h-76v-106h291v106h-68l-192 273l195 282h67v107h-279v-107h74l-103 -159q-4 -7 -10 -16.5t-9 -13.5l-2 -3h-2q-1 4 -5 10q-6 11 -17 23l-106 159h76v107
+h-290v-107h68l189 -272l-194 -283h-68z" />
+ <glyph glyph-name="_424" unicode="&#xf1c4;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M416 106v-106h327v106h-93v167h137q76 0 118 15q67 23 106.5 87t39.5 146q0 81 -37 141t-100 87q-48 19 -130 19h-368v-107h92v-555h-92zM769 386h-119v268h120q52 0 83 -18q56 -33 56 -115q0 -89 -62 -120q-31 -15 -78 -15z" />
+ <glyph glyph-name="_425" unicode="&#xf1c5;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M1280 320v-320h-1024v192l192 192l128 -128l384 384zM448 512q-80 0 -136 56t-56 136t56 136t136 56t136 -56t56 -136t-56 -136t-136 -56z" />
+ <glyph glyph-name="_426" unicode="&#xf1c6;"
+d="M640 1152v128h-128v-128h128zM768 1024v128h-128v-128h128zM640 896v128h-128v-128h128zM768 768v128h-128v-128h128zM1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400
+v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-128v-128h-128v128h-512v-1536h1280zM781 593l107 -349q8 -27 8 -52q0 -83 -72.5 -137.5t-183.5 -54.5t-183.5 54.5t-72.5 137.5q0 25 8 52q21 63 120 396v128h128v-128h79
+q22 0 39 -13t23 -34zM640 128q53 0 90.5 19t37.5 45t-37.5 45t-90.5 19t-90.5 -19t-37.5 -45t37.5 -45t90.5 -19z" />
+ <glyph glyph-name="_427" unicode="&#xf1c7;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M620 686q20 -8 20 -30v-544q0 -22 -20 -30q-8 -2 -12 -2q-12 0 -23 9l-166 167h-131q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h131l166 167q16 15 35 7zM1037 -3q31 0 50 24q129 159 129 363t-129 363q-16 21 -43 24t-47 -14q-21 -17 -23.5 -43.5t14.5 -47.5
+q100 -123 100 -282t-100 -282q-17 -21 -14.5 -47.5t23.5 -42.5q18 -15 40 -15zM826 145q27 0 47 20q87 93 87 219t-87 219q-18 19 -45 20t-46 -17t-20 -44.5t18 -46.5q52 -57 52 -131t-52 -131q-19 -20 -18 -46.5t20 -44.5q20 -17 44 -17z" />
+ <glyph glyph-name="_428" unicode="&#xf1c8;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M768 768q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-384q-52 0 -90 38t-38 90v384q0 52 38 90t90 38h384zM1260 766q20 -8 20 -30v-576q0 -22 -20 -30q-8 -2 -12 -2q-14 0 -23 9l-265 266v90l265 266q9 9 23 9q4 0 12 -2z" />
+ <glyph glyph-name="_429" unicode="&#xf1c9;"
+d="M1468 1156q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48zM1024 1400v-376h376q-10 29 -22 41l-313 313q-12 12 -41 22zM1408 -128v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536h1280z
+M480 768q8 11 21 12.5t24 -6.5l51 -38q11 -8 12.5 -21t-6.5 -24l-182 -243l182 -243q8 -11 6.5 -24t-12.5 -21l-51 -38q-11 -8 -24 -6.5t-21 12.5l-226 301q-14 19 0 38zM1282 467q14 -19 0 -38l-226 -301q-8 -11 -21 -12.5t-24 6.5l-51 38q-11 8 -12.5 21t6.5 24l182 243
+l-182 243q-8 11 -6.5 24t12.5 21l51 38q11 8 24 6.5t21 -12.5zM662 6q-13 2 -20.5 13t-5.5 24l138 831q2 13 13 20.5t24 5.5l63 -10q13 -2 20.5 -13t5.5 -24l-138 -831q-2 -13 -13 -20.5t-24 -5.5z" />
+ <glyph glyph-name="_430" unicode="&#xf1ca;"
+d="M1497 709v-198q-101 -23 -198 -23q-65 -136 -165.5 -271t-181.5 -215.5t-128 -106.5q-80 -45 -162 3q-28 17 -60.5 43.5t-85 83.5t-102.5 128.5t-107.5 184t-105.5 244t-91.5 314.5t-70.5 390h283q26 -218 70 -398.5t104.5 -317t121.5 -235.5t140 -195q169 169 287 406
+q-142 72 -223 220t-81 333q0 192 104 314.5t284 122.5q178 0 273 -105.5t95 -297.5q0 -159 -58 -286q-7 -1 -19.5 -3t-46 -2t-63 6t-62 25.5t-50.5 51.5q31 103 31 184q0 87 -29 132t-79 45q-53 0 -85 -49.5t-32 -140.5q0 -186 105 -293.5t267 -107.5q62 0 121 14z" />
+ <glyph glyph-name="_431" unicode="&#xf1cb;" horiz-adv-x="1792"
+d="M216 367l603 -402v359l-334 223zM154 511l193 129l-193 129v-258zM973 -35l603 402l-269 180l-334 -223v-359zM896 458l272 182l-272 182l-272 -182zM485 733l334 223v359l-603 -402zM1445 640l193 -129v258zM1307 733l269 180l-603 402v-359zM1792 913v-546
+q0 -41 -34 -64l-819 -546q-21 -13 -43 -13t-43 13l-819 546q-34 23 -34 64v546q0 41 34 64l819 546q21 13 43 13t43 -13l819 -546q34 -23 34 -64z" />
+ <glyph glyph-name="_432" unicode="&#xf1cc;" horiz-adv-x="2048"
+d="M1800 764q111 -46 179.5 -145.5t68.5 -221.5q0 -164 -118 -280.5t-285 -116.5q-4 0 -11.5 0.5t-10.5 0.5h-1209h-1h-2h-5q-170 10 -288 125.5t-118 280.5q0 110 55 203t147 147q-12 39 -12 82q0 115 82 196t199 81q95 0 172 -58q75 154 222.5 248t326.5 94
+q166 0 306 -80.5t221.5 -218.5t81.5 -301q0 -6 -0.5 -18t-0.5 -18zM468 498q0 -122 84 -193t208 -71q137 0 240 99q-16 20 -47.5 56.5t-43.5 50.5q-67 -65 -144 -65q-55 0 -93.5 33.5t-38.5 87.5q0 53 38.5 87t91.5 34q44 0 84.5 -21t73 -55t65 -75t69 -82t77 -75t97 -55
+t121.5 -21q121 0 204.5 71.5t83.5 190.5q0 121 -84 192t-207 71q-143 0 -241 -97l93 -108q66 64 142 64q52 0 92 -33t40 -84q0 -57 -37 -91.5t-94 -34.5q-43 0 -82.5 21t-72 55t-65.5 75t-69.5 82t-77.5 75t-96.5 55t-118.5 21q-122 0 -207 -70.5t-85 -189.5z" />
+ <glyph glyph-name="_433" unicode="&#xf1cd;" horiz-adv-x="1792"
+d="M896 1536q182 0 348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71zM896 1408q-190 0 -361 -90l194 -194q82 28 167 28t167 -28l194 194q-171 90 -361 90zM218 279l194 194
+q-28 82 -28 167t28 167l-194 194q-90 -171 -90 -361t90 -361zM896 -128q190 0 361 90l-194 194q-82 -28 -167 -28t-167 28l-194 -194q171 -90 361 -90zM896 256q159 0 271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5t112.5 -271.5
+t271.5 -112.5zM1380 473l194 -194q90 171 90 361t-90 361l-194 -194q28 -82 28 -167t-28 -167z" />
+ <glyph glyph-name="_434" unicode="&#xf1ce;" horiz-adv-x="1792"
+d="M1760 640q0 -176 -68.5 -336t-184 -275.5t-275.5 -184t-336 -68.5t-336 68.5t-275.5 184t-184 275.5t-68.5 336q0 213 97 398.5t265 305.5t374 151v-228q-221 -45 -366.5 -221t-145.5 -406q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5
+t136.5 204t51 248.5q0 230 -145.5 406t-366.5 221v228q206 -31 374 -151t265 -305.5t97 -398.5z" />
+ <glyph glyph-name="uniF1D0" unicode="&#xf1d0;" horiz-adv-x="1792"
+d="M19 662q8 217 116 406t305 318h5q0 -1 -1 -3q-8 -8 -28 -33.5t-52 -76.5t-60 -110.5t-44.5 -135.5t-14 -150.5t39 -157.5t108.5 -154q50 -50 102 -69.5t90.5 -11.5t69.5 23.5t47 32.5l16 16q39 51 53 116.5t6.5 122.5t-21 107t-26.5 80l-14 29q-10 25 -30.5 49.5t-43 41
+t-43.5 29.5t-35 19l-13 6l104 115q39 -17 78 -52t59 -61l19 -27q1 48 -18.5 103.5t-40.5 87.5l-20 31l161 183l160 -181q-33 -46 -52.5 -102.5t-22.5 -90.5l-4 -33q22 37 61.5 72.5t67.5 52.5l28 17l103 -115q-44 -14 -85 -50t-60 -65l-19 -29q-31 -56 -48 -133.5t-7 -170
+t57 -156.5q33 -45 77.5 -60.5t85 -5.5t76 26.5t57.5 33.5l21 16q60 53 96.5 115t48.5 121.5t10 121.5t-18 118t-37 107.5t-45.5 93t-45 72t-34.5 47.5l-13 17q-14 13 -7 13l10 -3q40 -29 62.5 -46t62 -50t64 -58t58.5 -65t55.5 -77t45.5 -88t38 -103t23.5 -117t10.5 -136
+q3 -259 -108 -465t-312 -321t-456 -115q-185 0 -351 74t-283.5 198t-184 293t-60.5 353z" />
+ <glyph glyph-name="uniF1D1" unicode="&#xf1d1;" horiz-adv-x="1792"
+d="M874 -102v-66q-208 6 -385 109.5t-283 275.5l58 34q29 -49 73 -99l65 57q148 -168 368 -212l-17 -86q65 -12 121 -13zM276 428l-83 -28q22 -60 49 -112l-57 -33q-98 180 -98 385t98 385l57 -33q-30 -56 -49 -112l82 -28q-35 -100 -35 -212q0 -109 36 -212zM1528 251
+l58 -34q-106 -172 -283 -275.5t-385 -109.5v66q56 1 121 13l-17 86q220 44 368 212l65 -57q44 50 73 99zM1377 805l-233 -80q14 -42 14 -85t-14 -85l232 -80q-31 -92 -98 -169l-185 162q-57 -67 -147 -85l48 -241q-52 -10 -98 -10t-98 10l48 241q-90 18 -147 85l-185 -162
+q-67 77 -98 169l232 80q-14 42 -14 85t14 85l-233 80q33 93 99 169l185 -162q59 68 147 86l-48 240q44 10 98 10t98 -10l-48 -240q88 -18 147 -86l185 162q66 -76 99 -169zM874 1448v-66q-65 -2 -121 -13l17 -86q-220 -42 -368 -211l-65 56q-38 -42 -73 -98l-57 33
+q106 172 282 275.5t385 109.5zM1705 640q0 -205 -98 -385l-57 33q27 52 49 112l-83 28q36 103 36 212q0 112 -35 212l82 28q-19 56 -49 112l57 33q98 -180 98 -385zM1585 1063l-57 -33q-35 56 -73 98l-65 -56q-148 169 -368 211l17 86q-56 11 -121 13v66q209 -6 385 -109.5
+t282 -275.5zM1748 640q0 173 -67.5 331t-181.5 272t-272 181.5t-331 67.5t-331 -67.5t-272 -181.5t-181.5 -272t-67.5 -331t67.5 -331t181.5 -272t272 -181.5t331 -67.5t331 67.5t272 181.5t181.5 272t67.5 331zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71
+t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="uniF1D2" unicode="&#xf1d2;"
+d="M582 228q0 -66 -93 -66q-107 0 -107 63q0 64 98 64q102 0 102 -61zM546 694q0 -85 -74 -85q-77 0 -77 84q0 90 77 90q36 0 55 -25.5t19 -63.5zM712 769v125q-78 -29 -135 -29q-50 29 -110 29q-86 0 -145 -57t-59 -143q0 -50 29.5 -102t73.5 -67v-3q-38 -17 -38 -85
+q0 -53 41 -77v-3q-113 -37 -113 -139q0 -45 20 -78.5t54 -51t72 -25.5t81 -8q224 0 224 188q0 67 -48 99t-126 46q-27 5 -51.5 20.5t-24.5 39.5q0 44 49 52q77 15 122 70t45 134q0 24 -10 52q37 9 49 13zM771 350h137q-2 27 -2 82v387q0 46 2 69h-137q3 -23 3 -71v-392
+q0 -50 -3 -75zM1280 366v121q-30 -21 -68 -21q-53 0 -53 82v225h52q9 0 26.5 -1t26.5 -1v117h-105q0 82 3 102h-140q4 -24 4 -55v-47h-60v-117q36 3 37 3q3 0 11 -0.5t12 -0.5v-2h-2v-217q0 -37 2.5 -64t11.5 -56.5t24.5 -48.5t43.5 -31t66 -12q64 0 108 24zM924 1072
+q0 36 -24 63.5t-60 27.5t-60.5 -27t-24.5 -64q0 -36 25 -62.5t60 -26.5t59.5 27t24.5 62zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_438" unicode="&#xf1d3;" horiz-adv-x="1792"
+d="M595 22q0 100 -165 100q-158 0 -158 -104q0 -101 172 -101q151 0 151 105zM536 777q0 61 -30 102t-89 41q-124 0 -124 -145q0 -135 124 -135q119 0 119 137zM805 1101v-202q-36 -12 -79 -22q16 -43 16 -84q0 -127 -73 -216.5t-197 -112.5q-40 -8 -59.5 -27t-19.5 -58
+q0 -31 22.5 -51.5t58 -32t78.5 -22t86 -25.5t78.5 -37.5t58 -64t22.5 -98.5q0 -304 -363 -304q-69 0 -130 12.5t-116 41t-87.5 82t-32.5 127.5q0 165 182 225v4q-67 41 -67 126q0 109 63 137v4q-72 24 -119.5 108.5t-47.5 165.5q0 139 95 231.5t235 92.5q96 0 178 -47
+q98 0 218 47zM1123 220h-222q4 45 4 134v609q0 94 -4 128h222q-4 -33 -4 -124v-613q0 -89 4 -134zM1724 442v-196q-71 -39 -174 -39q-62 0 -107 20t-70 50t-39.5 78t-18.5 92t-4 103v351h2v4q-7 0 -19 1t-18 1q-21 0 -59 -6v190h96v76q0 54 -6 89h227q-6 -41 -6 -165h171
+v-190q-15 0 -43.5 2t-42.5 2h-85v-365q0 -131 87 -131q61 0 109 33zM1148 1389q0 -58 -39 -101.5t-96 -43.5q-58 0 -98 43.5t-40 101.5q0 59 39.5 103t98.5 44q58 0 96.5 -44.5t38.5 -102.5z" />
+ <glyph glyph-name="_439" unicode="&#xf1d4;"
+d="M809 532l266 499h-112l-157 -312q-24 -48 -44 -92l-42 92l-155 312h-120l263 -493v-324h101v318zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="uniF1D5" unicode="&#xf1d5;" horiz-adv-x="1280"
+d="M842 964q0 -80 -57 -136.5t-136 -56.5q-60 0 -111 35q-62 -67 -115 -146q-247 -371 -202 -859q1 -22 -12.5 -38.5t-34.5 -18.5h-5q-20 0 -35 13.5t-17 33.5q-14 126 -3.5 247.5t29.5 217t54 186t69 155.5t74 125q61 90 132 165q-16 35 -16 77q0 80 56.5 136.5t136.5 56.5
+t136.5 -56.5t56.5 -136.5zM1223 953q0 -158 -78 -292t-212.5 -212t-292.5 -78q-64 0 -131 14q-21 5 -32.5 23.5t-6.5 39.5q5 20 23 31.5t39 7.5q51 -13 108 -13q97 0 186 38t153 102t102 153t38 186t-38 186t-102 153t-153 102t-186 38t-186 -38t-153 -102t-102 -153
+t-38 -186q0 -114 52 -218q10 -20 3.5 -40t-25.5 -30t-39.5 -3t-30.5 26q-64 123 -64 265q0 119 46.5 227t124.5 186t186 124t226 46q158 0 292.5 -78t212.5 -212.5t78 -292.5z" />
+ <glyph glyph-name="uniF1D6" unicode="&#xf1d6;" horiz-adv-x="1792"
+d="M270 730q-8 19 -8 52q0 20 11 49t24 45q-1 22 7.5 53t22.5 43q0 139 92.5 288.5t217.5 209.5q139 66 324 66q133 0 266 -55q49 -21 90 -48t71 -56t55 -68t42 -74t32.5 -84.5t25.5 -89.5t22 -98l1 -5q55 -83 55 -150q0 -14 -9 -40t-9 -38q0 -1 1.5 -3.5t3.5 -5t2 -3.5
+q77 -114 120.5 -214.5t43.5 -208.5q0 -43 -19.5 -100t-55.5 -57q-9 0 -19.5 7.5t-19 17.5t-19 26t-16 26.5t-13.5 26t-9 17.5q-1 1 -3 1l-5 -4q-59 -154 -132 -223q20 -20 61.5 -38.5t69 -41.5t35.5 -65q-2 -4 -4 -16t-7 -18q-64 -97 -302 -97q-53 0 -110.5 9t-98 20
+t-104.5 30q-15 5 -23 7q-14 4 -46 4.5t-40 1.5q-41 -45 -127.5 -65t-168.5 -20q-35 0 -69 1.5t-93 9t-101 20.5t-74.5 40t-32.5 64q0 40 10 59.5t41 48.5q11 2 40.5 13t49.5 12q4 0 14 2q2 2 2 4l-2 3q-48 11 -108 105.5t-73 156.5l-5 3q-4 0 -12 -20q-18 -41 -54.5 -74.5
+t-77.5 -37.5h-1q-4 0 -6 4.5t-5 5.5q-23 54 -23 100q0 275 252 466z" />
+ <glyph glyph-name="uniF1D7" unicode="&#xf1d7;" horiz-adv-x="2048"
+d="M580 1075q0 41 -25 66t-66 25q-43 0 -76 -25.5t-33 -65.5q0 -39 33 -64.5t76 -25.5q41 0 66 24.5t25 65.5zM1323 568q0 28 -25.5 50t-65.5 22q-27 0 -49.5 -22.5t-22.5 -49.5q0 -28 22.5 -50.5t49.5 -22.5q40 0 65.5 22t25.5 51zM1087 1075q0 41 -24.5 66t-65.5 25
+q-43 0 -76 -25.5t-33 -65.5q0 -39 33 -64.5t76 -25.5q41 0 65.5 24.5t24.5 65.5zM1722 568q0 28 -26 50t-65 22q-27 0 -49.5 -22.5t-22.5 -49.5q0 -28 22.5 -50.5t49.5 -22.5q39 0 65 22t26 51zM1456 965q-31 4 -70 4q-169 0 -311 -77t-223.5 -208.5t-81.5 -287.5
+q0 -78 23 -152q-35 -3 -68 -3q-26 0 -50 1.5t-55 6.5t-44.5 7t-54.5 10.5t-50 10.5l-253 -127l72 218q-290 203 -290 490q0 169 97.5 311t264 223.5t363.5 81.5q176 0 332.5 -66t262 -182.5t136.5 -260.5zM2048 404q0 -117 -68.5 -223.5t-185.5 -193.5l55 -181l-199 109
+q-150 -37 -218 -37q-169 0 -311 70.5t-223.5 191.5t-81.5 264t81.5 264t223.5 191.5t311 70.5q161 0 303 -70.5t227.5 -192t85.5 -263.5z" />
+ <glyph glyph-name="_443" unicode="&#xf1d8;" horiz-adv-x="1792"
+d="M1764 1525q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-453 185l-242 -295q-18 -23 -49 -23q-13 0 -22 4q-19 7 -30.5 23.5t-11.5 36.5v349l864 1059l-1069 -925l-395 162q-37 14 -40 55q-2 40 32 59l1664 960q15 9 32 9q20 0 36 -11z" />
+ <glyph glyph-name="_444" unicode="&#xf1d9;" horiz-adv-x="1792"
+d="M1764 1525q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-527 215l-298 -327q-18 -21 -47 -21q-14 0 -23 4q-19 7 -30 23.5t-11 36.5v452l-472 193q-37 14 -40 55q-3 39 32 59l1664 960q35 21 68 -2zM1422 26l221 1323l-1434 -827l336 -137
+l863 639l-478 -797z" />
+ <glyph glyph-name="_445" unicode="&#xf1da;"
+d="M1536 640q0 -156 -61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5
+t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298zM896 928v-448q0 -14 -9 -23
+t-23 -9h-320q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h224v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="_446" unicode="&#xf1db;"
+d="M768 1280q-130 0 -248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5t-51 248.5t-136.5 204t-204 136.5t-248.5 51zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103
+t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_447" unicode="&#xf1dc;" horiz-adv-x="1792"
+d="M1682 -128q-44 0 -132.5 3.5t-133.5 3.5q-44 0 -132 -3.5t-132 -3.5q-24 0 -37 20.5t-13 45.5q0 31 17 46t39 17t51 7t45 15q33 21 33 140l-1 391q0 21 -1 31q-13 4 -50 4h-675q-38 0 -51 -4q-1 -10 -1 -31l-1 -371q0 -142 37 -164q16 -10 48 -13t57 -3.5t45 -15
+t20 -45.5q0 -26 -12.5 -48t-36.5 -22q-47 0 -139.5 3.5t-138.5 3.5q-43 0 -128 -3.5t-127 -3.5q-23 0 -35.5 21t-12.5 45q0 30 15.5 45t36 17.5t47.5 7.5t42 15q33 23 33 143l-1 57v813q0 3 0.5 26t0 36.5t-1.5 38.5t-3.5 42t-6.5 36.5t-11 31.5t-16 18q-15 10 -45 12t-53 2
+t-41 14t-18 45q0 26 12 48t36 22q46 0 138.5 -3.5t138.5 -3.5q42 0 126.5 3.5t126.5 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17 -43.5t-38.5 -14.5t-49.5 -4t-43 -13q-35 -21 -35 -160l1 -320q0 -21 1 -32q13 -3 39 -3h699q25 0 38 3q1 11 1 32l1 320q0 139 -35 160
+q-18 11 -58.5 12.5t-66 13t-25.5 49.5q0 26 12.5 48t37.5 22q44 0 132 -3.5t132 -3.5q43 0 129 3.5t129 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17.5 -44t-40 -14.5t-51.5 -3t-44 -12.5q-35 -23 -35 -161l1 -943q0 -119 34 -140q16 -10 46 -13.5t53.5 -4.5t41.5 -15.5t18 -44.5
+q0 -26 -12 -48t-36 -22z" />
+ <glyph glyph-name="_448" unicode="&#xf1dd;" horiz-adv-x="1280"
+d="M1278 1347v-73q0 -29 -18.5 -61t-42.5 -32q-50 0 -54 -1q-26 -6 -32 -31q-3 -11 -3 -64v-1152q0 -25 -18 -43t-43 -18h-108q-25 0 -43 18t-18 43v1218h-143v-1218q0 -25 -17.5 -43t-43.5 -18h-108q-26 0 -43.5 18t-17.5 43v496q-147 12 -245 59q-126 58 -192 179
+q-64 117 -64 259q0 166 88 286q88 118 209 159q111 37 417 37h479q25 0 43 -18t18 -43z" />
+ <glyph glyph-name="_449" unicode="&#xf1de;"
+d="M352 128v-128h-352v128h352zM704 256q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h256zM864 640v-128h-864v128h864zM224 1152v-128h-224v128h224zM1536 128v-128h-736v128h736zM576 1280q26 0 45 -19t19 -45v-256
+q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h256zM1216 768q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h256zM1536 640v-128h-224v128h224zM1536 1152v-128h-864v128h864z" />
+ <glyph glyph-name="uniF1E0" unicode="&#xf1e0;"
+d="M1216 512q133 0 226.5 -93.5t93.5 -226.5t-93.5 -226.5t-226.5 -93.5t-226.5 93.5t-93.5 226.5q0 12 2 34l-360 180q-92 -86 -218 -86q-133 0 -226.5 93.5t-93.5 226.5t93.5 226.5t226.5 93.5q126 0 218 -86l360 180q-2 22 -2 34q0 133 93.5 226.5t226.5 93.5
+t226.5 -93.5t93.5 -226.5t-93.5 -226.5t-226.5 -93.5q-126 0 -218 86l-360 -180q2 -22 2 -34t-2 -34l360 -180q92 86 218 86z" />
+ <glyph glyph-name="_451" unicode="&#xf1e1;"
+d="M1280 341q0 88 -62.5 151t-150.5 63q-84 0 -145 -58l-241 120q2 16 2 23t-2 23l241 120q61 -58 145 -58q88 0 150.5 63t62.5 151t-62.5 150.5t-150.5 62.5t-151 -62.5t-63 -150.5q0 -7 2 -23l-241 -120q-62 57 -145 57q-88 0 -150.5 -62.5t-62.5 -150.5t62.5 -150.5
+t150.5 -62.5q83 0 145 57l241 -120q-2 -16 -2 -23q0 -88 63 -150.5t151 -62.5t150.5 62.5t62.5 150.5zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_452" unicode="&#xf1e2;" horiz-adv-x="1792"
+d="M571 947q-10 25 -34 35t-49 0q-108 -44 -191 -127t-127 -191q-10 -25 0 -49t35 -34q13 -5 24 -5q42 0 60 40q34 84 98.5 148.5t148.5 98.5q25 11 35 35t0 49zM1513 1303l46 -46l-244 -243l68 -68q19 -19 19 -45.5t-19 -45.5l-64 -64q89 -161 89 -343q0 -143 -55.5 -273.5
+t-150 -225t-225 -150t-273.5 -55.5t-273.5 55.5t-225 150t-150 225t-55.5 273.5t55.5 273.5t150 225t225 150t273.5 55.5q182 0 343 -89l64 64q19 19 45.5 19t45.5 -19l68 -68zM1521 1359q-10 -10 -22 -10q-13 0 -23 10l-91 90q-9 10 -9 23t9 23q10 9 23 9t23 -9l90 -91
+q10 -9 10 -22.5t-10 -22.5zM1751 1129q-11 -9 -23 -9t-23 9l-90 91q-10 9 -10 22.5t10 22.5q9 10 22.5 10t22.5 -10l91 -90q9 -10 9 -23t-9 -23zM1792 1312q0 -14 -9 -23t-23 -9h-96q-14 0 -23 9t-9 23t9 23t23 9h96q14 0 23 -9t9 -23zM1600 1504v-96q0 -14 -9 -23t-23 -9
+t-23 9t-9 23v96q0 14 9 23t23 9t23 -9t9 -23zM1751 1449l-91 -90q-10 -10 -22 -10q-13 0 -23 10q-10 9 -10 22.5t10 22.5l90 91q10 9 23 9t23 -9q9 -10 9 -23t-9 -23z" />
+ <glyph glyph-name="_453" unicode="&#xf1e3;" horiz-adv-x="1792"
+d="M609 720l287 208l287 -208l-109 -336h-355zM896 1536q182 0 348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71zM1515 186q149 203 149 454v3l-102 -89l-240 224l63 323
+l134 -12q-150 206 -389 282l53 -124l-287 -159l-287 159l53 124q-239 -76 -389 -282l135 12l62 -323l-240 -224l-102 89v-3q0 -251 149 -454l30 132l326 -40l139 -298l-116 -69q117 -39 240 -39t240 39l-116 69l139 298l326 40z" />
+ <glyph glyph-name="_454" unicode="&#xf1e4;" horiz-adv-x="1792"
+d="M448 224v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM256 608v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM832 224v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23
+v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM640 608v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM66 768q-28 0 -47 19t-19 46v129h514v-129q0 -27 -19 -46t-46 -19h-383zM1216 224v-192q0 -14 -9 -23t-23 -9h-192
+q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1024 608v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1600 224v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23
+zM1408 608v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1792 1016v-13h-514v10q0 104 -382 102q-382 -1 -382 -102v-10h-514v13q0 17 8.5 43t34 64t65.5 75.5t110.5 76t160 67.5t224 47.5t293.5 18.5t293 -18.5t224 -47.5
+t160.5 -67.5t110.5 -76t65.5 -75.5t34 -64t8.5 -43zM1792 608v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1792 962v-129q0 -27 -19 -46t-46 -19h-384q-27 0 -46 19t-19 46v129h514z" />
+ <glyph glyph-name="_455" unicode="&#xf1e5;" horiz-adv-x="1792"
+d="M704 1216v-768q0 -26 -19 -45t-45 -19v-576q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v512l249 873q7 23 31 23h424zM1024 1216v-704h-256v704h256zM1792 320v-512q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v576q-26 0 -45 19t-19 45v768h424q24 0 31 -23z
+M736 1504v-224h-352v224q0 14 9 23t23 9h288q14 0 23 -9t9 -23zM1408 1504v-224h-352v224q0 14 9 23t23 9h288q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="_456" unicode="&#xf1e6;" horiz-adv-x="1792"
+d="M1755 1083q37 -38 37 -90.5t-37 -90.5l-401 -400l150 -150l-160 -160q-163 -163 -389.5 -186.5t-411.5 100.5l-362 -362h-181v181l362 362q-124 185 -100.5 411.5t186.5 389.5l160 160l150 -150l400 401q38 37 91 37t90 -37t37 -90.5t-37 -90.5l-400 -401l234 -234
+l401 400q38 37 91 37t90 -37z" />
+ <glyph glyph-name="_457" unicode="&#xf1e7;" horiz-adv-x="1792"
+d="M873 796q0 -83 -63.5 -142.5t-152.5 -59.5t-152.5 59.5t-63.5 142.5q0 84 63.5 143t152.5 59t152.5 -59t63.5 -143zM1375 796q0 -83 -63 -142.5t-153 -59.5q-89 0 -152.5 59.5t-63.5 142.5q0 84 63.5 143t152.5 59q90 0 153 -59t63 -143zM1600 616v667q0 87 -32 123.5
+t-111 36.5h-1112q-83 0 -112.5 -34t-29.5 -126v-673q43 -23 88.5 -40t81 -28t81 -18.5t71 -11t70 -4t58.5 -0.5t56.5 2t44.5 2q68 1 95 -27q6 -6 10 -9q26 -25 61 -51q7 91 118 87q5 0 36.5 -1.5t43 -2t45.5 -1t53 1t54.5 4.5t61 8.5t62 13.5t67 19.5t67.5 27t72 34.5z
+M1763 621q-121 -149 -372 -252q84 -285 -23 -465q-66 -113 -183 -148q-104 -32 -182 15q-86 51 -82 164l-1 326v1q-8 2 -24.5 6t-23.5 5l-1 -338q4 -114 -83 -164q-79 -47 -183 -15q-117 36 -182 150q-105 180 -22 463q-251 103 -372 252q-25 37 -4 63t60 -1q4 -2 11.5 -7
+t10.5 -8v694q0 72 47 123t114 51h1257q67 0 114 -51t47 -123v-694l21 15q39 27 60 1t-4 -63z" />
+ <glyph glyph-name="_458" unicode="&#xf1e8;" horiz-adv-x="1792"
+d="M896 1102v-434h-145v434h145zM1294 1102v-434h-145v434h145zM1294 342l253 254v795h-1194v-1049h326v-217l217 217h398zM1692 1536v-1013l-434 -434h-326l-217 -217h-217v217h-398v1158l109 289h1483z" />
+ <glyph glyph-name="_459" unicode="&#xf1e9;"
+d="M773 217v-127q-1 -292 -6 -305q-12 -32 -51 -40q-54 -9 -181.5 38t-162.5 89q-13 15 -17 36q-1 12 4 26q4 10 34 47t181 216q1 0 60 70q15 19 39.5 24.5t49.5 -3.5q24 -10 37.5 -29t12.5 -42zM624 468q-3 -55 -52 -70l-120 -39q-275 -88 -292 -88q-35 2 -54 36
+q-12 25 -17 75q-8 76 1 166.5t30 124.5t56 32q13 0 202 -77q71 -29 115 -47l84 -34q23 -9 35.5 -30.5t11.5 -48.5zM1450 171q-7 -54 -91.5 -161t-135.5 -127q-37 -14 -63 7q-14 10 -184 287l-47 77q-14 21 -11.5 46t19.5 46q35 43 83 26q1 -1 119 -40q203 -66 242 -79.5
+t47 -20.5q28 -22 22 -61zM778 803q5 -102 -54 -122q-58 -17 -114 71l-378 598q-8 35 19 62q41 43 207.5 89.5t224.5 31.5q40 -10 49 -45q3 -18 22 -305.5t24 -379.5zM1440 695q3 -39 -26 -59q-15 -10 -329 -86q-67 -15 -91 -23l1 2q-23 -6 -46 4t-37 32q-30 47 0 87
+q1 1 75 102q125 171 150 204t34 39q28 19 65 2q48 -23 123 -133.5t81 -167.5v-3z" />
+ <glyph glyph-name="_460" unicode="&#xf1ea;" horiz-adv-x="2048"
+d="M1024 1024h-384v-384h384v384zM1152 384v-128h-640v128h640zM1152 1152v-640h-640v640h640zM1792 384v-128h-512v128h512zM1792 640v-128h-512v128h512zM1792 896v-128h-512v128h512zM1792 1152v-128h-512v128h512zM256 192v960h-128v-960q0 -26 19 -45t45 -19t45 19
+t19 45zM1920 192v1088h-1536v-1088q0 -33 -11 -64h1483q26 0 45 19t19 45zM2048 1408v-1216q0 -80 -56 -136t-136 -56h-1664q-80 0 -136 56t-56 136v1088h256v128h1792z" />
+ <glyph glyph-name="_461" unicode="&#xf1eb;" horiz-adv-x="2048"
+d="M1024 13q-20 0 -93 73.5t-73 93.5q0 32 62.5 54t103.5 22t103.5 -22t62.5 -54q0 -20 -73 -93.5t-93 -73.5zM1294 284q-2 0 -40 25t-101.5 50t-128.5 25t-128.5 -25t-101 -50t-40.5 -25q-18 0 -93.5 75t-75.5 93q0 13 10 23q78 77 196 121t233 44t233 -44t196 -121
+q10 -10 10 -23q0 -18 -75.5 -93t-93.5 -75zM1567 556q-11 0 -23 8q-136 105 -252 154.5t-268 49.5q-85 0 -170.5 -22t-149 -53t-113.5 -62t-79 -53t-31 -22q-17 0 -92 75t-75 93q0 12 10 22q132 132 320 205t380 73t380 -73t320 -205q10 -10 10 -22q0 -18 -75 -93t-92 -75z
+M1838 827q-11 0 -22 9q-179 157 -371.5 236.5t-420.5 79.5t-420.5 -79.5t-371.5 -236.5q-11 -9 -22 -9q-17 0 -92.5 75t-75.5 93q0 13 10 23q187 186 445 288t527 102t527 -102t445 -288q10 -10 10 -23q0 -18 -75.5 -93t-92.5 -75z" />
+ <glyph glyph-name="_462" unicode="&#xf1ec;" horiz-adv-x="1792"
+d="M384 0q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM768 0q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM384 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5
+t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1152 0q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM768 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5
+t37.5 90.5zM384 768q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1152 384q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM768 768q0 53 -37.5 90.5t-90.5 37.5
+t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1536 0v384q0 52 -38 90t-90 38t-90 -38t-38 -90v-384q0 -52 38 -90t90 -38t90 38t38 90zM1152 768q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5z
+M1536 1088v256q0 26 -19 45t-45 19h-1280q-26 0 -45 -19t-19 -45v-256q0 -26 19 -45t45 -19h1280q26 0 45 19t19 45zM1536 768q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1664 1408v-1536q0 -52 -38 -90t-90 -38
+h-1408q-52 0 -90 38t-38 90v1536q0 52 38 90t90 38h1408q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_463" unicode="&#xf1ed;"
+d="M1519 890q18 -84 -4 -204q-87 -444 -565 -444h-44q-25 0 -44 -16.5t-24 -42.5l-4 -19l-55 -346l-2 -15q-5 -26 -24.5 -42.5t-44.5 -16.5h-251q-21 0 -33 15t-9 36q9 56 26.5 168t26.5 168t27 167.5t27 167.5q5 37 43 37h131q133 -2 236 21q175 39 287 144q102 95 155 246
+q24 70 35 133q1 6 2.5 7.5t3.5 1t6 -3.5q79 -59 98 -162zM1347 1172q0 -107 -46 -236q-80 -233 -302 -315q-113 -40 -252 -42q0 -1 -90 -1l-90 1q-100 0 -118 -96q-2 -8 -85 -530q-1 -10 -12 -10h-295q-22 0 -36.5 16.5t-11.5 38.5l232 1471q5 29 27.5 48t51.5 19h598
+q34 0 97.5 -13t111.5 -32q107 -41 163.5 -123t56.5 -196z" />
+ <glyph glyph-name="_464" unicode="&#xf1ee;" horiz-adv-x="1792"
+d="M441 864q33 0 52 -26q266 -364 362 -774h-446q-127 441 -367 749q-12 16 -3 33.5t29 17.5h373zM1000 507q-49 -199 -125 -393q-79 310 -256 594q40 221 44 449q211 -340 337 -650zM1099 1216q235 -324 384.5 -698.5t184.5 -773.5h-451q-41 665 -553 1472h435zM1792 640
+q0 -424 -101 -812q-67 560 -359 1083q-25 301 -106 584q-4 16 5.5 28.5t25.5 12.5h359q21 0 38.5 -13t22.5 -33q115 -409 115 -850z" />
+ <glyph glyph-name="uniF1F0" unicode="&#xf1f0;" horiz-adv-x="2304"
+d="M1975 546h-138q14 37 66 179l3 9q4 10 10 26t9 26l12 -55zM531 611l-58 295q-11 54 -75 54h-268l-2 -13q311 -79 403 -336zM710 960l-162 -438l-17 89q-26 70 -85 129.5t-131 88.5l135 -510h175l261 641h-176zM849 318h166l104 642h-166zM1617 944q-69 27 -149 27
+q-123 0 -201 -59t-79 -153q-1 -102 145 -174q48 -23 67 -41t19 -39q0 -30 -30 -46t-69 -16q-86 0 -156 33l-22 11l-23 -144q74 -34 185 -34q130 -1 208.5 59t80.5 160q0 106 -140 174q-49 25 -71 42t-22 38q0 22 24.5 38.5t70.5 16.5q70 1 124 -24l15 -8zM2042 960h-128
+q-65 0 -87 -54l-246 -588h174l35 96h212q5 -22 20 -96h154zM2304 1280v-1280q0 -52 -38 -90t-90 -38h-2048q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h2048q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_466" unicode="&#xf1f1;" horiz-adv-x="2304"
+d="M1119 1195q-128 85 -281 85q-103 0 -197.5 -40.5t-162.5 -108.5t-108.5 -162t-40.5 -197q0 -104 40.5 -198t108.5 -162t162 -108.5t198 -40.5q153 0 281 85q-131 107 -178 265.5t0.5 316.5t177.5 265zM1152 1171q-126 -99 -172 -249.5t-0.5 -300.5t172.5 -249
+q127 99 172.5 249t-0.5 300.5t-172 249.5zM1185 1195q130 -107 177.5 -265.5t0.5 -317t-178 -264.5q128 -85 281 -85q104 0 198 40.5t162 108.5t108.5 162t40.5 198q0 103 -40.5 197t-108.5 162t-162.5 108.5t-197.5 40.5q-153 0 -281 -85zM1926 473h7v3h-17v-3h7v-17h3v17z
+M1955 456h4v20h-5l-6 -13l-6 13h-5v-20h3v15l6 -13h4l5 13v-15zM1947 16v-2h-2h-3v3h3h2v-1zM1947 7h3l-4 5h2l1 1q1 1 1 3t-1 3l-1 1h-3h-6v-13h3v5h1zM685 75q0 19 11 31t30 12q18 0 29 -12.5t11 -30.5q0 -19 -11 -31t-29 -12q-19 0 -30 12t-11 31zM1158 119q30 0 35 -32
+h-70q5 32 35 32zM1514 75q0 19 11 31t29 12t29.5 -12.5t11.5 -30.5q0 -19 -11 -31t-30 -12q-18 0 -29 12t-11 31zM1786 75q0 18 11.5 30.5t29.5 12.5t29.5 -12.5t11.5 -30.5q0 -19 -11.5 -31t-29.5 -12t-29.5 12.5t-11.5 30.5zM1944 3q-2 0 -4 1q-1 0 -3 2t-2 3q-1 2 -1 4
+q0 3 1 4q0 2 2 4l1 1q2 0 2 1q2 1 4 1q3 0 4 -1l4 -2l2 -4v-1q1 -2 1 -3l-1 -1v-3t-1 -1l-1 -2q-2 -2 -4 -2q-1 -1 -4 -1zM599 7h30v85q0 24 -14.5 38.5t-39.5 15.5q-32 0 -47 -24q-14 24 -45 24q-24 0 -39 -20v16h-30v-135h30v75q0 36 33 36q30 0 30 -36v-75h29v75
+q0 36 33 36q30 0 30 -36v-75zM765 7h29v68v67h-29v-16q-17 20 -43 20q-29 0 -48 -20t-19 -51t19 -51t48 -20q28 0 43 20v-17zM943 48q0 34 -47 40l-14 2q-23 4 -23 14q0 15 25 15q23 0 43 -11l12 24q-22 14 -55 14q-26 0 -41 -12t-15 -32q0 -33 47 -39l13 -2q24 -4 24 -14
+q0 -17 -31 -17q-25 0 -45 14l-13 -23q25 -17 58 -17q29 0 45.5 12t16.5 32zM1073 14l-8 25q-13 -7 -26 -7q-19 0 -19 22v61h48v27h-48v41h-30v-41h-28v-27h28v-61q0 -50 47 -50q21 0 36 10zM1159 146q-29 0 -48 -20t-19 -51q0 -32 19.5 -51.5t49.5 -19.5q33 0 55 19l-14 22
+q-18 -15 -39 -15q-34 0 -41 33h101v12q0 32 -18 51.5t-46 19.5zM1318 146q-23 0 -35 -20v16h-30v-135h30v76q0 35 29 35q10 0 18 -4l9 28q-9 4 -21 4zM1348 75q0 -31 19.5 -51t52.5 -20q29 0 48 16l-14 24q-18 -13 -35 -12q-18 0 -29.5 12t-11.5 31t11.5 31t29.5 12
+q19 0 35 -12l14 24q-20 16 -48 16q-33 0 -52.5 -20t-19.5 -51zM1593 7h30v68v67h-30v-16q-15 20 -42 20q-29 0 -48.5 -20t-19.5 -51t19.5 -51t48.5 -20q28 0 42 20v-17zM1726 146q-23 0 -35 -20v16h-29v-135h29v76q0 35 29 35q10 0 18 -4l9 28q-8 4 -21 4zM1866 7h29v68v122
+h-29v-71q-15 20 -43 20t-47.5 -20.5t-19.5 -50.5t19.5 -50.5t47.5 -20.5q29 0 43 20v-17zM1944 27l-2 -1h-3q-2 -1 -4 -3q-3 -1 -3 -4q-1 -2 -1 -6q0 -3 1 -5q0 -2 3 -4q2 -2 4 -3t5 -1q4 0 6 1q0 1 2 2l2 1q1 1 3 4q1 2 1 5q0 4 -1 6q-1 1 -3 4q0 1 -2 2l-2 1q-1 0 -3 0.5
+t-3 0.5zM2304 1280v-1280q0 -52 -38 -90t-90 -38h-2048q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h2048q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_467" unicode="&#xf1f2;" horiz-adv-x="2304"
+d="M313 759q0 -51 -36 -84q-29 -26 -89 -26h-17v220h17q61 0 89 -27q36 -31 36 -83zM2089 824q0 -52 -64 -52h-19v101h20q63 0 63 -49zM380 759q0 74 -50 120.5t-129 46.5h-95v-333h95q74 0 119 38q60 51 60 128zM410 593h65v333h-65v-333zM730 694q0 40 -20.5 62t-75.5 42
+q-29 10 -39.5 19t-10.5 23q0 16 13.5 26.5t34.5 10.5q29 0 53 -27l34 44q-41 37 -98 37q-44 0 -74 -27.5t-30 -67.5q0 -35 18 -55.5t64 -36.5q37 -13 45 -19q19 -12 19 -34q0 -20 -14 -33.5t-36 -13.5q-48 0 -71 44l-42 -40q44 -64 115 -64q51 0 83 30.5t32 79.5zM1008 604
+v77q-37 -37 -78 -37q-49 0 -80.5 32.5t-31.5 82.5q0 48 31.5 81.5t77.5 33.5q43 0 81 -38v77q-40 20 -80 20q-74 0 -125.5 -50.5t-51.5 -123.5t51 -123.5t125 -50.5q42 0 81 19zM2240 0v527q-65 -40 -144.5 -84t-237.5 -117t-329.5 -137.5t-417.5 -134.5t-504 -118h1569
+q26 0 45 19t19 45zM1389 757q0 75 -53 128t-128 53t-128 -53t-53 -128t53 -128t128 -53t128 53t53 128zM1541 584l144 342h-71l-90 -224l-89 224h-71l142 -342h35zM1714 593h184v56h-119v90h115v56h-115v74h119v57h-184v-333zM2105 593h80l-105 140q76 16 76 94q0 47 -31 73
+t-87 26h-97v-333h65v133h9zM2304 1274v-1268q0 -56 -38.5 -95t-93.5 -39h-2040q-55 0 -93.5 39t-38.5 95v1268q0 56 38.5 95t93.5 39h2040q55 0 93.5 -39t38.5 -95z" />
+ <glyph glyph-name="f1f3" unicode="&#xf1f3;" horiz-adv-x="2304"
+d="M119 854h89l-45 108zM740 328l74 79l-70 79h-163v-49h142v-55h-142v-54h159zM898 406l99 -110v217zM1186 453q0 33 -40 33h-84v-69h83q41 0 41 36zM1475 457q0 29 -42 29h-82v-61h81q43 0 43 32zM1197 923q0 29 -42 29h-82v-60h81q43 0 43 31zM1656 854h89l-44 108z
+M699 1009v-271h-66v212l-94 -212h-57l-94 212v-212h-132l-25 60h-135l-25 -60h-70l116 271h96l110 -257v257h106l85 -184l77 184h108zM1255 453q0 -20 -5.5 -35t-14 -25t-22.5 -16.5t-26 -10t-31.5 -4.5t-31.5 -1t-32.5 0.5t-29.5 0.5v-91h-126l-80 90l-83 -90h-256v271h260
+l80 -89l82 89h207q109 0 109 -89zM964 794v-56h-217v271h217v-57h-152v-49h148v-55h-148v-54h152zM2304 235v-229q0 -55 -38.5 -94.5t-93.5 -39.5h-2040q-55 0 -93.5 39.5t-38.5 94.5v678h111l25 61h55l25 -61h218v46l19 -46h113l20 47v-47h541v99l10 1q10 0 10 -14v-86h279
+v23q23 -12 55 -18t52.5 -6.5t63 0.5t51.5 1l25 61h56l25 -61h227v58l34 -58h182v378h-180v-44l-25 44h-185v-44l-23 44h-249q-69 0 -109 -22v22h-172v-22q-24 22 -73 22h-628l-43 -97l-43 97h-198v-44l-22 44h-169l-78 -179v391q0 55 38.5 94.5t93.5 39.5h2040
+q55 0 93.5 -39.5t38.5 -94.5v-678h-120q-51 0 -81 -22v22h-177q-55 0 -78 -22v22h-316v-22q-31 22 -87 22h-209v-22q-23 22 -91 22h-234l-54 -58l-50 58h-349v-378h343l55 59l52 -59h211v89h21q59 0 90 13v-102h174v99h8q8 0 10 -2t2 -10v-87h529q57 0 88 24v-24h168
+q60 0 95 17zM1546 469q0 -23 -12 -43t-34 -29q25 -9 34 -26t9 -46v-54h-65v45q0 33 -12 43.5t-46 10.5h-69v-99h-65v271h154q48 0 77 -15t29 -58zM1269 936q0 -24 -12.5 -44t-33.5 -29q26 -9 34.5 -25.5t8.5 -46.5v-53h-65q0 9 0.5 26.5t0 25t-3 18.5t-8.5 16t-17.5 8.5
+t-29.5 3.5h-70v-98h-64v271l153 -1q49 0 78 -14.5t29 -57.5zM1798 327v-56h-216v271h216v-56h-151v-49h148v-55h-148v-54zM1372 1009v-271h-66v271h66zM2065 357q0 -86 -102 -86h-126v58h126q34 0 34 25q0 16 -17 21t-41.5 5t-49.5 3.5t-42 22.5t-17 55q0 39 26 60t66 21
+h130v-57h-119q-36 0 -36 -25q0 -16 17.5 -20.5t42 -4t49 -2.5t42 -21.5t17.5 -54.5zM2304 407v-101q-24 -35 -88 -35h-125v58h125q33 0 33 25q0 13 -12.5 19t-31 5.5t-40 2t-40 8t-31 24t-12.5 48.5q0 39 26.5 60t66.5 21h129v-57h-118q-36 0 -36 -25q0 -20 29 -22t68.5 -5
+t56.5 -26zM2139 1008v-270h-92l-122 203v-203h-132l-26 60h-134l-25 -60h-75q-129 0 -129 133q0 138 133 138h63v-59q-7 0 -28 1t-28.5 0.5t-23 -2t-21.5 -6.5t-14.5 -13.5t-11.5 -23t-3 -33.5q0 -38 13.5 -58t49.5 -20h29l92 213h97l109 -256v256h99l114 -188v188h66z" />
+ <glyph glyph-name="_469" unicode="&#xf1f4;" horiz-adv-x="2304"
+d="M745 630q0 -37 -25.5 -61.5t-62.5 -24.5q-29 0 -46.5 16t-17.5 44q0 37 25 62.5t62 25.5q28 0 46.5 -16.5t18.5 -45.5zM1530 779q0 -42 -22 -57t-66 -15l-32 -1l17 107q2 11 13 11h18q22 0 35 -2t25 -12.5t12 -30.5zM1881 630q0 -36 -25.5 -61t-61.5 -25q-29 0 -47 16
+t-18 44q0 37 25 62.5t62 25.5q28 0 46.5 -16.5t18.5 -45.5zM513 801q0 59 -38.5 85.5t-100.5 26.5h-160q-19 0 -21 -19l-65 -408q-1 -6 3 -11t10 -5h76q20 0 22 19l18 110q1 8 7 13t15 6.5t17 1.5t19 -1t14 -1q86 0 135 48.5t49 134.5zM822 489l41 261q1 6 -3 11t-10 5h-76
+q-14 0 -17 -33q-27 40 -95 40q-72 0 -122.5 -54t-50.5 -127q0 -59 34.5 -94t92.5 -35q28 0 58 12t48 32q-4 -12 -4 -21q0 -16 13 -16h69q19 0 22 19zM1269 752q0 5 -4 9.5t-9 4.5h-77q-11 0 -18 -10l-106 -156l-44 150q-5 16 -22 16h-75q-5 0 -9 -4.5t-4 -9.5q0 -2 19.5 -59
+t42 -123t23.5 -70q-82 -112 -82 -120q0 -13 13 -13h77q11 0 18 10l255 368q2 2 2 7zM1649 801q0 59 -38.5 85.5t-100.5 26.5h-159q-20 0 -22 -19l-65 -408q-1 -6 3 -11t10 -5h82q12 0 16 13l18 116q1 8 7 13t15 6.5t17 1.5t19 -1t14 -1q86 0 135 48.5t49 134.5zM1958 489
+l41 261q1 6 -3 11t-10 5h-76q-14 0 -17 -33q-26 40 -95 40q-72 0 -122.5 -54t-50.5 -127q0 -59 34.5 -94t92.5 -35q29 0 59 12t47 32q0 -1 -2 -9t-2 -12q0 -16 13 -16h69q19 0 22 19zM2176 898v1q0 14 -13 14h-74q-11 0 -13 -11l-65 -416l-1 -2q0 -5 4 -9.5t10 -4.5h66
+q19 0 21 19zM392 764q-5 -35 -26 -46t-60 -11l-33 -1l17 107q2 11 13 11h19q40 0 58 -11.5t12 -48.5zM2304 1280v-1280q0 -52 -38 -90t-90 -38h-2048q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h2048q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_470" unicode="&#xf1f5;" horiz-adv-x="2304"
+d="M1597 633q0 -69 -21 -106q-19 -35 -52 -35q-23 0 -41 9v224q29 30 57 30q57 0 57 -122zM2035 669h-110q6 98 56 98q51 0 54 -98zM476 534q0 59 -33 91.5t-101 57.5q-36 13 -52 24t-16 25q0 26 38 26q58 0 124 -33l18 112q-67 32 -149 32q-77 0 -123 -38q-48 -39 -48 -109
+q0 -58 32.5 -90.5t99.5 -56.5q39 -14 54.5 -25.5t15.5 -27.5q0 -31 -48 -31q-29 0 -70 12.5t-72 30.5l-18 -113q72 -41 168 -41q81 0 129 37q51 41 51 117zM771 749l19 111h-96v135l-129 -21l-18 -114l-46 -8l-17 -103h62v-219q0 -84 44 -120q38 -30 111 -30q32 0 79 11v118
+q-32 -7 -44 -7q-42 0 -42 50v197h77zM1087 724v139q-15 3 -28 3q-32 0 -55.5 -16t-33.5 -46l-10 56h-131v-471h150v306q26 31 82 31q16 0 26 -2zM1124 389h150v471h-150v-471zM1746 638q0 122 -45 179q-40 52 -111 52q-64 0 -117 -56l-8 47h-132v-645l150 25v151
+q36 -11 68 -11q83 0 134 56q61 65 61 202zM1278 986q0 33 -23 56t-56 23t-56 -23t-23 -56t23 -56.5t56 -23.5t56 23.5t23 56.5zM2176 629q0 113 -48 176q-50 64 -144 64q-96 0 -151.5 -66t-55.5 -180q0 -128 63 -188q55 -55 161 -55q101 0 160 40l-16 103q-57 -31 -128 -31
+q-43 0 -63 19q-23 19 -28 66h248q2 14 2 52zM2304 1280v-1280q0 -52 -38 -90t-90 -38h-2048q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h2048q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_471" unicode="&#xf1f6;" horiz-adv-x="2048"
+d="M1558 684q61 -356 298 -556q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-180.5 74.5t-75.5 180.5zM1024 -176q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16q0 -73 51.5 -124.5t124.5 -51.5zM2026 1424q8 -10 7.5 -23.5t-10.5 -22.5
+l-1872 -1622q-10 -8 -23.5 -7t-21.5 11l-84 96q-8 10 -7.5 23.5t10.5 21.5l186 161q-19 32 -19 66q50 42 91 88t85 119.5t74.5 158.5t50 206t19.5 260q0 152 117 282.5t307 158.5q-8 19 -8 39q0 40 28 68t68 28t68 -28t28 -68q0 -20 -8 -39q124 -18 219 -82.5t148 -157.5
+l418 363q10 8 23.5 7t21.5 -11z" />
+ <glyph glyph-name="_472" unicode="&#xf1f7;" horiz-adv-x="2048"
+d="M1040 -160q0 16 -16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16zM503 315l877 760q-42 88 -132.5 146.5t-223.5 58.5q-93 0 -169.5 -31.5t-121.5 -80.5t-69 -103t-24 -105q0 -384 -137 -645zM1856 128
+q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-180.5 74.5t-75.5 180.5l149 129h757q-166 187 -227 459l111 97q61 -356 298 -556zM1942 1520l84 -96q8 -10 7.5 -23.5t-10.5 -22.5l-1872 -1622q-10 -8 -23.5 -7t-21.5 11l-84 96q-8 10 -7.5 23.5t10.5 21.5l186 161
+q-19 32 -19 66q50 42 91 88t85 119.5t74.5 158.5t50 206t19.5 260q0 152 117 282.5t307 158.5q-8 19 -8 39q0 40 28 68t68 28t68 -28t28 -68q0 -20 -8 -39q124 -18 219 -82.5t148 -157.5l418 363q10 8 23.5 7t21.5 -11z" />
+ <glyph glyph-name="_473" unicode="&#xf1f8;" horiz-adv-x="1408"
+d="M512 160v704q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-704q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM768 160v704q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-704q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1024 160v704q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-704
+q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM480 1152h448l-48 117q-7 9 -17 11h-317q-10 -2 -17 -11zM1408 1120v-64q0 -14 -9 -23t-23 -9h-96v-948q0 -83 -47 -143.5t-113 -60.5h-832q-66 0 -113 58.5t-47 141.5v952h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h309l70 167
+q15 37 54 63t79 26h320q40 0 79 -26t54 -63l70 -167h309q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="_474" unicode="&#xf1f9;"
+d="M1150 462v-109q0 -50 -36.5 -89t-94 -60.5t-118 -32.5t-117.5 -11q-205 0 -342.5 139t-137.5 346q0 203 136 339t339 136q34 0 75.5 -4.5t93 -18t92.5 -34t69 -56.5t28 -81v-109q0 -16 -16 -16h-118q-16 0 -16 16v70q0 43 -65.5 67.5t-137.5 24.5q-140 0 -228.5 -91.5
+t-88.5 -237.5q0 -151 91.5 -249.5t233.5 -98.5q68 0 138 24t70 66v70q0 7 4.5 11.5t10.5 4.5h119q6 0 11 -4.5t5 -11.5zM768 1280q-130 0 -248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5
+t-51 248.5t-136.5 204t-204 136.5t-248.5 51zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_475" unicode="&#xf1fa;"
+d="M972 761q0 108 -53.5 169t-147.5 61q-63 0 -124 -30.5t-110 -84.5t-79.5 -137t-30.5 -180q0 -112 53.5 -173t150.5 -61q96 0 176 66.5t122.5 166t42.5 203.5zM1536 640q0 -111 -37 -197t-98.5 -135t-131.5 -74.5t-145 -27.5q-6 0 -15.5 -0.5t-16.5 -0.5q-95 0 -142 53
+q-28 33 -33 83q-52 -66 -131.5 -110t-173.5 -44q-161 0 -249.5 95.5t-88.5 269.5q0 157 66 290t179 210.5t246 77.5q87 0 155 -35.5t106 -99.5l2 19l11 56q1 6 5.5 12t9.5 6h118q5 0 13 -11q5 -5 3 -16l-120 -614q-5 -24 -5 -48q0 -39 12.5 -52t44.5 -13q28 1 57 5.5t73 24
+t77 50t57 89.5t24 137q0 292 -174 466t-466 174q-130 0 -248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51q228 0 405 144q11 9 24 8t21 -12l41 -49q8 -12 7 -24q-2 -13 -12 -22q-102 -83 -227.5 -128t-258.5 -45q-156 0 -298 61
+t-245 164t-164 245t-61 298t61 298t164 245t245 164t298 61q344 0 556 -212t212 -556z" />
+ <glyph glyph-name="_476" unicode="&#xf1fb;" horiz-adv-x="1792"
+d="M1698 1442q94 -94 94 -226.5t-94 -225.5l-225 -223l104 -104q10 -10 10 -23t-10 -23l-210 -210q-10 -10 -23 -10t-23 10l-105 105l-603 -603q-37 -37 -90 -37h-203l-256 -128l-64 64l128 256v203q0 53 37 90l603 603l-105 105q-10 10 -10 23t10 23l210 210q10 10 23 10
+t23 -10l104 -104l223 225q93 94 225.5 94t226.5 -94zM512 64l576 576l-192 192l-576 -576v-192h192z" />
+ <glyph glyph-name="f1fc" unicode="&#xf1fc;" horiz-adv-x="1792"
+d="M1615 1536q70 0 122.5 -46.5t52.5 -116.5q0 -63 -45 -151q-332 -629 -465 -752q-97 -91 -218 -91q-126 0 -216.5 92.5t-90.5 219.5q0 128 92 212l638 579q59 54 130 54zM706 502q39 -76 106.5 -130t150.5 -76l1 -71q4 -213 -129.5 -347t-348.5 -134q-123 0 -218 46.5
+t-152.5 127.5t-86.5 183t-29 220q7 -5 41 -30t62 -44.5t59 -36.5t46 -17q41 0 55 37q25 66 57.5 112.5t69.5 76t88 47.5t103 25.5t125 10.5z" />
+ <glyph glyph-name="_478" unicode="&#xf1fd;" horiz-adv-x="1792"
+d="M1792 128v-384h-1792v384q45 0 85 14t59 27.5t47 37.5q30 27 51.5 38t56.5 11q24 0 44 -7t31 -15t33 -27q29 -25 47 -38t58 -27t86 -14q45 0 85 14.5t58 27t48 37.5q21 19 32.5 27t31 15t43.5 7q35 0 56.5 -11t51.5 -38q28 -24 47 -37.5t59 -27.5t85 -14t85 14t59 27.5
+t47 37.5q30 27 51.5 38t56.5 11q34 0 55.5 -11t51.5 -38q28 -24 47 -37.5t59 -27.5t85 -14zM1792 448v-192q-24 0 -44 7t-31 15t-33 27q-29 25 -47 38t-58 27t-85 14q-46 0 -86 -14t-58 -27t-47 -38q-22 -19 -33 -27t-31 -15t-44 -7q-35 0 -56.5 11t-51.5 38q-29 25 -47 38
+t-58 27t-86 14q-45 0 -85 -14.5t-58 -27t-48 -37.5q-21 -19 -32.5 -27t-31 -15t-43.5 -7q-35 0 -56.5 11t-51.5 38q-28 24 -47 37.5t-59 27.5t-85 14q-46 0 -86 -14t-58 -27t-47 -38q-30 -27 -51.5 -38t-56.5 -11v192q0 80 56 136t136 56h64v448h256v-448h256v448h256v-448
+h256v448h256v-448h64q80 0 136 -56t56 -136zM512 1312q0 -77 -36 -118.5t-92 -41.5q-53 0 -90.5 37.5t-37.5 90.5q0 29 9.5 51t23.5 34t31 28t31 31.5t23.5 44.5t9.5 67q38 0 83 -74t45 -150zM1024 1312q0 -77 -36 -118.5t-92 -41.5q-53 0 -90.5 37.5t-37.5 90.5
+q0 29 9.5 51t23.5 34t31 28t31 31.5t23.5 44.5t9.5 67q38 0 83 -74t45 -150zM1536 1312q0 -77 -36 -118.5t-92 -41.5q-53 0 -90.5 37.5t-37.5 90.5q0 29 9.5 51t23.5 34t31 28t31 31.5t23.5 44.5t9.5 67q38 0 83 -74t45 -150z" />
+ <glyph glyph-name="_479" unicode="&#xf1fe;" horiz-adv-x="2048"
+d="M2048 0v-128h-2048v1536h128v-1408h1920zM1664 1024l256 -896h-1664v576l448 576l576 -576z" />
+ <glyph glyph-name="_480" unicode="&#xf200;" horiz-adv-x="1792"
+d="M768 646l546 -546q-106 -108 -247.5 -168t-298.5 -60q-209 0 -385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103v-762zM955 640h773q0 -157 -60 -298.5t-168 -247.5zM1664 768h-768v768q209 0 385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_481" unicode="&#xf201;" horiz-adv-x="2048"
+d="M2048 0v-128h-2048v1536h128v-1408h1920zM1920 1248v-435q0 -21 -19.5 -29.5t-35.5 7.5l-121 121l-633 -633q-10 -10 -23 -10t-23 10l-233 233l-416 -416l-192 192l585 585q10 10 23 10t23 -10l233 -233l464 464l-121 121q-16 16 -7.5 35.5t29.5 19.5h435q14 0 23 -9
+t9 -23z" />
+ <glyph glyph-name="_482" unicode="&#xf202;" horiz-adv-x="1792"
+d="M1292 832q0 -6 10 -41q10 -29 25 -49.5t41 -34t44 -20t55 -16.5q325 -91 325 -332q0 -146 -105.5 -242.5t-254.5 -96.5q-59 0 -111.5 18.5t-91.5 45.5t-77 74.5t-63 87.5t-53.5 103.5t-43.5 103t-39.5 106.5t-35.5 95q-32 81 -61.5 133.5t-73.5 96.5t-104 64t-142 20
+q-96 0 -183 -55.5t-138 -144.5t-51 -185q0 -160 106.5 -279.5t263.5 -119.5q177 0 258 95q56 63 83 116l84 -152q-15 -34 -44 -70l1 -1q-131 -152 -388 -152q-147 0 -269.5 79t-190.5 207.5t-68 274.5q0 105 43.5 206t116 176.5t172 121.5t204.5 46q87 0 159 -19t123.5 -50
+t95 -80t72.5 -99t58.5 -117t50.5 -124.5t50 -130.5t55 -127q96 -200 233 -200q81 0 138.5 48.5t57.5 128.5q0 42 -19 72t-50.5 46t-72.5 31.5t-84.5 27t-87.5 34t-81 52t-65 82t-39 122.5q-3 16 -3 33q0 110 87.5 192t198.5 78q78 -3 120.5 -14.5t90.5 -53.5h-1
+q12 -11 23 -24.5t26 -36t19 -27.5l-129 -99q-26 49 -54 70v1q-23 21 -97 21q-49 0 -84 -33t-35 -83z" />
+ <glyph glyph-name="_483" unicode="&#xf203;"
+d="M1432 484q0 173 -234 239q-35 10 -53 16.5t-38 25t-29 46.5q0 2 -2 8.5t-3 12t-1 7.5q0 36 24.5 59.5t60.5 23.5q54 0 71 -15h-1q20 -15 39 -51l93 71q-39 54 -49 64q-33 29 -67.5 39t-85.5 10q-80 0 -142 -57.5t-62 -137.5q0 -7 2 -23q16 -96 64.5 -140t148.5 -73
+q29 -8 49 -15.5t45 -21.5t38.5 -34.5t13.5 -46.5v-5q1 -58 -40.5 -93t-100.5 -35q-97 0 -167 144q-23 47 -51.5 121.5t-48 125.5t-54 110.5t-74 95.5t-103.5 60.5t-147 24.5q-101 0 -192 -56t-144 -148t-50 -192v-1q4 -108 50.5 -199t133.5 -147.5t196 -56.5q186 0 279 110
+q20 27 31 51l-60 109q-42 -80 -99 -116t-146 -36q-115 0 -191 87t-76 204q0 105 82 189t186 84q112 0 170 -53.5t104 -172.5q8 -21 25.5 -68.5t28.5 -76.5t31.5 -74.5t38.5 -74t45.5 -62.5t55.5 -53.5t66 -33t80 -13.5q107 0 183 69.5t76 174.5zM1536 1120v-960
+q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_484" unicode="&#xf204;" horiz-adv-x="2048"
+d="M1152 640q0 104 -40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5t-198.5 -40.5t-163.5 -109.5t-109.5 -163.5t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5t198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5zM1920 640q0 104 -40.5 198.5
+t-109.5 163.5t-163.5 109.5t-198.5 40.5h-386q119 -90 188.5 -224t69.5 -288t-69.5 -288t-188.5 -224h386q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5zM2048 640q0 -130 -51 -248.5t-136.5 -204t-204 -136.5t-248.5 -51h-768q-130 0 -248.5 51t-204 136.5
+t-136.5 204t-51 248.5t51 248.5t136.5 204t204 136.5t248.5 51h768q130 0 248.5 -51t204 -136.5t136.5 -204t51 -248.5z" />
+ <glyph glyph-name="_485" unicode="&#xf205;" horiz-adv-x="2048"
+d="M0 640q0 130 51 248.5t136.5 204t204 136.5t248.5 51h768q130 0 248.5 -51t204 -136.5t136.5 -204t51 -248.5t-51 -248.5t-136.5 -204t-204 -136.5t-248.5 -51h-768q-130 0 -248.5 51t-204 136.5t-136.5 204t-51 248.5zM1408 128q104 0 198.5 40.5t163.5 109.5
+t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5t-198.5 -40.5t-163.5 -109.5t-109.5 -163.5t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5z" />
+ <glyph glyph-name="_486" unicode="&#xf206;" horiz-adv-x="2304"
+d="M762 384h-314q-40 0 -57.5 35t6.5 67l188 251q-65 31 -137 31q-132 0 -226 -94t-94 -226t94 -226t226 -94q115 0 203 72.5t111 183.5zM576 512h186q-18 85 -75 148zM1056 512l288 384h-480l-99 -132q105 -103 126 -252h165zM2176 448q0 132 -94 226t-226 94
+q-60 0 -121 -24l174 -260q15 -23 10 -49t-27 -40q-15 -11 -36 -11q-35 0 -53 29l-174 260q-93 -95 -93 -225q0 -132 94 -226t226 -94t226 94t94 226zM2304 448q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 97 39.5 183.5t109.5 149.5l-65 98l-353 -469
+q-18 -26 -51 -26h-197q-23 -164 -149 -274t-294 -110q-185 0 -316.5 131.5t-131.5 316.5t131.5 316.5t316.5 131.5q114 0 215 -55l137 183h-224q-26 0 -45 19t-19 45t19 45t45 19h384v-128h435l-85 128h-222q-26 0 -45 19t-19 45t19 45t45 19h256q33 0 53 -28l267 -400
+q91 44 192 44q185 0 316.5 -131.5t131.5 -316.5z" />
+ <glyph glyph-name="_487" unicode="&#xf207;"
+d="M384 320q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1408 320q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1362 716l-72 384q-5 23 -22.5 37.5t-40.5 14.5
+h-918q-23 0 -40.5 -14.5t-22.5 -37.5l-72 -384q-5 -30 14 -53t49 -23h1062q30 0 49 23t14 53zM1136 1328q0 20 -14 34t-34 14h-640q-20 0 -34 -14t-14 -34t14 -34t34 -14h640q20 0 34 14t14 34zM1536 603v-603h-128v-128q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5
+t-37.5 90.5v128h-768v-128q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5v128h-128v603q0 112 25 223l103 454q9 78 97.5 137t230 89t312.5 30t312.5 -30t230 -89t97.5 -137l105 -454q23 -102 23 -223z" />
+ <glyph glyph-name="_488" unicode="&#xf208;" horiz-adv-x="2048"
+d="M1463 704q0 -35 -25 -60.5t-61 -25.5h-702q-36 0 -61 25.5t-25 60.5t25 60.5t61 25.5h702q36 0 61 -25.5t25 -60.5zM1677 704q0 86 -23 170h-982q-36 0 -61 25t-25 60q0 36 25 61t61 25h908q-88 143 -235 227t-320 84q-177 0 -327.5 -87.5t-238 -237.5t-87.5 -327
+q0 -86 23 -170h982q36 0 61 -25t25 -60q0 -36 -25 -61t-61 -25h-908q88 -143 235.5 -227t320.5 -84q132 0 253 51.5t208 139t139 208t52 253.5zM2048 959q0 -35 -25 -60t-61 -25h-131q17 -85 17 -170q0 -167 -65.5 -319.5t-175.5 -263t-262.5 -176t-319.5 -65.5
+q-246 0 -448.5 133t-301.5 350h-189q-36 0 -61 25t-25 61q0 35 25 60t61 25h132q-17 85 -17 170q0 167 65.5 319.5t175.5 263t262.5 176t320.5 65.5q245 0 447.5 -133t301.5 -350h188q36 0 61 -25t25 -61z" />
+ <glyph glyph-name="_489" unicode="&#xf209;" horiz-adv-x="1280"
+d="M953 1158l-114 -328l117 -21q165 451 165 518q0 56 -38 56q-57 0 -130 -225zM654 471l33 -88q37 42 71 67l-33 5.5t-38.5 7t-32.5 8.5zM362 1367q0 -98 159 -521q17 10 49 10q15 0 75 -5l-121 351q-75 220 -123 220q-19 0 -29 -17.5t-10 -37.5zM283 608q0 -36 51.5 -119
+t117.5 -153t100 -70q14 0 25.5 13t11.5 27q0 24 -32 102q-13 32 -32 72t-47.5 89t-61.5 81t-62 32q-20 0 -45.5 -27t-25.5 -47zM125 273q0 -41 25 -104q59 -145 183.5 -227t281.5 -82q227 0 382 170q152 169 152 427q0 43 -1 67t-11.5 62t-30.5 56q-56 49 -211.5 75.5
+t-270.5 26.5q-37 0 -49 -11q-12 -5 -12 -35q0 -34 21.5 -60t55.5 -40t77.5 -23.5t87.5 -11.5t85 -4t70 0h23q24 0 40 -19q15 -19 19 -55q-28 -28 -96 -54q-61 -22 -93 -46q-64 -46 -108.5 -114t-44.5 -137q0 -31 18.5 -88.5t18.5 -87.5l-3 -12q-4 -12 -4 -14
+q-137 10 -146 216q-8 -2 -41 -2q2 -7 2 -21q0 -53 -40.5 -89.5t-94.5 -36.5q-82 0 -166.5 78t-84.5 159q0 34 33 67q52 -64 60 -76q77 -104 133 -104q12 0 26.5 8.5t14.5 20.5q0 34 -87.5 145t-116.5 111q-43 0 -70 -44.5t-27 -90.5zM11 264q0 101 42.5 163t136.5 88
+q-28 74 -28 104q0 62 61 123t122 61q29 0 70 -15q-163 462 -163 567q0 80 41 130.5t119 50.5q131 0 325 -581q6 -17 8 -23q6 16 29 79.5t43.5 118.5t54 127.5t64.5 123t70.5 86.5t76.5 36q71 0 112 -49t41 -122q0 -108 -159 -550q61 -15 100.5 -46t58.5 -78t26 -93.5
+t7 -110.5q0 -150 -47 -280t-132 -225t-211 -150t-278 -55q-111 0 -223 42q-149 57 -258 191.5t-109 286.5z" />
+ <glyph glyph-name="_490" unicode="&#xf20a;" horiz-adv-x="2048"
+d="M785 528h207q-14 -158 -98.5 -248.5t-214.5 -90.5q-162 0 -254.5 116t-92.5 316q0 194 93 311.5t233 117.5q148 0 232 -87t97 -247h-203q-5 64 -35.5 99t-81.5 35q-57 0 -88.5 -60.5t-31.5 -177.5q0 -48 5 -84t18 -69.5t40 -51.5t66 -18q95 0 109 139zM1497 528h206
+q-14 -158 -98 -248.5t-214 -90.5q-162 0 -254.5 116t-92.5 316q0 194 93 311.5t233 117.5q148 0 232 -87t97 -247h-204q-4 64 -35 99t-81 35q-57 0 -88.5 -60.5t-31.5 -177.5q0 -48 5 -84t18 -69.5t39.5 -51.5t65.5 -18q49 0 76.5 38t33.5 101zM1856 647q0 207 -15.5 307
+t-60.5 161q-6 8 -13.5 14t-21.5 15t-16 11q-86 63 -697 63q-625 0 -710 -63q-5 -4 -17.5 -11.5t-21 -14t-14.5 -14.5q-45 -60 -60 -159.5t-15 -308.5q0 -208 15 -307.5t60 -160.5q6 -8 15 -15t20.5 -14t17.5 -12q44 -33 239.5 -49t470.5 -16q610 0 697 65q5 4 17 11t20.5 14
+t13.5 16q46 60 61 159t15 309zM2048 1408v-1536h-2048v1536h2048z" />
+ <glyph glyph-name="_491" unicode="&#xf20b;"
+d="M992 912v-496q0 -14 -9 -23t-23 -9h-160q-14 0 -23 9t-9 23v496q0 112 -80 192t-192 80h-272v-1152q0 -14 -9 -23t-23 -9h-160q-14 0 -23 9t-9 23v1344q0 14 9 23t23 9h464q135 0 249 -66.5t180.5 -180.5t66.5 -249zM1376 1376v-880q0 -135 -66.5 -249t-180.5 -180.5
+t-249 -66.5h-464q-14 0 -23 9t-9 23v960q0 14 9 23t23 9h160q14 0 23 -9t9 -23v-768h272q112 0 192 80t80 192v880q0 14 9 23t23 9h160q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="_492" unicode="&#xf20c;"
+d="M1311 694v-114q0 -24 -13.5 -38t-37.5 -14h-202q-24 0 -38 14t-14 38v114q0 24 14 38t38 14h202q24 0 37.5 -14t13.5 -38zM821 464v250q0 53 -32.5 85.5t-85.5 32.5h-133q-68 0 -96 -52q-28 52 -96 52h-130q-53 0 -85.5 -32.5t-32.5 -85.5v-250q0 -22 21 -22h55
+q22 0 22 22v230q0 24 13.5 38t38.5 14h94q24 0 38 -14t14 -38v-230q0 -22 21 -22h54q22 0 22 22v230q0 24 14 38t38 14h97q24 0 37.5 -14t13.5 -38v-230q0 -22 22 -22h55q21 0 21 22zM1410 560v154q0 53 -33 85.5t-86 32.5h-264q-53 0 -86 -32.5t-33 -85.5v-410
+q0 -21 22 -21h55q21 0 21 21v180q31 -42 94 -42h191q53 0 86 32.5t33 85.5zM1536 1176v-1072q0 -96 -68 -164t-164 -68h-1072q-96 0 -164 68t-68 164v1072q0 96 68 164t164 68h1072q96 0 164 -68t68 -164z" />
+ <glyph glyph-name="_493" unicode="&#xf20d;"
+d="M915 450h-294l147 551zM1001 128h311l-324 1024h-440l-324 -1024h311l383 314zM1536 1120v-960q0 -118 -85 -203t-203 -85h-960q-118 0 -203 85t-85 203v960q0 118 85 203t203 85h960q118 0 203 -85t85 -203z" />
+ <glyph glyph-name="_494" unicode="&#xf20e;" horiz-adv-x="2048"
+d="M2048 641q0 -21 -13 -36.5t-33 -19.5l-205 -356q3 -9 3 -18q0 -20 -12.5 -35.5t-32.5 -19.5l-193 -337q3 -8 3 -16q0 -23 -16.5 -40t-40.5 -17q-25 0 -41 18h-400q-17 -20 -43 -20t-43 20h-399q-17 -20 -43 -20q-23 0 -40 16.5t-17 40.5q0 8 4 20l-193 335
+q-20 4 -32.5 19.5t-12.5 35.5q0 9 3 18l-206 356q-20 5 -32.5 20.5t-12.5 35.5q0 21 13.5 36.5t33.5 19.5l199 344q0 1 -0.5 3t-0.5 3q0 36 34 51l209 363q-4 10 -4 18q0 24 17 40.5t40 16.5q26 0 44 -21h396q16 21 43 21t43 -21h398q18 21 44 21q23 0 40 -16.5t17 -40.5
+q0 -6 -4 -18l207 -358q23 -1 39 -17.5t16 -38.5q0 -13 -7 -27l187 -324q19 -4 31.5 -19.5t12.5 -35.5zM1063 -158h389l-342 354h-143l-342 -354h360q18 16 39 16t39 -16zM112 654q1 -4 1 -13q0 -10 -2 -15l208 -360l15 -6l188 199v347l-187 194q-13 -8 -29 -10zM986 1438
+h-388l190 -200l554 200h-280q-16 -16 -38 -16t-38 16zM1689 226q1 6 5 11l-64 68l-17 -79h76zM1583 226l22 105l-252 266l-296 -307l63 -64h463zM1495 -142l16 28l65 310h-427l333 -343q8 4 13 5zM578 -158h5l342 354h-373v-335l4 -6q14 -5 22 -13zM552 226h402l64 66
+l-309 321l-157 -166v-221zM359 226h163v189l-168 -177q4 -8 5 -12zM358 1051q0 -1 0.5 -2t0.5 -2q0 -16 -8 -29l171 -177v269zM552 1121v-311l153 -157l297 314l-223 236zM556 1425l-4 -8v-264l205 74l-191 201q-6 -2 -10 -3zM1447 1438h-16l-621 -224l213 -225zM1023 946
+l-297 -315l311 -319l296 307zM688 634l-136 141v-284zM1038 270l-42 -44h85zM1374 618l238 -251l132 624l-3 5l-1 1zM1718 1018q-8 13 -8 29v2l-216 376q-5 1 -13 5l-437 -463l310 -327zM522 1142v223l-163 -282zM522 196h-163l163 -283v283zM1607 196l-48 -227l130 227h-82
+zM1729 266l207 361q-2 10 -2 14q0 1 3 16l-171 296l-129 -612l77 -82q5 3 15 7z" />
+ <glyph glyph-name="f210" unicode="&#xf210;"
+d="M0 856q0 131 91.5 226.5t222.5 95.5h742l352 358v-1470q0 -132 -91.5 -227t-222.5 -95h-780q-131 0 -222.5 95t-91.5 227v790zM1232 102l-176 180v425q0 46 -32 79t-78 33h-484q-46 0 -78 -33t-32 -79v-492q0 -46 32.5 -79.5t77.5 -33.5h770z" />
+ <glyph glyph-name="_496" unicode="&#xf211;"
+d="M934 1386q-317 -121 -556 -362.5t-358 -560.5q-20 89 -20 176q0 208 102.5 384.5t278.5 279t384 102.5q82 0 169 -19zM1203 1267q93 -65 164 -155q-389 -113 -674.5 -400.5t-396.5 -676.5q-93 72 -155 162q112 386 395 671t667 399zM470 -67q115 356 379.5 622t619.5 384
+q40 -92 54 -195q-292 -120 -516 -345t-343 -518q-103 14 -194 52zM1536 -125q-193 50 -367 115q-135 -84 -290 -107q109 205 274 370.5t369 275.5q-21 -152 -101 -284q65 -175 115 -370z" />
+ <glyph glyph-name="f212" unicode="&#xf212;" horiz-adv-x="2048"
+d="M1893 1144l155 -1272q-131 0 -257 57q-200 91 -393 91q-226 0 -374 -148q-148 148 -374 148q-193 0 -393 -91q-128 -57 -252 -57h-5l155 1272q224 127 482 127q233 0 387 -106q154 106 387 106q258 0 482 -127zM1398 157q129 0 232 -28.5t260 -93.5l-124 1021
+q-171 78 -368 78q-224 0 -374 -141q-150 141 -374 141q-197 0 -368 -78l-124 -1021q105 43 165.5 65t148.5 39.5t178 17.5q202 0 374 -108q172 108 374 108zM1438 191l-55 907q-211 -4 -359 -155q-152 155 -374 155q-176 0 -336 -66l-114 -941q124 51 228.5 76t221.5 25
+q209 0 374 -102q172 107 374 102z" />
+ <glyph glyph-name="_498" unicode="&#xf213;" horiz-adv-x="2048"
+d="M1500 165v733q0 21 -15 36t-35 15h-93q-20 0 -35 -15t-15 -36v-733q0 -20 15 -35t35 -15h93q20 0 35 15t15 35zM1216 165v531q0 20 -15 35t-35 15h-101q-20 0 -35 -15t-15 -35v-531q0 -20 15 -35t35 -15h101q20 0 35 15t15 35zM924 165v429q0 20 -15 35t-35 15h-101
+q-20 0 -35 -15t-15 -35v-429q0 -20 15 -35t35 -15h101q20 0 35 15t15 35zM632 165v362q0 20 -15 35t-35 15h-101q-20 0 -35 -15t-15 -35v-362q0 -20 15 -35t35 -15h101q20 0 35 15t15 35zM2048 311q0 -166 -118 -284t-284 -118h-1244q-166 0 -284 118t-118 284
+q0 116 63 214.5t168 148.5q-10 34 -10 73q0 113 80.5 193.5t193.5 80.5q102 0 180 -67q45 183 194 300t338 117q149 0 275 -73.5t199.5 -199.5t73.5 -275q0 -66 -14 -122q135 -33 221 -142.5t86 -247.5z" />
+ <glyph glyph-name="_499" unicode="&#xf214;"
+d="M0 1536h1536v-1392l-776 -338l-760 338v1392zM1436 209v926h-1336v-926l661 -294zM1436 1235v201h-1336v-201h1336zM181 937v-115h-37v115h37zM181 789v-115h-37v115h37zM181 641v-115h-37v115h37zM181 493v-115h-37v115h37zM181 345v-115h-37v115h37zM207 202l15 34
+l105 -47l-15 -33zM343 142l15 34l105 -46l-15 -34zM478 82l15 34l105 -46l-15 -34zM614 23l15 33l104 -46l-15 -34zM797 10l105 46l15 -33l-105 -47zM932 70l105 46l15 -34l-105 -46zM1068 130l105 46l15 -34l-105 -46zM1203 189l105 47l15 -34l-105 -46zM259 1389v-36h-114
+v36h114zM421 1389v-36h-115v36h115zM583 1389v-36h-115v36h115zM744 1389v-36h-114v36h114zM906 1389v-36h-114v36h114zM1068 1389v-36h-115v36h115zM1230 1389v-36h-115v36h115zM1391 1389v-36h-114v36h114zM181 1049v-79h-37v115h115v-36h-78zM421 1085v-36h-115v36h115z
+M583 1085v-36h-115v36h115zM744 1085v-36h-114v36h114zM906 1085v-36h-114v36h114zM1068 1085v-36h-115v36h115zM1230 1085v-36h-115v36h115zM1355 970v79h-78v36h115v-115h-37zM1355 822v115h37v-115h-37zM1355 674v115h37v-115h-37zM1355 526v115h37v-115h-37zM1355 378
+v115h37v-115h-37zM1355 230v115h37v-115h-37zM760 265q-129 0 -221 91.5t-92 221.5q0 129 92 221t221 92q130 0 221.5 -92t91.5 -221q0 -130 -91.5 -221.5t-221.5 -91.5zM595 646q0 -36 19.5 -56.5t49.5 -25t64 -7t64 -2t49.5 -9t19.5 -30.5q0 -49 -112 -49q-97 0 -123 51
+h-3l-31 -63q67 -42 162 -42q29 0 56.5 5t55.5 16t45.5 33t17.5 53q0 46 -27.5 69.5t-67.5 27t-79.5 3t-67 5t-27.5 25.5q0 21 20.5 33t40.5 15t41 3q34 0 70.5 -11t51.5 -34h3l30 58q-3 1 -21 8.5t-22.5 9t-19.5 7t-22 7t-20 4.5t-24 4t-23 1q-29 0 -56.5 -5t-54 -16.5
+t-43 -34t-16.5 -53.5z" />
+ <glyph glyph-name="_500" unicode="&#xf215;" horiz-adv-x="2048"
+d="M863 504q0 112 -79.5 191.5t-191.5 79.5t-191 -79.5t-79 -191.5t79 -191t191 -79t191.5 79t79.5 191zM1726 505q0 112 -79 191t-191 79t-191.5 -79t-79.5 -191q0 -113 79.5 -192t191.5 -79t191 79.5t79 191.5zM2048 1314v-1348q0 -44 -31.5 -75.5t-76.5 -31.5h-1832
+q-45 0 -76.5 31.5t-31.5 75.5v1348q0 44 31.5 75.5t76.5 31.5h431q44 0 76 -31.5t32 -75.5v-161h754v161q0 44 32 75.5t76 31.5h431q45 0 76.5 -31.5t31.5 -75.5z" />
+ <glyph glyph-name="_501" unicode="&#xf216;" horiz-adv-x="2048"
+d="M1430 953zM1690 749q148 0 253 -98.5t105 -244.5q0 -157 -109 -261.5t-267 -104.5q-85 0 -162 27.5t-138 73.5t-118 106t-109 126t-103.5 132.5t-108.5 126.5t-117 106t-136 73.5t-159 27.5q-154 0 -251.5 -91.5t-97.5 -244.5q0 -157 104 -250t263 -93q100 0 208 37.5
+t193 98.5q5 4 21 18.5t30 24t22 9.5q14 0 24.5 -10.5t10.5 -24.5q0 -24 -60 -77q-101 -88 -234.5 -142t-260.5 -54q-133 0 -245.5 58t-180 165t-67.5 241q0 205 141.5 341t347.5 136q120 0 226.5 -43.5t185.5 -113t151.5 -153t139 -167.5t133.5 -153.5t149.5 -113
+t172.5 -43.5q102 0 168.5 61.5t66.5 162.5q0 95 -64.5 159t-159.5 64q-30 0 -81.5 -18.5t-68.5 -18.5q-20 0 -35.5 15t-15.5 35q0 18 8.5 57t8.5 59q0 159 -107.5 263t-266.5 104q-58 0 -111.5 -18.5t-84 -40.5t-55.5 -40.5t-33 -18.5q-15 0 -25.5 10.5t-10.5 25.5
+q0 19 25 46q59 67 147 103.5t182 36.5q191 0 318 -125.5t127 -315.5q0 -37 -4 -66q57 15 115 15z" />
+ <glyph glyph-name="_502" unicode="&#xf217;" horiz-adv-x="1664"
+d="M1216 832q0 26 -19 45t-45 19h-128v128q0 26 -19 45t-45 19t-45 -19t-19 -45v-128h-128q-26 0 -45 -19t-19 -45t19 -45t45 -19h128v-128q0 -26 19 -45t45 -19t45 19t19 45v128h128q26 0 45 19t19 45zM640 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5
+t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1536 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1664 1088v-512q0 -24 -16 -42.5t-41 -21.5l-1044 -122q1 -7 4.5 -21.5t6 -26.5t2.5 -22q0 -16 -24 -64h920
+q26 0 45 -19t19 -45t-19 -45t-45 -19h-1024q-26 0 -45 19t-19 45q0 14 11 39.5t29.5 59.5t20.5 38l-177 823h-204q-26 0 -45 19t-19 45t19 45t45 19h256q16 0 28.5 -6.5t20 -15.5t13 -24.5t7.5 -26.5t5.5 -29.5t4.5 -25.5h1201q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="_503" unicode="&#xf218;" horiz-adv-x="1664"
+d="M1280 832q0 26 -19 45t-45 19t-45 -19l-147 -146v293q0 26 -19 45t-45 19t-45 -19t-19 -45v-293l-147 146q-19 19 -45 19t-45 -19t-19 -45t19 -45l256 -256q19 -19 45 -19t45 19l256 256q19 19 19 45zM640 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5
+t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1536 0q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1664 1088v-512q0 -24 -16 -42.5t-41 -21.5l-1044 -122q1 -7 4.5 -21.5t6 -26.5t2.5 -22q0 -16 -24 -64h920
+q26 0 45 -19t19 -45t-19 -45t-45 -19h-1024q-26 0 -45 19t-19 45q0 14 11 39.5t29.5 59.5t20.5 38l-177 823h-204q-26 0 -45 19t-19 45t19 45t45 19h256q16 0 28.5 -6.5t20 -15.5t13 -24.5t7.5 -26.5t5.5 -29.5t4.5 -25.5h1201q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="_504" unicode="&#xf219;" horiz-adv-x="2048"
+d="M212 768l623 -665l-300 665h-323zM1024 -4l349 772h-698zM538 896l204 384h-262l-288 -384h346zM1213 103l623 665h-323zM683 896h682l-204 384h-274zM1510 896h346l-288 384h-262zM1651 1382l384 -512q14 -18 13 -41.5t-17 -40.5l-960 -1024q-18 -20 -47 -20t-47 20
+l-960 1024q-16 17 -17 40.5t13 41.5l384 512q18 26 51 26h1152q33 0 51 -26z" />
+ <glyph glyph-name="_505" unicode="&#xf21a;" horiz-adv-x="2048"
+d="M1811 -19q19 19 45 19t45 -19l128 -128l-90 -90l-83 83l-83 -83q-18 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83
+q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-128 128l90 90l83 -83l83 83q19 19 45 19t45 -19l83 -83l83 83q19 19 45 19t45 -19l83 -83l83 83q19 19 45 19t45 -19l83 -83l83 83q19 19 45 19t45 -19l83 -83l83 83q19 19 45 19t45 -19l83 -83l83 83
+q19 19 45 19t45 -19l83 -83zM237 19q-19 -19 -45 -19t-45 19l-128 128l90 90l83 -82l83 82q19 19 45 19t45 -19l83 -82l64 64v293l-210 314q-17 26 -7 56.5t40 40.5l177 58v299h128v128h256v128h256v-128h256v-128h128v-299l177 -58q30 -10 40 -40.5t-7 -56.5l-210 -314
+v-293l19 18q19 19 45 19t45 -19l83 -82l83 82q19 19 45 19t45 -19l128 -128l-90 -90l-83 83l-83 -83q-18 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83l-83 -83
+q-19 -19 -45 -19t-45 19l-83 83l-83 -83q-19 -19 -45 -19t-45 19l-83 83zM640 1152v-128l384 128l384 -128v128h-128v128h-512v-128h-128z" />
+ <glyph glyph-name="_506" unicode="&#xf21b;"
+d="M576 0l96 448l-96 128l-128 64zM832 0l128 640l-128 -64l-96 -128zM992 1010q-2 4 -4 6q-10 8 -96 8q-70 0 -167 -19q-7 -2 -21 -2t-21 2q-97 19 -167 19q-86 0 -96 -8q-2 -2 -4 -6q2 -18 4 -27q2 -3 7.5 -6.5t7.5 -10.5q2 -4 7.5 -20.5t7 -20.5t7.5 -17t8.5 -17t9 -14
+t12 -13.5t14 -9.5t17.5 -8t20.5 -4t24.5 -2q36 0 59 12.5t32.5 30t14.5 34.5t11.5 29.5t17.5 12.5h12q11 0 17.5 -12.5t11.5 -29.5t14.5 -34.5t32.5 -30t59 -12.5q13 0 24.5 2t20.5 4t17.5 8t14 9.5t12 13.5t9 14t8.5 17t7.5 17t7 20.5t7.5 20.5q2 7 7.5 10.5t7.5 6.5
+q2 9 4 27zM1408 131q0 -121 -73 -190t-194 -69h-874q-121 0 -194 69t-73 190q0 61 4.5 118t19 125.5t37.5 123.5t63.5 103.5t93.5 74.5l-90 220h214q-22 64 -22 128q0 12 2 32q-194 40 -194 96q0 57 210 99q17 62 51.5 134t70.5 114q32 37 76 37q30 0 84 -31t84 -31t84 31
+t84 31q44 0 76 -37q36 -42 70.5 -114t51.5 -134q210 -42 210 -99q0 -56 -194 -96q7 -81 -20 -160h214l-82 -225q63 -33 107.5 -96.5t65.5 -143.5t29 -151.5t8 -148.5z" />
+ <glyph glyph-name="_507" unicode="&#xf21c;" horiz-adv-x="2304"
+d="M2301 500q12 -103 -22 -198.5t-99 -163.5t-158.5 -106t-196.5 -31q-161 11 -279.5 125t-134.5 274q-12 111 27.5 210.5t118.5 170.5l-71 107q-96 -80 -151 -194t-55 -244q0 -27 -18.5 -46.5t-45.5 -19.5h-256h-69q-23 -164 -149 -274t-294 -110q-185 0 -316.5 131.5
+t-131.5 316.5t131.5 316.5t316.5 131.5q76 0 152 -27l24 45q-123 110 -304 110h-64q-26 0 -45 19t-19 45t19 45t45 19h128q78 0 145 -13.5t116.5 -38.5t71.5 -39.5t51 -36.5h512h115l-85 128h-222q-30 0 -49 22.5t-14 52.5q4 23 23 38t43 15h253q33 0 53 -28l70 -105
+l114 114q19 19 46 19h101q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-179l115 -172q131 63 275 36q143 -26 244 -134.5t118 -253.5zM448 128q115 0 203 72.5t111 183.5h-314q-35 0 -55 31q-18 32 -1 63l147 277q-47 13 -91 13q-132 0 -226 -94t-94 -226t94 -226
+t226 -94zM1856 128q132 0 226 94t94 226t-94 226t-226 94q-60 0 -121 -24l174 -260q15 -23 10 -49t-27 -40q-15 -11 -36 -11q-35 0 -53 29l-174 260q-93 -95 -93 -225q0 -132 94 -226t226 -94z" />
+ <glyph glyph-name="_508" unicode="&#xf21d;"
+d="M1408 0q0 -63 -61.5 -113.5t-164 -81t-225 -46t-253.5 -15.5t-253.5 15.5t-225 46t-164 81t-61.5 113.5q0 49 33 88.5t91 66.5t118 44.5t131 29.5q26 5 48 -10.5t26 -41.5q5 -26 -10.5 -48t-41.5 -26q-58 -10 -106 -23.5t-76.5 -25.5t-48.5 -23.5t-27.5 -19.5t-8.5 -12
+q3 -11 27 -26.5t73 -33t114 -32.5t160.5 -25t201.5 -10t201.5 10t160.5 25t114 33t73 33.5t27 27.5q-1 4 -8.5 11t-27.5 19t-48.5 23.5t-76.5 25t-106 23.5q-26 4 -41.5 26t-10.5 48q4 26 26 41.5t48 10.5q71 -12 131 -29.5t118 -44.5t91 -66.5t33 -88.5zM1024 896v-384
+q0 -26 -19 -45t-45 -19h-64v-384q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v384h-64q-26 0 -45 19t-19 45v384q0 53 37.5 90.5t90.5 37.5h384q53 0 90.5 -37.5t37.5 -90.5zM928 1280q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5
+t158.5 -65.5t65.5 -158.5z" />
+ <glyph glyph-name="_509" unicode="&#xf21e;" horiz-adv-x="1792"
+d="M1280 512h305q-5 -6 -10 -10.5t-9 -7.5l-3 -4l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-5 2 -21 20h369q22 0 39.5 13.5t22.5 34.5l70 281l190 -667q6 -20 23 -33t39 -13q21 0 38 13t23 33l146 485l56 -112q18 -35 57 -35zM1792 940q0 -145 -103 -300h-369l-111 221
+q-8 17 -25.5 27t-36.5 8q-45 -5 -56 -46l-129 -430l-196 686q-6 20 -23.5 33t-39.5 13t-39 -13.5t-22 -34.5l-116 -464h-423q-103 155 -103 300q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124
+t127 -344z" />
+ <glyph glyph-name="venus" unicode="&#xf221;" horiz-adv-x="1280"
+d="M1152 960q0 -221 -147.5 -384.5t-364.5 -187.5v-260h224q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v224h-224q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h224v260q-150 16 -271.5 103t-186 224t-52.5 292
+q11 134 80.5 249t182 188t245.5 88q170 19 319 -54t236 -212t87 -306zM128 960q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5z" />
+ <glyph glyph-name="_511" unicode="&#xf222;"
+d="M1472 1408q26 0 45 -19t19 -45v-416q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v262l-382 -383q126 -156 126 -359q0 -117 -45.5 -223.5t-123 -184t-184 -123t-223.5 -45.5t-223.5 45.5t-184 123t-123 184t-45.5 223.5t45.5 223.5t123 184t184 123t223.5 45.5
+q203 0 359 -126l382 382h-261q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h416zM576 0q185 0 316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_512" unicode="&#xf223;" horiz-adv-x="1280"
+d="M830 1220q145 -72 233.5 -210.5t88.5 -305.5q0 -221 -147.5 -384.5t-364.5 -187.5v-132h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96v-96q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v96h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96v132q-217 24 -364.5 187.5
+t-147.5 384.5q0 167 88.5 305.5t233.5 210.5q-165 96 -228 273q-6 16 3.5 29.5t26.5 13.5h69q21 0 29 -20q44 -106 140 -171t214 -65t214 65t140 171q8 20 37 20h61q17 0 26.5 -13.5t3.5 -29.5q-63 -177 -228 -273zM576 256q185 0 316.5 131.5t131.5 316.5t-131.5 316.5
+t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_513" unicode="&#xf224;"
+d="M1024 1504q0 14 9 23t23 9h288q26 0 45 -19t19 -45v-288q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v134l-254 -255q126 -158 126 -359q0 -221 -147.5 -384.5t-364.5 -187.5v-132h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96v-96q0 -14 -9 -23t-23 -9h-64
+q-14 0 -23 9t-9 23v96h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96v132q-149 16 -270.5 103t-186.5 223.5t-53 291.5q16 204 160 353.5t347 172.5q118 14 228 -19t198 -103l255 254h-134q-14 0 -23 9t-9 23v64zM576 256q185 0 316.5 131.5t131.5 316.5t-131.5 316.5
+t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_514" unicode="&#xf225;" horiz-adv-x="1792"
+d="M1280 1504q0 14 9 23t23 9h288q26 0 45 -19t19 -45v-288q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v134l-254 -255q126 -158 126 -359q0 -221 -147.5 -384.5t-364.5 -187.5v-132h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96v-96q0 -14 -9 -23t-23 -9h-64
+q-14 0 -23 9t-9 23v96h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96v132q-217 24 -364.5 187.5t-147.5 384.5q0 201 126 359l-52 53l-101 -111q-9 -10 -22 -10.5t-23 7.5l-48 44q-10 8 -10.5 21.5t8.5 23.5l105 115l-111 112v-134q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9
+t-9 23v288q0 26 19 45t45 19h288q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-133l106 -107l86 94q9 10 22 10.5t23 -7.5l48 -44q10 -8 10.5 -21.5t-8.5 -23.5l-90 -99l57 -56q158 126 359 126t359 -126l255 254h-134q-14 0 -23 9t-9 23v64zM832 256q185 0 316.5 131.5
+t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_515" unicode="&#xf226;" horiz-adv-x="1792"
+d="M1790 1007q12 -155 -52.5 -292t-186 -224t-271.5 -103v-260h224q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v224h-512v-224q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v224h-224q-14 0 -23 9t-9 23v64q0 14 9 23
+t23 9h224v260q-150 16 -271.5 103t-186 224t-52.5 292q17 206 164.5 356.5t352.5 169.5q206 21 377 -94q171 115 377 94q205 -19 352.5 -169.5t164.5 -356.5zM896 647q128 131 128 313t-128 313q-128 -131 -128 -313t128 -313zM576 512q115 0 218 57q-154 165 -154 391
+q0 224 154 391q-103 57 -218 57q-185 0 -316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5zM1152 128v260q-137 15 -256 94q-119 -79 -256 -94v-260h512zM1216 512q185 0 316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5q-115 0 -218 -57q154 -167 154 -391
+q0 -226 -154 -391q103 -57 218 -57z" />
+ <glyph glyph-name="_516" unicode="&#xf227;" horiz-adv-x="1920"
+d="M1536 1120q0 14 9 23t23 9h288q26 0 45 -19t19 -45v-288q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v134l-254 -255q76 -95 107.5 -214t9.5 -247q-31 -182 -166 -312t-318 -156q-210 -29 -384.5 80t-241.5 300q-117 6 -221 57.5t-177.5 133t-113.5 192.5t-32 230
+q9 135 78 252t182 191.5t248 89.5q118 14 227.5 -19t198.5 -103l255 254h-134q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h288q26 0 45 -19t19 -45v-288q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v134l-254 -255q59 -74 93 -169q182 -9 328 -124l255 254h-134q-14 0 -23 9
+t-9 23v64zM1024 704q0 20 -4 58q-162 -25 -271 -150t-109 -292q0 -20 4 -58q162 25 271 150t109 292zM128 704q0 -168 111 -294t276 -149q-3 29 -3 59q0 210 135 369.5t338 196.5q-53 120 -163.5 193t-245.5 73q-185 0 -316.5 -131.5t-131.5 -316.5zM1088 -128
+q185 0 316.5 131.5t131.5 316.5q0 168 -111 294t-276 149q3 -28 3 -59q0 -210 -135 -369.5t-338 -196.5q53 -120 163.5 -193t245.5 -73z" />
+ <glyph glyph-name="_517" unicode="&#xf228;" horiz-adv-x="2048"
+d="M1664 1504q0 14 9 23t23 9h288q26 0 45 -19t19 -45v-288q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v134l-254 -255q76 -95 107.5 -214t9.5 -247q-32 -180 -164.5 -310t-313.5 -157q-223 -34 -409 90q-117 -78 -256 -93v-132h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23
+t-23 -9h-96v-96q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v96h-96q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96v132q-155 17 -279.5 109.5t-187 237.5t-39.5 307q25 187 159.5 322.5t320.5 164.5q224 34 410 -90q146 97 320 97q201 0 359 -126l255 254h-134q-14 0 -23 9
+t-9 23v64zM896 391q128 131 128 313t-128 313q-128 -131 -128 -313t128 -313zM128 704q0 -185 131.5 -316.5t316.5 -131.5q117 0 218 57q-154 167 -154 391t154 391q-101 57 -218 57q-185 0 -316.5 -131.5t-131.5 -316.5zM1216 256q185 0 316.5 131.5t131.5 316.5
+t-131.5 316.5t-316.5 131.5q-117 0 -218 -57q154 -167 154 -391t-154 -391q101 -57 218 -57z" />
+ <glyph glyph-name="_518" unicode="&#xf229;"
+d="M1472 1408q26 0 45 -19t19 -45v-416q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v262l-213 -214l140 -140q9 -10 9 -23t-9 -22l-46 -46q-9 -9 -22 -9t-23 9l-140 141l-78 -79q126 -156 126 -359q0 -117 -45.5 -223.5t-123 -184t-184 -123t-223.5 -45.5t-223.5 45.5
+t-184 123t-123 184t-45.5 223.5t45.5 223.5t123 184t184 123t223.5 45.5q203 0 359 -126l78 78l-172 172q-9 10 -9 23t9 22l46 46q9 9 22 9t23 -9l172 -172l213 213h-261q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h416zM576 0q185 0 316.5 131.5t131.5 316.5t-131.5 316.5
+t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_519" unicode="&#xf22a;" horiz-adv-x="1280"
+d="M640 892q217 -24 364.5 -187.5t147.5 -384.5q0 -167 -87 -306t-236 -212t-319 -54q-133 15 -245.5 88t-182 188t-80.5 249q-12 155 52.5 292t186 224t271.5 103v132h-160q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h160v165l-92 -92q-10 -9 -23 -9t-22 9l-46 46q-9 9 -9 22
+t9 23l202 201q19 19 45 19t45 -19l202 -201q9 -10 9 -23t-9 -22l-46 -46q-9 -9 -22 -9t-23 9l-92 92v-165h160q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-160v-132zM576 -128q185 0 316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5
+t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_520" unicode="&#xf22b;" horiz-adv-x="2048"
+d="M1901 621q19 -19 19 -45t-19 -45l-294 -294q-9 -10 -22.5 -10t-22.5 10l-45 45q-10 9 -10 22.5t10 22.5l185 185h-294v-224q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v224h-132q-24 -217 -187.5 -364.5t-384.5 -147.5q-167 0 -306 87t-212 236t-54 319q15 133 88 245.5
+t188 182t249 80.5q155 12 292 -52.5t224 -186t103 -271.5h132v224q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-224h294l-185 185q-10 9 -10 22.5t10 22.5l45 45q9 10 22.5 10t22.5 -10zM576 128q185 0 316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5
+t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_521" unicode="&#xf22c;" horiz-adv-x="1280"
+d="M1152 960q0 -221 -147.5 -384.5t-364.5 -187.5v-612q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v612q-217 24 -364.5 187.5t-147.5 384.5q0 117 45.5 223.5t123 184t184 123t223.5 45.5t223.5 -45.5t184 -123t123 -184t45.5 -223.5zM576 512q185 0 316.5 131.5
+t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5z" />
+ <glyph glyph-name="_522" unicode="&#xf22d;" horiz-adv-x="1280"
+d="M1024 576q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5t131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5zM1152 576q0 -117 -45.5 -223.5t-123 -184t-184 -123t-223.5 -45.5t-223.5 45.5t-184 123t-123 184t-45.5 223.5t45.5 223.5t123 184t184 123
+t223.5 45.5t223.5 -45.5t184 -123t123 -184t45.5 -223.5z" />
+ <glyph glyph-name="_523" unicode="&#xf22e;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="_524" unicode="&#xf22f;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="_525" unicode="&#xf230;"
+d="M1451 1408q35 0 60 -25t25 -60v-1366q0 -35 -25 -60t-60 -25h-391v595h199l30 232h-229v148q0 56 23.5 84t91.5 28l122 1v207q-63 9 -178 9q-136 0 -217.5 -80t-81.5 -226v-171h-200v-232h200v-595h-735q-35 0 -60 25t-25 60v1366q0 35 25 60t60 25h1366z" />
+ <glyph glyph-name="_526" unicode="&#xf231;" horiz-adv-x="1280"
+d="M0 939q0 108 37.5 203.5t103.5 166.5t152 123t185 78t202 26q158 0 294 -66.5t221 -193.5t85 -287q0 -96 -19 -188t-60 -177t-100 -149.5t-145 -103t-189 -38.5q-68 0 -135 32t-96 88q-10 -39 -28 -112.5t-23.5 -95t-20.5 -71t-26 -71t-32 -62.5t-46 -77.5t-62 -86.5
+l-14 -5l-9 10q-15 157 -15 188q0 92 21.5 206.5t66.5 287.5t52 203q-32 65 -32 169q0 83 52 156t132 73q61 0 95 -40.5t34 -102.5q0 -66 -44 -191t-44 -187q0 -63 45 -104.5t109 -41.5q55 0 102 25t78.5 68t56 95t38 110.5t20 111t6.5 99.5q0 173 -109.5 269.5t-285.5 96.5
+q-200 0 -334 -129.5t-134 -328.5q0 -44 12.5 -85t27 -65t27 -45.5t12.5 -30.5q0 -28 -15 -73t-37 -45q-2 0 -17 3q-51 15 -90.5 56t-61 94.5t-32.5 108t-11 106.5z" />
+ <glyph glyph-name="_527" unicode="&#xf232;"
+d="M985 562q13 0 97.5 -44t89.5 -53q2 -5 2 -15q0 -33 -17 -76q-16 -39 -71 -65.5t-102 -26.5q-57 0 -190 62q-98 45 -170 118t-148 185q-72 107 -71 194v8q3 91 74 158q24 22 52 22q6 0 18 -1.5t19 -1.5q19 0 26.5 -6.5t15.5 -27.5q8 -20 33 -88t25 -75q0 -21 -34.5 -57.5
+t-34.5 -46.5q0 -7 5 -15q34 -73 102 -137q56 -53 151 -101q12 -7 22 -7q15 0 54 48.5t52 48.5zM782 32q127 0 243.5 50t200.5 134t134 200.5t50 243.5t-50 243.5t-134 200.5t-200.5 134t-243.5 50t-243.5 -50t-200.5 -134t-134 -200.5t-50 -243.5q0 -203 120 -368l-79 -233
+l242 77q158 -104 345 -104zM782 1414q153 0 292.5 -60t240.5 -161t161 -240.5t60 -292.5t-60 -292.5t-161 -240.5t-240.5 -161t-292.5 -60q-195 0 -365 94l-417 -134l136 405q-108 178 -108 389q0 153 60 292.5t161 240.5t240.5 161t292.5 60z" />
+ <glyph glyph-name="_528" unicode="&#xf233;" horiz-adv-x="1792"
+d="M128 128h1024v128h-1024v-128zM128 640h1024v128h-1024v-128zM1696 192q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM128 1152h1024v128h-1024v-128zM1696 704q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM1696 1216
+q0 40 -28 68t-68 28t-68 -28t-28 -68t28 -68t68 -28t68 28t28 68zM1792 384v-384h-1792v384h1792zM1792 896v-384h-1792v384h1792zM1792 1408v-384h-1792v384h1792z" />
+ <glyph glyph-name="_529" unicode="&#xf234;" horiz-adv-x="2048"
+d="M704 640q-159 0 -271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5zM1664 512h352q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-352v-352q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5
+t-9.5 22.5v352h-352q-13 0 -22.5 9.5t-9.5 22.5v192q0 13 9.5 22.5t22.5 9.5h352v352q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-352zM928 288q0 -52 38 -90t90 -38h256v-238q-68 -50 -171 -50h-874q-121 0 -194 69t-73 190q0 53 3.5 103.5t14 109t26.5 108.5
+t43 97.5t62 81t85.5 53.5t111.5 20q19 0 39 -17q79 -61 154.5 -91.5t164.5 -30.5t164.5 30.5t154.5 91.5q20 17 39 17q132 0 217 -96h-223q-52 0 -90 -38t-38 -90v-192z" />
+ <glyph glyph-name="_530" unicode="&#xf235;" horiz-adv-x="2048"
+d="M704 640q-159 0 -271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5zM1781 320l249 -249q9 -9 9 -23q0 -13 -9 -22l-136 -136q-9 -9 -22 -9q-14 0 -23 9l-249 249l-249 -249q-9 -9 -23 -9q-13 0 -22 9l-136 136
+q-9 9 -9 22q0 14 9 23l249 249l-249 249q-9 9 -9 23q0 13 9 22l136 136q9 9 22 9q14 0 23 -9l249 -249l249 249q9 9 23 9q13 0 22 -9l136 -136q9 -9 9 -22q0 -14 -9 -23zM1283 320l-181 -181q-37 -37 -37 -91q0 -53 37 -90l83 -83q-21 -3 -44 -3h-874q-121 0 -194 69
+t-73 190q0 53 3.5 103.5t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q19 0 39 -17q154 -122 319 -122t319 122q20 17 39 17q28 0 57 -6q-28 -27 -41 -50t-13 -56q0 -54 37 -91z" />
+ <glyph glyph-name="_531" unicode="&#xf236;" horiz-adv-x="2048"
+d="M256 512h1728q26 0 45 -19t19 -45v-448h-256v256h-1536v-256h-256v1216q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-704zM832 832q0 106 -75 181t-181 75t-181 -75t-75 -181t75 -181t181 -75t181 75t75 181zM2048 576v64q0 159 -112.5 271.5t-271.5 112.5h-704
+q-26 0 -45 -19t-19 -45v-384h1152z" />
+ <glyph glyph-name="_532" unicode="&#xf237;"
+d="M1536 1536l-192 -448h192v-192h-274l-55 -128h329v-192h-411l-357 -832l-357 832h-411v192h329l-55 128h-274v192h192l-192 448h256l323 -768h378l323 768h256zM768 320l108 256h-216z" />
+ <glyph glyph-name="_533" unicode="&#xf238;"
+d="M1088 1536q185 0 316.5 -93.5t131.5 -226.5v-896q0 -130 -125.5 -222t-305.5 -97l213 -202q16 -15 8 -35t-30 -20h-1056q-22 0 -30 20t8 35l213 202q-180 5 -305.5 97t-125.5 222v896q0 133 131.5 226.5t316.5 93.5h640zM768 192q80 0 136 56t56 136t-56 136t-136 56
+t-136 -56t-56 -136t56 -136t136 -56zM1344 768v512h-1152v-512h1152z" />
+ <glyph glyph-name="_534" unicode="&#xf239;"
+d="M1088 1536q185 0 316.5 -93.5t131.5 -226.5v-896q0 -130 -125.5 -222t-305.5 -97l213 -202q16 -15 8 -35t-30 -20h-1056q-22 0 -30 20t8 35l213 202q-180 5 -305.5 97t-125.5 222v896q0 133 131.5 226.5t316.5 93.5h640zM288 224q66 0 113 47t47 113t-47 113t-113 47
+t-113 -47t-47 -113t47 -113t113 -47zM704 768v512h-544v-512h544zM1248 224q66 0 113 47t47 113t-47 113t-113 47t-113 -47t-47 -113t47 -113t113 -47zM1408 768v512h-576v-512h576z" />
+ <glyph glyph-name="_535" unicode="&#xf23a;" horiz-adv-x="1792"
+d="M597 1115v-1173q0 -25 -12.5 -42.5t-36.5 -17.5q-17 0 -33 8l-465 233q-21 10 -35.5 33.5t-14.5 46.5v1140q0 20 10 34t29 14q14 0 44 -15l511 -256q3 -3 3 -5zM661 1014l534 -866l-534 266v600zM1792 996v-1054q0 -25 -14 -40.5t-38 -15.5t-47 13l-441 220zM1789 1116
+q0 -3 -256.5 -419.5t-300.5 -487.5l-390 634l324 527q17 28 52 28q14 0 26 -6l541 -270q4 -2 4 -6z" />
+ <glyph glyph-name="_536" unicode="&#xf23b;"
+d="M809 532l266 499h-112l-157 -312q-24 -48 -44 -92l-42 92l-155 312h-120l263 -493v-324h101v318zM1536 1408v-1536h-1536v1536h1536z" />
+ <glyph glyph-name="_537" unicode="&#xf23c;" horiz-adv-x="2296"
+d="M478 -139q-8 -16 -27 -34.5t-37 -25.5q-25 -9 -51.5 3.5t-28.5 31.5q-1 22 40 55t68 38q23 4 34 -21.5t2 -46.5zM1819 -139q7 -16 26 -34.5t38 -25.5q25 -9 51.5 3.5t27.5 31.5q2 22 -39.5 55t-68.5 38q-22 4 -33 -21.5t-2 -46.5zM1867 -30q13 -27 56.5 -59.5t77.5 -41.5
+q45 -13 82 4.5t37 50.5q0 46 -67.5 100.5t-115.5 59.5q-40 5 -63.5 -37.5t-6.5 -76.5zM428 -30q-13 -27 -56 -59.5t-77 -41.5q-45 -13 -82 4.5t-37 50.5q0 46 67.5 100.5t115.5 59.5q40 5 63 -37.5t6 -76.5zM1158 1094h1q-41 0 -76 -15q27 -8 44 -30.5t17 -49.5
+q0 -35 -27 -60t-65 -25q-52 0 -80 43q-5 -23 -5 -42q0 -74 56 -126.5t135 -52.5q80 0 136 52.5t56 126.5t-56 126.5t-136 52.5zM1462 1312q-99 109 -220.5 131.5t-245.5 -44.5q27 60 82.5 96.5t118 39.5t121.5 -17t99.5 -74.5t44.5 -131.5zM2212 73q8 -11 -11 -42
+q7 -23 7 -40q1 -56 -44.5 -112.5t-109.5 -91.5t-118 -37q-48 -2 -92 21.5t-66 65.5q-687 -25 -1259 0q-23 -41 -66.5 -65t-92.5 -22q-86 3 -179.5 80.5t-92.5 160.5q2 22 7 40q-19 31 -11 42q6 10 31 1q14 22 41 51q-7 29 2 38q11 10 39 -4q29 20 59 34q0 29 13 37
+q23 12 51 -16q35 5 61 -2q18 -4 38 -19v73q-11 0 -18 2q-53 10 -97 44.5t-55 87.5q-9 38 0 81q15 62 93 95q2 17 19 35.5t36 23.5t33 -7.5t19 -30.5h13q46 -5 60 -23q3 -3 5 -7q10 1 30.5 3.5t30.5 3.5q-15 11 -30 17q-23 40 -91 43q0 6 1 10q-62 2 -118.5 18.5t-84.5 47.5
+q-32 36 -42.5 92t-2.5 112q16 126 90 179q23 16 52 4.5t32 -40.5q0 -1 1.5 -14t2.5 -21t3 -20t5.5 -19t8.5 -10q27 -14 76 -12q48 46 98 74q-40 4 -162 -14l47 46q61 58 163 111q145 73 282 86q-20 8 -41 15.5t-47 14t-42.5 10.5t-47.5 11t-43 10q595 126 904 -139
+q98 -84 158 -222q85 -10 121 9h1q5 3 8.5 10t5.5 19t3 19.5t3 21.5l1 14q3 28 32 40t52 -5q73 -52 91 -178q7 -57 -3.5 -113t-42.5 -91q-28 -32 -83.5 -48.5t-115.5 -18.5v-10q-71 -2 -95 -43q-14 -5 -31 -17q11 -1 32 -3.5t30 -3.5q1 5 5 8q16 18 60 23h13q5 18 19 30t33 8
+t36 -23t19 -36q79 -32 93 -95q9 -40 1 -81q-12 -53 -56 -88t-97 -44q-10 -2 -17 -2q0 -49 -1 -73q20 15 38 19q26 7 61 2q28 28 51 16q14 -9 14 -37q33 -16 59 -34q27 13 38 4q10 -10 2 -38q28 -30 41 -51q23 8 31 -1zM1937 1025q0 -29 -9 -54q82 -32 112 -132
+q4 37 -9.5 98.5t-41.5 90.5q-20 19 -36 17t-16 -20zM1859 925q35 -42 47.5 -108.5t-0.5 -124.5q67 13 97 45q13 14 18 28q-3 64 -31 114.5t-79 66.5q-15 -15 -52 -21zM1822 921q-30 0 -44 1q42 -115 53 -239q21 0 43 3q16 68 1 135t-53 100zM258 839q30 100 112 132
+q-9 25 -9 54q0 18 -16.5 20t-35.5 -17q-28 -29 -41.5 -90.5t-9.5 -98.5zM294 737q29 -31 97 -45q-13 58 -0.5 124.5t47.5 108.5v0q-37 6 -52 21q-51 -16 -78.5 -66t-31.5 -115q9 -17 18 -28zM471 683q14 124 73 235q-19 -4 -55 -18l-45 -19v1q-46 -89 -20 -196q25 -3 47 -3z
+M1434 644q8 -38 16.5 -108.5t11.5 -89.5q3 -18 9.5 -21.5t23.5 4.5q40 20 62 85.5t23 125.5q-24 2 -146 4zM1152 1285q-116 0 -199 -82.5t-83 -198.5q0 -117 83 -199.5t199 -82.5t199 82.5t83 199.5q0 116 -83 198.5t-199 82.5zM1380 646q-105 2 -211 0v1q-1 -27 2.5 -86
+t13.5 -66q29 -14 93.5 -14.5t95.5 10.5q9 3 11 39t-0.5 69.5t-4.5 46.5zM1112 447q8 4 9.5 48t-0.5 88t-4 63v1q-212 -3 -214 -3q-4 -20 -7 -62t0 -83t14 -46q34 -15 101 -16t101 10zM718 636q-16 -59 4.5 -118.5t77.5 -84.5q15 -8 24 -5t12 21q3 16 8 90t10 103
+q-69 -2 -136 -6zM591 510q3 -23 -34 -36q132 -141 271.5 -240t305.5 -154q172 49 310.5 146t293.5 250q-33 13 -30 34q0 2 0.5 3.5t1.5 3t1 2.5v1v-1q-17 2 -50 5.5t-48 4.5q-26 -90 -82 -132q-51 -38 -82 1q-5 6 -9 14q-7 13 -17 62q-2 -5 -5 -9t-7.5 -7t-8 -5.5t-9.5 -4
+l-10 -2.5t-12 -2l-12 -1.5t-13.5 -1t-13.5 -0.5q-106 -9 -163 11q-4 -17 -10 -26.5t-21 -15t-23 -7t-36 -3.5q-6 -1 -9 -1q-179 -17 -203 40q-2 -63 -56 -54q-47 8 -91 54q-12 13 -20 26q-17 29 -26 65q-58 -6 -87 -10q1 -2 4 -10zM507 -118q3 14 3 30q-17 71 -51 130
+t-73 70q-41 12 -101.5 -14.5t-104.5 -80t-39 -107.5q35 -53 100 -93t119 -42q51 -2 94 28t53 79zM510 53q23 -63 27 -119q195 113 392 174q-98 52 -180.5 120t-179.5 165q-6 -4 -29 -13q0 -1 -1 -4t-1 -5q31 -18 22 -37q-12 -23 -56 -34q-10 -13 -29 -24h-1q-2 -83 1 -150
+q19 -34 35 -73zM579 -113q532 -21 1145 0q-254 147 -428 196q-76 -35 -156 -57q-8 -3 -16 0q-65 21 -129 49q-208 -60 -416 -188h-1v-1q1 0 1 1zM1763 -67q4 54 28 120q14 38 33 71l-1 -1q3 77 3 153q-15 8 -30 25q-42 9 -56 33q-9 20 22 38q-2 4 -2 9q-16 4 -28 12
+q-204 -190 -383 -284q198 -59 414 -176zM2155 -90q5 54 -39 107.5t-104 80t-102 14.5q-38 -11 -72.5 -70.5t-51.5 -129.5q0 -16 3 -30q10 -49 53 -79t94 -28q54 2 119 42t100 93z" />
+ <glyph glyph-name="_538" unicode="&#xf23d;" horiz-adv-x="2304"
+d="M1524 -25q0 -68 -48 -116t-116 -48t-116.5 48t-48.5 116t48.5 116.5t116.5 48.5t116 -48.5t48 -116.5zM775 -25q0 -68 -48.5 -116t-116.5 -48t-116 48t-48 116t48 116.5t116 48.5t116.5 -48.5t48.5 -116.5zM0 1469q57 -60 110.5 -104.5t121 -82t136 -63t166 -45.5
+t200 -31.5t250 -18.5t304 -9.5t372.5 -2.5q139 0 244.5 -5t181 -16.5t124 -27.5t71 -39.5t24 -51.5t-19.5 -64t-56.5 -76.5t-89.5 -91t-116 -104.5t-139 -119q-185 -157 -286 -247q29 51 76.5 109t94 105.5t94.5 98.5t83 91.5t54 80.5t13 70t-45.5 55.5t-116.5 41t-204 23.5
+t-304 5q-168 -2 -314 6t-256 23t-204.5 41t-159.5 51.5t-122.5 62.5t-91.5 66.5t-68 71.5t-50.5 69.5t-40 68t-36.5 59.5z" />
+ <glyph glyph-name="_539" unicode="&#xf23e;" horiz-adv-x="1792"
+d="M896 1472q-169 0 -323 -66t-265.5 -177.5t-177.5 -265.5t-66 -323t66 -323t177.5 -265.5t265.5 -177.5t323 -66t323 66t265.5 177.5t177.5 265.5t66 323t-66 323t-177.5 265.5t-265.5 177.5t-323 66zM896 1536q182 0 348 -71t286 -191t191 -286t71 -348t-71 -348
+t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71zM496 704q16 0 16 -16v-480q0 -16 -16 -16h-32q-16 0 -16 16v480q0 16 16 16h32zM896 640q53 0 90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-114q0 -14 -9 -23
+t-23 -9h-64q-14 0 -23 9t-9 23v114q-29 17 -46.5 46t-17.5 64q0 53 37.5 90.5t90.5 37.5zM896 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM544 928v-96
+q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v96q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5v-96q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v96q0 146 -103 249t-249 103t-249 -103t-103 -249zM1408 192v512q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-512
+q0 -26 19 -45t45 -19h896q26 0 45 19t19 45z" />
+ <glyph glyph-name="_540" unicode="&#xf240;" horiz-adv-x="2304"
+d="M1920 1024v-768h-1664v768h1664zM2048 448h128v384h-128v288q0 14 -9 23t-23 9h-1856q-14 0 -23 -9t-9 -23v-960q0 -14 9 -23t23 -9h1856q14 0 23 9t9 23v288zM2304 832v-384q0 -53 -37.5 -90.5t-90.5 -37.5v-160q0 -66 -47 -113t-113 -47h-1856q-66 0 -113 47t-47 113
+v960q0 66 47 113t113 47h1856q66 0 113 -47t47 -113v-160q53 0 90.5 -37.5t37.5 -90.5z" />
+ <glyph glyph-name="_541" unicode="&#xf241;" horiz-adv-x="2304"
+d="M256 256v768h1280v-768h-1280zM2176 960q53 0 90.5 -37.5t37.5 -90.5v-384q0 -53 -37.5 -90.5t-90.5 -37.5v-160q0 -66 -47 -113t-113 -47h-1856q-66 0 -113 47t-47 113v960q0 66 47 113t113 47h1856q66 0 113 -47t47 -113v-160zM2176 448v384h-128v288q0 14 -9 23t-23 9
+h-1856q-14 0 -23 -9t-9 -23v-960q0 -14 9 -23t23 -9h1856q14 0 23 9t9 23v288h128z" />
+ <glyph glyph-name="_542" unicode="&#xf242;" horiz-adv-x="2304"
+d="M256 256v768h896v-768h-896zM2176 960q53 0 90.5 -37.5t37.5 -90.5v-384q0 -53 -37.5 -90.5t-90.5 -37.5v-160q0 -66 -47 -113t-113 -47h-1856q-66 0 -113 47t-47 113v960q0 66 47 113t113 47h1856q66 0 113 -47t47 -113v-160zM2176 448v384h-128v288q0 14 -9 23t-23 9
+h-1856q-14 0 -23 -9t-9 -23v-960q0 -14 9 -23t23 -9h1856q14 0 23 9t9 23v288h128z" />
+ <glyph glyph-name="_543" unicode="&#xf243;" horiz-adv-x="2304"
+d="M256 256v768h512v-768h-512zM2176 960q53 0 90.5 -37.5t37.5 -90.5v-384q0 -53 -37.5 -90.5t-90.5 -37.5v-160q0 -66 -47 -113t-113 -47h-1856q-66 0 -113 47t-47 113v960q0 66 47 113t113 47h1856q66 0 113 -47t47 -113v-160zM2176 448v384h-128v288q0 14 -9 23t-23 9
+h-1856q-14 0 -23 -9t-9 -23v-960q0 -14 9 -23t23 -9h1856q14 0 23 9t9 23v288h128z" />
+ <glyph glyph-name="_544" unicode="&#xf244;" horiz-adv-x="2304"
+d="M2176 960q53 0 90.5 -37.5t37.5 -90.5v-384q0 -53 -37.5 -90.5t-90.5 -37.5v-160q0 -66 -47 -113t-113 -47h-1856q-66 0 -113 47t-47 113v960q0 66 47 113t113 47h1856q66 0 113 -47t47 -113v-160zM2176 448v384h-128v288q0 14 -9 23t-23 9h-1856q-14 0 -23 -9t-9 -23
+v-960q0 -14 9 -23t23 -9h1856q14 0 23 9t9 23v288h128z" />
+ <glyph glyph-name="_545" unicode="&#xf245;" horiz-adv-x="1280"
+d="M1133 493q31 -30 14 -69q-17 -40 -59 -40h-382l201 -476q10 -25 0 -49t-34 -35l-177 -75q-25 -10 -49 0t-35 34l-191 452l-312 -312q-19 -19 -45 -19q-12 0 -24 5q-40 17 -40 59v1504q0 42 40 59q12 5 24 5q27 0 45 -19z" />
+ <glyph glyph-name="_546" unicode="&#xf246;" horiz-adv-x="1024"
+d="M832 1408q-320 0 -320 -224v-416h128v-128h-128v-544q0 -224 320 -224h64v-128h-64q-272 0 -384 146q-112 -146 -384 -146h-64v128h64q320 0 320 224v544h-128v128h128v416q0 224 -320 224h-64v128h64q272 0 384 -146q112 146 384 146h64v-128h-64z" />
+ <glyph glyph-name="_547" unicode="&#xf247;" horiz-adv-x="2048"
+d="M2048 1152h-128v-1024h128v-384h-384v128h-1280v-128h-384v384h128v1024h-128v384h384v-128h1280v128h384v-384zM1792 1408v-128h128v128h-128zM128 1408v-128h128v128h-128zM256 -128v128h-128v-128h128zM1664 0v128h128v1024h-128v128h-1280v-128h-128v-1024h128v-128
+h1280zM1920 -128v128h-128v-128h128zM1280 896h384v-768h-896v256h-384v768h896v-256zM512 512h640v512h-640v-512zM1536 256v512h-256v-384h-384v-128h640z" />
+ <glyph glyph-name="_548" unicode="&#xf248;" horiz-adv-x="2304"
+d="M2304 768h-128v-640h128v-384h-384v128h-896v-128h-384v384h128v128h-384v-128h-384v384h128v640h-128v384h384v-128h896v128h384v-384h-128v-128h384v128h384v-384zM2048 1024v-128h128v128h-128zM1408 1408v-128h128v128h-128zM128 1408v-128h128v128h-128zM256 256
+v128h-128v-128h128zM1536 384h-128v-128h128v128zM384 384h896v128h128v640h-128v128h-896v-128h-128v-640h128v-128zM896 -128v128h-128v-128h128zM2176 -128v128h-128v-128h128zM2048 128v640h-128v128h-384v-384h128v-384h-384v128h-384v-128h128v-128h896v128h128z" />
+ <glyph glyph-name="_549" unicode="&#xf249;"
+d="M1024 288v-416h-928q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h1344q40 0 68 -28t28 -68v-928h-416q-40 0 -68 -28t-28 -68zM1152 256h381q-15 -82 -65 -132l-184 -184q-50 -50 -132 -65v381z" />
+ <glyph glyph-name="_550" unicode="&#xf24a;"
+d="M1400 256h-248v-248q29 10 41 22l185 185q12 12 22 41zM1120 384h288v896h-1280v-1280h896v288q0 40 28 68t68 28zM1536 1312v-1024q0 -40 -20 -88t-48 -76l-184 -184q-28 -28 -76 -48t-88 -20h-1024q-40 0 -68 28t-28 68v1344q0 40 28 68t68 28h1344q40 0 68 -28t28 -68
+z" />
+ <glyph glyph-name="_551" unicode="&#xf24b;" horiz-adv-x="2304"
+d="M1951 538q0 -26 -15.5 -44.5t-38.5 -23.5q-8 -2 -18 -2h-153v140h153q10 0 18 -2q23 -5 38.5 -23.5t15.5 -44.5zM1933 751q0 -25 -15 -42t-38 -21q-3 -1 -15 -1h-139v129h139q3 0 8.5 -0.5t6.5 -0.5q23 -4 38 -21.5t15 -42.5zM728 587v308h-228v-308q0 -58 -38 -94.5
+t-105 -36.5q-108 0 -229 59v-112q53 -15 121 -23t109 -9l42 -1q328 0 328 217zM1442 403v113q-99 -52 -200 -59q-108 -8 -169 41t-61 142t61 142t169 41q101 -7 200 -58v112q-48 12 -100 19.5t-80 9.5l-28 2q-127 6 -218.5 -14t-140.5 -60t-71 -88t-22 -106t22 -106t71 -88
+t140.5 -60t218.5 -14q101 4 208 31zM2176 518q0 54 -43 88.5t-109 39.5v3q57 8 89 41.5t32 79.5q0 55 -41 88t-107 36q-3 0 -12 0.5t-14 0.5h-455v-510h491q74 0 121.5 36.5t47.5 96.5zM2304 1280v-1280q0 -52 -38 -90t-90 -38h-2048q-52 0 -90 38t-38 90v1280q0 52 38 90
+t90 38h2048q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_552" unicode="&#xf24c;" horiz-adv-x="2304"
+d="M858 295v693q-106 -41 -172 -135.5t-66 -211.5t66 -211.5t172 -134.5zM1362 641q0 117 -66 211.5t-172 135.5v-694q106 41 172 135.5t66 211.5zM1577 641q0 -159 -78.5 -294t-213.5 -213.5t-294 -78.5q-119 0 -227.5 46.5t-187 125t-125 187t-46.5 227.5q0 159 78.5 294
+t213.5 213.5t294 78.5t294 -78.5t213.5 -213.5t78.5 -294zM1960 634q0 139 -55.5 261.5t-147.5 205.5t-213.5 131t-252.5 48h-301q-176 0 -323.5 -81t-235 -230t-87.5 -335q0 -171 87 -317.5t236 -231.5t323 -85h301q129 0 251.5 50.5t214.5 135t147.5 202.5t55.5 246z
+M2304 1280v-1280q0 -52 -38 -90t-90 -38h-2048q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h2048q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_553" unicode="&#xf24d;" horiz-adv-x="1792"
+d="M1664 -96v1088q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5v-1088q0 -13 9.5 -22.5t22.5 -9.5h1088q13 0 22.5 9.5t9.5 22.5zM1792 992v-1088q0 -66 -47 -113t-113 -47h-1088q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1088q66 0 113 -47t47 -113
+zM1408 1376v-160h-128v160q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5v-1088q0 -13 9.5 -22.5t22.5 -9.5h160v-128h-160q-66 0 -113 47t-47 113v1088q0 66 47 113t113 47h1088q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="_554" unicode="&#xf24e;" horiz-adv-x="2304"
+d="M1728 1088l-384 -704h768zM448 1088l-384 -704h768zM1269 1280q-14 -40 -45.5 -71.5t-71.5 -45.5v-1291h608q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1344q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h608v1291q-40 14 -71.5 45.5t-45.5 71.5h-491q-14 0 -23 9t-9 23v64
+q0 14 9 23t23 9h491q21 57 70 92.5t111 35.5t111 -35.5t70 -92.5h491q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-491zM1088 1264q33 0 56.5 23.5t23.5 56.5t-23.5 56.5t-56.5 23.5t-56.5 -23.5t-23.5 -56.5t23.5 -56.5t56.5 -23.5zM2176 384q0 -73 -46.5 -131t-117.5 -91
+t-144.5 -49.5t-139.5 -16.5t-139.5 16.5t-144.5 49.5t-117.5 91t-46.5 131q0 11 35 81t92 174.5t107 195.5t102 184t56 100q18 33 56 33t56 -33q4 -7 56 -100t102 -184t107 -195.5t92 -174.5t35 -81zM896 384q0 -73 -46.5 -131t-117.5 -91t-144.5 -49.5t-139.5 -16.5
+t-139.5 16.5t-144.5 49.5t-117.5 91t-46.5 131q0 11 35 81t92 174.5t107 195.5t102 184t56 100q18 33 56 33t56 -33q4 -7 56 -100t102 -184t107 -195.5t92 -174.5t35 -81z" />
+ <glyph glyph-name="_555" unicode="&#xf250;"
+d="M1408 1408q0 -261 -106.5 -461.5t-266.5 -306.5q160 -106 266.5 -306.5t106.5 -461.5h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1472q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96q0 261 106.5 461.5t266.5 306.5q-160 106 -266.5 306.5t-106.5 461.5h-96q-14 0 -23 9
+t-9 23v64q0 14 9 23t23 9h1472q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96zM874 700q77 29 149 92.5t129.5 152.5t92.5 210t35 253h-1024q0 -132 35 -253t92.5 -210t129.5 -152.5t149 -92.5q19 -7 30.5 -23.5t11.5 -36.5t-11.5 -36.5t-30.5 -23.5q-77 -29 -149 -92.5
+t-129.5 -152.5t-92.5 -210t-35 -253h1024q0 132 -35 253t-92.5 210t-129.5 152.5t-149 92.5q-19 7 -30.5 23.5t-11.5 36.5t11.5 36.5t30.5 23.5z" />
+ <glyph glyph-name="_556" unicode="&#xf251;"
+d="M1408 1408q0 -261 -106.5 -461.5t-266.5 -306.5q160 -106 266.5 -306.5t106.5 -461.5h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1472q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96q0 261 106.5 461.5t266.5 306.5q-160 106 -266.5 306.5t-106.5 461.5h-96q-14 0 -23 9
+t-9 23v64q0 14 9 23t23 9h1472q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96zM1280 1408h-1024q0 -66 9 -128h1006q9 61 9 128zM1280 -128q0 130 -34 249.5t-90.5 208t-126.5 152t-146 94.5h-230q-76 -31 -146 -94.5t-126.5 -152t-90.5 -208t-34 -249.5h1024z" />
+ <glyph glyph-name="_557" unicode="&#xf252;"
+d="M1408 1408q0 -261 -106.5 -461.5t-266.5 -306.5q160 -106 266.5 -306.5t106.5 -461.5h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1472q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96q0 261 106.5 461.5t266.5 306.5q-160 106 -266.5 306.5t-106.5 461.5h-96q-14 0 -23 9
+t-9 23v64q0 14 9 23t23 9h1472q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96zM1280 1408h-1024q0 -206 85 -384h854q85 178 85 384zM1223 192q-54 141 -145.5 241.5t-194.5 142.5h-230q-103 -42 -194.5 -142.5t-145.5 -241.5h910z" />
+ <glyph glyph-name="_558" unicode="&#xf253;"
+d="M1408 1408q0 -261 -106.5 -461.5t-266.5 -306.5q160 -106 266.5 -306.5t106.5 -461.5h96q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1472q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96q0 261 106.5 461.5t266.5 306.5q-160 106 -266.5 306.5t-106.5 461.5h-96q-14 0 -23 9
+t-9 23v64q0 14 9 23t23 9h1472q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96zM874 700q77 29 149 92.5t129.5 152.5t92.5 210t35 253h-1024q0 -132 35 -253t92.5 -210t129.5 -152.5t149 -92.5q19 -7 30.5 -23.5t11.5 -36.5t-11.5 -36.5t-30.5 -23.5q-137 -51 -244 -196
+h700q-107 145 -244 196q-19 7 -30.5 23.5t-11.5 36.5t11.5 36.5t30.5 23.5z" />
+ <glyph glyph-name="_559" unicode="&#xf254;"
+d="M1504 -64q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9h-1472q-14 0 -23 9t-9 23v128q0 14 9 23t23 9h1472zM130 0q3 55 16 107t30 95t46 87t53.5 76t64.5 69.5t66 60t70.5 55t66.5 47.5t65 43q-43 28 -65 43t-66.5 47.5t-70.5 55t-66 60t-64.5 69.5t-53.5 76t-46 87
+t-30 95t-16 107h1276q-3 -55 -16 -107t-30 -95t-46 -87t-53.5 -76t-64.5 -69.5t-66 -60t-70.5 -55t-66.5 -47.5t-65 -43q43 -28 65 -43t66.5 -47.5t70.5 -55t66 -60t64.5 -69.5t53.5 -76t46 -87t30 -95t16 -107h-1276zM1504 1536q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9
+h-1472q-14 0 -23 9t-9 23v128q0 14 9 23t23 9h1472z" />
+ <glyph glyph-name="_560" unicode="&#xf255;"
+d="M768 1152q-53 0 -90.5 -37.5t-37.5 -90.5v-128h-32v93q0 48 -32 81.5t-80 33.5q-46 0 -79 -33t-33 -79v-429l-32 30v172q0 48 -32 81.5t-80 33.5q-46 0 -79 -33t-33 -79v-224q0 -47 35 -82l310 -296q39 -39 39 -102q0 -26 19 -45t45 -19h640q26 0 45 19t19 45v25
+q0 41 10 77l108 436q10 36 10 77v246q0 48 -32 81.5t-80 33.5q-46 0 -79 -33t-33 -79v-32h-32v125q0 40 -25 72.5t-64 40.5q-14 2 -23 2q-46 0 -79 -33t-33 -79v-128h-32v122q0 51 -32.5 89.5t-82.5 43.5q-5 1 -13 1zM768 1280q84 0 149 -50q57 34 123 34q59 0 111 -27
+t86 -76q27 7 59 7q100 0 170 -71.5t70 -171.5v-246q0 -51 -13 -108l-109 -436q-6 -24 -6 -71q0 -80 -56 -136t-136 -56h-640q-84 0 -138 58.5t-54 142.5l-308 296q-76 73 -76 175v224q0 99 70.5 169.5t169.5 70.5q11 0 16 -1q6 95 75.5 160t164.5 65q52 0 98 -21
+q72 69 174 69z" />
+ <glyph glyph-name="_561" unicode="&#xf256;" horiz-adv-x="1792"
+d="M880 1408q-46 0 -79 -33t-33 -79v-656h-32v528q0 46 -33 79t-79 33t-79 -33t-33 -79v-528v-256l-154 205q-38 51 -102 51q-53 0 -90.5 -37.5t-37.5 -90.5q0 -43 26 -77l384 -512q38 -51 102 -51h688q34 0 61 22t34 56l76 405q5 32 5 59v498q0 46 -33 79t-79 33t-79 -33
+t-33 -79v-272h-32v528q0 46 -33 79t-79 33t-79 -33t-33 -79v-528h-32v656q0 46 -33 79t-79 33zM880 1536q68 0 125.5 -35.5t88.5 -96.5q19 4 42 4q99 0 169.5 -70.5t70.5 -169.5v-17q105 6 180.5 -64t75.5 -175v-498q0 -40 -8 -83l-76 -404q-14 -79 -76.5 -131t-143.5 -52
+h-688q-60 0 -114.5 27.5t-90.5 74.5l-384 512q-51 68 -51 154q0 106 75 181t181 75q78 0 128 -34v434q0 99 70.5 169.5t169.5 70.5q23 0 42 -4q31 61 88.5 96.5t125.5 35.5z" />
+ <glyph glyph-name="_562" unicode="&#xf257;" horiz-adv-x="1792"
+d="M1073 -128h-177q-163 0 -226 141q-23 49 -23 102v5q-62 30 -98.5 88.5t-36.5 127.5q0 38 5 48h-261q-106 0 -181 75t-75 181t75 181t181 75h113l-44 17q-74 28 -119.5 93.5t-45.5 145.5q0 106 75 181t181 75q46 0 91 -17l628 -239h401q106 0 181 -75t75 -181v-668
+q0 -88 -54 -157.5t-140 -90.5l-339 -85q-92 -23 -186 -23zM1024 583l-155 -71l-163 -74q-30 -14 -48 -41.5t-18 -60.5q0 -46 33 -79t79 -33q26 0 46 10l338 154q-49 10 -80.5 50t-31.5 90v55zM1344 272q0 46 -33 79t-79 33q-26 0 -46 -10l-290 -132q-28 -13 -37 -17
+t-30.5 -17t-29.5 -23.5t-16 -29t-8 -40.5q0 -50 31.5 -82t81.5 -32q20 0 38 9l352 160q30 14 48 41.5t18 60.5zM1112 1024l-650 248q-24 8 -46 8q-53 0 -90.5 -37.5t-37.5 -90.5q0 -40 22.5 -73t59.5 -47l526 -200v-64h-640q-53 0 -90.5 -37.5t-37.5 -90.5t37.5 -90.5
+t90.5 -37.5h535l233 106v198q0 63 46 106l111 102h-69zM1073 0q82 0 155 19l339 85q43 11 70 45.5t27 78.5v668q0 53 -37.5 90.5t-90.5 37.5h-308l-136 -126q-36 -33 -36 -82v-296q0 -46 33 -77t79 -31t79 35t33 81v208h32v-208q0 -70 -57 -114q52 -8 86.5 -48.5t34.5 -93.5
+q0 -42 -23 -78t-61 -53l-310 -141h91z" />
+ <glyph glyph-name="_563" unicode="&#xf258;" horiz-adv-x="2048"
+d="M1151 1536q61 0 116 -28t91 -77l572 -781q118 -159 118 -359v-355q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136v177l-286 143h-546q-80 0 -136 56t-56 136v32q0 119 84.5 203.5t203.5 84.5h420l42 128h-686q-100 0 -173.5 67.5t-81.5 166.5q-65 79 -65 182v32
+q0 80 56 136t136 56h959zM1920 -64v355q0 157 -93 284l-573 781q-39 52 -103 52h-959q-26 0 -45 -19t-19 -45q0 -32 1.5 -49.5t9.5 -40.5t25 -43q10 31 35.5 50t56.5 19h832v-32h-832q-26 0 -45 -19t-19 -45q0 -44 3 -58q8 -44 44 -73t81 -29h640h91q40 0 68 -28t28 -68
+q0 -15 -5 -30l-64 -192q-10 -29 -35 -47.5t-56 -18.5h-443q-66 0 -113 -47t-47 -113v-32q0 -26 19 -45t45 -19h561q16 0 29 -7l317 -158q24 -13 38.5 -36t14.5 -50v-197q0 -26 19 -45t45 -19h384q26 0 45 19t19 45z" />
+ <glyph glyph-name="_564" unicode="&#xf259;" horiz-adv-x="2048"
+d="M459 -256q-77 0 -137.5 47.5t-79.5 122.5l-101 401q-13 57 -13 108q0 45 -5 67l-116 477q-7 27 -7 57q0 93 62 161t155 78q17 85 82.5 139t152.5 54q83 0 148 -51.5t85 -132.5l83 -348l103 428q20 81 85 132.5t148 51.5q89 0 155.5 -57.5t80.5 -144.5q92 -10 152 -79
+t60 -162q0 -24 -7 -59l-123 -512q10 7 37.5 28.5t38.5 29.5t35 23t41 20.5t41.5 11t49.5 5.5q105 0 180 -74t75 -179q0 -62 -28.5 -118t-78.5 -94l-507 -380q-68 -51 -153 -51h-694zM1104 1408q-38 0 -68.5 -24t-39.5 -62l-164 -682h-127l-145 602q-9 38 -39.5 62t-68.5 24
+q-48 0 -80 -33t-32 -80q0 -15 3 -28l132 -547h-26l-99 408q-9 37 -40 62.5t-69 25.5q-47 0 -80 -33t-33 -79q0 -14 3 -26l116 -478q7 -28 9 -86t10 -88l100 -401q8 -32 34 -52.5t59 -20.5h694q42 0 76 26l507 379q56 43 56 110q0 52 -37.5 88.5t-89.5 36.5q-43 0 -77 -26
+l-307 -230v227q0 4 32 138t68 282t39 161q4 18 4 29q0 47 -32 81t-79 34q-39 0 -69.5 -24t-39.5 -62l-116 -482h-26l150 624q3 14 3 28q0 48 -31.5 82t-79.5 34z" />
+ <glyph glyph-name="_565" unicode="&#xf25a;" horiz-adv-x="1792"
+d="M640 1408q-53 0 -90.5 -37.5t-37.5 -90.5v-512v-384l-151 202q-41 54 -107 54q-52 0 -89 -38t-37 -90q0 -43 26 -77l384 -512q38 -51 102 -51h718q22 0 39.5 13.5t22.5 34.5l92 368q24 96 24 194v217q0 41 -28 71t-68 30t-68 -28t-28 -68h-32v61q0 48 -32 81.5t-80 33.5
+q-46 0 -79 -33t-33 -79v-64h-32v90q0 55 -37 94.5t-91 39.5q-53 0 -90.5 -37.5t-37.5 -90.5v-96h-32v570q0 55 -37 94.5t-91 39.5zM640 1536q107 0 181.5 -77.5t74.5 -184.5v-220q22 2 32 2q99 0 173 -69q47 21 99 21q113 0 184 -87q27 7 56 7q94 0 159 -67.5t65 -161.5
+v-217q0 -116 -28 -225l-92 -368q-16 -64 -68 -104.5t-118 -40.5h-718q-60 0 -114.5 27.5t-90.5 74.5l-384 512q-51 68 -51 154q0 105 74.5 180.5t179.5 75.5q71 0 130 -35v547q0 106 75 181t181 75zM768 128v384h-32v-384h32zM1024 128v384h-32v-384h32zM1280 128v384h-32
+v-384h32z" />
+ <glyph glyph-name="_566" unicode="&#xf25b;"
+d="M1288 889q60 0 107 -23q141 -63 141 -226v-177q0 -94 -23 -186l-85 -339q-21 -86 -90.5 -140t-157.5 -54h-668q-106 0 -181 75t-75 181v401l-239 628q-17 45 -17 91q0 106 75 181t181 75q80 0 145.5 -45.5t93.5 -119.5l17 -44v113q0 106 75 181t181 75t181 -75t75 -181
+v-261q27 5 48 5q69 0 127.5 -36.5t88.5 -98.5zM1072 896q-33 0 -60.5 -18t-41.5 -48l-74 -163l-71 -155h55q50 0 90 -31.5t50 -80.5l154 338q10 20 10 46q0 46 -33 79t-79 33zM1293 761q-22 0 -40.5 -8t-29 -16t-23.5 -29.5t-17 -30.5t-17 -37l-132 -290q-10 -20 -10 -46
+q0 -46 33 -79t79 -33q33 0 60.5 18t41.5 48l160 352q9 18 9 38q0 50 -32 81.5t-82 31.5zM128 1120q0 -22 8 -46l248 -650v-69l102 111q43 46 106 46h198l106 233v535q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5v-640h-64l-200 526q-14 37 -47 59.5t-73 22.5
+q-53 0 -90.5 -37.5t-37.5 -90.5zM1180 -128q44 0 78.5 27t45.5 70l85 339q19 73 19 155v91l-141 -310q-17 -38 -53 -61t-78 -23q-53 0 -93.5 34.5t-48.5 86.5q-44 -57 -114 -57h-208v32h208q46 0 81 33t35 79t-31 79t-77 33h-296q-49 0 -82 -36l-126 -136v-308
+q0 -53 37.5 -90.5t90.5 -37.5h668z" />
+ <glyph glyph-name="_567" unicode="&#xf25c;" horiz-adv-x="1973"
+d="M857 992v-117q0 -13 -9.5 -22t-22.5 -9h-298v-812q0 -13 -9 -22.5t-22 -9.5h-135q-13 0 -22.5 9t-9.5 23v812h-297q-13 0 -22.5 9t-9.5 22v117q0 14 9 23t23 9h793q13 0 22.5 -9.5t9.5 -22.5zM1895 995l77 -961q1 -13 -8 -24q-10 -10 -23 -10h-134q-12 0 -21 8.5
+t-10 20.5l-46 588l-189 -425q-8 -19 -29 -19h-120q-20 0 -29 19l-188 427l-45 -590q-1 -12 -10 -20.5t-21 -8.5h-135q-13 0 -23 10q-9 10 -9 24l78 961q1 12 10 20.5t21 8.5h142q20 0 29 -19l220 -520q10 -24 20 -51q3 7 9.5 24.5t10.5 26.5l221 520q9 19 29 19h141
+q13 0 22 -8.5t10 -20.5z" />
+ <glyph glyph-name="_568" unicode="&#xf25d;" horiz-adv-x="1792"
+d="M1042 833q0 88 -60 121q-33 18 -117 18h-123v-281h162q66 0 102 37t36 105zM1094 548l205 -373q8 -17 -1 -31q-8 -16 -27 -16h-152q-20 0 -28 17l-194 365h-155v-350q0 -14 -9 -23t-23 -9h-134q-14 0 -23 9t-9 23v960q0 14 9 23t23 9h294q128 0 190 -24q85 -31 134 -109
+t49 -180q0 -92 -42.5 -165.5t-115.5 -109.5q6 -10 9 -16zM896 1376q-150 0 -286 -58.5t-234.5 -157t-157 -234.5t-58.5 -286t58.5 -286t157 -234.5t234.5 -157t286 -58.5t286 58.5t234.5 157t157 234.5t58.5 286t-58.5 286t-157 234.5t-234.5 157t-286 58.5zM1792 640
+q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="_569" unicode="&#xf25e;" horiz-adv-x="1792"
+d="M605 303q153 0 257 104q14 18 3 36l-45 82q-6 13 -24 17q-16 2 -27 -11l-4 -3q-4 -4 -11.5 -10t-17.5 -13.5t-23.5 -14.5t-28.5 -13t-33.5 -9.5t-37.5 -3.5q-76 0 -125 50t-49 127q0 76 48 125.5t122 49.5q37 0 71.5 -14t50.5 -28l16 -14q11 -11 26 -10q16 2 24 14l53 78
+q13 20 -2 39q-3 4 -11 12t-30 23.5t-48.5 28t-67.5 22.5t-86 10q-148 0 -246 -96.5t-98 -240.5q0 -146 97 -241.5t247 -95.5zM1235 303q153 0 257 104q14 18 4 36l-45 82q-8 14 -25 17q-16 2 -27 -11l-4 -3q-4 -4 -11.5 -10t-17.5 -13.5t-23.5 -14.5t-28.5 -13t-33.5 -9.5
+t-37.5 -3.5q-76 0 -125 50t-49 127q0 76 48 125.5t122 49.5q37 0 71.5 -14t50.5 -28l16 -14q11 -11 26 -10q16 2 24 14l53 78q13 20 -2 39q-3 4 -11 12t-30 23.5t-48.5 28t-67.5 22.5t-86 10q-147 0 -245.5 -96.5t-98.5 -240.5q0 -146 97 -241.5t247 -95.5zM896 1376
+q-150 0 -286 -58.5t-234.5 -157t-157 -234.5t-58.5 -286t58.5 -286t157 -234.5t234.5 -157t286 -58.5t286 58.5t234.5 157t157 234.5t58.5 286t-58.5 286t-157 234.5t-234.5 157t-286 58.5zM896 1536q182 0 348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191
+t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71z" />
+ <glyph glyph-name="f260" unicode="&#xf260;" horiz-adv-x="2048"
+d="M736 736l384 -384l-384 -384l-672 672l672 672l168 -168l-96 -96l-72 72l-480 -480l480 -480l193 193l-289 287zM1312 1312l672 -672l-672 -672l-168 168l96 96l72 -72l480 480l-480 480l-193 -193l289 -287l-96 -96l-384 384z" />
+ <glyph glyph-name="f261" unicode="&#xf261;" horiz-adv-x="1792"
+d="M717 182l271 271l-279 279l-88 -88l192 -191l-96 -96l-279 279l279 279l40 -40l87 87l-127 128l-454 -454zM1075 190l454 454l-454 454l-271 -271l279 -279l88 88l-192 191l96 96l279 -279l-279 -279l-40 40l-87 -88zM1792 640q0 -182 -71 -348t-191 -286t-286 -191
+t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="_572" unicode="&#xf262;" horiz-adv-x="2304"
+d="M651 539q0 -39 -27.5 -66.5t-65.5 -27.5q-39 0 -66.5 27.5t-27.5 66.5q0 38 27.5 65.5t66.5 27.5q38 0 65.5 -27.5t27.5 -65.5zM1805 540q0 -39 -27.5 -66.5t-66.5 -27.5t-66.5 27.5t-27.5 66.5t27.5 66t66.5 27t66.5 -27t27.5 -66zM765 539q0 79 -56.5 136t-136.5 57
+t-136.5 -56.5t-56.5 -136.5t56.5 -136.5t136.5 -56.5t136.5 56.5t56.5 136.5zM1918 540q0 80 -56.5 136.5t-136.5 56.5q-79 0 -136 -56.5t-57 -136.5t56.5 -136.5t136.5 -56.5t136.5 56.5t56.5 136.5zM850 539q0 -116 -81.5 -197.5t-196.5 -81.5q-116 0 -197.5 82t-81.5 197
+t82 196.5t197 81.5t196.5 -81.5t81.5 -196.5zM2004 540q0 -115 -81.5 -196.5t-197.5 -81.5q-115 0 -196.5 81.5t-81.5 196.5t81.5 196.5t196.5 81.5q116 0 197.5 -81.5t81.5 -196.5zM1040 537q0 191 -135.5 326.5t-326.5 135.5q-125 0 -231 -62t-168 -168.5t-62 -231.5
+t62 -231.5t168 -168.5t231 -62q191 0 326.5 135.5t135.5 326.5zM1708 1110q-254 111 -556 111q-319 0 -573 -110q117 0 223 -45.5t182.5 -122.5t122 -183t45.5 -223q0 115 43.5 219.5t118 180.5t177.5 123t217 50zM2187 537q0 191 -135 326.5t-326 135.5t-326.5 -135.5
+t-135.5 -326.5t135.5 -326.5t326.5 -135.5t326 135.5t135 326.5zM1921 1103h383q-44 -51 -75 -114.5t-40 -114.5q110 -151 110 -337q0 -156 -77 -288t-209 -208.5t-287 -76.5q-133 0 -249 56t-196 155q-47 -56 -129 -179q-11 22 -53.5 82.5t-74.5 97.5
+q-80 -99 -196.5 -155.5t-249.5 -56.5q-155 0 -287 76.5t-209 208.5t-77 288q0 186 110 337q-9 51 -40 114.5t-75 114.5h365q149 100 355 156.5t432 56.5q224 0 421 -56t348 -157z" />
+ <glyph glyph-name="f263" unicode="&#xf263;" horiz-adv-x="1280"
+d="M640 629q-188 0 -321 133t-133 320q0 188 133 321t321 133t321 -133t133 -321q0 -187 -133 -320t-321 -133zM640 1306q-92 0 -157.5 -65.5t-65.5 -158.5q0 -92 65.5 -157.5t157.5 -65.5t157.5 65.5t65.5 157.5q0 93 -65.5 158.5t-157.5 65.5zM1163 574q13 -27 15 -49.5
+t-4.5 -40.5t-26.5 -38.5t-42.5 -37t-61.5 -41.5q-115 -73 -315 -94l73 -72l267 -267q30 -31 30 -74t-30 -73l-12 -13q-31 -30 -74 -30t-74 30q-67 68 -267 268l-267 -268q-31 -30 -74 -30t-73 30l-12 13q-31 30 -31 73t31 74l267 267l72 72q-203 21 -317 94
+q-39 25 -61.5 41.5t-42.5 37t-26.5 38.5t-4.5 40.5t15 49.5q10 20 28 35t42 22t56 -2t65 -35q5 -4 15 -11t43 -24.5t69 -30.5t92 -24t113 -11q91 0 174 25.5t120 50.5l38 25q33 26 65 35t56 2t42 -22t28 -35z" />
+ <glyph glyph-name="_574" unicode="&#xf264;"
+d="M927 956q0 -66 -46.5 -112.5t-112.5 -46.5t-112.5 46.5t-46.5 112.5t46.5 112.5t112.5 46.5t112.5 -46.5t46.5 -112.5zM1141 593q-10 20 -28 32t-47.5 9.5t-60.5 -27.5q-10 -8 -29 -20t-81 -32t-127 -20t-124 18t-86 36l-27 18q-31 25 -60.5 27.5t-47.5 -9.5t-28 -32
+q-22 -45 -2 -74.5t87 -73.5q83 -53 226 -67l-51 -52q-142 -142 -191 -190q-22 -22 -22 -52.5t22 -52.5l9 -9q22 -22 52.5 -22t52.5 22l191 191q114 -115 191 -191q22 -22 52.5 -22t52.5 22l9 9q22 22 22 52.5t-22 52.5l-191 190l-52 52q141 14 225 67q67 44 87 73.5t-2 74.5
+zM1092 956q0 134 -95 229t-229 95t-229 -95t-95 -229t95 -229t229 -95t229 95t95 229zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="_575" unicode="&#xf265;" horiz-adv-x="1720"
+d="M1565 1408q65 0 110 -45.5t45 -110.5v-519q0 -176 -68 -336t-182.5 -275t-274 -182.5t-334.5 -67.5q-176 0 -335.5 67.5t-274.5 182.5t-183 275t-68 336v519q0 64 46 110t110 46h1409zM861 344q47 0 82 33l404 388q37 35 37 85q0 49 -34.5 83.5t-83.5 34.5q-47 0 -82 -33
+l-323 -310l-323 310q-35 33 -81 33q-49 0 -83.5 -34.5t-34.5 -83.5q0 -51 36 -85l405 -388q33 -33 81 -33z" />
+ <glyph glyph-name="_576" unicode="&#xf266;" horiz-adv-x="2304"
+d="M1494 -103l-295 695q-25 -49 -158.5 -305.5t-198.5 -389.5q-1 -1 -27.5 -0.5t-26.5 1.5q-82 193 -255.5 587t-259.5 596q-21 50 -66.5 107.5t-103.5 100.5t-102 43q0 5 -0.5 24t-0.5 27h583v-50q-39 -2 -79.5 -16t-66.5 -43t-10 -64q26 -59 216.5 -499t235.5 -540
+q31 61 140 266.5t131 247.5q-19 39 -126 281t-136 295q-38 69 -201 71v50l513 -1v-47q-60 -2 -93.5 -25t-12.5 -69q33 -70 87 -189.5t86 -187.5q110 214 173 363q24 55 -10 79.5t-129 26.5q1 7 1 25v24q64 0 170.5 0.5t180 1t92.5 0.5v-49q-62 -2 -119 -33t-90 -81
+l-213 -442q13 -33 127.5 -290t121.5 -274l441 1017q-14 38 -49.5 62.5t-65 31.5t-55.5 8v50l460 -4l1 -2l-1 -44q-139 -4 -201 -145q-526 -1216 -559 -1291h-49z" />
+ <glyph glyph-name="_577" unicode="&#xf267;" horiz-adv-x="1792"
+d="M949 643q0 -26 -16.5 -45t-41.5 -19q-26 0 -45 16.5t-19 41.5q0 26 17 45t42 19t44 -16.5t19 -41.5zM964 585l350 581q-9 -8 -67.5 -62.5t-125.5 -116.5t-136.5 -127t-117 -110.5t-50.5 -51.5l-349 -580q7 7 67 62t126 116.5t136 127t117 111t50 50.5zM1611 640
+q0 -201 -104 -371q-3 2 -17 11t-26.5 16.5t-16.5 7.5q-13 0 -13 -13q0 -10 59 -44q-74 -112 -184.5 -190.5t-241.5 -110.5l-16 67q-1 10 -15 10q-5 0 -8 -5.5t-2 -9.5l16 -68q-72 -15 -146 -15q-199 0 -372 105q1 2 13 20.5t21.5 33.5t9.5 19q0 13 -13 13q-6 0 -17 -14.5
+t-22.5 -34.5t-13.5 -23q-113 75 -192 187.5t-110 244.5l69 15q10 3 10 15q0 5 -5.5 8t-10.5 2l-68 -15q-14 72 -14 139q0 206 109 379q2 -1 18.5 -12t30 -19t17.5 -8q13 0 13 12q0 6 -12.5 15.5t-32.5 21.5l-20 12q77 112 189 189t244 107l15 -67q2 -10 15 -10q5 0 8 5.5
+t2 10.5l-15 66q71 13 134 13q204 0 379 -109q-39 -56 -39 -65q0 -13 12 -13q11 0 48 64q111 -75 187.5 -186t107.5 -241l-56 -12q-10 -2 -10 -16q0 -5 5.5 -8t9.5 -2l57 13q14 -72 14 -140zM1696 640q0 163 -63.5 311t-170.5 255t-255 170.5t-311 63.5t-311 -63.5
+t-255 -170.5t-170.5 -255t-63.5 -311t63.5 -311t170.5 -255t255 -170.5t311 -63.5t311 63.5t255 170.5t170.5 255t63.5 311zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191
+t191 -286t71 -348z" />
+ <glyph glyph-name="_578" unicode="&#xf268;" horiz-adv-x="1792"
+d="M893 1536q240 2 451 -120q232 -134 352 -372l-742 39q-160 9 -294 -74.5t-185 -229.5l-276 424q128 159 311 245.5t383 87.5zM146 1131l337 -663q72 -143 211 -217t293 -45l-230 -451q-212 33 -385 157.5t-272.5 316t-99.5 411.5q0 267 146 491zM1732 962
+q58 -150 59.5 -310.5t-48.5 -306t-153 -272t-246 -209.5q-230 -133 -498 -119l405 623q88 131 82.5 290.5t-106.5 277.5zM896 942q125 0 213.5 -88.5t88.5 -213.5t-88.5 -213.5t-213.5 -88.5t-213.5 88.5t-88.5 213.5t88.5 213.5t213.5 88.5z" />
+ <glyph glyph-name="_579" unicode="&#xf269;" horiz-adv-x="1792"
+d="M903 -256q-283 0 -504.5 150.5t-329.5 398.5q-58 131 -67 301t26 332.5t111 312t179 242.5l-11 -281q11 14 68 15.5t70 -15.5q42 81 160.5 138t234.5 59q-54 -45 -119.5 -148.5t-58.5 -163.5q25 -8 62.5 -13.5t63 -7.5t68 -4t50.5 -3q15 -5 9.5 -45.5t-30.5 -75.5
+q-5 -7 -16.5 -18.5t-56.5 -35.5t-101 -34l15 -189l-139 67q-18 -43 -7.5 -81.5t36 -66.5t65.5 -41.5t81 -6.5q51 9 98 34.5t83.5 45t73.5 17.5q61 -4 89.5 -33t19.5 -65q-1 -2 -2.5 -5.5t-8.5 -12.5t-18 -15.5t-31.5 -10.5t-46.5 -1q-60 -95 -144.5 -135.5t-209.5 -29.5
+q74 -61 162.5 -82.5t168.5 -6t154.5 52t128 87.5t80.5 104q43 91 39 192.5t-37.5 188.5t-78.5 125q87 -38 137 -79.5t77 -112.5q15 170 -57.5 343t-209.5 284q265 -77 412 -279.5t151 -517.5q2 -127 -40.5 -255t-123.5 -238t-189 -196t-247.5 -135.5t-288.5 -49.5z" />
+ <glyph glyph-name="_580" unicode="&#xf26a;" horiz-adv-x="1792"
+d="M1493 1308q-165 110 -359 110q-155 0 -293 -73t-240 -200q-75 -93 -119.5 -218t-48.5 -266v-42q4 -141 48.5 -266t119.5 -218q102 -127 240 -200t293 -73q194 0 359 110q-121 -108 -274.5 -168t-322.5 -60q-29 0 -43 1q-175 8 -333 82t-272 193t-181 281t-67 339
+q0 182 71 348t191 286t286 191t348 71h3q168 -1 320.5 -60.5t273.5 -167.5zM1792 640q0 -192 -77 -362.5t-213 -296.5q-104 -63 -222 -63q-137 0 -255 84q154 56 253.5 233t99.5 405q0 227 -99 404t-253 234q119 83 254 83q119 0 226 -65q135 -125 210.5 -295t75.5 -361z
+" />
+ <glyph glyph-name="_581" unicode="&#xf26b;" horiz-adv-x="1792"
+d="M1792 599q0 -56 -7 -104h-1151q0 -146 109.5 -244.5t257.5 -98.5q99 0 185.5 46.5t136.5 130.5h423q-56 -159 -170.5 -281t-267.5 -188.5t-321 -66.5q-187 0 -356 83q-228 -116 -394 -116q-237 0 -237 263q0 115 45 275q17 60 109 229q199 360 475 606
+q-184 -79 -427 -354q63 274 283.5 449.5t501.5 175.5q30 0 45 -1q255 117 433 117q64 0 116 -13t94.5 -40.5t66.5 -76.5t24 -115q0 -116 -75 -286q101 -182 101 -390zM1722 1239q0 83 -53 132t-137 49q-108 0 -254 -70q121 -47 222.5 -131.5t170.5 -195.5q51 135 51 216z
+M128 2q0 -86 48.5 -132.5t134.5 -46.5q115 0 266 83q-122 72 -213.5 183t-137.5 245q-98 -205 -98 -332zM632 715h728q-5 142 -113 237t-251 95q-144 0 -251.5 -95t-112.5 -237z" />
+ <glyph glyph-name="_582" unicode="&#xf26c;" horiz-adv-x="2048"
+d="M1792 288v960q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-960q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5zM1920 1248v-960q0 -66 -47 -113t-113 -47h-736v-128h352q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23
+v64q0 14 9 23t23 9h352v128h-736q-66 0 -113 47t-47 113v960q0 66 47 113t113 47h1600q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="_583" unicode="&#xf26d;" horiz-adv-x="1792"
+d="M138 1408h197q-70 -64 -126 -149q-36 -56 -59 -115t-30 -125.5t-8.5 -120t10.5 -132t21 -126t28 -136.5q4 -19 6 -28q51 -238 81 -329q57 -171 152 -275h-272q-48 0 -82 34t-34 82v1304q0 48 34 82t82 34zM1346 1408h308q48 0 82 -34t34 -82v-1304q0 -48 -34 -82t-82 -34
+h-178q212 210 196 565l-469 -101q-2 -45 -12 -82t-31 -72t-59.5 -59.5t-93.5 -36.5q-123 -26 -199 40q-32 27 -53 61t-51.5 129t-64.5 258q-35 163 -45.5 263t-5.5 139t23 77q20 41 62.5 73t102.5 45q45 12 83.5 6.5t67 -17t54 -35t43 -48t34.5 -56.5l468 100
+q-68 175 -180 287z" />
+ <glyph glyph-name="_584" unicode="&#xf26e;"
+d="M1401 -11l-6 -6q-113 -113 -259 -175q-154 -64 -317 -64q-165 0 -317 64q-148 63 -259 175q-113 112 -175 258q-42 103 -54 189q-4 28 48 36q51 8 56 -20q1 -1 1 -4q18 -90 46 -159q50 -124 152 -226q98 -98 226 -152q132 -56 276 -56q143 0 276 56q128 55 225 152l6 6
+q10 10 25 6q12 -3 33 -22q36 -37 17 -58zM929 604l-66 -66l63 -63q21 -21 -7 -49q-17 -17 -32 -17q-10 0 -19 10l-62 61l-66 -66q-5 -5 -15 -5q-15 0 -31 16l-2 2q-18 15 -18 29q0 7 8 17l66 65l-66 66q-16 16 14 45q18 18 31 18q6 0 13 -5l65 -66l65 65q18 17 48 -13
+q27 -27 11 -44zM1400 547q0 -118 -46 -228q-45 -105 -126 -186q-80 -80 -187 -126t-228 -46t-228 46t-187 126q-82 82 -125 186q-15 33 -15 40h-1q-9 27 43 44q50 16 60 -12q37 -99 97 -167h1v339v2q3 136 102 232q105 103 253 103q147 0 251 -103t104 -249
+q0 -147 -104.5 -251t-250.5 -104q-58 0 -112 16q-28 11 -13 61q16 51 44 43l14 -3q14 -3 33 -6t30 -3q104 0 176 71.5t72 174.5q0 101 -72 171q-71 71 -175 71q-107 0 -178 -80q-64 -72 -64 -160v-413q110 -67 242 -67q96 0 185 36.5t156 103.5t103.5 155t36.5 183
+q0 198 -141 339q-140 140 -339 140q-200 0 -340 -140q-53 -53 -77 -87l-2 -2q-8 -11 -13 -15.5t-21.5 -9.5t-38.5 3q-21 5 -36.5 16.5t-15.5 26.5v680q0 15 10.5 26.5t27.5 11.5h877q30 0 30 -55t-30 -55h-811v-483h1q40 42 102 84t108 61q109 46 231 46q121 0 228 -46
+t187 -126q81 -81 126 -186q46 -112 46 -229zM1369 1128q9 -8 9 -18t-5.5 -18t-16.5 -21q-26 -26 -39 -26q-9 0 -16 7q-106 91 -207 133q-128 56 -276 56q-133 0 -262 -49q-27 -10 -45 37q-9 25 -8 38q3 16 16 20q130 57 299 57q164 0 316 -64q137 -58 235 -152z" />
+ <glyph glyph-name="_585" unicode="&#xf270;" horiz-adv-x="1792"
+d="M1551 60q15 6 26 3t11 -17.5t-15 -33.5q-13 -16 -44 -43.5t-95.5 -68t-141 -74t-188 -58t-229.5 -24.5q-119 0 -238 31t-209 76.5t-172.5 104t-132.5 105t-84 87.5q-8 9 -10 16.5t1 12t8 7t11.5 2t11.5 -4.5q192 -117 300 -166q389 -176 799 -90q190 40 391 135z
+M1758 175q11 -16 2.5 -69.5t-28.5 -102.5q-34 -83 -85 -124q-17 -14 -26 -9t0 24q21 45 44.5 121.5t6.5 98.5q-5 7 -15.5 11.5t-27 6t-29.5 2.5t-35 0t-31.5 -2t-31 -3t-22.5 -2q-6 -1 -13 -1.5t-11 -1t-8.5 -1t-7 -0.5h-5.5h-4.5t-3 0.5t-2 1.5l-1.5 3q-6 16 47 40t103 30
+q46 7 108 1t76 -24zM1364 618q0 -31 13.5 -64t32 -58t37.5 -46t33 -32l13 -11l-227 -224q-40 37 -79 75.5t-58 58.5l-19 20q-11 11 -25 33q-38 -59 -97.5 -102.5t-127.5 -63.5t-140 -23t-137.5 21t-117.5 65.5t-83 113t-31 162.5q0 84 28 154t72 116.5t106.5 83t122.5 57
+t130 34.5t119.5 18.5t99.5 6.5v127q0 65 -21 97q-34 53 -121 53q-6 0 -16.5 -1t-40.5 -12t-56 -29.5t-56 -59.5t-48 -96l-294 27q0 60 22 119t67 113t108 95t151.5 65.5t190.5 24.5q100 0 181 -25t129.5 -61.5t81 -83t45 -86t12.5 -73.5v-589zM692 597q0 -86 70 -133
+q66 -44 139 -22q84 25 114 123q14 45 14 101v162q-59 -2 -111 -12t-106.5 -33.5t-87 -71t-32.5 -114.5z" />
+ <glyph glyph-name="_586" unicode="&#xf271;" horiz-adv-x="1792"
+d="M1536 1280q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128zM1152 1376v-288q0 -14 9 -23t23 -9
+h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23zM384 1376v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23zM1536 -128v1024h-1408v-1024h1408zM896 448h224q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-224
+v-224q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v224h-224q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h224v224q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-224z" />
+ <glyph glyph-name="_587" unicode="&#xf272;" horiz-adv-x="1792"
+d="M1152 416v-64q0 -14 -9 -23t-23 -9h-576q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h576q14 0 23 -9t9 -23zM128 -128h1408v1024h-1408v-1024zM512 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1280 1088v288q0 14 -9 23
+t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1664 1152v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47
+t47 -113v-96h128q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_588" unicode="&#xf273;" horiz-adv-x="1792"
+d="M1111 151l-46 -46q-9 -9 -22 -9t-23 9l-188 189l-188 -189q-10 -9 -23 -9t-22 9l-46 46q-9 9 -9 22t9 23l189 188l-189 188q-9 10 -9 23t9 22l46 46q9 9 22 9t23 -9l188 -188l188 188q10 9 23 9t22 -9l46 -46q9 -9 9 -22t-9 -23l-188 -188l188 -188q9 -10 9 -23t-9 -22z
+M128 -128h1408v1024h-1408v-1024zM512 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1280 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1664 1152v-1280
+q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_589" unicode="&#xf274;" horiz-adv-x="1792"
+d="M1303 572l-512 -512q-10 -9 -23 -9t-23 9l-288 288q-9 10 -9 23t9 22l46 46q9 9 22 9t23 -9l220 -220l444 444q10 9 23 9t22 -9l46 -46q9 -9 9 -22t-9 -23zM128 -128h1408v1024h-1408v-1024zM512 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23
+t23 -9h64q14 0 23 9t9 23zM1280 1088v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288q0 -14 9 -23t23 -9h64q14 0 23 9t9 23zM1664 1152v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47
+t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90z" />
+ <glyph glyph-name="_590" unicode="&#xf275;" horiz-adv-x="1792"
+d="M448 1536q26 0 45 -19t19 -45v-891l536 429q17 14 40 14q26 0 45 -19t19 -45v-379l536 429q17 14 40 14q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45v1664q0 26 19 45t45 19h384z" />
+ <glyph glyph-name="_591" unicode="&#xf276;" horiz-adv-x="1024"
+d="M512 448q66 0 128 15v-655q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v655q62 -15 128 -15zM512 1536q212 0 362 -150t150 -362t-150 -362t-362 -150t-362 150t-150 362t150 362t362 150zM512 1312q14 0 23 9t9 23t-9 23t-23 9q-146 0 -249 -103t-103 -249
+q0 -14 9 -23t23 -9t23 9t9 23q0 119 84.5 203.5t203.5 84.5z" />
+ <glyph glyph-name="_592" unicode="&#xf277;" horiz-adv-x="1792"
+d="M1745 1239q10 -10 10 -23t-10 -23l-141 -141q-28 -28 -68 -28h-1344q-26 0 -45 19t-19 45v256q0 26 19 45t45 19h576v64q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-64h512q40 0 68 -28zM768 320h256v-512q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v512zM1600 768
+q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1344q-40 0 -68 28l-141 141q-10 10 -10 23t10 23l141 141q28 28 68 28h512v192h256v-192h576z" />
+ <glyph glyph-name="_593" unicode="&#xf278;" horiz-adv-x="2048"
+d="M2020 1525q28 -20 28 -53v-1408q0 -20 -11 -36t-29 -23l-640 -256q-24 -11 -48 0l-616 246l-616 -246q-10 -5 -24 -5q-19 0 -36 11q-28 20 -28 53v1408q0 20 11 36t29 23l640 256q24 11 48 0l616 -246l616 246q32 13 60 -6zM736 1390v-1270l576 -230v1270zM128 1173
+v-1270l544 217v1270zM1920 107v1270l-544 -217v-1270z" />
+ <glyph glyph-name="_594" unicode="&#xf279;" horiz-adv-x="1792"
+d="M512 1536q13 0 22.5 -9.5t9.5 -22.5v-1472q0 -20 -17 -28l-480 -256q-7 -4 -15 -4q-13 0 -22.5 9.5t-9.5 22.5v1472q0 20 17 28l480 256q7 4 15 4zM1760 1536q13 0 22.5 -9.5t9.5 -22.5v-1472q0 -20 -17 -28l-480 -256q-7 -4 -15 -4q-13 0 -22.5 9.5t-9.5 22.5v1472
+q0 20 17 28l480 256q7 4 15 4zM640 1536q8 0 14 -3l512 -256q18 -10 18 -29v-1472q0 -13 -9.5 -22.5t-22.5 -9.5q-8 0 -14 3l-512 256q-18 10 -18 29v1472q0 13 9.5 22.5t22.5 9.5z" />
+ <glyph glyph-name="_595" unicode="&#xf27a;" horiz-adv-x="1792"
+d="M640 640q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1024 640q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1408 640q0 53 -37.5 90.5t-90.5 37.5
+t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5zM1792 640q0 -174 -120 -321.5t-326 -233t-450 -85.5q-110 0 -211 18q-173 -173 -435 -229q-52 -10 -86 -13q-12 -1 -22 6t-13 18q-4 15 20 37q5 5 23.5 21.5t25.5 23.5t23.5 25.5t24 31.5t20.5 37
+t20 48t14.5 57.5t12.5 72.5q-146 90 -229.5 216.5t-83.5 269.5q0 174 120 321.5t326 233t450 85.5t450 -85.5t326 -233t120 -321.5z" />
+ <glyph glyph-name="_596" unicode="&#xf27b;" horiz-adv-x="1792"
+d="M640 640q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1024 640q0 -53 -37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM1408 640q0 -53 -37.5 -90.5t-90.5 -37.5
+t-90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5zM896 1152q-204 0 -381.5 -69.5t-282 -187.5t-104.5 -255q0 -112 71.5 -213.5t201.5 -175.5l87 -50l-27 -96q-24 -91 -70 -172q152 63 275 171l43 38l57 -6q69 -8 130 -8q204 0 381.5 69.5t282 187.5
+t104.5 255t-104.5 255t-282 187.5t-381.5 69.5zM1792 640q0 -174 -120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22h-5q-15 0 -27 10.5t-16 27.5v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5t32.5 51
+t27 59t26 76q-157 89 -247.5 220t-90.5 281q0 130 71 248.5t191 204.5t286 136.5t348 50.5t348 -50.5t286 -136.5t191 -204.5t71 -248.5z" />
+ <glyph glyph-name="_597" unicode="&#xf27c;" horiz-adv-x="1024"
+d="M512 345l512 295v-591l-512 -296v592zM0 640v-591l512 296zM512 1527v-591l-512 -296v591zM512 936l512 295v-591z" />
+ <glyph glyph-name="_598" unicode="&#xf27d;" horiz-adv-x="1792"
+d="M1709 1018q-10 -236 -332 -651q-333 -431 -562 -431q-142 0 -240 263q-44 160 -132 482q-72 262 -157 262q-18 0 -127 -76l-77 98q24 21 108 96.5t130 115.5q156 138 241 146q95 9 153 -55.5t81 -203.5q44 -287 66 -373q55 -249 120 -249q51 0 154 161q101 161 109 246
+q13 139 -109 139q-57 0 -121 -26q120 393 459 382q251 -8 236 -326z" />
+ <glyph glyph-name="f27e" unicode="&#xf27e;"
+d="M0 1408h1536v-1536h-1536v1536zM1085 293l-221 631l221 297h-634l221 -297l-221 -631l317 -304z" />
+ <glyph glyph-name="uniF280" unicode="&#xf280;"
+d="M0 1408h1536v-1536h-1536v1536zM908 1088l-12 -33l75 -83l-31 -114l25 -25l107 57l107 -57l25 25l-31 114l75 83l-12 33h-95l-53 96h-32l-53 -96h-95zM641 925q32 0 44.5 -16t11.5 -63l174 21q0 55 -17.5 92.5t-50.5 56t-69 25.5t-85 7q-133 0 -199 -57.5t-66 -182.5v-72
+h-96v-128h76q20 0 20 -8v-382q0 -14 -5 -20t-18 -7l-73 -7v-88h448v86l-149 14q-6 1 -8.5 1.5t-3.5 2.5t-0.5 4t1 7t0.5 10v387h191l38 128h-231q-6 0 -2 6t4 9v80q0 27 1.5 40.5t7.5 28t19.5 20t36.5 5.5zM1248 96v86l-54 9q-7 1 -9.5 2.5t-2.5 3t1 7.5t1 12v520h-275
+l-23 -101l83 -22q23 -7 23 -27v-370q0 -14 -6 -18.5t-20 -6.5l-70 -9v-86h352z" />
+ <glyph glyph-name="uniF281" unicode="&#xf281;" horiz-adv-x="1792"
+d="M1792 690q0 -58 -29.5 -105.5t-79.5 -72.5q12 -46 12 -96q0 -155 -106.5 -287t-290.5 -208.5t-400 -76.5t-399.5 76.5t-290 208.5t-106.5 287q0 47 11 94q-51 25 -82 73.5t-31 106.5q0 82 58 140.5t141 58.5q85 0 145 -63q218 152 515 162l116 521q3 13 15 21t26 5
+l369 -81q18 37 54 59.5t79 22.5q62 0 106 -43.5t44 -105.5t-44 -106t-106 -44t-105.5 43.5t-43.5 105.5l-334 74l-104 -472q300 -9 519 -160q58 61 143 61q83 0 141 -58.5t58 -140.5zM418 491q0 -62 43.5 -106t105.5 -44t106 44t44 106t-44 105.5t-106 43.5q-61 0 -105 -44
+t-44 -105zM1228 136q11 11 11 26t-11 26q-10 10 -25 10t-26 -10q-41 -42 -121 -62t-160 -20t-160 20t-121 62q-11 10 -26 10t-25 -10q-11 -10 -11 -25.5t11 -26.5q43 -43 118.5 -68t122.5 -29.5t91 -4.5t91 4.5t122.5 29.5t118.5 68zM1225 341q62 0 105.5 44t43.5 106
+q0 61 -44 105t-105 44q-62 0 -106 -43.5t-44 -105.5t44 -106t106 -44z" />
+ <glyph glyph-name="_602" unicode="&#xf282;" horiz-adv-x="1792"
+d="M69 741h1q16 126 58.5 241.5t115 217t167.5 176t223.5 117.5t276.5 43q231 0 414 -105.5t294 -303.5q104 -187 104 -442v-188h-1125q1 -111 53.5 -192.5t136.5 -122.5t189.5 -57t213 -3t208 46.5t173.5 84.5v-377q-92 -55 -229.5 -92t-312.5 -38t-316 53
+q-189 73 -311.5 249t-124.5 372q-3 242 111 412t325 268q-48 -60 -78 -125.5t-46 -159.5h635q8 77 -8 140t-47 101.5t-70.5 66.5t-80.5 41t-75 20.5t-56 8.5l-22 1q-135 -5 -259.5 -44.5t-223.5 -104.5t-176 -140.5t-138 -163.5z" />
+ <glyph glyph-name="_603" unicode="&#xf283;" horiz-adv-x="2304"
+d="M0 32v608h2304v-608q0 -66 -47 -113t-113 -47h-1984q-66 0 -113 47t-47 113zM640 256v-128h384v128h-384zM256 256v-128h256v128h-256zM2144 1408q66 0 113 -47t47 -113v-224h-2304v224q0 66 47 113t113 47h1984z" />
+ <glyph glyph-name="_604" unicode="&#xf284;" horiz-adv-x="1792"
+d="M1584 246l-218 111q-74 -120 -196.5 -189t-263.5 -69q-147 0 -271 72t-196 196t-72 270q0 110 42.5 209.5t115 172t172 115t209.5 42.5q131 0 247.5 -60.5t192.5 -168.5l215 125q-110 169 -286.5 265t-378.5 96q-161 0 -308 -63t-253 -169t-169 -253t-63 -308t63 -308
+t169 -253t253 -169t308 -63q213 0 397.5 107t290.5 292zM1030 643l693 -352q-116 -253 -334.5 -400t-492.5 -147q-182 0 -348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71q260 0 470.5 -133.5t335.5 -366.5zM1543 640h-39v-160h-96v352h136q32 0 54.5 -20
+t28.5 -48t1 -56t-27.5 -48t-57.5 -20z" />
+ <glyph glyph-name="uniF285" unicode="&#xf285;" horiz-adv-x="1792"
+d="M1427 827l-614 386l92 151h855zM405 562l-184 116v858l1183 -743zM1424 697l147 -95v-858l-532 335zM1387 718l-500 -802h-855l356 571z" />
+ <glyph glyph-name="uniF286" unicode="&#xf286;" horiz-adv-x="1792"
+d="M640 528v224q0 16 -16 16h-96q-16 0 -16 -16v-224q0 -16 16 -16h96q16 0 16 16zM1152 528v224q0 16 -16 16h-96q-16 0 -16 -16v-224q0 -16 16 -16h96q16 0 16 16zM1664 496v-752h-640v320q0 80 -56 136t-136 56t-136 -56t-56 -136v-320h-640v752q0 16 16 16h96
+q16 0 16 -16v-112h128v624q0 16 16 16h96q16 0 16 -16v-112h128v112q0 16 16 16h96q16 0 16 -16v-112h128v112q0 6 2.5 9.5t8.5 5t9.5 2t11.5 0t9 -0.5v391q-32 15 -32 50q0 23 16.5 39t38.5 16t38.5 -16t16.5 -39q0 -35 -32 -50v-17q45 10 83 10q21 0 59.5 -7.5t54.5 -7.5
+q17 0 47 7.5t37 7.5q16 0 16 -16v-210q0 -15 -35 -21.5t-62 -6.5q-18 0 -54.5 7.5t-55.5 7.5q-40 0 -90 -12v-133q1 0 9 0.5t11.5 0t9.5 -2t8.5 -5t2.5 -9.5v-112h128v112q0 16 16 16h96q16 0 16 -16v-112h128v112q0 16 16 16h96q16 0 16 -16v-624h128v112q0 16 16 16h96
+q16 0 16 -16z" />
+ <glyph glyph-name="_607" unicode="&#xf287;" horiz-adv-x="2304"
+d="M2288 731q16 -8 16 -27t-16 -27l-320 -192q-8 -5 -16 -5q-9 0 -16 4q-16 10 -16 28v128h-858q37 -58 83 -165q16 -37 24.5 -55t24 -49t27 -47t27 -34t31.5 -26t33 -8h96v96q0 14 9 23t23 9h320q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23v96h-96
+q-32 0 -61 10t-51 23.5t-45 40.5t-37 46t-33.5 57t-28.5 57.5t-28 60.5q-23 53 -37 81.5t-36 65t-44.5 53.5t-46.5 17h-360q-22 -84 -91 -138t-157 -54q-106 0 -181 75t-75 181t75 181t181 75q88 0 157 -54t91 -138h104q24 0 46.5 17t44.5 53.5t36 65t37 81.5q19 41 28 60.5
+t28.5 57.5t33.5 57t37 46t45 40.5t51 23.5t61 10h107q21 57 70 92.5t111 35.5q80 0 136 -56t56 -136t-56 -136t-136 -56q-62 0 -111 35.5t-70 92.5h-107q-17 0 -33 -8t-31.5 -26t-27 -34t-27 -47t-24 -49t-24.5 -55q-46 -107 -83 -165h1114v128q0 18 16 28t32 -1z" />
+ <glyph glyph-name="_608" unicode="&#xf288;" horiz-adv-x="1792"
+d="M1150 774q0 -56 -39.5 -95t-95.5 -39h-253v269h253q56 0 95.5 -39.5t39.5 -95.5zM1329 774q0 130 -91.5 222t-222.5 92h-433v-896h180v269h253q130 0 222 91.5t92 221.5zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348
+t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="_609" unicode="&#xf289;" horiz-adv-x="2304"
+d="M1645 438q0 59 -34 106.5t-87 68.5q-7 -45 -23 -92q-7 -24 -27.5 -38t-44.5 -14q-12 0 -24 3q-31 10 -45 38.5t-4 58.5q23 71 23 143q0 123 -61 227.5t-166 165.5t-228 61q-134 0 -247 -73t-167 -194q108 -28 188 -106q22 -23 22 -55t-22 -54t-54 -22t-55 22
+q-75 75 -180 75q-106 0 -181 -74.5t-75 -180.5t75 -180.5t181 -74.5h1046q79 0 134.5 55.5t55.5 133.5zM1798 438q0 -142 -100.5 -242t-242.5 -100h-1046q-169 0 -289 119.5t-120 288.5q0 153 100 267t249 136q62 184 221 298t354 114q235 0 408.5 -158.5t196.5 -389.5
+q116 -25 192.5 -118.5t76.5 -214.5zM2048 438q0 -175 -97 -319q-23 -33 -64 -33q-24 0 -43 13q-26 17 -32 48.5t12 57.5q71 104 71 233t-71 233q-18 26 -12 57t32 49t57.5 11.5t49.5 -32.5q97 -142 97 -318zM2304 438q0 -244 -134 -443q-23 -34 -64 -34q-23 0 -42 13
+q-26 18 -32.5 49t11.5 57q108 164 108 358q0 195 -108 357q-18 26 -11.5 57.5t32.5 48.5q26 18 57 12t49 -33q134 -198 134 -442z" />
+ <glyph glyph-name="_610" unicode="&#xf28a;"
+d="M1500 -13q0 -89 -63 -152.5t-153 -63.5t-153.5 63.5t-63.5 152.5q0 90 63.5 153.5t153.5 63.5t153 -63.5t63 -153.5zM1267 268q-115 -15 -192.5 -102.5t-77.5 -205.5q0 -74 33 -138q-146 -78 -379 -78q-109 0 -201 21t-153.5 54.5t-110.5 76.5t-76 85t-44.5 83
+t-23.5 66.5t-6 39.5q0 19 4.5 42.5t18.5 56t36.5 58t64 43.5t94.5 18t94 -17.5t63 -41t35.5 -53t17.5 -49t4 -33.5q0 -34 -23 -81q28 -27 82 -42t93 -17l40 -1q115 0 190 51t75 133q0 26 -9 48.5t-31.5 44.5t-49.5 41t-74 44t-93.5 47.5t-119.5 56.5q-28 13 -43 20
+q-116 55 -187 100t-122.5 102t-72 125.5t-20.5 162.5q0 78 20.5 150t66 137.5t112.5 114t166.5 77t221.5 28.5q120 0 220 -26t164.5 -67t109.5 -94t64 -105.5t19 -103.5q0 -46 -15 -82.5t-36.5 -58t-48.5 -36t-49 -19.5t-39 -5h-8h-32t-39 5t-44 14t-41 28t-37 46t-24 70.5
+t-10 97.5q-15 16 -59 25.5t-81 10.5l-37 1q-68 0 -117.5 -31t-70.5 -70t-21 -76q0 -24 5 -43t24 -46t53 -51t97 -53.5t150 -58.5q76 -25 138.5 -53.5t109 -55.5t83 -59t60.5 -59.5t41 -62.5t26.5 -62t14.5 -63.5t6 -62t1 -62.5z" />
+ <glyph glyph-name="_611" unicode="&#xf28b;"
+d="M704 352v576q0 14 -9 23t-23 9h-256q-14 0 -23 -9t-9 -23v-576q0 -14 9 -23t23 -9h256q14 0 23 9t9 23zM1152 352v576q0 14 -9 23t-23 9h-256q-14 0 -23 -9t-9 -23v-576q0 -14 9 -23t23 -9h256q14 0 23 9t9 23zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103
+t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_612" unicode="&#xf28c;"
+d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM768 96q148 0 273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273
+t73 -273t198 -198t273 -73zM864 320q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-192zM480 320q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-192z" />
+ <glyph glyph-name="_613" unicode="&#xf28d;"
+d="M1088 352v576q0 14 -9 23t-23 9h-576q-14 0 -23 -9t-9 -23v-576q0 -14 9 -23t23 -9h576q14 0 23 9t9 23zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5
+t103 -385.5z" />
+ <glyph glyph-name="_614" unicode="&#xf28e;"
+d="M768 1408q209 0 385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103zM768 96q148 0 273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73t-198 -198t-73 -273
+t73 -273t198 -198t273 -73zM480 320q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h576q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-576z" />
+ <glyph glyph-name="_615" unicode="&#xf290;" horiz-adv-x="1792"
+d="M1757 128l35 -313q3 -28 -16 -50q-19 -21 -48 -21h-1664q-29 0 -48 21q-19 22 -16 50l35 313h1722zM1664 967l86 -775h-1708l86 775q3 24 21 40.5t43 16.5h256v-128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5v128h384v-128q0 -53 37.5 -90.5t90.5 -37.5
+t90.5 37.5t37.5 90.5v128h256q25 0 43 -16.5t21 -40.5zM1280 1152v-256q0 -26 -19 -45t-45 -19t-45 19t-19 45v256q0 106 -75 181t-181 75t-181 -75t-75 -181v-256q0 -26 -19 -45t-45 -19t-45 19t-19 45v256q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5z" />
+ <glyph glyph-name="_616" unicode="&#xf291;" horiz-adv-x="2048"
+d="M1920 768q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5h-15l-115 -662q-8 -46 -44 -76t-82 -30h-1280q-46 0 -82 30t-44 76l-115 662h-15q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5h1792zM485 -32q26 2 43.5 22.5t15.5 46.5l-32 416q-2 26 -22.5 43.5
+t-46.5 15.5t-43.5 -22.5t-15.5 -46.5l32 -416q2 -25 20.5 -42t43.5 -17h5zM896 32v416q0 26 -19 45t-45 19t-45 -19t-19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45zM1280 32v416q0 26 -19 45t-45 19t-45 -19t-19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45zM1632 27l32 416
+q2 26 -15.5 46.5t-43.5 22.5t-46.5 -15.5t-22.5 -43.5l-32 -416q-2 -26 15.5 -46.5t43.5 -22.5h5q25 0 43.5 17t20.5 42zM476 1244l-93 -412h-132l101 441q19 88 89 143.5t160 55.5h167q0 26 19 45t45 19h384q26 0 45 -19t19 -45h167q90 0 160 -55.5t89 -143.5l101 -441
+h-132l-93 412q-11 44 -45.5 72t-79.5 28h-167q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45h-167q-45 0 -79.5 -28t-45.5 -72z" />
+ <glyph glyph-name="_617" unicode="&#xf292;" horiz-adv-x="1792"
+d="M991 512l64 256h-254l-64 -256h254zM1759 1016l-56 -224q-7 -24 -31 -24h-327l-64 -256h311q15 0 25 -12q10 -14 6 -28l-56 -224q-5 -24 -31 -24h-327l-81 -328q-7 -24 -31 -24h-224q-16 0 -26 12q-9 12 -6 28l78 312h-254l-81 -328q-7 -24 -31 -24h-225q-15 0 -25 12
+q-9 12 -6 28l78 312h-311q-15 0 -25 12q-9 12 -6 28l56 224q7 24 31 24h327l64 256h-311q-15 0 -25 12q-10 14 -6 28l56 224q5 24 31 24h327l81 328q7 24 32 24h224q15 0 25 -12q9 -12 6 -28l-78 -312h254l81 328q7 24 32 24h224q15 0 25 -12q9 -12 6 -28l-78 -312h311
+q15 0 25 -12q9 -12 6 -28z" />
+ <glyph glyph-name="_618" unicode="&#xf293;"
+d="M841 483l148 -148l-149 -149zM840 1094l149 -149l-148 -148zM710 -130l464 464l-306 306l306 306l-464 464v-611l-255 255l-93 -93l320 -321l-320 -321l93 -93l255 255v-611zM1429 640q0 -209 -32 -365.5t-87.5 -257t-140.5 -162.5t-181.5 -86.5t-219.5 -24.5
+t-219.5 24.5t-181.5 86.5t-140.5 162.5t-87.5 257t-32 365.5t32 365.5t87.5 257t140.5 162.5t181.5 86.5t219.5 24.5t219.5 -24.5t181.5 -86.5t140.5 -162.5t87.5 -257t32 -365.5z" />
+ <glyph glyph-name="_619" unicode="&#xf294;" horiz-adv-x="1024"
+d="M596 113l173 172l-173 172v-344zM596 823l173 172l-173 172v-344zM628 640l356 -356l-539 -540v711l-297 -296l-108 108l372 373l-372 373l108 108l297 -296v711l539 -540z" />
+ <glyph glyph-name="_620" unicode="&#xf295;"
+d="M1280 256q0 52 -38 90t-90 38t-90 -38t-38 -90t38 -90t90 -38t90 38t38 90zM512 1024q0 52 -38 90t-90 38t-90 -38t-38 -90t38 -90t90 -38t90 38t38 90zM1536 256q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5t271.5 -112.5
+t112.5 -271.5zM1440 1344q0 -20 -13 -38l-1056 -1408q-19 -26 -51 -26h-160q-26 0 -45 19t-19 45q0 20 13 38l1056 1408q19 26 51 26h160q26 0 45 -19t19 -45zM768 1024q0 -159 -112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5t112.5 271.5t271.5 112.5
+t271.5 -112.5t112.5 -271.5z" />
+ <glyph glyph-name="_621" unicode="&#xf296;" horiz-adv-x="1792"
+d="M104 830l792 -1015l-868 630q-18 13 -25 34.5t0 42.5l101 308v0zM566 830h660l-330 -1015v0zM368 1442l198 -612h-462l198 612q8 23 33 23t33 -23zM1688 830l101 -308q7 -21 0 -42.5t-25 -34.5l-868 -630l792 1015v0zM1688 830h-462l198 612q8 23 33 23t33 -23z" />
+ <glyph glyph-name="_622" unicode="&#xf297;" horiz-adv-x="1792"
+d="M384 704h160v224h-160v-224zM1221 372v92q-104 -36 -243 -38q-135 -1 -259.5 46.5t-220.5 122.5l1 -96q88 -80 212 -128.5t272 -47.5q129 0 238 49zM640 704h640v224h-640v-224zM1792 736q0 -187 -99 -352q89 -102 89 -229q0 -157 -129.5 -268t-313.5 -111
+q-122 0 -225 52.5t-161 140.5q-19 -1 -57 -1t-57 1q-58 -88 -161 -140.5t-225 -52.5q-184 0 -313.5 111t-129.5 268q0 127 89 229q-99 165 -99 352q0 209 120 385.5t326.5 279.5t449.5 103t449.5 -103t326.5 -279.5t120 -385.5z" />
+ <glyph glyph-name="_623" unicode="&#xf298;"
+d="M515 625v-128h-252v128h252zM515 880v-127h-252v127h252zM1273 369v-128h-341v128h341zM1273 625v-128h-672v128h672zM1273 880v-127h-672v127h672zM1408 20v1240q0 8 -6 14t-14 6h-32l-378 -256l-210 171l-210 -171l-378 256h-32q-8 0 -14 -6t-6 -14v-1240q0 -8 6 -14
+t14 -6h1240q8 0 14 6t6 14zM553 1130l185 150h-406zM983 1130l221 150h-406zM1536 1260v-1240q0 -62 -43 -105t-105 -43h-1240q-62 0 -105 43t-43 105v1240q0 62 43 105t105 43h1240q62 0 105 -43t43 -105z" />
+ <glyph glyph-name="_624" unicode="&#xf299;" horiz-adv-x="1792"
+d="M896 720q-104 196 -160 278q-139 202 -347 318q-34 19 -70 36q-89 40 -94 32t34 -38l39 -31q62 -43 112.5 -93.5t94.5 -116.5t70.5 -113t70.5 -131q9 -17 13 -25q44 -84 84 -153t98 -154t115.5 -150t131 -123.5t148.5 -90.5q153 -66 154 -60q1 3 -49 37q-53 36 -81 57
+q-77 58 -179 211t-185 310zM549 177q-76 60 -132.5 125t-98 143.5t-71 154.5t-58.5 186t-52 209t-60.5 252t-76.5 289q273 0 497.5 -36t379 -92t271 -144.5t185.5 -172.5t110 -198.5t56 -199.5t12.5 -198.5t-9.5 -173t-20 -143.5t-13 -107l323 -327h-104l-281 285
+q-22 -2 -91.5 -14t-121.5 -19t-138 -6t-160.5 17t-167.5 59t-179 111z" />
+ <glyph glyph-name="_625" unicode="&#xf29a;" horiz-adv-x="1792"
+d="M1374 879q-6 26 -28.5 39.5t-48.5 7.5q-261 -62 -401 -62t-401 62q-26 6 -48.5 -7.5t-28.5 -39.5t7.5 -48.5t39.5 -28.5q194 -46 303 -58q-2 -158 -15.5 -269t-26.5 -155.5t-41 -115.5l-9 -21q-10 -25 1 -49t36 -34q9 -4 23 -4q44 0 60 41l8 20q54 139 71 259h42
+q17 -120 71 -259l8 -20q16 -41 60 -41q14 0 23 4q25 10 36 34t1 49l-9 21q-28 71 -41 115.5t-26.5 155.5t-15.5 269q109 12 303 58q26 6 39.5 28.5t7.5 48.5zM1024 1024q0 53 -37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5t37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5z
+M1600 640q0 -143 -55.5 -273.5t-150 -225t-225 -150t-273.5 -55.5t-273.5 55.5t-225 150t-150 225t-55.5 273.5t55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5zM896 1408q-156 0 -298 -61t-245 -164t-164 -245t-61 -298t61 -298
+t164 -245t245 -164t298 -61t298 61t245 164t164 245t61 298t-61 298t-164 245t-245 164t-298 61zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="_626" unicode="&#xf29b;"
+d="M1438 723q34 -35 29 -82l-44 -551q-4 -42 -34.5 -70t-71.5 -28q-6 0 -9 1q-44 3 -72.5 36.5t-25.5 77.5l35 429l-143 -8q55 -113 55 -240q0 -216 -148 -372l-137 137q91 101 91 235q0 145 -102.5 248t-247.5 103q-134 0 -236 -92l-137 138q120 114 284 141l264 300
+l-149 87l-181 -161q-33 -30 -77 -27.5t-73 35.5t-26.5 77t34.5 73l239 213q26 23 60 26.5t64 -14.5l488 -283q36 -21 48 -68q17 -67 -26 -117l-205 -232l371 20q49 3 83 -32zM1240 1180q-74 0 -126 52t-52 126t52 126t126 52t126.5 -52t52.5 -126t-52.5 -126t-126.5 -52z
+M613 -62q106 0 196 61l139 -139q-146 -116 -335 -116q-148 0 -273.5 73t-198.5 198t-73 273q0 188 116 336l139 -139q-60 -88 -60 -197q0 -145 102.5 -247.5t247.5 -102.5z" />
+ <glyph glyph-name="_627" unicode="&#xf29c;"
+d="M880 336v-160q0 -14 -9 -23t-23 -9h-160q-14 0 -23 9t-9 23v160q0 14 9 23t23 9h160q14 0 23 -9t9 -23zM1136 832q0 -50 -15 -90t-45.5 -69t-52 -44t-59.5 -36q-32 -18 -46.5 -28t-26 -24t-11.5 -29v-32q0 -14 -9 -23t-23 -9h-160q-14 0 -23 9t-9 23v68q0 35 10.5 64.5
+t24 47.5t39 35.5t41 25.5t44.5 21q53 25 75 43t22 49q0 42 -43.5 71.5t-95.5 29.5q-56 0 -95 -27q-29 -20 -80 -83q-9 -12 -25 -12q-11 0 -19 6l-108 82q-10 7 -12 20t5 23q122 192 349 192q129 0 238.5 -89.5t109.5 -214.5zM768 1280q-130 0 -248.5 -51t-204 -136.5
+t-136.5 -204t-51 -248.5t51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5t-51 248.5t-136.5 204t-204 136.5t-248.5 51zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5
+t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="_628" unicode="&#xf29d;" horiz-adv-x="1408"
+d="M366 1225q-64 0 -110 45.5t-46 110.5q0 64 46 109.5t110 45.5t109.5 -45.5t45.5 -109.5q0 -65 -45.5 -110.5t-109.5 -45.5zM917 583q0 -50 -30 -67.5t-63.5 -6.5t-47.5 34l-367 438q-7 12 -14 15.5t-11 1.5l-3 -3q-7 -8 4 -21l122 -139l1 -354l-161 -457
+q-67 -192 -92 -234q-15 -26 -28 -32q-50 -26 -103 -1q-29 13 -41.5 43t-9.5 57q2 17 197 618l5 416l-85 -164l35 -222q4 -24 -1 -42t-14 -27.5t-19 -16t-17 -7.5l-7 -2q-19 -3 -34.5 3t-24 16t-14 22t-7.5 19.5t-2 9.5l-46 299l211 381q23 34 113 34q75 0 107 -40l424 -521
+q7 -5 14 -17l3 -3l-1 -1q7 -13 7 -29zM514 433q43 -113 88.5 -225t69.5 -168l24 -55q36 -93 42 -125q11 -70 -36 -97q-35 -22 -66 -16t-51 22t-29 35h-1q-6 16 -8 25l-124 351zM1338 -159q31 -49 31 -57q0 -5 -3 -7q-9 -5 -14.5 0.5t-15.5 26t-16 30.5q-114 172 -423 661
+q3 -1 7 1t7 4l3 2q11 9 11 17z" />
+ <glyph glyph-name="_629" unicode="&#xf29e;" horiz-adv-x="2304"
+d="M504 542h171l-1 265zM1530 641q0 87 -50.5 140t-146.5 53h-54v-388h52q91 0 145 57t54 138zM956 1018l1 -756q0 -14 -9.5 -24t-23.5 -10h-216q-14 0 -23.5 10t-9.5 24v62h-291l-55 -81q-10 -15 -28 -15h-267q-21 0 -30.5 18t3.5 35l556 757q9 14 27 14h332q14 0 24 -10
+t10 -24zM1783 641q0 -193 -125.5 -303t-324.5 -110h-270q-14 0 -24 10t-10 24v756q0 14 10 24t24 10h268q200 0 326 -109t126 -302zM1939 640q0 -11 -0.5 -29t-8 -71.5t-21.5 -102t-44.5 -108t-73.5 -102.5h-51q38 45 66.5 104.5t41.5 112t21 98t9 72.5l1 27q0 8 -0.5 22.5
+t-7.5 60t-20 91.5t-41 111.5t-66 124.5h43q41 -47 72 -107t45.5 -111.5t23 -96t10.5 -70.5zM2123 640q0 -11 -0.5 -29t-8 -71.5t-21.5 -102t-45 -108t-74 -102.5h-51q38 45 66.5 104.5t41.5 112t21 98t9 72.5l1 27q0 8 -0.5 22.5t-7.5 60t-19.5 91.5t-40.5 111.5t-66 124.5
+h43q41 -47 72 -107t45.5 -111.5t23 -96t10.5 -70.5zM2304 640q0 -11 -0.5 -29t-8 -71.5t-21.5 -102t-44.5 -108t-73.5 -102.5h-51q38 45 66 104.5t41 112t21 98t9 72.5l1 27q0 8 -0.5 22.5t-7.5 60t-19.5 91.5t-40.5 111.5t-66 124.5h43q41 -47 72 -107t45.5 -111.5t23 -96
+t9.5 -70.5z" />
+ <glyph glyph-name="uniF2A0" unicode="&#xf2a0;" horiz-adv-x="1408"
+d="M617 -153q0 11 -13 58t-31 107t-20 69q-1 4 -5 26.5t-8.5 36t-13.5 21.5q-15 14 -51 14q-23 0 -70 -5.5t-71 -5.5q-34 0 -47 11q-6 5 -11 15.5t-7.5 20t-6.5 24t-5 18.5q-37 128 -37 255t37 255q1 4 5 18.5t6.5 24t7.5 20t11 15.5q13 11 47 11q24 0 71 -5.5t70 -5.5
+q36 0 51 14q9 8 13.5 21.5t8.5 36t5 26.5q2 9 20 69t31 107t13 58q0 22 -43.5 52.5t-75.5 42.5q-20 8 -45 8q-34 0 -98 -18q-57 -17 -96.5 -40.5t-71 -66t-46 -70t-45.5 -94.5q-6 -12 -9 -19q-49 -107 -68 -216t-19 -244t19 -244t68 -216q56 -122 83 -161q63 -91 179 -127
+l6 -2q64 -18 98 -18q25 0 45 8q32 12 75.5 42.5t43.5 52.5zM776 760q-26 0 -45 19t-19 45.5t19 45.5q37 37 37 90q0 52 -37 91q-19 19 -19 45t19 45t45 19t45 -19q75 -75 75 -181t-75 -181q-21 -19 -45 -19zM957 579q-27 0 -45 19q-19 19 -19 45t19 45q112 114 112 272
+t-112 272q-19 19 -19 45t19 45t45 19t45 -19q150 -150 150 -362t-150 -362q-18 -19 -45 -19zM1138 398q-27 0 -45 19q-19 19 -19 45t19 45q90 91 138.5 208t48.5 245t-48.5 245t-138.5 208q-19 19 -19 45t19 45t45 19t45 -19q109 -109 167 -249t58 -294t-58 -294t-167 -249
+q-18 -19 -45 -19z" />
+ <glyph glyph-name="uniF2A1" unicode="&#xf2a1;" horiz-adv-x="2176"
+d="M192 352q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM704 352q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM704 864q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM1472 352
+q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM1984 352q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM1472 864q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM1984 864
+q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM1984 1376q-66 0 -113 -47t-47 -113t47 -113t113 -47t113 47t47 113t-47 113t-113 47zM384 192q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM896 192q0 -80 -56 -136
+t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM384 704q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM896 704q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM384 1216q0 -80 -56 -136t-136 -56
+t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1664 192q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM896 1216q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM2176 192q0 -80 -56 -136t-136 -56t-136 56
+t-56 136t56 136t136 56t136 -56t56 -136zM1664 704q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM2176 704q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136zM1664 1216q0 -80 -56 -136t-136 -56t-136 56t-56 136
+t56 136t136 56t136 -56t56 -136zM2176 1216q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136z" />
+ <glyph glyph-name="uniF2A2" unicode="&#xf2a2;" horiz-adv-x="1792"
+d="M128 -192q0 -26 -19 -45t-45 -19t-45 19t-19 45t19 45t45 19t45 -19t19 -45zM320 0q0 -26 -19 -45t-45 -19t-45 19t-19 45t19 45t45 19t45 -19t19 -45zM365 365l256 -256l-90 -90l-256 256zM704 384q0 -26 -19 -45t-45 -19t-45 19t-19 45t19 45t45 19t45 -19t19 -45z
+M1411 704q0 -59 -11.5 -108.5t-37.5 -93.5t-44 -67.5t-53 -64.5q-31 -35 -45.5 -54t-33.5 -50t-26.5 -64t-7.5 -74q0 -159 -112.5 -271.5t-271.5 -112.5q-26 0 -45 19t-19 45t19 45t45 19q106 0 181 75t75 181q0 57 11.5 105.5t37 91t43.5 66.5t52 63q40 46 59.5 72
+t37.5 74.5t18 103.5q0 185 -131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5q0 -26 -19 -45t-45 -19t-45 19t-19 45q0 117 45.5 223.5t123 184t184 123t223.5 45.5t223.5 -45.5t184 -123t123 -184t45.5 -223.5zM896 576q0 -26 -19 -45t-45 -19t-45 19t-19 45t19 45
+t45 19t45 -19t19 -45zM1184 704q0 -26 -19 -45t-45 -19t-45 19t-19 45q0 93 -65.5 158.5t-158.5 65.5q-92 0 -158 -65.5t-66 -158.5q0 -26 -19 -45t-45 -19t-45 19t-19 45q0 146 103 249t249 103t249 -103t103 -249zM1578 993q10 -25 -1 -49t-36 -34q-9 -4 -23 -4
+q-19 0 -35.5 11t-23.5 30q-68 178 -224 295q-21 16 -25 42t12 47q17 21 43 25t47 -12q183 -137 266 -351zM1788 1074q9 -25 -1.5 -49t-35.5 -34q-11 -4 -23 -4q-44 0 -60 41q-92 238 -297 393q-22 16 -25.5 42t12.5 47q16 22 42 25.5t47 -12.5q235 -175 341 -449z" />
+ <glyph glyph-name="uniF2A3" unicode="&#xf2a3;" horiz-adv-x="2304"
+d="M1032 576q-59 2 -84 55q-17 34 -48 53.5t-68 19.5q-53 0 -90.5 -37.5t-37.5 -90.5q0 -56 36 -89l10 -8q34 -31 82 -31q37 0 68 19.5t48 53.5q25 53 84 55zM1600 704q0 56 -36 89l-10 8q-34 31 -82 31q-37 0 -68 -19.5t-48 -53.5q-25 -53 -84 -55q59 -2 84 -55
+q17 -34 48 -53.5t68 -19.5q53 0 90.5 37.5t37.5 90.5zM1174 925q-17 -35 -55 -48t-73 4q-62 31 -134 31q-51 0 -99 -17q3 0 9.5 0.5t9.5 0.5q92 0 170.5 -50t118.5 -133q17 -36 3.5 -73.5t-49.5 -54.5q-18 -9 -39 -9q21 0 39 -9q36 -17 49.5 -54.5t-3.5 -73.5
+q-40 -83 -118.5 -133t-170.5 -50h-6q-16 2 -44 4l-290 27l-239 -120q-14 -7 -29 -7q-40 0 -57 35l-160 320q-11 23 -4 47.5t29 37.5l209 119l148 267q17 155 91.5 291.5t195.5 236.5q31 25 70.5 21.5t64.5 -34.5t21.5 -70t-34.5 -65q-70 -59 -117 -128q123 84 267 101
+q40 5 71.5 -19t35.5 -64q5 -40 -19 -71.5t-64 -35.5q-84 -10 -159 -55q46 10 99 10q115 0 218 -50q36 -18 49 -55.5t-5 -73.5zM2137 1085l160 -320q11 -23 4 -47.5t-29 -37.5l-209 -119l-148 -267q-17 -155 -91.5 -291.5t-195.5 -236.5q-26 -22 -61 -22q-45 0 -74 35
+q-25 31 -21.5 70t34.5 65q70 59 117 128q-123 -84 -267 -101q-4 -1 -12 -1q-36 0 -63.5 24t-31.5 60q-5 40 19 71.5t64 35.5q84 10 159 55q-46 -10 -99 -10q-115 0 -218 50q-36 18 -49 55.5t5 73.5q17 35 55 48t73 -4q62 -31 134 -31q51 0 99 17q-3 0 -9.5 -0.5t-9.5 -0.5
+q-92 0 -170.5 50t-118.5 133q-17 36 -3.5 73.5t49.5 54.5q18 9 39 9q-21 0 -39 9q-36 17 -49.5 54.5t3.5 73.5q40 83 118.5 133t170.5 50h6h1q14 -2 42 -4l291 -27l239 120q14 7 29 7q40 0 57 -35z" />
+ <glyph glyph-name="uniF2A4" unicode="&#xf2a4;" horiz-adv-x="1792"
+d="M1056 704q0 -26 19 -45t45 -19t45 19t19 45q0 146 -103 249t-249 103t-249 -103t-103 -249q0 -26 19 -45t45 -19t45 19t19 45q0 93 66 158.5t158 65.5t158 -65.5t66 -158.5zM835 1280q-117 0 -223.5 -45.5t-184 -123t-123 -184t-45.5 -223.5q0 -26 19 -45t45 -19t45 19
+t19 45q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5q0 -55 -18 -103.5t-37.5 -74.5t-59.5 -72q-34 -39 -52 -63t-43.5 -66.5t-37 -91t-11.5 -105.5q0 -106 -75 -181t-181 -75q-26 0 -45 -19t-19 -45t19 -45t45 -19q159 0 271.5 112.5t112.5 271.5q0 41 7.5 74
+t26.5 64t33.5 50t45.5 54q35 41 53 64.5t44 67.5t37.5 93.5t11.5 108.5q0 117 -45.5 223.5t-123 184t-184 123t-223.5 45.5zM591 561l226 -226l-579 -579q-12 -12 -29 -12t-29 12l-168 168q-12 12 -12 29t12 29zM1612 1524l168 -168q12 -12 12 -29t-12 -30l-233 -233
+l-26 -25l-71 -71q-66 153 -195 258l91 91l207 207q13 12 30 12t29 -12z" />
+ <glyph glyph-name="uniF2A5" unicode="&#xf2a5;"
+d="M866 1021q0 -27 -13 -94q-11 -50 -31.5 -150t-30.5 -150q-2 -11 -4.5 -12.5t-13.5 -2.5q-20 -2 -31 -2q-58 0 -84 49.5t-26 113.5q0 88 35 174t103 124q28 14 51 14q28 0 36.5 -16.5t8.5 -47.5zM1352 597q0 14 -39 75.5t-52 66.5q-21 8 -34 8q-91 0 -226 -77l-2 2
+q3 22 27.5 135t24.5 178q0 233 -242 233q-24 0 -68 -6q-94 -17 -168.5 -89.5t-111.5 -166.5t-37 -189q0 -146 80.5 -225t227.5 -79q25 0 25 -3t-1 -5q-4 -34 -26 -117q-14 -52 -51.5 -101t-82.5 -49q-42 0 -42 47q0 24 10.5 47.5t25 39.5t29.5 28.5t26 20t11 8.5q0 3 -7 10
+q-24 22 -58.5 36.5t-65.5 14.5q-35 0 -63.5 -34t-41 -75t-12.5 -75q0 -88 51.5 -142t138.5 -54q82 0 155 53t117.5 126t65.5 153q6 22 15.5 66.5t14.5 66.5q3 12 14 18q118 60 227 60q48 0 127 -18q1 -1 4 -1q5 0 9.5 4.5t4.5 8.5zM1536 1120v-960q0 -119 -84.5 -203.5
+t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="uniF2A6" unicode="&#xf2a6;" horiz-adv-x="1535"
+d="M744 1231q0 24 -2 38.5t-8.5 30t-21 23t-37.5 7.5q-39 0 -78 -23q-105 -58 -159 -190.5t-54 -269.5q0 -44 8.5 -85.5t26.5 -80.5t52.5 -62.5t81.5 -23.5q4 0 18 -0.5t20 0t16 3t15 8.5t7 16q16 77 48 231.5t48 231.5q19 91 19 146zM1498 575q0 -7 -7.5 -13.5t-15.5 -6.5
+l-6 1q-22 3 -62 11t-72 12.5t-63 4.5q-167 0 -351 -93q-15 -8 -21 -27q-10 -36 -24.5 -105.5t-22.5 -100.5q-23 -91 -70 -179.5t-112.5 -164.5t-154.5 -123t-185 -47q-135 0 -214.5 83.5t-79.5 219.5q0 53 19.5 117t63 116.5t97.5 52.5q38 0 120 -33.5t83 -61.5
+q0 -1 -16.5 -12.5t-39.5 -31t-46 -44.5t-39 -61t-16 -74q0 -33 16.5 -53t48.5 -20q45 0 85 31.5t66.5 78t48 105.5t32.5 107t16 90v9q0 2 -3.5 3.5t-8.5 1.5h-10t-10 -0.5t-6 -0.5q-227 0 -352 122.5t-125 348.5q0 108 34.5 221t96 210t156 167.5t204.5 89.5q52 9 106 9
+q374 0 374 -360q0 -98 -38 -273t-43 -211l3 -3q101 57 182.5 88t167.5 31q22 0 53 -13q19 -7 80 -102.5t61 -116.5z" />
+ <glyph glyph-name="uniF2A7" unicode="&#xf2a7;" horiz-adv-x="1664"
+d="M831 863q32 0 59 -18l222 -148q61 -40 110 -97l146 -170q40 -46 29 -106l-72 -413q-6 -32 -29.5 -53.5t-55.5 -25.5l-527 -56l-352 -32h-9q-39 0 -67.5 28t-28.5 68q0 37 27 64t65 32l260 32h-448q-41 0 -69.5 30t-26.5 71q2 39 32 65t69 26l442 1l-521 64q-41 5 -66 37
+t-19 73q6 35 34.5 57.5t65.5 22.5h10l481 -60l-351 94q-38 10 -62 41.5t-18 68.5q6 36 33 58.5t62 22.5q6 0 20 -2l448 -96l217 -37q1 0 3 -0.5t3 -0.5q23 0 30.5 23t-12.5 36l-186 125q-35 23 -42 63.5t18 73.5q27 38 76 38zM761 661l186 -125l-218 37l-5 2l-36 38
+l-238 262q-1 1 -2.5 3.5t-2.5 3.5q-24 31 -18.5 70t37.5 64q31 23 68 17.5t64 -33.5l142 -147q-2 -1 -5 -3.5t-4 -4.5q-32 -45 -23 -99t55 -85zM1648 1115l15 -266q4 -73 -11 -147l-48 -219q-12 -59 -67 -87l-106 -54q2 62 -39 109l-146 170q-53 61 -117 103l-222 148
+q-34 23 -76 23q-51 0 -88 -37l-235 312q-25 33 -18 73.5t41 63.5q33 22 71.5 14t62.5 -40l266 -352l-262 455q-21 35 -10.5 75t47.5 59q35 18 72.5 6t57.5 -46l241 -420l-136 337q-15 35 -4.5 74t44.5 56q37 19 76 6t56 -51l193 -415l101 -196q8 -15 23 -17.5t27 7.5t11 26
+l-12 224q-2 41 26 71t69 31q39 0 67 -28.5t30 -67.5z" />
+ <glyph glyph-name="uniF2A8" unicode="&#xf2a8;" horiz-adv-x="1792"
+d="M335 180q-2 0 -6 2q-86 57 -168.5 145t-139.5 180q-21 30 -21 69q0 9 2 19t4 18t7 18t8.5 16t10.5 17t10 15t12 15.5t11 14.5q184 251 452 365q-110 198 -110 211q0 19 17 29q116 64 128 64q18 0 28 -16l124 -229q92 19 192 19q266 0 497.5 -137.5t378.5 -369.5
+q20 -31 20 -69t-20 -69q-91 -142 -218.5 -253.5t-278.5 -175.5q110 -198 110 -211q0 -20 -17 -29q-116 -64 -127 -64q-19 0 -29 16l-124 229l-64 119l-444 820l7 7q-58 -24 -99 -47q3 -5 127 -234t243 -449t119 -223q0 -7 -9 -9q-13 -3 -72 -3q-57 0 -60 7l-456 841
+q-39 -28 -82 -68q24 -43 214 -393.5t190 -354.5q0 -10 -11 -10q-14 0 -82.5 22t-72.5 28l-106 197l-224 413q-44 -53 -78 -106q2 -3 18 -25t23 -34l176 -327q0 -10 -10 -10zM1165 282l49 -91q273 111 450 385q-180 277 -459 389q67 -64 103 -148.5t36 -176.5
+q0 -106 -47 -200.5t-132 -157.5zM848 896q0 -20 14 -34t34 -14q86 0 147 -61t61 -147q0 -20 14 -34t34 -14t34 14t14 34q0 126 -89 215t-215 89q-20 0 -34 -14t-14 -34zM1214 961l-9 4l7 -7z" />
+ <glyph glyph-name="uniF2A9" unicode="&#xf2a9;" horiz-adv-x="1280"
+d="M1050 430q0 -215 -147 -374q-148 -161 -378 -161q-232 0 -378 161q-147 159 -147 374q0 147 68 270.5t189 196.5t268 73q96 0 182 -31q-32 -62 -39 -126q-66 28 -143 28q-167 0 -280.5 -123t-113.5 -291q0 -170 112.5 -288.5t281.5 -118.5t281 118.5t112 288.5
+q0 89 -32 166q66 13 123 49q41 -98 41 -212zM846 619q0 -192 -79.5 -345t-238.5 -253l-14 -1q-29 0 -62 5q83 32 146.5 102.5t99.5 154.5t58.5 189t30 192.5t7.5 178.5q0 69 -3 103q55 -160 55 -326zM791 947v-2q-73 214 -206 440q88 -59 142.5 -186.5t63.5 -251.5z
+M1035 744q-83 0 -160 75q218 120 290 247q19 37 21 56q-42 -94 -139.5 -166.5t-204.5 -97.5q-35 54 -35 113q0 37 17 79t43 68q46 44 157 74q59 16 106 58.5t74 100.5q74 -105 74 -253q0 -109 -24 -170q-32 -77 -88.5 -130.5t-130.5 -53.5z" />
+ <glyph glyph-name="uniF2AA" unicode="&#xf2aa;"
+d="M1050 495q0 78 -28 147q-41 -25 -85 -34q22 -50 22 -114q0 -117 -77 -198.5t-193 -81.5t-193.5 81.5t-77.5 198.5q0 115 78 199.5t193 84.5q53 0 98 -19q4 43 27 87q-60 21 -125 21q-154 0 -257.5 -108.5t-103.5 -263.5t103.5 -261t257.5 -106t257.5 106.5t103.5 260.5z
+M872 850q2 -24 2 -71q0 -63 -5 -123t-20.5 -132.5t-40.5 -130t-68.5 -106t-100.5 -70.5q21 -3 42 -3h10q219 139 219 411q0 116 -38 225zM872 850q-4 80 -44 171.5t-98 130.5q92 -156 142 -302zM1207 955q0 102 -51 174q-41 -86 -124 -109q-69 -19 -109 -53.5t-40 -99.5
+q0 -40 24 -77q74 17 140.5 67t95.5 115q-4 -52 -74.5 -111.5t-138.5 -97.5q52 -52 110 -52q51 0 90 37t60 90q17 42 17 117zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5
+t84.5 -203.5z" />
+ <glyph glyph-name="uniF2AB" unicode="&#xf2ab;"
+d="M1279 388q0 22 -22 27q-67 15 -118 59t-80 108q-7 19 -7 25q0 15 19.5 26t43 17t43 20.5t19.5 36.5q0 19 -18.5 31.5t-38.5 12.5q-12 0 -32 -8t-31 -8q-4 0 -12 2q5 95 5 114q0 79 -17 114q-36 78 -103 121.5t-152 43.5q-199 0 -275 -165q-17 -35 -17 -114q0 -19 5 -114
+q-4 -2 -14 -2q-12 0 -32 7.5t-30 7.5q-21 0 -38.5 -12t-17.5 -32q0 -21 19.5 -35.5t43 -20.5t43 -17t19.5 -26q0 -6 -7 -25q-64 -138 -198 -167q-22 -5 -22 -27q0 -46 137 -68q2 -5 6 -26t11.5 -30.5t23.5 -9.5q12 0 37.5 4.5t39.5 4.5q35 0 67 -15t54 -32.5t57.5 -32.5
+t76.5 -15q43 0 79 15t57.5 32.5t53.5 32.5t67 15q14 0 39.5 -4t38.5 -4q16 0 23 10t11 30t6 25q137 22 137 68zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5
+t103 -385.5z" />
+ <glyph glyph-name="uniF2AC" unicode="&#xf2ac;" horiz-adv-x="1664"
+d="M848 1408q134 1 240.5 -68.5t163.5 -192.5q27 -58 27 -179q0 -47 -9 -191q14 -7 28 -7q18 0 51 13.5t51 13.5q29 0 56 -18t27 -46q0 -32 -31.5 -54t-69 -31.5t-69 -29t-31.5 -47.5q0 -15 12 -43q37 -82 102.5 -150t144.5 -101q28 -12 80 -23q28 -6 28 -35
+q0 -70 -219 -103q-7 -11 -11 -39t-14 -46.5t-33 -18.5q-20 0 -62 6.5t-64 6.5q-37 0 -62 -5q-32 -5 -63 -22.5t-58 -38t-58 -40.5t-76 -33.5t-99 -13.5q-52 0 -96.5 13.5t-75 33.5t-57.5 40.5t-58 38t-62 22.5q-26 5 -63 5q-24 0 -65.5 -7.5t-58.5 -7.5q-25 0 -35 18.5
+t-14 47.5t-11 40q-219 33 -219 103q0 29 28 35q52 11 80 23q78 32 144.5 101t102.5 150q12 28 12 43q0 28 -31.5 47.5t-69.5 29.5t-69.5 31.5t-31.5 52.5q0 27 26 45.5t55 18.5q15 0 48 -13t53 -13q18 0 32 7q-9 142 -9 190q0 122 27 180q64 137 172 198t264 63z" />
+ <glyph glyph-name="uniF2AD" unicode="&#xf2ad;"
+d="M1280 388q0 22 -22 27q-67 14 -118 58t-80 109q-7 14 -7 25q0 15 19.5 26t42.5 17t42.5 20.5t19.5 36.5q0 19 -18.5 31.5t-38.5 12.5q-11 0 -31 -8t-32 -8q-4 0 -12 2q5 63 5 115q0 78 -17 114q-36 78 -102.5 121.5t-152.5 43.5q-198 0 -275 -165q-18 -38 -18 -115
+q0 -38 6 -114q-10 -2 -15 -2q-11 0 -31.5 8t-30.5 8q-20 0 -37.5 -12.5t-17.5 -32.5q0 -21 19.5 -35.5t42.5 -20.5t42.5 -17t19.5 -26q0 -11 -7 -25q-64 -138 -198 -167q-22 -5 -22 -27q0 -47 138 -69q2 -5 6 -26t11 -30.5t23 -9.5q13 0 38.5 5t38.5 5q35 0 67.5 -15
+t54.5 -32.5t57.5 -32.5t76.5 -15q43 0 79 15t57.5 32.5t54 32.5t67.5 15q13 0 39 -4.5t39 -4.5q15 0 22.5 9.5t11.5 31t5 24.5q138 22 138 69zM1536 1120v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5v960q0 119 84.5 203.5t203.5 84.5h960
+q119 0 203.5 -84.5t84.5 -203.5z" />
+ <glyph glyph-name="uniF2AE" unicode="&#xf2ae;" horiz-adv-x="2304"
+d="M2304 1536q-69 -46 -125 -92t-89 -81t-59.5 -71.5t-37.5 -57.5t-22 -44.5t-14 -29.5q-10 -18 -35.5 -136.5t-48.5 -164.5q-15 -29 -50 -60.5t-67.5 -50.5t-72.5 -41t-48 -28q-47 -31 -151 -231q-341 14 -630 -158q-92 -53 -303 -179q47 16 86 31t55 22l15 7
+q71 27 163 64.5t133.5 53.5t108 34.5t142.5 31.5q186 31 465 -7q1 0 10 -3q11 -6 14 -17t-3 -22l-194 -345q-15 -29 -47 -22q-128 24 -354 24q-146 0 -402 -44.5t-392 -46.5q-82 -1 -149 13t-107 37t-61 40t-33 34l-1 1v2q0 6 6 6q138 0 371 55q192 366 374.5 524t383.5 158
+q5 0 14.5 -0.5t38 -5t55 -12t61.5 -24.5t63 -39.5t54 -59t40 -82.5l102 177q2 4 21 42.5t44.5 86.5t61 109.5t84 133.5t100.5 137q66 82 128 141.5t121.5 96.5t92.5 53.5t88 39.5z" />
+ <glyph glyph-name="uniF2B0" unicode="&#xf2b0;"
+d="M1322 640q0 -45 -5 -76l-236 14l224 -78q-19 -73 -58 -141l-214 103l177 -158q-44 -61 -107 -108l-157 178l103 -215q-61 -37 -140 -59l-79 228l14 -240q-38 -6 -76 -6t-76 6l14 238l-78 -226q-74 19 -140 59l103 215l-157 -178q-59 43 -108 108l178 158l-214 -104
+q-39 69 -58 141l224 79l-237 -14q-5 42 -5 76q0 35 5 77l238 -14l-225 79q19 73 58 140l214 -104l-177 159q46 61 107 108l158 -178l-103 215q67 39 140 58l77 -224l-13 236q36 6 75 6q38 0 76 -6l-14 -237l78 225q74 -19 140 -59l-103 -214l158 178q61 -47 107 -108
+l-177 -159l213 104q37 -62 58 -141l-224 -78l237 14q5 -31 5 -77zM1352 640q0 160 -78.5 295.5t-213 214t-292.5 78.5q-119 0 -227 -46.5t-186.5 -125t-124.5 -187.5t-46 -229q0 -119 46 -228t124.5 -187.5t186.5 -125t227 -46.5q158 0 292.5 78.5t213 214t78.5 294.5z
+M1425 1023v-766l-657 -383l-657 383v766l657 383zM768 -183l708 412v823l-708 411l-708 -411v-823zM1536 1088v-896l-768 -448l-768 448v896l768 448z" />
+ <glyph glyph-name="uniF2B1" unicode="&#xf2b1;" horiz-adv-x="1664"
+d="M339 1318h691l-26 -72h-665q-110 0 -188.5 -79t-78.5 -189v-771q0 -95 60.5 -169.5t153.5 -93.5q23 -5 98 -5v-72h-45q-140 0 -239.5 100t-99.5 240v771q0 140 99.5 240t239.5 100zM1190 1536h247l-482 -1294q-23 -61 -40.5 -103.5t-45 -98t-54 -93.5t-64.5 -78.5
+t-79.5 -65t-95.5 -41t-116 -18.5v195q163 26 220 182q20 52 20 105q0 54 -20 106l-285 733h228l187 -585zM1664 978v-1111h-795q37 55 45 73h678v1038q0 85 -49.5 155t-129.5 99l25 67q101 -34 163.5 -123.5t62.5 -197.5z" />
+ <glyph glyph-name="uniF2B2" unicode="&#xf2b2;" horiz-adv-x="1792"
+d="M852 1227q0 -29 -17 -52.5t-45 -23.5t-45 23.5t-17 52.5t17 52.5t45 23.5t45 -23.5t17 -52.5zM688 -149v114q0 30 -20.5 51.5t-50.5 21.5t-50 -21.5t-20 -51.5v-114q0 -30 20.5 -52t49.5 -22q30 0 50.5 22t20.5 52zM860 -149v114q0 30 -20 51.5t-50 21.5t-50.5 -21.5
+t-20.5 -51.5v-114q0 -30 20.5 -52t50.5 -22q29 0 49.5 22t20.5 52zM1034 -149v114q0 30 -20.5 51.5t-50.5 21.5t-50.5 -21.5t-20.5 -51.5v-114q0 -30 20.5 -52t50.5 -22t50.5 22t20.5 52zM1208 -149v114q0 30 -20.5 51.5t-50.5 21.5t-50.5 -21.5t-20.5 -51.5v-114
+q0 -30 20.5 -52t50.5 -22t50.5 22t20.5 52zM1476 535q-84 -160 -232 -259.5t-323 -99.5q-123 0 -229.5 51.5t-178.5 137t-113 197.5t-41 232q0 88 21 174q-104 -175 -104 -390q0 -162 65 -312t185 -251q30 57 91 57q56 0 86 -50q32 50 87 50q56 0 86 -50q32 50 87 50t87 -50
+q30 50 86 50q28 0 52.5 -15.5t37.5 -40.5q112 94 177 231.5t73 287.5zM1326 564q0 75 -72 75q-17 0 -47 -6q-95 -19 -149 -19q-226 0 -226 243q0 86 30 204q-83 -127 -83 -275q0 -150 89 -260.5t235 -110.5q111 0 210 70q13 48 13 79zM884 1223q0 50 -32 89.5t-81 39.5
+t-81 -39.5t-32 -89.5q0 -51 31.5 -90.5t81.5 -39.5t81.5 39.5t31.5 90.5zM1513 884q0 96 -37.5 179t-113 137t-173.5 54q-77 0 -149 -35t-127 -94q-48 -159 -48 -268q0 -104 45.5 -157t147.5 -53q53 0 142 19q36 6 53 6q51 0 77.5 -28t26.5 -80q0 -26 -4 -46
+q75 68 117.5 165.5t42.5 200.5zM1792 667q0 -111 -33.5 -249.5t-93.5 -204.5q-58 -64 -195 -142.5t-228 -104.5l-4 -1v-114q0 -43 -29.5 -75t-72.5 -32q-56 0 -86 50q-32 -50 -87 -50t-87 50q-30 -50 -86 -50q-55 0 -87 50q-30 -50 -86 -50q-47 0 -75 33.5t-28 81.5
+q-90 -68 -198 -68q-118 0 -211 80q54 1 106 20q-113 31 -182 127q32 -7 71 -7q89 0 164 46q-192 192 -240 306q-24 56 -24 160q0 57 9 125.5t31.5 146.5t55 141t86.5 105t120 42q59 0 81 -52q19 29 42 54q2 3 12 13t13 16q10 15 23 38t25 42t28 39q87 111 211.5 177
+t260.5 66q35 0 62 -4q59 64 146 64q83 0 140 -57q5 -5 5 -12q0 -5 -6 -13.5t-12.5 -16t-16 -17l-10.5 -10.5q17 -6 36 -18t19 -24q0 -6 -16 -25q157 -138 197 -378q25 30 60 30q45 0 100 -49q90 -80 90 -279z" />
+ <glyph glyph-name="uniF2B3" unicode="&#xf2b3;"
+d="M917 631q0 33 -6 64h-362v-132h217q-12 -76 -74.5 -120.5t-142.5 -44.5q-99 0 -169 71.5t-70 170.5t70 170.5t169 71.5q93 0 153 -59l104 101q-108 100 -257 100q-160 0 -272 -112.5t-112 -271.5t112 -271.5t272 -112.5q165 0 266.5 105t101.5 270zM1262 585h109v110
+h-109v110h-110v-110h-110v-110h110v-110h110v110zM1536 640q0 -209 -103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5t103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5z" />
+ <glyph glyph-name="uniF2B4" unicode="&#xf2b4;"
+d="M1536 1024v-839q0 -48 -49 -62q-174 -52 -338 -52q-73 0 -215.5 29.5t-227.5 29.5q-164 0 -370 -48v-338h-160v1368q-63 25 -101 81t-38 124q0 91 64 155t155 64t155 -64t64 -155q0 -68 -38 -124t-101 -81v-68q190 44 343 44q99 0 198 -15q14 -2 111.5 -22.5t149.5 -20.5
+q77 0 165 18q11 2 80 21t89 19q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="uniF2B5" unicode="&#xf2b5;" horiz-adv-x="2304"
+d="M192 384q40 0 56 32t0 64t-56 32t-56 -32t0 -64t56 -32zM1665 442q-10 13 -38.5 50t-41.5 54t-38 49t-42.5 53t-40.5 47t-45 49l-125 -140q-83 -94 -208.5 -92t-205.5 98q-57 69 -56.5 158t58.5 157l177 206q-22 11 -51 16.5t-47.5 6t-56.5 -0.5t-49 -1q-92 0 -158 -66
+l-158 -158h-155v-544q5 0 21 0.5t22 0t19.5 -2t20.5 -4.5t17.5 -8.5t18.5 -13.5l297 -292q115 -111 227 -111q78 0 125 47q57 -20 112.5 8t72.5 85q74 -6 127 44q20 18 36 45.5t14 50.5q10 -10 43 -10q43 0 77 21t49.5 53t12 71.5t-30.5 73.5zM1824 384h96v512h-93l-157 180
+q-66 76 -169 76h-167q-89 0 -146 -67l-209 -243q-28 -33 -28 -75t27 -75q43 -51 110 -52t111 49l193 218q25 23 53.5 21.5t47 -27t8.5 -56.5q16 -19 56 -63t60 -68q29 -36 82.5 -105.5t64.5 -84.5q52 -66 60 -140zM2112 384q40 0 56 32t0 64t-56 32t-56 -32t0 -64t56 -32z
+M2304 960v-640q0 -26 -19 -45t-45 -19h-434q-27 -65 -82 -106.5t-125 -51.5q-33 -48 -80.5 -81.5t-102.5 -45.5q-42 -53 -104.5 -81.5t-128.5 -24.5q-60 -34 -126 -39.5t-127.5 14t-117 53.5t-103.5 81l-287 282h-358q-26 0 -45 19t-19 45v672q0 26 19 45t45 19h421
+q14 14 47 48t47.5 48t44 40t50.5 37.5t51 25.5t62 19.5t68 5.5h117q99 0 181 -56q82 56 181 56h167q35 0 67 -6t56.5 -14.5t51.5 -26.5t44.5 -31t43 -39.5t39 -42t41 -48t41.5 -48.5h355q26 0 45 -19t19 -45z" />
+ <glyph glyph-name="uniF2B6" unicode="&#xf2b6;" horiz-adv-x="1792"
+d="M1792 882v-978q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v978q0 15 11 24q8 7 39 34.5t41.5 36t45.5 37.5t70 55.5t96 73t143.5 107t192.5 140.5q5 4 52.5 40t71.5 52.5t64 35t69 18.5t69 -18.5t65 -35.5t71 -52t52 -40q110 -80 192.5 -140.5t143.5 -107
+t96 -73t70 -55.5t45.5 -37.5t41.5 -36t39 -34.5q11 -9 11 -24zM1228 297q263 191 345 252q11 8 12.5 20.5t-6.5 23.5l-38 52q-8 11 -21 12.5t-24 -6.5q-231 -169 -343 -250q-5 -3 -52 -39t-71.5 -52.5t-64.5 -35t-69 -18.5t-69 18.5t-64.5 35t-71.5 52.5t-52 39
+q-186 134 -343 250q-11 8 -24 6.5t-21 -12.5l-38 -52q-8 -11 -6.5 -23.5t12.5 -20.5q82 -61 345 -252q10 -8 50 -38t65 -47t64 -39.5t77.5 -33.5t75.5 -11t75.5 11t79 34.5t64.5 39.5t65 47.5t48 36.5z" />
+ <glyph glyph-name="uniF2B7" unicode="&#xf2b7;" horiz-adv-x="1792"
+d="M1474 623l39 -51q8 -11 6.5 -23.5t-11.5 -20.5q-43 -34 -126.5 -98.5t-146.5 -113t-67 -51.5q-39 -32 -60 -48t-60.5 -41t-76.5 -36.5t-74 -11.5h-1h-1q-37 0 -74 11.5t-76 36.5t-61 41.5t-60 47.5q-5 4 -65 50.5t-143.5 111t-122.5 94.5q-11 8 -12.5 20.5t6.5 23.5
+l37 52q8 11 21.5 13t24.5 -7q94 -73 306 -236q5 -4 43.5 -35t60.5 -46.5t56.5 -32.5t58.5 -17h1h1q24 0 58.5 17t56.5 32.5t60.5 46.5t43.5 35q258 198 313 242q11 8 24 6.5t21 -12.5zM1664 -96v928q-90 83 -159 139q-91 74 -389 304q-3 2 -43 35t-61 48t-56 32.5t-59 17.5
+h-1h-1q-24 0 -59 -17.5t-56 -32.5t-61 -48t-43 -35q-215 -166 -315.5 -245.5t-129.5 -104t-82 -74.5q-14 -12 -21 -19v-928q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5zM1792 832v-928q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v928q0 56 41 94
+q123 114 350 290.5t233 181.5q36 30 59 47.5t61.5 42t76 36.5t74.5 12h1h1q37 0 74.5 -12t76 -36.5t61.5 -42t59 -47.5q43 -36 156 -122t226 -177t201 -173q41 -38 41 -94z" />
+ <glyph glyph-name="uniF2B8" unicode="&#xf2b8;"
+d="M330 1l202 -214l-34 236l-216 213zM556 -225l274 218l-11 245l-300 -215zM245 413l227 -213l-48 327l-245 204zM495 189l317 214l-14 324l-352 -200zM843 178l95 -80l-2 239l-103 79q0 -1 1 -8.5t0 -12t-5 -7.5l-78 -52l85 -70q7 -6 7 -88zM138 930l256 -200l-68 465
+l-279 173zM1173 267l15 234l-230 -164l2 -240zM417 722l373 194l-19 441l-423 -163zM1270 357l20 233l-226 142l-2 -105l144 -95q6 -4 4 -9l-7 -119zM1461 496l30 222l-179 -128l-20 -228zM1273 329l-71 49l-8 -117q0 -5 -4 -8l-234 -187q-7 -5 -14 0l-98 83l7 -161
+q0 -5 -4 -8l-293 -234q-4 -2 -6 -2q-8 2 -8 3l-228 242q-4 4 -59 277q-2 7 5 11l61 37q-94 86 -95 92l-72 351q-2 7 6 12l94 45q-133 100 -135 108l-96 466q-2 10 7 13l433 135q5 0 8 -1l317 -153q6 -4 6 -9l20 -463q0 -7 -6 -10l-118 -61l126 -85q5 -2 5 -8l5 -123l121 74
+q5 4 11 0l84 -56l3 110q0 6 5 9l206 126q6 3 11 0l245 -135q4 -4 5 -7t-6.5 -60t-17.5 -124.5t-10 -70.5q0 -5 -4 -7l-191 -153q-6 -5 -13 0z" />
+ <glyph glyph-name="uniF2B9" unicode="&#xf2b9;" horiz-adv-x="1664"
+d="M1201 298q0 57 -5.5 107t-21 100.5t-39.5 86t-64 58t-91 22.5q-6 -4 -33.5 -20.5t-42.5 -24.5t-40.5 -20t-49 -17t-46.5 -5t-46.5 5t-49 17t-40.5 20t-42.5 24.5t-33.5 20.5q-51 0 -91 -22.5t-64 -58t-39.5 -86t-21 -100.5t-5.5 -107q0 -73 42 -121.5t103 -48.5h576
+q61 0 103 48.5t42 121.5zM1028 892q0 108 -76.5 184t-183.5 76t-183.5 -76t-76.5 -184q0 -107 76.5 -183t183.5 -76t183.5 76t76.5 183zM1664 352v-192q0 -14 -9 -23t-23 -9h-96v-224q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113v1472q0 66 47 113t113 47h1216
+q66 0 113 -47t47 -113v-224h96q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-96v-128h96q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-96v-128h96q14 0 23 -9t9 -23z" />
+ <glyph glyph-name="uniF2BA" unicode="&#xf2ba;" horiz-adv-x="1664"
+d="M1028 892q0 -107 -76.5 -183t-183.5 -76t-183.5 76t-76.5 183q0 108 76.5 184t183.5 76t183.5 -76t76.5 -184zM980 672q46 0 82.5 -17t60 -47.5t39.5 -67t24 -81t11.5 -82.5t3.5 -79q0 -67 -39.5 -118.5t-105.5 -51.5h-576q-66 0 -105.5 51.5t-39.5 118.5q0 48 4.5 93.5
+t18.5 98.5t36.5 91.5t63 64.5t93.5 26h5q7 -4 32 -19.5t35.5 -21t33 -17t37 -16t35 -9t39.5 -4.5t39.5 4.5t35 9t37 16t33 17t35.5 21t32 19.5zM1664 928q0 -13 -9.5 -22.5t-22.5 -9.5h-96v-128h96q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-96v-128h96
+q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-96v-224q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113v1472q0 66 47 113t113 47h1216q66 0 113 -47t47 -113v-224h96q13 0 22.5 -9.5t9.5 -22.5v-192zM1408 -96v1472q0 13 -9.5 22.5t-22.5 9.5h-1216
+q-13 0 -22.5 -9.5t-9.5 -22.5v-1472q0 -13 9.5 -22.5t22.5 -9.5h1216q13 0 22.5 9.5t9.5 22.5z" />
+ <glyph glyph-name="uniF2BB" unicode="&#xf2bb;" horiz-adv-x="2048"
+d="M1024 405q0 64 -9 117.5t-29.5 103t-60.5 78t-97 28.5q-6 -4 -30 -18t-37.5 -21.5t-35.5 -17.5t-43 -14.5t-42 -4.5t-42 4.5t-43 14.5t-35.5 17.5t-37.5 21.5t-30 18q-57 0 -97 -28.5t-60.5 -78t-29.5 -103t-9 -117.5t37 -106.5t91 -42.5h512q54 0 91 42.5t37 106.5z
+M867 925q0 94 -66.5 160.5t-160.5 66.5t-160.5 -66.5t-66.5 -160.5t66.5 -160.5t160.5 -66.5t160.5 66.5t66.5 160.5zM1792 416v64q0 14 -9 23t-23 9h-576q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h576q14 0 23 9t9 23zM1792 676v56q0 15 -10.5 25.5t-25.5 10.5h-568
+q-15 0 -25.5 -10.5t-10.5 -25.5v-56q0 -15 10.5 -25.5t25.5 -10.5h568q15 0 25.5 10.5t10.5 25.5zM1792 928v64q0 14 -9 23t-23 9h-576q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h576q14 0 23 9t9 23zM2048 1248v-1216q0 -66 -47 -113t-113 -47h-352v96q0 14 -9 23t-23 9
+h-64q-14 0 -23 -9t-9 -23v-96h-768v96q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-96h-352q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1728q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2BC" unicode="&#xf2bc;" horiz-adv-x="2048"
+d="M1024 405q0 -64 -37 -106.5t-91 -42.5h-512q-54 0 -91 42.5t-37 106.5t9 117.5t29.5 103t60.5 78t97 28.5q6 -4 30 -18t37.5 -21.5t35.5 -17.5t43 -14.5t42 -4.5t42 4.5t43 14.5t35.5 17.5t37.5 21.5t30 18q57 0 97 -28.5t60.5 -78t29.5 -103t9 -117.5zM867 925
+q0 -94 -66.5 -160.5t-160.5 -66.5t-160.5 66.5t-66.5 160.5t66.5 160.5t160.5 66.5t160.5 -66.5t66.5 -160.5zM1792 480v-64q0 -14 -9 -23t-23 -9h-576q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h576q14 0 23 -9t9 -23zM1792 732v-56q0 -15 -10.5 -25.5t-25.5 -10.5h-568
+q-15 0 -25.5 10.5t-10.5 25.5v56q0 15 10.5 25.5t25.5 10.5h568q15 0 25.5 -10.5t10.5 -25.5zM1792 992v-64q0 -14 -9 -23t-23 -9h-576q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h576q14 0 23 -9t9 -23zM1920 32v1216q0 13 -9.5 22.5t-22.5 9.5h-1728q-13 0 -22.5 -9.5
+t-9.5 -22.5v-1216q0 -13 9.5 -22.5t22.5 -9.5h352v96q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-96h768v96q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-96h352q13 0 22.5 9.5t9.5 22.5zM2048 1248v-1216q0 -66 -47 -113t-113 -47h-1728q-66 0 -113 47t-47 113v1216q0 66 47 113
+t113 47h1728q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2BD" unicode="&#xf2bd;" horiz-adv-x="1792"
+d="M1523 197q-22 155 -87.5 257.5t-184.5 118.5q-67 -74 -159.5 -115.5t-195.5 -41.5t-195.5 41.5t-159.5 115.5q-119 -16 -184.5 -118.5t-87.5 -257.5q106 -150 271 -237.5t356 -87.5t356 87.5t271 237.5zM1280 896q0 159 -112.5 271.5t-271.5 112.5t-271.5 -112.5
+t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5zM1792 640q0 -182 -71 -347.5t-190.5 -286t-285.5 -191.5t-349 -71q-182 0 -348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="uniF2BE" unicode="&#xf2be;" horiz-adv-x="1792"
+d="M896 1536q182 0 348 -71t286 -191t191 -286t71 -348q0 -181 -70.5 -347t-190.5 -286t-286 -191.5t-349 -71.5t-349 71t-285.5 191.5t-190.5 286t-71 347.5t71 348t191 286t286 191t348 71zM1515 185q149 205 149 455q0 156 -61 298t-164 245t-245 164t-298 61t-298 -61
+t-245 -164t-164 -245t-61 -298q0 -250 149 -455q66 327 306 327q131 -128 313 -128t313 128q240 0 306 -327zM1280 832q0 159 -112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5z" />
+ <glyph glyph-name="uniF2C0" unicode="&#xf2c0;"
+d="M1201 752q47 -14 89.5 -38t89 -73t79.5 -115.5t55 -172t22 -236.5q0 -154 -100 -263.5t-241 -109.5h-854q-141 0 -241 109.5t-100 263.5q0 131 22 236.5t55 172t79.5 115.5t89 73t89.5 38q-79 125 -79 272q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5
+t198.5 -40.5t163.5 -109.5t109.5 -163.5t40.5 -198.5q0 -147 -79 -272zM768 1408q-159 0 -271.5 -112.5t-112.5 -271.5t112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5zM1195 -128q88 0 150.5 71.5t62.5 173.5q0 239 -78.5 377t-225.5 145
+q-145 -127 -336 -127t-336 127q-147 -7 -225.5 -145t-78.5 -377q0 -102 62.5 -173.5t150.5 -71.5h854z" />
+ <glyph glyph-name="uniF2C1" unicode="&#xf2c1;" horiz-adv-x="1280"
+d="M1024 278q0 -64 -37 -107t-91 -43h-512q-54 0 -91 43t-37 107t9 118t29.5 104t61 78.5t96.5 28.5q80 -75 188 -75t188 75q56 0 96.5 -28.5t61 -78.5t29.5 -104t9 -118zM870 797q0 -94 -67.5 -160.5t-162.5 -66.5t-162.5 66.5t-67.5 160.5t67.5 160.5t162.5 66.5
+t162.5 -66.5t67.5 -160.5zM1152 -96v1376h-1024v-1376q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5zM1280 1376v-1472q0 -66 -47 -113t-113 -47h-960q-66 0 -113 47t-47 113v1472q0 66 47 113t113 47h352v-96q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v96h352
+q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2C2" unicode="&#xf2c2;" horiz-adv-x="2048"
+d="M896 324q0 54 -7.5 100.5t-24.5 90t-51 68.5t-81 25q-64 -64 -156 -64t-156 64q-47 0 -81 -25t-51 -68.5t-24.5 -90t-7.5 -100.5q0 -55 31.5 -93.5t75.5 -38.5h426q44 0 75.5 38.5t31.5 93.5zM768 768q0 80 -56 136t-136 56t-136 -56t-56 -136t56 -136t136 -56t136 56
+t56 136zM1792 288v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704q14 0 23 9t9 23zM1408 544v64q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h320q14 0 23 9t9 23zM1792 544v64q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23
+v-64q0 -14 9 -23t23 -9h192q14 0 23 9t9 23zM1792 800v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64q0 -14 9 -23t23 -9h704q14 0 23 9t9 23zM128 1152h1792v96q0 14 -9 23t-23 9h-1728q-14 0 -23 -9t-9 -23v-96zM2048 1248v-1216q0 -66 -47 -113t-113 -47h-1728
+q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1728q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2C3" unicode="&#xf2c3;" horiz-adv-x="2048"
+d="M896 324q0 -55 -31.5 -93.5t-75.5 -38.5h-426q-44 0 -75.5 38.5t-31.5 93.5q0 54 7.5 100.5t24.5 90t51 68.5t81 25q64 -64 156 -64t156 64q47 0 81 -25t51 -68.5t24.5 -90t7.5 -100.5zM768 768q0 -80 -56 -136t-136 -56t-136 56t-56 136t56 136t136 56t136 -56t56 -136z
+M1792 352v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23zM1408 608v-64q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h320q14 0 23 -9t9 -23zM1792 608v-64q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v64
+q0 14 9 23t23 9h192q14 0 23 -9t9 -23zM1792 864v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23zM1920 32v1120h-1792v-1120q0 -13 9.5 -22.5t22.5 -9.5h1728q13 0 22.5 9.5t9.5 22.5zM2048 1248v-1216q0 -66 -47 -113t-113 -47
+h-1728q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1728q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2C4" unicode="&#xf2c4;" horiz-adv-x="1792"
+d="M1255 749q0 318 -105 474.5t-330 156.5q-222 0 -326 -157t-104 -474q0 -316 104 -471.5t326 -155.5q74 0 131 17q-22 43 -39 73t-44 65t-53.5 56.5t-63 36t-77.5 14.5q-46 0 -79 -16l-49 97q105 91 276 91q132 0 215.5 -54t150.5 -155q67 149 67 402zM1645 117h117
+q3 -27 -2 -67t-26.5 -95t-58 -100.5t-107 -78t-162.5 -32.5q-71 0 -130.5 19t-105.5 56t-79 78t-66 96q-97 -27 -205 -27q-150 0 -292.5 58t-253 158.5t-178 249t-67.5 317.5q0 170 67.5 319.5t178.5 250.5t253.5 159t291.5 58q121 0 238.5 -36t217 -106t176 -164.5
+t119.5 -219t43 -261.5q0 -190 -80.5 -347.5t-218.5 -264.5q47 -70 93.5 -106.5t104.5 -36.5q61 0 94 37.5t38 85.5z" />
+ <glyph glyph-name="uniF2C5" unicode="&#xf2c5;" horiz-adv-x="2304"
+d="M453 -101q0 -21 -16 -37.5t-37 -16.5q-1 0 -13 3q-63 15 -162 140q-225 284 -225 676q0 341 213 614q39 51 95 103.5t94 52.5q19 0 35 -13.5t16 -32.5q0 -27 -63 -90q-98 -102 -147 -184q-119 -199 -119 -449q0 -281 123 -491q50 -85 136 -173q2 -3 14.5 -16t19.5 -21
+t17 -20.5t14.5 -23.5t4.5 -21zM1796 33q0 -29 -17.5 -48.5t-46.5 -19.5h-1081q-26 0 -45 19t-19 45q0 29 17.5 48.5t46.5 19.5h1081q26 0 45 -19t19 -45zM1581 644q0 -134 -67 -233q-25 -38 -69.5 -78.5t-83.5 -60.5q-16 -10 -27 -10q-7 0 -15 6t-8 12q0 9 19 30t42 46
+t42 67.5t19 88.5q0 76 -35 130q-29 42 -46 42q-3 0 -3 -5q0 -12 7.5 -35.5t7.5 -36.5q0 -22 -21.5 -35t-44.5 -13q-66 0 -66 76q0 15 1.5 44t1.5 44q0 25 -10 46q-13 25 -42 53.5t-51 28.5q-5 0 -7 -0.5t-3.5 -2.5t-1.5 -6q0 -2 16 -26t16 -54q0 -37 -19 -68t-46 -54
+t-53.5 -46t-45.5 -54t-19 -68q0 -98 42 -160q29 -43 79 -63q16 -5 17 -10q1 -2 1 -5q0 -16 -18 -16q-6 0 -33 11q-119 43 -195 139.5t-76 218.5q0 55 24.5 115.5t60 115t70.5 108.5t59.5 113.5t24.5 111.5q0 53 -25 94q-29 48 -56 64q-19 9 -19 21q0 20 41 20q50 0 110 -29
+q41 -19 71 -44.5t49.5 -51t33.5 -62.5t22 -69t16 -80q0 -1 3 -17.5t4.5 -25t5.5 -25t9 -27t11 -21.5t14.5 -16.5t18.5 -5.5q23 0 37 14t14 37q0 25 -20 67t-20 52t10 10q27 0 93 -70q72 -76 102.5 -156t30.5 -186zM2304 615q0 -274 -138 -503q-19 -32 -48 -72t-68 -86.5
+t-81 -77t-74 -30.5q-16 0 -31 15.5t-15 31.5q0 15 29 50.5t68.5 77t48.5 52.5q183 230 183 531q0 131 -20.5 235t-72.5 211q-58 119 -163 228q-2 3 -13 13.5t-16.5 16.5t-15 17.5t-15 20t-9.5 18.5t-4 19q0 19 16 35.5t35 16.5q70 0 196 -169q98 -131 146 -273t60 -314
+q2 -42 2 -64z" />
+ <glyph glyph-name="uniF2C6" unicode="&#xf2c6;" horiz-adv-x="1792"
+d="M1189 229l147 693q9 44 -10.5 63t-51.5 7l-864 -333q-29 -11 -39.5 -25t-2.5 -26.5t32 -19.5l221 -69l513 323q21 14 32 6q7 -5 -4 -15l-415 -375v0v0l-16 -228q23 0 45 22l108 104l224 -165q64 -36 81 38zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71
+t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="uniF2C7" unicode="&#xf2c7;" horiz-adv-x="1024"
+d="M640 192q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 60 35 110t93 71v907h128v-907q58 -21 93 -71t35 -110zM768 192q0 77 -34 144t-94 112v768q0 80 -56 136t-136 56t-136 -56t-56 -136v-768q-60 -45 -94 -112t-34 -144q0 -133 93.5 -226.5t226.5 -93.5t226.5 93.5
+t93.5 226.5zM896 192q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 182 128 313v711q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5v-711q128 -131 128 -313zM1024 768v-128h-192v128h192zM1024 1024v-128h-192v128h192zM1024 1280v-128h-192
+v128h192z" />
+ <glyph glyph-name="uniF2C8" unicode="&#xf2c8;" horiz-adv-x="1024"
+d="M640 192q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 60 35 110t93 71v651h128v-651q58 -21 93 -71t35 -110zM768 192q0 77 -34 144t-94 112v768q0 80 -56 136t-136 56t-136 -56t-56 -136v-768q-60 -45 -94 -112t-34 -144q0 -133 93.5 -226.5t226.5 -93.5t226.5 93.5
+t93.5 226.5zM896 192q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 182 128 313v711q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5v-711q128 -131 128 -313zM1024 768v-128h-192v128h192zM1024 1024v-128h-192v128h192zM1024 1280v-128h-192
+v128h192z" />
+ <glyph glyph-name="uniF2C9" unicode="&#xf2c9;" horiz-adv-x="1024"
+d="M640 192q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 60 35 110t93 71v395h128v-395q58 -21 93 -71t35 -110zM768 192q0 77 -34 144t-94 112v768q0 80 -56 136t-136 56t-136 -56t-56 -136v-768q-60 -45 -94 -112t-34 -144q0 -133 93.5 -226.5t226.5 -93.5t226.5 93.5
+t93.5 226.5zM896 192q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 182 128 313v711q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5v-711q128 -131 128 -313zM1024 768v-128h-192v128h192zM1024 1024v-128h-192v128h192zM1024 1280v-128h-192
+v128h192z" />
+ <glyph glyph-name="uniF2CA" unicode="&#xf2ca;" horiz-adv-x="1024"
+d="M640 192q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 60 35 110t93 71v139h128v-139q58 -21 93 -71t35 -110zM768 192q0 77 -34 144t-94 112v768q0 80 -56 136t-136 56t-136 -56t-56 -136v-768q-60 -45 -94 -112t-34 -144q0 -133 93.5 -226.5t226.5 -93.5t226.5 93.5
+t93.5 226.5zM896 192q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 182 128 313v711q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5v-711q128 -131 128 -313zM1024 768v-128h-192v128h192zM1024 1024v-128h-192v128h192zM1024 1280v-128h-192
+v128h192z" />
+ <glyph glyph-name="uniF2CB" unicode="&#xf2cb;" horiz-adv-x="1024"
+d="M640 192q0 -80 -56 -136t-136 -56t-136 56t-56 136q0 79 56 135.5t136 56.5t136 -56.5t56 -135.5zM768 192q0 77 -34 144t-94 112v768q0 80 -56 136t-136 56t-136 -56t-56 -136v-768q-60 -45 -94 -112t-34 -144q0 -133 93.5 -226.5t226.5 -93.5t226.5 93.5t93.5 226.5z
+M896 192q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 182 128 313v711q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5v-711q128 -131 128 -313zM1024 768v-128h-192v128h192zM1024 1024v-128h-192v128h192zM1024 1280v-128h-192v128h192z" />
+ <glyph glyph-name="uniF2CC" unicode="&#xf2cc;" horiz-adv-x="1920"
+d="M1433 1287q10 -10 10 -23t-10 -23l-626 -626q-10 -10 -23 -10t-23 10l-82 82q-10 10 -10 23t10 23l44 44q-72 91 -81.5 207t46.5 215q-74 71 -176 71q-106 0 -181 -75t-75 -181v-1280h-256v1280q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5q106 0 201 -41
+t166 -115q94 39 197 24.5t185 -79.5l44 44q10 10 23 10t23 -10zM1344 1024q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1600 896q-26 0 -45 19t-19 45t19 45t45 19t45 -19t19 -45t-19 -45t-45 -19zM1856 1024q26 0 45 -19t19 -45t-19 -45t-45 -19
+t-45 19t-19 45t19 45t45 19zM1216 896q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1408 832q0 26 19 45t45 19t45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45zM1728 896q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1088 768
+q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1344 640q-26 0 -45 19t-19 45t19 45t45 19t45 -19t19 -45t-19 -45t-45 -19zM1600 768q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1216 512q-26 0 -45 19t-19 45t19 45t45 19t45 -19
+t19 -45t-19 -45t-45 -19zM1472 640q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1088 512q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1344 512q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1216 384
+q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19zM1088 256q26 0 45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45t19 45t45 19z" />
+ <glyph glyph-name="uniF2CD" unicode="&#xf2cd;" horiz-adv-x="1792"
+d="M1664 448v-192q0 -169 -128 -286v-194q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v118q-63 -22 -128 -22h-768q-65 0 -128 22v-110q0 -17 -9.5 -28.5t-22.5 -11.5h-64q-13 0 -22.5 11.5t-9.5 28.5v186q-128 117 -128 286v192h1536zM704 864q0 -14 -9 -23t-23 -9t-23 9
+t-9 23t9 23t23 9t23 -9t9 -23zM768 928q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM704 992q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM832 992q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM768 1056q0 -14 -9 -23t-23 -9t-23 9
+t-9 23t9 23t23 9t23 -9t9 -23zM704 1120q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM1792 608v-64q0 -14 -9 -23t-23 -9h-1728q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h96v640q0 106 75 181t181 75q108 0 184 -78q46 19 98 12t93 -39l22 22q11 11 22 0l42 -42
+q11 -11 0 -22l-314 -314q-11 -11 -22 0l-42 42q-11 11 0 22l22 22q-36 46 -40.5 104t23.5 108q-37 35 -88 35q-53 0 -90.5 -37.5t-37.5 -90.5v-640h1504q14 0 23 -9t9 -23zM896 1056q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM832 1120q0 -14 -9 -23t-23 -9
+t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM768 1184q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM960 1120q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM896 1184q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM832 1248q0 -14 -9 -23
+t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM1024 1184q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM960 1248q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23zM1088 1248q0 -14 -9 -23t-23 -9t-23 9t-9 23t9 23t23 9t23 -9t9 -23z" />
+ <glyph glyph-name="uniF2CE" unicode="&#xf2ce;"
+d="M994 344q0 -86 -17 -197q-31 -215 -55 -313q-22 -90 -152 -90t-152 90q-24 98 -55 313q-17 110 -17 197q0 168 224 168t224 -168zM1536 768q0 -240 -134 -434t-350 -280q-8 -3 -15 3t-6 15q7 48 10 66q4 32 6 47q1 9 9 12q159 81 255.5 234t96.5 337q0 180 -91 330.5
+t-247 234.5t-337 74q-124 -7 -237 -61t-193.5 -140.5t-128 -202t-46.5 -240.5q1 -184 99 -336.5t257 -231.5q7 -3 9 -12q3 -21 6 -45q1 -9 5 -32.5t6 -35.5q1 -9 -6.5 -15t-15.5 -2q-148 58 -261 169.5t-173.5 264t-52.5 319.5q7 143 66 273.5t154.5 227t225 157.5t272.5 70
+q164 10 315.5 -46.5t261 -160.5t175 -250.5t65.5 -308.5zM994 800q0 -93 -65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5zM1282 768q0 -122 -53.5 -228.5t-146.5 -177.5q-8 -6 -16 -2t-10 14q-6 52 -29 92q-7 10 3 20
+q58 54 91 127t33 155q0 111 -58.5 204t-157.5 141.5t-212 36.5q-133 -15 -229 -113t-109 -231q-10 -92 23.5 -176t98.5 -144q10 -10 3 -20q-24 -41 -29 -93q-2 -9 -10 -13t-16 2q-95 74 -148.5 183t-51.5 234q3 131 69 244t177 181.5t241 74.5q144 7 268 -60t196.5 -187.5
+t72.5 -263.5z" />
+ <glyph glyph-name="uniF2D0" unicode="&#xf2d0;" horiz-adv-x="1792"
+d="M256 128h1280v768h-1280v-768zM1792 1248v-1216q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1472q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2D1" unicode="&#xf2d1;" horiz-adv-x="1792"
+d="M1792 224v-192q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v192q0 66 47 113t113 47h1472q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2D2" unicode="&#xf2d2;" horiz-adv-x="2048"
+d="M256 0h768v512h-768v-512zM1280 512h512v768h-768v-256h96q66 0 113 -47t47 -113v-352zM2048 1376v-960q0 -66 -47 -113t-113 -47h-608v-352q0 -66 -47 -113t-113 -47h-960q-66 0 -113 47t-47 113v960q0 66 47 113t113 47h608v352q0 66 47 113t113 47h960q66 0 113 -47
+t47 -113z" />
+ <glyph glyph-name="uniF2D3" unicode="&#xf2d3;" horiz-adv-x="1792"
+d="M1175 215l146 146q10 10 10 23t-10 23l-233 233l233 233q10 10 10 23t-10 23l-146 146q-10 10 -23 10t-23 -10l-233 -233l-233 233q-10 10 -23 10t-23 -10l-146 -146q-10 -10 -10 -23t10 -23l233 -233l-233 -233q-10 -10 -10 -23t10 -23l146 -146q10 -10 23 -10t23 10
+l233 233l233 -233q10 -10 23 -10t23 10zM1792 1248v-1216q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1472q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2D4" unicode="&#xf2d4;" horiz-adv-x="1792"
+d="M1257 425l-146 -146q-10 -10 -23 -10t-23 10l-169 169l-169 -169q-10 -10 -23 -10t-23 10l-146 146q-10 10 -10 23t10 23l169 169l-169 169q-10 10 -10 23t10 23l146 146q10 10 23 10t23 -10l169 -169l169 169q10 10 23 10t23 -10l146 -146q10 -10 10 -23t-10 -23
+l-169 -169l169 -169q10 -10 10 -23t-10 -23zM256 128h1280v1024h-1280v-1024zM1792 1248v-1216q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113v1216q0 66 47 113t113 47h1472q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2D5" unicode="&#xf2d5;" horiz-adv-x="1792"
+d="M1070 358l306 564h-654l-306 -564h654zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="uniF2D6" unicode="&#xf2d6;" horiz-adv-x="1794"
+d="M1291 1060q-15 17 -35 8.5t-26 -28.5t5 -38q14 -17 40 -14.5t34 20.5t-18 52zM895 814q-8 -8 -19.5 -8t-18.5 8q-8 8 -8 19t8 18q7 8 18.5 8t19.5 -8q7 -7 7 -18t-7 -19zM1060 740l-35 -35q-12 -13 -29.5 -13t-30.5 13l-38 38q-12 13 -12 30t12 30l35 35q12 12 29.5 12
+t30.5 -12l38 -39q12 -12 12 -29.5t-12 -29.5zM951 870q-7 -8 -18.5 -8t-19.5 8q-7 8 -7 19t7 19q8 8 19 8t19 -8t8 -19t-8 -19zM1354 968q-34 -64 -107.5 -85.5t-127.5 16.5q-38 28 -61 66.5t-21 87.5t39 92t75.5 53t70.5 -5t70 -51q2 -2 13 -12.5t14.5 -13.5t13 -13.5
+t12.5 -15.5t10 -15.5t8.5 -18t4 -18.5t1 -21t-5 -22t-9.5 -24zM1555 486q3 20 -8.5 34.5t-27.5 21.5t-33 17t-23 20q-40 71 -84 98.5t-113 11.5q19 13 40 18.5t33 4.5l12 -1q2 45 -34 90q6 20 6.5 40.5t-2.5 30.5l-3 10q43 24 71 65t34 91q10 84 -43 150.5t-137 76.5
+q-60 7 -114 -18.5t-82 -74.5q-30 -51 -33.5 -101t14.5 -87t43.5 -64t56.5 -42q-45 4 -88 36t-57 88q-28 108 32 222q-16 21 -29 32q-50 0 -89 -19q19 24 42 37t36 14l13 1q0 50 -13 78q-10 21 -32.5 28.5t-47 -3.5t-37.5 -40q2 4 4 7q-7 -28 -6.5 -75.5t19 -117t48.5 -122.5
+q-25 -14 -47 -36q-35 -16 -85.5 -70.5t-84.5 -101.5l-33 -46q-90 -34 -181 -125.5t-75 -162.5q1 -16 11 -27q-15 -12 -30 -30q-21 -25 -21 -54t21.5 -40t63.5 6q41 19 77 49.5t55 60.5q-2 2 -6.5 5t-20.5 7.5t-33 3.5q23 5 51 12.5t40 10t27.5 6t26 4t23.5 0.5q14 -7 22 34
+q7 37 7 90q0 102 -40 150q106 -103 101 -219q-1 -29 -15 -50t-27 -27l-13 -6q-4 -7 -19 -32t-26 -45.5t-26.5 -52t-25 -61t-17 -63t-6.5 -66.5t10 -63q-35 54 -37 80q-22 -24 -34.5 -39t-33.5 -42t-30.5 -46t-16.5 -41t-0.5 -38t25.5 -27q45 -25 144 64t190.5 221.5
+t122.5 228.5q86 52 145 115.5t86 119.5q47 -93 154 -178q104 -83 167 -80q39 2 46 43zM1794 640q0 -182 -71 -348t-191 -286t-286.5 -191t-348.5 -71t-348.5 71t-286.5 191t-191 286t-71 348t71 348t191 286t286.5 191t348.5 71t348.5 -71t286.5 -191t191 -286t71 -348z" />
+ <glyph glyph-name="uniF2D7" unicode="&#xf2d7;"
+d="M518 1353v-655q103 -1 191.5 1.5t125.5 5.5l37 3q68 2 90.5 24.5t39.5 94.5l33 142h103l-14 -322l7 -319h-103l-29 127q-15 68 -45 93t-84 26q-87 8 -352 8v-556q0 -78 43.5 -115.5t133.5 -37.5h357q35 0 59.5 2t55 7.5t54 18t48.5 32t46 50.5t39 73l93 216h89
+q-6 -37 -31.5 -252t-30.5 -276q-146 5 -263.5 8t-162.5 4h-44h-628l-376 -12v102l127 25q67 13 91.5 37t25.5 79l8 643q3 402 -8 645q-2 61 -25.5 84t-91.5 36l-127 24v102l376 -12h702q139 0 374 27q-6 -68 -14 -194.5t-12 -219.5l-5 -92h-93l-32 124q-31 121 -74 179.5
+t-113 58.5h-548q-28 0 -35.5 -8.5t-7.5 -30.5z" />
+ <glyph glyph-name="uniF2D8" unicode="&#xf2d8;"
+d="M922 739v-182q0 -4 0.5 -15t0 -15l-1.5 -12t-3.5 -11.5t-6.5 -7.5t-11 -5.5t-16 -1.5v309q9 0 16 -1t11 -5t6.5 -5.5t3.5 -9.5t1 -10.5v-13.5v-14zM1238 643v-121q0 -1 0.5 -12.5t0 -15.5t-2.5 -11.5t-7.5 -10.5t-13.5 -3q-9 0 -14 9q-4 10 -4 165v7v8.5v9t1.5 8.5l3.5 7
+t5 5.5t8 1.5q6 0 10 -1.5t6.5 -4.5t4 -6t2 -8.5t0.5 -8v-9.5v-9zM180 407h122v472h-122v-472zM614 407h106v472h-159l-28 -221q-20 148 -32 221h-158v-472h107v312l45 -312h76l43 319v-319zM1039 712q0 67 -5 90q-3 16 -11 28.5t-17 20.5t-25 14t-26.5 8.5t-31 4t-29 1.5
+h-29.5h-12h-91v-472h56q169 -1 197 24.5t25 180.5q-1 62 -1 100zM1356 515v133q0 29 -2 45t-9.5 33.5t-24.5 25t-46 7.5q-46 0 -77 -34v154h-117v-472h110l7 30q30 -36 77 -36q50 0 66 30.5t16 83.5zM1536 1248v-1216q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113
+v1216q0 66 47 113t113 47h1216q66 0 113 -47t47 -113z" />
+ <glyph glyph-name="uniF2D9" unicode="&#xf2d9;" horiz-adv-x="2176"
+d="M1143 -197q-6 1 -11 4q-13 8 -36 23t-86 65t-116.5 104.5t-112 140t-89.5 172.5q-17 3 -175 37q66 -213 235 -362t391 -184zM502 409l168 -28q-25 76 -41 167.5t-19 145.5l-4 53q-84 -82 -121 -224q5 -65 17 -114zM612 1018q-43 -64 -77 -148q44 46 74 68zM2049 584
+q0 161 -62 307t-167.5 252t-250.5 168.5t-304 62.5q-147 0 -281 -52.5t-240 -148.5q-30 -58 -45 -160q60 51 143 83.5t158.5 43t143 13.5t108.5 -1l40 -3q33 -1 53 -15.5t24.5 -33t6.5 -37t-1 -28.5q-126 11 -227.5 0.5t-183 -43.5t-142.5 -71.5t-131 -98.5
+q4 -36 11.5 -92.5t35.5 -178t62 -179.5q123 -6 247.5 14.5t214.5 53.5t162.5 67t109.5 59l37 24q22 16 39.5 20.5t30.5 -5t17 -34.5q14 -97 -39 -121q-208 -97 -467 -134q-135 -20 -317 -16q41 -96 110 -176.5t137 -127t130.5 -79t101.5 -43.5l39 -12q143 -23 263 15
+q195 99 314 289t119 418zM2123 621q-14 -135 -40 -212q-70 -208 -181.5 -346.5t-318.5 -253.5q-48 -33 -82 -44q-72 -26 -163 -16q-36 -3 -73 -3q-283 0 -504.5 173t-295.5 442q-1 0 -4 0.5t-5 0.5q-6 -50 2.5 -112.5t26 -115t36 -98t31.5 -71.5l14 -26q8 -12 54 -82
+q-71 38 -124.5 106.5t-78.5 140t-39.5 137t-17.5 107.5l-2 42q-5 2 -33.5 12.5t-48.5 18t-53 20.5t-57.5 25t-50 25.5t-42.5 27t-25 25.5q19 -10 50.5 -25.5t113 -45.5t145.5 -38l2 32q11 149 94 290q41 202 176 365q28 115 81 214q15 28 32 45t49 32q158 74 303.5 104
+t302 11t306.5 -97q220 -115 333 -336t87 -474z" />
+ <glyph glyph-name="uniF2DA" unicode="&#xf2da;" horiz-adv-x="1792"
+d="M1341 752q29 44 -6.5 129.5t-121.5 142.5q-58 39 -125.5 53.5t-118 4.5t-68.5 -37q-12 -23 -4.5 -28t42.5 -10q23 -3 38.5 -5t44.5 -9.5t56 -17.5q36 -13 67.5 -31.5t53 -37t40 -38.5t30.5 -38t22 -34.5t16.5 -28.5t12 -18.5t10.5 -6t11 9.5zM1704 178
+q-52 -127 -148.5 -220t-214.5 -141.5t-253 -60.5t-266 13.5t-251 91t-210 161.5t-141.5 235.5t-46.5 303.5q1 41 8.5 84.5t12.5 64t24 80.5t23 73q-51 -208 1 -397t173 -318t291 -206t346 -83t349 74.5t289 244.5q20 27 18 14q0 -4 -4 -14zM1465 627q0 -104 -40.5 -199
+t-108.5 -164t-162 -109.5t-198 -40.5t-198 40.5t-162 109.5t-108.5 164t-40.5 199t40.5 199t108.5 164t162 109.5t198 40.5t198 -40.5t162 -109.5t108.5 -164t40.5 -199zM1752 915q-65 147 -180.5 251t-253 153.5t-292 53.5t-301 -36.5t-275.5 -129t-220 -211.5t-131 -297
+t-10 -373q-49 161 -51.5 311.5t35.5 272.5t109 227t165.5 180.5t207 126t232 71t242.5 9t236 -54t216 -124.5t178 -197q33 -50 62 -121t31 -112zM1690 573q12 244 -136.5 416t-396.5 240q-8 0 -10 5t24 8q125 -4 230 -50t173 -120t116 -168.5t58.5 -199t-1 -208
+t-61.5 -197.5t-122.5 -167t-185 -117.5t-248.5 -46.5q108 30 201.5 80t174 123t129.5 176.5t55 225.5z" />
+ <glyph glyph-name="uniF2DB" unicode="&#xf2db;"
+d="M192 256v-128h-112q-16 0 -16 16v16h-48q-16 0 -16 16v32q0 16 16 16h48v16q0 16 16 16h112zM192 512v-128h-112q-16 0 -16 16v16h-48q-16 0 -16 16v32q0 16 16 16h48v16q0 16 16 16h112zM192 768v-128h-112q-16 0 -16 16v16h-48q-16 0 -16 16v32q0 16 16 16h48v16
+q0 16 16 16h112zM192 1024v-128h-112q-16 0 -16 16v16h-48q-16 0 -16 16v32q0 16 16 16h48v16q0 16 16 16h112zM192 1280v-128h-112q-16 0 -16 16v16h-48q-16 0 -16 16v32q0 16 16 16h48v16q0 16 16 16h112zM1280 1440v-1472q0 -40 -28 -68t-68 -28h-832q-40 0 -68 28
+t-28 68v1472q0 40 28 68t68 28h832q40 0 68 -28t28 -68zM1536 208v-32q0 -16 -16 -16h-48v-16q0 -16 -16 -16h-112v128h112q16 0 16 -16v-16h48q16 0 16 -16zM1536 464v-32q0 -16 -16 -16h-48v-16q0 -16 -16 -16h-112v128h112q16 0 16 -16v-16h48q16 0 16 -16zM1536 720v-32
+q0 -16 -16 -16h-48v-16q0 -16 -16 -16h-112v128h112q16 0 16 -16v-16h48q16 0 16 -16zM1536 976v-32q0 -16 -16 -16h-48v-16q0 -16 -16 -16h-112v128h112q16 0 16 -16v-16h48q16 0 16 -16zM1536 1232v-32q0 -16 -16 -16h-48v-16q0 -16 -16 -16h-112v128h112q16 0 16 -16v-16
+h48q16 0 16 -16z" />
+ <glyph glyph-name="uniF2DC" unicode="&#xf2dc;" horiz-adv-x="1664"
+d="M1566 419l-167 -33l186 -107q23 -13 29.5 -38.5t-6.5 -48.5q-14 -23 -39 -29.5t-48 6.5l-186 106l55 -160q13 -38 -12 -63.5t-60.5 -20.5t-48.5 42l-102 300l-271 156v-313l208 -238q16 -18 17 -39t-11 -36.5t-28.5 -25t-37 -5.5t-36.5 22l-112 128v-214q0 -26 -19 -45
+t-45 -19t-45 19t-19 45v214l-112 -128q-16 -18 -36.5 -22t-37 5.5t-28.5 25t-11 36.5t17 39l208 238v313l-271 -156l-102 -300q-13 -37 -48.5 -42t-60.5 20.5t-12 63.5l55 160l-186 -106q-23 -13 -48 -6.5t-39 29.5q-13 23 -6.5 48.5t29.5 38.5l186 107l-167 33
+q-29 6 -42 29t-8.5 46.5t25.5 40t50 10.5l310 -62l271 157l-271 157l-310 -62q-4 -1 -13 -1q-27 0 -44 18t-19 40t11 43t40 26l167 33l-186 107q-23 13 -29.5 38.5t6.5 48.5t39 30t48 -7l186 -106l-55 160q-13 38 12 63.5t60.5 20.5t48.5 -42l102 -300l271 -156v313
+l-208 238q-16 18 -17 39t11 36.5t28.5 25t37 5.5t36.5 -22l112 -128v214q0 26 19 45t45 19t45 -19t19 -45v-214l112 128q16 18 36.5 22t37 -5.5t28.5 -25t11 -36.5t-17 -39l-208 -238v-313l271 156l102 300q13 37 48.5 42t60.5 -20.5t12 -63.5l-55 -160l186 106
+q23 13 48 6.5t39 -29.5q13 -23 6.5 -48.5t-29.5 -38.5l-186 -107l167 -33q27 -5 40 -26t11 -43t-19 -40t-44 -18q-9 0 -13 1l-310 62l-271 -157l271 -157l310 62q29 6 50 -10.5t25.5 -40t-8.5 -46.5t-42 -29z" />
+ <glyph glyph-name="uniF2DD" unicode="&#xf2dd;" horiz-adv-x="1792"
+d="M1473 607q7 118 -33 226.5t-113 189t-177 131t-221 57.5q-116 7 -225.5 -32t-192 -110.5t-135 -175t-59.5 -220.5q-7 -118 33 -226.5t113 -189t177.5 -131t221.5 -57.5q155 -9 293 59t224 195.5t94 283.5zM1792 1536l-349 -348q120 -117 180.5 -272t50.5 -321
+q-11 -183 -102 -339t-241 -255.5t-332 -124.5l-999 -132l347 347q-120 116 -180.5 271.5t-50.5 321.5q11 184 102 340t241.5 255.5t332.5 124.5q167 22 500 66t500 66z" />
+ <glyph glyph-name="uniF2DE" unicode="&#xf2de;" horiz-adv-x="1792"
+d="M948 508l163 -329h-51l-175 350l-171 -350h-49l179 374l-78 33l21 49l240 -102l-21 -50zM563 1100l304 -130l-130 -304l-304 130zM907 915l240 -103l-103 -239l-239 102zM1188 765l191 -81l-82 -190l-190 81zM1680 640q0 159 -62 304t-167.5 250.5t-250.5 167.5t-304 62
+t-304 -62t-250.5 -167.5t-167.5 -250.5t-62 -304t62 -304t167.5 -250.5t250.5 -167.5t304 -62t304 62t250.5 167.5t167.5 250.5t62 304zM1792 640q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348t71 348t191 286t286 191t348 71t348 -71
+t286 -191t191 -286t71 -348z" />
+ <glyph glyph-name="uniF2E0" unicode="&#xf2e0;" horiz-adv-x="1920"
+d="M1334 302q-4 24 -27.5 34t-49.5 10.5t-48.5 12.5t-25.5 38q-5 47 33 139.5t75 181t32 127.5q-14 101 -117 103q-45 1 -75 -16l-3 -2l-5 -2.5t-4.5 -2t-5 -2t-5 -0.5t-6 1.5t-6 3.5t-6.5 5q-3 2 -9 8.5t-9 9t-8.5 7.5t-9.5 7.5t-9.5 5.5t-11 4.5t-11.5 2.5q-30 5 -48 -3
+t-45 -31q-1 -1 -9 -8.5t-12.5 -11t-15 -10t-16.5 -5.5t-17 3q-54 27 -84 40q-41 18 -94 -5t-76 -65q-16 -28 -41 -98.5t-43.5 -132.5t-40 -134t-21.5 -73q-22 -69 18.5 -119t110.5 -46q30 2 50.5 15t38.5 46q7 13 79 199.5t77 194.5q6 11 21.5 18t29.5 0q27 -15 21 -53
+q-2 -18 -51 -139.5t-50 -132.5q-6 -38 19.5 -56.5t60.5 -7t55 49.5q4 8 45.5 92t81.5 163.5t46 88.5q20 29 41 28q29 0 25 -38q-2 -16 -65.5 -147.5t-70.5 -159.5q-12 -53 13 -103t74 -74q17 -9 51 -15.5t71.5 -8t62.5 14t20 48.5zM383 86q3 -15 -5 -27.5t-23 -15.5
+q-14 -3 -26.5 5t-15.5 23q-3 14 5 27t22 16t27 -5t16 -23zM953 -177q12 -17 8.5 -37.5t-20.5 -32.5t-37.5 -8t-32.5 21q-11 17 -7.5 37.5t20.5 32.5t37.5 8t31.5 -21zM177 635q-18 -27 -49.5 -33t-57.5 13q-26 18 -32 50t12 58q18 27 49.5 33t57.5 -12q26 -19 32 -50.5
+t-12 -58.5zM1467 -42q19 -28 13 -61.5t-34 -52.5t-60.5 -13t-51.5 34t-13 61t33 53q28 19 60.5 13t52.5 -34zM1579 562q69 -113 42.5 -244.5t-134.5 -207.5q-90 -63 -199 -60q-20 -80 -84.5 -127t-143.5 -44.5t-140 57.5q-12 -9 -13 -10q-103 -71 -225 -48.5t-193 126.5
+q-50 73 -53 164q-83 14 -142.5 70.5t-80.5 128t-2 152t81 138.5q-36 60 -38 128t24.5 125t79.5 98.5t121 50.5q32 85 99 148t146.5 91.5t168 17t159.5 -66.5q72 21 140 17.5t128.5 -36t104.5 -80t67.5 -115t17.5 -140.5q52 -16 87 -57t45.5 -89t-5.5 -99.5t-58 -87.5z
+M455 1222q14 -20 9.5 -44.5t-24.5 -38.5q-19 -14 -43.5 -9.5t-37.5 24.5q-14 20 -9.5 44.5t24.5 38.5q19 14 43.5 9.5t37.5 -24.5zM614 1503q4 -16 -5 -30.5t-26 -18.5t-31 5.5t-18 26.5q-3 17 6.5 31t25.5 18q17 4 31 -5.5t17 -26.5zM1800 555q4 -20 -6.5 -37t-30.5 -21
+q-19 -4 -36 6.5t-21 30.5t6.5 37t30.5 22q20 4 36.5 -7.5t20.5 -30.5zM1136 1448q16 -27 8.5 -58.5t-35.5 -47.5q-27 -16 -57.5 -8.5t-46.5 34.5q-16 28 -8.5 59t34.5 48t58 9t47 -36zM1882 792q4 -15 -4 -27.5t-23 -16.5q-15 -3 -27.5 5.5t-15.5 22.5q-3 15 5 28t23 16
+q14 3 26.5 -5t15.5 -23zM1691 1033q15 -22 10.5 -49t-26.5 -43q-22 -15 -49 -10t-42 27t-10 49t27 43t48.5 11t41.5 -28z" />
+ <glyph glyph-name="uniF2E1" unicode="&#xf2e1;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E2" unicode="&#xf2e2;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E3" unicode="&#xf2e3;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E4" unicode="&#xf2e4;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E5" unicode="&#xf2e5;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E6" unicode="&#xf2e6;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E7" unicode="&#xf2e7;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="_698" unicode="&#xf2e8;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2E9" unicode="&#xf2e9;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2EA" unicode="&#xf2ea;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2EB" unicode="&#xf2eb;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2EC" unicode="&#xf2ec;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2ED" unicode="&#xf2ed;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="uniF2EE" unicode="&#xf2ee;" horiz-adv-x="1792"
+ />
+ <glyph glyph-name="lessequal" unicode="&#xf500;" horiz-adv-x="1792"
+ />
+ </font>
+</defs></svg>
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.ttf b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.ttf
new file mode 100644
index 00000000..35acda2f
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.ttf
Binary files differ
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff
new file mode 100644
index 00000000..400014a4
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff
Binary files differ
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff2 b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff2
new file mode 100644
index 00000000..4d13fc60
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff2
Binary files differ
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/images/logo_invert.png b/docs/docsite/_themes/sphinx_rtd_theme/static/images/logo_invert.png
new file mode 100644
index 00000000..ea565b75
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/images/logo_invert.png
Binary files differ
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/js/modernizr.min.js b/docs/docsite/_themes/sphinx_rtd_theme/static/js/modernizr.min.js
new file mode 100644
index 00000000..f65d4797
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/js/modernizr.min.js
@@ -0,0 +1,4 @@
+/* Modernizr 2.6.2 (Custom Build) | MIT & BSD
+ * Build: http://modernizr.com/download/#-fontface-backgroundsize-borderimage-borderradius-boxshadow-flexbox-hsla-multiplebgs-opacity-rgba-textshadow-cssanimations-csscolumns-generatedcontent-cssgradients-cssreflections-csstransforms-csstransforms3d-csstransitions-applicationcache-canvas-canvastext-draganddrop-hashchange-history-audio-video-indexeddb-input-inputtypes-localstorage-postmessage-sessionstorage-websockets-websqldatabase-webworkers-geolocation-inlinesvg-smil-svg-svgclippaths-touch-webgl-shiv-mq-cssclasses-addtest-prefixed-teststyles-testprop-testallprops-hasevent-prefixes-domprefixes-load
+ */
+;window.Modernizr=function(a,b,c){function D(a){j.cssText=a}function E(a,b){return D(n.join(a+";")+(b||""))}function F(a,b){return typeof a===b}function G(a,b){return!!~(""+a).indexOf(b)}function H(a,b){for(var d in a){var e=a[d];if(!G(e,"-")&&j[e]!==c)return b=="pfx"?e:!0}return!1}function I(a,b,d){for(var e in a){var f=b[a[e]];if(f!==c)return d===!1?a[e]:F(f,"function")?f.bind(d||b):f}return!1}function J(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+p.join(d+" ")+d).split(" ");return F(b,"string")||F(b,"undefined")?H(e,b):(e=(a+" "+q.join(d+" ")+d).split(" "),I(e,b,c))}function K(){e.input=function(c){for(var d=0,e=c.length;d<e;d++)u[c[d]]=c[d]in k;return u.list&&(u.list=!!b.createElement("datalist")&&!!a.HTMLDataListElement),u}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),e.inputtypes=function(a){for(var d=0,e,f,h,i=a.length;d<i;d++)k.setAttribute("type",f=a[d]),e=k.type!=="text",e&&(k.value=l,k.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(f)&&k.style.WebkitAppearance!==c?(g.appendChild(k),h=b.defaultView,e=h.getComputedStyle&&h.getComputedStyle(k,null).WebkitAppearance!=="textfield"&&k.offsetHeight!==0,g.removeChild(k)):/^(search|tel)$/.test(f)||(/^(url|email)$/.test(f)?e=k.checkValidity&&k.checkValidity()===!1:e=k.value!=l)),t[a[d]]=!!e;return t}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var d="2.6.2",e={},f=!0,g=b.documentElement,h="modernizr",i=b.createElement(h),j=i.style,k=b.createElement("input"),l=":)",m={}.toString,n=" -webkit- -moz- -o- -ms- ".split(" "),o="Webkit Moz O ms",p=o.split(" "),q=o.toLowerCase().split(" "),r={svg:"http://www.w3.org/2000/svg"},s={},t={},u={},v=[],w=v.slice,x,y=function(a,c,d,e){var f,i,j,k,l=b.createElement("div"),m=b.body,n=m||b.createElement("body");if(parseInt(d,10))while(d--)j=b.createElement("div"),j.id=e?e[d]:h+(d+1),l.appendChild(j);return f=["&#173;",'<style id="s',h,'">',a,"</style>"].join(""),l.id=h,(m?l:n).innerHTML+=f,n.appendChild(l),m||(n.style.background="",n.style.overflow="hidden",k=g.style.overflow,g.style.overflow="hidden",g.appendChild(n)),i=c(l,a),m?l.parentNode.removeChild(l):(n.parentNode.removeChild(n),g.style.overflow=k),!!i},z=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return y("@media "+b+" { #"+h+" { position: absolute; } }",function(b){d=(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle)["position"]=="absolute"}),d},A=function(){function d(d,e){e=e||b.createElement(a[d]||"div"),d="on"+d;var f=d in e;return f||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(d,""),f=F(e[d],"function"),F(e[d],"undefined")||(e[d]=c),e.removeAttribute(d))),e=null,f}var a={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return d}(),B={}.hasOwnProperty,C;!F(B,"undefined")&&!F(B.call,"undefined")?C=function(a,b){return B.call(a,b)}:C=function(a,b){return b in a&&F(a.constructor.prototype[b],"undefined")},Function.prototype.bind||(Function.prototype.bind=function(b){var c=this;if(typeof c!="function")throw new TypeError;var d=w.call(arguments,1),e=function(){if(this instanceof e){var a=function(){};a.prototype=c.prototype;var f=new a,g=c.apply(f,d.concat(w.call(arguments)));return Object(g)===g?g:f}return c.apply(b,d.concat(w.call(arguments)))};return e}),s.flexbox=function(){return J("flexWrap")},s.canvas=function(){var 
a=b.createElement("canvas");return!!a.getContext&&!!a.getContext("2d")},s.canvastext=function(){return!!e.canvas&&!!F(b.createElement("canvas").getContext("2d").fillText,"function")},s.webgl=function(){return!!a.WebGLRenderingContext},s.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:y(["@media (",n.join("touch-enabled),("),h,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=a.offsetTop===9}),c},s.geolocation=function(){return"geolocation"in navigator},s.postmessage=function(){return!!a.postMessage},s.websqldatabase=function(){return!!a.openDatabase},s.indexedDB=function(){return!!J("indexedDB",a)},s.hashchange=function(){return A("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},s.history=function(){return!!a.history&&!!history.pushState},s.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},s.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},s.rgba=function(){return D("background-color:rgba(150,255,150,.5)"),G(j.backgroundColor,"rgba")},s.hsla=function(){return D("background-color:hsla(120,40%,100%,.5)"),G(j.backgroundColor,"rgba")||G(j.backgroundColor,"hsla")},s.multiplebgs=function(){return D("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(j.background)},s.backgroundsize=function(){return J("backgroundSize")},s.borderimage=function(){return J("borderImage")},s.borderradius=function(){return J("borderRadius")},s.boxshadow=function(){return J("boxShadow")},s.textshadow=function(){return b.createElement("div").style.textShadow===""},s.opacity=function(){return E("opacity:.55"),/^0.55$/.test(j.opacity)},s.cssanimations=function(){return J("animationName")},s.csscolumns=function(){return J("columnCount")},s.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return D((a+"-webkit- ".split(" ").join(b+a)+n.join(c+a)).slice(0,-a.length)),G(j.backgroundImage,"gradient")},s.cssreflections=function(){return J("boxReflect")},s.csstransforms=function(){return!!J("transform")},s.csstransforms3d=function(){var a=!!J("perspective");return a&&"webkitPerspective"in g.style&&y("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b,c){a=b.offsetLeft===9&&b.offsetHeight===3}),a},s.csstransitions=function(){return J("transition")},s.fontface=function(){var a;return y('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&g.indexOf(d.split(" ")[0])===0}),a},s.generatedcontent=function(){var a;return y(["#",h,"{font:0/0 a}#",h,':after{content:"',l,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},s.video=function(){var a=b.createElement("video"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,"")}catch(d){}return c},s.audio=function(){var a=b.createElement("audio"),c=!1;try{if(c=!!a.canPlayType)c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; 
codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,"")}catch(d){}return c},s.localstorage=function(){try{return localStorage.setItem(h,h),localStorage.removeItem(h),!0}catch(a){return!1}},s.sessionstorage=function(){try{return sessionStorage.setItem(h,h),sessionStorage.removeItem(h),!0}catch(a){return!1}},s.webworkers=function(){return!!a.Worker},s.applicationcache=function(){return!!a.applicationCache},s.svg=function(){return!!b.createElementNS&&!!b.createElementNS(r.svg,"svg").createSVGRect},s.inlinesvg=function(){var a=b.createElement("div");return a.innerHTML="<svg/>",(a.firstChild&&a.firstChild.namespaceURI)==r.svg},s.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(m.call(b.createElementNS(r.svg,"animate")))},s.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(m.call(b.createElementNS(r.svg,"clipPath")))};for(var L in s)C(s,L)&&(x=L.toLowerCase(),e[x]=s[L](),v.push((e[x]?"":"no-")+x));return e.input||K(),e.addTest=function(a,b){if(typeof a=="object")for(var d in a)C(a,d)&&e.addTest(d,a[d]);else{a=a.toLowerCase();if(e[a]!==c)return e;b=typeof b=="function"?b():b,typeof f!="undefined"&&f&&(g.className+=" "+(b?"":"no-")+a),e[a]=b}return e},D(""),i=k=null,function(a,b){function k(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function l(){var a=r.elements;return typeof a=="string"?a.split(" "):a}function m(a){var b=i[a[g]];return b||(b={},h++,a[g]=h,i[h]=b),b}function n(a,c,f){c||(c=b);if(j)return c.createElement(a);f||(f=m(c));var g;return f.cache[a]?g=f.cache[a].cloneNode():e.test(a)?g=(f.cache[a]=f.createElem(a)).cloneNode():g=f.createElem(a),g.canHaveChildren&&!d.test(a)?f.frag.appendChild(g):g}function o(a,c){a||(a=b);if(j)return a.createDocumentFragment();c=c||m(a);var d=c.frag.cloneNode(),e=0,f=l(),g=f.length;for(;e<g;e++)d.createElement(f[e]);return d}function p(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return r.shivMethods?n(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+l().join().replace(/\w+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(r,b.frag)}function q(a){a||(a=b);var c=m(a);return r.shivCSS&&!f&&!c.hasCSS&&(c.hasCSS=!!k(a,"article,aside,figcaption,figure,footer,header,hgroup,nav,section{display:block}mark{background:#FF0;color:#000}")),j||p(a,c),a}var c=a.html5||{},d=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,e=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,f,g="_html5shiv",h=0,i={},j;(function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",f="hidden"in a,j=a.childNodes.length==1||function(){b.createElement("a");var a=b.createDocumentFragment();return typeof a.cloneNode=="undefined"||typeof a.createDocumentFragment=="undefined"||typeof a.createElement=="undefined"}()}catch(c){f=!0,j=!0}})();var r={elements:c.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time 
video",shivCSS:c.shivCSS!==!1,supportsUnknownElements:j,shivMethods:c.shivMethods!==!1,type:"default",shivDocument:q,createElement:n,createDocumentFragment:o};a.html5=r,q(b)}(this,b),e._version=d,e._prefixes=n,e._domPrefixes=q,e._cssomPrefixes=p,e.mq=z,e.hasEvent=A,e.testProp=function(a){return H([a])},e.testAllProps=J,e.testStyles=y,e.prefixed=function(a,b,c){return b?J(a,b,c):J(a,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(f?" js "+v.join(" "):""),e}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==o.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=p.shift();q=1,a?a.t?m(function(){("c"==a.t?B.injectCss:B.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):q=0}function i(a,c,d,e,f,i,j){function k(b){if(!o&&g(l.readyState)&&(u.r=o=1,!q&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&m(function(){t.removeChild(l)},50);for(var d in y[c])y[c].hasOwnProperty(d)&&y[c][d].onload()}}var j=j||B.errorTimeout,l=b.createElement(a),o=0,r=0,u={t:d,s:c,e:f,a:i,x:j};1===y[c]&&(r=1,y[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,r)},p.splice(e,0,u),"img"!=a&&(r||2===y[c]?(t.insertBefore(l,s?null:n),m(k,j)):y[c].push(l))}function j(a,b,c,d,f){return q=0,b=b||"j",e(a)?i("c"==b?v:u,a,b,this.i++,c,d,f):(p.splice(this.i++,0,a),1==p.length&&h()),this}function k(){var a=B;return a.loader={load:j,i:0},a}var l=b.documentElement,m=a.setTimeout,n=b.getElementsByTagName("script")[0],o={}.toString,p=[],q=0,r="MozAppearance"in l.style,s=r&&!!b.createRange().compareNode,t=s?l:n.parentNode,l=a.opera&&"[object Opera]"==o.call(a.opera),l=!!b.attachEvent&&!l,u=r?"object":l?"script":"img",v=l?"script":u,w=Array.isArray||function(a){return"[object Array]"==o.call(a)},x=[],y={},z={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}},A,B;B=function(a){function b(a){var a=a.split("!"),b=x.length,c=a.pop(),d=a.length,c={url:c,origUrl:c,prefixes:a},e,f,g;for(f=0;f<d;f++)g=a[f].split("="),(e=z[g.shift()])&&(c=e(c,g));for(f=0;f<b;f++)c=x[f](c);return c}function g(a,e,f,g,h){var i=b(a),j=i.autoCallback;i.url.split(".").pop().split("?").shift(),i.bypass||(e&&(e=d(e)?e:e[a]||e[g]||e[a.split("/").pop().split("?")[0]]),i.instead?i.instead(a,e,f,g,h):(y[i.url]?i.noexec=!0:y[i.url]=1,f.load(i.url,i.forceCSS||!i.forceJS&&"css"==i.url.split(".").pop().split("?").shift()?"c":c,i.noexec,i.attrs,i.timeout),(d(e)||d(j))&&f.load(function(){k(),e&&e(i.origUrl,h,g),j&&j(i.origUrl,h,g),y[i.url]=2})))}function h(a,b){function c(a,c){if(a){if(e(a))c||(j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}),g(a,j,b,0,h);else if(Object(a)===a)for(n in m=function(){var b=0,c;for(c in a)a.hasOwnProperty(c)&&b++;return b}(),a)a.hasOwnProperty(n)&&(!c&&!--m&&(d(j)?j=function(){var a=[].slice.call(arguments);k.apply(this,a),l()}:j[n]=function(a){return function(){var b=[].slice.call(arguments);a&&a.apply(this,b),l()}}(k[n])),g(a[n],j,b,n,h))}else!c&&l()}var h=!!a.test,i=a.load||a.both,j=a.callback||f,k=j,l=a.complete||f,m,n;c(h?a.yep:a.nope,!!i),i&&c(i)}var i,j,l=this.yepnope.loader;if(e(a))g(a,0,l,0);else if(w(a))for(i=0;i<a.length;i++)j=a[i],e(j)?g(j,0,l,0):w(j)?B(j):Object(j)===j&&h(j,l);else 
Object(a)===a&&h(a,l)},B.addPrefix=function(a,b){z[a]=b},B.addFilter=function(a){x.push(a)},B.errorTimeout=1e4,null==b.readyState&&b.addEventListener&&(b.readyState="loading",b.addEventListener("DOMContentLoaded",A=function(){b.removeEventListener("DOMContentLoaded",A,0),b.readyState="complete"},0)),a.yepnope=k(),a.yepnope.executeStack=h,a.yepnope.injectJs=function(a,c,d,e,i,j){var k=b.createElement("script"),l,o,e=e||B.errorTimeout;k.src=a;for(o in d)k.setAttribute(o,d[o]);c=j?h:c||f,k.onreadystatechange=k.onload=function(){!l&&g(k.readyState)&&(l=1,c(),k.onload=k.onreadystatechange=null)},m(function(){l||(l=1,c(1))},e),i?k.onload():n.parentNode.insertBefore(k,n)},a.yepnope.injectCss=function(a,c,d,e,g,i){var e=b.createElement("link"),j,c=i?h:c||f;e.href=a,e.rel="stylesheet",e.type="text/css";for(j in d)e.setAttribute(j,d[j]);g||(n.parentNode.insertBefore(e,n),m(c,0))}}(this,document),Modernizr.load=function(){yepnope.apply(window,[].slice.call(arguments,0))};
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/static/js/theme.js b/docs/docsite/_themes/sphinx_rtd_theme/static/js/theme.js
new file mode 100644
index 00000000..aa5d7e06
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/static/js/theme.js
@@ -0,0 +1,3 @@
+/* sphinx_rtd_theme version 0.4.3 | MIT license */
+/* Built 20190212 16:02 */
+require=function r(s,a,l){function c(e,n){if(!a[e]){if(!s[e]){var i="function"==typeof require&&require;if(!n&&i)return i(e,!0);if(u)return u(e,!0);var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}var o=a[e]={exports:{}};s[e][0].call(o.exports,function(n){return c(s[e][1][n]||n)},o,o.exports,r,s,a,l)}return a[e].exports}for(var u="function"==typeof require&&require,n=0;n<l.length;n++)c(l[n]);return c}({"sphinx-rtd-theme":[function(n,e,i){var jQuery="undefined"!=typeof window?window.jQuery:n("jquery");e.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(e){var i=this;void 0===e&&(e=!0),i.isRunning||(i.isRunning=!0,jQuery(function(n){i.init(n),i.reset(),i.win.on("hashchange",i.reset),e&&i.win.on("scroll",function(){i.linkScroll||i.winScroll||(i.winScroll=!0,requestAnimationFrame(function(){i.onScroll()}))}),i.win.on("resize",function(){i.winResize||(i.winResize=!0,requestAnimationFrame(function(){i.onResize()}))}),i.onResize()}))},enableSticky:function(){this.enable(!0)},init:function(i){i(document);var t=this;this.navBar=i("div.wy-side-scroll:first"),this.win=i(window),i(document).on("click","[data-toggle='wy-nav-top']",function(){i("[data-toggle='wy-nav-shift']").toggleClass("shift"),i("[data-toggle='rst-versions']").toggleClass("shift")}).on("click",".wy-menu-vertical .current ul li a",function(){var n=i(this);i("[data-toggle='wy-nav-shift']").removeClass("shift"),i("[data-toggle='rst-versions']").toggleClass("shift"),t.toggleCurrent(n),t.hashChange()}).on("click","[data-toggle='rst-current-version']",function(){i("[data-toggle='rst-versions']").toggleClass("shift-up")}),i("table.docutils:not(.field-list,.footnote,.citation)").wrap("<div class='wy-table-responsive'></div>"),i("table.docutils.footnote").wrap("<div class='wy-table-responsive footnote'></div>"),i("table.docutils.citation").wrap("<div class='wy-table-responsive citation'></div>"),i(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var e=i(this);expand=i('<span class="toctree-expand"></span>'),expand.on("click",function(n){return t.toggleCurrent(e),n.stopPropagation(),!1}),e.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}0<i.length&&($(".wy-menu-vertical .current").removeClass("current"),i.addClass("current"),i.closest("li.toctree-l1").addClass("current"),i.closest("li.toctree-l1").parent().addClass("current"),i.closest("li.toctree-l1").addClass("current"),i.closest("li.toctree-l2").addClass("current"),i.closest("li.toctree-l3").addClass("current"),i.closest("li.toctree-l4").addClass("current"),i[0].scrollIntoView())}catch(o){console.log("Error expanding nav for anchor",o)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,i=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var 
e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav,StickyNav:e.exports.ThemeNav}),function(){for(var r=0,n=["ms","moz","webkit","o"],e=0;e<n.length&&!window.requestAnimationFrame;++e)window.requestAnimationFrame=window[n[e]+"RequestAnimationFrame"],window.cancelAnimationFrame=window[n[e]+"CancelAnimationFrame"]||window[n[e]+"CancelRequestAnimationFrame"];window.requestAnimationFrame||(window.requestAnimationFrame=function(n,e){var i=(new Date).getTime(),t=Math.max(0,16-(i-r)),o=window.setTimeout(function(){n(i+t)},t);return r=i+t,o}),window.cancelAnimationFrame||(window.cancelAnimationFrame=function(n){clearTimeout(n)})}()},{jquery:"jquery"}]},{},["sphinx-rtd-theme"]);
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/theme.conf b/docs/docsite/_themes/sphinx_rtd_theme/theme.conf
new file mode 100644
index 00000000..822d7a37
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/theme.conf
@@ -0,0 +1,18 @@
+[theme]
+inherit = basic
+stylesheet = css/theme.css
+pygments_style = default
+
+[options]
+canonical_url =
+analytics_id =
+collapse_navigation = True
+sticky_navigation = True
+navigation_depth = 4
+includehidden = True
+titles_only =
+logo_only =
+display_version = True
+prev_next_buttons_location = bottom
+style_external_links = False
+vcs_pageview_mode =
diff --git a/docs/docsite/_themes/sphinx_rtd_theme/versions.html b/docs/docsite/_themes/sphinx_rtd_theme/versions.html
new file mode 100644
index 00000000..354cc376
--- /dev/null
+++ b/docs/docsite/_themes/sphinx_rtd_theme/versions.html
@@ -0,0 +1,36 @@
+{% if READTHEDOCS %}
+{# Add rst-badge after rst-versions for small badge style. #}
+ <div class="rst-versions" data-toggle="rst-versions" role="note" aria-label="versions">
+ <span class="rst-current-version" data-toggle="rst-current-version">
+ <span class="fa fa-book"> Read the Docs</span>
+ v: {{ current_version }}
+ <span class="fa fa-caret-down"></span>
+ </span>
+ <div class="rst-other-versions">
+ <dl>
+ <dt>{{ _('Versions') }}</dt>
+ {% for slug, url in versions %}
+ <dd><a href="{{ url }}">{{ slug }}</a></dd>
+ {% endfor %}
+ </dl>
+ <dl>
+ <dt>{{ _('Downloads') }}</dt>
+ {% for type, url in downloads %}
+ <dd><a href="{{ url }}">{{ type }}</a></dd>
+ {% endfor %}
+ </dl>
+ <dl>
+ <dt>{{ _('On Read the Docs') }}</dt>
+ <dd>
+ <a href="//{{ PRODUCTION_DOMAIN }}/projects/{{ slug }}/?fromdocs={{ slug }}">{{ _('Project Home') }}</a>
+ </dd>
+ <dd>
+ <a href="//{{ PRODUCTION_DOMAIN }}/builds/{{ slug }}/?fromdocs={{ slug }}">{{ _('Builds') }}</a>
+ </dd>
+ </dl>
+ <hr/>
+ {% trans %}Free document hosting provided by <a href="https://www.readthedocs.org">Read the Docs</a>.{% endtrans %}
+
+ </div>
+ </div>
+{% endif %}
diff --git a/docs/docsite/ansible_2_10.inv b/docs/docsite/ansible_2_10.inv
new file mode 100644
index 00000000..900df6e1
--- /dev/null
+++ b/docs/docsite/ansible_2_10.inv
Binary files differ
diff --git a/docs/docsite/ansible_2_5.inv b/docs/docsite/ansible_2_5.inv
new file mode 100644
index 00000000..05e5a2b0
--- /dev/null
+++ b/docs/docsite/ansible_2_5.inv
Binary files differ
diff --git a/docs/docsite/ansible_2_6.inv b/docs/docsite/ansible_2_6.inv
new file mode 100644
index 00000000..b84a2661
--- /dev/null
+++ b/docs/docsite/ansible_2_6.inv
Binary files differ
diff --git a/docs/docsite/ansible_2_7.inv b/docs/docsite/ansible_2_7.inv
new file mode 100644
index 00000000..81cea2cb
--- /dev/null
+++ b/docs/docsite/ansible_2_7.inv
Binary files differ
diff --git a/docs/docsite/ansible_2_8.inv b/docs/docsite/ansible_2_8.inv
new file mode 100644
index 00000000..1d8fcd07
--- /dev/null
+++ b/docs/docsite/ansible_2_8.inv
Binary files differ
diff --git a/docs/docsite/ansible_2_9.inv b/docs/docsite/ansible_2_9.inv
new file mode 100644
index 00000000..83badf67
--- /dev/null
+++ b/docs/docsite/ansible_2_9.inv
Binary files differ
diff --git a/docs/docsite/collection-plugins.yml b/docs/docsite/collection-plugins.yml
new file mode 100644
index 00000000..499274b4
--- /dev/null
+++ b/docs/docsite/collection-plugins.yml
@@ -0,0 +1,17 @@
+# We also need an example of modules hosted in Automation Hub
+# We'll likely move to data hosted in botmeta instead of a standalone file, but
+# we'll need all of these same details.
+module:
+ purefa_user:
+ source: 'https://galaxy.ansible.com/'
+ fqcn: 'purestorage.flasharray'
+ purefa_vg:
+ source: 'https://galaxy.ansible.com/'
+ fqcn: 'purestorage.flasharray'
+ gcp_compute_firewall_info:
+ source: 'https://galaxy.ansible.com/'
+ fqcn: 'google.cloud'
+module_utils:
+ purefa:
+ source: 'https://galaxy.ansible.com/'
+ fqcn: 'purestorage.flasharray'
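+
+# A hedged sketch for context (comments only, not part of the data above): once
+# a module lives in a collection, playbooks can call it by its FQCN, or list the
+# collection under the 'collections' keyword and keep the short name. The module
+# parameters shown are placeholders, not verified option names.
+#
+#   - hosts: localhost
+#     gather_facts: false
+#     collections:
+#       - purestorage.flasharray
+#     tasks:
+#       - name: Short name resolved through the collections search list
+#         purefa_vg:
+#           name: example-vg                  # placeholder parameter
+#       - name: The same module called by its FQCN
+#         purestorage.flasharray.purefa_vg:
+#           name: example-vg                  # placeholder parameter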
diff --git a/docs/docsite/jinja2.inv b/docs/docsite/jinja2.inv
new file mode 100644
index 00000000..552a9584
--- /dev/null
+++ b/docs/docsite/jinja2.inv
Binary files differ
diff --git a/docs/docsite/js/ansible/application.js b/docs/docsite/js/ansible/application.js
new file mode 100644
index 00000000..5e9f81ba
--- /dev/null
+++ b/docs/docsite/js/ansible/application.js
@@ -0,0 +1,106 @@
+angular.module('ansibleApp', []).filter('moduleVersion', function() {
+ return function(modules, version) {
+
+ var parseVersionString = function (str) {
+ if (typeof(str) != 'string') { return false; }
+ var x = str.split('.');
+      // parse each component, defaulting to 0 if it cannot be parsed
+      var maj = parseInt(x[0], 10) || 0;
+      var min = parseInt(x[1], 10) || 0;
+      var pat = parseInt(x[2], 10) || 0;
+ return {
+ major: maj,
+ minor: min,
+ patch: pat
+ }
+ }
+
+ var vMinMet = function(vmin, vcurrent) {
+      var minimum = parseVersionString(vmin);
+      var running = parseVersionString(vcurrent);
+ if (running.major != minimum.major)
+ return (running.major > minimum.major);
+ else {
+ if (running.minor != minimum.minor)
+ return (running.minor > minimum.minor);
+ else {
+ if (running.patch != minimum.patch)
+ return (running.patch > minimum.patch);
+ else
+ return true;
+ }
+ }
+ };
+
+ var result = [];
+ if (!version) {
+ return modules;
+ }
+ for (var i = 0; i < modules.length; i++) {
+ if (vMinMet(modules[i].version_added, version)) {
+ result[result.length] = modules[i];
+ }
+ }
+
+ return result;
+ };
+}).filter('uniqueVersion', function() {
+ return function(modules) {
+ var result = [];
+ var inArray = function (needle, haystack) {
+ var length = haystack.length;
+ for(var i = 0; i < length; i++) {
+ if(haystack[i] == needle) return true;
+ }
+ return false;
+ }
+
+ var parseVersionString = function (str) {
+ if (typeof(str) != 'string') { return false; }
+ var x = str.split('.');
+      // parse each component, defaulting to 0 if it cannot be parsed
+      var maj = parseInt(x[0], 10) || 0;
+      var min = parseInt(x[1], 10) || 0;
+      var pat = parseInt(x[2], 10) || 0;
+ return {
+ major: maj,
+ minor: min,
+ patch: pat
+ }
+ }
+
+ for (var i = 0; i < modules.length; i++) {
+ if (!inArray(modules[i].version_added, result)) {
+        // Some modules do not define a version
+ if (modules[i].version_added) {
+ result[result.length] = "" + modules[i].version_added;
+ }
+ }
+ }
+
+ result.sort(
+ function (a, b) {
+        var ao = parseVersionString(a);
+        var bo = parseVersionString(b);
+ if (ao.major == bo.major) {
+ if (ao.minor == bo.minor) {
+ if (ao.patch == bo.patch) {
+ return 0;
+ }
+ else {
+ return (ao.patch > bo.patch) ? 1 : -1;
+ }
+ }
+ else {
+ return (ao.minor > bo.minor) ? 1 : -1;
+ }
+ }
+ else {
+ return (ao.major > bo.major) ? 1 : -1;
+ }
+ });
+
+ return result;
+ };
+});
+
diff --git a/docs/docsite/keyword_desc.yml b/docs/docsite/keyword_desc.yml
new file mode 100644
index 00000000..8d5a0801
--- /dev/null
+++ b/docs/docsite/keyword_desc.yml
@@ -0,0 +1,79 @@
+accelerate: "*DEPRECATED*, set to True to use accelerate connection plugin."
+accelerate_ipv6: "*DEPRECATED*, set to True to force accelerate plugin to use ipv6 for its connection."
+accelerate_port: "*DEPRECATED*, set to override default port use for accelerate connection."
+action: "The 'action' to execute for a task, it normally translates into a C(module) or action plugin."
+args: "A secondary way to add arguments into a task. Takes a dictionary in which keys map to options and values."
+always: List of tasks, in a block, that execute no matter if there is an error in the block or not.
+any_errors_fatal: Force any un-handled task errors on any host to propagate to all hosts and end the play.
+async: Run a task asynchronously if the C(action) supports this; value is maximum runtime in seconds.
+become: Boolean that controls if privilege escalation is used or not on :term:`Task` execution. Implemented by the become plugin. See :ref:`become_plugins`.
+become_exe: Path to the executable used to elevate privileges. Implemented by the become plugin. See :ref:`become_plugins`.
+become_flags: A string of flag(s) to pass to the privilege escalation program when :term:`become` is True.
+become_method: Which method of privilege escalation to use (such as sudo or su).
+become_user: "User that you 'become' after using privilege escalation. The remote/login user must have permissions to become this user."
+block: List of tasks in a block.
+changed_when: "Conditional expression that overrides the task's normal 'changed' status."
+check_mode: A boolean that controls if a task is executed in 'check' mode. See :ref:`check_mode_dry`.
+collections: |
+
+ List of collection namespaces to search for modules, plugins, and roles. See :ref:`collections_using_playbook`
+
+ .. note::
+
+ Tasks within a role do not inherit the value of ``collections`` from the play. To have a role search a list of collections, use the ``collections`` keyword in ``meta/main.yml`` within a role.
+
+
+connection: Allows you to change the connection plugin used for tasks to execute on the target. See :ref:`using_connection`.
+debugger: Enable debugging tasks based on the state of the task result. See :ref:`playbook_debugger`.
+delay: Number of seconds to delay between retries. This setting is only used in combination with :term:`until`.
+delegate_facts: Boolean that allows you to apply facts to a delegated host instead of inventory_hostname.
+delegate_to: Host on which to execute the task instead of the target (inventory_hostname). Connection vars from the delegated host will also be used for the task.
+diff: "Toggle to make tasks return 'diff' information or not."
+environment: A dictionary that gets converted into environment variables to be provided to the task upon execution. This can ONLY be used with modules; it is not supported for any other type of plugin, nor for Ansible itself or its configuration. It just sets the variables for the code responsible for executing the task. This is not a recommended way to pass in confidential data.
+fact_path: Set the fact path option for the fact gathering plugin controlled by :term:`gather_facts`.
+failed_when: "Conditional expression that overrides the task's normal 'failed' status."
+force_handlers: Will force notified handler execution for hosts even if they failed during the play. Will not trigger if the play itself fails.
+gather_facts: "A boolean that controls if the play will automatically run the 'setup' task to gather facts for the hosts."
+gather_subset: Allows you to pass subset options to the fact gathering plugin controlled by :term:`gather_facts`.
+gather_timeout: Allows you to set the timeout for the fact gathering plugin controlled by :term:`gather_facts`.
+handlers: "A section with tasks that are treated as handlers, these won't get executed normally, only when notified after each section of tasks is complete. A handler's `listen` field is not templatable."
+hosts: "A list of groups, hosts or host pattern that translates into a list of hosts that are the play's target."
+ignore_errors: Boolean that allows you to ignore task failures and continue with play. It does not affect connection errors.
+ignore_unreachable: Boolean that allows you to ignore task failures due to an unreachable host and continue with the play. This does not affect other task errors (see :term:`ignore_errors`) but is useful for groups of volatile/ephemeral hosts.
+loop: "Takes a list for the task to iterate over, saving each list element into the ``item`` variable (configurable via loop_control)"
+loop_control: |
+ Several keys here allow you to modify/set loop behaviour in a task.
+
+ .. seealso:: :ref:`loop_control`
+
+max_fail_percentage: Can be used to abort the run after a given percentage of hosts in the current batch has failed.
+module_defaults: Specifies default parameter values for modules.
+name: "Identifier. Can be used for documentation, or in tasks/handlers."
+no_log: Boolean that controls information disclosure; when set to True, task arguments and results are censored in logs and output.
+notify: "List of handlers to notify when the task returns a 'changed=True' status."
+order: Controls the sorting of hosts as they are used for executing the play. Possible values are inventory (default), sorted, reverse_sorted, reverse_inventory and shuffle.
+poll: Sets the polling interval in seconds for async tasks (default 10s).
+port: Used to override the default port used in a connection.
+post_tasks: A list of tasks to execute after the :term:`tasks` section.
+pre_tasks: A list of tasks to execute before :term:`roles`.
+remote_user: User used to log into the target via the connection plugin.
+register: Name of variable that will contain task status and module return data.
+rescue: List of tasks in a :term:`block` that run if there is a task error in the main :term:`block` list.
+retries: "Number of retries before giving up in a :term:`until` loop. This setting is only used in combination with :term:`until`."
+roles: List of roles to be imported into the play.
+run_once: Boolean that will bypass the host loop, forcing the task to attempt to execute on the first host available and afterwards apply any results and facts to all active hosts in the same batch.
+serial: |
+  Explicitly define how Ansible batches the execution of the current play on the play's target.
+
+ .. seealso:: :ref:`rolling_update_batch_size`
+
+strategy: Allows you to choose the strategy plugin to use for the play.
+tags: Tags applied to the task or included tasks; this allows selecting subsets of tasks from the command line.
+tasks: Main list of tasks to execute in the play; they run after :term:`roles` and before :term:`post_tasks`.
+timeout: Time limit for the task to execute in; if exceeded, Ansible will interrupt and fail the task.
+throttle: Limit the number of concurrent task runs at the task, block, and playbook level. This is independent of the forks and serial settings, but cannot be set higher than those limits. For example, if forks is set to 10 and the throttle is set to 15, at most 10 hosts will be operated on in parallel.
+until: "This keyword implies a ':term:`retries` loop' that will go on until the condition supplied here is met or we hit the :term:`retries` limit."
+vars: Dictionary/map of variables.
+vars_files: List of files that contain vars to include in the play.
+vars_prompt: List of variables to prompt for.
+when: Conditional expression that determines if an iteration of a task is run or not.
diff --git a/docs/docsite/modules.js b/docs/docsite/modules.js
new file mode 100644
index 00000000..103bc2ca
--- /dev/null
+++ b/docs/docsite/modules.js
@@ -0,0 +1,5 @@
+function AnsibleModules($scope) {
+ $scope.modules = [];
+
+ $scope.orderProp = "module";
+} \ No newline at end of file
diff --git a/docs/docsite/python2.inv b/docs/docsite/python2.inv
new file mode 100644
index 00000000..7ea2dc1d
--- /dev/null
+++ b/docs/docsite/python2.inv
Binary files differ
diff --git a/docs/docsite/python3.inv b/docs/docsite/python3.inv
new file mode 100644
index 00000000..19216788
--- /dev/null
+++ b/docs/docsite/python3.inv
Binary files differ
diff --git a/docs/docsite/requirements.txt b/docs/docsite/requirements.txt
new file mode 100644
index 00000000..1290ac1d
--- /dev/null
+++ b/docs/docsite/requirements.txt
@@ -0,0 +1,9 @@
+# pip packages required to build the docsite
+jinja2
+PyYAML
+rstcheck
+sphinx==2.1.2
+sphinx-notfound-page
+Pygments >= 2.4.0
+straight.plugin # Needed for hacking/build-ansible.py which is the backend build script
+antsibull >= 0.15.0
diff --git a/docs/docsite/rst/404.rst b/docs/docsite/rst/404.rst
new file mode 100644
index 00000000..4a869d22
--- /dev/null
+++ b/docs/docsite/rst/404.rst
@@ -0,0 +1,12 @@
+:orphan:
+
+*****
+Oops!
+*****
+
+The version of the Ansible documentation you were looking at doesn't contain that page.
+
+.. image:: images/cow.png
+ :alt: Cowsay 404
+
+Use the back button to return to the version you were browsing, or use the navigation on the left to explore our latest release. Once you're on a non-404 page, you can use the version-changer to select a version.
diff --git a/docs/docsite/rst/api/index.rst b/docs/docsite/rst/api/index.rst
new file mode 100644
index 00000000..27afbe42
--- /dev/null
+++ b/docs/docsite/rst/api/index.rst
@@ -0,0 +1,107 @@
+:orphan:
+
+*************************
+Ansible API Documentation
+*************************
+
+The Ansible API is under construction. These stub references for attributes, classes, functions, methods, and modules will be documented in the future.
+The :ref:`module utilities <ansible.module_utils>` included in ``ansible.module_utils.basic`` and ``AnsibleModule`` are documented under Reference & Appendices.
+
+.. contents::
+ :local:
+
+Attributes
+==========
+
+.. py:attribute:: AnsibleModule.params
+
+The parameters accepted by the module.
+
+.. py:attribute:: ansible.module_utils.basic.ANSIBLE_VERSION
+
+.. py:attribute:: ansible.module_utils.basic.SELINUX_SPECIAL_FS
+
+Deprecated in favor of AnsibleModule._selinux_special_fs.
+
+.. py:attribute:: AnsibleModule.ansible_version
+
+.. py:attribute:: AnsibleModule._debug
+
+.. py:attribute:: AnsibleModule._diff
+
+.. py:attribute:: AnsibleModule.no_log
+
+.. py:attribute:: AnsibleModule._selinux_special_fs
+
+(formerly ansible.module_utils.basic.SELINUX_SPECIAL_FS)
+
+.. py:attribute:: AnsibleModule._syslog_facility
+
+.. py:attribute:: self.playbook
+
+.. py:attribute:: self.play
+
+.. py:attribute:: self.task
+
+.. py:attribute:: sys.path
+
+
+Classes
+=======
+
+.. py:class:: ansible.module_utils.basic.AnsibleModule
+ :noindex:
+
+The basic utilities for AnsibleModule.
+
+.. py:class:: AnsibleModule
+
+The main class for an Ansible module.
+
+
+Functions
+=========
+
+.. py:function:: ansible.module_utils.basic._load_params()
+
+Load parameters.
+
+
+Methods
+=======
+
+.. py:method:: AnsibleModule.log()
+
+Logs a message to the system log (syslog or the systemd journal).
+
+.. py:method:: AnsibleModule.debug()
+
+Logs a debug message when module debugging is enabled.
+
+.. py:method:: AnsibleModule.get_bin_path()
+
+Finds the path to an executable.
+
+.. py:method:: AnsibleModule.run_command()
+
+Runs a command within an Ansible module.
+
+.. py:method:: AnsibleModule.fail_json()
+
+Exits and returns a failure.
+
+.. py:method:: AnsibleModule.exit_json()
+
+Exits and returns output.
+
+
+Modules
+=======
+
+.. py:module:: ansible.module_utils
+
+.. py:module:: ansible.module_utils.basic
+ :noindex:
+
+
+.. py:module:: ansible.module_utils.url
diff --git a/docs/docsite/rst/community/code_of_conduct.rst b/docs/docsite/rst/community/code_of_conduct.rst
new file mode 100644
index 00000000..9462618d
--- /dev/null
+++ b/docs/docsite/rst/community/code_of_conduct.rst
@@ -0,0 +1,146 @@
+.. _code_of_conduct:
+
+*************************
+Community Code of Conduct
+*************************
+
+.. contents:: Topics
+
+Every community can be strengthened by a diverse variety of viewpoints, insights,
+opinions, skillsets, and skill levels. However, with diversity comes the potential for
+disagreement and miscommunication. The purpose of this Code of Conduct is to ensure that
+disagreements and differences of opinion are conducted respectfully and on their own
+merits, without personal attacks or other behavior that might create an unsafe or
+unwelcoming environment.
+
+These policies are not designed to be a comprehensive set of Things You Cannot Do. We ask
+that you treat your fellow community members with respect and courtesy, and in general,
+Don't Be A Jerk. This Code of Conduct is meant to be followed in spirit as much as in
+letter and is not exhaustive.
+
+All Ansible events and participants therein are governed by this Code of Conduct and
+anti-harassment policy. We expect organizers to enforce these guidelines throughout all events,
+and we expect attendees, speakers, sponsors, and volunteers to help ensure a safe
+environment for our whole community. Specifically, this Code of Conduct covers
+participation in all Ansible-related forums and mailing lists, code and documentation
+contributions, public IRC channels, private correspondence, and public meetings.
+
+Ansible community members are...
+
+**Considerate**
+
+Contributions of every kind have far-ranging consequences. Just as your work depends on
+the work of others, decisions you make surrounding your contributions to the Ansible
+community will affect your fellow community members. You are strongly encouraged to take
+those consequences into account while making decisions.
+
+**Patient**
+
+Asynchronous communication can come with its own frustrations, even in the most responsive
+of communities. Please remember that our community is largely built on volunteered time,
+and that questions, contributions, and requests for support may take some time to receive
+a response. Repeated "bumps" or "reminders" in rapid succession are not good displays of
+patience. Additionally, it is considered poor manners to ping a specific person with
+general questions. Pose your question to the community as a whole, and wait patiently for
+a response.
+
+**Respectful**
+
+Every community inevitably has disagreements, but remember that it is
+possible to disagree respectfully and courteously. Disagreements are never an excuse for
+rudeness, hostility, threatening behavior, abuse (verbal or physical), or personal attacks.
+
+**Kind**
+
+Everyone should feel welcome in the Ansible community, regardless of their background.
+Please be courteous, respectful and polite to fellow community members. Do not make or
+post offensive comments related to skill level, gender, gender identity or expression,
+sexual orientation, disability, physical appearance, body size, race, or religion.
+Sexualized images or imagery, real or implied violence, intimidation, oppression,
+stalking, sustained disruption of activities, publishing the personal information of
+others without explicit permission to do so, unwanted physical contact, and unwelcome
+sexual attention are all strictly prohibited. Additionally, you are encouraged not to
+make assumptions about the background or identity of your fellow community members.
+
+**Inquisitive**
+
+The only stupid question is the one that does not get asked. We
+encourage our users to ask early and ask often. Rather than asking whether you can ask a
+question (the answer is always yes!), instead, simply ask your question. You are
+encouraged to provide as many specifics as possible. Code snippets in the form of Gists or
+other paste site links are almost always needed in order to get the most helpful answers.
+Refrain from pasting multiple lines of code directly into the IRC channels - instead use
+gist.github.com or another paste site to provide code snippets.
+
+**Helpful**
+
+The Ansible community is committed to being a welcoming environment for all users,
+regardless of skill level. We were all beginners once upon a time, and our community
+cannot grow without an environment where new users feel safe and comfortable asking questions.
+It can become frustrating to answer the same questions repeatedly; however, community
+members are expected to remain courteous and helpful to all users equally, regardless of
+skill or knowledge level. Avoid providing responses that prioritize snideness and snark over
+useful information. At the same time, everyone is expected to read the provided
+documentation thoroughly. We are happy to answer questions, provide strategic guidance,
+and suggest effective workflows, but we are not here to do your job for you.
+
+Anti-harassment policy
+======================
+
+Harassment includes (but is not limited to) all of the following behaviors:
+
+- Offensive comments related to gender (including gender expression and identity), age, sexual orientation, disability, physical appearance, body size, race, and religion
+- Derogatory terminology including words commonly known to be slurs
+- Posting sexualized images or imagery in public spaces
+- Deliberate intimidation
+- Stalking
+- Posting others' personal information without explicit permission
+- Sustained disruption of talks or other events
+- Inappropriate physical contact
+- Unwelcome sexual attention
+
+Participants asked to stop any harassing behavior are expected to comply immediately.
+Sponsors are also subject to the anti-harassment policy. In particular, sponsors should
+not use sexualized images, activities, or other material. Meetup organizing staff and
+other volunteer organizers should not use sexualized attire or otherwise create a
+sexualized environment at community events.
+
+In addition to the behaviors outlined above, continuing to behave a certain way after you
+have been asked to stop also constitutes harassment, even if that behavior is not
+specifically outlined in this policy. It is considerate and respectful to stop doing
+something after you have been asked to stop, and all community members are expected to
+comply with such requests immediately.
+
+Policy violations
+=================
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting `codeofconduct@ansible.com <mailto:codeofconduct@ansible.com>`_, to any channel
+operator in the community IRC channels, or to the local organizers of an event. Meetup
+organizers are encouraged to prominently display points of contact for reporting unacceptable
+behavior at local events.
+
+If a participant engages in harassing behavior, the meetup organizers may take any action
+they deem appropriate. These actions may include but are not limited to warning the
+offender, expelling the offender from the event, and barring the offender from future
+community events.
+
+Organizers will be happy to help participants contact security or local law enforcement,
+provide escorts to an alternate location, or otherwise assist those experiencing
+harassment to feel safe for the duration of the meetup. We value the safety and well-being
+of our community members and want everyone to feel welcome at our events, both online and
+offline.
+
+We expect all participants, organizers, speakers, and attendees to follow these policies at
+all of our event venues and event-related social events.
+
+The Ansible Community Code of Conduct is licensed under the Creative Commons
+Attribution-Share Alike 3.0 license. Our Code of Conduct was adapted from Codes of Conduct
+of other open source projects, including:
+
+* Contributor Covenant
+* Elastic
+* The Fedora Project
+* OpenStack
+* Puppet Labs
+* Ubuntu
diff --git a/docs/docsite/rst/community/committer_guidelines.rst b/docs/docsite/rst/community/committer_guidelines.rst
new file mode 100644
index 00000000..2caa3a56
--- /dev/null
+++ b/docs/docsite/rst/community/committer_guidelines.rst
@@ -0,0 +1,156 @@
+.. _community_committer_guidelines:
+
+*********************
+Committers Guidelines
+*********************
+
+These are the guidelines for people with commit privileges on the Ansible GitHub repository. Committers are essentially acting as members of the Ansible Core team, although not necessarily as employees of Ansible and Red Hat. Please read the guidelines before you commit.
+
+These guidelines apply to everyone. At the same time, this ISN'T a process document. So just use good judgment. You've been given commit access because we trust your judgment.
+
+That said, use the trust wisely.
+
+If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.
+
+Features, high-level design, and roadmap
+========================================
+
+As a core team member, you are an integral part of the team that develops the :ref:`roadmap <roadmaps>`. Please be engaged, and push for the features and fixes that you want to see. Also keep in mind that Red Hat, as a company, will commit to certain features, fixes, APIs, and so on, for various releases. Red Hat, the company, and the Ansible team must get these changes completed and released as scheduled. Obligations to users, the community, and customers must come first. Because of these commitments, a feature you want to develop yourself may not get into a release if it affects a lot of other parts within Ansible.
+
+Any other new features and changes to high level design should go through the proposal process (TBD), to ensure the community and core team have had a chance to review the idea and approve it. The core team has sole responsibility for merging new features based on proposals.
+
+Our workflow on GitHub
+======================
+
+As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you're aware of the following workflow steps:
+
+* Fork the repository upon which you want to do some work to your own personal repository
+* Work on the specific branch upon which you need to commit
+* Create a Pull Request back to the Ansible repository and tag the people you would like to review; assign someone as the primary "owner" of your request
+* Adjust code as necessary based on the Comments provided
+* Ask someone on the Core Team to do a final review and merge
+
+Addendum to workflow for committers:
+------------------------------------
+
+The Core Team is aware that this can be a difficult process at times. Sometimes, the team breaks the rules by making direct commits or merging their own PRs. This section is a set of guidelines. If you're changing a comma in a doc, or making a very minor change, you can use your best judgment. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgment and make sure people on the team are aware of your work.
+
+Roles on Core
+=============
+* Core committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may be merged at the judgment of these devs.
+* :ref:`Module maintainers <maintainers>`: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms.
+
+General rules
+=============
+Individuals with direct commit access to ansible/ansible are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*; individuals with this power are expected to use their best judgment.
+
+* Don't
+
+ - Commit directly.
+ - Merge your own PRs. Someone else should have a chance to review and approve the PR merge. If you are a Core Committer, you have a small amount of leeway here for very minor changes.
+ - Forget about alternate environments. Consider the alternatives--yes, people have bad environments, but they are the ones who need us the most.
+ - Drag your community team members down. Always discuss the technical merits, but you should never address the person's limitations (you can later go for beers and call them idiots, but not in IRC/GitHub/and so on).
+ - Forget about the maintenance burden. Some things are really cool to have, but they might not be worth shoehorning in if the maintenance burden is too great.
+ - Break playbooks. Always keep backwards compatibility in mind.
+ - Forget to keep it simple. Complexity breeds all kinds of problems.
+
+* Do
+
+  - Squash and avoid merges whenever possible; use GitHub's squash commits or cherry-pick if needed (bisect thanks you).
+  - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
+  - Consider backwards compatibility (goes back to "don't break existing playbooks").
+  - Write tests. PRs with tests are looked at with more priority than PRs without tests that should have them included. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
+  - Discuss with other committers, especially when you are unsure of something.
+  - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so. It also helps to add the version of ``ansible-base`` against which this documentation is compatible (to avoid confusion between stable and devel docs, for backwards compatibility, and so on).
+  - Consider scope; sometimes a fix can be generalized.
+  - Keep it simple; simple things are maintainable, debuggable, and intelligible.
+
+Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.
+
+
+People
+======
+
+Individuals who've been asked to become a part of this group have generally been contributing in significant ways to the Ansible community for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.
+
++---------------------+----------------------+--------------------+----------------------+
+| Name | GitHub ID | IRC Nick | Other |
++=====================+======================+====================+======================+
+| James Cammarata | jimi-c | jimi | |
++---------------------+----------------------+--------------------+----------------------+
+| Brian Coca | bcoca | bcoca | |
++---------------------+----------------------+--------------------+----------------------+
+| Matt Davis | nitzmahone | nitzmahone | |
++---------------------+----------------------+--------------------+----------------------+
+| Toshio Kuratomi | abadger | abadger1999 | |
++---------------------+----------------------+--------------------+----------------------+
+| Jason McKerr | mckerrj | newtMcKerr | |
++---------------------+----------------------+--------------------+----------------------+
+| Robyn Bergeron | robynbergeron | rbergeron | |
++---------------------+----------------------+--------------------+----------------------+
+| Greg DeKoenigsberg | gregdek | gregdek | |
++---------------------+----------------------+--------------------+----------------------+
+| Monty Taylor | emonty | mordred | |
++---------------------+----------------------+--------------------+----------------------+
+| Matt Martz | sivel | sivel | |
++---------------------+----------------------+--------------------+----------------------+
+| Nate Case | qalthos | Qalthos | |
++---------------------+----------------------+--------------------+----------------------+
+| James Tanner | jctanner | jtanner | |
++---------------------+----------------------+--------------------+----------------------+
+| Peter Sprygada | privateip | privateip | |
++---------------------+----------------------+--------------------+----------------------+
+| Abhijit Menon-Sen | amenonsen | crab | |
++---------------------+----------------------+--------------------+----------------------+
+| Michael Scherer | mscherer | misc | |
++---------------------+----------------------+--------------------+----------------------+
+| René Moser | resmo | resmo | |
++---------------------+----------------------+--------------------+----------------------+
+| David Shrewsbury | Shrews | Shrews | |
++---------------------+----------------------+--------------------+----------------------+
+| Sandra Wills | docschick | docschick | |
++---------------------+----------------------+--------------------+----------------------+
+| Graham Mainwaring | ghjm | | |
++---------------------+----------------------+--------------------+----------------------+
+| Chris Houseknecht | chouseknecht | | |
++---------------------+----------------------+--------------------+----------------------+
+| Trond Hindenes | trondhindenes | | |
++---------------------+----------------------+--------------------+----------------------+
+| Jon Hawkesworth | jhawkesworth | jhawkesworth | |
++---------------------+----------------------+--------------------+----------------------+
+| Will Thames | willthames | willthames | |
++---------------------+----------------------+--------------------+----------------------+
+| Adrian Likins | alikins | alikins | |
++---------------------+----------------------+--------------------+----------------------+
+| Dag Wieers | dagwieers | dagwieers | dag@wieers.com |
++---------------------+----------------------+--------------------+----------------------+
+| Tim Rupp | caphrim007 | caphrim007 | |
++---------------------+----------------------+--------------------+----------------------+
+| Sloane Hertel | s-hertel | shertel | |
++---------------------+----------------------+--------------------+----------------------+
+| Sam Doran | samdoran | samdoran | |
++---------------------+----------------------+--------------------+----------------------+
+| Matt Clay | mattclay | mattclay | |
++---------------------+----------------------+--------------------+----------------------+
+| Martin Krizek | mkrizek | mkrizek | |
++---------------------+----------------------+--------------------+----------------------+
+| Ganesh Nalawade | ganeshrn | ganeshrn | |
++---------------------+----------------------+--------------------+----------------------+
+| Trishna Guha | trishnaguha | trishnag | |
++---------------------+----------------------+--------------------+----------------------+
+| Andrew Gaffney | agaffney | agaffney | |
++---------------------+----------------------+--------------------+----------------------+
+| Jordan Borean | jborean93 | jborean93 | |
++---------------------+----------------------+--------------------+----------------------+
+| Abhijeet Kasurde | Akasurde | akasurde | |
++---------------------+----------------------+--------------------+----------------------+
+| Adam Miller | maxamillion | maxamillion | |
++---------------------+----------------------+--------------------+----------------------+
+| Sviatoslav Sydorenko| webknjaz | webknjaz | |
++---------------------+----------------------+--------------------+----------------------+
+| Alicia Cozine | acozine | acozine | |
++---------------------+----------------------+--------------------+----------------------+
+| Sandra McCann | samccann | samccann | |
++---------------------+----------------------+--------------------+----------------------+
+| Felix Fontein | felixfontein | felixfontein | felix@fontein.de |
++---------------------+----------------------+--------------------+----------------------+
diff --git a/docs/docsite/rst/community/communication.rst b/docs/docsite/rst/community/communication.rst
new file mode 100644
index 00000000..29f8898f
--- /dev/null
+++ b/docs/docsite/rst/community/communication.rst
@@ -0,0 +1,103 @@
+.. _communication:
+
+*************
+Communicating
+*************
+
+.. contents::
+ :local:
+
+Code of Conduct
+===============
+
+Please read and understand the :ref:`code_of_conduct`.
+
+Mailing list information
+========================
+
+Ansible has several mailing lists. Your first post to the mailing list will be moderated (to reduce spam), so please allow up to a day or so for it to appear.
+
+* `Ansible Announce list <https://groups.google.com/forum/#!forum/ansible-announce>`_ is a read-only list that shares information about new releases of Ansible, as well as infrequent event information, such as announcements about an upcoming AnsibleFest, which is our official conference series. Worth subscribing to!
+* `Ansible AWX List <https://groups.google.com/forum/#!forum/awx-project>`_ is for `Ansible AWX <https://github.com/ansible/awx>`_, the upstream version of `Red Hat Ansible Tower <https://www.ansible.com/products/tower>`_.
+* `Ansible Container List <https://groups.google.com/forum/#!forum/ansible-container>`_ is for users and developers of the Ansible Container project.
+* `Ansible Development List <https://groups.google.com/forum/#!forum/ansible-devel>`_ is for learning how to develop on Ansible, asking about prospective feature design, or discussions about extending Ansible or features in progress.
+* `Ansible Lockdown List <https://groups.google.com/forum/#!forum/ansible-lockdown>`_ is for all things related to Ansible Lockdown projects, including DISA STIG automation and CIS Benchmarks.
+* `Ansible Outreach List <https://groups.google.com/forum/#!forum/ansible-outreach>`_ is for help with promoting Ansible and `Ansible Meetups <https://ansible.meetup.com/>`_.
+* `Ansible Project List <https://groups.google.com/forum/#!forum/ansible-project>`_ is for sharing Ansible tips, answering questions, and general user discussion.
+* `Molecule Discussions <https://github.com/ansible-community/molecule/discussions>`_ is designed to aid with the development and testing of Ansible roles with Molecule.
+
+To subscribe to a group from a non-Google account, you can send an email to the subscription address requesting the subscription. For example: ``ansible-devel+subscribe@googlegroups.com``
+
+.. _communication_irc:
+
+IRC channels
+============
+
+Ansible has several IRC channels on `Freenode <https://freenode.net/>`_.
+
+Our IRC channels may require you to register your nickname. If you receive an error when you connect, see `Freenode's Nickname Registration guide <https://freenode.net/kb/answer/registration>`_ for instructions.
+
+To find all ``ansible``-specific channels on the Freenode network, use the following command in your IRC client::
+
+   /msg alis LIST #ansible* -min 5
+
+as described in the `Freenode docs <https://freenode.net/kb/answer/findingchannels>`_.
+
+General channels
+----------------
+
+- ``#ansible`` - For general use questions and support.
+- ``#ansible-devel`` - For discussions on developer topics and code related to features or bugs.
+- ``#ansible-meeting`` - For public community meetings. We will generally announce these on one or more of the above mailing lists. See the `meeting schedule and agenda page <https://github.com/ansible/community/blob/master/meetings/README.md>`_
+
+.. _working_group_list:
+
+Working groups
+--------------
+
+Many of our community `Working Groups <https://github.com/ansible/community/wiki#working-groups>`_ meet on Freenode IRC channels. If you want to get involved in a working group, join the channel where it meets or comment on the agenda.
+
+- `Amazon (AWS) Working Group <https://github.com/ansible/community/wiki/AWS>`_ - ``#ansible-aws``
+- `Ansible Lockdown Working Group <https://github.com/ansible/community/wiki/Lockdown>`_ | `gh/ansible/ansible-lockdown <https://github.com/ansible/ansible-lockdown>`_ - ``#ansible-lockdown`` - Security playbooks/roles
+- `AWX Working Group <https://github.com/ansible/awx>`_ - ``#ansible-awx`` - Upstream for Ansible Tower
+- `Azure Working Group <https://github.com/ansible/community/wiki/Azure>`_ - ``#ansible-azure``
+- `Community Working Group <https://github.com/ansible/community/wiki/Community>`_ - ``#ansible-community`` - Including Meetups
+- `Container Working Group <https://github.com/ansible/community/wiki/Container>`_ - ``#ansible-container``
+- `Contributor Experience Working Group <https://github.com/ansible/community/wiki/Contributor-Experience>`_ - ``#ansible-community``
+- `Docker Working Group <https://github.com/ansible/community/wiki/Docker>`_ - ``#ansible-devel``
+- `Documentation Working Group <https://github.com/ansible/community/wiki/Docs>`_ - ``#ansible-docs``
+- `Galaxy Working Group <https://github.com/ansible/community/wiki/Galaxy>`_ - ``#ansible-galaxy``
+- `JBoss Working Group <https://github.com/ansible/community/wiki/JBoss>`_ - ``#ansible-jboss``
+- `Kubernetes Working Group <https://github.com/ansible/community/wiki/Kubernetes>`_ - ``#ansible-kubernetes``
+- `Lightbulb Training <https://github.com/ansible/lightbulb>`_ - ``#ansible-lightbulb`` - Ansible training
+- `Linode Working Group <https://github.com/ansible/community/wiki/Linode>`_ - ``#ansible-linode``
+- `Molecule Working Group <https://github.com/ansible/community/wiki/Molecule>`_ | `molecule.io <https://molecule.readthedocs.io>`_ - ``#ansible-molecule`` - testing platform for Ansible playbooks and roles
+- `Network Working Group <https://github.com/ansible/community/wiki/Network>`_ - ``#ansible-network``
+- `Remote Management Working Group <https://github.com/ansible/community/issues/409>`_ - ``#ansible-devel``
+- `Testing Working Group <https://github.com/ansible/community/wiki/Testing>`_ - ``#ansible-devel``
+- `VMware Working Group <https://github.com/ansible/community/wiki/VMware>`_ - ``#ansible-vmware``
+- `Windows Working Group <https://github.com/ansible/community/wiki/Windows>`_ - ``#ansible-windows``
+
+Want to `form a new Working Group <https://github.com/ansible/community/blob/master/WORKING-GROUPS.md>`_?
+
+Regional and Language-specific channels
+---------------------------------------
+
+- ``#ansible-es`` - Channel for Spanish speaking Ansible community.
+- ``#ansible-eu`` - Channel for the European Ansible Community.
+- ``#ansible-fr`` - Channel for French speaking Ansible community.
+- ``#ansiblezh`` - Channel for Zurich/Swiss Ansible community.
+
+IRC meetings
+------------
+
+The Ansible community holds regular IRC meetings on various topics, and anyone who is interested is invited to
+participate. For more information about Ansible meetings, consult the `meeting schedule and agenda page <https://github.com/ansible/community/blob/master/meetings/README.md>`_.
+
+Ansible Tower support questions
+===============================
+
+Red Hat Ansible `Tower <https://www.ansible.com/products/tower>`_ is a UI, server, and REST endpoint for Ansible.
+The Red Hat Ansible Automation subscription contains support for Ansible, Ansible Tower, Ansible Automation for Networking, and more.
+
+If you have a question about Ansible Tower, visit `Red Hat support <https://access.redhat.com/products/ansible-tower-red-hat/>`_ rather than using the IRC channel or the general project mailing list.
diff --git a/docs/docsite/rst/community/community.rst b/docs/docsite/rst/community/community.rst
new file mode 100644
index 00000000..5dadb7bc
--- /dev/null
+++ b/docs/docsite/rst/community/community.rst
@@ -0,0 +1,6 @@
+:orphan:
+
+Community Information & Contributing
+````````````````````````````````````
+
+This page is deprecated. Please see the updated :ref:`Ansible Community Guide <ansible_community_guide>`.
diff --git a/docs/docsite/rst/community/contributing_maintained_collections.rst b/docs/docsite/rst/community/contributing_maintained_collections.rst
new file mode 100644
index 00000000..f508d145
--- /dev/null
+++ b/docs/docsite/rst/community/contributing_maintained_collections.rst
@@ -0,0 +1,271 @@
+
+.. _contributing_maintained_collections:
+
+***********************************************
+Contributing to Ansible-maintained Collections
+***********************************************
+
+The Ansible team welcomes community contributions to the collections maintained by Red Hat Ansible Engineering. This section describes how you can open issues and create PRs with the required testing before your PR can be merged.
+
+.. contents::
+ :local:
+
+Ansible-maintained collections
+=================================
+
+The following table shows:
+
+* **Ansible-maintained collection** - Click the link to the collection on Galaxy, then click the ``repo`` button in Galaxy to find the GitHub repository for this collection.
+* **Related community collection** - Collection that holds community-created content (modules, roles, and so on) that may also be of interest to a user of the Ansible-maintained collection. You can, for example, add new modules to the community collection as a technical preview before the content is moved to the Ansible-maintained collection.
+* **Sponsor** - Working group that manages the collections. You can join the meetings to discuss important proposed changes and enhancements to the collections.
+* **Test requirements** - Testing required for any new or changed content for the Ansible-maintained collection.
+* **Developer details** - Describes whether the Ansible-maintained collection accepts direct community issues and PRs for existing collection content, as well as more specific developer guidelines based on the collection type.
+
+
+.. _ansible-collection-table:
+
+.. raw:: html
+
+ <style>
+ /* Style for this single table. Add delimiters between header columns */
+ table#ansible-collection-table th {
+ border-width: 1px;
+ border-color: #dddddd /*rgb(225, 228, 229)*/;
+ border-style: solid;
+ text-align: center;
+ padding: 5px;
+ background-color: #eeeeee;
+ }
+ tr, td {
+ border-width: 1px;
+ border-color: rgb(225, 228, 229);
+ border-style: solid;
+ text-align: center;
+ padding: 5px;
+
+ }
+ </style>
+
+ <table id="ansible-collection-table">
+ <tr>
+ <th colspan="3">Collection details</th>
+ <th colspan="4">Test requirements: Ansible collections</th>
+ <th colspan="2">Developer details</th>
+ </tr>
+ <tr>
+ <th>Ansible collection</th>
+ <th>Related community collection</th>
+ <th>Sponsor</th>
+ <th>Sanity</th>
+ <th>Unit</th>
+ <th>Integration</th>
+ <th>CI Platform</th>
+ <th>Open to PRs*</th>
+ <th>Guidelines</th>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/amazon/aws">amazon.aws</a></td>
+ <td><a href="https://galaxy.ansible.com/community/aws">community.aws</a></td>
+ <td><a href="https://github.com/ansible/community/tree/master/group-aws">Cloud</a></td>
+ <td>✓**</td>
+ <td>**</td>
+ <td>✓</td>
+ <td>Shippable</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html">AWS guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/ansible/netcommon">ansible.netcommon***</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/ansible/posix">ansible.posix</a></td>
+ <td><a href="https://galaxy.ansible.com/community/general">community.general</a></td>
+      <td>Linux</td>
+ <td>✓</td>
+ <td></td>
+ <td></td>
+ <td>Shippable</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/ansible/windows">ansible.windows</a></td>
+ <td><a href="https://galaxy.ansible.com/community/windows">community.windows</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Windows">Windows</a></td>
+ <td>✓</td>
+ <td>✓****</td>
+ <td>✓</td>
+ <td>Shippable</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_general_windows.html#developing-modules-general-windows">Windows guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/arista/eos">arista.eos</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/cisco/asa">cisco.asa</a></td>
+ <td><a href="https://github.com/ansible-collections/community.asa">community.asa</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Security-Automation">Security</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/cisco/ios">cisco.ios</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/cisco/iosxr">cisco.iosxr</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/cisco/nxos">cisco.nxos</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/ibm/qradar">ibm.qradar</a></td>
+ <td><a href="https://github.com/ansible-collections/community.qradar">community.qradar</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Security-Automation">Security</a></td>
+ <td>✓</td>
+ <td></td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/junipernetworks/junos">junipernetworks.junos</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/openvswitch/openvswitch">openvswitch.openvswitch</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://github.com/ansible-collections/splunk.es">splunk.es</a></td>
+ <td><a href="https://github.com/ansible-collections/community.es">community.es</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Security-Automation">Security</a></td>
+ <td>✓</td>
+ <td></td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/latest/dev_guide/index.html">Developer guide</a></td>
+ </tr>
+ <tr>
+ <td><a href="https://galaxy.ansible.com/vyos/vyos">vyos.vyos</a></td>
+ <td><a href="https://galaxy.ansible.com/community/network">community.network</a></td>
+ <td><a href="https://github.com/ansible/community/wiki/Network">Network</a></td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>✓</td>
+ <td>Zuul</td>
+ <td>✓</td>
+ <td><a href="https://docs.ansible.com/ansible/devel/network/dev_guide/index.html">Network guide</a></td>
+ </tr>
+ </table>
+
+
+.. note::
+
+ \* A ✓ under **Open to PRs** means the collection welcomes GitHub issues and PRs for any changes to existing collection content (plugins, roles, and so on).
+
+   \*\* Integration tests are required and unit tests are welcomed but not required for the AWS collections. An exception to this is made in cases where integration tests are logistically not feasible due to external requirements. An example of this is AWS Direct Connect, as this service cannot be functionally tested without the establishment of network peering connections. Unit tests are therefore required for modules that interact with AWS Direct Connect. Exceptions to ``amazon.aws`` must be approved by Red Hat, and exceptions to ``community.aws`` must be approved by the AWS community.
+
+   \*\*\* ``ansible.netcommon`` contains all foundational components for enabling many network and security :ref:`platform <platform_options>` collections. It contains all connection and filter plugins required, and installs as a dependency when you install the platform collection (see the sketch below).
+
+   \*\*\*\* Unit tests for Windows PowerShell modules are an exception to this requirement, but unit tests are valid and required for the remainder of the collection, including Ansible-side plugins.
+
+
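+A minimal sketch of that dependency relationship (the collection and version
+numbers below are illustrative): a platform collection's ``galaxy.yml`` pins
+``ansible.netcommon``, so installing the platform collection pulls it in
+automatically::
+
+    # galaxy.yml of a hypothetical platform collection
+    namespace: cisco
+    name: ios
+    version: 1.0.0
+    dependencies:
+      ansible.netcommon: ">=1.0.0"
+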
+.. _which_collection:
+
+Deciding where your contribution belongs
+=========================================
+
+We welcome contributions to Ansible-maintained collections. Because these collections are part of a downstream supported Red Hat product, the criteria for contribution, testing, and release may be higher than for other community collections. The related community collections (such as ``community.general`` and ``community.network``) have less-stringent requirements and are a great place for new functionality that may become part of the Ansible-maintained collection in a future release.
+
+The following scenarios use the ``arista.eos`` collection to help explain when to contribute to the Ansible-maintained collection, and when to propose your change or idea to the related community collection:
+
+
+1. You want to fix a problem in the ``arista.eos`` Ansible-maintained collection. Create the PR directly in the `arista.eos collection GitHub repository <https://github.com/ansible-collections/arista.eos>`_. Apply all the :ref:`merge requirements <ansible_collection_merge_requirements>`.
+
+2. You want to add a new Ansible module for Arista. Your options are one of the following:
+
+ * Propose a new module in the ``arista.eos`` collection (requires approval from Arista and Red Hat).
+ * Propose a new collection in the ``arista`` namespace (requires approval from Arista and Red Hat).
+ * Propose a new module in the ``community.network`` collection (requires network community approval).
+ * Place your new module in a collection in your own namespace (no approvals required).
+
+
+Most new content should go into either a related community collection or your own collection first, so that it is well established in the community before you propose adding it to the ``arista`` namespace, where inclusion and maintenance criteria are much higher.
+
+
+.. _ansible_collection_merge_requirements:
+
+Requirements to merge your PR
+==============================
+
+Your PR must meet the following requirements before it can be merged into an Ansible-maintained collection:
+
+
+#. The PR is in the intended scope of the collection. Communicate with the appropriate Ansible sponsor listed in the :ref:`Ansible-maintained collection table <ansible-collection-table>` for help.
+#. For network and security domains, the PR follows the :ref:`resource module development principles <developing_resource_modules>`.
+#. Passes :ref:`sanity tests and tox <tox_resource_modules>`.
+#. Passes unit and integration tests, as listed in the :ref:`Ansible-maintained collection table <ansible-collection-table>` and described in :ref:`testing_resource_modules`.
+#. Follows Ansible guidelines. See :ref:`developing_modules` and :ref:`developing_collections`.
+#. Addresses all review comments.
+#. Includes an appropriate :ref:`changelog <community_changelogs>`.
diff --git a/docs/docsite/rst/community/contributor_license_agreement.rst b/docs/docsite/rst/community/contributor_license_agreement.rst
new file mode 100644
index 00000000..b0a0f117
--- /dev/null
+++ b/docs/docsite/rst/community/contributor_license_agreement.rst
@@ -0,0 +1,7 @@
+.. _contributor_license_agreement:
+
+******************************
+Contributor License Agreement
+******************************
+
+By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable copyright license to all users and developers of the project, present and future, pursuant to the license of the project.
diff --git a/docs/docsite/rst/community/development_process.rst b/docs/docsite/rst/community/development_process.rst
new file mode 100644
index 00000000..fc3c987a
--- /dev/null
+++ b/docs/docsite/rst/community/development_process.rst
@@ -0,0 +1,277 @@
+.. _community_development_process:
+
+*****************************
+The Ansible Development Cycle
+*****************************
+
+Ansible developers (including community contributors) add new features, fix bugs, and update code in many different repositories. The `ansible/ansible repository <https://github.com/ansible/ansible>`_ contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-base``. Other repositories contain plugins and modules that enable Ansible to execute specific tasks, like adding a user to a particular database or configuring a particular network device. These repositories contain the source code for collections.
+
+Development on ``ansible-base`` occurs on two levels. At the macro level, the ``ansible-base`` developers and maintainers plan releases and track progress with roadmaps and projects. At the micro level, each PR has its own lifecycle.
+
+Development on collections also occurs at the macro and micro levels. Each collection has its own macro development cycle. For more information on the collections development cycle, see :ref:`contributing_maintained_collections`. The micro-level lifecycle of a PR is similar in collections and in ``ansible-base``.
+
+.. contents::
+ :local:
+
+Macro development: ``ansible-base`` roadmaps, releases, and projects
+=====================================================================
+
+If you want to follow the conversation about what features will be added to ``ansible-base`` for upcoming releases and what bugs are being fixed, you can watch these resources:
+
+* the :ref:`roadmaps`
+* the :ref:`Ansible Release Schedule <release_and_maintenance>`
+* various GitHub `projects <https://github.com/ansible/ansible/projects>`_ - for example:
+
+ * the `2.10 release project <https://github.com/ansible/ansible/projects/39>`_
+ * the `network bugs project <https://github.com/ansible/ansible/projects/20>`_
+ * the `core documentation project <https://github.com/ansible/ansible/projects/27>`_
+
+.. _community_pull_requests:
+
+Micro development: the lifecycle of a PR
+========================================
+
+If you want to contribute a feature or fix a bug in ``ansible-base`` or in a collection, you must open a **pull request** ("PR" for short). GitHub provides a great overview of `how the pull request process works <https://help.github.com/articles/about-pull-requests/>`_ in general. The ultimate goal of any pull request is to get merged and become part of a collection or ``ansible-base``.
+Here's an overview of the PR lifecycle:
+
+* Contributor opens a PR
+* Ansibot reviews the PR
+* Ansibot assigns labels
+* Ansibot pings maintainers
+* Shippable runs the test suite
+* Developers, maintainers, community review the PR
+* Contributor addresses any feedback from reviewers
+* Developers, maintainers, community re-review
+* PR merged or closed
+
+Automated PR review: ansibullbot
+--------------------------------
+
+Because Ansible receives many pull requests, and because we love automating things, we have automated several steps of the process of reviewing and merging pull requests with a tool called Ansibullbot, or Ansibot for short.
+
+`Ansibullbot <https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md>`_ serves many functions:
+
+- Responds quickly to PR submitters to thank them for submitting their PR
+- Identifies the community maintainer responsible for reviewing PRs for any files affected
+- Tracks the current status of PRs
+- Pings responsible parties to remind them of any PR actions for which they may be responsible
+- Provides maintainers with the ability to move PRs through the workflow
+- Identifies PRs abandoned by their submitters so that we can close them
+- Identifies modules abandoned by their maintainers so that we can find new maintainers
+
+Ansibot workflow
+^^^^^^^^^^^^^^^^
+
+Ansibullbot runs continuously. You can generally expect to see changes to your issue or pull request within thirty minutes. Ansibullbot examines every open pull request in the repositories, and enforces state roughly according to the following workflow:
+
+- If a pull request has no workflow labels, it's considered **new**. Files in the pull request are identified, and the maintainers of those files are pinged by the bot, along with instructions on how to review the pull request. (Note: sometimes we strip labels from a pull request to "reboot" this process.)
+- If the module maintainer is not ``$team_ansible``, the pull request then goes into the **community_review** state.
+- If the module maintainer is ``$team_ansible``, the pull request then goes into the **core_review** state (and probably sits for a while).
+- If the pull request is in **community_review** and has received comments from the maintainer:
+
+ - If the maintainer says ``shipit``, the pull request is labeled **shipit**, whereupon the Core team assesses it for final merge.
+ - If the maintainer says ``needs_info``, the pull request is labeled **needs_info** and the submitter is asked for more info.
+  - If the maintainer says ``needs_revision``, the pull request is labeled **needs_revision** and the submitter is asked to fix some things.
+
+- If the submitter says ``ready_for_review``, the pull request is put back into **community_review** or **core_review** and the maintainer is notified that the pull request is ready to be reviewed again.
+- If the pull request is labeled **needs_revision** or **needs_info** and the submitter has not responded lately:
+
+  - The submitter is first politely pinged after two weeks, pinged again after two more weeks and labeled **pending_action**, and the issue or pull request will be closed two weeks after that.
+ - If the submitter responds at all, the clock is reset.
+- If the pull request is labeled **community_review** and the reviewer has not responded lately:
+
+ - The reviewer is first politely pinged after two weeks, pinged again after two more weeks and labeled **pending_action**, and then may be reassigned to ``$team_ansible`` or labeled **core_review**, or often the submitter of the pull request is asked to step up as a maintainer.
+- If Shippable tests fail, or if the code cannot be merged, the pull request is automatically put into **needs_revision** along with a message to the submitter explaining why.
+
+There are corner cases and frequent refinements, but this is the workflow in general.
+
+PR labels
+^^^^^^^^^
+
+There are generally two types of PR labels: **workflow** labels and **information** labels.
+
+Workflow labels
+"""""""""""""""
+
+- **community_review**: Pull requests for modules that are currently awaiting review by their maintainers in the Ansible community.
+- **core_review**: Pull requests for modules that are currently awaiting review by their maintainers on the Ansible Core team.
+- **needs_info**: Waiting on info from the submitter.
+- **needs_rebase**: Waiting on the submitter to rebase.
+- **needs_revision**: Waiting on the submitter to make changes.
+- **shipit**: Waiting for final review by the core team for potential merge.
+
+Information labels
+""""""""""""""""""
+
+- **backport**: this is applied automatically if the PR is requested against any branch that is not ``devel``. The bot immediately assigns the labels ``backport`` and ``core_review``.
+- **bugfix_pull_request**: applied by the bot based on the templatized description of the PR.
+- **cloud**: applied by the bot based on the paths of the modified files.
+- **docs_pull_request**: applied by the bot based on the templatized description of the PR.
+- **easyfix**: applied manually, inconsistently used but sometimes useful.
+- **feature_pull_request**: applied by the bot based on the templatized description of the PR.
+- **networking**: applied by the bot based on the paths of the modified files.
+- **owner_pr**: largely deprecated. Formerly workflow, now informational. Originally, PRs submitted by the maintainer would automatically go to **shipit** based on this label. If the submitter is also a maintainer, we notify the other maintainers and still require one of the maintainers (including the submitter) to give a **shipit**.
+- **pending_action**: applied by the bot to PRs that are not moving. Reviewed every couple of weeks by the community team, who tries to figure out the appropriate action (closure, asking for new maintainers, and so on).
+
+
+Special labels
+""""""""""""""
+
+- **new_plugin**: this is for new modules or plugins that are not yet in Ansible.
+
+**Note:** ``new_plugin`` kicks off a completely separate process, and frankly it doesn't work very well at present. We're doing our best to improve this process.
+
+Human PR review
+---------------
+
+After Ansibot reviews the PR and applies labels, the PR is ready for human review. The most likely reviewers for any PR are the maintainers for the module that PR modifies.
+
+Each module has at least one assigned :ref:`maintainer <maintainers>`, listed in the `BOTMETA.yml <https://github.com/ansible/ansible/blob/devel/.github/BOTMETA.yml>`_ file.
+
+The maintainer's job is to review PRs that affect that module and decide whether they should be merged (``shipit``) or revised (``needs_revision``). We'd like to have at least one community maintainer for every module. If a module has no community maintainers assigned, the maintainer is listed as ``$team_ansible``.
+
+Once a human applies the ``shipit`` label, the :ref:`committers <community_committer_guidelines>` decide whether the PR is ready to be merged. Not every PR that gets the ``shipit`` label is actually ready to be merged, but the better our reviewers are, and the better our guidelines are, the more likely it will be that a PR that reaches **shipit** will be mergeable.
+
+
+Making your PR merge-worthy
+===========================
+
+We do not merge every PR. Here are some tips for making your PR useful, attractive, and merge-worthy.
+
+.. _community_changelogs:
+
+Changelogs
+----------
+
+Changelogs help users and developers keep up with changes to Ansible. Ansible builds a changelog for each release from fragments. You **must** add a changelog fragment to any PR that changes functionality or fixes a bug in ``ansible-base``. You do not have to add a changelog fragment for PRs that add new modules and plugins, because our tooling does that for you automatically.
+
+We build short summary changelogs for minor releases as well as for major releases. If you backport a bugfix, include a changelog fragment with the backport PR.
+
+.. _changelogs_how_to:
+
+Creating a changelog fragment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A basic changelog fragment is a ``.yaml`` file placed in the ``changelogs/fragments/`` directory. Each file contains a YAML dict with keys like ``bugfixes`` or ``major_changes``, followed by a list of changelog entries for bugfixes or features. Each changelog entry is rST embedded inside the YAML file, which means that certain constructs must be escaped so they are interpreted by rST rather than by YAML (or escaped for both YAML and rST if you prefer). Each PR **must** use a new fragment file rather than adding to an existing one, so we can trace the change back to the PR that introduced it.
+
+To create a changelog entry, create a new file with a unique name in the ``changelogs/fragments/`` directory of the corresponding repository. The file name should include the PR number and a description of the change. It must end with the file extension ``.yaml``. For example: ``40696-user-backup-shadow-file.yaml``
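+
+As a quick sketch (the PR number ``12345``, the file name, and the module name below are hypothetical placeholders), you could create such a fragment from the shell:
+
+.. code-block:: bash
+
+   # Create the fragment file; the name combines the PR number and a short description
+   cat > changelogs/fragments/12345-example-module-empty-input.yaml <<'EOF'
+   bugfixes:
+     - example_module - fix a crash when the input list is empty (https://github.com/ansible/ansible/issues/12345).
+   EOF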
+
+A single changelog fragment may contain multiple sections but most will only contain one section. The top-level keys (bugfixes, major_changes, and so on) are defined in the `config file <https://github.com/ansible/ansible/blob/devel/changelogs/config.yaml>`_ for our `release note tool <https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst>`_. Here are the valid sections and a description of each:
+
+**breaking_changes**
+ Changes that break existing playbooks or roles. This includes any change to existing behavior that forces users to update tasks. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`.
+
+**major_changes**
+ Major changes to Ansible itself. Generally does not include module or plugin changes. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`.
+
+**minor_changes**
+ Minor changes to Ansible, modules, or plugins. This includes new features, new parameters added to modules, or behavior changes to existing parameters.
+
+**deprecated_features**
+ Features that have been deprecated and are scheduled for removal in a future release. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`.
+
+**removed_features**
+ Features that were previously deprecated and are now removed. Displayed in both the changelogs and the :ref:`Porting Guides <porting_guides>`.
+
+**security_fixes**
+ Fixes that address CVEs or resolve security concerns. Include links to CVE information.
+
+**bugfixes**
+ Fixes that resolve issues.
+
+**known_issues**
+ Known issues that are currently not fixed or will not be fixed.
+
+Each changelog entry must contain a link to its issue in parentheses at the end. If there is no corresponding issue, the entry must contain a link to the PR itself.
+
+Most changelog entries will be ``bugfixes`` or ``minor_changes``. When writing a changelog entry that pertains to a particular module, start the entry with ``- [module name] -`` and the following sentence with a lowercase letter.
+
+Here are some examples:
+
+.. code-block:: yaml
+
+ bugfixes:
+ - apt_repository - fix crash caused by ``cache.update()`` raising an ``IOError``
+ due to a timeout in ``apt update`` (https://github.com/ansible/ansible/issues/51995).
+
+.. code-block:: yaml
+
+ minor_changes:
+ - lineinfile - add warning when using an empty regexp (https://github.com/ansible/ansible/issues/29443).
+
+.. code-block:: yaml
+
+ bugfixes:
+ - copy - the module was attempting to change the mode of files for
+ remote_src=True even if mode was not set as a parameter. This failed on
+ filesystems which do not have permission bits (https://github.com/ansible/ansible/issues/29444).
+
+You can find more example changelog fragments in the `changelog directory <https://github.com/ansible/ansible/tree/stable-2.10/changelogs/fragments>`_ for the 2.10 release.
+
+After you have written the changelog fragment for your PR, commit the file and include it with the pull request.
+
+.. _backport_process:
+
+Backporting merged PRs in ``ansible-base``
+===========================================
+
+All ``ansible-base`` PRs must be merged to the ``devel`` branch first. After a pull request has been accepted and merged to the ``devel`` branch, the following instructions will help you create a pull request to backport the change to a previous stable branch.
+
+We do **not** backport features.
+
+.. note::
+
+ These instructions assume that:
+
+ * ``stable-2.10`` is the targeted release branch for the backport
+ * ``https://github.com/ansible/ansible.git`` is configured as a
+ ``git remote`` named ``upstream``. If you do not use
+ a ``git remote`` named ``upstream``, adjust the instructions accordingly.
+ * ``https://github.com/<yourgithubaccount>/ansible.git``
+ is configured as a ``git remote`` named ``origin``. If you do not use
+ a ``git remote`` named ``origin``, adjust the instructions accordingly.
+
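+If you have not configured these remotes yet, a typical setup might look like the following (``<yourgithubaccount>`` is your GitHub account name; if you cloned from your fork, ``origin`` is usually configured already):
+
+.. code-block:: bash
+
+   # Point "upstream" at the main Ansible repository
+   git remote add upstream https://github.com/ansible/ansible.git
+   # Point "origin" at your fork (skip this if it already exists)
+   git remote add origin https://github.com/<yourgithubaccount>/ansible.git
+   # Verify both remotes
+   git remote -v
+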
+#. Prepare your devel, stable, and feature branches:
+
+ ::
+
+ git fetch upstream
+ git checkout -b backport/2.10/[PR_NUMBER_FROM_DEVEL] upstream/stable-2.10
+
+#. Cherry pick the relevant commit SHA from the devel branch into your feature
+ branch, handling merge conflicts as necessary:
+
+ ::
+
+ git cherry-pick -x [SHA_FROM_DEVEL]
+
+#. Add a :ref:`changelog fragment <changelogs_how_to>` for the change, and commit it.
+
+#. Push your feature branch to your fork on GitHub:
+
+ ::
+
+ git push origin backport/2.10/[PR_NUMBER_FROM_DEVEL]
+
+#. Submit the pull request for ``backport/2.10/[PR_NUMBER_FROM_DEVEL]``
+   against the ``stable-2.10`` branch.
+
+#. The Release Manager will decide whether to merge the backport PR before
+ the next minor release. There isn't any need to follow up. Just ensure that the automated
+ tests (CI) are green.
+
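+Putting these steps together, a complete session for a hypothetical backport might look like the following (the PR number ``12345``, the SHA ``0123abc``, and the fragment file name are placeholders, not real values):
+
+.. code-block:: bash
+
+   git fetch upstream
+   git checkout -b backport/2.10/12345 upstream/stable-2.10
+   git cherry-pick -x 0123abc
+   # Add and commit a changelog fragment for the backported change
+   git add changelogs/fragments/12345-example-fix.yaml
+   git commit -m "Add changelog fragment for the #12345 backport"
+   git push origin backport/2.10/12345
+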
+.. note::
+
+ The choice to use ``backport/2.10/[PR_NUMBER_FROM_DEVEL]`` as the
+ name for the feature branch is somewhat arbitrary, but conveys meaning
+ about the purpose of that branch. It is not required to use this format,
+ but it can be helpful, especially when making multiple backport PRs for
+ multiple stable branches.
+
+.. note::
+
+ If you prefer, you can use CPython's cherry-picker tool
+ (``pip install --user 'cherry-picker >= 1.3.2'``) to backport commits
+ from devel to stable branches in Ansible. Take a look at the `cherry-picker
+ documentation <https://pypi.org/p/cherry-picker#cherry-picking>`_ for
+ details on installing, configuring, and using it.
diff --git a/docs/docsite/rst/community/documentation_contributions.rst b/docs/docsite/rst/community/documentation_contributions.rst
new file mode 100644
index 00000000..7b135580
--- /dev/null
+++ b/docs/docsite/rst/community/documentation_contributions.rst
@@ -0,0 +1,214 @@
+.. _community_documentation_contributions:
+
+*****************************************
+Contributing to the Ansible Documentation
+*****************************************
+
+Ansible has a lot of documentation and a small team of writers. Community support helps us keep up with new features, fixes, and changes.
+
+Improving the documentation is an easy way to make your first contribution to the Ansible project. You do not have to be a programmer, since most of our documentation is written in YAML (module documentation) or `reStructuredText <http://docutils.sourceforge.net/rst.html>`_ (rST). Some collection-level documentation is written in a subset of `Markdown <https://github.com/ansible/ansible/issues/68119#issuecomment-596723053>`_. If you are using Ansible, you already use YAML in your playbooks. rST and Markdown are mostly just text. You do not even need git experience if you use the ``Edit on GitHub`` option.
+
+If you find a typo, a broken example, a missing topic, or any other error or omission on this documentation website, let us know. Here are some ways to support Ansible documentation:
+
+.. contents::
+ :local:
+
+Editing docs directly on GitHub
+===============================
+
+For typos and other quick fixes, you can edit most of the documentation right from the site. Look at the top right corner of this page. That ``Edit on GitHub`` link is available on all the guide pages in the documentation. If you have a GitHub account, you can submit a quick and easy pull request this way.
+
+.. note::
+
+ The source files for individual collection plugins exist in their respective repositories. Follow the link to the collection on Galaxy to find where the repository is located and any guidelines on how to contribute to that collection.
+
+To submit a documentation PR from docs.ansible.com with ``Edit on GitHub``:
+
+#. Click on ``Edit on GitHub``.
+#. If you don't already have a fork of the ansible repo on your GitHub account, you'll be prompted to create one.
+#. Fix the typo, update the example, or make whatever other change you have in mind.
+#. Enter a commit message in the first rectangle under the heading ``Propose file change`` at the bottom of the GitHub page. The more specific, the better. For example, "fixes typo in my_module description". You can put more detail in the second rectangle if you like. Leave the ``+label: docsite_pr`` there.
+#. Submit the suggested change by clicking on the green "Propose file change" button. GitHub will handle branching and committing for you, and open a page with the heading "Comparing Changes".
+#. Click on ``Create pull request`` to open the PR template.
+#. Fill out the PR template, including as much detail as appropriate for your change. You can change the title of your PR if you like (by default it's the same as your commit message). In the ``Issue Type`` section, delete all lines except the ``Docs Pull Request`` line.
+#. Submit your change by clicking the ``Create pull request`` button.
+#. Be patient while Ansibot, our automated script, adds labels, pings the docs maintainers, and kicks off a CI testing run.
+#. Keep an eye on your PR - the docs team may ask you for changes.
+
+Reviewing open PRs and issues
+=============================
+
+You can also contribute by reviewing open documentation `issues <https://github.com/ansible/ansible/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3Adocs>`_ and `PRs <https://github.com/ansible/ansible/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3Adocs>`_. To add a helpful review, please:
+
+- Include a comment - "looks good to me" only helps if we know why.
+- For issues, reproduce the problem.
+- For PRs, test the change.
+
+Opening a new issue and/or PR
+=============================
+
+If the problem you have noticed is too complex to fix with the ``Edit on GitHub`` option, and no open issue or PR already documents the problem, please open an issue and/or a PR on the correct underlying repo - ``ansible/ansible`` for most pages that are not plugin or module documentation. If the documentation page has no ``Edit on GitHub`` option, check if the page is for a module within a collection. If so, follow the link to the collection on Galaxy and select the ``repo`` button in the upper right corner to find the source repository for that collection and module. The Collection README file should contain information on how to contribute to that collection, or report issues.
+
+A great documentation GitHub issue or PR includes:
+
+- a specific title
+- a detailed description of the problem (even for a PR - it's hard to evaluate a suggested change unless we know what problem it's meant to solve)
+- links to other information (related issues/PRs, external documentation, pages on docs.ansible.com, and so on)
+
+
+Verifying your documentation PR
+================================
+
+If you make multiple changes to the documentation on ``ansible/ansible``, or add more than a line to it, please do the following before you open a pull request:
+
+#. Check that your text follows our :ref:`style_guide`.
+#. Test your changes for rST errors.
+#. Build the page, and preferably the entire documentation site, locally.
+
+.. note::
+
+   The following sections apply to documentation sourced from the ``ansible/ansible`` repo and do not apply to documentation from an individual collection. See the collection README file for details on how to contribute to that collection.
+
+Setting up your environment to build documentation locally
+----------------------------------------------------------
+
+To build documentation locally, ensure you have a working :ref:`development environment <environment_setup>`.
+
+To work with documentation on your local machine, you need Python 3.5 or greater and the
+following packages installed:
+
+- gcc
+- jinja2
+- libyaml
+- Pygments >= 2.4.0
+- pyparsing
+- PyYAML
+- rstcheck
+- six
+- sphinx
+- sphinx-notfound-page
+- straight.plugin
+
+These required packages are listed in two :file:`requirements.txt` files to make installation easier:
+
+.. code-block:: bash
+
+ pip install --user -r requirements.txt
+ pip install --user -r docs/docsite/requirements.txt
+
+You can drop ``--user`` if you have set up a virtual environment (venv/virtualenv).
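+
+For example, a minimal virtual environment setup might look like this (the environment path is an arbitrary example):
+
+.. code-block:: bash
+
+   python3 -m venv ~/ansible-docs-venv        # create the environment
+   source ~/ansible-docs-venv/bin/activate    # activate it
+   pip install -r requirements.txt -r docs/docsite/requirements.txt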
+
+.. note::
+
+ On macOS with Xcode, you may need to install ``six`` and ``pyparsing`` with ``--ignore-installed`` to get versions that work with ``sphinx``.
+
+.. note::
+
+ After checking out ``ansible/ansible``, make sure the ``docs/docsite/rst`` directory has strict enough permissions. It should only be writable by the owner's account. If your default ``umask`` is not 022, you can use ``chmod go-w docs/docsite/rst`` to set the permissions correctly in your new branch. Optionally, you can set your ``umask`` to 022 to make all newly created files on your system (including those created by ``git clone``) have the correct permissions.
+
+.. _testing_documentation_locally:
+
+Testing the documentation locally
+---------------------------------
+
+To test an individual file for rST errors:
+
+.. code-block:: bash
+
+ rstcheck changed_file.rst
+
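+You can also check several files at once, for example with a shell glob (newer versions of ``rstcheck`` also offer a recursive mode; see ``rstcheck --help``):
+
+.. code-block:: bash
+
+   rstcheck docs/docsite/rst/community/*.rst
+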
+Building the documentation locally
+----------------------------------
+
+Building the documentation is the best way to check for errors and review your changes. Once ``rstcheck`` runs with no errors, navigate to ``ansible/docs/docsite`` and then build the page(s) you want to review.
+
+Building a single rST page
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To build a single rST file with the make utility:
+
+.. code-block:: bash
+
+ make htmlsingle rst=path/to/your_file.rst
+
+For example:
+
+.. code-block:: bash
+
+ make htmlsingle rst=community/documentation_contributions.rst
+
+This process compiles all the links but provides minimal log output. If you're writing a new page or want more detailed log output, refer to the instructions on :ref:`build_with_sphinx-build`.
+
+.. note::
+
+ ``make htmlsingle`` adds ``rst/`` to the beginning of the path you provide in ``rst=``, so you can't type the filename with autocomplete. Here are the error messages you will see if you get this wrong:
+
+ - If you run ``make htmlsingle`` from the ``docs/docsite/rst/`` directory: ``make: *** No rule to make target `htmlsingle'. Stop.``
+ - If you run ``make htmlsingle`` from the ``docs/docsite/`` directory with the full path to your rST document: ``sphinx-build: error: cannot find files ['rst/rst/community/documentation_contributions.rst']``.
+
+
+Building all the rST pages
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To build all the rST files without any module documentation:
+
+.. code-block:: bash
+
+ MODULES=none make webdocs
+
+Building module docs and rST pages
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To build documentation for a few modules included in ``ansible/ansible`` plus all the rST files, use a comma-separated list:
+
+.. code-block:: bash
+
+ MODULES=one_module,another_module make webdocs
+
+To build all the module documentation plus all the rST files:
+
+.. code-block:: bash
+
+ make webdocs
+
+.. _build_with_sphinx-build:
+
+Building rST files with ``sphinx-build``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Advanced users can build one or more rST files with the sphinx utility directly. ``sphinx-build`` returns misleading ``undefined label`` warnings if you only build a single page, because it does not create internal links. However, ``sphinx-build`` returns more extensive syntax feedback, including warnings about indentation errors and ``x-string without end-string`` warnings. This can be useful, especially if you're creating a new page from scratch. To build a page or pages with ``sphinx-build``:
+
+.. code-block:: bash
+
+ sphinx-build [options] sourcedir outdir [filenames...]
+
+You can specify filenames, or ``-a`` for all files, or omit both to compile only new/changed files.
+
+For example:
+
+.. code-block:: bash
+
+ sphinx-build -b html -c rst/ rst/dev_guide/ _build/html/dev_guide/ rst/dev_guide/developing_modules_documenting.rst
+
+Running the final tests
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When you submit a documentation pull request, automated tests are run. Those same tests can be run locally. To do so, navigate to the repository's top directory and run:
+
+.. code-block:: bash
+
+ make clean &&
+ bin/ansible-test sanity --test docs-build &&
+ bin/ansible-test sanity --test rstcheck
+
+Unfortunately, leftover rST files from previous documentation builds can occasionally confuse these tests. It is therefore safest to run them on a clean copy of the repository, which is the purpose of ``make clean``. If you type these three lines one at a time and manually check the success of each, you do not need the ``&&``.
+
+Joining the documentation working group
+=======================================
+
+The Documentation Working Group (DaWGs) meets weekly on Tuesdays on the #ansible-docs channel on freenode IRC. For more information, including links to our agenda and a calendar invite, please visit the `working group page in the community repo <https://github.com/ansible/community/wiki/Docs>`_.
+
+.. seealso::
+ :ref:`More about testing module documentation <testing_module_documentation>`
+
+ :ref:`More about documenting modules <module_documenting>`
diff --git a/docs/docsite/rst/community/github_admins.rst b/docs/docsite/rst/community/github_admins.rst
new file mode 100644
index 00000000..802b180d
--- /dev/null
+++ b/docs/docsite/rst/community/github_admins.rst
@@ -0,0 +1,32 @@
+.. _github_admins:
+
+*************
+GitHub Admins
+*************
+
+.. contents:: Topics
+
+GitHub Admins have more permissions on GitHub than normal contributors or even committers. There are
+a few responsibilities that come with that increased power.
+
+
+Adding and removing committers
+==============================
+
+The Ansible Team will periodically review who is actively contributing to Ansible to grant or revoke
+contributors' ability to commit on their own. GitHub Admins are the people who have the power to
+actually manage the GitHub permissions.
+
+
+Changing branch permissions for releases
+========================================
+
+When we make a release, all commits to the release branch must go through a :ref:`release_managers`.
+The GitHub Admins are responsible for locking the branch so that only the Release Manager can commit
+to it when the release process reaches that stage, and for reopening the branch once the release has
+been made. The Release Manager will let the GitHub Admin know when this needs to be done.
+
+.. seealso:: The `GitHub Admin Process Docs
+ <https://docs.google.com/document/d/1gWPtxNX4J39uIzwqQWLIsTZ1dY_AwEZzAd9bJ4XtZso/edit#heading=h.2wezayw9xsqz>`_ for instructions
+ on how to change branch permissions.
diff --git a/docs/docsite/rst/community/how_can_I_help.rst b/docs/docsite/rst/community/how_can_I_help.rst
new file mode 100644
index 00000000..cf0a64c2
--- /dev/null
+++ b/docs/docsite/rst/community/how_can_I_help.rst
@@ -0,0 +1,86 @@
+.. _how_can_i_help:
+
+***************
+How can I help?
+***************
+
+.. contents::
+ :local:
+
+Thanks for being interested in helping the Ansible project!
+
+There are many ways to help the Ansible project...but first, please read and understand the :ref:`code_of_conduct`.
+
+Become a power user
+===================
+
+A great way to help the Ansible project is to become a power user:
+
+* Use Ansible everywhere you can
+* Take tutorials and classes
+* Read the :ref:`official documentation <ansible_documentation>`
+* Study some of the `many excellent books <https://www.amazon.com/s/ref=nb_sb_ss_c_2_7?url=search-alias%3Dstripbooks&field-keywords=ansible&sprefix=ansible%2Caps%2C260>`_ about Ansible
+* `Get certified <https://www.ansible.com/products/training-certification>`_.
+
+When you become a power user, your ability and opportunities to help the Ansible project in other ways will multiply quickly.
+
+Ask and answer questions online
+===============================
+
+There are many forums online where Ansible users ask and answer questions. Reach out and communicate with your fellow Ansible users.
+
+You can find the official :ref:`Ansible communication channels <communication>` in this guide.
+
+Review, fix, and maintain the documentation
+===========================================
+
+Typos are everywhere, even in the Ansible documentation. We work hard to keep the documentation up-to-date, but you may also find outdated examples. We offer easy ways to :ref:`report and/or fix documentation errors <community_documentation_contributions>`.
+
+.. _ansible_community_meetup:
+
+Participate in your local meetup
+================================
+
+There are Ansible meetups `all over the world <https://www.meetup.com/topics/ansible/>`_. Join your local meetup. Attend regularly. Ask good questions. Volunteer to give a presentation about how you use Ansible.
+
+If there is no meetup near you, we are happy to help you `start one <https://www.ansible.com/community/events/ansible-meetups>`_.
+
+File and verify issues
+======================
+
+All software has bugs, and Ansible is no exception. When you find a bug, you can help tremendously by :ref:`telling us about it <reporting_bugs_and_features>`.
+
+
+If the bug you found already exists in an issue, you can help by verifying the behavior of the reported bug with a comment in that issue, or by reporting any additional information.
+
+Review and submit pull requests
+===============================
+
+As you become more familiar with how Ansible works, you may be able to fix issues or develop new features yourself. If you think you have a fix for a bug in Ansible, or if you have a new feature that you would like to share with millions of Ansible users, read all about the :ref:`Ansible development process <community_development_process>` and :ref:`how to contribute to collections <contributing_maintained_collections>` to learn how to get your code accepted into Ansible.
+
+Another good way to help is to review pull requests that other Ansible users have submitted. The Ansible community keeps a full list of `open pull requests by file <https://ansible.sivel.net/pr/byfile.html>`_, so if a particular module or plugin interests you, you can easily keep track of all the relevant new pull requests and provide testing or feedback.
+
+Become a collection maintainer
+==============================
+
+Once you have learned about the development process and have contributed code to a collection, we encourage you to become a maintainer of that collection. There are hundreds of modules in dozens of Ansible collections, and the vast majority of them are written and maintained entirely by members of the Ansible community.
+
+To learn more about the responsibilities of being an Ansible module maintainer, please read our :ref:`collection maintainer guidelines <maintainers>`.
+
+.. _community_working_groups:
+
+Join a working group
+====================
+
+Working groups are a way for Ansible community members to self-organize around particular topics of interest. We have working groups around various topics. To join or create a working group, please read the :ref:`Ansible Working Groups <working_group_list>`.
+
+
+Teach Ansible to others
+=======================
+
+We are working on a standardized `Ansible workshop <https://ansible.github.io/workshops/>`_ that can provide a good hands-on introduction to Ansible usage and concepts.
+
+Social media
+============
+
+If you like Ansible and just want to spread the good word, feel free to share on your social media platform of choice, and let us know by using ``@ansible`` or ``#ansible``. We'll be looking for you.
diff --git a/docs/docsite/rst/community/index.rst b/docs/docsite/rst/community/index.rst
new file mode 100644
index 00000000..be08228c
--- /dev/null
+++ b/docs/docsite/rst/community/index.rst
@@ -0,0 +1,88 @@
+.. _ansible_community_guide:
+
+***********************
+Ansible Community Guide
+***********************
+
+Welcome to the Ansible Community Guide!
+
+The purpose of this guide is to teach you everything you need to know about being a contributing member of the Ansible community. All types of contributions are welcome and necessary to Ansible's continued success.
+
+This page outlines the most common situations and questions that bring readers to this section. If you prefer a :ref:`traditional table of contents <community_toc>`, you can find one at the bottom of the page.
+
+
+Getting started
+===============
+
+* I am new to the community. Where can I find the Ansible :ref:`code_of_conduct`?
+* I would like to know what I am agreeing to when I contribute to Ansible. Does Ansible have a :ref:`contributor_license_agreement`?
+* I would like to contribute but I am not sure how. Are there :ref:`easy ways to contribute <how_can_i_help>`?
+* I want to talk to other Ansible users. How do I find an `Ansible Meetup near me <https://www.meetup.com/topics/ansible/>`_?
+* I have a question. Which :ref:`Ansible email lists and IRC channels <communication>` will help me find answers?
+* I want to learn more about Ansible. What can I do?
+
+ * `Read books <https://www.ansible.com/resources/ebooks>`_.
+ * `Get certified <https://www.ansible.com/products/training-certification>`_.
+ * `Attend events <https://www.ansible.com/community/events>`_.
+ * `Review getting started guides <https://www.ansible.com/resources/get-started>`_.
+ * `Watch videos <https://www.ansible.com/resources/videos>`_ - includes Ansible Automates, AnsibleFest & webinar recordings.
+
+* I would like updates about new Ansible versions. How are `new releases announced <https://groups.google.com/forum/#!forum/ansible-announce>`_?
+* I want to use the current release. How do I know which :ref:`releases are current <release_schedule>`?
+
+Going deeper
+============
+
+* I think Ansible is broken. How do I :ref:`report a bug <reporting_bugs>`?
+* I need functionality that Ansible does not offer. How do I :ref:`request a feature <request_features>`?
+* How do I :ref:`contribute to an Ansible-maintained collection <contributing_maintained_collections>`?
+* I am waiting for a particular feature. How do I see what is :ref:`planned for future Ansible Releases <roadmaps>`?
+* I have a specific Ansible interest or expertise (for example, VMware, Linode, and so on). How do I get involved in a :ref:`working group <working_group_list>`?
+* I would like to participate in conversations about features and fixes. How do I review GitHub issues and pull requests?
+* I found a typo or another problem on docs.ansible.com. How can I :ref:`improve the documentation <community_documentation_contributions>`?
+
+
+Working with the Ansible repo
+=============================
+
+* I want to make my first code changes to a collection or to ``ansible-base``. How do I :ref:`set up my Python development environment <environment_setup>`?
+* I would like to get more efficient as a developer. How can I find :ref:`editors, linters, and other tools <other_tools_and_programs>` that will support my Ansible development efforts?
+* I want my code to meet Ansible's guidelines. Where can I find guidance on :ref:`coding in Ansible <developer_guide>`?
+* I want to learn more about Ansible roadmaps, releases, and projects. How do I find information on :ref:`the development cycle <community_development_process>`?
+* I would like to connect Ansible to a new API or other resource. How do I :ref:`create a collection <developing_modules_in_groups>`?
+* My pull request is marked ``needs_rebase``. How do I :ref:`rebase my PR <rebase_guide>`?
+* I am using an older version of Ansible and want a bug fixed in my version that has already been fixed on the ``devel`` branch. How do I :ref:`backport a bugfix PR <backport_process>`?
+* I have an open pull request with a failing test. How do I learn about Ansible's :ref:`testing (CI) process <developing_testing>`?
+* I am ready to step up as a collection maintainer. What are the :ref:`guidelines for maintainers <maintainers>`?
+* A module in a collection I maintain is obsolete. How do I :ref:`deprecate a module <deprecating_modules>`?
+
+.. _community_toc:
+
+Traditional Table of Contents
+=============================
+
+If you prefer to read the entire Community Guide, here is a list of the pages in order:
+
+.. toctree::
+ :maxdepth: 2
+
+ code_of_conduct
+ how_can_I_help
+ reporting_bugs_and_features
+ documentation_contributions
+ communication
+ development_process
+ contributing_maintained_collections
+ contributor_license_agreement
+ triage_process
+ other_tools_and_programs
+ ../dev_guide/style_guide/index
+
+.. toctree::
+ :caption: Guidelines for specific types of contributors
+ :maxdepth: 1
+
+ committer_guidelines
+ maintainers
+ release_managers
+ github_admins
diff --git a/docs/docsite/rst/community/maintainers.rst b/docs/docsite/rst/community/maintainers.rst
new file mode 100644
index 00000000..ac466d67
--- /dev/null
+++ b/docs/docsite/rst/community/maintainers.rst
@@ -0,0 +1,34 @@
+.. _maintainers:
+
+********************************
+Collection maintainer guidelines
+********************************
+
+Thank you for being a community collection maintainer. This guide offers an overview of your responsibilities as a maintainer along with resources for additional information. The Ansible community hopes that you will find that maintaining a collection is as rewarding for you as having the collection content is for the wider community.
+
+.. contents::
+ :local:
+
+In addition to the information below, module maintainers should be familiar with:
+
+* :ref:`General Ansible community development practices <ansible_community_guide>`
+* Documentation on :ref:`module development <developing_modules>`
+
+
+Maintainer responsibilities
+===========================
+
+When you contribute a module to a collection included in the ``ansible`` package, you become a maintainer for that module once it has been merged. Maintainership empowers you with the authority to accept, reject, or request revisions to pull requests on your module -- but as they say, "with great power comes great responsibility."
+
+Maintainers of Ansible collections are expected to provide feedback, responses, or actions on pull requests or issues to the collection(s) they maintain in a reasonably timely manner. You can also update the contributor guidelines for that collection, in collaboration with the Ansible community team and the other maintainers of that collection.
+
+Resources
+=========
+
+Please see :ref:`communication` for ways to contact the broader Ansible community. For maintainers, following the `ansible-devel <https://groups.google.com/forum/#!forum/ansible-devel>`_ mailing list is a great way to participate in conversations about coding, get assistance when you need it, and influence the overall direction, quality, and goals of Ansible and the collections. If you are not on this relatively low-volume list, please join us here: https://groups.google.com/forum/#!forum/ansible-devel
+
+
+Pull requests, issues, and workflow
+===================================
+
+Each collection community can set its own rules and workflow for managing pull requests, bug reports, documentation issues, and feature requests, as well as adding and replacing maintainers.
diff --git a/docs/docsite/rst/community/other_tools_and_programs.rst b/docs/docsite/rst/community/other_tools_and_programs.rst
new file mode 100644
index 00000000..4d7326bc
--- /dev/null
+++ b/docs/docsite/rst/community/other_tools_and_programs.rst
@@ -0,0 +1,123 @@
+.. _other_tools_and_programs:
+
+########################
+Other Tools And Programs
+########################
+
+.. contents::
+ :local:
+
+The Ansible community uses a range of tools for working with the Ansible project. This is a list of some of the most popular of these tools.
+
+If you know of any other tools that should be added, this list can be updated by clicking "Edit on GitHub" on the top right of this page.
+
+***************
+Popular Editors
+***************
+
+Atom
+====
+
+An open-source, free GUI text editor created and maintained by GitHub. You can keep track of git project
+changes, commit from the GUI, and see what branch you are on. You can customize the themes for different colors and install syntax highlighting packages for different languages. You can install Atom on Linux, macOS and Windows. Useful Atom plugins include:
+
+* `language-yaml <https://atom.io/packages/language-yaml>`_ - YAML highlighting for Atom (built-in).
+* `linter-js-yaml <https://atom.io/packages/linter-js-yaml>`_ - parses your YAML files in Atom through js-yaml.
+
+
+Emacs
+=====
+
+A free, open-source text editor and IDE that supports auto-indentation, syntax highlighting, and a built-in terminal shell (among other things).
+
+* `yaml-mode <https://github.com/yoshiki/yaml-mode>`_ - YAML highlighting and syntax checking.
+* `jinja2-mode <https://github.com/paradoxxxzero/jinja2-mode>`_ - Jinja2 highlighting and syntax checking.
+* `magit-mode <https://github.com/magit/magit>`_ - Git porcelain within Emacs.
+
+
+PyCharm
+=======
+
+A full IDE (integrated development environment) for Python software development. It ships with everything you need to write Python scripts and complete software, including support for YAML syntax highlighting. It's a little overkill for writing roles/playbooks, but it can be a very useful tool if you write modules and submit code for Ansible. It can also be used to debug the Ansible engine.
+
+
+Sublime
+=======
+
+A closed-source, subscription GUI text editor. You can customize the GUI with themes and install packages for language highlighting and other refinements. You can install Sublime on Linux, macOS and Windows. Useful Sublime plugins include:
+
+* `GitGutter <https://packagecontrol.io/packages/GitGutter>`_ - shows information about files in a git repository.
+* `SideBarEnhancements <https://packagecontrol.io/packages/SideBarEnhancements>`_ - provides enhancements to the sidebar operations on files and folders.
+* `Sublime Linter <https://packagecontrol.io/packages/SublimeLinter>`_ - a code-linting framework for Sublime Text 3.
+* `Pretty YAML <https://packagecontrol.io/packages/Pretty%20YAML>`_ - prettifies YAML for Sublime Text 2 and 3.
+* `Yamllint <https://packagecontrol.io/packages/SublimeLinter-contrib-yamllint>`_ - a Sublime wrapper around yamllint.
+
+
+Visual Studio Code
+==================
+
+An open-source, free GUI text editor created and maintained by Microsoft. Useful Visual Studio Code plugins include:
+
+
+* `YAML Support by Red Hat <https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml>`_ - provides YAML support through yaml-language-server with built-in Kubernetes and Kedge syntax support.
+* `Ansible Syntax Highlighting Extension <https://marketplace.visualstudio.com/items?itemName=haaaad.ansible>`_ - YAML & Jinja2 support.
+* `Visual Studio Code extension for Ansible <https://marketplace.visualstudio.com/items?itemName=vscoss.vscode-ansible>`_ - provides autocompletion and syntax highlighting.
+
+vim
+===
+
+An open-source, free command-line text editor. Useful vim plugins include:
+
+* `Ansible vim <https://github.com/pearofducks/ansible-vim>`_ - a vim syntax plugin for Ansible 2.x that supports YAML playbooks, Jinja2 templates, and Ansible's hosts files.
+
+JetBrains
+=========
+
+A family of integrated development environments based on IntelliJ's framework (IDEA, AppCode, CLion, GoLand, PhpStorm, PyCharm, and others), available in an open-source Community edition and a closed-source Enterprise edition. Useful JetBrains platform plugins include:
+
+* `Ansible Vault Editor <https://plugins.jetbrains.com/plugin/14278-ansible-vault-editor>`_ - Ansible Vault Editor with auto encryption/decryption.
+
+
+*****************
+Development Tools
+*****************
+
+Finding related issues and PRs
+==============================
+
+There are various ways to find existing issues and pull requests (PRs):
+
+- `PR by File <https://ansible.sivel.net/pr/byfile.html>`_ - shows a current list of all open pull requests by individual file. An essential tool for Ansible module maintainers.
+- `jctanner's Ansible Tools <https://github.com/jctanner/ansible-tools>`_ - miscellaneous collection of useful helper scripts for Ansible development.
+
+.. _validate-playbook-tools:
+
+******************************
+Tools for Validating Playbooks
+******************************
+
+- `Ansible Lint <https://docs.ansible.com/ansible-lint/index.html>`_ - a highly configurable linter for Ansible playbooks.
+- `Ansible Review <https://github.com/willthames/ansible-review>`_ - an extension of Ansible Lint designed for code review.
+- `Molecule <https://molecule.readthedocs.io/en/latest/>`_ is a testing framework for Ansible plays and roles.
+- `yamllint <https://yamllint.readthedocs.io/en/stable/>`__ is a command-line utility to check syntax validity including key repetition and indentation issues.
+
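+For example, a quick linting pass over a playbook might look like this (``site.yml`` is a placeholder for one of your playbooks):
+
+.. code-block:: bash
+
+   pip install --user ansible-lint yamllint
+   ansible-lint site.yml
+   yamllint site.yml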
+
+***********
+Other Tools
+***********
+
+- `Ansible cmdb <https://github.com/fboender/ansible-cmdb>`_ - takes the output of Ansible's fact gathering and converts it into a static HTML overview page containing system configuration information.
+- `Ansible Inventory Grapher <https://github.com/willthames/ansible-inventory-grapher>`_ - visually displays inventory inheritance hierarchies and at what level a variable is defined in inventory.
+- `Ansible Playbook Grapher <https://github.com/haidaraM/ansible-playbook-grapher>`_ - A command line tool to create a graph representing your Ansible playbook tasks and roles.
+- `Ansible Shell <https://github.com/dominis/ansible-shell>`_ - an interactive shell for Ansible with built-in tab completion for all the modules.
+- `Ansible Silo <https://github.com/groupon/ansible-silo>`_ - a self-contained Ansible environment by Docker.
+- `Ansigenome <https://github.com/nickjj/ansigenome>`_ - a command line tool designed to help you manage your Ansible roles.
+- `ARA <https://github.com/openstack/ara>`_ - records Ansible playbook runs and makes the recorded data available and intuitive for users and systems by integrating with Ansible as a callback plugin.
+- `Awesome Ansible <https://github.com/jdauphant/awesome-ansible>`_ - a collaboratively curated list of awesome Ansible resources.
+- `AWX <https://github.com/ansible/awx>`_ - provides a web-based user interface, REST API, and task engine built on top of Ansible. AWX is the upstream project for Red Hat Ansible Tower, part of the Red Hat Ansible Automation subscription.
+- `Mitogen for Ansible <https://mitogen.networkgenomics.com/ansible_detailed.html>`_ - uses the `Mitogen <https://github.com/dw/mitogen/>`_ library to execute Ansible playbooks in a more efficient way (decreases the execution time).
+- `nanvault <https://github.com/marcobellaccini/nanvault>`_ - a standalone tool to encrypt and decrypt files in the Ansible Vault format, featuring UNIX-style composability.
+- `OpsTools-ansible <https://github.com/centos-opstools/opstools-ansible>`_ - uses Ansible to configure an environment that provides the support of `OpsTools <https://wiki.centos.org/SpecialInterestGroup/OpsTools>`_, namely centralized logging and analysis, availability monitoring, and performance monitoring.
+- `TD4A <https://github.com/cidrblock/td4a>`_ - a template designer for automation. TD4A is a visual design aid for building and testing Jinja2 templates. It combines data in YAML format with a Jinja2 template and renders the output.
+- `PHP-Ansible <https://github.com/maschmann/php-ansible>`_ - an object oriented Ansible wrapper for PHP.
+
diff --git a/docs/docsite/rst/community/release_managers.rst b/docs/docsite/rst/community/release_managers.rst
new file mode 100644
index 00000000..d7c84cd5
--- /dev/null
+++ b/docs/docsite/rst/community/release_managers.rst
@@ -0,0 +1,82 @@
+.. _release_managers:
+
+**************************
+Release Manager Guidelines
+**************************
+
+.. contents:: Topics
+
+The release manager's purpose is to ensure a smooth release. To achieve that goal, they need to
+coordinate between:
+
+* Developers with commit privileges on the `Ansible GitHub repository <https://github.com/ansible/ansible/>`_
+* Contributors without commit privileges
+* The community
+* Ansible documentation team
+* Ansible Tower team
+
+Pre-releases: what and why
+==========================
+
+Pre-releases exist to draw testers. They give people who don't feel comfortable running from source
+control a means to get an early version of the code to test and give us feedback. To ensure we get
+good feedback about a release, we need to make sure all major changes in a release are put into
+a pre-release. Testers must be given time to test those changes before the final release. Ideally we
+want there to be sufficient time between pre-releases for people to install and test one version for
+a span of time. Then they can spend more time using the new code than installing the latest
+version.
+
+The right length of time for a tester is probably around two weeks. However, for our three-to-four month
+development cycle to work, we compress this down to one week; any less runs the risk
+of people spending more time installing the code instead of running it. That said, if there's a time
+crunch (with a release date that cannot slip), it is better to release with new changes than to hold
+back those changes to give people time to test between releases. People cannot test what is not released, so
+we have to get those tarballs out there even if people feel they have to install more frequently.
+
+
+Beta releases
+-------------
+
+In a beta release, we know there are still bugs. We will continue to accept fixes for these.
+Although we review these fixes, sometimes they can be invasive or potentially destabilize other
+areas of the code.
+
+During the beta, we will no longer accept feature submissions.
+
+
+Release candidates
+------------------
+
+In a release candidate, we've fixed all known blockers. Any remaining bugfixes are
+ones that we are willing to leave out of the release. At this point we need user testing to
+determine if there are any other blocker bugs lurking.
+
+Blocker bugs generally are those that cause significant problems for users. Regressions are
+more likely to be considered blockers because they break functionality that users already rely on.
+
+The Release Manager will cherry-pick fixes for new release blockers. The release manager will also
+choose whether to accept bugfixes for isolated areas of the code or defer those to the next minor
+release. By themselves, non-blocker bugs will not trigger a new release; they will only make it
+into the next major release if blocker bugs require that a new release be made.
+
+The last RC should be as close to the final as possible. The following things may be changed:
+
+ * Version numbers are changed automatically and will differ as the pre-release tags are removed from
+ the versions.
+ * Tests and :file:`docs/docsite/` can differ if really needed as they do not break runtime.
+ However, the release manager may still reject them as they have the potential to cause
+ breakage that will be visible during the release process.
+
+.. note:: We want to specifically emphasize that code (in :file:`bin/`, :file:`lib/ansible/`, and
+ :file:`setup.py`) must be the same unless there are extraordinary extenuating circumstances. If
+ there are extenuating circumstances, the Release Manager is responsible for notifying groups
+ (like the Tower Team) which would want to test the code.
+
+
+Ansible release process
+=======================
+
+The release process is kept in a `separate document
+<https://docs.google.com/document/d/10EWLkMesi9s_CK_GmbZlE_ZLhuQr6TBrdMLKo5dnMAI/edit#heading=h.ooo3izcel3cz>`_
+so that it can be easily updated during a release. If you need access to edit this, please ask one
+of the current release managers to add you.
diff --git a/docs/docsite/rst/community/reporting_bugs_and_features.rst b/docs/docsite/rst/community/reporting_bugs_and_features.rst
new file mode 100644
index 00000000..4cf3ca62
--- /dev/null
+++ b/docs/docsite/rst/community/reporting_bugs_and_features.rst
@@ -0,0 +1,56 @@
+.. _reporting_bugs_and_features:
+
+**************************************
+Reporting bugs and requesting features
+**************************************
+
+.. contents::
+ :local:
+
+.. _reporting_bugs:
+
+Reporting a bug
+===============
+
+Security bugs
+-------------
+
+Ansible practices responsible disclosure - if this is a security-related bug, email `security@ansible.com <mailto:security@ansible.com>`_ instead of filing a ticket or posting to any public groups, and you will receive a prompt response.
+
+Bugs in ansible-base
+--------------------
+
+If you find a bug that affects multiple plugins, a plugin that remained in the ansible/ansible repo, or the overall functioning of Ansible, report it to `github.com/ansible/ansible/issues <https://github.com/ansible/ansible/issues>`_. You need a free GitHub account. Before reporting a bug, use the bug/issue search to see if the issue has already been reported. If you are not sure if something is a bug yet, you can report the behavior on the :ref:`mailing list or IRC first <communication>`.
+
+Do not open issues for "how do I do this" type questions. These are great topics for IRC or the mailing list, where things are likely to be more of a discussion.
+
+If you find a bug, open the issue yourself to ensure we have a record of it. Do not rely on someone else in the community to file the bug report for you. We have created an issue template, which saves time and helps us help everyone with their issues more quickly. Please fill it out as completely and as accurately as possible:
+
+ * Include the Ansible version
+ * Include any relevant configuration
+ * Include the exact commands or tasks you are running
+ * Describe the behavior you expected
+ * Provide steps to reproduce the bug
+ * Use minimal, well-reduced, and well-commented examples rather than your entire production playbook
+ * When sharing YAML in playbooks, preserve the formatting by using `code blocks <https://help.github.com/articles/creating-and-highlighting-code-blocks/>`_ (see the example after this list).
+ * Document the behavior you got
+ * Include output where possible
+ * For multiple-file content, use gist.github.com, which is more durable than pastebin content
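+
+For example, fencing YAML inside a GitHub code block keeps the indentation intact (the task shown is only an illustration):
+
+.. code-block:: text
+
+   ```yaml
+   - name: Reproduce the problem
+     ping:
+       data: debugging_session
+   ```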
+
+Bugs in collections
+-------------------
+
+Many bugs only affect a single module or plugin. If you find a bug that affects a module or plugin hosted in a collection, file the bug in the repository of the :ref:`collection <collections>`:
+
+ #. Find the collection on Galaxy.
+ #. Click on the Issue Tracker link for that collection.
+ #. Follow the contributor guidelines or instructions in the collection repo.
+
+If you are not sure whether a bug is in ansible-base or in a collection, you can report the behavior on the :ref:`mailing list or IRC first <communication>`.
+
+.. _request_features:
+
+Requesting a feature
+====================
+
+The best way to get a feature into Ansible is to :ref:`submit a pull request <community_pull_requests>`, either against ansible-base or against a collection. See also :ref:`ansible_collection_merge_requirements`.
diff --git a/docs/docsite/rst/community/triage_process.rst b/docs/docsite/rst/community/triage_process.rst
new file mode 100644
index 00000000..5560f655
--- /dev/null
+++ b/docs/docsite/rst/community/triage_process.rst
@@ -0,0 +1,8 @@
+**************
+Triage Process
+**************
+
+The issue and PR triage processes are driven by the `Ansibot <https://github.com/ansible/ansibullbot>`_. Whenever an issue or PR is filed, the Ansibot examines the issue to ensure that all relevant data is present, and handles the routing of the issue as it works its way to eventual completion.
+
+For details on how Ansibot manages the triage process, please consult the `Ansibot
+Issue Guide <https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md>`_.
diff --git a/docs/docsite/rst/conf.py b/docs/docsite/rst/conf.py
new file mode 100644
index 00000000..0a72676e
--- /dev/null
+++ b/docs/docsite/rst/conf.py
@@ -0,0 +1,293 @@
+# -*- coding: utf-8 -*-
+#
+# documentation build configuration file, created by
+# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed
+# automatically).
+#
+# All configuration values have a default value; values that are commented out
+# serve to show the default value.
+
+import sys
+import os
+
+# pip install sphinx_rtd_theme
+# import sphinx_rtd_theme
+# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+# sys.path.append(os.path.abspath('some/directory'))
+#
+sys.path.insert(0, os.path.join('ansible', 'lib'))
+sys.path.append(os.path.abspath(os.path.join('..', '_extensions')))
+
+# We want sphinx to document the ansible modules contained in this repository,
+# not those that may happen to be installed in the version
+# of Python used to run sphinx. When sphinx loads in order to document,
+# the repository version needs to be the one that is loaded:
+sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
+
+VERSION = '2.10'
+AUTHOR = 'Ansible, Inc'
+
+
+# General configuration
+# ---------------------
+
+# Add any Sphinx extension module names here, as strings.
+# They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+# TEST: 'sphinxcontrib.fulltoc'
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
+
+# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
+# colorized code generated too for references.
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General substitutions.
+project = 'Ansible'
+copyright = "2019 Red Hat, Inc."
+
+# The default replacements for |version| and |release|, also used in various
+# other places throughout the built documents.
+#
+# The short X.Y version.
+version = VERSION
+# The full version, including alpha/beta/rc tags.
+release = VERSION
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+# unused_docs = []
+
+# List of directories, relative to source directories, that shouldn't be
+# searched for source files.
+# exclude_dirs = []
+
+# A list of glob-style patterns that should be excluded when looking
+# for source files.
+# OBSOLETE - removing this - dharmabumstead 2018-02-06
+# exclude_patterns = ['modules']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+highlight_language = 'YAML+Jinja'
+
+# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
+# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
+# |br| is useful for formatting fields inside of tables
+# |_| is a nonbreaking space; similarly useful inside of tables
+rst_epilog = """
+.. |br| raw:: html
+
+ <br>
+.. |_| unicode:: 0xA0
+ :trim:
+"""
+
+
+# Options for HTML output
+# -----------------------
+
+html_theme_path = ['../_themes']
+html_theme = 'sphinx_rtd_theme'
+html_short_title = 'Ansible Documentation'
+html_show_sphinx = False
+
+html_theme_options = {
+ 'canonical_url': "https://docs.ansible.com/ansible/latest/",
+ 'vcs_pageview_mode': 'edit'
+}
+
+html_context = {
+ 'display_github': 'True',
+ 'github_user': 'ansible',
+ 'github_repo': 'ansible',
+ 'github_version': 'devel/docs/docsite/rst/',
+ 'github_module_version': 'devel/lib/ansible/modules/',
+ 'github_root_dir': 'devel/lib/ansible',
+ 'github_cli_version': 'devel/lib/ansible/cli/',
+ 'current_version': version,
+ 'latest_version': '2.10',
+ # list specifically out of order to make latest work
+ 'available_versions': ('latest', '2.9', '2.9_ja', '2.8', 'devel'),
+ 'css_files': ('_static/ansible.css', # overrides to the standard theme
+ ),
+}
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+# html_style = 'solar.css'
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+html_title = 'Ansible Documentation'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (within the static path) to place at the top of
+# the sidebar.
+# html_logo =
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = 'favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['../_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_use_modindex = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+html_copy_source = False
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Poseidodoc'
+
+# Configuration for sphinx-notfound-pages
+# with no 'notfound_template' and no 'notfound_context' set,
+# the extension builds 404.rst into a location-agnostic 404 page
+#
+# default is `en` - using this for the sub-site:
+notfound_default_language = "ansible"
+# default is `latest`:
+# setting explicitly - docsite serves up /ansible/latest/404.html
+# so keep this set to `latest` even on the `devel` branch
+# then no maintenance is needed when we branch a new stable_x.x
+notfound_default_version = "latest"
+# makes default setting explicit:
+notfound_no_urls_prefix = False
+
+# Options for LaTeX output
+# ------------------------
+
+# The paper size ('letter' or 'a4').
+# latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+# latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, document class
+# [howto/manual]).
+latex_documents = [
+    ('index', 'ansible.tex', 'Ansible %s Documentation' % VERSION, AUTHOR, 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+# latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_use_modindex = True
+
+autoclass_content = 'both'
+
+# Note: Our strategy for intersphinx mappings is to have the upstream build location as the
+# canonical source and then cached copies of the mapping stored locally in case someone is building
+# when disconnected from the internet. We then have a script to update the cached copies.
+#
+# Because of that, each entry in this mapping should have this format:
+# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv'))
+#
+# The update script depends on this format so deviating from this (for instance, adding a third
+# location for the mapping to live) will confuse it.
+intersphinx_mapping = {'python': ('https://docs.python.org/2/', (None, '../python2.inv')),
+ 'python3': ('https://docs.python.org/3/', (None, '../python3.inv')),
+ 'jinja2': ('http://jinja.palletsprojects.com/', (None, '../jinja2.inv')),
+ 'ansible_2_10': ('https://docs.ansible.com/ansible/2.10/', (None, '../ansible_2_10.inv')),
+ 'ansible_2_9': ('https://docs.ansible.com/ansible/2.9/', (None, '../ansible_2_9.inv')),
+ 'ansible_2_8': ('https://docs.ansible.com/ansible/2.8/', (None, '../ansible_2_8.inv')),
+ 'ansible_2_7': ('https://docs.ansible.com/ansible/2.7/', (None, '../ansible_2_7.inv')),
+ 'ansible_2_6': ('https://docs.ansible.com/ansible/2.6/', (None, '../ansible_2_6.inv')),
+ 'ansible_2_5': ('https://docs.ansible.com/ansible/2.5/', (None, '../ansible_2_5.inv')),
+ }
+
+# linkchecker settings
+linkcheck_ignore = [
+ r'http://irc\.freenode\.net',
+]
+linkcheck_workers = 25
+# linkcheck_anchors = False
diff --git a/docs/docsite/rst/dev_guide/debugging.rst b/docs/docsite/rst/dev_guide/debugging.rst
new file mode 100644
index 00000000..6885b252
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/debugging.rst
@@ -0,0 +1,112 @@
+.. _debugging_modules:
+
+*****************
+Debugging modules
+*****************
+
+.. contents::
+ :local:
+
+.. _detailed_debugging:
+
+Detailed debugging steps
+========================
+
+Ansible modules are assembled into a zip file, consisting of the module file and the various Python boilerplate it needs, embedded inside a wrapper script. To see what is actually happening in the module, you need to extract the file from the wrapper. The wrapper script provides helper methods that let you do that.
+
+The following steps use ``localhost`` as the target host, but you can use the same steps to debug against remote hosts as well. For a simpler approach to debugging without using the temporary files, see :ref:`simple debugging <simple_debugging>`.
+
+
+#. Set :envvar:`ANSIBLE_KEEP_REMOTE_FILES` to ``1`` on the control host so Ansible will keep the remote module files instead of deleting them after the module finishes executing. Use the ``-vvv`` option to make Ansible more verbose. This will display the file name of the temporary module file.
+
+ .. code-block:: shell-session
+
+ $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible localhost -m ping -a 'data=debugging_session' -vvv
+ <127.0.0.1> ESTABLISH LOCAL CONNECTION FOR USER: badger
+ <127.0.0.1> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595 `" && echo "` echo $HOME/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595 `" )'
+ <127.0.0.1> PUT /var/tmp/tmpjdbJ1w TO /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595/AnsiballZ_ping.py
+ <127.0.0.1> EXEC /bin/sh -c 'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595/AnsiballZ_ping.py && sleep 0'
+ localhost | SUCCESS => {
+ "changed": false,
+ "invocation": {
+ "module_args": {
+ "data": "debugging_session"
+ },
+ "module_name": "ping"
+ },
+ "ping": "debugging_session"
+ }
+
+#. Navigate to the temporary directory from the previous step. If the previous command was run against a remote host, connect to that host first before trying to navigate to the temporary directory.
+
+ .. code-block:: shell-session
+
+ $ ssh remotehost # only if not debugging against localhost
+ $ cd /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595
+
+#. Run the wrapper's ``explode`` command to turn the string into some Python files that you can work with.
+
+ .. code-block:: shell-session
+
+ $ python AnsiballZ_ping.py explode
+ Module expanded into:
+ /home/badger/.ansible/tmp/ansible-tmp-1461434734.35-235318071810595/debug_dir
+
+ If you want to examine the wrapper file, you can. It contains a small Python script with a large base64-encoded string; the string holds the module to execute.
+
+#. When you look into the temporary directory you'll see a structure like this:
+
+ .. code-block:: shell-session
+
+ ├── AnsiballZ_ping.py
+ └── debug_dir
+ ├── ansible
+ │   ├── __init__.py
+ │   ├── module_utils
+ │   │   ├── __init__.py
+ │   │   ├── _text.py
+ │   │   ├── basic.py
+ │   │   ├── common
+ │   │   ├── compat
+ │   │   ├── distro
+ │   │   ├── parsing
+ │   │   ├── pycompat24.py
+ │   │   └── six
+ │   └── modules
+ │   ├── __init__.py
+ │   └── ping.py
+ └── args
+
+ * ``AnsiballZ_ping.py`` is the Python script with the module code stored in a base64-encoded string. It contains various helper functions for executing the module.
+
+ * ``ping.py`` is the code for the module itself. You can modify this code to see what effect it would have on your module, or for debugging purposes.
+
+ * The ``args`` file contains a JSON string. The string is a dictionary containing the module arguments and other variables that Ansible passes into the module to change its behavior. Modify this file to change the parameters passed to the module (a sample appears after these steps).
+
+ * The ``ansible`` directory contains the module code in ``modules`` as well as code from :mod:`ansible.module_utils` that is used by the module. Ansible includes files for any :mod:`ansible.module_utils` imports in the module but not any files from any other module. If your module uses :mod:`ansible.module_utils.url` Ansible will include it for you. But if your module includes `requests <https://requests.readthedocs.io/en/master/api/>`_, then you'll have to make sure that the Python `requests library <https://pypi.org/project/requests/>`_ is installed on the system before running the module.
+
+ You can modify files in this directory if you suspect that the module is having a problem in some of this boilerplate code rather than in the module code you have written.
+
+#. Once you edit the code or arguments in the exploded tree, use the ``execute`` subcommand to run it:
+
+ .. code-block:: shell-session
+
+ $ python AnsiballZ_ping.py execute
+ {"invocation": {"module_args": {"data": "debugging_session"}}, "changed": false, "ping": "debugging_session"}
+
+ This subcommand inserts the absolute path to ``debug_dir`` as the first item in ``sys.path`` and invokes the script using the arguments in the ``args`` file. You can continue to run the module like this until you understand the problem. Then you can copy the changes back into your real module file and test that the real module works via ``ansible`` or ``ansible-playbook``.
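+
+For reference, the ``args`` file from the ``ping`` run above contains JSON along these lines; the real file also carries internal ``_ansible_*`` settings that adjust module behavior:
+
+.. code-block:: json
+
+    {
+        "ANSIBLE_MODULE_ARGS": {
+            "data": "debugging_session"
+        }
+    }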
+
+
+.. _simple_debugging:
+
+Simple debugging
+================
+
+The easiest way to run a debugger in a module, either local or remote, is to use `epdb <https://pypi.org/project/epdb/>`_. Add ``import epdb; epdb.serve()`` in the module code on the control node at the desired break point. To connect to the debugger, run ``epdb.connect()``. See the `epdb documentation <https://pypi.org/project/epdb/>`_ for how to specify the ``host`` and ``port``. If connecting to a remote node, make sure to use a port that is allowed by any firewall between the control node and the remote node.
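+
+A minimal sketch of that workflow, using only the calls named above:
+
+.. code-block:: python
+
+    # In the module code, at the desired break point:
+    import epdb; epdb.serve()    # blocks until a debugger client attaches
+
+    # Then, from a separate Python session (local or on an allowed remote host):
+    import epdb; epdb.connect()  # attaches and gives you a pdb-style prompt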
+
+This technique should work with any remote debugger, but we do not guarantee that any particular remote debugging tool will work.
+
+The `q <https://pypi.org/project/q/>`_ library is another very useful debugging tool.
+
+Since ``print()`` statements do not work inside modules, raising an exception is a good approach if you just want to see some specific data. Put ``raise Exception(some_value)`` somewhere in the module and run it normally. Ansible will handle this exception, pass the message back to the control node, and display it.
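+
+A minimal sketch of this technique inside a module built on ``AnsibleModule`` (the option is illustrative):
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+
+    def main():
+        module = AnsibleModule(argument_spec=dict(data=dict(type='str', default='pong')))
+        # Temporary debugging aid: abort here and surface an intermediate value.
+        # Ansible reports the exception message back on the control node.
+        raise Exception('data is %r' % module.params['data'])
+
+    if __name__ == '__main__':
+        main()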
+
diff --git a/docs/docsite/rst/dev_guide/developing_api.rst b/docs/docsite/rst/dev_guide/developing_api.rst
new file mode 100644
index 00000000..eeff4684
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_api.rst
@@ -0,0 +1,47 @@
+.. _developing_api:
+
+**********
+Python API
+**********
+
+.. contents:: Topics
+
+.. note:: This API is intended for internal Ansible use. Ansible may make changes to this API at any time that could break backward compatibility with older versions of the API. Because of this, external use is not supported by Ansible. If you want to use Python API only for executing playbooks or modules, consider `ansible-runner <https://ansible-runner.readthedocs.io/en/latest/>`_ first.
+
+There are several ways to use Ansible from an API perspective. You can use
+the Ansible Python API to control nodes, you can extend Ansible to respond to various Python events, you can
+write plugins, and you can plug in inventory data from external data sources. This document
+gives a basic overview and examples of the Ansible execution and playbook API.
+
+If you would like to use Ansible programmatically from a language other than Python, trigger events asynchronously,
+or have access control and logging demands, please see the `Ansible Tower documentation <https://docs.ansible.com/ansible-tower/>`_.
+
+.. note:: Because Ansible relies on forking processes, this API is not thread safe.
+
+.. _python_api_example:
+
+Python API example
+==================
+
+This example is a simple demonstration that shows how to minimally run a couple of tasks:
+
+.. literalinclude:: ../../../../examples/scripts/uptime.py
+ :language: python
+
+.. note:: Ansible emits warnings and errors via the display object, which prints directly to stdout, stderr and the Ansible log.
+
+The source code for the ``ansible``
+command line tools (``lib/ansible/cli/``) is `available on GitHub <https://github.com/ansible/ansible/tree/devel/lib/ansible/cli>`_.
+
+.. seealso::
+
+ :ref:`developing_inventory`
+ Developing dynamic inventory integrations
+ :ref:`developing_modules_general`
+ Getting started on developing a module
+ :ref:`developing_plugins`
+ How to develop plugins
+ `Development Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Mailing list for development topics
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/developing_collections.rst b/docs/docsite/rst/dev_guide/developing_collections.rst
new file mode 100644
index 00000000..dd757e55
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_collections.rst
@@ -0,0 +1,801 @@
+
+.. _developing_collections:
+
+**********************
+Developing collections
+**********************
+
+Collections are a distribution format for Ansible content. You can use collections to package and distribute playbooks, roles, modules, and plugins.
+You can publish and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_.
+
+* For details on how to *use* collections see :ref:`collections`.
+* For the current development status of collections and the FAQ, see the `Ansible Collections Overview and FAQ <https://github.com/ansible-collections/overview/blob/main/README.rst>`_.
+
+.. contents::
+ :local:
+ :depth: 2
+
+.. _collection_structure:
+
+Collection structure
+====================
+
+Collections follow a simple data structure. None of the directories are required unless you have specific content that belongs in one of them. A collection does require a ``galaxy.yml`` file at the root level of the collection. This file contains all of the metadata that Galaxy and other tools need in order to package, build and publish the collection::
+
+ collection/
+ ├── docs/
+ ├── galaxy.yml
+ ├── meta/
+ │ └── runtime.yml
+ ├── plugins/
+ │ ├── modules/
+ │ │ └── module1.py
+ │ ├── inventory/
+ │ └── .../
+ ├── README.md
+ ├── roles/
+ │ ├── role1/
+ │ ├── role2/
+ │ └── .../
+ ├── playbooks/
+ │ ├── files/
+ │ ├── vars/
+ │ ├── templates/
+ │ └── tasks/
+ └── tests/
+
+
+.. note::
+ * Ansible only accepts ``.md`` extensions for the :file:`README` file and any files in the :file:`/docs` folder.
+ * See the `ansible-collections <https://github.com/ansible-collections/>`_ GitHub Org for examples of collection structure.
+ * Not all directories are currently in use. Those are placeholders for future features.
+
+.. _galaxy_yml:
+
+galaxy.yml
+----------
+
+A collection must have a ``galaxy.yml`` file that contains the necessary information to build a collection artifact.
+See :ref:`collections_galaxy_meta` for details.
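+
+A minimal ``galaxy.yml`` might look like this (values are illustrative):
+
+.. code-block:: yaml
+
+   namespace: my_namespace
+   name: my_collection
+   version: 1.0.0
+   readme: README.md
+   authors:
+     - Your Name <you@example.com>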
+
+.. _collections_doc_dir:
+
+docs directory
+---------------
+
+Put general documentation for the collection here. Keep the specific documentation for plugins and modules embedded as Python docstrings. Use the ``docs`` folder to describe how to use the roles and plugins the collection provides, role requirements, and so on. Use markdown and do not add subfolders.
+
+Use ``ansible-doc`` to view documentation for plugins inside a collection:
+
+.. code-block:: bash
+
+ ansible-doc -t lookup my_namespace.my_collection.lookup1
+
+The ``ansible-doc`` command requires the fully qualified collection name (FQCN) to display specific plugin documentation. In this example, ``my_namespace`` is the Galaxy namespace and ``my_collection`` is the collection name within that namespace.
+
+.. note:: The Galaxy namespace of an Ansible collection is defined in the ``galaxy.yml`` file. It can be different from the GitHub organization or repository name.
+
+.. _collections_plugin_dir:
+
+plugins directory
+------------------
+
+Add a subdirectory for each plugin type you provide, including ``module_utils``, which is usable not only by modules but by most plugins via its FQCN. This is a way to distribute modules, lookups, filters, and so on without having to import a role in every play.
+
+Vars plugins are unsupported in collections. Cache plugins may be used in collections for fact caching, but are not supported for inventory plugins.
+
+.. _collection_module_utils:
+
+module_utils
+^^^^^^^^^^^^
+
+When coding with ``module_utils`` in a collection, the Python ``import`` statement needs to take into account the FQCN along with the ``ansible_collections`` convention. The resulting Python import will look like ``from ansible_collections.{namespace}.{collection}.plugins.module_utils.{util} import {something}``
+
+The following example snippets show a Python and PowerShell module using both default Ansible ``module_utils`` and
+those provided by a collection. In this example the namespace is ``community``, the collection is ``test_collection``.
+In the Python example the ``module_util`` in question is called ``qradar`` such that the FQCN is
+``community.test_collection.plugins.module_utils.qradar``:
+
+.. code-block:: python
+
+ from ansible.module_utils.basic import AnsibleModule
+ from ansible.module_utils._text import to_text
+
+ from ansible.module_utils.six.moves.urllib.parse import urlencode, quote_plus
+ from ansible.module_utils.six.moves.urllib.error import HTTPError
+ from ansible_collections.community.test_collection.plugins.module_utils.qradar import QRadarRequest
+
+ argspec = dict(
+ name=dict(required=True, type='str'),
+ state=dict(choices=['present', 'absent'], required=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argspec,
+ supports_check_mode=True
+ )
+
+ qradar_request = QRadarRequest(
+ module,
+ headers={"Content-Type": "application/json"},
+ not_rest_data_keys=['state']
+ )
+
+Note that importing something from an ``__init__.py`` file requires using the file name:
+
+.. code-block:: python
+
+ from ansible_collections.namespace.collection_name.plugins.callback.__init__ import CustomBaseClass
+
+In the PowerShell example the ``module_util`` in question is called ``hyperv`` such that the FQCN is
+``community.test_collection.plugins.module_utils.hyperv``:
+
+.. code-block:: powershell
+
+ #!powershell
+ #AnsibleRequires -CSharpUtil Ansible.Basic
+ #AnsibleRequires -PowerShell ansible_collections.community.test_collection.plugins.module_utils.hyperv
+
+ $spec = @{
+ name = @{ required = $true; type = "str" }
+ state = @{ required = $true; choices = @("present", "absent") }
+ }
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+ Invoke-HyperVFunction -Name $module.Params.name
+
+ $module.ExitJson()
+
+.. _collections_roles_dir:
+
+roles directory
+----------------
+
+Collection roles are mostly the same as existing roles, but with a couple of limitations:
+
+ - Role names can now contain only lowercase alphanumeric characters and ``_``, and must start with an alphabetic character.
+ - Roles in a collection cannot contain plugins any more. Plugins must live in the collection ``plugins`` directory tree. Each plugin is accessible to all roles in the collection.
+
+The directory name of the role is used as the role name. Therefore, the directory name must comply with the
+above role name rules; the collection import into Galaxy will fail if a role name does not.
+
+You can migrate 'traditional roles' into a collection but they must follow the rules above. You may need to rename roles if they don't conform. You will have to move or link any role-based plugins to the collection specific directories.
+
+.. note::
+
+ For roles imported into Galaxy directly from a GitHub repository, setting the ``role_name`` value in the role's metadata overrides the role name used by Galaxy. For collections, that value is ignored. When importing a collection, Galaxy uses the role directory as the name of the role and ignores the ``role_name`` metadata value.
+
+playbooks directory
+--------------------
+
+TBD.
+
+.. _developing_collections_tests_directory:
+
+tests directory
+----------------
+
+Ansible Collections are tested much like Ansible itself: with the ``ansible-test``
+utility, which is released as part of Ansible, version 2.9.0 and newer. Because
+collections are tested with the same tooling as Ansible itself, all of the Ansible
+developer documentation on testing applies to authoring collection tests, with one
+key concept to keep in mind.
+
+See :ref:`testing_collections` for specific information on how to test collections
+with ``ansible-test``.
+
+Parts of the :ref:`developing_testing` documentation cover running Ansible from
+source code via a git clone, which is typical for an Ansible developer but not for a
+collection author; creating collections does not require running Ansible from
+source. Therefore, wherever that documentation discusses ``ansible-test`` binary
+paths, command completion, or environment variables, keep in mind that none of that
+is needed for collection testing: installing a stable release of Ansible that
+contains ``ansible-test`` is expected to set those things up for you.
+
+.. _meta_runtime_yml:
+
+meta directory
+--------------
+
+A collection can store some additional metadata in a ``runtime.yml`` file in the collection's ``meta`` directory. The ``runtime.yml`` file supports the following top-level keys:
+
+- *requires_ansible*:
+
+ The version of Ansible required to use the collection. Multiple versions can be separated with a comma.
+
+ .. code:: yaml
+
+ requires_ansible: ">=2.10,<2.11"
+
+ .. note:: Although the version is a `PEP440 Version Specifier <https://www.python.org/dev/peps/pep-0440/#version-specifiers>`_ under the hood, Ansible deviates from PEP440 behavior by truncating prerelease segments from the Ansible version. This means that Ansible 2.11.0b1 is compatible with something that ``requires_ansible: ">=2.11"``.
+
+- *plugin_routing*:
+
+ Content in a collection that Ansible needs to load from another location or that has been deprecated/removed.
+ The top level keys of ``plugin_routing`` are types of plugins, with individual plugin names as subkeys.
+ To define a new location for a plugin, set the ``redirect`` field to another name.
+ To deprecate a plugin, use the ``deprecation`` field to provide a custom warning message and the removal version or date. If the plugin has been renamed or moved to a new location, the ``redirect`` field should also be provided. If a plugin is being removed entirely, ``tombstone`` can be used for the fatal error message and removal version or date.
+
+ .. code:: yaml
+
+ plugin_routing:
+ inventory:
+ kubevirt:
+ redirect: community.general.kubevirt
+ my_inventory:
+ tombstone:
+ removal_version: "2.0.0"
+ warning_text: my_inventory has been removed. Please use other_inventory instead.
+ modules:
+ my_module:
+ deprecation:
+ removal_date: "2021-11-30"
+ warning_text: my_module will be removed in a future release of this collection. Use another.collection.new_module instead.
+ redirect: another.collection.new_module
+ podman_image:
+ redirect: containers.podman.podman_image
+ module_utils:
+ ec2:
+ redirect: amazon.aws.ec2
+ util_dir.subdir.my_util:
+ redirect: namespace.name.my_util
+
+- *import_redirection*
+
+ A mapping of names for Python import statements and their redirected locations.
+
+ .. code:: yaml
+
+ import_redirection:
+ ansible.module_utils.old_utility:
+ redirect: ansible_collections.namespace_name.collection_name.plugins.module_utils.new_location
+
+
+.. _creating_collections_skeleton:
+
+Creating a collection skeleton
+------------------------------
+
+To start a new collection:
+
+.. code-block:: bash
+
+ collection_dir#> ansible-galaxy collection init my_namespace.my_collection
+
+.. note::
+
+ Both the namespace and collection names use the same strict set of requirements. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for those requirements.
+
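+The generated skeleton typically looks like the following; the exact contents depend on your Ansible version and any custom template::
+
+    my_namespace/
+    └── my_collection/
+        ├── README.md
+        ├── galaxy.yml
+        ├── docs/
+        ├── plugins/
+        │   └── README.md
+        └── roles/
+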
+Once the skeleton exists, you can populate the directories with the content you want inside the collection. See `ansible-collections <https://github.com/ansible-collections/>`_ GitHub Org to get a better idea of what you can place inside a collection.
+
+.. _creating_collections:
+
+Creating collections
+======================
+
+To create a collection:
+
+#. Create a collection skeleton with the ``collection init`` command. See :ref:`creating_collections_skeleton` above.
+#. Add your content to the collection.
+#. Build the collection into a collection artifact with :ref:`ansible-galaxy collection build<building_collections>`.
+#. Publish the collection artifact to Galaxy with :ref:`ansible-galaxy collection publish<publishing_collections>`.
+
+A user can then install your collection on their systems.
+
+Currently the ``ansible-galaxy collection`` command implements the following subcommands:
+
+* ``init``: Create a basic collection skeleton based on the default template included with Ansible or your own template.
+* ``build``: Create a collection artifact that can be uploaded to Galaxy or your own repository.
+* ``publish``: Publish a built collection artifact to Galaxy.
+* ``install``: Install one or more collections.
+
+To learn more about the ``ansible-galaxy`` command-line tool, see the :ref:`ansible-galaxy` man page.
+
+
+.. _docfragments_collections:
+
+Using documentation fragments in collections
+--------------------------------------------
+
+To include documentation fragments in your collection:
+
+#. Create the documentation fragment: ``plugins/doc_fragments/fragment_name``.
+
+#. Refer to the documentation fragment with its FQCN.
+
+.. code-block:: yaml
+
+ extends_documentation_fragment:
+ - community.kubernetes.k8s_name_options
+ - community.kubernetes.k8s_auth_options
+ - community.kubernetes.k8s_resource_options
+ - community.kubernetes.k8s_scale_options
+
+:ref:`module_docs_fragments` covers the basics for documentation fragments. The `kubernetes <https://github.com/ansible-collections/kubernetes>`_ collection includes a complete example.
+
+You can also share documentation fragments across collections with the FQCN.
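+
+A documentation fragment itself is a Python file holding a ``ModuleDocFragment`` class whose ``DOCUMENTATION`` attribute contains the shared YAML. A minimal sketch, with an illustrative option name:
+
+.. code-block:: python
+
+    # plugins/doc_fragments/fragment_name.py
+    class ModuleDocFragment(object):
+
+        DOCUMENTATION = r'''
+    options:
+      endpoint:
+        description: Base URL of the service the modules in this collection use.
+        type: str
+        required: true
+    '''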
+
+.. _building_collections:
+
+Building collections
+--------------------
+
+To build a collection, run ``ansible-galaxy collection build`` from inside the root directory of the collection:
+
+.. code-block:: bash
+
+ collection_dir#> ansible-galaxy collection build
+
+This creates a tarball of the built collection in the current directory, which can be uploaded to Galaxy::
+
+ my_collection/
+ ├── galaxy.yml
+ ├── ...
+ ├── my_namespace-my_collection-1.0.0.tar.gz
+ └── ...
+
+.. note::
+ * Certain files and folders are excluded when building the collection artifact. See :ref:`ignoring_files_and_folders_collections` to exclude other files you would not want to distribute.
+ * If you used the now-deprecated ``Mazer`` tool for any of your collections, delete any and all files it added to your :file:`releases/` directory before you build your collection with ``ansible-galaxy``.
+ * The current Galaxy maximum tarball size is 2 MB.
+
+
+This tarball is mainly intended for upload to Galaxy
+as a distribution method, but you can also use it directly to install the collection on target systems.
+
+.. _ignoring_files_and_folders_collections:
+
+Ignoring files and folders
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default the build step will include all the files in the collection directory in the final build artifact except for the following:
+
+* ``galaxy.yml``
+* ``*.pyc``
+* ``*.retry``
+* ``tests/output``
+* previously built artifacts in the root directory
+* various version control directories like ``.git/``
+
+To exclude other files and folders when building the collection, you can set a list of file glob-like patterns in the
+``build_ignore`` key in the collection's ``galaxy.yml`` file. These patterns use the following special characters for
+wildcard matching:
+
+* ``*``: Matches everything
+* ``?``: Matches any single character
+* ``[seq]``: Matches any character in seq
+* ``[!seq]``: Matches any character not in seq
+
+For example, if you wanted to exclude the :file:`sensitive` folder within the ``playbooks`` folder as well as any ``.tar.gz`` archives, you
+can set the following in your ``galaxy.yml`` file:
+
+.. code-block:: yaml
+
+ build_ignore:
+ - playbooks/sensitive
+ - '*.tar.gz'
+
+.. note::
+ This feature is only supported when running ``ansible-galaxy collection build`` with Ansible 2.10 or newer.
+
+
+.. _trying_collection_locally:
+
+Trying collections locally
+--------------------------
+
+You can try your collection locally by installing it from the tarball. The following will enable an adjacent playbook to
+access the collection:
+
+.. code-block:: bash
+
+ ansible-galaxy collection install my_namespace-my_collection-1.0.0.tar.gz -p ./collections
+
+
+You should use one of the values configured in :ref:`COLLECTIONS_PATHS` for your path. This is also where Ansible itself will
+expect to find collections when attempting to use them. If you don't specify a path value, ``ansible-galaxy collection install``
+installs the collection in the first path defined in :ref:`COLLECTIONS_PATHS`, which by default is ``~/.ansible/collections``.
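+
+For example, to put ``./collections`` first in the search path, you could set the following in :file:`ansible.cfg` (paths illustrative):
+
+.. code-block:: ini
+
+   [defaults]
+   collections_paths = ./collections:~/.ansible/collections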
+
+Next, try using the local collection inside a playbook. For examples and more details, see :ref:`Using collections <using_collections>`.
+
+.. _collections_scm_install:
+
+Installing collections from a git repository
+--------------------------------------------
+
+You can also test a version of your collection in development by installing it from a git repository.
+
+.. code-block:: bash
+
+ ansible-galaxy collection install git+https://github.com/org/repo.git,devel
+
+.. include:: ../shared_snippets/installing_collections_git_repo.txt
+
+.. _publishing_collections:
+
+Publishing collections
+----------------------
+
+You can publish collections to Galaxy using the ``ansible-galaxy collection publish`` command or the Galaxy UI itself. You need a namespace on Galaxy to upload your collection. See `Galaxy namespaces <https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespaces>`_ on the Galaxy docsite for details.
+
+.. note:: Once you upload a version of a collection, you cannot delete or modify that version. Ensure that everything looks okay before you upload it.
+
+.. _galaxy_get_token:
+
+Getting your API token
+^^^^^^^^^^^^^^^^^^^^^^
+
+To upload your collection to Galaxy, you must first obtain an API token (``--token`` in the ``ansible-galaxy`` CLI command or ``token`` in the :file:`ansible.cfg` file under the ``galaxy_server`` section). The API token is a secret token used to protect your content.
+
+To get your API token:
+
+* For Galaxy, go to the `Galaxy profile preferences <https://galaxy.ansible.com/me/preferences>`_ page and click :guilabel:`API Key`.
+* For Automation Hub, go to https://cloud.redhat.com/ansible/automation-hub/token/ and click :guilabel:`Load token` from the version dropdown.
+
+Storing or using your API token
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Once you have retrieved your API token, you can store or use the token for collections in two ways:
+
+* Pass the token to the ``ansible-galaxy`` command using the ``--token`` argument.
+* Specify the token within a Galaxy server list in your :file:`ansible.cfg` file.
+
+Using the ``token`` argument
+............................
+
+You can use the ``--token`` argument with the ``ansible-galaxy`` command (in conjunction with the ``--server`` argument or :ref:`GALAXY_SERVER` setting in your :file:`ansible.cfg` file). You cannot use the ``--token`` argument with any servers defined in your :ref:`Galaxy server list <galaxy_server_config>`.
+
+.. code-block:: text
+
+ ansible-galaxy collection publish ./geerlingguy-collection-1.2.3.tar.gz --token=<key goes here>
+
+
+Specify the token within a Galaxy server list
+.............................................
+
+With this option, you configure one or more servers for Galaxy in your :file:`ansible.cfg` file under the ``galaxy_server_list`` section. For each server, you also configure the token.
+
+
+.. code-block:: ini
+
+ [galaxy]
+ server_list = release_galaxy
+
+ [galaxy_server.release_galaxy]
+ url=https://galaxy.ansible.com/
+ token=my_token
+
+See :ref:`galaxy_server_config` for complete details.
+
+.. _upload_collection_ansible_galaxy:
+
+Upload using ansible-galaxy
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+ By default, ``ansible-galaxy`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the :file:`ansible.cfg` file under :ref:`galaxy_server`). If you are only publishing your collection to Ansible Galaxy, you do not need any further configuration. If you are using Red Hat Automation Hub or any other Galaxy server, see :ref:`Configuring the ansible-galaxy client <galaxy_server_config>`.
+
+To upload the collection artifact with the ``ansible-galaxy`` command:
+
+.. code-block:: bash
+
+ ansible-galaxy collection publish path/to/my_namespace-my_collection-1.0.0.tar.gz
+
+.. note::
+
+ The above command assumes you have retrieved and stored your API token as part of a Galaxy server list. See :ref:`galaxy_get_token` for details.
+
+The ``ansible-galaxy collection publish`` command triggers an import process, just as if you uploaded the collection through the Galaxy website.
+The command waits until the import process completes before reporting the status back. If you want to continue
+without waiting for the import result, use the ``--no-wait`` argument and manually look at the import progress in your
+`My Imports <https://galaxy.ansible.com/my-imports/>`_ page.
+
+
+.. _upload_collection_galaxy:
+
+Upload a collection from the Galaxy website
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To upload your collection artifact directly on Galaxy:
+
+#. Go to the `My Content <https://galaxy.ansible.com/my-content/namespaces>`_ page, and click the **Add Content** button on one of your namespaces.
+#. From the **Add Content** dialogue, click **Upload New Collection**, and select the collection archive file from your local filesystem.
+
+When uploading collections, it doesn't matter which namespace you select. The collection will be uploaded to the
+namespace specified in the collection metadata in the ``galaxy.yml`` file. If you're not an owner of the
+namespace, the upload request will fail.
+
+Once Galaxy uploads and accepts a collection, you will be redirected to the **My Imports** page, which displays output from the
+import process, including any errors or warnings about the metadata and content contained in the collection.
+
+.. _collection_versions:
+
+Collection versions
+-------------------
+
+Once you upload a version of a collection, you cannot delete or modify that version. Ensure that everything looks okay before
+uploading. The only way to change a collection is to release a new version. The latest version of a collection (by highest version number)
+will be the version displayed everywhere in Galaxy; however, users will still be able to download older versions.
+
+Collection versions use `Semantic Versioning <https://semver.org/>`_ for version numbers. Please read the official documentation for details and examples. In summary:
+
+* Increment major (for example: x in `x.y.z`) version number for an incompatible API change.
+* Increment minor (for example: y in `x.y.z`) version number for new functionality in a backwards compatible manner (for example new modules/plugins, parameters, return values).
+* Increment patch (for example: z in `x.y.z`) version number for backwards compatible bug fixes.
+
+.. _migrate_to_collection:
+
+Migrating Ansible content to a different collection
+====================================================
+
+First, look at `Ansible Collection Checklist <https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst>`_.
+
+To migrate content from one collection to another, if the collections are part of the `Ansible distribution <https://github.com/ansible-community/ansible-build-data/blob/main/2.10/ansible.in>`_:
+
+#. Copy content from the source (old) collection to the target collection.
+#. Deprecate the module/plugin with ``removal_version`` scheduled for the next major version in ``meta/runtime.yml`` of the source collection. The deprecation must be released after the copied content has been included in a release of the target collection.
+#. When the next major release comes:
+
+ * remove the module/plugin from the source collection
+ * add ``redirect`` to the corresponding entry in ``meta/runtime.yml``
+ * remove ``removal_version`` from there
+
+According to the above, you need to create at least three PRs as follows:
+
+#. Create a PR against the target collection to copy the content.
+#. Deprecate the module/plugin in the source collection.
+#. Later create a PR against the source collection to remove the content according to the schedule.
+
+
+Adding the content to the new collection
+----------------------------------------
+
+Create a PR in the new collection to:
+
+#. Copy ALL the related files from the old collection.
+#. If it is an action plugin, include the corresponding module with documentation.
+#. If it is a module, check if it has a corresponding action plugin that should move with it.
+#. Check ``meta/`` for relevant updates to ``runtime.yml`` if it exists.
+#. Carefully check the moved ``tests/integration`` and ``tests/unit`` tests and update them for FQCN use.
+#. Review ``tests/sanity/ignore-*.txt`` entries in the old collection.
+#. Update ``meta/runtime.yml`` in the old collection.
+
+
+Removing the content from the old collection
+--------------------------------------------
+
+Create a PR against the source collection repository to remove the modules, module_utils, plugins, and doc_fragments related to this migration:
+
+#. If you are removing an action plugin, remove the corresponding module that contains the documentation.
+#. If you are removing a module, remove any corresponding action plugin that should stay with it.
+#. Remove any entries about removed plugins from ``meta/runtime.yml``. Ensure they are added into the new repo.
+#. Remove sanity ignore lines from ``tests/sanity/ignore-*.txt``.
+#. Remove associated integration tests from ``tests/integration/targets/`` and unit tests from ``tests/unit/plugins/``.
+#. If you are removing content from ``community.general`` or ``community.network``, remove entries from ``.github/BOTMETA.yml``.
+#. Carefully review ``meta/runtime.yml`` for any entries you may need to remove or update, in particular deprecated entries.
+#. Update ``meta/runtime.yml`` to contain redirects for EVERY PLUGIN, pointing to the new collection name (see the sketch below).
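+
+A sketch of one such redirect in the old collection's ``meta/runtime.yml`` (names hypothetical):
+
+.. code-block:: yaml
+
+   plugin_routing:
+     modules:
+       my_module:
+         redirect: new_namespace.new_collection.my_module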
+
+.. warning::
+
+ Maintainers for the old collection have to make sure that the PR is merged in a way that it does not break user experience and semantic versioning:
+
+ #. A new version containing the merged PR must not be released until the target collection has published a release containing the moved content. Otherwise the redirects cannot work, and users relying on that content will experience breakage.
+ #. Once 1.0.0 of the collection from which the content has been removed has been released, such PRs can only be merged for a new **major** version (in other words, 2.0.0, 3.0.0, and so on).
+
+
+BOTMETA.yml
+-----------
+
+The ``BOTMETA.yml``, for example in `community.general collection repository <https://github.com/ansible-collections/community.general/blob/main/.github/BOTMETA.yml>`_, is the source of truth for:
+
+* ansibullbot
+
+If the old and/or new collection has ``ansibullbot``, its ``BOTMETA.yml`` must be updated correspondingly.
+
+Ansibullbot will know how to redirect existing issues and PRs to the new repo.
+The build process for docs.ansible.com will know where to find the module docs.
+
+.. code-block:: yaml
+
+ $modules/monitoring/grafana/grafana_plugin.py:
+ migrated_to: community.grafana
+ $modules/monitoring/grafana/grafana_dashboard.py:
+ migrated_to: community.grafana
+ $modules/monitoring/grafana/grafana_datasource.py:
+ migrated_to: community.grafana
+ $plugins/callback/grafana_annotations.py:
+ maintainers: $team_grafana
+ labels: monitoring grafana
+ migrated_to: community.grafana
+ $plugins/doc_fragments/grafana.py:
+ maintainers: $team_grafana
+ labels: monitoring grafana
+ migrated_to: community.grafana
+
+`Example PR <https://github.com/ansible/ansible/pull/66981/files>`_
+
+* The ``migrated_to:`` key must be added explicitly for every *file*. You cannot add ``migrated_to`` at the directory level. This is to allow module and plugin webdocs to be redirected to the new collection docs.
+* ``migrated_to:`` MUST be added for every:
+
+ * module
+ * plugin
+ * module_utils
+ * contrib/inventory script
+
+* You do NOT need to add ``migrated_to`` for:
+
+ * Unit tests
+ * Integration tests
+ * ReStructured Text docs (anything under ``docs/docsite/rst/``)
+ * Files that never existed in ``ansible/ansible:devel``
+
+.. _testing_collections:
+
+Testing collections
+===================
+
+The main tool for testing collections is ``ansible-test``, Ansible's testing tool described in :ref:`developing_testing`. You can run several compile and sanity checks, as well as run unit and integration tests for plugins using ``ansible-test``. When you test collections, test against the ansible-base version(s) you are targeting.
+
+You must always execute ``ansible-test`` from the root directory of a collection. You can run ``ansible-test`` in Docker containers without installing any special requirements. The Ansible team uses this approach in Shippable both in the ansible/ansible GitHub repository and in the large community collections such as `community.general <https://github.com/ansible-collections/community.general/>`_ and `community.network <https://github.com/ansible-collections/community.network/>`_. The examples below demonstrate running tests in Docker containers.
+
+Compile and sanity tests
+------------------------
+
+To run all compile and sanity tests::
+
+ ansible-test sanity --docker default -v
+
+See :ref:`testing_compile` and :ref:`testing_sanity` for more information. See the :ref:`full list of sanity tests <all_sanity_tests>` for details on the sanity tests and how to fix identified issues.
+
+Unit tests
+----------
+
+You must place unit tests in the appropriate ``tests/unit/plugins/`` directory. For example, you would place tests for ``plugins/module_utils/foo/bar.py`` in ``tests/unit/plugins/module_utils/foo/test_bar.py`` or ``tests/unit/plugins/module_utils/foo/bar/test_bar.py``. For examples, see the `unit tests in community.general <https://github.com/ansible-collections/community.general/tree/master/tests/unit/>`_.
+
+To run all unit tests for all supported Python versions::
+
+ ansible-test units --docker default -v
+
+To run all unit tests only for a specific Python version::
+
+ ansible-test units --docker default -v --python 3.6
+
+To run only a specific unit test::
+
+ ansible-test units --docker default -v --python 3.6 tests/unit/plugins/module_utils/foo/test_bar.py
+
+You can specify Python requirements in the ``tests/unit/requirements.txt`` file. See :ref:`testing_units` for more information, especially on fixture files.
+
+Integration tests
+-----------------
+
+You must place integration tests in the appropriate ``tests/integration/targets/`` directory. For module integration tests, you can use the module name alone. For example, you would place integration tests for ``plugins/modules/foo.py`` in a directory called ``tests/integration/targets/foo/``. For non-module plugin integration tests, you must add the plugin type to the directory name. For example, you would place integration tests for ``plugins/connections/bar.py`` in a directory called ``tests/integration/targets/connection_bar/``. For lookup plugins, the directory must be called ``lookup_foo``, for inventory plugins, ``inventory_foo``, and so on.
+
+You can write two different kinds of integration tests:
+
+* Ansible role tests run with ``ansible-playbook`` and validate various aspects of the module. They can depend on other integration tests (usually named ``prepare_bar`` or ``setup_bar``, which prepare a service or install a requirement named ``bar`` in order to test module ``foo``) to set up required resources, such as installing required libraries or setting up server services.
+* ``runme.sh`` tests run directly as scripts. They can set up inventory files, and execute ``ansible-playbook`` or ``ansible-inventory`` with various settings.
+
+For examples, see the `integration tests in community.general <https://github.com/ansible-collections/community.general/tree/master/tests/integration/targets/>`_. See also :ref:`testing_integration` for more details.
+
+Since integration tests can install requirements, and set up, start, and stop services, we recommend running them in Docker containers or otherwise restricted environments whenever possible. By default, ``ansible-test`` supports Docker images for several operating systems. See the `list of supported docker images <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/docker.txt>`_ for all options. Use the ``default`` image mainly for platform-independent integration tests, such as those for cloud modules. The following examples use the ``centos8`` image.
+
+To execute all integration tests for a collection::
+
+ ansible-test integration --docker centos8 -v
+
+If you want more detailed output, run the command with ``-vvv`` instead of ``-v``. Alternatively, specify ``--retry-on-error`` to automatically re-run failed tests with higher verbosity levels.
+
+To execute only the integration tests in a specific directory::
+
+ ansible-test integration --docker centos8 -v connection_bar
+
+You can specify multiple target names. Each target name is the name of a directory in ``tests/integration/targets/``.
+
+.. _hacking_collections:
+
+Contributing to collections
+===========================
+
+If you want to add functionality to an existing collection, modify a collection you are using to fix a bug, or change the behavior of a module in a collection, clone the git repository for that collection and make changes on a branch. You can combine changes to a collection with a local checkout of Ansible (``source hacking/env-setup``).
+
+This section describes the process for `community.general <https://github.com/ansible-collections/community.general/>`_. To contribute to other collections, replace the folder names ``community`` and ``general`` with the namespace and collection name of a different collection.
+
+We assume that you have included ``~/dev/ansible/collections/`` in :ref:`COLLECTIONS_PATHS` and, if that setting lists multiple directories, that no directory earlier in the search path contains another copy of ``community.general``. Create the directory ``~/dev/ansible/collections/ansible_collections/community``, and in it clone `the community.general Git repository <https://github.com/ansible-collections/community.general/>`_ or a fork of it into the folder ``general``::
+
+ mkdir -p ~/dev/ansible/collections/ansible_collections/community
+ cd ~/dev/ansible/collections/ansible_collections/community
+ git clone git@github.com:ansible-collections/community.general.git general
+
+If you clone a fork, add the original repository as a remote ``upstream``::
+
+ cd ~/dev/ansible/collections/ansible_collections/community/general
+ git remote add upstream git@github.com:ansible-collections/community.general.git
+
+Now you can use this checkout of ``community.general`` in playbooks and roles with whichever version of Ansible you have installed locally, including a local checkout of ``ansible/ansible``'s ``devel`` branch.
+
+For collections hosted in the ``ansible_collections`` GitHub org, create a branch and commit your changes on the branch. When you are done (remember to add tests, see :ref:`testing_collections`), push your changes to your fork of the collection and create a Pull Request. For other collections, especially for collections not hosted on GitHub, check the ``README.md`` of the collection for information on contributing to it.
+
+.. _collection_changelogs:
+
+Generating changelogs for a collection
+======================================
+
+We recommend that you use the `antsibull-changelog <https://github.com/ansible-community/antsibull-changelog>`_ tool to generate Ansible-compatible changelogs for your collection. The Ansible changelog uses the output of this tool to collate all the collections included in an Ansible release into one combined changelog for the release.
+
+.. note::
+
+ Ansible here refers to the Ansible 2.10 or later release that includes a curated set of collections.
+
+Understanding antsibull-changelog
+---------------------------------
+
+The ``antsibull-changelog`` tool allows you to create and update changelogs for Ansible collections that are compatible with the combined Ansible changelogs. This is an update to the changelog generator used in prior Ansible releases. The tool adds three new changelog fragment categories: ``breaking_changes``, ``security_fixes`` and ``trivial``. The tool also generates the ``changelog.yaml`` file that Ansible uses to create the combined ``CHANGELOG.rst`` file and Porting Guide for the release.
+
+See :ref:`changelogs_how_to` and the `antsibull-changelog documentation <https://github.com/ansible-community/antsibull-changelog/tree/main/docs>`_ for complete details.
+
+.. note::
+
+ The collection maintainers set the changelog policy for their collections. See the individual collection contributing guidelines for complete details.
+
+Generating changelogs
+---------------------
+
+To initialize changelog generation:
+
+#. Install ``antsibull-changelog``: :code:`pip install antsibull-changelog`.
+#. Initialize changelogs for your repository: :code:`antsibull-changelog init <path/to/your/collection>`.
+#. Optionally, edit the ``changelogs/config.yaml`` file to customize the location of the generated changelog ``.rst`` file or other options. See `Bootstrapping changelogs for collections <https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelogs.rst#bootstrapping-changelogs-for-collections>`_ for details.
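+
+Between releases, you record your changes in changelog fragments stored in ``changelogs/fragments/``. As a hedged example, a fragment file (the file name and entries below are hypothetical) might look like this:
+
+.. code-block:: yaml
+
+    # changelogs/fragments/fix-foo-crash.yml
+    bugfixes:
+      - foo module - fix a crash when the remote host is unreachable.
+    minor_changes:
+      - foo module - add the ``bar`` option.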
+
+To generate changelogs from the changelog fragments you created:
+
+#. Optionally, validate your changelog fragments: :code:`antsibull-changelog lint`.
+#. Generate the changelog for your release: :code:`antsibull-changelog release [--version version_number]`.
+
+.. note::
+
+ Add the ``--reload-plugins`` option if you ran the ``antsibull-changelog release`` command previously and the version of the collection has not changed. ``antsibull-changelog`` caches the information on all plugins and does not update its cache until the collection version changes.
+
+
+Porting Guide entries
+----------------------
+
+The Ansible changelog generator pulls the following changelog fragment categories into the Ansible Porting Guide:
+
+* ``major_changes``
+* ``breaking_changes``
+* ``deprecated_features``
+* ``removed_features``
+
+Including collection changelogs into Ansible
+=============================================
+
+
+If your collection is part of Ansible, use one of the following three options to include your changelog in the Ansible release changelog:
+
+* Use the ``antsibull-changelog`` tool.
+
+* If you are not using this tool, include the properly formatted ``changelog.yaml`` file in your collection. See the `changelog.yaml format <https://github.com/ansible-community/antsibull-changelog/blob/main/docs/changelog.yaml-format.md>`_ for details.
+
+* Add a link to your own changelogs or release notes in any format by opening an issue at https://github.com/ansible-community/ansible-build-data/ with the HTML link to that information.
+
+.. note::
+
+ For the first two options, Ansible pulls the changelog details from Galaxy so your changelogs must be included in the collection version on Galaxy that is included in the upcoming Ansible release.
+
+.. seealso::
+
+ :ref:`collections`
+ Learn how to install and use collections.
+ :ref:`collections_galaxy_meta`
+ Understand the collections metadata structure.
+ :ref:`developing_modules_general`
+ Learn about how to write Ansible modules
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ The development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/developing_core.rst b/docs/docsite/rst/dev_guide/developing_core.rst
new file mode 100644
index 00000000..602f9aaf
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_core.rst
@@ -0,0 +1,21 @@
+***************************
+Developing ``ansible-base``
+***************************
+
+Although ``ansible-base`` (the code hosted in the `ansible/ansible repository <https://github.com/ansible/ansible>`_ on GitHub) includes a few plugins that can be swapped out via playbook directives or configuration, much of the code there is not modular. The documents here give insight into how the parts of ``ansible-base`` work together.
+
+.. toctree::
+ :maxdepth: 1
+
+ developing_program_flow_modules
+
+.. seealso::
+
+ :ref:`developing_api`
+ Learn about the Python API for task execution
+ :ref:`developing_plugins`
+ Learn about developing plugins
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ The development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible-devel IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/developing_inventory.rst b/docs/docsite/rst/dev_guide/developing_inventory.rst
new file mode 100644
index 00000000..26a56a36
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_inventory.rst
@@ -0,0 +1,422 @@
+.. _developing_inventory:
+
+****************************
+Developing dynamic inventory
+****************************
+
+Ansible can pull inventory information from dynamic sources, including cloud sources, by using the supplied :ref:`inventory plugins <inventory_plugins>`. For details about how to pull inventory information, see :ref:`dynamic_inventory`. If the source you want is not currently covered by existing plugins, you can create your own inventory plugin as with any other plugin type.
+
+In previous versions, you had to create a script or program that could output JSON in the correct format when invoked with the proper arguments.
+You can still use and write inventory scripts, as we ensured backwards compatibility via the :ref:`script inventory plugin <script_inventory>`
+and there is no restriction on the programming language used.
+If you choose to write a script, however, you will need to implement some features yourself such as caching, configuration management, dynamic variable and group composition, and so on.
+If you use :ref:`inventory plugins <inventory_plugins>` instead, you can leverage the Ansible codebase and add these common features automatically.
+
+.. contents:: Topics
+ :local:
+
+
+.. _inventory_sources:
+
+Inventory sources
+=================
+
+Inventory sources are the input strings that inventory plugins work with.
+An inventory source can be a path to a file or to a script, or it can be raw data that the plugin can interpret.
+
+The table below shows some examples of inventory plugins and the source types that you can pass to them with ``-i`` on the command line.
+
++--------------------------------------------+-----------------------------------------+
+| Plugin | Source |
++--------------------------------------------+-----------------------------------------+
+| :ref:`host list <host_list_inventory>` | A comma-separated list of hosts |
++--------------------------------------------+-----------------------------------------+
+| :ref:`yaml <yaml_inventory>` | Path to a YAML format data file |
++--------------------------------------------+-----------------------------------------+
+| :ref:`constructed <constructed_inventory>` | Path to a YAML configuration file |
++--------------------------------------------+-----------------------------------------+
+| :ref:`ini <ini_inventory>` | Path to an INI formatted data file |
++--------------------------------------------+-----------------------------------------+
+| :ref:`virtualbox <virtualbox_inventory>` | Path to a YAML configuration file |
++--------------------------------------------+-----------------------------------------+
+| :ref:`script plugin <script_inventory>` | Path to an executable that outputs JSON |
++--------------------------------------------+-----------------------------------------+
+
+
+.. _developing_inventory_inventory_plugins:
+
+Inventory plugins
+=================
+
+Like most plugin types (except modules), inventory plugins must be developed in Python. They execute on the controller and should therefore adhere to the :ref:`control_node_requirements`.
+
+Most of the documentation in :ref:`developing_plugins` also applies here. You should read that document first for a general understanding and then come back to this document for specifics on inventory plugins.
+
+Normally, inventory plugins are executed at the start of a run, and before the playbooks, plays, or roles are loaded.
+However, you can use the ``meta: refresh_inventory`` task to clear the current inventory and execute the inventory plugins again, and this task will generate a new inventory.
+
+If you use the persistent cache, inventory plugins can also use the configured cache plugin to store and retrieve data. Caching inventory avoids making repeated and costly external calls.
+
+.. _developing_an_inventory_plugin:
+
+Developing an inventory plugin
+------------------------------
+
+The first thing you want to do is use the base class:
+
+.. code-block:: python
+
+    from ansible.plugins.inventory import BaseInventoryPlugin
+
+    class InventoryModule(BaseInventoryPlugin):
+
+        NAME = 'myplugin'  # used internally by Ansible; it should match the file name, but this is not required
+
+If the inventory plugin is in a collection, the NAME should be in the ``namespace.collection_name.myplugin`` format. The base class has a couple of methods that each plugin should implement and a few helpers for parsing the inventory source and updating the inventory.
+
+After you have the basic plugin working, you can incorporate other features by adding more base classes:
+
+.. code-block:: python
+
+ from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+
+ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'myplugin'
+
+For the bulk of the work in a plugin, you will mostly deal with two methods: ``verify_file`` and ``parse``.
+
+.. _inventory_plugin_verify_file:
+
+verify_file method
+^^^^^^^^^^^^^^^^^^
+
+Ansible uses this method to quickly determine if the inventory source is usable by the plugin. The determination does not need to be 100% accurate, as there might be an overlap in what plugins can handle; by default, Ansible tries the enabled plugins in order.
+
+.. code-block:: python
+
+ def verify_file(self, path):
+ ''' return true/false if this is possibly a valid file for this plugin to consume '''
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ # base class verifies that file exists and is readable by current user
+ if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')):
+ valid = True
+ return valid
+
+In the above example, from the :ref:`virtualbox inventory plugin <virtualbox_inventory>`, we screen for specific file name patterns to avoid attempting to consume any valid YAML file. You can add any type of condition here, but the most common one is 'extension matching'. If you implement extension matching for YAML configuration files, the path suffix ``<plugin_name>.<yml|yaml>`` should be accepted. All valid extensions should be documented in the plugin description.
+
+The following is another example that does not use a 'file' but the inventory source string itself,
+from the :ref:`host list <host_list_inventory>` plugin:
+
+.. code-block:: python
+
+ def verify_file(self, path):
+ ''' don't call base class as we don't expect a path, but a host list '''
+ host_list = path
+ valid = False
+ b_path = to_bytes(host_list, errors='surrogate_or_strict')
+ if not os.path.exists(b_path) and ',' in host_list:
+ # the path does NOT exist and there is a comma to indicate this is a 'host list'
+ valid = True
+ return valid
+
+This method is just to expedite the inventory process and avoid unnecessary parsing of sources that are easy to filter out before causing a parse error.
+
+.. _inventory_plugin_parse:
+
+parse method
+^^^^^^^^^^^^
+
+This method does the bulk of the work in the plugin.
+It takes the following parameters:
+
+ * inventory: inventory object with existing data and the methods to add hosts/groups/variables to inventory
+ * loader: Ansible's DataLoader. The DataLoader can read files, auto load JSON/YAML and decrypt vaulted data, and cache read files.
+ * path: string with inventory source (this is usually a path, but is not required)
+ * cache: indicates whether the plugin should use or avoid caches (cache plugin and/or loader)
+
+
+The base class does some minimal assignment for reuse in other methods.
+
+.. code-block:: python
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ self.loader = loader
+ self.inventory = inventory
+ self.templar = Templar(loader=loader)
+
+It is up to the plugin now to parse the provided inventory source and translate it into Ansible inventory.
+To facilitate this, the example below uses a few helper functions:
+
+.. code-block:: python
+
+ NAME = 'myplugin'
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ # call base method to ensure properties are available for use with other helper methods
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+
+ # this method will parse 'common format' inventory sources and
+ # update any options declared in DOCUMENTATION as needed
+ config = self._read_config_data(path)
+
+ # if NOT using _read_config_data you should call set_options directly,
+ # to process any defined configuration for this plugin,
+ # if you don't define any options you can skip
+ #self.set_options()
+
+ # example consuming options from inventory source
+ mysession = apilib.session(user=self.get_option('api_user'),
+ password=self.get_option('api_pass'),
+ server=self.get_option('api_server')
+ )
+
+
+ # make requests to get data to feed into inventory
+ mydata = mysession.getitall()
+
+ #parse data and create inventory objects:
+ for colo in mydata:
+ for server in mydata[colo]['servers']:
+ self.inventory.add_host(server['name'])
+ self.inventory.set_variable(server['name'], 'ansible_host', server['external_ip'])
+
+The specifics will vary depending on the API and the structure it returns. Remember that if you get an inventory source error or any other issue, you should ``raise AnsibleParserError`` to let Ansible know that the source was invalid or the process failed.
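+
+For example, a hedged sketch of wrapping the API call from the example above (``apilib`` and its ``APIError`` exception are assumptions carried over from that example):
+
+.. code-block:: python
+
+    from ansible.errors import AnsibleParserError
+
+    try:
+        # make requests to get data to feed into inventory
+        mydata = mysession.getitall()
+    except apilib.APIError as e:
+        # tell Ansible that this inventory source could not be processed
+        raise AnsibleParserError('Unable to query %s: %s' % (self.get_option('api_server'), e))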
+
+For examples on how to implement an inventory plugin, see the source code here:
+`lib/ansible/plugins/inventory <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/inventory>`_.
+
+.. _inventory_plugin_caching:
+
+inventory cache
+^^^^^^^^^^^^^^^
+
+To cache the inventory, extend the inventory plugin documentation with the inventory_cache documentation fragment and use the Cacheable base class.
+
+.. code-block:: yaml
+
+ extends_documentation_fragment:
+ - inventory_cache
+
+.. code-block:: python
+
+ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'myplugin'
+
+Next, load the cache plugin specified by the user to read from and update the cache. If your inventory plugin uses YAML-based configuration files and the ``_read_config_data`` method, the cache plugin is loaded within that method. If your inventory plugin does not use ``_read_config_data``, you must load the cache explicitly with ``load_cache_plugin``.
+
+.. code-block:: python
+
+ NAME = 'myplugin'
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self.load_cache_plugin()
+
+Before using the cache plugin, you must retrieve a unique cache key by using the ``get_cache_key`` method. This task needs to be done by all inventory modules using the cache, so that you don't use/overwrite other parts of the cache.
+
+.. code-block:: python
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self.load_cache_plugin()
+ cache_key = self.get_cache_key(path)
+
+Now that you've enabled caching, loaded the correct plugin, and retrieved a unique cache key, you can set up the flow of data between the cache and your inventory using the ``cache`` parameter of the ``parse`` method. This value comes from the inventory manager and indicates whether the inventory is being refreshed (such as via ``--flush-cache`` or the meta task ``refresh_inventory``). Although the cache shouldn't be used to populate the inventory when being refreshed, the cache should be updated with the new inventory if the user has enabled caching. You can use ``self._cache`` like a dictionary. The following pattern allows refreshing the inventory to work in conjunction with caching.
+
+.. code-block:: python
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self.load_cache_plugin()
+ cache_key = self.get_cache_key(path)
+
+ # cache may be True or False at this point to indicate if the inventory is being refreshed
+ # get the user's cache option too to see if we should save the cache if it is changing
+ user_cache_setting = self.get_option('cache')
+
+ # read if the user has caching enabled and the cache isn't being refreshed
+ attempt_to_read_cache = user_cache_setting and cache
+ # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
+ cache_needs_update = user_cache_setting and not cache
+
+ # attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
+ if attempt_to_read_cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
+ cache_needs_update = True
+
+ if cache_needs_update:
+ results = self.get_inventory()
+
+ # set the cache
+ self._cache[cache_key] = results
+
+ self.populate(results)
+
+After the ``parse`` method is complete, the contents of ``self._cache`` are used to set the cache plugin if the contents of the cache have changed.
+
+You have three other cache methods available:
+ - ``set_cache_plugin`` forces the cache plugin to be set with the contents of ``self._cache``, before the ``parse`` method completes
+ - ``update_cache_if_changed`` sets the cache plugin only if ``self._cache`` has been modified, before the ``parse`` method completes
+ - ``clear_cache`` flushes the cache, ultimately by calling the cache plugin's ``flush()`` method, whose implementation is dependent upon the particular cache plugin in use. Note that if the user is using the same cache backend for facts and inventory, both will get flushed. To avoid this, the user can specify a distinct cache backend in their inventory plugin configuration.
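+
+For example, a plugin that wants to persist its results before ``parse`` completes could call one of these helpers directly. A minimal sketch, reusing the assumed ``get_inventory`` helper from above:
+
+.. code-block:: python
+
+    def parse(self, inventory, loader, path, cache=True):
+        super(InventoryModule, self).parse(inventory, loader, path)
+
+        self.load_cache_plugin()
+        cache_key = self.get_cache_key(path)
+
+        self._cache[cache_key] = self.get_inventory()
+        # write the cache now instead of waiting for parse to complete
+        self.update_cache_if_changed()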
+
+.. _inventory_source_common_format:
+
+Common format for inventory sources
+-----------------------------------
+
+To simplify development, most plugins use a standard YAML-based configuration file as the inventory source. The file has only one required field, ``plugin``, which should contain the name of the plugin that is expected to consume the file.
+Depending on other common features used, you might need other fields, and you can add custom options in each plugin as required.
+For example, if you use the integrated caching, ``cache_plugin``, ``cache_timeout`` and other cache-related fields could be present.
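+
+For instance, a configuration file for the hypothetical ``myplugin`` above, with caching enabled, might look like this (all option values are illustrative assumptions):
+
+.. code-block:: yaml
+
+    # myplugin.yml
+    plugin: myplugin
+    api_user: someuser
+    cache: true
+    cache_plugin: jsonfile
+    cache_timeout: 7200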
+
+.. _inventory_development_auto:
+
+The 'auto' plugin
+-----------------
+
+From Ansible 2.5 onwards, we include the :ref:`auto inventory plugin <auto_inventory>` and enable it by default. If the ``plugin`` field in your standard configuration file matches the name of your inventory plugin, the ``auto`` inventory plugin will load your plugin. The 'auto' plugin makes it easier to use your plugin without having to update configurations.
+
+
+.. _inventory_scripts:
+.. _developing_inventory_scripts:
+
+Inventory scripts
+=================
+
+Even though we now have inventory plugins, we still support inventory scripts, not only for backwards compatibility but also to allow users to leverage other programming languages.
+
+
+.. _inventory_script_conventions:
+
+Inventory script conventions
+----------------------------
+
+Inventory scripts must accept the ``--list`` and ``--host <hostname>`` arguments. Although other arguments are allowed, Ansible will not use them.
+Such arguments might still be useful for executing the scripts directly.
+
+When the script is called with the single argument ``--list``, the script must output to stdout a JSON-encoded hash or
+dictionary that contains all the groups to be managed. Each group's value should be either a hash or dictionary containing a list of each host, any child groups, and potential group variables, or simply a list of hosts::
+
+
+ {
+ "group001": {
+ "hosts": ["host001", "host002"],
+ "vars": {
+ "var1": true
+ },
+ "children": ["group002"]
+ },
+ "group002": {
+ "hosts": ["host003","host004"],
+ "vars": {
+ "var2": 500
+ },
+ "children":[]
+ }
+
+ }
+
+If any of the elements of a group are empty, they may be omitted from the output.
+
+When called with the argument ``--host <hostname>`` (where <hostname> is a host from above), the script must print either an empty JSON hash/dictionary, or a hash/dictionary of variables to make them available to templates and playbooks. For example::
+
+
+    {
+        "VAR001": "VALUE",
+        "VAR002": "VALUE"
+    }
+
+Printing variables is optional. If the script does not print variables, it should print an empty hash or dictionary.
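+
+A minimal, hedged skeleton of such a script (the group, host, and variable names are placeholders) could look like this:
+
+.. code-block:: python
+
+    #!/usr/bin/env python
+    import json
+    import sys
+
+    GROUPS = {
+        'group001': {
+            'hosts': ['host001', 'host002'],
+            'vars': {'var1': True},
+        },
+    }
+    HOSTVARS = {'host001': {'var001': 'VALUE'}}
+
+    if len(sys.argv) == 2 and sys.argv[1] == '--list':
+        print(json.dumps(GROUPS))
+    elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+        # print the host's variables, or an empty dictionary if it has none
+        print(json.dumps(HOSTVARS.get(sys.argv[2], {})))
+    else:
+        sys.stderr.write('usage: %s --list | --host <hostname>\n' % sys.argv[0])
+        sys.exit(1)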
+
+.. _inventory_script_tuning:
+
+Tuning the external inventory script
+------------------------------------
+
+.. versionadded:: 1.3
+
+The stock inventory script system mentioned above works for all versions of Ansible, but calling ``--host`` for every host can be rather inefficient, especially if it involves API calls to a remote subsystem.
+
+To avoid this inefficiency, if the inventory script returns a top-level element called "_meta", it is possible to return all the host variables in a single script execution. When this meta element contains a value for "hostvars", the inventory script will not be invoked with ``--host`` for each host. This behavior results in a significant performance increase for large numbers of hosts.
+
+The data to be added to the top-level JSON dictionary looks like this::
+
+ {
+
+ # results of inventory script as above go here
+ # ...
+
+ "_meta": {
+ "hostvars": {
+ "host001": {
+ "var001" : "value"
+ },
+ "host002": {
+ "var002": "value"
+ }
+ }
+ }
+ }
+
+To satisfy the requirements of using ``_meta`` and to prevent Ansible from calling your inventory script with ``--host`` for each host, you must at least populate ``_meta`` with an empty ``hostvars`` dictionary.
+For example::
+
+ {
+
+ # results of inventory script as above go here
+ # ...
+
+ "_meta": {
+ "hostvars": {}
+ }
+ }
+
+
+.. _replacing_inventory_ini_with_dynamic_provider:
+
+If you intend to replace an existing static inventory file with an inventory script, it must return a JSON object which contains an 'all' group that includes every host in the inventory as a member and every group in the inventory as a child. It should also include an 'ungrouped' group which contains all hosts which are not members of any other group.
+A skeleton example of this JSON object is:
+
+.. code-block:: json
+
+ {
+ "_meta": {
+ "hostvars": {}
+ },
+ "all": {
+ "children": [
+ "ungrouped"
+ ]
+ },
+ "ungrouped": {
+ "children": [
+ ]
+ }
+ }
+
+An easy way to see how this should look is using :ref:`ansible-inventory`, which also supports ``--list`` and ``--host`` parameters like an inventory script would.
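+
+For example, assuming an inventory source called ``inventory.yml`` (a hypothetical file name)::
+
+    ansible-inventory -i inventory.yml --list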
+
+.. seealso::
+
+ :ref:`developing_api`
+ Python API to Playbooks and Ad Hoc Task Execution
+ :ref:`developing_modules_general`
+ Get started with developing a module
+ :ref:`developing_plugins`
+ How to develop plugins
+ `Ansible Tower <https://www.ansible.com/products/tower>`_
+ REST API endpoint and GUI for Ansible, syncs with dynamic inventory
+ `Development Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Mailing list for development topics
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/developing_locally.rst b/docs/docsite/rst/dev_guide/developing_locally.rst
new file mode 100644
index 00000000..4c7f6b71
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_locally.rst
@@ -0,0 +1,105 @@
+.. _using_local_modules_and_plugins:
+.. _developing_locally:
+
+**********************************
+Adding modules and plugins locally
+**********************************
+
+The easiest, quickest, and most popular way to extend Ansible is to use a local module or a plugin. You can create them or copy existing ones for local use. You can store a local module or plugin on your Ansible control node and share it with your team or organization. You can also share a local plugin or module by including it in a collection or embedding it in a role, then publishing the collection or role on Ansible Galaxy. If you are using roles on Ansible Galaxy, then you are already using local modules and plugins without realizing it.
+
+If you are using an existing module or plugin but Ansible can't find it, this page is all you need. However, if you want to create a plugin or a module, see the :ref:`developing_plugins` and :ref:`developing_modules_general` topics, then return to this page to learn how to add it locally.
+
+Extending Ansible with local modules and plugins offers lots of shortcuts such as:
+
+* You can copy other people's modules and plugins.
+* When writing a new module, you can choose any programming language you like.
+* You do not have to clone any repositories.
+* You do not have to open a pull request.
+* You do not have to add tests (though we recommend that you do!).
+
+To save a local module or plugin such that Ansible can find and use it, add the module or plugin to the appropriate directory (the directories are described later in this topic).
+
+.. contents::
+ :local:
+
+.. _modules_vs_plugins:
+
+Modules and plugins: what is the difference?
+============================================
+If you are looking to add local functionality to Ansible, you might wonder whether you need a module or a plugin. Here is a quick overview to help you decide between the two:
+
+* Modules are reusable, standalone scripts that can be used by the Ansible API, the :command:`ansible` command, or the :command:`ansible-playbook` command. Modules provide a defined interface. Each module accepts arguments and returns information to Ansible by printing a JSON string to stdout before exiting. Modules execute on the target system (usually that means on a remote system) in separate processes.
+* :ref:`Plugins <plugins_lookup>` augment Ansible's core functionality and execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible - transforming data, logging output, connecting to inventory, and more.
+
+.. _local_modules:
+
+Adding a module locally
+=======================
+Ansible automatically loads all executable files found in certain directories as modules.
+
+For local modules, use the name of the file as the module name: for example, if the module file is ``~/.ansible/plugins/modules/local_users.py``, use ``local_users`` as the module name.
+
+To load your local modules automatically and make them available to all playbooks and roles, add them in any of these locations:
+
+* any directory added to the ``ANSIBLE_LIBRARY`` environment variable (``$ANSIBLE_LIBRARY`` takes a colon-separated list like ``$PATH``)
+* ``~/.ansible/plugins/modules/``
+* ``/usr/share/ansible/plugins/modules/``
+
+After you save your module file in one of these locations, Ansible loads it and you can use it in any local task, playbook, or role.
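+
+As an illustration, a minimal local module saved as ``~/.ansible/plugins/modules/my_custom_module.py`` (a hypothetical name, reused in the checks below) might look like this:
+
+.. code-block:: python
+
+    #!/usr/bin/python
+    from ansible.module_utils.basic import AnsibleModule
+
+
+    def main():
+        module = AnsibleModule(argument_spec=dict(name=dict(type='str', default='world')))
+        module.exit_json(changed=False, msg='Hello, %s!' % module.params['name'])
+
+
+    if __name__ == '__main__':
+        main()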
+
+To confirm that ``my_custom_module`` is available:
+
+* type ``ansible localhost -m my_custom_module``. You should see the output for that module.
+
+or
+
+* type ``ansible-doc -t module my_custom_module``. You should see the documentation for that module.
+
+.. note::
+
+ Currently, the ``ansible-doc`` command can parse module documentation only from modules written in Python. If you have a module written in a programming language other than Python, please write the documentation in a Python file adjacent to the module file.
+
+You can limit the availability of your local module. If you want to use a local module only with selected playbooks or only with a single role, load it in one of the following locations:
+
+* In a selected playbook or playbooks: Store the module in a subdirectory called ``library`` in the directory that contains those playbooks.
+* In a single role: Store the module in a subdirectory called ``library`` within that role.
+
+.. _distributing_plugins:
+.. _local_plugins:
+
+Adding a plugin locally
+=======================
+Ansible loads plugins automatically too, and loads each type of plugin separately from a directory named for the type of plugin. Here's the full list of plugin directory names:
+
+ * action_plugins*
+ * cache_plugins
+ * callback_plugins
+ * connection_plugins
+ * filter_plugins*
+ * inventory_plugins
+ * lookup_plugins
+ * shell_plugins
+ * strategy_plugins
+ * test_plugins*
+ * vars_plugins
+
+.. note::
+
+ After you add the plugins and verify that they are available for use, you can see the documentation for all the plugins except for the ones marked with an asterisk (*) above.
+
+To load your local plugins automatically, add them in any of these locations:
+
+* any directory added to the relevant ``ANSIBLE_plugin_type_PLUGINS`` environment variable (these variables, such as ``$ANSIBLE_INVENTORY_PLUGINS`` and ``$ANSIBLE_VARS_PLUGINS``, take colon-separated lists like ``$PATH``)
+* the directory named for the correct ``plugin_type`` within ``~/.ansible/plugins/`` - for example, ``~/.ansible/plugins/callback``
+* the directory named for the correct ``plugin_type`` within ``/usr/share/ansible/plugins/`` - for example, ``/usr/share/ansible/plugins/action``
+
+After your plugin file is in one of these locations, Ansible loads it and you can use it in any local module, task, playbook, or role. Alternatively, you can edit your ``ansible.cfg`` file to add directories that contain local plugins. For details about adding directories of local plugins, see :ref:`ansible_configuration_settings`.
+
+To confirm that ``plugins/plugin_type/my_custom_plugin`` is available:
+
+* type ``ansible-doc -t <plugin_type> my_custom_lookup_plugin``. For example, ``ansible-doc -t lookup my_custom_lookup_plugin``. You should see the documentation for that plugin. This works for all plugin types except the ones marked with ``*`` in the list above - see :ref:`ansible-doc` for more details.
+
+You can limit the availability of your local plugin. If you want to use a local plugin only with selected playbooks or only with a single role, load it in one of the following locations:
+
+* In a selected playbook or playbooks: Store the plugin in a subdirectory for the correct ``plugin_type`` (for example, ``callback_plugins`` or ``inventory_plugins``) in the directory that contains the playbooks.
+* In a single role: Store the plugin in a subdirectory for the correct ``plugin_type`` (for example, ``cache_plugins`` or ``strategy_plugins``) within that role. When shipped as part of a role, the plugin is available as soon as the role is executed.
diff --git a/docs/docsite/rst/dev_guide/developing_module_utilities.rst b/docs/docsite/rst/dev_guide/developing_module_utilities.rst
new file mode 100644
index 00000000..dfeaef55
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_module_utilities.rst
@@ -0,0 +1,69 @@
+.. _developing_module_utilities:
+
+*************************************
+Using and developing module utilities
+*************************************
+
+Ansible provides a number of module utilities, or snippets of shared code, that
+provide helper functions you can use when developing your own modules. The
+``basic.py`` module utility provides the main entry point for accessing the
+Ansible library, and all Python Ansible modules must import something from
+``ansible.module_utils``. A common option is to import ``AnsibleModule``::
+
+ from ansible.module_utils.basic import AnsibleModule
+
+The ``ansible.module_utils`` namespace is not a plain Python package: it is
+constructed dynamically for each task invocation, by extracting imports and
+resolving those matching the namespace against a :ref:`search path <ansible_search_path>` derived from the
+active configuration.
+
+To reduce the maintenance burden in a collection or in local modules, you can extract
+duplicated code into one or more module utilities and import them into your modules. For example, if you have your own custom modules that import a ``my_shared_code`` library, you can place that into a ``./module_utils/my_shared_code.py`` file like this::
+
+ from ansible.module_utils.my_shared_code import MySharedCodeClient
+
+When you run ``ansible-playbook``, Ansible will merge any files in your local ``module_utils`` directories into the ``ansible.module_utils`` namespace in the order defined by the :ref:`Ansible search path <ansible_search_path>`.
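+
+A hedged sketch of what that ``./module_utils/my_shared_code.py`` file could contain (the class body is purely illustrative):
+
+.. code-block:: python
+
+    # ./module_utils/my_shared_code.py
+    class MySharedCodeClient:
+        def __init__(self, endpoint):
+            self.endpoint = endpoint
+
+        def describe(self):
+            return 'client for %s' % self.endpoint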
+
+Naming and finding module utilities
+===================================
+
+You can generally tell what a module utility does from its name and/or its location. Generic utilities (shared code used by many different kinds of modules) live in the main ansible/ansible codebase, in the ``common`` subdirectory or in the root directory of ``lib/ansible/module_utils``. Utilities used by a particular set of modules generally live in the same collection as those modules. For example:
+
+* ``lib/ansible/module_utils/urls.py`` contains shared code for parsing URLs
+* ``openstack.cloud.plugins.module_utils.openstack.py`` contains utilities for modules that work with OpenStack instances
+* ``ansible.netcommon.plugins.module_utils.network.common.config.py`` contains utility functions for use by networking modules
+
+Following this pattern with your own module utilities makes everything easy to find and use.
+
+.. _standard_mod_utils:
+
+Standard module utilities
+=========================
+
+Ansible ships with an extensive library of ``module_utils`` files. You can find the module utility source code in the ``lib/ansible/module_utils`` directory under your main Ansible path. We describe the most widely used utilities below. For more details on any specific module utility, please see the `source code for module_utils <https://github.com/ansible/ansible/tree/devel/lib/ansible/module_utils>`_.
+
+.. include:: shared_snippets/licensing.txt
+
+- ``api.py`` - Supports generic API modules
+- ``basic.py`` - General definitions and helper utilities for Ansible modules
+- ``common/dict_transformations.py`` - Helper functions for dictionary transformations
+- ``common/file.py`` - Helper functions for working with files
+- ``common/text/`` - Helper functions for converting and formatting text
+- ``common/parameters.py`` - Helper functions for dealing with module parameters
+- ``common/sys_info.py`` - Functions for getting distribution and platform information
+- ``common/validation.py`` - Helper functions for validating module parameters against a module argument spec
+- ``facts/`` - Directory of utilities for modules that return facts. See `PR 23012 <https://github.com/ansible/ansible/pull/23012>`_ for more information
+- ``json_utils.py`` - Utilities for filtering unrelated output around module JSON output, like leading and trailing lines
+- ``powershell/`` - Directory of definitions and helper functions for Windows PowerShell modules
+- ``pycompat24.py`` - Exception workaround for Python 2.4
+- ``service.py`` - Utilities to enable modules to work with Linux services (placeholder, not in use)
+- ``six/__init__.py`` - Bundled copy of the `Six Python library <https://pypi.org/project/six/>`_ to aid in writing code compatible with both Python 2 and Python 3
+- ``splitter.py`` - String splitting and manipulation utilities for working with Jinja2 templates
+- ``urls.py`` - Utilities for working with http and https requests
+
+Several commonly-used utilities migrated to collections in Ansible 2.10, including:
+
+- ``ismount.py`` migrated to ``ansible.posix.plugins.module_utils.mount.py`` - Single helper function that fixes os.path.ismount
+- ``known_hosts.py`` migrated to ``community.general.plugins.module_utils.known_hosts.py`` - utilities for working with known_hosts file
+
+For a list of migrated content with destination collections, see https://github.com/ansible/ansible/blob/devel/lib/ansible/config/ansible_builtin_runtime.yml.
diff --git a/docs/docsite/rst/dev_guide/developing_modules.rst b/docs/docsite/rst/dev_guide/developing_modules.rst
new file mode 100644
index 00000000..5cfcf15c
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules.rst
@@ -0,0 +1,51 @@
+.. _developing_modules:
+.. _module_dev_should_you:
+
+****************************
+Should you develop a module?
+****************************
+
+Developing Ansible modules is easy, but often it is not necessary. Before you start writing a new module, ask:
+
+1. Does a similar module already exist?
+
+An existing module may cover the functionality you want. Ansible collections include thousands of modules. Search our :ref:`list of included collections <list_of_collections>` or `Ansible Galaxy <https://galaxy.ansible.com>`_ to see if an existing module does what you need.
+
+2. Should you use or develop an action plugin instead of a module?
+
+An action plugin may be the best way to get the functionality you want. Action plugins run on the control node instead of on the managed node, and their functionality is available to all modules. For more information about developing plugins, read the :ref:`developing plugins page <developing_plugins>`.
+
+3. Should you use a role instead of a module?
+
+A combination of existing modules may cover the functionality you want. You can write a role for this type of use case. Check out the :ref:`roles documentation<playbooks_reuse_roles>`.
+
+4. Should you create a collection instead of a single module?
+
+The functionality you want may be too large for a single module. If you want to connect Ansible to a new cloud provider, database, or network platform, you may need to :ref:`develop a new collection<developing_modules_in_groups>`.
+
+* Each module should have a concise and well defined functionality. Basically, follow the UNIX philosophy of doing one thing well.
+
+* A module should not require that a user know all the underlying options of an API/tool to be used. For instance, if the legal values for a required module parameter cannot be documented, that's a sign that the module would be rejected.
+
+* Modules should typically encompass much of the logic for interacting with a resource. A lightweight wrapper around an API that does not contain much logic would likely cause users to offload too much logic into a playbook, and for this reason the module would be rejected. Instead try creating multiple modules for interacting with smaller individual pieces of the API.
+
+If your use case isn't covered by an existing module, an action plugin, or a role, and you don't need to create multiple modules, then you're ready to start developing a new module. Choose from the topics below for next steps:
+
+* I want to :ref:`get started on a new module <developing_modules_general>`.
+* I want to review :ref:`tips and conventions for developing good modules <developing_modules_best_practices>`.
+* I want to :ref:`write a Windows module <developing_modules_general_windows>`.
+* I want :ref:`an overview of Ansible's architecture <developing_program_flow_modules>`.
+* I want to :ref:`document my module <developing_modules_documenting>`.
+* I want to :ref:`contribute my module back to Ansible Core <developing_modules_checklist>`.
+* I want to :ref:`add unit and integration tests to my module <developing_testing>`.
+* I want to :ref:`add Python 3 support to my module <developing_python_3>`.
+* I want to :ref:`write multiple modules <developing_modules_in_groups>`.
+
+.. seealso::
+
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/developing_modules_best_practices.rst b/docs/docsite/rst/dev_guide/developing_modules_best_practices.rst
new file mode 100644
index 00000000..19787f69
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_best_practices.rst
@@ -0,0 +1,177 @@
+.. _developing_modules_best_practices:
+.. _module_dev_conventions:
+
+*******************************
+Conventions, tips, and pitfalls
+*******************************
+
+.. contents:: Topics
+ :local:
+
+As you design and develop modules, follow these basic conventions and tips for clean, usable code:
+
+Scoping your module(s)
+======================
+
+Especially if you want to contribute your module(s) to an existing Ansible Collection, make sure each module includes enough logic and functionality, but not too much. If these guidelines seem confusing, consider :ref:`whether you really need to write a module <module_dev_should_you>` at all.
+
+* Each module should have a concise and well-defined functionality. Basically, follow the UNIX philosophy of doing one thing well.
+* Do not add ``get``, ``list`` or ``info`` state options to an existing module - create a new ``_info`` or ``_facts`` module.
+* Modules should not require that a user know all the underlying options of an API/tool to be used. For instance, if the legal values for a required module option cannot be documented, the module does not belong in Ansible Core.
+* Modules should encompass much of the logic for interacting with a resource. A lightweight wrapper around a complex API forces users to offload too much logic into their playbooks. If you want to connect Ansible to a complex API, :ref:`create multiple modules <developing_modules_in_groups>` that interact with smaller individual pieces of the API.
+* Avoid creating a module that does the work of other modules; this leads to code duplication and divergence, and makes things less uniform, unpredictable and harder to maintain. Modules should be the building blocks. If you are asking 'how can I have a module execute other modules' ... you want to write a role.
+
+Designing module interfaces
+===========================
+
+* If your module is addressing an object, the option for that object should be called ``name`` whenever possible, or accept ``name`` as an alias.
+* Modules accepting boolean status should accept ``yes``, ``no``, ``true``, ``false``, or anything else a user may likely throw at them. The AnsibleModule common code supports this with ``type='bool'``.
+* Avoid ``action``/``command``, they are imperative and not declarative, there are other ways to express the same thing.
+
+General guidelines & tips
+=========================
+
+* Each module should be self-contained in one file, so it can be auto-transferred by ``ansible-base``.
+* Module names MUST use underscores instead of hyphens or spaces as a word separator. Using hyphens or spaces will prevent ``ansible-base`` from importing your module.
+* Always use the ``hacking/test-module.py`` script when developing modules - it will warn you about common pitfalls.
+* If you have a local module that returns information specific to your installations, a good name for this module is ``site_info``.
+* Eliminate or minimize dependencies. If your module has dependencies, document them at the top of the module file and raise JSON error messages when dependency import fails.
+* Don't write to files directly; use a temporary file and then use the ``atomic_move`` function from ``ansible.module_utils.basic`` to move the updated temporary file into place (see the sketch after this list). This prevents data corruption and ensures that the correct context for the file is kept.
+* Avoid creating caches. Ansible is designed without a central server or authority, so you cannot guarantee it will not run with different permissions, options or locations. If you need a central authority, have it on top of Ansible (for example, using bastion/cm/ci server or tower); do not try to build it into modules.
+* If you package your module(s) in an RPM, install the modules on the control machine in ``/usr/share/ansible``. Packaging modules in RPMs is optional.
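+
+As an example of the ``atomic_move`` pattern mentioned above, here is a hedged sketch (the destination path, content, and helper name are placeholders):
+
+.. code-block:: python
+
+    import os
+    import tempfile
+
+
+    def write_config(module, content, dest='/etc/myapp.conf'):
+        # write to a temporary file first, then move it into place atomically
+        fd, tmp_path = tempfile.mkstemp(dir=module.tmpdir)
+        with os.fdopen(fd, 'w') as f:
+            f.write(content)
+        module.atomic_move(tmp_path, dest)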
+
+Functions and Methods
+=====================
+
+* Each function should be concise and should describe a meaningful amount of work.
+* "Don't repeat yourself" is generally a good philosophy.
+* Function names should use underscores: ``my_function_name``.
+* The name of each function should describe what the function does.
+* Each function should have a docstring.
+* If your code is too nested, that's usually a sign the loop body could benefit from being a function. Parts of our existing code are not the best examples of this at times.
+
+Python tips
+===========
+
+* When fetching URLs, use ``fetch_url`` or ``open_url`` from ``ansible.module_utils.urls``. Do not use ``urllib2``, which does not natively verify TLS certificates and so is insecure for https.
+* Include a ``main`` function that wraps the normal execution.
+* Call your ``main`` function from a conditional so you can import it into unit tests - for example:
+
+.. code-block:: python
+
+ if __name__ == '__main__':
+ main()
+
+.. _shared_code:
+
+Importing and using shared code
+===============================
+
+* Use shared code whenever possible - don't reinvent the wheel. Ansible offers the ``AnsibleModule`` common Python code, plus :ref:`utilities <developing_module_utilities>` for many common use cases and patterns. You can also create documentation fragments for docs that apply to multiple modules.
+* Import ``ansible.module_utils`` code in the same place as you import other libraries.
+* Do NOT use wildcards (*) for importing other Python modules; instead, list the function(s) you are importing (for example, ``from some.other_python_module.basic import otherFunction``).
+* Import custom packages in ``try``/``except``, capture any import errors, and handle them with ``fail_json()`` in ``main()``. For example:
+
+.. code-block:: python
+
+    import traceback
+
+    from ansible.module_utils.basic import missing_required_lib
+
+    LIB_IMP_ERR = None
+    try:
+        import foo
+        HAS_LIB = True
+    except ImportError:
+        HAS_LIB = False
+        LIB_IMP_ERR = traceback.format_exc()
+
+
+Then in ``main()``, just after the argspec, do
+
+.. code-block:: python
+
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib("foo"),
+ exception=LIB_IMP_ERR)
+
+
+And document the dependency in the ``requirements`` section of your module's :ref:`documentation_block`.
+
+.. _module_failures:
+
+Handling module failures
+========================
+
+When your module fails, help users understand what went wrong. If you are using the ``AnsibleModule`` common Python code, the ``failed`` element will be included for you automatically when you call ``fail_json``. For polite module failure behavior:
+
+* Include a key of ``failed`` along with a string explanation in ``msg``. If you don't do this, Ansible will use standard return codes: 0=success and non-zero=failure.
+* Don't raise a traceback (stacktrace). Ansible can deal with stacktraces and automatically converts anything unparseable into a failed result, but raising a stacktrace on module failure is not user-friendly.
+* Do not use ``sys.exit()``. Use ``fail_json()`` from the module object.
+
+Handling exceptions (bugs) gracefully
+=====================================
+
+* Validate upfront--fail fast and return useful and clear error messages.
+* Use defensive programming--use a simple design for your module, handle errors gracefully, and avoid direct stacktraces.
+* Fail predictably--if we must fail, do it in a way that is the most expected. Either mimic the underlying tool or the general way the system works.
+* Give out a useful message on what you were doing and add exception messages to that.
+* Avoid catchall exceptions; they are not very useful unless the underlying API gives very good error messages pertaining to the attempted action.
+
+.. _module_output:
+
+Creating correct and informative module output
+==============================================
+
+Modules must output valid JSON only. Follow these guidelines for creating correct, useful module output:
+
+* Make your top-level return type a hash (dictionary).
+* Nest complex return values within the top-level hash.
+* Incorporate any lists or simple scalar values within the top-level return hash.
+* Do not send module output to standard error, because the system will merge standard out with standard error and prevent the JSON from parsing.
+* Capture standard error and return it as a variable in the JSON on standard out. This is how the command module is implemented.
+* Never do ``print("some status message")`` in a module, because it will not produce valid JSON output.
+* Always return useful data, even when there is no change.
+* Be consistent about returns (some modules are too random), unless it is detrimental to the state/action.
+* Make returns reusable--most of the time you don't want to read it, but you do want to process it and re-purpose it.
+* Return diff if in diff mode. This is not required for all modules, as it won't make sense for certain ones, but please include it when applicable.
+* Enable your return values to be serialized as JSON with Python's standard `JSON encoder and decoder <https://docs.python.org/3/library/json.html>`_ library. Basic python types (strings, int, dicts, lists, and so on) are serializable.
+* Do not return an object using ``exit_json()``. Instead, convert the fields you need from the object into the fields of a dictionary and return the dictionary.
+* Results from many hosts will be aggregated at once, so your module should return only relevant output. Returning the entire contents of a log file is generally bad form.
+
+If a module returns stderr or otherwise fails to produce valid JSON, the actual output will still be shown in Ansible, but the command will not succeed.
+
+.. _module_conventions:
+
+Following Ansible conventions
+=============================
+
+Ansible conventions offer a predictable user interface across all modules, playbooks, and roles. To follow Ansible conventions in your module development:
+
+* Use consistent names across modules (yes, we have many legacy deviations - don't make the problem worse!).
+* Use consistent options (arguments) within your module(s).
+* Do not use 'message' or 'syslog_facility' as an option name, because these names are used internally by Ansible.
+* Normalize options with other modules - if Ansible and the API your module connects to use different names for the same option, add aliases to your options so the user can choose which names to use in tasks and playbooks.
+* Return facts from ``*_facts`` modules in the ``ansible_facts`` field of the :ref:`result dictionary<common_return_values>` so other modules can access them.
+* Implement ``check_mode`` in all ``*_info`` and ``*_facts`` modules. Playbooks which conditionalize based on fact information will only conditionalize correctly in ``check_mode`` if the facts are returned in ``check_mode``. Usually you can add ``supports_check_mode=True`` when instantiating ``AnsibleModule``.
+* Use module-specific environment variables. For example, if you use the helpers in ``module_utils.api`` for basic authentication with ``module_utils.urls.fetch_url()`` and you fall back on environment variables for default values, use a module-specific environment variable like :code:`API_<MODULENAME>_USERNAME` to avoid conflicts between modules.
+* Keep module options simple and focused - if you're loading a lot of choices/states on an existing option, consider adding a new, simple option instead.
+* Keep options small when possible. Passing a large data structure to an option might save us a few tasks, but it adds a complex requirement that we cannot easily validate before passing on to the module.
+* If you want to pass complex data to an option, write an expert module that allows this, along with several smaller modules that provide a more 'atomic' operation against the underlying APIs and services. Complex operations require complex data. Let the user choose whether to reflect that complexity in tasks and plays or in vars files.
+* Implement declarative operations (not CRUD) so the user can ignore existing state and focus on final state. For example, use ``started/stopped``, ``present/absent``.
+* Strive for a consistent final state (aka idempotency). If running your module twice in a row against the same system would result in two different states, see if you can redesign or rewrite to achieve consistent final state. If you can't, document the behavior and the reasons for it.
+* Provide consistent return values within the standard Ansible return structure, even if NA/None are used for keys normally returned under other options.
+* Follow additional guidelines that apply to families of modules if applicable. For example, AWS modules should follow the :ref:`Amazon development checklist <AWS_module_development>`.
+
+
+Module Security
+===============
+
+* Avoid passing user input from the shell.
+* Always check return codes.
+* You must always use ``module.run_command``, not ``subprocess``, ``Popen``, or ``os.system``.
+* Avoid using the shell unless absolutely necessary.
+* If you must use the shell, you must pass ``use_unsafe_shell=True`` to ``module.run_command``.
+* If any variables in your module can come from user input with ``use_unsafe_shell=True``, you must wrap them with ``pipes.quote(x)`` (see the sketch below).
+* When fetching URLs, use ``fetch_url`` or ``open_url`` from ``ansible.module_utils.urls``. Do not use ``urllib2``, which does not natively verify TLS certificates and so is insecure for https.
+* Sensitive values marked with ``no_log=True`` will automatically have that value stripped from module return values. If your module could return these sensitive values as part of a dictionary key name, you should call the ``ansible.module_utils.basic.sanitize_keys()`` function to strip the values from the keys. See the ``uri`` module for an example.
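+
+For example, here is a minimal sketch of both command patterns; the command name and the ``user_input`` variable are hypothetical:
+
+.. code-block:: python
+
+    from pipes import quote  # shlex.quote is the Python 3 equivalent
+
+    # Preferred: pass an argument list, so no shell is involved.
+    rc, out, err = module.run_command(['/usr/bin/somecommand', '--name', user_input])
+    if rc != 0:
+        module.fail_json(msg='somecommand failed', rc=rc, stderr=err)
+
+    # Only when a shell feature (such as a pipe) is unavoidable:
+    cmd = '/usr/bin/somecommand --name %s | grep enabled' % quote(user_input)
+    rc, out, err = module.run_command(cmd, use_unsafe_shell=True)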
diff --git a/docs/docsite/rst/dev_guide/developing_modules_checklist.rst b/docs/docsite/rst/dev_guide/developing_modules_checklist.rst
new file mode 100644
index 00000000..492b6015
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_checklist.rst
@@ -0,0 +1,46 @@
+.. _developing_modules_checklist:
+.. _module_contribution:
+
+**********************************************************
+Contributing your module to an existing Ansible collection
+**********************************************************
+
+If you want to contribute a module to an existing collection, you must meet the community's objective and subjective requirements. Please read the details below, and also review our :ref:`tips for module development <developing_modules_best_practices>`.
+
+Modules accepted into certain collections are included in every Ansible release on PyPI. However, contributing to one of these collections is not the only way to distribute a module - you can :ref:`create your own collection <developing_collections>`, embed modules in roles on Galaxy or simply share copies of your module code for :ref:`local use <developing_locally>`.
+
+Contributing modules: objective requirements
+===============================================
+
+To contribute a module to most Ansible collections, you must:
+
+* write your module in either Python or PowerShell for Windows
+* use the ``AnsibleModule`` common code
+* support Python 2.6 and Python 3.5 - if your module cannot support Python 2.6, explain the required minimum Python version and rationale in the requirements section in ``DOCUMENTATION``
+* use proper :ref:`Python 3 syntax <developing_python_3>`
+* follow `PEP 8 <https://www.python.org/dev/peps/pep-0008/>`_ Python style conventions - see :ref:`testing_pep8` for more information
+* license your module under the GPL license (GPLv3 or later)
+* understand the :ref:`license agreement <contributor_license_agreement>`, which applies to all contributions
+* conform to Ansible's :ref:`formatting and documentation <developing_modules_documenting>` standards
+* include comprehensive :ref:`tests <developing_testing>` for your module
+* minimize module dependencies
+* support :ref:`check_mode <check_mode_dry>` if possible
+* ensure your code is readable
+* name a module ``<something>_facts`` only if its main purpose is returning ``ansible_facts``. Only use ``ansible_facts`` for information that is specific to the host machine, for example network interfaces and their configuration, which operating system and which programs are installed.
+* name a module that queries/returns general information (and not ``ansible_facts``) ``<something>_info``. General information is non-host specific information, for example information on online/cloud services (you can access different accounts for the same online service from the same host), or information on VMs and containers accessible from the machine.
+
+Additional requirements may apply for certain collections. Review the individual collection repositories for more information.
+
+Please make sure your module meets these requirements before you submit your PR/proposal. If you have questions, reach out via `Ansible's IRC chat channel <http://irc.freenode.net>`_ or the `Ansible development mailing list <https://groups.google.com/group/ansible-devel>`_.
+
+Contributing to Ansible: subjective requirements
+================================================
+
+If your module meets these objective requirements, collection maintainers will review your code to see if they think it's clear, concise, secure, and maintainable. They will consider whether your module provides a good user experience, helpful error messages, reasonable defaults, and more. This process is subjective, with no exact standards for acceptance. For the best chance of getting your module accepted, follow our :ref:`tips for module development <developing_modules_best_practices>`.
+
+Other checklists
+================
+
+* :ref:`Tips for module development <developing_modules_best_practices>`.
+* :ref:`Amazon development checklist <AWS_module_development>`.
+* :ref:`Windows development checklist <developing_modules_general_windows>`.
diff --git a/docs/docsite/rst/dev_guide/developing_modules_documenting.rst b/docs/docsite/rst/dev_guide/developing_modules_documenting.rst
new file mode 100644
index 00000000..6be4fcd0
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_documenting.rst
@@ -0,0 +1,438 @@
+.. _developing_modules_documenting:
+.. _module_documenting:
+
+*******************************
+Module format and documentation
+*******************************
+
+If you want to contribute your module to most Ansible collections, you must write your module in Python and follow the standard format described below. (Unless you're writing a Windows module, in which case the :ref:`Windows guidelines <developing_modules_general_windows>` apply.) In addition to following this format, you should review our :ref:`submission checklist <developing_modules_checklist>`, :ref:`programming tips <developing_modules_best_practices>`, and :ref:`strategy for maintaining Python 2 and Python 3 compatibility <developing_python_3>`, as well as information about :ref:`testing <developing_testing>` before you open a pull request.
+
+Every Ansible module written in Python must begin with seven standard sections in a particular order, followed by the code. The sections in order are:
+
+.. contents::
+ :depth: 1
+ :local:
+
+.. note:: Why don't the imports go first?
+
+ Keen Python programmers may notice that, contrary to PEP 8's advice, we don't put ``imports`` at the top of the file. This is because the ``DOCUMENTATION`` through ``RETURN`` sections are not used by the module code itself; they are essentially extra docstrings for the file. The imports are placed after these special variables for the same reason PEP 8 puts imports after the introductory comments and docstrings: it keeps the active parts of the code together and the purely informational pieces apart. The decision to exclude E402 is based on readability (which is what PEP 8 is about). Documentation strings in a module are much more similar to module-level docstrings than to code, and are never used by the module itself. Placing the imports below this documentation and closer to the code consolidates related pieces, improving readability, debugging, and understanding.
+
+.. warning:: **Copy old modules with care!**
+
+ Some older Ansible modules have ``imports`` at the bottom of the file, ``Copyright`` notices with the full GPL prefix, and/or ``DOCUMENTATION`` fields in the wrong order. These are legacy files that need updating - do not copy them into new modules. Over time we are updating and correcting older modules. Please follow the guidelines on this page!
+
+.. _shebang:
+
+Python shebang & UTF-8 coding
+===============================
+
+Begin your Ansible module with ``#!/usr/bin/python`` - this "shebang" allows ``ansible_python_interpreter`` to work. Follow the shebang immediately with ``# -*- coding: utf-8 -*-`` to clarify that the file is UTF-8 encoded.
+
+.. _copyright:
+
+Copyright and license
+=====================
+
+After the shebang and UTF-8 coding, add a `copyright line <https://www.gnu.org/licenses/gpl-howto.en.html>`_ with the original copyright holder and a license declaration. The license declaration should be ONLY one line, not the full GPL prefix:
+
+.. code-block:: python
+
+ #!/usr/bin/python
+ # -*- coding: utf-8 -*-
+
+ # Copyright: (c) 2018, Terry Jones <terry.jones@example.org>
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+Major additions to the module (for instance, rewrites) may add additional copyright lines. Any legal review will include the source control history, so an exhaustive copyright header is not necessary.
+Please do not edit the existing copyright year. This simplifies project administration and is unlikely to cause any interesting legal issues.
+When adding a second copyright line for a significant feature or rewrite, add the newer line above the older one:
+
+.. code-block:: python
+
+ #!/usr/bin/python
+ # -*- coding: utf-8 -*-
+
+ # Copyright: (c) 2017, [New Contributor(s)]
+ # Copyright: (c) 2015, [Original Contributor(s)]
+ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+.. _ansible_metadata_block:
+
+ANSIBLE_METADATA block
+======================
+
+Since we moved to collections, we have deprecated the METADATA functionality. It is no longer required for modules, but it will not break anything if present.
+
+
+.. _documentation_block:
+
+DOCUMENTATION block
+===================
+
+After the shebang, the UTF-8 coding, the copyright line, and the license section comes the ``DOCUMENTATION`` block. Ansible's online module documentation is generated from the ``DOCUMENTATION`` blocks in each module's source code. The ``DOCUMENTATION`` block must be valid YAML. You may find it easier to start writing your ``DOCUMENTATION`` string in an :ref:`editor with YAML syntax highlighting <other_tools_and_programs>` before you include it in your Python file. You can start by copying our `example documentation string <https://github.com/ansible/ansible/blob/devel/examples/DOCUMENTATION.yml>`_ into your module file and modifying it. If you run into syntax issues in your YAML, you can validate it on the `YAML Lint <http://www.yamllint.com/>`_ website.
+
+Module documentation should briefly and accurately define what each module and option does, and how it works with others in the underlying system. Documentation should be written for a broad audience, readable both by experts and non-experts.
+ * Descriptions should always start with a capital letter and end with a full stop. Consistency always helps.
+ * Verify that arguments in doc and module spec dict are identical.
+ * For password / secret arguments ``no_log=True`` should be set.
+ * For arguments that seem to contain sensitive information but **do not** contain secrets, such as "password_length", set ``no_log=False`` to disable the warning message.
+ * If an option is only sometimes required, describe the conditions. For example, "Required when I(state=present)."
+ * If your module allows ``check_mode``, reflect this fact in the documentation.
+
+To create clear, concise, consistent, and useful documentation, follow the :ref:`style guide <style_guide>`.
+
+Each documentation field is described below. Before committing your module documentation, please test it at the command line and as HTML:
+
+* As long as your module file is :ref:`available locally <local_modules>`, you can use ``ansible-doc -t module my_module_name`` to view your module documentation at the command line. Any parsing errors will be obvious - you can view details by adding ``-vvv`` to the command.
+* You should also :ref:`test the HTML output <testing_module_documentation>` of your module documentation.
+
+Documentation fields
+--------------------
+
+All fields in the ``DOCUMENTATION`` block are lower-case. All fields are required unless specified otherwise:
+
+:module:
+
+ * The name of the module.
+ * Must be the same as the filename, without the ``.py`` extension.
+
+:short_description:
+
+ * A short description which is displayed on the :ref:`list_of_collections` page and ``ansible-doc -l``.
+ * The ``short_description`` is displayed by ``ansible-doc -l`` without any category grouping,
+ so it needs enough detail to explain the module's purpose without the context of the directory structure in which it lives.
+ * Unlike ``description:``, ``short_description`` should not have a trailing period/full stop.
+
+:description:
+
+ * A detailed description (generally two or more sentences).
+ * Must be written in full sentences, in other words, with capital letters and periods/full stops.
+ * Shouldn't mention the module name.
+ * Make use of multiple entries rather than using one long paragraph.
+ * Don't quote complete values unless it is required by YAML.
+
+:version_added:
+
+ * The version of Ansible when the module was added.
+ * This is a string, and not a float, for example, ``version_added: '2.1'``
+
+:author:
+
+ * Name of the module author in the form ``First Last (@GitHubID)``.
+ * Use a multi-line list if there is more than one author.
+ * Don't use quotes, as they should not be required by YAML.
+
+:deprecated:
+
+ * Marks modules that will be removed in future releases. See also :ref:`module_lifecycle`.
+
+:options:
+
+ * Options are often called `parameters` or `arguments`. Because the documentation field is called `options`, we will use that term.
+ * If the module has no options (for example, it's a ``_facts`` module), all you need is one line: ``options: {}``.
+ * If your module has options (in other words, accepts arguments), each option should be documented thoroughly. For each module option, include:
+
+ :option-name:
+
+ * Use a declarative operation (not CRUD) that focuses on the final state, for example `online:` rather than `is_online:`.
+ * The name of the option should be consistent with the rest of the module, as well as other modules in the same category.
+ * When in doubt, look for other modules to find option names that are used for the same purpose; we like to offer consistency to our users.
+
+ :description:
+
+ * Detailed explanation of what this option does. It should be written in full sentences.
+ * The first entry is a description of the option itself; subsequent entries detail its use, dependencies, or format of possible values.
+ * Should not list the possible values (that's what ``choices:`` is for, though it should explain what the values do if they aren't obvious).
+ * If an option is only sometimes required, describe the conditions. For example, "Required when I(state=present)."
+ * Mutually exclusive options must be documented as the final sentence on each of the options.
+
+ :required:
+
+ * Only needed if ``true``.
+ * If missing, we assume the option is not required.
+
+ :default:
+
+ * If ``required`` is false/missing, ``default`` may be specified (assumed 'null' if missing).
+ * Ensure that the default value in the docs matches the default value in the code.
+ * The default field must not be listed as part of the description, unless it requires additional information or conditions.
+ * If the option is a boolean value, you can use any of the boolean values recognized by Ansible (such as true/false or yes/no). Choose the one that reads better in the context of the option.
+
+ :choices:
+
+ * List of option values.
+ * Should be absent if empty.
+
+ :type:
+
+ * Specifies the data type that the option accepts; it must match the ``argspec``.
+ * If an argument is ``type='bool'``, this field should be set to ``type: bool`` and no ``choices`` should be specified.
+ * If an argument is ``type='list'``, ``elements`` should be specified.
+
+ :elements:
+
+ * Specifies the data type for list elements in case ``type='list'``.
+
+ :aliases:
+ * List of optional name aliases.
+ * Generally not needed.
+
+ :version_added:
+
+ * Only needed if this option was extended after initial Ansible release, in other words, this is greater than the top level `version_added` field.
+ * This is a string, and not a float, for example, ``version_added: '2.3'``.
+
+ :suboptions:
+
+ * If this option takes a dict or list of dicts, you can define the structure here.
+ * See :ref:`ansible_collections.azure.azcollection.azure_rm_securitygroup_module`, :ref:`ansible_collections.azure.azcollection.azure_rm_azurefirewall_module`, and :ref:`ansible_collections.openstack.cloud.baremetal_node_action_module` for examples.
+
+:requirements:
+
+ * List of requirements (if applicable).
+ * Include minimum versions.
+
+:seealso:
+
+ * A list of references to other modules, documentation, or Internet resources.
+ * In Ansible 2.10 and later, references to modules must use the FQCN or ``ansible.builtin`` for modules in ``ansible-base``.
+ * A reference can be one of the following formats:
+
+
+ .. code-block:: yaml+jinja
+
+ seealso:
+
+ # Reference by module name
+ - module: cisco.aci.aci_tenant
+
+ # Reference by module name, including description
+ - module: cisco.aci.aci_tenant
+ description: ACI module to create tenants on a Cisco ACI fabric.
+
+ # Reference by rST documentation anchor
+ - ref: aci_guide
+ description: Detailed information on how to manage your ACI infrastructure using Ansible.
+
+ # Reference by Internet resource
+ - name: APIC Management Information Model reference
+ description: Complete reference of the APIC object model.
+ link: https://developer.cisco.com/docs/apic-mim-ref/
+
+:notes:
+
+ * Details of any important information that doesn't fit in one of the above sections.
+ * For example, whether ``check_mode`` is or is not supported.
+
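+Putting the required fields together, a minimal sketch of a ``DOCUMENTATION`` block might look like the following (the module name and its single option are hypothetical):
+
+.. code-block:: python
+
+    DOCUMENTATION = r'''
+    ---
+    module: my_test
+    short_description: Manage widget objects on a host
+    description:
+        - A longer description, written in full sentences.
+        - Use multiple entries rather than one long paragraph.
+    version_added: '2.10'
+    author:
+        - First Last (@githubid)
+    options:
+        name:
+            description:
+                - Name of the widget to manage.
+            required: true
+            type: str
+    '''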
+
+Linking and other format macros within module documentation
+-----------------------------------------------------------
+
+You can link from your module documentation to other module docs, other resources on docs.ansible.com, and resources elsewhere on the internet with the help of some pre-defined macros. The correct formats for these macros are:
+
+* ``L()`` for links with a heading. For example: ``See L(Ansible Tower,https://www.ansible.com/products/tower).`` As of Ansible 2.10, do not use ``L()`` for relative links between Ansible documentation and collection documentation.
+* ``U()`` for URLs. For example: ``See U(https://www.ansible.com/products/tower) for an overview.``
+* ``R()`` for cross-references with a heading (added in Ansible 2.10). For example: ``See R(Cisco IOS Platform Guide,ios_platform_options)``. Use the RST anchor for the cross-reference. See :ref:`adding_anchors_rst` for details.
+* ``M()`` for module names. For example: ``See also M(ansible.builtin.yum) or M(community.general.apt_rpm)``.
+
+There are also some macros which do not create links but we use them to display certain types of
+content in a uniform way:
+
+* ``I()`` for option names. For example: ``Required if I(state=present).`` This is italicized in
+ the documentation.
+* ``C()`` for files and option values. For example: ``If not set the environment variable C(ACME_PASSWORD) will be used.`` This displays with a mono-space font in the documentation.
+* ``B()`` currently has no standardized usage. It is displayed in boldface in the documentation.
+* ``HORIZONTALLINE`` is used sparingly as a separator in long descriptions. It becomes a horizontal rule (the ``<hr>`` html tag) in the documentation.
+
+.. note::
+
+ For links between modules and documentation within a collection, you can use any of the options above. For links outside of your collection, use ``R()`` if available. Otherwise, use ``U()`` or ``L()`` with full URLs (not relative links). For modules, use ``M()`` with the FQCN or ``ansible.builtin`` as shown in the example. If you are creating your own documentation site, you will need to use the `intersphinx extension <https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html>`_ to convert ``R()`` and ``M()`` to the correct links.
+
+
+.. note::
+ - To refer to a group of modules in a collection, use ``R()``. When a collection is not the right granularity, use ``C(..)``:
+
+   - ``Refer to the R(community.kubernetes collection, plugins_in_community.kubernetes) for information on managing kubernetes clusters.``
+   - ``The C(win_*) modules (spread across several collections) allow you to manage various aspects of windows hosts.``
+
+
+.. note::
+
+ Because it stands out better, use ``seealso`` for general references instead of notes or links in the description.
+
+.. _module_docs_fragments:
+
+Documentation fragments
+-----------------------
+
+If you are writing multiple related modules, they may share common documentation, such as authentication details, file mode settings, ``notes:`` or ``seealso:`` entries. Rather than duplicate that information in each module's ``DOCUMENTATION`` block, you can save it once as a doc_fragment plugin and use it in each module's documentation. In Ansible, shared documentation fragments are contained in a ``ModuleDocFragment`` class in `lib/ansible/plugins/doc_fragments/ <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/doc_fragments>`_ or the equivalent directory in a collection. To include a documentation fragment, add ``extends_documentation_fragment: FRAGMENT_NAME`` in your module documentation. Use the fully qualified collection name for the FRAGMENT_NAME (for example, ``community.kubernetes.k8s_auth_options``).
+
+Modules should only use items from a doc fragment if the module will implement all of the interface documented there in a manner that behaves the same as the existing modules which import that fragment. The goal is that items imported from the doc fragment will behave identically when used in another module that imports the doc fragment.
+
+By default, only the ``DOCUMENTATION`` property from a doc fragment is inserted into the module documentation. It is possible to define additional properties in the doc fragment in order to import only certain parts of a doc fragment or mix and match as appropriate. If a property is defined in both the doc fragment and the module, the module value overrides the doc fragment.
+
+Here is an example doc fragment named ``example_fragment.py``:
+
+.. code-block:: python
+
+ class ModuleDocFragment(object):
+ # Standard documentation
+ DOCUMENTATION = r'''
+ options:
+ # options here
+ '''
+
+ # Additional section
+ OTHER = r'''
+ options:
+ # other options here
+ '''
+
+
+To insert the contents of ``OTHER`` in a module:
+
+.. code-block:: yaml+jinja
+
+ extends_documentation_fragment: example_fragment.other
+
+Or use both:
+
+.. code-block:: yaml+jinja
+
+ extends_documentation_fragment:
+ - example_fragment
+ - example_fragment.other
+
+.. note::
+
+ Prior to Ansible 2.8, documentation fragments were kept in ``lib/ansible/utils/module_docs_fragments``.
+
+.. versionadded:: 2.8
+
+Since Ansible 2.8, you can have user-supplied doc_fragments by using a ``doc_fragments`` directory adjacent to your play or role, just like any other plugin.
+
+For example, all AWS modules should include:
+
+.. code-block:: yaml+jinja
+
+ extends_documentation_fragment:
+ - aws
+ - ec2
+
+:ref:`docfragments_collections` describes how to incorporate documentation fragments in a collection.
+
+.. _examples_block:
+
+EXAMPLES block
+==============
+
+After the shebang, the UTF-8 coding, the copyright line, the license section, and the ``DOCUMENTATION`` block comes the ``EXAMPLES`` block. Here you show users how your module works with real-world examples in multi-line plain-text YAML format. The best examples are ready for the user to copy and paste into a playbook. Review and update your examples with every change to your module.
+
+Per playbook best practices, each example should include a ``name:`` line::
+
+ EXAMPLES = r'''
+ - name: Ensure foo is installed
+ namespace.collection.modulename:
+ name: foo
+ state: present
+ '''
+
+The ``name:`` line should be capitalized and not include a trailing dot.
+
+Use a fully qualified collection name (FQCN) as a part of the module's name like in the example above. For modules in ``ansible-base``, use the ``ansible.builtin.`` identifier, for example ``ansible.builtin.debug``.
+
+If your examples use boolean options, use yes/no values. Since the documentation generates boolean values as yes/no, having the examples use these values as well makes the module documentation more consistent.
+
+If your module returns facts that are often needed, an example of how to use them can be helpful.
+
+.. _return_block:
+
+RETURN block
+============
+
+After the shebang, the UTF-8 coding, the copyright line, the license section, ``DOCUMENTATION`` and ``EXAMPLES`` blocks comes the ``RETURN`` block. This section documents the information the module returns for use by other modules.
+
+If your module doesn't return anything (apart from the standard returns), this section of your module should read: ``RETURN = r''' # '''``
+Otherwise, for each value returned, provide the following fields. All fields are required unless specified otherwise.
+
+:return name:
+ Name of the returned field.
+
+ :description:
+ Detailed description of what this value represents. Capitalized and with trailing dot.
+ :returned:
+ When this value is returned, such as ``always``, ``changed`` or ``success``. This is a string and can contain any human-readable content.
+ :type:
+ Data type.
+ :elements:
+ If ``type='list'``, specifies the data type of the list's elements.
+ :sample:
+ One or more examples.
+ :version_added:
+ Only needed if this return was extended after initial Ansible release, in other words, this is greater than the top level `version_added` field.
+ This is a string, and not a float, for example, ``version_added: '2.3'``.
+ :contains:
+ Optional. To describe nested return values, set ``type: complex``, ``type: dict``, or ``type: list``/``elements: dict`` and repeat the elements above for each sub-field.
+
+Here are two example ``RETURN`` sections, one with three simple fields and one with a complex nested field::
+
+ RETURN = r'''
+ dest:
+ description: Destination file/path.
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+ src:
+ description: Source file used for the copy on the target machine.
+ returned: changed
+ type: str
+ sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
+ md5sum:
+ description: MD5 checksum of the file after running copy.
+ returned: when supported
+ type: str
+ sample: 2a5aeecc61dc98c4d780b14b330e3282
+ '''
+
+ RETURN = r'''
+ packages:
+ description: Information about package requirements.
+ returned: success
+ type: complex
+ contains:
+ missing:
+ description: Packages that are missing from the system.
+ returned: success
+ type: list
+ sample:
+ - libmysqlclient-dev
+ - libxml2-dev
+ badversion:
+ description: Packages that are installed but at bad versions.
+ returned: success
+ type: list
+ sample:
+ - package: libxml2-dev
+ version: 2.9.4+dfsg1-2
+ constraint: ">= 3.0"
+ '''
+
+.. _python_imports:
+
+Python imports
+==============
+
+After the shebang, the UTF-8 coding, the copyright line, the license, and the sections for ``DOCUMENTATION``, ``EXAMPLES``, and ``RETURN``, you can finally add the Python imports. All modules must use Python imports in the form:
+
+.. code-block:: python
+
+ from ansible.module_utils.basic import AnsibleModule
+
+The use of "wildcard" imports such as ``from module_utils.basic import *`` is no longer allowed.
+
+.. _dev_testing_module_documentation:
+
+Testing module documentation
+============================
+
+To test Ansible documentation locally, please :ref:`follow these instructions <testing_module_documentation>`.
diff --git a/docs/docsite/rst/dev_guide/developing_modules_general.rst b/docs/docsite/rst/dev_guide/developing_modules_general.rst
new file mode 100644
index 00000000..cb183b70
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_general.rst
@@ -0,0 +1,221 @@
+.. _developing_modules_general:
+.. _module_dev_tutorial_sample:
+
+*******************************************
+Ansible module development: getting started
+*******************************************
+
+A module is a reusable, standalone script that Ansible runs on your behalf, either locally or remotely. Modules interact with your local machine, an API, or a remote system to perform specific tasks like changing a database password or spinning up a cloud instance. Each module can be used by the Ansible API, or by the :command:`ansible` or :command:`ansible-playbook` programs. A module provides a defined interface, accepts arguments, and returns information to Ansible by printing a JSON string to stdout before exiting.
+
+If you need functionality that is not available in any of the thousands of Ansible modules found in collections, you can easily write your own custom module. When you write a module for local use, you can choose any programming language and follow your own rules. Use this topic to learn how to create an Ansible module in Python. After you create a module, you must add it locally to the appropriate directory so that Ansible can find and execute it. For details about adding a module locally, see :ref:`developing_locally`.
+
+.. contents::
+ :local:
+
+.. _environment_setup:
+
+Environment setup
+=================
+
+Prerequisites via apt (Ubuntu)
+------------------------------
+
+Due to dependencies (for example ansible -> paramiko -> pynacl -> libffi):
+
+.. code:: bash
+
+ sudo apt update
+ sudo apt install build-essential libssl-dev libffi-dev python-dev
+
+Common environment setup
+------------------------------
+
+1. Clone the Ansible repository:
+ ``$ git clone https://github.com/ansible/ansible.git``
+2. Change directory into the repository root dir: ``$ cd ansible``
+3. Create a virtual environment: ``$ python3 -m venv venv`` (or for
+ Python 2 ``$ virtualenv venv``. Note, this requires you to install
+ the virtualenv package: ``$ pip install virtualenv``)
+4. Activate the virtual environment: ``$ . venv/bin/activate``
+5. Install development requirements:
+ ``$ pip install -r requirements.txt``
+6. Run the environment setup script for each new dev shell process:
+ ``$ . hacking/env-setup``
+
+.. note:: After the initial setup above, every time you are ready to start
+ developing Ansible you should be able to just run the following from the
+ root of the Ansible repo:
+ ``$ . venv/bin/activate && . hacking/env-setup``
+
+
+Creating an info or a facts module
+==================================
+
+Ansible gathers information about the target machines using facts modules, and gathers information on other objects or files using info modules.
+If you find yourself trying to add ``state: info`` or ``state: list`` to an existing module, that is often a sign that a new dedicated ``_facts`` or ``_info`` module is needed.
+
+In Ansible 2.8 and onwards, we have two types of information modules: ``*_info`` and ``*_facts``.
+
+If a module is named ``<something>_facts``, it should be because its main purpose is returning ``ansible_facts``. Do not name modules that do not do this with ``_facts``.
+Only use ``ansible_facts`` for information that is specific to the host machine, for example network interfaces and their configuration, which operating system and which programs are installed.
+
+Modules that query/return general information (and not ``ansible_facts``) should be named ``_info``.
+General information is non-host specific information, for example information on online/cloud services (you can access different accounts for the same online service from the same host), or information on VMs and containers accessible from the machine, or information on individual files or programs.
+
+Info and facts modules are just like any other Ansible module, with a few minor requirements:
+
+1. They MUST be named ``<something>_info`` or ``<something>_facts``, where ``<something>`` is singular.
+2. Info ``*_info`` modules MUST return their data in the :ref:`result dictionary<common_return_values>` so other modules can access it.
+3. Fact ``*_facts`` modules MUST return their data in the ``ansible_facts`` field of the :ref:`result dictionary<common_return_values>` so other modules can access it.
+4. They MUST support :ref:`check_mode <check_mode_dry>`.
+5. They MUST NOT make any changes to the system.
+6. They MUST document the :ref:`return fields<return_block>` and :ref:`examples<examples_block>`.
+
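+Before working through the full examples below, it may help to see a compressed, hypothetical sketch of what these rules imply (all names are placeholders):
+
+.. code-block:: python
+
+    #!/usr/bin/python
+    # -*- coding: utf-8 -*-
+    from ansible.module_utils.basic import AnsibleModule
+
+    def main():
+        # Requirements 4 and 5: support check mode and never change the system.
+        module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
+        info = dict(status='ok')
+        # Requirement 2: an *_info module returns its data in the result dictionary.
+        # A *_facts module would instead return it under 'ansible_facts', for example:
+        #   module.exit_json(changed=False, ansible_facts=dict(my_facts=info))
+        module.exit_json(changed=False, my_test_info=info)
+
+    if __name__ == '__main__':
+        main()
+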
+To create an info module:
+
+1. Navigate to the correct directory for your new module: ``$ cd lib/ansible/modules/``. If you are developing a module in a :ref:`collection <developing_collections>`, ``$ cd plugins/modules/`` inside your collection development tree.
+2. Create your new module file: ``$ touch my_test_info.py``.
+3. Paste the content below into your new info module file. It includes the :ref:`required Ansible format and documentation <developing_modules_documenting>`, a simple :ref:`argument spec for declaring the module options <argument_spec>`, and some example code.
+4. Modify and extend the code to do what you want your new info module to do. See the :ref:`programming tips <developing_modules_best_practices>` and :ref:`Python 3 compatibility <developing_python_3>` pages for pointers on writing clean and concise module code.
+
+.. literalinclude:: ../../../../examples/scripts/my_test_info.py
+ :language: python
+
+Use the same process to create a facts module.
+
+.. literalinclude:: ../../../../examples/scripts/my_test_facts.py
+ :language: python
+
+Creating a module
+=================
+
+To create a new module:
+
+1. Navigate to the correct directory for your new module: ``$ cd lib/ansible/modules/``. If you are developing a module in a :ref:`collection <developing_collections>`, ``$ cd plugins/modules/`` inside your collection development tree.
+2. Create your new module file: ``$ touch my_test.py``.
+3. Paste the content below into your new module file. It includes the :ref:`required Ansible format and documentation <developing_modules_documenting>`, a simple :ref:`argument spec for declaring the module options <argument_spec>`, and some example code.
+4. Modify and extend the code to do what you want your new module to do. See the :ref:`programming tips <developing_modules_best_practices>` and :ref:`Python 3 compatibility <developing_python_3>` pages for pointers on writing clean and concise module code.
+
+.. literalinclude:: ../../../../examples/scripts/my_test.py
+ :language: python
+
+Exercising your module code
+===========================
+
+After you modify the sample code above to do what you want, you can try out your module.
+Our :ref:`debugging tips <debugging_modules>` will help if you run into bugs as you verify your module code.
+
+
+Exercising module code locally
+------------------------------
+
+If your module does not need to target a remote host, you can quickly and easily exercise your code locally like this:
+
+- Create an arguments file, a basic JSON config file that passes parameters to your module so you can run it. Name the arguments file ``/tmp/args.json`` and add the following content:
+
+.. code:: json
+
+ {
+ "ANSIBLE_MODULE_ARGS": {
+ "name": "hello",
+ "new": true
+ }
+ }
+
+- If you are using a virtual environment (highly recommended for
+ development) activate it: ``$ . venv/bin/activate``
+- Set up the environment for development: ``$ . hacking/env-setup``
+- Run your test module locally and directly:
+ ``$ python -m ansible.modules.my_test /tmp/args.json``
+
+This should return output like this:
+
+.. code:: json
+
+ {"changed": true, "state": {"original_message": "hello", "new_message": "goodbye"}, "invocation": {"module_args": {"name": "hello", "new": true}}}
+
+
+Exercising module code in a playbook
+------------------------------------
+
+The next step in testing your new module is to consume it with an Ansible playbook.
+
+- Create a playbook in any directory: ``$ touch testmod.yml``
+- Add the following to the new playbook file::
+
+ - name: test my new module
+ hosts: localhost
+ tasks:
+ - name: run the new module
+ my_test:
+ name: 'hello'
+ new: true
+ register: testout
+ - name: dump test output
+ debug:
+ msg: '{{ testout }}'
+
+- Run the playbook and analyze the output: ``$ ansible-playbook ./testmod.yml``
+
+Testing basics
+====================
+
+These two examples will get you started with testing your module code. Please review our :ref:`testing <developing_testing>` section for more detailed
+information, including instructions for :ref:`testing module documentation <testing_module_documentation>`, adding :ref:`integration tests <testing_integration>`, and more.
+
+.. note::
+ Every new module and plugin should have integration tests, even if the tests cannot be run on Ansible CI infrastructure.
+ In this case, the tests should be marked with the ``unsupported`` alias in `aliases file <https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/integration-aliases.html>`_.
+
+Performing sanity tests
+-----------------------
+
+You can run through Ansible's sanity checks in a container:
+
+``$ ansible-test sanity -v --docker --python 2.7 MODULE_NAME``
+
+.. note::
+ Note that this example requires Docker to be installed and running. If you'd rather not use a container for this, you can choose to use ``--venv`` instead of ``--docker``.
+
+Unit tests
+----------
+
+You can add unit tests for your module in ``./test/units/modules``. You must first set up your testing environment. In this example, we're using Python 3.5.
+
+- Install the requirements (outside of your virtual environment): ``$ pip3 install -r ./test/lib/ansible_test/_data/requirements/units.txt``
+- Run ``. hacking/env-setup``
+- To run all tests, do the following: ``$ ansible-test units --python 3.5``. If you are using a CI environment, these tests will run automatically.
+
+.. note:: Ansible uses pytest for unit testing.
+
+To run pytest against a single test module, you can do the following (provide the path to the test module appropriately):
+
+``$ pytest -r a --cov=. --cov-report=html --fulltrace --color yes test/units/modules/.../test/my_test.py``
+
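+As a minimal illustration (the test path and helper function are hypothetical), a pytest unit test for a pure helper function might look like:
+
+.. code-block:: python
+
+    # test/units/modules/test_my_test.py
+    from ansible.modules.my_test import build_greeting  # hypothetical helper
+
+    def test_build_greeting():
+        assert build_greeting('hello') == 'goodbye'
+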
+Contributing back to Ansible
+============================
+
+If you would like to contribute to ``ansible-base`` by adding a new feature or fixing a bug, `create a fork <https://help.github.com/articles/fork-a-repo/>`_ of the ansible/ansible repository and develop against a new feature branch using the ``devel`` branch as a starting point. When you have a good working code change, you can submit a pull request to the Ansible repository by selecting your feature branch as a source and the Ansible devel branch as a target.
+
+If you want to contribute a module to an :ref:`Ansible collection <contributing_maintained_collections>`, review our :ref:`submission checklist <developing_modules_checklist>`, :ref:`programming tips <developing_modules_best_practices>`, and :ref:`strategy for maintaining Python 2 and Python 3 compatibility <developing_python_3>`, as well as information about :ref:`testing <developing_testing>` before you open a pull request.
+
+The :ref:`Community Guide <ansible_community_guide>` covers how to open a pull request and what happens next.
+
+
+Communication and development support
+=====================================
+
+Join the IRC channel ``#ansible-devel`` on freenode for discussions
+surrounding Ansible development.
+
+For questions and discussions pertaining to using the Ansible product,
+use the ``#ansible`` channel.
+
+For more specific IRC channels look at :ref:`Community Guide, Communicating <communication_irc>`.
+
+Credit
+======
+
+Thank you to Thomas Stringer (`@trstringer <https://github.com/trstringer>`_) for contributing source
+material for this topic.
diff --git a/docs/docsite/rst/dev_guide/developing_modules_general_aci.rst b/docs/docsite/rst/dev_guide/developing_modules_general_aci.rst
new file mode 100644
index 00000000..97ee2b42
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_general_aci.rst
@@ -0,0 +1,443 @@
+.. _aci_dev_guide:
+
+****************************
+Developing Cisco ACI modules
+****************************
+This is a brief walk-through of how to create new Cisco ACI modules for Ansible.
+
+For more information about Cisco ACI, look at the :ref:`Cisco ACI user guide <aci_guide>`.
+
+What's covered in this section:
+
+.. contents::
+ :depth: 3
+ :local:
+
+
+.. _aci_dev_guide_intro:
+
+Introduction
+============
+The `cisco.aci collection <https://galaxy.ansible.com/cisco/aci>`_ already includes a large number of Cisco ACI modules; however, the ACI object model is huge, and covering all possible functionality would easily require more than 1500 individual modules.
+
+If you need specific functionality, you have two options:
+
+- Learn the ACI object model and use the low-level APIC REST API using the :ref:`aci_rest <aci_rest_module>` module
+- Write your own dedicated modules, which is actually quite easy
+
+.. seealso::
+
+ `ACI Fundamentals: ACI Policy Model <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/1-x/aci-fundamentals/b_ACI-Fundamentals/b_ACI-Fundamentals_chapter_010001.html>`_
+ A good introduction to the ACI object model.
+ `APIC Management Information Model reference <https://developer.cisco.com/docs/apic-mim-ref/>`_
+ Complete reference of the APIC object model.
+ `APIC REST API Configuration Guide <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/2-x/rest_cfg/2_1_x/b_Cisco_APIC_REST_API_Configuration_Guide.html>`_
+ Detailed guide on how the APIC REST API is designed and used, including many examples.
+
+
+So let's look at how a typical ACI module is built up.
+
+
+.. _aci_dev_guide_module_structure:
+
+ACI module structure
+====================
+
+Importing objects from Python libraries
+---------------------------------------
+The following imports are standard across ACI modules:
+
+.. code-block:: python
+
+ from ansible.module_utils.aci import ACIModule, aci_argument_spec
+ from ansible.module_utils.basic import AnsibleModule
+
+
+Defining the argument spec
+--------------------------
+The first line adds the standard connection parameters to the module. After that, the next section will update the ``argument_spec`` dictionary with module-specific parameters. The module-specific parameters should include:
+
+* the object_id (usually the name)
+* the configurable properties of the object
+* the parent object IDs (all parents up to the root)
+* only child classes that are a 1-to-1 relationship (1-to-many/many-to-many require their own module to properly manage)
+* the state
+
+ + ``state: absent`` to ensure object does not exist
+ + ``state: present`` to ensure the object and configs exist; this is also the default
+ + ``state: query`` to retrieve information about objects in the class
+
+.. code-block:: python
+
+ def main():
+ argument_spec = aci_argument_spec()
+ argument_spec.update(
+ object_id=dict(type='str', aliases=['name']),
+ object_prop1=dict(type='str'),
+ object_prop2=dict(type='str', choices=['choice1', 'choice2', 'choice3']),
+ object_prop3=dict(type='int'),
+ parent_id=dict(type='str'),
+ child_object_id=dict(type='str'),
+ child_object_prop=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+ )
+
+
+.. hint:: Do not provide default values for configuration arguments. Default values could cause unintended changes to the object.
+
+Using the AnsibleModule object
+------------------------------
+The following section creates an AnsibleModule instance. The module should support check-mode, so we pass the ``argument_spec`` and ``supports_check_mode`` arguments. Since these modules support querying the APIC for all objects of the module's class, the object/parent IDs should only be required if ``state: absent`` or ``state: present``.
+
+.. code-block:: python
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['state', 'absent', ['object_id', 'parent_id']],
+ ['state', 'present', ['object_id', 'parent_id']],
+ ],
+ )
+
+
+Mapping variable definition
+---------------------------
+Once the AnsibleModule object has been initialized, the necessary parameter values should be extracted from ``params`` and any data validation should be done. Usually the only params that need to be extracted are those related to the ACI object configuration and its child configuration. If you have integer objects that you would like to validate, then the validation should be done here, and the ``ACIModule.payload()`` method will handle the string conversion.
+
+.. code-block:: python
+
+ object_id = module.params['object_id']
+ object_prop1 = module.params['object_prop1']
+ object_prop2 = module.params['object_prop2']
+ object_prop3 = module.params['object_prop3']
+ if object_prop3 is not None and object_prop3 not in range(x, y):
+ module.fail_json(msg='Valid object_prop3 values are between x and (y-1)')
+ child_object_id = module.params['child_object_id']
+ child_object_prop = module.params['child_object_prop']
+ state = module.params['state']
+
+
+Using the ACIModule object
+--------------------------
+The ACIModule class handles most of the logic for the ACI modules. The ACIModule extends functionality to the AnsibleModule object, so the module instance must be passed into the class instantiation.
+
+.. code-block:: python
+
+ aci = ACIModule(module)
+
+The ACIModule has six main methods that are used by the modules:
+
+* construct_url
+* get_existing
+* payload
+* get_diff
+* post_config
+* delete_config
+
+The first two methods are used regardless of what value is passed to the ``state`` parameter.
+
+Constructing URLs
+^^^^^^^^^^^^^^^^^
+The ``construct_url()`` method is used to dynamically build the appropriate URL to interact with the object, and the appropriate filter string that should be appended to the URL to filter the results.
+
+* When the ``state`` is not ``query``, the URL is the base URL to access the APIC plus the distinguished name to access the object. The filter string will restrict the returned data to just the configuration data.
+* When ``state`` is ``query``, the URL and filter string used depends on what parameters are passed to the object. This method handles the complexity so that it is easier to add new modules and so that all modules are consistent in what type of data is returned.
+
+.. note:: Our design goal is to take all ID parameters that have values, and return the most specific data possible. If you do not supply any ID parameters to the task, then all objects of the class will be returned. If your task does include all of the ID parameters, then the data for that specific object is returned. If a partial set of ID parameters is passed, then the module will use the IDs that are passed to build the URL and filter strings appropriately.
+
+The ``construct_url()`` method takes two required arguments:
+
+* **self** - passed automatically with the class instance
+* **root_class** - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys
+
+ + **aci_class**: The name of the class used by the APIC, for example ``fvTenant``
+
+ + **aci_rn**: The relative name of the object, for example ``tn-ACME``
+
+ + **target_filter**: A dictionary with key-value pairs that make up the query string for selecting a subset of entries, for example ``{'name': 'ACME'}``
+
+ + **module_object**: The particular object for this class, for example ``ACME``
+
+Example:
+
+.. code-block:: python
+
+ aci.construct_url(
+ root_class=dict(
+ aci_class='fvTenant',
+ aci_rn='tn-{0}'.format(tenant),
+ target_filter={'name': tenant},
+ module_object=tenant,
+ ),
+ )
+
+Some modules, like ``aci_tenant``, are the root class and so they would not need to pass any additional arguments to the method.
+
+The ``construct_url()`` method takes four optional arguments; the first three imitate the root class as described above, but are for child objects:
+
+* subclass_1 - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys
+
+ + Example: Application Profile Class (AP)
+
+* subclass_2 - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys
+
+ + Example: End Point Group (EPG)
+
+* subclass_3 - A dictionary consisting of ``aci_class``, ``aci_rn``, ``target_filter``, and ``module_object`` keys
+
+ + Example: Binding a Contract to an EPG
+
+* child_classes - The list of APIC names for the child classes supported by the modules.
+
+ + This is a list, even if it is a list of one
+ + These are the unfriendly names used by the APIC
+ + These are used to limit the returned child_classes when possible
+ + Example: ``child_classes=['fvRsBDSubnetToProfile', 'fvRsNdPfxPol']``
+
+.. note:: Sometimes the APIC will require special characters ([, ], and -) or will use object metadata in the name ("vlanns" for VLAN pools); the module should handle adding special characters or joining multiple parameters in order to keep expected inputs simple.
+
+Getting the existing configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Once the URL and filter string have been built, the module is ready to retrieve the existing configuration for the object:
+
+* ``state: present`` retrieves the configuration to use as a comparison against what was entered in the task. All values that are different than the existing values will be updated.
+* ``state: absent`` uses the existing configuration to see if the item exists and needs to be deleted.
+* ``state: query`` uses this to perform the query for the task and report back the existing data.
+
+.. code-block:: python
+
+ aci.get_existing()
+
+
+When state is present
+^^^^^^^^^^^^^^^^^^^^^
+When ``state: present``, the module needs to perform a diff against the existing configuration and the task entries. If any value needs to be updated, then the module will make a POST request with only the items that need to be updated. Some modules have children that are in a 1-to-1 relationship with another object; for these cases, the module can be used to manage the child objects.
+
+Building the ACI payload
+""""""""""""""""""""""""
+The ``aci.payload()`` method is used to build a dictionary of the proposed object configuration. All parameters that were not provided a value in the task will be removed from the dictionary (both for the object and its children). Any parameter that does have a value will be converted to a string and added to the final dictionary object that will be used for comparison against the existing configuration.
+
+The ``aci.payload()`` method takes two required arguments and one optional argument, depending on whether the module manages child objects.
+
+* ``aci_class`` is the APIC name for the object's class, for example ``aci_class='fvBD'``
+* ``class_config`` is the appropriate dictionary to be used as the payload for the POST request
+
+ + The keys should match the names used by the APIC.
+ + The values should be the corresponding value in ``module.params``; these are the variables defined above
+
+* ``child_configs`` is optional, and is a list of child config dictionaries.
+
+ + The child configs include the full child object dictionary, not just the attributes configuration portion.
+ + The configuration portion is built the same way as the object.
+
+.. code-block:: python
+
+ aci.payload(
+ aci_class=aci_class,
+ class_config=dict(
+ name=bd,
+ descr=description,
+ type=bd_type,
+ ),
+ child_configs=[
+ dict(
+ fvRsCtx=dict(
+ attributes=dict(
+ tnFvCtxName=vrf
+ ),
+ ),
+ ),
+ ],
+ )
+
+
+Performing the request
+""""""""""""""""""""""
+The ``get_diff()`` method is used to perform the diff, and takes only one required argument, ``aci_class``.
+Example: ``aci.get_diff(aci_class='fvBD')``
+
+The ``post_config()`` method is used to make the POST request to the APIC if needed. This method doesn't take any arguments and handles check mode.
+Example: ``aci.post_config()``
+
+
+Example code
+""""""""""""
+.. code-block:: text
+
+ if state == 'present':
+ aci.payload(
+ aci_class='<object APIC class>',
+ class_config=dict(
+ name=object_id,
+ prop1=object_prop1,
+ prop2=object_prop2,
+ prop3=object_prop3,
+ ),
+ child_configs=[
+ dict(
+ '<child APIC class>'=dict(
+ attributes=dict(
+ child_key=child_object_id,
+ child_prop=child_object_prop
+ ),
+ ),
+ ),
+ ],
+ )
+
+ aci.get_diff(aci_class='<object APIC class>')
+
+ aci.post_config()
+
+
+When state is absent
+^^^^^^^^^^^^^^^^^^^^
+If the task sets the state to absent, then the ``delete_config()`` method is all that is needed. This method does not take any arguments, and handles check mode.
+
+.. code-block:: text
+
+ elif state == 'absent':
+ aci.delete_config()
+
+
+Exiting the module
+^^^^^^^^^^^^^^^^^^
+To have the module exit, call the ACIModule method ``exit_json()``. This method automatically takes care of returning the common return values for you.
+
+.. code-block:: text
+
+ aci.exit_json()
+
+ if __name__ == '__main__':
+ main()
+
+
+.. _aci_dev_guide_testing:
+
+Testing ACI library functions
+=============================
+You can test your ``construct_url()`` and ``payload()`` arguments without accessing APIC hardware by using the following Python script:
+
+.. code-block:: python
+
+ #!/usr/bin/python
+ import json
+ from ansible.module_utils.network.aci.aci import ACIModule
+
+ # Just another class mimicking a bare AnsibleModule class for construct_url() and payload() methods
+ class AltModule():
+ params = dict(
+ host='dummy',
+ port=123,
+ protocol='https',
+ state='present',
+ output_level='debug',
+ )
+
+ # A sub-class of ACIModule to overload __init__ (we don't need to log into APIC)
+ class AltACIModule(ACIModule):
+ def __init__(self):
+ self.result = dict(changed=False)
+ self.module = AltModule()
+ self.params = self.module.params
+
+ # Instantiate our version of the ACI module
+ aci = AltACIModule()
+
+ # Define the variables you need below
+ aep = 'AEP'
+ aep_domain = 'uni/phys-DOMAIN'
+
+ # Below we test the construct_url() arguments to see if they produce the correct results
+ aci.construct_url(
+ root_class=dict(
+ aci_class='infraAttEntityP',
+ aci_rn='infra/attentp-{}'.format(aep),
+ target_filter={'name': aep},
+ module_object=aep,
+ ),
+ subclass_1=dict(
+ aci_class='infraRsDomP',
+ aci_rn='rsdomP-[{}]'.format(aep_domain),
+ target_filter={'tDn': aep_domain},
+ module_object=aep_domain,
+ ),
+ )
+
+ # Below we test the payload() arguments to see if they produce the correct results
+ aci.payload(
+ aci_class='infraRsDomP',
+ class_config=dict(tDn=aep_domain),
+ )
+
+ # Print the URL and proposed payload
+ print('URL:', json.dumps(aci.url, indent=4))
+ print('PAYLOAD:', json.dumps(aci.proposed, indent=4))
+
+
+This will result in:
+
+.. code-block:: yaml
+
+ URL: "https://dummy/api/mo/uni/infra/attentp-AEP/rsdomP-[phys-DOMAIN].json"
+ PAYLOAD: {
+ "infraRsDomP": {
+ "attributes": {
+ "tDn": "phys-DOMAIN"
+ }
+ }
+ }
+
+Testing for sanity checks
+-------------------------
+From your fork, you can run something like:
+
+.. code-block:: bash
+
+ $ ansible-test sanity --python 2.7 lib/ansible/modules/network/aci/aci_tenant.py
+
+.. seealso::
+
+ :ref:`testing_sanity`
+ Information on how to build sanity tests.
+
+
+Testing ACI integration tests
+-----------------------------
+You can run this:
+
+.. code-block:: bash
+
+ $ ansible-test network-integration --continue-on-error --allow-unsupported --diff -v aci_tenant
+
+.. note:: You may need to add ``--python 2.7`` or ``--python 3.6`` in order to use the correct python version for performing tests.
+
+You may want to edit the inventory used at *test/integration/inventory.networking* and add something like:
+
+.. code-block:: ini
+
+ [aci:vars]
+ aci_hostname=my-apic-1
+ aci_username=admin
+ aci_password=my-password
+ aci_use_ssl=yes
+ aci_use_proxy=no
+
+ [aci]
+ localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+.. seealso::
+
+ :ref:`testing_integration`
+ Information on how to build integration tests.
+
+
+Testing for test coverage
+-------------------------
+You can run this:
+
+.. code-block:: bash
+
+ $ ansible-test network-integration --python 2.7 --allow-unsupported --coverage aci_tenant
+ $ ansible-test coverage report
diff --git a/docs/docsite/rst/dev_guide/developing_modules_general_windows.rst b/docs/docsite/rst/dev_guide/developing_modules_general_windows.rst
new file mode 100644
index 00000000..3dd66c2e
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_general_windows.rst
@@ -0,0 +1,696 @@
+.. _developing_modules_general_windows:
+
+**************************************
+Windows module development walkthrough
+**************************************
+
+In this section, we will walk through developing, testing, and debugging an
+Ansible Windows module.
+
+Because Windows modules are written in Powershell and need to be run on a
+Windows host, this guide differs from the usual development walkthrough guide.
+
+What's covered in this section:
+
+.. contents::
+ :local:
+
+
+Windows environment setup
+=========================
+
+Unlike Python module development which can be run on the host that runs
+Ansible, Windows modules need to be written and tested for Windows hosts.
+While evaluation editions of Windows can be downloaded from
+Microsoft, these images are usually not ready to be used by Ansible without
+further modification. The easiest way to set up a Windows host so that it is
+ready to be used by Ansible is to set up a virtual machine using Vagrant.
+Vagrant can be used to download existing OS images called *boxes* that are then
+deployed to a hypervisor like VirtualBox. These boxes can either be created and
+stored offline or they can be downloaded from a central repository called
+Vagrant Cloud.
+
+This guide will use the Vagrant boxes created by the `packer-windoze <https://github.com/jborean93/packer-windoze>`_
+repository which have also been uploaded to `Vagrant Cloud <https://app.vagrantup.com/boxes/search?utf8=%E2%9C%93&sort=downloads&provider=&q=jborean93>`_.
+For more information on how these images are created, see the ``README`` file
+in the GitHub repo.
+
+Before you can get started, the following programs must be installed (please consult the Vagrant and
+VirtualBox documentation for installation instructions):
+
+- Vagrant
+- VirtualBox
+
+Create a Windows server in a VM
+===============================
+
+To create a single Windows Server 2016 instance, run the following:
+
+.. code-block:: shell
+
+ vagrant init jborean93/WindowsServer2016
+ vagrant up
+
+This will download the Vagrant box from Vagrant Cloud and add it to the local
+boxes on your host and then start up that instance in VirtualBox. When starting
+for the first time, the Windows VM will run through the sysprep process and
+then create an HTTP and HTTPS WinRM listener automatically. Vagrant will finish
+its process once the listeners are online, after which the VM can be used by Ansible.
+
+Create an Ansible inventory
+===========================
+
+The following Ansible inventory file can be used to connect to the newly
+created Windows VM:
+
+.. code-block:: ini
+
+ [windows]
+ WindowsServer ansible_host=127.0.0.1
+
+ [windows:vars]
+ ansible_user=vagrant
+ ansible_password=vagrant
+ ansible_port=55986
+ ansible_connection=winrm
+ ansible_winrm_transport=ntlm
+ ansible_winrm_server_cert_validation=ignore
+
+.. note:: The port ``55986`` is automatically forwarded by Vagrant to the
+   Windows host that was created. If this conflicts with an existing local
+   port, Vagrant will automatically use another one at random and display
+   it in the output.
+
+The OS that is created is based on the image set. The following
+images can be used:
+
+- `jborean93/WindowsServer2008-x86 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2008-x86>`_
+- `jborean93/WindowsServer2008-x64 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2008-x64>`_
+- `jborean93/WindowsServer2008R2 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2008R2>`_
+- `jborean93/WindowsServer2012 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2012>`_
+- `jborean93/WindowsServer2012R2 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2012R2>`_
+- `jborean93/WindowsServer2016 <https://app.vagrantup.com/jborean93/boxes/WindowsServer2016>`_
+
+When the host is online, it can be accessed over RDP on ``127.0.0.1:3389``, but
+the port may differ depending on whether there was a conflict. To get rid of the
+host, run ``vagrant destroy --force`` and Vagrant will automatically remove the
+VM and any other files associated with that VM.
+
+While this is useful when testing modules on a single Windows instance, these
+hosts won't work with domain-based modules without modification. The Vagrantfile
+at `ansible-windows <https://github.com/jborean93/ansible-windows/tree/master/vagrant>`_
+can be used to create a test domain environment to be used in Ansible. This
+repo contains three files which are used by both Ansible and Vagrant to create
+multiple Windows hosts in a domain environment. These files are:
+
+- ``Vagrantfile``: The Vagrant file that reads the inventory setup of ``inventory.yml`` and provisions the hosts that are required
+- ``inventory.yml``: Contains the hosts that are required and other connection information such as IP addresses and forwarded ports
+- ``main.yml``: Ansible playbook called by Vagrant to provision the domain controller and join the child hosts to the domain
+
+By default, these files will create the following environment:
+
+- A single domain controller running on Windows Server 2016
+- Five child hosts, one for each major Windows Server version, joined to that domain
+- A domain with the DNS name ``domain.local``
+- A local administrator account on each host with the username ``vagrant`` and password ``vagrant``
+- A domain admin account ``vagrant-domain@domain.local`` with the password ``VagrantPass1``
+
+The domain name and accounts can be modified by changing the variables
+``domain_*`` in the ``inventory.yml`` file if required. The inventory
+file can also be modified to provision more or fewer servers by changing the
+hosts that are defined under the ``domain_children`` key. The host variable
+``ansible_host`` is the private IP that will be assigned to the VirtualBox host
+only network adapter while ``vagrant_box`` is the box that will be used to
+create the VM.
+
+Provisioning the environment
+============================
+
+To provision the environment as is, run the following:
+
+.. code-block:: shell
+
+ git clone https://github.com/jborean93/ansible-windows.git
+    cd ansible-windows/vagrant
+ vagrant up
+
+.. note:: Vagrant provisions each host sequentially so this can take some time
+ to complete. If any errors occur during the Ansible phase of setting up the
+ domain, run ``vagrant provision`` to rerun just that step.
+
+Unlike setting up a single Windows instance with Vagrant, these hosts can also
+be accessed using the IP address directly as well as through the forwarded
+ports. It is easier to access them over the host only network adapter as the
+normal protocol ports are used (for example, RDP is still over ``3389``). In cases where
+the host cannot be resolved using the host only network IP, the following
+protocols can be accessed over ``127.0.0.1`` using these forwarded ports:
+
+- ``RDP``: 295xx
+- ``SSH``: 296xx
+- ``WinRM HTTP``: 297xx
+- ``WinRM HTTPS``: 298xx
+- ``SMB``: 299xx
+
+Replace ``xx`` with the entry number in the inventory file, where the domain
+controller starts at ``00`` and each subsequent host increments from there. For example, in
+the default ``inventory.yml`` file, WinRM over HTTPS for ``SERVER2012R2`` is
+forwarded over port ``29804`` as it's the fourth entry in ``domain_children``.
+
+.. note:: While an SSH server is available on all Windows hosts but Server
+   2008 (non R2), it is not a supported connection for Ansible managing Windows
+ hosts and should not be used with Ansible.
+
+Windows new module development
+==============================
+
+When creating a new module there are a few things to keep in mind:
+
+- Module code is in Powershell (.ps1) files while the documentation is contained in Python (.py) files of the same name
+- Avoid using ``Write-Host/Debug/Verbose/Error`` in the module and add what needs to be returned to the ``$module.Result`` variable
+- To fail a module, call ``$module.FailJson("failure message here")``; an Exception or ErrorRecord can be passed as the second argument, for example ``$module.FailJson("failure", $_)``, to get a more detailed error message
+- Most new modules require check mode and integration tests before they are merged into the main Ansible codebase
+- Avoid using try/catch statements over a large code block; rather, use them for individual calls so the error message can be more descriptive
+- Try and catch specific exceptions when using try/catch statements
+- Avoid using PSCustomObjects unless necessary
+- Look for common functions in ``./lib/ansible/module_utils/powershell/`` and use the code there instead of duplicating work. These can be imported by adding the line ``#Requires -Module *`` where * is the filename to import, and will be automatically included with the module code sent to the Windows target when run via Ansible
+- As well as PowerShell module utils, C# module utils are stored in ``./lib/ansible/module_utils/csharp/`` and are automatically imported in a module execution if the line ``#AnsibleRequires -CSharpUtil *`` is present
+- C# and PowerShell module utils achieve the same goal but C# allows a developer to implement low level tasks, such as calling the Win32 API, and can be faster in some cases
+- Ensure the code runs under Powershell v3 and higher on Windows Server 2008 and higher; if higher minimum Powershell or OS versions are required, ensure the documentation reflects this clearly
+- Ansible runs modules under strictmode version 2.0. Be sure to test with that enabled by putting ``Set-StrictMode -Version 2.0`` at the top of your dev script
+- Favor native Powershell cmdlets over executable calls if possible
+- Use the full cmdlet name instead of aliases, for example ``Remove-Item`` over ``rm``
+- Use named parameters with cmdlets, for example ``Remove-Item -Path C:\temp`` over ``Remove-Item C:\temp``
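+
+To tie several of these recommendations together, the following is a minimal sketch of a module built on the
+``Ansible.Basic.AnsibleModule`` wrapper described below (the ``name`` option is just a placeholder):
+
+.. code-block:: powershell
+
+    #!powershell
+
+    #AnsibleRequires -CSharpUtil Ansible.Basic
+
+    $spec = @{
+        options = @{
+            name = @{ type = 'str'; required = $true }
+        }
+        supports_check_mode = $true
+    }
+    $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+    # do the actual work here and record any change in the result
+    $module.Result.changed = $false
+
+    $module.ExitJson()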
+
+A very basic Powershell module `win_environment <https://github.com/ansible-collections/ansible.windows/blob/master/plugins/modules/win_environment.ps1>`_ incorporates best practices for Powershell modules. It demonstrates how to implement check-mode and diff-support, and also shows a warning to the user when a specific condition is met.
+
+A slightly more advanced module is `win_uri <https://github.com/ansible-collections/ansible.windows/blob/master/plugins/modules/win_uri.ps1>`_ which additionally shows how to use different parameter types (bool, str, int, list, dict, path) and a selection of choices for parameters, how to fail a module and how to handle exceptions.
+
+As part of the new ``AnsibleModule`` wrapper, the input parameters are defined and validated based on an argument
+spec. The following options can be set at the root level of the argument spec:
+
+- ``mutually_exclusive``: A list of lists, where the inner list contains module options that cannot be set together
+- ``no_log``: Stops the module from emitting any logs to the Windows Event log
+- ``options``: A dictionary where the key is the module option and the value is the spec for that option
+- ``required_by``: A dictionary where the option(s) specified by the value must be set if the option specified by the key is also set
+- ``required_if``: A list of lists where the inner list contains 3 or 4 elements:
+ * The first element is the module option to check the value against
+ * The second element is the value of the option specified by the first element, if matched then the required if check is run
+ * The third element is a list of required module options when the above is matched
+ * An optional fourth element is a boolean that states whether all module options in the third elements are required (default: ``$false``) or only one (``$true``)
+- ``required_one_of``: A list of lists, where the inner list contains module options where at least one must be set
+- ``required_together``: A list of lists, where the inner list contains module options that must be set together
+- ``supports_check_mode``: Whether the module supports check mode, by default this is ``$false``
+
+The actual input options for a module are set within the ``options`` value as a dictionary. The keys of this dictionary
+are the module option names while the values are the spec of that module option. Each spec can have the following
+options set:
+
+- ``aliases``: A list of aliases for the module option
+- ``choices``: A list of valid values for the module option, if ``type=list`` then each list value is validated against the choices and not the list itself
+- ``default``: The default value for the module option if not set
+- ``deprecated_aliases``: A list of hashtables that define aliases that are deprecated and the versions they will be removed in. Each entry must contain the keys ``name`` and ``collection_name`` with either ``version`` or ``date``
+- ``elements``: When ``type=list``, this sets the type of each list value, the values are the same as ``type``
+- ``no_log``: Will sanitise the input value before being returned in the ``module_invocation`` return value
+- ``removed_in_version``: States when a deprecated module option is to be removed, a warning is displayed to the end user if set
+- ``removed_at_date``: States the date (YYYY-MM-DD) when a deprecated module option will be removed, a warning is displayed to the end user if set
+- ``removed_from_collection``: States from which collection the deprecated module option will be removed; must be specified if one of ``removed_in_version`` and ``removed_at_date`` is specified
+- ``required``: Will fail when the module option is not set
+- ``type``: The type of the module option, if not set then it defaults to ``str``. The valid types are:
+ * ``bool``: A boolean value
+  * ``dict``: A dictionary value; if the input is a JSON or key=value string, it is converted to a dictionary
+ * ``float``: A float or `Single <https://docs.microsoft.com/en-us/dotnet/api/system.single?view=netframework-4.7.2>`_ value
+ * ``int``: An Int32 value
+ * ``json``: A string where the value is converted to a JSON string if the input is a dictionary
+  * ``list``: A list of values; ``elements=<type>`` can convert the individual list value types if set. If ``elements=dict`` and ``options`` is defined, the values will be validated against the argument spec. If the input is a string, it is split on ``,`` and any whitespace is trimmed
+  * ``path``: A string where values like ``%TEMP%`` are expanded based on environment values. If the input value starts with ``\\?\`` then no expansion is run
+ * ``raw``: No conversions occur on the value passed in by Ansible
+ * ``sid``: Will convert Windows security identifier values or Windows account names to a `SecurityIdentifier <https://docs.microsoft.com/en-us/dotnet/api/system.security.principal.securityidentifier?view=netframework-4.7.2>`_ value
+ * ``str``: The value is converted to a string
+
+When ``type=dict``, or ``type=list`` and ``elements=dict``, the following keys can also be set for that module option:
+
+- ``apply_defaults``: The value is based on the ``options`` spec defaults for that key if ``True`` and null if ``False``. Only valid when the module option is not defined by the user and ``type=dict``.
+- ``mutually_exclusive``: Same as the root level ``mutually_exclusive`` but validated against the values in the sub dict
+- ``options``: Same as the root level ``options`` but contains the valid options for the sub option
+- ``required_if``: Same as the root level ``required_if`` but validated against the values in the sub dict
+- ``required_by``: Same as the root level ``required_by`` but validated against the values in the sub dict
+- ``required_together``: Same as the root level ``required_together`` but validated against the values in the sub dict
+- ``required_one_of``: Same as the root level ``required_one_of`` but validated against the values in the sub dict
+
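+Putting the pieces above together, a sketch of an argument spec for a hypothetical module could look like this
+(all option names are illustrative):
+
+.. code-block:: powershell
+
+    $spec = @{
+        options = @{
+            path = @{ type = 'path'; required = $true }
+            state = @{ type = 'str'; default = 'present'; choices = 'absent', 'present' }
+            credential = @{
+                type = 'dict'
+                options = @{
+                    username = @{ type = 'str' }
+                    password = @{ type = 'str'; no_log = $true }
+                }
+                required_together = @(
+                    ,@('username', 'password')
+                )
+            }
+        }
+        supports_check_mode = $true
+    }
+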
+A module type can also be a delegate function that converts the value to whatever is required by the module option. For
+example the following snippet shows how to create a custom type that creates a ``UInt64`` value:
+
+.. code-block:: powershell
+
+ $spec = @{
+ uint64_type = @{ type = [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0]) } }
+ }
+ $uint64_type = $module.Params.uint64_type
+
+When in doubt, look at some of the other core modules and see how things have been
+implemented there.
+
+Sometimes there are multiple ways that Windows offers to complete a task; this
+is the order to favor when writing modules:
+
+- Native Powershell cmdlets like ``Remove-Item -Path C:\temp -Recurse``
+- .NET classes like ``[System.IO.Path]::GetRandomFileName()``
+- WMI objects through the ``New-CimInstance`` cmdlet
+- COM objects through ``New-Object -ComObject`` cmdlet
+- Calls to native executables like ``Secedit.exe``
+
+PowerShell modules support a small subset of the ``#Requires`` options built
+into PowerShell as well as some Ansible-specific requirements specified by
+``#AnsibleRequires``. These statements can be placed at any point in the script,
+but are most commonly near the top. They are used to make it easier to state the
+requirements of the module without writing any of the checks. Each ``requires``
+statement must be on its own line, but there can be multiple requires statements
+in one script.
+
+These are the checks that can be used within Ansible modules:
+
+- ``#Requires -Module Ansible.ModuleUtils.<module_util>``: Added in Ansible 2.4, specifies a module_util to load in for the module execution.
+- ``#Requires -Version x.y``: Added in Ansible 2.5, specifies the version of PowerShell that is required by the module. The module will fail if this requirement is not met.
+- ``#AnsibleRequires -OSVersion x.y``: Added in Ansible 2.5, specifies the OS build version that is required by the module and will fail if this requirement is not met. The actual OS version is derived from ``[Environment]::OSVersion.Version``.
+- ``#AnsibleRequires -Become``: Added in Ansible 2.5, forces the exec runner to run the module with ``become``, which is primarily used to bypass WinRM restrictions. If ``ansible_become_user`` is not specified then the ``SYSTEM`` account is used instead.
+- ``#AnsibleRequires -CSharpUtil Ansible.<module_util>``: Added in Ansible 2.8, specifies a C# module_util to load in for the module execution.
+
+C# module utils can reference other C# utils by adding the line
+``using Ansible.<module_util>;`` to the top of the script with all the other
+using statements.
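+
+For example, a module that relies on a PowerShell module util, a minimum PowerShell version, and the
+``Ansible.Basic`` C# util might start with the following lines (version numbers are illustrative):
+
+.. code-block:: powershell
+
+    #!powershell
+
+    #Requires -Module Ansible.ModuleUtils.Legacy
+    #Requires -Version 3.0
+    #AnsibleRequires -CSharpUtil Ansible.Basic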
+
+
+Windows module utilities
+========================
+
+Like Python modules, PowerShell modules also provide a number of module
+utilities that provide helper functions within PowerShell. These module_utils
+can be imported by adding the following line to a PowerShell module:
+
+.. code-block:: powershell
+
+ #Requires -Module Ansible.ModuleUtils.Legacy
+
+This will import the module_util at ``./lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1``
+and enable calling all of its functions. As of Ansible 2.8, Windows module
+utils can also be written in C# and stored at ``lib/ansible/module_utils/csharp``.
+These module_utils can be imported by adding the following line to a PowerShell
+module:
+
+.. code-block:: powershell
+
+ #AnsibleRequires -CSharpUtil Ansible.Basic
+
+This will import the module_util at ``./lib/ansible/module_utils/csharp/Ansible.Basic.cs``
+and automatically load the types in the executing process. C# module utils can
+reference each other and be loaded together by adding the following line to the
+using statements at the top of the util:
+
+.. code-block:: csharp
+
+ using Ansible.Become;
+
+There are special comments that can be set in a C# file for controlling the
+compilation parameters. The following comments can be added to the script:
+
+- ``//AssemblyReference -Name <assembly dll> [-CLR [Core|Framework]]``: The assembly DLL to reference during compilation, the optional ``-CLR`` flag can also be used to state whether to reference when running under .NET Core, Framework, or both (if omitted)
+- ``//NoWarn -Name <error id> [-CLR [Core|Framework]]``: A compiler warning ID to ignore when compiling the code, the optional ``-CLR`` works the same as above. A list of warnings can be found at `Compiler errors <https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/compiler-messages/index>`_
+
+In addition, the following pre-processor symbols are defined:
+
+- ``CORECLR``: This symbol is present when PowerShell is running through .NET Core
+- ``WINDOWS``: This symbol is present when PowerShell is running on Windows
+- ``UNIX``: This symbol is present when PowerShell is running on Unix
+
+A combination of these flags helps to make a module util interoperable on both
+.NET Framework and .NET Core. Here is an example of them in action:
+
+.. code-block:: csharp
+
+ #if CORECLR
+ using Newtonsoft.Json;
+ #else
+ using System.Web.Script.Serialization;
+ #endif
+
+ //AssemblyReference -Name Newtonsoft.Json.dll -CLR Core
+ //AssemblyReference -Name System.Web.Extensions.dll -CLR Framework
+
+ // Ignore error CS1702 for all .NET types
+ //NoWarn -Name CS1702
+
+ // Ignore error CS1956 only for .NET Framework
+ //NoWarn -Name CS1956 -CLR Framework
+
+
+The following is a list of module_utils that are packaged with Ansible and a general description of what
+they do:
+
+- ArgvParser: Utility used to convert a list of arguments to an escaped string compliant with the Windows argument parsing rules.
+- CamelConversion: Utility used to convert camelCase strings/lists/dicts to snake_case.
+- CommandUtil: Utility used to execute a Windows process and return the stdout/stderr and rc as separate objects.
+- FileUtil: Utility that expands on the ``Get-ChildItem`` and ``Test-Path`` to work with special files like ``C:\pagefile.sys``.
+- Legacy: General definitions and helper utilities for Ansible modules.
+- LinkUtil: Utility to create, remove, and get information about symbolic links, junction points and hard links.
+- SID: Utilities used to convert a user or group to a Windows SID and vice versa.
+
+For more details on any specific module utility and their requirements, please see the `Ansible
+module utilities source code <https://github.com/ansible/ansible/tree/devel/lib/ansible/module_utils/powershell>`_.
+
+PowerShell module utilities can be stored outside of the standard Ansible
+distribution for use with custom modules. Custom module_utils are placed in a
+folder called ``module_utils`` located in the root folder of the playbook or role
+directory.
+
+C# module utilities can also be stored outside of the standard Ansible distribution for use with custom modules. Like
+PowerShell utils, these are stored in a folder called ``module_utils`` and the filename must end in the extension
+``.cs``, start with ``Ansible.`` and be named after the namespace defined in the util.
+
+The example below shows a role structure that contains two PowerShell custom module_utils,
+``Ansible.ModuleUtils.ModuleUtil1`` and ``Ansible.ModuleUtils.ModuleUtil2``, and a C# util containing the namespace
+``Ansible.CustomUtil``::
+
+ meta/
+ main.yml
+ defaults/
+ main.yml
+ module_utils/
+ Ansible.ModuleUtils.ModuleUtil1.psm1
+ Ansible.ModuleUtils.ModuleUtil2.psm1
+ Ansible.CustomUtil.cs
+ tasks/
+ main.yml
+
+Each PowerShell module_util must contain at least one function that has been exported with ``Export-ModuleMember``
+at the end of the file. For example:
+
+.. code-block:: powershell
+
+ Export-ModuleMember -Function Invoke-CustomUtil, Get-CustomInfo
+
+
+Exposing shared module options
+++++++++++++++++++++++++++++++
+
+PowerShell module utils can easily expose common module options that a module can use when building its argument spec.
+This allows common features to be stored and maintained in one location and have those features used by multiple
+modules with minimal effort. Any new features or bugfixes added to one of these utils are then automatically used by
+the various modules that call that util.
+
+An example of this would be to have a module util that handles authentication and communication against an API. This
+util can be used by multiple modules to expose a common set of module options like the API endpoint, username,
+password, timeout, cert validation, and so on without having to add those options to each module spec.
+
+The standard convention for a module util that exposes a shared argument spec is to have:
+
+- A ``Get-<namespace.name.util name>Spec`` function that outputs the common spec for a module
+  * It is highly recommended to make this function name unique to the module to avoid any conflicts with other utils that can be loaded
+ * The format of the output spec is a Hashtable in the same format as the ``$spec`` used for normal modules
+- A function that takes in an ``AnsibleModule`` object under a ``-Module`` parameter, which it can use to get the shared options
+
+Because these options can be shared across various modules, it is highly recommended to keep the module option names and
+aliases in the shared spec as specific as they can be. For example, do not have a util option called ``password``;
+rather, prefix it with a unique name like ``acme_password``.
+
+.. warning::
+   Failure to have a unique option name or alias can prevent the util from being used by modules that also use those
+   names or aliases for their own options.
+
+The following is an example module util called ``ServiceAuth.psm1`` in a collection that implements a common way for
+modules to authenticate with a service.
+
+.. code-block:: powershell
+
+    Function Invoke-MyServiceResource {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [ValidateScript({ $_.GetType().FullName -eq 'Ansible.Basic.AnsibleModule' })]
+ $Module,
+
+ [Parameter(Mandatory=$true)]
+ [String]
+            $ResourceId,
+
+ [String]
+ $State = 'present'
+ )
+
+ # Process the common module options known to the util
+ $params = @{
+ ServerUri = $Module.Params.my_service_url
+ }
+ if ($Module.Params.my_service_username) {
+ $params.Credential = Get-MyServiceCredential
+ }
+
+ if ($State -eq 'absent') {
+ Remove-MyService @params -ResourceId $ResourceId
+ } else {
+ New-MyService @params -ResourceId $ResourceId
+ }
+ }
+
+    Function Get-MyNamespaceMyCollectionServiceAuthSpec {
+ # Output the util spec
+ @{
+ options = @{
+ my_service_url = @{ type = 'str'; required = $true }
+ my_service_username = @{ type = 'str' }
+ my_service_password = @{ type = 'str'; no_log = $true }
+ }
+
+ required_together = @(
+ ,@('my_service_username', 'my_service_password')
+ )
+ }
+ }
+
+ $exportMembers = @{
+ Function = 'Get-MyNamespaceMyCollectionServiceAuthSpec', 'Invoke-MyServiceResource'
+ }
+ Export-ModuleMember @exportMembers
+
+
+For a module to take advantage of this common argument spec, it can be set out like this:
+
+.. code-block:: powershell
+
+ #!powershell
+
+ # Include the module util ServiceAuth.psm1 from the my_namespace.my_collection collection
+ #AnsibleRequires -PowerShell ansible_collections.my_namespace.my_collection.plugins.module_utils.ServiceAuth
+
+ # Create the module spec like normal
+ $spec = @{
+ options = @{
+ resource_id = @{ type = 'str'; required = $true }
+ state = @{ type = 'str'; choices = 'absent', 'present' }
+ }
+ }
+
+ # Create the module from the module spec but also include the util spec to merge into our own.
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-MyNamespaceMyCollectionServiceAuthSpec))
+
+ # Call the ServiceAuth module util and pass in the module object so it can access the module options.
+    Invoke-MyServiceResource -Module $module -ResourceId $module.Params.resource_id -State $module.Params.state
+
+ $module.ExitJson()
+
+
+.. note::
+ Options defined in the module spec will always have precedence over a util spec. Any list values under the same key
+ in a util spec will be appended to the module spec for that same key. Dictionary values will add any keys that are
+ missing from the module spec and merge any values that are lists or dictionaries. This is similar to how the doc
+ fragment plugins work when extending module documentation.
+
+To document these shared util options for a module, create a doc fragment plugin that documents the options implemented
+by the module util and extend the module docs for every module that implements the util to include that fragment in
+its docs.
+
+
+Windows playbook module testing
+===============================
+
+You can test a module with an Ansible playbook. For example:
+
+- Create a playbook in any directory: ``touch testmodule.yml``.
+- Create an inventory file in the same directory: ``touch hosts``.
+- Populate the inventory file with the variables required to connect to your Windows host(s).
+- Add the following to the new playbook file::
+
+ ---
+ - name: test out windows module
+ hosts: windows
+ tasks:
+ - name: test out module
+ win_module:
+ name: test name
+
+- Run the playbook: ``ansible-playbook -i hosts testmodule.yml``
+
+This can be useful for seeing how Ansible runs with
+the new module end to end. Other possible ways to test the module are
+shown below.
+
+
+Windows debugging
+=================
+
+Currently, debugging a module can only be done on a Windows host. This can be
+useful when developing a new module or implementing bug fixes. Follow these
+steps to set this up:
+
+- Copy the module script to the Windows server
+- Copy the folders ``./lib/ansible/module_utils/powershell`` and ``./lib/ansible/module_utils/csharp`` to the same directory as the script above
+- Add an extra ``#`` to the start of any ``#Requires -Module`` lines in the module code so PowerShell does not process them; the setup script below imports these module utils manually
+- Add the following to the start of the module script that was copied to the server:
+
+.. code-block:: powershell
+
+ # Set $ErrorActionPreference to what's set during Ansible execution
+ $ErrorActionPreference = "Stop"
+
+ # Set the first argument as the path to a JSON file that contains the module args
+ $args = @("$($pwd.Path)\args.json")
+
+ # Or instead of an args file, set $complex_args to the pre-processed module args
+ $complex_args = @{
+ _ansible_check_mode = $false
+ _ansible_diff = $false
+ path = "C:\temp"
+ state = "present"
+ }
+
+    # Import any C# utils referenced with '#AnsibleRequires -CSharpUtil' or 'using Ansible.<module_util>;'
+    # The $_csharp_utils entries should be the contents of the C# util files and not the paths
+ Import-Module -Name "$($pwd.Path)\powershell\Ansible.ModuleUtils.AddType.psm1"
+ $_csharp_utils = @(
+ [System.IO.File]::ReadAllText("$($pwd.Path)\csharp\Ansible.Basic.cs")
+ )
+ Add-CSharpType -References $_csharp_utils -IncludeDebugInfo
+
+    # Import any PowerShell modules referenced with '#Requires -Module'
+ Import-Module -Name "$($pwd.Path)\powershell\Ansible.ModuleUtils.Legacy.psm1"
+
+ # End of the setup code and start of the module code
+ #!powershell
+
+You can add more args to ``$complex_args`` as required by the module or define the module options through a JSON file
+with the structure::
+
+ {
+ "ANSIBLE_MODULE_ARGS": {
+ "_ansible_check_mode": false,
+ "_ansible_diff": false,
+ "path": "C:\\temp",
+ "state": "present"
+ }
+ }
+
+There are multiple IDEs that can be used to debug a PowerShell script; two of
+the most popular ones are:
+
+- `Powershell ISE`_
+- `Visual Studio Code`_
+
+.. _Powershell ISE: https://docs.microsoft.com/en-us/powershell/scripting/core-powershell/ise/how-to-debug-scripts-in-windows-powershell-ise
+.. _Visual Studio Code: https://blogs.technet.microsoft.com/heyscriptingguy/2017/02/06/debugging-powershell-script-in-visual-studio-code-part-1/
+
+To view the arguments as passed by Ansible to the module, follow
+these steps:
+
+- Prefix the Ansible command with :envvar:`ANSIBLE_KEEP_REMOTE_FILES=1<ANSIBLE_KEEP_REMOTE_FILES>` to specify that Ansible should keep the exec files on the server.
+- Log onto the Windows server using the same user account that Ansible used to execute the module.
+- Navigate to ``%TEMP%\..``. It should contain a folder starting with ``ansible-tmp-``.
+- Inside this folder, open the PowerShell script for the module.
+- This script contains raw JSON under ``$json_raw``, which holds the module arguments under ``module_args``. These args can be assigned manually to the ``$complex_args`` variable that is defined in your debug script or put in the ``args.json`` file.
+
+
+Windows unit testing
+====================
+
+Currently there is no mechanism to run unit tests for PowerShell modules under Ansible CI.
+
+
+Windows integration testing
+===========================
+
+Integration tests for Ansible modules are typically written as Ansible roles. These test
+roles are located in ``./test/integration/targets``. You must first set up your testing
+environment, and configure a test inventory for Ansible to connect to.
+
+In this example we will set up a test inventory to connect to two hosts and run the integration
+tests for win_stat:
+
+- Run the command ``source ./hacking/env-setup`` to prepare your environment.
+- Create a copy of ``./test/integration/inventory.winrm.template`` and name it ``inventory.winrm``.
+- Fill in entries under ``[windows]`` and set the required variables that are needed to connect to the host.
+- :ref:`Install the required Python modules <windows_winrm>` to support WinRM and a configured authentication method.
+- To execute the integration tests, run ``ansible-test windows-integration win_stat``; you can replace ``win_stat`` with the role you want to test.
+
+This will execute all the tests currently defined for that role. You can set
+the verbosity level using the ``-v`` argument just as you would with
+ansible-playbook.
+
+When developing tests for a new module, it is recommended to test a scenario once in
+check mode and twice not in check mode. This ensures that check mode
+does not make any changes but reports a change, as well as that the second run is
+idempotent and does not report changes. For example:
+
+.. code-block:: yaml
+
+ - name: remove a file (check mode)
+ win_file:
+ path: C:\temp
+ state: absent
+ register: remove_file_check
+ check_mode: yes
+
+ - name: get result of remove a file (check mode)
+ win_command: powershell.exe "if (Test-Path -Path 'C:\temp') { 'true' } else { 'false' }"
+ register: remove_file_actual_check
+
+ - name: assert remove a file (check mode)
+ assert:
+ that:
+ - remove_file_check is changed
+ - remove_file_actual_check.stdout == 'true\r\n'
+
+ - name: remove a file
+ win_file:
+ path: C:\temp
+ state: absent
+ register: remove_file
+
+ - name: get result of remove a file
+ win_command: powershell.exe "if (Test-Path -Path 'C:\temp') { 'true' } else { 'false' }"
+ register: remove_file_actual
+
+ - name: assert remove a file
+ assert:
+ that:
+ - remove_file is changed
+ - remove_file_actual.stdout == 'false\r\n'
+
+ - name: remove a file (idempotent)
+ win_file:
+ path: C:\temp
+ state: absent
+ register: remove_file_again
+
+ - name: assert remove a file (idempotent)
+ assert:
+ that:
+ - not remove_file_again is changed
+
+
+Windows communication and development support
+=============================================
+
+Join the IRC channel ``#ansible-devel`` or ``#ansible-windows`` on freenode for
+discussions about Ansible development for Windows.
+
+For questions and discussions pertaining to using the Ansible product,
+use the ``#ansible`` channel.
diff --git a/docs/docsite/rst/dev_guide/developing_modules_in_groups.rst b/docs/docsite/rst/dev_guide/developing_modules_in_groups.rst
new file mode 100644
index 00000000..31a9ec9d
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_modules_in_groups.rst
@@ -0,0 +1,80 @@
+.. _developing_modules_in_groups:
+
+*************************
+Creating a new collection
+*************************
+
+Starting with Ansible 2.10, related modules should be developed in a collection. The Ansible core team and community compiled these module development tips and tricks to help companies developing Ansible modules for their products and users developing Ansible modules for third-party products. See :ref:`developing_collections` for a more detailed description of the collections format and additional development guidelines.
+
+.. contents::
+ :local:
+
+.. include:: shared_snippets/licensing.txt
+
+Before you start coding
+=======================
+
+This list of prerequisites is designed to help ensure that you develop high-quality modules that work well with ansible-base and provide a seamless user experience.
+
+* Read through all the pages linked off :ref:`developing_modules_general`, paying particular attention to the :ref:`developing_modules_checklist`.
+* We encourage PEP 8 compliance. See :ref:`testing_pep8` for more information.
+* We encourage supporting :ref:`Python 2.6+ and Python 3.5+ <developing_python_3>`.
+* Look at Ansible Galaxy and review the naming conventions in your functional area (such as cloud, networking, databases).
+* With great power comes great responsibility: Ansible collection maintainers have a duty to help keep content up to date and release collections they are responsible for regularly. As with all successful community projects, collection maintainers should keep a watchful eye for reported issues and contributions.
+* We strongly recommend unit and/or integration tests. Unit tests are especially valuable when external resources (such as cloud or network devices) are required. For more information see :ref:`developing_testing` and the `Testing Working Group <https://github.com/ansible/community/blob/master/meetings/README.md>`_.
+
+
+Naming conventions
+==================
+
+Fully Qualified Collection Names (FQCNs) for plugins and modules include three elements:
+
+ * the Galaxy namespace, which generally represents the company or group
+ * the collection name, which generally represents the product or OS
+   * the plugin or module name
+
+     * always in lower case
+ * words separated with an underscore (``_``) character
+ * singular, rather than plural, for example, ``command`` not ``commands``
+
+For example, ``community.mongodb.mongodb_linux`` or ``cisco.meraki.meraki_device``.
+
+It is convenient if the organization and repository names on GitHub (or elsewhere) match your namespace and collection names on Ansible Galaxy, but it is not required. The plugin names you select, however, are always the same in your code repository and in your collection artifact on Galaxy.
+
+Speak to us
+===========
+
+Circulating your ideas before coding helps you adopt good practices and avoid common mistakes. After reading the "Before you start coding" section you should have a reasonable idea of the structure of your modules. Write a list of your proposed plugin and/or module names, with a short description of what each one does. Circulate that list on IRC or a mailing list so the Ansible community can review your ideas for consistency and familiarity. Names and functionality that are consistent, predictable, and familiar make your collection easier to use.
+
+Where to get support
+====================
+
+Ansible has a thriving and knowledgeable community of module developers that is a great resource for getting your questions answered.
+
+In the :ref:`ansible_community_guide` you can find how to:
+
+* Subscribe to the Mailing Lists - We suggest "Ansible Development List" and "Ansible Announce List"
+* ``#ansible-devel`` - We have found that IRC ``#ansible-devel`` on FreeNode's IRC network works best for developers so we can have an interactive dialogue.
+* IRC meetings - Join the various weekly IRC meetings `meeting schedule and agenda page <https://github.com/ansible/community/blob/master/meetings/README.md>`_
+
+Required files
+==============
+
+Your collection should include the following files to be usable:
+
+* an ``__init__.py`` file - An empty file to initialize the namespace and allow Python to import the files. *Required*
+* at least one plugin, for example, ``/plugins/modules/$your_first_module.py``. *Required*
+* if needed, one or more ``/plugins/doc_fragments/$topic.py`` files - Code documentation, such as details regarding common arguments. *Optional*
+* if needed, one or more ``/plugins/module_utils/$topic.py`` files - Code shared between more than one module, such as common arguments. *Optional*
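+
+As a rough sketch, the files above might be laid out like this (placement of the ``__init__.py`` file and the
+placeholder names can vary with your packaging)::
+
+    plugins/
+        __init__.py
+        modules/
+            my_first_module.py
+        doc_fragments/
+            my_topic.py
+        module_utils/
+            my_topic.py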
+
+When you have these files ready, review the :ref:`developing_modules_checklist` again. If you are creating a new collection, you are responsible for all procedures related to your repository, including setting rules for contributions, finding reviewers, and testing and maintaining the code in your collection.
+
+If you need help or advice, consider joining the ``#ansible-devel`` IRC channel (see "Where to get support" above).
+
+New to git or GitHub
+====================
+
+We realize this may be your first use of Git or GitHub. The following guides may be of use:
+
+* `How to create a fork of ansible/ansible <https://help.github.com/articles/fork-a-repo/>`_
+* `How to sync (update) your fork <https://help.github.com/articles/syncing-a-fork/>`_
+* `How to create a Pull Request (PR) <https://help.github.com/articles/about-pull-requests/>`_
diff --git a/docs/docsite/rst/dev_guide/developing_plugins.rst b/docs/docsite/rst/dev_guide/developing_plugins.rst
new file mode 100644
index 00000000..4ec08505
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_plugins.rst
@@ -0,0 +1,495 @@
+.. _developing_plugins:
+.. _plugin_guidelines:
+
+******************
+Developing plugins
+******************
+
+.. contents::
+ :local:
+
+Plugins augment Ansible's core functionality with logic and features that are accessible to all modules. Ansible collections include a number of handy plugins, and you can easily write your own. All plugins must:
+
+* be written in Python
+* raise errors
+* return strings in unicode
+* conform to Ansible's configuration and documentation standards
+
+Once you've reviewed these general guidelines, you can skip to the particular type of plugin you want to develop.
+
+Writing plugins in Python
+=========================
+
+You must write your plugin in Python so it can be loaded by the ``PluginLoader`` and returned as a Python object that any module can use. Since your plugin will execute on the controller, you must write it in a :ref:`compatible version of Python <control_node_requirements>`.
+
+Raising errors
+==============
+
+You should return errors encountered during plugin execution by raising ``AnsibleError()`` or a similar class with a message describing the error. When wrapping other exceptions into error messages, you should always use the ``to_native`` Ansible function to ensure proper string compatibility across Python versions:
+
+.. code-block:: python
+
+ from ansible.module_utils._text import to_native
+
+ try:
+ cause_an_exception()
+ except Exception as e:
+ raise AnsibleError('Something happened, this was original exception: %s' % to_native(e))
+
+Check the different `AnsibleError objects <https://github.com/ansible/ansible/blob/devel/lib/ansible/errors/__init__.py>`_ and see which one applies best to your situation.
+
+String encoding
+===============
+
+You must convert any strings returned by your plugin into Python's unicode type. Converting to unicode ensures that these strings can run through Jinja2. To convert strings:
+
+.. code-block:: python
+
+ from ansible.module_utils._text import to_text
+ result_string = to_text(result_string)
+
+Plugin configuration & documentation standards
+==============================================
+
+To define configurable options for your plugin, describe them in the ``DOCUMENTATION`` section of the Python file. Callback and connection plugins have declared configuration requirements this way since Ansible version 2.4; most plugin types now do the same. This approach ensures that the documentation of your plugin's options will always be correct and up-to-date. To add a configurable option to your plugin, define it in this format:
+
+.. code-block:: yaml
+
+ options:
+ option_name:
+ description: describe this config option
+ default: default value for this config option
+ env:
+ - name: NAME_OF_ENV_VAR
+ ini:
+ - section: section_of_ansible.cfg_where_this_config_option_is_defined
+ key: key_used_in_ansible.cfg
+ required: True/False
+ type: boolean/float/integer/list/none/path/pathlist/pathspec/string/tmppath
+ version_added: X.x
+
+To access the configuration settings in your plugin, use ``self.get_option(<option_name>)``. For most plugin types, the controller pre-populates the settings. If you need to populate settings explicitly, use a ``self.set_options()`` call.
+
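+For instance, a lookup plugin's ``run()`` method might resolve its declared options like this (a sketch, assuming an option called ``option_name`` was declared as above):
+
+.. code-block:: python
+
+    def run(self, terms, variables=None, **kwargs):
+        # resolve the options declared in DOCUMENTATION for this invocation
+        self.set_options(var_options=variables, direct=kwargs)
+        option_value = self.get_option('option_name')
+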
+Plugins that support embedded documentation (see :ref:`ansible-doc` for the list) should include well-formed doc strings. If you inherit from a plugin, you must document the options it takes, either via a documentation fragment or as a copy. See :ref:`module_documenting` for more information on correct documentation. Thorough documentation is a good idea even if you're developing a plugin for local use.
+
+Developing particular plugin types
+==================================
+
+.. _developing_actions:
+
+Action plugins
+--------------
+
+Action plugins let you integrate local processing and local data with module functionality.
+
+To create an action plugin, create a new class with ``ActionBase`` as the parent:
+
+.. code-block:: python
+
+ from ansible.plugins.action import ActionBase
+
+ class ActionModule(ActionBase):
+ pass
+
+From there, execute the module using the ``_execute_module`` method to call the original module.
+After successful execution of the module, you can modify the module return data.
+
+.. code-block:: python
+
+ module_return = self._execute_module(module_name='<NAME_OF_MODULE>',
+ module_args=module_args,
+ task_vars=task_vars, tmp=tmp)
+
+
+For example, if you wanted to check the time difference between your Ansible controller and your target machine(s), you could write an action plugin to check the local time and compare it to the return data from Ansible's ``setup`` module:
+
+.. code-block:: python
+
+ #!/usr/bin/python
+ # Make coding more python3-ish, this is required for contributions to Ansible
+ from __future__ import (absolute_import, division, print_function)
+ __metaclass__ = type
+
+ from ansible.plugins.action import ActionBase
+ from datetime import datetime
+
+
+ class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ super(ActionModule, self).run(tmp, task_vars)
+ module_args = self._task.args.copy()
+ module_return = self._execute_module(module_name='setup',
+ module_args=module_args,
+ task_vars=task_vars, tmp=tmp)
+ ret = dict()
+ remote_date = None
+ if not module_return.get('failed'):
+ for key, value in module_return['ansible_facts'].items():
+ if key == 'ansible_date_time':
+ remote_date = value['iso8601']
+
+ if remote_date:
+ remote_date_obj = datetime.strptime(remote_date, '%Y-%m-%dT%H:%M:%SZ')
+ time_delta = datetime.now() - remote_date_obj
+ ret['delta_seconds'] = time_delta.seconds
+ ret['delta_days'] = time_delta.days
+ ret['delta_microseconds'] = time_delta.microseconds
+
+ return dict(ansible_facts=dict(ret))
+
+
+This code checks the time on the controller, captures the date and time for the remote machine using the ``setup`` module, and calculates the difference between the captured time and
+the local time, returning the time delta in days, seconds and microseconds.
+
+For practical examples of action plugins,
+see the source code for the `action plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/action>`_.
+
+.. _developing_cache_plugins:
+
+Cache plugins
+-------------
+
+Cache plugins store gathered facts and data retrieved by inventory plugins.
+
+Import cache plugins using the cache_loader so you can use ``self.set_options()`` and ``self.get_option(<option_name>)``. If you import a cache plugin directly in the code base, you can only access options via ``ansible.constants``, and you break the cache plugin's ability to be used by an inventory plugin.
+
+.. code-block:: python
+
+ from ansible.plugins.loader import cache_loader
+ [...]
+ plugin = cache_loader.get('custom_cache', **cache_kwargs)
+
+There are two base classes for cache plugins, ``BaseCacheModule`` for database-backed caches, and ``BaseFileCacheModule`` for file-backed caches.
+
+To create a cache plugin, start by creating a new ``CacheModule`` class with the appropriate base class. If your plugin defines an ``__init__`` method, initialize the base class with any provided args and kwargs to be compatible with inventory plugin cache options. The base class calls ``self.set_options(direct=kwargs)``. After the base class ``__init__`` method is called, use ``self.get_option(<option_name>)`` to access cache options.
+
+New cache plugins should take the options ``_uri``, ``_prefix``, and ``_timeout`` to be consistent with existing cache plugins.
+
+.. code-block:: python
+
+ from ansible.plugins.cache import BaseCacheModule
+
+ class CacheModule(BaseCacheModule):
+ def __init__(self, *args, **kwargs):
+ super(CacheModule, self).__init__(*args, **kwargs)
+ self._connection = self.get_option('_uri')
+ self._prefix = self.get_option('_prefix')
+ self._timeout = self.get_option('_timeout')
+
+If you use the ``BaseCacheModule``, you must implement the methods ``get``, ``contains``, ``keys``, ``set``, ``delete``, ``flush``, and ``copy``. The ``contains`` method should return a boolean that indicates if the key exists and has not expired. Unlike file-based caches, the ``get`` method does not raise a KeyError if the cache has expired.
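+
+For instance, a toy in-memory ``CacheModule`` (nothing persists between runs, so this is only an illustration of the required methods) might look like:
+
+.. code-block:: python
+
+    from ansible.plugins.cache import BaseCacheModule
+
+
+    class CacheModule(BaseCacheModule):
+
+        def __init__(self, *args, **kwargs):
+            super(CacheModule, self).__init__(*args, **kwargs)
+            self._cache = {}
+
+        def get(self, key):
+            return self._cache[key]
+
+        def set(self, key, value):
+            self._cache[key] = value
+
+        def contains(self, key):
+            # a real plugin would also honor the _timeout option here
+            return key in self._cache
+
+        def keys(self):
+            return list(self._cache.keys())
+
+        def delete(self, key):
+            del self._cache[key]
+
+        def flush(self):
+            self._cache = {}
+
+        def copy(self):
+            return self._cache.copy()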
+
+If you use the ``BaseFileCacheModule``, you must implement ``_load`` and ``_dump`` methods that will be called from the base class methods ``get`` and ``set``.
+
+If your cache plugin stores JSON, use ``AnsibleJSONEncoder`` in the ``_dump`` or ``set`` method and ``AnsibleJSONDecoder`` in the ``_load`` or ``get`` method.
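+
+As a sketch, the ``_load`` and ``_dump`` methods of a JSON file-backed cache could be implemented like this:
+
+.. code-block:: python
+
+    import json
+
+    from ansible.parsing.ajson import AnsibleJSONDecoder, AnsibleJSONEncoder
+    from ansible.plugins.cache import BaseFileCacheModule
+
+
+    class CacheModule(BaseFileCacheModule):
+
+        def _load(self, filepath):
+            # called by the base class get() to read a value from disk
+            with open(filepath, 'r') as f:
+                return json.load(f, cls=AnsibleJSONDecoder)
+
+        def _dump(self, value, filepath):
+            # called by the base class set() to write a value to disk
+            with open(filepath, 'w') as f:
+                f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))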
+
+For example cache plugins, see the source code for the `cache plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/cache>`_.
+
+.. _developing_callbacks:
+
+Callback plugins
+----------------
+
+Callback plugins add new behaviors to Ansible when responding to events. By default, callback plugins control most of the output you see when running the command line programs.
+
+To create a callback plugin, create a new class with ``CallbackBase`` as the parent:
+
+.. code-block:: python
+
+ from ansible.plugins.callback import CallbackBase
+
+ class CallbackModule(CallbackBase):
+ pass
+
+From there, override the specific methods from the CallbackBase that you want to provide a callback for.
+For plugins intended for use with Ansible version 2.0 and later, you should only override methods that start with ``v2``.
+For a complete list of methods that you can override, please see ``__init__.py`` in the
+`lib/ansible/plugins/callback <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/callback>`_ directory.
+
+The following is a modified example of how Ansible's timer plugin is implemented,
+but with an extra option so you can see how configuration works in Ansible version 2.4 and later:
+
+.. code-block:: python
+
+ # Make coding more python3-ish, this is required for contributions to Ansible
+ from __future__ import (absolute_import, division, print_function)
+ __metaclass__ = type
+
+ # not only visible to ansible-doc, it also 'declares' the options the plugin requires and how to configure them.
+ DOCUMENTATION = '''
+ callback: timer
+ callback_type: aggregate
+ requirements:
+ - whitelist in configuration
+ short_description: Adds time to play stats
+ version_added: "2.0"
+ description:
+ - This callback just adds total play duration to the play stats.
+ options:
+ format_string:
+ description: format of the string shown to user at play end
+ ini:
+ - section: callback_timer
+ key: format_string
+ env:
+ - name: ANSIBLE_CALLBACK_TIMER_FORMAT
+ default: "Playbook run took %s days, %s hours, %s minutes, %s seconds"
+ '''
+ from datetime import datetime
+
+ from ansible.plugins.callback import CallbackBase
+
+
+ class CallbackModule(CallbackBase):
+ """
+ This callback module tells you how long your plays ran for.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'namespace.collection_name.timer'
+
+ # only needed if you ship it and don't want to enable by default
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ # make sure the expected objects are present, calling the base's __init__
+ super(CallbackModule, self).__init__()
+
+ # start the timer when the plugin is loaded, the first play should start a few milliseconds after.
+ self.start_time = datetime.now()
+
+ def _days_hours_minutes_seconds(self, runtime):
+ ''' internal helper method for this callback '''
+ minutes = (runtime.seconds // 60) % 60
+ r_seconds = runtime.seconds - (minutes * 60)
+ return runtime.days, runtime.seconds // 3600, minutes, r_seconds
+
+    # this is the only event we care about for display, when the play shows its summary stats; the rest are ignored by the base class
+ def v2_playbook_on_stats(self, stats):
+ end_time = datetime.now()
+ runtime = end_time - self.start_time
+
+ # Shows the usage of a config option declared in the DOCUMENTATION variable. Ansible will have set it when it loads the plugin.
+ # Also note the use of the display object to print to screen. This is available to all callbacks, and you should use this over printing yourself
+ self._display.display(self._plugin_options['format_string'] % (self._days_hours_minutes_seconds(runtime)))
+
+Note that the ``CALLBACK_VERSION`` and ``CALLBACK_NAME`` definitions are required for properly functioning plugins for Ansible version 2.0 and later. ``CALLBACK_TYPE`` is mostly needed to distinguish 'stdout' plugins from the rest, since you can only load one plugin that writes to stdout.
+
+For example callback plugins, see the source code for the `callback plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/callback>`_.
+
+.. _developing_connection_plugins:
+
+Connection plugins
+------------------
+
+Connection plugins allow Ansible to connect to the target hosts so it can execute tasks on them. Ansible ships with many connection plugins, but only one can be used per host at a time. The most commonly used connection plugins are the ``paramiko`` SSH, native ssh (just called ``ssh``), and ``local`` connection types. All of these can be used in playbooks and with ``/usr/bin/ansible`` to connect to remote machines.
+
+Ansible version 2.1 introduced the ``smart`` connection plugin. The ``smart`` connection type allows Ansible to automatically select either the ``paramiko`` or ``openssh`` connection plugin based on system capabilities, or the ``ssh`` connection plugin if OpenSSH supports ControlPersist.
+
+To create a new connection plugin (for example, to support SNMP, Message bus, or other transports), copy the format of one of the existing connection plugins and drop it into the ``connection`` directory on your :ref:`local plugin path <local_plugins>`.
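+
+A bare skeleton, using a hypothetical ``my_transport`` name and the method signatures a connection plugin is expected to implement, might look like the following sketch (a real plugin also needs a ``DOCUMENTATION`` block as described above):
+
+.. code-block:: python
+
+    from ansible.plugins.connection import ConnectionBase
+
+
+    class Connection(ConnectionBase):
+
+        transport = 'my_transport'
+
+        def _connect(self):
+            # establish the connection to self._play_context.remote_addr here
+            self._connected = True
+            return self
+
+        def exec_command(self, cmd, in_data=None, sudoable=True):
+            super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+            # run cmd on the target and return (returncode, stdout, stderr)
+
+        def put_file(self, in_path, out_path):
+            super(Connection, self).put_file(in_path, out_path)
+            # copy in_path on the controller to out_path on the target
+
+        def fetch_file(self, in_path, out_path):
+            super(Connection, self).fetch_file(in_path, out_path)
+            # copy in_path on the target to out_path on the controller
+
+        def close(self):
+            self._connected = False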
+
+Connection plugins can support common options (such as the ``--timeout`` flag) by defining an entry in the documentation for the attribute name (in this case ``timeout``). If the common option has a non-null default, the plugin should define the same default since a different default would be ignored.
+
+For example connection plugins, see the source code for the `connection plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/connection>`_.
+
+.. _developing_filter_plugins:
+
+Filter plugins
+--------------
+
+Filter plugins manipulate data. They are a feature of Jinja2 and are also available in Jinja2 templates used by the ``template`` module. As with all plugins, they can be easily extended, but instead of having a file for each one you can have several per file. Most of the filter plugins shipped with Ansible reside in a ``core.py``.
+
+Filter plugins do not use the standard configuration and documentation system described above.
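+
+A filter plugin file defines a ``FilterModule`` class whose ``filters()`` method maps filter names to callables. A minimal sketch (the ``reverse_string`` filter is made up for illustration):
+
+.. code-block:: python
+
+    def reverse_string(value):
+        # '{{ "abc" | reverse_string }}' renders as 'cba'
+        return value[::-1]
+
+
+    class FilterModule(object):
+        """Expose the filters above to Jinja2 templates."""
+
+        def filters(self):
+            return {
+                'reverse_string': reverse_string,
+            }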
+
+For example filter plugins, see the source code for the `filter plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/filter>`_.
+
+.. _developing_inventory_plugins:
+
+Inventory plugins
+-----------------
+
+Inventory plugins parse inventory sources and form an in-memory representation of the inventory. Inventory plugins were added in Ansible version 2.4.
+
+You can see the details for inventory plugins in the :ref:`developing_inventory` page.
+
+.. _developing_lookup_plugins:
+
+Lookup plugins
+--------------
+
+Lookup plugins pull in data from external data stores. Lookup plugins can be used within playbooks both for looping --- playbook language constructs like ``with_fileglob`` and ``with_items`` are implemented via lookup plugins --- and to return values into a variable or parameter.
+
+Lookup plugins are very flexible, allowing you to retrieve and return any type of data. When writing lookup plugins, always return data of a consistent type that can be easily consumed in a playbook. Avoid parameters that change the returned data type. If there is a need to return a single value sometimes and a complex dictionary other times, write two different lookup plugins.
+
+Ansible includes many :ref:`filters <playbooks_filters>` which can be used to manipulate the data returned by a lookup plugin. Sometimes it makes sense to do the filtering inside the lookup plugin, other times it is better to return results that can be filtered in the playbook. Keep in mind how the data will be referenced when determining the appropriate level of filtering to be done inside the lookup plugin.
+
+Here's a simple lookup plugin implementation --- this lookup returns the contents of a text file as a variable:
+
+.. code-block:: python
+
+ # python 3 headers, required if submitting to Ansible
+ from __future__ import (absolute_import, division, print_function)
+ __metaclass__ = type
+
+ DOCUMENTATION = """
+ lookup: file
+ author: Daniel Hokka Zakrisson <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read file contents
+ description:
+ - This lookup returns the contents from a file on the Ansible controller's file system.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ notes:
+ - if read in variable context, the file can be interpreted as YAML if the content is valid to the parser.
+          - this lookup does not understand globbing --- use the fileglob lookup instead.
+ """
+ from ansible.errors import AnsibleError, AnsibleParserError
+ from ansible.plugins.lookup import LookupBase
+ from ansible.utils.display import Display
+
+ display = Display()
+
+
+ class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ # lookups in general are expected to both take a list as input and output a list
+ # this is done so they work with the looping construct 'with_'.
+ ret = []
+ for term in terms:
+ display.debug("File lookup term: %s" % term)
+
+ # Find the file in the expected search path, using a class method
+ # that implements the 'expected' search path for Ansible plugins.
+ lookupfile = self.find_file_in_search_path(variables, 'files', term)
+
+ # Don't use print or your own logging, the display class
+ # takes care of it in a unified way.
+ display.vvvv(u"File lookup using %s as file" % lookupfile)
+ try:
+ if lookupfile:
+ contents, show_data = self._loader._get_file_contents(lookupfile)
+ ret.append(contents.rstrip())
+ else:
+ # Always use ansible error classes to throw 'final' exceptions,
+ # so the Ansible engine will know how to deal with them.
+ # The Parser error indicates invalid options passed
+ raise AnsibleParserError()
+ except AnsibleParserError:
+ raise AnsibleError("could not locate file in lookup: %s" % term)
+
+ return ret
+
+The following is an example of how this lookup is called::
+
+ ---
+ - hosts: all
+ vars:
+ contents: "{{ lookup('namespace.collection_name.file', '/etc/foo.txt') }}"
+
+ tasks:
+
+ - debug:
+ msg: the value of foo.txt is {{ contents }} as seen today {{ lookup('pipe', 'date +"%Y-%m-%d"') }}
+
+For example lookup plugins, see the source code for the `lookup plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/lookup>`_.
+
+For more usage examples of lookup plugins, see :ref:`Using Lookups<playbooks_lookups>`.
+
+.. _developing_test_plugins:
+
+Test plugins
+------------
+
+Test plugins verify data. They are a feature of Jinja2 and are also available in Jinja2 templates used by the ``template`` module. As with all plugins, they can be easily extended, but instead of having a file for each one you can have several per file. Most of the test plugins shipped with Ansible reside in ``core.py``. These are especially useful in conjunction with some filter plugins like ``map`` and ``select``; they are also available for conditional directives like ``when:``.
+
+Test plugins do not use the standard configuration and documentation system described above.
+
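+A test plugin file has the same shape as a filter plugin file, except that it exposes a ``TestModule`` class whose ``tests`` method returns callables yielding booleans. A minimal sketch, with a hypothetical ``empty`` test:
+
+.. code-block:: python
+
+    from __future__ import (absolute_import, division, print_function)
+    __metaclass__ = type
+
+
+    def is_empty(value):
+        # hypothetical test: true when the value contains no elements
+        return len(value) == 0
+
+
+    class TestModule(object):
+
+        def tests(self):
+            return {
+                'empty': is_empty,
+            }
+
+In a playbook, such a test could then be used in a conditional like ``when: my_list is empty``.
+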
+For example test plugins, see the source code for the `test plugins included with Ansible Core <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/test>`_.
+
+.. _developing_vars_plugins:
+
+Vars plugins
+------------
+
+Vars plugins inject additional variable data into Ansible runs that did not come from an inventory source, playbook, or command line. Playbook constructs like 'host_vars' and 'group_vars' work using vars plugins.
+
+Vars plugins were partially implemented in Ansible 2.0 and rewritten to be fully implemented starting with Ansible 2.4. Vars plugins in collections are not supported before Ansible 2.10.
+
+Older plugins used a ``run`` method as their main body/work:
+
+.. code-block:: python
+
+ def run(self, name, vault_password=None):
+ pass # your code goes here
+
+
+Ansible 2.0 did not pass passwords to older plugins, so vaults were unavailable.
+Most of the work now happens in the ``get_vars`` method which is called from the VariableManager when needed.
+
+.. code-block:: python
+
+ def get_vars(self, loader, path, entities):
+ pass # your code goes here
+
+The parameters are:
+
+ * loader: Ansible's DataLoader. The DataLoader can read files, auto-load JSON/YAML and decrypt vaulted data, and cache read files.
+ * path: this is 'directory data' for every inventory source and the current play's playbook directory, so the plugin can search for data relative to them. ``get_vars`` will be called at least once per available path.
+ * entities: these are host or group names that are pertinent to the variables needed. The plugin will get called once for hosts and again for groups.
+
+This ``get_vars`` method just needs to return a dictionary structure with the variables.
+
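+For instance, a minimal (hypothetical) vars plugin that attaches the same marker variable to every host and group it is asked about might look like this:
+
+.. code-block:: python
+
+    from ansible.plugins.vars import BaseVarsPlugin
+
+
+    class VarsModule(BaseVarsPlugin):
+
+        def get_vars(self, loader, path, entities):
+            # let the base class record the base directory for this path
+            super(VarsModule, self).get_vars(loader, path, entities)
+            # hypothetical: every entity gets the same extra variable
+            return {'example_marker': 'set by my vars plugin'}
+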
+Since Ansible version 2.4, vars plugins only execute as needed when preparing to execute a task. This avoids the costly 'always execute' behavior that occurred during inventory construction in older versions of Ansible. Since Ansible version 2.10, vars plugin execution can be toggled by the user to run when preparing to execute a task or after importing an inventory source.
+
+Since Ansible 2.10, vars plugins can require whitelisting. Vars plugins that don't require whitelisting will run by default. To require whitelisting for your plugin set the class variable ``REQUIRES_WHITELIST``:
+
+.. code-block:: python
+
+ class VarsModule(BaseVarsPlugin):
+ REQUIRES_WHITELIST = True
+
+Include the ``vars_plugin_staging`` documentation fragment to allow users to determine when vars plugins run.
+
+.. code-block:: python
+
+ DOCUMENTATION = '''
+ vars: custom_hostvars
+ version_added: "2.10"
+ short_description: Load custom host vars
+ description: Load custom host vars
+ options:
+ stage:
+ ini:
+ - key: stage
+ section: vars_custom_hostvars
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+ extends_documentation_fragment:
+ - vars_plugin_staging
+ '''
+
+Also since Ansible 2.10, vars plugins can reside in collections. Vars plugins in collections must require whitelisting to be functional.
+
+For example vars plugins, see the source code for the `vars plugins included with Ansible Core
+<https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/vars>`_.
+
+.. seealso::
+
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_api`
+ Learn about the Python API for task execution
+ :ref:`developing_inventory`
+ Learn about how to develop dynamic inventory sources
+ :ref:`developing_modules_general`
+ Learn about how to write Ansible modules
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ The development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/developing_program_flow_modules.rst b/docs/docsite/rst/dev_guide/developing_program_flow_modules.rst
new file mode 100644
index 00000000..5300fb55
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_program_flow_modules.rst
@@ -0,0 +1,880 @@
+.. _flow_modules:
+.. _developing_program_flow_modules:
+
+***************************
+Ansible module architecture
+***************************
+
+If you are working on the ``ansible-base`` code, writing an Ansible module, or developing an action plugin, you may need to understand how Ansible's program flow executes. If you are just using Ansible Modules in playbooks, you can skip this section.
+
+.. contents::
+ :local:
+
+.. _flow_types_of_modules:
+
+Types of modules
+================
+
+Ansible supports several different types of modules in its code base. Some of
+these are for backwards compatibility and others are to enable flexibility.
+
+.. _flow_action_plugins:
+
+Action plugins
+--------------
+
+Action plugins look like modules to anyone writing a playbook. Usage documentation for most action plugins lives inside a module of the same name. Some action plugins do all the work, with the module providing only documentation. Some action plugins execute modules. The ``normal`` action plugin executes modules that don't have special action plugins. Action plugins always execute on the controller.
+
+Some action plugins do all their work on the controller. For
+example, the :ref:`debug <debug_module>` action plugin (which prints text for
+the user to see) and the :ref:`assert <assert_module>` action plugin (which
+tests whether values in a playbook satisfy certain criteria) execute entirely on the controller.
+
+Most action plugins set up some values on the controller, then invoke an
+actual module on the managed node that does something with these values. For example, the :ref:`template <template_module>` action plugin takes values from
+the user to construct a file in a temporary location on the controller using
+variables from the playbook environment. It then transfers the temporary file
+to a temporary file on the remote system. After that, it invokes the
+:ref:`copy module <copy_module>` which operates on the remote system to move the file
+into its final location, sets file permissions, and so on.
+
+.. _flow_new_style_modules:
+
+New-style modules
+-----------------
+
+All of the modules that ship with Ansible fall into this category. While you can write modules in any language, all official modules (shipped with Ansible) use either Python or PowerShell.
+
+New-style modules have the arguments to the module embedded inside of them in
+some manner. Old-style modules must copy a separate file over to the
+managed node, which is less efficient as it requires two over-the-wire
+connections instead of only one.
+
+.. _flow_python_modules:
+
+Python
+^^^^^^
+
+New-style Python modules use the :ref:`Ansiballz` framework for constructing
+modules. These modules use imports from :code:`ansible.module_utils` to pull in
+boilerplate module code, such as argument parsing, formatting of return
+values as :term:`JSON`, and various file operations.
+
+.. note:: In Ansible, up to version 2.0.x, the official Python modules used the
+ :ref:`module_replacer` framework. For module authors, :ref:`Ansiballz` is
+ largely a superset of :ref:`module_replacer` functionality, so you usually
+ do not need to understand the differences between them.
+
+.. _flow_powershell_modules:
+
+PowerShell
+^^^^^^^^^^
+
+New-style PowerShell modules use the :ref:`module_replacer` framework for
+constructing modules. These modules get a library of PowerShell code embedded
+in them before being sent to the managed node.
+
+.. _flow_jsonargs_modules:
+
+JSONARGS modules
+----------------
+
+These modules are scripts that include the string
+``<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>`` in their body.
+This string is replaced with the JSON-formatted argument string. These modules typically set a variable to that value like this:
+
+.. code-block:: python
+
+ json_arguments = """<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"""
+
+Which is expanded as:
+
+.. code-block:: python
+
+ json_arguments = """{"param1": "test's quotes", "param2": "\"To be or not to be\" - Hamlet"}"""
+
+.. note:: Ansible outputs a :term:`JSON` string with bare quotes. Double quotes are
+ used to quote string values, double quotes inside of string values are
+ backslash escaped, and single quotes may appear unescaped inside of
+ a string value. To use JSONARGS, your scripting language must have a way
+ to handle this type of string. The example uses Python's triple quoted
+ strings to do this. Other scripting languages may have a similar quote
+ character that won't be confused by any quotes in the JSON or it may
+ allow you to define your own start-of-quote and end-of-quote characters.
+ If the language doesn't give you any of these then you'll need to write
+ a :ref:`non-native JSON module <flow_want_json_modules>` or
+ :ref:`Old-style module <flow_old_style_modules>` instead.
+
+These modules typically parse the contents of ``json_arguments`` using a JSON
+library and then use them as native variables throughout the code.
+
+.. _flow_want_json_modules:
+
+Non-native want JSON modules
+----------------------------
+
+If a module has the string ``WANT_JSON`` in it anywhere, Ansible treats
+it as a non-native module that accepts a filename as its only command line
+parameter. The filename is for a temporary file containing a :term:`JSON`
+string containing the module's parameters. The module needs to open the file,
+read and parse the parameters, operate on the data, and print its return data
+as a JSON encoded dictionary to stdout before exiting.
+
+These types of modules are self-contained entities. As of Ansible 2.1, Ansible
+only modifies them to change a shebang line if present.
+
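+As a sketch, the skeleton such a module follows (written in Python here for brevity, although the point of ``WANT_JSON`` is that any language will do):
+
+.. code-block:: python
+
+    #!/usr/bin/python
+    # WANT_JSON
+    # Hypothetical minimal want-JSON module: Ansible passes the path of
+    # a temporary file holding the JSON-encoded parameters as argv[1].
+    import json
+    import sys
+
+    with open(sys.argv[1]) as f:
+        params = json.load(f)
+
+    # ... operate on params here ...
+
+    print(json.dumps({'changed': False, 'msg': 'got %d parameters' % len(params)}))
+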
+.. seealso:: Examples of non-native modules written in Ruby are in the `Ansible
+ for Rubyists <https://github.com/ansible/ansible-for-rubyists>`_ repository.
+
+.. _flow_binary_modules:
+
+Binary modules
+--------------
+
+From Ansible 2.2 onwards, modules may also be small binary programs. Ansible
+doesn't perform any magic to make these portable to different systems so they
+may be specific to the system on which they were compiled or require other
+binary runtime dependencies. Despite these drawbacks, you may have
+to compile a custom module against a specific binary
+library if that's the only way to get access to certain resources.
+
+Binary modules take their arguments and return data to Ansible in the same
+way as :ref:`want JSON modules <flow_want_json_modules>`.
+
+.. seealso:: One example of a `binary module
+ <https://github.com/ansible/ansible/blob/devel/test/integration/targets/binary_modules/library/helloworld.go>`_
+   written in Go.
+
+.. _flow_old_style_modules:
+
+Old-style modules
+-----------------
+
+Old-style modules are similar to
+:ref:`want JSON modules <flow_want_json_modules>`, except that the file that
+they take contains ``key=value`` pairs for their parameters instead of
+:term:`JSON`. Ansible decides that a module is old-style when it doesn't have
+any of the markers that would show that it is one of the other types.
+
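+A hypothetical sketch of how such a module might parse its ``key=value`` arguments file:
+
+.. code-block:: python
+
+    #!/usr/bin/python
+    # Hypothetical old-style module: argv[1] names a file of key=value pairs.
+    import json
+    import shlex
+    import sys
+
+    with open(sys.argv[1]) as f:
+        # shlex honours quoting, so values may contain spaces
+        args = dict(word.split('=', 1) for word in shlex.split(f.read()))
+
+    print(json.dumps({'changed': False, 'params': args}))
+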
+.. _flow_how_modules_are_executed:
+
+How modules are executed
+========================
+
+When a user uses :program:`ansible` or :program:`ansible-playbook`, they
+specify a task to execute. The task is usually the name of a module along
+with several parameters to be passed to the module. Ansible takes these
+values and processes them in various ways before they are finally executed on
+the remote machine.
+
+.. _flow_executor_task_executor:
+
+Executor/task_executor
+----------------------
+
+The TaskExecutor receives the module name and parameters that were parsed from
+the :term:`playbook <playbooks>` (or from the command line in the case of
+:command:`/usr/bin/ansible`). It uses the name to decide whether it's looking
+at a module or an :ref:`Action Plugin <flow_action_plugins>`. If it's
+a module, it loads the :ref:`Normal Action Plugin <flow_normal_action_plugin>`
+and passes the name, variables, and other information about the task and play
+to that Action Plugin for further processing.
+
+.. _flow_normal_action_plugin:
+
+The ``normal`` action plugin
+----------------------------
+
+The ``normal`` action plugin executes the module on the remote host. It is
+the primary coordinator of much of the work to actually execute the module on
+the managed machine.
+
+* It loads the appropriate connection plugin for the task, which then transfers
+ or executes as needed to create a connection to that host.
+* It adds any internal Ansible properties to the module's parameters (for
+ instance, the ones that pass along ``no_log`` to the module).
+* It works with other plugins (connection, shell, become, other action plugins)
+ to create any temporary files on the remote machine and
+ cleans up afterwards.
+* It pushes the module and module parameters to the
+ remote host, although the :ref:`module_common <flow_executor_module_common>`
+ code described in the next section decides which format
+ those will take.
+* It handles any special cases regarding modules (for instance, async
+  execution, or complications around Windows modules that must have the same names as Python modules, so that internal calling of modules from other Action Plugins works).
+
+Much of this functionality comes from the `ActionBase` class,
+which lives in :file:`plugins/action/__init__.py`. It uses the
+``Connection`` and ``Shell`` objects to do its work.
+
+.. note::
+ When :term:`tasks <tasks>` are run with the ``async:`` parameter, Ansible
+ uses the ``async`` Action Plugin instead of the ``normal`` Action Plugin
+ to invoke it. That program flow is currently not documented. Read the
+ source for information on how that works.
+
+.. _flow_executor_module_common:
+
+Executor/module_common.py
+-------------------------
+
+Code in :file:`executor/module_common.py` assembles the module
+to be shipped to the managed node. The module is first read in, then examined
+to determine its type:
+
+* :ref:`PowerShell <flow_powershell_modules>` and :ref:`JSON-args modules <flow_jsonargs_modules>` are passed through :ref:`Module Replacer <module_replacer>`.
+* New-style :ref:`Python modules <flow_python_modules>` are assembled by :ref:`Ansiballz`.
+* :ref:`Non-native-want-JSON <flow_want_json_modules>`, :ref:`Binary modules <flow_binary_modules>`, and :ref:`Old-Style modules <flow_old_style_modules>` aren't touched by either of these and pass through unchanged.
+
+After the assembling step, one final
+modification is made to all modules that have a shebang line. Ansible checks
+whether the interpreter in the shebang line has a specific path configured via
+an ``ansible_$X_interpreter`` inventory variable. If it does, Ansible
+substitutes that path for the interpreter path given in the module. After
+this, Ansible returns the complete module data and the module type to the
+:ref:`Normal Action <flow_normal_action_plugin>` which continues execution of
+the module.
+
+Assembler frameworks
+--------------------
+
+Ansible supports two assembler frameworks: Ansiballz and the older Module Replacer.
+
+.. _module_replacer:
+
+Module Replacer framework
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Module Replacer framework is the original framework implementing new-style
+modules, and is still used for PowerShell modules. It is essentially a preprocessor (like the C Preprocessor for those
+familiar with that programming language). It does straight substitutions of
+specific substring patterns in the module file. There are two types of
+substitutions:
+
+* Replacements that only happen in the module file. These are public
+ replacement strings that modules can utilize to get helpful boilerplate or
+ access to arguments.
+
+ - :code:`from ansible.module_utils.MOD_LIB_NAME import *` is replaced with the
+    contents of the :file:`ansible/module_utils/MOD_LIB_NAME.py`. These should
+ only be used with :ref:`new-style Python modules <flow_python_modules>`.
+ - :code:`#<<INCLUDE_ANSIBLE_MODULE_COMMON>>` is equivalent to
+ :code:`from ansible.module_utils.basic import *` and should also only apply
+ to new-style Python modules.
+ - :code:`# POWERSHELL_COMMON` substitutes the contents of
+ :file:`ansible/module_utils/powershell.ps1`. It should only be used with
+ :ref:`new-style Powershell modules <flow_powershell_modules>`.
+
+* Replacements that are used by ``ansible.module_utils`` code. These are internal replacement patterns. They may be used internally, in the above public replacements, but shouldn't be used directly by modules.
+
+ - :code:`"<<ANSIBLE_VERSION>>"` is substituted with the Ansible version. In
+ :ref:`new-style Python modules <flow_python_modules>` under the
+ :ref:`Ansiballz` framework the proper way is to instead instantiate an
+ `AnsibleModule` and then access the version from
+    :attr:`AnsibleModule.ansible_version`.
+ - :code:`"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"` is substituted with
+ a string which is the Python ``repr`` of the :term:`JSON` encoded module
+ parameters. Using ``repr`` on the JSON string makes it safe to embed in
+ a Python file. In new-style Python modules under the Ansiballz framework
+ this is better accessed by instantiating an `AnsibleModule` and
+ then using :attr:`AnsibleModule.params`.
+ - :code:`<<SELINUX_SPECIAL_FILESYSTEMS>>` substitutes a string which is
+ a comma separated list of file systems which have a file system dependent
+ security context in SELinux. In new-style Python modules, if you really
+ need this you should instantiate an `AnsibleModule` and then use
+ :attr:`AnsibleModule._selinux_special_fs`. The variable has also changed
+    from a comma separated string of file system names to an actual Python
+ list of filesystem names.
+ - :code:`<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>` substitutes the module
+ parameters as a JSON string. Care must be taken to properly quote the
+ string as JSON data may contain quotes. This pattern is not substituted
+ in new-style Python modules as they can get the module parameters another
+ way.
+ - The string :code:`syslog.LOG_USER` is replaced wherever it occurs with the
+ ``syslog_facility`` which was named in :file:`ansible.cfg` or any
+ ``ansible_syslog_facility`` inventory variable that applies to this host. In
+ new-style Python modules this has changed slightly. If you really need to
+ access it, you should instantiate an `AnsibleModule` and then use
+ :attr:`AnsibleModule._syslog_facility` to access it. It is no longer the
+ actual syslog facility and is now the name of the syslog facility. See
+ the :ref:`documentation on internal arguments <flow_internal_arguments>`
+ for details.
+
+.. _Ansiballz:
+
+Ansiballz framework
+^^^^^^^^^^^^^^^^^^^
+
+The Ansiballz framework was adopted in Ansible 2.1 and is used for all new-style Python modules. Unlike the Module Replacer, Ansiballz uses real Python imports of things in
+:file:`ansible/module_utils` instead of merely preprocessing the module. It
+does this by constructing a zipfile -- which includes the module file, files
+in :file:`ansible/module_utils` that are imported by the module, and some
+boilerplate to pass in the module's parameters. The zipfile is then Base64
+encoded and wrapped in a small Python script which decodes the Base64 encoding
+and places the zipfile into a temp directory on the managed node. It then
+extracts just the Ansible module script from the zip file and places that in
+the temporary directory as well. Then it sets the PYTHONPATH to find Python
+modules inside of the zip file and imports the Ansible module as the special name, ``__main__``.
+Importing it as ``__main__`` causes Python to think that it is executing a script rather than simply
+importing a module. This lets Ansible run both the wrapper script and the module code in a single copy of Python on the remote machine.
+
+.. note::
+ * Ansible wraps the zipfile in the Python script for two reasons:
+
+ * for compatibility with Python 2.6 which has a less
+ functional version of Python's ``-m`` command line switch.
+
+ * so that pipelining will function properly. Pipelining needs to pipe the
+ Python module into the Python interpreter on the remote node. Python
+ understands scripts on stdin but does not understand zip files.
+
+ * Prior to Ansible 2.7, the module was executed via a second Python interpreter instead of being
+ executed inside of the same process. This change was made once Python-2.4 support was dropped
+ to speed up module execution.
+
+In Ansiballz, any imports of Python modules from the
+:py:mod:`ansible.module_utils` package trigger inclusion of that Python file
+into the zipfile. Instances of :code:`#<<INCLUDE_ANSIBLE_MODULE_COMMON>>` in
+the module are turned into :code:`from ansible.module_utils.basic import *`
+and :file:`ansible/module_utils/basic.py` is then included in the zipfile.
+Files that are included from :file:`module_utils` are themselves scanned for
+imports of other Python modules from :file:`module_utils` to be included in
+the zipfile as well.
+
+.. warning::
+ At present, the Ansiballz Framework cannot determine whether an import
+ should be included if it is a relative import. Always use an absolute
+ import that has :py:mod:`ansible.module_utils` in it to allow Ansiballz to
+ determine that the file should be included.
+
+
+.. _flow_passing_module_args:
+
+Passing args
+------------
+
+Arguments are passed differently by the two frameworks:
+
+* In :ref:`module_replacer`, module arguments are turned into a JSON-ified string and substituted into the combined module file.
+* In :ref:`Ansiballz`, the JSON-ified string is part of the script which wraps the zipfile. Just before the wrapper script imports the Ansible module as ``__main__``, it monkey-patches the private ``_ANSIBLE_ARGS`` variable in ``basic.py`` with the variable values. When an :class:`ansible.module_utils.basic.AnsibleModule` is instantiated, it parses this string and places the args into :attr:`AnsibleModule.params`, where they can be accessed by the module's other code.
+
+.. warning::
+ If you are writing modules, remember that the way we pass arguments is an internal implementation detail: it has changed in the past and will change again as soon as changes to the common module_utils
+ code allow Ansible modules to forgo using :class:`ansible.module_utils.basic.AnsibleModule`. Do not rely on the internal global ``_ANSIBLE_ARGS`` variable.
+
+ Very dynamic custom modules which need to parse arguments before they
+ instantiate an ``AnsibleModule`` may use ``_load_params`` to retrieve those parameters.
+ Although ``_load_params`` may change in breaking ways if necessary to support
+ changes in the code, it is likely to be more stable than either the way we pass parameters or the internal global variable.
+
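+As a sketch, such a module could peek at its parameters before building the ``AnsibleModule``; the ``mode`` option and the extra argument it unlocks are hypothetical:
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule, _load_params
+
+    # look at the raw parameters before constructing the module object
+    params = _load_params()
+
+    argument_spec = {'mode': {'type': 'str', 'default': 'basic'}}
+    if params.get('mode') == 'extended':
+        # hypothetical: grow the argument spec based on an early parameter
+        argument_spec['extended_option'] = {'type': 'str'}
+
+    module = AnsibleModule(argument_spec=argument_spec)
+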
+.. note::
+ Prior to Ansible 2.7, the Ansible module was invoked in a second Python interpreter and the
+ arguments were then passed to the script over the script's stdin.
+
+
+.. _flow_internal_arguments:
+
+Internal arguments
+------------------
+
+Both :ref:`module_replacer` and :ref:`Ansiballz` send additional arguments to
+the module beyond those which the user specified in the playbook. These
+additional arguments are internal parameters that help implement global
+Ansible features. Modules often do not need to know about these explicitly as
+the features are implemented in :py:mod:`ansible.module_utils.basic` but certain
+features need support from the module so it's good to know about them.
+
+The internal arguments listed here are global. If you need to add a local internal argument to a custom module, create an action plugin for that specific module - see ``_original_basename`` in the `copy action plugin <https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/action/copy.py#L329>`_ for an example.
+
+_ansible_no_log
+^^^^^^^^^^^^^^^
+
+Boolean. Set to True whenever a parameter in a task or play specifies ``no_log``. Any module that calls :py:meth:`AnsibleModule.log` handles this automatically. If a module implements its own logging then
+it needs to check this value. To access in a module, instantiate an
+``AnsibleModule`` and then check the value of :attr:`AnsibleModule.no_log`.
+
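+As a hypothetical sketch, a module that writes its own log file would gate that logging on the flag (the log path here is illustrative):
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+
+    module = AnsibleModule(argument_spec=dict(payload=dict(type='str')))
+
+    if not module.no_log:
+        # custom logging must be skipped entirely when no_log is set
+        with open('/tmp/mymodule.log', 'a') as log_file:
+            log_file.write('ran with %s\n' % module.params)
+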
+.. note::
+ ``no_log`` specified in a module's argument_spec is handled by a different mechanism.
+
+_ansible_debug
+^^^^^^^^^^^^^^^
+
+Boolean. Turns more verbose logging on or off and turns on logging of
+external commands that the module executes. If a module uses
+:py:meth:`AnsibleModule.debug` rather than :py:meth:`AnsibleModule.log` then
+the messages are only logged if ``_ansible_debug`` is set to ``True``.
+To set, add ``debug: True`` to :file:`ansible.cfg` or set the environment
+variable :envvar:`ANSIBLE_DEBUG`. To access in a module, instantiate an
+``AnsibleModule`` and access :attr:`AnsibleModule._debug`.
+
+_ansible_diff
+^^^^^^^^^^^^^^^
+
+Boolean. If a module supports it, tells the module to show a unified diff of
+changes to be made to templated files. To set, pass the ``--diff`` command line
+option. To access in a module, instantiate an `AnsibleModule` and access
+:attr:`AnsibleModule._diff`.
+
+_ansible_verbosity
+^^^^^^^^^^^^^^^^^^
+
+Unused. This value could be used for finer grained control over logging.
+
+_ansible_selinux_special_fs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+List. Names of filesystems which should have a special SELinux
+context. They are used by the `AnsibleModule` methods which operate on
+files (changing attributes, moving, and copying). To set, add a comma separated string of filesystem names in :file:`ansible.cfg`::
+
+ # ansible.cfg
+ [selinux]
+ special_context_filesystems=nfs,vboxsf,fuse,ramfs,vfat
+
+Most modules can use the built-in ``AnsibleModule`` methods to manipulate
+files. To access in a module that needs to know about these special context filesystems, instantiate an ``AnsibleModule`` and examine the list in
+:attr:`AnsibleModule._selinux_special_fs`.
+
+This replaces :attr:`ansible.module_utils.basic.SELINUX_SPECIAL_FS` from
+:ref:`module_replacer`. In module replacer it was a comma separated string of
+filesystem names. Under Ansiballz it's an actual list.
+
+.. versionadded:: 2.1
+
+_ansible_syslog_facility
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+This parameter controls which syslog facility the Ansible module logs to. To set, change the ``syslog_facility`` value in :file:`ansible.cfg`. Most
+modules should just use :meth:`AnsibleModule.log` which will then make use of
+this. If a module has to use this on its own, it should instantiate an
+`AnsibleModule` and then retrieve the name of the syslog facility from
+:attr:`AnsibleModule._syslog_facility`. The Ansiballz code is less hacky than the old :ref:`module_replacer` code:
+
+.. code-block:: python
+
+ # Old module_replacer way
+ import syslog
+ syslog.openlog(NAME, 0, syslog.LOG_USER)
+
+ # New Ansiballz way
+ import syslog
+ facility_name = module._syslog_facility
+ facility = getattr(syslog, facility_name, syslog.LOG_USER)
+ syslog.openlog(NAME, 0, facility)
+
+.. versionadded:: 2.1
+
+_ansible_version
+^^^^^^^^^^^^^^^^
+
+This parameter passes the version of Ansible that runs the module. To access
+it, a module should instantiate an `AnsibleModule` and then retrieve it
+from :attr:`AnsibleModule.ansible_version`. This replaces
+:attr:`ansible.module_utils.basic.ANSIBLE_VERSION` from
+:ref:`module_replacer`.
+
+.. versionadded:: 2.1
+
+
+.. _flow_module_return_values:
+
+Module return values & Unsafe strings
+-------------------------------------
+
+At the end of a module's execution, it formats the data that it wants to return as a JSON string and prints the string to its stdout. The normal action plugin receives the JSON string, parses it into a Python dictionary, and returns it to the executor.
+
+If Ansible templated every string return value, it would be vulnerable to an attack from users with access to managed nodes. If an unscrupulous user disguised malicious code as Ansible return value strings, and if those strings were then templated on the controller, Ansible could execute arbitrary code. To prevent this scenario, Ansible marks all strings inside returned data as ``Unsafe``, emitting any Jinja2 templates in the strings verbatim, not expanded by Jinja2.
+
+Strings returned by invoking a module through ``ActionPlugin._execute_module()`` are automatically marked as ``Unsafe`` by the normal action plugin. If another action plugin retrieves information from a module through some other means, it must mark its return data as ``Unsafe`` on its own.
+
+In case a poorly-coded action plugin fails to mark its results as "Unsafe," Ansible audits the results again when they are returned to the executor,
+marking all strings as ``Unsafe``. The normal action plugin protects itself and any other code that it calls with the result data as a parameter. The check inside the executor protects the output of all other action plugins, ensuring that subsequent tasks run by Ansible will not template anything from those results either.
+
+.. _flow_special_considerations:
+
+Special considerations
+----------------------
+
+.. _flow_pipelining:
+
+Pipelining
+^^^^^^^^^^
+
+Ansible can transfer a module to a remote machine in one of two ways:
+
+* it can write out the module to a temporary file on the remote host and then
+ use a second connection to the remote host to execute it with the
+ interpreter that the module needs
+* or it can use what's known as pipelining to execute the module by piping it
+ into the remote interpreter's stdin.
+
+Pipelining only works with modules written in Python at this time because
+Ansible only knows that Python supports this mode of operation. Supporting
+pipelining means that whatever format the module payload takes before being
+sent over the wire must be executable by Python via stdin.
+
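+Whether pipelining is used is controlled by the user's configuration. For instance, one way to enable it for the default ``ssh`` connection is in :file:`ansible.cfg`::
+
+    # ansible.cfg
+    [ssh_connection]
+    pipelining = True
+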
+.. _flow_args_over_stdin:
+
+Why pass args over stdin?
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Passing arguments via stdin was chosen for the following reasons:
+
+* When combined with :ref:`ANSIBLE_PIPELINING`, this keeps the module's arguments from
+ temporarily being saved onto disk on the remote machine. This makes it
+ harder (but not impossible) for a malicious user on the remote machine to
+ steal any sensitive information that may be present in the arguments.
+* Command line arguments would be insecure as most systems allow unprivileged
+ users to read the full commandline of a process.
+* Environment variables are usually more secure than the commandline but some
+ systems limit the total size of the environment. This could lead to
+ truncation of the parameters if we hit that limit.
+
+
+.. _flow_ansiblemodule:
+
+AnsibleModule
+-------------
+
+.. _argument_spec:
+
+Argument spec
+^^^^^^^^^^^^^
+
+The ``argument_spec`` provided to ``AnsibleModule`` defines the supported arguments for a module, as well as their type, defaults and more.
+
+Example ``argument_spec``:
+
+.. code-block:: python
+
+ module = AnsibleModule(argument_spec=dict(
+ top_level=dict(
+ type='dict',
+ options=dict(
+ second_level=dict(
+ default=True,
+ type='bool',
+ )
+ )
+ )
+ ))
+
+This section will discuss the behavioral attributes for arguments:
+
+:type:
+
+ ``type`` allows you to define the type of the value accepted for the argument. The default value for ``type`` is ``str``. Possible values are:
+
+ * str
+ * list
+ * dict
+ * bool
+ * int
+ * float
+ * path
+ * raw
+ * jsonarg
+ * json
+ * bytes
+ * bits
+
+  The ``raw`` type performs no type validation or type casting and maintains the type of the passed value.
+
+:elements:
+
+ ``elements`` works in combination with ``type`` when ``type='list'``. ``elements`` can then be defined as ``elements='int'`` or any other type, indicating that each element of the specified list should be of that type.
+
+:default:
+
+  The ``default`` option sets a default value for the argument in the scenario when the argument is not provided to the module. When not specified, the default value is ``None``.
+
+:fallback:
+
+ ``fallback`` accepts a ``tuple`` where the first argument is a callable (function) that will be used to perform the lookup, based on the second argument. The second argument is a list of values to be accepted by the callable.
+
+ The most common callable used is ``env_fallback`` which will allow an argument to optionally use an environment variable when the argument is not supplied.
+
+ Example:
+
+ .. code-block:: python
+
+ username=dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME']))
+
+:choices:
+
+ ``choices`` accepts a list of choices that the argument will accept. The types of ``choices`` should match the ``type``.
+
+:required:
+
+  ``required`` accepts a boolean, either ``True`` or ``False``, that indicates whether the argument is required. When not specified, ``required`` defaults to ``False``. This should not be used in combination with ``default``.
+
+:no_log:
+
+ ``no_log`` accepts a boolean, either ``True`` or ``False``, that indicates explicitly whether or not the argument value should be masked in logs and output.
+
+ .. note::
+ In the absence of ``no_log``, if the parameter name appears to indicate that the argument value is a password or passphrase (such as "admin_password"), a warning will be shown and the value will be masked in logs but **not** output. To disable the warning and masking for parameters that do not contain sensitive information, set ``no_log`` to ``False``.
+
+:aliases:
+
+  ``aliases`` accepts a list of alternative argument names for the argument, such as the case where the argument is ``name`` but the module accepts ``aliases=['pkg']`` to allow ``pkg`` to be used interchangeably with ``name``.
+
+:options:
+
+  ``options`` implements the ability to create a sub-argument_spec, where the sub options of the top level argument are also validated using the attributes discussed in this section. The example at the top of this section demonstrates use of ``options``. ``type`` or ``elements`` should be ``dict`` in this case.
+
+:apply_defaults:
+
+ ``apply_defaults`` works alongside ``options`` and allows the ``default`` of the sub-options to be applied even when the top-level argument is not supplied.
+
+ In the example of the ``argument_spec`` at the top of this section, it would allow ``module.params['top_level']['second_level']`` to be defined, even if the user does not provide ``top_level`` when calling the module.
+
+:removed_in_version:
+
+ ``removed_in_version`` indicates which version of ansible-base or a collection a deprecated argument will be removed in. Mutually exclusive with ``removed_at_date``, and must be used with ``removed_from_collection``.
+
+ Example:
+
+ .. code-block:: python
+
+ 'option': {
+ 'type': 'str',
+ 'removed_in_version': '2.0.0',
+ 'collection_name': 'testns.testcol',
+ },
+
+:removed_at_date:
+
+ ``removed_at_date`` indicates that a deprecated argument will be removed in a minor ansible-base release or major collection release after this date. Mutually exclusive with ``removed_in_version``, and must be used with ``removed_from_collection``.
+
+ Example:
+
+ .. code-block:: python
+
+ 'option': {
+ 'type': 'str',
+ 'removed_at_date': '2020-12-31',
+ 'collection_name': 'testns.testcol',
+ },
+
+:removed_from_collection:
+
+ Specifies which collection (or ansible-base) deprecates this deprecated argument. Specify ``ansible.builtin`` for ansible-base, or the collection's name (format ``foo.bar``). Must be used with ``removed_in_version`` or ``removed_at_date``.
+
+:deprecated_aliases:
+
+  Deprecates aliases of this argument. Must contain a list or tuple of dictionaries having some of the following keys:
+
+ :name:
+
+ The name of the alias to deprecate. (Required.)
+
+ :version:
+
+ The version of ansible-base or the collection this alias will be removed in. Either ``version`` or ``date`` must be specified.
+
+ :date:
+
+    A date after which a minor release of ansible-base or a major collection release will no longer contain this alias. Either ``version`` or ``date`` must be specified.
+
+ :collection_name:
+
+ Specifies which collection (or ansible-base) deprecates this deprecated alias. Specify ``ansible.builtin`` for ansible-base, or the collection's name (format ``foo.bar``). Must be used with ``version`` or ``date``.
+
+ Examples:
+
+ .. code-block:: python
+
+ 'option': {
+ 'type': 'str',
+ 'aliases': ['foo', 'bar'],
+          'deprecated_aliases': [
+ {
+ 'name': 'foo',
+ 'version': '2.0.0',
+ 'collection_name': 'testns.testcol',
+ },
+ {
+                  'name': 'bar',
+ 'date': '2020-12-31',
+ 'collection_name': 'testns.testcol',
+ },
+ ],
+ },
+
+
+:mutually_exclusive:
+
+ If ``options`` is specified, ``mutually_exclusive`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`.
+
+:required_together:
+
+ If ``options`` is specified, ``required_together`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`.
+
+:required_one_of:
+
+ If ``options`` is specified, ``required_one_of`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`.
+
+:required_if:
+
+ If ``options`` is specified, ``required_if`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`.
+
+:required_by:
+
+ If ``options`` is specified, ``required_by`` refers to the sub-options described in ``options`` and behaves as in :ref:`argument_spec_dependencies`.
+
+
+.. _argument_spec_dependencies:
+
+Dependencies between module options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following are optional arguments for ``AnsibleModule()``:
+
+.. code-block:: python
+
+ module = AnsibleModule(
+ argument_spec,
+ mutually_exclusive=[
+ ('path', 'content'),
+ ],
+ required_one_of=[
+ ('path', 'content'),
+ ],
+ )
+
+:mutually_exclusive:
+
+  Must be a sequence (list or tuple) of sequences of strings. Every sequence of strings is a list of option names which are mutually exclusive. If more than one option from such a list is specified together, Ansible will fail the module with an error.
+
+ Example:
+
+ .. code-block:: python
+
+ mutually_exclusive=[
+ ('path', 'content'),
+ ('repository_url', 'repository_filename'),
+ ],
+
+  In this example, the options ``path`` and ``content`` must not be specified at the same time. Likewise, the options ``repository_url`` and ``repository_filename`` must not be specified at the same time. Specifying ``path`` together with ``repository_url``, however, is accepted.
+
+ To ensure that precisely one of two (or more) options is specified, combine ``mutually_exclusive`` with ``required_one_of``.
+
+:required_together:
+
+  Must be a sequence (list or tuple) of sequences of strings. Every sequence of strings is a list of option names which must be specified together. If at least one of these options is specified, the others from the same sequence must all be present.
+
+ Example:
+
+ .. code-block:: python
+
+ required_together=[
+ ('file_path', 'file_hash'),
+ ],
+
+ In this example, if one of the options ``file_path`` or ``file_hash`` is specified, Ansible will fail the module with an error if the other one is not specified.
+
+:required_one_of:
+
+  Must be a sequence (list or tuple) of sequences of strings. Every sequence of strings is a list of option names from which at least one must be specified. If none of these options is specified, Ansible will fail module execution.
+
+ Example:
+
+ .. code-block:: python
+
+ required_one_of=[
+ ('path', 'content'),
+ ],
+
+ In this example, at least one of ``path`` and ``content`` must be specified. If none are specified, execution will fail. Specifying both is explicitly allowed; to prevent this, combine ``required_one_of`` with ``mutually_exclusive``.
+
+:required_if:
+
+  Must be a sequence of sequences. Every inner sequence describes one conditional dependency. Every sequence must have three or four values. The first two values are the option's name and the option's value that together describe the condition. The remaining elements of the sequence only apply if the option with that name has precisely that value.
+
+  If you want all options in a list of option names to be specified when the condition is met, use one of the following forms:
+
+ .. code-block:: python
+
+ ('option_name', option_value, ('option_a', 'option_b', ...)),
+ ('option_name', option_value, ('option_a', 'option_b', ...), False),
+
+  If you want at least one option from a list of option names to be specified when the condition is met, use the following form:
+
+ .. code-block:: python
+
+ ('option_name', option_value, ('option_a', 'option_b', ...), True),
+
+ Example:
+
+ .. code-block:: python
+
+ required_if=[
+ ('state', 'present', ('path', 'content'), True),
+ ('force', True, ('force_reason', 'force_code')),
+ ],
+
+ In this example, if the user specifies ``state=present``, at least one of the options ``path`` and ``content`` must be supplied (or both). To make sure that precisely one can be specified, combine ``required_if`` with ``mutually_exclusive``.
+
+ On the other hand, if ``force`` (a boolean parameter) is set to ``true``, ``yes`` etc., both ``force_reason`` and ``force_code`` must be specified.
+
+:required_by:
+
+ Must be a dictionary mapping option names to sequences of option names. If the option name in a dictionary key is specified, the option names it maps to must all also be specified. Note that instead of a sequence of option names, you can also specify one single option name.
+
+ Example:
+
+ .. code-block:: python
+
+ required_by={
+ 'force': 'force_reason',
+ 'path': ('mode', 'owner', 'group'),
+ },
+
+  In the example, if ``force`` is specified, ``force_reason`` must also be specified. Also, if ``path`` is specified, then the three options ``mode``, ``owner``, and ``group`` must also be specified.
+
+Declaring check mode support
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To declare that a module supports check mode, supply ``supports_check_mode=True`` to the ``AnsibleModule()`` call:
+
+.. code-block:: python
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+The module can determine whether it is called in check mode by checking the boolean value ``module.check_mode``. If it evaluates to ``True``, the module must take care not to do any modification.
+
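+As a fragment of a sketch (``needs_change`` and ``apply_change`` are hypothetical helpers, and ``module`` comes from the ``AnsibleModule()`` call above), a module body might guard its side effects like this:
+
+.. code-block:: python
+
+    changed = needs_change(module.params)   # hypothetical: detect drift
+
+    if module.check_mode:
+        # report what would happen without touching the system
+        module.exit_json(changed=changed)
+
+    if changed:
+        apply_change(module.params)         # hypothetical: do the real work
+
+    module.exit_json(changed=changed)
+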
+If ``supports_check_mode=False`` is specified, which is the default value, the module will exit in check mode with ``skipped=True`` and message ``remote module (<insert module name here>) does not support check mode``.
+
+Adding file options
+^^^^^^^^^^^^^^^^^^^
+
+To declare that a module should add support for all common file options, supply ``add_file_common_args=True`` to the ``AnsibleModule()`` call:
+
+.. code-block:: python
+
+ module = AnsibleModule(argument_spec, add_file_common_args=True)
+
+You can find `a list of all file options here <https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/doc_fragments/files.py>`_. It is recommended that you make your ``DOCUMENTATION`` extend the doc fragment ``ansible.builtin.files`` (see :ref:`module_docs_fragments`) in this case, to make sure that all these fields are correctly documented.
+
+The helper functions ``module.load_file_common_arguments()`` and ``module.set_fs_attributes_if_different()`` can be used to handle these arguments for you:
+
+.. code-block:: python
+
+ argument_spec = {
+ 'path': {
+ 'type': 'str',
+ 'required': True,
+ },
+ }
+
+ module = AnsibleModule(argument_spec, add_file_common_args=True)
+ changed = False
+
+    # TODO do something with module.params['path'], like updating its contents
+
+ # Ensure that module.params['path'] satisfies the file options supplied by the user
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ module.exit_json(changed=changed)
diff --git a/docs/docsite/rst/dev_guide/developing_python_3.rst b/docs/docsite/rst/dev_guide/developing_python_3.rst
new file mode 100644
index 00000000..3713e412
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_python_3.rst
@@ -0,0 +1,404 @@
+.. _developing_python_3:
+
+********************
+Ansible and Python 3
+********************
+
+The ``ansible-base`` code runs on both Python 2 and Python 3 because we want Ansible to be able to manage a wide
+variety of machines. Contributors to ansible-base and to Ansible Collections should be aware of the tips in this document so that they can write code that will run on the same versions of Python as the rest of Ansible.
+
+.. contents::
+ :local:
+
+To ensure that your code runs on Python 3 as well as on Python 2, learn the tips and tricks and idioms
+described here. Most of these considerations apply to all three types of Ansible code:
+
+1. controller-side code - code that runs on the machine where you invoke :command:`/usr/bin/ansible`
+2. modules - the code which Ansible transmits to and invokes on the managed machine.
+3. shared ``module_utils`` code - the common code that's used by modules to perform tasks and sometimes used by controller-side code as well
+
+However, the three types of code do not use the same string strategy. If you're developing a module or some ``module_utils`` code, be sure to read the section on string strategy carefully.
+
+Minimum version of Python 3.x and Python 2.x
+============================================
+
+On the controller we support Python 3.5 or greater and Python 2.7 or greater. Module-side, we
+support Python 3.5 or greater and Python 2.6 or greater.
+
+Python 3.5 was chosen as a minimum because it is the earliest Python 3 version adopted as the
+default Python by a Long Term Support (LTS) Linux distribution (in this case, Ubuntu-16.04).
+Previous LTS Linux distributions shipped with a Python 2 version which users can rely upon instead
+of the Python 3 version.
+
+For Python 2, the default is for modules to run on at least Python 2.6. This allows
+users with older distributions that are stuck on Python 2.6 to manage their
+machines. Modules are allowed to drop support for Python 2.6 when one of
+their dependent libraries requires a higher version of Python. This is not an
+invitation to add unnecessary dependent libraries in order to force your
+module to be usable only with a newer version of Python; instead it is an
+acknowledgment that some libraries (for instance, boto3 and docker-py) will
+only function with a newer version of Python.
+
+.. note:: Python 2.4 Module-side Support:
+
+ Support for Python 2.4 and Python 2.5 was dropped in Ansible-2.4. RHEL-5
+ (and its rebuilds like CentOS-5) were supported until April of 2017.
+ Ansible-2.3 was released in April of 2017 and was the last Ansible release
+ to support Python 2.4 on the module-side.
+
+Developing Ansible code that supports Python 2 and Python 3
+===========================================================
+
+The best place to start learning about writing code that supports both Python 2 and Python 3
+is `Lennart Regebro's book: Porting to Python 3 <http://python3porting.com/>`_.
+The book describes several strategies for porting to Python 3. The one we're
+using is `to support Python 2 and Python 3 from a single code base
+<http://python3porting.com/strategies.html#python-2-and-python-3-without-conversion>`_.
+
+Understanding strings in Python 2 and Python 3
+----------------------------------------------
+
+Python 2 and Python 3 handle strings differently, so when you write code that supports Python 3
+you must decide what string model to use. Strings can be an array of bytes (like in C) or
+they can be an array of text. Text is what we think of as letters, digits,
+numbers, other printable symbols, and a small number of unprintable "symbols"
+(control codes).
+
+In Python 2, the two types for these (:class:`str <python:str>` for bytes and
+:func:`unicode <python:unicode>` for text) are often used interchangeably. When dealing only
+with ASCII characters, the strings can be combined, compared, and converted
+from one type to another automatically. When non-ASCII characters are
+introduced, Python 2 starts throwing exceptions due to not knowing what encoding
+the non-ASCII characters should be in.
+
+Python 3 changes this behavior by making the separation between bytes (:class:`bytes <python3:bytes>`)
+and text (:class:`str <python3:str>`) more strict. Python 3 will throw an exception when
+trying to combine and compare the two types. The programmer has to explicitly
+convert from one type to the other to mix values from each.
+
+In Python 3 it's immediately apparent to the programmer when code is
+mixing the byte and text types inappropriately, whereas in Python 2, code that mixes those types
+may work until a user causes an exception by entering non-ASCII input.
+Python 3 forces programmers to proactively define a strategy for
+working with strings in their program so that they don't mix text and byte strings unintentionally.
+
+Ansible uses different strategies for working with strings in controller-side code, in
+:ref:`modules <module_string_strategy>`, and in :ref:`module_utils <module_utils_string_strategy>` code.
+
+.. _controller_string_strategy:
+
+Controller string strategy: the Unicode Sandwich
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In controller-side code we use a strategy known as the Unicode Sandwich (named
+after Python 2's :func:`unicode <python:unicode>` text type). For Unicode Sandwich we know that
+at the border of our code and the outside world (for example, file and network IO,
+environment variables, and some library calls) we are going to receive bytes.
+We need to transform these bytes into text and use that throughout the
+internal portions of our code. When we have to send those strings back out to
+the outside world we first convert the text back into bytes.
+To visualize this, imagine a 'sandwich' consisting of a top and bottom layer
+of bytes, a layer of conversion in between, and the text type in the center.
+
+Unicode Sandwich common borders: places to convert bytes to text in controller code
+-----------------------------------------------------------------------------------
+
+This is a partial list of places where we have to convert to and from bytes
+when using the Unicode Sandwich string strategy. It's not exhaustive but
+it gives you an idea of where to watch for problems.
+
+Reading and writing to files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In Python 2, reading from files yields bytes. In Python 3, it can yield text.
+To make code that's portable to both we don't make use of Python 3's ability
+to yield text but instead do the conversion explicitly ourselves. For example:
+
+.. code-block:: python
+
+ from ansible.module_utils._text import to_text
+
+ with open('filename-with-utf8-data.txt', 'rb') as my_file:
+ b_data = my_file.read()
+ try:
+ data = to_text(b_data, errors='surrogate_or_strict')
+ except UnicodeError:
+ # Handle the exception gracefully -- usually by displaying a good
+ # user-centric error message that can be traced back to this piece
+ # of code.
+ pass
+
+.. note:: Much of Ansible assumes that all encoded text is UTF-8. At some
+ point, if there is demand for other encodings we may change that, but for
+ now it is safe to assume that bytes are UTF-8.
+
+Writing to files is the opposite process:
+
+.. code-block:: python
+
+ from ansible.module_utils._text import to_bytes
+
+ with open('filename.txt', 'wb') as my_file:
+ my_file.write(to_bytes(some_text_string))
+
+Note that we don't have to catch :exc:`UnicodeError` here because we're
+transforming to UTF-8 and all text strings in Python can be transformed back
+to UTF-8.
+
+Filesystem interaction
+^^^^^^^^^^^^^^^^^^^^^^
+
+Dealing with filenames often involves dropping back to bytes because on UNIX-like
+systems filenames are bytes. On Python 2, if we pass a text string to these
+functions, the text string will be converted to a byte string inside of the
+function and a traceback will occur if non-ASCII characters are present. In
+Python 3, a traceback will only occur if the text string can't be decoded in
+the current locale, but it's still good to be explicit and have code which
+works on both versions:
+
+.. code-block:: python
+
+ import os.path
+
+ from ansible.module_utils._text import to_bytes
+
+    filename = u'/var/tmp/くらとみ.txt'
+ f = open(to_bytes(filename), 'wb')
+ mtime = os.path.getmtime(to_bytes(filename))
+ b_filename = os.path.expandvars(to_bytes(filename))
+ if os.path.exists(to_bytes(filename)):
+ pass
+
+When you are only manipulating a filename as a string without talking to the
+filesystem (or a C library which talks to the filesystem) you can often get
+away without converting to bytes:
+
+.. code-block:: python
+
+ import os.path
+
+    os.path.join(u'/var/tmp/café', u'くらとみ')
+    os.path.split(u'/var/tmp/café/くらとみ')
+
+On the other hand, if the code needs to manipulate the filename and also talk
+to the filesystem, it can be more convenient to transform to bytes right away
+and manipulate in bytes.
+
+.. warning:: Make sure all variables passed to a function are the same type.
+ If you're working with something like :func:`python3:os.path.join` which takes
+ multiple strings and uses them in combination, you need to make sure that
+ all the types are the same (either all bytes or all text). Mixing
+ bytes and text will cause tracebacks.
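+A minimal illustration of this pitfall (the mixed call is commented out
+because it raises :exc:`TypeError` on Python 3):
+
+.. code-block:: python
+
+   import os.path
+
+   from ansible.module_utils._text import to_bytes
+
+   # All arguments are bytes -- safe on both Python 2 and Python 3
+   b_path = os.path.join(to_bytes(u'/var/tmp'), to_bytes(u'café'))
+
+   # Mixing bytes and text raises TypeError on Python 3:
+   # os.path.join(b'/var/tmp', u'café')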
+
+Interacting with other programs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Interacting with other programs goes through the operating system and
+C libraries and operates on things that the UNIX kernel defines. These
+interfaces are all byte-oriented so the Python interface is byte oriented as
+well. On both Python 2 and Python 3, byte strings should be given to Python's
+subprocess library and byte strings should be expected back from it.
+
+One of the main places in Ansible's controller code that we interact with
+other programs is the connection plugins' ``exec_command`` methods. These
+methods transform any text strings they receive for the command (and the
+arguments to the command) into bytes before execution, and return stdout and
+stderr as byte strings. Higher level functions (like action plugins'
+``_low_level_execute_command``) transform the output into text strings.
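+A rough sketch of this pattern (simplified for illustration; this is not the
+actual connection plugin code) looks like this:
+
+.. code-block:: python
+
+   import subprocess
+
+   from ansible.module_utils._text import to_bytes, to_text
+
+   # Convert the command and its arguments to bytes at the border...
+   b_args = [to_bytes(a, errors='surrogate_or_strict') for a in (u'ls', u'/var/tmp')]
+   proc = subprocess.Popen(b_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+   b_stdout, b_stderr = proc.communicate()
+
+   # ...and convert the byte string results back to text for internal use.
+   stdout = to_text(b_stdout, errors='surrogate_or_strict')
+   stderr = to_text(b_stderr, errors='surrogate_or_strict')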
+
+.. _module_string_strategy:
+
+Module string strategy: Native String
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In modules we use a strategy known as Native Strings. This makes things
+easier on the community members who maintain so many of Ansible's
+modules: it avoids breaking backwards compatibility, which mandating that
+all strings inside of modules be text (with conversion between text and
+bytes at the borders) would have done.
+
+Native strings refer to the type that Python uses when you specify a bare
+string literal:
+
+.. code-block:: python
+
+ "This is a native string"
+
+In Python 2, these are byte strings. In Python 3 these are text strings. Modules should be
+coded to expect bytes on Python 2 and text on Python 3.
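+For example, the same literal has a different type depending on the
+interpreter:
+
+.. code-block:: python
+
+   # On Python 2:
+   type("native string")  # <type 'str'>  (bytes)
+
+   # On Python 3:
+   type("native string")  # <class 'str'>  (text)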
+
+.. _module_utils_string_strategy:
+
+Module_utils string strategy: hybrid
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In ``module_utils`` code we use a hybrid string strategy. Although Ansible's
+``module_utils`` code is largely like module code, some pieces of it are
+used by the controller as well. So it needs to be compatible with modules
+and with the controller's assumptions, particularly the string strategy.
+The module_utils code attempts to accept native strings as input
+to its functions and emit native strings as their output.
+
+In ``module_utils`` code:
+
+* Functions **must** accept string parameters as either text strings or byte strings.
+* Functions may return either the same type of string as they were given or the native string type for the Python version they are run on.
+* Functions that return strings **must** document whether they return strings of the same type as they were given or native strings.
+
+``module_utils`` functions are therefore often very defensive in nature.
+They convert their string parameters into text (using ``ansible.module_utils._text.to_text``)
+at the beginning of the function, do their work, and then convert
+the return values into the native string type (using ``ansible.module_utils._text.to_native``)
+or back to the string type that their parameters received.
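+A hedged sketch of this defensive pattern (``normalize_path`` is a
+hypothetical helper, not an actual Ansible function):
+
+.. code-block:: python
+
+   from ansible.module_utils._text import to_text, to_native
+
+   def normalize_path(path):
+       # Accept either a text or a byte string; work in text internally.
+       path = to_text(path, errors='surrogate_or_strict')
+       normalized = path.rstrip(u'/')
+       # Return the native string type for the Python version we run on.
+       return to_native(normalized, errors='surrogate_or_strict')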
+
+Tips, tricks, and idioms for Python 2/Python 3 compatibility
+------------------------------------------------------------
+
+Use forward-compatibility boilerplate
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Use the following boilerplate code at the top of all python files
+to make certain constructs act the same way on Python 2 and Python 3:
+
+.. code-block:: python
+
+ # Make coding more python3-ish
+ from __future__ import (absolute_import, division, print_function)
+ __metaclass__ = type
+
+``__metaclass__ = type`` makes all classes defined in the file into new-style
+classes without explicitly inheriting from :class:`object <python3:object>`.
+
+The ``__future__`` imports do the following:
+
+:absolute_import: Makes imports look in :data:`sys.path <python3:sys.path>` for the modules being
+ imported, skipping the directory in which the module doing the importing
+ lives. If the code wants to import from the directory in which the
+ importing module lives, there is a new dot notation to do so.
+:division: Makes division of integers always return a float. If you need the
+ integer quotient, use ``x // y`` instead of ``x / y`` (see the short example
+ after this list).
+:print_function: Changes :func:`print <python3:print>` from a keyword into a function.
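+A quick illustration of the ``division`` behavior:
+
+.. code-block:: python
+
+   from __future__ import division
+
+   print(7 / 2)   # 3.5 on both Python 2 and Python 3 with the future import
+   print(7 // 2)  # 3 -- floor division gives the integer quotient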
+
+.. seealso::
+ * `PEP 0328: Absolute Imports <https://www.python.org/dev/peps/pep-0328/#guido-s-decision>`_
+ * `PEP 0238: Division <https://www.python.org/dev/peps/pep-0238>`_
+ * `PEP 3105: Print function <https://www.python.org/dev/peps/pep-3105>`_
+
+Prefix byte strings with ``b_``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Since mixing text and bytes types leads to tracebacks we want to be clear
+about what variables hold text and what variables hold bytes. We do this by
+prefixing any variable holding bytes with ``b_``. For instance:
+
+.. code-block:: python
+
+ from ansible.module_utils._text import to_bytes
+
+ filename = u'/var/tmp/café.txt'
+ b_filename = to_bytes(filename)
+ with open(b_filename) as f:
+     data = f.read()
+
+We do not prefix text strings with anything. Because we only operate on byte
+strings at the borders, there are fewer variables that need bytes than
+variables that need text.
+
+Import Ansible's bundled Python ``six`` library
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The third-party Python `six <https://pypi.org/project/six/>`_ library exists
+to help projects create code that runs on both Python 2 and Python 3. Ansible
+includes a version of the library in module_utils so that other modules can use it
+without requiring that it is installed on the remote system. To make use of
+it, import it like this:
+
+.. code-block:: python
+
+ from ansible.module_utils import six
+
+.. note:: Ansible can also use a system copy of six
+
+ Ansible will use a system copy of six if the system copy is a later
+ version than the one Ansible bundles.
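+For example, a few commonly used six helpers:
+
+.. code-block:: python
+
+   from ansible.module_utils import six
+
+   # Version checks
+   if six.PY2:
+       pass  # code that should only run on Python 2
+
+   # Dictionary iteration that works the same on both versions
+   for key, value in six.iteritems({'a': 1}):
+       print('%s=%s' % (key, value))
+
+   # Covers str and unicode on Python 2, str on Python 3
+   if isinstance(u'some string', six.string_types):
+       pass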
+
+Handle exceptions with ``as``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In order for code to function on Python 2.6+ and Python 3, use the
+new exception-catching syntax which uses the ``as`` keyword:
+
+.. code-block:: python
+
+ try:
+     a = 2/0
+ except ZeroDivisionError as e:
+     module.fail_json(msg="Tried to divide by zero: %s" % e)
+
+Do **not** use the following syntax as it will fail on every version of Python 3:
+
+.. This code block won't highlight because python2 isn't recognized. This is necessary to pass tests under python 3.
+.. code-block:: none
+
+ try:
+     a = 2/0
+ except ZeroDivisionError, e:
+     module.fail_json(msg="Tried to divide by zero: %s" % e)
+
+Update octal numbers
+^^^^^^^^^^^^^^^^^^^^
+
+In Python 2.x, octal literals could be specified as ``0755``. In Python 3,
+octals must be specified as ``0o755``.
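+For example:
+
+.. code-block:: python
+
+   import os
+
+   os.chmod('/path/to/some/file', 0o755)  # works on Python 2.6+ and Python 3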
+
+String formatting for controller code
+-------------------------------------
+
+Use ``str.format()`` for Python 2.6 compatibility
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Starting in Python 2.6, strings gained a method called ``format()`` to put
+strings together. However, one commonly used feature of ``format()`` wasn't
+added until Python 2.7, so you need to remember not to use it in Ansible code:
+
+.. code-block:: python
+
+ # Does not work in Python 2.6!
+ new_string = "Dear {}, Welcome to {}".format(username, location)
+
+ # Use this instead
+ new_string = "Dear {0}, Welcome to {1}".format(username, location)
+
+Both of the format strings above map positional arguments of the ``format()``
+method into the string. However, the first version doesn't work in
+Python 2.6. Always remember to put numbers into the placeholders so the code
+is compatible with Python 2.6.
+
+.. seealso::
+ Python documentation on `format strings <https://docs.python.org/2/library/string.html#formatstrings>`_
+
+Use percent format with byte strings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In Python 3.x, byte strings do not have a ``format()`` method. However, they
+do support the older percent-formatting.
+
+.. code-block:: python
+
+ b_command_line = b'ansible-playbook --become-user %s -K %s' % (user, playbook_file)
+
+.. note:: Percent formatting added in Python 3.5
+
+ Percent formatting of byte strings was added back into Python 3 in 3.5.
+ This isn't a problem for us because Python 3.5 is our minimum version.
+ However, if you happen to be testing Ansible code with Python 3.4 or
+ earlier, you will find that the byte string formatting here won't work.
+ Upgrade to Python 3.5 to test.
+
+.. seealso::
+ Python documentation on `percent formatting <https://docs.python.org/2/library/stdtypes.html#string-formatting>`_
+
+.. _testing_modules_python_3:
+
+Testing modules on Python 3
+===================================
+
+Ansible modules are slightly harder to code to support Python 3 than normal code from other projects. A lot of mocking has to go into unit testing an Ansible module, so it's harder to test that your changes have fixed everything or to make sure that later commits haven't regressed the Python 3 support. Review our :ref:`testing <developing_testing>` pages for more information.
diff --git a/docs/docsite/rst/dev_guide/developing_rebasing.rst b/docs/docsite/rst/dev_guide/developing_rebasing.rst
new file mode 100644
index 00000000..81936be1
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/developing_rebasing.rst
@@ -0,0 +1,83 @@
+.. _rebase_guide:
+
+***********************
+Rebasing a pull request
+***********************
+
+You may find that your pull request (PR) is out-of-date and needs to be rebased. This can happen for several reasons:
+
+- Files modified in your PR are in conflict with changes which have already been merged.
+- Your PR is old enough that significant changes to automated test infrastructure have occurred.
+
+Rebasing the branch used to create your PR will resolve both of these issues.
+
+Configuring your remotes
+========================
+
+Before you can rebase your PR, you need to make sure you have the proper remotes configured. These instructions apply to any repository on GitHub, including collections repositories. On other platforms (Bitbucket, GitLab), the same principles and commands apply but the syntax may be different. We use the ansible/ansible repository here as an example. In other repositories, the branch names may be different. Assuming you cloned your fork in the usual fashion, the ``origin`` remote will point to your fork::
+
+ $ git remote -v
+ origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (fetch)
+ origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (push)
+
+However, you also need to add a remote which points to the upstream repository::
+
+ $ git remote add upstream https://github.com/ansible/ansible.git
+
+Which should leave you with the following remotes::
+
+ $ git remote -v
+ origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (fetch)
+ origin git@github.com:YOUR_GITHUB_USERNAME/ansible.git (push)
+ upstream https://github.com/ansible/ansible.git (fetch)
+ upstream https://github.com/ansible/ansible.git (push)
+
+Checking the status of your branch should show your fork is up-to-date with the ``origin`` remote::
+
+ $ git status
+ On branch YOUR_BRANCH
+ Your branch is up-to-date with 'origin/YOUR_BRANCH'.
+ nothing to commit, working tree clean
+
+Rebasing your branch
+====================
+
+Once you have an ``upstream`` remote configured, you can rebase the branch for your PR::
+
+ $ git pull --rebase upstream devel
+
+This will replay the changes in your branch on top of the changes made in the upstream ``devel`` branch.
+If there are merge conflicts, you will be prompted to resolve those before you can continue.
+
+After you rebase, the status of your branch changes::
+
+ $ git status
+ On branch YOUR_BRANCH
+ Your branch and 'origin/YOUR_BRANCH' have diverged,
+ and have 4 and 1 different commits each, respectively.
+ (use "git pull" to merge the remote branch into yours)
+ nothing to commit, working tree clean
+
+Don't worry, this is normal after a rebase. You should ignore the ``git status`` instructions to use ``git pull``. We'll cover what to do next in the following section.
+
+Updating your pull request
+==========================
+
+Now that you've rebased your branch, you need to push your changes to GitHub to update your PR.
+
+Since rebasing re-writes git history, you will need to use a force push::
+
+ $ git push --force-with-lease
+
+Your PR on GitHub has now been updated. This will automatically trigger testing of your changes.
+You should check in on the status of your PR after tests have completed to see if further changes are required.
+
+Getting help rebasing
+=====================
+
+For help with rebasing your PR, or other development related questions, join us on our #ansible-devel IRC chat channel on `freenode.net <https://freenode.net>`_.
+
+.. seealso::
+
+ :ref:`community_development_process`
+ Information on roadmaps, opening PRs, Ansibullbot, and more
diff --git a/docs/docsite/rst/dev_guide/index.rst b/docs/docsite/rst/dev_guide/index.rst
new file mode 100644
index 00000000..fb5b7f4a
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/index.rst
@@ -0,0 +1,92 @@
+.. _developer_guide:
+
+***************
+Developer Guide
+***************
+
+Welcome to the Ansible Developer Guide!
+
+**Who should use this guide?**
+
+If you want to extend Ansible by using a custom module or plugin locally, creating a module or plugin, adding functionality to an existing module, or expanding test coverage, this guide is for you. We've included detailed information for developers on how to test and document modules, as well as the prerequisites for getting your module or plugin accepted into the main Ansible repository.
+
+Find the task that best describes what you want to do:
+
+* I'm looking for a way to address a use case:
+
+ * I want to :ref:`add a custom plugin or module locally <developing_locally>`.
+ * I want to figure out if :ref:`developing a module is the right approach <module_dev_should_you>` for my use case.
+ * I want to :ref:`develop a collection <developing_collections>`.
+ * I want to :ref:`contribute to an Ansible-maintained collection <contributing_maintained_collections>`.
+ * I want to :ref:`contribute to a community-maintained collection <hacking_collections>`.
+ * I want to :ref:`migrate a role to a collection <migrating_roles>`.
+
+* I've read the info above, and I'm sure I want to develop a module:
+
+ * What do I need to know before I start coding?
+ * I want to :ref:`set up my Python development environment <environment_setup>`.
+ * I want to :ref:`get started writing a module <developing_modules_general>`.
+ * I want to write a specific kind of module:
+ * a :ref:`network module <developing_modules_network>`
+ * a :ref:`Windows module <developing_modules_general_windows>`.
+ * an :ref:`Amazon module <AWS_module_development>`.
+ * an :ref:`OpenStack module <OpenStack_module_development>`.
+ * an :ref:`oVirt/RHV module <oVirt_module_development>`.
+ * a :ref:`VMware module <VMware_module_development>`.
+ * I want to :ref:`write a series of related modules <developing_modules_in_groups>` that integrate Ansible with a new product (for example, a database, cloud provider, network platform, and so on).
+
+* I want to refine my code:
+
+ * I want to :ref:`debug my module code <debugging_modules>`.
+ * I want to :ref:`add tests <developing_testing>`.
+ * I want to :ref:`document my module <module_documenting>`.
+ * I want to :ref:`document my set of modules for a network platform <documenting_modules_network>`.
+ * I want to follow :ref:`conventions and tips for clean, usable module code <developing_modules_best_practices>`.
+ * I want to :ref:`make sure my code runs on Python 2 and Python 3 <developing_python_3>`.
+
+* I want to work on other development projects:
+
+ * I want to :ref:`write a plugin <developing_plugins>`.
+ * I want to :ref:`connect Ansible to a new source of inventory <developing_inventory>`.
+ * I want to :ref:`deprecate an outdated module <deprecating_modules>`.
+
+* I want to contribute back to the Ansible project:
+
+ * I want to :ref:`understand how to contribute to Ansible <ansible_community_guide>`.
+ * I want to :ref:`contribute my module or plugin <developing_modules_checklist>`.
+ * I want to :ref:`understand the license agreement <contributor_license_agreement>` for contributions to Ansible.
+
+If you prefer to read the entire guide, here's a list of the pages in order.
+
+.. toctree::
+ :maxdepth: 2
+
+ developing_locally
+ developing_modules
+ developing_modules_general
+ developing_modules_checklist
+ developing_modules_best_practices
+ developing_python_3
+ debugging
+ developing_modules_documenting
+ developing_modules_general_windows
+ developing_modules_general_aci
+ platforms/aws_guidelines
+ platforms/openstack_guidelines
+ platforms/ovirt_dev_guide
+ platforms/vmware_guidelines
+ developing_modules_in_groups
+ testing
+ module_lifecycle
+ developing_plugins
+ developing_inventory
+ developing_core
+ developing_program_flow_modules
+ developing_api
+ developing_rebasing
+ developing_module_utilities
+ developing_collections
+ migrating_roles
+ collections_galaxy_meta
+ overview_architecture
diff --git a/docs/docsite/rst/dev_guide/migrating_roles.rst b/docs/docsite/rst/dev_guide/migrating_roles.rst
new file mode 100644
index 00000000..a32fa242
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/migrating_roles.rst
@@ -0,0 +1,410 @@
+
+.. _migrating_roles:
+
+*************************************************
+Migrating Roles to Roles in Collections on Galaxy
+*************************************************
+
+You can migrate any existing standalone role into a collection and host the collection on Galaxy. With Ansible collections, you can distribute many roles in a single cohesive unit of re-usable automation. Inside a collection, you can share custom plugins across all roles in the collection instead of duplicating them in each role's :file:`library/` directory.
+
+You must migrate roles to collections if you want to distribute them as certified Ansible content.
+
+.. note::
+
+ If you want to import your collection to Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_.
+
+See :ref:`developing_collections` for details on collections.
+
+
+.. contents::
+ :local:
+ :depth: 1
+
+Comparing standalone roles to collection roles
+===============================================
+
+:ref:`Standalone roles <playbooks_reuse_roles>` have the following directory structure:
+
+.. code-block:: bash
+ :emphasize-lines: 5,7,8
+
+ role/
+ ├── defaults
+ ├── files
+ ├── handlers
+ ├── library
+ ├── meta
+ ├── module_utils
+ ├── [*_plugins]
+ ├── tasks
+ ├── templates
+ ├── tests
+ └── vars
+
+
+The highlighted directories above will change when you migrate to a collection-based role. The collection directory structure includes a :file:`roles/` directory:
+
+.. code-block:: bash
+
+ mynamespace/
+ └── mycollection/
+ ├── docs/
+ ├── galaxy.yml
+ ├── plugins/
+ │ ├── modules/
+ │ │ └── module1.py
+ │ ├── inventory/
+ │ └── .../
+ ├── README.md
+ ├── roles/
+ │ ├── role1/
+ │ ├── role2/
+ │ └── .../
+ ├── playbooks/
+ │ ├── files/
+ │ ├── vars/
+ │ ├── templates/
+ │ └── tasks/
+ └── tests/
+
+You will need to use the Fully Qualified Collection Name (FQCN) to use the roles and plugins when you migrate your role into a collection. The FQCN is the combination of the collection ``namespace``, collection ``name``, and the content item you are referring to.
+
+So for example, in the above collection, the FQCN to access ``role1`` would be:
+
+.. code-block:: Python
+
+ mynamespace.mycollection.role1
+
+
+A collection can contain one or more roles in the :file:`roles/` directory and these are almost identical to standalone roles, except you need to move plugins out of the individual roles, and use the :abbr:`FQCN (Fully Qualified Collection Name)` in some places, as detailed in the next section.
+
+.. note::
+
+ In standalone roles, some of the plugin directories referenced their plugin types in the plural sense; this is not the case in collections.
+
+.. _simple_roles_in_collections:
+
+Migrating a role to a collection
+=================================
+
+To migrate from a standalone role that contains no plugins to a collection role:
+
+1. Create a local :file:`ansible_collections` directory and ``cd`` to this new directory.
+
+2. Create a collection. If you want to import this collection to Ansible Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_.
+
+.. code-block:: bash
+
+ $ ansible-galaxy collection init mynamespace.mycollection
+
+This creates the collection directory structure.
+
+3. Copy the standalone role directory into the :file:`roles/` subdirectory of the collection. Roles in collections cannot have hyphens in the role name. Rename any such roles to use underscores instead.
+
+.. code-block:: bash
+
+ $ mkdir mynamespace/mycollection/roles/my_role/
+ $ cp -r /path/to/standalone/role/mynamespace/my_role/* mynamespace/mycollection/roles/my_role/
+
+4. Update ``galaxy.yml`` to include any role dependencies.
+
+5. Update the collection README.md file to add links to any role README.md files.
+
+
+.. _complex_roles_in_collections:
+
+Migrating a role with plugins to a collection
+==============================================
+
+To migrate from a standalone role that has plugins to a collection role:
+
+1. Create a local :file:`ansible_collections` directory and ``cd`` to this new directory.
+
+2. Create a collection. If you want to import this collection to Ansible Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_.
+
+.. code-block:: bash
+
+ $ ansible-galaxy collection init mynamespace.mycollection
+
+This creates the collection directory structure.
+
+3. Copy the standalone role directory into the :file:`roles/` subdirectory of the collection. Roles in collections cannot have hyphens in the role name. Rename any such roles to use underscores instead.
+
+.. code-block:: bash
+
+ $ mkdir mynamespace/mycollection/roles/my_role/
+ $ cp -r /path/to/standalone/role/mynamespace/my_role/* mynamespace/mycollection/roles/my_role/
+
+
+4. Move any modules to the :file:`plugins/modules/` directory.
+
+.. code-block:: bash
+
+ $ mv mynamespace/mycollection/roles/my_role/library/* mynamespace/mycollection/plugins/modules/
+
+5. Move any other plugins to the appropriate :file:`plugins/PLUGINTYPE/` directory. See :ref:`migrating_plugins_collection` for additional steps that may be required.
+
+6. Update ``galaxy.yml`` to include any role dependencies.
+
+7. Update the collection README.md file to add links to any role README.md files.
+
+8. Change any references to the role to use the :abbr:`FQCN (Fully Qualified Collection Name)`.
+
+.. code-block:: yaml
+
+ ---
+ - name: example role by FQCN
+ hosts: some_host_pattern
+ tasks:
+ - name: import FQCN role from a collection
+ import_role:
+ name: mynamespace.mycollection.my_role
+
+
+You can alternately use the ``collections`` keyword to simplify this:
+
+.. code-block:: yaml
+
+ ---
+ - name: example role by FQCN
+ hosts: some_host_pattern
+ collections:
+ - mynamespace.mycollection
+ tasks:
+ - name: import role from a collection
+ import_role:
+ name: my_role
+
+
+.. _migrating_plugins_collection:
+
+Migrating other role plugins to a collection
+---------------------------------------------
+
+To migrate other role plugins to a collection:
+
+
+1. Move each nonmodule plugin to the appropriate :file:`plugins/PLUGINTYPE/` directory. The :file:`mynamespace/mycollection/plugins/README.md` file explains the types of plugins that the collection can contain within optionally created subdirectories.
+
+.. code-block:: bash
+
+ $ mv mynamespace/mycollection/roles/my_role/filter_plugins/* mynamespace/mycollection/plugins/filter/
+
+2. Update documentation to use the FQCN. Plugins that use ``doc_fragments`` need to use FQCN (for example, ``mydocfrag`` becomes ``mynamespace.mycollection.mydocfrag``).
+
+3. Update relative imports: in collections, relative imports must start with a period, so :file:`./filename` and :file:`../asdfu/filestuff` work, but a bare :file:`filename` in the same directory must be updated to :file:`./filename`. A short sketch follows this list.
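+A minimal sketch of the change (``helper`` stands in for a hypothetical
+sibling module in the same directory):
+
+.. code-block:: python
+
+   # Before (implicit relative import -- works on Python 2 only):
+   # import helper
+
+   # After (explicit relative import -- required in collections):
+   from . import helper
+   from .helper import SomeClass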
+
+
+If you have a custom ``module_utils`` or import from ``__init__.py``, you must also:
+
+#. Change the Python namespace for custom ``module_utils`` to use the :abbr:`FQCN (Fully Qualified Collection Name)` along with the ``ansible_collections`` convention. See :ref:`update_module_utils_role`.
+
+#. Change how you import from ``__init__.py``. See :ref:`update_init_role`.
+
+
+.. _update_module_utils_role:
+
+Updating ``module_utils``
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If any of your custom modules use a custom module utility, once you migrate to a collection you cannot address the module utility in the top level ``ansible.module_utils`` Python namespace. Ansible does not merge content from collections into the Ansible internal Python namespace. Update any Python import statements that refer to custom module utilities when you migrate your custom content to collections. See :ref:`module_utils in collections <collection_module_utils>` for more details.
+
+When coding with ``module_utils`` in a collection, the Python import statement needs to take into account the :abbr:`FQCN (Fully Qualified Collection Name)` along with the ``ansible_collections`` convention. The resulting Python import looks similar to the following example:
+
+.. code-block:: text
+
+ from ansible_collections.{namespace}.{collectionname}.plugins.module_utils.{util} import {something}
+
+.. note::
+
+ You need to follow the same rules in changing paths and using namespaced names for subclassed plugins.
+
+The following example code snippets show a Python and a PowerShell module using both default Ansible ``module_utils`` and those provided by a collection. In this example the namespace is ``ansible_example`` and the collection is ``community``.
+
+In the Python example the ``module_utils`` is ``helper`` and the :abbr:`FQCN (Fully Qualified Collection Name)` is ``ansible_example.community.plugins.module_utils.helper``:
+
+.. code-block:: text
+
+ from ansible.module_utils.basic import AnsibleModule
+ from ansible.module_utils._text import to_text
+ from ansible.module_utils.six.moves.urllib.parse import urlencode
+ from ansible.module_utils.six.moves.urllib.error import HTTPError
+ from ansible_collections.ansible_example.community.plugins.module_utils.helper import HelperRequest
+
+ argspec = dict(
+ name=dict(required=True, type='str'),
+ state=dict(choices=['present', 'absent'], required=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argspec,
+ supports_check_mode=True
+ )
+
+ _request = HelperRequest(
+ module,
+ headers={"Content-Type": "application/json"},
+ data=data
+ )
+
+In the PowerShell example the ``module_utils`` is ``hyperv`` and the :abbr:`FQCN (Fully Qualified Collection Name)` is ``ansible_example.community.plugins.module_utils.hyperv``:
+
+.. code-block:: powershell
+
+ #!powershell
+ #AnsibleRequires -CSharpUtil Ansible.Basic
+ #AnsibleRequires -PowerShell ansible_collections.ansible_example.community.plugins.module_utils.hyperv
+
+ $spec = @{
+ name = @{ required = $true; type = "str" }
+ state = @{ required = $true; choices = @("present", "absent") }
+ }
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+ Invoke-HyperVFunction -Name $module.Params.name
+
+ $module.ExitJson()
+
+
+.. _update_init_role:
+
+Importing from __init__.py
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Because of the way that the CPython interpreter does imports, combined with the way the Ansible plugin loader works, if your custom embedded module or plugin requires importing something from an :file:`__init__.py` file, that also becomes part of your collection. You can either originate the content inside a standalone role or use the file name in the Python import statement. The following example is an :file:`__init__.py` file that is part of a callback plugin found inside a collection named ``ansible_example.community``.
+
+.. code-block:: python
+
+ from ansible_collections.ansible_example.community.plugins.callback.__init__ import CustomBaseClass
+
+
+Example: Migrating a standalone role with plugins to a collection
+-----------------------------------------------------------------
+
+In this example we have a standalone role called ``my-standalone-role.webapp`` to emulate a standalone role that contains dashes in the name (which is not valid in collections). This standalone role contains a custom module in the ``library/`` directory called ``manage_webserver``.
+
+.. code-block:: bash
+
+ my-standalone-role.webapp
+ ├── defaults
+ ├── files
+ ├── handlers
+ ├── library
+ ├── meta
+ ├── tasks
+ ├── templates
+ ├── tests
+ └── vars
+
+1. Create a new collection, for example, ``acme.webserver``:
+
+.. code-block:: bash
+
+ $ ansible-galaxy collection init acme.webserver
+ - Collection acme.webserver was created successfully
+ $ tree -d acme
+ acme
+ └── webserver
+ ├── docs
+ ├── plugins
+ └── roles
+
+2. Create the ``webapp`` role inside the collection and copy all contents from the standalone role:
+
+.. code-block:: bash
+
+ $ mkdir acme/webserver/roles/webapp
+ $ cp -r my-standalone-role.webapp/* acme/webserver/roles/webapp/
+
+3. Move the ``manage_webserver`` module to its new home in ``acme/webserver/plugins/modules/``:
+
+.. code-block:: bash
+
+ $ cp my-standalone-role.webapp/library/manage_webserver.py acme/webserver/plugins/modules/manage.py
+
+.. note::
+
+ This example changed the original source file ``manage_webserver.py`` to the destination file ``manage.py``. This is optional but the :abbr:`FQCN (Fully Qualified Collection Name)` provides the ``webserver`` context as ``acme.webserver.manage``.
+
+4. Change ``manage_webserver`` to ``acme.webserver.manage`` in :file:`tasks/` files in the role (for example, ``my-standalone-role.webapp/tasks/main.yml``) and in any other use of the original module name.
+
+.. note::
+
+ This name change is only required if you changed the original module name, but it illustrates that content referenced by :abbr:`FQCN (Fully Qualified Collection Name)` can offer context and in turn make module and plugin names shorter. If you anticipate using these modules independently of the role, keep the original naming conventions. Users can add the :ref:`collections keyword <collections_using_playbook>` in their playbooks. Typically roles are an abstraction layer and users won't use components of the role independently.
+
+
+Example: Supporting standalone roles and migrated collection roles in a downstream RPM
+---------------------------------------------------------------------------------------
+
+A standalone role can co-exist with its collection role counterpart (for example, as part of a support lifecycle of a product). This should only be done for a transition period, but both can exist downstream in packages such as RPMs. For example, the RHEL system roles could coexist with an `example of a RHEL system roles collection <https://github.com/maxamillion/collection-rhel-system-roles>`_ and provide backwards compatibility with the downstream RPM.
+
+This section walks through an example of creating this coexistence in a downstream RPM and requires Ansible 2.9.0 or later.
+
+To deliver a role as both a standalone role and a collection role:
+
+#. Place the collection in :file:`/usr/share/ansible/collections/ansible_collections/`.
+#. Copy the contents of the role inside the collection into a directory named after the standalone role and place the standalone role in :file:`/usr/share/ansible/roles/`.
+
+All previously bundled modules and plugins used in the standalone role are now referenced by :abbr:`FQCN (Fully Qualified Collection Name)` so even though they are no longer embedded, they can be found in the collection contents. This shows how the content inside a collection is a distinct entity that does not have to be bound to a role. You could alternately create two separate collections: one for the modules and plugins and another for the standalone role to migrate to. The role must use the modules and plugins as :abbr:`FQCN (Fully Qualified Collection Name)`.
+
+The following is an example RPM spec file that accomplishes this using this example content:
+
+.. code-block:: text
+
+ Name: acme-ansible-content
+ Summary: Ansible Collection for deploying and configuring ACME webapp
+ Version: 1.0.0
+ Release: 1%{?dist}
+ License: GPLv3+
+ Source0: acme-webserver-1.0.0.tar.gz
+
+ Url: https://github.com/acme/webserver-ansible-collection
+ BuildArch: noarch
+
+ %global roleprefix my-standalone-role.
+ %global collection_namespace acme
+ %global collection_name webserver
+
+ %global collection_dir %{_datadir}/ansible/collections/ansible_collections/%{collection_namespace}/%{collection_name}
+
+ %description
+ Ansible Collection and standalone role (for backward compatibility and migration) to deploy, configure, and manage the ACME webapp software.
+
+ %prep
+ %setup -qc
+
+ %build
+
+ %install
+
+ mkdir -p %{buildroot}/%{collection_dir}
+ cp -r ./* %{buildroot}/%{collection_dir}/
+
+ mkdir -p %{buildroot}/%{_datadir}/ansible/roles
+ for role in %{buildroot}/%{collection_dir}/roles/*
+ do
+ cp -pR ${role} %{buildroot}/%{_datadir}/ansible/roles/%{roleprefix}$(basename ${role})
+
+ mkdir -p %{buildroot}/%{_pkgdocdir}/$(basename ${role})
+ for docfile in README.md COPYING LICENSE
+ do
+ if [ -f ${role}/${docfile} ]
+ then
+ cp -p ${role}/${docfile} %{buildroot}/%{_pkgdocdir}/$(basename ${role})/${docfile}
+ fi
+ done
+ done
+
+
+ %files
+ %dir %{_datadir}/ansible
+ %dir %{_datadir}/ansible/roles
+ %dir %{_datadir}/ansible/collections
+ %dir %{_datadir}/ansible/collections/ansible_collections
+ %{_datadir}/ansible/roles/
+ %doc %{_pkgdocdir}/*/README.md
+ %doc %{_datadir}/ansible/roles/%{roleprefix}*/README.md
+ %{collection_dir}
+ %doc %{collection_dir}/roles/*/README.md
+ %license %{_pkgdocdir}/*/COPYING
+ %license %{_pkgdocdir}/*/LICENSE
diff --git a/docs/docsite/rst/dev_guide/module_lifecycle.rst b/docs/docsite/rst/dev_guide/module_lifecycle.rst
new file mode 100644
index 00000000..1201fffa
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/module_lifecycle.rst
@@ -0,0 +1,50 @@
+.. _module_lifecycle:
+
+**********************************
+The lifecycle of an Ansible module
+**********************************
+
+Modules in the main Ansible repo have a defined life cycle, from first introduction to final removal. The module life cycle is tied to the :ref:`Ansible release cycle <release_cycle>`.
+A module may move through these four states:
+
+1. When a module is first accepted into Ansible, we consider it in tech preview and will mark it as such in the documentation.
+
+2. If a module matures, we will remove the 'preview' mark in the documentation. We support (though we cannot guarantee) backwards compatibility for these modules, which means their parameters should be maintained with stable meanings.
+
+3. If a module's target API changes radically, or if someone creates a better implementation of its functionality, we may mark it deprecated. Modules that are deprecated are still available but they are reaching the end of their life cycle. We retain deprecated modules for 4 release cycles with deprecation warnings to help users update playbooks and roles that use them.
+
+4. When a module has been deprecated for four release cycles, we remove the code and mark the stub file removed. Modules that are removed are no longer shipped with Ansible. The stub file helps users find alternative modules.
+
+.. _deprecating_modules:
+
+Deprecating modules
+===================
+
+To deprecate a module, you must:
+
+1. Rename the file so it starts with an ``_``, for example, rename ``old_cloud.py`` to ``_old_cloud.py``. This keeps the module available and marks it as deprecated on the module index pages.
+2. Mention the deprecation in the relevant ``CHANGELOG``.
+3. Reference the deprecation in the relevant ``porting_guide_x.y.rst``.
+4. Add ``deprecated:`` to the documentation with the following sub-values:
+
+ :removed_in: A ``string``, such as ``"2.10"``; the version of Ansible where the module will be replaced with a docs-only module stub. Usually the current release +4. Mutually exclusive with ``remove_by_date``.
+ :remove_by_date: (Added in Ansible 2.10). An ISO 8601 formatted date when the module will be removed. Usually 2 years from the date the module is deprecated. Mutually exclusive with ``removed_in``.
+ :why: Optional string that details why the module has been deprecated.
+ :alternative: Inform users what they should do instead, for example, ``Use M(whatmoduletouseinstead) instead.``.
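+A hedged illustration of what such an entry might look like inside a module's
+``DOCUMENTATION`` block (the module name and version values here are made up):
+
+.. code-block:: python
+
+   DOCUMENTATION = '''
+   module: old_cloud
+   short_description: Manage the (hypothetical) old cloud service
+   deprecated:
+     removed_in: "2.14"
+     why: The API this module wraps was retired by the vendor.
+     alternative: Use M(new_cloud) instead.
+   '''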
+
+* Note: with the advent of collections and ``routing.yml`` we might soon require another entry in this file to mark the deprecation.
+
+* For an example of documenting deprecation, see this `PR that deprecates multiple modules <https://github.com/ansible/ansible/pull/43781/files>`_.
+ Some of the elements in the PR might now be out of date.
+
+Changing a module name
+======================
+
+You can also rename a module and keep an alias to the old name by using a symlink that starts with _.
+This example allows the ``stat`` module to be called with ``fileinfo``, making the following examples equivalent::
+
+ ln -s stat.py _fileinfo.py
+ ansible -m stat -a "path=/tmp" localhost
+ ansible -m fileinfo -a "path=/tmp" localhost
diff --git a/docs/docsite/rst/dev_guide/overview_architecture.rst b/docs/docsite/rst/dev_guide/overview_architecture.rst
new file mode 100644
index 00000000..fdd90625
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/overview_architecture.rst
@@ -0,0 +1,149 @@
+********************
+Ansible architecture
+********************
+
+Ansible is a radically simple IT automation engine that automates cloud provisioning, configuration management, application deployment, intra-service orchestration, and many other IT needs.
+
+Being designed for multi-tier deployments since day one, Ansible models your IT infrastructure by describing how all of your systems inter-relate, rather than just managing one system at a time.
+
+It uses no agents and no additional custom security infrastructure, so it's easy to deploy - and most importantly, it uses a very simple language (YAML, in the form of Ansible Playbooks) that allows you to describe your automation jobs in a way that approaches plain English.
+
+In this section, we'll give you a really quick overview of how Ansible works so you can see how the pieces fit together.
+
+.. contents::
+ :local:
+
+Modules
+=======
+
+Ansible works by connecting to your nodes and pushing out scripts called "Ansible modules" to them. Most modules accept parameters that describe the desired state of the system.
+Ansible then executes these modules (over SSH by default), and removes them when finished. Your library of modules can reside on any machine, and there are no servers, daemons, or databases required.
+
+You can :ref:`write your own modules <developing_modules_general>`, though you should first consider :ref:`whether you should <developing_modules>`. Typically you'll work with your favorite terminal program, a text editor, and probably a version control system to keep track of changes to your content. You may write specialized modules in any language that can return JSON (Ruby, Python, bash, and so on).
+
+Module utilities
+================
+
+When multiple modules use the same code, Ansible stores those functions as module utilities to minimize duplication and maintenance. For example, the code that parses URLs is ``lib/ansible/module_utils/url.py``. You can :ref:`write your own module utilities <developing_module_utilities>` as well. Module utilities may only be written in Python or in PowerShell.
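+A hedged sketch of the idea (the helper name and its behavior are made up for
+illustration):
+
+.. code-block:: python
+
+   # Shared helper that would live in a module_utils file so several
+   # modules can import it instead of duplicating the logic.
+   def parse_widget_url(url):
+       scheme, _, rest = url.partition('://')
+       return {'scheme': scheme, 'rest': rest}
+
+   # A module would then import and reuse it, for example:
+   # from ansible.module_utils.widget import parse_widget_url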
+
+Plugins
+=======
+
+:ref:`Plugins <plugins_lookup>` augment Ansible's core functionality. While modules execute on the target system in separate processes (usually that means on a remote system), plugins execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible - transforming data, logging output, connecting to inventory, and more. Ansible ships with a number of handy plugins, and you can easily :ref:`write your own <developing_plugins>`. For example, you can write an :ref:`inventory plugin <developing_inventory>` to connect to any datasource that returns JSON. Plugins must be written in Python.
+
+Inventory
+=========
+
+By default, Ansible represents the machines it manages in a file (INI, YAML, and so on) that puts all of your managed machines in groups of your own choosing.
+
+To add new machines, there is no additional SSL signing server involved, so there's never any hassle deciding why a particular machine didn't get linked up due to obscure NTP or DNS issues.
+
+If there's another source of truth in your infrastructure, Ansible can also connect to that. Ansible can draw inventory, group, and variable information from sources like EC2, Rackspace, OpenStack, and more.
+
+Here's what a plain text inventory file looks like::
+
+ [webservers]
+ www1.example.com
+ www2.example.com
+
+ [dbservers]
+ db0.example.com
+ db1.example.com
+
+Once inventory hosts are listed, variables can be assigned to them in simple text files (in a subdirectory called 'group_vars/' or 'host_vars/') or directly in the inventory file.
+
+Or, as already mentioned, use a dynamic inventory to pull your inventory from data sources like EC2, Rackspace, or OpenStack.
+
+Playbooks
+=========
+
+Playbooks can finely orchestrate multiple slices of your infrastructure topology, with very detailed control over how many machines to tackle at a time. This is where Ansible starts to get most interesting.
+
+Ansible's approach to orchestration is one of finely-tuned simplicity, as we believe your automation code should make perfect sense to you years down the road and there should be very little to remember about special syntax or features.
+
+Here's what a simple playbook looks like::
+
+ ---
+ - hosts: webservers
+ serial: 5 # update 5 machines at a time
+ roles:
+ - common
+ - webapp
+
+ - hosts: content_servers
+ roles:
+ - common
+ - content
+
+.. _ansible_search_path:
+
+The Ansible search path
+=======================
+
+Modules, module utilities, plugins, playbooks, and roles can live in multiple locations. If you
+write your own code to extend Ansible's core features, you may have multiple files with similar or the same names in different locations on your Ansible control node. The search path determines which of these files Ansible will discover and use on any given playbook run.
+
+Ansible's search path grows incrementally over a run. As
+Ansible finds each playbook and role included in a given run, it appends
+any directories related to that playbook or role to the search path. Those
+directories remain in scope for the duration of the run, even after the playbook or role
+has finished executing. Ansible loads modules, module utilities, and plugins in this order:
+
+1. Directories adjacent to a playbook specified on the command line. If you run Ansible with ``ansible-playbook /path/to/play.yml``, Ansible appends these directories if they exist:
+
+ .. code-block:: bash
+
+ /path/to/modules
+ /path/to/module_utils
+ /path/to/plugins
+
+2. Directories adjacent to a playbook that is statically imported by a
+ playbook specified on the command line. If ``play.yml`` includes
+ ``- import_playbook: /path/to/subdir/play1.yml``, Ansible appends these directories if they exist:
+
+ .. code-block:: bash
+
+ /path/to/subdir/modules
+ /path/to/subdir/module_utils
+ /path/to/subdir/plugins
+
+3. Subdirectories of a role directory referenced by a playbook. If
+ ``play.yml`` runs ``myrole``, Ansible appends these directories if they exist:
+
+ .. code-block:: bash
+
+ /path/to/roles/myrole/modules
+ /path/to/roles/myrole/module_utils
+ /path/to/roles/myrole/plugins
+
+4. Directories specified as default paths in ``ansible.cfg`` or by the related
+ environment variables, including the paths for the various plugin types. See :ref:`ansible_configuration_settings` for more information.
+ Sample ``ansible.cfg`` fields:
+
+ .. code-block:: bash
+
+ DEFAULT_MODULE_PATH
+ DEFAULT_MODULE_UTILS_PATH
+ DEFAULT_CACHE_PLUGIN_PATH
+ DEFAULT_FILTER_PLUGIN_PATH
+
+ Sample environment variables:
+
+ .. code-block:: bash
+
+ ANSIBLE_LIBRARY
+ ANSIBLE_MODULE_UTILS
+ ANSIBLE_CACHE_PLUGINS
+ ANSIBLE_FILTER_PLUGINS
+
+5. The standard directories that ship as part of the Ansible distribution.
+
+.. caution::
+
+ Modules, module utilities, and plugins in user-specified directories will
+ override the standard versions. This includes some files with generic names.
+ For example, if you have a file named ``basic.py`` in a user-specified
+ directory, it will override the standard ``ansible.module_utils.basic``.
+
+ If you have more than one module, module utility, or plugin with the same name in different user-specified directories, the order of commands at the command line and the order of includes and roles in each play will affect which one is found and used on that particular play.
diff --git a/docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst b/docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst
new file mode 100644
index 00000000..acce3de4
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/platforms/aws_guidelines.rst
@@ -0,0 +1,754 @@
+.. _AWS_module_development:
+
+****************************************************
+Guidelines for Ansible Amazon AWS module development
+****************************************************
+
+The Ansible AWS collection (on `Galaxy <https://galaxy.ansible.com/community/aws>`_, source code `repository <https://github.com/ansible-collections/community.aws>`_) is maintained by the Ansible AWS Working Group. For further information see the `AWS working group community page <https://github.com/ansible/community/wiki/aws>`_. If you are planning to contribute AWS modules to Ansible then getting in touch with the working group is a good way to start, especially because a similar module may already be under development.
+
+.. contents::
+ :local:
+
+Maintaining existing modules
+============================
+
+Fixing bugs
+-----------
+
+Bug fixes to code that relies on boto will still be accepted. When possible,
+the code should be ported to use boto3.
+
+Adding new features
+-------------------
+
+Try to keep backward compatibility with relatively recent versions of boto3. That means that if you
+want to implement some functionality that uses a new feature of boto3, it should only fail if that
+feature actually needs to be run, with a message stating the missing feature and minimum required
+version of boto3.
+
+Use feature testing (for example, ``hasattr(some_boto3_object, 'shiny_new_method')``) to check whether boto3
+supports a feature rather than version checking. For example, from the ``ec2`` module:
+
+.. code-block:: python
+
+ if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+ else:
+ if instance_profile_name is not None:
+ module.fail_json(msg="instance_profile_name parameter requires boto version 2.5.0 or higher")
+
+Migrating to boto3
+------------------
+
+Prior to Ansible 2.0, modules were written in either boto3 or boto. We are
+still porting some modules to boto3. Modules that still require boto should be ported to use boto3 rather than using both libraries (boto and boto3). We would like to remove the boto dependency from all modules.
+
+Porting code to AnsibleAWSModule
+---------------------------------
+
+Some old AWS modules use the generic ``AnsibleModule`` as a base rather than the more efficient ``AnsibleAWSModule``. To port an old module to ``AnsibleAWSModule``, change:
+
+.. code-block:: python
+
+ from ansible.module_utils.basic import AnsibleModule
+ ...
+ module = AnsibleModule(...)
+
+to:
+
+.. code-block:: python
+
+ from ansible.module_utils.aws.core import AnsibleAWSModule
+ ...
+ module = AnsibleAWSModule(...)
+
+Few other changes are required. AnsibleAWSModule
+does not inherit methods from AnsibleModule by default, but most useful methods
+are included. If you do find an issue, please raise a bug report.
+
+When porting, keep in mind that AnsibleAWSModule adds the default ec2
+argument spec automatically. In pre-port modules, you should see common arguments
+specified with:
+
+.. code-block:: python
+
+ def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ name=dict(default='default'),
+ # ... and so on ...
+ ))
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,)
+
+These can be replaced with:
+
+.. code-block:: python
+
+ def main():
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ name=dict(default='default'),
+ # ... and so on ...
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,)
+
+Creating new AWS modules
+========================
+
+Use boto3 and AnsibleAWSModule
+-------------------------------
+
+All new AWS modules must use boto3 and ``AnsibleAWSModule``.
+
+``AnsibleAWSModule`` greatly simplifies exception handling and library
+management, reducing the amount of boilerplate code. If you cannot
+use ``AnsibleAWSModule`` as a base, you must document the reason and request an exception to this rule.
+
+Naming your module
+------------------
+
+Base the name of the module on the part of AWS that you actually use. (A good rule of thumb is to
+take whatever module you use with boto as a starting point). Don't further abbreviate names - if
+something is a well known abbreviation of a major component of AWS (for example, VPC or ELB), that's fine, but
+don't create new ones independently.
+
+Unless the name of your service is quite unique, please consider using ``aws_`` as a prefix. For example ``aws_lambda``.
+
+Importing botocore and boto3
+----------------------------
+
+The ``ansible.module_utils.ec2`` and ``ansible.module_utils.aws.core`` modules both
+automatically import boto3 and botocore. If boto3 is missing from the system then the variable
+``HAS_BOTO3`` will be set to ``False``. Normally, this means that modules don't need to import
+boto3 directly. There is no need to check ``HAS_BOTO3`` when using AnsibleAWSModule
+as the module does that check:
+
+.. code-block:: python
+
+ from ansible.module_utils.aws.core import AnsibleAWSModule
+ try:
+ import botocore
+ except ImportError:
+ pass # handled by AnsibleAWSModule
+
+or:
+
+.. code-block:: python
+
+ from ansible.module_utils.basic import AnsibleModule
+ from ansible.module_utils.ec2 import HAS_BOTO3
+ try:
+ import botocore
+ except ImportError:
+ pass # handled by imported HAS_BOTO3
+
+ def main():
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 and botocore are required for this module')
+
+Supporting Module Defaults
+--------------------------
+
+The existing AWS modules support using :ref:`module_defaults <module_defaults>` for common
+authentication parameters. To do the same for your new module, add an entry for it in
+``lib/ansible/config/module_defaults.yml``. These entries take the form of:
+
+.. code-block:: yaml
+
+ aws_module_name:
+ - aws
+
+Connecting to AWS
+=================
+
+AnsibleAWSModule provides the ``resource`` and ``client`` helper methods for obtaining boto3 connections.
+These handle some of the more esoteric connection options, such as security tokens and boto profiles.
+
+If using the basic AnsibleModule then you should use ``get_aws_connection_info`` and then ``boto3_conn``
+to connect to AWS as these handle the same range of connection options.
+
+These helpers also check for missing profiles or a region not set when it needs to be, so you don't have to.
+
+An example of connecting to ec2 is shown below. Note that unlike boto, boto3 raises no
+``NoAuthHandlerFound`` exception at connection time. Instead, an ``AuthFailure`` exception will be thrown when you use the
+connection. To ensure that authorization, parameter validation and permissions errors are all caught,
+you should catch ``ClientError`` and ``BotoCoreError`` exceptions with every boto3 connection call.
+See exception handling:
+
+.. code-block:: python
+
+ module.client('ec2')
+
+or for the higher level ec2 resource:
+
+.. code-block:: python
+
+ module.resource('ec2')
+
+
+An example of the older style connection used for modules based on AnsibleModule rather than AnsibleAWSModule:
+
+.. code-block:: python
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+
+
+Common Documentation Fragments for Connection Parameters
+--------------------------------------------------------
+
+There are two :ref:`common documentation fragments <module_docs_fragments>`
+that should be included into almost all AWS modules:
+
+* ``aws`` - contains the common boto connection parameters
+* ``ec2`` - contains the common region parameter required for many AWS modules
+
+These fragments should be used rather than re-documenting these properties to ensure consistency
+and that the more esoteric connection options are documented. For example:
+
+.. code-block:: python
+
+ DOCUMENTATION = '''
+ module: my_module
+ # some lines omitted here
+ requirements: [ 'botocore', 'boto3' ]
+ extends_documentation_fragment:
+ - aws
+ - ec2
+ '''
+
+Handling exceptions
+===================
+
+You should wrap any boto3 or botocore call in a try block. If an exception is thrown, then there
+are a number of possibilities for handling it.
+
+* Catch the general ``ClientError`` or look for a specific error code with
+ ``is_boto3_error_code``.
+* Use ``aws_module.fail_json_aws()`` to report the module failure in a standard way
+* Retry using AWSRetry
+* Use ``fail_json()`` to report the failure without using ``ansible.module_utils.aws.core``
+* Do something custom in the case where you know how to handle the exception
+
+For more information on botocore exception handling see the `botocore error documentation <https://botocore.readthedocs.io/en/latest/client_upgrades.html#error-handling>`_.
+
+Using is_boto3_error_code
+-------------------------
+
+To use ``ansible.module_utils.aws.core.is_boto3_error_code`` to catch a single
+AWS error code, call it in place of ``ClientError`` in your except clauses. In
+this case, *only* the ``InvalidGroup.NotFound`` error code will be caught here,
+and any other error will be raised for handling elsewhere in the program.
+
+.. code-block:: python
+
+ try:
+     info = connection.describe_security_groups(**kwargs)
+ except is_boto3_error_code('InvalidGroup.NotFound'):
+     info = None  # the group does not exist; handle that case below
+
+ if info is not None:
+     do_something(info)  # do something with the info that was successfully returned
+
+Using fail_json_aws()
+---------------------
+
+In the AnsibleAWSModule there is a special method, ``module.fail_json_aws()`` for nice reporting of
+exceptions. Call this on your exception and it will report the error together with a traceback for
+use in Ansible verbose mode.
+
+You should use the AnsibleAWSModule for all new modules, unless this is not possible. If you are adding
+significant amounts of exception handling to an existing module, we recommend migrating the module to use
+AnsibleAWSModule (there are very few changes required to do this).
+
+.. code-block:: python
+
+ from ansible.module_utils.aws.core import AnsibleAWSModule
+
+ # Set up module parameters
+ # module params code here
+
+ # Connect to AWS
+ # connection code here
+
+ # Make a call to AWS
+ name = module.params.get('name')
+ try:
+ result = connection.describe_frooble(FroobleName=name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+Note that it should normally be acceptable to catch all normal exceptions here, however if you
+expect anything other than botocore exceptions you should test everything works as expected.
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+.. code-block:: python
+
+ # Make a call to AWS
+   name = module.params.get('name')
+ try:
+ result = connection.describe_frooble(FroobleName=name)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'FroobleNotFound':
+ workaround_failure() # This is an error that we can work around
+ else:
+ module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+Using fail_json() and avoiding ansible.module_utils.aws.core
+------------------------------------------------------------
+
+Boto3 provides lots of useful information when an exception is thrown so pass this to the user
+along with the message.
+
+.. code-block:: python
+
+   import traceback
+
+   from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict
+
+   try:
+       import botocore
+   except ImportError:
+       pass  # caught by imported HAS_BOTO3
+
+ # Connect to AWS
+ # connection code here
+
+ # Make a call to AWS
+   name = module.params.get('name')
+ try:
+ result = connection.describe_frooble(FroobleName=name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+Note: we use ``str(e)`` rather than ``e.message`` because the latter doesn't
+work with Python 3.
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+.. code-block:: python
+
+ # Make a call to AWS
+   name = module.params.get('name')
+ try:
+ result = connection.describe_frooble(FroobleName=name)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'FroobleNotFound':
+ workaround_failure() # This is an error that we can work around
+ else:
+ module.fail_json(msg="Couldn't obtain frooble %s: %s" % (name, str(e)),
+ exception=traceback.format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name)
+
+
+API throttling (rate limiting) and pagination
+=============================================
+
+For methods that return a lot of results, boto3 often provides
+`paginators <https://boto3.readthedocs.io/en/latest/guide/paginators.html>`_. If the method
+you're calling has ``NextToken`` or ``Marker`` parameters, you should probably
+check whether a paginator exists (the top of each boto3 service reference page has a link
+to Paginators, if the service has any). To use paginators, obtain a paginator object,
+call ``paginator.paginate`` with the appropriate arguments and then call ``build_full_result``.
+
+Any time that you are calling the AWS API a lot, you may experience API throttling,
+and there is an ``AWSRetry`` decorator that can be used to ensure backoff. Because
+exception handling could interfere with the retry working properly (as AWSRetry needs to
+catch throttling exceptions to work correctly), you'd need to provide a backoff function
+and then put exception handling around the backoff function.
+
+You can use ``exponential_backoff`` or ``jittered_backoff`` strategies - see
+the cloud ``module_utils`` (:file:`lib/ansible/module_utils/cloud.py`)
+and the `AWS Architecture blog <https://www.awsarchitectureblog.com/2015/03/backoff.html>`_ for more details.
+
+The combination of these two approaches is then:
+
+.. code-block:: python
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_some_resource_with_backoff(client, **kwargs):
+ paginator = client.get_paginator('describe_some_resource')
+ return paginator.paginate(**kwargs).build_full_result()['SomeResource']
+
+ def describe_some_resource(client, module):
+ filters = ansible_dict_to_boto3_filter_list(module.params['filters'])
+ try:
+ return describe_some_resource_with_backoff(client, Filters=filters)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Could not describe some resource")
+
+
+If the underlying ``describe_some_resource`` API call throws a ``ResourceNotFound``
+exception, ``AWSRetry`` takes this as a cue to retry until the exception is no longer
+thrown (this is so that when creating a resource, we can just retry until it exists).
+
+To handle authorization failures or parameter validation errors in
+``describe_some_resource_with_backoff``, where we just want to return ``None`` if
+the resource doesn't exist and not retry, we need:
+
+.. code-block:: python
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_some_resource_with_backoff(client, **kwargs):
+ try:
+ return client.describe_some_resource(ResourceName=kwargs['name'])['Resources']
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'ResourceNotFound':
+ return None
+ else:
+ raise
+       except botocore.exceptions.BotoCoreError:
+           raise
+
+ def describe_some_resource(client, module):
+       name = module.params.get('name')
+ try:
+ return describe_some_resource_with_backoff(client, name=name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Could not describe resource %s" % name)
+
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+client returned by ``AnsibleAWSModule``. To add retries to a client,
+create the client with a retry decorator:
+
+.. code-block:: python
+
+ module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the ``aws_retry`` argument. By default, no retries are used.
+
+.. code-block:: python
+
+ ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+ # equivalent with normal AWSRetry
+   @AWSRetry.jittered_backoff(retries=10)
+   def describe_instances(client, **kwargs):
+       return client.describe_instances(**kwargs)
+
+ describe_instances(module.client('ec2'), InstanceIds=['i-123456789'])
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+
+You can also customize the ``retries``, ``delay``, and ``max_delay`` parameters used by
+``AWSRetry.jittered_backoff`` using module params. Take a look at
+the :ref:`cloudformation <cloudformation_module>` module for an example.
+
+To make all Amazon modules uniform, prefix the module param with ``backoff_``, so ``retries``
+becomes ``backoff_retries``, and likewise with ``backoff_delay`` and ``backoff_max_delay``.
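+
+As a rough sketch (the ``backoff_*`` parameter wiring below is illustrative, not a fixed API):
+
+.. code-block:: python
+
+   # Hypothetical sketch: expose the AWSRetry tuning knobs as module params.
+   argument_spec.update(dict(
+       backoff_retries=dict(type='int', default=10),
+       backoff_delay=dict(type='int', default=3),
+       backoff_max_delay=dict(type='int', default=30),
+   ))
+   module = AnsibleAWSModule(argument_spec=argument_spec)
+
+   retry_decorator = AWSRetry.jittered_backoff(
+       retries=module.params['backoff_retries'],
+       delay=module.params['backoff_delay'],
+       max_delay=module.params['backoff_max_delay'],
+   )
+   ec2 = module.client('ec2', retry_decorator=retry_decorator)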
+
+Returning Values
+================
+
+When you make a call using boto3, you will probably get back some useful information that you
+should return in the module. As well as information related to the call itself, you will also have
+some response metadata. It is OK to return this to the user as well, as they may find it useful.
+
+Boto3 returns all values CamelCased. Ansible follows Python standards for variable names and uses
+snake_case. There is a helper function in ``module_utils/ec2.py`` called ``camel_dict_to_snake_dict``
+that allows you to easily convert the boto3 response to snake_case.
+
+You should use this helper function and avoid changing the names of values returned by Boto3.
+For example, if boto3 returns a value called ``SecretAccessKey``, do not change it to ``AccessKey``.
+
+.. code-block:: python
+
+ # Make a call to AWS
+ result = connection.aws_call()
+
+ # Return the result to the user
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
+
+Dealing with IAM JSON policy
+============================
+
+If your module accepts IAM JSON policies then set the type to 'json' in the module spec. For
+example:
+
+.. code-block:: python
+
+ argument_spec.update(
+ dict(
+ policy=dict(required=False, default=None, type='json'),
+ )
+ )
+
+Note that AWS is unlikely to return the policy in the same order that it was submitted. Therefore,
+use the ``compare_policies`` helper function which handles this variance.
+
+``compare_policies`` takes two dictionaries, recursively sorts and makes them hashable for comparison,
+and returns True if they are different.
+
+.. code-block:: python
+
+ from ansible.module_utils.ec2 import compare_policies
+
+ import json
+
+ # some lines skipped here
+
+ # Get the policy from AWS
+ current_policy = json.loads(aws_object.get_policy())
+ user_policy = json.loads(module.params.get('policy'))
+
+ # Compare the user submitted policy to the current policy ignoring order
+ if compare_policies(user_policy, current_policy):
+ # Update the policy
+ aws_object.set_policy(user_policy)
+ else:
+ # Nothing to do
+ pass
+
+Dealing with tags
+=================
+
+AWS has a concept of resource tags. Usually the boto3 API has separate calls for tagging and
+untagging a resource. For example, the ec2 API has ``create_tags`` and ``delete_tags`` calls.
+
+It is common practice in Ansible AWS modules to have a ``purge_tags`` parameter that defaults to
+true.
+
+The ``purge_tags`` parameter means that existing tags will be deleted if they are not specified by
+the Ansible task.
+
+There is a helper function ``compare_aws_tags`` to ease dealing with tags. It can compare two dicts
+and return the tags to set and the tags to delete. See the Helper functions section below for more
+detail.
+
+Helper functions
+================
+
+Along with the connection functions in the Ansible ``module_utils/ec2.py``, there are some other useful
+functions detailed below.
+
+camel_dict_to_snake_dict
+------------------------
+
+boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping with
+Ansible format, this function will convert the keys to snake_case.
+
+``camel_dict_to_snake_dict`` takes an optional parameter called ``ignore_list`` which is a list of
+keys not to convert (this is usually useful for the ``tags`` dict, whose child keys should keep their
+casing).
+
+Another optional parameter is ``reversible``. By default, ``HTTPEndpoint`` is converted to ``http_endpoint``,
+which would then be converted by ``snake_dict_to_camel_dict`` to ``HttpEndpoint``.
+Passing ``reversible=True`` converts ``HTTPEndpoint`` to ``h_t_t_p_endpoint``, which converts back to ``HTTPEndpoint``.
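+
+A quick sketch (the dictionary contents are illustrative):
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+
+   resource = {'HTTPEndpoint': 'enabled', 'Tags': {'MyTag': 'value'}}
+
+   # The 'Tags' key itself is converted, but its child keys are left alone
+   camel_dict_to_snake_dict(resource, ignore_list=['Tags'])
+   # -> {'http_endpoint': 'enabled', 'tags': {'MyTag': 'value'}}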
+
+snake_dict_to_camel_dict
+------------------------
+
+``snake_dict_to_camel_dict`` converts snake_cased keys to camel case. By default, because it was
+first introduced for ECS purposes, this converts to dromedaryCase. An optional
+parameter called ``capitalize_first``, which defaults to ``False``, can be used to convert to CamelCase.
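+
+A quick sketch of the difference (illustrative values):
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import snake_dict_to_camel_dict
+
+   snake_dict_to_camel_dict({'delay_seconds': 10})
+   # -> {'delaySeconds': 10}  (dromedaryCase by default)
+
+   snake_dict_to_camel_dict({'delay_seconds': 10}, capitalize_first=True)
+   # -> {'DelaySeconds': 10}  (CamelCase)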
+
+ansible_dict_to_boto3_filter_list
+---------------------------------
+
+Converts an Ansible list of filters to a boto3-friendly list of dicts. This is useful for any
+boto3 ``_facts`` modules.
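+
+For example (the filter values are illustrative):
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+
+   ansible_dict_to_boto3_filter_list({'vpc-id': 'vpc-1234567', 'state': 'available'})
+   # -> [{'Name': 'vpc-id', 'Values': ['vpc-1234567']},
+   #     {'Name': 'state', 'Values': ['available']}]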
+
+boto_exception
+--------------
+
+Pass an exception returned from boto or boto3, and this function will consistently get the message from the exception.
+
+Deprecated: use ``AnsibleAWSModule``'s ``fail_json_aws`` instead.
+
+
+boto3_tag_list_to_ansible_dict
+------------------------------
+
+Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys
+called 'Key' and 'Value' by default. These key names can be overridden when calling the function.
+For example, if you have already camel_cased your list of tags you may want to pass lowercase key
+names instead, in other words, 'key' and 'value'.
+
+This function converts the list into a single dict where the dict key is the tag key and the dict
+value is the tag value.
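+
+For example (illustrative tag values):
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+
+   tag_list = [{'Key': 'Environment', 'Value': 'production'},
+               {'Key': 'Owner', 'Value': 'me'}]
+   boto3_tag_list_to_ansible_dict(tag_list)
+   # -> {'Environment': 'production', 'Owner': 'me'}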
+
+ansible_dict_to_boto3_tag_list
+------------------------------
+
+Opposite of above. Converts an Ansible dict to a boto3 tag list of dicts. You can again override
+the key names used if 'Key' and 'Value' are not suitable.
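+
+For example (illustrative values):
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list
+
+   ansible_dict_to_boto3_tag_list({'Environment': 'production', 'Owner': 'me'})
+   # -> [{'Key': 'Environment', 'Value': 'production'},
+   #     {'Key': 'Owner', 'Value': 'me'}]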
+
+get_ec2_security_group_ids_from_names
+-------------------------------------
+
+Pass this function a list of security group names or a combination of security group names and IDs,
+and this function will return a list of IDs. You should also pass the VPC ID if known because
+security group names are not necessarily unique across VPCs.
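+
+A minimal sketch, assuming ``connection`` is a boto3 EC2 client and the group names are illustrative:
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import get_ec2_security_group_ids_from_names
+
+   group_ids = get_ec2_security_group_ids_from_names(
+       ['my-web-sg', 'sg-0123456789abcdef0'],  # names and IDs can be mixed
+       connection,
+       vpc_id='vpc-1234567',  # recommended, because names are not unique across VPCs
+   )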
+
+compare_policies
+----------------
+
+Pass two dicts of policies to check if there are any meaningful differences, and this function will
+return True if there are. This recursively sorts the dicts and makes them hashable before comparison.
+
+This method should be used any time policies are being compared so that a change in order
+doesn't result in unnecessary changes.
+
+compare_aws_tags
+----------------
+
+Pass two dicts of tags and an optional purge parameter and this function will return a dict
+containing key pairs you need to modify and a list of tag key names that you need to remove. Purge
+is True by default. If purge is False then any existing tags will not be modified.
+
+This function is useful when using boto3 'add_tags' and 'remove_tags' functions. Be sure to use the
+other helper function ``boto3_tag_list_to_ansible_dict`` to get an appropriate tag dict before
+calling this function. Since the AWS APIs are not uniform (for example, EC2 is different from Lambda), this will work
+without modification for some services (such as Lambda) while others may need modification before using these values
+(such as EC2, which requires the tags to unset to be in the form ``[{'Key': key1}, {'Key': key2}]``).
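+
+A minimal sketch (``resource`` and the module parameters are illustrative):
+
+.. code-block:: python
+
+   from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags
+
+   current_tags = boto3_tag_list_to_ansible_dict(resource['Tags'])
+   desired_tags = module.params.get('tags')
+
+   # tags_to_set is a dict of tags to add or change; tags_to_unset is a
+   # list of tag keys to remove
+   tags_to_set, tags_to_unset = compare_aws_tags(
+       current_tags, desired_tags, purge_tags=module.params.get('purge_tags'))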
+
+Integration Tests for AWS Modules
+=================================
+
+All new AWS modules should include integration tests to ensure that any changes in AWS APIs that
+affect the module are detected. At a minimum this should cover the key API calls and check the
+documented return values are present in the module result.
+
+For general information on running the integration tests see the :ref:`Integration Tests page of the
+Module Development Guide <testing_integration>`, especially the section on configuration for cloud tests.
+
+The integration tests for your module should be added in :file:`test/integration/targets/MODULE_NAME`.
+
+You must also have an aliases file in :file:`test/integration/targets/MODULE_NAME/aliases`. This file serves
+two purposes. First, it indicates that the test is an AWS test, causing the test framework to make AWS
+credentials available during the test run. Second, it puts the test in a test group, causing it to be run in the
+continuous integration build.
+
+Tests for new modules should be added to the same group as existing AWS tests. In general, just copy
+an existing aliases file, such as the `aws_s3 tests aliases file <https://github.com/ansible-collections/amazon.aws/blob/master/tests/integration/targets/aws_s3/aliases>`_.
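+
+For example, an aliases file typically looks something like the following (the group name is
+illustrative; copy it from an existing AWS test rather than inventing one):
+
+.. code-block:: none
+
+   cloud/aws
+   shippable/aws/group1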
+
+AWS Credentials for Integration Tests
+-------------------------------------
+
+The testing framework handles running the tests with appropriate AWS credentials; these are made available
+to your test in the following variables:
+
+* ``aws_region``
+* ``aws_access_key``
+* ``aws_secret_key``
+* ``security_token``
+
+All invocations of AWS modules in the test should set these parameters. To avoid duplicating these
+for every call, it's preferable to use :ref:`module_defaults <module_defaults>`. For example:
+
+.. code-block:: yaml
+
+ - name: set connection information for aws modules and run tasks
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+
+ block:
+
+ - name: Do Something
+ ec2_instance:
+ ... params ...
+
+ - name: Do Something Else
+ ec2_instance:
+ ... params ...
+
+AWS Permissions for Integration Tests
+-------------------------------------
+
+As explained in the :ref:`Integration Test guide <testing_integration>`
+there are defined IAM policies in `mattclay/aws-terminator <https://github.com/mattclay/aws-terminator>`_ that contain the necessary permissions
+to run the AWS integration tests.
+
+If your module interacts with a new service or otherwise requires new permissions, tests will fail when you submit a pull request and the
+`Ansibullbot <https://github.com/ansible/ansibullbot/blob/master/ISSUE_HELP.md>`_ will tag your PR as needing revision.
+We do not automatically grant additional permissions to the roles used by the continuous integration builds.
+You will need to raise a Pull Request against `mattclay/aws-terminator <https://github.com/mattclay/aws-terminator>`_ to add them.
+
+If your PR has test failures, check carefully to be certain the failure is only due to the missing permissions. If you've ruled out other sources of failure, add a comment with the ``ready_for_review``
+tag and explain that it's due to missing permissions.
+
+Your pull request cannot be merged until the tests are passing. If your pull request is failing due to missing permissions,
+you must collect the minimum IAM permissions required to
+run the tests.
+
+There are two ways to figure out which IAM permissions you need for your PR to pass:
+
+* Start with the most permissive IAM policy, run the tests to collect information about which resources your tests actually use, then construct a policy based on that output. This approach only works on modules that use `AnsibleAWSModule`.
+* Start with the least permissive IAM policy, run the tests to discover a failure, add permissions for the resource that addresses that failure, then repeat. If your module uses `AnsibleModule` instead of `AnsibleAWSModule`, you must use this approach.
+
+To start with the most permissive IAM policy:
+
+1) `Create an IAM policy <https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start>`_ that allows all actions (set ``Action`` and ``Resource`` to ``*``).
+2) Run your tests locally with this policy. On AnsibleAWSModule-based modules, the ``debug_botocore_endpoint_logs`` option is automatically set to ``yes``, so you should see a list of AWS ACTIONS after the PLAY RECAP showing all the permissions used. If your tests use a boto/AnsibleModule module, you must start with the least permissive policy (see below).
+3) Modify your policy to allow only the actions your tests use. Restrict account, region, and prefix where possible. Wait a few minutes for your policy to update.
+4) Run the tests again with a user or role that allows only the new policy.
+5) If the tests fail, troubleshoot (see tips below), modify the policy, run the tests again, and repeat the process until the tests pass with a restrictive policy.
+6) Open a pull request proposing the minimum required policy to the `CI policies <https://github.com/mattclay/aws-terminator/tree/master/aws/policy>`_.
+
+To start from the least permissive IAM policy:
+
+1) Run the integration tests locally with no IAM permissions.
+2) Examine the error when the tests reach a failure.
+ a) If the error message indicates the action used in the request, add the action to your policy.
+ b) If the error message does not indicate the action used in the request:
+      - Usually the action is a CamelCase version of the method name - for example, for an ec2 client the method ``describe_security_groups`` correlates to the action ``ec2:DescribeSecurityGroups``.
+ - Refer to the documentation to identify the action.
+ c) If the error message indicates the resource ARN used in the request, limit the action to that resource.
+ d) If the error message does not indicate the resource ARN used:
+ - Determine if the action can be restricted to a resource by examining the documentation.
+ - If the action can be restricted, use the documentation to construct the ARN and add it to the policy.
+3) Add the action or resource that caused the failure to `an IAM policy <https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start>`_. Wait a few minutes for your policy to update.
+4) Run the tests again with this policy attached to your user or role.
+5) If the tests still fail at the same place with the same error you will need to troubleshoot (see tips below). If the first test passes, repeat steps 2 and 3 for the next error. Repeat the process until the tests pass with a restrictive policy.
+6) Open a pull request proposing the minimum required policy to the `CI policies <https://github.com/mattclay/aws-terminator/tree/master/aws/policy>`_.
+
+Troubleshooting IAM policies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- When you make changes to a policy, wait a few minutes for the policy to update before re-running the tests.
+- Use the `policy simulator <https://policysim.aws.amazon.com/>`_ to verify that each action (limited by resource when applicable) in your policy is allowed.
+- If you're restricting actions to certain resources, replace resources temporarily with `*`. If the tests pass with wildcard resources, there is a problem with the resource definition in your policy.
+- If the initial troubleshooting above doesn't provide any more insight, AWS may be using additional undisclosed resources and actions.
+- Examine the AWS FullAccess policy for the service for clues.
+- Re-read the AWS documentation, especially the list of `Actions, Resources and Condition Keys <https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html>`_ for the various AWS services.
+- Look at the `cloudonaut <https://iam.cloudonaut.io>`_ documentation as a troubleshooting cross-reference.
+- Use a search engine.
+- Ask in the Ansible IRC channel #ansible-aws (on freenode IRC).
+
+Unsupported Integration tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are a limited number of reasons why it may not be practical to run integration
+tests for a module within CI. Where these apply, you should add the keyword
+``unsupported`` to the aliases file in :file:`test/integration/targets/MODULE_NAME/aliases`, as in the sketch below.
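+
+A sketch of such an aliases file (the ``cloud/aws`` line is illustrative; keep whatever group your
+test already uses):
+
+.. code-block:: none
+
+   cloud/aws
+   unsupported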
+
+Some cases where tests should be marked as unsupported:
+
+1) The tests take longer than 10 or 15 minutes to complete
+2) The tests create expensive resources
+3) The tests create inline policies
+4) The tests require the existence of external resources
+5) The tests manage account-level security policies such as the password policy or AWS Organizations.
+
+Where one of these reasons applies, you should open a pull request proposing the minimum required policy to the
+`unsupported test policies <https://github.com/mattclay/aws-terminator/tree/master/hacking/aws_config/test_policies>`_.
+
+Unsupported integration tests will not be automatically run by CI. However, the
+necessary policies should be available so that the tests can be manually run by
+someone performing a PR review or writing a patch.
diff --git a/docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst b/docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst
new file mode 100644
index 00000000..8827cefb
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/platforms/openstack_guidelines.rst
@@ -0,0 +1,57 @@
+.. _OpenStack_module_development:
+
+OpenStack Ansible Modules
+=========================
+
+The OpenStack collection (on `Galaxy <https://galaxy.ansible.com/openstack/cloud>`_, source code `repository <https://opendev.org/openstack/ansible-collections-openstack.git>`_) contains modules for interacting with OpenStack as either an admin or an end user. If the module does not begin with ``os_``, it is either deprecated or soon to be deprecated. This document serves as developer coding guidelines for modules in this collection.
+
+.. contents::
+ :local:
+
+Naming
+------
+
+* All module names should start with ``os_``
+* Name any module that a cloud consumer would expect to use after the logical resource it manages: ``os_server`` not ``os_nova``. This naming convention acknowledges that the end user does not care which service manages the resource - that is a deployment detail. For example, cloud consumers may not know whether their floating IPs are managed by Nova or Neutron.
+* Name any module that a cloud admin would expect to use with the service and the resource: ``os_keystone_domain``.
+* If the module is one that a cloud admin and a cloud consumer could both use,
+ the cloud consumer rules apply.
+
+Interface
+---------
+
+* If the resource being managed has an id, it should be returned.
+* If the resource being managed has an associated object more complex than
+ an id, it should also be returned.
+
+Interoperability
+----------------
+
+* It should be assumed that the cloud consumer does not know a bazillion
+ details about the deployment choices their cloud provider made, and a best
+ effort should be made to present one sane interface to the Ansible user
+ regardless of deployer insanity.
+* All modules should work appropriately against all existing known public
+ OpenStack clouds.
+* It should be assumed that a user may have more than one cloud account that
+ they wish to combine as part of a single Ansible-managed infrastructure.
+
+Libraries
+---------
+
+* All modules should use ``openstack_full_argument_spec`` to pick up the
+  standard input such as auth and SSL support (see the sketch after this list).
+* All modules should include ``extends_documentation_fragment: openstack``.
+* All complex cloud interaction or interoperability code should be housed in
+ the `openstacksdk <https://git.openstack.org/cgit/openstack/openstacksdk>`_
+ library.
+* All OpenStack API interactions should happen through the openstacksdk and not
+  through the OpenStack Client libraries. The OpenStack Client libraries do not
+  have end users as a primary audience; they are for intra-server communication.
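+
+A minimal sketch of this pattern (a hedged illustration; the ``name`` and ``state`` options are
+placeholders, and the exact ``module_utils`` import path may differ in the collection):
+
+.. code-block:: python
+
+   from ansible.module_utils.basic import AnsibleModule
+   from ansible.module_utils.openstack import (openstack_full_argument_spec,
+                                               openstack_module_kwargs)
+
+   # Merge module-specific options with the standard auth and SSL options
+   argument_spec = openstack_full_argument_spec(
+       name=dict(required=True),
+       state=dict(default='present', choices=['absent', 'present']),
+   )
+   module = AnsibleModule(argument_spec, **openstack_module_kwargs())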
+
+Testing
+-------
+
+* Integration testing is currently done in `OpenStack's CI system <https://git.openstack.org/cgit/openstack/openstacksdk/tree/openstack/tests/ansible>`_
+* Testing in openstacksdk produces an obvious chicken-and-egg scenario. Work is under
+ way to trigger from and report on PRs directly.
diff --git a/docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst b/docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst
new file mode 100644
index 00000000..bf461d40
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/platforms/ovirt_dev_guide.rst
@@ -0,0 +1,220 @@
+.. _oVirt_module_development:
+
+oVirt Ansible Modules
+=====================
+
+The set of modules for interacting with oVirt/RHV are currently part of the community.general collection (on `Galaxy <https://galaxy.ansible.com/community/general>`_, source code `repository <https://github.com/ansible-collections/community.general/tree/main/plugins/modules/cloud/ovirt>`_). This document serves as developer coding guidelines for creating oVirt/RHV modules.
+
+.. contents::
+ :local:
+
+Naming
+------
+
+- All modules should start with an ``ovirt_`` prefix.
+- All modules should be named after the resource it manages in singular
+ form.
+- All modules that gather information should have a ``_info``
+ suffix.
+
+Interface
+---------
+
+- Every module should return the ID of the resource it manages.
+- Every module should return the dictionary of the resource it manages.
+- Never change the name of the parameter, as we guarantee backward
+ compatibility. Use aliases instead.
+- If a parameter can't achieve idempotency for any reason, please
+ document it.
+
+Interoperability
+----------------
+
+- All modules should work against all minor versions of
+ version 4 of the API. Version 3 of the API is not supported.
+
+Libraries
+---------
+
+- All modules should use ``ovirt_full_argument_spec`` or
+ ``ovirt_info_full_argument_spec`` to pick up the standard input (such
+ as auth and ``fetch_nested``).
+- All modules should use ``extends_documentation_fragment``: ovirt to go
+ along with ``ovirt_full_argument_spec``.
+- All info modules should use ``extends_documentation_fragment``:
+ ``ovirt_info`` to go along with ``ovirt_info_full_argument_spec``.
+- Functions that are common to all modules should be implemented in the
+ ``module_utils/ovirt.py`` file, so they can be reused.
+- Python SDK version 4 must be used.
+
+New module development
+----------------------
+
+Please read :ref:`developing_modules` first to learn what common properties,
+functions, and features every module must have.
+
+In order to achieve idempotency of oVirt entity attributes, a helper class
+was created. The first thing you need to do is to extend this class and override a few
+methods:
+
+.. code:: python
+
+ try:
+ import ovirtsdk4.types as otypes
+ except ImportError:
+ pass
+
+ from ansible.module_utils.ovirt import (
+ BaseModule,
+ equal
+ )
+
+ class ClustersModule(BaseModule):
+
+ # The build method builds the entity we want to create.
+ # Always be sure to build only the parameters the user specified
+ # in their yaml file, so we don't change the values which we shouldn't
+ # change. If you set the parameter to None, nothing will be changed.
+ def build_entity(self):
+ return otypes.Cluster(
+ name=self.param('name'),
+ comment=self.param('comment'),
+ description=self.param('description'),
+ )
+
+       # The update_check method checks whether an update needs to be done on
+       # the entity. The equal method doesn't check values which are None,
+       # which means it doesn't check values the user didn't set in YAML.
+       # All other values are checked, and if a mismatch is found, the update
+       # method is run on the entity built by the 'build_entity' method.
+       # You don't have to care about calling the update; it's called behind
+       # the scenes by the 'BaseModule' class.
+ def update_check(self, entity):
+ return (
+ equal(self.param('comment'), entity.comment)
+ and equal(self.param('description'), entity.description)
+ )
+
+The code above handles checking whether the entity should be updated, so we
+don't update the entity if it is not needed, and it also constructs the needed
+SDK entity.
+
+.. code:: python
+
+   import traceback
+
+   from ansible.module_utils.basic import AnsibleModule
+   from ansible.module_utils.ovirt import (
+       check_sdk,
+       create_connection,
+       ovirt_full_argument_spec,
+   )
+
+   # This module will support two states of the cluster,
+   # either it will be present or absent. The user can
+   # specify three parameters: name, comment and description.
+   # The 'ovirt_full_argument_spec' function will merge the
+   # parameters created here with some common ones like 'auth':
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+       name=dict(required=True),
+ description=dict(default=None),
+ comment=dict(default=None),
+ )
+
+   # Create the Ansible module; please always implement the
+   # feature called 'check_mode'. For 'create', 'update' and
+   # 'delete' operations it's implemented by default in BaseModule:
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Check if the user has Python SDK installed:
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+
+ # Create the connection to the oVirt engine:
+ connection = create_connection(auth)
+
+ # Create the service which manages the entity:
+ clusters_service = connection.system_service().clusters_service()
+
+ # Create the module which will handle create, update and delete flow:
+ clusters_module = ClustersModule(
+ connection=connection,
+ module=module,
+ service=clusters_service,
+ )
+
+ # Check the state and call the appropriate method:
+ state = module.params['state']
+ if state == 'present':
+ ret = clusters_module.create()
+ elif state == 'absent':
+ ret = clusters_module.remove()
+
+       # The return value of the 'create' and 'remove' methods is a dictionary
+       # with the 'id' of the entity we manage and the type of the entity
+       # with its attributes filled in. The 'changed' status is
+       # also returned by those methods:
+ module.exit_json(**ret)
+ except Exception as e:
+       # Modules can't raise exceptions; they must always exit with
+       # 'module.fail_json' in case of an exception. Always use
+       # 'exception=traceback.format_exc()' for debugging purposes:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+       # Log out only if the user did not pass a 'token' in the 'auth'
+       # parameter:
+ connection.close(logout=auth.get('token') is None)
+
+If your module must support action handling (for example,
+virtual machine start) you must ensure that you handle the states of the
+virtual machine correctly, and document the behavior of the
+module:
+
+.. code:: python
+
+ if state == 'running':
+ ret = vms_module.action(
+ action='start',
+ post_action=vms_module._post_start_action,
+ action_condition=lambda vm: (
+ vm.status not in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]
+ ),
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ # Start action kwargs:
+ use_cloud_init=use_cloud_init,
+ use_sysprep=use_sysprep,
+ # ...
+ )
+
+As you can see from the preceding example, the ``action`` method accepts ``action_condition`` and
+``wait_condition``, which are methods that accept the virtual machine
+object as a parameter, so you can check whether the virtual
+machine is in a proper state before the action. The rest of the
+parameters are for the ``start`` action. You may also handle pre-
+or post- action tasks by defining ``pre_action`` and ``post_action``
+parameters.
+
+Testing
+-------
+
+- Integration testing is currently done in oVirt's CI system
+ `on Jenkins <https://jenkins.ovirt.org/view/All/job/ovirt-system-tests_ansible-suite-master/>`__
+ and
+ `on GitHub <https://github.com/oVirt/ovirt-system-tests/tree/master/ansible-suite-master/>`__.
+- Please consider using these integration tests if you create a new module or add a new feature to an existing
+ module.
diff --git a/docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst b/docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst
new file mode 100644
index 00000000..7a5c8410
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/platforms/vmware_guidelines.rst
@@ -0,0 +1,270 @@
+.. _VMware_module_development:
+
+****************************************
+Guidelines for VMware module development
+****************************************
+
+The Ansible VMware collection (on `Galaxy <https://galaxy.ansible.com/community/vmware>`_, source code `repository <https://github.com/ansible-collections/vmware>`_) is maintained by the VMware Working Group. For further information see the `team community page <https://github.com/ansible/community/wiki/VMware>`_.
+
+.. contents::
+ :local:
+
+Testing with govcsim
+====================
+
+Most of the existing modules are covered by functional tests. The tests are located in :file:`test/integration/targets/`.
+
+By default, the tests run against a vCenter API simulator called `govcsim <https://github.com/vmware/govmomi/tree/master/vcsim>`_. ``ansible-test`` will automatically pull a `govcsim container <https://quay.io/repository/ansible/vcenter-test-container>`_ and use it to set up the test environment.
+
+You can trigger the test of a module manually with the ``ansible-test`` command. For example, to trigger ``vcenter_folder`` tests:
+
+.. code-block:: shell
+
+ source hacking/env-setup
+ ansible-test integration --python 3.7 vcenter_folder
+
+``govcsim`` is handy because it's much faster than a regular test environment. However, it does not
+support all the ESXi or vCenter features.
+
+.. note::
+
+   Do not confuse ``govcsim`` with ``vcsim``, which is an old, outdated version of the vCenter simulator; ``govcsim`` is newer and written in Go.
+
+Testing with your own infrastructure
+====================================
+
+You can also target a regular VMware environment. This section explains step by step how you can run the test suite yourself.
+
+Requirements
+------------
+
+- 2 ESXi hosts (6.5 or 6.7)
+  - with 2 NICs; the second one should be available for the tests
+- a VCSA host
+- an NFS server
+- Python dependencies:
+  - `pyvmomi <https://github.com/vmware/pyvmomi/tree/master/pyVmomi>`_
+  - `requests <https://2.python-requests.org/en/master/>`_
+
+If you want to deploy your test environment in a hypervisor, both VMware and `Libvirt <https://github.com/goneri/vmware-on-libvirt>`_ work well.
+
+NFS server configuration
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Your NFS server must expose the following directory structure:
+
+.. code-block:: shell
+
+ $ tree /srv/share/
+ /srv/share/
+ ├── isos
+ │   ├── base.iso
+ │   ├── centos.iso
+ │   └── fedora.iso
+ └── vms
+ 2 directories, 3 files
+
+On a Linux system, you can expose the directory over NFS with the following export file:
+
+.. code-block:: shell
+
+ $ cat /etc/exports
+ /srv/share 192.168.122.0/255.255.255.0(rw,anonuid=1000,anongid=1000)
+
+.. note::
+
+ With this configuration all the new files will be owned by the user with the UID and GID 1000/1000.
+ Adjust the configuration to match your user's UID/GID.
+
+The service can be enabled with:
+
+.. code-block:: shell
+
+ $ sudo systemctl enable --now nfs-server
+
+
+Configure your installation
+---------------------------
+
+Prepare a configuration file that describes your set-up. The file
+should be called :file:`test/integration/cloud-config-vcenter.ini` and based on
+:file:`test/lib/ansible_test/config/cloud-config-vcenter.ini.template`. For instance, if you've deployed your lab with
+`vmware-on-libvirt <https://github.com/goneri/vmware-on-libvirt>`_:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ vcenter_username: administrator@vsphere.local
+ vcenter_password: !234AaAa56
+ vcenter_hostname: vcenter.test
+ vmware_validate_certs: false
+ esxi1_username: root
+ esxi1_hostname: esxi1.test
+ esxi1_password: root
+ esxi2_username: root
+ esxi2_hostname: test2.test
+ esxi2_password: root
+
+If you use an HTTP proxy
+-------------------------
+Support for hosting test infrastructure behind an HTTP proxy is currently in development. See the following pull requests for more information:
+
+- `ansible-test: vcenter behind an HTTP proxy <https://github.com/ansible/ansible/pull/58208>`_
+- `pyvmomi: proxy support <https://github.com/vmware/pyvmomi/pull/799>`_
+- `VMware: add support for HTTP proxy in connection API <https://github.com/ansible/ansible/pull/52936>`_
+
+Once you have incorporated the code from those PRs, specify the location of the proxy server with the two extra keys:
+
+.. code-block:: ini
+
+ vmware_proxy_host: esxi1-gw.ws.testing.ansible.com
+ vmware_proxy_port: 11153
+
+In addition, you may need to adjust the variables of the following file to match the configuration of your lab:
+:file:`test/integration/targets/prepare_vmware_tests/vars/real_lab.yml`. If you use `vmware-on-libvirt <https://github.com/goneri/vmware-on-libvirt>`_ to prepare your lab, you don't have anything to change.
+
+Run the test-suite
+------------------
+
+Once your configuration is ready, you can trigger a run with the following command:
+
+.. code-block:: shell
+
+ source hacking/env-setup
+ VMWARE_TEST_PLATFORM=static ansible-test integration --python 3.7 vmware_host_firewall_manager
+
+``vmware_host_firewall_manager`` is the name of the module to test.
+
+``vmware_guest`` is much larger than any other test role and is rather slow. You can enable or disable some of its test playbooks in
+:file:`test/integration/targets/vmware_guest/defaults/main.yml`.
+
+
+Unit-test
+=========
+
+The VMware modules have limited unit-test coverage. You can run the test suite with the
+following commands:
+
+.. code-block:: shell
+
+ source hacking/env-setup
+ ansible-test units --venv --python 3.7 '.*vmware.*'
+
+Code style and best practice
+============================
+
+datacenter argument with ESXi
+-----------------------------
+
+The ``datacenter`` parameter should not use ``ha-datacenter`` by default. Otherwise the user may
+not realize that Ansible is silently targeting the wrong data center.
+
+esxi_hostname should not be mandatory
+-------------------------------------
+
+Depending upon the functionality provided by ESXi or vCenter, some modules can seamlessly work with both. In this case,
+``esxi_hostname`` parameter should be optional.
+
+.. code-block:: python
+
+ if self.is_vcenter():
+ esxi_hostname = module.params.get('esxi_hostname')
+ if not esxi_hostname:
+           self.module.fail_json(msg="esxi_hostname parameter is mandatory")
+ self.host = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_hostname)[0]
+ else:
+ self.host = find_obj(self.content, [vim.HostSystem], None)
+ if self.host is None:
+ self.module.fail_json(msg="Failed to find host system.")
+
+Example should use the fully qualified collection name (FQCN)
+-------------------------------------------------------------
+
+Use FQCN for examples within module documentation. For instance, you should use ``community.vmware.vmware_guest`` instead of just
+``vmware_guest``.
+
+This way, the examples don't depend on the ``collections`` directive of the
+playbook.
+
+Functional tests
+----------------
+
+Writing new tests
+~~~~~~~~~~~~~~~~~
+
+If you are writing a new collection of integration tests, there are a few VMware-specific things to note beyond
+the standard Ansible :ref:`integration testing<testing_integration>` process.
+
+The test-suite uses a set of common, pre-defined vars located in the :file:`test/integration/targets/prepare_vmware_tests/` role.
+The resources defined there are automatically created by importing that role at the start of your test:
+
+.. code-block:: yaml
+
+ - import_role:
+ name: prepare_vmware_tests
+ vars:
+ setup_datacenter: true
+
+This will give you a ready-to-use cluster, datacenter, datastores, folder, switch, dvswitch, ESXi hosts, and VMs.
+
+No need to create too many resources
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Most of the time, it's not necessary to use ``with_items`` to create multiple resources. By avoiding it,
+you speed up the test execution and simplify the cleanup afterwards.
+
+VM names should be predictable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you need to create a new VM during your test, you can use ``test_vm1``, ``test_vm2`` or ``test_vm3``. This
+way it will be automatically cleaned up for you.
+
+Avoid the common boilerplate code in your test playbook
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since Ansible 2.10, the test suite uses ``module_defaults``. This keyword
+allows us to preinitialize the following default keys of the VMware modules:
+
+- hostname
+- username
+- password
+- validate_certs
+
+For example, the following block:
+
+.. code-block:: yaml
+
+ - name: Add a VMware vSwitch
+ vmware_vswitch:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ validate_certs: 'no'
+ esxi_hostname: 'esxi1'
+ switch_name: "boby"
+ state: present
+
+should be simplified to just:
+
+.. code-block:: yaml
+
+ - name: Add a VMware vSwitch
+ vmware_vswitch:
+ esxi_hostname: 'esxi1'
+ switch_name: "boby"
+ state: present
+
+
+Typographic convention
+======================
+
+Nomenclature
+------------
+
+We try to enforce the following rules in our documentation:
+
+- VMware, not VMWare or vmware
+- ESXi, not esxi or ESXI
+- vCenter, not vcenter or VCenter
+
+We also refer to vcsim's Go implementation as ``govcsim``. This is to avoid any confusion with the outdated implementation.
diff --git a/docs/docsite/rst/dev_guide/shared_snippets/licensing.txt b/docs/docsite/rst/dev_guide/shared_snippets/licensing.txt
new file mode 100644
index 00000000..2802c420
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/shared_snippets/licensing.txt
@@ -0,0 +1,9 @@
+.. note::
+ **LICENSING REQUIREMENTS** Ansible enforces the following licensing requirements:
+
+ * Utilities (files in ``lib/ansible/module_utils/``) may have one of two licenses:
+ * A file in ``module_utils`` used **only** for a specific vendor's hardware, provider, or service may be licensed under GPLv3+.
+ Adding a new file under ``module_utils`` with GPLv3+ needs to be approved by the core team.
+ * All other ``module_utils`` must be licensed under BSD, so GPL-licensed third-party and Galaxy modules can use them.
+ * If there's doubt about the appropriate license for a file in ``module_utils``, the Ansible Core Team will decide during an Ansible Core Community Meeting.
+ * All other files shipped with Ansible, including all modules, must be licensed under the GPL license (GPLv3 or later).
diff --git a/docs/docsite/rst/dev_guide/style_guide/basic_rules.rst b/docs/docsite/rst/dev_guide/style_guide/basic_rules.rst
new file mode 100644
index 00000000..034aece5
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/basic_rules.rst
@@ -0,0 +1,69 @@
+.. _styleguide_basic:
+
+Basic rules
+===========
+.. contents::
+ :local:
+
+Use standard American English
+-----------------------------
+Ansible uses Standard American English. Watch for common words that are spelled differently in American English (color vs colour, organize vs organise, and so on).
+
+Write for a global audience
+---------------------------
+Everything you say should be understandable by people of different backgrounds and cultures. Avoid idioms and regionalisms and maintain a neutral tone that cannot be misinterpreted. Avoid attempts at humor.
+
+Follow naming conventions
+-------------------------
+Always follow naming conventions and trademarks.
+
+.. good place to link to an Ansible terminology page
+
+Use clear sentence structure
+----------------------------
+Clear sentence structure means:
+
+- Start with the important information first.
+- Avoid padding/adding extra words that make the sentence harder to understand.
+- Keep it short - Longer sentences are harder to understand.
+
+Some examples of improving sentences:
+
+Bad:
+ The unwise walking about upon the area near the cliff edge may result in a dangerous fall and therefore it is recommended that one remains a safe distance to maintain personal safety.
+
+Better:
+ Danger! Stay away from the cliff.
+
+Bad:
+ Furthermore, large volumes of water are also required for the process of extraction.
+
+Better:
+ Extraction also requires large volumes of water.
+
+Avoid verbosity
+---------------
+Write short, succinct sentences. Avoid terms like:
+
+- "...as has been said before,"
+- "..each and every,"
+- "...point in time,"
+- "...in order to,"
+
+Highlight menu items and commands
+---------------------------------
+When documenting menus or commands, it helps to **bold** what is important.
+
+For menu procedures, bold the menu names, button names, and so on to help the user find them on the GUI:
+
+1. On the **File** menu, click **Open**.
+2. Type a name in the **User Name** field.
+3. In the **Open** dialog box, click **Save**.
+4. On the toolbar, click the **Open File** icon.
+
+For code or command snippets, use the RST `code-block directive <https://www.sphinx-doc.org/en/1.5/markup/code.html#directive-code-block>`_::
+
+ .. code-block:: bash
+
+ ssh my_vyos_user@vyos.example.net
+ show config
diff --git a/docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst b/docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst
new file mode 100644
index 00000000..4505e2d0
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/grammar_punctuation.rst
@@ -0,0 +1,201 @@
+
+Grammar and Punctuation
+``````````````````````````````````````
+
+Common Styles and Usage, and Common Mistakes
+----------------------------------------------------
+
+Ansible
+~~~~~~~~~
+* Write "Ansible." Not "Ansible, Inc." or "AnsibleWorks The only exceptions to this rule are when we're writing legal or financial statements.
+
+* Never use the logotype by itself in body text. Always keep to the same font you are using in the rest of the sentence.
+
+* A company is singular in the US. In other words, Ansible is an "it," not a "they."
+
+
+Capitalization
+~~~~~~~~~~~~~~
+If it's not a real product, service, or department at Ansible, don't capitalize it. Not even if it seems important. Capitalize only the first letter of the first word in headlines.
+
+Colon
+~~~~~~~~~~~~~~~~~
+A colon is generally used before a list or series:
+
+- The Triangle Area consists of three cities: Raleigh, Durham, and Chapel Hill.
+
+But not if the list is a complement or object of an element in the sentence:
+
+- Before going on vacation, be sure to (1) set the alarm, (2) cancel the newspaper, and (3) ask a neighbor to collect your mail.
+
+Use a colon after "as follows" and "the following" if the related list comes immediately after:
+
+   The steps for changing directories are as follows:
+
+ 1. Open a terminal.
+ 2. Type cd...
+
+Use a colon to introduce a bullet list (or dash, or icon/symbol of your choice):
+
+ In the Properties dialog box, you'll find the following entries:
+
+ - Connection name
+ - Count
+ - Cost per item
+
+
+Commas
+~~~~~~~~~~~
+Use serial commas, the comma before the "and" in a series of three or more items:
+
+- "Item 1, item 2, and item 3."
+
+
+It's easier to read that way and helps avoid confusion. The primary exception you will see is in PR, where it is traditional not to use serial commas because it is often the style of journalists.
+
+Commas are always important, considering the vast difference in meanings of the following two statements.
+
+- Let's eat, Grandma.
+- Let's eat Grandma.
+
+Correct punctuation could save Grandma's life.
+
+If that does not convince you, maybe this will:
+
+.. image:: images/commas-matter.jpg
+
+
+Contractions
+~~~~~~~~~~~~~
+Do not use contractions in Ansible documents.
+
+Em dashes
+~~~~~~~~~~
+When possible, use em-dashes with no space on either side. When full em-dashes aren't available, use double-dashes with no spaces on either side--like this.
+
+A pair of em dashes can be used in place of commas to enhance readability. Note, however, that dashes are always more emphatic than commas.
+
+A pair of em dashes can replace a pair of parentheses. Dashes are considered less formal than parentheses; they are also more intrusive. If you want to draw attention to the parenthetical content, use dashes. If you want to include the parenthetical content more subtly, use parentheses.
+
+.. note::
+ When dashes are used in place of parentheses, surrounding punctuation should be omitted. Compare the following examples.
+
+::
+
+ Upon discovering the errors (all 124 of them), the publisher immediately recalled the books.
+
+ Upon discovering the errors—all 124 of them—the publisher immediately recalled the books.
+
+
+When used in place of parentheses at the end of a sentence, only a single dash is used.
+
+::
+
+ After three weeks on set, the cast was fed up with his direction (or, rather, lack of direction).
+
+ After three weeks on set, the cast was fed up with his direction—or, rather, lack of direction.
+
+
+Exclamation points (!)
+~~~~~~~~~~~~~~~~~~~~~~~
+Do not use them at the end of sentences. An exclamation point can be used when referring to a command, such as the bang (!) command.
+
+Gender References
+~~~~~~~~~~~~~~~~~~
+Do not use gender-specific pronouns in documentation. It is far less awkward to read a sentence that uses "they" and "their" rather than "he/she" and "his/hers."
+
+It is fine to use "you" when giving instructions and "the user," "new users," and so on. in more general explanations.
+
+Never use "one" in place of "you" when writing technical documentation. Using "one" is far too formal.
+
+Never use "we" when writing. "We" aren't doing anything on the user side. Ansible's products are doing the work as requested by the user.
+
+
+Hyphen
+~~~~~~~~~~~~~~
+The hyphen's primary function is the formation of certain compound terms. Do not use a hyphen unless it serves a purpose. If a compound adjective cannot be misread or, as with many psychological terms, its meaning is established, a hyphen is not necessary.
+
+Use hyphens to avoid ambiguity or confusion:
+
+::
+
+ a little-used car
+ a little used-car
+
+ cross complaint
+ cross-complaint
+
+ high-school girl
+ high schoolgirl
+
+ fine-tooth comb (most people do not comb their teeth)
+
+ third-world war
+ third world war
+
+.. image:: images/hyphen-funny.jpg
+
+In professionally printed material (particularly books, magazines, and newspapers), the hyphen is used to divide words between the end of one line and the beginning of the next. This allows for an evenly aligned right margin without highly variable (and distracting) word spacing.
+
+
+Lists
+~~~~~~~
+Keep the structure of bulleted lists equivalent and consistent. If one bullet is a verb phrase, they should all be verb phrases. If one is a complete sentence, they should all be complete sentences, and so on.
+
+Capitalize the first word of each bullet, unless it is obviously just a straight list of items, such as:
+
+* computer
+* monitor
+* keyboard
+* mouse
+
+When the bulleted list appears within the context of other copy (unless it's a straight list like the previous example), add periods, even if the bullets are sentence fragments. Part of the reason behind this is that each bullet is said to complete the original sentence.
+
+In some cases where the bullets are appearing independently, such as in a poster or a homepage promotion, they do not need periods.
+
+When giving instructional steps, use numbered lists instead of bulleted lists.
+
+
+Months and States
+~~~~~~~~~~~~~~~~~~~~
+Abbreviate months and states according to AP. Months are only abbreviated if they are used in conjunction with a day. Example: "The President visited in January 1999." or "The President visited Jan. 12."
+
+Months: Jan., Feb., March, April, May, June, July, Aug., Sept., Oct., Nov., Dec.
+
+States: Ala., Ariz., Ark., Calif., Colo., Conn., Del., Fla., Ga., Ill., Ind., Kan., Ky., La., Md., Mass., Mich., Minn., Miss., Mo., Mont., Neb., Nev., NH, NJ, NM, NY, NC, ND, Okla., Ore., Pa., RI, SC, SD, Tenn., Vt., Va., Wash., W.Va., Wis., Wyo.
+
+Numbers
+~~~~~~~~~
+Numbers between one and nine are written out. 10 and above are numerals. The exception to this is writing "4 million" or "4 GB." It's also acceptable to use numerals in tables and charts.
+
+Phone Numbers
++++++++++++++++
+
+Phone number style: 1 (919) 555-0123 x002 and 1 888-GOTTEXT
+
+
+Quotations (Using Quotation Marks and Writing Quotes)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ "Place the punctuation inside the quotes," the editor said.
+
+Except in rare instances, use only "said" or "says" because anything else just gets in the way of the quote itself, and also tends to editorialize.
+
+Place the name first, right after the quote:
+ "I like to write first-person because I like to become the character I'm writing," Wally Lamb said.
+
+Not:
+ "I like to write first-person because I like to become the character I'm writing," said Wally Lamb.
+
+
+Semicolon
+~~~~~~~~~~~~~~~
+Use a semicolon to separate items in a series if the items contain commas:
+
+- Every day I have coffee, toast, and fruit for breakfast; a salad for lunch; and a peanut butter sandwich, cookies, ice cream, and chocolate cake for dinner.
+
+Use a semicolon before a conjunctive adverb (however, therefore, otherwise, namely, for example, and so on):
+
+- I think; therefore, I am.
+
+Spacing after sentences
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Use only a single space after a sentence.
+
+Time
+~~~~~~~~
+* Time of day is written as "4 p.m."
diff --git a/docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg
new file mode 100644
index 00000000..2dec81c4
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter-2.jpg
Binary files differ
diff --git a/docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg
new file mode 100644
index 00000000..1699a31a
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/images/commas-matter.jpg
Binary files differ
diff --git a/docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg b/docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg
new file mode 100644
index 00000000..d642703f
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/images/hyphen-funny.jpg
Binary files differ
diff --git a/docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg b/docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg
new file mode 100644
index 00000000..f4851b07
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/images/thenvsthan.jpg
Binary files differ
diff --git a/docs/docsite/rst/dev_guide/style_guide/index.rst b/docs/docsite/rst/dev_guide/style_guide/index.rst
new file mode 100644
index 00000000..a50a3180
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/index.rst
@@ -0,0 +1,244 @@
+.. _style_guide:
+
+*******************
+Ansible style guide
+*******************
+
+Welcome to the Ansible style guide!
+To create clear, concise, consistent, useful materials on docs.ansible.com, follow these guidelines:
+
+.. contents::
+ :local:
+
+Linguistic guidelines
+=====================
+
+We want the Ansible documentation to be:
+
+* clear
+* direct
+* conversational
+* easy to translate
+
+We want reading the docs to feel like having an experienced, friendly colleague
+explain how Ansible works.
+
+Stylistic cheat-sheet
+---------------------
+
+This cheat-sheet illustrates a few rules that help achieve the "Ansible tone":
+
++-------------------------------+------------------------------+----------------------------------------+
+| Rule | Good example | Bad example |
++===============================+==============================+========================================+
+| Use active voice | You can run a task by | A task can be run by |
++-------------------------------+------------------------------+----------------------------------------+
+| Use the present tense | This command creates a | This command will create a |
++-------------------------------+------------------------------+----------------------------------------+
+| Address the reader | As you expand your inventory | When the number of managed nodes grows |
++-------------------------------+------------------------------+----------------------------------------+
+| Use standard English | Return to this page | Hop back to this page |
++-------------------------------+------------------------------+----------------------------------------+
+| Use American English | The color of the output | The colour of the output |
++-------------------------------+------------------------------+----------------------------------------+
+
+Header case
+-----------
+
+Headers should be written in sentence case. For example, this section's title is
+``Header case``, not ``Header Case`` or ``HEADER CASE``.
+
+
+Avoid using Latin phrases
+-------------------------
+
+Latin words and phrases like ``e.g.`` or ``etc.``
+are easily understood by many English speakers.
+However, they may be harder for others to understand and are also tricky for automated translation.
+
+Use the following English terms in place of Latin terms or abbreviations:
+
++-------------------------------+------------------------------+
+| Latin | English |
++===============================+==============================+
+| i.e.                          | in other words               |
++-------------------------------+------------------------------+
+| e.g. | for example |
++-------------------------------+------------------------------+
+| etc.                          | and so on                    |
++-------------------------------+------------------------------+
+| via                           | by/through                   |
++-------------------------------+------------------------------+
+| vs./versus | rather than/against |
++-------------------------------+------------------------------+
+
+
+reStructuredText guidelines
+===========================
+
+The Ansible documentation is written in reStructuredText and processed by Sphinx.
+We follow these technical or mechanical guidelines on all rST pages:
+
+Header notation
+---------------
+
+`Section headers in reStructuredText <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections>`_
+can use a variety of notations.
+Sphinx 'learns on the fly' when creating a hierarchy of headers.
+To make our documents easy to read and to edit, we follow a standard set of header notations.
+We use:
+
+* ``###`` with overline, for parts:
+
+.. code-block:: rst
+
+ ###############
+ Developer guide
+ ###############
+
+* ``***`` with overline, for chapters:
+
+.. code-block:: rst
+
+ *******************
+ Ansible style guide
+ *******************
+
+* ``===`` for sections:
+
+.. code-block:: rst
+
+ Mechanical guidelines
+ =====================
+
+* ``---`` for subsections:
+
+.. code-block:: rst
+
+ Internal navigation
+ -------------------
+
+* ``^^^`` for sub-subsections:
+
+.. code-block:: rst
+
+ Adding anchors
+ ^^^^^^^^^^^^^^
+
+* ``"""`` for paragraphs:
+
+.. code-block:: rst
+
+ Paragraph that needs a title
+ """"""""""""""""""""""""""""
+
+
+Internal navigation
+-------------------
+
+`Anchors (also called labels) and links <https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#ref-role>`_
+work together to help users find related content.
+Local tables of contents also help users navigate quickly to the information they need.
+All internal links should use the ``:ref:`` syntax.
+Every page should have at least one anchor to support internal ``:ref:`` links.
+Long pages, or pages with multiple levels of headers, can also include a local TOC.
+
+.. _adding_anchors_rst:
+
+Adding anchors
+^^^^^^^^^^^^^^
+
+* Include at least one anchor on every page
+* Place the main anchor above the main header (see the example after this list)
+* If the file has a unique title, use that for the main page anchor::
+
+   .. _unique_page:
+
+* You may also add anchors elsewhere on the page
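+
+Putting these guidelines together, a page titled ``Unique page`` might begin as follows (the title and anchor name are purely illustrative):
+
+.. code-block:: rst
+
+   .. _unique_page:
+
+   ***********
+   Unique page
+   ***********
+
+The ``:ref:`` examples in the next section show how to link to this anchor.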
+
+Adding internal links
+^^^^^^^^^^^^^^^^^^^^^
+
+* All internal links must use ``:ref:`` syntax. These links both point to the anchor defined above:
+
+.. code-block:: rst
+
+ :ref:`unique_page`
+ :ref:`this page <unique_page>`
+
+The second example adds custom text for the link.
+
+Adding links to modules and plugins
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Ansible 2.10 and later require the extended Fully Qualified Collection Name (FQCN) as part of the links:
+
+.. code-block:: text
+
+ ansible_collections. + FQCN + _module
+
+For example:
+
+ .. code-block:: rst
+
+ :ref:`ansible.builtin.first_found lookup plugin <ansible_collections.ansible.builtin.first_found_lookup>`
+
+displays as :ref:`ansible.builtin.first_found lookup plugin <ansible_collections.ansible.builtin.first_found_lookup>`.
+
+Modules require different suffixes from other plugins:
+
+* Module links use this extended FQCN module name with ``_module`` for the anchor.
+* Plugin links use this extended FQCN plugin name with the plugin type (``_connection`` for example).
+
+.. code-block:: rst
+
+ :ref:`arista.eos.eos_config <ansible_collections.arista.eos.eos_config_module>`
+ :ref:`community.kubernetes.kubectl connection plugin <ansible_collections.community.kubernetes.kubectl_connection>`
+
+.. note::
+
+   ``ansible.builtin`` is the FQCN for modules included in ``ansible-base``. Documentation links are the only place you prepend ``ansible_collections`` to the FQCN. This is used by the documentation build scripts to correctly fetch documentation from collections on Ansible Galaxy.
+
+.. _local_toc:
+
+Adding local TOCs
+^^^^^^^^^^^^^^^^^
+
+The page you're reading includes a `local TOC <http://docutils.sourceforge.net/docs/ref/rst/directives.html#table-of-contents>`_.
+If you include a local TOC:
+
+* place it below, not above, the main heading and (optionally) introductory text
+* use the ``:local:`` directive so the page's main header is not included
+* do not include a title
+
+The syntax is:
+
+.. code-block:: rst
+
+ .. contents::
+ :local:
+
+More resources
+==============
+
+These pages offer more help with grammatical, stylistic, and technical rules for documentation.
+
+.. toctree::
+ :maxdepth: 1
+
+ basic_rules
+ voice_style
+ trademarks
+ grammar_punctuation
+ spelling_word_choice
+ search_hints
+ resources
+
+.. seealso::
+
+ :ref:`community_documentation_contributions`
+ How to contribute to the Ansible documentation
+ :ref:`testing_documentation_locally`
+ How to build the Ansible documentation
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible-docs IRC chat channel
diff --git a/docs/docsite/rst/dev_guide/style_guide/resources.rst b/docs/docsite/rst/dev_guide/style_guide/resources.rst
new file mode 100644
index 00000000..c624b12e
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/resources.rst
@@ -0,0 +1,10 @@
+Resources
+````````````````
+* Follow the style of the :ref:`Ansible Documentation <ansible_documentation>`
+* Ask for advice on IRC, on the ``#ansible-devel`` Freenode channel
+* Review these online style guides:
+
+ * `AP Stylebook <https://www.apstylebook.com>`_
+ * `Chicago Manual of Style <https://www.chicagomanualofstyle.org/home.html>`_
+ * `Strunk and White's Elements of Style <https://www.crockford.com/wrrrld/style.html>`_
+
diff --git a/docs/docsite/rst/dev_guide/style_guide/search_hints.rst b/docs/docsite/rst/dev_guide/style_guide/search_hints.rst
new file mode 100644
index 00000000..d9bf3f66
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/search_hints.rst
@@ -0,0 +1,48 @@
+
+.. _search_hints:
+
+Writing documentation so search can find it
+-------------------------------------------
+
+One of the keys to writing good documentation is to make it findable. Readers use a combination of internal site search and external search engines such as Google or DuckDuckGo.
+
+To ensure Ansible documentation is findable, you should:
+
+#. Use headings that clearly reflect what you are documenting.
+#. Use numbered lists for procedures or high-level steps where possible.
+#. Avoid linking to GitHub blobs where possible.
+
+
+Using clear headings in documentation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We all use simple English when we want to find something. For example, the title of this page could have been any one of the following:
+
+* Search optimization
+* Findable documentation
+* Writing for findability
+
+What we are really trying to describe is how to write documentation so that search engines can find the content. That simple phrase is what drove the title of this section. When you create headings for documentation, spend some time thinking about what you would type in a search box to find it, or more importantly, how someone less familiar with Ansible would try to find that information. Your heading should be the answer to that question.
+
+One word of caution: you do want to limit the size of your headings. A full heading such as `How do I write documentation so search engines can find my content?` is too long. Search engines truncate anything over 50-60 characters. Long headings also wrap on smaller devices such as a smartphone.
+
+Using numbered lists for `zero position` snippets
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Google can optimize search results by adding a `featured snippet <https://support.google.com/websearch/answer/9351707>`_ at the top of the results. This snippet provides a small window into the documentation for the first search result, adding more detail than the other results. It can occasionally answer the reader's question right there, or at least verify that the linked page is what the reader is looking for.
+
+Google returns the featured snippet in the form of numbered steps. Where appropriate, add a numbered list near the top of your documentation page. The steps can be the exact procedure a reader would follow, or a high-level introduction to the documentation topic, such as the numbered list at the top of this page.
+
+Problems with GitHub blobs in search results
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Search engines do not typically return GitHub blobs in search results, at least not in higher-ranked positions. While it is possible and sometimes necessary to link to GitHub blobs from documentation, the better approach is to copy that information into an ``.rst`` page in the Ansible documentation.
+
+Other search hints
+^^^^^^^^^^^^^^^^^^
+
+While it may not be possible to adapt your documentation to all search optimizations, keep the following in mind as you write your documentation:
+
+* **Search engines don't parse beyond the `#` in an HTML page.** For example, all the subheadings on this page are appended to the main page URL after a ``#``. As such, when you search for 'Using numbered lists for zero position snippets', the search result is a link to the top of this page, not a link directly to the subheading you searched for. Using :ref:`local TOCs <local_toc>` helps alleviate this problem, as the reader can scan the local TOC at the top of the page and click through to the section they are looking for. For critical documentation, consider creating a new page that can be a direct search result page.
+
+* **Make your first few sentences clearly describe your page topic.** Search engines return not just the URL, but a short description of the information at the URL. Ansible documentation does not embed description metadata on each page, so search engines return roughly the first couple of sentences (140 characters) on the page instead. That makes your first sentence or two very important to the reader who is searching for something in Ansible.
diff --git a/docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst b/docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst
new file mode 100644
index 00000000..3f6d8d7b
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/spelling_word_choice.rst
@@ -0,0 +1,327 @@
+Spelling - Word Usage - Common Words and Phrases to Use and Avoid
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Acronyms
+++++++++++++++++
+
+Always uppercase. An acronym is a word formed from the initial letters of a name, such as ROM for Read-only memory,
+SaaS for Software as a Service, or by combining initial letters or part of a series of words, such as LILO for LInux
+LOader.
+
+Spell out the acronym before using it alone in text, such as "The Embedded DevKit (EDK)..."
+
+Applications
++++++++++++++++++++
+When used as a proper name, use the capitalization of the product, such as GNUPro, Source-Navigator, and Ansible Tower. When used as a command, use lowercase as appropriate, such as "To start GCC, type ``gcc``."
+
+.. note::
+
+ "vi" is always lowercase.
+
+As
+++++++++
+This is often used to mean "because", but has other connotations, for example, parallel or simultaneous actions. If you mean "because", say "because".
+
+Asks for
+++++++++++++++++
+Use "requests" instead.
+
+Assure/Ensure/Insure
+++++++++++++++++++++++++++++
+Assure implies a sort of mental comfort. As in "I assured my husband that I would eventually bring home beer."
+
+Ensure means "to make sure."
+
+Insure relates to monetary insurance.
+
+
+Back up
+++++++++++++++
+This is a verb. You "back up" files; you do not "backup" files.
+
+Backup
+++++++++++
+This is a noun. You create "backup" files; you do not create "back up" files.
+
+Backward
+++++++++++++++
+Correct. Avoid using backwards unless you are stating that something has "backwards compatibility."
+
+Backwards compatibility
+++++++++++++++++++++++++
+Correct as is.
+
+By way of
+++++++++++++++++++
+Use "using" instead.
+
+Can/May
+++++++++++++++
+Use "can" to describe actions or conditions that are possible. Use "may" only to describe situations where permission is being given. If either "can," "could," or "may" apply, use "can" because it's less tentative.
+
+CD or cd
++++++++++++++++
+When referring to a compact disc, use CD, such as "Insert the CD into the CD-ROM drive." When referring to the change directory command, use cd.
+
+CD-ROM
++++++++++++++
+Correct. Do not use "cdrom," "CD-Rom," "CDROM," "cd-rom" or any other variation. When referring to the drive, use CD-ROM drive, such as "Insert the CD into the CD-ROM drive." The plural is "CD-ROMs."
+
+
+Command line
++++++++++++++++++++
+Correct. Do not use "command-line" or "commandline" as a noun. If used as an adjective, "command-line" is appropriate, for example "command-line arguments".
+
+Use "command line" to describes where to place options for a command, but not where to type the command. Use "shell prompt" instead to describe where to type commands. The line on the display screen where a command is expected. Generally, the command line is the line that contains the most recently displayed command prompt.
+
+
+Daylight saving time (DST)
++++++++++++++++++++++++++++++++
+
+Correct. Do not use "daylight savings time." Daylight Saving Time (DST) is often misspelled "Daylight Savings," with an "s" at the end. Other common variations are "Summer Time" and "Daylight-Saving Time". (https://www.timeanddate.com/time/dst/daylight-savings-time.html)
+
+
+Download
+++++++++++++++++
+Correct. Do not use "down load" or "down-load."
+
+e.g.
+++++++++++
+Spell it out: "For example."
+
+Failover
++++++++++++++++
+When used as a noun, a failover is a backup operation that automatically switches to a standby database, server, or network if the primary system fails or is temporarily shut down for servicing. Failover is an important fault-tolerance function of mission-critical systems that rely on constant accessibility. Failover automatically and transparently redirects requests from the failed or downed system to the backup system, which mimics the operations of the primary system.
+
+Fail over
+++++++++++++
+When used as a verb, fail over is two words since there can be different tenses such as failed over.
+
+Fewer
++++++++++++++++++++
+Fewer is used with plural nouns. Think of things you can count. Time, money, distance, and weight are often listed as exceptions to the traditional "can you count it" rule because they are often thought of as singular amounts (the work will take less than 5 hours, for example).
+
+File name
++++++++++++++
+Correct. Do not use "filename."
+
+File system
++++++++++++++++++++
+Correct. Do not use "filesystem." The system that an operating system or program uses to organize and keep track of files. For example, a hierarchical file system is one that uses directories to organize files into a tree structure. Although the operating system provides its own file management system, you can buy separate file management systems. These systems interact smoothly with the operating system but provide more features, such as improved backup procedures and stricter file protection.
+
+For instance
+++++++++++++++
+For example," instead.
+
+For further/additional/whatever information
+++++++++++++++++++++++++++++++++++++++++++++++
+Use "For more information"
+
+For this reason
+++++++++++++++++++
+Use "therefore".
+
+Forward
+++++++++++++++
+Correct. Avoid using "forwards."
+
+Gigabyte (GB)
+++++++++++++++
+2 to the 30th power (1,073,741,824) bytes. One gigabyte is equal to 1,024 megabytes. Gigabyte is often abbreviated as G or GB.
+
+Got
+++++++++++++++
+Avoid. Use "must" instead.
+
+High-availability
+++++++++++++++++++
+Correct. Do not use "high availability."
+
+Highly available
+++++++++++++++++++
+Correct. Do not use highly-available."
+
+Hostname
++++++++++++++++++
+Correct. Do not use "host name."
+
+i.e.
+++++++++++++++
+Spell it out: "That is."
+
+Installer
+++++++++++++++
+Avoid. Use "installation program" instead.
+
+It's and its
+++++++++++++++
+"It's" is a contraction for "it is;" use "it is" instead of "it's." Use "its" as a possessive pronoun (for example, "the store is known for its low prices").
+
+Less
+++++++++++++
+Less is used with singular nouns. For example "View less details" wouldn't be correct but "View less detail" works. Use fewer when you have plural nouns (things you can count).
+
+Linux
+++++++++++++++
+Correct. Do not use "LINUX" or "linux" unless referring to a command, such as "To start Linux, type linux." Linux is a registered trademark of Linus Torvalds.
+
+Login
+++++++++++++++
+A noun used to refer to the login prompt, such as "At the login prompt, enter your username."
+
+Log in
+++++++++++++++
+A verb used to refer to the act of logging in. Do not use "login," "loggin," "logon," and other variants. For example, "When starting your computer, you are requested to log in..."
+
+Log on
+++++++++++++++
+To make a computer system or network recognize you so that you can begin a computer session. Most personal computers have no log-on procedure -- you just turn the machine on and begin working. For larger systems and networks, however, you usually need to enter a username and password before the computer system will allow you to execute programs.
+
+Lots of
+++++++++++++++
+Use "Several" or something equivalent instead.
+
+Make sure
+++++++++++++++
+This means "be careful to remember, attend to, or find out something." For example, "...make sure that the rhedk group is listed in the output."
+Try to use verify or ensure instead.
+
+Manual/man page
+++++++++++++++++++
+Correct. Two words. Do not use "manpage"
+
+MB
+++++++++
+(1) When spelled MB, short for megabyte (1,000,000 or 1,048,576 bytes, depending on the context).
+(2) When spelled Mb, short for megabit.
+
+MBps
+++++++++++++++
+Short for megabytes per second, a measure of data transfer speed. Mass storage devices are generally measured in MBps.
+
+MySQL
+++++++++++++++
+Common open source database server and client package. Do not use "MYSQL" or "mySQL."
+
+Need to
+++++++++++++++
+Avoid. Use "must" instead.
+
+Read-only
+++++++++++++
+Correct. Use when referring to the access permissions of files or directories.
+
+Real time/real-time
+++++++++++++++++++++++
+Depends. If used as a noun, it is the actual time during which something takes place. For example, "The computer may partly analyze the data in real time (as it comes in) -- R. H. March." If used as an adjective, "real-time" is appropriate. For example, "XEmacs is a self-documenting, customizable, extensible, real-time display editor."
+
+Refer to
+++++++++++++++
+Use to indicate a reference (within a manual or website) or a cross-reference (to another manual or documentation source).
+
+See
+++++++++++++++
+Don't use. Use "Refer to" instead.
+
+Since
+++++++++
+This is often used to mean "because", but "since" has connotations of time, so be careful. If you mean "because", say "because".
+
+Tells
+++++++++++++++
+Use "Instructs" instead.
+
+That/which
+++++++++++++++
+"That" introduces a restrictive clause-a clause that must be there for the sentence to make sense. A restrictive clause often defines the noun or phrase preceding it. "Which" introduces a non-restrictive, parenthetical clause-a clause that could be omitted without affecting the meaning of the sentence. For example: The car was travelling at a speed that would endanger lives. The car, which was traveling at a speed that would endanger lives, swerved onto the sidewalk. Use "who" or "whom," rather than "that" or "which," when referring to a person.
+
+Then/than
+++++++++++++++
+ "Then" refers to a time in the past or the next step in a sequence. "Than" is used for comparisons.
+
+.. image:: images/thenvsthan.jpg
+
+Third-party
+++++++++++++++
+Correct. Do not use "third party".
+
+Troubleshoot
+++++++++++++++
+Correct. Do not use "trouble shoot" or "trouble-shoot." To isolate the source of a problem and fix it. In the case of computer systems, the term troubleshoot is usually used when the problem is suspected to be hardware -related. If the problem is known to be in software, the term debug is more commonly used.
+
+UK
+++++++++++++++
+Correct as is, no periods.
+
+UNIX®
+++++++++++++++
+Correct. Do not use "Unix" or "unix." UNIX® is a registered trademark of The Open Group.
+
+Unset
+++++++++++++++
+Don't use. Use "clear" instead.
+
+US
+++++++++++++++
+Correct as is, no periods.
+
+User
+++++++++++++++
+When referring to the reader, use "you" instead of "user." For example, "The user must..." is incorrect. Use "You must..." instead. If referring to more than one user, calling the collection "users" is acceptable, such as "Other users may wish to access your database."
+
+Username
+++++++++++++++
+Correct. Do not use "user name."
+
+View
+++++++++++++++
+When used as a reference ("View the documentation available online."), do not use View. Use "Refer to" instead.
+
+Within
+++++++++++++++
+Don't use to refer to a file that exists in a directory. Use "In".
+
+World Wide Web
+++++++++++++++
+Correct. Capitalize each word. Abbreviate as "WWW" or "Web."
+
+Webpage
+++++++++++++++
+Correct. Do not use "web page" or "Web page."
+
+Web server
+++++++++++++++
+Correct. Do not use "webserver". For example, "The Apache HTTP Server is the default Web server..."
+
+Website
+++++++++++++++
+Correct. Do not use "web site" or "Web site." For example, "The Ansible website contains ..."
+
+Who/whom
+++++++++++++++
+Use the pronoun "who" as a subject. Use the pronoun "whom" as a direct object, an indirect object, or the object of a preposition. For example: Who owns this? To whom does this belong?
+
+Will
+++++++++++++++
+Do not use future tense unless it is absolutely necessary. For instance, do not use the sentence, "The next section will describe the process in more detail." Instead, use the sentence, "The next section describes the process in more detail."
+
+Wish
+++++++++++++++
+Use "need" instead of "desire" and "wish." Use "want" when the reader's actions are optional (that is, they may not "need" something but may still "want" something).
+
+x86
+++++++++++++++
+Correct. Do not capitalize the "x."
+
+x86_64
+++++++++++++++
+Do not use. Do not use "Hammer". Always use "AMD64 and Intel® EM64T" when referring to this architecture.
+
+You
+++++++++++++++
+Correct. Do not use "I," "he," or "she."
+
+You may
+++++++++++++++
+Try to avoid using this phrase. For example, "you may" can be eliminated from the sentence "You may double-click on the desktop..."
+
diff --git a/docs/docsite/rst/dev_guide/style_guide/trademarks.rst b/docs/docsite/rst/dev_guide/style_guide/trademarks.rst
new file mode 100644
index 00000000..266f16bd
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/trademarks.rst
@@ -0,0 +1,96 @@
+
+Trademark Usage
+``````````````````````````````````````
+Why is it important to use the TM, SM, and ® for our registered marks?
+
+Before a trademark is registered with the United States Patent and Trademark Office, it is appropriate to use the TM or SM symbol, depending on whether the mark covers goods or services. It is important to use the TM or SM because it notifies the public that Ansible claims rights to the mark even though it has not yet been registered.
+
+Once the trademark is registered, it is appropriate to use the ® symbol in place of the TM or SM. The symbol designation must be used in conjunction with the trademark if Ansible is to fully protect its rights. If we don't protect these marks, we run the risk of losing them the way Aspirin, Trampoline, and Escalator were lost.
+
+General Rules:
++++++++++++++++
+
+Trademarks should be used on first reference on a page or within a section.
+
+Use "Red Hat® Ansible Tower®" or "Ansible®" on first reference when referring to products.
+
+Use "Ansible" alone as the company name, as in "Ansible announced quarterly results," which is not marked.
+
+Also add the trademark disclaimer.
+* When using Ansible trademarks in the body of written text, you should use the following credit line in a prominent place, usually a footnote.
+
+ For Registered Trademarks:
+ - [Name of Trademark] is a registered trademark of Red Hat, Inc. in the United States and other countries.
+
+ For Unregistered Trademarks (TMs/SMs):
+ - [Name of Trademark] is a trademark of Red Hat, Inc. in the United States and other countries.
+
+ For registered and unregistered trademarks:
+ - [Name of Trademark] is a registered trademark and [Name of Trademark] is a trademark of Red Hat, Inc. in the United States and other countries.
+
+Guidelines for the proper use of trademarks:
++++++++++++++++++++++++++++++++++++++++++++++
+
+Always distinguish trademarks from surrounding text with at least initial capital letters or in all capital letters.
+
+Always use proper trademark form and spelling.
+
+Never use a trademark as a noun. Always use a trademark as an adjective modifying the noun.
+
+ Correct:
+ Red Hat® Ansible Tower® system performance is incredible.
+
+ Incorrect:
+ Ansible's performance is incredible.
+
+Never use a trademark as a verb. Trademarks are products or services, never actions.
+
+ Correct:
+ "Orchestrate your entire network using Red Hat® Ansible Tower®."
+
+ Incorrect:
+ "Ansible your entire network."
+
+Never modify a trademark to a plural form. Instead, change the generic word from the singular to the plural.
+
+ Correct:
+ "Corporate demand for Red Hat® Ansible Tower® configuration software is surging."
+
+ Incorrect:
+ "Corporate demand for Ansible is surging."
+
+Never modify a trademark from its possessive form, or make a trademark possessive. Always use it in the form it has been registered.
+
+Never translate a trademark into another language.
+
+Never use trademarks to coin new words or names.
+
+Never use trademarks to create a play on words.
+
+Never alter a trademark in any way including through unapproved fonts or visual identifiers.
+
+Never abbreviate or use any Ansible trademarks as an acronym.
+
+The importance of Ansible trademarks
+++++++++++++++++++++++++++++++++++++++++++++++++
+
+The Ansible trademark and the "A" logo in a shaded circle are our most valuable assets. The value of these trademarks encompass the Ansible Brand. Effective trademark use is more than just a name, it defines the level of quality the customer will receive and it ties a product or service to a corporate image. A trademark may serve as the basis for many of our everyday decisions and choices. The Ansible Brand is about how we treat customers and each other. In order to continue to build a stronger more valuable Brand we must use it in a clear and consistent manner.
+
+The mark consists of the letter "A" in a shaded circle. As of 5/11/15, this was a pending trademark (registration in process).
+
+Common Ansible Trademarks
++++++++++++++++++++++++++++++++++++++++
+* Ansible®
+* Ansible Tower®
+
+Other Common Trademarks and Resource Sites:
+++++++++++++++++++++++++++++++++++++++++++++++++
+- Linux is a registered trademark of Linus Torvalds.
+- UNIX® is a registered trademark of The Open Group.
+- Microsoft, Windows, Vista, XP, and NT are registered trademarks or trademarks of Microsoft Corporation in the United States and/or other countries. https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/en-us.aspx
+- Apple, Mac, Mac OS, Macintosh, Pages and TrueType are either registered trademarks or trademarks of Apple Computer, Inc. in the United States and/or other countries. https://www.apple.com/legal/intellectual-property/trademark/appletmlist.html
+- Adobe, Acrobat, GoLive, InDesign, Illustrator, PostScript, PhotoShop and the OpenType logo are either registered trademarks or trademarks of Adobe Systems Incorporated in the United States and/or other countries. https://www.adobe.com/legal/permissions/trademarks.html
+- Macromedia and Macromedia Flash are trademarks of Macromedia, Inc. https://www.adobe.com/legal/permissions/trademarks.html
+- IBM is a registered trademark of International Business Machines Corporation. https://www.ibm.com/legal/us/en/copytrade.shtml
+- Celeron, Celeron Inside, Centrino, Centrino logo, Core Inside, Intel Core, Intel Inside, Intel Inside logo, Itanium, Itanium Inside, Pentium, Pentium Inside, VTune, Xeon, and Xeon Inside are trademarks or registered trademarks of Intel Corporation or its subsidiaries in the United States and other countries. https://www.intel.com/content/www/us/en/legal/trademarks.html
+
diff --git a/docs/docsite/rst/dev_guide/style_guide/voice_style.rst b/docs/docsite/rst/dev_guide/style_guide/voice_style.rst
new file mode 100644
index 00000000..0dff7a87
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/voice_style.rst
@@ -0,0 +1,20 @@
+
+Voice Style
+`````````````````````
+The essence of the Ansible writing style is short sentences that flow naturally together. Mix up sentence structures. Vary sentence subjects. Address the reader directly. Ask a question. And when the reader adjusts to the pace of shorter sentences, write a longer one.
+
+- Write how real people speak...
+- ...but try to avoid slang and colloquialisms that might not translate well into other languages.
+- Say big things with small words.
+- Be direct. Tell the reader exactly what you want them to do.
+- Be honest.
+- Short sentences show confidence.
+- Grammar rules are meant to be bent, but only if the reader knows you are doing this.
+- Choose words with fewer syllables for faster reading and better understanding.
+- Think of copy as one-on-one conversations rather than as a speech. It's more difficult to ignore someone who is speaking to you directly.
+- When possible, start task-oriented sentences (those that direct a user to do something) with action words. For example: Find software... Contact support... Install the media... and so forth.
+
+Active Voice
+------------------
+Use the active voice ("Start Linuxconf by typing...") rather than passive ("Linuxconf can be started by typing...") whenever possible. Active voice makes for more lively, interesting reading.
+Also avoid future tense (or using the term "will") whenever possible. For example, future tense ("The screen will display...") does not read as well as the present tense ("The screen displays..."). Remember, the users you are writing for most often refer to the documentation while they are using the system, not before or after using it.
diff --git a/docs/docsite/rst/dev_guide/style_guide/why_use.rst b/docs/docsite/rst/dev_guide/style_guide/why_use.rst
new file mode 100644
index 00000000..0c1bf51a
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/style_guide/why_use.rst
@@ -0,0 +1,23 @@
+:orphan:
+
+Why Use a Style Guide?
+`````````````````````````````````
+
+Style guides are important because they ensure consistency in the content, look, and feel of a book or a website.
+
+Remember, a style guide is only useful if it is used, updated, and enforced. Style guides are useful for engineering-related documentation, sales and marketing materials, support docs, community contributions, and more.
+
+As changes are made to the overall Ansible site design, be sure to update this style guide with those changes. Or, should other resources listed below have major revisions, consider including company information here for ease of reference.
+
+This style guide incorporates current Ansible resources and information so that overall site and documentation consistency can be met.
+
+.. raw:: html
+
+ <blockquote class="note info">
+
+ "If you don't find it in the index, look very carefully through the entire catalogue."
+ ― Sears, Roebuck and Co., 1897 Sears Roebuck & Co. Catalogue
+
+.. raw:: html
+
+ </blockquote>
diff --git a/docs/docsite/rst/dev_guide/testing.rst b/docs/docsite/rst/dev_guide/testing.rst
new file mode 100644
index 00000000..763f1672
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing.rst
@@ -0,0 +1,243 @@
+.. _developing_testing:
+
+***************
+Testing Ansible
+***************
+
+.. contents::
+ :local:
+
+
+Why test your Ansible contributions?
+====================================
+
+If you're a developer, one of the most valuable things you can do is to look at GitHub issues and help fix bugs, since bug-fixing is almost always prioritized over feature development. Even for non-developers, helping to test pull requests for bug fixes and features is still immensely valuable.
+
+Ansible users who understand how to write playbooks and roles should be able to test their work. GitHub pull requests automatically run a variety of tests (for example, Shippable) that show bugs in action. However, contributors must also test their work outside of the automated GitHub checks and show evidence of these tests in the PR, so that their work is more likely to be reviewed and merged.
+
+Read on to learn how Ansible is tested, how to test your contributions locally, and how to extend testing capabilities.
+
+If you want to learn about testing collections, read :ref:`testing_collections`.
+
+
+
+Types of tests
+==============
+
+At a high level we have the following classifications of tests:
+
+:compile:
+ * :ref:`testing_compile`
+ * Test python code against a variety of Python versions.
+:sanity:
+ * :ref:`testing_sanity`
+ * Sanity tests are made up of scripts and tools used to perform static code analysis.
+ * The primary purpose of these tests is to enforce Ansible coding standards and requirements.
+:integration:
+ * :ref:`testing_integration`
+ * Functional tests of modules and Ansible core functionality.
+:units:
+ * :ref:`testing_units`
+ * Tests directly against individual parts of the code base.
+
+
+
+
+Testing within GitHub & Shippable
+=================================
+
+
+Organization
+------------
+
+When Pull Requests (PRs) are created, they are tested using Shippable, a Continuous Integration (CI) tool. Results are shown at the end of every PR.
+
+When Shippable detects an error that can be linked back to a file modified in the PR, the relevant lines are added as a GitHub comment. For example::
+
+ The test `ansible-test sanity --test pep8` failed with the following errors:
+
+ lib/ansible/modules/network/foo/bar.py:509:17: E265 block comment should start with '# '
+
+ The test `ansible-test sanity --test validate-modules` failed with the following error:
+ lib/ansible/modules/network/foo/bar.py:0:0: E307 version_added should be 2.4. Currently 2.3
+
+From the above example we can see that ``--test pep8`` and ``--test validate-modules`` have identified issues. The commands given allow you to run the same tests locally, ensuring you've fixed all issues without having to push your changes to GitHub and wait for Shippable. For example:
+
+If you don't already have Ansible available, use the local checkout by running::
+
+ source hacking/env-setup
+
+Then run the tests detailed in the GitHub comment::
+
+ ansible-test sanity --test pep8
+ ansible-test sanity --test validate-modules
+
+If there isn't a GitHub comment stating what's failed, you can inspect the results by clicking on the "Details" button under the "checks have failed" message at the end of the PR.
+
+Rerunning a failing CI job
+--------------------------
+
+Occasionally you may find your PR fails for a reason unrelated to your change. This could happen for several reasons, including:
+
+* a temporary issue accessing an external resource, such as a yum or git repo
+* a timeout creating a virtual machine to run the tests on
+
+If either of these issues appear to be the case, you can rerun the Shippable test by:
+
+* adding a comment with ``/rebuild`` (full rebuild) or ``/rebuild_failed`` (rebuild only failed CI nodes) to the PR
+* closing and re-opening the PR (full rebuild)
+* making another change to the PR and pushing to GitHub
+
+If the issue persists, please contact us in ``#ansible-devel`` on Freenode IRC.
+
+
+How to test a PR
+================
+
+Ideally, code should add tests that prove that the code works. That's not always possible and tests are not always comprehensive, especially when a user doesn't have access to a wide variety of platforms, or is using an API or web service. In these cases, live testing against real equipment can be more valuable than automation that runs against simulated interfaces. In any case, things should always be tested manually the first time as well.
+
+Thankfully, helping to test Ansible is pretty straightforward, assuming you are familiar with how Ansible works.
+
+Setup: Checking out a Pull Request
+----------------------------------
+
+You can do this by:
+
+* checking out Ansible
+* fetching the proposed changes into a test branch
+* testing
+* commenting on that particular issue on GitHub
+
+Here's how:
+
+.. warning::
+ Testing source code from GitHub pull requests sent to us does have some inherent risk, as the source code
+ sent may have mistakes or malicious code that could have a negative impact on your system. We recommend
+ doing all testing on a virtual machine, whether a cloud instance, or locally. Some users like Vagrant
+ or Docker for this, but they are optional. It is also useful to have virtual machines of different Linux or
+ other flavors, since some features (for example, package managers such as apt or yum) are specific to those OS versions.
+
+
+Create a fresh area to work in::
+
+ git clone https://github.com/ansible/ansible.git ansible-pr-testing
+ cd ansible-pr-testing
+
+Next, find the pull request you'd like to test and make note of its number. It will look something like this::
+
+ Use os.path.sep instead of hardcoding / #65381
+
+.. note:: Only test ``ansible:devel``
+
+   It is important that the PR target be ``ansible:devel``, as we do not accept pull requests into any other branch. Dot releases are cherry-picked manually by Ansible staff.
+
+Use the pull request number when you fetch the proposed changes and create your branch for testing::
+
+ git fetch origin refs/pull/XXXX/head:testing_PRXXXX
+ git checkout testing_PRXXXX
+
+The first command fetches the proposed changes from the pull request and creates a new branch named ``testing_PRXXXX``, where XXXX is the actual number associated with the pull request (for example, 65381). The second command checks out the newly created branch.
+
+.. note::
+ If the GitHub user interface shows that the pull request will not merge cleanly, we do not recommend proceeding if you are not somewhat familiar with git and coding, as you will have to resolve a merge conflict. This is the responsibility of the original pull request contributor.
+
+.. note::
+ Some users do not create feature branches, which can cause problems when they have multiple, unrelated commits in their version of ``devel``. If the source looks like ``someuser:devel``, make sure there is only one commit listed on the pull request.
+
+The Ansible source includes a script, frequently used by Ansible developers, that allows you to use Ansible directly from source without requiring a full installation.
+
+Simply source it (to use the Linux/Unix terminology) to begin using it immediately::
+
+ source ./hacking/env-setup
+
+This script modifies the ``PYTHONPATH`` environment variable (along with a few other things); the changes remain in effect only as long as your shell session is open.
+
+Testing the Pull Request
+------------------------
+
+At this point, you should be ready to begin testing!
+
+Some ideas of what to test are:
+
+* Create a test playbook with the examples in it and check that they function correctly
+* Test to see if any Python backtraces are returned (that's a bug)
+* Test on different operating systems, or against different library versions
+
+Run sanity tests
+````````````````
+
+.. code:: shell
+
+ ansible-test sanity
+
+More information: :ref:`testing_sanity`
+
+Run unit tests
+``````````````
+
+.. code:: shell
+
+ ansible-test units
+
+More information: :ref:`testing_units`
+
+Run integration tests
+`````````````````````
+
+.. code:: shell
+
+ ansible-test integration -v ping
+
+More information: :ref:`testing_integration`
+
+Any potential issues should be added as comments on the pull request (and it's acceptable to comment if the feature works as well). Remember to include the output of ``ansible --version``.
+
+Example::
+
+ Works for me! Tested on `Ansible 2.3.0`. I verified this on CentOS 6.5 and also Ubuntu 14.04.
+
+If the PR does not resolve the issue, or if you see any failures from the unit/integration tests, just include that output instead:
+
+ | This change causes errors for me.
+ |
+ | When I ran this Ubuntu 16.04 it failed with the following:
+ |
+ | \```
+ | some output
+ | StackTrace
+ | some other output
+ | \```
+
+Code Coverage Online
+````````````````````
+
+`The online code coverage reports <https://codecov.io/gh/ansible/ansible>`_ are a good way
+to identify areas for testing improvement in Ansible. By following the red colors you can
+drill down through the reports to find files which have no tests at all. Adding both
+integration and unit tests that clearly show how code should work, verify important
+Ansible functions, and increase testing coverage in areas where there is none is a valuable
+way to help improve Ansible.
+
+The code coverage reports only cover the ``devel`` branch of Ansible, where new feature
+development takes place. Pull requests and new code will be missing from the codecov.io
+coverage reports, so local reporting is needed. Most ``ansible-test`` commands allow you
+to collect code coverage; this is particularly useful for indicating where to extend
+testing. See :ref:`testing_running_locally` for more information.
+
+
+Want to know more about testing?
+================================
+
+If you'd like to know more about the plans for improving the testing of Ansible, why not join the
+`Testing Working Group <https://github.com/ansible/community/blob/master/meetings/README.md>`_?
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst b/docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst
new file mode 100644
index 00000000..e3a5d8b8
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/action-plugin-docs.rst
@@ -0,0 +1,4 @@
+action-plugin-docs
+==================
+
+Each action plugin should have a matching module of the same name to provide documentation.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst b/docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst
new file mode 100644
index 00000000..9f2c4f5f
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/ansible-doc.rst
@@ -0,0 +1,4 @@
+ansible-doc
+===========
+
+Verifies that ``ansible-doc`` can parse module documentation on all supported Python versions.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst b/docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst
new file mode 100644
index 00000000..1906886f
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/ansible-var-precedence-check.rst
@@ -0,0 +1,6 @@
+:orphan:
+
+ansible-var-precedence-check
+============================
+
+Check the order of precedence for Ansible variables against :ref:`ansible_variable_precedence`.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst b/docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst
new file mode 100644
index 00000000..5e0cc044
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/azure-requirements.rst
@@ -0,0 +1,10 @@
+:orphan:
+
+azure-requirements
+==================
+
+Update the Azure integration test requirements file when changes are made to the Azure packaging requirements file:
+
+.. code-block:: bash
+
+ cp packaging/requirements/requirements-azure.txt test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst b/docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst
new file mode 100644
index 00000000..dcec7ed3
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst
@@ -0,0 +1,11 @@
+bin-symlinks
+============
+
+The ``bin/`` directory in Ansible must contain only symbolic links to executable files.
+These files must reside in the ``lib/ansible/`` or ``test/lib/ansible_test/`` directories.
+
+This is required to allow ``ansible-test`` to work with containers and remote hosts when running from an installed version of Ansible.
+
+Symlinks for each entry point in ``bin/`` must also be present in ``test/lib/ansible_test/_data/injector/``.
+Each symlink should point to the ``python.py`` script in the same directory.
+This facilitates running with the correct Python interpreter and enabling code coverage.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst b/docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst
new file mode 100644
index 00000000..51c0c089
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/boilerplate.rst
@@ -0,0 +1,11 @@
+:orphan:
+
+boilerplate
+===========
+
+Most Python files should include the following boilerplate:
+
+.. code-block:: python
+
+ from __future__ import (absolute_import, division, print_function)
+ __metaclass__ = type
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst b/docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst
new file mode 100644
index 00000000..639bb0bf
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst
@@ -0,0 +1,4 @@
+botmeta
+=======
+
+Verifies that ``.github/BOTMETA.yml`` is valid.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/changelog.rst b/docs/docsite/rst/dev_guide/testing/sanity/changelog.rst
new file mode 100644
index 00000000..8cb53329
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/changelog.rst
@@ -0,0 +1,17 @@
+changelog
+=========
+
+Basic linting of changelog fragments with `antsibull-changelog lint <https://pypi.org/project/antsibull-changelog/>`_.
+
+One or more of the following sections are required:
+
+- major_changes
+- minor_changes
+- breaking_changes
+- deprecated_features
+- removed_features
+- security_fixes
+- bugfixes
+- known_issues
+
+New modules and plugins must not be included in changelog fragments.
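+
+As a sketch, a fragment saved as ``changelogs/fragments/12345-fix-example.yml`` (the filename, module name, and wording here are hypothetical) could contain:
+
+.. code-block:: yaml
+
+   # hypothetical example fragment - the module name and text are illustrative
+   bugfixes:
+     - example_module - fixed a crash when the remote host returned an empty response.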
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/compile.rst b/docs/docsite/rst/dev_guide/testing/sanity/compile.rst
new file mode 100644
index 00000000..222f94e4
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/compile.rst
@@ -0,0 +1,4 @@
+compile
+=======
+
+See :ref:`testing_compile` for more information.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst b/docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst
new file mode 100644
index 00000000..e83bc78d
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/configure-remoting-ps1.rst
@@ -0,0 +1,5 @@
+configure-remoting-ps1
+======================
+
+The file ``examples/scripts/ConfigureRemotingForAnsible.ps1`` is required and must be a regular file.
+It is used by external automated processes and cannot be moved, renamed or replaced with a symbolic link.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst b/docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst
new file mode 100644
index 00000000..950805a2
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/deprecated-config.rst
@@ -0,0 +1,6 @@
+:orphan:
+
+deprecated-config
+=================
+
+``DOCUMENTATION`` config is scheduled for removal
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst b/docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst
new file mode 100644
index 00000000..23f3c552
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/docs-build.rst
@@ -0,0 +1,4 @@
+docs-build
+==========
+
+Verifies that ``make singlehtmldocs`` in ``docs/docsite/`` completes without errors.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst b/docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst
new file mode 100644
index 00000000..e87bb71e
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/empty-init.rst
@@ -0,0 +1,10 @@
+empty-init
+==========
+
+The ``__init__.py`` files under the following directories must be empty. For some of these (modules
+and tests), ``__init__.py`` files with code won't be used. For others (module_utils), we want the
+possibility of using Python namespaces, which an empty ``__init__.py`` allows.
+
+- ``lib/ansible/modules/``
+- ``lib/ansible/module_utils/``
+- ``test/units/``
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst b/docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst
new file mode 100644
index 00000000..9d150e1f
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/future-import-boilerplate.rst
@@ -0,0 +1,51 @@
+future-import-boilerplate
+=========================
+
+Most Python files should include the following boilerplate at the top of the file, right after the
+comment header:
+
+.. code-block:: python
+
+ from __future__ import (absolute_import, division, print_function)
+
+This uses Python 3 semantics for absolute versus relative imports, division, and print. By doing this,
+we can write code that is portable between Python 2 and Python 3 by following the Python 3 semantics.
+
+
+absolute_import
+---------------
+
+When Python 2 encounters an import of a name in a file, like ``import copy``, it attempts to load
+``copy.py`` from the same directory as the importing file. This can cause problems if there is a Python
+file of that name in the directory and also a Python module in ``sys.path`` with that same name. In
+that case, Python 2 would load the one in the same directory and there would be no way to load the
+one on ``sys.path``. Python 3 fixes this by making imports absolute by default. ``import copy``
+will find ``copy.py`` from ``sys.path``. If you want to import ``copy.py`` from the same directory,
+the code needs to be changed to perform a relative import: ``from . import copy``.
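+
+As a minimal sketch, assume a package that contains its own ``copy.py`` (a hypothetical file used only for illustration); with the boilerplate, both modules remain reachable:
+
+.. code-block:: python
+
+   from __future__ import absolute_import
+
+   import copy                       # finds the standard library module on sys.path
+   from . import copy as local_copy  # explicit relative import of the sibling copy.py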
+
+.. seealso::
+
+ * `Absolute and relative imports <https://www.python.org/dev/peps/pep-0328>`_
+
+division
+--------
+
+In Python 2, the division operator (``/``) returns integer values when used with integers. If there
+was a remainder, this part would be left off (known as `floor division`). In Python 3, the division
+operator (``/``) always returns a floating point number. Code that needs to calculate the integer
+portion of the quotient needs to switch to using the floor division operator (``//``) instead.
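+
+A short sketch of the difference:
+
+.. code-block:: python
+
+   from __future__ import division
+
+   print(5 / 2)   # 2.5 with Python 3 semantics (2 under legacy Python 2 semantics)
+   print(5 // 2)  # 2 - floor division behaves the same on Python 2 and Python 3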
+
+.. seealso::
+
+ * `Changing the division operator <https://www.python.org/dev/peps/pep-0238>`_
+
+print_function
+--------------
+
+In Python 2, :func:`python:print` is a keyword. In Python 3, :func:`python3:print` is a function with different
+parameters. Using this ``__future__`` allows using the Python 3 print semantics everywhere.
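+
+A short sketch using only the standard library:
+
+.. code-block:: python
+
+   from __future__ import print_function
+
+   import sys
+
+   print('testing complete', file=sys.stderr)  # the file keyword argument works on Python 2 as well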
+
+.. seealso::
+
+ * `Make print a function <https://www.python.org/dev/peps/pep-3105>`_
+
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst b/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst
new file mode 100644
index 00000000..9d7a94c0
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/ignores.rst
@@ -0,0 +1,99 @@
+ignores
+=======
+
+Sanity tests for individual files can be skipped, and specific errors can be ignored.
+
+When to Ignore Errors
+---------------------
+
+Sanity tests are designed to improve code quality and identify common issues with content.
+When issues are identified during development, those issues should be corrected.
+
+As development of Ansible continues, sanity tests are expanded to detect issues that previous releases could not.
+To allow time for existing content to be updated to pass newer tests, ignore entries can be added.
+New content should not use ignores for existing sanity tests.
+
+When code is fixed to resolve sanity test errors, any relevant ignores must also be removed.
+If the ignores are not removed, this will be reported as an unnecessary ignore error.
+This is intended to prevent future regressions due to the same error recurring after being fixed.
+
+When to Skip Tests
+------------------
+
+Although rare, there are reasons for skipping a sanity test instead of ignoring the errors it reports.
+
+If a sanity test results in a traceback when processing content, that error cannot be ignored.
+If this occurs, open a new `bug report <https://github.com/ansible/ansible/issues/new?template=bug_report.md>`_ for the issue so it can be fixed.
+If the traceback occurs due to an issue with the content, that issue should be fixed.
+If the content is correct, the test will need to be skipped until the bug in the sanity test is fixed.
+
+    Caution should be used when skipping sanity tests instead of ignoring them.
+    Since the test is skipped entirely, resolution of the issue will not be automatically detected.
+    This will prevent regression detection from working once the issue has been resolved.
+    For this reason it is a good idea to periodically review skipped entries manually to verify they are required.
+
+Ignore File Location
+--------------------
+
+The location of the ignore file depends on the type of content being tested.
+
+Ansible Collections
+~~~~~~~~~~~~~~~~~~~
+
+Since sanity tests change between Ansible releases, a separate ignore file is needed for each Ansible major release.
+
+The filename is ``tests/sanity/ignore-X.Y.txt`` where ``X.Y`` is the Ansible release being used to test the collection.
+
+Maintaining a separate file for each Ansible release allows a collection to pass tests for multiple versions of Ansible.
+
+Ansible
+~~~~~~~
+
+When testing Ansible, all ignores are placed in the ``test/sanity/ignore.txt`` file.
+
+Only a single file is needed because ``ansible-test`` is developed and released as a part of Ansible itself.
+
+Ignore File Format
+------------------
+
+The ignore file contains one entry per line.
+Each line consists of two columns, separated by a single space.
+Comments may be added at the end of an entry, starting with a hash (``#``) character, which can be preceded by zero or more spaces.
+Blank and comment-only lines are not allowed.
+
+The first column specifies the file path that the entry applies to.
+File paths must be relative to the root of the content being tested.
+This is either the Ansible source or an Ansible collection.
+File paths cannot contain a space or the hash (``#``) character.
+
+The second column specifies the sanity test that the entry applies to.
+This will be the name of the sanity test.
+If the sanity test is specific to a version of Python, the name will include a dash (``-``) and the relevant Python version.
+If the named test uses error codes then the error code to ignore must be appended to the name of the test, separated by a colon (``:``).
+
+Below are some example ignore entries for an Ansible collection::
+
+ roles/my_role/files/my_script.sh shellcheck:SC2154 # ignore undefined variable
+ plugins/modules/my_module.py validate-modules:E105 # ignore license check
+ plugins/modules/my_module.py import-3.8 # needs update to support collections.abc on Python 3.8+
+
+It is also possible to skip a sanity test for a specific file.
+This is done by adding ``!skip`` after the sanity test name in the second column.
+When this is done, no error code is included, even if the sanity test uses error codes.
+
+Below are some example skip entries for an Ansible collection::
+
+ plugins/module_utils/my_util.py validate-modules!skip # waiting for bug fix in module validator
+ plugins/lookup/my_plugin.py compile-2.6!skip # Python 2.6 is not supported on the controller
+
+Ignore File Errors
+------------------
+
+There are various errors that can be reported for the ignore file itself:
+
+- syntax errors parsing the ignore file
+- references to a file path that does not exist
+- references to a sanity test that does not exist
+- ignoring an error that does not occur
+- ignoring a file which is skipped
+- duplicate entries
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/import.rst b/docs/docsite/rst/dev_guide/testing/sanity/import.rst
new file mode 100644
index 00000000..4b29636a
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/import.rst
@@ -0,0 +1,5 @@
+import
+======
+
+All Python imports in ``lib/ansible/modules/`` and ``lib/ansible/module_utils/`` which are not from the Python standard library
+must be imported in a try/except ImportError block.
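+
+For example, a module might guard a third-party import like this (a minimal sketch; ``requests`` is just an illustrative dependency, and the exact failure handling depends on the module):
+
+.. code-block:: python
+
+    import traceback
+
+    REQUESTS_IMP_ERR = None
+    try:
+        import requests
+        HAS_REQUESTS = True
+    except ImportError:
+        REQUESTS_IMP_ERR = traceback.format_exc()
+        HAS_REQUESTS = False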
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst b/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst
new file mode 100644
index 00000000..e6cc1e91
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst
@@ -0,0 +1,182 @@
+integration-aliases
+===================
+
+Integration tests are executed by ``ansible-test`` and reside in directories under ``test/integration/targets/``.
+Each test MUST have an ``aliases`` file to control test execution.
+
+Aliases are explained in the following sections. Each alias must be on a separate line in an ``aliases`` file.
+
+Groups
+------
+
+Tests must be configured to run in exactly one group. This is done by adding the appropriate group to the ``aliases`` file.
+
+The following are examples of some of the available groups:
+
+- ``shippable/posix/group1``
+- ``shippable/windows/group2``
+- ``shippable/azure/group3``
+- ``shippable/aws/group1``
+- ``shippable/cloud/group1``
+
+Groups are used to balance tests across multiple CI jobs to minimize test run time.
+They also improve efficiency by keeping tests with similar requirements running together.
+
+When selecting a group for a new test, use the same group as existing tests similar to the one being added.
+If more than one group is available, select one randomly.
+
+Setup
+-----
+
+Aliases can be used to execute setup targets before running tests:
+
+- ``setup/once/TARGET`` - Run the target ``TARGET`` before the first target that requires it.
+- ``setup/always/TARGET`` - Run the target ``TARGET`` before each target that requires it.
+
+Requirements
+------------
+
+Aliases can be used to express some test requirements:
+
+- ``needs/privileged`` - Requires ``--docker-privileged`` when running tests with ``--docker``.
+- ``needs/root`` - Requires running tests as ``root`` or with ``--docker``.
+- ``needs/ssh`` - Requires SSH connections to localhost (or the test container with ``--docker``) without a password.
+- ``needs/httptester`` - Requires use of the http-test-container to run tests.
+
+Dependencies
+------------
+
+Some test dependencies are automatically discovered:
+
+- Ansible role dependencies defined in ``meta/main.yml`` files.
+- Setup targets defined with ``setup/*`` aliases.
+- Symbolic links from one target to a file in another target.
+
+Aliases can be used to declare dependencies that are not handled automatically:
+
+- ``needs/target/TARGET`` - Requires use of the test target ``TARGET``.
+- ``needs/file/PATH`` - Requires use of the file ``PATH`` relative to the git root.
+
+Skipping
+--------
+
+Aliases can be used to skip platforms using one of the following:
+
+- ``skip/freebsd`` - Skip tests on FreeBSD.
+- ``skip/osx`` - Skip tests on macOS.
+- ``skip/rhel`` - Skip tests on RHEL.
+- ``skip/docker`` - Skip tests when running in a Docker container.
+
+Platform versions, as specified using the ``--remote`` option with the ``/`` removed, can also be skipped:
+
+- ``skip/freebsd11.1`` - Skip tests on FreeBSD 11.1.
+- ``skip/rhel7.6`` - Skip tests on RHEL 7.6.
+
+Windows versions, as specified using the ``--windows`` option, can also be skipped:
+
+- ``skip/windows/2008`` - Skip tests on Windows Server 2008.
+- ``skip/windows/2012-R2`` - Skip tests on Windows Server 2012 R2.
+
+Aliases can be used to skip Python major versions using one of the following:
+
+- ``skip/python2`` - Skip tests on Python 2.x.
+- ``skip/python3`` - Skip tests on Python 3.x.
+
+For more fine-grained skipping, use conditionals in integration test playbooks, such as:
+
+.. code-block:: yaml
+
+    when: ansible_distribution in ['Ubuntu']
+
+
+Miscellaneous
+-------------
+
+There are several other aliases available as well:
+
+- ``destructive`` - Requires ``--allow-destructive`` to run without ``--docker`` or ``--remote``.
+- ``hidden`` - Target is ignored. Usable as a dependency. Automatic for ``setup_`` and ``prepare_`` prefixed targets.
+
+Unstable
+--------
+
+Tests which sometimes fail should be marked with the ``unstable`` alias until the instability has been fixed.
+These tests will continue to run for pull requests which modify the test or the module under test.
+
+This avoids unnecessary test failures for other pull requests, as well as tests on merge runs and nightly CI jobs.
+
+There are two ways to run unstable tests manually:
+
+- Use the ``--allow-unstable`` option for ``ansible-test``.
+- Prefix the test name with ``unstable/`` when passing it to ``ansible-test``.
+
+Tests will be marked as unstable by a member of the Ansible Core Team.
+GitHub issues_ will be created to track each unstable test.
+
+Disabled
+--------
+
+Tests which always fail should be marked with the ``disabled`` alias until they can be fixed.
+
+Disabled tests are automatically skipped.
+
+There are two ways to run disabled tests manually:
+
+- Use the ``--allow-disabled`` option for ``ansible-test``.
+- Prefix the test name with ``disabled/`` when passing it to ``ansible-test``.
+
+Tests will be marked as disabled by a member of the Ansible Core Team.
+GitHub issues_ will be created to track each disabled test.
+
+Unsupported
+-----------
+
+Tests which cannot be run in CI should be marked with the ``unsupported`` alias.
+Most tests can be supported through the use of simulators and/or cloud plugins.
+
+However, if that is not possible then marking a test as unsupported will prevent it from running in CI.
+
+There are two ways to run unsupported tests manually:
+
+- Use the ``--allow-unsupported`` option for ``ansible-test``.
+- Prefix the test name with ``unsupported/`` when passing it to ``ansible-test``.
+
+Tests will be marked as unsupported by the contributor of the test.
+
+Cloud
+-----
+
+Tests for cloud services and other modules that require access to external APIs usually require special support for testing in CI.
+
+These require an additional alias to indicate the required test plugin.
+
+Some of the available aliases are:
+
+- ``cloud/aws``
+- ``cloud/azure``
+- ``cloud/cs``
+- ``cloud/foreman``
+- ``cloud/openshift``
+- ``cloud/tower``
+- ``cloud/vcenter``
+
+Untested
+--------
+
+Every module and plugin should have integration tests, even if the tests cannot be run in CI.
+
+Issues
+------
+
+Tests that are marked as unstable_ or disabled_ will have an issue created to track the status of the test.
+Each issue will be assigned to one of the following projects:
+
+- `AWS <https://github.com/ansible/ansible/projects/21>`_
+- `Azure <https://github.com/ansible/ansible/projects/22>`_
+- `Windows <https://github.com/ansible/ansible/projects/23>`_
+- `General <https://github.com/ansible/ansible/projects/25>`_
+
+Questions
+---------
+
+For questions about integration tests reach out to @mattclay or @gundalow on GitHub or ``#ansible-devel`` on IRC.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst b/docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst
new file mode 100644
index 00000000..d56cfc12
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/line-endings.rst
@@ -0,0 +1,4 @@
+line-endings
+============
+
+All files must use ``\n`` for line endings instead of ``\r\n``.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst b/docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst
new file mode 100644
index 00000000..c7327b39
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/metaclass-boilerplate.rst
@@ -0,0 +1,23 @@
+metaclass-boilerplate
+=====================
+
+Most Python files should include the following boilerplate at the top of the file, right after the
+comment header and ``from __future__ import``:
+
+.. code-block:: python
+
+ __metaclass__ = type
+
+
+Python 2 has "new-style classes" and "old-style classes" whereas Python 3 only has new-style classes.
+Adding the ``__metaclass__ = type`` boilerplate makes every class defined in that file into
+a new-style class as well.
+
+.. code-block:: python
+
+ from __future__ import absolute_import, division, print_function
+ __metaclass__ = type
+
+ class Foo:
+ # This is a new-style class even on Python 2 because of the __metaclass__
+ pass
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst
new file mode 100644
index 00000000..489f917f
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-assert.rst
@@ -0,0 +1,16 @@
+no-assert
+=========
+
+Do not use ``assert`` in production Ansible Python code. When running Python
+with optimizations, Python will remove ``assert`` statements, potentially
+allowing for unexpected behavior throughout the Ansible code base.
+
+Instead of using ``assert`` you should utilize simple ``if`` statements
+that result in raising an exception. There is a new exception called
+``AnsibleAssertionError`` that inherits from ``AnsibleError`` and
+``AssertionError``. When possible, utilize a more specific exception
+than ``AnsibleAssertionError``.
+
+Modules will not have access to ``AnsibleAssertionError`` and should instead
+raise ``AssertionError``, a more specific exception, or just use
+``module.fail_json`` at the failure point.
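+
+A minimal sketch of the replacement pattern for controller-side code (``AnsibleAssertionError`` lives in ``ansible.errors``):
+
+.. code-block:: python
+
+    from ansible.errors import AnsibleAssertionError
+    from ansible.module_utils.six import string_types
+
+    # Instead of: assert isinstance(role_name, string_types)
+    if not isinstance(role_name, string_types):
+        raise AnsibleAssertionError('role_name must be a string')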
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst
new file mode 100644
index 00000000..f1b6ba92
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-basestring.rst
@@ -0,0 +1,11 @@
+no-basestring
+=============
+
+Do not use ``isinstance(s, basestring)`` as ``basestring`` has been removed in
+Python 3. You can import ``string_types``, ``binary_type``, or ``text_type``
+from ``ansible.module_utils.six`` and then use ``isinstance(s, string_types)``
+or ``isinstance(s, (binary_type, text_type))`` instead.
+
+If this is part of code to convert a string to a particular type,
+``ansible.module_utils._text`` contains several functions that may be even
+better for you: ``to_text``, ``to_bytes``, and ``to_native``.
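+
+A minimal sketch of the replacement pattern:
+
+.. code-block:: python
+
+    from ansible.module_utils.six import string_types
+
+    def describe(value):
+        # Replaces: isinstance(value, basestring)
+        if isinstance(value, string_types):
+            return 'a string'
+        return 'not a string'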
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst
new file mode 100644
index 00000000..e231c796
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iteritems.rst
@@ -0,0 +1,16 @@
+no-dict-iteritems
+=================
+
+The ``dict.iteritems`` method has been removed in Python 3. There are two recommended alternatives:
+
+.. code-block:: python
+
+ for KEY, VALUE in DICT.items():
+ pass
+
+.. code-block:: python
+
+ from ansible.module_utils.six import iteritems
+
+ for KEY, VALUE in iteritems(DICT):
+ pass
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst
new file mode 100644
index 00000000..9dc4a978
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-iterkeys.rst
@@ -0,0 +1,9 @@
+no-dict-iterkeys
+================
+
+The ``dict.iterkeys`` method has been removed in Python 3. Use the following instead:
+
+.. code-block:: python
+
+ for KEY in DICT:
+ pass
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst
new file mode 100644
index 00000000..979450e4
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-dict-itervalues.rst
@@ -0,0 +1,16 @@
+no-dict-itervalues
+==================
+
+The ``dict.itervalues`` method has been removed in Python 3. There are two recommended alternatives:
+
+.. code-block:: python
+
+ for VALUE in DICT.values():
+ pass
+
+.. code-block:: python
+
+ from ansible.module_utils.six import itervalues
+
+ for VALUE in itervalues(DICT):
+ pass
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst
new file mode 100644
index 00000000..584fbc86
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-get-exception.rst
@@ -0,0 +1,28 @@
+no-get-exception
+================
+
+We created a function, ``ansible.module_utils.pycompat24.get_exception`` to
+help retrieve exceptions in a manner compatible with Python 2.4 through
+Python 3.6. We no longer support Python 2.4 and Python 2.5 so this is
+extraneous and we want to deprecate the function. Porting code should look
+something like this:
+
+.. code-block:: python
+
+    # Unfixed code:
+    try:
+        raise IOError('test')
+    except IOError:
+        e = get_exception()
+        do_something(e)
+    except:
+        e = get_exception()
+        do_something_else(e)
+
+    # After fixing:
+    try:
+        raise IOError('test')
+    except IOError as e:
+        do_something(e)
+    except Exception as e:
+        do_something_else(e)
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst
new file mode 100644
index 00000000..6e6f565e
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-illegal-filenames.rst
@@ -0,0 +1,61 @@
+no-illegal-filenames
+====================
+
+Files and directories should not contain illegal characters or names so that
+Ansible can be checked out on any operating system.
+
+Illegal Characters
+------------------
+
+The following characters are not allowed to be used in any part of the file or
+directory name:
+
+* ``<``
+* ``>``
+* ``:``
+* ``"``
+* ``/``
+* ``\``
+* ``|``
+* ``?``
+* ``*``
+* Any characters whose integer representations are in the range from 0 through 31, such as ``\n``
+
+The following characters are not allowed to be used as the last character of a
+file or directory:
+
+* ``.``
+* ``" "`` (just the space character)
+
+Illegal Names
+-------------
+
+The following names are not allowed to be used as the name of a file or
+directory, excluding the extension:
+
+* ``CON``
+* ``PRN``
+* ``AUX``
+* ``NUL``
+* ``COM1``
+* ``COM2``
+* ``COM3``
+* ``COM4``
+* ``COM5``
+* ``COM6``
+* ``COM7``
+* ``COM8``
+* ``COM9``
+* ``LPT1``
+* ``LPT2``
+* ``LPT3``
+* ``LPT4``
+* ``LPT5``
+* ``LPT6``
+* ``LPT7``
+* ``LPT8``
+* ``LPT9``
+
+For example, the files ``folder/COM1`` and ``folder/COM1.txt`` are illegal, but
+``folder/COM1-file`` and ``folder/COM1-file.txt`` are allowed.
+
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst
new file mode 100644
index 00000000..7ccf0dc7
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-main-display.rst
@@ -0,0 +1,12 @@
+no-main-display
+===============
+
+As of Ansible 2.8, ``Display`` should no longer be imported from ``__main__``.
+
+``Display`` is now a singleton and should be utilized like the following::
+
+ from ansible.utils.display import Display
+ display = Display()
+
+There is no longer a need to attempt ``from __main__ import display`` inside
+a ``try/except`` block.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst
new file mode 100644
index 00000000..50dc7baf
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst
@@ -0,0 +1,4 @@
+no-smart-quotes
+===============
+
+Smart quotes (``”“‘’``) should not be used. Use plain ASCII quotes (``"'``) instead.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst
new file mode 100644
index 00000000..0c1f99ac
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-tests-as-filters.rst
@@ -0,0 +1,12 @@
+:orphan:
+
+no-tests-as-filters
+===================
+
+Support for using Ansible-provided Jinja2 tests as filters will be removed in Ansible 2.9.
+
+Prior to Ansible 2.5, Jinja2 tests included within Ansible were most often used as filters. The main difference in use is that filters are referenced as ``variable | filter_name`` while Jinja2 tests are referenced as ``variable is test_name``.
+
+Jinja2 tests are used for comparisons, whereas filters are used for data manipulation, and the two have different applications in Jinja2. This change helps differentiate the concepts for a better understanding of Jinja2, and of where each can be appropriately used.
+
+As of Ansible 2.5, using an Ansible-provided Jinja2 test with filter syntax displays a deprecation error.
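+
+A brief sketch of the two syntaxes (``result`` is an assumed registered variable; ``succeeded`` is one of the Ansible-provided tests):
+
+.. code-block:: yaml
+
+    # Deprecated: test invoked with filter syntax
+    when: result | succeeded
+
+    # Preferred: test invoked with test syntax
+    when: result is succeeded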
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst
new file mode 100644
index 00000000..5174a43a
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-underscore-variable.rst
@@ -0,0 +1,30 @@
+:orphan:
+
+no-underscore-variable
+======================
+
+In the future, Ansible may use the identifier ``_`` to internationalize its
+message strings. To be ready for that, we need to make sure that there are
+no conflicting identifiers defined in the code base.
+
+In common practice, ``_`` is frequently used as a dummy variable (a variable
+to receive a value from a function where the value is useless and never used).
+In Ansible, we're using the identifier ``dummy`` for this purpose instead.
+
+Example of unfixed code:
+
+.. code-block:: python
+
+ for _ in range(0, retries):
+ success = retry_thing()
+ if success:
+ break
+
+Example of fixed code:
+
+.. code-block:: python
+
+ for dummy in range(0, retries):
+ success = retry_thing()
+ if success:
+ break
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst
new file mode 100644
index 00000000..c4f3586a
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-unicode-literals.rst
@@ -0,0 +1,16 @@
+no-unicode-literals
+===================
+
+The use of :code:`from __future__ import unicode_literals` has been deemed an anti-pattern. The
+problems with it are:
+
+* It makes it so one can't jump into the middle of a file and know whether a bare literal string is
+ a byte string or text string. The programmer has to first check the top of the file to see if the
+ import is there.
+* It removes the ability to define native strings (a string which should be a byte string on Python 2
+  and a text string on Python 3) via a string literal.
+* It makes for more context switching. A programmer could be reading one file which has
+  ``unicode_literals`` and know that bare string literals are text strings but then switch to another
+ file (perhaps tracing program execution into a third party library) and have to switch their
+ understanding of what bare string literals are.
+
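+For example, with the import in place a native string can no longer be written as a bare literal (a brief sketch):
+
+.. code-block:: python
+
+    from __future__ import unicode_literals
+
+    GREETING = 'hello'  # a text string on both Python 2 and Python 3; no bare literal can produce a native str on Python 2 now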
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst
new file mode 100644
index 00000000..3d76324e
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-unwanted-files.rst
@@ -0,0 +1,13 @@
+no-unwanted-files
+=================
+
+Specific file types are allowed in certain directories:
+
+- ``lib`` - All content must reside in the ``lib/ansible`` directory.
+
+- ``lib/ansible`` - Only source code with one of the following extensions is allowed:
+
+ - ``*.cs`` - C#
+ - ``*.ps1`` - PowerShell
+ - ``*.psm1`` - PowerShell
+ - ``*.py`` - Python
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst b/docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst
new file mode 100644
index 00000000..fdaf07b0
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/no-wildcard-import.rst
@@ -0,0 +1,31 @@
+:orphan:
+
+no-wildcard-import
+==================
+
+Using :code:`import *` is a bad habit which pollutes your namespace, hinders
+debugging, and interferes with static analysis of code. For those reasons, we
+limit the use of :code:`import *` in the Ansible code. Change your
+code to import the specific names that you need instead.
+
+Examples of unfixed code:
+
+.. code-block:: python
+
+ from ansible.module_utils.six import *
+ if isinstance(variable, string_types):
+ do_something(variable)
+
+ from ansible.module_utils.basic import *
+ module = AnsibleModule()
+
+Examples of fixed code:
+
+.. code-block:: python
+
+ from ansible.module_utils import six
+ if isinstance(variable, six.string_types):
+ do_something(variable)
+
+ from ansible.module_utils.basic import AnsibleModule
+ module = AnsibleModule()
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst b/docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst
new file mode 100644
index 00000000..6e2fb2a5
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/obsolete-files.rst
@@ -0,0 +1,14 @@
+obsolete-files
+==============
+
+Directories in the Ansible source tree are sometimes made obsolete.
+Files should not exist in these directories.
+The new location (if any) is dependent on which directory has been made obsolete.
+
+Below are some of the obsolete directories and their new locations:
+
+- All of ``test/runner/`` is now under ``test/lib/ansible_test/`` instead. The organization of files in the new directory has changed.
+- Most subdirectories of ``test/sanity/`` (with some exceptions) are now under ``test/lib/ansible_test/_data/sanity/`` instead.
+
+This error occurs most frequently for open pull requests which add or modify files in directories which are now obsolete.
+Make sure the branch you are working from is current so that changes can be made in the correct location.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/package-data.rst b/docs/docsite/rst/dev_guide/testing/sanity/package-data.rst
new file mode 100644
index 00000000..220872dd
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/package-data.rst
@@ -0,0 +1,5 @@
+package-data
+============
+
+Verifies that the combination of ``MANIFEST.in`` and ``package_data`` from ``setup.py``
+properly installs data files from within ``lib/ansible``.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pep8.rst b/docs/docsite/rst/dev_guide/testing/sanity/pep8.rst
new file mode 100644
index 00000000..8595d986
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/pep8.rst
@@ -0,0 +1,6 @@
+pep8
+====
+
+Python static analysis for PEP 8 style guideline compliance.
+
+See :ref:`testing_pep8` for more information.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pslint.rst b/docs/docsite/rst/dev_guide/testing/sanity/pslint.rst
new file mode 100644
index 00000000..baa4fa03
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/pslint.rst
@@ -0,0 +1,4 @@
+pslint
+======
+
+PowerShell static analysis for common programming errors using `PSScriptAnalyzer <https://github.com/PowerShell/PSScriptAnalyzer/>`_.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst b/docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst
new file mode 100644
index 00000000..a80ddc1e
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/pylint-ansible-test.rst
@@ -0,0 +1,8 @@
+:orphan:
+
+pylint-ansible-test
+===================
+
+Python static analysis for common programming errors.
+
+A more strict set of rules applied to ``ansible-test``.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/pylint.rst b/docs/docsite/rst/dev_guide/testing/sanity/pylint.rst
new file mode 100644
index 00000000..2b2ef9e5
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/pylint.rst
@@ -0,0 +1,4 @@
+pylint
+======
+
+Python static analysis for common programming errors.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/release-names.rst b/docs/docsite/rst/dev_guide/testing/sanity/release-names.rst
new file mode 100644
index 00000000..359f7ecb
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/release-names.rst
@@ -0,0 +1,4 @@
+Release names
+=============
+
+Verifies that the most recent release name has been added to ``.github/RELEASE_NAMES.yml``.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst b/docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst
new file mode 100644
index 00000000..705195c9
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/replace-urlopen.rst
@@ -0,0 +1,4 @@
+replace-urlopen
+===============
+
+Use ``open_url`` from ``module_utils`` instead of ``urlopen``.
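+
+A minimal sketch of the replacement (``open_url`` lives in ``ansible.module_utils.urls``; the URL is illustrative):
+
+.. code-block:: python
+
+    from ansible.module_utils.urls import open_url
+
+    # Instead of: urllib.request.urlopen(url)
+    response = open_url('https://example.com', validate_certs=True)
+    data = response.read()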
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst b/docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst
new file mode 100644
index 00000000..573c3615
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/required-and-default-attributes.rst
@@ -0,0 +1,5 @@
+required-and-default-attributes
+===============================
+
+Use only one of ``default`` or ``required`` with ``FieldAttribute``.
+
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst b/docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst
new file mode 100644
index 00000000..8fcbbce3
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/rstcheck.rst
@@ -0,0 +1,4 @@
+rstcheck
+========
+
+Check reStructuredText files for syntax and formatting issues.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst b/docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst
new file mode 100644
index 00000000..cf6d9272
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/runtime-metadata.rst
@@ -0,0 +1,7 @@
+runtime-metadata.yml
+====================
+
+Validates the schema for:
+
+* ansible-base's ``lib/ansible/config/ansible_builtin_runtime.yml``
+* a collection's ``meta/runtime.yml`` (see the example below)
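+
+For example, a collection's ``meta/runtime.yml`` might contain something like the following (an illustrative sketch, not the full schema):
+
+.. code-block:: yaml
+
+    requires_ansible: '>=2.10'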
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst b/docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst
new file mode 100644
index 00000000..34265c34
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/sanity-docs.rst
@@ -0,0 +1,4 @@
+sanity-docs
+===========
+
+Documentation for each ``ansible-test sanity`` test is required.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/shebang.rst b/docs/docsite/rst/dev_guide/testing/sanity/shebang.rst
new file mode 100644
index 00000000..cff2aa09
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/shebang.rst
@@ -0,0 +1,16 @@
+shebang
+=======
+
+Most executable files should only use one of the following shebangs:
+
+- ``#!/bin/sh``
+- ``#!/bin/bash``
+- ``#!/usr/bin/make``
+- ``#!/usr/bin/env python``
+- ``#!/usr/bin/env bash``
+
+NOTE: For ``#!/bin/bash``, any of the options ``eux`` may also be used, such as ``#!/bin/bash -eux``.
+
+This does not apply to Ansible modules, which should not be executable and must always use ``#!/usr/bin/python``.
+
+Some exceptions are permitted. Ask if you have questions.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst b/docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst
new file mode 100644
index 00000000..446ee1ee
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/shellcheck.rst
@@ -0,0 +1,4 @@
+shellcheck
+==========
+
+Static code analysis for shell scripts using the excellent `shellcheck <https://www.shellcheck.net/>`_ tool.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst b/docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst
new file mode 100644
index 00000000..017209bd
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/symlinks.rst
@@ -0,0 +1,6 @@
+symlinks
+========
+
+Symbolic links are only permitted for files that exist to ensure proper tarball generation during a release.
+
+If other types of symlinks are needed for tests they must be created as part of the test.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst b/docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst
new file mode 100644
index 00000000..36ceb361
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/test-constraints.rst
@@ -0,0 +1,4 @@
+test-constraints
+================
+
+Constraints for test requirements should be in ``test/lib/ansible_test/_data/requirements/constraints.txt``.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst b/docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst
new file mode 100644
index 00000000..d8f19385
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/update-bundled.rst
@@ -0,0 +1,31 @@
+:orphan:
+
+update-bundled
+==============
+
+Check whether any of our known bundled code needs to be updated for a new upstream release.
+
+This test can error in the following ways:
+
+* The bundled code is out of date with regard to the latest release on PyPI. Update the code
+  to the new version and update the version in ``_BUNDLED_METADATA`` to solve this.
+
+* The code is lacking a ``_BUNDLED_METADATA`` variable. This typically happens when a bundled version
+  is updated and we forget to add a ``_BUNDLED_METADATA`` variable to the updated file. Once that is
+  added, this error should go away.
+
+* A file has a ``_BUNDLED_METADATA`` variable but the file isn't specified in
+  :file:`test/sanity/code-smell/update-bundled.py`. This typically happens when a new bundled
+  library is added. Add the file to the ``get_bundled_libs()`` function in the ``update-bundled.py``
+  test script to solve this error.
+
+``_BUNDLED_METADATA`` has the following fields:
+
+:pypi_name: Name of the bundled package on PyPI
+
+:version: Version of the package that we are including here
+
+:version_constraints: Optional PEP440 specifier for the version range that we are bundling.
+ Currently, the only valid use of this is to follow a version that is
+ compatible with the Python stdlib when newer versions of the pypi package
+ implement a new API.
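+
+A ``_BUNDLED_METADATA`` entry looks something like this (the values shown are illustrative):
+
+.. code-block:: python
+
+    _BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.5.0"}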
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst b/docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst
new file mode 100644
index 00000000..e06d83dd
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/use-argspec-type-path.rst
@@ -0,0 +1,10 @@
+use-argspec-type-path
+=====================
+
+The AnsibleModule argument_spec knows of several types beyond the standard Python types. One of
+these is ``path``. When used, type ``path`` ensures that an argument is a string and expands any
+shell variables and tilde characters.
+
+This test looks for use of :func:`os.path.expanduser <python:os.path.expanduser>` in modules. When found, it tells the user to
+replace it with ``type='path'`` in the module's argument_spec or list it as a false positive in the
+test.
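+
+A minimal sketch of the replacement (``dest`` is an illustrative parameter name):
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            # Instead of calling os.path.expanduser() on the value later,
+            # let type='path' expand '~' and shell variables automatically.
+            dest=dict(type='path', required=True),
+        ),
+    )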
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst b/docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst
new file mode 100644
index 00000000..1f415005
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/use-compat-six.rst
@@ -0,0 +1,4 @@
+use-compat-six
+==============
+
+Use ``six`` from ``module_utils`` instead of the ``six`` package installed from PyPI.
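+
+A minimal sketch of the difference:
+
+.. code-block:: python
+
+    # Instead of: import six
+    from ansible.module_utils import six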
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst b/docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst
new file mode 100644
index 00000000..efb58f20
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/validate-modules.rst
@@ -0,0 +1,6 @@
+validate-modules
+================
+
+Analyze modules for common issues in code and documentation.
+
+See :ref:`testing_validate-modules` for more information.
diff --git a/docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst b/docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst
new file mode 100644
index 00000000..5822bb7c
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing/sanity/yamllint.rst
@@ -0,0 +1,4 @@
+yamllint
+========
+
+Check YAML files for syntax and formatting issues.
diff --git a/docs/docsite/rst/dev_guide/testing_compile.rst b/docs/docsite/rst/dev_guide/testing_compile.rst
new file mode 100644
index 00000000..5c22194d
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_compile.rst
@@ -0,0 +1,76 @@
+:orphan:
+
+.. _testing_compile:
+
+*************
+Compile Tests
+*************
+
+.. contents:: Topics
+
+Overview
+========
+
+Compile tests check source files for valid syntax on all supported Python versions:
+
+- 2.4 (Ansible 2.3 only)
+- 2.6
+- 2.7
+- 3.5
+- 3.6
+- 3.7
+- 3.8
+- 3.9
+
+NOTE: In Ansible 2.4 and earlier, the compile test was provided by a dedicated sub-command ``ansible-test compile`` instead of a sanity test using ``ansible-test sanity --test compile``.
+
+Running compile tests locally
+=============================
+
+Compile tests can be run across the whole code base by doing:
+
+.. code:: shell
+
+ cd /path/to/ansible/source
+ source hacking/env-setup
+ ansible-test sanity --test compile
+
+Against a single file by doing:
+
+.. code:: shell
+
+ ansible-test sanity --test compile lineinfile
+
+Or against a specific Python version by doing:
+
+.. code:: shell
+
+ ansible-test sanity --test compile --python 2.7 lineinfile
+
+For advanced usage see the help:
+
+.. code:: shell
+
+ ansible-test sanity --help
+
+
+Installing dependencies
+=======================
+
+``ansible-test`` has a number of dependencies. For ``compile`` tests we suggest running the tests with ``--local``, which is the default.
+
+The dependencies can be installed using the ``--requirements`` argument. For example:
+
+.. code:: shell
+
+ ansible-test sanity --test compile --requirements lineinfile
+
+
+
+The full list of requirements can be found at `test/lib/ansible_test/_data/requirements <https://github.com/ansible/ansible/tree/devel/test/lib/ansible_test/_data/requirements>`_. Requirements files are named after their respective commands. See also the `constraints <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/requirements/constraints.txt>`_ applicable to all commands.
+
+
+Extending compile tests
+=======================
+
+If you believe changes are needed to the compile tests please add a comment on the `Testing Working Group Agenda <https://github.com/ansible/community/blob/master/meetings/README.md>`_ so it can be discussed.
diff --git a/docs/docsite/rst/dev_guide/testing_documentation.rst b/docs/docsite/rst/dev_guide/testing_documentation.rst
new file mode 100644
index 00000000..f9989395
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_documentation.rst
@@ -0,0 +1,36 @@
+:orphan:
+
+.. _testing_module_documentation:
+
+****************************
+Testing module documentation
+****************************
+
+Before you submit a module for inclusion in the main Ansible repo, you must test your module documentation for correct HTML rendering and ensure that the argspec matches the documentation in your Python file. The community pages offer more information on :ref:`testing reStructuredText documentation <testing_documentation_locally>`.
+
+To check the HTML output of your module documentation:
+
+#. Ensure you have a working :ref:`development environment <environment_setup>`.
+#. Install the required Python packages (drop ``--user`` in a venv/virtualenv):
+
+ .. code-block:: bash
+
+ pip install --user -r requirements.txt
+ pip install --user -r docs/docsite/requirements.txt
+
+#. Ensure your module is in the correct directory: ``lib/ansible/modules/$CATEGORY/mymodule.py``.
+#. Build HTML from your module documentation: ``MODULES=mymodule make webdocs``.
+#. To build the HTML documentation for multiple modules, use a comma-separated list of module names: ``MODULES=mymodule,mymodule2 make webdocs``.
+#. View the HTML page at ``file:///path/to/docs/docsite/_build/html/modules/mymodule_module.html``.
+
+To ensure that your module documentation matches your ``argument_spec``:
+
+#. Install the required Python packages (drop ``--user`` in a venv/virtualenv):
+
+ .. code-block:: bash
+
+ pip install --user -r test/lib/ansible_test/_data/requirements/sanity.txt
+
+#. Run the ``validate-modules`` test::
+
+ ansible-test sanity --test validate-modules mymodule
diff --git a/docs/docsite/rst/dev_guide/testing_httptester.rst b/docs/docsite/rst/dev_guide/testing_httptester.rst
new file mode 100644
index 00000000..a8806371
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_httptester.rst
@@ -0,0 +1,27 @@
+:orphan:
+
+**********
+httptester
+**********
+
+.. contents:: Topics
+
+Overview
+========
+
+``httptester`` is a docker container used to host certain resources required by :ref:`testing_integration`. This is to avoid CI tests requiring external resources (such as git or package repos) which, if temporarily unavailable, would cause tests to fail.
+
+It provides an HTTP testing endpoint with the following capabilities:
+
+* httpbin
+* nginx
+* SSL
+* SNI
+
+
+Source files can be found in the `http-test-container <https://github.com/ansible/http-test-container>`_ repository.
+
+Extending httptester
+====================
+
+If you have some time to improve ``httptester``, please add a comment on the `Testing Working Group Agenda <https://github.com/ansible/community/blob/master/meetings/README.md>`_ to avoid duplicated effort.
diff --git a/docs/docsite/rst/dev_guide/testing_integration.rst b/docs/docsite/rst/dev_guide/testing_integration.rst
new file mode 100644
index 00000000..0880e5b1
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_integration.rst
@@ -0,0 +1,236 @@
+:orphan:
+
+.. _testing_integration:
+
+*****************
+Integration tests
+*****************
+
+.. contents:: Topics
+
+The Ansible integration test system.
+
+Tests for playbooks, by playbooks.
+
+Some tests may require credentials. Credentials may be specified with ``credentials.yml``.
+
+Some tests may require root.
+
+.. note::
+ Every new module and plugin should have integration tests, even if the tests cannot be run on Ansible CI infrastructure.
+ In this case, the tests should be marked with the ``unsupported`` alias in `aliases file <https://docs.ansible.com/ansible/latest/dev_guide/testing/sanity/integration-aliases.html>`_.
+
+Quick Start
+===========
+
+It is highly recommended that you install and activate the ``argcomplete`` python package.
+It provides tab completion in ``bash`` for the ``ansible-test`` test runner.
+
+Configuration
+=============
+
+ansible-test command
+--------------------
+
+The example below assumes ``bin/`` is in your ``$PATH``. An easy way to achieve that
+is to initialize your environment with the ``env-setup`` command::
+
+ source hacking/env-setup
+ ansible-test --help
+
+You can also call ``ansible-test`` with the full path::
+
+ bin/ansible-test --help
+
+integration_config.yml
+----------------------
+
+Making your own version of ``integration_config.yml`` can allow for setting some
+tunable parameters to help run the tests better in your environment. Some
+tests (for example, cloud tests) will only run when access credentials are provided. For more
+information about supported credentials, refer to the various ``cloud-config-*.template``
+files in the ``test/integration/`` directory.
+
+Prerequisites
+=============
+
+Some tests assume things like hg, svn, and git are installed and in the path. Some tests
+(such as those for Amazon Web Services) need separate definitions, which will be covered
+later in this document.
+
+(Complete list pending)
+
+Non-destructive Tests
+=====================
+
+These tests will modify files in subdirectories, but will not do things that install or remove packages or things
+outside of those test subdirectories. They will also not reconfigure or bounce system services.
+
+.. note:: Running integration tests within Docker
+
+   To protect your system from any potential changes caused by integration tests, and to ensure a sensible set of dependencies are available, we recommend that you always run integration tests with the ``--docker`` option, for example ``--docker centos8``. See the `list of supported docker images <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/docker.txt>`_ for options (the ``default`` image is used for sanity and unit tests, as well as for platform independent integration tests such as those for cloud modules).
+
+.. note:: Avoiding pulling new Docker images
+
+ Use the ``--docker-no-pull`` option to avoid pulling the latest container image. This is required when using custom local images that are not available for download.
+
+Run as follows for all POSIX platform tests executed by our CI system in a fedora32 docker container::
+
+ ansible-test integration shippable/ --docker fedora32
+
+You can target specific tests as well, such as for individual modules::
+
+ ansible-test integration ping
+
+You can use the ``-v`` option to make the output more verbose::
+
+ ansible-test integration lineinfile -vvv
+
+Use the following command to list all the available targets::
+
+ ansible-test integration --list-targets
+
+.. note:: Bash users
+
+ If you use ``bash`` with ``argcomplete``, obtain a full list by doing: ``ansible-test integration <tab><tab>``
+
+Destructive Tests
+=================
+
+These tests are allowed to install and remove some trivial packages. You will likely want to run these
+in an isolated environment, such as a Docker container. They won't reformat your filesystem::
+
+ ansible-test integration destructive/ --docker fedora32
+
+Windows Tests
+=============
+
+These tests exercise the ``winrm`` connection plugin and Windows modules. You'll
+need to define an inventory with a remote Windows 2008 or 2012 Server to use
+for testing, and enable PowerShell Remoting to continue.
+
+Running these tests may result in changes to your Windows host, so don't run
+them against a production/critical Windows environment.
+
+Enable PowerShell Remoting (run on the Windows host via Remote Desktop)::
+
+ Enable-PSRemoting -Force
+
+Define Windows inventory::
+
+ cp inventory.winrm.template inventory.winrm
+ ${EDITOR:-vi} inventory.winrm
+
+Run the Windows tests executed by our CI system::
+
+ ansible-test windows-integration -v shippable/
+
+Tests in Docker containers
+==========================
+
+If you have a Linux system with Docker installed, running integration tests using the same Docker containers used by
+the Ansible continuous integration (CI) system is recommended.
+
+.. note:: Docker on non-Linux
+
+ Using Docker Engine to run Docker on a non-Linux host (such as macOS) is not recommended.
+ Some tests may fail, depending on the image used for testing.
+ Using the ``--docker-privileged`` option when running ``integration`` (not ``network-integration`` or ``windows-integration``) may resolve the issue.
+
+Running Integration Tests
+-------------------------
+
+To run all CI integration test targets for POSIX platforms in an Ubuntu 18.04 container::
+
+ ansible-test integration shippable/ --docker ubuntu1804
+
+You can also run specific tests or select a different Linux distribution.
+For example, to run tests for the ``ping`` module on an Ubuntu 18.04 container::
+
+ ansible-test integration ping --docker ubuntu1804
+
+Container Images
+----------------
+
+Python 2
+````````
+
+Most container images are for testing with Python 2:
+
+ - centos6
+ - centos7
+ - fedora28
+ - opensuse15py2
+ - ubuntu1404
+ - ubuntu1604
+
+Python 3
+````````
+
+To test with Python 3 use the following images:
+
+ - centos8
+ - fedora32
+ - opensuse15
+ - ubuntu1804
+
+
+Legacy Cloud Tests
+==================
+
+Some of the cloud tests run as normal integration tests, and others run as legacy tests; see the
+:ref:`testing_integration_legacy` page for more information.
+
+
+Other configuration for Cloud Tests
+===================================
+
+In order to run some tests, you must provide access credentials in a file named
+``cloud-config-aws.yml`` or ``cloud-config-cs.ini`` in the ``test/integration``
+directory. Corresponding ``.template`` files are available for syntax help. The newer AWS
+tests now use the file ``test/integration/cloud-config-aws.yml``.
+
+IAM policies for AWS
+====================
+
+Ansible needs fairly wide-ranging powers to run the tests in an AWS account. These rights can be provided to a dedicated user and need to be configured before running the tests.
+
+testing-policies
+----------------
+
+The GitHub repository `mattclay/aws-terminator <https://github.com/mattclay/aws-terminator/>`_
+contains two sets of policies used for all existing AWS module integration tests.
+The ``hacking/aws_config/setup_iam.yml`` playbook can be used to set up two groups:
+
+ - ``ansible-integration-ci`` will have the policies applied necessary to run any
+   integration tests not marked as ``unsupported``; these policies are designed to mirror those
+   used by Ansible's CI.
+ - ``ansible-integration-unsupported`` will have the additional policies applied
+   necessary to run the integration tests marked as ``unsupported``, including tests
+   for managing IAM roles, users and groups.
+
+Once the groups have been created, you'll need to create a user and make the user a member of these
+groups. The policies are designed to minimize the rights of that user. Please note that while this policy does limit
+the user to one region, this does not fully restrict the user (primarily due to the limitations of the Amazon ARN
+notation). The user will still have wide privileges for viewing account definitions, and will also be able to manage
+some resources that are not related to testing (for example, AWS lambdas with different names). Tests should not
+be run in a primary production account in any case.
+
+Other Definitions required
+--------------------------
+
+Apart from installing the policy and giving it to the user identity running the tests, a
+lambda role ``ansible_integration_tests`` has to be created which has lambda basic execution
+privileges.
+
+
+Network Tests
+=============
+
+For guidance on writing network tests, see :ref:`testing_resource_modules`.
+
+
+Where to find out more
+======================
+
+If you'd like to know more about the plans for improving testing Ansible, join the `Testing Working Group <https://github.com/ansible/community/blob/master/meetings/README.md>`_.
diff --git a/docs/docsite/rst/dev_guide/testing_integration_legacy.rst b/docs/docsite/rst/dev_guide/testing_integration_legacy.rst
new file mode 100644
index 00000000..759285e3
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_integration_legacy.rst
@@ -0,0 +1,108 @@
+:orphan:
+
+.. _testing_integration_legacy:
+
+*******************************************
+Testing using the Legacy Integration system
+*******************************************
+
+.. contents:: Topics
+
+This page details how to run the integration tests that haven't been ported to the new ``ansible-test`` framework.
+
+The following areas are still tested using the legacy ``make tests`` command:
+
+* amazon (some)
+* azure
+* cloudflare
+* cloudscale
+* cloudstack
+* consul
+* exoscale
+* gce
+* jenkins
+* rackspace
+
+Over time the above list will be reduced as tests are ported to the ``ansible-test`` framework.
+
+
+Running Cloud Tests
+====================
+
+Cloud tests exercise capabilities of cloud modules (for example, ec2_key). These are
+not 'tests run in the cloud' so much as tests that leverage the cloud modules
+and are organized by cloud provider.
+
+Some AWS tests may use environment variables. It is recommended to either unset any AWS environment variables (such as ``AWS_DEFAULT_PROFILE``, ``AWS_SECRET_ACCESS_KEY``, and so on) or be sure that the environment variables match the credentials provided in ``credentials.yml`` to ensure the tests run with consistency to their full capability on the expected account. See `AWS CLI docs <https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html>`_ for information on creating a profile.
+
+Subsets of tests may be run by commenting out (``#``) unnecessary roles in the appropriate playbook, such as ``test/integration/amazon.yml``.
+
+In order to run cloud tests, you must provide access credentials in a file
+named ``credentials.yml``. A sample credentials file named
+``credentials.template`` is available for syntax help.
+
+Provide cloud credentials::
+
+ cp credentials.template credentials.yml
+ ${EDITOR:-vi} credentials.yml
+
+
+Other configuration
+===================
+
+In order to run some tests, you must provide access credentials in a file named
+``credentials.yml``. A sample credentials file named ``credentials.template`` is available
+for syntax help.
+
+IAM policies for AWS
+====================
+
+In order to run the tests in an AWS account, Ansible needs fairly wide-ranging powers, which
+can be provided to a dedicated user or temporary credentials using a specific policy
+configured in the AWS account.
+
+testing-iam-policy.json.j2
+--------------------------
+
+The ``testing-iam-policy.json.j2`` file contains a policy which can be given to the user
+running the tests to give close to the minimum rights required to run the tests. Please note
+that this does not fully restrict the user; the user has wide privileges for viewing
+account definitions and is also able to manage some resources that are not related to
+testing (for example, AWS lambdas with different names), primarily due to the limitations of the
+Amazon ARN notation. At the very least the policy limits the user to one region; however,
+tests should not be run in a primary production account in any case.
+
+Other Definitions required
+--------------------------
+
+Apart from installing the policy and giving it to the user identity running
+the tests, a lambda role ``ansible_integration_tests`` has to be created which
+has lambda basic execution privileges.
+
+
+Running Tests
+=============
+
+The tests are invoked via a ``Makefile``.
+
+If you haven't already got Ansible available, use the local checkout by doing::
+
+ source hacking/env-setup
+
+Run the tests by doing::
+
+ cd test/integration/
+ # TARGET is the name of the test from the list at the top of this page
+ #make TARGET
+ # for example
+ make amazon
+ # To run all cloud tests you can do:
+ make cloud
+
+.. warning:: Possible cost of running cloud tests
+
+ Running cloud integration tests will create and destroy cloud
+ resources. Running these tests may result in additional fees associated with
+ your cloud account. Care is taken to ensure that created resources are
+ removed. However, it is advisable to inspect your AWS console to ensure no
+ unexpected resources are running.
diff --git a/docs/docsite/rst/dev_guide/testing_pep8.rst b/docs/docsite/rst/dev_guide/testing_pep8.rst
new file mode 100644
index 00000000..92630995
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_pep8.rst
@@ -0,0 +1,24 @@
+:orphan:
+
+.. _testing_pep8:
+
+*****
+PEP 8
+*****
+
+.. contents:: Topics
+
+`PEP 8`_ style guidelines are enforced by `pycodestyle`_ on all python files in the repository by default.
+
+Running Locally
+===============
+
+The `PEP 8`_ check can be run locally with::
+
+
+ ansible-test sanity --test pep8 [file-or-directory-path-to-check] ...
+
+
+
+.. _PEP 8: https://www.python.org/dev/peps/pep-0008/
+.. _pycodestyle: https://pypi.org/project/pycodestyle/
diff --git a/docs/docsite/rst/dev_guide/testing_running_locally.rst b/docs/docsite/rst/dev_guide/testing_running_locally.rst
new file mode 100644
index 00000000..964a9e8d
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_running_locally.rst
@@ -0,0 +1,89 @@
+:orphan:
+
+.. _testing_running_locally:
+
+***************
+Testing Ansible
+***************
+
+This document describes how to:
+
+* Run tests locally using ``ansible-test``
+* Extend
+
+.. contents::
+ :local:
+
+Requirements
+============
+
+There are no special requirements for running ``ansible-test`` on Python 2.7 or later.
+The ``argparse`` package is required for Python 2.6.
+The requirements for each ``ansible-test`` command are covered later.
+
+
+Test Environments
+=================
+
+Most ``ansible-test`` commands support running in one or more isolated test environments to simplify testing.
+
+
+Remote
+------
+
+The ``--remote`` option runs tests in a cloud hosted environment.
+An API key is required to use this feature.
+
+ Recommended for integration tests.
+
+See the `list of supported platforms and versions <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/completion/remote.txt>`_ for additional details.
+
+Environment Variables
+---------------------
+
+When using environment variables to manipulate tests there are some limitations to keep in mind. Environment variables are:
+
+* Not propagated from the host to the test environment when using the ``--docker`` or ``--remote`` options.
+* Not exposed to the test environment unless whitelisted in ``test/lib/ansible_test/_internal/util.py`` in the ``common_environment`` function.
+
+ Example: ``ANSIBLE_KEEP_REMOTE_FILES=1`` can be set when running ``ansible-test integration --venv``. However, using the ``--docker`` option would
+ require running ``ansible-test shell`` to gain access to the Docker environment. Once at the shell prompt, the environment variable could be set
+ and the tests executed. This is useful for debugging tests inside a container by following the
+ :ref:`Debugging AnsibleModule-based modules <debugging_modules>` instructions.
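+
+For example, the ``--venv`` case described above might be run like this (a sketch)::
+
+    ANSIBLE_KEEP_REMOTE_FILES=1 ansible-test integration --venv ping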
+
+Interactive Shell
+=================
+
+Use the ``ansible-test shell`` command to get an interactive shell in the same environment used to run tests. Examples:
+
+* ``ansible-test shell --docker`` - Open a shell in the default docker container.
+* ``ansible-test shell --venv --python 3.6`` - Open a shell in a Python 3.6 virtual environment.
+
+
+Code Coverage
+=============
+
+Code coverage reports make it easy to identify untested code for which more tests should
+be written. Online reports are available but only cover the ``devel`` branch (see
+:ref:`developing_testing`). For new code, local reports are needed.
+
+Add the ``--coverage`` option to any test command to collect code coverage data. If you
+aren't using the ``--venv`` or ``--docker`` options, which create an isolated Python
+environment, then you may have to use the ``--requirements`` option to ensure that the
+correct version of the coverage module is installed::
+
+ ansible-test coverage erase
+ ansible-test units --coverage apt
+ ansible-test integration --coverage aws_lambda
+ ansible-test coverage html
+
+
+Reports can be generated in several different formats:
+
+* ``ansible-test coverage report`` - Console report.
+* ``ansible-test coverage html`` - HTML report.
+* ``ansible-test coverage xml`` - XML report.
+
+To clear data between test runs, use the ``ansible-test coverage erase`` command. For a full list of features see the online help::
+
+ ansible-test coverage --help
diff --git a/docs/docsite/rst/dev_guide/testing_sanity.rst b/docs/docsite/rst/dev_guide/testing_sanity.rst
new file mode 100644
index 00000000..a4f99edd
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_sanity.rst
@@ -0,0 +1,53 @@
+:orphan:
+
+.. _testing_sanity:
+
+************
+Sanity Tests
+************
+
+.. contents:: Topics
+
+Sanity tests are made up of scripts and tools used to perform static code analysis.
+The primary purpose of these tests is to enforce Ansible coding standards and requirements.
+
+Tests are run with ``ansible-test sanity``.
+All available tests are run unless the ``--test`` option is used.
+
+
+How to run
+==========
+
+.. note::
+ To run sanity tests using docker, always use the default docker image
+ by passing the ``--docker`` or ``--docker default`` argument.
+
+.. note::
+ When using docker and the ``--base-branch`` argument,
+ also use the ``--docker-keep-git`` argument to avoid git related errors.
+
+.. code:: shell
+
+ source hacking/env-setup
+
+ # Run all sanity tests
+ ansible-test sanity
+
+ # Run all sanity tests including disabled ones
+ ansible-test sanity --allow-disabled
+
+    # Run all sanity tests against certain files
+ ansible-test sanity lib/ansible/modules/files/template.py
+
+ # Run all tests inside docker (good if you don't have dependencies installed)
+ ansible-test sanity --docker default
+
+ # Run validate-modules against a specific file
+ ansible-test sanity --test validate-modules lib/ansible/modules/files/template.py
+
+Available Tests
+===============
+
+Tests can be listed with ``ansible-test sanity --list-tests``.
+
+See the full list of :ref:`sanity tests <all_sanity_tests>`, which describes the various tests and explains how to fix identified issues.
diff --git a/docs/docsite/rst/dev_guide/testing_units.rst b/docs/docsite/rst/dev_guide/testing_units.rst
new file mode 100644
index 00000000..7573da6f
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_units.rst
@@ -0,0 +1,213 @@
+:orphan:
+
+.. _testing_units:
+
+**********
+Unit Tests
+**********
+
+Unit tests are small isolated tests that target a specific library or module. Unit tests
+in Ansible are currently the only way of driving tests from python within Ansible's
+continuous integration process. This means that in some circumstances the tests may be a
+bit wider than just units.
+
+.. contents:: Topics
+
+Available Tests
+===============
+
+Unit tests can be found in `test/units
+<https://github.com/ansible/ansible/tree/devel/test/units>`_. Notice that the directory
+structure of the tests matches that of ``lib/ansible/``.
+
+Running Tests
+=============
+
+.. note::
+ To run unit tests using docker, always use the default docker image
+ by passing the ``--docker`` or ``--docker default`` argument.
+
+The Ansible unit tests can be run across the whole code base by doing:
+
+.. code:: shell
+
+ cd /path/to/ansible/source
+ source hacking/env-setup
+ ansible-test units --docker -v
+
+Against a single file by doing:
+
+.. code:: shell
+
+ ansible-test units --docker -v apt
+
+Or against a specific Python version by doing:
+
+.. code:: shell
+
+ ansible-test units --docker -v --python 2.7 apt
+
+If you are running unit tests against things other than modules, such as module utilities, specify the whole file path:
+
+.. code:: shell
+
+ ansible-test units --docker -v test/units/module_utils/basic/test_imports.py
+
+For advanced usage see the online help::
+
+ ansible-test units --help
+
+You can also run tests in Ansible's continuous integration system by opening a pull
+request. This will automatically determine which tests to run based on the changes made
+in your pull request.
+
+
+Installing dependencies
+=======================
+
+If you are running ``ansible-test`` with the ``--docker`` or ``--venv`` option you do not need to install dependencies manually.
+
+Otherwise you can install dependencies using the ``--requirements`` option, which will
+install all the required dependencies needed for unit tests. For example:
+
+.. code:: shell
+
+ ansible-test units --python 2.7 --requirements apache2_module
+
+
+The list of unit test requirements can be found at `test/units/requirements.txt
+<https://github.com/ansible/ansible/tree/devel/test/units/requirements.txt>`_.
+
+This does not include the list of unit test requirements for ``ansible-test`` itself,
+which can be found at `test/lib/ansible_test/_data/requirements/units.txt
+<https://github.com/ansible/ansible/tree/devel/test/lib/ansible_test/_data/requirements/units.txt>`_.
+
+See also the `constraints
+<https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/requirements/constraints.txt>`_
+applicable to all test commands.
+
+
+Extending unit tests
+====================
+
+
+.. warning:: What a unit test isn't
+
+ If you start writing a test that requires external services then
+ you may be writing an integration test, rather than a unit test.
+
+
+Structuring Unit Tests
+``````````````````````
+
+Ansible drives unit tests through `pytest <https://docs.pytest.org/en/latest/>`_. This
+means that tests can be written either as simple functions, included in any file
+named ``test_<something>.py``, or as classes.
+
+Here is an example of a function::
+
+    # this function will be run simply because its name starts with test_
+
+    def test_add():
+        a = 10
+        b = 23
+        c = 33
+        assert a + b == c
+
+Here is an example of a class::
+
+    import unittest
+
+    class AddTester(unittest.TestCase):
+
+        def setUp(self):
+            self.a = 10
+            self.b = 23
+
+        # this function will be run because its name starts with test_
+        def test_add(self):
+            c = 33
+            assert self.a + self.b == c
+
+        # this function will also be run because its name starts with test_
+        def test_subtract(self):
+            c = -13
+            assert self.a - self.b == c
+
+Both methods work fine in most circumstances; the function-based interface is simpler and
+quicker, so that's probably where you should start when you are just trying to add a
+few basic tests for a module. The class-based tests allow more tidy set up and tear down
+of prerequisites, so if you have many test cases for your module you may want to refactor
+to use that.
+
+Assertions using the simple ``assert`` function inside the tests will give full
+information on the cause of the failure with a traceback of functions called during the
+assertion. This means that plain asserts are recommended over other external assertion
+libraries.
+
+A number of the unit test suites include functions that are shared between several
+modules, especially in the networking arena. In these cases a file is created in the same
+directory, which is then included directly.
+
+
+Module test case common code
+````````````````````````````
+
+Keep common code as specific as possible within the ``test/units/`` directory structure.
+Don't import common unit test code from directories outside the current or parent directories.
+
+Don't import other unit tests from a unit test. Any common code should be in dedicated
+files that aren't themselves tests.
+
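+A hedged sketch of that layout (the file and function names here are hypothetical)::
+
+    # test/units/modules/cloud/amazon/common_helpers.py -- shared code, not a test
+    def simple_instance(status='available'):
+        """Build a minimal fake instance record shared by several test files."""
+        return {'DBInstanceStatus': status}
+
+    # test/units/modules/cloud/amazon/test_my_module.py -- a test importing it
+    from common_helpers import simple_instance
+
+    def test_instance_defaults_to_available():
+        assert simple_instance()['DBInstanceStatus'] == 'available'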
+
+Fixtures files
+``````````````
+
+To mock out fetching results from devices, or provide other complex data structures that
+come from external libraries, you can use ``fixtures`` to read in pre-generated data.
+
+You can check how `fixtures <https://github.com/ansible/ansible/tree/devel/test/units/module_utils/facts/fixtures/cpuinfo>`_
+are used in `cpuinfo fact tests <https://github.com/ansible/ansible/blob/9f72ff80e3fe173baac83d74748ad87cb6e20e64/test/units/module_utils/facts/hardware/linux_data.py#L384>`_.
+
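+As a minimal sketch (the fixture file name and the helper are hypothetical), a fixture is
+usually just a data file read from a path relative to the test::
+
+    import json
+    import os
+
+    def load_fixture(name):
+        """Read pre-generated data stored in a fixtures directory next to this test."""
+        path = os.path.join(os.path.dirname(__file__), 'fixtures', name)
+        with open(path) as f:
+            return json.load(f)
+
+    def test_parses_server_list():
+        data = load_fixture('server_list.json')
+        assert data['servers']
+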
+If you are simulating APIs you may find that Python placebo is useful. See
+:ref:`testing_units_modules` for more information.
+
+
+Code Coverage For New or Updated Unit Tests
+```````````````````````````````````````````
+New code will be missing from the codecov.io coverage reports (see :ref:`developing_testing`), so
+local reporting is needed. Most ``ansible-test`` commands allow you to collect code
+coverage; this is particularly useful for indicating where to extend testing.
+
+To collect coverage data add the ``--coverage`` argument to your ``ansible-test`` command line:
+
+.. code:: shell
+
+ ansible-test units --coverage apt
+ ansible-test coverage html
+
+Results will be written to ``test/results/reports/coverage/index.html``.
+
+Reports can be generated in several different formats:
+
+* ``ansible-test coverage report`` - Console report.
+* ``ansible-test coverage html`` - HTML report.
+* ``ansible-test coverage xml`` - XML report.
+
+To clear data between test runs, use the ``ansible-test coverage erase`` command. See
+:ref:`testing_running_locally` for more information about generating coverage
+reports.
+
+
+.. seealso::
+
+ :ref:`testing_units_modules`
+ Special considerations for unit testing modules
+ :ref:`testing_running_locally`
+ Running tests locally including gathering and reporting coverage data
+ `Python 3 documentation - 26.4. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_
+ The documentation of the unittest framework in python 3
+ `Python 2 documentation - 25.3. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_
+ The documentation of the earliest supported unittest framework - from Python 2.6
+ `pytest: helps you write better programs <https://docs.pytest.org/en/latest/>`_
+ The documentation of pytest - the framework actually used to run Ansible unit tests
diff --git a/docs/docsite/rst/dev_guide/testing_units_modules.rst b/docs/docsite/rst/dev_guide/testing_units_modules.rst
new file mode 100644
index 00000000..88763eb0
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_units_modules.rst
@@ -0,0 +1,563 @@
+:orphan:
+
+.. _testing_units_modules:
+
+****************************
+Unit Testing Ansible Modules
+****************************
+
+.. highlight:: python
+
+.. contents:: Topics
+
+Introduction
+============
+
+This document explains why, how and when you should use unit tests for Ansible modules.
+The document doesn't apply to other parts of Ansible for which the recommendations are
+normally closer to the Python standard. There is basic documentation for Ansible unit
+tests in the developer guide :ref:`testing_units`. This document should
+be readable for a new Ansible module author. If you find it incomplete or confusing,
+please open a bug or ask for help on Ansible IRC.
+
+What Are Unit Tests?
+====================
+
+Ansible includes a set of unit tests in the :file:`test/units` directory. These tests primarily cover the
+internals but can also cover Ansible modules. The structure of the unit tests matches
+the structure of the code base, so the tests that reside in the :file:`test/units/modules/` directory
+are organized by module groups.
+
+Integration tests can be used for most modules, but there are situations where
+cases cannot be verified using integration tests. This means that Ansible unit test cases
+may extend beyond testing only minimal units and in some cases will include some
+level of functional testing.
+
+
+Why Use Unit Tests?
+===================
+
+Ansible unit tests have advantages and disadvantages. It is important to understand these.
+Advantages include:
+
+* Most unit tests are much faster than most Ansible integration tests. The complete suite
+ of unit tests can be run regularly by a developer on their local system.
+* Unit tests can be run by developers who don't have access to the system which the module is
+ designed to work on, allowing a level of verification that changes to core functions
+ haven't broken module expectations.
+* Unit tests can easily substitute system functions, allowing testing of software that
+  would otherwise be impractical. For example, the ``sleep()`` function can be replaced and we check
+  that a ten minute sleep was called without actually waiting ten minutes.
+* Unit tests are run on different Python versions. This allows us to
+ ensure that the code behaves in the same way on different Python versions.
+
+There are also some potential disadvantages of unit tests. Unit tests don't normally
+directly test the actual useful features of software, just its internal
+implementation:
+
+* Unit tests that test the internal, non-visible features of software may make
+  refactoring difficult if those internal features have to change (see also
+  `Naming unit tests`_ below)
+* Even if the internal feature is working correctly it is possible that there will be a
+  problem between the internal code tested and the actual result delivered to the user
+
+Normally the Ansible integration tests (which are written in Ansible YAML) provide better
+testing for most module functionality. If those tests already test a feature and perform
+well there may be little point in providing a unit test covering the same area as well.
+
+When To Use Unit Tests
+======================
+
+There are a number of situations where unit tests are a better choice than integration
+tests. For example, testing things which are impossible, slow or very difficult to test
+with integration tests, such as:
+
+* Forcing rare / strange / random situations that can't be reliably reproduced, such as specific network
+  failures and exceptions
+* Extensive testing of slow configuration APIs
+* Situations where the integration tests cannot be run as part of the main Ansible
+ continuous integration running in Shippable.
+
+
+
+Providing quick feedback
+------------------------
+
+Example:
+ A single step of the rds_instance test cases can take up to 20
+ minutes (the time to create an RDS instance in Amazon). The entire
+ test run can last for well over an hour. All 16 of the unit tests
+ complete execution in less than 2 seconds.
+
+The time saving provided by being able to run the code in a unit test makes it worth
+creating a unit test when bug fixing a module, even if those tests do not often identify
+problems later. As a basic goal, every module should have at least one unit test which
+will give quick feedback in easy cases without having to wait for the integration tests to
+complete.
+
+Ensuring correct use of external interfaces
+-------------------------------------------
+
+Unit tests can check the way in which external services are run to ensure that they match
+specifications or are as efficient as possible *even when the final output will not be changed*.
+
+Example:
+ Package managers are often far more efficient when installing multiple packages at once
+ rather than each package separately. The final result is the
+ same: the packages are all installed, so the efficiency is difficult to verify through
+ integration tests. By providing a mock package manager and verifying that it is called
+ once, we can build a valuable test for module efficiency.
+
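+A hedged sketch of such a test (the ``install_packages`` helper is invented for
+illustration; it is not a real Ansible function)::
+
+    from units.compat.mock import MagicMock
+
+    def install_packages(pkg_mgr, packages):
+        """Hypothetical module helper that batches all packages into one call."""
+        pkg_mgr.install(packages)
+
+    def test_packages_installed_in_one_call():
+        pkg_mgr = MagicMock()
+        install_packages(pkg_mgr, ['vim', 'git', 'tmux'])
+        # a single call with the full list proves the batching behaviour
+        # without touching a real package manager
+        pkg_mgr.install.assert_called_once_with(['vim', 'git', 'tmux'])
+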
+Another related use is in the situation where an API has versions which behave
+differently. A programmer working on a new version may change the module to work with the
+new API version and unintentionally break the old version. A test case
+which checks that the call happens properly for the old version can help avoid the
+problem. In this situation it is very important to include version numbering in the test case
+name (see `Naming unit tests`_ below).
+
+Providing specific design tests
+--------------------------------
+
+By building a requirement for a particular part of the
+code and then coding to that requirement, unit tests *can* sometimes improve the code and
+help future developers understand that code.
+
+Unit tests that test internal implementation details of code, on the other hand, almost
+always do more harm than good. Testing that your packages to install are stored in a list
+would slow down and confuse a future developer who might need to change that list into a
+dictionary for efficiency. This problem can be reduced somewhat with clear test naming so
+that the future developer immediately knows to delete the test case, but it is often
+better to simply leave out the test case altogether and test for a real valuable feature
+of the code, such as installing all of the packages supplied as arguments to the module.
+
+
+How to unit test Ansible modules
+================================
+
+There are a number of techniques for unit testing modules. Beware that most
+modules without unit tests are structured in a way that makes testing quite difficult and
+can lead to very complicated tests which need more work than the code. Effectively using unit
+tests may lead you to restructure your code. This is often a good thing and leads
+to better code overall. Good restructuring can make your code clearer and easier to understand.
+
+
+Naming unit tests
+-----------------
+
+Unit tests should have logical names. If a developer working on the module being tested
+breaks the test case, it should be easy to figure out what the unit test covers from the name.
+If a unit test is designed to verify compatibility with a specific software or API version
+then include the version in the name of the unit test.
+
+As an example, ``test_v2_state_present_should_call_create_server_with_name()`` would be a
+good name, ``test_create_server()`` would not be.
+
+
+Use of Mocks
+------------
+
+Mock objects (from https://docs.python.org/3/library/unittest.mock.html) can be very
+useful in building unit tests for special / difficult cases, but they can also
+lead to complex and confusing coding situations. One good use for mocks would be in
+simulating an API. As with ``six``, the ``mock`` Python package is bundled with Ansible (use
+``import units.compat.mock``).
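+
+For example, a ``MagicMock`` can stand in for a whole API client; the client and its
+return data below are purely illustrative::
+
+    from units.compat.mock import MagicMock
+
+    def test_status_lookup():
+        api_client = MagicMock()
+        api_client.get_server.return_value = {'name': 'web01', 'status': 'running'}
+
+        # code under test would receive api_client in place of a real connection
+        status = api_client.get_server('web01')['status']
+
+        assert status == 'running'
+        api_client.get_server.assert_called_once_with('web01')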
+
+Ensuring failure cases are visible with mock objects
+----------------------------------------------------
+
+Functions like :meth:`module.fail_json` are normally expected to terminate execution. When you
+run with a mock module object this doesn't happen, since the mock always returns another mock
+from a function call. You can set up the mock to raise an exception (as shown in
+`Mocking of the actual module`_ below), or you can assert that these functions have not been called in each test. For example::
+
+ module = MagicMock()
+ function_to_test(module, argument)
+ module.fail_json.assert_not_called()
+
+This applies not only to calling the main module but to almost any other
+function in a module which gets the module object.
+
+
+Mocking of the actual module
+----------------------------
+
+The setup of an actual module is quite complex (see `Passing Arguments`_ below) and often
+isn't needed for most functions which use a module. Instead you can use a mock object as
+the module and create any module attributes needed by the function you are testing. If
+you do this, beware that the module exit functions need special handling as mentioned
+above, either by throwing an exception or ensuring that they haven't been called. For example::
+
+    class AnsibleExitJson(Exception):
+        """Exception class to be raised by module.exit_json and caught by the test case"""
+        pass
+
+    def fake_exit_json(*args, **kwargs):
+        # you may also do the same for fail_json
+        raise AnsibleExitJson(kwargs)
+
+    module = MagicMock()
+    module.exit_json.side_effect = fake_exit_json
+    with self.assertRaises(AnsibleExitJson) as result:
+        my_module.test_this_function(module, argument)
+    module.fail_json.assert_not_called()
+    assert result.exception.args[0]['changed'] is True
+
+API definition with unit test cases
+-----------------------------------
+
+API interaction is usually best tested with the function tests defined in Ansible's
+integration testing section, which run against the actual API. There are several cases
+where the unit tests are likely to work better.
+
+Defining a module against an API specification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This case is especially important for modules interacting with web services, which provide
+an API that Ansible uses but which are beyond the control of the user.
+
+By writing a custom emulation of the calls that return data from the API, we can ensure
+that only the features which are clearly defined in the specification of the API are
+present in the message. This means that we can check that we use the correct
+parameters and nothing else.
+
+
+*Example: in rds_instance unit tests a simple instance state is defined*::
+
+ def simple_instance_list(status, pending):
+ return {u'DBInstances': [{u'DBInstanceArn': 'arn:aws:rds:us-east-1:1234567890:db:fakedb',
+ u'DBInstanceStatus': status,
+ u'PendingModifiedValues': pending,
+ u'DBInstanceIdentifier': 'fakedb'}]}
+
+This is then used to create a list of states::
+
+ rds_client_double = MagicMock()
+ rds_client_double.describe_db_instances.side_effect = [
+ simple_instance_list('rebooting', {"a": "b", "c": "d"}),
+ simple_instance_list('available', {"c": "d", "e": "f"}),
+ simple_instance_list('rebooting', {"a": "b"}),
+ simple_instance_list('rebooting', {"e": "f", "g": "h"}),
+ simple_instance_list('rebooting', {}),
+ simple_instance_list('available', {"g": "h", "i": "j"}),
+ simple_instance_list('rebooting', {"i": "j", "k": "l"}),
+ simple_instance_list('available', {}),
+ simple_instance_list('available', {}),
+ ]
+
+These states are then used as returns from a mock object to ensure that the ``await`` function
+waits through all of the states that would mean the RDS instance has not yet completed
+configuration::
+
+ rds_i.await_resource(rds_client_double, "some-instance", "available", mod_mock,
+ await_pending=1)
+ assert(len(sleeper_double.mock_calls) > 5), "await_pending didn't wait enough"
+
+By doing this we check that the ``await`` function will keep waiting through
+potentially unusual sequences of states that would be impossible to reliably trigger through the
+integration tests but which happen unpredictably in reality.
+
+Defining a module to work against multiple API versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This case is especially important for modules interacting with many different versions of
+software; for example, package installation modules that might be expected to work with
+many different operating system versions.
+
+By using previously stored data from various versions of an API we can ensure that the
+code is tested against the actual data which will be sent from that version of the system
+even when the version is very obscure and unlikely to be available during testing.
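+
+A hedged sketch of the idea (the payload shapes and the helper are invented): store one
+payload per API version and parametrize a single test over them::
+
+    import pytest
+
+    # stands in for data previously captured from two API versions
+    API_RESPONSES = {
+        'v1': {'servers': [{'name': 'web01'}]},
+        'v2': {'items': [{'hostname': 'web01'}]},
+    }
+
+    def first_server_name(data):
+        """Hypothetical module helper that copes with both payload shapes."""
+        if 'servers' in data:
+            return data['servers'][0]['name']
+        return data['items'][0]['hostname']
+
+    @pytest.mark.parametrize('version', sorted(API_RESPONSES))
+    def test_first_server_name(version):
+        assert first_server_name(API_RESPONSES[version]) == 'web01'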
+
+Ansible special cases for unit testing
+======================================
+
+There are a number of special cases for unit testing the environment of an Ansible module.
+The most common are documented below, and suggestions for others can be found by looking
+at the source code of the existing unit tests or asking on the Ansible IRC channel or mailing
+lists.
+
+Module argument processing
+--------------------------
+
+There are two problems with running the main function of a module:
+
+* Since the module is supposed to accept arguments on ``STDIN`` it is a bit difficult to
+ set up the arguments correctly so that the module will get them as parameters.
+* All modules should finish by calling either :meth:`module.fail_json` or
+  :meth:`module.exit_json`, but these won't work correctly in a testing environment.
+
+Passing Arguments
+-----------------
+
+.. This section should be updated once https://github.com/ansible/ansible/pull/31456 is
+ closed since the function below will be provided in a library file.
+
+To pass arguments to a module correctly, use the ``set_module_args`` function, which accepts a dictionary
+as its parameter. Module creation and argument processing are
+handled through the :class:`AnsibleModule` object in the basic section of the utilities. Normally
+this accepts input on ``STDIN``, which is not convenient for unit testing. When the special
+variable ``basic._ANSIBLE_ARGS`` is set, it is treated as if the input came on ``STDIN`` to the module. Simply call ``set_module_args`` before setting up your module::
+
+ import json
+ from units.modules.utils import set_module_args
+ from ansible.module_utils._text import to_bytes
+
+ def test_already_registered(self):
+ set_module_args({
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ })
+
+Handling exit correctly
+-----------------------
+
+.. This section should be updated once https://github.com/ansible/ansible/pull/31456 is
+ closed since the exit and failure functions below will be provided in a library file.
+
+The :meth:`module.exit_json` function won't work properly in a testing environment since it
+writes its return information to ``STDOUT`` and then exits, where that output
+is difficult to examine. This can be mitigated by replacing it (and :meth:`module.fail_json`) with
+a function that raises an exception::
+
+ def exit_json(*args, **kwargs):
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+Now you can ensure that the first function called is the one you expected simply by
+testing for the correct exception::
+
+ def test_returned_value(self):
+ set_module_args({
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ })
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ my_module.main()
+
+The same technique can be used to replace :meth:`module.fail_json` (which is used for failure
+returns from modules) and ``aws_module.fail_json_aws()`` (used in modules for Amazon
+Web Services).
+
+Running the main function
+-------------------------
+
+If you do want to run the actual main function of a module you must import the module, set
+the arguments as above, set up the appropriate exit exception and then run the module::
+
+    # This test is based around pytest's features for individual test functions
+    import pytest
+    import ansible.modules.module.group.my_module as my_module
+
+    def fake_exit_json(self, *args, **kwargs):
+        """Patch over AnsibleModule.exit_json; raise an exception instead of exiting"""
+        raise AnsibleExitJson(kwargs)
+
+    def test_main_function(monkeypatch):
+        monkeypatch.setattr(my_module.AnsibleModule, "exit_json", fake_exit_json)
+        set_module_args({
+            'activationkey': 'key',
+            'username': 'user',
+            'password': 'pass',
+        })
+        with pytest.raises(AnsibleExitJson):
+            my_module.main()
+
+
+Handling calls to external executables
+--------------------------------------
+
+Modules must use :meth:`AnsibleModule.run_command` in order to execute an external command. This
+method needs to be mocked in unit tests.
+
+Here is a simple mock of :meth:`AnsibleModule.run_command` (taken from :file:`test/units/modules/packaging/os/test_rhn_register.py`)::
+
+    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+        run_command.return_value = 0, '', ''  # successful execution, no output
+        with self.assertRaises(AnsibleExitJson) as result:
+            self.module.main()
+        self.assertFalse(result.exception.args[0]['changed'])
+        # check that run_command has been called exactly once with the expected command line
+        run_command.assert_called_once_with('/usr/bin/command args')
+
+
+A Complete Example
+------------------
+
+The following example is a complete skeleton that reuses the mocks explained above and adds a new
+mock for :meth:`AnsibleModule.get_bin_path`::
+
+ import json
+
+ from units.compat import unittest
+ from units.compat.mock import patch
+ from ansible.module_utils import basic
+ from ansible.module_utils._text import to_bytes
+ from ansible.modules.namespace import my_module
+
+
+ def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+ class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+ class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+ def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+ def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+ def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ if arg.endswith('my_command'):
+ return '/usr/bin/my_command'
+ else:
+ if required:
+                fail_json(msg='%r not found!' % arg)
+
+
+ class TestMyModule(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+                my_module.main()
+
+
+ def test_ensure_command_called(self):
+ set_module_args({
+ 'param1': 10,
+ 'param2': 'test',
+ })
+
+ with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
+ stdout = 'configuration updated'
+ stderr = ''
+ rc = 0
+ mock_run_command.return_value = rc, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ my_module.main()
+                self.assertFalse(result.exception.args[0]['changed']) # ensure result is not changed
+
+ mock_run_command.assert_called_once_with('/usr/bin/my_command --value 10 --name test')
+
+
+Restructuring modules to enable testing module set up and other processes
+-------------------------------------------------------------------------
+
+Often modules have a ``main()`` function which sets up the module and then performs other
+actions. This can make it difficult to check argument processing. Testing becomes easier when
+module configuration and initialization are moved into a separate function. For example::
+
+ argument_spec = dict(
+ # module function variables
+ state=dict(choices=['absent', 'present', 'rebooted', 'restarted'], default='present'),
+ apply_immediately=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=600),
+ allocated_storage=dict(type='int', aliases=['size']),
+ db_instance_identifier=dict(aliases=["id"], required=True),
+ )
+
+ def setup_module_object():
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ mutually_exclusive=[['old_instance_id', 'source_db_instance_identifier',
+ 'db_snapshot_identifier']],
+ )
+ return module
+
+ def main():
+ module = setup_module_object()
+ validate_parameters(module)
+ conn = setup_client(module)
+ return_dict = run_task(module, conn)
+ module.exit_json(**return_dict)
+
+This now makes it possible to run tests against the module initialization function::
+
+    def test_rds_module_setup_fails_if_db_instance_identifier_parameter_missing():
+        # db_instance_identifier parameter is missing
+        set_module_args({
+            'state': 'absent',
+            'apply_immediately': 'True',
+        })
+
+        with pytest.raises(AnsibleFailJson):
+            setup_module_object()
+
+See also ``test/units/module_utils/aws/test_rds.py``.
+
+Note that the ``argument_spec`` dictionary is stored in a module-level variable. This has
+advantages, both in allowing explicit testing of the arguments and in allowing the easy
+creation of module objects for testing.
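+
+For example, tests can inspect the spec directly (a sketch based on the
+``argument_spec`` shown above, assuming the module has been imported as ``my_module``)::
+
+    def test_db_instance_identifier_is_required():
+        assert my_module.argument_spec['db_instance_identifier']['required'] is True
+
+    def test_state_defaults_to_present():
+        assert my_module.argument_spec['state']['default'] == 'present'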
+
+The same restructuring technique can be valuable for testing other functionality, such as the part of the module which queries the object that the module configures.
+
+Traps for maintaining Python 2 compatibility
+============================================
+
+If you use an old version of the ``mock`` library (such as one that still supports Python 2.6), a number of the
+assert functions are missing but calls to them will return as if successful. This means that test cases should take great care *not* to use
+functions marked as *new* in the Python 3 documentation, since the tests will likely always
+succeed even if the code is broken when run on older versions of Python.
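+
+The trap looks like this sketch: ``assert_called()`` was only added in Python 3.6, so on
+an old ``mock`` the attribute access just returns another auto-created mock and the
+"assertion" always passes::
+
+    from units.compat.mock import MagicMock
+
+    m = MagicMock()
+    # On a modern mock this is a real assertion; on old versions the attribute
+    # is auto-created, so this line passes even if some_call was never invoked.
+    m.some_call.assert_called()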
+
+A helpful development approach is to ensure that all of the tests have been
+run under Python 2.6, and that each assertion in the test cases has been verified to work by breaking
+the code in Ansible to trigger that failure.
+
+.. warning:: Maintain Python 2.6 compatibility
+
+ Please remember that modules need to maintain compatibility with Python 2.6 so the unittests for
+ modules should also be compatible with Python 2.6.
+
+
+.. seealso::
+
+ :ref:`testing_units`
+ Ansible unit tests documentation
+ :ref:`testing_running_locally`
+ Running tests locally including gathering and reporting coverage data
+ :ref:`developing_modules_general`
+ Get started developing a module
+ `Python 3 documentation - 26.4. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_
+ The documentation of the unittest framework in python 3
+ `Python 2 documentation - 25.3. unittest — Unit testing framework <https://docs.python.org/3/library/unittest.html>`_
+ The documentation of the earliest supported unittest framework - from Python 2.6
+ `pytest: helps you write better programs <https://docs.pytest.org/en/latest/>`_
+ The documentation of pytest - the framework actually used to run Ansible unit tests
+ `Development Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Mailing list for development topics
+ `Testing Your Code (from The Hitchhiker's Guide to Python!) <https://docs.python-guide.org/writing/tests/>`_
+ General advice on testing Python code
+ `Uncle Bob's many videos on YouTube <https://www.youtube.com/watch?v=QedpQjxBPMA&list=PLlu0CT-JnSasQzGrGzddSczJQQU7295D2>`_
+       Unit testing is a part of various philosophies of software development, including
+       Extreme Programming (XP) and Clean Coding. Uncle Bob talks through how to benefit from this
+ `"Why Most Unit Testing is Waste" <https://rbcs-us.com/documents/Why-Most-Unit-Testing-is-Waste.pdf>`_
+ An article warning against the costs of unit testing
+ `'A Response to "Why Most Unit Testing is Waste"' <https://henrikwarne.com/2014/09/04/a-response-to-why-most-unit-testing-is-waste/>`_
+       A response describing how to maintain the value of unit tests
diff --git a/docs/docsite/rst/dev_guide/testing_validate-modules.rst b/docs/docsite/rst/dev_guide/testing_validate-modules.rst
new file mode 100644
index 00000000..044a2c29
--- /dev/null
+++ b/docs/docsite/rst/dev_guide/testing_validate-modules.rst
@@ -0,0 +1,165 @@
+:orphan:
+
+.. _testing_validate-modules:
+
+****************
+validate-modules
+****************
+
+.. contents:: Topics
+
+Python program to help test or validate Ansible modules.
+
+``validate-modules`` is one of the ``ansible-test`` Sanity Tests; see :ref:`testing_sanity` for more information.
+
+Originally developed by Matt Martz (@sivel).
+
+
+Usage
+=====
+
+.. code:: shell
+
+ cd /path/to/ansible/source
+ source hacking/env-setup
+ ansible-test sanity --test validate-modules
+
+Help
+====
+
+.. code:: shell
+
+ usage: validate-modules [-h] [-w] [--exclude EXCLUDE] [--arg-spec]
+ [--base-branch BASE_BRANCH] [--format {json,plain}]
+ [--output OUTPUT]
+ modules [modules ...]
+
+ positional arguments:
+ modules Path to module or module directory
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -w, --warnings Show warnings
+ --exclude EXCLUDE RegEx exclusion pattern
+ --arg-spec Analyze module argument spec
+ --base-branch BASE_BRANCH
+ Used in determining if new options were added
+ --format {json,plain}
+ Output format. Default: "plain"
+ --output OUTPUT Output location, use "-" for stdout. Default "-"
+
+
+Extending validate-modules
+==========================
+
+The ``validate-modules`` tool has a `schema.py <https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py>`_ that is used to validate the YAML blocks, such as ``DOCUMENTATION`` and ``RETURN``.
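+
+As a rough illustration of the idea (this is not the actual Ansible schema), a declarative
+validator in the same spirit can be built with the `voluptuous <https://pypi.org/project/voluptuous/>`_ library:
+
+.. code:: python
+
+    from voluptuous import Required, Schema
+
+    # illustrative only; the real schema.py is far more extensive
+    doc_schema = Schema({
+        Required('module'): str,
+        Required('short_description'): str,
+        'options': dict,
+    })
+
+    # raises voluptuous.MultipleInvalid if a required field is missing or wrongly typed
+    doc_schema({'module': 'my_module', 'short_description': 'Does a thing.'})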
+
+
+Codes
+=====
+
+============================================================ ================== ==================== =========================================================================================
+ **Error Code** **Type** **Level** **Sample Message**
+------------------------------------------------------------ ------------------ -------------------- -----------------------------------------------------------------------------------------
+ ansible-deprecated-version Documentation Error A feature is deprecated and supposed to be removed in the current or an earlier Ansible version
+ ansible-invalid-version Documentation Error The Ansible version at which a feature is supposed to be removed cannot be parsed
+ ansible-module-not-initialized Syntax Error Execution of the module did not result in initialization of AnsibleModule
+ collection-deprecated-version Documentation Error A feature is deprecated and supposed to be removed in the current or an earlier collection version
+ collection-invalid-version Documentation Error The collection version at which a feature is supposed to be removed cannot be parsed (it must be a semantic version, see https://semver.org/)
+ deprecated-date Documentation Error A date before today appears as ``removed_at_date`` or in ``deprecated_aliases``
+ deprecation-mismatch Documentation Error Module marked as deprecated or removed in at least one of the filename, its metadata, or in DOCUMENTATION (setting DOCUMENTATION.deprecated for deprecation or removing all Documentation for removed) but not in all three places.
+ doc-choices-do-not-match-spec Documentation Error Value for "choices" from the argument_spec does not match the documentation
+ doc-choices-incompatible-type Documentation Error Choices value from the documentation is not compatible with type defined in the argument_spec
+ doc-default-does-not-match-spec Documentation Error Value for "default" from the argument_spec does not match the documentation
+ doc-default-incompatible-type Documentation Error Default value from the documentation is not compatible with type defined in the argument_spec
+ doc-elements-invalid Documentation Error Documentation specifies elements for argument, when "type" is not ``list``.
+ doc-elements-mismatch Documentation Error Argument_spec defines elements different than documentation does
+ doc-missing-type Documentation Error Documentation doesn't specify a type but argument in ``argument_spec`` use default type (``str``)
+ doc-required-mismatch Documentation Error argument in argument_spec is required but documentation says it is not, or vice versa
+ doc-type-does-not-match-spec Documentation Error Argument_spec defines type different than documentation does
+ documentation-error Documentation Error Unknown ``DOCUMENTATION`` error
+ documentation-syntax-error Documentation Error Invalid ``DOCUMENTATION`` schema
+ illegal-future-imports Imports Error Only the following ``from __future__`` imports are allowed: ``absolute_import``, ``division``, and ``print_function``.
+ import-before-documentation Imports Error Import found before documentation variables. All imports must appear below ``DOCUMENTATION``/``EXAMPLES``/``RETURN``
+ import-error Documentation Error ``Exception`` attempting to import module for ``argument_spec`` introspection
+ import-placement Locations Warning Imports should be directly below ``DOCUMENTATION``/``EXAMPLES``/``RETURN``
+ imports-improper-location Imports Error Imports should be directly below ``DOCUMENTATION``/``EXAMPLES``/``RETURN``
+ incompatible-choices Documentation Error Choices value from the argument_spec is not compatible with type defined in the argument_spec
+ incompatible-default-type Documentation Error Default value from the argument_spec is not compatible with type defined in the argument_spec
+ invalid-argument-name Documentation Error Argument in argument_spec must not be one of 'message', 'syslog_facility' as it is used internally by Ansible Core Engine
+ invalid-argument-spec Documentation Error Argument in argument_spec must be a dictionary/hash when used
+ invalid-argument-spec-options Documentation Error Suboptions in argument_spec are invalid
+ invalid-documentation Documentation Error ``DOCUMENTATION`` is not valid YAML
+ invalid-documentation-options Documentation Error ``DOCUMENTATION.options`` must be a dictionary/hash when used
+ invalid-examples Documentation Error ``EXAMPLES`` is not valid YAML
+ invalid-extension Naming Error Official Ansible modules must have a ``.py`` extension for python modules or a ``.ps1`` for powershell modules
+ invalid-module-schema Documentation Error ``AnsibleModule`` schema validation error
+ invalid-requires-extension Naming Error Module ``#AnsibleRequires -CSharpUtil`` should not end in .cs, Module ``#Requires`` should not end in .psm1
+ invalid-tagged-version Documentation Error All version numbers specified in code have to be explicitly tagged with the collection name, in other words, ``community.general:1.2.3`` or ``ansible.builtin:2.10``
+ last-line-main-call Syntax Error Call to ``main()`` not the last line (or ``removed_module()`` in the case of deprecated & docs only modules)
+ missing-doc-fragment Documentation Error ``DOCUMENTATION`` fragment missing
+ missing-existing-doc-fragment Documentation Warning Pre-existing ``DOCUMENTATION`` fragment missing
+ missing-documentation Documentation Error No ``DOCUMENTATION`` provided
+ missing-examples Documentation Error No ``EXAMPLES`` provided
+ missing-gplv3-license Documentation Error GPLv3 license header not found
+ missing-if-name-main Syntax Error Next to last line is not ``if __name__ == "__main__":``
+ missing-main-call Syntax Error Did not find a call to ``main()`` (or ``removed_module()`` in the case of deprecated & docs only modules)
+ missing-module-utils-basic-import Imports Warning Did not find ``ansible.module_utils.basic`` import
+ missing-module-utils-import-csharp-requirements Imports Error No ``Ansible.ModuleUtils`` or C# Ansible util requirements/imports found
+ missing-powershell-interpreter Syntax Error Interpreter line is not ``#!powershell``
+ missing-python-doc Naming Error Missing python documentation file
+ missing-python-interpreter Syntax Error Interpreter line is not ``#!/usr/bin/python``
+ missing-return Documentation Error No ``RETURN`` documentation provided
+ missing-return-legacy Documentation Warning No ``RETURN`` documentation provided for legacy module
+ missing-suboption-docs Documentation Error Argument in argument_spec has sub-options but documentation does not define sub-options
+ module-incorrect-version-added Documentation Error Module level ``version_added`` is incorrect
+ module-invalid-version-added Documentation Error Module level ``version_added`` is not a valid version number
+ module-utils-specific-import Imports Error ``module_utils`` imports should import specific components, not ``*``
+ multiple-utils-per-requires Imports Error ``Ansible.ModuleUtils`` requirements do not support multiple modules per statement
+ multiple-csharp-utils-per-requires Imports Error Ansible C# util requirements do not support multiple utils per statement
+ no-default-for-required-parameter Documentation Error Option is marked as required but specifies a default. Arguments with a default should not be marked as required
+ nonexistent-parameter-documented Documentation Error Argument is listed in DOCUMENTATION.options, but not accepted by the module
+ option-incorrect-version-added Documentation Error ``version_added`` for new option is incorrect
+ option-invalid-version-added Documentation Error ``version_added`` for option is not a valid version number
+ parameter-invalid Documentation Error Argument in argument_spec is not a valid python identifier
+ parameter-invalid-elements Documentation Error Value for "elements" is valid only when value of "type" is ``list``
+ implied-parameter-type-mismatch Documentation Error Argument_spec implies ``type="str"`` but documentation defines it as different data type
+ parameter-type-not-in-doc Documentation Error Type value is defined in ``argument_spec`` but documentation doesn't specify a type
+ parameter-alias-repeated Parameters Error argument in argument_spec has at least one alias specified multiple times in aliases
+ parameter-alias-self Parameters Error argument in argument_spec is specified as its own alias
+ parameter-documented-multiple-times Documentation Error argument in argument_spec with aliases is documented multiple times
+ parameter-list-no-elements Parameters Error argument in argument_spec "type" is specified as ``list`` without defining "elements"
+ parameter-state-invalid-choice Parameters Error Argument ``state`` includes ``get``, ``list`` or ``info`` as a choice. Functionality should be in an ``_info`` or (if further conditions apply) ``_facts`` module.
+ python-syntax-error Syntax Error Python ``SyntaxError`` while parsing module
+ return-syntax-error Documentation Error ``RETURN`` is not valid YAML, ``RETURN`` fragments missing or invalid
+ return-invalid-version-added Documentation Error ``version_added`` for return value is not a valid version number
+ subdirectory-missing-init Naming Error Ansible module subdirectories must contain an ``__init__.py``
+ try-except-missing-has Imports Warning Try/Except ``HAS_`` expression missing
+ undocumented-parameter Documentation Error Argument is listed in the argument_spec, but not documented in the module
+ unidiomatic-typecheck Syntax Error Type comparison using ``type()`` found. Use ``isinstance()`` instead
+ unknown-doc-fragment Documentation Warning Unknown pre-existing ``DOCUMENTATION`` error
+ use-boto3 Imports Error ``boto`` import found, new modules should use ``boto3``
+ use-fail-json-not-sys-exit Imports Error ``sys.exit()`` call found. Should be ``exit_json``/``fail_json``
+ use-module-utils-urls Imports Error ``requests`` import found, should use ``ansible.module_utils.urls`` instead
+ use-run-command-not-os-call Imports Error ``os.call`` used instead of ``module.run_command``
+ use-run-command-not-popen Imports Error ``subprocess.Popen`` used instead of ``module.run_command``
+ use-short-gplv3-license Documentation Error GPLv3 license header should be the :ref:`short form <copyright>` for new modules
+ mutually_exclusive-type Documentation Error mutually_exclusive entry contains non-string value
+ mutually_exclusive-collision Documentation Error mutually_exclusive entry has repeated terms
+ mutually_exclusive-unknown Documentation Error mutually_exclusive entry contains option which does not appear in argument_spec (potentially an alias of an option?)
+ required_one_of-type Documentation Error required_one_of entry contains non-string value
+ required_one_of-collision Documentation Error required_one_of entry has repeated terms
+ required_one_of-unknown Documentation Error required_one_of entry contains option which does not appear in argument_spec (potentially an alias of an option?)
+ required_together-type Documentation Error required_together entry contains non-string value
+ required_together-collision Documentation Error required_together entry has repeated terms
+ required_together-unknown Documentation Error required_together entry contains option which does not appear in argument_spec (potentially an alias of an option?)
+ required_if-is_one_of-type Documentation Error required_if entry has a fourth value which is not a bool
+ required_if-requirements-type Documentation Error required_if entry has a third value (requirements) which is not a list or tuple
+ required_if-requirements-collision Documentation Error required_if entry has repeated terms in requirements
+ required_if-requirements-unknown Documentation Error required_if entry's requirements contains option which does not appear in argument_spec (potentially an alias of an option?)
+ required_if-unknown-key Documentation Error required_if entry's key does not appear in argument_spec (potentially an alias of an option?)
+ required_if-key-in-requirements Documentation Error required_if entry contains its key in requirements list/tuple
+ required_if-value-type Documentation Error required_if entry's value is not of the type specified for its key
+ required_by-collision Documentation Error required_by entry has repeated terms
+ required_by-unknown Documentation Error required_by entry contains option which does not appear in argument_spec (potentially an alias of an option?)
+============================================================ ================== ==================== =========================================================================================
diff --git a/docs/docsite/rst/galaxy/dev_guide.rst b/docs/docsite/rst/galaxy/dev_guide.rst
new file mode 100644
index 00000000..62530a5d
--- /dev/null
+++ b/docs/docsite/rst/galaxy/dev_guide.rst
@@ -0,0 +1,246 @@
+.. _developing_galaxy:
+
+**********************
+Galaxy Developer Guide
+**********************
+
+You can host collections and roles on Galaxy to share with the Ansible community. Galaxy content is formatted in pre-packaged units of work such as :ref:`roles <playbooks_reuse_roles>`, and new in Galaxy 3.2, :ref:`collections <collections>`.
+You can create roles for provisioning infrastructure, deploying applications, and all of the tasks you do every day. Taking this a step further, you can create collections which provide a comprehensive package of automation that may include multiple playbooks, roles, modules, and plugins.
+
+.. contents::
+ :local:
+ :depth: 2
+
+.. _creating_collections_galaxy:
+
+Creating collections for Galaxy
+===============================
+
+Collections are a distribution format for Ansible content. You can use collections to package and distribute playbooks, roles, modules, and plugins.
+You can publish and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_.
+
+See :ref:`developing_collections` for details on how to create collections.
+
+.. _creating_roles_galaxy:
+
+
+Creating roles for Galaxy
+=========================
+
+Use the ``init`` command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires.
+
+.. code-block:: bash
+
+ $ ansible-galaxy init role_name
+
+The above will create the following directory structure in the current working directory:
+
+.. code-block:: text
+
+ role_name/
+ README.md
+ .travis.yml
+ defaults/
+ main.yml
+ files/
+ handlers/
+ main.yml
+ meta/
+ main.yml
+ templates/
+ tests/
+ inventory
+ test.yml
+ vars/
+ main.yml
+
+If you want to create a repository for the role, the repository root should be ``role_name``.
+
+Force
+-----
+
+If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error,
+use the ``--force`` option. Force will create the above subdirectories and files, replacing anything that matches.
+
+Container enabled
+-----------------
+
+If you are creating a Container Enabled role, pass ``--type container`` to ``ansible-galaxy init``. This will create the same directory structure as above, but populate it
+with default files appropriate for a Container Enabled role. For instance, the README.md has a slightly different structure, the *.travis.yml* file tests
+the role using `Ansible Container <https://github.com/ansible/ansible-container>`_, and the meta directory includes a *container.yml* file.
+
+Using a custom role skeleton
+----------------------------
+
+A custom role skeleton directory can be supplied as follows:
+
+.. code-block:: bash
+
+ $ ansible-galaxy init --role-skeleton=/path/to/skeleton role_name
+
+When a skeleton is provided, init will:
+
+- copy all files and directories from the skeleton to the new role
+- render any ``.j2`` files found outside of a templates folder as templates; the only useful variable at the moment is ``role_name`` (see the sketch below)
+- skip the ``.git`` folder and any ``.git_keep`` files
+
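+As a sketch of the template step from the list above (using the ``jinja2`` library
+directly; the skeleton content is hypothetical):
+
+.. code-block:: python
+
+    from jinja2 import Environment
+
+    # stands in for the contents of a skeleton file such as README.md.j2
+    source = "# {{ role_name }}\n\nDocumentation for {{ role_name }}."
+
+    print(Environment().from_string(source).render(role_name='my_role'))
+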
+Alternatively, the role skeleton and the files to ignore can be configured in ``ansible.cfg``:
+
+.. code-block:: text
+
+ [galaxy]
+ role_skeleton = /path/to/skeleton
+ role_skeleton_ignore = ^.git$,^.*/.git_keep$
+
+Authenticate with Galaxy
+------------------------
+
+Using the ``import``, ``delete`` and ``setup`` commands to manage your roles on the Galaxy website requires authentication, and the ``login`` command
+can be used to do just that. Before you can use the ``login`` command, you must create an account on the Galaxy website.
+
+The ``login`` command requires using your GitHub credentials. You can use your username and password, or you can create a `personal access token <https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_. If you choose to create a token, grant minimal access to the token, as it is used just to verify identity.
+
+The following shows authenticating with the Galaxy website using a GitHub username and password:
+
+.. code-block:: text
+
+ $ ansible-galaxy login
+
+ We need your GitHub login to identify you.
+ This information will not be sent to Galaxy, only to api.github.com.
+ The password will not be displayed.
+
+ Use --github-token if you do not want to enter your password.
+
+ GitHub Username: dsmith
+ Password for dsmith:
+ Successfully logged into Galaxy as dsmith
+
+When you choose to use your username and password, your password is not sent to Galaxy. It is used to authenticate with GitHub and create a personal access token.
+It then sends the token to Galaxy, which in turn verifies your identity and returns a Galaxy access token. After authentication completes, the GitHub token is
+destroyed.
+
+If you do not want to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the ``--github-token`` option to pass a personal access token that you create.
+
+
+Import a role
+-------------
+
+The ``import`` command requires that you first authenticate using the ``login`` command. Once authenticated you can import any GitHub repository that you own or have been granted access to.
+
+Use the following to import a role:
+
+.. code-block:: bash
+
+ $ ansible-galaxy import github_user github_repo
+
+By default the command will wait for Galaxy to complete the import process, displaying the results as the import progresses:
+
+.. code-block:: text
+
+ Successfully submitted import request 41
+ Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref=
+ Retrieving GitHub repo githubuser/ansible-role-repo
+ Accessing branch: master
+ Parsing and validating meta/main.yml
+ Parsing galaxy_tags
+ Parsing platforms
+ Adding dependencies
+ Parsing and validating README.md
+ Adding repo tags as role versions
+ Import completed
+ Status SUCCESS : warnings=0 errors=0
+
+Branch
+^^^^^^
+
+Use the ``--branch`` option to import a specific branch. If not specified, the default branch for the repo will be used.
+
+Role name
+^^^^^^^^^
+
+By default the name given to the role will be derived from the GitHub repository name. However, you can use the ``--role-name`` option to override this and set the name.
+
+No wait
+^^^^^^^
+
+If the ``--no-wait`` option is present, the command will not wait for results. Results of the most recent import for any of your roles are available on the Galaxy web site by visiting *My Imports*.
+
+Delete a role
+-------------
+
+The ``delete`` command requires that you first authenticate using the ``login`` command. Once authenticated you can remove a role from the Galaxy web site. You are only allowed to remove roles where you have access to the repository in GitHub.
+
+Use the following to delete a role:
+
+.. code-block:: bash
+
+ $ ansible-galaxy delete github_user github_repo
+
+This only removes the role from Galaxy. It does not remove or alter the actual GitHub repository.
+
+
+Travis integrations
+-------------------
+
+You can create an integration or connection between a role in Galaxy and `Travis <https://travis-ci.org>`_. Once the connection is established, a build in Travis will
+automatically trigger an import in Galaxy, updating the search index with the latest information about the role.
+
+You create the integration using the ``setup`` command, but before an integration can be created, you must first authenticate using the ``login`` command; you will
+also need an account in Travis, and your Travis token. Once you're ready, use the following command to create the integration:
+
+.. code-block:: bash
+
+ $ ansible-galaxy setup travis github_user github_repo xxx-travis-token-xxx
+
+The setup command requires your Travis token; however, the token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described
+in `the Travis documentation <https://docs.travis-ci.com/user/notifications/>`_. The hash is stored in Galaxy and used to verify notifications received from Travis.
+
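+As a hedged sketch of that scheme (check the Travis documentation for the exact digest
+before relying on it):
+
+.. code-block:: python
+
+    import hashlib
+
+    def travis_signature(repo_slug, travis_token):
+        # per the Travis notification docs: SHA-256 of "owner/repo" + token
+        return hashlib.sha256((repo_slug + travis_token).encode('utf-8')).hexdigest()
+
+    print(travis_signature('github_user/github_repo', 'xxx-travis-token-xxx'))
+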
+The setup command enables Galaxy to respond to notifications. To configure Travis to run a build on your repository and send a notification, follow the
+`Travis getting started guide <https://docs.travis-ci.com/user/getting-started/>`_.
+
+To instruct Travis to notify Galaxy when a build completes, add the following to your .travis.yml file:
+
+.. code-block:: text
+
+ notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/
+
+
+List Travis integrations
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Use the ``--list`` option to display your Travis integrations:
+
+.. code-block:: bash
+
+ $ ansible-galaxy setup --list
+
+
+ ID Source Repo
+ ---------- ---------- ----------
+ 2 travis github_user/github_repo
+ 1 travis github_user/github_repo
+
+
+Remove Travis integrations
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Use the ``--remove`` option to disable and remove a Travis integration:
+
+.. code-block:: bash
+
+ $ ansible-galaxy setup --remove ID
+
+Provide the ID of the integration to be disabled. You can find the ID by using the ``--list`` option.
+
+
+.. seealso::
+ :ref:`collections`
+ Shareable collections of modules, playbooks and roles
+ :ref:`playbooks_reuse_roles`
+ All about Ansible roles
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/galaxy/user_guide.rst b/docs/docsite/rst/galaxy/user_guide.rst
new file mode 100644
index 00000000..85b20c07
--- /dev/null
+++ b/docs/docsite/rst/galaxy/user_guide.rst
@@ -0,0 +1,493 @@
+.. _using_galaxy:
+.. _ansible_galaxy:
+
+*****************
+Galaxy User Guide
+*****************
+
+:dfn:`Ansible Galaxy` refers to the `Galaxy <https://galaxy.ansible.com>`_ website, a free site for finding, downloading, and sharing community developed roles.
+
+Use Galaxy to jump-start your automation project with great content from the Ansible community. Galaxy provides pre-packaged units of work such as :ref:`roles <playbooks_reuse_roles>`, and new in Galaxy 3.2, :ref:`collections <collections>`.
+You can find roles for provisioning infrastructure, deploying applications, and all of the tasks you do every day. The collection format provides a comprehensive package of automation that may include multiple playbooks, roles, modules, and plugins.
+
+.. contents::
+ :local:
+ :depth: 2
+.. _finding_galaxy_collections:
+
+Finding collections on Galaxy
+=============================
+
+To find collections on Galaxy:
+
+#. Click the :guilabel:`Search` icon in the left-hand navigation.
+#. Set the filter to *collection*.
+#. Set other filters and press :guilabel:`enter`.
+
+Galaxy presents a list of collections that match your search criteria.
+
+.. _installing_galaxy_collections:
+
+
+Installing collections
+======================
+
+
+Installing a collection from Galaxy
+-----------------------------------
+
+.. include:: ../shared_snippets/installing_collections.txt
+
+.. _installing_ah_collection:
+
+Downloading a collection from Automation Hub
+----------------------------------------------------
+
+You can download collections from Automation Hub at the command line. Automation Hub content is available to subscribers only, so you must download an API token and configure your local environment to provide it before you can download collections. To download a collection from Automation Hub with the ``ansible-galaxy`` command:
+
+1. Get your Automation Hub API token. Go to https://cloud.redhat.com/ansible/automation-hub/token/ and click :guilabel:`Get API token` from the version dropdown to copy your API token.
+2. Configure the Red Hat Automation Hub server in the ``server_list`` option under the ``[galaxy]`` section of your :file:`ansible.cfg` file.
+
+ .. code-block:: ini
+
+ [galaxy]
+ server_list = automation_hub
+
+ [galaxy_server.automation_hub]
+ url=https://cloud.redhat.com/api/automation-hub/
+ auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
+ token=my_ah_token
+
+3. Download the collection hosted in Automation Hub.
+
+ .. code-block:: bash
+
+ ansible-galaxy collection install my_namespace.my_collection
+
+.. seealso::
+ `Getting started with Automation Hub <https://www.ansible.com/blog/getting-started-with-ansible-hub>`_
+ An introduction to Automation Hub
+
+Installing an older version of a collection
+-------------------------------------------
+
+.. include:: ../shared_snippets/installing_older_collection.txt
+
+Install multiple collections with a requirements file
+-----------------------------------------------------
+
+.. include:: ../shared_snippets/installing_multiple_collections.txt
+
+Downloading a collection for offline use
+-----------------------------------------
+
+.. include:: ../shared_snippets/download_tarball_collections.txt
+
+Installing a collection from a git repository
+---------------------------------------------
+
+.. include:: ../shared_snippets/installing_collections_git_repo.txt
+
+Listing installed collections
+-----------------------------
+
+To list installed collections, run ``ansible-galaxy collection list``. See :ref:`collections_listing` for more details.
+
+
+Configuring the ``ansible-galaxy`` client
+------------------------------------------
+
+.. include:: ../shared_snippets/galaxy_server_list.txt
+
+.. _finding_galaxy_roles:
+
+Finding roles on Galaxy
+=======================
+
+Search the Galaxy database by tags, platforms, author and multiple keywords. For example:
+
+.. code-block:: bash
+
+ $ ansible-galaxy search elasticsearch --author geerlingguy
+
+The search command will return a list of the first 1000 results matching your search:
+
+.. code-block:: text
+
+ Found 2 roles matching your search:
+
+ Name Description
+ ---- -----------
+ geerlingguy.elasticsearch Elasticsearch for Linux.
+ geerlingguy.elasticsearch-curator Elasticsearch curator for Linux.
+
+
+Get more information about a role
+---------------------------------
+
+Use the ``info`` command to view more detail about a specific role:
+
+.. code-block:: bash
+
+ $ ansible-galaxy info username.role_name
+
+This returns everything found in Galaxy for the role:
+
+.. code-block:: text
+
+ Role: username.role_name
+ description: Installs and configures a thing, a distributed, highly available NoSQL thing.
+ active: True
+ commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57
+ commit_message: Adding travis
+ commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab
+ company: My Company, Inc.
+ created: 2015-12-08T14:17:52.773Z
+ download_count: 1
+ forks_count: 0
+ github_branch:
+ github_repo: repo_name
+ github_user: username
+ id: 6381
+ is_valid: True
+ issue_tracker_url:
+ license: Apache
+ min_ansible_version: 1.4
+ modified: 2015-12-08T18:43:49.085Z
+ namespace: username
+ open_issues_count: 0
+ path: /Users/username/projects/roles
+ scm: None
+ src: username.repo_name
+ stargazers_count: 0
+ travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master
+ version:
+ watchers_count: 1
+
+
+.. _installing_galaxy_roles:
+
+Installing roles from Galaxy
+============================
+
+The ``ansible-galaxy`` command comes bundled with Ansible, and you can use it to install roles from Galaxy or directly from a git-based SCM. You can
+also use it to create a new role, remove roles, or perform tasks on the Galaxy website.
+
+The command line tool by default communicates with the Galaxy website API using the server address *https://galaxy.ansible.com*. If you run your own internal Galaxy server
+and want to use it instead of the default one, pass the ``--server`` option followed by the address of that Galaxy server. You can make this setting permanent by setting
+the Galaxy server value in your ``ansible.cfg`` file. For information on setting the value in *ansible.cfg*, see :ref:`galaxy_server`.
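+
+For example, to install a role from your own Galaxy server (the URL below is a placeholder for your server's address):
+
+.. code-block:: bash
+
+ $ ansible-galaxy install --server https://galaxy.example.com namespace.role_name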
+
+
+Installing roles
+----------------
+
+Use the ``ansible-galaxy`` command to download roles from the `Galaxy website <https://galaxy.ansible.com>`_:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install namespace.role_name
+
+Setting where to install roles
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, Ansible downloads roles to the first writable directory in the default list of paths ``~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles``. This installs roles in the home directory of the user running ``ansible-galaxy``.
+
+You can override this with one of the following options:
+
+* Set the environment variable :envvar:`ANSIBLE_ROLES_PATH` in your session.
+* Use the ``--roles-path`` option for the ``ansible-galaxy`` command.
+* Define ``roles_path`` in an ``ansible.cfg`` file.
+
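+For example, the environment variable approach for the current session might look like this (the ``./roles`` path is only illustrative):
+
+.. code-block:: bash
+
+ $ export ANSIBLE_ROLES_PATH=./roles
+ $ ansible-galaxy install namespace.role_name
+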
+The following provides an example of using ``--roles-path`` to install the role into the current working directory:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install --roles-path . geerlingguy.apache
+
+.. seealso::
+
+ :ref:`intro_configuration`
+ All about configuration files
+
+Installing a specific version of a role
+---------------------------------------
+
+When the Galaxy server imports a role, it imports any git tags matching the `Semantic Version <https://semver.org/>`_ format as versions.
+In turn, you can download a specific version of a role by specifying one of the imported tags.
+
+To see the available versions for a role:
+
+#. Locate the role on the Galaxy search page.
+#. Click on the name to view more details, including the available versions.
+
+You can also navigate directly to the role using the path */<namespace>/<role name>*. For example, to view the role geerlingguy.apache, go to `<https://galaxy.ansible.com/geerlingguy/apache>`_.
+
+To install a specific version of a role from Galaxy, append a comma and the value of a GitHub release tag. For example:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install geerlingguy.apache,v1.0.0
+
+It is also possible to point directly to the git repository and specify a branch name or commit hash as the version. For example, the following will
+install a specific commit:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install git+https://github.com/geerlingguy/ansible-role-apache.git,0b7cd353c0250e87a26e0499e59e7fd265cc2f25
+
+Installing multiple roles from a file
+-------------------------------------
+
+You can install multiple roles by including the roles in a :file:`requirements.yml` file. The format of the file is YAML, and the
+file extension must be either *.yml* or *.yaml*.
+
+Use the following command to install roles included in :file:`requirements.yml`:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install -r requirements.yml
+
+Again, the extension is important. If the *.yml* extension is left off, the ``ansible-galaxy`` CLI assumes the file is in an older, now deprecated,
+"basic" format.
+
+Each role in the file will have one or more of the following attributes:
+
+ src
+ The source of the role. Use the format *namespace.role_name*, if downloading from Galaxy; otherwise, provide a URL pointing
+ to a repository within a git-based SCM. See the examples below. This is a required attribute.
+ scm
+ Specify the SCM. As of this writing only *git* or *hg* are allowed. See the examples below. Defaults to *git*.
+ version
+ The version of the role to download. Provide a release tag value, commit hash, or branch name. Defaults to the branch set as a default in the repository, otherwise defaults to *master*.
+ name
+ Download the role to a specific name. Defaults to the Galaxy name when downloading from Galaxy, otherwise it defaults
+ to the name of the repository.
+
+Use the following example as a guide for specifying roles in *requirements.yml*:
+
+.. code-block:: yaml
+
+ # from galaxy
+ - name: yatesr.timezone
+
+ # from locally cloned git repository (file:// requires full paths)
+ - src: file:///home/bennojoy/nginx
+
+ # from GitHub
+ - src: https://github.com/bennojoy/nginx
+
+ # from GitHub, overriding the name and specifying a specific tag
+ - name: nginx_role
+ src: https://github.com/bennojoy/nginx
+ version: master
+
+ # from GitHub, specifying a specific commit hash
+ - src: https://github.com/bennojoy/nginx
+ version: "ee8aa41"
+
+ # from a webserver, where the role is packaged in a tar.gz
+ - name: http-role-gz
+ src: https://some.webserver.example.com/files/master.tar.gz
+
+ # from a webserver, where the role is packaged in a tar.bz2
+ - name: http-role-bz2
+ src: https://some.webserver.example.com/files/master.tar.bz2
+
+ # from a webserver, where the role is packaged in a tar.xz (Python 3.x only)
+ - name: http-role-xz
+ src: https://some.webserver.example.com/files/master.tar.xz
+
+ # from Bitbucket
+ - src: git+https://bitbucket.org/willthames/git-ansible-galaxy
+ version: v1.4
+
+ # from Bitbucket, alternative syntax and caveats
+ - src: https://bitbucket.org/willthames/hg-ansible-galaxy
+ scm: hg
+
+ # from GitLab or other git-based scm, using git+ssh
+ - src: git@gitlab.company.com:mygroup/ansible-base.git
+ scm: git
+ version: "0.1" # quoted, so YAML doesn't parse this as a floating-point value
+
+.. warning::
+
+ Embedding credentials into a SCM URL is not secure. Make sure to use safe auth options for security reasons. For example, use `SSH <https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh>`_, `netrc <https://linux.die.net/man/5/netrc>`_ or `http.extraHeader <https://git-scm.com/docs/git-config#Documentation/git-config.txt-httpextraHeader>`_/`url.<base>.pushInsteadOf <https://git-scm.com/docs/git-config#Documentation/git-config.txt-urlltbasegtpushInsteadOf>`_ in Git config to prevent your creds from being exposed in logs.
+
+Installing roles and collections from the same requirements.yml file
+---------------------------------------------------------------------
+
+You can install roles and collections from the same requirements file, with some caveats.
+
+.. code-block:: yaml
+
+ ---
+ roles:
+ # Install a role from Ansible Galaxy.
+ - name: geerlingguy.java
+ version: 1.9.6
+
+ collections:
+ # Install a collection from Ansible Galaxy.
+ - name: geerlingguy.php_roles
+ version: 0.9.3
+ source: https://galaxy.ansible.com
+
+.. note::
+ While both roles and collections can be specified in one requirements file, they need to be installed separately.
+ ``ansible-galaxy role install -r requirements.yml`` installs only the roles, and ``ansible-galaxy collection install -r requirements.yml -p ./`` installs only the collections.
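+
+For example, to install everything declared in the file above, run the two commands back to back:
+
+.. code-block:: bash
+
+ $ ansible-galaxy role install -r requirements.yml
+ $ ansible-galaxy collection install -r requirements.yml -p ./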
+
+Installing multiple roles from multiple files
+---------------------------------------------
+
+For large projects, the ``include`` directive in a :file:`requirements.yml` file provides the ability to split a large file into multiple smaller files.
+
+For example, a project may have a :file:`requirements.yml` file, and a :file:`webserver.yml` file.
+
+Below are the contents of the :file:`webserver.yml` file:
+
+.. code-block:: yaml
+
+ # from github
+ - src: https://github.com/bennojoy/nginx
+
+ # from Bitbucket
+ - src: git+http://bitbucket.org/willthames/git-ansible-galaxy
+ version: v1.4
+
+The following shows the contents of the :file:`requirements.yml` file that now includes the :file:`webserver.yml` file:
+
+.. code-block:: yaml
+
+ # from galaxy
+ - name: yatesr.timezone
+ - include: <path_to_requirements>/webserver.yml
+
+To install all the roles from both files, pass the root file, in this case :file:`requirements.yml` on the
+command line, as follows:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install -r requirements.yml
+
+.. _galaxy_dependencies:
+
+Dependencies
+------------
+
+Roles can also be dependent on other roles, and when you install a role that has dependencies, those dependencies will automatically be installed to the ``roles_path``.
+
+There are two ways to define the dependencies of a role:
+
+* using ``meta/requirements.yml``
+* using ``meta/main.yml``
+
+Using ``meta/requirements.yml``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.10
+
+You can create the file ``meta/requirements.yml`` and define dependencies in the same format used for :file:`requirements.yml` described in the `Installing multiple roles from a file`_ section.
+
+From there, you can import or include the specified roles in your tasks.
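+
+A minimal sketch of such a file (the role shown is only an example):
+
+.. code-block:: yaml
+
+ # meta/requirements.yml
+ - src: geerlingguy.java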
+
+Using ``meta/main.yml``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Alternatively, you can specify role dependencies in the ``meta/main.yml`` file by providing a list of roles under the ``dependencies`` section. If the source of a role is Galaxy, you can simply specify the role in
+the format ``namespace.role_name``. You can also use the more complex format in :file:`requirements.yml`, allowing you to provide ``src``, ``scm``, ``version``, and ``name``.
+
+Dependencies installed this way, depending on other factors described below, will also be executed **before** this role during play execution.
+To better understand how dependencies are handled during play execution, see :ref:`playbooks_reuse_roles`.
+
+The following shows an example ``meta/main.yml`` file with dependent roles:
+
+.. code-block:: yaml
+
+ ---
+ dependencies:
+ - geerlingguy.java
+
+ galaxy_info:
+ author: geerlingguy
+ description: Elasticsearch for Linux.
+ company: "Midwestern Mac, LLC"
+ license: "license (BSD, MIT)"
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - all
+ - name: Debian
+ versions:
+ - all
+ - name: Ubuntu
+ versions:
+ - all
+ galaxy_tags:
+ - web
+ - system
+ - monitoring
+ - logging
+ - lucene
+ - elk
+ - elasticsearch
+
+Tags are inherited *down* the dependency chain. In order for tags to be applied to a role and all its dependencies, the tag should be applied to the role, not to all the tasks within a role.
+
+Roles listed as dependencies are subject to conditionals and tag filtering, and may not execute fully depending on
+what tags and conditionals are applied.
+
+If the source of a role is Galaxy, specify the role in the format *namespace.role_name*:
+
+.. code-block:: yaml
+
+ dependencies:
+ - geerlingguy.apache
+ - geerlingguy.ansible
+
+
+Alternately, you can specify the role dependencies in the complex form used in :file:`requirements.yml` as follows:
+
+.. code-block:: yaml
+
+ dependencies:
+ - name: geerlingguy.ansible
+ - name: composer
+ src: git+https://github.com/geerlingguy/ansible-role-composer.git
+ version: 775396299f2da1f519f0d8885022ca2d6ee80ee8
+
+.. note::
+
+ Galaxy expects all role dependencies to exist in Galaxy, and therefore dependencies to be specified in the
+ ``namespace.role_name`` format. If you import a role with a dependency where the ``src`` value is a URL, the import process will fail.
+
+List installed roles
+--------------------
+
+Use ``list`` to show the name and version of each role installed in the *roles_path*.
+
+.. code-block:: bash
+
+ $ ansible-galaxy list
+ - ansible-network.network-engine, v2.7.2
+ - ansible-network.config_manager, v2.6.2
+ - ansible-network.cisco_nxos, v2.7.1
+ - ansible-network.vyos, v2.7.3
+ - ansible-network.cisco_ios, v2.7.0
+
+Remove an installed role
+------------------------
+
+Use ``remove`` to delete a role from *roles_path*:
+
+.. code-block:: bash
+
+ $ ansible-galaxy remove namespace.role_name
+
+
+.. seealso::
+ :ref:`collections`
+ Shareable collections of modules, playbooks and roles
+ :ref:`playbooks_reuse_roles`
+ Reusable tasks, handlers, and other files in a known directory structure
diff --git a/docs/docsite/rst/images/cow.png b/docs/docsite/rst/images/cow.png
new file mode 100644
index 00000000..9ace4401
--- /dev/null
+++ b/docs/docsite/rst/images/cow.png
Binary files differ
diff --git a/docs/docsite/rst/index.rst b/docs/docsite/rst/index.rst
new file mode 100644
index 00000000..4c99b502
--- /dev/null
+++ b/docs/docsite/rst/index.rst
@@ -0,0 +1,105 @@
+.. _ansible_documentation:
+
+Ansible Documentation
+=====================
+
+About Ansible
+`````````````
+
+Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates.
+
+Ansible's main goals are simplicity and ease-of-use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with other transports and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program.
+
+We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances.
+
+You can learn more at `AnsibleFest <https://www.ansible.com/ansiblefest>`_, the annual event for all Ansible contributors, users, and customers hosted by Red Hat. AnsibleFest is the place to connect with others, learn new skills, and find a new friend to automate with.
+
+Ansible manages machines in an agent-less manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems.
+
+This documentation covers the version of Ansible noted in the upper left corner of this page. We maintain multiple versions of Ansible and of the documentation, so please be sure you are using the version of the documentation that covers the version of Ansible you're using. For recent features, we note the version of Ansible where the feature was added.
+
+Ansible produces a new major release approximately three to four times per year. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. Contributors develop and change modules and plugins, hosted in collections since version 2.10, much more quickly.
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Installation, Upgrade & Configuration
+
+ installation_guide/index
+ porting_guides/porting_guides
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Using Ansible
+
+ user_guide/index
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contributing to Ansible
+
+ community/index
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Extending Ansible
+
+ dev_guide/index
+
+.. toctree::
+ :glob:
+ :maxdepth: 1
+ :caption: Common Ansible Scenarios
+
+ scenario_guides/cloud_guides
+ scenario_guides/network_guides
+ scenario_guides/virt_guides
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Network Automation
+
+ network/getting_started/index
+ network/user_guide/index
+ network/dev_guide/index
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Ansible Galaxy
+
+ galaxy/user_guide.rst
+ galaxy/dev_guide.rst
+
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Reference & Appendices
+
+ collections/index
+ reference_appendices/playbooks_keywords
+ reference_appendices/common_return_values
+ reference_appendices/config
+ reference_appendices/general_precedence
+ reference_appendices/YAMLSyntax
+ reference_appendices/python_3_support
+ reference_appendices/interpreter_discovery
+ reference_appendices/release_and_maintenance
+ reference_appendices/test_strategies
+ dev_guide/testing/sanity/index
+ reference_appendices/faq
+ reference_appendices/glossary
+ reference_appendices/module_utils
+ reference_appendices/special_variables
+ reference_appendices/tower
+ reference_appendices/automationhub
+ reference_appendices/logging
+
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Release Notes
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Roadmaps
+
+ roadmap/index.rst
diff --git a/docs/docsite/rst/installation_guide/index.rst b/docs/docsite/rst/installation_guide/index.rst
new file mode 100644
index 00000000..8a7f41db
--- /dev/null
+++ b/docs/docsite/rst/installation_guide/index.rst
@@ -0,0 +1,13 @@
+******************
+Installation Guide
+******************
+
+Welcome to the Ansible Installation Guide!
+
+
+.. toctree::
+ :maxdepth: 2
+
+ intro_installation
+ intro_configuration
+
diff --git a/docs/docsite/rst/installation_guide/intro_configuration.rst b/docs/docsite/rst/installation_guide/intro_configuration.rst
new file mode 100644
index 00000000..131c6c44
--- /dev/null
+++ b/docs/docsite/rst/installation_guide/intro_configuration.rst
@@ -0,0 +1,59 @@
+.. _intro_configuration:
+
+*******************
+Configuring Ansible
+*******************
+
+.. contents:: Topics
+
+
+This topic describes how to control Ansible settings.
+
+
+.. _the_configuration_file:
+
+Configuration file
+==================
+
+Certain settings in Ansible are adjustable via a configuration file (``ansible.cfg``).
+The stock configuration should be sufficient for most users, but there may be reasons you would want to change it.
+The paths searched for the configuration file are listed in the :ref:`reference documentation<ansible_configuration_settings_locations>`.
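+
+For example, a minimal ``ansible.cfg`` might override just a couple of defaults (the values shown are only illustrative):
+
+.. code-block:: ini
+
+ [defaults]
+ inventory = ./inventory
+ roles_path = ./roles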
+
+.. _getting_the_latest_configuration:
+
+Getting the latest configuration
+--------------------------------
+
+If installing Ansible from a package manager, the latest ``ansible.cfg`` file should be present in ``/etc/ansible``, possibly
+as a ``.rpmnew`` file (or similar) in the case of updates.
+
+If you installed Ansible from pip or from source, you may want to create this file in order to override
+default settings in Ansible.
+
+An `example file is available on GitHub <https://github.com/ansible/ansible/blob/devel/examples/ansible.cfg>`_.
+
+For more details and a full listing of available configurations go to :ref:`configuration_settings<ansible_configuration_settings>`. Starting with Ansible version 2.4, you can use the :ref:`ansible-config` command line utility to list your available options and inspect the current values.
+
+For in-depth details, see :ref:`ansible_configuration_settings`.
+
+.. _environmental_configuration:
+
+Environmental configuration
+===========================
+
+Ansible also allows configuration of settings using environment variables.
+If these environment variables are set, they will override any setting loaded from the configuration file.
+
+You can get a full listing of available environment variables from :ref:`ansible_configuration_settings`.
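+
+For example, to point Ansible at a different inventory file for your current session:
+
+.. code-block:: bash
+
+ $ export ANSIBLE_INVENTORY=~/ansible_hosts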
+
+
+.. _command_line_configuration:
+
+Command line options
+====================
+
+Not all configuration options are present in the command line, just the ones deemed most useful or common.
+Settings in the command line will override those passed through the configuration file and the environment.
+
+The full list of options available is in :ref:`ansible-playbook` and :ref:`ansible`.
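+
+For example, ``-i`` overrides the configured inventory location for a single run (the playbook name below is a placeholder):
+
+.. code-block:: bash
+
+ $ ansible-playbook -i ./staging-inventory site.yml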
+
diff --git a/docs/docsite/rst/installation_guide/intro_installation.rst b/docs/docsite/rst/installation_guide/intro_installation.rst
new file mode 100644
index 00000000..eca7a92a
--- /dev/null
+++ b/docs/docsite/rst/installation_guide/intro_installation.rst
@@ -0,0 +1,608 @@
+.. _installation_guide:
+.. _intro_installation_guide:
+
+Installing Ansible
+===================
+
+This page describes how to install Ansible on different platforms.
+Ansible is an agentless automation tool that by default manages machines over the SSH protocol. Once installed, Ansible does
+not add a database, and there will be no daemons to start or keep running. You only need to install it on one machine (which could easily be a laptop) and it can manage an entire fleet of remote machines from that central point. When Ansible manages remote machines, it does not leave software installed or running on them, so there's no real question about how to upgrade Ansible when moving to a new version.
+
+
+.. contents::
+ :local:
+
+Prerequisites
+--------------
+
+You install Ansible on a control node, which then uses SSH (by default) to communicate with your managed nodes (those end devices you want to automate).
+
+.. _control_node_requirements:
+
+Control node requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Currently Ansible can be run from any machine with Python 2 (version 2.7) or Python 3 (versions 3.5 and higher) installed.
+This includes Red Hat, Debian, CentOS, macOS, any of the BSDs, and so on.
+Windows is not supported for the control node; read more about this in `Matt Davis's blog post <http://blog.rolpdog.com/2020/03/why-no-ansible-controller-for-windows.html>`_.
+
+When choosing a control node, bear in mind that any management system benefits from being run near the machines being managed. If you are running Ansible in a cloud, consider running it from a machine inside that cloud. In most cases this will work better than on the open Internet.
+
+.. note::
+
+ macOS by default is configured for a small number of file handles, so if you want to use 15 or more forks you'll need to raise the ulimit with ``sudo launchctl limit maxfiles unlimited``. This command can also fix any "Too many open files" error.
+
+
+.. warning::
+
+ Please note that some modules and plugins have additional requirements. For modules these need to be satisfied on the 'target' machine (the managed node) and should be listed in the module specific docs.
+
+.. _managed_node_requirements:
+
+Managed node requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On the managed nodes, you need a way to communicate, which is normally SSH. By
+default this uses SFTP. If that's not available, you can switch to SCP in
+:ref:`ansible.cfg <ansible_configuration_settings>`. You also need Python 2 (version 2.6 or later) or Python 3 (version 3.5 or
+later).
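+
+For example, one way to switch file transfers from SFTP to SCP is a snippet like this in ``ansible.cfg`` (``scp_if_ssh`` is the setting used for this in Ansible 2.10):
+
+.. code-block:: ini
+
+ [ssh_connection]
+ scp_if_ssh = True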
+
+.. note::
+
+ * If you have SELinux enabled on remote nodes, you will also want to install
+ libselinux-python on them before using any copy/file/template related functions in Ansible. You
+ can use the :ref:`yum module<yum_module>` or :ref:`dnf module<dnf_module>` in Ansible to install this package on remote systems
+ that do not have it.
+
+ * By default, before the first Python module in a playbook runs on a host, Ansible attempts to discover a suitable Python interpreter on that host. You can override the discovery behavior by setting the :ref:`ansible_python_interpreter<ansible_python_interpreter>` inventory variable to a specific interpreter, and in other ways. See :ref:`interpreter_discovery` for details.
+
+ * Ansible's :ref:`raw module<raw_module>`, and the :ref:`script module<script_module>`, do not depend
+ on a client side install of Python to run. Technically, you can use Ansible to install a compatible
+ version of Python using the :ref:`raw module<raw_module>`, which then allows you to use everything else.
+ For example, if you need to bootstrap Python 2 onto a RHEL-based system, you can install it
+ as follows:
+
+ .. code-block:: shell
+
+ $ ansible myhost --become -m raw -a "yum install -y python2"
+
+.. _what_version:
+
+Selecting an Ansible version to install
+---------------------------------------
+
+Which Ansible version to install is based on your particular needs. You can choose any of the following ways to install Ansible:
+
+* Install the latest release with your OS package manager (for Red Hat Enterprise Linux (TM), CentOS, Fedora, Debian, or Ubuntu).
+* Install with ``pip`` (the Python package manager).
+* Install ``ansible-base`` from source to access the development (``devel``) version to develop or test the latest features.
+
+.. note::
+
+ You should only run ``ansible-base`` from ``devel`` if you are modifying ``ansible-base``, or trying out features under development. This is a rapidly changing source of code and can become unstable at any point.
+
+
+Ansible creates new releases two to three times a year. Due to this short release cycle,
+minor bugs will generally be fixed in the next release rather than being backported to the stable branch.
+Major bugs will still have maintenance releases when needed, though these are infrequent.
+
+
+.. _installing_the_control_node:
+.. _from_yum:
+
+Installing Ansible on RHEL, CentOS, or Fedora
+----------------------------------------------
+
+On Fedora:
+
+.. code-block:: bash
+
+ $ sudo dnf install ansible
+
+On RHEL and CentOS:
+
+.. code-block:: bash
+
+ $ sudo yum install ansible
+
+RPMs for RHEL 7 and RHEL 8 are available from the `Ansible Engine repository <https://access.redhat.com/articles/3174981>`_.
+
+To enable the Ansible Engine repository for RHEL 8, run the following command:
+
+.. code-block:: bash
+
+ $ sudo subscription-manager repos --enable ansible-2.9-for-rhel-8-x86_64-rpms
+
+To enable the Ansible Engine repository for RHEL 7, run the following command:
+
+.. code-block:: bash
+
+ $ sudo subscription-manager repos --enable rhel-7-server-ansible-2.9-rpms
+
+RPMs for currently supported versions of RHEL and CentOS are also available from `EPEL <https://fedoraproject.org/wiki/EPEL>`_.
+
+.. note::
+
+ Since Ansible 2.10 for RHEL is not available at this time, continue to use Ansible 2.9.
+
+Ansible can manage older operating systems that contain Python 2.6 or higher.
+
+.. _from_apt:
+
+Installing Ansible on Ubuntu
+----------------------------
+
+Ubuntu builds are available `in a PPA here <https://launchpad.net/~ansible/+archive/ubuntu/ansible>`_.
+
+To configure the PPA on your machine and install Ansible run these commands:
+
+.. code-block:: bash
+
+ $ sudo apt update
+ $ sudo apt install software-properties-common
+ $ sudo apt-add-repository --yes --update ppa:ansible/ansible
+ $ sudo apt install ansible
+
+.. note:: On older Ubuntu distributions, "software-properties-common" is called "python-software-properties". You may want to use ``apt-get`` instead of ``apt`` in older versions. Also, be aware that only newer distributions (in other words, 18.04, 18.10, and so on) have a ``-u`` or ``--update`` flag, so adjust your script accordingly.
+
+To build Debian/Ubuntu packages from the source checkout, run:
+
+.. code-block:: bash
+
+ $ make deb
+
+You may also wish to run from source to get the development branch, which is covered below.
+
+Installing Ansible on Debian
+----------------------------
+
+Debian users may leverage the same source as the Ubuntu PPA.
+
+Add the following line to /etc/apt/sources.list:
+
+.. code-block:: bash
+
+ deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main
+
+Then run these commands:
+
+.. code-block:: bash
+
+ $ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367
+ $ sudo apt update
+ $ sudo apt install ansible
+
+.. note:: This method has been verified with the Trusty sources in Debian Jessie and Stretch but may not be supported in earlier versions. You may want to use ``apt-get`` instead of ``apt`` in older versions.
+
+Installing Ansible on Gentoo with portage
+-----------------------------------------
+
+.. code-block:: bash
+
+ $ emerge -av app-admin/ansible
+
+To install the newest version, you may need to unmask the Ansible package prior to emerging:
+
+.. code-block:: bash
+
+ $ echo 'app-admin/ansible' >> /etc/portage/package.accept_keywords
+
+Installing Ansible on FreeBSD
+-----------------------------
+
+Though Ansible works with both Python 2 and 3, FreeBSD has a different package for each Python version.
+To install, use one of the following:
+
+.. code-block:: bash
+
+ $ sudo pkg install py27-ansible
+
+or:
+
+.. code-block:: bash
+
+ $ sudo pkg install py36-ansible
+
+
+You may also wish to install from ports; to do so, run:
+
+.. code-block:: bash
+
+ $ sudo make -C /usr/ports/sysutils/ansible install
+
+You can also choose a specific version, for example ``ansible25``.
+
+Older versions of FreeBSD worked with something like this (substitute for your choice of package manager):
+
+.. code-block:: bash
+
+ $ sudo pkg install ansible
+
+.. _on_macos:
+
+Installing Ansible on macOS
+---------------------------
+
+The preferred way to install Ansible on a Mac is with ``pip``.
+
+The instructions can be found in :ref:`from_pip`. If you are running macOS version 10.12 or older, then you should upgrade to the latest ``pip`` to connect to the Python Package Index securely. It should be noted that pip must be run as a module on macOS, and the linked ``pip`` instructions will show you how to do that.
+
+.. note::
+
+ If you have Ansible 2.9 or older installed, you need to use ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it.
+
+If you are installing on macOS Mavericks (10.9), you may encounter some noise from your compiler. A workaround is to do the following::
+
+ $ CFLAGS=-Qunused-arguments CPPFLAGS=-Qunused-arguments pip install --user ansible
+
+
+.. _from_pkgutil:
+
+Installing Ansible on Solaris
+-----------------------------
+
+Ansible is available for Solaris as `SysV package from OpenCSW <https://www.opencsw.org/packages/ansible/>`_.
+
+.. code-block:: bash
+
+ # pkgadd -d http://get.opencsw.org/now
+ # /opt/csw/bin/pkgutil -i ansible
+
+.. _from_pacman:
+
+Installing Ansible on Arch Linux
+---------------------------------
+
+Ansible is available in the Community repository::
+
+ $ pacman -S ansible
+
+The AUR has a PKGBUILD for pulling directly from GitHub called `ansible-git <https://aur.archlinux.org/packages/ansible-git>`_.
+
+Also see the `Ansible <https://wiki.archlinux.org/index.php/Ansible>`_ page on the ArchWiki.
+
+.. _from_sbopkg:
+
+Installing Ansible on Slackware Linux
+-------------------------------------
+
+An Ansible build script is available in the `SlackBuilds.org <https://slackbuilds.org/apps/ansible/>`_ repository.
+It can be built and installed using `sbopkg <https://sbopkg.org/>`_.
+
+Create a queue with Ansible and all of its dependencies::
+
+ # sqg -p ansible
+
+Build and install the packages from the created queuefile (answer Q when sbopkg asks whether to use the queue or a single package)::
+
+ # sbopkg -k -i ansible
+
+.. _from_swupd:
+
+Installing Ansible on Clear Linux
+---------------------------------
+
+Ansible and its dependencies are available as part of the sysadmin host management bundle::
+
+ $ sudo swupd bundle-add sysadmin-hostmgmt
+
+Software updates are managed by the ``swupd`` tool::
+
+ $ sudo swupd update
+
+.. _from_pip:
+
+Installing Ansible with ``pip``
+--------------------------------
+
+Ansible can be installed with ``pip``, the Python package manager. If ``pip`` isn't already available for your version of Python, run the following commands to install it::
+
+ $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+ $ python get-pip.py --user
+
+.. note::
+
+ If you have Ansible 2.9 or older installed, you need to use ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it.
+
+Then install Ansible [1]_::
+
+ $ python -m pip install --user ansible
+
+In order to use the ``paramiko`` connection plugin or modules that require ``paramiko``, install the required module [2]_::
+
+ $ python -m pip install --user paramiko
+
+If you wish to install Ansible globally, run the following commands::
+
+ $ sudo python get-pip.py
+ $ sudo python -m pip install ansible
+
+.. note::
+
+ Running ``pip`` with ``sudo`` will make global changes to the system. Since ``pip`` does not coordinate with system package managers, it could make changes to your system that leave it in an inconsistent or non-functioning state. This is particularly true for macOS. Installing with ``--user`` is recommended unless you fully understand the implications of modifying global files on the system.
+
+.. note::
+
+ Older versions of ``pip`` default to http://pypi.python.org/simple, which no longer works.
+ Please make sure you have the latest version of ``pip`` before installing Ansible.
+ If you have an older version of ``pip`` installed, you can upgrade by following `pip's upgrade instructions <https://pip.pypa.io/en/stable/installing/#upgrading-pip>`_ .
+
+Upgrading Ansible from version 2.9 and older to version 2.10 or later
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Starting in version 2.10, Ansible is made of two packages. You need to first uninstall the old Ansible version (2.9 or earlier) before upgrading.
+If you do not uninstall the older version of Ansible, you will see the following message, and no change will be performed:
+
+.. code-block:: console
+
+ Cannot install ansible-base with a pre-existing ansible==2.x installation.
+
+ Installing ansible-base with ansible-2.9 or older currently installed with
+ pip is known to cause problems. Please uninstall ansible and install the new
+ version:
+
+ pip uninstall ansible
+ pip install ansible-base
+
+ ...
+
+As explained by the message, to upgrade you must first uninstall the currently installed version of Ansible and then install
+the latest version.
+
+.. code-block:: console
+
+ $ pip uninstall ansible
+ $ pip install ansible
+
+.. _from_pip_devel:
+
+Installing the development version of ``ansible-base``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In Ansible 2.10 and later, the `ansible/ansible repository <https://github.com/ansible/ansible>`_ contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-base``.
+
+.. note::
+
+ You should only run ``ansible-base`` from ``devel`` if you are modifying ``ansible-base`` or trying out features under development. This is a rapidly changing source of code and can become unstable at any point.
+
+.. note::
+
+ If you have Ansible 2.9 or older installed, you need to use ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it.
+
+
+You can install the development version of ``ansible-base`` directly from GitHub with pip.
+
+.. code-block:: bash
+
+ $ python -m pip install --user https://github.com/ansible/ansible/archive/devel.tar.gz
+
+Replace ``devel`` in the URL above with any other branch or tag on GitHub to install older versions of Ansible (prior to ``ansible-base`` 2.10). This installs all of Ansible.
+
+.. code-block:: bash
+
+ $ python -m pip install --user https://github.com/ansible/ansible/archive/stable-2.9.tar.gz
+
+See :ref:`from_source` for instructions on how to run ``ansible-base`` directly from source, without the requirement of installation.
+
+.. _from_pip_venv:
+
+Virtual Environments
+^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+ If you have Ansible 2.9 or older installed, you need to use ``pip uninstall ansible`` first to remove older versions of Ansible before re-installing it.
+
+Ansible can also be installed inside a new or existing ``virtualenv``::
+
+ $ python -m virtualenv ansible # Create a virtualenv if one does not already exist
+ $ source ansible/bin/activate # Activate the virtual environment
+ $ python -m pip install ansible
+
+.. _from_source:
+
+Running ``ansible-base`` from source (devel)
+---------------------------------------------
+
+In Ansible 2.10 and later, the `ansible/ansible repository <https://github.com/ansible/ansible>`_ contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-base``.
+
+.. note::
+
+ You should only run ``ansible-base`` from ``devel`` if you are modifying ``ansible-base`` or trying out features under development. This is a rapidly changing source of code and can become unstable at any point.
+
+``ansible-base`` is easy to run from source. You do not need ``root`` permissions
+to use it and there is no software to actually install. No daemons
+or database setup are required.
+
+.. note::
+
+ If you want to use Ansible Tower as the control node, do not use a source installation of Ansible. Please use an OS package manager (like ``apt`` or ``yum``) or ``pip`` to install a stable version.
+
+
+To install from source, clone the ``ansible-base`` git repository:
+
+.. code-block:: bash
+
+ $ git clone https://github.com/ansible/ansible.git
+ $ cd ./ansible
+
+Once ``git`` has cloned the ``ansible-base`` repository, set up the Ansible environment:
+
+Using Bash:
+
+.. code-block:: bash
+
+ $ source ./hacking/env-setup
+
+Using Fish::
+
+ $ source ./hacking/env-setup.fish
+
+If you want to suppress spurious warnings/errors, use::
+
+ $ source ./hacking/env-setup -q
+
+If you don't have ``pip`` installed in your version of Python, install it::
+
+ $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
+ $ python get-pip.py --user
+
+Ansible also uses the following Python modules that need to be installed [1]_:
+
+.. code-block:: bash
+
+ $ python -m pip install --user -r ./requirements.txt
+
+To update an ``ansible-base`` checkout, use pull-with-rebase so any local changes are replayed, then update the submodules:
+
+.. code-block:: bash
+
+ $ git pull --rebase
+ $ git submodule update --init --recursive
+
+After running the env-setup script, you will be running from the checkout and the default inventory file
+will be ``/etc/ansible/hosts``. You can optionally specify an inventory file (see :ref:`inventory`)
+other than ``/etc/ansible/hosts``:
+
+.. code-block:: bash
+
+ $ echo "127.0.0.1" > ~/ansible_hosts
+ $ export ANSIBLE_INVENTORY=~/ansible_hosts
+
+You can read more about the inventory file at :ref:`inventory`.
+
+Now let's test things with a ping command:
+
+.. code-block:: bash
+
+ $ ansible all -m ping --ask-pass
+
+You can also use ``sudo make install``.
+
+.. _tagged_releases:
+
+Finding tarballs of tagged releases
+-----------------------------------
+
+If you are packaging Ansible or want to build a local package yourself but don't want to do a git checkout, tarballs of releases are available from PyPI as https://pypi.python.org/packages/source/a/ansible/ansible-{{VERSION}}.tar.gz. You can make VERSION a variable in your package managing system that you update in one place whenever you package a new version. Alternately, you can visit https://pypi.python.org/project/ansible to download the latest stable release.
+
+.. note::
+
+ If you are creating your own Ansible package, you must also download or package ``ansible-base`` as part of your Ansible package. You can download it as https://pypi.python.org/packages/source/a/ansible-base/ansible-base-{{VERSION}}.tar.gz.
+
+These releases are also tagged in the `git repository <https://github.com/ansible/ansible/releases>`_ with the release version.
+
+
+.. _shell_completion:
+
+Ansible command shell completion
+--------------------------------
+
+As of Ansible 2.9, shell completion of the Ansible command line utilities is available and provided through an optional dependency
+called ``argcomplete``. ``argcomplete`` supports bash, and has limited support for zsh and tcsh.
+
+You can install ``python-argcomplete`` from EPEL on Red Hat Enterprise Linux based distributions, or from the standard OS repositories for many other distributions.
+
+For more information about installation and configuration, see the `argcomplete documentation <https://argcomplete.readthedocs.io/en/latest/>`_.
+
+Installing ``argcomplete`` on RHEL, CentOS, or Fedora
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On Fedora:
+
+.. code-block:: bash
+
+ $ sudo dnf install python-argcomplete
+
+On RHEL and CentOS:
+
+.. code-block:: bash
+
+ $ sudo yum install epel-release
+ $ sudo yum install python-argcomplete
+
+
+Installing ``argcomplete`` with ``apt``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+ $ sudo apt install python-argcomplete
+
+
+Installing ``argcomplete`` with ``pip``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: bash
+
+ $ python -m pip install argcomplete
+
+Configuring ``argcomplete``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two ways to configure ``argcomplete`` to allow shell completion of the Ansible command line utilities: globally or per command.
+
+Globally
+"""""""""
+
+Global completion requires bash 4.2.
+
+.. code-block:: bash
+
+ $ sudo activate-global-python-argcomplete
+
+This will write a bash completion file to a global location. Use ``--dest`` to change the location.
+
+Per command
+"""""""""""
+
+If you do not have bash 4.2, you must register each script independently.
+
+.. code-block:: bash
+
+ $ eval $(register-python-argcomplete ansible)
+ $ eval $(register-python-argcomplete ansible-config)
+ $ eval $(register-python-argcomplete ansible-console)
+ $ eval $(register-python-argcomplete ansible-doc)
+ $ eval $(register-python-argcomplete ansible-galaxy)
+ $ eval $(register-python-argcomplete ansible-inventory)
+ $ eval $(register-python-argcomplete ansible-playbook)
+ $ eval $(register-python-argcomplete ansible-pull)
+ $ eval $(register-python-argcomplete ansible-vault)
+
+You should place the above commands into your shell's profile file, such as ``~/.profile`` or ``~/.bash_profile``.
+
+``argcomplete`` with zsh or tcsh
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See the `argcomplete documentation <https://argcomplete.readthedocs.io/en/latest/>`_.
+
+.. _getting_ansible:
+
+``ansible-base`` on GitHub
+---------------------------
+
+You may also wish to follow the `GitHub project <https://github.com/ansible/ansible>`_ if
+you have a GitHub account. This is also where we keep the issue tracker for sharing
+bugs and feature ideas.
+
+
+.. seealso::
+
+ :ref:`intro_adhoc`
+ Examples of basic commands
+ :ref:`working_with_playbooks`
+ Learning ansible's configuration management language
+ :ref:`installation_faqs`
+ Ansible Installation related to FAQs
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
+
+.. [1] If you have issues with the "pycrypto" package install on macOS, then you may need to try ``CC=clang sudo -E pip install pycrypto``.
+.. [2] ``paramiko`` was included in Ansible's ``requirements.txt`` prior to 2.8.
diff --git a/docs/docsite/rst/inventory/implicit_localhost.rst b/docs/docsite/rst/inventory/implicit_localhost.rst
new file mode 100644
index 00000000..2f065dc7
--- /dev/null
+++ b/docs/docsite/rst/inventory/implicit_localhost.rst
@@ -0,0 +1,35 @@
+:orphan:
+
+.. _implicit_localhost:
+
+Implicit 'localhost'
+====================
+
+When you try to reference a ``localhost`` and you don't have it defined in inventory, Ansible will create an implicit one for you::
+
+ - hosts: all
+ tasks:
+ - name: check that I have a log file for all hosts on my local machine
+ stat: path=/var/log/hosts/{{inventory_hostname}}.log
+ delegate_to: localhost
+
+In a case like this (or ``local_action``) when Ansible needs to contact a 'localhost' but you did not supply one, we create one for you. This host is defined with specific connection variables equivalent to this in an inventory::
+
+ ...
+
+ hosts:
+ localhost:
+ vars:
+ ansible_connection: local
+ ansible_python_interpreter: "{{ansible_playbook_python}}"
+
+This ensures that the proper connection and Python are used to execute your tasks locally.
+You can override the built-in implicit version by creating a ``localhost`` host entry in your inventory. At that point, all implicit behaviors are ignored; the ``localhost`` in inventory is treated just like any other host. Group and host vars will apply, including connection vars, which includes the ``ansible_python_interpreter`` setting. This will also affect ``delegate_to: localhost`` and ``local_action``, the latter being an alias to the former.
+
+.. note::
+ - This host is not targetable via any group, however it will use vars from ``host_vars`` and from the 'all' group.
+ - Implicit localhost does not appear in the ``hostvars`` magic variable unless demanded, such as by ``"{{ hostvars['localhost'] }}"``.
+ - The ``inventory_file`` and ``inventory_dir`` magic variables are not available for the implicit localhost as they are dependent on **each inventory host**.
+ - This implicit host also gets triggered by using ``127.0.0.1`` or ``::1`` as they are the IPv4 and IPv6 representations of 'localhost'.
+ - Even though there are many ways to create it, there will only ever be ONE implicit localhost, using the name first used to create it.
+ - Having ``connection: local`` does NOT trigger an implicit localhost, you are just changing the connection for the ``inventory_hostname``.
diff --git a/docs/docsite/rst/network/dev_guide/developing_plugins_network.rst b/docs/docsite/rst/network/dev_guide/developing_plugins_network.rst
new file mode 100644
index 00000000..45aee4b0
--- /dev/null
+++ b/docs/docsite/rst/network/dev_guide/developing_plugins_network.rst
@@ -0,0 +1,265 @@
+
+.. _developing_modules_network:
+.. _developing_plugins_network:
+
+**************************
+Developing network plugins
+**************************
+
+You can extend the existing network modules with custom plugins in your collection.
+
+.. contents::
+ :local:
+
+Network connection plugins
+==========================
+Each network connection plugin has a set of its own plugins which provide a specification of the
+connection for a particular set of devices. The specific plugin used is selected at runtime based
+on the value of the ``ansible_network_os`` variable assigned to the host. This variable should be
+set to the same value as the name of the plugin to be loaded. Thus, ``ansible_network_os=nxos``
+will try to load a plugin in a file named ``nxos.py``, so it is important to name the plugin in a
+way that will be sensible to users.
+
+Public methods of these plugins may be called from a module or module_utils with the connection
+proxy object just as other connection methods can. The following is a very simple example of using
+such a call in a module_utils file so it may be shared with other modules.
+
+.. code-block:: python
+
+ from ansible.module_utils.connection import Connection
+
+ def get_config(module):
+ # module is your AnsibleModule instance.
+ connection = Connection(module._socket_path)
+
+ # You can now call any method (that doesn't start with '_') of the connection
+ # plugin or its platform-specific plugin
+ return connection.get_config()
+
+.. contents::
+ :local:
+
+.. _developing_plugins_httpapi:
+
+Developing httpapi plugins
+==========================
+
+:ref:`httpapi plugins <httpapi_plugins>` serve as adapters for various HTTP(S) APIs for use with the ``httpapi`` connection plugin. They should implement a minimal set of convenience methods tailored to the API you are attempting to use.
+
+Specifically, there are a few methods that the ``httpapi`` connection plugin expects to exist.
+
+Making requests
+---------------
+
+The ``httpapi`` connection plugin has a ``send()`` method, but an httpapi plugin needs a ``send_request(self, data, **message_kwargs)`` method as a higher-level wrapper to ``send()``. This method should prepare requests by adding fixed values like common headers or URL root paths. This method may do more complex work such as turning data into formatted payloads, or determining which path or method to request. It may then also unpack responses to be more easily consumed by the caller.
+
+.. code-block:: python
+
+ from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+ def send_request(self, data, path, method='POST'):
+ # Fixed headers for requests
+ headers = {'Content-Type': 'application/json'}
+ try:
+ response, response_content = self.connection.send(path, data, method=method, headers=headers)
+ except HTTPError as exc:
+ return exc.code, exc.read()
+
+ # handle_response (defined separately) will take the format returned by the device
+ # and transform it into something more suitable for use by modules.
+ # This may be JSON text to Python dictionaries, for example.
+ return handle_response(response_content)
+
+Authenticating
+--------------
+
+By default, all requests will authenticate with HTTP Basic authentication. If a request can return some kind of token to stand in place of HTTP Basic, the ``update_auth(self, response, response_text)`` method should be implemented to inspect responses for such tokens. If the token is meant to be included with the headers of each request, it is sufficient to return a dictionary which will be merged with the computed headers for each request. The default implementation of this method does exactly this for cookies. If the token is used in another way, say in a query string, you should instead save that token to an instance variable, where the ``send_request()`` method (above) can add it to each request.
+
+.. code-block:: python
+
+ def update_auth(self, response, response_text):
+ cookie = response.info().get('Set-Cookie')
+ if cookie:
+ return {'Cookie': cookie}
+
+ return None
+
+If instead an explicit login endpoint needs to be requested to receive an authentication token, the ``login(self, username, password)`` method can be implemented to call that endpoint. If implemented, this method will be called once before requesting any other resources of the server. By default, it will also be attempted once when an HTTP 401 is returned from a request.
+
+.. code-block:: python
+
+ from ansible.errors import AnsibleAuthenticationFailure
+
+ def login(self, username, password):
+ login_path = '/my/login/path'
+ data = {'user': username, 'password': password}
+
+ response = self.send_request(data, path=login_path)
+ try:
+ # This is still sent as an HTTP header, so we can set our connection's _auth
+ # variable manually. If the token is returned to the device in another way,
+ # you will have to keep track of it another way and make sure that it is sent
+ # with the rest of the request from send_request()
+ self.connection._auth = {'X-api-token': response['token']}
+ except KeyError:
+ raise AnsibleAuthenticationFailure(message="Failed to acquire login token.")
+
+Similarly, ``logout(self)`` can be implemented to call an endpoint to invalidate and/or release the current token, if such an endpoint exists. This will be automatically called when the connection is closed (and, by extension, when reset).
+
+.. code-block:: python
+
+ def logout(self):
+ logout_path = '/my/logout/path'
+ self.send_request(None, path=logout_path)
+
+ # Clean up tokens
+ self.connection._auth = None
+
+Error handling
+--------------
+
+The ``handle_httperror(self, exception)`` method can deal with status codes returned by the server. The return value indicates how the plugin will continue with the request:
+
+* A value of ``true`` means that the request can be retried. This may be used to indicate a transient error, or one that has been resolved. For example, the default implementation will try to call ``login()`` when presented with a 401, and return ``true`` if successful.
+
+* A value of ``false`` means that the plugin is unable to recover from this response. The status code will be raised as an exception to the calling module.
+
+* Any other value will be taken as a nonfatal response from the request. This may be useful if the server returns error messages in the body of the response. Returning the original exception is usually sufficient in this case, as HTTPError objects have the same interface as a successful response.
+
+For example httpapi plugins, see the `source code for the httpapi plugins <https://github.com/ansible/ansible/tree/devel/lib/ansible/plugins/httpapi>`_ included with Ansible Core.
+
+
+
+Developing NETCONF plugins
+==========================
+
+The :ref:`netconf <netconf_connection>` connection plugin provides a connection to remote devices over the ``SSH NETCONF`` subsystem. Network devices typically use this connection plugin to send and receive ``RPC`` calls over ``NETCONF``.
+
+The ``netconf`` connection plugin uses the ``ncclient`` Python library under the hood to initiate a NETCONF session with a NETCONF-enabled remote network device. ``ncclient`` also executes NETCONF RPC requests and receives responses. You must install the ``ncclient`` on the local Ansible controller.
+
+To use the ``netconf`` connection plugin for network devices that support standard NETCONF (:RFC:`6241`) operations such as ``get``, ``get-config``, ``edit-config``, set ``ansible_network_os=default``.
+You can use the :ref:`netconf_get <netconf_get_module>`, :ref:`netconf_config <netconf_config_module>`, and :ref:`netconf_rpc <netconf_rpc_module>` modules to talk to a NETCONF-enabled remote host.
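+
+For example, a minimal task that retrieves the running configuration over NETCONF (a sketch, with ``ansible_network_os=default`` as described above and the modules from the ``ansible.netcommon`` collection) might look like this:
+
+.. code-block:: yaml
+
+   # assumes the target host uses the netconf connection plugin
+   - name: Retrieve the running configuration over NETCONF
+     ansible.netcommon.netconf_get:
+       source: running
+     register: netconf_output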
+
+As a contributor and user, you should be able to use all the methods under the ``NetconfBase`` class if your device supports standard NETCONF. You can contribute a new plugin if the device you are working with has a vendor-specific NETCONF RPC.
+To support a vendor-specific NETCONF RPC, add the implementation in the NETCONF plugin for that network OS.
+
+For example, for Junos:
+
+* See the vendor-specific Junos RPC methods implemented in ``plugins/netconf/junos.py``.
+* Set the value of ``ansible_network_os`` to the name of the netconf plugin file, which is ``junos`` in this case.
+
+.. _developing_plugins_network_cli:
+
+Developing network_cli plugins
+==============================
+
+The :ref:`network_cli <network_cli_connection>` connection type uses ``paramiko_ssh`` under the hood, which creates a pseudo-terminal to send commands and receive responses.
+``network_cli`` loads two platform specific plugins based on the value of ``ansible_network_os``:
+
+* Terminal plugin (for example ``plugins/terminal/ios.py``) - Controls the parameters related to the terminal, such as setting the terminal length and width, disabling paging, and handling privilege escalation. It also defines the regular expressions that identify the command prompt and error prompts.
+
+* :ref:`cliconf_plugins` (for example, :ref:`ios cliconf <ios_cliconf>`) - Provides an abstraction layer for low level send and receive operations. For example, the ``edit_config()`` method ensures that the prompt is in ``config`` mode before executing configuration commands.
+
+To contribute a new network operating system to work with the ``network_cli`` connection, implement the ``cliconf`` and ``terminal`` plugins for that network OS.
+
+The plugins can reside in:
+
+* In folders adjacent to the playbook:
+
+ .. code-block:: bash
+
+ cliconf_plugins/
+ terminal_plugins/
+
+* In roles:
+
+ .. code-block:: bash
+
+ myrole/cliconf_plugins/
+ myrole/terminal_plugins/
+
+* In collections:
+
+ .. code-block:: bash
+
+ myorg/mycollection/plugins/terminal/
+ myorg/mycollection/plugins/cliconf/
+
+The user can also set the :ref:`DEFAULT_CLICONF_PLUGIN_PATH` to configure the ``cliconf`` plugin path.
+
+After adding the ``cliconf`` and ``terminal`` plugins in the expected locations, users can:
+
+* Use the :ref:`cli_command <cli_command_module>` to run an arbitrary command on the network device.
+* Use the :ref:`cli_config <cli_config_module>` to implement configuration changes on the remote hosts without platform-specific modules.
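+
+For example, with the plugins in place, tasks like the following sketch (the command and configuration line are hypothetical) work without any platform-specific modules:
+
+.. code-block:: yaml
+
+   # assumes ansible_connection is network_cli and ansible_network_os is set
+   - name: Run a show command on the device
+     ansible.netcommon.cli_command:
+       command: show version
+     register: show_version
+
+   - name: Push a configuration line to the device
+     ansible.netcommon.cli_config:
+       config: hostname myswitch01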
+
+
+.. _develop_cli_parse_plugins:
+
+Developing cli_parser plugins in a collection
+===============================================
+
+You can use ``cli_parse`` as an entry point for a cli_parser plugin in
+your own collection.
+
+The following sample shows the start of a custom cli_parser plugin:
+
+.. code-block:: python
+
+    from ansible.module_utils._text import to_native
+    from ansible_collections.ansible.netcommon.plugins.module_utils.cli_parser.cli_parserbase import (
+        CliParserBase,
+    )
+
+    def myparser(text, template_contents):
+        # Parse the text using the template contents and return structured data
+        return {...}
+
+    class CliParser(CliParserBase):
+        """ Sample cli_parser plugin """
+
+        # Use the following extension when loading a template
+        DEFAULT_TEMPLATE_EXTENSION = "txt"
+        # Provide the contents of the template to the parse function
+        PROVIDE_TEMPLATE_CONTENTS = True
+
+        def parse(self, *_args, **kwargs):
+            """ Standard entry point for a cli_parse parse execution
+
+            :return: Errors or parsed text as structured data
+            :rtype: dict
+
+            :example:
+
+            The parse function of a parser should return a dict:
+            {"errors": [a list of errors]}
+            or
+            {"parsed": obj}
+            """
+            template_contents = kwargs["template_contents"]
+            text = self._task_args.get("text")
+            try:
+                parsed = myparser(text, template_contents)
+            except Exception as exc:
+                msg = "Custom parser returned an error while parsing. Error: {err}"
+                return {"errors": [msg.format(err=to_native(exc))]}
+            return {"parsed": parsed}
+
+The following task uses this custom cli_parser plugin:
+
+.. code-block:: yaml
+
+   - name: Use a custom cli_parser
+     ansible.netcommon.cli_parse:
+       command: ls -l
+       parser:
+         name: my_organization.my_collection.custom_parser
+
+To develop a custom plugin:
+
+- Each cli_parser plugin requires a ``CliParser`` class.
+- Each cli_parser plugin requires a ``parse`` function.
+- Always return a dictionary with ``errors`` or ``parsed``.
+- Place the custom cli_parser in the ``plugins/cli_parsers`` directory of the collection.
+- See the `current cli_parsers <https://github.com/ansible-collections/ansible.netcommon/tree/main/plugins/cli_parsers>`_ for examples to follow.
+
+
+.. seealso::
+
+ * :ref:`cli_parsing`
diff --git a/docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst b/docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst
new file mode 100644
index 00000000..e19067a3
--- /dev/null
+++ b/docs/docsite/rst/network/dev_guide/developing_resource_modules_network.rst
@@ -0,0 +1,819 @@
+
+.. _developing_resource_modules:
+
+***********************************
+Developing network resource modules
+***********************************
+
+.. contents::
+ :local:
+ :depth: 2
+
+Understanding network and security resource modules
+===================================================
+
+Network and security devices separate configuration into sections (such as interfaces, VLANs, and so on) that apply to a network or security service. Ansible resource modules take advantage of this to allow users to configure subsections or resources within the device configuration. Resource modules provide a consistent experience across different network and security devices. For example, a network resource module may only update the configuration for a specific portion of the network interfaces, VLANs, ACLs, and so on for a network device. The resource module:
+
+#. Fetches a piece of the configuration (fact gathering), for example, the interfaces configuration.
+#. Converts the returned configuration into key-value pairs.
+#. Places those key-value pairs into an internal agnostic structured data format.
+
+Now that the configuration data is normalized, the user can update and modify the data and then use the resource module to send the configuration data back to the device. This results in a full round-trip configuration update without the need for manual parsing, data manipulation, and data model management.
+
+The resource module has two top-level keys - ``config`` and ``state``:
+
+* ``config`` defines the resource configuration data model as key-value pairs. The type of the ``config`` option can be ``dict`` or ``list of dict`` based on the resource managed. That is, if the device has a single global configuration, it should be a ``dict`` (for example, a global LLDP configuration). If the device has multiple instances of configuration, it should be of type ``list`` with each element in the list of type ``dict`` (for example, interfaces configuration). See the sketch after this list.
+
+
+* ``state`` defines the action the resource module takes on the end device.
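+
+The following sketch (hypothetical values) contrasts the two shapes of ``config``:
+
+.. code-block:: yaml
+
+   ---
+   # Single global configuration (for example, global LLDP): config is a dict
+   config:
+     holdtime: 120
+     enabled: true
+
+   ---
+   # Multiple instances of configuration (for example, interfaces): config is a list of dicts
+   config:
+     - name: eth0
+       description: Uplink to spine
+     - name: eth1
+       enabled: false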
+
+The ``state`` for a new resource module should support the following values (as applicable for the devices that support them):
+
+merged
+ Ansible merges the on-device configuration with the provided configuration in the task.
+
+replaced
+ Ansible replaces the on-device configuration subsection with the provided configuration subsection in the task.
+
+overridden
+ Ansible overrides the on-device configuration for the resource with the provided configuration in the task. Use caution with this state as you could remove your access to the device (for example, by overriding the management interface configuration).
+
+deleted
+ Ansible deletes the on-device configuration subsection and restores any default settings.
+
+gathered
+ Ansible displays the resource details gathered from the network device and accessed with the ``gathered`` key in the result.
+
+rendered
+ Ansible renders the provided configuration in the task in the device-native format (for example, Cisco IOS CLI). Ansible returns this rendered configuration in the ``rendered`` key in the result. Note this state does not communicate with the network device and can be used offline.
+
+parsed
+  Ansible parses the configuration from the ``running_config`` option into Ansible structured data in the ``parsed`` key in the result. Note this does not gather the configuration from the network device so this state can be used offline.
+
+
+Modules in Ansible-maintained collections must support these state values. If you develop a module with only "present" and "absent" for state, you may submit it to a community collection.
+
+.. note::
+
+ The states ``rendered``, ``gathered``, and ``parsed`` do not perform any change on the device.
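+
+As a sketch of how these states appear in tasks (borrowing the ``vyos.vyos.vyos_l3_interfaces`` module from the testing examples later in this document, and assuming it supports these states on your version; the fixture filename is hypothetical), note that ``parsed`` runs entirely offline:
+
+.. code-block:: yaml
+
+   - name: Merge the provided configuration with the on-device configuration
+     vyos.vyos.vyos_l3_interfaces:
+       config:
+         - name: eth1
+           ipv4:
+             - address: 192.0.2.15/24
+       state: merged
+
+   - name: Parse device-native configuration without contacting the device
+     vyos.vyos.vyos_l3_interfaces:
+       running_config: "{{ lookup('file', 'vyos_l3_interfaces.cfg') }}"
+       state: parsed
+     register: parsed_result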
+
+.. seealso::
+
+ `Deep Dive on VLANs Resource Modules for Network Automation <https://www.ansible.com/blog/deep-dive-on-vlans-resource-modules-for-network-automation>`_
+ Walkthrough of how state values are implemented for VLANs.
+
+
+Developing network and security resource modules
+=================================================
+
+The Ansible Engineering team ensures the module design and code patterns within Ansible-maintained collections are uniform across resources and across platforms, to give a vendor-agnostic feel and deliver good quality code. We recommend you use the `resource module builder <https://github.com/ansible-network/resource_module_builder>`_ to develop a resource module.
+
+
+The high-level process for developing a resource module is:
+
+#. Create and share a resource model design in the `resource module models repository <https://github.com/ansible-network/resource_module_models>`_ as a PR for review.
+#. Download the latest version of the `resource module builder <https://github.com/ansible-network/resource_module_builder>`_.
+#. Run the ``resource module builder`` to create a collection scaffold from your approved resource model.
+#. Write the code to implement your resource module.
+#. Develop integration and unit tests to verify your resource module.
+#. Create a PR to the appropriate collection that you want to add your new resource module to. See :ref:`contributing_maintained_collections` for details on determining the correct collection for your module.
+
+
+Understanding the model and resource module builder
+-----------------------------------------------------
+
+The resource module builder is an Ansible Playbook that helps developers scaffold and maintain an Ansible resource module. It uses a model as the single source of truth for the module. This model is a ``yaml`` file that is used for the module DOCUMENTATION section and the argument spec.
+
+The resource module builder has the following capabilities:
+
+- Uses a defined model to scaffold a resource module directory layout and initial class files.
+- Scaffolds either an Ansible role or a collection.
+- Subsequent uses of the resource module builder will only replace the module argspec and the file containing the module docstring.
+- Allows you to store complex examples alongside the model in the same directory.
+- Maintains the model as the source of truth for the module, using the resource module builder to update the source files as needed.
+- Generates working sample modules for both ``<network_os>_<resource>`` and ``<network_os>_facts``.
+
+Accessing the resource module builder
+-------------------------------------
+
+To access the resource module builder:
+
+1. Clone the GitHub repository:
+
+ .. code-block:: bash
+
+ git clone https://github.com/ansible-network/resource_module_builder.git
+
+2. Install the requirements:
+
+ .. code-block:: bash
+
+ pip install -r requirements.txt
+
+Creating a model
+-----------------
+
+You must create a model for your new resource. The model is the single source of truth for both the argspec and docstring, keeping them in sync. Once your model is approved, you can use the resource module builder to generate three items based on the model:
+
+* The scaffold for a new module
+* The argspec for the new module
+* The docstring for the new module
+
+For any subsequent changes to the functionality, update the model first and use the resource module builder to update the module argspec and docstring.
+
+For example, the resource module builder includes the ``myos_interfaces.yml`` sample in the :file:`models` directory, as seen below:
+
+.. code-block:: yaml
+
+ ---
+ GENERATOR_VERSION: '1.0'
+
+ NETWORK_OS: myos
+ RESOURCE: interfaces
+ COPYRIGHT: Copyright 2019 Red Hat
+ LICENSE: gpl-3.0.txt
+
+ DOCUMENTATION: |
+ module: myos_interfaces
+ version_added: 1.0.0
+ short_description: 'Manages <xxxx> attributes of <network_os> <resource>'
+ description: 'Manages <xxxx> attributes of <network_os> <resource>.'
+ author: Ansible Network Engineer
+ notes:
+ - 'Tested against <network_os> <version>'
+ options:
+ config:
+ description: The provided configuration
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description: The name of the <resource>
+ some_string:
+ type: str
+ description:
+ - The some_string_01
+ choices:
+ - choice_a
+ - choice_b
+ - choice_c
+ default: choice_a
+ some_bool:
+ description:
+ - The some_bool.
+ type: bool
+ some_int:
+ description:
+ - The some_int.
+ type: int
+ version_added: '1.1'
+ some_dict:
+ type: dict
+ description:
+ - The some_dict.
+ suboptions:
+ property_01:
+ description:
+ - The property_01
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+ EXAMPLES:
+ - deleted_example_01.txt
+ - merged_example_01.txt
+ - overridden_example_01.txt
+ - replaced_example_01.txt
+
+Notice that you should include examples for each of the states that the resource supports. The resource module builder also includes these in the sample model.
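+
+These ``EXAMPLES`` entries are text files stored alongside the model. As a hypothetical sketch (the actual files shipped with the resource module builder may differ), ``merged_example_01.txt`` could contain a task such as:
+
+.. code-block:: yaml
+
+   # Using merged
+   - name: Merge the provided configuration with the device configuration
+     myos_interfaces:
+       config:
+         - name: eth0
+           some_string: choice_b
+       state: merged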
+
+Share this model as a PR for review at `resource module models repository <https://github.com/ansible-network/resource_module_models>`_. You can also see more model examples at that location.
+
+
+Creating a collection scaffold from a resource model
+----------------------------------------------------
+
+To use the resource module builder to create a collection scaffold from your approved resource model:
+
+.. code-block:: bash
+
+ ansible-playbook -e rm_dest=<destination for modules and module utils> \
+ -e structure=collection \
+ -e collection_org=<collection_org> \
+ -e collection_name=<collection_name> \
+ -e model=<model> \
+ site.yml
+
+Where the parameters are as follows:
+
+- ``rm_dest``: The directory where the resource module builder places the files and directories for the resource module and facts modules.
+- ``structure``: The directory layout type (role or collection)
+
+ - ``role``: Generate a role directory layout.
+ - ``collection``: Generate a collection directory layout.
+
+- ``collection_org``: The organization of the collection, required when ``structure=collection``.
+- ``collection_name``: The name of the collection, required when ``structure=collection``.
+- ``model``: The path to the model file.
+
+To use the resource module builder to create a role scaffold:
+
+.. code-block:: bash
+
+ ansible-playbook -e rm_dest=<destination for modules and module utils> \
+ -e structure=role \
+ -e model=<model> \
+ site.yml
+
+Examples
+========
+
+Collection directory layout
+---------------------------
+
+This example shows the directory layout for the following:
+
+- ``network_os``: myos
+- ``resource``: interfaces
+
+.. code-block:: bash
+
+ ansible-playbook -e rm_dest=~/github/rm_example \
+ -e structure=collection \
+ -e collection_org=cidrblock \
+ -e collection_name=my_collection \
+ -e model=models/myos/interfaces/myos_interfaces.yml \
+ site.yml
+
+.. code-block:: text
+
+ ├── docs
+ ├── LICENSE.txt
+ ├── playbooks
+ ├── plugins
+ | ├── action
+ | ├── filter
+ | ├── inventory
+ | ├── modules
+ | | ├── __init__.py
+ | | ├── myos_facts.py
+ | | └── myos_interfaces.py
+ | └── module_utils
+ | ├── __init__.py
+ | └── network
+ | ├── __init__.py
+ | └── myos
+ | ├── argspec
+ | | ├── facts
+ | | | ├── facts.py
+ | | | └── __init__.py
+ | | ├── __init__.py
+ | | └── interfaces
+ | | ├── __init__.py
+ | | └── interfaces.py
+ | ├── config
+ | | ├── __init__.py
+ | | └── interfaces
+ | | ├── __init__.py
+ | | └── interfaces.py
+ | ├── facts
+ | | ├── facts.py
+ | | ├── __init__.py
+ | | └── interfaces
+ | | ├── __init__.py
+ | | └── interfaces.py
+ | ├── __init__.py
+ | └── utils
+ | ├── __init__.py
+ | └── utils.py
+ ├── README.md
+ └── roles
+
+
+Role directory layout
+---------------------
+
+This example displays the role directory layout for the following:
+
+- ``network_os``: myos
+- ``resource``: interfaces
+
+.. code-block:: bash
+
+ ansible-playbook -e rm_dest=~/github/rm_example/roles/my_role \
+ -e structure=role \
+ -e model=models/myos/interfaces/myos_interfaces.yml \
+ site.yml
+
+
+.. code-block:: text
+
+ roles
+ └── my_role
+ ├── library
+ │ ├── __init__.py
+ │ ├── myos_facts.py
+ │ └── myos_interfaces.py
+ ├── LICENSE.txt
+ ├── module_utils
+ │ ├── __init__.py
+ │ └── network
+ │ ├── __init__.py
+ │ └── myos
+ │ ├── argspec
+ │ │ ├── facts
+ │ │ │ ├── facts.py
+ │ │ │ └── __init__.py
+ │ │ ├── __init__.py
+ │ │ └── interfaces
+ │ │ ├── __init__.py
+ │ │ └── interfaces.py
+ │ ├── config
+ │ │ ├── __init__.py
+ │ │ └── interfaces
+ │ │ ├── __init__.py
+ │ │ └── interfaces.py
+ │ ├── facts
+ │ │ ├── facts.py
+ │ │ ├── __init__.py
+ │ │ └── interfaces
+ │ │ ├── __init__.py
+ │ │ └── interfaces.py
+ │ ├── __init__.py
+ │ └── utils
+ │ ├── __init__.py
+ │ └── utils.py
+ └── README.md
+
+
+Using the collection
+--------------------
+
+This example shows how to use the generated collection in a playbook:
+
+.. code-block:: yaml
+
+   ---
+   - hosts: myos101
+     gather_facts: False
+     tasks:
+      - cidrblock.my_collection.myos_interfaces:
+        register: result
+      - debug:
+          var: result
+      - cidrblock.my_collection.myos_facts:
+      - debug:
+          var: ansible_network_resources
+
+
+Using the role
+--------------
+
+This example shows how to use the generated role in a playbook:
+
+.. code-block:: yaml
+
+ - hosts: myos101
+ gather_facts: False
+ roles:
+ - my_role
+
+ - hosts: myos101
+ gather_facts: False
+ tasks:
+ - myos_interfaces:
+ register: result
+ - debug:
+ var: result
+ - myos_facts:
+ - debug:
+ var: ansible_network_resources
+
+
+Resource module structure and workflow
+======================================
+
+The resource module structure includes the following components:
+
+Module
+ * ``library/<ansible_network_os>_<resource>.py``.
+ * Imports the ``module_utils`` resource package and calls ``execute_module`` API:
+
+ .. code-block:: text
+
+ def main():
+ result = <resource_package>(module).execute_module()
+
+Module argspec
+ * ``module_utils/<ansible_network_os>/argspec/<resource>/``.
+ * Argspec for the resource.
+
+Facts
+  * ``module_utils/<ansible_network_os>/facts/<resource>/``.
+  * Populates facts for the resource.
+  * An entry in ``module_utils/<ansible_network_os>/facts/facts.py`` for the ``get_facts`` API to keep the ``<ansible_network_os>_facts`` module and the facts gathered for the resource module in sync for every subset.
+  * An entry for the resource subset in the ``FACTS_RESOURCE_SUBSETS`` list in ``module_utils/<ansible_network_os>/facts/facts.py`` to make facts collection work.
+
+Module package in module_utils
+  * ``module_utils/<ansible_network_os>/config/<resource>/``.
+  * Implements the ``execute_module`` API that loads the configuration onto the device and generates the result with ``changed``, ``commands``, ``before`` and ``after`` keys.
+  * Calls the ``get_facts`` API that returns the ``<resource>`` configuration facts, or returns the difference if the device has onbox diff support.
+  * Compares the gathered facts against the given key-values if diff is not supported.
+  * Generates the final configuration.
+
+Utils
+ * ``module_utils/<ansible_network_os>/utils``.
+ * Utilities for the ``<ansible_network_os>`` platform.
+
+.. _tox_resource_modules:
+
+Running ``ansible-test sanity`` and ``tox`` on resource modules
+================================================================
+
+You should run ``ansible-test sanity`` and ``tox -elinters`` from the collection root directory before pushing your PR to an Ansible-maintained collection. The CI runs both and will fail if these tests fail. See :ref:`developing_testing` for details on ``ansible-test sanity``.
+
+To install the necessary packages:
+
+#. Ensure you have a valid Ansible development environment configured. See :ref:`environment_setup` for details.
+#. Run ``pip install -r requirements.txt`` from the collection root directory.
+
+
+Running ``tox -elinters``:
+
+* Reads :file:`tox.ini` from the collection root directory and installs required dependencies (such as ``black`` and ``flake8``).
+* Runs these with preconfigured options (such as line length and ignores).
+* Runs ``black`` in check mode to show which files will be formatted without actually formatting them.
+
+Testing resource modules
+========================
+
+The tests rely on a role generated by the resource module builder. After changes to the resource module builder, the role should be regenerated and the tests modified and run as needed. To generate the role after changes:
+
+.. code-block:: bash
+
+ rm -rf rmb_tests/roles/my_role
+ ansible-playbook -e rm_dest=./rmb_tests/roles/my_role \
+ -e structure=role \
+ -e model=models/myos/interfaces/myos_interfaces.yml \
+ site.yml
+
+
+.. _testing_resource_modules:
+
+Resource module integration tests
+----------------------------------
+
+High-level integration test requirements for new resource modules are as follows:
+
+#. Write a test case for every state.
+#. Write additional test cases to test the behavior of the module when an empty ``config.yaml`` is given.
+#. Add a round trip test case. This involves a ``merged`` operation, followed by ``gather_facts``, a ``merged`` update with additional configuration, and then reverting back to the base configuration using the previously gathered facts with the ``state`` set to ``overridden`` (see the sketch below).
+#. Wherever applicable, assertions should check the ``after`` and ``before`` dicts against a hard-coded source of truth.
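+
+A condensed sketch of such a round trip test case (``base_config`` and ``update_config`` are hypothetical variables; the modules are borrowed from the vyos examples below):
+
+.. code-block:: yaml
+
+   - name: Apply the base configuration
+     vyos.vyos.vyos_l3_interfaces:
+       config: "{{ base_config }}"
+       state: merged
+
+   - name: Gather the applied configuration as facts
+     vyos.vyos.vyos_facts:
+       gather_network_resources:
+         - l3_interfaces
+
+   - name: Save a snapshot of the base configuration
+     set_fact:
+       snapshot: "{{ ansible_network_resources['l3_interfaces'] }}"
+
+   - name: Merge an additional configuration update
+     vyos.vyos.vyos_l3_interfaces:
+       config: "{{ update_config }}"
+       state: merged
+
+   - name: Revert to the base configuration using the gathered facts
+     vyos.vyos.vyos_l3_interfaces:
+       config: "{{ snapshot }}"
+       state: overridden
+     register: revert
+
+   - name: Assert that the device returned to the base configuration
+     assert:
+       that:
+         - "{{ snapshot | symmetric_difference(revert['after']) | length == 0 }}"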
+
+.. _using_zuul_resource_modules:
+
+We use Zuul as our CI to run the integration tests.
+
+* To view the report, click :guilabel:`Details` on the CI comment in the PR.
+* To view a failure report, click :guilabel:`ansible/check` and select the failed test.
+* To view logs while the test is running, check for your PR number in the `Zuul status board <https://dashboard.zuul.ansible.com/t/ansible/status>`_.
+* To fix a static test failure locally, run :command:`tox -e black` **inside the root folder of the collection**.
+
+To view the Ansible run logs and debug test failures:
+
+#. Click the failed job to get the summary, and click :guilabel:`Logs` for the log.
+#. Click :guilabel:`console` and scroll down to find the failed test.
+#. Click :guilabel:`>` next to the failed test for complete details.
+
+
+Integration test structure
+...........................
+
+Each test case should generally follow this pattern:
+
+* setup -> test -> assert -> test again (for idempotency) -> assert -> tear down (if needed) -> done. This keeps test playbooks from becoming monolithic and difficult to troubleshoot.
+* Include a name for each task that is not an assertion. You can add names to assertions as well, but it is easier to identify the broken task within a failed test if you add a name for each task.
+* Files containing test cases must end in ``.yaml``.
+
+Implementation
+..............
+
+For platforms that support ``connection: local`` *and* ``connection: network_cli``, use the following guidance:
+
+* Name the :file:`targets/` directories after the module name.
+* The :file:`main.yaml` file should just reference the transport.
+
+The following example walks through the integration tests for the ``vyos.vyos.vyos_l3_interfaces`` module in the `vyos.vyos <https://github.com/ansible-collections/vyos.vyos/tree/master/tests/integration>`_ collection:
+
+``test/integration/targets/vyos_l3_interfaces/tasks/main.yaml``
+
+.. code-block:: yaml
+
+ ---
+ - include: cli.yaml
+ tags:
+ - cli
+
+``test/integration/targets/vyos_l3_interfaces/tasks/cli.yaml``
+
+.. code-block:: yaml
+
+ ---
+ - name: collect all cli test cases
+ find:
+ paths: "{{ role_path }}/tests/cli"
+ patterns: "{{ testcase }}.yaml"
+ register: test_cases
+ delegate_to: localhost
+
+ - name: set test_items
+ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
+
+ - name: run test cases (connection=network_cli)
+ include: "{{ test_case_to_run }} ansible_connection=network_cli"
+ with_items: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
+
+ - name: run test case (connection=local)
+ include: "{{ test_case_to_run }} ansible_connection=local ansible_become=no"
+ with_first_found: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
+
+``test/integration/targets/vyos_l3_interfaces/tests/cli/overridden.yaml``
+
+.. code-block:: yaml
+
+ ---
+ - debug:
+ msg: START vyos_l3_interfaces merged integration tests on connection={{ ansible_connection
+ }}
+
+ - include_tasks: _remove_config.yaml
+
+ - block:
+
+ - include_tasks: _populate.yaml
+
+ - name: Overrides all device configuration with provided configuration
+ register: result
+ vyos.vyos.vyos_l3_interfaces: &id001
+ config:
+
+ - name: eth0
+ ipv4:
+
+ - address: dhcp
+
+ - name: eth1
+ ipv4:
+
+ - address: 192.0.2.15/24
+ state: overridden
+
+ - name: Assert that before dicts were correctly generated
+ assert:
+ that:
+ - "{{ populate | symmetric_difference(result['before']) |length == 0 }}"
+
+ - name: Assert that correct commands were generated
+ assert:
+ that:
+ - "{{ overridden['commands'] | symmetric_difference(result['commands'])\
+ \ |length == 0 }}"
+
+ - name: Assert that after dicts were correctly generated
+ assert:
+ that:
+ - "{{ overridden['after'] | symmetric_difference(result['after']) |length\
+ \ == 0 }}"
+
+ - name: Overrides all device configuration with provided configurations (IDEMPOTENT)
+ register: result
+ vyos.vyos.vyos_l3_interfaces: *id001
+
+ - name: Assert that the previous task was idempotent
+ assert:
+ that:
+ - result['changed'] == false
+
+ - name: Assert that before dicts were correctly generated
+ assert:
+ that:
+ - "{{ overridden['after'] | symmetric_difference(result['before']) |length\
+ \ == 0 }}"
+ always:
+
+ - include_tasks: _remove_config.yaml
+
+
+Detecting test resources at runtime
+...................................
+
+Your tests should detect resources (such as interfaces) at runtime rather than hard-coding them into the test. This allows the test to run on a variety of systems.
+
+For example:
+
+.. code-block:: yaml
+
+ - name: Collect interface list
+ connection: ansible.netcommon.network_cli
+ register: intout
+ cisco.nxos.nxos_command:
+ commands:
+ - show interface brief | json
+
+ - set_fact:
+ intdataraw: "{{ intout.stdout_lines[0]['TABLE_interface']['ROW_interface'] }}"
+
+ - set_fact:
+ nxos_int1: '{{ intdataraw[1].interface }}'
+
+ - set_fact:
+ nxos_int2: '{{ intdataraw[2].interface }}'
+
+ - set_fact:
+ nxos_int3: '{{ intdataraw[3].interface }}'
+
+
+See the complete test example of this at https://github.com/ansible-collections/cisco.nxos/blob/master/tests/integration/targets/prepare_nxos_tests/tasks/main.yml.
+
+
+Running network integration tests
+..................................
+
+Ansible uses Zuul to run an integration test suite on every PR, including new tests introduced by that PR. To find and fix problems in network modules, run the network integration test locally before you submit a PR.
+
+
+First, create an inventory file that points to your test machines. The inventory group should match the platform name (for example, ``eos``, ``ios``):
+
+.. code-block:: bash
+
+ cd test/integration
+ cp inventory.network.template inventory.networking
+ ${EDITOR:-vi} inventory.networking
+ # Add in machines for the platform(s) you wish to test
+
+To run these network integration tests, use ``ansible-test network-integration --inventory </path/to/inventory> <tests_to_run>``:
+
+.. code-block:: console
+
+ ansible-test network-integration --inventory ~/myinventory -vvv vyos_facts
+ ansible-test network-integration --inventory ~/myinventory -vvv vyos_.*
+
+
+
+To run all network tests for a particular platform:
+
+.. code-block:: bash
+
+ ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking vyos_.*
+
+This example will run against all ``vyos`` modules. Note that ``vyos_.*`` is a regex match, not a bash wildcard; include the ``.`` if you modify this example.
+
+To run integration tests for a specific module:
+
+.. code-block:: bash
+
+ ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking vyos_l3_interfaces
+
+To run a single test case on a specific module:
+
+.. code-block:: bash
+
+ # Only run vyos_l3_interfaces/tests/cli/gathered.yaml
+ ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking vyos_l3_interfaces --testcase gathered
+
+To run integration tests for a specific transport:
+
+.. code-block:: bash
+
+ # Only run nxapi test
+ ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking --tags="nxapi" nxos_.*
+
+ # Skip any cli tests
+ ansible-test network-integration --inventory /path/to-collection-module/test/integration/inventory.networking --skip-tags="cli" nxos_.*
+
+See `test/integration/targets/nxos_bgp/tasks/main.yaml <https://github.com/ansible-collections/cisco.nxos/blob/master/tests/integration/targets/nxos_bgp/tasks/main.yaml>`_ for how this is implemented in the tests.
+
+For more options:
+
+.. code-block:: bash
+
+ ansible-test network-integration --help
+
+If you need additional help or feedback, reach out in ``#ansible-network`` on Freenode.
+
+Unit test requirements
+-----------------------
+
+High-level unit test requirements that new resource modules should follow:
+
+#. Write test cases for all the states with all possible combinations of config values.
+#. Write test cases to test the error conditions (negative scenarios).
+#. Check the value of ``changed`` and ``commands`` keys in every test case.
+
+We run all unit test cases on our Zuul test suite, on the latest Python version supported by our CI setup.
+
+Use the :ref:`same procedure <using_zuul_resource_modules>` as the integration tests to view Zuul unit tests reports and logs.
+
+See :ref:`unit module testing <testing_units_modules>` for general unit test details.
+
+
+
+Example: Unit testing Ansible network resource modules
+======================================================
+
+
+This section walks through an example of how to develop unit tests for Ansible resource
+modules.
+
+See :ref:`testing_units` and :ref:`testing_units_modules` for general documentation on Ansible unit tests for modules.
+Please read those pages first to understand unit tests and why and when you should use them.
+
+
+Using mock objects to unit test Ansible network resource modules
+----------------------------------------------------------------
+
+
+`Mock objects <https://docs.python.org/3/library/unittest.mock.html>`_ can be very
+useful in building unit tests for special or difficult cases, but they can also
+lead to complex and confusing coding situations. One good use for mocks would be to
+simulate an API. The ``mock`` Python package is bundled with Ansible (use
+``import units.compat.mock``).
+
+You can mock the device connection and output from the device as follows:
+
+.. code-block:: python
+
+    from units.compat.mock import patch
+
+    self.mock_get_config = patch(
+        "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config"
+    )
+    self.get_config = self.mock_get_config.start()
+
+    self.mock_load_config = patch(
+        "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config"
+    )
+    self.load_config = self.mock_load_config.start()
+
+    self.mock_get_resource_connection_config = patch(
+        "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base.get_resource_connection"
+    )
+    self.get_resource_connection_config = self.mock_get_resource_connection_config.start()
+
+    self.mock_get_resource_connection_facts = patch(
+        "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts.get_resource_connection"
+    )
+    self.get_resource_connection_facts = self.mock_get_resource_connection_facts.start()
+
+    self.mock_edit_config = patch(
+        "ansible_collections.arista.eos.plugins.module_utils.network.eos.providers.providers.CliProvider.edit_config"
+    )
+    self.edit_config = self.mock_edit_config.start()
+
+    self.mock_execute_show_command = patch(
+        "ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.l2_interfaces.l2_interfaces.L2_interfacesFacts.get_device_data"
+    )
+    self.execute_show_command = self.mock_execute_show_command.start()
+
+
+The facts file of the module includes a ``get_device_data`` method. Mocking ``get_device_data``, as shown above, lets you emulate the device output.
+
+
+Mocking device data
+-----------------------
+
+To mock fetching results from devices or provide other complex data structures that
+come from external libraries, you can use ``fixtures`` to read in pre-generated data. The text files for this pre-generated data live in ``test/units/modules/network/PLATFORM/fixtures/``. See for example the `eos_l2_interfaces.cfg file <https://github.com/ansible-collections/arista.eos/blob/master/tests/unit/modules/network/eos/fixtures/eos_l2_interfaces_config.cfg>`_.
+
+Load data using the ``load_fixture`` method and set this data as the return value of the
+``get_device_data`` method in the facts file:
+
+.. code-block:: python
+
+ def load_fixtures(self, commands=None, transport='cli'):
+ def load_from_file(*args, **kwargs):
+ return load_fixture('eos_l2_interfaces_config.cfg')
+ self.execute_show_command.side_effect = load_from_file
+
+See the unit test file `test_eos_l2_interfaces <https://github.com/ansible-collections/arista.eos/blob/master/tests/unit/modules/network/eos/test_eos_l2_interfaces.py>`_
+for a practical example.
+
+
+.. seealso::
+
+ :ref:`testing_units`
+ Deep dive into developing unit tests for Ansible modules
+ :ref:`testing_running_locally`
+ Running tests locally including gathering and reporting coverage data
+ :ref:`developing_modules_general`
+ Get started developing a module
diff --git a/docs/docsite/rst/network/dev_guide/documenting_modules_network.rst b/docs/docsite/rst/network/dev_guide/documenting_modules_network.rst
new file mode 100644
index 00000000..78c88b37
--- /dev/null
+++ b/docs/docsite/rst/network/dev_guide/documenting_modules_network.rst
@@ -0,0 +1,52 @@
+
+.. _documenting_modules_network:
+
+*********************************
+Documenting new network platforms
+*********************************
+
+.. contents::
+ :local:
+
+When you create network modules for a new platform, or modify the connections provided by an existing network platform (such as ``network_cli`` and ``httpapi``), you also need to update the :ref:`settings_by_platform` table and add or modify the Platform Options file for your platform.
+
+You should already have documented each module as described in :ref:`developing_modules_documenting`.
+
+Modifying the platform options table
+====================================
+
+The :ref:`settings_by_platform` table is a convenient summary of the connections options provided by each network platform that has modules in Ansible. Add a row for your platform to this table, in alphabetical order. For example:
+
+.. code-block:: text
+
+ +-------------------+-------------------------+-------------+---------+---------+----------+
+ | My OS | ``myos`` | ✓ | ✓ | | ✓ |
+
+Ensure that the table stays formatted correctly. That is:
+
+* Each row is inserted in alphabetical order.
+* The cell division ``|`` markers line up with the ``+`` markers.
+* The check marks appear only for the connection types provided by the network modules.
+
+
+
+Adding a platform-specific options section
+==========================================
+
+The platform-specific sections are individual ``.rst`` files that provide more detailed information for the users of your network platform modules. Name your new file ``platform_<name>.rst`` (for example, ``platform_myos.rst``). The platform name should match the module prefix. See `platform_eos.rst <https://github.com/ansible/ansible/blob/devel/docs/docsite/rst/network/user_guide/platform_eos.rst>`_ and :ref:`eos_platform_options` for an example of the details you should provide in your platform-specific options section.
+
+Your platform-specific section should include the following:
+
+* **Connections available table** - a deeper dive into each connection type, including details on credentials, indirect access, connection settings, and enable mode.
+* **How to use each connection type** - with working examples of each connection type.
+
+If your network platform supports SSH connections, also include the following at the bottom of your ``.rst`` file:
+
+.. code-block:: text
+
+ .. include:: shared_snippets/SSH_warning.txt
+
+Adding your new file to the table of contents
+=============================================
+
+As a final step, add your new file in alphabetical order in the ``platform_index.rst`` file. You should then build the documentation to verify your additions. See :ref:`community_documentation_contributions` for more details.
diff --git a/docs/docsite/rst/network/dev_guide/index.rst b/docs/docsite/rst/network/dev_guide/index.rst
new file mode 100644
index 00000000..5f0e7924
--- /dev/null
+++ b/docs/docsite/rst/network/dev_guide/index.rst
@@ -0,0 +1,32 @@
+.. _network_developer_guide:
+
+**********************************
+Network Developer Guide
+**********************************
+
+Welcome to the Developer Guide for Ansible Network Automation!
+
+**Who should use this guide?**
+
+If you want to extend Ansible for Network Automation by creating a module or plugin, this guide is for you. This guide is specific to networking. You should already be familiar with how to create, test, and document modules and plugins, as well as the prerequisites for getting your module or plugin accepted into the main Ansible repository. See the :ref:`developer_guide` for details. Before you proceed, please read:
+
+* How to :ref:`add a custom plugin or module locally <developing_locally>`.
+* How to figure out if :ref:`developing a module is the right approach <module_dev_should_you>` for my use case.
+* How to :ref:`set up my Python development environment <environment_setup>`.
+* How to :ref:`get started writing a module <developing_modules_general>`.
+
+
+Find the network developer task that best describes what you want to do:
+
+ * I want to :ref:`develop a network resource module <developing_resource_modules>`.
+ * I want to :ref:`develop a network connection plugin <developing_plugins_network>`.
+ * I want to :ref:`document my set of modules for a network platform <documenting_modules_network>`.
+
+If you prefer to read the entire guide, here's a list of the pages in order.
+
+.. toctree::
+ :maxdepth: 1
+
+ developing_resource_modules_network
+ developing_plugins_network
+ documenting_modules_network
diff --git a/docs/docsite/rst/network/getting_started/basic_concepts.rst b/docs/docsite/rst/network/getting_started/basic_concepts.rst
new file mode 100644
index 00000000..980b144d
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/basic_concepts.rst
@@ -0,0 +1,10 @@
+**************
+Basic Concepts
+**************
+
+These concepts are common to all uses of Ansible, including network automation. You need to understand them to use Ansible for network automation. This basic introduction provides the background you need to follow the examples in this guide.
+
+.. contents::
+ :local:
+
+.. include:: ../../shared_snippets/basic_concepts.txt
diff --git a/docs/docsite/rst/network/getting_started/first_inventory.rst b/docs/docsite/rst/network/getting_started/first_inventory.rst
new file mode 100644
index 00000000..d3d1528e
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/first_inventory.rst
@@ -0,0 +1,431 @@
+***********************************************
+Build Your Inventory
+***********************************************
+
+Running a playbook without an inventory requires several command-line flags. Also, running a playbook against a single device is not a huge efficiency gain over making the same change manually. The next step to harnessing the full power of Ansible is to use an inventory file to organize your managed nodes into groups with information like the ``ansible_network_os`` and the SSH user. A fully-featured inventory file can serve as the source of truth for your network. Using an inventory file, a single playbook can maintain hundreds of network devices with a single command. This page shows you how to build an inventory file, step by step.
+
+.. contents::
+ :local:
+
+Basic inventory
+==================================================
+
+First, group your inventory logically. Best practice is to group servers and network devices by their What (application, stack or microservice), Where (datacenter or region), and When (development stage):
+
+- **What**: db, web, leaf, spine
+- **Where**: east, west, floor_19, building_A
+- **When**: dev, test, staging, prod
+
+Avoid spaces, hyphens, and preceding numbers (use ``floor_19``, not ``19th_floor``) in your group names. Group names are case sensitive.
+
+This tiny example data center illustrates a basic group structure. You can group groups using the syntax ``[metagroupname:children]`` and listing groups as members of the metagroup. Here, the group ``network`` includes all leafs and all spines; the group ``datacenter`` includes all network devices plus all webservers.
+
+.. code-block:: yaml
+
+ ---
+
+ leafs:
+ hosts:
+ leaf01:
+ ansible_host: 10.16.10.11
+ leaf02:
+ ansible_host: 10.16.10.12
+
+ spines:
+ hosts:
+ spine01:
+ ansible_host: 10.16.10.13
+ spine02:
+ ansible_host: 10.16.10.14
+
+ network:
+ children:
+ leafs:
+ spines:
+
+ webservers:
+ hosts:
+ webserver01:
+ ansible_host: 10.16.10.15
+ webserver02:
+ ansible_host: 10.16.10.16
+
+ datacenter:
+ children:
+ network:
+ webservers:
+
+
+
+You can also create this same inventory in INI format.
+
+.. code-block:: ini
+
+ [leafs]
+ leaf01
+ leaf02
+
+ [spines]
+ spine01
+ spine02
+
+ [network:children]
+ leafs
+ spines
+
+ [webservers]
+ webserver01
+ webserver02
+
+ [datacenter:children]
+ network
+ webservers
+
+
+Add variables to the inventory
+================================================================================
+
+Next, you can set values in the inventory for many of the variables you needed in your first Ansible command, so you can skip them in the ``ansible-playbook`` command. In this example, the inventory includes each network device's IP, OS, and SSH user. If your network devices are only accessible by IP, you must add the IP to the inventory file. If you access your network devices using hostnames, the IP is not necessary.
+
+.. code-block:: yaml
+
+ ---
+
+ leafs:
+ hosts:
+ leaf01:
+ ansible_host: 10.16.10.11
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+ leaf02:
+ ansible_host: 10.16.10.12
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+
+ spines:
+ hosts:
+ spine01:
+ ansible_host: 10.16.10.13
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+ spine02:
+ ansible_host: 10.16.10.14
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+
+ network:
+ children:
+ leafs:
+ spines:
+
+ webservers:
+ hosts:
+ webserver01:
+ ansible_host: 10.16.10.15
+ ansible_user: my_server_user
+ webserver02:
+ ansible_host: 10.16.10.16
+ ansible_user: my_server_user
+
+ datacenter:
+ children:
+ network:
+ webservers:
+
+
+Group variables within inventory
+================================================================================
+
+When devices in a group share the same variable values, such as OS or SSH user, you can reduce duplication and simplify maintenance by consolidating these into group variables:
+
+.. code-block:: yaml
+
+ ---
+
+ leafs:
+ hosts:
+ leaf01:
+ ansible_host: 10.16.10.11
+ leaf02:
+ ansible_host: 10.16.10.12
+ vars:
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+
+ spines:
+ hosts:
+ spine01:
+ ansible_host: 10.16.10.13
+ spine02:
+ ansible_host: 10.16.10.14
+ vars:
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+
+ network:
+ children:
+ leafs:
+ spines:
+
+ webservers:
+ hosts:
+ webserver01:
+ ansible_host: 10.16.10.15
+ webserver02:
+ ansible_host: 10.16.10.16
+ vars:
+ ansible_user: my_server_user
+
+ datacenter:
+ children:
+ network:
+ webservers:
+
+Variable syntax
+================================================================================
+
+The syntax for variable values is different in inventory, in playbooks, and in the ``group_vars`` files, which are covered below. Even though playbook and ``group_vars`` files are both written in YAML, you use variables differently in each.
+
+- In an INI-style inventory file you **must** use the syntax ``key=value`` for variable values: ``ansible_network_os=vyos.vyos.vyos``.
+- In any file with the ``.yml`` or ``.yaml`` extension, including playbooks and ``group_vars`` files, you **must** use YAML syntax: ``key: value``.
+
+- In ``group_vars`` files, use the full ``key`` name: ``ansible_network_os: vyos.vyos.vyos``.
+- In playbooks, use the short-form ``key`` name, which drops the ``ansible`` prefix: ``network_os: vyos.vyos.vyos``.
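+
+For example, the shared values from the inventory above could live in a hypothetical ``group_vars/network.yml`` file:
+
+.. code-block:: yaml
+
+   # group_vars/network.yml - applies to every host in the network group
+   ansible_connection: ansible.netcommon.network_cli
+   ansible_network_os: vyos.vyos.vyos
+   ansible_user: my_vyos_user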
+
+
+Group inventory by platform
+================================================================================
+
+As your inventory grows, you may want to group devices by platform. This allows you to specify platform-specific variables easily for all devices on that platform:
+
+.. code-block:: yaml
+
+ ---
+
+ leafs:
+ hosts:
+ leaf01:
+ ansible_host: 10.16.10.11
+ leaf02:
+ ansible_host: 10.16.10.12
+
+ spines:
+ hosts:
+ spine01:
+ ansible_host: 10.16.10.13
+ spine02:
+ ansible_host: 10.16.10.14
+
+ network:
+ children:
+ leafs:
+ spines:
+ vars:
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+
+ webservers:
+ hosts:
+ webserver01:
+ ansible_host: 10.16.10.15
+ webserver02:
+ ansible_host: 10.16.10.16
+ vars:
+ ansible_user: my_server_user
+
+ datacenter:
+ children:
+ network:
+ webservers:
+
+With this setup, you can run ``first_playbook.yml`` with only two flags:
+
+.. code-block:: console
+
+ ansible-playbook -i inventory.yml -k first_playbook.yml
+
+With the ``-k`` flag, you provide the SSH password(s) at the prompt. Alternatively, you can store SSH and other secrets and passwords securely in your group_vars files with ``ansible-vault``. See :ref:`network_vault` for details.
+
+Verifying the inventory
+=========================
+
+You can use the :ref:`ansible-inventory` CLI command to display the inventory as Ansible sees it.
+
+.. code-block:: console
+
+ $ ansible-inventory -i test.yml --list
+ {
+ "_meta": {
+ "hostvars": {
+ "leaf01": {
+ "ansible_connection": "ansible.netcommon.network_cli",
+ "ansible_host": "10.16.10.11",
+ "ansible_network_os": "vyos.vyos.vyos",
+ "ansible_user": "my_vyos_user"
+ },
+ "leaf02": {
+ "ansible_connection": "ansible.netcommon.network_cli",
+ "ansible_host": "10.16.10.12",
+ "ansible_network_os": "vyos.vyos.vyos",
+ "ansible_user": "my_vyos_user"
+ },
+ "spine01": {
+ "ansible_connection": "ansible.netcommon.network_cli",
+ "ansible_host": "10.16.10.13",
+ "ansible_network_os": "vyos.vyos.vyos",
+ "ansible_user": "my_vyos_user"
+ },
+ "spine02": {
+ "ansible_connection": "ansible.netcommon.network_cli",
+ "ansible_host": "10.16.10.14",
+ "ansible_network_os": "vyos.vyos.vyos",
+ "ansible_user": "my_vyos_user"
+ },
+ "webserver01": {
+ "ansible_host": "10.16.10.15",
+ "ansible_user": "my_server_user"
+ },
+ "webserver02": {
+ "ansible_host": "10.16.10.16",
+ "ansible_user": "my_server_user"
+ }
+ }
+ },
+ "all": {
+ "children": [
+ "datacenter",
+ "ungrouped"
+ ]
+ },
+ "datacenter": {
+ "children": [
+ "network",
+ "webservers"
+ ]
+ },
+ "leafs": {
+ "hosts": [
+ "leaf01",
+ "leaf02"
+ ]
+ },
+ "network": {
+ "children": [
+ "leafs",
+ "spines"
+ ]
+ },
+ "spines": {
+ "hosts": [
+ "spine01",
+ "spine02"
+ ]
+ },
+ "webservers": {
+ "hosts": [
+ "webserver01",
+ "webserver02"
+ ]
+ }
+ }
+
+.. _network_vault:
+
+Protecting sensitive variables with ``ansible-vault``
+================================================================================
+
+The ``ansible-vault`` command provides encryption for files and/or individual variables like passwords. This tutorial will show you how to encrypt a single SSH password. You can use the commands below to encrypt other sensitive information, such as database passwords, privilege-escalation passwords and more.
+
+First, you must create a password for ansible-vault itself. It is used as the encryption key, and with this you can encrypt dozens of different passwords across your Ansible project. You can access all those secrets (encrypted values) with a single password (the ansible-vault password) when you run your playbooks. Here's a simple example.
+
+1. Create a file and write your password for ansible-vault to it:
+
+.. code-block:: console
+
+ echo "my-ansible-vault-pw" > ~/my-ansible-vault-pw-file
+
+2. Create the encrypted SSH password for your VyOS network devices, pulling your ansible-vault password from the file you just created:
+
+.. code-block:: console
+
+ ansible-vault encrypt_string --vault-id my_user@~/my-ansible-vault-pw-file 'VyOS_SSH_password' --name 'ansible_password'
+
+If you prefer to type your ansible-vault password rather than store it in a file, you can request a prompt:
+
+.. code-block:: console
+
+ ansible-vault encrypt_string --vault-id my_user@prompt 'VyOS_SSH_password' --name 'ansible_password'
+
+and type in the vault password for ``my_user``.
+
+The :option:`--vault-id <ansible-playbook --vault-id>` flag allows different vault passwords for different users or different levels of access. The output includes the user name ``my_user`` from your ``ansible-vault`` command and uses the YAML syntax ``key: value``:
+
+.. code-block:: yaml
+
+ ansible_password: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;my_user
+ 66386134653765386232383236303063623663343437643766386435663632343266393064373933
+ 3661666132363339303639353538316662616638356631650a316338316663666439383138353032
+ 63393934343937373637306162366265383461316334383132626462656463363630613832313562
+ 3837646266663835640a313164343535316666653031353763613037656362613535633538386539
+ 65656439626166666363323435613131643066353762333232326232323565376635
+ Encryption successful
+
+This is an example using an extract from a YAML inventory, as the INI format does not support inline vaults:
+
+.. code-block:: yaml
+
+ ...
+
+ vyos: # this is a group in yaml inventory, but you can also do under a host
+ vars:
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+ ansible_password: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;my_user
+ 66386134653765386232383236303063623663343437643766386435663632343266393064373933
+ 3661666132363339303639353538316662616638356631650a316338316663666439383138353032
+ 63393934343937373637306162366265383461316334383132626462656463363630613832313562
+ 3837646266663835640a313164343535316666653031353763613037656362613535633538386539
+ 65656439626166666363323435613131643066353762333232326232323565376635
+
+ ...
+
+To use inline vaulted variables with an INI inventory, you need to store them in a 'vars' file in YAML format. It can reside in ``host_vars/`` or ``group_vars/`` to be picked up automatically, or be referenced from a play through ``vars_files`` or ``include_vars``.
+
+To run a playbook with this setup, drop the ``-k`` flag and add a flag for your ``vault-id``:
+
+.. code-block:: console
+
+ ansible-playbook -i inventory --vault-id my_user@~/my-ansible-vault-pw-file first_playbook.yml
+
+Or with a prompt instead of the vault password file:
+
+.. code-block:: console
+
+ ansible-playbook -i inventory --vault-id my_user@prompt first_playbook.yml
+
+To see the original value, you can use the debug module. Please note that if your YAML file defines the ``ansible_connection`` variable (as in our example), it will take effect when you execute the command below. To prevent this, make a copy of the file without the ansible_connection variable.
+
+.. code-block:: console
+
+ cat vyos.yml | grep -v ansible_connection >> vyos_no_connection.yml
+
+ ansible localhost -m debug -a var="ansible_password" -e "@vyos_no_connection.yml" --ask-vault-pass
+ Vault password:
+
+ localhost | SUCCESS => {
+ "ansible_password": "VyOS_SSH_password"
+ }
+
+
+.. warning::
+
+   Vault content can only be decrypted with the password that was used to encrypt it. If you want to stop using one password and move to a new one, you can update and re-encrypt existing vault content with ``ansible-vault rekey myfile``, then provide the old password and the new password. Copies of vault content still encrypted with the old password can still be decrypted with the old password.
+
+For more details on building inventory files, see :ref:`the introduction to inventory<intro_inventory>`; for more details on ansible-vault, see :ref:`the full Ansible Vault documentation<vault>`.
+
+Now that you understand the basics of commands, playbooks, and inventory, it's time to explore some more complex Ansible Network examples.
diff --git a/docs/docsite/rst/network/getting_started/first_playbook.rst b/docs/docsite/rst/network/getting_started/first_playbook.rst
new file mode 100644
index 00000000..b09814cd
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/first_playbook.rst
@@ -0,0 +1,212 @@
+
+.. _first_network_playbook:
+
+***************************************************
+Run Your First Command and Playbook
+***************************************************
+
+Put the concepts you learned to work with this quick tutorial. Install Ansible, execute a network configuration command manually, execute the same command with Ansible, then create a playbook so you can execute the command any time on multiple network devices.
+
+.. contents::
+ :local:
+
+Prerequisites
+==================================================
+
+Before you work through this tutorial you need:
+
+- Ansible 2.10 (or higher) installed
+- One or more network devices that are compatible with Ansible
+- Basic Linux command line knowledge
+- Basic knowledge of network switch & router configuration
+
+Install Ansible
+==================================================
+
+Install Ansible using your preferred method. See :ref:`installation_guide`. Then return to this tutorial.
+
+Confirm the version of Ansible (must be >= 2.10):
+
+.. code-block:: bash
+
+ ansible --version
+
+
+Establish a manual connection to a managed node
+==================================================
+
+To confirm your credentials, connect to a network device manually and retrieve its configuration. Replace the sample user and device name with your real credentials. For example, for a VyOS router:
+
+.. code-block:: bash
+
+ ssh my_vyos_user@vyos.example.net
+ show config
+ exit
+
+This manual connection also establishes the authenticity of the network device, adding its RSA key fingerprint to your list of known hosts. (If you have connected to the device before, you have already established its authenticity.)
+
+
+Run your first network Ansible command
+==================================================
+
+Instead of manually connecting and running a command on the network device, you can retrieve its configuration with a single, stripped-down Ansible command:
+
+.. code-block:: bash
+
+ ansible all -i vyos.example.net, -c ansible.netcommon.network_cli -u my_vyos_user -k -m vyos.vyos.vyos_facts -e ansible_network_os=vyos.vyos.vyos
+
+The flags in this command set seven values:
+ - the host group(s) to which the command should apply (in this case, all)
+ - the inventory (-i, the device or devices to target - without the trailing comma -i points to an inventory file)
+ - the connection method (-c, the method for connecting and executing ansible)
+ - the user (-u, the username for the SSH connection)
+ - the SSH connection method (-k, please prompt for the password)
+ - the module (-m, the Ansible module to run, using the fully qualified collection name (FQCN))
+ - an extra variable (-e, in this case, setting the network OS value)
+
+NOTE: If you use ``ssh-agent`` with SSH keys, Ansible loads them automatically. You can omit the ``-k`` flag.
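+
+For example (a minimal sketch, assuming your SSH key is already authorized on the network device):
+
+.. code-block:: bash
+
+   ssh-add ~/.ssh/id_rsa
+   ansible all -i vyos.example.net, -c ansible.netcommon.network_cli -u my_vyos_user -m vyos.vyos.vyos_facts -e ansible_network_os=vyos.vyos.vyos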
+
+.. note::
+
+ If you are running Ansible in a virtual environment, you will also need to add the variable ``ansible_python_interpreter=/path/to/venv/bin/python``
+
+
+Create and run your first network Ansible Playbook
+==================================================
+
+If you want to run this command every day, you can save it in a playbook and run it with ``ansible-playbook`` instead of ``ansible``. The playbook can store a lot of the parameters you provided with flags at the command line, leaving less to type at the command line. You need two files for this - a playbook and an inventory file.
+
+1. Download :download:`first_playbook.yml <sample_files/first_playbook.yml>`, which looks like this:
+
+.. literalinclude:: sample_files/first_playbook.yml
+ :language: YAML
+
+The playbook sets three of the seven values from the command line above: the group (``hosts: all``), the connection method (``connection: ansible.netcommon.network_cli``) and the module (in each task). With those values set in the playbook, you can omit them on the command line. The playbook also adds a second task to show the config output. When a module runs in a playbook, the output is held in memory for use by future tasks instead of being written to the console. The debug task here lets you see the results in your shell.
+
+2. Run the playbook with the command:
+
+.. code-block:: bash
+
+ ansible-playbook -i vyos.example.net, -u ansible -k -e ansible_network_os=vyos.vyos.vyos first_playbook.yml
+
+The playbook contains one play with two tasks, and should generate output like this:
+
+.. code-block:: bash
+
+ $ ansible-playbook -i vyos.example.net, -u ansible -k -e ansible_network_os=vyos.vyos.vyos first_playbook.yml
+
+ PLAY [First Playbook]
+ ***************************************************************************************************************************
+
+ TASK [Get config for VyOS devices]
+ ***************************************************************************************************************************
+ ok: [vyos.example.net]
+
+ TASK [Display the config]
+ ***************************************************************************************************************************
+ ok: [vyos.example.net] => {
+ "msg": "The hostname is vyos and the OS is VyOS 1.1.8"
+ }
+
+3. Now that you can retrieve the device config, try updating it with Ansible. Download :download:`first_playbook_ext.yml <sample_files/first_playbook_ext.yml>`, which is an extended version of the first playbook:
+
+.. literalinclude:: sample_files/first_playbook_ext.yml
+ :language: YAML
+
+The extended first playbook has four tasks in a single play. Run it with the same command you used above. The output shows you the change Ansible made to the config:
+
+.. code-block:: bash
+
+ $ ansible-playbook -i vyos.example.net, -u ansible -k -e ansible_network_os=vyos.vyos.vyos first_playbook_ext.yml
+
+ PLAY [First Playbook]
+ ************************************************************************************************************************************
+
+ TASK [Get config for VyOS devices]
+ **********************************************************************************************************************************
+ ok: [vyos.example.net]
+
+ TASK [Display the config]
+ *************************************************************************************************************************************
+ ok: [vyos.example.net] => {
+ "msg": "The hostname is vyos and the OS is VyOS 1.1.8"
+ }
+
+ TASK [Update the hostname]
+ *************************************************************************************************************************************
+ changed: [vyos.example.net]
+
+ TASK [Get changed config for VyOS devices]
+ *************************************************************************************************************************************
+ ok: [vyos.example.net]
+
+ TASK [Display the changed config]
+ *************************************************************************************************************************************
+ ok: [vyos.example.net] => {
+ "msg": "The new hostname is vyos-changed and the OS is VyOS 1.1.8"
+ }
+
+ PLAY RECAP
+ ************************************************************************************************************************************
+ vyos.example.net : ok=5 changed=1 unreachable=0 failed=0
+
+
+
+.. _network_gather_facts:
+
+Gathering facts from network devices
+====================================
+
+The ``gather_facts`` keyword now supports gathering network device facts in standardized key/value pairs. You can feed these network facts into further tasks to manage the network device.
+
+You can also use the new ``gather_network_resources`` parameter with the network ``*_facts`` modules (such as :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>`) to return just a subset of the device configuration, as shown below.
+
+.. code-block:: yaml
+
+ - hosts: arista
+ gather_facts: True
+ gather_subset: interfaces
+ module_defaults:
+ arista.eos.eos_facts:
+ gather_network_resources: interfaces
+
+The playbook returns the following interface facts:
+
+.. code-block:: yaml
+
+ "network_resources": {
+ "interfaces": [
+ {
+ "description": "test-interface",
+ "enabled": true,
+ "mtu": "512",
+ "name": "Ethernet1"
+ },
+ {
+ "enabled": true,
+ "mtu": "3000",
+ "name": "Ethernet2"
+ },
+ {
+ "enabled": true,
+ "name": "Ethernet3"
+ },
+ {
+ "enabled": true,
+ "name": "Ethernet4"
+ },
+ {
+ "enabled": true,
+ "name": "Ethernet5"
+ },
+ {
+ "enabled": true,
+ "name": "Ethernet6"
+ },
+ ]
+ }
+
+
+Note that this returns a subset of what is returned by just setting ``gather_subset: interfaces``.
+
+You can store these facts and use them directly in another task, such as with the :ref:`eos_interfaces <ansible_collections.arista.eos.eos_interfaces_module>` resource module.
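+
+For example, a minimal sketch of such a follow-on task (it assumes the facts were gathered as above; ``ansible_network_resources`` is the fact populated by ``gather_network_resources``):
+
+.. code-block:: yaml
+
+   - name: Reapply the gathered interface configuration
+     arista.eos.eos_interfaces:
+       config: "{{ ansible_network_resources['interfaces'] }}"
+       state: merged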
diff --git a/docs/docsite/rst/network/getting_started/index.rst b/docs/docsite/rst/network/getting_started/index.rst
new file mode 100644
index 00000000..d9638a5c
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/index.rst
@@ -0,0 +1,34 @@
+.. _network_getting_started:
+
+**********************************
+Network Getting Started
+**********************************
+
+Ansible collections support a wide range of vendors, device types, and actions, so you can manage your entire network with a single automation tool. With Ansible, you can:
+
+- Automate repetitive tasks to speed routine network changes and free up your time for more strategic work
+- Leverage the same simple, powerful, and agentless automation tool for network tasks that operations and development use
+- Separate the data model (in a playbook or role) from the execution layer (via Ansible modules) to manage heterogeneous network devices
+- Benefit from community and vendor-generated sample playbooks and roles to help accelerate network automation projects
+- Communicate securely with network hardware over SSH or HTTPS
+
+**Who should use this guide?**
+
+This guide is intended for network engineers using Ansible for the first time. If you understand networks but have never used Ansible, work through the guide from start to finish.
+
+This guide is also useful for experienced Ansible users automating network tasks for the first time. You can use Ansible commands, playbooks and modules to configure hubs, switches, routers, bridges and other network devices. But network modules are different from Linux/Unix and Windows modules, and you must understand some network-specific concepts to succeed. If you understand Ansible but have never automated a network task, start with the second section.
+
+This guide introduces basic Ansible concepts and guides you through your first Ansible commands, playbooks and inventory entries.
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Getting Started Guide
+
+ basic_concepts
+ network_differences
+ first_playbook
+ first_inventory
+ network_roles
+ intermediate_concepts
+ network_connection_options
+ network_resources
diff --git a/docs/docsite/rst/network/getting_started/intermediate_concepts.rst b/docs/docsite/rst/network/getting_started/intermediate_concepts.rst
new file mode 100644
index 00000000..3496f22e
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/intermediate_concepts.rst
@@ -0,0 +1,39 @@
+*****************
+Beyond the basics
+*****************
+
+This page introduces some concepts that help you manage your Ansible workflow with directory structure and source control. Like the Basic Concepts at the beginning of this guide, these intermediate concepts are common to all uses of Ansible.
+
+.. contents::
+ :local:
+
+
+A typical Ansible filetree
+==========================
+
+Ansible expects to find certain files in certain places. As you expand your inventory and create and run more network playbooks, keep your files organized in your working Ansible project directory like this:
+
+.. code-block:: console
+
+ .
+ ├── backup
+ │   ├── vyos.example.net_config.2018-02-08@11:10:15
+ │   ├── vyos.example.net_config.2018-02-12@08:22:41
+ ├── first_playbook.yml
+ ├── inventory
+ ├── group_vars
+ │   ├── vyos.yml
+ │   └── eos.yml
+ ├── roles
+ │   ├── static_route
+ │   └── system
+ ├── second_playbook.yml
+ └── third_playbook.yml
+
+The ``backup`` directory and the files in it get created when you run modules like ``vyos_config`` with the ``backup: yes`` parameter.
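+
+For example, a minimal task that produces such a backup file (a sketch using the ``vyos.vyos.vyos_config`` module from earlier in this guide):
+
+.. code-block:: yaml
+
+   - name: Back up the current configuration before changing it
+     vyos.vyos.vyos_config:
+       backup: yes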
+
+
+Tracking changes to inventory and playbooks: source control with git
+====================================================================
+
+As you expand your inventory, roles and playbooks, you should place your Ansible projects under source control. We recommend ``git`` for source control. ``git`` provides an audit trail, letting you track changes, roll back mistakes, view history and share the workload of managing, maintaining and expanding your Ansible ecosystem. There are plenty of tutorials and guides to using ``git`` available.
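+
+For example, a minimal sequence to put the project directory above under version control (a sketch; adjust the paths to match your project):
+
+.. code-block:: console
+
+   git init
+   git add inventory group_vars roles *.yml
+   git commit -m "Initial network automation project"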
diff --git a/docs/docsite/rst/network/getting_started/network_connection_options.rst b/docs/docsite/rst/network/getting_started/network_connection_options.rst
new file mode 100644
index 00000000..c23e7307
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/network_connection_options.rst
@@ -0,0 +1,48 @@
+.. _network_connection_options:
+
+***************************************
+Working with network connection options
+***************************************
+
+Network modules can support multiple connection protocols, such as ``ansible.netcommon.network_cli``, ``ansible.netcommon.netconf``, and ``ansible.netcommon.httpapi``. These connections include some common options you can set to control how the connection to your network device behaves.
+
+Common options are:
+
+* ``become`` and ``become_method`` as described in :ref:`privilege_escalation`.
+* ``network_os`` - set to match your network platform you are communicating with. See the :ref:`platform-specific <platform_options>` pages.
+* ``remote_user`` as described in :ref:`connection_set_user`.
+* Timeout options - ``persistent_command_timeout``, ``persistent_connect_timeout``, and ``timeout``.
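+
+For example, a ``group_vars`` file that combines several of these options might look like this (a sketch; the platform, user name, and timeout are illustrative):
+
+.. code-block:: yaml
+
+   ansible_connection: ansible.netcommon.network_cli
+   ansible_network_os: cisco.ios.ios
+   ansible_user: my_network_user
+   ansible_become: yes
+   ansible_become_method: enable
+   ansible_command_timeout: 30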
+
+.. _timeout_options:
+
+Setting timeout options
+=======================
+
+When communicating with a remote device, you have control over how long Ansible maintains the connection to that device, as well as how long Ansible waits for a command to complete on that device. Each of these options can be set as variables in your playbook files, environment variables, or settings in your :ref:`ansible.cfg file <ansible_configuration_settings>`.
+
+For example, the three options for controlling the connection timeout are as follows.
+
+Using vars (per task):
+
+.. code-block:: yaml
+
+ - name: save running-config
+ cisco.ios.ios_command:
+ commands: copy running-config startup-config
+ vars:
+ ansible_command_timeout: 30
+
+Using the environment variable:
+
+.. code-block:: bash
+
+ $ export ANSIBLE_PERSISTENT_COMMAND_TIMEOUT=30
+
+Using the global configuration (in :file:`ansible.cfg`):
+
+.. code-block:: ini
+
+ [persistent_connection]
+ command_timeout = 30
+
+See :ref:`ansible_variable_precedence` for details on the relative precedence of each of these variables. See the documentation for the individual connection types to understand each option.
diff --git a/docs/docsite/rst/network/getting_started/network_differences.rst b/docs/docsite/rst/network/getting_started/network_differences.rst
new file mode 100644
index 00000000..76b18aa4
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/network_differences.rst
@@ -0,0 +1,68 @@
+************************************************************
+How Network Automation is Different
+************************************************************
+
+Network automation leverages the basic Ansible concepts, but there are important differences in how the network modules work. This introduction prepares you to understand the exercises in this guide.
+
+.. contents::
+ :local:
+
+Execution on the control node
+================================================================================
+
+Unlike most Ansible modules, network modules do not run on the managed nodes. From a user's point of view, network modules work like any other modules. They work with ad-hoc commands, playbooks, and roles. Behind the scenes, however, network modules use a different methodology than the other (Linux/Unix and Windows) modules use. Ansible is written and executed in Python. Because the majority of network devices cannot run Python, the Ansible network modules are executed on the Ansible control node, where ``ansible`` or ``ansible-playbook`` runs.
+
+Network modules also use the control node as a destination for backup files, for those modules that offer a ``backup`` option. With Linux/Unix modules, where a configuration file already exists on the managed node(s), the backup file gets written by default in the same directory as the new, changed file. Network modules do not update configuration files on the managed nodes, because network configuration is not written in files. Network modules write backup files on the control node, usually in the ``backup`` directory under the playbook root directory.
+
+Multiple communication protocols
+================================================================================
+
+Because network modules execute on the control node instead of on the managed nodes, they can support multiple communication protocols. The communication protocol (XML over SSH, CLI over SSH, API over HTTPS) selected for each network module depends on the platform and the purpose of the module. Some network modules support only one protocol; some offer a choice. The most common protocol is CLI over SSH. You set the communication protocol with the ``ansible_connection`` variable:
+
+.. csv-table::
+ :header: "Value of ansible_connection", "Protocol", "Requires", "Persistent?"
+ :widths: 30, 10, 10, 10
+
+ "ansible.netcommon.network_cli", "CLI over SSH", "network_os setting", "yes"
+ "ansible.netcommon.netconf", "XML over SSH", "network_os setting", "yes"
+ "ansible.netcommon.httpapi", "API over HTTP/HTTPS", "network_os setting", "yes"
+ "local", "depends on provider", "provider setting", "no"
+
+.. note::
+ ``ansible.netcommon.httpapi`` deprecates ``eos_eapi`` and ``nxos_nxapi``. See :ref:`httpapi_plugins` for details and an example.
+
+``ansible_connection: local`` has been deprecated. Please use one of the persistent connection types listed above instead. With persistent connections, you can define the hosts and credentials only once, rather than in every task. You also need to set the ``network_os`` variable for the specific network platform you are communicating with. For more details on using each connection type on various platforms, see the :ref:`platform-specific <platform_options>` pages.
+
+
+Collections organized by network platform
+================================================================================
+
+A network platform is a set of network devices with a common operating system that can be managed by an Ansible collection, for example:
+
+- Arista: `arista.eos <https://galaxy.ansible.com/arista/eos>`_
+- Cisco: `cisco.ios <https://galaxy.ansible.com/cisco/ios>`_, `cisco.iosxr <https://galaxy.ansible.com/cisco/iosxr>`_, `cisco.nxos <https://galaxy.ansible.com/cisco/nxos>`_
+- Juniper: `junipernetworks.junos <https://galaxy.ansible.com/junipernetworks/junos>`_
+- VyOS: `vyos.vyos <https://galaxy.ansible.com/vyos/vyos>`_
+
+All modules within a network platform share certain requirements. Some network platforms have specific differences - see the :ref:`platform-specific <platform_options>` documentation for details.
+
+.. _privilege_escalation:
+
+Privilege Escalation: ``enable`` mode, ``become``, and ``authorize``
+================================================================================
+
+Several network platforms support privilege escalation, where certain tasks must be done by a privileged user. On network devices this is called the ``enable`` mode (the equivalent of ``sudo`` in \*nix administration). Ansible network modules offer privilege escalation for those network devices that support it. For details of which platforms support ``enable`` mode, with examples of how to use it, see the :ref:`platform-specific <platform_options>` documentation.
+
+Using ``become`` for privilege escalation
+-----------------------------------------
+
+Use the top-level Ansible parameter ``become: yes`` with ``become_method: enable`` to run a task, play, or playbook with escalated privileges on any network platform that supports privilege escalation. You must use either ``connection: network_cli`` or ``connection: httpapi`` with ``become: yes`` and ``become_method: enable``. If you are using ``network_cli`` to connect Ansible to your network devices, a ``group_vars`` file would look like:
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: cisco.ios.ios
+ ansible_become: yes
+ ansible_become_method: enable
+
+For more information, see :ref:`Become and Networks<become_network>`.
diff --git a/docs/docsite/rst/network/getting_started/network_resources.rst b/docs/docsite/rst/network/getting_started/network_resources.rst
new file mode 100644
index 00000000..3451c476
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/network_resources.rst
@@ -0,0 +1,46 @@
+
+.. _network_resources:
+
+************************
+Resources and next steps
+************************
+
+.. contents::
+ :local:
+
+Documents
+=========
+
+Read more about Ansible for Network Automation:
+
+- Network Automation on the `Ansible website <https://www.ansible.com/overview/networking>`_
+- Ansible Network `Blog posts <https://www.ansible.com/blog/topic/networks>`_
+
+Events (on video and in person)
+===============================
+
+All sessions at Ansible events are recorded and include many Network-related topics (use Filter by Category to view only Network topics). You can also join us for future events in your area. See:
+
+- `Recorded AnsibleFests <https://www.ansible.com/resources/videos/ansiblefest>`_
+- `Recorded AnsibleAutomates <https://www.ansible.com/resources/webinars-training>`_
+- `Upcoming Ansible Events <https://www.ansible.com/community/events>`_
+
+GitHub repos
+============
+
+Ansible hosts module code, examples, demonstrations, and other content on GitHub. Anyone with a GitHub account is able to create Pull Requests (PRs) or issues on these repos:
+
+- `Network-Automation <https://github.com/network-automation>`_ is an open community for all things network automation. Have an idea, some playbooks, or roles to share? Email ansible-network@redhat.com and we will add you as a contributor to the repository.
+
+- `Ansible collections <https://github.com/ansible-collections>`_ is the main repository for Ansible-maintained and community collections, including collections for network devices.
+
+
+
+IRC and Slack
+=============
+
+Join us on:
+
+* IRC - the ``#ansible-network`` channel on Freenode
+
+* Slack - `<https://ansiblenetwork.slack.com>`_
diff --git a/docs/docsite/rst/network/getting_started/network_roles.rst b/docs/docsite/rst/network/getting_started/network_roles.rst
new file mode 100644
index 00000000..b77d0611
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/network_roles.rst
@@ -0,0 +1,267 @@
+
+.. _using_network_roles:
+
+*************************
+Use Ansible network roles
+*************************
+
+Roles are sets of Ansible defaults, files, tasks, templates, variables, and other Ansible components that work together. As you saw in :ref:`first_network_playbook`, moving from a command to a playbook makes it easy to run multiple tasks and repeat the same tasks in the same order. Moving from a playbook to a role makes it even easier to reuse and share your ordered tasks. You can look at :ref:`Ansible Galaxy <ansible_galaxy>`, which lets you share your roles and use others' roles, either directly or as inspiration.
+
+.. contents::
+ :local:
+
+Understanding roles
+===================
+
+So what exactly is a role, and why should you care? Ansible roles are basically playbooks broken up into a known file structure. Moving to roles from a playbook makes sharing, reading, and updating your Ansible workflow easier. Users can write their own roles. So for example, you don't have to write your own DNS playbook. Instead, you specify a DNS server and a role to configure it for you.
+
+To simplify your workflow even further, the Ansible Network team has written a series of roles for common network use cases. Using these roles means you don't have to reinvent the wheel. Instead of writing and maintaining your own ``create_vlan`` playbooks or roles, you can concentrate on designing, codifying and maintaining the parser templates that describe your network topologies and inventory, and let Ansible's network roles do the work. See the `network-related roles <https://galaxy.ansible.com/ansible-network>`_ on Ansible Galaxy.
+
+A sample DNS playbook
+---------------------
+
+To demonstrate the concept of what a role is, the example ``playbook.yml`` below is a single YAML file containing a two-task playbook. This Ansible Playbook configures the hostname on a Cisco IOS XE device, then it configures the DNS (domain name system) servers.
+
+.. code-block:: yaml
+
+ ---
+ - name: configure cisco routers
+ hosts: routers
+ connection: ansible.netcommon.network_cli
+ gather_facts: no
+ vars:
+ dns: "8.8.8.8 8.8.4.4"
+
+ tasks:
+ - name: configure hostname
+ cisco.ios.ios_config:
+ lines: hostname {{ inventory_hostname }}
+
+ - name: configure DNS
+ cisco.ios.ios_config:
+ lines: ip name-server {{dns}}
+
+If you run this playbook using the ``ansible-playbook`` command, you'll see the output below. This example uses the ``-l`` option to limit the playbook to run only on the **rtr1** node.
+
+.. code-block:: bash
+
+ [user@ansible ~]$ ansible-playbook playbook.yml -l rtr1
+
+ PLAY [configure cisco routers] *************************************************
+
+ TASK [configure hostname] ******************************************************
+ changed: [rtr1]
+
+ TASK [configure DNS] ***********************************************************
+ changed: [rtr1]
+
+ PLAY RECAP *********************************************************************
+ rtr1 : ok=2 changed=2 unreachable=0 failed=0
+
+
+This playbook configured the hostname and DNS servers. You can verify that configuration on the Cisco IOS XE **rtr1** router:
+
+.. code-block:: bash
+
+ rtr1#sh run | i name
+ hostname rtr1
+ ip name-server 8.8.8.8 8.8.4.4
+
+Convert the playbook into a role
+---------------------------------
+
+The next step is to convert this playbook into a reusable role. You can create the directory structure manually, or you can use ``ansible-galaxy init`` to create the standard framework for a role.
+
+.. code-block:: bash
+
+ [user@ansible ~]$ ansible-galaxy init system-demo
+ [user@ansible ~]$ cd system-demo/
+ [user@ansible system-demo]$ tree
+ .
+ ├── defaults
+ │ └── main.yml
+ ├── files
+ ├── handlers
+ │ └── main.yml
+ ├── meta
+ │ └── main.yml
+ ├── README.md
+ ├── tasks
+ │ └── main.yml
+ ├── templates
+ ├── tests
+ │ ├── inventory
+ │ └── test.yml
+ └── vars
+ └── main.yml
+
+This first demonstration uses only the **tasks** and **vars** directories. The directory structure would look as follows:
+
+.. code-block:: bash
+
+ [user@ansible system-demo]$ tree
+ .
+ ├── tasks
+ │ └── main.yml
+ └── vars
+ └── main.yml
+
+Next, move the content of the ``vars`` and ``tasks`` sections from the original Ansible Playbook into the role. First, move the two tasks into the ``tasks/main.yml`` file:
+
+.. code-block:: bash
+
+ [user@ansible system-demo]$ cat tasks/main.yml
+ ---
+ - name: configure hostname
+ cisco.ios.ios_config:
+ lines: hostname {{ inventory_hostname }}
+
+ - name: configure DNS
+ cisco.ios.ios_config:
+ lines: ip name-server {{dns}}
+
+Next, move the variables into the ``vars/main.yml`` file:
+
+.. code-block:: bash
+
+ [user@ansible system-demo]$ cat vars/main.yml
+ ---
+ dns: "8.8.8.8 8.8.4.4"
+
+Finally, modify the original Ansible Playbook to remove the ``tasks`` and ``vars`` sections and add the keyword ``roles`` with the name of the role, in this case ``system-demo``. You'll have this playbook:
+
+.. code-block:: yaml
+
+ ---
+ - name: configure cisco routers
+ hosts: routers
+ connection: ansible.netcommon.network_cli
+ gather_facts: no
+
+ roles:
+ - system-demo
+
+To summarize, this demonstration now has a total of three directories and three YAML files. There is the ``system-demo`` folder, which represents the role. The ``system-demo`` folder contains two folders, ``tasks`` and ``vars``. There is a ``main.yml`` in each respective folder. The ``vars/main.yml`` contains the variables from ``playbook.yml``. The ``tasks/main.yml`` contains the tasks from ``playbook.yml``. The ``playbook.yml`` file has been modified to call the role rather than specifying vars and tasks directly. Here is a tree of the current working directory:
+
+.. code-block:: bash
+
+ [user@ansible ~]$ tree
+ .
+ ├── playbook.yml
+ └── system-demo
+ ├── tasks
+ │ └── main.yml
+ └── vars
+ └── main.yml
+
+Running the playbook results in identical behavior with slightly different output:
+
+.. code-block:: bash
+
+ [user@ansible ~]$ ansible-playbook playbook.yml -l rtr1
+
+ PLAY [configure cisco routers] *************************************************
+
+ TASK [system-demo : configure hostname] ****************************************
+ ok: [rtr1]
+
+ TASK [system-demo : configure DNS] *********************************************
+ ok: [rtr1]
+
+ PLAY RECAP *********************************************************************
+ rtr1 : ok=2 changed=0 unreachable=0 failed=0
+
+As seen above, each task is now prepended with the role name, in this case ``system-demo``. When running a playbook that contains several roles, this helps pinpoint where a task is being called from. This playbook returned ``ok`` instead of ``changed`` because its behavior is identical to that of the single file playbook we started from.
+
+As before, the playbook will generate the following configuration on a Cisco IOS-XE router:
+
+.. code-block:: bash
+
+ rtr1#sh run | i name
+ hostname rtr1
+ ip name-server 8.8.8.8 8.8.4.4
+
+
+This is why Ansible roles can be thought of as deconstructed playbooks. They are simple, effective and reusable. Another user can now simply include the ``system-demo`` role instead of having to create a custom "hard coded" playbook.
+
+Variable precedence
+-------------------
+
+What if you want to change the DNS servers? You aren't expected to change the ``vars/main.yml`` within the role structure. Ansible has many places where you can specify variables for a given play. See :ref:`playbooks_variables` for details on variables and precedence. There are actually 21 places to put variables. While this list can seem overwhelming at first glance, the vast majority of use cases only involve knowing where to put variables with the least precedence and how to pass variables with the most precedence. See :ref:`ansible_variable_precedence` for more guidance on where you should put variables.
+
+Lowest precedence
+^^^^^^^^^^^^^^^^^
+
+The lowest precedence is the ``defaults`` directory within a role. This means that any of the other 20 locations where you could potentially specify the variable will take higher precedence than ``defaults``, no matter what. To immediately give the vars from the ``system-demo`` role the least precedence, rename the ``vars`` directory to ``defaults``.
+
+.. code-block:: bash
+
+ [user@ansible system-demo]$ mv vars defaults
+ [user@ansible system-demo]$ tree
+ .
+ ├── defaults
+ │ └── main.yml
+ ├── tasks
+ │ └── main.yml
+
+Add a new ``vars`` section to the playbook to override the default behavior (where the variable ``dns`` is set to 8.8.8.8 and 8.8.4.4). For this demonstration, set ``dns`` to 1.1.1.1, so ``playbook.yml`` becomes:
+
+.. code-block:: yaml
+
+ ---
+ - name: configure cisco routers
+ hosts: routers
+ connection: ansible.netcommon.network_cli
+ gather_facts: no
+ vars:
+ dns: 1.1.1.1
+ roles:
+ - system-demo
+
+Run this updated playbook on **rtr2**:
+
+.. code-block:: bash
+
+ [user@ansible ~]$ ansible-playbook playbook.yml -l rtr2
+
+The configuration on the **rtr2** Cisco router will look as follows:
+
+.. code-block:: bash
+
+ rtr2#sh run | i name-server
+ ip name-server 1.1.1.1
+
+The variable configured in the playbook now has precedence over the ``defaults`` directory. In fact, any other spot you configure variables would win over the values in the ``defaults`` directory.
+
+Highest precedence
+^^^^^^^^^^^^^^^^^^
+
+Specifying variables in the ``defaults`` directory within a role will always take the lowest precedence, while specifying ``vars`` as extra vars with the ``-e`` or ``--extra-vars=`` option will always take the highest precedence, no matter what. Re-running the playbook with the ``-e`` option overrides both the ``defaults`` directory (8.8.4.4 and 8.8.8.8) and the newly created ``vars`` within the playbook that contains the 1.1.1.1 DNS server.
+
+.. code-block:: bash
+
+ [user@ansible ~]$ ansible-playbook playbook.yml -e "dns=192.168.1.1" -l rtr3
+
+The result on the Cisco IOS XE router will only contain the highest precedence setting of 192.168.1.1:
+
+.. code-block:: bash
+
+ rtr3#sh run | i name-server
+ ip name-server 192.168.1.1
+
+How is this useful? Why should you care? Extra vars are commonly used by network operators to override defaults. A powerful example of this is the Survey feature of Red Hat Ansible Tower, which lets you prompt a network operator to fill out parameters in a web form. This makes it simple for non-technical playbook users to execute a playbook from their web browser. See `Ansible Tower Job Template Surveys <https://docs.ansible.com/ansible-tower/latest/html/userguide/workflow_templates.html#surveys>`_ for more details.
+
+
+Update an installed role
+------------------------
+
+The Ansible Galaxy page for a role lists all available versions. To update a locally installed role to a new or different version, use the ``ansible-galaxy install`` command with the version and ``--force`` option. You may also need to manually update any dependent roles to support this version. See the role **Read Me** tab in Galaxy for dependent role minimum version requirements.
+
+.. code-block:: bash
+
+ [user@ansible]$ ansible-galaxy install mynamespace.my_role,v2.7.1 --force
+
+.. seealso::
+
+ `Ansible Galaxy documentation <https://galaxy.ansible.com/docs/>`_
+ Ansible Galaxy user guide
diff --git a/docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml b/docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml
new file mode 100644
index 00000000..908b89f9
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/sample_files/first_playbook.yml
@@ -0,0 +1,15 @@
+---
+
+- name: Network Getting Started First Playbook
+ connection: ansible.netcommon.network_cli
+ gather_facts: false
+ hosts: all
+ tasks:
+
+ - name: Get config for VyOS devices
+ vyos.vyos.vyos_facts:
+ gather_subset: all
+
+ - name: Display the config
+ debug:
+ msg: "The hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}"
diff --git a/docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml b/docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml
new file mode 100644
index 00000000..2d5f6a5f
--- /dev/null
+++ b/docs/docsite/rst/network/getting_started/sample_files/first_playbook_ext.yml
@@ -0,0 +1,29 @@
+---
+
+- name: Network Getting Started First Playbook Extended
+ connection: ansible.netcommon.network_cli
+ gather_facts: false
+ hosts: all
+ tasks:
+
+ - name: Get config for VyOS devices
+ vyos.vyos.vyos_facts:
+ gather_subset: all
+
+ - name: Display the config
+ debug:
+ msg: "The hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}"
+
+ - name: Update the hostname
+ vyos.vyos.vyos_config:
+ backup: yes
+ lines:
+ - set system host-name vyos-changed
+
+ - name: Get changed config for VyOS devices
+ vyos.vyos.vyos_facts:
+ gather_subset: all
+
+ - name: Display the changed config
+ debug:
+ msg: "The new hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}"
diff --git a/docs/docsite/rst/network/index.rst b/docs/docsite/rst/network/index.rst
new file mode 100644
index 00000000..25756391
--- /dev/null
+++ b/docs/docsite/rst/network/index.rst
@@ -0,0 +1,20 @@
+:orphan:
+
+.. _network_guide:
+
+******************************
+Ansible for Network Automation
+******************************
+
+Ansible Network modules extend the benefits of simple, powerful, agentless automation to network administrators and teams. Ansible Network modules can configure your network stack, test and validate existing network state, and discover and correct network configuration drift.
+
+If you're new to Ansible, or new to using Ansible for network management, start with :ref:`network_getting_started`. If you are already familiar with network automation with Ansible, see :ref:`network_advanced`.
+
+For documentation on using a particular network module, consult the :ref:`list of all network modules<network_modules>`. Network modules for various hardware are supported by different teams including the hardware vendors themselves, volunteers from the Ansible community, and the Ansible Network Team.
+
+.. toctree::
+ :maxdepth: 3
+
+ getting_started/index
+ user_guide/index
+ dev_guide/index
diff --git a/docs/docsite/rst/network/user_guide/cli_parsing.rst b/docs/docsite/rst/network/user_guide/cli_parsing.rst
new file mode 100644
index 00000000..0dd81e2e
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/cli_parsing.rst
@@ -0,0 +1,719 @@
+.. _cli_parsing:
+
+*****************************************
+Parsing semi-structured text with Ansible
+*****************************************
+
+The :ref:`cli_parse <ansible_collections.ansible.netcommon.cli_parse_module>` module parses semi-structured data such as network configurations into structured data to allow programmatic use of the data from that device. You can pull information from a network device and update a CMDB in one playbook. Use cases include automated troubleshooting, creating dynamic documentation, updating IPAM (IP address management) tools and so on.
+
+
+.. contents::
+ :local:
+
+
+Understanding the CLI parser
+=============================
+
+The `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ collection version 1.2.0 or later includes the :ref:`cli_parse <ansible_collections.ansible.netcommon.cli_parse_module>` module that can run CLI commands and parse the semi-structured text output. You can use the ``cli_parse`` module on a device, host, or platform that only supports a command-line interface and whose commands return semi-structured text. The ``cli_parse`` module can either run a CLI command on a device and return a parsed result, or simply parse any text document. The ``cli_parse`` module includes cli_parser plugins to interface with a variety of parsing engines.
+
+Why parse the text?
+--------------------
+
+Parsing semi-structured data such as network configurations into structured data allows programmatic use of the data from that device. Use cases include automated troubleshooting, creating dynamic documentation, updating IPAM (IP address management) tools and so on. You may prefer to do this with Ansible natively to take advantage of native Ansible constructs such as:
+
+- The ``when`` clause to conditionally run other tasks or roles
+- The ``assert`` module to check configuration and operational state compliance
+- The ``template`` module to generate reports about configuration and operational state information
+- Templates and ``command`` or ``config`` modules to generate host, device, or platform commands or configuration
+- The current platform ``facts`` modules to supplement native facts information
+
+By parsing semi-structured text into Ansible native data structures, you can take full advantage of Ansible's network modules and plugins.
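+
+For example, once ``cli_parse`` sets an ``interfaces`` fact like the one shown later on this page, a compliance check becomes a one-task assertion (a sketch; the interface name and data structure follow the native-parser example below):
+
+.. code-block:: yaml
+
+   - name: Verify that Ethernet1/1 is administratively up
+     ansible.builtin.assert:
+       that:
+         - interfaces['Ethernet1/1'].state.admin == 'up'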
+
+
+When not to parse the text
+---------------------------
+
+You should not parse semi-structured text when:
+
+- The device, host, or platform has a REST API and returns JSON.
+- Existing Ansible facts modules already return the desired data.
+- Ansible network resource modules exist for configuration management of the device and resource.
+
+Parsing the CLI
+=========================
+
+The ``cli_parse`` module includes the following cli_parsing plugins:
+
+``native``
+ The native parsing engine is built into Ansible and requires no additional Python libraries
+``xml``
+ Convert XML to an Ansible native data structure
+``textfsm``
+ A Python module that implements a template-based state machine for parsing semi-formatted text
+``ntc_templates``
+ Predefined ``textfsm`` template packages supporting a variety of platforms and commands
+``ttp``
+ A library for semi-structured text parsing using templates, with added capabilities to simplify the process
+``pyats``
+ Uses the parsers included with the Cisco Test Automation & Validation Solution
+``json``
+ Converts JSON output at the CLI to an Ansible native data structure
+
+Although Ansible contains a number of plugins that can convert XML to Ansible native data structures, the ``cli_parse`` module runs the command on devices that return XML and returns the converted data in a single task.
+
+Because ``cli_parse`` uses a plugin based architecture, it can use additional parsing engines from any Ansible collection.
+
+.. note::
+
+ The ``ansible.netcommon.native`` and ``ansible.netcommon.json`` parsing engines are fully supported with a Red Hat Ansible Automation Platform subscription. Red Hat Ansible Automation Platform subscription support is limited to the use of the ``ntc_templates``, pyATS, ``textfsm``, and ``xmltodict`` public APIs as documented.
+
+Parsing with the native parsing engine
+--------------------------------------
+
+The native parsing engine is included with the ``cli_parse`` module. It uses data captured using regular expressions to populate the parsed data structure. The native parsing engine requires a YAML template file to parse the command output.
+
+Networking example
+^^^^^^^^^^^^^^^^^^
+
+This example uses the output of a network device command and applies a native template to produce an output in Ansible structured data format.
+
+The ``show interface`` command output from the network device looks as follows:
+
+.. code-block:: console
+
+ Ethernet1/1 is up
+ admin state is up, Dedicated Interface
+ Hardware: 100/1000/10000 Ethernet, address: 5254.005a.f8bd (bia 5254.005a.f8bd)
+ MTU 1500 bytes, BW 1000000 Kbit, DLY 10 usec
+ reliability 255/255, txload 1/255, rxload 1/255
+ Encapsulation ARPA, medium is broadcast
+ Port mode is access
+ full-duplex, auto-speed
+ Beacon is turned off
+ Auto-Negotiation is turned on FEC mode is Auto
+ Input flow-control is off, output flow-control is off
+ Auto-mdix is turned off
+ Switchport monitor is off
+ EtherType is 0x8100
+ EEE (efficient-ethernet) : n/a
+ Last link flapped 4week(s) 6day(s)
+ Last clearing of "show interface" counters never
+ <...>
+
+
+Create the native template to match this output and store it as ``templates/nxos_show_interface.yaml``:
+
+.. code-block:: yaml
+
+ ---
+ - example: Ethernet1/1 is up
+ getval: '(?P<name>\S+) is (?P<oper_state>\S+)'
+ result:
+ "{{ name }}":
+ name: "{{ name }}"
+ state:
+ operating: "{{ oper_state }}"
+ shared: true
+
+ - example: admin state is up, Dedicated Interface
+ getval: 'admin state is (?P<admin_state>\S+),'
+ result:
+ "{{ name }}":
+ name: "{{ name }}"
+ state:
+ admin: "{{ admin_state }}"
+
+ - example: " Hardware: Ethernet, address: 5254.005a.f8b5 (bia 5254.005a.f8b5)"
+ getval: '\s+Hardware: (?P<hardware>.*), address: (?P<mac>\S+)'
+ result:
+ "{{ name }}":
+ hardware: "{{ hardware }}"
+ mac_address: "{{ mac }}"
+
+
+This native parser template is structured as a list of parsers, each containing the following key-value pairs:
+
+- ``example`` - An example line of the text to be parsed
+- ``getval`` - A regular expression using named capture groups to store the extracted data
+- ``result`` - A data tree, populated as a template, from the parsed data
+- ``shared`` - (optional) The shared key makes the parsed values available to the rest of the parser entries until matched again.
+
+The following example task uses ``cli_parse`` with the native parser and the example template above to parse the ``show interface`` command from a Cisco NXOS device:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with native"
+ ansible.netcommon.cli_parse:
+ command: show interface
+ parser:
+ name: ansible.netcommon.native
+ set_fact: interfaces
+
+Taking a deeper dive into this task:
+
+- The ``command`` option provides the command you want to run on the device or host. Alternately, you can provide text from a previous command with the ``text`` option instead.
+- The ``parser`` option provides information specific to the parser engine.
+- The ``name`` suboption provides the fully qualified collection name (FQCN) of the parsing engine (``ansible.netcommon.native``).
+- The ``cli_parse`` module, by default, looks for the template in the templates directory as ``{{ short_os }}_{{ command }}.yaml``.
+
+ - The ``short_os`` in the template filename is derived from either the host ``ansible_network_os`` or ``ansible_distribution``.
+ - Spaces in the network or host command are replaced with ``_`` in the ``command`` portion of the template filename. In this example, the ``show interface`` network CLI command becomes ``show_interface`` in the filename.
+
+.. note::
+
+ ``ansible.netcommon.native`` parsing engine is fully supported with a Red Hat Ansible Automation Platform subscription.
+
+Lastly in this task, the ``set_fact`` option sets the following ``interfaces`` fact for the device based on the now-structured data returned from ``cli_parse``:
+
+.. code-block:: yaml
+
+ Ethernet1/1:
+ hardware: 100/1000/10000 Ethernet
+ mac_address: 5254.005a.f8bd
+ name: Ethernet1/1
+ state:
+ admin: up
+ operating: up
+ Ethernet1/10:
+ hardware: 100/1000/10000 Ethernet
+ mac_address: 5254.005a.f8c6
+ <...>
+
+
+Linux example
+^^^^^^^^^^^^^
+
+You can also use the native parser to run commands and parse output from Linux hosts.
+
+The output of a sample Linux command (``ip addr show``) looks as follows:
+
+.. code-block:: bash
+
+ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
+ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
+ inet 127.0.0.1/8 scope host lo
+ valid_lft forever preferred_lft forever
+ inet6 ::1/128 scope host
+ valid_lft forever preferred_lft forever
+ 2: enp0s31f6: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc fq_codel state DOWN group default qlen 1000
+ link/ether x2:6a:64:9d:84:19 brd ff:ff:ff:ff:ff:ff
+ 3: wlp2s0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
+ link/ether x6:c2:44:f7:41:e0 brd ff:ff:ff:ff:ff:ff permaddr d8:f2:ca:99:5c:82
+
+Create the native template to match this output and store it as ``templates/fedora_ip_addr_show.yaml``:
+
+.. code-block:: yaml
+
+ ---
+ - example: '1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000'
+ getval: |
+ (?x) # free-spacing
+ \d+:\s # the interface index
+ (?P<name>\S+):\s # the name
+ <(?P<properties>\S+)> # the properties
+ \smtu\s(?P<mtu>\d+) # the mtu
+ .* # gunk
+ state\s(?P<state>\S+) # the state of the interface
+ result:
+ "{{ name }}":
+ name: "{{ name }}"
+ loopback: "{{ 'LOOPBACK' in properties.split(',') }}"
+ up: "{{ 'UP' in properties.split(',') }}"
+ carrier: "{{ not 'NO-CARRIER' in properties.split(',') }}"
+ broadcast: "{{ 'BROADCAST' in properties.split(',') }}"
+ multicast: "{{ 'MULTICAST' in properties.split(',') }}"
+ state: "{{ state|lower() }}"
+ mtu: "{{ mtu }}"
+ shared: True
+
+ - example: 'inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0'
+ getval: |
+ (?x) # free-spacing
+ \s+inet\s(?P<inet>([0-9]{1,3}\.){3}[0-9]{1,3}) # the ip address
+ /(?P<bits>\d{1,2}) # the mask bits
+ result:
+ "{{ name }}":
+ ip_address: "{{ inet }}"
+ mask_bits: "{{ bits }}"
+
+.. note::
+
+ The ``shared`` key in the parser template allows the interface name to be used in subsequent parser entries. The use of examples and free-spacing mode with the regular expressions makes the template easier to read.
+
+The following example task uses ``cli_parse`` with the native parser and the example template above to parse the Linux output:
+
+.. code-block:: yaml
+
+ - name: Run command and parse
+ ansible.netcommon.cli_parse:
+ command: ip addr show
+ parser:
+ name: ansible.netcommon.native
+ set_fact: interfaces
+
+This task assumes you previously gathered facts to determine the ``ansible_distribution`` needed to locate the template. Alternately, you could provide the path in the ``parser/template_path`` option.
+
+
+Lastly in this task, the ``set_fact`` option sets the following ``interfaces`` fact for the host, based on the now-structured data returned from ``cli_parse``:
+
+.. code-block:: yaml
+
+ lo:
+ broadcast: false
+ carrier: true
+ ip_address: 127.0.0.1
+ mask_bits: 8
+ mtu: 65536
+ multicast: false
+ name: lo
+ state: unknown
+ up: true
+ enp64s0u1:
+ broadcast: true
+ carrier: true
+ ip_address: 192.168.86.83
+ mask_bits: 24
+ mtu: 1500
+ multicast: true
+ name: enp64s0u1
+ state: up
+ up: true
+ <...>
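+
+If you have not gathered facts, a sketch of the same task pointing directly at the template with the ``parser/template_path`` option mentioned above:
+
+.. code-block:: yaml
+
+   - name: Run command and parse with an explicit template path
+     ansible.netcommon.cli_parse:
+       command: ip addr show
+       parser:
+         name: ansible.netcommon.native
+         template_path: templates/fedora_ip_addr_show.yaml
+     set_fact: interfaces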
+
+
+Parsing JSON
+-------------
+
+Although Ansible will natively convert serialized JSON to Ansible native data when recognized, you can also use the ``cli_parse`` module for this conversion.
+
+Example task:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse as json"
+ ansible.netcommon.cli_parse:
+ command: show interface | json
+ parser:
+ name: ansible.netcommon.json
+ register: interfaces
+
+Taking a deeper dive into this task:
+
+- The ``show interface | json`` command is issued on the device.
+- The output is registered as the ``interfaces`` variable for the device.
+- JSON support is provided primarily for playbook consistency.
+
+.. note::
+
+ The use of ``ansible.netcommon.json`` is fully supported with a Red Hat Ansible Automation Platform subscription
+
+Parsing with ntc_templates
+----------------------------
+
+The ``ntc_templates`` Python library includes predefined ``textfsm`` templates for parsing the output of a variety of network device commands.
+
+Example task:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with ntc_templates"
+ ansible.netcommon.cli_parse:
+ command: show interface
+ parser:
+ name: ansible.netcommon.ntc_templates
+ set_fact: interfaces
+
+Taking a deeper dive into this task:
+
+- The ``ansible_network_os`` of the device is converted to the ntc_template format ``cisco_nxos``. Alternately, you can provide the OS with the ``parser/os`` option instead.
+- The ``cisco_nxos_show_interface.textfsm`` template, included with the ``ntc_templates`` package, parses the output.
+- See `the ntc_templates README <https://github.com/networktocode/ntc-templates/blob/master/README.md>`_ for additional information about the ``ntc_templates`` python library.
+
+.. note::
+
+ Red Hat Ansible Automation Platform subscription support is limited to the use of the ``ntc_templates`` public APIs as documented.
+
+
+This task and the predefined template set the following fact as the ``interfaces`` fact for the host:
+
+.. code-block:: yaml
+
+ interfaces:
+ - address: 5254.005a.f8b5
+ admin_state: up
+ bandwidth: 1000000 Kbit
+ bia: 5254.005a.f8b5
+ delay: 10 usec
+ description: ''
+ duplex: full-duplex
+ encapsulation: ARPA
+ hardware_type: Ethernet
+ input_errors: ''
+ input_packets: ''
+ interface: mgmt0
+ ip_address: 192.168.101.14/24
+ last_link_flapped: ''
+ link_status: up
+ mode: ''
+ mtu: '1500'
+ output_errors: ''
+ output_packets: ''
+ speed: 1000 Mb/s
+ - address: 5254.005a.f8bd
+ admin_state: up
+ bandwidth: 1000000 Kbit
+ bia: 5254.005a.f8bd
+ delay: 10 usec
+
+
+Parsing with pyATS
+----------------------
+
+``pyATS`` is part of the Cisco Test Automation & Validation Solution. It includes many predefined parsers for a number of network platforms and commands. You can use the predefined parsers that are part of the ``pyATS`` package with the ``cli_parse`` module.
+
+Example task:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with pyats"
+ ansible.netcommon.cli_parse:
+ command: show interface
+ parser:
+ name: ansible.netcommon.pyats
+ set_fact: interfaces
+
+
+Taking a deeper dive into this task:
+
+- The ``cli_parse`` module converts the ``ansible_network_os`` automatically (in this example, ``ansible_network_os`` set to ``cisco.nxos.nxos`` converts to ``nxos`` for pyATS). Alternately, you can set the OS with the ``parser/os`` option instead.
+- Using a combination of the command and OS, pyATS selects the following parser: https://pubhub.devnetcloud.com/media/genie-feature-browser/docs/#/parsers/show%2520interface.
+- The ``cli_parse`` module sets ``cisco.ios.ios`` to ``iosxe`` for pyATS. You can override this with the ``parser/os`` option.
+- ``cli_parse`` only uses the predefined parsers in pyATS. See the `pyATS documentation <https://developer.cisco.com/docs/pyats/>`_ and the full list of `pyATS included parsers <https://pubhub.devnetcloud.com/media/genie-feature-browser/docs/#/parsers>`_.
+
+.. note::
+
+ Red Hat Ansible Automation Platform subscription support is limited to the use of the pyATS public APIs as documented.
+
+
+This task sets the following fact as the ``interfaces`` fact for the host:
+
+.. code-block:: yaml
+
+ mgmt0:
+ admin_state: up
+ auto_mdix: 'off'
+ auto_negotiate: true
+ bandwidth: 1000000
+ counters:
+ in_broadcast_pkts: 3
+ in_multicast_pkts: 1652395
+ in_octets: 556155103
+ in_pkts: 2236713
+ in_unicast_pkts: 584259
+ rate:
+ in_rate: 320
+ in_rate_pkts: 0
+ load_interval: 1
+ out_rate: 48
+ out_rate_pkts: 0
+ rx: true
+ tx: true
+ delay: 10
+ duplex_mode: full
+ enabled: true
+ encapsulations:
+ encapsulation: arpa
+ ethertype: '0x0000'
+ ipv4:
+ 192.168.101.14/24:
+ ip: 192.168.101.14
+ prefix_length: '24'
+ link_state: up
+ <...>
+
+
+Parsing with textfsm
+---------------------
+
+``textfsm`` is a Python module which implements a template-based state machine for parsing semi-formatted text.
+
+The following sample ``textfsm`` template is stored as ``templates/nxos_show_interface.textfsm``:
+
+.. code-block:: text
+
+
+ Value Required INTERFACE (\S+)
+ Value LINK_STATUS (.+?)
+ Value ADMIN_STATE (.+?)
+ Value HARDWARE_TYPE (.*)
+ Value ADDRESS ([a-zA-Z0-9]+.[a-zA-Z0-9]+.[a-zA-Z0-9]+)
+ Value BIA ([a-zA-Z0-9]+.[a-zA-Z0-9]+.[a-zA-Z0-9]+)
+ Value DESCRIPTION (.*)
+ Value IP_ADDRESS (\d+\.\d+\.\d+\.\d+\/\d+)
+ Value MTU (\d+)
+ Value MODE (\S+)
+ Value DUPLEX (.+duplex?)
+ Value SPEED (.+?)
+ Value INPUT_PACKETS (\d+)
+ Value OUTPUT_PACKETS (\d+)
+ Value INPUT_ERRORS (\d+)
+ Value OUTPUT_ERRORS (\d+)
+ Value BANDWIDTH (\d+\s+\w+)
+ Value DELAY (\d+\s+\w+)
+ Value ENCAPSULATION (\w+)
+ Value LAST_LINK_FLAPPED (.+?)
+
+ Start
+ ^\S+\s+is.+ -> Continue.Record
+ ^${INTERFACE}\s+is\s+${LINK_STATUS},\sline\sprotocol\sis\s${ADMIN_STATE}$$
+ ^${INTERFACE}\s+is\s+${LINK_STATUS}$$
+ ^admin\s+state\s+is\s+${ADMIN_STATE},
+ ^\s+Hardware(:|\s+is)\s+${HARDWARE_TYPE},\s+address(:|\s+is)\s+${ADDRESS}(.*bia\s+${BIA})*
+ ^\s+Description:\s+${DESCRIPTION}
+ ^\s+Internet\s+Address\s+is\s+${IP_ADDRESS}
+ ^\s+Port\s+mode\s+is\s+${MODE}
+ ^\s+${DUPLEX}, ${SPEED}(,|$$)
+ ^\s+MTU\s+${MTU}.*BW\s+${BANDWIDTH}.*DLY\s+${DELAY}
+ ^\s+Encapsulation\s+${ENCAPSULATION}
+ ^\s+${INPUT_PACKETS}\s+input\s+packets\s+\d+\s+bytes\s*$$
+ ^\s+${INPUT_ERRORS}\s+input\s+error\s+\d+\s+short\s+frame\s+\d+\s+overrun\s+\d+\s+underrun\s+\d+\s+ignored\s*$$
+ ^\s+${OUTPUT_PACKETS}\s+output\s+packets\s+\d+\s+bytes\s*$$
+ ^\s+${OUTPUT_ERRORS}\s+output\s+error\s+\d+\s+collision\s+\d+\s+deferred\s+\d+\s+late\s+collision\s*$$
+ ^\s+Last\s+link\s+flapped\s+${LAST_LINK_FLAPPED}\s*$$
+
+The following task uses the example template for ``textfsm`` with the ``cli_parse`` module.
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with textfsm"
+ ansible.netcommon.cli_parse:
+ command: show interface
+ parser:
+ name: ansible.netcommon.textfsm
+ set_fact: interfaces
+
+Taking a deeper dive into this task:
+
+- The ``ansible_network_os`` for the device (``cisco.nxos.nxos``) is converted to ``nxos``. Alternatively, you can provide the OS in the ``parser/os`` option instead.
+- The textfsm template name defaulted to ``templates/nxos_show_interface.textfsm`` using a combination of the OS and the command run. Alternatively, you can override the generated template path with the ``parser/template_path`` option.
+- See the `textfsm README <https://github.com/google/textfsm>`_ for details.
+- ``textfsm`` was previously made available as a filter plugin. Ansible users should transition to the ``cli_parse`` module.
+
+.. note::
+
+ Red Hat Ansible Automation Platform subscription support is limited to the use of the ``textfsm`` public APIs as documented.
+
+This task sets the following fact as the ``interfaces`` fact for the host:
+
+.. code-block:: yaml
+
+ - ADDRESS: X254.005a.f8b5
+ ADMIN_STATE: up
+ BANDWIDTH: 1000000 Kbit
+ BIA: X254.005a.f8b5
+ DELAY: 10 usec
+ DESCRIPTION: ''
+ DUPLEX: full-duplex
+ ENCAPSULATION: ARPA
+ HARDWARE_TYPE: Ethernet
+ INPUT_ERRORS: ''
+ INPUT_PACKETS: ''
+ INTERFACE: mgmt0
+ IP_ADDRESS: 192.168.101.14/24
+ LAST_LINK_FLAPPED: ''
+ LINK_STATUS: up
+ MODE: ''
+ MTU: '1500'
+ OUTPUT_ERRORS: ''
+ OUTPUT_PACKETS: ''
+ SPEED: 1000 Mb/s
+ - ADDRESS: X254.005a.f8bd
+ ADMIN_STATE: up
+ BANDWIDTH: 1000000 Kbit
+ BIA: X254.005a.f8bd
+
+
+Parsing with TTP
+-----------------
+
+TTP is a Python library for semi-structured text parsing using templates. TTP uses a Jinja-like syntax to limit the need for regular expressions, so users familiar with Jinja templating should find the TTP template syntax easy to pick up.
+
+The following is an example TTP template stored as ``templates/nxos_show_interfaces.ttp``:
+
+.. code-block:: jinja
+
+    {{ interface }} is {{ state }}
+      admin state is {{ admin_state }}{{ ignore(".*") }}
+
+The following task uses this template to parse the ``show interface`` command output:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with ttp"
+ ansible.netcommon.cli_parse:
+ command: show interface
+ parser:
+ name: ansible.netcommon.ttp
+ set_fact: interfaces
+
+Taking a deeper dive into this task:
+
+- The default template path ``templates/nxos_show_interface.ttp`` was generated using the ``ansible_network_os`` for the host and ``command`` provided.
+- TTP supports several additional variables that are passed to the parser (see the sketch after this list). These include:
+
+ - ``parser/vars/ttp_init`` - Additional parameter passed when the parser is initialized.
+ - ``parser/vars/ttp_results`` - Additional parameters used to influence the parser output.
+ - ``parser/vars/ttp_vars`` - Additional variables made available in the template.
+
+- See the `TTP documentation <https://ttp.readthedocs.io>`_ for details.
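+
+For instance, a minimal sketch that passes an extra template variable through ``parser/vars/ttp_vars``; the variable name ``vendor`` is hypothetical and would need a matching ``{{ vendor }}`` reference in your template:
+
+.. code-block:: yaml
+
+    - name: "Run command and parse with ttp, passing a template variable"
+      ansible.netcommon.cli_parse:
+        command: show interface
+        parser:
+          name: ansible.netcommon.ttp
+          vars:
+            # entries under ttp_vars become variables inside the TTP template
+            ttp_vars:
+              vendor: cisco
+        set_fact: interfaces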
+
+
+The task sets the following fact as the ``interfaces`` fact for the host:
+
+.. code-block:: yaml
+
+ - admin_state: up,
+ interface: mgmt0
+ state: up
+ - admin_state: up,
+ interface: Ethernet1/1
+ state: up
+ - admin_state: up,
+ interface: Ethernet1/2
+ state: up
+
+
+Converting XML
+-----------------
+
+Although Ansible contains a number of plugins that can convert XML to Ansible native data structures, the ``cli_parse`` module runs the command on devices that return XML and returns the converted data in a single task.
+
+This example task runs the ``show interface`` command and parses the output as XML:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse as xml"
+ ansible.netcommon.cli_parse:
+ command: show interface | xml
+ parser:
+ name: ansible.netcommon.xml
+ set_fact: interfaces
+
+.. note::
+
+ Red Hat Ansible Automation Platform subscription support is limited to the use of the ``xmltodict`` public APIs as documented.
+
+This task sets the ``interfaces`` fact for the host based on this returned output:
+
+.. code-block:: yaml
+
+ nf:rpc-reply:
+ '@xmlns': http://www.cisco.com/nxos:1.0:if_manager
+ '@xmlns:nf': urn:ietf:params:xml:ns:netconf:base:1.0
+ nf:data:
+ show:
+ interface:
+ __XML__OPT_Cmd_show_interface_quick:
+ __XML__OPT_Cmd_show_interface___readonly__:
+ __readonly__:
+ TABLE_interface:
+ ROW_interface:
+ - admin_state: up
+ encapsulation: ARPA
+ eth_autoneg: 'on'
+ eth_bia_addr: x254.005a.f8b5
+ eth_bw: '1000000'
+
+
+Advanced use cases
+===================
+
+The ``cli_parse`` module supports several features for more complex use cases.
+
+Provide a full template path
+-----------------------------
+
+Use the ``template_path`` option to override the default template path in the task:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with native"
+ ansible.netcommon.cli_parse:
+ command: show interface
+ parser:
+ name: ansible.netcommon.native
+ template_path: /home/user/templates/filename.yaml
+
+
+Provide a command to the parser other than the command run
+------------------------------------------------------------
+
+Use the ``command`` suboption for the ``parser`` to configure the command the parser expects if it is different from the command ``cli_parse`` runs:
+
+.. code-block:: yaml
+
+ - name: "Run command and parse with native"
+ ansible.netcommon.cli_parse:
+ command: sho int
+ parser:
+ name: ansible.netcommon.native
+ command: show interface
+
+Provide a custom OS value
+--------------------------------
+
+Use the ``os`` suboption of the parser to set the OS directly, instead of relying on ``ansible_network_os`` or ``ansible_distribution``, both when generating the template path and when calling the specified parser engine:
+
+.. code-block:: yaml
+
+ - name: Use ios instead of iosxe for pyats
+ ansible.netcommon.cli_parse:
+ command: show something
+ parser:
+ name: ansible.netcommon.pyats
+ os: ios
+
+ - name: Use linux instead of fedora from ansible_distribution
+ ansible.netcommon.cli_parse:
+ command: ps -ef
+ parser:
+ name: ansible.netcommon.native
+ os: linux
+
+
+Parse existing text
+--------------------
+
+Use the ``text`` option instead of ``command`` to parse text collected earlier in the playbook:
+
+.. code-block:: yaml
+
+ # using /home/user/templates/filename.yaml
+ - name: "Parse text from previous task"
+ ansible.netcommon.cli_parse:
+ text: "{{ output['stdout'] }}"
+ parser:
+ name: ansible.netcommon.native
+ template_path: /home/user/templates/filename.yaml
+
+ # using /home/user/templates/filename.yaml
+ - name: "Parse text from file"
+ ansible.netcommon.cli_parse:
+ text: "{{ lookup('file', 'path/to/file.txt') }}"
+ parser:
+ name: ansible.netcommon.native
+ template_path: /home/user/templates/filename.yaml
+
+ # using templates/nxos_show_version.yaml
+ - name: "Parse text from previous task"
+ ansible.netcommon.cli_parse:
+ text: "{{ sho_version['stdout'] }}"
+ parser:
+ name: ansible.netcommon.native
+ os: nxos
+ command: show version
+
+
+.. seealso::
+
+ * :ref:`develop_cli_parse_plugins`
diff --git a/docs/docsite/rst/network/user_guide/faq.rst b/docs/docsite/rst/network/user_guide/faq.rst
new file mode 100644
index 00000000..cb43ac28
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/faq.rst
@@ -0,0 +1,76 @@
+.. _network_faq:
+
+*******************
+Ansible Network FAQ
+*******************
+
+.. contents:: Topics
+
+.. _network_faq_performance:
+
+How can I improve performance for network playbooks?
+====================================================
+
+.. _network_faq_strategy_free:
+
+Consider ``strategy: free`` if you are running on multiple hosts
+---------------------------------------------------------------------------------
+
+The ``strategy`` plugin tells Ansible how to order multiple tasks on multiple hosts. :ref:`Strategy<strategy_plugins>` is set at the playbook level.
+
+The default strategy is ``linear``. With strategy set to ``linear``, Ansible waits until the current task has run on all hosts before starting the next task on any host. Ansible may have forks free, but will not use them until all hosts have completed the current task. If each task in your playbook must succeed on all hosts before you run the next task, use the ``linear`` strategy.
+
+Using the ``free`` strategy, Ansible uses available forks to execute tasks on each host as quickly as possible. Even if an earlier task is still running on one host, Ansible executes later tasks on other hosts. The ``free`` strategy uses available forks more efficiently. If your playbook stalls on each task, waiting for one slow host, consider using ``strategy: free`` to boost overall performance.
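+
+For example, a minimal sketch of a play that opts in to the ``free`` strategy (the ``routers`` group name is a placeholder, and the facts module assumes IOS devices):
+
+.. code-block:: yaml
+
+    # With strategy "free", fast hosts move on to later tasks
+    # without waiting for slower hosts to finish the current one.
+    - name: Gather facts without waiting for slow hosts
+      hosts: routers
+      gather_facts: no
+      strategy: free
+      tasks:
+        - name: Gather ios facts
+          cisco.ios.ios_facts:
+            gather_subset: all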
+
+.. _network_faq_limit_show_running:
+
+Execute ``show running`` only if you absolutely must
+---------------------------------------------------------------------------------
+
+The ``show running`` command is the most resource-intensive command to execute on a network device, because of the way queries are handled by the network OS. Using the command in your Ansible playbook will slow performance significantly, especially on large devices; repeating it will multiply the performance hit. If you have a playbook that checks the running config, then executes changes, then checks the running config again, you should expect that playbook to be very slow.
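+
+If you must inspect the running config more than once, a workaround is to fetch it a single time, register the result, and reuse it. A minimal sketch, assuming IOS devices:
+
+.. code-block:: yaml
+
+    # Fetch the running config a single time and register it.
+    - name: Fetch the running config once
+      cisco.ios.ios_command:
+        commands: show running-config
+      register: running_config
+
+    # Later tasks reuse the registered copy instead of querying the device again.
+    - name: Check the configured hostname from the saved copy
+      debug:
+        msg: "{{ running_config.stdout[0] | regex_search('hostname .*') }}"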
+
+.. _network_faq_limit_ProxyCommand:
+
+Use ``ProxyCommand`` only if you absolutely must
+---------------------------------------------------------------------------------
+
+Network modules support the use of a :ref:`proxy or jump host<network_delegate_to_vs_ProxyCommand>` with the ``ProxyCommand`` parameter. However, when you use a jump host, Ansible must open a new SSH connection for every task, even if you are using a persistent connection type (``network_cli`` or ``netconf``). To maximize the performance benefits of the persistent connection types introduced in version 2.5, avoid using jump hosts whenever possible.
+
+.. _network_faq_set_forks:
+
+Set ``--forks`` to match your needs
+---------------------------------------------------------------------------------
+
+Every time Ansible runs a task, it forks its own process. The ``--forks`` parameter defines the number of concurrent tasks. If you retain the default setting of ``--forks=5`` and you are running a playbook on 10 hosts, five of those hosts will have to wait until a fork is available. Of course, the more forks you allow, the more memory and processing power Ansible will use. Since most network tasks are run on the control host, this means your laptop can quickly become CPU- or memory-bound.
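+
+You can raise the limit with ``--forks`` on the command line, or make it permanent in your ``ansible.cfg`` file. A sketch (20 is an arbitrary example value; tune it to your control host's resources):
+
+.. code-block:: ini
+
+    [defaults]
+    # arbitrary example value; more forks means more memory and CPU use
+    forks = 20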
+
+.. _network_faq_redacted_output:
+
+Why is my output sometimes replaced with ``********``?
+======================================================
+
+Ansible replaces any string marked ``no_log``, including passwords, with ``********`` in Ansible output. This is done by design, to protect your sensitive data. Most users are happy to have their passwords redacted. However, Ansible replaces every string that matches your password with ``********``. If you use a common word for your password, this can be a problem. For example, if you choose ``Admin`` as your password, Ansible will replace every instance of the word ``Admin`` with ``********`` in your output. This may make your output harder to read. To avoid this problem, select a secure password that will not occur elsewhere in your Ansible output.
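+
+You can also mark an entire task with ``no_log`` if it handles secrets. A minimal sketch, in which ``vault_admin_password`` is a placeholder variable:
+
+.. code-block:: yaml
+
+    - name: Configure a local user without echoing the secret
+      cisco.ios.ios_config:
+        lines:
+          # vault_admin_password is a placeholder for a vaulted variable
+          - "username admin secret {{ vault_admin_password }}"
+      no_log: true  # hide the task's output, including the rendered command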
+
+.. _network_faq_no_abbreviations_with_config:
+
+Why do the ``*_config`` modules always return ``changed=true`` with abbreviated commands?
+=========================================================================================
+
+When you issue commands directly on a network device, you can use abbreviated commands. For example, ``int g1/0/11`` and ``interface GigabitEthernet1/0/11`` do the same thing; ``shut`` and ``shutdown`` do the same thing. Ansible Network ``*_command`` modules work with abbreviations, because they run commands through the network OS.
+
+When committing configuration, however, the network OS converts abbreviations into long-form commands. Whether you use ``shut`` or ``shutdown`` on ``GigabitEthernet1/0/11``, the result in the configuration is the same: ``shutdown``.
+
+Ansible Network ``*_config`` modules compare the text of the commands you specify in ``lines`` to the text in the configuration. If you use ``shut`` in the ``lines`` section of your task, and the configuration reads ``shutdown``, the module returns ``changed=true`` even though the configuration is already correct. Your task will update the configuration every time it runs.
+
+To avoid this problem, use long-form commands with the ``*_config`` modules:
+
+
+.. code-block:: yaml
+
+ ---
+ - hosts: all
+ gather_facts: no
+ tasks:
+ - cisco.ios.ios_config:
+ lines:
+ - shutdown
+ parents: interface GigabitEthernet1/0/11
diff --git a/docs/docsite/rst/network/user_guide/index.rst b/docs/docsite/rst/network/user_guide/index.rst
new file mode 100644
index 00000000..f5eff6f4
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/index.rst
@@ -0,0 +1,25 @@
+.. _network_advanced:
+
+**********************************
+Network Advanced Topics
+**********************************
+
+Once you have mastered the basics of network automation with Ansible, as presented in :ref:`network_getting_started`, use this guide to understand platform-specific details, optimization, and troubleshooting tips for Ansible for network automation.
+
+**Who should use this guide?**
+
+This guide is intended for network engineers using Ansible for automation. It covers advanced topics. If you understand networks and Ansible, this guide is for you. You may read through the entire guide if you choose, or use the links below to find the specific information you need.
+
+If you're new to Ansible, or new to using Ansible for network automation, start with the :ref:`network_getting_started`.
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Advanced Topics
+
+ network_resource_modules
+ network_best_practices_2.5
+ cli_parsing
+ network_debug_troubleshooting
+ network_working_with_command_output
+ faq
+ platform_index
diff --git a/docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst b/docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst
new file mode 100644
index 00000000..1101017c
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/network_best_practices_2.5.rst
@@ -0,0 +1,483 @@
+.. _network-best-practices:
+
+************************
+Ansible Network Examples
+************************
+
+This document describes some examples of using Ansible to manage your network infrastructure.
+
+.. contents::
+ :local:
+
+Prerequisites
+=============
+
+This example requires the following:
+
+* **Ansible 2.10** (or higher) installed. See :ref:`intro_installation_guide` for more information.
+* One or more network devices that are compatible with Ansible.
+* Basic understanding of YAML. See :ref:`yaml_syntax` for more information.
+* Basic understanding of Jinja2 templates. See :ref:`playbooks_templating` for more information.
+* Basic Linux command line use.
+* Basic knowledge of network switch & router configurations.
+
+
+Groups and variables in an inventory file
+=========================================
+
+An ``inventory`` file is a YAML or INI-like configuration file that defines the mapping of hosts into groups.
+
+In our example, the inventory file defines the groups ``eos``, ``ios``, ``vyos`` and a "group of groups" called ``switches``. Further details about subgroups and inventory files can be found in the :ref:`Ansible inventory Group documentation <subgroups>`.
+
+Because Ansible is a flexible tool, there are a number of ways to specify connection information and credentials. We recommend using the ``[my_group:vars]`` capability in your inventory file.
+
+.. code-block:: ini
+
+ [all:vars]
+ # these defaults can be overridden for any group in the [group:vars] section
+ ansible_connection=ansible.netcommon.network_cli
+ ansible_user=ansible
+
+ [switches:children]
+ eos
+ ios
+ vyos
+
+ [eos]
+ veos01 ansible_host=veos-01.example.net
+ veos02 ansible_host=veos-02.example.net
+ veos03 ansible_host=veos-03.example.net
+ veos04 ansible_host=veos-04.example.net
+
+ [eos:vars]
+ ansible_become=yes
+ ansible_become_method=enable
+ ansible_network_os=arista.eos.eos
+ ansible_user=my_eos_user
+ ansible_password=my_eos_password
+
+ [ios]
+ ios01 ansible_host=ios-01.example.net
+ ios02 ansible_host=ios-02.example.net
+ ios03 ansible_host=ios-03.example.net
+
+ [ios:vars]
+ ansible_become=yes
+ ansible_become_method=enable
+ ansible_network_os=cisco.ios.ios
+ ansible_user=my_ios_user
+ ansible_password=my_ios_password
+
+ [vyos]
+ vyos01 ansible_host=vyos-01.example.net
+ vyos02 ansible_host=vyos-02.example.net
+ vyos03 ansible_host=vyos-03.example.net
+
+ [vyos:vars]
+ ansible_network_os=vyos.vyos.vyos
+ ansible_user=my_vyos_user
+ ansible_password=my_vyos_password
+
+If you use ssh-agent, you do not need the ``ansible_password`` lines. If you use ssh keys, but not ssh-agent, and you have multiple keys, specify the key to use for each connection in the ``[group:vars]`` section with ``ansible_ssh_private_key_file=/path/to/correct/key``. For more information on ``ansible_ssh_`` options see :ref:`behavioral_parameters`.
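+
+For example, a per-group key setting might look like the following sketch, where the key path is a placeholder:
+
+.. code-block:: ini
+
+    [vyos:vars]
+    # placeholder path; point at the key used for this group
+    ansible_ssh_private_key_file=/home/user/.ssh/vyos_key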
+
+.. FIXME FUTURE Gundalow - Link to network auth & proxy page (to be written)
+
+.. warning:: Never store passwords in plain text.
+
+Ansible vault for password encryption
+-------------------------------------
+
+The "Vault" feature of Ansible allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plain text in your playbooks or roles. These vault files can then be distributed or placed in source control. See :ref:`playbooks_vault` for more information.
+
+Here's what it would look like if you specified your SSH passwords (encrypted with Ansible Vault) among your variables:
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: my_vyos_user
+ ansible_ssh_pass: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 39336231636137663964343966653162353431333566633762393034646462353062633264303765
+ 6331643066663534383564343537343334633031656538370a333737656236393835383863306466
+ 62633364653238323333633337313163616566383836643030336631333431623631396364663533
+ 3665626431626532630a353564323566316162613432373738333064366130303637616239396438
+ 9853
+
+Common inventory variables
+--------------------------
+
+The following variables are common for all platforms in the inventory, though they can be overridden for a particular inventory group or host.
+
+:ansible_connection:
+
+  Ansible uses the ``ansible_connection`` setting to determine how to connect to a remote device. When working with Ansible Networking, set this to an appropriate network connection option, such as ``ansible.netcommon.network_cli``, so Ansible treats the remote node as a network device with a limited execution environment. Without this setting, Ansible would attempt to use ssh to connect to the remote and execute the Python script on the network device, which would fail because Python generally isn't available on network devices.
+:ansible_network_os:
+  Informs Ansible which network platform this host corresponds to. This is required when using the ``ansible.netcommon.*`` connection options.
+:ansible_user:
+  The user to connect to the remote device (switch) as. Without this, the user that is running ``ansible-playbook`` would be used.
+:ansible_password:
+  The corresponding password for ``ansible_user`` to log in as. If not specified, the SSH key will be used.
+:ansible_become:
+  Whether enable mode (privilege mode) should be used; see the next section.
+:ansible_become_method:
+  Which type of ``become`` should be used; for ``network_cli`` the only valid choice is ``enable``.
+
+Privilege escalation
+--------------------
+
+Certain network platforms, such as Arista EOS and Cisco IOS, have the concept of different privilege modes. Certain network modules, such as those that modify system state including users, will only work in high privilege states. Ansible supports ``become`` when using ``connection: ansible.netcommon.network_cli``. This allows privileges to be raised for the specific tasks that need them. Adding ``become: yes`` and ``become_method: enable`` informs Ansible to go into privilege mode before executing the task, as shown here:
+
+.. code-block:: ini
+
+ [eos:vars]
+ ansible_connection=ansible.netcommon.network_cli
+ ansible_network_os=arista.eos.eos
+ ansible_become=yes
+ ansible_become_method=enable
+
+For more information, see the :ref:`using become with network modules<become_network>` guide.
+
+
+Jump hosts
+----------
+
+If the Ansible Controller does not have a direct route to the remote device and you need to use a Jump Host, please see the :ref:`Ansible Network Proxy Command <network_delegate_to_vs_ProxyCommand>` guide for details on how to achieve this.
+
+Example 1: collecting facts and creating backup files with a playbook
+=====================================================================
+
+Ansible facts modules gather system information ('facts') that is available to the rest of your playbook.
+
+Ansible Networking ships with a number of network-specific facts modules. In this example, we use the ``_facts`` modules :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>`, :ref:`cisco.ios.ios_facts <ansible_collections.cisco.ios.ios_facts_module>` and :ref:`vyos.vyos.vyos_facts <ansible_collections.vyos.vyos.vyos_facts_module>` to connect to the remote networking device. As the credentials are not explicitly passed with module arguments, Ansible uses the username and password from the inventory file.
+
+Ansible's "Network Fact modules" gather information from the system and store the results in facts prefixed with ``ansible_net_``. The data collected by these modules is documented in the `Return Values` section of the module docs, in this case :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>` and :ref:`vyos.vyos.vyos_facts <ansible_collections.vyos.vyos.vyos_facts_module>`. We can use the facts, such as ``ansible_net_version``, later on in the "Display some facts" task.
+
+To ensure we call the correct module (``*_facts``), the task is conditionally run based on the group defined in the inventory file. For more information on the use of conditionals in Ansible playbooks, see :ref:`the_when_statement`.
+
+In this example, we will create an inventory file containing some network switches, then run a playbook to connect to the network devices and return some information about them.
+
+Step 1: Creating the inventory
+------------------------------
+
+First, create a file called ``inventory``, containing:
+
+.. code-block:: ini
+
+ [switches:children]
+ eos
+ ios
+ vyos
+
+ [eos]
+ eos01.example.net
+
+ [ios]
+ ios01.example.net
+
+ [vyos]
+ vyos01.example.net
+
+
+Step 2: Creating the playbook
+-----------------------------
+
+Next, create a playbook file called ``facts-demo.yml`` containing the following:
+
+.. code-block:: yaml
+
+ - name: "Demonstrate connecting to switches"
+ hosts: switches
+ gather_facts: no
+
+ tasks:
+ ###
+ # Collect data
+ #
+ - name: Gather facts (eos)
+ arista.eos.eos_facts:
+ when: ansible_network_os == 'arista.eos.eos'
+
+ - name: Gather facts (ios)
+ cisco.ios.ios_facts:
+ when: ansible_network_os == 'cisco.ios.ios'
+
+ - name: Gather facts (vyos)
+ vyos.vyos.vyos_facts:
+ when: ansible_network_os == 'vyos.vyos.vyos'
+
+ ###
+ # Demonstrate variables
+ #
+ - name: Display some facts
+ debug:
+ msg: "The hostname is {{ ansible_net_hostname }} and the OS is {{ ansible_net_version }}"
+
+ - name: Facts from a specific host
+ debug:
+ var: hostvars['vyos01.example.net']
+
+ - name: Write facts to disk using a template
+ copy:
+ content: |
+ #jinja2: lstrip_blocks: True
+ EOS device info:
+ {% for host in groups['eos'] %}
+ Hostname: {{ hostvars[host].ansible_net_hostname }}
+ Version: {{ hostvars[host].ansible_net_version }}
+ Model: {{ hostvars[host].ansible_net_model }}
+ Serial: {{ hostvars[host].ansible_net_serialnum }}
+ {% endfor %}
+
+ IOS device info:
+ {% for host in groups['ios'] %}
+ Hostname: {{ hostvars[host].ansible_net_hostname }}
+ Version: {{ hostvars[host].ansible_net_version }}
+ Model: {{ hostvars[host].ansible_net_model }}
+ Serial: {{ hostvars[host].ansible_net_serialnum }}
+ {% endfor %}
+
+ VyOS device info:
+ {% for host in groups['vyos'] %}
+ Hostname: {{ hostvars[host].ansible_net_hostname }}
+ Version: {{ hostvars[host].ansible_net_version }}
+ Model: {{ hostvars[host].ansible_net_model }}
+ Serial: {{ hostvars[host].ansible_net_serialnum }}
+ {% endfor %}
+ dest: /tmp/switch-facts
+ run_once: yes
+
+ ###
+ # Get running configuration
+ #
+
+ - name: Backup switch (eos)
+ arista.eos.eos_config:
+ backup: yes
+ register: backup_eos_location
+ when: ansible_network_os == 'arista.eos.eos'
+
+ - name: backup switch (vyos)
+ vyos.vyos.vyos_config:
+ backup: yes
+ register: backup_vyos_location
+ when: ansible_network_os == 'vyos.vyos.vyos'
+
+ - name: Create backup dir
+ file:
+ path: "/tmp/backups/{{ inventory_hostname }}"
+ state: directory
+ recurse: yes
+
+ - name: Copy backup files into /tmp/backups/ (eos)
+ copy:
+ src: "{{ backup_eos_location.backup_path }}"
+ dest: "/tmp/backups/{{ inventory_hostname }}/{{ inventory_hostname }}.bck"
+ when: ansible_network_os == 'arista.eos.eos'
+
+ - name: Copy backup files into /tmp/backups/ (vyos)
+ copy:
+ src: "{{ backup_vyos_location.backup_path }}"
+ dest: "/tmp/backups/{{ inventory_hostname }}/{{ inventory_hostname }}.bck"
+ when: ansible_network_os == 'vyos.vyos.vyos'
+
+Step 3: Running the playbook
+----------------------------
+
+To run the playbook, run the following from a console prompt:
+
+.. code-block:: console
+
+ ansible-playbook -i inventory facts-demo.yml
+
+This should return output similar to the following:
+
+.. code-block:: console
+
+ PLAY RECAP
+ eos01.example.net : ok=7 changed=2 unreachable=0 failed=0
+ ios01.example.net : ok=7 changed=2 unreachable=0 failed=0
+ vyos01.example.net : ok=6 changed=2 unreachable=0 failed=0
+
+Step 4: Examining the playbook results
+--------------------------------------
+
+Next, look at the contents of the file we created containing the switch facts:
+
+.. code-block:: console
+
+ cat /tmp/switch-facts
+
+You can also look at the backup files:
+
+.. code-block:: console
+
+ find /tmp/backups
+
+
+If ``ansible-playbook`` fails, please follow the debug steps in :ref:`network_debug_troubleshooting`.
+
+
+.. _network-agnostic-examples:
+
+Example 2: simplifying playbooks with network agnostic modules
+==============================================================
+
+(This example originally appeared in the `Deep Dive on cli_command for Network Automation <https://www.ansible.com/blog/deep-dive-on-cli-command-for-network-automation>`_ blog post by Sean Cavanaugh, `@IPvSean <https://github.com/IPvSean>`_.)
+
+If you have two or more network platforms in your environment, you can use the network agnostic modules to simplify your playbooks. You can use network agnostic modules such as ``ansible.netcommon.cli_command`` or ``ansible.netcommon.cli_config`` in place of the platform-specific modules such as ``arista.eos.eos_config``, ``cisco.ios.ios_config``, and ``junipernetworks.junos.junos_config``. This reduces the number of tasks and conditionals you need in your playbooks.
+
+.. note::
+ Network agnostic modules require the :ref:`ansible.netcommon.network_cli <ansible_collections.ansible.netcommon.network_cli_connection>` connection plugin.
+
+
+Sample playbook with platform-specific modules
+----------------------------------------------
+
+This example assumes three platforms, Arista EOS, Cisco NXOS, and VyOS. Without the network agnostic modules, a sample playbook might contain the following three tasks with platform-specific commands:
+
+.. code-block:: yaml
+
+ ---
+ - name: Run Arista command
+ arista.eos.eos_command:
+ commands: show ip int br
+ when: ansible_network_os == 'arista.eos.eos'
+
+ - name: Run Cisco NXOS command
+ cisco.nxos.nxos_command:
+ commands: show ip int br
+ when: ansible_network_os == 'cisco.nxos.nxos'
+
+ - name: Run Vyos command
+ vyos.vyos.vyos_command:
+ commands: show interface
+ when: ansible_network_os == 'vyos.vyos.vyos'
+
+Simplified playbook with ``cli_command`` network agnostic module
+----------------------------------------------------------------
+
+You can replace these platform-specific modules with the network agnostic ``ansible.netcommon.cli_command`` module as follows:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: network
+ gather_facts: false
+ connection: ansible.netcommon.network_cli
+
+ tasks:
+ - name: Run cli_command on Arista and display results
+ block:
+ - name: Run cli_command on Arista
+ ansible.netcommon.cli_command:
+ command: show ip int br
+ register: result
+
+ - name: Display result to terminal window
+ debug:
+ var: result.stdout_lines
+ when: ansible_network_os == 'arista.eos.eos'
+
+ - name: Run cli_command on Cisco IOS and display results
+ block:
+ - name: Run cli_command on Cisco IOS
+ ansible.netcommon.cli_command:
+ command: show ip int br
+ register: result
+
+ - name: Display result to terminal window
+ debug:
+ var: result.stdout_lines
+ when: ansible_network_os == 'cisco.ios.ios'
+
+ - name: Run cli_command on Vyos and display results
+ block:
+ - name: Run cli_command on Vyos
+ ansible.netcommon.cli_command:
+ command: show interfaces
+ register: result
+
+ - name: Display result to terminal window
+ debug:
+ var: result.stdout_lines
+ when: ansible_network_os == 'vyos.vyos.vyos'
+
+
+If you use groups and group_vars by platform type, this playbook can be further simplified to:
+
+.. code-block:: yaml
+
+ ---
+ - name: Run command and print to terminal window
+ hosts: routers
+ gather_facts: false
+
+ tasks:
+ - name: Run show command
+ ansible.netcommon.cli_command:
+ command: "{{show_interfaces}}"
+ register: command_output
+
+
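+For this simplified play to work, each platform group must supply its own ``show_interfaces`` value through group_vars. A hedged sketch of what ``group_vars/eos.yml`` might contain, assuming the grouping used earlier in this document:
+
+.. code-block:: yaml
+
+    # group_vars/eos.yml (sketch)
+    ansible_connection: ansible.netcommon.network_cli
+    ansible_network_os: arista.eos.eos
+    # the platform-specific command substituted into the agnostic task
+    show_interfaces: show ip int br
+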
+You can see a full example of this using group_vars and also a configuration backup example at `Network agnostic examples <https://github.com/network-automation/agnostic_example>`_.
+
+Using multiple prompts with the ``ansible.netcommon.cli_command``
+-------------------------------------------------------------------
+
+The ``ansible.netcommon.cli_command`` also supports multiple prompts.
+
+.. code-block:: yaml
+
+ ---
+ - name: Change password to default
+ ansible.netcommon.cli_command:
+ command: "{{ item }}"
+ prompt:
+ - "New password"
+ - "Retype new password"
+ answer:
+ - "mypassword123"
+ - "mypassword123"
+ check_all: True
+ loop:
+ - "configure"
+ - "rollback"
+ - "set system root-authentication plain-text-password"
+ - "commit"
+
+See :ref:`ansible.netcommon.cli_command <cli_command_module>` for full documentation on this module.
+
+
+Implementation Notes
+====================
+
+
+Demo variables
+--------------
+
+Although these tasks are not needed to write data to disk, they are used in this example to demonstrate some methods of accessing facts about the given devices or a named host.
+
+Ansible ``hostvars`` allows you to access variables from a named host. Without this, we would return the details for the current host rather than the named host.
+
+For more information, see :ref:`magic_variables_and_hostvars`.
+
+Get running configuration
+-------------------------
+
+The :ref:`arista.eos.eos_config <ansible_collections.arista.eos.eos_config_module>` and :ref:`vyos.vyos.vyos_config <ansible_collections.vyos.vyos.vyos_config_module>` modules have a ``backup:`` option that, when set, causes the module to create a full backup of the current ``running-config`` from the remote device before any changes are made. The backup file is written to the ``backup`` folder in the playbook root directory. If the directory does not exist, it is created.
+
+To demonstrate how we can move the backup file to a different location, we register the result and move the file to the path stored in ``backup_path``.
+
+Note that when using variables from tasks in this way we use double quotes (``"``) and double curly-brackets (``{{...}}``) to tell Ansible that this is a variable.
+
+Troubleshooting
+===============
+
+If you receive a connection error, please double-check the inventory and playbook for typos or missing lines. If the issue still occurs, follow the debug steps in :ref:`network_debug_troubleshooting`.
+
+.. seealso::
+
+ * :ref:`network_guide`
+ * :ref:`intro_inventory`
+ * :ref:`Keeping vaulted variables visible <tip_for_variables_and_vaults>`
diff --git a/docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst b/docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst
new file mode 100644
index 00000000..97f671bb
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/network_debug_troubleshooting.rst
@@ -0,0 +1,828 @@
+.. _network_debug_troubleshooting:
+
+***************************************
+Network Debug and Troubleshooting Guide
+***************************************
+
+This section discusses how to debug and troubleshoot network modules in Ansible.
+
+.. contents::
+ :local:
+
+
+How to troubleshoot
+===================
+
+Ansible network automation errors generally fall into one of the following categories:
+
+:Authentication issues:
+  * Not correctly specifying credentials
+  * Remote device (network switch/router) not falling back to other authentication methods
+  * SSH key issues
+:Timeout issues:
+  * Can occur when trying to pull a large amount of data
+  * May actually be masking an authentication issue
+:Playbook issues:
+  * Use of ``delegate_to``, instead of ``ProxyCommand``. See the :ref:`network proxy guide <network_delegate_to_vs_ProxyCommand>` for more information.
+
+.. warning:: ``unable to open shell``
+
+ The ``unable to open shell`` message means that the ``ansible-connection`` daemon has not been able to successfully
+ talk to the remote network device. This generally means that there is an authentication issue. See the "Authentication and connection issues" section
+ in this document for more information.
+
+.. _enable_network_logging:
+
+Enabling Networking logging and how to read the logfile
+-------------------------------------------------------
+
+**Platforms:** Any
+
+Ansible includes logging to help diagnose and troubleshoot issues regarding Ansible Networking modules.
+
+Because logging is very verbose, it is disabled by default. It can be enabled with the :envvar:`ANSIBLE_LOG_PATH` and :envvar:`ANSIBLE_DEBUG` options on the ansible-controller, that is the machine running ``ansible-playbook``.
+
+Before running ``ansible-playbook``, run the following commands to enable logging::
+
+ # Specify the location for the log file
+ export ANSIBLE_LOG_PATH=~/ansible.log
+ # Enable Debug
+ export ANSIBLE_DEBUG=True
+
+ # Run with 4*v for connection level verbosity
+ ansible-playbook -vvvv ...
+
+After Ansible has finished running you can inspect the log file which has been created on the ansible-controller:
+
+.. code-block:: console
+
+ less $ANSIBLE_LOG_PATH
+
+ 2017-03-30 13:19:52,740 p=28990 u=fred | creating new control socket for host veos01:22 as user admin
+ 2017-03-30 13:19:52,741 p=28990 u=fred | control socket path is /home/fred/.ansible/pc/ca5960d27a
+ 2017-03-30 13:19:52,741 p=28990 u=fred | current working directory is /home/fred/ansible/test/integration
+ 2017-03-30 13:19:52,741 p=28990 u=fred | using connection plugin network_cli
+ ...
+ 2017-03-30 13:20:14,771 paramiko.transport userauth is OK
+ 2017-03-30 13:20:15,283 paramiko.transport Authentication (keyboard-interactive) successful!
+ 2017-03-30 13:20:15,302 p=28990 u=fred | ssh connection done, setting terminal
+ 2017-03-30 13:20:15,321 p=28990 u=fred | ssh connection has completed successfully
+ 2017-03-30 13:20:15,322 p=28990 u=fred | connection established to veos01 in 0:00:22.580626
+
+
+From the log notice:
+
+* ``p=28990`` is the PID (Process ID) of the ``ansible-connection`` process
+* ``u=fred`` is the user `running` ansible, not the remote-user you are attempting to connect as
+* ``creating new control socket for host veos01:22 as user admin`` shows the host:port and the user
+* ``control socket path is`` shows the location on disk where the persistent connection socket is created
+* ``using connection plugin network_cli`` informs you that a persistent connection is being used
+* ``connection established to veos01 in 0:00:22.580626`` shows the time taken to obtain a shell on the remote device
+
+
+.. note:: Port None ``creating new control socket for host veos01:None``
+
+   If the log reports the port as ``None``, this means that the default port is being used.
+   A future Ansible release will improve this message so that the port is always logged.
+
+Because the log files are verbose, you can use grep to look for specific information. For example, once you have identified the ``pid`` from the ``creating new control socket for host`` line you can search for other connection log entries::
+
+ grep "p=28990" $ANSIBLE_LOG_PATH
+
+
+Enabling Networking device interaction logging
+----------------------------------------------
+
+**Platforms:** Any
+
+Ansible includes logging of device interaction in the log file to help diagnose and troubleshoot
+issues regarding Ansible Networking modules. The messages are logged in the file pointed to by the ``log_path`` configuration
+option in the Ansible configuration file or by setting the :envvar:`ANSIBLE_LOG_PATH`.
+
+.. warning::
+   The device interaction messages consist of the commands executed on the target device and the returned responses. Since this
+   log data can contain sensitive information, including passwords in plain text, it is disabled by default.
+   Additionally, in order to prevent accidental leakage of data, a warning is shown on every task with this
+   setting enabled, specifying which host has it enabled and where the data is being logged.
+
+Be sure to fully understand the security implications of enabling this option. Device interaction logging can be enabled either globally, by setting the configuration file option or an environment variable, or on a per-task basis by passing a special variable to the task.
+
+Before running ``ansible-playbook`` run the following commands to enable logging:
+
+.. code-block:: text
+
+ # Specify the location for the log file
+ export ANSIBLE_LOG_PATH=~/ansible.log
+
+
+Enable device interaction logging for a given task:
+
+.. code-block:: yaml
+
+ - name: get version information
+ cisco.ios.ios_command:
+ commands:
+ - show version
+ vars:
+ ansible_persistent_log_messages: True
+
+
+To make this a global setting, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [persistent_connection]
+ log_messages = True
+
+or enable the environment variable `ANSIBLE_PERSISTENT_LOG_MESSAGES`:
+
+.. code-block:: text
+
+ # Enable device interaction logging
+ export ANSIBLE_PERSISTENT_LOG_MESSAGES=True
+
+If the task is failing on connection initialization itself, you should enable this option
+globally. If an individual task is failing intermittently, this option can be enabled for that task itself to find the root cause.
+
+After Ansible has finished running, you can inspect the log file which has been created on the ansible-controller.
+
+.. note:: Be sure to fully understand the security implications of enabling this option, as it can log sensitive
+   information in the log file, thus creating a security vulnerability.
+
+
+Isolating an error
+------------------
+
+**Platforms:** Any
+
+As with any troubleshooting effort, it is important to simplify the test case as much as possible.
+
+For Ansible this can be done by ensuring you are only running against one remote device:
+
+* Using ``ansible-playbook --limit switch1.example.net...``
+* Using an ad-hoc ``ansible`` command
+
+`ad-hoc` refers to running Ansible to perform some quick command using ``/usr/bin/ansible``, rather than the orchestration language, which is ``/usr/bin/ansible-playbook``. In this case we can ensure connectivity by attempting to execute a single command on the remote device::
+
+ ansible -m arista.eos.eos_command -a 'commands=?' -i inventory switch1.example.net -e 'ansible_connection=ansible.netcommon.network_cli' -u admin -k
+
+In the above example, we:
+
+* connect to ``switch1.example.net`` specified in the inventory file ``inventory``
+* use the module ``arista.eos.eos_command``
+* run the command ``?``
+* connect using the username ``admin``
+* inform the ``ansible`` command to prompt for the SSH password by specifying ``-k``
+
+If you have SSH keys configured correctly, you don't need to specify the ``-k`` parameter.
+
+If the connection still fails, you can combine it with the steps in :ref:`enable network logging <enable_network_logging>`. For example:
+
+.. code-block:: text
+
+ # Specify the location for the log file
+ export ANSIBLE_LOG_PATH=~/ansible.log
+ # Enable Debug
+ export ANSIBLE_DEBUG=True
+ # Run with ``-vvvv`` for connection level verbosity
+ ansible -m arista.eos.eos_command -a 'commands=?' -i inventory switch1.example.net -e 'ansible_connection=ansible.netcommon.network_cli' -u admin -k
+
+Then review the log file and find the relevant error message in the rest of this document.
+
+.. For details on other ways to authenticate, see LINKTOAUTHHOWTODOCS.
+
+.. _socket_path_issue:
+
+Troubleshooting socket path issues
+==================================
+
+**Platforms:** Any
+
+The ``Socket path does not exist or cannot be found`` and ``Unable to connect to socket`` messages indicate that the socket used to communicate with the remote network device is unavailable or does not exist.
+
+For example:
+
+.. code-block:: none
+
+ fatal: [spine02]: FAILED! => {
+ "changed": false,
+ "failed": true,
+ "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_TSqk5J/ansible_modlib.zip/ansible/module_utils/connection.py\", line 115, in _exec_jsonrpc\nansible.module_utils.connection.ConnectionError: Socket path XX does not exist or cannot be found. See Troubleshooting socket path issues in the Network Debug and Troubleshooting Guide\n",
+ "module_stdout": "",
+ "msg": "MODULE FAILURE",
+ "rc": 1
+ }
+
+or
+
+.. code-block:: none
+
+ fatal: [spine02]: FAILED! => {
+ "changed": false,
+ "failed": true,
+ "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_TSqk5J/ansible_modlib.zip/ansible/module_utils/connection.py\", line 123, in _exec_jsonrpc\nansible.module_utils.connection.ConnectionError: Unable to connect to socket XX. See Troubleshooting socket path issues in Network Debug and Troubleshooting Guide\n",
+ "module_stdout": "",
+ "msg": "MODULE FAILURE",
+ "rc": 1
+ }
+
+Suggestions to resolve:
+
+#. Verify that you have write access to the socket path described in the error message.
+
+#. Follow the steps detailed in :ref:`enable network logging <enable_network_logging>`.
+
+If the identified error message from the log file is:
+
+.. code-block:: yaml
+
+ 2017-04-04 12:19:05,670 p=18591 u=fred | command timeout triggered, timeout value is 30 secs
+
+or
+
+.. code-block:: yaml
+
+ 2017-04-04 12:19:05,670 p=18591 u=fred | persistent connection idle timeout triggered, timeout value is 30 secs
+
+Follow the steps detailed in :ref:`timeout issues <timeout_issues>`
+
+
+.. _unable_to_open_shell:
+
+Category "Unable to open shell"
+===============================
+
+
+**Platforms:** Any
+
+The ``unable to open shell`` message means that the ``ansible-connection`` daemon has not been able to successfully talk to the remote network device. This generally means that there is an authentication issue. It is a "catch all" message, meaning you need to enable :ref:`logging <enable_network_logging>` to find the underlying issues.
+
+
+
+For example:
+
+.. code-block:: none
+
+ TASK [prepare_eos_tests : enable cli on remote device] **************************************************
+ fatal: [veos01]: FAILED! => {"changed": false, "failed": true, "msg": "unable to open shell"}
+
+
+or:
+
+
+.. code-block:: none
+
+ TASK [ios_system : configure name_servers] *************************************************************
+ task path:
+ fatal: [ios-csr1000v]: FAILED! => {
+ "changed": false,
+ "failed": true,
+ "msg": "unable to open shell",
+ }
+
+Suggestions to resolve:
+
+Follow the steps detailed in enable_network_logging_.
+
+Once you've identified the error message from the log file, the specific solution can be found in the rest of this document.
+
+
+
+Error: "[Errno -2] Name or service not known"
+---------------------------------------------
+
+**Platforms:** Any
+
+Indicates that the remote host you are trying to connect to cannot be reached.
+
+For example:
+
+.. code-block:: yaml
+
+ 2017-04-04 11:39:48,147 p=15299 u=fred | control socket path is /home/fred/.ansible/pc/ca5960d27a
+ 2017-04-04 11:39:48,147 p=15299 u=fred | current working directory is /home/fred/git/ansible-inc/stable-2.3/test/integration
+ 2017-04-04 11:39:48,147 p=15299 u=fred | using connection plugin network_cli
+ 2017-04-04 11:39:48,340 p=15299 u=fred | connecting to host veos01 returned an error
+ 2017-04-04 11:39:48,340 p=15299 u=fred | [Errno -2] Name or service not known
+
+
+Suggestions to resolve:
+
+* If you are using the ``provider:`` options, ensure that its suboption ``host:`` is set correctly.
+* If you are not using ``provider:`` or top-level arguments, ensure your inventory file is correct.
+
+
+
+
+
+Error: "Authentication failed"
+------------------------------
+
+**Platforms:** Any
+
+Occurs if the credentials (username, passwords, or ssh keys) passed to ``ansible-connection`` (via ``ansible`` or ``ansible-playbook``) cannot be used to connect to the remote device.
+
+
+
+For example:
+
+.. code-block:: yaml
+
+ <ios01> ESTABLISH CONNECTION FOR USER: cisco on PORT 22 TO ios01
+ <ios01> Authentication failed.
+
+
+Suggestions to resolve:
+
+If you are specifying credentials via ``password:`` (either directly or via ``provider:``) or the environment variable `ANSIBLE_NET_PASSWORD`, it is possible that ``paramiko`` (the Python SSH library that Ansible uses) is using ssh keys, and therefore the credentials you are specifying are being ignored. To find out if this is the case, disable "look for keys" as follows:
+
+.. code-block:: sh
+
+ export ANSIBLE_PARAMIKO_LOOK_FOR_KEYS=False
+
+To make this a permanent change, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [paramiko_connection]
+ look_for_keys = False
+
+
+Error: "connecting to host <hostname> returned an error" or "Bad address"
+-------------------------------------------------------------------------
+
+This may occur if the SSH fingerprint hasn't been added to Paramiko's (the Python SSH library) known hosts file.
+
+When using persistent connections with Paramiko, the connection runs in a background process. If the host doesn't already have a valid SSH key, by default Ansible will prompt to add the host key. This will cause connections running in background processes to fail.
+
+For example:
+
+.. code-block:: yaml
+
+ 2017-04-04 12:06:03,486 p=17981 u=fred | using connection plugin network_cli
+ 2017-04-04 12:06:04,680 p=17981 u=fred | connecting to host veos01 returned an error
+ 2017-04-04 12:06:04,682 p=17981 u=fred | (14, 'Bad address')
+ 2017-04-04 12:06:33,519 p=17981 u=fred | number of connection attempts exceeded, unable to connect to control socket
+ 2017-04-04 12:06:33,520 p=17981 u=fred | persistent_connect_interval=1, persistent_connect_retries=30
+
+
+Suggestions to resolve:
+
+Use ``ssh-keyscan`` to pre-populate the known_hosts. You need to ensure the keys are correct.
+
+.. code-block:: shell
+
+ ssh-keyscan veos01
+
+
+or
+
+You can tell Ansible to automatically accept the keys:
+
+Environment variable method::
+
+ export ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD=True
+ ansible-playbook ...
+
+``ansible.cfg`` method:
+
+.. code-block:: ini
+
+ [paramiko_connection]
+ host_key_auto_add = True
+
+
+
+.. warning:: Security warning
+
+   Care should be taken before accepting keys.
+
+Error: "No authentication methods available"
+--------------------------------------------
+
+For example:
+
+.. code-block:: yaml
+
+ 2017-04-04 12:19:05,670 p=18591 u=fred | creating new control socket for host veos01:None as user admin
+ 2017-04-04 12:19:05,670 p=18591 u=fred | control socket path is /home/fred/.ansible/pc/ca5960d27a
+ 2017-04-04 12:19:05,670 p=18591 u=fred | current working directory is /home/fred/git/ansible-inc/ansible-workspace-2/test/integration
+ 2017-04-04 12:19:05,670 p=18591 u=fred | using connection plugin network_cli
+ 2017-04-04 12:19:06,606 p=18591 u=fred | connecting to host veos01 returned an error
+ 2017-04-04 12:19:06,606 p=18591 u=fred | No authentication methods available
+ 2017-04-04 12:19:35,708 p=18591 u=fred | connect retry timeout expired, unable to connect to control socket
+ 2017-04-04 12:19:35,709 p=18591 u=fred | persistent_connect_retry_timeout is 15 secs
+
+
+Suggestions to resolve:
+
+No password or SSH key was supplied. Provide a password or SSH key so the connection can authenticate.
+
+Clearing Out Persistent Connections
+-----------------------------------
+
+**Platforms:** Any
+
+In Ansible 2.3, persistent connection sockets are stored in ``~/.ansible/pc`` for all network devices. When an Ansible playbook runs, the persistent socket connection is displayed when verbose output is specified.
+
+``<switch> socket_path: /home/fred/.ansible/pc/f64ddfa760``
+
+To clear out a persistent connection before it times out (the default timeout is 30 seconds
+of inactivity), simply delete the socket file.
+
+
+.. _timeout_issues:
+
+Timeout issues
+==============
+
+Persistent connection idle timeout
+----------------------------------
+
+By default, ``ANSIBLE_PERSISTENT_CONNECT_TIMEOUT`` is set to 30 (seconds). You may see the following error if this value is too low:
+
+.. code-block:: yaml
+
+ 2017-04-04 12:19:05,670 p=18591 u=fred | persistent connection idle timeout triggered, timeout value is 30 secs
+
+Suggestions to resolve:
+
+Increase the value of the persistent connection idle timeout:
+
+.. code-block:: sh
+
+ export ANSIBLE_PERSISTENT_CONNECT_TIMEOUT=60
+
+To make this a permanent change, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [persistent_connection]
+ connect_timeout = 60
+
+Command timeout
+---------------
+
+By default, ``ANSIBLE_PERSISTENT_COMMAND_TIMEOUT`` is set to 30 (seconds). Prior versions of Ansible had this value set to 10 seconds by default.
+You may see the following error if this value is too low:
+
+
+.. code-block:: yaml
+
+ 2017-04-04 12:19:05,670 p=18591 u=fred | command timeout triggered, timeout value is 30 secs
+
+Suggestions to resolve:
+
+* Option 1 (Global command timeout setting):
+ Increase value of command timeout in configuration file or by setting environment variable.
+
+ .. code-block:: yaml
+
+ export ANSIBLE_PERSISTENT_COMMAND_TIMEOUT=60
+
+ To make this a permanent change, add the following to your ``ansible.cfg`` file:
+
+ .. code-block:: ini
+
+ [persistent_connection]
+ command_timeout = 60
+
+* Option 2 (Per task command timeout setting):
+ Increase command timeout per task basis. All network modules support a
+ timeout value that can be set on a per task basis.
+ The timeout value controls the amount of time in seconds before the
+ task will fail if the command has not returned.
+
+ For local connection type:
+
+ .. FIXME: Detail error here
+
+ Suggestions to resolve:
+
+ .. code-block:: yaml
+
+ - name: save running-config
+ cisco.ios.ios_command:
+ commands: copy running-config startup-config
+ provider: "{{ cli }}"
+ timeout: 30
+
+
+ Suggestions to resolve:
+
+ .. code-block:: yaml
+
+ - name: save running-config
+ cisco.ios.ios_command:
+ commands: copy running-config startup-config
+ vars:
+ ansible_command_timeout: 60
+
+Some operations take longer than the default 30 seconds to complete. One good
+example is saving the current running config on IOS devices to startup config.
+In this case, changing the timeout value from the default 30 seconds to 60
+seconds will prevent the task from failing before the command completes
+successfully.
+
+Persistent connection retry timeout
+-----------------------------------
+
+By default, ``ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT`` is set to 15 (seconds). You may see the following error if this value is too low:
+
+.. code-block:: yaml
+
+ 2017-04-04 12:19:35,708 p=18591 u=fred | connect retry timeout expired, unable to connect to control socket
+ 2017-04-04 12:19:35,709 p=18591 u=fred | persistent_connect_retry_timeout is 15 secs
+
+Suggestions to resolve:
+
+Increase the value of the persistent connection retry timeout.
+Note: This value should be greater than the SSH timeout value (the timeout value under the defaults
+section in the configuration file) and less than the value of the persistent
+connection idle timeout (``connect_timeout``).
+
+.. code-block:: sh
+
+ export ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT=30
+
+To make this a permanent change, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [persistent_connection]
+ connect_retry_timeout = 30
+
+
+Timeout issue due to platform specific login menu with ``network_cli`` connection type
+--------------------------------------------------------------------------------------
+
+In Ansible 2.9 and later, the ``network_cli`` connection plugin includes configuration options
+to handle platform-specific login menus. These options can be set as group/host or task
+variables.
+
+Example: Handle single login menu prompts with host variables
+
+.. code-block:: console
+
+ $cat host_vars/<hostname>.yaml
+ ---
+ ansible_terminal_initial_prompt:
+ - "Connect to a host"
+ ansible_terminal_initial_answer:
+ - "3"
+
+Example: Handle remote host multiple login menu prompts with host variables
+
+.. code-block:: console
+
+ $cat host_vars/<inventory-hostname>.yaml
+ ---
+ ansible_terminal_initial_prompt:
+ - "Press any key to enter main menu"
+ - "Connect to a host"
+ ansible_terminal_initial_answer:
+ - "\\r"
+ - "3"
+ ansible_terminal_initial_prompt_checkall: True
+
+To handle multiple login menu prompts:
+
+* The values of ``ansible_terminal_initial_prompt`` and ``ansible_terminal_initial_answer`` should be a list.
+* The prompt sequence should match the answer sequence.
+* The value of ``ansible_terminal_initial_prompt_checkall`` should be set to ``True``.
+
+.. note:: If all the prompts in the sequence are not received from the remote host at connection initialization time, the connection will time out.
+
+
+Playbook issues
+===============
+
+This section details issues caused by the playbook itself.
+
+Error: "Unable to enter configuration mode"
+-------------------------------------------
+
+**Platforms:** Arista EOS and Cisco IOS
+
+This occurs when you attempt to run a task that requires privileged mode in a user mode shell.
+
+For example:
+
+.. code-block:: console
+
+ TASK [ios_system : configure name_servers] *****************************************************************************
+ task path:
+ fatal: [ios-csr1000v]: FAILED! => {
+ "changed": false,
+ "failed": true,
+ "msg": "unable to enter configuration mode",
+ }
+
+Suggestions to resolve:
+
+ Use ``connection: ansible.netcommon.network_cli`` and ``become: yes``
+
+
+Proxy Issues
+============
+
+.. _network_delegate_to_vs_ProxyCommand:
+
+delegate_to vs ProxyCommand
+---------------------------
+
+In order to use a bastion or intermediate jump host to connect to network devices over ``cli``
+transport, network modules support the use of ``ProxyCommand``.
+
+To use ``ProxyCommand``, configure the proxy settings in the Ansible inventory
+file to specify the proxy host.
+
+.. code-block:: ini
+
+ [nxos]
+ nxos01
+ nxos02
+
+ [nxos:vars]
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+With the configuration above, simply build and run the playbook as normal with
+no additional changes necessary. The network module will now connect to the
+network device by first connecting to the host specified in
+``ansible_ssh_common_args``, which is ``bastion01`` in the above example.
+
+You can also set the proxy target for all hosts by using environment variables.
+
+.. code-block:: sh
+
+ export ANSIBLE_SSH_ARGS='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+Using bastion/jump host with netconf connection
+-----------------------------------------------
+
+Enabling jump host setting
+--------------------------
+
+
+Bastion/jump host with netconf connection can be enabled by:
+ - Setting the Ansible variable ``ansible_netconf_ssh_config`` either to ``True`` or to a custom ssh config file path
+ - Setting the environment variable ``ANSIBLE_NETCONF_SSH_CONFIG`` to ``True`` or to a custom ssh config file path
+ - Setting ``ssh_config = 1`` or ``ssh_config = <ssh-file-path>`` under the ``netconf_connection`` section
+
+If the configuration variable is set to ``1``, the ProxyCommand and other ssh variables are read from
+the default ssh config file (``~/.ssh/config``).
+
+If the configuration variable is set to a file path, the ProxyCommand and other ssh variables are read
+from the given custom ssh config file path.
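+
+For example, to read the ProxyCommand and related settings from a custom ssh config file, your ``ansible.cfg`` could contain the following sketch, where the file path is a placeholder:
+
+.. code-block:: ini
+
+    [netconf_connection]
+    # placeholder path to a custom ssh config file containing the ProxyCommand
+    ssh_config = /home/user/.ssh/netconf_ssh_config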
+
+Example ssh config file (~/.ssh/config)
+---------------------------------------
+
+.. code-block:: ini
+
+ Host jumphost
+ HostName jumphost.domain.name.com
+ User jumphost-user
+ IdentityFile "/path/to/ssh-key.pem"
+ Port 22
+
+ # Note: Due to the way that Paramiko reads the SSH Config file,
+ # you need to specify the NETCONF port that the host uses.
+ # In other words, it does not automatically use ansible_port
+ # As a result you need either:
+
+ Host junos01
+ HostName junos01
+ ProxyCommand ssh -W %h:22 jumphost
+
+ # OR
+
+ Host junos01
+ HostName junos01
+ ProxyCommand ssh -W %h:830 jumphost
+
+ # Depending on the netconf port used.
+
+Example Ansible inventory file
+------------------------------
+
+.. code-block:: ini
+
+ [junos]
+ junos01
+
+ [junos:vars]
+ ansible_connection=ansible.netcommon.netconf
+ ansible_network_os=junipernetworks.junos.junos
+ ansible_user=myuser
+ ansible_password=!vault...
+
+
+.. note:: Using ``ProxyCommand`` with passwords via variables
+
+ By design, SSH doesn't support providing passwords via environment variables.
+ This is done to prevent secrets from leaking out, for example in ``ps`` output.
+
+   We recommend using SSH keys, and if needed an ssh-agent, rather than passwords, wherever possible.
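+
+   A minimal console sketch of loading a key into an ssh-agent before a playbook run (the key path is illustrative):
+
+   .. code-block:: console
+
+      $ eval "$(ssh-agent -s)"
+      $ ssh-add ~/.ssh/network_rsa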
+
+Miscellaneous Issues
+====================
+
+
+Intermittent failure while using ``ansible.netcommon.network_cli`` connection type
+------------------------------------------------------------------------------------
+
+If the ``ansible.netcommon.network_cli`` connection plugin does not correctly match the command prompt
+in the response, the task might fail intermittently with a truncated
+response or with the error message ``operation requires privilege escalation``.
+Starting in Ansible 2.7.1, a buffer read timer ensures prompts are matched properly
+and a complete response is returned in the output. The timer defaults to 0.2 seconds and
+can be adjusted (in seconds) per task or globally.
+
+Example: Per-task timer setting
+
+.. code-block:: yaml
+
+ - name: gather ios facts
+ cisco.ios.ios_facts:
+ gather_subset: all
+ register: result
+ vars:
+ ansible_buffer_read_timeout: 2
+
+
+To make this a global setting, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [persistent_connection]
+ buffer_read_timeout = 2
+
+To disable this per-command timer delay, set the value to zero.
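+
+For example, a per-task sketch that disables the delay entirely:
+
+.. code-block:: yaml
+
+   - name: run command without buffer read delay
+     cisco.ios.ios_command:
+       commands:
+         - show version
+     vars:
+       ansible_buffer_read_timeout: 0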
+
+
+Task failure due to mismatched error regex within command response using ``ansible.netcommon.network_cli`` connection type
+----------------------------------------------------------------------------------------------------------------------------
+
+In Ansible 2.9 and later, the ``ansible.netcommon.network_cli`` connection plugin adds configuration options
+for the stdout and stderr regexes that identify whether a command execution response is
+a normal response or an error response. These options can be set as group/host variables or as
+task variables.
+
+Example: Mismatched error response
+
+.. code-block:: yaml
+
+ - name: fetch logs from remote host
+ cisco.ios.ios_command:
+ commands:
+ - show logging
+
+
+Playbook run output:
+
+.. code-block:: console
+
+ TASK [first fetch logs] ********************************************************
+ fatal: [ios01]: FAILED! => {
+ "changed": false,
+ "msg": "RF Name:\r\n\r\n <--nsip-->
+ \"IPSEC-3-REPLAY_ERROR: Test log\"\r\n*Aug 1 08:36:18.483: %SYS-7-USERLOG_DEBUG:
+ Message from tty578(user id: ansible): test\r\nan-ios-02#"}
+
+Suggestions to resolve:
+
+Modify the error regex for the individual task:
+
+.. code-block:: yaml
+
+ - name: fetch logs from remote host
+ cisco.ios.ios_command:
+ commands:
+ - show logging
+ vars:
+ ansible_terminal_stderr_re:
+ - pattern: 'connection timed out'
+ flags: 're.I'
+
+The terminal plugin regex options ``ansible_terminal_stderr_re`` and ``ansible_terminal_stdout_re`` have
+``pattern`` and ``flags`` as keys. The value of the ``flags`` key should be a value that is accepted by
+the Python ``re.compile`` function.
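+
+The same keys apply to ``ansible_terminal_stdout_re``. As an illustration (this pattern value is hypothetical, not a plugin default):
+
+.. code-block:: yaml
+
+   vars:
+     ansible_terminal_stdout_re:
+       - pattern: '[\r\n]?\S+[>#]\s*$'
+         flags: 're.M'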
+
+
+Intermittent failure while using ``ansible.netcommon.network_cli`` connection type due to slower network or remote target host
+----------------------------------------------------------------------------------------------------------------------------------
+
+In Ansible 2.9 and later, the ``ansible.netcommon.network_cli`` connection plugin adds a configuration option to control
+the number of attempts to connect to a remote host. The default number of attempts is three.
+After every retry the delay between attempts increases by a power of 2 (in seconds) until either the
+maximum number of attempts is exhausted or the ``persistent_command_timeout`` or ``persistent_connect_timeout`` timer is triggered.
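+
+The retry count can also be set per host or per task through a variable; a sketch, assuming the ``ansible_network_cli_retries`` variable that maps to this plugin option:
+
+.. code-block:: yaml
+
+   - name: gather ios facts with extra connection retries
+     cisco.ios.ios_facts:
+       gather_subset: all
+     vars:
+       ansible_network_cli_retries: 5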
+
+To make this a global setting, add the following to your ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [persistent_connection]
+ network_cli_retries = 5
diff --git a/docs/docsite/rst/network/user_guide/network_resource_modules.rst b/docs/docsite/rst/network/user_guide/network_resource_modules.rst
new file mode 100644
index 00000000..f319d7cf
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/network_resource_modules.rst
@@ -0,0 +1,196 @@
+.. _resource_modules:
+
+************************
+Network Resource Modules
+************************
+
+Ansible network resource modules simplify and standardize how you manage different network devices. Network devices separate configuration into sections (such as interfaces and VLANs) that apply to a network service. Ansible network resource modules take advantage of this to allow you to configure subsections or *resources* within the network device configuration. Network resource modules provide a consistent experience across different network devices.
+
+
+.. contents::
+ :local:
+
+Network resource module states
+===============================
+
+You use the network resource modules by assigning a state to what you want the module to do. The resource modules support the following states:
+
+merged
+ Ansible merges the on-device configuration with the provided configuration in the task.
+
+replaced
+ Ansible replaces the on-device configuration subsection with the provided configuration subsection in the task.
+
+overridden
+ Ansible overrides the on-device configuration for the resource with the provided configuration in the task. Use caution with this state as you could remove your access to the device (for example, by overriding the management interface configuration).
+
+deleted
+ Ansible deletes the on-device configuration subsection and restores any default settings.
+
+gathered
+ Ansible displays the resource details gathered from the network device and accessed with the ``gathered`` key in the result.
+
+rendered
+ Ansible renders the provided configuration in the task in the device-native format (for example, Cisco IOS CLI). Ansible returns this rendered configuration in the ``rendered`` key in the result. Note this state does not communicate with the network device and can be used offline.
+
+parsed
+  Ansible parses the configuration from the ``running_config`` option into Ansible structured data in the ``parsed`` key in the result. Note this does not gather the configuration from the network device so this state can be used offline (see the sketch after this list).
+
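+As an offline illustration of the ``parsed`` state (the module, file name, and lookup are illustrative):
+
+.. code-block:: yaml
+
+   - name: parse a stored running configuration offline
+     cisco.ios.ios_l3_interfaces:
+       running_config: "{{ lookup('file', 'backups/ios_run.cfg') }}"
+       state: parsed
+     register: parsed_result
+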
+Using network resource modules
+==============================
+
+This example configures the L3 interface resource on a Cisco IOS device, based on different state settings.
+
+.. code-block:: yaml
+
+   - name: configure l3 interface
+     cisco.ios.ios_l3_interfaces:
+       config: "{{ config }}"
+       state: <state>
+
+The following table shows an example of how an initial resource configuration changes with this task for different states.
+
++-----------------------------------------+------------------------------------+-----------------------------------------+
+| Resource starting configuration | task-provided configuration (YAML) | Final resource configuration on device |
++=========================================+====================================+=========================================+
+| .. code-block:: text | .. code-block:: yaml | *merged* |
+| | | .. code-block:: text |
+| interface loopback100 | config: | |
+| ip address 10.10.1.100 255.255.255.0 | - ipv6: | interface loopback100 |
+| ipv6 address FC00:100/64 | - address: fc00::100/64 | ip address 10.10.1.100 255.255.255.0|
+| | - address: fc00::101/64 | ipv6 address FC00:100/64 |
+| | name: loopback100 | ipv6 address FC00:101/64 |
+| | +-----------------------------------------+
+| | | *replaced* |
+| | | .. code-block:: text |
+| | | |
+| | | interface loopback100 |
+| | | no ip address |
+| | | ipv6 address FC00:100/64 |
+| | | ipv6 address FC00:101/64 |
+| | +-----------------------------------------+
+| | | *overridden* |
+| | | Incorrect use case. This would remove |
+| | | all interfaces from the device |
+| | | (including the mgmt interface) except |
+| | | the configured loopback100 |
+| | +-----------------------------------------+
+| | | *deleted* |
+| | | .. code-block:: text |
+| | | |
+| | | interface loopback100 |
+| | | no ip address |
++-----------------------------------------+------------------------------------+-----------------------------------------+
+
+Network resource modules return the following details:
+
+* The *before* state - the existing resource configuration before the task was executed.
+* The *after* state - the new resource configuration that exists on the network device after the task was executed.
+* Commands - any commands configured on the device.
+
+.. code-block:: yaml
+
+ ok: [nxos101] =>
+ result:
+ after:
+ contact: IT Support
+ location: Room E, Building 6, Seattle, WA 98134
+ users:
+ - algorithm: md5
+ group: network-admin
+ localized_key: true
+ password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69'
+ privacy_password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69'
+ username: admin
+ before:
+ contact: IT Support
+ location: Room E, Building 5, Seattle HQ
+ users:
+ - algorithm: md5
+ group: network-admin
+ localized_key: true
+ password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69'
+ privacy_password: '0x73fd9a2cc8c53ed3dd4ed8f4ff157e69'
+ username: admin
+ changed: true
+ commands:
+ - snmp-server location Room E, Building 6, Seattle, WA 98134
+ failed: false
+
+
+Example: Verifying the network device configuration has not changed
+====================================================================
+
+The following playbook uses the :ref:`arista.eos.eos_l3_interfaces <ansible_collections.arista.eos.eos_l3_interfaces_module>` module to gather a subset of the network device configuration (Layer 3 interfaces only) and verifies the information is accurate and has not changed. This playbook passes the results of :ref:`arista.eos.eos_facts <ansible_collections.arista.eos.eos_facts_module>` directly to the ``arista.eos.eos_l3_interfaces`` module.
+
+
+.. code-block:: yaml
+
+ - name: Example of facts being pushed right back to device.
+ hosts: arista
+ gather_facts: false
+ tasks:
+ - name: grab arista eos facts
+ arista.eos.eos_facts:
+ gather_subset: min
+ gather_network_resources: l3_interfaces
+
+ - name: Ensure that the IP address information is accurate.
+ arista.eos.eos_l3_interfaces:
+ config: "{{ ansible_network_resources['l3_interfaces'] }}"
+ register: result
+
+ - name: Ensure config did not change.
+ assert:
+ that: not result.changed
+
+Example: Acquiring and updating VLANs on a network device
+==========================================================
+
+This example shows how you can use resource modules to:
+
+#. Retrieve the current configuration on a network device.
+#. Save that configuration locally.
+#. Update that configuration and apply it to the network device.
+
+This example uses the ``cisco.ios.ios_vlans`` resource module to retrieve and update the VLANs on an IOS device.
+
+1. Retrieve the current IOS VLAN configuration:
+
+.. code-block:: yaml
+
+ - name: Gather VLAN information as structured data
+ cisco.ios.ios_facts:
+ gather_subset:
+ - '!all'
+ - '!min'
+ gather_network_resources:
+ - 'vlans'
+
+2. Store the VLAN configuration locally:
+
+.. code-block:: yaml
+
+ - name: Store VLAN facts to host_vars
+ copy:
+ content: "{{ ansible_network_resources | to_nice_yaml }}"
+ dest: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}"
+
+3. Modify the stored file to update the VLAN configuration locally.
+
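+For example, the stored host_vars file might look like this after adding a VLAN (the IDs and names are illustrative; the top-level ``vlans`` key is what the next task consumes):
+
+.. code-block:: yaml
+
+   vlans:
+   - name: management
+     vlan_id: 10
+   - name: guest_wifi
+     vlan_id: 20
+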
+4. Merge the updated VLAN configuration with the existing configuration on the device:
+
+.. code-block:: yaml
+
+ - name: Make VLAN config changes by updating stored facts on the controller.
+ cisco.ios.ios_vlans:
+ config: "{{ vlans }}"
+ state: merged
+ tags: update_config
+
+.. seealso::
+
+ `Network Features in Ansible 2.9 <https://www.ansible.com/blog/network-features-coming-soon-in-ansible-engine-2.9>`_
+       An introductory blog post on network resource modules.
+ `Deep Dive into Network Resource Modules <https://www.ansible.com/deep-dive-into-ansible-network-resource-module>`_
+ A deeper dive presentation into network resource modules.
diff --git a/docs/docsite/rst/network/user_guide/network_working_with_command_output.rst b/docs/docsite/rst/network/user_guide/network_working_with_command_output.rst
new file mode 100644
index 00000000..12040d4b
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/network_working_with_command_output.rst
@@ -0,0 +1,122 @@
+.. _networking_working_with_command_output:
+
+**********************************************************
+Working with command output and prompts in network modules
+**********************************************************
+
+.. contents::
+ :local:
+
+Conditionals in networking modules
+===================================
+
+Ansible allows you to use conditionals to control the flow of your playbooks. Ansible networking command modules use the following unique conditional statements.
+
+* ``eq`` - Equal
+* ``neq`` - Not equal
+* ``gt`` - Greater than
+* ``ge`` - Greater than or equal
+* ``lt`` - Less than
+* ``le`` - Less than or equal
+* ``contains`` - Object contains specified item
+
+
+Conditional statements evaluate the results from the commands that are
+executed remotely on the device. Once the task executes the command
+set, the ``wait_for`` argument can be used to evaluate the results before
+returning control to the Ansible playbook.
+
+For example::
+
+ ---
+ - name: wait for interface to be admin enabled
+ arista.eos.eos_command:
+ commands:
+ - show interface Ethernet4 | json
+ wait_for:
+ - "result[0].interfaces.Ethernet4.interfaceStatus eq connected"
+
+In the above example task, the command :code:`show interface Ethernet4 | json`
+is executed on the remote device and the results are evaluated. If
+the path
+:code:`(result[0].interfaces.Ethernet4.interfaceStatus)` is not equal to
+"connected", then the command is retried. This process continues
+until either the condition is satisfied or the number of retries has
+expired (by default, this is 10 retries at 1 second intervals).
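+
+The retry behavior can be tuned with the command module's ``retries`` and ``interval`` parameters (the values here are illustrative):
+
+.. code-block:: yaml
+
+   - name: wait up to 40 seconds for Ethernet4 to come up
+     arista.eos.eos_command:
+       commands:
+         - show interface Ethernet4 | json
+       wait_for:
+         - "result[0].interfaces.Ethernet4.interfaceStatus eq connected"
+       retries: 20
+       interval: 2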
+
+The command modules can also evaluate more than one set of command
+results in a single task. For instance::
+
+ ---
+ - name: wait for interfaces to be admin enabled
+ arista.eos.eos_command:
+ commands:
+ - show interface Ethernet4 | json
+ - show interface Ethernet5 | json
+ wait_for:
+ - "result[0].interfaces.Ethernet4.interfaceStatus eq connected"
+ - "result[1].interfaces.Ethernet5.interfaceStatus eq connected"
+
+In the above example, two commands are executed on the
+remote device, and the results are evaluated. By specifying the result
+index value (0 or 1), the correct result output is checked against the
+conditional.
+
+The ``wait_for`` argument must always start with ``result`` and then the
+command index in ``[]``, where ``0`` is the first command in the commands list,
+``1`` is the second command, ``2`` is the third, and so on.
+
+
+Handling prompts in network modules
+===================================
+
+Network devices may require that you answer a prompt before performing a change on the device. Individual network modules such as :ref:`cisco.ios.ios_command <ansible_collections.cisco.ios.ios_command_module>` and :ref:`cisco.nxos.nxos_command <ansible_collections.cisco.nxos.nxos_command_module>` can handle this with a ``prompt`` parameter.
+
+.. note::
+
+ ``prompt`` is a Python regex. If you add special characters such as ``?`` in the ``prompt`` value, the prompt won't match and you will get a timeout. To avoid this, ensure that the ``prompt`` value is a Python regex that matches the actual device prompt. Any special characters must be handled correctly in the ``prompt`` regex.
+
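+For example, to match a prompt containing literal ``[`` and ``]`` characters, escape them in the regex (the command and prompt text are illustrative):
+
+.. code-block:: yaml
+
+   - name: clear interface counters and answer the confirmation prompt
+     cisco.ios.ios_command:
+       commands:
+         - command: clear counters GigabitEthernet1
+           prompt: 'Clear "show interface" counters on this interface \[confirm\]'
+           answer: 'y'
+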
+You can also use the :ref:`ansible.netcommon.cli_command <ansible_collections.ansible.netcommon.cli_command_module>` to handle multiple prompts.
+
+.. code-block:: yaml
+
+ ---
+ - name: multiple prompt, multiple answer (mandatory check for all prompts)
+ ansible.netcommon.cli_command:
+ command: "copy sftp sftp://user@host//user/test.img"
+ check_all: True
+ prompt:
+ - "Confirm download operation"
+ - "Password"
+ - "Do you want to change that to the standby image"
+ answer:
+ - 'y'
+ - <password>
+ - 'y'
+
+You must list the prompt and the answers in the same order (that is, prompt[0] is answered by answer[0]).
+
+In the above example, ``check_all: True`` ensures that the task gives the matching answer to each prompt. Without that setting, a task with multiple prompts would give the first answer to every prompt.
+
+In the following example, the second answer would be ignored and ``y`` would be the answer given to both prompts. That is, this task only works because both answers are identical. Also notice again that ``prompt`` must be a Python regex, which is why the ``?`` is escaped in the first prompt.
+
+.. code-block:: yaml
+
+ ---
+ - name: reboot ios device
+ ansible.netcommon.cli_command:
+ command: reload
+ prompt:
+ - Save\?
+ - confirm
+ answer:
+ - y
+ - y
+
+.. seealso::
+
+ `Rebooting network devices with Ansible <https://www.ansible.com/blog/rebooting-network-devices-with-ansible>`_
+ Examples using ``wait_for``, ``wait_for_connection``, and ``prompt`` for network devices.
+
+ `Deep dive on cli_command <https://www.ansible.com/blog/deep-dive-on-cli-command-for-network-automation>`_
+ Detailed overview of how to use the ``cli_command``.
diff --git a/docs/docsite/rst/network/user_guide/platform_ce.rst b/docs/docsite/rst/network/user_guide/platform_ce.rst
new file mode 100644
index 00000000..19491748
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_ce.rst
@@ -0,0 +1,213 @@
+.. _ce_platform_options:
+
+***************************************
+CloudEngine OS Platform Options
+***************************************
+
+CloudEngine CE OS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports multiple connections. This page offers details on how each connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ========================================== =========================
+ .. CLI NETCONF
+
+
+ ==================== ========================================== =========================
+ Protocol SSH XML over SSH
+
+ Credentials uses SSH keys / SSH-agent if present uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host) via a bastion (jump host)
+
+ Connection Settings ``ansible_connection:`` ``ansible_connection:``
+ ``ansible.netcommon.network_cli`` ``ansible.netcommon.netconf``
+
+ |enable_mode| not supported by ce OS not supported by ce OS
+
+ Returned Data Format Refer to individual module documentation Refer to individual module documentation
+ ==================== ========================================== =========================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.netconf`` or ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI inventory ``[ce:vars]``
+--------------------------------------
+
+.. code-block:: ini
+
+ [ce:vars]
+ ansible_connection=ansible.netcommon.network_cli
+ ansible_network_os=community.network.ce
+ ansible_user=myuser
+ ansible_password=!vault...
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve CE OS version
+ community.network.ce_command:
+ commands: display version
+ when: ansible_network_os == 'community.network.ce'
+
+
+Using NETCONF in Ansible
+========================
+
+Enabling NETCONF
+----------------
+
+Before you can use NETCONF to connect to a switch, you must:
+
+- install the ``ncclient`` python package on your control node(s) with ``pip install ncclient``
+- enable NETCONF on the CloudEngine OS device(s)
+
+To enable NETCONF on a new switch using Ansible, use the ``community.network.ce_config`` module with the CLI connection. Set up your platform-level variables just like in the CLI example above, then run a playbook task like this:
+
+.. code-block:: yaml
+
+ - name: Enable NETCONF
+ connection: ansible.netcommon.network_cli
+ community.network.ce_config:
+ lines:
+ - snetconf server enable
+ when: ansible_network_os == 'community.network.ce'
+
+Once NETCONF is enabled, change your variables to use the NETCONF connection.
+
+Example NETCONF inventory ``[ce:vars]``
+------------------------------------------
+
+.. code-block:: ini
+
+ [ce:vars]
+ ansible_connection=ansible.netcommon.netconf
+ ansible_network_os=community.network.ce
+ ansible_user=myuser
+   ansible_password=!vault...
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+Example NETCONF task
+--------------------
+
+.. code-block:: yaml
+
+ - name: Create a vlan, id is 50(ce)
+ community.network.ce_vlan:
+ vlan_id: 50
+ name: WEB
+ when: ansible_network_os == 'community.network.ce'
+
+
+Notes
+========================
+
+Modules that work with ``ansible.netcommon.network_cli``
+---------------------------------------------------------
+
+.. code-block:: text
+
+ community.network.ce_acl_interface
+ community.network.ce_command
+ community.network.ce_config
+ community.network.ce_evpn_bgp
+ community.network.ce_evpn_bgp_rr
+ community.network.ce_evpn_global
+ community.network.ce_facts
+ community.network.ce_mlag_interface
+ community.network.ce_mtu
+ community.network.ce_netstream_aging
+ community.network.ce_netstream_export
+ community.network.ce_netstream_global
+ community.network.ce_netstream_template
+ community.network.ce_ntp_auth
+ community.network.ce_rollback
+ community.network.ce_snmp_contact
+ community.network.ce_snmp_location
+ community.network.ce_snmp_traps
+ community.network.ce_startup
+ community.network.ce_stp
+ community.network.ce_vxlan_arp
+ community.network.ce_vxlan_gateway
+ community.network.ce_vxlan_global
+
+
+Modules that work with ``ansible.netcommon.netconf``
+-----------------------------------------------------
+
+.. code-block:: text
+
+ community.network.ce_aaa_server
+ community.network.ce_aaa_server_host
+ community.network.ce_acl
+ community.network.ce_acl_advance
+ community.network.ce_bfd_global
+ community.network.ce_bfd_session
+ community.network.ce_bfd_view
+ community.network.ce_bgp
+ community.network.ce_bgp_af
+ community.network.ce_bgp_neighbor
+ community.network.ce_bgp_neighbor_af
+ community.network.ce_dldp
+ community.network.ce_dldp_interface
+ community.network.ce_eth_trunk
+ community.network.ce_evpn_bd_vni
+ community.network.ce_file_copy
+ community.network.ce_info_center_debug
+ community.network.ce_info_center_global
+ community.network.ce_info_center_log
+ community.network.ce_info_center_trap
+ community.network.ce_interface
+ community.network.ce_interface_ospf
+ community.network.ce_ip_interface
+ community.network.ce_lacp
+ community.network.ce_link_status
+ community.network.ce_lldp
+ community.network.ce_lldp_interface
+ community.network.ce_mlag_config
+ community.network.ce_netconf
+ community.network.ce_ntp
+ community.network.ce_ospf
+ community.network.ce_ospf_vrf
+ community.network.ce_reboot
+ community.network.ce_sflow
+ community.network.ce_snmp_community
+ community.network.ce_snmp_target_host
+ community.network.ce_snmp_user
+ community.network.ce_static_route
+ community.network.ce_static_route_bfd
+ community.network.ce_switchport
+ community.network.ce_vlan
+ community.network.ce_vrf
+ community.network.ce_vrf_af
+ community.network.ce_vrf_interface
+ community.network.ce_vrrp
+ community.network.ce_vxlan_tunnel
+ community.network.ce_vxlan_vap
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_cnos.rst b/docs/docsite/rst/network/user_guide/platform_cnos.rst
new file mode 100644
index 00000000..62e1d549
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_cnos.rst
@@ -0,0 +1,78 @@
+.. _cnos_platform_options:
+
+***************************************
+CNOS Platform Options
+***************************************
+
+CNOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on CNOS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+================================================================================
+
+Example CLI ``group_vars/cnos.yml``
+--------------------------------------------------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.cnos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve CNOS OS version
+ community.network.cnos_command:
+ commands: show version
+ when: ansible_network_os == 'community.network.cnos'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_dellos10.rst b/docs/docsite/rst/network/user_guide/platform_dellos10.rst
new file mode 100644
index 00000000..638932a2
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_dellos10.rst
@@ -0,0 +1,80 @@
+.. _dellos10_platform_options:
+
+***************************************
+Dell OS10 Platform Options
+***************************************
+
+The `dellemc.os10 <https://galaxy.ansible.com/dellemc_networking/os10>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on OS10 in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+
+Using CLI in Ansible
+================================================================================
+
+Example CLI ``group_vars/dellos10.yml``
+---------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: dellemc.os10.os10
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (dellos10)
+ dellemc.os10.os10_config:
+ backup: yes
+ register: backup_dellos10_location
+ when: ansible_network_os == 'dellemc.os10.os10'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_dellos6.rst b/docs/docsite/rst/network/user_guide/platform_dellos6.rst
new file mode 100644
index 00000000..d315c59d
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_dellos6.rst
@@ -0,0 +1,79 @@
+.. _dellos6_platform_options:
+
+***************************************
+Dell OS6 Platform Options
+***************************************
+
+The `dellemc.os6 <https://github.com/ansible-collections/dellemc.os6>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on OS6 in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+================================================================================
+
+Example CLI ``group_vars/dellos6.yml``
+--------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: dellemc.os6.os6
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (dellos6)
+ dellemc.os6.os6_config:
+ backup: yes
+     register: backup_dellos6_location
+ when: ansible_network_os == 'dellemc.os6.os6'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_dellos9.rst b/docs/docsite/rst/network/user_guide/platform_dellos9.rst
new file mode 100644
index 00000000..cadde622
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_dellos9.rst
@@ -0,0 +1,79 @@
+.. _dellos9_platform_options:
+
+***************************************
+Dell OS9 Platform Options
+***************************************
+
+The `dellemc.os9 <https://github.com/ansible-collections/dellemc.os9>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on OS9 in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+================================================================================
+
+Example CLI ``group_vars/dellos9.yml``
+--------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: dellemc.os9.os9
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (dellos9)
+ dellemc.os9.os9_config:
+ backup: yes
+ register: backup_dellos9_location
+ when: ansible_network_os == 'dellemc.os9.os9'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_enos.rst b/docs/docsite/rst/network/user_guide/platform_enos.rst
new file mode 100644
index 00000000..58c0b83e
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_enos.rst
@@ -0,0 +1,80 @@
+.. _enos_platform_options:
+
+***************************************
+ENOS Platform Options
+***************************************
+
+ENOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on ENOS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
++---------------------------+-----------------------------------------------+
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+================================================================================
+
+Example CLI ``group_vars/enos.yml``
+--------------------------------------------------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.enos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve ENOS OS version
+ community.network.enos_command:
+ commands: show version
+ when: ansible_network_os == 'community.network.enos'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_eos.rst b/docs/docsite/rst/network/user_guide/platform_eos.rst
new file mode 100644
index 00000000..065a7dc0
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_eos.rst
@@ -0,0 +1,140 @@
+.. _eos_platform_options:
+
+***************************************
+EOS Platform Options
+***************************************
+
+The `Arista EOS <https://galaxy.ansible.com/arista/eos>`_ collection supports multiple connections. This page offers details on how each connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ========================================== ===========================
+ .. CLI eAPI
+ ==================== ========================================== ===========================
+ Protocol SSH HTTP(S)
+
+ Credentials uses SSH keys / SSH-agent if present uses HTTPS certificates if
+ present
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host) via a web proxy
+
+ Connection Settings ``ansible_connection:`` ``ansible_connection:``
+ ``ansible.netcommon.network_cli`` ``ansible.netcommon.httpapi``
+
+
+ |enable_mode| supported: |br| supported: |br|
+
+ * use ``ansible_become: yes`` * ``httpapi``
+ with ``ansible_become_method: enable`` uses ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+
+ Returned Data Format ``stdout[0].`` ``stdout[0].messages[0].``
+ ==================== ========================================== ===========================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+The ``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.httpapi`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/eos.yml``
+----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: arista.eos.eos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (eos)
+ arista.eos.eos_config:
+ backup: yes
+ register: backup_eos_location
+ when: ansible_network_os == 'arista.eos.eos'
+
+
+
+Using eAPI in Ansible
+=====================
+
+Enabling eAPI
+-------------
+
+Before you can use eAPI to connect to a switch, you must enable eAPI. To enable eAPI on a new switch with Ansible, use the ``arista.eos.eos_eapi`` module through the CLI connection. Set up ``group_vars/eos.yml`` just like in the CLI example above, then run a playbook task like this:
+
+.. code-block:: yaml
+
+ - name: Enable eAPI
+ arista.eos.eos_eapi:
+ enable_http: yes
+ enable_https: yes
+ become: true
+ become_method: enable
+ when: ansible_network_os == 'arista.eos.eos'
+
+You can find more options for enabling HTTP/HTTPS connections in the :ref:`arista.eos.eos_eapi <ansible_collections.arista.eos.eos_eapi_module>` module documentation.
+
+Once eAPI is enabled, change your ``group_vars/eos.yml`` to use the eAPI connection.
+
+Example eAPI ``group_vars/eos.yml``
+-----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.httpapi
+ ansible_network_os: arista.eos.eos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ proxy_env:
+ http_proxy: http://proxy.example.com:8080
+
+- If you are accessing your host directly (not through a web proxy) you can remove the ``proxy_env`` configuration.
+- If you are accessing your host through a web proxy using ``https``, change ``http_proxy`` to ``https_proxy``.
+
+
+Example eAPI task
+-----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (eos)
+ arista.eos.eos_config:
+ backup: yes
+ register: backup_eos_location
+ environment: "{{ proxy_env }}"
+ when: ansible_network_os == 'arista.eos.eos'
+
+In this example the ``proxy_env`` variable defined in ``group_vars`` gets passed to the ``environment`` option of the module in the task.
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_eric_eccli.rst b/docs/docsite/rst/network/user_guide/platform_eric_eccli.rst
new file mode 100644
index 00000000..cdd45779
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_eric_eccli.rst
@@ -0,0 +1,73 @@
+.. _eic_eccli_platform_options:
+
+***************************************
+ERIC_ECCLI Platform Options
+***************************************
+
+Ericsson ERIC_ECCLI is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. This page offers details on how to use ``ansible.netcommon.network_cli`` on ERIC_ECCLI in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported by ERIC_ECCLI
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+ERIC_ECCLI does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/eric_eccli.yml``
+-----------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.eric_eccli
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: run show version on remote devices (eric_eccli)
+ community.network.eric_eccli_command:
+ commands: show version
+ when: ansible_network_os == 'community.network.eric_eccli'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_exos.rst b/docs/docsite/rst/network/user_guide/platform_exos.rst
new file mode 100644
index 00000000..e27e9ae4
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_exos.rst
@@ -0,0 +1,108 @@
+.. _exos_platform_options:
+
+***************************************
+EXOS Platform Options
+***************************************
+
+Extreme EXOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports multiple connections. This page offers details on how each connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+
+.. table::
+ :class: documentation-table
+
+ ==================== ========================================== =========================
+ .. CLI EXOS-API
+ ==================== ========================================== =========================
+ Protocol SSH HTTP(S)
+
+ Credentials uses SSH keys / SSH-agent if present uses HTTPS certificates if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host) via a web proxy
+
+ Connection Settings ``ansible_connection:`` ``ansible_connection:``
+ ``ansible.netcommon.network_cli`` ``ansible.netcommon.httpapi``
+
+ |enable_mode| not supported by EXOS not supported by EXOS
+
+ Returned Data Format ``stdout[0].`` ``stdout[0].messages[0].``
+ ==================== ========================================== =========================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+EXOS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.httpapi``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/exos.yml``
+-----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.exos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve EXOS OS version
+ community.network.exos_command:
+ commands: show version
+ when: ansible_network_os == 'community.network.exos'
+
+
+
+Using EXOS-API in Ansible
+=========================
+
+Example EXOS-API ``group_vars/exos.yml``
+----------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.httpapi
+ ansible_network_os: community.network.exos
+ ansible_user: myuser
+ ansible_password: !vault...
+ proxy_env:
+ http_proxy: http://proxy.example.com:8080
+
+- If you are accessing your host directly (not through a web proxy) you can remove the ``proxy_env`` configuration.
+- If you are accessing your host through a web proxy using ``https``, change ``http_proxy`` to ``https_proxy``.
+
+
+Example EXOS-API task
+---------------------
+
+.. code-block:: yaml
+
+ - name: Retrieve EXOS OS version
+ community.network.exos_command:
+ commands: show version
+ when: ansible_network_os == 'community.network.exos'
+
+In this example the ``proxy_env`` variable defined in ``group_vars`` gets passed to the ``environment`` option of the module used in the task.
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_frr.rst b/docs/docsite/rst/network/user_guide/platform_frr.rst
new file mode 100644
index 00000000..0d7bad14
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_frr.rst
@@ -0,0 +1,73 @@
+.. _frr_platform_options:
+
+***************************************
+FRR Platform Options
+***************************************
+
+The `FRR <https://galaxy.ansible.com/frr/frr>`_ collection supports the ``ansible.netcommon.network_cli`` connection. This section provides details on how to use this connection for Free Range Routing (FRR).
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/frr.yml``
+----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: frr.frr.frr
+ ansible_user: frruser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+- The ``ansible_user`` should be a part of the ``frrvty`` group and should have the default shell set to ``/bin/vtysh``.
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Gather FRR facts
+ frr.frr.frr_facts:
+ gather_subset:
+ - config
+ - hardware
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_icx.rst b/docs/docsite/rst/network/user_guide/platform_icx.rst
new file mode 100644
index 00000000..96777f1e
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_icx.rst
@@ -0,0 +1,77 @@
+.. _icx_platform_options:
+
+***************************************
+ICX Platform Options
+***************************************
+
+ICX is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on ICX in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes`` with
+ ``ansible_become_method: enable`` and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/icx.yml``
+----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.icx
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (icx)
+   community.network.icx_config:
+     backup: yes
+   register: backup_icx_location
+   when: ansible_network_os == 'community.network.icx'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_index.rst b/docs/docsite/rst/network/user_guide/platform_index.rst
new file mode 100644
index 00000000..ad372de3
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_index.rst
@@ -0,0 +1,121 @@
+.. _platform_options:
+
+****************
+Platform Options
+****************
+
+Some Ansible Network platforms support multiple connection types, privilege escalation (``enable`` mode), or other options. The pages in this section offer standardized guides to understanding available options on each network platform. We welcome contributions from community-maintained platforms to this section.
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Platform Options
+
+ platform_ce
+ platform_cnos
+ platform_dellos6
+ platform_dellos9
+ platform_dellos10
+ platform_enos
+ platform_eos
+ platform_eric_eccli
+ platform_exos
+ platform_frr
+ platform_icx
+ platform_ios
+ platform_iosxr
+ platform_ironware
+ platform_junos
+ platform_meraki
+ platform_netvisor
+ platform_nos
+ platform_nxos
+ platform_routeros
+ platform_slxos
+ platform_voss
+ platform_vyos
+ platform_netconf_enabled
+
+.. _settings_by_platform:
+
+Settings by Platform
+================================
+
+.. raw:: html
+
+ <style>
+ /* Style for this single table. Add delimiters between header columns */
+ table#network-platform-table thead tr th.head {
+ border-left-width: 1px;
+ border-left-color: rgb(225, 228, 229);
+ border-left-style: solid;
+ }
+ </style>
+
+.. table::
+ :name: network-platform-table
+
+ =============================== ================================ =========== ======= ======= ===========
+ .. ``ansible_connection:`` settings available
+ ----------------------------------------------------------------- ------------------------------------------
+ Network OS ``ansible_network_os:`` network_cli netconf httpapi local
+ =============================== ================================ =========== ======= ======= ===========
+ `Arista EOS`_ `[†]`_ ``arista.eos.eos`` ✓ ✓ ✓
+ `Ciena SAOS6`_ ``ciena.saos6.saos6`` ✓ ✓
+ `Cisco ASA`_ `[†]`_ ``cisco.asa.asa`` ✓ ✓
+ `Cisco IOS`_ `[†]`_ ``cisco.ios.ios`` ✓ ✓
+ `Cisco IOS XR`_ `[†]`_ ``cisco.iosxr.iosxr`` ✓ ✓
+ `Cisco NX-OS`_ `[†]`_ ``cisco.nxos.nxos`` ✓ ✓ ✓
+ `Cloudengine OS`_ ``community.network.ce`` ✓ ✓ ✓
+ `Dell OS6`_ ``dellemc.os6.os6`` ✓ ✓
+ `Dell OS9`_ ``dellemc.os9.os9`` ✓ ✓
+ `Dell OS10`_ ``dellemc.os10.os10`` ✓ ✓
+ `Ericsson ECCLI`_ ``community.network.eric_eccli`` ✓ ✓
+ `Extreme EXOS`_ ``community.network.exos`` ✓ ✓
+ `Extreme IronWare`_ ``community.network.ironware`` ✓ ✓
+ `Extreme NOS`_ ``community.network.nos`` ✓
+ `Extreme SLX-OS`_ ``community.network.slxos`` ✓
+ `Extreme VOSS`_ ``community.network.voss`` ✓
+ `F5 BIG-IP`_ ✓
+ `F5 BIG-IQ`_ ✓
+ `Junos OS`_ `[†]`_ ``junipernetworks.junos.junos`` ✓ ✓ ✓
+ `Lenovo CNOS`_ ``community.network.cnos`` ✓ ✓
+ `Lenovo ENOS`_ ``community.network.enos`` ✓ ✓
+ `Meraki`_ ✓
+ `MikroTik RouterOS`_ ``community.network.routeros`` ✓
+ `Nokia SR OS`_ ✓
+ `Pluribus Netvisor`_ ``community.network.netvisor`` ✓
+ `Ruckus ICX`_ ``community.network.icx`` ✓
+ `VyOS`_ `[†]`_ ``vyos.vyos.vyos`` ✓ ✓
+ OS that supports Netconf `[†]`_ ``<network-os>`` ✓ ✓
+ =============================== ================================ =========== ======= ======= ===========
+
+.. _Arista EOS: https://galaxy.ansible.com/arista/eos
+.. _Ciena SAOS6: https://galaxy.ansible.com/ciena/saos6
+.. _Cisco ASA: https://galaxy.ansible.com/cisco/asa
+.. _Cisco IOS: https://galaxy.ansible.com/cisco/ios
+.. _Cisco IOS XR: https://galaxy.ansible.com/cisco/iosxr
+.. _Cisco NX-OS: https://galaxy.ansible.com/cisco/nxos
+.. _Cloudengine OS: https://galaxy.ansible.com/community/network
+.. _Dell OS6: https://github.com/ansible-collections/dellemc.os6
+.. _Dell OS9: https://github.com/ansible-collections/dellemc.os9
+.. _Dell OS10: https://galaxy.ansible.com/dellemc/os10
+.. _Ericsson ECCLI: https://galaxy.ansible.com/community/network
+.. _Extreme EXOS: https://galaxy.ansible.com/community/network
+.. _Extreme IronWare: https://galaxy.ansible.com/community/network
+.. _Extreme NOS: https://galaxy.ansible.com/community/network
+.. _Extreme SLX-OS: https://galaxy.ansible.com/community/network
+.. _Extreme VOSS: https://galaxy.ansible.com/community/network
+.. _F5 BIG-IP: https://galaxy.ansible.com/f5networks/f5_modules
+.. _F5 BIG-IQ: https://galaxy.ansible.com/f5networks/f5_modules
+.. _Junos OS: https://galaxy.ansible.com/junipernetworks/junos
+.. _Lenovo CNOS: https://galaxy.ansible.com/community/network
+.. _Lenovo ENOS: https://galaxy.ansible.com/community/network
+.. _Meraki: https://galaxy.ansible.com/cisco/meraki
+.. _MikroTik RouterOS: https://galaxy.ansible.com/community/network
+.. _Nokia SR OS: https://galaxy.ansible.com/community/network
+.. _Pluribus Netvisor: https://galaxy.ansible.com/community/network
+.. _Ruckus ICX: https://galaxy.ansible.com/community/network
+.. _VyOS: https://galaxy.ansible.com/vyos/vyos
+.. _`[†]`:
+
+**[†]** Maintained by the Ansible Network Team
diff --git a/docs/docsite/rst/network/user_guide/platform_ios.rst b/docs/docsite/rst/network/user_guide/platform_ios.rst
new file mode 100644
index 00000000..1c53a5ca
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_ios.rst
@@ -0,0 +1,79 @@
+.. _ios_platform_options:
+
+***************************************
+IOS Platform Options
+***************************************
+
+The `Cisco IOS <https://galaxy.ansible.com/cisco/ios>`_ collection supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on IOS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes`` with
+ ``ansible_become_method: enable`` and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/ios.yml``
+----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: cisco.ios.ios
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (ios)
+   cisco.ios.ios_config:
+     backup: yes
+   register: backup_ios_location
+   when: ansible_network_os == 'cisco.ios.ios'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_iosxr.rst b/docs/docsite/rst/network/user_guide/platform_iosxr.rst
new file mode 100644
index 00000000..1e1eab27
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_iosxr.rst
@@ -0,0 +1,130 @@
+.. _iosxr_platform_options:
+
+***************************************
+IOS-XR Platform Options
+***************************************
+
+The `Cisco IOS-XR collection <https://galaxy.ansible.com/cisco/iosxr>`_ supports multiple connections. This page offers details on how each connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ========================================== =========================
+ .. CLI NETCONF
+
+ only for modules ``iosxr_banner``,
+ ``iosxr_interface``, ``iosxr_logging``,
+ ``iosxr_system``, ``iosxr_user``
+ ==================== ========================================== =========================
+ Protocol SSH XML over SSH
+
+ Credentials uses SSH keys / SSH-agent if present uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host) via a bastion (jump host)
+
+ Connection Settings ``ansible_connection:`` ``ansible_connection:``
+ ``ansible.netcommon.network_cli`` ``ansible.netcommon.netconf``
+
+ |enable_mode| not supported not supported
+
+ Returned Data Format Refer to individual module documentation Refer to individual module documentation
+ ==================== ========================================== =========================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.netconf`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI inventory ``[iosxr:vars]``
+--------------------------------------
+
+.. code-block:: ini
+
+ [iosxr:vars]
+ ansible_connection=ansible.netcommon.network_cli
+ ansible_network_os=cisco.iosxr.iosxr
+ ansible_user=myuser
+ ansible_password=!vault...
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve IOS-XR version
+   cisco.iosxr.iosxr_command:
+     commands: show version
+   when: ansible_network_os == 'cisco.iosxr.iosxr'
+
+
+Using NETCONF in Ansible
+========================
+
+Enabling NETCONF
+----------------
+
+Before you can use NETCONF to connect to a switch, you must:
+
+- install the ``ncclient`` python package on your control node(s) with ``pip install ncclient``
+- enable NETCONF on the Cisco IOS-XR device(s)
+
+To enable NETCONF on a new switch via Ansible, use the ``cisco.iosxr.iosxr_netconf`` module through the CLI connection. Set up your platform-level variables just like in the CLI example above, then run a playbook task like this:
+
+.. code-block:: yaml
+
+ - name: Enable NETCONF
+   connection: ansible.netcommon.network_cli
+   cisco.iosxr.iosxr_netconf:
+   when: ansible_network_os == 'cisco.iosxr.iosxr'
+
+Once NETCONF is enabled, change your variables to use the NETCONF connection.
+
+Example NETCONF inventory ``[iosxr:vars]``
+------------------------------------------
+
+.. code-block:: ini
+
+ [iosxr:vars]
+ ansible_connection=ansible.netcommon.netconf
+ ansible_network_os=cisco.iosxr.iosxr
+ ansible_user=myuser
+ ansible_password=!vault...
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+Example NETCONF task
+--------------------
+
+.. code-block:: yaml
+
+ - name: Configure hostname and domain-name
+   cisco.iosxr.iosxr_system:
+     hostname: iosxr01
+     domain_name: test.example.com
+     domain_search:
+       - ansible.com
+       - redhat.com
+       - cisco.com
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_ironware.rst b/docs/docsite/rst/network/user_guide/platform_ironware.rst
new file mode 100644
index 00000000..a17141c4
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_ironware.rst
@@ -0,0 +1,80 @@
+.. _ironware_platform_options:
+
+***************************************
+IronWare Platform Options
+***************************************
+
+IronWare is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and supports Enable Mode (Privilege Escalation). This page offers details on how to use Enable Mode on IronWare in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/mlx.yml``
+----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.ironware
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (ironware)
+   community.network.ironware_config:
+     backup: yes
+   register: backup_ironware_location
+   when: ansible_network_os == 'community.network.ironware'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_junos.rst b/docs/docsite/rst/network/user_guide/platform_junos.rst
new file mode 100644
index 00000000..3b838103
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_junos.rst
@@ -0,0 +1,129 @@
+.. _junos_platform_options:
+
+***************************************
+Junos OS Platform Options
+***************************************
+
+The `Juniper Junos OS <https://galaxy.ansible.com/junipernetworks/junos>`_ collection supports multiple connections. This page offers details on how each connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ========================================== =========================
+ .. CLI NETCONF
+
+ ``junos_netconf`` & ``junos_command`` all modules except ``junos_netconf``,
+ modules only which enables NETCONF
+ ==================== ========================================== =========================
+ Protocol SSH XML over SSH
+
+ Credentials uses SSH keys / SSH-agent if present uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host) via a bastion (jump host)
+
+ Connection Settings ``ansible_connection:`` ``ansible_connection:``
+ ``ansible.netcommon.network_cli`` ``ansible.netcommon.netconf``
+
+ |enable_mode| not supported by Junos OS not supported by Junos OS
+
+ Returned Data Format ``stdout[0].`` * json: ``result[0]['software-information'][0]['host-name'][0]['data']``
+ * text: ``result[1].interface-information[0].physical-interface[0].name[0].data``
+ * xml: ``result[1].rpc-reply.interface-information[0].physical-interface[0].name[0].data``
+ ==================== ========================================== =========================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.netconf`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI inventory ``[junos:vars]``
+--------------------------------------
+
+.. code-block:: ini
+
+ [junos:vars]
+ ansible_connection=ansible.netcommon.network_cli
+ ansible_network_os=junipernetworks.junos.junos
+ ansible_user=myuser
+ ansible_password=!vault...
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve Junos OS version
+   junipernetworks.junos.junos_command:
+     commands: show version
+   when: ansible_network_os == 'junipernetworks.junos.junos'
+
+
+Using NETCONF in Ansible
+========================
+
+Enabling NETCONF
+----------------
+
+Before you can use NETCONF to connect to a switch, you must:
+
+- install the ``ncclient`` python package on your control node(s) with ``pip install ncclient``
+- enable NETCONF on the Junos OS device(s)
+
+To enable NETCONF on a new switch via Ansible, use the ``junipernetworks.junos.junos_netconf`` module through the CLI connection. Set up your platform-level variables just like in the CLI example above, then run a playbook task like this:
+
+.. code-block:: yaml
+
+ - name: Enable NETCONF
+   connection: ansible.netcommon.network_cli
+   junipernetworks.junos.junos_netconf:
+   when: ansible_network_os == 'junipernetworks.junos.junos'
+
+Once NETCONF is enabled, change your variables to use the NETCONF connection.
+
+Example NETCONF inventory ``[junos:vars]``
+------------------------------------------
+
+.. code-block:: ini
+
+ [junos:vars]
+ ansible_connection=ansible.netcommon.netconf
+ ansible_network_os=junipernetworks.junos.junos
+ ansible_user=myuser
+ ansible_password=!vault...
+ ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+Example NETCONF task
+--------------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (junos)
+   junipernetworks.junos.junos_config:
+     backup: yes
+   register: backup_junos_location
+   when: ansible_network_os == 'junipernetworks.junos.junos'
+
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_meraki.rst b/docs/docsite/rst/network/user_guide/platform_meraki.rst
new file mode 100644
index 00000000..e51ca5b9
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_meraki.rst
@@ -0,0 +1,44 @@
+.. _meraki_platform_options:
+
+***************************************
+Meraki Platform Options
+***************************************
+
+The `cisco.meraki <https://galaxy.ansible.com/cisco/meraki>`_ collection only supports the ``local`` connection type at this time.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. Dashboard API
+ ==================== ==========================================
+ Protocol HTTP(S)
+
+ Credentials uses API key from Dashboard
+
+ Connection Settings ``ansible_connection: local``
+
+ Returned Data Format ``data.``
+ ==================== ==========================================
+
+
+Example Meraki task
+-------------------
+
+.. code-block:: yaml
+
+ - name: Create a Meraki organization
+   cisco.meraki.meraki_organization:
+     auth_key: abc12345
+     org_name: YourOrg
+     state: present
+   delegate_to: localhost
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst b/docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst
new file mode 100644
index 00000000..6169b076
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_netconf_enabled.rst
@@ -0,0 +1,133 @@
+.. _netconf_enabled_platform_options:
+
+***************************************
+Netconf-enabled Platform Options
+***************************************
+
+This page offers details on how the netconf connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. NETCONF
+
+ all modules except ``junos_netconf``,
+ which enables NETCONF
+ ==================== ==========================================
+ Protocol XML over SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.netconf``
+ ==================== ==========================================
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.netconf`` instead.
+
+Using NETCONF in Ansible
+========================
+
+Enabling NETCONF
+----------------
+
+Before you can use NETCONF to connect to a switch, you must:
+
+- install the ``ncclient`` Python package on your control node(s) with ``pip install ncclient``
+- enable NETCONF on the Junos OS device(s)
+
+To enable NETCONF on a new switch via Ansible, use the platform-specific module through the CLI connection, or enable it manually on the device.
+For example, set up your platform-level variables just like in the CLI example above, then run a playbook task like this:
+
+.. code-block:: yaml
+
+ - name: Enable NETCONF
+   connection: ansible.netcommon.network_cli
+   junipernetworks.junos.junos_netconf:
+   when: ansible_network_os == 'junipernetworks.junos.junos'
+
+Once NETCONF is enabled, change your variables to use the NETCONF connection.
+
+Example NETCONF inventory ``[junos:vars]``
+------------------------------------------
+
+.. code-block:: ini
+
+ [junos:vars]
+ ansible_connection=ansible.netcommon.netconf
+ ansible_network_os=junipernetworks.junos.junos
+ ansible_user=myuser
+ ansible_password=!vault...
+
+
+Example NETCONF task
+--------------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config
+   junipernetworks.junos.netconf_config:
+     backup: yes
+   register: backup_junos_location
+
+Example NETCONF task with configurable variables
+------------------------------------------------
+
+.. code-block:: yaml
+
+ - name: Configure interface while providing different private key file path
+   junipernetworks.junos.netconf_config:
+     backup: yes
+   register: backup_junos_location
+   vars:
+     ansible_private_key_file: /home/admin/.ssh/newprivatekeyfile
+
+Note: For the configurable variables of the netconf connection plugin, see :ref:`ansible.netcommon.netconf <ansible_collections.ansible.netcommon.netconf_connection>`.
+
+Bastion/Jumphost configuration
+------------------------------
+To use a jump host to connect to a NETCONF enabled device you must set the ``ANSIBLE_NETCONF_SSH_CONFIG`` environment variable.
+
+``ANSIBLE_NETCONF_SSH_CONFIG`` can be set to either:
+
+- ``1`` or ``TRUE`` (to use the default SSH config file, ``~/.ssh/config``)
+- the absolute path to a custom SSH config file
+
+The SSH config file should look something like:
+
+.. code-block:: ini
+
+ Host *
+ proxycommand ssh -o StrictHostKeyChecking=no -W %h:%p jumphost-username@jumphost.fqdn.com
+ StrictHostKeyChecking no
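+
+For example, you might point the variable at a dedicated config file before running your playbook (the path is a placeholder):
+
+.. code-block:: shell
+
+ export ANSIBLE_NETCONF_SSH_CONFIG=/home/admin/.ssh/netconf-jumphost-config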
+
+The jump host must use key-based authentication.
+
+You can either specify the private key used in the SSH config file:
+
+.. code-block:: ini
+
+ IdentityFile "/absolute/path/to/private-key.pem"
+
+Or you can use an ssh-agent.
+
+ansible_network_os auto-detection
+---------------------------------
+
+If ``ansible_network_os`` is not specified for a host, then Ansible will attempt to automatically detect what ``network_os`` plugin to use.
+
+``ansible_network_os`` auto-detection can also be triggered by using ``auto`` as the ``ansible_network_os``. (Note: Previously ``default`` was used instead of ``auto``).
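+
+A minimal ``group_vars`` sketch opting in to auto-detection (the connection type here is only an example; use whichever connection plugin your platform needs):
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.netconf
+ ansible_network_os: auto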
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_netvisor.rst b/docs/docsite/rst/network/user_guide/platform_netvisor.rst
new file mode 100644
index 00000000..57748658
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_netvisor.rst
@@ -0,0 +1,78 @@
+.. _netvisor_platform_options:
+
+**********************************
+Pluribus NETVISOR Platform Options
+**********************************
+
+Pluribus NETVISOR support is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in the future.
+This page offers details on how to use ``ansible.netcommon.network_cli`` on NETVISOR in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported by NETVISOR
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+Pluribus NETVISOR does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/netvisor.yml``
+---------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.netvisor
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Create access list
+   community.network.pn_access_list:
+     pn_name: "foo"
+     pn_scope: "local"
+     state: "present"
+   register: acc_list
+   when: ansible_network_os == 'community.network.netvisor'
+
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_nos.rst b/docs/docsite/rst/network/user_guide/platform_nos.rst
new file mode 100644
index 00000000..0ea3f529
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_nos.rst
@@ -0,0 +1,76 @@
+.. _nos_platform_options:
+
+***************************************
+NOS Platform Options
+***************************************
+
+Extreme NOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in the future.
+This page offers details on how to use ``ansible.netcommon.network_cli`` on NOS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported by NOS
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+NOS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/nos.yml``
+----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.nos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Get version information (nos)
+   community.network.nos_command:
+     commands: "show version"
+   register: show_ver
+   when: ansible_network_os == 'community.network.nos'
+
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_nxos.rst b/docs/docsite/rst/network/user_guide/platform_nxos.rst
new file mode 100644
index 00000000..e698b2d9
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_nxos.rst
@@ -0,0 +1,164 @@
+.. _nxos_platform_options:
+
+***************************************
+NXOS Platform Options
+***************************************
+
+The `Cisco NXOS <https://galaxy.ansible.com/cisco/nxos>`_ collection supports multiple connections. This page offers details on how each connection works in Ansible and how to use it.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ========================================== =========================
+ .. CLI NX-API
+ ==================== ========================================== =========================
+ Protocol SSH HTTP(S)
+
+ Credentials uses SSH keys / SSH-agent if present uses HTTPS certificates if
+ present
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host) via a web proxy
+
+ Connection Settings ``ansible_connection:`` ``ansible_connection:``
+ ``ansible.netcommon.network_cli`` ``ansible.netcommon.httpapi``
+
+ |enable_mode| supported: use ``ansible_become: yes`` not supported by NX-API
+ with ``ansible_become_method: enable``
+ and ``ansible_become_password:``
+
+ Returned Data Format ``stdout[0].`` ``stdout[0].messages[0].``
+ ==================== ========================================== =========================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation) |br| supported as of 2.5.3
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` or ``ansible_connection: ansible.netcommon.httpapi`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/nxos.yml``
+-----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: cisco.nxos.nxos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_become_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (nxos)
+   cisco.nxos.nxos_config:
+     backup: yes
+   register: backup_nxos_location
+   when: ansible_network_os == 'cisco.nxos.nxos'
+
+
+
+Using NX-API in Ansible
+=======================
+
+Enabling NX-API
+---------------
+
+Before you can use NX-API to connect to a switch, you must enable NX-API. To enable NX-API on a new switch via Ansible, use the ``cisco.nxos.nxos_nxapi`` module through the CLI connection. Set up ``group_vars/nxos.yml`` just like in the CLI example above, then run a playbook task like this:
+
+.. code-block:: yaml
+
+ - name: Enable NX-API
+   cisco.nxos.nxos_nxapi:
+     enable_http: yes
+     enable_https: yes
+   when: ansible_network_os == 'cisco.nxos.nxos'
+
+To find out more about the options for enabling HTTP/HTTPS and local http, see the :ref:`nxos_nxapi <nxos_nxapi_module>` module documentation.
+
+Once NX-API is enabled, change your ``group_vars/nxos.yml`` to use the NX-API connection.
+
+Example NX-API ``group_vars/nxos.yml``
+--------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.httpapi
+ ansible_network_os: cisco.nxos.nxos
+ ansible_user: myuser
+ ansible_password: !vault...
+ proxy_env:
+   http_proxy: http://proxy.example.com:8080
+
+- If you are accessing your host directly (not through a web proxy) you can remove the ``proxy_env`` configuration.
+- If you are accessing your host through a web proxy using ``https``, change ``http_proxy`` to ``https_proxy``.
+
+
+Example NX-API task
+-------------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (nxos)
+   cisco.nxos.nxos_config:
+     backup: yes
+   register: backup_nxos_location
+   environment: "{{ proxy_env }}"
+   when: ansible_network_os == 'cisco.nxos.nxos'
+
+In this example the ``proxy_env`` variable defined in ``group_vars`` gets passed to the ``environment`` option of the module used in the task.
+
+.. include:: shared_snippets/SSH_warning.txt
+
+Cisco Nexus platform support matrix
+===================================
+
+The following platforms and software versions have been certified by Cisco to work with this version of Ansible.
+
+.. table:: Platform / Software Minimum Requirements
+ :align: center
+
+ =================== =====================
+ Supported Platforms Minimum NX-OS Version
+ =================== =====================
+ Cisco Nexus N3k 7.0(3)I2(5) and later
+ Cisco Nexus N9k 7.0(3)I2(5) and later
+ Cisco Nexus N5k 7.3(0)N1(1) and later
+ Cisco Nexus N6k 7.3(0)N1(1) and later
+ Cisco Nexus N7k 7.3(0)D1(1) and later
+ =================== =====================
+
+.. table:: Platform Models
+ :align: center
+
+ ======== ==============================================
+ Platform Description
+ ======== ==============================================
+ N3k Support includes N30xx, N31xx and N35xx models
+ N5k Support includes all N5xxx models
+ N6k Support includes all N6xxx models
+ N7k Support includes all N7xxx models
+ N9k Support includes all N9xxx models
+ ======== ==============================================
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_routeros.rst b/docs/docsite/rst/network/user_guide/platform_routeros.rst
new file mode 100644
index 00000000..387db92d
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_routeros.rst
@@ -0,0 +1,80 @@
+.. _routeros_platform_options:
+
+***************************************
+RouterOS Platform Options
+***************************************
+
+RouterOS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in the future.
+This page offers details on how to use ``ansible.netcommon.network_cli`` on RouterOS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported by RouterOS
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+RouterOS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/routeros.yml``
+---------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.routeros
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+- If you are getting timeout errors, you may want to add the ``+cet1024w`` suffix to your username. This disables console colors, enables "dumb" mode, tells RouterOS not to try to detect terminal capabilities, and sets the terminal width to 1024 columns (see the example below). See the `Console login process <https://wiki.mikrotik.com/wiki/Manual:Console_login_process>`_ article in the MikroTik wiki for more information.
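+
+For example, with the ``myuser`` account from the example above:
+
+.. code-block:: yaml
+
+ ansible_user: myuser+cet1024w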
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Display resource statistics (routeros)
+   community.network.routeros_command:
+     commands: /system resource print
+   register: routeros_resources
+   when: ansible_network_os == 'community.network.routeros'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_slxos.rst b/docs/docsite/rst/network/user_guide/platform_slxos.rst
new file mode 100644
index 00000000..f433599c
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_slxos.rst
@@ -0,0 +1,77 @@
+.. _slxos_platform_options:
+
+***************************************
+SLX-OS Platform Options
+***************************************
+
+Extreme SLX-OS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. ``httpapi`` modules may be added in the future.
+This page offers details on how to use ``ansible.netcommon.network_cli`` on SLX-OS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported by SLX-OS
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+SLX-OS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/slxos.yml``
+------------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.slxos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Backup current switch config (slxos)
+   community.network.slxos_config:
+     backup: yes
+   register: backup_slxos_location
+   when: ansible_network_os == 'community.network.slxos'
+
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_voss.rst b/docs/docsite/rst/network/user_guide/platform_voss.rst
new file mode 100644
index 00000000..b532e224
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_voss.rst
@@ -0,0 +1,78 @@
+.. _voss_platform_options:
+
+***************************************
+VOSS Platform Options
+***************************************
+
+Extreme VOSS is part of the `community.network <https://galaxy.ansible.com/community/network>`_ collection and only supports CLI connections today. This page offers details on how to
+use ``ansible.netcommon.network_cli`` on VOSS in Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| supported: use ``ansible_become: yes``
+ with ``ansible_become_method: enable``
+
+ Returned Data Format ``stdout[0].``
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+VOSS does not support ``ansible_connection: local``. You must use ``ansible_connection: ansible.netcommon.network_cli``.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/voss.yml``
+-----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: community.network.voss
+ ansible_user: myuser
+ ansible_become: yes
+ ansible_become_method: enable
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve VOSS info
+   community.network.voss_command:
+     commands: show sys-info
+   when: ansible_network_os == 'community.network.voss'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/platform_vyos.rst b/docs/docsite/rst/network/user_guide/platform_vyos.rst
new file mode 100644
index 00000000..5ec00f7b
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/platform_vyos.rst
@@ -0,0 +1,74 @@
+.. _vyos_platform_options:
+
+***************************************
+VyOS Platform Options
+***************************************
+
+The `VyOS <https://galaxy.ansible.com/vyos/vyos>`_ collection supports the ``ansible.netcommon.network_cli`` connection type. This page offers details on connection options to manage VyOS using Ansible.
+
+.. contents::
+ :local:
+
+Connections available
+================================================================================
+
+.. table::
+ :class: documentation-table
+
+ ==================== ==========================================
+ .. CLI
+ ==================== ==========================================
+ Protocol SSH
+
+ Credentials uses SSH keys / SSH-agent if present
+
+ accepts ``-u myuser -k`` if using password
+
+ Indirect Access via a bastion (jump host)
+
+ Connection Settings ``ansible_connection: ansible.netcommon.network_cli``
+
+ |enable_mode| not supported
+
+ Returned Data Format Refer to individual module documentation
+ ==================== ==========================================
+
+.. |enable_mode| replace:: Enable Mode |br| (Privilege Escalation)
+
+
+``ansible_connection: local`` has been deprecated. Please use ``ansible_connection: ansible.netcommon.network_cli`` instead.
+
+Using CLI in Ansible
+====================
+
+Example CLI ``group_vars/vyos.yml``
+-----------------------------------
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: vyos.vyos.vyos
+ ansible_user: myuser
+ ansible_password: !vault...
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastion01"'
+
+
+- If you are using SSH keys (including an ssh-agent) you can remove the ``ansible_password`` configuration.
+- If you are accessing your host directly (not through a bastion/jump host) you can remove the ``ansible_ssh_common_args`` configuration.
+- If you are accessing your host through a bastion/jump host, you cannot include your SSH password in the ``ProxyCommand`` directive. To prevent secrets from leaking out (for example in ``ps`` output), SSH does not support providing passwords via environment variables.
+
+Example CLI task
+----------------
+
+.. code-block:: yaml
+
+ - name: Retrieve VyOS version info
+   vyos.vyos.vyos_command:
+     commands: show version
+   when: ansible_network_os == 'vyos.vyos.vyos'
+
+.. include:: shared_snippets/SSH_warning.txt
+
+.. seealso::
+
+ :ref:`timeout_options`
diff --git a/docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt b/docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt
new file mode 100644
index 00000000..27424f57
--- /dev/null
+++ b/docs/docsite/rst/network/user_guide/shared_snippets/SSH_warning.txt
@@ -0,0 +1,2 @@
+.. warning::
+ Never store passwords in plain text. We recommend using SSH keys to authenticate SSH connections. Ansible supports ssh-agent to manage your SSH keys. If you must use passwords to authenticate SSH connections, we recommend encrypting them with :ref:`Ansible Vault <playbooks_vault>`.
diff --git a/docs/docsite/rst/plugins/action.rst b/docs/docsite/rst/plugins/action.rst
new file mode 100644
index 00000000..93c4e4ba
--- /dev/null
+++ b/docs/docsite/rst/plugins/action.rst
@@ -0,0 +1,56 @@
+.. _action_plugins:
+
+Action Plugins
+==============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Action plugins act in conjunction with :ref:`modules <working_with_modules>` to execute the actions required by playbook tasks.
+They usually execute automatically in the background doing prerequisite work before modules execute.
+
+The 'normal' action plugin is used for modules that do not already have an action plugin.
+
+.. _enabling_action:
+
+Enabling action plugins
+-----------------------
+
+You can enable a custom action plugin by dropping it into an ``action_plugins`` directory adjacent to your play or inside a role, or by putting it in one of the action plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
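+
+For the last option, a sketch of what the ``ansible.cfg`` entry could look like (the paths are placeholders):
+
+.. code-block:: ini
+
+ [defaults]
+ action_plugins = ./action_plugins:~/.ansible/plugins/action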
+
+.. _using_action:
+
+Using action plugins
+--------------------
+
+Action plugins are executed by default when an associated module is used; no additional action is required.
+
+Plugin list
+-----------
+
+You cannot list action plugins directly; they show up as their counterpart modules:
+
+Use ``ansible-doc -l`` to see the list of available modules.
+Use ``ansible-doc <name>`` to see specific documentation and examples; the documentation notes whether the module has a corresponding action plugin.
+
+.. seealso::
+
+ :ref:`cache_plugins`
+ Ansible Cache plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`connection_plugins`
+ Ansible connection plugins
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`shell_plugins`
+ Ansible Shell plugins
+ :ref:`strategy_plugins`
+ Ansible Strategy plugins
+ :ref:`vars_plugins`
+ Ansible Vars plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/become.rst b/docs/docsite/rst/plugins/become.rst
new file mode 100644
index 00000000..c710bcf6
--- /dev/null
+++ b/docs/docsite/rst/plugins/become.rst
@@ -0,0 +1,67 @@
+.. _become_plugins:
+
+Become Plugins
+==============
+
+.. contents::
+ :local:
+ :depth: 2
+
+.. versionadded:: 2.8
+
+Become plugins ensure that Ansible can use certain privilege escalation systems, both when running the basic
+commands needed to work with the target machine and when executing the modules required by the tasks in
+the play.
+
+These utilities (``sudo``, ``su``, ``doas``, and so on) generally let you 'become' another user to execute a command
+with the permissions of that user.
+
+
+.. _enabling_become:
+
+Enabling Become Plugins
+-----------------------
+
+The become plugins shipped with Ansible are already enabled. Custom plugins can be added by placing
+them into a ``become_plugins`` directory adjacent to your play or inside a role, or by placing them in one of
+the become plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
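+
+For the last option, a sketch of the corresponding ``ansible.cfg`` entry (the paths are placeholders):
+
+.. code-block:: ini
+
+ [defaults]
+ become_plugins = ./become_plugins:~/.ansible/plugins/become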
+
+
+.. _using_become:
+
+Using Become Plugins
+--------------------
+
+In addition to the default configuration settings in :ref:`ansible_configuration_settings` or the
+``--become-method`` command line option, you can use the ``become_method`` keyword in a play or, if you need
+to be 'host specific', the connection variable ``ansible_become_method`` to select the plugin to use.
+
+You can further control the settings for each plugin via other configuration options detailed in the plugin
+themselves (linked below).
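+
+A minimal sketch of the play-level keyword (the host group, service, and choice of ``sudo`` are placeholders; setting ``ansible_become_method`` on a host or group in inventory works the same way):
+
+.. code-block:: yaml
+
+ - hosts: webservers
+   become: yes
+   become_method: sudo
+   tasks:
+     - name: Restart a service with escalated privileges
+       ansible.builtin.service:
+         name: nginx
+         state: restarted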
+
+.. _become_plugin_list:
+
+Plugin List
+-----------
+
+You can use ``ansible-doc -t become -l`` to see the list of available plugins.
+Use ``ansible-doc -t become <plugin name>`` to see specific documentation and examples.
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`playbooks_filters`
+ Jinja2 filter plugins
+ :ref:`playbooks_tests`
+ Jinja2 test plugins
+ :ref:`playbooks_lookups`
+ Jinja2 lookup plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/cache.rst b/docs/docsite/rst/plugins/cache.rst
new file mode 100644
index 00000000..a13c78db
--- /dev/null
+++ b/docs/docsite/rst/plugins/cache.rst
@@ -0,0 +1,140 @@
+.. _cache_plugins:
+
+Cache Plugins
+=============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Cache plugins allow Ansible to store gathered facts or inventory source data without the performance hit of retrieving them from source.
+
+The default cache plugin is the :ref:`memory <memory_cache>` plugin, which only caches the data for the current execution of Ansible. Other plugins with persistent storage are available to allow caching the data across runs. Some of these cache plugins write to files, others write to databases.
+
+You can use different cache plugins for inventory and facts. If you enable inventory caching without setting an inventory-specific cache plugin, Ansible uses the fact cache plugin for both facts and inventory.
+
+.. _enabling_cache:
+
+Enabling Fact Cache Plugins
+---------------------------
+
+Fact caching is always enabled. However, only one fact cache plugin can be active at a time. You can select the cache plugin to use for fact caching in the Ansible configuration, either with an environment variable:
+
+.. code-block:: shell
+
+ export ANSIBLE_CACHE_PLUGIN=jsonfile
+
+or in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [defaults]
+ fact_caching=redis
+
+If the cache plugin is in a collection, use the fully qualified name:
+
+.. code-block:: ini
+
+ [defaults]
+ fact_caching = namespace.collection_name.cache_plugin_name
+
+To enable a custom cache plugin, save it in a ``cache_plugins`` directory adjacent to your play, inside a role, or in one of the directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
+
+You also need to configure other settings specific to each plugin. Consult the individual plugin documentation or the Ansible :ref:`configuration <ansible_configuration_settings>` for more details.
+
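+For example, a minimal sketch of a ``jsonfile`` fact cache configuration in ``ansible.cfg`` (the cache path and timeout are illustrative):
+
+.. code-block:: ini
+
+    [defaults]
+    fact_caching = jsonfile
+    fact_caching_connection = /tmp/ansible_fact_cache
+    fact_caching_timeout = 86400
+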
+
+Enabling Inventory Cache Plugins
+--------------------------------
+
+Inventory caching is disabled by default. To cache inventory data, you must enable inventory caching and then select the specific cache plugin you want to use. Not all inventory plugins support caching, so check the documentation for the inventory plugin(s) you want to use. You can enable inventory caching with an environment variable:
+
+.. code-block:: shell
+
+ export ANSIBLE_INVENTORY_CACHE=True
+
+or in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ cache=True
+
+or if the inventory plugin accepts a YAML configuration source, in the configuration file:
+
+.. code-block:: yaml
+
+ # dev.aws_ec2.yaml
+ plugin: aws_ec2
+ cache: True
+
+Only one inventory cache plugin can be active at a time. You can set it with an environment variable:
+
+.. code-block:: shell
+
+ export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile
+
+or in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ cache_plugin=jsonfile
+
+or if the inventory plugin accepts a YAML configuration source, in the configuration file:
+
+.. code-block:: yaml
+
+ # dev.aws_ec2.yaml
+ plugin: aws_ec2
+ cache_plugin: jsonfile
+
+To cache inventory with a custom plugin in your plugin path, follow the :ref:`developer guide on cache plugins<developing_cache_plugins>`.
+
+To cache inventory with a cache plugin in a collection, use the FQCN:
+
+.. code-block:: ini
+
+ [inventory]
+ cache_plugin=collection_namespace.collection_name.cache_plugin
+
+If you enable caching for inventory plugins without selecting an inventory-specific cache plugin, Ansible falls back to caching inventory with the fact cache plugin you configured. Consult the individual inventory plugin documentation or the Ansible :ref:`configuration <ansible_configuration_settings>` for more details.
+
+.. note::
+
+   In Ansible 2.7 and earlier, inventory plugins could only use file-based cache plugins, such as jsonfile, pickle, and yaml.
+
+
+.. _using_cache:
+
+Using Cache Plugins
+-------------------
+
+Cache plugins are used automatically once they are enabled.
+
+
+.. _cache_plugin_list:
+
+Plugin List
+-----------
+
+You can use ``ansible-doc -t cache -l`` to see the list of available plugins.
+Use ``ansible-doc -t cache <plugin name>`` to see specific documentation and examples.
+
+.. seealso::
+
+ :ref:`action_plugins`
+ Ansible Action plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`connection_plugins`
+ Ansible connection plugins
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`shell_plugins`
+ Ansible Shell plugins
+ :ref:`strategy_plugins`
+ Ansible Strategy plugins
+ :ref:`vars_plugins`
+ Ansible Vars plugins
+ `User Mailing List <https://groups.google.com/forum/#!forum/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `webchat.freenode.net <https://webchat.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/callback.rst b/docs/docsite/rst/plugins/callback.rst
new file mode 100644
index 00000000..a8de3de9
--- /dev/null
+++ b/docs/docsite/rst/plugins/callback.rst
@@ -0,0 +1,101 @@
+.. _callback_plugins:
+
+Callback Plugins
+================
+
+.. contents::
+ :local:
+ :depth: 2
+
+Callback plugins enable adding new behaviors to Ansible when responding to events.
+By default, callback plugins control most of the output you see when running the command line programs,
+but they can also be used to add output, integrate with other tools, and marshal the events to a storage backend.
+
+.. _callback_examples:
+
+Example callback plugins
+------------------------
+
+The :ref:`log_plays <log_plays_callback>` callback is an example of how to record playbook events to a log file,
+and the :ref:`mail <mail_callback>` callback sends email on playbook failures.
+
+The :ref:`say <say_callback>` callback responds with computer synthesized speech in relation to playbook events.
+
+.. _enabling_callbacks:
+
+Enabling callback plugins
+-------------------------
+
+You can activate a custom callback by either dropping it into a ``callback_plugins`` directory adjacent to your play, inside a role, or by putting it in one of the callback directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
+
+Plugins are loaded in alphanumeric order. For example, a plugin implemented in a file named ``1_first.py`` would run before a plugin file named ``2_second.py``.
+
+Most callbacks shipped with Ansible are disabled by default and need to be whitelisted in your :ref:`ansible.cfg <ansible_configuration_settings>` file in order to function. For example:
+
+.. code-block:: ini
+
+    [defaults]
+    callback_whitelist = timer, mail, profile_roles, collection_namespace.collection_name.custom_callback
+
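+You can also whitelist callbacks with an environment variable:
+
+.. code-block:: shell
+
+    export ANSIBLE_CALLBACK_WHITELIST=timer,mail,profile_roles
+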
+Setting a callback plugin for ``ansible-playbook``
+--------------------------------------------------
+
+You can only have one plugin be the main manager of your console output. If you want to replace the default, you should define ``CALLBACK_TYPE = stdout`` in the subclass and then configure the stdout plugin in :ref:`ansible.cfg <ansible_configuration_settings>`. For example:
+
+.. code-block:: ini
+
+    [defaults]
+    stdout_callback = dense
+
+or for your custom callback:
+
+.. code-block:: ini
+
+    [defaults]
+    stdout_callback = mycallback
+
+This only affects :ref:`ansible-playbook` by default.
+
+Setting a callback plugin for ad-hoc commands
+---------------------------------------------
+
+The :ref:`ansible` ad hoc command specifically uses a different callback plugin for stdout,
+so there is an extra setting in :ref:`ansible_configuration_settings` you need to add to use the stdout callback defined above:
+
+.. code-block:: ini
+
+ [defaults]
+ bin_ansible_callbacks=True
+
+You can also set this as an environment variable:
+
+.. code-block:: shell
+
+ export ANSIBLE_LOAD_CALLBACK_PLUGINS=1
+
+
+.. _callback_plugin_list:
+
+Plugin list
+-----------
+
+You can use ``ansible-doc -t callback -l`` to see the list of available plugins.
+Use ``ansible-doc -t callback <plugin name>`` to see specific documentation and examples.
+
+.. seealso::
+
+ :ref:`action_plugins`
+ Ansible Action plugins
+ :ref:`cache_plugins`
+ Ansible cache plugins
+ :ref:`connection_plugins`
+ Ansible connection plugins
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`shell_plugins`
+ Ansible Shell plugins
+ :ref:`strategy_plugins`
+ Ansible Strategy plugins
+ :ref:`vars_plugins`
+ Ansible Vars plugins
+ `User Mailing List <https://groups.google.com/forum/#!forum/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `webchat.freenode.net <https://webchat.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/cliconf.rst b/docs/docsite/rst/plugins/cliconf.rst
new file mode 100644
index 00000000..2de12dd5
--- /dev/null
+++ b/docs/docsite/rst/plugins/cliconf.rst
@@ -0,0 +1,47 @@
+.. _cliconf_plugins:
+
+Cliconf Plugins
+===============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Cliconf plugins are abstractions over the CLI interface to network devices. They provide a standard interface for Ansible to execute tasks on those network devices.
+
+These plugins generally correspond one-to-one to network device platforms. Ansible loads the appropriate cliconf plugin automatically based on the ``ansible_network_os`` variable.
+
+.. _enabling_cliconf:
+
+Adding cliconf plugins
+-------------------------
+
+You can extend Ansible to support other network devices by dropping a custom plugin into the ``cliconf_plugins`` directory.
+
+.. _using_cliconf:
+
+Using cliconf plugins
+------------------------
+
+The cliconf plugin to use is determined automatically from the ``ansible_network_os`` variable. There should be no reason to override this functionality.
+
+Most cliconf plugins can operate without configuration. A few have additional options that can be set to affect how tasks are translated into CLI commands.
+
+Plugins are self-documenting. Each plugin should document its configuration options.
+
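+For example, a host can be pointed at a cliconf-backed platform with inventory variables like these (the host name, address, and platform are illustrative):
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        switch01:
+          ansible_host: 192.0.2.10
+          ansible_connection: network_cli
+          ansible_network_os: ios
+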
+.. _cliconf_plugin_list:
+
+Viewing cliconf plugins
+-----------------------
+
+These plugins have migrated to collections on `Ansible Galaxy <https://galaxy.ansible.com>`_. If you installed Ansible version 2.10 or later using ``pip``, you have access to several cliconf plugins. To list all available cliconf plugins on your control node, type ``ansible-doc -t cliconf -l``. To view plugin-specific documentation and examples, use ``ansible-doc -t cliconf <plugin name>``.
+
+
+.. seealso::
+
+ :ref:`Ansible for Network Automation<network_guide>`
+ An overview of using Ansible to automate networking devices.
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible-network IRC chat channel
diff --git a/docs/docsite/rst/plugins/connection.rst b/docs/docsite/rst/plugins/connection.rst
new file mode 100644
index 00000000..0417526f
--- /dev/null
+++ b/docs/docsite/rst/plugins/connection.rst
@@ -0,0 +1,78 @@
+.. _connection_plugins:
+
+Connection Plugins
+==================
+
+.. contents::
+ :local:
+ :depth: 2
+
+Connection plugins allow Ansible to connect to the target hosts so it can execute tasks on them. Ansible ships with many connection plugins, but only one can be used per host at a time.
+
+The most commonly used connection plugins are the :ref:`paramiko SSH<paramiko_ssh_connection>`, native ssh (just called :ref:`ssh<ssh_connection>`), and :ref:`local<local_connection>` connection types. All of these can be used in playbooks and with :command:`/usr/bin/ansible` to decide how you want to talk to remote machines.
+
+The basics of these connection types are covered in the :ref:`getting started<intro_getting_started>` section.
+
+.. _ssh_plugins:
+
+``ssh`` plugins
+---------------
+
+Because ssh is the default protocol for system administration and the most common transport used in Ansible, ssh-specific options are included in the command line tools. See :ref:`ansible-playbook` for more details.
+
+.. _enabling_connection:
+
+Adding connection plugins
+-------------------------
+
+You can extend Ansible to support other transports (such as SNMP or a message bus) by dropping a custom plugin
+into the ``connection_plugins`` directory.
+
+.. _using_connection:
+
+Using connection plugins
+------------------------
+
+You can set the connection plugin globally via :ref:`configuration<ansible_configuration_settings>`, at the command line (``-c``, ``--connection``), as a :ref:`keyword <playbook_keywords>` in your play, or by setting a :ref:`variable<behavioral_parameters>`, most often in your inventory.
+For example, for Windows machines you might want to set the :ref:`winrm <winrm_connection>` plugin as an inventory variable.
+
+Most connection plugins can operate with minimal configuration. By default, they use the :ref:`inventory hostname<inventory_hostnames_lookup>` and default settings to find the target host.
+
+Plugins are self-documenting. Each plugin should document its configuration options. The following are connection variables common to most connection plugins:
+
+:ref:`ansible_host<magic_variables_and_hostvars>`
+ The name of the host to connect to, if different from the :ref:`inventory <intro_inventory>` hostname.
+:ref:`ansible_port<faq_setting_users_and_ports>`
+    The ssh port number; for :ref:`ssh <ssh_connection>` and :ref:`paramiko_ssh <paramiko_ssh_connection>` it defaults to 22.
+:ref:`ansible_user<faq_setting_users_and_ports>`
+    The default user name to use to log in. Most plugins default to the 'current user running Ansible'.
+
+Each plugin might also have a specific version of a variable that overrides the general version. For example, ``ansible_ssh_host`` for the :ref:`ssh <ssh_connection>` plugin.
+
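+For example, these inventory variables select and configure the connection for one host (all values are illustrative):
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        web01:
+          ansible_host: 192.0.2.20
+          ansible_port: 2222
+          ansible_user: deploy
+          ansible_connection: ssh
+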
+.. _connection_plugin_list:
+
+Plugin List
+-----------
+
+You can use ``ansible-doc -t connection -l`` to see the list of available plugins.
+Use ``ansible-doc -t connection <plugin name>`` to see detailed documentation and examples.
+
+
+.. seealso::
+
+ :ref:`Working with Playbooks<working_with_playbooks>`
+ An introduction to playbooks
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`Filters<playbooks_filters>`
+ Jinja2 filter plugins
+ :ref:`Tests<playbooks_tests>`
+ Jinja2 test plugins
+ :ref:`Lookups<playbooks_lookups>`
+ Jinja2 lookup plugins
+ :ref:`vars_plugins`
+ Ansible vars plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/httpapi.rst b/docs/docsite/rst/plugins/httpapi.rst
new file mode 100644
index 00000000..cf9b0bbf
--- /dev/null
+++ b/docs/docsite/rst/plugins/httpapi.rst
@@ -0,0 +1,72 @@
+.. _httpapi_plugins:
+
+Httpapi Plugins
+===============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Httpapi plugins tell Ansible how to interact with a remote device's HTTP-based API and execute tasks on the
+device.
+
+Each plugin represents a particular API dialect. Some are platform-specific (Arista eAPI, Cisco NXAPI), while others might be usable on a variety of platforms (RESTCONF). Ansible loads the appropriate httpapi plugin automatically based on the ``ansible_network_os`` variable.
+
+
+.. _enabling_httpapi:
+
+Adding httpapi plugins
+-------------------------
+
+You can extend Ansible to support other APIs by dropping a custom plugin into the ``httpapi_plugins`` directory. See :ref:`developing_plugins_httpapi` for details.
+
+.. _using_httpapi:
+
+Using httpapi plugins
+------------------------
+
+The httpapi plugin to use is determined automatically from the ``ansible_network_os`` variable.
+
+Most httpapi plugins can operate without configuration. Additional options may be defined by each plugin.
+
+Plugins are self-documenting. Each plugin should document its configuration options.
+
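+Connection-level options are usually supplied as host or group variables. A minimal sketch (all values are illustrative):
+
+.. code-block:: yaml
+
+    # group_vars/eos.yml
+    ansible_connection: httpapi
+    ansible_network_os: eos
+    ansible_user: admin
+    ansible_httpapi_use_ssl: true
+    ansible_httpapi_port: 443
+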
+
+The following sample playbook uses the httpapi plugin for an Arista network device, assuming the inventory variable ``ansible_network_os=eos`` is set so the matching httpapi plugin is selected:
+
+.. code-block:: yaml
+
+ - hosts: leaf01
+ connection: httpapi
+ gather_facts: false
+ tasks:
+
+ - name: type a simple arista command
+ eos_command:
+ commands:
+ - show version | json
+ register: command_output
+
+ - name: print command output to terminal window
+ debug:
+ var: command_output.stdout[0]["version"]
+
+See the full working example `on GitHub <https://github.com/network-automation/httpapi>`_.
+
+.. _httpapi_plugin_list:
+
+Viewing httpapi plugins
+-----------------------
+
+These plugins have migrated to collections on `Ansible Galaxy <https://galaxy.ansible.com>`_. If you installed Ansible version 2.10 or later using ``pip``, you have access to several httpapi plugins. To list all available httpapi plugins on your control node, type ``ansible-doc -t httpapi -l``. To view plugin-specific documentation and examples, use ``ansible-doc -t httpapi <plugin name>``.
+
+.. seealso::
+
+ :ref:`Ansible for Network Automation<network_guide>`
+ An overview of using Ansible to automate networking devices.
+ :ref:`Developing network modules<developing_modules_network>`
+ How to develop network modules.
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible-network IRC chat channel
diff --git a/docs/docsite/rst/plugins/index.html b/docs/docsite/rst/plugins/index.html
new file mode 100644
index 00000000..a7eac856
--- /dev/null
+++ b/docs/docsite/rst/plugins/index.html
@@ -0,0 +1,4 @@
+<html>
+<head><noscript><meta http-equiv="refresh" content="0; url=plugins.html"></noscript></head>
+<body onload="window.location = 'plugins.html'">Redirecting to <a href='plugins.html'>plugins</a> page.</body>
+</html>
diff --git a/docs/docsite/rst/plugins/inventory.rst b/docs/docsite/rst/plugins/inventory.rst
new file mode 100644
index 00000000..562c1303
--- /dev/null
+++ b/docs/docsite/rst/plugins/inventory.rst
@@ -0,0 +1,162 @@
+.. _inventory_plugins:
+
+Inventory Plugins
+=================
+
+.. contents::
+ :local:
+ :depth: 2
+
+Inventory plugins allow users to point at data sources to compile the inventory of hosts that Ansible uses to target tasks, using the ``-i /path/to/file`` or ``-i 'host1, host2'`` command line parameters or other configuration sources.
+
+
+.. _enabling_inventory:
+
+Enabling inventory plugins
+--------------------------
+
+Most inventory plugins shipped with Ansible are enabled by default or can be used with the ``auto`` plugin.
+
+In some circumstances, for example, if the inventory plugin does not use a YAML configuration file, you may need to enable the specific plugin. You can do this by setting ``enable_plugins`` in your :ref:`ansible.cfg <ansible_configuration_settings>` file in the ``[inventory]`` section. Modifying this will override the default list of enabled plugins. Here is the default list of enabled plugins that ships with Ansible:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins = host_list, script, auto, yaml, ini, toml
+
+If the plugin is in a collection, use the fully qualified name:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins = namespace.collection_name.inventory_plugin_name
+
+
+.. _using_inventory:
+
+Using inventory plugins
+-----------------------
+
+To use an inventory plugin, you must provide an inventory source. Most of the time this is a file containing host information or a YAML configuration file with options for the plugin. You can use the ``-i`` flag to provide inventory sources or configure a default inventory path.
+
+.. code-block:: bash
+
+ ansible hostname -i inventory_source -m ansible.builtin.ping
+
+To start using an inventory plugin with a YAML configuration source, create a file with the accepted filename schema documented for the plugin in question, then add ``plugin: plugin_name``. Use the fully qualified name if the plugin is in a collection.
+
+.. code-block:: yaml
+
+ # demo.aws_ec2.yml
+ plugin: amazon.aws.aws_ec2
+
+Each plugin should document any naming restrictions. In addition, the YAML config file must end with the extension ``yml`` or ``yaml`` to be enabled by default with the ``auto`` plugin (otherwise, see the section above on enabling plugins).
+
+After providing any required options, you can view the populated inventory with ``ansible-inventory -i demo.aws_ec2.yml --graph``:
+
+.. code-block:: text
+
+ @all:
+ |--@aws_ec2:
+ | |--ec2-12-345-678-901.compute-1.amazonaws.com
+ | |--ec2-98-765-432-10.compute-1.amazonaws.com
+ |--@ungrouped:
+
+If you are using an inventory plugin in a playbook-adjacent collection and want to test your setup with ``ansible-inventory``, use the ``--playbook-dir`` flag.
+
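+For example:
+
+.. code-block:: bash
+
+    ansible-inventory -i demo.aws_ec2.yml --playbook-dir ./ --graph
+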
+Your inventory source might be a directory of inventory configuration files. The constructed inventory plugin only operates on those hosts already in inventory, so you may want the constructed inventory configuration parsed at a particular point (such as last). Ansible parses the directory recursively, alphabetically. You cannot configure the parsing approach, so name your files to make it work predictably. Inventory plugins that extend constructed features directly can work around that restriction by adding constructed options in addition to the inventory plugin options. Otherwise, you can use ``-i`` with multiple sources to impose a specific order, for example ``-i demo.aws_ec2.yml -i clouds.yml -i constructed.yml``.
+
+You can create dynamic groups using host variables with the constructed ``keyed_groups`` option. The option ``groups`` can also be used to create groups and ``compose`` creates and modifies host variables. Here is an aws_ec2 example utilizing constructed features:
+
+.. code-block:: yaml
+
+ # demo.aws_ec2.yml
+ plugin: amazon.aws.aws_ec2
+ regions:
+ - us-east-1
+ - us-east-2
+ keyed_groups:
+ # add hosts to tag_Name_value groups for each aws_ec2 host's tags.Name variable
+ - key: tags.Name
+ prefix: tag_Name_
+ separator: ""
+ groups:
+ # add hosts to the group development if any of the dictionary's keys or values is the word 'devel'
+ development: "'devel' in (tags|list)"
+ compose:
+ # set the ansible_host variable to connect with the private IP address without changing the hostname
+ ansible_host: private_ip_address
+
+Now the output of ``ansible-inventory -i demo.aws_ec2.yml --graph``:
+
+.. code-block:: text
+
+    @all:
+      |--@aws_ec2:
+      |  |--ec2-12-345-678-901.compute-1.amazonaws.com
+      |  |--ec2-98-765-432-10.compute-1.amazonaws.com
+      |  |--...
+      |--@development:
+      |  |--ec2-12-345-678-901.compute-1.amazonaws.com
+      |  |--ec2-98-765-432-10.compute-1.amazonaws.com
+      |--@tag_Name_ECS_Instance:
+      |  |--ec2-98-765-432-10.compute-1.amazonaws.com
+      |--@tag_Name_Test_Server:
+      |  |--ec2-12-345-678-901.compute-1.amazonaws.com
+      |--@ungrouped:
+
+If a host does not have the variables in the configuration above (in other words, ``tags.Name``, ``tags``, ``private_ip_address``), the host will not be added to groups other than those that the inventory plugin creates and the ``ansible_host`` host variable will not be modified.
+
+Inventory plugins that support caching can use the general settings for the fact cache defined in the ``ansible.cfg`` file's ``[defaults]`` section or define inventory-specific settings in the ``[inventory]`` section. Individual plugins can define plugin-specific cache settings in their config file:
+
+.. code-block:: yaml
+
+ # demo.aws_ec2.yml
+ plugin: amazon.aws.aws_ec2
+ cache: yes
+ cache_plugin: ansible.builtin.jsonfile
+ cache_timeout: 7200
+ cache_connection: /tmp/aws_inventory
+ cache_prefix: aws_ec2
+
+Here is an example of setting inventory caching with some fact caching defaults for the cache plugin used and the timeout in an ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [defaults]
+ fact_caching = ansible.builtin.jsonfile
+ fact_caching_connection = /tmp/ansible_facts
+ cache_timeout = 3600
+
+ [inventory]
+ cache = yes
+ cache_connection = /tmp/ansible_inventory
+
+.. _inventory_plugin_list:
+
+Plugin List
+-----------
+
+You can use ``ansible-doc -t inventory -l`` to see the list of available plugins.
+Use ``ansible-doc -t inventory <plugin name>`` to see plugin-specific documentation and examples.
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`connection_plugins`
+ Ansible connection plugins
+ :ref:`playbooks_filters`
+ Jinja2 filter plugins
+ :ref:`playbooks_tests`
+ Jinja2 test plugins
+ :ref:`playbooks_lookups`
+ Jinja2 lookup plugins
+ :ref:`vars_plugins`
+ Ansible vars plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/lookup.rst b/docs/docsite/rst/plugins/lookup.rst
new file mode 100644
index 00000000..31183b15
--- /dev/null
+++ b/docs/docsite/rst/plugins/lookup.rst
@@ -0,0 +1,158 @@
+.. _lookup_plugins:
+
+Lookup Plugins
+==============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Lookup plugins are an Ansible-specific extension to the Jinja2 templating language. You can use lookup plugins to access data from outside sources (files, databases, key/value stores, APIs, and other services) within your playbooks. Like all :ref:`templating <playbooks_templating>`, lookups execute and are evaluated on the Ansible control machine. Ansible makes the data returned by a lookup plugin available using the standard templating system. You can use lookup plugins to load variables or templates with information from external sources.
+
+.. note::
+   - Lookups are executed with a working directory relative to the role or play,
+     as opposed to local tasks, which are executed relative to the executed script.
+   - Pass ``wantlist=True`` to lookups to use them in Jinja2 template "for" loops.
+
+.. warning::
+   - Some lookups pass arguments to a shell. When using variables from a remote/untrusted source, use the ``|quote`` filter to ensure safe usage.
+
+
+.. _enabling_lookup:
+
+Enabling lookup plugins
+-----------------------
+
+Ansible enables all lookup plugins it can find. You can activate a custom lookup by either dropping it into a ``lookup_plugins`` directory adjacent to your play, inside the ``plugins/lookup/`` directory of a collection you have installed, inside a standalone role, or in one of the lookup directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
+
+
+.. _using_lookup:
+
+Using lookup plugins
+--------------------
+
+You can use lookup plugins anywhere you can use templating in Ansible: in a play, in a variables file, or in a Jinja2 template for the :ref:`template <template_module>` module.
+
+.. code-block:: YAML+Jinja
+
+ vars:
+ file_contents: "{{lookup('file', 'path/to/file.txt')}}"
+
+Lookups are an integral part of loops. Wherever you see ``with_``, the part after the underscore is the name of a lookup. For this reason, most lookups output lists and take lists as input; for example, ``with_items`` uses the :ref:`items <items_lookup>` lookup::
+
+ tasks:
+ - name: count to 3
+ debug: msg={{item}}
+ with_items: [1, 2, 3]
+
+You can combine lookups with :ref:`filters <playbooks_filters>`, :ref:`tests <playbooks_tests>` and even each other to do some complex data generation and manipulation. For example::
+
+    tasks:
+      - name: valid but useless and over complicated chained lookups and filters
+        debug: msg="find the answer here:\n{{ lookup('url', 'https://google.com/search/?q=' + item|urlencode)|join(' ') }}"
+        with_nested:
+          - "{{lookup('consul_kv', 'bcs/' + lookup('file', '/the/question') + ', host=localhost, port=2000')|shuffle}}"
+          - "{{lookup('sequence', 'end=42 start=2 step=2')|map('log', 4)|list}}"
+          - ['a', 'c', 'd', 'c']
+
+.. versionadded:: 2.6
+
+You can control how errors behave in all lookup plugins by setting ``errors`` to ``ignore``, ``warn``, or ``strict``. The default setting is ``strict``, which causes the task to fail if the lookup returns an error.
+
+To ignore lookup errors::
+
+ - name: if this file does not exist, I do not care .. file plugin itself warns anyway ...
+ debug: msg="{{ lookup('file', '/nosuchfile', errors='ignore') }}"
+
+.. code-block:: ansible-output
+
+ [WARNING]: Unable to find '/nosuchfile' in expected paths (use -vvvvv to see paths)
+
+ ok: [localhost] => {
+ "msg": ""
+ }
+
+
+To get a warning instead of a failure::
+
+ - name: if this file does not exist, let me know, but continue
+ debug: msg="{{ lookup('file', '/nosuchfile', errors='warn') }}"
+
+.. code-block:: ansible-output
+
+ [WARNING]: Unable to find '/nosuchfile' in expected paths (use -vvvvv to see paths)
+
+ [WARNING]: An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: /nosuchfile
+
+ ok: [localhost] => {
+ "msg": ""
+ }
+
+
+To get a fatal error (the default)::
+
+ - name: if this file does not exist, FAIL (this is the default)
+ debug: msg="{{ lookup('file', '/nosuchfile', errors='strict') }}"
+
+.. code-block:: ansible-output
+
+ [WARNING]: Unable to find '/nosuchfile' in expected paths (use -vvvvv to see paths)
+
+ fatal: [localhost]: FAILED! => {"msg": "An unhandled exception occurred while running the lookup plugin 'file'. Error was a <class 'ansible.errors.AnsibleError'>, original message: could not locate file in lookup: /nosuchfile"}
+
+
+.. _query:
+
+Forcing lookups to return lists: ``query`` and ``wantlist=True``
+----------------------------------------------------------------
+
+.. versionadded:: 2.5
+
+In Ansible 2.5, a new Jinja2 function called ``query`` was added for invoking lookup plugins. The difference between ``lookup`` and ``query`` is largely that ``query`` will always return a list.
+The default behavior of ``lookup`` is to return a string of comma separated values. ``lookup`` can be explicitly configured to return a list using ``wantlist=True``.
+
+This feature provides an easier and more consistent interface for interacting with the new ``loop`` keyword, while maintaining backwards compatibility with other uses of ``lookup``.
+
+The following examples are equivalent:
+
+.. code-block:: jinja
+
+ lookup('dict', dict_variable, wantlist=True)
+
+ query('dict', dict_variable)
+
+As demonstrated above, the behavior of ``wantlist=True`` is implicit when using ``query``.
+
+Additionally, ``q`` was introduced as a shortform of ``query``:
+
+.. code-block:: jinja
+
+ q('dict', dict_variable)
+
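+For example, a minimal sketch that feeds ``query`` results to the ``loop`` keyword (``dict_variable`` stands in for any dictionary-valued variable):
+
+.. code-block:: yaml
+
+    vars:
+      dict_variable:
+        a: 1
+        b: 2
+    tasks:
+      - name: loop over the dictionary items returned by query
+        debug:
+          msg: "{{ item.key }}={{ item.value }}"
+        loop: "{{ query('dict', dict_variable) }}"
+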
+
+.. _lookup_plugins_list:
+
+Plugin list
+-----------
+
+You can use ``ansible-doc -t lookup -l`` to see the list of available plugins. Use ``ansible-doc -t lookup <plugin name>`` to see specific documentation and examples.
+
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`playbooks_filters`
+ Jinja2 filter plugins
+ :ref:`playbooks_tests`
+ Jinja2 test plugins
+ :ref:`playbooks_lookups`
+ Jinja2 lookup plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/netconf.rst b/docs/docsite/rst/plugins/netconf.rst
new file mode 100644
index 00000000..fef2aeb3
--- /dev/null
+++ b/docs/docsite/rst/plugins/netconf.rst
@@ -0,0 +1,47 @@
+.. _netconf_plugins:
+
+Netconf Plugins
+===============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Netconf plugins are abstractions over the Netconf interface to network devices. They provide a standard interface for Ansible to execute tasks on those network devices.
+
+These plugins generally correspond one-to-one to network device platforms. Ansible loads the appropriate netconf plugin automatically based on the ``ansible_network_os`` variable. If the platform supports a standard Netconf implementation as defined in the Netconf RFC specification, Ansible loads the ``default`` netconf plugin. If the platform supports proprietary Netconf RPCs, Ansible loads the platform-specific netconf plugin.
+
+.. _enabling_netconf:
+
+Adding netconf plugins
+-------------------------
+
+You can extend Ansible to support other network devices by dropping a custom plugin into the ``netconf_plugins`` directory.
+
+.. _using_netconf:
+
+Using netconf plugins
+------------------------
+
+The netconf plugin to use is determined automatically from the ``ansible_network_os`` variable. There should be no reason to override this functionality.
+
+Most netconf plugins can operate without configuration. A few have additional options that can be set to affect how tasks are translated into netconf commands. An ncclient device-specific handler name can be set in the netconf plugin; otherwise, the ncclient ``default`` device handler is used.
+
+Plugins are self-documenting. Each plugin should document its configuration options.
+
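+For example, a host can be targeted over Netconf with inventory variables like these (the host name and platform are illustrative):
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        router01:
+          ansible_connection: netconf
+          ansible_network_os: junos
+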
+.. _netconf_plugin_list:
+
+Listing netconf plugins
+-----------------------
+
+These plugins have migrated to collections on `Ansible Galaxy <https://galaxy.ansible.com>`_. If you installed Ansible version 2.10 or later using ``pip``, you have access to several netconf plugins. To list all available netconf plugins on your control node, type ``ansible-doc -t netconf -l``. To view plugin-specific documentation and examples, use ``ansible-doc -t netconf <plugin name>``.
+
+
+.. seealso::
+
+ :ref:`Ansible for Network Automation<network_guide>`
+ An overview of using Ansible to automate networking devices.
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible-network IRC chat channel
diff --git a/docs/docsite/rst/plugins/plugins.rst b/docs/docsite/rst/plugins/plugins.rst
new file mode 100644
index 00000000..4dee6c6a
--- /dev/null
+++ b/docs/docsite/rst/plugins/plugins.rst
@@ -0,0 +1,44 @@
+.. _plugins_lookup:
+
+********************
+Working With Plugins
+********************
+
+Plugins are pieces of code that augment Ansible's core functionality. Ansible uses a plugin architecture to enable a rich, flexible and expandable feature set.
+
+Ansible ships with a number of handy plugins, and you can easily write your own.
+
+This section covers the various types of plugins that are included with Ansible:
+
+.. toctree::
+ :maxdepth: 1
+
+ action
+ become
+ cache
+ callback
+ cliconf
+ connection
+ httpapi
+ inventory
+ lookup
+ netconf
+ shell
+ strategy
+ vars
+ ../user_guide/playbooks_filters
+ ../user_guide/playbooks_tests
+ ../user_guide/plugin_filtering_config
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`ansible_configuration_settings`
+ Ansible configuration documentation and settings
+ :ref:`command_line_tools`
+ Ansible tools, description and options
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/shell.rst b/docs/docsite/rst/plugins/shell.rst
new file mode 100644
index 00000000..b0846323
--- /dev/null
+++ b/docs/docsite/rst/plugins/shell.rst
@@ -0,0 +1,53 @@
+.. _shell_plugins:
+
+Shell Plugins
+=============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Shell plugins work to ensure that the basic commands Ansible runs are properly formatted to work with
+the target machine, and they allow the user to configure certain behaviors related to how Ansible executes tasks.
+
+.. _enabling_shell:
+
+Enabling shell plugins
+----------------------
+
+You can add a custom shell plugin by dropping it into a ``shell_plugins`` directory adjacent to your play, inside a role,
+or by putting it in one of the shell plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
+
+.. warning:: You should not alter which plugin is used unless you have a setup in which the default ``/bin/sh``
+ is not a POSIX compatible shell or is not available for execution.
+
+.. _using_shell:
+
+Using shell plugins
+-------------------
+
+In addition to the default configuration settings in :ref:`ansible_configuration_settings`, you can use
+the connection variable :ref:`ansible_shell_type <ansible_shell_type>` to select the plugin to use.
+In this case, you will also want to update the :ref:`ansible_shell_executable <ansible_shell_executable>` to match.
+
+You can further control the settings for each plugin via other configuration options
+detailed in the plugins themselves (linked below).
+
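+For example, inventory variables like these select a shell plugin and a matching executable for one host (the host name and path are illustrative):
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        legacyhost:
+          ansible_shell_type: sh
+          ansible_shell_executable: /usr/xpg4/bin/sh
+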
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`playbooks_filters`
+ Jinja2 filter plugins
+ :ref:`playbooks_tests`
+ Jinja2 test plugins
+ :ref:`playbooks_lookups`
+ Jinja2 lookup plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/strategy.rst b/docs/docsite/rst/plugins/strategy.rst
new file mode 100644
index 00000000..e3623329
--- /dev/null
+++ b/docs/docsite/rst/plugins/strategy.rst
@@ -0,0 +1,79 @@
+.. _strategy_plugins:
+
+Strategy Plugins
+================
+
+.. contents::
+ :local:
+ :depth: 2
+
+Strategy plugins control the flow of play execution by handling task and host scheduling.
+
+.. _enable_strategy:
+
+Enabling strategy plugins
+-------------------------
+
+All strategy plugins shipped with Ansible are enabled by default. You can enable a custom strategy plugin by
+putting it in one of the strategy plugin directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
+
+.. _using_strategy:
+
+Using strategy plugins
+----------------------
+
+Only one strategy plugin can be used in a play, but you can use different ones for each play in a playbook or Ansible run.
+The default is the :ref:`linear <linear_strategy>` plugin. You can change this default in Ansible :ref:`configuration <ansible_configuration_settings>` using an environment variable:
+
+.. code-block:: shell
+
+    export ANSIBLE_STRATEGY=free
+
+or in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+    [defaults]
+    strategy=free
+
+You can also specify the strategy plugin via the :ref:`strategy keyword <playbook_keywords>` in a play::
+
+ - hosts: all
+ strategy: debug
+ tasks:
+ - copy: src=myhosts dest=/etc/hosts
+ notify: restart_tomcat
+
+ - package: name=tomcat state=present
+
+ handlers:
+ - name: restart_tomcat
+ service: name=tomcat state=restarted
+
+.. _strategy_plugin_list:
+
+Plugin list
+-----------
+
+You can use ``ansible-doc -t strategy -l`` to see the list of available plugins.
+Use ``ansible-doc -t strategy <plugin name>`` to see plugin-specific documentation and examples.
+
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`playbooks_filters`
+ Jinja2 filter plugins
+ :ref:`playbooks_tests`
+ Jinja2 test plugins
+ :ref:`playbooks_lookups`
+ Jinja2 lookup plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/plugins/vars.rst b/docs/docsite/rst/plugins/vars.rst
new file mode 100644
index 00000000..c24bdb81
--- /dev/null
+++ b/docs/docsite/rst/plugins/vars.rst
@@ -0,0 +1,79 @@
+.. _vars_plugins:
+
+Vars Plugins
+============
+
+.. contents::
+ :local:
+ :depth: 2
+
+Vars plugins inject additional variable data into Ansible runs that did not come from an inventory source, playbook, or command line. Playbook constructs like 'host_vars' and 'group_vars' work using vars plugins.
+
+Vars plugins were partially implemented in Ansible 2.0 and rewritten to be fully implemented starting with Ansible 2.4.
+
+The :ref:`host_group_vars <host_group_vars_vars>` plugin shipped with Ansible enables reading variables from :ref:`host_variables` and :ref:`group_variables`.
+
+
+.. _enable_vars:
+
+Enabling vars plugins
+---------------------
+
+You can activate a custom vars plugin by either dropping it into a ``vars_plugins`` directory adjacent to your play, inside a role, or by putting it in one of the directory sources configured in :ref:`ansible.cfg <ansible_configuration_settings>`.
+
+Starting in Ansible 2.10, vars plugins can require whitelisting rather than running by default. To enable a plugin that requires whitelisting, set ``vars_plugins_enabled`` in the ``defaults`` section of :ref:`ansible.cfg <ansible_configuration_settings>` or set the ``ANSIBLE_VARS_ENABLED`` environment variable to the list of vars plugins you want to execute. By default, the :ref:`host_group_vars <host_group_vars_vars>` plugin shipped with Ansible is whitelisted.
+
+Starting in Ansible 2.10, you can use vars plugins in collections. All vars plugins in collections require whitelisting and need to use the fully qualified collection name in the format ``namespace.collection_name.vars_plugin_name``.
+
+.. code-block:: ini
+
+    [defaults]
+    vars_plugins_enabled = host_group_vars,namespace.collection_name.vars_plugin_name
+
+.. _using_vars:
+
+Using vars plugins
+------------------
+
+By default, vars plugins are used on demand automatically after they are enabled.
+
+Starting in Ansible 2.10, vars plugins can be made to run at specific times. ``ansible-inventory`` does not use these settings, and always loads vars plugins.
+
+The global setting ``RUN_VARS_PLUGINS`` can be set in ``ansible.cfg`` using ``run_vars_plugins`` in the ``defaults`` section or by the ``ANSIBLE_RUN_VARS_PLUGINS`` environment variable. The default option, ``demand``, runs any enabled vars plugins relative to inventory sources whenever variables are demanded by tasks. You can use the option ``start`` to run any enabled vars plugins relative to inventory sources after importing that inventory source instead.
+
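+For example, to run enabled vars plugins when each inventory source is loaded rather than on demand:
+
+.. code-block:: shell
+
+    export ANSIBLE_RUN_VARS_PLUGINS=start
+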
+You can also control vars plugin execution on a per-plugin basis for vars plugins that support the ``stage`` option. To run the :ref:`host_group_vars <host_group_vars_vars>` plugin after importing inventory you can add the following to :ref:`ansible.cfg <ansible_configuration_settings>`:
+
+.. code-block:: ini
+
+ [vars_host_group_vars]
+ stage = inventory
+
+.. _vars_plugin_list:
+
+Plugin Lists
+------------
+
+You can use ``ansible-doc -t vars -l`` to see the list of available plugins.
+Use ``ansible-doc -t vars <plugin name>`` to see plugin-specific documentation and examples.
+
+
+.. seealso::
+
+ :ref:`action_plugins`
+ Ansible Action plugins
+ :ref:`cache_plugins`
+ Ansible Cache plugins
+ :ref:`callback_plugins`
+ Ansible callback plugins
+ :ref:`connection_plugins`
+ Ansible connection plugins
+ :ref:`inventory_plugins`
+ Ansible inventory plugins
+ :ref:`shell_plugins`
+ Ansible Shell plugins
+ :ref:`strategy_plugins`
+ Ansible Strategy plugins
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.0.rst b/docs/docsite/rst/porting_guides/porting_guide_2.0.rst
new file mode 100644
index 00000000..00879a6b
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.0.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.0_guide:
+
+*************************
+Ansible 2.0 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.0 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.0.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.10.rst b/docs/docsite/rst/porting_guides/porting_guide_2.10.rst
new file mode 100644
index 00000000..3a8ba48b
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.10.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.10_guide:
+
+==========================
+Ansible 2.10 Porting Guide
+==========================
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.10 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.10.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.3.rst b/docs/docsite/rst/porting_guides/porting_guide_2.3.rst
new file mode 100644
index 00000000..0db4a98f
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.3.rst
@@ -0,0 +1,12 @@
+:orphan:
+
+.. _porting_2.3_guide:
+
+*************************
+Ansible 2.3 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.3 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.3.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.4.rst b/docs/docsite/rst/porting_guides/porting_guide_2.4.rst
new file mode 100644
index 00000000..eb2d9954
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.4.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.4_guide:
+
+*************************
+Ansible 2.4 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.4 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.4.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.5.rst b/docs/docsite/rst/porting_guides/porting_guide_2.5.rst
new file mode 100644
index 00000000..439cbae0
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.5.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.5_guide:
+
+*************************
+Ansible 2.5 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.5 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.5.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.6.rst b/docs/docsite/rst/porting_guides/porting_guide_2.6.rst
new file mode 100644
index 00000000..6150ccc9
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.6.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.6_guide:
+
+*************************
+Ansible 2.6 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.6 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.6.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.7.rst b/docs/docsite/rst/porting_guides/porting_guide_2.7.rst
new file mode 100644
index 00000000..2da9785e
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.7.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.7_guide:
+
+*************************
+Ansible 2.7 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.7 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.7.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.8.rst b/docs/docsite/rst/porting_guides/porting_guide_2.8.rst
new file mode 100644
index 00000000..56fb2db3
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.8.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.8_guide:
+
+*************************
+Ansible 2.8 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.8 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.8.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_2.9.rst b/docs/docsite/rst/porting_guides/porting_guide_2.9.rst
new file mode 100644
index 00000000..99742313
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_2.9.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.9_guide:
+
+*************************
+Ansible 2.9 Porting Guide
+*************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible 2.9 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.9.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst b/docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst
new file mode 100644
index 00000000..46724313
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guide_base_2.10.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+.. _porting_2.10_guide_base:
+
+*******************************
+Ansible-base 2.10 Porting Guide
+*******************************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel Ansible-base 2.10 Porting guide <https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_base_2.10.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/porting_guides/porting_guides.rst b/docs/docsite/rst/porting_guides/porting_guides.rst
new file mode 100644
index 00000000..3dc29127
--- /dev/null
+++ b/docs/docsite/rst/porting_guides/porting_guides.rst
@@ -0,0 +1,11 @@
+.. _porting_guides:
+
+**********************
+Ansible Porting Guides
+**********************
+
+Ansible Porting Guides are maintained in the ``devel`` branch only. Please go to `the devel porting guides <https://docs.ansible.com/ansible/devel/porting_guides/porting_guides.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
diff --git a/docs/docsite/rst/reference_appendices/.rstcheck.cfg b/docs/docsite/rst/reference_appendices/.rstcheck.cfg
new file mode 100644
index 00000000..5b33c076
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/.rstcheck.cfg
@@ -0,0 +1,2 @@
+[rstcheck]
+ignore_directives=autoclass,automodule
diff --git a/docs/docsite/rst/reference_appendices/YAMLSyntax.rst b/docs/docsite/rst/reference_appendices/YAMLSyntax.rst
new file mode 100644
index 00000000..7d439664
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/YAMLSyntax.rst
@@ -0,0 +1,242 @@
+.. _yaml_syntax:
+
+
+YAML Syntax
+===========
+
+This page provides a basic overview of correct YAML syntax, which is how Ansible
+playbooks (our configuration management language) are expressed.
+
+We use YAML because it is easier for humans to read and write than other common
+data formats like XML or JSON. Further, there are libraries available in most
+programming languages for working with YAML.
+
+You may also wish to read :ref:`working_with_playbooks` at the same time to see how this
+is used in practice.
+
+
+YAML Basics
+-----------
+
+For Ansible, nearly every YAML file starts with a list.
+Each item in the list is a collection of key/value pairs, commonly
+called a "hash" or a "dictionary". So, we need to know how
+to write lists and dictionaries in YAML.
+
+There's another small quirk to YAML. All YAML files (regardless of their association with Ansible or not) can optionally
+begin with ``---`` and end with ``...``. This is part of the YAML format and indicates the start and end of a document.
+
+All members of a list are lines beginning at the same indentation level starting with a ``"- "`` (a dash and a space)::
+
+ ---
+ # A list of tasty fruits
+ - Apple
+ - Orange
+ - Strawberry
+ - Mango
+ ...
+
+A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space)::
+
+ # An employee record
+ martin:
+ name: Martin D'vloper
+ job: Developer
+ skill: Elite
+
+More complicated data structures are possible, such as lists of dictionaries, dictionaries whose values are lists or a mix of both::
+
+ # Employee records
+ - martin:
+ name: Martin D'vloper
+ job: Developer
+ skills:
+ - python
+ - perl
+ - pascal
+ - tabitha:
+ name: Tabitha Bitumen
+ job: Developer
+ skills:
+ - lisp
+ - fortran
+ - erlang
+
+Dictionaries and lists can also be represented in an abbreviated form if you really want to::
+
+ ---
+ martin: {name: Martin D'vloper, job: Developer, skill: Elite}
+ ['Apple', 'Orange', 'Strawberry', 'Mango']
+
+These are called "Flow collections".
+
+.. _truthiness:
+
+Ansible doesn't really use these too much, but you can also specify a boolean value (true/false) in several forms::
+
+ create_key: yes
+ needs_agent: no
+ knows_oop: True
+ likes_emacs: TRUE
+ uses_cvs: false
+
+Use lowercase 'true' or 'false' for boolean values in dictionaries if you want to be compatible with default yamllint options.
+
+Values can span multiple lines using ``|`` or ``>``. Spanning multiple lines using a "Literal Block Scalar" ``|`` will include the newlines and any trailing spaces.
+Using a "Folded Block Scalar" ``>`` will fold newlines to spaces; it's used to make what would otherwise be a very long line easier to read and edit.
+In either case the indentation will be ignored.
+Examples are::
+
+ include_newlines: |
+ exactly as you see
+ will appear these three
+ lines of poetry
+
+ fold_newlines: >
+ this is really a
+ single line of text
+ despite appearances
+
+While in the above ``>`` example all newlines are folded into spaces, there are two ways to enforce a newline to be kept::
+
+ fold_some_newlines: >
+ a
+ b
+
+ c
+ d
+ e
+ f
+ same_as: "a b\nc d\n e\nf\n"
+
+Let's combine what we learned so far in an arbitrary YAML example.
+This really has nothing to do with Ansible, but will give you a feel for the format::
+
+ ---
+ # An employee record
+ name: Martin D'vloper
+ job: Developer
+ skill: Elite
+ employed: True
+ foods:
+ - Apple
+ - Orange
+ - Strawberry
+ - Mango
+ languages:
+ perl: Elite
+ python: Elite
+ pascal: Lame
+ education: |
+ 4 GCSEs
+ 3 A-Levels
+ BSc in the Internet of Things
+
+That's all you really need to know about YAML to start writing `Ansible` playbooks.
+
+Gotchas
+-------
+
+While you can put just about anything into an unquoted scalar, there are some exceptions.
+A colon followed by a space (or newline) ``": "`` is an indicator for a mapping.
+A space followed by the pound sign ``" #"`` starts a comment.
+
+Because of this, the following is going to result in a YAML syntax error::
+
+ foo: somebody said I should put a colon here: so I did
+
+ windows_drive: c:
+
+...but this will work::
+
+ windows_path: c:\windows
+
+You will want to quote hash values that contain a colon followed by a space or by the end of the line::
+
+ foo: 'somebody said I should put a colon here: so I did'
+
+ windows_drive: 'c:'
+
+...and then the colon will be preserved.
+
+Alternatively, you can use double quotes::
+
+ foo: "somebody said I should put a colon here: so I did"
+
+ windows_drive: "c:"
+
+The difference between single quotes and double quotes is that in double quotes
+you can use escapes::
+
+ foo: "a \t TAB and a \n NEWLINE"
+
+The list of allowed escapes can be found in the YAML Specification under "Escape Sequences" (YAML 1.1) or "Escape Characters" (YAML 1.2).
+
+The following is invalid YAML:
+
+.. code-block:: text
+
+ foo: "an escaped \' single quote"
+
+
+Further, Ansible uses "{{ var }}" for variables. If a value after a colon starts
+with a "{", YAML will think it is a dictionary, so you must quote it, like so::
+
+ foo: "{{ variable }}"
+
+If your value starts with a quote the entire value must be quoted, not just part of it. Here are some additional examples of how to properly quote things::
+
+ foo: "{{ variable }}/additional/string/literal"
+ foo2: "{{ variable }}\\backslashes\\are\\also\\special\\characters"
+ foo3: "even if it's just a string literal it must all be quoted"
+
+Not valid::
+
+ foo: "E:\\path\\"rest\\of\\path
+
+In addition to ``'`` and ``"`` there are a number of characters that are special (or reserved) and cannot be used
+as the first character of an unquoted scalar: ``[] {} > | * & ! % # ` @ ,``.
+
+You should also be aware of ``? : -``. In YAML, they are allowed at the beginning of a string if a non-space
+character follows, but YAML processor implementations differ, so it's better to use quotes.
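+
+For example, quoting keeps these otherwise ambiguous scalars safe::
+
+    almost_a_complex_key: '? unquoted, this could confuse some parsers'
+    almost_a_mapping: ': same here'
+    almost_a_list_item: '- and here'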
+
+In Flow Collections, the rules are a bit more strict::
+
+ a scalar in block mapping: this } is [ all , valid
+
+ flow mapping: { key: "you { should [ use , quotes here" }
+
+Boolean conversion is helpful, but it can be a problem when you want a literal `yes` or another boolean value as a string.
+In these cases just use quotes::
+
+ non_boolean: "yes"
+ other_string: "False"
+
+
+YAML converts certain strings into floating-point values, such as the string
+`1.0`. If you need to specify a version number (in a requirements.yml file, for
+example), you will need to quote the value if it looks like a floating-point
+value::
+
+ version: "1.0"
+
+
+.. seealso::
+
+ :ref:`working_with_playbooks`
+ Learn what playbooks can do and how to write/run them.
+ `YAMLLint <http://yamllint.com/>`_
+ YAML Lint (online) helps you debug YAML syntax if you are having problems
+ `GitHub examples directory <https://github.com/ansible/ansible-examples>`_
+ Complete playbook files from the github project source
+ `Wikipedia YAML syntax reference <https://en.wikipedia.org/wiki/YAML>`_
+ A good guide to YAML syntax
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel and #yaml for YAML specific questions
+ `YAML 1.1 Specification <https://yaml.org/spec/1.1/>`_
+ The Specification for YAML 1.1, which PyYAML and libyaml are currently
+ implementing
+ `YAML 1.2 Specification <https://yaml.org/spec/1.2/spec.html>`_
+ For completeness, YAML 1.2 is the successor of 1.1
diff --git a/docs/docsite/rst/reference_appendices/automationhub.rst b/docs/docsite/rst/reference_appendices/automationhub.rst
new file mode 100644
index 00000000..dd70b98f
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/automationhub.rst
@@ -0,0 +1,10 @@
+.. _automation_hub:
+
+Ansible Automation Hub
+======================
+
+`Ansible Automation Hub <https://www.ansible.com/products/automation-hub>`_ is the official location to discover and download supported :ref:`collections <collections>`, included as part of an Ansible Automation Platform subscription. These content collections contain modules, plugins, roles, and playbooks in a downloadable package.
+
+Ansible Automation Hub gives you direct access to trusted content collections from Red Hat and Certified Partners. You can find content by topic or Ansible Partner organizations.
+
+Ansible Automation Hub is the downstream Red Hat supported product version of Ansible Galaxy. Find out more about Ansible Automation Hub features and how to access it at `Ansible Automation Hub <https://www.ansible.com/products/automation-hub>`_. Ansible Automation Hub is part of the Red Hat Ansible Automation Platform subscription, and comes bundled with support from Red Hat, Inc.
diff --git a/docs/docsite/rst/reference_appendices/common_return_values.rst b/docs/docsite/rst/reference_appendices/common_return_values.rst
new file mode 100644
index 00000000..392dc96c
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/common_return_values.rst
@@ -0,0 +1,251 @@
+.. _common_return_values:
+
+Return Values
+-------------
+
+.. contents:: Topics
+
+Ansible modules normally return a data structure that can be registered into a variable, or seen directly when output by
+the `ansible` program. Each module can optionally document its own unique return values (visible through ansible-doc and on the :ref:`main docsite<ansible_documentation>`).
+
+This document covers return values common to all modules.
+
+.. note:: Some of these keys might be set by Ansible itself once it processes the module's return information.
+
+
+Common
+^^^^^^
+
+backup_file
+```````````
+For those modules that implement `backup=no|yes` when manipulating files, a path to the backup file created.
+
+ .. code-block:: console
+
+ "backup_file": "./foo.txt.32729.2020-07-30@06:24:19~"
+
+
+changed
+```````
+A boolean indicating if the task had to make changes.
+
+ .. code-block:: console
+
+ "changed": true
+
+diff
+````
+Information on differences between the previous and current state. Often a dictionary with entries ``before`` and ``after``, which will then be formatted by the callback plugin to a diff view.
+
+ .. code-block:: console
+
+ "diff": [
+ {
+ "after": "",
+ "after_header": "foo.txt (content)",
+ "before": "",
+ "before_header": "foo.txt (content)"
+ },
+ {
+ "after_header": "foo.txt (file attributes)",
+ "before_header": "foo.txt (file attributes)"
+ }
+
+failed
+``````
+A boolean that indicates whether the task failed.
+
+ .. code-block:: console
+
+ "failed": false
+
+invocation
+``````````
+Information on how the module was invoked.
+
+ .. code-block:: console
+
+ "invocation": {
+ "module_args": {
+ "_original_basename": "foo.txt",
+ "attributes": null,
+ "backup": true,
+ "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ "content": null,
+ "delimiter": null,
+ "dest": "./foo.txt",
+ "directory_mode": null,
+ "follow": false,
+ "force": true,
+ "group": null,
+ "local_follow": null,
+ "mode": "666",
+ "owner": null,
+ "regexp": null,
+ "remote_src": null,
+ "selevel": null,
+ "serole": null,
+ "setype": null,
+ "seuser": null,
+ "src": "/Users/foo/.ansible/tmp/ansible-tmp-1596115458.110205-105717464505158/source",
+ "unsafe_writes": null,
+ "validate": null
+ }
+
+msg
+```
+A string with a generic message relayed to the user.
+
+ .. code-block:: console
+
+ "msg": "line added"
+
+rc
+``
+Some modules execute command line utilities or are geared toward executing commands directly (raw, shell, command, and so on). This field contains the 'return code' of these utilities.
+
+ .. code-block:: console
+
+ "rc": 257
+
+results
+```````
+If this key exists, it indicates that a loop was present for the task; it contains a list of the normal module 'result' for each item.
+
+ .. code-block:: console
+
+ "results": [
+ {
+ "ansible_loop_var": "item",
+ "backup": "foo.txt.83170.2020-07-30@07:03:05~",
+ "changed": true,
+ "diff": [
+ {
+ "after": "",
+ "after_header": "foo.txt (content)",
+ "before": "",
+ "before_header": "foo.txt (content)"
+ },
+ {
+ "after_header": "foo.txt (file attributes)",
+ "before_header": "foo.txt (file attributes)"
+ }
+ ],
+ "failed": false,
+ "invocation": {
+ "module_args": {
+ "attributes": null,
+ "backrefs": false,
+ "backup": true
+ }
+ },
+ "item": "foo",
+ "msg": "line added"
+ },
+ {
+ "ansible_loop_var": "item",
+ "backup": "foo.txt.83187.2020-07-30@07:03:05~",
+ "changed": true,
+ "diff": [
+ {
+ "after": "",
+ "after_header": "foo.txt (content)",
+ "before": "",
+ "before_header": "foo.txt (content)"
+ },
+ {
+ "after_header": "foo.txt (file attributes)",
+ "before_header": "foo.txt (file attributes)"
+ }
+ ],
+ "failed": false,
+ "invocation": {
+ "module_args": {
+ "attributes": null,
+ "backrefs": false,
+ "backup": true
+ }
+ },
+ "item": "bar",
+ "msg": "line added"
+ }
+ ]
+
+skipped
+```````
+A boolean that indicates whether the task was skipped.
+
+ .. code-block:: console
+
+ "skipped": true
+
+stderr
+``````
+Some modules execute command line utilities or are geared toward executing commands directly (raw, shell, command, and so on). This field contains the error output of these utilities.
+
+ .. code-block:: console
+
+ "stderr": "ls: foo: No such file or directory"
+
+stderr_lines
+````````````
+When `stderr` is returned, we also always provide this field, which is a list of strings: one item per line from the original output.
+
+ .. code-block:: console
+
+ "stderr_lines": [
+ "ls: doesntexist: No such file or directory"
+ ]
+
+stdout
+``````
+Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, and so on). This field contains the normal output of these utilities.
+
+ .. code-block:: console
+
+ "stdout": "foo!"
+
+stdout_lines
+````````````
+When `stdout` is returned, Ansible always provides this field as well: a list of strings, one item per line from the original output.
+
+ .. code-block:: console
+
+ "stdout_lines": [
+ "foo!"
+ ]
+
+
+.. _internal_return_values:
+
+Internal use
+^^^^^^^^^^^^
+
+These keys can be added by modules but will be removed from registered variables; they are 'consumed' by Ansible itself.
+
+ansible_facts
+`````````````
+This key should contain a dictionary which will be appended to the facts assigned to the host. These will be directly accessible and don't require using a registered variable.
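+
+For example, a module might return a structure like this (the fact name and value here are purely illustrative):
+
+ .. code-block:: console
+
+     "ansible_facts": {
+         "discovered_interpreter_python": "/usr/bin/python3"
+     }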
+
+exception
+`````````
+This key can contain traceback information caused by an exception in a module. It will only be displayed on high verbosity (-vvv).
+
+warnings
+````````
+This key contains a list of strings that will be presented to the user.
+
+deprecations
+````````````
+This key contains a list of dictionaries that will be presented to the user. The keys of each dictionary are `msg` and `version`; both values are strings, and the value for the `version` key can be an empty string.
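+
+For example (the message and version here are illustrative only):
+
+ .. code-block:: console
+
+     "deprecations": [
+         {
+             "msg": "The 'foo' option is deprecated, use 'bar' instead",
+             "version": "2.14"
+         }
+     ]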
+
+.. seealso::
+
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ `GitHub modules directory <https://github.com/ansible/ansible/tree/devel/lib/ansible/modules>`_
+ Browse source of core and extras modules
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/reference_appendices/faq.rst b/docs/docsite/rst/reference_appendices/faq.rst
new file mode 100644
index 00000000..329e2b4c
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/faq.rst
@@ -0,0 +1,766 @@
+.. _ansible_faq:
+
+Frequently Asked Questions
+==========================
+
+Here are some commonly asked questions and their answers.
+
+.. _collections_transition:
+
+Where did all the modules go?
++++++++++++++++++++++++++++++
+
+In July, 2019, we announced that collections would be the `future of Ansible content delivery <https://www.ansible.com/blog/the-future-of-ansible-content-delivery>`_. A collection is a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. In Ansible 2.9 we added support for collections. In Ansible 2.10 we extracted most modules from the main ansible/ansible repository and placed them in :ref:`collections <list_of_collections>`. Collections may be maintained by the Ansible team, by the Ansible community, or by Ansible partners. The `ansible/ansible repository <https://github.com/ansible/ansible>`_ now contains the code for basic features and functions, such as copying module code to managed nodes. This code is also known as ``ansible-base``.
+
+* To learn more about using collections, see :ref:`collections`.
+* To learn more about developing collections, see :ref:`developing_collections`.
+* To learn more about contributing to existing collections, see the individual collection repository for guidelines, or see :ref:`contributing_maintained_collections` to contribute to one of the Ansible-maintained collections.
+
+.. _find_my_module:
+
+Where did this specific module go?
+++++++++++++++++++++++++++++++++++
+
+If you are searching for a specific module, you can check the `runtime.yml <https://github.com/ansible/ansible/blob/devel/lib/ansible/config/ansible_builtin_runtime.yml>`_ file, which lists the first destination for each module that we extracted from the main ansible/ansible repository. Some modules have moved again since then. You can also search on `Ansible Galaxy <https://galaxy.ansible.com/>`_ or ask on one of our :ref:`IRC channels <communication_irc>`.
+
+.. _set_environment:
+
+How can I set the PATH or any other environment variable for a task or entire play?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Setting environment variables can be done with the `environment` keyword. It can be used at the task or other levels in the play.
+
+.. code-block:: yaml
+
+    - shell:
+        cmd: date
+      environment:
+        LANG: fr_FR.UTF-8
+
+.. code-block:: yaml
+
+    - hosts: servers
+      environment:
+        PATH: "{{ ansible_env.PATH }}:/thingy/bin"
+        SOME: value
+
+.. note:: Starting in version 2.0.1, the setup task from ``gather_facts`` also inherits the environment directive from the play. You might need to use the ``|default`` filter to avoid errors if setting this at the play level.
+
+.. _faq_setting_users_and_ports:
+
+How do I handle different machines needing different user accounts or ports to log in with?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Setting inventory variables in the inventory file is the easiest way.
+
+For instance, suppose these hosts have different usernames and ports:
+
+.. code-block:: ini
+
+ [webservers]
+ asdf.example.com ansible_port=5000 ansible_user=alice
+ jkl.example.com ansible_port=5001 ansible_user=bob
+
+You can also dictate the connection type to be used, if you want:
+
+.. code-block:: ini
+
+ [testcluster]
+ localhost ansible_connection=local
+ /path/to/chroot1 ansible_connection=chroot
+ foo.example.com ansible_connection=paramiko
+
+You may also wish to keep these in group variables instead, or file them in a group_vars/<groupname> file.
+See the rest of the documentation for more information about how to organize variables.
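+
+For example, if every host in the group shared the same port and user, a hypothetical ``group_vars/webservers.yml`` could hold:
+
+.. code-block:: yaml
+
+    ansible_port: 5000
+    ansible_user: alice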
+
+.. _use_ssh:
+
+How do I get ansible to reuse connections, enable Kerberized SSH, or have Ansible pay attention to my local SSH config file?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Switch your default connection type in the configuration file to ``ssh``, or use ``-c ssh`` to use
+native OpenSSH for connections instead of the Python paramiko library. In Ansible 1.2.1 and later, ``ssh`` will be used
+by default if OpenSSH is new enough to support ControlPersist as an option.
+
+Paramiko is great for starting out, but the OpenSSH type offers many advanced options. You will want to run Ansible
+from a machine new enough to support ControlPersist, if you are using this connection type. You can still manage
+older clients. If you are using RHEL 6, CentOS 6, SLES 10 or SLES 11 the version of OpenSSH is still a bit old, so
+consider managing from a Fedora or openSUSE client even though you are managing older nodes, or just use paramiko.
+
+We keep paramiko as the default because, if you are first installing Ansible on these enterprise operating systems, it offers a better experience for new users.
+
+.. _use_ssh_jump_hosts:
+
+How do I configure a jump host to access servers that I have no direct access to?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+You can set a ``ProxyCommand`` in the
+``ansible_ssh_common_args`` inventory variable. Any arguments specified in
+this variable are added to the sftp/scp/ssh command line when connecting
+to the relevant host(s). Consider the following inventory group:
+
+.. code-block:: ini
+
+ [gatewayed]
+ foo ansible_host=192.0.2.1
+ bar ansible_host=192.0.2.2
+
+You can create `group_vars/gatewayed.yml` with the following contents::
+
+ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q user@gateway.example.com"'
+
+Ansible will append these arguments to the command line when trying to
+connect to any hosts in the group ``gatewayed``. (These arguments are used
+in addition to any ``ssh_args`` from ``ansible.cfg``, so you do not need to
+repeat global ``ControlPersist`` settings in ``ansible_ssh_common_args``.)
+
+Note that ``ssh -W`` is available only with OpenSSH 5.4 or later. With
+older versions, it's necessary to execute ``nc %h:%p`` or some equivalent
+command on the bastion host.
+
+With earlier versions of Ansible, it was necessary to configure a
+suitable ``ProxyCommand`` for one or more hosts in ``~/.ssh/config``,
+or globally by setting ``ssh_args`` in ``ansible.cfg``.
+
+.. _ssh_serveraliveinterval:
+
+How do I get Ansible to notice a dead target in a timely manner?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+You can add ``-o ServerAliveInterval=NumberOfSeconds`` in ``ssh_args`` from ``ansible.cfg``. Without this option,
+SSH and therefore Ansible will wait until the TCP connection times out. Another solution is to add ``ServerAliveInterval``
+into your global SSH configuration. A good value for ``ServerAliveInterval`` is up to you to decide; keep in mind that
+``ServerAliveCountMax=3`` is the SSH default so any value you set will be tripled before terminating the SSH session.
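+
+For example, a sketch of the relevant ``ansible.cfg`` section (note that setting ``ssh_args`` replaces the defaults, so keep any ControlPersist options you rely on):
+
+.. code-block:: ini
+
+    [ssh_connection]
+    ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ServerAliveInterval=30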
+
+.. _cloud_provider_performance:
+
+How do I speed up Ansible runs against servers from cloud providers (EC2, OpenStack, and so on)?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Don't try to manage a fleet of machines of a cloud provider from your laptop.
+Instead, connect to a management node inside that cloud provider's network first and run Ansible from there.
+
+.. _python_interpreters:
+
+How do I handle not having a Python interpreter at /usr/bin/python on a remote machine?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+While you can write Ansible modules in any language, most Ansible modules are written in Python,
+including the ones central to letting Ansible work.
+
+By default, Ansible assumes it can find a :command:`/usr/bin/python` on your remote system that is
+either Python 2 (version 2.6 or higher) or Python 3 (version 3.5 or higher).
+
+Setting the inventory variable ``ansible_python_interpreter`` on any host will tell Ansible to
+auto-replace the Python interpreter with that value instead. Thus, you can point to any Python you
+want on the system if :command:`/usr/bin/python` on your system does not point to a compatible
+Python interpreter.
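+
+For example, in an INI inventory (the interpreter path here is purely illustrative)::
+
+    host1 ansible_python_interpreter=/usr/local/bin/python3.6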
+
+Some platforms may only have Python 3 installed by default. If it is not installed as
+:command:`/usr/bin/python`, you will need to configure the path to the interpreter via
+``ansible_python_interpreter``. Although most core modules will work with Python 3, there may be some
+special purpose ones which do not or you may encounter a bug in an edge case. As a temporary
+workaround you can install Python 2 on the managed host and configure Ansible to use that Python via
+``ansible_python_interpreter``. If there's no mention in the module's documentation that the module
+requires Python 2, you can also report a bug on our `bug tracker
+<https://github.com/ansible/ansible/issues>`_ so that the incompatibility can be fixed in a future release.
+
+Do not replace the shebang lines of your Python modules. Ansible will do this for you automatically at deploy time.
+
+Also, this works for ANY interpreter, for example ruby: ``ansible_ruby_interpreter``, perl: ``ansible_perl_interpreter``, and so on,
+so you can use this for custom modules written in any scripting language and control the interpreter location.
+
+Keep in mind that if you put ``env`` in your module shebang line (``#!/usr/bin/env <other>``),
+this facility will be ignored so you will be at the mercy of the remote `$PATH`.
+
+.. _installation_faqs:
+
+How do I handle the package dependencies required by Ansible's own dependencies during installation?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+While installing Ansible, sometimes you may encounter errors such as `No package 'libffi' found` or `fatal error: Python.h: No such file or directory`.
+These errors are generally caused by missing packages, which are dependencies of the packages required by Ansible.
+For example, the `libffi` package is a dependency of `pynacl` and `paramiko` (Ansible -> paramiko -> pynacl -> libffi).
+
+In order to solve these kinds of dependency issues, you might need to install required packages using
+the OS native package managers, such as `yum`, `dnf`, or `apt`, or as mentioned in the package installation guide.
+
+Refer to the documentation of the respective package for such dependencies and their installation methods.
+
+Common Platform Issues
+++++++++++++++++++++++
+
+What customer platforms does Red Hat support?
+---------------------------------------------
+
+A number of them! For a definitive list please see this `Knowledge Base article <https://access.redhat.com/articles/3168091>`_.
+
+Running in a virtualenv
+-----------------------
+
+You can install Ansible into a virtualenv on the controller quite simply:
+
+.. code-block:: shell
+
+ $ virtualenv ansible
+ $ source ./ansible/bin/activate
+ $ pip install ansible
+
+If you want to run under Python 3 instead of Python 2 you may want to change that slightly:
+
+.. code-block:: shell
+
+ $ virtualenv -p python3 ansible
+ $ source ./ansible/bin/activate
+ $ pip install ansible
+
+If you need to use any libraries which are not available via pip (for instance, SELinux Python
+bindings on systems such as Red Hat Enterprise Linux or Fedora that have SELinux enabled), then you
+need to install them into the virtualenv. There are two methods:
+
+* When you create the virtualenv, specify ``--system-site-packages`` to make use of any libraries
+  installed in the system's Python:
+
+  .. code-block:: shell
+
+      $ virtualenv ansible --system-site-packages
+
+* Copy those files in manually from the system. For instance, for SELinux bindings you might do
+  (the virtualenv here is named ``py3-ansible`` to match the copy destinations):
+
+  .. code-block:: shell
+
+      $ virtualenv -p python3 py3-ansible
+      $ cp -r -v /usr/lib64/python3.*/site-packages/selinux/ ./py3-ansible/lib64/python3.*/site-packages/
+      $ cp -v /usr/lib64/python3.*/site-packages/*selinux*.so ./py3-ansible/lib64/python3.*/site-packages/
+
+
+Running on BSD
+--------------
+
+.. seealso:: :ref:`working_with_bsd`
+
+
+Running on Solaris
+------------------
+
+By default, Solaris 10 and earlier run a non-POSIX shell which does not correctly expand the default
+tmp directory Ansible uses (:file:`~/.ansible/tmp`). If you see module failures on Solaris machines, this
+is likely the problem. There are several workarounds:
+
+* You can set ``remote_tmp`` to a path that will expand correctly with the shell you are using
+  (see the plugin documentation for :ref:`C shell<csh_shell>`, :ref:`fish shell<fish_shell>`,
+  and :ref:`Powershell<powershell_shell>`). For example, in the ansible config file you can set::
+
+      remote_tmp=$HOME/.ansible/tmp
+
+  In Ansible 2.5 and later, you can also set it per-host in inventory like this::
+
+      solaris1 ansible_remote_tmp=$HOME/.ansible/tmp
+
+* You can set :ref:`ansible_shell_executable<ansible_shell_executable>` to the path to a POSIX compatible shell. For
+  instance, many Solaris hosts have a POSIX shell located at :file:`/usr/xpg4/bin/sh` so you can set
+  this in inventory like so::
+
+      solaris1 ansible_shell_executable=/usr/xpg4/bin/sh
+
+  (bash, ksh, and zsh should also be POSIX compatible if you have any of those installed).
+
+Running on z/OS
+---------------
+
+There are a few common errors that one might run into when trying to execute Ansible on z/OS as a target.
+
+* Version 2.7.6 of Python for z/OS will not work with Ansible because it represents strings internally as EBCDIC.
+
+  To get around this limitation, download and install a later version of `python for z/OS <https://www.rocketsoftware.com/zos-open-source>`_ (2.7.13 or 3.6.1) that represents strings internally as ASCII. Version 2.7.13 is verified to work.
+
+* When ``pipelining = False`` in `/etc/ansible/ansible.cfg`, Ansible modules are transferred in binary mode via sftp, but execution of Python fails with
+
+  .. error::
+      SyntaxError: Non-UTF-8 code starting with \'\\x83\' in file /a/user1/.ansible/tmp/ansible-tmp-1548232945.35-274513842609025/AnsiballZ_stat.py on line 1, but no encoding declared; see https://python.org/dev/peps/pep-0263/ for details
+
+  To fix it, set ``pipelining = True`` in `/etc/ansible/ansible.cfg`.
+
+* The Python interpreter cannot be found in the default location ``/usr/bin/python`` on the target host.
+
+  .. error::
+      /usr/bin/python: EDC5129I No such file or directory
+
+  To fix this, set the path to the Python installation in your inventory like so::
+
+      zos1 ansible_python_interpreter=/usr/lpp/python/python-2017-04-12-py27/python27/bin/python
+
+* Start of Python fails with ``The module libpython2.7.so was not found.``
+
+  .. error::
+      EE3501S The module libpython2.7.so was not found.
+
+  On z/OS, you must execute Python from GNU bash. If GNU bash is installed at ``/usr/lpp/bash``, you can fix this in your inventory by specifying an ``ansible_shell_executable``::
+
+      zos1 ansible_shell_executable=/usr/lpp/bash/bin/bash
+
+
+Running under fakeroot
+----------------------
+
+Some issues arise because ``fakeroot`` does not create a full, POSIX-compliant system by default.
+It is known that it will not correctly expand the default tmp directory Ansible uses (:file:`~/.ansible/tmp`).
+If you see module failures, this is likely the problem.
+The simple workaround is to set ``remote_tmp`` to a path that will expand correctly (see documentation of the shell plugin you are using for specifics).
+
+For example, in the ansible config file (or via environment variable) you can set::
+
+ remote_tmp=$HOME/.ansible/tmp
+
+
+
+.. _use_roles:
+
+What is the best way to make content reusable/redistributable?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+If you have not done so already, read all about "Roles" in the playbooks documentation. This helps you make playbook content
+self-contained, and works well with things like git submodules for sharing content with others.
+
+If some of these plugin types look strange to you, see the API documentation for more details about ways Ansible can be extended.
+
+.. _configuration_file:
+
+Where does the configuration file live and what can I configure in it?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+See :ref:`intro_configuration`.
+
+.. _who_would_ever_want_to_disable_cowsay_but_ok_here_is_how:
+
+How do I disable cowsay?
+++++++++++++++++++++++++
+
+If cowsay is installed, Ansible takes it upon itself to make your day happier when running playbooks. If you decide
+that you would like to work in a professional cow-free environment, you can either uninstall cowsay, set ``nocows=1``
+in ``ansible.cfg``, or set the :envvar:`ANSIBLE_NOCOWS` environment variable:
+
+.. code-block:: shell-session
+
+ export ANSIBLE_NOCOWS=1
+
+.. _browse_facts:
+
+How do I see a list of all of the ansible\_ variables?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Ansible by default gathers "facts" about the machines under management, and these facts can be accessed in playbooks
+and in templates. To see a list of all of the facts that are available about a machine, you can run the ``setup`` module
+as an ad-hoc action:
+
+.. code-block:: shell-session
+
+ ansible -m setup hostname
+
+This will print out a dictionary of all of the facts that are available for that particular host. You might want to pipe
+the output to a pager. This does NOT include inventory variables or internal 'magic' variables. See the next question
+if you need more than just 'facts'.
+
+
+.. _browse_inventory_vars:
+
+How do I see all the inventory variables defined for my host?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+By running the following command, you can see inventory variables for a host:
+
+.. code-block:: shell-session
+
+ ansible-inventory --list --yaml
+
+
+.. _browse_host_vars:
+
+How do I see all the variables specific to my host?
++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+To see all host specific variables, which might include facts and other sources:
+
+.. code-block:: shell-session
+
+ ansible -m debug -a "var=hostvars['hostname']" localhost
+
+Unless you are using a fact cache, you normally need to run a play that gathers facts first, for the facts referenced in the task above to be available.
+
+
+.. _host_loops:
+
+How do I loop over a list of hosts in a group, inside of a template?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+A pretty common pattern is to iterate over a list of hosts inside of a host group, perhaps to populate a template configuration
+file with a list of servers. To do this, you can just access the ``groups`` dictionary in your template, like this:
+
+.. code-block:: jinja
+
+ {% for host in groups['db_servers'] %}
+ {{ host }}
+ {% endfor %}
+
+If you need to access facts about these hosts, for instance, the IP address of each hostname,
+you need to make sure that the facts have been populated. For example, make sure you have a play that talks to db_servers::
+
+    - hosts: db_servers
+      tasks:
+        - debug: msg="doesn't matter what you do, just that they were talked to previously."
+
+Then you can use the facts inside your template, like this:
+
+.. code-block:: jinja
+
+ {% for host in groups['db_servers'] %}
+ {{ hostvars[host]['ansible_eth0']['ipv4']['address'] }}
+ {% endfor %}
+
+.. _programatic_access_to_a_variable:
+
+How do I access a variable name programmatically?
++++++++++++++++++++++++++++++++++++++++++++++++++
+
+An example may come up where we need to get the ipv4 address of an arbitrary interface, where the interface to be used may be supplied
+via a role parameter or other input. Variable names can be built by adding strings together, like so:
+
+.. code-block:: jinja
+
+ {{ hostvars[inventory_hostname]['ansible_' + which_interface]['ipv4']['address'] }}
+
+The trick about going through hostvars is necessary because it's a dictionary of the entire namespace of variables. ``inventory_hostname``
+is a magic variable that indicates the current host you are looping over in the host loop.
+
+In the example above, if your interface names have dashes, you must replace them with underscores:
+
+.. code-block:: jinja
+
+    {{ hostvars[inventory_hostname]['ansible_' + which_interface | replace('-', '_') ]['ipv4']['address'] }}
+
+Also see dynamic_variables_.
+
+
+.. _access_group_variable:
+
+How do I access a group variable?
++++++++++++++++++++++++++++++++++
+
+Technically, you don't. Ansible does not really use groups directly. Groups are labels for host selection and a way to bulk-assign variables;
+they are not a first-class entity, and Ansible only cares about hosts and tasks.
+
+That said, you could just access the variable by selecting a host that is part of that group, see first_host_in_a_group_ below for an example.
+
+
+.. _first_host_in_a_group:
+
+How do I access a variable of the first host in a group?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+What happens if we want the IP address of the first webserver in the webservers group? Well, we can do that too. Note that if we
+are using dynamic inventory, which host is the 'first' may not be consistent, so you wouldn't want to do this unless your inventory
+is static and predictable. (If you are using :ref:`ansible_tower`, it will use database order, so this isn't a problem even if you are using cloud
+based inventory scripts).
+
+Anyway, here's the trick:
+
+.. code-block:: jinja
+
+ {{ hostvars[groups['webservers'][0]]['ansible_eth0']['ipv4']['address'] }}
+
+Notice how we're pulling out the hostname of the first machine of the webservers group. If you are doing this in a template, you
+could use the Jinja2 ``{% set %}`` statement to simplify this, or in a playbook, you could also use set_fact::
+
+ - set_fact: headnode={{ groups['webservers'][0] }}
+
+ - debug: msg={{ hostvars[headnode].ansible_eth0.ipv4.address }}
+
+Notice how we interchanged the bracket syntax for dots -- that can be done anywhere.
+
+.. _file_recursion:
+
+How do I copy files recursively onto a target host?
++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The ``copy`` module copies files recursively when ``src`` points to a directory. However, take a look at the ``synchronize`` module if you want to do something more efficient
+for a large number of files. The ``synchronize`` module wraps rsync. See the module index for info on both of these modules.
+
+.. _shell_env:
+
+How do I access shell environment variables?
+++++++++++++++++++++++++++++++++++++++++++++
+
+
+**On the controller machine:** To access existing environment variables on the controller, use the ``env`` lookup plugin.
+For example, to access the value of the HOME environment variable on the management machine::
+
+    ---
+    # ...
+    vars:
+      local_home: "{{ lookup('env','HOME') }}"
+
+
+**On target machines:** Environment variables are available via facts in the ``ansible_env`` variable:
+
+.. code-block:: jinja
+
+ {{ ansible_env.HOME }}
+
+If you need to set environment variables for TASK execution, see :ref:`playbooks_environment`
+in the :ref:`Advanced Playbooks <playbooks_special_topics>` section.
+There are several ways to set environment variables on your target machines. You can use the
+:ref:`template <template_module>`, :ref:`replace <replace_module>`, or :ref:`lineinfile <lineinfile_module>`
+modules to introduce environment variables into files. The exact files to edit vary depending on your OS
+and distribution and local configuration.
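+
+For example, a minimal sketch using ``lineinfile`` to persist a variable for login shells (the file to edit and the variable are assumptions that depend on your OS and shell)::
+
+    - name: Set an environment variable for future logins
+      lineinfile:
+        path: /etc/profile.d/custom.sh
+        line: 'export MY_VAR=value'
+        create: yes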
+
+.. _user_passwords:
+
+How do I generate encrypted passwords for the user module?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+An Ansible ad-hoc command is the easiest option:
+
+.. code-block:: shell-session
+
+ ansible all -i localhost, -m debug -a "msg={{ 'mypassword' | password_hash('sha512', 'mysecretsalt') }}"
+
+The ``mkpasswd`` utility that is available on most Linux systems is also a great option:
+
+.. code-block:: shell-session
+
+ mkpasswd --method=sha-512
+
+
+If this utility is not installed on your system (for example, you are using macOS) then you can still easily
+generate these passwords using Python. First, ensure that the `Passlib <https://foss.heptapod.net/python-libs/passlib/-/wikis/home>`_
+password hashing library is installed:
+
+.. code-block:: shell-session
+
+ pip install passlib
+
+Once the library is ready, SHA512 password values can then be generated as follows:
+
+.. code-block:: shell-session
+
+ python -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))"
+
+Use the integrated :ref:`hash_filters` to generate a hashed version of a password.
+You shouldn't put plaintext passwords in your playbook or host_vars; instead, use :ref:`playbooks_vault` to encrypt sensitive data.
+
+On OpenBSD, a similar option is available in the base system, called ``encrypt(1)``.
+
+.. _dot_or_array_notation:
+
+Ansible allows dot notation and array notation for variables. Which notation should I use?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The dot notation comes from Jinja and works fine for variables without special
+characters. If your variable contains dots (.), colons (:), or dashes (-), if
+a key begins and ends with two underscores, or if a key uses any of the known
+public attributes, it is safer to use the array notation. See :ref:`playbooks_variables`
+for a list of the known public attributes.
+
+.. code-block:: jinja
+
+ item[0]['checksum:md5']
+ item['section']['2.1']
+ item['region']['Mid-Atlantic']
+ It is {{ temperature['Celsius']['-3'] }} outside.
+
+Also array notation allows for dynamic variable composition, see dynamic_variables_.
+
+Another problem with 'dot notation' is that some keys collide with attributes and methods of Python dictionaries.
+
+.. code-block:: jinja
+
+ item.update # this breaks if item is a dictionary, as 'update()' is a python method for dictionaries
+ item['update'] # this works
+
+
+.. _argsplat_unsafe:
+
+When is it unsafe to bulk-set task arguments from a variable?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+You can set all of a task's arguments from a dictionary-typed variable. This
+technique can be useful in some dynamic execution scenarios. However, it
+introduces a security risk. We do not recommend it, so Ansible issues a
+warning when you do something like this::
+
+    #...
+    vars:
+      usermod_args:
+        name: testuser
+        state: present
+        update_password: always
+    tasks:
+      - user: '{{ usermod_args }}'
+
+This particular example is safe. However, constructing tasks like this is
+risky because the parameters and values passed to ``usermod_args`` could
+be overwritten by malicious values in the ``host facts`` on a compromised
+target machine. To mitigate this risk:
+
+* set bulk variables at a level of precedence greater than ``host facts`` in the order of precedence
+  found in :ref:`ansible_variable_precedence` (the example above is safe because play vars take
+  precedence over facts)
+* disable the :ref:`inject_facts_as_vars` configuration setting to prevent fact values from colliding
+  with variables (this will also disable the original warning), as shown in the sketch below
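+
+For example, the second mitigation could be applied in ``ansible.cfg`` like this (a sketch)::
+
+    [defaults]
+    inject_facts_as_vars = False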
+
+
+.. _commercial_support:
+
+Can I get training on Ansible?
+++++++++++++++++++++++++++++++
+
+Yes! See our `services page <https://www.ansible.com/products/consulting>`_ for information on our services
+and training offerings. Email `info@ansible.com <mailto:info@ansible.com>`_ for further details.
+
+We also offer free web-based training classes on a regular basis. See our
+`webinar page <https://www.ansible.com/resources/webinars-training>`_ for more info on upcoming webinars.
+
+
+.. _web_interface:
+
+Is there a web interface / REST API / GUI?
+++++++++++++++++++++++++++++++++++++++++++++
+
+Yes! Ansible, Inc makes a great product that makes Ansible even more powerful and easy to use. See :ref:`ansible_tower`.
+
+
+.. _keep_secret_data:
+
+How do I keep secret data in my playbook?
++++++++++++++++++++++++++++++++++++++++++
+
+If you would like to keep secret data in your Ansible content and still share it publicly or keep things in source control, see :ref:`playbooks_vault`.
+
+If you have a task whose results or invocation you don't want to show when running with -v (verbose) mode, the following task or playbook attribute can be useful::
+
+    - name: secret task
+      shell: /usr/bin/do_something --value={{ secret_value }}
+      no_log: True
+
+This can be used to keep verbose output but hide sensitive information from others who would otherwise like to be able to see the output.
+
+The ``no_log`` attribute can also apply to an entire play::
+
+    - hosts: all
+      no_log: True
+
+However, this will make the play somewhat difficult to debug, so it's recommended that it
+be applied to single tasks only, once a playbook is completed. Note that the use of the
+``no_log`` attribute does not prevent data from being shown when debugging Ansible itself via
+the :envvar:`ANSIBLE_DEBUG` environment variable.
+
+
+.. _when_to_use_brackets:
+.. _dynamic_variables:
+.. _interpolate_variables:
+
+When should I use {{ }}? Also, how to interpolate variables or dynamic variable names
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+A steadfast rule is 'always use ``{{ }}`` except when ``when:``'.
+Conditionals are always run through Jinja2 to resolve the expression,
+so ``when:``, ``failed_when:`` and ``changed_when:`` are always templated and you should avoid adding ``{{ }}``.
+
+In most other cases you should always use the brackets, even where you could previously reference variables without
+them (as in ``loop`` or ``with_`` clauses), as the bare form made it hard to distinguish between an undefined variable and a string.
+
+Another rule is 'moustaches don't stack'. We often see this:
+
+.. code-block:: jinja
+
+ {{ somevar_{{other_var}} }}
+
+The above DOES NOT WORK as you expect. If you need to use a dynamic variable, use the following as appropriate:
+
+.. code-block:: jinja
+
+ {{ hostvars[inventory_hostname]['somevar_' + other_var] }}
+
+For 'non host vars' you can use the :ref:`vars lookup<vars_lookup>` plugin:
+
+.. code-block:: jinja
+
+ {{ lookup('vars', 'somevar_' + other_var) }}
+
+
+.. _why_no_wheel:
+
+Why don't you ship Ansible in wheel format (or other packaging formats)?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+In most cases it has to do with maintainability. There are many ways to ship software and we do not have
+the resources to release Ansible on every platform.
+In some cases there are technical issues. For example, not all of our dependencies are available as Python wheels.
+
+.. _ansible_host_delegated:
+
+How do I get the original ansible_host when I delegate a task?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+As the documentation states, connection variables are taken from the ``delegate_to`` host so ``ansible_host`` is overwritten,
+but you can still access the original via ``hostvars``::
+
+ original_host: "{{ hostvars[inventory_hostname]['ansible_host'] }}"
+
+This works for all overridden connection variables, like ``ansible_user``, ``ansible_port``, and so on.
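+
+For example, a sketch of a delegated task that still reports the original host's address (the host name and command here are illustrative)::
+
+    - name: Register this host with the load balancer
+      command: /usr/local/bin/add_backend {{ hostvars[inventory_hostname]['ansible_host'] }}
+      delegate_to: lb.example.com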
+
+
+.. _scp_protocol_error_filename:
+
+How do I fix 'protocol error: filename does not match request' when fetching a file?
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Since release ``7.9p1`` of OpenSSH there is a `bug <https://bugzilla.mindrot.org/show_bug.cgi?id=2966>`_
+in the SCP client that can trigger this error on the Ansible controller when using SCP as the file transfer mechanism::
+
+ failed to transfer file to /tmp/ansible/file.txt\r\nprotocol error: filename does not match request
+
+In these releases, SCP tries to validate that the path of the file to fetch matches the requested path.
+The validation fails if the remote filename requires quotes to escape spaces or non-ASCII characters in its path. To avoid this error:
+
+* Use SFTP instead of SCP by setting ``scp_if_ssh`` to ``smart`` (which tries SFTP first) or to ``False``. You can do this in one of five ways:
+
+  * Rely on the default setting, which is ``smart`` - this works if ``scp_if_ssh`` is not explicitly set anywhere
+  * Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>` in inventory: ``ansible_scp_if_ssh: False``
+  * Set an environment variable on your control node: ``export ANSIBLE_SCP_IF_SSH=False``
+  * Pass an environment variable when you run Ansible: ``ANSIBLE_SCP_IF_SSH=smart ansible-playbook``
+  * Modify your ``ansible.cfg`` file: add ``scp_if_ssh=False`` to the ``[ssh_connection]`` section
+* If you must use SCP, set the ``-T`` arg to tell the SCP client to ignore path validation. You can do this in one of three ways:
+
+  * Set a :ref:`host variable <host_variables>` or :ref:`group variable <group_variables>`: ``ansible_scp_extra_args=-T``
+  * Export or pass an environment variable: ``ANSIBLE_SCP_EXTRA_ARGS=-T``
+  * Modify your ``ansible.cfg`` file: add ``scp_extra_args=-T`` to the ``[ssh_connection]`` section
+
+.. note:: If you see an ``invalid argument`` error when using ``-T``, then your SCP client is not performing filename validation and will not trigger this error.
+
+.. _docs_contributions:
+
+How do I submit a change to the documentation?
+++++++++++++++++++++++++++++++++++++++++++++++
+
+Documentation for Ansible is kept in the main project git repository, and complete instructions
+for contributing can be found in the docs README `viewable on GitHub <https://github.com/ansible/ansible/blob/devel/docs/docsite/README.md>`_. Thanks!
+
+.. _i_dont_see_my_question:
+
+I don't see my question here
+++++++++++++++++++++++++++++
+
+Please see the section below for a link to IRC and the Google Group, where you can ask your question.
+
+.. seealso::
+
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/reference_appendices/general_precedence.rst b/docs/docsite/rst/reference_appendices/general_precedence.rst
new file mode 100644
index 00000000..90494b69
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/general_precedence.rst
@@ -0,0 +1,140 @@
+.. _general_precedence_rules:
+
+Controlling how Ansible behaves: precedence rules
+=================================================
+
+To give you maximum flexibility in managing your environments, Ansible offers many ways to control how Ansible behaves: how it connects to managed nodes and how it works once it has connected.
+If you use Ansible to manage a large number of servers, network devices, and cloud resources, you may define Ansible behavior in several different places and pass that information to Ansible in several different ways.
+This flexibility is convenient, but it can backfire if you do not understand the precedence rules.
+
+These precedence rules apply to any setting that can be defined in multiple ways (by configuration settings, command-line options, playbook keywords, variables).
+
+.. contents::
+ :local:
+
+Precedence categories
+---------------------
+
+Ansible offers four sources for controlling its behavior. In order of precedence from lowest (most easily overridden) to highest (overrides all others), the categories are:
+
+ * Configuration settings
+ * Command-line options
+ * Playbook keywords
+ * Variables
+
+Each category overrides any information from all lower-precedence categories. For example, a playbook keyword will override any configuration setting.
+
+Within each precedence category, specific rules apply. However, generally speaking, 'last defined' wins and overrides any previous definitions.
+
+Configuration settings
+^^^^^^^^^^^^^^^^^^^^^^
+
+:ref:`Configuration settings<ansible_configuration_settings>` include both values from the ``ansible.cfg`` file and environment variables. Within this category, values set in configuration files have lower precedence. Ansible uses the first ``ansible.cfg`` file it finds, ignoring all others. Ansible searches for ``ansible.cfg`` in these locations in order:
+
+ * ``ANSIBLE_CONFIG`` (environment variable if set)
+ * ``ansible.cfg`` (in the current directory)
+ * ``~/.ansible.cfg`` (in the home directory)
+ * ``/etc/ansible/ansible.cfg``
+
+Environment variables have a higher precedence than entries in ``ansible.cfg``. If you have environment variables set on your control node, they override the settings in whichever ``ansible.cfg`` file Ansible loads. The value of any given environment variable follows normal shell precedence: the last value defined overwrites previous values.
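+
+For example, an entry in ``ansible.cfg`` and the environment variable that overrides it (a sketch)::
+
+    # ansible.cfg
+    [defaults]
+    forks = 10
+
+    # environment variable -- this wins over the ansible.cfg entry
+    export ANSIBLE_FORKS=20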
+
+Command-line options
+^^^^^^^^^^^^^^^^^^^^
+
+Any command-line option will override any configuration setting.
+
+When you type something directly at the command line, you may feel that your hand-crafted values should override all others, but Ansible does not work that way. Command-line options have low precedence - they override configuration only. They do not override playbook keywords, variables from inventory or variables from playbooks.
+
+You can override all other settings from all other sources in all other precedence categories at the command line by :ref:`general_precedence_extra_vars`, but that is not a command-line option, it is a way of passing a :ref:`variable<general_precedence_variables>`.
+
+At the command line, if you pass multiple values for a parameter that accepts only a single value, the last defined value wins. For example, this :ref:`ad-hoc task<intro_adhoc>` will connect as ``carol``, not as ``mike``::
+
+ ansible -u mike -m ping myhost -u carol
+
+Some parameters allow multiple values. In this case, Ansible appends them all; for example, the following command uses the hosts listed in both inventory files inventory1 and inventory2::
+
+ ansible -i /path/inventory1 -i /path/inventory2 -m ping all
+
+The help for each :ref:`command-line tool<command_line_tools>` lists available options for that tool.
+
+Playbook keywords
+^^^^^^^^^^^^^^^^^
+
+Any :ref:`playbook keyword<playbook_keywords>` will override any command-line option and any configuration setting.
+
+Within playbook keywords, precedence flows with the playbook itself; the more specific wins against the more general:
+
+- play (most general)
+- blocks/includes/imports/roles (optional and can contain tasks and each other)
+- tasks (most specific)
+
+A simple example::
+
+    - hosts: all
+      connection: ssh
+      tasks:
+        - name: This task uses ssh.
+          ping:
+
+        - name: This task uses paramiko.
+          connection: paramiko
+          ping:
+
+In this example, the ``connection`` keyword is set to ``ssh`` at the play level. The first task inherits that value and connects using ``ssh``. The second task overrides the inherited value and connects using ``paramiko``.
+The same logic applies to blocks and roles as well. All tasks, blocks, and roles within a play inherit play-level keywords; any task, block, or role can override any keyword by defining a different value for that keyword within the task, block, or role.
+
+Remember that these are KEYWORDS, not variables. Both playbooks and variable files are defined in YAML but they have different significance.
+Playbooks are the command or 'state description' structure for Ansible, variables are data we use to help make playbooks more dynamic.
+
+.. _general_precedence_variables:
+
+Variables
+^^^^^^^^^
+
+Any variable will override any playbook keyword, any command-line option, and any configuration setting.
+
+Variables that have equivalent playbook keywords, command-line options, and configuration settings are known as :ref:`connection_variables`. Originally designed for connection parameters, this category has expanded to include other core variables like the temporary directory and the python interpreter.
+
+Connection variables, like all variables, can be set in multiple ways and places. You can define variables for hosts and groups in :ref:`inventory<intro_inventory>`. You can define variables for tasks and plays in ``vars:`` blocks in :ref:`playbooks<about_playbooks>`. However, they are still variables - they are data, not keywords or configuration settings. Variables that override playbook keywords, command-line options, and configuration settings follow the same rules of :ref:`variable precedence <ansible_variable_precedence>` as any other variables.
+
+When set in a playbook, variables follow the same inheritance rules as playbook keywords. You can set a value for the play, then override it in a task, block, or role::
+
+    - hosts: cloud
+      gather_facts: false
+      become: yes
+      vars:
+        ansible_become_user: admin
+      tasks:
+        - name: This task uses admin as the become user.
+          dnf:
+            name: some-service
+            state: latest
+        - block:
+            - name: This task uses service-admin as the become user.
+              # a task to configure the new service
+            - name: This task also uses service-admin as the become user, defined in the block.
+              # second task to configure the service
+          vars:
+            ansible_become_user: service-admin
+        - name: This task (outside of the block) uses admin as the become user again.
+          service:
+            name: some-service
+            state: restarted
+
+Variable scope: how long is a value available?
+""""""""""""""""""""""""""""""""""""""""""""""
+
+Variable values set in a playbook exist only within the playbook object that defines them. These 'playbook object scope' variables are not available to subsequent objects, including other plays.
+
+Variable values associated directly with a host or group, including variables defined in inventory, by vars plugins, or using modules like :ref:`set_fact<set_fact_module>` and :ref:`include_vars<include_vars_module>`, are available to all plays. These 'host scope' variables are also available via the ``hostvars[]`` dictionary.
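+
+For example, a fact set with ``set_fact`` in one play is still visible in a later play (a minimal sketch)::
+
+    - hosts: app
+      gather_facts: false
+      tasks:
+        - set_fact:
+            app_version: "1.2.3"
+
+    - hosts: app
+      gather_facts: false
+      tasks:
+        - debug:
+            var: app_version   # still defined: host scope outlives the play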
+
+.. _general_precedence_extra_vars:
+
+Using ``-e`` extra variables at the command line
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To override all other settings in all other categories, you can use extra variables: ``--extra-vars`` or ``-e`` at the command line. Values passed with ``-e`` are variables, not command-line options, and they will override configuration settings, command-line options, and playbook keywords as well as variables set elsewhere. For example, this task will connect as ``brian`` not as ``carol``::
+
+ ansible -u carol -e 'ansible_user=brian' -a whoami all
+
+You must specify both the variable name and the value with ``--extra-vars``.
diff --git a/docs/docsite/rst/reference_appendices/glossary.rst b/docs/docsite/rst/reference_appendices/glossary.rst
new file mode 100644
index 00000000..6b36a7ea
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/glossary.rst
@@ -0,0 +1,501 @@
+Glossary
+========
+
+The following is a list (and re-explanation) of term definitions used elsewhere in the Ansible documentation.
+
+Consult the documentation home page for the full documentation and to see the terms in context, but this should be a good resource
+to check your knowledge of Ansible's components and understand how they fit together. It's something you might wish to read for review or
+when a term comes up on the mailing list.
+
+.. glossary::
+
+ Action
+ An action is a part of a task that specifies which of the modules to
+ run and which arguments to pass to that module. Each task can have
+ only one action, but it may also have other parameters.
+
+ Ad Hoc
+ Refers to running Ansible to perform some quick command, using
+ :command:`/usr/bin/ansible`, rather than the :term:`orchestration`
+ language, which is :command:`/usr/bin/ansible-playbook`. An example
+ of an ad hoc command might be rebooting 50 machines in your
+ infrastructure. Anything you can do ad hoc can be accomplished by
+ writing a :term:`playbook <playbooks>` and playbooks can also glue
+ lots of other operations together.
+
+ Async
+ Refers to a task that is configured to run in the background rather
+ than waiting for completion. If you have a long process that would
+ run longer than the SSH timeout, it would make sense to launch that
+ task in async mode. Async modes can poll for completion every so many
+ seconds or can be configured to "fire and forget", in which case
+ Ansible will not even check on the task again; it will just kick it
+ off and proceed to future steps. Async modes work with both
+ :command:`/usr/bin/ansible` and :command:`/usr/bin/ansible-playbook`.
+
+ Callback Plugin
+ Refers to some user-written code that can intercept results from
+ Ansible and do something with them. Some supplied examples in the
+ GitHub project perform custom logging, send email, or even play sound
+ effects.
+
+ Check Mode
+ Refers to running Ansible with the ``--check`` option, which does not
+ make any changes on the remote systems, but only outputs the changes
+ that might occur if the command ran without this flag. This is
+ analogous to so-called "dry run" modes in other systems, though the
+ user should be warned that this does not take into account unexpected
+ command failures or cascade effects (which is true of similar modes in
+ other systems). Use this to get an idea of what might happen, but do
+ not substitute it for a good staging environment.
+
+ Connection Plugin
+ By default, Ansible talks to remote machines through pluggable
+ libraries. Ansible uses native OpenSSH (:term:`SSH (Native)`) or
+ a Python implementation called :term:`paramiko`. OpenSSH is preferred
+ if you are using a recent version, and also enables some features like
+ Kerberos and jump hosts. This is covered in the :ref:`getting
+ started section <remote_connection_information>`. There are also
+ other connection types like ``accelerate`` mode, which must be
+ bootstrapped over one of the SSH-based connection types but is very
+ fast, and local mode, which acts on the local system. Users can also
+ write their own connection plugins.
+
+ Conditionals
+ A conditional is an expression that evaluates to true or false that
+ decides whether a given task is executed on a given machine or not.
+ Ansible's conditionals are powered by the ``when`` statement, which is
+ discussed in :ref:`working_with_playbooks`.
+
+ Declarative
+ An approach to achieving a task that uses a description of the
+ final state rather than a description of the sequence of steps
+ necessary to achieve that state. For a real world example, a
+ declarative specification of a task would be: "put me in California".
+ Depending on your current location, the sequence of steps to get you to
+ California may vary, and if you are already in California, nothing
+ at all needs to be done. Ansible's Resources are declarative; it
+ figures out the steps needed to achieve the final state. It also lets
+ you know whether or not any steps needed to be taken to get to the
+ final state.
+
+ Diff Mode
+ A ``--diff`` flag can be passed to Ansible to show what changed on
+ modules that support it. You can combine it with ``--check`` to get a
+ good 'dry run'. File diffs are normally in unified diff format.
+
+ Executor
+ A core software component of Ansible that is the power behind
+ :command:`/usr/bin/ansible` directly -- and corresponds to the
+ invocation of each task in a :term:`playbook <playbooks>`. The
+ Executor is something Ansible developers may talk about, but it's not
+ really user land vocabulary.
+
+ Facts
+ Facts are simply things that are discovered about remote nodes. While
+ they can be used in :term:`playbooks` and templates just like
+ variables, facts are things that are inferred, rather than set. Facts
+ are automatically discovered by Ansible when running plays by
+ executing the internal :ref:`setup module <setup_module>` on the remote nodes. You
+ never have to call the setup module explicitly; it just runs, but it
+ can be disabled to save time if it is not needed, or you can tell
+ Ansible to collect only a subset of the full facts via the
+ ``gather_subset:`` option. For the convenience of users who are
+ switching from other configuration management systems, the fact module
+ will also pull in facts from the :program:`ohai` and :program:`facter`
+ tools if they are installed. These are fact libraries from Chef and
+ Puppet, respectively. (These may also be disabled via
+ ``gather_subset:``)
+
+ Filter Plugin
+ A filter plugin is something that most users will never need to
+ understand. These allow for the creation of new :term:`Jinja2`
+ filters, which are more or less only of use to people who know what
+ Jinja2 filters are. If you need them, you can learn how to write them
+ in the :ref:`API docs section <developing_filter_plugins>`.
+
+ Forks
+ Ansible talks to remote nodes in parallel and the level of parallelism
+ can be set either by passing ``--forks`` or editing the default in
+ a configuration file. The default is a very conservative five (5)
+ forks, though if you have a lot of RAM, you can easily set this to
+ a value like 50 for increased parallelism.
+
+ Gather Facts (Boolean)
+ :term:`Facts` are mentioned above. Sometimes when running a multi-play
+ :term:`playbook <playbooks>`, it is desirable to have some plays that
+ don't bother with fact computation if they aren't going to need to
+ utilize any of these values. Setting ``gather_facts: False`` on
+ a playbook allows this implicit fact gathering to be skipped.
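+
+ For example, a minimal play that skips fact gathering (the host pattern and task are illustrative)::
+
+     - hosts: webservers
+       gather_facts: False
+       tasks:
+         - ping: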
+
+ Globbing
+ Globbing is a way to select lots of hosts based on wildcards, rather
+ than the name of the host specifically, or the name of the group they
+ are in. For instance, it is possible to select ``www*`` to match all
+ hosts starting with ``www``. This concept is pulled directly from
+ :program:`Func`, one of Michael DeHaan's (an Ansible Founder) earlier
+ projects. In addition to basic globbing, various set operations are
+ also possible, such as 'hosts in this group and not in another group',
+ and so on.
+
+ Group
+ A group consists of several hosts assigned to a pool that can be
+ conveniently targeted together, as well as given variables that they
+ share in common.
+
+ Group Vars
+ The :file:`group_vars/` files are files that live in a directory
+ alongside an inventory file, with an optional filename named after
+ each group. This is a convenient place to put variables that are
+ provided to a given group, especially complex data structures, so that
+ these variables do not have to be embedded in the :term:`inventory`
+ file or :term:`playbook <playbooks>`.
+
+ Handlers
+ Handlers are just like regular tasks in an Ansible
+ :term:`playbook <playbooks>` (see :term:`Tasks`) but are only run if
+ the Task contains a ``notify`` directive and also indicates that it
+ changed something. For example, if a config file is changed, then the
+ task referencing the config file templating operation may notify
+ a service restart handler. This means services can be bounced only if
+ they need to be restarted. Handlers can be used for things other than
+ service restarts, but service restarts are the most common usage.
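+
+ A minimal sketch of the config-file pattern described above (the file and service names are illustrative)::
+
+     tasks:
+       - name: template the config file
+         template:
+           src: foo.conf.j2
+           dest: /etc/foo.conf
+         notify: restart foo
+
+     handlers:
+       - name: restart foo
+         service:
+           name: foo
+           state: restarted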
+
+ Host
+ A host is simply a remote machine that Ansible manages. They can have
+ individual variables assigned to them, and can also be organized in
+ groups. All hosts have a name they can be reached at (which is either
+ an IP address or a domain name) and, optionally, a port number, if they
+ are not to be accessed on the default SSH port.
+
+ Host Specifier
+ Each :term:`Play <plays>` in Ansible maps a series of :term:`tasks` (which define the role,
+ purpose, or orders of a system) to a set of systems.
+
+ The ``hosts:`` directive in each play is often called the hosts specifier.
+
+ It may select one system, many systems, one or more groups, or even
+ some hosts that are in one group and explicitly not in another.
+
+ Host Vars
+ Just like :term:`Group Vars`, a directory alongside the inventory file named
+ :file:`host_vars/` can contain a file named after each hostname in the
+ inventory file, in :term:`YAML` format. This provides a convenient place to
+ assign variables to the host without having to embed them in the
+ :term:`inventory` file. The Host Vars file can also be used to define complex
+ data structures that can't be represented in the inventory file.
+
+ Idempotency
+ An operation is idempotent if the result of performing it once is
+ exactly the same as the result of performing it repeatedly without
+ any intervening actions.
+
+ Includes
+ The idea that :term:`playbook <playbooks>` files (which are nothing
+ more than lists of :term:`plays`) can include other lists of plays,
+ and task lists can externalize lists of :term:`tasks` in other files,
+ and similarly with :term:`handlers`. Includes can be parameterized,
+ which means that the loaded file can pass variables. For instance, an
+ included play for setting up a WordPress blog may take a parameter
+ called ``user`` and that play could be included more than once to
+ create a blog for both ``alice`` and ``bob``.
+
+ Inventory
+ A file (by default, Ansible uses a simple INI format) that describes
+ :term:`Hosts <Host>` and :term:`Groups <Group>` in Ansible. Inventory
+ can also be provided via an :term:`Inventory Script` (sometimes called
+ an "External Inventory Script").
+
+ Inventory Script
+ A very simple program (or a complicated one) that looks up
+ :term:`hosts <Host>`, :term:`group` membership for hosts, and variable
+ information from an external resource -- whether that be a SQL
+ database, a CMDB solution, or something like LDAP. This concept was
+ adapted from Puppet (where it is called an "External Nodes
+ Classifier") and works more or less exactly the same way.
+
+ Jinja2
+ Jinja2 is the preferred templating language of Ansible's template
+ module. It is a very simple Python template language that is
+ generally readable and easy to write.
+
+ JSON
+ Ansible uses JSON for return data from remote modules. This allows
+ modules to be written in any language, not just Python.
+
+ Lazy Evaluation
+ In general, Ansible evaluates any variables in
+ :term:`playbook <playbooks>` content at the last possible second,
+ which means that if you define a data structure, that data structure
+ can itself define variable values within it, and everything "just
+ works" as you would expect. This also means variable strings can
+ include other variables inside of those strings.
+
+ Library
+ A collection of modules made available to :command:`/usr/bin/ansible`
+ or an Ansible :term:`playbook <playbooks>`.
+
+ Limit Groups
+ By passing ``--limit somegroup`` to :command:`ansible` or
+ :command:`ansible-playbook`, the commands can be limited to a subset
+ of :term:`hosts <Host>`. For instance, this can be used to run
+ a :term:`playbook <playbooks>` that normally targets an entire set of
+ servers against one particular server.
+
+ Local Action
+ A local_action directive in a :term:`playbook <playbooks>` targeting
+ remote machines means that the given step will actually occur on the
+ local machine, but that the variable ``{{ ansible_hostname }}`` can be
+ passed in to reference the remote hostname being referred to in that
+ step. This can be used to trigger, for example, an rsync operation.
+
+ Local Connection
+ By using ``connection: local`` in a :term:`playbook <playbooks>`, or
+ passing ``-c local`` to :command:`/usr/bin/ansible`, this indicates
+ that we are managing the local host and not a remote machine.
+
+ Lookup Plugin
+ A lookup plugin is a way to get data into Ansible from the outside world.
+ Lookup plugins are an extension of Jinja2 and can be accessed in templates, for example,
+ ``{{ lookup('file','/path/to/file') }}``.
+ This is how constructs such as ``with_items`` are implemented.
+ There are also lookup plugins like ``file`` which loads data from
+ a file and ones for querying environment variables, DNS text records,
+ or key value stores.
+
+ Loops
+ Generally, Ansible is not a programming language. It prefers to be
+ more declarative, though various constructs like ``loop`` allow
+ a particular task to be repeated for multiple items in a list.
+ Certain modules, like :ref:`yum <yum_module>` and :ref:`apt <apt_module>`, actually take
+ lists directly, and can install all packages given in those lists
+ within a single transaction, dramatically speeding up total time to
+ configuration, so they can be used without loops.
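+
+ For example, a sketch of a ``loop`` over a list (the user names are illustrative)::
+
+     - name: add several users
+       user:
+         name: "{{ item }}"
+         state: present
+       loop:
+         - alice
+         - bob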
+
+ Modules
+ Modules are the units of work that Ansible ships out to remote
+ machines. Modules are kicked off by either
+ :command:`/usr/bin/ansible` or :command:`/usr/bin/ansible-playbook`
+ (where multiple tasks use lots of different modules in conjunction).
+ Modules can be implemented in any language, including Perl, Bash, or
+ Ruby -- but can leverage some useful communal library code if written
+ in Python. Modules just have to return :term:`JSON`. Once modules are
+ executed on remote machines, they are removed, so no long running
+ daemons are used. Ansible refers to the collection of available
+ modules as a :term:`library`.
+
+ Multi-Tier
+ The concept that IT systems are not managed one system at a time, but
+ by interactions between multiple systems and groups of systems in
+ well defined orders. For instance, a web server may need to be
+ updated before a database server and pieces on the web server may
+ need to be updated after *THAT* database server and various load
+ balancers and monitoring servers may need to be contacted. Ansible
+ models entire IT topologies and workflows rather than looking at
+ configuration from a "one system at a time" perspective.
+
+ Notify
+ The act of a :term:`task <tasks>` registering a change event and
+ informing a :term:`handler <handlers>` task that another
+ :term:`action` needs to be run at the end of the :term:`play <plays>`. If
+ a handler is notified by multiple tasks, it will still be run only
+ once. Handlers are run in the order they are listed, not in the order
+ that they are notified.
+
+ Orchestration
+ Many software automation systems use this word to mean different
+ things. Ansible uses it as a conductor would conduct an orchestra.
+ A datacenter or cloud architecture is full of many systems, playing
+ many parts -- web servers, database servers, maybe load balancers,
+ monitoring systems, continuous integration systems, and so on. In
+ performing any process, it is necessary to touch systems in particular
+ orders, often to simulate rolling updates or to deploy software
+ correctly. Some system may perform some steps, then others, then
+ previous systems already processed may need to perform more steps.
+ Along the way, emails may need to be sent or web services contacted.
+ Ansible orchestration is all about modeling that kind of process.
+
+ paramiko
+ By default, Ansible manages machines over SSH. One of the libraries it
+ can use to do this is a Python implementation called paramiko, which
+ was the default transport in early Ansible releases. The paramiko
+ library is generally fast and easy to manage, though users who want to
+ use Kerberos or jump hosts may wish to use a native SSH binary such as
+ OpenSSH by specifying the connection type in their :term:`playbooks`,
+ or using the ``-c ssh`` flag.
+
+ Playbooks
+ Playbooks are the language by which Ansible orchestrates, configures,
+ administers, or deploys systems. They are called playbooks partially
+ because it's a sports analogy, and it's supposed to be fun using them.
+ They aren't workbooks :)
+
+ Plays
+ A :term:`playbook <playbooks>` is a list of plays. A play is
+ minimally a mapping between a set of :term:`hosts <Host>` selected by a host
+ specifier (usually chosen by :term:`groups <Group>` but sometimes by
+ hostname :term:`globs <Globbing>`) and the :term:`tasks` which run on those
+ hosts to define the role that those systems will perform. There can be
+ one or many plays in a playbook.
+
+ Pull Mode
+ By default, Ansible runs in :term:`push mode`, which gives it very
+ fine-grained control over when it talks to each system. Pull mode is
+ provided for when you would rather have nodes check in every N minutes
+ on a particular schedule. It uses a program called
+ :command:`ansible-pull` and can also be set up (or reconfigured) using
+ a push-mode :term:`playbook <playbooks>`. Most Ansible users use push
+ mode, but pull mode is included for variety and the sake of having
+ choices.
+
+ :command:`ansible-pull` works by checking configuration orders out of
+ git on a crontab and then managing the machine locally, using the
+ :term:`local connection` plugin.
+
+ Push Mode
+ Push mode is the default mode of Ansible. In fact, it's not really
+ a mode at all -- it's just how Ansible works when you aren't thinking
+ about it. Push mode allows Ansible to be fine-grained and conduct
+ nodes through complex orchestration processes without waiting for them
+ to check in.
+
+ Register Variable
+ The result of running any :term:`task <tasks>` in Ansible can be
+ stored in a variable for use in a template or a conditional statement.
+ The keyword used to define the variable is called ``register``, taking
+ its name from the idea of registers in assembly programming (though
+ Ansible will never feel like assembly programming). You can choose
+ any variable name you like for registration.
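+
+ A minimal sketch (the command is illustrative)::
+
+     - shell: /usr/bin/some-command
+       register: cmd_result
+
+     - debug:
+         var: cmd_result.stdout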
+
+ Resource Model
+ Ansible modules work in terms of resources. For instance, the
+ :ref:`file module <file_module>` will select a particular file and ensure
+ that the attributes of that resource match a particular model. As an
+ example, we might wish to change the owner of :file:`/etc/motd` to
+ ``root`` if it is not already set to ``root``, or set its mode to
+ ``0644`` if it is not already set to ``0644``. The resource models
+ are :term:`idempotent <idempotency>` meaning change commands are not
+ run unless needed, and Ansible will bring the system back to a desired
+ state regardless of the actual state -- rather than you having to tell
+ it how to get to the state.
+
+ Roles
+ Roles are units of organization in Ansible. Assigning a role to
+ a group of :term:`hosts <Host>` (or a set of :term:`groups <group>`,
+ or :term:`host patterns <Globbing>`, and so on) implies that they should
+ implement a specific behavior. A role may include applying certain
+ variable values, certain :term:`tasks`, and certain :term:`handlers`
+ -- or just one or more of these things. Because of the file structure
+ associated with a role, roles become redistributable units that allow
+ you to share behavior among :term:`playbooks` -- or even with other users.
+
+ Rolling Update
+ The act of addressing a number of nodes in a group N at a time to
+ avoid updating them all at once and bringing the system offline. For
+ instance, in a web topology of 500 nodes handling very large volume,
+ it may be reasonable to update 10 or 20 machines at a time, moving on
+ to the next 10 or 20 when done. The ``serial:`` keyword in an Ansible
+ :term:`playbook <playbooks>` controls the size of the rolling update
+ pool. The default is to address all hosts at once, so this is something
+ that you must opt in to. OS configuration (such as making sure config
+ files are correct) does not typically have to use the rolling update
+ model, but can do so if desired.
+
+ Serial
+ .. seealso::
+
+ :term:`Rolling Update`
+
+ Sudo
+ Ansible does not require root logins, and since it's daemonless,
+ definitely does not require root level daemons (which can be
+ a security concern in sensitive environments). Ansible can log in and
+ perform many operations wrapped in a sudo command, and can work with
+ both password-less and password-based sudo. Some operations that
+ don't normally work with sudo (like scp file transfer) can be achieved
+ with Ansible's :ref:`copy <copy_module>`, :ref:`template <template_module>`, and
+ :ref:`fetch <fetch_module>` modules while running in sudo mode.
+
+ SSH (Native)
+ Native OpenSSH as an Ansible transport is specified with ``-c ssh``
+ (or a config file, or a directive in the :term:`playbook <playbooks>`)
+ and can be useful if wanting to login via Kerberized SSH or using SSH
+ jump hosts, and so on. In 1.2.1, ``ssh`` will be used by default if the
+ OpenSSH binary on the control machine is sufficiently new.
+ Previously, Ansible selected ``paramiko`` as a default. Using
+ a client that supports ``ControlMaster`` and ``ControlPersist`` is
+ recommended for maximum performance -- if you don't have that and
+ don't need Kerberos, jump hosts, or other features, ``paramiko`` is
+ a good choice. Ansible will warn you if it doesn't detect
+ ControlMaster/ControlPersist capability.
+
+ Tags
+ Ansible allows tagging resources in a :term:`playbook <playbooks>`
+ with arbitrary keywords, and then running only the parts of the
+ playbook that correspond to those keywords. For instance, it is
+ possible to have an entire OS configuration, and have certain steps
+ labeled ``ntp``, and then run just the ``ntp`` steps to reconfigure
+ the time server information on a remote host.
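+
+ A sketch of the ``ntp`` example above (the file names are illustrative)::
+
+     - name: configure time server information
+       template:
+         src: ntp.conf.j2
+         dest: /etc/ntp.conf
+       tags:
+         - ntp
+
+ The tagged steps can then be run alone with ``ansible-playbook site.yml --tags ntp``.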
+
+ Task
+ :term:`Playbooks` exist to run tasks. Tasks combine an :term:`action`
+ (a module and its arguments) with a name and optionally some other
+ keywords (like :term:`looping directives <loops>`). :term:`Handlers`
+ are also tasks, but they are a special kind of task that do not run
+ unless they are notified by name when a task reports an underlying
+ change on a remote system.
+
+ Tasks
+ A list of :term:`Task`.
+
+ Templates
+ Ansible can easily transfer files to remote systems but often it is
+ desirable to substitute variables in other files. Variables may come
+ from the :term:`inventory` file, :term:`Host Vars`, :term:`Group
+ Vars`, or :term:`Facts`. Templates use the :term:`Jinja2` template
+ engine and can also include logical constructs like loops and if
+ statements.
+
+ Transport
+ Ansible uses :term:`connection plugins <Connection Plugin>` to define types of available
+ transports. These are simply how Ansible will reach out to managed
+ systems. Transports included are :term:`paramiko`,
+ :term:`ssh <SSH (Native)>` (using OpenSSH), and
+ :term:`local <Local Connection>`.
+
+ When
+ An optional conditional statement attached to a :term:`task <tasks>` that is used to
+ determine if the task should run or not. If the expression following
+ the ``when:`` keyword evaluates to false, the task will be ignored.
+
+ Vars (Variables)
+ As opposed to :term:`Facts`, variables are names of values. They can
+ be simple scalar values (integers, booleans, strings) or complex ones
+ (dictionaries/hashes, lists), and they can be used in templates and
+ :term:`playbooks`. They are declared things, not things that are
+ inferred from the remote system's current state or nature (which is
+ what Facts are).
+
+ YAML
+ Ansible does not want to force people to write programming language
+ code to automate infrastructure, so Ansible uses YAML to define
+ :term:`playbook <playbooks>` configuration languages and also variable
+ files. YAML is nice because it has a minimum of syntax and is very
+ clean and easy for people to skim. It is a good data format for
+ configuration files and humans, but also machine readable. Ansible's
+ usage of YAML stemmed from Michael DeHaan's first use of it inside of
+ Cobbler around 2006. YAML is fairly popular in the dynamic language
+ community and the format has libraries available for serialization in
+ many languages (Python, Perl, Ruby, and so on).
+
+.. seealso::
+
+ :ref:`ansible_faq`
+ Frequently asked questions
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/reference_appendices/interpreter_discovery.rst b/docs/docsite/rst/reference_appendices/interpreter_discovery.rst
new file mode 100644
index 00000000..9fa7d585
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/interpreter_discovery.rst
@@ -0,0 +1,51 @@
+.. _interpreter_discovery:
+
+Interpreter Discovery
+=====================
+
+Most Ansible modules that execute under a POSIX environment require a Python
+interpreter on the target host. Unless configured otherwise, Ansible will
+attempt to discover a suitable Python interpreter on each target host
+the first time a Python module is executed for that host.
+
+To control the discovery behavior:
+
+* for individual hosts and groups, use the ``ansible_python_interpreter`` inventory variable
+* globally, use the ``interpreter_python`` key in the ``[defaults]`` section of ``ansible.cfg``
+
+Use one of the following values:
+
+auto_legacy : (default in 2.8)
+ Detects the target OS platform, distribution, and version, then consults a
+ table listing the correct Python interpreter and path for each
+ platform/distribution/version. If an entry is found, and ``/usr/bin/python`` is absent, uses the discovered interpreter (and path). If an entry
+ is found, and ``/usr/bin/python`` is present, uses ``/usr/bin/python``
+ and issues a warning.
+ This exception provides temporary compatibility with previous versions of
+ Ansible that always defaulted to ``/usr/bin/python``, so if you have
+ installed Python and other dependencies at ``/usr/bin/python`` on some hosts,
+ Ansible will find and use them with this setting.
+ If no entry is found, or the listed Python is not present on the
+ target host, searches a list of common Python interpreter
+ paths and uses the first one found; also issues a warning that future
+ installation of another Python interpreter could alter the one chosen.
+
+auto : (future default in 2.12)
+ Detects the target OS platform, distribution, and version, then consults a
+ table listing the correct Python interpreter and path for each
+ platform/distribution/version. If an entry is found, uses the discovered
+ interpreter.
+ If no entry is found, or the listed Python is not present on the
+ target host, searches a list of common Python interpreter
+ paths and uses the first one found; also issues a warning that future
+ installation of another Python interpreter could alter the one chosen.
+
+auto_legacy_silent
+ Same as ``auto_legacy``, but does not issue warnings.
+
+auto_silent
+ Same as ``auto``, but does not issue warnings.
+
+You can still set ``ansible_python_interpreter`` to a specific path at any
+variable level (for example, in host_vars, in vars files, in playbooks, and so on).
+Setting a specific path completely disables automatic interpreter discovery; Ansible always uses the path specified.
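+
+For example, a minimal sketch of setting the global default in ``ansible.cfg``:
+
+.. code-block:: ini
+
+    [defaults]
+    interpreter_python = auto_silent
+
+A host-level ``ansible_python_interpreter`` set in inventory or a vars file overrides this value for that host.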
diff --git a/docs/docsite/rst/reference_appendices/logging.rst b/docs/docsite/rst/reference_appendices/logging.rst
new file mode 100644
index 00000000..6fbd0440
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/logging.rst
@@ -0,0 +1,14 @@
+**********************
+Logging Ansible output
+**********************
+
+By default Ansible sends output about plays, tasks, and module arguments to your screen (STDOUT) on the control node. If you want to capture Ansible output in a log, you have three options:
+
+* To save Ansible output in a single log on the control node, set the ``log_path`` :ref:`configuration file setting <intro_configuration>`. You may also want to set ``display_args_to_stdout``, which helps to differentiate similar tasks by including variable values in the Ansible output.
+* To save Ansible output in separate logs, one on each managed node, set the ``no_target_syslog`` and ``syslog_facility`` :ref:`configuration file settings <intro_configuration>`.
+* To save Ansible output to a secure database, use :ref:`Ansible Tower <ansible_tower>`. Tower allows you to review history based on hosts, projects, and particular inventories over time, using graphs and/or a REST API.
+
+Protecting sensitive data with ``no_log``
+=========================================
+
+If you save Ansible output to a log, you expose any secret data in your Ansible output, such as passwords and user names. To keep sensitive values out of your logs, mark tasks that expose them with the ``no_log: True`` attribute. However, the ``no_log`` attribute does not affect debugging output, so be careful not to debug playbooks in a production environment. See :ref:`keep_secret_data` for an example.
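+
+For example, a minimal sketch of a task guarded with ``no_log`` (the module arguments and variable are illustrative):
+
+.. code-block:: yaml
+
+    - name: set a user password without logging it
+      user:
+        name: bob
+        password: "{{ bob_password | password_hash('sha512') }}"
+      no_log: True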
diff --git a/docs/docsite/rst/reference_appendices/module_utils.rst b/docs/docsite/rst/reference_appendices/module_utils.rst
new file mode 100644
index 00000000..7fa4620c
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/module_utils.rst
@@ -0,0 +1,27 @@
+.. _ansible.module_utils:
+.. _module_utils:
+
+***************************************************************
+Ansible Reference: Module Utilities
+***************************************************************
+
+This page documents utilities intended to be helpful when writing
+Ansible modules in Python.
+
+
+AnsibleModule
+--------------
+
+To use this functionality, include ``from ansible.module_utils.basic import AnsibleModule`` in your module.
+
+.. autoclass:: ansible.module_utils.basic.AnsibleModule
+ :members:
+ :noindex:
+
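+For example, a minimal sketch of a module built on ``AnsibleModule`` (the module's argument and logic are illustrative):
+
+.. code-block:: python
+
+    from ansible.module_utils.basic import AnsibleModule
+
+
+    def main():
+        # Declare the arguments this module accepts.
+        module = AnsibleModule(
+            argument_spec=dict(
+                name=dict(type='str', required=True),
+            ),
+            supports_check_mode=True,
+        )
+        # Modules communicate with the controller by returning JSON.
+        module.exit_json(changed=False, msg='Hello %s' % module.params['name'])
+
+
+    if __name__ == '__main__':
+        main()
+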
+Basic
+------
+
+To use this functionality, include ``import ansible.module_utils.basic`` in your module.
+
+.. automodule:: ansible.module_utils.basic
+ :members:
diff --git a/docs/docsite/rst/reference_appendices/python_3_support.rst b/docs/docsite/rst/reference_appendices/python_3_support.rst
new file mode 100644
index 00000000..da06023c
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/python_3_support.rst
@@ -0,0 +1,95 @@
+================
+Python 3 Support
+================
+
+Ansible 2.5 and above work with Python 3. Previous to 2.5, using Python 3 was
+considered a tech preview. This topic discusses how to set up your controller and managed machines
+to use Python 3.
+
+.. note:: On the controller we support Python 3.5 or greater and Python 2.7 or greater. Module-side, we support Python 3.5 or greater and Python 2.6 or greater.
+
+On the controller side
+----------------------
+
+The easiest way to run :command:`/usr/bin/ansible` under Python 3 is to install it with the Python 3
+version of pip. This will make the default :command:`/usr/bin/ansible` run with Python 3:
+
+.. code-block:: shell
+
+ $ pip3 install ansible
+ $ ansible --version | grep "python version"
+ python version = 3.6.2 (default, Sep 22 2017, 08:28:09) [GCC 7.2.1 20170915 (Red Hat 7.2.1-2)]
+
+If you are running Ansible :ref:`from_source` and want to use Python 3 with your source checkout, run your
+command via ``python3``. For example:
+
+.. code-block:: shell
+
+ $ source ./hacking/env-setup
+ $ python3 $(which ansible) localhost -m ping
+ $ python3 $(which ansible-playbook) sample-playbook.yml
+
+.. note:: Individual Linux distribution packages may be packaged for Python2 or Python3. When running from
+ distro packages you'll only be able to use Ansible with the Python version for which it was
+ installed. Sometimes distros will provide a means of installing for several Python versions
+ (via a separate package or via some commands that are run after install). You'll need to check
+ with your distro to see if that applies in your case.
+
+
+Using Python 3 on the managed machines with commands and playbooks
+------------------------------------------------------------------
+
+* Ansible will automatically detect and use Python 3 on many platforms that ship with it. To explicitly configure a
+ Python 3 interpreter, set the ``ansible_python_interpreter`` inventory variable at a group or host level to the
+ location of a Python 3 interpreter, such as :command:`/usr/bin/python3`. The default interpreter path may also be
+ set in ``ansible.cfg``.
+
+.. seealso:: :ref:`interpreter_discovery` for more information.
+
+.. code-block:: ini
+
+ # Example inventory that makes an alias for localhost that uses Python3
+ localhost-py3 ansible_host=localhost ansible_connection=local ansible_python_interpreter=/usr/bin/python3
+
+ # Example of setting a group of hosts to use Python3
+ [py3-hosts]
+ ubuntu16
+ fedora27
+
+ [py3-hosts:vars]
+ ansible_python_interpreter=/usr/bin/python3
+
+.. seealso:: :ref:`intro_inventory` for more information.
+
+* Run your command or playbook:
+
+.. code-block:: shell
+
+ $ ansible localhost-py3 -m ping
+ $ ansible-playbook sample-playbook.yml
+
+
+Note that you can also use the ``-e`` command line option to manually
+set the Python interpreter when you run a command. This can be useful if you want to test whether
+a specific module or playbook has any bugs under Python 3. For example:
+
+.. code-block:: shell
+
+ $ ansible localhost -m ping -e 'ansible_python_interpreter=/usr/bin/python3'
+ $ ansible-playbook sample-playbook.yml -e 'ansible_python_interpreter=/usr/bin/python3'
+
+What to do if an incompatibility is found
+-----------------------------------------
+
+We have spent several releases squashing bugs and adding new tests so that Ansible's core feature
+set runs under both Python 2 and Python 3. However, bugs may still exist in edge cases and many of
+the modules shipped with Ansible are maintained by the community and not all of those may be ported
+yet.
+
+If you find a bug running under Python 3 you can submit a bug report on `Ansible's GitHub project
+<https://github.com/ansible/ansible/issues/>`_. Be sure to mention Python3 in the bug report so
+that the right people look at it.
+
+If you would like to fix the code and submit a pull request on github, you can
+refer to :ref:`developing_python_3` for information on how we fix
+common Python3 compatibility issues in the Ansible codebase.
diff --git a/docs/docsite/rst/reference_appendices/release_and_maintenance.rst b/docs/docsite/rst/reference_appendices/release_and_maintenance.rst
new file mode 100644
index 00000000..eef77130
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/release_and_maintenance.rst
@@ -0,0 +1,33 @@
+.. _release_and_maintenance:
+
+Release and maintenance
+=======================
+
+.. _release_cycle:
+.. _release_schedule:
+.. _support_life:
+.. _methods:
+.. _development_and_stable_version_maintenance_workflow:
+.. _release_changelogs:
+.. _release_freezing:
+
+Please go to `the devel release and maintenance page <https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html>`_ for up to date information.
+
+.. note::
+
+ This link takes you to a different version of the Ansible documentation. Use the version selection on the left or your browser back button to return to this version of the documentation.
+
+.. seealso::
+
+ :ref:`community_committer_guidelines`
+ Guidelines for Ansible core contributors and maintainers
+ :ref:`testing_strategies`
+ Testing strategies
+ :ref:`ansible_community_guide`
+ Community information and contributing
+ `Ansible release tarballs <https://releases.ansible.com/ansible/>`_
+ Ansible release tarballs
+ `Development Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Mailing list for development topics
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/reference_appendices/special_variables.rst b/docs/docsite/rst/reference_appendices/special_variables.rst
new file mode 100644
index 00000000..e4ecc177
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/special_variables.rst
@@ -0,0 +1,167 @@
+.. _special_variables:
+
+Special Variables
+=================
+
+Magic variables
+---------------
+These variables cannot be set directly by the user; Ansible will always override them to reflect internal state.
+
+ansible_check_mode
+ Boolean that indicates if we are in check mode or not
+
+ansible_config_file
+ The full path of the Ansible configuration file in use
+
+ansible_dependent_role_names
+ The names of the roles currently imported into the current play as dependencies of other roles
+
+ansible_diff_mode
+ Boolean that indicates if we are in diff mode or not
+
+ansible_forks
+ Integer reflecting the maximum number of forks available to this run
+
+ansible_inventory_sources
+ List of sources used as inventory
+
+ansible_limit
+ Contents of the ``--limit`` CLI option for the current execution of Ansible
+
+ansible_loop
+ A dictionary/map containing extended loop information when enabled via ``loop_control.extended``
+
+ansible_loop_var
+ The name of the value provided to ``loop_control.loop_var``. Added in ``2.8``
+
+ansible_index_var
+ The name of the value provided to ``loop_control.index_var``. Added in ``2.9``
+
+ansible_parent_role_names
+ When the current role is being executed by means of an :ref:`include_role <include_role_module>` or :ref:`import_role <import_role_module>` action, this variable contains a list of all parent roles, with the most recent role (in other words, the role that included/imported this role) being the first item in the list.
+ When multiple inclusions occur, the most recently included role is listed *first*; it is also possible for a specific role to appear more than once in this list.
+
+ For example: when role **A** includes role **B**, inside role B, ``ansible_parent_role_names`` will be equal to ``['A']``. If role **B** then includes role **C**, the list becomes ``['B', 'A']``.
+
+ansible_parent_role_paths
+ When the current role is being executed by means of an :ref:`include_role <include_role_module>` or :ref:`import_role <import_role_module>` action, this variable contains a list of all parent roles, with the most recent role (in other words, the role that included/imported this role) being the first item in the list.
+ Please refer to ``ansible_parent_role_names`` for the order of items in this list.
+
+ansible_play_batch
+ List of active hosts in the current play run limited by the serial, aka 'batch'. Failed/Unreachable hosts are not considered 'active'.
+
+ansible_play_hosts
+ List of hosts in the current play run, not limited by the serial. Failed/Unreachable hosts are included in this list.
+
+ansible_play_hosts_all
+ List of all the hosts that were targeted by the play
+
+ansible_play_role_names
+ The names of the roles currently imported into the current play. This list does **not** contain the role names that are
+ implicitly included via dependencies.
+
+ansible_playbook_python
+ The path to the python interpreter being used by Ansible on the controller
+
+ansible_role_names
+ The names of the roles currently imported into the current play, or roles referenced as dependencies of the roles
+ imported into the current play.
+
+ansible_role_name
+ The fully qualified collection role name, in the format of ``namespace.collection.role_name``
+
+ansible_collection_name
+ The name of the collection that the currently executing task is a part of, in the format ``namespace.collection``
+
+ansible_run_tags
+ Contents of the ``--tags`` CLI option, which specifies which tags will be included for the current run.
+
+ansible_search_path
+ Current search path for action plugins and lookups, in other words, where we search for relative paths when you do ``template: src=myfile``
+
+ansible_skip_tags
+ Contents of the ``--skip-tags`` CLI option, which specifies which tags will be skipped for the current run.
+
+ansible_verbosity
+ Current verbosity setting for Ansible
+
+ansible_version
+ Dictionary/map that contains information about the current running version of Ansible. It has the following keys: full, major, minor, revision, and string.
+
+group_names
+ List of groups the current host is part of
+
+groups
+ A dictionary/map with all the groups in inventory and each group has the list of hosts that belong to it
+
+hostvars
+ A dictionary/map with all the hosts in inventory and variables assigned to them
+
+inventory_hostname
+ The inventory name for the 'current' host being iterated over in the play
+
+inventory_hostname_short
+ The short version of `inventory_hostname`
+
+inventory_dir
+ The directory of the inventory source in which the `inventory_hostname` was first defined
+
+inventory_file
+ The file name of the inventory source in which the `inventory_hostname` was first defined
+
+omit
+ Special variable that allows you to 'omit' an option in a task, for example ``- user: name=bob home={{ bobs_home|default(omit) }}``
+
+play_hosts
+ Deprecated, the same as ansible_play_batch
+
+ansible_play_name
+ The name of the currently executed play. Added in ``2.8``.
+
+playbook_dir
+ The path to the directory of the playbook that was passed to the ``ansible-playbook`` command line.
+
+role_name
+ The name of the role currently being executed.
+
+role_names
+ Deprecated, the same as ansible_play_role_names
+
+role_path
+ The path to the dir of the currently running role
+
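+A quick way to inspect a few of these values is with the ``debug`` module, for example:
+
+.. code-block:: yaml
+
+    - name: show a few magic variables
+      debug:
+        msg: "{{ inventory_hostname }} is in {{ group_names }} (Ansible {{ ansible_version.full }})"
+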
+Facts
+-----
+These are variables that contain information pertinent to the current host (`inventory_hostname`). They are only available if gathered first. See :ref:`vars_and_facts` for more information.
+
+ansible_facts
+ Contains any facts gathered or cached for the `inventory_hostname`
+ Facts are normally gathered by the :ref:`setup <setup_module>` module automatically in a play, but any module can return facts.
+
+ansible_local
+ Contains any 'local facts' gathered or cached for the `inventory_hostname`.
+ The keys available depend on the custom facts created.
+ See the :ref:`setup <setup_module>` module and :ref:`local_facts` for more details.
+
+.. _connection_variables:
+
+Connection variables
+---------------------
+Connection variables are normally used to set the specifics on how to execute actions on a target. Most of them correspond to connection plugins, but not all are specific to them; other plugins like shell, terminal and become are normally involved.
+Only the common ones are described as each connection/become/shell/etc plugin can define its own overrides and specific variables.
+See :ref:`general_precedence_rules` for how connection variables interact with :ref:`configuration settings<ansible_configuration_settings>`, :ref:`command-line options<command_line_tools>`, and :ref:`playbook keywords<playbook_keywords>`.
+
+ansible_become_user
+ The user Ansible 'becomes' after using privilege escalation. This must be available to the 'login user'.
+
+ansible_connection
+ The connection plugin actually used for the task on the target host.
+
+ansible_host
+ The IP address or name of the target host to use instead of `inventory_hostname`.
+
+ansible_python_interpreter
+ The path to the Python executable Ansible should use on the target host.
+
+ansible_user
+ The user Ansible 'logs in' as.
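+
+For example, a sketch of setting connection variables for a single host in an INI inventory (the host name and values are illustrative):
+
+.. code-block:: ini
+
+    web1 ansible_host=192.0.2.10 ansible_user=deploy ansible_python_interpreter=/usr/bin/python3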
diff --git a/docs/docsite/rst/reference_appendices/test_strategies.rst b/docs/docsite/rst/reference_appendices/test_strategies.rst
new file mode 100644
index 00000000..01da667a
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/test_strategies.rst
@@ -0,0 +1,275 @@
+.. _testing_strategies:
+
+Testing Strategies
+==================
+
+.. _testing_intro:
+
+Integrating Testing With Ansible Playbooks
+``````````````````````````````````````````
+
+People often ask, "how can I best integrate testing with Ansible playbooks?" There are many options. Ansible is designed
+as a "fail-fast", ordered system, which makes it easy to embed testing directly in Ansible playbooks. In this chapter,
+we'll go over some patterns for integrating tests of infrastructure and discuss the right level of testing that may be appropriate.
+
+.. note:: This is a chapter about testing the application you are deploying, not the chapter on how to test Ansible modules during development. For that content, please hop over to the Development section.
+
+By incorporating a degree of testing into your deployment workflow, there will be fewer surprises when code hits production and, in many cases,
+tests can be leveraged in production to prevent failed updates from migrating across an entire installation. Since Ansible is push-based, it is
+also very easy to run the steps on localhost or on testing servers. Ansible lets you insert as many checks and balances into your upgrade workflow as you would like to have.
+
+The Right Level of Testing
+``````````````````````````
+
+Ansible resources are models of desired-state. As such, it should not be necessary to test that services are started, packages are
+installed, or other such things. Ansible is the system that will ensure these things are declaratively true. Instead, assert these
+things in your playbooks.
+
+.. code-block:: yaml
+
+ tasks:
+ - service:
+ name: foo
+ state: started
+ enabled: yes
+
+If you think the service may not be started, the best thing to do is request it to be started. If the service fails to start, Ansible
+will report an error appropriately. (This should not be confused with whether the service is doing something functional, which we'll show how to
+test later.)
+
+.. _check_mode_drift:
+
+Check Mode As A Drift Test
+``````````````````````````
+
+In the above setup, `--check` mode in Ansible can be used as a layer of testing as well. If running a deployment playbook against an
+existing system, using the `--check` flag to the `ansible` command will report if Ansible thinks it would have had to have made any changes to
+bring the system into a desired state.
+
+This can let you know up front if there is any need to deploy onto the given system. Ordinarily scripts and commands don't run in check mode, so if you
+want certain steps to execute in normal mode even when the `--check` flag is used, such as calls to the script module, disable check mode for those tasks::
+
+
+ roles:
+ - webserver
+
+ tasks:
+ - script: verify.sh
+ check_mode: no
+
+Modules That Are Useful for Testing
+```````````````````````````````````
+
+Certain playbook modules are particularly good for testing. Below is an example that ensures a port is open::
+
+ tasks:
+
+ - wait_for:
+ host: "{{ inventory_hostname }}"
+ port: 22
+ delegate_to: localhost
+
+Here's an example of using the URI module to make sure a web service returns::
+
+ tasks:
+
+ - action: uri url=http://www.example.com return_content=yes
+ register: webpage
+
+ - fail:
+ msg: 'service is not happy'
+ when: "'AWESOME' not in webpage.content"
+
+It's easy to push an arbitrary script (in any language) on a remote host and the script will automatically fail if it has a non-zero return code::
+
+ tasks:
+
+ - script: test_script1
+ - script: test_script2 --parameter value --parameter2 value
+
+If using roles (you should be, roles are great!), scripts pushed by the script module can live in the 'files/' directory of a role.
+
+And the assert module makes it very easy to validate various kinds of truth::
+
+ tasks:
+
+ - shell: /usr/bin/some-command --parameter value
+ register: cmd_result
+
+ - assert:
+ that:
+ - "'not ready' not in cmd_result.stderr"
+ - "'gizmo enabled' in cmd_result.stdout"
+
+Should you feel the need to test for existence of files that are not declaratively set by your Ansible configuration, the 'stat' module is a great choice::
+
+ tasks:
+
+ - stat:
+ path: /path/to/something
+ register: p
+
+ - assert:
+ that:
+ - p.stat.exists and p.stat.isdir
+
+
+As mentioned above, there's no need to check things like the return codes of commands. Ansible is checking them automatically.
+Rather than checking for a user to exist, consider using the user module to make it exist.
+
+Ansible is a fail-fast system, so when there is an error creating that user, it will stop the playbook run. You do not have
+to check up behind it.
+
+Testing Lifecycle
+`````````````````
+
+If writing some degree of basic validation of your application into your playbooks, they will run every time you deploy.
+
+As such, deploying into a local development VM and a staging environment will both validate that things are according to plan
+ahead of your production deploy.
+
+Your workflow may be something like this::
+
+ - Use the same playbook all the time with embedded tests in development
+ - Use the playbook to deploy to a staging environment (with the same playbooks) that simulates production
+ - Run an integration test battery written by your QA team against staging
+ - Deploy to production, with the same integrated tests.
+
+Something like an integration test battery should be written by your QA team if you are a production webservice. This would include
+things like Selenium tests or automated API tests and would usually not be something embedded into your Ansible playbooks.
+
+However, it does make sense to include some basic health checks into your playbooks, and in some cases it may be possible to run
+a subset of the QA battery against remote nodes. This is what the next section covers.
+
+Integrating Testing With Rolling Updates
+````````````````````````````````````````
+
+If you have read into :ref:`playbooks_delegation` it may quickly become apparent that the rolling update pattern can be extended, and you
+can use the success or failure of the playbook run to decide whether to add a machine into a load balancer or not.
+
+This is the great culmination of embedded tests::
+
+ ---
+
+ - hosts: webservers
+ serial: 5
+
+ pre_tasks:
+
+ - name: take out of load balancer pool
+ command: /usr/bin/take_out_of_pool {{ inventory_hostname }}
+ delegate_to: 127.0.0.1
+
+ roles:
+
+ - common
+ - webserver
+ - apply_testing_checks
+
+ post_tasks:
+
+ - name: add back to load balancer pool
+ command: /usr/bin/add_back_to_pool {{ inventory_hostname }}
+ delegate_to: 127.0.0.1
+
+Of course in the above, the "take out of the pool" and "add back" steps would be replaced with a call to an Ansible load balancer
+module or an appropriate shell command. You might also have steps that use a monitoring module to start and end an outage window
+for the machine.
+
+However, what you can see from the above is that tests are used as a gate -- if the "apply_testing_checks" step is not performed,
+the machine will not go back into the pool.
+
+Read about ``max_fail_percentage`` in the delegation chapter; with it, you can also control how many failing tests will stop a rolling update
+from proceeding.
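+
+A sketch of how that might look in the play above (the percentage is illustrative)::
+
+    - hosts: webservers
+      serial: 5
+      max_fail_percentage: 20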
+
+The above approach can also be modified to run a step from a testing machine remotely against a machine::
+
+ ---
+
+ - hosts: webservers
+ serial: 5
+
+ pre_tasks:
+
+ - name: take out of load balancer pool
+ command: /usr/bin/take_out_of_pool {{ inventory_hostname }}
+ delegate_to: 127.0.0.1
+
+ roles:
+
+ - common
+ - webserver
+
+ tasks:
+ - script: /srv/qa_team/app_testing_script.sh --server {{ inventory_hostname }}
+ delegate_to: testing_server
+
+ post_tasks:
+
+ - name: add back to load balancer pool
+ command: /usr/bin/add_back_to_pool {{ inventory_hostname }}
+ delegate_to: 127.0.0.1
+
+In the above example, a script is run from the testing server against a remote node prior to bringing it back into
+the pool.
+
+In the event of a problem, fix the few servers that fail using Ansible's automatically generated
+retry file to repeat the deploy on just those servers.
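+
+For example (the playbook name is illustrative)::
+
+    ansible-playbook site.yml --limit @site.retry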
+
+Achieving Continuous Deployment
+```````````````````````````````
+
+If desired, the above techniques may be extended to enable continuous deployment practices.
+
+The workflow may look like this::
+
+ - Write and use automation to deploy local development VMs
+ - Have a CI system like Jenkins deploy to a staging environment on every code change
+ - The deploy job calls testing scripts to pass/fail a build on every deploy
+ - If the deploy job succeeds, it runs the same deploy playbook against production inventory
+
+Some Ansible users use the above approach to deploy a half-dozen or a dozen times an hour without taking all of their infrastructure
+offline. A culture of automated QA is vital if you wish to get to this level.
+
+If you are still doing a large amount of manual QA, you may decide to keep deploying manually as well, but it can still help to work in the
+rolling update patterns of the previous section and incorporate some basic health checks using
+modules like ``script``, ``stat``, ``uri``, and ``assert``.
+
+Conclusion
+``````````
+
+With Ansible, you should not need another framework to validate that basic things about your infrastructure are true. This is the case
+because Ansible is an order-based system that will fail immediately on unhandled errors for a host, and prevent further configuration
+of that host. This forces errors to the top and shows them in a summary at the end of the Ansible run.
+
+However, because Ansible is designed as a multi-tier orchestration system, it is very easy to incorporate tests into the end of
+a playbook run, either using loose tasks or roles. When used with rolling updates, testing steps can decide whether to put a machine
+back into a load balanced pool or not.
+
+Finally, because Ansible errors propagate all the way up to the return code of the Ansible program itself, and Ansible by default
+runs in an easy push-based mode, Ansible is a great step to put into a build environment if you wish to use it to roll out systems
+as part of a Continuous Integration/Continuous Delivery pipeline, as is covered in sections above.
+
+The focus should not be on infrastructure testing, but on application testing, so we strongly encourage getting together with your
+QA team and asking what sort of tests would make sense to run every time you deploy development VMs, and which sort of tests they would like
+to run against the staging environment on every deploy. Obviously at the development stage, unit tests are great too. But don't unit
+test your playbook. Ansible describes states of resources declaratively, so you don't have to. If there are cases where you want
+to be sure of something though, that's great, and things like stat/assert are great go-to modules for that purpose.
+
+In all, testing is a very organizational and site-specific thing. Everybody should be doing it, but what makes the most sense for your
+environment will vary with what you are deploying and who is using it -- but everyone benefits from a more robust and reliable deployment
+system.
+
+.. seealso::
+
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_delegation`
+ Delegation, useful for working with load balancers, clouds, and locally executed steps.
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
+
diff --git a/docs/docsite/rst/reference_appendices/tower.rst b/docs/docsite/rst/reference_appendices/tower.rst
new file mode 100644
index 00000000..0ef8fe7d
--- /dev/null
+++ b/docs/docsite/rst/reference_appendices/tower.rst
@@ -0,0 +1,13 @@
+.. _ansible_tower:
+
+Red Hat Ansible Tower
+=====================
+
+`Red Hat Ansible Tower <https://www.ansible.com/products/tower>`_ is a web console and REST API for operationalizing Ansible across your team, organization, and enterprise. It's designed to be the hub for all of your automation tasks.
+
+Ansible Tower gives you role-based access control, including control over the use of securely stored credentials for SSH and other services. You can sync your Ansible Tower inventory with a wide variety of cloud sources, and powerful multi-playbook workflows allow you to model
+complex processes.
+
+It logs all of your jobs, integrates well with LDAP, SAML, and other authentication sources, and has an amazing browsable REST API. Command line tools are available for easy integration with Jenkins as well.
+
+Ansible Tower is the downstream, Red Hat-supported product version of Ansible AWX. Find out more about Ansible Tower features and how to download it on the `Ansible Tower webpage <https://www.ansible.com/products/tower>`_. Ansible Tower is part of the Red Hat Ansible Automation subscription, and comes bundled with amazing support from Red Hat, Inc.
diff --git a/docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst b/docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst
new file mode 100644
index 00000000..dd845bad
--- /dev/null
+++ b/docs/docsite/rst/roadmap/COLLECTIONS_2_10.rst
@@ -0,0 +1,46 @@
+====================
+Ansible project 2.10
+====================
+
+This release schedule includes dates for the `ansible <https://pypi.org/project/ansible/>`_ package, with a few dates for the `ansible-base <https://pypi.org/project/ansible-base/>`_ package as well. All dates are subject to change. See :ref:`base_roadmap_2_10` for the most recent updates on ansible-base.
+
+.. contents::
+ :local:
+
+Release Schedule
+----------------
+
+.. note:: Dates subject to change.
+.. note:: We plan to post weekly alpha releases to the `PyPI ansible project <https://pypi.org/project/ansible/>`_ for testing.
+
+.. warning::
+ We initially were going to have feature freeze on 2020-08-18. We tried this but decided to
+ change course. Instead, we'll enter feature freeze when ansible-2.10.0 beta1 is released.
+
+- 2020-06-23: ansible-2.10 alpha freeze.
+ No net new collections will be added to the ``ansible-2.10`` package after this date.
+- 2020-07-10: Ansible collections freeze date for content shuffling.
+ Content should be in its final collection for the ansible-2.10 series of releases. No more content should move out of the ``community.general`` or ``community.network`` collections.
+- 2020-08-13: ansible-base 2.10 Release date, see :ref:`base_roadmap_2_10`.
+- 2020-08-14: final ansible-2.10 alpha.
+- 2020-09-01: ansible-2.10.0 beta1 and feature freeze.
+
+ - No new modules or major features will be added after this date. In practice this means we will freeze the semver collection versions to compatible release versions. For example, if the version of community.crypto on this date was community-crypto-1.1.0; ansible-2.10.0 could ship with community-crypto-1.1.1. It would not ship with community-crypto-1.2.0.
+
+- 2020-09-08: ansible-2.10.0 beta2.
+- 2020-09-15: ansible-2.10.0 rc1 and final freeze.
+
+ - After this date only changes blocking a release are accepted.
+ - Collections will only be updated to a new version if a blocker is approved. Collection owners should discuss any blockers at the community IRC meeting (on 9-17) to decide whether to bump the version of the collection for a fix. See the `Community IRC meeting agenda <https://github.com/ansible/community/issues/539>`_.
+
+**Additional release candidates will be published as needed as blockers are fixed.**
+
+- 2020-09-22: ansible-2.10 GA release date.
+
+Ansible-2.10.x patch releases will occur roughly every three weeks if changes to collections have been made or if it is deemed necessary to force an upgrade to a later ansible-base-2.10.x. Ansible-2.10.x patch releases may contain new features but not backwards incompatibilities. In practice, this means we will include new collection versions where either the patch or the minor version number has changed but not when the major number has changed (example: Ansible-2.10 ships with community-crypto-1.1.0; ansible-2.10.1 may ship with community-crypto-1.2.0 but would not ship with community-crypto-2.0.0).
+
+Breaking changes may be introduced in ansible-2.11.0 although we encourage collection owners to use deprecation periods that will show up in at least one Ansible release before being changed incompatibly.
+
+The rough schedule for Ansible-2.11 and beyond (such as how many months we'll aim for between versions) is still to be discussed and likely will be made after 2.10.0 has been released.
+
+For more information, reach out on a mailing list or an IRC channel - see :ref:`communication` for more details.
diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_10.rst b/docs/docsite/rst/roadmap/ROADMAP_2_10.rst
new file mode 100644
index 00000000..d303ca46
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ROADMAP_2_10.rst
@@ -0,0 +1,51 @@
+.. _base_roadmap_2_10:
+
+=================
+Ansible-base 2.10
+=================
+
+.. contents::
+ :local:
+
+Release Schedule
+----------------
+
+Expected
+========
+
+PRs must be raised well in advance of the dates below to have a chance of being included in this ansible-base release.
+
+.. note:: There is no Alpha phase in 2.10.
+.. note:: Dates subject to change.
+
+- 2020-06-16 Beta 1 **Feature freeze**
+ No new functionality (including modules/plugins) to any code
+
+- 2020-07-21 Release Candidate 1 (bumped from 2020-07-14)
+- 2020-07-24 Release Candidate 2
+- 2020-07-25 Release Candidate 3
+- 2020-07-30 Release Candidate 4
+- 2020-08-13 Release
+
+Release Manager
+---------------
+
+@sivel
+
+Planned work
+============
+
+- Migrate non-base plugins and modules from the ``ansible/ansible`` repository to smaller collection repositories
+- Add functionality to ease transition to collections, such as automatic redirects from the 2.9 names to the new FQCN of the plugin
+- Create new ``ansible-base`` package representing the ``ansible/ansible`` repository
+
+Additional Resources
+====================
+
+The 2.10 release of Ansible will fundamentally change the scope of plugins included in the ``ansible/ansible`` repository, by
+moving many of the plugins into smaller collection repositories that will be shipped through https://galaxy.ansible.com/
+
+The following links have more information about this process:
+
+- https://groups.google.com/d/msg/ansible-devel/oKqgCeYTs-M/cHrOgMw8CAAJ
+- https://github.com/ansible-collections/overview/blob/main/README.rst
diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_5.rst b/docs/docsite/rst/roadmap/ROADMAP_2_5.rst
new file mode 100644
index 00000000..34d376ce
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ROADMAP_2_5.rst
@@ -0,0 +1,142 @@
+===========
+Ansible 2.5
+===========
+**Core Engine Freeze and Module Freeze: 22 January 2018**
+
+**Core and Curated Module Freeze: 22 January 2018**
+
+**Community Module Freeze: 7 February 2018**
+
+**Release Candidate 1 will be 21 February, 2018**
+
+**Target: March 2018**
+
+**Service Release schedule: every 2-3 weeks**
+
+.. contents:: Topics
+
+Release Manager
+---------------
+Matt Davis (IRC/GitHub: @nitzmahone)
+
+
+Engine improvements
+-------------------
+- Assemble module improvements
+ - assemble currently just skips in check mode; it should be able to test whether there is a difference and report changed=true/false.
+ - The same applies to diff mode; it should work as the template module does
+- Handle Password reset prompts cleaner
+- Tasks stats for rescues and ignores
+- Normalize temp dir usage across all subsystems
+- Add option to set playbook dir for adhoc, inventory and console to allow for 'relative path loading'
+
+
+Ansible-Config
+--------------
+- Extend config to more plugin types and update plugins to support the new config
+
+Inventory
+---------
+- ansible-inventory option to output group variable assignment and data (--export)
+- Create inventory plugins for:
+ - aws
+
+Facts
+-----
+- Namespacing fact variables (via a config option) implemented in ansible/ansible PR `#18445 <https://github.com/ansible/ansible/pull/18445>`_.
+ Proposal found in ansible/proposals issue `#17 <https://github.com/ansible/proposals/issues/17>`_.
+- Make fact collectors and gather_subset specs finer grained
+- Eliminate unneeded deps between fact collectors
+- Allow fact collectors to indicate if they need information from another fact collector to be gathered first.
+
+Static Loop Keyword
+-------------------
+
+- A simpler alternative to ``with_``, ``loop:`` only takes a list
+- Remove complexity from loops, lookups are still available to users
+- Less confusing to have a static directive versus one that is dynamic depending on the plugins loaded.
+
+Vault
+-----
+- Vault secrets client inc new 'keyring' client
+
+Runtime Check on Modules for Blacklisting
+-----------------------------------------
+- Filter on things like "supported_by" in module metadata
+- Provide users with an option of "warning, error or allow/ignore"
+- Configurable via ansible.cfg and environment variable
+
+Windows
+-------
+- Implement gather_subset on Windows facts
+- Fix Windows async + become to allow them to work together
+- Implement Windows become flags for controlling various modes **(done)**
+ - logontype
+ - elevation behavior
+- Convert win_updates to action plugin for auto reboot and extra features **(done)**
+- Spike out changing the connection over to PSRP instead of WSMV **(done- it's possible)**
+- Module updates
+
+ - win_updates **(done)**
+
+ - Fix win_updates to detect (or request) become
+ - Add whitelist/blacklist features to win_updates
+ - win_dsc further improvements **(done)**
+
+General Cloud
+-------------
+- Make multi-cloud provisioning easier
+- Diff mode will output provisioning task results of ansible-playbook runs
+- Terraform module
+
+AWS
+---
+- Focus on pull requests for various modules
+- Triage existing merges for modules
+- Module work
+
+ - ec2_instance
+ - ec2_vpc: Allow the addition of secondary IPv4 CIDRS to existing VPCs.
+ - AWS Network Load Balancer support (NLB module, ASG support, and so on)
+ - rds_instance
+
+Azure
+-----
+- Azure CLI auth **(done)**
+- Fix Azure module results to have "high-level" output instead of raw REST API dictionary **(partial, more to come in 2.6)**
+- Deprecate Azure automatic storage accounts in azure_rm_virtualmachine **(breaks on Azure Stack, punted until AS supports managed disks)**
+
+Network Roadmap
+---------------
+- Refactor common network shared code into package **(done)**
+- Convert various nxos modules to leverage declarative intent **(done)**
+- Refactor various modules to leverage the cliconf plugin **(done)**
+- Add various missing declarative modules for supported platforms and functions **(done)**
+- Implement a feature that handles platform differences and feature unavailability **(done)**
+- netconf-config.py should provide control for deployment strategy
+- Create netconf connection plugin **(done)**
+- Create netconf fact module
+- Turn network_cli into a usable connection type **(done)**
+- Implements jsonrpc message passing for ansible-connection **(done)**
+- Improve logging for ansible-connection **(done)**
+- Improve stdout output for failures whilst using persistent connection **(done)**
+- Create IOS-XR NetConf Plugin and refactor iosxr modules to leverage netconf plugin **(done)**
+- Refactor junos modules to use netconf plugin **(done)**
+- Filters: Add a filter to convert XML response from a network device to JSON object **(done)**
+
+Documentation
+-------------
+- Extend documentation to more plugins
+- Document vault-password-client scripts.
+- Network Documentation
+
+ - New landing page (to replace intro_networking) **(done)**
+ - Platform specific guides **(done)**
+ - Walk through: Getting Started **(done)**
+ - Networking and ``become`` **(done)**
+ - Best practice **(done)**
+
+Contributor Quality of Life
+---------------------------
+- Finish PSScriptAnalyer integration with ansible-test (for enforcing Powershell style) **(done)**
+- Resolve issues requiring skipping of some integration tests on Python 3.
diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_6.rst b/docs/docsite/rst/roadmap/ROADMAP_2_6.rst
new file mode 100644
index 00000000..49a6ebab
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ROADMAP_2_6.rst
@@ -0,0 +1,82 @@
+===========
+Ansible 2.6
+===========
+
+.. contents:: Topics
+
+Release Schedule
+----------------
+
+Actual
+======
+
+- 2018-05-17 Core Freeze (Engine and Core Modules/Plugins)
+- 2018-05-21 Alpha Release 1
+- 2018-05-25 Community Freeze (Non-Core Modules/Plugins)
+- 2018-05-25 Branch stable-2.6
+- 2018-05-30 Alpha Release 2
+- 2018-06-05 Release Candidate 1
+- 2018-06-08 Release Candidate 2
+- 2018-06-18 Release Candidate 3
+- 2018-06-25 Release Candidate 4
+- 2018-06-26 Release Candidate 5
+- 2018-06-28 Final Release
+
+
+Release Manager
+---------------
+* 2.6.0-2.6.12 Matt Clay (IRC/GitHub: @mattclay)
+* 2.6.13+ Toshio Kuratomi (IRC: abadger1999; GitHub: @abadger)
+
+
+Engine improvements
+-------------------
+
+- Version 2.6 is largely going to be a stabilization release for Core code.
+- Items covered in this release include, but are not limited to, the following:
+
+ - ``ansible-inventory``
+ - ``import_*``
+ - ``include_*``
+ - Test coverage
+ - Performance Testing
+
+Core Modules
+------------
+- Adopt-a-module Campaign
+
+ - Review current status of all Core Modules
+ - Reduce backlog of open issues against these modules
+
+Cloud Modules
+-------------
+
+Network
+-------
+
+Connection work
+================
+
+* New connection plugin: eAPI `proposal#102 <https://github.com/ansible/proposals/issues/102>`_
+* New connection plugin: NX-API
+* Support for configurable options for network_cli & netconf
+
+Modules
+=======
+
+* New ``net_get`` - platform agnostic module for pulling configuration via SCP/SFTP over network_cli
+* New ``net_put`` - platform agnostic module for pushing configuration via SCP/SFTP over network_cli
+* New ``netconf_get`` - Netconf module to fetch configuration and state data `proposal#104 <https://github.com/ansible/proposals/issues/104>`_
+
+Other Features
+================
+
+* Stretch & tech preview: Configuration caching for network_cli. Opt-in feature to avoid ``show running`` performance hit
+
+
+Windows
+-------
+
+
+
+
diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_7.rst b/docs/docsite/rst/roadmap/ROADMAP_2_7.rst
new file mode 100644
index 00000000..bf65dcf7
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ROADMAP_2_7.rst
@@ -0,0 +1,109 @@
+===========
+Ansible 2.7
+===========
+
+.. contents:: Topics
+
+Release Schedule
+----------------
+
+Expected
+========
+
+- 2018-08-23 Core Freeze (Engine and Core Modules/Plugins)
+- 2018-08-23 Alpha Release 1
+- 2018-08-30 Community Freeze (Non-Core Modules/Plugins)
+- 2018-08-30 Beta Release 1
+- 2018-09-06 Release Candidate 1 (If needed)
+- 2018-09-13 Release Candidate 2 (If needed)
+- 2018-09-20 Release Candidate 3 (If needed)
+- 2018-09-27 Release Candidate 4 (If needed)
+- 2018-10-04 General Availability
+
+Release Manager
+---------------
+Toshio Kuratomi (IRC: abadger1999; GitHub: @abadger)
+
+
+Cleaning Duty
+-------------
+
+- Drop Py2.6 for controllers `Docs PR #42971 <https://github.com/ansible/ansible/pull/42971>`_ and
+ `issue #42972 <https://github.com/ansible/ansible/issues/42972>`_
+- Remove dependency on simplejson `issue #42761 <https://github.com/ansible/ansible/issues/42761>`_
+
+
+Engine Improvements
+-------------------
+
+- Performance improvement invoking Python modules `pr #41749 <https://github.com/ansible/ansible/pull/41749>`_
+- Jinja native types will allow users to render Python native types. `pr #32738 <https://github.com/ansible/ansible/pull/32738>`_
+
+
+Core Modules
+------------
+
+- Include feature changes and improvements
+
+ - Create new argument ``apply`` that will allow included tasks to inherit explicitly provided attributes. `pr #39236 <https://github.com/ansible/ansible/pull/39236>`_
+ - Create "private" functionality for allowing vars/defaults to be exposed outside of roles. `pr #41330 <https://github.com/ansible/ansible/pull/41330>`_
+- Provide a parameter for the ``template`` module to output to different encoding formats `pr
+ #42171 <https://github.com/ansible/ansible/pull/42171>`_
+- ``reboot`` module for Linux hosts (@samdoran) `pr #35205 <https://github.com/ansible/ansible/pull/35205>`_
+
+Cloud Modules
+-------------
+
+General
+=======
+* Cloud auth plugin `proposal #24 <https://github.com/ansible/proposals/issues/24>`_
+
+AWS
+===
+* Inventory plugin for RDS `pr #41919 <https://github.com/ansible/ansible/pull/41919>`_
+* Count support for `ec2_instance`
+* `aws_eks` module `pr #41183 <https://github.com/ansible/ansible/pull/41183>`_
+* Cloudformation stack sets support (`PR#41669 <https://github.com/ansible/ansible/pull/41669>`_)
+* RDS instance and snapshot modules `pr #39994 <https://github.com/ansible/ansible/pull/39994>`_ `pr #43789 <https://github.com/ansible/ansible/pull/43789>`_
+* Diff mode improvements for cloud modules `pr #44533 <https://github.com/ansible/ansible/pull/44533>`_
+
+Azure
+=====
+
+* Azure inventory plugin `issue #42769 <https://github.com/ansible/ansible/issues/42769>`__
+
+
+Network
+-------
+
+General
+=======
+
+* Refactor the APIs in cliconf (`issue #39056 <https://github.com/ansible/ansible/issues/39056>`_) and netconf (`issue #39160 <https://github.com/ansible/ansible/issues/39160>`_) plugins so that they have a uniform signature across supported network platforms. **done**
+ (`PR #41846 <https://github.com/ansible/ansible/pull/41846>`_) (`PR #43643 <https://github.com/ansible/ansible/pull/43643>`_) (`PR #43837 <https://github.com/ansible/ansible/pull/43837>`_)
+ (`PR #43203 <https://github.com/ansible/ansible/pull/43203>`_) (`PR #42300 <https://github.com/ansible/ansible/pull/42300>`_) (`PR #44157 <https://github.com/ansible/ansible/pull/44157>`_)
+
+Modules
+=======
+
+* New ``cli_config`` module `issue #39228 <https://github.com/ansible/ansible/issues/39228>`_ **done** `PR #42413 <https://github.com/ansible/ansible/pull/42413>`_.
+* New ``cli_command`` module `issue #39284 <https://github.com/ansible/ansible/issues/39284>`_
+* Refactor ``netconf_config`` module to add additional functionality. **done** `proposal #104 <https://github.com/ansible/proposals/issues/104>`_ (`PR #44379 <https://github.com/ansible/ansible/pull/44379>`_)
+
+Windows
+-------
+
+General
+=======
+
+* Added new connection plugin that uses PSRP as the connection protocol `pr #41729 <https://github.com/ansible/ansible/pull/41729>`__
+
+Modules
+=======
+
+* Revamp Chocolatey to fix bugs and support offline installation `pr #43013 <https://github.com/ansible/ansible/pull/43013>`_.
+* Add Chocolatey modules that can manage the following Chocolatey features
+
+ * `Sources <https://chocolatey.org/docs/commands-sources>`_ `pr #42790 <https://github.com/ansible/ansible/pull/42790>`_
+ * `Features <https://chocolatey.org/docs/chocolatey-configuration#features>`_ `pr #42848 <https://github.com/ansible/ansible/pull/42848>`_
+ * `Config <https://chocolatey.org/docs/chocolatey-configuration#config-settings>`_ `pr #42915 <https://github.com/ansible/ansible/pull/42915>`_
diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_8.rst b/docs/docsite/rst/roadmap/ROADMAP_2_8.rst
new file mode 100644
index 00000000..04977aa7
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ROADMAP_2_8.rst
@@ -0,0 +1,38 @@
+===========
+Ansible 2.8
+===========
+
+.. contents::
+ :local:
+
+Release Schedule
+----------------
+
+Expected
+========
+
+PRs must be raised well in advance of the dates below to have a chance of being included in this Ansible release.
+
+- 2019-04-04 Alpha 1 **Core freeze**
+ No new features to ``support:core`` code.
+ Includes no new options to existing Core modules
+
+- 2019-04-11 Beta 1 **Feature freeze**
+ No new functionality (including modules/plugins) to any code
+
+- 2019-04-25 Release Candidate 1
+- 2019-05-02 Release Candidate 2
+- 2019-05-10 Release Candidate 3
+- 2019-05-16 Release
+
+
+
+Release Manager
+---------------
+
+Toshio Kuratomi (IRC: abadger1999; GitHub: @abadger)
+
+Planned work
+============
+
+See the `Ansible 2.8 Project Board <https://github.com/ansible/ansible/projects/30>`_
diff --git a/docs/docsite/rst/roadmap/ROADMAP_2_9.rst b/docs/docsite/rst/roadmap/ROADMAP_2_9.rst
new file mode 100644
index 00000000..370930ac
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ROADMAP_2_9.rst
@@ -0,0 +1,39 @@
+===========
+Ansible 2.9
+===========
+
+.. contents::
+ :local:
+
+Release Schedule
+----------------
+
+Expected
+========
+
+PRs must be raised well in advance of the dates below to have a chance of being included in this Ansible release.
+
+.. note:: There is no Alpha phase in 2.9.
+
+- 2019-08-29 Beta 1 **Feature freeze**
+ No new functionality (including modules/plugins) to any code
+
+- 2019-09-19 Release Candidate 1
+- 2019-10-03 Release Candidate 2
+- 2019-10-10 Release Candidate 3
+- 2019-10-17 Release Candidate 4 (if needed)
+- 2019-10-24 Release Candidate 5 (if needed)
+- 2019-10-31 Release
+
+
+
+Release Manager
+---------------
+TBD
+
+Temporarily, Matt Davis (@nitzmahone) or Matt Clay (@mattclay) on IRC or GitHub.
+
+Planned work
+============
+
+See the `Ansible 2.9 Project Board <https://github.com/ansible/ansible/projects/34>`_
diff --git a/docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst b/docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst
new file mode 100644
index 00000000..4cf7740c
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ansible_base_roadmap_index.rst
@@ -0,0 +1,27 @@
+.. _ansible_base_roadmaps:
+
+ansible-base Roadmap
+=====================
+
+The ``ansible-base`` team develops a roadmap for each major and minor ``ansible-base`` release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for subminor versions. So 2.10 and 2.11 have roadmaps, but 2.10.1 does not.
+
+We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions.
+
+Each roadmap offers a *best guess*, based on the ``ansible-base`` team's experience and on requests and feedback from the community, of what will be included in a given release. However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, and so on.
+
+Each roadmap is published both as an idea of what is upcoming in ``ansible-base``, and as a medium for seeking further feedback from the community.
+
+You can submit feedback on the current roadmap in multiple ways:
+
+- Edit the agenda of an IRC `Core Team Meeting <https://github.com/ansible/community/blob/master/meetings/README.md>`_ (preferred)
+- Post on the ``#ansible-devel`` Freenode IRC channel
+- Email the ansible-devel list
+
+See :ref:`Ansible communication channels <communication>` for details on how to join and use the email lists and IRC channels.
+
+.. toctree::
+ :maxdepth: 1
+ :glob:
+ :caption: ansible-base Roadmaps
+
+ ROADMAP_2_10
diff --git a/docs/docsite/rst/roadmap/ansible_roadmap_index.rst b/docs/docsite/rst/roadmap/ansible_roadmap_index.rst
new file mode 100644
index 00000000..d350023b
--- /dev/null
+++ b/docs/docsite/rst/roadmap/ansible_roadmap_index.rst
@@ -0,0 +1,26 @@
+.. _ansible_roadmaps:
+
+Ansible Roadmap
+===============
+
+The Ansible team develops a roadmap for each major and minor Ansible release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for subminor versions. So 2.10 and 2.11 have roadmaps, but 2.10.1 does not.
+
+We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions.
+
+Each roadmap offers a *best guess*, based on the Ansible team's experience and on requests and feedback from the community, of what will be included in a given release. However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, and so on.
+
+Each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community.
+
+You can submit feedback on the current roadmap in multiple ways:
+
+- Edit the agenda of an IRC `Ansible Community Meeting <https://github.com/ansible/community/issues/539>`_ (preferred)
+- Post on the ``#ansible-community`` Freenode IRC channel
+
+See :ref:`Ansible communication channels <communication>` for details on how to join and use the IRC channels.
+
+.. toctree::
+ :maxdepth: 1
+ :glob:
+ :caption: Ansible Release Roadmaps
+
+ COLLECTIONS_2_10
diff --git a/docs/docsite/rst/roadmap/index.rst b/docs/docsite/rst/roadmap/index.rst
new file mode 100644
index 00000000..d1e248b0
--- /dev/null
+++ b/docs/docsite/rst/roadmap/index.rst
@@ -0,0 +1,29 @@
+.. _roadmaps:
+
+Ansible Roadmap
+===============
+
+The Ansible team develops a roadmap for each major and minor Ansible release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for subminor versions. So 2.0 and 2.8 have roadmaps, but 2.7.1 does not.
+
+We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions.
+
+Each roadmap offers a *best guess*, based on the Ansible team's experience and on requests and feedback from the community, of what will be included in a given release. However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, etc.
+
+Each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community.
+
+You can submit feedback on the current roadmap in multiple ways:
+
+- Edit the agenda of an IRC `Core Team Meeting <https://github.com/ansible/community/blob/master/meetings/README.md>`_ (preferred)
+- Post on the ``#ansible-devel`` Freenode IRC channel
+- Email the ansible-devel list
+
+See :ref:`Ansible communication channels <communication>` for details on how to join and use the email lists and IRC channels.
+
+.. toctree::
+ :maxdepth: 1
+ :glob:
+ :caption: Ansible Roadmaps
+
+ ansible_base_roadmap_index
+ ansible_roadmap_index
+ old_roadmap_index
diff --git a/docs/docsite/rst/roadmap/old_roadmap_index.rst b/docs/docsite/rst/roadmap/old_roadmap_index.rst
new file mode 100644
index 00000000..78769f17
--- /dev/null
+++ b/docs/docsite/rst/roadmap/old_roadmap_index.rst
@@ -0,0 +1,19 @@
+.. _old_roadmaps:
+
+Older Roadmaps
+===============
+
+Older roadmaps are listed here to provide a history of the Ansible project.
+
+See :ref:`roadmaps` to find current Ansible and ``ansible-base`` roadmaps.
+
+.. toctree::
+ :maxdepth: 1
+ :glob:
+ :caption: Older Roadmaps
+
+ ROADMAP_2_9
+ ROADMAP_2_8
+ ROADMAP_2_7
+ ROADMAP_2_6
+ ROADMAP_2_5
diff --git a/docs/docsite/rst/scenario_guides/cloud_guides.rst b/docs/docsite/rst/scenario_guides/cloud_guides.rst
new file mode 100644
index 00000000..d430bdda
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/cloud_guides.rst
@@ -0,0 +1,22 @@
+.. _cloud_guides:
+
+*******************
+Public Cloud Guides
+*******************
+
+The guides in this section cover using Ansible with a range of public cloud platforms. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features.
+
+.. toctree::
+ :maxdepth: 1
+
+ guide_alicloud
+ guide_aws
+ guide_cloudstack
+ guide_gce
+ guide_azure
+ guide_online
+ guide_oracle
+ guide_packet
+ guide_rax
+ guide_scaleway
+ guide_vultr
diff --git a/docs/docsite/rst/scenario_guides/guide_aci.rst b/docs/docsite/rst/scenario_guides/guide_aci.rst
new file mode 100644
index 00000000..5fe4c648
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_aci.rst
@@ -0,0 +1,661 @@
+.. _aci_guide:
+
+Cisco ACI Guide
+===============
+
+
+.. _aci_guide_intro:
+
+What is Cisco ACI?
+-------------------
+
+Application Centric Infrastructure (ACI)
+........................................
+The Cisco Application Centric Infrastructure (ACI) allows application requirements to define the network. This architecture simplifies, optimizes, and accelerates the entire application deployment life cycle.
+
+
+Application Policy Infrastructure Controller (APIC)
+...................................................
+The APIC manages the scalable ACI multi-tenant fabric. The APIC provides a unified point of automation and management, policy programming, application deployment, and health monitoring for the fabric. The APIC, which is implemented as a replicated synchronized clustered controller, optimizes performance, supports any application anywhere, and provides unified operation of the physical and virtual infrastructure.
+
+The APIC enables network administrators to easily define the optimal network for applications. Data center operators can clearly see how applications consume network resources, easily isolate and troubleshoot application and infrastructure problems, and monitor and profile resource usage patterns.
+
+The Cisco Application Policy Infrastructure Controller (APIC) API enables applications to directly connect with a secure, shared, high-performance resource pool that includes network, compute, and storage capabilities.
+
+
+ACI Fabric
+..........
+The Cisco Application Centric Infrastructure (ACI) Fabric includes Cisco Nexus 9000 Series switches with the APIC to run in the leaf/spine ACI fabric mode. These switches form a "fat-tree" network by connecting each leaf node to each spine node; all other devices connect to the leaf nodes. The APIC manages the ACI fabric.
+
+The ACI fabric provides consistent low-latency forwarding across high-bandwidth links (40 Gbps, with a 100-Gbps future capability). Traffic with the source and destination on the same leaf switch is handled locally, and all other traffic travels from the ingress leaf to the egress leaf through a spine switch. Although this architecture appears as two hops from a physical perspective, it is actually a single Layer 3 hop because the fabric operates as a single Layer 3 switch.
+
+The ACI fabric object-oriented operating system (OS) runs on each Cisco Nexus 9000 Series node. It enables programming of objects for each configurable element of the system. The ACI fabric OS renders policies from the APIC into a concrete model that runs in the physical infrastructure. The concrete model is analogous to compiled software; it is the form of the model that the switch operating system can execute.
+
+All the switch nodes contain a complete copy of the concrete model. When an administrator creates a policy in the APIC that represents a configuration, the APIC updates the logical model. The APIC then performs the intermediate step of creating a fully elaborated policy that it pushes into all the switch nodes where the concrete model is updated.
+
+The APIC is responsible for fabric activation, switch firmware management, network policy configuration, and instantiation. While the APIC acts as the centralized policy and network management engine for the fabric, it is completely removed from the data path, including the forwarding topology. Therefore, the fabric can still forward traffic even when communication with the APIC is lost.
+
+
+More information
+................
+Various resources exist for starting to learn ACI; here is a list of interesting articles from the community.
+
+- `Adam Raffe: Learning ACI <https://adamraffe.com/learning-aci/>`_
+- `Luca Relandini: ACI for dummies <https://lucarelandini.blogspot.be/2015/03/aci-for-dummies.html>`_
+- `Cisco DevNet Learning Labs about ACI <https://learninglabs.cisco.com/labs/tags/ACI>`_
+
+
+.. _aci_guide_modules:
+
+Using the ACI modules
+---------------------
+The Ansible ACI modules provide a user-friendly interface to managing your ACI environment using Ansible playbooks.
+
+For instance, ensuring that a specific tenant exists is done with the following Ansible task, using the aci_tenant module:
+
+.. code-block:: yaml
+
+ - name: Ensure tenant customer-xyz exists
+ aci_tenant:
+ host: my-apic-1
+ username: admin
+ password: my-password
+
+ tenant: customer-xyz
+ description: Customer XYZ
+ state: present
+
+A complete list of existing ACI modules is available on the content tab of the `ACI collection on Ansible Galaxy <https://galaxy.ansible.com/cisco/aci>`_.
+
+If you want to learn how to write your own ACI modules to contribute, look at the :ref:`Developing Cisco ACI modules <aci_dev_guide>` section.
+
+Querying ACI configuration
+..........................
+
+A module can also be used to query a specific object.
+
+.. code-block:: yaml
+
+ - name: Query tenant customer-xyz
+ aci_tenant:
+ host: my-apic-1
+ username: admin
+ password: my-password
+
+ tenant: customer-xyz
+ state: query
+ register: my_tenant
+
+Or query all objects.
+
+.. code-block:: yaml
+
+ - name: Query all tenants
+ aci_tenant:
+ host: my-apic-1
+ username: admin
+ password: my-password
+
+ state: query
+ register: all_tenants
+
+After registering the return values of the aci_tenant task as shown above, you can access all tenant information from the variable ``all_tenants``.
+
+
+Running on the controller locally
+.................................
+As originally designed, Ansible modules are shipped to and run on the remote target(s). However, the ACI modules (like most network-related modules) do not run on the network devices or the controller (in this case the APIC); instead, they talk directly to the APIC's REST interface.
+
+For this reason, the modules need to run on the local Ansible controller (or be delegated to another system that *can* connect to the APIC).
+
+
+Gathering facts
+```````````````
+Because we run the modules on the Ansible controller, fact gathering will not work. That is why fact gathering must be disabled when using these ACI modules. You can do this globally in your ``ansible.cfg`` or by adding ``gather_facts: no`` to every play.
+
+.. code-block:: yaml
+ :emphasize-lines: 3
+
+ - name: Another play in my playbook
+ hosts: my-apic-1
+ gather_facts: no
+ tasks:
+ - name: Create a tenant
+ aci_tenant:
+ ...
+
+Delegating to localhost
+```````````````````````
+Let us assume we have our target configured in the inventory, using the FQDN as the ``ansible_host`` value, as shown below.
+
+.. code-block:: yaml
+ :emphasize-lines: 3
+
+ apics:
+ my-apic-1:
+ ansible_host: apic01.fqdn.intra
+ ansible_user: admin
+ ansible_password: my-password
+
+One way to set this up is to add to every task the directive: ``delegate_to: localhost``.
+
+.. code-block:: yaml
+ :emphasize-lines: 8
+
+ - name: Query all tenants
+ aci_tenant:
+ host: '{{ ansible_host }}'
+ username: '{{ ansible_user }}'
+ password: '{{ ansible_password }}'
+
+ state: query
+ delegate_to: localhost
+ register: all_tenants
+
+If you forget to add this directive, Ansible will attempt to connect to the APIC over SSH, copy the module there, and run it remotely. This will fail with a clear error, yet it may still be confusing to some.
+
+
+Using the local connection method
+`````````````````````````````````
+Another frequently used option is to tie the ``local`` connection method to this target, so that every subsequent task for this target uses the local connection method (that is, it runs locally rather than over SSH).
+
+In this case the inventory may look like this:
+
+.. code-block:: yaml
+ :emphasize-lines: 6
+
+ apics:
+ my-apic-1:
+ ansible_host: apic01.fqdn.intra
+ ansible_user: admin
+ ansible_password: my-password
+ ansible_connection: local
+
+The tasks themselves then do not need anything special added.
+
+.. code-block:: yaml
+
+ - name: Query all tenants
+ aci_tenant:
+ host: '{{ ansible_host }}'
+ username: '{{ ansible_user }}'
+ password: '{{ ansible_password }}'
+
+ state: query
+ register: all_tenants
+
+.. hint:: For clarity we have added ``delegate_to: localhost`` to all the examples in the module documentation. This helps to ensure first-time users can easily copy and paste parts and make them work with a minimum of effort.
+
+
+Common parameters
+.................
+Every Ansible ACI module accepts the following parameters that influence the module's communication with the APIC REST API; a combined example follows the list:
+
+ host
+ Hostname or IP address of the APIC.
+
+ port
+ Port to use for communication. (Defaults to ``443`` for HTTPS, and ``80`` for HTTP)
+
+ username
+ User name used to log on to the APIC. (Defaults to ``admin``)
+
+ password
+ Password for ``username`` to log on to the APIC, using password-based authentication.
+
+ private_key
+ Private key for ``username`` to log on to APIC, using signature-based authentication.
+ This could either be the raw private key content (include header/footer) or a file that stores the key content.
+ *New in version 2.5*
+
+ certificate_name
+ Name of the certificate in the ACI Web GUI.
+ This defaults to either the ``username`` value or the ``private_key`` file base name.
+ *New in version 2.5*
+
+ timeout
+ Timeout value for socket-level communication.
+
+ use_proxy
+ Use system proxy settings. (Defaults to ``yes``)
+
+ use_ssl
+ Use HTTPS or HTTP for APIC REST communication. (Defaults to ``yes``)
+
+ validate_certs
+ Validate certificate when using HTTPS communication. (Defaults to ``yes``)
+
+ output_level
+ Influence the level of detail ACI modules return to the user. (One of ``normal``, ``info`` or ``debug``) *New in version 2.5*
+
+
+Proxy support
+.............
+By default, if an environment variable ``<protocol>_proxy`` is set on the target host, requests will be sent through that proxy. This behaviour can be overridden by setting a variable for this task (see :ref:`playbooks_environment`), or by using the ``use_proxy`` module parameter.
+
+HTTP redirects can redirect from HTTP to HTTPS so ensure that the proxy environment for both protocols is correctly configured.
+
+If proxy support is not needed, but the system may have it configured nevertheless, use the parameter ``use_proxy: no`` to avoid accidental system proxy usage.
+
+.. hint:: Selective proxy support using the ``no_proxy`` environment variable is also supported.
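+
+As an illustration, the sketch below (host, key path and proxy URL are placeholders) bypasses any system proxy for one task, and routes another task through an explicit proxy using the ``environment`` keyword:
+
+.. code-block:: yaml
+
+    - name: Query all tenants, ignoring any system proxy
+      aci_tenant:
+        host: my-apic-1
+        private_key: pki/admin.key
+        use_proxy: no
+
+        state: query
+      delegate_to: localhost
+
+    - name: Query all tenants through an explicit proxy
+      aci_tenant:
+        host: my-apic-1
+        private_key: pki/admin.key
+
+        state: query
+      environment:
+        https_proxy: http://proxy.example.com:8080
+      delegate_to: localhost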
+
+
+Return values
+.............
+
+.. versionadded:: 2.5
+
+The following values are always returned:
+
+ current
+ The resulting state of the managed object, or results of your query.
+
+The following values are returned when ``output_level: info``:
+
+ previous
+ The original state of the managed object (before any change was made).
+
+ proposed
+ The proposed config payload, based on user-supplied values.
+
+ sent
+ The sent config payload, based on user-supplied values and the existing configuration.
+
+The following values are returned when ``output_level: debug`` or ``ANSIBLE_DEBUG=1``:
+
+ filter_string
+ The filter used for specific APIC queries.
+
+ method
+ The HTTP method used for the sent payload. (Either ``GET`` for queries, ``DELETE`` or ``POST`` for changes)
+
+ response
+ The HTTP response from the APIC.
+
+ status
+ The HTTP status code for the request.
+
+ url
+ The URL used for the request.
+
+.. note:: The module return values are documented in detail as part of each module's documentation.
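+
+For example, the sketch below (hostname, key path and tenant name are placeholders) raises ``output_level`` and then inspects the extra return values:
+
+.. code-block:: yaml
+
+    - name: Update a tenant and capture detailed return values
+      aci_tenant:
+        host: my-apic-1
+        private_key: pki/admin.key
+        output_level: info
+
+        tenant: customer-xyz
+        description: Customer XYZ (updated)
+        state: present
+      delegate_to: localhost
+      register: result
+
+    - name: Show the proposed, sent and resulting configuration
+      debug:
+        msg: "proposed={{ result.proposed }} sent={{ result.sent }} current={{ result.current }}"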
+
+
+More information
+................
+Various resources exist to learn more about ACI programmability; we recommend the following links:
+
+- :ref:`Developing Cisco ACI modules <aci_dev_guide>`
+- `Jacob McGill: Automating Cisco ACI with Ansible <https://blogs.cisco.com/developer/automating-cisco-aci-with-ansible-eliminates-repetitive-day-to-day-tasks>`_
+- `Cisco DevNet Learning Labs about ACI and Ansible <https://learninglabs.cisco.com/labs/tags/ACI,Ansible>`_
+
+
+.. _aci_guide_auth:
+
+ACI authentication
+------------------
+
+Password-based authentication
+.............................
+If you want to log on using a username and password, you can use the following parameters with your ACI modules:
+
+.. code-block:: yaml
+
+ username: admin
+ password: my-password
+
+Password-based authentication is very simple to work with, but it is not the most efficient form of authentication from ACI's point of view, as it requires a separate login request and an open session to work. To avoid having your session time out and requiring another login, you can use the more efficient signature-based authentication.
+
+.. note:: Password-based authentication may also trigger anti-DoS measures in ACI v3.1+ that cause session throttling, resulting in HTTP 503 errors and login failures.
+
+.. warning:: Never store passwords in plain text.
+
+The "Vault" feature of Ansible allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plain text in your playbooks or roles. These vault files can then be distributed or placed in source control. See :ref:`playbooks_vault` for more information.
+
+
+Signature-based authentication using certificates
+.................................................
+
+.. versionadded:: 2.5
+
+Using signature-based authentication is more efficient and more reliable than password-based authentication.
+
+Generate certificate and private key
+````````````````````````````````````
+Signature-based authentication requires a (self-signed) X.509 certificate with private key, and a configuration step for your AAA user in ACI. To generate a working X.509 certificate and private key, use the following procedure:
+
+.. code-block:: bash
+
+ $ openssl req -new -newkey rsa:1024 -days 36500 -nodes -x509 -keyout admin.key -out admin.crt -subj '/CN=Admin/O=Your Company/C=US'
+
+Configure your local user
+`````````````````````````
+Perform the following steps:
+
+- Add the X.509 certificate to your ACI AAA local user at :guilabel:`ADMIN` » :guilabel:`AAA`
+- Click :guilabel:`AAA Authentication`
+- Check that in the :guilabel:`Authentication` field the :guilabel:`Realm` field displays :guilabel:`Local`
+- Expand :guilabel:`Security Management` » :guilabel:`Local Users`
+- Click the name of the user you want to add a certificate to, in the :guilabel:`User Certificates` area
+- Click the :guilabel:`+` sign and, in the :guilabel:`Create X509 Certificate` dialog box, enter a certificate name in the :guilabel:`Name` field
+
+ * If you use the basename of your private key here, you don't need to enter ``certificate_name`` in Ansible
+
+- Copy and paste your X.509 certificate in the :guilabel:`Data` field.
+
+You can automate this by using the following Ansible task:
+
+.. code-block:: yaml
+
+ - name: Ensure we have a certificate installed
+ aci_aaa_user_certificate:
+ host: my-apic-1
+ username: admin
+ password: my-password
+
+ aaa_user: admin
+ certificate_name: admin
+ certificate: "{{ lookup('file', 'pki/admin.crt') }}" # This will read the certificate data from a local file
+
+.. note:: Signature-based authentication only works with local users.
+
+
+Use signature-based authentication with Ansible
+```````````````````````````````````````````````
+You need the following parameters with your ACI module(s) for it to work:
+
+.. code-block:: yaml
+ :emphasize-lines: 2,3
+
+ username: admin
+ private_key: pki/admin.key
+ certificate_name: admin # This could be left out!
+
+or you can use the private key content:
+
+.. code-block:: yaml
+ :emphasize-lines: 2,3
+
+ username: admin
+ private_key: |
+ -----BEGIN PRIVATE KEY-----
+ <<your private key content>>
+ -----END PRIVATE KEY-----
+ certificate_name: admin # This could be left out!
+
+
+.. hint:: If you use a certificate name in ACI that matches the private key's basename, you can leave out the ``certificate_name`` parameter, as in the example above.
+
+
+Using Ansible Vault to encrypt the private key
+``````````````````````````````````````````````
+.. versionadded:: 2.8
+
+To start, encrypt the private key and give it a strong password.
+
+.. code-block:: bash
+
+ ansible-vault encrypt admin.key
+
+Use a text editor to open the private key. You should see that it is now encrypted.
+
+.. code-block:: bash
+
+ $ANSIBLE_VAULT;1.1;AES256
+ 56484318584354658465121889743213151843149454864654151618131547984132165489484654
+ 45641818198456456489479874513215489484843614848456466655432455488484654848489498
+ ....
+
+Copy and paste the encrypted private key into your playbook as a new variable.
+
+.. code-block:: yaml
+
+ private_key: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 56484318584354658465121889743213151843149454864654151618131547984132165489484654
+ 45641818198456456489479874513215489484843614848456466655432455488484654848489498
+ ....
+
+Use the new variable for the private_key:
+
+.. code-block:: yaml
+
+ username: admin
+ private_key: "{{ private_key }}"
+ certificate_name: admin # This could be left out!
+
+When running the playbook, use ``--ask-vault-pass`` to decrypt the private key.
+
+.. code-block:: bash
+
+ ansible-playbook site.yaml --ask-vault-pass
+
+
+More information
+````````````````
+- Detailed information about Signature-based Authentication is available from `Cisco APIC Signature-Based Transactions <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/kb/b_KB_Signature_Based_Transactions.html>`_.
+- More information on Ansible Vault can be found on the :ref:`Ansible Vault <vault>` page.
+
+
+.. _aci_guide_rest:
+
+Using ACI REST with Ansible
+---------------------------
+While a lot of ACI modules already exist in the Ansible distribution, and the most common actions can be performed with these existing modules, there is always something that may not be possible with off-the-shelf modules.
+
+The aci_rest module provides you with direct access to the APIC REST API and enables you to perform any task not already covered by the existing modules. This may seem like a complex undertaking, but you can generate the needed REST payload for any action performed in the ACI web interface effortlessly.
+
+
+Built-in idempotency
+....................
+Because the APIC REST API is intrinsically idempotent and can report whether a change was made, the aci_rest module automatically inherits both capabilities and is a first-class solution for automating your ACI infrastructure. As a result, users that require more powerful low-level access to their ACI infrastructure don't have to give up on idempotency and don't have to guess whether a change was performed when using the aci_rest module.
+
+
+Using the aci_rest module
+.........................
+The aci_rest module accepts the native XML and JSON payloads, but additionally accepts inline YAML payload (structured like JSON). The XML payload requires you to use a path ending with ``.xml`` whereas JSON or YAML require the path to end with ``.json``.
+
+When you are making modifications, you can use the POST or DELETE methods, whereas queries require the GET method.
+
+For instance, if you would like to ensure a specific tenant exists on ACI, the four examples below are functionally identical:
+
+**XML** (Native ACI REST)
+
+.. code-block:: yaml
+
+ - aci_rest:
+ host: my-apic-1
+ private_key: pki/admin.key
+
+ method: post
+ path: /api/mo/uni.xml
+ content: |
+ <fvTenant name="customer-xyz" descr="Customer XYZ"/>
+
+**JSON** (Native ACI REST)
+
+.. code-block:: yaml
+
+ - aci_rest:
+ host: my-apic-1
+ private_key: pki/admin.key
+
+ method: post
+ path: /api/mo/uni.json
+ content:
+ {
+ "fvTenant": {
+ "attributes": {
+ "name": "customer-xyz",
+ "descr": "Customer XYZ"
+ }
+ }
+ }
+
+**YAML** (Ansible-style REST)
+
+.. code-block:: yaml
+
+ - aci_rest:
+ host: my-apic-1
+ private_key: pki/admin.key
+
+ method: post
+ path: /api/mo/uni.json
+ content:
+ fvTenant:
+ attributes:
+ name: customer-xyz
+ descr: Customer XYZ
+
+**Ansible task** (Dedicated module)
+
+.. code-block:: yaml
+
+ - aci_tenant:
+ host: my-apic-1
+ private_key: pki/admin.key
+
+ tenant: customer-xyz
+ description: Customer XYZ
+ state: present
+
+
+.. hint:: The XML format is more practical when there is a need to template the REST payload (inline), but the YAML format is more convenient for maintaining your infrastructure-as-code and feels more naturally integrated with Ansible playbooks. The dedicated modules offer a simpler and more abstracted, but also more limited, experience. Use what feels best for your use case.
+
+
+More information
+................
+Plenty of resources exist to learn about ACI's APIC REST interface; we recommend the links below:
+
+- `The ACI collection on Ansible Galaxy <https://galaxy.ansible.com/cisco/aci>`_
+- `APIC REST API Configuration Guide <https://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/2-x/rest_cfg/2_1_x/b_Cisco_APIC_REST_API_Configuration_Guide.html>`_ -- Detailed guide on how the APIC REST API is designed and used, incl. many examples
+- `APIC Management Information Model reference <https://developer.cisco.com/docs/apic-mim-ref/>`_ -- Complete reference of the APIC object model
+- `Cisco DevNet Learning Labs about ACI and REST <https://learninglabs.cisco.com/labs/tags/ACI,REST>`_
+
+
+.. _aci_guide_ops:
+
+Operational examples
+--------------------
+Here is a small overview of useful operational tasks to reuse in your playbooks.
+
+Feel free to contribute more useful snippets.
+
+
+Waiting for all controllers to be ready
+.......................................
+You can use the task below, after you have started to build your APICs and configured the cluster, to wait until all the APICs have come online. It waits until the number of controllers equals the number listed in the ``apic`` inventory group.
+
+.. code-block:: yaml
+
+ - name: Waiting for all controllers to be ready
+ aci_rest:
+ host: my-apic-1
+ private_key: pki/admin.key
+ method: get
+ path: /api/node/class/topSystem.json?query-target-filter=eq(topSystem.role,"controller")
+ register: topsystem
+ until: topsystem is success and topsystem.totalCount|int >= groups['apic']|count >= 3
+ retries: 20
+ delay: 30
+
+
+Waiting for cluster to be fully-fit
+...................................
+The example below waits until the cluster is fully fit. In this example, you know the number of APICs in the cluster, and you verify that each APIC reports a 'fully-fit' status.
+
+.. code-block:: yaml
+
+ - name: Waiting for cluster to be fully-fit
+ aci_rest:
+ host: my-apic-1
+ private_key: pki/admin.key
+ method: get
+ path: /api/node/class/infraWiNode.json?query-target-filter=wcard(infraWiNode.dn,"topology/pod-1/node-1/av")
+ register: infrawinode
+ until: >
+ infrawinode is success and
+ infrawinode.totalCount|int >= groups['apic']|count >= 3 and
+ infrawinode.imdata[0].infraWiNode.attributes.health == 'fully-fit' and
+ infrawinode.imdata[1].infraWiNode.attributes.health == 'fully-fit' and
+ infrawinode.imdata[2].infraWiNode.attributes.health == 'fully-fit'
+ retries: 30
+ delay: 30
+
+
+.. _aci_guide_errors:
+
+APIC error messages
+-------------------
+The following error messages may occur; this section can help you understand exactly what is going on and how to fix or avoid them.
+
+ APIC Error 122: unknown managed object class 'polUni'
+ If you receive this error although your aci_rest payload and object classes appear to be correct, the issue might be that your payload is not in fact proper JSON (for example, the sent payload uses single quotes rather than double quotes), and as a result the APIC is not correctly parsing your object classes from the payload. One way to avoid this is to use a YAML or an XML formatted payload, which are easier to construct correctly and modify later.
+
+
+ APIC Error 400: invalid data at line '1'. Attributes are missing, tag 'attributes' must be specified first, before any other tag
+ Although the JSON specification allows unordered elements, the APIC REST API requires that the JSON ``attributes`` element precede the ``children`` array or other elements. So you need to ensure that your payload conforms to this requirement. Sorting your dictionary keys will do the trick just fine. If you don't have any attributes, it may be necessary to add: ``attributes: {}`` as the APIC does expect the entry to precede any ``children``.
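+
+    For example, a minimal aci_rest sketch of a conforming payload (the tenant and application profile names are illustrative); listing ``attributes`` before ``children`` satisfies the APIC:
+
+    .. code-block:: yaml
+
+        - aci_rest:
+            host: my-apic-1
+            private_key: pki/admin.key
+            method: post
+            path: /api/mo/uni.json
+            content:
+              fvTenant:
+                attributes:
+                  name: customer-xyz
+                children:
+                - fvAp:
+                    attributes:
+                      name: intranet
+          delegate_to: localhost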
+
+
+ APIC Error 801: property descr of uni/tn-TENANT/ap-AP failed validation for value 'A "legacy" network'
+ Some values in the APIC have strict format rules to comply with, and the internal APIC validation check for the provided value failed. In the above case, the ``description`` parameter (internally known as ``descr``) only accepts values conforming to `Regex: [a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+ <https://pubhub-prod.s3.amazonaws.com/media/apic-mim-ref/docs/MO-fvAp.html#descr>`_; in general, it must not include quotes or square brackets.
+
+
+.. _aci_guide_known_issues:
+
+Known issues
+------------
+The aci_rest module is a wrapper around the APIC REST API. As a result any issues related to the APIC will be reflected in the use of this module.
+
+All issues below have been reported to the vendor, and most can simply be avoided.
+
+ Too many consecutive API calls may result in connection throttling
+ Starting with ACI v3.1, the APIC will actively throttle password-based authenticated connection rates over a specific threshold. This is part of an anti-DDoS measure, but it can get in the way when using Ansible with ACI and password-based authentication. Currently, one solution is to increase this threshold within the nginx configuration, but using signature-based authentication is recommended.
+
+ **NOTE:** It is advisable to use signature-based authentication with ACI, as it not only prevents connection throttling, but also improves general performance when using the ACI modules.
+
+
+ Specific requests may not reflect changes correctly (`#35041 <https://github.com/ansible/ansible/issues/35041>`_)
+ There is a known issue where specific requests to the APIC do not properly reflect changes in the resulting output, even when we request those changes explicitly from the APIC. In one instance, using the path ``api/node/mo/uni/infra.xml`` fails, whereas ``api/node/mo/uni/infra/.xml`` works correctly.
+
+ **NOTE:** A workaround is to register the task return values (for example, ``register: this``) and influence when the task should report a change by adding: ``changed_when: this.imdata != []``.
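+
+    A minimal sketch of this workaround (host, key and payload are illustrative):
+
+    .. code-block:: yaml
+
+        - aci_rest:
+            host: my-apic-1
+            private_key: pki/admin.key
+            method: post
+            path: /api/node/mo/uni/infra.xml
+            content: |
+              <infraInfra/>
+          delegate_to: localhost
+          register: this
+          changed_when: this.imdata != []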
+
+
+ Specific requests are known to not be idempotent (`#35050 <https://github.com/ansible/ansible/issues/35050>`_)
+ The behaviour of the APIC is inconsistent with respect to the use of ``status="created"`` and ``status="deleted"``. The result is that when you use ``status="created"`` in your payload, the resulting tasks are not idempotent, and creation will fail when the object already exists. However, this is not the case with ``status="deleted"``, where such a call to a non-existing object does not cause any failure whatsoever.
+
+ **NOTE:** A workaround is to avoid using ``status="created"`` and instead use ``status="modified"`` when idempotency is essential to your workflow.
+
+
+ Setting user password is not idempotent (`#35544 <https://github.com/ansible/ansible/issues/35544>`_)
+ Due to an inconsistency in the APIC REST API, a task that sets the password of a locally-authenticated user is not idempotent. The APIC will complain with message ``Password history check: user dag should not use previous 5 passwords``.
+
+ **NOTE:** There is no workaround for this issue.
+
+
+.. _aci_guide_community:
+
+ACI Ansible community
+---------------------
+If you have specific issues with the ACI modules, have a feature request, or would like to contribute to the ACI project by proposing changes or documentation updates, look at the Ansible Community wiki ACI page at: https://github.com/ansible/community/wiki/Network:-ACI
+
+You will find our roadmap, an overview of open ACI issues and pull-requests, and more information about who we are. If you have an interest in using ACI with Ansible, feel free to join! We occasionally meet online to track progress and prepare for new Ansible releases.
+
+
+.. seealso::
+
+ `ACI collection on Ansible Galaxy <https://galaxy.ansible.com/cisco/aci>`_
+ View the content tab for a complete list of supported ACI modules.
+ :ref:`Developing Cisco ACI modules <aci_dev_guide>`
+ A walkthrough on how to develop new Cisco ACI modules to contribute back.
+ `ACI community <https://github.com/ansible/community/wiki/Network:-ACI>`_
+ The Ansible ACI community wiki page, includes roadmap, ideas and development documentation.
+ :ref:`network_guide`
+ A detailed guide on how to use Ansible for automating network infrastructure.
+ `Network Working Group <https://github.com/ansible/community/tree/master/group-network>`_
+ The Ansible Network community page, includes contact information and meeting information.
+ `#ansible-network <https://webchat.freenode.net/?channels=ansible-network>`_
+ The #ansible-network IRC chat channel on Freenode.net.
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
diff --git a/docs/docsite/rst/scenario_guides/guide_alicloud.rst b/docs/docsite/rst/scenario_guides/guide_alicloud.rst
new file mode 100644
index 00000000..c91eaf7f
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_alicloud.rst
@@ -0,0 +1,125 @@
+Alibaba Cloud Compute Services Guide
+====================================
+
+.. _alicloud_intro:
+
+Introduction
+````````````
+
+Ansible contains several modules for controlling and managing Alibaba Cloud Compute Services (Alicloud). This guide
+explains how to use the Alicloud Ansible modules together.
+
+All Alicloud modules require ``footmark`` - install it on your control machine with ``pip install footmark``.
+
+Cloud modules, including Alicloud modules, execute on your local machine (the control machine) with ``connection: local``, rather than on remote machines defined in your hosts.
+
+Normally, you'll use the following pattern for plays that provision Alicloud resources::
+
+ - hosts: localhost
+ connection: local
+ vars:
+ - ...
+ tasks:
+ - ...
+
+.. _alicloud_authentication:
+
+Authentication
+``````````````
+
+You can specify your Alicloud authentication credentials (access key and secret key) by passing them as
+environment variables or by storing them in a vars file.
+
+To pass authentication credentials as environment variables::
+
+ export ALICLOUD_ACCESS_KEY='Alicloud123'
+ export ALICLOUD_SECRET_KEY='AlicloudSecret123'
+
+To store authentication credentials in a vars_file, encrypt them with :ref:`Ansible Vault<vault>` to keep them secure, then list them::
+
+ ---
+ alicloud_access_key: "--REMOVED--"
+ alicloud_secret_key: "--REMOVED--"
+
+Note that if you store your credentials in a vars_file, you need to refer to them in each Alicloud module. For example::
+
+ - ali_instance:
+ alicloud_access_key: "{{alicloud_access_key}}"
+ alicloud_secret_key: "{{alicloud_secret_key}}"
+ image_id: "..."
+
+.. _alicloud_provisioning:
+
+Provisioning
+````````````
+
+Alicloud modules create Alicloud ECS instances, disks, virtual private clouds, virtual switches, security groups and other resources.
+
+You can use the ``count`` parameter to control the number of resources you create or terminate. For example, if you want exactly 5 instances tagged ``NewECS``,
+set the ``count`` of instances to 5 and the ``count_tag`` to ``NewECS``, as shown in the last task of the example playbook below.
+If there are no instances with the tag ``NewECS``, the task creates 5 new instances. If there are 2 instances with that tag, the task
+creates 3 more. If there are 8 instances with that tag, the task terminates 3 of those instances.
+
+If you do not specify a ``count_tag``, the task creates the number of instances you specify in ``count`` with the ``instance_name`` you provide.
+
+::
+
+ # alicloud_setup.yml
+
+ - hosts: localhost
+ connection: local
+
+ tasks:
+
+ - name: Create VPC
+ ali_vpc:
+ cidr_block: '{{ cidr_block }}'
+ vpc_name: new_vpc
+ register: created_vpc
+
+ - name: Create VSwitch
+ ali_vswitch:
+ alicloud_zone: '{{ alicloud_zone }}'
+ cidr_block: '{{ vsw_cidr }}'
+ vswitch_name: new_vswitch
+ vpc_id: '{{ created_vpc.vpc.id }}'
+ register: created_vsw
+
+ - name: Create security group
+ ali_security_group:
+ name: new_group
+ vpc_id: '{{ created_vpc.vpc.id }}'
+ rules:
+ - proto: tcp
+ port_range: 22/22
+ cidr_ip: 0.0.0.0/0
+ priority: 1
+ rules_egress:
+ - proto: tcp
+ port_range: 80/80
+ cidr_ip: 192.168.0.54/32
+ priority: 1
+ register: created_group
+
+ - name: Create a set of instances
+ ali_instance:
+ security_groups: '{{ created_group.group_id }}'
+ instance_type: ecs.n4.small
+ image_id: "{{ ami_id }}"
+ instance_name: "My-new-instance"
+ instance_tags:
+ Name: NewECS
+ Version: 0.0.1
+ count: 5
+ count_tag:
+ Name: NewECS
+ allocate_public_ip: true
+ max_bandwidth_out: 50
+ vswitch_id: '{{ created_vsw.vswitch.id }}'
+ register: create_instance
+
+In the example playbook above, data about the vpc, vswitch, group, and instances created by this playbook
+are saved in the variables defined by the "register" keyword in each task.
+
+Each Alicloud module offers a variety of parameter options. Not all options are demonstrated in the above example.
+See each individual module for further details and examples.
diff --git a/docs/docsite/rst/scenario_guides/guide_aws.rst b/docs/docsite/rst/scenario_guides/guide_aws.rst
new file mode 100644
index 00000000..ba453195
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_aws.rst
@@ -0,0 +1,281 @@
+Amazon Web Services Guide
+=========================
+
+.. _aws_intro:
+
+Introduction
+````````````
+
+Ansible contains a number of modules for controlling Amazon Web Services (AWS). The purpose of this
+section is to explain how to put Ansible modules together (and use inventory scripts) to use Ansible in an AWS context.
+
+Requirements for the AWS modules are minimal.
+
+All of the modules require and are tested against recent versions of boto, usually boto3. Check the module documentation for the minimum required version for each module. You must have the boto3 Python module installed on your control machine. You may also need the original boto package. You can install these modules from your OS distribution or using the Python package installer: ``pip install boto3``.
+
+Whereas classically Ansible will execute tasks in its host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control.
+
+Playbooks typically use the following pattern for provisioning steps::
+
+ - hosts: localhost
+ gather_facts: False
+ tasks:
+ - ...
+
+.. _aws_authentication:
+
+Authentication
+``````````````
+
+Authentication with the AWS-related modules is handled by either
+specifying your access and secret key as environment variables or as module arguments.
+
+For environment variables::
+
+ export AWS_ACCESS_KEY_ID='AK123'
+ export AWS_SECRET_ACCESS_KEY='abc123'
+
+For storing these in a vars_file, ideally encrypted with ansible-vault::
+
+ ---
+ ec2_access_key: "--REMOVED--"
+ ec2_secret_key: "--REMOVED--"
+
+Note that if you store your credentials in a vars_file, you need to refer to them in each AWS module. For example::
+
+    - ec2:
+ aws_access_key: "{{ec2_access_key}}"
+ aws_secret_key: "{{ec2_secret_key}}"
+ image: "..."
+
+.. _aws_provisioning:
+
+Provisioning
+````````````
+
+The ec2 module provisions and de-provisions instances within EC2.
+
+An example of making sure there are only 5 instances tagged 'Demo' in EC2 follows.
+
+In the example below, the "exact_count" of instances is set to 5. This means that if there are no existing instances,
+5 new instances will be created. If there are 2 instances, only 3 will be created, and if there are 8 instances, 3 instances will
+be terminated.
+
+What is being counted is specified by the "count_tag" parameter. The parameter "instance_tags" is used to apply tags to the newly created
+instances::
+
+ # demo_setup.yml
+
+ - hosts: localhost
+ gather_facts: False
+
+ tasks:
+
+ - name: Provision a set of instances
+ ec2:
+ key_name: my_key
+ group: test
+ instance_type: t2.micro
+ image: "{{ ami_id }}"
+ wait: true
+ exact_count: 5
+ count_tag:
+ Name: Demo
+ instance_tags:
+ Name: Demo
+ register: ec2
+
+The data about the instances created is saved by the "register" keyword in the variable named "ec2".
+
+From this, we'll use the add_host module to dynamically create a host group consisting of these new instances. This facilitates performing configuration actions on the hosts immediately in a subsequent task::
+
+ # demo_setup.yml
+
+ - hosts: localhost
+ gather_facts: False
+
+ tasks:
+
+ - name: Provision a set of instances
+ ec2:
+ key_name: my_key
+ group: test
+ instance_type: t2.micro
+ image: "{{ ami_id }}"
+ wait: true
+ exact_count: 5
+ count_tag:
+ Name: Demo
+ instance_tags:
+ Name: Demo
+ register: ec2
+
+ - name: Add all instance public IPs to host group
+ add_host: hostname={{ item.public_ip }} groups=ec2hosts
+ loop: "{{ ec2.instances }}"
+
+With the host group now created, a second play at the bottom of the same provisioning playbook file might now have some configuration steps::
+
+ # demo_setup.yml
+
+ - name: Provision a set of instances
+ hosts: localhost
+ # ... AS ABOVE ...
+
+ - hosts: ec2hosts
+ name: configuration play
+ user: ec2-user
+ gather_facts: true
+
+ tasks:
+
+ - name: Check NTP service
+ service: name=ntpd state=started
+
+.. _aws_security_groups:
+
+Security Groups
+```````````````
+
+Security groups on AWS are stateful. The response to a request from your instance is allowed to flow in regardless of inbound security group rules, and vice versa.
+If you want to allow traffic only to the AWS S3 service, you need to fetch the current IP ranges of AWS S3 for one region and apply them as an egress rule::
+
+ - name: fetch raw ip ranges for aws s3
+ set_fact:
+ raw_s3_ranges: "{{ lookup('aws_service_ip_ranges', region='eu-central-1', service='S3', wantlist=True) }}"
+
+ - name: prepare list structure for ec2_group module
+ set_fact:
+ s3_ranges: "{{ s3_ranges | default([]) + [{'proto': 'all', 'cidr_ip': item, 'rule_desc': 'S3 Service IP range'}] }}"
+ loop: "{{ raw_s3_ranges }}"
+
+ - name: set S3 IP ranges to egress rules
+ ec2_group:
+ name: aws_s3_ip_ranges
+ description: allow outgoing traffic to aws S3 service
+ region: eu-central-1
+ state: present
+ vpc_id: vpc-123456
+ purge_rules: true
+ purge_rules_egress: true
+ rules: []
+ rules_egress: "{{ s3_ranges }}"
+ tags:
+ Name: aws_s3_ip_ranges
+
+.. _aws_host_inventory:
+
+Host Inventory
+``````````````
+
+Once your nodes are spun up, you'll probably want to talk to them again. With a cloud setup, it's best to not maintain a static list of cloud hostnames
+in text files. Rather, the best way to handle this is to use the aws_ec2 inventory plugin. See :ref:`dynamic_inventory`.
+
+The plugin will also return instances that were created outside of Ansible and allow Ansible to manage them.
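+
+A minimal plugin configuration is a small YAML file whose name must end in ``aws_ec2.yml`` (for example, ``demo.aws_ec2.yml``); the region below is only an example::
+
+    # demo.aws_ec2.yml
+    plugin: aws_ec2
+    regions:
+      - us-east-1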
+
+.. _aws_tags_and_groups:
+
+Tags And Groups And Variables
+`````````````````````````````
+
+When using the inventory plugin, you can configure extra inventory structure based on the metadata returned by AWS.
+
+For instance, you might use ``keyed_groups`` to create groups from instance tags::
+
+ plugin: aws_ec2
+ keyed_groups:
+ - prefix: tag
+ key: tags
+
+
+You can then target all instances with a "class" tag where the value is "webserver" in a play::
+
+ - hosts: tag_class_webserver
+ tasks:
+        - ping:
+
+You can also use these groups with 'group_vars' to set variables that are automatically applied to matching instances. See :ref:`splitting_out_vars`.
+
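+For example, a hypothetical ``group_vars/tag_class_webserver.yml`` file could set variables for every host in that group; the variable names here are purely illustrative::
+
+    ---
+    http_port: 8080
+    max_clients: 200
+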
+.. _aws_pull:
+
+Autoscaling with Ansible Pull
+`````````````````````````````
+
+Amazon Autoscaling features automatically increase or decrease capacity based on load. There are also Ansible modules shown in the cloud documentation that
+can configure autoscaling policy.
+
+When nodes come online, it may not be sufficient to wait for the next cycle of an ansible command to come along and configure that node.
+
+To do this, pre-bake machine images which contain the necessary ansible-pull invocation. Ansible-pull is a command line tool that fetches a playbook from a git server and runs it locally.
+
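+A baked-in invocation might look like the following sketch, run from the image's boot scripts. The repository URL is a placeholder, and ``local.yml`` is the playbook to run from that repository::
+
+    ansible-pull -U https://git.example.com/ops/configs.git local.yml
+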
+One of the challenges of this approach is that there needs to be a centralized way to store data about the results of pull commands in an autoscaling context.
+For this reason, the autoscaling solution provided below in the next section can be a better approach.
+
+Read :ref:`ansible-pull` for more information on pull-mode playbooks.
+
+.. _aws_autoscale:
+
+Autoscaling with Ansible Tower
+``````````````````````````````
+
+:ref:`ansible_tower` also contains a very nice feature for auto-scaling use cases. In this mode, a simple curl script can call
+a defined URL and the server will "dial out" to the requester and configure an instance that is spinning up. This can be a great way
+to reconfigure ephemeral nodes. See the Tower install and product documentation for more details.
+
+A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded and less information has to be shared
+with remote hosts.
+
+.. _aws_cloudformation_example:
+
+Ansible With (And Versus) CloudFormation
+````````````````````````````````````````
+
+CloudFormation is an Amazon technology for defining a cloud stack as a JSON or YAML document.
+
+Ansible modules provide an easier-to-use interface than CloudFormation in many cases, without defining a complex JSON/YAML document.
+This is recommended for most users.
+
+However, for users that have decided to use CloudFormation, there is an Ansible module that can be used to apply a CloudFormation template
+to Amazon.
+
+When using Ansible with CloudFormation, typically Ansible will be used with a tool like Packer to build images, and CloudFormation will launch
+those images, or Ansible will be invoked through user data once the image comes online, or a combination of the two.
+
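+As a rough sketch (the stack name, region, and template path are placeholders), applying a template looks like this::
+
+    - name: Apply a CloudFormation template
+      cloudformation:
+        stack_name: my-stack
+        state: present
+        region: us-east-1
+        template: files/my-stack.json
+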
+Please see the examples in the Ansible CloudFormation module for more details.
+
+.. _aws_image_build:
+
+AWS Image Building With Ansible
+```````````````````````````````
+
+Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation. To do this,
+one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with
+the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's
+ec2_ami module.
+
+Generally speaking, we find most users using Packer.
+
+See the Packer documentation of the `Ansible local Packer provisioner <https://www.packer.io/docs/provisioners/ansible-local.html>`_ and `Ansible remote Packer provisioner <https://www.packer.io/docs/provisioners/ansible.html>`_.
+
+If you do not want to adopt Packer at this time, configuring a base-image with Ansible after provisioning (as shown above) is acceptable.
+
+.. _aws_next_steps:
+
+Next Steps: Explore Modules
+```````````````````````````
+
+Ansible ships with lots of modules for configuring a wide array of EC2 services. Browse the "Cloud" category of the module
+documentation for a full list with examples.
+
+.. seealso::
+
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_delegation`
+       Delegation, useful for working with load balancers, clouds, and locally executed steps.
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/scenario_guides/guide_azure.rst b/docs/docsite/rst/scenario_guides/guide_azure.rst
new file mode 100644
index 00000000..2317ade4
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_azure.rst
@@ -0,0 +1,480 @@
+Microsoft Azure Guide
+=====================
+
+Ansible includes a suite of modules for interacting with Azure Resource Manager, giving you the tools to easily create
+and orchestrate infrastructure on the Microsoft Azure Cloud.
+
+Requirements
+------------
+
+Using the Azure Resource Manager modules requires having specific Azure SDK modules
+installed on the host running Ansible.
+
+.. code-block:: bash
+
+ $ pip install 'ansible[azure]'
+
+If you are running Ansible from source, you can install the dependencies from the
+root directory of the Ansible repo.
+
+.. code-block:: bash
+
+ $ pip install .[azure]
+
+You can also directly run Ansible in `Azure Cloud Shell <https://shell.azure.com>`_, where Ansible is pre-installed.
+
+Authenticating with Azure
+-------------------------
+
+Using the Azure Resource Manager modules requires authenticating with the Azure API. You can choose from two authentication strategies:
+
+* Active Directory Username/Password
+* Service Principal Credentials
+
+Follow the directions for the strategy you wish to use, then proceed to `Providing Credentials to Azure Modules`_ for
+instructions on how to actually use the modules and authenticate with the Azure API.
+
+
+Using Service Principal
+.......................
+
+There is now a detailed official tutorial describing `how to create a service principal <https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal>`_.
+
+After stepping through the tutorial you will have:
+
+* Your Client ID, which is found in the "client id" box in the "Configure" page of your application in the Azure portal
+* Your Secret key, generated when you created the application. The key cannot be displayed after creation.
+  If you lose the key, you must create a new one in the "Configure" page of your application.
+* And finally, a tenant ID. It's a UUID (for example, ABCDEFGH-1234-ABCD-1234-ABCDEFGHIJKL) pointing to the AD containing your
+ application. You will find it in the URL from within the Azure portal, or in the "view endpoints" of any given URL.
+
+
+Using Active Directory Username/Password
+........................................
+
+To create an Active Directory username/password:
+
+* Connect to the Azure Classic Portal with your admin account
+* Create a user in your default AAD. You must NOT activate Multi-Factor Authentication
+* Go to Settings - Administrators
+* Click on Add and enter the email of the new user.
+* Check the checkbox of the subscription you want to test with this user.
+* Log in to the Azure Portal with this new user to change the temporary password to a new one. You will not be able to use the
+  temporary password for OAuth login.
+
+Providing Credentials to Azure Modules
+......................................
+
+The modules offer several ways to provide your credentials. For a CI/CD tool such as Ansible Tower or Jenkins, you will
+most likely want to use environment variables. For local development you may wish to store your credentials in a file
+within your home directory. And of course, you can always pass credentials as parameters to a task within a playbook. The
+order of precedence is parameters, then environment variables, and finally a file found in your home directory.
+
+Using Environment Variables
+```````````````````````````
+
+To pass service principal credentials via the environment, define the following variables:
+
+* AZURE_CLIENT_ID
+* AZURE_SECRET
+* AZURE_SUBSCRIPTION_ID
+* AZURE_TENANT
+
+To pass Active Directory username/password via the environment, define the following variables:
+
+* AZURE_AD_USER
+* AZURE_PASSWORD
+* AZURE_SUBSCRIPTION_ID
+
+To pass Active Directory username/password in ADFS via the environment, define the following variables:
+
+* AZURE_AD_USER
+* AZURE_PASSWORD
+* AZURE_CLIENT_ID
+* AZURE_TENANT
+* AZURE_ADFS_AUTHORITY_URL
+
+"AZURE_ADFS_AUTHORITY_URL" is optional. It's necessary only when you have own ADFS authority like https://yourdomain.com/adfs.
+
+Storing in a File
+`````````````````
+
+When working in a development environment, it may be desirable to store credentials in a file. The modules will look
+for credentials in ``$HOME/.azure/credentials``. This file is an ini style file. It will look as follows:
+
+.. code-block:: ini
+
+ [default]
+ subscription_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ client_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ secret=xxxxxxxxxxxxxxxxx
+ tenant=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+.. note:: If your secret values contain non-ASCII characters, you must `URL Encode <https://www.w3schools.com/tags/ref_urlencode.asp>`_ them to avoid login errors.
+
+It is possible to store multiple sets of credentials within the credentials file by creating multiple sections. Each
+section is considered a profile. The modules look for the [default] profile automatically. Define AZURE_PROFILE in the
+environment or pass a profile parameter to specify a specific profile.
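+
+For example, a credentials file might define a hypothetical ``testing`` profile next to the default one:
+
+.. code-block:: ini
+
+    [default]
+    subscription_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+    client_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+    secret=xxxxxxxxxxxxxxxxx
+    tenant=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+    [testing]
+    subscription_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+    client_id=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+    secret=xxxxxxxxxxxxxxxxx
+    tenant=xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+Setting ``AZURE_PROFILE=testing`` in the environment (or passing ``profile: testing`` to a task) would then select the second set of credentials.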
+
+Passing as Parameters
+`````````````````````
+
+If you wish to pass credentials as parameters to a task, use the following parameters for service principal:
+
+* client_id
+* secret
+* subscription_id
+* tenant
+
+Or, pass the following parameters for Active Directory username/password:
+
+* ad_user
+* password
+* subscription_id
+
+Or, pass the following parameters for ADFS username/password:
+
+* ad_user
+* password
+* client_id
+* tenant
+* adfs_authority_url
+
+"adfs_authority_url" is optional. It's necessary only when you have own ADFS authority like https://yourdomain.com/adfs.
+
+
+Other Cloud Environments
+------------------------
+
+To use an Azure Cloud other than the default public cloud (for example, Azure China Cloud, Azure US Government Cloud, Azure Stack),
+pass the "cloud_environment" argument to modules, configure it in a credential profile, or set the "AZURE_CLOUD_ENVIRONMENT"
+environment variable. The value is either a cloud name as defined by the Azure Python SDK (for example, "AzureChinaCloud",
+"AzureUSGovernment"; defaults to "AzureCloud") or an Azure metadata discovery URL (for Azure Stack).
+
+Creating Virtual Machines
+-------------------------
+
+There are two ways to create a virtual machine, both involving the azure_rm_virtualmachine module. We can either create
+a storage account, network interface, security group and public IP address and pass the names of these objects to the
+module as parameters, or we can let the module do the work for us and accept the defaults it chooses.
+
+Creating Individual Components
+..............................
+
+An Azure module is available to help you create a storage account, virtual network, subnet, network interface,
+security group and public IP. Here is a full example of creating each of these and passing the names to the
+azure_rm_virtualmachine module at the end:
+
+.. code-block:: yaml
+
+ - name: Create storage account
+ azure_rm_storageaccount:
+ resource_group: Testing
+ name: testaccount001
+ account_type: Standard_LRS
+
+ - name: Create virtual network
+ azure_rm_virtualnetwork:
+ resource_group: Testing
+ name: testvn001
+ address_prefixes: "10.10.0.0/16"
+
+ - name: Add subnet
+ azure_rm_subnet:
+ resource_group: Testing
+ name: subnet001
+ address_prefix: "10.10.0.0/24"
+ virtual_network: testvn001
+
+ - name: Create public ip
+ azure_rm_publicipaddress:
+ resource_group: Testing
+ allocation_method: Static
+ name: publicip001
+
+ - name: Create security group that allows SSH
+ azure_rm_securitygroup:
+ resource_group: Testing
+ name: secgroup001
+ rules:
+ - name: SSH
+ protocol: Tcp
+ destination_port_range: 22
+ access: Allow
+ priority: 101
+ direction: Inbound
+
+ - name: Create NIC
+ azure_rm_networkinterface:
+ resource_group: Testing
+ name: testnic001
+ virtual_network: testvn001
+ subnet: subnet001
+ public_ip_name: publicip001
+ security_group: secgroup001
+
+ - name: Create virtual machine
+ azure_rm_virtualmachine:
+ resource_group: Testing
+ name: testvm001
+ vm_size: Standard_D1
+ storage_account: testaccount001
+ storage_container: testvm001
+ storage_blob: testvm001.vhd
+ admin_username: admin
+ admin_password: Password!
+ network_interfaces: testnic001
+ image:
+ offer: CentOS
+ publisher: OpenLogic
+ sku: '7.1'
+ version: latest
+
+Each of the Azure modules offers a variety of parameter options. Not all options are demonstrated in the above example.
+See each individual module for further details and examples.
+
+
+Creating a Virtual Machine with Default Options
+...............................................
+
+If you simply want to create a virtual machine without specifying all the details, you can do that as well. The only
+caveat is that you will need a virtual network with one subnet already in your resource group. Assuming you have a
+virtual network already with an existing subnet, you can run the following to create a VM:
+
+.. code-block:: yaml
+
+ azure_rm_virtualmachine:
+ resource_group: Testing
+ name: testvm10
+ vm_size: Standard_D1
+ admin_username: chouseknecht
+ ssh_password_enabled: false
+ ssh_public_keys: "{{ ssh_keys }}"
+ image:
+ offer: CentOS
+ publisher: OpenLogic
+ sku: '7.1'
+ version: latest
+
+
+Creating a Virtual Machine in Availability Zones
+..................................................
+
+If you want to create a VM in an availability zone,
+consider the following:
+
+* Both OS disk and data disk must be a 'managed disk', not an 'unmanaged disk'.
+* When creating a VM with the ``azure_rm_virtualmachine`` module,
+ you need to explicitly set the ``managed_disk_type`` parameter
+ to change the OS disk to a managed disk.
+  Otherwise, the OS disk becomes an unmanaged disk.
+* When you create a data disk with the ``azure_rm_manageddisk`` module,
+ you need to explicitly specify the ``storage_account_type`` parameter
+ to make it a managed disk.
+ Otherwise, the data disk will be an unmanaged disk.
+* A managed disk does not require a storage account or a storage container,
+  unlike an unmanaged disk.
+ In particular, note that once a VM is created on an unmanaged disk,
+ an unnecessary storage container named "vhds" is automatically created.
+* When you create an IP address with the ``azure_rm_publicipaddress`` module,
+ you must set the ``sku`` parameter to ``standard``.
+ Otherwise, the IP address cannot be used in an availability zone.
+
+
+Dynamic Inventory Script
+------------------------
+
+If you are not familiar with Ansible's dynamic inventory scripts, check out :ref:`Intro to Dynamic Inventory <intro_dynamic_inventory>`.
+
+The Azure Resource Manager inventory script is called `azure_rm.py <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.py>`_. It authenticates with the Azure API exactly the same as the
+Azure modules, which means you will either define the same environment variables described above in `Using Environment Variables`_,
+create a ``$HOME/.azure/credentials`` file (also described above in `Storing in a File`_), or pass command line parameters. To see available command
+line options execute the following:
+
+.. code-block:: bash
+
+ $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.py
+ $ ./azure_rm.py --help
+
+As with all dynamic inventory scripts, the script can be executed directly, passed as a parameter to the ansible command,
+or passed directly to ansible-playbook using the -i option. No matter how it is executed, the script produces JSON representing
+all of the hosts found in your Azure subscription. You can narrow this down to just hosts found in a specific set of
+Azure resource groups, or even down to a specific host.
+
+For a given host, the inventory script provides the following host variables:
+
+.. code-block:: JSON
+
+ {
+ "ansible_host": "XXX.XXX.XXX.XXX",
+ "computer_name": "computer_name2",
+ "fqdn": null,
+ "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
+ "image": {
+ "offer": "CentOS",
+ "publisher": "OpenLogic",
+ "sku": "7.1",
+ "version": "latest"
+ },
+ "location": "westus",
+ "mac_address": "00-00-5E-00-53-FE",
+ "name": "object-name",
+ "network_interface": "interface-name",
+ "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
+ "network_security_group": null,
+ "network_security_group_id": null,
+ "os_disk": {
+ "name": "object-name",
+ "operating_system_type": "Linux"
+ },
+ "plan": null,
+ "powerstate": "running",
+ "private_ip": "172.26.3.6",
+ "private_ip_alloc_method": "Static",
+ "provisioning_state": "Succeeded",
+ "public_ip": "XXX.XXX.XXX.XXX",
+ "public_ip_alloc_method": "Static",
+ "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
+ "public_ip_name": "object-name",
+ "resource_group": "galaxy-production",
+ "security_group": "object-name",
+ "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
+ "tags": {
+ "db": "mysql"
+ },
+ "type": "Microsoft.Compute/virtualMachines",
+ "virtual_machine_size": "Standard_DS4"
+ }
+
+Host Groups
+...........
+
+By default hosts are grouped by:
+
+* azure (all hosts)
+* location name
+* resource group name
+* security group name
+* tag key
+* tag key_value
+* os_disk operating_system_type (Windows/Linux)
+
+You can control host groupings and host selection by either defining environment variables or creating an
+azure_rm.ini file in your current working directory.
+
+NOTE: An .ini file will take precedence over environment variables.
+
+NOTE: The name of the .ini file is the basename of the inventory script (in other words, 'azure_rm') with a '.ini'
+extension. This allows you to copy, rename and customize the inventory script and have matching .ini files all in
+the same directory.
+
+Control grouping using the following variables defined in the environment:
+
+* AZURE_GROUP_BY_RESOURCE_GROUP=yes
+* AZURE_GROUP_BY_LOCATION=yes
+* AZURE_GROUP_BY_SECURITY_GROUP=yes
+* AZURE_GROUP_BY_TAG=yes
+* AZURE_GROUP_BY_OS_FAMILY=yes
+
+Select hosts within specific resource groups by assigning a comma separated list to:
+
+* AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
+
+Select hosts for a specific tag key by assigning a comma separated list of tag keys to:
+
+* AZURE_TAGS=key1,key2,key3
+
+Select hosts for specific locations by assigning a comma separated list of locations to:
+
+* AZURE_LOCATIONS=eastus,eastus2,westus
+
+Or, select hosts for specific tag key:value pairs by assigning a comma separated list of key:value pairs to:
+
+* AZURE_TAGS=key1:value1,key2:value2
+
+If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
+
+* AZURE_INCLUDE_POWERSTATE=no
+
+A sample azure_rm.ini file is included alongside the inventory script
+`here <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.ini>`_.
+An .ini file will contain the following:
+
+.. code-block:: ini
+
+ [azure]
+ # Control which resource groups are included. By default all resources groups are included.
+ # Set resource_groups to a comma separated list of resource groups names.
+ #resource_groups=
+
+ # Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
+ #tags=
+
+ # Control which locations are included. Set locations to a comma separated list of locations.
+ #locations=
+
+ # Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
+ # Valid values: yes, no, true, false, True, False, 0, 1.
+ include_powerstate=yes
+
+ # Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
+ group_by_resource_group=yes
+ group_by_location=yes
+ group_by_security_group=yes
+ group_by_tag=yes
+ group_by_os_family=yes
+
+Examples
+........
+
+Here are some examples using the inventory script:
+
+.. code-block:: bash
+
+ # Download inventory script
+ $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/azure_rm.py
+
+ # Execute /bin/uname on all instances in the Testing resource group
+ $ ansible -i azure_rm.py Testing -m shell -a "/bin/uname -a"
+
+ # Execute win_ping on all Windows instances
+ $ ansible -i azure_rm.py windows -m win_ping
+
+ # Execute ping on all Linux instances
+ $ ansible -i azure_rm.py linux -m ping
+
+ # Use the inventory script to print instance specific information
+ $ ./azure_rm.py --host my_instance_host_name --resource-groups=Testing --pretty
+
+ # Use the inventory script with ansible-playbook
+ $ ansible-playbook -i ./azure_rm.py test_playbook.yml
+
+Here is a simple playbook to exercise the Azure inventory script:
+
+.. code-block:: yaml
+
+ - name: Test the inventory script
+ hosts: azure
+ connection: local
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: "{{ inventory_hostname }} has powerstate {{ powerstate }}"
+
+You can execute the playbook with something like:
+
+.. code-block:: bash
+
+ $ ansible-playbook -i ./azure_rm.py test_azure_inventory.yml
+
+
+Disabling certificate validation on Azure endpoints
+...................................................
+
+When an HTTPS proxy is present, or when using Azure Stack, it may be necessary to disable certificate validation for
+Azure endpoints in the Azure modules. This is not a recommended security practice, but may be necessary when the system
+CA store cannot be altered to include the necessary CA certificate. Certificate validation can be controlled by setting
+the "cert_validation_mode" value in a credential profile, via the "AZURE_CERT_VALIDATION_MODE" environment variable, or
+by passing the "cert_validation_mode" argument to any Azure module. The default value is "validate"; setting the value
+to "ignore" will prevent all certificate validation. The module argument takes precedence over a credential profile value,
+which takes precedence over the environment value.
diff --git a/docs/docsite/rst/scenario_guides/guide_cloudstack.rst b/docs/docsite/rst/scenario_guides/guide_cloudstack.rst
new file mode 100644
index 00000000..fcfb8120
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_cloudstack.rst
@@ -0,0 +1,377 @@
+CloudStack Cloud Guide
+======================
+
+.. _cloudstack_introduction:
+
+Introduction
+````````````
+The purpose of this section is to explain how to put Ansible modules together to use Ansible in a CloudStack context. You will find more usage examples in the details section of each module.
+
+Ansible contains a number of extra modules for interacting with CloudStack based clouds. All modules support check mode, are designed to be idempotent, have been created and tested, and are maintained by the community.
+
+.. note:: Some of the modules will require domain admin or root admin privileges.
+
+Prerequisites
+`````````````
+Prerequisites for using the CloudStack modules are minimal. In addition to Ansible itself, all of the modules require the Python library ``cs`` (https://pypi.org/project/cs/).
+
+You'll need this Python module installed on the execution host, usually your workstation.
+
+.. code-block:: bash
+
+ $ pip install cs
+
+Or alternatively starting with Debian 9 and Ubuntu 16.04:
+
+.. code-block:: bash
+
+ $ sudo apt install python-cs
+
+.. note:: cs also includes a command line interface for ad-hoc interaction with the CloudStack API, for example ``$ cs listVirtualMachines state=Running``.
+
+Limitations and Known Issues
+````````````````````````````
+VPC support has been improved since Ansible 2.3 but is not yet fully implemented. The community is working on the VPC integration.
+
+Credentials File
+````````````````
+You can pass credentials and the endpoint of your cloud as module arguments, however in most cases it is far less work to store your credentials in the cloudstack.ini file.
+
+The python library cs looks for the credentials file in the following order (last one wins):
+
+* A ``.cloudstack.ini`` (note the dot) file in the home directory.
+* A ``CLOUDSTACK_CONFIG`` environment variable pointing to an .ini file.
+* A ``cloudstack.ini`` (without the dot) file in the current working directory, the same directory your playbooks are located in.
+
+The structure of the ini file must look like this:
+
+.. code-block:: bash
+
+ $ cat $HOME/.cloudstack.ini
+ [cloudstack]
+ endpoint = https://cloud.example.com/client/api
+ key = api key
+ secret = api secret
+ timeout = 30
+
+.. Note:: The section ``[cloudstack]`` is the default section. The ``CLOUDSTACK_REGION`` environment variable can be used to define the default section.
+
+.. versionadded:: 2.4
+
+Environment variables of the form ``CLOUDSTACK_*``, as described in the documentation of the library ``cs`` (for example ``CLOUDSTACK_TIMEOUT`` and ``CLOUDSTACK_METHOD``), are also supported by Ansible. It is even possible to have an incomplete config in your cloudstack.ini:
+
+.. code-block:: bash
+
+ $ cat $HOME/.cloudstack.ini
+ [cloudstack]
+ endpoint = https://cloud.example.com/client/api
+ timeout = 30
+
+and fill in the missing data by either setting ENV variables or task params:
+
+.. code-block:: yaml
+
+ ---
+ - name: provision our VMs
+ hosts: cloud-vm
+ tasks:
+ - name: ensure VMs are created and running
+ delegate_to: localhost
+ cs_instance:
+ api_key: your api key
+ api_secret: your api secret
+ ...
+
+Regions
+```````
+If you use more than one CloudStack region, you can define as many sections as you want and name them as you like, for example:
+
+.. code-block:: bash
+
+ $ cat $HOME/.cloudstack.ini
+ [exoscale]
+ endpoint = https://api.exoscale.ch/compute
+ key = api key
+ secret = api secret
+
+ [example_cloud_one]
+ endpoint = https://cloud-one.example.com/client/api
+ key = api key
+ secret = api secret
+
+ [example_cloud_two]
+ endpoint = https://cloud-two.example.com/client/api
+ key = api key
+ secret = api secret
+
+.. Hint:: Sections can also be used to log into the same region using different accounts.
+
+Pass the ``api_region`` argument to the CloudStack modules to select the region you want.
+
+.. code-block:: yaml
+
+ - name: ensure my ssh public key exists on Exoscale
+ cs_sshkeypair:
+ name: my-ssh-key
+ public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+ api_region: exoscale
+ delegate_to: localhost
+
+Or by looping over a regions list if you want to do the task in every region:
+
+.. code-block:: yaml
+
+ - name: ensure my ssh public key exists in all CloudStack regions
+ local_action: cs_sshkeypair
+ name: my-ssh-key
+ public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+ api_region: "{{ item }}"
+ loop:
+ - exoscale
+ - example_cloud_one
+ - example_cloud_two
+
+Environment Variables
+`````````````````````
+.. versionadded:: 2.3
+
+Since Ansible 2.3 it is possible to use environment variables for domain (``CLOUDSTACK_DOMAIN``), account (``CLOUDSTACK_ACCOUNT``), project (``CLOUDSTACK_PROJECT``), VPC (``CLOUDSTACK_VPC``) and zone (``CLOUDSTACK_ZONE``). This simplifies the tasks by not repeating the arguments for every task.
+
+Below you see an example of how it can be used in combination with Ansible's block feature:
+
+.. code-block:: yaml
+
+ - hosts: cloud-vm
+ tasks:
+ - block:
+ - name: ensure my ssh public key
+ cs_sshkeypair:
+ name: my-ssh-key
+ public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+
+          - name: ensure my instance is created and running
+ cs_instance:
+ display_name: "{{ inventory_hostname_short }}"
+ template: Linux Debian 7 64-bit 20GB Disk
+ service_offering: "{{ cs_offering }}"
+ ssh_key: my-ssh-key
+ state: running
+
+ delegate_to: localhost
+ environment:
+ CLOUDSTACK_DOMAIN: root/customers
+ CLOUDSTACK_PROJECT: web-app
+ CLOUDSTACK_ZONE: sf-1
+
+.. Note:: You are still able to overwrite the environment variables using the module arguments, for example ``zone: sf-2``
+
+.. Note:: Unlike ``CLOUDSTACK_REGION`` these additional environment variables are ignored in the CLI ``cs``.
+
+Use Cases
+`````````
+The following should give you some ideas of how to use the modules to provision VMs in the cloud. As always, there isn't only one way to do it, but keeping it simple at the beginning is always a good start.
+
+Use Case: Provisioning in an Advanced Networking CloudStack setup
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+Our CloudStack cloud has an advanced networking setup. We would like to provision web servers, which get a static NAT and open firewall ports 80 and 443. Further, we provision database servers, to which we do not give any access. For accessing the VMs by SSH we use an SSH jump host.
+
+This is what our inventory looks like:
+
+.. code-block:: none
+
+ [cloud-vm:children]
+ webserver
+ db-server
+ jumphost
+
+ [webserver]
+ web-01.example.com public_ip=198.51.100.20
+ web-02.example.com public_ip=198.51.100.21
+
+ [db-server]
+ db-01.example.com
+ db-02.example.com
+
+ [jumphost]
+ jump.example.com public_ip=198.51.100.22
+
+As you can see, the public IPs for our web servers and jumphost have been assigned as the variable ``public_ip`` directly in the inventory.
+
+To configure the jumphost, web servers and database servers, we use ``group_vars``. The ``group_vars`` directory contains 4 files for configuration of the groups: cloud-vm, jumphost, webserver and db-server. The cloud-vm group is there for specifying the defaults of our cloud infrastructure.
+
+.. code-block:: yaml
+
+ # file: group_vars/cloud-vm
+ ---
+ cs_offering: Small
+ cs_firewall: []
+
+Our database servers should get more CPU and RAM, so we use a ``Large`` offering for them.
+
+.. code-block:: yaml
+
+ # file: group_vars/db-server
+ ---
+ cs_offering: Large
+
+The web servers should get a ``Small`` offering as we would scale them horizontally, which is also our default offering. We also ensure the known web ports are opened for the world.
+
+.. code-block:: yaml
+
+ # file: group_vars/webserver
+ ---
+ cs_firewall:
+ - { port: 80 }
+ - { port: 443 }
+
+Further we provision a jump host which has only port 22 opened for accessing the VMs from our office IPv4 network.
+
+.. code-block:: yaml
+
+ # file: group_vars/jumphost
+ ---
+ cs_firewall:
+ - { port: 22, cidr: "17.17.17.0/24" }
+
+Now to the fun part. We create a playbook, ``infra.yaml``, to build our infrastructure:
+
+.. code-block:: yaml
+
+ # file: infra.yaml
+ ---
+ - name: provision our VMs
+ hosts: cloud-vm
+ tasks:
+ - name: run all enclosed tasks from localhost
+ delegate_to: localhost
+ block:
+ - name: ensure VMs are created and running
+ cs_instance:
+ name: "{{ inventory_hostname_short }}"
+ template: Linux Debian 7 64-bit 20GB Disk
+ service_offering: "{{ cs_offering }}"
+ state: running
+
+ - name: ensure firewall ports opened
+ cs_firewall:
+ ip_address: "{{ public_ip }}"
+ port: "{{ item.port }}"
+ cidr: "{{ item.cidr | default('0.0.0.0/0') }}"
+ loop: "{{ cs_firewall }}"
+ when: public_ip is defined
+
+ - name: ensure static NATs
+ cs_staticnat: vm="{{ inventory_hostname_short }}" ip_address="{{ public_ip }}"
+ when: public_ip is defined
+
+In the above play we defined 3 tasks and used the group ``cloud-vm`` as the target to handle all VMs in the cloud, but instead of SSHing to these VMs, we use ``delegate_to: localhost`` to execute the API calls locally from our workstation.
+
+In the first task, we ensure we have a running VM created with the Debian template. If the VM is already created but stopped, it would just start it. If you want to change the offering on an existing VM, you must add ``force: yes`` to the task, which would stop the VM, change the offering and start the VM again.
+
+In the second task we ensure the ports are opened if we give a public IP to the VM.
+
+In the third task we add static NAT to the VMs having a public IP defined.
+
+
+.. Note:: The public IP addresses must have been acquired in advance; see also ``cs_ip_address``.
+
+.. Note:: For some modules, for example ``cs_sshkeypair``, you usually want this to be executed only once, not for every VM. Therefore you would make a separate play for it targeting localhost. You will find an example in the use cases below.
+
+Use Case: Provisioning on a Basic Networking CloudStack setup
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+A basic networking CloudStack setup is slightly different: Every VM gets a public IP directly assigned and security groups are used for access restriction policy.
+
+This is what our inventory looks like:
+
+.. code-block:: none
+
+ [cloud-vm:children]
+ webserver
+
+ [webserver]
+ web-01.example.com
+ web-02.example.com
+
+The default for your VMs looks like this:
+
+.. code-block:: yaml
+
+ # file: group_vars/cloud-vm
+ ---
+ cs_offering: Small
+      cs_securitygroups: [ 'default' ]
+
+Our webserver will also be in security group ``web``:
+
+.. code-block:: yaml
+
+ # file: group_vars/webserver
+ ---
+ cs_securitygroups: [ 'default', 'web' ]
+
+The playbook looks like the following:
+
+.. code-block:: yaml
+
+ # file: infra.yaml
+ ---
+ - name: cloud base setup
+ hosts: localhost
+ tasks:
+ - name: upload ssh public key
+ cs_sshkeypair:
+ name: defaultkey
+ public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+
+ - name: ensure security groups exist
+ cs_securitygroup:
+ name: "{{ item }}"
+ loop:
+ - default
+ - web
+
+ - name: add inbound SSH to security group default
+ cs_securitygroup_rule:
+ security_group: default
+ start_port: "{{ item }}"
+ end_port: "{{ item }}"
+ loop:
+ - 22
+
+ - name: add inbound TCP rules to security group web
+ cs_securitygroup_rule:
+ security_group: web
+ start_port: "{{ item }}"
+ end_port: "{{ item }}"
+ loop:
+ - 80
+ - 443
+
+ - name: install VMs in the cloud
+ hosts: cloud-vm
+ tasks:
+ - delegate_to: localhost
+ block:
+ - name: create and run VMs on CloudStack
+ cs_instance:
+ name: "{{ inventory_hostname_short }}"
+ template: Linux Debian 7 64-bit 20GB Disk
+ service_offering: "{{ cs_offering }}"
+ security_groups: "{{ cs_securitygroups }}"
+ ssh_key: defaultkey
+            state: running
+ register: vm
+
+ - name: show VM IP
+ debug: msg="VM {{ inventory_hostname }} {{ vm.default_ip }}"
+
+ - name: assign IP to the inventory
+ set_fact: ansible_ssh_host={{ vm.default_ip }}
+
+ - name: waiting for SSH to come up
+ wait_for: port=22 host={{ vm.default_ip }} delay=5
+
+In the first play we set up the security groups, and in the second play the VMs will be created and assigned to these groups. Further, you see that we assign the public IP returned from the modules to the host inventory. This is needed as we do not know the IPs we will get in advance. In a next step you would configure the DNS servers with these IPs for accessing the VMs with their DNS name.
+
+In the last task we wait for SSH to be accessible, so any later play would be able to access the VM by SSH without failure.
diff --git a/docs/docsite/rst/scenario_guides/guide_docker.rst b/docs/docsite/rst/scenario_guides/guide_docker.rst
new file mode 100644
index 00000000..9c992a17
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_docker.rst
@@ -0,0 +1,330 @@
+Docker Guide
+============
+
+Ansible offers the following modules for orchestrating Docker containers:
+
+ docker_compose
+ Use your existing Docker compose files to orchestrate containers on a single Docker daemon or on
+ Swarm. Supports compose versions 1 and 2.
+
+ docker_container
+ Manages the container lifecycle by providing the ability to create, update, stop, start and destroy a
+ container.
+
+ docker_image
+ Provides full control over images, including: build, pull, push, tag and remove.
+
+ docker_image_info
+        Inspects one or more images in the Docker host's image cache, providing the information for making
+        decisions or assertions in a playbook.
+
+ docker_login
+ Authenticates with Docker Hub or any Docker registry and updates the Docker Engine config file, which
+ in turn provides password-free pushing and pulling of images to and from the registry.
+
+ docker (dynamic inventory)
+ Dynamically builds an inventory of all the available containers from a set of one or more Docker hosts.
+
+
+Ansible 2.1.0 includes major updates to the Docker modules, marking the start of a project to create a complete and
+integrated set of tools for orchestrating containers. In addition to the above modules, we are also working on the
+following:
+
+Still using Dockerfile to build images? Check out `ansible-bender <https://github.com/ansible-community/ansible-bender>`_,
+and start building images from your Ansible playbooks.
+
+Use `Ansible Operator <https://learn.openshift.com/ansibleop/ansible-operator-overview/>`_
+to launch your docker-compose file on `OpenShift <https://www.okd.io/>`_. Go from an app on your laptop to a fully
+scalable app in the cloud with Kubernetes in just a few moments.
+
+There's more planned. See the latest ideas and thinking at the `Ansible proposal repo <https://github.com/ansible/proposals/tree/master/docker>`_.
+
+Requirements
+------------
+
+Using the docker modules requires having the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_
+installed on the host running Ansible. You will need to have >= 1.7.0 installed. For Python 2.7 or
+Python 3, you can install it as follows:
+
+.. code-block:: bash
+
+ $ pip install docker
+
+For Python 2.6, you need a version before 2.0. For these versions, the SDK was called ``docker-py``,
+so you need to install it as follows:
+
+.. code-block:: bash
+
+ $ pip install 'docker-py>=1.7.0'
+
+Please note that only one of ``docker`` and ``docker-py`` may be installed. Installing both will result in
+a broken installation. If this happens, Ansible will detect it and inform you about it::
+
+ Cannot have both the docker-py and docker python modules installed together as they use the same
+ namespace and cause a corrupt installation. Please uninstall both packages, and re-install only
+ the docker-py or docker python module. It is recommended to install the docker module if no support
+ for Python 2.6 is required. Please note that simply uninstalling one of the modules can leave the
+ other module in a broken state.
+
+The docker_compose module also requires `docker-compose <https://github.com/docker/compose>`_:
+
+.. code-block:: bash
+
+ $ pip install 'docker-compose>=1.7.0'
+
+
+Connecting to the Docker API
+----------------------------
+
+You can connect to a local or remote API using parameters passed to each task or by setting environment variables.
+The order of precedence is command line parameters and then environment variables. If neither a command line
+option nor an environment variable is found, a default value will be used. The default values are provided under
+`Parameters`_.
+
+
+Parameters
+..........
+
+Control how modules connect to the Docker API by passing the following parameters:
+
+ docker_host
+ The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``.
+ To connect to a remote host, provide the TCP connection string. For example: ``tcp://192.0.2.23:2376``. If
+ TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the
+ connection URL with 'https'.
+
+ api_version
+ The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
+ by docker-py.
+
+ timeout
+ The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
+
+ tls
+ Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
+ Defaults to False.
+
+ tls_verify
+ Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ Default is False.
+
+ cacert_path
+ Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+
+ cert_path
+ Path to the client's TLS certificate file.
+
+ key_path
+ Path to the client's TLS key file.
+
+ tls_hostname
+ When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
+ to 'localhost'.
+
+ ssl_version
+        Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
+        was 1.0.
+
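+For example, a single task might combine several of these parameters to reach a remote, TLS-protected daemon. This is only a sketch; the host address and certificate paths are placeholders:
+
+.. code-block:: yaml
+
+    - name: Pull an image from a remote Docker daemon over TLS
+      docker_image:
+        name: busybox
+        source: pull
+        docker_host: tcp://192.0.2.23:2376
+        tls_verify: yes
+        cacert_path: /etc/docker/certs/ca.pem
+        cert_path: /etc/docker/certs/cert.pem
+        key_path: /etc/docker/certs/key.pem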
+
+Environment Variables
+.....................
+
+Control how the modules connect to the Docker API by setting the following variables in the environment of the host
+running Ansible:
+
+ DOCKER_HOST
+ The URL or Unix socket path used to connect to the Docker API.
+
+ DOCKER_API_VERSION
+ The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
+ by docker-py.
+
+ DOCKER_TIMEOUT
+ The maximum amount of time in seconds to wait on a response from the API.
+
+ DOCKER_CERT_PATH
+ Path to the directory containing the client certificate, client key and CA certificate.
+
+ DOCKER_SSL_VERSION
+ Provide a valid SSL version number.
+
+ DOCKER_TLS
+ Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.
+
+ DOCKER_TLS_VERIFY
+ Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
+
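+For example, you might export the following before running a playbook; the daemon address and certificate directory are placeholders:
+
+.. code-block:: bash
+
+    $ export DOCKER_HOST=tcp://192.0.2.23:2376
+    $ export DOCKER_TLS_VERIFY=1
+    $ export DOCKER_CERT_PATH=/etc/docker/certs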
+
+Dynamic Inventory Script
+------------------------
+The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
+because the inventory is generated at run-time rather than being read from a static file. The script generates the
+inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
+script contacts can be defined using environment variables or a configuration file.
+
+Groups
+......
+The script will create the following host groups:
+
+ - container id
+ - container name
+ - container short id
+ - image_name (image_<image name>)
+ - docker_host
+ - running
+ - stopped
+
+Examples
+........
+
+You can run the script interactively from the command line or pass it as the inventory to a playbook. Here are a few
+examples to get you started:
+
+.. code-block:: bash
+
+ # Connect to the Docker API on localhost port 4243 and format the JSON output
+ DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
+
+ # Any container's ssh port exposed on 0.0.0.0 will be mapped to
+ # another IP address (where Ansible will attempt to connect via SSH)
+ DOCKER_DEFAULT_IP=192.0.2.5 ./docker.py --pretty
+
+ # Run as input to a playbook:
+ ansible-playbook -i ./docker.py docker_inventory_test.yml
+
+ # Simple playbook to invoke with the above example:
+
+ - name: Test docker_inventory, this will not connect to any hosts
+ hosts: all
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: "Container - {{ inventory_hostname }}"
+
+Configuration
+.............
+You can control the behavior of the inventory script by defining environment variables, or
+creating a docker.yml file (sample provided in https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/docker.py). The order of precedence is the docker.yml
+file and then environment variables.
+
+
+Environment Variables
+;;;;;;;;;;;;;;;;;;;;;;
+
+To connect to a single Docker API the following variables can be defined in the environment to control the connection
+options. These are the same environment variables used by the Docker modules.
+
+    DOCKER_HOST
+        The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
+
+    DOCKER_API_VERSION
+        The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
+        by docker-py.
+
+    DOCKER_TIMEOUT
+        The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
+
+    DOCKER_TLS
+        Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
+        Defaults to False.
+
+    DOCKER_TLS_VERIFY
+        Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+        Defaults to False.
+
+    DOCKER_TLS_HOSTNAME
+        When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
+        to localhost.
+
+    DOCKER_CERT_PATH
+        Path to the directory containing the client certificate, client key and CA certificate.
+
+    DOCKER_SSL_VERSION
+        Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
+        was 1.0.
+
+In addition to the connection variables there are a couple variables used to control the execution and output of the
+script:
+
+    DOCKER_CONFIG_FILE
+        Path to the configuration file. Defaults to ./docker.yml.
+
+    DOCKER_PRIVATE_SSH_PORT
+        The private port (container port) on which SSH is listening for connections. Defaults to 22.
+
+    DOCKER_DEFAULT_IP
+        The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
+
+
+Configuration File
+;;;;;;;;;;;;;;;;;;
+
+Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
+
+The default name of the file is derived from the name of the inventory script. By default the script will look for the
+basename of the script (in other words, docker) with an extension of '.yml'.
+
+You can also override the default name of the script by defining DOCKER_CONFIG_FILE in the environment.
+
+Here's what you can define in docker.yml:
+
+ defaults
+ Defines a default connection. Defaults will be taken from this and applied to any values not provided
+ for a host defined in the hosts list.
+
+ hosts
+ If you wish to get inventory from more than one Docker host, define a hosts list.
+
+For the default host and each host in the hosts list define the following attributes:
+
+.. code-block:: yaml
+
+ host:
+ description: The URL or Unix socket path used to connect to the Docker API.
+ required: yes
+
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ default: false
+ required: false
+
+ tls_verify:
+        description: Connect using TLS and verify the authenticity of the Docker host server.
+ default: false
+ required: false
+
+ cert_path:
+ description: Path to the client's TLS certificate file.
+ default: null
+ required: false
+
+ cacert_path:
+ description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ default: null
+ required: false
+
+ key_path:
+ description: Path to the client's TLS key file.
+ default: null
+ required: false
+
+ version:
+ description: The Docker API version.
+ required: false
+ default: will be supplied by the docker-py module.
+
+ timeout:
+ description: The amount of time in seconds to wait on an API response.
+ required: false
+ default: 60
+
+ default_ip:
+ description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+ '0.0.0.0'.
+ required: false
+ default: 127.0.0.1
+
+ private_ssh_port:
+ description: The port containers use for SSH
+ required: false
+ default: 22
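+
+Putting these together, a hypothetical ``docker.yml`` covering the local daemon plus one remote TLS-secured host could look like this (addresses and paths are placeholders):
+
+.. code-block:: yaml
+
+    defaults:
+      host: unix://var/run/docker.sock
+
+    hosts:
+      - host: tcp://192.0.2.23:2376
+        tls_verify: true
+        cacert_path: /etc/docker/certs/ca.pem
+        cert_path: /etc/docker/certs/cert.pem
+        key_path: /etc/docker/certs/key.pem
+        default_ip: 192.0.2.23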
diff --git a/docs/docsite/rst/scenario_guides/guide_gce.rst b/docs/docsite/rst/scenario_guides/guide_gce.rst
new file mode 100644
index 00000000..6d9ca65a
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_gce.rst
@@ -0,0 +1,302 @@
+Google Cloud Platform Guide
+===========================
+
+.. _gce_intro:
+
+Introduction
+--------------------------
+
+Ansible + Google have been working together on a set of auto-generated
+Ansible modules designed to consistently and comprehensively cover the entirety
+of the Google Cloud Platform (GCP).
+
+Ansible contains modules for managing Google Cloud Platform resources,
+including creating instances, controlling network access, working with
+persistent disks, managing load balancers, and a lot more.
+
+These new modules can be found under a new consistent name scheme "gcp_*"
+(Note: gcp_target_proxy and gcp_url_map are legacy modules, despite the "gcp_*"
+name. Please use gcp_compute_target_proxy and gcp_compute_url_map instead).
+
+Additionally, the gcp_compute inventory plugin can discover all
+Google Compute Engine (GCE) instances
+and make them automatically available in your Ansible inventory.
+
+You may see a collection of other GCP modules that do not conform to this
+naming convention. These are the original modules primarily developed by the
+Ansible community. You will find some overlapping functionality such as with
+the "gce" module and the new "gcp_compute_instance" module. Either can be
+used, but you may experience issues trying to use them together.
+
+While the community GCP modules are not going away, Google is investing effort
+into the new "gcp_*" modules. Google is committed to ensuring the Ansible
+community has a great experience with GCP and therefore recommends adopting
+these new modules if possible.
+
+
+Requirements
+------------
+The GCP modules require both the ``requests`` and the
+``google-auth`` libraries to be installed.
+
+.. code-block:: bash
+
+ $ pip install requests google-auth
+
+Alternatively for RHEL / CentOS, the ``python-requests`` package is also
+available to satisfy the ``requests`` dependency.
+
+.. code-block:: bash
+
+ $ yum install python-requests
+
+Credentials
+-----------
+It's easy to create a GCP account with credentials for Ansible. You have multiple options to
+get your credentials; here are two of the most common options:
+
+* Service Accounts (Recommended): Use JSON service accounts with specific permissions.
+* Machine Accounts: Use the permissions associated with the GCP Instance you're using Ansible on.
+
+For the following examples, we'll be using service account credentials.
+
+To work with the GCP modules, you'll first need to get some credentials in the
+JSON format:
+
+1. `Create a Service Account <https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount>`_
+2. `Download JSON credentials <https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts>`_
+
+Once you have your credentials, there are two different ways to provide them to Ansible:
+
+* by specifying them directly as module parameters
+* by setting environment variables
+
+Providing Credentials as Module Parameters
+``````````````````````````````````````````
+
+For the GCP modules, you can specify the credentials as arguments:
+
+* ``auth_kind``: type of authentication being used (choices: machineaccount, serviceaccount, application)
+* ``service_account_email``: email associated with the project
+* ``service_account_file``: path to the JSON credentials file
+* ``project``: id of the project
+* ``scopes``: The specific scopes that you want the actions to use.
+
+For example, to create a new IP address using the ``gcp_compute_address`` module,
+you can use the following configuration:
+
+.. code-block:: yaml
+
+ - name: Create IP address
+ hosts: localhost
+ gather_facts: no
+
+ vars:
+ service_account_file: /home/my_account.json
+ project: my-project
+ auth_kind: serviceaccount
+ scopes:
+ - https://www.googleapis.com/auth/compute
+
+ tasks:
+
+ - name: Allocate an IP Address
+ gcp_compute_address:
+ state: present
+ name: 'test-address1'
+ region: 'us-west1'
+ project: "{{ project }}"
+ auth_kind: "{{ auth_kind }}"
+ service_account_file: "{{ service_account_file }}"
+ scopes: "{{ scopes }}"
+
+Providing Credentials as Environment Variables
+``````````````````````````````````````````````
+
+Set the following environment variables before running Ansible in order to configure your credentials:
+
+.. code-block:: bash
+
+ GCP_AUTH_KIND
+ GCP_SERVICE_ACCOUNT_EMAIL
+ GCP_SERVICE_ACCOUNT_FILE
+ GCP_SCOPES
+
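+For example, with service account credentials you might export the values used
+earlier in this guide (the file path is a placeholder for your own credentials
+file):
+
+.. code-block:: bash
+
+    export GCP_AUTH_KIND=serviceaccount
+    export GCP_SERVICE_ACCOUNT_FILE=/home/my_account.json
+    export GCP_SCOPES=https://www.googleapis.com/auth/compute
+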
+GCE Dynamic Inventory
+---------------------
+
+The best way to interact with your hosts is to use the gcp_compute inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed.
+
+To be able to use this GCE dynamic inventory plugin, you need to enable it first by specifying the following in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins = gcp_compute
+
+Then, create a file that ends in ``.gcp.yml`` in your root directory.
+
+The gcp_compute inventory plugin accepts the same authentication information as any GCP module.
+
+Here's an example of a valid inventory file:
+
+.. code-block:: yaml
+
+ plugin: gcp_compute
+ projects:
+ - graphite-playground
+ auth_kind: serviceaccount
+ service_account_file: /home/alexstephen/my_account.json
+
+
+Executing ``ansible-inventory --list -i <filename>.gcp.yml`` will create a list of GCP instances that are ready to be configured using Ansible.
+
+Create an instance
+``````````````````
+
+The full range of GCP modules provides the ability to create a wide variety of
+GCP resources with the full support of the entire GCP API.
+
+The following playbook creates a GCE instance. This instance relies on other GCP
+resources, such as a disk and a static IP address. By creating those resources in
+separate tasks, we can specify as much detail as necessary about how each one
+should be configured, for example the size and source image of the disk. By
+registering each result in a variable, we can simply reference that variable in
+the instance task; the gcp_compute_instance module will figure out the rest.
+
+.. code-block:: yaml
+
+ - name: Create an instance
+ hosts: localhost
+ gather_facts: no
+ vars:
+ gcp_project: my-project
+ gcp_cred_kind: serviceaccount
+ gcp_cred_file: /home/my_account.json
+ zone: "us-central1-a"
+ region: "us-central1"
+
+ tasks:
+ - name: create a disk
+ gcp_compute_disk:
+ name: 'disk-instance'
+ size_gb: 50
+ source_image: 'projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts'
+ zone: "{{ zone }}"
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/compute
+ state: present
+ register: disk
+ - name: create an address
+ gcp_compute_address:
+ name: 'address-instance'
+ region: "{{ region }}"
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/compute
+ state: present
+ register: address
+ - name: create an instance
+ gcp_compute_instance:
+ state: present
+ name: test-vm
+ machine_type: n1-standard-1
+ disks:
+ - auto_delete: true
+ boot: true
+ source: "{{ disk }}"
+ network_interfaces:
+ - network: null # use default
+ access_configs:
+ - name: 'External NAT'
+ nat_ip: "{{ address }}"
+ type: 'ONE_TO_ONE_NAT'
+ zone: "{{ zone }}"
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/compute
+ register: instance
+
+ - name: Wait for SSH to come up
+ wait_for: host={{ address.address }} port=22 delay=10 timeout=60
+
+ - name: Add host to groupname
+ add_host: hostname={{ address.address }} groupname=new_instances
+
+
+ - name: Manage new instances
+ hosts: new_instances
+ connection: ssh
+ become: True
+ roles:
+ - base_configuration
+ - production_server
+
+Note that use of the "add_host" module above creates a temporary, in-memory group. This means that a play in the same playbook can then manage machines
+in the 'new_instances' group, if so desired. Any sort of arbitrary configuration is possible at this point.
+
+For more information about Google Cloud, please visit the `Google Cloud website <https://cloud.google.com>`_.
+
+Migration Guides
+----------------
+
+gce.py -> gcp_compute_instance.py
+`````````````````````````````````
+As of Ansible 2.8, we're encouraging everyone to move from the ``gce`` module to the
+``gcp_compute_instance`` module. The ``gcp_compute_instance`` module has better
+support for all of GCP's features, fewer dependencies, more flexibility, and
+better support for GCP's authentication systems.
+
+The ``gcp_compute_instance`` module supports all of the features of the ``gce``
+module (and more!). Below is a mapping of ``gce`` fields over to
+``gcp_compute_instance`` fields.
+
+============================ ========================================== ======================
+ gce.py gcp_compute_instance.py Notes
+============================ ========================================== ======================
+ state state/status State on gce has multiple values: "present", "absent", "stopped", "started", "terminated". State on gcp_compute_instance is used to describe if the instance exists (present) or does not (absent). Status is used to describe if the instance is "started", "stopped" or "terminated".
+ image disks[].initialize_params.source_image You'll need to create a single disk using the disks[] parameter and set it to be the boot disk (disks[].boot = true)
+ image_family disks[].initialize_params.source_image See above.
+ external_projects disks[].initialize_params.source_image The name of the source_image will include the name of the project.
+ instance_names Use a loop or multiple tasks. Using loops is a more Ansible-centric way of creating multiple instances and gives you the most flexibility.
+ service_account_email service_accounts[].email This is the service_account email address that you want the instance to be associated with. It is not the service_account email address that is used for the credentials necessary to create the instance.
+ service_account_permissions service_accounts[].scopes These are the permissions you want to grant to the instance.
+ pem_file Not supported. We recommend using JSON service account credentials instead of PEM files.
+ credentials_file service_account_file
+ project_id project
+ name name This field does not accept an array of names. Use a loop to create multiple instances.
+ num_instances Use a loop For maximum flexibility, we're encouraging users to use Ansible features to create multiple instances, rather than letting the module do it for you.
+ network network_interfaces[].network
+ subnetwork network_interfaces[].subnetwork
+ persistent_boot_disk disks[].type = 'PERSISTENT'
+ disks disks[]
+ ip_forward can_ip_forward
+ external_ip network_interfaces[].access_configs.nat_ip This field takes multiple types of values. You can create an IP address with ``gcp_compute_address`` and place the name/output of the address here. You can also place the string value of the IP address's GCP name or the actual IP address.
+ disks_auto_delete disks[].auto_delete
+ preemptible scheduling.preemptible
+ disk_size disks[].initialize_params.disk_size_gb
+============================ ========================================== ======================
+
+An example playbook is below:
+
+.. code-block:: yaml
+
+ gcp_compute_instance:
+ name: "{{ item }}"
+ machine_type: n1-standard-1
+ ... # any other settings
+ zone: us-central1-a
+ project: "my-project"
+ auth_kind: "serviceaccount"
+ service_account_file: "~/my_account.json"
+ state: present
+ loop:
+ - instance-1
+ - instance-2
diff --git a/docs/docsite/rst/scenario_guides/guide_infoblox.rst b/docs/docsite/rst/scenario_guides/guide_infoblox.rst
new file mode 100644
index 00000000..2fa90703
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_infoblox.rst
@@ -0,0 +1,288 @@
+.. _nios_guide:
+
+************************
+ Infoblox Guide
+************************
+
+.. contents:: Topics
+
+This guide describes how to use Ansible with the Infoblox Network Identity Operating System (NIOS). With Ansible integration, you can use Ansible playbooks to automate Infoblox Core Network Services for IP address management (IPAM), DNS, and inventory tracking.
+
+You can review simple example tasks in the documentation for any of the :ref:`NIOS modules <nios_net tools_modules>` or look at the `Use cases with modules`_ section for more elaborate examples. See the `Infoblox <https://www.infoblox.com/>`_ website for more information on the Infoblox product.
+
+.. note:: You can retrieve most of the example playbooks used in this guide from the `network-automation/infoblox_ansible <https://github.com/network-automation/infoblox_ansible>`_ GitHub repository.
+
+Prerequisites
+=============
+Before using Ansible ``nios`` modules with Infoblox, you must install the ``infoblox-client`` on your Ansible control node:
+
+.. code-block:: bash
+
+ $ sudo pip install infoblox-client
+
+.. note::
+ You need an NIOS account with the WAPI feature enabled to use Ansible with Infoblox.
+
+.. _nios_credentials:
+
+Credentials and authenticating
+==============================
+
+To use Infoblox ``nios`` modules in playbooks, you need to configure the credentials to access your Infoblox system. The examples in this guide use credentials stored in ``<playbookdir>/group_vars/nios.yml``. Replace these values with your Infoblox credentials:
+
+.. code-block:: yaml
+
+ ---
+ nios_provider:
+ host: 192.0.0.2
+ username: admin
+ password: ansible
+
+NIOS lookup plugins
+===================
+
+Ansible includes the following lookup plugins for NIOS:
+
+- :ref:`nios <nios_lookup>` - Uses the Infoblox WAPI API to fetch NIOS specified objects, for example network views, DNS views, and host records.
+- :ref:`nios_next_ip <nios_next_ip_lookup>` - Provides the next available IP address from a network. You'll see an example of this in `Creating a host record`_.
+- :ref:`nios_next_network <nios_next_network_lookup>` - Returns the next available network range for a network-container.
+
+You must run the NIOS lookup plugins locally by specifying ``connection: local``. See :ref:`lookup plugins <lookup_plugins>` for more detail.
+
+
+Retrieving all network views
+----------------------------
+
+To retrieve all network views and save them in a variable, use the :ref:`set_fact <set_fact_module>` module with the :ref:`nios <nios_lookup>` lookup plugin:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: nios
+ connection: local
+ tasks:
+ - name: fetch all networkview objects
+ set_fact:
+ networkviews: "{{ lookup('nios', 'networkview', provider=nios_provider) }}"
+
+ - name: check the networkviews
+ debug:
+ var: networkviews
+
+
+Retrieving a host record
+------------------------
+
+To retrieve a set of host records, use the ``set_fact`` module with the ``nios`` lookup plugin and include a filter for the specific hosts you want to retrieve:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: nios
+ connection: local
+ tasks:
+ - name: fetch host leaf01
+ set_fact:
+ host: "{{ lookup('nios', 'record:host', filter={'name': 'leaf01.ansible.com'}, provider=nios_provider) }}"
+
+ - name: check the leaf01 return variable
+ debug:
+ var: host
+
+ - name: debug specific variable (ipv4 address)
+ debug:
+ var: host.ipv4addrs[0].ipv4addr
+
+ - name: fetch host leaf02
+ set_fact:
+ host: "{{ lookup('nios', 'record:host', filter={'name': 'leaf02.ansible.com'}, provider=nios_provider) }}"
+
+ - name: check the leaf02 return variable
+ debug:
+ var: host
+
+
+If you run this ``get_host_record.yml`` playbook, you should see results similar to the following:
+
+.. code-block:: none
+
+ $ ansible-playbook get_host_record.yml
+
+ PLAY [localhost] ***************************************************************************************
+
+ TASK [fetch host leaf01] ******************************************************************************
+ ok: [localhost]
+
+ TASK [check the leaf01 return variable] *************************************************************
+ ok: [localhost] => {
+ < ...output shortened...>
+ "host": {
+ "ipv4addrs": [
+ {
+ "configure_for_dhcp": false,
+ "host": "leaf01.ansible.com",
+ }
+ ],
+ "name": "leaf01.ansible.com",
+ "view": "default"
+ }
+ }
+
+ TASK [debug specific variable (ipv4 address)] ******************************************************
+ ok: [localhost] => {
+ "host.ipv4addrs[0].ipv4addr": "192.168.1.11"
+ }
+
+ TASK [fetch host leaf02] ******************************************************************************
+ ok: [localhost]
+
+ TASK [check the leaf02 return variable] *************************************************************
+ ok: [localhost] => {
+ < ...output shortened...>
+ "host": {
+ "ipv4addrs": [
+ {
+ "configure_for_dhcp": false,
+ "host": "leaf02.example.com",
+ "ipv4addr": "192.168.1.12"
+ }
+ ],
+ }
+ }
+
+ PLAY RECAP ******************************************************************************************
+ localhost : ok=5 changed=0 unreachable=0 failed=0
+
+The output above shows the host records for ``leaf01.ansible.com`` and ``leaf02.ansible.com`` that were retrieved by the ``nios`` lookup plugin. This playbook saves the information in variables which you can use in other playbooks. This allows you to use Infoblox as a single source of truth to gather and use information that changes dynamically. See :ref:`playbooks_variables` for more information on using Ansible variables. See the :ref:`nios <nios_lookup>` examples for more data options that you can retrieve.
+
+You can access these playbooks at `Infoblox lookup playbooks <https://github.com/network-automation/infoblox_ansible/tree/master/lookup_playbooks>`_.
+
+Use cases with modules
+======================
+
+You can use the ``nios`` modules in tasks to simplify common Infoblox workflows. Be sure to set up your :ref:`NIOS credentials<nios_credentials>` before following these examples.
+
+Configuring an IPv4 network
+---------------------------
+
+To configure an IPv4 network, use the :ref:`nios_network <nios_network_module>` module:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: nios
+ connection: local
+ tasks:
+ - name: Create a network on the default network view
+ nios_network:
+ network: 192.168.100.0/24
+ comment: sets the IPv4 network
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider: "{{nios_provider}}"
+
+Notice the last parameter, ``provider``, uses the variable ``nios_provider`` defined in the ``group_vars/`` directory.
+
+Creating a host record
+----------------------
+
+To create a host record named ``leaf03.ansible.com`` on the newly-created IPv4 network:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: nios
+ connection: local
+ tasks:
+ - name: configure an IPv4 host record
+ nios_host_record:
+ name: leaf03.ansible.com
+ ipv4addrs:
+ - ipv4addr:
+ "{{ lookup('nios_next_ip', '192.168.100.0/24', provider=nios_provider)[0] }}"
+ state: present
+ provider: "{{nios_provider}}"
+
+Notice the IPv4 address in this example uses the :ref:`nios_next_ip <nios_next_ip_lookup>` lookup plugin to find the next available IPv4 address on the network.
+
+Creating a forward DNS zone
+---------------------------
+
+To configure a forward DNS zone, use the ``nios_zone`` module:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: nios
+ connection: local
+ tasks:
+ - name: Create a forward DNS zone called ansible-test.com
+ nios_zone:
+ name: ansible-test.com
+ comment: local DNS zone
+ state: present
+ provider: "{{ nios_provider }}"
+
+Creating a reverse DNS zone
+---------------------------
+
+To configure a reverse DNS zone:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: nios
+ connection: local
+ tasks:
+ - name: configure a reverse mapping zone on the system using IPV6 zone format
+ nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider: "{{ nios_provider }}"
+
+Dynamic inventory script
+========================
+
+You can use the Infoblox dynamic inventory script to import your network node inventory with Infoblox NIOS. To gather the inventory from Infoblox, you need two files:
+
+- `infoblox.yaml <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/infoblox.yaml>`_ - A file that specifies the NIOS provider arguments and optional filters.
+
+- `infoblox.py <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/infoblox.py>`_ - The python script that retrieves the NIOS inventory.
+
+To use the Infoblox dynamic inventory script:
+
+#. Download the ``infoblox.yaml`` file and save it in the ``/etc/ansible`` directory.
+
+#. Modify the ``infoblox.yaml`` file with your NIOS credentials.
+
+#. Download the ``infoblox.py`` file and save it in the ``/etc/ansible/hosts`` directory.
+
+#. Change the permissions on the ``infoblox.py`` file to make the file an executable:
+
+.. code-block:: bash
+
+ $ sudo chmod +x /etc/ansible/hosts/infoblox.py
+
+You can optionally use ``./infoblox.py --list`` to test the script. After a few minutes, you should see your Infoblox inventory in JSON format. You can explicitly use the Infoblox dynamic inventory script as follows:
+
+.. code-block:: bash
+
+ $ ansible -i infoblox.py all -m ping
+
+You can also implicitly use the Infoblox dynamic inventory script by including it in your inventory directory (``/etc/ansible/hosts`` by default). See :ref:`dynamic_inventory` for more details.
+
+.. seealso::
+
+ `Infoblox website <https://www.infoblox.com/>`_
+ The Infoblox website
+ `Infoblox and Ansible Deployment Guide <https://www.infoblox.com/resources/deployment-guides/infoblox-and-ansible-integration>`_
+ The deployment guide for Ansible integration provided by Infoblox.
+ `Infoblox Integration in Ansible 2.5 <https://www.ansible.com/blog/infoblox-integration-in-ansible-2.5>`_
+ Ansible blog post about Infoblox.
+ :ref:`Ansible NIOS modules <nios_net tools_modules>`
+ The list of supported NIOS modules, with examples.
+ `Infoblox Ansible Examples <https://github.com/network-automation/infoblox_ansible>`_
+ Infoblox example playbooks.
diff --git a/docs/docsite/rst/scenario_guides/guide_kubernetes.rst b/docs/docsite/rst/scenario_guides/guide_kubernetes.rst
new file mode 100644
index 00000000..abd548de
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_kubernetes.rst
@@ -0,0 +1,63 @@
+Kubernetes and OpenShift Guide
+==============================
+
+Modules for interacting with the Kubernetes (K8s) and OpenShift API are under development, and can be used in preview mode. To use them, review the requirements, and then follow the installation and use instructions.
+
+Requirements
+------------
+
+To use the modules, you'll need the following:
+
+- Run Ansible from source. For assistance, view :ref:`from_source`.
+- `OpenShift Rest Client <https://github.com/openshift/openshift-restclient-python>`_ installed on the host that will execute the modules.
+
+
+Installation and use
+--------------------
+
+The Kubernetes modules are part of the `Ansible Kubernetes collection <https://github.com/ansible-collections/community.kubernetes>`_.
+
+To install the collection, run the following:
+
+.. code-block:: bash
+
+ $ ansible-galaxy collection install community.kubernetes
+
+Next, include it in a playbook, as follows:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: localhost
+ tasks:
+ - name: Create a pod
+ community.kubernetes.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "utilitypod-1"
+ namespace: default
+ labels:
+ app: galaxy
+ spec:
+ containers:
+ - name: utilitypod
+ image: busybox
+
+
+Authenticating with the API
+---------------------------
+
+By default the OpenShift Rest Client will look for ``~/.kube/config``, and if found, connect using the active context. You can override the location of the file using the ``kubeconfig`` parameter, and the context, using the ``context`` parameter.
+
+Basic authentication is also supported using the ``username`` and ``password`` options. You can override the URL using the ``host`` parameter. Certificate authentication works through the ``ssl_ca_cert``, ``cert_file``, and ``key_file`` parameters, and for token authentication, use the ``api_key`` parameter.
+
+To disable SSL certificate verification, set ``verify_ssl`` to false.
+
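+As a sketch of how these options fit together, the following task points the
+``k8s`` module at an explicit kubeconfig and context instead of the defaults
+(the file paths and context name are placeholders):
+
+.. code-block:: yaml
+
+    - name: Create a pod using an explicit kubeconfig and context
+      community.kubernetes.k8s:
+        state: present
+        kubeconfig: /path/to/custom/kubeconfig
+        context: my-cluster-context
+        src: /path/to/pod-definition.yml
+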
+Filing issues
+`````````````
+
+If you find a bug or have a suggestion regarding modules, please file issues at `Ansible Kubernetes collection <https://github.com/ansible-collections/community.kubernetes>`_.
+If you find a bug regarding OpenShift client, please file issues at `OpenShift REST Client issues <https://github.com/openshift/openshift-restclient-python/issues>`_.
diff --git a/docs/docsite/rst/scenario_guides/guide_meraki.rst b/docs/docsite/rst/scenario_guides/guide_meraki.rst
new file mode 100644
index 00000000..94c5b161
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_meraki.rst
@@ -0,0 +1,193 @@
+.. _meraki_guide:
+
+******************
+Cisco Meraki Guide
+******************
+
+.. contents::
+ :local:
+
+
+.. _meraki_guide_intro:
+
+What is Cisco Meraki?
+=====================
+
+Cisco Meraki is an easy-to-use, cloud-based, network infrastructure platform for enterprise environments. While most network hardware uses command-line interfaces (CLIs) for configuration, Meraki uses an easy-to-use Dashboard hosted in the Meraki cloud. No on-premises management hardware or software is required - only the network infrastructure to run your business.
+
+MS Switches
+-----------
+
+Meraki MS switches come in multiple flavors and form factors. Meraki switches support 10/100/1000/10000 ports, as well as Cisco's mGig technology for 2.5/5/10Gbps copper connectivity. 8, 24, and 48 port flavors are available with PoE (802.3af/802.3at/UPoE) available on many models.
+
+MX Firewalls
+------------
+
+Meraki's MX firewalls support full layer 3-7 deep packet inspection. MX firewalls are compatible with a variety of VPN technologies including IPSec, SSL VPN, and Meraki's easy-to-use AutoVPN.
+
+MR Wireless Access Points
+-------------------------
+
+MR access points are enterprise-class, high-performance wireless access points. MR access points have MIMO technology and integrated beamforming built in for high-performance applications. BLE allows advanced location applications to be developed with no on-premises analytics platforms.
+
+Using the Meraki modules
+========================
+
+Meraki modules provide a user-friendly interface to manage your Meraki environment using Ansible. For example, details about SNMP settings for a particular organization can be discovered using the :ref:`meraki_snmp <meraki_snmp_module>` module.
+
+.. code-block:: yaml
+
+ - name: Query SNMP settings
+ meraki_snmp:
+ api_key: abc123
+ org_name: AcmeCorp
+ state: query
+ delegate_to: localhost
+
+Information about a particular object can also be queried. For example, the :ref:`meraki_admin <meraki_admin_module>` module supports looking up an administrator by email address:
+
+.. code-block:: yaml
+
+ - name: Gather information about Jane Doe
+ meraki_admin:
+ api_key: abc123
+ org_name: AcmeCorp
+ state: query
+ email: janedoe@email.com
+ delegate_to: localhost
+
+Common Parameters
+=================
+
+All Ansible Meraki modules support the following parameters which affect communication with the Meraki Dashboard API. Most of these should only be used by Meraki developers and not the general public.
+
+ host
+ Hostname or IP of Meraki Dashboard.
+
+ use_https
+ Specifies whether communication should be over HTTPS. (Defaults to ``yes``)
+
+ use_proxy
+ Whether to use a proxy for any communication.
+
+ validate_certs
+ Determine whether certificates should be validated or trusted. (Defaults to ``yes``)
+
+The following common parameters are used by almost every module.
+
+ org_name
+ Name of organization to perform actions in.
+
+ org_id
+ ID of organization to perform actions in.
+
+ net_name
+ Name of network to perform actions in.
+
+ net_id
+ ID of network to perform actions in.
+
+ state
+ General specification of what action to take. ``query`` does lookups. ``present`` creates or edits. ``absent`` deletes.
+
+.. hint:: Use the ``org_id`` and ``net_id`` parameters when possible. ``org_name`` and ``net_name`` require additional behind-the-scenes API calls to learn the ID values. ``org_id`` and ``net_id`` will perform faster.
+
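+For example, the SNMP query shown earlier can skip the extra lookup by passing
+the organization ID directly (the ID value here is illustrative):
+
+.. code-block:: yaml
+
+    - name: Query SNMP settings by organization ID
+      meraki_snmp:
+        api_key: abc123
+        org_id: 123456
+        state: query
+      delegate_to: localhost
+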
+Meraki Authentication
+=====================
+
+All API access with the Meraki Dashboard requires an API key. An API key can be generated from the organization's settings page. Each play in a playbook requires the ``api_key`` parameter to be specified.
+
+The "Vault" feature of Ansible allows you to keep sensitive data such as passwords or keys in encrypted files, rather than as plain text in your playbooks or roles. These vault files can then be distributed or placed in source control. See :ref:`playbooks_vault` for more information.
+
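+For example, you might keep the key in a vault-encrypted variables file and
+reference it in each task (the variable name ``meraki_api_key`` is
+illustrative):
+
+.. code-block:: yaml
+
+    - name: Query SNMP settings
+      meraki_snmp:
+        api_key: "{{ meraki_api_key }}"
+        org_name: AcmeCorp
+        state: query
+      delegate_to: localhost
+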
+Meraki's API returns a 404 error if the API key is not correct. It does not provide any specific error saying the key is incorrect. If you receive a 404 error, check the API key first.
+
+Returned Data Structures
+========================
+
+Meraki and its related Ansible modules return most information in the form of a list. For example, this is the information returned by ``meraki_admin`` when querying administrators. It returns a list even though there is only one administrator:
+
+.. code-block:: json
+
+ [
+ {
+ "orgAccess": "full",
+ "name": "John Doe",
+ "tags": [],
+ "networks": [],
+ "email": "john@doe.com",
+ "id": "12345677890"
+ }
+ ]
+
+Handling Returned Data
+======================
+
+Since Meraki's response data uses lists instead of properly keyed dictionaries for responses, certain strategies should be used when querying data for particular information. For many situations, use the ``selectattr()`` Jinja2 function.
+
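+For example, to pull a single administrator out of the list returned by a
+``meraki_admin`` query (the registered variable name ``admins`` is
+illustrative):
+
+.. code-block:: yaml
+
+    - set_fact:
+        jane: "{{ admins.data | selectattr('email', 'equalto', 'janedoe@email.com') | list | first }}"
+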
+Merging Existing and New Data
+=============================
+
+Ansible's Meraki modules do not manipulate data on your behalf. For example, you may need to insert a rule in the middle of a firewall ruleset, but neither Ansible nor the Meraki modules offer a way to merge the new rule into the existing list directly. However, a playbook can use a few tasks to split the list where you need to insert a rule and then merge the pieces together again with the new rule added. The steps involved are as follows:
+
+1. Create blank "front" and "back" lists.
+ ::
+
+ vars:
+ - front_rules: []
+ - back_rules: []
+2. Get existing firewall rules from Meraki and create a new variable.
+ ::
+
+ - name: Get firewall rules
+ meraki_mx_l3_firewall:
+ auth_key: abc123
+ org_name: YourOrg
+ net_name: YourNet
+ state: query
+ delegate_to: localhost
+ register: rules
+ - set_fact:
+ original_ruleset: '{{rules.data}}'
+3. Write the new rule. The new rule needs to be in a list so it can be concatenated with the other lists in an upcoming step; the leading ``-`` puts the rule in a one-item list.
+ ::
+
+ - set_fact:
+ new_rule:
+ - comment: Block traffic to server
+ src_cidr: 192.0.1.0/24
+ src_port: any
+ dst_cidr: 192.0.1.2/32
+ dst_port: any
+ protocol: any
+ policy: deny
+4. Split the rules into two lists. This assumes the existing ruleset is 2 rules long.
+ ::
+
+ - set_fact:
+ front_rules: '{{front_rules + original_ruleset[:1]}}'
+ - set_fact:
+ back_rules: '{{back_rules + original_ruleset[1:]}}'
+5. Merge rules with the new rule in the middle.
+ ::
+
+ - set_fact:
+ new_ruleset: '{{front_rules + new_rule + back_rules}}'
+6. Upload new ruleset to Meraki.
+ ::
+
+ - name: Set two firewall rules
+ meraki_mx_l3_firewall:
+ auth_key: abc123
+ org_name: YourOrg
+ net_name: YourNet
+ state: present
+ rules: '{{ new_ruleset }}'
+ delegate_to: localhost
+
+Error Handling
+==============
+
+Ansible's Meraki modules will often fail if improper or incompatible parameters are specified. However, there will likely be scenarios where the module accepts the information but the Meraki API rejects the data. If this happens, the error is returned in the ``body`` field of the response, along with an HTTP 400 status code.
+
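+If you want a play to continue past such a failure and inspect the response
+yourself, you can register the result and ignore errors; this is a generic
+Ansible pattern rather than anything Meraki-specific:
+
+.. code-block:: yaml
+
+    - name: Query SNMP settings
+      meraki_snmp:
+        api_key: abc123
+        org_name: AcmeCorp
+        state: query
+      delegate_to: localhost
+      register: snmp_result
+      ignore_errors: true
+
+    - name: Show the API error body, if any
+      debug:
+        var: snmp_result.body
+      when: snmp_result is failed
+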
+As noted above, Meraki's API returns a 404 error if the API key is not correct, without any specific message saying the key is wrong, so if you receive a 404 error, check the API key first. 404 errors can also occur if improper object IDs (for example, ``org_id``) are specified.
diff --git a/docs/docsite/rst/scenario_guides/guide_online.rst b/docs/docsite/rst/scenario_guides/guide_online.rst
new file mode 100644
index 00000000..2c181a94
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_online.rst
@@ -0,0 +1,41 @@
+****************
+Online.net Guide
+****************
+
+Introduction
+============
+
+Online is a French hosting company mainly known for providing bare-metal servers named Dedibox.
+Check it out: `https://www.online.net/en <https://www.online.net/en>`_
+
+Dynamic inventory for Online resources
+--------------------------------------
+
+Ansible has a dynamic inventory plugin that can list your resources.
+
+1. Create a YAML configuration such as ``online_inventory.yml`` with this content:
+
+.. code-block:: yaml
+
+ plugin: online
+
+2. Set your ``ONLINE_TOKEN`` environment variable with your token.
+ You need to open an account and log into it before you can get a token.
+ You can find your token at the following page: `https://console.online.net/en/api/access <https://console.online.net/en/api/access>`_
+
+3. You can test that your inventory is working by running:
+
+.. code-block:: bash
+
+ $ ansible-inventory -v -i online_inventory.yml --list
+
+
+4. Now you can run your playbook or any other module with this inventory:
+
+.. code-block:: console
+
+ $ ansible all -i online_inventory.yml -m ping
+ sd-96735 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+ }
diff --git a/docs/docsite/rst/scenario_guides/guide_oracle.rst b/docs/docsite/rst/scenario_guides/guide_oracle.rst
new file mode 100644
index 00000000..170ea903
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_oracle.rst
@@ -0,0 +1,103 @@
+===================================
+Oracle Cloud Infrastructure Guide
+===================================
+
+************
+Introduction
+************
+
+Oracle provides a number of Ansible modules to interact with Oracle Cloud Infrastructure (OCI). In this guide, we will explain how you can use these modules to orchestrate, provision and configure your infrastructure on OCI.
+
+************
+Requirements
+************
+To use the OCI Ansible modules, you must have the following prerequisites on your control node, the computer from which Ansible playbooks are executed.
+
+1. `An Oracle Cloud Infrastructure account. <https://cloud.oracle.com/en_US/tryit>`_
+
+2. A user created in that account, in a security group with a policy that grants the necessary permissions for working with resources in those compartments. For guidance, see `How Policies Work <https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/policies.htm>`_.
+
+3. The necessary credentials and OCID information.
+
+************
+Installation
+************
+1. Install the Oracle Cloud Infrastructure Python SDK (`detailed installation instructions <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/installation.html>`_):
+
+.. code-block:: bash
+
+ pip install oci
+
+2. Install the Ansible OCI Modules in one of two ways:
+
+a. From Galaxy:
+
+.. code-block:: bash
+
+ ansible-galaxy install oracle.oci_ansible_modules
+
+b. From GitHub:
+
+.. code-block:: bash
+
+ $ git clone https://github.com/oracle/oci-ansible-modules.git
+
+.. code-block:: bash
+
+ $ cd oci-ansible-modules
+
+
+Run one of the following commands:
+
+- If Ansible is installed only for your user:
+
+.. code-block:: bash
+
+ $ ./install.py
+
+- If Ansible is installed as root:
+
+.. code-block:: bash
+
+ $ sudo ./install.py
+
+*************
+Configuration
+*************
+
+When creating and configuring Oracle Cloud Infrastructure resources, Ansible modules use the authentication information outlined `here <https://docs.cloud.oracle.com/iaas/Content/API/Concepts/sdkconfig.htm>`_.
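+
+By default, the SDK (and therefore the OCI modules) reads this configuration
+from ``~/.oci/config``. A minimal configuration file looks like the following
+sketch, where every value is a placeholder for your own user and tenancy
+OCIDs, key fingerprint, API key file, and region:
+
+.. code-block:: ini
+
+    [DEFAULT]
+    user = ocid1.user.oc1..<unique_id>
+    fingerprint = <api_key_fingerprint>
+    key_file = ~/.oci/oci_api_key.pem
+    tenancy = ocid1.tenancy.oc1..<unique_id>
+    region = us-ashburn-1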
+
+********
+Examples
+********
+Launch a compute instance
+=========================
+This `sample launch playbook <https://github.com/oracle/oci-ansible-modules/tree/master/samples/compute/launch_compute_instance>`_
+launches a public Compute instance and then accesses the instance from an Ansible module over an SSH connection. The sample illustrates how to:
+
+- Generate a temporary, host-specific SSH key pair.
+- Specify the public key from the key pair for connecting to the instance, and then launch the instance.
+- Connect to the newly launched instance using SSH.
+
+Create and manage Autonomous Data Warehouses
+============================================
+This `sample warehouse playbook <https://github.com/oracle/oci-ansible-modules/tree/master/samples/database/autonomous_data_warehouse>`_ creates an Autonomous Data Warehouse and manages its lifecycle. The sample shows how to:
+
+- Set up an Autonomous Data Warehouse.
+- List all of the Autonomous Data Warehouse instances available in a compartment, filtered by the display name.
+- Get the "facts" for a specified Autonomous Data Warehouse.
+- Stop and start an Autonomous Data Warehouse instance.
+- Delete an Autonomous Data Warehouse instance.
+
+Create and manage Autonomous Transaction Processing
+===================================================
+This `sample playbook <https://github.com/oracle/oci-ansible-modules/tree/master/samples/database/autonomous_database>`_
+creates an Autonomous Transaction Processing database and manages its lifecycle. The sample shows how to:
+
+- Set up an Autonomous Transaction Processing database instance.
+- List all of the Autonomous Transaction Processing instances in a compartment, filtered by the display name.
+- Get the "facts" for a specified Autonomous Transaction Processing instance.
+- Delete an Autonomous Transaction Processing database instance.
+
+You can find more examples here: `Sample Ansible Playbooks <https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/ansiblesamples.htm>`_.
diff --git a/docs/docsite/rst/scenario_guides/guide_packet.rst b/docs/docsite/rst/scenario_guides/guide_packet.rst
new file mode 100644
index 00000000..c08eb947
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_packet.rst
@@ -0,0 +1,311 @@
+**********************************
+Packet.net Guide
+**********************************
+
+Introduction
+============
+
+`Packet.net <https://packet.net>`_ is a bare metal infrastructure host that's supported by Ansible (>=2.3) via a dynamic inventory script and two cloud modules. The two modules are:
+
+- packet_sshkey: adds a public SSH key from file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys.
+- packet_device: manages servers on Packet. You can use this module to create, restart and delete devices.
+
+Note that this guide assumes you are familiar with Ansible and how it works. If you're not, have a look at the :ref:`docs <ansible_documentation>` before getting started.
+
+Requirements
+============
+
+The Packet modules and inventory script connect to the Packet API using the packet-python package. You can install it with pip:
+
+.. code-block:: bash
+
+ $ pip install packet-python
+
+In order to check the state of devices created by Ansible on Packet, it's a good idea to install one of the `Packet CLI clients <https://www.packet.net/developers/integrations/>`_. Otherwise you can check them via the `Packet portal <https://app.packet.net/portal>`_.
+
+To use the modules and inventory script you'll need a Packet API token. You can generate an API token via the Packet portal `here <https://app.packet.net/portal#/api-keys>`__. The simplest way to authenticate yourself is to set the Packet API token in an environment variable:
+
+.. code-block:: bash
+
+ $ export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
+
+If you're not comfortable exporting your API token, you can pass it as a parameter to the modules.
+
+On Packet, devices and reserved IP addresses belong to `projects <https://www.packet.com/developers/api/#projects>`_. In order to use the packet_device module, you need to specify the UUID of the project in which you want to create or manage devices. You can find a project's UUID in the Packet portal `here <https://app.packet.net/portal#/projects/list/table/>`_ (it's just under the project table) or via one of the available `CLIs <https://www.packet.net/developers/integrations/>`_.
+
+
+If you want to use a new SSH keypair in this tutorial, you can generate it into ``./id_rsa`` and ``./id_rsa.pub`` as follows:
+
+.. code-block:: bash
+
+ $ ssh-keygen -t rsa -f ./id_rsa
+
+If you want to use an existing keypair, just copy the private and public key over to the playbook directory.
+
+
+Device Creation
+===============
+
+The following code block is a simple playbook that creates one `Type 0 <https://www.packet.com/cloud/servers/t1-small/>`_ server (the 'plan' parameter). You have to supply 'plan' and 'operating_system'. 'location' defaults to 'ewr1' (Parsippany, NJ). You can find all the possible values for the parameters via a `CLI client <https://www.packet.net/developers/integrations/>`_.
+
+.. code-block:: yaml
+
+ # playbook_create.yml
+
+ - name: create ubuntu device
+ hosts: localhost
+ tasks:
+
+ - packet_sshkey:
+ key_file: ./id_rsa.pub
+ label: tutorial key
+
+ - packet_device:
+ project_id: <your_project_id>
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify via a CLI or in the `Packet portal <https://app.packet.net/portal#/projects/list/table>`__.
+
+If you get an error with the message "failed to set machine state present, error: Error 404: Not Found", please verify your project UUID.
+
+
+Updating Devices
+================
+
+The two parameters used to uniquely identify Packet devices are: "device_ids" and "hostnames". Both parameters accept either a single string (later converted to a one-element list), or a list of strings.
+
+The 'device_ids' and 'hostnames' parameters are mutually exclusive. The following values are all acceptable:
+
+- device_ids: a27b7a83-fc93-435b-a128-47a5b04f2dcf
+
+- hostnames: mydev1
+
+- device_ids: [a27b7a83-fc93-435b-a128-47a5b04f2dcf, 4887130f-0ccd-49a0-99b0-323c1ceb527b]
+
+- hostnames: [mydev1, mydev2]
+
+In addition, hostnames can contain a special '%d' formatter along with a 'count' parameter that lets you easily expand hostnames that follow a simple name and number pattern; in other words, ``hostnames: "mydev%d", count: 2`` will expand to [mydev1, mydev2].
+
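+As a sketch, the following task would create two such devices, reusing the
+plan, operating system, and facility values from the earlier example:
+
+.. code-block:: yaml
+
+    - packet_device:
+        project_id: <your_project_id>
+        hostnames: "mydev%d"
+        count: 2
+        operating_system: ubuntu_16_04
+        plan: baremetal_0
+        facility: sjc1
+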
+If your playbook acts on existing Packet devices, you can pass either the 'hostnames' or the 'device_ids' parameter to identify them. The following playbook shows how you can reboot a specific Packet device by setting the 'hostnames' parameter:
+
+.. code-block:: yaml
+
+ # playbook_reboot.yml
+
+ - name: reboot myserver
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: <your_project_id>
+ hostnames: myserver
+ state: rebooted
+
+You can also identify specific Packet devices with the 'device_ids' parameter. The device's UUID can be found in the `Packet Portal <https://app.packet.net/portal>`_ or by using a `CLI <https://www.packet.net/developers/integrations/>`_. The following playbook removes a Packet device using the 'device_ids' field:
+
+.. code-block:: yaml
+
+ # playbook_remove.yml
+
+ - name: remove a device
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: <your_project_id>
+ device_ids: <myserver_device_id>
+ state: absent
+
+
+More Complex Playbooks
+======================
+
+In this example, we'll create a CoreOS cluster with `user data <https://packet.com/developers/docs/servers/key-features/user-data/>`_.
+
+
+The CoreOS cluster will use `etcd <https://etcd.io/>`_ for discovery of other servers in the cluster. Before provisioning your servers, you'll need to generate a discovery token for your cluster:
+
+.. code-block:: bash
+
+ $ curl -w "\n" 'https://discovery.etcd.io/new?size=3'
+
+The following playbook will create an SSH key, 3 Packet servers, and then wait until SSH is ready (or until the 500 second timeout passes). Make sure to substitute the discovery token URL in 'user_data', and the 'project_id' before running ``ansible-playbook``. Also, feel free to change 'plan' and 'facility'.
+
+.. code-block:: yaml
+
+ # playbook_coreos.yml
+
+ - name: Start 3 CoreOS nodes in Packet and wait until SSH is ready
+ hosts: localhost
+ tasks:
+
+ - packet_sshkey:
+ key_file: ./id_rsa.pub
+ label: new
+
+ - packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_beta
+ plan: baremetal_0
+ facility: ewr1
+ project_id: <your_project_id>
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ coreos:
+ etcd2:
+ discovery: https://discovery.etcd.io/<token>
+ advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
+ initial-advertise-peer-urls: http://$private_ipv4:2380
+ listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+ listen-peer-urls: http://$private_ipv4:2380
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd2.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: wait for ssh
+ wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ loop: "{{ newhosts.results[0].devices }}"
+
+
+As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect.
+
+The second module call provisions 3 Packet Type 0 (specified using the 'plan' parameter) servers in the project identified via the 'project_id' parameter. The servers are all provisioned with CoreOS beta (the 'operating_system' parameter) and are customized with cloud-config user data passed to the 'user_data' parameter.
+
+The ``packet_device`` module has a ``wait_for_public_IPv`` parameter that is used to specify the version of the IP address to wait for (valid values are ``4`` or ``6`` for IPv4 or IPv6). If specified, Ansible will wait until the GET API call for a device contains an Internet-routable IP address of the specified version. When referring to an IP address of a created device in subsequent module calls, it's wise to use the ``wait_for_public_IPv`` parameter, or ``state: active`` in the packet_device module call.
+
+Run the playbook:
+
+.. code-block:: bash
+
+ $ ansible-playbook playbook_coreos.yml
+
+Once the playbook quits, your new devices should be reachable via SSH. Try to connect to one and check if etcd has started properly:
+
+.. code-block:: bash
+
+ tomk@work $ ssh -i id_rsa core@$one_of_the_servers_ip
+ core@coreos-one ~ $ etcdctl cluster-health
+
+Once you create a couple of devices, you might appreciate the dynamic inventory script...
+
+
+Dynamic Inventory Script
+========================
+
+The dynamic inventory script queries the Packet API for a list of hosts, and exposes it to Ansible so you can easily identify and act on Packet devices.
+
+You can find it in the Ansible Community General Collection's git repo at `scripts/inventory/packet_net.py <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.py>`_.
+
+The inventory script is configurable via an `ini file <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.ini>`_.
+
+If you want to use the inventory script, you must first export your Packet API token to a PACKET_API_TOKEN environment variable.
+
+You can either copy the inventory and ini config out from the cloned git repo, or you can download it to your working directory like so:
+
+.. code-block:: bash
+
+ $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.py
+ $ chmod +x packet_net.py
+ $ wget https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/packet_net.ini
+
+In order to understand what the inventory script gives to Ansible, you can run:
+
+.. code-block:: bash
+
+ $ ./packet_net.py --list
+
+It should print a JSON document similar to the following trimmed dictionary:
+
+.. code-block:: json
+
+ {
+ "_meta": {
+ "hostvars": {
+ "147.75.64.169": {
+ "packet_billing_cycle": "hourly",
+ "packet_created_at": "2017-02-09T17:11:26Z",
+ "packet_facility": "ewr1",
+ "packet_hostname": "coreos-two",
+ "packet_href": "/devices/d0ab8972-54a8-4bff-832b-28549d1bec96",
+ "packet_id": "d0ab8972-54a8-4bff-832b-28549d1bec96",
+ "packet_locked": false,
+ "packet_operating_system": "coreos_beta",
+ "packet_plan": "baremetal_0",
+ "packet_state": "active",
+ "packet_updated_at": "2017-02-09T17:16:35Z",
+ "packet_user": "core",
+ "packet_userdata": "#cloud-config\ncoreos:\n etcd2:\n discovery: https://discovery.etcd.io/e0c8a4a9b8fe61acd51ec599e2a4f68e\n advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001\n initial-advertise-peer-urls: http://$private_ipv4:2380\n listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001\n listen-peer-urls: http://$private_ipv4:2380\n fleet:\n public-ip: $private_ipv4\n units:\n - name: etcd2.service\n command: start\n - name: fleet.service\n command: start"
+ }
+ }
+ },
+ "baremetal_0": [
+ "147.75.202.255",
+ "147.75.202.251",
+ "147.75.202.249",
+ "147.75.64.129",
+ "147.75.192.51",
+ "147.75.64.169"
+ ],
+ "coreos_beta": [
+ "147.75.202.255",
+ "147.75.202.251",
+ "147.75.202.249",
+ "147.75.64.129",
+ "147.75.192.51",
+ "147.75.64.169"
+ ],
+ "ewr1": [
+ "147.75.64.129",
+ "147.75.192.51",
+ "147.75.64.169"
+ ],
+ "sjc1": [
+ "147.75.202.255",
+ "147.75.202.251",
+ "147.75.202.249"
+ ],
+ "coreos-two": [
+ "147.75.64.169"
+ ],
+ "d0ab8972-54a8-4bff-832b-28549d1bec96": [
+ "147.75.64.169"
+ ]
+ }
+
+In the ``['_meta']['hostvars']`` key, there is a list of devices (uniquely identified by their public IPv4 address) with their parameters. The other top-level keys are lists of devices grouped by some parameter. Here, the groupings are plan (all devices are of type baremetal_0), operating system, and facility (ewr1 and sjc1).
+
+In addition to the parameter groups, there are also one-item groups with the UUID or hostname of the device.
+
+You can now target groups in playbooks! The following playbook runs a bootstrap role (one that installs the resources Ansible needs on the target) on all devices in the ``coreos_beta`` group:
+
+.. code-block:: yaml
+
+ # playbook_bootstrap.yml
+
+ - hosts: coreos_beta
+ gather_facts: false
+ roles:
+ - defunctzombie.coreos-bootstrap
+
+Don't forget to supply the dynamic inventory in the ``-i`` argument!
+
+.. code-block:: bash
+
+ $ ansible-playbook -u core -i packet_net.py playbook_bootstrap.yml
+
+
+If you have any questions or comments, let us know! help@packet.net
diff --git a/docs/docsite/rst/scenario_guides/guide_rax.rst b/docs/docsite/rst/scenario_guides/guide_rax.rst
new file mode 100644
index 00000000..b6100b8b
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_rax.rst
@@ -0,0 +1,810 @@
+Rackspace Cloud Guide
+=====================
+
+.. _rax_introduction:
+
+Introduction
+````````````
+
+.. note:: This section of the documentation is under construction. We are in the process of adding more examples about the Rackspace modules and how they work together. Once complete, there will also be examples for Rackspace Cloud in `ansible-examples <https://github.com/ansible/ansible-examples/>`_.
+
+Ansible contains a number of core modules for interacting with Rackspace Cloud.
+
+The purpose of this section is to explain how to put Ansible modules together
+(and use inventory scripts) to use Ansible in a Rackspace Cloud context.
+
+Prerequisites for using the rax modules are minimal. In addition to ansible itself,
+all of the modules require and are tested against pyrax 1.5 or higher.
+You'll need this Python module installed on the execution host.
+
+``pyrax`` is not currently available in many operating system
+package repositories, so you will likely need to install it via pip:
+
+.. code-block:: bash
+
+ $ pip install pyrax
+
+Ansible creates an implicit localhost that executes in the same context as the ``ansible-playbook`` and the other CLI tools.
+If for any reason you need or want to have it in your inventory, you should do something like the following:
+
+.. code-block:: ini
+
+ [localhost]
+ localhost ansible_connection=local ansible_python_interpreter=/usr/local/bin/python2
+
+For more information see :ref:`Implicit Localhost <implicit_localhost>`
+
+In playbook steps, we'll typically be using the following pattern:
+
+.. code-block:: yaml
+
+ - hosts: localhost
+ gather_facts: False
+ tasks:
+
+.. _credentials_file:
+
+Credentials File
+````````````````
+
+The `rax.py` inventory script and all `rax` modules support a standard `pyrax` credentials file that looks like:
+
+.. code-block:: ini
+
+ [rackspace_cloud]
+ username = myraxusername
+ api_key = d41d8cd98f00b204e9800998ecf8427e
+
+Setting the environment variable ``RAX_CREDS_FILE`` to the path of this file tells Ansible where to load
+this information from.
+
+More information about this credentials file can be found at
+https://github.com/pycontribs/pyrax/blob/master/docs/getting_started.md#authenticating
+
+
+.. _virtual_environment:
+
+Running from a Python Virtual Environment (Optional)
+++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Most users will not be using virtualenv, but some users, particularly Python developers, sometimes like to.
+
+There are special considerations when Ansible is installed to a Python virtualenv, rather than the default of installing at a global scope. Ansible assumes, unless otherwise instructed, that the python binary will live at /usr/bin/python. This is done via the interpreter line in modules. However, when instructed by setting the inventory variable 'ansible_python_interpreter', Ansible will use this specified path instead to find Python. This can be a cause of confusion, as one may assume that modules running on 'localhost', or perhaps running via 'local_action', are using the virtualenv Python interpreter. By setting this variable in the inventory, the modules will execute in the virtualenv interpreter and have the virtualenv packages available, specifically pyrax. If you are using virtualenv, you may wish to modify your localhost inventory definition to find this location as follows:
+
+.. code-block:: ini
+
+ [localhost]
+ localhost ansible_connection=local ansible_python_interpreter=/path/to/ansible_venv/bin/python
+
+.. note::
+
+ pyrax may be installed in the global Python package scope or in a virtual environment. There are no special considerations to keep in mind when installing pyrax.
+
+.. _provisioning:
+
+Provisioning
+````````````
+
+Now for the fun parts.
+
+The 'rax' module provides the ability to provision instances within Rackspace Cloud. Typically the provisioning task will be performed from your Ansible control server (in our example, localhost) against the Rackspace cloud API. This is done for several reasons:
+
+ - Avoiding installing the pyrax library on remote nodes
+ - No need to encrypt and distribute credentials to remote nodes
+ - Speed and simplicity
+
+.. note::
+
+ Authentication with the Rackspace-related modules is handled by either
+ specifying your username and API key as environment variables or passing
+ them as module arguments, or by specifying the location of a credentials
+ file.
+
+Here is a basic example of provisioning an instance in ad-hoc mode:
+
+.. code-block:: bash
+
+ $ ansible localhost -m rax -a "name=awx flavor=4 image=ubuntu-1204-lts-precise-pangolin wait=yes"
+
+Here's what it would look like in a playbook, assuming the parameters were defined in variables:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Provision a set of instances
+ rax:
+ name: "{{ rax_name }}"
+ flavor: "{{ rax_flavor }}"
+ image: "{{ rax_image }}"
+ count: "{{ rax_count }}"
+ group: "{{ group }}"
+ wait: yes
+ register: rax
+ delegate_to: localhost
+
+The rax module returns data about the nodes it creates, like IP addresses, hostnames, and login passwords. By registering the return value of the step, it is possible to use this data to dynamically add the resulting hosts to inventory (temporarily, in memory). This facilitates performing configuration actions on the hosts in a follow-on task. In the following example, the servers that were successfully created using the above task are dynamically added to a group called "raxhosts", with each node's hostname, IP address, and root password being added to the inventory.
+
+.. code-block:: yaml
+
+ - name: Add the instances we created (by public IP) to the group 'raxhosts'
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_host: "{{ item.rax_accessipv4 }}"
+ ansible_password: "{{ item.rax_adminpass }}"
+ groups: raxhosts
+ loop: "{{ rax.success }}"
+ when: rax.action == 'create'
+
+With the host group now created, the next play in this playbook can configure servers belonging to the raxhosts group.
+
+.. code-block:: yaml
+
+ - name: Configuration play
+ hosts: raxhosts
+ user: root
+ roles:
+ - ntp
+ - webserver
+
+The method above ties the configuration of a host with the provisioning step. This isn't always what you want, and leads us
+to the next section.
+
+.. _host_inventory:
+
+Host Inventory
+``````````````
+
+Once your nodes are spun up, you'll probably want to talk to them again. The best way to handle this is to use the "rax" inventory plugin, which dynamically queries Rackspace Cloud and tells Ansible what nodes you have to manage. You might want to use this even if you are spinning up cloud instances via other tools, including the Rackspace Cloud user interface. The inventory plugin can be used to group resources by metadata, region, OS, and so on. Utilizing metadata is highly recommended in "rax" and can provide an easy way to sort between host groups and roles. If you don't want to use the ``rax.py`` dynamic inventory script, you could also still choose to manually manage your INI inventory file, though this is less recommended.
+
+In Ansible it is quite possible to use multiple dynamic inventory plugins along with INI file data. Just put them in a common directory and be sure the scripts are chmod +x, and the INI-based ones are not.
+
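+For example, an inventory directory might contain an executable ``rax.py``
+next to a static INI file (names here are illustrative), and the directory is
+then passed to Ansible as a whole:
+
+.. code-block:: bash
+
+    $ chmod +x inventory/rax.py
+    $ ansible-playbook -i inventory/ site.yml
+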
+.. _raxpy:
+
+rax.py
+++++++
+
+To use the Rackspace dynamic inventory script, copy ``rax.py`` into your inventory directory and make it executable. You can specify a credentials file for ``rax.py`` utilizing the ``RAX_CREDS_FILE`` environment variable.
+
+.. note:: Dynamic inventory scripts (like ``rax.py``) are saved in ``/usr/share/ansible/inventory`` if Ansible has been installed globally. If installed to a virtualenv, the inventory scripts are installed to ``$VIRTUALENV/share/inventory``.
+
+.. note:: Users of :ref:`ansible_tower` will note that dynamic inventory is natively supported by Tower, and all you have to do is associate a group with your Rackspace Cloud credentials, and it will easily synchronize without going through these steps::
+
+ $ RAX_CREDS_FILE=~/.raxpub ansible all -i rax.py -m setup
+
+``rax.py`` also accepts a ``RAX_REGION`` environment variable, which can contain an individual region, or a comma separated list of regions.
+
+When using ``rax.py``, you will not have a 'localhost' defined in the inventory.
+
+As mentioned previously, you will often be running most of these modules outside of the host loop, and will need 'localhost' defined. The recommended way to do this is to create an ``inventory`` directory and place both the ``rax.py`` script and a file containing ``localhost`` in it.
+
+Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead
+of an individual file will cause Ansible to evaluate each file in that directory for inventory.
+
+Let's test our inventory script to see if it can talk to Rackspace Cloud.
+
+.. code-block:: bash
+
+ $ RAX_CREDS_FILE=~/.raxpub ansible all -i inventory/ -m setup
+
+Assuming things are properly configured, the ``rax.py`` inventory script will output information similar to the
+following, which will be utilized for inventory and variables.
+
+.. code-block:: json
+
+ {
+ "ORD": [
+ "test"
+ ],
+ "_meta": {
+ "hostvars": {
+ "test": {
+ "ansible_host": "198.51.100.1",
+ "rax_accessipv4": "198.51.100.1",
+ "rax_accessipv6": "2001:DB8::2342",
+ "rax_addresses": {
+ "private": [
+ {
+ "addr": "192.0.2.2",
+ "version": 4
+ }
+ ],
+ "public": [
+ {
+ "addr": "198.51.100.1",
+ "version": 4
+ },
+ {
+ "addr": "2001:DB8::2342",
+ "version": 6
+ }
+ ]
+ },
+ "rax_config_drive": "",
+ "rax_created": "2013-11-14T20:48:22Z",
+ "rax_flavor": {
+ "id": "performance1-1",
+ "links": [
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/111111/flavors/performance1-1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "rax_hostid": "e7b6961a9bd943ee82b13816426f1563bfda6846aad84d52af45a4904660cde0",
+ "rax_human_id": "test",
+ "rax_id": "099a447b-a644-471f-87b9-a7f580eb0c2a",
+ "rax_image": {
+ "id": "b211c7bf-b5b4-4ede-a8de-a4368750c653",
+ "links": [
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/111111/images/b211c7bf-b5b4-4ede-a8de-a4368750c653",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "rax_key_name": null,
+ "rax_links": [
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/v2/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a",
+ "rel": "self"
+ },
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a",
+ "rel": "bookmark"
+ }
+ ],
+ "rax_metadata": {
+ "foo": "bar"
+ },
+ "rax_name": "test",
+ "rax_name_attr": "name",
+ "rax_networks": {
+ "private": [
+ "192.0.2.2"
+ ],
+ "public": [
+ "198.51.100.1",
+ "2001:DB8::2342"
+ ]
+ },
+ "rax_os-dcf_diskconfig": "AUTO",
+ "rax_os-ext-sts_power_state": 1,
+ "rax_os-ext-sts_task_state": null,
+ "rax_os-ext-sts_vm_state": "active",
+ "rax_progress": 100,
+ "rax_status": "ACTIVE",
+ "rax_tenant_id": "111111",
+ "rax_updated": "2013-11-14T20:49:27Z",
+ "rax_user_id": "22222"
+ }
+ }
+ }
+ }
+
+.. _standard_inventory:
+
+Standard Inventory
+++++++++++++++++++
+
+When utilizing a standard ini formatted inventory file (as opposed to the inventory plugin), it may still be advantageous to retrieve discoverable hostvar information from the Rackspace API.
+
+This can be achieved with the ``rax_facts`` module and an inventory file similar to the following:
+
+.. code-block:: ini
+
+ [test_servers]
+ hostname1 rax_region=ORD
+ hostname2 rax_region=ORD
+
+.. code-block:: yaml
+
+ - name: Gather info about servers
+ hosts: test_servers
+ gather_facts: False
+ tasks:
+ - name: Get facts about servers
+ rax_facts:
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: "{{ rax_region }}"
+ delegate_to: localhost
+ - name: Map some facts
+ set_fact:
+ ansible_host: "{{ rax_accessipv4 }}"
+
+While you don't need to know how it works, it may be interesting to know what kind of variables are returned.
+
+The ``rax_facts`` module provides facts as follows, matching the ``rax.py`` inventory script:
+
+.. code-block:: json
+
+ {
+ "ansible_facts": {
+ "rax_accessipv4": "198.51.100.1",
+ "rax_accessipv6": "2001:DB8::2342",
+ "rax_addresses": {
+ "private": [
+ {
+ "addr": "192.0.2.2",
+ "version": 4
+ }
+ ],
+ "public": [
+ {
+ "addr": "198.51.100.1",
+ "version": 4
+ },
+ {
+ "addr": "2001:DB8::2342",
+ "version": 6
+ }
+ ]
+ },
+ "rax_config_drive": "",
+ "rax_created": "2013-11-14T20:48:22Z",
+ "rax_flavor": {
+ "id": "performance1-1",
+ "links": [
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/111111/flavors/performance1-1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "rax_hostid": "e7b6961a9bd943ee82b13816426f1563bfda6846aad84d52af45a4904660cde0",
+ "rax_human_id": "test",
+ "rax_id": "099a447b-a644-471f-87b9-a7f580eb0c2a",
+ "rax_image": {
+ "id": "b211c7bf-b5b4-4ede-a8de-a4368750c653",
+ "links": [
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/111111/images/b211c7bf-b5b4-4ede-a8de-a4368750c653",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "rax_key_name": null,
+ "rax_links": [
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/v2/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a",
+ "rel": "self"
+ },
+ {
+ "href": "https://ord.servers.api.rackspacecloud.com/111111/servers/099a447b-a644-471f-87b9-a7f580eb0c2a",
+ "rel": "bookmark"
+ }
+ ],
+ "rax_metadata": {
+ "foo": "bar"
+ },
+ "rax_name": "test",
+ "rax_name_attr": "name",
+ "rax_networks": {
+ "private": [
+ "192.0.2.2"
+ ],
+ "public": [
+ "198.51.100.1",
+ "2001:DB8::2342"
+ ]
+ },
+ "rax_os-dcf_diskconfig": "AUTO",
+ "rax_os-ext-sts_power_state": 1,
+ "rax_os-ext-sts_task_state": null,
+ "rax_os-ext-sts_vm_state": "active",
+ "rax_progress": 100,
+ "rax_status": "ACTIVE",
+ "rax_tenant_id": "111111",
+ "rax_updated": "2013-11-14T20:49:27Z",
+ "rax_user_id": "22222"
+ },
+ "changed": false
+ }
+
+
+Use Cases
+`````````
+
+This section covers some additional usage examples built around a specific use case.
+
+.. _network_and_server:
+
+Network and Server
+++++++++++++++++++
+
+Create an isolated cloud network and build a server:
+
+.. code-block:: yaml
+
+ - name: Build Servers on an Isolated Network
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Network create request
+ rax_network:
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ region: IAD
+ state: present
+ delegate_to: localhost
+
+ - name: Server create request
+ rax:
+ credentials: ~/.raxpub
+ name: web%04d.example.org
+ flavor: 2
+ image: ubuntu-1204-lts-precise-pangolin
+ disk_config: manual
+ networks:
+ - public
+ - my-net
+ region: IAD
+ state: present
+ count: 5
+ exact_count: yes
+ group: web
+ wait: yes
+ wait_timeout: 360
+ register: rax
+ delegate_to: localhost
+
+.. _complete_environment:
+
+Complete Environment
+++++++++++++++++++++
+
+Build a complete webserver environment: provision servers, custom networks, and load balancers, then install nginx and create a custom index.html:
+
+.. code-block:: yaml
+
+ ---
+ - name: Build environment
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Load Balancer create request
+ rax_clb:
+ credentials: ~/.raxpub
+ name: my-lb
+ port: 80
+ protocol: HTTP
+ algorithm: ROUND_ROBIN
+ type: PUBLIC
+ timeout: 30
+ region: IAD
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: clb
+
+ - name: Network create request
+ rax_network:
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ state: present
+ region: IAD
+ register: network
+
+ - name: Server create request
+ rax:
+ credentials: ~/.raxpub
+ name: web%04d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ disk_config: manual
+ networks:
+ - public
+ - private
+ - my-net
+ region: IAD
+ state: present
+ count: 5
+ exact_count: yes
+ group: web
+ wait: yes
+ register: rax
+
+ - name: Add servers to web host group
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_host: "{{ item.rax_accessipv4 }}"
+ ansible_password: "{{ item.rax_adminpass }}"
+ ansible_user: root
+ groups: web
+ loop: "{{ rax.success }}"
+ when: rax.action == 'create'
+
+ - name: Add servers to Load balancer
+ rax_clb_nodes:
+ credentials: ~/.raxpub
+ load_balancer_id: "{{ clb.balancer.id }}"
+ address: "{{ item.rax_networks.private|first }}"
+ port: 80
+ condition: enabled
+ type: primary
+ wait: yes
+ region: IAD
+ loop: "{{ rax.success }}"
+ when: rax.action == 'create'
+
+ - name: Configure servers
+ hosts: web
+ handlers:
+ - name: restart nginx
+ service: name=nginx state=restarted
+
+ tasks:
+ - name: Install nginx
+ apt: pkg=nginx state=latest update_cache=yes cache_valid_time=86400
+ notify:
+ - restart nginx
+
+ - name: Ensure nginx starts on boot
+ service: name=nginx state=started enabled=yes
+
+ - name: Create custom index.html
+ copy: content="{{ inventory_hostname }}" dest=/usr/share/nginx/www/index.html
+ owner=root group=root mode=0644
+
+.. _rackconnect_and_manged_cloud:
+
+RackConnect and Managed Cloud
++++++++++++++++++++++++++++++
+
+When using RackConnect version 2 or Rackspace Managed Cloud, Rackspace runs automation tasks on the servers you create after they are successfully built. If your automation executes before the RackConnect or Managed Cloud automation, it can cause failures and leave servers unusable.
+
+These examples show creating servers and ensuring that the Rackspace automation has completed before Ansible continues.
+
+For simplicity, the two checks are combined; however, both are only needed when using RackConnect. When only using Managed Cloud, the RackConnect portion can be ignored.
+
+The RackConnect portions only apply to RackConnect version 2.
+
+.. _using_a_control_machine:
+
+Using a Control Machine
+***********************
+
+.. code-block:: yaml
+
+ - name: Create an exact count of servers
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Server build requests
+ rax:
+ credentials: ~/.raxpub
+ name: web%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ disk_config: manual
+ region: DFW
+ state: present
+ count: 1
+ exact_count: yes
+ group: web
+ wait: yes
+ register: rax
+
+ - name: Add servers to in memory groups
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_host: "{{ item.rax_accessipv4 }}"
+ ansible_password: "{{ item.rax_adminpass }}"
+ ansible_user: root
+ rax_id: "{{ item.rax_id }}"
+ groups: web,new_web
+ loop: "{{ rax.success }}"
+ when: rax.action == 'create'
+
+ - name: Wait for rackconnect and managed cloud automation to complete
+ hosts: new_web
+ gather_facts: false
+ tasks:
+ - name: ensure we run all tasks from localhost
+ delegate_to: localhost
+ block:
+ - name: Wait for rackconnnect automation to complete
+ rax_facts:
+ credentials: ~/.raxpub
+ id: "{{ rax_id }}"
+ region: DFW
+ register: rax_facts
+ until: rax_facts.ansible_facts['rax_metadata']['rackconnect_automation_status']|default('') == 'DEPLOYED'
+ retries: 30
+ delay: 10
+
+ - name: Wait for managed cloud automation to complete
+ rax_facts:
+ credentials: ~/.raxpub
+ id: "{{ rax_id }}"
+ region: DFW
+ register: rax_facts
+ until: rax_facts.ansible_facts['rax_metadata']['rax_service_level_automation']|default('') == 'Complete'
+ retries: 30
+ delay: 10
+
+ - name: Update new_web hosts with IP that RackConnect assigns
+ hosts: new_web
+ gather_facts: false
+ tasks:
+ - name: Get facts about servers
+ rax_facts:
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ delegate_to: localhost
+ - name: Map some facts
+ set_fact:
+ ansible_host: "{{ rax_accessipv4 }}"
+
+ - name: Base Configure Servers
+ hosts: web
+ roles:
+ - role: users
+
+ - role: openssh
+ opensshd_PermitRootLogin: "no"
+
+ - role: ntp
+
+.. _using_ansible_pull:
+
+Using Ansible Pull
+******************
+
+.. code-block:: yaml
+
+ ---
+ - name: Ensure Rackconnect and Managed Cloud Automation is complete
+ hosts: all
+ tasks:
+ - name: ensure we run all tasks from localhost
+ delegate_to: localhost
+ block:
+ - name: Check for completed bootstrap
+ stat:
+ path: /etc/bootstrap_complete
+ register: bootstrap
+
+ - name: Get region
+ command: xenstore-read vm-data/provider_data/region
+ register: rax_region
+              when: not bootstrap.stat.exists
+
+ - name: Wait for rackconnect automation to complete
+ uri:
+ url: "https://{{ rax_region.stdout|trim }}.api.rackconnect.rackspace.com/v1/automation_status?format=json"
+ return_content: yes
+ register: automation_status
+              when: not bootstrap.stat.exists
+              until: automation_status.json.automation_status|default('') == 'DEPLOYED'
+ retries: 30
+ delay: 10
+
+ - name: Wait for managed cloud automation to complete
+ wait_for:
+ path: /tmp/rs_managed_cloud_automation_complete
+ delay: 10
+              when: not bootstrap.stat.exists
+
+ - name: Set bootstrap completed
+ file:
+ path: /etc/bootstrap_complete
+ state: touch
+ owner: root
+ group: root
+ mode: 0400
+
+ - name: Base Configure Servers
+ hosts: all
+ roles:
+ - role: users
+
+ - role: openssh
+ opensshd_PermitRootLogin: "no"
+
+ - role: ntp
+
+.. _using_ansible_pull_with_xenstore:
+
+Using Ansible Pull with XenStore
+********************************
+
+.. code-block:: yaml
+
+ ---
+ - name: Ensure Rackconnect and Managed Cloud Automation is complete
+ hosts: all
+ tasks:
+ - name: Check for completed bootstrap
+ stat:
+ path: /etc/bootstrap_complete
+ register: bootstrap
+
+ - name: Wait for rackconnect_automation_status xenstore key to exist
+ command: xenstore-exists vm-data/user-metadata/rackconnect_automation_status
+ register: rcas_exists
+          when: not bootstrap.stat.exists
+ failed_when: rcas_exists.rc|int > 1
+ until: rcas_exists.rc|int == 0
+ retries: 30
+ delay: 10
+
+ - name: Wait for rackconnect automation to complete
+ command: xenstore-read vm-data/user-metadata/rackconnect_automation_status
+ register: rcas
+          when: not bootstrap.stat.exists
+ until: rcas.stdout|replace('"', '') == 'DEPLOYED'
+ retries: 30
+ delay: 10
+
+ - name: Wait for rax_service_level_automation xenstore key to exist
+ command: xenstore-exists vm-data/user-metadata/rax_service_level_automation
+ register: rsla_exists
+          when: not bootstrap.stat.exists
+ failed_when: rsla_exists.rc|int > 1
+ until: rsla_exists.rc|int == 0
+ retries: 30
+ delay: 10
+
+ - name: Wait for managed cloud automation to complete
+          command: xenstore-read vm-data/user-metadata/rax_service_level_automation
+          register: rsla
+          when: not bootstrap.stat.exists
+          until: rsla.stdout|replace('"', '') == 'Complete'
+ retries: 30
+ delay: 10
+
+ - name: Set bootstrap completed
+ file:
+ path: /etc/bootstrap_complete
+ state: touch
+ owner: root
+ group: root
+ mode: 0400
+
+ - name: Base Configure Servers
+ hosts: all
+ roles:
+ - role: users
+
+ - role: openssh
+ opensshd_PermitRootLogin: "no"
+
+ - role: ntp
+
+.. _advanced_usage:
+
+Advanced Usage
+``````````````
+
+.. _awx_autoscale:
+
+Autoscaling with Tower
+++++++++++++++++++++++
+
+:ref:`ansible_tower` also contains a very nice feature for auto-scaling use cases.
+In this mode, a simple curl script can call a defined URL and the server will "dial out" to the requester
+and configure an instance that is spinning up. This can be a great way to reconfigure ephemeral nodes.
+See the Tower documentation for more details.
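+
+For example, a provisioning callback from a new node might look like the following sketch, where the Tower hostname, job template ID, and host config key are placeholders:
+
+.. code-block:: bash
+
+    $ curl -s --data "host_config_key=HOST_CONFIG_KEY" \
+        https://tower.example.com/api/v2/job_templates/1/callback/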
+
+A benefit of using the callback in Tower over pull mode is that job results are still centrally recorded
+and less information has to be shared with remote hosts.
+
+.. _pending_information:
+
+Orchestration in the Rackspace Cloud
+++++++++++++++++++++++++++++++++++++
+
+Ansible is a powerful orchestration tool, and the rax modules allow you to orchestrate complex tasks, deployments, and configurations. The key here is to automate the provisioning of infrastructure, like any other piece of software in an environment. Complex deployments might have previously required manual manipulation of load balancers, or manual provisioning of servers. Utilizing the rax modules included with Ansible, one can make the deployment of additional nodes contingent on the current number of running nodes, or make the configuration of a clustered application dependent on the number of nodes with common metadata. One could automate the following scenarios, for example:
+
+* Servers that are removed from a Cloud Load Balancer one-by-one, updated, verified, and returned to the load balancer pool (a sketch of this scenario follows the list)
+* Expansion of an already-online environment, where nodes are provisioned, bootstrapped, configured, and software installed
+* A procedure where app log files are uploaded to a central location, like Cloud Files, before a node is decommissioned
+* Servers and load balancers that have DNS records created and destroyed on creation and decommissioning, respectively
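+
+As a sketch of the first scenario, a rolling update might drain each node from the load balancer, update it, and return it to the pool. The variables ``clb_id`` and ``clb_node_id`` are assumed to be supplied per host; the remaining parameters are illustrative:
+
+.. code-block:: yaml
+
+    - name: Rolling update behind a Cloud Load Balancer
+      hosts: raxhosts
+      serial: 1
+      tasks:
+        - name: Remove this node from the load balancer
+          rax_clb_nodes:
+            credentials: ~/.raxpub
+            load_balancer_id: "{{ clb_id }}"
+            node_id: "{{ clb_node_id }}"
+            state: absent
+            region: IAD
+          delegate_to: localhost
+
+        - name: Update packages on the node
+          apt:
+            upgrade: dist
+            update_cache: yes
+
+        - name: Return the node to the load balancer pool
+          rax_clb_nodes:
+            credentials: ~/.raxpub
+            load_balancer_id: "{{ clb_id }}"
+            address: "{{ rax_networks.private|first }}"
+            port: 80
+            condition: enabled
+            type: primary
+            region: IAD
+          delegate_to: localhost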
+
+
+
+
diff --git a/docs/docsite/rst/scenario_guides/guide_scaleway.rst b/docs/docsite/rst/scenario_guides/guide_scaleway.rst
new file mode 100644
index 00000000..77af9ba7
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_scaleway.rst
@@ -0,0 +1,293 @@
+.. _guide_scaleway:
+
+**************
+Scaleway Guide
+**************
+
+.. _scaleway_introduction:
+
+Introduction
+============
+
+`Scaleway <https://scaleway.com>`_ is a cloud provider supported by Ansible version 2.6 or higher through a dynamic inventory plugin and several modules.
+Those modules are:
+
+- :ref:`scaleway_sshkey_module`: adds a public SSH key from a file or value to the Scaleway infrastructure. Every subsequently-created device will have this public key installed in ``.ssh/authorized_keys``.
+- :ref:`scaleway_compute_module`: manages servers on Scaleway. You can use this module to create, restart and delete servers.
+- :ref:`scaleway_volume_module`: manages volumes on Scaleway.
+
+.. note::
+ This guide assumes you are familiar with Ansible and how it works.
+ If you're not, have a look at :ref:`ansible_documentation` before getting started.
+
+.. _scaleway_requirements:
+
+Requirements
+============
+
+The Scaleway modules and inventory script connect to Scaleway through the `Scaleway REST API <https://developer.scaleway.com>`_.
+To use the modules and inventory script you'll need a Scaleway API token.
+You can generate an API token via the Scaleway console `here <https://cloud.scaleway.com/#/credentials>`__.
+The simplest way to authenticate yourself is to set the Scaleway API token in an environment variable:
+
+.. code-block:: bash
+
+ $ export SCW_TOKEN=00000000-1111-2222-3333-444444444444
+
+If you're not comfortable exporting your API token, you can pass it as a parameter to the modules using the ``api_token`` argument.
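+
+For example, a minimal sketch passing the token directly to a module (the token value is a placeholder):
+
+.. code-block:: yaml
+
+    - name: "Add SSH key, passing the API token as a parameter"
+      scaleway_sshkey:
+        ssh_pub_key: "ssh-rsa AAAA..."
+        state: "present"
+        api_token: 00000000-1111-2222-3333-444444444444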
+
+If you want to use a new SSH keypair in this tutorial, you can generate one into ``./id_rsa`` and ``./id_rsa.pub`` as follows:
+
+.. code-block:: bash
+
+ $ ssh-keygen -t rsa -f ./id_rsa
+
+If you want to use an existing keypair, just copy the private and public key over to the playbook directory.
+
+.. _scaleway_add_sshkey:
+
+How to add an SSH key?
+======================
+
+Connections to Scaleway Compute nodes use Secure Shell (SSH).
+SSH keys are stored at the account level, which means that you can re-use the same SSH key on multiple nodes.
+The first step to configure Scaleway compute resources is to have at least one SSH key configured.
+
+:ref:`scaleway_sshkey_module` is a module that manages SSH keys on your Scaleway account.
+You can add an SSH key to your account by including the following task in a playbook:
+
+.. code-block:: yaml
+
+ - name: "Add SSH key"
+ scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+The ``ssh_pub_key`` parameter contains your SSH public key as a string. Here is an example inside a playbook:
+
+
+.. code-block:: yaml
+
+ - name: Test SSH key lifecycle on a Scaleway account
+ hosts: localhost
+ gather_facts: no
+ environment:
+ SCW_API_KEY: ""
+
+ tasks:
+
+ - scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB...424242 developer@example.com"
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result is success and result is changed
+
+.. _scaleway_create_instance:
+
+How to create a compute instance?
+=================================
+
+Now that we have an SSH key configured, the next step is to spin up a server!
+:ref:`scaleway_compute_module` is a module that can create, update and delete Scaleway compute instances:
+
+.. code-block:: yaml
+
+ - name: Create a server
+ scaleway_compute:
+ name: foobar
+ state: present
+ image: 00000000-1111-2222-3333-444444444444
+ organization: 00000000-1111-2222-3333-444444444444
+ region: ams1
+ commercial_type: START1-S
+
+Here are the parameter details for the example shown above:
+
+- ``name`` is the name of the instance (the one that will show up in your web console).
+- ``image`` is the UUID of the system image you would like to use.
+ A list of all images is available for each availability zone.
+- ``organization`` represents the organization that your account is attached to.
+- ``region`` represents the Availability Zone in which your instance is located (for this example, ``par1`` or ``ams1``).
+- ``commercial_type`` represents the name of the commercial offer.
+  You can check out the Scaleway pricing page to find which instance is right for you.
+
+Take a look at this short playbook to see a working example using ``scaleway_compute``:
+
+.. code-block:: yaml
+
+ - name: Test compute instance lifecycle on a Scaleway account
+ hosts: localhost
+ gather_facts: no
+ environment:
+ SCW_API_KEY: ""
+
+ tasks:
+
+ - name: Create a server
+ register: server_creation_task
+ scaleway_compute:
+ name: foobar
+ state: present
+ image: 00000000-1111-2222-3333-444444444444
+ organization: 00000000-1111-2222-3333-444444444444
+ region: ams1
+ commercial_type: START1-S
+ wait: true
+
+ - debug: var=server_creation_task
+
+ - assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
+
+ - name: Run it
+ scaleway_compute:
+ name: foobar
+ state: running
+ image: 00000000-1111-2222-3333-444444444444
+ organization: 00000000-1111-2222-3333-444444444444
+ region: ams1
+ commercial_type: START1-S
+ wait: true
+ tags:
+ - web_server
+ register: server_run_task
+
+ - debug: var=server_run_task
+
+ - assert:
+ that:
+ - server_run_task is success
+ - server_run_task is changed
+
+.. _scaleway_dynamic_inventory_tutorial:
+
+Dynamic Inventory Script
+========================
+
+Ansible ships with :ref:`scaleway_inventory`.
+You can now get a complete inventory of your Scaleway resources through this plugin and filter it on
+different parameters (``regions`` and ``tags`` are currently supported).
+
+Let's create an example!
+Suppose that we want to get all hosts that have the tag ``web_server``.
+Create a file named ``scaleway_inventory.yml`` with the following content:
+
+.. code-block:: yaml
+
+ plugin: scaleway
+ regions:
+ - ams1
+ - par1
+ tags:
+ - web_server
+
+This inventory means that we want all hosts that have the tag ``web_server`` in the zones ``ams1`` and ``par1``.
+Once you have configured this file, you can get the information using the following command:
+
+.. code-block:: bash
+
+ $ ansible-inventory --list -i scaleway_inventory.yml
+
+The output will be:
+
+.. code-block:: json
+
+ {
+ "_meta": {
+ "hostvars": {
+ "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d": {
+ "ansible_verbosity": 6,
+ "arch": "x86_64",
+ "commercial_type": "START1-S",
+ "hostname": "foobar",
+ "ipv4": "192.0.2.1",
+ "organization": "00000000-1111-2222-3333-444444444444",
+ "state": "running",
+ "tags": [
+ "web_server"
+ ]
+ }
+ }
+ },
+ "all": {
+ "children": [
+ "ams1",
+ "par1",
+ "ungrouped",
+ "web_server"
+ ]
+ },
+ "ams1": {},
+ "par1": {
+ "hosts": [
+ "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d"
+ ]
+ },
+ "ungrouped": {},
+ "web_server": {
+ "hosts": [
+ "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d"
+ ]
+ }
+ }
+
+As you can see, we get different groups of hosts.
+``par1`` and ``ams1`` are groups based on location.
+``web_server`` is a group based on a tag.
+
+If a filter parameter is not defined, the plugin assumes all possible values are wanted.
+This means that for each tag that exists on your Scaleway compute nodes, a group based on that tag will be created.
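+
+Since each of these groups can be targeted directly, a play can act on, for example, just the hosts carrying a given tag. A minimal sketch:
+
+.. code-block:: yaml
+
+    - hosts: web_server
+      gather_facts: no
+      tasks:
+        - name: Ping every host in the web_server tag group
+          ping: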
+
+Scaleway S3 object storage
+==========================
+
+`Object Storage <https://www.scaleway.com/object-storage>`_ allows you to store any kind of objects (documents, images, videos, and so on).
+As the Scaleway API is S3 compatible, Ansible supports it natively through the :ref:`s3_bucket_module` and :ref:`aws_s3_module` modules.
+
+You can find many examples in the `scaleway_s3 integration tests <https://github.com/ansible/ansible-legacy-tests/tree/devel/test/legacy/roles/scaleway_s3>`_.
+
+.. code-block:: yaml+jinja
+
+ - hosts: myserver
+ vars:
+ scaleway_region: nl-ams
+ s3_url: https://s3.nl-ams.scw.cloud
+ environment:
+ # AWS_ACCESS_KEY matches your scaleway organization id available at https://cloud.scaleway.com/#/account
+ AWS_ACCESS_KEY: 00000000-1111-2222-3333-444444444444
+ # AWS_SECRET_KEY matches a secret token that you can retrieve at https://cloud.scaleway.com/#/credentials
+ AWS_SECRET_KEY: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
+ module_defaults:
+ group/aws:
+ s3_url: '{{ s3_url }}'
+ region: '{{ scaleway_region }}'
+ tasks:
+        # use a fact instead of a variable; otherwise the template is evaluated each time the variable is used
+ - set_fact:
+ bucket_name: "{{ 99999999 | random | to_uuid }}"
+
+        # "requester_pays:" is mandatory because Scaleway doesn't implement the related API
+        # another way is to use aws_s3 with "mode: create"
+ - s3_bucket:
+ name: '{{ bucket_name }}'
+ requester_pays:
+
+ - name: Another way to create the bucket
+ aws_s3:
+ bucket: '{{ bucket_name }}'
+ mode: create
+ encrypt: false
+ register: bucket_creation_check
+
+        - name: Add an object to the bucket
+ aws_s3:
+ mode: put
+ bucket: '{{ bucket_name }}'
+ src: /tmp/test.txt # needs to be created before
+ object: test.txt
+ encrypt: false # server side encryption must be disabled
diff --git a/docs/docsite/rst/scenario_guides/guide_vagrant.rst b/docs/docsite/rst/scenario_guides/guide_vagrant.rst
new file mode 100644
index 00000000..f49477b0
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_vagrant.rst
@@ -0,0 +1,136 @@
+Vagrant Guide
+=============
+
+.. _vagrant_intro:
+
+Introduction
+````````````
+
+`Vagrant <https://www.vagrantup.com/>`_ is a tool to manage virtual machine
+environments, and allows you to configure and use reproducible work
+environments on top of various virtualization and cloud platforms.
+It also has integration with Ansible as a provisioner for these virtual
+machines, and the two tools work together well.
+
+This guide will describe how to use Vagrant 1.7+ and Ansible together.
+
+If you're not familiar with Vagrant, you should visit `the documentation
+<https://www.vagrantup.com/docs/>`_.
+
+This guide assumes that you already have Ansible installed and working.
+Running from a Git checkout is fine. Follow the :ref:`installation_guide`
+guide for more information.
+
+.. _vagrant_setup:
+
+Vagrant Setup
+`````````````
+
+The first step once you've installed Vagrant is to create a ``Vagrantfile``
+and customize it to suit your needs. This is covered in detail in the Vagrant
+documentation, but here is a quick example that includes a section to use the
+Ansible provisioner to manage a single machine:
+
+.. code-block:: ruby
+
+ # This guide is optimized for Vagrant 1.8 and above.
+ # Older versions of Vagrant put less info in the inventory they generate.
+ Vagrant.require_version ">= 1.8.0"
+
+ Vagrant.configure(2) do |config|
+
+ config.vm.box = "ubuntu/bionic64"
+
+ config.vm.provision "ansible" do |ansible|
+ ansible.verbose = "v"
+ ansible.playbook = "playbook.yml"
+ end
+ end
+
+Notice the ``config.vm.provision`` section that refers to an Ansible playbook
+called ``playbook.yml`` in the same directory as the ``Vagrantfile``. Vagrant
+runs the provisioner once the virtual machine has booted and is ready for SSH
+access.
+
+There are a lot of Ansible options you can configure in your ``Vagrantfile``.
+Visit the `Ansible Provisioner documentation
+<https://www.vagrantup.com/docs/provisioning/ansible.html>`_ for more
+information.
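+
+As an illustration, a few commonly used provisioner options might look like this (the group name and extra variable are made up for the example):
+
+.. code-block:: ruby
+
+    config.vm.provision "ansible" do |ansible|
+      ansible.playbook = "playbook.yml"
+      ansible.verbose = "v"
+      # Place the machine into an Ansible group and pass an extra variable.
+      ansible.groups = { "web" => ["default"] }
+      ansible.extra_vars = { ntp_server: "pool.ntp.org" }
+    end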
+
+To create the VM and run the provisioning playbook, run:
+
+.. code-block:: bash
+
+ $ vagrant up
+
+This will start the VM, and run the provisioning playbook (on the first VM
+startup).
+
+
+To re-run a playbook on an existing VM, just run:
+
+.. code-block:: bash
+
+ $ vagrant provision
+
+This will re-run the playbook against the existing VM.
+
+Note that having the ``ansible.verbose`` option enabled will instruct Vagrant
+to show the full ``ansible-playbook`` command used behind the scenes, as
+illustrated by this example:
+
+.. code-block:: bash
+
+ $ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook --connection=ssh --timeout=30 --limit="default" --inventory-file=/home/someone/coding-in-a-project/.vagrant/provisioners/ansible/inventory -v playbook.yml
+
+This information can be quite useful to debug integration issues and can also
+be used to manually execute Ansible from a shell, as explained in the next
+section.
+
+.. _running_ansible:
+
+Running Ansible Manually
+````````````````````````
+
+Sometimes you may want to run Ansible manually against the machines. This is
+faster than re-running ``vagrant provision`` and pretty easy to do.
+
+With our ``Vagrantfile`` example, Vagrant automatically creates an Ansible
+inventory file in ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``.
+This inventory is configured according to the SSH tunnel that Vagrant
+automatically creates. A typical automatically-created inventory file for a
+single machine environment may look something like this:
+
+.. code-block:: none
+
+ # Generated by Vagrant
+
+ default ansible_host=127.0.0.1 ansible_port=2222 ansible_user='vagrant' ansible_ssh_private_key_file='/home/someone/coding-in-a-project/.vagrant/machines/default/virtualbox/private_key'
+
+If you want to run Ansible manually, you will need to pass the
+``ansible`` or ``ansible-playbook`` commands the correct arguments, at least
+for the *inventory*:
+
+.. code-block:: bash
+
+ $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml
+
+Advanced Usage
+``````````````
+
+The "Tips and Tricks" chapter of the `Ansible Provisioner documentation
+<https://www.vagrantup.com/docs/provisioning/ansible.html>`_ provides detailed information about more advanced Ansible features like:
+
+ - how to execute a playbook in parallel within a multi-machine environment
+ - how to integrate a local ``ansible.cfg`` configuration file
+
+.. seealso::
+
+ `Vagrant Home <https://www.vagrantup.com/>`_
+ The Vagrant homepage with downloads
+ `Vagrant Documentation <https://www.vagrantup.com/docs/>`_
+ Vagrant Documentation
+ `Ansible Provisioner <https://www.vagrantup.com/docs/provisioning/ansible.html>`_
+ The Vagrant documentation for the Ansible provisioner
+ `Vagrant Issue Tracker <https://github.com/hashicorp/vagrant/issues?q=is%3Aopen+is%3Aissue+label%3Aprovisioners%2Fansible>`_
+ The open issues for the Ansible provisioner in the Vagrant project
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
diff --git a/docs/docsite/rst/scenario_guides/guide_vmware.rst b/docs/docsite/rst/scenario_guides/guide_vmware.rst
new file mode 100644
index 00000000..b31553d5
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_vmware.rst
@@ -0,0 +1,33 @@
+.. _vmware_ansible:
+
+******************
+VMware Guide
+******************
+
+Welcome to the Ansible for VMware Guide!
+
+The purpose of this guide is to teach you everything you need to know about using Ansible with VMware.
+
+To get started, please select one of the following topics.
+
+.. toctree::
+ :maxdepth: 1
+
+ vmware_scenarios/vmware_intro
+ vmware_scenarios/vmware_concepts
+ vmware_scenarios/vmware_requirements
+ vmware_scenarios/vmware_inventory
+ vmware_scenarios/vmware_inventory_vm_attributes
+ vmware_scenarios/vmware_inventory_hostnames
+ vmware_scenarios/vmware_inventory_filters
+ vmware_scenarios/vmware_scenarios
+ vmware_scenarios/vmware_troubleshooting
+ vmware_scenarios/vmware_external_doc_links
+ vmware_scenarios/faq
+.. comments look like this - start with two dots
+.. getting_started content not ready
+.. vmware_scenarios/vmware_getting_started
+.. module index page not ready
+.. vmware_scenarios/vmware_module_reference
+.. always exclude the template file
+.. vmware_scenarios/vmware_scenario_1
diff --git a/docs/docsite/rst/scenario_guides/guide_vultr.rst b/docs/docsite/rst/scenario_guides/guide_vultr.rst
new file mode 100644
index 00000000..c5d5adec
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guide_vultr.rst
@@ -0,0 +1,171 @@
+Vultr Guide
+===========
+
+Ansible offers a set of modules to interact with the `Vultr <https://www.vultr.com>`_ cloud platform.
+
+This set of modules forms a framework that allows one to easily manage and orchestrate one's infrastructure on the Vultr cloud platform.
+
+
+Requirements
+------------
+
+There are no technical requirements; you simply need an existing Vultr account.
+
+
+Configuration
+-------------
+
+The Vultr modules offer a rather flexible way to handle configuration.
+
+Configuration is read in the following order:
+
+- Environment Variables (eg. ``VULTR_API_KEY``, ``VULTR_API_TIMEOUT``)
+- File specified by environment variable ``VULTR_API_CONFIG``
+- ``vultr.ini`` file located in current working directory
+- ``$HOME/.vultr.ini``
+
+
+INI files are structured this way:
+
+.. code-block:: ini
+
+ [default]
+ key = MY_API_KEY
+ timeout = 60
+
+ [personal_account]
+ key = MY_PERSONAL_ACCOUNT_API_KEY
+ timeout = 30
+
+
+If ``VULTR_API_ACCOUNT`` environment variable or ``api_account`` module parameter is not specified, modules will look for the section named "default".
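+
+For example, to select the ``personal_account`` section from the INI file above (a sketch reusing the ``vultr_account_info`` module shown below):
+
+.. code-block:: console
+
+    #> VULTR_API_ACCOUNT=personal_account ansible -m vultr_account_info localhost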
+
+
+Authentication
+--------------
+
+Before using the Ansible modules to interact with Vultr, you need an API key.
+If you don't have one yet, log in to `Vultr <https://www.vultr.com>`_, go to Account, then API, enable the API, and the API key should show up.
+
+Ensure you allow the usage of the API key from the proper IP addresses.
+
+Refer to the Configuration section to find out where to put this information.
+
+To check that everything is working properly run the following command:
+
+.. code-block:: console
+
+ #> VULTR_API_KEY=XXX ansible -m vultr_account_info localhost
+ localhost | SUCCESS => {
+ "changed": false,
+ "vultr_account_info": {
+ "balance": -8.9,
+ "last_payment_amount": -10.0,
+ "last_payment_date": "2018-07-21 11:34:46",
+ "pending_charges": 6.0
+ },
+ "vultr_api": {
+ "api_account": "default",
+ "api_endpoint": "https://api.vultr.com",
+ "api_retries": 5,
+ "api_timeout": 60
+ }
+ }
+
+
+If similar output displays, then everything is set up properly; otherwise, ensure that the proper ``VULTR_API_KEY`` has been specified and that the Access Control settings on the Vultr > Account > API page are accurate.
+
+
+Usage
+-----
+
+Since `Vultr <https://www.vultr.com>`_ offers a public API, the modules that manage infrastructure on their platform execute on localhost. This translates to:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: localhost
+ tasks:
+ - name: Create a 10G volume
+ vultr_block_storage:
+ name: my_disk
+ size: 10
+ region: New Jersey
+
+
+From that point on, your creativity is the only limit. Make sure to read the documentation of the `available modules <https://docs.ansible.com/ansible/latest/modules/list_of_cloud_modules.html#vultr>`_.
+
+
+Dynamic Inventory
+-----------------
+
+Ansible provides a dynamic inventory plugin for `Vultr <https://www.vultr.com>`_.
+The configuration process is exactly the same as the one for the modules.
+
+To be able to use it you need to enable it first by specifying the following in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins=vultr
+
+Then provide a configuration file to be used with the plugin. The minimal configuration file looks like this:
+
+.. code-block:: yaml
+
+ ---
+ plugin: vultr
+
+To list the available hosts one can simply run:
+
+.. code-block:: console
+
+ #> ansible-inventory -i vultr.yml --list
+
+
+For example, this allows you to take action on nodes grouped by location or OS name:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: Amsterdam
+ tasks:
+ - name: Rebooting the machine
+ shell: reboot
+ become: True
+
+
+Integration tests
+-----------------
+
+Ansible includes integration tests for all Vultr modules.
+
+These tests are meant to run against the public Vultr API, which is why they require a valid key to access the API.
+
+Prepare the test setup:
+
+.. code-block:: shell
+
+    $ cd ansible # location of the Ansible source checkout
+ $ source ./hacking/env-setup
+
+Set the Vultr API key:
+
+.. code-block:: shell
+
+ $ cd test/integration
+ $ cp cloud-config-vultr.ini.template cloud-config-vultr.ini
+ $ vi cloud-config-vultr.ini
+
+Run all Vultr tests:
+
+.. code-block:: shell
+
+ $ ansible-test integration cloud/vultr/ -v --diff --allow-unsupported
+
+
+To run a specific test, for example vultr_account_info:
+
+.. code-block:: shell
+
+ $ ansible-test integration cloud/vultr/vultr_account_info -v --diff --allow-unsupported
diff --git a/docs/docsite/rst/scenario_guides/guides.rst b/docs/docsite/rst/scenario_guides/guides.rst
new file mode 100644
index 00000000..2ff65bbc
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/guides.rst
@@ -0,0 +1,43 @@
+:orphan:
+
+.. unified index page included for backwards compatibility
+
+******************
+Scenario Guides
+******************
+
+The guides in this section cover integrating Ansible with a variety of
+platforms, products, and technologies. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features.
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Public Cloud Guides
+
+ guide_alicloud
+ guide_aws
+ guide_cloudstack
+ guide_gce
+ guide_azure
+ guide_online
+ guide_oracle
+ guide_packet
+ guide_rax
+ guide_scaleway
+ guide_vultr
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Network Technology Guides
+
+ guide_aci
+ guide_meraki
+ guide_infoblox
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Virtualization & Containerization Guides
+
+ guide_docker
+ guide_kubernetes
+ guide_vagrant
+ guide_vmware
diff --git a/docs/docsite/rst/scenario_guides/network_guides.rst b/docs/docsite/rst/scenario_guides/network_guides.rst
new file mode 100644
index 00000000..2b538ff0
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/network_guides.rst
@@ -0,0 +1,16 @@
+.. _network_guides:
+
+*************************
+Network Technology Guides
+*************************
+
+The guides in this section cover using Ansible with specific network technologies. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features.
+
+.. toctree::
+ :maxdepth: 1
+
+ guide_aci
+ guide_meraki
+ guide_infoblox
+
+To learn more about Network Automation with Ansible, see :ref:`network_getting_started` and :ref:`network_advanced`.
diff --git a/docs/docsite/rst/scenario_guides/scenario_template.rst b/docs/docsite/rst/scenario_guides/scenario_template.rst
new file mode 100644
index 00000000..14695bed
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/scenario_template.rst
@@ -0,0 +1,53 @@
+:orphan:
+
+.. _scenario_template:
+
+*************************************
+Sample scenario for Ansible platforms
+*************************************
+
+Use this ``rst`` file as a starting point to create a scenario guide for your platform. The sections below are suggestions on what should be in a scenario guide.
+
+Introductory paragraph.
+
+.. contents::
+ :local:
+
+Prerequisites
+=============
+
+Describe the requirements and assumptions for this scenario. This should include applicable subsections for hardware, software, and any other caveats to using the scenarios in this guide.
+
+Credentials and authenticating
+==============================
+
+Describe credential requirements and how to authenticate to this platform.
+
+Using dynamic inventory
+=========================
+
+If applicable, describe how to use a dynamic inventory plugin for this platform.
+
+
+Example description
+===================
+
+Description and code here. Change the section header to something descriptive about this example, such as "Renaming a virtual machine". The goal is that this is the text someone would search for to find your example.
+
+
+Example output
+--------------
+
+What the user should expect to see.
+
+
+Troubleshooting
+---------------
+
+What to look for if it breaks.
+
+
+Conclusion and where to go next
+===============================
+
+Recap of important points. For more information please see: links.
diff --git a/docs/docsite/rst/scenario_guides/virt_guides.rst b/docs/docsite/rst/scenario_guides/virt_guides.rst
new file mode 100644
index 00000000..b623799f
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/virt_guides.rst
@@ -0,0 +1,15 @@
+.. _virtualization_guides:
+
+******************************************
+Virtualization and Containerization Guides
+******************************************
+
+The guides in this section cover integrating Ansible with popular tools for creating virtual machines and containers. They explore particular use cases in greater depth and provide a more "top-down" explanation of some basic features.
+
+.. toctree::
+ :maxdepth: 1
+
+ guide_docker
+ guide_kubernetes
+ guide_vagrant
+ guide_vmware
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst
new file mode 100644
index 00000000..6987df0b
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/faq.rst
@@ -0,0 +1,26 @@
+.. _vmware_faq:
+
+******************
+Ansible VMware FAQ
+******************
+
+vmware_guest
+============
+
+Can I deploy a virtual machine on a standalone ESXi server?
+------------------------------------------------------------
+
+Yes. ``vmware_guest`` can deploy a virtual machine with required settings on a standalone ESXi server.
+However, you must have a paid license to deploy virtual machines this way. If you are using the free version, the API is read-only.
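+
+A minimal sketch of deploying directly to a standalone ESXi host (the hostname, credentials, and datastore are placeholders):
+
+.. code-block:: yaml
+
+    - name: Deploy a VM on a standalone ESXi server
+      vmware_guest:
+        hostname: esxi01.example.com
+        username: root
+        password: secret
+        validate_certs: no
+        name: testvm_1
+        guest_id: centos7_64Guest
+        disk:
+          - size_gb: 10
+            type: thin
+            datastore: datastore1
+        hardware:
+          memory_mb: 512
+          num_cpus: 1
+        state: poweredon
+      delegate_to: localhost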
+
+Is ``/vm`` required for the ``vmware_guest`` module?
+------------------------------------------------------
+
+Prior to Ansible version 2.5, ``folder`` was an optional parameter with a default value of ``/vm``.
+
+The folder parameter was used to discover information about virtual machines in the given infrastructure.
+
+Starting with Ansible version 2.5, ``folder`` is still an optional parameter with no default value.
+This parameter is now used to identify a user's virtual machine when multiple virtual machines or virtual
+machine templates are found with the same name. VMware does not restrict the system administrator from creating virtual
+machines with the same name.
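+
+For example, when two virtual machines share a name, ``folder`` disambiguates which one is meant. A sketch (hostname, credentials, and paths are illustrative):
+
+.. code-block:: yaml
+
+    - name: Manage a specific VM when names collide
+      vmware_guest:
+        hostname: vcenter.example.com
+        username: administrator@vsphere.local
+        password: secret
+        validate_certs: no
+        name: testvm_1
+        folder: /DC1/vm/production
+        state: present
+      delegate_to: localhost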
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst
new file mode 100644
index 00000000..2c7647ef
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_clone_template.rst
@@ -0,0 +1,222 @@
+.. _vmware_guest_from_template:
+
+****************************************
+Deploy a virtual machine from a template
+****************************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+This guide will show you how to utilize Ansible to clone a virtual machine from already existing VMware template or existing VMware guest.
+
+Scenario Requirements
+=====================
+
+* Software
+
+ * Ansible 2.5 or later must be installed
+
+  * The Python module ``Pyvmomi`` must be installed on the Ansible control node (or the target host if not executing against localhost)
+
+    * Installing the latest ``Pyvmomi`` via ``pip`` is recommended (as the OS-provided packages are usually out of date and incompatible)
+
+* Hardware
+
+ * vCenter Server with at least one ESXi server
+
+* Access / Credentials
+
+  * Ansible (or the target server) must have network access to either the vCenter server or the ESXi server you will be deploying to
+
+ * Username and Password
+
+ * Administrator user with following privileges
+
+ - ``Datastore.AllocateSpace`` on the destination datastore or datastore folder
+ - ``Network.Assign`` on the network to which the virtual machine will be assigned
+ - ``Resource.AssignVMToPool`` on the destination host, cluster, or resource pool
+ - ``VirtualMachine.Config.AddNewDisk`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.AddRemoveDevice`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Interact.PowerOn`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Inventory.CreateFromExisting`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Provisioning.Clone`` on the virtual machine you are cloning
+ - ``VirtualMachine.Provisioning.Customize`` on the virtual machine or virtual machine folder if you are customizing the guest operating system
+ - ``VirtualMachine.Provisioning.DeployTemplate`` on the template you are using
+ - ``VirtualMachine.Provisioning.ReadCustSpecs`` on the root vCenter Server if you are customizing the guest operating system
+
+ Depending on your requirements, you could also need one or more of the following privileges:
+
+ - ``VirtualMachine.Config.CPUCount`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.Memory`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.DiskExtend`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.Annotation`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.AdvancedConfig`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.EditDevice`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.Resource`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.Settings`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Config.UpgradeVirtualHardware`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Interact.SetCDMedia`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Interact.SetFloppyMedia`` on the datacenter or virtual machine folder
+ - ``VirtualMachine.Interact.DeviceConnection`` on the datacenter or virtual machine folder
+
+Assumptions
+===========
+
+- All variable names and VMware object names are case sensitive
+- VMware allows the creation of virtual machines and templates with the same name across datacenters and within datacenters
+- You need Python 2.7.9 or later in order to use the ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours
+
+Caveats
+=======
+
+- Hosts in the ESXi cluster must have access to the datastore that the template resides on.
+- Multiple templates with the same name will cause module failures.
+- In order to utilize Guest Customization, VMware Tools must be installed on the template. For Linux, the ``open-vm-tools`` package is recommended, and it requires that ``Perl`` be installed.
+
+
+Example Description
+===================
+
+In this use case / example, we will be selecting a virtual machine template and cloning it into a specific folder in our Datacenter / Cluster. The following Ansible playbook showcases the basic parameters that are needed for this.
+
+.. code-block:: yaml
+
+ ---
+ - name: Create a VM from a template
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Clone the template
+ vmware_guest:
+ hostname: "{{ vcenter_ip }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ name: testvm_2
+ template: template_el7
+ datacenter: "{{ datacenter_name }}"
+ folder: /DC1/vm
+ state: poweredon
+ cluster: "{{ cluster_name }}"
+ wait_for_ip_address: yes
+
+
+Since Ansible utilizes the VMware API to perform actions, in this use case we will be connecting directly to the API from our localhost. This means that our playbooks will not be running from the vCenter or ESXi Server. We do not necessarily need to collect facts about our localhost, so the ``gather_facts`` parameter will be disabled. You can run these modules against another server that would then connect to the API if your localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server.
+
+To begin, there are a few bits of information we will need. First and foremost is the hostname of the ESXi server or vCenter server. After this, you will need the username and password for this server. For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_. If your vCenter or ESXi server is not set up with proper CA certificates that can be verified from the Ansible server, it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook.
+
+Now you need to supply the information about the virtual machine which will be created. Give your virtual machine a name, one that conforms to all VMware requirements for naming conventions. Next, select the display name of the template from which you want to clone the new virtual machine. This must match what's displayed in the VMware Web UI exactly. Then you can specify a folder to place this new virtual machine in. This path can either be a relative path or a full path to the folder including the Datacenter. You may need to specify a state for the virtual machine. This simply tells the module which action you want to take; in this case you will ensure that the virtual machine exists and is powered on. An optional parameter is ``wait_for_ip_address``; this tells Ansible to wait until the virtual machine has fully booted and VMware Tools is running before completing this task.
+
+
+What to expect
+--------------
+
+- You will see a bit of JSON output after this playbook completes. This output shows various parameters that are returned from the module and from vCenter about the newly created VM.
+
+.. code-block:: json
+
+ {
+ "changed": true,
+ "instance": {
+ "annotation": "",
+ "current_snapshot": null,
+ "customvalues": {},
+ "guest_consolidation_needed": false,
+ "guest_question": null,
+ "guest_tools_status": "guestToolsNotRunning",
+ "guest_tools_version": "0",
+ "hw_cores_per_socket": 1,
+ "hw_datastores": [
+ "ds_215"
+ ],
+ "hw_esxi_host": "192.0.2.44",
+ "hw_eth0": {
+ "addresstype": "assigned",
+ "ipaddresses": null,
+ "label": "Network adapter 1",
+ "macaddress": "00:50:56:8c:19:f4",
+ "macaddress_dash": "00-50-56-8c-19-f4",
+ "portgroup_key": "dvportgroup-17",
+ "portgroup_portkey": "0",
+ "summary": "DVSwitch: 50 0c 5b 22 b6 68 ab 89-fc 0b 59 a4 08 6e 80 fa"
+ },
+ "hw_files": [
+ "[ds_215] testvm_2/testvm_2.vmx",
+ "[ds_215] testvm_2/testvm_2.vmsd",
+ "[ds_215] testvm_2/testvm_2.vmdk"
+ ],
+ "hw_folder": "/DC1/vm",
+ "hw_guest_full_name": null,
+ "hw_guest_ha_state": null,
+ "hw_guest_id": null,
+ "hw_interfaces": [
+ "eth0"
+ ],
+ "hw_is_template": false,
+ "hw_memtotal_mb": 512,
+ "hw_name": "testvm_2",
+ "hw_power_status": "poweredOff",
+ "hw_processor_count": 2,
+ "hw_product_uuid": "420cb25b-81e8-8d3b-dd2d-a439ee54fcc5",
+ "hw_version": "vmx-13",
+ "instance_uuid": "500cd53b-ed57-d74e-2da8-0dc0eddf54d5",
+ "ipv4": null,
+ "ipv6": null,
+ "module_hw": true,
+ "snapshots": []
+ },
+ "invocation": {
+ "module_args": {
+ "annotation": null,
+ "cdrom": {},
+ "cluster": "DC1_C1",
+ "customization": {},
+ "customization_spec": null,
+ "customvalues": [],
+ "datacenter": "DC1",
+ "disk": [],
+ "esxi_hostname": null,
+ "folder": "/DC1/vm",
+ "force": false,
+ "guest_id": null,
+ "hardware": {},
+ "hostname": "192.0.2.44",
+ "is_template": false,
+ "linked_clone": false,
+ "name": "testvm_2",
+ "name_match": "first",
+ "networks": [],
+ "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "port": 443,
+ "resource_pool": null,
+ "snapshot_src": null,
+ "state": "present",
+ "state_change_timeout": 0,
+ "template": "template_el7",
+ "username": "administrator@vsphere.local",
+ "uuid": null,
+ "validate_certs": false,
+ "vapp_properties": [],
+ "wait_for_ip_address": true
+ }
+ }
+ }
+
+- ``changed`` is set to ``True``, which indicates that the virtual machine was built using the given template. The module will not complete until the clone task in VMware is finished. This can take some time depending on your environment.
+
+- If you utilize the ``wait_for_ip_address`` parameter, then it will also increase the clone time as it will wait until the virtual machine boots into the OS and an IP address has been assigned to the given NIC.
+
+
+
+Troubleshooting
+---------------
+
+Things to inspect:
+
+- Check if the values provided for username and password are correct
+- Check if the datacenter you provided is available
+- Check if the template specified exists and you have permissions to access the datastore
+- Ensure the full folder path you specified already exists. It will not create folders automatically for you
+
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst
new file mode 100644
index 00000000..62758867
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_find_vm_folder.rst
@@ -0,0 +1,120 @@
+.. _vmware_guest_find_folder:
+
+******************************************************
+Find folder path of an existing VMware virtual machine
+******************************************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+This guide will show you how to utilize Ansible to find folder path of an existing VMware virtual machine.
+
+Scenario Requirements
+=====================
+
+* Software
+
+ * Ansible 2.5 or later must be installed.
+
+ * The Python module ``Pyvmomi`` must be installed on the Ansible control node (or Target host if not executing against localhost).
+
+ * We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible).
+
+* Hardware
+
+ * At least one standalone ESXi server or
+
+ * vCenter Server with at least one ESXi server
+
+* Access / Credentials
+
+  * Ansible (or the target server) must have network access to either the vCenter server or the ESXi server
+
+ * Username and Password for vCenter or ESXi server
+
+Caveats
+=======
+
+- All variable names and VMware object names are case sensitive.
+- You need Python 2.7.9 or later in order to use the ``validate_certs`` option, as this version is capable of changing the SSL verification behaviours.
+
+
+Example Description
+===================
+
+With the following Ansible playbook you can find the folder path of an existing virtual machine using its name.
+
+.. code-block:: yaml
+
+ ---
+ - name: Find folder path of an existing virtual machine
+ hosts: localhost
+ gather_facts: False
+ vars_files:
+ - vcenter_vars.yml
+ vars:
+ ansible_python_interpreter: "/usr/bin/env python3"
+ tasks:
+ - set_fact:
+ vm_name: "DC0_H0_VM0"
+
+ - name: "Find folder for VM - {{ vm_name }}"
+ vmware_guest_find:
+ hostname: "{{ vcenter_server }}"
+ username: "{{ vcenter_user }}"
+ password: "{{ vcenter_pass }}"
+ validate_certs: False
+ name: "{{ vm_name }}"
+ delegate_to: localhost
+ register: vm_facts
+
+
+Since Ansible utilizes the VMware API to perform actions, in this use case it will be connecting directly to the API from localhost.
+
+This means that playbooks will not be running from the vCenter or ESXi Server.
+
+Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost.
+
+You can run these modules against another server that would then connect to the API if localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. We recommend installing the latest version with pip: ``pip install Pyvmomi`` (as the OS packages are usually out of date and incompatible).
+
+Before you begin, make sure you have:
+
+- Hostname of the ESXi server or vCenter server
+- Username and password for the ESXi or vCenter server
+- Name of the existing Virtual Machine for which you want to collect folder path
+
+For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_.
+
+If your vCenter or ESXi server is not set up with proper CA certificates that can be verified from the Ansible server, it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook.
+
+The name of the existing virtual machine is used as input for the ``vmware_guest_find`` module via the ``name`` parameter.
+
+
+What to expect
+--------------
+
+Running this playbook can take some time, depending on your environment and network connectivity. When the run is complete you will see output like this:
+
+.. code-block:: yaml
+
+ "vm_facts": {
+ "changed": false,
+ "failed": false,
+ ...
+ "folders": [
+ "/F0/DC0/vm/F0"
+ ]
+ }
+
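+The returned ``folders`` list can be used by later tasks. As a minimal sketch, a follow-up ``debug`` task could print the first matching path:
+
+.. code-block:: yaml
+
+    - name: Show the folder path of the virtual machine
+      debug:
+        msg: "{{ vm_name }} is stored in {{ vm_facts.folders[0] }}"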
+
+Troubleshooting
+---------------
+
+If your playbook fails:
+
+- Check if the values provided for username and password are correct.
+- Check if the datacenter you provided is available.
+- Check if the specified virtual machine exists and you have the required permissions to access the VMware object.
+- Ensure the full folder path you specified already exists.
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst
new file mode 100644
index 00000000..620f8e0a
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_remove_vm.rst
@@ -0,0 +1,126 @@
+.. _vmware_guest_remove_virtual_machine:
+
+*****************************************
+Remove an existing VMware virtual machine
+*****************************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+This guide will show you how to utilize Ansible to remove an existing VMware virtual machine.
+
+Scenario Requirements
+=====================
+
+* Software
+
+ * Ansible 2.5 or later must be installed.
+
+  * The Python module ``pyVmomi`` must be installed on the Ansible control node (or target host if not executing against localhost).
+
+    * We recommend installing the latest version with pip: ``pip install pyvmomi`` (as the OS packages are usually out of date and incompatible).
+
+* Hardware
+
+ * At least one standalone ESXi server or
+
+ * vCenter Server with at least one ESXi server
+
+* Access / Credentials
+
+  * Ansible (or the target server) must have network access to either the vCenter server or the ESXi server
+
+ * Username and Password for vCenter or ESXi server
+
+ * Hosts in the ESXi cluster must have access to the datastore that the template resides on.
+
+Caveats
+=======
+
+- All variable names and VMware object names are case sensitive.
+- You need Python 2.7.9 or later in order to use the ``validate_certs`` option, as this version is capable of changing the SSL verification behaviour.
+- The ``vmware_guest`` module mimics the VMware Web UI workflow, so the virtual machine must be in the powered off state in order to remove it from the VMware inventory.
+
+.. warning::
+
+   Removing a VMware virtual machine with the ``vmware_guest`` module is a destructive operation that cannot be reverted, so it is strongly recommended to back up the virtual machine and its related files (vmx and vmdk files) before proceeding.
+
+Example Description
+===================
+
+In this use case / example, the user removes a virtual machine by name. The following Ansible playbook showcases the basic parameters that are needed for this.
+
+.. code-block:: yaml
+
+ ---
+ - name: Remove virtual machine
+ gather_facts: no
+ vars_files:
+ - vcenter_vars.yml
+ vars:
+ ansible_python_interpreter: "/usr/bin/env python3"
+ hosts: localhost
+ tasks:
+ - set_fact:
+ vm_name: "VM_0003"
+ datacenter: "DC1"
+
+ - name: Remove "{{ vm_name }}"
+ vmware_guest:
+ hostname: "{{ vcenter_server }}"
+ username: "{{ vcenter_user }}"
+ password: "{{ vcenter_pass }}"
+ validate_certs: no
+ cluster: "DC1_C1"
+ name: "{{ vm_name }}"
+ state: absent
+ delegate_to: localhost
+ register: facts
+
+
+Since Ansible utilizes the VMware API to perform actions, in this use case it will be connecting directly to the API from localhost.
+
+This means that playbooks will not be running from the vCenter or ESXi Server.
+
+Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost.
+
+You can run these modules against another server that would then connect to the API if localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. We recommend installing the latest version with pip: ``pip install pyvmomi`` (as the OS packages are usually out of date and incompatible).
+
+Before you begin, make sure you have:
+
+- Hostname of the ESXi server or vCenter server
+- Username and password for the ESXi or vCenter server
+- Name of the existing Virtual Machine you want to remove
+
+For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_.
+
+If your vCenter or ESXi server is not set up with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook.
+
+The name of the existing virtual machine is used as input for the ``vmware_guest`` module via the ``name`` parameter.
+
+
+What to expect
+--------------
+
+- Compared to other operations performed using the ``vmware_guest`` module, you will not see any detailed JSON output after this playbook completes:
+
+.. code-block:: yaml
+
+ {
+ "changed": true
+ }
+
+- ``changed`` is set to ``true``, which indicates that the virtual machine was removed from the VMware inventory. This can take some time depending upon your environment and network connectivity. A follow-up task can verify the result, as the sketch below shows.
+
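+If later tasks should only proceed once the removal is confirmed, you can test the registered ``facts`` variable. A minimal sketch:
+
+.. code-block:: yaml
+
+    - name: Verify that the virtual machine was removed
+      assert:
+        that:
+          - facts.changed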
+
+Troubleshooting
+---------------
+
+If your playbook fails:
+
+- Check if the values provided for username and password are correct.
+- Check if the datacenter you provided is available.
+- Check if the virtual machine specified exists and you have permissions to access the datastore.
+- Ensure the full folder path you specified already exists. It will not create folders automatically for you.
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst
new file mode 100644
index 00000000..81272897
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_rename_vm.rst
@@ -0,0 +1,173 @@
+.. _vmware_guest_rename_virtual_machine:
+
+**********************************
+Rename an existing virtual machine
+**********************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+This guide will show you how to utilize Ansible to rename an existing virtual machine.
+
+Scenario Requirements
+=====================
+
+* Software
+
+ * Ansible 2.5 or later must be installed.
+
+  * The Python module ``pyVmomi`` must be installed on the Ansible control node (or target host if not executing against localhost).
+
+    * We recommend installing the latest version with pip: ``pip install pyvmomi`` (as the OS packages are usually out of date and incompatible).
+
+* Hardware
+
+ * At least one standalone ESXi server or
+
+ * vCenter Server with at least one ESXi server
+
+* Access / Credentials
+
+  * Ansible (or the target server) must have network access to either the vCenter server or the ESXi server
+
+ * Username and Password for vCenter or ESXi server
+
+ * Hosts in the ESXi cluster must have access to the datastore that the template resides on.
+
+Caveats
+=======
+
+- All variable names and VMware object names are case sensitive.
+- You need Python 2.7.9 or later in order to use the ``validate_certs`` option, as this version is capable of changing the SSL verification behaviour.
+
+
+Example Description
+===================
+
+With the following Ansible playbook you can rename an existing virtual machine, identifying it by its UUID.
+
+.. code-block:: yaml
+
+ ---
+ - name: Rename virtual machine from old name to new name using UUID
+ gather_facts: no
+ vars_files:
+ - vcenter_vars.yml
+ vars:
+ ansible_python_interpreter: "/usr/bin/env python3"
+ hosts: localhost
+ tasks:
+ - set_fact:
+ vm_name: "old_vm_name"
+ new_vm_name: "new_vm_name"
+ datacenter: "DC1"
+ cluster_name: "DC1_C1"
+
+ - name: Get VM "{{ vm_name }}" uuid
+ vmware_guest_facts:
+ hostname: "{{ vcenter_server }}"
+ username: "{{ vcenter_user }}"
+ password: "{{ vcenter_pass }}"
+ validate_certs: False
+ datacenter: "{{ datacenter }}"
+          folder: "/{{ datacenter }}/vm"
+ name: "{{ vm_name }}"
+ register: vm_facts
+
+ - name: Rename "{{ vm_name }}" to "{{ new_vm_name }}"
+ vmware_guest:
+ hostname: "{{ vcenter_server }}"
+ username: "{{ vcenter_user }}"
+ password: "{{ vcenter_pass }}"
+ validate_certs: False
+ cluster: "{{ cluster_name }}"
+ uuid: "{{ vm_facts.instance.hw_product_uuid }}"
+ name: "{{ new_vm_name }}"
+
+Since Ansible utilizes the VMware API to perform actions, in this use case it will be connecting directly to the API from localhost.
+
+This means that playbooks will not be running from the vCenter or ESXi Server.
+
+Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost.
+
+You can run these modules against another server that would then connect to the API if localhost does not have access to vCenter. If so, the required Python modules will need to be installed on that target server. We recommend installing the latest version with pip: ``pip install pyvmomi`` (as the OS packages are usually out of date and incompatible).
+
+Before you begin, make sure you have:
+
+- Hostname of the ESXi server or vCenter server
+- Username and password for the ESXi or vCenter server
+- The UUID of the existing Virtual Machine you want to rename
+
+For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_.
+
+If your vCenter or ESXi server is not set up with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook.
+
+Now you need to supply the information about the existing virtual machine which will be renamed. For renaming the virtual machine, the ``vmware_guest`` module uses the VMware UUID, which is unique across the vCenter environment. This value is autogenerated and cannot be changed. You will use the ``vmware_guest_facts`` module to find the virtual machine and get information about its VMware UUID.
+
+This value is used as input for the ``vmware_guest`` module. As the ``name`` parameter, specify a new name for the virtual machine that conforms to all VMware naming conventions, and provide the VMware UUID as the value of the ``uuid`` parameter.
+
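+As a quick sanity check before the rename, you could print the UUID gathered by ``vmware_guest_facts``:
+
+.. code-block:: yaml
+
+    - name: Show the UUID that will be used for the rename
+      debug:
+        msg: "UUID of {{ vm_name }} is {{ vm_facts.instance.hw_product_uuid }}"
+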
+What to expect
+--------------
+
+Running this playbook can take some time, depending on your environment and network connectivity. When the run is complete you will see output like this:
+
+.. code-block:: yaml
+
+ {
+ "changed": true,
+ "instance": {
+ "annotation": "",
+ "current_snapshot": null,
+ "customvalues": {},
+ "guest_consolidation_needed": false,
+ "guest_question": null,
+ "guest_tools_status": "guestToolsNotRunning",
+ "guest_tools_version": "10247",
+ "hw_cores_per_socket": 1,
+ "hw_datastores": ["ds_204_2"],
+ "hw_esxi_host": "10.x.x.x",
+ "hw_eth0": {
+ "addresstype": "assigned",
+ "ipaddresses": [],
+ "label": "Network adapter 1",
+ "macaddress": "00:50:56:8c:b8:42",
+ "macaddress_dash": "00-50-56-8c-b8-42",
+ "portgroup_key": "dvportgroup-31",
+ "portgroup_portkey": "15",
+ "summary": "DVSwitch: 50 0c 3a 69 df 78 2c 7b-6e 08 0a 89 e3 a6 31 17"
+ },
+ "hw_files": ["[ds_204_2] old_vm_name/old_vm_name.vmx", "[ds_204_2] old_vm_name/old_vm_name.nvram", "[ds_204_2] old_vm_name/old_vm_name.vmsd", "[ds_204_2] old_vm_name/vmware.log", "[ds_204_2] old_vm_name/old_vm_name.vmdk"],
+ "hw_folder": "/DC1/vm",
+ "hw_guest_full_name": null,
+ "hw_guest_ha_state": null,
+ "hw_guest_id": null,
+ "hw_interfaces": ["eth0"],
+ "hw_is_template": false,
+ "hw_memtotal_mb": 1024,
+ "hw_name": "new_vm_name",
+ "hw_power_status": "poweredOff",
+ "hw_processor_count": 1,
+ "hw_product_uuid": "420cbebb-835b-980b-7050-8aea9b7b0a6d",
+ "hw_version": "vmx-13",
+ "instance_uuid": "500c60a6-b7b4-8ae5-970f-054905246a6f",
+ "ipv4": null,
+ "ipv6": null,
+ "module_hw": true,
+ "snapshots": []
+ }
+ }
+
+confirming that you've renamed the virtual machine.
+
+
+Troubleshooting
+---------------
+
+If your playbook fails:
+
+- Check if the values provided for username and password are correct.
+- Check if the datacenter you provided is available.
+- Check if the virtual machine specified exists and you have permissions to access the datastore.
+- Ensure the full folder path you specified already exists.
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst
new file mode 100644
index 00000000..e893c9d0
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/scenario_vmware_http.rst
@@ -0,0 +1,161 @@
+.. _vmware_http_api_usage:
+
+****************************************
+Using the VMware HTTP API with Ansible
+****************************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+This guide will show you how to utilize Ansible and the VMware HTTP APIs to automate various tasks.
+
+Scenario Requirements
+=====================
+
+* Software
+
+ * Ansible 2.5 or later must be installed.
+
+  * We recommend installing the latest version with pip: ``pip install pyvmomi`` on the Ansible control node
+ (as the OS packages are usually out of date and incompatible) if you are planning to use any existing VMware modules.
+
+* Hardware
+
+ * vCenter Server 6.5 and above with at least one ESXi server
+
+* Access / Credentials
+
+ * Ansible (or the target server) must have network access to either the vCenter server or the ESXi server
+
+ * Username and Password for vCenter
+
+Caveats
+=======
+
+- All variable names and VMware object names are case sensitive.
+- You need Python 2.7.9 or later in order to use the ``validate_certs`` option, as this version is capable of changing the SSL verification behaviour.
+- The VMware HTTP APIs were introduced in vSphere 6.5, so the minimum vSphere version required is 6.5.
+- Only a very limited number of APIs are exposed, so you may need to rely on the XMLRPC based VMware modules.
+
+
+Example Description
+===================
+
+With the following Ansible playbook you can find the VMware ESXi host system(s) and perform various tasks depending on the list of host systems.
+This is a generic example to show how Ansible can be utilized to consume VMware HTTP APIs.
+
+.. code-block:: yaml
+
+ ---
+ - name: Example showing VMware HTTP API utilization
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vcenter_vars.yml
+ vars:
+ ansible_python_interpreter: "/usr/bin/env python3"
+ tasks:
+      - name: Log in to vCenter and get cookies
+ uri:
+ url: https://{{ vcenter_server }}/rest/com/vmware/cis/session
+ force_basic_auth: yes
+ validate_certs: no
+ method: POST
+ user: "{{ vcenter_user }}"
+ password: "{{ vcenter_pass }}"
+ register: login
+
+ - name: Get all hosts from vCenter using cookies from last task
+ uri:
+ url: https://{{ vcenter_server }}/rest/vcenter/host
+ force_basic_auth: yes
+ validate_certs: no
+ headers:
+ Cookie: "{{ login.set_cookie }}"
+ register: vchosts
+
+ - name: Change Log level configuration of the given hostsystem
+ vmware_host_config_manager:
+ hostname: "{{ vcenter_server }}"
+ username: "{{ vcenter_user }}"
+ password: "{{ vcenter_pass }}"
+ esxi_hostname: "{{ item.name }}"
+ options:
+ 'Config.HostAgent.log.level': 'error'
+ validate_certs: no
+ loop: "{{ vchosts.json.value }}"
+ register: host_config_results
+
+
+Since Ansible utilizes the VMware HTTP API using the ``uri`` module to perform actions, in this use case it will be connecting directly to the VMware HTTP API from localhost.
+
+This means that playbooks will not be running from the vCenter or ESXi Server.
+
+Note that this play disables the ``gather_facts`` parameter, since you don't want to collect facts about localhost.
+
+Before you begin, make sure you have:
+
+- Hostname of the vCenter server
+- Username and password for the vCenter server
+- Version of vCenter is at least 6.5
+
+For now, you will be entering these directly, but in a more advanced playbook this can be abstracted out and stored in a more secure fashion using :ref:`ansible-vault` or using `Ansible Tower credentials <https://docs.ansible.com/ansible-tower/latest/html/userguide/credentials.html>`_.
+
+If your vCenter server is not set up with proper CA certificates that can be verified from the Ansible server, then it is necessary to disable validation of these certificates by using the ``validate_certs`` parameter. To do this you need to set ``validate_certs=False`` in your playbook.
+
+As you can see, we are using the ``uri`` module in the first task to log in to the vCenter server and store the result in the ``login`` variable using ``register``. In the second task, we use the cookies from the first task to gather information about the ESXi host systems.
+
+Using this information, we then change the advanced configuration of each ESXi host system.
+
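+The session created in the first task stays open on the vCenter server until it times out. As a minimal sketch, you could close it explicitly with one more ``uri`` task against the same session endpoint:
+
+.. code-block:: yaml
+
+    - name: Logout from vCenter to close the session
+      uri:
+        url: https://{{ vcenter_server }}/rest/com/vmware/cis/session
+        validate_certs: no
+        method: DELETE
+        headers:
+          Cookie: "{{ login.set_cookie }}"
+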
+What to expect
+--------------
+
+Running this playbook can take some time, depending on your environment and network connectivity. When the run is complete you will see output like this:
+
+.. code-block:: yaml
+
+ "results": [
+ {
+ ...
+ "invocation": {
+ "module_args": {
+ "cluster_name": null,
+ "esxi_hostname": "10.76.33.226",
+ "hostname": "10.65.223.114",
+ "options": {
+ "Config.HostAgent.log.level": "error"
+ },
+ "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "port": 443,
+ "username": "administrator@vsphere.local",
+ "validate_certs": false
+ }
+ },
+ "item": {
+ "connection_state": "CONNECTED",
+ "host": "host-21",
+ "name": "10.76.33.226",
+ "power_state": "POWERED_ON"
+ },
+ "msg": "Config.HostAgent.log.level changed."
+ ...
+ }
+ ]
+
+
+Troubleshooting
+---------------
+
+If your playbook fails:
+
+- Check if the values provided for username and password are correct.
+- Check that you are using vCenter 6.5 or later, since the HTTP APIs are not available in earlier versions.
+
+.. seealso::
+
+ `VMware vSphere and Ansible From Zero to Useful by @arielsanchezmor <https://www.youtube.com/watch?v=0_qwOKlBlo8>`_
+ vBrownBag session video related to VMware HTTP APIs
+ `Sample Playbooks for using VMware HTTP APIs <https://github.com/Akasurde/ansible-vmware-http>`_
+ GitHub repo for examples of Ansible playbook to manage VMware using HTTP APIs
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst
new file mode 100644
index 00000000..ce1e831a
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_concepts.rst
@@ -0,0 +1,45 @@
+.. _vmware_concepts:
+
+***************************
+Ansible for VMware Concepts
+***************************
+
+Some of these concepts are common to all uses of Ansible, including VMware automation; some are specific to VMware. You need to understand them to use Ansible for VMware automation. This introduction provides the background you need to follow the :ref:`scenarios<vmware_scenarios>` in this guide.
+
+.. contents::
+ :local:
+
+Control Node
+============
+
+Any machine with Ansible installed. You can run commands and playbooks, invoking ``/usr/bin/ansible`` or ``/usr/bin/ansible-playbook``, from any control node. You can use any computer that has Python installed on it as a control node - laptops, shared desktops, and servers can all run Ansible. However, you cannot use a Windows machine as a control node. You can have multiple control nodes.
+
+Delegation
+==========
+
+Delegation allows you to select the system that executes a given task. If you do not have ``pyVmomi`` installed on your control node, use the ``delegate_to`` keyword on VMware-specific tasks to execute them on any host where you have ``pyVmomi`` installed.
+
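+As a minimal sketch, the task below delegates a VMware call to a hypothetical host named ``pyvmomi_host`` that has ``pyVmomi`` installed; the connection variables are placeholders:
+
+.. code-block:: yaml
+
+    - name: Gather facts about a virtual machine from a host with pyVmomi
+      vmware_guest_facts:
+        hostname: "{{ vcenter_server }}"
+        username: "{{ vcenter_user }}"
+        password: "{{ vcenter_pass }}"
+        datacenter: "{{ datacenter }}"
+        name: "{{ vm_name }}"
+      delegate_to: pyvmomi_host
+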
+Modules
+=======
+
+The units of code Ansible executes. Each module has a particular use, from creating virtual machines on vCenter to managing distributed virtual switches in the vCenter environment. You can invoke a single module with a task, or invoke several different modules in a playbook. For an idea of how many modules Ansible includes, take a look at the :ref:`list of cloud modules<cloud_modules>`, which includes VMware modules.
+
+Playbooks
+=========
+
+Ordered lists of tasks, saved so you can run those tasks in that order repeatedly. Playbooks can include variables as well as tasks. Playbooks are written in YAML and are easy to read, write, share and understand.
+
+pyVmomi
+=======
+
+Ansible VMware modules are written on top of `pyVmomi <https://github.com/vmware/pyvmomi>`_. ``pyVmomi`` is the official Python SDK for the VMware vSphere API that allows users to manage ESX, ESXi, and vCenter infrastructure.
+
+You need to install this Python SDK on the host from which you want to invoke VMware automation. For example, if you are running automation from the control node, then ``pyVmomi`` must be installed on the control node.
+
+If you are using a ``delegate_to`` host which is different from your control node, then you need to install ``pyVmomi`` on that ``delegate_to`` node.
+
+You can install pyVmomi using pip:
+
+.. code-block:: bash
+
+ $ pip install pyvmomi
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst
new file mode 100644
index 00000000..b50837dd
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_external_doc_links.rst
@@ -0,0 +1,11 @@
+.. _vmware_external_doc_links:
+
+*****************************
+Other useful VMware resources
+*****************************
+
+* `VMware API and SDK Documentation <https://www.vmware.com/support/pubs/sdk_pubs.html>`_
+* `VCSIM test container image <https://quay.io/repository/ansible/vcenter-test-container>`_
+* `Ansible VMware community wiki page <https://github.com/ansible/community/wiki/VMware>`_
+* `VMware's official Guest Operating system customization matrix <https://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf>`_
+* `VMware Compatibility Guide <https://www.vmware.com/resources/compatibility/search.php>`_
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst
new file mode 100644
index 00000000..fc5691b7
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_getting_started.rst
@@ -0,0 +1,9 @@
+:orphan:
+
+.. _vmware_ansible_getting_started:
+
+***************************************
+Getting Started with Ansible for VMware
+***************************************
+
+This page will contain a basic "hello world" scenario/walkthrough that introduces the user to the basics.
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst
new file mode 100644
index 00000000..7006e665
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_intro.rst
@@ -0,0 +1,53 @@
+.. _vmware_ansible_intro:
+
+**********************************
+Introduction to Ansible for VMware
+**********************************
+
+.. contents:: Topics
+
+Introduction
+============
+
+Ansible provides various modules to manage VMware infrastructure, including datacenters, clusters,
+host systems and virtual machines.
+
+Requirements
+============
+
+Ansible VMware modules are written on top of `pyVmomi <https://github.com/vmware/pyvmomi>`_.
+pyVmomi is the Python SDK for the VMware vSphere API that allows users to manage ESX, ESXi,
+and vCenter infrastructure. You can install pyVmomi using pip:
+
+.. code-block:: bash
+
+ $ pip install pyvmomi
+
+Ansible VMware modules leveraging the latest vSphere (6.0+) features use the `vSphere Automation Python SDK <https://github.com/vmware/vsphere-automation-sdk-python>`_. The vSphere Automation Python SDK also has client libraries, documentation, and sample code for the VMware Cloud on AWS Console APIs, NSX VMware Cloud on AWS integration APIs, VMware Cloud on AWS site recovery APIs, and NSX-T APIs.
+
+You can install vSphere Automation Python SDK using pip:
+
+.. code-block:: bash
+
+ $ pip install --upgrade git+https://github.com/vmware/vsphere-automation-sdk-python.git
+
+.. note::
+
+    Installing the vSphere Automation Python SDK also installs ``pyvmomi``. A separate installation of ``pyvmomi`` is not required.
+
+vmware_guest module
+===================
+
+The :ref:`vmware_guest<vmware_guest_module>` module manages various operations related to virtual machines in the given ESXi or vCenter server.
+
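+As a minimal sketch, the following task ensures that a virtual machine is powered on; the server and credential variables are placeholders:
+
+.. code-block:: yaml
+
+    - name: Ensure the virtual machine is powered on
+      vmware_guest:
+        hostname: "{{ vcenter_server }}"
+        username: "{{ vcenter_user }}"
+        password: "{{ vcenter_pass }}"
+        validate_certs: no
+        name: test_vm_0001
+        state: poweredon
+      delegate_to: localhost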
+
+.. seealso::
+
+ `pyVmomi <https://github.com/vmware/pyvmomi>`_
+ The GitHub Page of pyVmomi
+ `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_
+ The issue tracker for the pyVmomi project
+ `govc <https://github.com/vmware/govmomi/tree/master/govc>`_
+ govc is a vSphere CLI built on top of govmomi
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst
new file mode 100644
index 00000000..f942dd00
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory.rst
@@ -0,0 +1,90 @@
+.. _vmware_ansible_inventory:
+
+*************************************
+Using VMware dynamic inventory plugin
+*************************************
+
+.. contents:: Topics
+
+VMware Dynamic Inventory Plugin
+===============================
+
+
+The best way to interact with your hosts is to use the VMware dynamic inventory plugin, which dynamically queries VMware APIs and
+tells Ansible what nodes can be managed.
+
+Requirements
+------------
+
+To use the VMware dynamic inventory plugins, you must install `pyVmomi <https://github.com/vmware/pyvmomi>`_
+on your control node (the host running Ansible).
+
+To include tag-related information for the virtual machines in your dynamic inventory, you also need the `vSphere Automation SDK <https://code.vmware.com/web/sdk/65/vsphere-automation-python>`_, which supports REST API features like tagging and content libraries, on your control node.
+You can install the ``vSphere Automation SDK`` following `these instructions <https://github.com/vmware/vsphere-automation-sdk-python#installing-required-python-packages>`_.
+
+.. code-block:: bash
+
+ $ pip install pyvmomi
+
+To use this VMware dynamic inventory plugin, you need to enable it first by specifying the following in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins = vmware_vm_inventory
+
+Then, create a file that ends in ``.vmware.yml`` or ``.vmware.yaml`` in your working directory.
+
+The ``vmware_vm_inventory`` inventory plugin takes in the same authentication information as any VMware module.
+
+Here's an example of a valid inventory file:
+
+.. code-block:: yaml
+
+ plugin: vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: True
+
+
+Executing ``ansible-inventory --list -i <filename>.vmware.yml`` will create a list of VMware instances that are ready to be configured using Ansible.
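+
+For example, with a hypothetical configuration file named ``myvcenter.vmware.yml``:
+
+.. code-block:: bash
+
+    $ ansible-inventory --list -i myvcenter.vmware.yml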
+
+Using vaulted configuration files
+=================================
+
+Since the inventory configuration file contains the vCenter password in plain text, which is a security risk, you may want to
+encrypt your entire inventory configuration file.
+
+You can encrypt a valid inventory configuration file as follows:
+
+.. code-block:: bash
+
+ $ ansible-vault encrypt <filename>.vmware.yml
+ New Vault password:
+ Confirm New Vault password:
+ Encryption successful
+
+You can then use this vaulted inventory configuration file as follows:
+
+.. code-block:: bash
+
+    $ ansible-inventory -i <filename>.vmware.yml --list --vault-password-file=/path/to/vault_password_file
+
+
+.. seealso::
+
+ `pyVmomi <https://github.com/vmware/pyvmomi>`_
+ The GitHub Page of pyVmomi
+ `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_
+ The issue tracker for the pyVmomi project
+ `vSphere Automation SDK GitHub Page <https://github.com/vmware/vsphere-automation-sdk-python>`_
+ The GitHub Page of vSphere Automation SDK for Python
+ `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_
+ The issue tracker for vSphere Automation SDK for Python
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_vault`
+ Using Vault in playbooks
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst
new file mode 100644
index 00000000..1208dcad
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_filters.rst
@@ -0,0 +1,216 @@
+.. _vmware_ansible_inventory_using_filters:
+
+***********************************************
+Using VMware dynamic inventory plugin - Filters
+***********************************************
+
+.. contents::
+ :local:
+
+VMware dynamic inventory plugin - filtering VMware guests
+=========================================================
+
+
+The VMware inventory plugin allows you to filter VMware guests using the ``filters`` configuration parameter.
+
+This section shows how you configure ``filters`` for the given VMware guest in the inventory.
+
+Requirements
+------------
+
+To use the VMware dynamic inventory plugins, you must install `pyVmomi <https://github.com/vmware/pyvmomi>`_
+on your control node (the host running Ansible).
+
+To include tag-related information for the virtual machines in your dynamic inventory, you also need the `vSphere Automation SDK <https://code.vmware.com/web/sdk/65/vsphere-automation-python>`_, which supports REST API features such as tagging and content libraries, on your control node.
+You can install the ``vSphere Automation SDK`` following `these instructions <https://github.com/vmware/vsphere-automation-sdk-python#installing-required-python-packages>`_.
+
+.. code-block:: bash
+
+ $ pip install pyvmomi
+
+Starting in Ansible 2.10, the VMware dynamic inventory plugin is available in the ``community.vmware`` collection included in Ansible.
+Alternatively, to install the latest ``community.vmware`` collection:
+
+.. code-block:: bash
+
+ $ ansible-galaxy collection install community.vmware
+
+To use this VMware dynamic inventory plugin:
+
+1. Enable it first by specifying the following in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins = community.vmware.vmware_vm_inventory
+
+2. Create a file that ends in ``vmware.yml`` or ``vmware.yaml`` in your working directory.
+
+The ``vmware_vm_inventory`` inventory plugin takes in the same authentication information as any other VMware module does.
+
+Let us assume we want to list all RHEL7 VMs with the power state "poweredOn". A valid inventory file with filters for the given VMware guests looks as follows:
+
+.. code-block:: yaml
+
+ plugin: community.vmware.vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: False
+ hostnames:
+ - config.name
+ filters:
+ - config.guestId == "rhel7_64Guest"
+ - summary.runtime.powerState == "poweredOn"
+
+
+Here, we have configured two filters:
+
+* ``config.guestId`` is equal to ``rhel7_64Guest``
+* ``summary.runtime.powerState`` is equal to ``poweredOn``
+
+This retrieves all the VMs which satisfy these two conditions and populates them in the inventory.
+Notice that the conditions are combined using an ``and`` operation.
+
+Using ``or`` conditions in filters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let us assume you want to filter RHEL7 and Ubuntu VMs. You can combine multiple filters using the ``or`` condition in your inventory file.
+
+A valid filter in the VMware inventory file for this example is:
+
+.. code-block:: yaml
+
+ plugin: community.vmware.vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: False
+ hostnames:
+ - config.name
+ filters:
+ - config.guestId == "rhel7_64Guest" or config.guestId == "ubuntu64Guest"
+
+
+You can check all the properties allowed in ``filters`` for the given virtual machine at :ref:`vmware_inventory_vm_attributes`.
+
+If you are using the ``properties`` parameter with custom VM properties, make sure that you include all the properties used by filters as well in your VM property list.
+
+For example, if we want all RHEL7 and Ubuntu VMs that are powered on, you can use the following inventory file:
+
+.. code-block:: yaml
+
+ plugin: community.vmware.vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: False
+ hostnames:
+ - 'config.name'
+ properties:
+ - 'config.name'
+ - 'config.guestId'
+ - 'guest.ipAddress'
+ - 'summary.runtime.powerState'
+ filters:
+ - config.guestId == "rhel7_64Guest" or config.guestId == "ubuntu64Guest"
+ - summary.runtime.powerState == "poweredOn"
+
+Here, we are using a minimal set of VM properties, that is ``config.name``, ``config.guestId``, ``summary.runtime.powerState``, and ``guest.ipAddress``.
+
+* ``config.name`` is used by the ``hostnames`` parameter.
+* ``config.guestId`` and ``summary.runtime.powerState`` are used by the ``filters`` parameter.
+* ``guest.ipAddress`` is used to set ``ansible_host`` internally by the inventory plugin.
+
+Using regular expression in filters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Let us assume you want to filter VMs within a specific IP range. You can use a regular expression in ``filters`` in your inventory file.
+
+For example, if we want all VMs with an IP address starting with ``192.168``, you can use the following inventory file:
+
+.. code-block:: yaml
+
+ plugin: community.vmware.vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: False
+ hostnames:
+ - 'config.name'
+ properties:
+ - 'config.name'
+ - 'config.guestId'
+ - 'guest.ipAddress'
+ - 'summary.runtime.powerState'
+ filters:
+ - guest.ipAddress is defined and guest.ipAddress is match('192.168.*')
+
+Here, we are using the ``guest.ipAddress`` VM property. This property is optional and depends upon the VMware Tools installed on the VM.
+We are using ``match`` to test the regular expression against the given IP range.
+
+Executing ``ansible-inventory --list -i <filename>.vmware.yml`` creates a list of the virtual machines that are ready to be configured using Ansible.
+
+What to expect
+--------------
+
+You will notice that the inventory hosts are filtered depending on your ``filters`` section.
+
+
+.. code-block:: yaml
+
+ {
+ "_meta": {
+ "hostvars": {
+ "template_001": {
+ "config.name": "template_001",
+ "config.guestId": "ubuntu64Guest",
+ ...
+ "guest.toolsStatus": "toolsNotInstalled",
+ "summary.runtime.powerState": "poweredOn",
+ },
+ "vm_8046": {
+ "config.name": "vm_8046",
+ "config.guestId": "rhel7_64Guest",
+ ...
+ "guest.toolsStatus": "toolsNotInstalled",
+ "summary.runtime.powerState": "poweredOn",
+ },
+ ...
+ }
+
+Troubleshooting filters
+-----------------------
+
+If the filter expressions specified in ``filters`` fail:
+
+- Check if the values provided for username and password are correct.
+- Make sure it is a valid property, see :ref:`vmware_inventory_vm_attributes`.
+- Use ``strict: True`` to get more information about the error.
+- Make sure that you are using the latest version of the VMware collection.
+
+
+.. seealso::
+
+ `pyVmomi <https://github.com/vmware/pyvmomi>`_
+ The GitHub Page of pyVmomi
+ `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_
+ The issue tracker for the pyVmomi project
+ `vSphere Automation SDK GitHub Page <https://github.com/vmware/vsphere-automation-sdk-python>`_
+ The GitHub Page of vSphere Automation SDK for Python
+ `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_
+ The issue tracker for vSphere Automation SDK for Python
+ :ref:`vmware_inventory_vm_attributes`
+ Using Virtual machine attributes in VMware dynamic inventory plugin
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_vault`
+ Using Vault in playbooks
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst
new file mode 100644
index 00000000..9d284562
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_hostnames.rst
@@ -0,0 +1,128 @@
+.. _vmware_ansible_inventory_using_hostnames:
+
+*************************************************
+Using VMware dynamic inventory plugin - Hostnames
+*************************************************
+
+.. contents::
+ :local:
+
+VMware dynamic inventory plugin - customizing hostnames
+=======================================================
+
+
+The VMware inventory plugin allows you to configure hostnames using the ``hostnames`` configuration parameter.
+
+This scenario guide shows how you can configure hostnames from the given VMware guest in the inventory.
+
+Requirements
+------------
+
+To use the VMware dynamic inventory plugins, you must install `pyVmomi <https://github.com/vmware/pyvmomi>`_
+on your control node (the host running Ansible).
+
+To include tag-related information for the virtual machines in your dynamic inventory, you also need the `vSphere Automation SDK <https://code.vmware.com/web/sdk/65/vsphere-automation-python>`_, which supports REST API features such as tagging and content libraries, on your control node.
+You can install the ``vSphere Automation SDK`` following `these instructions <https://github.com/vmware/vsphere-automation-sdk-python#installing-required-python-packages>`_.
+
+.. code-block:: bash
+
+ $ pip install pyvmomi
+
+Starting in Ansible 2.10, the VMware dynamic inventory plugin is available in the ``community.vmware`` collection included in Ansible.
+To install the latest ``community.vmware`` collection:
+
+.. code-block:: bash
+
+ $ ansible-galaxy collection install community.vmware
+
+To use this VMware dynamic inventory plugin:
+
+1. Enable it first by specifying the following in the ``ansible.cfg`` file:
+
+.. code-block:: ini
+
+ [inventory]
+ enable_plugins = community.vmware.vmware_vm_inventory
+
+2. Create a file that ends in ``vmware.yml`` or ``vmware.yaml`` in your working directory.
+
+The ``vmware_vm_inventory`` inventory plugin takes in the same authentication information as any other VMware module does.
+
+Here's an example of a valid inventory file with a custom hostname for the given VMware guest:
+
+.. code-block:: yaml
+
+ plugin: community.vmware.vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: False
+ hostnames:
+ - config.name
+
+
+Here, we have configured a custom hostname by setting the ``hostnames`` parameter to ``config.name``. This will retrieve
+the ``config.name`` property from the virtual machine and populate it in the inventory.
+
+You can check all allowed properties for the given virtual machine at :ref:`vmware_inventory_vm_attributes`.
+
+Executing ``ansible-inventory --list -i <filename>.vmware.yml`` creates a list of the virtual machines that are ready to be configured using Ansible.
+
+What to expect
+--------------
+
+You will notice that instead of the default behavior of representing the hostname as ``config.name + _ + config.uuid``,
+the inventory hosts show the value of ``config.name``.
+
+
+.. code-block:: yaml
+
+ {
+ "_meta": {
+ "hostvars": {
+ "template_001": {
+ "config.name": "template_001",
+ "guest.toolsRunningStatus": "guestToolsNotRunning",
+ ...
+ "guest.toolsStatus": "toolsNotInstalled",
+ "name": "template_001"
+ },
+ "vm_8046": {
+ "config.name": "vm_8046",
+ "guest.toolsRunningStatus": "guestToolsNotRunning",
+ ...
+ "guest.toolsStatus": "toolsNotInstalled",
+ "name": "vm_8046"
+ },
+ ...
+ }
+
+Troubleshooting
+---------------
+
+If the custom property specified in ``hostnames`` fails:
+
+- Check if the values provided for username and password are correct.
+- Make sure it is a valid property, see :ref:`vmware_inventory_vm_attributes`.
+- Use ``strict: True`` to get more information about the error.
+- Make sure that you are using the latest version of the VMware collection.
+
+
+.. seealso::
+
+ `pyVmomi <https://github.com/vmware/pyvmomi>`_
+ The GitHub Page of pyVmomi
+ `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_
+ The issue tracker for the pyVmomi project
+ `vSphere Automation SDK GitHub Page <https://github.com/vmware/vsphere-automation-sdk-python>`_
+ The GitHub Page of vSphere Automation SDK for Python
+ `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_
+ The issue tracker for vSphere Automation SDK for Python
+ :ref:`vmware_inventory_vm_attributes`
+ Using Virtual machine attributes in VMware dynamic inventory plugin
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_vault`
+ Using Vault in playbooks
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst
new file mode 100644
index 00000000..089c13d7
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst
@@ -0,0 +1,1183 @@
+.. _vmware_inventory_vm_attributes:
+
+*******************************************************************
+Using Virtual machine attributes in VMware dynamic inventory plugin
+*******************************************************************
+
+.. contents:: Topics
+
+Virtual machine attributes
+==========================
+
+You can use the following virtual machine properties to populate ``hostvars`` for the given
+virtual machine in a VMware dynamic inventory plugin.
+
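+For example, any of the properties listed below can be referenced in the ``properties``, ``hostnames``, or ``filters`` sections of the inventory configuration file shown in the previous scenarios:
+
+.. code-block:: yaml
+
+    properties:
+      - 'config.name'
+      - 'config.guestId'
+      - 'config.template'
+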
+capability
+----------
+
+This section describes settings for the runtime capabilities of the virtual machine.
+
+snapshotOperationsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports snapshot operations.
+
+multipleSnapshotsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports multiple snapshots.
+ This value is not set when the virtual machine is unavailable, for instance, when it is being created or deleted.
+
+snapshotConfigSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports snapshot config.
+
+poweredOffSnapshotsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports snapshot operations in ``poweredOff`` state.
+
+memorySnapshotsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports memory snapshots.
+
+revertToSnapshotSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports reverting to a snapshot.
+
+quiescedSnapshotsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports quiesced snapshots.
+
+disableSnapshotsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not snapshots can be disabled.
+
+lockSnapshotsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not the snapshot tree can be locked.
+
+consolePreferencesSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether console preferences can be set for the virtual machine.
+
+cpuFeatureMaskSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether CPU feature requirements masks can be set for the virtual machine.
+
+s1AcpiManagementSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not a virtual machine supports ACPI S1 settings management.
+
+settingScreenResolutionSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not the virtual machine supports setting the screen resolution of the console window.
+
+toolsAutoUpdateSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Supports tools auto-update.
+
+vmNpivWwnSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Supports virtual machine NPIV WWN.
+
+npivWwnOnNonRdmVmSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Supports assigning NPIV WWN to virtual machines that do not have RDM disks.
+
+vmNpivWwnDisableSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the NPIV disabling operation is supported on the virtual machine.
+
+vmNpivWwnUpdateSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the update of NPIV WWNs is supported on the virtual machine.
+
+swapPlacementSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Flag indicating whether the virtual machine has a configurable swapfile placement policy.
+
+toolsSyncTimeSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether asking tools to sync time with the host is supported.
+
+virtualMmuUsageSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not the use of nested page table hardware support can be explicitly set.
+
+diskSharesSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether resource settings for disks can be applied to the virtual machine.
+
+bootOptionsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether boot options can be configured for the virtual machine.
+
+bootRetryOptionsSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether automatic boot retry can be configured for the virtual machine.
+
+settingVideoRamSizeSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Flag indicating whether the video RAM size of the virtual machine can be configured.
+
+settingDisplayTopologySupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not the virtual machine supports setting the display topology of the console window.
+
+recordReplaySupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether record and replay functionality is supported on the virtual machine.
+
+changeTrackingSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates that change tracking is supported for virtual disks of the virtual machine.
+ However, even if change tracking is supported, it might not be available for all disks of the virtual machine.
+ For example, passthru raw disk mappings or disks backed by any Ver1BackingInfo cannot be tracked.
+
+multipleCoresPerSocketSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether multiple virtual cores per socket is supported on the virtual machine.
+
+hostBasedReplicationSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates that host based replication is supported on the virtual machine.
+ However, even if host based replication is supported, it might not be available for all disk types.
+ For example, passthru raw disk mappings can not be replicated.
+
+guestAutoLockSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not guest autolock is supported on the virtual machine.
+
+memoryReservationLockSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether :ref:`memory_reservation_locked_to_max` may be set to true for the virtual machine.
+
+featureRequirementSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the featureRequirement feature is supported.
+
+poweredOnMonitorTypeChangeSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether a monitor type change is supported while the virtual machine is in the ``poweredOn`` state.
+
+seSparseDiskSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the virtual machine supports the Flex-SE (space-efficient, sparse) format for virtual disks.
+
+nestedHVSupported (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the virtual machine supports nested hardware-assisted virtualization.
+
+vPMCSupported (bool)
+^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the virtual machine supports virtualized CPU performance counters.
+
+
+config
+------
+
+This section describes the configuration settings of the virtual machine, including the name and UUID.
+This property is set when a virtual machine is created or when the ``reconfigVM`` method is called.
+The virtual machine configuration is not guaranteed to be available.
+For example, the configuration information would be unavailable if the server is unable to access the virtual machine files on disk, and is often also unavailable during the initial phases of virtual machine creation.
+
+changeVersion (str)
+^^^^^^^^^^^^^^^^^^^
+
+ The changeVersion is a unique identifier for a given version of the configuration.
+ Each change to the configuration updates this value. This is typically implemented as an ever increasing count or a time-stamp.
+ However, a client should always treat this as an opaque string.
+
+modified (datetime)
+^^^^^^^^^^^^^^^^^^^
+
+ Last time a virtual machine's configuration was modified.
+
+name (str)
+^^^^^^^^^^
+
+ Display name of the virtual machine. Any / (slash) or \ (backslash) character used in this name element is escaped. Similarly, any % (percent) character used in this name element is escaped, unless it is used to start an escape sequence. A slash is escaped as %2F or %2f. A backslash is escaped as %5C or %5c, and a percent is escaped as %25.
+
+.. _guest_full_name:
+
+guestFullName (str)
+^^^^^^^^^^^^^^^^^^^
+
+ This is the full name of the guest operating system for the virtual machine. For example: Windows 2000 Professional. See :ref:`alternate_guest_name`.
+
+version (str)
+^^^^^^^^^^^^^
+
+ The version string for the virtual machine.
+
+uuid (str)
+^^^^^^^^^^
+
+ 128-bit SMBIOS UUID of a virtual machine represented as a hexadecimal string in "12345678-abcd-1234-cdef-123456789abc" format.
+
+instanceUuid (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ VirtualCenter-specific 128-bit UUID of a virtual machine, represented as a hexadecimal string. This identifier is used by VirtualCenter to uniquely identify all virtual machine instances, including those that may share the same SMBIOS UUID.
+
+npivNodeWorldWideName (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A 64-bit node WWN (World Wide Name).
+
+npivPortWorldWideName (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A 64-bit port WWN (World Wide Name).
+
+npivWorldWideNameType (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The source that provides/generates the assigned WWNs.
+
+npivDesiredNodeWwns (short, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The NPIV node WWNs to be extended from the original list of WWN numbers.
+
+npivDesiredPortWwns (short, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The NPIV port WWNs to be extended from the original list of WWN numbers.
+
+npivTemporaryDisabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ This property is used to enable or disable the NPIV capability on a desired virtual machine on a temporary basis.
+
+npivOnNonRdmDisks (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ This property is used to check whether NPIV can be enabled on a virtual machine with non-RDM disks in the configuration, so this potentially does not enable NPIV on VMFS disks.
+ This property is also used to check whether RDM is required to generate WWNs for a virtual machine.
+
+locationId (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Hash incorporating the virtual machine's config file location and the UUID of the host assigned to run the virtual machine.
+
+template (bool)
+^^^^^^^^^^^^^^^
+
+ Flag indicating whether or not a virtual machine is a template.
+
+guestId (str)
+^^^^^^^^^^^^^
+
+ Guest operating system configured on a virtual machine.
+
+.. _alternate_guest_name:
+
+alternateGuestName (str)
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Used as the display name for the operating system if ``guestId`` is ``other`` or ``other-64``. See :ref:`guest_full_name`.
+
+annotation (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Description for the virtual machine.
+
+files (vim.vm.FileInfo)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Information about the files associated with a virtual machine.
+ This information does not include files for specific virtual disks or snapshots.
+
+tools (vim.vm.ToolsConfigInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Configuration of VMware Tools running in the guest operating system.
+
+flags (vim.vm.FlagInfo)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Additional flags for a virtual machine.
+
+consolePreferences (vim.vm.ConsolePreferences, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Legacy console viewer preferences when doing power operations.
+
+defaultPowerOps (vim.vm.DefaultPowerOpInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Configuration of default power operations.
+
+hardware (vim.vm.VirtualHardware)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Processor, memory, and virtual devices for a virtual machine.
+
+cpuAllocation (vim.ResourceAllocationInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Resource limits for CPU.
+
+memoryAllocation (vim.ResourceAllocationInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Resource limits for memory.
+
+latencySensitivity (vim.LatencySensitivity, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The latency-sensitivity of the virtual machine.
+
+memoryHotAddEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Whether memory can be added while the virtual machine is running.
+
+cpuHotAddEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Whether virtual processors can be added while the virtual machine is running.
+
+cpuHotRemoveEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Whether virtual processors can be removed while the virtual machine is running.
+
+hotPlugMemoryLimit (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The maximum amount of memory, in MB, that can be added to a running virtual machine.
+
+hotPlugMemoryIncrementSize (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Memory increment size, in MB, by which memory can be added to a running virtual machine.
+
+cpuAffinity (vim.vm.AffinityInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Affinity settings for CPU.
+
+memoryAffinity (vim.vm.AffinityInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Affinity settings for memory.
+
+networkShaper (vim.vm.NetworkShaperInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Resource limits for network.
+
+extraConfig (vim.option.OptionValue, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Additional configuration information for the virtual machine.
+
+cpuFeatureMask (vim.host.CpuIdInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Specifies CPU feature compatibility masks that override the defaults from the ``GuestOsDescriptor`` of the virtual machine's guest OS.
+
+datastoreUrl (vim.vm.ConfigInfo.DatastoreUrlPair, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Enumerates the set of datastores that the virtual machine is stored on, as well as the URL identification for each of these.
+
+swapPlacement (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Virtual machine swapfile placement policy.
+
+bootOptions (vim.vm.BootOptions, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Configuration options for the boot behavior of the virtual machine.
+
+ftInfo (vim.vm.FaultToleranceConfigInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Fault tolerance settings for the virtual machine.
+
+vAppConfig (vim.vApp.VmConfigInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ vApp meta-data for the virtual machine.
+
+vAssertsEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether user-configured virtual asserts will be triggered during virtual machine replay.
+
+changeTrackingEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether changed block tracking for the virtual machine's disks is active.
+
+firmware (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Information about firmware type for the virtual machine.
+
+maxMksConnections (int, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates the maximum number of active remote display connections that the virtual machine will support.
+
+guestAutoLockEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the guest operating system will log out any active sessions whenever there are no remote display connections open to the virtual machine.
+
+managedBy (vim.ext.ManagedByInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Specifies that the virtual machine is managed by a VC Extension.
+
+.. _memory_reservation_locked_to_max:
+
+memoryReservationLockedToMax (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If set true, memory resource reservation for the virtual machine will always be equal to the virtual machine's memory size; increases in memory size will be rejected when a corresponding reservation increase is not possible.
+
+initialOverhead (vim.vm.ConfigInfo.OverheadInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Set of values to be used only to perform admission control when determining if a host has sufficient resources for the virtual machine to power on.
+
+nestedHVEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the virtual machine is configured to use nested hardware-assisted virtualization.
+
+vPMCEnabled (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether the virtual machine has virtual CPU performance counters enabled.
+
+scheduledHardwareUpgradeInfo (vim.vm.ScheduledHardwareUpgradeInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Configuration of scheduled hardware upgrades and result from last attempt to run scheduled hardware upgrade.
+
+vFlashCacheReservation (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Specifies the total vFlash resource reservation for the vFlash caches associated with the virtual machine's virtual disks, in bytes.
+
+layout
+------
+
+Detailed information about the files that comprise the virtual machine.
+
+configFile (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A list of files that make up the configuration of the virtual machine (excluding the .vmx file, since that file is represented in the FileInfo).
+ These are relative paths from the configuration directory.
+ A slash is always used as a separator.
+ This list will typically include the NVRAM file, but could also include other meta-data files.
+
+logFile (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ A list of files stored in the virtual machine's log directory.
+ These are relative paths from the ``logDirectory``.
+ A slash is always used as a separator.
+
+disk (vim.vm.FileLayout.DiskLayout, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Files making up each virtual disk.
+
+snapshot (vim.vm.FileLayout.SnapshotLayout, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Files of each snapshot.
+
+swapFile (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The swapfile specific to the virtual machine, if any. This is a complete datastore path, not a relative path.
+
+
+layoutEx
+--------
+
+Detailed information about the files that comprise the virtual machine.
+
+file (vim.vm.FileLayoutEx.FileInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Information about all the files that constitute the virtual machine including configuration files, disks, swap file, suspend file, log files, core files, memory file and so on.
+
+disk (vim.vm.FileLayoutEx.DiskLayout, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Layout of each virtual disk attached to the virtual machine.
+ For a virtual machine with snapshots, this property gives only those disks that are attached to it at the current point of running.
+
+snapshot (vim.vm.FileLayoutEx.SnapshotLayout, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Layout of each snapshot of the virtual machine.
+
+timestamp (datetime)
+^^^^^^^^^^^^^^^^^^^^
+
+ Time when values in this structure were last updated.
+
+storage (vim.vm.StorageInfo)
+----------------------------
+
+Storage space used by the virtual machine, split by datastore.
+
+perDatastoreUsage (vim.vm.StorageInfo.UsageOnDatastore, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Storage space used by the virtual machine on all datastores that it is located on.
+ Total storage space committed to the virtual machine across all datastores is simply an aggregate of the ``committed`` property.
+
+timestamp (datetime)
+^^^^^^^^^^^^^^^^^^^^
+
+ Time when values in this structure were last updated.
+
+environmentBrowser (vim.EnvironmentBrowser)
+-------------------------------------------
+
+The current virtual machine's environment browser object.
+This contains information on all the configurations that can be used on the virtual machine.
+This is identical to the environment browser on the ComputeResource to which the virtual machine belongs.
+
+datastoreBrowser (vim.host.DatastoreBrowser)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ DatastoreBrowser to browse datastores that are available on this entity.
+
+resourcePool (vim.ResourcePool)
+-------------------------------
+
+The current resource pool that specifies resource allocation for the virtual machine.
+This property is set when a virtual machine is created or associated with a different resource pool.
+Returns null if the virtual machine is a template or the session has no access to the resource pool.
+
+summary (vim.ResourcePool.Summary)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Basic information about a resource pool.
+
+runtime (vim.ResourcePool.RuntimeInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Runtime information about a resource pool.
+
+owner (vim.ComputeResource)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The ComputeResource to which this set of one or more nested resource pools belongs.
+
+resourcePool (vim.ResourcePool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The set of child resource pools.
+
+vm (vim.VirtualMachine)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ The set of virtual machines associated with this resource pool.
+
+config (vim.ResourceConfigSpec)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Configuration of this resource pool.
+
+childConfiguration (vim.ResourceConfigSpec)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The resource configuration of all direct children (VirtualMachine and ResourcePool) of this resource group.
+
+parentVApp (vim.ManagedEntity)
+------------------------------
+
+Reference to the parent vApp.
+
+parent (vim.ManagedEntity)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Parent of this entity.
+ This value is null for the root object and for (VirtualMachine) objects that are part of a (VirtualApp).
+
+customValue (vim.CustomFieldsManager.Value)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Custom field values.
+
+overallStatus (vim.ManagedEntity.Status)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ General health of this managed entity.
+
+configStatus (vim.ManagedEntity.Status)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The configStatus indicates whether or not the system has detected a configuration issue involving this entity.
+ For example, it might have detected a duplicate IP address or MAC address, or a host in a cluster might be out of compliance.
+
+configIssue (vim.event.Event)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current configuration issues that have been detected for this entity.
+
+effectiveRole (int)
+^^^^^^^^^^^^^^^^^^^
+
+ Access rights the current session has to this entity.
+
+permission (vim.AuthorizationManager.Permission)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ List of permissions defined for this entity.
+
+name (str)
+^^^^^^^^^^
+
+ Name of this entity, unique relative to its parent.
+ Any / (slash) or \ (backslash) character used in this name element will be escaped.
+ Similarly, any % (percent) character used in this name element will be escaped, unless it is used to start an escape sequence.
+ A slash is escaped as %2F or %2f. A backslash is escaped as %5C or %5c, and a percent is escaped as %25.
+
+disabledMethod (str)
+^^^^^^^^^^^^^^^^^^^^
+
+ List of operations that are disabled, given the current runtime state of the entity.
+ For example, a power-on operation always fails if a virtual machine is already powered on.
+
+recentTask (vim.Task)
+^^^^^^^^^^^^^^^^^^^^^
+
+ The set of recent tasks operating on this managed entity.
+ A task in this list can be in one of four states: pending, running, success, or error.
+
+declaredAlarmState (vim.alarm.AlarmState)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A set of alarm states for alarms that apply to this managed entity.
+
+triggeredAlarmState (vim.alarm.AlarmState)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A set of alarm states for alarms triggered by this entity or by its descendants.
+
+alarmActionsEnabled (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Whether alarm actions are enabled for this entity. True if enabled; false otherwise.
+
+tag (vim.Tag)
+^^^^^^^^^^^^^
+
+ The set of tags associated with this managed entity. Experimental. Subject to change.
+
+resourceConfig (vim.ResourceConfigSpec)
+---------------------------------------
+
+ The resource configuration for a virtual machine.
+
+entity (vim.ManagedEntity, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Reference to the entity with this resource specification: either a VirtualMachine or a ResourcePool.
+
+changeVersion (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The changeVersion is a unique identifier for a given version of the configuration. Each change to the configuration will update this value.
+ This is typically implemented as an ever-increasing count or a timestamp.
+
+
+lastModified (datetime, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Timestamp when the resources were last modified. This is ignored when the object is used to update a configuration.
+
+cpuAllocation (vim.ResourceAllocationInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Resource allocation for CPU.
+
+memoryAllocation (vim.ResourceAllocationInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Resource allocation for memory.
+
+runtime (vim.vm.RuntimeInfo)
+----------------------------
+
+Execution state and history for the virtual machine.
+
+device (vim.vm.DeviceRuntimeInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Per-device runtime info. This array will be empty if the host software does not provide runtime info for any of the device types currently in use by the virtual machine.
+
+host (vim.HostSystem, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The host that is responsible for running a virtual machine.
+ This property is null if the virtual machine is not running and is not assigned to run on a particular host.
+
+connectionState (vim.VirtualMachine.ConnectionState)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Indicates whether or not the virtual machine is available for management.
+
+powerState (vim.VirtualMachine.PowerState)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The current power state of the virtual machine.
+
+faultToleranceState (vim.VirtualMachine.FaultToleranceState)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The fault tolerance state of the virtual machine.
+
+dasVmProtection (vim.vm.RuntimeInfo.DasProtectionState, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The vSphere HA protection state for a virtual machine.
+ Property is unset if vSphere HA is not enabled.
+
+toolsInstallerMounted (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Flag to indicate whether or not the VMware Tools installer is mounted as a CD-ROM.
+
+suspendTime (datetime, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The timestamp when the virtual machine was most recently suspended.
+ This property is updated every time the virtual machine is suspended.
+
+bootTime (datetime, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The timestamp when the virtual machine was most recently powered on.
+ This property is updated when the virtual machine is powered on from the poweredOff state, and is cleared when the virtual machine is powered off.
+ This property is not updated when a virtual machine is resumed from a suspended state.
+
+suspendInterval (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The total time the virtual machine has been suspended since it was initially powered on.
+ This time excludes the current period, if the virtual machine is currently suspended.
+ This property is updated when the virtual machine resumes, and is reset to zero when the virtual machine is powered off.
+
+question (vim.vm.QuestionInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The current question, if any, that is blocking the virtual machine's execution.
+
+memoryOverhead (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The amount of memory resource (in bytes) that will be used by the virtual machine above its guest memory requirements.
+ This value is set if and only if the virtual machine is registered on a host that supports memory resource allocation features.
+ For powered off VMs, this is the minimum overhead required to power on the VM on the registered host.
+ For powered on VMs, this is the current overhead reservation, a value which is almost always larger than the minimum overhead, and which grows with time.
+
+maxCpuUsage (int, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current upper-bound on CPU usage.
+ The upper-bound is based on the host the virtual machine is currently running on, as well as limits configured on the virtual machine itself or any parent resource pool.
+ Valid while the virtual machine is running.
+
+maxMemoryUsage (int, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current upper-bound on memory usage.
+ The upper-bound is based on memory configuration of the virtual machine, as well as limits configured on the virtual machine itself or any parent resource pool.
+ Valid while the virtual machine is running.
+
+numMksConnections (int)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Number of active MKS connections to the virtual machine.
+
+recordReplayState (vim.VirtualMachine.RecordReplayState)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Record / replay state of the virtual machine.
+
+cleanPowerOff (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ For a powered off virtual machine, indicates whether the virtual machine's last shutdown was an orderly power off or not.
+ Unset if the virtual machine is running or suspended.
+
+needSecondaryReason (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If set, indicates the reason the virtual machine needs a secondary.
+
+onlineStandby (bool)
+^^^^^^^^^^^^^^^^^^^^
+
+ This property indicates whether the guest has gone into one of the s1, s2 or s3 standby modes. False indicates the guest is awake.
+
+minRequiredEVCModeKey (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ For a powered-on or suspended virtual machine in a cluster with Enhanced vMotion Compatibility (EVC) enabled, this identifies the least-featured EVC mode (among those for the appropriate CPU vendor) that could admit the virtual machine.
+ This property will be unset if the virtual machine is powered off or is not in an EVC cluster.
+ This property may be used as a general indicator of the CPU feature baseline currently in use by the virtual machine.
+ However, the virtual machine may be suppressing some of the features present in the CPU feature baseline of the indicated mode, either explicitly (in the virtual machine's configured ``cpuFeatureMask``) or implicitly (in the default masks for the ``GuestOsDescriptor`` appropriate for the virtual machine's configured guest OS).
+
+consolidationNeeded (bool)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Whether any disk of the virtual machine requires consolidation.
+ This can happen for example when a snapshot is deleted but its associated disk is not committed back to the base disk.
+
+offlineFeatureRequirement (vim.vm.FeatureRequirement, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ These requirements must have equivalent host capabilities ``featureCapability`` in order to power on.
+
+featureRequirement (vim.vm.FeatureRequirement, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ These requirements must have equivalent host capabilities ``featureCapability`` in order to power on, resume, or migrate to the host.
+
+featureMask (vim.host.FeatureMask, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The masks applied to an individual virtual machine as a result of its configuration.
+
+vFlashCacheAllocation (long, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Specifies the total allocated vFlash resource for the vFlash caches associated with the virtual machine's virtual disks when the virtual machine is powered on, in bytes.
+
+
+guest (vim.vm.GuestInfo)
+------------------------
+
+Information about VMware Tools and about the virtual machine from the perspective of VMware Tools.
+Information about the guest operating system is available in VirtualCenter.
+Guest operating system information reflects the last known state of the virtual machine.
+For powered on machines, this is current information.
+For powered off machines, this is the last recorded state before the virtual machine was powered off.
+
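+Much of the information in this section can be retrieved with the ``vmware_guest_info`` module. A minimal sketch (connection values are placeholders, and the returned field name is illustrative of the module's ``instance`` output):
+
+.. code-block:: yaml
+
+   - name: Gather information about a virtual machine
+     vmware_guest_info:
+       hostname: vcenter.example.com
+       username: administrator@vsphere.local
+       password: "{{ vcenter_password }}"
+       validate_certs: no
+       datacenter: DC1
+       name: test-vm
+     delegate_to: localhost
+     register: vm_info
+
+   - name: Show the VMware Tools status reported for the guest
+     debug:
+       msg: "{{ vm_info.instance.guest_tools_status }}"
+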
+toolsStatus (vim.vm.GuestInfo.ToolsStatus, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current status of VMware Tools in the guest operating system, if known.
+
+toolsVersionStatus (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current version status of VMware Tools in the guest operating system, if known.
+
+toolsVersionStatus2 (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current version status of VMware Tools in the guest operating system, if known.
+
+toolsRunningStatus (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current running status of VMware Tools in the guest operating system, if known.
+
+toolsVersion (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current version of VMware Tools, if known.
+
+guestId (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest operating system identifier (short name), if known.
+
+guestFamily (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest operating system family, if known.
+
+guestFullName (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ See :ref:`guest_full_name`.
+
+hostName (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Hostname of the guest operating system, if known.
+
+ipAddress (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Primary IP address assigned to the guest operating system, if known.
+
+net (vim.vm.GuestInfo.NicInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest information about network adapters, if known.
+
+ipStack (vim.vm.GuestInfo.StackInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest information about IP networking stack, if known.
+
+disk (vim.vm.GuestInfo.DiskInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest information about disks.
+ You can obtain Linux guest disk information for the following file system types only: Ext2, Ext3, Ext4, ReiserFS, ZFS, NTFS, VFAT, UFS, PCFS, HFS, and MS-DOS.
+
+screen (vim.vm.GuestInfo.ScreenInfo, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest screen resolution info, if known.
+
+guestState (str)
+^^^^^^^^^^^^^^^^
+
+ Operation mode of guest operating system.
+
+appHeartbeatStatus (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Application heartbeat status.
+
+appState (str, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Application state.
+ If vSphere HA is enabled, the virtual machine is configured for Application Monitoring, and this field's value is ``appStateNeedReset``, then HA will immediately attempt to reset the virtual machine.
+ There are some system conditions which may delay the immediate reset.
+ The immediate reset will be performed as soon as allowed by vSphere HA and ESX.
+ If during these conditions the value is changed to ``appStateOk`` the reset will be cancelled.
+
+guestOperationsReady (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest Operations availability. If true, the virtual machine is ready to process guest operations.
+
+interactiveGuestOperationsReady (bool, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Interactive Guest Operations availability. If true, the virtual machine is ready to process guest operations as the user interacting with the guest desktop.
+
+generationInfo (vim.vm.GuestInfo.NamespaceGenerationInfo, privilege: VirtualMachine.Namespace.EventNotify, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A list of namespaces and their corresponding generation numbers. Only namespaces with non-zero ``maxSizeEventsFromGuest`` are guaranteed to be present here.
+
+
+summary (vim.vm.Summary)
+------------------------
+
+ Basic information about the virtual machine.
+
+vm (vim.VirtualMachine, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Reference to the virtual machine managed object.
+
+runtime (vim.vm.RuntimeInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Runtime and state information of a running virtual machine.
+ Most of this information is also available when a virtual machine is powered off.
+ In that case, it contains information from the last run, if available.
+
+guest (vim.vm.Summary.GuestSummary, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Guest operating system and VMware Tools information.
+
+config (vim.vm.Summary.ConfigSummary)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Basic configuration information about the virtual machine.
+ This information is not available when the virtual machine is unavailable, for instance, when it is being created or deleted.
+
+storage (vim.vm.Summary.StorageSummary, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Storage information of the virtual machine.
+
+quickStats (vim.vm.Summary.QuickStats)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ A set of statistics that are typically updated with near real-time regularity.
+
+overallStatus (vim.ManagedEntity.Status)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Overall alarm status on this node.
+
+customValue (vim.CustomFieldsManager.Value, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Custom field values.
+
+
+datastore (vim.Datastore)
+-------------------------
+
+ A collection of references to the subset of datastore objects in the datacenter that is used by the virtual machine.
+
+info (vim.Datastore.Info)
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Specific information about the datastore.
+
+summary (vim.Datastore.Summary)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Global properties of the datastore.
+
+host (vim.Datastore.HostMount)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Hosts attached to this datastore.
+
+vm (vim.VirtualMachine)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Virtual machines stored on this datastore.
+
+browser (vim.host.DatastoreBrowser)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ DatastoreBrowser used to browse this datastore.
+
+capability (vim.Datastore.Capability)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Capabilities of this datastore.
+
+iormConfiguration (vim.StorageResourceManager.IORMConfigInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Configuration of storage I/O resource management for the datastore.
+ Currently, VMware supports storage I/O resource management only on VMFS volumes of a datastore.
+ This configuration may not be available if the datastore is not accessible from any host, or if the datastore does not have a VMFS volume.
+
+network (vim.Network)
+---------------------
+
+ A collection of references to the subset of network objects in the datacenter that is used by the virtual machine.
+
+name (str)
+^^^^^^^^^^
+
+ Name of this network.
+
+summary (vim.Network.Summary)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Properties of a network.
+
+host (vim.HostSystem)
+^^^^^^^^^^^^^^^^^^^^^
+
+ Hosts attached to this network.
+
+vm (vim.VirtualMachine)
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ Virtual machines using this network.
+
+
+snapshot (vim.vm.SnapshotInfo)
+-------------------------------
+
+Current snapshot and tree.
+The property is valid if snapshots have been created for the virtual machine.
+
+currentSnapshot (vim.vm.Snapshot, optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Current snapshot of the virtual machine. This property is set by calling ``Snapshot.revert`` or ``VirtualMachine.createSnapshot``.
+ This property will be empty when the working snapshot is at the root of the snapshot tree.
+
+rootSnapshotList (vim.vm.SnapshotTree)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Data for the entire set of snapshots for one virtual machine.
+
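+The snapshot tree described here can be gathered with the ``vmware_guest_snapshot_info`` module; a minimal sketch (connection values are placeholders):
+
+.. code-block:: yaml
+
+   - name: Gather snapshot information for a virtual machine
+     vmware_guest_snapshot_info:
+       hostname: vcenter.example.com
+       username: administrator@vsphere.local
+       password: "{{ vcenter_password }}"
+       validate_certs: no
+       datacenter: DC1
+       name: test-vm
+     delegate_to: localhost
+     register: snapshot_info
+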
+rootSnapshot (vim.vm.Snapshot)
+------------------------------
+
+The roots of all snapshot trees for the virtual machine.
+
+config (vim.vm.ConfigInfo)
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ Information about the configuration of the virtual machine when this snapshot was taken.
+ The datastore paths for the virtual machine disks point to the head of the disk chain that represents the disk at this given snapshot.
+
+childSnapshot (vim.vm.Snapshot)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ All snapshots for which this snapshot is the parent.
+
+guestHeartbeatStatus (vim.ManagedEntity.Status)
+-----------------------------------------------
+
+ The guest heartbeat.
+
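+When using the VMware dynamic inventory plugin, the attributes documented above can be requested per virtual machine through the plugin's ``properties`` option. A minimal sketch of an inventory source, assuming the ``vmware_vm_inventory`` plugin (connection values are placeholders):
+
+.. code-block:: yaml
+
+   # vmware.yml
+   plugin: vmware_vm_inventory
+   hostname: vcenter.example.com
+   username: administrator@vsphere.local
+   password: SamplePassword123
+   validate_certs: no
+   with_tags: no
+   properties:
+     - config.cpuHotAddEnabled
+     - runtime.powerState
+     - guest.ipAddress
+     - summary.quickStats
+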
+.. seealso::
+
+ `pyVmomi <https://github.com/vmware/pyvmomi>`_
+ The GitHub Page of pyVmomi
+ `pyVmomi Issue Tracker <https://github.com/vmware/pyvmomi/issues>`_
+ The issue tracker for the pyVmomi project
+    `vSphere Automation SDK for Python <https://github.com/vmware/vsphere-automation-sdk-python>`_
+        The GitHub Page of vSphere Automation SDK for Python
+ `vSphere Automation SDK Issue Tracker <https://github.com/vmware/vsphere-automation-sdk-python/issues>`_
+ The issue tracker for vSphere Automation SDK for Python
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_vault`
+ Using Vault in playbooks
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst
new file mode 100644
index 00000000..3c7de1dd
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_module_reference.rst
@@ -0,0 +1,9 @@
+:orphan:
+
+.. _vmware_ansible_module_index:
+
+***************************
+Ansible VMware Module Guide
+***************************
+
+This will be a listing similar to the module index in our core docs.
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst
new file mode 100644
index 00000000..45e3ec8f
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_requirements.rst
@@ -0,0 +1,44 @@
+.. _vmware_requirements:
+
+********************
+VMware Prerequisites
+********************
+
+.. contents::
+ :local:
+
+Installing SSL Certificates
+===========================
+
+All vCenter and ESXi servers require SSL encryption on all connections to enforce secure communication. You must enable SSL encryption for Ansible by installing the server's SSL certificates on your Ansible control node or delegate node.
+
+If the SSL certificate of your vCenter or ESXi server is not correctly installed on your Ansible control node, you will see the following warning when using Ansible VMware modules:
+
+``Unable to connect to vCenter or ESXi API at xx.xx.xx.xx on TCP/443: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:777)``
+
+To install the SSL certificate for your VMware server and run your Ansible VMware modules in encrypted mode, follow the instructions below for the type of server you are running.
+
+Installing vCenter SSL certificates for Ansible
+-----------------------------------------------
+
+* From any web browser, go to the base URL of the vCenter Server without a port number, such as ``https://vcenter-domain.example.com``.
+
+* Click the "Download trusted root CA certificates" link at the bottom of the grey box on the right and download the file.
+
+* Change the extension of the file to .zip. The file is a ZIP file of all root certificates and all CRLs.
+
+* Extract the contents of the zip file. The extracted directory contains a ``.certs`` directory that contains two types of files. Files with a number as the extension (.0, .1, and so on) are root certificates.
+
+* Install the certificate files as trusted certificates, using the process that is appropriate for your operating system; a sketch for Debian-based systems follows.
+
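+A minimal sketch of that last step for a Debian-based control node, assuming the extracted root certificate is PEM-encoded and saved as ``certs/vcenter-root-ca.0`` (the path and file names are placeholders):
+
+.. code-block:: yaml
+
+   - name: Install the vCenter root CA certificate on the control node
+     hosts: localhost
+     become: yes
+     tasks:
+       - name: Copy the extracted root certificate into the system trust store
+         copy:
+           src: certs/vcenter-root-ca.0
+           dest: /usr/local/share/ca-certificates/vcenter-root-ca.crt
+           mode: '0644'
+
+       - name: Rebuild the certificate bundle
+         command: update-ca-certificates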
+
+Installing ESXi SSL certificates for Ansible
+--------------------------------------------
+
+* Enable the SSH service on ESXi, either by using the Ansible VMware module `vmware_host_service_manager <https://github.com/ansible-collections/vmware/blob/main/plugins/modules/vmware_host_service_manager.py>`_ or manually using the vSphere Web interface.
+
+* SSH to the ESXi server using administrative credentials, and navigate to the ``/etc/vmware/ssl`` directory.
+
+* Secure copy (SCP) the ``rui.crt`` file located in the ``/etc/vmware/ssl`` directory to the Ansible control node.
+
+* Install the certificate file using the process that is appropriate for your operating system.
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst
new file mode 100644
index 00000000..b044740b
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_scenarios.rst
@@ -0,0 +1,16 @@
+.. _vmware_scenarios:
+
+****************************
+Ansible for VMware Scenarios
+****************************
+
+These scenarios teach you how to accomplish common VMware tasks using Ansible. To get started, please select the task you want to accomplish.
+
+.. toctree::
+ :maxdepth: 1
+
+ scenario_clone_template
+ scenario_rename_vm
+ scenario_remove_vm
+ scenario_find_vm_folder
+ scenario_vmware_http
diff --git a/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst
new file mode 100644
index 00000000..3ca5eac2
--- /dev/null
+++ b/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_troubleshooting.rst
@@ -0,0 +1,102 @@
+.. _vmware_troubleshooting:
+
+**********************************
+Troubleshooting Ansible for VMware
+**********************************
+
+.. contents:: Topics
+
+This section lists things that can go wrong and possible ways to fix them.
+
+Debugging Ansible for VMware
+============================
+
+When debugging or creating a new issue, you will need information about your VMware infrastructure. You can get this information using
+`govc <https://github.com/vmware/govmomi/tree/master/govc>`_. For example:
+
+
+.. code-block:: bash
+
+ $ export GOVC_USERNAME=ESXI_OR_VCENTER_USERNAME
+ $ export GOVC_PASSWORD=ESXI_OR_VCENTER_PASSWORD
+ $ export GOVC_URL=https://ESXI_OR_VCENTER_HOSTNAME:443
+ $ govc find /
+
+Known issues with Ansible for VMware
+====================================
+
+
+Network settings with vmware_guest in Ubuntu 18.04
+--------------------------------------------------
+
+Setting the network with ``vmware_guest`` in Ubuntu 18.04 is known to be broken, due to missing support for ``netplan`` in the ``open-vm-tools``.
+This issue is tracked via:
+
+* https://github.com/vmware/open-vm-tools/issues/240
+* https://github.com/ansible/ansible/issues/41133
+
+Potential Workarounds
+^^^^^^^^^^^^^^^^^^^^^
+
+There are several workarounds for this issue.
+
+1) Modify the Ubuntu 18.04 images and install ``ifupdown`` in them via ``sudo apt install ifupdown``.
+   If you do so, you also need to remove ``netplan`` via ``sudo apt remove netplan.io`` and stop ``systemd-networkd`` via ``sudo systemctl disable systemd-networkd``.
+
+2) Generate the ``systemd-networkd`` files with a task in your VMware Ansible role:
+
+.. code-block:: yaml
+
+ - name: make sure cache directory exists
+ file: path="{{ inventory_dir }}/cache" state=directory
+ delegate_to: localhost
+
+ - name: generate network templates
+ template: src=network.j2 dest="{{ inventory_dir }}/cache/{{ inventory_hostname }}.network"
+ delegate_to: localhost
+
+ - name: copy generated files to vm
+ vmware_guest_file_operation:
+ hostname: "{{ vmware_general.hostname }}"
+ username: "{{ vmware_username }}"
+ password: "{{ vmware_password }}"
+ datacenter: "{{ vmware_general.datacenter }}"
+ validate_certs: "{{ vmware_general.validate_certs }}"
+ vm_id: "{{ inventory_hostname }}"
+ vm_username: root
+ vm_password: "{{ template_password }}"
+ copy:
+ src: "{{ inventory_dir }}/cache/{{ inventory_hostname }}.network"
+ dest: "/etc/systemd/network/ens160.network"
+ overwrite: False
+ delegate_to: localhost
+
+ - name: restart systemd-networkd
+ vmware_vm_shell:
+ hostname: "{{ vmware_general.hostname }}"
+ username: "{{ vmware_username }}"
+ password: "{{ vmware_password }}"
+ datacenter: "{{ vmware_general.datacenter }}"
+ folder: /vm
+ vm_id: "{{ inventory_hostname}}"
+ vm_username: root
+ vm_password: "{{ template_password }}"
+ vm_shell: /bin/systemctl
+ vm_shell_args: " restart systemd-networkd"
+ delegate_to: localhost
+
+ - name: restart systemd-resolved
+ vmware_vm_shell:
+ hostname: "{{ vmware_general.hostname }}"
+ username: "{{ vmware_username }}"
+ password: "{{ vmware_password }}"
+ datacenter: "{{ vmware_general.datacenter }}"
+ folder: /vm
+ vm_id: "{{ inventory_hostname}}"
+ vm_username: root
+ vm_password: "{{ template_password }}"
+ vm_shell: /bin/systemctl
+ vm_shell_args: " restart systemd-resolved"
+ delegate_to: localhost
+
+3) Wait for ``netplan`` support in ``open-vm-tools``.
diff --git a/docs/docsite/rst/shared_snippets/basic_concepts.txt b/docs/docsite/rst/shared_snippets/basic_concepts.txt
new file mode 100644
index 00000000..e10e2d4f
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/basic_concepts.txt
@@ -0,0 +1,34 @@
+Control node
+============
+
+Any machine with Ansible installed. You can run Ansible commands and playbooks by invoking the ``ansible`` or ``ansible-playbook`` command from any control node. You can use any computer that has a Python installation as a control node - laptops, shared desktops, and servers can all run Ansible. However, you cannot use a Windows machine as a control node. You can have multiple control nodes.
+
+Managed nodes
+=============
+
+The network devices (and/or servers) you manage with Ansible. Managed nodes are also sometimes called "hosts". Ansible is not installed on managed nodes.
+
+Inventory
+=========
+
+A list of managed nodes. An inventory file is also sometimes called a "hostfile". Your inventory can specify information like IP address for each managed node. An inventory can also organize managed nodes, creating and nesting groups for easier scaling. To learn more about inventory, see :ref:`the Working with Inventory<intro_inventory>` section.
+
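+For example, a small YAML inventory might look like this (the host names and address are placeholders):
+
+.. code-block:: yaml
+
+   all:
+     children:
+       webservers:
+         hosts:
+           web1.example.com:
+           web2.example.com:
+       dbservers:
+         hosts:
+           db1.example.com:
+             ansible_host: 192.0.2.10
+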
+Collections
+===========
+
+Collections are a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. You can install and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_. To learn more about collections, see :ref:`collections`.
+
+Modules
+=======
+
+The units of code Ansible executes. Each module has a particular use, from administering users on a specific type of database to managing VLAN interfaces on a specific type of network device. You can invoke a single module with a task, or invoke several different modules in a playbook. Starting in Ansible 2.10, modules are grouped in collections. For an idea of how many collections Ansible includes, take a look at the :ref:`list_of_collections`.
+
+Tasks
+=====
+
+The units of action in Ansible. You can execute a single task once with an ad-hoc command.
+
+Playbooks
+=========
+
+Ordered lists of tasks, saved so you can run those tasks in that order repeatedly. Playbooks can include variables as well as tasks. Playbooks are written in YAML and are easy to read, write, share and understand. To learn more about playbooks, see :ref:`about_playbooks`.
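+
+To make the relationship between tasks and playbooks concrete, here is a minimal sketch of a playbook containing a single task (the file path is arbitrary):
+
+.. code-block:: yaml
+
+   ---
+   - name: A minimal example play
+     hosts: all
+     tasks:
+       - name: Ensure a marker file exists on every managed node
+         file:
+           path: /tmp/ansible_example
+           state: touch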
diff --git a/docs/docsite/rst/shared_snippets/download_tarball_collections.txt b/docs/docsite/rst/shared_snippets/download_tarball_collections.txt
new file mode 100644
index 00000000..045004be
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/download_tarball_collections.txt
@@ -0,0 +1,8 @@
+
+
+To download the collection tarball from Galaxy for offline use:
+
+#. Navigate to the collection page.
+#. Click on :guilabel:`Download tarball`.
+
+You may also need to manually download any dependent collections.
diff --git a/docs/docsite/rst/shared_snippets/galaxy_server_list.txt b/docs/docsite/rst/shared_snippets/galaxy_server_list.txt
new file mode 100644
index 00000000..0cbb7bbe
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/galaxy_server_list.txt
@@ -0,0 +1,80 @@
+
+
+By default, ``ansible-galaxy`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the :file:`ansible.cfg` file under :ref:`galaxy_server`).
+
+You can use either option below to configure ``ansible-galaxy collection`` to use other servers (such as Red Hat Automation Hub or a custom Galaxy server):
+
+* Set the server list in the :ref:`galaxy_server_list` configuration option in :ref:`ansible_configuration_settings_locations`.
+* Use the ``--server`` command line argument to limit to an individual server.
+
+To configure a Galaxy server list in ``ansible.cfg``:
+
+
+#. Set the ``server_list`` option under the ``[galaxy]`` section to one or more server names.
+#. Create a new section for each server name.
+#. Set the ``url`` option for each server name.
+#. Optionally, set the API token for each server name. See :ref:`API token <collections_installing>` for details.
+
+.. note::
+ The ``url`` option for each server name must end with a forward slash ``/``. If you do not set the API token in your Galaxy server list, use the ``--api-key`` argument to pass in the token to the ``ansible-galaxy collection publish`` command.
+
+For Automation Hub, you additionally need to:
+
+#. Set the ``auth_url`` option for each server name.
+#. Set the API token for each server name. Go to https://cloud.redhat.com/ansible/automation-hub/token/ and click :guilabel:`Get API token` from the version dropdown to copy your API token.
+
+The following example shows how to configure multiple servers:
+
+.. code-block:: ini
+
+ [galaxy]
+ server_list = automation_hub, my_org_hub, release_galaxy, test_galaxy
+
+ [galaxy_server.automation_hub]
+ url=https://cloud.redhat.com/api/automation-hub/
+ auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
+ token=my_ah_token
+
+ [galaxy_server.my_org_hub]
+ url=https://automation.my_org/
+ username=my_user
+ password=my_pass
+
+ [galaxy_server.release_galaxy]
+ url=https://galaxy.ansible.com/
+ token=my_token
+
+ [galaxy_server.test_galaxy]
+ url=https://galaxy-dev.ansible.com/
+ token=my_test_token
+
+.. note::
+   You can use the ``--server`` command line argument to select an explicit Galaxy server in the ``server_list``, and
+   the value of this argument should match the name of the server. To use a server not in the server list, set the value to the URL to access that server (all servers in the server list will be ignored). Also, you cannot use the ``--api-key`` argument for any of the predefined servers. You can only use the ``--api-key`` argument if you did not define a server list or if you specify a URL in the
+   ``--server`` argument.
+
+**Galaxy server list configuration options**
+
+The :ref:`galaxy_server_list` option is a list of server identifiers in a prioritized order. When searching for a
+collection, the install process will search in that order, for example, ``automation_hub`` first, then ``my_org_hub``, ``release_galaxy``, and
+finally ``test_galaxy`` until the collection is found. The actual Galaxy instance is then defined under the section
+``[galaxy_server.{{ id }}]`` where ``{{ id }}`` is the server identifier defined in the list. This section can then
+define the following keys:
+
+* ``url``: The URL of the Galaxy instance to connect to. Required.
+* ``token``: An API token key to use for authentication against the Galaxy instance. Mutually exclusive with ``username``.
+* ``username``: The username to use for basic authentication against the Galaxy instance. Mutually exclusive with ``token``.
+* ``password``: The password to use, in conjunction with ``username``, for basic authentication.
+* ``auth_url``: The URL of a Keycloak server 'token_endpoint' if using SSO authentication (for example, Automation Hub). Mutually exclusive with ``username``. Requires ``token``.
+
+As well as defining these server options in the ``ansible.cfg`` file, you can also define them as environment variables.
+The environment variable is in the form ``ANSIBLE_GALAXY_SERVER_{{ id }}_{{ key }}``, where ``{{ id }}`` is the
+uppercase form of the server identifier and ``{{ key }}`` is the key to define. For example, you can define ``token``
+for ``release_galaxy`` by setting ``ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_TOKEN=secret_token``.
+
+For operations that use only one Galaxy server (for example, the ``publish``, ``info``, or ``install`` commands), the ``ansible-galaxy collection`` command uses the first entry in the
+``server_list``, unless you pass in an explicit server with the ``--server`` argument.
+
+.. note::
+ Once a collection is found, any of its requirements are only searched within the same Galaxy instance as the parent
+ collection. The install process will not search for a collection requirement in a different Galaxy instance.
diff --git a/docs/docsite/rst/shared_snippets/installing_collections.txt b/docs/docsite/rst/shared_snippets/installing_collections.txt
new file mode 100644
index 00000000..16d14405
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/installing_collections.txt
@@ -0,0 +1,42 @@
+
+
+By default, ``ansible-galaxy collection install`` uses https://galaxy.ansible.com as the Galaxy server (as listed in the
+:file:`ansible.cfg` file under :ref:`galaxy_server`). You do not need any
+further configuration.
+
+See :ref:`Configuring the ansible-galaxy client <galaxy_server_config>` if you are using any other Galaxy server, such as Red Hat Automation Hub.
+
+To install a collection hosted in Galaxy:
+
+.. code-block:: bash
+
+ ansible-galaxy collection install my_namespace.my_collection
+
+You can also directly use the tarball from your build:
+
+.. code-block:: bash
+
+ ansible-galaxy collection install my_namespace-my_collection-1.0.0.tar.gz -p ./collections
+
+.. note::
+ The install command automatically appends the path ``ansible_collections`` to the one specified with the ``-p`` option unless the
+ parent directory is already in a folder called ``ansible_collections``.
+
+
+When using the ``-p`` option to specify the install path, use one of the values configured in :ref:`COLLECTIONS_PATHS`, as this is
+where Ansible itself will expect to find collections. If you don't specify a path, ``ansible-galaxy collection install`` installs
+the collection to the first path defined in :ref:`COLLECTIONS_PATHS`, which by default is ``~/.ansible/collections``.
+
+You can also keep a collection adjacent to the current playbook, under a ``collections/ansible_collections/`` directory structure.
+
+.. code-block:: text
+
+ ./
+ ├── play.yml
+ ├── collections/
+ │ └── ansible_collections/
+ │ └── my_namespace/
+ │ └── my_collection/<collection structure lives here>
+
+
+See :ref:`collection_structure` for details on the collection directory structure.
diff --git a/docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt b/docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt
new file mode 100644
index 00000000..7eb87829
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/installing_collections_git_repo.txt
@@ -0,0 +1,84 @@
+You can install a collection from a git repository by providing the URI of the repository instead of a collection name or a path to a ``tar.gz`` file. The collection must contain a ``galaxy.yml`` file, which will be used to generate the would-be collection artifact data from the directory. The URI should be prefixed with ``git+`` (or with ``git@`` to use a private repository with ssh authentication) and optionally supports a comma-separated `git commit-ish <https://git-scm.com/docs/gitglossary#def_commit-ish>`_ version (for example, a commit or tag).
+
+.. warning::
+
+   Embedding credentials in a git URI is not secure. Use safe authentication options instead, such as `SSH <https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh>`_, `netrc <https://linux.die.net/man/5/netrc>`_ or `http.extraHeader <https://git-scm.com/docs/git-config#Documentation/git-config.txt-httpextraHeader>`_/`url.<base>.pushInsteadOf <https://git-scm.com/docs/git-config#Documentation/git-config.txt-urlltbasegtpushInsteadOf>`_ in your Git configuration, to prevent your credentials from being exposed in logs.
+
+.. code-block:: bash
+
+ # Install a collection in a repository using the latest commit on the branch 'devel'
+ ansible-galaxy collection install git+https://github.com/organization/repo_name.git,devel
+
+ # Install a collection from a private github repository
+ ansible-galaxy collection install git@github.com:organization/repo_name.git
+
+ # Install a collection from a local git repository
+ ansible-galaxy collection install git+file:///home/user/path/to/repo/.git
+
+In a ``requirements.yml`` file, you can also use the ``type`` and ``version`` keys in addition to using the ``git+repo,version`` syntax for the collection name.
+
+.. code-block:: yaml
+
+ collections:
+ - name: https://github.com/organization/repo_name.git
+ type: git
+ version: devel
+
+Git repositories can be used for collection dependencies as well. This can be helpful for local development and testing but built/published artifacts should only have dependencies on other artifacts.
+
+.. code-block:: yaml
+
+ dependencies: {'git@github.com:organization/repo_name.git': 'devel'}
+
+Default repository search locations
+-----------------------------------
+
+There are two paths searched in a repository for collections by default.
+
+The first is the ``galaxy.yml`` file in the top level of the repository path. If the ``galaxy.yml`` file exists, it is used as the collection metadata and the individual collection will be installed.
+
+.. code-block:: text
+
+ ├── galaxy.yml
+ ├── plugins/
+ │   ├── lookup/
+ │   ├── modules/
+ │   └── module_utils/
+ └─── README.md
+
+The second is a ``galaxy.yml`` file in each directory in the repository path (one level deep). In this scenario, each directory with a ``galaxy.yml`` is installed as a collection.
+
+.. code-block:: text
+
+ directory/
+ ├── docs/
+ ├── galaxy.yml
+ ├── plugins/
+ │   ├── inventory/
+ │   └── modules/
+ └── roles/
+
+Specifying the location to search for collections
+-------------------------------------------------
+
+If you have a different repository structure, or only want to install a subset of collections, you can add a fragment to the end of your URI (before the optional comma-separated version) to indicate which path ansible-galaxy should inspect for ``galaxy.yml`` file(s). The path should point to a directory containing a collection or multiple collections (rather than to a ``galaxy.yml`` file).
+
+.. code-block:: text
+
+ namespace/
+ └── name/
+ ├── docs/
+ ├── galaxy.yml
+ ├── plugins/
+ │   ├── README.md
+ │   └── modules/
+ ├── README.md
+ └── roles/
+
+.. code-block:: bash
+
+ # Install all collections in a particular namespace
+ ansible-galaxy collection install git+https://github.com/organization/repo_name.git#/namespace/
+
+ # Install an individual collection using a specific commit
+ ansible-galaxy collection install git+https://github.com/organization/repo_name.git#/namespace/name/,7b60ddc245bc416b72d8ea6ed7b799885110f5e5
diff --git a/docs/docsite/rst/shared_snippets/installing_multiple_collections.txt b/docs/docsite/rst/shared_snippets/installing_multiple_collections.txt
new file mode 100644
index 00000000..e8c40b23
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/installing_multiple_collections.txt
@@ -0,0 +1,51 @@
+
+You can also set up a ``requirements.yml`` file to install multiple collections in one command. This file is a YAML file in the format:
+
+.. code-block:: yaml+jinja
+
+ ---
+ collections:
+ # With just the collection name
+ - my_namespace.my_collection
+
+ # With the collection name, version, and source options
+ - name: my_namespace.my_other_collection
+ version: 'version range identifiers (default: ``*``)'
+ source: 'The Galaxy URL to pull the collection from (default: ``--api-server`` from cmdline)'
+
+The supported keys for collection requirement entries are ``name``, ``version``, ``source``, and ``type``.
+
+The ``version`` key can take in the same range identifier format documented above. If you're installing a collection from a git repository instead of a built collection artifact, the ``version`` key refers to a `git commit-ish <https://git-scm.com/docs/gitglossary#def_commit-ish>`_.
+
+The ``type`` key can be set to ``galaxy``, ``url``, ``file``, and ``git``. If ``type`` is omitted, the ``name`` key is used to implicitly determine the source of the collection.
+
+Roles can also be specified and placed under the ``roles`` key. The values follow the same format as a requirements
+file used in older Ansible releases.
+
+.. code-block:: yaml
+
+ ---
+ roles:
+ # Install a role from Ansible Galaxy.
+ - name: geerlingguy.java
+ version: 1.9.6
+
+ collections:
+ # Install a collection from Ansible Galaxy.
+ - name: geerlingguy.php_roles
+ version: 0.9.3
+ source: https://galaxy.ansible.com
+
+To install both roles and collections at the same time with one command, run the following:
+
+.. code-block:: bash
+
+ $ ansible-galaxy install -r requirements.yml
+
+Running ``ansible-galaxy collection install -r`` or ``ansible-galaxy role install -r`` will install only collections
+or roles, respectively.
+
+.. note::
+ Installing both roles and collections from the same requirements file will not work when specifying a custom
+ collection or role install path. In this scenario the collections will be skipped and the command will process
+ each like ``ansible-galaxy role install`` would.
diff --git a/docs/docsite/rst/shared_snippets/installing_older_collection.txt b/docs/docsite/rst/shared_snippets/installing_older_collection.txt
new file mode 100644
index 00000000..511dd2a7
--- /dev/null
+++ b/docs/docsite/rst/shared_snippets/installing_older_collection.txt
@@ -0,0 +1,25 @@
+
+You can only have one version of a collection installed at a time. By default ``ansible-galaxy`` installs the latest available version. If you want to install a specific version, you can add a version range identifier. For example, to install the 1.0.0-beta.1 version of the collection:
+
+.. code-block:: bash
+
+ ansible-galaxy collection install my_namespace.my_collection:==1.0.0-beta.1
+
+You can specify multiple range identifiers separated by ``,``. Use single quotes so the shell passes the entire specifier, including ``>``, ``!``, and other operators, to ``ansible-galaxy``. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0:
+
+.. code-block:: bash
+
+ ansible-galaxy collection install 'my_namespace.my_collection:>=1.0.0,<2.0.0'
+
+Ansible will always install the most recent version that meets the range identifiers you specify. You can use the following range identifiers:
+
+* ``*``: The most recent version. This is the default.
+* ``!=``: Not equal to the version specified.
+* ``==``: Exactly the version specified.
+* ``>=``: Greater than or equal to the version specified.
+* ``>``: Greater than the version specified.
+* ``<=``: Less than or equal to the version specified.
+* ``<``: Less than the version specified.
+
+.. note::
+ By default ``ansible-galaxy`` ignores pre-release versions. To install a pre-release version, you must use the ``==`` range identifier to require it explicitly.
diff --git a/docs/docsite/rst/user_guide/basic_concepts.rst b/docs/docsite/rst/user_guide/basic_concepts.rst
new file mode 100644
index 00000000..76adc684
--- /dev/null
+++ b/docs/docsite/rst/user_guide/basic_concepts.rst
@@ -0,0 +1,12 @@
+.. _basic_concepts:
+
+****************
+Ansible concepts
+****************
+
+These concepts are common to all uses of Ansible. You need to understand them to use Ansible for any kind of automation. This basic introduction provides the background you need to follow the rest of the User Guide.
+
+.. contents::
+ :local:
+
+.. include:: /shared_snippets/basic_concepts.txt
diff --git a/docs/docsite/rst/user_guide/become.rst b/docs/docsite/rst/user_guide/become.rst
new file mode 100644
index 00000000..fed806bb
--- /dev/null
+++ b/docs/docsite/rst/user_guide/become.rst
@@ -0,0 +1,702 @@
+.. _become:
+
+******************************************
+Understanding privilege escalation: become
+******************************************
+
+Ansible uses existing privilege escalation systems to execute tasks with root privileges or with another user's permissions. Because this feature allows you to 'become' another user, different from the user that logged into the machine (remote user), we call it ``become``. The ``become`` keyword leverages existing privilege escalation tools like `sudo`, `su`, `pfexec`, `doas`, `pbrun`, `dzdo`, `ksu`, `runas`, `machinectl` and others.
+
+.. contents::
+ :local:
+
+Using become
+============
+
+You can control the use of ``become`` with play or task directives, connection variables, or at the command line. If you set privilege escalation properties in multiple ways, review the :ref:`general precedence rules<general_precedence_rules>` to understand which settings will be used.
+
+A full list of all become plugins that are included in Ansible can be found in the :ref:`become_plugin_list`.
+
+Become directives
+-----------------
+
+You can set the directives that control ``become`` at the play or task level. You can override these by setting connection variables, which often differ from one host to another. These variables and directives are independent. For example, setting ``become_user`` does not set ``become``.
+
+become
+ set to ``yes`` to activate privilege escalation.
+
+become_user
+    set to user with desired privileges — the user you `become`, NOT the user you log in as. Does NOT imply ``become: yes``, to allow it to be set at host level. Default value is ``root``.
+
+become_method
+ (at play or task level) overrides the default method set in ansible.cfg, set to use any of the :ref:`become_plugins`.
+
+become_flags
+ (at play or task level) permit the use of specific flags for the tasks or role. One common use is to change the user to nobody when the shell is set to nologin. Added in Ansible 2.2.
+
+For example, to manage a system service (which requires ``root`` privileges) when connected as a non-``root`` user, you can use the default value of ``become_user`` (``root``):
+
+.. code-block:: yaml
+
+ - name: Ensure the httpd service is running
+ service:
+ name: httpd
+ state: started
+ become: yes
+
+To run a command as the ``apache`` user:
+
+.. code-block:: yaml
+
+ - name: Run a command as the apache user
+ command: somecommand
+ become: yes
+ become_user: apache
+
+To do something as the ``nobody`` user when the shell is nologin:
+
+.. code-block:: yaml
+
+ - name: Run a command as nobody
+ command: somecommand
+ become: yes
+ become_method: su
+ become_user: nobody
+ become_flags: '-s /bin/sh'
+
+To specify a password for sudo, run ``ansible-playbook`` with ``--ask-become-pass`` (``-K`` for short).
+If you run a playbook utilizing ``become`` and the playbook seems to hang, most likely it is stuck at the privilege escalation prompt. Stop it with `CTRL-c`, then execute the playbook with ``-K`` and the appropriate password.
+
+Become connection variables
+---------------------------
+
+You can define different ``become`` options for each managed node or group. You can define these variables in inventory or use them as normal variables.
+
+ansible_become
+ equivalent of the become directive, decides if privilege escalation is used or not.
+
+ansible_become_method
+ which privilege escalation method should be used
+
+ansible_become_user
+ set the user you become through privilege escalation; does not imply ``ansible_become: yes``
+
+ansible_become_password
+ set the privilege escalation password. See :ref:`playbooks_vault` for details on how to avoid having secrets in plain text
+
+For example, if you want to run all tasks as ``root`` on a server named ``webserver``, but you can only connect as the ``manager`` user, you could use an inventory entry like this:
+
+.. code-block:: text
+
+ webserver ansible_user=manager ansible_become=yes
+
+.. note::
+   The variables defined above are generic for all become plugins, but plugin-specific ones can also be set instead.
+   See the documentation for each plugin for a list of all options it supports and how they can be defined.
+ A full list of become plugins in Ansible can be found at :ref:`become_plugins`.
+
+Become command-line options
+---------------------------
+
+--ask-become-pass, -K
+ ask for privilege escalation password; does not imply become will be used. Note that this password will be used for all hosts.
+
+--become, -b
+ run operations with become (no password implied)
+
+--become-method=BECOME_METHOD
+ privilege escalation method to use (default=sudo),
+ valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | machinectl ]
+
+--become-user=BECOME_USER
+ run operations as this user (default=root), does not imply --become/-b
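+
+For example, a hedged ad hoc sketch that restarts a service as root, prompting for the escalation password (``webservers`` and ``httpd`` are placeholders):
+
+.. code-block:: console
+
+    $ ansible webservers -m ansible.builtin.service -a "name=httpd state=restarted" -b -K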
+
+Risks and limitations of become
+===============================
+
+Although privilege escalation is mostly intuitive, there are a few limitations
+on how it works. Users should be aware of these to avoid surprises.
+
+Risks of becoming an unprivileged user
+--------------------------------------
+
+Ansible modules are executed on the remote machine by first substituting the
+parameters into the module file, then copying the file to the remote machine,
+and finally executing it there.
+
+Everything is fine if the module file is executed without using ``become``,
+when the ``become_user`` is root, or when the connection to the remote machine
+is made as root. In these cases Ansible creates the module file with permissions
+that only allow reading by the user and root, or only allow reading by the unprivileged
+user being switched to.
+
+However, when both the connection user and the ``become_user`` are unprivileged,
+the module file is written as the user that Ansible connects as, but the file needs to
+be readable by the user Ansible is set to ``become``. In this case, Ansible makes
+the module file world-readable for the duration of the Ansible module execution.
+Once the module is done executing, Ansible deletes the temporary file.
+
+If any of the parameters passed to the module are sensitive in nature, and you do
+not trust the client machines, then this is a potential danger.
+
+Ways to resolve this include:
+
+* Use `pipelining`. When pipelining is enabled, Ansible does not save the
+  module to a temporary file on the client. Instead it pipes the module to
+  the remote Python interpreter's stdin. Pipelining does not work for
+  Python modules involving file transfer (for example: :ref:`copy <copy_module>`,
+  :ref:`fetch <fetch_module>`, :ref:`template <template_module>`), or for non-Python modules.
+  See the configuration example after this list.
+
+* Install POSIX.1e filesystem acl support on the
+ managed host. If the temporary directory on the remote host is mounted with
+ POSIX acls enabled and the :command:`setfacl` tool is in the remote ``PATH``
+ then Ansible will use POSIX acls to share the module file with the second
+ unprivileged user instead of having to make the file readable by everyone.
+
+* Avoid becoming an unprivileged
+ user. Temporary files are protected by UNIX file permissions when you
+ ``become`` root or do not use ``become``. In Ansible 2.1 and above, UNIX
+ file permissions are also secure if you make the connection to the managed
+ machine as root and then use ``become`` to access an unprivileged account.
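+
+For example, pipelining can be enabled with a minimal :file:`ansible.cfg` snippet (note that pipelining also requires ``requiretty`` to be disabled in the sudoers configuration on the managed hosts):
+
+.. code-block:: text
+
+    [ssh_connection]
+    pipelining = True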
+
+.. warning:: Although the Solaris ZFS filesystem has filesystem ACLs, the ACLs
+ are not POSIX.1e filesystem acls (they are NFSv4 ACLs instead). Ansible
+ cannot use these ACLs to manage its temp file permissions so you may have
+ to resort to ``allow_world_readable_tmpfiles`` if the remote machines use ZFS.
+
+.. versionchanged:: 2.1
+
+Ansible makes it hard to unknowingly use ``become`` insecurely. Starting in Ansible 2.1,
+Ansible defaults to issuing an error if it cannot execute securely with ``become``.
+If you cannot use pipelining or POSIX ACLs, must connect as an unprivileged user,
+must use ``become`` to execute as a different unprivileged user,
+and decide that your managed nodes are secure enough for the
+modules you want to run there to be world readable, you can turn on
+``allow_world_readable_tmpfiles`` in the :file:`ansible.cfg` file. Setting
+``allow_world_readable_tmpfiles`` changes this from an error into
+a warning and allows the task to run as it did prior to 2.1.
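+
+For example, a minimal :file:`ansible.cfg` snippet (only set this if you accept the risk described above):
+
+.. code-block:: text
+
+    [defaults]
+    allow_world_readable_tmpfiles = True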
+
+Not supported by all connection plugins
+---------------------------------------
+
+Privilege escalation methods must also be supported by the connection plugin
+used. Most connection plugins will warn if they do not support become. Some
+will just ignore it as they always run as root (jail, chroot, and so on).
+
+Only one method may be enabled per host
+---------------------------------------
+
+Methods cannot be chained. You cannot use ``sudo /bin/su -`` to become a user,
+you need to have privileges to run the command as that user in sudo or be able
+to su directly to it (the same for pbrun, pfexec or other supported methods).
+
+Privilege escalation must be general
+------------------------------------
+
+You cannot limit privilege escalation permissions to certain commands.
+Ansible does not always
+use a specific command to do something but runs modules (code) from
+a temporary file whose name changes every time. If you have '/sbin/service'
+or '/bin/chmod' as the allowed commands, this will fail with Ansible, as those
+paths won't match the temporary file that Ansible creates to run the
+module. If you have security rules that constrain your sudo/pbrun/doas environment
+to running specific command paths only, use Ansible from a special account that
+does not have this constraint, or use :ref:`ansible_tower` to manage indirect access to SSH credentials.
+
+May not access environment variables populated by pamd_systemd
+--------------------------------------------------------------
+
+For most Linux distributions using ``systemd`` as their init, the default
+methods used by ``become`` do not open a new "session", in the sense of
+systemd. Because the ``pam_systemd`` module will not fully initialize a new
+session, you might have surprises compared to a normal session opened through
+ssh: some environment variables set by ``pam_systemd``, most notably
+``XDG_RUNTIME_DIR``, are not populated for the new user and are instead
+inherited or simply emptied.
+
+This might cause trouble when trying to invoke systemd commands that depend on
+``XDG_RUNTIME_DIR`` to access the bus:
+
+.. code-block:: console
+
+ $ echo $XDG_RUNTIME_DIR
+
+ $ systemctl --user status
+ Failed to connect to bus: Permission denied
+
+To force ``become`` to open a new systemd session that goes through
+``pam_systemd``, you can use ``become_method: machinectl``.
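+
+A hedged sketch (``myuser`` is a placeholder; the ``machinectl`` become plugin ships in the ``community.general`` collection):
+
+.. code-block:: yaml
+
+    - name: run a user-level systemd command in a full session
+      command: systemctl --user status
+      become: yes
+      become_method: machinectl
+      become_user: myuser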
+
+For more information, see `this systemd issue
+<https://github.com/systemd/systemd/issues/825#issuecomment-127917622>`_.
+
+.. _become_network:
+
+Become and network automation
+=============================
+
+As of version 2.6, Ansible supports ``become`` for privilege escalation (entering ``enable`` mode or privileged EXEC mode) on all Ansible-maintained network platforms that support ``enable`` mode. Using ``become`` replaces the ``authorize`` and ``auth_pass`` options in a ``provider`` dictionary.
+
+You must set the connection type to either ``connection: ansible.netcommon.network_cli`` or ``connection: ansible.netcommon.httpapi`` to use ``become`` for privilege escalation on network devices. Check the :ref:`platform_options` documentation for details.
+
+You can use escalated privileges on only the specific tasks that need them, on an entire play, or on all plays. Adding ``become: yes`` and ``become_method: enable`` instructs Ansible to enter ``enable`` mode before executing the task, play, or playbook where those parameters are set.
+
+If you see this error message, the task that generated it requires ``enable`` mode to succeed:
+
+.. code-block:: console
+
+ Invalid input (privileged mode required)
+
+To set ``enable`` mode for a specific task, add ``become`` at the task level:
+
+.. code-block:: yaml
+
+ - name: Gather facts (eos)
+ arista.eos.eos_facts:
+ gather_subset:
+ - "!hardware"
+ become: yes
+ become_method: enable
+
+To set enable mode for all tasks in a single play, add ``become`` at the play level:
+
+.. code-block:: yaml
+
+ - hosts: eos-switches
+ become: yes
+ become_method: enable
+ tasks:
+ - name: Gather facts (eos)
+ arista.eos.eos_facts:
+ gather_subset:
+ - "!hardware"
+
+Setting enable mode for all tasks
+---------------------------------
+
+If you want all tasks in all plays to run using privileged mode, that is best achieved with ``group_vars``:
+
+**group_vars/eos.yml**
+
+.. code-block:: yaml
+
+ ansible_connection: ansible.netcommon.network_cli
+ ansible_network_os: arista.eos.eos
+ ansible_user: myuser
+ ansible_become: yes
+ ansible_become_method: enable
+
+Passwords for enable mode
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you need a password to enter ``enable`` mode, you can specify it in one of two ways:
+
+* providing the :option:`--ask-become-pass <ansible-playbook --ask-become-pass>` command line option
+* setting the ``ansible_become_password`` connection variable
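+
+For example, a hedged sketch using the connection variable, with the actual secret kept in a vaulted variable (``vaulted_enable_password`` is a placeholder):
+
+.. code-block:: yaml
+
+    # group_vars/eos.yml
+    ansible_become: yes
+    ansible_become_method: enable
+    ansible_become_password: "{{ vaulted_enable_password }}"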
+
+.. warning::
+
+   As a reminder, passwords should never be stored in plain text. For information on encrypting your passwords and other secrets with Ansible Vault, see :ref:`vault`.
+
+authorize and auth_pass
+-----------------------
+
+Ansible still supports ``enable`` mode with ``connection: local`` for legacy network playbooks. To enter ``enable`` mode with ``connection: local``, use the module options ``authorize`` and ``auth_pass``:
+
+.. code-block:: yaml
+
+ - hosts: eos-switches
+ ansible_connection: local
+ tasks:
+ - name: Gather facts (eos)
+ eos_facts:
+ gather_subset:
+ - "!hardware"
+          provider:
+            authorize: yes
+            auth_pass: "{{ secret_auth_pass }}"
+
+We recommend updating your playbooks to use ``become`` for network-device ``enable`` mode consistently. The use of ``authorize`` and of ``provider`` dictionaries will be deprecated in the future. Check the :ref:`platform_options` and :ref:`network_modules` documentation for details.
+
+.. _become_windows:
+
+Become and Windows
+==================
+
+Since Ansible 2.3, ``become`` can be used on Windows hosts through the
+``runas`` method. Become on Windows uses the same inventory setup and
+invocation arguments as ``become`` on a non-Windows host, so the setup and
+variable names are the same as what is defined in this document.
+
+While ``become`` can be used to assume the identity of another user, there are other uses for
+it with Windows hosts. One important use is to bypass some of the
+limitations that are imposed when running on WinRM, such as constrained network
+delegation or accessing forbidden system calls like the WUA API. You can use
+``become`` with the same user as ``ansible_user`` to bypass these limitations
+and run commands that are not normally accessible in a WinRM session.
+
+Administrative rights
+---------------------
+
+Many tasks in Windows require administrative privileges to complete. When using
+the ``runas`` become method, Ansible will attempt to run the module with the
+full privileges that are available to the remote user. If it fails to elevate
+the user token, it will continue to use the limited token during execution.
+
+A user must have the ``SeDebugPrivilege`` to run a become process with elevated
+privileges. This privilege is assigned to Administrators by default. If the
+debug privilege is not available, the become process will run with a limited
+set of privileges and groups.
+
+To determine the type of token that Ansible was able to get, run the following
+task:
+
+.. code-block:: yaml
+
+    - name: Check my user name
+      ansible.windows.win_whoami:
+      become: yes
+
+The output will look similar to the following:
+
+.. code-block:: ansible-output
+
+ ok: [windows] => {
+ "account": {
+ "account_name": "vagrant-domain",
+ "domain_name": "DOMAIN",
+ "sid": "S-1-5-21-3088887838-4058132883-1884671576-1105",
+ "type": "User"
+ },
+ "authentication_package": "Kerberos",
+ "changed": false,
+ "dns_domain_name": "DOMAIN.LOCAL",
+ "groups": [
+ {
+ "account_name": "Administrators",
+ "attributes": [
+ "Mandatory",
+ "Enabled by default",
+ "Enabled",
+ "Owner"
+ ],
+ "domain_name": "BUILTIN",
+ "sid": "S-1-5-32-544",
+ "type": "Alias"
+ },
+ {
+ "account_name": "INTERACTIVE",
+ "attributes": [
+ "Mandatory",
+ "Enabled by default",
+ "Enabled"
+ ],
+ "domain_name": "NT AUTHORITY",
+ "sid": "S-1-5-4",
+ "type": "WellKnownGroup"
+ },
+ ],
+ "impersonation_level": "SecurityAnonymous",
+ "label": {
+ "account_name": "High Mandatory Level",
+ "domain_name": "Mandatory Label",
+ "sid": "S-1-16-12288",
+ "type": "Label"
+ },
+ "login_domain": "DOMAIN",
+ "login_time": "2018-11-18T20:35:01.9696884+00:00",
+ "logon_id": 114196830,
+ "logon_server": "DC01",
+ "logon_type": "Interactive",
+ "privileges": {
+ "SeBackupPrivilege": "disabled",
+ "SeChangeNotifyPrivilege": "enabled-by-default",
+ "SeCreateGlobalPrivilege": "enabled-by-default",
+ "SeCreatePagefilePrivilege": "disabled",
+ "SeCreateSymbolicLinkPrivilege": "disabled",
+ "SeDebugPrivilege": "enabled",
+ "SeDelegateSessionUserImpersonatePrivilege": "disabled",
+ "SeImpersonatePrivilege": "enabled-by-default",
+ "SeIncreaseBasePriorityPrivilege": "disabled",
+ "SeIncreaseQuotaPrivilege": "disabled",
+ "SeIncreaseWorkingSetPrivilege": "disabled",
+ "SeLoadDriverPrivilege": "disabled",
+ "SeManageVolumePrivilege": "disabled",
+ "SeProfileSingleProcessPrivilege": "disabled",
+ "SeRemoteShutdownPrivilege": "disabled",
+ "SeRestorePrivilege": "disabled",
+ "SeSecurityPrivilege": "disabled",
+ "SeShutdownPrivilege": "disabled",
+ "SeSystemEnvironmentPrivilege": "disabled",
+ "SeSystemProfilePrivilege": "disabled",
+ "SeSystemtimePrivilege": "disabled",
+ "SeTakeOwnershipPrivilege": "disabled",
+ "SeTimeZonePrivilege": "disabled",
+ "SeUndockPrivilege": "disabled"
+ },
+ "rights": [
+ "SeNetworkLogonRight",
+ "SeBatchLogonRight",
+ "SeInteractiveLogonRight",
+ "SeRemoteInteractiveLogonRight"
+ ],
+ "token_type": "TokenPrimary",
+ "upn": "vagrant-domain@DOMAIN.LOCAL",
+ "user_flags": []
+ }
+
+Under the ``label`` key, the ``account_name`` entry determines whether the user
+has Administrative rights. Here are the labels that can be returned and what
+they represent:
+
+* ``Medium``: Ansible failed to get an elevated token and ran under a limited
+  token. Only a subset of the privileges assigned to the user are available during
+  the module execution, and the user does not have administrative rights.
+
+* ``High``: An elevated token was used and all the privileges assigned to the
+ user are available during the module execution.
+
+* ``System``: The ``NT AUTHORITY\System`` account is used and has the highest
+ level of privileges available.
+
+The output will also show the list of privileges that have been granted to the
+user. When the privilege value is ``disabled``, the privilege is assigned to
+the logon token but has not been enabled. In most scenarios these privileges
+are automatically enabled when required.
+
+If you are running a version of Ansible older than 2.5, or the normal
+``runas`` escalation process fails, you can retrieve an elevated token in one
+of the following ways:
+
+* Set the ``become_user`` to ``System``, which has full control over the
+  operating system.
+
+* Grant ``SeTcbPrivilege`` to the user Ansible connects with on
+ WinRM. ``SeTcbPrivilege`` is a high-level privilege that grants
+ full control over the operating system. No user is given this privilege by
+ default, and care should be taken if you grant this privilege to a user or group.
+ For more information on this privilege, please see
+ `Act as part of the operating system <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/dn221957(v=ws.11)>`_.
+ You can use the below task to set this privilege on a Windows host:
+
+ .. code-block:: yaml
+
+ - name: grant the ansible user the SeTcbPrivilege right
+ ansible.windows.win_user_right:
+ name: SeTcbPrivilege
+          users: '{{ ansible_user }}'
+ action: add
+
+* Turn UAC off on the host and reboot before trying to become the user. UAC is
+ a security protocol that is designed to run accounts with the
+ ``least privilege`` principle. You can turn UAC off by running the following
+ tasks:
+
+ .. code-block:: yaml
+
+ - name: turn UAC off
+        ansible.windows.win_regedit:
+ path: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\policies\system
+ name: EnableLUA
+ data: 0
+ type: dword
+ state: present
+ register: uac_result
+
+ - name: reboot after disabling UAC
+        ansible.windows.win_reboot:
+ when: uac_result is changed
+
+.. Note:: Granting the ``SeTcbPrivilege`` or turning UAC off can cause Windows
+    security vulnerabilities, so care should be taken if you apply these steps.
+
+Local service accounts
+----------------------
+
+Prior to Ansible version 2.5, ``become`` only worked on Windows with a local or domain
+user account. Local service accounts like ``System`` or ``NetworkService``
+could not be used as ``become_user`` in these older versions. This restriction
+has been lifted since the 2.5 release of Ansible. The three service accounts
+that can be set under ``become_user`` are:
+
+* System
+* NetworkService
+* LocalService
+
+Because local service accounts do not have passwords, the
+``ansible_become_password`` parameter is not required and is ignored if
+specified.
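+
+For example, a minimal sketch that runs a module as the ``System`` account without supplying a password:
+
+.. code-block:: yaml
+
+    - name: run a command as the System account
+      ansible.windows.win_whoami:
+      become: yes
+      become_method: runas
+      become_user: System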
+
+Become without setting a password
+---------------------------------
+
+As of Ansible 2.8, ``become`` can be used to become a Windows local or domain account
+without requiring a password for that account. For this method to work, the
+following requirements must be met:
+
+* The connection user has the ``SeDebugPrivilege`` privilege assigned
+* The connection user is part of the ``BUILTIN\Administrators`` group
+* The ``become_user`` has either the ``SeBatchLogonRight`` or ``SeNetworkLogonRight`` user right
+
+Using become without a password is achieved in one of two ways:
+
+* Duplicating an existing logon session's token if the account is already logged on
+* Using S4U to generate a logon token that is valid on the remote host only
+
+In the first scenario, the become process is spawned from another logon of that
+user account. This could be an existing RDP or console logon, but such a logon
+is not guaranteed to exist at all times. This is similar to the
+``Run only when user is logged on`` option for a Scheduled Task.
+
+In the case where another logon of the become account does not exist, S4U is
+used to create a new logon and run the module through that. This is similar to
+the ``Run whether user is logged on or not`` with the ``Do not store password``
+option for a Scheduled Task. In this scenario, the become process will not be
+able to access any network resources like a normal WinRM process.
+
+To make a distinction between using become with no password and becoming an
+account that has no password, make sure to keep ``ansible_become_password``
+undefined or set ``ansible_become_password:``.
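+
+A hedged sketch of the host variables for passwordless become (the account and domain names are placeholders):
+
+.. code-block:: yaml
+
+    ansible_become: yes
+    ansible_become_method: runas
+    ansible_become_user: DOMAIN\user
+    # ansible_become_password is deliberately left undefined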
+
+.. Note:: Because there are no guarantees an existing token will exist for a
+    user when Ansible runs, there is a high chance the become process will only
+    have access to local resources. Use become with a password if the task needs
+    to access network resources.
+
+Accounts without a password
+---------------------------
+
+.. Warning:: As a general security best practice, you should avoid allowing accounts without passwords.
+
+Ansible can be used to become a Windows account that does not have a password (like the
+``Guest`` account). To become an account without a password, set up the
+variables like normal but set ``ansible_become_password: ''``.
+
+Before become can work on an account like this, the local policy
+`Accounts: Limit local account use of blank passwords to console logon only <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/jj852174(v=ws.11)>`_
+must be disabled. This can either be done through a Group Policy Object (GPO)
+or with this Ansible task:
+
+.. code-block:: yaml
+
+ - name: allow blank password on become
+ ansible.windows.win_regedit:
+ path: HKLM:\SYSTEM\CurrentControlSet\Control\Lsa
+ name: LimitBlankPasswordUse
+ data: 0
+ type: dword
+ state: present
+
+.. Note:: This is only for accounts that do not have a password. You still need
+ to set the account's password under ``ansible_become_password`` if the
+ become_user has a password.
+
+Become flags for Windows
+------------------------
+
+Ansible 2.5 added the ``become_flags`` parameter to the ``runas`` become method.
+This parameter can be set using the ``become_flags`` task directive or set in
+Ansible's configuration using ``ansible_become_flags``. The two valid values
+that are initially supported for this parameter are ``logon_type`` and
+``logon_flags``.
+
+.. Note:: These flags should only be set when becoming a normal user account, not a local service account like LocalSystem.
+
+The key ``logon_type`` sets the type of logon operation to perform. The value
+can be set to one of the following:
+
+* ``interactive``: The default logon type. The process will be run under a
+ context that is the same as when running a process locally. This bypasses all
+ WinRM restrictions and is the recommended method to use.
+
+* ``batch``: Runs the process under a batch context that is similar to a
+ scheduled task with a password set. This should bypass most WinRM
+ restrictions and is useful if the ``become_user`` is not allowed to log on
+ interactively.
+
+* ``new_credentials``: Runs under the same credentials as the calling user, but
+ outbound connections are run under the context of the ``become_user`` and
+ ``become_password``, similar to ``runas.exe /netonly``. The ``logon_flags``
+ flag should also be set to ``netcredentials_only``. Use this flag if
+ the process needs to access a network resource (like an SMB share) using a
+ different set of credentials.
+
+* ``network``: Runs the process under a network context without any cached
+ credentials. This results in the same type of logon session as running a
+ normal WinRM process without credential delegation, and operates under the same
+ restrictions.
+
+* ``network_cleartext``: Like the ``network`` logon type, but instead caches
+ the credentials so it can access network resources. This is the same type of
+ logon session as running a normal WinRM process with credential delegation.
+
+For more information, see
+`dwLogonType <https://docs.microsoft.com/en-gb/windows/desktop/api/winbase/nf-winbase-logonusera>`_.
+
+The ``logon_flags`` key specifies how Windows will log the user on when creating
+the new process. The value can be set to none, one, or multiple of the following:
+
+* ``with_profile``: The default logon flag set. The process will load the
+ user's profile in the ``HKEY_USERS`` registry key to ``HKEY_CURRENT_USER``.
+
+* ``netcredentials_only``: The process will use the same token as the caller
+ but will use the ``become_user`` and ``become_password`` when accessing a remote
+ resource. This is useful in inter-domain scenarios where there is no trust
+ relationship, and should be used with the ``new_credentials`` ``logon_type``.
+
+By default, ``logon_flags=with_profile`` is set. If the profile should not be
+loaded, set ``logon_flags=``; if the profile should be loaded along with
+``netcredentials_only``, set ``logon_flags=with_profile,netcredentials_only``.
+
+For more information, see `dwLogonFlags <https://docs.microsoft.com/en-gb/windows/desktop/api/winbase/nf-winbase-createprocesswithtokenw>`_.
+
+Here are some examples of how to use ``become_flags`` with Windows tasks:
+
+.. code-block:: yaml
+
+ - name: copy a file from a fileshare with custom credentials
+ ansible.windows.win_copy:
+ src: \\server\share\data\file.txt
+ dest: C:\temp\file.txt
+ remote_src: yes
+ vars:
+ ansible_become: yes
+ ansible_become_method: runas
+ ansible_become_user: DOMAIN\user
+ ansible_become_password: Password01
+ ansible_become_flags: logon_type=new_credentials logon_flags=netcredentials_only
+
+ - name: run a command under a batch logon
+ ansible.windows.win_whoami:
+ become: yes
+ become_flags: logon_type=batch
+
+ - name: run a command and not load the user profile
+      ansible.windows.win_whoami:
+ become: yes
+ become_flags: logon_flags=
+
+
+Limitations of become on Windows
+--------------------------------
+
+* Running a task with ``async`` and ``become`` on Windows Server 2008, 2008 R2
+ and Windows 7 only works when using Ansible 2.7 or newer.
+
+* By default, the become user logs on with an interactive session, so it must
+ have the right to do so on the Windows host. If it does not inherit the
+ ``SeAllowLogOnLocally`` privilege or inherits the ``SeDenyLogOnLocally``
+ privilege, the become process will fail. Either add the privilege or set the
+ ``logon_type`` flag to change the logon type used.
+
+* Prior to Ansible version 2.3, become only worked when
+ ``ansible_winrm_transport`` was either ``basic`` or ``credssp``. This
+ restriction has been lifted since the 2.4 release of Ansible for all hosts
+ except Windows Server 2008 (non R2 version).
+
+* The Secondary Logon service (``seclogon``) must be running to use ``ansible_become_method: runas``.
+
+.. seealso::
+
+ `Mailing List <https://groups.google.com/forum/#!forum/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `webchat.freenode.net <https://webchat.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/collections_using.rst b/docs/docsite/rst/user_guide/collections_using.rst
new file mode 100644
index 00000000..a9530a9e
--- /dev/null
+++ b/docs/docsite/rst/user_guide/collections_using.rst
@@ -0,0 +1,324 @@
+
+.. _collections:
+
+*****************
+Using collections
+*****************
+
+Collections are a distribution format for Ansible content that can include playbooks, roles, modules, and plugins. As modules move from the core Ansible repository into collections, the module documentation will move to the :ref:`collections pages <list_of_collections>`.
+
+You can install and use collections through `Ansible Galaxy <https://galaxy.ansible.com>`_.
+
+* For details on how to *develop* collections see :ref:`developing_collections`.
+* For the current development status of Collections and FAQ see `Ansible Collections Community Guide <https://github.com/ansible-collections/overview/blob/main/README.rst>`_.
+
+.. contents::
+ :local:
+ :depth: 2
+
+.. _collections_installing:
+
+Installing collections
+======================
+
+
+Installing collections with ``ansible-galaxy``
+----------------------------------------------
+
+.. include:: ../shared_snippets/installing_collections.txt
+
+.. _collections_older_version:
+
+Installing an older version of a collection
+-------------------------------------------
+
+.. include:: ../shared_snippets/installing_older_collection.txt
+
+Installing a collection from a git repository
+---------------------------------------------
+
+.. include:: ../shared_snippets/installing_collections_git_repo.txt
+
+.. _collection_requirements_file:
+
+Install multiple collections with a requirements file
+-----------------------------------------------------
+
+.. include:: ../shared_snippets/installing_multiple_collections.txt
+
+.. _collection_offline_download:
+
+Downloading a collection for offline use
+-----------------------------------------
+
+.. include:: ../shared_snippets/download_tarball_collections.txt
+
+
+.. _galaxy_server_config:
+
+Configuring the ``ansible-galaxy`` client
+------------------------------------------
+
+.. include:: ../shared_snippets/galaxy_server_list.txt
+
+.. _collections_downloading:
+
+Downloading collections
+=======================
+
+To download a collection and its dependencies for an offline install, run ``ansible-galaxy collection download``. This
+downloads the collections specified and their dependencies to the specified folder and creates a ``requirements.yml``
+file which can be used to install those collections on a host without access to a Galaxy server. All the collections
+are downloaded by default to the ``./collections`` folder.
+
+Just like the ``install`` command, the collections are sourced based on the
+:ref:`configured galaxy server config <galaxy_server_config>`. Even if a collection to download was specified by a URL
+or path to a tarball, the collection will be redownloaded from the configured Galaxy server.
+
+Collections can be specified as one or multiple collections or with a ``requirements.yml`` file just like
+``ansible-galaxy collection install``.
+
+To download a single collection and its dependencies:
+
+.. code-block:: bash
+
+ ansible-galaxy collection download my_namespace.my_collection
+
+To download a single collection at a specific version:
+
+.. code-block:: bash
+
+ ansible-galaxy collection download my_namespace.my_collection:1.0.0
+
+To download multiple collections, either specify them as command line arguments as shown above or use a
+requirements file in the format documented in :ref:`collection_requirements_file`.
+
+.. code-block:: bash
+
+ ansible-galaxy collection download -r requirements.yml
+
+All the collections are downloaded by default to the ``./collections`` folder but you can use ``-p`` or
+``--download-path`` to specify another path:
+
+.. code-block:: bash
+
+ ansible-galaxy collection download my_namespace.my_collection -p ~/offline-collections
+
+Once you have downloaded the collections, the folder contains the collections specified, their dependencies, and a
+``requirements.yml`` file. You can use this folder as is with ``ansible-galaxy collection install`` to install the
+collections on a host without access to a Galaxy or Automation Hub server.
+
+.. code-block:: bash
+
+ # This must be run from the folder that contains the offline collections and requirements.yml file downloaded
+ # by the internet-connected host
+ cd ~/offline-collections
+ ansible-galaxy collection install -r requirements.yml
+
+.. _collections_listing:
+
+Listing collections
+===================
+
+To list installed collections, run ``ansible-galaxy collection list``. This shows all of the installed collections found in the configured collections search paths. It also shows collections under development that contain a galaxy.yml file instead of a MANIFEST.json. The path where each collection is located is displayed, along with version information. If no version information is available, a ``*`` is displayed for the version number.
+
+.. code-block:: shell
+
+ # /home/astark/.ansible/collections/ansible_collections
+ Collection Version
+ -------------------------- -------
+ cisco.aci 0.0.5
+ cisco.mso 0.0.4
+ sandwiches.ham *
+ splunk.es 0.0.5
+
+ # /usr/share/ansible/collections/ansible_collections
+ Collection Version
+ ----------------- -------
+ fortinet.fortios 1.0.6
+ pureport.pureport 0.0.8
+ sensu.sensu_go 1.3.0
+
+Run with ``-vvv`` to display more detailed information.
+
+To list a specific collection, pass a valid fully qualified collection name (FQCN) to the command ``ansible-galaxy collection list``. All instances of the collection will be listed.
+
+.. code-block:: shell
+
+ > ansible-galaxy collection list fortinet.fortios
+
+ # /home/astark/.ansible/collections/ansible_collections
+ Collection Version
+ ---------------- -------
+ fortinet.fortios 1.0.1
+
+ # /usr/share/ansible/collections/ansible_collections
+ Collection Version
+ ---------------- -------
+ fortinet.fortios 1.0.6
+
+To search other paths for collections, use the ``-p`` option. Specify multiple search paths by separating them with a ``:``. The list of paths specified on the command line will be added to the beginning of the configured collections search paths.
+
+.. code-block:: shell
+
+ > ansible-galaxy collection list -p '/opt/ansible/collections:/etc/ansible/collections'
+
+ # /opt/ansible/collections/ansible_collections
+ Collection Version
+ --------------- -------
+ sandwiches.club 1.7.2
+
+ # /etc/ansible/collections/ansible_collections
+ Collection Version
+ -------------- -------
+ sandwiches.pbj 1.2.0
+
+ # /home/astark/.ansible/collections/ansible_collections
+ Collection Version
+ -------------------------- -------
+ cisco.aci 0.0.5
+ cisco.mso 0.0.4
+ fortinet.fortios 1.0.1
+ sandwiches.ham *
+ splunk.es 0.0.5
+
+ # /usr/share/ansible/collections/ansible_collections
+ Collection Version
+ ----------------- -------
+ fortinet.fortios 1.0.6
+ pureport.pureport 0.0.8
+ sensu.sensu_go 1.3.0
+
+
+.. _using_collections:
+
+Verifying collections
+=====================
+
+Verifying collections with ``ansible-galaxy``
+---------------------------------------------
+
+Once installed, you can verify that the content of the installed collection matches the content of the collection on the server. This feature expects that the collection is installed in one of the configured collection paths and that the collection exists on one of the configured galaxy servers.
+
+.. code-block:: bash
+
+ ansible-galaxy collection verify my_namespace.my_collection
+
+The output of the ``ansible-galaxy collection verify`` command is quiet if it is successful. If a collection has been modified, the altered files are listed under the collection name.
+
+.. code-block:: bash
+
+ ansible-galaxy collection verify my_namespace.my_collection
+ Collection my_namespace.my_collection contains modified content in the following files:
+ my_namespace.my_collection
+ plugins/inventory/my_inventory.py
+ plugins/modules/my_module.py
+
+You can use the ``-vvv`` flag to display additional information, such as the version and path of the installed collection, the URL of the remote collection used for validation, and successful verification output.
+
+.. code-block:: bash
+
+ ansible-galaxy collection verify my_namespace.my_collection -vvv
+ ...
+ Verifying 'my_namespace.my_collection:1.0.0'.
+ Installed collection found at '/path/to/ansible_collections/my_namespace/my_collection/'
+ Remote collection found at 'https://galaxy.ansible.com/download/my_namespace-my_collection-1.0.0.tar.gz'
+ Successfully verified that checksums for 'my_namespace.my_collection:1.0.0' match the remote collection
+
+If you have a pre-release or non-latest version of a collection installed, you should include the specific version to verify. If the version is omitted, the installed collection is verified against the latest version available on the server.
+
+.. code-block:: bash
+
+ ansible-galaxy collection verify my_namespace.my_collection:1.0.0
+
+In addition to the ``namespace.collection_name:version`` format, you can provide the collections to verify in a ``requirements.yml`` file. Dependencies listed in ``requirements.yml`` are not included in the verify process and should be verified separately.
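+
+A minimal ``requirements.yml`` for verification might look like this (the collection name and version are placeholders):
+
+.. code-block:: yaml
+
+    collections:
+      - name: my_namespace.my_collection
+        version: '1.0.0'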
+
+.. code-block:: bash
+
+ ansible-galaxy collection verify -r requirements.yml
+
+Verifying against ``tar.gz`` files is not supported. If your ``requirements.yml`` contains paths to tar files or URLs for installation, you can use the ``--ignore-errors`` flag to ensure that all collections using the ``namespace.name`` format in the file are processed.
+
+.. _collections_using_playbook:
+
+Using collections in a Playbook
+===============================
+
+Once installed, you can reference collection content by its fully qualified collection name (FQCN):
+
+.. code-block:: yaml
+
+ - hosts: all
+ tasks:
+ - my_namespace.my_collection.mymodule:
+ option1: value
+
+This works for roles or any type of plugin distributed within the collection:
+
+.. code-block:: yaml
+
+ - hosts: all
+ tasks:
+ - import_role:
+ name: my_namespace.my_collection.role1
+
+        - my_namespace.my_collection.mymodule:
+ option1: value
+
+ - debug:
+            msg: '{{ lookup("my_namespace.my_collection.lookup1", "param1") | my_namespace.my_collection.filter1 }}'
+
+Simplifying module names with the ``collections`` keyword
+=========================================================
+
+The ``collections`` keyword lets you define a list of collections that your role or playbook should search for unqualified module and action names. You can then refer to modules and action plugins by their short-form names throughout that role or playbook.
+
+.. warning::
+ If your playbook uses both the ``collections`` keyword and one or more roles, the roles do not inherit the collections set by the playbook. See below for details.
+
+Using ``collections`` in roles
+------------------------------
+
+Within a role, you can control which collections Ansible searches for the tasks inside the role using the ``collections`` keyword in the role's ``meta/main.yml``. Ansible will use the collections list defined inside the role even if the playbook that calls the role defines different collections in a separate ``collections`` keyword entry. Roles defined inside a collection always implicitly search their own collection first, so you don't need to use the ``collections`` keyword to access modules, actions, or other roles contained in the same collection.
+
+.. code-block:: yaml
+
+ # myrole/meta/main.yml
+ collections:
+ - my_namespace.first_collection
+ - my_namespace.second_collection
+ - other_namespace.other_collection
+
+Using ``collections`` in playbooks
+----------------------------------
+
+In a playbook, you can control the collections Ansible searches for modules and action plugins to execute. However, any roles you call in your playbook define their own collections search order; they do not inherit the calling playbook's settings. This is true even if the role does not define its own ``collections`` keyword.
+
+.. code-block:: yaml
+
+ - hosts: all
+ collections:
+ - my_namespace.my_collection
+
+ tasks:
+ - import_role:
+ name: role1
+
+ - mymodule:
+ option1: value
+
+ - debug:
+            msg: '{{ lookup("my_namespace.my_collection.lookup1", "param1") | my_namespace.my_collection.filter1 }}'
+
+The ``collections`` keyword merely creates an ordered 'search path' for non-namespaced plugin and role references. It does not install content or otherwise change Ansible's behavior around the loading of plugins or roles. Note that an FQCN is still required for plugins other than modules and action plugins (for example, lookups, filters, and tests).
+
+.. seealso::
+
+ :ref:`developing_collections`
+ Develop or modify a collection.
+ :ref:`collections_galaxy_meta`
+ Understand the collections metadata structure.
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ The development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/command_line_tools.rst b/docs/docsite/rst/user_guide/command_line_tools.rst
new file mode 100644
index 00000000..56561b59
--- /dev/null
+++ b/docs/docsite/rst/user_guide/command_line_tools.rst
@@ -0,0 +1,20 @@
+.. _command_line_tools:
+
+Working with command line tools
+===============================
+
+Most users are familiar with `ansible` and `ansible-playbook`, but those are not the only utilities Ansible provides.
+Below is a complete list of Ansible utilities. Each page contains a description of the utility and a listing of supported parameters.
+
+.. toctree::
+ :maxdepth: 1
+
+ ../cli/ansible.rst
+ ../cli/ansible-config.rst
+ ../cli/ansible-console.rst
+ ../cli/ansible-doc.rst
+ ../cli/ansible-galaxy.rst
+ ../cli/ansible-inventory.rst
+ ../cli/ansible-playbook.rst
+ ../cli/ansible-pull.rst
+ ../cli/ansible-vault.rst
diff --git a/docs/docsite/rst/user_guide/complex_data_manipulation.rst b/docs/docsite/rst/user_guide/complex_data_manipulation.rst
new file mode 100644
index 00000000..5aa230ad
--- /dev/null
+++ b/docs/docsite/rst/user_guide/complex_data_manipulation.rst
@@ -0,0 +1,243 @@
+.. _complex_data_manipulation:
+
+Data manipulation
+#########################
+
+In many cases, you need to perform complex operations with your variables. While Ansible is not recommended as a data processing/manipulation tool, you can use the existing Jinja2 templating in conjunction with the many added Ansible filters, lookups, and tests to perform some very complex transformations.
+
+Let's start with a quick definition of each type of plugin:
+ - lookups: Mainly used to query 'external data'. In Ansible these were the primary part of loops using the ``with_<lookup>`` construct, but they can be used independently to return data for processing. They normally return a list due to their primary function in loops, as mentioned previously. Used with the ``lookup`` or ``query`` Jinja2 operators.
+ - filters: used to change/transform data; used with the ``|`` Jinja2 operator.
+ - tests: used to validate data; used with the ``is`` Jinja2 operator.
+
+.. note::
+   * Some tests and filters are provided directly by Jinja2, so their availability depends on the Jinja2 version, not Ansible.
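+
+As a quick illustration, here is a hedged sketch that uses all three plugin types in one expression (the values are only for demonstration):
+
+.. code-block:: YAML+Jinja
+
+    - name: use a lookup, a filter, and a test together
+      debug:
+        msg: "{{ (lookup('env', 'HOME') | upper) is match('/HOME') }}"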
+
+.. _for_loops_or_list_comprehensions:
+
+Loops and list comprehensions
+=============================
+
+Most programming languages have loops (``for``, ``while``, and so on) and list comprehensions to do transformations on lists including lists of objects. Jinja2 has a few filters that provide this functionality: ``map``, ``select``, ``reject``, ``selectattr``, ``rejectattr``.
+
+- map: a basic for loop that applies a change to every item in a list; using the 'attribute' keyword, you can do the transformation based on attributes of the list elements.
+- select/reject: a for loop with a condition that lets you create a subset of a list containing the items that match (or do not match) the condition.
+- selectattr/rejectattr: very similar to the above, but the conditional statement uses a specific attribute of the list elements.
+
+
+.. _keys_from_dict_matching_list:
+
+Extract keys from a dictionary matching elements from a list
+------------------------------------------------------------
+
+The Python equivalent code would be:
+
+.. code-block:: python
+
+ chains = [1, 2]
+ for chain in chains:
+ for config in chains_config[chain]['configs']:
+ print(config['type'])
+
+There are several ways to do it in Ansible, this is just one example:
+
+.. code-block:: YAML+Jinja
+ :emphasize-lines: 3
+ :caption: Way to extract matching keys from a list of dictionaries
+
+ tasks:
+ - name: Show extracted list of keys from a list of dictionaries
+ debug: msg="{{ chains | map('extract', chains_config) | map(attribute='configs') | flatten | map(attribute='type') | flatten }}"
+ vars:
+ chains: [1, 2]
+ chains_config:
+ 1:
+ foo: bar
+ configs:
+ - type: routed
+ version: 0.1
+ - type: bridged
+ version: 0.2
+ 2:
+ foo: baz
+ configs:
+ - type: routed
+ version: 1.0
+ - type: bridged
+ version: 1.1
+
+
+.. code-block:: ansible-output
+ :caption: Results of debug task, a list with the extracted keys
+
+ ok: [localhost] => {
+ "msg": [
+ "routed",
+ "bridged",
+ "routed",
+ "bridged"
+ ]
+ }
+
+
+.. _find_mount_point:
+
+Find mount point
+----------------
+
+In this case, we want to find the mount point for a given path across our machines. Since we already collect mount facts, we can use the following:
+
+.. code-block:: YAML+Jinja
+ :caption: Use selectattr to filter mounts into list I can then sort and select the last from
+ :emphasize-lines: 7
+
+ - hosts: all
+ gather_facts: True
+ vars:
+ path: /var/lib/cache
+ tasks:
+      - name: The mount point for {{ path }}, found using the Ansible mount facts; [-1] is the same as the 'last' filter
+        debug: msg="{{ (ansible_facts.mounts | selectattr('mount', 'in', path) | list | sort(attribute='mount'))[-1]['mount'] }}"
+
+
+
+Omit elements from a list
+-------------------------
+
+The special ``omit`` variable ONLY works with module options, but we can still use it in other ways as an identifier to tailor a list of elements:
+
+.. code-block:: YAML+Jinja
+ :caption: Inline list filtering when feeding a module option
+ :emphasize-lines: 3, 7
+
+ - name: enable a list of Windows features, by name
+ set_fact:
+ win_feature_list: "{{ namestuff | reject('equalto', omit) | list }}"
+ vars:
+ namestuff:
+ - "{{ (fs_installed_smb_v1 | default(False)) | ternary(omit, 'FS-SMB1') }}"
+ - "foo"
+ - "bar"
+
+
+Another way is to avoid adding elements to the list in the first place, so you can just use it directly:
+
+.. code-block:: YAML+Jinja
+ :caption: Using set_fact in a loop to increment a list conditionally
+ :emphasize-lines: 3, 4, 6
+
+    - name: build unique list with some items conditionally omitted
+      set_fact:
+        namestuff: '{{ (namestuff | default([])) | union([item]) }}'
+ when: item != omit
+ loop:
+ - "{{ (fs_installed_smb_v1 | default(False)) | ternary(omit, 'FS-SMB1') }}"
+ - "foo"
+ - "bar"
+
+
+.. _complex_type_transfomations:
+
+Complex Type transformations
+=============================
+
+Jinja provides filters for simple data type transformations (``int``, ``bool``, and so on), but when you want to transform data structures things are not as easy.
+You can use loops and list comprehensions as shown above to help. Other filters and lookups can also be chained and leveraged to achieve more complex transformations.
+
+
+.. _create_dictionary_from_list:
+
+Create dictionary from list
+---------------------------
+
+In most languages it is easy to create a dictionary (also known as map/associative array/hash) from a list of pairs. In Ansible there are a couple of ways to do it, and the best one for you might depend on the source of your data.
+
+
+These examples produce ``{"a": "b", "c": "d"}``:
+
+.. code-block:: YAML+Jinja
+    :caption: Simple list to dict by assuming the list is [key, value, key, value, ...]
+
+ vars:
+ single_list: [ 'a', 'b', 'c', 'd' ]
+ mydict: "{{ dict(single_list) | slice(2) | list }}"
+
+
+.. code-block:: YAML+Jinja
+ :caption: It is simpler when we have a list of pairs:
+
+ vars:
+ list_of_pairs: [ ['a', 'b'], ['c', 'd'] ]
+ mydict: "{{ dict(list_of_pairs) }}"
+
+Both end up being the same thing, with the ``slice(2) | list`` transforming ``single_list`` to the same structure as ``list_of_pairs``.
+
+
+
+A bit more complex: using ``set_fact`` and a ``loop`` to create/update a dictionary with key/value pairs from two lists:
+
+.. code-block:: YAML+Jinja
+ :caption: Using set_fact to create a dictionary from a set of lists
+ :emphasize-lines: 3, 4
+
+ - name: Uses 'combine' to update the dictionary and 'zip' to make pairs of both lists
+ set_fact:
+ mydict: "{{ mydict | default({}) | combine({item[0]: item[1]}) }}"
+ loop: "{{ (keys | zip(values)) | list }}"
+ vars:
+ keys:
+ - foo
+ - var
+ - bar
+ values:
+ - a
+ - b
+ - c
+
+This results in ``{"foo": "a", "var": "b", "bar": "c"}``.
+
+
+You can even combine these simple examples with other filters and lookups to create a dictionary dynamically by matching patterns to variable names:
+
+.. code-block:: YAML+Jinja
+ :caption: Using 'vars' to define dictionary from a set of lists without needing a task
+
+ vars:
+ myvarnames: "{{ q('varnames', '^my') }}"
+ mydict: "{{ dict(myvarnames | zip(q('vars', *myvarnames))) }}"
+
+A quick explanation, since there is a lot to unpack from these two lines:
+
+ - The ``varnames`` lookup returns a list of variables whose names match "begin with ``my``".
+ - The list from the previous step is then fed into the ``vars`` lookup to get the list of values.
+   The ``*`` is used to 'dereference the list' (a pythonism that works in Jinja); otherwise it would take the list as a single argument.
+ - Both lists get passed to the ``zip`` filter to pair them off into a unified list of (key, value) tuples.
+ - The ``dict`` function then takes this 'list of pairs' to create the dictionary.
+
+
+An example of how to use facts to find a host's data that meets condition X:
+
+
+.. code-block:: YAML+Jinja
+
+ vars:
+ uptime_of_host_most_recently_rebooted: "{{ansible_play_hosts_all | map('extract', hostvars, 'ansible_uptime_seconds') | sort | first}}"
+
+
+Using an example from @zoradache on reddit, to show the 'uptime in days/hours/minutes' (assumes facts were gathered).
+https://www.reddit.com/r/ansible/comments/gj5a93/trying_to_get_uptime_from_seconds/fqj2qr3/
+
+.. code-block:: YAML+Jinja
+
+ - debug:
+ msg: Timedelta {{ now() - now().fromtimestamp(now(fmt='%s') | int - ansible_uptime_seconds) }}
+
+
+.. seealso::
+
+ :doc:`playbooks_filters`
+ Jinja2 filters included with Ansible
+ :doc:`playbooks_tests`
+ Jinja2 tests included with Ansible
+ `Jinja2 Docs <http://jinja.pocoo.org/docs/>`_
+ Jinja2 documentation, includes lists for core filters and tests
diff --git a/docs/docsite/rst/user_guide/connection_details.rst b/docs/docsite/rst/user_guide/connection_details.rst
new file mode 100644
index 00000000..60f93cad
--- /dev/null
+++ b/docs/docsite/rst/user_guide/connection_details.rst
@@ -0,0 +1,116 @@
+.. _connections:
+
+******************************
+Connection methods and details
+******************************
+
+This section shows you how to expand and refine the connection methods Ansible uses for your inventory.
+
+ControlPersist and paramiko
+---------------------------
+
+By default, Ansible uses native OpenSSH, because it supports ControlPersist (a performance feature), Kerberos, and options in ``~/.ssh/config`` such as Jump Host setup. If your control machine uses an older version of OpenSSH that does not support ControlPersist, Ansible will fall back to a Python implementation of OpenSSH called 'paramiko'.
+
+.. _connection_set_user:
+
+Setting a remote user
+---------------------
+
+By default, Ansible connects to all remote devices with the user name you are using on the control node. If that user name does not exist on a remote device, you can set a different user name for the connection. If you just need to do some tasks as a different user, look at :ref:`become`. You can set the connection user in a playbook:
+
+.. code-block:: yaml
+
+ ---
+ - name: update webservers
+ hosts: webservers
+ remote_user: admin
+
+ tasks:
+ - name: thing to do first in this playbook
+ . . .
+
+as a host variable in inventory:
+
+.. code-block:: text
+
+ other1.example.com ansible_connection=ssh ansible_user=myuser
+ other2.example.com ansible_connection=ssh ansible_user=myotheruser
+
+or as a group variable in inventory:
+
+.. code-block:: yaml
+
+    cloud:
+      hosts:
+        cloud1:
+          ansible_host: my_backup.cloud.com
+        cloud2:
+          ansible_host: my_backup2.cloud.com
+      vars:
+        ansible_user: admin
+
+Setting up SSH keys
+-------------------
+
+By default, Ansible assumes you are using SSH keys to connect to remote machines. SSH keys are encouraged, but you can use password authentication if needed with the ``--ask-pass`` option. If you need to provide a password for :ref:`privilege escalation <become>` (sudo, pbrun, and so on), use ``--ask-become-pass``.
+
+.. include:: shared_snippets/SSH_password_prompt.txt
+
+To set up an SSH agent to avoid retyping passwords, you can do the following:
+
+.. code-block:: bash
+
+ $ ssh-agent bash
+ $ ssh-add ~/.ssh/id_rsa
+
+Depending on your setup, you may wish to use Ansible's ``--private-key`` command line option to specify a pem file instead. You can also add the private key file:
+
+.. code-block:: bash
+
+ $ ssh-agent bash
+ $ ssh-add ~/.ssh/keypair.pem
+
+Another way to add private key files without using ssh-agent is using ``ansible_ssh_private_key_file`` in an inventory file as explained here: :ref:`intro_inventory`.
+
+Running against localhost
+-------------------------
+
+You can run commands against the control node by using "localhost" or "127.0.0.1" for the server name:
+
+.. code-block:: bash
+
+ $ ansible localhost -m ping -e 'ansible_python_interpreter="/usr/bin/env python"'
+
+You can specify localhost explicitly by adding this to your inventory file:
+
+.. code-block:: bash
+
+ localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python"
+
+.. _host_key_checking_on:
+
+Managing host key checking
+--------------------------
+
+Ansible enables host key checking by default. Checking host keys guards against server spoofing and man-in-the-middle attacks, but it does require some maintenance.
+
+If a host is reinstalled and has a different key in 'known_hosts', this will result in an error message until corrected. If a new host is not in 'known_hosts' your control node may prompt for confirmation of the key, which results in an interactive prompt if you are running Ansible from, say, cron. You might not want this.
+
+If you understand the implications and wish to disable this behavior, you can do so by editing ``/etc/ansible/ansible.cfg`` or ``~/.ansible.cfg``:
+
+.. code-block:: text
+
+ [defaults]
+ host_key_checking = False
+
+Alternatively this can be set by the :envvar:`ANSIBLE_HOST_KEY_CHECKING` environment variable:
+
+.. code-block:: bash
+
+ $ export ANSIBLE_HOST_KEY_CHECKING=False
+
+Also note that host key checking in paramiko mode is reasonably slow; therefore, switching to 'ssh' is also recommended when using this feature.
+
+Other connection methods
+------------------------
+
+Ansible can use a variety of connection methods beyond SSH. You can select any connection plugin, including managing things locally and managing chroot, lxc, and jail containers.
+A mode called 'ansible-pull' can also invert the system and have systems 'phone home' via scheduled git checkouts to pull configuration directives from a central repository.
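+
+For example, a hedged ``ansible-pull`` invocation (the repository URL and playbook name are placeholders):
+
+.. code-block:: bash
+
+    $ ansible-pull -U https://github.com/example/ansible-config.git local.yml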
diff --git a/docs/docsite/rst/user_guide/guide_rolling_upgrade.rst b/docs/docsite/rst/user_guide/guide_rolling_upgrade.rst
new file mode 100644
index 00000000..6f2ca742
--- /dev/null
+++ b/docs/docsite/rst/user_guide/guide_rolling_upgrade.rst
@@ -0,0 +1,324 @@
+**********************************************************
+Playbook Example: Continuous Delivery and Rolling Upgrades
+**********************************************************
+
+.. contents::
+ :local:
+
+.. _lamp_introduction:
+
+What is continuous delivery?
+============================
+
+Continuous delivery (CD) means frequently delivering updates to your software application.
+
+The idea is that by updating more often, you do not have to wait for a specific timed period, and your organization
+gets better at the process of responding to change.
+
+Some Ansible users are deploying updates to their end users on an hourly or even more frequent basis -- sometimes every time
+there is an approved code change. To achieve this, you need tools to be able to quickly apply those updates in a zero-downtime way.
+
+This document describes in detail how to achieve this goal, using one of Ansible's most complete example
+playbooks as a template: lamp_haproxy. This example uses a lot of Ansible features: roles, templates,
+and group variables, and it also comes with an orchestration playbook that can do zero-downtime
+rolling upgrades of the web application stack.
+
+.. note::
+
+ `Click here for the latest playbooks for this example
+ <https://github.com/ansible/ansible-examples/tree/master/lamp_haproxy>`_.
+
+The playbooks deploy Apache, PHP, MySQL, Nagios, and HAProxy to a CentOS-based set of servers.
+
+We're not going to cover how to run these playbooks here. Read the included README in the GitHub project along with the
+example for that information. Instead, we're going to take a close look at every part of the playbook and describe what it does.
+
+.. _lamp_deployment:
+
+Site deployment
+===============
+
+Let's start with ``site.yml``. This is our site-wide deployment playbook. It can be used to initially deploy the site, as well
+as push updates to all of the servers:
+
+.. code-block:: yaml
+
+    ---
+    # This playbook deploys the whole application stack in this site.
+
+    # Apply common configuration to all hosts
+    - hosts: all
+
+      roles:
+        - common
+
+    # Configure and deploy database servers.
+    - hosts: dbservers
+
+      roles:
+        - db
+
+    # Configure and deploy the web servers. Note that we include two roles
+    # here, the 'base-apache' role which simply sets up Apache, and 'web'
+    # which includes our example web application.
+
+    - hosts: webservers
+
+      roles:
+        - base-apache
+        - web
+
+    # Configure and deploy the load balancer(s).
+    - hosts: lbservers
+
+      roles:
+        - haproxy
+
+    # Configure and deploy the Nagios monitoring node(s).
+    - hosts: monitoring
+
+      roles:
+        - base-apache
+        - nagios
+
+.. note::
+
+ If you're not familiar with terms like playbooks and plays, you should review :ref:`working_with_playbooks`.
+
+In this playbook we have 5 plays. The first one targets ``all`` hosts and applies the ``common`` role to all of the hosts.
+This is for site-wide things like yum repository configuration, firewall configuration, and anything else that needs to apply to all of the servers.
+
+The next four plays run against specific host groups and apply specific roles to those servers.
+Along with the roles for Nagios monitoring, the database, and the web application, we've implemented a
+``base-apache`` role that installs and configures a basic Apache setup. This is used by both the
+sample web application and the Nagios hosts.
+
+.. _lamp_roles:
+
+Reusable content: roles
+=======================
+
+By now you should have a bit of understanding about roles and how they work in Ansible. Roles are a way to organize
+content (tasks, handlers, templates, and files) into reusable components.
+
+This example has six roles: ``common``, ``base-apache``, ``db``, ``haproxy``, ``nagios``, and ``web``. How you organize
+your roles is up to you and your application, but most sites will have one or more common roles that are applied to
+all systems, and then a series of application-specific roles that install and configure particular parts of the site.
+
+Roles can have variables and dependencies, and you can pass in parameters to roles to modify their behavior.
+You can read more about roles in the :ref:`playbooks_reuse_roles` section.
+
+.. _lamp_group_variables:
+
+Configuration: group variables
+==============================
+
+Group variables are variables that are applied to groups of servers. They can be used in templates and in
+playbooks to customize behavior and to provide easily-changed settings and parameters. They are stored in
+a directory called ``group_vars`` in the same location as your inventory.
+Here is lamp_haproxy's ``group_vars/all`` file. As you might expect, these variables are applied to all of the machines in your inventory:
+
+.. code-block:: yaml
+
+ ---
+ httpd_port: 80
+ ntpserver: 192.0.2.23
+
+This is a YAML file, and you can create lists and dictionaries for more complex variable structures.
+In this case, we are just setting two variables, one for the port for the web server, and one for the
+NTP server that our machines should use for time synchronization.
+
+Here's another group variables file. This is ``group_vars/dbservers`` which applies to the hosts in the ``dbservers`` group:
+
+.. code-block:: yaml
+
+ ---
+ mysqlservice: mysqld
+ mysql_port: 3306
+ dbuser: root
+ dbname: foodb
+ upassword: usersecret
+
+If you look in the example, there are similar group variables for the ``webservers`` and ``lbservers`` groups.
+
+These variables are used in a variety of places. You can use them in playbooks, like this, in ``roles/db/tasks/main.yml``:
+
+.. code-block:: yaml
+
+    - name: Create Application Database
+      mysql_db:
+        name: "{{ dbname }}"
+        state: present
+
+    - name: Create Application DB User
+      mysql_user:
+        name: "{{ dbuser }}"
+        password: "{{ upassword }}"
+        priv: "*.*:ALL"
+        host: '%'
+        state: present
+
+You can also use these variables in templates, like this, in ``roles/common/templates/ntp.conf.j2``:
+
+.. code-block:: text
+
+ driftfile /var/lib/ntp/drift
+
+ restrict 127.0.0.1
+ restrict -6 ::1
+
+ server {{ ntpserver }}
+
+ includefile /etc/ntp/crypto/pw
+
+ keys /etc/ntp/keys
+
+You can see that the variable substitution syntax of ``{{`` and ``}}`` is the same for both templates and variables. The syntax
+inside the curly braces is Jinja2, and you can do all sorts of operations and apply different filters to the
+data inside. In templates, you can also use for loops and if statements to handle more complex situations,
+like this, in ``roles/common/templates/iptables.j2``:
+
+.. code-block:: jinja
+
+ {% if inventory_hostname in groups['dbservers'] %}
+ -A INPUT -p tcp --dport 3306 -j ACCEPT
+ {% endif %}
+
+This is testing to see if the inventory name of the machine we're currently operating on (``inventory_hostname``)
+exists in the inventory group ``dbservers``. If so, that machine will get an iptables ACCEPT line for port 3306.
+
+Here's another example, from the same template:
+
+.. code-block:: jinja
+
+ {% for host in groups['monitoring'] %}
+ -A INPUT -p tcp -s {{ hostvars[host].ansible_default_ipv4.address }} --dport 5666 -j ACCEPT
+ {% endfor %}
+
+This loops over all of the hosts in the group called ``monitoring``, and adds an ACCEPT line for
+each monitoring host's default IPv4 address to the current machine's iptables configuration, so that Nagios can monitor those hosts.
+
+You can learn a lot more about Jinja2 and its capabilities `here <http://jinja.pocoo.org/docs/>`_, and you
+can read more about Ansible variables in general in the :ref:`playbooks_variables` section.
+
+.. _lamp_rolling_upgrade:
+
+The rolling upgrade
+===================
+
+Now you have a fully-deployed site with web servers, a load balancer, and monitoring. How do you update it? This is where Ansible's
+orchestration features come into play. While some applications use the term 'orchestration' to mean basic ordering or command-blasting, Ansible
+refers to orchestration as 'conducting machines like an orchestra', and has a pretty sophisticated engine for it.
+
+Ansible has the capability to do operations on multi-tier applications in a coordinated way, making it easy to orchestrate a sophisticated zero-downtime rolling upgrade of our web application. This is implemented in a separate playbook, called ``rolling_update.yml``.
+
+Looking at the playbook, you can see it is made up of two plays. The first play is very simple and looks like this:
+
+.. code-block:: yaml
+
+    - hosts: monitoring
+      tasks: []
+
+What's going on here, and why are there no tasks? You might know that Ansible gathers "facts" from the servers before operating upon them. These facts are useful for all sorts of things: networking information, OS/distribution versions, and so on. In our case, we need to know something about all of the monitoring servers in our environment before we perform the update, so this simple play forces a fact-gathering step on our monitoring servers. You will see this pattern sometimes, and it's a useful trick to know.
+
+The next part is the update play. The first part looks like this:
+
+.. code-block:: yaml
+
+    - hosts: webservers
+      user: root
+      serial: 1
+
+This is just a normal play definition, operating on the ``webservers`` group. The ``serial`` keyword tells Ansible how many servers to operate on at once. If it's not specified, Ansible will parallelize these operations up to the default "forks" limit specified in the configuration file. But for a zero-downtime rolling upgrade, you may not want to operate on that many hosts at once. If you have just a handful of webservers, you might set ``serial`` to 1, for one host at a time. If you have 100, maybe you could set ``serial`` to 10, for ten at a time.
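+
+As a sketch, ``serial`` also accepts a percentage of the play's hosts, so the batch size scales with the size of the group:
+
+.. code-block:: yaml
+
+    - hosts: webservers
+      serial: "30%"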
+
+Here is the next part of the update play:
+
+.. code-block:: yaml
+
+    pre_tasks:
+      - name: disable nagios alerts for this host webserver service
+        nagios:
+          action: disable_alerts
+          host: "{{ inventory_hostname }}"
+          services: webserver
+        delegate_to: "{{ item }}"
+        loop: "{{ groups.monitoring }}"
+
+      - name: disable the server in haproxy
+        shell: echo "disable server myapplb/{{ inventory_hostname }}" | socat stdio /var/lib/haproxy/stats
+        delegate_to: "{{ item }}"
+        loop: "{{ groups.lbservers }}"
+
+.. note::
+ - The ``serial`` keyword forces the play to be executed in 'batches'. Each batch counts as a full play with a subselection of hosts.
+   This has some consequences on play behavior. For example, if all hosts in a batch fail, the play fails, which in turn fails the entire run. You should consider this when combining with ``max_fail_percentage``.
+
+The ``pre_tasks`` keyword just lets you list tasks to run before the roles are called. This will make more sense in a minute. If you look at the names of these tasks, you can see that we are disabling Nagios alerts and then removing the webserver that we are currently updating from the HAProxy load balancing pool.
+
+The ``delegate_to`` and ``loop`` arguments, used together, cause Ansible to loop over each monitoring server and load balancer, and perform that operation (delegate that operation) on the monitoring or load balancing server, "on behalf" of the webserver. In programming terms, the outer loop is the list of web servers, and the inner loop is the list of monitoring servers.
+
+Note that the HAProxy step looks a little complicated. We're using HAProxy in this example because it's freely available, though if you have (for instance) an F5 or Netscaler in your infrastructure (or maybe you have an AWS Elastic IP setup?), you can use Ansible modules to communicate with them instead. You might also wish to use other monitoring modules instead of ``nagios``, but this just shows the main goal of the ``pre_tasks`` section -- take the server out of monitoring, and take it out of rotation.
+
+The next step simply re-applies the proper roles to the web servers. This will cause any configuration management declarations in the ``web`` and ``base-apache`` roles to be applied to the web servers, including an update of the web application code itself. We don't have to do it this way -- we could instead update just the web application -- but this is a good example of how roles can be used to reuse tasks:
+
+.. code-block:: yaml
+
+    roles:
+      - common
+      - base-apache
+      - web
+
+Finally, in the ``post_tasks`` section, we reverse the changes to the Nagios configuration and put the web server back in the load balancing pool:
+
+.. code-block:: yaml
+
+    post_tasks:
+      - name: Enable the server in haproxy
+        shell: echo "enable server myapplb/{{ inventory_hostname }}" | socat stdio /var/lib/haproxy/stats
+        delegate_to: "{{ item }}"
+        loop: "{{ groups.lbservers }}"
+
+      - name: re-enable nagios alerts
+        nagios:
+          action: enable_alerts
+          host: "{{ inventory_hostname }}"
+          services: webserver
+        delegate_to: "{{ item }}"
+        loop: "{{ groups.monitoring }}"
+
+Again, if you were using a Netscaler or F5 or Elastic Load Balancer, you would just substitute in the appropriate modules instead.
+
+.. _lamp_end_notes:
+
+Managing other load balancers
+=============================
+
+In this example, we use the simple HAProxy load balancer to front-end the web servers. It's easy to configure and easy to manage. As we have mentioned, Ansible has support for a variety of other load balancers like Citrix NetScaler, F5 BigIP, Amazon Elastic Load Balancers, and more. See the :ref:`working_with_modules` documentation for more information.
+
+For other load balancers, you may need to send shell commands to them (like we do for HAProxy above), or call an API, if your load balancer exposes one. For the load balancers for which Ansible has modules, you may want to run them as a ``local_action`` if they contact an API. You can read more about local actions in the :ref:`playbooks_delegation` section. Should you develop anything interesting for some hardware where there is not a module, it might make for a good contribution!
+
+.. _lamp_end_to_end:
+
+Continuous delivery end-to-end
+==============================
+
+Now that you have an automated way to deploy updates to your application, how do you tie it all together? A lot of organizations use a continuous integration tool like `Jenkins <https://jenkins.io/>`_ or `Atlassian Bamboo <https://www.atlassian.com/software/bamboo>`_ to tie the development, test, release, and deploy steps together. You may also want to use a tool like `Gerrit <https://www.gerritcodereview.com/>`_ to add a code review step to commits to either the application code itself, or to your Ansible playbooks, or both.
+
+Depending on your environment, you might be deploying continuously to a test environment, running an integration test battery against that environment, and then deploying automatically into production. Or you could keep it simple and just use the rolling-update for on-demand deployment into test or production specifically. This is all up to you.
+
+To tie in with Continuous Integration systems, you can easily trigger playbook runs using the ``ansible-playbook`` command line tool, or, if you're using :ref:`ansible_tower`, the ``tower-cli`` tool or the built-in REST API. (The tower-cli command 'joblaunch' will spawn a remote job over the REST API and is pretty slick.)
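+
+For example, a CI job's deploy step might be as small as this sketch (the inventory file name is illustrative):
+
+.. code-block:: bash
+
+    # Run the rolling update after tests pass
+    $ ansible-playbook -i production rolling_update.yml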
+
+This should give you a good idea of how to structure a multi-tier application with Ansible, and orchestrate operations upon that app, with the eventual goal of continuous delivery to your customers. You could extend the idea of the rolling upgrade to lots of different parts of the app; maybe add front-end web servers along with application servers, for instance, or replace the SQL database with something like MongoDB or Riak. Ansible gives you the capability to easily manage complicated environments and automate common operations.
+
+.. seealso::
+
+ `lamp_haproxy example <https://github.com/ansible/ansible-examples/tree/master/lamp_haproxy>`_
+ The lamp_haproxy example discussed here.
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_reuse_roles`
+ An introduction to playbook roles
+ :ref:`playbooks_variables`
+ An introduction to Ansible variables
+ `Ansible.com: Continuous Delivery <https://www.ansible.com/use-cases/continuous-delivery>`_
+ An introduction to Continuous Delivery with Ansible
diff --git a/docs/docsite/rst/user_guide/index.rst b/docs/docsite/rst/user_guide/index.rst
new file mode 100644
index 00000000..e3f2aaf3
--- /dev/null
+++ b/docs/docsite/rst/user_guide/index.rst
@@ -0,0 +1,133 @@
+.. _user_guide_index:
+
+##########
+User Guide
+##########
+
+Welcome to the Ansible User Guide! This guide covers how to work with Ansible, including using the command line; working with inventory; interacting with data; writing tasks, plays, and playbooks; executing playbooks; and reference materials. This page outlines the most common situations and questions that bring readers to this section. If you prefer a traditional table of contents, you can find one at the bottom of the page.
+
+Getting started
+===============
+
+* I'd like an overview of how Ansible works. Where can I find:
+
+ * a :ref:`quick video overview <quickstart_guide>`
+ * a :ref:`text introduction <intro_getting_started>`
+
+* I'm ready to learn about Ansible. What :ref:`basic_concepts` do I need to learn?
+* I want to use Ansible without writing a playbook. How do I use :ref:`ad-hoc commands <intro_adhoc>`?
+
+Writing tasks, plays, and playbooks
+===================================
+
+* I'm writing my first playbook. What should I :ref:`know before I begin <playbooks_tips_and_tricks>`?
+* I have a specific use case for a task or play:
+
+ * Executing tasks with elevated privileges or as a different user with :ref:`become <become>`
+ * Repeating a task once for each item in a list with :ref:`loops <playbooks_loops>`
+ * Executing tasks on a different machine with :ref:`delegation <playbooks_delegation>`
+ * Running tasks only when certain conditions apply with :ref:`conditionals <playbooks_conditionals>` and evaluating conditions with :ref:`tests <playbooks_tests>`
+ * Grouping a set of tasks together with :ref:`blocks <playbooks_blocks>`
+ * Running tasks only when something has changed with :ref:`handlers <handlers>`
+ * Changing the way Ansible :ref:`handles failures <playbooks_error_handling>`
+ * Setting remote :ref:`environment values <playbooks_environment>`
+
+* I want to leverage the power of re-usable Ansible artifacts. How do I create re-usable :ref:`files <playbooks_reuse>` and :ref:`roles <playbooks_reuse_roles>`?
+* I need to incorporate one file or playbook inside another. What is the difference between :ref:`including and importing <playbooks_reuse_includes>`?
+* I want to run selected parts of my playbook. How do I add and use :ref:`tags <tags>`?
+
+Working with inventory
+======================
+
+* I have a list of servers and devices I want to automate. How do I create :ref:`inventory <intro_inventory>` to track them?
+* I use cloud services and constantly have servers and devices starting and stopping. How do I track them using :ref:`dynamic inventory <intro_dynamic_inventory>`?
+* I want to automate specific sub-sets of my inventory. How do I use :ref:`patterns <intro_patterns>`?
+
+Interacting with data
+=====================
+
+* I want to use a single playbook against multiple systems with different attributes. How do I use :ref:`variables <playbooks_variables>` to handle the differences?
+* I want to retrieve data about my systems. How do I access :ref:`Ansible facts <vars_and_facts>`?
+* I need to access sensitive data like passwords with Ansible. How can I protect that data with :ref:`Ansible vault <vault>`?
+* I want to change the data I have, so I can use it in a task. How do I use :ref:`filters <playbooks_filters>` to transform my data?
+* I need to retrieve data from an external datastore. How do I use :ref:`lookups <playbooks_lookups>` to access databases and APIs?
+* I want to ask playbook users to supply data. How do I get user input with :ref:`prompts <playbooks_prompts>`?
+* I use certain modules frequently. How do I streamline my inventory and playbooks by :ref:`setting default values for module parameters <module_defaults>`?
+
+Executing playbooks
+===================
+
+Once your playbook is ready to run, these topics may be useful:
+
+* Executing "dry run" playbooks with :ref:`check mode and diff <check_mode_dry>`
+* Running playbooks while troubleshooting with :ref:`start and step <playbooks_start_and_step>`
+* Correcting tasks during execution with the :ref:`Ansible debugger <playbook_debugger>`
+* Controlling how my playbook executes with :ref:`strategies and more <playbooks_strategies>`
+* Running tasks, plays, and playbooks :ref:`asynchronously <playbooks_async>`
+
+Advanced features and reference
+===============================
+
+* Using :ref:`advanced syntax <playbooks_advanced_syntax>`
+* Manipulating :ref:`complex data <complex_data_manipulation>`
+* Using :ref:`plugins <plugins_lookup>`
+* Using :ref:`playbook keywords <playbook_keywords>`
+* Using :ref:`command-line tools <command_line_tools>`
+* Rejecting :ref:`specific modules <plugin_filtering_config>`
+* Module :ref:`maintenance <modules_support>`
+
+Traditional Table of Contents
+=============================
+
+If you prefer to read the entire User Guide, here's a list of the pages in order:
+
+.. toctree::
+ :maxdepth: 2
+
+ quickstart
+ basic_concepts
+ intro_getting_started
+ intro_adhoc
+ playbooks
+ playbooks_intro
+ playbooks_best_practices
+ become
+ playbooks_loops
+ playbooks_delegation
+ playbooks_conditionals
+ playbooks_tests
+ playbooks_blocks
+ playbooks_handlers
+ playbooks_error_handling
+ playbooks_environment
+ playbooks_reuse
+ playbooks_reuse_roles
+ playbooks_reuse_includes
+ playbooks_tags
+ intro_inventory
+ intro_dynamic_inventory
+ intro_patterns
+ connection_details
+ command_line_tools
+ playbooks_variables
+ playbooks_vars_facts
+ vault
+ playbooks_filters
+ playbooks_lookups
+ playbooks_prompts
+ playbooks_module_defaults
+ playbooks_checkmode
+ playbooks_startnstep
+ playbooks_debugger
+ playbooks_strategies
+ playbooks_async
+ playbooks_advanced_syntax
+ complex_data_manipulation
+ plugin_filtering_config
+ sample_setup
+ modules
+ ../plugins/plugins
+ ../reference_appendices/playbooks_keywords
+ intro_bsd
+ windows
+ collections_using
diff --git a/docs/docsite/rst/user_guide/intro.rst b/docs/docsite/rst/user_guide/intro.rst
new file mode 100644
index 00000000..d6ff243f
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro.rst
@@ -0,0 +1,15 @@
+:orphan:
+
+Introduction
+============
+
+Before we start exploring the main components of Ansible -- playbooks, configuration management, deployment, and orchestration -- we'll learn how to get Ansible installed and cover some basic concepts. We'll also go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible, and see what modules are available in Ansible's core (you can also write your own, which is covered later).
+
+.. toctree::
+ :maxdepth: 1
+
+ ../installation_guide/index
+ ../dev_guide/overview_architecture
+ ../installation_guide/intro_configuration
+ intro_bsd
+ intro_windows
diff --git a/docs/docsite/rst/user_guide/intro_adhoc.rst b/docs/docsite/rst/user_guide/intro_adhoc.rst
new file mode 100644
index 00000000..a7aa8da3
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_adhoc.rst
@@ -0,0 +1,206 @@
+.. _intro_adhoc:
+
+*******************************
+Introduction to ad-hoc commands
+*******************************
+
+An Ansible ad-hoc command uses the `/usr/bin/ansible` command-line tool to automate a single task on one or more managed nodes. Ad-hoc commands are quick and easy, but they are not reusable. So why learn about ad-hoc commands first? Ad-hoc commands demonstrate the simplicity and power of Ansible. The concepts you learn here will port over directly to the playbook language. Before reading and executing these examples, please read :ref:`intro_inventory`.
+
+.. contents::
+ :local:
+
+Why use ad-hoc commands?
+========================
+
+Ad-hoc commands are great for tasks you repeat rarely. For example, if you want to power off all the machines in your lab for Christmas vacation, you could execute a quick one-liner in Ansible without writing a playbook. An ad-hoc command looks like this:
+
+.. code-block:: bash
+
+ $ ansible [pattern] -m [module] -a "[module options]"
+
+You can learn more about :ref:`patterns<intro_patterns>` and :ref:`modules<working_with_modules>` on other pages.
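+
+For instance, the vacation power-off mentioned above might look like this sketch (assuming a [lab] group in your inventory and privilege escalation rights):
+
+.. code-block:: bash
+
+    $ ansible lab -a "/sbin/shutdown -h now" -u username --become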
+
+Use cases for ad-hoc tasks
+==========================
+
+Ad-hoc tasks can be used to reboot servers, copy files, manage packages and users, and much more. You can use any Ansible module in an ad-hoc task. Ad-hoc tasks, like playbooks, use a declarative model,
+calculating and executing the actions required to reach a specified final state. They
+achieve a form of idempotence by checking the current state before they begin and doing nothing unless the current state is different from the specified final state.
+
+Rebooting servers
+-----------------
+
+The default module for the ``ansible`` command-line utility is the :ref:`ansible.builtin.command module<command_module>`. You can use an ad-hoc task to call the command module and reboot all web servers in Atlanta, 10 at a time. Before Ansible can do this, you must have all servers in Atlanta listed in a group called [atlanta] in your inventory, and you must have working SSH credentials for each machine in that group. To reboot all the servers in the [atlanta] group:
+
+.. code-block:: bash
+
+ $ ansible atlanta -a "/sbin/reboot"
+
+By default Ansible uses only 5 simultaneous processes. If you have more hosts than the value set for the fork count, Ansible will talk to them, but it will take a little longer. To reboot the [atlanta] servers with 10 parallel forks:
+
+.. code-block:: bash
+
+ $ ansible atlanta -a "/sbin/reboot" -f 10
+
+/usr/bin/ansible will default to running from your user account. To connect as a different user:
+
+.. code-block:: bash
+
+ $ ansible atlanta -a "/sbin/reboot" -f 10 -u username
+
+Rebooting probably requires privilege escalation. You can connect to the server as ``username`` and run the command as the ``root`` user by using the :ref:`become <become>` keyword:
+
+.. code-block:: bash
+
+ $ ansible atlanta -a "/sbin/reboot" -f 10 -u username --become [--ask-become-pass]
+
+If you add ``--ask-become-pass`` or ``-K``, Ansible prompts you for the password to use for privilege escalation (sudo/su/pfexec/doas/etc).
+
+.. note::
+ The :ref:`command module <command_module>` does not support extended shell syntax like piping and
+ redirects (although shell variables will always work). If your command requires shell-specific
+ syntax, use the `shell` module instead. Read more about the differences on the
+ :ref:`working_with_modules` page.
+
+So far all our examples have used the default 'command' module. To use a different module, pass ``-m`` for module name. For example, to use the :ref:`ansible.builtin.shell module <shell_module>`:
+
+.. code-block:: bash
+
+ $ ansible raleigh -m ansible.builtin.shell -a 'echo $TERM'
+
+When running any command with the Ansible *ad hoc* CLI (as opposed to
+:ref:`Playbooks <working_with_playbooks>`), pay particular attention to shell quoting rules, so
+the local shell retains the variable and passes it to Ansible.
+For example, using double rather than single quotes in the above example would cause your
+local shell to expand the variable before Ansible ever sees it.
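+
+For example, the first command below echoes the remote host's ``$TERM``, while the second echoes the value from your local environment:
+
+.. code-block:: bash
+
+    $ ansible raleigh -m ansible.builtin.shell -a 'echo $TERM'
+    $ ansible raleigh -m ansible.builtin.shell -a "echo $TERM"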
+
+.. _file_transfer:
+
+Managing files
+--------------
+
+An ad-hoc task can harness the power of Ansible and SCP to transfer many files to multiple machines in parallel. To transfer a file directly to all servers in the [atlanta] group:
+
+.. code-block:: bash
+
+ $ ansible atlanta -m ansible.builtin.copy -a "src=/etc/hosts dest=/tmp/hosts"
+
+If you plan to repeat a task like this, use the :ref:`ansible.builtin.template<template_module>` module in a playbook.
+
+The :ref:`ansible.builtin.file<file_module>` module allows changing ownership and permissions on files. These
+same options can be passed directly to the ``copy`` module as well:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.file -a "dest=/srv/foo/a.txt mode=600"
+ $ ansible webservers -m ansible.builtin.file -a "dest=/srv/foo/b.txt mode=600 owner=mdehaan group=mdehaan"
+
+The ``file`` module can also create directories, similar to ``mkdir -p``:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.file -a "dest=/path/to/c mode=755 owner=mdehaan group=mdehaan state=directory"
+
+It can also delete directories (recursively) and delete files:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.file -a "dest=/path/to/c state=absent"
+
+.. _managing_packages:
+
+Managing packages
+-----------------
+
+You might also use an ad-hoc task to install, update, or remove packages on managed nodes using a package management module like yum. To ensure a package is installed without updating it:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.yum -a "name=acme state=present"
+
+To ensure a specific version of a package is installed:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.yum -a "name=acme-1.5 state=present"
+
+To ensure a package is at the latest version:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.yum -a "name=acme state=latest"
+
+To ensure a package is not installed:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.yum -a "name=acme state=absent"
+
+Ansible has modules for managing packages under many platforms. If there is no module for your package manager, you can install packages using the command module or create a module for your package manager.
+
+.. _users_and_groups:
+
+Managing users and groups
+-------------------------
+
+You can create, manage, and remove user accounts on your managed nodes with ad-hoc tasks:
+
+.. code-block:: bash
+
+ $ ansible all -m ansible.builtin.user -a "name=foo password=<crypted password here>"
+
+ $ ansible all -m ansible.builtin.user -a "name=foo state=absent"
+
+See the :ref:`ansible.builtin.user <user_module>` module documentation for details on all of the available options, including
+how to manipulate groups and group membership.
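+
+For example, a sketch of adding an existing user to a supplementary group (the group name here is illustrative):
+
+.. code-block:: bash
+
+    $ ansible all -m ansible.builtin.user -a "name=foo groups=wheel append=yes"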
+
+.. _managing_services:
+
+Managing services
+-----------------
+
+Ensure a service is started on all webservers:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.service -a "name=httpd state=started"
+
+Alternatively, restart a service on all webservers:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.service -a "name=httpd state=restarted"
+
+Ensure a service is stopped:
+
+.. code-block:: bash
+
+ $ ansible webservers -m ansible.builtin.service -a "name=httpd state=stopped"
+
+.. _gathering_facts:
+
+Gathering facts
+---------------
+
+Facts represent discovered variables about a system. You can use facts to implement conditional execution of tasks but also just to get ad-hoc information about your systems. To see all facts:
+
+.. code-block:: bash
+
+ $ ansible all -m ansible.builtin.setup
+
+You can also filter this output to display only certain facts; see the :ref:`ansible.builtin.setup <setup_module>` module documentation for details.
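+
+For example, a sketch using the module's ``filter`` parameter to show only memory-related facts:
+
+.. code-block:: bash
+
+    $ ansible all -m ansible.builtin.setup -a "filter=ansible_*_mb"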
+
+Now that you understand the basic elements of Ansible execution, you are ready to learn to automate repetitive tasks using :ref:`Ansible Playbooks <playbooks_intro>`.
+
+.. seealso::
+
+ :ref:`intro_configuration`
+ All about the Ansible config file
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`working_with_playbooks`
+ Using Ansible for configuration management & deployment
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/intro_bsd.rst b/docs/docsite/rst/user_guide/intro_bsd.rst
new file mode 100644
index 00000000..68a62f31
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_bsd.rst
@@ -0,0 +1,106 @@
+.. _working_with_bsd:
+
+Ansible and BSD
+===============
+
+Managing BSD machines is different from managing other Unix-like machines. If you have managed nodes running BSD, review these topics.
+
+.. contents::
+ :local:
+
+Connecting to BSD nodes
+-----------------------
+
+Ansible connects to managed nodes using OpenSSH by default. This works on BSD if you use SSH keys for authentication. However, if you use SSH passwords for authentication, Ansible relies on sshpass. Most
+versions of sshpass do not deal well with BSD login prompts, so when using SSH passwords against BSD machines, use ``paramiko`` to connect instead of OpenSSH. You can set this globally in ``ansible.cfg``, or as an inventory/group/host variable. For example:
+
+.. code-block:: text
+
+ [freebsd]
+ mybsdhost1 ansible_connection=paramiko
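+
+Alternatively, a sketch of the equivalent global setting in ``ansible.cfg``:
+
+.. code-block:: text
+
+    [defaults]
+    transport = paramiko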
+
+.. _bootstrap_bsd:
+
+Bootstrapping BSD
+-----------------
+
+Ansible is agentless by default; however, it requires Python on managed nodes. Only the :ref:`raw <raw_module>` module will operate without Python. Although this module can be used to bootstrap Ansible and install Python on BSD variants (see below), it is very limited, and Python is required to make full use of Ansible's features.
+
+The following example installs Python 2.7, which includes the json library required for full functionality of Ansible.
+On your control machine you can execute the following for most versions of FreeBSD:
+
+.. code-block:: bash
+
+ ansible -m raw -a "pkg install -y python27" mybsdhost1
+
+Or for OpenBSD:
+
+.. code-block:: bash
+
+ ansible -m raw -a "pkg_add python%3.7" mybsdhost1
+
+Once this is done, you can use other Ansible modules beyond the ``raw`` module.
+
+.. note::
+ This example demonstrated using pkg on FreeBSD and pkg_add on OpenBSD; however, you should be able to substitute the appropriate package tool for your BSD. The package name may also differ; refer to the package list or documentation of the BSD variant you are using for the exact name of the Python package you intend to install.
+
+.. _BSD_python_location:
+
+Setting the Python interpreter
+------------------------------
+
+To support a variety of Unix-like operating systems and distributions, Ansible cannot always rely on the existing environment or ``env`` variables to locate the correct Python binary. By default, modules point at ``/usr/bin/python`` as this is the most common location. On BSD variants, this path may differ, so it is advised to inform Ansible of the binary's location, through the ``ansible_python_interpreter`` inventory variable. For example:
+
+.. code-block:: text
+
+ [freebsd:vars]
+ ansible_python_interpreter=/usr/local/bin/python2.7
+ [openbsd:vars]
+ ansible_python_interpreter=/usr/local/bin/python3.7
+
+If you use additional plugins beyond those bundled with Ansible, you can set similar variables for ``bash``, ``perl`` or ``ruby``, depending on how the plugin is written. For example:
+
+.. code-block:: text
+
+ [freebsd:vars]
+ ansible_python_interpreter=/usr/local/bin/python
+ ansible_perl_interpreter=/usr/bin/perl5
+
+
+Which modules are available?
+----------------------------
+
+The majority of the core Ansible modules are written for a combination of Unix-like machines and other generic services, so most should function well on the BSDs with the obvious exception of those that are aimed at Linux-only technologies (such as LVG).
+
+Using BSD as the control node
+-----------------------------
+
+Using BSD as the control machine is as simple as installing the Ansible package for your BSD variant or following the ``pip`` or 'from source' instructions.
+
+.. _bsd_facts:
+
+BSD facts
+---------
+
+Ansible gathers facts from the BSDs in a similar manner to Linux machines, but since the data, names, and structures can vary for network, disks, and other devices, you should expect the output to differ slightly while remaining familiar to a BSD administrator.
+
+.. _bsd_contributions:
+
+BSD efforts and contributions
+-----------------------------
+
+BSD support is important to us at Ansible. Even though the majority of our contributors use and target Linux, we have an active BSD community and strive to be as BSD-friendly as possible.
+Please feel free to report any issues or incompatibilities you discover with BSD; pull requests with an included fix are also welcome!
+
+.. seealso::
+
+ :ref:`intro_adhoc`
+ Examples of basic commands
+ :ref:`working_with_playbooks`
+ Learning Ansible's configuration management language
+ :ref:`developing_modules`
+ How to write modules
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/intro_dynamic_inventory.rst b/docs/docsite/rst/user_guide/intro_dynamic_inventory.rst
new file mode 100644
index 00000000..69016655
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_dynamic_inventory.rst
@@ -0,0 +1,249 @@
+.. _intro_dynamic_inventory:
+.. _dynamic_inventory:
+
+******************************
+Working with dynamic inventory
+******************************
+
+.. contents::
+ :local:
+
+If your Ansible inventory fluctuates over time, with hosts spinning up and shutting down in response to business demands, the static inventory solutions described in :ref:`inventory` will not serve your needs. You may need to track hosts from multiple sources: cloud providers, LDAP, `Cobbler <https://cobbler.github.io>`_, and/or enterprise CMDB systems.
+
+Ansible integrates all of these options through a dynamic external inventory system. Ansible supports two ways to connect with external inventory: :ref:`inventory_plugins` and `inventory scripts`.
+
+Inventory plugins take advantage of the most recent updates to the Ansible core code. We recommend plugins over scripts for dynamic inventory. You can :ref:`write your own plugin <developing_inventory>` to connect to additional dynamic inventory sources.
+
+You can still use inventory scripts if you choose. When we implemented inventory plugins, we ensured backwards compatibility through the script inventory plugin. The examples below illustrate how to use inventory scripts.
+
+If you prefer a GUI for handling dynamic inventory, the :ref:`ansible_tower` inventory database syncs with all your dynamic inventory sources, provides web and REST access to the results, and offers a graphical inventory editor. With a database record of all of your hosts, you can correlate past event history and see which hosts have had failures on their last playbook runs.
+
+.. _cobbler_example:
+
+Inventory script example: Cobbler
+=================================
+
+Ansible integrates seamlessly with `Cobbler <https://cobbler.github.io>`_, a Linux installation server originally written by Michael DeHaan and now led by James Cammarata, who works for Ansible.
+
+While primarily used to kick off OS installations and manage DHCP and DNS, Cobbler has a generic
+layer that can represent data for multiple configuration management systems (even at the same time) and serve as a 'lightweight CMDB'.
+
+To tie your Ansible inventory to Cobbler, copy `this script <https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/cobbler.py>`_ to ``/etc/ansible`` and ``chmod +x`` the file. Run ``cobblerd`` any time you use Ansible and use the ``-i`` command line option (for example, ``-i /etc/ansible/cobbler.py``) to communicate with Cobbler using Cobbler's XMLRPC API.
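+
+For example, a sketch of those setup steps (the URL is the one linked above):
+
+.. code-block:: bash
+
+    # Install the inventory script and confirm Ansible can use it
+    wget -O /etc/ansible/cobbler.py https://raw.githubusercontent.com/ansible-collections/community.general/main/scripts/inventory/cobbler.py
+    chmod +x /etc/ansible/cobbler.py
+    ansible -i /etc/ansible/cobbler.py all -m ping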
+
+Add a ``cobbler.ini`` file in ``/etc/ansible`` so Ansible knows where the Cobbler server is and can cache its responses. For example:
+
+.. code-block:: text
+
+ [cobbler]
+
+ # Set Cobbler's hostname or IP address
+ host = http://127.0.0.1/cobbler_api
+
+ # API calls to Cobbler can be slow. For this reason, we cache the results of an API
+ # call. Set this to the path you want cache files to be written to. Two files
+ # will be written to this directory:
+ # - ansible-cobbler.cache
+ # - ansible-cobbler.index
+
+ cache_path = /tmp
+
+ # The number of seconds a cache file is considered valid. After this many
+ # seconds, a new API call will be made, and the cache file will be updated.
+
+ cache_max_age = 900
+
+
+First test the script by running ``/etc/ansible/cobbler.py`` directly. You should see some JSON data output, but it may not have anything in it just yet.
+
+Let's explore what this does. In Cobbler, assume a scenario somewhat like the following:
+
+.. code-block:: bash
+
+ cobbler profile add --name=webserver --distro=CentOS6-x86_64
+ cobbler profile edit --name=webserver --mgmt-classes="webserver" --ksmeta="a=2 b=3"
+ cobbler system edit --name=foo --dns-name="foo.example.com" --mgmt-classes="atlanta" --ksmeta="c=4"
+ cobbler system edit --name=bar --dns-name="bar.example.com" --mgmt-classes="atlanta" --ksmeta="c=5"
+
+In the example above, the system 'foo.example.com' is addressable by Ansible directly, but is also addressable when using the group names 'webserver' or 'atlanta'. Since Ansible uses SSH, it contacts system 'foo' as 'foo.example.com' only, never just 'foo'. Similarly, if you tried ``ansible foo``, it would not find the system, but ``ansible 'foo*'`` would, because the system's DNS name starts with 'foo'.
+
+The script provides more than host and group info. As a bonus, when the 'setup' module is run (which happens automatically when using playbooks), the variables 'a', 'b', and 'c' will all be auto-populated in the templates:
+
+.. code-block:: text
+
+ # file: /srv/motd.j2
+ Welcome, I am templated with a value of a={{ a }}, b={{ b }}, and c={{ c }}
+
+Which could be executed just like this:
+
+.. code-block:: bash
+
+ ansible webserver -m setup
+ ansible webserver -m template -a "src=/tmp/motd.j2 dest=/etc/motd"
+
+.. note::
+ The name 'webserver' came from Cobbler, as did the variables for
+ the config file. You can still pass in your own variables like
+ normal in Ansible, but variables from the external inventory script
+ will override any that have the same name.
+
+So, with the template above (``motd.j2``), this results in the following data being written to ``/etc/motd`` for system 'foo':
+
+.. code-block:: text
+
+ Welcome, I am templated with a value of a=2, b=3, and c=4
+
+And on system 'bar' (bar.example.com):
+
+.. code-block:: text
+
+ Welcome, I am templated with a value of a=2, b=3, and c=5
+
+And technically, though there is no compelling reason to do it, this also works:
+
+.. code-block:: bash
+
+ ansible webserver -m ansible.builtin.shell -a "echo {{ a }}"
+
+So, in other words, you can use those variables in arguments/actions as well.
+
+.. _openstack_example:
+
+Inventory script example: OpenStack
+===================================
+
+If you use an OpenStack-based cloud, instead of manually maintaining your own inventory file, you can use the ``openstack_inventory.py`` dynamic inventory to pull information about your compute instances directly from OpenStack.
+
+You can download the latest version of the OpenStack inventory script `here <https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack_inventory.py>`_.
+
+You can use the inventory script explicitly (by passing the `-i openstack_inventory.py` argument to Ansible) or implicitly (by placing the script at `/etc/ansible/hosts`).
+
+Explicit use of OpenStack inventory script
+------------------------------------------
+
+Download the latest version of the OpenStack dynamic inventory script and make it executable::
+
+ wget https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack_inventory.py
+ chmod +x openstack_inventory.py
+
+.. note::
+ Do not name it `openstack.py`. This name will conflict with imports from openstacksdk.
+
+Source an OpenStack RC file:
+
+.. code-block:: bash
+
+ source openstack.rc
+
+.. note::
+
+ An OpenStack RC file contains the environment variables required by the client tools to establish a connection with the cloud provider, such as the authentication URL, user name, password and region name. For more information on how to download, create or source an OpenStack RC file, please refer to `Set environment variables using the OpenStack RC file <https://docs.openstack.org/user-guide/common/cli_set_environment_variables_using_openstack_rc.html>`_.
+
+You can confirm the file has been successfully sourced by running a simple command, such as `nova list`, and ensuring it returns no errors.
+
+.. note::
+
+ The OpenStack command line clients are required to run the `nova list` command. For more information on how to install them, please refer to `Install the OpenStack command-line clients <https://docs.openstack.org/user-guide/common/cli_install_openstack_command_line_clients.html>`_.
+
+You can test the OpenStack dynamic inventory script manually to confirm it is working as expected::
+
+ ./openstack_inventory.py --list
+
+After a few moments you should see some JSON output with information about your compute instances.
+
+Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack_inventory.py` script as an inventory file, as illustrated below:
+
+.. code-block:: bash
+
+ ansible -i openstack_inventory.py all -m ansible.builtin.ping
+
+Implicit use of OpenStack inventory script
+------------------------------------------
+
+Download the latest version of the OpenStack dynamic inventory script, make it executable and copy it to `/etc/ansible/hosts`:
+
+.. code-block:: bash
+
+ wget https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack_inventory.py
+ chmod +x openstack_inventory.py
+ sudo cp openstack_inventory.py /etc/ansible/hosts
+
+Download the sample configuration file, modify it to suit your needs and copy it to `/etc/ansible/openstack.yml`:
+
+.. code-block:: bash
+
+ wget https://raw.githubusercontent.com/openstack/ansible-collections-openstack/master/scripts/inventory/openstack.yml
+ vi openstack.yml
+ sudo cp openstack.yml /etc/ansible/
+
+You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:
+
+.. code-block:: bash
+
+ /etc/ansible/hosts --list
+
+After a few moments you should see some JSON output with information about your compute instances.
+
+Refreshing the cache
+--------------------
+
+Note that the OpenStack dynamic inventory script will cache results to avoid repeated API calls. To explicitly clear the cache, you can run the openstack_inventory.py (or hosts) script with the ``--refresh`` parameter:
+
+.. code-block:: bash
+
+ ./openstack_inventory.py --refresh --list
+
+.. _other_inventory_scripts:
+
+Other inventory scripts
+=======================
+
+In Ansible 2.10 and later, inventory scripts moved to their associated collections. Many are now in the `community.general scripts/inventory directory <https://github.com/ansible-collections/community.general/tree/main/scripts/inventory>`_. We recommend you use :ref:`inventory_plugins` instead.
+
+.. _using_multiple_sources:
+
+Using inventory directories and multiple inventory sources
+==========================================================
+
+If the location given to ``-i`` in Ansible is a directory (or is so configured in ``ansible.cfg``), Ansible can use multiple inventory sources
+at the same time. When doing so, it is possible to mix both dynamic and statically managed inventory sources in the same Ansible run. Instant
+hybrid cloud!
+
+In an inventory directory, executable files are treated as dynamic inventory sources and most other files as static sources. Files which end with any of the following are ignored:
+
+.. code-block:: text
+
+ ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
+
+You can replace this list with your own selection by configuring an ``inventory_ignore_extensions`` list in ``ansible.cfg``, or setting the :envvar:`ANSIBLE_INVENTORY_IGNORE` environment variable. The value in either case must be a comma-separated list of patterns, as shown above.
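+
+For example, a sketch of overriding the defaults with the environment variable (the pattern list shown is illustrative):
+
+.. code-block:: bash
+
+    $ export ANSIBLE_INVENTORY_IGNORE="~,.orig,.bak,.cfg,.retry"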
+
+Any ``group_vars`` and ``host_vars`` subdirectories in an inventory directory are interpreted as expected, making inventory directories a powerful way to organize different sets of configurations. See :ref:`using_multiple_inventory_sources` for more information.
+
+.. _static_groups_of_dynamic:
+
+Static groups of dynamic groups
+===============================
+
+When defining groups of groups in the static inventory file, the child groups
+must also be defined in the static inventory file; otherwise, Ansible returns an
+error. If you want to define a static group of dynamic child groups, define
+the dynamic groups as empty in the static inventory file. For example:
+
+.. code-block:: text
+
+ [tag_Name_staging_foo]
+
+ [tag_Name_staging_bar]
+
+ [staging:children]
+ tag_Name_staging_foo
+ tag_Name_staging_bar
+
+
+.. seealso::
+
+ :ref:`intro_inventory`
+ All about static inventory files
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/intro_getting_started.rst b/docs/docsite/rst/user_guide/intro_getting_started.rst
new file mode 100644
index 00000000..0fde0281
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_getting_started.rst
@@ -0,0 +1,139 @@
+.. _intro_getting_started:
+
+***************
+Getting Started
+***************
+
+Now that you have read the :ref:`installation guide<installation_guide>` and installed Ansible on a control node, you are ready to learn how Ansible works. A basic Ansible command or playbook:
+
+* selects machines to execute against from inventory
+* connects to those machines (or network devices, or other managed nodes), usually over SSH
+* copies one or more modules to the remote machines and starts execution there
+
+Ansible can do much more, but you should understand the most common use case before exploring all the powerful configuration, deployment, and orchestration features of Ansible. This page illustrates the basic process with a simple inventory and an ad-hoc command. Once you understand how Ansible works, you can read more details about :ref:`ad-hoc commands<intro_adhoc>`, organize your infrastructure with :ref:`inventory<intro_inventory>`, and harness the full power of Ansible with :ref:`playbooks<playbooks_intro>`.
+
+.. contents::
+ :local:
+
+Selecting machines from inventory
+=================================
+
+Ansible reads information about which machines you want to manage from your inventory. Although you can pass an IP address to an ad-hoc command, you need inventory to take advantage of the full flexibility and repeatability of Ansible.
+
+Action: create a basic inventory
+--------------------------------
+For this basic inventory, edit (or create) ``/etc/ansible/hosts`` and add a few remote systems to it, using either IP addresses or FQDNs:
+
+.. code-block:: text
+
+ 192.0.2.50
+ aserver.example.org
+ bserver.example.org
+
+Beyond the basics
+-----------------
+Your inventory can store much more than IPs and FQDNs. You can create :ref:`aliases<inventory_aliases>`, set variable values for a single host with :ref:`host vars<host_variables>`, or set variable values for multiple hosts with :ref:`group vars<group_variables>`.
+
+.. _remote_connection_information:
+
+Connecting to remote nodes
+==========================
+
+Ansible communicates with remote machines over the `SSH protocol <https://www.ssh.com/ssh/protocol/>`_. By default, Ansible uses native OpenSSH and connects to remote machines using your current user name, just as SSH does.
+
+Action: check your SSH connections
+----------------------------------
+Confirm that you can connect using SSH to all the nodes in your inventory using the same username. If necessary, add your public SSH key to the ``authorized_keys`` file on those systems.
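+
+For example, ``ssh-copy-id`` is one way to do this for a host from the sample inventory above:
+
+.. code-block:: bash
+
+    $ ssh-copy-id aserver.example.org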
+
+Beyond the basics
+-----------------
+You can override the default remote user name in several ways, including:
+
+* passing the ``-u`` parameter at the command line
+* setting user information in your inventory file
+* setting user information in your configuration file
+* setting environment variables
+
+See :ref:`general_precedence_rules` for details on the (sometimes unintuitive) precedence of each method of passing user information. You can read more about connections in :ref:`connections`.
+
+Copying and executing modules
+=============================
+
+Once it has connected, Ansible transfers the modules required by your command or playbook to the remote machine(s) for execution.
+
+Action: run your first Ansible commands
+---------------------------------------
+Use the ping module to ping all the nodes in your inventory:
+
+.. code-block:: bash
+
+ $ ansible all -m ping
+
+Now run a live command on all of your nodes:
+
+.. code-block:: bash
+
+ $ ansible all -a "/bin/echo hello"
+
+You should see output for each host in your inventory, similar to this:
+
+.. code-block:: ansible-output
+
+ aserver.example.org | SUCCESS => {
+ "ansible_facts": {
+ "discovered_interpreter_python": "/usr/bin/python"
+ },
+ "changed": false,
+ "ping": "pong"
+ }
+
+Beyond the basics
+-----------------
+By default Ansible uses SFTP to transfer files. If the machine or device you want to manage does not support SFTP, you can switch to SCP mode in :ref:`intro_configuration`. The files are placed in a temporary directory and executed from there.
+
+If you need privilege escalation (sudo and similar) to run a command, pass the ``become`` flags:
+
+.. code-block:: bash
+
+ # as bruce
+ $ ansible all -m ping -u bruce
+ # as bruce, sudoing to root (sudo is default method)
+ $ ansible all -m ping -u bruce --become
+ # as bruce, sudoing to batman
+ $ ansible all -m ping -u bruce --become --become-user batman
+
+You can read more about privilege escalation in :ref:`become`.
+
+Congratulations! You have contacted your nodes using Ansible. You used a basic inventory file and an ad-hoc command to direct Ansible to connect to specific remote nodes, copy a module file there and execute it, and return output. You have a fully working infrastructure.
+
+Resources
+=================================
+- `Product Demos <https://github.com/ansible/product-demos>`_
+- `Katacoda <https://katacoda.com/rhel-labs>`_
+- `Workshops <https://github.com/ansible/workshops>`_
+- `Ansible Examples <https://github.com/ansible/ansible-examples>`_
+- `Ansible Baseline <https://github.com/ansible/ansible-baseline>`_
+
+Next steps
+==========
+Next you can read about more real-world cases in :ref:`intro_adhoc`,
+explore what you can do with different modules, or read about the Ansible
+:ref:`working_with_playbooks` language. Ansible is not just about running commands, it
+also has powerful configuration management and deployment features.
+
+.. seealso::
+
+ :ref:`intro_inventory`
+ More information about inventory
+ :ref:`intro_adhoc`
+ Examples of basic commands
+ :ref:`working_with_playbooks`
+ Learning Ansible's configuration management language
+ `Ansible Demos <https://github.com/ansible/product-demos>`_
+ Demonstrations of different Ansible usecases
+ `RHEL Labs <https://katacoda.com/rhel-labs>`_
+ Labs to provide further knowledge on different topics
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/intro_inventory.rst b/docs/docsite/rst/user_guide/intro_inventory.rst
new file mode 100644
index 00000000..0b8b002c
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_inventory.rst
@@ -0,0 +1,788 @@
+.. _intro_inventory:
+.. _inventory:
+
+***************************
+How to build your inventory
+***************************
+
+Ansible works against multiple managed nodes or "hosts" in your infrastructure at the same time, using a list or group of lists known as inventory. Once your inventory is defined, you use :ref:`patterns <intro_patterns>` to select the hosts or groups you want Ansible to run against.
+
+The default location for inventory is a file called ``/etc/ansible/hosts``. You can specify a different inventory file at the command line using the ``-i <path>`` option. You can also use multiple inventory files at the same time, and/or pull inventory from dynamic or cloud sources or different formats (YAML, INI, and so on), as described in :ref:`intro_dynamic_inventory`.
+Introduced in version 2.4, Ansible has :ref:`inventory_plugins` to make this flexible and customizable.
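+
+For example, a sketch of pointing a run at a non-default inventory file (paths and file names are illustrative):
+
+.. code-block:: bash
+
+    $ ansible-playbook -i /path/to/my_inventory.yml my_playbook.yml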
+
+.. contents::
+ :local:
+
+.. _inventoryformat:
+
+Inventory basics: formats, hosts, and groups
+============================================
+
+The inventory file can be in one of many formats, depending on the inventory plugins you have.
+The most common formats are INI and YAML. A basic INI ``/etc/ansible/hosts`` might look like this:
+
+.. code-block:: text
+
+ mail.example.com
+
+ [webservers]
+ foo.example.com
+ bar.example.com
+
+ [dbservers]
+ one.example.com
+ two.example.com
+ three.example.com
+
+The headings in brackets are group names, which are used in classifying hosts
+and deciding what hosts you are controlling at what times and for what purpose.
+Group names should follow the same guidelines as :ref:`valid_variable_names`.
+
+Here's that same basic inventory file in YAML format:
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        mail.example.com:
+      children:
+        webservers:
+          hosts:
+            foo.example.com:
+            bar.example.com:
+        dbservers:
+          hosts:
+            one.example.com:
+            two.example.com:
+            three.example.com:
+
+.. _default_groups:
+
+Default groups
+--------------
+
+There are two default groups: ``all`` and ``ungrouped``. The ``all`` group contains every host.
+The ``ungrouped`` group contains all hosts that don't have another group aside from ``all``.
+Every host will always belong to at least two groups (``all`` and ``ungrouped``, or ``all`` and some other group). Though ``all`` and ``ungrouped`` are always present, they can be implicit and not appear in group listings like ``group_names``.
+
+.. _host_multiple_groups:
+
+Hosts in multiple groups
+------------------------
+
+You can (and probably will) put each host in more than one group. For example, a production webserver in a datacenter in Atlanta might be included in groups called [prod] and [atlanta] and [webservers]. You can create groups that track:
+
+* What - An application, stack or microservice (for example, database servers, web servers, and so on).
+* Where - A datacenter or region, to talk to local DNS, storage, and so on (for example, east, west).
+* When - The development stage, to avoid testing on production resources (for example, prod, test).
+
+Extending the previous YAML inventory to include what, when, and where would look like:
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        mail.example.com:
+      children:
+        webservers:
+          hosts:
+            foo.example.com:
+            bar.example.com:
+        dbservers:
+          hosts:
+            one.example.com:
+            two.example.com:
+            three.example.com:
+        east:
+          hosts:
+            foo.example.com:
+            one.example.com:
+            two.example.com:
+        west:
+          hosts:
+            bar.example.com:
+            three.example.com:
+        prod:
+          hosts:
+            foo.example.com:
+            one.example.com:
+            two.example.com:
+        test:
+          hosts:
+            bar.example.com:
+            three.example.com:
+
+You can see that ``one.example.com`` exists in the ``dbservers``, ``east``, and ``prod`` groups.
+
+You can also use nested groups to simplify ``prod`` and ``test`` in this inventory, for the same result:
+
+.. code-block:: yaml
+
+ all:
+ hosts:
+ mail.example.com:
+ children:
+ webservers:
+ hosts:
+ foo.example.com:
+ bar.example.com:
+ dbservers:
+ hosts:
+ one.example.com:
+ two.example.com:
+ three.example.com:
+ east:
+ hosts:
+ foo.example.com:
+ one.example.com:
+ two.example.com:
+ west:
+ hosts:
+ bar.example.com:
+ three.example.com:
+ prod:
+ children:
+ east:
+ test:
+ children:
+ west:
+
+You can find more examples on how to organize your inventories and group your hosts in :ref:`inventory_setup_examples`.
+
+Adding ranges of hosts
+----------------------
+
+If you have a lot of hosts with a similar pattern, you can add them as a range rather than listing each hostname separately:
+
+In INI:
+
+.. code-block:: text
+
+ [webservers]
+ www[01:50].example.com
+
+In YAML:
+
+.. code-block:: yaml
+
+ ...
+ webservers:
+ hosts:
+ www[01:50].example.com:
+
+You can specify a stride (increments between sequence numbers) when defining a numeric range of hosts:
+
+In INI:
+
+.. code-block:: text
+
+ [webservers]
+ www[01:50:2].example.com
+
+In YAML:
+
+.. code-block:: yaml
+
+ ...
+ webservers:
+ hosts:
+ www[01:50:2].example.com:
+
+For numeric patterns, leading zeros can be included or removed, as desired. Ranges are inclusive. You can also define alphabetic ranges:
+
+.. code-block:: text
+
+ [databases]
+ db-[a:f].example.com
+
+.. _variables_in_inventory:
+
+Adding variables to inventory
+=============================
+
+You can store variable values that relate to a specific host or group in inventory. To start with, you may add variables directly to the hosts and groups in your main inventory file. As you add more and more managed nodes to your Ansible inventory, however, you will likely want to store variables in separate host and group variable files. See :ref:`define_variables_in_inventory` for details.
+
+.. _host_variables:
+
+Assigning a variable to one machine: host variables
+===================================================
+
+You can easily assign a variable to a single host, then use it later in playbooks. In INI:
+
+.. code-block:: text
+
+ [atlanta]
+ host1 http_port=80 maxRequestsPerChild=808
+ host2 http_port=303 maxRequestsPerChild=909
+
+In YAML:
+
+.. code-block:: yaml
+
+ atlanta:
+ host1:
+ http_port: 80
+ maxRequestsPerChild: 808
+ host2:
+ http_port: 303
+ maxRequestsPerChild: 909
+
+Unique values like non-standard SSH ports work well as host variables. You can add them to your Ansible inventory by adding the port number after the hostname with a colon:
+
+.. code-block:: text
+
+ badwolf.example.com:5309
+
+Connection variables also work well as host variables:
+
+.. code-block:: text
+
+ [targets]
+
+ localhost ansible_connection=local
+ other1.example.com ansible_connection=ssh ansible_user=myuser
+ other2.example.com ansible_connection=ssh ansible_user=myotheruser
+
+.. note:: If you list non-standard SSH ports in your SSH config file, the ``openssh`` connection will find and use them, but the ``paramiko`` connection will not.
+
+.. _inventory_aliases:
+
+Inventory aliases
+-----------------
+
+You can also define aliases in your inventory:
+
+In INI:
+
+.. code-block:: text
+
+ jumper ansible_port=5555 ansible_host=192.0.2.50
+
+In YAML:
+
+.. code-block:: yaml
+
+ ...
+ hosts:
+ jumper:
+ ansible_port: 5555
+ ansible_host: 192.0.2.50
+
+In the above example, running Ansible against the host alias "jumper" will connect to 192.0.2.50 on port 5555. See :ref:`behavioral inventory parameters <behavioral_parameters>` to further customize the connection to hosts.
+
+.. note::
+ Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared:
+
+ * When declared inline with the host, INI values are interpreted as Python literal structures (strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple ``key=value`` parameters per line. Therefore they need a way to indicate that a space is part of a value rather than a separator.
+
+ * When declared in a ``:vars`` section, INI values are interpreted as strings. For example ``var=FALSE`` would create a string equal to 'FALSE'. Unlike host lines, ``:vars`` sections accept only a single entry per line, so everything after the ``=`` must be the value for the entry.
+
+ * If a variable value set in an INI inventory must be a certain type (for example, a string or a boolean value), always specify the type with a filter in your task. Do not rely on types set in INI inventories when consuming variables.
+
+ * Consider using YAML format for inventory sources to avoid confusion on the actual type of a variable. The YAML inventory plugin processes variable values consistently and correctly.
+
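+As the note above advises, cast INI-sourced values before relying on their type. A minimal task sketch (the variable name ``mybool`` is illustrative), assuming ``mybool=True`` was set on a host line:
+
+.. code-block:: yaml
+
+    - name: Act only when mybool is truthy, whatever type the INI parser produced
+      ansible.builtin.debug:
+        msg: mybool evaluated to true
+      when: mybool | bool
+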
+Generally speaking, this is not the best way to define variables that describe your system policy. Setting variables in the main inventory file is only a shorthand. See :ref:`splitting_out_vars` for guidelines on storing variable values in individual files in the 'host_vars' directory.
+
+.. _group_variables:
+
+Assigning a variable to many machines: group variables
+======================================================
+
+If all hosts in a group share a variable value, you can apply that variable to an entire group at once. In INI:
+
+.. code-block:: text
+
+ [atlanta]
+ host1
+ host2
+
+ [atlanta:vars]
+ ntp_server=ntp.atlanta.example.com
+ proxy=proxy.atlanta.example.com
+
+In YAML:
+
+.. code-block:: yaml
+
+ atlanta:
+ hosts:
+ host1:
+ host2:
+ vars:
+ ntp_server: ntp.atlanta.example.com
+ proxy: proxy.atlanta.example.com
+
+Group variables are a convenient way to apply variables to multiple hosts at once. Before executing, however, Ansible always flattens variables, including inventory variables, to the host level. If a host is a member of multiple groups, Ansible reads variable values from all of those groups. If you assign different values to the same variable in different groups, Ansible chooses which value to use based on internal :ref:`rules for merging <how_we_merge>`.
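+
+As a sketch of such a conflict (the group layout here is illustrative), a host in the ``atlanta`` child group below ends up with the child's value, because child groups override their parents under the default merge rules described in :ref:`how_we_merge`:
+
+.. code-block:: yaml
+
+    southeast:
+      children:
+        atlanta:
+          hosts:
+            host1:
+          vars:
+            ntp_server: ntp.atlanta.example.com
+      vars:
+        ntp_server: ntp.southeast.example.com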
+
+.. _subgroups:
+
+Inheriting variable values: group variables for groups of groups
+----------------------------------------------------------------
+
+You can make groups of groups using the ``:children`` suffix in INI or the ``children:`` entry in YAML.
+You can apply variables to these groups of groups using ``:vars`` or ``vars:``:
+
+In INI:
+
+.. code-block:: text
+
+ [atlanta]
+ host1
+ host2
+
+ [raleigh]
+ host2
+ host3
+
+ [southeast:children]
+ atlanta
+ raleigh
+
+ [southeast:vars]
+ some_server=foo.southeast.example.com
+ halon_system_timeout=30
+ self_destruct_countdown=60
+ escape_pods=2
+
+ [usa:children]
+ southeast
+ northeast
+ southwest
+ northwest
+
+In YAML:
+
+.. code-block:: yaml
+
+ all:
+ children:
+ usa:
+ children:
+ southeast:
+ children:
+ atlanta:
+ hosts:
+ host1:
+ host2:
+ raleigh:
+ hosts:
+ host2:
+ host3:
+ vars:
+ some_server: foo.southeast.example.com
+ halon_system_timeout: 30
+ self_destruct_countdown: 60
+ escape_pods: 2
+ northeast:
+ northwest:
+ southwest:
+
+If you need to store lists or hash data, or prefer to keep host and group specific variables separate from the inventory file, see :ref:`splitting_out_vars`.
+
+Child groups have a couple of properties to note:
+
+ - Any host that is a member of a child group is automatically a member of the parent group.
+ - A child group's variables will have higher precedence than (override) a parent group's variables.
+ - Groups can have multiple parents and children, but not circular relationships.
+ - Hosts can also be in multiple groups, but there will only be **one** instance of a host, merging the data from the multiple groups.
+
+.. _splitting_out_vars:
+
+Organizing host and group variables
+===================================
+
+Although you can store variables in the main inventory file, storing them in separate host and group variable files may help you organize your variable values more easily. Host and group variable files must use YAML syntax. Valid file extensions include '.yml', '.yaml', '.json', or no file extension.
+See :ref:`yaml_syntax` if you are new to YAML.
+
+Ansible loads host and group variable files by searching paths relative to the inventory file or the playbook file. If your inventory file at ``/etc/ansible/hosts`` contains a host named 'foosball' that belongs to two groups, 'raleigh' and 'webservers', that host will use variables in YAML files at the following locations:
+
+.. code-block:: bash
+
+ /etc/ansible/group_vars/raleigh # can optionally end in '.yml', '.yaml', or '.json'
+ /etc/ansible/group_vars/webservers
+ /etc/ansible/host_vars/foosball
+
+For example, if you group hosts in your inventory by datacenter, and each datacenter uses its own NTP server and database server, you can create a file called ``/etc/ansible/group_vars/raleigh`` to store the variables for the ``raleigh`` group:
+
+.. code-block:: yaml
+
+ ---
+ ntp_server: acme.example.org
+ database_server: storage.example.org
+
+You can also create *directories* named after your groups or hosts. Ansible will read all the files in these directories in lexicographical order. An example with the 'raleigh' group:
+
+.. code-block:: bash
+
+ /etc/ansible/group_vars/raleigh/db_settings
+ /etc/ansible/group_vars/raleigh/cluster_settings
+
+All hosts in the 'raleigh' group will have the variables defined in these files
+available to them. This can be very useful to keep your variables organized when a single
+file gets too big, or when you want to use :ref:`Ansible Vault<playbooks_vault>` on some group variables.
+
+You can also add ``group_vars/`` and ``host_vars/`` directories to your playbook directory. The ``ansible-playbook`` command looks for these directories in the current working directory by default. Other Ansible commands (for example, ``ansible``, ``ansible-console``, and so on) will only look for ``group_vars/`` and ``host_vars/`` in the inventory directory. If you want other commands to load group and host variables from a playbook directory, you must provide the ``--playbook-dir`` option on the command line.
+If you load inventory files from both the playbook directory and the inventory directory, variables in the playbook directory will override variables set in the inventory directory.
+
+Keeping your inventory file and variables in a git repo (or other version control)
+is an excellent way to track changes to your inventory and host variables.
+
+.. _how_we_merge:
+
+How variables are merged
+========================
+
+By default, variables are merged/flattened to the specific host before a play is run. This keeps Ansible focused on the host and task, so groups don't really survive outside of inventory and host matching. By default, Ansible overwrites variables, including the ones defined for a group and/or host (see :ref:`DEFAULT_HASH_BEHAVIOUR<DEFAULT_HASH_BEHAVIOUR>`). The order/precedence is (from lowest to highest):
+
+- all group (because it is the 'parent' of all other groups)
+- parent group
+- child group
+- host
+
+By default Ansible merges groups at the same parent/child level in ASCII order, and the last group loaded overwrites the previous groups. For example, ``a_group`` will be merged with ``b_group``, and matching ``b_group`` variables will overwrite the ones in ``a_group``.
+
+You can change this behavior by setting the group variable ``ansible_group_priority`` to change the merge order for groups of the same level (after the parent/child order is resolved). The larger the number, the later it will be merged, giving it higher priority. This variable defaults to ``1`` if not set. For example:
+
+.. code-block:: yaml
+
+    a_group:
+      vars:
+        testvar: a
+        ansible_group_priority: 10
+    b_group:
+      vars:
+        testvar: b
+
+In this example, if both groups had the same priority, the result would normally be ``testvar == b``, but since we are giving ``a_group`` a higher priority, the result will be ``testvar == a``.
+
+.. note:: ``ansible_group_priority`` can only be set in the inventory source and not in group_vars/, as the variable is used in the loading of group_vars.
+
+.. _using_multiple_inventory_sources:
+
+Using multiple inventory sources
+================================
+
+You can target multiple inventory sources (directories, dynamic inventory scripts
+or files supported by inventory plugins) at the same time by giving multiple inventory parameters from the command
+line or by configuring :envvar:`ANSIBLE_INVENTORY`. This can be useful when you want to target normally
+separate environments, like staging and production, at the same time for a specific action.
+
+Target two sources from the command line like this:
+
+.. code-block:: bash
+
+ ansible-playbook get_logs.yml -i staging -i production
+
+Keep in mind that if there are variable conflicts in the inventories, they are resolved according
+to the rules described in :ref:`how_we_merge` and :ref:`ansible_variable_precedence`.
+The merging order is controlled by the order of the inventory source parameters.
+If ``[all:vars]`` in staging inventory defines ``myvar = 1``, but production inventory defines ``myvar = 2``,
+the playbook will be run with ``myvar = 2``. The result would be reversed if the playbook was run with
+``-i production -i staging``.
+
+**Aggregating inventory sources with a directory**
+
+You can also create an inventory by combining multiple inventory sources and source types under a directory.
+This can be useful for combining static and dynamic hosts and managing them as one inventory.
+The following inventory combines an inventory plugin source, a dynamic inventory script,
+and a file with static hosts:
+
+.. code-block:: text
+
+ inventory/
+ openstack.yml # configure inventory plugin to get hosts from Openstack cloud
+ dynamic-inventory.py # add additional hosts with dynamic inventory script
+ static-inventory # add static hosts and groups
+ group_vars/
+ all.yml # assign variables to all hosts
+
+You can target this inventory directory like this:
+
+.. code-block:: bash
+
+ ansible-playbook example.yml -i inventory
+
+It can be useful to control the merging order of the inventory sources if there are variable
+conflicts or 'group of groups' dependencies between the inventory sources. The inventories
+are merged in ASCII order according to the filenames, so the result can
+be controlled by adding prefixes to the files:
+
+.. code-block:: text
+
+ inventory/
+ 01-openstack.yml # configure inventory plugin to get hosts from Openstack cloud
+ 02-dynamic-inventory.py # add additional hosts with dynamic inventory script
+ 03-static-inventory # add static hosts
+ group_vars/
+ all.yml # assign variables to all hosts
+
+If ``01-openstack.yml`` defines ``myvar = 1`` for the group ``all``, ``02-dynamic-inventory.py`` defines ``myvar = 2``,
+and ``03-static-inventory`` defines ``myvar = 3``, the playbook will be run with ``myvar = 3``.
+
+For more details on inventory plugins and dynamic inventory scripts see :ref:`inventory_plugins` and :ref:`intro_dynamic_inventory`.
+
+.. _behavioral_parameters:
+
+Connecting to hosts: behavioral inventory parameters
+====================================================
+
+As described above, the following variables control how Ansible interacts with remote hosts.
+
+Host connection:
+
+.. include:: shared_snippets/SSH_password_prompt.txt
+
+ansible_connection
+    Connection type to the host. This can be the name of any of Ansible's connection plugins. SSH protocol types are ``smart``, ``ssh`` or ``paramiko``. The default is ``smart``. Non-SSH based types are described in the next section.
+
+General for all connections:
+
+ansible_host
+ The name of the host to connect to, if different from the alias you wish to give to it.
+ansible_port
+    The connection port number, if not the default (22 for ssh).
+ansible_user
+ The user name to use when connecting to the host
+ansible_password
+ The password to use to authenticate to the host (never store this variable in plain text; always use a vault. See :ref:`tip_for_variables_and_vaults`)
+
+
+Specific to the SSH connection:
+
+ansible_ssh_private_key_file
+    Private key file used by ssh. Useful if using multiple keys and you don't want to use an SSH agent.
+ansible_ssh_common_args
+    This setting is always appended to the default command line for :command:`sftp`, :command:`scp`,
+    and :command:`ssh`. Useful to configure a ``ProxyCommand`` for a certain host (or
+    group); see the sketch after this list.
+ansible_sftp_extra_args
+ This setting is always appended to the default :command:`sftp` command line.
+ansible_scp_extra_args
+ This setting is always appended to the default :command:`scp` command line.
+ansible_ssh_extra_args
+ This setting is always appended to the default :command:`ssh` command line.
+ansible_ssh_pipelining
+ Determines whether or not to use SSH pipelining. This can override the ``pipelining`` setting in :file:`ansible.cfg`.
+ansible_ssh_executable (added in version 2.2)
+ This setting overrides the default behavior to use the system :command:`ssh`. This can override the ``ssh_executable`` setting in :file:`ansible.cfg`.
+
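+For instance, a hedged sketch of routing SSH through a jump host with ``ansible_ssh_common_args`` (the file name and jump host are illustrative); placed in a group variables file, it applies to every host in that group:
+
+.. code-block:: yaml
+
+    # group_vars/gatewayed.yml -- hosts in the 'gatewayed' group connect through a jump host
+    ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q user@jumphost.example.com"'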
+
+Privilege escalation (see :ref:`Ansible Privilege Escalation<become>` for further details):
+
+ansible_become
+    Equivalent to ``ansible_sudo`` or ``ansible_su``, allows you to force privilege escalation
+ansible_become_method
+    Allows you to set the privilege escalation method
+ansible_become_user
+    Equivalent to ``ansible_sudo_user`` or ``ansible_su_user``, allows you to set the user you become through privilege escalation
+ansible_become_password
+ Equivalent to ``ansible_sudo_password`` or ``ansible_su_password``, allows you to set the privilege escalation password (never store this variable in plain text; always use a vault. See :ref:`tip_for_variables_and_vaults`)
+ansible_become_exe
+ Equivalent to ``ansible_sudo_exe`` or ``ansible_su_exe``, allows you to set the executable for the escalation method selected
+ansible_become_flags
+    Equivalent to ``ansible_sudo_flags`` or ``ansible_su_flags``, allows you to set the flags passed to the selected escalation method. This can also be set globally in :file:`ansible.cfg` in the ``sudo_flags`` option
+
+Remote host environment parameters:
+
+.. _ansible_shell_type:
+
+ansible_shell_type
+    The shell type of the target system. You should not use this setting unless you have set the
+    :ref:`ansible_shell_executable<ansible_shell_executable>` to a non-Bourne (sh) compatible shell. By default commands are
+    formatted using ``sh``-style syntax. Setting this to ``csh`` or ``fish`` will cause commands
+    executed on target systems to follow that shell's syntax instead.
+
+.. _ansible_python_interpreter:
+
+ansible_python_interpreter
+    The target host Python path. This is useful for systems with more
+    than one Python, for systems where Python is not located at :command:`/usr/bin/python` (such as \*BSD), or where :command:`/usr/bin/python`
+    is not a 2.X series Python. We do not use the :command:`/usr/bin/env` mechanism because it requires the remote user's
+    path to be set correctly and also assumes the :program:`python` executable is named python, whereas the executable might
+    be named something like :program:`python2.6`.
+
+ansible_*_interpreter
+    Works for anything such as ruby or perl and works just like :ref:`ansible_python_interpreter<ansible_python_interpreter>`.
+    This replaces the shebang of modules which will run on that host.
+
+.. versionadded:: 2.1
+
+.. _ansible_shell_executable:
+
+ansible_shell_executable
+    This sets the shell the ansible controller will use on the target machine,
+    overriding ``executable`` in :file:`ansible.cfg`, which defaults to
+    :command:`/bin/sh`. You should really only change it if it is not possible
+    to use :command:`/bin/sh` (in other words, if :command:`/bin/sh` is not installed on the target
+    machine or cannot be run from sudo).
+
+Examples from an Ansible-INI host file:
+
+.. code-block:: text
+
+ some_host ansible_port=2222 ansible_user=manager
+ aws_host ansible_ssh_private_key_file=/home/example/.ssh/aws.pem
+ freebsd_host ansible_python_interpreter=/usr/local/bin/python
+ ruby_module_host ansible_ruby_interpreter=/usr/bin/ruby.1.9.3
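+
+The same settings expressed in a YAML inventory, as a sketch mirroring the INI lines above:
+
+.. code-block:: yaml
+
+    all:
+      hosts:
+        some_host:
+          ansible_port: 2222
+          ansible_user: manager
+        aws_host:
+          ansible_ssh_private_key_file: /home/example/.ssh/aws.pem
+        freebsd_host:
+          ansible_python_interpreter: /usr/local/bin/python
+        ruby_module_host:
+          ansible_ruby_interpreter: /usr/bin/ruby.1.9.3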
+
+Non-SSH connection types
+------------------------
+
+As stated in the previous section, Ansible executes playbooks over SSH, but it is not limited to this connection type.
+With the host-specific parameter ``ansible_connection=<connector>``, the connection type can be changed.
+The following non-SSH based connectors are available:
+
+**local**
+
+This connector can be used to deploy the playbook to the control machine itself.
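+
+A minimal play sketch using this connector:
+
+.. code-block:: yaml
+
+    - hosts: localhost
+      connection: local
+      tasks:
+        - name: Gather facts about the control machine
+          ansible.builtin.setup: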
+
+**docker**
+
+This connector deploys the playbook directly into Docker containers using the local Docker client. The following parameters are processed by this connector:
+
+ansible_host
+ The name of the Docker container to connect to.
+ansible_user
+ The user name to operate within the container. The user must exist inside the container.
+ansible_become
+ If set to ``true`` the ``become_user`` will be used to operate within the container.
+ansible_docker_extra_args
+ Could be a string with any additional arguments understood by Docker, which are not command specific. This parameter is mainly used to configure a remote Docker daemon to use.
+
+Here is an example of how to deploy instantly to newly created containers:
+
+.. code-block:: yaml
+
+ - name: Create a jenkins container
+ community.general.docker_container:
+ docker_host: myserver.net:4243
+ name: my_jenkins
+ image: jenkins
+
+ - name: Add the container to inventory
+ ansible.builtin.add_host:
+ name: my_jenkins
+ ansible_connection: docker
+ ansible_docker_extra_args: "--tlsverify --tlscacert=/path/to/ca.pem --tlscert=/path/to/client-cert.pem --tlskey=/path/to/client-key.pem -H=tcp://myserver.net:4243"
+ ansible_user: jenkins
+ changed_when: false
+
+ - name: Create a directory for ssh keys
+ delegate_to: my_jenkins
+ ansible.builtin.file:
+ path: "/var/jenkins_home/.ssh/jupiter"
+ state: directory
+
+For a full list with available plugins and examples, see :ref:`connection_plugin_list`.
+
+.. note:: If you're reading the docs from the beginning, this may be the first example you've seen of an Ansible playbook. This is not an inventory file.
+ Playbooks will be covered in great detail later in the docs.
+
+.. _inventory_setup_examples:
+
+Inventory setup examples
+========================
+
+See also :ref:`sample_setup`, which shows inventory along with playbooks and other Ansible artifacts.
+
+.. _inventory_setup-per_environment:
+
+Example: One inventory per environment
+--------------------------------------
+
+If you need to manage multiple environments it's sometimes prudent to
+have only hosts of a single environment defined per inventory. This
+way, it is harder to, for instance, accidentally change the state of
+nodes inside the "test" environment when you actually wanted to update
+some "staging" servers.
+
+For the example mentioned above you could have an
+:file:`inventory_test` file:
+
+.. code-block:: ini
+
+ [dbservers]
+ db01.test.example.com
+ db02.test.example.com
+
+ [appservers]
+ app01.test.example.com
+ app02.test.example.com
+ app03.test.example.com
+
+That file only includes hosts that are part of the "test"
+environment. Define the "staging" machines in another file
+called :file:`inventory_staging`:
+
+.. code-block:: ini
+
+ [dbservers]
+ db01.staging.example.com
+ db02.staging.example.com
+
+ [appservers]
+ app01.staging.example.com
+ app02.staging.example.com
+ app03.staging.example.com
+
+To apply a playbook called :file:`site.yml`
+to all the app servers in the test environment, use the
+following command::
+
+ ansible-playbook -i inventory_test site.yml -l appservers
+
+.. _inventory_setup-per_function:
+
+Example: Group by function
+--------------------------
+
+In the previous section you already saw an example for using groups in
+order to cluster hosts that have the same function. This allows you,
+for instance, to define firewall rules inside a playbook or role
+without affecting database servers:
+
+.. code-block:: yaml
+
+ - hosts: dbservers
+ tasks:
+ - name: Allow access from 10.0.0.1
+ ansible.builtin.iptables:
+ chain: INPUT
+ jump: ACCEPT
+ source: 10.0.0.1
+
+.. _inventory_setup-per_location:
+
+Example: Group by location
+--------------------------
+
+Other tasks might be focused on where a certain host is located. Let's
+say that ``db01.test.example.com`` and ``app01.test.example.com`` are
+located in DC1 while ``db02.test.example.com`` is in DC2:
+
+.. code-block:: ini
+
+ [dc1]
+ db01.test.example.com
+ app01.test.example.com
+
+ [dc2]
+ db02.test.example.com
+
+In practice, you might even end up mixing all these setups, since you
+might need to update all nodes in a specific data center on one day,
+and update all the application servers on another day, no matter
+their location.
+
+.. seealso::
+
+ :ref:`inventory_plugins`
+ Pulling inventory from dynamic or static sources
+ :ref:`intro_dynamic_inventory`
+ Pulling inventory from dynamic sources, such as cloud providers
+ :ref:`intro_adhoc`
+ Examples of basic commands
+ :ref:`working_with_playbooks`
+ Learning Ansible's configuration, deployment, and orchestration language.
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/intro_patterns.rst b/docs/docsite/rst/user_guide/intro_patterns.rst
new file mode 100644
index 00000000..edc25ad6
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_patterns.rst
@@ -0,0 +1,171 @@
+.. _intro_patterns:
+
+Patterns: targeting hosts and groups
+====================================
+
+When you execute Ansible through an ad-hoc command or by running a playbook, you must choose which managed nodes or groups you want to execute against. Patterns let you run commands and playbooks against specific hosts and/or groups in your inventory. An Ansible pattern can refer to a single host, an IP address, an inventory group, a set of groups, or all hosts in your inventory. Patterns are highly flexible - you can exclude or require subsets of hosts, use wildcards or regular expressions, and more. Ansible executes on all inventory hosts included in the pattern.
+
+.. contents::
+ :local:
+
+Using patterns
+--------------
+
+You use a pattern almost any time you execute an ad-hoc command or a playbook. The pattern is the only element of an :ref:`ad-hoc command<intro_adhoc>` that has no flag. It is usually the second element::
+
+ ansible <pattern> -m <module_name> -a "<module options>"
+
+For example::
+
+ ansible webservers -m service -a "name=httpd state=restarted"
+
+In a playbook the pattern is the content of the ``hosts:`` line for each play:
+
+.. code-block:: yaml
+
+ - name: <play_name>
+ hosts: <pattern>
+
+For example::
+
+ - name: restart webservers
+ hosts: webservers
+
+Since you often want to run a command or playbook against multiple hosts at once, patterns often refer to inventory groups. Both the ad-hoc command and the playbook above will execute against all machines in the ``webservers`` group.
+
+.. _common_patterns:
+
+Common patterns
+---------------
+
+This table lists common patterns for targeting inventory hosts and groups.
+
+.. table::
+ :class: documentation-table
+
+ ====================== ================================ ===================================================
+ Description Pattern(s) Targets
+ ====================== ================================ ===================================================
+ All hosts all (or \*)
+
+ One host host1
+
+ Multiple hosts host1:host2 (or host1,host2)
+
+ One group webservers
+
+ Multiple groups webservers:dbservers all hosts in webservers plus all hosts in dbservers
+
+ Excluding groups webservers:!atlanta all hosts in webservers except those in atlanta
+
+ Intersection of groups webservers:&staging any hosts in webservers that are also in staging
+ ====================== ================================ ===================================================
+
+.. note:: You can use either a comma (``,``) or a colon (``:``) to separate a list of hosts. The comma is preferred when dealing with ranges and IPv6 addresses.
+
+Once you know the basic patterns, you can combine them. This example::
+
+ webservers:dbservers:&staging:!phoenix
+
+targets all machines in the groups 'webservers' and 'dbservers' that are also in
+the group 'staging', except any machines in the group 'phoenix'.
+
+You can use wildcard patterns with FQDNs or IP addresses, as long as the hosts are named in your inventory by FQDN or IP address::
+
+ 192.0.\*
+ \*.example.com
+ \*.com
+
+You can mix wildcard patterns and groups at the same time::
+
+ one*.com:dbservers
+
+Limitations of patterns
+-----------------------
+
+Patterns depend on inventory. If a host or group is not listed in your inventory, you cannot use a pattern to target it. If your pattern includes an IP address or hostname that does not appear in your inventory, you will see an error like this:
+
+.. code-block:: text
+
+ [WARNING]: No inventory was parsed, only implicit localhost is available
+ [WARNING]: Could not match supplied host pattern, ignoring: *.not_in_inventory.com
+
+Your pattern must match your inventory syntax. If you define a host as an :ref:`alias<inventory_aliases>`:
+
+.. code-block:: yaml
+
+ atlanta:
+ host1:
+ http_port: 80
+ maxRequestsPerChild: 808
+ host: 127.0.0.2
+
+you must use the alias in your pattern. In the example above, you must use ``host1`` in your pattern. If you use the IP address, you will once again get the error::
+
+ [WARNING]: Could not match supplied host pattern, ignoring: 127.0.0.2
+
+Advanced pattern options
+------------------------
+
+The common patterns described above will meet most of your needs, but Ansible offers several other ways to define the hosts and groups you want to target.
+
+Using variables in patterns
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can use variables to enable passing group specifiers via the ``-e`` argument to ansible-playbook::
+
+ webservers:!{{ excluded }}:&{{ required }}
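+
+For example, a hedged play sketch whose exclusions and requirements are supplied at runtime (run with something like ``ansible-playbook -e "excluded=atlanta required=staging" playbook.yml``; the group names are illustrative):
+
+.. code-block:: yaml
+
+    - name: Target a runtime-defined subset of webservers
+      hosts: "webservers:!{{ excluded }}:&{{ required }}"
+      tasks:
+        - name: Report each matched host
+          ansible.builtin.debug:
+            msg: "{{ inventory_hostname }} matched the pattern"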
+
+Using group position in patterns
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can define a host or subset of hosts by its position in a group. For example, given the following group::
+
+ [webservers]
+ cobweb
+ webbing
+ weber
+
+you can use subscripts to select individual hosts or ranges within the webservers group::
+
+ webservers[0] # == cobweb
+ webservers[-1] # == weber
+ webservers[0:2] # == webservers[0],webservers[1]
+ # == cobweb,webbing
+ webservers[1:] # == webbing,weber
+ webservers[:3] # == cobweb,webbing,weber
+
+Using regexes in patterns
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can specify a pattern as a regular expression by starting the pattern with ``~``::
+
+ ~(web|db).*\.example\.com
+
+Patterns and ansible-playbook flags
+-----------------------------------
+
+You can change the behavior of the patterns defined in playbooks using command-line options. For example, you can run a playbook that defines ``hosts: all`` on a single host by specifying ``-i 127.0.0.2,`` (note the trailing comma). This works even if the host you target is not defined in your inventory. You can also limit the hosts you target on a particular run with the ``--limit`` flag::
+
+ ansible-playbook site.yml --limit datacenter2
+
+Finally, you can use ``--limit`` to read the list of hosts from a file by prefixing the file name with ``@``::
+
+ ansible-playbook site.yml --limit @retry_hosts.txt
+
+If :ref:`RETRY_FILES_ENABLED` is set to ``True``, a ``.retry`` file will be created after the ``ansible-playbook`` run containing a list of failed hosts from all plays. This file is overwritten each time ``ansible-playbook`` finishes running. You can then re-run against just the failed hosts::
+
+ ansible-playbook site.yml --limit @site.retry
+
+To apply your knowledge of patterns with Ansible commands and playbooks, read :ref:`intro_adhoc` and :ref:`playbooks_intro`.
+
+.. seealso::
+
+ :ref:`intro_adhoc`
+ Examples of basic commands
+ :ref:`working_with_playbooks`
+ Learning the Ansible configuration management language
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/intro_windows.rst b/docs/docsite/rst/user_guide/intro_windows.rst
new file mode 100644
index 00000000..ba81f6d6
--- /dev/null
+++ b/docs/docsite/rst/user_guide/intro_windows.rst
@@ -0,0 +1,4 @@
+Windows Support
+===============
+
+This page has been split up and moved to the new section :ref:`windows`.
diff --git a/docs/docsite/rst/user_guide/modules.rst b/docs/docsite/rst/user_guide/modules.rst
new file mode 100644
index 00000000..70dac884
--- /dev/null
+++ b/docs/docsite/rst/user_guide/modules.rst
@@ -0,0 +1,36 @@
+.. _working_with_modules:
+
+Working With Modules
+====================
+
+.. toctree::
+ :maxdepth: 1
+
+ modules_intro
+ modules_support
+ ../reference_appendices/common_return_values
+
+
+Ansible ships with a number of modules (called the 'module library')
+that can be executed directly on remote hosts or through :ref:`Playbooks <working_with_playbooks>`.
+
+Users can also write their own modules. These modules can control system resources,
+like services, packages, or files (anything really), or handle executing system commands.
+
+
+.. seealso::
+
+ :ref:`intro_adhoc`
+ Examples of using modules in /usr/bin/ansible
+ :ref:`playbooks_intro`
+ Introduction to using modules with /usr/bin/ansible-playbook
+ :ref:`developing_modules_general`
+ How to write your own modules
+ :ref:`developing_api`
+ Examples of using modules with the Python API
+ :ref:`interpreter_discovery`
+ Configuring the right Python interpreter on target hosts
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/modules_intro.rst b/docs/docsite/rst/user_guide/modules_intro.rst
new file mode 100644
index 00000000..bb6d2cd7
--- /dev/null
+++ b/docs/docsite/rst/user_guide/modules_intro.rst
@@ -0,0 +1,52 @@
+.. _intro_modules:
+
+Introduction to modules
+=======================
+
+Modules (also referred to as "task plugins" or "library plugins") are discrete units of code that can be used from the command line or in a playbook task. Ansible executes each module, usually on the remote managed node, and collects return values. In Ansible 2.10 and later, most modules are hosted in collections.
+
+You can execute modules from the command line::
+
+ ansible webservers -m service -a "name=httpd state=started"
+ ansible webservers -m ping
+ ansible webservers -m command -a "/sbin/reboot -t now"
+
+Each module supports taking arguments. Nearly all modules take ``key=value`` arguments, space delimited. Some modules take no arguments, and the command/shell modules simply take the string of the command you want to run.
+
+From playbooks, Ansible modules are executed in a very similar way::
+
+ - name: reboot the servers
+ command: /sbin/reboot -t now
+
+Another way to pass arguments to a module is using YAML syntax, also called 'complex args' ::
+
+ - name: restart webserver
+ service:
+ name: httpd
+ state: restarted
+
+All modules return JSON format data. This means modules can be written in any programming language. Modules should be idempotent, and should avoid making any changes if they detect that the current state matches the desired final state. When used in an Ansible playbook, modules can trigger 'change events' in the form of notifying :ref:`handlers <handlers>` to run additional tasks.
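+
+As a sketch of that change-event flow (the module arguments and handler are illustrative), a task that notifies a handler only when it reports a change:
+
+.. code-block:: yaml
+
+    - hosts: webservers
+      tasks:
+        - name: Deploy web server configuration
+          ansible.builtin.template:
+            src: httpd.conf.j2
+            dest: /etc/httpd/conf/httpd.conf
+          notify: Restart httpd
+      handlers:
+        - name: Restart httpd
+          ansible.builtin.service:
+            name: httpd
+            state: restarted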
+
+You can access the documentation for each module from the command line with the ansible-doc tool::
+
+ ansible-doc yum
+
+For a list of all available modules, see the :ref:`Collection docs <list_of_collections>`, or run the following at a command prompt::
+
+ ansible-doc -l
+
+
+.. seealso::
+
+ :ref:`intro_adhoc`
+ Examples of using modules in /usr/bin/ansible
+ :ref:`working_with_playbooks`
+ Examples of using modules with /usr/bin/ansible-playbook
+ :ref:`developing_modules`
+ How to write your own modules
+ :ref:`developing_api`
+ Examples of using modules with the Python API
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/modules_support.rst b/docs/docsite/rst/user_guide/modules_support.rst
new file mode 100644
index 00000000..6faa7333
--- /dev/null
+++ b/docs/docsite/rst/user_guide/modules_support.rst
@@ -0,0 +1,70 @@
+.. _modules_support:
+
+****************************
+Module Maintenance & Support
+****************************
+
+If you are using a module and you discover a bug, you may want to know where to report that bug, who is responsible for fixing it, and how you can track changes to the module. If you are a Red Hat subscriber, you may want to know whether you can get support for the issue you are facing.
+
+Starting in Ansible 2.10, most modules live in collections. The distribution method for each collection reflects the maintenance and support for the modules in that collection.
+
+.. contents::
+ :local:
+
+Maintenance
+===========
+
+.. table::
+ :class: documentation-table
+
+ ============================= ========================================== ==========================
+ Collection Code location Maintained by
+ ============================= ========================================== ==========================
+ ansible.builtin `ansible/ansible repo`_ on GitHub core team
+
+ distributed on Galaxy various; follow ``repo`` link community or partners
+
+ distributed on Automation Hub various; follow ``repo`` link content team or partners
+ ============================= ========================================== ==========================
+
+.. _ansible/ansible repo: https://github.com/ansible/ansible/tree/devel/lib/ansible/modules
+
+Issue Reporting
+===============
+
+If you find a bug that affects a plugin in the main Ansible repo, also known as ``ansible-base``:
+
+ #. Confirm that you are running the latest stable version of Ansible or the devel branch.
+ #. Look at the `issue tracker in the Ansible repo <https://github.com/ansible/ansible/issues>`_ to see if an issue has already been filed.
+ #. Create an issue if one does not already exist. Include as much detail as you can about the behavior you discovered.
+
+If you find a bug that affects a plugin in a Galaxy collection:
+
+ #. Find the collection on Galaxy.
+ #. Find the issue tracker for the collection.
+ #. Look there to see if an issue has already been filed.
+ #. Create an issue if one does not already exist. Include as much detail as you can about the behavior you discovered.
+
+Some partner collections may be hosted in private repositories.
+
+If you are not sure whether the behavior you see is a bug, if you have questions, if you want to discuss development-oriented topics, or if you just want to get in touch, use one of our Google groups or IRC channels to :ref:`communicate with Ansiblers <communication>`.
+
+If you find a bug that affects a module in an Automation Hub collection:
+
+ #. If the collection offers an Issue Tracker link on Automation Hub, click there and open an issue on the collection repository. If it does not, follow the standard process for reporting issues on the `Red Hat Customer Portal <https://access.redhat.com/>`_. You must have a subscription to the Red Hat Ansible Automation Platform to create an issue on the portal.
+
+Support
+=======
+
+All plugins that remain in ``ansible-base`` and all collections hosted in Automation Hub are supported by Red Hat. No other plugins or collections are supported by Red Hat. If you have a subscription to the Red Hat Ansible Automation Platform, you can find more information and resources on the `Red Hat Customer Portal <https://access.redhat.com/>`_.
+
+.. seealso::
+
+ :ref:`intro_adhoc`
+ Examples of using modules in /usr/bin/ansible
+ :ref:`working_with_playbooks`
+ Examples of using modules with /usr/bin/ansible-playbook
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbook_pathing.rst b/docs/docsite/rst/user_guide/playbook_pathing.rst
new file mode 100644
index 00000000..7fc6059b
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbook_pathing.rst
@@ -0,0 +1,42 @@
+:orphan:
+
+***********************
+Search paths in Ansible
+***********************
+
+You can control the paths Ansible searches to find resources on your control node (including configuration, modules, roles, ssh keys, and more) as well as resources on the remote nodes you are managing. Use absolute paths to tell Ansible where to find resources whenever you can. However, absolute paths are not always practical. This page covers how Ansible interprets relative search paths, along with ways to troubleshoot when Ansible cannot find the resource you need.
+
+.. contents::
+ :local:
+
+Config paths
+============
+
+By default, these paths are relative to the config file. Some are specifically relative to the current working directory or to the playbook, and should have this noted in their description. Things like SSH keys are left to use the current working directory because that mirrors how the underlying tools would use them.
+
+
+Task paths
+==========
+
+Task paths include two different scopes: task evaluation and task execution. For task evaluation, all paths are local, like in lookups. For task execution, which usually happens on the remote nodes, local paths do not usually apply. However, if a task uses an action plugin, it uses a local path. The template and copy modules are examples of modules that use action plugins, and therefore use local paths.
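+
+A sketch of the distinction (the paths are illustrative): ``src`` is resolved on the control node through the local search path, while ``dest`` is interpreted on the managed node:
+
+.. code-block:: yaml
+
+    - name: Copy a file found through the local search path
+      ansible.builtin.copy:
+        src: motd          # resolved locally, for example files/motd in the role or play directory
+        dest: /etc/motd    # a path on the remote node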
+
+The magic of 'local' paths
+--------------------------
+
+Lookups and action plugins both use a special 'search magic' to find things. Taking the current play into account, they search from the most specific to the most general playbook directory in which a task is contained (this includes roles and includes).
+
+Using this magic, relative paths are attempted first with 'files', 'templates', or 'vars' appended (if not already present), depending on the action being taken; 'files' is the default (in other words, ``include_vars`` will use ``vars/``). The paths are searched from most specific to most general (in other words, role before play).
+Dependent roles WILL be traversed (in other words, if a task is in role2, and role2 is a dependency of role1, role2 will be looked at first, then role1, then the play).
+For example::
+
+ role search path is rolename/{files|vars|templates}/, rolename/tasks/.
+ play search path is playdir/{files|vars|templates}/, playdir/.
+
+
+By default, Ansible does not search the current working directory unless it happens to coincide with one of the paths above. If you ``include`` a task file from a role, it will NOT trigger role behavior; that only happens when running as a role (``include_role`` will). The ``ansible_search_path`` variable contains the search path that was used, in order, but without the appended subdirectories. Using 5 "v"s (``-vvvvv``) should show the detail of the search as it happens.
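+
+To inspect the path list Ansible used for a given task, a quick sketch:
+
+.. code-block:: yaml
+
+    - name: Show the effective search path, most specific first
+      ansible.builtin.debug:
+        var: ansible_search_path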
+
+As for includes, they try the path of the included file first and fall back to the play/role that includes them.
+
+
+
+.. note:: The current working directory might vary depending on the connection plugin and whether the action is local or remote. For remote actions it is normally the directory to which the login shell puts the user. For local actions it is either the directory you executed ansible from or, in some cases, the playbook directory.
diff --git a/docs/docsite/rst/user_guide/playbooks.rst b/docs/docsite/rst/user_guide/playbooks.rst
new file mode 100644
index 00000000..8c851c12
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks.rst
@@ -0,0 +1,21 @@
+.. _working_with_playbooks:
+
+Working with playbooks
+======================
+
+Playbooks record and execute Ansible's configuration, deployment, and orchestration functions. They can describe a policy you want your remote systems to enforce, or a set of steps in a general IT process.
+
+If Ansible modules are the tools in your workshop, playbooks are your instruction manuals, and your inventory of hosts is your raw material.
+
+At a basic level, playbooks can be used to manage configurations of and deployments to remote machines. At a more advanced level, they can sequence multi-tier rollouts involving rolling updates, and can delegate actions to other hosts, interacting with monitoring servers and load balancers along the way.
+
+Playbooks are designed to be human-readable and are developed in a basic text language. There are multiple ways to organize playbooks and the files they include, and we'll offer some suggestions on organizing them and on making the most of Ansible.
+
+You should look at `Example Playbooks <https://github.com/ansible/ansible-examples>`_ while reading along with the playbook documentation. These illustrate best practices as well as how to put many of the various concepts together.
+
+.. toctree::
+ :maxdepth: 2
+
+ playbooks_templating
+ playbooks_special_topics
+ guide_rolling_upgrade
diff --git a/docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst b/docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst
new file mode 100644
index 00000000..2edf36d3
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_advanced_syntax.rst
@@ -0,0 +1,112 @@
+.. _playbooks_advanced_syntax:
+
+***************
+Advanced Syntax
+***************
+
+The advanced YAML syntax examples on this page give you more control over the data placed in YAML files used by Ansible. You can find additional information about Python-specific YAML in the official `PyYAML Documentation <https://pyyaml.org/wiki/PyYAMLDocumentation#YAMLtagsandPythontypes>`_.
+
+.. contents::
+ :local:
+
+.. _unsafe_strings:
+
+Unsafe or raw strings
+=====================
+
+When handling values returned by lookup plugins, Ansible uses a data type called ``unsafe`` to block templating. Marking data as unsafe prevents malicious users from abusing Jinja2 templates to execute arbitrary code on target machines. The Ansible implementation ensures that unsafe values are never templated. It is more comprehensive than escaping Jinja2 with ``{% raw %} ... {% endraw %}`` tags.
+
+You can use the same ``unsafe`` data type in variables you define, to prevent templating errors and information disclosure. You can mark values supplied by :ref:`vars_prompts<unsafe_prompts>` as unsafe. You can also use ``unsafe`` in playbooks. The most common use cases include passwords that allow special characters like ``{`` or ``%``, and JSON arguments that look like templates but should not be templated. For example:
+
+.. code-block:: yaml
+
+ ---
+ mypassword: !unsafe 234%234{435lkj{{lkjsdf
+
+In a playbook::
+
+    ---
+    - hosts: all
+      vars:
+        my_unsafe_variable: !unsafe 'unsafe % value'
+      tasks:
+        ...
+
+For complex variables such as hashes or arrays, use ``!unsafe`` on the individual elements::
+
+ ---
+ my_unsafe_array:
+ - !unsafe 'unsafe element'
+ - 'safe element'
+
+ my_unsafe_hash:
+ unsafe_key: !unsafe 'unsafe value'
+
+.. _anchors_and_aliases:
+
+YAML anchors and aliases: sharing variable values
+=================================================
+
+`YAML anchors and aliases <https://yaml.org/spec/1.2/spec.html#id2765878>`_ help you define, maintain, and use shared variable values in a flexible way.
+You define an anchor with ``&``, then refer to it using an alias, denoted with ``*``. Here's an example that sets three values with an anchor, uses two of those values with an alias, and overrides the third value::
+
+ ---
+ ...
+ vars:
+ app1:
+ jvm: &jvm_opts
+ opts: '-Xms1G -Xmx2G'
+ port: 1000
+ path: /usr/lib/app1
+ app2:
+ jvm:
+ <<: *jvm_opts
+ path: /usr/lib/app2
+ ...
+
+Here, ``app1`` and ``app2`` share the values for ``opts`` and ``port`` using the anchor ``&jvm_opts`` and the alias ``*jvm_opts``.
+The anchored values are pulled into ``app2`` with ``<<``, the `merge operator <https://yaml.org/type/merge.html>`_, and the value for ``path`` is then overridden.
+
+Anchors and aliases also let you share complex sets of variable values, including nested variables. If you have one variable value that includes another variable value, you can define them separately::
+
+ vars:
+ webapp_version: 1.0
+ webapp_custom_name: ToDo_App-1.0
+
+This is inefficient and, at scale, means more maintenance. To incorporate the version value in the name, you can use an anchor on ``version`` and an alias in ``custom_name``::
+
+ vars:
+ webapp:
+ version: &my_version 1.0
+ custom_name:
+ - "ToDo_App"
+ - *my_version
+
+Now, you can re-use the value of ``version`` within the value of ``custom_name`` and use the output in a template::
+
+ ---
+ - name: Using values nested inside dictionary
+ hosts: localhost
+ vars:
+ webapp:
+ version: &my_version 1.0
+ custom_name:
+ - "ToDo_App"
+ - *my_version
+ tasks:
+ - name: Using Anchor value
+ debug:
+ msg: My app is called "{{ webapp.custom_name | join('-') }}".
+
+You've anchored the value of ``version`` with the ``&my_version`` anchor, and re-used it with the ``*my_version`` alias. Anchors and aliases let you access nested values inside dictionaries.
+
+.. seealso::
+
+ :ref:`playbooks_variables`
+ All about variables
+ :doc:`complex_data_manipulation`
+ Doing complex data manipulation in Ansible
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_async.rst b/docs/docsite/rst/user_guide/playbooks_async.rst
new file mode 100644
index 00000000..09fe5d5d
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_async.rst
@@ -0,0 +1,161 @@
+.. _playbooks_async:
+
+Asynchronous actions and polling
+================================
+
+By default Ansible runs tasks synchronously, holding the connection to the remote node open until the action is completed. This means that, within a playbook, each task blocks the next one; subsequent tasks will not run until the current task completes. This behavior can create challenges. For example, a task may take longer to complete than the SSH session allows for, causing a timeout. Or you may want a long-running process to execute in the background while you perform other tasks concurrently. Asynchronous mode lets you control how long-running tasks execute.
+
+.. contents::
+ :local:
+
+Asynchronous ad-hoc tasks
+-------------------------
+
+You can execute long-running operations in the background with :ref:`ad-hoc tasks <intro_adhoc>`. For example, to execute ``long_running_operation`` asynchronously in the background, with a timeout (``-B``) of 3600 seconds, and without polling (``-P``)::
+
+ $ ansible all -B 3600 -P 0 -a "/usr/bin/long_running_operation --do-stuff"
+
+To check on the job status later, use the ``async_status`` module, passing it the job ID that was returned when you ran the original job in the background::
+
+ $ ansible web1.example.com -m async_status -a "jid=488359678239.2844"
+
+Ansible can also check on the status of your long-running job automatically with polling. In most cases, Ansible will keep the connection to your remote node open between polls. To run for 30 minutes and poll for status every 60 seconds::
+
+ $ ansible all -B 1800 -P 60 -a "/usr/bin/long_running_operation --do-stuff"
+
+Poll mode is smart so all jobs will be started before polling begins on any machine. Be sure to use a high enough ``--forks`` value if you want to get all of your jobs started very quickly. After the time limit (in seconds) runs out (``-B``), the process on the remote nodes will be terminated.
+
+Asynchronous mode is best suited to long-running shell commands or software upgrades. Running the copy module asynchronously, for example, does not do a background file transfer.
+
+Asynchronous playbook tasks
+---------------------------
+
+:ref:`Playbooks <working_with_playbooks>` also support asynchronous mode and polling, with a simplified syntax. You can use asynchronous mode in playbooks to avoid connection timeouts or to avoid blocking subsequent tasks. The behavior of asynchronous mode in a playbook depends on the value of ``poll``.
+
+Avoid connection timeouts: poll > 0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want to set a longer timeout limit for a certain task in your playbook, use ``async`` with ``poll`` set to a positive value. Ansible will still block the next task in your playbook, waiting until the async task either completes, fails or times out. However, the task will only time out if it exceeds the timeout limit you set with the ``async`` parameter.
+
+To avoid timeouts on a task, specify its maximum runtime and how frequently you would like to poll for status::
+
+ ---
+
+ - hosts: all
+ remote_user: root
+
+ tasks:
+
+ - name: Simulate long running op (15 sec), wait for up to 45 sec, poll every 5 sec
+ ansible.builtin.command: /bin/sleep 15
+ async: 45
+ poll: 5
+
+.. note::
+ The default poll value is set by the :ref:`DEFAULT_POLL_INTERVAL` setting.
+ There is no default for the async time limit. If you leave off the
+ 'async' keyword, the task runs synchronously, which is Ansible's
+ default.
+
+.. note::
+ As of Ansible 2.3, async does not support check mode and will fail the
+ task when run in check mode. See :ref:`check_mode_dry` on how to
+ skip a task in check mode.
+
+Run tasks concurrently: poll = 0
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want to run multiple tasks in a playbook concurrently, use ``async`` with ``poll`` set to 0. When you set ``poll: 0``, Ansible starts the task and immediately moves on to the next task without waiting for a result. Each async task runs until it either completes, fails or times out (runs longer than its ``async`` value). The playbook run ends without checking back on async tasks.
+
+To run a playbook task asynchronously::
+
+ ---
+
+ - hosts: all
+ remote_user: root
+
+ tasks:
+
+ - name: Simulate long running op, allow to run for 45 sec, fire and forget
+ ansible.builtin.command: /bin/sleep 15
+ async: 45
+ poll: 0
+
+.. note::
+ Do not specify a poll value of 0 with operations that require exclusive locks (such as yum transactions) if you expect to run other commands later in the playbook against those same resources.
+
+.. note::
+ Using a higher value for ``--forks`` will result in kicking off asynchronous tasks even faster. This also increases the efficiency of polling.
+
+If you need a synchronization point with an async task, you can register it to obtain its job ID and use the :ref:`async_status <async_status_module>` module to observe it in a later task. For example::
+
+ - name: Run an async task
+ ansible.builtin.yum:
+ name: docker-io
+ state: present
+ async: 1000
+ poll: 0
+ register: yum_sleeper
+
+ - name: Check on an async task
+ async_status:
+ jid: "{{ yum_sleeper.ansible_job_id }}"
+ register: job_result
+ until: job_result.finished
+ retries: 100
+ delay: 10
+
+.. note::
+    If the value of ``async:`` is not high enough, this will cause the
+    "check on it later" task to fail because the temporary status file that
+    the ``async_status:`` module is looking for will not have been written yet or will no longer exist.
+
+To run multiple asynchronous tasks while limiting the number of tasks running concurrently::
+
+ #####################
+ # main.yml
+ #####################
+ - name: Run items asynchronously in batch of two items
+ vars:
+ sleep_durations:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ durations: "{{ item }}"
+ include_tasks: execute_batch.yml
+ loop: "{{ sleep_durations | batch(2) | list }}"
+
+ #####################
+ # execute_batch.yml
+ #####################
+ - name: Async sleeping for batched_items
+ ansible.builtin.command: sleep {{ async_item }}
+ async: 45
+ poll: 0
+ loop: "{{ durations }}"
+ loop_control:
+ loop_var: "async_item"
+ register: async_results
+
+ - name: Check sync status
+ async_status:
+ jid: "{{ async_result_item.ansible_job_id }}"
+ loop: "{{ async_results.results }}"
+ loop_control:
+ loop_var: "async_result_item"
+ register: async_poll_results
+ until: async_poll_results.finished
+ retries: 30
+
+.. seealso::
+
+ :ref:`playbooks_strategies`
+ Options for controlling playbook execution
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_best_practices.rst b/docs/docsite/rst/user_guide/playbooks_best_practices.rst
new file mode 100644
index 00000000..86915f51
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_best_practices.rst
@@ -0,0 +1,167 @@
+.. _playbooks_tips_and_tricks:
+.. _playbooks_best_practices:
+
+***************
+Tips and tricks
+***************
+
+These tips and tricks have helped us optimize our Ansible usage, and we offer them here as suggestions. We hope they will help you organize content, write playbooks, maintain inventory, and execute Ansible. Ultimately, though, you should use Ansible in the way that makes most sense for your organization and your goals.
+
+.. contents::
+ :local:
+
+General tips
+============
+
+These concepts apply to all Ansible activities and artifacts.
+
+Keep it simple
+--------------
+
+Whenever you can, do things simply. Use advanced features only when necessary, and select the feature that best matches your use case. For example, you will probably not need ``vars``, ``vars_files``, ``vars_prompt`` and ``--extra-vars`` all at once, while also using an external inventory file. If something feels complicated, it probably is. Take the time to look for a simpler solution.
+
+Use version control
+-------------------
+
+Keep your playbooks, roles, inventory, and variables files in git or another version control system and make commits to the repository when you make changes. Version control gives you an audit trail describing when and why you changed the rules that automate your infrastructure.
+
+Playbook tips
+=============
+
+These tips help make playbooks and roles easier to read, maintain, and debug.
+
+Use whitespace
+--------------
+
+Generous use of whitespace, for example, a blank line before each block or task, makes a playbook easy to scan.
+
+Always name tasks
+-----------------
+
+Task names are optional, but extremely useful. In its output, Ansible shows you the name of each task it runs. Choose names that describe what each task does and why.
+
+Always mention the state
+------------------------
+
+For many modules, the 'state' parameter is optional. Different modules have different default settings for 'state', and some modules support several 'state' settings. Explicitly setting 'state=present' or 'state=absent' makes playbooks and roles clearer.
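+
+For example (a minimal sketch), state the default explicitly rather than relying on it::
+
+    - name: Ensure ntp is installed
+      ansible.builtin.yum:
+        name: ntp
+        state: present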
+
+Use comments
+------------
+
+Even with task names and explicit state, sometimes a part of a playbook or role (or an inventory or variable file) needs more explanation. Adding a comment (any line starting with '#') helps others (and possibly yourself in the future) understand what a play or task (or variable setting) does, how it does it, and why.
+
+Inventory tips
+==============
+
+These tips help keep your inventory well organized.
+
+Use dynamic inventory with clouds
+---------------------------------
+
+With cloud providers and other systems that maintain canonical lists of your infrastructure, use :ref:`dynamic inventory <intro_dynamic_inventory>` to retrieve those lists instead of manually updating static inventory files. With cloud resources, you can use tags to differentiate production and staging environments.
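+
+A minimal sketch of an inventory plugin configuration (this assumes the ``amazon.aws`` collection is installed; the region and tag key are illustrative)::
+
+    # inventory/aws_ec2.yml
+    plugin: amazon.aws.aws_ec2
+    regions:
+      - us-east-1
+    keyed_groups:
+      # builds groups like env_production and env_staging from an 'environment' tag
+      - key: tags.environment
+        prefix: env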
+
+Group inventory by function
+---------------------------
+
+A system can be in multiple groups. See :ref:`intro_inventory` and :ref:`intro_patterns`. If you create groups named for the function of the nodes in the group, for example *webservers* or *dbservers*, your playbooks can target machines based on function. You can assign function-specific variables using the group variable system, and design Ansible roles to handle function-specific use cases. See :ref:`playbooks_reuse_roles`.
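+
+A minimal static inventory grouped by function might look like this (the hostnames are placeholders)::
+
+    [webservers]
+    www01.example.com
+    www02.example.com
+
+    [dbservers]
+    db01.example.com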
+
+Separate production and staging inventory
+-----------------------------------------
+
+You can keep your production environment separate from development, test, and staging environments by using separate inventory files or directories for each environment. This way you choose the environment you are targeting with the ``-i`` option. Keeping all your environments in one file can lead to surprises!
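+
+For example, with one inventory source per environment (the layout is illustrative), the environment you target is always an explicit choice::
+
+    ansible-playbook site.yml -i inventories/staging
+    ansible-playbook site.yml -i inventories/production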
+
+.. _tip_for_variables_and_vaults:
+
+Keep vaulted variables safely visible
+-------------------------------------
+
+You should encrypt sensitive or secret variables with Ansible Vault. However, encrypting the variable names as well as the variable values makes it hard to find the source of the values. You can keep the names of your variables accessible (by ``grep``, for example) without exposing any secrets by adding a layer of indirection:
+
+#. Create a ``group_vars/`` subdirectory named after the group.
+#. Inside this subdirectory, create two files named ``vars`` and ``vault``.
+#. In the ``vars`` file, define all of the variables needed, including any sensitive ones.
+#. Copy all of the sensitive variables over to the ``vault`` file and prefix these variables with ``vault_``.
+#. Adjust the variables in the ``vars`` file to point to the matching ``vault_`` variables using Jinja2 syntax: ``db_password: {{ vault_db_password }}``.
+#. Encrypt the ``vault`` file to protect its contents.
+#. Use the variable name from the ``vars`` file in your playbooks.
+
+When running a playbook, Ansible finds the variables in the unencrypted file, which pulls the sensitive variable values from the encrypted file. There is no limit to the number of variable and vault files or their names.
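+
+A minimal sketch of the resulting layout for a hypothetical ``webservers`` group (the values are placeholders)::
+
+    # group_vars/webservers/vars
+    db_user: myapp
+    db_password: "{{ vault_db_password }}"
+
+    # group_vars/webservers/vault
+    # encrypt this file with: ansible-vault encrypt group_vars/webservers/vault
+    vault_db_password: NotARealPassword123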
+
+Execution tricks
+================
+
+These tips apply to using Ansible, rather than to Ansible artifacts.
+
+Try it in staging first
+-----------------------
+
+Testing changes in a staging environment before rolling them out in production is always a great idea. Your environments need not be the same size and you can use group variables to control the differences between those environments.
+
+Update in batches
+-----------------
+
+Use the 'serial' keyword to control how many machines you update at once in the batch. See :ref:`playbooks_delegation`.
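+
+A minimal sketch of a rolling update (the group and package names are placeholders)::
+
+    ---
+    - hosts: webservers
+      serial: 3
+      tasks:
+        - name: Upgrade the web server package three hosts at a time
+          ansible.builtin.yum:
+            name: httpd
+            state: latest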
+
+.. _os_variance:
+
+Handling OS and distro differences
+----------------------------------
+
+Group variables files and the ``group_by`` module work together to help Ansible execute across a range of operating systems and distributions that require different settings, packages, and tools. The ``group_by`` module creates a dynamic group of hosts matching certain criteria. This group does not need to be defined in the inventory file. This approach lets you execute different tasks on different operating systems or distributions. For example::
+
+ ---
+
+ - name: talk to all hosts just so we can learn about them
+ hosts: all
+ tasks:
+ - name: Classify hosts depending on their OS distribution
+ group_by:
+ key: os_{{ ansible_facts['distribution'] }}
+
+ # now just on the CentOS hosts...
+
+ - hosts: os_CentOS
+ gather_facts: False
+ tasks:
+ - # tasks that only happen on CentOS go in this play
+
+The first play categorizes all systems into dynamic groups based on the operating system name. Later plays can use these groups as patterns on the ``hosts`` line. You can also add group-specific settings in group vars files. All three names must match: the name created by the ``group_by`` task, the name of the pattern in subsequent plays, and the name of the group vars file. For example::
+
+ ---
+ # file: group_vars/all
+ asdf: 10
+
+ ---
+ # file: group_vars/os_CentOS.yml
+ asdf: 42
+
+In this example, CentOS machines get the value of '42' for asdf, but other machines get '10'.
+You can use this approach not only to set variables, but also to apply certain roles only to certain systems.
+
+You can use the same setup with ``include_vars`` when you only need OS-specific variables, not tasks::
+
+ - hosts: all
+ tasks:
+ - name: Set OS distribution dependent variables
+ include_vars: "os_{{ ansible_facts['distribution'] }}.yml"
+ - debug:
+ var: asdf
+
+This pulls in variables from the group_vars/os_CentOS.yml file.
+
+.. seealso::
+
+ :ref:`yaml_syntax`
+ Learn about YAML syntax
+ :ref:`working_with_playbooks`
+ Review the basic playbook features
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_modules`
+ Learn how to extend Ansible by writing your own modules
+ :ref:`intro_patterns`
+ Learn about how to select hosts
+ `GitHub examples directory <https://github.com/ansible/ansible-examples>`_
+ Complete playbook files from the github project source
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/playbooks_blocks.rst b/docs/docsite/rst/user_guide/playbooks_blocks.rst
new file mode 100644
index 00000000..dc516312
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_blocks.rst
@@ -0,0 +1,189 @@
+.. _playbooks_blocks:
+
+******
+Blocks
+******
+
+Blocks create logical groups of tasks. Blocks also offer ways to handle task errors, similar to exception handling in many programming languages.
+
+.. contents::
+ :local:
+
+Grouping tasks with blocks
+==========================
+
+All tasks in a block inherit directives applied at the block level. Most of what you can apply to a single task (with the exception of loops) can be applied at the block level, so blocks make it much easier to set data or directives common to the tasks. A directive does not affect the block itself; it is only inherited by the tasks enclosed by the block. For example, a `when` statement is applied to the tasks within a block, not to the block itself.
+
+.. code-block:: YAML
+ :emphasize-lines: 3
+ :caption: Block example with named tasks inside the block
+
+ tasks:
+ - name: Install, configure, and start Apache
+ block:
+ - name: Install httpd and memcached
+ ansible.builtin.yum:
+ name:
+ - httpd
+ - memcached
+ state: present
+
+ - name: Apply the foo config template
+ ansible.builtin.template:
+ src: templates/src.j2
+ dest: /etc/foo.conf
+
+ - name: Start service bar and enable it
+ ansible.builtin.service:
+ name: bar
+ state: started
+ enabled: True
+ when: ansible_facts['distribution'] == 'CentOS'
+ become: true
+ become_user: root
+ ignore_errors: yes
+
+In the example above, the 'when' condition will be evaluated before Ansible runs each of the three tasks in the block. All three tasks also inherit the privilege escalation directives, running as the root user. Finally, ``ignore_errors: yes`` ensures that Ansible continues to execute the playbook even if some of the tasks fail.
+
+Names for blocks have been available since Ansible 2.3. We recommend using names in all tasks, within blocks or elsewhere, for better visibility into the tasks being executed when you run the playbook.
+
+.. _block_error_handling:
+
+Handling errors with blocks
+===========================
+
+You can control how Ansible responds to task errors using blocks with ``rescue`` and ``always`` sections.
+
+Rescue blocks specify tasks to run when an earlier task in a block fails. This approach is similar to exception handling in many programming languages. Ansible only runs rescue blocks after a task returns a 'failed' state. Bad task definitions and unreachable hosts will not trigger the rescue block.
+
+.. _block_rescue:
+.. code-block:: YAML
+ :emphasize-lines: 3,10
+ :caption: Block error handling example
+
+ tasks:
+ - name: Handle the error
+ block:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: 'I execute normally'
+
+ - name: Force a failure
+ ansible.builtin.command: /bin/false
+
+ - name: Never print this
+ ansible.builtin.debug:
+ msg: 'I never execute, due to the above task failing, :-('
+ rescue:
+ - name: Print when errors
+ ansible.builtin.debug:
+ msg: 'I caught an error, can do stuff here to fix it, :-)'
+
+You can also add an ``always`` section to a block. Tasks in the ``always`` section run no matter what the task status of the previous block is.
+
+.. _block_always:
+.. code-block:: YAML
+ :emphasize-lines: 2,9
+ :caption: Block with always section
+
+ - name: Always do X
+ block:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: 'I execute normally'
+
+ - name: Force a failure
+ ansible.builtin.command: /bin/false
+
+ - name: Never print this
+ ansible.builtin.debug:
+ msg: 'I never execute :-('
+ always:
+ - name: Always do this
+ ansible.builtin.debug:
+ msg: "This always executes, :-)"
+
+Together, these elements offer complex error handling.
+
+.. code-block:: YAML
+ :emphasize-lines: 2,9,16
+ :caption: Block with all sections
+
+ - name: Attempt and graceful roll back demo
+ block:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: 'I execute normally'
+
+ - name: Force a failure
+ ansible.builtin.command: /bin/false
+
+ - name: Never print this
+ ansible.builtin.debug:
+ msg: 'I never execute, due to the above task failing, :-('
+ rescue:
+ - name: Print when errors
+ ansible.builtin.debug:
+ msg: 'I caught an error'
+
+ - name: Force a failure in middle of recovery! >:-)
+ ansible.builtin.command: /bin/false
+
+ - name: Never print this
+ ansible.builtin.debug:
+ msg: 'I also never execute :-('
+ always:
+ - name: Always do this
+ ansible.builtin.debug:
+ msg: "This always executes"
+
+The tasks in the ``block`` execute normally. If any tasks in the block return ``failed``, the ``rescue`` section executes tasks to recover from the error. The ``always`` section runs regardless of the results of the ``block`` and ``rescue`` sections.
+
+If an error occurs in the block and the rescue task succeeds, Ansible reverts the failed status of the original task for the run and continues to run the play as if the original task had succeeded. The rescued task is considered successful, and does not trigger ``max_fail_percentage`` or ``any_errors_fatal`` configurations. However, Ansible still reports a failure in the playbook statistics.
+
+You can use blocks with ``flush_handlers`` in a rescue task to ensure that all handlers run even if an error occurs:
+
+.. code-block:: YAML
+ :emphasize-lines: 6,10
+ :caption: Block run handlers in error handling
+
+ tasks:
+ - name: Attempt and graceful roll back demo
+ block:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: 'I execute normally'
+ changed_when: yes
+ notify: run me even after an error
+
+ - name: Force a failure
+ ansible.builtin.command: /bin/false
+ rescue:
+ - name: Make sure all handlers run
+ meta: flush_handlers
+ handlers:
+ - name: Run me even after an error
+ ansible.builtin.debug:
+ msg: 'This handler runs even on error'
+
+
+.. versionadded:: 2.1
+
+Ansible provides a couple of variables for tasks in the ``rescue`` portion of a block:
+
+ansible_failed_task
+ The task that returned 'failed' and triggered the rescue. For example, to get the name use ``ansible_failed_task.name``.
+
+ansible_failed_result
+ The captured return result of the failed task that triggered the rescue. This would equate to having used this var in the ``register`` keyword.
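+
+For example (a minimal sketch), a ``rescue`` task can report which task failed and why::
+
+    - name: Demonstrate the rescue variables
+      block:
+        - name: Force a failure
+          ansible.builtin.command: /bin/false
+      rescue:
+        - name: Report which task failed
+          ansible.builtin.debug:
+            msg: "Task '{{ ansible_failed_task.name }}' failed: {{ ansible_failed_result.msg | default('') }}"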
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_checkmode.rst b/docs/docsite/rst/user_guide/playbooks_checkmode.rst
new file mode 100644
index 00000000..36b16aa8
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_checkmode.rst
@@ -0,0 +1,97 @@
+.. _check_mode_dry:
+
+******************************************
+Validating tasks: check mode and diff mode
+******************************************
+
+Ansible provides two modes of execution that validate tasks: check mode and diff mode. These modes can be used separately or together. They are useful when you are creating or editing a playbook or role and you want to know what it will do. In check mode, Ansible runs without making any changes on remote systems. Modules that support check mode report the changes they would have made. Modules that do not support check mode report nothing and do nothing. In diff mode, Ansible provides before-and-after comparisons. Modules that support diff mode display detailed information. You can combine check mode and diff mode for detailed validation of your playbook or role.
+
+.. contents::
+ :local:
+
+Using check mode
+================
+
+Check mode is just a simulation. It will not generate output for tasks that use :ref:`conditionals based on registered variables <conditionals_registered_vars>` (results of prior tasks). However, it is great for validating configuration management playbooks that run on one node at a time. To run a playbook in check mode::
+
+ ansible-playbook foo.yml --check
+
+.. _forcing_to_run_in_check_mode:
+
+Enforcing or preventing check mode on tasks
+-------------------------------------------
+
+.. versionadded:: 2.2
+
+If you want certain tasks to run in check mode always, or never, regardless of whether you run the playbook with or without ``--check``, you can add the ``check_mode`` option to those tasks:
+
+ - To force a task to run in check mode, even when the playbook is called without ``--check``, set ``check_mode: yes``.
+ - To force a task to run in normal mode and make changes to the system, even when the playbook is called with ``--check``, set ``check_mode: no``.
+
+For example::
+
+ tasks:
+ - name: This task will always make changes to the system
+ ansible.builtin.command: /something/to/run --even-in-check-mode
+ check_mode: no
+
+ - name: This task will never make changes to the system
+ ansible.builtin.lineinfile:
+ line: "important config"
+ dest: /path/to/myconfig.conf
+ state: present
+ check_mode: yes
+ register: changes_to_important_config
+
+Running single tasks with ``check_mode: yes`` can be useful for testing Ansible modules, either to test the module itself or to test the conditions under which a module would make changes. You can register variables (see :ref:`playbooks_conditionals`) on these tasks for even more detail on the potential changes.
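+
+Continuing the example above (a minimal sketch), a later task can act on the registered result without the file ever being touched::
+
+    - name: Report whether the config file would change
+      ansible.builtin.debug:
+        msg: "The important config would be updated"
+      when: changes_to_important_config is changed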
+
+.. note:: Prior to version 2.2 only the equivalent of ``check_mode: no`` existed. The notation for that was ``always_run: yes``.
+
+Skipping tasks or ignoring errors in check mode
+-----------------------------------------------
+
+.. versionadded:: 2.1
+
+If you want to skip a task or ignore errors on a task when you run Ansible in check mode, you can use a boolean magic variable ``ansible_check_mode``, which is set to ``True`` when Ansible runs in check mode. For example::
+
+ tasks:
+
+ - name: This task will be skipped in check mode
+ ansible.builtin.git:
+ repo: ssh://git@github.com/mylogin/hello.git
+ dest: /home/mylogin/hello
+ when: not ansible_check_mode
+
+ - name: This task will ignore errors in check mode
+ ansible.builtin.git:
+ repo: ssh://git@github.com/mylogin/hello.git
+ dest: /home/mylogin/hello
+ ignore_errors: "{{ ansible_check_mode }}"
+
+.. _diff_mode:
+
+Using diff mode
+===============
+
+The ``--diff`` option for ansible-playbook can be used alone or with ``--check``. When you run in diff mode, any module that supports diff mode reports the changes made or, if used with ``--check``, the changes that would have been made. Diff mode is most common in modules that manipulate files (for example, the template module) but other modules might also show 'before and after' information (for example, the user module).
+
+Diff mode produces a large amount of output, so it is best used when checking a single host at a time. For example::
+
+ ansible-playbook foo.yml --check --diff --limit foo.example.com
+
+.. versionadded:: 2.4
+
+Enforcing or preventing diff mode on tasks
+------------------------------------------
+
+Because the ``--diff`` option can reveal sensitive information, you can disable it for a task by specifying ``diff: no``. For example::
+
+ tasks:
+ - name: This task will not report a diff when the file changes
+ ansible.builtin.template:
+ src: secret.conf.j2
+ dest: /etc/secret.conf
+ owner: root
+ group: root
+ mode: '0600'
+ diff: no
diff --git a/docs/docsite/rst/user_guide/playbooks_conditionals.rst b/docs/docsite/rst/user_guide/playbooks_conditionals.rst
new file mode 100644
index 00000000..76599cb3
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_conditionals.rst
@@ -0,0 +1,508 @@
+.. _playbooks_conditionals:
+
+************
+Conditionals
+************
+
+In a playbook, you may want to execute different tasks, or have different goals, depending on the value of a fact (data about the remote system), a variable, or the result of a previous task. You may want the value of some variables to depend on the value of other variables. Or you may want to create additional groups of hosts based on whether the hosts match other criteria. You can do all of these things with conditionals.
+
+Ansible uses Jinja2 :ref:`tests <playbooks_tests>` and :ref:`filters <playbooks_filters>` in conditionals. Ansible supports all the standard tests and filters, and adds some unique ones as well.
+
+.. note::
+
+ There are many options to control execution flow in Ansible. You can find more examples of supported conditionals at `<https://jinja.palletsprojects.com/en/master/templates/#comparisons>`_.
+
+.. contents::
+ :local:
+
+.. _the_when_statement:
+
+Basic conditionals with ``when``
+================================
+
+The simplest conditional statement applies to a single task. Create the task, then add a ``when`` statement that applies a test. The ``when`` clause is a raw Jinja2 expression without double curly braces (see :ref:`group_by_module`). When you run the task or playbook, Ansible evaluates the test for all hosts. On any host where the test passes (returns a value of True), Ansible runs that task. For example, if you are installing mysql on multiple machines, some of which have SELinux enabled, you might have a task to configure SELinux to allow mysql to run. You would only want that task to run on machines that have SELinux enabled:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Configure SELinux to start mysql on any port
+ ansible.posix.seboolean:
+ name: mysql_connect_any
+ state: true
+ persistent: yes
+ when: ansible_selinux.status == "enabled"
+ # all variables can be used directly in conditionals without double curly braces
+
+Conditionals based on ansible_facts
+-----------------------------------
+
+Often you want to execute or skip a task based on facts. Facts are attributes of individual hosts, including IP address, operating system, the status of a filesystem, and many more. With conditionals based on facts:
+
+ - You can install a certain package only when the operating system is a particular version.
+ - You can skip configuring a firewall on hosts with internal IP addresses.
+ - You can perform cleanup tasks only when a filesystem is getting full.
+
+See :ref:`commonly_used_facts` for a list of facts that frequently appear in conditional statements. Not all facts exist for all hosts. For example, the 'lsb_major_release' fact used in an example below only exists when the lsb_release package is installed on the target host. To see what facts are available on your systems, add a debug task to your playbook::
+
+ - name: Show facts available on the system
+ ansible.builtin.debug:
+ var: ansible_facts
+
+Here is a sample conditional based on a fact:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Shut down Debian flavored systems
+ ansible.builtin.command: /sbin/shutdown -t now
+ when: ansible_facts['os_family'] == "Debian"
+
+If you have multiple conditions, you can group them with parentheses:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Shut down CentOS 6 and Debian 7 systems
+ ansible.builtin.command: /sbin/shutdown -t now
+ when: (ansible_facts['distribution'] == "CentOS" and ansible_facts['distribution_major_version'] == "6") or
+ (ansible_facts['distribution'] == "Debian" and ansible_facts['distribution_major_version'] == "7")
+
+You can use `logical operators <https://jinja.palletsprojects.com/en/master/templates/#logic>`_ to combine conditions. When you have multiple conditions that all need to be true (that is, a logical ``and``), you can specify them as a list::
+
+ tasks:
+ - name: Shut down CentOS 6 systems
+ ansible.builtin.command: /sbin/shutdown -t now
+ when:
+ - ansible_facts['distribution'] == "CentOS"
+ - ansible_facts['distribution_major_version'] == "6"
+
+If a fact or variable is a string, and you need to run a mathematical comparison on it, use a filter to ensure that Ansible reads the value as an integer::
+
+ tasks:
+ - ansible.builtin.shell: echo "only on Red Hat 6, derivatives, and later"
+ when: ansible_facts['os_family'] == "RedHat" and ansible_facts['lsb']['major_release'] | int >= 6
+
+.. _conditionals_registered_vars:
+
+Conditions based on registered variables
+----------------------------------------
+
+Often in a playbook you want to execute or skip a task based on the outcome of an earlier task. For example, you might want to configure a service after it is upgraded by an earlier task. To create a conditional based on a registered variable:
+
+ #. Register the outcome of the earlier task as a variable.
+ #. Create a conditional test based on the registered variable.
+
+You create the name of the registered variable using the ``register`` keyword. A registered variable always contains the status of the task that created it as well as any output that task generated. You can use registered variables in templates and action lines as well as in conditional ``when`` statements. You can access the string contents of the registered variable using ``variable.stdout``. For example::
+
+ - name: Test play
+ hosts: all
+
+ tasks:
+
+ - name: Register a variable
+ ansible.builtin.shell: cat /etc/motd
+ register: motd_contents
+
+ - name: Use the variable in conditional statement
+ ansible.builtin.shell: echo "motd contains the word hi"
+ when: motd_contents.stdout.find('hi') != -1
+
+You can use registered results in the loop of a task if the variable is a list. If the variable is not a list, you can convert it into one with either ``stdout_lines`` or ``variable.stdout.split()``. You can also split the lines by other fields::
+
+ - name: Registered variable usage as a loop list
+ hosts: all
+ tasks:
+
+ - name: Retrieve the list of home directories
+ ansible.builtin.command: ls /home
+ register: home_dirs
+
+ - name: Add home dirs to the backup spooler
+ ansible.builtin.file:
+ path: /mnt/bkspool/{{ item }}
+ src: /home/{{ item }}
+ state: link
+ loop: "{{ home_dirs.stdout_lines }}"
+ # same as loop: "{{ home_dirs.stdout.split() }}"
+
+The string content of a registered variable can be empty. If you want to run another task only on hosts where the stdout of your registered variable is empty, check the registered variable's string contents for emptiness:
+
+.. code-block:: yaml
+
+ - name: check registered variable for emptiness
+ hosts: all
+
+ tasks:
+
+ - name: List contents of directory
+ ansible.builtin.command: ls mydir
+ register: contents
+
+ - name: Check contents for emptiness
+ ansible.builtin.debug:
+ msg: "Directory is empty"
+ when: contents.stdout == ""
+
+Ansible always registers something in a registered variable for every host, even on hosts where a task fails or Ansible skips a task because a condition is not met. To run a follow-up task on these hosts, query the registered variable for ``is skipped`` (not for "undefined" or "default"). See :ref:`registered_variables` for more information. Here are sample conditionals based on the success or failure of a task. Remember to ignore errors if you want Ansible to continue executing on a host when a failure occurs:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Register a variable, ignore errors and continue
+ ansible.builtin.command: /bin/false
+ register: result
+ ignore_errors: true
+
+ - name: Run only if the task that registered the "result" variable fails
+ ansible.builtin.command: /bin/something
+ when: result is failed
+
+ - name: Run only if the task that registered the "result" variable succeeds
+ ansible.builtin.command: /bin/something_else
+ when: result is succeeded
+
+ - name: Run only if the task that registered the "result" variable is skipped
+ ansible.builtin.command: /bin/still/something_else
+ when: result is skipped
+
+.. note:: Older versions of Ansible used ``success`` and ``fail``, but ``succeeded`` and ``failed`` use the correct tense. All of these options are now valid.
+
+
+Conditionals based on variables
+-------------------------------
+
+You can also create conditionals based on variables defined in the playbooks or inventory. Because conditionals require boolean input (a test must evaluate as True to trigger the condition), you must apply the ``| bool`` filter to non-boolean variables, such as string variables with content like 'yes', 'on', '1', or 'true'. You can define variables like this:
+
+.. code-block:: yaml
+
+ vars:
+ epic: true
+ monumental: "yes"
+
+With the variables above, Ansible would run one of these tasks and skip the other:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Run the command if "epic" or "monumental" is true
+ ansible.builtin.shell: echo "This certainly is epic!"
+ when: epic or monumental | bool
+
+ - name: Run the command if "epic" is false
+ ansible.builtin.shell: echo "This certainly isn't epic!"
+ when: not epic
+
+If a required variable has not been set, you can skip or fail using Jinja2's `defined` test. For example:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Run the command if "foo" is defined
+ ansible.builtin.shell: echo "I've got '{{ foo }}' and am not afraid to use it!"
+ when: foo is defined
+
+ - name: Fail if "bar" is undefined
+ ansible.builtin.fail: msg="Bailing out. This play requires 'bar'"
+ when: bar is undefined
+
+This is especially useful in combination with the conditional import of vars files (see below).
+As the examples show, you do not need to use `{{ }}` to use variables inside conditionals, as these are already implied.
+
+.. _loops_and_conditionals:
+
+Using conditionals in loops
+---------------------------
+
+If you combine a ``when`` statement with a :ref:`loop <playbooks_loops>`, Ansible processes the condition separately for each item. This is by design, so you can execute the task on some items in the loop and skip it on other items. For example:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Run with items greater than 5
+ ansible.builtin.command: echo {{ item }}
+ loop: [ 0, 2, 4, 6, 8, 10 ]
+ when: item > 5
+
+If you need to skip the whole task when the loop variable is undefined, use the `|default` filter to provide an empty iterator. For example, when looping over a list:
+
+.. code-block:: yaml
+
+ - name: Skip the whole task when a loop variable is undefined
+ ansible.builtin.command: echo {{ item }}
+ loop: "{{ mylist|default([]) }}"
+ when: item > 5
+
+You can do the same thing when looping over a dict:
+
+.. code-block:: yaml
+
+ - name: The same as above using a dict
+ ansible.builtin.command: echo {{ item.key }}
+ loop: "{{ query('dict', mydict|default({})) }}"
+ when: item.value > 5
+
+.. _loading_in_custom_facts:
+
+Loading custom facts
+--------------------
+
+You can provide your own facts, as described in :ref:`developing_modules`. To run them, just make a call to your own custom fact gathering module at the top of your list of tasks, and variables returned there will be accessible to future tasks:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Gather site specific fact data
+ action: site_facts
+
+ - name: Use a custom fact
+ ansible.builtin.command: /usr/bin/thingy
+ when: my_custom_fact_just_retrieved_from_the_remote_system == '1234'
+
+.. _when_with_reuse:
+
+Conditionals with re-use
+------------------------
+
+You can use conditionals with re-usable tasks files, playbooks, or roles. Ansible executes these conditional statements differently for dynamic re-use (includes) and for static re-use (imports). See :ref:`playbooks_reuse` for more information on re-use in Ansible.
+
+.. _conditional_imports:
+
+Conditionals with imports
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When you add a conditional to an import statement, Ansible applies the condition to all tasks within the imported file. This behavior is the equivalent of :ref:`tag_inheritance`. Ansible applies the condition to every task, and evaluates each task separately. For example, you might have a playbook called ``main.yml`` and a tasks file called ``other_tasks.yml``::
+
+ # all tasks within an imported file inherit the condition from the import statement
+ # main.yml
+ - import_tasks: other_tasks.yml # note "import"
+ when: x is not defined
+
+ # other_tasks.yml
+ - name: Set a variable
+ ansible.builtin.set_fact:
+ x: foo
+
+ - name: Print a variable
+ ansible.builtin.debug:
+ var: x
+
+Ansible expands this at execution time to the equivalent of::
+
+ - name: Set a variable if not defined
+ ansible.builtin.set_fact:
+ x: foo
+ when: x is not defined
+ # this task sets a value for x
+
+ - name: Do the task if "x" is not defined
+      ansible.builtin.debug:
+ var: x
+ when: x is not defined
+ # Ansible skips this task, because x is now defined
+
+Thus if ``x`` is initially undefined, the ``debug`` task will be skipped. If this is not the behavior you want, use an ``include_*`` statement to apply a condition only to that statement itself.
+
+You can apply conditions to ``import_playbook`` as well as to the other ``import_*`` statements. When you use this approach, Ansible returns a 'skipped' message for every task on every host that does not match the criteria, creating repetitive output. In many cases the :ref:`group_by module <group_by_module>` can be a more streamlined way to accomplish the same objective; see :ref:`os_variance`.
+
+.. _conditional_includes:
+
+Conditionals with includes
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When you use a conditional on an ``include_*`` statement, the condition is applied only to the include task itself and not to any other tasks within the included file(s). To contrast with the example used for conditionals on imports above, look at the same playbook and tasks file, but using an include instead of an import::
+
+ # Includes let you re-use a file to define a variable when it is not already defined
+
+ # main.yml
+ - include_tasks: other_tasks.yml
+ when: x is not defined
+
+ # other_tasks.yml
+ - name: Set a variable
+ ansible.builtin.set_fact:
+ x: foo
+
+ - name: Print a variable
+ ansible.builtin.debug:
+ var: x
+
+Ansible expands this at execution time to the equivalent of::
+
+ # main.yml
+ - include_tasks: other_tasks.yml
+ when: x is not defined
+ # if condition is met, Ansible includes other_tasks.yml
+
+ # other_tasks.yml
+ - name: Set a variable
+ ansible.builtin.set_fact:
+ x: foo
+ # no condition applied to this task, Ansible sets the value of x to foo
+
+ - name: Print a variable
+ ansible.builtin.debug:
+ var: x
+ # no condition applied to this task, Ansible prints the debug statement
+
+By using ``include_tasks`` instead of ``import_tasks``, both tasks from ``other_tasks.yml`` will be executed as expected. For more information on the differences between ``include`` and ``import``, see :ref:`playbooks_reuse`.
+
+Conditionals with roles
+^^^^^^^^^^^^^^^^^^^^^^^
+
+There are three ways to apply conditions to roles:
+
+ - Add the same condition or conditions to all tasks in the role by placing your ``when`` statement under the ``roles`` keyword. See the example in this section.
+ - Add the same condition or conditions to all tasks in the role by placing your ``when`` statement on a static ``import_role`` in your playbook.
+ - Add a condition or conditions to individual tasks or blocks within the role itself. This is the only approach that allows you to select or skip some tasks within the role based on your ``when`` statement. To select or skip tasks within the role, you must have conditions set on individual tasks or blocks, use the dynamic ``include_role`` in your playbook, and add the condition or conditions to the include. When you use this approach, Ansible applies the condition to the include itself plus any tasks in the role that also have that ``when`` statement.
+
+When you incorporate a role in your playbook statically with the ``roles`` keyword, Ansible adds the conditions you define to all the tasks in the role. For example:
+
+.. code-block:: yaml
+
+ - hosts: webservers
+ roles:
+ - role: debian_stock_config
+ when: ansible_facts['os_family'] == 'Debian'
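+
+The same condition applied to a dynamic include instead (a minimal sketch)::
+
+    - hosts: webservers
+      tasks:
+        - name: Include the role only on Debian hosts
+          include_role:
+            name: debian_stock_config
+          when: ansible_facts['os_family'] == 'Debian'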
+
+.. _conditional_variable_and_files:
+
+Selecting variables, files, or templates based on facts
+-------------------------------------------------------
+
+Sometimes the facts about a host determine the values you want to use for certain variables or even the file or template you want to select for that host. For example, the names of packages are different on CentOS and on Debian. The configuration files for common services are also different on different OS flavors and versions. To load different variables files, templates, or other files based on a fact about the hosts:
+
+ 1) name your vars files, templates, or files to match the Ansible fact that differentiates them
+
+ 2) select the correct vars file, template, or file for each host with a variable based on that Ansible fact
+
+Ansible separates variables from tasks, keeping your playbooks from turning into arbitrary code with nested conditionals. This approach results in more streamlined and auditable configuration rules because there are fewer decision points to track.
+
+Selecting variables files based on facts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can create a playbook that works on multiple platforms and OS versions with a minimum of syntax by placing your variable values in vars files and conditionally importing them. If you want to install Apache on some CentOS and some Debian servers, create variables files with YAML keys and values. For example::
+
+ ---
+ # for vars/RedHat.yml
+ apache: httpd
+ somethingelse: 42
+
+Then import those variables files based on the facts you gather on the hosts in your playbook::
+
+ ---
+ - hosts: webservers
+ remote_user: root
+ vars_files:
+ - "vars/common.yml"
+ - [ "vars/{{ ansible_facts['os_family'] }}.yml", "vars/os_defaults.yml" ]
+ tasks:
+ - name: Make sure apache is started
+ ansible.builtin.service:
+ name: '{{ apache }}'
+ state: started
+
+Ansible gathers facts on the hosts in the webservers group, then interpolates the variable "ansible_facts['os_family']" into a list of filenames. If you have hosts with Red Hat operating systems (CentOS, for example), Ansible looks for 'vars/RedHat.yml'. If that file does not exist, Ansible attempts to load 'vars/os_defaults.yml'. For Debian hosts, Ansible first looks for 'vars/Debian.yml', before falling back on 'vars/os_defaults.yml'. If no files in the list are found, Ansible raises an error.
+
+Selecting files and templates based on facts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can use the same approach when different OS flavors or versions require different configuration files or templates. Select the appropriate file or template based on the variables assigned to each host. This approach is often much cleaner than putting a lot of conditionals into a single template to cover multiple OS or package versions.
+
+For example, you can template out a configuration file that is very different between, say, CentOS and Debian::
+
+ - name: Template a file
+ ansible.builtin.template:
+ src: "{{ item }}"
+ dest: /etc/myapp/foo.conf
+ loop: "{{ query('first_found', { 'files': myfiles, 'paths': mypaths}) }}"
+ vars:
+ myfiles:
+ - "{{ ansible_facts['distribution'] }}.conf"
+ - default.conf
+ mypaths: ['search_location_one/somedir/', '/opt/other_location/somedir/']
+
+.. _commonly_used_facts:
+
+Commonly-used facts
+===================
+
+The following Ansible facts are frequently used in conditionals.
+
+.. _ansible_distribution:
+
+ansible_facts['distribution']
+-----------------------------
+
+Possible values (sample, not complete list)::
+
+ Alpine
+ Altlinux
+ Amazon
+ Archlinux
+ ClearLinux
+ Coreos
+ CentOS
+ Debian
+ Fedora
+ Gentoo
+ Mandriva
+ NA
+ OpenWrt
+ OracleLinux
+ RedHat
+ Slackware
+ SLES
+ SMGL
+ SUSE
+ Ubuntu
+ VMwareESX
+
+.. See `OSDIST_LIST`
+
+.. _ansible_distribution_major_version:
+
+ansible_facts['distribution_major_version']
+-------------------------------------------
+
+The major version of the operating system. For example, the value is `16` for Ubuntu 16.04.
+
+.. _ansible_os_family:
+
+ansible_facts['os_family']
+--------------------------
+
+Possible values (sample, not complete list)::
+
+ AIX
+ Alpine
+ Altlinux
+ Archlinux
+ Darwin
+ Debian
+ FreeBSD
+ Gentoo
+ HP-UX
+ Mandrake
+ RedHat
+    SMGL
+ Slackware
+ Solaris
+ Suse
+ Windows
+
+.. Ansible checks `OS_FAMILY_MAP`; if there's no match, it returns the value of `platform.system()`.
+
+.. seealso::
+
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_debugger.rst b/docs/docsite/rst/user_guide/playbooks_debugger.rst
new file mode 100644
index 00000000..cc330cc5
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_debugger.rst
@@ -0,0 +1,329 @@
+.. _playbook_debugger:
+
+***************
+Debugging tasks
+***************
+
+Ansible offers a task debugger so you can fix errors during execution instead of editing your playbook and running it again to see if your change worked. You have access to all of the features of the debugger in the context of the task. You can check or set the value of variables, update module arguments, and re-run the task with the new variables and arguments. The debugger lets you resolve the cause of the failure and continue with playbook execution.
+
+.. contents::
+ :local:
+
+Enabling the debugger
+=====================
+
+The debugger is not enabled by default. If you want to invoke the debugger during playbook execution, you must enable it first.
+
+Use one of these three methods to enable the debugger:
+
+ * with the debugger keyword
+ * in configuration or an environment variable, or
+ * as a strategy
+
+Enabling the debugger with the ``debugger`` keyword
+---------------------------------------------------
+
+.. versionadded:: 2.5
+
+You can use the ``debugger`` keyword to enable (or disable) the debugger for a specific play, role, block, or task. This option is especially useful when developing or extending playbooks, plays, and roles. You can enable the debugger on new or updated tasks. If they fail, you can fix the errors efficiently. The ``debugger`` keyword accepts five values:
+
+.. table::
+ :class: documentation-table
+
+ ========================= ======================================================
+ Value Result
+ ========================= ======================================================
+ always Always invoke the debugger, regardless of the outcome
+
+ never Never invoke the debugger, regardless of the outcome
+
+ on_failed Only invoke the debugger if a task fails
+
+ on_unreachable Only invoke the debugger if a host is unreachable
+
+ on_skipped Only invoke the debugger if the task is skipped
+
+ ========================= ======================================================
+
+When you use the ``debugger`` keyword, the value you specify overrides any global configuration to enable or disable the debugger. If you define ``debugger`` at multiple levels, such as in a role and in a task, Ansible honors the most granular definition. The definition at the play or role level applies to all blocks and tasks within that play or role, unless they specify a different value. The definition at the block level overrides the definition at the play or role level, and applies to all tasks within that block, unless they specify a different value. The definition at the task level always applies to the task; it overrides the definitions at the block, play, or role level.
+
+Examples of using the ``debugger`` keyword
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Example of setting the ``debugger`` keyword on a task:
+
+.. code-block:: yaml
+
+ - name: Execute a command
+ ansible.builtin.command: "false"
+ debugger: on_failed
+
+Example of setting the ``debugger`` keyword on a play:
+
+.. code-block:: yaml
+
+ - name: My play
+ hosts: all
+ debugger: on_skipped
+ tasks:
+ - name: Execute a command
+ ansible.builtin.command: "true"
+ when: False
+
+Example of setting the ``debugger`` keyword at multiple levels:
+
+.. code-block:: yaml
+
+
+ - name: Play
+ hosts: all
+ debugger: never
+ tasks:
+ - name: Execute a command
+ ansible.builtin.command: "false"
+ debugger: on_failed
+
+In this example, the debugger is set to ``never`` at the play level and to ``on_failed`` at the task level. If the task fails, Ansible invokes the debugger, because the definition on the task overrides the definition on its parent play.
+
+Enabling the debugger in configuration or an environment variable
+-----------------------------------------------------------------
+
+.. versionadded:: 2.5
+
+You can enable the task debugger globally with a setting in ansible.cfg or with an environment variable. The only options are ``True`` or ``False``. If you set the configuration option or environment variable to ``True``, Ansible runs the debugger on failed tasks by default.
+
+To enable the task debugger from ansible.cfg, add this setting to the defaults section::
+
+ [defaults]
+ enable_task_debugger = True
+
+To enable the task debugger with an environment variable, pass the variable when you run your playbook::
+
+ ANSIBLE_ENABLE_TASK_DEBUGGER=True ansible-playbook -i hosts site.yml
+
+When you enable the debugger globally, every failed task invokes the debugger, unless the role, play, block, or task explicitly disables the debugger. If you need more granular control over what conditions trigger the debugger, use the ``debugger`` keyword.
+
+Enabling the debugger as a strategy
+-----------------------------------
+
+If you are running legacy playbooks or roles, you may see the debugger enabled as a :ref:`strategy <strategy_plugins>`. You can do this at the play level, in ansible.cfg, or with the environment variable ``ANSIBLE_STRATEGY=debug``. For example:
+
+.. code-block:: yaml
+
+ - hosts: test
+ strategy: debug
+ tasks:
+ ...
+
+Or in ansible.cfg::
+
+ [defaults]
+ strategy = debug
+
+.. note::
+
+ This backwards-compatible method, which matches Ansible versions before 2.5, may be removed in a future release.
+
+Resolving errors in the debugger
+================================
+
+After Ansible invokes the debugger, you can use the seven :ref:`debugger commands <available_commands>` to resolve the error that Ansible encountered. Consider this example playbook, which defines the ``var1`` variable but uses the undefined ``wrong_var`` variable in a task by mistake.
+
+.. code-block:: yaml
+
+ - hosts: test
+ debugger: on_failed
+ gather_facts: no
+ vars:
+ var1: value1
+ tasks:
+ - name: Use a wrong variable
+ ansible.builtin.ping: data={{ wrong_var }}
+
+If you run this playbook, Ansible invokes the debugger when the task fails. From the debug prompt, you can change the module arguments or the variables and run the task again.
+
+.. code-block:: none
+
+ PLAY ***************************************************************************
+
+ TASK [wrong variable] **********************************************************
+ fatal: [192.0.2.10]: FAILED! => {"failed": true, "msg": "ERROR! 'wrong_var' is undefined"}
+ Debugger invoked
+ [192.0.2.10] TASK: wrong variable (debug)> p result._result
+ {'failed': True,
+ 'msg': 'The task includes an option with an undefined variable. The error '
+ "was: 'wrong_var' is undefined\n"
+ '\n'
+ 'The error appears to have been in '
+ "'playbooks/debugger.yml': line 7, "
+ 'column 7, but may\n'
+ 'be elsewhere in the file depending on the exact syntax problem.\n'
+ '\n'
+ 'The offending line appears to be:\n'
+ '\n'
+ ' tasks:\n'
+ ' - name: wrong variable\n'
+ ' ^ here\n'}
+ [192.0.2.10] TASK: wrong variable (debug)> p task.args
+ {u'data': u'{{ wrong_var }}'}
+ [192.0.2.10] TASK: wrong variable (debug)> task.args['data'] = '{{ var1 }}'
+ [192.0.2.10] TASK: wrong variable (debug)> p task.args
+ {u'data': '{{ var1 }}'}
+ [192.0.2.10] TASK: wrong variable (debug)> redo
+ ok: [192.0.2.10]
+
+ PLAY RECAP *********************************************************************
+ 192.0.2.10 : ok=1 changed=0 unreachable=0 failed=0
+
+Changing the task arguments in the debugger to use ``var1`` instead of ``wrong_var`` makes the task run successfully.
+
+.. _available_commands:
+
+Available debug commands
+========================
+
+You can use these seven commands at the debug prompt:
+
+.. table::
+ :class: documentation-table
+
+ ========================== ============ =========================================================
+ Command Shortcut Action
+ ========================== ============ =========================================================
+ print p Print information about the task
+
+ task.args[*key*] = *value* no shortcut Update module arguments
+
+ task_vars[*key*] = *value* no shortcut Update task variables (you must ``update_task`` next)
+
+ update_task u Recreate a task with updated task variables
+
+ redo r Run the task again
+
+ continue c Continue executing, starting with the next task
+
+ quit q Quit the debugger
+
+ ========================== ============ =========================================================
+
+For more details, see the individual descriptions and examples below.
+
+.. _pprint_command:
+
+Print command
+-------------
+
+``print *task/task.args/task_vars/host/result*`` prints information about the task::
+
+ [192.0.2.10] TASK: install package (debug)> p task
+ TASK: install package
+ [192.0.2.10] TASK: install package (debug)> p task.args
+ {u'name': u'{{ pkg_name }}'}
+ [192.0.2.10] TASK: install package (debug)> p task_vars
+ {u'ansible_all_ipv4_addresses': [u'192.0.2.10'],
+ u'ansible_architecture': u'x86_64',
+ ...
+ }
+ [192.0.2.10] TASK: install package (debug)> p task_vars['pkg_name']
+ u'bash'
+ [192.0.2.10] TASK: install package (debug)> p host
+ 192.0.2.10
+ [192.0.2.10] TASK: install package (debug)> p result._result
+ {'_ansible_no_log': False,
+ 'changed': False,
+ u'failed': True,
+ ...
+ u'msg': u"No package matching 'not_exist' is available"}
+
+.. _update_args_command:
+
+Update args command
+-------------------
+
+``task.args[*key*] = *value*`` updates a module argument. This sample playbook has an invalid package name::
+
+ - hosts: test
+ strategy: debug
+ gather_facts: yes
+ vars:
+ pkg_name: not_exist
+ tasks:
+ - name: Install a package
+ ansible.builtin.apt: name={{ pkg_name }}
+
+When you run the playbook, the invalid package name triggers an error, and Ansible invokes the debugger. You can fix the package name by viewing, then updating the module argument::
+
+ [192.0.2.10] TASK: install package (debug)> p task.args
+ {u'name': u'{{ pkg_name }}'}
+ [192.0.2.10] TASK: install package (debug)> task.args['name'] = 'bash'
+ [192.0.2.10] TASK: install package (debug)> p task.args
+ {u'name': 'bash'}
+ [192.0.2.10] TASK: install package (debug)> redo
+
+After you update the module argument, use ``redo`` to run the task again with the new args.
+
+.. _update_vars_command:
+
+Update vars command
+-------------------
+
+``task_vars[*key*] = *value*`` updates the ``task_vars``. You could fix the playbook above by viewing, then updating the task variables instead of the module args::
+
+ [192.0.2.10] TASK: install package (debug)> p task_vars['pkg_name']
+ u'not_exist'
+ [192.0.2.10] TASK: install package (debug)> task_vars['pkg_name'] = 'bash'
+ [192.0.2.10] TASK: install package (debug)> p task_vars['pkg_name']
+ 'bash'
+ [192.0.2.10] TASK: install package (debug)> update_task
+ [192.0.2.10] TASK: install package (debug)> redo
+
+After you update the task variables, you must use ``update_task`` to load the new variables before using ``redo`` to run the task again.
+
+.. note::
+ In 2.5 this was updated from ``vars`` to ``task_vars`` to avoid conflicts with the ``vars()`` python function.
+
+.. _update_task_command:
+
+Update task command
+-------------------
+
+.. versionadded:: 2.8
+
+``u`` or ``update_task`` recreates the task from the original task data structure and templates with updated task variables. See the entry :ref:`update_vars_command` for an example of use.
+
+.. _redo_command:
+
+Redo command
+------------
+
+``r`` or ``redo`` runs the task again.
+
+.. _continue_command:
+
+Continue command
+----------------
+
+``c`` or ``continue`` continues executing, starting with the next task.
+
+.. _quit_command:
+
+Quit command
+------------
+
+``q`` or ``quit`` quits the debugger. The playbook execution is aborted.
+
+How the debugger interacts with the free strategy
+=================================================
+
+With the default ``linear`` strategy enabled, Ansible halts execution while the debugger is active, and runs the debugged task immediately after you enter the ``redo`` command. With the ``free`` strategy enabled, however, Ansible does not wait for all hosts, and may queue later tasks on one host before a task fails on another host. With the ``free`` strategy, Ansible does not queue or execute any tasks while the debugger is active. However, all queued tasks remain in the queue and run as soon as you exit the debugger. If you use ``redo`` to reschedule a task from the debugger, other queued tasks may execute before your rescheduled task. For more information about strategies, see :ref:`playbooks_strategies`.
+
+.. seealso::
+
+ :ref:`playbooks_start_and_step`
+ Running playbooks while debugging or testing
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_delegation.rst b/docs/docsite/rst/user_guide/playbooks_delegation.rst
new file mode 100644
index 00000000..1042bafb
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_delegation.rst
@@ -0,0 +1,136 @@
+.. _playbooks_delegation:
+
+Controlling where tasks run: delegation and local actions
+=========================================================
+
+By default Ansible gathers facts and executes all tasks on the machines that match the ``hosts`` line of your playbook. This page shows you how to delegate tasks to a different machine or group, delegate facts to specific machines or groups, or run an entire playbook locally. Using these approaches, you can manage inter-related environments precisely and efficiently. For example, when updating your webservers, you might need to remove them from a load-balanced pool temporarily. You cannot perform this task on the webservers themselves. By delegating the task to localhost, you keep all the tasks within the same play.
+
+.. contents::
+ :local:
+
+Tasks that cannot be delegated
+------------------------------
+
+Some tasks always execute on the controller. These tasks, including ``include``, ``add_host``, and ``debug``, cannot be delegated.
+
+.. _delegation:
+
+Delegating tasks
+----------------
+
+If you want to perform a task on one host with reference to other hosts, use the ``delegate_to`` keyword on a task. This is ideal for managing nodes in a load balanced pool or for controlling outage windows. You can use delegation with the :ref:`serial <rolling_update_batch_size>` keyword to control the number of hosts executing at one time::
+
+ ---
+ - hosts: webservers
+ serial: 5
+
+ tasks:
+ - name: Take out of load balancer pool
+ ansible.builtin.command: /usr/bin/take_out_of_pool {{ inventory_hostname }}
+ delegate_to: 127.0.0.1
+
+ - name: Actual steps would go here
+ ansible.builtin.yum:
+ name: acme-web-stack
+ state: latest
+
+ - name: Add back to load balancer pool
+ ansible.builtin.command: /usr/bin/add_back_to_pool {{ inventory_hostname }}
+ delegate_to: 127.0.0.1
+
+The first and third tasks in this play run on 127.0.0.1, which is the machine running Ansible. There is also a shorthand syntax that you can use on a per-task basis: ``local_action``. Here is the same playbook as above, but using the shorthand syntax for delegating to 127.0.0.1::
+
+ ---
+ # ...
+
+ tasks:
+ - name: Take out of load balancer pool
+ local_action: ansible.builtin.command /usr/bin/take_out_of_pool {{ inventory_hostname }}
+
+ # ...
+
+ - name: Add back to load balancer pool
+ local_action: ansible.builtin.command /usr/bin/add_back_to_pool {{ inventory_hostname }}
+
+You can use a local action to call 'rsync' to recursively copy files to the managed servers::
+
+ ---
+ # ...
+
+ tasks:
+ - name: Recursively copy files from management server to target
+ local_action: ansible.builtin.command rsync -a /path/to/files {{ inventory_hostname }}:/path/to/target/
+
+Note that you must have passphrase-less SSH keys or an ssh-agent configured for this to work, otherwise rsync asks for a passphrase.
+
+To specify more arguments, use the following syntax::
+
+ ---
+ # ...
+
+ tasks:
+ - name: Send summary mail
+ local_action:
+ module: community.general.mail
+ subject: "Summary Mail"
+ to: "{{ mail_recipient }}"
+ body: "{{ mail_body }}"
+ run_once: True
+
+The ``ansible_host`` variable reflects the host a task is delegated to.
+
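+For example, a minimal sketch of inspecting this behavior (``proxy.example.com`` is only an illustrative inventory name)::
+
+    - name: Show which host this task is delegated to
+      ansible.builtin.debug:
+        msg: "This task runs with ansible_host={{ ansible_host }}"
+      delegate_to: proxy.example.com  # illustrative inventory name
+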
+.. _delegate_facts:
+
+Delegating facts
+----------------
+
+Delegating Ansible tasks is like delegating tasks in the real world - your groceries belong to you, even if someone else delivers them to your home. Similarly, any facts gathered by a delegated task are assigned by default to the ``inventory_hostname`` (the current host), not to the host which produced the facts (the delegated-to host). To assign gathered facts to the delegated host instead of the current host, set ``delegate_facts`` to ``true``::
+
+ ---
+ - hosts: app_servers
+
+ tasks:
+ - name: Gather facts from db servers
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups['dbservers'] }}"
+
+This task gathers facts for the machines in the dbservers group and assigns the facts to those machines, even though the play targets the app_servers group. This way you can look up ``hostvars['dbhost1']['ansible_default_ipv4']['address']`` even though dbservers were not part of the play, or were left out by using ``--limit``.
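+
+A short sketch of using such a delegated fact later in the play (``dbhost1`` is an illustrative inventory name)::
+
+    - name: Use a fact gathered on a db server
+      ansible.builtin.debug:
+        msg: "{{ hostvars['dbhost1']['ansible_default_ipv4']['address'] }}"  # dbhost1 is illustrative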
+
+.. _local_playbooks:
+
+Local playbooks
+---------------
+
+It may be useful to run a playbook locally on a remote host, rather than connecting over SSH. For example, you can put a playbook in a crontab to assure the configuration of a system, or run a playbook
+inside an OS installer, such as an Anaconda kickstart.
+
+To run an entire playbook locally, just set the ``hosts:`` line to ``hosts: 127.0.0.1`` and then run the playbook like so::
+
+ ansible-playbook playbook.yml --connection=local
+
+Alternatively, a local connection can be used in a single playbook play, even if other plays in the playbook
+use the default remote connection type::
+
+ ---
+ - hosts: 127.0.0.1
+ connection: local
+
+.. note::
+    If you set the connection to local and there is no ``ansible_python_interpreter`` set, modules will run under ``/usr/bin/python``, not
+    under ``{{ ansible_playbook_python }}``. Be sure to set ``ansible_python_interpreter: "{{ ansible_playbook_python }}"`` in
+    ``host_vars/localhost.yml``, for example. You can avoid this issue by using ``local_action`` or ``delegate_to: localhost`` instead.
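+
+A minimal ``host_vars/localhost.yml`` sketch for this::
+
+    ansible_python_interpreter: "{{ ansible_playbook_python }}"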
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_strategies`
+ More ways to control how and where Ansible executes
+ `Ansible Examples on GitHub <https://github.com/ansible/ansible-examples>`_
+ Many examples of full-stack deployments
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_environment.rst b/docs/docsite/rst/user_guide/playbooks_environment.rst
new file mode 100644
index 00000000..7d97b954
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_environment.rst
@@ -0,0 +1,141 @@
+.. _playbooks_environment:
+
+Setting the remote environment
+==============================
+
+.. versionadded:: 1.1
+
+You can use the ``environment`` keyword at the play, block, or task level to set an environment variable for an action on a remote host. With this keyword, you can enable using a proxy for a task that does http requests, set the required environment variables for language-specific version managers, and more.
+
+When you set a value with ``environment:`` at the play or block level, it is available only to tasks within the play or block that are executed by the same user. The ``environment:`` keyword does not affect Ansible itself, Ansible configuration settings, the environment for other users, or the execution of other plugins like lookups and filters. Variables set with ``environment:`` do not automatically become Ansible facts, even when you set them at the play level. You must include an explicit ``gather_facts`` task in your playbook and set the ``environment`` keyword on that task to turn these values into Ansible facts.
+
+.. contents::
+ :local:
+
+Setting the remote environment in a task
+----------------------------------------
+
+You can set the environment directly at the task level::
+
+ - hosts: all
+ remote_user: root
+
+ tasks:
+
+ - name: Install cobbler
+ ansible.builtin.package:
+ name: cobbler
+ state: present
+ environment:
+ http_proxy: http://proxy.example.com:8080
+
+You can re-use environment settings by defining them as variables in your play and accessing them in a task as you would access any stored Ansible variable::
+
+ - hosts: all
+ remote_user: root
+
+ # create a variable named "proxy_env" that is a dictionary
+ vars:
+ proxy_env:
+ http_proxy: http://proxy.example.com:8080
+
+ tasks:
+
+ - name: Install cobbler
+ ansible.builtin.package:
+ name: cobbler
+ state: present
+ environment: "{{ proxy_env }}"
+
+You can store environment settings for re-use in multiple playbooks by defining them in a group_vars file::
+
+ ---
+ # file: group_vars/boston
+
+ ntp_server: ntp.bos.example.com
+ backup: bak.bos.example.com
+ proxy_env:
+ http_proxy: http://proxy.bos.example.com:8080
+ https_proxy: http://proxy.bos.example.com:8080
+
+You can set the remote environment at the play level::
+
+ - hosts: testing
+
+ roles:
+ - php
+ - nginx
+
+ environment:
+ http_proxy: http://proxy.example.com:8080
+
+These examples show proxy settings, but you can provide any number of settings this way.
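+
+As noted above, values set with ``environment:`` become Ansible facts only if you gather facts with the ``environment`` keyword set on that task. A minimal sketch of that pattern (the proxy value is illustrative)::
+
+    - hosts: all
+      gather_facts: false
+
+      tasks:
+        - name: Gather facts with the proxy variable set in the remote environment
+          ansible.builtin.setup:
+          environment:
+            http_proxy: http://proxy.example.com:8080  # illustrative value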
+
+Working with language-specific version managers
+===============================================
+
+Some language-specific version managers (such as rbenv and nvm) require you to set environment variables while these tools are in use. When using these tools manually, you usually source some environment variables from a script or from lines added to your shell configuration file. In Ansible, you can do this with the environment keyword at the play level::
+
+ ---
+ ### A playbook demonstrating a common npm workflow:
+ # - Check for package.json in the application directory
+ # - If package.json exists:
+ # * Run npm prune
+ # * Run npm install
+
+ - hosts: application
+ become: false
+
+ vars:
+ node_app_dir: /var/local/my_node_app
+
+ environment:
+ NVM_DIR: /var/local/nvm
+ PATH: /var/local/nvm/versions/node/v4.2.1/bin:{{ ansible_env.PATH }}
+
+ tasks:
+ - name: Check for package.json
+ ansible.builtin.stat:
+ path: '{{ node_app_dir }}/package.json'
+ register: packagejson
+
+ - name: Run npm prune
+ ansible.builtin.command: npm prune
+ args:
+ chdir: '{{ node_app_dir }}'
+ when: packagejson.stat.exists
+
+ - name: Run npm install
+ community.general.npm:
+ path: '{{ node_app_dir }}'
+ when: packagejson.stat.exists
+
+.. note::
+    The example above uses ``ansible_env`` as part of the PATH. Basing variables on ``ansible_env`` is risky. Ansible populates ``ansible_env`` values by gathering facts, so the value of the variables depends on the ``remote_user`` or ``become_user`` Ansible used when gathering those facts. If you change ``remote_user``/``become_user``, the values in ``ansible_env`` may not be the ones you expect.
+
+.. warning::
+ Environment variables are normally passed in clear text (shell plugin dependent) so they are not a recommended way of passing secrets to the module being executed.
+
+You can also specify the environment at the task level::
+
+ ---
+ - name: Install ruby 2.3.1
+ ansible.builtin.command: rbenv install {{ rbenv_ruby_version }}
+ args:
+ creates: '{{ rbenv_root }}/versions/{{ rbenv_ruby_version }}/bin/ruby'
+ vars:
+ rbenv_root: /usr/local/rbenv
+ rbenv_ruby_version: 2.3.1
+ environment:
+ CONFIGURE_OPTS: '--disable-install-doc'
+ RBENV_ROOT: '{{ rbenv_root }}'
+ PATH: '{{ rbenv_root }}/bin:{{ rbenv_root }}/shims:{{ rbenv_plugins }}/ruby-build/bin:{{ ansible_env.PATH }}'
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_error_handling.rst b/docs/docsite/rst/user_guide/playbooks_error_handling.rst
new file mode 100644
index 00000000..c73067cc
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_error_handling.rst
@@ -0,0 +1,245 @@
+.. _playbooks_error_handling:
+
+***************************
+Error handling in playbooks
+***************************
+
+When Ansible receives a non-zero return code from a command or a failure from a module, by default it stops executing on that host and continues on other hosts. However, in some circumstances you may want different behavior. Sometimes a non-zero return code indicates success. Sometimes you want a failure on one host to stop execution on all hosts. Ansible provides tools and settings to handle these situations and help you get the behavior, output, and reporting you want.
+
+.. contents::
+ :local:
+
+.. _ignoring_failed_commands:
+
+Ignoring failed commands
+========================
+
+By default Ansible stops executing tasks on a host when a task fails on that host. You can use ``ignore_errors`` to continue on in spite of the failure::
+
+ - name: Do not count this as a failure
+ ansible.builtin.command: /bin/false
+ ignore_errors: yes
+
+The ``ignore_errors`` directive only works when the task is able to run and returns a value of 'failed'. It does not make Ansible ignore undefined variable errors, connection failures, execution issues (for example, missing packages), or syntax errors.
+
+Ignoring unreachable host errors
+================================
+
+.. versionadded:: 2.7
+
+You can ignore a task failure due to the host instance being 'UNREACHABLE' with the ``ignore_unreachable`` keyword. Ansible ignores the task errors, but continues to execute future tasks against the unreachable host. For example, at the task level::
+
+ - name: This executes, fails, and the failure is ignored
+ ansible.builtin.command: /bin/true
+ ignore_unreachable: yes
+
+ - name: This executes, fails, and ends the play for this host
+ ansible.builtin.command: /bin/true
+
+And at the playbook level::
+
+ - hosts: all
+ ignore_unreachable: yes
+ tasks:
+ - name: This executes, fails, and the failure is ignored
+ ansible.builtin.command: /bin/true
+
+ - name: This executes, fails, and ends the play for this host
+ ansible.builtin.command: /bin/true
+ ignore_unreachable: no
+
+.. _resetting_unreachable:
+
+Resetting unreachable hosts
+===========================
+
+If Ansible cannot connect to a host, it marks that host as 'UNREACHABLE' and removes it from the list of active hosts for the run. You can use ``meta: clear_host_errors`` to reactivate all hosts, so subsequent tasks can try to reach them again.
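+
+For example, a minimal sketch::
+
+    - name: Attempt to reach previously unreachable hosts again
+      meta: clear_host_errors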
+
+
+.. _handlers_and_failure:
+
+Handlers and failure
+====================
+
+Ansible runs :ref:`handlers <handlers>` at the end of each play. If a task notifies a handler but
+another task fails later in the play, by default the handler does *not* run on that host,
+which may leave the host in an unexpected state. For example, a task could update
+a configuration file and notify a handler to restart some service. If a
+task later in the same play fails, the configuration file might be changed but
+the service will not be restarted.
+
+You can change this behavior with the ``--force-handlers`` command-line option,
+by including ``force_handlers: True`` in a play, or by adding ``force_handlers = True``
+to ansible.cfg. When handlers are forced, Ansible will run all notified handlers on
+all hosts, even hosts with failed tasks. (Note that certain errors could still prevent
+the handler from running, such as a host becoming unreachable.)
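+
+A minimal sketch at the play level (the file, handler, and service names are illustrative)::
+
+    - hosts: webservers
+      force_handlers: true
+
+      tasks:
+        - name: Update the service configuration
+          ansible.builtin.template:
+            src: app.conf.j2       # assumed template name
+            dest: /etc/app.conf    # assumed destination
+          notify: Restart app
+
+      handlers:
+        - name: Restart app
+          ansible.builtin.service:
+            name: app              # assumed service name
+            state: restarted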
+
+.. _controlling_what_defines_failure:
+
+Defining failure
+================
+
+Ansible lets you define what "failure" means in each task using the ``failed_when`` conditional. As with all conditionals in Ansible, lists of multiple ``failed_when`` conditions are joined with an implicit ``and``, meaning the task only fails when *all* conditions are met. If you want to trigger a failure when any of the conditions is met, you must define the conditions in a string with an explicit ``or`` operator.
+
+You may check for failure by searching for a word or phrase in the output of a command::
+
+ - name: Fail task when the command error output prints FAILED
+ ansible.builtin.command: /usr/bin/example-command -x -y -z
+ register: command_result
+ failed_when: "'FAILED' in command_result.stderr"
+
+or based on the return code::
+
+ - name: Fail task when both files are identical
+ ansible.builtin.raw: diff foo/file1 bar/file2
+ register: diff_cmd
+ failed_when: diff_cmd.rc == 0 or diff_cmd.rc >= 2
+
+You can also combine multiple conditions for failure. This task will fail if both conditions are true::
+
+ - name: Check if a file exists in temp and fail task if it does
+ ansible.builtin.command: ls /tmp/this_should_not_be_here
+ register: result
+ failed_when:
+ - result.rc == 0
+ - '"No such" not in result.stdout'
+
+If you want the task to fail when only one condition is satisfied, change the ``failed_when`` definition to::
+
+ failed_when: result.rc == 0 or "No such" not in result.stdout
+
+If you have too many conditions to fit neatly into one line, you can split it into a multi-line yaml value with ``>``::
+
+ - name: example of many failed_when conditions with OR
+ ansible.builtin.shell: "./myBinary"
+ register: ret
+ failed_when: >
+ ("No such file or directory" in ret.stdout) or
+ (ret.stderr != '') or
+ (ret.rc == 10)
+
+.. _override_the_changed_result:
+
+Defining "changed"
+==================
+
+Ansible lets you define when a particular task has "changed" a remote node using the ``changed_when`` conditional. This lets you determine, based on return codes or output, whether a change should be reported in Ansible statistics and whether a handler should be triggered or not. As with all conditionals in Ansible, lists of multiple ``changed_when`` conditions are joined with an implicit ``and``, meaning the task only reports a change when *all* conditions are met. If you want to report a change when any of the conditions is met, you must define the conditions in a string with an explicit ``or`` operator. For example::
+
+ tasks:
+
+ - name: Report 'changed' when the return code is not equal to 2
+ ansible.builtin.shell: /usr/bin/billybass --mode="take me to the river"
+ register: bass_result
+ changed_when: "bass_result.rc != 2"
+
+ - name: This will never report 'changed' status
+ ansible.builtin.shell: wall 'beep'
+ changed_when: False
+
+You can also combine multiple conditions to override "changed" result::
+
+ - name: Combine multiple conditions to override 'changed' result
+ ansible.builtin.command: /bin/fake_command
+ register: result
+ ignore_errors: True
+ changed_when:
+ - '"ERROR" in result.stderr'
+ - result.rc == 2
+
+See :ref:`controlling_what_defines_failure` for more conditional syntax examples.
+
+Ensuring success for command and shell
+======================================
+
+The :ref:`command <command_module>` and :ref:`shell <shell_module>` modules care about return codes, so if you have a command whose successful exit code is not zero, you can do this::
+
+ tasks:
+ - name: Run this command and ignore the result
+ ansible.builtin.shell: /usr/bin/somecommand || /bin/true
+
+
+Aborting a play on all hosts
+============================
+
+Sometimes you want a failure on a single host, or failures on a certain percentage of hosts, to abort the entire play on all hosts. You can stop play execution after the first failure happens with ``any_errors_fatal``. For finer-grained control, you can use ``max_fail_percentage`` to abort the run after a given percentage of hosts has failed.
+
+Aborting on the first error: any_errors_fatal
+---------------------------------------------
+
+If you set ``any_errors_fatal`` and a task returns an error, Ansible finishes the fatal task on all hosts in the current batch, then stops executing the play on all hosts. Subsequent tasks and plays are not executed. You can recover from fatal errors by adding a :ref:`rescue section <block_error_handling>` to the block. You can set ``any_errors_fatal`` at the play or block level::
+
+ - hosts: somehosts
+ any_errors_fatal: true
+ roles:
+ - myrole
+
+ - hosts: somehosts
+ tasks:
+ - block:
+ - include_tasks: mytasks.yml
+ any_errors_fatal: true
+
+You can use this feature when all tasks must be 100% successful to continue playbook execution. For example, if you run a service on machines in multiple data centers with load balancers to pass traffic from users to the service, you want all load balancers to be disabled before you stop the service for maintenance. To ensure that any failure in the task that disables the load balancers will stop all other tasks::
+
+ ---
+ - hosts: load_balancers_dc_a
+ any_errors_fatal: true
+
+ tasks:
+ - name: Shut down datacenter 'A'
+ ansible.builtin.command: /usr/bin/disable-dc
+
+ - hosts: frontends_dc_a
+
+ tasks:
+ - name: Stop service
+ ansible.builtin.command: /usr/bin/stop-software
+
+ - name: Update software
+ ansible.builtin.command: /usr/bin/upgrade-software
+
+ - hosts: load_balancers_dc_a
+
+ tasks:
+ - name: Start datacenter 'A'
+ ansible.builtin.command: /usr/bin/enable-dc
+
+In this example Ansible starts the software upgrade on the front ends only if all of the load balancers are successfully disabled.
+
+.. _maximum_failure_percentage:
+
+Setting a maximum failure percentage
+------------------------------------
+
+By default, Ansible continues to execute tasks as long as there are hosts that have not yet failed. In some situations, such as when executing a rolling update, you may want to abort the play when a certain threshold of failures has been reached. To achieve this, you can set a maximum failure percentage on a play::
+
+ ---
+ - hosts: webservers
+ max_fail_percentage: 30
+ serial: 10
+
+The ``max_fail_percentage`` setting applies to each batch when you use it with :ref:`serial <rolling_update_batch_size>`. In the example above, if more than 3 of the 10 servers in the first (or any) batch of servers failed, the rest of the play would be aborted.
+
+.. note::
+
+ The percentage set must be exceeded, not equaled. For example, if serial were set to 4 and you wanted the task to abort the play when 2 of the systems failed, set the max_fail_percentage at 49 rather than 50.
+
+Controlling errors in blocks
+============================
+
+You can also use blocks to define responses to task errors. This approach is similar to exception handling in many programming languages. See :ref:`block_error_handling` for details and examples.
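+
+A minimal block/rescue sketch::
+
+    tasks:
+      - block:
+          - name: Attempt a task that may fail
+            ansible.builtin.command: /bin/false
+        rescue:
+          - name: Run this only if a task in the block failed
+            ansible.builtin.debug:
+              msg: "Recovering from the failure"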
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_filters.rst b/docs/docsite/rst/user_guide/playbooks_filters.rst
new file mode 100644
index 00000000..f009900a
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_filters.rst
@@ -0,0 +1,1696 @@
+.. _playbooks_filters:
+
+********************************
+Using filters to manipulate data
+********************************
+
+Filters let you transform JSON data into YAML data, split a URL to extract the hostname, get the SHA1 hash of a string, add or multiply integers, and much more. You can use the Ansible-specific filters documented here to manipulate your data, or use any of the standard filters shipped with Jinja2 - see the list of :ref:`built-in filters <jinja2:builtin-filters>` in the official Jinja2 template documentation. You can also use :ref:`Python methods <jinja2:python-methods>` to transform data. You can :ref:`create custom Ansible filters as plugins <developing_filter_plugins>`, though we generally welcome new filters into the ansible-base repo so everyone can use them.
+
+Because templating happens on the Ansible controller, **not** on the target host, filters execute on the controller and transform data locally.
+
+.. contents::
+ :local:
+
+Handling undefined variables
+============================
+
+Filters can help you manage missing or undefined variables by providing defaults or making some variables optional. If you configure Ansible to ignore most undefined variables, you can mark some variables as requiring values with the ``mandatory`` filter.
+
+.. _defaulting_undefined_variables:
+
+Providing default values
+------------------------
+
+You can provide default values for variables directly in your templates using the Jinja2 'default' filter. This is often a better approach than failing if a variable is not defined::
+
+ {{ some_variable | default(5) }}
+
+In the above example, if the variable 'some_variable' is not defined, Ansible uses the default value 5, rather than raising an "undefined variable" error and failing. If you are working within a role, you can also add a ``defaults/main.yml`` to define the default values for variables in your role.
+
+Beginning in version 2.8, attempting to access an attribute of an Undefined value in Jinja will return another Undefined value, rather than throwing an error immediately. This means that you can now simply use
+a default with a value in a nested data structure (in other words, :code:`{{ foo.bar.baz | default('DEFAULT') }}`) when you do not know if the intermediate values are defined.
+
+If you want to use the default value when variables evaluate to false or an empty string you have to set the second parameter to ``true``::
+
+ {{ lookup('env', 'MY_USER') | default('admin', true) }}
+
+.. _omitting_undefined_variables:
+
+Making variables optional
+-------------------------
+
+By default Ansible requires values for all variables in a templated expression. However, you can make specific variables optional. For example, you might want to use a system default for some items and control the value for others. To make a variable optional, set the default value to the special variable ``omit``::
+
+ - name: Touch files with an optional mode
+ ansible.builtin.file:
+ dest: "{{ item.path }}"
+ state: touch
+ mode: "{{ item.mode | default(omit) }}"
+ loop:
+ - path: /tmp/foo
+ - path: /tmp/bar
+ - path: /tmp/baz
+ mode: "0444"
+
+In this example, the default mode for the files ``/tmp/foo`` and ``/tmp/bar`` is determined by the umask of the system. Ansible does not send a value for ``mode``. Only the third file, ``/tmp/baz``, receives the ``mode=0444`` option.
+
+.. note:: If you are "chaining" additional filters after the ``default(omit)`` filter, you should instead do something like this:
+ ``"{{ foo | default(None) | some_filter or omit }}"``. In this example, the default ``None`` (Python null) value will cause the later filters to fail, which will trigger the ``or omit`` portion of the logic. Using ``omit`` in this manner is very specific to the later filters you are chaining though, so be prepared for some trial and error if you do this.
+
+.. _forcing_variables_to_be_defined:
+
+Defining mandatory values
+-------------------------
+
+If you configure Ansible to ignore undefined variables, you may want to define some values as mandatory. By default, Ansible fails if a variable in your playbook or command is undefined. You can configure Ansible to allow undefined variables by setting :ref:`DEFAULT_UNDEFINED_VAR_BEHAVIOR` to ``false``. In that case, you may want to require some variables to be defined. You can do this with::
+
+ {{ variable | mandatory }}
+
+The variable value will be used as is, but the template evaluation will raise an error if it is undefined.
+
+Defining different values for true/false/null (ternary)
+=======================================================
+
+You can create a test, then define one value to use when the test returns true and another when the test returns false (new in version 1.9)::
+
+ {{ (status == 'needs_restart') | ternary('restart', 'continue') }}
+
+In addition, you can define one value to use when the test returns true, one value when it returns false, and a third value when it returns null (new in version 2.8)::
+
+ {{ enabled | ternary('no shutdown', 'shutdown', omit) }}
+
+Managing data types
+===================
+
+You might need to know, change, or set the data type on a variable. For example, a registered variable might contain a dictionary when your next task needs a list, or a user :ref:`prompt <playbooks_prompts>` might return a string when your playbook needs a boolean value. Use the ``type_debug``, ``dict2items``, and ``items2dict`` filters to manage data types. You can also use the data type itself to cast a value as a specific data type.
+
+Discovering the data type
+-------------------------
+
+.. versionadded:: 2.3
+
+If you are unsure of the underlying Python type of a variable, you can use the ``type_debug`` filter to display it. This is useful in debugging when you need a particular type of variable::
+
+ {{ myvar | type_debug }}
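+
+For example, a quick sketch in a task (``result`` is an illustrative registered variable)::
+
+    - name: Show the underlying Python type of a registered variable
+      ansible.builtin.debug:
+        msg: "{{ result | type_debug }}"  # result is illustrative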
+
+
+.. _dict_filter:
+
+Transforming dictionaries into lists
+------------------------------------
+
+.. versionadded:: 2.6
+
+
+Use the ``dict2items`` filter to transform a dictionary into a list of items suitable for :ref:`looping <playbooks_loops>`::
+
+ {{ dict | dict2items }}
+
+Dictionary data (before applying the ``dict2items`` filter)::
+
+ tags:
+ Application: payment
+ Environment: dev
+
+List data (after applying the ``dict2items`` filter)::
+
+ - key: Application
+ value: payment
+ - key: Environment
+ value: dev
+
+.. versionadded:: 2.8
+
+The ``dict2items`` filter is the reverse of the ``items2dict`` filter.
+
+If you want to configure the names of the keys, the ``dict2items`` filter accepts 2 keyword arguments. Pass the ``key_name`` and ``value_name`` arguments to configure the names of the keys in the list output::
+
+ {{ files | dict2items(key_name='file', value_name='path') }}
+
+Dictionary data (before applying the ``dict2items`` filter)::
+
+ files:
+ users: /etc/passwd
+ groups: /etc/group
+
+List data (after applying the ``dict2items`` filter)::
+
+ - file: users
+ path: /etc/passwd
+ - file: groups
+ path: /etc/group
+
+
+Transforming lists into dictionaries
+------------------------------------
+
+.. versionadded:: 2.7
+
+Use the ``items2dict`` filter to transform a list into a dictionary, mapping the content into ``key: value`` pairs::
+
+ {{ tags | items2dict }}
+
+List data (before applying the ``items2dict`` filter)::
+
+ tags:
+ - key: Application
+ value: payment
+ - key: Environment
+ value: dev
+
+Dictionary data (after applying the ``items2dict`` filter)::
+
+ Application: payment
+ Environment: dev
+
+The ``items2dict`` filter is the reverse of the ``dict2items`` filter.
+
+Not all lists use ``key`` to designate keys and ``value`` to designate values. For example::
+
+ fruits:
+ - fruit: apple
+ color: red
+ - fruit: pear
+ color: yellow
+ - fruit: grapefruit
+ color: yellow
+
+In this example, you must pass the ``key_name`` and ``value_name`` arguments to configure the transformation. For example::
+
+ {{ tags | items2dict(key_name='fruit', value_name='color') }}
+
+If you do not pass these arguments, or do not pass the correct values for your list, you will see ``KeyError: key`` or ``KeyError: my_typo``.
+
+Forcing the data type
+---------------------
+
+You can cast values as certain types. For example, if you expect the input "True" from a :ref:`vars_prompt <playbooks_prompts>` and you want Ansible to recognize it as a boolean value instead of a string::
+
+ - debug:
+ msg: test
+ when: some_string_value | bool
+
+If you want to perform a mathematical comparison on a fact and you want Ansible to recognize it as an integer instead of a string::
+
+ - shell: echo "only on Red Hat 6, derivatives, and later"
+ when: ansible_facts['os_family'] == "RedHat" and ansible_facts['lsb']['major_release'] | int >= 6
+
+
+.. versionadded:: 1.6
+
+.. _filters_for_formatting_data:
+
+Formatting data: YAML and JSON
+==============================
+
+You can switch a data structure in a template from or to JSON or YAML format, with options for formatting, indenting, and loading data. The basic filters are occasionally useful for debugging::
+
+ {{ some_variable | to_json }}
+ {{ some_variable | to_yaml }}
+
+For human readable output, you can use::
+
+ {{ some_variable | to_nice_json }}
+ {{ some_variable | to_nice_yaml }}
+
+You can change the indentation of either format::
+
+ {{ some_variable | to_nice_json(indent=2) }}
+ {{ some_variable | to_nice_yaml(indent=8) }}
+
+The ``to_yaml`` and ``to_nice_yaml`` filters use the `PyYAML library`_, which has a default line width of 80 characters. That causes an unexpected line break after the 80th character (if there is a space after the 80th character).
+To avoid this behavior and generate long lines, use the ``width`` option. You must use a hardcoded number to define the width, instead of a construction like ``float("inf")``, because the filter does not support proxying Python functions. For example::
+
+ {{ some_variable | to_yaml(indent=8, width=1337) }}
+ {{ some_variable | to_nice_yaml(indent=8, width=1337) }}
+
+The filter does support passing through other YAML parameters. For a full list, see the `PyYAML documentation`_.
+
+If you are reading in some already formatted data::
+
+ {{ some_variable | from_json }}
+ {{ some_variable | from_yaml }}
+
+For example::
+
+ tasks:
+ - name: Register JSON output as a variable
+ ansible.builtin.shell: cat /some/path/to/file.json
+ register: result
+
+ - name: Set a variable
+ ansible.builtin.set_fact:
+ myvar: "{{ result.stdout | from_json }}"
+
+
+Filter `to_json` and Unicode support
+------------------------------------
+
+By default `to_json` and `to_nice_json` will convert data received to ASCII, so::
+
+ {{ 'München'| to_json }}
+
+will return::
+
+ 'M\u00fcnchen'
+
+To keep Unicode characters, pass the parameter `ensure_ascii=False` to the filter::
+
+ {{ 'München'| to_json(ensure_ascii=False) }}
+
+ 'München'
+
+.. versionadded:: 2.7
+
+To parse multi-document YAML strings, use the ``from_yaml_all`` filter. It returns a generator of parsed YAML documents.
+
+For example::
+
+ tasks:
+ - name: Register a file content as a variable
+ ansible.builtin.shell: cat /some/path/to/multidoc-file.yaml
+ register: result
+
+ - name: Print the transformed variable
+ ansible.builtin.debug:
+ msg: '{{ item }}'
+ loop: '{{ result.stdout | from_yaml_all | list }}'
+
+Combining and selecting data
+============================
+
+You can combine data from multiple sources and types, and select values from large data structures, giving you precise control over complex data.
+
+.. _zip_filter:
+
+Combining items from multiple lists: zip and zip_longest
+--------------------------------------------------------
+
+.. versionadded:: 2.3
+
+To get a list combining the elements of other lists, use ``zip``::
+
+ - name: Give me list combo of two lists
+ ansible.builtin.debug:
+ msg: "{{ [1,2,3,4,5] | zip(['a','b','c','d','e','f']) | list }}"
+
+ - name: Give me shortest combo of two lists
+ ansible.builtin.debug:
+ msg: "{{ [1,2,3] | zip(['a','b','c','d','e','f']) | list }}"
+
+To always exhaust all lists, use ``zip_longest``::
+
+    - name: Give me longest combo of three lists, fill with X
+ ansible.builtin.debug:
+ msg: "{{ [1,2,3] | zip_longest(['a','b','c','d','e','f'], [21, 22, 23], fillvalue='X') | list }}"
+
+Similarly to the output of the ``items2dict`` filter mentioned above, these filters can be used to construct a ``dict``::
+
+ {{ dict(keys_list | zip(values_list)) }}
+
+List data (before applying the ``zip`` filter)::
+
+ keys_list:
+ - one
+ - two
+ values_list:
+ - apple
+ - orange
+
+Dictionary data (after applying the ``zip`` filter)::
+
+ one: apple
+ two: orange
+
+Combining objects and subelements
+---------------------------------
+
+.. versionadded:: 2.7
+
+The ``subelements`` filter produces a product of an object and the subelement values of that object, similar to the ``subelements`` lookup. This lets you specify individual subelements to use in a template. For example, this expression::
+
+ {{ users | subelements('groups', skip_missing=True) }}
+
+Data before applying the ``subelements`` filter::
+
+ users:
+ - name: alice
+ authorized:
+ - /tmp/alice/onekey.pub
+ - /tmp/alice/twokey.pub
+ groups:
+ - wheel
+ - docker
+ - name: bob
+ authorized:
+ - /tmp/bob/id_rsa.pub
+ groups:
+ - docker
+
+Data after applying the ``subelements`` filter::
+
+ -
+ - name: alice
+ groups:
+ - wheel
+ - docker
+ authorized:
+ - /tmp/alice/onekey.pub
+ - /tmp/alice/twokey.pub
+ - wheel
+ -
+ - name: alice
+ groups:
+ - wheel
+ - docker
+ authorized:
+ - /tmp/alice/onekey.pub
+ - /tmp/alice/twokey.pub
+ - docker
+ -
+ - name: bob
+ authorized:
+ - /tmp/bob/id_rsa.pub
+ groups:
+ - docker
+ - docker
+
+You can use the transformed data with ``loop`` to iterate over the same subelement for multiple objects::
+
+ - name: Set authorized ssh key, extracting just that data from 'users'
+ ansible.posix.authorized_key:
+ user: "{{ item.0.name }}"
+ key: "{{ lookup('file', item.1) }}"
+ loop: "{{ users | subelements('authorized') }}"
+
+.. _combine_filter:
+
+Combining hashes/dictionaries
+-----------------------------
+
+.. versionadded:: 2.0
+
+The ``combine`` filter allows hashes to be merged. For example, the following would override keys in one hash::
+
+ {{ {'a':1, 'b':2} | combine({'b':3}) }}
+
+The resulting hash would be::
+
+ {'a':1, 'b':3}
+
+The filter can also take multiple arguments to merge::
+
+ {{ a | combine(b, c, d) }}
+ {{ [a, b, c, d] | combine }}
+
+In this case, keys in ``d`` would override those in ``c``, which would override those in ``b``, and so on.
+
+The filter also accepts two optional parameters: ``recursive`` and ``list_merge``.
+
+recursive
+    A boolean, defaulting to ``False``.
+    Determines whether ``combine`` recursively merges nested hashes.
+    Note: it does **not** depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
+
+list_merge
+    A string, with possible values ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``.
+    It modifies the behaviour of ``combine`` when the hashes to merge contain arrays/lists.
+
+.. code-block:: yaml
+
+ default:
+ a:
+ x: default
+ y: default
+ b: default
+ c: default
+ patch:
+ a:
+ y: patch
+ z: patch
+ b: patch
+
+If ``recursive=False`` (the default), nested hashes are not merged::
+
+ {{ default | combine(patch) }}
+
+This would result in::
+
+ a:
+ y: patch
+ z: patch
+ b: patch
+ c: default
+
+If ``recursive=True``, ``combine`` recurses into nested hashes and merges their keys::
+
+ {{ default | combine(patch, recursive=True) }}
+
+This would result in::
+
+ a:
+ x: default
+ y: patch
+ z: patch
+ b: patch
+ c: default
+
+If ``list_merge='replace'`` (the default), arrays from the right hash will "replace" the ones in the left hash::
+
+ default:
+ a:
+ - default
+ patch:
+ a:
+ - patch
+
+.. code-block:: jinja
+
+ {{ default | combine(patch) }}
+
+This would result in::
+
+ a:
+ - patch
+
+If ``list_merge='keep'``, arrays from the left hash will be kept::
+
+ {{ default | combine(patch, list_merge='keep') }}
+
+This would result in::
+
+ a:
+ - default
+
+If ``list_merge='append'``, arrays from the right hash will be appended to the ones in the left hash::
+
+ {{ default | combine(patch, list_merge='append') }}
+
+This would result in::
+
+ a:
+ - default
+ - patch
+
+If ``list_merge='prepend'``, arrays from the right hash will be prepended to the ones in the left hash::
+
+ {{ default | combine(patch, list_merge='prepend') }}
+
+This would result in::
+
+ a:
+ - patch
+ - default
+
+If ``list_merge='append_rp'``, arrays from the right hash will be appended to the ones in the left hash. Elements of arrays in the left hash that are also in the corresponding array of the right hash will be removed ("rp" stands for "remove present"). Duplicate elements that aren't in both hashes are kept::
+
+ default:
+ a:
+ - 1
+ - 1
+ - 2
+ - 3
+ patch:
+ a:
+ - 3
+ - 4
+ - 5
+ - 5
+
+.. code-block:: jinja
+
+ {{ default | combine(patch, list_merge='append_rp') }}
+
+This would result in::
+
+ a:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - 5
+
+If ``list_merge='prepend_rp'``, the behavior is similar to the one for ``append_rp``, but elements of arrays in the right hash are prepended::
+
+ {{ default | combine(patch, list_merge='prepend_rp') }}
+
+This would result in::
+
+ a:
+ - 3
+ - 4
+ - 5
+ - 5
+ - 1
+ - 1
+ - 2
+
+``recursive`` and ``list_merge`` can be used together::
+
+ default:
+ a:
+ a':
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ b:
+ - 1
+ - 1
+ - 2
+ - 3
+ patch:
+ a:
+ a':
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ b:
+ - 3
+ - 4
+ - 4
+ - key: value
+
+.. code-block:: jinja
+
+ {{ default | combine(patch, recursive=True, list_merge='append_rp') }}
+
+This would result in::
+
+ a:
+ a':
+ x: default_value
+ y: patch_value
+ z: patch_value
+ list:
+ - default_value
+ - patch_value
+ b:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 4
+ - 4
+ - key: value
+
+
+.. _extract_filter:
+
+Selecting values from arrays or hashtables
+-------------------------------------------
+
+.. versionadded:: 2.1
+
+The `extract` filter is used to map from a list of indices to a list of values from a container (hash or array)::
+
+ {{ [0,2] | map('extract', ['x','y','z']) | list }}
+ {{ ['x','y'] | map('extract', {'x': 42, 'y': 31}) | list }}
+
+The results of the above expressions would be::
+
+ ['x', 'z']
+ [42, 31]
+
+The filter can take another argument::
+
+ {{ groups['x'] | map('extract', hostvars, 'ec2_ip_address') | list }}
+
+This takes the list of hosts in group 'x', looks them up in `hostvars`, and then looks up the `ec2_ip_address` of the result. The final result is a list of IP addresses for the hosts in group 'x'.
+
+The third argument to the filter can also be a list, for a recursive lookup inside the container::
+
+ {{ ['a'] | map('extract', b, ['x','y']) | list }}
+
+This would return a list containing the value of `b['a']['x']['y']`.
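+
+For example, with data like this (a sketch)::
+
+    b:
+      a:
+        x:
+          y: 42
+
+the expression above would return ``[42]``.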
+
+Combining lists
+---------------
+
+This set of filters returns a list of combined lists.
+
+
+permutations
+^^^^^^^^^^^^
+To get permutations of a list::
+
+ - name: Give me largest permutations (order matters)
+ ansible.builtin.debug:
+ msg: "{{ [1,2,3,4,5] | permutations | list }}"
+
+ - name: Give me permutations of sets of three
+ ansible.builtin.debug:
+ msg: "{{ [1,2,3,4,5] | permutations(3) | list }}"
+
+combinations
+^^^^^^^^^^^^
+Combinations always require a set size::
+
+ - name: Give me combinations for sets of two
+ ansible.builtin.debug:
+ msg: "{{ [1,2,3,4,5] | combinations(2) | list }}"
+
+Also see :ref:`zip_filter`.
+
+products
+^^^^^^^^
+The product filter returns the `cartesian product <https://docs.python.org/3/library/itertools.html#itertools.product>`_ of the input iterables. This is roughly equivalent to nested for-loops in a generator expression.
+
+For example::
+
+ - name: Generate multiple hostnames
+ ansible.builtin.debug:
+ msg: "{{ ['foo', 'bar'] | product(['com']) | map('join', '.') | join(',') }}"
+
+This would result in::
+
+ { "msg": "foo.com,bar.com" }
+
+.. _json_query_filter:
+
+Selecting JSON data: JSON queries
+---------------------------------
+
+To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
+
+.. note::
+
+ This filter has migrated to the `community.general <https://galaxy.ansible.com/community/general>`_ collection. Follow the installation instructions to install that collection.
+
+
+.. note:: This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.
+
+Consider this data structure::
+
+ {
+ "domain_definition": {
+ "domain": {
+ "cluster": [
+ {
+ "name": "cluster1"
+ },
+ {
+ "name": "cluster2"
+ }
+ ],
+ "server": [
+ {
+ "name": "server11",
+ "cluster": "cluster1",
+ "port": "8080"
+ },
+ {
+ "name": "server12",
+ "cluster": "cluster1",
+ "port": "8090"
+ },
+ {
+ "name": "server21",
+ "cluster": "cluster2",
+ "port": "9080"
+ },
+ {
+ "name": "server22",
+ "cluster": "cluster2",
+ "port": "9090"
+ }
+ ],
+ "library": [
+ {
+ "name": "lib1",
+ "target": "cluster1"
+ },
+ {
+ "name": "lib2",
+ "target": "cluster2"
+ }
+ ]
+ }
+ }
+ }
+
+To extract all clusters from this structure, you can use the following query::
+
+ - name: Display all cluster names
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
+
+To extract all server names::
+
+ - name: Display all server names
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
+
+To extract ports from cluster1::
+
+    - name: Display all ports from cluster1
+      ansible.builtin.debug:
+        var: item
+      loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+      vars:
+        server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
+
+.. note:: You can use a variable to make the query more readable.
+
+To print out the ports from cluster1 in a comma-separated string::
+
+ - name: Display all ports from cluster1 as a string
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
+
+.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
+
+You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_::
+
+ - name: Display all ports from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
+
+.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.
+
+To get a hash map with all ports and names of a cluster::
+
+    - name: Display all server ports and names from cluster1
+      ansible.builtin.debug:
+        var: item
+      loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+      vars:
+        server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"
+
+
+Randomizing data
+================
+
+When you need a randomly generated value, use one of these filters.
+
+
+.. _random_mac_filter:
+
+Random MAC addresses
+--------------------
+
+.. versionadded:: 2.6
+
+This filter can be used to generate a random MAC address from a string prefix.
+
+.. note::
+
+ This filter has migrated to the `community.general <https://galaxy.ansible.com/community/general>`_ collection. Follow the installation instructions to install that collection.
+
+To get a random MAC address from a string prefix starting with '52:54:00'::
+
+ "{{ '52:54:00' | community.general.random_mac }}"
+ # => '52:54:00:ef:1c:03'
+
+Note that if anything is wrong with the prefix string, the filter will issue an error.
+
+.. versionadded:: 2.9
+
+As of Ansible version 2.9, you can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses::
+
+ "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
+
+
+.. _random_filter:
+
+Random items or numbers
+-----------------------
+
+The ``random`` filter in Ansible is an extension of the default Jinja2 random filter, and can be used to return a random item from a sequence of items or to generate a random number based on a range.
+
+To get a random item from a list::
+
+ "{{ ['a','b','c'] | random }}"
+ # => 'c'
+
+To get a random number between 0 and a specified number::
+
+ "{{ 60 | random }} * * * * root /script/from/cron"
+ # => '21 * * * * root /script/from/cron'
+
+To get a random number from 0 to 100 but in steps of 10::
+
+ {{ 101 | random(step=10) }}
+ # => 70
+
+To get a random number from 1 to 100 but in steps of 10::
+
+ {{ 101 | random(1, 10) }}
+ # => 31
+ {{ 101 | random(start=1, step=10) }}
+ # => 51
+
+You can initialize the random number generator from a seed to create random-but-idempotent numbers::
+
+ "{{ 60 | random(seed=inventory_hostname) }} * * * * root /script/from/cron"
+
+Shuffling a list
+----------------
+
+The ``shuffle`` filter randomizes an existing list, giving a different order on every invocation.
+
+To get a random list from an existing list::
+
+ {{ ['a','b','c'] | shuffle }}
+ # => ['c','a','b']
+ {{ ['a','b','c'] | shuffle }}
+ # => ['b','c','a']
+
+You can initialize the shuffle generator from a seed to generate a random-but-idempotent order::
+
+ {{ ['a','b','c'] | shuffle(seed=inventory_hostname) }}
+ # => ['b','a','c']
+
+The shuffle filter returns a list whenever possible. If you use it with a non-'listable' item, the filter does nothing.
+
+
+.. _list_filters:
+
+Managing list variables
+=======================
+
+You can search for the minimum or maximum value in a list, or flatten a multi-level list.
+
+To get the minimum value from a list of numbers::
+
+ {{ list1 | min }}
+
+To get the maximum value from a list of numbers::
+
+ {{ [3, 4, 2] | max }}
+
+.. versionadded:: 2.5
+
+Flatten a list (same thing the `flatten` lookup does)::
+
+ {{ [3, [4, 2] ] | flatten }}
+
+Flatten only the first level of a list (akin to the `items` lookup)::
+
+ {{ [3, [4, [2]] ] | flatten(levels=1) }}
+
+
+.. _set_theory_filters:
+
+Selecting from sets or lists (set theory)
+=========================================
+
+You can select or combine items from sets or lists.
+
+.. versionadded:: 1.4
+
+To get a unique set from a list::
+
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ {{ list1 | unique }}
+ # => [1, 2, 5, 3, 4, 10]
+
+To get a union of two lists::
+
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | union(list2) }}
+ # => [1, 2, 5, 1, 3, 4, 10, 11, 99]
+
+To get the intersection of 2 lists (unique list of all items in both)::
+
+ # list1: [1, 2, 5, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | intersect(list2) }}
+ # => [1, 2, 5, 3, 4]
+
+To get the difference of 2 lists (items in 1 that don't exist in 2)::
+
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | difference(list2) }}
+ # => [10]
+
+To get the symmetric difference of 2 lists (items exclusive to each list)::
+
+ # list1: [1, 2, 5, 1, 3, 4, 10]
+ # list2: [1, 2, 3, 4, 5, 11, 99]
+ {{ list1 | symmetric_difference(list2) }}
+ # => [10, 11, 99]
+
+.. _math_stuff:
+
+Calculating numbers (math)
+==========================
+
+.. versionadded:: 1.9
+
+You can calculate logs, powers, and roots of numbers with Ansible filters. Jinja2 provides other mathematical functions like abs() and round().
+
+Get the logarithm (default is e)::
+
+ {{ myvar | log }}
+
+Get the base 10 logarithm::
+
+ {{ myvar | log(10) }}
+
+Give me the power of 2! (or 5)::
+
+ {{ myvar | pow(2) }}
+ {{ myvar | pow(5) }}
+
+Square root, or the 5th root::
+
+ {{ myvar | root }}
+ {{ myvar | root(5) }}
+
+
+Managing network interactions
+=============================
+
+These filters help you with common network tasks.
+
+.. note::
+
+ These filters have migrated to the `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ collection. Follow the installation instructions to install that collection.
+
+.. _ipaddr_filter:
+
+IP address filters
+------------------
+
+.. versionadded:: 1.9
+
+To test if a string is a valid IP address::
+
+ {{ myvar | ansible.netcommon.ipaddr }}
+
+You can also require a specific IP protocol version::
+
+ {{ myvar | ansible.netcommon.ipv4 }}
+ {{ myvar | ansible.netcommon.ipv6 }}
+
+The ``ipaddr`` filter can also be used to extract specific information from an IP
+address. For example, to get the IP address itself from a CIDR, you can use::
+
+ {{ '192.0.2.1/24' | ansible.netcommon.ipaddr('address') }}
+
+More information about the ``ipaddr`` filter and a complete usage guide can be found
+in :ref:`playbooks_filters_ipaddr`.
+
+.. _network_filters:
+
+Network CLI filters
+-------------------
+
+.. versionadded:: 2.4
+
+To convert the output of a network device CLI command into structured JSON
+output, use the ``parse_cli`` filter::
+
+ {{ output | ansible.netcommon.parse_cli('path/to/spec') }}
+
+The ``parse_cli`` filter will load the spec file and pass the command output
+through it, returning JSON output. The spec file must be valid YAML; it defines how to
+parse the CLI output and return JSON data. Below is an example of a valid spec file that
+will parse the output from the ``show vlan`` command.
+
+.. code-block:: yaml
+
+ ---
+ vars:
+ vlan:
+ vlan_id: "{{ item.vlan_id }}"
+ name: "{{ item.name }}"
+ enabled: "{{ item.state != 'act/lshut' }}"
+ state: "{{ item.state }}"
+
+ keys:
+ vlans:
+ value: "{{ vlan }}"
+ items: "^(?P<vlan_id>\\d+)\\s+(?P<name>\\w+)\\s+(?P<state>active|act/lshut|suspended)"
+ state_static:
+ value: present
+
+
+The spec file above will return a JSON data structure that is a list of hashes
+with the parsed VLAN information.
+
+The same command could be parsed into a hash by using the key and values
+directives. Here is an example of how to parse the output into a hash
+value using the same ``show vlan`` command.
+
+.. code-block:: yaml
+
+ ---
+ vars:
+ vlan:
+ key: "{{ item.vlan_id }}"
+ values:
+ vlan_id: "{{ item.vlan_id }}"
+ name: "{{ item.name }}"
+ enabled: "{{ item.state != 'act/lshut' }}"
+ state: "{{ item.state }}"
+
+ keys:
+ vlans:
+ value: "{{ vlan }}"
+ items: "^(?P<vlan_id>\\d+)\\s+(?P<name>\\w+)\\s+(?P<state>active|act/lshut|suspended)"
+ state_static:
+ value: present
+
+Another common use case for parsing CLI commands is to break the output of a large
+command into blocks that can be parsed, using the ``start_block`` and
+``end_block`` directives.
+
+.. code-block:: yaml
+
+ ---
+ vars:
+ interface:
+ name: "{{ item[0].match[0] }}"
+ state: "{{ item[1].state }}"
+ mode: "{{ item[2].match[0] }}"
+
+ keys:
+ interfaces:
+ value: "{{ interface }}"
+ start_block: "^Ethernet.*$"
+ end_block: "^$"
+ items:
+ - "^(?P<name>Ethernet\\d\\/\\d*)"
+ - "admin state is (?P<state>.+),"
+ - "Port mode is (.+)"
+
+
+The example above will parse the output of ``show interface`` into a list of
+hashes.
+
+The network filters also support parsing the output of a CLI command using the
+TextFSM library. To parse the CLI output with TextFSM use the following
+filter::
+
+ {{ output.stdout[0] | ansible.netcommon.parse_cli_textfsm('path/to/fsm') }}
+
+Use of the TextFSM filter requires the TextFSM library to be installed.
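+
+For example, one common way to install it on the control node (a sketch)::
+
+    pip install textfsm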
+
+Network XML filters
+-------------------
+
+.. versionadded:: 2.5
+
+To convert the XML output of a network device command into structured JSON
+output, use the ``parse_xml`` filter::
+
+ {{ output | ansible.netcommon.parse_xml('path/to/spec') }}
+
+The ``parse_xml`` filter will load the spec file and pass the command output
+through it, returning JSON output.
+
+The spec file must be valid YAML. It defines how to parse the XML
+output and return JSON data.
+
+Below is an example of a valid spec file that
+will parse the output from the ``show vlan | display xml`` command.
+
+.. code-block:: yaml
+
+ ---
+ vars:
+ vlan:
+ vlan_id: "{{ item.vlan_id }}"
+ name: "{{ item.name }}"
+ desc: "{{ item.desc }}"
+ enabled: "{{ item.state.get('inactive') != 'inactive' }}"
+ state: "{% if item.state.get('inactive') == 'inactive'%} inactive {% else %} active {% endif %}"
+
+ keys:
+ vlans:
+ value: "{{ vlan }}"
+ top: configuration/vlans/vlan
+ items:
+ vlan_id: vlan-id
+ name: name
+ desc: description
+ state: ".[@inactive='inactive']"
+
+
+The spec file above will return a JSON data structure that is a list of hashes
+with the parsed VLAN information.
+
+The same command could be parsed into a hash by using the key and values
+directives. Here is an example of how to parse the output into a hash
+value using the same ``show vlan | display xml`` command.
+
+.. code-block:: yaml
+
+ ---
+ vars:
+ vlan:
+ key: "{{ item.vlan_id }}"
+ values:
+ vlan_id: "{{ item.vlan_id }}"
+ name: "{{ item.name }}"
+ desc: "{{ item.desc }}"
+ enabled: "{{ item.state.get('inactive') != 'inactive' }}"
+ state: "{% if item.state.get('inactive') == 'inactive'%} inactive {% else %} active {% endif %}"
+
+ keys:
+ vlans:
+ value: "{{ vlan }}"
+ top: configuration/vlans/vlan
+ items:
+ vlan_id: vlan-id
+ name: name
+ desc: description
+ state: ".[@inactive='inactive']"
+
+
+The value of ``top`` is the XPath relative to the XML root node.
+In the example XML output given below, the value of ``top`` is ``configuration/vlans/vlan``,
+which is an XPath expression relative to the root node (<rpc-reply>).
+``configuration`` in the value of ``top`` is the outermost container node, and ``vlan``
+is the innermost container node.
+
+``items`` is a dictionary of key-value pairs that map user-defined names to XPath expressions
+that select elements. Each XPath expression is relative to the XPath value contained in ``top``.
+For example, ``vlan_id`` in the spec file is a user-defined name and its value ``vlan-id`` is an
+XPath expression relative to the value of ``top``.
+
+Attributes of XML tags can be extracted using XPath expressions. The value of ``state`` in the spec
+is an XPath expression used to get the attributes of the ``vlan`` tag in the output XML::
+
+ <rpc-reply>
+ <configuration>
+ <vlans>
+ <vlan inactive="inactive">
+ <name>vlan-1</name>
+ <vlan-id>200</vlan-id>
+ <description>This is vlan-1</description>
+ </vlan>
+ </vlans>
+ </configuration>
+ </rpc-reply>
+
+.. note::
+ For more information on supported XPath expressions, see `XPath Support <https://docs.python.org/2/library/xml.etree.elementtree.html#xpath-support>`_.
+
+Network VLAN filters
+--------------------
+
+.. versionadded:: 2.8
+
+Use the ``vlan_parser`` filter to transform an unsorted list of VLAN integers into a
+sorted string list of integers according to IOS-like VLAN list rules. This list has the following properties:
+
+* VLANs are listed in ascending order.
+* Three or more consecutive VLANs are listed with a dash.
+* The first line of the list can be ``first_line_len`` characters long.
+* Subsequent list lines can be ``other_line_len`` characters.
+
+To sort a VLAN list::
+
+ {{ [3003, 3004, 3005, 100, 1688, 3002, 3999] | ansible.netcommon.vlan_parser }}
+
+This example renders the following sorted list::
+
+ ['100,1688,3002-3005,3999']
+
+
+Another example Jinja template::
+
+    {% set parsed_vlans = vlans | ansible.netcommon.vlan_parser %}
+    switchport trunk allowed vlan {{ parsed_vlans[0] }}
+    {% for i in range (1, parsed_vlans | count) %}
+    switchport trunk allowed vlan add {{ parsed_vlans[i] }}
+    {% endfor %}
+
+This allows for dynamic generation of VLAN lists on a Cisco IOS tagged interface. You can store an exhaustive raw list of the exact VLANs required for an interface and then compare that to the parsed IOS output that would actually be generated for the configuration.
+
+
+.. _hash_filters:
+
+Encrypting and checksumming strings and passwords
+=================================================
+
+.. versionadded:: 1.9
+
+To get the sha1 hash of a string::
+
+ {{ 'test1' | hash('sha1') }}
+
+To get the md5 hash of a string::
+
+ {{ 'test1' | hash('md5') }}
+
+Get a string checksum::
+
+ {{ 'test2' | checksum }}
+
+Other hashes (platform dependent)::
+
+ {{ 'test2' | hash('blowfish') }}
+
+To get a sha512 password hash (random salt)::
+
+ {{ 'passwordsaresecret' | password_hash('sha512') }}
+
+To get a sha256 password hash with a specific salt::
+
+ {{ 'secretpassword' | password_hash('sha256', 'mysecretsalt') }}
+
+An idempotent method to generate unique hashes per system is to use a salt that is consistent between runs::
+
+ {{ 'secretpassword' | password_hash('sha512', 65534 | random(seed=inventory_hostname) | string) }}
+
+The hash types available depend on the system running Ansible (the control node): ``hash`` depends on Python's hashlib, and ``password_hash`` depends on passlib (https://passlib.readthedocs.io/en/stable/lib/passlib.hash.html).
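+
+For example, a password hash is commonly generated inline when creating a user account.
+A minimal sketch; the user name and ``user_password`` variable are illustrative::
+
+    - name: Create a user with an idempotent password hash
+      ansible.builtin.user:
+        name: deploy
+        password: "{{ user_password | password_hash('sha512', 65534 | random(seed=inventory_hostname) | string) }}"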
+
+.. versionadded:: 2.7
+
+Some hash types allow providing a rounds parameter::
+
+ {{ 'secretpassword' | password_hash('sha256', 'mysecretsalt', rounds=10000) }}
+
+.. _other_useful_filters:
+
+Manipulating text
+=================
+
+Several filters work with text, including URLs, file names, and path names.
+
+.. _comment_filter:
+
+Adding comments to files
+------------------------
+
+The ``comment`` filter lets you create comments in a file from text in a template, with a variety of comment styles. By default, Ansible uses ``#`` to start a comment line and adds a blank comment line above and below your comment text. For example, the following::
+
+ {{ "Plain style (default)" | comment }}
+
+produces this output:
+
+.. code-block:: text
+
+ #
+ # Plain style (default)
+ #
+
+Ansible offers styles for comments in C (``//...``), C block
+(``/*...*/``), Erlang (``%...``) and XML (``<!--...-->``)::
+
+ {{ "C style" | comment('c') }}
+ {{ "C block style" | comment('cblock') }}
+ {{ "Erlang style" | comment('erlang') }}
+ {{ "XML style" | comment('xml') }}
+
+You can define a custom comment character. This filter::
+
+ {{ "My Special Case" | comment(decoration="! ") }}
+
+produces:
+
+.. code-block:: text
+
+ !
+ ! My Special Case
+ !
+
+You can fully customize the comment style::
+
+ {{ "Custom style" | comment('plain', prefix='#######\n#', postfix='#\n#######\n ###\n #') }}
+
+That creates the following output:
+
+.. code-block:: text
+
+ #######
+ #
+ # Custom style
+ #
+ #######
+ ###
+ #
+
+The filter can also be applied to any Ansible variable. For example, to
+make the output of the ``ansible_managed`` variable more readable, we can
+change the definition in the ``ansible.cfg`` file to this:
+
+.. code-block:: jinja
+
+ [defaults]
+
+ ansible_managed = This file is managed by Ansible.%n
+ template: {file}
+ date: %Y-%m-%d %H:%M:%S
+ user: {uid}
+ host: {host}
+
+and then use the variable with the ``comment`` filter::
+
+ {{ ansible_managed | comment }}
+
+which produces this output:
+
+.. code-block:: sh
+
+ #
+ # This file is managed by Ansible.
+ #
+ # template: /home/ansible/env/dev/ansible_managed/roles/role1/templates/test.j2
+ # date: 2015-09-10 11:02:58
+ # user: ansible
+ # host: myhost
+ #
+
+Splitting URLs
+--------------
+
+.. versionadded:: 2.4
+
+The ``urlsplit`` filter extracts the fragment, hostname, netloc, password, path, port, query, scheme, and username from a URL. With no arguments, it returns a dictionary of all the fields::
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('hostname') }}
+ # => 'www.acme.com'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('netloc') }}
+ # => 'user:password@www.acme.com:9000'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('username') }}
+ # => 'user'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('password') }}
+ # => 'password'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('path') }}
+ # => '/dir/index.html'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('port') }}
+ # => '9000'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('scheme') }}
+ # => 'http'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('query') }}
+ # => 'query=term'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('fragment') }}
+ # => 'fragment'
+
+ {{ "http://user:password@www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit }}
+ # =>
+ # {
+ # "fragment": "fragment",
+ # "hostname": "www.acme.com",
+ # "netloc": "user:password@www.acme.com:9000",
+ # "password": "password",
+ # "path": "/dir/index.html",
+ # "port": 9000,
+ # "query": "query=term",
+ # "scheme": "http",
+ # "username": "user"
+ # }
+
+Searching strings with regular expressions
+------------------------------------------
+
+To search a string with a regex, use the "regex_search" filter::
+
+ # search for "foo" in "foobar"
+ {{ 'foobar' | regex_search('(foo)') }}
+
+ # will return empty if it cannot find a match
+ {{ 'ansible' | regex_search('(foobar)') }}
+
+ # case insensitive search in multiline mode
+ {{ 'foo\nBAR' | regex_search("^bar", multiline=True, ignorecase=True) }}
+
+
+To search for all occurrences of regex matches, use the "regex_findall" filter::
+
+ # Return a list of all IPv4 addresses in the string
+ {{ 'Some DNS servers are 8.8.8.8 and 8.8.4.4' | regex_findall('\\b(?:[0-9]{1,3}\\.){3}[0-9]{1,3}\\b') }}
+
+
+To replace text in a string with regex, use the "regex_replace" filter::
+
+ # convert "ansible" to "able"
+ {{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }}
+
+ # convert "foobar" to "bar"
+ {{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }}
+
+ # convert "localhost:80" to "localhost, 80" using named groups
+ {{ 'localhost:80' | regex_replace('^(?P<host>.+):(?P<port>\\d+)$', '\\g<host>, \\g<port>') }}
+
+ # convert "localhost:80" to "localhost"
+ {{ 'localhost:80' | regex_replace(':80') }}
+
+ # change a multiline string
+ {{ var | regex_replace('^', '#CommentThis#', multiline=True) }}
+
+.. note::
+   If you want to match the whole string and you are using ``*``, always wrap your regular expression in the start/end anchors. For example, ``^(.*)$`` always matches only one result, while ``(.*)`` on some Python versions will match the whole string and an empty string at the end, which means it will make two replacements::
+
+ # add "https://" prefix to each item in a list
+ GOOD:
+ {{ hosts | map('regex_replace', '^(.*)$', 'https://\\1') | list }}
+ {{ hosts | map('regex_replace', '(.+)', 'https://\\1') | list }}
+ {{ hosts | map('regex_replace', '^', 'https://') | list }}
+
+ BAD:
+ {{ hosts | map('regex_replace', '(.*)', 'https://\\1') | list }}
+
+ # append ':80' to each item in a list
+ GOOD:
+ {{ hosts | map('regex_replace', '^(.*)$', '\\1:80') | list }}
+ {{ hosts | map('regex_replace', '(.+)', '\\1:80') | list }}
+ {{ hosts | map('regex_replace', '$', ':80') | list }}
+
+ BAD:
+ {{ hosts | map('regex_replace', '(.*)', '\\1:80') | list }}
+
+.. note::
+   Prior to Ansible 2.0, if the ``regex_replace`` filter was used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments), you needed to escape backreferences (for example, ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``).
+
+.. versionadded:: 2.0
+
+To escape special characters within a standard Python regex, use the "regex_escape" filter (using the default re_type='python' option)::
+
+ # convert '^f.*o(.*)$' to '\^f\.\*o\(\.\*\)\$'
+ {{ '^f.*o(.*)$' | regex_escape() }}
+
+.. versionadded:: 2.8
+
+To escape special characters within a POSIX basic regex, use the "regex_escape" filter with the re_type='posix_basic' option::
+
+ # convert '^f.*o(.*)$' to '\^f\.\*o(\.\*)\$'
+ {{ '^f.*o(.*)$' | regex_escape('posix_basic') }}
+
+
+Managing file names and path names
+----------------------------------
+
+To get the last name of a file path, like 'foo.txt' out of '/etc/asdf/foo.txt'::
+
+ {{ path | basename }}
+
+To get the last name of a Windows-style file path (new in version 2.0)::
+
+ {{ path | win_basename }}
+
+To separate the Windows drive letter from the rest of a file path (new in version 2.0)::
+
+ {{ path | win_splitdrive }}
+
+To get only the Windows drive letter::
+
+ {{ path | win_splitdrive | first }}
+
+To get the rest of the path without the drive letter::
+
+ {{ path | win_splitdrive | last }}
+
+To get the directory from a path::
+
+ {{ path | dirname }}
+
+To get the directory from a Windows path (new in version 2.0)::
+
+ {{ path | win_dirname }}
+
+To expand a path containing a tilde (`~`) character (new in version 1.5)::
+
+ {{ path | expanduser }}
+
+To expand a path containing environment variables::
+
+ {{ path | expandvars }}
+
+.. note:: `expandvars` expands local variables; using it on remote paths can lead to errors.
+
+.. versionadded:: 2.6
+
+To get the real path of a link (new in version 1.8)::
+
+ {{ path | realpath }}
+
+To get the relative path of a link, from a start point (new in version 1.7)::
+
+ {{ path | relpath('/etc') }}
+
+To get the root and extension of a path or file name (new in version 2.0)::
+
+ # with path == 'nginx.conf' the return would be ('nginx', '.conf')
+ {{ path | splitext }}
+
+The ``splitext`` filter returns a pair of strings (the root and the extension). The individual components can be accessed by using the ``first`` and ``last`` filters::
+
+ # with path == 'nginx.conf' the return would be 'nginx'
+ {{ path | splitext | first }}
+
+    # with path == 'nginx.conf' the return would be '.conf'
+ {{ path | splitext | last }}
+
+To join one or more path components::
+
+ {{ ('/etc', path, 'subdir', file) | path_join }}
+
+.. versionadded:: 2.10
+
+Manipulating strings
+====================
+
+To add quotes for shell usage::
+
+ - name: Run a shell command
+ ansible.builtin.shell: echo {{ string_value | quote }}
+
+To concatenate a list into a string::
+
+ {{ list | join(" ") }}
+
+To work with Base64 encoded strings::
+
+ {{ encoded | b64decode }}
+ {{ decoded | string | b64encode }}
+
+As of version 2.6, you can define the type of encoding to use; the default is ``utf-8``::
+
+ {{ encoded | b64decode(encoding='utf-16-le') }}
+ {{ decoded | string | b64encode(encoding='utf-16-le') }}
+
+.. note:: The ``string`` filter is only required for Python 2 and ensures that the text to encode is a unicode string. Without that filter before ``b64encode``, the wrong value will be encoded.
+
+.. versionadded:: 2.6
+
+Managing UUIDs
+==============
+
+To create a namespaced UUIDv5::
+
+ {{ string | to_uuid(namespace='11111111-2222-3333-4444-555555555555') }}
+
+.. versionadded:: 2.10
+
+To create a namespaced UUIDv5 using the default Ansible namespace '361E6D51-FAEC-444A-9079-341386DA8E2E'::
+
+ {{ string | to_uuid }}
+
+.. versionadded:: 1.9
+
+To make use of one attribute from each item in a list of complex variables, use the :func:`Jinja2 map filter <jinja2:map>`::
+
+ # get a comma-separated list of the mount points (for example, "/,/mnt/stuff") on a host
+ {{ ansible_mounts | map(attribute='mount') | join(',') }}
+
+Handling dates and times
+========================
+
+To get a date object from a string, use the ``to_datetime`` filter::
+
+ # Get total amount of seconds between two dates. Default date format is %Y-%m-%d %H:%M:%S but you can pass your own format
+ {{ (("2016-08-14 20:00:12" | to_datetime) - ("2015-12-25" | to_datetime('%Y-%m-%d'))).total_seconds() }}
+
+ # Get remaining seconds after delta has been calculated. NOTE: This does NOT convert years, days, hours, and so on to seconds. For that, use total_seconds()
+ {{ (("2016-08-14 20:00:12" | to_datetime) - ("2016-08-14 18:00:00" | to_datetime)).seconds }}
+ # This expression evaluates to "12" and not "132". Delta is 2 hours, 12 seconds
+
+ # get amount of days between two dates. This returns only number of days and discards remaining hours, minutes, and seconds
+ {{ (("2016-08-14 20:00:12" | to_datetime) - ("2015-12-25" | to_datetime('%Y-%m-%d'))).days }}
+
+.. versionadded:: 2.4
+
+To format a date using a string (like with the shell date command), use the "strftime" filter::
+
+ # Display year-month-day
+ {{ '%Y-%m-%d' | strftime }}
+
+ # Display hour:min:sec
+ {{ '%H:%M:%S' | strftime }}
+
+ # Use ansible_date_time.epoch fact
+ {{ '%Y-%m-%d %H:%M:%S' | strftime(ansible_date_time.epoch) }}
+
+ # Use arbitrary epoch value
+ {{ '%Y-%m-%d' | strftime(0) }} # => 1970-01-01
+ {{ '%Y-%m-%d' | strftime(1441357287) }} # => 2015-09-04
+
+.. note:: To get all string possibilities, check https://docs.python.org/3/library/time.html#time.strftime
+
+Getting Kubernetes resource names
+=================================
+
+.. note::
+
+ These filters have migrated to the `community.kubernetes <https://galaxy.ansible.com/community/kubernetes>`_ collection. Follow the installation instructions to install that collection.
+
+Use the "k8s_config_resource_name" filter to obtain the name of a Kubernetes ConfigMap or Secret,
+including its hash::
+
+ {{ configmap_resource_definition | community.kubernetes.k8s_config_resource_name }}
+
+This can then be used to reference hashes in Pod specifications::
+
+ my_secret:
+ kind: Secret
+ name: my_secret_name
+
+ deployment_resource:
+ kind: Deployment
+ spec:
+ template:
+ spec:
+ containers:
+ - envFrom:
+ - secretRef:
+ name: {{ my_secret | community.kubernetes.k8s_config_resource_name }}
+
+.. versionadded:: 2.8
+
+.. _PyYAML library: https://pyyaml.org/
+
+.. _PyYAML documentation: https://pyyaml.org/wiki/PyYAMLDocumentation
+
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ :ref:`playbooks_loops`
+ Looping in playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst b/docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst
new file mode 100644
index 00000000..0a6d4825
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_filters_ipaddr.rst
@@ -0,0 +1,744 @@
+:orphan:
+
+.. _playbooks_filters_ipaddr:
+
+ipaddr filter
+`````````````
+
+.. versionadded:: 1.9
+
+``ipaddr()`` is a Jinja2 filter designed to provide an interface to the `netaddr`_
+Python package from within Ansible. It can operate on strings or lists of
+items, test various data to check if they are valid IP addresses, and manipulate
+the input data to extract requested information. ``ipaddr()`` works with both
+IPv4 and IPv6 addresses in various forms. There are also additional functions
+available to manipulate IP subnets and MAC addresses.
+
+.. note::
+
+ The ``ipaddr()`` filter migrated to the `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_ collection. Follow the installation instructions to install that collection.
+
+To use this filter in Ansible, you need to install the `netaddr`_ Python library on
+a computer on which you use Ansible (it is not required on remote hosts).
+It can usually be installed with either your system package manager or using
+``pip``::
+
+ pip install netaddr
+
+.. _netaddr: https://pypi.org/project/netaddr/
+
+.. contents:: Topics
+ :local:
+ :depth: 2
+ :backlinks: top
+
+
+Basic tests
+^^^^^^^^^^^
+
+``ipaddr()`` is designed to return the input value if a query is True, and
+``False`` if a query is False. This way it can be easily used in chained
+filters. To use the filter, pass a string to it:
+
+.. code-block:: none
+
+ {{ '192.0.2.0' | ansible.netcommon.ipaddr }}
+
+You can also pass the values as variables::
+
+ {{ myvar | ansible.netcommon.ipaddr }}
+
+Here are some example test results of various input strings::
+
+ # These values are valid IP addresses or network ranges
+ '192.168.0.1' -> 192.168.0.1
+ '192.168.32.0/24' -> 192.168.32.0/24
+ 'fe80::100/10' -> fe80::100/10
+ 45443646733 -> ::a:94a7:50d
+ '523454/24' -> 0.7.252.190/24
+
+ # Values that are not valid IP addresses or network ranges
+ 'localhost' -> False
+ True -> False
+ 'space bar' -> False
+ False -> False
+ '' -> False
+ ':' -> False
+ 'fe80:/10' -> False
+
+Sometimes you need only IPv4 or only IPv6 addresses. To filter for a particular
+type, the ``ipaddr()`` filter has two "aliases", ``ipv4()`` and ``ipv6()``.
+
+Example use of an IPv4 filter::
+
+ {{ myvar | ansible.netcommon.ipv4 }}
+
+A similar example of an IPv6 filter::
+
+ {{ myvar | ansible.netcommon.ipv6 }}
+
+Here are some example test results when looking for IPv4 addresses::
+
+ '192.168.0.1' -> 192.168.0.1
+ '192.168.32.0/24' -> 192.168.32.0/24
+ 'fe80::100/10' -> False
+ 45443646733 -> False
+ '523454/24' -> 0.7.252.190/24
+
+And the same data filtered for IPv6 addresses::
+
+ '192.168.0.1' -> False
+ '192.168.32.0/24' -> False
+ 'fe80::100/10' -> fe80::100/10
+ 45443646733 -> ::a:94a7:50d
+ '523454/24' -> False
+
+
+Filtering lists
+^^^^^^^^^^^^^^^
+
+You can filter entire lists - ``ipaddr()`` will return a list with values
+valid for a particular query::
+
+ # Example list of values
+ test_list = ['192.24.2.1', 'host.fqdn', '::1', '192.168.32.0/24', 'fe80::100/10', True, '', '42540766412265424405338506004571095040/64']
+
+ # {{ test_list | ansible.netcommon.ipaddr }}
+ ['192.24.2.1', '::1', '192.168.32.0/24', 'fe80::100/10', '2001:db8:32c:faad::/64']
+
+ # {{ test_list | ansible.netcommon.ipv4 }}
+ ['192.24.2.1', '192.168.32.0/24']
+
+ # {{ test_list | ansible.netcommon.ipv6 }}
+ ['::1', 'fe80::100/10', '2001:db8:32c:faad::/64']
+
+
+Wrapping IPv6 addresses in [ ] brackets
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Some configuration files require IPv6 addresses to be "wrapped" in square
+brackets (``[ ]``). To accomplish that, you can use the ``ipwrap()`` filter. It
+will wrap all IPv6 addresses and leave any other strings intact::
+
+ # {{ test_list | ansible.netcommon.ipwrap }}
+ ['192.24.2.1', 'host.fqdn', '[::1]', '192.168.32.0/24', '[fe80::100]/10', True, '', '[2001:db8:32c:faad::]/64']
+
+As you can see, ``ipwrap()`` did not filter out non-IP address values, which is
+usually what you want when, for example, you are mixing IP addresses with
+hostnames. If you still want to filter out all non-IP address values, you can
+chain both filters together::
+
+ # {{ test_list | ansible.netcommon.ipaddr | ansible.netcommon.ipwrap }}
+ ['192.24.2.1', '[::1]', '192.168.32.0/24', '[fe80::100]/10', '[2001:db8:32c:faad::]/64']
+
+
+Basic queries
+^^^^^^^^^^^^^
+
+You can provide a single argument to each ``ipaddr()`` filter. The filter will then
+treat it as a query and return values modified by that query. Lists will
+contain only values that you are querying for.
+
+Types of queries include:
+
+- query by name: ``ansible.netcommon.ipaddr('address')``, ``ansible.netcommon.ipv4('network')``;
+- query by CIDR range: ``ansible.netcommon.ipaddr('192.168.0.0/24')``, ``ansible.netcommon.ipv6('2001:db8::/32')``;
+- query by index number: ``ansible.netcommon.ipaddr('1')``, ``ansible.netcommon.ipaddr('-1')``;
+
+If a query type is not recognized, Ansible will raise an error.
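+
+For example, here is each query style applied to a single value (a minimal
+sketch; the result follows each expression)::
+
+    # Query by name
+    # {{ '192.0.2.1/24' | ansible.netcommon.ipaddr('address') }}
+    192.0.2.1
+
+    # Query by CIDR range
+    # {{ '192.0.2.1/24' | ansible.netcommon.ipaddr('192.0.0.0/8') }}
+    192.0.2.1/24
+
+    # Query by index number
+    # {{ '192.0.2.0/24' | ansible.netcommon.ipaddr('1') }}
+    192.0.2.1/24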
+
+
+Getting information about hosts and networks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Here's our test list again::
+
+ # Example list of values
+ test_list = ['192.24.2.1', 'host.fqdn', '::1', '192.168.32.0/24', 'fe80::100/10', True, '', '42540766412265424405338506004571095040/64']
+
+Let's take the list above and get only those elements that are host IP addresses
+and not network ranges::
+
+ # {{ test_list | ansible.netcommon.ipaddr('address') }}
+ ['192.24.2.1', '::1', 'fe80::100']
+
+As you can see, even though some values had a host address with a CIDR prefix,
+they were dropped by the filter. If you want host IP addresses with their correct
+CIDR prefixes (as is common with IPv6 addressing), you can use the
+``ipaddr('host')`` filter::
+
+ # {{ test_list | ansible.netcommon.ipaddr('host') }}
+ ['192.24.2.1/32', '::1/128', 'fe80::100/10']
+
+Filtering by IP address type also works::
+
+ # {{ test_list | ansible.netcommon.ipv4('address') }}
+ ['192.24.2.1']
+
+ # {{ test_list | ansible.netcommon.ipv6('address') }}
+ ['::1', 'fe80::100']
+
+You can check if IP addresses or network ranges are accessible on the public
+Internet, or if they are in private networks::
+
+ # {{ test_list | ansible.netcommon.ipaddr('public') }}
+ ['192.24.2.1', '2001:db8:32c:faad::/64']
+
+ # {{ test_list | ansible.netcommon.ipaddr('private') }}
+ ['192.168.32.0/24', 'fe80::100/10']
+
+You can check which values are specifically network ranges::
+
+ # {{ test_list | ansible.netcommon.ipaddr('net') }}
+ ['192.168.32.0/24', '2001:db8:32c:faad::/64']
+
+You can also check how many IP addresses can be in a certain range::
+
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('size') }}
+    [256, 18446744073709551616]
+
+By specifying a network range as a query, you can check if a given value is in
+that range::
+
+ # {{ test_list | ansible.netcommon.ipaddr('192.0.0.0/8') }}
+ ['192.24.2.1', '192.168.32.0/24']
+
+If you specify a positive or negative integer as a query, ``ipaddr()`` will
+treat this as an index and will return the specific IP address from a network
+range, in the 'host/prefix' format::
+
+ # First IP address (network address)
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('0') }}
+ ['192.168.32.0/24', '2001:db8:32c:faad::/64']
+
+ # Second IP address (usually the gateway host)
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('1') }}
+ ['192.168.32.1/24', '2001:db8:32c:faad::1/64']
+
+ # Last IP address (the broadcast address in IPv4 networks)
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('-1') }}
+ ['192.168.32.255/24', '2001:db8:32c:faad:ffff:ffff:ffff:ffff/64']
+
+You can also select IP addresses from a range by their index, from the start or
+end of the range::
+
+ # Returns from the start of the range
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('200') }}
+ ['192.168.32.200/24', '2001:db8:32c:faad::c8/64']
+
+ # Returns from the end of the range
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('-200') }}
+ ['192.168.32.56/24', '2001:db8:32c:faad:ffff:ffff:ffff:ff38/64']
+
+ # {{ test_list | ansible.netcommon.ipaddr('net') | ansible.netcommon.ipaddr('400') }}
+ ['2001:db8:32c:faad::190/64']
+
+
+Getting information from host/prefix values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You will frequently use a combination of IP addresses and subnet prefixes
+("CIDR"); this is even more common with IPv6. The ``ansible.netcommon.ipaddr()`` filter can extract
+useful data from these prefixes.
+
+Here's an example set of two host prefixes (with some "control" values)::
+
+ host_prefix = ['2001:db8:deaf:be11::ef3/64', '192.0.2.48/24', '127.0.0.1', '192.168.0.0/16']
+
+First, let's make sure that we only work with correct host/prefix values, not
+just subnets or single IP addresses::
+
+ # {{ host_prefix | ansible.netcommon.ipaddr('host/prefix') }}
+ ['2001:db8:deaf:be11::ef3/64', '192.0.2.48/24']
+
+In Debian-based systems, the network configuration stored in the ``/etc/network/interfaces`` file uses a combination of IP address, network address, netmask and broadcast address to configure an IPv4 network interface. We can get these values from a single 'host/prefix' combination:
+
+.. code-block:: jinja
+
+ # Jinja2 template
+ {% set ipv4_host = host_prefix | unique | ansible.netcommon.ipv4('host/prefix') | first %}
+ iface eth0 inet static
+ address {{ ipv4_host | ansible.netcommon.ipaddr('address') }}
+ network {{ ipv4_host | ansible.netcommon.ipaddr('network') }}
+ netmask {{ ipv4_host | ansible.netcommon.ipaddr('netmask') }}
+ broadcast {{ ipv4_host | ansible.netcommon.ipaddr('broadcast') }}
+
+ # Generated configuration file
+ iface eth0 inet static
+ address 192.0.2.48
+ network 192.0.2.0
+ netmask 255.255.255.0
+ broadcast 192.0.2.255
+
+In the above example, we needed to handle the fact that values were stored in
+a list, which is unusual in IPv4 networks, where only a single IP address can be
+set on an interface. However, IPv6 networks can have multiple IP addresses set
+on an interface:
+
+.. code-block:: jinja
+
+ # Jinja2 template
+ iface eth0 inet6 static
+ {% set ipv6_list = host_prefix | unique | ansible.netcommon.ipv6('host/prefix') %}
+ address {{ ipv6_list[0] }}
+ {% if ipv6_list | length > 1 %}
+ {% for subnet in ipv6_list[1:] %}
+ up /sbin/ip address add {{ subnet }} dev eth0
+ down /sbin/ip address del {{ subnet }} dev eth0
+ {% endfor %}
+ {% endif %}
+
+ # Generated configuration file
+ iface eth0 inet6 static
+ address 2001:db8:deaf:be11::ef3/64
+
+If needed, you can extract subnet and prefix information from the 'host/prefix' value:
+
+.. code-block:: jinja
+
+ # {{ host_prefix | ansible.netcommon.ipaddr('host/prefix') | ansible.netcommon.ipaddr('subnet') }}
+ ['2001:db8:deaf:be11::/64', '192.0.2.0/24']
+
+ # {{ host_prefix | ansible.netcommon.ipaddr('host/prefix') | ansible.netcommon.ipaddr('prefix') }}
+ [64, 24]
+
+
+Converting subnet masks to CIDR notation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given a subnet in the form of network address and subnet mask, the ``ipaddr()`` filter can convert it into CIDR notation. This can be useful for converting Ansible facts gathered about network configuration from subnet masks into CIDR format::
+
+ ansible_default_ipv4: {
+ address: "192.168.0.11",
+ alias: "eth0",
+ broadcast: "192.168.0.255",
+ gateway: "192.168.0.1",
+ interface: "eth0",
+ macaddress: "fa:16:3e:c4:bd:89",
+ mtu: 1500,
+ netmask: "255.255.255.0",
+ network: "192.168.0.0",
+ type: "ether"
+ }
+
+First concatenate the network and netmask::
+
+ net_mask = "{{ ansible_default_ipv4.network }}/{{ ansible_default_ipv4.netmask }}"
+ '192.168.0.0/255.255.255.0'
+
+This result can be converted to canonical form with ``ipaddr()`` to produce a subnet in CIDR format::
+
+ # {{ net_mask | ansible.netcommon.ipaddr('prefix') }}
+ '24'
+
+ # {{ net_mask | ansible.netcommon.ipaddr('net') }}
+ '192.168.0.0/24'
+
+
+Getting information about the network in CIDR notation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Given an IP address, the ``ipaddr()`` filter can produce the network address in CIDR notation.
+This can be useful when you want to obtain the network address from the IP address in CIDR format.
+
+Here's an example IP address::
+
+ ip_address = "{{ ansible_default_ipv4.address }}/{{ ansible_default_ipv4.netmask }}"
+ '192.168.0.11/255.255.255.0'
+
+This can be used to obtain the network address in CIDR notation format::
+
+ # {{ ip_address | ansible.netcommon.ipaddr('network/prefix') }}
+ '192.168.0.0/24'
+
+
+IP address conversion
+^^^^^^^^^^^^^^^^^^^^^
+
+Here's our test list again::
+
+ # Example list of values
+ test_list = ['192.24.2.1', 'host.fqdn', '::1', '192.168.32.0/24', 'fe80::100/10', True, '', '42540766412265424405338506004571095040/64']
+
+You can convert IPv4 addresses into IPv6 addresses::
+
+ # {{ test_list | ansible.netcommon.ipv4('ipv6') }}
+ ['::ffff:192.24.2.1/128', '::ffff:192.168.32.0/120']
+
+Converting from IPv6 to IPv4 works only for the rare IPv6 addresses that have an IPv4 equivalent::
+
+ # {{ test_list | ansible.netcommon.ipv6('ipv4') }}
+ ['0.0.0.1/32']
+
+But we can make a double conversion if needed::
+
+ # {{ test_list | ansible.netcommon.ipaddr('ipv6') | ansible.netcommon.ipaddr('ipv4') }}
+ ['192.24.2.1/32', '0.0.0.1/32', '192.168.32.0/24']
+
+You can convert IP addresses to integers, the same way that you can convert
+integers into IP addresses::
+
+ # {{ test_list | ansible.netcommon.ipaddr('address') | ansible.netcommon.ipaddr('int') }}
+ [3222798849, 1, '3232243712/24', '338288524927261089654018896841347694848/10', '42540766412265424405338506004571095040/64']
+
+You can convert an IPv4 address to `hexadecimal notation <https://en.wikipedia.org/wiki/Hexadecimal>`_ with an optional delimiter::
+
+ # {{ '192.168.1.5' | ansible.netcommon.ip4_hex }}
+ c0a80105
+ # {{ '192.168.1.5' | ansible.netcommon.ip4_hex(':') }}
+ c0:a8:01:05
+
+You can convert IP addresses to PTR records::
+
+ # {% for address in test_list | ansible.netcommon.ipaddr %}
+ # {{ address | ansible.netcommon.ipaddr('revdns') }}
+ # {% endfor %}
+ 1.2.24.192.in-addr.arpa.
+ 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.
+ 0.32.168.192.in-addr.arpa.
+ 0.0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.e.f.ip6.arpa.
+ 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.d.a.a.f.c.2.3.0.8.b.d.0.1.0.0.2.ip6.arpa.
+
+
+Converting IPv4 address to a 6to4 address
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A `6to4`_ tunnel is a way to access the IPv6 Internet from an IPv4-only network. If you
+have a public IPv4 address, you can automatically configure its IPv6
+equivalent in the ``2002::/16`` network range. After conversion you will gain
+access to a ``2002:xxxx:xxxx::/48`` subnet, which could be split into 65536
+``/64`` subnets if needed.
+
+To convert your IPv4 address, just send it through the ``'6to4'`` filter. It will
+be automatically converted to a router address (with a ``::1/48`` host address)::
+
+ # {{ '193.0.2.0' | ansible.netcommon.ipaddr('6to4') }}
+ 2002:c100:0200::1/48
+
+.. _6to4: https://en.wikipedia.org/wiki/6to4
+
+
+Finding IP addresses within a range
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To find usable IP addresses within an IP range, try these ``ipaddr`` filters:
+
+To find the next usable IP address in a range, use ``next_usable`` ::
+
+ # {{ '192.168.122.1/24' | ansible.netcommon.ipaddr('next_usable') }}
+ 192.168.122.2
+
+To find the last usable IP address from a range, use ``last_usable``::
+
+ # {{ '192.168.122.1/24' | ansible.netcommon.ipaddr('last_usable') }}
+ 192.168.122.254
+
+To find the available range of IP addresses from the given network address, use ``range_usable``::
+
+ # {{ '192.168.122.1/24' | ansible.netcommon.ipaddr('range_usable') }}
+ 192.168.122.1-192.168.122.254
+
+To find the peer IP address for a point to point link, use ``peer``::
+
+ # {{ '192.168.122.1/31' | ansible.netcommon.ipaddr('peer') }}
+ 192.168.122.0
+ # {{ '192.168.122.1/30' | ansible.netcommon.ipaddr('peer') }}
+ 192.168.122.2
+
+To return the nth IP address from a network, use the ``nthhost`` filter::
+
+ # {{ '10.0.0.0/8' | ansible.netcommon.nthhost(305) }}
+ 10.0.1.49
+
+``nthhost`` also supports a negative value::
+
+ # {{ '10.0.0.0/8' | ansible.netcommon.nthhost(-1) }}
+ 10.255.255.255
+
+To find the next nth usable IP address in relation to another within a range, use ``next_nth_usable``.
+In the example below, ``next_nth_usable`` returns the second usable IP address after the given IP address::
+
+ # {{ '192.168.122.1/24' | ansible.netcommon.next_nth_usable(2) }}
+ 192.168.122.3
+
+If there is no usable address, it returns an empty string::
+
+ # {{ '192.168.122.254/24' | ansible.netcommon.next_nth_usable(2) }}
+ ""
+
+Just like ``next_nth_usable``, you can use ``previous_nth_usable`` to find the previous usable address::
+
+ # {{ '192.168.122.10/24' | ansible.netcommon.previous_nth_usable(2) }}
+ 192.168.122.8
+
+
+Testing if an address belongs to a network range
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``network_in_usable`` filter returns whether an address passed as an argument is usable in a network.
+Usable addresses are addresses that can be assigned to a host. The network ID and the broadcast address
+are not usable addresses::
+
+ # {{ '192.168.0.0/24' | ansible.netcommon.network_in_usable( '192.168.0.1' ) }}
+ True
+
+ # {{ '192.168.0.0/24' | ansible.netcommon.network_in_usable( '192.168.0.255' ) }}
+ False
+
+ # {{ '192.168.0.0/16' | ansible.netcommon.network_in_usable( '192.168.0.255' ) }}
+ True
+
+The ``network_in_network`` filter returns whether an address or a network passed as an argument is in a network::
+
+ # {{ '192.168.0.0/24' | ansible.netcommon.network_in_network( '192.168.0.1' ) }}
+ True
+
+ # {{ '192.168.0.0/24' | ansible.netcommon.network_in_network( '192.168.0.0/24' ) }}
+ True
+
+ # {{ '192.168.0.0/24' | ansible.netcommon.network_in_network( '192.168.0.255' ) }}
+ True
+
+    # Check if a network is part of another network
+ # {{ '192.168.0.0/16' | ansible.netcommon.network_in_network( '192.168.0.0/24' ) }}
+ True
+
+To check whether multiple addresses belong to a network, use the ``reduce_on_network`` filter::
+
+ # {{ ['192.168.0.34', '10.3.0.3', '192.168.2.34'] | ansible.netcommon.reduce_on_network( '192.168.0.0/24' ) }}
+ ['192.168.0.34']
+
+
+IP Math
+^^^^^^^
+
+.. versionadded:: 2.7
+
+The ``ipmath()`` filter can be used to do simple IP math/arithmetic.
+
+Here are a few simple examples::
+
+ # Get the next five addresses based on an IP address
+ # {{ '192.168.1.5' | ansible.netcommon.ipmath(5) }}
+ 192.168.1.10
+
+ # Get the ten previous addresses based on an IP address
+ # {{ '192.168.0.5' | ansible.netcommon.ipmath(-10) }}
+ 192.167.255.251
+
+ # Get the next five addresses using CIDR notation
+ # {{ '192.168.1.1/24' | ansible.netcommon.ipmath(5) }}
+ 192.168.1.6
+
+ # Get the previous five addresses using CIDR notation
+ # {{ '192.168.1.6/24' | ansible.netcommon.ipmath(-5) }}
+ 192.168.1.1
+
+    # Get the previous ten addresses using CIDR notation
+    # This returns an address in the previous network range
+ # {{ '192.168.2.6/24' | ansible.netcommon.ipmath(-10) }}
+ 192.168.1.252
+
+ # Get the next ten addresses in IPv6
+ # {{ '2001::1' | ansible.netcommon.ipmath(10) }}
+ 2001::b
+
+    # Get the previous ten addresses in IPv6
+ # {{ '2001::5' | ansible.netcommon.ipmath(-10) }}
+ 2000:ffff:ffff:ffff:ffff:ffff:ffff:fffb
+
+
+
+Subnet manipulation
+^^^^^^^^^^^^^^^^^^^
+
+The ``ipsubnet()`` filter can be used to manipulate network subnets in several ways.
+
+Here is an example IP address and subnet::
+
+ address = '192.168.144.5'
+ subnet = '192.168.0.0/16'
+
+To check if a given string is a subnet, pass it through the filter without any
+arguments. If the given string is an IP address, it will be converted into
+a subnet::
+
+ # {{ address | ansible.netcommon.ipsubnet }}
+ 192.168.144.5/32
+
+ # {{ subnet | ansible.netcommon.ipsubnet }}
+ 192.168.0.0/16
+
+If you specify a subnet size as the first parameter of the ``ipsubnet()`` filter, and
+the subnet size is **smaller than the current one**, you will get the number of subnets
+a given subnet can be split into::
+
+ # {{ subnet | ansible.netcommon.ipsubnet(20) }}
+ 16
+
+The second argument of the ``ipsubnet()`` filter is an index number; by specifying it
+you can get a new subnet with the specified size::
+
+ # First subnet
+ # {{ subnet | ansible.netcommon.ipsubnet(20, 0) }}
+ 192.168.0.0/20
+
+ # Last subnet
+ # {{ subnet | ansible.netcommon.ipsubnet(20, -1) }}
+ 192.168.240.0/20
+
+ # Fifth subnet
+ # {{ subnet | ansible.netcommon.ipsubnet(20, 5) }}
+ 192.168.80.0/20
+
+ # Fifth to last subnet
+ # {{ subnet | ansible.netcommon.ipsubnet(20, -5) }}
+ 192.168.176.0/20
+
+If you specify an IP address instead of a subnet, and give a subnet size as
+the first argument, the ``ipsubnet()`` filter will instead return the biggest subnet that
+contains that given IP address::
+
+ # {{ address | ansible.netcommon.ipsubnet(20) }}
+ 192.168.144.0/20
+
+By specifying an index number as a second argument, you can select smaller and
+smaller subnets::
+
+ # First subnet
+ # {{ address | ansible.netcommon.ipsubnet(18, 0) }}
+ 192.168.128.0/18
+
+ # Last subnet
+ # {{ address | ansible.netcommon.ipsubnet(18, -1) }}
+ 192.168.144.4/31
+
+ # Fifth subnet
+ # {{ address | ansible.netcommon.ipsubnet(18, 5) }}
+ 192.168.144.0/23
+
+ # Fifth to last subnet
+ # {{ address | ansible.netcommon.ipsubnet(18, -5) }}
+ 192.168.144.0/27
+
+By specifying another subnet as a second argument, if the second subnet includes
+the first, you can determine the rank of the first subnet in the second::
+
+    # The rank of the IP in the subnet (the IP is the 36870th /32 of the subnet)
+ # {{ address | ansible.netcommon.ipsubnet(subnet) }}
+ 36870
+
+    # The rank in the /24 that contains the address
+ # {{ address | ansible.netcommon.ipsubnet('192.168.144.0/24') }}
+ 6
+
+    # An IP address in the first /30 subnet of a /24
+ # {{ '192.168.144.1/30' | ansible.netcommon.ipsubnet('192.168.144.0/24') }}
+ 1
+
+ # The fifth subnet /30 in a /24
+ # {{ '192.168.144.16/30' | ansible.netcommon.ipsubnet('192.168.144.0/24') }}
+ 5
+
+If the second subnet doesn't include the first subnet, the ``ipsubnet()`` filter raises an error.
+
+
+You can use the ``ipsubnet()`` filter with the ``ipaddr()`` filter to, for example, split
+a given ``/48`` prefix into smaller ``/64`` subnets::
+
+    # {{ '193.0.2.0' | ansible.netcommon.ipaddr('6to4') | ansible.netcommon.ipsubnet(64, 58820) | ansible.netcommon.ipaddr('1') }}
+ 2002:c100:200:e5c4::1/64
+
+Because of the size of IPv6 subnets, iteration over all of them to find the
+correct one may take some time on slower computers, depending on the size
+difference between the subnets.
+
+
+Subnet Merging
+^^^^^^^^^^^^^^
+
+.. versionadded:: 2.6
+
+The ``cidr_merge()`` filter can be used to merge subnets or individual addresses
+into their minimal representation, collapsing overlapping subnets and merging
+adjacent ones wherever possible::
+
+    {{ ['192.168.0.0/17', '192.168.128.0/17', '192.168.128.1' ] | ansible.netcommon.cidr_merge }}
+ # => ['192.168.0.0/16']
+
+    {{ ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24'] | ansible.netcommon.cidr_merge }}
+ # => ['192.168.0.0/23', '192.168.3.0/24']
+
+Changing the action from 'merge' to 'span' will instead return the smallest
+subnet which contains all of the inputs::
+
+ {{ ['192.168.0.0/24', '192.168.3.0/24'] | ansible.netcommon.cidr_merge('span') }}
+ # => '192.168.0.0/22'
+
+ {{ ['192.168.1.42', '192.168.42.1'] | ansible.netcommon.cidr_merge('span') }}
+ # => '192.168.0.0/18'
+
+
+MAC address filter
+^^^^^^^^^^^^^^^^^^
+
+You can use the ``hwaddr()`` filter to check if a given string is a MAC address or
+convert it between various formats. Examples::
+
+ # Example MAC address
+ macaddress = '1a:2b:3c:4d:5e:6f'
+
+ # Check if given string is a MAC address
+ # {{ macaddress | ansible.netcommon.hwaddr }}
+ 1a:2b:3c:4d:5e:6f
+
+ # Convert MAC address to PostgreSQL format
+ # {{ macaddress | ansible.netcommon.hwaddr('pgsql') }}
+ 1a2b3c:4d5e6f
+
+ # Convert MAC address to Cisco format
+ # {{ macaddress | ansible.netcommon.hwaddr('cisco') }}
+ 1a2b.3c4d.5e6f
+
+The supported formats result in the following conversions for the ``1a:2b:3c:4d:5e:6f`` MAC address::
+
+ bare: 1A2B3C4D5E6F
+ bool: True
+ int: 28772997619311
+ cisco: 1a2b.3c4d.5e6f
+ eui48 or win: 1A-2B-3C-4D-5E-6F
+    linux or unix: 1a:2b:3c:4d:5e:6f
+ pgsql, postgresql, or psql: 1a2b3c:4d5e6f
+
+
+Generate an IPv6 address in Stateless Configuration (SLAAC)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``slaac()`` filter generates an IPv6 address for a given network and MAC address using stateless address autoconfiguration (SLAAC)::
+
+    # {{ 'fdcf:1894:23b5:d38c:0000:0000:0000:0000' | ansible.netcommon.slaac('c2:31:b3:83:bf:2b') }}
+ fdcf:1894:23b5:d38c:c031:b3ff:fe83:bf2b
+
+.. seealso::
+
+
+ `ansible.netcommon <https://galaxy.ansible.com/ansible/netcommon>`_
+ Ansible network collection for common code
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_filters`
+ Introduction to Jinja2 filters and their uses
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ :ref:`playbooks_loops`
+ Looping in playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_handlers.rst b/docs/docsite/rst/user_guide/playbooks_handlers.rst
new file mode 100644
index 00000000..4650d5e7
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_handlers.rst
@@ -0,0 +1,148 @@
+.. _handlers:
+
+Handlers: running operations on change
+======================================
+
+Sometimes you want a task to run only when a change is made on a machine. For example, you may want to restart a service if a task updates the configuration of that service, but not if the configuration is unchanged. Ansible uses handlers to address this use case. Handlers are tasks that only run when notified. Each handler should have a globally unique name.
+
+.. contents::
+ :local:
+
+Handler example
+---------------
+
+This playbook, ``verify-apache.yml``, contains a single play with a handler::
+
+ ---
+ - name: Verify apache installation
+ hosts: webservers
+ vars:
+ http_port: 80
+ max_clients: 200
+ remote_user: root
+ tasks:
+ - name: Ensure apache is at the latest version
+ ansible.builtin.yum:
+ name: httpd
+ state: latest
+
+ - name: Write the apache config file
+ ansible.builtin.template:
+ src: /srv/httpd.j2
+ dest: /etc/httpd.conf
+ notify:
+ - Restart apache
+
+ - name: Ensure apache is running
+ ansible.builtin.service:
+ name: httpd
+ state: started
+
+ handlers:
+ - name: Restart apache
+ ansible.builtin.service:
+ name: httpd
+ state: restarted
+
+In this example playbook, the second task notifies the handler. A single task can notify more than one handler::
+
+ - name: Template configuration file
+ ansible.builtin.template:
+ src: template.j2
+ dest: /etc/foo.conf
+ notify:
+ - Restart memcached
+ - Restart apache
+
+ handlers:
+ - name: Restart memcached
+ ansible.builtin.service:
+ name: memcached
+ state: restarted
+
+ - name: Restart apache
+ ansible.builtin.service:
+ name: apache
+ state: restarted
+
+Controlling when handlers run
+-----------------------------
+
+By default, handlers run after all the tasks in a particular play have been completed. This approach is efficient, because the handler only runs once, regardless of how many tasks notify it. For example, if multiple tasks update a configuration file and notify a handler to restart Apache, Ansible only bounces Apache once to avoid unnecessary restarts.
+
+If you need handlers to run before the end of the play, add a task to flush them using the :ref:`meta module <meta_module>`, which executes Ansible actions::
+
+ tasks:
+ - name: Some tasks go here
+ ansible.builtin.shell: ...
+
+ - name: Flush handlers
+ meta: flush_handlers
+
+ - name: Some other tasks
+ ansible.builtin.shell: ...
+
+The ``meta: flush_handlers`` task triggers any handlers that have been notified at that point in the play.
+
+Using variables with handlers
+-----------------------------
+
+You may want your Ansible handlers to use variables. For example, if the name of a service varies slightly by distribution, you want your output to show the exact name of the restarted service for each target machine. Avoid placing variables in the name of the handler. Since handler names are templated early on, Ansible may not have a value available for a handler name like this::
+
+ handlers:
+ # This handler name may cause your play to fail!
+ - name: Restart "{{ web_service_name }}"
+
+If the variable used in the handler name is not available, the entire play fails. Changing that variable mid-play **will not** result in a newly created handler.
+
+Instead, place variables in the task parameters of your handler. You can load the values using ``include_vars`` like this:
+
+.. code-block:: yaml+jinja
+
+ tasks:
+ - name: Set host variables based on distribution
+ include_vars: "{{ ansible_facts.distribution }}.yml"
+
+ handlers:
+ - name: Restart web service
+ ansible.builtin.service:
+ name: "{{ web_service_name | default('httpd') }}"
+ state: restarted
+
+Handlers can also "listen" to generic topics, and tasks can notify those topics as follows::
+
+ handlers:
+ - name: Restart memcached
+ ansible.builtin.service:
+ name: memcached
+ state: restarted
+ listen: "restart web services"
+
+ - name: Restart apache
+ ansible.builtin.service:
+ name: apache
+ state: restarted
+ listen: "restart web services"
+
+ tasks:
+ - name: Restart everything
+ ansible.builtin.command: echo "this task will restart the web services"
+ notify: "restart web services"
+
+This use makes it much easier to trigger multiple handlers. It also decouples handlers from their names,
+making it easier to share handlers among playbooks and roles (especially when using 3rd party roles from
+a shared source like Galaxy).
+
+.. note::
+   * Handlers always run in the order they are defined, not in the order listed in the notify statement. This is also the case for handlers using ``listen``.
+   * Handler names and ``listen`` topics live in a global namespace.
+   * Handler names are templatable and ``listen`` topics are not.
+ * Use unique handler names. If you trigger more than one handler with the same name, the first one(s) get overwritten. Only the last one defined will run.
+ * You can notify a handler defined inside a static include.
+ * You cannot notify a handler defined inside a dynamic include.
+
+When using handlers within roles, note that:
+
+* handlers notified within the ``pre_tasks``, ``tasks``, and ``post_tasks`` sections are automatically flushed at the end of the section in which they were notified.
+* handlers notified within the ``roles`` section are automatically flushed at the end of the ``tasks`` section, but before any ``tasks`` handlers.
+* handlers are play scoped and as such can be used outside of the role they are defined in.
diff --git a/docs/docsite/rst/user_guide/playbooks_intro.rst b/docs/docsite/rst/user_guide/playbooks_intro.rst
new file mode 100644
index 00000000..24037b3e
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_intro.rst
@@ -0,0 +1,151 @@
+.. _about_playbooks:
+.. _playbooks_intro:
+
+******************
+Intro to playbooks
+******************
+
+Ansible Playbooks offer a repeatable, re-usable, simple configuration management and multi-machine deployment system, one that is well suited to deploying complex applications. If you need to execute a task with Ansible more than once, write a playbook and put it under source control. Then you can use the playbook to push out new configuration or confirm the configuration of remote systems. The playbooks in the `ansible-examples repository <https://github.com/ansible/ansible-examples>`_ illustrate many useful techniques. You may want to look at these in another tab as you read the documentation.
+
+Playbooks can:
+
+* declare configurations
+* orchestrate steps of any manually ordered process, on multiple sets of machines, in a defined order
+* launch tasks synchronously or :ref:`asynchronously <playbooks_async>`
+
+.. contents::
+ :local:
+
+.. _playbook_language_example:
+
+Playbook syntax
+===============
+
+Playbooks are expressed in YAML format with a minimum of syntax. If you are not familiar with YAML, look at our overview of :ref:`yaml_syntax` and consider installing an add-on for your text editor (see :ref:`other_tools_and_programs`) to help you write clean YAML syntax in your playbooks.
+
+A playbook is composed of one or more 'plays' in an ordered list. The terms 'playbook' and 'play' are sports analogies. Each play executes part of the overall goal of the playbook, running one or more tasks. Each task calls an Ansible module.
+
+Playbook execution
+==================
+
+A playbook runs in order from top to bottom. Within each play, tasks also run in order from top to bottom. Playbooks with multiple 'plays' can orchestrate multi-machine deployments, running one play on your webservers, then another play on your database servers, then a third play on your network infrastructure, and so on. At a minimum, each play defines two things:
+
+* the managed nodes to target, using a :ref:`pattern <intro_patterns>`
+* at least one task to execute
+
+In this example, the first play targets the web servers; the second play targets the database servers::
+
+ ---
+ - name: update web servers
+ hosts: webservers
+ remote_user: root
+
+ tasks:
+ - name: ensure apache is at the latest version
+ yum:
+ name: httpd
+ state: latest
+ - name: write the apache config file
+ template:
+ src: /srv/httpd.j2
+ dest: /etc/httpd.conf
+
+ - name: update db servers
+ hosts: databases
+ remote_user: root
+
+ tasks:
+ - name: ensure postgresql is at the latest version
+ yum:
+ name: postgresql
+ state: latest
+ - name: ensure that postgresql is started
+ service:
+ name: postgresql
+ state: started
+
+Your playbook can include more than just a hosts line and tasks. For example, the playbook above sets a ``remote_user`` for each play. This is the user account for the SSH connection. You can add other :ref:`playbook_keywords` at the playbook, play, or task level to influence how Ansible behaves. Playbook keywords can control the :ref:`connection plugin <connection_plugins>`, whether to use :ref:`privilege escalation <become>`, how to handle errors, and more. To support a variety of environments, Ansible lets you set many of these parameters as command-line flags, in your Ansible configuration, or in your inventory. Learning the :ref:`precedence rules <general_precedence_rules>` for these sources of data will help you as you expand your Ansible ecosystem.
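+
+For example, a keyword such as ``become`` can be set for a whole play and overridden on a single task (a minimal sketch; the host pattern is illustrative)::
+
+    - name: Configure web servers
+      hosts: webservers
+      become: true                  # play-level keyword: escalate privileges for all tasks
+      tasks:
+        - name: Check uptime without privilege escalation
+          ansible.builtin.command: uptime
+          become: false             # task-level keyword overrides the play level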
+
+.. _tasks_list:
+
+Task execution
+--------------
+
+By default, Ansible executes each task in order, one at a time, against all machines matched by the host pattern. Each task executes a module with specific arguments. When a task has executed on all target machines, Ansible moves on to the next task. You can use :ref:`strategies <playbooks_strategies>` to change this default behavior. Within each play, Ansible applies the same task directives to all hosts. If a task fails on a host, Ansible takes that host out of the rotation for the rest of the playbook.
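+
+For example, the default ``linear`` strategy can be changed per play (a minimal sketch)::
+
+    - name: Let each host proceed through tasks at its own pace
+      hosts: all
+      strategy: free
+      tasks:
+        - name: Apply all pending package updates
+          ansible.builtin.yum:
+            name: '*'
+            state: latest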
+
+When you run a playbook, Ansible returns information about connections, the ``name`` lines of all your plays and tasks, whether each task has succeeded or failed on each machine, and whether each task has made a change on each machine. At the bottom of the playbook execution, Ansible provides a summary of the nodes that were targeted and how they performed. General failures and fatal "unreachable" communication attempts are kept separate in the counts.
+
+.. _idempotency:
+
+Desired state and 'idempotency'
+-------------------------------
+
+Most Ansible modules check whether the desired final state has already been achieved, and exit without performing any actions if so, so that repeating the task does not change the final state. Modules that behave this way are often called 'idempotent.' Whether you run a playbook once, or multiple times, the outcome should be the same. However, not all playbooks and not all modules behave this way. If you are unsure, test your playbooks in a sandbox environment before running them multiple times in production.
+
+.. _executing_a_playbook:
+
+Running playbooks
+-----------------
+
+To run your playbook, use the :ref:`ansible-playbook` command::
+
+ ansible-playbook playbook.yml -f 10
+
+The ``-f 10`` option in the example above sets the number of parallel forks. Use the ``--verbose`` flag when running your playbook to see detailed output from successful modules as well as unsuccessful ones.
+
+.. _playbook_ansible-pull:
+
+Ansible-Pull
+============
+
+Should you want to invert the architecture of Ansible, so that nodes check in to a central location, instead
+of pushing configuration out to them, you can.
+
+``ansible-pull`` is a small script that checks out a repo of configuration instructions from git, and then
+runs ``ansible-playbook`` against that content.
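+
+A typical invocation looks like this (a minimal sketch; the repository URL is a placeholder)::
+
+    ansible-pull -U https://github.com/example/ansible-config.git local.yml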
+
+Assuming you load balance your checkout location, ``ansible-pull`` scales essentially infinitely.
+
+Run ``ansible-pull --help`` for details.
+
+There's also a `clever playbook <https://github.com/ansible/ansible-examples/blob/master/language_features/ansible_pull.yml>`_ available to configure ``ansible-pull`` via a crontab from push mode.
+
+Verifying playbooks
+===================
+
+You may want to verify your playbooks to catch syntax errors and other problems before you run them. The :ref:`ansible-playbook` command offers several options for verification, including ``--check``, ``--diff``, ``--list-hosts``, ``--list-tasks``, and ``--syntax-check``. The :ref:`validate-playbook-tools` describes other tools for validating and testing playbooks.
+
+.. _linting_playbooks:
+
+ansible-lint
+------------
+
+You can use `ansible-lint <https://docs.ansible.com/ansible-lint/index.html>`_ for detailed, Ansible-specific feedback on your playbooks before you execute them. For example, if you run ``ansible-lint`` on the playbook called ``verify-apache.yml`` near the top of this page, you should get the following results:
+
+.. code-block:: bash
+
+ $ ansible-lint verify-apache.yml
+ [403] Package installs should not use latest
+ verify-apache.yml:8
+ Task/Handler: ensure apache is at the latest version
+
+The `ansible-lint default rules <https://docs.ansible.com/ansible-lint/rules/default_rules.html>`_ page describes each error. For ``[403]``, the recommended fix is to change ``state: latest`` to ``state: present`` in the playbook.
+
+.. seealso::
+
+ `ansible-lint <https://docs.ansible.com/ansible-lint/index.html>`_
+ Learn how to test Ansible Playbooks syntax
+ :ref:`yaml_syntax`
+ Learn about YAML syntax
+ :ref:`playbooks_best_practices`
+ Tips for managing playbooks in the real world
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_modules`
+ Learn to extend Ansible by writing your own modules
+ :ref:`intro_patterns`
+ Learn about how to select hosts
+ `GitHub examples directory <https://github.com/ansible/ansible-examples>`_
+ Complete end-to-end playbook examples
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/playbooks_lookups.rst b/docs/docsite/rst/user_guide/playbooks_lookups.rst
new file mode 100644
index 00000000..004db708
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_lookups.rst
@@ -0,0 +1,37 @@
+.. _playbooks_lookups:
+
+*******
+Lookups
+*******
+
+Lookup plugins retrieve data from outside sources such as files, databases, key/value stores, APIs, and other services. Like all templating, lookups execute and are evaluated on the Ansible control machine. Ansible makes the data returned by a lookup plugin available using the standard templating system. Before Ansible 2.5, lookups were mostly used indirectly in ``with_<lookup>`` constructs for looping. Starting with Ansible 2.5, lookups are used more explicitly as part of Jinja2 expressions fed into the ``loop`` keyword.
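+
+For example, a lookup can feed the ``loop`` keyword directly. A minimal sketch, assuming a ``users.txt`` file next to the playbook with one user name per line::
+
+    - name: Create a user for each line in users.txt
+      ansible.builtin.user:
+        name: "{{ item }}"
+        state: present
+      loop: "{{ lookup('file', 'users.txt').splitlines() }}"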
+
+.. _lookups_and_variables:
+
+Using lookups in variables
+==========================
+
+You can populate variables using lookups. Ansible evaluates the value each time it is executed in a task (or template)::
+
+ vars:
+ motd_value: "{{ lookup('file', '/etc/motd') }}"
+ tasks:
+ - debug:
+ msg: "motd value is {{ motd_value }}"
+
+For more details and a list of lookup plugins in ansible-base, see :ref:`plugins_lookup`. You may also find lookup plugins in collections. You can review a list of lookup plugins installed on your control machine with the command ``ansible-doc -l -t lookup``.
+
+.. seealso::
+
+ :ref:`working_with_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ :ref:`playbooks_loops`
+ Looping in playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_loops.rst b/docs/docsite/rst/user_guide/playbooks_loops.rst
new file mode 100644
index 00000000..0934eeed
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_loops.rst
@@ -0,0 +1,445 @@
+.. _playbooks_loops:
+
+*****
+Loops
+*****
+
+Sometimes you want to repeat a task multiple times. In computer programming, this is called a loop. Common Ansible loops include changing ownership on several files and/or directories with the :ref:`file module <file_module>`, creating multiple users with the :ref:`user module <user_module>`, and
+repeating a polling step until a certain result is reached. Ansible offers two keywords for creating loops: ``loop`` and ``with_<lookup>``.
+
+.. note::
+ * We added ``loop`` in Ansible 2.5. It is not yet a full replacement for ``with_<lookup>``, but we recommend it for most use cases.
+ * We have not deprecated the use of ``with_<lookup>`` - that syntax will still be valid for the foreseeable future.
+ * We are looking to improve ``loop`` syntax - watch this page and the `changelog <https://github.com/ansible/ansible/tree/devel/changelogs>`_ for updates.
+
+.. contents::
+ :local:
+
+Comparing ``loop`` and ``with_*``
+=================================
+
+* The ``with_<lookup>`` keywords rely on :ref:`lookup_plugins` - even ``items`` is a lookup.
+* The ``loop`` keyword is equivalent to ``with_list``, and is the best choice for simple loops.
+* The ``loop`` keyword will not accept a string as input, see :ref:`query_vs_lookup`.
+* Generally speaking, any use of ``with_*`` covered in :ref:`migrating_to_loop` can be updated to use ``loop``.
+* Be careful when changing ``with_items`` to ``loop``, as ``with_items`` performed implicit single-level flattening. You may need to use ``flatten(1)`` with ``loop`` to match the exact outcome. For example, to get the same output as:
+
+.. code-block:: yaml
+
+ with_items:
+ - 1
+ - [2,3]
+ - 4
+
+you would need::
+
+    loop: "{{ [1, [2,3], 4] | flatten(1) }}"
+
+* Any ``with_*`` statement that requires using ``lookup`` within a loop should not be converted to use the ``loop`` keyword. For example, instead of doing:
+
+.. code-block:: yaml
+
+ loop: "{{ lookup('fileglob', '*.txt', wantlist=True) }}"
+
+it's cleaner to keep::
+
+ with_fileglob: '*.txt'
+
+.. _standard_loops:
+
+Standard loops
+==============
+
+Iterating over a simple list
+----------------------------
+
+Repeated tasks can be written as standard loops over a simple list of strings. You can define the list directly in the task::
+
+ - name: Add several users
+ ansible.builtin.user:
+ name: "{{ item }}"
+ state: present
+ groups: "wheel"
+ loop:
+ - testuser1
+ - testuser2
+
+You can define the list in a variables file, or in the 'vars' section of your play, then refer to the name of the list in the task::
+
+ loop: "{{ somelist }}"
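+
+For instance, a minimal sketch with the list defined in the play's ``vars`` section (``somelist`` is an illustrative name)::
+
+    vars:
+      somelist:
+        - testuser1
+        - testuser2
+    tasks:
+      - name: Add several users
+        ansible.builtin.user:
+          name: "{{ item }}"
+          state: present
+          groups: "wheel"
+        loop: "{{ somelist }}"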
+
+Either of these examples would be the equivalent of::
+
+ - name: Add user testuser1
+ ansible.builtin.user:
+ name: "testuser1"
+ state: present
+ groups: "wheel"
+
+ - name: Add user testuser2
+ ansible.builtin.user:
+ name: "testuser2"
+ state: present
+ groups: "wheel"
+
+You can pass a list directly to a parameter for some plugins. Most of the packaging modules, like :ref:`yum <yum_module>` and :ref:`apt <apt_module>`, have this capability. When available, passing the list to a parameter is better than looping over the task. For example::
+
+ - name: Optimal yum
+ ansible.builtin.yum:
+ name: "{{ list_of_packages }}"
+ state: present
+
+ - name: Non-optimal yum, slower and may cause issues with interdependencies
+ ansible.builtin.yum:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ list_of_packages }}"
+
+Check the :ref:`module documentation <modules_by_category>` to see if you can pass a list to any particular module's parameter(s).
+
+Iterating over a list of hashes
+-------------------------------
+
+If you have a list of hashes, you can reference subkeys in a loop. For example::
+
+ - name: Add several users
+ ansible.builtin.user:
+ name: "{{ item.name }}"
+ state: present
+ groups: "{{ item.groups }}"
+ loop:
+ - { name: 'testuser1', groups: 'wheel' }
+ - { name: 'testuser2', groups: 'root' }
+
+When combining :ref:`conditionals <playbooks_conditionals>` with a loop, the ``when:`` statement is processed separately for each item.
+See :ref:`the_when_statement` for examples.
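+
+For example, a minimal sketch (the values and the threshold are illustrative)::
+
+    - name: Show only the larger values
+      ansible.builtin.debug:
+        msg: "{{ item }}"
+      loop: [1, 4, 6, 9]
+      when: item > 5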
+
+Iterating over a dictionary
+---------------------------
+
+To loop over a dict, use the :ref:`dict2items <dict_filter>` filter:
+
+.. code-block:: yaml
+
+ - name: Using dict2items
+ ansible.builtin.debug:
+ msg: "{{ item.key }} - {{ item.value }}"
+ loop: "{{ tag_data | dict2items }}"
+ vars:
+ tag_data:
+ Environment: dev
+ Application: payment
+
+Here, we are iterating over ``tag_data`` and printing each key and its value.
+
+Registering variables with a loop
+=================================
+
+You can register the output of a loop as a variable. For example::
+
+ - name: Register loop output as a variable
+ ansible.builtin.shell: "echo {{ item }}"
+ loop:
+ - "one"
+ - "two"
+ register: echo
+
+When you use ``register`` with a loop, the data structure placed in the variable will contain a ``results`` attribute that is a list of all responses from the module. This differs from the data structure returned when using ``register`` without a loop::
+
+ {
+ "changed": true,
+ "msg": "All items completed",
+ "results": [
+ {
+ "changed": true,
+ "cmd": "echo \"one\" ",
+ "delta": "0:00:00.003110",
+ "end": "2013-12-19 12:00:05.187153",
+ "invocation": {
+ "module_args": "echo \"one\"",
+ "module_name": "shell"
+ },
+ "item": "one",
+ "rc": 0,
+ "start": "2013-12-19 12:00:05.184043",
+ "stderr": "",
+ "stdout": "one"
+ },
+ {
+ "changed": true,
+ "cmd": "echo \"two\" ",
+ "delta": "0:00:00.002920",
+ "end": "2013-12-19 12:00:05.245502",
+ "invocation": {
+ "module_args": "echo \"two\"",
+ "module_name": "shell"
+ },
+ "item": "two",
+ "rc": 0,
+ "start": "2013-12-19 12:00:05.242582",
+ "stderr": "",
+ "stdout": "two"
+ }
+ ]
+ }
+
+A subsequent loop over the registered variable to inspect the results might look like::
+
+ - name: Fail if return code is not 0
+ ansible.builtin.fail:
+ msg: "The command ({{ item.cmd }}) did not have a 0 return code"
+ when: item.rc != 0
+ loop: "{{ echo.results }}"
+
+During iteration, the result of the current item will be placed in the variable::
+
+ - name: Place the result of the current item in the variable
+ ansible.builtin.shell: echo "{{ item }}"
+ loop:
+ - one
+ - two
+ register: echo
+ changed_when: echo.stdout != "one"
+
+.. _complex_loops:
+
+Complex loops
+=============
+
+Iterating over nested lists
+---------------------------
+
+You can use Jinja2 expressions to iterate over complex lists. For example, a loop can combine nested lists::
+
+ - name: Give users access to multiple databases
+ community.mysql.mysql_user:
+ name: "{{ item[0] }}"
+ priv: "{{ item[1] }}.*:ALL"
+ append_privs: yes
+ password: "foo"
+    loop: "{{ ['alice', 'bob'] | product(['clientdb', 'employeedb', 'providerdb']) | list }}"
+
+
+.. _do_until_loops:
+
+Retrying a task until a condition is met
+----------------------------------------
+
+.. versionadded:: 1.4
+
+You can use the ``until`` keyword to retry a task until a certain condition is met. Here's an example::
+
+ - name: Retry a task until a certain condition is met
+ ansible.builtin.shell: /usr/bin/foo
+ register: result
+ until: result.stdout.find("all systems go") != -1
+ retries: 5
+ delay: 10
+
+This task runs up to 5 times with a delay of 10 seconds between each attempt. If the result of any attempt has "all systems go" in its stdout, the task succeeds. The default value for "retries" is 3 and "delay" is 5.
+
+To see the results of individual retries, run the play with ``-vv``.
+
+When you run a task with ``until`` and register the result as a variable, the registered variable will include a key called "attempts", which records the number of retries attempted for the task.
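+
+For example, a minimal sketch that reports how many attempts were needed, building on the registered ``result`` above::
+
+    - name: Report the number of attempts
+      ansible.builtin.debug:
+        msg: "Succeeded after {{ result.attempts }} attempt(s)"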
+
+.. note:: You must set the ``until`` parameter if you want a task to retry. If ``until`` is not defined, the value for the ``retries`` parameter is forced to 1.
+
+Looping over inventory
+----------------------
+
+To loop over your inventory, or just a subset of it, you can use a regular ``loop`` with the ``ansible_play_batch`` or ``groups`` variables::
+
+ - name: Show all the hosts in the inventory
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ groups['all'] }}"
+
+ - name: Show all the hosts in the current play
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ ansible_play_batch }}"
+
+There is also a specific lookup plugin ``inventory_hostnames`` that can be used like this::
+
+ - name: Show all the hosts in the inventory
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ query('inventory_hostnames', 'all') }}"
+
+    - name: Show all the hosts matching the pattern, that is, all except the group www
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ query('inventory_hostnames', 'all:!www') }}"
+
+More information on the patterns can be found in :ref:`intro_patterns`.
+
+.. _query_vs_lookup:
+
+Ensuring list input for ``loop``: using ``query`` rather than ``lookup``
+========================================================================
+
+The ``loop`` keyword requires a list as input, but the ``lookup`` keyword returns a string of comma-separated values by default. Ansible 2.5 introduced a new Jinja2 function named :ref:`query <query>` that always returns a list, offering a simpler interface and more predictable output from lookup plugins when using the ``loop`` keyword.
+
+You can force ``lookup`` to return a list to ``loop`` by using ``wantlist=True``, or you can use ``query`` instead.
+
+These examples do the same thing::
+
+ loop: "{{ query('inventory_hostnames', 'all') }}"
+
+ loop: "{{ lookup('inventory_hostnames', 'all', wantlist=True) }}"
+
+
+.. _loop_control:
+
+Adding controls to loops
+========================
+.. versionadded:: 2.1
+
+The ``loop_control`` keyword lets you manage your loops in useful ways.
+
+Limiting loop output with ``label``
+-----------------------------------
+.. versionadded:: 2.2
+
+When looping over complex data structures, the console output of your task can be enormous. To limit the displayed output, use the ``label`` directive with ``loop_control``::
+
+ - name: Create servers
+ digital_ocean:
+ name: "{{ item.name }}"
+ state: present
+ loop:
+ - name: server1
+ disks: 3gb
+ ram: 15Gb
+ network:
+ nic01: 100Gb
+ nic02: 10Gb
+ ...
+ loop_control:
+ label: "{{ item.name }}"
+
+The output of this task will display just the ``name`` field for each ``item`` instead of the entire contents of the multi-line ``{{ item }}`` variable.
+
+.. note:: This is for making console output more readable, not protecting sensitive data. If there is sensitive data in ``loop``, set ``no_log: yes`` on the task to prevent disclosure.
+
+Pausing within a loop
+---------------------
+.. versionadded:: 2.2
+
+To control the time (in seconds) between the execution of each item in a task loop, use the ``pause`` directive with ``loop_control``::
+
+ # main.yml
+ - name: Create servers, pause 3s before creating next
+ community.digitalocean.digital_ocean:
+ name: "{{ item }}"
+ state: present
+ loop:
+ - server1
+ - server2
+ loop_control:
+ pause: 3
+
+Tracking progress through a loop with ``index_var``
+---------------------------------------------------
+.. versionadded:: 2.5
+
+To keep track of where you are in a loop, use the ``index_var`` directive with ``loop_control``. This directive specifies a variable name to contain the current loop index::
+
+ - name: Count our fruit
+ ansible.builtin.debug:
+ msg: "{{ item }} with index {{ my_idx }}"
+ loop:
+ - apple
+ - banana
+ - pear
+ loop_control:
+ index_var: my_idx
+
+.. note:: ``index_var`` is 0-indexed.
+
+Defining inner and outer variable names with ``loop_var``
+---------------------------------------------------------
+.. versionadded:: 2.1
+
+You can nest two looping tasks using ``include_tasks``. However, by default Ansible sets the loop variable ``item`` for each loop. This means the inner, nested loop will overwrite the value of ``item`` from the outer loop.
+You can specify the name of the variable for each loop using ``loop_var`` with ``loop_control``::
+
+ # main.yml
+ - include_tasks: inner.yml
+ loop:
+ - 1
+ - 2
+ - 3
+ loop_control:
+ loop_var: outer_item
+
+ # inner.yml
+ - name: Print outer and inner items
+ ansible.builtin.debug:
+ msg: "outer item={{ outer_item }} inner item={{ item }}"
+ loop:
+ - a
+ - b
+ - c
+
+.. note:: If Ansible detects that the current loop is using a variable which has already been defined, it will raise an error to fail the task.
+
+Extended loop variables
+-----------------------
+.. versionadded:: 2.8
+
+As of Ansible 2.8 you can get extended loop information using the ``extended`` option of ``loop_control``. This option exposes the following information.
+
+========================== ===========
+Variable Description
+-------------------------- -----------
+``ansible_loop.allitems`` The list of all items in the loop
+``ansible_loop.index`` The current iteration of the loop. (1 indexed)
+``ansible_loop.index0`` The current iteration of the loop. (0 indexed)
+``ansible_loop.revindex`` The number of iterations from the end of the loop (1 indexed)
+``ansible_loop.revindex0`` The number of iterations from the end of the loop (0 indexed)
+``ansible_loop.first`` ``True`` if first iteration
+``ansible_loop.last`` ``True`` if last iteration
+``ansible_loop.length`` The number of items in the loop
+``ansible_loop.previtem`` The item from the previous iteration of the loop. Undefined during the first iteration.
+``ansible_loop.nextitem`` The item from the following iteration of the loop. Undefined during the last iteration.
+========================== ===========
+
+::
+
+ loop_control:
+ extended: yes
+
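+For example, a sketch that reports progress through the loop (the items are illustrative)::
+
+    - name: Show progress through the loop
+      ansible.builtin.debug:
+        msg: "item {{ ansible_loop.index }} of {{ ansible_loop.length }}: {{ item }}"
+      loop:
+        - apple
+        - banana
+        - pear
+      loop_control:
+        extended: yes
+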
+Accessing the name of your loop_var
+-----------------------------------
+.. versionadded:: 2.8
+
+As of Ansible 2.8 you can get the name of the value provided to ``loop_control.loop_var`` with the ``ansible_loop_var`` variable.
+
+When writing roles that allow loops, rather than dictating the required ``loop_var`` value, you can gather the value with::
+
+ "{{ lookup('vars', ansible_loop_var) }}"
+
+.. _migrating_to_loop:
+
+Migrating from with_X to loop
+=============================
+
+.. include:: shared_snippets/with2loop.txt
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_module_defaults.rst b/docs/docsite/rst/user_guide/playbooks_module_defaults.rst
new file mode 100644
index 00000000..f1260e22
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_module_defaults.rst
@@ -0,0 +1,143 @@
+.. _module_defaults:
+
+Module defaults
+===============
+
+If you frequently call the same module with the same arguments, it can be useful to define default arguments for that particular module using the ``module_defaults`` attribute.
+
+Here is a basic example::
+
+ - hosts: localhost
+ module_defaults:
+ ansible.builtin.file:
+ owner: root
+ group: root
+ mode: 0755
+ tasks:
+ - name: Create file1
+ ansible.builtin.file:
+ state: touch
+ path: /tmp/file1
+
+ - name: Create file2
+ ansible.builtin.file:
+ state: touch
+ path: /tmp/file2
+
+ - name: Create file3
+ ansible.builtin.file:
+ state: touch
+ path: /tmp/file3
+
+The ``module_defaults`` attribute can be used at the play, block, and task level. Any module arguments explicitly specified in a task will override any established default for that module argument::
+
+ - block:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "Different message"
+ module_defaults:
+ ansible.builtin.debug:
+ msg: "Default message"
+
+You can remove any previously established defaults for a module by specifying an empty dict::
+
+ - name: Create file1
+ ansible.builtin.file:
+ state: touch
+ path: /tmp/file1
+ module_defaults:
+ file: {}
+
+.. note::
+ Any module defaults set at the play level (and block/task level when using ``include_role`` or ``import_role``) will apply to any roles used, which may cause unexpected behavior in the role.
+
+Here are some more realistic use cases for this feature.
+
+Interacting with an API that requires auth::
+
+ - hosts: localhost
+ module_defaults:
+ ansible.builtin.uri:
+ force_basic_auth: true
+ user: some_user
+ password: some_password
+ tasks:
+ - name: Interact with a web service
+ ansible.builtin.uri:
+ url: http://some.api.host/v1/whatever1
+
+ - name: Interact with a web service
+ ansible.builtin.uri:
+ url: http://some.api.host/v1/whatever2
+
+ - name: Interact with a web service
+ ansible.builtin.uri:
+ url: http://some.api.host/v1/whatever3
+
+Setting a default AWS region for specific EC2-related modules::
+
+ - hosts: localhost
+ vars:
+ my_region: us-west-2
+ module_defaults:
+ amazon.aws.ec2:
+ region: '{{ my_region }}'
+ community.aws.ec2_instance_info:
+ region: '{{ my_region }}'
+ amazon.aws.ec2_vpc_net_info:
+ region: '{{ my_region }}'
+
+.. _module_defaults_groups:
+
+Module defaults groups
+----------------------
+
+.. versionadded:: 2.7
+
+Ansible 2.7 adds a preview-status feature to group together modules that share common sets of parameters. This makes it easier to author playbooks making heavy use of API-based modules such as cloud modules.
+
++---------+---------------------------+-----------------+
+| Group | Purpose | Ansible Version |
++=========+===========================+=================+
+| aws | Amazon Web Services | 2.7 |
++---------+---------------------------+-----------------+
+| azure | Azure | 2.7 |
++---------+---------------------------+-----------------+
+| gcp | Google Cloud Platform | 2.7 |
++---------+---------------------------+-----------------+
+| k8s | Kubernetes | 2.8 |
++---------+---------------------------+-----------------+
+| os | OpenStack | 2.8 |
++---------+---------------------------+-----------------+
+| acme | ACME | 2.10 |
++---------+---------------------------+-----------------+
+| docker* | Docker | 2.10 |
++---------+---------------------------+-----------------+
+| ovirt | oVirt | 2.10 |
++---------+---------------------------+-----------------+
+| vmware | VMware | 2.10 |
++---------+---------------------------+-----------------+
+
+* The :ref:`docker_stack <docker_stack_module>` module is not included in the ``docker`` defaults group.
+
+Use the groups with ``module_defaults`` by prefixing the group name with ``group/`` - for example ``group/aws``.
+
+In a playbook, you can set module defaults for whole groups of modules, such as setting a common AWS region.
+
+.. code-block:: YAML
+
+ # example_play.yml
+ - hosts: localhost
+ module_defaults:
+ group/aws:
+ region: us-west-2
+ tasks:
+ - name: Get info
+ aws_s3_bucket_info:
+
+ # now the region is shared between both info modules
+
+ - name: Get info
+ ec2_ami_info:
+ filters:
+ name: 'RHEL*7.5*'
diff --git a/docs/docsite/rst/user_guide/playbooks_prompts.rst b/docs/docsite/rst/user_guide/playbooks_prompts.rst
new file mode 100644
index 00000000..856f7037
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_prompts.rst
@@ -0,0 +1,116 @@
+.. _playbooks_prompts:
+
+**************************
+Interactive input: prompts
+**************************
+
+If you want your playbook to prompt the user for certain input, add a 'vars_prompt' section. Prompting the user for variables lets you avoid recording sensitive data like passwords. Prompts also add flexibility: for example, if you use one playbook across multiple software releases, you could prompt for the particular release version.
+
+.. contents::
+ :local:
+
+Here is a basic example::
+
+ ---
+ - hosts: all
+ vars_prompt:
+
+ - name: username
+ prompt: What is your username?
+ private: no
+
+ - name: password
+ prompt: What is your password?
+
+ tasks:
+
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: 'Logging in as {{ username }}'
+
+The user input is hidden by default but it can be made visible by setting ``private: no``.
+
+.. note::
+ Prompts for individual ``vars_prompt`` variables will be skipped for any variable that is already defined through the command line ``--extra-vars`` option, or when running from a non-interactive session (such as cron or Ansible Tower). See :ref:`passing_variables_on_the_command_line`.
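+
+For example, supplying the prompted variables non-interactively (a sketch; the values are illustrative and match the play above)::
+
+    ansible-playbook playbook.yml --extra-vars "username=alice password=wonderland"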
+
+If you have a variable that changes infrequently, you can provide a default value that can be overridden::
+
+ vars_prompt:
+
+ - name: release_version
+ prompt: Product release version
+ default: "1.0"
+
+Encrypting values supplied by ``vars_prompt``
+---------------------------------------------
+
+You can encrypt the entered value so you can use it, for instance, with the user module to define a password::
+
+ vars_prompt:
+
+ - name: my_password2
+ prompt: Enter password2
+ private: yes
+ encrypt: sha512_crypt
+ confirm: yes
+ salt_size: 7
+
+If you have `Passlib <https://passlib.readthedocs.io/en/stable/>`_ installed, you can use any crypt scheme the library supports:
+
+- *des_crypt* - DES Crypt
+- *bsdi_crypt* - BSDi Crypt
+- *bigcrypt* - BigCrypt
+- *crypt16* - Crypt16
+- *md5_crypt* - MD5 Crypt
+- *bcrypt* - BCrypt
+- *sha1_crypt* - SHA-1 Crypt
+- *sun_md5_crypt* - Sun MD5 Crypt
+- *sha256_crypt* - SHA-256 Crypt
+- *sha512_crypt* - SHA-512 Crypt
+- *apr_md5_crypt* - Apache's MD5-Crypt variant
+- *phpass* - PHPass' Portable Hash
+- *pbkdf2_digest* - Generic PBKDF2 Hashes
+- *cta_pbkdf2_sha1* - Cryptacular's PBKDF2 hash
+- *dlitz_pbkdf2_sha1* - Dwayne Litzenberger's PBKDF2 hash
+- *scram* - SCRAM Hash
+- *bsd_nthash* - FreeBSD's MCF-compatible nthash encoding
+
+The only parameters accepted are 'salt' or 'salt_size'. You can use your own salt by defining
+'salt', or have one generated automatically using 'salt_size'. By default Ansible generates a salt
+of size 8.
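+
+For example, to supply your own salt (a minimal sketch; the salt value is illustrative)::
+
+    vars_prompt:
+
+      - name: my_password
+        prompt: Enter password
+        private: yes
+        encrypt: sha512_crypt
+        confirm: yes
+        salt: '1234567890123456'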
+
+.. versionadded:: 2.7
+
+If you do not have Passlib installed, Ansible uses the `crypt <https://docs.python.org/2/library/crypt.html>`_ library as a fallback. Depending on your platform, Ansible supports at most the following four crypt schemes:
+
+- *bcrypt* - BCrypt
+- *md5_crypt* - MD5 Crypt
+- *sha256_crypt* - SHA-256 Crypt
+- *sha512_crypt* - SHA-512 Crypt
+
+.. versionadded:: 2.8
+.. _unsafe_prompts:
+
+Allowing special characters in ``vars_prompt`` values
+-----------------------------------------------------
+
+Some special characters, such as ``{`` and ``%``, can create templating errors. If you need to accept special characters, use the ``unsafe`` option::
+
+ vars_prompt:
+ - name: my_password_with_weird_chars
+ prompt: Enter password
+ unsafe: yes
+ private: yes
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_python_version.rst b/docs/docsite/rst/user_guide/playbooks_python_version.rst
new file mode 100644
index 00000000..60821b37
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_python_version.rst
@@ -0,0 +1,64 @@
+.. _pb-py-compat:
+
+********************
+Python3 in templates
+********************
+
+Ansible uses Jinja2 to leverage Python data types and standard functions in templates and variables.
+You can use these data types and standard functions to perform a rich set of operations on your data. However,
+if you use templates, you must be aware of differences between Python versions.
+
+These topics help you design templates that work on both Python2 and Python3. They might also help if you are upgrading from Python2 to Python3. Upgrading within Python2 or Python3 does not usually introduce changes that affect Jinja2 templates.
+
+.. _pb-py-compat-dict-views:
+
+Dictionary views
+================
+
+In Python2, the :meth:`dict.keys`, :meth:`dict.values`, and :meth:`dict.items`
+methods return a list. Jinja2 returns that to Ansible via a string
+representation that Ansible can turn back into a list.
+
+In Python3, those methods return a :ref:`dictionary view <python3:dict-views>` object. The
+string representation that Jinja2 returns for dictionary views cannot be parsed back
+into a list by Ansible. It is, however, easy to make this portable by
+using the :func:`list <jinja2:list>` filter whenever using :meth:`dict.keys`,
+:meth:`dict.values`, or :meth:`dict.items`::
+
+ vars:
+ hosts:
+ testhost1: 127.0.0.2
+ testhost2: 127.0.0.3
+ tasks:
+ - debug:
+ msg: '{{ item }}'
+ # Only works with Python 2
+ #loop: "{{ hosts.keys() }}"
+ # Works with both Python 2 and Python 3
+ loop: "{{ hosts.keys() | list }}"
+
+.. _pb-py-compat-iteritems:
+
+dict.iteritems()
+================
+
+Python2 dictionaries have :meth:`~dict.iterkeys`, :meth:`~dict.itervalues`, and :meth:`~dict.iteritems` methods.
+
+Python3 dictionaries do not have these methods. Use :meth:`dict.keys`, :meth:`dict.values`, and :meth:`dict.items` to make your playbooks and templates compatible with both Python2 and Python3::
+
+ vars:
+ hosts:
+ testhost1: 127.0.0.2
+ testhost2: 127.0.0.3
+ tasks:
+ - debug:
+ msg: '{{ item }}'
+ # Only works with Python 2
+ #loop: "{{ hosts.iteritems() }}"
+ # Works with both Python 2 and Python 3
+ loop: "{{ hosts.items() | list }}"
+
+.. seealso::
+ * The :ref:`pb-py-compat-dict-views` entry for information on
+ why the :func:`list filter <jinja2:list>` is necessary
+ here.
diff --git a/docs/docsite/rst/user_guide/playbooks_reuse.rst b/docs/docsite/rst/user_guide/playbooks_reuse.rst
new file mode 100644
index 00000000..3e80f5c2
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_reuse.rst
@@ -0,0 +1,201 @@
+.. _playbooks_reuse:
+
+**************************
+Re-using Ansible artifacts
+**************************
+
+You can write a simple playbook in one very large file, and most users learn the one-file approach first. However, breaking tasks up into different files is an excellent way to organize complex sets of tasks and reuse them. Smaller, more distributed artifacts let you re-use the same variables, tasks, and plays in multiple playbooks to address different use cases. You can use distributed artifacts across multiple parent playbooks or even multiple times within one playbook. For example, you might want to update your customer database as part of several different playbooks. If you put all the tasks related to updating your database in a tasks file, you can re-use them in many playbooks while only maintaining them in one place.
+
+.. contents::
+ :local:
+
+Creating re-usable files and roles
+==================================
+
+Ansible offers four distributed, re-usable artifacts: variables files, task files, playbooks, and roles.
+
+ - A variables file contains only variables.
+ - A task file contains only tasks.
+ - A playbook contains at least one play, and may contain variables, tasks, and other content. You can re-use tightly focused playbooks, but you can only re-use them statically, not dynamically.
+ - A role contains a set of related tasks, variables, defaults, handlers, and even modules or other plugins in a defined file-tree. Unlike variables files, task files, or playbooks, roles can be easily uploaded and shared via Ansible Galaxy. See :ref:`playbooks_reuse_roles` for details about creating and using roles.
+
+.. versionadded:: 2.4
+
+Re-using playbooks
+==================
+
+You can incorporate multiple playbooks into a master playbook. However, you can only use imports to re-use playbooks. For example:
+
+.. code-block:: yaml
+
+ - import_playbook: webservers.yml
+ - import_playbook: databases.yml
+
+Importing incorporates playbooks in other playbooks statically. Ansible runs the plays and tasks in each imported playbook in the order they are listed, just as if they had been defined directly in the master playbook.
+
+Re-using files and roles
+========================
+
+Ansible offers two ways to re-use files and roles in a playbook: dynamic and static.
+
+ - For dynamic re-use, add an ``include_*`` task in the tasks section of a play:
+
+ - :ref:`include_role <include_role_module>`
+ - :ref:`include_tasks <include_tasks_module>`
+ - :ref:`include_vars <include_vars_module>`
+
+ - For static re-use, add an ``import_*`` task in the tasks section of a play:
+
+ - :ref:`import_role <import_role_module>`
+ - :ref:`import_tasks <import_tasks_module>`
+
+Task include and import statements can be used at arbitrary depth.
+
+You can still use the bare :ref:`roles <roles_keyword>` keyword at the play level to incorporate a role in a playbook statically. However, the bare :ref:`include <include_module>` keyword, once used for both task files and playbook-level includes, is now deprecated.
+
+Includes: dynamic re-use
+------------------------
+
+Including roles, tasks, or variables adds them to a playbook dynamically. Ansible processes included files and roles as they come up in a playbook, so included tasks can be affected by the results of earlier tasks within the top-level playbook. Included roles and tasks are similar to handlers - they may or may not run, depending on the results of other tasks in the top-level playbook.
+
+The primary advantage of using ``include_*`` statements is looping. When a loop is used with an include, the included tasks or role will be executed once for each item in the loop.
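+
+For example, a sketch that runs an included task file once per user (the file name and variable are illustrative)::
+
+    - name: Add several users, running the included file once per item
+      include_tasks: add_user.yml
+      vars:
+        username: "{{ item }}"
+      loop:
+        - testuser1
+        - testuser2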
+
+You can pass variables into includes. See :ref:`ansible_variable_precedence` for more details on variable inheritance and precedence.
+
+Imports: static re-use
+----------------------
+
+Importing roles, tasks, or playbooks adds them to a playbook statically. Ansible pre-processes imported files and roles before it runs any tasks in a playbook, so imported content is never affected by other tasks within the top-level playbook.
+
+You can pass variables to imports. You must pass variables if you want to run an imported file more than once in a playbook. For example:
+
+.. code-block:: yaml
+
+ tasks:
+ - import_tasks: wordpress.yml
+ vars:
+ wp_user: timmy
+
+ - import_tasks: wordpress.yml
+ vars:
+ wp_user: alice
+
+ - import_tasks: wordpress.yml
+ vars:
+ wp_user: bob
+
+See :ref:`ansible_variable_precedence` for more details on variable inheritance and precedence.
+
+.. _dynamic_vs_static:
+
+Comparing includes and imports: dynamic and static re-use
+------------------------------------------------------------
+
+Each approach to re-using distributed Ansible artifacts has advantages and limitations. You may choose dynamic re-use for some playbooks and static re-use for others. Although you can use both dynamic and static re-use in a single playbook, it is best to select one approach per playbook. Mixing static and dynamic re-use can introduce difficult-to-diagnose bugs into your playbooks. This table summarizes the main differences so you can choose the best approach for each playbook you create.
+
+.. table::
+ :class: documentation-table
+
+ ========================= ======================================== ========================================
+ .. Include_* Import_*
+ ========================= ======================================== ========================================
+ Type of re-use Dynamic Static
+
+ When processed At runtime, when encountered Pre-processed during playbook parsing
+
+ Task or play All includes are tasks ``import_playbook`` cannot be a task
+
+ Task options Apply only to include task itself Apply to all child tasks in import
+
+ Calling from loops Executed once for each loop item Cannot be used in a loop
+
+ Using ``--list-tags`` Tags within includes not listed All tags appear with ``--list-tags``
+
+ Using ``--list-tasks`` Tasks within includes not listed All tasks appear with ``--list-tasks``
+
+ Notifying handlers Cannot trigger handlers within includes Can trigger individual imported handlers
+
+ Using ``--start-at-task`` Cannot start at tasks within includes Can start at imported tasks
+
+ Using inventory variables Can ``include_*: {{ inventory_var }}`` Cannot ``import_*: {{ inventory_var }}``
+
+ With playbooks No ``include_playbook`` Can import full playbooks
+
+ With variables files Can include variables files Use ``vars_files:`` to import variables
+
+ ========================= ======================================== ========================================
+
+Re-using tasks as handlers
+==========================
+
+You can also use includes and imports in the :ref:`handlers` section of a playbook. For instance, if you want to define how to restart Apache, you only have to do that once for all of your playbooks. You might make a ``restarts.yml`` file that looks like:
+
+.. code-block:: yaml
+
+ # restarts.yml
+ - name: Restart apache
+ ansible.builtin.service:
+ name: apache
+ state: restarted
+
+ - name: Restart mysql
+ ansible.builtin.service:
+ name: mysql
+ state: restarted
+
+You can trigger handlers from either an import or an include, but the procedure is different for each method of re-use. If you include the file, you must notify the include itself, which triggers all the tasks in ``restarts.yml``. If you import the file, you must notify the individual task(s) within ``restarts.yml``. You can mix direct tasks and handlers with included or imported tasks and handlers.
+
+Triggering included (dynamic) handlers
+--------------------------------------
+
+Includes are executed at run-time, so the name of the include exists during play execution, but the included tasks do not exist until the include itself is triggered. To use the ``Restart apache`` task with dynamic re-use, refer to the name of the include itself. This approach triggers all tasks in the included file as handlers. For example, with the task file shown above:
+
+.. code-block:: yaml
+
+    - name: Trigger an included (dynamic) handler
+      hosts: localhost
+      handlers:
+        - name: Restart services
+          include_tasks: restarts.yml
+      tasks:
+        - command: "true"
+          notify: Restart services
+
+Triggering imported (static) handlers
+-------------------------------------
+
+Imports are processed before the play begins, so the name of the import no longer exists during play execution, but the names of the individual imported tasks do exist. To use the ``Restart apache`` task with static re-use, refer to the name of each task or tasks within the imported file. For example, with the task file shown above:
+
+.. code-block:: yaml
+
+    - name: Trigger an imported (static) handler
+      hosts: localhost
+      handlers:
+        - name: Restart services
+          import_tasks: restarts.yml
+      tasks:
+        - command: "true"
+          notify: Restart apache
+        - command: "true"
+          notify: Restart mysql
+
+.. seealso::
+
+ :ref:`utilities_modules`
+ Documentation of the ``include*`` and ``import*`` modules discussed here.
+ :ref:`working_with_playbooks`
+ Review the basic Playbook language features
+ :ref:`playbooks_variables`
+ All about variables in playbooks
+ :ref:`playbooks_conditionals`
+ Conditionals in playbooks
+ :ref:`playbooks_loops`
+ Loops in playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`ansible_galaxy`
+ How to share roles on galaxy, role management
+ `GitHub Ansible examples <https://github.com/ansible/ansible-examples>`_
+ Complete playbook files from the GitHub project source
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/playbooks_reuse_includes.rst b/docs/docsite/rst/user_guide/playbooks_reuse_includes.rst
new file mode 100644
index 00000000..ecce954a
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_reuse_includes.rst
@@ -0,0 +1,32 @@
+:orphan:
+
+.. _playbooks_reuse_includes:
+
+Including and importing
+=======================
+
+The content on this page has been moved to :ref:`playbooks_reuse`.
+
+
+.. seealso::
+
+ :ref:`yaml_syntax`
+ Learn about YAML syntax
+ :ref:`working_with_playbooks`
+ Review the basic Playbook language features
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`playbooks_variables`
+ All about variables in playbooks
+ :ref:`playbooks_conditionals`
+ Conditionals in playbooks
+ :ref:`playbooks_loops`
+ Loops in playbooks
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_modules`
+ Learn how to extend Ansible by writing your own modules
+ `GitHub Ansible examples <https://github.com/ansible/ansible-examples>`_
+ Complete playbook files from the GitHub project source
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/playbooks_reuse_roles.rst b/docs/docsite/rst/user_guide/playbooks_reuse_roles.rst
new file mode 100644
index 00000000..56093d3d
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_reuse_roles.rst
@@ -0,0 +1,490 @@
+.. _playbooks_reuse_roles:
+
+*****
+Roles
+*****
+
+Roles let you automatically load related vars_files, tasks, handlers, and other Ansible artifacts based on a known file structure. Once you group your content in roles, you can easily reuse them and share them with other users.
+
+.. contents::
+ :local:
+
+Role directory structure
+========================
+
+An Ansible role has a defined directory structure with eight main standard directories. You must include at least one of these directories in each role. You can omit any directories the role does not use. For example:
+
+.. code-block:: text
+
+ # playbooks
+ site.yml
+ webservers.yml
+ fooservers.yml
+ roles/
+ common/
+ tasks/
+ handlers/
+ library/
+ files/
+ templates/
+ vars/
+ defaults/
+ meta/
+ webservers/
+ tasks/
+ defaults/
+ meta/
+
+By default Ansible will look in most directories within a role for a ``main.yml`` file for relevant content (also ``main.yaml`` and ``main``):
+
+- ``tasks/main.yml`` - the main list of tasks that the role executes.
+- ``handlers/main.yml`` - handlers, which may be used within or outside this role.
+- ``library/my_module.py`` - modules, which may be used within this role (see :ref:`embedding_modules_and_plugins_in_roles` for more information).
+- ``defaults/main.yml`` - default variables for the role (see :ref:`playbooks_variables` for more information). These variables have the lowest priority of any variables available, and can be easily overridden by any other variable, including inventory variables.
+- ``vars/main.yml`` - other variables for the role (see :ref:`playbooks_variables` for more information).
+- ``files/`` - files that the role deploys.
+- ``templates/`` - templates that the role deploys.
+- ``meta/main.yml`` - metadata for the role, including role dependencies.
+
+You can add other YAML files in some directories. For example, you can place platform-specific tasks in separate files and refer to them in the ``tasks/main.yml`` file:
+
+.. code-block:: yaml
+
+ # roles/example/tasks/main.yml
+ - name: Install the correct web server for RHEL
+ import_tasks: redhat.yml
+ when: ansible_facts['os_family']|lower == 'redhat'
+
+ - name: Install the correct web server for Debian
+ import_tasks: debian.yml
+ when: ansible_facts['os_family']|lower == 'debian'
+
+ # roles/example/tasks/redhat.yml
+ - name: Install web server
+ ansible.builtin.yum:
+ name: "httpd"
+ state: present
+
+ # roles/example/tasks/debian.yml
+ - name: Install web server
+ ansible.builtin.apt:
+ name: "apache2"
+ state: present
+
+Roles may also include modules and other plugin types in a directory called ``library``. For more information, please refer to :ref:`embedding_modules_and_plugins_in_roles` below.
+
+.. _role_search_path:
+
+Storing and finding roles
+=========================
+
+By default, Ansible looks for roles in two locations:
+
+- in a directory called ``roles/``, relative to the playbook file
+- in ``/etc/ansible/roles``
+
+If you store your roles in a different location, set the :ref:`roles_path <DEFAULT_ROLES_PATH>` configuration option so Ansible can find your roles. Checking shared roles into a single location makes them easier to use in multiple playbooks. See :ref:`intro_configuration` for details about managing settings in ansible.cfg.
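+
+For example, in ``ansible.cfg`` (a sketch; the paths are illustrative)::
+
+    [defaults]
+    roles_path = /opt/ansible/roles:~/ansible/roles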
+
+Alternatively, you can call a role with a fully qualified path:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - role: '/path/to/my/roles/common'
+
+Using roles
+===========
+
+You can use roles in three ways:
+
+- at the play level with the ``roles`` option: This is the classic way of using roles in a play.
+- at the tasks level with ``include_role``: You can reuse roles dynamically anywhere in the ``tasks`` section of a play using ``include_role``.
+- at the tasks level with ``import_role``: You can reuse roles statically anywhere in the ``tasks`` section of a play using ``import_role``.
+
+.. _roles_keyword:
+
+Using roles at the play level
+-----------------------------
+
+The classic (original) way to use roles is with the ``roles`` option for a given play:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - common
+ - webservers
+
+When you use the ``roles`` option at the play level, for each role 'x':
+
+- If roles/x/tasks/main.yml exists, Ansible adds the tasks in that file to the play.
+- If roles/x/handlers/main.yml exists, Ansible adds the handlers in that file to the play.
+- If roles/x/vars/main.yml exists, Ansible adds the variables in that file to the play.
+- If roles/x/defaults/main.yml exists, Ansible adds the variables in that file to the play.
+- If roles/x/meta/main.yml exists, Ansible adds any role dependencies in that file to the list of roles.
+- Any copy, script, template or include tasks (in the role) can reference files in roles/x/{files,templates,tasks}/ (directory depends on task) without specifying them by relative or absolute path.
+
+When you use the ``roles`` option at the play level, Ansible treats the roles as static imports and processes them during playbook parsing. Ansible executes your playbook in this order:
+
+- Any ``pre_tasks`` defined in the play.
+- Any handlers triggered by pre_tasks.
+- Each role listed in ``roles:``, in the order listed. Any role dependencies defined in the role's ``meta/main.yml`` run first, subject to tag filtering and conditionals. See :ref:`role_dependencies` for more details.
+- Any ``tasks`` defined in the play.
+- Any handlers triggered by the roles or tasks.
+- Any ``post_tasks`` defined in the play.
+- Any handlers triggered by post_tasks.
+
+.. note::
+ If using tags with tasks in a role, be sure to also tag your pre_tasks, post_tasks, and role dependencies and pass those along as well, especially if the pre/post tasks and role dependencies are used for monitoring outage window control or load balancing. See :ref:`tags` for details on adding and using tags.
+
+You can pass other keywords to the ``roles`` option:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - common
+ - role: foo_app_instance
+ vars:
+ dir: '/opt/a'
+ app_port: 5000
+ tags: typeA
+ - role: foo_app_instance
+ vars:
+ dir: '/opt/b'
+ app_port: 5001
+ tags: typeB
+
+When you add a tag to the ``role`` option, Ansible applies the tag to ALL tasks within the role.
+
+When using ``vars:`` within the ``roles:`` section of a playbook, the variables are added to the play variables, making them available to all tasks within the play before and after the role. This behavior can be changed by :ref:`DEFAULT_PRIVATE_ROLE_VARS`.
+
+Including roles: dynamic reuse
+------------------------------
+
+You can reuse roles dynamically anywhere in the ``tasks`` section of a play using ``include_role``. While roles added in a ``roles`` section run before any other tasks in a playbook, included roles run in the order they are defined. If there are other tasks before an ``include_role`` task, the other tasks will run first.
+
+To include a role:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "this task runs before the example role"
+
+ - name: Include the example role
+ include_role:
+ name: example
+
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "this task runs after the example role"
+
+You can pass other keywords, including variables and tags, when including roles:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Include the foo_app_instance role
+ include_role:
+ name: foo_app_instance
+ vars:
+ dir: '/opt/a'
+ app_port: 5000
+ tags: typeA
+ ...
+
+When you add a :ref:`tag <tags>` to an ``include_role`` task, Ansible applies the tag *only* to the include itself. This means you can pass ``--tags`` to run only selected tasks from the role, if those tasks themselves have the same tag as the include statement. See :ref:`selective_reuse` for details.
+
+You can conditionally include a role:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Include the some_role role
+ include_role:
+ name: some_role
+ when: "ansible_facts['os_family'] == 'RedHat'"
+
+Importing roles: static reuse
+-----------------------------
+
+You can reuse roles statically anywhere in the ``tasks`` section of a play using ``import_role``. The behavior is the same as using the ``roles`` keyword. For example:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "before we run our role"
+
+ - name: Import the example role
+ import_role:
+ name: example
+
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "after we ran our role"
+
+You can pass other keywords, including variables and tags, when importing roles:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Import the foo_app_instance role
+ import_role:
+ name: foo_app_instance
+ vars:
+ dir: '/opt/a'
+ app_port: 5000
+ ...
+
+When you add a tag to an ``import_role`` statement, Ansible applies the tag to *all* tasks within the role. See :ref:`tag_inheritance` for details.
+
+.. _run_role_twice:
+
+Running a role multiple times in one playbook
+=============================================
+
+Ansible only executes each role once, even if you define it multiple times, unless the parameters defined on the role are different for each definition. For example, Ansible only runs the role ``foo`` once in a play like this:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - foo
+ - bar
+ - foo
+
+You have two options to force Ansible to run a role more than once.
+
+Passing different parameters
+----------------------------
+
+You can pass different parameters in each role definition:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - { role: foo, vars: { message: "first" } }
+ - { role: foo, vars: { message: "second" } }
+
+or
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - role: foo
+ vars:
+ message: "first"
+ - role: foo
+ vars:
+ message: "second"
+
+In this example, because each role definition has different parameters, Ansible runs ``foo`` twice.
+
+Using ``allow_duplicates: true``
+--------------------------------
+
+Add ``allow_duplicates: true`` to the ``meta/main.yml`` file for the role:
+
+.. code-block:: yaml
+
+ # playbook.yml
+ ---
+ - hosts: webservers
+ roles:
+ - foo
+ - foo
+
+ # roles/foo/meta/main.yml
+ ---
+ allow_duplicates: true
+
+In this example, Ansible runs ``foo`` twice because we have explicitly enabled it to do so.
+
+.. _role_dependencies:
+
+Using role dependencies
+=======================
+
+Role dependencies let you automatically pull in other roles when using a role. Ansible does not execute role dependencies when you include or import a role. You must use the ``roles`` keyword if you want Ansible to execute role dependencies.
+
+Role dependencies are stored in the ``meta/main.yml`` file within the role directory. This file should contain a list of roles and parameters to insert before the specified role. For example:
+
+.. code-block:: yaml
+
+ # roles/myapp/meta/main.yml
+ ---
+ dependencies:
+ - role: common
+ vars:
+ some_parameter: 3
+ - role: apache
+ vars:
+ apache_port: 80
+ - role: postgres
+ vars:
+ dbname: blarg
+ other_parameter: 12
+
+Ansible always executes role dependencies before the role that includes them. Ansible executes recursive role dependencies as well. If one role depends on a second role, and the second role depends on a third role, Ansible executes the third role, then the second role, then the first role.
+
+Running role dependencies multiple times in one playbook
+--------------------------------------------------------
+
+Ansible treats duplicate role dependencies the same way it treats duplicate roles listed under ``roles:``: it only executes them once, even if defined multiple times, unless the parameters, tags, or ``when`` clause defined on the role differ for each definition. If two roles in a playbook both list a third role as a dependency, Ansible only runs that role dependency once, unless you pass different parameters, tags, or ``when`` clauses, or use ``allow_duplicates: true`` in the dependent (third) role. See :ref:`Galaxy role dependencies <galaxy_dependencies>` for more details.
+
+For example, a role named ``car`` depends on a role named ``wheel`` as follows:
+
+.. code-block:: yaml
+
+ ---
+ dependencies:
+ - role: wheel
+ vars:
+ n: 1
+ - role: wheel
+ vars:
+ n: 2
+ - role: wheel
+ vars:
+ n: 3
+ - role: wheel
+ vars:
+ n: 4
+
+And the ``wheel`` role depends on two roles: ``tire`` and ``brake``. The ``meta/main.yml`` for wheel would then contain the following:
+
+.. code-block:: yaml
+
+ ---
+ dependencies:
+ - role: tire
+ - role: brake
+
+And the ``meta/main.yml`` for ``tire`` and ``brake`` would contain the following:
+
+.. code-block:: yaml
+
+ ---
+ allow_duplicates: true
+
+The resulting order of execution would be as follows:
+
+.. code-block:: text
+
+ tire(n=1)
+ brake(n=1)
+ wheel(n=1)
+ tire(n=2)
+ brake(n=2)
+ wheel(n=2)
+ ...
+ car
+
+To use ``allow_duplicates: true`` with role dependencies, you must specify it for the dependent role, not for the parent role. In the example above, ``allow_duplicates: true`` appears in the ``meta/main.yml`` of the ``tire`` and ``brake`` roles. The ``wheel`` role does not require ``allow_duplicates: true``, because each instance defined by ``car`` uses different parameter values.
+
+.. note::
+ See :ref:`playbooks_variables` for details on how Ansible chooses among variable values defined in different places (variable inheritance and scope).
+
+.. _embedding_modules_and_plugins_in_roles:
+
+Embedding modules and plugins in roles
+======================================
+
+If you write a custom module (see :ref:`developing_modules`) or a plugin (see :ref:`developing_plugins`), you might wish to distribute it as part of a role. For example, if you write a module that helps configure your company's internal software, and you want other people in your organization to use this module, but you do not want to tell everyone how to configure their Ansible library path, you can include the module in your internal_config role.
+
+To add a module or a plugin to a role, create a directory named 'library' alongside the 'tasks' and 'handlers' directories of the role, then place the module directly inside the 'library' directory.
+
+Assuming you had this:
+
+.. code-block:: text
+
+ roles/
+ my_custom_modules/
+ library/
+ module1
+ module2
+
+The module will be usable in the role itself, as well as any roles that are called *after* this role, as follows:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - my_custom_modules
+ - some_other_role_using_my_custom_modules
+ - yet_another_role_using_my_custom_modules
+
+If necessary, you can also embed a module in a role to modify a module in Ansible's core distribution. For example, you can use the development version of a particular module before it is released in production releases by copying the module and embedding the copy in a role. Use this approach with caution, as API signatures may change in core components, and this workaround is not guaranteed to work.
+
+The same mechanism can be used to embed and distribute plugins in a role, using the same schema. For example, for a filter plugin:
+
+.. code-block:: text
+
+ roles/
+ my_custom_filter/
+            filter_plugins/
+ filter1
+ filter2
+
+These filters can then be used in a Jinja template in any role called after 'my_custom_filter'.
+
+Sharing roles: Ansible Galaxy
+=============================
+
+`Ansible Galaxy <https://galaxy.ansible.com>`_ is a free site for finding, downloading, rating, and reviewing all kinds of community-developed Ansible roles and can be a great way to get a jumpstart on your automation projects.
+
+The client ``ansible-galaxy`` is included in Ansible. The Galaxy client allows you to download roles from Ansible Galaxy, and also provides an excellent default framework for creating your own roles.
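+
+For example (the role and skeleton names are illustrative)::
+
+    # download a role from Galaxy
+    ansible-galaxy install geerlingguy.apache
+
+    # create the default directory skeleton for a new role
+    ansible-galaxy init my_new_role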
+
+Read the `Ansible Galaxy documentation <https://galaxy.ansible.com/docs/>`_ page for more information.
+
+.. seealso::
+
+ :ref:`ansible_galaxy`
+ How to create new roles, share roles on Galaxy, role management
+ :ref:`yaml_syntax`
+ Learn about YAML syntax
+ :ref:`working_with_playbooks`
+ Review the basic Playbook language features
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`playbooks_variables`
+ Variables in playbooks
+ :ref:`playbooks_conditionals`
+ Conditionals in playbooks
+ :ref:`playbooks_loops`
+ Loops in playbooks
+ :ref:`tags`
+ Using tags to select or skip roles/tasks in long playbooks
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_modules`
+ Extending Ansible by writing your own modules
+ `GitHub Ansible examples <https://github.com/ansible/ansible-examples>`_
+ Complete playbook files from the GitHub project source
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/playbooks_roles.rst b/docs/docsite/rst/user_guide/playbooks_roles.rst
new file mode 100644
index 00000000..f79e2308
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_roles.rst
@@ -0,0 +1,19 @@
+:orphan:
+
+Playbook Roles and Include Statements
+=====================================
+
+.. contents:: Topics
+
+
+The documentation regarding roles and includes for playbooks has moved. Its new location is here: :ref:`playbooks_reuse`. Please update any links you may have made directly to this page.
+
+.. seealso::
+
+ :ref:`ansible_galaxy`
+ How to share roles on galaxy, role management
+ :ref:`working_with_playbooks`
+ Review the basic Playbook language features
+ :ref:`playbooks_reuse`
+ Creating reusable Playbooks.
+
diff --git a/docs/docsite/rst/user_guide/playbooks_special_topics.rst b/docs/docsite/rst/user_guide/playbooks_special_topics.rst
new file mode 100644
index 00000000..5df72c11
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_special_topics.rst
@@ -0,0 +1,8 @@
+:orphan:
+
+.. _playbooks_special_topics:
+
+Advanced playbooks features
+===========================
+
+This page is obsolete. Refer to the :ref:`main User Guide index page <user_guide_index>` for links to all playbook-related topics. Please update any links you may have made directly to this page.
diff --git a/docs/docsite/rst/user_guide/playbooks_startnstep.rst b/docs/docsite/rst/user_guide/playbooks_startnstep.rst
new file mode 100644
index 00000000..e3b62961
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_startnstep.rst
@@ -0,0 +1,40 @@
+.. _playbooks_start_and_step:
+
+***************************************
+Executing playbooks for troubleshooting
+***************************************
+
+When you are testing new plays or debugging playbooks, you may need to run the same play multiple times. To make this more efficient, Ansible offers two alternative ways to execute a playbook: start-at-task and step mode.
+
+.. _start_at_task:
+
+start-at-task
+-------------
+
+To start executing your playbook at a particular task (usually the task that failed on the previous run), use the ``--start-at-task`` option::
+
+ ansible-playbook playbook.yml --start-at-task="install packages"
+
+In this example, Ansible starts executing your playbook at a task named "install packages". This feature does not work with tasks inside dynamically re-used roles or tasks (``include_*``); see :ref:`dynamic_vs_static`.
+
+.. _step:
+
+Step mode
+---------
+
+To execute a playbook interactively, use ``--step``::
+
+ ansible-playbook playbook.yml --step
+
+With this option, Ansible stops on each task, and asks if it should execute that task. For example, if you have a task called "configure ssh", the playbook run will stop and ask::
+
+ Perform task: configure ssh (y/n/c):
+
+Answer "y" to execute the task, answer "n" to skip the task, and answer "c" to exit step mode, executing all remaining tasks without asking.
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbook_debugger`
+ Using the Ansible debugger
diff --git a/docs/docsite/rst/user_guide/playbooks_strategies.rst b/docs/docsite/rst/user_guide/playbooks_strategies.rst
new file mode 100644
index 00000000..a97f0447
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_strategies.rst
@@ -0,0 +1,216 @@
+.. _playbooks_strategies:
+
+Controlling playbook execution: strategies and more
+===================================================
+
+By default, Ansible runs each task on all hosts affected by a play before starting the next task on any host, using 5 forks. If you want to change this default behavior, you can use a different strategy plugin, change the number of forks, or apply one of several keywords like ``serial``.
+
+.. contents::
+ :local:
+
+Selecting a strategy
+--------------------
+The default behavior described above is the :ref:`linear strategy<linear_strategy>`. Ansible offers other strategies, including the :ref:`debug strategy<debug_strategy>` (see also :ref:`playbook_debugger`) and the :ref:`free strategy<free_strategy>`, which allows each host to run until the end of the play as fast as it can::
+
+ - hosts: all
+ strategy: free
+ tasks:
+ ...
+
+You can select a different strategy for each play as shown above, or set your preferred strategy globally in ``ansible.cfg``, under the ``defaults`` stanza::
+
+ [defaults]
+ strategy = free
+
+All strategies are implemented as :ref:`strategy plugins<strategy_plugins>`. Please review the documentation for each strategy plugin for details on how it works.
+
+Setting the number of forks
+---------------------------
+If you have the processing power available and want to use more forks, you can set the number in ``ansible.cfg``::
+
+ [defaults]
+ forks = 30
+
+or pass it on the command line: ``ansible-playbook -f 30 my_playbook.yml``.
+
+Using keywords to control execution
+-----------------------------------
+
+In addition to strategies, several :ref:`keywords<playbook_keywords>` also affect play execution. You can set a number, a percentage, or a list of numbers of hosts you want to manage at a time with ``serial``. Ansible completes the play on the specified number or percentage of hosts before starting the next batch of hosts. You can restrict the number of workers allotted to a block or task with ``throttle``. You can control how Ansible selects the next host in a group to execute against with ``order``. You can run a task on a single host with ``run_once``. These keywords are not strategies. They are directives or options applied to a play, block, or task.
+
+.. _rolling_update_batch_size:
+
+Setting the batch size with ``serial``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, Ansible runs in parallel against all the hosts in the :ref:`pattern <intro_patterns>` you set in the ``hosts:`` field of each play. If you want to manage only a few machines at a time, for example during a rolling update, you can define how many hosts Ansible should manage at a single time using the ``serial`` keyword::
+
+ ---
+ - name: test play
+ hosts: webservers
+ serial: 2
+ gather_facts: False
+
+ tasks:
+ - name: first task
+ command: hostname
+ - name: second task
+ command: hostname
+
+In the above example, if we had 4 hosts in the group 'webservers', Ansible would execute the play completely (both tasks) on 2 of the hosts before moving on to the next 2 hosts::
+
+
+ PLAY [webservers] ****************************************
+
+ TASK [first task] ****************************************
+ changed: [web2]
+ changed: [web1]
+
+ TASK [second task] ***************************************
+ changed: [web1]
+ changed: [web2]
+
+ PLAY [webservers] ****************************************
+
+ TASK [first task] ****************************************
+ changed: [web3]
+ changed: [web4]
+
+ TASK [second task] ***************************************
+ changed: [web3]
+ changed: [web4]
+
+ PLAY RECAP ***********************************************
+ web1 : ok=2 changed=2 unreachable=0 failed=0
+ web2 : ok=2 changed=2 unreachable=0 failed=0
+ web3 : ok=2 changed=2 unreachable=0 failed=0
+ web4 : ok=2 changed=2 unreachable=0 failed=0
+
+
+You can also specify a percentage with the ``serial`` keyword. Ansible applies the percentage to the total number of hosts in a play to determine the number of hosts per pass::
+
+ ---
+ - name: test play
+ hosts: webservers
+ serial: "30%"
+
+If the number of hosts does not divide equally into the number of passes, the final pass contains the remainder. In this example, if you had 20 hosts in the webservers group, the first batch would contain 6 hosts, the second batch would contain 6 hosts, the third batch would contain 6 hosts, and the last batch would contain 2 hosts.
+
+You can also specify batch sizes as a list. For example::
+
+ ---
+ - name: test play
+ hosts: webservers
+ serial:
+ - 1
+ - 5
+ - 10
+
+In the above example, the first batch would contain a single host, the next batch would contain 5 hosts, and every following batch would contain 10 hosts (or all remaining hosts, if fewer than 10 remain).
+
+You can list multiple batch sizes as percentages::
+
+ ---
+ - name: test play
+ hosts: webservers
+ serial:
+ - "10%"
+ - "20%"
+ - "100%"
+
+You can also mix and match the values::
+
+ ---
+ - name: test play
+ hosts: webservers
+ serial:
+ - 1
+ - 5
+ - "20%"
+
+.. note::
+ No matter how small the percentage, the number of hosts per pass will always be 1 or greater.
+
+Restricting execution with ``throttle``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``throttle`` keyword limits the number of workers for a particular task. It can be set at the block and task level. Use ``throttle`` to restrict tasks that may be CPU-intensive or interact with a rate-limiting API::
+
+ tasks:
+ - command: /path/to/cpu_intensive_command
+ throttle: 1
+
+If you have already restricted the number of forks or the number of machines to execute against in parallel, you can reduce the number of workers with ``throttle``, but you cannot increase it. In other words, to have an effect, your ``throttle`` setting must be lower than your ``forks`` or ``serial`` setting if you are using them together.
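+
+For example, a minimal sketch of ``throttle`` at the block level, capping every task in the block at three workers (the command paths are illustrative)::
+
+    tasks:
+    - block:
+      # both tasks share the three-worker limit set on the block
+      - command: /path/to/rate_limited_call_one
+      - command: /path/to/rate_limited_call_two
+      throttle: 3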
+
+Ordering execution based on inventory
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``order`` keyword controls the order in which hosts are run. Possible values for order are:
+
+inventory:
+ (default) The order provided in the inventory
+reverse_inventory:
+ The reverse of the order provided by the inventory
+sorted:
+    Sorted alphabetically by name
+reverse_sorted:
+ Sorted by name in reverse alphabetical order
+shuffle:
+ Randomly ordered on each run
+
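+For example, a minimal sketch of a play that walks hosts alphabetically by name::
+
+    - hosts: webservers
+      order: sorted
+      gather_facts: false
+      tasks:
+        # hosts are processed in alphabetical order by inventory name
+        - command: hostname
+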
+Other keywords that affect play execution include ``ignore_errors``, ``ignore_unreachable``, and ``any_errors_fatal``. These options are documented in :ref:`playbooks_error_handling`.
+
+.. _run_once:
+
+Running on a single machine with ``run_once``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want a task to run only on the first host in your batch of hosts, set ``run_once`` to true on that task::
+
+ ---
+ # ...
+
+ tasks:
+
+ # ...
+
+ - command: /opt/application/upgrade_db.py
+ run_once: true
+
+ # ...
+
+Ansible executes this task on the first host in the current batch and applies all results and facts to all the hosts in the same batch. This approach is similar to applying a conditional to a task such as::
+
+ - command: /opt/application/upgrade_db.py
+ when: inventory_hostname == webservers[0]
+
+However, with ``run_once``, the results are applied to all the hosts. To run the task on a specific host, instead of the first host in the batch, delegate the task::
+
+ - command: /opt/application/upgrade_db.py
+ run_once: true
+ delegate_to: web01.example.org
+
+As always with :ref:`delegation <playbooks_delegation>`, the action will be executed on the delegated host, but the task still uses the information (variables, facts) of the original host.
+
+.. note::
+   When used together with ``serial``, tasks marked as ``run_once`` will be run on one host in *each* serial batch. If the task must run only once regardless of ``serial`` mode, use the
+   :code:`when: inventory_hostname == ansible_play_hosts_all[0]` construct, as in the sketch below.
+
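+A minimal sketch of that construct::
+
+    - command: /opt/application/upgrade_db.py
+      # runs on the first host of the whole play, not once per serial batch
+      when: inventory_hostname == ansible_play_hosts_all[0]
+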
+.. note::
+   Any conditional (in other words, ``when:``) will use the variables of the 'first host' to decide whether the task runs; no other hosts will be tested.
+
+.. note::
+ If you want to avoid the default behavior of setting the fact for all hosts, set ``delegate_facts: True`` for the specific task or block.
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_delegation`
+ Running tasks on or assigning facts to specific machines
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_tags.rst b/docs/docsite/rst/user_guide/playbooks_tags.rst
new file mode 100644
index 00000000..93c26636
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_tags.rst
@@ -0,0 +1,428 @@
+.. _tags:
+
+****
+Tags
+****
+
+If you have a large playbook, it may be useful to run only specific parts of it instead of running the entire playbook. You can do this with Ansible tags. Using tags to execute or skip selected tasks is a two-step process:
+
+ #. Add tags to your tasks, either individually or with tag inheritance from a block, play, role, or import.
+ #. Select or skip tags when you run your playbook.
+
+.. contents::
+ :local:
+
+Adding tags with the tags keyword
+=================================
+
+You can add tags to a single task or include. You can also add tags to multiple tasks by defining them at the level of a block, play, role, or import. The keyword ``tags`` addresses all these use cases. The ``tags`` keyword always defines tags and adds them to tasks; it does not select or skip tasks for execution. You can only select or skip tasks based on tags at the command line when you run a playbook. See :ref:`using_tags` for more details.
+
+Adding tags to individual tasks
+-------------------------------
+
+At the simplest level, you can apply one or more tags to an individual task. You can add tags to tasks in playbooks, in task files, or within a role. Here is an example that tags two tasks with different tags:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Install the servers
+ ansible.builtin.yum:
+ name:
+ - httpd
+ - memcached
+ state: present
+ tags:
+ - packages
+ - webservers
+
+ - name: Configure the service
+ ansible.builtin.template:
+ src: templates/src.j2
+ dest: /etc/foo.conf
+ tags:
+ - configuration
+
+You can apply the same tag to more than one individual task. This example tags several tasks with the same tag, "ntp":
+
+.. code-block:: yaml
+
+ ---
+ # file: roles/common/tasks/main.yml
+
+ - name: Install ntp
+ ansible.builtin.yum:
+ name: ntp
+ state: present
+ tags: ntp
+
+ - name: Configure ntp
+ ansible.builtin.template:
+ src: ntp.conf.j2
+ dest: /etc/ntp.conf
+ notify:
+ - restart ntpd
+ tags: ntp
+
+ - name: Enable and run ntpd
+ ansible.builtin.service:
+ name: ntpd
+ state: started
+ enabled: yes
+ tags: ntp
+
+ - name: Install NFS utils
+ ansible.builtin.yum:
+ name:
+ - nfs-utils
+ - nfs-util-lib
+ state: present
+ tags: filesharing
+
+If you ran these four tasks in a playbook with ``--tags ntp``, Ansible would run the three tasks tagged ``ntp`` and skip the one task that does not have that tag.
+
+.. _tags_on_includes:
+
+Adding tags to includes
+-----------------------
+
+You can apply tags to dynamic includes in a playbook. As with tags on an individual task, tags on an ``include_*`` task apply only to the include itself, not to any tasks within the included file or role. If you add ``mytag`` to a dynamic include, then run that playbook with ``--tags mytag``, Ansible runs the include itself, runs any tasks within the included file or role tagged with ``mytag``, and skips any tasks within the included file or role without that tag. See :ref:`selective_reuse` for more details.
+
+You add tags to includes the same way you add tags to any other task:
+
+.. code-block:: yaml
+
+ ---
+ # file: roles/common/tasks/main.yml
+
+ - name: Dynamic re-use of database tasks
+ include_tasks: db.yml
+ tags: db
+
+You can add a tag only to the dynamic include of a role. In this example, the ``foo`` tag will `not` apply to tasks inside the ``bar`` role:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Include the bar role
+ include_role:
+ name: bar
+ tags:
+ - foo
+
+With plays, blocks, the ``role`` keyword, and static imports, Ansible applies tag inheritance, adding the tags you define to every task inside the play, block, role, or imported file. However, tag inheritance does *not* apply to dynamic re-use with ``include_role`` and ``include_tasks``. With dynamic re-use (includes), the tags you define apply only to the include itself. If you need tag inheritance, use a static import. If you cannot use an import because the rest of your playbook uses includes, see :ref:`apply_keyword` for ways to work around this behavior.
+
+.. _tag_inheritance:
+
+Tag inheritance: adding tags to multiple tasks
+----------------------------------------------
+
+If you want to apply the same tag or tags to multiple tasks without adding a ``tags`` line to every task, you can define the tags at the level of your play or block, or when you add a role or import a file. Ansible applies the tags down the dependency chain to all child tasks. With roles and imports, Ansible appends the tags set by the ``roles`` section or import to any tags set on individual tasks or blocks within the role or imported file. This is called tag inheritance. Tag inheritance is convenient, because you do not have to tag every task. However, the tags still apply to the tasks individually.
+
+Adding tags to blocks
+^^^^^^^^^^^^^^^^^^^^^
+
+If you want to apply a tag to many, but not all, of the tasks in your play, use a :ref:`block <playbooks_blocks>` and define the tags at that level. For example, we could edit the NTP example shown above to use a block:
+
+.. code-block:: yaml
+
+ # myrole/tasks/main.yml
+   - block:
+ - name: Install ntp
+ ansible.builtin.yum:
+ name: ntp
+ state: present
+
+ - name: Configure ntp
+ ansible.builtin.template:
+ src: ntp.conf.j2
+ dest: /etc/ntp.conf
+ notify:
+ - restart ntpd
+
+ - name: Enable and run ntpd
+ ansible.builtin.service:
+ name: ntpd
+ state: started
+         enabled: yes
+     tags: ntp
+
+ - name: Install NFS utils
+ ansible.builtin.yum:
+ name:
+ - nfs-utils
+ - nfs-util-lib
+ state: present
+ tags: filesharing
+
+Adding tags to plays
+^^^^^^^^^^^^^^^^^^^^
+
+If all the tasks in a play should get the same tag, you can add the tag at the level of the play. For example, if you had a play with only the NTP tasks, you could tag the entire play:
+
+.. code-block:: yaml
+
+ - hosts: all
+ tags: ntp
+ tasks:
+ - name: Install ntp
+ ansible.builtin.yum:
+ name: ntp
+ state: present
+
+ - name: Configure ntp
+ ansible.builtin.template:
+ src: ntp.conf.j2
+ dest: /etc/ntp.conf
+ notify:
+ - restart ntpd
+
+ - name: Enable and run ntpd
+ ansible.builtin.service:
+ name: ntpd
+ state: started
+ enabled: yes
+
+ - hosts: fileservers
+ tags: filesharing
+ tasks:
+ ...
+
+Adding tags to roles
+^^^^^^^^^^^^^^^^^^^^
+
+There are three ways to add tags to roles:
+
+ #. Add the same tag or tags to all tasks in the role by setting tags under ``roles``. See examples in this section.
+ #. Add the same tag or tags to all tasks in the role by setting tags on a static ``import_role`` in your playbook. See examples in :ref:`tags_on_imports`.
+   #. Add a tag or tags to individual tasks or blocks within the role itself. This is the only approach that allows you to select or skip some tasks within the role. To select or skip tasks within the role, you must have tags set on individual tasks or blocks, use the dynamic ``include_role`` in your playbook, and add the same tag or tags to the include. When you use this approach, and then run your playbook with ``--tags foo``, Ansible runs the include itself plus any tasks in the role that also have the tag ``foo``. See :ref:`tags_on_includes` for details.
+
+When you incorporate a role in your playbook statically with the ``roles`` keyword, Ansible adds any tags you define to all the tasks in the role. For example:
+
+.. code-block:: yaml
+
+ roles:
+ - role: webserver
+ vars:
+ port: 5000
+ tags: [ web, foo ]
+
+or:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ roles:
+ - role: foo
+ tags:
+ - bar
+ - baz
+ # using YAML shorthand, this is equivalent to:
+ # - { role: foo, tags: ["bar", "baz"] }
+
+.. _tags_on_imports:
+
+Adding tags to imports
+^^^^^^^^^^^^^^^^^^^^^^
+
+You can also apply a tag or tags to all the tasks imported by the static ``import_role`` and ``import_tasks`` statements:
+
+.. code-block:: yaml
+
+ ---
+ - hosts: webservers
+ tasks:
+ - name: Import the foo role
+ import_role:
+ name: foo
+ tags:
+ - bar
+ - baz
+
+ - name: Import tasks from foo.yml
+ import_tasks: foo.yml
+ tags: [ web, foo ]
+
+.. _apply_keyword:
+
+Tag inheritance for includes: blocks and the ``apply`` keyword
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, Ansible does not apply :ref:`tag inheritance <tag_inheritance>` to dynamic re-use with ``include_role`` and ``include_tasks``. If you add tags to an include, they apply only to the include itself, not to any tasks in the included file or role. This allows you to execute selected tasks within a role or task file when you run your playbook; see :ref:`selective_reuse`.
+
+If you want tag inheritance, you probably want to use imports. However, using both includes and imports in a single playbook can lead to difficult-to-diagnose bugs. For this reason, if your playbook uses ``include_*`` to re-use roles or tasks, and you need tag inheritance on one include, Ansible offers two workarounds. You can use the ``apply`` keyword:
+
+.. code-block:: yaml
+
+ - name: Apply the db tag to the include and to all tasks in db.yaml
+ include_tasks:
+ file: db.yml
+ # adds 'db' tag to tasks within db.yml
+ apply:
+ tags: db
+ # adds 'db' tag to this 'include_tasks' itself
+ tags: db
+
+Or you can use a block:
+
+.. code-block:: yaml
+
+ - block:
+ - name: Include tasks from db.yml
+ include_tasks: db.yml
+ tags: db
+
+.. _special_tags:
+
+Special tags: always and never
+==============================
+
+Ansible reserves two tag names for special behavior: ``always`` and ``never``. If you assign the ``always`` tag to a task or play, Ansible will always run that task or play, unless you specifically skip it (``--skip-tags always``).
+
+For example:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "Always runs"
+ tags:
+ - always
+
+ - name: Print a message
+ ansible.builtin.debug:
+ msg: "runs when you use tag1"
+ tags:
+ - tag1
+
+.. warning::
+ * Fact gathering is tagged with 'always' by default. It is only skipped if
+ you apply a tag and then use a different tag in ``--tags`` or the same
+ tag in ``--skip-tags``.
+
+.. versionadded:: 2.5
+
+If you assign the ``never`` tag to a task or play, Ansible will skip that task or play unless you specifically request it (``--tags never``).
+
+For example:
+
+.. code-block:: yaml
+
+ tasks:
+ - name: Run the rarely-used debug task
+ ansible.builtin.debug:
+ msg: '{{ showmevar }}'
+ tags: [ never, debug ]
+
+The rarely-used debug task in the example above only runs when you specifically request the ``debug`` or ``never`` tags.
+
+.. _using_tags:
+
+Selecting or skipping tags when you run a playbook
+==================================================
+
+Once you have added tags to your tasks, includes, blocks, plays, roles, and imports, you can selectively execute or skip tasks based on their tags when you run :ref:`ansible-playbook`. Ansible runs or skips all tasks with tags that match the tags you pass at the command line. If you have added a tag at the block or play level, with ``roles``, or with an import, that tag applies to every task within the block, play, role, or imported role or file. If you have a role with lots of tags and you want to call subsets of the role at different times, either :ref:`use it with dynamic includes <selective_reuse>`, or split the role into multiple roles.
+
+:ref:`ansible-playbook` offers five tag-related command-line options:
+
+* ``--tags all`` - run all tasks, ignore tags (default behavior)
+* ``--tags [tag1, tag2]`` - run only tasks with the tags ``tag1`` and ``tag2``
+* ``--skip-tags [tag3, tag4]`` - run all tasks except those with the tags ``tag3`` and ``tag4``
+* ``--tags tagged`` - run only tasks with at least one tag
+* ``--tags untagged`` - run only tasks with no tags
+
+For example, to run only tasks and blocks tagged ``configuration`` and ``packages`` in a very long playbook:
+
+.. code-block:: bash
+
+ ansible-playbook example.yml --tags "configuration,packages"
+
+To run all tasks except those tagged ``packages``:
+
+.. code-block:: bash
+
+ ansible-playbook example.yml --skip-tags "packages"
+
+Previewing the results of using tags
+------------------------------------
+
+When you run a role or playbook, you might not know or remember which tasks have which tags, or which tags exist at all. Ansible offers two command-line flags for :ref:`ansible-playbook` that help you manage tagged playbooks:
+
+* ``--list-tags`` - generate a list of available tags
+* ``--list-tasks`` - when used with ``--tags tagname`` or ``--skip-tags tagname``, generate a preview of tagged tasks
+
+For example, if you do not know whether the tag for configuration tasks is ``config`` or ``conf`` in a playbook, role, or tasks file, you can display all available tags without running any tasks:
+
+.. code-block:: bash
+
+ ansible-playbook example.yml --list-tags
+
+If you do not know which tasks have the tags ``configuration`` and ``packages``, you can pass those tags and add ``--list-tasks``. Ansible lists the tasks but does not execute any of them.
+
+.. code-block:: bash
+
+ ansible-playbook example.yml --tags "configuration,packages" --list-tasks
+
+These command-line flags have one limitation: they cannot show tags or tasks within dynamically included files or roles. See :ref:`dynamic_vs_static` for more information on differences between static imports and dynamic includes.
+
+.. _selective_reuse:
+
+Selectively running tagged tasks in re-usable files
+---------------------------------------------------
+
+If you have a role or a tasks file with tags defined at the task or block level, you can selectively run or skip those tagged tasks in a playbook if you use a dynamic include instead of a static import. You must use the same tag on the included tasks and on the include statement itself. For example, you might create a file with some tagged and some untagged tasks:
+
+.. code-block:: yaml
+
+ # mixed.yml
+ - name: Run the task with no tags
+ ansible.builtin.debug:
+ msg: this task has no tags
+
+ - name: Run the tagged task
+ ansible.builtin.debug:
+ msg: this task is tagged with mytag
+ tags: mytag
+
+ - block:
+ - name: Run the first block task with mytag
+ ...
+ - name: Run the second block task with mytag
+ ...
+ tags:
+ - mytag
+
+And you might include the tasks file above in a playbook:
+
+.. code-block:: yaml
+
+ # myplaybook.yml
+ - hosts: all
+ tasks:
+ - name: Run tasks from mixed.yml
+ include_tasks:
+           file: mixed.yml
+ tags: mytag
+
+When you run the playbook with ``ansible-playbook -i hosts myplaybook.yml --tags "mytag"``, Ansible skips the task with no tags, runs the tagged individual task, and runs the two tasks in the block.
+
+Configuring tags globally
+-------------------------
+
+If you want to run or skip certain tags by default, you can use the :ref:`TAGS_RUN` and :ref:`TAGS_SKIP` options in Ansible configuration to set those defaults.
+
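+For example, a minimal ``ansible.cfg`` sketch that always runs tasks tagged ``setup`` and always skips tasks tagged ``debug`` (assuming your playbooks define those tags)::
+
+    [tags]
+    run = setup
+    skip = debug
+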
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_templating.rst b/docs/docsite/rst/user_guide/playbooks_templating.rst
new file mode 100644
index 00000000..162ab813
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_templating.rst
@@ -0,0 +1,55 @@
+.. _playbooks_templating:
+
+*******************
+Templating (Jinja2)
+*******************
+
+Ansible uses Jinja2 templating to enable dynamic expressions and access to variables. Ansible includes a lot of specialized filters and tests for templating. You can use all the standard filters and tests included in Jinja2 as well. Ansible also offers a new plugin type: :ref:`lookup_plugins`.
+
+All templating happens on the Ansible controller **before** the task is sent and executed on the target machine. This approach minimizes the package requirements on the target (jinja2 is only required on the controller). It also limits the amount of data Ansible passes to the target machine. Ansible parses templates on the controller and passes only the information needed for each task to the target machine, instead of passing all the data on the controller and parsing it on the target.
+
+.. contents::
+ :local:
+
+.. toctree::
+ :maxdepth: 2
+
+ playbooks_filters
+ playbooks_tests
+ playbooks_lookups
+ playbooks_python_version
+
+.. _templating_now:
+
+Get the current time
+====================
+
+.. versionadded:: 2.8
+
+The ``now()`` Jinja2 function retrieves a Python datetime object or a string representation for the current time.
+
+The ``now()`` function supports two arguments:
+
+utc
+ Specify ``True`` to get the current time in UTC. Defaults to ``False``.
+
+fmt
+ Accepts a `strftime <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`_ string that returns a formatted date time string.
+
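+For example, a minimal sketch that prints the current UTC time with a custom format::
+
+    - debug:
+        msg: "{{ now(utc=true, fmt='%Y-%m-%d %H:%M:%S') }}"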
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_loops`
+ Looping in playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_tests.rst b/docs/docsite/rst/user_guide/playbooks_tests.rst
new file mode 100644
index 00000000..0a1aa8d9
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_tests.rst
@@ -0,0 +1,395 @@
+.. _playbooks_tests:
+
+*****
+Tests
+*****
+
+`Tests <http://jinja.pocoo.org/docs/dev/templates/#tests>`_ in Jinja are a way of evaluating template expressions and returning True or False. Jinja ships with many of these. See `builtin tests`_ in the official Jinja template documentation.
+
+The main difference between tests and filters is that Jinja tests are used for comparisons, whereas filters are used for data manipulation; the two have different applications in Jinja. Tests can also be used in list processing filters, like ``map()`` and ``select()``, to choose items in the list.
+
+Like all templating, tests always execute on the Ansible controller, **not** on the target of a task, as they test local data.
+
+In addition to those Jinja2 tests, Ansible supplies a few more and users can easily create their own.
+
+.. contents::
+ :local:
+
+.. _test_syntax:
+
+Test syntax
+===========
+
+`Test syntax <http://jinja.pocoo.org/docs/dev/templates/#tests>`_ varies from `filter syntax <http://jinja.pocoo.org/docs/dev/templates/#filters>`_ (``variable | filter``). Historically, Ansible has registered tests as both Jinja tests and Jinja filters, allowing them to be referenced using filter syntax.
+
+As of Ansible 2.5, using a Jinja test as a filter generates a warning.
+
+The syntax for using a jinja test is as follows::
+
+ variable is test_name
+
+Such as::
+
+ result is failed
+
+.. _testing_strings:
+
+Testing strings
+===============
+
+To match strings against a substring or a regular expression, use the ``match``, ``search`` or ``regex`` tests::
+
+ vars:
+ url: "http://example.com/users/foo/resources/bar"
+
+ tasks:
+ - debug:
+ msg: "matched pattern 1"
+ when: url is match("http://example.com/users/.*/resources/")
+
+ - debug:
+ msg: "matched pattern 2"
+ when: url is search("/users/.*/resources/.*")
+
+ - debug:
+ msg: "matched pattern 3"
+ when: url is search("/users/")
+
+ - debug:
+ msg: "matched pattern 4"
+ when: url is regex("example.com/\w+/foo")
+
+``match`` succeeds if it finds the pattern at the beginning of the string, while ``search`` succeeds if it finds the pattern anywhere within the string. By default, ``regex`` works like ``search``, but ``regex`` can be configured to perform other tests as well, by passing the ``match_type`` keyword argument. In particular, ``match_type`` determines the ``re`` method that gets used to perform the search. The full list can be found in the relevant Python documentation `here <https://docs.python.org/3/library/re.html#regular-expression-objects>`_.
+
+All of the string tests also take optional ``ignorecase`` and ``multiline`` arguments. These correspond to ``re.I`` and ``re.M`` from Python's ``re`` library, respectively.
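+
+For example, a minimal sketch using ``ignorecase`` with the ``search`` test (re-using the ``url`` variable from the example above)::
+
+    - debug:
+        msg: "matched, ignoring case"
+      when: url is search("/USERS/", ignorecase=True)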
+
+.. _testing_vault:
+
+Vault
+=====
+
+.. versionadded:: 2.10
+
+You can test whether a variable is a single inline vault-encrypted value using the ``vault_encrypted`` test.
+
+.. code-block:: yaml
+
+ vars:
+ variable: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;dev
+ 61323931353866666336306139373937316366366138656131323863373866376666353364373761
+ 3539633234313836346435323766306164626134376564330a373530313635343535343133316133
+ 36643666306434616266376434363239346433643238336464643566386135356334303736353136
+ 6565633133366366360a326566323363363936613664616364623437336130623133343530333739
+ 3039
+
+ tasks:
+ - debug:
+ msg: '{{ (variable is vault_encrypted) | ternary("Vault encrypted", "Not vault encrypted") }}'
+
+.. _testing_truthiness:
+
+Testing truthiness
+==================
+
+.. versionadded:: 2.10
+
+As of Ansible 2.10, you can perform Python-like truthy and falsy checks.
+
+.. code-block:: yaml
+
+ - debug:
+ msg: "Truthy"
+ when: value is truthy
+ vars:
+ value: "some string"
+
+ - debug:
+ msg: "Falsy"
+ when: value is falsy
+ vars:
+ value: ""
+
+Additionally, the ``truthy`` and ``falsy`` tests accept an optional parameter called ``convert_bool`` that will attempt
+to convert boolean indicators to actual booleans.
+
+.. code-block:: yaml
+
+ - debug:
+ msg: "Truthy"
+ when: value is truthy(convert_bool=True)
+ vars:
+ value: "yes"
+
+ - debug:
+ msg: "Falsy"
+ when: value is falsy(convert_bool=True)
+ vars:
+ value: "off"
+
+.. _testing_versions:
+
+Comparing versions
+==================
+
+.. versionadded:: 1.6
+
+.. note:: In 2.5 ``version_compare`` was renamed to ``version``
+
+To compare a version number, such as checking whether the ``ansible_facts['distribution_version']``
+version is greater than or equal to '12.04', you can use the ``version`` test::
+
+ {{ ansible_facts['distribution_version'] is version('12.04', '>=') }}
+
+If ``ansible_facts['distribution_version']`` is greater than or equal to 12.04, this test returns True, otherwise False.
+
+The ``version`` test accepts the following operators::
+
+ <, lt, <=, le, >, gt, >=, ge, ==, =, eq, !=, <>, ne
+
+This test also accepts a third parameter, ``strict``, which defines whether strict version parsing as defined by ``distutils.version.StrictVersion`` should be used. The default is ``False`` (using ``distutils.version.LooseVersion``); ``True`` enables strict version parsing::
+
+ {{ sample_version_var is version('1.0', operator='lt', strict=True) }}
+
+When using ``version`` in a playbook or role, don't use ``{{ }}`` as described in the `FAQ <https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#when-should-i-use-also-how-to-interpolate-variables-or-dynamic-variable-names>`_::
+
+ vars:
+ my_version: 1.2.3
+
+ tasks:
+ - debug:
+ msg: "my_version is higher than 1.0.0"
+ when: my_version is version('1.0.0', '>')
+
+.. _math_tests:
+
+Set theory tests
+================
+
+.. versionadded:: 2.1
+
+.. note:: In 2.5 ``issubset`` and ``issuperset`` were renamed to ``subset`` and ``superset``
+
+To see if a list includes or is included by another list, you can use ``subset`` and ``superset``::
+
+ vars:
+ a: [1,2,3,4,5]
+ b: [2,3]
+ tasks:
+ - debug:
+ msg: "A includes B"
+ when: a is superset(b)
+
+ - debug:
+ msg: "B is included in A"
+ when: b is subset(a)
+
+.. _contains_test:
+
+Testing if a list contains a value
+==================================
+
+.. versionadded:: 2.8
+
+Ansible includes a ``contains`` test which operates similarly to, but in reverse of, the Jinja2-provided ``in`` test.
+The ``contains`` test is designed to work with the ``select``, ``reject``, ``selectattr``, and ``rejectattr`` filters::
+
+ vars:
+ lacp_groups:
+ - master: lacp0
+ network: 10.65.100.0/24
+ gateway: 10.65.100.1
+ dns4:
+ - 10.65.100.10
+ - 10.65.100.11
+ interfaces:
+ - em1
+ - em2
+
+ - master: lacp1
+ network: 10.65.120.0/24
+ gateway: 10.65.120.1
+ dns4:
+ - 10.65.100.10
+ - 10.65.100.11
+ interfaces:
+ - em3
+ - em4
+
+ tasks:
+ - debug:
+ msg: "{{ (lacp_groups|selectattr('interfaces', 'contains', 'em1')|first).master }}"
+
+.. versionadded:: 2.4
+
+Testing if a list value is True
+===============================
+
+You can use ``any`` and ``all`` to check whether any or all elements in a list are true::
+
+ vars:
+ mylist:
+ - 1
+ - "{{ 3 == 3 }}"
+ - True
+ myotherlist:
+ - False
+ - True
+ tasks:
+
+ - debug:
+ msg: "all are true!"
+ when: mylist is all
+
+ - debug:
+ msg: "at least one is true"
+ when: myotherlist is any
+
+.. _path_tests:
+
+Testing paths
+=============
+
+.. note:: In 2.5 the following tests were renamed to remove the ``is_`` prefix
+
+The following tests can provide information about a path on the controller::
+
+ - debug:
+ msg: "path is a directory"
+ when: mypath is directory
+
+ - debug:
+ msg: "path is a file"
+ when: mypath is file
+
+ - debug:
+ msg: "path is a symlink"
+ when: mypath is link
+
+ - debug:
+ msg: "path already exists"
+ when: mypath is exists
+
+ - debug:
+ msg: "path is {{ (mypath is abs)|ternary('absolute','relative')}}"
+
+ - debug:
+ msg: "path is the same file as path2"
+ when: mypath is same_file(path2)
+
+ - debug:
+ msg: "path is a mount"
+ when: mypath is mount
+
+
+Testing size formats
+====================
+
+The ``human_readable`` and ``human_to_bytes`` filters let you test your
+playbooks to make sure you are using the right size format in your tasks, and that
+you provide byte format to computers and human-readable format to people.
+
+Human readable
+--------------
+
+Asserts that a number converts to the expected human-readable string.
+
+For example::
+
+ - name: "Human Readable"
+ assert:
+ that:
+ - '"1.00 Bytes" == 1|human_readable'
+ - '"1.00 bits" == 1|human_readable(isbits=True)'
+ - '"10.00 KB" == 10240|human_readable'
+ - '"97.66 MB" == 102400000|human_readable'
+ - '"0.10 GB" == 102400000|human_readable(unit="G")'
+ - '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")'
+
+This would result in::
+
+ { "changed": false, "msg": "All assertions passed" }
+
+Human to bytes
+--------------
+
+Asserts that a human-readable string converts to the expected number of bytes.
+
+For example::
+
+ - name: "Human to Bytes"
+ assert:
+ that:
+ - "{{'0'|human_to_bytes}} == 0"
+ - "{{'0.1'|human_to_bytes}} == 0"
+ - "{{'0.9'|human_to_bytes}} == 1"
+ - "{{'1'|human_to_bytes}} == 1"
+ - "{{'10.00 KB'|human_to_bytes}} == 10240"
+ - "{{ '11 MB'|human_to_bytes}} == 11534336"
+ - "{{ '1.1 GB'|human_to_bytes}} == 1181116006"
+ - "{{'10.00 Kb'|human_to_bytes(isbits=True)}} == 10240"
+
+This would result in::
+
+ { "changed": false, "msg": "All assertions passed" }
+
+
+.. _test_task_results:
+
+Testing task results
+====================
+
+The following tasks illustrate the tests that check the status of previously run tasks::
+
+ tasks:
+
+ - shell: /usr/bin/foo
+ register: result
+ ignore_errors: True
+
+ - debug:
+ msg: "it failed"
+ when: result is failed
+
+ # in most cases you'll want a handler, but if you want to do something right now, this is nice
+ - debug:
+ msg: "it changed"
+ when: result is changed
+
+ - debug:
+ msg: "it succeeded in Ansible >= 2.1"
+ when: result is succeeded
+
+ - debug:
+ msg: "it succeeded"
+ when: result is success
+
+ - debug:
+ msg: "it was skipped"
+ when: result is skipped
+
+.. note:: From 2.1, you can also use ``success``, ``failure``, ``change``, and ``skip`` so that the grammar matches, for those who need to be strict about it.
+
+
+.. _builtin tests: http://jinja.palletsprojects.com/templates/#builtin-tests
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_variables`
+ All about variables
+ :ref:`playbooks_loops`
+ Looping in playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_variables.rst b/docs/docsite/rst/user_guide/playbooks_variables.rst
new file mode 100644
index 00000000..eb2b58f7
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_variables.rst
@@ -0,0 +1,466 @@
+.. _playbooks_variables:
+
+***************
+Using Variables
+***************
+
+Ansible uses variables to manage differences between systems. With Ansible, you can execute tasks and playbooks on multiple different systems with a single command. To represent the variations among those different systems, you can create variables with standard YAML syntax, including lists and dictionaries. You can define these variables in your playbooks, in your :ref:`inventory <intro_inventory>`, in re-usable :ref:`files <playbooks_reuse>` or :ref:`roles <playbooks_reuse_roles>`, or at the command line. You can also create variables during a playbook run by registering the return value or values of a task as a new variable.
+
+After you create variables, either by defining them in a file, passing them at the command line, or registering the return value or values of a task as a new variable, you can use those variables in module arguments, in :ref:`conditional "when" statements <playbooks_conditionals>`, in :ref:`templates <playbooks_templating>`, and in :ref:`loops <playbooks_loops>`. The `ansible-examples github repository <https://github.com/ansible/ansible-examples>`_ contains many examples of using variables in Ansible.
+
+Once you understand the concepts and examples on this page, read about :ref:`Ansible facts <vars_and_facts>`, which are variables you retrieve from remote systems.
+
+.. contents::
+ :local:
+
+.. _valid_variable_names:
+
+Creating valid variable names
+=============================
+
+Not all strings are valid Ansible variable names. A variable name can only include letters, numbers, and underscores. `Python keywords`_ or :ref:`playbook keywords<playbook_keywords>` are not valid variable names. A variable name cannot begin with a number.
+
+Variable names can begin with an underscore. In many programming languages, variables that begin with an underscore are private. This is not true in Ansible. Variables that begin with an underscore are treated exactly the same as any other variable. Do not rely on this convention for privacy or security.
+
+This table gives examples of valid and invalid variable names:
+
+.. table::
+ :class: documentation-table
+
+ ====================== ====================================================================
+ Valid variable names Not valid
+ ====================== ====================================================================
+ ``foo`` ``*foo``, `Python keywords`_ such as ``async`` and ``lambda``
+
+ ``foo_env`` :ref:`playbook keywords<playbook_keywords>` such as ``environment``
+
+ ``foo_port`` ``foo-port``, ``foo port``, ``foo.port``
+
+ ``foo5``, ``_foo`` ``5foo``, ``12``
+ ====================== ====================================================================
+
+.. _Python keywords: https://docs.python.org/3/reference/lexical_analysis.html#keywords
+
+Simple variables
+================
+
+Simple variables combine a variable name with a single value. You can use this syntax (and the syntax for lists and dictionaries shown below) in a variety of places. For details about setting variables in inventory, in playbooks, in reusable files, in roles, or at the command line, see :ref:`setting_variables`.
+
+Defining simple variables
+-------------------------
+
+You can define a simple variable using standard YAML syntax. For example::
+
+ remote_install_path: /opt/my_app_config
+
+Referencing simple variables
+----------------------------
+
+After you define a variable, use Jinja2 syntax to reference it. Jinja2 variables use double curly braces. For example, the expression ``My amp goes to {{ max_amp_value }}`` demonstrates the most basic form of variable substitution. You can use Jinja2 syntax in playbooks. For example::
+
+ ansible.builtin.template:
+ src: foo.cfg.j2
+ dest: '{{ remote_install_path }}/foo.cfg'
+
+In this example, the variable defines the location of a file, which can vary from one system to another.
+
+.. note::
+
+ Ansible allows Jinja2 loops and conditionals in :ref:`templates <playbooks_templating>` but not in playbooks. You cannot create a loop of tasks. Ansible playbooks are pure machine-parseable YAML.
+
+.. _yaml_gotchas:
+
+When to quote variables (a YAML gotcha)
+=======================================
+
+If you start a value with ``{{ foo }}``, you must quote the whole expression to create valid YAML syntax. If you do not quote the whole expression, the YAML parser cannot interpret the syntax: it might be a variable or it might be the start of a YAML dictionary. For guidance on writing YAML, see the :ref:`yaml_syntax` documentation.
+
+If you use a variable without quotes like this::
+
+ - hosts: app_servers
+ vars:
+ app_path: {{ base_path }}/22
+
+You will see: ``ERROR! Syntax Error while loading YAML.`` If you add quotes, Ansible works correctly::
+
+ - hosts: app_servers
+ vars:
+ app_path: "{{ base_path }}/22"
+
+.. _list_variables:
+
+List variables
+==============
+
+A list variable combines a variable name with multiple values. The multiple values can be stored as an itemized list or in square brackets ``[]``, separated with commas.
+
+Defining variables as lists
+---------------------------
+
+You can define variables with multiple values using YAML lists. For example::
+
+ region:
+ - northeast
+ - southeast
+ - midwest
+
+Referencing list variables
+--------------------------
+
+When you use variables defined as a list (also called an array), you can use individual, specific fields from that list. The first item in a list is item 0, the second item is item 1. For example::
+
+ region: "{{ region[0] }}"
+
+The value of this expression would be "northeast".
+
+.. _dictionary_variables:
+
+Dictionary variables
+====================
+
+A dictionary stores data in key-value pairs. Usually, dictionaries are used to store related data, such as the information contained in an ID or a user profile.
+
+Defining variables as key:value dictionaries
+--------------------------------------------
+
+You can define more complex variables using YAML dictionaries. A YAML dictionary maps keys to values. For example::
+
+ foo:
+ field1: one
+ field2: two
+
+Referencing key:value dictionary variables
+------------------------------------------
+
+When you use variables defined as a key:value dictionary (also called a hash), you can use individual, specific fields from that dictionary using either bracket notation or dot notation::
+
+ foo['field1']
+ foo.field1
+
+Both of these examples reference the same value ("one"). Bracket notation always works. Dot notation can cause problems because some keys collide with attributes and methods of Python dictionaries. Use bracket notation if you use keys which start and end with two underscores (which are reserved for special meanings in Python) or are any of the known public attributes:
+
+``add``, ``append``, ``as_integer_ratio``, ``bit_length``, ``capitalize``, ``center``, ``clear``, ``conjugate``, ``copy``, ``count``, ``decode``, ``denominator``, ``difference``, ``difference_update``, ``discard``, ``encode``, ``endswith``, ``expandtabs``, ``extend``, ``find``, ``format``, ``fromhex``, ``fromkeys``, ``get``, ``has_key``, ``hex``, ``imag``, ``index``, ``insert``, ``intersection``, ``intersection_update``, ``isalnum``, ``isalpha``, ``isdecimal``, ``isdigit``, ``isdisjoint``, ``is_integer``, ``islower``, ``isnumeric``, ``isspace``, ``issubset``, ``issuperset``, ``istitle``, ``isupper``, ``items``, ``iteritems``, ``iterkeys``, ``itervalues``, ``join``, ``keys``, ``ljust``, ``lower``, ``lstrip``, ``numerator``, ``partition``, ``pop``, ``popitem``, ``real``, ``remove``, ``replace``, ``reverse``, ``rfind``, ``rindex``, ``rjust``, ``rpartition``, ``rsplit``, ``rstrip``, ``setdefault``, ``sort``, ``split``, ``splitlines``, ``startswith``, ``strip``, ``swapcase``, ``symmetric_difference``, ``symmetric_difference_update``, ``title``, ``translate``, ``union``, ``update``, ``upper``, ``values``, ``viewitems``, ``viewkeys``, ``viewvalues``, ``zfill``.
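+
+For example, a minimal sketch of the collision: ``update`` is a method on Python dictionaries, so dot notation resolves to the method rather than to your data::
+
+    vars:
+      settings:
+        update: auto
+    tasks:
+      - debug:
+          msg: "{{ settings['update'] }}"   # bracket notation returns 'auto'
+      # "{{ settings.update }}" would resolve to the dict method instead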
+
+.. _registered_variables:
+
+Registering variables
+=====================
+
+You can create variables from the output of an Ansible task with the task keyword ``register``. You can use registered variables in any later tasks in your play. For example::
+
+ - hosts: web_servers
+
+ tasks:
+
+ - name: Run a shell command and register its output as a variable
+ ansible.builtin.shell: /usr/bin/foo
+ register: foo_result
+ ignore_errors: true
+
+ - name: Run a shell command using output of the previous task
+ ansible.builtin.shell: /usr/bin/bar
+ when: foo_result.rc == 5
+
+For more examples of using registered variables in conditions on later tasks, see :ref:`playbooks_conditionals`. Registered variables may be simple variables, list variables, dictionary variables, or complex nested data structures. The documentation for each module includes a ``RETURN`` section describing the return values for that module. To see the values for a particular task, run your playbook with ``-v``.
+
+Registered variables are stored in memory. You cannot cache registered variables for use in future plays. Registered variables are only valid on the host for the rest of the current playbook run.
+
+Registered variables are host-level variables. When you register a variable in a task with a loop, the registered variable contains a value for each item in the loop. The data structure placed in the variable during the loop will contain a ``results`` attribute, that is a list of all responses from the module. For a more in-depth example of how this works, see the :ref:`playbooks_loops` section on using register with a loop.
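+
+For example, a minimal sketch of registering over a loop (the file names are illustrative)::
+
+    - name: Touch several files
+      ansible.builtin.file:
+        path: "/tmp/{{ item }}"
+        state: touch
+      loop: [one, two, three]
+      register: touch_results
+
+    - name: Show the item behind each result
+      ansible.builtin.debug:
+        msg: "{{ touch_results.results | map(attribute='item') | list }}"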
+
+.. note:: If a task fails or is skipped, Ansible still registers a variable with a failure or skipped status, unless the task is skipped based on tags. See :ref:`tags` for information on adding and using tags.
+
+.. _accessing_complex_variable_data:
+
+Referencing nested variables
+============================
+
+Many registered variables (and :ref:`facts <vars_and_facts>`) are nested YAML or JSON data structures. You cannot access values from these nested data structures with the simple ``{{ foo }}`` syntax. You must use either bracket notation or dot notation. For example, to reference an IP address from your facts using the bracket notation::
+
+ {{ ansible_facts["eth0"]["ipv4"]["address"] }}
+
+To reference an IP address from your facts using the dot notation::
+
+ {{ ansible_facts.eth0.ipv4.address }}
+
+.. _about_jinja2:
+.. _jinja2_filters:
+
+Transforming variables with Jinja2 filters
+==========================================
+
+Jinja2 filters let you transform the value of a variable within a template expression. For example, the ``capitalize`` filter capitalizes any value passed to it; the ``to_yaml`` and ``to_json`` filters change the format of your variable values. Jinja2 includes many `built-in filters <http://jinja.pocoo.org/docs/templates/#builtin-filters>`_ and Ansible supplies many more filters. To find more examples of filters, see :ref:`playbooks_filters`.
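+
+For example, a minimal sketch applying two of those filters::
+
+    - ansible.builtin.debug:
+        msg: "{{ 'hello' | capitalize }} / {{ {'port': 80} | to_json }}"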
+
+.. _setting_variables:
+
+Where to set variables
+======================
+
+You can define variables in a variety of places, such as in inventory, in playbooks, in reusable files, in roles, and at the command line. Ansible loads every possible variable it finds, then chooses the variable to apply based on :ref:`variable precedence rules <ansible_variable_precedence>`.
+
+.. _define_variables_in_inventory:
+
+Defining variables in inventory
+-------------------------------
+
+You can define different variables for each individual host, or set shared variables for a group of hosts in your inventory. For example, if all machines in the ``[Boston]`` group use 'boston.ntp.example.com' as an NTP server, you can set a group variable. The :ref:`intro_inventory` page has details on setting :ref:`host variables <host_variables>` and :ref:`group variables <group_variables>` in inventory.
+
+.. _playbook_variables:
+
+Defining variables in a playbook
+--------------------------------
+
+You can define variables directly in a playbook::
+
+ - hosts: webservers
+ vars:
+ http_port: 80
+
+When you define variables in a playbook, they are visible to anyone who runs that playbook. This is especially useful if you share playbooks widely.
+
+.. _included_variables:
+.. _variable_file_separation_details:
+
+Defining variables in included files and roles
+----------------------------------------------
+
+You can define variables in reusable variables files and/or in reusable roles. When you define variables in reusable variable files, the sensitive variables are separated from playbooks. This separation enables you to store your playbooks in source control software and even share the playbooks without the risk of exposing passwords or other sensitive personal data. For information about creating reusable files and roles, see :ref:`playbooks_reuse`.
+
+This example shows how you can include variables defined in an external file::
+
+ ---
+
+ - hosts: all
+ remote_user: root
+ vars:
+ favcolor: blue
+ vars_files:
+ - /vars/external_vars.yml
+
+ tasks:
+
+ - name: This is just a placeholder
+ ansible.builtin.command: /bin/echo foo
+
+The contents of each variables file is a simple YAML dictionary. For example::
+
+ ---
+ # in the above example, this would be vars/external_vars.yml
+ somevar: somevalue
+ password: magic
+
+.. note::
+ You can keep per-host and per-group variables in similar files. To learn about organizing your variables, see :ref:`splitting_out_vars`.
+
+.. _passing_variables_on_the_command_line:
+
+Defining variables at runtime
+-----------------------------
+
+You can define variables when you run your playbook by passing variables at the command line using the ``--extra-vars`` (or ``-e``) argument. You can also request user input with a ``vars_prompt`` (see :ref:`playbooks_prompts`). When you pass variables at the command line, use a single quoted string that contains one or more variables, in one of the formats below.
+
+key=value format
+^^^^^^^^^^^^^^^^
+
+Values passed in using the ``key=value`` syntax are interpreted as strings. Use the JSON format if you need to pass non-string values such as Booleans, integers, floats, lists, and so on.
+
+.. code-block:: text
+
+ ansible-playbook release.yml --extra-vars "version=1.23.45 other_variable=foo"
+
+JSON string format
+^^^^^^^^^^^^^^^^^^
+
+.. code-block:: text
+
+ ansible-playbook release.yml --extra-vars '{"version":"1.23.45","other_variable":"foo"}'
+ ansible-playbook arcade.yml --extra-vars '{"pacman":"mrs","ghosts":["inky","pinky","clyde","sue"]}'
+
+When passing variables with ``--extra-vars``, you must escape quotes and other special characters appropriately for both your markup (for example, JSON), and for your shell::
+
+ ansible-playbook arcade.yml --extra-vars "{\"name\":\"Conan O\'Brien\"}"
+ ansible-playbook arcade.yml --extra-vars '{"name":"Conan O'\\\''Brien"}'
+ ansible-playbook script.yml --extra-vars "{\"dialog\":\"He said \\\"I just can\'t get enough of those single and double-quotes"\!"\\\"\"}"
+
+If you have a lot of special characters, use a JSON or YAML file containing the variable definitions.
+
+vars from a JSON or YAML file
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: text
+
+ ansible-playbook release.yml --extra-vars "@some_file.json"
+
+
+.. _ansible_variable_precedence:
+
+Variable precedence: Where should I put a variable?
+===================================================
+
+You can set multiple variables with the same name in many different places. When you do this, Ansible loads every possible variable it finds, then chooses the variable to apply based on variable precedence. In other words, the different variables will override each other in a certain order.
+
+Teams and projects that agree on guidelines for defining variables (where to define certain types of variables) usually avoid variable precedence concerns. We suggest that you define each variable in one place: figure out where to define a variable, and keep it simple. For examples, see :ref:`variable_examples`.
+
+Some behavioral parameters that you can set in variables you can also set in Ansible configuration, as command-line options, and using playbook keywords. For example, you can define the user Ansible uses to connect to remote devices as a variable with ``ansible_user``, in a configuration file with ``DEFAULT_REMOTE_USER``, as a command-line option with ``-u``, and with the playbook keyword ``remote_user``. If you define the same parameter in a variable and by another method, the variable overrides the other setting. This approach allows host-specific settings to override more general settings. For examples and more details on the precedence of these various settings, see :ref:`general_precedence_rules`.
+
+Understanding variable precedence
+---------------------------------
+
+Ansible does apply variable precedence, and you might have a use for it. Here is the order of precedence from least to greatest (the last listed variables override all other variables):
+
+ #. command line values (for example, ``-u my_user``, these are not variables)
+ #. role defaults (defined in role/defaults/main.yml) [1]_
+ #. inventory file or script group vars [2]_
+ #. inventory group_vars/all [3]_
+ #. playbook group_vars/all [3]_
+ #. inventory group_vars/* [3]_
+ #. playbook group_vars/* [3]_
+ #. inventory file or script host vars [2]_
+ #. inventory host_vars/* [3]_
+ #. playbook host_vars/* [3]_
+ #. host facts / cached set_facts [4]_
+ #. play vars
+ #. play vars_prompt
+ #. play vars_files
+ #. role vars (defined in role/vars/main.yml)
+ #. block vars (only for tasks in block)
+ #. task vars (only for the task)
+ #. include_vars
+ #. set_facts / registered vars
+ #. role (and include_role) params
+ #. include params
+   #. extra vars (for example, ``-e "user=my_user"``) (always win precedence)
+
+In general, Ansible gives precedence to variables that were defined more recently, more actively, and with more explicit scope. Variables in the defaults folder inside a role are easily overridden. Anything in the vars directory of the role overrides previous versions of that variable in the namespace. Host and/or inventory variables override role defaults, but explicit includes such as the vars directory or an ``include_vars`` task override inventory variables.
+
+Ansible merges different variables set in inventory so that more specific settings override more generic settings. For example, ``ansible_ssh_user`` specified as a group_var is overridden by ``ansible_user`` specified as a host_var. For details about the precedence of variables set in inventory, see :ref:`how_we_merge`.
+
+.. rubric:: Footnotes
+
+.. [1] Tasks in each role see their own role's defaults. Tasks defined outside of a role see the last role's defaults.
+.. [2] Variables defined in inventory file or provided by dynamic inventory.
+.. [3] Includes vars added by 'vars plugins' as well as host_vars and group_vars which are added by the default vars plugin shipped with Ansible.
+.. [4] When created with the ``cacheable`` option of ``set_fact``, variables have high precedence in the play,
+   but carry the same precedence as host facts when they come from the cache.
+
+.. note:: Within any section, redefining a var overrides the previous instance.
+ If multiple groups have the same variable, the last one loaded wins.
+ If you define a variable twice in a play's ``vars:`` section, the second one wins.
+.. note:: The previous list describes the default configuration, ``hash_behaviour=replace``. Switch to ``merge`` to combine hashes key by key instead of replacing them entirely, as sketched below.
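+
+A minimal sketch of switching that setting in ``ansible.cfg``::
+
+    [defaults]
+    # merge nested hashes key by key instead of replacing the whole value
+    hash_behaviour = merge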
+
+.. _variable_scopes:
+
+Scoping variables
+-----------------
+
+You can decide where to set a variable based on the scope you want that value to have. Ansible has three main scopes:
+
+ * Global: this is set by config, environment variables and the command line
+ * Play: each play and contained structures, vars entries (``vars``, ``vars_files``, ``vars_prompt``), role defaults and vars.
+ * Host: variables directly associated with a host, like inventory, include_vars, facts or registered task outputs
+
+Inside a template, you automatically have access to all variables that are in scope for a host, plus any registered variables, facts, and magic variables.
+
+.. _variable_examples:
+
+Tips on where to set variables
+------------------------------
+
+You should choose where to define a variable based on the kind of control you might want over values.
+
+Set variables in inventory that deal with geography or behavior. Since groups are frequently the entity that maps roles onto hosts, you can often set variables on the group instead of defining them on a role. Remember: child groups override parent groups, and host variables override group variables. See :ref:`define_variables_in_inventory` for details on setting host and group variables.
+
+Set common defaults in a ``group_vars/all`` file. See :ref:`splitting_out_vars` for details on how to organize host and group variables in your inventory. Group variables are generally placed alongside your inventory file, but they can also be returned by dynamic inventory (see :ref:`intro_dynamic_inventory`) or defined in :ref:`ansible_tower` from the UI or API::
+
+ ---
+ # file: /etc/ansible/group_vars/all
+ # this is the site wide default
+ ntp_server: default-time.example.com
+
+Set location-specific variables in ``group_vars/my_location`` files. All groups are children of the ``all`` group, so variables set here override those set in ``group_vars/all``::
+
+ ---
+ # file: /etc/ansible/group_vars/boston
+ ntp_server: boston-time.example.com
+
+If one host used a different NTP server, you could set that in a host_vars file, which would override the group variable::
+
+ ---
+ # file: /etc/ansible/host_vars/xyz.boston.example.com
+ ntp_server: override.example.com
+
+Set defaults in roles to avoid undefined-variable errors. If you share your roles, other users can rely on the reasonable defaults you added in the ``roles/x/defaults/main.yml`` file, or they can easily override those values in inventory or at the command line. See :ref:`playbooks_reuse_roles` for more info. For example::
+
+ ---
+ # file: roles/x/defaults/main.yml
+ # if no other value is supplied in inventory or as a parameter, this value will be used
+ http_port: 80
+
+Set variables in roles to ensure a value is used in that role, and is not overridden by inventory variables. If you are not sharing your role with others, you can define app-specific behaviors like ports this way, in ``roles/x/vars/main.yml``. If you are sharing roles with others, putting variables here makes them harder to override, although users can still do so by passing a parameter to the role or setting a variable with ``-e``::
+
+ ---
+ # file: roles/x/vars/main.yml
+ # this will absolutely be used in this role
+ http_port: 80
+
+Pass variables as parameters when you call roles for maximum clarity, flexibility, and visibility. This approach overrides any defaults that exist for a role. For example::
+
+ roles:
+ - role: apache
+ vars:
+ http_port: 8080
+
+When you read this playbook it is clear that you have chosen to set a variable or override a default. You can also pass multiple values, which allows you to run the same role multiple times. See :ref:`run_role_twice` for more details. For example::
+
+ roles:
+ - role: app_user
+ vars:
+ myname: Ian
+ - role: app_user
+ vars:
+ myname: Terry
+ - role: app_user
+ vars:
+ myname: Graham
+ - role: app_user
+ vars:
+ myname: John
+
+Variables set in one role are available to later roles. You can set variables in a ``roles/common_settings/vars/main.yml`` file and use them in other roles and elsewhere in your playbook::
+
+ roles:
+ - role: common_settings
+ - role: something
+ vars:
+ foo: 12
+ - role: something_else
+
+.. note:: There are some protections in place to avoid the need to namespace variables.
+   In this example, variables defined in 'common_settings' are available to tasks in 'something' and 'something_else', but tasks in 'something' see ``foo`` set to 12, even if 'common_settings' sets ``foo`` to 20.
+
+Instead of worrying about variable precedence, we encourage you to think about how easily or how often you want to override a variable when deciding where to set it. If you are not sure what other variables are defined, and you need a particular value, use ``--extra-vars`` (``-e``) to override all other variables.
+
+Using advanced variable syntax
+==============================
+
+For information about advanced YAML syntax used to declare variables and have more control over the data placed in YAML files used by Ansible, see :ref:`playbooks_advanced_syntax`.
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_conditionals`
+ Conditional statements in playbooks
+ :ref:`playbooks_filters`
+ Jinja2 filters and their uses
+ :ref:`playbooks_loops`
+ Looping in playbooks
+ :ref:`playbooks_reuse_roles`
+ Playbook organization by roles
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`special_variables`
+ List of special variables
+ `User Mailing List <https://groups.google.com/group/ansible-devel>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/playbooks_vars_facts.rst b/docs/docsite/rst/user_guide/playbooks_vars_facts.rst
new file mode 100644
index 00000000..3828b8e3
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_vars_facts.rst
@@ -0,0 +1,680 @@
+.. _vars_and_facts:
+
+************************************************
+Discovering variables: facts and magic variables
+************************************************
+
+With Ansible you can retrieve or discover certain variables containing information about your remote systems or about Ansible itself. Variables related to remote systems are called facts. With facts, you can use the behavior or state of one system as configuration on other systems. For example, you can use the IP address of one system as a configuration value on another system. Variables related to Ansible are called magic variables.
+
+.. contents::
+ :local:
+
+Ansible facts
+=============
+
+Ansible facts are data related to your remote systems, including operating systems, IP addresses, attached filesystems, and more. You can access this data in the ``ansible_facts`` variable. By default, you can also access some Ansible facts as top-level variables with the ``ansible_`` prefix. You can disable this behavior using the :ref:`INJECT_FACTS_AS_VARS` setting. To see all available facts, add this task to a play::
+
+ - name: Print all available facts
+ ansible.builtin.debug:
+ var: ansible_facts
+
+To see the 'raw' information as gathered, run this command at the command line::
+
+ ansible <hostname> -m ansible.builtin.setup
+
+Facts include a large amount of variable data, which may look like this:
+
+.. code-block:: json
+
+ {
+ "ansible_all_ipv4_addresses": [
+ "REDACTED IP ADDRESS"
+ ],
+ "ansible_all_ipv6_addresses": [
+ "REDACTED IPV6 ADDRESS"
+ ],
+ "ansible_apparmor": {
+ "status": "disabled"
+ },
+ "ansible_architecture": "x86_64",
+ "ansible_bios_date": "11/28/2013",
+ "ansible_bios_version": "4.1.5",
+ "ansible_cmdline": {
+ "BOOT_IMAGE": "/boot/vmlinuz-3.10.0-862.14.4.el7.x86_64",
+ "console": "ttyS0,115200",
+ "no_timer_check": true,
+ "nofb": true,
+ "nomodeset": true,
+ "ro": true,
+ "root": "LABEL=cloudimg-rootfs",
+ "vga": "normal"
+ },
+ "ansible_date_time": {
+ "date": "2018-10-25",
+ "day": "25",
+ "epoch": "1540469324",
+ "hour": "12",
+ "iso8601": "2018-10-25T12:08:44Z",
+ "iso8601_basic": "20181025T120844109754",
+ "iso8601_basic_short": "20181025T120844",
+ "iso8601_micro": "2018-10-25T12:08:44.109968Z",
+ "minute": "08",
+ "month": "10",
+ "second": "44",
+ "time": "12:08:44",
+ "tz": "UTC",
+ "tz_offset": "+0000",
+ "weekday": "Thursday",
+ "weekday_number": "4",
+ "weeknumber": "43",
+ "year": "2018"
+ },
+ "ansible_default_ipv4": {
+ "address": "REDACTED",
+ "alias": "eth0",
+ "broadcast": "REDACTED",
+ "gateway": "REDACTED",
+ "interface": "eth0",
+ "macaddress": "REDACTED",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "network": "REDACTED",
+ "type": "ether"
+ },
+ "ansible_default_ipv6": {},
+ "ansible_device_links": {
+ "ids": {},
+ "labels": {
+ "xvda1": [
+ "cloudimg-rootfs"
+ ],
+ "xvdd": [
+ "config-2"
+ ]
+ },
+ "masters": {},
+ "uuids": {
+ "xvda1": [
+ "cac81d61-d0f8-4b47-84aa-b48798239164"
+ ],
+ "xvdd": [
+ "2018-10-25-12-05-57-00"
+ ]
+ }
+ },
+ "ansible_devices": {
+ "xvda": {
+ "holders": [],
+ "host": "",
+ "links": {
+ "ids": [],
+ "labels": [],
+ "masters": [],
+ "uuids": []
+ },
+ "model": null,
+ "partitions": {
+ "xvda1": {
+ "holders": [],
+ "links": {
+ "ids": [],
+ "labels": [
+ "cloudimg-rootfs"
+ ],
+ "masters": [],
+ "uuids": [
+ "cac81d61-d0f8-4b47-84aa-b48798239164"
+ ]
+ },
+ "sectors": "83883999",
+ "sectorsize": 512,
+ "size": "40.00 GB",
+ "start": "2048",
+ "uuid": "cac81d61-d0f8-4b47-84aa-b48798239164"
+ }
+ },
+ "removable": "0",
+ "rotational": "0",
+ "sas_address": null,
+ "sas_device_handle": null,
+ "scheduler_mode": "deadline",
+ "sectors": "83886080",
+ "sectorsize": "512",
+ "size": "40.00 GB",
+ "support_discard": "0",
+ "vendor": null,
+ "virtual": 1
+ },
+ "xvdd": {
+ "holders": [],
+ "host": "",
+ "links": {
+ "ids": [],
+ "labels": [
+ "config-2"
+ ],
+ "masters": [],
+ "uuids": [
+ "2018-10-25-12-05-57-00"
+ ]
+ },
+ "model": null,
+ "partitions": {},
+ "removable": "0",
+ "rotational": "0",
+ "sas_address": null,
+ "sas_device_handle": null,
+ "scheduler_mode": "deadline",
+ "sectors": "131072",
+ "sectorsize": "512",
+ "size": "64.00 MB",
+ "support_discard": "0",
+ "vendor": null,
+ "virtual": 1
+ },
+ "xvde": {
+ "holders": [],
+ "host": "",
+ "links": {
+ "ids": [],
+ "labels": [],
+ "masters": [],
+ "uuids": []
+ },
+ "model": null,
+ "partitions": {
+ "xvde1": {
+ "holders": [],
+ "links": {
+ "ids": [],
+ "labels": [],
+ "masters": [],
+ "uuids": []
+ },
+ "sectors": "167770112",
+ "sectorsize": 512,
+ "size": "80.00 GB",
+ "start": "2048",
+ "uuid": null
+ }
+ },
+ "removable": "0",
+ "rotational": "0",
+ "sas_address": null,
+ "sas_device_handle": null,
+ "scheduler_mode": "deadline",
+ "sectors": "167772160",
+ "sectorsize": "512",
+ "size": "80.00 GB",
+ "support_discard": "0",
+ "vendor": null,
+ "virtual": 1
+ }
+ },
+ "ansible_distribution": "CentOS",
+ "ansible_distribution_file_parsed": true,
+ "ansible_distribution_file_path": "/etc/redhat-release",
+ "ansible_distribution_file_variety": "RedHat",
+ "ansible_distribution_major_version": "7",
+ "ansible_distribution_release": "Core",
+ "ansible_distribution_version": "7.5.1804",
+ "ansible_dns": {
+ "nameservers": [
+ "127.0.0.1"
+ ]
+ },
+ "ansible_domain": "",
+ "ansible_effective_group_id": 1000,
+ "ansible_effective_user_id": 1000,
+ "ansible_env": {
+ "HOME": "/home/zuul",
+ "LANG": "en_US.UTF-8",
+ "LESSOPEN": "||/usr/bin/lesspipe.sh %s",
+ "LOGNAME": "zuul",
+ "MAIL": "/var/mail/zuul",
+ "PATH": "/usr/local/bin:/usr/bin",
+ "PWD": "/home/zuul",
+ "SELINUX_LEVEL_REQUESTED": "",
+ "SELINUX_ROLE_REQUESTED": "",
+ "SELINUX_USE_CURRENT_RANGE": "",
+ "SHELL": "/bin/bash",
+ "SHLVL": "2",
+ "SSH_CLIENT": "REDACTED 55672 22",
+ "SSH_CONNECTION": "REDACTED 55672 REDACTED 22",
+ "USER": "zuul",
+ "XDG_RUNTIME_DIR": "/run/user/1000",
+ "XDG_SESSION_ID": "1",
+ "_": "/usr/bin/python2"
+ },
+ "ansible_eth0": {
+ "active": true,
+ "device": "eth0",
+ "ipv4": {
+ "address": "REDACTED",
+ "broadcast": "REDACTED",
+ "netmask": "255.255.255.0",
+ "network": "REDACTED"
+ },
+ "ipv6": [
+ {
+ "address": "REDACTED",
+ "prefix": "64",
+ "scope": "link"
+ }
+ ],
+ "macaddress": "REDACTED",
+ "module": "xen_netfront",
+ "mtu": 1500,
+ "pciid": "vif-0",
+ "promisc": false,
+ "type": "ether"
+ },
+ "ansible_eth1": {
+ "active": true,
+ "device": "eth1",
+ "ipv4": {
+ "address": "REDACTED",
+ "broadcast": "REDACTED",
+ "netmask": "255.255.224.0",
+ "network": "REDACTED"
+ },
+ "ipv6": [
+ {
+ "address": "REDACTED",
+ "prefix": "64",
+ "scope": "link"
+ }
+ ],
+ "macaddress": "REDACTED",
+ "module": "xen_netfront",
+ "mtu": 1500,
+ "pciid": "vif-1",
+ "promisc": false,
+ "type": "ether"
+ },
+ "ansible_fips": false,
+ "ansible_form_factor": "Other",
+ "ansible_fqdn": "centos-7-rax-dfw-0003427354",
+ "ansible_hostname": "centos-7-rax-dfw-0003427354",
+ "ansible_interfaces": [
+ "lo",
+ "eth1",
+ "eth0"
+ ],
+ "ansible_is_chroot": false,
+ "ansible_kernel": "3.10.0-862.14.4.el7.x86_64",
+ "ansible_lo": {
+ "active": true,
+ "device": "lo",
+ "ipv4": {
+ "address": "127.0.0.1",
+ "broadcast": "host",
+ "netmask": "255.0.0.0",
+ "network": "127.0.0.0"
+ },
+ "ipv6": [
+ {
+ "address": "::1",
+ "prefix": "128",
+ "scope": "host"
+ }
+ ],
+ "mtu": 65536,
+ "promisc": false,
+ "type": "loopback"
+ },
+ "ansible_local": {},
+ "ansible_lsb": {
+ "codename": "Core",
+ "description": "CentOS Linux release 7.5.1804 (Core)",
+ "id": "CentOS",
+ "major_release": "7",
+ "release": "7.5.1804"
+ },
+ "ansible_machine": "x86_64",
+ "ansible_machine_id": "2db133253c984c82aef2fafcce6f2bed",
+ "ansible_memfree_mb": 7709,
+ "ansible_memory_mb": {
+ "nocache": {
+ "free": 7804,
+ "used": 173
+ },
+ "real": {
+ "free": 7709,
+ "total": 7977,
+ "used": 268
+ },
+ "swap": {
+ "cached": 0,
+ "free": 0,
+ "total": 0,
+ "used": 0
+ }
+ },
+ "ansible_memtotal_mb": 7977,
+ "ansible_mounts": [
+ {
+ "block_available": 7220998,
+ "block_size": 4096,
+ "block_total": 9817227,
+ "block_used": 2596229,
+ "device": "/dev/xvda1",
+ "fstype": "ext4",
+ "inode_available": 10052341,
+ "inode_total": 10419200,
+ "inode_used": 366859,
+ "mount": "/",
+ "options": "rw,seclabel,relatime,data=ordered",
+ "size_available": 29577207808,
+ "size_total": 40211361792,
+ "uuid": "cac81d61-d0f8-4b47-84aa-b48798239164"
+ },
+ {
+ "block_available": 0,
+ "block_size": 2048,
+ "block_total": 252,
+ "block_used": 252,
+ "device": "/dev/xvdd",
+ "fstype": "iso9660",
+ "inode_available": 0,
+ "inode_total": 0,
+ "inode_used": 0,
+ "mount": "/mnt/config",
+ "options": "ro,relatime,mode=0700",
+ "size_available": 0,
+ "size_total": 516096,
+ "uuid": "2018-10-25-12-05-57-00"
+ }
+ ],
+ "ansible_nodename": "centos-7-rax-dfw-0003427354",
+ "ansible_os_family": "RedHat",
+ "ansible_pkg_mgr": "yum",
+ "ansible_processor": [
+ "0",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "1",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "2",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "3",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "4",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "5",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "6",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz",
+ "7",
+ "GenuineIntel",
+ "Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz"
+ ],
+ "ansible_processor_cores": 8,
+ "ansible_processor_count": 8,
+ "ansible_processor_nproc": 8,
+ "ansible_processor_threads_per_core": 1,
+ "ansible_processor_vcpus": 8,
+ "ansible_product_name": "HVM domU",
+ "ansible_product_serial": "REDACTED",
+ "ansible_product_uuid": "REDACTED",
+ "ansible_product_version": "4.1.5",
+ "ansible_python": {
+ "executable": "/usr/bin/python2",
+ "has_sslcontext": true,
+ "type": "CPython",
+ "version": {
+ "major": 2,
+ "micro": 5,
+ "minor": 7,
+ "releaselevel": "final",
+ "serial": 0
+ },
+ "version_info": [
+ 2,
+ 7,
+ 5,
+ "final",
+ 0
+ ]
+ },
+ "ansible_python_version": "2.7.5",
+ "ansible_real_group_id": 1000,
+ "ansible_real_user_id": 1000,
+ "ansible_selinux": {
+ "config_mode": "enforcing",
+ "mode": "enforcing",
+ "policyvers": 31,
+ "status": "enabled",
+ "type": "targeted"
+ },
+ "ansible_selinux_python_present": true,
+ "ansible_service_mgr": "systemd",
+ "ansible_ssh_host_key_ecdsa_public": "REDACTED KEY VALUE",
+ "ansible_ssh_host_key_ed25519_public": "REDACTED KEY VALUE",
+ "ansible_ssh_host_key_rsa_public": "REDACTED KEY VALUE",
+ "ansible_swapfree_mb": 0,
+ "ansible_swaptotal_mb": 0,
+ "ansible_system": "Linux",
+ "ansible_system_capabilities": [
+ ""
+ ],
+ "ansible_system_capabilities_enforced": "True",
+ "ansible_system_vendor": "Xen",
+ "ansible_uptime_seconds": 151,
+ "ansible_user_dir": "/home/zuul",
+ "ansible_user_gecos": "",
+ "ansible_user_gid": 1000,
+ "ansible_user_id": "zuul",
+ "ansible_user_shell": "/bin/bash",
+ "ansible_user_uid": 1000,
+ "ansible_userspace_architecture": "x86_64",
+ "ansible_userspace_bits": "64",
+ "ansible_virtualization_role": "guest",
+ "ansible_virtualization_type": "xen",
+ "gather_subset": [
+ "all"
+ ],
+ "module_setup": true
+ }
+
+You can reference the model of the first disk in the facts shown above in a template or playbook as::
+
+ {{ ansible_facts['devices']['xvda']['model'] }}
+
+To reference the system hostname::
+
+ {{ ansible_facts['nodename'] }}
+
+You can use facts in conditionals (see :ref:`playbooks_conditionals`) and also in templates. You can also use facts to create dynamic groups of hosts that match particular criteria, see the :ref:`group_by module <group_by_module>` documentation for details.
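+
+For example, a minimal sketch of a fact-based conditional::
+
+    - name: Install Apache only on RedHat-family hosts
+      ansible.builtin.yum:
+        name: httpd
+        state: present
+      when: ansible_facts['os_family'] == "RedHat"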
+
+.. _fact_requirements:
+
+Package requirements for fact gathering
+---------------------------------------
+
+On some distros, you may see missing fact values or facts set to default values because the packages that support gathering those facts are not installed by default. You can install the necessary packages on your remote hosts using the OS package manager. Known dependencies include:
+
+* Linux Network fact gathering - Depends on the ``ip`` binary, commonly included in the ``iproute2`` package.
+
+.. _fact_caching:
+
+Caching facts
+-------------
+
+Like registered variables, facts are stored in memory by default. However, unlike registered variables, facts can be gathered independently and cached for repeated use. With cached facts, you can refer to facts from one system when configuring a second system, even if Ansible executes the current play on the second system first. For example::
+
+ {{ hostvars['asdf.example.com']['ansible_facts']['os_family'] }}
+
+Caching is controlled by the cache plugins. By default, Ansible uses the memory cache plugin, which stores facts in memory for the duration of the current playbook run. To retain Ansible facts for repeated use, select a different cache plugin. See :ref:`cache_plugins` for details.
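+
+A minimal sketch (hypothetical cache path) of enabling a persistent cache in ``ansible.cfg``::
+
+    [defaults]
+    gathering = smart
+    fact_caching = jsonfile
+    fact_caching_connection = /tmp/ansible_fact_cache
+    # cache lifetime in seconds; facts are refreshed after one day
+    fact_caching_timeout = 86400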
+
+Fact caching can improve performance. If you manage thousands of hosts, you can configure fact caching to run nightly, then manage configuration on a smaller set of servers periodically throughout the day. With cached facts, you have access to variables and information about all hosts even when you are only managing a small number of servers.
+
+.. _disabling_facts:
+
+Disabling facts
+---------------
+
+By default, Ansible gathers facts at the beginning of each play. If you do not need to gather facts (for example, if you know everything about your systems centrally), you can turn off fact gathering at the play level to improve scalability. Disabling facts may particularly improve performance in push mode with very large numbers of systems, or if you are using Ansible on experimental platforms. To disable fact gathering::
+
+ - hosts: whatever
+ gather_facts: no
+
+Adding custom facts
+-------------------
+
+The setup module in Ansible automatically discovers a standard set of facts about each host. If you want to add custom values to your facts, you can write a custom facts module, set temporary facts with an ``ansible.builtin.set_fact`` task, or provide permanent custom facts using the facts.d directory.
+
+.. _local_facts:
+
+facts.d or local facts
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 1.3
+
+You can add static custom facts by adding static files to facts.d, or add dynamic facts by adding executable scripts to facts.d. For example, you can add a list of all users on a host to your facts by creating and running a script in facts.d.
+
+To use facts.d, create an ``/etc/ansible/facts.d`` directory on the remote host or hosts. If you prefer a different directory, create it and specify it using the ``fact_path`` play keyword. Add files to the directory to supply your custom facts. All file names must end with ``.fact``. The files can be JSON, INI, or executable files returning JSON.
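+
+For example, a minimal sketch (hypothetical path) of pointing a play at a custom facts directory::
+
+    - hosts: all
+      fact_path: /opt/ansible/custom_facts.d
+      tasks:
+        - name: Show the locally defined facts
+          ansible.builtin.debug:
+            var: ansible_local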
+
+To add static facts, simply add a file with the ``.fact`` extension. For example, create ``/etc/ansible/facts.d/preferences.fact`` with this content::
+
+ [general]
+ asdf=1
+ bar=2
+
+The next time fact gathering runs, your facts will include a hash (dictionary) fact named ``general`` with ``asdf`` and ``bar`` as members. To validate this, run the following::
+
+ ansible <hostname> -m ansible.builtin.setup -a "filter=ansible_local"
+
+And you will see your custom fact added::
+
+ "ansible_local": {
+ "preferences": {
+ "general": {
+ "asdf" : "1",
+ "bar" : "2"
+ }
+ }
+ }
+
+The ansible_local namespace separates custom facts created by facts.d from system facts or variables defined elsewhere in the playbook, so variables will not override each other. You can access this custom fact in a template or playbook as::
+
+ {{ ansible_local['preferences']['general']['asdf'] }}
+
+.. note:: The key part in the key=value pairs will be converted into lowercase inside the ansible_local variable. Using the example above, if the ini file contained ``XYZ=3`` in the ``[general]`` section, then you should expect to access it as: ``{{ ansible_local['preferences']['general']['xyz'] }}`` and not ``{{ ansible_local['preferences']['general']['XYZ'] }}``. This is because Ansible uses Python's `ConfigParser`_ which passes all option names through the `optionxform`_ method and this method's default implementation converts option names to lower case.
+
+.. _ConfigParser: https://docs.python.org/2/library/configparser.html
+.. _optionxform: https://docs.python.org/2/library/configparser.html#ConfigParser.RawConfigParser.optionxform
+
+You can also use facts.d to execute a script on the remote host, generating dynamic custom facts in the ``ansible_local`` namespace. For example, you can generate a list of all users that exist on a remote host as a fact about that host. To generate dynamic custom facts using facts.d (a minimal sketch follows the list):
+
+ #. Write and test a script to generate the JSON data you want.
+ #. Save the script in your facts.d directory.
+ #. Make sure your script has the ``.fact`` file extension.
+ #. Make sure your script is executable by the Ansible connection user.
+ #. Gather facts to execute the script and add the JSON output to ansible_local.
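+
+A minimal sketch (hypothetical fact name and content) of such an executable script::
+
+    #!/bin/sh
+    # /etc/ansible/facts.d/sessions.fact
+    # Prints JSON on stdout; after fact gathering it appears
+    # on the host as ansible_local['sessions'].
+    echo "{\"active_logins\": $(who | wc -l)}"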
+
+By default, fact gathering runs once at the beginning of each play. If you create a custom fact using facts.d in a playbook, it will be available in the next play that gathers facts. If you want to use it in the same play where you created it, you must explicitly re-run the setup module. For example::
+
+ - hosts: webservers
+ tasks:
+
+ - name: Create directory for ansible custom facts
+ ansible.builtin.file:
+ state: directory
+ recurse: yes
+ path: /etc/ansible/facts.d
+
+ - name: Install custom ipmi fact
+ ansible.builtin.copy:
+ src: ipmi.fact
+ dest: /etc/ansible/facts.d
+
+ - name: Re-read facts after adding custom fact
+ ansible.builtin.setup:
+ filter: ansible_local
+
+If you use this pattern frequently, a custom facts module would be more efficient than facts.d.
+
+.. _magic_variables_and_hostvars:
+
+Information about Ansible: magic variables
+==========================================
+
+You can access information about Ansible operations, including the Python version being used, the hosts and groups in inventory, and the directories for playbooks and roles, using "magic" variables. Like connection variables, magic variables are :ref:`special_variables`. Magic variable names are reserved; do not set variables with these names. The variable ``environment`` is also reserved.
+
+The most commonly used magic variables are ``hostvars``, ``groups``, ``group_names``, and ``inventory_hostname``. With ``hostvars``, you can access variables defined for any host in the play, at any point in a playbook. You can access Ansible facts using the ``hostvars`` variable too, but only after you have gathered (or cached) facts.
+
+If you want to configure your database server using the value of a 'fact' from another node, or the value of an inventory variable assigned to another node, you can use ``hostvars`` in a template or on an action line::
+
+ {{ hostvars['test.example.com']['ansible_facts']['distribution'] }}
+
+With ``groups``, a list of all the groups (and hosts) in the inventory, you can enumerate all hosts within a group. For example:
+
+.. code-block:: jinja
+
+ {% for host in groups['app_servers'] %}
+ # something that applies to all app servers.
+ {% endfor %}
+
+You can use ``groups`` and ``hostvars`` together to find all the IP addresses in a group.
+
+.. code-block:: jinja
+
+ {% for host in groups['app_servers'] %}
+ {{ hostvars[host]['ansible_facts']['eth0']['ipv4']['address'] }}
+ {% endfor %}
+
+You can use this approach to point a frontend proxy server to all the hosts in your app servers group, to set up the correct firewall rules between servers, and so on. You must either cache facts or gather facts for those hosts before the task that fills out the template.
+
+With ``group_names``, a list (array) of all the groups the current host is in, you can create templated files that vary based on the group membership (or role) of the host:
+
+.. code-block:: jinja
+
+ {% if 'webserver' in group_names %}
+ # some part of a configuration file that only applies to webservers
+ {% endif %}
+
+You can use the magic variable ``inventory_hostname``, the name of the host as configured in your inventory, as an alternative to ``ansible_hostname`` when fact-gathering is disabled. If you have a long FQDN, you can use ``inventory_hostname_short``, which contains the part up to the first period, without the rest of the domain.
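+
+For example, for a host named ``www1.example.com`` in inventory (a hypothetical name), a sketch of the two values:
+
+.. code-block:: jinja
+
+    {{ inventory_hostname }}          {# www1.example.com #}
+    {{ inventory_hostname_short }}    {# www1 #}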
+
+Other useful magic variables refer to the current play or playbook. These vars may be useful for filling out templates with multiple hostnames or for injecting the list into the rules for a load balancer.
+
+``ansible_play_hosts`` is the list of all hosts still active in the current play.
+
+``ansible_play_batch`` is a list of hostnames that are in scope for the current 'batch' of the play.
+
+The batch size is defined by ``serial``; when ``serial`` is not set, the batch is equivalent to the whole play (making ``ansible_play_batch`` the same as ``ansible_play_hosts``).
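+
+For example, a sketch (hypothetical template content) that writes every host still active in the play into a load balancer configuration:
+
+.. code-block:: jinja
+
+    {% for host in ansible_play_hosts %}
+    server {{ host }}
+    {% endfor %}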
+
+``ansible_playbook_python`` is the path to the python executable used to invoke the Ansible command line tool.
+
+``inventory_dir`` is the pathname of the directory holding Ansible's inventory host file.
+
+``inventory_file`` is the pathname and the filename pointing to Ansible's inventory host file.
+
+``playbook_dir`` contains the playbook base directory.
+
+``role_path`` contains the current role's pathname and only works inside a role.
+
+``ansible_check_mode`` is a boolean, set to ``True`` if you run Ansible with ``--check``.
+
+.. _ansible_version:
+
+Ansible version
+---------------
+
+.. versionadded:: 1.8
+
+To adapt playbook behavior to different versions of Ansible, you can use the variable ``ansible_version``, which has the following structure::
+
+ "ansible_version": {
+ "full": "2.10.1",
+ "major": 2,
+ "minor": 10,
+ "revision": 1,
+ "string": "2.10.1"
+ }
diff --git a/docs/docsite/rst/user_guide/playbooks_vault.rst b/docs/docsite/rst/user_guide/playbooks_vault.rst
new file mode 100644
index 00000000..03bd2c04
--- /dev/null
+++ b/docs/docsite/rst/user_guide/playbooks_vault.rst
@@ -0,0 +1,6 @@
+:orphan:
+
+Using vault in playbooks
+========================
+
+The documentation regarding Ansible Vault has moved. The new location is here: :ref:`vault`. Please update any links you may have made directly to this page.
diff --git a/docs/docsite/rst/user_guide/plugin_filtering_config.rst b/docs/docsite/rst/user_guide/plugin_filtering_config.rst
new file mode 100644
index 00000000..2e9900c9
--- /dev/null
+++ b/docs/docsite/rst/user_guide/plugin_filtering_config.rst
@@ -0,0 +1,26 @@
+.. _plugin_filtering_config:
+
+Blacklisting modules
+====================
+
+If you want to avoid using certain modules, you can blacklist them to prevent Ansible from loading them. To blacklist plugins, create a YAML configuration file. The default location for this file is :file:`/etc/ansible/plugin_filters.yml`, or you can select a different path for the blacklist file using the :ref:`PLUGIN_FILTERS_CFG` setting in the ``defaults`` section of your ansible.cfg. Here is an example blacklist file:
+
+.. code-block:: YAML
+
+ ---
+ filter_version: '1.0'
+ module_blacklist:
+ # Deprecated
+ - docker
+ # We only allow pip, not easy_install
+ - easy_install
+
+The file contains two fields:
+
+ * A file version so that you can update the format while keeping backwards compatibility in the future. The current version should be the string ``"1.0"``.
+
+ * A list of modules to blacklist. Any module in this list will not be loaded by Ansible when it searches for a module to invoke for a task.
+
+.. note::
+
+ You cannot blacklist the ``stat`` module, as it is required for Ansible to run.
diff --git a/docs/docsite/rst/user_guide/quickstart.rst b/docs/docsite/rst/user_guide/quickstart.rst
new file mode 100644
index 00000000..7e97d9ab
--- /dev/null
+++ b/docs/docsite/rst/user_guide/quickstart.rst
@@ -0,0 +1,20 @@
+.. _quickstart_guide:
+
+Ansible Quickstart Guide
+========================
+
+We've recorded a short video that introduces Ansible.
+
+The `quickstart video <https://www.ansible.com/resources/videos/quick-start-video>`_ is about 13 minutes long and gives you a high level
+introduction to Ansible -- what it does and how to use it. We'll also tell you about other products in the Ansible ecosystem.
+
+Enjoy, and be sure to visit the rest of the documentation to learn more.
+
+.. seealso::
+
+ `A system administrators guide to getting started with Ansible <https://www.redhat.com/en/blog/system-administrators-guide-getting-started-ansible-fast>`_
+ A step by step introduction to Ansible
+ `Ansible Automation for SysAdmins <https://opensource.com/downloads/ansible-quickstart>`_
+ A downloadable guide for getting started with Ansible
+ :ref:`network_getting_started`
+ A guide for network engineers using Ansible for the first time
diff --git a/docs/docsite/rst/user_guide/sample_setup.rst b/docs/docsite/rst/user_guide/sample_setup.rst
new file mode 100644
index 00000000..9be60004
--- /dev/null
+++ b/docs/docsite/rst/user_guide/sample_setup.rst
@@ -0,0 +1,285 @@
+.. _sample_setup:
+
+********************
+Sample Ansible setup
+********************
+
+You have learned about playbooks, inventory, roles, and variables. This section pulls all those elements together, outlining a sample setup for automating a web service. You can find more example playbooks illustrating these patterns in our `ansible-examples repository <https://github.com/ansible/ansible-examples>`_. (Note: these may not use all of the features in the latest release, but they are still an excellent reference!)
+
+The sample setup organizes playbooks, roles, inventory, and variables files by function, with tags at the play and task level for greater granularity and control. This is a powerful and flexible approach, but there are other ways to organize Ansible content. Your usage of Ansible should fit your needs, not ours, so feel free to modify this approach and organize your content as you see fit.
+
+.. contents::
+ :local:
+
+Sample directory layout
+-----------------------
+
+This layout organizes most tasks in roles, with a single inventory file for each environment and a few playbooks in the top-level directory::
+
+ production # inventory file for production servers
+ staging # inventory file for staging environment
+
+ group_vars/
+ group1.yml # here we assign variables to particular groups
+ group2.yml
+ host_vars/
+ hostname1.yml # here we assign variables to particular systems
+ hostname2.yml
+
+ library/ # if any custom modules, put them here (optional)
+ module_utils/ # if any custom module_utils to support modules, put them here (optional)
+ filter_plugins/ # if any custom filter plugins, put them here (optional)
+
+ site.yml # master playbook
+ webservers.yml # playbook for webserver tier
+ dbservers.yml # playbook for dbserver tier
+ tasks/ # task files included from playbooks
+ webservers-extra.yml # <-- avoids confusing playbook with task files
+
+ roles/
+ common/ # this hierarchy represents a "role"
+ tasks/ #
+ main.yml # <-- tasks file can include smaller files if warranted
+ handlers/ #
+ main.yml # <-- handlers file
+ templates/ # <-- files for use with the template resource
+ ntp.conf.j2 # <------- templates end in .j2
+ files/ #
+ bar.txt # <-- files for use with the copy resource
+ foo.sh # <-- script files for use with the script resource
+ vars/ #
+ main.yml # <-- variables associated with this role
+ defaults/ #
+ main.yml # <-- default lower priority variables for this role
+ meta/ #
+ main.yml # <-- role dependencies
+ library/ # roles can also include custom modules
+ module_utils/ # roles can also include custom module_utils
+ lookup_plugins/ # or other types of plugins, like lookup in this case
+
+ webtier/ # same kind of structure as "common" was above, done for the webtier role
+ monitoring/ # ""
+ fooapp/ # ""
+
+.. note:: By default, Ansible assumes your playbooks are stored in one directory with roles stored in a sub-directory called ``roles/``. As you use Ansible to automate more tasks, you may want to move your playbooks into a sub-directory called ``playbooks/``. If you do this, you must configure the path to your ``roles/`` directory using the ``roles_path`` setting in ansible.cfg.
+
+Alternative directory layout
+----------------------------
+
+Alternatively, you can put each inventory file with its ``group_vars``/``host_vars`` in a separate directory. This is particularly useful if your ``group_vars``/``host_vars`` do not have much in common across environments. The layout could look something like this::
+
+ inventories/
+ production/
+ hosts # inventory file for production servers
+ group_vars/
+ group1.yml # here we assign variables to particular groups
+ group2.yml
+ host_vars/
+ hostname1.yml # here we assign variables to particular systems
+ hostname2.yml
+
+ staging/
+ hosts # inventory file for staging environment
+ group_vars/
+ group1.yml # here we assign variables to particular groups
+ group2.yml
+ host_vars/
+ stagehost1.yml # here we assign variables to particular systems
+ stagehost2.yml
+
+ library/
+ module_utils/
+ filter_plugins/
+
+ site.yml
+ webservers.yml
+ dbservers.yml
+
+ roles/
+ common/
+ webtier/
+ monitoring/
+ fooapp/
+
+This layout gives you more flexibility for larger environments, as well as a total separation of inventory variables between different environments. However, this approach is harder to maintain, because there are more files. For more information on organizing group and host variables, see :ref:`splitting_out_vars`.
+
+.. _groups_and_hosts:
+
+Sample group and host variables
+-------------------------------
+
+These sample group and host variables files record the variable values that apply to each machine or group of machines. For instance, the data center in Atlanta has its own NTP servers, so when setting up ntp.conf, we should use them::
+
+ ---
+ # file: group_vars/atlanta
+ ntp: ntp-atlanta.example.com
+ backup: backup-atlanta.example.com
+
+Similarly, the webservers have some configuration that does not apply to the database servers::
+
+ ---
+ # file: group_vars/webservers
+ apacheMaxRequestsPerChild: 3000
+ apacheMaxClients: 900
+
+Default values, or values that are universally true, belong in a file called group_vars/all::
+
+ ---
+ # file: group_vars/all
+ ntp: ntp-boston.example.com
+ backup: backup-boston.example.com
+
+If necessary, you can capture hardware differences for specific systems in a host_vars file::
+
+ ---
+ # file: host_vars/db-bos-1.example.com
+ foo_agent_port: 86
+ bar_agent_port: 99
+
+Again, if you are using :ref:`dynamic inventory <dynamic_inventory>`, Ansible creates many dynamic groups automatically. So a tag like "class:webserver" would load in variables from the file "group_vars/ec2_tag_class_webserver" automatically.
+
+.. _split_by_role:
+
+Sample playbooks organized by function
+--------------------------------------
+
+With this setup, a single playbook can define all the infrastructure. The site.yml playbook imports two other playbooks, one for the webservers and one for the database servers::
+
+ ---
+ # file: site.yml
+ - import_playbook: webservers.yml
+ - import_playbook: dbservers.yml
+
+The webservers.yml file, also at the top level, maps the configuration of the webservers group to the roles related to the webservers group::
+
+ ---
+ # file: webservers.yml
+ - hosts: webservers
+ roles:
+ - common
+ - webtier
+
+With this setup, you can configure your whole infrastructure by "running" site.yml, or run a subset by running webservers.yml. This is analogous to the Ansible "--limit" parameter but a little more explicit::
+
+ ansible-playbook site.yml --limit webservers
+ ansible-playbook webservers.yml
+
+.. _role_organization:
+
+Sample task and handler files in a function-based role
+------------------------------------------------------
+
+Ansible loads any file called ``main.yml`` in a role sub-directory. This sample ``tasks/main.yml`` file is simple - it sets up NTP, but it could do more if we wanted::
+
+ ---
+ # file: roles/common/tasks/main.yml
+
+ - name: be sure ntp is installed
+ yum:
+ name: ntp
+ state: present
+ tags: ntp
+
+ - name: be sure ntp is configured
+ template:
+ src: ntp.conf.j2
+ dest: /etc/ntp.conf
+ notify:
+ - restart ntpd
+ tags: ntp
+
+ - name: be sure ntpd is running and enabled
+ service:
+ name: ntpd
+ state: started
+ enabled: yes
+ tags: ntp
+
+Here is an example handlers file. As a review, handlers are only fired when certain tasks report changes, and are run at the end
+of each play::
+
+ ---
+ # file: roles/common/handlers/main.yml
+ - name: restart ntpd
+ service:
+ name: ntpd
+ state: restarted
+
+See :ref:`playbooks_reuse_roles` for more information.
+
+
+.. _organization_examples:
+
+What the sample setup enables
+-----------------------------
+
+The basic organizational structure described above enables a lot of different automation options. To reconfigure your entire infrastructure::
+
+ ansible-playbook -i production site.yml
+
+To reconfigure NTP on everything::
+
+ ansible-playbook -i production site.yml --tags ntp
+
+To reconfigure only the webservers::
+
+ ansible-playbook -i production webservers.yml
+
+To reconfigure only the webservers in Boston::
+
+ ansible-playbook -i production webservers.yml --limit boston
+
+To reconfigure only the first 10 webservers in Boston, and then the next 10::
+
+ ansible-playbook -i production webservers.yml --limit boston[0:9]
+ ansible-playbook -i production webservers.yml --limit boston[10:19]
+
+The sample setup also supports basic ad-hoc commands::
+
+ ansible boston -i production -m ping
+ ansible boston -i production -m command -a '/sbin/reboot'
+
+To discover what tasks would run or what hostnames would be affected by a particular Ansible command::
+
+ # confirm what task names would be run if I ran this command and said "just ntp tasks"
+ ansible-playbook -i production webservers.yml --tags ntp --list-tasks
+
+ # confirm what hostnames might be communicated with if I said "limit to boston"
+ ansible-playbook -i production webservers.yml --limit boston --list-hosts
+
+.. _dep_vs_config:
+
+Organizing for deployment or configuration
+------------------------------------------
+
+The sample setup models a typical configuration topology. When doing multi-tier deployments, there are going
+to be some additional playbooks that hop between tiers to roll out an application. In this case, 'site.yml'
+may be augmented by playbooks like 'deploy_exampledotcom.yml' but the general concepts still apply. Ansible allows you to deploy and configure using the same tool, so you would likely reuse groups and keep the OS configuration in separate playbooks or roles from the app deployment.
+
+Consider "playbooks" as a sports metaphor -- you can have one set of plays to use against all your infrastructure and situational plays that you use at different times and for different purposes.
+
+.. _ship_modules_with_playbooks:
+
+Using local Ansible modules
+---------------------------
+
+If a playbook has a :file:`./library` directory relative to its YAML file, this directory can be used to add Ansible modules that will
+automatically be in the Ansible module path. This is a great way to keep modules that go with a playbook together. This is shown
+in the directory structure example at the start of this section.
+
+.. seealso::
+
+ :ref:`yaml_syntax`
+ Learn about YAML syntax
+ :ref:`working_with_playbooks`
+ Review the basic playbook features
+ :ref:`list_of_collections`
+ Browse existing collections, modules, and plugins
+ :ref:`developing_modules`
+ Learn how to extend Ansible by writing your own modules
+ :ref:`intro_patterns`
+ Learn about how to select hosts
+ `GitHub examples directory <https://github.com/ansible/ansible-examples>`_
+ Complete playbook files from the github project source
+ `Mailing List <https://groups.google.com/group/ansible-project>`_
+ Questions? Help? Ideas? Stop by the list on Google Groups
diff --git a/docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt b/docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt
new file mode 100644
index 00000000..dc61ab38
--- /dev/null
+++ b/docs/docsite/rst/user_guide/shared_snippets/SSH_password_prompt.txt
@@ -0,0 +1,2 @@
+.. note::
+ Ansible does not expose a channel to allow communication between the user and the ssh process to accept a password manually to decrypt an ssh key when using the ssh connection plugin (which is the default). The use of ``ssh-agent`` is highly recommended.
diff --git a/docs/docsite/rst/user_guide/shared_snippets/with2loop.txt b/docs/docsite/rst/user_guide/shared_snippets/with2loop.txt
new file mode 100644
index 00000000..5217f942
--- /dev/null
+++ b/docs/docsite/rst/user_guide/shared_snippets/with2loop.txt
@@ -0,0 +1,205 @@
+In most cases, loops work best with the ``loop`` keyword instead of ``with_X`` style loops. The ``loop`` syntax is usually best expressed using filters instead of more complex use of ``query`` or ``lookup``.
+
+These examples show how to convert many common ``with_`` style loops to ``loop`` and filters.
+
+with_list
+---------
+
+``with_list`` is directly replaced by ``loop``.
+
+.. code-block:: yaml+jinja
+
+ - name: with_list
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_list:
+ - one
+ - two
+
+ - name: with_list -> loop
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop:
+ - one
+ - two
+
+with_items
+----------
+
+``with_items`` is replaced by ``loop`` and the ``flatten`` filter.
+
+.. code-block:: yaml+jinja
+
+ - name: with_items
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_items: "{{ items }}"
+
+ - name: with_items -> loop
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ items|flatten(levels=1) }}"
+
+with_indexed_items
+------------------
+
+``with_indexed_items`` is replaced by ``loop``, the ``flatten`` filter and ``loop_control.index_var``.
+
+.. code-block:: yaml+jinja
+
+ - name: with_indexed_items
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }}"
+ with_indexed_items: "{{ items }}"
+
+ - name: with_indexed_items -> loop
+ ansible.builtin.debug:
+ msg: "{{ index }} - {{ item }}"
+ loop: "{{ items|flatten(levels=1) }}"
+ loop_control:
+ index_var: index
+
+with_flattened
+--------------
+
+``with_flattened`` is replaced by ``loop`` and the ``flatten`` filter.
+
+.. code-block:: yaml+jinja
+
+ - name: with_flattened
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_flattened: "{{ items }}"
+
+ - name: with_flattened -> loop
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ loop: "{{ items|flatten }}"
+
+with_together
+-------------
+
+``with_together`` is replaced by ``loop`` and the ``zip`` filter.
+
+.. code-block:: yaml+jinja
+
+ - name: with_together
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }}"
+ with_together:
+ - "{{ list_one }}"
+ - "{{ list_two }}"
+
+ - name: with_together -> loop
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }}"
+ loop: "{{ list_one|zip(list_two)|list }}"
+
+Another example with complex data
+
+.. code-block:: yaml+jinja
+
+ - name: with_together -> loop
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }} - {{ item.2 }}"
+ loop: "{{ data[0]|zip(*data[1:])|list }}"
+ vars:
+ data:
+ - ['a', 'b', 'c']
+ - ['d', 'e', 'f']
+ - ['g', 'h', 'i']
+
+with_dict
+---------
+
+``with_dict`` can be substituted by ``loop`` and either the ``dictsort`` or ``dict2items`` filters.
+
+.. code-block:: yaml+jinja
+
+ - name: with_dict
+ ansible.builtin.debug:
+ msg: "{{ item.key }} - {{ item.value }}"
+ with_dict: "{{ dictionary }}"
+
+ - name: with_dict -> loop (option 1)
+ ansible.builtin.debug:
+ msg: "{{ item.key }} - {{ item.value }}"
+ loop: "{{ dictionary|dict2items }}"
+
+ - name: with_dict -> loop (option 2)
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }}"
+ loop: "{{ dictionary|dictsort }}"
+
+with_sequence
+-------------
+
+``with_sequence`` is replaced by ``loop`` and the ``range`` function, and potentially the ``format`` filter.
+
+.. code-block:: yaml+jinja
+
+ - name: with_sequence
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_sequence: start=0 end=4 stride=2 format=testuser%02x
+
+ - name: with_sequence -> loop
+ ansible.builtin.debug:
+ msg: "{{ 'testuser%02x' | format(item) }}"
+ # range is exclusive of the end point
+ loop: "{{ range(0, 4 + 1, 2)|list }}"
+
+with_subelements
+----------------
+
+``with_subelements`` is replaced by ``loop`` and the ``subelements`` filter.
+
+.. code-block:: yaml+jinja
+
+ - name: with_subelements
+ ansible.builtin.debug:
+ msg: "{{ item.0.name }} - {{ item.1 }}"
+ with_subelements:
+ - "{{ users }}"
+ - mysql.hosts
+
+ - name: with_subelements -> loop
+ ansible.builtin.debug:
+ msg: "{{ item.0.name }} - {{ item.1 }}"
+ loop: "{{ users|subelements('mysql.hosts') }}"
+
+with_nested/with_cartesian
+--------------------------
+
+``with_nested`` and ``with_cartesian`` are replaced by ``loop`` and the ``product`` filter.
+
+.. code-block:: yaml+jinja
+
+ - name: with_nested
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }}"
+ with_nested:
+ - "{{ list_one }}"
+ - "{{ list_two }}"
+
+ - name: with_nested -> loop
+ ansible.builtin.debug:
+ msg: "{{ item.0 }} - {{ item.1 }}"
+ loop: "{{ list_one|product(list_two)|list }}"
+
+with_random_choice
+------------------
+
+``with_random_choice`` is replaced by the ``random`` filter; no ``loop`` is needed.
+
+.. code-block:: yaml+jinja
+
+ - name: with_random_choice
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_random_choice: "{{ my_list }}"
+
+ - name: with_random_choice -> loop (No loop is needed here)
+ ansible.builtin.debug:
+ msg: "{{ my_list|random }}"
+ tags: random
diff --git a/docs/docsite/rst/user_guide/vault.rst b/docs/docsite/rst/user_guide/vault.rst
new file mode 100644
index 00000000..d84aefec
--- /dev/null
+++ b/docs/docsite/rst/user_guide/vault.rst
@@ -0,0 +1,660 @@
+.. _vault:
+
+*************************************
+Encrypting content with Ansible Vault
+*************************************
+
+Ansible Vault encrypts variables and files so you can protect sensitive content such as passwords or keys rather than leaving it visible as plaintext in playbooks or roles. To use Ansible Vault you need one or more passwords to encrypt and decrypt content. If you store your vault passwords in a third-party tool such as a secret manager, you need a script to access them. Use the passwords with the :ref:`ansible-vault` command-line tool to create and view encrypted variables, create encrypted files, encrypt existing files, or edit, re-key, or decrypt files. You can then place encrypted content under source control and share it more safely.
+
+.. warning::
+ * Encryption with Ansible Vault ONLY protects 'data at rest'. Once the content is decrypted ('data in use'), play and plugin authors are responsible for avoiding any secret disclosure, see :ref:`no_log <keep_secret_data>` for details on hiding output and :ref:`vault_securing_editor` for security considerations on editors you use with Ansible Vault.
+
+You can use encrypted variables and files in ad-hoc commands and playbooks by supplying the passwords you used to encrypt them. You can modify your ``ansible.cfg`` file to specify the location of a password file or to always prompt for the password.
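+
+For example, a minimal sketch (hypothetical path) of pointing Ansible at a password file in ``ansible.cfg``:
+
+.. code-block:: text
+
+    [defaults]
+    vault_password_file = ~/.vault_pass.txt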
+
+.. contents::
+ :local:
+
+Managing vault passwords
+========================
+
+Managing your encrypted content is easier if you develop a strategy for managing your vault passwords. A vault password can be any string you choose. There is no special command to create a vault password. However, you need to keep track of your vault passwords. Each time you encrypt a variable or file with Ansible Vault, you must provide a password. When you use an encrypted variable or file in a command or playbook, you must provide the same password that was used to encrypt it. To develop a strategy for managing vault passwords, start with two questions:
+
+ * Do you want to encrypt all your content with the same password, or use different passwords for different needs?
+ * Where do you want to store your password or passwords?
+
+Choosing between a single password and multiple passwords
+---------------------------------------------------------
+
+If you have a small team or few sensitive values, you can use a single password for everything you encrypt with Ansible Vault. Store your vault password securely in a file or a secret manager as described below.
+
+If you have a larger team or many sensitive values, you can use multiple passwords. For example, you can use different passwords for different users or different levels of access. Depending on your needs, you might want a different password for each encrypted file, for each directory, or for each environment. For example, you might have a playbook that includes two vars files, one for the dev environment and one for the production environment, encrypted with two different passwords. When you run the playbook, select the correct vault password for the environment you are targeting, using a vault ID.
+
+.. _vault_ids:
+
+Managing multiple passwords with vault IDs
+------------------------------------------
+
+If you use multiple vault passwords, you can differentiate one password from another with vault IDs. You use the vault ID in three ways:
+
+ * Pass it with :option:`--vault-id <ansible-playbook --vault-id>` to the :ref:`ansible-vault` command when you create encrypted content
+ * Include it wherever you store the password for that vault ID (see :ref:`storing_vault_passwords`)
+ * Pass it with :option:`--vault-id <ansible-playbook --vault-id>` to the :ref:`ansible-playbook` command when you run a playbook that uses content you encrypted with that vault ID
+
+When you pass a vault ID as an option to the :ref:`ansible-vault` command, you add a label (a hint or nickname) to the encrypted content. This label documents which password you used to encrypt it. The encrypted variable or file includes the vault ID label in plain text in the header. The vault ID is the last element before the encrypted content. For example::
+
+    my_encrypted_var: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;dev
+ 30613233633461343837653833666333643061636561303338373661313838333565653635353162
+ 3263363434623733343538653462613064333634333464660a663633623939393439316636633863
+ 61636237636537333938306331383339353265363239643939666639386530626330633337633833
+ 6664656334373166630a363736393262666465663432613932613036303963343263623137386239
+ 6330
+
+In addition to the label, you must provide a source for the related password. The source can be a prompt, a file, or a script, depending on how you are storing your vault passwords. The pattern looks like this:
+
+.. code-block:: bash
+
+ --vault-id label@source
+
+If your playbook uses multiple encrypted variables or files that you encrypted with different passwords, you must pass the vault IDs when you run that playbook. You can use :option:`--vault-id <ansible-playbook --vault-id>` by itself, with :option:`--vault-password-file <ansible-playbook --vault-password-file>`, or with :option:`--ask-vault-pass <ansible-playbook --ask-vault-pass>`. The pattern is the same as when you create encrypted content: include the label and the source for the matching password.
+
+See below for examples of encrypting content with vault IDs and using content encrypted with vault IDs. The :option:`--vault-id <ansible-playbook --vault-id>` option works with any Ansible command that interacts with vaults, including :ref:`ansible-vault`, :ref:`ansible-playbook`, and so on.
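+
+For example, a sketch (hypothetical labels and file names) of running a playbook whose content was encrypted with two different passwords, one stored in a file and one supplied at a prompt:
+
+.. code-block:: bash
+
+    ansible-playbook --vault-id dev@dev-password-file --vault-id prod@prompt site.yml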
+
+Limitations of vault IDs
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Ansible does not enforce using the same password every time you use a particular vault ID label. You can encrypt different variables or files with the same vault ID label but different passwords. This usually happens when you type the password at a prompt and make a mistake. It is possible to use different passwords with the same vault ID label on purpose. For example, you could use each label as a reference to a class of passwords, rather than a single password. In this scenario, you must always know which specific password or file to use in context. However, you are more likely to encrypt two files with the same vault ID label and different passwords by mistake. If you encrypt two files with the same label but different passwords by accident, you can :ref:`rekey <rekeying_files>` one file to fix the issue.
+
+Enforcing vault ID matching
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default the vault ID label is only a hint to remind you which password you used to encrypt a variable or file. Ansible does not check that the vault ID in the header of the encrypted content matches the vault ID you provide when you use the content. Ansible decrypts all files and variables called by your command or playbook that are encrypted with the password you provide. To check the encrypted content and decrypt it only when the vault ID it contains matches the one you provide with ``--vault-id``, set the config option :ref:`DEFAULT_VAULT_ID_MATCH`. When you set :ref:`DEFAULT_VAULT_ID_MATCH`, each password is only used to decrypt data that was encrypted with the same label. This is efficient, predictable, and can reduce errors when different values are encrypted with different passwords.
+
+.. note::
+ Even with the :ref:`DEFAULT_VAULT_ID_MATCH` setting enabled, Ansible does not enforce using the same password every time you use a particular vault ID label.
+
+.. _storing_vault_passwords:
+
+Storing and accessing vault passwords
+-------------------------------------
+
+You can memorize your vault password, or manually copy vault passwords from any source and paste them at a command-line prompt, but most users store them securely and access them as needed from within Ansible. You have two options for storing vault passwords that work from within Ansible: in files, or in a third-party tool such as the system keyring or a secret manager. If you store your passwords in a third-party tool, you need a vault password client script to retrieve them from within Ansible.
+
+Storing passwords in files
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To store a vault password in a file, enter the password as a string on a single line in the file. Make sure the permissions on the file are appropriate. Do not add password files to source control. If you have multiple passwords, you can store them all in a single file, as long as they all have vault IDs. For each password, create a separate line and enter the vault ID, a space, then the password as a string. For example:
+
+.. code-block:: text
+
+ dev my_dev_pass
+ test my_test_pass
+ prod my_prod_pass
+
+
+.. _vault_password_client_scripts:
+
+Storing passwords in third-party tools with vault password client scripts
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can store your vault passwords on the system keyring, in a database, or in a secret manager and retrieve them from within Ansible using a vault password client script. Enter the password as a string on a single line. If your password has a vault ID, store it in a way that works with your password storage tool.
+
+To create a vault password client script:
+
+ * Create a file with a name ending in ``-client.py``
+ * Make the file executable
+ * Within the script itself:
+ * Print the passwords to standard output
+ * Accept a ``--vault-id`` option
+ * If the script prompts for data (for example, a database password), send the prompts to standard error
+
+When you run a playbook that uses vault passwords stored in a third-party tool, specify the script as the source within the ``--vault-id`` flag. For example:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id dev@contrib/vault/vault-keyring-client.py site.yml
+
+Ansible executes the client script with a ``--vault-id`` option so the script knows which vault ID label you specified. For example, a script loading passwords from a secret manager can use the vault ID label to pick either the 'dev' or 'prod' password. The example command above results in the following execution of the client script:
+
+.. code-block:: bash
+
+ contrib/vault/vault-keyring-client.py --vault-id dev
+
+For an example of a client script that loads passwords from the system keyring, see :file:`contrib/vault/vault-keyring-client.py`.
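+
+For illustration only, a minimal client script could look like the sketch below. The file name ``my-dict-client.py`` and the hard-coded dictionary are stand-ins; a real script would query the system keyring or a secret manager instead:
+
+.. code-block:: python
+
+ #!/usr/bin/env python3
+ # Hypothetical minimal vault password client script.
+ # Prints the password for the requested --vault-id label to stdout;
+ # errors (and any prompts) go to stderr, never stdout.
+ import argparse
+ import sys
+
+ # Stand-in for a keyring or secret-manager lookup
+ PASSWORDS = {'dev': 'my_dev_pass', 'prod': 'my_prod_pass'}
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--vault-id', dest='vault_id', required=True)
+ args = parser.parse_args()
+
+ password = PASSWORDS.get(args.vault_id)
+ if password is None:
+     print('unknown vault ID: %s' % args.vault_id, file=sys.stderr)
+     sys.exit(1)
+
+ print(password)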
+
+
+Encrypting content with Ansible Vault
+=====================================
+
+Once you have a strategy for managing and storing vault passwords, you can start encrypting content. You can encrypt two types of content with Ansible Vault: variables and files. Encrypted variables are marked with the ``!vault`` tag, which tells Ansible and YAML that the content needs to be decrypted, and a ``|`` character, which allows multi-line strings. Content created with ``--vault-id`` also contains the vault ID label in its header. For more details about the encryption process and the format of content encrypted with Ansible Vault, see :ref:`vault_format`. This table shows the main differences between encrypted variables and encrypted files:
+
+.. table::
+ :class: documentation-table
+
+ ====================== ================================= ====================================
+ .. Encrypted variables Encrypted files
+ ====================== ================================= ====================================
+ How much is encrypted? Variables within a plaintext file The entire file
+
+ When is it decrypted? On demand, only when needed Whenever loaded or referenced [#f1]_
+
+ What can be encrypted? Only variables Any structured data file
+
+ ====================== ================================= ====================================
+
+.. [#f1] Ansible cannot know if it needs content from an encrypted file unless it decrypts the file, so it decrypts all encrypted files referenced in your playbooks and roles.
+
+.. _encrypting_variables:
+.. _single_encrypted_variable:
+
+Encrypting individual variables with Ansible Vault
+--------------------------------------------------
+
+You can encrypt single values inside a YAML file using the :ref:`ansible-vault encrypt_string <ansible_vault_encrypt_string>` command. For one way to keep your vaulted variables safely visible, see :ref:`tip_for_variables_and_vaults`.
+
+Advantages and disadvantages of encrypting variables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+With variable-level encryption, your files are still easily legible. You can mix plaintext and encrypted variables, even inline in a play or role. However, password rotation is not as simple as with file-level encryption. You cannot :ref:`rekey <rekeying_files>` encrypted variables. Also, variable-level encryption only works on variables. If you want to encrypt tasks or other content, you must encrypt the entire file.
+
+.. _encrypt_string_for_use_in_yaml:
+
+Creating encrypted variables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :ref:`ansible-vault encrypt_string <ansible_vault_encrypt_string>` command encrypts and formats any string you type (or copy or generate) into a format that can be included in a playbook, role, or variables file. To create a basic encrypted variable, pass three options to the :ref:`ansible-vault encrypt_string <ansible_vault_encrypt_string>` command:
+
+ * a source for the vault password (prompt, file, or script, with or without a vault ID)
+ * the string to encrypt
+ * the string name (the name of the variable)
+
+The pattern looks like this:
+
+.. code-block:: bash
+
+ ansible-vault encrypt_string <password_source> '<string_to_encrypt>' --name '<string_name_of_variable>'
+
+For example, to encrypt the string 'foobar' using the only password stored in 'a_password_file' and name the variable 'the_secret':
+
+.. code-block:: bash
+
+ ansible-vault encrypt_string --vault-password-file a_password_file 'foobar' --name 'the_secret'
+
+The command above creates this content::
+
+ the_secret: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 62313365396662343061393464336163383764373764613633653634306231386433626436623361
+ 6134333665353966363534333632666535333761666131620a663537646436643839616531643561
+ 63396265333966386166373632626539326166353965363262633030333630313338646335303630
+ 3438626666666137650a353638643435666633633964366338633066623234616432373231333331
+ 6564
+
+To encrypt the string 'foooodev', add the vault ID label 'dev' with the 'dev' vault password stored in 'a_password_file', and call the encrypted variable 'the_dev_secret':
+
+.. code-block:: bash
+
+ ansible-vault encrypt_string --vault-id dev@a_password_file 'foooodev' --name 'the_dev_secret'
+
+The command above creates this content::
+
+ the_dev_secret: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;dev
+ 30613233633461343837653833666333643061636561303338373661313838333565653635353162
+ 3263363434623733343538653462613064333634333464660a663633623939393439316636633863
+ 61636237636537333938306331383339353265363239643939666639386530626330633337633833
+ 6664656334373166630a363736393262666465663432613932613036303963343263623137386239
+ 6330
+
+To encrypt the string 'letmein' read from stdin, add the vault ID 'test' using the 'test' vault password stored in 'a_password_file', and name the variable 'test_db_password':
+
+.. code-block:: bash
+
+ echo -n 'letmein' | ansible-vault encrypt_string --vault-id test@a_password_file --stdin-name 'test_db_password'
+
+.. warning::
+
+ Typing secret content directly at the command line (without a prompt) leaves the secret string in your shell history. Do not do this outside of testing.
+
+The command above creates this output::
+
+ Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a new line)
+ test_db_password: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;test
+ 61323931353866666336306139373937316366366138656131323863373866376666353364373761
+ 3539633234313836346435323766306164626134376564330a373530313635343535343133316133
+ 36643666306434616266376434363239346433643238336464643566386135356334303736353136
+ 6565633133366366360a326566323363363936613664616364623437336130623133343530333739
+ 3039
+
+To be prompted for a string to encrypt, encrypt it with the 'dev' vault password from 'a_password_file', name the variable 'new_user_password' and give it the vault ID label 'dev':
+
+.. code-block:: bash
+
+ ansible-vault encrypt_string --vault-id dev@a_password_file --stdin-name 'new_user_password'
+
+The command above triggers this prompt:
+
+.. code-block:: text
+
+ Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a new line)
+
+Type the string to encrypt (for example, 'hunter2'), hit ctrl-d, and wait.
+
+.. warning::
+
+ Do not press ``Enter`` after supplying the string to encrypt. That will add a newline to the encrypted value.
+
+The sequence above creates this output::
+
+ new_user_password: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;dev
+ 37636561366636643464376336303466613062633537323632306566653533383833366462366662
+ 6565353063303065303831323539656138653863353230620a653638643639333133306331336365
+ 62373737623337616130386137373461306535383538373162316263386165376131623631323434
+ 3866363862363335620a376466656164383032633338306162326639643635663936623939666238
+ 3161
+
+You can add the output from any of the examples above to any playbook, variables file, or role for future use. Encrypted variables are larger than plain-text variables, but they protect your sensitive content while leaving the rest of the playbook, variables file, or role in plain text so you can easily read it.
+
+Viewing encrypted variables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+You can view the original value of an encrypted variable using the debug module. You must pass the password that was used to encrypt the variable. For example, if you stored the variable created by the last example above in a file called 'vars.yml', you could view the unencrypted value of that variable like this:
+
+.. code-block:: console
+
+ ansible localhost -m ansible.builtin.debug -a var="new_user_password" -e "@vars.yml" --vault-id dev@a_password_file
+
+ localhost | SUCCESS => {
+ "new_user_password": "hunter2"
+ }
+
+
+Encrypting files with Ansible Vault
+-----------------------------------
+
+Ansible Vault can encrypt any structured data file used by Ansible, including:
+
+ * group variables files from inventory
+ * host variables files from inventory
+ * variables files passed to ansible-playbook with ``-e @file.yml`` or ``-e @file.json``
+ * variables files loaded by ``include_vars`` or ``vars_files``
+ * variables files in roles
+ * defaults files in roles
+ * tasks files
+ * handlers files
+ * binary files or other arbitrary files
+
+The full file is encrypted in the vault.
+
+.. note::
+
+ Ansible Vault uses an editor to create or modify encrypted files. See :ref:`vault_securing_editor` for some guidance on securing the editor.
+
+
+Advantages and disadvantages of encrypting files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+File-level encryption is easy to use. Password rotation for encrypted files is straightforward with the :ref:`rekey <rekeying_files>` command. Encrypting files can hide not only sensitive values, but the names of the variables you use. However, with file-level encryption the contents of files are no longer easy to access and read. This may be a problem with encrypted tasks files. When encrypting a variables file, see :ref:`tip_for_variables_and_vaults` for one way to keep references to these variables in a non-encrypted file. Ansible always decrypts the entire encrypted file when it is loaded or referenced, because Ansible cannot know if it needs the content unless it decrypts it.
+
+.. _creating_files:
+
+Creating encrypted files
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new encrypted data file called 'foo.yml' with the 'test' vault password from 'multi_password_file':
+
+.. code-block:: bash
+
+ ansible-vault create --vault-id test@multi_password_file foo.yml
+
+The tool launches an editor (whatever editor is defined with ``$EDITOR``; the default is ``vi``). Add the content. When you close the editor session, the file is saved as encrypted data. The file header reflects the vault ID used to create it:
+
+.. code-block:: text
+
+ $ANSIBLE_VAULT;1.2;AES256;test
+
+To create a new encrypted data file with the vault ID 'my_new_password' assigned to it and be prompted for the password:
+
+.. code-block:: bash
+
+ ansible-vault create --vault-id my_new_password@prompt foo.yml
+
+Again, add content to the file in the editor and save. Be sure to store the new password you created at the prompt, so you can find it when you want to decrypt that file.
+
+.. _encrypting_files:
+
+Encrypting existing files
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To encrypt an existing file, use the :ref:`ansible-vault encrypt <ansible_vault_encrypt>` command. This command can operate on multiple files at once. For example:
+
+.. code-block:: bash
+
+ ansible-vault encrypt foo.yml bar.yml baz.yml
+
+To encrypt existing files with the 'project' ID and be prompted for the password:
+
+.. code-block:: bash
+
+ ansible-vault encrypt --vault-id project@prompt foo.yml bar.yml baz.yml
+
+
+.. _viewing_files:
+
+Viewing encrypted files
+^^^^^^^^^^^^^^^^^^^^^^^
+
+To view the contents of an encrypted file without editing it, you can use the :ref:`ansible-vault view <ansible_vault_view>` command:
+
+.. code-block:: bash
+
+ ansible-vault view foo.yml bar.yml baz.yml
+
+
+.. _editing_encrypted_files:
+
+Editing encrypted files
+^^^^^^^^^^^^^^^^^^^^^^^
+
+To edit an encrypted file in place, use the :ref:`ansible-vault edit <ansible_vault_edit>` command. This command decrypts the file to a temporary file, allows you to edit the content, then saves and re-encrypts the content and removes the temporary file when you close the editor. For example:
+
+.. code-block:: bash
+
+ ansible-vault edit foo.yml
+
+To edit a file encrypted with the ``vault2`` password file and assigned the vault ID ``pass2``:
+
+.. code-block:: bash
+
+ ansible-vault edit --vault-id pass2@vault2 foo.yml
+
+
+.. _rekeying_files:
+
+Changing the password and/or vault ID on encrypted files
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To change the password on an encrypted file or files, use the :ref:`rekey <ansible_vault_rekey>` command:
+
+.. code-block:: bash
+
+ ansible-vault rekey foo.yml bar.yml baz.yml
+
+This command can rekey multiple data files at once and will ask for the original password and also the new password. To set a different ID for the rekeyed files, pass the new ID to ``--new-vault-id``. For example, to rekey a list of files encrypted with the 'preprod1' vault ID from the 'ppold' file to the 'preprod2' vault ID and be prompted for the new password:
+
+.. code-block:: bash
+
+ ansible-vault rekey --vault-id preprod1@ppold --new-vault-id preprod2@prompt foo.yml bar.yml baz.yml
+
+
+.. _decrypting_files:
+
+Decrypting encrypted files
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you have an encrypted file that you no longer want to keep encrypted, you can permanently decrypt it by running the :ref:`ansible-vault decrypt <ansible_vault_decrypt>` command. This command will save the file unencrypted to the disk, so be sure you do not want to :ref:`edit <ansible_vault_edit>` it instead.
+
+.. code-block:: bash
+
+ ansible-vault decrypt foo.yml bar.yml baz.yml
+
+
+.. _vault_securing_editor:
+
+Steps to secure your editor
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Ansible Vault relies on your configured editor, which can be a source of disclosures. Most editors have ways to prevent loss of data, but these normally rely on extra plain text files that can have a clear text copy of your secrets. Consult your editor documentation to configure the editor to avoid disclosing secure data. The following sections provide some guidance on common editors but should not be taken as a complete guide to securing your editor.
+
+
+vim
+...
+
+You can set the following ``vim`` options in command mode to avoid cases of disclosure. There may be more settings you need to modify to ensure security, especially when using plugins, so consult the ``vim`` documentation.
+
+
+1. Disable swapfiles that act like an autosave in case of crash or interruption.
+
+.. code-block:: text
+
+ set noswapfile
+
+2. Disable creation of backup files.
+
+.. code-block:: text
+
+ set nobackup
+ set nowritebackup
+
+3. Prevent the viminfo file from storing data from your current session.
+
+.. code-block:: text
+
+ set viminfo=
+
+4. Disable copying to the system clipboard.
+
+.. code-block:: text
+
+ set clipboard=
+
+
+You can optionally add these settings in ``.vimrc`` for all files, or just specific paths or extensions. See the ``vim`` manual for details.
+
+
+Emacs
+......
+
+You can set the following Emacs options to avoid cases of disclosure. There may be more settings you need to modify to ensure security, especially when using plugins, so consult the Emacs documentation.
+
+1. Do not copy data to the system clipboard.
+
+.. code-block:: text
+
+ (setq x-select-enable-clipboard nil)
+
+2. Disable creation of backup files.
+
+.. code-block:: text
+
+ (setq make-backup-files nil)
+
+3. Disable autosave files.
+
+.. code-block:: text
+
+ (setq auto-save-default nil)
+
+
+.. _playbooks_vault:
+.. _providing_vault_passwords:
+
+Using encrypted variables and files
+===================================
+
+When you run a task or playbook that uses encrypted variables or files, you must provide the passwords to decrypt the variables or files. You can do this at the command line or in the playbook itself.
+
+Passing a single password
+-------------------------
+
+If all the encrypted variables and files your task or playbook needs use a single password, you can use the :option:`--ask-vault-pass <ansible-playbook --ask-vault-pass>` or :option:`--vault-password-file <ansible-playbook --vault-password-file>` command-line options.
+
+To prompt for the password:
+
+.. code-block:: bash
+
+ ansible-playbook --ask-vault-pass site.yml
+
+To retrieve the password from the :file:`/path/to/my/vault-password-file` file:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-password-file /path/to/my/vault-password-file site.yml
+
+To get the password from the vault password client script :file:`my-vault-password-client.py`:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-password-file my-vault-password-client.py site.yml
+
+
+.. _specifying_vault_ids:
+
+Passing vault IDs
+-----------------
+
+You can also use the :option:`--vault-id <ansible-playbook --vault-id>` option to pass a single password with its vault label. This approach is clearer when multiple vaults are used within a single inventory.
+
+To prompt for the password for the 'dev' vault ID:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id dev@prompt site.yml
+
+To retrieve the password for the 'dev' vault ID from the :file:`dev-password` file:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id dev@dev-password site.yml
+
+To get the password for the 'dev' vault ID from the vault password client script :file:`my-vault-password-client.py`:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id dev@my-vault-password-client.py site.yml
+
+Passing multiple vault passwords
+--------------------------------
+
+If your task or playbook requires multiple encrypted variables or files that you encrypted with different vault IDs, you must use the :option:`--vault-id <ansible-playbook --vault-id>` option, passing multiple ``--vault-id`` options to specify the vault IDs ('dev', 'prod', 'cloud', 'db') and sources for the passwords (prompt, file, script). For example, to use a 'dev' password read from a file and to be prompted for the 'prod' password:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id dev@dev-password --vault-id prod@prompt site.yml
+
+By default the vault ID labels (dev, prod, and so on) are only hints. Ansible attempts to decrypt vault content with each password. The password with the same label as the encrypted data is tried first; after that, each vault secret is tried in the order it was provided on the command line.
+
+Where the encrypted data has no label, or the label does not match any of the provided labels, the passwords will be tried in the order they are specified. In the example above, the 'dev' password will be tried first, then the 'prod' password for cases where Ansible doesn't know which vault ID is used to encrypt something.
+
+Using ``--vault-id`` without a vault ID
+---------------------------------------
+
+The :option:`--vault-id <ansible-playbook --vault-id>` option can also be used without specifying a vault-id. This behavior is equivalent to :option:`--ask-vault-pass <ansible-playbook --ask-vault-pass>` or :option:`--vault-password-file <ansible-playbook --vault-password-file>`, so it is rarely used.
+
+For example, to use a password file :file:`dev-password`:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id dev-password site.yml
+
+To prompt for the password:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id @prompt site.yml
+
+To get the password from an executable script :file:`my-vault-password-client.py`:
+
+.. code-block:: bash
+
+ ansible-playbook --vault-id my-vault-password-client.py site.yml
+
+
+Configuring defaults for using encrypted content
+================================================
+
+Setting a default vault ID
+--------------------------
+
+If you use one vault ID more frequently than any other, you can set the config option :ref:`DEFAULT_VAULT_IDENTITY_LIST` to specify a default vault ID and password source. Ansible will use the default vault ID and source any time you do not specify :option:`--vault-id <ansible-playbook --vault-id>`. You can set multiple values for this option. Setting multiple values is equivalent to passing multiple :option:`--vault-id <ansible-playbook --vault-id>` command-line options.
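+
+For example, a default identity list could be set in :file:`ansible.cfg` like this (the label names and file path are illustrative):
+
+.. code-block:: ini
+
+ [defaults]
+ vault_identity_list = dev@~/.vault_pass_dev.txt, prod@prompt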
+
+Setting a default password source
+---------------------------------
+
+If you use one vault password file more frequently than any other, you can set the :ref:`DEFAULT_VAULT_PASSWORD_FILE` config option or the :envvar:`ANSIBLE_VAULT_PASSWORD_FILE` environment variable to specify that file. For example, if you set ``ANSIBLE_VAULT_PASSWORD_FILE=~/.vault_pass.txt``, Ansible will automatically search for the password in that file. This is useful if, for example, you use Ansible from a continuous integration system such as Jenkins.
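+
+For example, in a shell profile or a CI job (the path is illustrative):
+
+.. code-block:: bash
+
+ export ANSIBLE_VAULT_PASSWORD_FILE=~/.vault_pass.txt
+ ansible-playbook site.yml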
+
+When are encrypted files made visible?
+======================================
+
+In general, content you encrypt with Ansible Vault remains encrypted after execution. However, there is one exception. If you pass an encrypted file as the ``src`` argument to the :ref:`copy <copy_module>`, :ref:`template <template_module>`, :ref:`unarchive <unarchive_module>`, :ref:`script <script_module>` or :ref:`assemble <assemble_module>` module, the file will not be encrypted on the target host (assuming you supply the correct vault password when you run the play). This behavior is intended and useful. You can encrypt a configuration file or template to avoid sharing the details of your configuration, but when you copy that configuration to servers in your environment, you want it to be decrypted so local users and processes can access it.
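+
+For example, here is a sketch of a task that renders a vault-encrypted template onto a managed host (the paths are illustrative); the rendered file arrives decrypted on the target:
+
+.. code-block:: yaml
+
+ - name: Deploy a configuration file that is vault-encrypted on the controller
+   ansible.builtin.template:
+     src: app.conf.j2        # encrypted with ansible-vault
+     dest: /etc/app/app.conf # written in plain text on the target
+     mode: '0600'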
+
+.. _speeding_up_vault:
+
+Speeding up Ansible Vault
+=========================
+
+If you have many encrypted files, decrypting them at startup may cause a perceptible delay. To speed this up, install the cryptography package:
+
+.. code-block:: bash
+
+ pip install cryptography
+
+
+.. _vault_format:
+
+Format of files encrypted with Ansible Vault
+============================================
+
+Ansible Vault creates UTF-8 encoded text files. The file format includes a newline-terminated header. For example::
+
+ $ANSIBLE_VAULT;1.1;AES256
+
+or::
+
+ $ANSIBLE_VAULT;1.2;AES256;vault-id-label
+
+The header contains up to four elements, separated by semi-colons (``;``).
+
+ 1. The format ID (``$ANSIBLE_VAULT``). Currently ``$ANSIBLE_VAULT`` is the only valid format ID. The format ID identifies content that is encrypted with Ansible Vault (via vault.is_encrypted_file()).
+
+ 2. The vault format version (``1.X``). All supported versions of Ansible will currently default to '1.1' or '1.2' if a labeled vault ID is supplied. The '1.0' format is supported for reading only (and will be converted automatically to the '1.1' format on write). The format version is currently used as an exact string compare only (version numbers are not currently 'compared').
+
+ 3. The cipher algorithm used to encrypt the data (``AES256``). Currently ``AES256`` is the only supported cipher algorithm. Vault format 1.0 used 'AES', but current code always uses 'AES256'.
+
+ 4. The vault ID label used to encrypt the data (optional, ``vault-id-label``) For example, if you encrypt a file with ``--vault-id dev@prompt``, the vault-id-label is ``dev``.
+
+Note: In the future, the header could change. Fields after the format ID and format version depend on the format version, and future vault format versions may add more cipher algorithm options and/or additional fields.
+
+The rest of the content of the file is the 'vaulttext'. The vaulttext is a text armored version of the
+encrypted ciphertext. Each line is 80 characters wide, except for the last line which may be shorter.
+
+Ansible Vault payload format 1.1 - 1.2
+--------------------------------------
+
+The vaulttext is a concatenation of the ciphertext and a SHA256 digest with the result 'hexlified'.
+
+'hexlify' refers to the ``hexlify()`` function of the Python Standard Library's `binascii <https://docs.python.org/3/library/binascii.html>`_ module.
+
+hexlify()'ed result of:
+
+- hexlify()'ed string of the salt, followed by a newline (``0x0a``)
+- hexlify()'ed string of the crypted HMAC, followed by a newline. The HMAC is:
+
+ - a `RFC2104 <https://www.ietf.org/rfc/rfc2104.txt>`_ style HMAC
+
+ - inputs are:
+
+ - The AES256 encrypted ciphertext
+ - A PBKDF2-derived key. This key, the cipher key, and the cipher IV are generated from:
+
+ - the salt, in bytes
+ - 10000 iterations
+ - the SHA256() algorithm
+
+ PBKDF2 produces 80 bytes of key material: the first 32 bytes are the cipher key, the second 32 bytes are the HMAC key, and the remaining 16 bytes are the cipher IV.
+
+- hexlify()'ed string of the ciphertext. The ciphertext is:
+
+ - AES256 encrypted data. The data is encrypted using:
+
+ - AES-CTR stream cipher
+ - cipher key
+ - IV
+ - a 128 bit counter block seeded from an integer IV
+ - the plaintext
+
+ - the original plaintext
+ - padding up to the AES256 blocksize. (The data used for padding is based on `RFC5652 <https://tools.ietf.org/html/rfc5652#section-6.3>`_)
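+
+For illustration only, here is a sketch of splitting the vaulttext and deriving the keys described above, using just the Python standard library (it mirrors the description in this section and is not Ansible's actual implementation):
+
+.. code-block:: python
+
+ import binascii
+ import hashlib
+
+ def split_vaulttext(vaulttext):
+     # vaulttext is the file body after the $ANSIBLE_VAULT header line.
+     # It is hexlify(hexlify(salt) + b'\n' + hexlify(hmac) + b'\n' + hexlify(ciphertext)).
+     payload = binascii.unhexlify(''.join(vaulttext.split()))
+     salt_hex, hmac_hex, ciphertext_hex = payload.split(b'\n')
+     return (binascii.unhexlify(salt_hex),
+             binascii.unhexlify(hmac_hex),
+             binascii.unhexlify(ciphertext_hex))
+
+ def derive_keys(password, salt):
+     # PBKDF2-HMAC-SHA256, 10000 iterations, 80 bytes of key material
+     material = hashlib.pbkdf2_hmac('sha256', password, salt, 10000, dklen=80)
+     # first 32 bytes: cipher key; next 32 bytes: HMAC key; last 16 bytes: cipher IV
+     return material[:32], material[32:64], material[64:]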
diff --git a/docs/docsite/rst/user_guide/windows.rst b/docs/docsite/rst/user_guide/windows.rst
new file mode 100644
index 00000000..24277189
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows.rst
@@ -0,0 +1,21 @@
+.. _windows:
+
+Windows Guides
+``````````````
+
+The following sections provide information on managing
+Windows hosts with Ansible.
+
+Because Windows is a non-POSIX-compliant operating system, Ansible interacts
+with Windows hosts differently than it does with Linux/Unix hosts. These guides
+highlight some of those differences.
+
+.. toctree::
+ :maxdepth: 2
+
+ windows_setup
+ windows_winrm
+ windows_usage
+ windows_dsc
+ windows_performance
+ windows_faq
diff --git a/docs/docsite/rst/user_guide/windows_dsc.rst b/docs/docsite/rst/user_guide/windows_dsc.rst
new file mode 100644
index 00000000..40416305
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows_dsc.rst
@@ -0,0 +1,505 @@
+Desired State Configuration
+===========================
+
+.. contents:: Topics
+ :local:
+
+What is Desired State Configuration?
+````````````````````````````````````
+Desired State Configuration, or DSC, is a tool built into PowerShell that can
+be used to define a Windows host setup through code. The overall purpose of DSC
+is the same as Ansible's; it is just executed differently. Ansible 2.4 added
+the ``win_dsc`` module, which can be used to leverage existing DSC resources
+when interacting with a Windows host.
+
+More details on DSC can be viewed at `DSC Overview <https://docs.microsoft.com/en-us/powershell/scripting/dsc/overview/overview>`_.
+
+Host Requirements
+`````````````````
+To use the ``win_dsc`` module, a Windows host must have PowerShell v5.0 or
+newer installed. All supported hosts, except for Windows Server 2008 (non R2),
+can be upgraded to PowerShell v5.
+
+Once the PowerShell requirements have been met, using DSC is as simple as
+creating a task with the ``win_dsc`` module.
+
+Why Use DSC?
+````````````
+DSC and Ansible modules have a common goal: to define and ensure the state of a
+resource. Because of
+this, resources like the DSC `File resource <https://docs.microsoft.com/en-us/powershell/scripting/dsc/reference/resources/windows/fileresource>`_
+and Ansible ``win_file`` can be used to achieve the same result. Deciding which to use depends
+on the scenario.
+
+Reasons for using an Ansible module over a DSC resource:
+
+* The host does not support PowerShell v5.0, or it cannot easily be upgraded
+* The DSC resource does not offer a feature present in an Ansible module. For
+  example, ``win_regedit`` can manage the ``REG_NONE`` property type, while the
+  DSC ``Registry`` resource cannot
+* DSC resources have limited check mode support, while some Ansible modules have
+ better checks
+* DSC resources do not support diff mode, while some Ansible modules do
+* Custom resources require further installation steps to be run on the host
+  beforehand, while Ansible modules are built into Ansible
+* There are bugs in a DSC resource where an Ansible module works
+
+Reasons for using a DSC resource over an Ansible module:
+
+* The Ansible module does not support a feature present in a DSC resource
+* There is no Ansible module available
+* There are bugs in an existing Ansible module
+
+In the end, it doesn't matter whether the task is performed with DSC or an
+Ansible module; what matters is that the task is performed correctly and the
+playbooks are still readable. If you have more experience with DSC than with
+Ansible and it does the job, just use DSC for that task.
+
+How to Use DSC?
+```````````````
+The ``win_dsc`` module accepts free-form options that change according to the
+resource it is managing. A list of built-in resources can be
+found at `resources <https://docs.microsoft.com/en-us/powershell/scripting/dsc/resources/resources>`_.
+
+Using the `Registry <https://docs.microsoft.com/en-us/powershell/scripting/dsc/reference/resources/windows/registryresource>`_
+resource as an example, this is the DSC definition as documented by Microsoft:
+
+.. code-block:: powershell
+
+ Registry [string] #ResourceName
+ {
+ Key = [string]
+ ValueName = [string]
+ [ Ensure = [string] { Present | Absent } ]
+ [ Force = [bool] ]
+ [ Hex = [bool] ]
+ [ DependsOn = [string[]] ]
+ [ ValueData = [string[]] ]
+ [ ValueType = [string] { Binary | Dword | ExpandString | MultiString | Qword | String } ]
+ }
+
+When defining the task, ``resource_name`` must be set to the DSC resource being
+used - in this case the ``resource_name`` should be set to ``Registry``. The
+``module_version`` can refer to a specific version of the DSC resource
+installed; if left blank it will default to the latest version. The other
+options are parameters that are used to define the resource, such as ``Key`` and
+``ValueName``. While the options in the task are not case sensitive,
+keeping the case as-is is recommended because it makes it easier to distinguish DSC
+resource options from Ansible's ``win_dsc`` options.
+
+This is what the Ansible task version of the above DSC Registry resource would look like:
+
+.. code-block:: yaml+jinja
+
+ - name: Use win_dsc module with the Registry DSC resource
+ win_dsc:
+ resource_name: Registry
+ Ensure: Present
+ Key: HKEY_LOCAL_MACHINE\SOFTWARE\ExampleKey
+ ValueName: TestValue
+ ValueData: TestData
+
+Starting in Ansible 2.8, the ``win_dsc`` module automatically validates the
+input options from Ansible with the DSC definition. This means Ansible will
+fail if the option name is incorrect, a mandatory option is not set, or the
+value is not a valid choice. When running Ansible with a verbosity level of 3
+or more (``-vvv``), the return value will contain the possible invocation
+options based on the ``resource_name`` specified. Here is an example of the
+invocation output for the above ``Registry`` task:
+
+.. code-block:: ansible-output
+
+ changed: [2016] => {
+ "changed": true,
+ "invocation": {
+ "module_args": {
+ "DependsOn": null,
+ "Ensure": "Present",
+ "Force": null,
+ "Hex": null,
+ "Key": "HKEY_LOCAL_MACHINE\\SOFTWARE\\ExampleKey",
+ "PsDscRunAsCredential_password": null,
+ "PsDscRunAsCredential_username": null,
+ "ValueData": [
+ "TestData"
+ ],
+ "ValueName": "TestValue",
+ "ValueType": null,
+ "module_version": "latest",
+ "resource_name": "Registry"
+ }
+ },
+ "module_version": "1.1",
+ "reboot_required": false,
+ "verbose_set": [
+ "Perform operation 'Invoke CimMethod' with following parameters, ''methodName' = ResourceSet,'className' = MSFT_DSCLocalConfigurationManager,'namespaceName' = root/Microsoft/Windows/DesiredStateConfiguration'.",
+ "An LCM method call arrived from computer SERVER2016 with user sid S-1-5-21-3088887838-4058132883-1884671576-1105.",
+ "[SERVER2016]: LCM: [ Start Set ] [[Registry]DirectResourceAccess]",
+ "[SERVER2016]: [[Registry]DirectResourceAccess] (SET) Create registry key 'HKLM:\\SOFTWARE\\ExampleKey'",
+ "[SERVER2016]: [[Registry]DirectResourceAccess] (SET) Set registry key value 'HKLM:\\SOFTWARE\\ExampleKey\\TestValue' to 'TestData' of type 'String'",
+ "[SERVER2016]: LCM: [ End Set ] [[Registry]DirectResourceAccess] in 0.1930 seconds.",
+ "[SERVER2016]: LCM: [ End Set ] in 0.2720 seconds.",
+ "Operation 'Invoke CimMethod' complete.",
+ "Time taken for configuration job to complete is 0.402 seconds"
+ ],
+ "verbose_test": [
+ "Perform operation 'Invoke CimMethod' with following parameters, ''methodName' = ResourceTest,'className' = MSFT_DSCLocalConfigurationManager,'namespaceName' = root/Microsoft/Windows/DesiredStateConfiguration'.",
+ "An LCM method call arrived from computer SERVER2016 with user sid S-1-5-21-3088887838-4058132883-1884671576-1105.",
+ "[SERVER2016]: LCM: [ Start Test ] [[Registry]DirectResourceAccess]",
+ "[SERVER2016]: [[Registry]DirectResourceAccess] Registry key 'HKLM:\\SOFTWARE\\ExampleKey' does not exist",
+ "[SERVER2016]: LCM: [ End Test ] [[Registry]DirectResourceAccess] False in 0.2510 seconds.",
+ "[SERVER2016]: LCM: [ End Set ] in 0.3310 seconds.",
+ "Operation 'Invoke CimMethod' complete.",
+ "Time taken for configuration job to complete is 0.475 seconds"
+ ]
+ }
+
+The ``invocation.module_args`` key shows the actual values that were set, as
+well as other possible values that were not set. Unfortunately, this does not
+show the default value for a DSC property, only what was set from the Ansible
+task. Any ``*_password`` option is masked in the output for security
+reasons. If there are any other sensitive module options, set ``no_log: True``
+on the task to stop all task output from being logged.
+
+
+Property Types
+--------------
+Each DSC resource property has a type associated with it. Ansible
+tries to convert the defined options to the correct type during execution.
+For simple types like ``[string]`` and ``[bool]`` this is a simple operation,
+but complex types like ``[PSCredential]`` or arrays (like ``[string[]]``)
+require certain rules.
+
+PSCredential
+++++++++++++
+A ``[PSCredential]`` object is used to store credentials in a secure way, but
+Ansible has no way to serialize this over JSON. To set a DSC PSCredential property,
+the definition of that parameter should have two entries that are suffixed with
+``_username`` and ``_password`` for the username and password respectively.
+For example:
+
+.. code-block:: yaml+jinja
+
+ PsDscRunAsCredential_username: '{{ ansible_user }}'
+ PsDscRunAsCredential_password: '{{ ansible_password }}'
+
+ SourceCredential_username: AdminUser
+ SourceCredential_password: PasswordForAdminUser
+
+.. Note:: On versions of Ansible older than 2.8, you should set ``no_log: yes``
+ on the task definition in Ansible to ensure any credentials used are not
+ stored in any log file or console output.
+
+A ``[PSCredential]`` is defined with ``EmbeddedInstance("MSFT_Credential")`` in
+a DSC resource MOF definition.
+
+CimInstance Type
+++++++++++++++++
+A ``[CimInstance]`` object is used by DSC to store a dictionary object based on
+a custom class defined by that resource. Defining a value that takes in a
+``[CimInstance]`` in YAML is the same as defining a dictionary in YAML.
+For example, to define a ``[CimInstance]`` value in Ansible:
+
+.. code-block:: yaml+jinja
+
+ # [CimInstance]AuthenticationInfo == MSFT_xWebAuthenticationInformation
+ AuthenticationInfo:
+ Anonymous: no
+ Basic: yes
+ Digest: no
+ Windows: yes
+
+In the above example, the CIM instance is a representation of the class
+`MSFT_xWebAuthenticationInformation <https://github.com/dsccommunity/xWebAdministration/blob/master/source/DSCResources/MSFT_xWebSite/MSFT_xWebSite.schema.mof>`_.
+This class accepts four boolean variables, ``Anonymous``, ``Basic``,
+``Digest``, and ``Windows``. The keys to use in a ``[CimInstance]`` depend on
+the class it represents. Please read through the documentation of the resource
+to determine the keys that can be used and the types of each key value. The
+class definition is typically located in the ``<resource name>.schema.mof``.
+
+HashTable Type
+++++++++++++++
+A ``[HashTable]`` object is also a dictionary but does not have a strict set of
+keys that can/need to be defined. Like a ``[CimInstance]``, define it like a
+normal dictionary value in YAML. A ``[HashTable]`` is defined with
+``EmbeddedInstance("MSFT_KeyValuePair")`` in a DSC resource MOF definition.
+
+Arrays
+++++++
+Simple type arrays like ``[string[]]`` or ``[UInt32[]]`` are defined as a list
+or as a comma-separated string, which is then cast to the correct type. Using a
+list is recommended because the values are not manually parsed by the ``win_dsc``
+module before being passed to the DSC engine. For example, to define a simple
+type array in Ansible:
+
+.. code-block:: yaml+jinja
+
+ # [string[]]
+ ValueData: entry1, entry2, entry3
+ ValueData:
+ - entry1
+ - entry2
+ - entry3
+
+ # [UInt32[]]
+ ReturnCode: 0,3010
+ ReturnCode:
+ - 0
+ - 3010
+
+Complex type arrays like ``[CimInstance[]]`` (array of dicts), can be defined
+like this example:
+
+.. code-block:: yaml+jinja
+
+ # [CimInstance[]]BindingInfo == MSFT_xWebBindingInformation
+ BindingInfo:
+ - Protocol: https
+ Port: 443
+ CertificateStoreName: My
+ CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659
+ HostName: DSCTest
+ IPAddress: '*'
+ SSLFlags: 1
+ - Protocol: http
+ Port: 80
+ IPAddress: '*'
+
+The above example is an array with two values of the class `MSFT_xWebBindingInformation <https://github.com/dsccommunity/xWebAdministration/blob/master/source/DSCResources/MSFT_xWebSite/MSFT_xWebSite.schema.mof>`_.
+When defining a ``[CimInstance[]]``, be sure to read the resource documentation
+to find out what keys to use in the definition.
+
+DateTime
+++++++++
+A ``[DateTime]`` object is a DateTime string representing the date and time in
+the `ISO 8601 <https://www.w3.org/TR/NOTE-datetime>`_ date time format. The
+value for a ``[DateTime]`` field should be quoted in YAML to ensure the string
+is properly serialized to the Windows host. Here is an example of how to define
+a ``[DateTime]`` value in Ansible:
+
+.. code-block:: yaml+jinja
+
+ # As UTC-0 (No timezone)
+ DateTime: '2019-02-22T13:57:31.2311892+00:00'
+
+ # As UTC+4
+ DateTime: '2019-02-22T17:57:31.2311892+04:00'
+
+ # As UTC-4
+ DateTime: '2019-02-22T09:57:31.2311892-04:00'
+
+All the values above are equal to the same UTC date time: February 22nd, 2019 at
+1:57pm and 31.2311892 seconds.
+
+Run As Another User
+-------------------
+By default, DSC runs each resource as the SYSTEM account and not the account
+that Ansible uses to run the module. This means that resources that are dynamically
+loaded based on a user profile, like the ``HKEY_CURRENT_USER`` registry hive,
+will be loaded under the ``SYSTEM`` profile. The
+``PsDscRunAsCredential`` parameter can be set on every DSC resource to
+force the DSC engine to run under a different account. As
+``PsDscRunAsCredential`` has a type of ``PSCredential``, it is defined with the
+``_username`` and ``_password`` suffixes.
+
+Using the Registry resource type as an example, this is how to define a task
+to access the ``HKEY_CURRENT_USER`` hive of the Ansible user:
+
+.. code-block:: yaml+jinja
+
+ - name: Use win_dsc with PsDscRunAsCredential to run as a different user
+ win_dsc:
+ resource_name: Registry
+ Ensure: Present
+ Key: HKEY_CURRENT_USER\ExampleKey
+ ValueName: TestValue
+ ValueData: TestData
+ PsDscRunAsCredential_username: '{{ ansible_user }}'
+ PsDscRunAsCredential_password: '{{ ansible_password }}'
+ no_log: yes
+
+Custom DSC Resources
+````````````````````
+DSC resources are not limited to the built-in options from Microsoft. Custom
+modules can be installed to manage other resources that are not usually available.
+
+Finding Custom DSC Resources
+----------------------------
+You can use the
+`PSGallery <https://www.powershellgallery.com/>`_ to find custom resources, along with documentation on how to install them on a Windows host.
+
+The ``Find-DscResource`` cmdlet can also be used to find custom resources. For example:
+
+.. code-block:: powershell
+
+ # Find all DSC resources in the configured repositories
+ Find-DscResource
+
+ # Find all DSC resources that relate to SQL
+ Find-DscResource -ModuleName "*sql*"
+
+.. Note:: An ``x`` prefix on a DSC resource developed by Microsoft means the
+ resource is experimental and comes with no support.
+
+Installing a Custom Resource
+----------------------------
+There are three ways that a DSC resource can be installed on a host:
+
+* Manually with the ``Install-Module`` cmdlet
+* Using the ``win_psmodule`` Ansible module
+* Saving the module manually and copying it to another host
+
+This is an example of installing the ``xWebAdministration`` resources using
+``win_psmodule``:
+
+.. code-block:: yaml+jinja
+
+ - name: Install xWebAdministration DSC resource
+ win_psmodule:
+ name: xWebAdministration
+ state: present
+
+Once installed, the ``win_dsc`` module can use the resource by referencing it
+with the ``resource_name`` option.
+
+The first two methods above only work when the host has access to the internet.
+When a host does not have internet access, the module must first be installed
+using the methods above on another host with internet access and then copied
+across. To save a module to a local filepath, the following PowerShell cmdlet
+can be run::
+
+ Save-Module -Name xWebAdministration -Path C:\temp
+
+This will create a folder called ``xWebAdministration`` in ``C:\temp`` which
+can be copied to any host. For PowerShell to see this offline resource, it must
+be copied to a directory set in the ``PSModulePath`` environment variable.
+In most cases the path ``C:\Program Files\WindowsPowerShell\Modules`` is set
+through this variable, but the ``win_path`` module can be used to add different
+paths.
+
+Examples
+````````
+Extract a zip file
+------------------
+
+.. code-block:: yaml+jinja
+
+ - name: Extract a zip file
+ win_dsc:
+ resource_name: Archive
+ Destination: C:\temp\output
+ Path: C:\temp\zip.zip
+ Ensure: Present
+
+Create a directory
+------------------
+
+.. code-block:: yaml+jinja
+
+ - name: Create file with some text
+ win_dsc:
+ resource_name: File
+ DestinationPath: C:\temp\file
+ Contents: |
+ Hello
+ World
+ Ensure: Present
+ Type: File
+
+ - name: Create a hidden directory with the System attribute set
+ win_dsc:
+ resource_name: File
+ DestinationPath: C:\temp\hidden-directory
+ Attributes: Hidden,System
+ Ensure: Present
+ Type: Directory
+
+Interact with Azure
+-------------------
+
+.. code-block:: yaml+jinja
+
+ - name: Install xAzure DSC resources
+ win_psmodule:
+ name: xAzure
+ state: present
+
+ - name: Create virtual machine in Azure
+ win_dsc:
+ resource_name: xAzureVM
+ ImageName: a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-R2-201409.01-en.us-127GB.vhd
+ Name: DSCHOST01
+ ServiceName: ServiceName
+ StorageAccountName: StorageAccountName
+ InstanceSize: Medium
+ Windows: yes
+ Ensure: Present
+ Credential_username: '{{ ansible_user }}'
+ Credential_password: '{{ ansible_password }}'
+
+Setup IIS Website
+-----------------
+
+.. code-block:: yaml+jinja
+
+ - name: Install xWebAdministration module
+ win_psmodule:
+ name: xWebAdministration
+ state: present
+
+ - name: Install IIS features that are required
+ win_dsc:
+ resource_name: WindowsFeature
+ Name: '{{ item }}'
+ Ensure: Present
+ loop:
+ - Web-Server
+ - Web-Asp-Net45
+
+ - name: Setup web content
+ win_dsc:
+ resource_name: File
+ DestinationPath: C:\inetpub\IISSite\index.html
+ Type: File
+ Contents: |
+ <html>
+ <head><title>IIS Site</title></head>
+ <body>This is the body</body>
+ </html>
+ Ensure: Present
+
+ - name: Create new website
+ win_dsc:
+ resource_name: xWebsite
+ Name: NewIISSite
+ State: Started
+ PhysicalPath: C:\inetpub\IISSite
+ BindingInfo:
+ - Protocol: https
+ Port: 8443
+ CertificateStoreName: My
+ CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659
+ HostName: DSCTest
+ IPAddress: '*'
+ SSLFlags: 1
+ - Protocol: http
+ Port: 8080
+ IPAddress: '*'
+ AuthenticationInfo:
+ Anonymous: no
+ Basic: yes
+ Digest: no
+ Windows: yes
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`List of Windows Modules <windows_modules>`
+ Windows specific module list, all implemented in PowerShell
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/windows_faq.rst b/docs/docsite/rst/user_guide/windows_faq.rst
new file mode 100644
index 00000000..75e99d2e
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows_faq.rst
@@ -0,0 +1,236 @@
+.. _windows_faq:
+
+Windows Frequently Asked Questions
+==================================
+
+Here are some commonly asked questions about Ansible and Windows, and
+their answers.
+
+.. note:: This document covers questions about managing Microsoft Windows servers with Ansible.
+ For questions about Ansible Core, please see the
+ :ref:`general FAQ page <ansible_faq>`.
+
+Does Ansible work with Windows XP or Server 2003?
+``````````````````````````````````````````````````
+Ansible does not work with Windows XP or Server 2003 hosts. Ansible does work with these Windows operating system versions:
+
+* Windows Server 2008 :sup:`1`
+* Windows Server 2008 R2 :sup:`1`
+* Windows Server 2012
+* Windows Server 2012 R2
+* Windows Server 2016
+* Windows Server 2019
+* Windows 7 :sup:`1`
+* Windows 8.1
+* Windows 10
+
+1 - See the :ref:`Server 2008 FAQ <windows_faq_server2008>` entry for more details.
+
+Ansible also has minimum PowerShell version requirements - please see
+:ref:`windows_setup` for the latest information.
+
+.. _windows_faq_server2008:
+
+Are Server 2008, 2008 R2 and Windows 7 supported?
+`````````````````````````````````````````````````
+Microsoft ended Extended Support for these versions of Windows on January 14th, 2020, and Ansible deprecated official support in the 2.10 release. No new feature development will occur targeting these operating systems, and automated testing has ceased. However, existing modules and features will likely continue to work, and simple pull requests to resolve issues with these Windows versions may be accepted.
+
+Can I manage Windows Nano Server with Ansible?
+``````````````````````````````````````````````
+Ansible does not currently work with Windows Nano Server, since it does
+not have access to the full .NET Framework that is used by the majority of the
+modules and internal components.
+
+Can Ansible run on Windows?
+```````````````````````````
+No, Ansible can only manage Windows hosts. Ansible cannot run on a Windows host
+natively, though it can run under the Windows Subsystem for Linux (WSL).
+
+.. note:: The Windows Subsystem for Linux is not supported by Ansible and
+ should not be used for production systems.
+
+To install Ansible on WSL, the following commands
+can be run in the bash terminal:
+
+.. code-block:: shell
+
+ sudo apt-get update
+ sudo apt-get install python-pip git libffi-dev libssl-dev -y
+ pip install --user ansible pywinrm
+
+To run Ansible from source instead of a release on the WSL, simply uninstall the pip-installed
+version and then clone the git repo.
+
+.. code-block:: shell
+
+ pip uninstall ansible -y
+ git clone https://github.com/ansible/ansible.git
+ source ansible/hacking/env-setup
+
+ # To enable Ansible on login, run the following
+ echo ". ~/ansible/hacking/env-setup -q' >> ~/.bashrc
+
+Can I use SSH keys to authenticate to Windows hosts?
+````````````````````````````````````````````````````
+You cannot use SSH keys with the WinRM or PSRP connection plugins.
+These connection plugins use X509 certificates for authentication instead
+of the SSH key pairs that SSH uses.
+
+The way X509 certificates are generated and mapped to a user is different
+from the SSH implementation; consult the :ref:`windows_winrm` documentation for
+more information.
+
+Ansible 2.8 has added an experimental option to use the SSH connection plugin,
+which uses SSH keys for authentication, for Windows servers. See :ref:`this question <windows_faq_ssh>`
+for more information.
+
+.. _windows_faq_winrm:
+
+Why can I run a command locally that does not work under Ansible?
+`````````````````````````````````````````````````````````````````
+Ansible executes commands through WinRM. These processes are different from
+running a command locally in these ways:
+
+* Unless using an authentication option like CredSSP or Kerberos with
+ credential delegation, the WinRM process does not have the ability to
+ delegate the user's credentials to a network resource, causing ``Access is
+ Denied`` errors.
+
+* All processes run under WinRM are in a non-interactive session. Applications
+ that require an interactive session will not work.
+
+* When running through WinRM, Windows restricts access to internal Windows
+ APIs like the Windows Update API and DPAPI, which some installers and
+ programs rely on.
+
+Some ways to bypass these restrictions are to:
+
+* Use ``become``, which runs a command as it would when run locally. This will
+ bypass most WinRM restrictions, as Windows is unaware the process is running
+ under WinRM when ``become`` is used. See the :ref:`become` documentation for more
+ information.
+
+* Use a scheduled task, which can be created with ``win_scheduled_task``. Like
+ ``become``, it will bypass all WinRM restrictions, but it can only be used to run
+ commands, not modules.
+
+* Use ``win_psexec`` to run a command on the host. PSExec does not use WinRM
+ and so will bypass any of the restrictions.
+
+* To access network resources without any of these workarounds, you can use
+ CredSSP or Kerberos with credential delegation enabled.
+
+See :ref:`become` for more info on how to use become. The limitations section at
+:ref:`windows_winrm` has more details around WinRM limitations.
+
+This program won't install on Windows with Ansible
+``````````````````````````````````````````````````
+See :ref:`this question <windows_faq_winrm>` for more information about WinRM limitations.
+
+What Windows modules are available?
+```````````````````````````````````
+Most of the Ansible modules in Ansible Core are written for a combination of
+Linux/Unix machines and arbitrary web services. These modules are written in
+Python and most of them do not work on Windows.
+
+Because of this, there are dedicated Windows modules that are written in
+PowerShell and are meant to be run on Windows hosts. A list of these modules
+can be found :ref:`here <windows_modules>`.
+
+In addition, the following Ansible Core modules/action-plugins work with Windows:
+
+* add_host
+* assert
+* async_status
+* debug
+* fail
+* fetch
+* group_by
+* include
+* include_role
+* include_vars
+* meta
+* pause
+* raw
+* script
+* set_fact
+* set_stats
+* setup
+* slurp
+* template (also: win_template)
+* wait_for_connection
+
+Can I run Python modules on Windows hosts?
+``````````````````````````````````````````
+No, the WinRM connection protocol is set to use PowerShell modules, so Python
+modules will not work. A way to bypass this issue is to use
+``delegate_to: localhost`` to run a Python module on the Ansible controller.
+This is useful if during a playbook, an external service needs to be contacted
+and there is no equivalent Windows module available.
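+
+For example, here is a sketch of contacting an external HTTP service from the controller in the middle of a Windows play (the URL is illustrative):
+
+.. code-block:: yaml
+
+ - name: Query an external service from the Ansible controller
+   ansible.builtin.uri:
+     url: https://api.example.com/status
+   delegate_to: localhost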
+
+.. _windows_faq_ssh:
+
+Can I connect to Windows hosts over SSH?
+````````````````````````````````````````
+Ansible 2.8 has added an experimental option to use the SSH connection plugin
+to manage Windows hosts. To connect to Windows hosts over SSH, you must install and configure the `Win32-OpenSSH <https://github.com/PowerShell/Win32-OpenSSH>`_
+fork that is in development with Microsoft on
+the Windows host(s). While most of the basics should work with SSH,
+``Win32-OpenSSH`` is rapidly changing, with new features added and bugs
+fixed in every release. It is highly recommended that you `install <https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH>`_ the latest release
+of ``Win32-OpenSSH`` from the GitHub Releases page when using it with Ansible
+on Windows hosts.
+
+To use SSH as the connection to a Windows host, set the following variables in
+the inventory::
+
+ ansible_connection=ssh
+
+ # Set either cmd or powershell not both
+ ansible_shell_type=cmd
+ # ansible_shell_type=powershell
+
+The value for ``ansible_shell_type`` should either be ``cmd`` or ``powershell``.
+Use ``cmd`` if the ``DefaultShell`` has not been configured on the SSH service
+and ``powershell`` if that has been set as the ``DefaultShell``.
+
+Why is connecting to a Windows host via SSH failing?
+````````````````````````````````````````````````````
+Unless you are using ``Win32-OpenSSH`` as described above, you must connect to
+Windows hosts using :ref:`windows_winrm`. If your Ansible output indicates that
+SSH was used, either you did not set the connection vars properly or the host is not inheriting them correctly.
+
+Make sure ``ansible_connection: winrm`` is set in the inventory for the Windows
+host(s).
+
+Why are my credentials being rejected?
+``````````````````````````````````````
+This can be due to a myriad of reasons unrelated to incorrect credentials.
+
+See HTTP 401/Credentials Rejected at :ref:`windows_setup` for a more detailed
+guide to what this could mean.
+
+Why am I getting an error SSL CERTIFICATE_VERIFY_FAILED?
+````````````````````````````````````````````````````````
+When the Ansible controller is running on Python 2.7.9+ or an older version of Python that
+has backported SSLContext (like Python 2.7.5 on RHEL 7), the controller will attempt to
+validate the certificate WinRM is using for an HTTPS connection. If the
+certificate cannot be validated (such as in the case of a self-signed certificate), it will
+fail the verification process.
+
+To ignore certificate validation, add
+``ansible_winrm_server_cert_validation: ignore`` to inventory for the Windows
+host.
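+
+For example, an inventory sketch (the host name is illustrative):
+
+.. code-block:: ini
+
+ [windows]
+ winhost1.example.com
+
+ [windows:vars]
+ ansible_connection=winrm
+ ansible_winrm_server_cert_validation=ignore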
+
+.. seealso::
+
+ :ref:`windows`
+ The Windows documentation index
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/windows_performance.rst b/docs/docsite/rst/user_guide/windows_performance.rst
new file mode 100644
index 00000000..5eb5dbbd
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows_performance.rst
@@ -0,0 +1,61 @@
+.. _windows_performance:
+
+Windows performance
+===================
+This document offers some performance optimizations you might like to apply to
+your Windows hosts to speed them up, both specifically in the context of using
+Ansible with them and in general.
+
+Optimise PowerShell performance to reduce Ansible task overhead
+---------------------------------------------------------------
+To speed up the startup of PowerShell by around 10x, run the following
+PowerShell snippet in an Administrator session. Expect it to take tens of
+seconds.
+
+.. note::
+
+   If native images have already been created by the ngen task or service, you
+   will observe no difference in performance (though the snippet itself will
+   then complete faster than it otherwise would).
+
+.. code-block:: powershell
+
+ function Optimize-PowershellAssemblies {
+ # NGEN powershell assembly, improves startup time of powershell by 10x
+ $old_path = $env:path
+ try {
+ $env:path = [Runtime.InteropServices.RuntimeEnvironment]::GetRuntimeDirectory()
+ [AppDomain]::CurrentDomain.GetAssemblies() | % {
+          # 'return' (not 'continue') skips to the next ForEach-Object item;
+          # 'continue' here would stop the entire pipeline
+          if (! $_.location) {return}
+ $Name = Split-Path $_.location -leaf
+ if ($Name.startswith("Microsoft.PowerShell.")) {
+ Write-Progress -Activity "Native Image Installation" -Status "$name"
+ ngen install $_.location | % {"`t$_"}
+ }
+ }
+ } finally {
+ $env:path = $old_path
+ }
+ }
+ Optimize-PowershellAssemblies
+
+PowerShell is used by every Windows Ansible module. This optimisation reduces
+the time PowerShell takes to start up, removing that overhead from every invocation.
+
+This snippet uses `the native image generator, ngen <https://docs.microsoft.com/en-us/dotnet/framework/tools/ngen-exe-native-image-generator#WhenToUse>`_
+to pre-emptively create native images for the assemblies that PowerShell relies on.
+
+Fix high-CPU-on-boot for VMs/cloud instances
+--------------------------------------------
+If you are creating golden images to spawn instances from, you can avoid a disruptive
+high CPU task near startup by `processing the ngen queue <https://docs.microsoft.com/en-us/dotnet/framework/tools/ngen-exe-native-image-generator#native-image-service>`_
+during golden image creation, provided you know the CPU types won't change between
+the golden image build process and runtime.
+
+Place the following near the end of your playbook, bearing in mind the factors that can cause native images to be invalidated (`see MSDN <https://docs.microsoft.com/en-us/dotnet/framework/tools/ngen-exe-native-image-generator#native-images-and-jit-compilation>`_).
+
+.. code-block:: yaml
+
+ - name: generate native .NET images for CPU
+ win_dotnet_ngen:
+
diff --git a/docs/docsite/rst/user_guide/windows_setup.rst b/docs/docsite/rst/user_guide/windows_setup.rst
new file mode 100644
index 00000000..910fa06f
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows_setup.rst
@@ -0,0 +1,573 @@
+.. _windows_setup:
+
+Setting up a Windows Host
+=========================
+This document discusses the setup that is required before Ansible can communicate with a Microsoft Windows host.
+
+.. contents::
+ :local:
+
+Host Requirements
+`````````````````
+For Ansible to communicate to a Windows host and use Windows modules, the
+Windows host must meet these requirements:
+
+* Ansible can generally manage Windows versions under current
+ and extended support from Microsoft. Ansible can manage desktop OSs including
+ Windows 7, 8.1, and 10, and server OSs including Windows Server 2008,
+ 2008 R2, 2012, 2012 R2, 2016, and 2019.
+
+* Ansible requires PowerShell 3.0 or newer and at least .NET 4.0 to be
+ installed on the Windows host.
+
+* A WinRM listener should be created and activated. More details for this can be
+ found below.
+
+.. Note:: While these are the base requirements for Ansible connectivity, some Ansible
+ modules have additional requirements, such as a newer OS or PowerShell
+ version. Please consult the module's documentation page
+ to determine whether a host meets those requirements.
+
+Upgrading PowerShell and .NET Framework
+---------------------------------------
+Ansible requires PowerShell version 3.0 and .NET Framework 4.0 or newer to function on older operating systems like Server 2008 and Windows 7. The base image for these older
+operating systems does not meet this requirement. You can use the `Upgrade-PowerShell.ps1 <https://github.com/jborean93/ansible-windows/blob/master/scripts/Upgrade-PowerShell.ps1>`_ script to update these components.
+
+This is an example of how to run this script from PowerShell:
+
+.. code-block:: powershell
+
+ $url = "https://raw.githubusercontent.com/jborean93/ansible-windows/master/scripts/Upgrade-PowerShell.ps1"
+ $file = "$env:temp\Upgrade-PowerShell.ps1"
+ $username = "Administrator"
+ $password = "Password"
+
+ (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file)
+ Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Force
+
+ # Version can be 3.0, 4.0 or 5.1
+ &$file -Version 5.1 -Username $username -Password $password -Verbose
+
+Once completed, you will need to remove auto logon
+and set the execution policy back to the default of ``Restricted``. You can
+do this with the following PowerShell commands:
+
+.. code-block:: powershell
+
+ # This isn't needed but is a good security practice to complete
+ Set-ExecutionPolicy -ExecutionPolicy Restricted -Force
+
+ $reg_winlogon_path = "HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon"
+ Set-ItemProperty -Path $reg_winlogon_path -Name AutoAdminLogon -Value 0
+ Remove-ItemProperty -Path $reg_winlogon_path -Name DefaultUserName -ErrorAction SilentlyContinue
+ Remove-ItemProperty -Path $reg_winlogon_path -Name DefaultPassword -ErrorAction SilentlyContinue
+
+The script works by checking to see what programs need to be installed
+(such as .NET Framework 4.5.2) and what PowerShell version is required. If a reboot
+is required and the ``username`` and ``password`` parameters are set, the
+script will automatically reboot and logon when it comes back up from the
+reboot. The script will continue until no more actions are required and the
+PowerShell version matches the target version. If the ``username`` and
+``password`` parameters are not set, the script will prompt the user to
+manually reboot and logon when required. When the user is next logged in, the
+script will continue where it left off and the process continues until no more
+actions are required.
+
+.. Note:: If running on Server 2008, then SP2 must be installed. If running on
+ Server 2008 R2 or Windows 7, then SP1 must be installed.
+
+.. Note:: Windows Server 2008 can only install PowerShell 3.0; specifying a
+ newer version will result in the script failing.
+
+.. Note:: The ``username`` and ``password`` parameters are stored in plain text
+ in the registry. Make sure the cleanup commands are run after the script finishes
+ to ensure no credentials are still stored on the host.
+
+WinRM Memory Hotfix
+-------------------
+When running on PowerShell v3.0, there is a bug with the WinRM service that
+limits the amount of memory available to WinRM. Without this hotfix installed,
+Ansible will fail to execute certain commands on the Windows host. These
+hotfixes should be installed as part of the system bootstrapping or
+imaging process. The script `Install-WMF3Hotfix.ps1 <https://github.com/jborean93/ansible-windows/blob/master/scripts/Install-WMF3Hotfix.ps1>`_ can be used to install the hotfix on affected hosts.
+
+The following PowerShell command will install the hotfix:
+
+.. code-block:: powershell
+
+ $url = "https://raw.githubusercontent.com/jborean93/ansible-windows/master/scripts/Install-WMF3Hotfix.ps1"
+ $file = "$env:temp\Install-WMF3Hotfix.ps1"
+
+ (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file)
+ powershell.exe -ExecutionPolicy ByPass -File $file -Verbose
+
+For more details, please refer to the `Hotfix document <https://support.microsoft.com/en-us/help/2842230/out-of-memory-error-on-a-computer-that-has-a-customized-maxmemorypersh>`_ from Microsoft.
+
+WinRM Setup
+```````````
+Once PowerShell has been upgraded to at least version 3.0, the final step is to
+configure the WinRM service so that Ansible can connect to it. There are two
+main components of the WinRM service that govern how Ansible can interface with
+the Windows host: the ``listener`` and the ``service`` configuration settings.
+
+Details about each component can be read below, but the script
+`ConfigureRemotingForAnsible.ps1 <https://github.com/ansible/ansible/blob/devel/examples/scripts/ConfigureRemotingForAnsible.ps1>`_
+can be used to set up the basics. This script sets up both HTTP and HTTPS
+listeners with a self-signed certificate and enables the ``Basic``
+authentication option on the service.
+
+To use this script, run the following in PowerShell:
+
+.. code-block:: powershell
+
+ $url = "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"
+ $file = "$env:temp\ConfigureRemotingForAnsible.ps1"
+
+ (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file)
+
+ powershell.exe -ExecutionPolicy ByPass -File $file
+
+There are different switches and parameters (like ``-EnableCredSSP`` and
+``-ForceNewSSLCert``) that can be set alongside this script. The documentation
+for these options is located at the top of the script itself.
+
+.. Note:: The ConfigureRemotingForAnsible.ps1 script is intended for training and
+ development purposes only and should not be used in a
+ production environment, since it enables settings (like ``Basic`` authentication)
+ that can be inherently insecure.
+
+WinRM Listener
+--------------
+The WinRM service listens for requests on one or more ports. Each of these ports must have a
+listener created and configured.
+
+To view the current listeners that are running on the WinRM service, run the
+following command:
+
+.. code-block:: powershell
+
+ winrm enumerate winrm/config/Listener
+
+This will output something like::
+
+ Listener
+ Address = *
+ Transport = HTTP
+ Port = 5985
+ Hostname
+ Enabled = true
+ URLPrefix = wsman
+ CertificateThumbprint
+ ListeningOn = 10.0.2.15, 127.0.0.1, 192.168.56.155, ::1, fe80::5efe:10.0.2.15%6, fe80::5efe:192.168.56.155%8, fe80::
+ ffff:ffff:fffe%2, fe80::203d:7d97:c2ed:ec78%3, fe80::e8ea:d765:2c69:7756%7
+
+ Listener
+ Address = *
+ Transport = HTTPS
+ Port = 5986
+ Hostname = SERVER2016
+ Enabled = true
+ URLPrefix = wsman
+ CertificateThumbprint = E6CDAA82EEAF2ECE8546E05DB7F3E01AA47D76CE
+ ListeningOn = 10.0.2.15, 127.0.0.1, 192.168.56.155, ::1, fe80::5efe:10.0.2.15%6, fe80::5efe:192.168.56.155%8, fe80::
+ ffff:ffff:fffe%2, fe80::203d:7d97:c2ed:ec78%3, fe80::e8ea:d765:2c69:7756%7
+
+In the example above there are two listeners activated; one is listening on
+port 5985 over HTTP and the other is listening on port 5986 over HTTPS. Some of
+the key options that are useful to understand are:
+
+* ``Transport``: Whether the listener is run over HTTP or HTTPS. It is
+  recommended to use a listener over HTTPS, as the data is encrypted without
+  any further changes required.
+
+* ``Port``: The port the listener runs on. By default it is ``5985`` for HTTP
+  and ``5986`` for HTTPS. This port can be changed to whatever is required and
+  corresponds to the host var ``ansible_port`` (see the sketch after this
+  list).
+
+* ``URLPrefix``: The URL prefix to listen on. By default it is ``wsman``. If
+  this is changed, the host var ``ansible_winrm_path`` must be set to the same
+  value.
+
+* ``CertificateThumbprint``: If running over an HTTPS listener, this is the
+ thumbprint of the certificate in the Windows Certificate Store that is used
+ in the connection. To get the details of the certificate itself, run this
+ command with the relevant certificate thumbprint in PowerShell::
+
+ $thumbprint = "E6CDAA82EEAF2ECE8546E05DB7F3E01AA47D76CE"
+ Get-ChildItem -Path cert:\LocalMachine\My -Recurse | Where-Object { $_.Thumbprint -eq $thumbprint } | Select-Object *
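+
+As a sketch of how these listener options map to host variables (the values
+shown are hypothetical), a listener on port 8443 with a URL prefix of
+``custom-wsman`` could be matched like this:
+
+.. code-block:: yaml+jinja
+
+    # hypothetical listener with Port=8443 and URLPrefix=custom-wsman
+    ansible_connection: winrm
+    ansible_port: 8443
+    ansible_winrm_path: /custom-wsman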
+
+Set Up a WinRM Listener
+++++++++++++++++++++++++
+There are three ways to set up a WinRM listener:
+
+* Using ``winrm quickconfig`` for HTTP or
+  ``winrm quickconfig -transport:https`` for HTTPS. This is the easiest option
+  to use when running outside of a domain environment and a simple listener is
+  required. Unlike the other options, this process also has the added benefit of
+  opening up the firewall for the required ports and starting the WinRM service.
+
+* Using Group Policy Objects. This is the best way to create a listener when the
+ host is a member of a domain because the configuration is done automatically
+ without any user input. For more information on group policy objects, see the
+ `Group Policy Objects documentation <https://msdn.microsoft.com/en-us/library/aa374162(v=vs.85).aspx>`_.
+
+* Using PowerShell to create the listener with a specific configuration. This
+ can be done by running the following PowerShell commands:
+
+ .. code-block:: powershell
+
+ $selector_set = @{
+ Address = "*"
+ Transport = "HTTPS"
+ }
+ $value_set = @{
+ CertificateThumbprint = "E6CDAA82EEAF2ECE8546E05DB7F3E01AA47D76CE"
+ }
+
+ New-WSManInstance -ResourceURI "winrm/config/Listener" -SelectorSet $selector_set -ValueSet $value_set
+
+ To see the other options with this PowerShell cmdlet, see
+ `New-WSManInstance <https://docs.microsoft.com/en-us/powershell/module/microsoft.wsman.management/new-wsmaninstance?view=powershell-5.1>`_.
+
+.. Note:: When creating an HTTPS listener, a certificate must already be
+   created and stored in the ``LocalMachine\My`` certificate store. Without a
+   certificate present in this store, most commands will fail.
+
+Delete WinRM Listener
++++++++++++++++++++++
+To remove a WinRM listener::
+
+ # Remove all listeners
+ Remove-Item -Path WSMan:\localhost\Listener\* -Recurse -Force
+
+ # Only remove listeners that are run over HTTPS
+ Get-ChildItem -Path WSMan:\localhost\Listener | Where-Object { $_.Keys -contains "Transport=HTTPS" } | Remove-Item -Recurse -Force
+
+.. Note:: The ``Keys`` object is an array of strings, so it can contain different
+   values. By default it contains a key for ``Transport=`` and ``Address=``,
+   which correspond to the values from ``winrm enumerate winrm/config/Listener``.
+
+WinRM Service Options
+---------------------
+There are a number of options that can be set to control the behavior of the WinRM service component,
+including authentication options and memory settings.
+
+To get an output of the current service configuration options, run the
+following command:
+
+.. code-block:: powershell
+
+ winrm get winrm/config/Service
+ winrm get winrm/config/Winrs
+
+This will output something like::
+
+ Service
+ RootSDDL = O:NSG:BAD:P(A;;GA;;;BA)(A;;GR;;;IU)S:P(AU;FA;GA;;;WD)(AU;SA;GXGW;;;WD)
+ MaxConcurrentOperations = 4294967295
+ MaxConcurrentOperationsPerUser = 1500
+ EnumerationTimeoutms = 240000
+ MaxConnections = 300
+ MaxPacketRetrievalTimeSeconds = 120
+ AllowUnencrypted = false
+ Auth
+ Basic = true
+ Kerberos = true
+ Negotiate = true
+ Certificate = true
+ CredSSP = true
+ CbtHardeningLevel = Relaxed
+ DefaultPorts
+ HTTP = 5985
+ HTTPS = 5986
+ IPv4Filter = *
+ IPv6Filter = *
+ EnableCompatibilityHttpListener = false
+ EnableCompatibilityHttpsListener = false
+ CertificateThumbprint
+ AllowRemoteAccess = true
+
+ Winrs
+ AllowRemoteShellAccess = true
+ IdleTimeout = 7200000
+ MaxConcurrentUsers = 2147483647
+ MaxShellRunTime = 2147483647
+ MaxProcessesPerShell = 2147483647
+ MaxMemoryPerShellMB = 2147483647
+ MaxShellsPerUser = 2147483647
+
+While many of these options should rarely be changed, a few can easily impact
+the operations over WinRM and are useful to understand. Some of the important
+options are:
+
+* ``Service\AllowUnencrypted``: This option defines whether WinRM will allow
+ traffic that is run over HTTP without message encryption. Message level
+ encryption is only possible when ``ansible_winrm_transport`` is ``ntlm``,
+ ``kerberos`` or ``credssp``. By default this is ``false`` and should only be
+ set to ``true`` when debugging WinRM messages.
+
+* ``Service\Auth\*``: These flags define what authentication
+ options are allowed with the WinRM service. By default, ``Negotiate (NTLM)``
+ and ``Kerberos`` are enabled.
+
+* ``Service\Auth\CbtHardeningLevel``: Specifies whether channel binding tokens are
+ not verified (None), verified but not required (Relaxed), or verified and
+ required (Strict). CBT is only used when connecting with NTLM or Kerberos
+ over HTTPS.
+
+* ``Service\CertificateThumbprint``: This is the thumbprint of the certificate
+ used to encrypt the TLS channel used with CredSSP authentication. By default
+ this is empty; a self-signed certificate is generated when the WinRM service
+ starts and is used in the TLS process.
+
+* ``Winrs\MaxShellRunTime``: This is the maximum time, in milliseconds, that a
+ remote command is allowed to execute.
+
+* ``Winrs\MaxMemoryPerShellMB``: This is the maximum amount of memory allocated
+ per shell, including the shell's child processes.
+
+To modify a setting under the ``Service`` key in PowerShell::
+
+ # substitute {path} with the path to the option after winrm/config/Service
+ Set-Item -Path WSMan:\localhost\Service\{path} -Value "value here"
+
+ # for example, to change Service\Auth\CbtHardeningLevel run
+ Set-Item -Path WSMan:\localhost\Service\Auth\CbtHardeningLevel -Value Strict
+
+To modify a setting under the ``Winrs`` key in PowerShell::
+
+ # Substitute {path} with the path to the option after winrm/config/Winrs
+ Set-Item -Path WSMan:\localhost\Shell\{path} -Value "value here"
+
+ # For example, to change Winrs\MaxShellRunTime run
+ Set-Item -Path WSMan:\localhost\Shell\MaxShellRunTime -Value 2147483647
+
+.. Note:: If running in a domain environment, some of these options are set by
+ GPO and cannot be changed on the host itself. When a key has been
+ configured with GPO, it contains the text ``[Source="GPO"]`` next to the value.
+
+Common WinRM Issues
+-------------------
+Because WinRM has a wide range of configuration options, it can be difficult
+to set up and configure. Because of this complexity, issues that are shown by Ansible
+could in fact be issues with the host setup instead.
+
+One easy way to determine whether a problem is a host issue is to
+run the following command from another Windows host to connect to the
+target Windows host::
+
+ # Test out HTTP
+ winrs -r:http://server:5985/wsman -u:Username -p:Password ipconfig
+
+ # Test out HTTPS (will fail if the cert is not verifiable)
+ winrs -r:https://server:5986/wsman -u:Username -p:Password -ssl ipconfig
+
+ # Test out HTTPS, ignoring certificate verification
+ $username = "Username"
+ $password = ConvertTo-SecureString -String "Password" -AsPlainText -Force
+ $cred = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $password
+
+ $session_option = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
+ Invoke-Command -ComputerName server -UseSSL -ScriptBlock { ipconfig } -Credential $cred -SessionOption $session_option
+
+If this fails, the issue is probably related to the WinRM setup. If it works, the issue may not be related to the WinRM setup; please continue reading for more troubleshooting suggestions.
+
+HTTP 401/Credentials Rejected
++++++++++++++++++++++++++++++
+An HTTP 401 error indicates the authentication process failed during the initial
+connection. Some things to check for this are:
+
+* Verify that the credentials are correct and set properly in your inventory with
+  ``ansible_user`` and ``ansible_password``.
+
+* Ensure that the user is a member of the local Administrators group or has been explicitly
+ granted access (a connection test with the ``winrs`` command can be used to
+ rule this out).
+
+* Make sure that the authentication option set by ``ansible_winrm_transport`` is enabled under
+  ``Service\Auth\*``.
+
+* If running over HTTP and not HTTPS, use ``ntlm``, ``kerberos`` or ``credssp``
+  with ``ansible_winrm_message_encryption: auto`` to enable message encryption.
+  If using another authentication option, or if the installed pywinrm version cannot be
+  upgraded, ``Service\AllowUnencrypted`` can be set to ``true``, but this is
+  only recommended for troubleshooting.
+
+* Ensure the downstream packages ``pywinrm``, ``requests-ntlm``,
+ ``requests-kerberos``, and/or ``requests-credssp`` are up to date using ``pip``.
+
+* If using Kerberos authentication, ensure that ``Service\Auth\CbtHardeningLevel`` is
+ not set to ``Strict``.
+
+* When using Basic or Certificate authentication, make sure that the user is a local account and
+ not a domain account. Domain accounts do not work with Basic and Certificate
+ authentication.
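+
+For example, a minimal sketch of host variables for NTLM over HTTP with
+message encryption (the credentials are hypothetical):
+
+.. code-block:: yaml+jinja
+
+    ansible_user: LocalUsername
+    ansible_password: Password
+    ansible_connection: winrm
+    ansible_winrm_transport: ntlm
+    ansible_winrm_message_encryption: auto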
+
+HTTP 500 Error
+++++++++++++++
+These indicate an error has occurred with the WinRM service. Some things
+to check for include:
+
+* Verify that the number of currently open shells has not exceeded
+  ``WinRsMaxShellsPerUser``, and that none of the other Winrs quotas have been
+  exceeded.
+
+Timeout Errors
++++++++++++++++
+These usually indicate an error with the network connection where
+Ansible is unable to reach the host. Some things to check for include:
+
+* Make sure the firewall is not set to block the configured WinRM listener ports
+* Ensure that a WinRM listener is enabled on the port and path set by the host vars
+* Ensure that the ``winrm`` service is running on the Windows host and configured for
+ automatic start
+
+Connection Refused Errors
++++++++++++++++++++++++++
+These usually indicate an error when trying to communicate with the
+WinRM service on the host. Some things to check for:
+
+* Ensure that the WinRM service is up and running on the host. Use
+ ``(Get-Service -Name winrm).Status`` to get the status of the service.
+* Check that the host firewall is allowing traffic over the WinRM port. By default
+ this is ``5985`` for HTTP and ``5986`` for HTTPS.
+
+Sometimes an installer may restart the WinRM or HTTP service and cause this error. The
+best way to deal with this is to use ``win_psexec`` from another
+Windows host.
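+
+As a hedged sketch of that approach (the host names, credentials, and
+installer path are hypothetical), the installer can be launched through
+``win_psexec`` on a helper host so that a WinRM restart on the target does not
+break the Ansible connection:
+
+.. code-block:: yaml+jinja
+
+    - name: run an installer that restarts WinRM via a helper host
+      win_psexec:
+        command: C:\temp\installer.exe /silent
+        hostnames:
+        - windows-target.example.com
+        username: Administrator
+        password: Password
+      delegate_to: windows-helper.example.com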
+
+Failure to Load Builtin Modules
++++++++++++++++++++++++++++++++
+If PowerShell fails with an error message similar to ``The 'Out-String' command was found in the module 'Microsoft.PowerShell.Utility', but the module could not be loaded.``
+then there could be a problem trying to access all the paths specified by the ``PSModulePath`` environment variable.
+A common cause of this issue is that ``PSModulePath`` contains a UNC path to a file share;
+because of the double hop/credential delegation issue, the Ansible process cannot access these folders. The way around
+this problem is to either:
+
+* Remove the UNC path from the ``PSModulePath`` environment variable, or
+* Use an authentication option that supports credential delegation like ``credssp`` or ``kerberos`` with credential delegation enabled
+
+See `KB4076842 <https://support.microsoft.com/en-us/help/4076842>`_ for more information on this problem.
+
+
+Windows SSH Setup
+`````````````````
+Ansible 2.8 has added an experimental SSH connection for Windows managed nodes.
+
+.. warning::
+   Use this feature at your own risk!
+   Using SSH with Windows is experimental; the implementation may make
+   backwards incompatible changes in feature releases. The server side
+   components can be unreliable, depending on the version that is installed.
+
+Installing Win32-OpenSSH
+------------------------
+The first step to using SSH with Windows is to install the `Win32-OpenSSH <https://github.com/PowerShell/Win32-OpenSSH>`_
+service on the Windows host. Microsoft offers a way to install ``Win32-OpenSSH`` through a Windows
+capability but currently the version that is installed through this process is
+too old to work with Ansible. To install ``Win32-OpenSSH`` for use with
+Ansible, select one of these four installation options:
+
+* Manually install the service, following the `install instructions <https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH>`_
+ from Microsoft.
+
+* Install the `openssh <https://chocolatey.org/packages/openssh>`_ package using Chocolatey::
+
+ choco install --package-parameters=/SSHServerFeature openssh
+
+* Use ``win_chocolatey`` to install the service::
+
+ - name: install the Win32-OpenSSH service
+ win_chocolatey:
+ name: openssh
+ package_params: /SSHServerFeature
+ state: present
+
+* Use an existing Ansible Galaxy role like `jborean93.win_openssh <https://galaxy.ansible.com/jborean93/win_openssh>`_::
+
+ # Make sure the role has been downloaded first
+ ansible-galaxy install jborean93.win_openssh
+
+ # main.yml
+ - name: install Win32-OpenSSH service
+ hosts: windows
+ gather_facts: no
+ roles:
+ - role: jborean93.win_openssh
+ opt_openssh_setup_service: True
+
+.. note:: ``Win32-OpenSSH`` is still a beta product and is constantly
+   being updated to include new features and bugfixes. If you are using SSH as
+   a connection option for Windows, it is highly recommended that you install the
+   latest release using one of the four methods above.
+
+Configuring the Win32-OpenSSH shell
+-----------------------------------
+
+By default ``Win32-OpenSSH`` will use ``cmd.exe`` as a shell. To configure a
+different shell, use an Ansible task to define the registry setting::
+
+ - name: set the default shell to PowerShell
+ win_regedit:
+ path: HKLM:\SOFTWARE\OpenSSH
+ name: DefaultShell
+ data: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe
+ type: string
+ state: present
+
+ # Or revert the settings back to the default, cmd
+ - name: set the default shell to cmd
+ win_regedit:
+ path: HKLM:\SOFTWARE\OpenSSH
+ name: DefaultShell
+ state: absent
+
+Win32-OpenSSH Authentication
+----------------------------
+Win32-OpenSSH authentication with Windows is similar to SSH
+authentication on Unix/Linux hosts. You can use a plaintext password or
+SSH public key authentication, add public keys to an ``authorized_keys`` file
+in the ``.ssh`` folder of the user's profile directory, and configure the
+service using the ``sshd_config`` file used by the SSH service as you would on
+a Unix/Linux host.
+
+When using SSH key authentication with Ansible, the remote session won't have access to the
+user's credentials and will fail when attempting to access a network resource.
+This is also known as the double-hop or credential delegation issue. There are
+two ways to work around this issue:
+
+* Use plaintext password auth by setting ``ansible_password``
+* Use ``become`` on the task with the credentials of the user that needs access to the remote resource
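+
+As a sketch of the second workaround (the share, user, and password are
+hypothetical), ``become`` gives the task the credentials it needs to reach the
+network resource:
+
+.. code-block:: yaml+jinja
+
+    - name: copy a file from a network share using become
+      win_copy:
+        src: \\fileserver\share\file.txt
+        dest: C:\temp\file.txt
+        remote_src: yes
+      become: yes
+      become_method: runas
+      vars:
+        ansible_become_user: DOMAIN\User
+        ansible_become_pass: Password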
+
+Configuring Ansible for SSH on Windows
+--------------------------------------
+To configure Ansible to use SSH for Windows hosts, you must set two connection variables:
+
+* set ``ansible_connection`` to ``ssh``
+* set ``ansible_shell_type`` to ``cmd`` or ``powershell``
+
+The ``ansible_shell_type`` variable should reflect the ``DefaultShell``
+configured on the Windows host. Set to ``cmd`` for the default shell or set to
+``powershell`` if the ``DefaultShell`` has been changed to PowerShell.
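+
+A minimal sketch of these variables in a YAML inventory (the host name is
+hypothetical):
+
+.. code-block:: yaml+jinja
+
+    windows:
+      hosts:
+        win-host.example.com:
+      vars:
+        ansible_connection: ssh
+        # match the DefaultShell configured on the host
+        ansible_shell_type: powershell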
+
+Known issues with SSH on Windows
+--------------------------------
+Using SSH with Windows is experimental, and we expect to uncover more issues.
+Here are the known ones:
+
+* Win32-OpenSSH versions older than ``v7.9.0.0p1-Beta`` do not work when ``powershell`` is the shell type
+* While SCP should work, SFTP is the recommended SSH file transfer mechanism to use when copying or fetching a file
+
+
+.. seealso::
+
+ :ref:`about_playbooks`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`List of Windows Modules <windows_modules>`
+ Windows specific module list, all implemented in PowerShell
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/windows_usage.rst b/docs/docsite/rst/user_guide/windows_usage.rst
new file mode 100644
index 00000000..b39413cd
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows_usage.rst
@@ -0,0 +1,513 @@
+Using Ansible and Windows
+=========================
+When using Ansible to manage Windows, much of the syntax and many of the rules that apply
+to Unix/Linux hosts also apply to Windows, but there are still some differences
+when it comes to components like path separators and OS-specific tasks.
+This document covers details specific to using Ansible for Windows.
+
+.. contents:: Topics
+ :local:
+
+Use Cases
+`````````
+Ansible can be used to orchestrate a multitude of tasks on Windows servers.
+Below are some examples and info about common tasks.
+
+Installing Software
+-------------------
+There are three main ways that Ansible can be used to install software:
+
+* Using the ``win_chocolatey`` module. This sources the program data from the default
+ public `Chocolatey <https://chocolatey.org/>`_ repository. Internal repositories can
+ be used instead by setting the ``source`` option.
+
+* Using the ``win_package`` module. This installs software using an MSI or .exe installer
+ from a local/network path or URL.
+
+* Using the ``win_command`` or ``win_shell`` module to run an installer manually.
+
+The ``win_chocolatey`` module is recommended since it has the most complete logic for checking to see if a package has already been installed and is up-to-date.
+
+Below are some examples of using all three options to install 7-Zip:
+
+.. code-block:: yaml+jinja
+
+ # Install/uninstall with chocolatey
+ - name: Ensure 7-Zip is installed via Chocolatey
+ win_chocolatey:
+ name: 7zip
+ state: present
+
+ - name: Ensure 7-Zip is not installed via Chocolatey
+ win_chocolatey:
+ name: 7zip
+ state: absent
+
+ # Install/uninstall with win_package
+ - name: Download the 7-Zip package
+ win_get_url:
+ url: https://www.7-zip.org/a/7z1701-x64.msi
+ dest: C:\temp\7z.msi
+
+ - name: Ensure 7-Zip is installed via win_package
+ win_package:
+ path: C:\temp\7z.msi
+ state: present
+
+ - name: Ensure 7-Zip is not installed via win_package
+ win_package:
+ path: C:\temp\7z.msi
+ state: absent
+
+ # Install/uninstall with win_command
+ - name: Download the 7-Zip package
+ win_get_url:
+ url: https://www.7-zip.org/a/7z1701-x64.msi
+ dest: C:\temp\7z.msi
+
+    - name: Check if 7-Zip is already installed
+      win_reg_stat:
+        name: HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\{23170F69-40C1-2702-1701-000001000000}
+      # registered variable names must be valid identifiers and cannot start with a digit
+      register: zip7_installed
+
+    - name: Ensure 7-Zip is installed via win_command
+      win_command: C:\Windows\System32\msiexec.exe /i C:\temp\7z.msi /qn /norestart
+      when: not zip7_installed.exists
+
+    - name: Ensure 7-Zip is uninstalled via win_command
+      win_command: C:\Windows\System32\msiexec.exe /x {23170F69-40C1-2702-1701-000001000000} /qn /norestart
+      when: zip7_installed.exists
+
+Some installers like Microsoft Office or SQL Server require credential delegation or
+access to components restricted by WinRM. The best method to bypass these
+issues is to use ``become`` with the task. With ``become``, Ansible will run
+the installer as if it were run interactively on the host.
+
+.. Note:: Many installers do not properly pass back error information over WinRM. In these cases, if the install has been verified to work locally, the recommended method is to use ``become``.
+
+.. Note:: Some installers restart the WinRM or HTTP services, or cause them to become temporarily unavailable, making Ansible assume the system is unreachable.
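+
+As a hedged sketch of using ``become`` for such an installer (the package path
+and account are hypothetical):
+
+.. code-block:: yaml+jinja
+
+    - name: install a package that needs credential delegation
+      win_package:
+        path: \\fileserver\software\setup.msi
+        state: present
+      become: yes
+      become_method: runas
+      vars:
+        ansible_become_user: DOMAIN\Admin
+        ansible_become_pass: Password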
+
+Installing Updates
+------------------
+The ``win_updates`` and ``win_hotfix`` modules can be used to install updates
+or hotfixes on a host. The module ``win_updates`` is used to install multiple
+updates by category, while ``win_hotfix`` can be used to install a single
+update or hotfix file that has been downloaded locally.
+
+.. Note:: The ``win_hotfix`` module has a requirement that the DISM PowerShell cmdlets are
+ present. These cmdlets were only added by default on Windows Server 2012
+ and newer and must be installed on older Windows hosts.
+
+The following example shows how ``win_updates`` can be used:
+
+.. code-block:: yaml+jinja
+
+ - name: Install all critical and security updates
+ win_updates:
+ category_names:
+ - CriticalUpdates
+ - SecurityUpdates
+ state: installed
+ register: update_result
+
+ - name: Reboot host if required
+ win_reboot:
+ when: update_result.reboot_required
+
+The following example shows how ``win_hotfix`` can be used to install a single
+update or hotfix:
+
+.. code-block:: yaml+jinja
+
+ - name: Download KB3172729 for Server 2012 R2
+ win_get_url:
+ url: http://download.windowsupdate.com/d/msdownload/update/software/secu/2016/07/windows8.1-kb3172729-x64_e8003822a7ef4705cbb65623b72fd3cec73fe222.msu
+ dest: C:\temp\KB3172729.msu
+
+ - name: Install hotfix
+ win_hotfix:
+ hotfix_kb: KB3172729
+ source: C:\temp\KB3172729.msu
+ state: present
+ register: hotfix_result
+
+ - name: Reboot host if required
+ win_reboot:
+ when: hotfix_result.reboot_required
+
+Set Up Users and Groups
+-----------------------
+Ansible can be used to create Windows users and groups both locally and on a domain.
+
+Local
++++++
+The modules ``win_user``, ``win_group`` and ``win_group_membership`` manage
+Windows users, groups and group memberships locally.
+
+The following is an example of creating local accounts and groups that can
+access a folder on the same host:
+
+.. code-block:: yaml+jinja
+
+ - name: Create local group to contain new users
+ win_group:
+ name: LocalGroup
+ description: Allow access to C:\Development folder
+
+ - name: Create local user
+ win_user:
+ name: '{{ item.name }}'
+ password: '{{ item.password }}'
+ groups: LocalGroup
+ update_password: no
+ password_never_expires: yes
+ loop:
+ - name: User1
+ password: Password1
+ - name: User2
+ password: Password2
+
+ - name: Create Development folder
+ win_file:
+ path: C:\Development
+ state: directory
+
+ - name: Set ACL of Development folder
+ win_acl:
+ path: C:\Development
+ rights: FullControl
+ state: present
+ type: allow
+ user: LocalGroup
+
+ - name: Remove parent inheritance of Development folder
+ win_acl_inheritance:
+ path: C:\Development
+ reorganize: yes
+ state: absent
+
+Domain
+++++++
+The modules ``win_domain_user`` and ``win_domain_group`` manage users and
+groups in a domain. Below is an example of ensuring a batch of domain users
+are created:
+
+.. code-block:: yaml+jinja
+
+ - name: Ensure each account is created
+ win_domain_user:
+ name: '{{ item.name }}'
+ upn: '{{ item.name }}@MY.DOMAIN.COM'
+ password: '{{ item.password }}'
+ password_never_expires: no
+ groups:
+ - Test User
+ - Application
+ company: Ansible
+ update_password: on_create
+ loop:
+ - name: Test User
+ password: Password
+ - name: Admin User
+ password: SuperSecretPass01
+ - name: Dev User
+ password: '@fvr3IbFBujSRh!3hBg%wgFucD8^x8W5'
+
+Running Commands
+----------------
+In cases where there is no appropriate module available for a task,
+a command or script can be run using the ``win_shell``, ``win_command``, ``raw``, and ``script`` modules.
+
+The ``raw`` module simply executes a PowerShell command remotely. Since ``raw``
+has none of the wrappers that Ansible typically uses, ``become``, ``async``
+and environment variables do not work.
+
+The ``script`` module executes a script from the Ansible controller on
+one or more Windows hosts. Like ``raw``, ``script`` currently does not support
+``become``, ``async``, or environment variables.
+
+The ``win_command`` module is used to execute a command which is either an
+executable or batch file, while the ``win_shell`` module is used to execute commands within a shell.
+
+Choosing Command or Shell
++++++++++++++++++++++++++
+The ``win_shell`` and ``win_command`` modules can both be used to execute a command or commands.
+The ``win_shell`` module is run within a shell-like process like ``PowerShell`` or ``cmd``, so it has access to shell
+operators like ``<``, ``>``, ``|``, ``;``, ``&&``, and ``||``. Multi-lined commands can also be run in ``win_shell``.
+
+The ``win_command`` module simply runs a process outside of a shell. It can still
+run a shell command like ``mkdir`` or ``New-Item`` by passing the shell commands
+to a shell executable like ``cmd.exe`` or ``PowerShell.exe``.
+
+Here are some examples of using ``win_command`` and ``win_shell``:
+
+.. code-block:: yaml+jinja
+
+ - name: Run a command under PowerShell
+ win_shell: Get-Service -Name service | Stop-Service
+
+ - name: Run a command under cmd
+ win_shell: mkdir C:\temp
+ args:
+ executable: cmd.exe
+
+    - name: Run multiple shell commands
+ win_shell: |
+ New-Item -Path C:\temp -ItemType Directory
+ Remove-Item -Path C:\temp -Force -Recurse
+ $path_info = Get-Item -Path C:\temp
+ $path_info.FullName
+
+ - name: Run an executable using win_command
+ win_command: whoami.exe
+
+ - name: Run a cmd command
+ win_command: cmd.exe /c mkdir C:\temp
+
+ - name: Run a vbs script
+ win_command: cscript.exe script.vbs
+
+.. Note:: Some commands like ``mkdir``, ``del``, and ``copy`` only exist in
+ the CMD shell. To run them with ``win_command`` they must be
+ prefixed with ``cmd.exe /c``.
+
+Argument Rules
+++++++++++++++
+When running a command through ``win_command``, the standard Windows argument
+rules apply:
+
+* Each argument is delimited by whitespace, which can be either a space or a
+  tab.
+
+* An argument can be surrounded by double quotes ``"``. Anything inside these
+ quotes is interpreted as a single argument even if it contains whitespace.
+
+* A double quote preceded by a backslash ``\`` is interpreted as just a double
+ quote ``"`` and not as an argument delimiter.
+
+* Backslashes are interpreted literally unless they immediately precede a double
+  quote; for example, ``\`` == ``\`` and ``\"`` == ``"``.
+
+* If an even number of backslashes is followed by a double quote, one
+ backslash is used in the argument for every pair, and the double quote is
+ used as a string delimiter for the argument.
+
+* If an odd number of backslashes is followed by a double quote, one backslash
+ is used in the argument for every pair, and the double quote is escaped and
+ made a literal double quote in the argument.
+
+With those rules in mind, here are some examples of quoting:
+
+.. code-block:: yaml+jinja
+
+ - win_command: C:\temp\executable.exe argument1 "argument 2" "C:\path\with space" "double \"quoted\""
+
+ argv[0] = C:\temp\executable.exe
+ argv[1] = argument1
+ argv[2] = argument 2
+ argv[3] = C:\path\with space
+ argv[4] = double "quoted"
+
+ - win_command: '"C:\Program Files\Program\program.exe" "escaped \\\" backslash" unquoted-end-backslash\'
+
+ argv[0] = C:\Program Files\Program\program.exe
+ argv[1] = escaped \" backslash
+ argv[2] = unquoted-end-backslash\
+
+ # Due to YAML and Ansible parsing '\"' must be written as '{% raw %}\\{% endraw %}"'
+ - win_command: C:\temp\executable.exe C:\no\space\path "arg with end \ before end quote{% raw %}\\{% endraw %}"
+
+ argv[0] = C:\temp\executable.exe
+ argv[1] = C:\no\space\path
+ argv[2] = arg with end \ before end quote\"
+
+For more information, see `escaping arguments <https://msdn.microsoft.com/en-us/library/17w5ykft(v=vs.85).aspx>`_.
+
+Creating and Running a Scheduled Task
+-------------------------------------
+WinRM has some restrictions in place that cause errors when running certain
+commands. One way to bypass these restrictions is to run a command through a
+scheduled task. A scheduled task is a Windows component that provides the
+ability to run an executable on a schedule and under a different account.
+
+Ansible version 2.5 added modules that make it easier to work with scheduled tasks in Windows.
+The following is an example of running a script as a scheduled task that deletes itself after
+running:
+
+.. code-block:: yaml+jinja
+
+ - name: Create scheduled task to run a process
+ win_scheduled_task:
+ name: adhoc-task
+ username: SYSTEM
+ actions:
+ - path: PowerShell.exe
+ arguments: |
+ Start-Sleep -Seconds 30 # This isn't required, just here as a demonstration
+ New-Item -Path C:\temp\test -ItemType Directory
+ # Remove this action if the task shouldn't be deleted on completion
+ - path: cmd.exe
+ arguments: /c schtasks.exe /Delete /TN "adhoc-task" /F
+ triggers:
+ - type: registration
+
+ - name: Wait for the scheduled task to complete
+ win_scheduled_task_stat:
+ name: adhoc-task
+ register: task_stat
+ until: (task_stat.state is defined and task_stat.state.status != "TASK_STATE_RUNNING") or (task_stat.task_exists == False)
+ retries: 12
+ delay: 10
+
+.. Note:: The modules used in the above example were updated/added in Ansible
+ version 2.5.
+
+Path Formatting for Windows
+```````````````````````````
+Windows differs from a traditional POSIX operating system in many ways. One of
+the major changes is the shift from ``/`` as the path separator to ``\``. This
+can cause major issues with how playbooks are written, since ``\`` is often used
+as an escape character on POSIX systems.
+
+Ansible allows two different styles of syntax; each deals with path separators for Windows differently:
+
+YAML Style
+----------
+When using the YAML syntax for tasks, the rules are well-defined by the YAML
+standard:
+
+* When using a normal string (without quotes), YAML will not consider the
+ backslash an escape character.
+
+* When using single quotes ``'``, YAML will not consider the backslash an
+ escape character.
+
+* When using double quotes ``"``, the backslash is considered an escape
+  character and needs to be escaped with another backslash.
+
+.. Note:: You should only quote strings when it is absolutely
+ necessary or required by YAML, and then use single quotes.
+
+The YAML specification considers the following `escape sequences <https://yaml.org/spec/current.html#id2517668>`_:
+
+* ``\0``, ``\\``, ``\"``, ``\_``, ``\a``, ``\b``, ``\e``, ``\f``, ``\n``, ``\r``, ``\t``,
+ ``\v``, ``\L``, ``\N`` and ``\P`` -- Single character escape
+
+* ``<TAB>``, ``<SPACE>``, ``<NBSP>``, ``<LNSP>``, ``<PSP>`` -- Special
+ characters
+
+* ``\x..`` -- 2-digit hex escape
+
+* ``\u....`` -- 4-digit hex escape
+
+* ``\U........`` -- 8-digit hex escape
+
+Here are some examples on how to write Windows paths::
+
+ # GOOD
+ tempdir: C:\Windows\Temp
+
+ # WORKS
+ tempdir: 'C:\Windows\Temp'
+ tempdir: "C:\\Windows\\Temp"
+
+ # BAD, BUT SOMETIMES WORKS
+ tempdir: C:\\Windows\\Temp
+ tempdir: 'C:\\Windows\\Temp'
+ tempdir: C:/Windows/Temp
+
+This is an example which will fail:
+
+.. code-block:: text
+
+ # FAILS
+ tempdir: "C:\Windows\Temp"
+
+This example shows the use of single quotes when they are required::
+
+ ---
+ - name: Copy tomcat config
+ win_copy:
+ src: log4j.xml
+ dest: '{{tc_home}}\lib\log4j.xml'
+
+Legacy key=value Style
+----------------------
+The legacy ``key=value`` syntax is used on the command line for ad-hoc commands,
+or inside playbooks. The use of this style is discouraged within playbooks
+because backslash characters need to be escaped, making playbooks harder to read.
+The legacy syntax depends on the specific implementation in Ansible, and quoting
+(both single and double) does not have any effect on how it is parsed by
+Ansible.
+
+The Ansible key=value parser, ``parse_kv()``, considers the following escape
+sequences:
+
+* ``\``, ``'``, ``"``, ``\a``, ``\b``, ``\f``, ``\n``, ``\r``, ``\t`` and
+ ``\v`` -- Single character escape
+
+* ``\x..`` -- 2-digit hex escape
+
+* ``\u....`` -- 4-digit hex escape
+
+* ``\U........`` -- 8-digit hex escape
+
+* ``\N{...}`` -- Unicode character by name
+
+This means that the backslash is an escape character for some sequences, and it
+is usually safer to escape a backslash when in this form.
+
+Here are some examples of using Windows paths with the key=value style:
+
+.. code-block:: ini
+
+ # GOOD
+ tempdir=C:\\Windows\\Temp
+
+ # WORKS
+ tempdir='C:\\Windows\\Temp'
+ tempdir="C:\\Windows\\Temp"
+
+ # BAD, BUT SOMETIMES WORKS
+ tempdir=C:\Windows\Temp
+ tempdir='C:\Windows\Temp'
+ tempdir="C:\Windows\Temp"
+ tempdir=C:/Windows/Temp
+
+ # FAILS
+ tempdir=C:\Windows\temp
+ tempdir='C:\Windows\temp'
+ tempdir="C:\Windows\temp"
+
+The failing examples don't fail outright, but will substitute ``\t`` with the
+``<TAB>`` character, resulting in ``tempdir`` being set to ``C:\Windows<TAB>emp``.
+
+Limitations
+```````````
+Some things you cannot do with Ansible and Windows are:
+
+* Upgrade PowerShell
+
+* Interact with the WinRM listeners
+
+Because WinRM is reliant on the services being online and running during normal operations, you cannot upgrade PowerShell or interact with WinRM listeners with Ansible; both of these actions will cause the connection to fail. This can technically be avoided by using ``async`` or a scheduled task, but those methods are fragile if the process they run breaks the underlying connection Ansible uses, and are best left to the bootstrapping process or before an image is
+created.
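+
+As a hedged sketch of the ``async`` approach (fragile, per the caveat above),
+a fire-and-forget task can restart WinRM without waiting on the broken
+connection:
+
+.. code-block:: yaml+jinja
+
+    - name: restart the WinRM service without waiting for the result
+      win_shell: Restart-Service -Name winrm
+      async: 60
+      poll: 0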
+
+Developing Windows Modules
+``````````````````````````
+Because Ansible modules for Windows are written in PowerShell, the development
+guides for Windows modules differ substantially from those for standard modules. Please see
+:ref:`developing_modules_general_windows` for more information.
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`List of Windows Modules <windows_modules>`
+ Windows specific module list, all implemented in PowerShell
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/rst/user_guide/windows_winrm.rst b/docs/docsite/rst/user_guide/windows_winrm.rst
new file mode 100644
index 00000000..03421cfb
--- /dev/null
+++ b/docs/docsite/rst/user_guide/windows_winrm.rst
@@ -0,0 +1,913 @@
+.. _windows_winrm:
+
+Windows Remote Management
+=========================
+Unlike Linux/Unix hosts, which use SSH by default, Windows hosts are
+configured with WinRM. This topic covers how to configure and use WinRM with Ansible.
+
+.. contents:: Topics
+ :local:
+
+What is WinRM?
+``````````````
+WinRM is a management protocol used by Windows to remotely communicate with
+another server. It is a SOAP-based protocol that communicates over HTTP/HTTPS, and is
+included in all recent Windows operating systems. Since Windows
+Server 2012, WinRM has been enabled by default, but in most cases extra
+configuration is required to use WinRM with Ansible.
+
+Ansible uses the `pywinrm <https://github.com/diyan/pywinrm>`_ package to
+communicate with Windows servers over WinRM. It is not installed by default
+with the Ansible package, but can be installed by running the following:
+
+.. code-block:: shell
+
+ pip install "pywinrm>=0.3.0"
+
+.. Note:: On distributions with multiple python versions, use pip2 or pip2.x,
+   where x matches the python minor version Ansible is running under.
+
+.. Warning::
+   Using the ``winrm`` or ``psrp`` connection plugins in Ansible on macOS in
+   the latest releases typically fails. This is a known problem that occurs
+   deep within the Python stack and cannot be changed by Ansible. The only
+   workaround today is to set the environment variable ``no_proxy=*`` and
+   avoid using Kerberos auth.
+
+
+Authentication Options
+``````````````````````
+When connecting to a Windows host, there are several different options that can be used
+when authenticating with an account. The authentication type may be set on inventory
+hosts or groups with the ``ansible_winrm_transport`` variable.
+
+The following matrix is a high level overview of the options:
+
++-------------+----------------+---------------------------+-----------------------+-----------------+
+| Option | Local Accounts | Active Directory Accounts | Credential Delegation | HTTP Encryption |
++=============+================+===========================+=======================+=================+
+| Basic | Yes | No | No | No |
++-------------+----------------+---------------------------+-----------------------+-----------------+
+| Certificate | Yes | No | No | No |
++-------------+----------------+---------------------------+-----------------------+-----------------+
+| Kerberos | No | Yes | Yes | Yes |
++-------------+----------------+---------------------------+-----------------------+-----------------+
+| NTLM | Yes | Yes | No | Yes |
++-------------+----------------+---------------------------+-----------------------+-----------------+
+| CredSSP | Yes | Yes | Yes | Yes |
++-------------+----------------+---------------------------+-----------------------+-----------------+
+
+Basic
+-----
+Basic authentication is one of the simplest authentication options to use, but it is
+also the most insecure. This is because the username and password are simply
+base64 encoded, and if a secure channel is not in use (for example, HTTPS), it can be
+decoded by anyone. Basic authentication can only be used for local accounts (not domain accounts).
+
+The following example shows host vars configured for basic authentication:
+
+.. code-block:: yaml+jinja
+
+ ansible_user: LocalUsername
+ ansible_password: Password
+ ansible_connection: winrm
+ ansible_winrm_transport: basic
+
+Basic authentication is not enabled by default on a Windows host but can be
+enabled by running the following in PowerShell::
+
+ Set-Item -Path WSMan:\localhost\Service\Auth\Basic -Value $true
+
+Certificate
+-----------
+Certificate authentication uses certificates as keys similar to SSH key
+pairs, but the file format and key generation process is different.
+
+The following example shows host vars configured for certificate authentication:
+
+.. code-block:: yaml+jinja
+
+ ansible_connection: winrm
+ ansible_winrm_cert_pem: /path/to/certificate/public/key.pem
+ ansible_winrm_cert_key_pem: /path/to/certificate/private/key.pem
+ ansible_winrm_transport: certificate
+
+Certificate authentication is not enabled by default on a Windows host but can
+be enabled by running the following in PowerShell::
+
+ Set-Item -Path WSMan:\localhost\Service\Auth\Certificate -Value $true
+
+.. Note:: Encrypted private keys cannot be used, because the urllib3 library that
+   is used by Ansible for WinRM does not support this functionality.
+
+Generate a Certificate
+++++++++++++++++++++++
+A certificate must be generated before it can be mapped to a local user.
+This can be done using one of the following methods:
+
+* OpenSSL
+* PowerShell, using the ``New-SelfSignedCertificate`` cmdlet
+* Active Directory Certificate Services
+
+Active Directory Certificate Services is beyond the scope of this documentation but may be
+the best option to use when running in a domain environment. For more information,
+see the `Active Directory Certificate Services documentation <https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2008-R2-and-2008/cc732625(v=ws.11)>`_.
+
+.. Note:: Using the PowerShell cmdlet ``New-SelfSignedCertificate`` to generate
+ a certificate for authentication only works when being generated from a
+ Windows 10 or Windows Server 2012 R2 host or later. OpenSSL is still required to
+ extract the private key from the PFX certificate to a PEM file for Ansible
+ to use.
+
+To generate a certificate with ``OpenSSL``:
+
+.. code-block:: shell
+
+    # Set the name of the local user that will have the key mapped to it
+ USERNAME="username"
+
+ cat > openssl.conf << EOL
+ distinguished_name = req_distinguished_name
+ [req_distinguished_name]
+ [v3_req_client]
+ extendedKeyUsage = clientAuth
+ subjectAltName = otherName:1.3.6.1.4.1.311.20.2.3;UTF8:$USERNAME@localhost
+ EOL
+
+ export OPENSSL_CONF=openssl.conf
+ openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -out cert.pem -outform PEM -keyout cert_key.pem -subj "/CN=$USERNAME" -extensions v3_req_client
+ rm openssl.conf
+
+
+To generate a certificate with ``New-SelfSignedCertificate``:
+
+.. code-block:: powershell
+
+ # Set the name of the local user that will have the key mapped
+ $username = "username"
+ $output_path = "C:\temp"
+
+ # Instead of generating a file, the cert will be added to the personal
+ # LocalComputer folder in the certificate store
+ $cert = New-SelfSignedCertificate -Type Custom `
+ -Subject "CN=$username" `
+ -TextExtension @("2.5.29.37={text}1.3.6.1.5.5.7.3.2","2.5.29.17={text}upn=$username@localhost") `
+ -KeyUsage DigitalSignature,KeyEncipherment `
+ -KeyAlgorithm RSA `
+ -KeyLength 2048
+
+ # Export the public key
+ $pem_output = @()
+ $pem_output += "-----BEGIN CERTIFICATE-----"
+ $pem_output += [System.Convert]::ToBase64String($cert.RawData) -replace ".{64}", "$&`n"
+ $pem_output += "-----END CERTIFICATE-----"
+ [System.IO.File]::WriteAllLines("$output_path\cert.pem", $pem_output)
+
+ # Export the private key in a PFX file
+ [System.IO.File]::WriteAllBytes("$output_path\cert.pfx", $cert.Export("Pfx"))
+
+
+.. Note:: To convert the PFX file to a private key that pywinrm can use, run
+ the following command with OpenSSL
+ ``openssl pkcs12 -in cert.pfx -nocerts -nodes -out cert_key.pem -passin pass: -passout pass:``
+
+Import a Certificate to the Certificate Store
++++++++++++++++++++++++++++++++++++++++++++++
+Once a certificate has been generated, the issuing certificate needs to be
+imported into the ``Trusted Root Certificate Authorities`` of the
+``LocalMachine`` store, and the client certificate public key must be present
+in the ``Trusted People`` folder of the ``LocalMachine`` store. For this example,
+both the issuing certificate and public key are the same.
+
+The following example shows how to import the issuing certificate:
+
+.. code-block:: powershell
+
+ $cert = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2
+ $cert.Import("cert.pem")
+
+ $store_name = [System.Security.Cryptography.X509Certificates.StoreName]::Root
+ $store_location = [System.Security.Cryptography.X509Certificates.StoreLocation]::LocalMachine
+ $store = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Store -ArgumentList $store_name, $store_location
+ $store.Open("MaxAllowed")
+ $store.Add($cert)
+ $store.Close()
+
+
+.. Note:: If using ADCS to generate the certificate, then the issuing
+ certificate will already be imported and this step can be skipped.
+
+The code to import the client certificate public key is:
+
+.. code-block:: powershell
+
+ $cert = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2
+ $cert.Import("cert.pem")
+
+ $store_name = [System.Security.Cryptography.X509Certificates.StoreName]::TrustedPeople
+ $store_location = [System.Security.Cryptography.X509Certificates.StoreLocation]::LocalMachine
+ $store = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Store -ArgumentList $store_name, $store_location
+ $store.Open("MaxAllowed")
+ $store.Add($cert)
+ $store.Close()
+
+
+Mapping a Certificate to an Account
++++++++++++++++++++++++++++++++++++
+Once the certificate has been imported, map it to the local user account::
+
+ $username = "username"
+ $password = ConvertTo-SecureString -String "password" -AsPlainText -Force
+ $credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $password
+
+    # This is the issuer thumbprint, which in the case of a self-generated cert
+    # is the public key thumbprint; additional logic may be required for other
+    # scenarios
+ $thumbprint = (Get-ChildItem -Path cert:\LocalMachine\root | Where-Object { $_.Subject -eq "CN=$username" }).Thumbprint
+
+ New-Item -Path WSMan:\localhost\ClientCertificate `
+ -Subject "$username@localhost" `
+ -URI * `
+ -Issuer $thumbprint `
+ -Credential $credential `
+ -Force
+
+
+Once this is complete, the hostvar ``ansible_winrm_cert_pem`` should be set to
+the path of the public key and the ``ansible_winrm_cert_key_pem`` variable should be set to
+the path of the private key.
+
+NTLM
+----
+NTLM is an older authentication mechanism used by Microsoft that can support
+both local and domain accounts. NTLM is enabled by default on the WinRM
+service, so no setup is required before using it.
+
+NTLM is the easiest authentication protocol to use and is more secure than
+``Basic`` authentication. If running in a domain environment, ``Kerberos`` should be used
+instead of NTLM.
+
+Kerberos has several advantages over using NTLM:
+
+* NTLM is an older protocol and does not support newer encryption
+ protocols.
+* NTLM is slower to authenticate because it requires more round trips to the host in
+ the authentication stage.
+* Unlike Kerberos, NTLM does not allow credential delegation.
+
+This example shows host variables configured to use NTLM authentication:
+
+.. code-block:: yaml+jinja
+
+ ansible_user: LocalUsername
+ ansible_password: Password
+ ansible_connection: winrm
+ ansible_winrm_transport: ntlm
+
+Kerberos
+--------
+Kerberos is the recommended authentication option to use when running in a
+domain environment. Kerberos supports features like credential delegation and
+message encryption over HTTP and is one of the more secure options that
+is available through WinRM.
+
+Kerberos requires some additional setup work on the Ansible host before it can be
+used properly.
+
+The following example shows host vars configured for Kerberos authentication:
+
+.. code-block:: yaml+jinja
+
+ ansible_user: username@MY.DOMAIN.COM
+ ansible_password: Password
+ ansible_connection: winrm
+ ansible_winrm_transport: kerberos
+
+As of Ansible version 2.3, the Kerberos ticket will be created based on
+``ansible_user`` and ``ansible_password``. If running on an older version of
+Ansible or when ``ansible_winrm_kinit_mode`` is ``manual``, a Kerberos
+ticket must already be obtained. See below for more details.
+
+There are some extra host variables that can be set::
+
+ ansible_winrm_kinit_mode: managed/manual (manual means Ansible will not obtain a ticket)
+ ansible_winrm_kinit_cmd: the kinit binary to use to obtain a Kerberos ticket (defaults to kinit)
+ ansible_winrm_service: overrides the SPN prefix that is used, the default is HTTP and should rarely need changing
+ ansible_winrm_kerberos_delegation: allows the credentials to traverse multiple hops
+ ansible_winrm_kerberos_hostname_override: the hostname to be used for the Kerberos exchange
+
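+For example, a host that should use credential delegation might set the
+following (a sketch; the values are illustrative):
+
+.. code-block:: yaml+jinja
+
+    ansible_winrm_transport: kerberos
+    ansible_winrm_kerberos_delegation: true
+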
+Installing the Kerberos Library
++++++++++++++++++++++++++++++++
+There are some system dependencies that must be installed before Kerberos can be used. The script below lists the dependencies based on the distro:
+
+.. code-block:: shell
+
+ # Via Yum (RHEL/CentOS/Fedora)
+ yum -y install gcc python-devel krb5-devel krb5-libs krb5-workstation
+
+ # Via Apt (Ubuntu)
+ sudo apt-get install python-dev libkrb5-dev krb5-user
+
+ # Via Portage (Gentoo)
+ emerge -av app-crypt/mit-krb5
+ emerge -av dev-python/setuptools
+
+ # Via Pkg (FreeBSD)
+ sudo pkg install security/krb5
+
+ # Via OpenCSW (Solaris)
+ pkgadd -d http://get.opencsw.org/now
+ /opt/csw/bin/pkgutil -U
+ /opt/csw/bin/pkgutil -y -i libkrb5_3
+
+ # Via Pacman (Arch Linux)
+ pacman -S krb5
+
+
+Once the dependencies have been installed, the ``python-kerberos`` wrapper can
+be installed using ``pip``:
+
+.. code-block:: shell
+
+ pip install pywinrm[kerberos]
+
+
+.. note::
+ While Ansible has supported Kerberos auth through ``pywinrm`` for some
+ time, optional features or more secure options may only be available in
+ newer versions of the ``pywinrm`` and/or ``pykerberos`` libraries. It is
+ recommended you upgrade each version to the latest available to resolve
+ any warnings or errors. This can be done through tools like ``pip`` or a
+ system package manager like ``dnf``, ``yum``, ``apt`` but the package
+ names and versions available may differ between tools.
+
+
+Configuring Host Kerberos
++++++++++++++++++++++++++
+Once the dependencies have been installed, Kerberos needs to be configured so
+that it can communicate with a domain. This configuration is done through the
+``/etc/krb5.conf`` file, which is installed with the packages in the script above.
+
+To configure Kerberos, in the section that starts with:
+
+.. code-block:: ini
+
+ [realms]
+
+Add the full domain name and the fully qualified domain names of the primary
+and secondary Active Directory domain controllers. It should look something
+like this:
+
+.. code-block:: ini
+
+ [realms]
+ MY.DOMAIN.COM = {
+ kdc = domain-controller1.my.domain.com
+ kdc = domain-controller2.my.domain.com
+ }
+
+In the section that starts with:
+
+.. code-block:: ini
+
+ [domain_realm]
+
+Add a line like the following for each domain that Ansible needs access to:
+
+.. code-block:: ini
+
+ [domain_realm]
+ .my.domain.com = MY.DOMAIN.COM
+
+You can configure other settings in this file such as the default domain. See
+`krb5.conf <https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html>`_
+for more details.
+
+Automatic Kerberos Ticket Management
+++++++++++++++++++++++++++++++++++++
+Ansible version 2.3 and later defaults to automatically managing Kerberos tickets
+when both ``ansible_user`` and ``ansible_password`` are specified for a host. In
+this process, a new ticket is created in a temporary credential cache for each
+host. This is done before each task executes to minimize the chance of ticket
+expiration. The temporary credential caches are deleted after each task
+completes and will not interfere with the default credential cache.
+
+To disable automatic ticket management, set ``ansible_winrm_kinit_mode=manual``
+via the inventory.
+
+Automatic ticket management requires a standard ``kinit`` binary on the control
+host system path. To specify a different location or binary name, set the
+``ansible_winrm_kinit_cmd`` hostvar to the fully qualified path to an MIT
+Kerberos v5 ``kinit``-compatible binary.
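+
+For example, to disable automatic ticket management for a host, or to point
+Ansible at a non-default ``kinit`` binary (the path below is hypothetical):
+
+.. code-block:: yaml+jinja
+
+    # Disable automatic ticket management
+    ansible_winrm_kinit_mode: manual
+
+    # Or use a custom kinit-compatible binary (hypothetical path)
+    ansible_winrm_kinit_cmd: /opt/krb5/bin/kinit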
+
+Manual Kerberos Ticket Management
++++++++++++++++++++++++++++++++++
+To manually manage Kerberos tickets, the ``kinit`` binary is used. To
+obtain a new ticket, use the following command:
+
+.. code-block:: shell
+
+ kinit username@MY.DOMAIN.COM
+
+.. Note:: The domain must match the configured Kerberos realm exactly, and must be in upper case.
+
+To see what tickets (if any) have been acquired, use the following command:
+
+.. code-block:: shell
+
+ klist
+
+To destroy all the tickets that have been acquired, use the following command:
+
+.. code-block:: shell
+
+ kdestroy
+
+Troubleshooting Kerberos
+++++++++++++++++++++++++
+Kerberos is reliant on a properly-configured environment to
+work. To troubleshoot Kerberos issues, ensure that:
+
+* The hostname set for the Windows host is the FQDN and not an IP address.
+
+* The forward and reverse DNS lookups are working properly in the domain. To
+ test this, ping the Windows host by name and then use the IP address returned
+ with ``nslookup``. The same name should be returned when using ``nslookup``
+ on the IP address.
+
+* The Ansible host's clock is synchronized with the domain controller. Kerberos
+ is time sensitive, and a little clock drift can cause the ticket generation
+ process to fail.
+
+* Ensure that the fully qualified domain name for the domain is configured in
+ the ``krb5.conf`` file. To check this, run::
+
+ kinit -C username@MY.DOMAIN.COM
+ klist
+
+ If the domain name returned by ``klist`` is different from the one requested,
+ an alias is being used. The ``krb5.conf`` file needs to be updated so that
+ the fully qualified domain name is used and not an alias.
+
+* If the default Kerberos tooling has been replaced or modified (some IdM solutions may do this), this may cause issues when installing or upgrading the Python Kerberos library. As of this writing, this library is called ``pykerberos`` and is known to work with both MIT and Heimdal Kerberos libraries. To resolve ``pykerberos`` installation issues, ensure the system dependencies for Kerberos have been met (see `Installing the Kerberos Library`_), remove any custom Kerberos tooling paths from the PATH environment variable, and retry the installation of the Python Kerberos library.
+
+CredSSP
+-------
+CredSSP authentication is a newer authentication protocol that allows
+credential delegation. This is achieved by encrypting the username and password
+after authentication has succeeded and sending that to the server using the
+CredSSP protocol.
+
+Because the username and password are sent to the server to be used for double
+hop authentication, ensure that the hosts that the Windows host communicates with are
+not compromised and are trusted.
+
+CredSSP can be used for both local and domain accounts and also supports
+message encryption over HTTP.
+
+To use CredSSP authentication, the host vars are configured like so:
+
+.. code-block:: yaml+jinja
+
+ ansible_user: Username
+ ansible_password: Password
+ ansible_connection: winrm
+ ansible_winrm_transport: credssp
+
+There are some extra host variables that can be set as shown below::
+
+ ansible_winrm_credssp_disable_tlsv1_2: when true, will not use TLS 1.2 in the CredSSP auth process
+
+CredSSP authentication is not enabled by default on a Windows host, but can
+be enabled by running the following in PowerShell:
+
+.. code-block:: powershell
+
+ Enable-WSManCredSSP -Role Server -Force
+
+Installing CredSSP Library
+++++++++++++++++++++++++++
+
+The ``requests-credssp`` wrapper can be installed using ``pip``:
+
+.. code-block:: bash
+
+ pip install pywinrm[credssp]
+
+CredSSP and TLS 1.2
++++++++++++++++++++
+By default the ``requests-credssp`` library is configured to authenticate over
+the TLS 1.2 protocol. TLS 1.2 is installed and enabled by default on Windows
+Server 2012, Windows 8, and more recent releases.
+
+There are two ways that older hosts can be used with CredSSP:
+
+* Install and enable a hotfix to enable TLS 1.2 support (recommended
+ for Server 2008 R2 and Windows 7).
+
+* Set ``ansible_winrm_credssp_disable_tlsv1_2=True`` in the inventory to run
+ over TLS 1.0. This is the only option when connecting to Windows Server 2008,
+ which has no way of supporting TLS 1.2.
+
+See :ref:`winrm_tls12` for more information on how to enable TLS 1.2 on the
+Windows host.
+
+Set CredSSP Certificate
++++++++++++++++++++++++
+CredSSP works by encrypting the credentials through the TLS protocol and uses
+a self-signed certificate by default. The ``CertificateThumbprint`` option
+under the WinRM service configuration can be used to specify the thumbprint of
+another certificate.
+
+.. Note:: This certificate configuration is independent of the WinRM listener
+ certificate. With CredSSP, message transport still occurs over the WinRM listener,
+ but the TLS-encrypted messages inside the channel use the service-level certificate.
+
+To explicitly set the certificate to use for CredSSP::
+
+ # Note the value $certificate_thumbprint will be different in each
+ # situation, this needs to be set based on the cert that is used.
+ $certificate_thumbprint = "7C8DCBD5427AFEE6560F4AF524E325915F51172C"
+
+ # Set the thumbprint value
+ Set-Item -Path WSMan:\localhost\Service\CertificateThumbprint -Value $certificate_thumbprint
+
+Non-Administrator Accounts
+``````````````````````````
+WinRM is configured by default to only allow connections from accounts in the local
+``Administrators`` group. This can be changed by running:
+
+.. code-block:: powershell
+
+ winrm configSDDL default
+
+This will display an ACL editor, where new users or groups may be added. To run commands
+over WinRM, users and groups must have at least the ``Read`` and ``Execute`` permissions
+enabled.
+
+While non-administrative accounts can be used with WinRM, most typical server administration
+tasks require some level of administrative access, so the utility is usually limited.
+
+WinRM Encryption
+````````````````
+By default WinRM will fail to work when running over an unencrypted channel.
+The WinRM protocol considers the channel to be encrypted if using TLS over HTTP
+(HTTPS) or using message level encryption. Using WinRM with TLS is the
+recommended option as it works with all authentication options, but requires
+a certificate to be created and used on the WinRM listener.
+
+The ``ConfigureRemotingForAnsible.ps1`` script creates a self-signed
+certificate and sets up the listener with that certificate. If in a domain
+environment, AD CS can also create a certificate for the host that is issued
+by the domain itself.
+
+If using HTTPS is not an option, then HTTP can be used when the authentication
+option is ``NTLM``, ``Kerberos`` or ``CredSSP``. These protocols will encrypt
+the WinRM payload with their own encryption method before sending it to the
+server. The message-level encryption is not used when running over HTTPS because the
+encryption uses the more secure TLS protocol instead. If both transport and
+message encryption are required, set ``ansible_winrm_message_encryption=always``
+in the host vars.
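+
+As a sketch, host vars for running over HTTP with message encryption forced
+on might look like this:
+
+.. code-block:: yaml+jinja
+
+    ansible_port: 5985
+    ansible_winrm_scheme: http
+    ansible_winrm_transport: ntlm
+    ansible_winrm_message_encryption: always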
+
+.. Note:: Message encryption over HTTP requires pywinrm>=0.3.0.
+
+A last resort is to disable the encryption requirement on the Windows host. This
+should only be used for development and debugging purposes, as anything sent
+from Ansible can be viewed or manipulated, and the remote session can be
+completely taken over, by anyone on the same network. To disable the
+encryption requirement::
+
+ Set-Item -Path WSMan:\localhost\Service\AllowUnencrypted -Value $true
+
+.. Note:: Do not disable the encryption check unless it is
+ absolutely required. Doing so could allow sensitive information like
+ credentials and files to be intercepted by others on the network.
+
+Inventory Options
+`````````````````
+Ansible's Windows support relies on a few standard variables to indicate the
+username, password, and connection type of the remote hosts. These variables
+are most easily set up in the inventory, but can also be set at the
+``host_vars``/``group_vars`` level.
+
+When setting up the inventory, the following variables are required:
+
+.. code-block:: yaml+jinja
+
+ # It is suggested that these be encrypted with ansible-vault:
+ # ansible-vault edit group_vars/windows.yml
+ ansible_connection: winrm
+
+ # May also be passed on the command-line via --user
+ ansible_user: Administrator
+
+ # May also be supplied at runtime with --ask-pass
+ ansible_password: SecretPasswordGoesHere
+
+
+Using the variables above, Ansible will connect to the Windows host with Basic
+authentication through HTTPS. If ``ansible_user`` has a UPN value like
+``username@MY.DOMAIN.COM`` then the authentication option will automatically attempt
+to use Kerberos unless ``ansible_winrm_transport`` has been set to something other than
+``kerberos``.
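+
+For example, to keep using basic authentication even when ``ansible_user``
+is a UPN (the values are illustrative):
+
+.. code-block:: yaml+jinja
+
+    ansible_user: username@MY.DOMAIN.COM
+    ansible_winrm_transport: basic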
+
+The following custom inventory variables are also supported
+for additional configuration of WinRM connections:
+
+* ``ansible_port``: The port WinRM will run over. HTTPS uses ``5986``, which
+ is the default, while HTTP uses ``5985``
+
+* ``ansible_winrm_scheme``: Specify the connection scheme (``http`` or
+ ``https``) to use for the WinRM connection. Ansible uses ``https`` by default
+ unless ``ansible_port`` is ``5985``
+
+* ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint,
+ Ansible uses ``/wsman`` by default
+
+* ``ansible_winrm_realm``: Specify the realm to use for Kerberos
+ authentication. If ``ansible_user`` contains ``@``, Ansible will use the part
+ of the username after ``@`` by default
+
+* ``ansible_winrm_transport``: Specify one or more authentication transport
+ options as a comma-separated list. By default, Ansible will use ``kerberos,
+ basic`` if the ``kerberos`` module is installed and a realm is defined,
+ otherwise it will be ``plaintext``
+
+* ``ansible_winrm_server_cert_validation``: Specify the server certificate
+ validation mode (``ignore`` or ``validate``). Ansible defaults to
+ ``validate`` on Python 2.7.9 and higher, which will result in certificate
+ validation errors against the Windows self-signed certificates. Unless
+ verifiable certificates have been configured on the WinRM listeners, this
+ should be set to ``ignore``
+
+* ``ansible_winrm_operation_timeout_sec``: Increase the default timeout for
+ WinRM operations, Ansible uses ``20`` by default
+
+* ``ansible_winrm_read_timeout_sec``: Increase the WinRM read timeout, Ansible
+ uses ``30`` by default. Useful if there are intermittent network issues and
+ read timeout errors keep occurring
+
+* ``ansible_winrm_message_encryption``: Specify the message encryption
+ operation (``auto``, ``always``, ``never``) to use, Ansible uses ``auto`` by
+ default. ``auto`` means message encryption is only used when
+ ``ansible_winrm_scheme`` is ``http`` and ``ansible_winrm_transport`` supports
+ message encryption. ``always`` means message encryption will always be used
+ and ``never`` means message encryption will never be used
+
+* ``ansible_winrm_ca_trust_path``: Used to specify a different cacert container
+ than the one used in the ``certifi`` module. See the HTTPS Certificate
+ Validation section for more details.
+
+* ``ansible_winrm_send_cbt``: When using ``ntlm`` or ``kerberos`` over HTTPS,
+ the authentication library will try to send channel binding tokens to
+ mitigate man-in-the-middle attacks. This flag controls whether these
+ bindings will be sent or not (default: ``yes``).
+
+* ``ansible_winrm_*``: Any additional keyword arguments supported by
+ ``winrm.Protocol`` may be provided in place of ``*``
+
+In addition, there are also specific variables that need to be set
+for each authentication option. See the section on authentication above for more information.
+
+.. Note:: Ansible 2.0 has deprecated the "ssh" from ``ansible_ssh_user``,
+ ``ansible_ssh_pass``, ``ansible_ssh_host``, and ``ansible_ssh_port`` to
+ become ``ansible_user``, ``ansible_password``, ``ansible_host``, and
+ ``ansible_port``. If using a version of Ansible prior to 2.0, the older
+ style (``ansible_ssh_*``) should be used instead. The shorter variables
+ are ignored, without warning, in older versions of Ansible.
+
+.. Note:: ``ansible_winrm_message_encryption`` is different from transport
+ encryption done over TLS. The WinRM payload is still encrypted with TLS
+ when run over HTTPS, even if ``ansible_winrm_message_encryption=never``.
+
+IPv6 Addresses
+``````````````
+IPv6 addresses can be used instead of IPv4 addresses or hostnames. This option
+is normally set in an inventory. Ansible will attempt to parse the address
+using the `ipaddress <https://docs.python.org/3/library/ipaddress.html>`_
+package and pass it to pywinrm correctly.
+
+When defining a host using an IPv6 address, just add the IPv6 address as you
+would an IPv4 address or hostname:
+
+.. code-block:: ini
+
+ [windows-server]
+ 2001:db8::1
+
+ [windows-server:vars]
+ ansible_user=username
+ ansible_password=password
+ ansible_connection=winrm
+
+
+.. Note:: The ipaddress library is only included by default in Python 3.x. To
+ use IPv6 addresses in Python 2.7, make sure to run ``pip install ipaddress`` which installs
+ a backported package.
+
+HTTPS Certificate Validation
+````````````````````````````
+As part of the TLS protocol, the certificate is validated to ensure the host
+matches the subject and the client trusts the issuer of the server certificate.
+When using a self-signed certificate or setting
+``ansible_winrm_server_cert_validation: ignore`` these security mechanisms are
+bypassed. While self-signed certificates will always need the ``ignore`` flag,
+certificates that have been issued from a certificate authority can still be
+validated.
+
+One of the more common ways of setting up an HTTPS listener in a domain
+environment is to use Active Directory Certificate Services (AD CS). AD CS is
+used to generate signed certificates from a Certificate Signing Request (CSR).
+If the WinRM HTTPS listener is using a certificate that has been signed by
+another authority, like AD CS, then Ansible can be set up to trust that
+issuer as part of the TLS handshake.
+
+To get Ansible to trust a Certificate Authority (CA) like AD CS, the issuer
+certificate of the CA can be exported as a PEM encoded certificate. This
+certificate can then be copied locally to the Ansible controller and used as a
+source of certificate validation, otherwise known as a CA chain.
+
+The CA chain can contain a single issuer certificate or multiple certificates,
+with each entry on a new line. To then use the custom CA chain as part of
+the validation process, set ``ansible_winrm_ca_trust_path`` to the path of the
+file. If this variable is not set, the default CA chain is used instead which
+is located in the install path of the Python package
+`certifi <https://github.com/certifi/python-certifi>`_.
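+
+For example, after exporting the CA issuer certificate to a local PEM file
+(the path below is illustrative):
+
+.. code-block:: yaml+jinja
+
+    ansible_winrm_server_cert_validation: validate
+    ansible_winrm_ca_trust_path: /etc/pki/ansible/win_ca_chain.pem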
+
+.. Note:: Each HTTP call is done by the Python requests library which does not
+ use the system's built-in certificate store as a trust authority.
+ Certificate validation will fail if the server's certificate issuer is
+ only added to the system's truststore.
+
+.. _winrm_tls12:
+
+TLS 1.2 Support
+```````````````
+As WinRM runs over the HTTP protocol, using HTTPS means that the TLS protocol
+is used to encrypt the WinRM messages. TLS will automatically attempt to
+negotiate the best protocol and cipher suite that is available to both the
+client and the server. If a match cannot be found then Ansible will error out
+with a message similar to::
+
+ HTTPSConnectionPool(host='server', port=5986): Max retries exceeded with url: /wsman (Caused by SSLError(SSLError(1, '[SSL: UNSUPPORTED_PROTOCOL] unsupported protocol (_ssl.c:1056)')))
+
+Commonly this means the Windows host has not been configured to support
+TLS v1.2, but it could also mean the Ansible controller has an older OpenSSL
+version installed.
+
+Windows 8 and Windows Server 2012 come with TLS v1.2 installed and enabled by
+default, but on older hosts, like Server 2008 R2 and Windows 7, it has to be
+enabled manually.
+
+.. Note:: There is a bug with the TLS 1.2 patch for Server 2008 which will stop
+ Ansible from connecting to the Windows host. This means that Server 2008
+ cannot be configured to use TLS 1.2. Server 2008 R2 and Windows 7 are not
+ affected by this issue and can use TLS 1.2.
+
+To verify what protocol the Windows host supports, you can run the following
+command on the Ansible controller::
+
+ openssl s_client -connect <hostname>:5986
+
+The output will contain information about the TLS session and the ``Protocol``
+line will display the version that was negotiated::
+
+ New, TLSv1/SSLv3, Cipher is ECDHE-RSA-AES256-SHA
+ Server public key is 2048 bit
+ Secure Renegotiation IS supported
+ Compression: NONE
+ Expansion: NONE
+ No ALPN negotiated
+ SSL-Session:
+ Protocol : TLSv1
+ Cipher : ECDHE-RSA-AES256-SHA
+ Session-ID: 962A00001C95D2A601BE1CCFA7831B85A7EEE897AECDBF3D9ECD4A3BE4F6AC9B
+ Session-ID-ctx:
+ Master-Key: ....
+ Start Time: 1552976474
+ Timeout : 7200 (sec)
+ Verify return code: 21 (unable to verify the first certificate)
+ ---
+
+ New, TLSv1/SSLv3, Cipher is ECDHE-RSA-AES256-GCM-SHA384
+ Server public key is 2048 bit
+ Secure Renegotiation IS supported
+ Compression: NONE
+ Expansion: NONE
+ No ALPN negotiated
+ SSL-Session:
+ Protocol : TLSv1.2
+ Cipher : ECDHE-RSA-AES256-GCM-SHA384
+ Session-ID: AE16000050DA9FD44D03BB8839B64449805D9E43DBD670346D3D9E05D1AEEA84
+ Session-ID-ctx:
+ Master-Key: ....
+ Start Time: 1552976538
+ Timeout : 7200 (sec)
+ Verify return code: 21 (unable to verify the first certificate)
+
+If the host is returning ``TLSv1`` then it should be configured so that
+TLS v1.2 is enabled. You can do this by running the following PowerShell
+script:
+
+.. code-block:: powershell
+
+ Function Enable-TLS12 {
+ param(
+ [ValidateSet("Server", "Client")]
+ [String]$Component = "Server"
+ )
+
+ $protocols_path = 'HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\SCHANNEL\Protocols'
+ New-Item -Path "$protocols_path\TLS 1.2\$Component" -Force
+ New-ItemProperty -Path "$protocols_path\TLS 1.2\$Component" -Name Enabled -Value 1 -Type DWORD -Force
+ New-ItemProperty -Path "$protocols_path\TLS 1.2\$Component" -Name DisabledByDefault -Value 0 -Type DWORD -Force
+ }
+
+ Enable-TLS12 -Component Server
+
+ # Not required but highly recommended to enable the Client side TLS 1.2 components
+ Enable-TLS12 -Component Client
+
+ Restart-Computer
+
+The following Ansible tasks can also be used to enable TLS v1.2:
+
+.. code-block:: yaml+jinja
+
+ - name: enable TLSv1.2 support
+ win_regedit:
+ path: HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\SCHANNEL\Protocols\TLS 1.2\{{ item.type }}
+ name: '{{ item.property }}'
+ data: '{{ item.value }}'
+ type: dword
+ state: present
+ register: enable_tls12
+ loop:
+ - type: Server
+ property: Enabled
+ value: 1
+ - type: Server
+ property: DisabledByDefault
+ value: 0
+ - type: Client
+ property: Enabled
+ value: 1
+ - type: Client
+ property: DisabledByDefault
+ value: 0
+
+ - name: reboot if TLS config was applied
+ win_reboot:
+ when: enable_tls12 is changed
+
+There are other ways to configure the TLS protocols as well as the cipher
+suites that are offered by the Windows host. One tool that can give you a GUI
+to manage these settings is `IIS Crypto <https://www.nartac.com/Products/IISCrypto/>`_
+from Nartac Software.
+
+Limitations
+```````````
+Due to the design of the WinRM protocol, there are a few limitations
+when using WinRM that can cause issues when creating playbooks for Ansible.
+These include:
+
+* Credentials are not delegated for most authentication types, which causes
+ authentication errors when accessing network resources or installing certain
+ programs.
+
+* Many calls to the Windows Update API are blocked when running over WinRM.
+
+* Some programs fail to install with WinRM due to no credential delegation or
+ because they access forbidden Windows APIs like WUA over WinRM.
+
+* Commands run over WinRM execute in a non-interactive session, which can
+ prevent certain commands or executables from running.
+
+* You cannot run a process that interacts with ``DPAPI``, which is used by some
+ installers (like Microsoft SQL Server).
+
+Some of these limitations can be mitigated by doing one of the following:
+
+* Set ``ansible_winrm_transport`` to ``credssp`` or ``kerberos`` (with
+ ``ansible_winrm_kerberos_delegation=true``) to bypass the double hop issue
+ and access network resources
+
+* Use ``become`` to bypass all WinRM restrictions and run a command as if it
+ were run locally. Unlike using an authentication transport like ``credssp``,
+ this will also remove the non-interactive restriction and API restrictions
+ like WUA and DPAPI (see the sketch after this list)
+
+* Use a scheduled task, created with the ``win_scheduled_task`` module, to run
+ a command. Like ``become``, this bypasses all WinRM restrictions but can only
+ run a command and not modules.
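+
+As a sketch, a task using ``become`` to run outside of the WinRM restrictions
+might look like this (the module choice and become values are illustrative):
+
+.. code-block:: yaml+jinja
+
+    - name: run a command outside of the WinRM non-interactive session
+      win_whoami:
+      become: yes
+      become_method: runas
+      become_user: SYSTEM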
+
+
+.. seealso::
+
+ :ref:`playbooks_intro`
+ An introduction to playbooks
+ :ref:`playbooks_best_practices`
+ Tips and tricks for playbooks
+ :ref:`List of Windows Modules <windows_modules>`
+ Windows specific module list, all implemented in PowerShell
+ `User Mailing List <https://groups.google.com/group/ansible-project>`_
+ Have a question? Stop by the Google group!
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/docsite/variables.dot b/docs/docsite/variables.dot
new file mode 100644
index 00000000..f5860dcb
--- /dev/null
+++ b/docs/docsite/variables.dot
@@ -0,0 +1,38 @@
+digraph G {
+
+ subgraph cluster_0 {
+ "command line variables" -> "--extra-args"
+ }
+
+ subgraph cluster_1 {
+ "role variables" -> "roles/rolename/vars.yml" -> "parameters passed to role" -> "parameters from dependent roles"
+ }
+
+ subgraph cluster_2 {
+ "top-level playbook variables" -> "vars: directives" -> "vars_files: directives";
+ }
+
+ subgraph cluster_3 {
+ "inventory variables" -> "group_vars/all" -> "group_vars/grandparent1" -> "group_vars/parent1" -> "host_vars/myhostname";
+ "group_vars/all" -> "group_vars/grandparent2";
+ "group_vars/grandparent1" -> "group_vars/parent2"
+ "group_vars/grandparent2" -> "host_vars/myhostname";
+ "group_vars/parent2" -> "host_vars/myhostname"
+ }
+
+ subgraph cluster_4 {
+ "facts" -> "gathered host facts"
+ "facts" -> "host facts from /etc/ansible/facts.d"
+ "facts" -> "set_fact"
+ "facts" -> "include_vars"
+ }
+
+ subgraph cluster_5 {
+ "role defaults" -> "roles/rolename/defaults.yml"
+ }
+
+ "command line variables" -> "role variables" -> "top-level playbook variables" -> "inventory variables" -> "role defaults" -> "facts"
+
+
+
+}
diff --git a/docs/man/.gitignore b/docs/man/.gitignore
new file mode 100644
index 00000000..81a33679
--- /dev/null
+++ b/docs/man/.gitignore
@@ -0,0 +1,2 @@
+*.xml
+*.asciidoc
diff --git a/docs/man/man1/ansible-config.1 b/docs/man/man1/ansible-config.1
new file mode 100644
index 00000000..326196f7
--- /dev/null
+++ b/docs/man/man1/ansible-config.1
@@ -0,0 +1,135 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-CONFIG 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-config \- View ansible configuration.
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.sp
+usage: ansible\-config [\-h] [\-\-version] [\-v] {list,dump,view} ...
+.SH DESCRIPTION
+.sp
+Config command line class
+.SH COMMON OPTIONS
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ACTIONS
+.INDENT 0.0
+.TP
+.B \fBlist\fP
+list all current configs reading lib/constants.py and shows env and config file setting names
+.sp
+\fB\-c\fP \(aqCONFIG_FILE\(aq, \fB\-\-config\fP \(aqCONFIG_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+path to configuration file, defaults to first file found in precedence.
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBdump\fP
+Shows the current settings, merges ansible.cfg if specified
+.sp
+\fB\-\-only\-changed\fP
+.INDENT 7.0
+.INDENT 3.5
+Only show configurations that have changed from the default
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-c\fP \(aqCONFIG_FILE\(aq, \fB\-\-config\fP \(aqCONFIG_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+path to configuration file, defaults to first file found in precedence.
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBview\fP
+Displays the current config file
+.sp
+\fB\-c\fP \(aqCONFIG_FILE\(aq, \fB\-\-config\fP \(aqCONFIG_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+path to configuration file, defaults to first file found in precedence.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg
+.sp
+For a full list check \fI\%https://docs.ansible.com/\fP\&. or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-console.1 b/docs/man/man1/ansible-console.1
new file mode 100644
index 00000000..7c752210
--- /dev/null
+++ b/docs/man/man1/ansible-console.1
@@ -0,0 +1,299 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-CONSOLE 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-console \- REPL console for executing Ansible tasks.
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible\-console [\-h] [\-\-version] [\-v] [\-b]
+[\-\-become\-method BECOME_METHOD]
+[\-\-become\-user BECOME_USER] [\-K] [\-i INVENTORY]
+[\-\-list\-hosts] [\-l SUBSET] [\-k]
+[\-\-private\-key PRIVATE_KEY_FILE] [\-u REMOTE_USER]
+[\-c CONNECTION] [\-T TIMEOUT]
+[\-\-ssh\-common\-args SSH_COMMON_ARGS]
+[\-\-sftp\-extra\-args SFTP_EXTRA_ARGS]
+[\-\-scp\-extra\-args SCP_EXTRA_ARGS]
+[\-\-ssh\-extra\-args SSH_EXTRA_ARGS] [\-C] [\-\-syntax\-check]
+[\-D] [\-\-vault\-id VAULT_IDS]
+[\-\-ask\-vault\-password | \-\-vault\-password\-file VAULT_PASSWORD_FILES]
+[\-f FORKS] [\-M MODULE_PATH] [\-\-playbook\-dir BASEDIR]
+[\-\-step]
+[pattern]
+.UNINDENT
+.SH DESCRIPTION
+.sp
+a REPL that allows for running ad\-hoc tasks against a chosen inventory (based
+on dominis\(aq ansible\-shell).
+.SH COMMON OPTIONS
+.INDENT 0.0
+.INDENT 3.5
+host pattern
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-become\-method\fP \(aqBECOME_METHOD\(aq
+.INDENT 0.0
+.INDENT 3.5
+privilege escalation method to use (default=sudo), use \fIansible\-doc \-t become \-l\fP to list valid choices.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-become\-user\fP \(aqBECOME_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+run operations as this user (default=root)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-hosts\fP
+.INDENT 0.0
+.INDENT 3.5
+outputs a list of matching hosts; does not execute anything else
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-playbook\-dir\fP \(aqBASEDIR\(aq
+.INDENT 0.0
+.INDENT 3.5
+Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/ etc.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-private\-key\fP \(aqPRIVATE_KEY_FILE\(aq, \fB\-\-key\-file\fP \(aqPRIVATE_KEY_FILE\(aq
+.INDENT 0.0
+.INDENT 3.5
+use this file to authenticate the connection
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-scp\-extra\-args\fP \(aqSCP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to scp only (e.g. \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-sftp\-extra\-args\fP \(aqSFTP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to sftp only (e.g. \-f, \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-common\-args\fP \(aqSSH_COMMON_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-extra\-args\fP \(aqSSH_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to ssh only (e.g. \-R)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-step\fP
+.INDENT 0.0
+.INDENT 3.5
+one\-step\-at\-a\-time: confirm each task before running
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-syntax\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+perform a syntax check on the playbook, but do not execute it
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 0.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-C\fP, \fB\-\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+don\(aqt make any changes; instead, try to predict some of the changes that may occur
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-D\fP, \fB\-\-diff\fP
+.INDENT 0.0
+.INDENT 3.5
+when changing (small) files and templates, show the differences in those files; works great with \-\-check
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-K\fP, \fB\-\-ask\-become\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for privilege escalation password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-M\fP, \fB\-\-module\-path\fP
+.INDENT 0.0
+.INDENT 3.5
+prepend colon\-separated path(s) to module library (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-T\fP \(aqTIMEOUT\(aq, \fB\-\-timeout\fP \(aqTIMEOUT\(aq
+.INDENT 0.0
+.INDENT 3.5
+override the connection timeout in seconds (default=10)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-b\fP, \fB\-\-become\fP
+.INDENT 0.0
+.INDENT 3.5
+run operations with become (does not imply password prompting)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-c\fP \(aqCONNECTION\(aq, \fB\-\-connection\fP \(aqCONNECTION\(aq
+.INDENT 0.0
+.INDENT 3.5
+connection type to use (default=smart)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-f\fP \(aqFORKS\(aq, \fB\-\-forks\fP \(aqFORKS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify number of parallel processes to use (default=5)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-i\fP, \fB\-\-inventory\fP, \fB\-\-inventory\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+specify inventory host path or comma separated host list. \-\-inventory\-file is deprecated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-k\fP, \fB\-\-ask\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for connection password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-l\fP \(aqSUBSET\(aq, \fB\-\-limit\fP \(aqSUBSET\(aq
+.INDENT 0.0
+.INDENT 3.5
+further limit selected hosts to an additional pattern
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-u\fP \(aqREMOTE_USER\(aq, \fB\-\-user\fP \(aqREMOTE_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+connect as this user (default=None)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg
+.sp
+For a full list check \fI\%https://docs.ansible.com/\fP\&. or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-doc.1 b/docs/man/man1/ansible-doc.1
new file mode 100644
index 00000000..6dce5337
--- /dev/null
+++ b/docs/man/man1/ansible-doc.1
@@ -0,0 +1,165 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-DOC 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-doc \- plugin documentation tool
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible\-doc [\-h] [\-\-version] [\-v] [\-M MODULE_PATH]
+[\-\-playbook\-dir BASEDIR]
+[\-t {become,cache,callback,cliconf,connection,httpapi,inventory,lookup,netconf,shell,vars,module,strategy}]
+[\-j] [\-F | \-l | \-s | \-\-metadata\-dump]
+[plugin [plugin ...]]
+.UNINDENT
+.SH DESCRIPTION
+.sp
+displays information on modules installed in Ansible libraries.
+It displays a terse listing of plugins and their short descriptions,
+provides a printout of their DOCUMENTATION strings,
+and it can create a short "snippet" which can be pasted into a playbook.
+.SH COMMON OPTIONS
+.INDENT 0.0
+.INDENT 3.5
+Plugin
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-metadata\-dump\fP
+.INDENT 0.0
+.INDENT 3.5
+\fBFor internal testing only\fP Dump json metadata for all plugins.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-playbook\-dir\fP \(aqBASEDIR\(aq
+.INDENT 0.0
+.INDENT 3.5
+Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/ etc.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-F\fP, \fB\-\-list_files\fP
+.INDENT 0.0
+.INDENT 3.5
+Show plugin names and their source files without summaries (implies \-\-list). A supplied argument will be used for filtering, can be a namespace or full collection name.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-M\fP, \fB\-\-module\-path\fP
+.INDENT 0.0
+.INDENT 3.5
+prepend colon\-separated path(s) to module library (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-j\fP, \fB\-\-json\fP
+.INDENT 0.0
+.INDENT 3.5
+Change output into json format.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-l\fP, \fB\-\-list\fP
+.INDENT 0.0
+.INDENT 3.5
+List available plugins. A supplied argument will be used for filtering, can be a namespace or full collection name.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-s\fP, \fB\-\-snippet\fP
+.INDENT 0.0
+.INDENT 3.5
+Show playbook snippet for specified plugin(s)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-t\fP \(aqTYPE\(aq, \fB\-\-type\fP \(aqTYPE\(aq
+.INDENT 0.0
+.INDENT 3.5
+Choose which plugin type (defaults to "module"). Available plugin types are : (\(aqbecome\(aq, \(aqcache\(aq, \(aqcallback\(aq, \(aqcliconf\(aq, \(aqconnection\(aq, \(aqhttpapi\(aq, \(aqinventory\(aq, \(aqlookup\(aq, \(aqnetconf\(aq, \(aqshell\(aq, \(aqvars\(aq, \(aqmodule\(aq, \(aqstrategy\(aq)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg
+.sp
+For a full list check \fI\%https://docs.ansible.com/\fP\&. or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1
new file mode 100644
index 00000000..70a622e8
--- /dev/null
+++ b/docs/man/man1/ansible-galaxy.1
@@ -0,0 +1,105 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-GALAXY 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-galaxy \- Perform various Role and Collection related operations.
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.sp
+usage: ansible\-galaxy [\-h] [\-\-version] [\-v] TYPE ...
+.SH DESCRIPTION
+.sp
+command to manage Ansible roles in shared repositories, the default of which is
+Ansible Galaxy \fIhttps://galaxy.ansible.com\fP\&.
+.SH COMMON OPTIONS
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ACTIONS
+.INDENT 0.0
+.TP
+.B \fBcollection\fP
+Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as listed below.
+.TP
+.B \fBrole\fP
+Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init as listed below.
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg
+.sp
+For a full list check \fI\%https://docs.ansible.com/\fP\&. or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-inventory.1 b/docs/man/man1/ansible-inventory.1
new file mode 100644
index 00000000..d934332e
--- /dev/null
+++ b/docs/man/man1/ansible-inventory.1
@@ -0,0 +1,213 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-INVENTORY 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-inventory \- None
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible\-inventory [\-h] [\-\-version] [\-v] [\-i INVENTORY]
+[\-\-vault\-id VAULT_IDS]
+[\-\-ask\-vault\-password | \-\-vault\-password\-file VAULT_PASSWORD_FILES]
+[\-\-playbook\-dir BASEDIR] [\-\-list] [\-\-host HOST]
+[\-\-graph] [\-y] [\-\-toml] [\-\-vars] [\-\-export]
+[\-\-output OUTPUT_FILE]
+[host|group]
+.UNINDENT
+.SH DESCRIPTION
+.sp
+used to display or dump the configured inventory as Ansible sees it
+.SH COMMON OPTIONS
+.INDENT 0.0
+.INDENT 3.5
+None
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-export\fP
+.INDENT 0.0
+.INDENT 3.5
+When doing \-\-list, represent in a way that is optimized for export, not as an accurate representation of how Ansible has processed it
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-graph\fP
+.INDENT 0.0
+.INDENT 3.5
+create inventory graph, if supplying pattern it must be a valid group name
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-host\fP \(aqHOST\(aq
+.INDENT 0.0
+.INDENT 3.5
+Output specific host info, works as inventory script
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\fP
+.INDENT 0.0
+.INDENT 3.5
+Output all hosts info, works as inventory script
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-hosts\fP
+.INDENT 0.0
+.INDENT 3.5
+==SUPPRESS==
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-output\fP \(aqOUTPUT_FILE\(aq
+.INDENT 0.0
+.INDENT 3.5
+When doing \-\-list, send the inventory to a file instead of to the screen
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-playbook\-dir\fP \(aqBASEDIR\(aq
+.INDENT 0.0
+.INDENT 3.5
+Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/ etc.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-toml\fP
+.INDENT 0.0
+.INDENT 3.5
+Use TOML format instead of default JSON, ignored for \-\-graph
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vars\fP
+.INDENT 0.0
+.INDENT 3.5
+Add vars to graph display, ignored unless used with \-\-graph
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 0.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-i\fP, \fB\-\-inventory\fP, \fB\-\-inventory\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+specify inventory host path or comma separated host list. \-\-inventory\-file is deprecated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-l\fP, \fB\-\-limit\fP
+.INDENT 0.0
+.INDENT 3.5
+==SUPPRESS==
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-y\fP, \fB\-\-yaml\fP
+.INDENT 0.0
+.INDENT 3.5
+Use YAML format instead of default JSON, ignored for \-\-graph
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg
+.sp
+For a full list check \fI\%https://docs.ansible.com/\fP\&. or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1
new file mode 100644
index 00000000..de60183c
--- /dev/null
+++ b/docs/man/man1/ansible-playbook.1
@@ -0,0 +1,350 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-PLAYBOOK 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-playbook \- Runs Ansible playbooks, executing the defined tasks on the targeted hosts.
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible\-playbook [\-h] [\-\-version] [\-v] [\-k]
+[\-\-private\-key PRIVATE_KEY_FILE] [\-u REMOTE_USER]
+[\-c CONNECTION] [\-T TIMEOUT]
+[\-\-ssh\-common\-args SSH_COMMON_ARGS]
+[\-\-sftp\-extra\-args SFTP_EXTRA_ARGS]
+[\-\-scp\-extra\-args SCP_EXTRA_ARGS]
+[\-\-ssh\-extra\-args SSH_EXTRA_ARGS] [\-\-force\-handlers]
+[\-\-flush\-cache] [\-b] [\-\-become\-method BECOME_METHOD]
+[\-\-become\-user BECOME_USER] [\-K] [\-t TAGS]
+[\-\-skip\-tags SKIP_TAGS] [\-C] [\-\-syntax\-check] [\-D]
+[\-i INVENTORY] [\-\-list\-hosts] [\-l SUBSET]
+[\-e EXTRA_VARS] [\-\-vault\-id VAULT_IDS]
+[\-\-ask\-vault\-password | \-\-vault\-password\-file VAULT_PASSWORD_FILES]
+[\-f FORKS] [\-M MODULE_PATH] [\-\-list\-tasks]
+[\-\-list\-tags] [\-\-step] [\-\-start\-at\-task START_AT_TASK]
+playbook [playbook ...]
+.UNINDENT
+.SH DESCRIPTION
+.sp
+the tool to run \fIAnsible playbooks\fP, which are a configuration and multinode
+deployment system.
+See the project home page (\fI\%https://docs.ansible.com\fP) for more information.
+.SH COMMON OPTIONS
+.INDENT 0.0
+.INDENT 3.5
+Playbook(s)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-become\-method\fP \(aqBECOME_METHOD\(aq
+.INDENT 0.0
+.INDENT 3.5
+privilege escalation method to use (default=sudo), use \fIansible\-doc \-t become \-l\fP to list valid choices.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-become\-user\fP \(aqBECOME_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+run operations as this user (default=root)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-flush\-cache\fP
+.INDENT 0.0
+.INDENT 3.5
+clear the fact cache for every host in inventory
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-force\-handlers\fP
+.INDENT 0.0
+.INDENT 3.5
+run handlers even if a task fails
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-hosts\fP
+.INDENT 0.0
+.INDENT 3.5
+outputs a list of matching hosts; does not execute anything else
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-tags\fP
+.INDENT 0.0
+.INDENT 3.5
+list all available tags
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-tasks\fP
+.INDENT 0.0
+.INDENT 3.5
+list all tasks that would be executed
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-private\-key\fP \(aqPRIVATE_KEY_FILE\(aq, \fB\-\-key\-file\fP \(aqPRIVATE_KEY_FILE\(aq
+.INDENT 0.0
+.INDENT 3.5
+use this file to authenticate the connection
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-scp\-extra\-args\fP \(aqSCP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to scp only (e.g. \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-sftp\-extra\-args\fP \(aqSFTP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to sftp only (e.g. \-f, \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-skip\-tags\fP
+.INDENT 0.0
+.INDENT 3.5
+only run plays and tasks whose tags do not match these values
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-common\-args\fP \(aqSSH_COMMON_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-extra\-args\fP \(aqSSH_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to ssh only (e.g. \-R)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-start\-at\-task\fP \(aqSTART_AT_TASK\(aq
+.INDENT 0.0
+.INDENT 3.5
+start the playbook at the task matching this name
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-step\fP
+.INDENT 0.0
+.INDENT 3.5
+one\-step\-at\-a\-time: confirm each task before running
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-syntax\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+perform a syntax check on the playbook, but do not execute it
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 0.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-C\fP, \fB\-\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+don\(aqt make any changes; instead, try to predict some of the changes that may occur
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-D\fP, \fB\-\-diff\fP
+.INDENT 0.0
+.INDENT 3.5
+when changing (small) files and templates, show the differences in those files; works great with \-\-check
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-K\fP, \fB\-\-ask\-become\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for privilege escalation password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-M\fP, \fB\-\-module\-path\fP
+.INDENT 0.0
+.INDENT 3.5
+prepend colon\-separated path(s) to module library (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-T\fP \(aqTIMEOUT\(aq, \fB\-\-timeout\fP \(aqTIMEOUT\(aq
+.INDENT 0.0
+.INDENT 3.5
+override the connection timeout in seconds (default=10)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-b\fP, \fB\-\-become\fP
+.INDENT 0.0
+.INDENT 3.5
+run operations with become (does not imply password prompting)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-c\fP \(aqCONNECTION\(aq, \fB\-\-connection\fP \(aqCONNECTION\(aq
+.INDENT 0.0
+.INDENT 3.5
+connection type to use (default=smart)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-e\fP, \fB\-\-extra\-vars\fP
+.INDENT 0.0
+.INDENT 3.5
+set additional variables as key=value or YAML/JSON, if filename prepend with @
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-f\fP \(aqFORKS\(aq, \fB\-\-forks\fP \(aqFORKS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify number of parallel processes to use (default=5)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-i\fP, \fB\-\-inventory\fP, \fB\-\-inventory\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+specify inventory host path or comma separated host list. \-\-inventory\-file is deprecated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-k\fP, \fB\-\-ask\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for connection password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-l\fP \(aqSUBSET\(aq, \fB\-\-limit\fP \(aqSUBSET\(aq
+.INDENT 0.0
+.INDENT 3.5
+further limit selected hosts to an additional pattern
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-t\fP, \fB\-\-tags\fP
+.INDENT 0.0
+.INDENT 3.5
+only run plays and tasks tagged with these values
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-u\fP \(aqREMOTE_USER\(aq, \fB\-\-user\fP \(aqREMOTE_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+connect as this user (default=None)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg.
+.sp
+For a full list, check \fI\%https://docs.ansible.com/\fP or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1
new file mode 100644
index 00000000..f90f5484
--- /dev/null
+++ b/docs/man/man1/ansible-pull.1
@@ -0,0 +1,371 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-PULL 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-pull \- pulls playbooks from a VCS repo and executes them for the local host
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible\-pull [\-h] [\-\-version] [\-v] [\-k]
+[\-\-private\-key PRIVATE_KEY_FILE] [\-u REMOTE_USER]
+[\-c CONNECTION] [\-T TIMEOUT]
+[\-\-ssh\-common\-args SSH_COMMON_ARGS]
+[\-\-sftp\-extra\-args SFTP_EXTRA_ARGS]
+[\-\-scp\-extra\-args SCP_EXTRA_ARGS]
+[\-\-ssh\-extra\-args SSH_EXTRA_ARGS] [\-\-vault\-id VAULT_IDS]
+[\-\-ask\-vault\-password | \-\-vault\-password\-file VAULT_PASSWORD_FILES]
+[\-e EXTRA_VARS] [\-t TAGS] [\-\-skip\-tags SKIP_TAGS]
+[\-i INVENTORY] [\-\-list\-hosts] [\-l SUBSET] [\-M MODULE_PATH]
+[\-K] [\-\-purge] [\-o] [\-s SLEEP] [\-f] [\-d DEST] [\-U URL]
+[\-\-full] [\-C CHECKOUT] [\-\-accept\-host\-key]
+[\-m MODULE_NAME] [\-\-verify\-commit] [\-\-clean]
+[\-\-track\-subs] [\-\-check] [\-\-diff]
+[playbook.yml [playbook.yml ...]]
+.UNINDENT
+.SH DESCRIPTION
+.sp
+Used to pull a remote copy of ansible on each managed node,
+each set to run via cron and update its playbook source from a source repository.
+This inverts the default \fIpush\fP architecture of ansible into a \fIpull\fP
+architecture, which has near\-limitless scaling potential.
+.sp
+The setup playbook can be tuned to change the cron frequency, logging
+locations, and parameters to ansible\-pull.
+This is useful both for extreme scale\-out and for periodic remediation.
+Using the \(aqfetch\(aq module to retrieve logs from ansible\-pull runs would be an
+excellent way to gather and analyze remote logs.
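+.sp
+For example, a minimal sketch of a cron entry (the schedule and repository URL are illustrative):
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+# every 15 minutes, pull the repo and apply local.yml only if it changed
+*/15 * * * * ansible\-pull \-o \-U https://github.com/example/playbooks.git local.yml
+.ft P
+.fi
+.UNINDENT
+.UNINDENT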
+.SH COMMON OPTIONS
+.INDENT 0.0
+.INDENT 3.5
+Playbook(s)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-accept\-host\-key\fP
+.INDENT 0.0
+.INDENT 3.5
+adds the hostkey for the repo url if not already added
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+don\(aqt make any changes; instead, try to predict some of the changes that may occur
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-clean\fP
+.INDENT 0.0
+.INDENT 3.5
+modified files in the working repository will be discarded
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-diff\fP
+.INDENT 0.0
+.INDENT 3.5
+when changing (small) files and templates, show the differences in those files; works great with \-\-check
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-full\fP
+.INDENT 0.0
+.INDENT 3.5
+Do a full clone, instead of a shallow one.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-hosts\fP
+.INDENT 0.0
+.INDENT 3.5
+outputs a list of matching hosts; does not execute anything else
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-private\-key\fP \(aqPRIVATE_KEY_FILE\(aq, \fB\-\-key\-file\fP \(aqPRIVATE_KEY_FILE\(aq
+.INDENT 0.0
+.INDENT 3.5
+use this file to authenticate the connection
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-purge\fP
+.INDENT 0.0
+.INDENT 3.5
+purge checkout after playbook run
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-scp\-extra\-args\fP \(aqSCP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to scp only (e.g. \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-sftp\-extra\-args\fP \(aqSFTP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to sftp only (e.g. \-f, \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-skip\-tags\fP
+.INDENT 0.0
+.INDENT 3.5
+only run plays and tasks whose tags do not match these values
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-common\-args\fP \(aqSSH_COMMON_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-extra\-args\fP \(aqSSH_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to ssh only (e.g. \-R)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-track\-subs\fP
+.INDENT 0.0
+.INDENT 3.5
+submodules will track the latest changes. This is equivalent to specifying the \-\-remote flag to git submodule update
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 0.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-verify\-commit\fP
+.INDENT 0.0
+.INDENT 3.5
+verify GPG signature of checked out commit, if it fails abort running the playbook. This needs the corresponding VCS module to support such an operation
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-C\fP \(aqCHECKOUT\(aq, \fB\-\-checkout\fP \(aqCHECKOUT\(aq
+.INDENT 0.0
+.INDENT 3.5
+branch/tag/commit to checkout. Defaults to behavior of repository module.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-K\fP, \fB\-\-ask\-become\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for privilege escalation password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-M\fP, \fB\-\-module\-path\fP
+.INDENT 0.0
+.INDENT 3.5
+prepend colon\-separated path(s) to module library (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-T\fP \(aqTIMEOUT\(aq, \fB\-\-timeout\fP \(aqTIMEOUT\(aq
+.INDENT 0.0
+.INDENT 3.5
+override the connection timeout in seconds (default=10)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-U\fP \(aqURL\(aq, \fB\-\-url\fP \(aqURL\(aq
+.INDENT 0.0
+.INDENT 3.5
+URL of the playbook repository
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-c\fP \(aqCONNECTION\(aq, \fB\-\-connection\fP \(aqCONNECTION\(aq
+.INDENT 0.0
+.INDENT 3.5
+connection type to use (default=smart)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-d\fP \(aqDEST\(aq, \fB\-\-directory\fP \(aqDEST\(aq
+.INDENT 0.0
+.INDENT 3.5
+directory to checkout repository to
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-e\fP, \fB\-\-extra\-vars\fP
+.INDENT 0.0
+.INDENT 3.5
+set additional variables as key=value or YAML/JSON, if filename prepend with @
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-f\fP, \fB\-\-force\fP
+.INDENT 0.0
+.INDENT 3.5
+run the playbook even if the repository could not be updated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-i\fP, \fB\-\-inventory\fP, \fB\-\-inventory\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+specify inventory host path or comma separated host list. \-\-inventory\-file is deprecated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-k\fP, \fB\-\-ask\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for connection password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-l\fP \(aqSUBSET\(aq, \fB\-\-limit\fP \(aqSUBSET\(aq
+.INDENT 0.0
+.INDENT 3.5
+further limit selected hosts to an additional pattern
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-m\fP \(aqMODULE_NAME\(aq, \fB\-\-module\-name\fP \(aqMODULE_NAME\(aq
+.INDENT 0.0
+.INDENT 3.5
+Repository module name, which ansible will use to check out the repo. Choices are (\(aqgit\(aq, \(aqsubversion\(aq, \(aqhg\(aq, \(aqbzr\(aq). Default is git.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-o\fP, \fB\-\-only\-if\-changed\fP
+.INDENT 0.0
+.INDENT 3.5
+only run the playbook if the repository has been updated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-s\fP \(aqSLEEP\(aq, \fB\-\-sleep\fP \(aqSLEEP\(aq
+.INDENT 0.0
+.INDENT 3.5
+sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-t\fP, \fB\-\-tags\fP
+.INDENT 0.0
+.INDENT 3.5
+only run plays and tasks tagged with these values
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-u\fP \(aqREMOTE_USER\(aq, \fB\-\-user\fP \(aqREMOTE_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+connect as this user (default=None)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg.
+.sp
+For a full list, check \fI\%https://docs.ansible.com/\fP or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1
new file mode 100644
index 00000000..b8b7b6de
--- /dev/null
+++ b/docs/man/man1/ansible-vault.1
@@ -0,0 +1,371 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE-VAULT 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible-vault \- encryption/decryption utility for Ansible data files
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible\-vault [\-h] [\-\-version] [\-v]
+{create,decrypt,edit,view,encrypt,encrypt_string,rekey}
+...
+.UNINDENT
+.SH DESCRIPTION
+.sp
+ansible\-vault can encrypt any structured data file used by Ansible.
+This can include \fIgroup_vars/\fP or \fIhost_vars/\fP inventory variables,
+variables loaded by \fIinclude_vars\fP or \fIvars_files\fP, or variable files
+passed on the ansible\-playbook command line with \fI\-e @file.yml\fP or \fI\-e
+@file.json\fP\&.
+Role variables and defaults are also included!
+.sp
+Because Ansible tasks, handlers, and other objects are data, these can also be
+encrypted with vault.
+If you\(aqd like to not expose what variables you are using, you can keep an
+individual task file entirely encrypted.
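+.sp
+For example, a minimal sketch (the file name is illustrative):
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+# encrypt a variables file in place, prompting for the password
+ansible\-vault encrypt \-\-ask\-vault\-password group_vars/all/vault.yml
+# view it later without decrypting it on disk
+ansible\-vault view \-\-ask\-vault\-password group_vars/all/vault.yml
+.ft P
+.fi
+.UNINDENT
+.UNINDENT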
+.SH COMMON OPTIONS
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ACTIONS
+.INDENT 0.0
+.TP
+.B \fBcreate\fP
+create and open a file in an editor that will be encrypted with the provided vault secret when closed
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-encrypt\-vault\-id\fP \(aqENCRYPT_VAULT_ID\(aq
+.INDENT 7.0
+.INDENT 3.5
+the vault id used to encrypt (required if more than one vault\-id is provided)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBdecrypt\fP
+decrypt the supplied file using the provided vault secret
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-output\fP \(aqOUTPUT_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+output file name for encrypt or decrypt; use \- for stdout
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBedit\fP
+open and decrypt an existing vaulted file in an editor; the file will be encrypted again when closed
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-encrypt\-vault\-id\fP \(aqENCRYPT_VAULT_ID\(aq
+.INDENT 7.0
+.INDENT 3.5
+the vault id used to encrypt (required if more than one vault\-id is provided)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBview\fP
+open, decrypt and view an existing vaulted file in a pager using the supplied vault secret
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBencrypt\fP
+encrypt the supplied file using the provided vault secret
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-encrypt\-vault\-id\fP \(aqENCRYPT_VAULT_ID\(aq
+.INDENT 7.0
+.INDENT 3.5
+the vault id used to encrypt (required if more than one vault\-id is provided)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-output\fP \(aqOUTPUT_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+output file name for encrypt or decrypt; use \- for stdout
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.TP
+.B \fBencrypt_string\fP
+encrypt the supplied string using the provided vault secret
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-encrypt\-vault\-id\fP \(aqENCRYPT_VAULT_ID\(aq
+.INDENT 7.0
+.INDENT 3.5
+the vault id used to encrypt (required if more than one vault\-id is provided)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-output\fP \(aqOUTPUT_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+output file name for encrypt or decrypt; use \- for stdout
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-stdin\-name\fP \(aqENCRYPT_STRING_STDIN_NAME\(aq
+.INDENT 7.0
+.INDENT 3.5
+Specify the variable name for stdin
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-n\fP, \fB\-\-name\fP
+.INDENT 7.0
+.INDENT 3.5
+Specify the variable name
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-p\fP, \fB\-\-prompt\fP
+.INDENT 7.0
+.INDENT 3.5
+Prompt for the string to encrypt
+.UNINDENT
+.UNINDENT
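+.sp
+For example, a minimal sketch (the vault id, variable name, and value are illustrative):
+.INDENT 7.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ansible\-vault encrypt_string \-\-vault\-id dev@prompt \-n db_password \(aqs3cr3t\(aq
+.ft P
+.fi
+.UNINDENT
+.UNINDENT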
+.TP
+.B \fBrekey\fP
+re\-encrypt a vaulted file with a new secret; the previous secret is required
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 7.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-encrypt\-vault\-id\fP \(aqENCRYPT_VAULT_ID\(aq
+.INDENT 7.0
+.INDENT 3.5
+the vault id used to encrypt (required if more than one vault\-id is provided)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-new\-vault\-id\fP \(aqNEW_VAULT_ID\(aq
+.INDENT 7.0
+.INDENT 3.5
+the new vault identity to use for rekey
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-new\-vault\-password\-file\fP \(aqNEW_VAULT_PASSWORD_FILE\(aq
+.INDENT 7.0
+.INDENT 3.5
+new vault password file for rekey
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 7.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 7.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg.
+.sp
+For a full list, check \fI\%https://docs.ansible.com/\fP or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\fP (1), \fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1
new file mode 100644
index 00000000..b5efa1ff
--- /dev/null
+++ b/docs/man/man1/ansible.1
@@ -0,0 +1,341 @@
+.\" Man page generated from reStructuredText.
+.
+.TH ANSIBLE 1 "" "Ansible 2.10.4" "System administration commands"
+.SH NAME
+ansible \- Define and run a single task 'playbook' against a set of hosts
+.
+.nr rst2man-indent-level 0
+.
+.de1 rstReportMargin
+\\$1 \\n[an-margin]
+level \\n[rst2man-indent-level]
+level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
+-
+\\n[rst2man-indent0]
+\\n[rst2man-indent1]
+\\n[rst2man-indent2]
+..
+.de1 INDENT
+.\" .rstReportMargin pre:
+. RS \\$1
+. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
+. nr rst2man-indent-level +1
+.\" .rstReportMargin post:
+..
+.de UNINDENT
+. RE
+.\" indent \\n[an-margin]
+.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.nr rst2man-indent-level -1
+.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
+.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
+..
+.SH SYNOPSIS
+.INDENT 0.0
+.TP
+.B usage: ansible [\-h] [\-\-version] [\-v] [\-b] [\-\-become\-method BECOME_METHOD]
+[\-\-become\-user BECOME_USER] [\-K] [\-i INVENTORY] [\-\-list\-hosts]
+[\-l SUBSET] [\-P POLL_INTERVAL] [\-B SECONDS] [\-o] [\-t TREE] [\-k]
+[\-\-private\-key PRIVATE_KEY_FILE] [\-u REMOTE_USER]
+[\-c CONNECTION] [\-T TIMEOUT]
+[\-\-ssh\-common\-args SSH_COMMON_ARGS]
+[\-\-sftp\-extra\-args SFTP_EXTRA_ARGS]
+[\-\-scp\-extra\-args SCP_EXTRA_ARGS]
+[\-\-ssh\-extra\-args SSH_EXTRA_ARGS] [\-C] [\-\-syntax\-check] [\-D]
+[\-e EXTRA_VARS] [\-\-vault\-id VAULT_IDS]
+[\-\-ask\-vault\-password | \-\-vault\-password\-file VAULT_PASSWORD_FILES]
+[\-f FORKS] [\-M MODULE_PATH] [\-\-playbook\-dir BASEDIR]
+[\-a MODULE_ARGS] [\-m MODULE_NAME]
+pattern
+.UNINDENT
+.SH DESCRIPTION
+.sp
+ansible is an extra\-simple tool/framework/API for doing \(aqremote things\(aq.
+This command allows you to define and run a single task \(aqplaybook\(aq against a
+set of hosts.
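+.sp
+For example, a minimal sketch (the host pattern and inventory are illustrative):
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+# check connectivity to all hosts in the \(aqwebservers\(aq group
+ansible webservers \-i inventory.ini \-m ping
+# run an ad\-hoc command (default module) with privilege escalation
+ansible webservers \-i inventory.ini \-b \-a \(aquptime\(aq
+.ft P
+.fi
+.UNINDENT
+.UNINDENT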
+.SH COMMON OPTIONS
+.INDENT 0.0
+.INDENT 3.5
+host pattern
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ask\-vault\-password\fP, \fB\-\-ask\-vault\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for vault password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-become\-method\fP \(aqBECOME_METHOD\(aq
+.INDENT 0.0
+.INDENT 3.5
+privilege escalation method to use (default=sudo), use \fIansible\-doc \-t become \-l\fP to list valid choices.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-become\-user\fP \(aqBECOME_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+run operations as this user (default=root)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-list\-hosts\fP
+.INDENT 0.0
+.INDENT 3.5
+outputs a list of matching hosts; does not execute anything else
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-playbook\-dir\fP \(aqBASEDIR\(aq
+.INDENT 0.0
+.INDENT 3.5
+Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features, including roles/, group_vars/, etc.
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-private\-key\fP \(aqPRIVATE_KEY_FILE\(aq, \fB\-\-key\-file\fP \(aqPRIVATE_KEY_FILE\(aq
+.INDENT 0.0
+.INDENT 3.5
+use this file to authenticate the connection
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-scp\-extra\-args\fP \(aqSCP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to scp only (e.g. \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-sftp\-extra\-args\fP \(aqSFTP_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to sftp only (e.g. \-f, \-l)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-common\-args\fP \(aqSSH_COMMON_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-ssh\-extra\-args\fP \(aqSSH_EXTRA_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify extra arguments to pass to ssh only (e.g. \-R)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-syntax\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+perform a syntax check on the playbook, but do not execute it
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-id\fP
+.INDENT 0.0
+.INDENT 3.5
+the vault identity to use
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-vault\-password\-file\fP, \fB\-\-vault\-pass\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+vault password file
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-\-version\fP
+.INDENT 0.0
+.INDENT 3.5
+show program\(aqs version number, config file location, configured module search path, module location, executable location and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-B\fP \(aqSECONDS\(aq, \fB\-\-background\fP \(aqSECONDS\(aq
+.INDENT 0.0
+.INDENT 3.5
+run asynchronously, failing after X seconds (default=N/A)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-C\fP, \fB\-\-check\fP
+.INDENT 0.0
+.INDENT 3.5
+don\(aqt make any changes; instead, try to predict some of the changes that may occur
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-D\fP, \fB\-\-diff\fP
+.INDENT 0.0
+.INDENT 3.5
+when changing (small) files and templates, show the differences in those files; works great with \-\-check
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-K\fP, \fB\-\-ask\-become\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for privilege escalation password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-M\fP, \fB\-\-module\-path\fP
+.INDENT 0.0
+.INDENT 3.5
+prepend colon\-separated path(s) to module library (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-P\fP \(aqPOLL_INTERVAL\(aq, \fB\-\-poll\fP \(aqPOLL_INTERVAL\(aq
+.INDENT 0.0
+.INDENT 3.5
+set the poll interval if using \-B (default=15)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-T\fP \(aqTIMEOUT\(aq, \fB\-\-timeout\fP \(aqTIMEOUT\(aq
+.INDENT 0.0
+.INDENT 3.5
+override the connection timeout in seconds (default=10)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-a\fP \(aqMODULE_ARGS\(aq, \fB\-\-args\fP \(aqMODULE_ARGS\(aq
+.INDENT 0.0
+.INDENT 3.5
+module arguments
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-b\fP, \fB\-\-become\fP
+.INDENT 0.0
+.INDENT 3.5
+run operations with become (does not imply password prompting)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-c\fP \(aqCONNECTION\(aq, \fB\-\-connection\fP \(aqCONNECTION\(aq
+.INDENT 0.0
+.INDENT 3.5
+connection type to use (default=smart)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-e\fP, \fB\-\-extra\-vars\fP
+.INDENT 0.0
+.INDENT 3.5
+set additional variables as key=value or YAML/JSON, if filename prepend with @
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-f\fP \(aqFORKS\(aq, \fB\-\-forks\fP \(aqFORKS\(aq
+.INDENT 0.0
+.INDENT 3.5
+specify number of parallel processes to use (default=5)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-h\fP, \fB\-\-help\fP
+.INDENT 0.0
+.INDENT 3.5
+show this help message and exit
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-i\fP, \fB\-\-inventory\fP, \fB\-\-inventory\-file\fP
+.INDENT 0.0
+.INDENT 3.5
+specify inventory host path or comma separated host list. \-\-inventory\-file is deprecated
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-k\fP, \fB\-\-ask\-pass\fP
+.INDENT 0.0
+.INDENT 3.5
+ask for connection password
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-l\fP \(aqSUBSET\(aq, \fB\-\-limit\fP \(aqSUBSET\(aq
+.INDENT 0.0
+.INDENT 3.5
+further limit selected hosts to an additional pattern
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-m\fP \(aqMODULE_NAME\(aq, \fB\-\-module\-name\fP \(aqMODULE_NAME\(aq
+.INDENT 0.0
+.INDENT 3.5
+module name to execute (default=command)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-o\fP, \fB\-\-one\-line\fP
+.INDENT 0.0
+.INDENT 3.5
+condense output
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-t\fP \(aqTREE\(aq, \fB\-\-tree\fP \(aqTREE\(aq
+.INDENT 0.0
+.INDENT 3.5
+log output to this directory
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-u\fP \(aqREMOTE_USER\(aq, \fB\-\-user\fP \(aqREMOTE_USER\(aq
+.INDENT 0.0
+.INDENT 3.5
+connect as this user (default=None)
+.UNINDENT
+.UNINDENT
+.sp
+\fB\-v\fP, \fB\-\-verbose\fP
+.INDENT 0.0
+.INDENT 3.5
+verbose mode (\-vvv for more, \-vvvv to enable connection debugging)
+.UNINDENT
+.UNINDENT
+.SH ENVIRONMENT
+.sp
+The following environment variables may be specified.
+.sp
+ANSIBLE_CONFIG \-\- Specify override location for the ansible config file
+.sp
+Many more are available for most options in ansible.cfg.
+.sp
+For a full list, check \fI\%https://docs.ansible.com/\fP or use the \fIansible\-config\fP command.
+.SH FILES
+.sp
+/etc/ansible/ansible.cfg \-\- Config file, used if present
+.sp
+~/.ansible.cfg \-\- User config file, overrides the default config if present
+.sp
+\&./ansible.cfg \-\- Local config file (in current working directory) assumed to be \(aqproject specific\(aq and overrides the rest if present.
+.sp
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+.SH AUTHOR
+.sp
+Ansible was originally written by Michael DeHaan.
+.SH COPYRIGHT
+.sp
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+.SH SEE ALSO
+.sp
+\fBansible\-config\fP (1), \fBansible\-console\fP (1), \fBansible\-doc\fP (1), \fBansible\-galaxy\fP (1), \fBansible\-inventory\fP (1), \fBansible\-playbook\fP (1), \fBansible\-pull\fP (1), \fBansible\-vault\fP (1)
+.sp
+Extensive documentation is available in the documentation site:
+<\fI\%https://docs.ansible.com\fP>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <\fI\%https://github.com/ansible/ansible\fP>
+.\" Generated by docutils manpage writer.
+.
diff --git a/docs/man/man3/.gitdir b/docs/man/man3/.gitdir
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/docs/man/man3/.gitdir
diff --git a/docs/templates/cli_rst.j2 b/docs/templates/cli_rst.j2
new file mode 100644
index 00000000..0e4c0f16
--- /dev/null
+++ b/docs/templates/cli_rst.j2
@@ -0,0 +1,161 @@
+:source: {{ cli }}.py
+
+{% set name = cli_name -%}
+{% set name_slug = cli_name -%}
+
+.. _{{name}}:
+
+{% set name_len = name|length + 0-%}
+{{ '=' * name_len }}
+{{name}}
+{{ '=' * name_len }}
+
+
+:strong:`{{short_desc|default('')}}`
+
+
+.. contents::
+ :local:
+ :depth: {{content_depth}}
+
+
+.. program:: {{cli_name}}
+
+Synopsis
+========
+
+.. code-block:: bash
+
+ {{ usage|replace('%prog', cli_name) }}
+
+
+Description
+===========
+
+
+{{ long_desc|default('', True) }}
+
+{% if options %}
+Common Options
+==============
+
+
+{% for option in options|sort(attribute='options') if option.options %}
+
+.. option:: {% for switch in option['options'] %}{{switch}}{% if option['arg'] %} <{{option['arg']}}>{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}
+
+ {{ option['desc'] }}
+{% endfor %}
+{% endif %}
+
+{% if arguments %}
+ARGUMENTS
+=========
+
+.. program:: {{cli_name}}
+
+{% for arg in arguments %}
+.. option:: {{ arg }}
+
+ {{ (arguments[arg]|default(' '))}}
+
+{% endfor %}
+{% endif %}
+
+{% if actions %}
+Actions
+=======
+
+{% for action in actions %}
+
+.. program:: {{cli_name}} {{action}}
+.. _{{cli_name|replace('-','_')}}_{{action}}:
+
+{{ action}}
+{{ '-' * action|length}}
+
+{{ (actions[action]['desc']|default(' '))}}
+
+{% if actions[action]['options'] %}
+
+
+{% for option in actions[action]['options']|sort(attribute='options') %}
+.. option:: {% for switch in option['options'] if switch in actions[action]['option_names'] %}{{switch}} {% if option['arg'] %} <{{option['arg']}}>{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}
+
+ {{ (option['desc']) }}
+{% endfor %}
+{% endif %}
+
+{% for sub_action in actions[action]['actions'] %}
+
+
+.. program:: {{cli_name}} {{action}} {{sub_action}}
+.. _{{cli_name|replace('-','_')}}_{{action}}_{{sub_action}}:
+
+{{ action + " " + sub_action }}
+{{ '+' * (action|length + sub_action|length + 1) }}
+
+{{ (actions[action]['actions'][sub_action]['desc']|default(' '))}}
+
+{% if actions[action]['actions'][sub_action]['options'] %}
+
+
+{% for option in actions[action]['actions'][sub_action]['options']|sort(attribute='options') %}
+.. option:: {% for switch in option['options'] if switch in actions[action]['actions'][sub_action]['option_names'] %}{{switch}} {% if option['arg'] %} <{{option['arg']}}>{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}
+
+ {{ (option['desc']) }}
+{% endfor %}
+{% endif %}
+
+{% endfor %}
+
+{% endfor %}
+.. program:: {{cli_name}}
+{% endif %}
+
+Environment
+===========
+
+The following environment variables may be specified.
+
+{% if inventory %}
+:envvar:`ANSIBLE_INVENTORY` -- Override the default ansible inventory file
+
+{% endif %}
+{% if library %}
+:envvar:`ANSIBLE_LIBRARY` -- Override the default ansible module library path
+
+{% endif %}
+:envvar:`ANSIBLE_CONFIG` -- Override the default ansible config file
+
+Many more are available for most options in ansible.cfg.
+
+
+Files
+=====
+
+{% if inventory %}
+:file:`/etc/ansible/hosts` -- Default inventory file
+
+{% endif %}
+:file:`/etc/ansible/ansible.cfg` -- Config file, used if present
+
+:file:`~/.ansible.cfg` -- User config file, overrides the default config if present
+
+Author
+======
+
+Ansible was originally written by Michael DeHaan.
+
+See the `AUTHORS` file for a complete list of contributors.
+
+
+License
+=======
+
+Ansible is released under the terms of the GPLv3+ License.
+
+See also
+========
+
+{% for other in cli_bin_name_list|sort %}:manpage:`{{other}}(1)`{% if not loop.last %}, {% endif %}{% endfor %}
diff --git a/docs/templates/collections_galaxy_meta.rst.j2 b/docs/templates/collections_galaxy_meta.rst.j2
new file mode 100644
index 00000000..f7ca6670
--- /dev/null
+++ b/docs/templates/collections_galaxy_meta.rst.j2
@@ -0,0 +1,98 @@
+.. _collections_galaxy_meta:
+
+************************************
+Collection Galaxy metadata structure
+************************************
+
+A key component of an Ansible collection is the ``galaxy.yml`` file placed in the root directory of a collection. This
+file contains the metadata of the collection that is used to generate a collection artifact.
+
+Structure
+=========
+
+The ``galaxy.yml`` file must contain the following keys in valid YAML:
+
+
+.. rst-class:: documentation-table
+
+.. list-table::
+ :header-rows: 1
+ :widths: auto
+
+ * - Key
+ - Comment
+
+{%- for entry in options %}
+
+
+ * - .. rst-class:: value-name
+
+ @{ entry.key }@ |br|
+
+ .. rst-class:: value-type
+
+ @{ entry.type | documented_type }@ |_|
+
+ {% if entry.get('required', False) -%}
+ .. rst-class:: value-separator
+
+ / |_|
+
+ .. rst-class:: value-required
+
+ required
+ {%- endif %}
+
+ {% if 'version_added' in entry -%}
+
+ .. rst-class:: value-added-in
+
+ |br| version_added: @{ entry.version_added }@
+
+ |_|
+
+ {%- endif %}
+
+ - {% for desc in entry.description -%}
+ @{ desc | trim | rst_ify }@
+
+ {% endfor -%}
+{%- endfor %}
+
+
+Examples
+========
+
+.. code-block:: yaml
+
+ namespace: "namespace_name"
+ name: "collection_name"
+ version: "1.0.12"
+ readme: "README.md"
+ authors:
+ - "Author1"
+ - "Author2 (https://author2.example.com)"
+ - "Author3 <author3@example.com>"
+ dependencies:
+ "other_namespace.collection1": ">=1.0.0"
+ "other_namespace.collection2": ">=2.0.0,<3.0.0"
+ "anderson55.my_collection": "*" # note: "*" selects the highest version available
+ license:
+ - "MIT"
+ tags:
+ - demo
+ - collection
+ repository: "https://www.github.com/my_org/my_collection"
+
+.. seealso::
+
+ :ref:`developing_collections`
+ Develop or modify a collection.
+ :ref:`developing_modules_general`
+ Learn about how to write Ansible modules
+ :ref:`collections`
+ Learn how to install and use collections.
+ `Mailing List <https://groups.google.com/group/ansible-devel>`_
+ The development mailing list
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docs/templates/config.rst.j2 b/docs/templates/config.rst.j2
new file mode 100644
index 00000000..35a54017
--- /dev/null
+++ b/docs/templates/config.rst.j2
@@ -0,0 +1,227 @@
+.. _ansible_configuration_settings:
+
+{% set name = 'Ansible Configuration Settings' -%}
+{% set name_slug = 'config' -%}
+
+{% set name_len = name|length + 0-%}
+{{ '=' * name_len }}
+{{name}}
+{{ '=' * name_len }}
+
+Ansible supports several sources for configuring its behavior, including an ini file named ``ansible.cfg``, environment variables, command-line options, playbook keywords, and variables. See :ref:`general_precedence_rules` for details on the relative precedence of each source.
+
+The ``ansible-config`` utility allows users to see all the configuration settings available, their defaults, how to set them and
+where their current value comes from. See :ref:`ansible-config` for more information.
+
+.. _ansible_configuration_settings_locations:
+
+The configuration file
+======================
+
+Changes can be made and used in a configuration file which will be searched for in the following order:
+
+ * ``ANSIBLE_CONFIG`` (environment variable if set)
+ * ``ansible.cfg`` (in the current directory)
+ * ``~/.ansible.cfg`` (in the home directory)
+ * ``/etc/ansible/ansible.cfg``
+
+Ansible will process the above list and use the first file found; all others are ignored.
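+
+For example, to confirm which file was picked up (a sketch; the output path is illustrative):
+
+.. code-block:: bash
+
+    $ ansible --version | grep 'config file'
+      config file = /etc/ansible/ansible.cfg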
+
+.. note::
+
+ The configuration file is one variant of an INI format.
+ Both the hash sign (``#``) and semicolon (``;``) are allowed as
+ comment markers when the comment starts the line.
+ However, if the comment is inline with regular values,
+ only the semicolon is allowed to introduce the comment.
+ For instance::
+
+ # some basic default values...
+ inventory = /etc/ansible/hosts ; This points to the file that lists your hosts
+
+
+.. _cfg_in_world_writable_dir:
+
+Avoiding security risks with ``ansible.cfg`` in the current directory
+---------------------------------------------------------------------
+
+
+If Ansible were to load ``ansible.cfg`` from a world-writable current working
+directory, it would create a serious security risk. Another user could place
+their own config file there, designed to make Ansible run malicious code both
+locally and remotely, possibly with elevated privileges. For this reason,
+Ansible will not automatically load a config file from the current working
+directory if the directory is world-writable.
+
+If you depend on using Ansible with a config file in the current working
+directory, the best way to avoid this problem is to restrict access to your
+Ansible directories to particular user(s) and/or group(s). If your Ansible
+directories live on a filesystem which has to emulate Unix permissions, like
+Vagrant or Windows Subsystem for Linux (WSL), you may, at first, not know how
+you can fix this as ``chmod``, ``chown``, and ``chgrp`` might not work there.
+In most of those cases, the correct fix is to modify the mount options of the
+filesystem so the files and directories are readable and writable by the users
+and groups running Ansible but closed to others. For more details on the
+correct settings, see:
+
+* for Vagrant, the `Vagrant documentation <https://www.vagrantup.com/docs/synced-folders/>`_ covers synced folder permissions.
+* for WSL, the `WSL docs <https://docs.microsoft.com/en-us/windows/wsl/wsl-config#set-wsl-launch-settings>`_
+ and this `Microsoft blog post <https://blogs.msdn.microsoft.com/commandline/2018/01/12/chmod-chown-wsl-improvements/>`_ cover mount options.
+
+If you absolutely depend on storing your Ansible config in a world-writable current
+working directory, you can explicitly specify the config file via the
+:envvar:`ANSIBLE_CONFIG` environment variable. Please take
+appropriate steps to mitigate the security concerns above before doing so.
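+
+For example (a sketch; the config path is illustrative):
+
+.. code-block:: bash
+
+    ANSIBLE_CONFIG=/opt/projects/deploy/ansible.cfg ansible-playbook site.yml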
+
+
+Relative paths for configuration
+--------------------------------
+
+You can specify a relative path for many configuration options. In most of
+those cases the path used will be relative to the ``ansible.cfg`` file used
+for the current execution. If you need a path relative to your current working
+directory (CWD) you can use the ``{%raw%}{{CWD}}{%endraw%}`` macro to specify
+it. We do not recommend this approach, as using your CWD as the root of
+relative paths can be a security risk. For example:
+``cd /tmp; secureinfo=./newrootpassword ansible-playbook ~/safestuff/change_root_pwd.yml``.
+
+
+Common Options
+==============
+
+This is a copy of the options available in this release; your local install might have extra options due to additional plugins.
+You can use the command line utility mentioned above (`ansible-config`) to browse through them.
+
+{% if config_options %}
+
+
+{% for config_option in config_options|sort %}
+{% set config_len = config_option|length -%}
+{% set config = config_options[config_option] %}
+.. _{{config_option}}:
+
+{{config_option}}
+{{ '-' * config_len }}
+
+{% if config['description'] and config['description'] != [''] %}
+{% if config['description'] != ['TODO: write it'] %}
+:Description: {{' '.join(config['description'])}}
+{% endif %}
+{% endif %}
+{% if config['type'] %}
+:Type: {{config['type']}}
+{% endif %}
+{% if 'default' in config %}
+:Default: {{config['default']}}
+{% endif %}
+{% if config.get('choices', False) %}
+:Choices:
+{% if config['choices'] is mapping %}
+{% for key in config['choices'].keys() %}
+ - :{{key}}: {{ config['choices'][key] }}
+{% endfor %}
+{% else %}
+{% for key in config['choices'] %}
+ - :{{key}}:
+{% endfor %}
+{% endif %}
+{% endif %}
+{% if config['version_added'] %}
+:Version Added: {{config['version_added']}}
+{% endif %}
+{% if config.get('ini', False) %}
+:Ini:
+{% for ini_map in config['ini']|sort(attribute='section') %}
+ {% if config['ini']|length > 1 %}- {% endif %}:Section: [{{ini_map['section']}}]
+ {% if config['ini']|length > 1 %} {% endif %}:Key: {{ini_map['key']}}
+{% if ini_map['version_added'] %}
+ :Version Added: {{ini_map['version_added']}}
+{% endif %}
+{% if ini_map['deprecated'] %}
+ :Deprecated in: {{ini_map['deprecated']['version']}}
+ :Deprecated detail: {{ini_map['deprecated']['why']}}
+{% if ini_map['deprecated']['alternatives'] %}
+ :Deprecated alternatives: {{ini_map['deprecated']['alternatives']}}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% if config.get('env', False) %}
+:Environment:
+{% for env_var_map in config['env']|sort(attribute='name') %}
+ {% if config['env']|length > 1 %}- {% endif %}:Variable: :envvar:`{{env_var_map['name']}}`
+{% if env_var_map['version_added'] %}
+ :Version Added: {{env_var_map['version_added']}}
+{% endif %}
+{% if env_var_map['deprecated'] %}
+ :Deprecated in: {{env_var_map['deprecated']['version']}}
+ :Deprecated detail: {{env_var_map['deprecated']['why']}}
+{% if env_var_map['deprecated']['alternatives'] %}
+ :Deprecated alternatives: {{env_var_map['deprecated']['alternatives']}}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% if config.get('vars', False) %}
+:Variables:
+{% for a_var in config['vars']|sort(attribute='name') %}
+ {% if config['vars']|length > 1 %}- {%endif%}:name: `{{a_var['name']}}`
+{% if a_var['version_added'] %}
+ :Version Added: {{a_var['version_added']}}
+{% endif %}
+{% if a_var['deprecated'] %}
+ :Deprecated in: {{a_var['deprecated']['version']}}
+  :Deprecated detail: {{a_var['deprecated']['why']}}
+{% if a_var['deprecated']['alternatives'] %}
+ :Deprecated alternatives: {{a_var['deprecated']['alternatives']}}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% if config['deprecated'] %}
+:Deprecated in: {{config['deprecated']['version']}}
+:Deprecated detail: {{config['deprecated']['why']}}
+{% if config['deprecated']['alternatives'] %}
+:Deprecated alternatives: {{config['deprecated']['alternatives']}}
+{% endif %}
+{% endif %}
+
+{% endfor %}
+
+Environment Variables
+=====================
+
+.. envvar:: ANSIBLE_CONFIG
+
+
+ Override the default ansible config file
+
+
+{% for config_option in config_options %}
+{% for env_var_map in config_options[config_option]['env'] %}
+.. envvar:: {{env_var_map['name']}}
+
+{% if config_options[config_option]['description'] and config_options[config_option]['description'] != [''] %}
+{% if config_options[config_option]['description'] != ['TODO: write it'] %}
+ {{ ''.join(config_options[config_option]['description']) }}
+{% endif %}
+{% endif %}
+
+ See also :ref:`{{config_option}} <{{config_option}}>`
+
+{% if env_var_map['version_added'] %}
+ :Version Added: {{env_var_map['version_added']}}
+{% endif %}
+{% if env_var_map['deprecated'] %}
+ :Deprecated in: {{env_var_map['deprecated']['version']}}
+ :Deprecated detail: {{env_var_map['deprecated']['why']}}
+{% if env_var_map['deprecated']['alternatives'] %}
+ :Deprecated alternatives: {{env_var_map['deprecated']['alternatives']}}
+{% endif %}
+{% endif %}
+
+{% endfor %}
+
+{% endfor %}
+
+{% endif %}
diff --git a/docs/templates/man.j2 b/docs/templates/man.j2
new file mode 100644
index 00000000..8bd3644c
--- /dev/null
+++ b/docs/templates/man.j2
@@ -0,0 +1,128 @@
+{% set name = ('ansible' if cli == 'adhoc' else 'ansible-%s' % cli) -%}
+{{name}}
+{{ '=' * ( name|length|int ) }}
+
+{{ '-' * ( short_desc|default('')|string|length|int ) }}
+{{short_desc|default('')}}
+{{ '-' * ( short_desc|default('')|string|length|int ) }}
+
+:Version: Ansible %VERSION%
+:Manual section: 1
+:Manual group: System administration commands
+
+
+
+SYNOPSIS
+--------
+{{ usage|replace('%prog', name) }}
+
+
+DESCRIPTION
+-----------
+{{ long_desc|default('', True)|wordwrap }}
+
+{% if options %}
+COMMON OPTIONS
+--------------
+{% for option in options|sort(attribute='options') %}
+{% for switch in option['options'] %}**{{switch}}**{% if option['arg'] %} '{{option['arg']}}'{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}
+
+ {{ option['desc'] }}
+{% endfor %}
+{% endif %}
+
+{% if arguments %}
+ARGUMENTS
+---------
+
+{% for arg in arguments %}
+{{ arg }}
+
+{{ (arguments[arg]|default(' '))|wordwrap }}
+
+{% endfor %}
+{% endif %}
+
+{% if actions %}
+ACTIONS
+-------
+{% for action in actions %}
+**{{ action }}**
+ {{ (actions[action]['desc']|default(' ')) |replace('\n', ' ')}}
+
+{% if actions[action]['options'] %}
+{% for option in actions[action]['options']|sort(attribute='options') %}
+{% for switch in option['options'] if switch in actions[action]['option_names'] %} **{{switch}}**{% if option['arg'] %} '{{option['arg']}}'{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}
+
+ {{ (option['desc']) }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+{% endif %}
+
+
+{% if inventory %}
+INVENTORY
+---------
+
+Ansible stores the hosts it can potentially operate on in an inventory.
+This can be a YAML file, an ini-like file, a script, a directory, a list, etc.
+For additional options, see the documentation on https://docs.ansible.com/.
+
+{% endif %}
+ENVIRONMENT
+-----------
+
+The following environment variables may be specified.
+
+{% if inventory %}
+ANSIBLE_INVENTORY -- Override the default ansible inventory sources
+
+{% endif %}
+{% if library %}
+ANSIBLE_LIBRARY -- Override the default ansible module library path
+
+{% endif %}
+ANSIBLE_CONFIG -- Specify override location for the ansible config file
+
+Many more are available for most options in ansible.cfg.
+
+For a full list, check https://docs.ansible.com/ or use the `ansible-config` command.
+
+FILES
+-----
+
+{% if inventory %}
+/etc/ansible/hosts -- Default inventory file
+
+{% endif %}
+/etc/ansible/ansible.cfg -- Config file, used if present
+
+~/.ansible.cfg -- User config file, overrides the default config if present
+
+./ansible.cfg -- Local config file (in current working directory) assumed to be 'project specific' and overrides the rest if present.
+
+As mentioned above, the ANSIBLE_CONFIG environment variable will override all others.
+
+AUTHOR
+------
+
+Ansible was originally written by Michael DeHaan.
+
+
+COPYRIGHT
+---------
+
+Copyright © 2018 Red Hat, Inc | Ansible.
+Ansible is released under the terms of the GPLv3 license.
+
+
+SEE ALSO
+--------
+
+{# filter inside the for statement so loop.last reflects the emitted items and no trailing comma is left when the current cli sorts last #}
+{% for other in cli_list|sort if other != cli %}**ansible{% if other != 'adhoc' %}-{{other}}{% endif %}** (1){% if not loop.last %}, {% endif %}{% endfor %}
+
+Extensive documentation is available in the documentation site:
+<https://docs.ansible.com>.
+IRC and mailing list info can be found in file CONTRIBUTING.md,
+available in: <https://github.com/ansible/ansible>
diff --git a/docs/templates/modules_by_category.rst.j2 b/docs/templates/modules_by_category.rst.j2
new file mode 100644
index 00000000..77635284
--- /dev/null
+++ b/docs/templates/modules_by_category.rst.j2
@@ -0,0 +1,17 @@
+.. _modules_by_category:
+
+{# avoids rST "isn't included in any toctree" errors for module index docs #}
+:orphan:
+
+Module Index
+============
+
+
+.. toctree:: :maxdepth: 1
+
+{% for name in categories %}
+{# strip out empty category names that result from flattening the dir structure #}
+{% if name %}
+ list_of_@{ name }@_modules
+{% endif %}
+{% endfor %}
diff --git a/docs/templates/playbooks_keywords.rst.j2 b/docs/templates/playbooks_keywords.rst.j2
new file mode 100644
index 00000000..4ce19aaa
--- /dev/null
+++ b/docs/templates/playbooks_keywords.rst.j2
@@ -0,0 +1,33 @@
+.. _playbook_keywords:
+
+Playbook Keywords
+=================
+
+These are the keywords available on common playbook objects. Keywords are one of several sources for configuring Ansible behavior. See :ref:`general_precedence_rules` for details on the relative precedence of each source.
+
+
+.. note:: Please note:
+
+   * Aliases for the directives are not reflected here, nor are mutable ones. For example,
+     :term:`action` in a task can be substituted by the name of any Ansible module.
+ * The keywords do not have ``version_added`` information at this time
+ * Some keywords set defaults for the objects inside of them rather than for the objects
+ themselves
+
+
+.. contents::
+ :local:
+ :depth: 1
+
+{% for name in playbook_class_names %}
+
+{{ name }}
+{{ '-' * name|length }}
+.. glossary::
+
+{% for attribute in pb_keywords[name]|sort %}
+ {{ attribute }}
+ {{ pb_keywords[name][attribute] |indent(8) }}
+
+{% endfor %}
+{% endfor %}
diff --git a/examples/ansible.cfg b/examples/ansible.cfg
new file mode 100644
index 00000000..ae5cc64a
--- /dev/null
+++ b/examples/ansible.cfg
@@ -0,0 +1,525 @@
+# Example config file for ansible -- https://ansible.com/
+# =======================================================
+
+# Nearly all parameters can be overridden in ansible-playbook
+# or with command line flags. Ansible will read ANSIBLE_CONFIG,
+# ansible.cfg in the current working directory, .ansible.cfg in
+# the home directory, or /etc/ansible/ansible.cfg, whichever it
+# finds first.
+
+# For a full list of available options, run ansible-config list or see the
+# documentation: https://docs.ansible.com/ansible/latest/reference_appendices/config.html.
+
+[defaults]
+#inventory = /etc/ansible/hosts
+#library = ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
+#module_utils = ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
+#remote_tmp = ~/.ansible/tmp
+#local_tmp = ~/.ansible/tmp
+#forks = 5
+#poll_interval = 0.001
+#ask_pass = False
+#transport = smart
+
+# Plays will gather facts by default, which contain information about
+# the remote system.
+#
+# smart - gather by default, but don't regather if already gathered
+# implicit - gather by default, turn off with gather_facts: False
+# explicit - do not gather by default, must say gather_facts: True
+#gathering = implicit
+
+# This only affects the gathering done by a play's gather_facts directive;
+# by default, gathering retrieves all fact subsets:
+# all - gather all subsets
+# network - gather min and network facts
+# hardware - gather hardware facts (longest facts to retrieve)
+# virtual - gather min and virtual facts
+# facter - import facts from facter
+# ohai - import facts from ohai
+# You can combine them using comma (ex: network,virtual)
+# You can negate them using ! (ex: !hardware,!facter,!ohai)
+# A minimal set of facts is always gathered.
+#
+#gather_subset = all
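+#
+# For example, to skip the slow hardware facts while keeping everything else
+# (illustrative value, not the default):
+#gather_subset = !hardware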
+
+# Some hardware-related facts are collected
+# with a maximum timeout of 10 seconds. This
+# option lets you increase or decrease that
+# timeout to something more suitable for the
+# environment.
+#
+#gather_timeout = 10
+
+# Ansible facts are available inside the ansible_facts.* dictionary
+# namespace. This setting maintains the behaviour which was the default prior
+# to 2.5, duplicating these variables into the main namespace, each with a
+# prefix of 'ansible_'.
+# This variable is set to True by default for backwards compatibility. It
+# will be changed to a default of 'False' in a future release.
+#
+#inject_facts_as_vars = True
+
+# Paths to search for collections, colon separated
+# collections_paths = ~/.ansible/collections:/usr/share/ansible/collections
+
+# Paths to search for roles, colon separated
+#roles_path = ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
+
+# Host key checking is enabled by default
+#host_key_checking = True
+
+# You can only have one 'stdout' callback type enabled at a time. The default
+# is 'default'. The 'yaml' or 'debug' stdout callback plugins are easier to read.
+#
+#stdout_callback = default
+#stdout_callback = yaml
+#stdout_callback = debug
+
+
+# Ansible ships with some plugins that require whitelisting;
+# this is done to avoid running all plugins of a type by default.
+# These settings list those that you want enabled for your system.
+# Custom plugins should not need this unless the plugin author disables them
+# by default.
+#
+# Enable callback plugins; they can output to stdout but cannot be the 'stdout' type.
+#callback_whitelist = timer, mail
+
+# Determine whether includes in tasks and handlers are "static" by
+# default. As of 2.0, includes are dynamic by default. Setting these
+# values to True will make includes behave more like they did in the
+# 1.x versions.
+#
+#task_includes_static = False
+#handler_includes_static = False
+
+# Controls if a missing handler for a notification event is an error or a warning
+#error_on_missing_handler = True
+
+# Default timeout for connection plugins
+#timeout = 10
+
+# Default user to use for playbooks if user is not specified
+# Uses the connection plugin's default, normally the user currently executing Ansible,
+# unless a different user is specified here.
+#
+#remote_user = root
+
+# Logging is off by default unless this path is defined.
+#log_path = /var/log/ansible.log
+
+# Default module to use when running ad-hoc commands
+#module_name = command
+
+# Use this shell for commands executed under sudo.
+# You may need to change this to /bin/bash in rare instances
+# if sudo is constrained.
+#
+#executable = /bin/sh
+
+# By default, variables from roles will be visible in the global variable
+# scope. To prevent this, set the following option to True, and only
+# tasks and handlers within the role will see the variables there
+#
+#private_role_vars = False
+
+# List any Jinja2 extensions to enable here.
+#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
+
+# If set, always use this private key file for authentication, same as
+# if passing --private-key to ansible or ansible-playbook
+#
+#private_key_file = /path/to/file
+
+# If set, configures the path to the Vault password file as an alternative to
+# specifying --vault-password-file on the command line. This can also be
+# an executable script that returns the vault password to stdout.
+#
+#vault_password_file = /path/to/vault_password_file
+
+# Format of the string {{ ansible_managed }} available within Jinja2
+# templates; it indicates to users editing template files that their edits
+# will be replaced. {file}, {host}, {uid} and strftime codes are expanded to proper values.
+#
+#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
+
+# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
+# in some situations so the default is a static string:
+#
+#ansible_managed = Ansible managed
+
+# By default, ansible-playbook will display "Skipping [host]" if it determines a task
+# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
+# messages. NOTE: the task header will still be shown regardless of whether or not the
+# task is skipped.
+#
+#display_skipped_hosts = True
+
+# By default, if a task in a playbook does not include a name: field then
+# ansible-playbook will construct a header that includes the task's action but
+# not the task's args. This is a security feature because ansible cannot know
+# if the *module* considers an argument to be no_log at the time that the
+# header is printed. If your environment doesn't have a problem securing
+# stdout from ansible-playbook (or you have manually specified no_log in your
+# playbook on all of the tasks where you have secret information) then you can
+# safely set this to True to get more informative messages.
+#
+#display_args_to_stdout = False
+
+# Ansible will raise errors when attempting to dereference
+# Jinja2 variables that are not set in templates or action lines. Uncomment this line
+# to change this behavior.
+#
+#error_on_undefined_vars = False
+
+# Ansible may display warnings based on the configuration of the
+# system running ansible itself. This may include warnings about 3rd party packages or
+# other conditions that should be resolved if possible.
+# To disable these warnings, set the following value to False:
+#
+#system_warnings = True
+
+# Ansible may display deprecation warnings for language
+# features that should no longer be used and will be removed in future versions.
+# To disable these warnings, set the following value to False:
+#
+#deprecation_warnings = True
+
+# Ansible can optionally warn when usage of the shell and
+# command modules could be simplified by using a dedicated Ansible module
+# instead. These warnings can be silenced by adjusting the following
+# setting or by adding warn=yes or warn=no to the end of the command line
+# parameter string. This will, for example, suggest using the git module
+# instead of shelling out to the git command.
+#
+#command_warnings = False
+
+
+# Set plugin path directories here, separated by colons.
+#action_plugins = /usr/share/ansible/plugins/action
+#become_plugins = /usr/share/ansible/plugins/become
+#cache_plugins = /usr/share/ansible/plugins/cache
+#callback_plugins = /usr/share/ansible/plugins/callback
+#connection_plugins = /usr/share/ansible/plugins/connection
+#lookup_plugins = /usr/share/ansible/plugins/lookup
+#inventory_plugins = /usr/share/ansible/plugins/inventory
+#vars_plugins = /usr/share/ansible/plugins/vars
+#filter_plugins = /usr/share/ansible/plugins/filter
+#test_plugins = /usr/share/ansible/plugins/test
+#terminal_plugins = /usr/share/ansible/plugins/terminal
+#strategy_plugins = /usr/share/ansible/plugins/strategy
+
+
+# Ansible will use the 'linear' strategy but you may want to try another one.
+#strategy = linear
+
+# By default, callbacks are not loaded for /bin/ansible. Enable this if you
+# want, for example, a notification or logging callback to also apply to
+# /bin/ansible runs
+#
+#bin_ansible_callbacks = False
+
+
+# Don't like cows? That's unfortunate.
+# Set to 1 if you don't want cowsay support, or export ANSIBLE_NOCOWS=1
+#nocows = 1
+
+# Set which cowsay stencil you'd like to use by default. When set to 'random',
+# a random stencil will be selected for each task. The selection will be filtered
+# against the `cow_whitelist` option below.
+#
+#cow_selection = default
+#cow_selection = random
+
+# When using the 'random' option for cowsay, stencils will be restricted to this list.
+# It should be formatted as a comma-separated list with no spaces between names.
+# NOTE: line continuations here are for formatting purposes only, as the INI parser
+# in python does not support them.
+#
+#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
+# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
+# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
+
+# Don't like colors either?
+# Set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
+#
+#nocolor = 1
+
+# If set to a persistent type (not 'memory'; for example, 'redis'), fact values
+# from previous runs in Ansible will be stored. This may be useful when
+# wanting to use, for example, IP information from one group of servers
+# without having to talk to them in the same playbook run to get their
+# current IP information.
+#
+#fact_caching = memory
+
+# This option tells Ansible where to cache facts. The value is plugin dependent.
+# For the jsonfile plugin, it should be a path to a local directory.
+# For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0
+#
+#fact_caching_connection=/tmp
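+#
+# For example, a jsonfile-backed cache kept in a local directory
+# (illustrative values; fact_caching_timeout is in seconds):
+#fact_caching = jsonfile
+#fact_caching_connection = /tmp/ansible_fact_cache
+#fact_caching_timeout = 86400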
+
+# Retry files
+# When a playbook fails, a .retry file can be created that will be placed in ~/.
+# You can enable this feature by setting retry_files_enabled to True,
+# and you can change the location of the files by setting retry_files_save_path.
+#
+#retry_files_enabled = False
+#retry_files_save_path = ~/.ansible-retry
+
+# prevents logging of task data, off by default
+#no_log = False
+
+# prevents logging of tasks, but only on the targets; data is still logged on the master/controller
+#no_target_syslog = False
+
+# Controls whether Ansible will raise an error or warning if a task has no
+# choice but to create world readable temporary files to execute a module on
+# the remote machine. This option is False by default for security. Users may
+# turn this on to have behaviour more like Ansible prior to 2.1.x. See
+# https://docs.ansible.com/ansible/latest/user_guide/become.html#becoming-an-unprivileged-user
+# for more secure ways to fix this than enabling this option.
+#
+#allow_world_readable_tmpfiles = False
+
+# Controls what compression method is used for new-style ansible modules when
+# they are sent to the remote system. The compression types depend on having
+# support compiled into both the controller's python and the client's python.
+# The names should match with the python Zipfile compression types:
+# * ZIP_STORED (no compression. available everywhere)
+# * ZIP_DEFLATED (uses zlib, the default)
+# These values may be set per host via the ansible_module_compression inventory variable.
+#
+#module_compression = 'ZIP_DEFLATED'
+
+# This controls the cutoff point (in bytes) on --diff for files;
+# set to 0 for unlimited (RAM may suffer!).
+#
+#max_diff_size = 104448
+
+# Controls showing custom stats at the end, off by default
+#show_custom_stats = False
+
+# Controls which files to ignore when using a directory as inventory with
+# possibly multiple sources (both static and dynamic)
+#
+#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
+
+# This family of modules uses an alternative execution path optimized for network appliances.
+# Only update this setting if you know how this works, otherwise it can break module execution.
+#
+#network_group_modules=eos, nxos, ios, iosxr, junos, vyos
+
+# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
+# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
+# jinja2 templating language which will be run through the templating engine.
+# ENABLING THIS COULD BE A SECURITY RISK
+#
+#allow_unsafe_lookups = False
+
+# set default errors for all plays
+#any_errors_fatal = False
+
+
+[inventory]
+# List of enabled inventory plugins and the order in which they are used.
+#enable_plugins = host_list, script, auto, yaml, ini, toml
+
+# Ignore these extensions when parsing a directory as inventory source
+#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
+
+# Ignore files matching these patterns when parsing a directory as inventory source
+#ignore_patterns=
+
+# If 'True' unparsed inventory sources become fatal errors, otherwise they are warnings.
+#unparsed_is_failed = False
+
+
+[privilege_escalation]
+#become = False
+#become_method = sudo
+#become_ask_pass = False
+
+
+## Connection Plugins ##
+
+# Settings for each connection plugin go under a section titled '[[plugin_name]_connection]'
+# To view available connection plugins, run ansible-doc -t connection -l
+# To view available options for a connection plugin, run ansible-doc -t connection [plugin_name]
+# https://docs.ansible.com/ansible/latest/plugins/connection.html
+
+[paramiko_connection]
+# Uncomment this line to cause the paramiko connection plugin to not record new host
+# keys encountered. This increases performance on new host additions. This setting works
+# independently of the host key checking setting above.
+#record_host_keys=False
+
+# By default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
+# line to disable this behaviour.
+#pty = False
+
+# paramiko will default to looking for SSH keys initially when trying to
+# authenticate to remote devices. This is a problem for some network devices
+# that close the connection after a key failure. Uncomment this line to
+# disable paramiko's look-for-keys behaviour.
+#look_for_keys = False
+
+# When using persistent connections with Paramiko, the connection runs in a
+# background process. If the host doesn't already have a valid SSH key, by
+# default Ansible will prompt to add the host key. This will cause connections
+# running in background processes to fail. Uncomment this line to have
+# Paramiko automatically add host keys.
+#host_key_auto_add = True
+
+
+[ssh_connection]
+# ssh arguments to use
+# Leaving off ControlPersist will result in poor performance, so on older
+# platforms that lack it, use paramiko rather than removing it; -C enables compression.
+#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
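+#
+# For example, to keep master connections alive longer between plays
+# (illustrative value):
+#ssh_args = -C -o ControlMaster=auto -o ControlPersist=30m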
+
+# The base directory for the ControlPath sockets.
+# This is the "%(directory)s" in the control_path option
+#
+# Example:
+# control_path_dir = /tmp/.ansible/cp
+#control_path_dir = ~/.ansible/cp
+
+# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
+# port and username (empty string in the config). The hash mitigates a common problem users
+# found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
+# In those cases, a "too long for Unix domain socket" ssh error would occur.
+#
+# Example:
+# control_path = %(directory)s/%%C
+#control_path =
+
+# Enabling pipelining reduces the number of SSH operations required to
+# execute a module on the remote server. This can result in a significant
+# performance improvement when enabled, however when using "sudo:" you must
+# first disable 'requiretty' in /etc/sudoers
+#
+# By default, this option is disabled to preserve compatibility with
+# sudoers configurations that have requiretty (the default on many distros).
+#
+#pipelining = False
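+#
+# For example, once requiretty is disabled in /etc/sudoers (illustrative):
+#pipelining = True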
+
+# Control the mechanism for transferring files (old)
+# * smart = try sftp and then try scp [default]
+# * True = use scp only
+# * False = use sftp only
+#scp_if_ssh = smart
+
+# Control the mechanism for transferring files (new)
+# If set, this will override the scp_if_ssh option
+# * sftp = use sftp to transfer files
+# * scp = use scp to transfer files
+# * piped = use 'dd' over SSH to transfer files
+# * smart = try sftp, scp, and piped, in that order [default]
+#transfer_method = smart
+
+# If False, sftp will not use batch mode to transfer files. This may make some
+# types of file transfer failures impossible to catch, however, and batch mode
+# should only be disabled if your sftp version has problems with it.
+#sftp_batch_mode = False
+
+# The -tt argument is passed to ssh when pipelining is not enabled because sudo
+# requires a tty by default.
+#usetty = True
+
+# Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
+# For each retry attempt, there is an exponential backoff,
+# so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
+#retries = 3
+
+
+[persistent_connection]
+# Configures the persistent connection timeout value in seconds. This value is
+# how long the persistent connection will remain idle before it is destroyed.
+# If the connection doesn't receive a request before the timeout value
+# expires, the connection is shut down. The default value is 30 seconds.
+#connect_timeout = 30
+
+# The command timeout value defines the amount of time to wait for a command
+# or RPC call before timing out. The value for the command timeout must
+# be less than the value of the persistent connection idle timeout (connect_timeout).
+# The default value is 30 seconds.
+#command_timeout = 30
+
+
+## Become Plugins ##
+
+# Settings for become plugins go under a section named '[[plugin_name]_become_plugin]'
+# To view available become plugins, run ansible-doc -t become -l
+# To view available options for a specific plugin, run ansible-doc -t become [plugin_name]
+# https://docs.ansible.com/ansible/latest/plugins/become.html
+
+[sudo_become_plugin]
+#flags = -H -S -n
+#user = root
+
+
+[selinux]
+# File systems that require special treatment when dealing with security context.
+# For these, the default behaviour (copying the existing context or using the user
+# default) needs to be changed to use the file-system-dependent context.
+#special_context_filesystems=fuse,nfs,vboxsf,ramfs,9p,vfat
+
+# Set this to True to allow libvirt_lxc connections to work without SELinux.
+#libvirt_lxc_noseclabel = False
+
+
+[colors]
+#highlight = white
+#verbose = blue
+#warn = bright purple
+#error = red
+#debug = dark gray
+#deprecate = purple
+#skip = cyan
+#unreachable = red
+#ok = green
+#changed = yellow
+#diff_add = green
+#diff_remove = red
+#diff_lines = cyan
+
+
+[diff]
+# Always print diff when running (same as always running with -D/--diff)
+#always = False
+
+# Set how many context lines to show in diff
+#context = 3
+
+[galaxy]
+# Controls whether the progress wheel is shown or not
+#display_progress=
+
+# Whether to ignore TLS certificate validation errors for the Galaxy server
+#ignore_certs = False
+
+# Role or collection skeleton directory to use as a template for
+# the init action in the ansible-galaxy command
+#role_skeleton=
+
+# Patterns of files to ignore inside a Galaxy role or collection
+# skeleton directory
+#role_skeleton_ignore="^.git$", "^.*/.git_keep$"
+
+# Galaxy Server URL
+#server=https://galaxy.ansible.com
+
+# A list of Galaxy servers to use when installing a collection.
+#server_list=automation_hub, release_galaxy
+
+# Server specific details which are mentioned in server_list
+#[galaxy_server.automation_hub]
+#url=https://cloud.redhat.com/api/automation-hub/
+#auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token
+#token=my_ah_token
+#
+#[galaxy_server.release_galaxy]
+#url=https://galaxy.ansible.com/
+#token=my_token
diff --git a/examples/hosts b/examples/hosts
new file mode 100644
index 00000000..e84a30cd
--- /dev/null
+++ b/examples/hosts
@@ -0,0 +1,44 @@
+# This is the default ansible 'hosts' file.
+#
+# It should live in /etc/ansible/hosts
+#
+# - Comments begin with the '#' character
+# - Blank lines are ignored
+# - Groups of hosts are delimited by [header] elements
+# - You can enter hostnames or ip addresses
+# - A hostname/ip can be a member of multiple groups
+
+# Ex 1: Ungrouped hosts, specify before any group headers:
+
+## green.example.com
+## blue.example.com
+## 192.168.100.1
+## 192.168.100.10
+
+# Ex 2: A collection of hosts belonging to the 'webservers' group:
+
+## [webservers]
+## alpha.example.org
+## beta.example.org
+## 192.168.1.100
+## 192.168.1.110
+
+# If you have multiple hosts following a pattern, you can specify
+# them like this:
+
+## www[001:006].example.com
+
+# Ex 3: A collection of database servers in the 'dbservers' group:
+
+## [dbservers]
+##
+## db01.intranet.mydomain.net
+## db02.intranet.mydomain.net
+## 10.25.1.56
+## 10.25.1.57
+
+# Here's another example of host ranges, this time there are no
+# leading 0s:
+
+## db-[99:101]-node.example.com
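+
+# Alphabetic ranges are also supported (illustrative):
+
+## db-[a:f]-node.example.com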
+
diff --git a/examples/scripts/ConfigureRemotingForAnsible.ps1 b/examples/scripts/ConfigureRemotingForAnsible.ps1
new file mode 100644
index 00000000..7e039bb4
--- /dev/null
+++ b/examples/scripts/ConfigureRemotingForAnsible.ps1
@@ -0,0 +1,453 @@
+#Requires -Version 3.0
+
+# Configure a Windows host for remote management with Ansible
+# -----------------------------------------------------------
+#
+# This script checks the current WinRM (PS Remoting) configuration and makes
+# the necessary changes to allow Ansible to connect, authenticate and
+# execute PowerShell commands.
+#
+# All events are logged to the Windows EventLog, useful for unattended runs.
+#
+# Use option -Verbose in order to see the verbose output messages.
+#
+# Use option -CertValidityDays to specify how long this certificate is valid
+# starting from today. So you would specify -CertValidityDays 3650 to get
+# a 10-year valid certificate.
+#
+# Use option -ForceNewSSLCert if the system has been SysPreped and a new
+# SSL Certificate must be forced on the WinRM Listener when re-running this
+# script. This is necessary when a new SID and CN name is created.
+#
+# Use option -EnableCredSSP to enable CredSSP as an authentication option.
+#
+# Use option -DisableBasicAuth to disable basic authentication.
+#
+# Use option -SkipNetworkProfileCheck to skip the network profile check.
+# Without specifying this the script will only run if the device's interfaces
+# are in DOMAIN or PRIVATE zones. Provide this switch if you want to enable
+# WinRM on a device with an interface in PUBLIC zone.
+#
+# Use option -SubjectName to specify the CN name of the certificate. This
+# defaults to the system's hostname and generally should not be specified.
+
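+# Example (illustrative) run from an elevated PowerShell session:
+#
+#   powershell.exe -ExecutionPolicy ByPass -File ConfigureRemotingForAnsible.ps1 -Verbose
+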
+# Written by Trond Hindenes <trond@hindenes.com>
+# Updated by Chris Church <cchurch@ansible.com>
+# Updated by Michael Crilly <mike@autologic.cm>
+# Updated by Anton Ouzounov <Anton.Ouzounov@careerbuilder.com>
+# Updated by Nicolas Simond <contact@nicolas-simond.com>
+# Updated by Dag Wieërs <dag@wieers.com>
+# Updated by Jordan Borean <jborean93@gmail.com>
+# Updated by Erwan Quélin <erwan.quelin@gmail.com>
+# Updated by David Norman <david@dkn.email>
+#
+# Version 1.0 - 2014-07-06
+# Version 1.1 - 2014-11-11
+# Version 1.2 - 2015-05-15
+# Version 1.3 - 2016-04-04
+# Version 1.4 - 2017-01-05
+# Version 1.5 - 2017-02-09
+# Version 1.6 - 2017-04-18
+# Version 1.7 - 2017-11-23
+# Version 1.8 - 2018-02-23
+# Version 1.9 - 2018-09-21
+
+# Support -Verbose option
+[CmdletBinding()]
+
+Param (
+ [string]$SubjectName = $env:COMPUTERNAME,
+ [int]$CertValidityDays = 1095,
+ [switch]$SkipNetworkProfileCheck,
+ $CreateSelfSignedCert = $true,
+ [switch]$ForceNewSSLCert,
+ [switch]$GlobalHttpFirewallAccess,
+ [switch]$DisableBasicAuth = $false,
+ [switch]$EnableCredSSP
+)
+
+Function Write-Log
+{
+ $Message = $args[0]
+ Write-EventLog -LogName Application -Source $EventSource -EntryType Information -EventId 1 -Message $Message
+}
+
+Function Write-VerboseLog
+{
+ $Message = $args[0]
+ Write-Verbose $Message
+ Write-Log $Message
+}
+
+Function Write-HostLog
+{
+ $Message = $args[0]
+ Write-Output $Message
+ Write-Log $Message
+}
+
+Function New-LegacySelfSignedCert
+{
+ Param (
+ [string]$SubjectName,
+ [int]$ValidDays = 1095
+ )
+
+ $hostnonFQDN = $env:computerName
+ $hostFQDN = [System.Net.Dns]::GetHostByName(($env:computerName)).Hostname
+ $SignatureAlgorithm = "SHA256"
+
+ $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1"
+ $name.Encode("CN=$SubjectName", 0)
+
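+    # Create a 4096-bit RSA key in the machine store (MachineContext = 1);
+    # KeySpec 1 corresponds to AT_KEYEXCHANGE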
+ $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1"
+ $key.ProviderName = "Microsoft Enhanced RSA and AES Cryptographic Provider"
+ $key.KeySpec = 1
+ $key.Length = 4096
+ $key.SecurityDescriptor = "D:PAI(A;;0xd01f01ff;;;SY)(A;;0xd01f01ff;;;BA)(A;;0x80120089;;;NS)"
+ $key.MachineContext = 1
+ $key.Create()
+
+ $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1"
+ $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1")
+ $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1"
+ $ekuoids.Add($serverauthoid)
+ $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1"
+ $ekuext.InitializeEncode($ekuoids)
+
+ $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1"
+ $cert.InitializeFromPrivateKey(2, $key, "")
+ $cert.Subject = $name
+ $cert.Issuer = $cert.Subject
+ $cert.NotBefore = (Get-Date).AddDays(-1)
+ $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays)
+
+ $SigOID = New-Object -ComObject X509Enrollment.CObjectId
+ $SigOID.InitializeFromValue(([Security.Cryptography.Oid]$SignatureAlgorithm).Value)
+
+ [string[]] $AlternativeName += $hostnonFQDN
+ $AlternativeName += $hostFQDN
+ $IAlternativeNames = New-Object -ComObject X509Enrollment.CAlternativeNames
+
+ foreach ($AN in $AlternativeName)
+ {
+ $AltName = New-Object -ComObject X509Enrollment.CAlternativeName
+ $AltName.InitializeFromString(0x3,$AN)
+ $IAlternativeNames.Add($AltName)
+ }
+
+ $SubjectAlternativeName = New-Object -ComObject X509Enrollment.CX509ExtensionAlternativeNames
+ $SubjectAlternativeName.InitializeEncode($IAlternativeNames)
+
+ [String[]]$KeyUsage = ("DigitalSignature", "KeyEncipherment")
+ $KeyUsageObj = New-Object -ComObject X509Enrollment.CX509ExtensionKeyUsage
+ $KeyUsageObj.InitializeEncode([int][Security.Cryptography.X509Certificates.X509KeyUsageFlags]($KeyUsage))
+ $KeyUsageObj.Critical = $true
+
+ $cert.X509Extensions.Add($KeyUsageObj)
+ $cert.X509Extensions.Add($ekuext)
+ $cert.SignatureInformation.HashAlgorithm = $SigOID
+ $CERT.X509Extensions.Add($SubjectAlternativeName)
+ $cert.Encode()
+
+ $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1"
+ $enrollment.InitializeFromRequest($cert)
+ $certdata = $enrollment.CreateRequest(0)
+ $enrollment.InstallResponse(2, $certdata, 0, "")
+
+ # extract/return the thumbprint from the generated cert
+ $parsed_cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2
+ $parsed_cert.Import([System.Text.Encoding]::UTF8.GetBytes($certdata))
+
+ return $parsed_cert.Thumbprint
+}
+
+Function Enable-GlobalHttpFirewallAccess
+{
+ Write-Verbose "Forcing global HTTP firewall access"
+ # this is a fairly naive implementation; could be more sophisticated about rule matching/collapsing
+ $fw = New-Object -ComObject HNetCfg.FWPolicy2
+
+ # try to find/enable the default rule first
+ $add_rule = $false
+ $matching_rules = $fw.Rules | Where-Object { $_.Name -eq "Windows Remote Management (HTTP-In)" }
+ $rule = $null
+ If ($matching_rules) {
+ If ($matching_rules -isnot [Array]) {
+ Write-Verbose "Editing existing single HTTP firewall rule"
+ $rule = $matching_rules
+ }
+ Else {
+ # try to find one with the All or Public profile first
+ Write-Verbose "Found multiple existing HTTP firewall rules..."
+            $rule = @($matching_rules | Where-Object { $_.Profiles -band 4 })[0]
+
+ If (-not $rule -or $rule -is [Array]) {
+ Write-Verbose "Editing an arbitrary single HTTP firewall rule (multiple existed)"
+ # oh well, just pick the first one
+ $rule = $matching_rules[0]
+ }
+ }
+ }
+
+ If (-not $rule) {
+ Write-Verbose "Creating a new HTTP firewall rule"
+ $rule = New-Object -ComObject HNetCfg.FWRule
+ $rule.Name = "Windows Remote Management (HTTP-In)"
+ $rule.Description = "Inbound rule for Windows Remote Management via WS-Management. [TCP 5985]"
+ $add_rule = $true
+ }
+
+ $rule.Profiles = 0x7FFFFFFF
+ $rule.Protocol = 6
+ $rule.LocalPorts = 5985
+ $rule.RemotePorts = "*"
+ $rule.LocalAddresses = "*"
+ $rule.RemoteAddresses = "*"
+ $rule.Enabled = $true
+ $rule.Direction = 1
+ $rule.Action = 1
+ $rule.Grouping = "Windows Remote Management"
+
+ If ($add_rule) {
+ $fw.Rules.Add($rule)
+ }
+
+ Write-Verbose "HTTP firewall rule $($rule.Name) updated"
+}
+
+# Set up error handling.
+Trap
+{
+ $_
+ Exit 1
+}
+$ErrorActionPreference = "Stop"
+
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if (-Not $myWindowsPrincipal.IsInRole($adminRole))
+{
+ Write-Output "ERROR: You need elevated Administrator privileges in order to run this script."
+ Write-Output " Start Windows PowerShell by using the Run as Administrator option."
+ Exit 2
+}
+
+$EventSource = $MyInvocation.MyCommand.Name
+If (-Not $EventSource)
+{
+ $EventSource = "Powershell CLI"
+}
+
+If ([System.Diagnostics.EventLog]::Exists('Application') -eq $False -or [System.Diagnostics.EventLog]::SourceExists($EventSource) -eq $False)
+{
+ New-EventLog -LogName Application -Source $EventSource
+}
+
+# Detect PowerShell version.
+If ($PSVersionTable.PSVersion.Major -lt 3)
+{
+ Write-Log "PowerShell version 3 or higher is required."
+ Throw "PowerShell version 3 or higher is required."
+}
+
+# Find and start the WinRM service.
+Write-Verbose "Verifying WinRM service."
+If (!(Get-Service "WinRM"))
+{
+ Write-Log "Unable to find the WinRM service."
+ Throw "Unable to find the WinRM service."
+}
+ElseIf ((Get-Service "WinRM").Status -ne "Running")
+{
+ Write-Verbose "Setting WinRM service to start automatically on boot."
+ Set-Service -Name "WinRM" -StartupType Automatic
+ Write-Log "Set WinRM service to start automatically on boot."
+ Write-Verbose "Starting WinRM service."
+ Start-Service -Name "WinRM" -ErrorAction Stop
+ Write-Log "Started WinRM service."
+
+}
+
+# WinRM should be running; check that we have a PS session config.
+If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener)))
+{
+ If ($SkipNetworkProfileCheck) {
+ Write-Verbose "Enabling PS Remoting without checking Network profile."
+ Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop
+ Write-Log "Enabled PS Remoting without checking Network profile."
+ }
+ Else {
+ Write-Verbose "Enabling PS Remoting."
+ Enable-PSRemoting -Force -ErrorAction Stop
+ Write-Log "Enabled PS Remoting."
+ }
+}
+Else
+{
+ Write-Verbose "PS Remoting is already enabled."
+}
+
+# Ensure LocalAccountTokenFilterPolicy is set to 1
+# https://github.com/ansible/ansible/issues/42978
+$token_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
+$token_prop_name = "LocalAccountTokenFilterPolicy"
+$token_key = Get-Item -Path $token_path
+$token_value = $token_key.GetValue($token_prop_name, $null)
+if ($token_value -ne 1) {
+    Write-Verbose "Setting LocalAccountTokenFilterPolicy to 1"
+ if ($null -ne $token_value) {
+ Remove-ItemProperty -Path $token_path -Name $token_prop_name
+ }
+ New-ItemProperty -Path $token_path -Name $token_prop_name -Value 1 -PropertyType DWORD > $null
+}
+
+# Make sure there is an SSL listener.
+$listeners = Get-ChildItem WSMan:\localhost\Listener
+If (!($listeners | Where-Object {$_.Keys -like "TRANSPORT=HTTPS"}))
+{
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ # Create the hashtables of settings to be used.
+ $valueset = @{
+ Hostname = $SubjectName
+ CertificateThumbprint = $thumbprint
+ }
+
+ $selectorset = @{
+ Transport = "HTTPS"
+ Address = "*"
+ }
+
+ Write-Verbose "Enabling SSL listener."
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ Write-Log "Enabled SSL listener."
+}
+Else
+{
+ Write-Verbose "SSL listener is already active."
+
+    # Force a new SSL cert on the listener if $ForceNewSSLCert is set
+ If ($ForceNewSSLCert)
+ {
+
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ $valueset = @{
+ CertificateThumbprint = $thumbprint
+ Hostname = $SubjectName
+ }
+
+ # Delete the listener for SSL
+ $selectorset = @{
+ Address = "*"
+ Transport = "HTTPS"
+ }
+ Remove-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset
+
+ # Add new Listener with new SSL cert
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ }
+}
+
+# Check for basic authentication.
+$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "Basic"}
+
+If ($DisableBasicAuth)
+{
+ If (($basicAuthSetting.Value) -eq $true)
+ {
+ Write-Verbose "Disabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $false
+ Write-Log "Disabled basic auth support."
+ }
+ Else
+ {
+ Write-Verbose "Basic auth is already disabled."
+ }
+}
+Else
+{
+ If (($basicAuthSetting.Value) -eq $false)
+ {
+ Write-Verbose "Enabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true
+ Write-Log "Enabled basic auth support."
+ }
+ Else
+ {
+ Write-Verbose "Basic auth is already enabled."
+ }
+}
+
+# If EnableCredSSP is set to true
+If ($EnableCredSSP)
+{
+ # Check for CredSSP authentication
+ $credsspAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "CredSSP"}
+ If (($credsspAuthSetting.Value) -eq $false)
+ {
+ Write-Verbose "Enabling CredSSP auth support."
+ Enable-WSManCredSSP -role server -Force
+ Write-Log "Enabled CredSSP auth support."
+ }
+}
+
+If ($GlobalHttpFirewallAccess) {
+ Enable-GlobalHttpFirewallAccess
+}
+
+# Configure firewall to allow WinRM HTTPS connections.
+$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS"
+$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any
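+# Heuristic: when no matching rule exists, netsh prints only a short
+# "No rules match" message, so fewer than 5 lines of output is treated
+# as "rule not found"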
+If ($fwtest1.count -lt 5)
+{
+ Write-Verbose "Adding firewall rule to allow WinRM HTTPS."
+ netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow
+ Write-Log "Added firewall rule to allow WinRM HTTPS."
+}
+ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5))
+{
+ Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile."
+ netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any
+ Write-Log "Updated firewall rule to allow WinRM HTTPS for any profile."
+}
+Else
+{
+ Write-Verbose "Firewall rule already exists to allow WinRM HTTPS."
+}
+
+# Test a remoting connection to localhost, which should work.
+$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue
+$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
+
+$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue
+
+If ($httpResult -and $httpsResult)
+{
+ Write-Verbose "HTTP: Enabled | HTTPS: Enabled"
+}
+ElseIf ($httpsResult -and !$httpResult)
+{
+ Write-Verbose "HTTP: Disabled | HTTPS: Enabled"
+}
+ElseIf ($httpResult -and !$httpsResult)
+{
+ Write-Verbose "HTTP: Enabled | HTTPS: Disabled"
+}
+Else
+{
+ Write-Log "Unable to establish an HTTP or HTTPS remoting session."
+ Throw "Unable to establish an HTTP or HTTPS remoting session."
+}
+Write-VerboseLog "PS Remoting has been successfully configured for Ansible."
diff --git a/examples/scripts/upgrade_to_ps3.ps1 b/examples/scripts/upgrade_to_ps3.ps1
new file mode 100644
index 00000000..359b835d
--- /dev/null
+++ b/examples/scripts/upgrade_to_ps3.ps1
@@ -0,0 +1,93 @@
+
+# PowerShell script to upgrade a PowerShell 2.0 system to PowerShell 3.0
+# based on http://occasionalutility.blogspot.com/2013/11/everyday-powershell-part-7-powershell.html
+#
+# Some Ansible modules may use PowerShell 3 features, so systems may need
+# to be upgraded. This may be used by a sample playbook. Refer to the Windows
+# documentation on docs.ansible.com for details.
+#
+# - hosts: windows
+# tasks:
+# - script: upgrade_to_ps3.ps1
+
+# Get version of OS
+
+# 6.0 is 2008
+# 6.1 is 2008 R2
+# 6.2 is 2012
+# 6.3 is 2012 R2
+
+
+if ($PSVersionTable.psversion.Major -ge 3)
+{
+    Write-Output "PowerShell 3 is already installed; you don't need this."
+ Exit
+}
+
+$powershellpath = "C:\powershell"
+
+function download-file
+{
+ param ([string]$path, [string]$local)
+ $client = new-object system.net.WebClient
+ $client.Headers.Add("user-agent", "PowerShell")
+ $client.downloadfile($path, $local)
+}
+
+if (!(test-path $powershellpath))
+{
+ New-Item -ItemType directory -Path $powershellpath
+}
+
+
+# .NET Framework 4.0 is necessary.
+
+#if (($PSVersionTable.CLRVersion.Major) -lt 2)
+#{
+# $DownloadUrl = "http://download.microsoft.com/download/B/A/4/BA4A7E71-2906-4B2D-A0E1-80CF16844F5F/dotNetFx45_Full_x86_x64.exe"
+# $FileName = $DownLoadUrl.Split('/')[-1]
+# download-file $downloadurl "$powershellpath\$filename"
+# ."$powershellpath\$filename" /quiet /norestart
+#}
+
+# You may need to reboot after the .NET install; if so, just run the script again.
+
+# If the OS major version is above 6 (Windows 10 / Server 2016 and later), PowerShell 3 or higher is already present
+if ([Environment]::OSVersion.Version.Major -gt 6)
+{
+ Write-Output "OS is new; upgrade not needed."
+ Exit
+}
+
+
+$osminor = [environment]::OSVersion.Version.Minor
+
+$architecture = $ENV:PROCESSOR_ARCHITECTURE
+
+if ($architecture -eq "AMD64")
+{
+ $architecture = "x64"
+}
+else
+{
+ $architecture = "x86"
+}
+
+if ($osminor -eq 1)
+{
+ $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.1-KB2506143-" + $architecture + ".msu"
+}
+elseif ($osminor -eq 0)
+{
+ $DownloadUrl = "http://download.microsoft.com/download/E/7/6/E76850B8-DA6E-4FF5-8CCE-A24FC513FD16/Windows6.0-KB2506146-" + $architecture + ".msu"
+}
+else
+{
+    # Nothing to do; in theory this point will never be reached.
+ Exit
+}
+
+$FileName = $DownLoadUrl.Split('/')[-1]
+download-file $downloadurl "$powershellpath\$filename"
+
+Start-Process -FilePath "$powershellpath\$filename" -ArgumentList /quiet
diff --git a/hacking/build-ansible.py b/hacking/build-ansible.py
new file mode 100755
index 00000000..8ebb88d3
--- /dev/null
+++ b/hacking/build-ansible.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+# coding: utf-8
+# PYTHON_ARGCOMPLETE_OK
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
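+
+# Example (illustrative) invocation, using the 'collection-meta' subcommand
+# provided by the command plugins under hacking/build_library/build_ansible/:
+#   hacking/build-ansible.py collection-meta --help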
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import argparse
+import os.path
+import sys
+
+from straight.plugin import load
+
+try:
+ import argcomplete
+except ImportError:
+ argcomplete = None
+
+
+def build_lib_path(this_script=__file__):
+ """Return path to the common build library directory."""
+ hacking_dir = os.path.dirname(this_script)
+ libdir = os.path.abspath(os.path.join(hacking_dir, 'build_library'))
+
+ return libdir
+
+
+def ansible_lib_path(this_script=__file__):
+    """Return path to the Ansible library directory."""
+ hacking_dir = os.path.dirname(this_script)
+ libdir = os.path.abspath(os.path.join(hacking_dir, '..', 'lib'))
+
+ return libdir
+
+
+sys.path.insert(0, ansible_lib_path())
+sys.path.insert(0, build_lib_path())
+
+
+from build_ansible import commands, errors
+
+
+def create_arg_parser(program_name):
+ """
+ Creates a command line argument parser
+
+ :arg program_name: The name of the script. Used in help texts
+ """
+ parser = argparse.ArgumentParser(prog=program_name,
+ description="Implements utilities to build Ansible")
+ return parser
+
+
+def main():
+ """
+ Start our run.
+
+ "It all starts here"
+ """
+ subcommands = load('build_ansible.command_plugins', subclasses=commands.Command)
+
+ arg_parser = create_arg_parser(os.path.basename(sys.argv[0]))
+ arg_parser.add_argument('--debug', dest='debug', required=False, default=False,
+ action='store_true',
+ help='Show tracebacks and other debugging information')
+ subparsers = arg_parser.add_subparsers(title='Subcommands', dest='command',
+                                           help='for help, use build-ansible.py SUBCOMMAND -h')
+ subcommands.pipe('init_parser', subparsers.add_parser)
+
+ if argcomplete:
+ argcomplete.autocomplete(arg_parser)
+
+ args = arg_parser.parse_args(sys.argv[1:])
+ if args.command is None:
+ print('Please specify a subcommand to run')
+ sys.exit(1)
+
+ for subcommand in subcommands:
+ if subcommand.name == args.command:
+ command = subcommand
+ break
+ else:
+ # Note: We should never trigger this because argparse should shield us from it
+ print('Error: {0} was not a recognized subcommand'.format(args.command))
+ sys.exit(1)
+
+ try:
+ retval = command.main(args)
+ except (errors.DependencyError, errors.MissingUserInput, errors.InvalidUserInput) as e:
+ print(e)
+ if args.debug:
+ raise
+ sys.exit(2)
+
+ sys.exit(retval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/hacking/build_library/__init__.py b/hacking/build_library/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/hacking/build_library/__init__.py
diff --git a/hacking/build_library/build_ansible/__init__.py b/hacking/build_library/build_ansible/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/hacking/build_library/build_ansible/__init__.py
diff --git a/hacking/build_library/build_ansible/announce.py b/hacking/build_library/build_ansible/announce.py
new file mode 100644
index 00000000..c245bfb9
--- /dev/null
+++ b/hacking/build_library/build_ansible/announce.py
@@ -0,0 +1,293 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import asyncio
+import datetime
+import hashlib
+
+import aiohttp
+from jinja2 import Environment, DictLoader
+
+
+VERSION_FRAGMENT = """
+{%- if versions | length > 1 %}
+ {% for version in versions %}
+ {% if loop.last %}and {{ pretty_version(version) }}{% else %}
+ {% if versions | length == 2 %}{{ pretty_version(version) }} {% else %}{{ pretty_version(version) }}, {% endif -%}
+ {% endif -%}
+ {% endfor -%}
+{%- else %}{{ pretty_version(versions[0]) }}{% endif -%}
+"""
+
+LONG_TEMPLATE = """
+{% set plural = False if versions | length == 1 else True %}
+{% set latest_ver = (versions | sort(attribute='ver_obj'))[-1] %}
+
+To: ansible-releases@redhat.com, ansible-devel@googlegroups.com, ansible-project@googlegroups.com, ansible-announce@googlegroups.com
+Subject: New release{% if plural %}s{% endif %}: {{ version_str }}
+
+{% filter wordwrap %}
+Hi all- we're happy to announce that the general release of {{ version_str }}{% if plural %} are{%- else %} is{%- endif %} now available!
+{% endfilter %}
+
+
+
+How to get it
+-------------
+
+{% for version in versions %}
+$ pip install ansible{% if is_ansible_base(version) %}-base{% endif %}=={{ version }} --user
+{% if not loop.last %}
+or
+{% endif %}
+{% endfor %}
+
+The tar.gz of the release{% if plural %}s{% endif %} can be found here:
+
+{% for version in versions %}
+* {{ pretty_version(version) }}
+{% if is_ansible_base(version) %}
+ https://pypi.python.org/packages/source/a/ansible-base/ansible-base-{{ version }}.tar.gz
+{% else %}
+ https://pypi.python.org/packages/source/a/ansible/ansible-{{ version }}.tar.gz
+{% endif %}
+ SHA256: {{ hashes[version] }}
+{% endfor %}
+
+
+What's new in {{ version_str }}
+{{ '-' * (14 + version_str | length) }}
+
+{% filter wordwrap %}
+{% if plural %}These releases are{% else %}This release is a{% endif %} maintenance release{% if plural %}s{% endif %} containing numerous bugfixes. The full {% if plural %} changelogs are{% else %} changelog is{% endif %} at:
+{% endfilter %}
+
+
+{% for version in versions %}
+* {{ version }}
+ https://github.com/ansible/ansible/blob/stable-{{ version.split('.')[:2] | join('.') }}/changelogs/CHANGELOG-v{{ version.split('.')[:2] | join('.') }}.rst
+{% endfor %}
+
+
+What's the schedule for future maintenance releases?
+----------------------------------------------------
+
+{% filter wordwrap %}
+Future maintenance releases will occur approximately every 3 weeks. So expect the next one around {{ next_release.strftime('%Y-%m-%d') }}.
+{% endfilter %}
+
+
+
+Porting Help
+------------
+
+{% filter wordwrap %}
+We've published a porting guide at
+https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_{{ latest_ver.split('.')[:2] | join('.') }}.html to help migrate your content to {{ latest_ver.split('.')[:2] | join('.') }}.
+{% endfilter %}
+
+
+
+{% filter wordwrap %}
+If you discover any errors or if any of your working playbooks break when you upgrade to {{ latest_ver }}, please use the following link to report the regression:
+{% endfilter %}
+
+
+ https://github.com/ansible/ansible/issues/new/choose
+
+{% filter wordwrap %}
+In your issue, be sure to mention the version that works and the one that doesn't.
+{% endfilter %}
+
+
+Thanks!
+
+-{{ name }}
+
+""" # noqa for E501 (line length).
+# jinja2 is horrid about getting rid of extra newlines, so we have to have a
+# single newline per paragraph for proper wrapping to occur
+
+SHORT_TEMPLATE = """
+{% set plural = False if versions | length == 1 else True %}
+{% set version = (versions|sort(attribute='ver_obj'))[-1] %}
+@ansible
+{{ version_str }}
+{% if plural %}
+ have
+{% else %}
+ has
+{% endif %}
+been released! Get
+{% if plural %}
+them
+{% else %}
+it
+{% endif %}
+on PyPI: pip install ansible{% if is_ansible_base(version) %}-base{% endif %}=={{ version }},
+the Ansible PPA on Launchpad, or GitHub. Happy automating!
+""" # noqa for E501 (line length).
+# jinja2 is horrid about getting rid of extra newlines, so we have to have a
+# single newline per paragraph for proper wrapping to occur
+
+JINJA_ENV = Environment(
+ loader=DictLoader({'long': LONG_TEMPLATE,
+ 'short': SHORT_TEMPLATE,
+ 'version_string': VERSION_FRAGMENT,
+ }),
+ extensions=['jinja2.ext.i18n'],
+ trim_blocks=True,
+ lstrip_blocks=True,
+)
+
+
+async def calculate_hash_from_tarball(session, version):
+ tar_url = f'https://pypi.python.org/packages/source/a/ansible-base/ansible-base-{version}.tar.gz'
+ tar_task = asyncio.create_task(session.get(tar_url))
+ tar_response = await tar_task
+
+ tar_hash = hashlib.sha256()
+ while True:
+ chunk = await tar_response.content.read(1024)
+ if not chunk:
+ break
+ tar_hash.update(chunk)
+
+ return tar_hash.hexdigest()
+
+
+async def parse_hash_from_file(session, version):
+ filename = f'ansible-base-{version}.tar.gz'
+ hash_url = f'https://releases.ansible.com/ansible-base/{filename}.sha'
+ hash_task = asyncio.create_task(session.get(hash_url))
+ hash_response = await hash_task
+
+ hash_content = await hash_response.read()
+ precreated_hash, precreated_filename = hash_content.split(None, 1)
+ if filename != precreated_filename.strip().decode('utf-8'):
+ raise ValueError(f'Hash file contains hash for a different file: {precreated_filename}')
+
+ return precreated_hash.decode('utf-8')
+
+
+async def get_hash(session, version):
+ calculated_hash = await calculate_hash_from_tarball(session, version)
+ precreated_hash = await parse_hash_from_file(session, version)
+
+ if calculated_hash != precreated_hash:
+ raise ValueError(f'Hash in file ansible-base-{version}.tar.gz.sha {precreated_hash} does not'
+ f' match hash of tarball from pypi {calculated_hash}')
+
+ return calculated_hash
+
+
+async def get_hashes(versions):
+ hashes = {}
+ requestors = {}
+ async with aiohttp.ClientSession() as aio_session:
+ for version in versions:
+ requestors[version] = asyncio.create_task(get_hash(aio_session, version))
+
+ for version, request in requestors.items():
+ await request
+ hashes[version] = request.result()
+
+ return hashes
+
+
+def next_release_date(weeks=3):
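+    """Return the date of the next release, a Thursday approximately 'weeks' weeks out."""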
+ days_in_the_future = weeks * 7
+ today = datetime.datetime.now()
+ numeric_today = today.weekday()
+
+ # We release on Thursdays
+ if numeric_today == 3:
+ # 3 is Thursday
+ pass
+ elif numeric_today == 4:
+ # If this is Friday, we can adjust back to Thursday for the next release
+ today -= datetime.timedelta(days=1)
+ elif numeric_today < 3:
+ # Otherwise, slide forward to Thursday
+ today += datetime.timedelta(days=(3 - numeric_today))
+ else:
+ # slightly different formula if it's past Thursday this week. We need to go forward to
+ # Thursday of next week
+ today += datetime.timedelta(days=(10 - numeric_today))
+
+ next_release = today + datetime.timedelta(days=days_in_the_future)
+ return next_release
+
+
+def is_ansible_base(version):
+ '''
+ Determines if a version is an ansible-base version or not, by checking
+ if it is >= 2.10.0. Stops comparing when it gets to the first non-numeric
+ component to allow for .dev and .beta suffixes.
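+
+    For example (illustrative): is_ansible_base('2.10.0rc1') is True, while
+    is_ansible_base('2.9.15') is False.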
+ '''
+ # Ignore .beta/.dev suffixes
+ ver_split = []
+ for component in version.split('.'):
+ if not component.isdigit():
+ if 'rc' in component:
+ ver_split.append(int(component.split('rc')[0]))
+ if 'b' in component:
+ ver_split.append(int(component.split('b')[0]))
+ continue
+ ver_split.append(int(component))
+ return tuple(ver_split) >= (2, 10, 0)
+
+
+# Currently only used with a single-element list, but left general for later
+# in case we need to refer to the releases collectively.
+def release_variants(versions):
+ if all(is_ansible_base(v) for v in versions):
+ return 'ansible-base'
+
+ if all(not is_ansible_base(v) for v in versions):
+ return 'Ansible'
+
+ return 'Ansible and ansible-base'
+
+
+def pretty_version(version):
+ return '{0} {1}'.format(
+ release_variants([version]),
+ version,
+ )
+
+
+def create_long_message(versions, name):
+ hashes = asyncio.run(get_hashes(versions))
+
+ version_template = JINJA_ENV.get_template('version_string')
+ version_str = version_template.render(versions=versions,
+ pretty_version=pretty_version).strip()
+
+ next_release = next_release_date()
+
+ template = JINJA_ENV.get_template('long')
+ message = template.render(versions=versions, version_str=version_str,
+ name=name, hashes=hashes, next_release=next_release,
+ is_ansible_base=is_ansible_base,
+ pretty_version=pretty_version)
+ return message
+
+
+def create_short_message(versions):
+ version_template = JINJA_ENV.get_template('version_string')
+ version_str = version_template.render(versions=versions,
+ pretty_version=pretty_version).strip()
+
+ template = JINJA_ENV.get_template('short')
+ message = template.render(versions=versions, version_str=version_str,
+ is_ansible_base=is_ansible_base,
+ pretty_version=pretty_version)
+ message = ' '.join(message.split()) + '\n'
+ return message
diff --git a/hacking/build_library/build_ansible/change_detection.py b/hacking/build_library/build_ansible/change_detection.py
new file mode 100644
index 00000000..22e21d3c
--- /dev/null
+++ b/hacking/build_library/build_ansible/change_detection.py
@@ -0,0 +1,33 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def update_file_if_different(filename, b_data):
+ """
+ Replace file content only if content is different.
+
+ This preserves timestamps in case the file content has not changed. It performs multiple
+ operations on the file so it is not atomic and may be slower than simply writing to the file.
+
+ :arg filename: The filename to write to
+    :arg b_data: Byte string containing the data to write to the file
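+    :returns: True if the file was written, False if the content was already identical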
+ """
+ try:
+ with open(filename, 'rb') as f:
+ b_data_old = f.read()
+ except IOError as e:
+ if e.errno != 2:
+ raise
+ # File did not exist, set b_data_old to a sentinel value so that
+ # b_data gets written to the filename
+ b_data_old = None
+
+ if b_data_old != b_data:
+ with open(filename, 'wb') as f:
+ f.write(b_data)
+ return True
+
+ return False
diff --git a/hacking/build_library/build_ansible/command_plugins/collection_meta.py b/hacking/build_library/build_ansible/command_plugins/collection_meta.py
new file mode 100644
index 00000000..08c20c94
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/collection_meta.py
@@ -0,0 +1,72 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import pathlib
+
+import yaml
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from antsibull.jinja2.environment import doc_environment
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+
+
+DEFAULT_TEMPLATE_FILE = 'collections_galaxy_meta.rst.j2'
+DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
+
+
+def normalize_options(options):
+ """Normalize the options to make for easy templating"""
+ for opt in options:
+ if isinstance(opt['description'], string_types):
+ opt['description'] = [opt['description']]
+
+
+class DocumentCollectionMeta(Command):
+ name = 'collection-meta'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name, description='Generate collection galaxy.yml documentation from shared metadata')
+ parser.add_argument("-t", "--template-file", action="store", dest="template_file",
+ default=DEFAULT_TEMPLATE_FILE,
+ help="Jinja2 template to use for the config")
+ parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
+ default=str(DEFAULT_TEMPLATE_DIR),
+ help="directory containing Jinja2 templates")
+ parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
+ help="Output directory for rst files")
+ parser.add_argument("collection_defs", metavar="COLLECTION-OPTION-DEFINITIONS.yml", type=str,
+ help="Source for collection metadata option docs")
+
+ @staticmethod
+ def main(args):
+ output_dir = os.path.abspath(args.output_dir)
+ template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
+ template_file = os.path.basename(template_file_full_path)
+ template_dir = os.path.dirname(template_file_full_path)
+
+ with open(args.collection_defs) as f:
+ options = yaml.safe_load(f)
+
+ normalize_options(options)
+
+ env = doc_environment(template_dir)
+
+ template = env.get_template(template_file)
+ output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
+ temp_vars = {'options': options}
+
+ data = to_bytes(template.render(temp_vars))
+ update_file_if_different(output_name, data)
+
+ return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/docs_build.py b/hacking/build_library/build_ansible/command_plugins/docs_build.py
new file mode 100644
index 00000000..21b09b03
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/docs_build.py
@@ -0,0 +1,168 @@
+# coding: utf-8
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+import glob
+import os
+import os.path
+import pathlib
+import shutil
+from tempfile import TemporaryDirectory
+
+import yaml
+
+from ansible.release import __version__ as ansible_base__version__
+
+# Pylint doesn't understand Python3 namespace modules.
+# pylint: disable=relative-beyond-top-level
+from ..commands import Command
+# pylint: enable=relative-beyond-top-level
+
+
+__metaclass__ = type
+
+
+DEFAULT_TOP_DIR = pathlib.Path(__file__).parents[4]
+DEFAULT_OUTPUT_DIR = pathlib.Path(__file__).parents[4] / 'docs/docsite'
+
+
+#
+# Subcommand base
+#
+
+def generate_base_docs(args):
+    """Regenerate the rst documentation for the plugins shipped with ansible-base."""
+ # imports here so that they don't cause unnecessary deps for all of the plugins
+ from antsibull.cli import antsibull_docs
+
+ with TemporaryDirectory() as tmp_dir:
+ #
+ # Construct a deps file with our version of ansible_base in it
+ #
+ modified_deps_file = os.path.join(tmp_dir, 'ansible.deps')
+
+ # The _ansible_version doesn't matter since we're only building docs for base
+ deps_file_contents = {'_ansible_version': ansible_base__version__,
+ '_ansible_base_version': ansible_base__version__}
+
+ with open(modified_deps_file, 'w') as f:
+ f.write(yaml.dump(deps_file_contents))
+
+ # Generate the plugin rst
+ return antsibull_docs.run(['antsibull-docs', 'stable', '--deps-file', modified_deps_file,
+ '--ansible-base-source', str(args.top_dir),
+ '--dest-dir', args.output_dir])
+
+ # If we make this more than just a driver for antsibull:
+ # Run other rst generation
+ # Run sphinx build
+
+
+#
+# Subcommand full
+#
+
+def generate_full_docs(args):
+ """Regenerate the documentation for all plugins listed in the plugin_to_collection_file."""
+ # imports here so that they don't cause unnecessary deps for all of the plugins
+ import sh
+ from antsibull.cli import antsibull_docs
+ from packaging.version import Version
+
+ ansible_base_ver = Version(ansible_base__version__)
+ ansible_base_major_ver = '{0}.{1}'.format(ansible_base_ver.major, ansible_base_ver.minor)
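+    # e.g. an ansible-base __version__ of '2.10.4' yields a major_ver of '2.10'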
+
+ with TemporaryDirectory() as tmp_dir:
+ sh.git(['clone', 'https://github.com/ansible-community/ansible-build-data'], _cwd=tmp_dir)
+ # This is wrong. Once ansible and ansible-base major.minor versions get out of sync this
+ # will stop working. We probably need to walk all subdirectories in reverse version order
+ # looking for the latest ansible version which uses something compatible with
+ # ansible_base_major_ver.
+ deps_files = glob.glob(os.path.join(tmp_dir, 'ansible-build-data',
+ ansible_base_major_ver, '*.deps'))
+ if not deps_files:
+ raise Exception('No deps files exist for version {0}'.format(ansible_base_major_ver))
+
+ # Find the latest version of the deps file for this version
+ latest = None
+ latest_ver = Version('0')
+ for filename in deps_files:
+ with open(filename, 'r') as f:
+ deps_data = yaml.safe_load(f.read())
+ new_version = Version(deps_data['_ansible_version'])
+ if new_version > latest_ver:
+ latest_ver = new_version
+ latest = filename
+
+ # Make a copy of the deps file so that we can set the ansible-base version to use
+ modified_deps_file = os.path.join(tmp_dir, 'ansible.deps')
+ shutil.copyfile(latest, modified_deps_file)
+
+ # Put our version of ansible-base into the deps file
+ with open(modified_deps_file, 'r') as f:
+ deps_data = yaml.safe_load(f.read())
+
+ deps_data['_ansible_base_version'] = ansible_base__version__
+
+ with open(modified_deps_file, 'w') as f:
+ f.write(yaml.dump(deps_data))
+
+ # Generate the plugin rst
+ return antsibull_docs.run(['antsibull-docs', 'stable', '--deps-file', modified_deps_file,
+ '--ansible-base-source', str(args.top_dir),
+ '--dest-dir', args.output_dir])
+
+ # If we make this more than just a driver for antsibull:
+ # Run other rst generation
+ # Run sphinx build
+
+
+class CollectionPluginDocs(Command):
+ name = 'docs-build'
+ _ACTION_HELP = """Action to perform.
+ full: Regenerate the rst for the full ansible website.
+ base: Regenerate the rst for plugins in ansible-base and then build the website.
+ named: Regenerate the rst for the named plugins and then build the website.
+ """
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name,
+ description='Generate documentation for plugins in collections.'
+ ' Plugins in collections will have a stub file in the normal plugin'
+ ' documentation location that says the module is in a collection and'
+                                        ' points to the generated plugin documentation under the collections/'
+ ' hierarchy.')
+ parser.add_argument('action', action='store', choices=('full', 'base', 'named'),
+ default='full', help=cls._ACTION_HELP)
+ parser.add_argument("-o", "--output-dir", action="store", dest="output_dir",
+ default=DEFAULT_OUTPUT_DIR,
+ help="Output directory for generated doc files")
+ parser.add_argument("-t", "--top-dir", action="store", dest="top_dir",
+ default=DEFAULT_TOP_DIR,
+ help="Toplevel directory of this ansible-base checkout or expanded"
+ " tarball.")
+ parser.add_argument("-l", "--limit-to-modules", '--limit-to', action="store",
+ dest="limit_to", default=None,
+                            help="Limit building module documentation to a comma-separated list of"
+                                 " plugins. Specify a non-existing plugin name to build no plugins.")
+
+ @staticmethod
+ def main(args):
+ # normalize CLI args
+
+ if not args.output_dir:
+ args.output_dir = os.path.abspath(str(DEFAULT_OUTPUT_DIR))
+
+ if args.action == 'full':
+ return generate_full_docs(args)
+
+ if args.action == 'base':
+ return generate_base_docs(args)
+ # args.action == 'named' (Invalid actions are caught by argparse)
+ raise NotImplementedError('Building docs for specific files is not yet implemented')
+
+ # return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/dump_config.py b/hacking/build_library/build_ansible/command_plugins/dump_config.py
new file mode 100644
index 00000000..7811f465
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/dump_config.py
@@ -0,0 +1,76 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import pathlib
+
+import yaml
+from jinja2 import Environment, FileSystemLoader
+from ansible.module_utils._text import to_bytes
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+
+
+DEFAULT_TEMPLATE_FILE = 'config.rst.j2'
+DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
+
+
+def fix_description(config_options):
+    '''Some descriptions are strings and some are lists; normalize them all to lists.'''
+
+ for config_key in config_options:
+ description = config_options[config_key].get('description', [])
+ if isinstance(description, list):
+ desc_list = description
+ else:
+ desc_list = [description]
+ config_options[config_key]['description'] = desc_list
+ return config_options
+
+
+class DocumentConfig(Command):
+ name = 'document-config'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name, description='Generate module documentation from metadata')
+ parser.add_argument("-t", "--template-file", action="store", dest="template_file",
+ default=DEFAULT_TEMPLATE_FILE,
+ help="Jinja2 template to use for the config")
+ parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
+ default=str(DEFAULT_TEMPLATE_DIR),
+ help="directory containing Jinja2 templates")
+ parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
+ help="Output directory for rst files")
+ parser.add_argument("config_defs", metavar="CONFIG-OPTION-DEFINITIONS.yml", type=str,
+ help="Source for config option docs")
+
+ @staticmethod
+ def main(args):
+ output_dir = os.path.abspath(args.output_dir)
+ template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
+ template_file = os.path.basename(template_file_full_path)
+ template_dir = os.path.dirname(template_file_full_path)
+
+ with open(args.config_defs) as f:
+ config_options = yaml.safe_load(f)
+
+ config_options = fix_description(config_options)
+
+        env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
+ template = env.get_template(template_file)
+ output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
+ temp_vars = {'config_options': config_options}
+
+ data = to_bytes(template.render(temp_vars))
+ update_file_if_different(output_name, data)
+
+ return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/dump_keywords.py b/hacking/build_library/build_ansible/command_plugins/dump_keywords.py
new file mode 100644
index 00000000..2fc6e5d2
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/dump_keywords.py
@@ -0,0 +1,121 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import importlib
+import os.path
+import pathlib
+import re
+from distutils.version import LooseVersion
+
+import jinja2
+import yaml
+from jinja2 import Environment, FileSystemLoader
+
+from ansible.module_utils._text import to_bytes
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+
+
+DEFAULT_TEMPLATE_DIR = str(pathlib.Path(__file__).resolve().parents[4] / 'docs/templates')
+TEMPLATE_FILE = 'playbooks_keywords.rst.j2'
+PLAYBOOK_CLASS_NAMES = ['Play', 'Role', 'Block', 'Task']
+
+
+def load_definitions(keyword_definitions_file):
+ docs = {}
+ with open(keyword_definitions_file) as f:
+ docs = yaml.safe_load(f)
+
+ return docs
+
+
+def extract_keywords(keyword_definitions):
+ pb_keywords = {}
+ for pb_class_name in PLAYBOOK_CLASS_NAMES:
+ if pb_class_name == 'Play':
+ module_name = 'ansible.playbook'
+ else:
+ module_name = 'ansible.playbook.{0}'.format(pb_class_name.lower())
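+            # e.g. pb_class_name 'Task' maps to 'ansible.playbook.task'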
+ module = importlib.import_module(module_name)
+ playbook_class = getattr(module, pb_class_name, None)
+ if playbook_class is None:
+ raise ImportError("We weren't able to import the module {0}".format(module_name))
+
+ # Maintain order of the actual class names for our output
+ # Build up a mapping of playbook classes to the attributes that they hold
+ pb_keywords[pb_class_name] = {k: v for (k, v) in playbook_class._valid_attrs.items()
+ # Filter private attributes as they're not usable in playbooks
+ if not v.private}
+
+ # pick up definitions if they exist
+ for keyword in tuple(pb_keywords[pb_class_name]):
+ if keyword in keyword_definitions:
+ pb_keywords[pb_class_name][keyword] = keyword_definitions[keyword]
+ else:
+ # check if there is an alias, otherwise undocumented
+ alias = getattr(getattr(playbook_class, '_%s' % keyword), 'alias', None)
+ if alias and alias in keyword_definitions:
+ pb_keywords[pb_class_name][alias] = keyword_definitions[alias]
+ del pb_keywords[pb_class_name][keyword]
+ else:
+ pb_keywords[pb_class_name][keyword] = ' UNDOCUMENTED!! '
+
+ # loop is really with_ for users
+ if pb_class_name == 'Task':
+ pb_keywords[pb_class_name]['with_<lookup_plugin>'] = (
+ 'The same as ``loop`` but magically adds the output of any lookup plugin to'
+ ' generate the item list.')
+
+ # local_action is implicit with action
+ if 'action' in pb_keywords[pb_class_name]:
+ pb_keywords[pb_class_name]['local_action'] = ('Same as action but also implies'
+ ' ``delegate_to: localhost``')
+
+ return pb_keywords
+
+
+def generate_page(pb_keywords, template_dir):
+    env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
+ template = env.get_template(TEMPLATE_FILE)
+ tempvars = {'pb_keywords': pb_keywords, 'playbook_class_names': PLAYBOOK_CLASS_NAMES}
+
+ keyword_page = template.render(tempvars)
+ if LooseVersion(jinja2.__version__) < LooseVersion('2.10'):
+ # jinja2 < 2.10's indent filter indents blank lines. Cleanup
+ keyword_page = re.sub(' +\n', '\n', keyword_page)
+
+ return keyword_page
+
+
+class DocumentKeywords(Command):
+ name = 'document-keywords'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name, description='Generate playbook keyword documentation from'
+ ' code and descriptions')
+ parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
+ default=DEFAULT_TEMPLATE_DIR,
+ help="directory containing Jinja2 templates")
+ parser.add_argument("-o", "--output-dir", action="store", dest="output_dir",
+ default='/tmp/', help="Output directory for rst files")
+ parser.add_argument("keyword_defs", metavar="KEYWORD-DEFINITIONS.yml", type=str,
+ help="Source for playbook keyword docs")
+
+ @staticmethod
+ def main(args):
+ keyword_definitions = load_definitions(args.keyword_defs)
+ pb_keywords = extract_keywords(keyword_definitions)
+
+ keyword_page = generate_page(pb_keywords, args.template_dir)
+ outputname = os.path.join(args.output_dir, TEMPLATE_FILE.replace('.j2', ''))
+ update_file_if_different(outputname, to_bytes(keyword_page))
+
+ return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/file_deprecated_issues.py b/hacking/build_library/build_ansible/command_plugins/file_deprecated_issues.py
new file mode 100644
index 00000000..139ecc4d
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/file_deprecated_issues.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Matt Martz <matt@sivel.net>
+# (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import argparse
+import os
+import time
+
+from collections import defaultdict
+
+from ansible.release import __version__ as ansible_version
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+from .. import errors # pylint: disable=relative-beyond-top-level
+
+ANSIBLE_MAJOR_VERSION = '.'.join(ansible_version.split('.')[:2])
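+# e.g. an ansible_version of '2.10.4' yields '2.10'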
+
+
+def get_token(token_file):
+ if token_file:
+ return token_file.read().strip()
+
+    token = os.getenv('GITHUB_TOKEN', '').strip()
+ if not token:
+ raise errors.MissingUserInput(
+ 'Please provide a file containing a github oauth token with public_repo scope'
+ ' via the --github-token argument or set the GITHUB_TOKEN env var with your'
+ ' github oauth token'
+ )
+ return token
+
+
+def parse_deprecations(problems_file_handle):
+ deprecated = defaultdict(list)
+ deprecation_errors = problems_file_handle.read()
+ for line in deprecation_errors.splitlines():
+ path = line.split(':')[0]
+ if path.endswith('__init__.py'):
+ component = os.path.basename(os.path.dirname(path))
+ else:
+ component, dummy = os.path.splitext(os.path.basename(path).lstrip('_'))
+
+ title = (
+ '%s contains deprecated call to be removed in %s' %
+ (component, ANSIBLE_MAJOR_VERSION)
+ )
+ deprecated[component].append(
+ dict(title=title, path=path, line=line)
+ )
+ return deprecated
+
+
+def find_project_todo_column(repo, project_name):
+ project = None
+ for project in repo.projects():
+ if project.name.lower() == project_name:
+ break
+ else:
+ raise errors.InvalidUserInput('%s was an invalid project name' % project_name)
+
+ for project_column in project.columns():
+ column_name = project_column.name.lower()
+ if 'todo' in column_name or 'backlog' in column_name or 'to do' in column_name:
+ return project_column
+
+ raise Exception('Unable to determine the todo column in'
+ ' project %s' % project_name)
+
+
+def create_issues(deprecated, body_tmpl, repo):
+ issues = []
+
+ for component, items in deprecated.items():
+ title = items[0]['title']
+ path = '\n'.join(set((i['path']) for i in items))
+ line = '\n'.join(i['line'] for i in items)
+ body = body_tmpl % dict(component=component, path=path,
+ line=line,
+ version=ANSIBLE_MAJOR_VERSION)
+
+ issue = repo.create_issue(title, body=body, labels=['deprecated'])
+ print(issue)
+ issues.append(issue)
+
+ # Sleep a little, so that the API doesn't block us
+ time.sleep(0.5)
+
+ return issues
+
+
+class FileDeprecationTickets(Command):
+ name = 'file-deprecation-tickets'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name, description='File tickets to cleanup deprecated features for'
+ ' the next release')
+ parser.add_argument('--template', default='deprecated_issue_template.md',
+ type=argparse.FileType('r'),
+ help='Path to markdown file template to be used for issue '
+ 'body. Default: %(default)s')
+ parser.add_argument('--project-name', default='', type=str,
+ help='Name of a github project to assign all issues to')
+ parser.add_argument('--github-token', type=argparse.FileType('r'),
+ help='Path to file containing a github token with public_repo scope.'
+                                 ' The token in this file will be used to open the deprecation'
+ ' tickets and add them to the github project. If not given,'
+ ' the GITHUB_TOKEN environment variable will be tried')
+ parser.add_argument('problems', type=argparse.FileType('r'),
+ help='Path to file containing pylint output for the '
+ 'ansible-deprecated-version check')
+
+ @staticmethod
+ def main(args):
+ try:
+ from github3 import GitHub
+ except ImportError:
+ raise errors.DependencyError(
+ 'This command needs the github3.py library installed to work'
+ )
+
+        token = get_token(args.github_token)
+        if args.github_token:
+            args.github_token.close()
+
+ deprecated = parse_deprecations(args.problems)
+ args.problems.close()
+
+ body_tmpl = args.template.read()
+ args.template.close()
+
+ project_name = args.project_name.strip().lower()
+
+ gh_conn = GitHub(token=token)
+ repo = gh_conn.repository('abadger', 'ansible')
+
+        project_column = None
+        if project_name:
+            project_column = find_project_todo_column(repo, project_name)
+
+ issues = create_issues(deprecated, body_tmpl, repo)
+
+ if project_column:
+ for issue in issues:
+ project_column.create_card_with_issue(issue)
+ time.sleep(0.5)
+
+ return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/generate_man.py b/hacking/build_library/build_ansible/command_plugins/generate_man.py
new file mode 100644
index 00000000..3795c0d2
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/generate_man.py
@@ -0,0 +1,303 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import argparse
+import os.path
+import pathlib
+import sys
+
+from jinja2 import Environment, FileSystemLoader
+
+from ansible.module_utils._text import to_bytes
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+
+
+DEFAULT_TEMPLATE_FILE = pathlib.Path(__file__).parents[4] / 'docs/templates/man.j2'
+
+
+# from https://www.python.org/dev/peps/pep-0257/
+def trim_docstring(docstring):
+ if not docstring:
+ return ''
+ # Convert tabs to spaces (following the normal Python rules)
+ # and split into a list of lines:
+ lines = docstring.expandtabs().splitlines()
+ # Determine minimum indentation (first line doesn't count):
+ indent = sys.maxsize
+ for line in lines[1:]:
+ stripped = line.lstrip()
+ if stripped:
+ indent = min(indent, len(line) - len(stripped))
+ # Remove indentation (first line is special):
+ trimmed = [lines[0].strip()]
+ if indent < sys.maxsize:
+ for line in lines[1:]:
+ trimmed.append(line[indent:].rstrip())
+ # Strip off trailing and leading blank lines:
+ while trimmed and not trimmed[-1]:
+ trimmed.pop()
+ while trimmed and not trimmed[0]:
+ trimmed.pop(0)
+ # Return a single string:
+ return '\n'.join(trimmed)
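+# Illustrative behaviour (hypothetical input, not exercised by the build):
+#     trim_docstring("Summary line.\n\n        Detail line.\n    ")
+# returns "Summary line.\n\nDetail line." -- common leading indentation and
+# surrounding blank lines are stripped, per PEP 257.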
+
+
+def get_options(optlist):
+ ''' get actual options '''
+
+ opts = []
+ for opt in optlist:
+ res = {
+ 'desc': opt.help,
+ 'options': opt.option_strings
+ }
+ if isinstance(opt, argparse._StoreAction):
+ res['arg'] = opt.dest.upper()
+ elif not res['options']:
+ continue
+ opts.append(res)
+
+ return opts
+
+
+def dedupe_groups(parser):
+ action_groups = []
+ for action_group in parser._action_groups:
+ found = False
+ for a in action_groups:
+ if a._actions == action_group._actions:
+ found = True
+ break
+ if not found:
+ action_groups.append(action_group)
+ return action_groups
+
+
+def get_option_groups(option_parser):
+ groups = []
+ for action_group in dedupe_groups(option_parser)[1:]:
+ group_info = {}
+ group_info['desc'] = action_group.description
+ group_info['options'] = action_group._actions
+ group_info['group_obj'] = action_group
+ groups.append(group_info)
+ return groups
+
+
+def opt_doc_list(parser):
+ ''' iterate over options lists '''
+
+ results = []
+ for option_group in dedupe_groups(parser)[1:]:
+ results.extend(get_options(option_group._actions))
+
+ results.extend(get_options(parser._actions))
+
+ return results
+
+
+# def opts_docs(cli, name):
+def opts_docs(cli_class_name, cli_module_name):
+ ''' generate doc structure from options '''
+
+ cli_name = 'ansible-%s' % cli_module_name
+ if cli_module_name == 'adhoc':
+ cli_name = 'ansible'
+
+    # With no action/subcommand selected, only the shared opts are set.
+    # Instantiate each cli and ask for its options.
+ cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
+ fromlist=[cli_class_name]), cli_class_name)
+ cli = cli_klass([cli_name])
+
+ # parse the common options
+ try:
+ cli.init_parser()
+ except Exception:
+ pass
+
+ # base/common cli info
+ docs = {
+ 'cli': cli_module_name,
+ 'cli_name': cli_name,
+ 'usage': cli.parser.format_usage(),
+ 'short_desc': cli.parser.description,
+ 'long_desc': trim_docstring(cli.__doc__),
+ 'actions': {},
+ 'content_depth': 2,
+ }
+ option_info = {'option_names': [],
+ 'options': [],
+ 'groups': []}
+
+    for extras in ('ARGUMENTS',):  # trailing comma: iterate the tuple, not the string's characters
+ if hasattr(cli, extras):
+ docs[extras.lower()] = getattr(cli, extras)
+
+ common_opts = opt_doc_list(cli.parser)
+ groups_info = get_option_groups(cli.parser)
+ shared_opt_names = []
+ for opt in common_opts:
+ shared_opt_names.extend(opt.get('options', []))
+
+ option_info['options'] = common_opts
+ option_info['option_names'] = shared_opt_names
+
+ option_info['groups'].extend(groups_info)
+
+ docs.update(option_info)
+
+ # now for each action/subcommand
+ # force populate parser with per action options
+
+ def get_actions(parser, docs):
+        # use class attrs, not the attrs on an instance (not that it matters here...)
+ try:
+ subparser = parser._subparsers._group_actions[0].choices
+ except AttributeError:
+ subparser = {}
+
+ depth = 0
+
+ for action, parser in subparser.items():
+ action_info = {'option_names': [],
+ 'options': [],
+ 'actions': {}}
+ # docs['actions'][action] = {}
+ # docs['actions'][action]['name'] = action
+ action_info['name'] = action
+ action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__)
+
+ # docs['actions'][action]['desc'] = getattr(cli, 'execute_%s' % action).__doc__.strip()
+ action_doc_list = opt_doc_list(parser)
+
+ uncommon_options = []
+ for action_doc in action_doc_list:
+ # uncommon_options = []
+
+ option_aliases = action_doc.get('options', [])
+ for option_alias in option_aliases:
+
+ if option_alias in shared_opt_names:
+ continue
+
+ # TODO: use set
+ if option_alias not in action_info['option_names']:
+ action_info['option_names'].append(option_alias)
+
+ if action_doc in action_info['options']:
+ continue
+
+ uncommon_options.append(action_doc)
+
+ action_info['options'] = uncommon_options
+
+ depth = 1 + get_actions(parser, action_info)
+
+ docs['actions'][action] = action_info
+
+ return depth
+
+ action_depth = get_actions(cli.parser, docs)
+ docs['content_depth'] = action_depth + 1
+
+ docs['options'] = opt_doc_list(cli.parser)
+ return docs
+
+
+class GenerateMan(Command):
+ name = 'generate-man'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(name=cls.name,
+ description='Generate cli documentation from cli docstrings')
+
+ parser.add_argument("-t", "--template-file", action="store", dest="template_file",
+ default=DEFAULT_TEMPLATE_FILE, help="path to jinja2 template")
+ parser.add_argument("-o", "--output-dir", action="store", dest="output_dir",
+ default='/tmp/', help="Output directory for rst files")
+ parser.add_argument("-f", "--output-format", action="store", dest="output_format",
+ default='man',
+ help="Output format for docs (the default 'man' or 'rst')")
+ parser.add_argument('cli_modules', help='CLI module name(s)', metavar='MODULE_NAME', nargs='*')
+
+ @staticmethod
+ def main(args):
+ template_file = args.template_file
+ template_path = os.path.expanduser(template_file)
+ template_dir = os.path.abspath(os.path.dirname(template_path))
+ template_basename = os.path.basename(template_file)
+
+ output_dir = os.path.abspath(args.output_dir)
+ output_format = args.output_format
+
+ cli_modules = args.cli_modules
+
+        # various cli parsing code checks sys.argv when the 'args' passed in are [],
+        # so remove any args so the cli modules don't try to parse them and emit warnings
+ sys.argv = [sys.argv[0]]
+
+ allvars = {}
+ output = {}
+ cli_list = []
+ cli_bin_name_list = []
+
+ # for binary in os.listdir('../../lib/ansible/cli'):
+ for cli_module_name in cli_modules:
+ binary = os.path.basename(os.path.expanduser(cli_module_name))
+
+ if not binary.endswith('.py'):
+ continue
+ elif binary == '__init__.py':
+ continue
+
+ cli_name = os.path.splitext(binary)[0]
+
+ if cli_name == 'adhoc':
+ cli_class_name = 'AdHocCLI'
+ # myclass = 'AdHocCLI'
+ output[cli_name] = 'ansible.1.rst.in'
+ cli_bin_name = 'ansible'
+ else:
+ # myclass = "%sCLI" % libname.capitalize()
+ cli_class_name = "%sCLI" % cli_name.capitalize()
+ output[cli_name] = 'ansible-%s.1.rst.in' % cli_name
+ cli_bin_name = 'ansible-%s' % cli_name
+
+ # FIXME:
+ allvars[cli_name] = opts_docs(cli_class_name, cli_name)
+ cli_bin_name_list.append(cli_bin_name)
+
+ cli_list = allvars.keys()
+
+ doc_name_formats = {'man': '%s.1.rst.in',
+ 'rst': '%s.rst'}
+
+ for cli_name in cli_list:
+
+ # template it!
+ env = Environment(loader=FileSystemLoader(template_dir))
+ template = env.get_template(template_basename)
+
+ # add rest to vars
+ tvars = allvars[cli_name]
+ tvars['cli_list'] = cli_list
+ tvars['cli_bin_name_list'] = cli_bin_name_list
+ tvars['cli'] = cli_name
+ if '-i' in tvars['options']:
+ print('uses inventory')
+
+ manpage = template.render(tvars)
+ filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name'])
+ update_file_if_different(filename, to_bytes(manpage))
diff --git a/hacking/build_library/build_ansible/command_plugins/porting_guide.py b/hacking/build_library/build_ansible/command_plugins/porting_guide.py
new file mode 100644
index 00000000..40097a3f
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/porting_guide.py
@@ -0,0 +1,138 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from jinja2 import Environment, DictLoader
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+
+
+PORTING_GUIDE_TEMPLATE = """
+.. _porting_{{ ver }}_guide:
+
+**************************
+Ansible {{ ver }} Porting Guide
+**************************
+
+This section discusses the behavioral changes between Ansible {{ prev_ver }} and Ansible {{ ver }}.
+
+It is intended to assist in updating your playbooks, plugins and other parts of your Ansible infrastructure so they will work with this version of Ansible.
+
+We suggest you read this page along with `Ansible Changelog for {{ ver }} <https://github.com/ansible/ansible/blob/devel/changelogs/CHANGELOG-v{{ ver }}.rst>`_ to understand what updates you may need to make.
+
+This document is part of a collection on porting. The complete list of porting guides can be found at :ref:`porting guides <porting_guides>`.
+
+.. contents:: Topics
+
+
+Playbook
+========
+
+No notable changes
+
+
+Command Line
+============
+
+No notable changes
+
+
+Deprecated
+==========
+
+No notable changes
+
+
+Modules
+=======
+
+No notable changes
+
+
+Modules removed
+---------------
+
+The following modules no longer exist:
+
+* No notable changes
+
+
+Deprecation notices
+-------------------
+
+No notable changes
+
+
+Noteworthy module changes
+-------------------------
+
+No notable changes
+
+
+Plugins
+=======
+
+No notable changes
+
+
+Porting custom scripts
+======================
+
+No notable changes
+
+
+Networking
+==========
+
+No notable changes
+
+""" # noqa for E501 (line length).
+# jinja2 is horrid about getting rid of extra newlines so we have to have a single line per
+# paragraph for proper wrapping to occur
+
+JINJA_ENV = Environment(
+ loader=DictLoader({'porting_guide': PORTING_GUIDE_TEMPLATE,
+ }),
+ extensions=['jinja2.ext.i18n'],
+ trim_blocks=True,
+ lstrip_blocks=True,
+)
+
+
+def generate_porting_guide(version):
+ template = JINJA_ENV.get_template('porting_guide')
+
+ version_list = version.split('.')
+ version_list[-1] = str(int(version_list[-1]) - 1)
+ previous_version = '.'.join(version_list)
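+    # e.g. --version 2.10 yields a previous_version of '2.9'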
+
+ content = template.render(ver=version, prev_ver=previous_version)
+ return content
+
+
+def write_guide(version, guide_content):
+ filename = 'porting_guide_{0}.rst'.format(version)
+ with open(filename, 'w') as out_file:
+ out_file.write(guide_content)
+
+
+class PortingGuideCommand(Command):
+ name = 'porting-guide'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name, description="Generate a fresh porting guide template")
+ parser.add_argument("--version", dest="version", type=str, required=True, action='store',
+ help="Version of Ansible to write the porting guide for")
+
+ @staticmethod
+ def main(args):
+ guide_content = generate_porting_guide(args.version)
+ write_guide(args.version, guide_content)
+ return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/release_announcement.py b/hacking/build_library/build_ansible/command_plugins/release_announcement.py
new file mode 100644
index 00000000..620dda0d
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/release_announcement.py
@@ -0,0 +1,78 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import sys
+from collections import UserString
+from distutils.version import LooseVersion
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+from .. import errors # pylint: disable=relative-beyond-top-level
+
+
+class VersionStr(UserString):
+ def __init__(self, string):
+ super().__init__(string.strip())
+ self.ver_obj = LooseVersion(string)
+
+
+def transform_args(args):
+ # Make it possible to sort versions in the jinja2 templates
+ new_versions = []
+ for version in args.versions:
+ new_versions.append(VersionStr(version))
+ args.versions = new_versions
+
+ return args
+
+
+def write_message(filename, message):
+ if filename != '-':
+ with open(filename, 'w') as out_file:
+ out_file.write(message)
+ else:
+ sys.stdout.write('\n\n')
+ sys.stdout.write(message)
+
+
+class ReleaseAnnouncementCommand(Command):
+ name = 'release-announcement'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name,
+ description="Generate email and twitter announcements from template")
+
+ parser.add_argument("--version", dest="versions", type=str, required=True, action='append',
+ help="Versions of Ansible to announce")
+ parser.add_argument("--name", type=str, required=True, help="Real name to use on emails")
+ parser.add_argument("--email-out", type=str, default="-",
+ help="Filename to place the email announcement into")
+ parser.add_argument("--twitter-out", type=str, default="-",
+ help="Filename to place the twitter announcement into")
+
+ @classmethod
+ def main(cls, args):
+ if sys.version_info < (3, 6):
+ raise errors.DependencyError('The {0} subcommand needs Python-3.6+'
+ ' to run'.format(cls.name))
+
+ # Import here because these functions are invalid on Python-3.5 and the command plugins and
+ # init_parser() method need to be compatible with Python-3.4+ for now.
+ # Pylint doesn't understand Python3 namespace modules.
+        from ..announce import create_short_message, create_long_message  # pylint: disable=relative-beyond-top-level
+
+ args = transform_args(args)
+
+ twitter_message = create_short_message(args.versions)
+ email_message = create_long_message(args.versions, args.name)
+
+ write_message(args.twitter_out, twitter_message)
+ write_message(args.email_out, email_message)
+ return 0
diff --git a/hacking/build_library/build_ansible/command_plugins/update_intersphinx.py b/hacking/build_library/build_ansible/command_plugins/update_intersphinx.py
new file mode 100644
index 00000000..9337859f
--- /dev/null
+++ b/hacking/build_library/build_ansible/command_plugins/update_intersphinx.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import argparse
+import importlib
+import os
+import pathlib
+import time
+import urllib.parse
+
+from collections import defaultdict
+
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.urls import Request
+
+# Pylint doesn't understand Python3 namespace modules.
+from ..commands import Command # pylint: disable=relative-beyond-top-level
+from .. import errors # pylint: disable=relative-beyond-top-level
+
+
+EXAMPLE_CONF = """
+A proper intersphinx_mapping entry should look like:
+ intersphinx_mapping = {
+ 'python3': ('https://docs.python.org/3', (None, 'python3.inv'))
+ }
+
+See the intersphinx docs for more info:
+ https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#confval-intersphinx_mapping
+"""
+
+
+class UpdateIntersphinxCache(Command):
+ name = 'update-intersphinx-cache'
+
+ @classmethod
+ def init_parser(cls, add_parser):
+ parser = add_parser(cls.name, description='Update cached intersphinx mappings. This'
+ ' updates the cached intersphinx mappings for docs to reference'
+ ' documentation from other projects.')
+ parser.add_argument('-o', '--output-dir', action='store',
+ help='Path to directory the cached objects.inv files are stored in')
+ parser.add_argument('-c', '--conf-file', action='store',
+ help='Path to a sphinx config file to retrieve intersphinx config from')
+
+ @staticmethod
+ def main(args):
+ # Retrieve the intersphinx information from the sphinx config file
+ conf_dir = pathlib.Path(args.conf_file).parent
+
+ conf_module_spec = importlib.util.spec_from_file_location('sphinxconf', args.conf_file)
+ conf_module = importlib.util.module_from_spec(conf_module_spec)
+ conf_module_spec.loader.exec_module(conf_module)
+ intersphinx_mapping = conf_module.intersphinx_mapping
+
+ for intersphinx_name, inventory in intersphinx_mapping.items():
+ if not is_iterable(inventory) or len(inventory) != 2:
+ print('WARNING: The intersphinx entry for {0} must be'
+ ' a two-tuple.\n{1}'.format(intersphinx_name, EXAMPLE_CONF))
+ continue
+
+ url = cache_file = None
+ for inv_source in inventory:
+ if isinstance(inv_source, str) and url is None:
+ url = inv_source
+ elif is_iterable(inv_source) and cache_file is None:
+ if len(inv_source) != 2:
+ print('WARNING: The fallback entry for {0} should be a tuple of (None,'
+ ' filename).\n{1}'.format(intersphinx_name, EXAMPLE_CONF))
+ continue
+ cache_file = inv_source[1]
+ else:
+ print('WARNING: The configuration for {0} should be a tuple of one url and one'
+ ' tuple for a fallback filename.\n{1}'.format(intersphinx_name,
+ EXAMPLE_CONF))
+ continue
+
+ if url is None or cache_file is None:
+ print('WARNING: Could not figure out the url or fallback'
+ ' filename for {0}.\n{1}'.format(intersphinx_name, EXAMPLE_CONF))
+ continue
+
+ url = urllib.parse.urljoin(url, 'objects.inv')
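+            # Note: urljoin drops the last path segment of a base URL that lacks a
+            # trailing '/', so intersphinx base URLs are expected to end with '/'
+            # (e.g. 'https://docs.python.org/3/' -> 'https://docs.python.org/3/objects.inv')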
+ # Resolve any relative cache files to be relative to the conf file
+ cache_file = conf_dir / cache_file
+
+ # Retrieve the inventory and cache it
+ # The jinja CDN seems to be blocking the default urllib User-Agent
+ requestor = Request(headers={'User-Agent': 'Definitely Not Python ;-)'})
+ with requestor.open('GET', url) as source_file:
+ with open(cache_file, 'wb') as f:
+ f.write(source_file.read())
+
+ print('Download of new cache files complete. Remember to git commit -a the changes')
+
+ return 0
diff --git a/hacking/build_library/build_ansible/commands.py b/hacking/build_library/build_ansible/commands.py
new file mode 100644
index 00000000..82679934
--- /dev/null
+++ b/hacking/build_library/build_ansible/commands.py
@@ -0,0 +1,50 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+
+class Command(metaclass=ABCMeta):
+ """
+ Subcommands of :program:`build-ansible.py`.
+
+ This defines an interface that all subcommands must conform to. :program:`build-ansible.py`
+ will require that these things are present in order to proceed.
+ """
+ @staticmethod
+ @abstractproperty
+ def name():
+ """Name of the subcommand. It's the string to invoked it via on the command line"""
+
+ @staticmethod
+ @abstractmethod
+ def init_parser(add_parser):
+ """
+ Initialize and register an argparse ArgumentParser
+
+ :arg add_parser: function which creates an ArgumentParser for the main program.
+
+ Implementations should first create an ArgumentParser using `add_parser` and then populate
+ it with the command line arguments that are needed.
+
+ .. seealso:
+ `add_parser` information in the :py:meth:`ArgumentParser.add_subparsers` documentation.
+ """
+
+ @staticmethod
+ @abstractmethod
+ def main(arguments):
+ """
+ Run the command
+
+ :arg arguments: The **parsed** command line args
+
+ This is the Command's entrypoint. The command line args are already parsed but from here
+ on, the command can do its work.
+ """
diff --git a/hacking/build_library/build_ansible/errors.py b/hacking/build_library/build_ansible/errors.py
new file mode 100644
index 00000000..a53d1fb1
--- /dev/null
+++ b/hacking/build_library/build_ansible/errors.py
@@ -0,0 +1,19 @@
+# coding: utf-8
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class DependencyError(Exception):
+ """A dependency was unmet"""
+
+
+class MissingUserInput(Exception):
+ """The user failed to provide input (via cli arg or interactively"""
+
+
+class InvalidUserInput(Exception):
+ """The user provided invalid input"""
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
new file mode 100644
index 00000000..e4905a18
--- /dev/null
+++ b/lib/ansible/__init__.py
@@ -0,0 +1,31 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# make vendored top-level modules accessible EARLY
+import ansible._vendor
+
+# Note: Do not add any code to this file. The ansible module may be
+# a namespace package when using Ansible-2.1+. Anything in this file may not be
+# available if one of the other packages in the namespace is loaded first.
+#
+# This is for backwards compat. Code should be ported to get these from
+# ansible.release instead of from here.
+from ansible.release import __version__, __author__
diff --git a/lib/ansible/_vendor/__init__.py b/lib/ansible/_vendor/__init__.py
new file mode 100644
index 00000000..e6a4c56d
--- /dev/null
+++ b/lib/ansible/_vendor/__init__.py
@@ -0,0 +1,46 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pkgutil
+import sys
+import warnings
+
+# This package exists to host vendored top-level Python packages for downstream packaging. Any Python packages
+# installed beneath this one will be masked from the Ansible loader, and available from the front of sys.path.
+# It is expected that the vendored packages will be loaded very early, so a warning will be fired on import of
+# the top-level ansible package if any packages beneath this are already loaded at that point.
+#
+# Python packages may be installed here during downstream packaging using something like:
+# pip install --upgrade -t (path to this dir) cryptography pyyaml packaging jinja2
+
+# mask vendored content below this package from being accessed as an ansible subpackage
+__path__ = []
+
+
+def _ensure_vendored_path_entry():
+ """
+ Ensure that any downstream-bundled content beneath this package is available at the top of sys.path
+ """
+ # patch our vendored dir onto sys.path
+ vendored_path_entry = os.path.dirname(__file__)
+ vendored_module_names = set(m[1] for m in pkgutil.iter_modules([vendored_path_entry], '')) # m[1] == m.name
+
+ if vendored_module_names:
+ # patch us early to load vendored deps transparently
+ if vendored_path_entry in sys.path:
+ # handle reload case by removing the existing entry, wherever it might be
+ sys.path.remove(vendored_path_entry)
+ sys.path.insert(0, vendored_path_entry)
+
+ already_loaded_vendored_modules = set(sys.modules.keys()).intersection(vendored_module_names)
+
+ if already_loaded_vendored_modules:
+ warnings.warn('One or more Python packages bundled by this ansible-base distribution were already '
+ 'loaded ({0}). This may result in undefined behavior.'.format(', '.join(sorted(already_loaded_vendored_modules))))
+
+
+_ensure_vendored_path_entry()
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
new file mode 100644
index 00000000..86a7e521
--- /dev/null
+++ b/lib/ansible/cli/__init__.py
@@ -0,0 +1,491 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import getpass
+import os
+import subprocess
+import sys
+
+from abc import ABCMeta, abstractmethod
+
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.inventory.manager import InventoryManager
+from ansible.module_utils.six import with_metaclass, string_types
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.release import __version__
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+from ansible.utils.unsafe_proxy import to_unsafe_text
+from ansible.vars.manager import VariableManager
+
+try:
+ import argcomplete
+ HAS_ARGCOMPLETE = True
+except ImportError:
+ HAS_ARGCOMPLETE = False
+
+
+display = Display()
+
+
+class CLI(with_metaclass(ABCMeta, object)):
+ ''' code behind bin/ansible* programs '''
+
+ PAGER = 'less'
+
+ # -F (quit-if-one-screen) -R (allow raw ansi control chars)
+ # -S (chop long lines) -X (disable termcap init and de-init)
+ LESS_OPTS = 'FRSX'
+ SKIP_INVENTORY_DEFAULTS = False
+
+ def __init__(self, args, callback=None):
+ """
+ Base init method for all command line programs
+ """
+
+ if not args:
+ raise ValueError('A non-empty list for args is required')
+
+ self.args = args
+ self.parser = None
+ self.callback = callback
+
+ if C.DEVEL_WARNING and __version__.endswith('dev0'):
+ display.warning(
+ 'You are running the development version of Ansible. You should only run Ansible from "devel" if '
+ 'you are modifying the Ansible engine, or trying out features under development. This is a rapidly '
+ 'changing source of code and can become unstable at any point.'
+ )
+
+ @abstractmethod
+ def run(self):
+ """Run the ansible command
+
+ Subclasses must implement this method. It does the actual work of
+ running an Ansible command.
+ """
+ self.parse()
+
+ display.vv(to_text(opt_help.version(self.parser.prog)))
+
+ if C.CONFIG_FILE:
+ display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
+ else:
+ display.v(u"No config file found; using defaults")
+
+ # warn about deprecated config options
+ for deprecated in C.config.DEPRECATED:
+ name = deprecated[0]
+ why = deprecated[1]['why']
+ if 'alternatives' in deprecated[1]:
+ alt = ', use %s instead' % deprecated[1]['alternatives']
+ else:
+ alt = ''
+ ver = deprecated[1].get('version')
+ date = deprecated[1].get('date')
+ collection_name = deprecated[1].get('collection_name')
+ display.deprecated("%s option, %s%s" % (name, why, alt),
+ version=ver, date=date, collection_name=collection_name)
+
+ @staticmethod
+ def split_vault_id(vault_id):
+ # return (before_@, after_@)
+ # if no @, return whole string as after_
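+        # e.g. 'dev@~/.vault_pass' -> ('dev', '~/.vault_pass'); 'prompt' -> (None, 'prompt')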
+ if '@' not in vault_id:
+ return (None, vault_id)
+
+ parts = vault_id.split('@', 1)
+ ret = tuple(parts)
+ return ret
+
+ @staticmethod
+ def build_vault_ids(vault_ids, vault_password_files=None,
+ ask_vault_pass=None, create_new_password=None,
+ auto_prompt=True):
+ vault_password_files = vault_password_files or []
+ vault_ids = vault_ids or []
+
+ # convert vault_password_files into vault_ids slugs
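+        # e.g. a password file '~/.vault_pass' becomes the slug 'default@~/.vault_pass'
+        # (assuming C.DEFAULT_VAULT_IDENTITY is left at its default of 'default')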
+ for password_file in vault_password_files:
+ id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
+
+ # note this makes --vault-id higher precedence than --vault-password-file
+ # if we want to intertwingle them in order probably need a cli callback to populate vault_ids
+ # used by --vault-id and --vault-password-file
+ vault_ids.append(id_slug)
+
+        # if an action needs an encrypt password (create_new_password=True) and we don't
+        # have other secrets set up, then automatically add a password prompt as well.
+        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
+ if ask_vault_pass or (not vault_ids and auto_prompt):
+
+ id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
+ vault_ids.append(id_slug)
+
+ return vault_ids
+
+ # TODO: remove the now unused args
+ @staticmethod
+ def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
+ ask_vault_pass=None, create_new_password=False,
+ auto_prompt=True):
+ # list of tuples
+ vault_secrets = []
+
+ # Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
+ # we need to show different prompts. This is for compat with older Towers that expect a
+        # certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
+ prompt_formats = {}
+
+ # If there are configured default vault identities, they are considered 'first'
+ # so we prepend them to vault_ids (from cli) here
+
+ vault_password_files = vault_password_files or []
+ if C.DEFAULT_VAULT_PASSWORD_FILE:
+ vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
+
+ if create_new_password:
+ prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
+ 'Confirm new vault password (%(vault_id)s): ']
+ # 2.3 format prompts for --ask-vault-pass
+ prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
+ 'Confirm New Vault password: ']
+ else:
+ prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
+ # The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
+ prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
+
+ vault_ids = CLI.build_vault_ids(vault_ids,
+ vault_password_files,
+ ask_vault_pass,
+ create_new_password,
+ auto_prompt=auto_prompt)
+
+ for vault_id_slug in vault_ids:
+ vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
+ if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
+
+ # --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
+ # confusing since it will use the old format without the vault id in the prompt
+ built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
+
+ # choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
+ # always gets the old format for Tower compatibility.
+ # ie, we used --ask-vault-pass, so we need to use the old vault password prompt
+ # format since Tower needs to match on that format.
+ prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
+ vault_id=built_vault_id)
+
+            # an empty or invalid password from the prompt will warn and continue to the next
+ # without erroring globally
+ try:
+ prompted_vault_secret.load()
+ except AnsibleError as exc:
+ display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
+ raise
+
+ vault_secrets.append((built_vault_id, prompted_vault_secret))
+
+ # update loader with new secrets incrementally, so we can load a vault password
+ # that is encrypted with a vault secret provided earlier
+ loader.set_vault_secrets(vault_secrets)
+ continue
+
+ # assuming anything else is a password file
+ display.vvvvv('Reading vault password file: %s' % vault_id_value)
+ # read vault_pass from a file
+ file_vault_secret = get_file_vault_secret(filename=vault_id_value,
+ vault_id=vault_id_name,
+ loader=loader)
+
+ # an invalid password file will error globally
+ try:
+ file_vault_secret.load()
+ except AnsibleError as exc:
+ display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, to_text(exc)))
+ raise
+
+ if vault_id_name:
+ vault_secrets.append((vault_id_name, file_vault_secret))
+ else:
+ vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
+
+ # update loader with as-yet-known vault secrets
+ loader.set_vault_secrets(vault_secrets)
+
+ return vault_secrets
+
+ @staticmethod
+ def ask_passwords():
+ ''' prompt for connection and become passwords if needed '''
+
+ op = context.CLIARGS
+ sshpass = None
+ becomepass = None
+ become_prompt = ''
+
+ become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
+
+ try:
+ if op['ask_pass']:
+ sshpass = getpass.getpass(prompt="SSH password: ")
+ become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
+ else:
+ become_prompt = "%s password: " % become_prompt_method
+
+ if op['become_ask_pass']:
+ becomepass = getpass.getpass(prompt=become_prompt)
+ if op['ask_pass'] and becomepass == '':
+ becomepass = sshpass
+ except EOFError:
+ pass
+
+ # we 'wrap' the passwords to prevent templating as
+ # they can contain special chars and trigger it incorrectly
+ if sshpass:
+ sshpass = to_unsafe_text(sshpass)
+ if becomepass:
+ becomepass = to_unsafe_text(becomepass)
+
+ return (sshpass, becomepass)
+
+ def validate_conflicts(self, op, runas_opts=False, fork_opts=False):
+ ''' check for conflicting options '''
+
+ if fork_opts:
+ if op.forks < 1:
+ self.parser.error("The number of processes (--forks) must be >= 1")
+
+ return op
+
+ @abstractmethod
+ def init_parser(self, usage="", desc=None, epilog=None):
+ """
+ Create an options parser for most ansible scripts
+
+ Subclasses need to implement this method. They will usually call the base class's
+ init_parser to create a basic version and then add their own options on top of that.
+
+ An implementation will look something like this::
+
+ def init_parser(self):
+ super(MyCLI, self).init_parser(usage="My Ansible CLI", inventory_opts=True)
+ ansible.arguments.option_helpers.add_runas_options(self.parser)
+ self.parser.add_option('--my-option', dest='my_option', action='store')
+ """
+ self.parser = opt_help.create_base_parser(os.path.basename(self.args[0]), usage=usage, desc=desc, epilog=epilog, )
+
+ @abstractmethod
+ def post_process_args(self, options):
+ """Process the command line args
+
+ Subclasses need to implement this method. This method validates and transforms the command
+ line arguments. It can be used to check whether conflicting values were given, whether filenames
+ exist, etc.
+
+ An implementation will look something like this::
+
+ def post_process_args(self, options):
+ options = super(MyCLI, self).post_process_args(options)
+ if options.addition and options.subtraction:
+ raise AnsibleOptionsError('Only one of --addition and --subtraction can be specified')
+ if isinstance(options.listofhosts, string_types):
+ options.listofhosts = string_types.split(',')
+ return options
+ """
+
+ # process tags
+ if hasattr(options, 'tags') and not options.tags:
+            # optparse defaults do not do what's expected
+ options.tags = ['all']
+ if hasattr(options, 'tags') and options.tags:
+ tags = set()
+ for tag_set in options.tags:
+ for tag in tag_set.split(u','):
+ tags.add(tag.strip())
+ options.tags = list(tags)
+
+ # process skip_tags
+ if hasattr(options, 'skip_tags') and options.skip_tags:
+ skip_tags = set()
+ for tag_set in options.skip_tags:
+ for tag in tag_set.split(u','):
+ skip_tags.add(tag.strip())
+ options.skip_tags = list(skip_tags)
+
+ # process inventory options except for CLIs that require their own processing
+ if hasattr(options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
+
+ if options.inventory:
+
+ # should always be list
+ if isinstance(options.inventory, string_types):
+ options.inventory = [options.inventory]
+
+ # Ensure full paths when needed
+ options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in options.inventory]
+ else:
+ options.inventory = C.DEFAULT_HOST_LIST
+
+ # Dup args set on the root parser and sub parsers results in the root parser ignoring the args. e.g. doing
+ # 'ansible-galaxy -vvv init' has no verbosity set but 'ansible-galaxy init -vvv' sets a level of 3. To preserve
+ # back compat with pre-argparse changes we manually scan and set verbosity based on the argv values.
+ if self.parser.prog in ['ansible-galaxy', 'ansible-vault'] and not options.verbosity:
+ verbosity_arg = next(iter([arg for arg in self.args if arg.startswith('-v')]), None)
+ if verbosity_arg:
+ display.deprecated("Setting verbosity before the arg sub command is deprecated, set the verbosity "
+ "after the sub command", "2.13", collection_name='ansible.builtin')
+ options.verbosity = verbosity_arg.count('v')
+
+ return options
+
+ def parse(self):
+ """Parse the command line args
+
+ This method parses the command line arguments. It uses the parser
+ stored in the self.parser attribute and saves the args and options in
+ context.CLIARGS.
+
+ Subclasses need to implement two helper methods, init_parser() and post_process_args() which
+ are called from this function before and after parsing the arguments.
+ """
+ self.init_parser()
+
+ if HAS_ARGCOMPLETE:
+ argcomplete.autocomplete(self.parser)
+
+ try:
+ options = self.parser.parse_args(self.args[1:])
+ except SystemExit as e:
+            if e.code != 0:
+ self.parser.exit(status=2, message=" \n%s " % self.parser.format_help())
+ raise
+ options = self.post_process_args(options)
+ context._init_global_context(options)
+
+ @staticmethod
+ def version_info(gitinfo=False):
+ ''' return full ansible version info '''
+ if gitinfo:
+            # expensive call, use with care
+ ansible_version_string = opt_help.version()
+ else:
+ ansible_version_string = __version__
+ ansible_version = ansible_version_string.split()[0]
+ ansible_versions = ansible_version.split('.')
+ for counter in range(len(ansible_versions)):
+ if ansible_versions[counter] == "":
+ ansible_versions[counter] = 0
+ try:
+ ansible_versions[counter] = int(ansible_versions[counter])
+ except Exception:
+ pass
+ if len(ansible_versions) < 3:
+ for counter in range(len(ansible_versions), 3):
+ ansible_versions.append(0)
+ return {'string': ansible_version_string.strip(),
+ 'full': ansible_version,
+ 'major': ansible_versions[0],
+ 'minor': ansible_versions[1],
+ 'revision': ansible_versions[2]}
+
+ @staticmethod
+ def pager(text):
+ ''' find reasonable way to display text '''
+ # this is a much simpler form of what is in pydoc.py
+ if not sys.stdout.isatty():
+ display.display(text, screen_only=True)
+ elif 'PAGER' in os.environ:
+ if sys.platform == 'win32':
+ display.display(text, screen_only=True)
+ else:
+ CLI.pager_pipe(text, os.environ['PAGER'])
+ else:
+ p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+ if p.returncode == 0:
+ CLI.pager_pipe(text, 'less')
+ else:
+ display.display(text, screen_only=True)
+
+ @staticmethod
+ def pager_pipe(text, cmd):
+ ''' pipe text through a pager '''
+ if 'LESS' not in os.environ:
+ os.environ['LESS'] = CLI.LESS_OPTS
+ try:
+ cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
+ cmd.communicate(input=to_bytes(text))
+ except IOError:
+ pass
+ except KeyboardInterrupt:
+ pass
+
+ @staticmethod
+ def _play_prereqs():
+ options = context.CLIARGS
+
+ # all needs loader
+ loader = DataLoader()
+
+ basedir = options.get('basedir', False)
+ if basedir:
+ loader.set_basedir(basedir)
+ add_all_plugin_dirs(basedir)
+ AnsibleCollectionConfig.playbook_paths = basedir
+ default_collection = _get_collection_name_from_path(basedir)
+ if default_collection:
+ display.warning(u'running with default collection {0}'.format(default_collection))
+ AnsibleCollectionConfig.default_collection = default_collection
+
+ vault_ids = list(options['vault_ids'])
+ default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
+ vault_ids = default_vault_ids + vault_ids
+
+ vault_secrets = CLI.setup_vault_secrets(loader,
+ vault_ids=vault_ids,
+ vault_password_files=list(options['vault_password_files']),
+ ask_vault_pass=options['ask_vault_pass'],
+ auto_prompt=False)
+ loader.set_vault_secrets(vault_secrets)
+
+ # create the inventory, and filter it based on the subset specified (if any)
+ inventory = InventoryManager(loader=loader, sources=options['inventory'])
+
+ # create the variable manager, which will be shared throughout
+ # the code, ensuring a consistent view of global variables
+ variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False))
+
+ return loader, inventory, variable_manager
+
+ @staticmethod
+ def get_host_list(inventory, subset, pattern='all'):
+
+ no_hosts = False
+ if len(inventory.list_hosts()) == 0:
+ # Empty inventory
+ if C.LOCALHOST_WARNING and pattern not in C.LOCALHOST:
+ display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
+ no_hosts = True
+
+ inventory.subset(subset)
+
+ hosts = inventory.list_hosts(pattern)
+ if not hosts and no_hosts is False:
+ raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
+
+ return hosts
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
new file mode 100644
index 00000000..fd39f1a5
--- /dev/null
+++ b/lib/ansible/cli/adhoc.py
@@ -0,0 +1,175 @@
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_text
+from ansible.parsing.splitter import parse_kv
+from ansible.playbook import Playbook
+from ansible.playbook.play import Play
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class AdHocCLI(CLI):
+ ''' is an extra-simple tool/framework/API for doing 'remote things'.
+    This command allows you to define and run a single task 'playbook' against a set of hosts.
+ '''
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+ super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
+ desc="Define and run a single task 'playbook' against"
+ " a set of hosts",
+ epilog="Some modules do not make sense in Ad-Hoc (include,"
+ " meta, etc)")
+
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_async_options(self.parser)
+ opt_help.add_output_options(self.parser)
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+
+ # options unique to ansible ad-hoc
+ self.parser.add_argument('-a', '--args', dest='module_args',
+ help="module arguments", default=C.DEFAULT_MODULE_ARGS)
+ self.parser.add_argument('-m', '--module-name', dest='module_name',
+ help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
+ default=C.DEFAULT_MODULE_NAME)
+ self.parser.add_argument('args', metavar='pattern', help='host pattern')
+
+ def post_process_args(self, options):
+ '''Post process and validate options for bin/ansible '''
+
+ options = super(AdHocCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+
+ return options
+
+ def _play_ds(self, pattern, async_val, poll):
+ check_raw = context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS
+
+ mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': parse_kv(context.CLIARGS['module_args'], check_raw=check_raw)}}
+
+        # only set async/poll when a value was given, and never on actions that do not support them
+        if context.CLIARGS['module_name'] not in C._ACTION_ALL_INCLUDE_ROLE_TASKS and any((async_val, poll)):
+ mytask['async_val'] = async_val
+ mytask['poll'] = poll
+
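+        # illustrative result for ``ansible web -m ping``:
+        #   {'name': 'Ansible Ad-Hoc', 'hosts': 'web', 'gather_facts': 'no',
+        #    'tasks': [{'action': {'module': 'ping', 'args': {}}}]}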
+ return dict(
+ name="Ansible Ad-Hoc",
+ hosts=pattern,
+ gather_facts='no',
+ tasks=[mytask])
+
+ def run(self):
+ ''' create and execute the single task playbook '''
+
+ super(AdHocCLI, self).run()
+
+ # only thing left should be host pattern
+ pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
+
+ sshpass = None
+ becomepass = None
+
+ (sshpass, becomepass) = self.ask_passwords()
+ passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ # get basic objects
+ loader, inventory, variable_manager = self._play_prereqs()
+
+ try:
+ hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
+ except AnsibleError:
+ if context.CLIARGS['subset']:
+ raise
+ else:
+ hosts = []
+ display.warning("No hosts matched, nothing to do")
+
+ if context.CLIARGS['listhosts']:
+ display.display(' hosts (%d):' % len(hosts))
+ for host in hosts:
+ display.display(' %s' % host)
+ return 0
+
+ if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
+ err = "No argument passed to %s module" % context.CLIARGS['module_name']
+ if pattern.endswith(".yml"):
+ err = err + ' (did you mean to run ansible-playbook?)'
+ raise AnsibleOptionsError(err)
+
+ # Avoid modules that don't work with ad-hoc
+ if context.CLIARGS['module_name'] in C._ACTION_IMPORT_PLAYBOOK:
+ raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
+ % context.CLIARGS['module_name'])
+
+ play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
+ play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
+
+ # used in start callback
+ playbook = Playbook(loader)
+ playbook._entries.append(play)
+ playbook._file_name = '__adhoc_playbook__'
+
+ if self.callback:
+ cb = self.callback
+ elif context.CLIARGS['one_line']:
+ cb = 'oneline'
+ # Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
+ elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
+ cb = C.DEFAULT_STDOUT_CALLBACK
+ else:
+ cb = 'minimal'
+
+ run_tree = False
+ if context.CLIARGS['tree']:
+ C.DEFAULT_CALLBACK_WHITELIST.append('tree')
+ C.TREE_DIR = context.CLIARGS['tree']
+ run_tree = True
+
+ # now create a task queue manager to execute the play
+ self._tqm = None
+ try:
+ self._tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ passwords=passwords,
+ stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=run_tree,
+ forks=context.CLIARGS['forks'],
+ )
+
+ self._tqm.load_callbacks()
+ self._tqm.send_callback('v2_playbook_on_start', playbook)
+
+ result = self._tqm.run(play)
+
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+ finally:
+ if self._tqm:
+ self._tqm.cleanup()
+ if loader:
+ loader.cleanup_all_tmp_files()
+
+ return result
diff --git a/lib/ansible/cli/arguments/__init__.py b/lib/ansible/cli/arguments/__init__.py
new file mode 100644
index 00000000..7398e33f
--- /dev/null
+++ b/lib/ansible/cli/arguments/__init__.py
@@ -0,0 +1,5 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/cli/arguments/option_helpers.py b/lib/ansible/cli/arguments/option_helpers.py
new file mode 100644
index 00000000..e18cd6ce
--- /dev/null
+++ b/lib/ansible/cli/arguments/option_helpers.py
@@ -0,0 +1,369 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import operator
+import argparse
+import os
+import os.path
+import sys
+import time
+import yaml
+
+import ansible
+from ansible import constants as C
+from ansible.module_utils._text import to_native
+from ansible.release import __version__
+from ansible.utils.path import unfrackpath
+
+
+#
+# Special purpose OptionParsers
+#
+class SortingHelpFormatter(argparse.HelpFormatter):
+ def add_arguments(self, actions):
+ actions = sorted(actions, key=operator.attrgetter('option_strings'))
+ super(SortingHelpFormatter, self).add_arguments(actions)
+
+
+class AnsibleVersion(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ ansible_version = to_native(version(getattr(parser, 'prog')))
+ print(ansible_version)
+ parser.exit()
+
+
+class UnrecognizedArgument(argparse.Action):
+ def __init__(self, option_strings, dest, const=True, default=None, required=False, help=None, metavar=None, nargs=0):
+ super(UnrecognizedArgument, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const,
+ default=default, required=required, help=help)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ parser.error('unrecognized arguments: %s' % option_string)
+
+
+class PrependListAction(argparse.Action):
+ """A near clone of ``argparse._AppendAction``, but designed to prepend list values
+ instead of appending.
+ """
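+
+    # illustrative behaviour: with ``action=PrependListAction`` and a ``type``
+    # callback that returns a list, ``-M /a -M /b`` stores ['/b', '/a']; later
+    # occurrences are prepended rather than appended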
+ def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None,
+ choices=None, required=False, help=None, metavar=None):
+ if nargs == 0:
+ raise ValueError('nargs for append actions must be > 0; if arg '
+ 'strings are not supplying the value to append, '
+ 'the append const action may be more appropriate')
+ if const is not None and nargs != argparse.OPTIONAL:
+ raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
+ super(PrependListAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=nargs,
+ const=const,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar
+ )
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = copy.copy(ensure_value(namespace, self.dest, []))
+ items[0:0] = values
+ setattr(namespace, self.dest, items)
+
+
+def ensure_value(namespace, name, value):
+ if getattr(namespace, name, None) is None:
+ setattr(namespace, name, value)
+ return getattr(namespace, name)
+
+
+#
+# Callbacks to validate and normalize Options
+#
+def unfrack_path(pathsep=False):
+ """Turn an Option's data into a single path in Ansible locations"""
+ def inner(value):
+ if pathsep:
+ return [unfrackpath(x) for x in value.split(os.pathsep) if x]
+
+ if value == '-':
+ return value
+
+ return unfrackpath(value)
+ return inner
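+
+# Illustrative usage: the returned callable is handed to argparse as a ``type``,
+# as is done further down in this file:
+#   parser.add_argument('--private-key', type=unfrack_path())
+#   parser.add_argument('-M', type=unfrack_path(pathsep=True))  # colon-separated paths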
+
+
+def _git_repo_info(repo_path):
+ """ returns a string containing git branch, commit id and commit date """
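+    # In a submodule checkout .git is a plain file rather than a directory; its
+    # content looks like ``gitdir: ../.git/modules/<name>`` (illustrative), and
+    # points at the real repository directory.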
+ result = None
+ if os.path.exists(repo_path):
+        # If .git is a file rather than a directory, we are inside a submodule checkout.
+ if os.path.isfile(repo_path):
+ try:
+ gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
+                # The gitdir entry in the .git file may be an absolute path.
+ if os.path.isabs(gitdir):
+ repo_path = gitdir
+ else:
+ repo_path = os.path.join(repo_path[:-4], gitdir)
+ except (IOError, AttributeError):
+ return ''
+ with open(os.path.join(repo_path, "HEAD")) as f:
+ line = f.readline().rstrip("\n")
+ if line.startswith("ref:"):
+ branch_path = os.path.join(repo_path, line[5:])
+ else:
+ branch_path = None
+ if branch_path and os.path.exists(branch_path):
+ branch = '/'.join(line.split('/')[2:])
+ with open(branch_path) as f:
+ commit = f.readline()[:10]
+ else:
+ # detached HEAD
+ commit = line[:10]
+ branch = 'detached HEAD'
+ branch_path = os.path.join(repo_path, "HEAD")
+
+ date = time.localtime(os.stat(branch_path).st_mtime)
+ if time.daylight == 0:
+ offset = time.timezone
+ else:
+ offset = time.altzone
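+        # offset is in seconds west of UTC; dividing by -36 yields a +/-HHMM-style
+        # value, e.g. -3600 (UTC+1) / -36 == 100, rendered as "+0100"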
+ result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
+ else:
+ result = ''
+ return result
+
+
+def _gitinfo():
+ basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+ repo_path = os.path.join(basedir, '.git')
+ result = _git_repo_info(repo_path)
+ submodules = os.path.join(basedir, '.gitmodules')
+
+ if not os.path.exists(submodules):
+ return result
+
+ with open(submodules) as f:
+ for line in f:
+ tokens = line.strip().split(' ')
+ if tokens[0] == 'path':
+ submodule_path = tokens[2]
+ submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
+ if not submodule_info:
+ submodule_info = ' not found - use git submodule update --init ' + submodule_path
+ result += "\n {0}: {1}".format(submodule_path, submodule_info)
+ return result
+
+
+def version(prog=None):
+ """ return ansible version """
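+    # illustrative output (values and paths will differ):
+    #   ansible 2.10.4
+    #     config file = /etc/ansible/ansible.cfg
+    #     configured module search path = ['/usr/share/ansible']
+    #     ...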
+ if prog:
+ result = " ".join((prog, __version__))
+ else:
+ result = __version__
+
+ gitinfo = _gitinfo()
+ if gitinfo:
+ result = result + " {0}".format(gitinfo)
+ result += "\n config file = %s" % C.CONFIG_FILE
+ if C.DEFAULT_MODULE_PATH is None:
+ cpath = "Default w/o overrides"
+ else:
+ cpath = C.DEFAULT_MODULE_PATH
+ result = result + "\n configured module search path = %s" % cpath
+ result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
+ result = result + "\n executable location = %s" % sys.argv[0]
+ result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
+ return result
+
+
+#
+# Functions to add pre-canned options to an OptionParser
+#
+
+def create_base_parser(prog, usage="", desc=None, epilog=None):
+ """
+ Create an options parser for all ansible scripts
+ """
+ # base opts
+ parser = argparse.ArgumentParser(
+ prog=prog,
+ formatter_class=SortingHelpFormatter,
+ epilog=epilog,
+ description=desc,
+ conflict_handler='resolve',
+ )
+ version_help = "show program's version number, config file location, configured module search path," \
+ " module location, executable location and exit"
+
+ parser.add_argument('--version', action=AnsibleVersion, nargs=0, help=version_help)
+ add_verbosity_options(parser)
+ return parser
+
+
+def add_verbosity_options(parser):
+ """Add options for verbosity"""
+ parser.add_argument('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
+ help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
+
+
+def add_async_options(parser):
+ """Add options for commands which can launch async tasks"""
+ parser.add_argument('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type=int, dest='poll_interval',
+ help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
+ parser.add_argument('-B', '--background', dest='seconds', type=int, default=0,
+ help='run asynchronously, failing after X seconds (default=N/A)')
+
+
+def add_basedir_options(parser):
+ """Add options for commands which can set a playbook basedir"""
+ parser.add_argument('--playbook-dir', default=C.config.get_config_value('PLAYBOOK_DIR'), dest='basedir', action='store',
+                        help="Since this tool does not use playbooks, use this as a substitute playbook directory. "
+                             "This sets the relative path for many features including roles/ group_vars/ etc.")
+
+
+def add_check_options(parser):
+ """Add options for commands which can run with diagnostic information of tasks"""
+ parser.add_argument("-C", "--check", default=False, dest='check', action='store_true',
+ help="don't make any changes; instead, try to predict some of the changes that may occur")
+ parser.add_argument('--syntax-check', dest='syntax', action='store_true',
+ help="perform a syntax check on the playbook, but do not execute it")
+ parser.add_argument("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
+ help="when changing (small) files and templates, show the differences in those"
+ " files; works great with --check")
+
+
+def add_connect_options(parser):
+    """Add options for commands which need to connect to other hosts"""
+ connect_group = parser.add_argument_group("Connection Options", "control as whom and how to connect to hosts")
+
+ connect_group.add_argument('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
+ help='ask for connection password')
+ connect_group.add_argument('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
+ help='use this file to authenticate the connection', type=unfrack_path())
+ connect_group.add_argument('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
+ help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
+ connect_group.add_argument('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
+ help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
+ connect_group.add_argument('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type=int, dest='timeout',
+ help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
+ connect_group.add_argument('--ssh-common-args', default='', dest='ssh_common_args',
+ help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
+ connect_group.add_argument('--sftp-extra-args', default='', dest='sftp_extra_args',
+ help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
+ connect_group.add_argument('--scp-extra-args', default='', dest='scp_extra_args',
+ help="specify extra arguments to pass to scp only (e.g. -l)")
+ connect_group.add_argument('--ssh-extra-args', default='', dest='ssh_extra_args',
+ help="specify extra arguments to pass to ssh only (e.g. -R)")
+
+ parser.add_argument_group(connect_group)
+
+
+def add_fork_options(parser):
+ """Add options for commands that can fork worker processes"""
+ parser.add_argument('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type=int,
+ help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
+
+
+def add_inventory_options(parser):
+ """Add options for commands that utilize inventory"""
+ parser.add_argument('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
+ help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
+ parser.add_argument('--list-hosts', dest='listhosts', action='store_true',
+ help='outputs a list of matching hosts; does not execute anything else')
+ parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
+ help='further limit selected hosts to an additional pattern')
+
+
+def add_meta_options(parser):
+ """Add options for commands which can launch meta tasks from the command line"""
+ parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
+ help="run handlers even if a task fails")
+ parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
+ help="clear the fact cache for every host in inventory")
+
+
+def add_module_options(parser):
+ """Add options for commands that load modules"""
+ module_path = C.config.get_configuration_definition('DEFAULT_MODULE_PATH').get('default', '')
+ parser.add_argument('-M', '--module-path', dest='module_path', default=None,
+ help="prepend colon-separated path(s) to module library (default=%s)" % module_path,
+ type=unfrack_path(pathsep=True), action=PrependListAction)
+
+
+def add_output_options(parser):
+ """Add options for commands which can change their output"""
+ parser.add_argument('-o', '--one-line', dest='one_line', action='store_true',
+ help='condense output')
+ parser.add_argument('-t', '--tree', dest='tree', default=None,
+ help='log output to this directory')
+
+
+def add_runas_options(parser):
+ """
+ Add options for commands which can run tasks as another user
+
+ Note that this includes the options from add_runas_prompt_options(). Only one of these
+ functions should be used.
+ """
+ runas_group = parser.add_argument_group("Privilege Escalation Options", "control how and which user you become as on target hosts")
+
+ # consolidated privilege escalation (become)
+ runas_group.add_argument("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
+ help="run operations with become (does not imply password prompting)")
+ runas_group.add_argument('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD,
+ help='privilege escalation method to use (default=%s)' % C.DEFAULT_BECOME_METHOD +
+ ', use `ansible-doc -t become -l` to list valid choices.')
+ runas_group.add_argument('--become-user', default=None, dest='become_user', type=str,
+ help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
+
+ add_runas_prompt_options(parser, runas_group=runas_group)
+
+
+def add_runas_prompt_options(parser, runas_group=None):
+ """
+ Add options for commands which need to prompt for privilege escalation credentials
+
+ Note that add_runas_options() includes these options already. Only one of the two functions
+ should be used.
+ """
+ if runas_group is None:
+ runas_group = parser.add_argument_group("Privilege Escalation Options",
+ "control how and which user you become as on target hosts")
+
+ runas_group.add_argument('-K', '--ask-become-pass', dest='become_ask_pass', action='store_true',
+ default=C.DEFAULT_BECOME_ASK_PASS,
+ help='ask for privilege escalation password')
+
+ parser.add_argument_group(runas_group)
+
+
+def add_runtask_options(parser):
+ """Add options for commands that run a task"""
+ parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append",
+ help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
+
+
+def add_subset_options(parser):
+ """Add options for commands which can run a subset of tasks"""
+ parser.add_argument('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
+ help="only run plays and tasks tagged with these values")
+ parser.add_argument('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
+ help="only run plays and tasks whose tags do not match these values")
+
+
+def add_vault_options(parser):
+ """Add options for loading vault files"""
+ parser.add_argument('--vault-id', default=[], dest='vault_ids', action='append', type=str,
+ help='the vault identity to use')
+ base_group = parser.add_mutually_exclusive_group()
+ base_group.add_argument('--ask-vault-password', '--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
+ help='ask for vault password')
+ base_group.add_argument('--vault-password-file', '--vault-pass-file', default=[], dest='vault_password_files',
+ help="vault password file", type=unfrack_path(), action='append')
diff --git a/lib/ansible/cli/config.py b/lib/ansible/cli/config.py
new file mode 100644
index 00000000..a3f84456
--- /dev/null
+++ b/lib/ansible/cli/config.py
@@ -0,0 +1,188 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import shlex
+import subprocess
+import yaml
+
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.config.manager import ConfigManager, Setting, find_ini_config_file
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text, to_bytes
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+class ConfigCLI(CLI):
+ """ Config command line class """
+
+ def __init__(self, args, callback=None):
+
+ self.config_file = None
+ self.config = None
+ super(ConfigCLI, self).__init__(args, callback)
+
+ def init_parser(self):
+
+ super(ConfigCLI, self).init_parser(
+ desc="View ansible configuration.",
+ )
+
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ opt_help.add_verbosity_options(common)
+ common.add_argument('-c', '--config', dest='config_file',
+ help="path to configuration file, defaults to first file found in precedence.")
+
+ subparsers = self.parser.add_subparsers(dest='action')
+ subparsers.required = True
+
+ list_parser = subparsers.add_parser('list', help='Print all config options', parents=[common])
+ list_parser.set_defaults(func=self.execute_list)
+
+ dump_parser = subparsers.add_parser('dump', help='Dump configuration', parents=[common])
+ dump_parser.set_defaults(func=self.execute_dump)
+ dump_parser.add_argument('--only-changed', dest='only_changed', action='store_true',
+ help="Only show configurations that have changed from the default")
+
+ view_parser = subparsers.add_parser('view', help='View configuration file', parents=[common])
+ view_parser.set_defaults(func=self.execute_view)
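+
+        # illustrative invocations of the resulting command line:
+        #   ansible-config list
+        #   ansible-config dump --only-changed
+        #   ansible-config view -c ~/custom-ansible.cfg  (path is hypothetical)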
+
+ # update_parser = subparsers.add_parser('update', help='Update configuration option')
+ # update_parser.set_defaults(func=self.execute_update)
+ # update_parser.add_argument('-s', '--setting', dest='setting',
+ # help="config setting, the section defaults to 'defaults'",
+ # metavar='[section.]setting=value')
+
+ # search_parser = subparsers.add_parser('search', help='Search configuration')
+ # search_parser.set_defaults(func=self.execute_search)
+ # search_parser.add_argument('args', help='Search term', metavar='<search term>')
+
+ def post_process_args(self, options):
+ options = super(ConfigCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+
+ return options
+
+ def run(self):
+
+ super(ConfigCLI, self).run()
+
+ if context.CLIARGS['config_file']:
+ self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
+ b_config = to_bytes(self.config_file)
+ if os.path.exists(b_config) and os.access(b_config, os.R_OK):
+ self.config = ConfigManager(self.config_file)
+ else:
+ raise AnsibleOptionsError('The provided configuration file is missing or not accessible: %s' % to_native(self.config_file))
+ else:
+ self.config = ConfigManager()
+ self.config_file = find_ini_config_file()
+
+ if self.config_file:
+ try:
+ if not os.path.exists(self.config_file):
+ raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
+ elif not os.path.isfile(self.config_file):
+ raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
+
+ os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
+ except Exception:
+ if context.CLIARGS['action'] in ['view']:
+ raise
+ elif context.CLIARGS['action'] in ['edit', 'update']:
+                        display.warning("File does not exist, using empty file: %s" % self.config_file)
+
+ elif context.CLIARGS['action'] == 'view':
+ raise AnsibleError('Invalid or no config file was supplied')
+
+ context.CLIARGS['func']()
+
+ def execute_update(self):
+ '''
+ Updates a single setting in the specified ansible.cfg
+ '''
+ raise AnsibleError("Option not implemented yet")
+
+ # pylint: disable=unreachable
+ if context.CLIARGS['setting'] is None:
+ raise AnsibleOptionsError("update option requires a setting to update")
+
+ (entry, value) = context.CLIARGS['setting'].split('=')
+ if '.' in entry:
+ (section, option) = entry.split('.')
+ else:
+ section = 'defaults'
+ option = entry
+ subprocess.call([
+ 'ansible',
+ '-m', 'ini_file',
+ 'localhost',
+ '-c', 'local',
+ '-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
+ ])
+
+ def execute_view(self):
+ '''
+ Displays the current config file
+ '''
+ try:
+ with open(self.config_file, 'rb') as f:
+ self.pager(to_text(f.read(), errors='surrogate_or_strict'))
+ except Exception as e:
+ raise AnsibleError("Failed to open config file: %s" % to_native(e))
+
+ def execute_edit(self):
+ '''
+ Opens ansible.cfg in the default EDITOR
+ '''
+ raise AnsibleError("Option not implemented yet")
+
+ # pylint: disable=unreachable
+ try:
+ editor = shlex.split(os.environ.get('EDITOR', 'vi'))
+ editor.append(self.config_file)
+ subprocess.call(editor)
+ except Exception as e:
+ raise AnsibleError("Failed to open editor: %s" % to_native(e))
+
+ def execute_list(self):
+ '''
+        List all current config options, reading lib/constants.py, and show the env and config file setting names.
+ '''
+ self.pager(to_text(yaml.dump(self.config.get_configuration_definitions(), Dumper=AnsibleDumper), errors='surrogate_or_strict'))
+
+ def execute_dump(self):
+ '''
+ Shows the current settings, merges ansible.cfg if specified
+ '''
+ # FIXME: deal with plugins, not just base config
+ text = []
+ defaults = self.config.get_configuration_definitions().copy()
+ for setting in self.config.data.get_settings():
+ if setting.name in defaults:
+ defaults[setting.name] = setting
+
+ for setting in sorted(defaults):
+ if isinstance(defaults[setting], Setting):
+ if defaults[setting].origin == 'default':
+ color = 'green'
+ else:
+ color = 'yellow'
+ msg = "%s(%s) = %s" % (setting, defaults[setting].origin, defaults[setting].value)
+ else:
+ color = 'green'
+ msg = "%s(%s) = %s" % (setting, 'default', defaults[setting].get('default'))
+ if not context.CLIARGS['only_changed'] or color == 'yellow':
+ text.append(stringc(msg, color))
+
+ self.pager(to_text('\n'.join(text), errors='surrogate_or_strict'))
diff --git a/lib/ansible/cli/console.py b/lib/ansible/cli/console.py
new file mode 100644
index 00000000..2383ebdd
--- /dev/null
+++ b/lib/ansible/cli/console.py
@@ -0,0 +1,454 @@
+# Copyright: (c) 2014, Nandor Sivok <dominis@haxor.hu>
+# Copyright: (c) 2016, Redhat Inc
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+########################################################
+# ansible-console is an interactive REPL shell for ansible
+# with built-in tab completion for all the documented modules
+#
+# Available commands:
+# cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
+# list - list available hosts in the current path
+#    forks - change the number of forks
+#    become - toggle become
+#    ! - forces the shell module instead of the ansible module (!yum update -y)
+
+import atexit
+import cmd
+import getpass
+import readline
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.parsing.splitter import parse_kv
+from ansible.playbook.play import Play
+from ansible.plugins.loader import module_loader, fragment_loader
+from ansible.utils import plugin_docs
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ConsoleCLI(CLI, cmd.Cmd):
+ ''' a REPL that allows for running ad-hoc tasks against a chosen inventory (based on dominis' ansible-shell).'''
+
+ modules = []
+ ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
+ 'selecting hosts in inventory or any combination of the two separated by commas.'}
+
+ # use specific to console, but fallback to highlight for backwards compatibility
+ NORMAL_PROMPT = C.COLOR_CONSOLE_PROMPT or C.COLOR_HIGHLIGHT
+
+ def __init__(self, args):
+
+ super(ConsoleCLI, self).__init__(args)
+
+ self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'
+
+ self.groups = []
+ self.hosts = []
+ self.pattern = None
+ self.variable_manager = None
+ self.loader = None
+ self.passwords = dict()
+
+ self.modules = None
+ self.cwd = '*'
+
+ # Defaults for these are set from the CLI in run()
+ self.remote_user = None
+ self.become = None
+ self.become_user = None
+ self.become_method = None
+ self.check_mode = None
+ self.diff = None
+ self.forks = None
+
+ cmd.Cmd.__init__(self)
+
+ def init_parser(self):
+ super(ConsoleCLI, self).init_parser(
+ desc="REPL console for executing Ansible tasks.",
+            epilog="This is not a live session/connection; each task executes in the background and returns its results."
+ )
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+
+ # options unique to shell
+ self.parser.add_argument('pattern', help='host pattern', metavar='pattern', default='all', nargs='?')
+ self.parser.add_argument('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+
+ def post_process_args(self, options):
+ options = super(ConsoleCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+ return options
+
+ def get_names(self):
+ return dir(self)
+
+ def cmdloop(self):
+ try:
+ cmd.Cmd.cmdloop(self)
+ except KeyboardInterrupt:
+ self.do_exit(self)
+
+ def set_prompt(self):
+ login_user = self.remote_user or getpass.getuser()
+ self.selected = self.inventory.list_hosts(self.cwd)
+ prompt = "%s@%s (%d)[f:%s]" % (login_user, self.cwd, len(self.selected), self.forks)
+ if self.become and self.become_user in [None, 'root']:
+ prompt += "# "
+ color = C.COLOR_ERROR
+ else:
+ prompt += "$ "
+ color = self.NORMAL_PROMPT
+ self.prompt = stringc(prompt, color, wrap_nonvisible_chars=True)
+
+ def list_modules(self):
+ modules = set()
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ module_loader.add_directory(path)
+
+ module_paths = module_loader._get_paths()
+ for path in module_paths:
+ if path is not None:
+ modules.update(self._find_modules_in_path(path))
+ return modules
+
+ def _find_modules_in_path(self, path):
+
+ if os.path.isdir(path):
+ for module in os.listdir(path):
+ if module.startswith('.'):
+ continue
+                elif os.path.isdir(os.path.join(path, module)):
+                    # recurse and yield modules found in subdirectories
+                    for submodule in self._find_modules_in_path(os.path.join(path, module)):
+                        yield submodule
+ elif module.startswith('__'):
+ continue
+ elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
+ continue
+ elif module in C.IGNORE_FILES:
+ continue
+ elif module.startswith('_'):
+ fullpath = '/'.join([path, module])
+ if os.path.islink(fullpath): # avoids aliases
+ continue
+ module = module.replace('_', '', 1)
+
+ module = os.path.splitext(module)[0] # removes the extension
+ yield module
+
+ def default(self, arg, forceshell=False):
+ """ actually runs modules """
+ if arg.startswith("#"):
+ return False
+
+ if not self.cwd:
+ display.error("No host found")
+ return False
+
+ if arg.split()[0] in self.modules:
+ module = arg.split()[0]
+ module_args = ' '.join(arg.split()[1:])
+ else:
+ module = 'shell'
+ module_args = arg
+
+ if forceshell is True:
+ module = 'shell'
+ module_args = arg
+
+ result = None
+ try:
+ check_raw = module in C._ACTION_ALLOWS_RAW_ARGS
+ play_ds = dict(
+ name="Ansible Shell",
+ hosts=self.cwd,
+ gather_facts='no',
+ tasks=[dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))],
+ remote_user=self.remote_user,
+ become=self.become,
+ become_user=self.become_user,
+ become_method=self.become_method,
+ check_mode=self.check_mode,
+ diff=self.diff,
+ )
+ play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader)
+ except Exception as e:
+ display.error(u"Unable to build command: %s" % to_text(e))
+ return False
+
+ try:
+ cb = 'minimal' # FIXME: make callbacks configurable
+ # now create a task queue manager to execute the play
+ self._tqm = None
+ try:
+ self._tqm = TaskQueueManager(
+ inventory=self.inventory,
+ variable_manager=self.variable_manager,
+ loader=self.loader,
+ passwords=self.passwords,
+ stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=False,
+ forks=self.forks,
+ )
+
+ result = self._tqm.run(play)
+ finally:
+ if self._tqm:
+ self._tqm.cleanup()
+ if self.loader:
+ self.loader.cleanup_all_tmp_files()
+
+ if result is None:
+ display.error("No hosts found")
+ return False
+ except KeyboardInterrupt:
+ display.error('User interrupted execution')
+ return False
+ except Exception as e:
+ display.error(to_text(e))
+ # FIXME: add traceback in very very verbose mode
+ return False
+
+ def emptyline(self):
+ return
+
+ def do_shell(self, arg):
+ """
+ You can run shell commands through the shell module.
+
+ eg.:
+ shell ps uax | grep java | wc -l
+ shell killall python
+ shell halt -n
+
+ You can use the ! to force the shell module. eg.:
+ !ps aux | grep java | wc -l
+ """
+ self.default(arg, True)
+
+ def do_forks(self, arg):
+ """Set the number of forks"""
+ if not arg:
+ display.display('Usage: forks <number>')
+ return
+
+ forks = int(arg)
+ if forks <= 0:
+ display.display('forks must be greater than or equal to 1')
+ return
+
+ self.forks = forks
+ self.set_prompt()
+
+ do_serial = do_forks
+
+ def do_verbosity(self, arg):
+ """Set verbosity level"""
+ if not arg:
+ display.display('Usage: verbosity <number>')
+ else:
+ display.verbosity = int(arg)
+ display.v('verbosity level set to %s' % arg)
+
+ def do_cd(self, arg):
+ """
+ Change active host/group. You can use hosts patterns as well eg.:
+ cd webservers
+ cd webservers:dbservers
+ cd webservers:!phoenix
+ cd webservers:&staging
+ cd webservers:dbservers:&staging:!phoenix
+ """
+ if not arg:
+ self.cwd = '*'
+ elif arg in '/*':
+ self.cwd = 'all'
+ elif self.inventory.get_hosts(arg):
+ self.cwd = arg
+ else:
+ display.display("no host matched")
+
+ self.set_prompt()
+
+ def do_list(self, arg):
+ """List the hosts in the current group"""
+ if arg == 'groups':
+ for group in self.groups:
+ display.display(group)
+ else:
+ for host in self.selected:
+ display.display(host.name)
+
+ def do_become(self, arg):
+ """Toggle whether plays run with become"""
+ if arg:
+ self.become = boolean(arg, strict=False)
+ display.v("become changed to %s" % self.become)
+ self.set_prompt()
+ else:
+            display.display("Please specify a become value, e.g. `become yes`")
+
+ def do_remote_user(self, arg):
+ """Given a username, set the remote user plays are run by"""
+ if arg:
+ self.remote_user = arg
+ self.set_prompt()
+ else:
+ display.display("Please specify a remote user, e.g. `remote_user root`")
+
+ def do_become_user(self, arg):
+ """Given a username, set the user that plays are run by when using become"""
+ if arg:
+ self.become_user = arg
+ else:
+ display.display("Please specify a user, e.g. `become_user jenkins`")
+ display.v("Current user is %s" % self.become_user)
+ self.set_prompt()
+
+ def do_become_method(self, arg):
+ """Given a become_method, set the privilege escalation method when using become"""
+ if arg:
+ self.become_method = arg
+ display.v("become_method changed to %s" % self.become_method)
+ else:
+ display.display("Please specify a become_method, e.g. `become_method su`")
+
+ def do_check(self, arg):
+ """Toggle whether plays run with check mode"""
+ if arg:
+ self.check_mode = boolean(arg, strict=False)
+ display.v("check mode changed to %s" % self.check_mode)
+ else:
+ display.display("Please specify check mode value, e.g. `check yes`")
+
+ def do_diff(self, arg):
+ """Toggle whether plays run with diff"""
+ if arg:
+ self.diff = boolean(arg, strict=False)
+ display.v("diff mode changed to %s" % self.diff)
+ else:
+            display.display("Please specify a diff value, e.g. `diff yes`")
+
+ def do_exit(self, args):
+ """Exits from the console"""
+ sys.stdout.write('\n')
+ return -1
+
+ do_EOF = do_exit
+
+ def helpdefault(self, module_name):
+ if module_name in self.modules:
+ in_path = module_loader.find_plugin(module_name)
+ if in_path:
+ oc, a, _, _ = plugin_docs.get_docstring(in_path, fragment_loader)
+ if oc:
+ display.display(oc['short_description'])
+ display.display('Parameters:')
+ for opt in oc['options'].keys():
+ display.display(' ' + stringc(opt, self.NORMAL_PROMPT) + ' ' + oc['options'][opt]['description'][0])
+ else:
+ display.error('No documentation found for %s.' % module_name)
+ else:
+ display.error('%s is not a valid command, use ? to list all valid commands.' % module_name)
+
+ def complete_cd(self, text, line, begidx, endidx):
+ mline = line.partition(' ')[2]
+ offs = len(mline) - len(text)
+
+ if self.cwd in ('all', '*', '\\'):
+ completions = self.hosts + self.groups
+ else:
+ completions = [x.name for x in self.inventory.list_hosts(self.cwd)]
+
+ return [to_native(s)[offs:] for s in completions if to_native(s).startswith(to_native(mline))]
+
+ def completedefault(self, text, line, begidx, endidx):
+ if line.split()[0] in self.modules:
+ mline = line.split(' ')[-1]
+ offs = len(mline) - len(text)
+ completions = self.module_args(line.split()[0])
+
+ return [s[offs:] + '=' for s in completions if s.startswith(mline)]
+
+ def module_args(self, module_name):
+ in_path = module_loader.find_plugin(module_name)
+ oc, a, _, _ = plugin_docs.get_docstring(in_path, fragment_loader, is_module=True)
+ return list(oc['options'].keys())
+
+ def run(self):
+
+ super(ConsoleCLI, self).run()
+
+ sshpass = None
+ becomepass = None
+
+ # hosts
+ self.pattern = context.CLIARGS['pattern']
+ self.cwd = self.pattern
+
+ # Defaults from the command line
+ self.remote_user = context.CLIARGS['remote_user']
+ self.become = context.CLIARGS['become']
+ self.become_user = context.CLIARGS['become_user']
+ self.become_method = context.CLIARGS['become_method']
+ self.check_mode = context.CLIARGS['check']
+ self.diff = context.CLIARGS['diff']
+ self.forks = context.CLIARGS['forks']
+
+ # dynamically add modules as commands
+ self.modules = self.list_modules()
+ for module in self.modules:
+ setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg))
+ setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module))
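+        # e.g. (illustrative) typing ``ping data=hello`` at the prompt dispatches to
+        # default('ping data=hello'), and ``help ping`` to helpdefault('ping')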
+
+ (sshpass, becomepass) = self.ask_passwords()
+ self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ self.loader, self.inventory, self.variable_manager = self._play_prereqs()
+
+ hosts = self.get_host_list(self.inventory, context.CLIARGS['subset'], self.pattern)
+
+ self.groups = self.inventory.list_groups()
+ self.hosts = [x.name for x in hosts]
+
+ # This hack is to work around readline issues on a mac:
+ # http://stackoverflow.com/a/7116997/541202
+ if 'libedit' in readline.__doc__:
+ readline.parse_and_bind("bind ^I rl_complete")
+ else:
+ readline.parse_and_bind("tab: complete")
+
+ histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history")
+ try:
+ readline.read_history_file(histfile)
+ except IOError:
+ pass
+
+ atexit.register(readline.write_history_file, histfile)
+ self.set_prompt()
+ self.cmdloop()
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
new file mode 100644
index 00000000..41baa796
--- /dev/null
+++ b/lib/ansible/cli/doc.py
@@ -0,0 +1,740 @@
+# Copyright: (c) 2014, James Tanner <tanner.jc@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import json
+import os
+import re
+import textwrap
+import traceback
+import yaml
+
+import ansible.plugins.loader as plugin_loader
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.collections.list import list_collection_dirs
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Container, Sequence
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+from ansible.module_utils.six import string_types
+from ansible.parsing.plugin_docs import read_docstub
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.loader import action_loader, fragment_loader
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import (
+ BLACKLIST,
+ remove_current_collection_from_versions_and_dates,
+ get_docstring,
+ get_versioned_doclink,
+)
+
+display = Display()
+
+
+def jdump(text):
+ try:
+ display.display(json.dumps(text, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
+ except TypeError as e:
+ raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))
+
+
+def add_collection_plugins(plugin_list, plugin_type, coll_filter=None):
+
+ # TODO: take into account runtime.yml once implemented
+ b_colldirs = list_collection_dirs(coll_filter=coll_filter)
+ for b_path in b_colldirs:
+ path = to_text(b_path, errors='surrogate_or_strict')
+ collname = _get_collection_name_from_path(b_path)
+ ptype = C.COLLECTION_PTYPE_COMPAT.get(plugin_type, plugin_type)
+ plugin_list.update(DocCLI.find_plugins(os.path.join(path, 'plugins', ptype), False, plugin_type, collection=collname))
+
+
+class PluginNotFound(Exception):
+ pass
+
+
+class DocCLI(CLI):
+ ''' displays information on modules installed in Ansible libraries.
+ It displays a terse listing of plugins and their short descriptions,
+ provides a printout of their DOCUMENTATION strings,
+ and it can create a short "snippet" which can be pasted into a playbook. '''
+
+ # default ignore list for detailed views
+ IGNORE = ('module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs', 'collection')
+
+ # Warning: If you add more elements here, you also need to add it to the docsite build (in the
+ # ansible-community/antsibull repo)
+ _ITALIC = re.compile(r"\bI\(([^)]+)\)")
+ _BOLD = re.compile(r"\bB\(([^)]+)\)")
+ _MODULE = re.compile(r"\bM\(([^)]+)\)")
+ _LINK = re.compile(r"\bL\(([^)]+), *([^)]+)\)")
+ _URL = re.compile(r"\bU\(([^)]+)\)")
+ _REF = re.compile(r"\bR\(([^)]+), *([^)]+)\)")
+ _CONST = re.compile(r"\bC\(([^)]+)\)")
+ _RULER = re.compile(r"\bHORIZONTALLINE\b")
+
+ def __init__(self, args):
+
+ super(DocCLI, self).__init__(args)
+ self.plugin_list = set()
+
+ @classmethod
+ def tty_ify(cls, text):
+
+ t = cls._ITALIC.sub(r"`\1'", text) # I(word) => `word'
+ t = cls._BOLD.sub(r"*\1*", t) # B(word) => *word*
+ t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
+ t = cls._URL.sub(r"\1", t) # U(word) => word
+ t = cls._LINK.sub(r"\1 <\2>", t) # L(word, url) => word <url>
+ t = cls._REF.sub(r"\1", t) # R(word, sphinx-ref) => word
+ t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
+ t = cls._RULER.sub("\n{0}\n".format("-" * 13), t) # HORIZONTALLINE => -------
+
+ return t
+
+ def init_parser(self):
+
+        coll_filter = 'A supplied argument will be used for filtering; it can be a namespace or a full collection name.'
+
+ super(DocCLI, self).init_parser(
+ desc="plugin documentation tool",
+ epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
+ )
+ opt_help.add_module_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+
+ self.parser.add_argument('args', nargs='*', help='Plugin', metavar='plugin')
+ self.parser.add_argument("-t", "--type", action="store", default='module', dest='type',
+ help='Choose which plugin type (defaults to "module"). '
+                                 'Available plugin types are: {0}'.format(C.DOCUMENTABLE_PLUGINS),
+ choices=C.DOCUMENTABLE_PLUGINS)
+ self.parser.add_argument("-j", "--json", action="store_true", default=False, dest='json_format',
+ help='Change output into json format.')
+
+ exclusive = self.parser.add_mutually_exclusive_group()
+ exclusive.add_argument("-F", "--list_files", action="store_true", default=False, dest="list_files",
+ help='Show plugin names and their source files without summaries (implies --list). %s' % coll_filter)
+ exclusive.add_argument("-l", "--list", action="store_true", default=False, dest='list_dir',
+ help='List available plugins. %s' % coll_filter)
+ exclusive.add_argument("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
+ help='Show playbook snippet for specified plugin(s)')
+ exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump',
+ help='**For internal testing only** Dump json metadata for all plugins.')
+
+ def post_process_args(self, options):
+ options = super(DocCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+
+ return options
+
+ def display_plugin_list(self, results):
+
+ # format for user
+ displace = max(len(x) for x in self.plugin_list)
+ linelimit = display.columns - displace - 5
+ text = []
+
+ # format display per option
+ if context.CLIARGS['list_files']:
+ # list plugin file names
+ for plugin in results.keys():
+ filename = results[plugin]
+ text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
+ else:
+ # list plugin names and short desc
+ deprecated = []
+ for plugin in results.keys():
+ desc = DocCLI.tty_ify(results[plugin])
+
+ if len(desc) > linelimit:
+ desc = desc[:linelimit] + '...'
+
+ if plugin.startswith('_'): # Handle deprecated # TODO: add mark for deprecated collection plugins
+ deprecated.append("%-*s %-*.*s" % (displace, plugin[1:], linelimit, len(desc), desc))
+ else:
+ text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
+
+ if len(deprecated) > 0:
+ text.append("\nDEPRECATED:")
+ text.extend(deprecated)
+
+ # display results
+ DocCLI.pager("\n".join(text))
+
+ def run(self):
+
+ super(DocCLI, self).run()
+
+ plugin_type = context.CLIARGS['type']
+ do_json = context.CLIARGS['json_format']
+
+ if plugin_type in C.DOCUMENTABLE_PLUGINS:
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ else:
+ raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
+
+ # add to plugin paths from command line
+ basedir = context.CLIARGS['basedir']
+ if basedir:
+ AnsibleCollectionConfig.playbook_paths = basedir
+ loader.add_directory(basedir, with_subdir=True)
+
+ if context.CLIARGS['module_path']:
+ for path in context.CLIARGS['module_path']:
+ if path:
+ loader.add_directory(path)
+
+ # save only top level paths for errors
+ search_paths = DocCLI.print_paths(loader)
+ loader._paths = None # reset so we can use subdirs below
+
+ # list plugins names or filepath for type, both options share most code
+ if context.CLIARGS['list_files'] or context.CLIARGS['list_dir']:
+
+ coll_filter = None
+ if len(context.CLIARGS['args']) == 1:
+ coll_filter = context.CLIARGS['args'][0]
+
+ if coll_filter in ('', None):
+ paths = loader._get_paths_with_context()
+ for path_context in paths:
+ self.plugin_list.update(
+ DocCLI.find_plugins(path_context.path, path_context.internal, plugin_type))
+
+ add_collection_plugins(self.plugin_list, plugin_type, coll_filter=coll_filter)
+
+ # get appropriate content depending on option
+ if context.CLIARGS['list_dir']:
+ results = self._get_plugin_list_descriptions(loader)
+ elif context.CLIARGS['list_files']:
+ results = self._get_plugin_list_filenames(loader)
+
+ if do_json:
+ jdump(results)
+ elif self.plugin_list:
+ self.display_plugin_list(results)
+ else:
+ display.warning("No plugins found.")
+ # dump plugin desc/data as JSON
+ elif context.CLIARGS['dump']:
+ plugin_data = {}
+ plugin_names = DocCLI.get_all_plugins_of_type(plugin_type)
+ for plugin_name in plugin_names:
+ plugin_info = DocCLI.get_plugin_metadata(plugin_type, plugin_name)
+ if plugin_info is not None:
+ plugin_data[plugin_name] = plugin_info
+
+ jdump(plugin_data)
+ else:
+ # display specific plugin docs
+ if len(context.CLIARGS['args']) == 0:
+ raise AnsibleOptionsError("Incorrect options passed")
+
+ # get the docs for plugins in the command line list
+ plugin_docs = {}
+ for plugin in context.CLIARGS['args']:
+ try:
+ doc, plainexamples, returndocs, metadata = DocCLI._get_plugin_doc(plugin, plugin_type, loader, search_paths)
+ except PluginNotFound:
+ display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
+ continue
+ except Exception as e:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("%s %s missing documentation (or could not parse"
+ " documentation): %s\n" %
+ (plugin_type, plugin, to_native(e)))
+
+ if not doc:
+ # The doc section existed but was empty
+ continue
+
+ plugin_docs[plugin] = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
+
+ if do_json:
+ jdump(plugin_docs)
+
+ else:
+ # Some changes to how plain text docs are formatted
+ text = []
+ for plugin, doc_data in plugin_docs.items():
+ textret = DocCLI.format_plugin_doc(plugin, plugin_type,
+ doc_data['doc'], doc_data['examples'],
+ doc_data['return'], doc_data['metadata'])
+ if textret:
+ text.append(textret)
+ else:
+ display.warning("No valid documentation was retrieved from '%s'" % plugin)
+
+ if text:
+ DocCLI.pager(''.join(text))
+
+ return 0
+
+ @staticmethod
+ def get_all_plugins_of_type(plugin_type):
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ plugin_list = set()
+ paths = loader._get_paths_with_context()
+ for path_context in paths:
+ plugins_to_add = DocCLI.find_plugins(path_context.path, path_context.internal, plugin_type)
+ plugin_list.update(plugins_to_add)
+ return sorted(set(plugin_list))
+
+ @staticmethod
+ def get_plugin_metadata(plugin_type, plugin_name):
+ # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ loader = getattr(plugin_loader, '%s_loader' % plugin_type)
+ result = loader.find_plugin_with_context(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
+ if not result.resolved:
+ raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))
+ filename = result.plugin_resolved_path
+ collection_name = result.plugin_resolved_collection
+
+ try:
+ doc, __, __, __ = get_docstring(filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
+ collection_name=collection_name, is_module=(plugin_type == 'module'))
+ except Exception:
+ display.vvv(traceback.format_exc())
+ raise AnsibleError("%s %s at %s has a documentation formatting error or is missing documentation." % (plugin_type, plugin_name, filename))
+
+ if doc is None:
+ # Removed plugins don't have any documentation
+ return None
+
+ return dict(
+ name=plugin_name,
+ namespace=DocCLI.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
+ description=doc.get('short_description', "UNKNOWN"),
+ version_added=doc.get('version_added', "UNKNOWN")
+ )
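+ # Illustrative return value (hypothetical plugin and field values):
+ # {'name': 'ping', 'namespace': 'system', 'description': 'Try to connect to host...', 'version_added': '1.0'}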
+
+ @staticmethod
+ def namespace_from_plugin_filepath(filepath, plugin_name, basedir):
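+ # A worked example of the path math below (hypothetical layout): with
+ # filepath '<basedir>/system/ping.py' and plugin_name 'ping', rel_path is
+ # 'system/ping.py', extension_free is 'system/ping' and the returned
+ # namespace is 'system'; a file directly under basedir returns None.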
+ if not basedir.endswith('/'):
+ basedir += '/'
+ rel_path = filepath.replace(basedir, '')
+ extension_free = os.path.splitext(rel_path)[0]
+ namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
+ clean_ns = namespace_only.replace('/', '.')
+ if clean_ns == '':
+ clean_ns = None
+
+ return clean_ns
+
+ @staticmethod
+ def _get_plugin_doc(plugin, plugin_type, loader, search_paths):
+ # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ result = loader.find_plugin_with_context(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
+ if not result.resolved:
+ raise PluginNotFound('%s was not found in %s' % (plugin, search_paths))
+ plugin_name = result.plugin_resolved_name
+ filename = result.plugin_resolved_path
+ collection_name = result.plugin_resolved_collection
+
+ doc, plainexamples, returndocs, metadata = get_docstring(
+ filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
+ collection_name=collection_name, is_module=(plugin_type == 'module'))
+
+ # If the plugin existed but did not have a DOCUMENTATION element and was not removed, it's an error
+ if doc is None:
+ raise ValueError('%s did not contain a DOCUMENTATION attribute' % plugin)
+
+ doc['filename'] = filename
+ doc['collection'] = collection_name
+ return doc, plainexamples, returndocs, metadata
+
+ @staticmethod
+ def _combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
+ # generate extra data
+ if plugin_type == 'module':
+ # is there corresponding action plugin?
+ if plugin in action_loader:
+ doc['has_action'] = True
+ else:
+ doc['has_action'] = False
+
+ # return everything as one dictionary
+ return {'doc': doc, 'examples': plainexamples, 'return': returndocs, 'metadata': metadata}
+
+ @staticmethod
+ def format_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
+ collection_name = doc['collection']
+
+ # TODO: do we really want this?
+ # add_collection_to_versions_and_dates(doc, '(unknown)', is_module=(plugin_type == 'module'))
+ # remove_current_collection_from_versions_and_dates(doc, collection_name, is_module=(plugin_type == 'module'))
+ # remove_current_collection_from_versions_and_dates(
+ # returndocs, collection_name, is_module=(plugin_type == 'module'), return_docs=True)
+
+ # assign from other sections
+ doc['plainexamples'] = plainexamples
+ doc['returndocs'] = returndocs
+ doc['metadata'] = metadata
+
+ if context.CLIARGS['show_snippet'] and plugin_type == 'module':
+ text = DocCLI.get_snippet_text(doc)
+ else:
+ try:
+ text = DocCLI.get_man_text(doc, collection_name, plugin_type)
+ except Exception as e:
+ raise AnsibleError("Unable to retrieve documentation from '%s' due to: %s" % (plugin, to_native(e)))
+
+ return text
+
+ @staticmethod
+ def find_plugins(path, internal, ptype, collection=None):
+ # if internal, collection could be set to `ansible.builtin`
+
+ display.vvvv("Searching %s for plugins" % path)
+
+ plugin_list = set()
+
+ if not os.path.exists(path):
+ display.vvvv("%s does not exist" % path)
+ return plugin_list
+
+ if not os.path.isdir(path):
+ display.vvvv("%s is not a directory" % path)
+ return plugin_list
+
+ bkey = ptype.upper()
+ for plugin in os.listdir(path):
+ display.vvvv("Found %s" % plugin)
+ full_path = '/'.join([path, plugin])
+
+ if plugin.startswith('.'):
+ continue
+ elif os.path.isdir(full_path):
+ continue
+ elif any(plugin.endswith(x) for x in C.BLACKLIST_EXTS):
+ continue
+ elif plugin.startswith('__'):
+ continue
+ elif plugin in C.IGNORE_FILES:
+ continue
+ elif plugin.startswith('_'):
+ if os.path.islink(full_path): # avoids aliases
+ continue
+
+ plugin = os.path.splitext(plugin)[0] # removes the extension
+ plugin = plugin.lstrip('_') # remove underscore from deprecated plugins
+
+ if plugin not in BLACKLIST.get(bkey, ()):
+
+ if collection:
+ plugin = '%s.%s' % (collection, plugin)
+
+ plugin_list.add(plugin)
+ display.vvvv("Added %s" % plugin)
+
+ return plugin_list
+
+ def _get_plugin_list_descriptions(self, loader):
+
+ descs = {}
+ plugins = self._get_plugin_list_filenames(loader)
+ for plugin, filename in plugins.items():
+
+ doc = None
+ try:
+ doc = read_docstub(filename)
+ except Exception:
+ display.warning("%s has a documentation formatting error" % plugin)
+ continue
+
+ if not doc or not isinstance(doc, dict):
+ desc = 'UNDOCUMENTED'
+ else:
+ desc = doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip()
+
+ descs[plugin] = desc
+
+ return descs
+
+ def _get_plugin_list_filenames(self, loader):
+ pfiles = {}
+ for plugin in sorted(self.plugin_list):
+
+ try:
+ # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
+
+ if filename is None:
+ continue
+ if filename.endswith(".ps1"):
+ continue
+ if os.path.isdir(filename):
+ continue
+
+ pfiles[plugin] = filename
+
+ except Exception as e:
+ raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)), orig_exc=e)
+
+ return pfiles
+
+ @staticmethod
+ def print_paths(finder):
+ ''' Returns a string suitable for printing the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in finder._get_paths(subdirs=False):
+ i = to_text(i, errors='surrogate_or_strict')
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ @staticmethod
+ def get_snippet_text(doc):
+
+ text = []
+ desc = DocCLI.tty_ify(doc['short_description'])
+ text.append("- name: %s" % (desc))
+ text.append(" %s:" % (doc['module']))
+ pad = 31
+ subdent = " " * pad
+ limit = display.columns - pad
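+ # Sketch of the emitted snippet for a hypothetical module 'file' with a
+ # required 'path' option (names and descriptions are illustrative only):
+ # - name: Manage files and file properties
+ #   file:
+ #       path:                # (required) Path of the file being managed.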
+
+ for o in sorted(doc['options'].keys()):
+ opt = doc['options'][o]
+ if isinstance(opt['description'], string_types):
+ desc = DocCLI.tty_ify(opt['description'])
+ else:
+ desc = DocCLI.tty_ify(" ".join(opt['description']))
+
+ required = opt.get('required', False)
+ if not isinstance(required, bool):
+ raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
+ if required:
+ desc = "(required) %s" % desc
+ o = '%s:' % o
+ text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
+ text.append('')
+
+ return "\n".join(text)
+
+ @staticmethod
+ def _dump_yaml(struct, indent):
+ return DocCLI.tty_ify('\n'.join([indent + line for line in
+ yaml.dump(struct, default_flow_style=False,
+ Dumper=AnsibleDumper).split('\n')]))
+
+ @staticmethod
+ def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent=''):
+
+ for o in sorted(fields):
+ # Create a copy so we don't modify the original (in case YAML anchors have been used)
+ opt = dict(fields[o])
+
+ required = opt.pop('required', False)
+ if not isinstance(required, bool):
+ raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
+ if required:
+ opt_leadin = "="
+ else:
+ opt_leadin = "-"
+
+ text.append("%s%s %s" % (base_indent, opt_leadin, o))
+
+ if 'description' not in opt:
+ raise AnsibleError("All (sub-)options and return values must have a 'description' field")
+ if isinstance(opt['description'], list):
+ for entry_idx, entry in enumerate(opt['description'], 1):
+ if not isinstance(entry, string_types):
+ raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
+ text.append(textwrap.fill(DocCLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ else:
+ if not isinstance(opt['description'], string_types):
+ raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
+ text.append(textwrap.fill(DocCLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
+ del opt['description']
+
+ aliases = ''
+ if 'aliases' in opt:
+ if len(opt['aliases']) > 0:
+ aliases = "(Aliases: " + ", ".join(to_text(i) for i in opt['aliases']) + ")"
+ del opt['aliases']
+ choices = ''
+ if 'choices' in opt:
+ if len(opt['choices']) > 0:
+ choices = "(Choices: " + ", ".join(to_text(i) for i in opt['choices']) + ")"
+ del opt['choices']
+ default = ''
+ if not return_values:
+ if 'default' in opt or not required:
+ default = "[Default: %s" % to_text(opt.pop('default', '(null)')) + "]"
+
+ text.append(textwrap.fill(DocCLI.tty_ify(aliases + choices + default), limit,
+ initial_indent=opt_indent, subsequent_indent=opt_indent))
+
+ suboptions = []
+ for subkey in ('options', 'suboptions', 'contains', 'spec'):
+ if subkey in opt:
+ suboptions.append((subkey, opt.pop(subkey)))
+
+ conf = {}
+ for config in ('env', 'ini', 'yaml', 'vars', 'keywords'):
+ if config in opt and opt[config]:
+ # Create a copy so we don't modify the original (in case YAML anchors have been used)
+ conf[config] = [dict(item) for item in opt.pop(config)]
+ for ignore in DocCLI.IGNORE:
+ for item in conf[config]:
+ if ignore in item:
+ del item[ignore]
+
+ if conf:
+ text.append(DocCLI._dump_yaml({'set_via': conf}, opt_indent))
+
+ for k in sorted(opt):
+ if k.startswith('_'):
+ continue
+ if isinstance(opt[k], string_types):
+ text.append('%s%s: %s' % (opt_indent, k,
+ textwrap.fill(DocCLI.tty_ify(opt[k]),
+ limit - (len(k) + 2),
+ subsequent_indent=opt_indent)))
+ elif isinstance(opt[k], (Sequence)) and all(isinstance(x, string_types) for x in opt[k]):
+ text.append(DocCLI.tty_ify('%s%s: %s' % (opt_indent, k, ', '.join(opt[k]))))
+ else:
+ text.append(DocCLI._dump_yaml({k: opt[k]}, opt_indent))
+
+ for subkey, subdata in suboptions:
+ text.append('')
+ text.append("%s%s:\n" % (opt_indent, subkey.upper()))
+ DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent)
+ if not suboptions:
+ text.append('')
+
+ @staticmethod
+ def get_man_text(doc, collection_name='', plugin_type=''):
+ # Create a copy so we don't modify the original
+ doc = dict(doc)
+
+ DocCLI.IGNORE = DocCLI.IGNORE + (context.CLIARGS['type'],)
+ opt_indent = " "
+ text = []
+ pad = display.columns * 0.20
+ limit = max(display.columns - int(pad), 70)
+
+ plugin_name = doc.get(context.CLIARGS['type'], doc.get('name')) or doc.get('plugin_type') or plugin_type
+ if collection_name:
+ plugin_name = '%s.%s' % (collection_name, plugin_name)
+
+ text.append("> %s (%s)\n" % (plugin_name.upper(), doc.pop('filename')))
+
+ if isinstance(doc['description'], list):
+ desc = " ".join(doc.pop('description'))
+ else:
+ desc = doc.pop('description')
+
+ text.append("%s\n" % textwrap.fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent,
+ subsequent_indent=opt_indent))
+
+ if doc.get('deprecated', False):
+ text.append("DEPRECATED: \n")
+ if isinstance(doc['deprecated'], dict):
+ if 'removed_at_date' in doc['deprecated']:
+ text.append(
+ "\tReason: %(why)s\n\tWill be removed in a release after %(removed_at_date)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')
+ )
+ else:
+ if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
+ doc['deprecated']['removed_in'] = doc['deprecated']['version']
+ text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
+ else:
+ text.append("%s" % doc.pop('deprecated'))
+ text.append("\n")
+
+ if doc.pop('has_action', False):
+ text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
+
+ if doc.get('options', False):
+ text.append("OPTIONS (= is mandatory):\n")
+ DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
+ text.append('')
+
+ if doc.get('notes', False):
+ text.append("NOTES:")
+ for note in doc['notes']:
+ text.append(textwrap.fill(DocCLI.tty_ify(note), limit - 6,
+ initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append('')
+ text.append('')
+ del doc['notes']
+
+ if doc.get('seealso', False):
+ text.append("SEE ALSO:")
+ for item in doc['seealso']:
+ if 'module' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify('Module %s' % item['module']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ description = item.get('description', 'The official documentation on the %s module.' % item['module'])
+ text.append(textwrap.fill(DocCLI.tty_ify(description), limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('modules/%s_module.html' % item['module'])),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
+ elif 'name' in item and 'link' in item and 'description' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify(item['name']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['description']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['link']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ elif 'ref' in item and 'description' in item:
+ text.append(textwrap.fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']),
+ limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
+ text.append(textwrap.fill(DocCLI.tty_ify(item['description']),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+ text.append(textwrap.fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])),
+ limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
+
+ text.append('')
+ text.append('')
+ del doc['seealso']
+
+ if doc.get('requirements', False):
+ req = ", ".join(doc.pop('requirements'))
+ text.append("REQUIREMENTS:%s\n" % textwrap.fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))
+
+ # Generic handler
+ for k in sorted(doc):
+ if k in DocCLI.IGNORE or not doc[k]:
+ continue
+ if isinstance(doc[k], string_types):
+ text.append('%s: %s' % (k.upper(), textwrap.fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
+ elif isinstance(doc[k], (list, tuple)):
+ text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
+ else:
+ # use empty indent since this affects the start of the yaml doc, not its keys
+ text.append(DocCLI._dump_yaml({k.upper(): doc[k]}, ''))
+ del doc[k]
+ text.append('')
+
+ if doc.get('plainexamples', False):
+ text.append("EXAMPLES:")
+ text.append('')
+ if isinstance(doc['plainexamples'], string_types):
+ text.append(doc.pop('plainexamples').strip())
+ else:
+ text.append(yaml.dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
+ text.append('')
+ text.append('')
+
+ if doc.get('returndocs', False):
+ text.append("RETURN VALUES:")
+ DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True)
+
+ return "\n".join(text)
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
new file mode 100644
index 00000000..447fd128
--- /dev/null
+++ b/lib/ansible/cli/galaxy.py
@@ -0,0 +1,1492 @@
+# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os.path
+import re
+import shutil
+import textwrap
+import time
+import yaml
+
+from yaml.error import YAMLError
+
+import ansible.constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.collection import (
+ build_collection,
+ CollectionRequirement,
+ download_collections,
+ find_existing_collections,
+ install_collections,
+ publish_collection,
+ validate_collection_name,
+ validate_collection_path,
+ verify_collections
+)
+
+from ansible.galaxy.role import GalaxyRole
+from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils import six
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_versioned_doclink
+
+display = Display()
+urlparse = six.moves.urllib.parse.urlparse
+
+
+def _display_header(path, h1, h2, w1=10, w2=7):
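+ # Illustrative output (hypothetical path and headers): calling this with
+ # ('/home/user/.ansible/collections', 'Collection', 'Version') prints:
+ #
+ # # /home/user/.ansible/collections
+ # Collection Version
+ # ---------- -------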
+ display.display('\n# {0}\n{1:{cwidth}} {2:{vwidth}}\n{3} {4}\n'.format(
+ path,
+ h1,
+ h2,
+ '-' * max([len(h1), w1]), # Make sure that the number of dashes is at least the width of the header
+ '-' * max([len(h2), w2]),
+ cwidth=w1,
+ vwidth=w2,
+ ))
+
+
+def _display_role(gr):
+ install_info = gr.install_info
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ display.display("- %s, %s" % (gr.name, version))
+
+
+def _display_collection(collection, cwidth=10, vwidth=7, min_cwidth=10, min_vwidth=7):
+ display.display('{fqcn:{cwidth}} {version:{vwidth}}'.format(
+ fqcn=to_text(collection),
+ version=collection.latest_version,
+ cwidth=max(cwidth, min_cwidth), # Make sure the width isn't smaller than the header
+ vwidth=max(vwidth, min_vwidth)
+ ))
+
+
+def _get_collection_widths(collections):
+ if is_iterable(collections):
+ fqcn_set = set(to_text(c) for c in collections)
+ version_set = set(to_text(c.latest_version) for c in collections)
+ else:
+ fqcn_set = set([to_text(collections)])
+ version_set = set([collections.latest_version])
+
+ fqcn_length = len(max(fqcn_set, key=len))
+ version_length = len(max(version_set, key=len))
+
+ return fqcn_length, version_length
+
+
+class GalaxyCLI(CLI):
+ '''command to manage Ansible roles and collections in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
+
+ SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
+
+ def __init__(self, args):
+ self._raw_args = args
+ self._implicit_role = False
+
+ if len(args) > 1:
+ # Inject role into sys.argv[1] as a backwards compatibility step
+ if args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
+ # TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
+ # Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
+ idx = 2 if args[1].startswith('-v') else 1
+ args.insert(idx, 'role')
+ self._implicit_role = True
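+ # e.g. 'ansible-galaxy install geerlingguy.apache' (hypothetical role) is
+ # rewritten to 'ansible-galaxy role install geerlingguy.apache' before parsing.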
+ # since argparse doesn't allow hidden subparsers, handle dead login arg from raw args after "role" normalization
+ if args[1:3] == ['role', 'login']:
+ display.error(
+ "The login command was removed in late 2020. An API key is now required to publish roles or collections "
+ "to Galaxy. The key can be found at https://galaxy.ansible.com/me/preferences, and passed to the "
+ "ansible-galaxy CLI via a file at {0} or (insecurely) via the `--token` "
+ "command-line argument.".format(to_text(C.GALAXY_TOKEN_PATH)))
+ exit(1)
+
+ self.api_servers = []
+ self.galaxy = None
+ super(GalaxyCLI, self).__init__(args)
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible-galaxy '''
+
+ super(GalaxyCLI, self).init_parser(
+ desc="Perform various Role and Collection related operations.",
+ )
+
+ # Common arguments that apply to more than 1 action
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
+ common.add_argument('--token', '--api-key', dest='api_key',
+ help='The Ansible Galaxy API key which can be found at '
+ 'https://galaxy.ansible.com/me/preferences.')
+ common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
+ default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
+ opt_help.add_verbosity_options(common)
+
+ force = opt_help.argparse.ArgumentParser(add_help=False)
+ force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
+ help='Force overwriting an existing role or collection')
+
+ github = opt_help.argparse.ArgumentParser(add_help=False)
+ github.add_argument('github_user', help='GitHub username')
+ github.add_argument('github_repo', help='GitHub repository')
+
+ offline = opt_help.argparse.ArgumentParser(add_help=False)
+ offline.add_argument('--offline', dest='offline', default=False, action='store_true',
+ help="Don't query the galaxy API when creating roles")
+
+ default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
+ roles_path = opt_help.argparse.ArgumentParser(add_help=False)
+ roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
+ default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
+ help='The path to the directory containing your roles. The default is the first '
+ 'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
+
+ collections_path = opt_help.argparse.ArgumentParser(add_help=False)
+ collections_path.add_argument('-p', '--collection-path', dest='collections_path', type=opt_help.unfrack_path(pathsep=True),
+ default=C.COLLECTIONS_PATHS, action=opt_help.PrependListAction,
+ help="One or more directories to search for collections in addition "
+ "to the default COLLECTIONS_PATHS. Separate multiple paths "
+ "with '{0}'.".format(os.path.pathsep))
+
+ # Add sub parser for the Galaxy role type (role or collection)
+ type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
+ type_parser.required = True
+
+ # Add sub parser for the Galaxy collection actions
+ collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
+ collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
+ collection_parser.required = True
+ self.add_download_options(collection_parser, parents=[common])
+ self.add_init_options(collection_parser, parents=[common, force])
+ self.add_build_options(collection_parser, parents=[common, force])
+ self.add_publish_options(collection_parser, parents=[common])
+ self.add_install_options(collection_parser, parents=[common, force])
+ self.add_list_options(collection_parser, parents=[common, collections_path])
+ self.add_verify_options(collection_parser, parents=[common, collections_path])
+
+ # Add sub parser for the Galaxy role actions
+ role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
+ role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
+ role_parser.required = True
+ self.add_init_options(role_parser, parents=[common, force, offline])
+ self.add_remove_options(role_parser, parents=[common, roles_path])
+ self.add_delete_options(role_parser, parents=[common, github])
+ self.add_list_options(role_parser, parents=[common, roles_path])
+ self.add_search_options(role_parser, parents=[common])
+ self.add_import_options(role_parser, parents=[common, github])
+ self.add_setup_options(role_parser, parents=[common, roles_path])
+
+ self.add_info_options(role_parser, parents=[common, roles_path, offline])
+ self.add_install_options(role_parser, parents=[common, force, roles_path])
+
+ def add_download_options(self, parser, parents=None):
+ download_parser = parser.add_parser('download', parents=parents,
+ help='Download collections and their dependencies as a tarball for an '
+ 'offline install.')
+ download_parser.set_defaults(func=self.execute_download)
+
+ download_parser.add_argument('args', help='Collection(s)', metavar='collection', nargs='*')
+
+ download_parser.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help="Don't download collection(s) listed as dependencies.")
+
+ download_parser.add_argument('-p', '--download-path', dest='download_path',
+ default='./collections',
+ help='The directory to download the collections to.')
+ download_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be downloaded.')
+ download_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
+ help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+
+ def add_init_options(self, parser, parents=None):
+ galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
+
+ init_parser = parser.add_parser('init', parents=parents,
+ help='Initialize a new {0} with the base structure of a '
+ '{0}.'.format(galaxy_type))
+ init_parser.set_defaults(func=self.execute_init)
+
+ init_parser.add_argument('--init-path', dest='init_path', default='./',
+ help='The path in which the skeleton {0} will be created. The default is the '
+ 'current working directory.'.format(galaxy_type))
+ init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
+ default=C.GALAXY_ROLE_SKELETON,
+ help='The path to a {0} skeleton that the new {0} should be based '
+ 'upon.'.format(galaxy_type))
+
+ obj_name_kwargs = {}
+ if galaxy_type == 'collection':
+ obj_name_kwargs['type'] = validate_collection_name
+ init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
+ **obj_name_kwargs)
+
+ if galaxy_type == 'role':
+ init_parser.add_argument('--type', dest='role_type', action='store', default='default',
+ help="Initialize using an alternate role type. Valid types include: 'container', "
+ "'apb' and 'network'.")
+
+ def add_remove_options(self, parser, parents=None):
+ remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
+ remove_parser.set_defaults(func=self.execute_remove)
+
+ remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
+
+ def add_delete_options(self, parser, parents=None):
+ delete_parser = parser.add_parser('delete', parents=parents,
+ help='Removes the role from Galaxy. It does not remove or alter the actual '
+ 'GitHub repository.')
+ delete_parser.set_defaults(func=self.execute_delete)
+
+ def add_list_options(self, parser, parents=None):
+ galaxy_type = 'role'
+ if parser.metavar == 'COLLECTION_ACTION':
+ galaxy_type = 'collection'
+
+ list_parser = parser.add_parser('list', parents=parents,
+ help='Show the name and version of each {0} installed in the {0}s_path.'.format(galaxy_type))
+
+ list_parser.set_defaults(func=self.execute_list)
+
+ list_parser.add_argument(galaxy_type, help=galaxy_type.capitalize(), nargs='?', metavar=galaxy_type)
+
+ def add_search_options(self, parser, parents=None):
+ search_parser = parser.add_parser('search', parents=parents,
+ help='Search the Galaxy database by tags, platforms, author and multiple '
+ 'keywords.')
+ search_parser.set_defaults(func=self.execute_search)
+
+ search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
+ search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
+ search_parser.add_argument('--author', dest='author', help='GitHub username')
+ search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
+
+ def add_import_options(self, parser, parents=None):
+ import_parser = parser.add_parser('import', parents=parents, help='Import a role into a galaxy server')
+ import_parser.set_defaults(func=self.execute_import)
+
+ import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
+ help="Don't wait for import results.")
+ import_parser.add_argument('--branch', dest='reference',
+ help='The name of a branch to import. Defaults to the repository\'s default branch '
+ '(usually master)')
+ import_parser.add_argument('--role-name', dest='role_name',
+ help='The name the role should have, if different than the repo name')
+ import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
+ help='Check the status of the most recent import request for the given '
+ 'github_user/github_repo.')
+
+ def add_setup_options(self, parser, parents=None):
+ setup_parser = parser.add_parser('setup', parents=parents,
+ help='Manage the integration between Galaxy and the given source.')
+ setup_parser.set_defaults(func=self.execute_setup)
+
+ setup_parser.add_argument('--remove', dest='remove_id', default=None,
+ help='Remove the integration matching the provided ID value. Use --list to see '
+ 'ID values.')
+ setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
+ help='List all of your integrations.')
+ setup_parser.add_argument('source', help='Source')
+ setup_parser.add_argument('github_user', help='GitHub username')
+ setup_parser.add_argument('github_repo', help='GitHub repository')
+ setup_parser.add_argument('secret', help='Secret')
+
+ def add_info_options(self, parser, parents=None):
+ info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
+ info_parser.set_defaults(func=self.execute_info)
+
+ info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
+
+ def add_verify_options(self, parser, parents=None):
+ galaxy_type = 'collection'
+ verify_parser = parser.add_parser('verify', parents=parents, help='Compare the checksums of the installed '
+ 'collection(s) with those found on the server. This does not verify dependencies.')
+ verify_parser.set_defaults(func=self.execute_verify)
+
+ verify_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', help='The collection(s) name or '
+ 'path/url to a tar.gz collection artifact. This is mutually exclusive with --requirements-file.')
+ verify_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help='Ignore errors during verification and continue with the next specified collection.')
+ verify_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be verified.')
+
+ def add_install_options(self, parser, parents=None):
+ galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
+
+ args_kwargs = {}
+ if galaxy_type == 'collection':
+ args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
+ 'mutually exclusive with --requirements-file.'
+ ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
+ 'collection. This will not ignore dependency conflict errors.'
+ else:
+ args_kwargs['help'] = 'Role name, URL or tar file'
+ ignore_errors_help = 'Ignore errors and continue with the next specified role.'
+
+ install_parser = parser.add_parser('install', parents=parents,
+ help='Install {0}(s) from file(s), URL(s) or Ansible '
+ 'Galaxy'.format(galaxy_type))
+ install_parser.set_defaults(func=self.execute_install)
+
+ install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
+ install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help=ignore_errors_help)
+
+ install_exclusive = install_parser.add_mutually_exclusive_group()
+ install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+ help="Don't download {0}s listed as dependencies.".format(galaxy_type))
+ install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
+ help="Force overwriting an existing {0} and its "
+ "dependencies.".format(galaxy_type))
+
+ if galaxy_type == 'collection':
+ install_parser.add_argument('-p', '--collections-path', dest='collections_path',
+ default=self._get_default_collection_path(),
+ help='The path to the directory containing your collections.')
+ install_parser.add_argument('-r', '--requirements-file', dest='requirements',
+ help='A file containing a list of collections to be installed.')
+ install_parser.add_argument('--pre', dest='allow_pre_release', action='store_true',
+ help='Include pre-release versions. Semantic versioning pre-releases are ignored by default')
+ else:
+ install_parser.add_argument('-r', '--role-file', dest='requirements',
+ help='A file containing a list of roles to be installed.')
+ install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
+ default=False,
+ help='Use tar instead of the scm archive option when packaging the role.')
+
+ def add_build_options(self, parser, parents=None):
+ build_parser = parser.add_parser('build', parents=parents,
+ help='Build an Ansible collection artifact that can be published to Ansible '
+ 'Galaxy.')
+ build_parser.set_defaults(func=self.execute_build)
+
+ build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
+ help='Path to the collection(s) directory to build. This should be the directory '
+ 'that contains the galaxy.yml file. The default is the current working '
+ 'directory.')
+ build_parser.add_argument('--output-path', dest='output_path', default='./',
+ help='The path in which the collection artifact is built. The default is the current '
+ 'working directory.')
+
+ def add_publish_options(self, parser, parents=None):
+ publish_parser = parser.add_parser('publish', parents=parents,
+ help='Publish a collection artifact to Ansible Galaxy.')
+ publish_parser.set_defaults(func=self.execute_publish)
+
+ publish_parser.add_argument('args', metavar='collection_path',
+ help='The path to the collection tarball to publish.')
+ publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
+ help="Don't wait for import validation results.")
+ publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
+ help="The time to wait for the collection import process to finish.")
+
+ def post_process_args(self, options):
+ options = super(GalaxyCLI, self).post_process_args(options)
+ display.verbosity = options.verbosity
+ return options
+
+ def run(self):
+
+ super(GalaxyCLI, self).run()
+
+ self.galaxy = Galaxy()
+
+ def server_config_def(section, key, required):
+ return {
+ 'description': 'The %s of the %s Galaxy server' % (key, section),
+ 'ini': [
+ {
+ 'section': 'galaxy_server.%s' % section,
+ 'key': key,
+ }
+ ],
+ 'env': [
+ {'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
+ ],
+ 'required': required,
+ }
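+ # Illustration for a hypothetical server named 'release_galaxy' and key 'url':
+ # the definition above resolves from the ini section [galaxy_server.release_galaxy]
+ # (key 'url') or the env var ANSIBLE_GALAXY_SERVER_RELEASE_GALAXY_URL.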
+ server_def = [('url', True), ('username', False), ('password', False), ('token', False),
+ ('auth_url', False)]
+
+ validate_certs = not context.CLIARGS['ignore_certs']
+
+ config_servers = []
+
+ # Need to filter out empty strings and other non-truthy values, since an empty server list env var is equal to [''].
+ server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
+ for server_key in server_list:
+ # Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
+ # section [galaxy_server.<server>] for the values url, username, password, token, and auth_url.
+ config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
+ defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data()
+ C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
+
+ server_options = C.config.get_plugin_options('galaxy_server', server_key)
+ # auth_url is used to create the token, but not directly by GalaxyAPI, so
+ # it doesn't need to be passed as a kwarg to GalaxyAPI
+ auth_url = server_options.pop('auth_url', None)
+ token_val = server_options['token'] or NoTokenSentinel
+ username = server_options['username']
+
+ # default case if no auth info is provided.
+ server_options['token'] = None
+
+ if username:
+ server_options['token'] = BasicAuthToken(username,
+ server_options['password'])
+ else:
+ if token_val:
+ if auth_url:
+ server_options['token'] = KeycloakToken(access_token=token_val,
+ auth_url=auth_url,
+ validate_certs=validate_certs)
+ else:
+ # The galaxy v1 / github / django / 'Token'
+ server_options['token'] = GalaxyToken(token=token_val)
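+ # Summarizing the resolution above: username/password -> BasicAuthToken;
+ # a configured token with an auth_url -> KeycloakToken; a token alone ->
+ # GalaxyToken; no auth info at all -> token stays None.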
+
+ server_options['validate_certs'] = validate_certs
+
+ config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
+
+ cmd_server = context.CLIARGS['api_server']
+ cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
+ if cmd_server:
+ # Cmd args take precedence over the config entry, but first check if the arg was a name and use that config
+ # entry; otherwise create a new API entry for the server specified.
+ config_server = next((s for s in config_servers if s.name == cmd_server), None)
+ if config_server:
+ self.api_servers.append(config_server)
+ else:
+ self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token,
+ validate_certs=validate_certs))
+ else:
+ self.api_servers = config_servers
+
+ # Default to C.GALAXY_SERVER if no servers were defined
+ if len(self.api_servers) == 0:
+ self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token,
+ validate_certs=validate_certs))
+
+ context.CLIARGS['func']()
+
+ @property
+ def api(self):
+ return self.api_servers[0]
+
+ def _get_default_collection_path(self):
+ return C.COLLECTIONS_PATHS[0]
+
+ def _parse_requirements_file(self, requirements_file, allow_old_format=True):
+ """
+ Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are two
+ requirements file formats:
+
+ # v1 (roles only)
+ - src: The source of the role, required if include is not set. Can be a Galaxy role name, or a URL to a SCM repo or tarball.
+ name: Downloads the role to the specified name, defaults to the name from Galaxy or the name of the repo if src is a URL.
+ scm: If src is a URL, specify the SCM. Only git or hg are supported and it defaults to git.
+ version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
+ include: Path to additional requirements.yml files.
+
+ # v2 (roles and collections)
+ ---
+ roles:
+ # Same as v1 format just under the roles key
+
+ collections:
+ - namespace.collection
+ - name: namespace.collection
+ version: version identifier, multiple identifiers are separated by ','
+ source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
+ type: git|file|url|galaxy
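+
+ For example, a minimal v2 file (hypothetical names and versions) could read:
+
+ ---
+ roles:
+ - src: https://github.com/example/some-role.git
+ scm: git
+ version: main
+ collections:
+ - name: my_namespace.my_collection
+ version: '>=1.0.0'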
+
+ :param requirements_file: The path to the requirements file.
+ :param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
+ :return: a dict containing the roles and collections found in the requirements file.
+ """
+ requirements = {
+ 'roles': [],
+ 'collections': [],
+ }
+
+ b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
+ if not os.path.exists(b_requirements_file):
+ raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
+
+ display.vvv("Reading requirement file at '%s'" % requirements_file)
+ with open(b_requirements_file, 'rb') as req_obj:
+ try:
+ file_requirements = yaml.safe_load(req_obj)
+ except YAMLError as err:
+ raise AnsibleError(
+ "Failed to parse the requirements yml at '%s' with the following error:\n%s"
+ % (to_native(requirements_file), to_native(err)))
+
+ if file_requirements is None:
+ raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
+
+ def parse_role_req(requirement):
+ if "include" not in requirement:
+ role = RoleRequirement.role_yaml_parse(requirement)
+ display.vvv("found role %s in yaml file" % to_text(role))
+ if "name" not in role and "src" not in role:
+ raise AnsibleError("Must specify name or src for role")
+ return [GalaxyRole(self.galaxy, self.api, **role)]
+ else:
+ b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
+ if not os.path.isfile(b_include_path):
+ raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
+ % (to_native(b_include_path), to_native(requirements_file)))
+
+ with open(b_include_path, 'rb') as f_include:
+ try:
+ return [GalaxyRole(self.galaxy, self.api, **r) for r in
+ (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
+ except Exception as e:
+ raise AnsibleError("Unable to load data from include requirements file: %s %s"
+ % (to_native(requirements_file), to_native(e)))
+
+ if isinstance(file_requirements, list):
+ # Older format that contains only roles
+ if not allow_old_format:
+ raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
+ "a list of collections to install")
+
+ for role_req in file_requirements:
+ requirements['roles'] += parse_role_req(role_req)
+
+ else:
+ # Newer format with a collections and/or roles key
+ extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
+ if extra_keys:
+ raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
+ "file. Found: %s" % (to_native(", ".join(extra_keys))))
+
+ for role_req in file_requirements.get('roles') or []:
+ requirements['roles'] += parse_role_req(role_req)
+
+ for collection_req in file_requirements.get('collections') or []:
+ if isinstance(collection_req, dict):
+ req_name = collection_req.get('name', None)
+ if req_name is None:
+ raise AnsibleError("Collections requirement entry should contain the key name.")
+
+ req_type = collection_req.get('type')
+ if req_type not in ('file', 'galaxy', 'git', 'url', None):
+ raise AnsibleError("The collection requirement entry key 'type' must be one of file, galaxy, git, or url.")
+
+ req_version = collection_req.get('version', '*')
+ req_source = collection_req.get('source', None)
+ if req_source:
+ # Try and match up the requirement source with our list of Galaxy API servers defined in the
+ # config, otherwise create a server with that URL without any auth.
+ req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
+ GalaxyAPI(self.galaxy,
+ "explicit_requirement_%s" % req_name,
+ req_source,
+ validate_certs=not context.CLIARGS['ignore_certs']))
+
+ requirements['collections'].append((req_name, req_version, req_source, req_type))
+ else:
+ requirements['collections'].append((collection_req, '*', None, None))
+
+ return requirements
+
+ @staticmethod
+ def exit_without_ignore(rc=1):
+ """
+ Raises an error, aborting further processing, unless the
+ option --ignore-errors was specified
+ """
+ if not context.CLIARGS['ignore_errors']:
+ raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
+
+ @staticmethod
+ def _display_role_info(role_info):
+
+ text = [u"", u"Role: %s" % to_text(role_info['name'])]
+
+ # Get the top-level 'description' first, falling back to role_info['galaxy_info']['description'].
+ galaxy_info = role_info.get('galaxy_info', {})
+ description = role_info.get('description', galaxy_info.get('description', ''))
+ text.append(u"\tdescription: %s" % description)
+
+ for k in sorted(role_info.keys()):
+
+ if k in GalaxyCLI.SKIP_INFO_KEYS:
+ continue
+
+ if isinstance(role_info[k], dict):
+ text.append(u"\t%s:" % (k))
+ for key in sorted(role_info[k].keys()):
+ if key in GalaxyCLI.SKIP_INFO_KEYS:
+ continue
+ text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
+ else:
+ text.append(u"\t%s: %s" % (k, role_info[k]))
+
+ return u'\n'.join(text)
+
+ @staticmethod
+ def _resolve_path(path):
+ return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
+
+ @staticmethod
+ def _get_skeleton_galaxy_yml(template_path, inject_data):
+ with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
+ meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
+
+ galaxy_meta = get_collections_galaxy_meta_info()
+
+ required_config = []
+ optional_config = []
+ for meta_entry in galaxy_meta:
+ config_list = required_config if meta_entry.get('required', False) else optional_config
+
+ value = inject_data.get(meta_entry['key'], None)
+ if not value:
+ meta_type = meta_entry.get('type', 'str')
+
+ if meta_type == 'str':
+ value = ''
+ elif meta_type == 'list':
+ value = []
+ elif meta_type == 'dict':
+ value = {}
+
+ meta_entry['value'] = value
+ config_list.append(meta_entry)
+
+ link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
+ const_pattern = re.compile(r"C\(([^)]+)\)")
+
+ def comment_ify(v):
+ if isinstance(v, list):
+ v = ". ".join([l.rstrip('.') for l in v])
+
+ v = link_pattern.sub(r"\1 <\2>", v)
+ v = const_pattern.sub(r"'\1'", v)
+
+ return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
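+ # For instance (hypothetical text), comment_ify("See L(the docs, https://docs.ansible.com) and C(foo)")
+ # returns roughly "# See the docs <https://docs.ansible.com> and 'foo'".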
+
+ loader = DataLoader()
+ templar = Templar(loader, variables={'required_config': required_config, 'optional_config': optional_config})
+ templar.environment.filters['comment_ify'] = comment_ify
+
+ meta_value = templar.template(meta_template)
+
+ return meta_value
+
+ def _require_one_of_collections_requirements(self, collections, requirements_file):
+ if collections and requirements_file:
+ raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
+ elif not collections and not requirements_file:
+ raise AnsibleError("You must specify a collection name or a requirements file.")
+ elif requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+ requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)
+ else:
+ requirements = {'collections': [], 'roles': []}
+ for collection_input in collections:
+ requirement = None
+ if os.path.isfile(to_bytes(collection_input, errors='surrogate_or_strict')) or \
+ urlparse(collection_input).scheme.lower() in ['http', 'https'] or \
+ collection_input.startswith(('git+', 'git@')):
+ # Arg is a file path or URL to a collection
+ name = collection_input
+ else:
+ name, dummy, requirement = collection_input.partition(':')
+ requirements['collections'].append((name, requirement or '*', None, None))
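+ # e.g. (hypothetical collection) 'my_ns.my_coll:1.3.0' parses to
+ # ('my_ns.my_coll', '1.3.0', None, None); bare 'my_ns.my_coll' parses to
+ # ('my_ns.my_coll', '*', None, None).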
+ return requirements
+
+ ############################
+ # execute actions
+ ############################
+
+ def execute_role(self):
+ """
+ Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
+ as listed below.
+ """
+ # To satisfy doc build
+ pass
+
+ def execute_collection(self):
+ """
+ Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
+ listed below.
+ """
+ # To satisfy doc build
+ pass
+
+ def execute_build(self):
+ """
+ Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
+ By default, this command builds from the current working directory. You can optionally pass in the
+ collection input path (where the ``galaxy.yml`` file is).
+ """
+ force = context.CLIARGS['force']
+ output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+
+ if not os.path.exists(b_output_path):
+ os.makedirs(b_output_path)
+ elif os.path.isfile(b_output_path):
+ raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
+
+ for collection_path in context.CLIARGS['args']:
+ collection_path = GalaxyCLI._resolve_path(collection_path)
+ build_collection(collection_path, output_path, force)
+
+ def execute_download(self):
+ collections = context.CLIARGS['args']
+ no_deps = context.CLIARGS['no_deps']
+ download_path = context.CLIARGS['download_path']
+ ignore_certs = context.CLIARGS['ignore_certs']
+
+ requirements_file = context.CLIARGS['requirements']
+ if requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+
+ requirements = self._require_one_of_collections_requirements(collections, requirements_file)['collections']
+
+ download_path = GalaxyCLI._resolve_path(download_path)
+ b_download_path = to_bytes(download_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_download_path):
+ os.makedirs(b_download_path)
+
+ download_collections(requirements, download_path, self.api_servers, (not ignore_certs), no_deps,
+ context.CLIARGS['allow_pre_release'])
+
+ return 0
+
+ def execute_init(self):
+ """
+ Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
+ Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
+ """
+
+ galaxy_type = context.CLIARGS['type']
+ init_path = context.CLIARGS['init_path']
+ force = context.CLIARGS['force']
+ obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
+
+ obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
+
+ inject_data = dict(
+ description='your {0} description'.format(galaxy_type),
+ ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
+ )
+ if galaxy_type == 'role':
+ inject_data.update(dict(
+ author='your name',
+ company='your company (optional)',
+ license='license (GPL-2.0-or-later, MIT, etc)',
+ role_name=obj_name,
+ role_type=context.CLIARGS['role_type'],
+ issue_tracker_url='http://example.com/issue/tracker',
+ repository_url='http://example.com/repository',
+ documentation_url='http://docs.example.com',
+ homepage_url='http://example.com',
+ min_ansible_version='.'.join(ansible_version.split('.')[:2]), # x.y
+ dependencies=[],
+ ))
+
+ obj_path = os.path.join(init_path, obj_name)
+ elif galaxy_type == 'collection':
+ namespace, collection_name = obj_name.split('.', 1)
+
+ inject_data.update(dict(
+ namespace=namespace,
+ collection_name=collection_name,
+ version='1.0.0',
+ readme='README.md',
+ authors=['your name <example@domain.com>'],
+ license=['GPL-2.0-or-later'],
+ repository='http://example.com/repository',
+ documentation='http://docs.example.com',
+ homepage='http://example.com',
+ issues='http://example.com/issue/tracker',
+ build_ignore=[],
+ ))
+
+ obj_path = os.path.join(init_path, namespace, collection_name)
+
+ b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
+
+ if os.path.exists(b_obj_path):
+ if os.path.isfile(obj_path):
+ raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
+ elif not force:
+ raise AnsibleError("- the directory %s already exists. "
+ "You can use --force to re-initialize this directory,\n"
+ "however it will reset any main.yml files that may have\n"
+ "been modified there already." % to_native(obj_path))
+
+ if obj_skeleton is not None:
+ own_skeleton = False
+ skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
+ else:
+ own_skeleton = True
+ obj_skeleton = self.galaxy.default_role_skeleton_path
+ skeleton_ignore_expressions = ['^.*/.git_keep$']
+
+ obj_skeleton = os.path.expanduser(obj_skeleton)
+ skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
+
+ if not os.path.exists(obj_skeleton):
+ raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
+ to_native(obj_skeleton), galaxy_type)
+ )
+
+ loader = DataLoader()
+ templar = Templar(loader, variables=inject_data)
+
+ # create role directory
+ if not os.path.exists(b_obj_path):
+ os.makedirs(b_obj_path)
+
+ for root, dirs, files in os.walk(obj_skeleton, topdown=True):
+ rel_root = os.path.relpath(root, obj_skeleton)
+ rel_dirs = rel_root.split(os.sep)
+ rel_root_dir = rel_dirs[0]
+ if galaxy_type == 'collection':
+ # A collection can contain templates in playbooks/*/templates and roles/*/templates
+ in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
+ else:
+ in_templates_dir = rel_root_dir == 'templates'
+
+ dirs = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
+
+ for f in files:
+ filename, ext = os.path.splitext(f)
+
+ if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
+ continue
+
+ if galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
+ # Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
+ # dynamically which requires special options to be set.
+
+ # The template data keys must match the galaxy.yml key names, but the inject data contains
+ # collection_name instead of name. We just make a copy and change the key back to name for this file.
+ template_data = inject_data.copy()
+ template_data['name'] = template_data.pop('collection_name')
+
+ meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
+ b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
+ with open(b_dest_file, 'wb') as galaxy_obj:
+ galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
+ elif ext == ".j2" and not in_templates_dir:
+ src_template = os.path.join(root, f)
+ dest_file = os.path.join(obj_path, rel_root, filename)
+ template_data = to_text(loader._get_file_contents(src_template)[0], errors='surrogate_or_strict')
+ b_rendered = to_bytes(templar.template(template_data), errors='surrogate_or_strict')
+ with open(dest_file, 'wb') as df:
+ df.write(b_rendered)
+ else:
+ f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
+ shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
+
+ for d in dirs:
+ b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
+ if not os.path.exists(b_dir_path):
+ os.makedirs(b_dir_path)
+
+ display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
+
+ def execute_info(self):
+ """
+ Prints out detailed information about an installed role as well as info available from the galaxy API.
+ """
+
+ roles_path = context.CLIARGS['roles_path']
+
+ data = ''
+ for role in context.CLIARGS['args']:
+
+ role_info = {'path': roles_path}
+ gr = GalaxyRole(self.galaxy, self.api, role)
+
+ install_info = gr.install_info
+ if install_info:
+ if 'version' in install_info:
+ install_info['installed_version'] = install_info['version']
+ del install_info['version']
+ role_info.update(install_info)
+
+ if not context.CLIARGS['offline']:
+ remote_data = None
+ try:
+ remote_data = self.api.lookup_role_by_name(role, False)
+ except AnsibleError as e:
+ if e.http_code == 400 and 'Bad Request' in e.message:
+ # Role does not exist in Ansible Galaxy
+ data = u"- the role %s was not found" % role
+ break
+
+ raise AnsibleError("Unable to find info about '%s': %s" % (role, e))
+
+ if remote_data:
+ role_info.update(remote_data)
+
+ elif context.CLIARGS['offline'] and not gr._exists:
+ data = u"- the role %s was not found" % role
+ break
+
+ if gr.metadata:
+ role_info.update(gr.metadata)
+
+ req = RoleRequirement()
+ role_spec = req.role_yaml_parse({'role': role})
+ if role_spec:
+ role_info.update(role_spec)
+
+ data = self._display_role_info(role_info)
+
+ self.pager(data)
+
+ def execute_verify(self):
+
+ collections = context.CLIARGS['args']
+ search_paths = context.CLIARGS['collections_path']
+ ignore_certs = context.CLIARGS['ignore_certs']
+ ignore_errors = context.CLIARGS['ignore_errors']
+ requirements_file = context.CLIARGS['requirements']
+
+ requirements = self._require_one_of_collections_requirements(collections, requirements_file)['collections']
+
+ resolved_paths = [validate_collection_path(GalaxyCLI._resolve_path(path)) for path in search_paths]
+
+ verify_collections(requirements, resolved_paths, self.api_servers, (not ignore_certs), ignore_errors,
+ allow_pre_release=True)
+
+ return 0
+
+ def execute_install(self):
+ """
+ Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
+ You can pass in a list (roles or collections) or use the file
+ option listed below (these are mutually exclusive). If you pass in a list, it
+ can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
+ """
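+        # Illustrative invocations (placeholder names, not an exhaustive list):
+        #   ansible-galaxy role install namespace.role_name
+        #   ansible-galaxy collection install my_namespace.my_collection
+        #   ansible-galaxy install -r requirements.yml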
+ install_items = context.CLIARGS['args']
+ requirements_file = context.CLIARGS['requirements']
+ collection_path = None
+
+ if requirements_file:
+ requirements_file = GalaxyCLI._resolve_path(requirements_file)
+
+ two_type_warning = "The requirements file '%s' contains {0}s which will be ignored. To install these {0}s " \
+ "run 'ansible-galaxy {0} install -r' or to install both at the same time run " \
+ "'ansible-galaxy install -r' without a custom install path." % to_text(requirements_file)
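+            # {0} is filled in later with whichever content type is being skipped
+            # ('role' or 'collection'), via two_type_warning.format(...)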
+
+ # TODO: Would be nice to share the same behaviour with args and -r in collections and roles.
+ collection_requirements = []
+ role_requirements = []
+ if context.CLIARGS['type'] == 'collection':
+ collection_path = GalaxyCLI._resolve_path(context.CLIARGS['collections_path'])
+ requirements = self._require_one_of_collections_requirements(install_items, requirements_file)
+
+ collection_requirements = requirements['collections']
+ if requirements['roles']:
+ display.vvv(two_type_warning.format('role'))
+ else:
+ if not install_items and requirements_file is None:
+ raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
+
+ if requirements_file:
+ if not (requirements_file.endswith('.yaml') or requirements_file.endswith('.yml')):
+ raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
+
+ requirements = self._parse_requirements_file(requirements_file)
+ role_requirements = requirements['roles']
+
+ # We can only install collections and roles at the same time if the type wasn't specified and the -p
+ # argument was not used. If collections are present in the requirements then at least display a msg.
+ galaxy_args = self._raw_args
+ if requirements['collections'] and (not self._implicit_role or '-p' in galaxy_args or
+ '--roles-path' in galaxy_args):
+
+                    # We only want to display a warning for 'ansible-galaxy install -r ... -p ...'. In other cases the
+                    # user was explicit about the type and shouldn't care that collections were skipped.
+ display_func = display.warning if self._implicit_role else display.vvv
+ display_func(two_type_warning.format('collection'))
+ else:
+ collection_path = self._get_default_collection_path()
+ collection_requirements = requirements['collections']
+ else:
+            # roles were specified directly, so we'll just go out and grab them
+ # (and their dependencies, unless the user doesn't want us to).
+ for rname in context.CLIARGS['args']:
+ role = RoleRequirement.role_yaml_parse(rname.strip())
+ role_requirements.append(GalaxyRole(self.galaxy, self.api, **role))
+
+ if not role_requirements and not collection_requirements:
+ display.display("Skipping install, no requirements found")
+ return
+
+ if role_requirements:
+ display.display("Starting galaxy role install process")
+ self._execute_install_role(role_requirements)
+
+ if collection_requirements:
+ display.display("Starting galaxy collection install process")
+ # Collections can technically be installed even when ansible-galaxy is in role mode so we need to pass in
+ # the install path as context.CLIARGS['collections_path'] won't be set (default is calculated above).
+ self._execute_install_collection(collection_requirements, collection_path)
+
+ def _execute_install_collection(self, requirements, path):
+ force = context.CLIARGS['force']
+ ignore_certs = context.CLIARGS['ignore_certs']
+ ignore_errors = context.CLIARGS['ignore_errors']
+ no_deps = context.CLIARGS['no_deps']
+ force_with_deps = context.CLIARGS['force_with_deps']
+ allow_pre_release = context.CLIARGS['allow_pre_release'] if 'allow_pre_release' in context.CLIARGS else False
+
+ collections_path = C.COLLECTIONS_PATHS
+        if not any(p.startswith(path) for p in collections_path):
+ display.warning("The specified collections path '%s' is not part of the configured Ansible "
+ "collections paths '%s'. The installed collection won't be picked up in an Ansible "
+ "run." % (to_text(path), to_text(":".join(collections_path))))
+
+ output_path = validate_collection_path(path)
+ b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_output_path):
+ os.makedirs(b_output_path)
+
+ install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
+ no_deps, force, force_with_deps, allow_pre_release=allow_pre_release)
+
+ return 0
+
+ def _execute_install_role(self, requirements):
+ role_file = context.CLIARGS['requirements']
+ no_deps = context.CLIARGS['no_deps']
+ force_deps = context.CLIARGS['force_with_deps']
+ force = context.CLIARGS['force'] or force_deps
+
+ for role in requirements:
+            # only process roles from the roles file whose names match the given args, if any were given
+ if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
+ display.vvv('Skipping role %s' % role.name)
+ continue
+
+ display.vvv('Processing role %s ' % role.name)
+
+            # check whether the role is already installed locally before doing any remote work
+
+ if role.install_info is not None:
+ if role.install_info['version'] != role.version or force:
+ if force:
+ display.display('- changing role %s from %s to %s' %
+ (role.name, role.install_info['version'], role.version or "unspecified"))
+ role.remove()
+ else:
+ display.warning('- %s (%s) is already installed - use --force to change version to %s' %
+ (role.name, role.install_info['version'], role.version or "unspecified"))
+ continue
+ else:
+ if not force:
+ display.display('- %s is already installed, skipping.' % str(role))
+ continue
+
+ try:
+ installed = role.install()
+ except AnsibleError as e:
+ display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
+ self.exit_without_ignore()
+ continue
+
+ # install dependencies, if we want them
+ if not no_deps and installed:
+ if not role.metadata:
+ display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
+ else:
+ role_dependencies = (role.metadata.get('dependencies') or []) + role.requirements
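+                    # dependencies can come from meta/main.yml ('dependencies') and, where
+                    # present, meta/requirements.yml (surfaced here as role.requirements)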
+ for dep in role_dependencies:
+ display.debug('Installing dep %s' % dep)
+ dep_req = RoleRequirement()
+ dep_info = dep_req.role_yaml_parse(dep)
+ dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
+ if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
+ # we know we can skip this, as it's not going to
+ # be found on galaxy.ansible.com
+ continue
+ if dep_role.install_info is None:
+ if dep_role not in requirements:
+ display.display('- adding dependency: %s' % to_text(dep_role))
+ requirements.append(dep_role)
+ else:
+ display.display('- dependency %s already pending installation.' % dep_role.name)
+ else:
+ if dep_role.install_info['version'] != dep_role.version:
+ if force_deps:
+ display.display('- changing dependent role %s from %s to %s' %
+ (dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
+ dep_role.remove()
+ requirements.append(dep_role)
+ else:
+ display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
+ (to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
+ else:
+ if force_deps:
+ requirements.append(dep_role)
+ else:
+ display.display('- dependency %s is already installed, skipping.' % dep_role.name)
+
+ if not installed:
+ display.warning("- %s was NOT installed successfully." % role.name)
+ self.exit_without_ignore()
+
+ return 0
+
+ def execute_remove(self):
+ """
+ removes the list of roles passed as arguments from the local system.
+ """
+
+ if not context.CLIARGS['args']:
+ raise AnsibleOptionsError('- you must specify at least one role to remove.')
+
+ for role_name in context.CLIARGS['args']:
+ role = GalaxyRole(self.galaxy, self.api, role_name)
+ try:
+ if role.remove():
+ display.display('- successfully removed %s' % role_name)
+ else:
+ display.display('- %s is not installed, skipping.' % role_name)
+ except Exception as e:
+ raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
+
+ return 0
+
+ def execute_list(self):
+ """
+ List installed collections or roles
+ """
+
+ if context.CLIARGS['type'] == 'role':
+ self.execute_list_role()
+ elif context.CLIARGS['type'] == 'collection':
+ self.execute_list_collection()
+
+ def execute_list_role(self):
+ """
+ List all roles installed on the local system or a specific role
+ """
+
+ path_found = False
+ role_found = False
+ warnings = []
+ roles_search_paths = context.CLIARGS['roles_path']
+ role_name = context.CLIARGS['role']
+
+ for path in roles_search_paths:
+ role_path = GalaxyCLI._resolve_path(path)
+            if os.path.isdir(role_path):
+ path_found = True
+ else:
+ warnings.append("- the configured path {0} does not exist.".format(path))
+ continue
+
+ if role_name:
+ # show the requested role, if it exists
+ gr = GalaxyRole(self.galaxy, self.api, role_name, path=os.path.join(role_path, role_name))
+ if os.path.isdir(gr.path):
+ role_found = True
+ display.display('# %s' % os.path.dirname(gr.path))
+ _display_role(gr)
+ break
+ warnings.append("- the role %s was not found" % role_name)
+ else:
+ if not os.path.exists(role_path):
+ warnings.append("- the configured path %s does not exist." % role_path)
+ continue
+
+ if not os.path.isdir(role_path):
+                    warnings.append("- the configured path %s exists, but it is not a directory." % role_path)
+ continue
+
+ display.display('# %s' % role_path)
+ path_files = os.listdir(role_path)
+ for path_file in path_files:
+ gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
+ if gr.metadata:
+ _display_role(gr)
+
+ # Do not warn if the role was found in any of the search paths
+ if role_found and role_name:
+ warnings = []
+
+ for w in warnings:
+ display.warning(w)
+
+ if not path_found:
+ raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+
+ return 0
+
+ def execute_list_collection(self):
+ """
+ List all collections installed on the local system
+ """
+
+ collections_search_paths = set(context.CLIARGS['collections_path'])
+ collection_name = context.CLIARGS['collection']
+ default_collections_path = C.config.get_configuration_definition('COLLECTIONS_PATHS').get('default')
+
+ warnings = []
+ path_found = False
+ collection_found = False
+ for path in collections_search_paths:
+ collection_path = GalaxyCLI._resolve_path(path)
+            if not os.path.exists(collection_path):
+ if path in default_collections_path:
+ # don't warn for missing default paths
+ continue
+ warnings.append("- the configured path {0} does not exist.".format(collection_path))
+ continue
+
+ if not os.path.isdir(collection_path):
+                warnings.append("- the configured path {0} exists, but it is not a directory.".format(collection_path))
+ continue
+
+ path_found = True
+
+ if collection_name:
+ # list a specific collection
+
+ validate_collection_name(collection_name)
+ namespace, collection = collection_name.split('.')
+
+ collection_path = validate_collection_path(collection_path)
+ b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')
+
+ if not os.path.exists(b_collection_path):
+ warnings.append("- unable to find {0} in collection paths".format(collection_name))
+ continue
+
+ if not os.path.isdir(collection_path):
+                    warnings.append("- the configured path {0} exists, but it is not a directory.".format(collection_path))
+ continue
+
+ collection_found = True
+ collection = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=True)
+ fqcn_width, version_width = _get_collection_widths(collection)
+
+ _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
+ _display_collection(collection, fqcn_width, version_width)
+
+ else:
+ # list all collections
+ collection_path = validate_collection_path(path)
+ if os.path.isdir(collection_path):
+ display.vvv("Searching {0} for collections".format(collection_path))
+ collections = find_existing_collections(collection_path, fallback_metadata=True)
+ else:
+ # There was no 'ansible_collections/' directory in the path, so there
+                    # are no collections here.
+ display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
+ continue
+
+ if not collections:
+ display.vvv("No collections found at {0}".format(collection_path))
+ continue
+
+ # Display header
+ fqcn_width, version_width = _get_collection_widths(collections)
+ _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
+
+ # Sort collections by the namespace and name
+ collections.sort(key=to_text)
+ for collection in collections:
+ _display_collection(collection, fqcn_width, version_width)
+
+ # Do not warn if the specific collection was found in any of the search paths
+ if collection_found and collection_name:
+ warnings = []
+
+ for w in warnings:
+ display.warning(w)
+
+ if not path_found:
+ raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))
+
+ return 0
+
+ def execute_publish(self):
+ """
+ Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
+ """
+ collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
+ wait = context.CLIARGS['wait']
+ timeout = context.CLIARGS['import_timeout']
+
+ publish_collection(collection_path, self.api, wait, timeout)
+
+ def execute_search(self):
+ ''' searches for roles on the Ansible Galaxy server'''
+ page_size = 1000
+ search = None
+
+ if context.CLIARGS['args']:
+ search = '+'.join(context.CLIARGS['args'])
+
+ if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
+ raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
+
+ response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
+ tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
+
+ if response['count'] == 0:
+ display.display("No roles match your search.", color=C.COLOR_ERROR)
+ return True
+
+ data = [u'']
+
+ if response['count'] > page_size:
+ data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
+ else:
+ data.append(u"Found %d roles matching your search:" % response['count'])
+
+ max_len = []
+ for role in response['results']:
+ max_len.append(len(role['username'] + '.' + role['name']))
+ name_len = max(max_len)
+ format_str = u" %%-%ds %%s" % name_len
+ data.append(u'')
+ data.append(format_str % (u"Name", u"Description"))
+ data.append(format_str % (u"----", u"-----------"))
+ for role in response['results']:
+ data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
+
+ data = u'\n'.join(data)
+ self.pager(data)
+
+ return True
+
+ def execute_import(self):
+ """ used to import a role into Ansible Galaxy """
+
+ colors = {
+ 'INFO': 'normal',
+ 'WARNING': C.COLOR_WARN,
+ 'ERROR': C.COLOR_ERROR,
+ 'SUCCESS': C.COLOR_OK,
+ 'FAILED': C.COLOR_ERROR,
+ }
+
+ github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
+ github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
+
+ if context.CLIARGS['check_status']:
+ task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
+ else:
+ # Submit an import request
+ task = self.api.create_import_task(github_user, github_repo,
+ reference=context.CLIARGS['reference'],
+ role_name=context.CLIARGS['role_name'])
+
+ if len(task) > 1:
+ # found multiple roles associated with github_user/github_repo
+            display.display("WARNING: More than one Galaxy role associated with GitHub repo %s/%s." % (github_user, github_repo),
+ color='yellow')
+ display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
+ for t in task:
+ display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
+ display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
+ color=C.COLOR_CHANGED)
+ return 0
+ # found a single role as expected
+ display.display("Successfully submitted import request %d" % task[0]['id'])
+ if not context.CLIARGS['wait']:
+ display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
+ display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
+
+ if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
+ # Get the status of the import
+ msg_list = []
+ finished = False
+ while not finished:
+ task = self.api.get_import_task(task_id=task[0]['id'])
+ for msg in task[0]['summary_fields']['task_messages']:
+ if msg['id'] not in msg_list:
+ display.display(msg['message_text'], color=colors[msg['message_type']])
+ msg_list.append(msg['id'])
+ if task[0]['state'] in ['SUCCESS', 'FAILED']:
+ finished = True
+ else:
+ time.sleep(10)
+
+ return 0
+
+ def execute_setup(self):
+        """ Set up an integration from GitHub or Travis for Ansible Galaxy roles"""
+
+ if context.CLIARGS['setup_list']:
+ # List existing integration secrets
+ secrets = self.api.list_secrets()
+ if len(secrets) == 0:
+ # None found
+ display.display("No integrations found.")
+ return 0
+ display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
+ display.display("---------- ---------- ----------", color=C.COLOR_OK)
+ for secret in secrets:
+ display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
+ secret['github_repo']), color=C.COLOR_OK)
+ return 0
+
+ if context.CLIARGS['remove_id']:
+ # Remove a secret
+ self.api.remove_secret(context.CLIARGS['remove_id'])
+            display.display("Secret removed. Integrations using this secret will no longer work.", color=C.COLOR_OK)
+ return 0
+
+ source = context.CLIARGS['source']
+ github_user = context.CLIARGS['github_user']
+ github_repo = context.CLIARGS['github_repo']
+ secret = context.CLIARGS['secret']
+
+ resp = self.api.add_secret(source, github_user, github_repo, secret)
+ display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
+
+ return 0
+
+ def execute_delete(self):
+ """ Delete a role from Ansible Galaxy. """
+
+ github_user = context.CLIARGS['github_user']
+ github_repo = context.CLIARGS['github_repo']
+ resp = self.api.delete_role(github_user, github_repo)
+
+ if len(resp['deleted_roles']) > 1:
+ display.display("Deleted the following roles:")
+ display.display("ID User Name")
+ display.display("------ --------------- ----------")
+ for role in resp['deleted_roles']:
+ display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
+
+ display.display(resp['status'])
+
+ return True
diff --git a/lib/ansible/cli/inventory.py b/lib/ansible/cli/inventory.py
new file mode 100644
index 00000000..9f423747
--- /dev/null
+++ b/lib/ansible/cli/inventory.py
@@ -0,0 +1,391 @@
+# Copyright: (c) 2017, Brian Coca <bcoca@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import argparse
+from operator import attrgetter
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.utils.vars import combine_vars
+from ansible.utils.display import Display
+from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
+
+display = Display()
+
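+# Vars injected by Ansible itself; _remove_internal() strips these from any
+# dumped output so the result only reflects user-defined inventory data.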
+INTERNAL_VARS = frozenset(['ansible_diff_mode',
+ 'ansible_config_file',
+ 'ansible_facts',
+ 'ansible_forks',
+ 'ansible_inventory_sources',
+ 'ansible_limit',
+ 'ansible_playbook_python',
+ 'ansible_run_tags',
+ 'ansible_skip_tags',
+ 'ansible_verbosity',
+ 'ansible_version',
+ 'inventory_dir',
+ 'inventory_file',
+ 'inventory_hostname',
+ 'inventory_hostname_short',
+ 'groups',
+ 'group_names',
+ 'omit',
+ 'playbook_dir', ])
+
+
+class InventoryCLI(CLI):
+ ''' used to display or dump the configured inventory as Ansible sees it '''
+
+ ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
+ 'group': 'The name of a group in the inventory, relevant when using --graph', }
+
+ def __init__(self, args):
+
+ super(InventoryCLI, self).__init__(args)
+ self.vm = None
+ self.loader = None
+ self.inventory = None
+
+ def init_parser(self):
+ super(InventoryCLI, self).init_parser(
+ usage='usage: %prog [options] [host|group]',
+ epilog='Show Ansible inventory information, by default it uses the inventory script JSON format')
+
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_basedir_options(self.parser)
+
+ # remove unused default options
+ self.parser.add_argument('-l', '--limit', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument, nargs='?')
+ self.parser.add_argument('--list-hosts', help=argparse.SUPPRESS, action=opt_help.UnrecognizedArgument)
+
+ self.parser.add_argument('args', metavar='host|group', nargs='?')
+
+ # Actions
+        action_group = self.parser.add_argument_group("Actions", "One of the following must be used on invocation, ONLY ONE!")
+ action_group.add_argument("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
+ action_group.add_argument("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
+ action_group.add_argument("--graph", action="store_true", default=False, dest='graph',
+ help='create inventory graph, if supplying pattern it must be a valid group name')
+ self.parser.add_argument_group(action_group)
+
+ # graph
+ self.parser.add_argument("-y", "--yaml", action="store_true", default=False, dest='yaml',
+ help='Use YAML format instead of default JSON, ignored for --graph')
+ self.parser.add_argument('--toml', action='store_true', default=False, dest='toml',
+ help='Use TOML format instead of default JSON, ignored for --graph')
+ self.parser.add_argument("--vars", action="store_true", default=False, dest='show_vars',
+ help='Add vars to graph display, ignored unless used with --graph')
+
+ # list
+ self.parser.add_argument("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export',
+                                 help="When doing --list, represent in a way that is optimized for export, "
+                                      "not as an accurate representation of how Ansible has processed it")
+ self.parser.add_argument('--output', default=None, dest='output_file',
+ help="When doing --list, send the inventory to a file instead of to the screen")
+ # self.parser.add_argument("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins',
+ # help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/")
+
+ def post_process_args(self, options):
+ options = super(InventoryCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options)
+
+ # there can be only one! and, at least, one!
+ used = 0
+ for opt in (options.list, options.host, options.graph):
+ if opt:
+ used += 1
+ if used == 0:
+ raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
+ elif used > 1:
+ raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
+
+ # set host pattern to default if not supplied
+ if options.args:
+ options.pattern = options.args
+ else:
+ options.pattern = 'all'
+
+ return options
+
+ def run(self):
+
+ super(InventoryCLI, self).run()
+
+ # Initialize needed objects
+ self.loader, self.inventory, self.vm = self._play_prereqs()
+
+ results = None
+ if context.CLIARGS['host']:
+ hosts = self.inventory.get_hosts(context.CLIARGS['host'])
+ if len(hosts) != 1:
+ raise AnsibleOptionsError("You must pass a single valid host to --host parameter")
+
+ myvars = self._get_host_variables(host=hosts[0])
+
+ # FIXME: should we template first?
+ results = self.dump(myvars)
+
+ elif context.CLIARGS['graph']:
+ results = self.inventory_graph()
+ elif context.CLIARGS['list']:
+ top = self._get_group('all')
+ if context.CLIARGS['yaml']:
+ results = self.yaml_inventory(top)
+ elif context.CLIARGS['toml']:
+ results = self.toml_inventory(top)
+ else:
+ results = self.json_inventory(top)
+ results = self.dump(results)
+
+ if results:
+ outfile = context.CLIARGS['output_file']
+ if outfile is None:
+ # FIXME: pager?
+ display.display(results)
+ else:
+ try:
+ with open(to_bytes(outfile), 'wt') as f:
+ f.write(results)
+ except (OSError, IOError) as e:
+ raise AnsibleError('Unable to write to destination file (%s): %s' % (to_native(outfile), to_native(e)))
+ sys.exit(0)
+
+ sys.exit(1)
+
+ @staticmethod
+ def dump(stuff):
+
+ if context.CLIARGS['yaml']:
+ import yaml
+ from ansible.parsing.yaml.dumper import AnsibleDumper
+ results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
+ elif context.CLIARGS['toml']:
+ from ansible.plugins.inventory.toml import toml_dumps, HAS_TOML
+ if not HAS_TOML:
+ raise AnsibleError(
+ 'The python "toml" library is required when using the TOML output format'
+ )
+ results = toml_dumps(stuff)
+ else:
+ import json
+ from ansible.parsing.ajson import AnsibleJSONEncoder
+ results = json.dumps(stuff, cls=AnsibleJSONEncoder, sort_keys=True, indent=4, preprocess_unsafe=True)
+
+ return results
+
+ def _get_group_variables(self, group):
+
+ # get info from inventory source
+ res = group.get_vars()
+
+ # Always load vars plugins
+ res = combine_vars(res, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [group], 'all'))
+ if context.CLIARGS['basedir']:
+ res = combine_vars(res, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [group], 'all'))
+
+ if group.priority != 1:
+ res['ansible_group_priority'] = group.priority
+
+ return self._remove_internal(res)
+
+ def _get_host_variables(self, host):
+
+ if context.CLIARGS['export']:
+            # only get vars defined directly on the host
+ hostvars = host.get_vars()
+
+ # Always load vars plugins
+ hostvars = combine_vars(hostvars, get_vars_from_inventory_sources(self.loader, self.inventory._sources, [host], 'all'))
+ if context.CLIARGS['basedir']:
+ hostvars = combine_vars(hostvars, get_vars_from_path(self.loader, context.CLIARGS['basedir'], [host], 'all'))
+ else:
+ # get all vars flattened by host, but skip magic hostvars
+ hostvars = self.vm.get_vars(host=host, include_hostvars=False, stage='all')
+
+ return self._remove_internal(hostvars)
+
+ def _get_group(self, gname):
+ group = self.inventory.groups.get(gname)
+ return group
+
+ @staticmethod
+ def _remove_internal(dump):
+
+ for internal in INTERNAL_VARS:
+ if internal in dump:
+ del dump[internal]
+
+ return dump
+
+ @staticmethod
+ def _remove_empty(dump):
+ # remove empty keys
+ for x in ('hosts', 'vars', 'children'):
+ if x in dump and not dump[x]:
+ del dump[x]
+
+ @staticmethod
+ def _show_vars(dump, depth):
+ result = []
+ for (name, val) in sorted(dump.items()):
+ result.append(InventoryCLI._graph_name('{%s = %s}' % (name, val), depth))
+ return result
+
+ @staticmethod
+ def _graph_name(name, depth=0):
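+        # e.g. depth=2 turns 'web1' into ' | |--web1' (illustrative)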
+ if depth:
+ name = " |" * (depth) + "--%s" % name
+ return name
+
+ def _graph_group(self, group, depth=0):
+
+ result = [self._graph_name('@%s:' % group.name, depth)]
+ depth = depth + 1
+ for kid in sorted(group.child_groups, key=attrgetter('name')):
+ result.extend(self._graph_group(kid, depth))
+
+ if group.name != 'all':
+ for host in sorted(group.hosts, key=attrgetter('name')):
+ result.append(self._graph_name(host.name, depth))
+ if context.CLIARGS['show_vars']:
+ result.extend(self._show_vars(self._get_host_variables(host), depth + 1))
+
+ if context.CLIARGS['show_vars']:
+ result.extend(self._show_vars(self._get_group_variables(group), depth))
+
+ return result
+
+ def inventory_graph(self):
+
+ start_at = self._get_group(context.CLIARGS['pattern'])
+ if start_at:
+ return '\n'.join(self._graph_group(start_at))
+ else:
+ raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
+
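+    # The JSON output follows the external-inventory-script contract: one key per
+    # group (with 'hosts'/'children'/'vars' as applicable) plus a top-level '_meta'
+    # key carrying hostvars. A rough sketch of the shape (illustrative values):
+    #   {"all": {"children": ["ungrouped", "web"]},
+    #    "web": {"hosts": ["web1"]},
+    #    "_meta": {"hostvars": {"web1": {"ansible_host": "10.0.0.5"}}}}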
+ def json_inventory(self, top):
+
+ seen = set()
+
+ def format_group(group):
+ results = {}
+ results[group.name] = {}
+ if group.name != 'all':
+ results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
+ results[group.name]['children'] = []
+ for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ results[group.name]['children'].append(subgroup.name)
+ if subgroup.name not in seen:
+ results.update(format_group(subgroup))
+ seen.add(subgroup.name)
+ if context.CLIARGS['export']:
+ results[group.name]['vars'] = self._get_group_variables(group)
+
+ self._remove_empty(results[group.name])
+ if not results[group.name]:
+ del results[group.name]
+
+ return results
+
+ results = format_group(top)
+
+ # populate meta
+ results['_meta'] = {'hostvars': {}}
+ hosts = self.inventory.get_hosts()
+ for host in hosts:
+ hvars = self._get_host_variables(host)
+ if hvars:
+ results['_meta']['hostvars'][host.name] = hvars
+
+ return results
+
+ def yaml_inventory(self, top):
+
+ seen = []
+
+ def format_group(group):
+ results = {}
+
+ # initialize group + vars
+ results[group.name] = {}
+
+ # subgroups
+ results[group.name]['children'] = {}
+ for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ if subgroup.name != 'all':
+ results[group.name]['children'].update(format_group(subgroup))
+
+ # hosts for group
+ results[group.name]['hosts'] = {}
+ if group.name != 'all':
+ for h in sorted(group.hosts, key=attrgetter('name')):
+ myvars = {}
+ if h.name not in seen: # avoid defining host vars more than once
+ seen.append(h.name)
+ myvars = self._get_host_variables(host=h)
+ results[group.name]['hosts'][h.name] = myvars
+
+ if context.CLIARGS['export']:
+ gvars = self._get_group_variables(group)
+ if gvars:
+ results[group.name]['vars'] = gvars
+
+ self._remove_empty(results[group.name])
+
+ return results
+
+ return format_group(top)
+
+ def toml_inventory(self, top):
+ seen = set()
+        # default to False if there is no 'ungrouped' child group, to avoid StopIteration
+        has_ungrouped = bool(next((g.hosts for g in top.child_groups if g.name == 'ungrouped'), False))
+
+ def format_group(group):
+ results = {}
+ results[group.name] = {}
+
+ results[group.name]['children'] = []
+ for subgroup in sorted(group.child_groups, key=attrgetter('name')):
+ if subgroup.name == 'ungrouped' and not has_ungrouped:
+ continue
+ if group.name != 'all':
+ results[group.name]['children'].append(subgroup.name)
+ results.update(format_group(subgroup))
+
+ if group.name != 'all':
+ for host in sorted(group.hosts, key=attrgetter('name')):
+ if host.name not in seen:
+ seen.add(host.name)
+ host_vars = self._get_host_variables(host=host)
+ else:
+ host_vars = {}
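+                    # the group's 'hosts' table is created lazily on the first host seen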
+ try:
+ results[group.name]['hosts'][host.name] = host_vars
+ except KeyError:
+ results[group.name]['hosts'] = {host.name: host_vars}
+
+ if context.CLIARGS['export']:
+ results[group.name]['vars'] = self._get_group_variables(group)
+
+ self._remove_empty(results[group.name])
+ if not results[group.name]:
+ del results[group.name]
+
+ return results
+
+ results = format_group(top)
+
+ return results
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
new file mode 100644
index 00000000..f8246b2a
--- /dev/null
+++ b/lib/ansible/cli/playbook.py
@@ -0,0 +1,203 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import stat
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError
+from ansible.executor.playbook_executor import PlaybookExecutor
+from ansible.module_utils._text import to_bytes
+from ansible.playbook.block import Block
+from ansible.utils.display import Display
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path
+from ansible.plugins.loader import add_all_plugin_dirs
+
+
+display = Display()
+
+
+class PlaybookCLI(CLI):
+ ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
+ See the project home page (https://docs.ansible.com) for more information. '''
+
+ def init_parser(self):
+
+ # create parser for CLI options
+ super(PlaybookCLI, self).init_parser(
+ usage="%prog [options] playbook.yml [playbook2 ...]",
+ desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.")
+
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_meta_options(self.parser)
+ opt_help.add_runas_options(self.parser)
+ opt_help.add_subset_options(self.parser)
+ opt_help.add_check_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_fork_options(self.parser)
+ opt_help.add_module_options(self.parser)
+
+ # ansible playbook specific opts
+ self.parser.add_argument('--list-tasks', dest='listtasks', action='store_true',
+ help="list all tasks that would be executed")
+ self.parser.add_argument('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
+ self.parser.add_argument('--step', dest='step', action='store_true',
+ help="one-step-at-a-time: confirm each task before running")
+ self.parser.add_argument('--start-at-task', dest='start_at_task',
+ help="start the playbook at the task matching this name")
+ self.parser.add_argument('args', help='Playbook(s)', metavar='playbook', nargs='+')
+
+ def post_process_args(self, options):
+ options = super(PlaybookCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options, runas_opts=True, fork_opts=True)
+
+ return options
+
+ def run(self):
+
+ super(PlaybookCLI, self).run()
+
+ # Note: slightly wrong, this is written so that implicit localhost
+ # manages passwords
+ sshpass = None
+ becomepass = None
+ passwords = {}
+
+ # initial error check, to make sure all specified playbooks are accessible
+ # before we start running anything through the playbook executor
+
+ b_playbook_dirs = []
+ for playbook in context.CLIARGS['args']:
+ if not os.path.exists(playbook):
+ raise AnsibleError("the playbook: %s could not be found" % playbook)
+ if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+ raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+ b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
+ # load plugins from all playbooks in case they add callbacks/inventory/etc
+ add_all_plugin_dirs(b_playbook_dir)
+
+ b_playbook_dirs.append(b_playbook_dir)
+
+ AnsibleCollectionConfig.playbook_paths = b_playbook_dirs
+
+ playbook_collection = _get_collection_name_from_path(b_playbook_dirs[0])
+
+ if playbook_collection:
+ display.warning("running playbook inside collection {0}".format(playbook_collection))
+ AnsibleCollectionConfig.default_collection = playbook_collection
+
+ # don't deal with privilege escalation or passwords when we don't need to
+ if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
+ context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
+ (sshpass, becomepass) = self.ask_passwords()
+ passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
+
+ # create base objects
+ loader, inventory, variable_manager = self._play_prereqs()
+
+        # Implicit localhost (which is not returned in list_hosts()) is taken into account for
+ # warning if inventory is empty. But it can't be taken into account for
+ # checking if limit doesn't match any hosts. Instead we don't worry about
+ # limit if only implicit localhost was in inventory to start with.
+ #
+ # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
+ CLI.get_host_list(inventory, context.CLIARGS['subset'])
+
+ # flush fact cache if requested
+ if context.CLIARGS['flush_cache']:
+ self._flush_cache(inventory, variable_manager)
+
+ # create the playbook executor, which manages running the plays via a task queue manager
+ pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
+ variable_manager=variable_manager, loader=loader,
+ passwords=passwords)
+
+ results = pbex.run()
+
+ if isinstance(results, list):
+ for p in results:
+
+ display.display('\nplaybook: %s' % p['playbook'])
+ for idx, play in enumerate(p['plays']):
+ if play._included_path is not None:
+ loader.set_basedir(play._included_path)
+ else:
+ pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
+ loader.set_basedir(pb_dir)
+
+ msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
+ mytags = set(play.tags)
+ msg += '\tTAGS: [%s]' % (','.join(mytags))
+
+ if context.CLIARGS['listhosts']:
+ playhosts = set(inventory.get_hosts(play.hosts))
+ msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
+ for host in playhosts:
+ msg += "\n %s" % host
+
+ display.display(msg)
+
+ all_tags = set()
+ if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
+ taskmsg = ''
+ if context.CLIARGS['listtasks']:
+ taskmsg = ' tasks:\n'
+
+ def _process_block(b):
+ taskmsg = ''
+ for task in b.block:
+ if isinstance(task, Block):
+ taskmsg += _process_block(task)
+ else:
+ if task.action in C._ACTION_META:
+ continue
+
+ all_tags.update(task.tags)
+ if context.CLIARGS['listtasks']:
+ cur_tags = list(mytags.union(set(task.tags)))
+ cur_tags.sort()
+ if task.name:
+ taskmsg += " %s" % task.get_name()
+ else:
+ taskmsg += " %s" % task.action
+ taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
+
+ return taskmsg
+
+ all_vars = variable_manager.get_vars(play=play)
+ for block in play.compile():
+ block = block.filter_tagged_tasks(all_vars)
+ if not block.has_tasks():
+ continue
+ taskmsg += _process_block(block)
+
+ if context.CLIARGS['listtags']:
+ cur_tags = list(mytags.union(all_tags))
+ cur_tags.sort()
+ taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
+
+ display.display(taskmsg)
+
+ return 0
+ else:
+ return results
+
+ @staticmethod
+ def _flush_cache(inventory, variable_manager):
+ for host in inventory.list_hosts():
+ hostname = host.get_name()
+ variable_manager.clear_facts(hostname)
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
new file mode 100644
index 00000000..55b5a2b0
--- /dev/null
+++ b/lib/ansible/cli/pull.py
@@ -0,0 +1,336 @@
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import os
+import platform
+import random
+import shutil
+import socket
+import sys
+import time
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.plugins.loader import module_loader
+from ansible.utils.cmd_functions import run_cmd
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class PullCLI(CLI):
+    ''' Used to pull a remote copy of ansible onto each managed node,
+ each set to run via cron and update playbook source via a source repository.
+ This inverts the default *push* architecture of ansible into a *pull* architecture,
+ which has near-limitless scaling potential.
+
+ The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull.
+ This is useful both for extreme scale-out as well as periodic remediation.
+ Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an
+        excellent way to gather and analyze remote logs.
+ '''
+
+ DEFAULT_REPO_TYPE = 'git'
+ DEFAULT_PLAYBOOK = 'local.yml'
+ REPO_CHOICES = ('git', 'subversion', 'hg', 'bzr')
+ PLAYBOOK_ERRORS = {
+ 1: 'File does not exist',
+ 2: 'File is not readable',
+ }
+ SUPPORTED_REPO_MODULES = ['git']
+    ARGUMENTS = {'playbook.yml': 'The name of one of the YAML format files to run as an Ansible playbook. '
+                                 'This can be a relative path within the checkout. By default, Ansible will '
+                                 "look for a playbook based on the host's fully-qualified domain name, "
+                                 'then on the host hostname, and finally a playbook named *local.yml*.', }
+
+ SKIP_INVENTORY_DEFAULTS = True
+
+ @staticmethod
+ def _get_inv_cli():
+ inv_opts = ''
+ if context.CLIARGS.get('inventory', False):
+ for inv in context.CLIARGS['inventory']:
+ if isinstance(inv, list):
+ inv_opts += " -i '%s' " % ','.join(inv)
+ elif ',' in inv or os.path.exists(inv):
+ inv_opts += ' -i %s ' % inv
+
+ return inv_opts
+
+ def init_parser(self):
+ ''' create an options parser for bin/ansible '''
+
+ super(PullCLI, self).init_parser(
+ usage='%prog -U <repository> [options] [<playbook.yml>]',
+ desc="pulls playbooks from a VCS repo and executes them for the local host")
+
+ # Do not add check_options as there's a conflict with --checkout/-C
+ opt_help.add_connect_options(self.parser)
+ opt_help.add_vault_options(self.parser)
+ opt_help.add_runtask_options(self.parser)
+ opt_help.add_subset_options(self.parser)
+ opt_help.add_inventory_options(self.parser)
+ opt_help.add_module_options(self.parser)
+ opt_help.add_runas_prompt_options(self.parser)
+
+ self.parser.add_argument('args', help='Playbook(s)', metavar='playbook.yml', nargs='*')
+
+ # options unique to pull
+ self.parser.add_argument('--purge', default=False, action='store_true', help='purge checkout after playbook run')
+ self.parser.add_argument('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
+ help='only run the playbook if the repository has been updated')
+ self.parser.add_argument('-s', '--sleep', dest='sleep', default=None,
+                                 help='sleep for a random interval (between 0 and n seconds) before starting. '
+ 'This is a useful way to disperse git requests')
+ self.parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
+ help='run the playbook even if the repository could not be updated')
+ self.parser.add_argument('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to')
+ self.parser.add_argument('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
+ self.parser.add_argument('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.')
+ self.parser.add_argument('-C', '--checkout', dest='checkout',
+ help='branch/tag/commit to checkout. Defaults to behavior of repository module.')
+ self.parser.add_argument('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
+ help='adds the hostkey for the repo url if not already added')
+ self.parser.add_argument('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
+ help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.'
+ % (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE))
+ self.parser.add_argument('--verify-commit', dest='verify', default=False, action='store_true',
+                                 help='verify the GPG signature of the checked-out commit; if it fails, abort running the playbook. '
+ 'This needs the corresponding VCS module to support such an operation')
+ self.parser.add_argument('--clean', dest='clean', default=False, action='store_true',
+ help='modified files in the working repository will be discarded')
+ self.parser.add_argument('--track-subs', dest='tracksubs', default=False, action='store_true',
+ help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update')
+ # add a subset of the check_opts flag group manually, as the full set's
+ # shortcodes conflict with above --checkout/-C
+ self.parser.add_argument("--check", default=False, dest='check', action='store_true',
+ help="don't make any changes; instead, try to predict some of the changes that may occur")
+ self.parser.add_argument("--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
+ help="when changing (small) files and templates, show the differences in those files; works great with --check")
+
+ def post_process_args(self, options):
+ options = super(PullCLI, self).post_process_args(options)
+
+ if not options.dest:
+ hostname = socket.getfqdn()
+ # use a hostname dependent directory, in case of $HOME on nfs
+ options.dest = os.path.join('~/.ansible/pull', hostname)
+ options.dest = os.path.expandvars(os.path.expanduser(options.dest))
+
+ if os.path.exists(options.dest) and not os.path.isdir(options.dest):
+ raise AnsibleOptionsError("%s is not a valid or accessible directory." % options.dest)
+
+ if options.sleep:
+ try:
+ secs = random.randint(0, int(options.sleep))
+ options.sleep = secs
+ except ValueError:
+ raise AnsibleOptionsError("%s is not a number." % options.sleep)
+
+ if not options.url:
+ raise AnsibleOptionsError("URL for repository not specified, use -h for help")
+
+ if options.module_name not in self.SUPPORTED_REPO_MODULES:
+ raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))
+
+ display.verbosity = options.verbosity
+ self.validate_conflicts(options)
+
+ return options
+
+ def run(self):
+        ''' run the VCS checkout and then execute the discovered playbook locally '''
+
+ super(PullCLI, self).run()
+
+ # log command line
+ now = datetime.datetime.now()
+ display.display(now.strftime("Starting Ansible Pull at %F %T"))
+ display.display(' '.join(sys.argv))
+
+        # Build the checkout command: construct an ad-hoc ansible invocation
+        # that runs the chosen SCM module against localhost
+ node = platform.node()
+ host = socket.getfqdn()
+ limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
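+        # e.g. for host 'web1.example.com' this yields something like
+        # 'localhost,web1.example.com,web1,127.0.0.1' (set order is not guaranteed)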
+ base_opts = '-c local '
+ if context.CLIARGS['verbosity'] > 0:
+ base_opts += ' -%s' % ''.join(["v" for x in range(0, context.CLIARGS['verbosity'])])
+
+ # Attempt to use the inventory passed in as an argument
+ # It might not yet have been downloaded so use localhost as default
+ inv_opts = self._get_inv_cli()
+ if not inv_opts:
+ inv_opts = " -i localhost, "
+ # avoid interpreter discovery since we already know which interpreter to use on localhost
+ inv_opts += '-e %s ' % shlex_quote('ansible_python_interpreter=%s' % sys.executable)
+
+ # SCM specific options
+ if context.CLIARGS['module_name'] == 'git':
+ repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' version=%s' % context.CLIARGS['checkout']
+
+ if context.CLIARGS['accept_host_key']:
+ repo_opts += ' accept_hostkey=yes'
+
+ if context.CLIARGS['private_key_file']:
+ repo_opts += ' key_file=%s' % context.CLIARGS['private_key_file']
+
+ if context.CLIARGS['verify']:
+ repo_opts += ' verify_commit=yes'
+
+ if context.CLIARGS['tracksubs']:
+ repo_opts += ' track_submodules=yes'
+
+ if not context.CLIARGS['fullclone']:
+ repo_opts += ' depth=1'
+ elif context.CLIARGS['module_name'] == 'subversion':
+ repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' revision=%s' % context.CLIARGS['checkout']
+ if not context.CLIARGS['fullclone']:
+ repo_opts += ' export=yes'
+ elif context.CLIARGS['module_name'] == 'hg':
+ repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' revision=%s' % context.CLIARGS['checkout']
+ elif context.CLIARGS['module_name'] == 'bzr':
+ repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
+ if context.CLIARGS['checkout']:
+ repo_opts += ' version=%s' % context.CLIARGS['checkout']
+ else:
+ raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s'
+ % (context.CLIARGS['module_name'],
+ ','.join(self.REPO_CHOICES)))
+
+ # options common to all supported SCMS
+ if context.CLIARGS['clean']:
+ repo_opts += ' force=yes'
+
+ path = module_loader.find_plugin(context.CLIARGS['module_name'])
+ if path is None:
+ raise AnsibleOptionsError(("module '%s' not found.\n" % context.CLIARGS['module_name']))
+
+ bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ # hardcode local and inventory/host as this is just meant to fetch the repo
+ cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts,
+ context.CLIARGS['module_name'],
+ repo_opts, limit_opts)
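+        # roughly (illustrative, assuming the git defaults above):
+        #   <bin>/ansible -i localhost, ... -c local -m git -a "name=<url> dest=<dest> depth=1" all -l "localhost,<fqdn>,..."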
+ for ev in context.CLIARGS['extra_vars']:
+ cmd += ' -e %s' % shlex_quote(ev)
+
+ # Nap?
+ if context.CLIARGS['sleep']:
+ display.display("Sleeping for %d seconds..." % context.CLIARGS['sleep'])
+ time.sleep(context.CLIARGS['sleep'])
+
+ # RUN the Checkout command
+ display.debug("running ansible with VCS module to checkout repo")
+ display.vvvv('EXEC: %s' % cmd)
+ rc, b_out, b_err = run_cmd(cmd, live=True)
+
+ if rc != 0:
+ if context.CLIARGS['force']:
+ display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
+ else:
+ return rc
+ elif context.CLIARGS['ifchanged'] and b'"changed": true' not in b_out:
+ display.display("Repository has not changed, quitting.")
+ return 0
+
+ playbook = self.select_playbook(context.CLIARGS['dest'])
+ if playbook is None:
+ raise AnsibleOptionsError("Could not find a playbook to run.")
+
+ # Build playbook command
+ cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
+ if context.CLIARGS['vault_password_files']:
+ for vault_password_file in context.CLIARGS['vault_password_files']:
+ cmd += " --vault-password-file=%s" % vault_password_file
+ if context.CLIARGS['vault_ids']:
+ for vault_id in context.CLIARGS['vault_ids']:
+ cmd += " --vault-id=%s" % vault_id
+
+ for ev in context.CLIARGS['extra_vars']:
+ cmd += ' -e %s' % shlex_quote(ev)
+ if context.CLIARGS['become_ask_pass']:
+ cmd += ' --ask-become-pass'
+ if context.CLIARGS['skip_tags']:
+ cmd += ' --skip-tags "%s"' % to_native(u','.join(context.CLIARGS['skip_tags']))
+ if context.CLIARGS['tags']:
+ cmd += ' -t "%s"' % to_native(u','.join(context.CLIARGS['tags']))
+ if context.CLIARGS['subset']:
+ cmd += ' -l "%s"' % context.CLIARGS['subset']
+ else:
+ cmd += ' -l "%s"' % limit_opts
+ if context.CLIARGS['check']:
+ cmd += ' -C'
+ if context.CLIARGS['diff']:
+ cmd += ' -D'
+
+ os.chdir(context.CLIARGS['dest'])
+
+ # redo inventory options as new files might exist now
+ inv_opts = self._get_inv_cli()
+ if inv_opts:
+ cmd += inv_opts
+
+ # RUN THE PLAYBOOK COMMAND
+ display.debug("running ansible-playbook to do actual work")
+ display.debug('EXEC: %s' % cmd)
+ rc, b_out, b_err = run_cmd(cmd, live=True)
+
+ if context.CLIARGS['purge']:
+ os.chdir('/')
+ try:
+ shutil.rmtree(context.CLIARGS['dest'])
+ except Exception as e:
+ display.error(u"Failed to remove %s: %s" % (context.CLIARGS['dest'], to_text(e)))
+
+ return rc
+
+ @staticmethod
+ def try_playbook(path):
+ if not os.path.exists(path):
+ return 1
+ if not os.access(path, os.R_OK):
+ return 2
+ return 0
+
+ @staticmethod
+ def select_playbook(path):
+ playbook = None
+ if context.CLIARGS['args'] and context.CLIARGS['args'][0] is not None:
+ playbook = os.path.join(path, context.CLIARGS['args'][0])
+ rc = PullCLI.try_playbook(playbook)
+ if rc != 0:
+ display.warning("%s: %s" % (playbook, PullCLI.PLAYBOOK_ERRORS[rc]))
+ return None
+ return playbook
+ else:
+ fqdn = socket.getfqdn()
+ hostpb = os.path.join(path, fqdn + '.yml')
+ shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
+ localpb = os.path.join(path, PullCLI.DEFAULT_PLAYBOOK)
+ errors = []
+ for pb in [hostpb, shorthostpb, localpb]:
+ rc = PullCLI.try_playbook(pb)
+ if rc == 0:
+ playbook = pb
+ break
+ else:
+ errors.append("%s: %s" % (pb, PullCLI.PLAYBOOK_ERRORS[rc]))
+ if playbook is None:
+ display.warning("\n".join(errors))
+ return playbook
diff --git a/lib/ansible/cli/scripts/__init__.py b/lib/ansible/cli/scripts/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/cli/scripts/__init__.py
diff --git a/lib/ansible/cli/scripts/ansible_cli_stub.py b/lib/ansible/cli/scripts/ansible_cli_stub.py
new file mode 100755
index 00000000..2ede010e
--- /dev/null
+++ b/lib/ansible/cli/scripts/ansible_cli_stub.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# PYTHON_ARGCOMPLETE_OK
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__requires__ = ['ansible_base']
+
+
+import errno
+import os
+import shutil
+import sys
+import traceback
+
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.module_utils._text import to_text
+
+
+# Used to determine whether the system is running a new enough Python version;
+# we only restrict on our documented minimum versions
+_PY3_MIN = sys.version_info[:2] >= (3, 5)
+_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
+_PY_MIN = _PY3_MIN or _PY2_MIN
+if not _PY_MIN:
+ raise SystemExit('ERROR: Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s' % ''.join(sys.version.splitlines()))
+
+
+class LastResort(object):
+ # OUTPUT OF LAST RESORT
+ def display(self, msg, log_only=None):
+ print(msg, file=sys.stderr)
+
+ def error(self, msg, wrap_text=None):
+ print(msg, file=sys.stderr)
+
+
+if __name__ == '__main__':
+
+ display = LastResort()
+
+ try: # bad ANSIBLE_CONFIG or config options can force ugly stacktrace
+ import ansible.constants as C
+ from ansible.utils.display import Display
+ except AnsibleOptionsError as e:
+ display.error(to_text(e), wrap_text=False)
+ sys.exit(5)
+
+ cli = None
+ me = os.path.basename(sys.argv[0])
+
+ try:
+ display = Display()
+ display.debug("starting run")
+
+ sub = None
+ target = me.split('-')
+ if target[-1][0].isdigit():
+ # Remove any version or python version info as downstreams
+ # sometimes add that
+ target = target[:-1]
+
+ if len(target) > 1:
+ sub = target[1]
+ myclass = "%sCLI" % sub.capitalize()
+ elif target[0] == 'ansible':
+ sub = 'adhoc'
+ myclass = 'AdHocCLI'
+ else:
+ raise AnsibleError("Unknown Ansible alias: %s" % me)
+
+ try:
+ mycli = getattr(__import__("ansible.cli.%s" % sub, fromlist=[myclass]), myclass)
+ except ImportError as e:
+ # ImportError members have changed in py3
+ if 'msg' in dir(e):
+ msg = e.msg
+ else:
+ msg = e.message
+ if msg.endswith(' %s' % sub):
+ raise AnsibleError("Ansible sub-program not implemented: %s" % me)
+ else:
+ raise
+
+ b_ansible_dir = os.path.expanduser(os.path.expandvars(b"~/.ansible"))
+ try:
+ os.mkdir(b_ansible_dir, 0o700)
+ except OSError as exc:
+ if exc.errno != errno.EEXIST:
+ display.warning("Failed to create the directory '%s': %s"
+ % (to_text(b_ansible_dir, errors='surrogate_or_replace'),
+ to_text(exc, errors='surrogate_or_replace')))
+ else:
+ display.debug("Created the '%s' directory" % to_text(b_ansible_dir, errors='surrogate_or_replace'))
+
+ try:
+ args = [to_text(a, errors='surrogate_or_strict') for a in sys.argv]
+ except UnicodeError:
+ display.error('Command line args are not in utf-8, unable to continue. Ansible currently only understands utf-8')
+ display.display(u"The full traceback was:\n\n%s" % to_text(traceback.format_exc()))
+ exit_code = 6
+ else:
+ cli = mycli(args)
+ exit_code = cli.run()
+
+ except AnsibleOptionsError as e:
+ cli.parser.print_help()
+ display.error(to_text(e), wrap_text=False)
+ exit_code = 5
+ except AnsibleParserError as e:
+ display.error(to_text(e), wrap_text=False)
+ exit_code = 4
+# TQM takes care of these, but leaving comment to reserve the exit codes
+# except AnsibleHostUnreachable as e:
+# display.error(str(e))
+# exit_code = 3
+# except AnsibleHostFailed as e:
+# display.error(str(e))
+# exit_code = 2
+ except AnsibleError as e:
+ display.error(to_text(e), wrap_text=False)
+ exit_code = 1
+ except KeyboardInterrupt:
+ display.error("User interrupted execution")
+ exit_code = 99
+ except Exception as e:
+ if C.DEFAULT_DEBUG:
+ # Show raw stacktraces in debug mode; this also allows pdb to
+ # enter post-mortem mode.
+ raise
+ have_cli_options = bool(context.CLIARGS)
+ display.error("Unexpected Exception, this is probably a bug: %s" % to_text(e), wrap_text=False)
+ if not have_cli_options or context.CLIARGS['verbosity'] > 2:
+ log_only = False
+ if hasattr(e, 'orig_exc'):
+ display.vvv('\nexception type: %s' % to_text(type(e.orig_exc)))
+ why = to_text(e.orig_exc)
+ if to_text(e) != why:
+ display.vvv('\noriginal msg: %s' % why)
+ else:
+ display.display("to see the full traceback, use -vvv")
+ log_only = True
+ display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()), log_only=log_only)
+ exit_code = 250
+
+ sys.exit(exit_code)
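+
+# Editor's note: summary of the exit codes assigned above (2 and 3 are reserved
+# for host-failure cases that the TaskQueueManager handles itself):
+#   cli.run() result  normal completion        4    AnsibleParserError
+#   1                 AnsibleError             5    AnsibleOptionsError
+#   6                 non-utf-8 command line   99   KeyboardInterrupt
+#   250               unexpected exception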
diff --git a/lib/ansible/cli/scripts/ansible_connection_cli_stub.py b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
new file mode 100755
index 00000000..d701f156
--- /dev/null
+++ b/lib/ansible/cli/scripts/ansible_connection_cli_stub.py
@@ -0,0 +1,342 @@
+#!/usr/bin/env python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+__requires__ = ['ansible_base']
+
+
+import fcntl
+import hashlib
+import os
+import signal
+import socket
+import sys
+import time
+import traceback
+import errno
+import json
+
+from contextlib import contextmanager
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves import cPickle, StringIO
+from ansible.module_utils.connection import Connection, ConnectionError, send_data, recv_data
+from ansible.module_utils.service import fork_process
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import connection_loader
+from ansible.utils.path import unfrackpath, makedirs_safe
+from ansible.utils.display import Display
+from ansible.utils.jsonrpc import JsonRpcServer
+
+
+def read_stream(byte_stream):
+ size = int(byte_stream.readline().strip())
+
+ data = byte_stream.read(size)
+ if len(data) < size:
+ raise Exception("EOF found before data was complete")
+
+ data_hash = to_text(byte_stream.readline().strip())
+ if data_hash != hashlib.sha1(data).hexdigest():
+ raise Exception("Read {0} bytes, but data did not match checksum".format(size))
+
+ # restore escaped loose \r characters
+ data = data.replace(br'\r', b'\r')
+
+ return data
+
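+# Editor's note: a minimal sketch of the matching writer side, assuming the
+# framing read_stream() expects (length line, payload with loose b'\r' escaped
+# as br'\r', then a sha1 hexdigest line); the helper name is hypothetical:
+#
+#   def _example_write_stream(byte_stream, data):
+#       data = data.replace(b'\r', br'\r')
+#       byte_stream.write(b'%d\n' % len(data))
+#       byte_stream.write(data)
+#       byte_stream.write(to_bytes(hashlib.sha1(data).hexdigest()) + b'\n')
+#       byte_stream.flush()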
+
+@contextmanager
+def file_lock(lock_path):
+ """
+ Uses contextmanager to create and release a file lock based on the
+ given path. This allows us to create locks using `with file_lock()`
+ to prevent deadlocks related to failure to unlock properly.
+ """
+
+ lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, 0o600)
+ fcntl.lockf(lock_fd, fcntl.LOCK_EX)
+ try:
+ yield
+ finally:
+ # always release the lock and close the fd, even if the caller raises
+ fcntl.lockf(lock_fd, fcntl.LOCK_UN)
+ os.close(lock_fd)
+
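+# Editor's note: usage sketch for the context manager above (the path is made up):
+#
+#   with file_lock('/tmp/.example.lock'):
+#       pass  # lock held here; released on exit even if the body raises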
+
+class ConnectionProcess(object):
+ '''
+ The connection process wraps a Connection object that manages
+ the connection to a remote device and persists over the playbook run
+ '''
+ def __init__(self, fd, play_context, socket_path, original_path, task_uuid=None, ansible_playbook_pid=None):
+ self.play_context = play_context
+ self.socket_path = socket_path
+ self.original_path = original_path
+ self._task_uuid = task_uuid
+
+ self.fd = fd
+ self.exception = None
+
+ self.srv = JsonRpcServer()
+ self.sock = None
+
+ self.connection = None
+ self._ansible_playbook_pid = ansible_playbook_pid
+
+ def start(self, variables):
+ try:
+ messages = list()
+ result = {}
+
+ messages.append(('vvvv', 'control socket path is %s' % self.socket_path))
+
+ # If this is a relative path (~ gets expanded later) then plug the
+ # key's path onto the directory we originally came from, so we can
+ # find it now that our cwd is /
+ if self.play_context.private_key_file and self.play_context.private_key_file[0] not in '~/':
+ self.play_context.private_key_file = os.path.join(self.original_path, self.play_context.private_key_file)
+ self.connection = connection_loader.get(self.play_context.connection, self.play_context, '/dev/null',
+ task_uuid=self._task_uuid, ansible_playbook_pid=self._ansible_playbook_pid)
+ self.connection.set_options(var_options=variables)
+
+ self.connection._socket_path = self.socket_path
+ self.srv.register(self.connection)
+ messages.extend([('vvvv', msg) for msg in sys.stdout.getvalue().splitlines()])
+
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.bind(self.socket_path)
+ self.sock.listen(1)
+ messages.append(('vvvv', 'local domain socket listeners started successfully'))
+ except Exception as exc:
+ # the connection may not exist yet if the failure happened early
+ if self.connection:
+ messages.extend(self.connection.pop_messages())
+ result['error'] = to_text(exc)
+ result['exception'] = traceback.format_exc()
+ finally:
+ result['messages'] = messages
+ self.fd.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ self.fd.close()
+
+ def run(self):
+ try:
+ while not self.connection._conn_closed:
+ signal.signal(signal.SIGALRM, self.connect_timeout)
+ signal.signal(signal.SIGTERM, self.handler)
+ signal.alarm(self.connection.get_option('persistent_connect_timeout'))
+
+ self.exception = None
+ (s, addr) = self.sock.accept()
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.command_timeout)
+ while True:
+ data = recv_data(s)
+ if not data:
+ break
+ log_messages = self.connection.get_option('persistent_log_messages')
+
+ if log_messages:
+ display.display("jsonrpc request: %s" % data, log_only=True)
+
+ request = json.loads(to_text(data, errors='surrogate_or_strict'))
+ if request.get('method') == "exec_command" and not self.connection.connected:
+ self.connection._connect()
+
+ signal.alarm(self.connection.get_option('persistent_command_timeout'))
+
+ resp = self.srv.handle_request(data)
+ signal.alarm(0)
+
+ if log_messages:
+ display.display("jsonrpc response: %s" % resp, log_only=True)
+
+ send_data(s, to_bytes(resp))
+
+ s.close()
+
+ except Exception as e:
+ # socket.accept() will raise an error with errno EINTR if socket.close() is called
+ if hasattr(e, 'errno'):
+ if e.errno != errno.EINTR:
+ self.exception = traceback.format_exc()
+ else:
+ self.exception = traceback.format_exc()
+
+ finally:
+ # allow time for any exception message sent over the socket to be received at the other end before shutting down
+ time.sleep(0.1)
+
+ # when done, close the connection properly and clean up the socket file so it can be recreated
+ self.shutdown()
+
+ def connect_timeout(self, signum, frame):
+ msg = 'persistent connection idle timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and ' \
+ 'Troubleshooting Guide.' % self.connection.get_option('persistent_connect_timeout')
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def command_timeout(self, signum, frame):
+ msg = 'command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide.'\
+ % self.connection.get_option('persistent_command_timeout')
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def handler(self, signum, frame):
+ msg = 'signal handler called with signal %s.' % signum
+ display.display(msg, log_only=True)
+ raise Exception(msg)
+
+ def shutdown(self):
+ """ Shuts down the local domain socket
+ """
+ lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(self.socket_path))
+ if os.path.exists(self.socket_path):
+ try:
+ if self.sock:
+ self.sock.close()
+ if self.connection:
+ self.connection.close()
+ if self.connection.get_option("persistent_log_messages"):
+ for _level, message in self.connection.pop_messages():
+ display.display(message, log_only=True)
+ except Exception:
+ pass
+ finally:
+ if os.path.exists(self.socket_path):
+ os.remove(self.socket_path)
+ setattr(self.connection, '_socket_path', None)
+ setattr(self.connection, '_connected', False)
+
+ if os.path.exists(lock_path):
+ os.remove(lock_path)
+
+ display.display('shutdown complete', log_only=True)
+
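+# Editor's note (worked example): the lock path above and in main() is derived
+# by splitting the socket path, e.g.:
+#   socket_path = '/home/user/.ansible/pc/02ab45cd12'  (made-up value)
+#   os.path.split(socket_path) -> ('/home/user/.ansible/pc', '02ab45cd12')
+#   lock_path -> '/home/user/.ansible/pc/.ansible_pc_lock_02ab45cd12'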
+
+def main():
+ """ Called to initiate the connect to the remote device
+ """
+ rc = 0
+ result = {}
+ messages = list()
+ socket_path = None
+
+ # Need stdin as a byte stream
+ if PY3:
+ stdin = sys.stdin.buffer
+ else:
+ stdin = sys.stdin
+
+ # Note: update the below log capture code after Display.display() is refactored.
+ saved_stdout = sys.stdout
+ sys.stdout = StringIO()
+
+ try:
+ # read the play context data via stdin, which means unpickling it
+ vars_data = read_stream(stdin)
+ init_data = read_stream(stdin)
+
+ if PY3:
+ pc_data = cPickle.loads(init_data, encoding='bytes')
+ variables = cPickle.loads(vars_data, encoding='bytes')
+ else:
+ pc_data = cPickle.loads(init_data)
+ variables = cPickle.loads(vars_data)
+
+ play_context = PlayContext()
+ play_context.deserialize(pc_data)
+ display.verbosity = play_context.verbosity
+
+ except Exception as e:
+ rc = 1
+ result.update({
+ 'error': to_text(e),
+ 'exception': traceback.format_exc()
+ })
+
+ if rc == 0:
+ ssh = connection_loader.get('ssh', class_only=True)
+ ansible_playbook_pid = sys.argv[1]
+ task_uuid = sys.argv[2]
+ cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
+ # create the persistent connection dir if need be and create the paths
+ # which we will be using later
+ tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
+ makedirs_safe(tmp_path)
+
+ socket_path = unfrackpath(cp % dict(directory=tmp_path))
+ lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
+
+ with file_lock(lock_path):
+ if not os.path.exists(socket_path):
+ messages.append(('vvvv', 'local domain socket does not exist, starting it'))
+ original_path = os.getcwd()
+ r, w = os.pipe()
+ pid = fork_process()
+
+ if pid == 0:
+ try:
+ os.close(r)
+ wfd = os.fdopen(w, 'w')
+ process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
+ process.start(variables)
+ except Exception:
+ messages.append(('error', traceback.format_exc()))
+ rc = 1
+
+ if rc == 0:
+ process.run()
+ else:
+ process.shutdown()
+
+ sys.exit(rc)
+
+ else:
+ os.close(w)
+ rfd = os.fdopen(r, 'r')
+ data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
+ messages.extend(data.pop('messages'))
+ result.update(data)
+
+ else:
+ messages.append(('vvvv', 'found existing local domain socket, using it!'))
+ conn = Connection(socket_path)
+ conn.set_options(var_options=variables)
+ pc_data = to_text(init_data)
+ try:
+ conn.update_play_context(pc_data)
+ conn.set_check_prompt(task_uuid)
+ except Exception as exc:
+ # Only network_cli has update_play_context and set_check_prompt, so a
+ # missing method is not fatal, e.g. for netconf
+ if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
+ pass
+ else:
+ result.update({
+ 'error': to_text(exc),
+ 'exception': traceback.format_exc()
+ })
+
+ if os.path.exists(socket_path):
+ messages.extend(Connection(socket_path).pop_messages())
+ messages.append(('vvvv', sys.stdout.getvalue()))
+ result.update({
+ 'messages': messages,
+ 'socket_path': socket_path
+ })
+
+ sys.stdout = saved_stdout
+ if 'exception' in result:
+ rc = 1
+ sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
+ else:
+ rc = 0
+ sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
+
+ sys.exit(rc)
+
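+# Editor's note: sketch of the JSON document main() writes to stdout (or to
+# stderr when an exception was recorded); the values shown are invented:
+#
+#   {"messages": [["vvvv", "local domain socket does not exist, starting it"]],
+#    "socket_path": "/home/user/.ansible/pc/02ab45cd12"}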
+
+if __name__ == '__main__':
+ display = Display()
+ main()
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
new file mode 100644
index 00000000..0425a1a3
--- /dev/null
+++ b/lib/ansible/cli/vault.py
@@ -0,0 +1,457 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli import CLI
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.parsing.dataloader import DataLoader
+from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class VaultCLI(CLI):
+ ''' can encrypt any structured data file used by Ansible.
+ This can include *group_vars/* or *host_vars/* inventory variables,
+ variables loaded by *include_vars* or *vars_files*, or variable files
+ passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
+ Role variables and defaults are also included!
+
+ Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
+ If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
+ '''
+
+ FROM_STDIN = "stdin"
+ FROM_ARGS = "the command line args"
+ FROM_PROMPT = "the interactive prompt"
+
+ def __init__(self, args):
+
+ self.b_vault_pass = None
+ self.b_new_vault_pass = None
+ self.encrypt_string_read_stdin = False
+
+ self.encrypt_secret = None
+ self.encrypt_vault_id = None
+ self.new_encrypt_secret = None
+ self.new_encrypt_vault_id = None
+
+ super(VaultCLI, self).__init__(args)
+
+ def init_parser(self):
+ super(VaultCLI, self).init_parser(
+ desc="encryption/decryption utility for Ansible data files",
+ epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
+ )
+
+ common = opt_help.argparse.ArgumentParser(add_help=False)
+ opt_help.add_vault_options(common)
+ opt_help.add_verbosity_options(common)
+
+ subparsers = self.parser.add_subparsers(dest='action')
+ subparsers.required = True
+
+ output = opt_help.argparse.ArgumentParser(add_help=False)
+ output.add_argument('--output', default=None, dest='output_file',
+ help='output file name for encrypt or decrypt; use - for stdout',
+ type=opt_help.unfrack_path())
+
+ # For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
+ vault_id = opt_help.argparse.ArgumentParser(add_help=False)
+ vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
+ action='store', type=str,
+ help='the vault id used to encrypt (required if more than one vault-id is provided)')
+
+ create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
+ create_parser.set_defaults(func=self.execute_create)
+ create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
+ decrypt_parser.set_defaults(func=self.execute_decrypt)
+ decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
+ edit_parser.set_defaults(func=self.execute_edit)
+ edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
+ view_parser.set_defaults(func=self.execute_view)
+ view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
+ encrypt_parser.set_defaults(func=self.execute_encrypt)
+ encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
+ enc_str_parser.set_defaults(func=self.execute_encrypt_string)
+ enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
+ enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
+ action='store_true',
+ help="Prompt for the string to encrypt")
+ enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
+ action='append',
+ help="Specify the variable name")
+ enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
+ default=None,
+ help="Specify the variable name for stdin")
+
+ rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
+ rekey_parser.set_defaults(func=self.execute_rekey)
+ rekey_new_group = rekey_parser.add_mutually_exclusive_group()
+ rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
+ help="new vault password file for rekey", type=opt_help.unfrack_path())
+ rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
+ help='the new vault identity to use for rekey')
+ rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
+
+ def post_process_args(self, options):
+ options = super(VaultCLI, self).post_process_args(options)
+
+ display.verbosity = options.verbosity
+
+ if options.vault_ids:
+ for vault_id in options.vault_ids:
+ if u';' in vault_id:
+ raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
+
+ if getattr(options, 'output_file', None) and len(options.args) > 1:
+ raise AnsibleOptionsError("At most one input file may be used with the --output option")
+
+ if options.action == 'encrypt_string':
+ if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
+ self.encrypt_string_read_stdin = True
+
+ # TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
+ if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
+ raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
+
+ return options
+
+ def run(self):
+ super(VaultCLI, self).run()
+ loader = DataLoader()
+
+ # set default restrictive umask
+ old_umask = os.umask(0o077)
+
+ vault_ids = list(context.CLIARGS['vault_ids'])
+
+ # there are 3 types of actions: those that just 'read' (decrypt, view) and only
+ # need to ask for a password once, those that 'write' (create, encrypt) and
+ # ask for a new password and confirm it, and 'read/write' (rekey), which asks for
+ # the old password, then asks for a new one and confirms it.
+
+ default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
+ vault_ids = default_vault_ids + vault_ids
+
+ action = context.CLIARGS['action']
+
+ # TODO: instead of prompting for these before, we could let VaultEditor
+ # call a callback when it needs it.
+ if action in ['decrypt', 'view', 'rekey', 'edit']:
+ vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
+ vault_password_files=list(context.CLIARGS['vault_password_files']),
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'])
+ if not vault_secrets:
+ raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
+
+ if action in ['encrypt', 'encrypt_string', 'create']:
+
+ encrypt_vault_id = None
+ # there is no --encrypt-vault-id (context.CLIARGS['encrypt_vault_id']) for 'edit'
+ if action not in ['edit']:
+ encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
+
+ vault_secrets = \
+ self.setup_vault_secrets(loader,
+ vault_ids=vault_ids,
+ vault_password_files=list(context.CLIARGS['vault_password_files']),
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ create_new_password=True)
+
+ if len(vault_secrets) > 1 and not encrypt_vault_id:
+ raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
+ ','.join([x[0] for x in vault_secrets]))
+
+ if not vault_secrets:
+ raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
+
+ encrypt_secret = match_encrypt_secret(vault_secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+ # only one secret for encrypt for now, use the first vault_id and use its first secret
+ # TODO: exception if more than one?
+ self.encrypt_vault_id = encrypt_secret[0]
+ self.encrypt_secret = encrypt_secret[1]
+
+ if action in ['rekey']:
+ encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
+ # print('encrypt_vault_id: %s' % encrypt_vault_id)
+ # print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
+
+ # new_vault_ids should only ever contain one item; load the
+ # default vault ids if we are using encrypt-vault-id
+ new_vault_ids = []
+ if encrypt_vault_id:
+ new_vault_ids = default_vault_ids
+ if context.CLIARGS['new_vault_id']:
+ new_vault_ids.append(context.CLIARGS['new_vault_id'])
+
+ new_vault_password_files = []
+ if context.CLIARGS['new_vault_password_file']:
+ new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
+
+ new_vault_secrets = \
+ self.setup_vault_secrets(loader,
+ vault_ids=new_vault_ids,
+ vault_password_files=new_vault_password_files,
+ ask_vault_pass=context.CLIARGS['ask_vault_pass'],
+ create_new_password=True)
+
+ if not new_vault_secrets:
+ raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
+
+ # There is only one new_vault_id currently and one new_vault_secret, or we
+ # use the id specified in --encrypt-vault-id
+ new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+ self.new_encrypt_vault_id = new_encrypt_secret[0]
+ self.new_encrypt_secret = new_encrypt_secret[1]
+
+ loader.set_vault_secrets(vault_secrets)
+
+ # FIXME: do we need to create VaultEditor here? it's not reused
+ vault = VaultLib(vault_secrets)
+ self.editor = VaultEditor(vault)
+
+ context.CLIARGS['func']()
+
+ # and restore umask
+ os.umask(old_umask)
+
+ def execute_encrypt(self):
+ ''' encrypt the supplied file using the provided vault secret '''
+
+ if not context.CLIARGS['args'] and sys.stdin.isatty():
+ display.display("Reading plaintext input from stdin", stderr=True)
+
+ for f in context.CLIARGS['args'] or ['-']:
+ # FIXME: use the correct vault secret here
+ self.editor.encrypt_file(f, self.encrypt_secret,
+ vault_id=self.encrypt_vault_id,
+ output_file=context.CLIARGS['output_file'])
+
+ if sys.stdout.isatty():
+ display.display("Encryption successful", stderr=True)
+
+ @staticmethod
+ def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
+ indent = indent or 10
+
+ block_format_var_name = ""
+ if name:
+ block_format_var_name = "%s: " % name
+
+ block_format_header = "%s!vault |" % block_format_var_name
+ lines = []
+ vault_ciphertext = to_text(b_ciphertext)
+
+ lines.append(block_format_header)
+ for line in vault_ciphertext.splitlines():
+ lines.append('%s%s' % (' ' * indent, line))
+
+ yaml_ciphertext = '\n'.join(lines)
+ return yaml_ciphertext
+
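+ # Editor's note: illustrative input/output for format_ciphertext_yaml(), with
+ # a shortened, made-up ciphertext:
+ #
+ #   VaultCLI.format_ciphertext_yaml(b'$ANSIBLE_VAULT;1.1;AES256\n6162...', name='api_key')
+ #   ->
+ #   api_key: !vault |
+ #             $ANSIBLE_VAULT;1.1;AES256
+ #             6162...
+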
+ def execute_encrypt_string(self):
+ ''' encrypt the supplied string using the provided vault secret '''
+ b_plaintext = None
+
+ # Holds tuples (the_text, the_source_of_the_string, the variable name if it's provided).
+ b_plaintext_list = []
+
+ # remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
+ # we don't add it to the plaintext list
+ args = [x for x in context.CLIARGS['args'] if x != '-']
+
+ # We can prompt and read input, or read from stdin, but not both.
+ if context.CLIARGS['encrypt_string_prompt']:
+ msg = "String to encrypt: "
+
+ name = None
+ name_prompt_response = display.prompt('Variable name (enter for no name): ')
+
+ # TODO: enforce var naming rules?
+ if name_prompt_response != "":
+ name = name_prompt_response
+
+ # TODO: could prompt for which vault_id to use for each plaintext string
+ # currently, it will just be the default
+ # could use private=True for shadowed input if useful
+ prompt_response = display.prompt(msg)
+
+ if prompt_response == '':
+ raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
+
+ b_plaintext = to_bytes(prompt_response)
+ b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
+
+ # read from stdin
+ if self.encrypt_string_read_stdin:
+ if sys.stdout.isatty():
+ display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
+
+ stdin_text = sys.stdin.read()
+ if stdin_text == '':
+ raise AnsibleOptionsError('stdin was empty, not encrypting')
+
+ if sys.stdout.isatty() and not stdin_text.endswith("\n"):
+ display.display("\n")
+
+ b_plaintext = to_bytes(stdin_text)
+
+ # defaults to None
+ name = context.CLIARGS['encrypt_string_stdin_name']
+ b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
+
+ # use any leftover args as strings to encrypt
+ # Try to match args up to --name options
+ if context.CLIARGS.get('encrypt_string_names', False):
+ name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
+
+ # Some but not enough --name's to name each var
+ if len(args) > len(name_and_text_list):
+ # Trying to avoid ever showing the plaintext in the output, so this warning is intentionally vague.
+ display.display('The number of --name options does not match the number of args.',
+ stderr=True)
+ display.display('The last named variable will be "%s". The rest will not have'
+ ' names.' % context.CLIARGS['encrypt_string_names'][-1],
+ stderr=True)
+
+ # Add the rest of the args without specifying a name
+ for extra_arg in args[len(name_and_text_list):]:
+ name_and_text_list.append((None, extra_arg))
+
+ # if no --names are provided, just use the args without a name.
+ else:
+ name_and_text_list = [(None, x) for x in args]
+
+ # Convert the plaintext text objects to bytestrings and collect
+ for name_and_text in name_and_text_list:
+ name, plaintext = name_and_text
+
+ if plaintext == '':
+ raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
+
+ b_plaintext = to_bytes(plaintext)
+ b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
+
+ # TODO: specify vault_id per string?
+ # Format the encrypted strings and any corresponding stderr output
+ outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
+
+ for output in outputs:
+ err = output.get('err', None)
+ out = output.get('out', '')
+ if err:
+ sys.stderr.write(err)
+ print(out)
+
+ if sys.stdout.isatty():
+ display.display("Encryption successful", stderr=True)
+
+ # TODO: offer block or string ala eyaml
+
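+ # Editor's note: typical invocations of this action (the values are examples):
+ #
+ #   ansible-vault encrypt_string --name 'api_key' 's3cr3t'
+ #   echo -n 's3cr3t' | ansible-vault encrypt_string --stdin-name 'api_key'
+ #
+ # Both print an 'api_key: !vault |' block suitable for pasting into a play.
+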
+ def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
+ # If we are only showing one item in the output, we don't need to include commented
+ # delimiters in the text
+ show_delimiter = False
+ if len(b_plaintext_list) > 1:
+ show_delimiter = True
+
+ # list of dicts {'out': '', 'err': ''}
+ output = []
+
+ # Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
+ # For more than one input, show some differentiating info in the stderr output so we can tell them
+ # apart. If we have a var name, we include that in the yaml
+ for index, b_plaintext_info in enumerate(b_plaintext_list):
+ # (the text itself, which input it came from, its name)
+ b_plaintext, src, name = b_plaintext_info
+
+ b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret,
+ vault_id=vault_id)
+
+ # block formatting
+ yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
+
+ err_msg = None
+ if show_delimiter:
+ human_index = index + 1
+ if name:
+ err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
+ else:
+ err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
+ output.append({'out': yaml_text, 'err': err_msg})
+
+ return output
+
+ def execute_decrypt(self):
+ ''' decrypt the supplied file using the provided vault secret '''
+
+ if not context.CLIARGS['args'] and sys.stdin.isatty():
+ display.display("Reading ciphertext input from stdin", stderr=True)
+
+ for f in context.CLIARGS['args'] or ['-']:
+ self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
+
+ if sys.stdout.isatty():
+ display.display("Decryption successful", stderr=True)
+
+ def execute_create(self):
+ ''' create and open a file in an editor; it will be encrypted with the provided vault secret when closed'''
+
+ if len(context.CLIARGS['args']) != 1:
+ raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
+
+ self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
+ vault_id=self.encrypt_vault_id)
+
+ def execute_edit(self):
+ ''' open and decrypt an existing vaulted file in an editor; it will be encrypted again when closed'''
+ for f in context.CLIARGS['args']:
+ self.editor.edit_file(f)
+
+ def execute_view(self):
+ ''' open, decrypt and view an existing vaulted file with a pager, using the supplied vault secret '''
+
+ for f in context.CLIARGS['args']:
+ # Note: vault should return byte strings because it could encrypt
+ # and decrypt binary files. We are responsible for changing it to
+ # unicode here because we are displaying it and therefore can make
+ # the decision that the display doesn't have to be precisely what
+ # the input was (leave that to decrypt instead)
+ plaintext = self.editor.plaintext(f)
+ self.pager(to_text(plaintext))
+
+ def execute_rekey(self):
+ ''' re-encrypt a vaulted file with a new secret; the previous secret is required '''
+ for f in context.CLIARGS['args']:
+ # FIXME: plumb in vault_id, use the default new_vault_secret for now
+ self.editor.rekey_file(f, self.new_encrypt_secret,
+ self.new_encrypt_vault_id)
+
+ display.display("Rekey successful", stderr=True)
diff --git a/lib/ansible/collections/__init__.py b/lib/ansible/collections/__init__.py
new file mode 100644
index 00000000..6b3e2a7d
--- /dev/null
+++ b/lib/ansible/collections/__init__.py
@@ -0,0 +1,29 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils._text import to_bytes
+
+B_FLAG_FILES = frozenset([b'MANIFEST.json', b'galaxy.yml'])
+
+
+def is_collection_path(path):
+ """
+ Verify that a path meets min requirements to be a collection
+ :param path: byte-string path to evaluate for collection containment
+ :return: boolean signifying 'collectionness'
+ """
+
+ is_coll = False
+ b_path = to_bytes(path)
+ if os.path.isdir(b_path):
+ for b_flag in B_FLAG_FILES:
+ if os.path.exists(os.path.join(b_path, b_flag)):
+ is_coll = True
+ break
+
+ return is_coll
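+
+# Editor's note: usage sketch (the path is made up); a directory qualifies as a
+# collection once it contains MANIFEST.json or galaxy.yml:
+#
+#   is_collection_path('/usr/share/ansible/collections/ansible_collections/ns/coll')
+#   -> True only if one of the flag files exists inside that directory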
diff --git a/lib/ansible/collections/list.py b/lib/ansible/collections/list.py
new file mode 100644
index 00000000..a1d99017
--- /dev/null
+++ b/lib/ansible/collections/list.py
@@ -0,0 +1,101 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from collections import defaultdict
+
+from ansible.errors import AnsibleError
+from ansible.collections import is_collection_path
+from ansible.module_utils._text import to_bytes
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def list_valid_collection_paths(search_paths=None, warn=False):
+ """
+ Filter out non-existent or invalid search_paths for collections
+ :param search_paths: list of text-string paths, if none load default config
+ :param warn: display warning if search_path does not exist
+ :return: subset of original list
+ """
+
+ if search_paths is None:
+ search_paths = []
+
+ search_paths.extend(AnsibleCollectionConfig.collection_paths)
+
+ for path in search_paths:
+
+ b_path = to_bytes(path)
+ if not os.path.exists(b_path):
+ # warn for missing, but not if default
+ if warn:
+ display.warning("The configured collection path {0} does not exist.".format(path))
+ continue
+
+ if not os.path.isdir(b_path):
+ if warn:
+ display.warning("The configured collection path {0}, exists, but it is not a directory.".format(path))
+ continue
+
+ yield path
+
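+# Editor's note: usage sketch; this is a generator, so materialize it with
+# list() (the input path is made up):
+#
+#   list(list_valid_collection_paths(['/tmp/does_not_exist'], warn=True))
+#   -> warns about the missing path and yields only those configured
+#      collection paths that exist and are directories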
+
+def list_collection_dirs(search_paths=None, coll_filter=None):
+ """
+ Return paths for the specific collections found in passed or configured search paths
+ :param search_paths: list of text-string paths, if none load default config
+ :param coll_filter: limit collections to just the specific namespace or collection, if None all are returned
+ :return: list of collection directory paths
+ """
+
+ collection = None
+ namespace = None
+ if coll_filter is not None:
+ if '.' in coll_filter:
+ try:
+ (namespace, collection) = coll_filter.split('.')
+ except ValueError:
+ raise AnsibleError("Invalid collection pattern supplied: %s" % coll_filter)
+ else:
+ namespace = coll_filter
+
+ collections = defaultdict(dict)
+ for path in list_valid_collection_paths(search_paths):
+
+ b_path = to_bytes(path)
+ if os.path.isdir(b_path):
+ b_coll_root = to_bytes(os.path.join(path, 'ansible_collections'))
+
+ if os.path.exists(b_coll_root) and os.path.isdir(b_coll_root):
+
+ if namespace is None:
+ namespaces = os.listdir(b_coll_root)
+ else:
+ namespaces = [namespace]
+
+ for ns in namespaces:
+ b_namespace_dir = os.path.join(b_coll_root, to_bytes(ns))
+
+ if os.path.isdir(b_namespace_dir):
+
+ if collection is None:
+ colls = os.listdir(b_namespace_dir)
+ else:
+ colls = [collection]
+
+ for mycoll in colls:
+
+ # skip dupe collections as they will be masked in execution
+ if mycoll not in collections[ns]:
+ b_coll = to_bytes(mycoll)
+ b_coll_dir = os.path.join(b_namespace_dir, b_coll)
+ if is_collection_path(b_coll_dir):
+ collections[ns][mycoll] = b_coll_dir
+ yield b_coll_dir
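+
+# Editor's note: usage sketch; coll_filter narrows the scan to one namespace
+# ('ns') or a single collection ('ns.coll'), and byte-string paths are yielded:
+#
+#   for b_coll_dir in list_collection_dirs(coll_filter='community.general'):
+#       print(b_coll_dir)  # e.g. b'.../ansible_collections/community/general'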
diff --git a/lib/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py
new file mode 100644
index 00000000..2990c6f5
--- /dev/null
+++ b/lib/ansible/compat/__init__.py
@@ -0,0 +1,26 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat library for ansible. This contains compatibility definitions for older python.
+When we need to import a module differently depending on python version, do it
+here. Then in the code we can simply import from compat in order to get what we want.
+'''
diff --git a/lib/ansible/compat/selectors/__init__.py b/lib/ansible/compat/selectors/__init__.py
new file mode 100644
index 00000000..6bbf6d8b
--- /dev/null
+++ b/lib/ansible/compat/selectors/__init__.py
@@ -0,0 +1,30 @@
+# (c) 2014, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat selectors library. Python-3.5 has this builtin. The selectors2
+package exists on pypi to backport the functionality as far as python-2.6.
+The implementation previously resided here; this file is kept after the
+move to ansible.module_utils for backwards compatibility.
+'''
+import sys
+from ansible.module_utils.compat import selectors
+sys.modules['ansible.compat.selectors'] = selectors
diff --git a/lib/ansible/config/__init__.py b/lib/ansible/config/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/config/__init__.py
diff --git a/lib/ansible/config/ansible_builtin_runtime.yml b/lib/ansible/config/ansible_builtin_runtime.yml
new file mode 100644
index 00000000..3ad73d98
--- /dev/null
+++ b/lib/ansible/config/ansible_builtin_runtime.yml
@@ -0,0 +1,9662 @@
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+plugin_routing:
+ connection:
+ # test entries
+ redirected_local:
+ redirect: ansible.builtin.local
+ buildah:
+ redirect: containers.podman.buildah
+ podman:
+ redirect: containers.podman.podman
+ aws_ssm:
+ redirect: community.aws.aws_ssm
+ chroot:
+ redirect: community.general.chroot
+ docker:
+ redirect: community.general.docker
+ funcd:
+ redirect: community.general.funcd
+ iocage:
+ redirect: community.general.iocage
+ jail:
+ redirect: community.general.jail
+ kubectl:
+ redirect: community.kubernetes.kubectl
+ libvirt_lxc:
+ redirect: community.libvirt.libvirt_lxc
+ lxc:
+ redirect: community.general.lxc
+ lxd:
+ redirect: community.general.lxd
+ oc:
+ redirect: community.general.oc
+ qubes:
+ redirect: community.general.qubes
+ saltstack:
+ redirect: community.general.saltstack
+ zone:
+ redirect: community.general.zone
+ vmware_tools:
+ redirect: community.vmware.vmware_tools
+ httpapi:
+ redirect: ansible.netcommon.httpapi
+ napalm:
+ redirect: ansible.netcommon.napalm
+ netconf:
+ redirect: ansible.netcommon.netconf
+ network_cli:
+ redirect: ansible.netcommon.network_cli
+ persistent:
+ redirect: ansible.netcommon.persistent
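+ # Editor's note: with the routing above, a play that sets, for example,
+ # 'connection: network_cli' transparently loads the plugin from the
+ # ansible.netcommon collection rather than from ansible-base itself.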
+ modules:
+ # test entry
+ formerly_core_ping:
+ redirect: testns.testcoll.ping
+ # test entry
+ uses_redirected_action:
+ redirect: ansible.builtin.ping
+ podman_container_info:
+ redirect: containers.podman.podman_container_info
+ podman_image_info:
+ redirect: containers.podman.podman_image_info
+ podman_image:
+ redirect: containers.podman.podman_image
+ podman_volume_info:
+ redirect: containers.podman.podman_volume_info
+ frr_facts:
+ redirect: frr.frr.frr_facts
+ frr_bgp:
+ redirect: frr.frr.frr_bgp
+ apt_repo:
+ redirect: community.general.apt_repo
+ aws_acm_facts:
+ redirect: community.aws.aws_acm_facts
+ aws_kms_facts:
+ redirect: community.aws.aws_kms_facts
+ aws_region_facts:
+ redirect: community.aws.aws_region_facts
+ aws_s3_bucket_facts:
+ redirect: community.aws.aws_s3_bucket_facts
+ aws_sgw_facts:
+ redirect: community.aws.aws_sgw_facts
+ aws_waf_facts:
+ redirect: community.aws.aws_waf_facts
+ cloudfront_facts:
+ redirect: community.aws.cloudfront_facts
+ cloudwatchlogs_log_group_facts:
+ redirect: community.aws.cloudwatchlogs_log_group_facts
+ ec2_asg_facts:
+ redirect: community.aws.ec2_asg_facts
+ ec2_customer_gateway_facts:
+ redirect: community.aws.ec2_customer_gateway_facts
+ ec2_instance_facts:
+ redirect: community.aws.ec2_instance_facts
+ ec2_eip_facts:
+ redirect: community.aws.ec2_eip_facts
+ ec2_elb_facts:
+ redirect: community.aws.ec2_elb_facts
+ ec2_lc_facts:
+ redirect: community.aws.ec2_lc_facts
+ ec2_placement_group_facts:
+ redirect: community.aws.ec2_placement_group_facts
+ ec2_vpc_endpoint_facts:
+ redirect: community.aws.ec2_vpc_endpoint_facts
+ ec2_vpc_igw_facts:
+ redirect: community.aws.ec2_vpc_igw_facts
+ ec2_vpc_nacl_facts:
+ redirect: community.aws.ec2_vpc_nacl_facts
+ ec2_vpc_nat_gateway_facts:
+ redirect: community.aws.ec2_vpc_nat_gateway_facts
+ ec2_vpc_peering_facts:
+ redirect: community.aws.ec2_vpc_peering_facts
+ ec2_vpc_route_table_facts:
+ redirect: community.aws.ec2_vpc_route_table_facts
+ ec2_vpc_vgw_facts:
+ redirect: community.aws.ec2_vpc_vgw_facts
+ ec2_vpc_vpn_facts:
+ redirect: community.aws.ec2_vpc_vpn_facts
+ ecs_service_facts:
+ redirect: community.aws.ecs_service_facts
+ ecs_taskdefinition_facts:
+ redirect: community.aws.ecs_taskdefinition_facts
+ efs_facts:
+ redirect: community.aws.efs_facts
+ elasticache_facts:
+ redirect: community.aws.elasticache_facts
+ elb_application_lb_facts:
+ redirect: community.aws.elb_application_lb_facts
+ elb_classic_lb_facts:
+ redirect: community.aws.elb_classic_lb_facts
+ elb_target_facts:
+ redirect: community.aws.elb_target_facts
+ elb_target_group_facts:
+ redirect: community.aws.elb_target_group_facts
+ iam_cert_facts:
+ redirect: community.aws.iam_cert_facts
+ iam_mfa_device_facts:
+ redirect: community.aws.iam_mfa_device_facts
+ iam_role_facts:
+ redirect: community.aws.iam_role_facts
+ iam_server_certificate_facts:
+ redirect: community.aws.iam_server_certificate_facts
+ lambda_facts:
+ redirect: community.aws.lambda_facts
+ rds_instance_facts:
+ redirect: community.aws.rds_instance_facts
+ rds_snapshot_facts:
+ redirect: community.aws.rds_snapshot_facts
+ redshift_facts:
+ redirect: community.aws.redshift_facts
+ route53_facts:
+ redirect: community.aws.route53_facts
+ aws_acm:
+ redirect: community.aws.aws_acm
+ aws_acm_info:
+ redirect: community.aws.aws_acm_info
+ aws_api_gateway:
+ redirect: community.aws.aws_api_gateway
+ aws_application_scaling_policy:
+ redirect: community.aws.aws_application_scaling_policy
+ aws_batch_compute_environment:
+ redirect: community.aws.aws_batch_compute_environment
+ aws_batch_job_definition:
+ redirect: community.aws.aws_batch_job_definition
+ aws_batch_job_queue:
+ redirect: community.aws.aws_batch_job_queue
+ aws_codebuild:
+ redirect: community.aws.aws_codebuild
+ aws_codecommit:
+ redirect: community.aws.aws_codecommit
+ aws_codepipeline:
+ redirect: community.aws.aws_codepipeline
+ aws_config_aggregation_authorization:
+ redirect: community.aws.aws_config_aggregation_authorization
+ aws_config_aggregator:
+ redirect: community.aws.aws_config_aggregator
+ aws_config_delivery_channel:
+ redirect: community.aws.aws_config_delivery_channel
+ aws_config_recorder:
+ redirect: community.aws.aws_config_recorder
+ aws_config_rule:
+ redirect: community.aws.aws_config_rule
+ aws_direct_connect_connection:
+ redirect: community.aws.aws_direct_connect_connection
+ aws_direct_connect_gateway:
+ redirect: community.aws.aws_direct_connect_gateway
+ aws_direct_connect_link_aggregation_group:
+ redirect: community.aws.aws_direct_connect_link_aggregation_group
+ aws_direct_connect_virtual_interface:
+ redirect: community.aws.aws_direct_connect_virtual_interface
+ aws_eks_cluster:
+ redirect: community.aws.aws_eks_cluster
+ aws_elasticbeanstalk_app:
+ redirect: community.aws.aws_elasticbeanstalk_app
+ aws_glue_connection:
+ redirect: community.aws.aws_glue_connection
+ aws_glue_job:
+ redirect: community.aws.aws_glue_job
+ aws_inspector_target:
+ redirect: community.aws.aws_inspector_target
+ aws_kms:
+ redirect: community.aws.aws_kms
+ aws_kms_info:
+ redirect: community.aws.aws_kms_info
+ aws_region_info:
+ redirect: community.aws.aws_region_info
+ aws_s3_bucket_info:
+ redirect: community.aws.aws_s3_bucket_info
+ aws_s3_cors:
+ redirect: community.aws.aws_s3_cors
+ aws_secret:
+ redirect: community.aws.aws_secret
+ aws_ses_identity:
+ redirect: community.aws.aws_ses_identity
+ aws_ses_identity_policy:
+ redirect: community.aws.aws_ses_identity_policy
+ aws_ses_rule_set:
+ redirect: community.aws.aws_ses_rule_set
+ aws_sgw_info:
+ redirect: community.aws.aws_sgw_info
+ aws_ssm_parameter_store:
+ redirect: community.aws.aws_ssm_parameter_store
+ aws_step_functions_state_machine:
+ redirect: community.aws.aws_step_functions_state_machine
+ aws_step_functions_state_machine_execution:
+ redirect: community.aws.aws_step_functions_state_machine_execution
+ aws_waf_condition:
+ redirect: community.aws.aws_waf_condition
+ aws_waf_info:
+ redirect: community.aws.aws_waf_info
+ aws_waf_rule:
+ redirect: community.aws.aws_waf_rule
+ aws_waf_web_acl:
+ redirect: community.aws.aws_waf_web_acl
+ cloudformation_stack_set:
+ redirect: community.aws.cloudformation_stack_set
+ cloudformation_exports_info:
+ redirect: community.aws.cloudformation_exports_info
+ cloudfront_distribution:
+ redirect: community.aws.cloudfront_distribution
+ cloudfront_info:
+ redirect: community.aws.cloudfront_info
+ cloudfront_invalidation:
+ redirect: community.aws.cloudfront_invalidation
+ cloudfront_origin_access_identity:
+ redirect: community.aws.cloudfront_origin_access_identity
+ cloudtrail:
+ redirect: community.aws.cloudtrail
+ cloudwatchevent_rule:
+ redirect: community.aws.cloudwatchevent_rule
+ cloudwatchlogs_log_group:
+ redirect: community.aws.cloudwatchlogs_log_group
+ cloudwatchlogs_log_group_info:
+ redirect: community.aws.cloudwatchlogs_log_group_info
+ cloudwatchlogs_log_group_metric_filter:
+ redirect: community.aws.cloudwatchlogs_log_group_metric_filter
+ data_pipeline:
+ redirect: community.aws.data_pipeline
+ dms_endpoint:
+ redirect: community.aws.dms_endpoint
+ dms_replication_subnet_group:
+ redirect: community.aws.dms_replication_subnet_group
+ dynamodb_table:
+ redirect: community.aws.dynamodb_table
+ dynamodb_ttl:
+ redirect: community.aws.dynamodb_ttl
+ ec2_ami_copy:
+ redirect: community.aws.ec2_ami_copy
+ ec2_asg:
+ redirect: community.aws.ec2_asg
+ ec2_asg_info:
+ redirect: community.aws.ec2_asg_info
+ ec2_asg_lifecycle_hook:
+ redirect: community.aws.ec2_asg_lifecycle_hook
+ ec2_customer_gateway:
+ redirect: community.aws.ec2_customer_gateway
+ ec2_customer_gateway_info:
+ redirect: community.aws.ec2_customer_gateway_info
+ ec2_eip:
+ redirect: community.aws.ec2_eip
+ ec2_eip_info:
+ redirect: community.aws.ec2_eip_info
+ ec2_elb:
+ redirect: community.aws.ec2_elb
+ ec2_elb_info:
+ redirect: community.aws.ec2_elb_info
+ ec2_instance:
+ redirect: community.aws.ec2_instance
+ ec2_instance_info:
+ redirect: community.aws.ec2_instance_info
+ ec2_launch_template:
+ redirect: community.aws.ec2_launch_template
+ ec2_lc:
+ redirect: community.aws.ec2_lc
+ ec2_lc_find:
+ redirect: community.aws.ec2_lc_find
+ ec2_lc_info:
+ redirect: community.aws.ec2_lc_info
+ ec2_metric_alarm:
+ redirect: community.aws.ec2_metric_alarm
+ ec2_placement_group:
+ redirect: community.aws.ec2_placement_group
+ ec2_placement_group_info:
+ redirect: community.aws.ec2_placement_group_info
+ ec2_scaling_policy:
+ redirect: community.aws.ec2_scaling_policy
+ ec2_snapshot_copy:
+ redirect: community.aws.ec2_snapshot_copy
+ ec2_transit_gateway:
+ redirect: community.aws.ec2_transit_gateway
+ ec2_transit_gateway_info:
+ redirect: community.aws.ec2_transit_gateway_info
+ ec2_vpc_egress_igw:
+ redirect: community.aws.ec2_vpc_egress_igw
+ ec2_vpc_endpoint:
+ redirect: community.aws.ec2_vpc_endpoint
+ ec2_vpc_endpoint_info:
+ redirect: community.aws.ec2_vpc_endpoint_info
+ ec2_vpc_igw:
+ redirect: community.aws.ec2_vpc_igw
+ ec2_vpc_igw_info:
+ redirect: community.aws.ec2_vpc_igw_info
+ ec2_vpc_nacl:
+ redirect: community.aws.ec2_vpc_nacl
+ ec2_vpc_nacl_info:
+ redirect: community.aws.ec2_vpc_nacl_info
+ ec2_vpc_nat_gateway:
+ redirect: community.aws.ec2_vpc_nat_gateway
+ ec2_vpc_nat_gateway_info:
+ redirect: community.aws.ec2_vpc_nat_gateway_info
+ ec2_vpc_peer:
+ redirect: community.aws.ec2_vpc_peer
+ ec2_vpc_peering_info:
+ redirect: community.aws.ec2_vpc_peering_info
+ ec2_vpc_route_table:
+ redirect: community.aws.ec2_vpc_route_table
+ ec2_vpc_route_table_info:
+ redirect: community.aws.ec2_vpc_route_table_info
+ ec2_vpc_vgw:
+ redirect: community.aws.ec2_vpc_vgw
+ ec2_vpc_vgw_info:
+ redirect: community.aws.ec2_vpc_vgw_info
+ ec2_vpc_vpn:
+ redirect: community.aws.ec2_vpc_vpn
+ ec2_vpc_vpn_info:
+ redirect: community.aws.ec2_vpc_vpn_info
+ ec2_win_password:
+ redirect: community.aws.ec2_win_password
+ ecs_attribute:
+ redirect: community.aws.ecs_attribute
+ ecs_cluster:
+ redirect: community.aws.ecs_cluster
+ ecs_ecr:
+ redirect: community.aws.ecs_ecr
+ ecs_service:
+ redirect: community.aws.ecs_service
+ ecs_service_info:
+ redirect: community.aws.ecs_service_info
+ ecs_tag:
+ redirect: community.aws.ecs_tag
+ ecs_task:
+ redirect: community.aws.ecs_task
+ ecs_taskdefinition:
+ redirect: community.aws.ecs_taskdefinition
+ ecs_taskdefinition_info:
+ redirect: community.aws.ecs_taskdefinition_info
+ efs:
+ redirect: community.aws.efs
+ efs_info:
+ redirect: community.aws.efs_info
+ elasticache:
+ redirect: community.aws.elasticache
+ elasticache_info:
+ redirect: community.aws.elasticache_info
+ elasticache_parameter_group:
+ redirect: community.aws.elasticache_parameter_group
+ elasticache_snapshot:
+ redirect: community.aws.elasticache_snapshot
+ elasticache_subnet_group:
+ redirect: community.aws.elasticache_subnet_group
+ elb_application_lb:
+ redirect: community.aws.elb_application_lb
+ elb_application_lb_info:
+ redirect: community.aws.elb_application_lb_info
+ elb_classic_lb:
+ redirect: community.aws.elb_classic_lb
+ elb_classic_lb_info:
+ redirect: community.aws.elb_classic_lb_info
+ elb_instance:
+ redirect: community.aws.elb_instance
+ elb_network_lb:
+ redirect: community.aws.elb_network_lb
+ elb_target:
+ redirect: community.aws.elb_target
+ elb_target_group:
+ redirect: community.aws.elb_target_group
+ elb_target_group_info:
+ redirect: community.aws.elb_target_group_info
+ elb_target_info:
+ redirect: community.aws.elb_target_info
+ execute_lambda:
+ redirect: community.aws.execute_lambda
+ iam:
+ redirect: community.aws.iam
+ iam_cert:
+ redirect: community.aws.iam_cert
+ iam_group:
+ redirect: community.aws.iam_group
+ iam_managed_policy:
+ redirect: community.aws.iam_managed_policy
+ iam_mfa_device_info:
+ redirect: community.aws.iam_mfa_device_info
+ iam_password_policy:
+ redirect: community.aws.iam_password_policy
+ iam_policy:
+ redirect: community.aws.iam_policy
+ iam_policy_info:
+ redirect: community.aws.iam_policy_info
+ iam_role:
+ redirect: community.aws.iam_role
+ iam_role_info:
+ redirect: community.aws.iam_role_info
+ iam_saml_federation:
+ redirect: community.aws.iam_saml_federation
+ iam_server_certificate_info:
+ redirect: community.aws.iam_server_certificate_info
+ iam_user:
+ redirect: community.aws.iam_user
+ iam_user_info:
+ redirect: community.aws.iam_user_info
+ kinesis_stream:
+ redirect: community.aws.kinesis_stream
+ lambda:
+ redirect: community.aws.lambda
+ lambda_alias:
+ redirect: community.aws.lambda_alias
+ lambda_event:
+ redirect: community.aws.lambda_event
+ lambda_info:
+ redirect: community.aws.lambda_info
+ lambda_policy:
+ redirect: community.aws.lambda_policy
+ lightsail:
+ redirect: community.aws.lightsail
+ rds:
+ redirect: community.aws.rds
+ rds_instance:
+ redirect: community.aws.rds_instance
+ rds_instance_info:
+ redirect: community.aws.rds_instance_info
+ rds_param_group:
+ redirect: community.aws.rds_param_group
+ rds_snapshot:
+ redirect: community.aws.rds_snapshot
+ rds_snapshot_info:
+ redirect: community.aws.rds_snapshot_info
+ rds_subnet_group:
+ redirect: community.aws.rds_subnet_group
+ redshift:
+ redirect: community.aws.redshift
+ redshift_cross_region_snapshots:
+ redirect: community.aws.redshift_cross_region_snapshots
+ redshift_info:
+ redirect: community.aws.redshift_info
+ redshift_subnet_group:
+ redirect: community.aws.redshift_subnet_group
+ route53:
+ redirect: community.aws.route53
+ route53_health_check:
+ redirect: community.aws.route53_health_check
+ route53_info:
+ redirect: community.aws.route53_info
+ route53_zone:
+ redirect: community.aws.route53_zone
+ s3_bucket_notification:
+ redirect: community.aws.s3_bucket_notification
+ s3_lifecycle:
+ redirect: community.aws.s3_lifecycle
+ s3_logging:
+ redirect: community.aws.s3_logging
+ s3_sync:
+ redirect: community.aws.s3_sync
+ s3_website:
+ redirect: community.aws.s3_website
+ sns:
+ redirect: community.aws.sns
+ sns_topic:
+ redirect: community.aws.sns_topic
+ sqs_queue:
+ redirect: community.aws.sqs_queue
+ sts_assume_role:
+ redirect: community.aws.sts_assume_role
+ sts_session_token:
+ redirect: community.aws.sts_session_token
+ ali_instance_facts:
+ redirect: community.general.ali_instance_facts
+ ali_instance:
+ redirect: community.general.ali_instance
+ ali_instance_info:
+ redirect: community.general.ali_instance_info
+ atomic_container:
+ redirect: community.general.atomic_container
+ atomic_host:
+ redirect: community.general.atomic_host
+ atomic_image:
+ redirect: community.general.atomic_image
+ clc_aa_policy:
+ redirect: community.general.clc_aa_policy
+ clc_alert_policy:
+ redirect: community.general.clc_alert_policy
+ clc_blueprint_package:
+ redirect: community.general.clc_blueprint_package
+ clc_firewall_policy:
+ redirect: community.general.clc_firewall_policy
+ clc_group:
+ redirect: community.general.clc_group
+ clc_loadbalancer:
+ redirect: community.general.clc_loadbalancer
+ clc_modify_server:
+ redirect: community.general.clc_modify_server
+ clc_publicip:
+ redirect: community.general.clc_publicip
+ clc_server:
+ redirect: community.general.clc_server
+ clc_server_snapshot:
+ redirect: community.general.clc_server_snapshot
+ cloudscale_floating_ip:
+ redirect: cloudscale_ch.cloud.floating_ip
+ cloudscale_server:
+ redirect: cloudscale_ch.cloud.server
+ cloudscale_server_group:
+ redirect: cloudscale_ch.cloud.server_group
+ cloudscale_volume:
+ redirect: cloudscale_ch.cloud.volume
+ cs_instance_facts:
+ redirect: ngine_io.cloudstack.cs_instance_info
+ cs_zone_facts:
+ redirect: ngine_io.cloudstack.cs_zone_info
+ cs_account:
+ redirect: ngine_io.cloudstack.cs_account
+ cs_affinitygroup:
+ redirect: ngine_io.cloudstack.cs_affinitygroup
+ cs_cluster:
+ redirect: ngine_io.cloudstack.cs_cluster
+ cs_configuration:
+ redirect: ngine_io.cloudstack.cs_configuration
+ cs_disk_offering:
+ redirect: ngine_io.cloudstack.cs_disk_offering
+ cs_domain:
+ redirect: ngine_io.cloudstack.cs_domain
+ cs_facts:
+ redirect: ngine_io.cloudstack.cs_facts
+ cs_firewall:
+ redirect: ngine_io.cloudstack.cs_firewall
+ cs_host:
+ redirect: ngine_io.cloudstack.cs_host
+ cs_image_store:
+ redirect: ngine_io.cloudstack.cs_image_store
+ cs_instance:
+ redirect: ngine_io.cloudstack.cs_instance
+ cs_instance_info:
+ redirect: ngine_io.cloudstack.cs_instance_info
+ cs_instance_nic:
+ redirect: ngine_io.cloudstack.cs_instance_nic
+ cs_instance_nic_secondaryip:
+ redirect: ngine_io.cloudstack.cs_instance_nic_secondaryip
+ cs_instance_password_reset:
+ redirect: ngine_io.cloudstack.cs_instance_password_reset
+ cs_instancegroup:
+ redirect: ngine_io.cloudstack.cs_instancegroup
+ cs_ip_address:
+ redirect: ngine_io.cloudstack.cs_ip_address
+ cs_iso:
+ redirect: ngine_io.cloudstack.cs_iso
+ cs_loadbalancer_rule:
+ redirect: ngine_io.cloudstack.cs_loadbalancer_rule
+ cs_loadbalancer_rule_member:
+ redirect: ngine_io.cloudstack.cs_loadbalancer_rule_member
+ cs_network:
+ redirect: ngine_io.cloudstack.cs_network
+ cs_network_acl:
+ redirect: ngine_io.cloudstack.cs_network_acl
+ cs_network_acl_rule:
+ redirect: ngine_io.cloudstack.cs_network_acl_rule
+ cs_network_offering:
+ redirect: ngine_io.cloudstack.cs_network_offering
+ cs_physical_network:
+ redirect: ngine_io.cloudstack.cs_physical_network
+ cs_pod:
+ redirect: ngine_io.cloudstack.cs_pod
+ cs_portforward:
+ redirect: ngine_io.cloudstack.cs_portforward
+ cs_project:
+ redirect: ngine_io.cloudstack.cs_project
+ cs_region:
+ redirect: ngine_io.cloudstack.cs_region
+ cs_resourcelimit:
+ redirect: ngine_io.cloudstack.cs_resourcelimit
+ cs_role:
+ redirect: ngine_io.cloudstack.cs_role
+ cs_role_permission:
+ redirect: ngine_io.cloudstack.cs_role_permission
+ cs_router:
+ redirect: ngine_io.cloudstack.cs_router
+ cs_securitygroup:
+ redirect: ngine_io.cloudstack.cs_securitygroup
+ cs_securitygroup_rule:
+ redirect: ngine_io.cloudstack.cs_securitygroup_rule
+ cs_service_offering:
+ redirect: ngine_io.cloudstack.cs_service_offering
+ cs_snapshot_policy:
+ redirect: ngine_io.cloudstack.cs_snapshot_policy
+ cs_sshkeypair:
+ redirect: ngine_io.cloudstack.cs_sshkeypair
+ cs_staticnat:
+ redirect: ngine_io.cloudstack.cs_staticnat
+ cs_storage_pool:
+ redirect: ngine_io.cloudstack.cs_storage_pool
+ cs_template:
+ redirect: ngine_io.cloudstack.cs_template
+ cs_traffic_type:
+ redirect: ngine_io.cloudstack.cs_traffic_type
+ cs_user:
+ redirect: ngine_io.cloudstack.cs_user
+ cs_vlan_ip_range:
+ redirect: ngine_io.cloudstack.cs_vlan_ip_range
+ cs_vmsnapshot:
+ redirect: ngine_io.cloudstack.cs_vmsnapshot
+ cs_volume:
+ redirect: ngine_io.cloudstack.cs_volume
+ cs_vpc:
+ redirect: ngine_io.cloudstack.cs_vpc
+ cs_vpc_offering:
+ redirect: ngine_io.cloudstack.cs_vpc_offering
+ cs_vpn_connection:
+ redirect: ngine_io.cloudstack.cs_vpn_connection
+ cs_vpn_customer_gateway:
+ redirect: ngine_io.cloudstack.cs_vpn_customer_gateway
+ cs_vpn_gateway:
+ redirect: ngine_io.cloudstack.cs_vpn_gateway
+ cs_zone:
+ redirect: ngine_io.cloudstack.cs_zone
+ cs_zone_info:
+ redirect: ngine_io.cloudstack.cs_zone_info
+ digital_ocean:
+ redirect: community.digitalocean.digital_ocean
+ digital_ocean_account_facts:
+ redirect: community.digitalocean.digital_ocean_account_facts
+ digital_ocean_certificate_facts:
+ redirect: community.digitalocean.digital_ocean_certificate_facts
+ digital_ocean_domain_facts:
+ redirect: community.digitalocean.digital_ocean_domain_facts
+ digital_ocean_firewall_facts:
+ redirect: community.digitalocean.digital_ocean_firewall_facts
+ digital_ocean_floating_ip_facts:
+ redirect: community.digitalocean.digital_ocean_floating_ip_facts
+ digital_ocean_image_facts:
+ redirect: community.digitalocean.digital_ocean_image_facts
+ digital_ocean_load_balancer_facts:
+ redirect: community.digitalocean.digital_ocean_load_balancer_facts
+ digital_ocean_region_facts:
+ redirect: community.digitalocean.digital_ocean_region_facts
+ digital_ocean_size_facts:
+ redirect: community.digitalocean.digital_ocean_size_facts
+ digital_ocean_snapshot_facts:
+ redirect: community.digitalocean.digital_ocean_snapshot_facts
+ digital_ocean_sshkey_facts:
+ redirect: community.digitalocean.digital_ocean_sshkey_facts
+ digital_ocean_tag_facts:
+ redirect: community.digitalocean.digital_ocean_tag_facts
+ digital_ocean_volume_facts:
+ redirect: community.digitalocean.digital_ocean_volume_facts
+ digital_ocean_account_info:
+ redirect: community.digitalocean.digital_ocean_account_info
+ digital_ocean_block_storage:
+ redirect: community.digitalocean.digital_ocean_block_storage
+ digital_ocean_certificate:
+ redirect: community.digitalocean.digital_ocean_certificate
+ digital_ocean_certificate_info:
+ redirect: community.digitalocean.digital_ocean_certificate_info
+ digital_ocean_domain:
+ redirect: community.digitalocean.digital_ocean_domain
+ digital_ocean_domain_info:
+ redirect: community.digitalocean.digital_ocean_domain_info
+ digital_ocean_droplet:
+ redirect: community.digitalocean.digital_ocean_droplet
+ digital_ocean_firewall_info:
+ redirect: community.digitalocean.digital_ocean_firewall_info
+ digital_ocean_floating_ip:
+ redirect: community.digitalocean.digital_ocean_floating_ip
+ digital_ocean_floating_ip_info:
+ redirect: community.digitalocean.digital_ocean_floating_ip_info
+ digital_ocean_image_info:
+ redirect: community.digitalocean.digital_ocean_image_info
+ digital_ocean_load_balancer_info:
+ redirect: community.digitalocean.digital_ocean_load_balancer_info
+ digital_ocean_region_info:
+ redirect: community.digitalocean.digital_ocean_region_info
+ digital_ocean_size_info:
+ redirect: community.digitalocean.digital_ocean_size_info
+ digital_ocean_snapshot_info:
+ redirect: community.digitalocean.digital_ocean_snapshot_info
+ digital_ocean_sshkey:
+ redirect: community.digitalocean.digital_ocean_sshkey
+ digital_ocean_sshkey_info:
+ redirect: community.digitalocean.digital_ocean_sshkey_info
+ digital_ocean_tag:
+ redirect: community.digitalocean.digital_ocean_tag
+ digital_ocean_tag_info:
+ redirect: community.digitalocean.digital_ocean_tag_info
+ digital_ocean_volume_info:
+ redirect: community.digitalocean.digital_ocean_volume_info
+ dimensiondata_network:
+ redirect: community.general.dimensiondata_network
+ dimensiondata_vlan:
+ redirect: community.general.dimensiondata_vlan
+ docker_image_facts:
+ redirect: community.general.docker_image_facts
+ docker_service:
+ redirect: community.general.docker_service
+ docker_compose:
+ redirect: community.general.docker_compose
+ docker_config:
+ redirect: community.general.docker_config
+ docker_container:
+ redirect: community.general.docker_container
+ docker_container_info:
+ redirect: community.general.docker_container_info
+ docker_host_info:
+ redirect: community.general.docker_host_info
+ docker_image:
+ redirect: community.general.docker_image
+ docker_image_info:
+ redirect: community.general.docker_image_info
+ docker_login:
+ redirect: community.general.docker_login
+ docker_network:
+ redirect: community.general.docker_network
+ docker_network_info:
+ redirect: community.general.docker_network_info
+ docker_node:
+ redirect: community.general.docker_node
+ docker_node_info:
+ redirect: community.general.docker_node_info
+ docker_prune:
+ redirect: community.general.docker_prune
+ docker_secret:
+ redirect: community.general.docker_secret
+ docker_stack:
+ redirect: community.general.docker_stack
+ docker_swarm:
+ redirect: community.general.docker_swarm
+ docker_swarm_info:
+ redirect: community.general.docker_swarm_info
+ docker_swarm_service:
+ redirect: community.general.docker_swarm_service
+ docker_swarm_service_info:
+ redirect: community.general.docker_swarm_service_info
+ docker_volume:
+ redirect: community.general.docker_volume
+ docker_volume_info:
+ redirect: community.general.docker_volume_info
+ gcdns_record:
+ redirect: community.general.gcdns_record
+ gcdns_zone:
+ redirect: community.general.gcdns_zone
+ gce:
+ redirect: community.general.gce
+ gcp_backend_service:
+ redirect: community.general.gcp_backend_service
+ gcp_bigquery_dataset_facts:
+ redirect: google.cloud.gcp_bigquery_dataset_info
+ gcp_bigquery_table_facts:
+ redirect: google.cloud.gcp_bigquery_table_info
+ gcp_cloudbuild_trigger_facts:
+ redirect: google.cloud.gcp_cloudbuild_trigger_info
+ gcp_compute_address_facts:
+ redirect: google.cloud.gcp_compute_address_info
+ gcp_compute_backend_bucket_facts:
+ redirect: google.cloud.gcp_compute_backend_bucket_info
+ gcp_compute_backend_service_facts:
+ redirect: google.cloud.gcp_compute_backend_service_info
+ gcp_compute_disk_facts:
+ redirect: google.cloud.gcp_compute_disk_info
+ gcp_compute_firewall_facts:
+ redirect: google.cloud.gcp_compute_firewall_info
+ gcp_compute_forwarding_rule_facts:
+ redirect: google.cloud.gcp_compute_forwarding_rule_info
+ gcp_compute_global_address_facts:
+ redirect: google.cloud.gcp_compute_global_address_info
+ gcp_compute_global_forwarding_rule_facts:
+ redirect: google.cloud.gcp_compute_global_forwarding_rule_info
+ gcp_compute_health_check_facts:
+ redirect: google.cloud.gcp_compute_health_check_info
+ gcp_compute_http_health_check_facts:
+ redirect: google.cloud.gcp_compute_http_health_check_info
+ gcp_compute_https_health_check_facts:
+ redirect: google.cloud.gcp_compute_https_health_check_info
+ gcp_compute_image_facts:
+ redirect: google.cloud.gcp_compute_image_info
+ gcp_compute_instance_facts:
+ redirect: google.cloud.gcp_compute_instance_info
+ gcp_compute_instance_group_facts:
+ redirect: google.cloud.gcp_compute_instance_group_info
+ gcp_compute_instance_group_manager_facts:
+ redirect: google.cloud.gcp_compute_instance_group_manager_info
+ gcp_compute_instance_template_facts:
+ redirect: google.cloud.gcp_compute_instance_template_info
+ gcp_compute_interconnect_attachment_facts:
+ redirect: google.cloud.gcp_compute_interconnect_attachment_info
+ gcp_compute_network_facts:
+ redirect: google.cloud.gcp_compute_network_info
+ gcp_compute_region_disk_facts:
+ redirect: google.cloud.gcp_compute_region_disk_info
+ gcp_compute_route_facts:
+ redirect: google.cloud.gcp_compute_route_info
+ gcp_compute_router_facts:
+ redirect: google.cloud.gcp_compute_router_info
+ gcp_compute_ssl_certificate_facts:
+ redirect: google.cloud.gcp_compute_ssl_certificate_info
+ gcp_compute_ssl_policy_facts:
+ redirect: google.cloud.gcp_compute_ssl_policy_info
+ gcp_compute_subnetwork_facts:
+ redirect: google.cloud.gcp_compute_subnetwork_info
+ gcp_compute_target_http_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_http_proxy_info
+ gcp_compute_target_https_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_https_proxy_info
+ gcp_compute_target_pool_facts:
+ redirect: google.cloud.gcp_compute_target_pool_info
+ gcp_compute_target_ssl_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_ssl_proxy_info
+ gcp_compute_target_tcp_proxy_facts:
+ redirect: google.cloud.gcp_compute_target_tcp_proxy_info
+ gcp_compute_target_vpn_gateway_facts:
+ redirect: google.cloud.gcp_compute_target_vpn_gateway_info
+ gcp_compute_url_map_facts:
+ redirect: google.cloud.gcp_compute_url_map_info
+ gcp_compute_vpn_tunnel_facts:
+ redirect: google.cloud.gcp_compute_vpn_tunnel_info
+ gcp_container_cluster_facts:
+ redirect: google.cloud.gcp_container_cluster_info
+ gcp_container_node_pool_facts:
+ redirect: google.cloud.gcp_container_node_pool_info
+ gcp_dns_managed_zone_facts:
+ redirect: google.cloud.gcp_dns_managed_zone_info
+ gcp_dns_resource_record_set_facts:
+ redirect: google.cloud.gcp_dns_resource_record_set_info
+ gcp_forwarding_rule:
+ redirect: community.general.gcp_forwarding_rule
+ gcp_healthcheck:
+ redirect: community.general.gcp_healthcheck
+ gcp_iam_role_facts:
+ redirect: google.cloud.gcp_iam_role_info
+ gcp_iam_service_account_facts:
+ redirect: google.cloud.gcp_iam_service_account_info
+ gcp_pubsub_subscription_facts:
+ redirect: google.cloud.gcp_pubsub_subscription_info
+ gcp_pubsub_topic_facts:
+ redirect: google.cloud.gcp_pubsub_topic_info
+ gcp_redis_instance_facts:
+ redirect: google.cloud.gcp_redis_instance_info
+ gcp_resourcemanager_project_facts:
+ redirect: google.cloud.gcp_resourcemanager_project_info
+ gcp_sourcerepo_repository_facts:
+ redirect: google.cloud.gcp_sourcerepo_repository_info
+ gcp_spanner_database_facts:
+ redirect: google.cloud.gcp_spanner_database_info
+ gcp_spanner_instance_facts:
+ redirect: google.cloud.gcp_spanner_instance_info
+ gcp_sql_database_facts:
+ redirect: google.cloud.gcp_sql_database_info
+ gcp_sql_instance_facts:
+ redirect: google.cloud.gcp_sql_instance_info
+ gcp_sql_user_facts:
+ redirect: google.cloud.gcp_sql_user_info
+ gcp_target_proxy:
+ redirect: community.general.gcp_target_proxy
+ gcp_tpu_node_facts:
+ redirect: google.cloud.gcp_tpu_node_info
+ gcp_url_map:
+ redirect: community.general.gcp_url_map
+ gcpubsub_facts:
+ redirect: community.general.gcpubsub_facts
+ gcspanner:
+ redirect: community.general.gcspanner
+ gc_storage:
+ redirect: community.general.gc_storage
+ gce_eip:
+ redirect: community.general.gce_eip
+ gce_img:
+ redirect: community.general.gce_img
+ gce_instance_template:
+ redirect: community.general.gce_instance_template
+ gce_labels:
+ redirect: community.general.gce_labels
+ gce_lb:
+ redirect: community.general.gce_lb
+ gce_mig:
+ redirect: community.general.gce_mig
+ gce_net:
+ redirect: community.general.gce_net
+ gce_pd:
+ redirect: community.general.gce_pd
+ gce_snapshot:
+ redirect: community.general.gce_snapshot
+ gce_tag:
+ redirect: community.general.gce_tag
+ gcpubsub:
+ redirect: community.general.gcpubsub
+ gcpubsub_info:
+ redirect: community.general.gcpubsub_info
+ heroku_collaborator:
+ redirect: community.general.heroku_collaborator
+ hwc_ecs_instance:
+ redirect: community.general.hwc_ecs_instance
+ hwc_evs_disk:
+ redirect: community.general.hwc_evs_disk
+ hwc_network_vpc:
+ redirect: community.general.hwc_network_vpc
+ hwc_smn_topic:
+ redirect: community.general.hwc_smn_topic
+ hwc_vpc_eip:
+ redirect: community.general.hwc_vpc_eip
+ hwc_vpc_peering_connect:
+ redirect: community.general.hwc_vpc_peering_connect
+ hwc_vpc_port:
+ redirect: community.general.hwc_vpc_port
+ hwc_vpc_private_ip:
+ redirect: community.general.hwc_vpc_private_ip
+ hwc_vpc_route:
+ redirect: community.general.hwc_vpc_route
+ hwc_vpc_security_group:
+ redirect: community.general.hwc_vpc_security_group
+ hwc_vpc_security_group_rule:
+ redirect: community.general.hwc_vpc_security_group_rule
+ hwc_vpc_subnet:
+ redirect: community.general.hwc_vpc_subnet
+ kubevirt_cdi_upload:
+ redirect: community.general.kubevirt_cdi_upload
+ kubevirt_preset:
+ redirect: community.general.kubevirt_preset
+ kubevirt_pvc:
+ redirect: community.general.kubevirt_pvc
+ kubevirt_rs:
+ redirect: community.general.kubevirt_rs
+ kubevirt_template:
+ redirect: community.general.kubevirt_template
+ kubevirt_vm:
+ redirect: community.general.kubevirt_vm
+ linode:
+ redirect: community.general.linode
+ linode_v4:
+ redirect: community.general.linode_v4
+ lxc_container:
+ redirect: community.general.lxc_container
+ lxd_container:
+ redirect: community.general.lxd_container
+ lxd_profile:
+ redirect: community.general.lxd_profile
+ memset_memstore_facts:
+ redirect: community.general.memset_memstore_facts
+ memset_server_facts:
+ redirect: community.general.memset_server_facts
+ memset_dns_reload:
+ redirect: community.general.memset_dns_reload
+ memset_memstore_info:
+ redirect: community.general.memset_memstore_info
+ memset_server_info:
+ redirect: community.general.memset_server_info
+ memset_zone:
+ redirect: community.general.memset_zone
+ memset_zone_domain:
+ redirect: community.general.memset_zone_domain
+ memset_zone_record:
+ redirect: community.general.memset_zone_record
+ cloud_init_data_facts:
+ redirect: community.general.cloud_init_data_facts
+ helm:
+ redirect: community.general.helm
+ ovirt:
+ redirect: community.general.ovirt
+ proxmox:
+ redirect: community.general.proxmox
+ proxmox_kvm:
+ redirect: community.general.proxmox_kvm
+ proxmox_template:
+ redirect: community.general.proxmox_template
+ rhevm:
+ redirect: community.general.rhevm
+ serverless:
+ redirect: community.general.serverless
+ terraform:
+ redirect: community.general.terraform
+ virt:
+ redirect: community.libvirt.virt
+ virt_net:
+ redirect: community.libvirt.virt_net
+ virt_pool:
+ redirect: community.libvirt.virt_pool
+ xenserver_facts:
+ redirect: community.general.xenserver_facts
+ oneandone_firewall_policy:
+ redirect: community.general.oneandone_firewall_policy
+ oneandone_load_balancer:
+ redirect: community.general.oneandone_load_balancer
+ oneandone_monitoring_policy:
+ redirect: community.general.oneandone_monitoring_policy
+ oneandone_private_network:
+ redirect: community.general.oneandone_private_network
+ oneandone_public_ip:
+ redirect: community.general.oneandone_public_ip
+ oneandone_server:
+ redirect: community.general.oneandone_server
+ online_server_facts:
+ redirect: community.general.online_server_facts
+ online_user_facts:
+ redirect: community.general.online_user_facts
+ online_server_info:
+ redirect: community.general.online_server_info
+ online_user_info:
+ redirect: community.general.online_user_info
+ one_image_facts:
+ redirect: community.general.one_image_facts
+ one_host:
+ redirect: community.general.one_host
+ one_image:
+ redirect: community.general.one_image
+ one_image_info:
+ redirect: community.general.one_image_info
+ one_service:
+ redirect: community.general.one_service
+ one_vm:
+ redirect: community.general.one_vm
+ os_flavor_facts:
+ redirect: openstack.cloud.os_flavor_info
+ os_image_facts:
+ redirect: openstack.cloud.os_image_info
+ os_keystone_domain_facts:
+ redirect: openstack.cloud.os_keystone_domain_info
+ os_networks_facts:
+ redirect: openstack.cloud.os_networks_info
+ os_port_facts:
+ redirect: openstack.cloud.os_port_info
+ os_project_facts:
+ redirect: openstack.cloud.os_project_info
+ os_server_facts:
+ redirect: openstack.cloud.os_server_info
+ os_subnets_facts:
+ redirect: openstack.cloud.os_subnets_info
+ os_user_facts:
+ redirect: openstack.cloud.os_user_info
+ oci_vcn:
+ redirect: community.general.oci_vcn
+ ovh_ip_failover:
+ redirect: community.general.ovh_ip_failover
+ ovh_ip_loadbalancing_backend:
+ redirect: community.general.ovh_ip_loadbalancing_backend
+ ovh_monthly_billing:
+ redirect: community.general.ovh_monthly_billing
+ ovirt_affinity_label_facts:
+ redirect: community.general.ovirt_affinity_label_facts
+ ovirt_api_facts:
+ redirect: community.general.ovirt_api_facts
+ ovirt_cluster_facts:
+ redirect: community.general.ovirt_cluster_facts
+ ovirt_datacenter_facts:
+ redirect: community.general.ovirt_datacenter_facts
+ ovirt_disk_facts:
+ redirect: community.general.ovirt_disk_facts
+ ovirt_event_facts:
+ redirect: community.general.ovirt_event_facts
+ ovirt_external_provider_facts:
+ redirect: community.general.ovirt_external_provider_facts
+ ovirt_group_facts:
+ redirect: community.general.ovirt_group_facts
+ ovirt_host_facts:
+ redirect: community.general.ovirt_host_facts
+ ovirt_host_storage_facts:
+ redirect: community.general.ovirt_host_storage_facts
+ ovirt_network_facts:
+ redirect: community.general.ovirt_network_facts
+ ovirt_nic_facts:
+ redirect: community.general.ovirt_nic_facts
+ ovirt_permission_facts:
+ redirect: community.general.ovirt_permission_facts
+ ovirt_quota_facts:
+ redirect: community.general.ovirt_quota_facts
+ ovirt_scheduling_policy_facts:
+ redirect: community.general.ovirt_scheduling_policy_facts
+ ovirt_snapshot_facts:
+ redirect: community.general.ovirt_snapshot_facts
+ ovirt_storage_domain_facts:
+ redirect: community.general.ovirt_storage_domain_facts
+ ovirt_storage_template_facts:
+ redirect: community.general.ovirt_storage_template_facts
+ ovirt_storage_vm_facts:
+ redirect: community.general.ovirt_storage_vm_facts
+ ovirt_tag_facts:
+ redirect: community.general.ovirt_tag_facts
+ ovirt_template_facts:
+ redirect: community.general.ovirt_template_facts
+ ovirt_user_facts:
+ redirect: community.general.ovirt_user_facts
+ ovirt_vm_facts:
+ redirect: community.general.ovirt_vm_facts
+ ovirt_vmpool_facts:
+ redirect: community.general.ovirt_vmpool_facts
+ packet_device:
+ redirect: community.general.packet_device
+ packet_ip_subnet:
+ redirect: community.general.packet_ip_subnet
+ packet_project:
+ redirect: community.general.packet_project
+ packet_sshkey:
+ redirect: community.general.packet_sshkey
+ packet_volume:
+ redirect: community.general.packet_volume
+ packet_volume_attachment:
+ redirect: community.general.packet_volume_attachment
+ profitbricks:
+ redirect: community.general.profitbricks
+ profitbricks_datacenter:
+ redirect: community.general.profitbricks_datacenter
+ profitbricks_nic:
+ redirect: community.general.profitbricks_nic
+ profitbricks_volume:
+ redirect: community.general.profitbricks_volume
+ profitbricks_volume_attachments:
+ redirect: community.general.profitbricks_volume_attachments
+ pubnub_blocks:
+ redirect: community.general.pubnub_blocks
+ rax:
+ redirect: community.general.rax
+ rax_cbs:
+ redirect: community.general.rax_cbs
+ rax_cbs_attachments:
+ redirect: community.general.rax_cbs_attachments
+ rax_cdb:
+ redirect: community.general.rax_cdb
+ rax_cdb_database:
+ redirect: community.general.rax_cdb_database
+ rax_cdb_user:
+ redirect: community.general.rax_cdb_user
+ rax_clb:
+ redirect: community.general.rax_clb
+ rax_clb_nodes:
+ redirect: community.general.rax_clb_nodes
+ rax_clb_ssl:
+ redirect: community.general.rax_clb_ssl
+ rax_dns:
+ redirect: community.general.rax_dns
+ rax_dns_record:
+ redirect: community.general.rax_dns_record
+ rax_facts:
+ redirect: community.general.rax_facts
+ rax_files:
+ redirect: community.general.rax_files
+ rax_files_objects:
+ redirect: community.general.rax_files_objects
+ rax_identity:
+ redirect: community.general.rax_identity
+ rax_keypair:
+ redirect: community.general.rax_keypair
+ rax_meta:
+ redirect: community.general.rax_meta
+ rax_mon_alarm:
+ redirect: community.general.rax_mon_alarm
+ rax_mon_check:
+ redirect: community.general.rax_mon_check
+ rax_mon_entity:
+ redirect: community.general.rax_mon_entity
+ rax_mon_notification:
+ redirect: community.general.rax_mon_notification
+ rax_mon_notification_plan:
+ redirect: community.general.rax_mon_notification_plan
+ rax_network:
+ redirect: community.general.rax_network
+ rax_queue:
+ redirect: community.general.rax_queue
+ rax_scaling_group:
+ redirect: community.general.rax_scaling_group
+ rax_scaling_policy:
+ redirect: community.general.rax_scaling_policy
+ scaleway_image_facts:
+ redirect: community.general.scaleway_image_facts
+ scaleway_ip_facts:
+ redirect: community.general.scaleway_ip_facts
+ scaleway_organization_facts:
+ redirect: community.general.scaleway_organization_facts
+ scaleway_security_group_facts:
+ redirect: community.general.scaleway_security_group_facts
+ scaleway_server_facts:
+ redirect: community.general.scaleway_server_facts
+ scaleway_snapshot_facts:
+ redirect: community.general.scaleway_snapshot_facts
+ scaleway_volume_facts:
+ redirect: community.general.scaleway_volume_facts
+ scaleway_compute:
+ redirect: community.general.scaleway_compute
+ scaleway_image_info:
+ redirect: community.general.scaleway_image_info
+ scaleway_ip:
+ redirect: community.general.scaleway_ip
+ scaleway_ip_info:
+ redirect: community.general.scaleway_ip_info
+ scaleway_lb:
+ redirect: community.general.scaleway_lb
+ scaleway_organization_info:
+ redirect: community.general.scaleway_organization_info
+ scaleway_security_group:
+ redirect: community.general.scaleway_security_group
+ scaleway_security_group_info:
+ redirect: community.general.scaleway_security_group_info
+ scaleway_security_group_rule:
+ redirect: community.general.scaleway_security_group_rule
+ scaleway_server_info:
+ redirect: community.general.scaleway_server_info
+ scaleway_snapshot_info:
+ redirect: community.general.scaleway_snapshot_info
+ scaleway_sshkey:
+ redirect: community.general.scaleway_sshkey
+ scaleway_user_data:
+ redirect: community.general.scaleway_user_data
+ scaleway_volume:
+ redirect: community.general.scaleway_volume
+ scaleway_volume_info:
+ redirect: community.general.scaleway_volume_info
+ smartos_image_facts:
+ redirect: community.general.smartos_image_facts
+ imgadm:
+ redirect: community.general.imgadm
+ nictagadm:
+ redirect: community.general.nictagadm
+ smartos_image_info:
+ redirect: community.general.smartos_image_info
+ vmadm:
+ redirect: community.general.vmadm
+ sl_vm:
+ redirect: community.general.sl_vm
+ spotinst_aws_elastigroup:
+ redirect: community.general.spotinst_aws_elastigroup
+ udm_dns_record:
+ redirect: community.general.udm_dns_record
+ udm_dns_zone:
+ redirect: community.general.udm_dns_zone
+ udm_group:
+ redirect: community.general.udm_group
+ udm_share:
+ redirect: community.general.udm_share
+ udm_user:
+ redirect: community.general.udm_user
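+ # The legacy vr_* short names redirect to the renamed vultr_* modules in ngine_io.vultr.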
+ vr_account_facts:
+ redirect: ngine_io.vultr.vultr_account_facts
+ vr_dns_domain:
+ redirect: ngine_io.vultr.vultr_dns_domain
+ vr_dns_record:
+ redirect: ngine_io.vultr.vultr_dns_record
+ vr_firewall_group:
+ redirect: ngine_io.vultr.vultr_firewall_group
+ vr_firewall_rule:
+ redirect: ngine_io.vultr.vultr_firewall_rule
+ vr_server:
+ redirect: ngine_io.vultr.vultr_server
+ vr_ssh_key:
+ redirect: ngine_io.vultr.vultr_ssh_key
+ vr_startup_script:
+ redirect: ngine_io.vultr.vultr_startup_script
+ vr_user:
+ redirect: ngine_io.vultr.vultr_user
+ vultr_account_facts:
+ redirect: ngine_io.vultr.vultr_account_info
+ vultr_block_storage_facts:
+ redirect: ngine_io.vultr.vultr_block_storage_info
+ vultr_dns_domain_facts:
+ redirect: ngine_io.vultr.vultr_dns_domain_info
+ vultr_firewall_group_facts:
+ redirect: ngine_io.vultr.vultr_firewall_group_info
+ vultr_network_facts:
+ redirect: ngine_io.vultr.vultr_network_info
+ vultr_os_facts:
+ redirect: ngine_io.vultr.vultr_os_info
+ vultr_plan_facts:
+ redirect: ngine_io.vultr.vultr_plan_info
+ vultr_region_facts:
+ redirect: ngine_io.vultr.vultr_region_info
+ vultr_server_facts:
+ redirect: ngine_io.vultr.vultr_server_info
+ vultr_ssh_key_facts:
+ redirect: ngine_io.vultr.vultr_ssh_key_info
+ vultr_startup_script_facts:
+ redirect: ngine_io.vultr.vultr_startup_script_info
+ vultr_user_facts:
+ redirect: ngine_io.vultr.vultr_user_info
+ vultr_account_info:
+ redirect: ngine_io.vultr.vultr_account_info
+ vultr_block_storage:
+ redirect: ngine_io.vultr.vultr_block_storage
+ vultr_block_storage_info:
+ redirect: ngine_io.vultr.vultr_block_storage_info
+ vultr_dns_domain:
+ redirect: ngine_io.vultr.vultr_dns_domain
+ vultr_dns_domain_info:
+ redirect: ngine_io.vultr.vultr_dns_domain_info
+ vultr_dns_record:
+ redirect: ngine_io.vultr.vultr_dns_record
+ vultr_firewall_group:
+ redirect: ngine_io.vultr.vultr_firewall_group
+ vultr_firewall_group_info:
+ redirect: ngine_io.vultr.vultr_firewall_group_info
+ vultr_firewall_rule:
+ redirect: ngine_io.vultr.vultr_firewall_rule
+ vultr_network:
+ redirect: ngine_io.vultr.vultr_network
+ vultr_network_info:
+ redirect: ngine_io.vultr.vultr_network_info
+ vultr_os_info:
+ redirect: ngine_io.vultr.vultr_os_info
+ vultr_plan_info:
+ redirect: ngine_io.vultr.vultr_plan_info
+ vultr_region_info:
+ redirect: ngine_io.vultr.vultr_region_info
+ vultr_server:
+ redirect: ngine_io.vultr.vultr_server
+ vultr_server_info:
+ redirect: ngine_io.vultr.vultr_server_info
+ vultr_ssh_key:
+ redirect: ngine_io.vultr.vultr_ssh_key
+ vultr_ssh_key_info:
+ redirect: ngine_io.vultr.vultr_ssh_key_info
+ vultr_startup_script:
+ redirect: ngine_io.vultr.vultr_startup_script
+ vultr_startup_script_info:
+ redirect: ngine_io.vultr.vultr_startup_script_info
+ vultr_user:
+ redirect: ngine_io.vultr.vultr_user
+ vultr_user_info:
+ redirect: ngine_io.vultr.vultr_user_info
+ webfaction_app:
+ redirect: community.general.webfaction_app
+ webfaction_db:
+ redirect: community.general.webfaction_db
+ webfaction_domain:
+ redirect: community.general.webfaction_domain
+ webfaction_mailbox:
+ redirect: community.general.webfaction_mailbox
+ webfaction_site:
+ redirect: community.general.webfaction_site
+ xenserver_guest_facts:
+ redirect: community.general.xenserver_guest_facts
+ xenserver_guest:
+ redirect: community.general.xenserver_guest
+ xenserver_guest_info:
+ redirect: community.general.xenserver_guest_info
+ xenserver_guest_powerstate:
+ redirect: community.general.xenserver_guest_powerstate
+ consul:
+ redirect: community.general.consul
+ consul_acl:
+ redirect: community.general.consul_acl
+ consul_kv:
+ redirect: community.general.consul_kv
+ consul_session:
+ redirect: community.general.consul_session
+ etcd3:
+ redirect: community.general.etcd3
+ pacemaker_cluster:
+ redirect: community.general.pacemaker_cluster
+ znode:
+ redirect: community.general.znode
+ aerospike_migrations:
+ redirect: community.general.aerospike_migrations
+ influxdb_database:
+ redirect: community.general.influxdb_database
+ influxdb_query:
+ redirect: community.general.influxdb_query
+ influxdb_retention_policy:
+ redirect: community.general.influxdb_retention_policy
+ influxdb_user:
+ redirect: community.general.influxdb_user
+ influxdb_write:
+ redirect: community.general.influxdb_write
+ elasticsearch_plugin:
+ redirect: community.general.elasticsearch_plugin
+ kibana_plugin:
+ redirect: community.general.kibana_plugin
+ redis:
+ redirect: community.general.redis
+ riak:
+ redirect: community.general.riak
+ mssql_db:
+ redirect: community.general.mssql_db
+ mysql_db:
+ redirect: community.mysql.mysql_db
+ mysql_info:
+ redirect: community.mysql.mysql_info
+ mysql_query:
+ redirect: community.mysql.mysql_query
+ mysql_replication:
+ redirect: community.mysql.mysql_replication
+ mysql_user:
+ redirect: community.mysql.mysql_user
+ mysql_variables:
+ redirect: community.mysql.mysql_variables
+ postgresql_copy:
+ redirect: community.general.postgresql_copy
+ postgresql_db:
+ redirect: community.general.postgresql_db
+ postgresql_ext:
+ redirect: community.general.postgresql_ext
+ postgresql_idx:
+ redirect: community.general.postgresql_idx
+ postgresql_info:
+ redirect: community.general.postgresql_info
+ postgresql_lang:
+ redirect: community.general.postgresql_lang
+ postgresql_membership:
+ redirect: community.general.postgresql_membership
+ postgresql_owner:
+ redirect: community.general.postgresql_owner
+ postgresql_pg_hba:
+ redirect: community.general.postgresql_pg_hba
+ postgresql_ping:
+ redirect: community.general.postgresql_ping
+ postgresql_privs:
+ redirect: community.general.postgresql_privs
+ postgresql_publication:
+ redirect: community.general.postgresql_publication
+ postgresql_query:
+ redirect: community.general.postgresql_query
+ postgresql_schema:
+ redirect: community.general.postgresql_schema
+ postgresql_sequence:
+ redirect: community.general.postgresql_sequence
+ postgresql_set:
+ redirect: community.general.postgresql_set
+ postgresql_slot:
+ redirect: community.general.postgresql_slot
+ postgresql_subscription:
+ redirect: community.general.postgresql_subscription
+ postgresql_table:
+ redirect: community.general.postgresql_table
+ postgresql_tablespace:
+ redirect: community.general.postgresql_tablespace
+ postgresql_user:
+ redirect: community.general.postgresql_user
+ postgresql_user_obj_stat_info:
+ redirect: community.general.postgresql_user_obj_stat_info
+ proxysql_backend_servers:
+ redirect: community.proxysql.proxysql_backend_servers
+ proxysql_global_variables:
+ redirect: community.proxysql.proxysql_global_variables
+ proxysql_manage_config:
+ redirect: community.proxysql.proxysql_manage_config
+ proxysql_mysql_users:
+ redirect: community.proxysql.proxysql_mysql_users
+ proxysql_query_rules:
+ redirect: community.proxysql.proxysql_query_rules
+ proxysql_replication_hostgroups:
+ redirect: community.proxysql.proxysql_replication_hostgroups
+ proxysql_scheduler:
+ redirect: community.proxysql.proxysql_scheduler
+ vertica_facts:
+ redirect: community.general.vertica_facts
+ vertica_configuration:
+ redirect: community.general.vertica_configuration
+ vertica_info:
+ redirect: community.general.vertica_info
+ vertica_role:
+ redirect: community.general.vertica_role
+ vertica_schema:
+ redirect: community.general.vertica_schema
+ vertica_user:
+ redirect: community.general.vertica_user
+ archive:
+ redirect: community.general.archive
+ ini_file:
+ redirect: community.general.ini_file
+ iso_extract:
+ redirect: community.general.iso_extract
+ patch:
+ redirect: ansible.posix.patch
+ read_csv:
+ redirect: community.general.read_csv
+ xattr:
+ redirect: community.general.xattr
+ xml:
+ redirect: community.general.xml
+ onepassword_facts:
+ redirect: community.general.onepassword_facts
+ ipa_config:
+ redirect: community.general.ipa_config
+ ipa_dnsrecord:
+ redirect: community.general.ipa_dnsrecord
+ ipa_dnszone:
+ redirect: community.general.ipa_dnszone
+ ipa_group:
+ redirect: community.general.ipa_group
+ ipa_hbacrule:
+ redirect: community.general.ipa_hbacrule
+ ipa_host:
+ redirect: community.general.ipa_host
+ ipa_hostgroup:
+ redirect: community.general.ipa_hostgroup
+ ipa_role:
+ redirect: community.general.ipa_role
+ ipa_service:
+ redirect: community.general.ipa_service
+ ipa_subca:
+ redirect: community.general.ipa_subca
+ ipa_sudocmd:
+ redirect: community.general.ipa_sudocmd
+ ipa_sudocmdgroup:
+ redirect: community.general.ipa_sudocmdgroup
+ ipa_sudorule:
+ redirect: community.general.ipa_sudorule
+ ipa_user:
+ redirect: community.general.ipa_user
+ ipa_vault:
+ redirect: community.general.ipa_vault
+ keycloak_client:
+ redirect: community.general.keycloak_client
+ keycloak_clienttemplate:
+ redirect: community.general.keycloak_clienttemplate
+ keycloak_group:
+ redirect: community.general.keycloak_group
+ onepassword_info:
+ redirect: community.general.onepassword_info
+ opendj_backendprop:
+ redirect: community.general.opendj_backendprop
+ rabbitmq_binding:
+ redirect: community.rabbitmq.rabbitmq_binding
+ rabbitmq_exchange:
+ redirect: community.rabbitmq.rabbitmq_exchange
+ rabbitmq_global_parameter:
+ redirect: community.rabbitmq.rabbitmq_global_parameter
+ rabbitmq_parameter:
+ redirect: community.rabbitmq.rabbitmq_parameter
+ rabbitmq_plugin:
+ redirect: community.rabbitmq.rabbitmq_plugin
+ rabbitmq_policy:
+ redirect: community.rabbitmq.rabbitmq_policy
+ rabbitmq_queue:
+ redirect: community.rabbitmq.rabbitmq_queue
+ rabbitmq_user:
+ redirect: community.rabbitmq.rabbitmq_user
+ rabbitmq_vhost:
+ redirect: community.rabbitmq.rabbitmq_vhost
+ rabbitmq_vhost_limits:
+ redirect: community.rabbitmq.rabbitmq_vhost_limits
+ airbrake_deployment:
+ redirect: community.general.airbrake_deployment
+ bigpanda:
+ redirect: community.general.bigpanda
+ circonus_annotation:
+ redirect: community.general.circonus_annotation
+ datadog_event:
+ redirect: community.general.datadog_event
+ datadog_monitor:
+ redirect: community.general.datadog_monitor
+ honeybadger_deployment:
+ redirect: community.general.honeybadger_deployment
+ icinga2_feature:
+ redirect: community.general.icinga2_feature
+ icinga2_host:
+ redirect: community.general.icinga2_host
+ librato_annotation:
+ redirect: community.general.librato_annotation
+ logentries:
+ redirect: community.general.logentries
+ logicmonitor:
+ redirect: community.general.logicmonitor
+ logicmonitor_facts:
+ redirect: community.general.logicmonitor_facts
+ logstash_plugin:
+ redirect: community.general.logstash_plugin
+ monit:
+ redirect: community.general.monit
+ nagios:
+ redirect: community.general.nagios
+ newrelic_deployment:
+ redirect: community.general.newrelic_deployment
+ pagerduty:
+ redirect: community.general.pagerduty
+ pagerduty_alert:
+ redirect: community.general.pagerduty_alert
+ pingdom:
+ redirect: community.general.pingdom
+ rollbar_deployment:
+ redirect: community.general.rollbar_deployment
+ sensu_check:
+ redirect: community.general.sensu_check
+ sensu_client:
+ redirect: community.general.sensu_client
+ sensu_handler:
+ redirect: community.general.sensu_handler
+ sensu_silence:
+ redirect: community.general.sensu_silence
+ sensu_subscription:
+ redirect: community.general.sensu_subscription
+ spectrum_device:
+ redirect: community.general.spectrum_device
+ stackdriver:
+ redirect: community.general.stackdriver
+ statusio_maintenance:
+ redirect: community.general.statusio_maintenance
+ uptimerobot:
+ redirect: community.general.uptimerobot
+ zabbix_group_facts:
+ redirect: community.zabbix.zabbix_group_facts
+ zabbix_host_facts:
+ redirect: community.zabbix.zabbix_host_facts
+ zabbix_action:
+ redirect: community.zabbix.zabbix_action
+ zabbix_group:
+ redirect: community.zabbix.zabbix_group
+ zabbix_group_info:
+ redirect: community.zabbix.zabbix_group_info
+ zabbix_host:
+ redirect: community.zabbix.zabbix_host
+ zabbix_host_events_info:
+ redirect: community.zabbix.zabbix_host_events_info
+ zabbix_host_info:
+ redirect: community.zabbix.zabbix_host_info
+ zabbix_hostmacro:
+ redirect: community.zabbix.zabbix_hostmacro
+ zabbix_maintenance:
+ redirect: community.zabbix.zabbix_maintenance
+ zabbix_map:
+ redirect: community.zabbix.zabbix_map
+ zabbix_mediatype:
+ redirect: community.zabbix.zabbix_mediatype
+ zabbix_proxy:
+ redirect: community.zabbix.zabbix_proxy
+ zabbix_screen:
+ redirect: community.zabbix.zabbix_screen
+ zabbix_service:
+ redirect: community.zabbix.zabbix_service
+ zabbix_template:
+ redirect: community.zabbix.zabbix_template
+ zabbix_template_info:
+ redirect: community.zabbix.zabbix_template_info
+ zabbix_user:
+ redirect: community.zabbix.zabbix_user
+ zabbix_user_info:
+ redirect: community.zabbix.zabbix_user_info
+ zabbix_valuemap:
+ redirect: community.zabbix.zabbix_valuemap
+ cloudflare_dns:
+ redirect: community.general.cloudflare_dns
+ dnsimple:
+ redirect: community.general.dnsimple
+ dnsmadeeasy:
+ redirect: community.general.dnsmadeeasy
+ exo_dns_domain:
+ redirect: ngine_io.exoscale.exo_dns_domain
+ exo_dns_record:
+ redirect: ngine_io.exoscale.exo_dns_record
+ haproxy:
+ redirect: community.general.haproxy
+ hetzner_failover_ip:
+ redirect: community.general.hetzner_failover_ip
+ hetzner_failover_ip_info:
+ redirect: community.general.hetzner_failover_ip_info
+ hetzner_firewall:
+ redirect: community.general.hetzner_firewall
+ hetzner_firewall_info:
+ redirect: community.general.hetzner_firewall_info
+ infinity:
+ redirect: community.general.infinity
+ ip_netns:
+ redirect: community.general.ip_netns
+ ipify_facts:
+ redirect: community.general.ipify_facts
+ ipinfoio_facts:
+ redirect: community.general.ipinfoio_facts
+ ipwcli_dns:
+ redirect: community.general.ipwcli_dns
+ ldap_attr:
+ redirect: community.general.ldap_attr
+ ldap_attrs:
+ redirect: community.general.ldap_attrs
+ ldap_entry:
+ redirect: community.general.ldap_entry
+ ldap_passwd:
+ redirect: community.general.ldap_passwd
+ lldp:
+ redirect: community.general.lldp
+ netcup_dns:
+ redirect: community.general.netcup_dns
+ nios_a_record:
+ redirect: community.general.nios_a_record
+ nios_aaaa_record:
+ redirect: community.general.nios_aaaa_record
+ nios_cname_record:
+ redirect: community.general.nios_cname_record
+ nios_dns_view:
+ redirect: community.general.nios_dns_view
+ nios_fixed_address:
+ redirect: community.general.nios_fixed_address
+ nios_host_record:
+ redirect: community.general.nios_host_record
+ nios_member:
+ redirect: community.general.nios_member
+ nios_mx_record:
+ redirect: community.general.nios_mx_record
+ nios_naptr_record:
+ redirect: community.general.nios_naptr_record
+ nios_network:
+ redirect: community.general.nios_network
+ nios_network_view:
+ redirect: community.general.nios_network_view
+ nios_nsgroup:
+ redirect: community.general.nios_nsgroup
+ nios_ptr_record:
+ redirect: community.general.nios_ptr_record
+ nios_srv_record:
+ redirect: community.general.nios_srv_record
+ nios_txt_record:
+ redirect: community.general.nios_txt_record
+ nios_zone:
+ redirect: community.general.nios_zone
+ nmcli:
+ redirect: community.general.nmcli
+ nsupdate:
+ redirect: community.general.nsupdate
+ omapi_host:
+ redirect: community.general.omapi_host
+ snmp_facts:
+ redirect: community.general.snmp_facts
+ a10_server:
+ redirect: community.network.a10_server
+ a10_server_axapi3:
+ redirect: community.network.a10_server_axapi3
+ a10_service_group:
+ redirect: community.network.a10_service_group
+ a10_virtual_server:
+ redirect: community.network.a10_virtual_server
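+ # The abbreviated aci_intf_policy_* names were expanded to aci_interface_policy_* in cisco.aci.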
+ aci_intf_policy_fc:
+ redirect: cisco.aci.aci_interface_policy_fc
+ aci_intf_policy_l2:
+ redirect: cisco.aci.aci_interface_policy_l2
+ aci_intf_policy_lldp:
+ redirect: cisco.aci.aci_interface_policy_lldp
+ aci_intf_policy_mcp:
+ redirect: cisco.aci.aci_interface_policy_mcp
+ aci_intf_policy_port_channel:
+ redirect: cisco.aci.aci_interface_policy_port_channel
+ aci_intf_policy_port_security:
+ redirect: cisco.aci.aci_interface_policy_port_security
+ mso_schema_template_external_epg_contract:
+ redirect: cisco.mso.mso_schema_template_external_epg_contract
+ mso_schema_template_external_epg_subnet:
+ redirect: cisco.mso.mso_schema_template_external_epg_subnet
+ aireos_command:
+ redirect: community.network.aireos_command
+ aireos_config:
+ redirect: community.network.aireos_config
+ apconos_command:
+ redirect: community.network.apconos_command
+ aruba_command:
+ redirect: community.network.aruba_command
+ aruba_config:
+ redirect: community.network.aruba_config
+ avi_actiongroupconfig:
+ redirect: community.network.avi_actiongroupconfig
+ avi_alertconfig:
+ redirect: community.network.avi_alertconfig
+ avi_alertemailconfig:
+ redirect: community.network.avi_alertemailconfig
+ avi_alertscriptconfig:
+ redirect: community.network.avi_alertscriptconfig
+ avi_alertsyslogconfig:
+ redirect: community.network.avi_alertsyslogconfig
+ avi_analyticsprofile:
+ redirect: community.network.avi_analyticsprofile
+ avi_api_session:
+ redirect: community.network.avi_api_session
+ avi_api_version:
+ redirect: community.network.avi_api_version
+ avi_applicationpersistenceprofile:
+ redirect: community.network.avi_applicationpersistenceprofile
+ avi_applicationprofile:
+ redirect: community.network.avi_applicationprofile
+ avi_authprofile:
+ redirect: community.network.avi_authprofile
+ avi_autoscalelaunchconfig:
+ redirect: community.network.avi_autoscalelaunchconfig
+ avi_backup:
+ redirect: community.network.avi_backup
+ avi_backupconfiguration:
+ redirect: community.network.avi_backupconfiguration
+ avi_certificatemanagementprofile:
+ redirect: community.network.avi_certificatemanagementprofile
+ avi_cloud:
+ redirect: community.network.avi_cloud
+ avi_cloudconnectoruser:
+ redirect: community.network.avi_cloudconnectoruser
+ avi_cloudproperties:
+ redirect: community.network.avi_cloudproperties
+ avi_cluster:
+ redirect: community.network.avi_cluster
+ avi_clusterclouddetails:
+ redirect: community.network.avi_clusterclouddetails
+ avi_controllerproperties:
+ redirect: community.network.avi_controllerproperties
+ avi_customipamdnsprofile:
+ redirect: community.network.avi_customipamdnsprofile
+ avi_dnspolicy:
+ redirect: community.network.avi_dnspolicy
+ avi_errorpagebody:
+ redirect: community.network.avi_errorpagebody
+ avi_errorpageprofile:
+ redirect: community.network.avi_errorpageprofile
+ avi_gslb:
+ redirect: community.network.avi_gslb
+ avi_gslbgeodbprofile:
+ redirect: community.network.avi_gslbgeodbprofile
+ avi_gslbservice:
+ redirect: community.network.avi_gslbservice
+ avi_gslbservice_patch_member:
+ redirect: community.network.avi_gslbservice_patch_member
+ avi_hardwaresecuritymodulegroup:
+ redirect: community.network.avi_hardwaresecuritymodulegroup
+ avi_healthmonitor:
+ redirect: community.network.avi_healthmonitor
+ avi_httppolicyset:
+ redirect: community.network.avi_httppolicyset
+ avi_ipaddrgroup:
+ redirect: community.network.avi_ipaddrgroup
+ avi_ipamdnsproviderprofile:
+ redirect: community.network.avi_ipamdnsproviderprofile
+ avi_l4policyset:
+ redirect: community.network.avi_l4policyset
+ avi_microservicegroup:
+ redirect: community.network.avi_microservicegroup
+ avi_network:
+ redirect: community.network.avi_network
+ avi_networkprofile:
+ redirect: community.network.avi_networkprofile
+ avi_networksecuritypolicy:
+ redirect: community.network.avi_networksecuritypolicy
+ avi_pkiprofile:
+ redirect: community.network.avi_pkiprofile
+ avi_pool:
+ redirect: community.network.avi_pool
+ avi_poolgroup:
+ redirect: community.network.avi_poolgroup
+ avi_poolgroupdeploymentpolicy:
+ redirect: community.network.avi_poolgroupdeploymentpolicy
+ avi_prioritylabels:
+ redirect: community.network.avi_prioritylabels
+ avi_role:
+ redirect: community.network.avi_role
+ avi_scheduler:
+ redirect: community.network.avi_scheduler
+ avi_seproperties:
+ redirect: community.network.avi_seproperties
+ avi_serverautoscalepolicy:
+ redirect: community.network.avi_serverautoscalepolicy
+ avi_serviceengine:
+ redirect: community.network.avi_serviceengine
+ avi_serviceenginegroup:
+ redirect: community.network.avi_serviceenginegroup
+ avi_snmptrapprofile:
+ redirect: community.network.avi_snmptrapprofile
+ avi_sslkeyandcertificate:
+ redirect: community.network.avi_sslkeyandcertificate
+ avi_sslprofile:
+ redirect: community.network.avi_sslprofile
+ avi_stringgroup:
+ redirect: community.network.avi_stringgroup
+ avi_systemconfiguration:
+ redirect: community.network.avi_systemconfiguration
+ avi_tenant:
+ redirect: community.network.avi_tenant
+ avi_trafficcloneprofile:
+ redirect: community.network.avi_trafficcloneprofile
+ avi_user:
+ redirect: community.network.avi_user
+ avi_useraccount:
+ redirect: community.network.avi_useraccount
+ avi_useraccountprofile:
+ redirect: community.network.avi_useraccountprofile
+ avi_virtualservice:
+ redirect: community.network.avi_virtualservice
+ avi_vrfcontext:
+ redirect: community.network.avi_vrfcontext
+ avi_vsdatascriptset:
+ redirect: community.network.avi_vsdatascriptset
+ avi_vsvip:
+ redirect: community.network.avi_vsvip
+ avi_webhook:
+ redirect: community.network.avi_webhook
+ bcf_switch:
+ redirect: community.network.bcf_switch
+ bigmon_chain:
+ redirect: community.network.bigmon_chain
+ bigmon_policy:
+ redirect: community.network.bigmon_policy
+ checkpoint_access_layer_facts:
+ redirect: check_point.mgmt.checkpoint_access_layer_facts
+ checkpoint_access_rule:
+ redirect: check_point.mgmt.checkpoint_access_rule
+ checkpoint_access_rule_facts:
+ redirect: check_point.mgmt.checkpoint_access_rule_facts
+ checkpoint_host:
+ redirect: check_point.mgmt.checkpoint_host
+ checkpoint_host_facts:
+ redirect: check_point.mgmt.checkpoint_host_facts
+ checkpoint_object_facts:
+ redirect: check_point.mgmt.checkpoint_object_facts
+ checkpoint_run_script:
+ redirect: check_point.mgmt.checkpoint_run_script
+ checkpoint_session:
+ redirect: check_point.mgmt.checkpoint_session
+ checkpoint_task_facts:
+ redirect: check_point.mgmt.checkpoint_task_facts
+ cp_publish:
+ redirect: community.network.cp_publish
+ ce_aaa_server:
+ redirect: community.network.ce_aaa_server
+ ce_aaa_server_host:
+ redirect: community.network.ce_aaa_server_host
+ ce_acl:
+ redirect: community.network.ce_acl
+ ce_acl_advance:
+ redirect: community.network.ce_acl_advance
+ ce_acl_interface:
+ redirect: community.network.ce_acl_interface
+ ce_bfd_global:
+ redirect: community.network.ce_bfd_global
+ ce_bfd_session:
+ redirect: community.network.ce_bfd_session
+ ce_bfd_view:
+ redirect: community.network.ce_bfd_view
+ ce_bgp:
+ redirect: community.network.ce_bgp
+ ce_bgp_af:
+ redirect: community.network.ce_bgp_af
+ ce_bgp_neighbor:
+ redirect: community.network.ce_bgp_neighbor
+ ce_bgp_neighbor_af:
+ redirect: community.network.ce_bgp_neighbor_af
+ ce_command:
+ redirect: community.network.ce_command
+ ce_config:
+ redirect: community.network.ce_config
+ ce_dldp:
+ redirect: community.network.ce_dldp
+ ce_dldp_interface:
+ redirect: community.network.ce_dldp_interface
+ ce_eth_trunk:
+ redirect: community.network.ce_eth_trunk
+ ce_evpn_bd_vni:
+ redirect: community.network.ce_evpn_bd_vni
+ ce_evpn_bgp:
+ redirect: community.network.ce_evpn_bgp
+ ce_evpn_bgp_rr:
+ redirect: community.network.ce_evpn_bgp_rr
+ ce_evpn_global:
+ redirect: community.network.ce_evpn_global
+ ce_facts:
+ redirect: community.network.ce_facts
+ ce_file_copy:
+ redirect: community.network.ce_file_copy
+ ce_info_center_debug:
+ redirect: community.network.ce_info_center_debug
+ ce_info_center_global:
+ redirect: community.network.ce_info_center_global
+ ce_info_center_log:
+ redirect: community.network.ce_info_center_log
+ ce_info_center_trap:
+ redirect: community.network.ce_info_center_trap
+ ce_interface:
+ redirect: community.network.ce_interface
+ ce_interface_ospf:
+ redirect: community.network.ce_interface_ospf
+ ce_ip_interface:
+ redirect: community.network.ce_ip_interface
+ ce_is_is_instance:
+ redirect: community.network.ce_is_is_instance
+ ce_is_is_interface:
+ redirect: community.network.ce_is_is_interface
+ ce_is_is_view:
+ redirect: community.network.ce_is_is_view
+ ce_lacp:
+ redirect: community.network.ce_lacp
+ ce_link_status:
+ redirect: community.network.ce_link_status
+ ce_lldp:
+ redirect: community.network.ce_lldp
+ ce_lldp_interface:
+ redirect: community.network.ce_lldp_interface
+ ce_mdn_interface:
+ redirect: community.network.ce_mdn_interface
+ ce_mlag_config:
+ redirect: community.network.ce_mlag_config
+ ce_mlag_interface:
+ redirect: community.network.ce_mlag_interface
+ ce_mtu:
+ redirect: community.network.ce_mtu
+ ce_multicast_global:
+ redirect: community.network.ce_multicast_global
+ ce_multicast_igmp_enable:
+ redirect: community.network.ce_multicast_igmp_enable
+ ce_netconf:
+ redirect: community.network.ce_netconf
+ ce_netstream_aging:
+ redirect: community.network.ce_netstream_aging
+ ce_netstream_export:
+ redirect: community.network.ce_netstream_export
+ ce_netstream_global:
+ redirect: community.network.ce_netstream_global
+ ce_netstream_template:
+ redirect: community.network.ce_netstream_template
+ ce_ntp:
+ redirect: community.network.ce_ntp
+ ce_ntp_auth:
+ redirect: community.network.ce_ntp_auth
+ ce_ospf:
+ redirect: community.network.ce_ospf
+ ce_ospf_vrf:
+ redirect: community.network.ce_ospf_vrf
+ ce_reboot:
+ redirect: community.network.ce_reboot
+ ce_rollback:
+ redirect: community.network.ce_rollback
+ ce_sflow:
+ redirect: community.network.ce_sflow
+ ce_snmp_community:
+ redirect: community.network.ce_snmp_community
+ ce_snmp_contact:
+ redirect: community.network.ce_snmp_contact
+ ce_snmp_location:
+ redirect: community.network.ce_snmp_location
+ ce_snmp_target_host:
+ redirect: community.network.ce_snmp_target_host
+ ce_snmp_traps:
+ redirect: community.network.ce_snmp_traps
+ ce_snmp_user:
+ redirect: community.network.ce_snmp_user
+ ce_startup:
+ redirect: community.network.ce_startup
+ ce_static_route:
+ redirect: community.network.ce_static_route
+ ce_static_route_bfd:
+ redirect: community.network.ce_static_route_bfd
+ ce_stp:
+ redirect: community.network.ce_stp
+ ce_switchport:
+ redirect: community.network.ce_switchport
+ ce_vlan:
+ redirect: community.network.ce_vlan
+ ce_vrf:
+ redirect: community.network.ce_vrf
+ ce_vrf_af:
+ redirect: community.network.ce_vrf_af
+ ce_vrf_interface:
+ redirect: community.network.ce_vrf_interface
+ ce_vrrp:
+ redirect: community.network.ce_vrrp
+ ce_vxlan_arp:
+ redirect: community.network.ce_vxlan_arp
+ ce_vxlan_gateway:
+ redirect: community.network.ce_vxlan_gateway
+ ce_vxlan_global:
+ redirect: community.network.ce_vxlan_global
+ ce_vxlan_tunnel:
+ redirect: community.network.ce_vxlan_tunnel
+ ce_vxlan_vap:
+ redirect: community.network.ce_vxlan_vap
+ cv_server_provision:
+ redirect: community.network.cv_server_provision
+ cnos_backup:
+ redirect: community.network.cnos_backup
+ cnos_banner:
+ redirect: community.network.cnos_banner
+ cnos_bgp:
+ redirect: community.network.cnos_bgp
+ cnos_command:
+ redirect: community.network.cnos_command
+ cnos_conditional_command:
+ redirect: community.network.cnos_conditional_command
+ cnos_conditional_template:
+ redirect: community.network.cnos_conditional_template
+ cnos_config:
+ redirect: community.network.cnos_config
+ cnos_factory:
+ redirect: community.network.cnos_factory
+ cnos_facts:
+ redirect: community.network.cnos_facts
+ cnos_image:
+ redirect: community.network.cnos_image
+ cnos_interface:
+ redirect: community.network.cnos_interface
+ cnos_l2_interface:
+ redirect: community.network.cnos_l2_interface
+ cnos_l3_interface:
+ redirect: community.network.cnos_l3_interface
+ cnos_linkagg:
+ redirect: community.network.cnos_linkagg
+ cnos_lldp:
+ redirect: community.network.cnos_lldp
+ cnos_logging:
+ redirect: community.network.cnos_logging
+ cnos_reload:
+ redirect: community.network.cnos_reload
+ cnos_rollback:
+ redirect: community.network.cnos_rollback
+ cnos_save:
+ redirect: community.network.cnos_save
+ cnos_showrun:
+ redirect: community.network.cnos_showrun
+ cnos_static_route:
+ redirect: community.network.cnos_static_route
+ cnos_system:
+ redirect: community.network.cnos_system
+ cnos_template:
+ redirect: community.network.cnos_template
+ cnos_user:
+ redirect: community.network.cnos_user
+ cnos_vlag:
+ redirect: community.network.cnos_vlag
+ cnos_vlan:
+ redirect: community.network.cnos_vlan
+ cnos_vrf:
+ redirect: community.network.cnos_vrf
+ nclu:
+ redirect: community.network.nclu
+ edgeos_command:
+ redirect: community.network.edgeos_command
+ edgeos_config:
+ redirect: community.network.edgeos_config
+ edgeos_facts:
+ redirect: community.network.edgeos_facts
+ edgeswitch_facts:
+ redirect: community.network.edgeswitch_facts
+ edgeswitch_vlan:
+ redirect: community.network.edgeswitch_vlan
+ enos_command:
+ redirect: community.network.enos_command
+ enos_config:
+ redirect: community.network.enos_config
+ enos_facts:
+ redirect: community.network.enos_facts
+ eric_eccli_command:
+ redirect: community.network.eric_eccli_command
+ exos_command:
+ redirect: community.network.exos_command
+ exos_config:
+ redirect: community.network.exos_config
+ exos_facts:
+ redirect: community.network.exos_facts
+ exos_l2_interfaces:
+ redirect: community.network.exos_l2_interfaces
+ exos_lldp_global:
+ redirect: community.network.exos_lldp_global
+ exos_lldp_interfaces:
+ redirect: community.network.exos_lldp_interfaces
+ exos_vlans:
+ redirect: community.network.exos_vlans
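+ # Unlike a redirect, a tombstone marks a module that was removed outright;
+ # invoking it fails with the given warning_text instead of being rerouted.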
+ bigip_asm_policy:
+ tombstone:
+ removal_date: 2019-11-06
+ warning_text: bigip_asm_policy has been removed; please use bigip_asm_policy_manage instead.
+ bigip_device_facts:
+ redirect: f5networks.f5_modules.bigip_device_info
+ bigip_iapplx_package:
+ redirect: f5networks.f5_modules.bigip_lx_package
+ bigip_security_address_list:
+ redirect: f5networks.f5_modules.bigip_firewall_address_list
+ bigip_security_port_list:
+ redirect: f5networks.f5_modules.bigip_firewall_port_list
+ bigip_traffic_group:
+ redirect: f5networks.f5_modules.bigip_device_traffic_group
+ bigip_facts:
+ tombstone:
+ removal_date: 2019-11-06
+ warning_text: bigip_facts has been removed; please use the bigip_device_info module instead.
+ bigip_gtm_facts:
+ tombstone:
+ removal_date: 2019-11-06
+ warning_text: bigip_gtm_facts has been removed; please use the bigip_device_info module instead.
+ faz_device:
+ redirect: community.network.faz_device
+ fmgr_device:
+ redirect: community.network.fmgr_device
+ fmgr_device_config:
+ redirect: community.network.fmgr_device_config
+ fmgr_device_group:
+ redirect: community.network.fmgr_device_group
+ fmgr_device_provision_template:
+ redirect: community.network.fmgr_device_provision_template
+ fmgr_fwobj_address:
+ redirect: community.network.fmgr_fwobj_address
+ fmgr_fwobj_ippool:
+ redirect: community.network.fmgr_fwobj_ippool
+ fmgr_fwobj_ippool6:
+ redirect: community.network.fmgr_fwobj_ippool6
+ fmgr_fwobj_service:
+ redirect: community.network.fmgr_fwobj_service
+ fmgr_fwobj_vip:
+ redirect: community.network.fmgr_fwobj_vip
+ fmgr_fwpol_ipv4:
+ redirect: community.network.fmgr_fwpol_ipv4
+ fmgr_fwpol_package:
+ redirect: community.network.fmgr_fwpol_package
+ fmgr_ha:
+ redirect: community.network.fmgr_ha
+ fmgr_provisioning:
+ redirect: community.network.fmgr_provisioning
+ fmgr_query:
+ redirect: community.network.fmgr_query
+ fmgr_script:
+ redirect: community.network.fmgr_script
+ fmgr_secprof_appctrl:
+ redirect: community.network.fmgr_secprof_appctrl
+ fmgr_secprof_av:
+ redirect: community.network.fmgr_secprof_av
+ fmgr_secprof_dns:
+ redirect: community.network.fmgr_secprof_dns
+ fmgr_secprof_ips:
+ redirect: community.network.fmgr_secprof_ips
+ fmgr_secprof_profile_group:
+ redirect: community.network.fmgr_secprof_profile_group
+ fmgr_secprof_proxy:
+ redirect: community.network.fmgr_secprof_proxy
+ fmgr_secprof_spam:
+ redirect: community.network.fmgr_secprof_spam
+ fmgr_secprof_ssl_ssh:
+ redirect: community.network.fmgr_secprof_ssl_ssh
+ fmgr_secprof_voip:
+ redirect: community.network.fmgr_secprof_voip
+ fmgr_secprof_waf:
+ redirect: community.network.fmgr_secprof_waf
+ fmgr_secprof_wanopt:
+ redirect: community.network.fmgr_secprof_wanopt
+ fmgr_secprof_web:
+ redirect: community.network.fmgr_secprof_web
+ ftd_configuration:
+ redirect: community.network.ftd_configuration
+ ftd_file_download:
+ redirect: community.network.ftd_file_download
+ ftd_file_upload:
+ redirect: community.network.ftd_file_upload
+ ftd_install:
+ redirect: community.network.ftd_install
+ icx_banner:
+ redirect: community.network.icx_banner
+ icx_command:
+ redirect: community.network.icx_command
+ icx_config:
+ redirect: community.network.icx_config
+ icx_copy:
+ redirect: community.network.icx_copy
+ icx_facts:
+ redirect: community.network.icx_facts
+ icx_interface:
+ redirect: community.network.icx_interface
+ icx_l3_interface:
+ redirect: community.network.icx_l3_interface
+ icx_linkagg:
+ redirect: community.network.icx_linkagg
+ icx_lldp:
+ redirect: community.network.icx_lldp
+ icx_logging:
+ redirect: community.network.icx_logging
+ icx_ping:
+ redirect: community.network.icx_ping
+ icx_static_route:
+ redirect: community.network.icx_static_route
+ icx_system:
+ redirect: community.network.icx_system
+ icx_user:
+ redirect: community.network.icx_user
+ icx_vlan:
+ redirect: community.network.icx_vlan
+ dladm_etherstub:
+ redirect: community.network.dladm_etherstub
+ dladm_iptun:
+ redirect: community.network.dladm_iptun
+ dladm_linkprop:
+ redirect: community.network.dladm_linkprop
+ dladm_vlan:
+ redirect: community.network.dladm_vlan
+ dladm_vnic:
+ redirect: community.network.dladm_vnic
+ flowadm:
+ redirect: community.network.flowadm
+ ipadm_addr:
+ redirect: community.network.ipadm_addr
+ ipadm_addrprop:
+ redirect: community.network.ipadm_addrprop
+ ipadm_if:
+ redirect: community.network.ipadm_if
+ ipadm_ifprop:
+ redirect: community.network.ipadm_ifprop
+ ipadm_prop:
+ redirect: community.network.ipadm_prop
+ ig_config:
+ redirect: community.network.ig_config
+ ig_unit_information:
+ redirect: community.network.ig_unit_information
+ ironware_command:
+ redirect: community.network.ironware_command
+ ironware_config:
+ redirect: community.network.ironware_config
+ ironware_facts:
+ redirect: community.network.ironware_facts
+ iap_start_workflow:
+ redirect: community.network.iap_start_workflow
+ iap_token:
+ redirect: community.network.iap_token
+ netact_cm_command:
+ redirect: community.network.netact_cm_command
+ netscaler_cs_action:
+ redirect: community.network.netscaler_cs_action
+ netscaler_cs_policy:
+ redirect: community.network.netscaler_cs_policy
+ netscaler_cs_vserver:
+ redirect: community.network.netscaler_cs_vserver
+ netscaler_gslb_service:
+ redirect: community.network.netscaler_gslb_service
+ netscaler_gslb_site:
+ redirect: community.network.netscaler_gslb_site
+ netscaler_gslb_vserver:
+ redirect: community.network.netscaler_gslb_vserver
+ netscaler_lb_monitor:
+ redirect: community.network.netscaler_lb_monitor
+ netscaler_lb_vserver:
+ redirect: community.network.netscaler_lb_vserver
+ netscaler_nitro_request:
+ redirect: community.network.netscaler_nitro_request
+ netscaler_save_config:
+ redirect: community.network.netscaler_save_config
+ netscaler_server:
+ redirect: community.network.netscaler_server
+ netscaler_service:
+ redirect: community.network.netscaler_service
+ netscaler_servicegroup:
+ redirect: community.network.netscaler_servicegroup
+ netscaler_ssl_certkey:
+ redirect: community.network.netscaler_ssl_certkey
+ pn_cluster:
+ redirect: community.network.pn_cluster
+ pn_ospf:
+ redirect: community.network.pn_ospf
+ pn_ospfarea:
+ redirect: community.network.pn_ospfarea
+ pn_show:
+ redirect: community.network.pn_show
+ pn_trunk:
+ redirect: community.network.pn_trunk
+ pn_vlag:
+ redirect: community.network.pn_vlag
+ pn_vlan:
+ redirect: community.network.pn_vlan
+ pn_vrouter:
+ redirect: community.network.pn_vrouter
+ pn_vrouterbgp:
+ redirect: community.network.pn_vrouterbgp
+ pn_vrouterif:
+ redirect: community.network.pn_vrouterif
+ pn_vrouterlbif:
+ redirect: community.network.pn_vrouterlbif
+ pn_access_list:
+ redirect: community.network.pn_access_list
+ pn_access_list_ip:
+ redirect: community.network.pn_access_list_ip
+ pn_admin_service:
+ redirect: community.network.pn_admin_service
+ pn_admin_session_timeout:
+ redirect: community.network.pn_admin_session_timeout
+ pn_admin_syslog:
+ redirect: community.network.pn_admin_syslog
+ pn_connection_stats_settings:
+ redirect: community.network.pn_connection_stats_settings
+ pn_cpu_class:
+ redirect: community.network.pn_cpu_class
+ pn_cpu_mgmt_class:
+ redirect: community.network.pn_cpu_mgmt_class
+ pn_dhcp_filter:
+ redirect: community.network.pn_dhcp_filter
+ pn_dscp_map:
+ redirect: community.network.pn_dscp_map
+ pn_dscp_map_pri_map:
+ redirect: community.network.pn_dscp_map_pri_map
+ pn_fabric_local:
+ redirect: community.network.pn_fabric_local
+ pn_igmp_snooping:
+ redirect: community.network.pn_igmp_snooping
+ pn_ipv6security_raguard:
+ redirect: community.network.pn_ipv6security_raguard
+ pn_ipv6security_raguard_port:
+ redirect: community.network.pn_ipv6security_raguard_port
+ pn_ipv6security_raguard_vlan:
+ redirect: community.network.pn_ipv6security_raguard_vlan
+ pn_log_audit_exception:
+ redirect: community.network.pn_log_audit_exception
+ pn_port_config:
+ redirect: community.network.pn_port_config
+ pn_port_cos_bw:
+ redirect: community.network.pn_port_cos_bw
+ pn_port_cos_rate_setting:
+ redirect: community.network.pn_port_cos_rate_setting
+ pn_prefix_list:
+ redirect: community.network.pn_prefix_list
+ pn_prefix_list_network:
+ redirect: community.network.pn_prefix_list_network
+ pn_role:
+ redirect: community.network.pn_role
+ pn_snmp_community:
+ redirect: community.network.pn_snmp_community
+ pn_snmp_trap_sink:
+ redirect: community.network.pn_snmp_trap_sink
+ pn_snmp_vacm:
+ redirect: community.network.pn_snmp_vacm
+ pn_stp:
+ redirect: community.network.pn_stp
+ pn_stp_port:
+ redirect: community.network.pn_stp_port
+ pn_switch_setup:
+ redirect: community.network.pn_switch_setup
+ pn_user:
+ redirect: community.network.pn_user
+ pn_vflow_table_profile:
+ redirect: community.network.pn_vflow_table_profile
+ pn_vrouter_bgp:
+ redirect: community.network.pn_vrouter_bgp
+ pn_vrouter_bgp_network:
+ redirect: community.network.pn_vrouter_bgp_network
+ pn_vrouter_interface_ip:
+ redirect: community.network.pn_vrouter_interface_ip
+ pn_vrouter_loopback_interface:
+ redirect: community.network.pn_vrouter_loopback_interface
+ pn_vrouter_ospf:
+ redirect: community.network.pn_vrouter_ospf
+ pn_vrouter_ospf6:
+ redirect: community.network.pn_vrouter_ospf6
+ pn_vrouter_packet_relay:
+ redirect: community.network.pn_vrouter_packet_relay
+ pn_vrouter_pim_config:
+ redirect: community.network.pn_vrouter_pim_config
+ pn_vtep:
+ redirect: community.network.pn_vtep
+ nos_command:
+ redirect: community.network.nos_command
+ nos_config:
+ redirect: community.network.nos_config
+ nos_facts:
+ redirect: community.network.nos_facts
+ nso_action:
+ redirect: community.network.nso_action
+ nso_config:
+ redirect: community.network.nso_config
+ nso_query:
+ redirect: community.network.nso_query
+ nso_show:
+ redirect: community.network.nso_show
+ nso_verify:
+ redirect: community.network.nso_verify
+ nuage_vspk:
+ redirect: community.network.nuage_vspk
+ onyx_aaa:
+ redirect: mellanox.onyx.onyx_aaa
+ onyx_bfd:
+ redirect: mellanox.onyx.onyx_bfd
+ onyx_bgp:
+ redirect: mellanox.onyx.onyx_bgp
+ onyx_buffer_pool:
+ redirect: mellanox.onyx.onyx_buffer_pool
+ onyx_command:
+ redirect: mellanox.onyx.onyx_command
+ onyx_config:
+ redirect: mellanox.onyx.onyx_config
+ onyx_facts:
+ redirect: mellanox.onyx.onyx_facts
+ onyx_igmp:
+ redirect: mellanox.onyx.onyx_igmp
+ onyx_igmp_interface:
+ redirect: mellanox.onyx.onyx_igmp_interface
+ onyx_igmp_vlan:
+ redirect: mellanox.onyx.onyx_igmp_vlan
+ onyx_interface:
+ redirect: mellanox.onyx.onyx_interface
+ onyx_l2_interface:
+ redirect: mellanox.onyx.onyx_l2_interface
+ onyx_l3_interface:
+ redirect: mellanox.onyx.onyx_l3_interface
+ onyx_linkagg:
+ redirect: mellanox.onyx.onyx_linkagg
+ onyx_lldp:
+ redirect: mellanox.onyx.onyx_lldp
+ onyx_lldp_interface:
+ redirect: mellanox.onyx.onyx_lldp_interface
+ onyx_magp:
+ redirect: mellanox.onyx.onyx_magp
+ onyx_mlag_ipl:
+ redirect: mellanox.onyx.onyx_mlag_ipl
+ onyx_mlag_vip:
+ redirect: mellanox.onyx.onyx_mlag_vip
+ onyx_ntp:
+ redirect: mellanox.onyx.onyx_ntp
+ onyx_ntp_servers_peers:
+ redirect: mellanox.onyx.onyx_ntp_servers_peers
+ onyx_ospf:
+ redirect: mellanox.onyx.onyx_ospf
+ onyx_pfc_interface:
+ redirect: mellanox.onyx.onyx_pfc_interface
+ onyx_protocol:
+ redirect: mellanox.onyx.onyx_protocol
+ onyx_ptp_global:
+ redirect: mellanox.onyx.onyx_ptp_global
+ onyx_ptp_interface:
+ redirect: mellanox.onyx.onyx_ptp_interface
+ onyx_qos:
+ redirect: mellanox.onyx.onyx_qos
+ onyx_snmp:
+ redirect: mellanox.onyx.onyx_snmp
+ onyx_snmp_hosts:
+ redirect: mellanox.onyx.onyx_snmp_hosts
+ onyx_snmp_users:
+ redirect: mellanox.onyx.onyx_snmp_users
+ onyx_syslog_files:
+ redirect: mellanox.onyx.onyx_syslog_files
+ onyx_syslog_remote:
+ redirect: mellanox.onyx.onyx_syslog_remote
+ onyx_traffic_class:
+ redirect: mellanox.onyx.onyx_traffic_class
+ onyx_username:
+ redirect: mellanox.onyx.onyx_username
+ onyx_vlan:
+ redirect: mellanox.onyx.onyx_vlan
+ onyx_vxlan:
+ redirect: mellanox.onyx.onyx_vxlan
+ onyx_wjh:
+ redirect: mellanox.onyx.onyx_wjh
+ opx_cps:
+ redirect: community.network.opx_cps
+ ordnance_config:
+ redirect: community.network.ordnance_config
+ ordnance_facts:
+ redirect: community.network.ordnance_facts
+ panos_admin:
+ redirect: community.network.panos_admin
+ panos_admpwd:
+ redirect: community.network.panos_admpwd
+ panos_cert_gen_ssh:
+ redirect: community.network.panos_cert_gen_ssh
+ panos_check:
+ redirect: community.network.panos_check
+ panos_commit:
+ redirect: community.network.panos_commit
+ panos_dag:
+ redirect: community.network.panos_dag
+ panos_dag_tags:
+ redirect: community.network.panos_dag_tags
+ panos_import:
+ redirect: community.network.panos_import
+ panos_interface:
+ redirect: community.network.panos_interface
+ panos_lic:
+ redirect: community.network.panos_lic
+ panos_loadcfg:
+ redirect: community.network.panos_loadcfg
+ panos_match_rule:
+ redirect: community.network.panos_match_rule
+ panos_mgtconfig:
+ redirect: community.network.panos_mgtconfig
+ panos_nat_rule:
+ redirect: community.network.panos_nat_rule
+ panos_object:
+ redirect: community.network.panos_object
+ panos_op:
+ redirect: community.network.panos_op
+ panos_pg:
+ redirect: community.network.panos_pg
+ panos_query_rules:
+ redirect: community.network.panos_query_rules
+ panos_restart:
+ redirect: community.network.panos_restart
+ panos_sag:
+ redirect: community.network.panos_sag
+ panos_security_rule:
+ redirect: community.network.panos_security_rule
+ panos_set:
+ redirect: community.network.panos_set
+ vdirect_commit:
+ redirect: community.network.vdirect_commit
+ vdirect_file:
+ redirect: community.network.vdirect_file
+ vdirect_runnable:
+ redirect: community.network.vdirect_runnable
+ routeros_command:
+ redirect: community.network.routeros_command
+ routeros_facts:
+ redirect: community.network.routeros_facts
+ slxos_command:
+ redirect: community.network.slxos_command
+ slxos_config:
+ redirect: community.network.slxos_config
+ slxos_facts:
+ redirect: community.network.slxos_facts
+ slxos_interface:
+ redirect: community.network.slxos_interface
+ slxos_l2_interface:
+ redirect: community.network.slxos_l2_interface
+ slxos_l3_interface:
+ redirect: community.network.slxos_l3_interface
+ slxos_linkagg:
+ redirect: community.network.slxos_linkagg
+ slxos_lldp:
+ redirect: community.network.slxos_lldp
+ slxos_vlan:
+ redirect: community.network.slxos_vlan
+ sros_command:
+ redirect: community.network.sros_command
+ sros_config:
+ redirect: community.network.sros_config
+ sros_rollback:
+ redirect: community.network.sros_rollback
+ voss_command:
+ redirect: community.network.voss_command
+ voss_config:
+ redirect: community.network.voss_config
+ voss_facts:
+ redirect: community.network.voss_facts
+ osx_say:
+ redirect: community.general.say
+ bearychat:
+ redirect: community.general.bearychat
+ campfire:
+ redirect: community.general.campfire
+ catapult:
+ redirect: community.general.catapult
+ cisco_spark:
+ redirect: community.general.cisco_spark
+ flowdock:
+ redirect: community.general.flowdock
+ grove:
+ redirect: community.general.grove
+ hipchat:
+ redirect: community.general.hipchat
+ irc:
+ redirect: community.general.irc
+ jabber:
+ redirect: community.general.jabber
+ logentries_msg:
+ redirect: community.general.logentries_msg
+ mail:
+ redirect: community.general.mail
+ matrix:
+ redirect: community.general.matrix
+ mattermost:
+ redirect: community.general.mattermost
+ mqtt:
+ redirect: community.general.mqtt
+ nexmo:
+ redirect: community.general.nexmo
+ office_365_connector_card:
+ redirect: community.general.office_365_connector_card
+ pushbullet:
+ redirect: community.general.pushbullet
+ pushover:
+ redirect: community.general.pushover
+ rabbitmq_publish:
+ redirect: community.rabbitmq.rabbitmq_publish
+ rocketchat:
+ redirect: community.general.rocketchat
+ say:
+ redirect: community.general.say
+ sendgrid:
+ redirect: community.general.sendgrid
+ slack:
+ redirect: community.general.slack
+ syslogger:
+ redirect: community.general.syslogger
+ telegram:
+ redirect: community.general.telegram
+ twilio:
+ redirect: community.general.twilio
+ typetalk:
+ redirect: community.general.typetalk
+ bower:
+ redirect: community.general.bower
+ bundler:
+ redirect: community.general.bundler
+ composer:
+ redirect: community.general.composer
+ cpanm:
+ redirect: community.general.cpanm
+ easy_install:
+ redirect: community.general.easy_install
+ gem:
+ redirect: community.general.gem
+ maven_artifact:
+ redirect: community.general.maven_artifact
+ npm:
+ redirect: community.general.npm
+ pear:
+ redirect: community.general.pear
+ pip_package_info:
+ redirect: community.general.pip_package_info
+ yarn:
+ redirect: community.general.yarn
+ apk:
+ redirect: community.general.apk
+ apt_rpm:
+ redirect: community.general.apt_rpm
+ flatpak:
+ redirect: community.general.flatpak
+ flatpak_remote:
+ redirect: community.general.flatpak_remote
+ homebrew:
+ redirect: community.general.homebrew
+ homebrew_cask:
+ redirect: community.general.homebrew_cask
+ homebrew_tap:
+ redirect: community.general.homebrew_tap
+ installp:
+ redirect: community.general.installp
+ layman:
+ redirect: community.general.layman
+ macports:
+ redirect: community.general.macports
+ mas:
+ redirect: community.general.mas
+ openbsd_pkg:
+ redirect: community.general.openbsd_pkg
+ opkg:
+ redirect: community.general.opkg
+ pacman:
+ redirect: community.general.pacman
+ pkg5:
+ redirect: community.general.pkg5
+ pkg5_publisher:
+ redirect: community.general.pkg5_publisher
+ pkgin:
+ redirect: community.general.pkgin
+ pkgng:
+ redirect: community.general.pkgng
+ pkgutil:
+ redirect: community.general.pkgutil
+ portage:
+ redirect: community.general.portage
+ portinstall:
+ redirect: community.general.portinstall
+ pulp_repo:
+ redirect: community.general.pulp_repo
+ redhat_subscription:
+ redirect: community.general.redhat_subscription
+ rhn_channel:
+ redirect: community.general.rhn_channel
+ rhn_register:
+ redirect: community.general.rhn_register
+ rhsm_release:
+ redirect: community.general.rhsm_release
+ rhsm_repository:
+ redirect: community.general.rhsm_repository
+ slackpkg:
+ redirect: community.general.slackpkg
+ snap:
+ redirect: community.general.snap
+ sorcery:
+ redirect: community.general.sorcery
+ svr4pkg:
+ redirect: community.general.svr4pkg
+ swdepot:
+ redirect: community.general.swdepot
+ swupd:
+ redirect: community.general.swupd
+ urpmi:
+ redirect: community.general.urpmi
+ xbps:
+ redirect: community.general.xbps
+ zypper:
+ redirect: community.general.zypper
+ zypper_repository:
+ redirect: community.general.zypper_repository
+ cobbler_sync:
+ redirect: community.general.cobbler_sync
+ cobbler_system:
+ redirect: community.general.cobbler_system
+ idrac_firmware:
+ redirect: community.general.idrac_firmware
+ idrac_server_config_profile:
+ redirect: community.general.idrac_server_config_profile
+ ome_device_info:
+ redirect: community.general.ome_device_info
+ foreman:
+ redirect: community.general.foreman
+ katello:
+ redirect: community.general.katello
+ hpilo_facts:
+ redirect: community.general.hpilo_facts
+ hpilo_boot:
+ redirect: community.general.hpilo_boot
+ hpilo_info:
+ redirect: community.general.hpilo_info
+ hponcfg:
+ redirect: community.general.hponcfg
+ imc_rest:
+ redirect: community.general.imc_rest
+ ipmi_boot:
+ redirect: community.general.ipmi_boot
+ ipmi_power:
+ redirect: community.general.ipmi_power
+ lxca_cmms:
+ redirect: community.general.lxca_cmms
+ lxca_nodes:
+ redirect: community.general.lxca_nodes
+ manageiq_alert_profiles:
+ redirect: community.general.manageiq_alert_profiles
+ manageiq_alerts:
+ redirect: community.general.manageiq_alerts
+ manageiq_group:
+ redirect: community.general.manageiq_group
+ manageiq_policies:
+ redirect: community.general.manageiq_policies
+ manageiq_provider:
+ redirect: community.general.manageiq_provider
+ manageiq_tags:
+ redirect: community.general.manageiq_tags
+ manageiq_tenant:
+ redirect: community.general.manageiq_tenant
+ manageiq_user:
+ redirect: community.general.manageiq_user
+ oneview_datacenter_facts:
+ redirect: community.general.oneview_datacenter_facts
+ oneview_enclosure_facts:
+ redirect: community.general.oneview_enclosure_facts
+ oneview_ethernet_network_facts:
+ redirect: community.general.oneview_ethernet_network_facts
+ oneview_fc_network_facts:
+ redirect: community.general.oneview_fc_network_facts
+ oneview_fcoe_network_facts:
+ redirect: community.general.oneview_fcoe_network_facts
+ oneview_logical_interconnect_group_facts:
+ redirect: community.general.oneview_logical_interconnect_group_facts
+ oneview_network_set_facts:
+ redirect: community.general.oneview_network_set_facts
+ oneview_san_manager_facts:
+ redirect: community.general.oneview_san_manager_facts
+ oneview_datacenter_info:
+ redirect: community.general.oneview_datacenter_info
+ oneview_enclosure_info:
+ redirect: community.general.oneview_enclosure_info
+ oneview_ethernet_network:
+ redirect: community.general.oneview_ethernet_network
+ oneview_ethernet_network_info:
+ redirect: community.general.oneview_ethernet_network_info
+ oneview_fc_network:
+ redirect: community.general.oneview_fc_network
+ oneview_fc_network_info:
+ redirect: community.general.oneview_fc_network_info
+ oneview_fcoe_network:
+ redirect: community.general.oneview_fcoe_network
+ oneview_fcoe_network_info:
+ redirect: community.general.oneview_fcoe_network_info
+ oneview_logical_interconnect_group:
+ redirect: community.general.oneview_logical_interconnect_group
+ oneview_logical_interconnect_group_info:
+ redirect: community.general.oneview_logical_interconnect_group_info
+ oneview_network_set:
+ redirect: community.general.oneview_network_set
+ oneview_network_set_info:
+ redirect: community.general.oneview_network_set_info
+ oneview_san_manager:
+ redirect: community.general.oneview_san_manager
+ oneview_san_manager_info:
+ redirect: community.general.oneview_san_manager_info
+ idrac_redfish_facts:
+ redirect: community.general.idrac_redfish_facts
+ redfish_facts:
+ redirect: community.general.redfish_facts
+ idrac_redfish_command:
+ redirect: community.general.idrac_redfish_command
+ idrac_redfish_config:
+ redirect: community.general.idrac_redfish_config
+ idrac_redfish_info:
+ redirect: community.general.idrac_redfish_info
+ redfish_command:
+ redirect: community.general.redfish_command
+ redfish_config:
+ redirect: community.general.redfish_config
+ redfish_info:
+ redirect: community.general.redfish_info
+ stacki_host:
+ redirect: community.general.stacki_host
+ wakeonlan:
+ redirect: community.general.wakeonlan
+ bitbucket_access_key:
+ redirect: community.general.bitbucket_access_key
+ bitbucket_pipeline_key_pair:
+ redirect: community.general.bitbucket_pipeline_key_pair
+ bitbucket_pipeline_known_host:
+ redirect: community.general.bitbucket_pipeline_known_host
+ bitbucket_pipeline_variable:
+ redirect: community.general.bitbucket_pipeline_variable
+ bzr:
+ redirect: community.general.bzr
+ git_config:
+ redirect: community.general.git_config
+ github_hooks:
+ redirect: community.general.github_hooks
+ github_webhook_facts:
+ redirect: community.general.github_webhook_info
+ github_deploy_key:
+ redirect: community.general.github_deploy_key
+ github_issue:
+ redirect: community.general.github_issue
+ github_key:
+ redirect: community.general.github_key
+ github_release:
+ redirect: community.general.github_release
+ github_webhook:
+ redirect: community.general.github_webhook
+ github_webhook_info:
+ redirect: community.general.github_webhook_info
+ gitlab_hooks:
+ redirect: community.general.gitlab_hook
+ gitlab_deploy_key:
+ redirect: community.general.gitlab_deploy_key
+ gitlab_group:
+ redirect: community.general.gitlab_group
+ gitlab_hook:
+ redirect: community.general.gitlab_hook
+ gitlab_project:
+ redirect: community.general.gitlab_project
+ gitlab_project_variable:
+ redirect: community.general.gitlab_project_variable
+ gitlab_runner:
+ redirect: community.general.gitlab_runner
+ gitlab_user:
+ redirect: community.general.gitlab_user
+ hg:
+ redirect: community.general.hg
+ emc_vnx_sg_member:
+ redirect: community.general.emc_vnx_sg_member
+ gluster_heal_facts:
+ redirect: gluster.gluster.gluster_heal_info
+ gluster_heal_info:
+ redirect: gluster.gluster.gluster_heal_info
+ gluster_peer:
+ redirect: gluster.gluster.gluster_peer
+ gluster_volume:
+ redirect: gluster.gluster.gluster_volume
+ ss_3par_cpg:
+ redirect: community.general.ss_3par_cpg
+ ibm_sa_domain:
+ redirect: community.general.ibm_sa_domain
+ ibm_sa_host:
+ redirect: community.general.ibm_sa_host
+ ibm_sa_host_ports:
+ redirect: community.general.ibm_sa_host_ports
+ ibm_sa_pool:
+ redirect: community.general.ibm_sa_pool
+ ibm_sa_vol:
+ redirect: community.general.ibm_sa_vol
+ ibm_sa_vol_map:
+ redirect: community.general.ibm_sa_vol_map
+ infini_export:
+ redirect: infinidat.infinibox.infini_export
+ infini_export_client:
+ redirect: infinidat.infinibox.infini_export_client
+ infini_fs:
+ redirect: infinidat.infinibox.infini_fs
+ infini_host:
+ redirect: infinidat.infinibox.infini_host
+ infini_pool:
+ redirect: infinidat.infinibox.infini_pool
+ infini_vol:
+ redirect: infinidat.infinibox.infini_vol
+ na_cdot_aggregate:
+ redirect: community.general.na_cdot_aggregate
+ na_cdot_license:
+ redirect: community.general.na_cdot_license
+ na_cdot_lun:
+ redirect: community.general.na_cdot_lun
+ na_cdot_qtree:
+ redirect: community.general.na_cdot_qtree
+ na_cdot_svm:
+ redirect: community.general.na_cdot_svm
+ na_cdot_user:
+ redirect: community.general.na_cdot_user
+ na_cdot_user_role:
+ redirect: community.general.na_cdot_user_role
+ na_cdot_volume:
+ redirect: community.general.na_cdot_volume
+ na_ontap_gather_facts:
+ redirect: community.general.na_ontap_gather_facts
+ sf_account_manager:
+ redirect: community.general.sf_account_manager
+ sf_check_connections:
+ redirect: community.general.sf_check_connections
+ sf_snapshot_schedule_manager:
+ redirect: community.general.sf_snapshot_schedule_manager
+ sf_volume_access_group_manager:
+ redirect: community.general.sf_volume_access_group_manager
+ sf_volume_manager:
+ redirect: community.general.sf_volume_manager
+ netapp_e_alerts:
+ redirect: netapp_eseries.santricity.netapp_e_alerts
+ netapp_e_amg:
+ redirect: netapp_eseries.santricity.netapp_e_amg
+ netapp_e_amg_role:
+ redirect: netapp_eseries.santricity.netapp_e_amg_role
+ netapp_e_amg_sync:
+ redirect: netapp_eseries.santricity.netapp_e_amg_sync
+ netapp_e_asup:
+ redirect: netapp_eseries.santricity.netapp_e_asup
+ netapp_e_auditlog:
+ redirect: netapp_eseries.santricity.netapp_e_auditlog
+ netapp_e_auth:
+ redirect: netapp_eseries.santricity.netapp_e_auth
+ netapp_e_drive_firmware:
+ redirect: netapp_eseries.santricity.netapp_e_drive_firmware
+ netapp_e_facts:
+ redirect: netapp_eseries.santricity.netapp_e_facts
+ netapp_e_firmware:
+ redirect: netapp_eseries.santricity.netapp_e_firmware
+ netapp_e_flashcache:
+ redirect: netapp_eseries.santricity.netapp_e_flashcache
+ netapp_e_global:
+ redirect: netapp_eseries.santricity.netapp_e_global
+ netapp_e_host:
+ redirect: netapp_eseries.santricity.netapp_e_host
+ netapp_e_hostgroup:
+ redirect: netapp_eseries.santricity.netapp_e_hostgroup
+ netapp_e_iscsi_interface:
+ redirect: netapp_eseries.santricity.netapp_e_iscsi_interface
+ netapp_e_iscsi_target:
+ redirect: netapp_eseries.santricity.netapp_e_iscsi_target
+ netapp_e_ldap:
+ redirect: netapp_eseries.santricity.netapp_e_ldap
+ netapp_e_lun_mapping:
+ redirect: netapp_eseries.santricity.netapp_e_lun_mapping
+ netapp_e_mgmt_interface:
+ redirect: netapp_eseries.santricity.netapp_e_mgmt_interface
+ netapp_e_snapshot_group:
+ redirect: netapp_eseries.santricity.netapp_e_snapshot_group
+ netapp_e_snapshot_images:
+ redirect: netapp_eseries.santricity.netapp_e_snapshot_images
+ netapp_e_snapshot_volume:
+ redirect: netapp_eseries.santricity.netapp_e_snapshot_volume
+ netapp_e_storage_system:
+ redirect: netapp_eseries.santricity.netapp_e_storage_system
+ netapp_e_storagepool:
+ redirect: netapp_eseries.santricity.netapp_e_storagepool
+ netapp_e_syslog:
+ redirect: netapp_eseries.santricity.netapp_e_syslog
+ netapp_e_volume:
+ redirect: netapp_eseries.santricity.netapp_e_volume
+ netapp_e_volume_copy:
+ redirect: netapp_eseries.santricity.netapp_e_volume_copy
+ purefa_facts:
+ redirect: community.general.purefa_facts
+ purefb_facts:
+ redirect: community.general.purefb_facts
+ vexata_eg:
+ redirect: community.general.vexata_eg
+ vexata_volume:
+ redirect: community.general.vexata_volume
+ zfs:
+ redirect: community.general.zfs
+ zfs_delegate_admin:
+ redirect: community.general.zfs_delegate_admin
+ zfs_facts:
+ redirect: community.general.zfs_facts
+ zpool_facts:
+ redirect: community.general.zpool_facts
+ python_requirements_facts:
+ redirect: community.general.python_requirements_facts
+ aix_devices:
+ redirect: community.general.aix_devices
+ aix_filesystem:
+ redirect: community.general.aix_filesystem
+ aix_inittab:
+ redirect: community.general.aix_inittab
+ aix_lvg:
+ redirect: community.general.aix_lvg
+ aix_lvol:
+ redirect: community.general.aix_lvol
+ alternatives:
+ redirect: community.general.alternatives
+ awall:
+ redirect: community.general.awall
+ beadm:
+ redirect: community.general.beadm
+ capabilities:
+ redirect: community.general.capabilities
+ cronvar:
+ redirect: community.general.cronvar
+ crypttab:
+ redirect: community.general.crypttab
+ dconf:
+ redirect: community.general.dconf
+ facter:
+ redirect: community.general.facter
+ filesystem:
+ redirect: community.general.filesystem
+ firewalld:
+ redirect: community.general.firewalld
+ gconftool2:
+ redirect: community.general.gconftool2
+ interfaces_file:
+ redirect: community.general.interfaces_file
+ java_cert:
+ redirect: community.general.java_cert
+ java_keystore:
+ redirect: community.general.java_keystore
+ kernel_blacklist:
+ redirect: community.general.kernel_blacklist
+ lbu:
+ redirect: community.general.lbu
+ listen_ports_facts:
+ redirect: community.general.listen_ports_facts
+ locale_gen:
+ redirect: community.general.locale_gen
+ lvg:
+ redirect: community.general.lvg
+ lvol:
+ redirect: community.general.lvol
+ make:
+ redirect: community.general.make
+ mksysb:
+ redirect: community.general.mksysb
+ modprobe:
+ redirect: community.general.modprobe
+ nosh:
+ redirect: community.general.nosh
+ ohai:
+ redirect: community.general.ohai
+ open_iscsi:
+ redirect: community.general.open_iscsi
+ openwrt_init:
+ redirect: community.general.openwrt_init
+ osx_defaults:
+ redirect: community.general.osx_defaults
+ pam_limits:
+ redirect: community.general.pam_limits
+ pamd:
+ redirect: community.general.pamd
+ parted:
+ redirect: community.general.parted
+ pids:
+ redirect: community.general.pids
+ puppet:
+ redirect: community.general.puppet
+ python_requirements_info:
+ redirect: community.general.python_requirements_info
+ runit:
+ redirect: community.general.runit
+ sefcontext:
+ redirect: community.general.sefcontext
+ selinux_permissive:
+ redirect: community.general.selinux_permissive
+ selogin:
+ redirect: community.general.selogin
+ seport:
+ redirect: community.general.seport
+ solaris_zone:
+ redirect: community.general.solaris_zone
+ svc:
+ redirect: community.general.svc
+ syspatch:
+ redirect: community.general.syspatch
+ timezone:
+ redirect: community.general.timezone
+ ufw:
+ redirect: community.general.ufw
+ vdo:
+ redirect: community.general.vdo
+ xfconf:
+ redirect: community.general.xfconf
+ xfs_quota:
+ redirect: community.general.xfs_quota
+ jenkins_job_facts:
+ redirect: community.general.jenkins_job_facts
+ nginx_status_facts:
+ redirect: community.general.nginx_status_facts
+ apache2_mod_proxy:
+ redirect: community.general.apache2_mod_proxy
+ apache2_module:
+ redirect: community.general.apache2_module
+ deploy_helper:
+ redirect: community.general.deploy_helper
+ django_manage:
+ redirect: community.general.django_manage
+ ejabberd_user:
+ redirect: community.general.ejabberd_user
+ gunicorn:
+ redirect: community.general.gunicorn
+ htpasswd:
+ redirect: community.general.htpasswd
+ jboss:
+ redirect: community.general.jboss
+ jenkins_job:
+ redirect: community.general.jenkins_job
+ jenkins_job_info:
+ redirect: community.general.jenkins_job_info
+ jenkins_plugin:
+ redirect: community.general.jenkins_plugin
+ jenkins_script:
+ redirect: community.general.jenkins_script
+ jira:
+ redirect: community.general.jira
+ nginx_status_info:
+ redirect: community.general.nginx_status_info
+ rundeck_acl_policy:
+ redirect: community.general.rundeck_acl_policy
+ rundeck_project:
+ redirect: community.general.rundeck_project
+ utm_aaa_group:
+ redirect: community.general.utm_aaa_group
+ utm_aaa_group_info:
+ redirect: community.general.utm_aaa_group_info
+ utm_ca_host_key_cert:
+ redirect: community.general.utm_ca_host_key_cert
+ utm_ca_host_key_cert_info:
+ redirect: community.general.utm_ca_host_key_cert_info
+ utm_dns_host:
+ redirect: community.general.utm_dns_host
+ utm_network_interface_address:
+ redirect: community.general.utm_network_interface_address
+ utm_network_interface_address_info:
+ redirect: community.general.utm_network_interface_address_info
+ utm_proxy_auth_profile:
+ redirect: community.general.utm_proxy_auth_profile
+ utm_proxy_exception:
+ redirect: community.general.utm_proxy_exception
+ utm_proxy_frontend:
+ redirect: community.general.utm_proxy_frontend
+ utm_proxy_frontend_info:
+ redirect: community.general.utm_proxy_frontend_info
+ utm_proxy_location:
+ redirect: community.general.utm_proxy_location
+ utm_proxy_location_info:
+ redirect: community.general.utm_proxy_location_info
+ supervisorctl:
+ redirect: community.general.supervisorctl
+ taiga_issue:
+ redirect: community.general.taiga_issue
+ grafana_dashboard:
+ redirect: community.grafana.grafana_dashboard
+ grafana_datasource:
+ redirect: community.grafana.grafana_datasource
+ grafana_plugin:
+ redirect: community.grafana.grafana_plugin
+ k8s_facts:
+ redirect: community.kubernetes.k8s_facts
+ k8s_raw:
+ redirect: community.kubernetes.k8s_raw
+ k8s:
+ redirect: community.kubernetes.k8s
+ k8s_auth:
+ redirect: community.kubernetes.k8s_auth
+ k8s_info:
+ redirect: community.kubernetes.k8s_info
+ k8s_scale:
+ redirect: community.kubernetes.k8s_scale
+ k8s_service:
+ redirect: community.kubernetes.k8s_service
+ openshift_raw:
+ redirect: community.kubernetes.openshift_raw
+ openshift_scale:
+ redirect: community.kubernetes.openshift_scale
+ openssh_cert:
+ redirect: community.crypto.openssh_cert
+ openssl_pkcs12:
+ redirect: community.crypto.openssl_pkcs12
+ openssl_csr:
+ redirect: community.crypto.openssl_csr
+ openssl_certificate:
+ redirect: community.crypto.x509_certificate
+ openssl_certificate_info:
+ redirect: community.crypto.x509_certificate_info
+ x509_crl:
+ redirect: community.crypto.x509_crl
+ openssl_privatekey_info:
+ redirect: community.crypto.openssl_privatekey_info
+ x509_crl_info:
+ redirect: community.crypto.x509_crl_info
+ get_certificate:
+ redirect: community.crypto.get_certificate
+ openssh_keypair:
+ redirect: community.crypto.openssh_keypair
+ openssl_publickey:
+ redirect: community.crypto.openssl_publickey
+ openssl_csr_info:
+ redirect: community.crypto.openssl_csr_info
+ luks_device:
+ redirect: community.crypto.luks_device
+ openssl_dhparam:
+ redirect: community.crypto.openssl_dhparam
+ openssl_privatekey:
+ redirect: community.crypto.openssl_privatekey
+ certificate_complete_chain:
+ redirect: community.crypto.certificate_complete_chain
+ acme_inspect:
+ redirect: community.crypto.acme_inspect
+ acme_certificate_revoke:
+ redirect: community.crypto.acme_certificate_revoke
+ acme_certificate:
+ redirect: community.crypto.acme_certificate
+ acme_account:
+ redirect: community.crypto.acme_account
+ acme_account_facts:
+ redirect: community.crypto.acme_account_facts
+ acme_challenge_cert_helper:
+ redirect: community.crypto.acme_challenge_cert_helper
+ acme_account_info:
+ redirect: community.crypto.acme_account_info
+ ecs_domain:
+ redirect: community.crypto.ecs_domain
+ ecs_certificate:
+ redirect: community.crypto.ecs_certificate
+ mongodb_parameter:
+ redirect: community.mongodb.mongodb_parameter
+ mongodb_info:
+ redirect: community.mongodb.mongodb_info
+ mongodb_replicaset:
+ redirect: community.mongodb.mongodb_replicaset
+ mongodb_user:
+ redirect: community.mongodb.mongodb_user
+ mongodb_shard:
+ redirect: community.mongodb.mongodb_shard
+ vcenter_extension_facts:
+ redirect: community.vmware.vcenter_extension_facts
+ vmware_about_facts:
+ redirect: community.vmware.vmware_about_facts
+ vmware_category_facts:
+ redirect: community.vmware.vmware_category_facts
+ vmware_cluster_facts:
+ redirect: community.vmware.vmware_cluster_facts
+ vmware_datastore_facts:
+ redirect: community.vmware.vmware_datastore_facts
+ vmware_dns_config:
+ redirect: community.vmware.vmware_dns_config
+ vmware_drs_group_facts:
+ redirect: community.vmware.vmware_drs_group_facts
+ vmware_drs_rule_facts:
+ redirect: community.vmware.vmware_drs_rule_facts
+ vmware_dvs_portgroup_facts:
+ redirect: community.vmware.vmware_dvs_portgroup_facts
+ vmware_guest_boot_facts:
+ redirect: community.vmware.vmware_guest_boot_facts
+ vmware_guest_customization_facts:
+ redirect: community.vmware.vmware_guest_customization_facts
+ vmware_guest_disk_facts:
+ redirect: community.vmware.vmware_guest_disk_facts
+ vmware_guest_facts:
+ redirect: community.vmware.vmware_guest_facts
+ vmware_guest_snapshot_facts:
+ redirect: community.vmware.vmware_guest_snapshot_facts
+ vmware_host_capability_facts:
+ redirect: community.vmware.vmware_host_capability_facts
+ vmware_host_config_facts:
+ redirect: community.vmware.vmware_host_config_facts
+ vmware_host_dns_facts:
+ redirect: community.vmware.vmware_host_dns_facts
+ vmware_host_feature_facts:
+ redirect: community.vmware.vmware_host_feature_facts
+ vmware_host_firewall_facts:
+ redirect: community.vmware.vmware_host_firewall_facts
+ vmware_host_ntp_facts:
+ redirect: community.vmware.vmware_host_ntp_facts
+ vmware_host_package_facts:
+ redirect: community.vmware.vmware_host_package_facts
+ vmware_host_service_facts:
+ redirect: community.vmware.vmware_host_service_facts
+ vmware_host_ssl_facts:
+ redirect: community.vmware.vmware_host_ssl_facts
+ vmware_host_vmhba_facts:
+ redirect: community.vmware.vmware_host_vmhba_facts
+ vmware_host_vmnic_facts:
+ redirect: community.vmware.vmware_host_vmnic_facts
+ vmware_local_role_facts:
+ redirect: community.vmware.vmware_local_role_facts
+ vmware_local_user_facts:
+ redirect: community.vmware.vmware_local_user_facts
+ vmware_portgroup_facts:
+ redirect: community.vmware.vmware_portgroup_facts
+ vmware_resource_pool_facts:
+ redirect: community.vmware.vmware_resource_pool_facts
+ vmware_tag_facts:
+ redirect: community.vmware.vmware_tag_facts
+ vmware_target_canonical_facts:
+ redirect: community.vmware.vmware_target_canonical_facts
+ vmware_vm_facts:
+ redirect: community.vmware.vmware_vm_facts
+ vmware_vmkernel_facts:
+ redirect: community.vmware.vmware_vmkernel_facts
+ vmware_vswitch_facts:
+ redirect: community.vmware.vmware_vswitch_facts
+ vca_fw:
+ redirect: community.vmware.vca_fw
+ vca_nat:
+ redirect: community.vmware.vca_nat
+ vca_vapp:
+ redirect: community.vmware.vca_vapp
+ vcenter_extension:
+ redirect: community.vmware.vcenter_extension
+ vcenter_extension_info:
+ redirect: community.vmware.vcenter_extension_info
+ vcenter_folder:
+ redirect: community.vmware.vcenter_folder
+ vcenter_license:
+ redirect: community.vmware.vcenter_license
+ vmware_about_info:
+ redirect: community.vmware.vmware_about_info
+ vmware_category:
+ redirect: community.vmware.vmware_category
+ vmware_category_info:
+ redirect: community.vmware.vmware_category_info
+ vmware_cfg_backup:
+ redirect: community.vmware.vmware_cfg_backup
+ vmware_cluster:
+ redirect: community.vmware.vmware_cluster
+ vmware_cluster_drs:
+ redirect: community.vmware.vmware_cluster_drs
+ vmware_cluster_ha:
+ redirect: community.vmware.vmware_cluster_ha
+ vmware_cluster_info:
+ redirect: community.vmware.vmware_cluster_info
+ vmware_cluster_vsan:
+ redirect: community.vmware.vmware_cluster_vsan
+ vmware_content_deploy_template:
+ redirect: community.vmware.vmware_content_deploy_template
+ vmware_content_library_info:
+ redirect: community.vmware.vmware_content_library_info
+ vmware_content_library_manager:
+ redirect: community.vmware.vmware_content_library_manager
+ vmware_datacenter:
+ redirect: community.vmware.vmware_datacenter
+ vmware_datastore_cluster:
+ redirect: community.vmware.vmware_datastore_cluster
+ vmware_datastore_info:
+ redirect: community.vmware.vmware_datastore_info
+ vmware_datastore_maintenancemode:
+ redirect: community.vmware.vmware_datastore_maintenancemode
+ vmware_deploy_ovf:
+ redirect: community.vmware.vmware_deploy_ovf
+ vmware_drs_group:
+ redirect: community.vmware.vmware_drs_group
+ vmware_drs_group_info:
+ redirect: community.vmware.vmware_drs_group_info
+ vmware_drs_rule_info:
+ redirect: community.vmware.vmware_drs_rule_info
+ vmware_dvs_host:
+ redirect: community.vmware.vmware_dvs_host
+ vmware_dvs_portgroup:
+ redirect: community.vmware.vmware_dvs_portgroup
+ vmware_dvs_portgroup_find:
+ redirect: community.vmware.vmware_dvs_portgroup_find
+ vmware_dvs_portgroup_info:
+ redirect: community.vmware.vmware_dvs_portgroup_info
+ vmware_dvswitch:
+ redirect: community.vmware.vmware_dvswitch
+ vmware_dvswitch_lacp:
+ redirect: community.vmware.vmware_dvswitch_lacp
+ vmware_dvswitch_nioc:
+ redirect: community.vmware.vmware_dvswitch_nioc
+ vmware_dvswitch_pvlans:
+ redirect: community.vmware.vmware_dvswitch_pvlans
+ vmware_dvswitch_uplink_pg:
+ redirect: community.vmware.vmware_dvswitch_uplink_pg
+ vmware_evc_mode:
+ redirect: community.vmware.vmware_evc_mode
+ vmware_export_ovf:
+ redirect: community.vmware.vmware_export_ovf
+ vmware_folder_info:
+ redirect: community.vmware.vmware_folder_info
+ vmware_guest:
+ redirect: community.vmware.vmware_guest
+ vmware_guest_boot_info:
+ redirect: community.vmware.vmware_guest_boot_info
+ vmware_guest_boot_manager:
+ redirect: community.vmware.vmware_guest_boot_manager
+ vmware_guest_controller:
+ redirect: community.vmware.vmware_guest_controller
+ vmware_guest_cross_vc_clone:
+ redirect: community.vmware.vmware_guest_cross_vc_clone
+ vmware_guest_custom_attribute_defs:
+ redirect: community.vmware.vmware_guest_custom_attribute_defs
+ vmware_guest_custom_attributes:
+ redirect: community.vmware.vmware_guest_custom_attributes
+ vmware_guest_customization_info:
+ redirect: community.vmware.vmware_guest_customization_info
+ vmware_guest_disk:
+ redirect: community.vmware.vmware_guest_disk
+ vmware_guest_disk_info:
+ redirect: community.vmware.vmware_guest_disk_info
+ vmware_guest_file_operation:
+ redirect: community.vmware.vmware_guest_file_operation
+ vmware_guest_find:
+ redirect: community.vmware.vmware_guest_find
+ vmware_guest_info:
+ redirect: community.vmware.vmware_guest_info
+ vmware_guest_move:
+ redirect: community.vmware.vmware_guest_move
+ vmware_guest_network:
+ redirect: community.vmware.vmware_guest_network
+ vmware_guest_powerstate:
+ redirect: community.vmware.vmware_guest_powerstate
+ vmware_guest_register_operation:
+ redirect: community.vmware.vmware_guest_register_operation
+ vmware_guest_screenshot:
+ redirect: community.vmware.vmware_guest_screenshot
+ vmware_guest_sendkey:
+ redirect: community.vmware.vmware_guest_sendkey
+ vmware_guest_serial_port:
+ redirect: community.vmware.vmware_guest_serial_port
+ vmware_guest_snapshot:
+ redirect: community.vmware.vmware_guest_snapshot
+ vmware_guest_snapshot_info:
+ redirect: community.vmware.vmware_guest_snapshot_info
+ vmware_guest_tools_info:
+ redirect: community.vmware.vmware_guest_tools_info
+ vmware_guest_tools_upgrade:
+ redirect: community.vmware.vmware_guest_tools_upgrade
+ vmware_guest_tools_wait:
+ redirect: community.vmware.vmware_guest_tools_wait
+ vmware_guest_video:
+ redirect: community.vmware.vmware_guest_video
+ vmware_guest_vnc:
+ redirect: community.vmware.vmware_guest_vnc
+ vmware_host:
+ redirect: community.vmware.vmware_host
+ vmware_host_acceptance:
+ redirect: community.vmware.vmware_host_acceptance
+ vmware_host_active_directory:
+ redirect: community.vmware.vmware_host_active_directory
+ vmware_host_auto_start:
+ redirect: community.vmware.vmware_host_auto_start
+ vmware_host_capability_info:
+ redirect: community.vmware.vmware_host_capability_info
+ vmware_host_config_info:
+ redirect: community.vmware.vmware_host_config_info
+ vmware_host_config_manager:
+ redirect: community.vmware.vmware_host_config_manager
+ vmware_host_datastore:
+ redirect: community.vmware.vmware_host_datastore
+ vmware_host_dns:
+ redirect: community.vmware.vmware_host_dns
+ vmware_host_dns_info:
+ redirect: community.vmware.vmware_host_dns_info
+ vmware_host_facts:
+ redirect: community.vmware.vmware_host_facts
+ vmware_host_feature_info:
+ redirect: community.vmware.vmware_host_feature_info
+ vmware_host_firewall_info:
+ redirect: community.vmware.vmware_host_firewall_info
+ vmware_host_firewall_manager:
+ redirect: community.vmware.vmware_host_firewall_manager
+ vmware_host_hyperthreading:
+ redirect: community.vmware.vmware_host_hyperthreading
+ vmware_host_ipv6:
+ redirect: community.vmware.vmware_host_ipv6
+ vmware_host_kernel_manager:
+ redirect: community.vmware.vmware_host_kernel_manager
+ vmware_host_lockdown:
+ redirect: community.vmware.vmware_host_lockdown
+ vmware_host_ntp:
+ redirect: community.vmware.vmware_host_ntp
+ vmware_host_ntp_info:
+ redirect: community.vmware.vmware_host_ntp_info
+ vmware_host_package_info:
+ redirect: community.vmware.vmware_host_package_info
+ vmware_host_powermgmt_policy:
+ redirect: community.vmware.vmware_host_powermgmt_policy
+ vmware_host_powerstate:
+ redirect: community.vmware.vmware_host_powerstate
+ vmware_host_scanhba:
+ redirect: community.vmware.vmware_host_scanhba
+ vmware_host_service_info:
+ redirect: community.vmware.vmware_host_service_info
+ vmware_host_service_manager:
+ redirect: community.vmware.vmware_host_service_manager
+ vmware_host_snmp:
+ redirect: community.vmware.vmware_host_snmp
+ vmware_host_ssl_info:
+ redirect: community.vmware.vmware_host_ssl_info
+ vmware_host_vmhba_info:
+ redirect: community.vmware.vmware_host_vmhba_info
+ vmware_host_vmnic_info:
+ redirect: community.vmware.vmware_host_vmnic_info
+ vmware_local_role_info:
+ redirect: community.vmware.vmware_local_role_info
+ vmware_local_role_manager:
+ redirect: community.vmware.vmware_local_role_manager
+ vmware_local_user_info:
+ redirect: community.vmware.vmware_local_user_info
+ vmware_local_user_manager:
+ redirect: community.vmware.vmware_local_user_manager
+ vmware_maintenancemode:
+ redirect: community.vmware.vmware_maintenancemode
+ vmware_migrate_vmk:
+ redirect: community.vmware.vmware_migrate_vmk
+ vmware_object_role_permission:
+ redirect: community.vmware.vmware_object_role_permission
+ vmware_portgroup:
+ redirect: community.vmware.vmware_portgroup
+ vmware_portgroup_info:
+ redirect: community.vmware.vmware_portgroup_info
+ vmware_resource_pool:
+ redirect: community.vmware.vmware_resource_pool
+ vmware_resource_pool_info:
+ redirect: community.vmware.vmware_resource_pool_info
+ vmware_tag:
+ redirect: community.vmware.vmware_tag
+ vmware_tag_info:
+ redirect: community.vmware.vmware_tag_info
+ vmware_tag_manager:
+ redirect: community.vmware.vmware_tag_manager
+ vmware_target_canonical_info:
+ redirect: community.vmware.vmware_target_canonical_info
+ vmware_vcenter_settings:
+ redirect: community.vmware.vmware_vcenter_settings
+ vmware_vcenter_statistics:
+ redirect: community.vmware.vmware_vcenter_statistics
+ vmware_vm_host_drs_rule:
+ redirect: community.vmware.vmware_vm_host_drs_rule
+ vmware_vm_info:
+ redirect: community.vmware.vmware_vm_info
+ vmware_vm_shell:
+ redirect: community.vmware.vmware_vm_shell
+ vmware_vm_storage_policy_info:
+ redirect: community.vmware.vmware_vm_storage_policy_info
+ vmware_vm_vm_drs_rule:
+ redirect: community.vmware.vmware_vm_vm_drs_rule
+ vmware_vm_vss_dvs_migrate:
+ redirect: community.vmware.vmware_vm_vss_dvs_migrate
+ vmware_vmkernel:
+ redirect: community.vmware.vmware_vmkernel
+ vmware_vmkernel_info:
+ redirect: community.vmware.vmware_vmkernel_info
+ vmware_vmkernel_ip_config:
+ redirect: community.vmware.vmware_vmkernel_ip_config
+ vmware_vmotion:
+ redirect: community.vmware.vmware_vmotion
+ vmware_vsan_cluster:
+ redirect: community.vmware.vmware_vsan_cluster
+ vmware_vsan_health_info:
+ redirect: community.vmware.vmware_vsan_health_info
+ vmware_vspan_session:
+ redirect: community.vmware.vmware_vspan_session
+ vmware_vswitch:
+ redirect: community.vmware.vmware_vswitch
+ vmware_vswitch_info:
+ redirect: community.vmware.vmware_vswitch_info
+ vsphere_copy:
+ redirect: community.vmware.vsphere_copy
+ vsphere_file:
+ redirect: community.vmware.vsphere_file
+ psexec:
+ redirect: community.windows.psexec
+ win_audit_policy_system:
+ redirect: community.windows.win_audit_policy_system
+ win_audit_rule:
+ redirect: community.windows.win_audit_rule
+ win_chocolatey:
+ redirect: chocolatey.chocolatey.win_chocolatey
+ win_chocolatey_config:
+ redirect: chocolatey.chocolatey.win_chocolatey_config
+ win_chocolatey_facts:
+ redirect: chocolatey.chocolatey.win_chocolatey_facts
+ win_chocolatey_feature:
+ redirect: chocolatey.chocolatey.win_chocolatey_feature
+ win_chocolatey_source:
+ redirect: chocolatey.chocolatey.win_chocolatey_source
+ win_credential:
+ redirect: community.windows.win_credential
+ win_defrag:
+ redirect: community.windows.win_defrag
+ win_disk_facts:
+ redirect: community.windows.win_disk_facts
+ win_disk_image:
+ redirect: community.windows.win_disk_image
+ win_dns_record:
+ redirect: community.windows.win_dns_record
+ win_domain_computer:
+ redirect: community.windows.win_domain_computer
+ win_domain_group:
+ redirect: community.windows.win_domain_group
+ win_domain_group_membership:
+ redirect: community.windows.win_domain_group_membership
+ win_domain_user:
+ redirect: community.windows.win_domain_user
+ win_dotnet_ngen:
+ redirect: community.windows.win_dotnet_ngen
+ win_eventlog:
+ redirect: community.windows.win_eventlog
+ win_eventlog_entry:
+ redirect: community.windows.win_eventlog_entry
+ win_file_version:
+ redirect: community.windows.win_file_version
+ win_firewall:
+ redirect: community.windows.win_firewall
+ win_firewall_rule:
+ redirect: community.windows.win_firewall_rule
+ win_format:
+ redirect: community.windows.win_format
+ win_hosts:
+ redirect: community.windows.win_hosts
+ win_hotfix:
+ redirect: community.windows.win_hotfix
+ win_http_proxy:
+ redirect: community.windows.win_http_proxy
+ win_iis_virtualdirectory:
+ redirect: community.windows.win_iis_virtualdirectory
+ win_iis_webapplication:
+ redirect: community.windows.win_iis_webapplication
+ win_iis_webapppool:
+ redirect: community.windows.win_iis_webapppool
+ win_iis_webbinding:
+ redirect: community.windows.win_iis_webbinding
+ win_iis_website:
+ redirect: community.windows.win_iis_website
+ win_inet_proxy:
+ redirect: community.windows.win_inet_proxy
+ win_initialize_disk:
+ redirect: community.windows.win_initialize_disk
+ win_lineinfile:
+ redirect: community.windows.win_lineinfile
+ win_mapped_drive:
+ redirect: community.windows.win_mapped_drive
+ win_msg:
+ redirect: community.windows.win_msg
+ win_netbios:
+ redirect: community.windows.win_netbios
+ win_nssm:
+ redirect: community.windows.win_nssm
+ win_pagefile:
+ redirect: community.windows.win_pagefile
+ win_partition:
+ redirect: community.windows.win_partition
+ win_pester:
+ redirect: community.windows.win_pester
+ win_power_plan:
+ redirect: community.windows.win_power_plan
+ win_product_facts:
+ redirect: community.windows.win_product_facts
+ win_psexec:
+ redirect: community.windows.win_psexec
+ win_psmodule:
+ redirect: community.windows.win_psmodule
+ win_psrepository:
+ redirect: community.windows.win_psrepository
+ win_rabbitmq_plugin:
+ redirect: community.windows.win_rabbitmq_plugin
+ win_rds_cap:
+ redirect: community.windows.win_rds_cap
+ win_rds_rap:
+ redirect: community.windows.win_rds_rap
+ win_rds_settings:
+ redirect: community.windows.win_rds_settings
+ win_region:
+ redirect: community.windows.win_region
+ win_regmerge:
+ redirect: community.windows.win_regmerge
+ win_robocopy:
+ redirect: community.windows.win_robocopy
+ win_route:
+ redirect: community.windows.win_route
+ win_say:
+ redirect: community.windows.win_say
+ win_scheduled_task:
+ redirect: community.windows.win_scheduled_task
+ win_scheduled_task_stat:
+ redirect: community.windows.win_scheduled_task_stat
+ win_security_policy:
+ redirect: community.windows.win_security_policy
+ win_shortcut:
+ redirect: community.windows.win_shortcut
+ win_snmp:
+ redirect: community.windows.win_snmp
+ win_timezone:
+ redirect: community.windows.win_timezone
+ win_toast:
+ redirect: community.windows.win_toast
+ win_unzip:
+ redirect: community.windows.win_unzip
+ win_user_profile:
+ redirect: community.windows.win_user_profile
+ win_wait_for_process:
+ redirect: community.windows.win_wait_for_process
+ win_wakeonlan:
+ redirect: community.windows.win_wakeonlan
+ win_webpicmd:
+ redirect: community.windows.win_webpicmd
+ win_xml:
+ redirect: community.windows.win_xml
+ azure_rm_aks_facts:
+ redirect: community.azure.azure_rm_aks_facts
+ azure_rm_dnsrecordset_facts:
+ redirect: community.azure.azure_rm_dnsrecordset_facts
+ azure_rm_dnszone_facts:
+ redirect: community.azure.azure_rm_dnszone_facts
+ azure_rm_networkinterface_facts:
+ redirect: community.azure.azure_rm_networkinterface_facts
+ azure_rm_publicipaddress_facts:
+ redirect: community.azure.azure_rm_publicipaddress_facts
+ azure_rm_securitygroup_facts:
+ redirect: community.azure.azure_rm_securitygroup_facts
+ azure_rm_storageaccount_facts:
+ redirect: community.azure.azure_rm_storageaccount_facts
+ azure_rm_virtualmachine_facts:
+ redirect: community.azure.azure_rm_virtualmachine_facts
+ azure_rm_virtualnetwork_facts:
+ redirect: community.azure.azure_rm_virtualnetwork_facts
+ azure_rm_roledefinition_facts:
+ redirect: community.azure.azure_rm_roledefinition_facts
+ azure_rm_autoscale_facts:
+ redirect: community.azure.azure_rm_autoscale_facts
+ azure_rm_mysqldatabase_facts:
+ redirect: community.azure.azure_rm_mysqldatabase_facts
+ azure_rm_devtestlabschedule_facts:
+ redirect: community.azure.azure_rm_devtestlabschedule_facts
+ azure_rm_virtualmachinescaleset_facts:
+ redirect: community.azure.azure_rm_virtualmachinescaleset_facts
+ azure_rm_devtestlabcustomimage_facts:
+ redirect: community.azure.azure_rm_devtestlabcustomimage_facts
+ azure_rm_cosmosdbaccount_facts:
+ redirect: community.azure.azure_rm_cosmosdbaccount_facts
+ azure_rm_subnet_facts:
+ redirect: community.azure.azure_rm_subnet_facts
+ azure_rm_aksversion_facts:
+ redirect: community.azure.azure_rm_aksversion_facts
+ azure_rm_hdinsightcluster_facts:
+ redirect: community.azure.azure_rm_hdinsightcluster_facts
+ azure_rm_virtualmachinescalesetextension_facts:
+ redirect: community.azure.azure_rm_virtualmachinescalesetextension_facts
+ azure_rm_loadbalancer_facts:
+ redirect: community.azure.azure_rm_loadbalancer_facts
+ azure_rm_roleassignment_facts:
+ redirect: community.azure.azure_rm_roleassignment_facts
+ azure_rm_manageddisk_facts:
+ redirect: community.azure.azure_rm_manageddisk_facts
+ azure_rm_mysqlserver_facts:
+ redirect: community.azure.azure_rm_mysqlserver_facts
+ azure_rm_servicebus_facts:
+ redirect: community.azure.azure_rm_servicebus_facts
+ azure_rm_rediscache_facts:
+ redirect: community.azure.azure_rm_rediscache_facts
+ azure_rm_resource_facts:
+ redirect: community.azure.azure_rm_resource_facts
+ azure_rm_routetable_facts:
+ redirect: community.azure.azure_rm_routetable_facts
+ azure_rm_virtualmachine_extension:
+ redirect: community.azure.azure_rm_virtualmachine_extension
+ azure_rm_loganalyticsworkspace_facts:
+ redirect: community.azure.azure_rm_loganalyticsworkspace_facts
+ azure_rm_sqldatabase_facts:
+ redirect: community.azure.azure_rm_sqldatabase_facts
+ azure_rm_devtestlabartifactsource_facts:
+ redirect: community.azure.azure_rm_devtestlabartifactsource_facts
+ azure_rm_deployment_facts:
+ redirect: community.azure.azure_rm_deployment_facts
+ azure_rm_virtualmachineextension_facts:
+ redirect: community.azure.azure_rm_virtualmachineextension_facts
+ azure_rm_applicationsecuritygroup_facts:
+ redirect: community.azure.azure_rm_applicationsecuritygroup_facts
+ azure_rm_availabilityset_facts:
+ redirect: community.azure.azure_rm_availabilityset_facts
+ azure_rm_mariadbdatabase_facts:
+ redirect: community.azure.azure_rm_mariadbdatabase_facts
+ azure_rm_devtestlabenvironment_facts:
+ redirect: community.azure.azure_rm_devtestlabenvironment_facts
+ azure_rm_appserviceplan_facts:
+ redirect: community.azure.azure_rm_appserviceplan_facts
+ azure_rm_containerinstance_facts:
+ redirect: community.azure.azure_rm_containerinstance_facts
+ azure_rm_devtestlabarmtemplate_facts:
+ redirect: community.azure.azure_rm_devtestlabarmtemplate_facts
+ azure_rm_devtestlabartifact_facts:
+ redirect: community.azure.azure_rm_devtestlabartifact_facts
+ azure_rm_virtualmachinescalesetinstance_facts:
+ redirect: community.azure.azure_rm_virtualmachinescalesetinstance_facts
+ azure_rm_cdnendpoint_facts:
+ redirect: community.azure.azure_rm_cdnendpoint_facts
+ azure_rm_trafficmanagerprofile_facts:
+ redirect: community.azure.azure_rm_trafficmanagerprofile_facts
+ azure_rm_functionapp_facts:
+ redirect: community.azure.azure_rm_functionapp_facts
+ azure_rm_virtualmachineimage_facts:
+ redirect: community.azure.azure_rm_virtualmachineimage_facts
+ azure_rm_mariadbconfiguration_facts:
+ redirect: community.azure.azure_rm_mariadbconfiguration_facts
+ azure_rm_virtualnetworkpeering_facts:
+ redirect: community.azure.azure_rm_virtualnetworkpeering_facts
+ azure_rm_sqlserver_facts:
+ redirect: community.azure.azure_rm_sqlserver_facts
+ azure_rm_mariadbfirewallrule_facts:
+ redirect: community.azure.azure_rm_mariadbfirewallrule_facts
+ azure_rm_mysqlconfiguration_facts:
+ redirect: community.azure.azure_rm_mysqlconfiguration_facts
+ azure_rm_mysqlfirewallrule_facts:
+ redirect: community.azure.azure_rm_mysqlfirewallrule_facts
+ azure_rm_postgresqlfirewallrule_facts:
+ redirect: community.azure.azure_rm_postgresqlfirewallrule_facts
+ azure_rm_mariadbserver_facts:
+ redirect: community.azure.azure_rm_mariadbserver_facts
+ azure_rm_postgresqldatabase_facts:
+ redirect: community.azure.azure_rm_postgresqldatabase_facts
+ azure_rm_devtestlabvirtualnetwork_facts:
+ redirect: community.azure.azure_rm_devtestlabvirtualnetwork_facts
+ azure_rm_devtestlabpolicy_facts:
+ redirect: community.azure.azure_rm_devtestlabpolicy_facts
+ azure_rm_trafficmanagerendpoint_facts:
+ redirect: community.azure.azure_rm_trafficmanagerendpoint_facts
+ azure_rm_sqlfirewallrule_facts:
+ redirect: community.azure.azure_rm_sqlfirewallrule_facts
+ azure_rm_containerregistry_facts:
+ redirect: community.azure.azure_rm_containerregistry_facts
+ azure_rm_postgresqlconfiguration_facts:
+ redirect: community.azure.azure_rm_postgresqlconfiguration_facts
+ azure_rm_postgresqlserver_facts:
+ redirect: community.azure.azure_rm_postgresqlserver_facts
+ azure_rm_devtestlab_facts:
+ redirect: community.azure.azure_rm_devtestlab_facts
+ azure_rm_cdnprofile_facts:
+ redirect: community.azure.azure_rm_cdnprofile_facts
+ azure_rm_virtualmachine_scaleset:
+ redirect: community.azure.azure_rm_virtualmachine_scaleset
+ azure_rm_webapp_facts:
+ redirect: community.azure.azure_rm_webapp_facts
+ azure_rm_devtestlabvirtualmachine_facts:
+ redirect: community.azure.azure_rm_devtestlabvirtualmachine_facts
+ azure_rm_image_facts:
+ redirect: community.azure.azure_rm_image_facts
+ azure_rm_managed_disk:
+ redirect: community.azure.azure_rm_managed_disk
+ azure_rm_automationaccount_facts:
+ redirect: community.azure.azure_rm_automationaccount_facts
+ azure_rm_lock_facts:
+ redirect: community.azure.azure_rm_lock_facts
+ azure_rm_managed_disk_facts:
+ redirect: community.azure.azure_rm_managed_disk_facts
+ azure_rm_resourcegroup_facts:
+ redirect: community.azure.azure_rm_resourcegroup_facts
+ azure_rm_virtualmachine_scaleset_facts:
+ redirect: community.azure.azure_rm_virtualmachine_scaleset_facts
+ snow_record:
+ redirect: servicenow.servicenow.snow_record
+ snow_record_find:
+ redirect: servicenow.servicenow.snow_record_find
+ aws_az_facts:
+ redirect: amazon.aws.aws_az_facts
+ aws_caller_facts:
+ redirect: amazon.aws.aws_caller_facts
+ cloudformation_facts:
+ redirect: amazon.aws.cloudformation_facts
+ ec2_ami_facts:
+ redirect: amazon.aws.ec2_ami_facts
+ ec2_eni_facts:
+ redirect: amazon.aws.ec2_eni_facts
+ ec2_group_facts:
+ redirect: amazon.aws.ec2_group_facts
+ ec2_snapshot_facts:
+ redirect: amazon.aws.ec2_snapshot_facts
+ ec2_vol_facts:
+ redirect: amazon.aws.ec2_vol_facts
+ ec2_vpc_dhcp_option_facts:
+ redirect: amazon.aws.ec2_vpc_dhcp_option_facts
+ ec2_vpc_net_facts:
+ redirect: amazon.aws.ec2_vpc_net_facts
+ ec2_vpc_subnet_facts:
+ redirect: amazon.aws.ec2_vpc_subnet_facts
+ aws_az_info:
+ redirect: amazon.aws.aws_az_info
+ aws_caller_info:
+ redirect: amazon.aws.aws_caller_info
+ aws_s3:
+ redirect: amazon.aws.aws_s3
+ cloudformation:
+ redirect: amazon.aws.cloudformation
+ cloudformation_info:
+ redirect: amazon.aws.cloudformation_info
+ ec2:
+ redirect: amazon.aws.ec2
+ ec2_ami:
+ redirect: amazon.aws.ec2_ami
+ ec2_ami_info:
+ redirect: amazon.aws.ec2_ami_info
+ ec2_elb_lb:
+ redirect: amazon.aws.ec2_elb_lb
+ ec2_eni:
+ redirect: amazon.aws.ec2_eni
+ ec2_eni_info:
+ redirect: amazon.aws.ec2_eni_info
+ ec2_group:
+ redirect: amazon.aws.ec2_group
+ ec2_group_info:
+ redirect: amazon.aws.ec2_group_info
+ ec2_key:
+ redirect: amazon.aws.ec2_key
+ ec2_metadata_facts:
+ redirect: amazon.aws.ec2_metadata_facts
+ ec2_snapshot:
+ redirect: amazon.aws.ec2_snapshot
+ ec2_snapshot_info:
+ redirect: amazon.aws.ec2_snapshot_info
+ ec2_tag:
+ redirect: amazon.aws.ec2_tag
+ ec2_tag_info:
+ redirect: amazon.aws.ec2_tag_info
+ ec2_vol:
+ redirect: amazon.aws.ec2_vol
+ ec2_vol_info:
+ redirect: amazon.aws.ec2_vol_info
+ ec2_vpc_dhcp_option:
+ redirect: amazon.aws.ec2_vpc_dhcp_option
+ ec2_vpc_dhcp_option_info:
+ redirect: amazon.aws.ec2_vpc_dhcp_option_info
+ ec2_vpc_net:
+ redirect: amazon.aws.ec2_vpc_net
+ ec2_vpc_net_info:
+ redirect: amazon.aws.ec2_vpc_net_info
+ ec2_vpc_subnet:
+ redirect: amazon.aws.ec2_vpc_subnet
+ ec2_vpc_subnet_info:
+ redirect: amazon.aws.ec2_vpc_subnet_info
+ s3_bucket:
+ redirect: amazon.aws.s3_bucket
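+ # Note the naming pattern in the amazon.aws group above: the deprecated
+ # `_facts` aliases (e.g. ec2_ami_facts) and their `_info` replacements
+ # (ec2_ami_info) are both routed, so playbooks written against either
+ # spelling resolve into amazon.aws. A sketch of the equivalent explicit
+ # FQCN call (the AMI ID is a made-up placeholder):
+ #
+ #   - name: look up an AMI by the new explicit name
+ #     amazon.aws.ec2_ami_info:
+ #       image_ids:
+ #         - ami-0123456789abcdef0
+ #
+ # behaves the same as calling ec2_ami_info (or ec2_ami_facts) bare.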
+ telnet:
+ redirect: ansible.netcommon.telnet
+ cli_command:
+ redirect: ansible.netcommon.cli_command
+ cli_config:
+ redirect: ansible.netcommon.cli_config
+ net_put:
+ redirect: ansible.netcommon.net_put
+ net_get:
+ redirect: ansible.netcommon.net_get
+ net_linkagg:
+ redirect: ansible.netcommon.net_linkagg
+ net_interface:
+ redirect: ansible.netcommon.net_interface
+ net_lldp_interface:
+ redirect: ansible.netcommon.net_lldp_interface
+ net_vlan:
+ redirect: ansible.netcommon.net_vlan
+ net_l2_interface:
+ redirect: ansible.netcommon.net_l2_interface
+ net_l3_interface:
+ redirect: ansible.netcommon.net_l3_interface
+ net_vrf:
+ redirect: ansible.netcommon.net_vrf
+ netconf_config:
+ redirect: ansible.netcommon.netconf_config
+ netconf_rpc:
+ redirect: ansible.netcommon.netconf_rpc
+ netconf_get:
+ redirect: ansible.netcommon.netconf_get
+ net_lldp:
+ redirect: ansible.netcommon.net_lldp
+ restconf_get:
+ redirect: ansible.netcommon.restconf_get
+ restconf_config:
+ redirect: ansible.netcommon.restconf_config
+ net_static_route:
+ redirect: ansible.netcommon.net_static_route
+ net_system:
+ redirect: ansible.netcommon.net_system
+ net_logging:
+ redirect: ansible.netcommon.net_logging
+ net_user:
+ redirect: ansible.netcommon.net_user
+ net_ping:
+ redirect: ansible.netcommon.net_ping
+ net_banner:
+ redirect: ansible.netcommon.net_banner
+ acl:
+ redirect: ansible.posix.acl
+ synchronize:
+ redirect: ansible.posix.synchronize
+ at:
+ redirect: ansible.posix.at
+ authorized_key:
+ redirect: ansible.posix.authorized_key
+ mount:
+ redirect: ansible.posix.mount
+ seboolean:
+ redirect: ansible.posix.seboolean
+ selinux:
+ redirect: ansible.posix.selinux
+ sysctl:
+ redirect: ansible.posix.sysctl
+ async_status.ps1:
+ redirect: ansible.windows.async_status
+ setup.ps1:
+ redirect: ansible.windows.setup
+ slurp.ps1:
+ redirect: ansible.windows.slurp
+ win_acl:
+ redirect: ansible.windows.win_acl
+ win_acl_inheritance:
+ redirect: ansible.windows.win_acl_inheritance
+ win_certificate_store:
+ redirect: ansible.windows.win_certificate_store
+ win_command:
+ redirect: ansible.windows.win_command
+ win_copy:
+ redirect: ansible.windows.win_copy
+ win_dns_client:
+ redirect: ansible.windows.win_dns_client
+ win_domain:
+ redirect: ansible.windows.win_domain
+ win_domain_controller:
+ redirect: ansible.windows.win_domain_controller
+ win_domain_membership:
+ redirect: ansible.windows.win_domain_membership
+ win_dsc:
+ redirect: ansible.windows.win_dsc
+ win_environment:
+ redirect: ansible.windows.win_environment
+ win_feature:
+ redirect: ansible.windows.win_feature
+ win_file:
+ redirect: ansible.windows.win_file
+ win_find:
+ redirect: ansible.windows.win_find
+ win_get_url:
+ redirect: ansible.windows.win_get_url
+ win_group:
+ redirect: ansible.windows.win_group
+ win_group_membership:
+ redirect: ansible.windows.win_group_membership
+ win_hostname:
+ redirect: ansible.windows.win_hostname
+ win_optional_feature:
+ redirect: ansible.windows.win_optional_feature
+ win_owner:
+ redirect: ansible.windows.win_owner
+ win_package:
+ redirect: ansible.windows.win_package
+ win_path:
+ redirect: ansible.windows.win_path
+ win_ping:
+ redirect: ansible.windows.win_ping
+ win_reboot:
+ redirect: ansible.windows.win_reboot
+ win_reg_stat:
+ redirect: ansible.windows.win_reg_stat
+ win_regedit:
+ redirect: ansible.windows.win_regedit
+ win_service:
+ redirect: ansible.windows.win_service
+ win_share:
+ redirect: ansible.windows.win_share
+ win_shell:
+ redirect: ansible.windows.win_shell
+ win_stat:
+ redirect: ansible.windows.win_stat
+ win_tempfile:
+ redirect: ansible.windows.win_tempfile
+ win_template:
+ redirect: ansible.windows.win_template
+ win_updates:
+ redirect: ansible.windows.win_updates
+ win_uri:
+ redirect: ansible.windows.win_uri
+ win_user:
+ redirect: ansible.windows.win_user
+ win_user_right:
+ redirect: ansible.windows.win_user_right
+ win_wait_for:
+ redirect: ansible.windows.win_wait_for
+ win_whoami:
+ redirect: ansible.windows.win_whoami
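+ # The three `.ps1` keys at the top of this group (async_status.ps1,
+ # setup.ps1, slurp.ps1) route the PowerShell module implementations
+ # specifically: those modules ship a Windows variant alongside the builtin
+ # Python one, and the suffix lets only the Windows side redirect into
+ # ansible.windows while the bare names stay builtin. The win_* entries
+ # redirect whole modules, e.g. (minimal sketch):
+ #
+ #   - name: reach a Windows host via the redirected short name
+ #     win_ping:
+ #
+ # is equivalent to ansible.windows.win_ping once the collection is present.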
+ fortios_address:
+ redirect: fortinet.fortios.fortios_address
+ fortios_alertemail_setting:
+ redirect: fortinet.fortios.fortios_alertemail_setting
+ fortios_antivirus_heuristic:
+ redirect: fortinet.fortios.fortios_antivirus_heuristic
+ fortios_antivirus_profile:
+ redirect: fortinet.fortios.fortios_antivirus_profile
+ fortios_antivirus_quarantine:
+ redirect: fortinet.fortios.fortios_antivirus_quarantine
+ fortios_antivirus_settings:
+ redirect: fortinet.fortios.fortios_antivirus_settings
+ fortios_application_custom:
+ redirect: fortinet.fortios.fortios_application_custom
+ fortios_application_group:
+ redirect: fortinet.fortios.fortios_application_group
+ fortios_application_list:
+ redirect: fortinet.fortios.fortios_application_list
+ fortios_application_name:
+ redirect: fortinet.fortios.fortios_application_name
+ fortios_application_rule_settings:
+ redirect: fortinet.fortios.fortios_application_rule_settings
+ fortios_authentication_rule:
+ redirect: fortinet.fortios.fortios_authentication_rule
+ fortios_authentication_scheme:
+ redirect: fortinet.fortios.fortios_authentication_scheme
+ fortios_authentication_setting:
+ redirect: fortinet.fortios.fortios_authentication_setting
+ fortios_config:
+ redirect: fortinet.fortios.fortios_config
+ fortios_dlp_filepattern:
+ redirect: fortinet.fortios.fortios_dlp_filepattern
+ fortios_dlp_fp_doc_source:
+ redirect: fortinet.fortios.fortios_dlp_fp_doc_source
+ fortios_dlp_fp_sensitivity:
+ redirect: fortinet.fortios.fortios_dlp_fp_sensitivity
+ fortios_dlp_sensor:
+ redirect: fortinet.fortios.fortios_dlp_sensor
+ fortios_dlp_settings:
+ redirect: fortinet.fortios.fortios_dlp_settings
+ fortios_dnsfilter_domain_filter:
+ redirect: fortinet.fortios.fortios_dnsfilter_domain_filter
+ fortios_dnsfilter_profile:
+ redirect: fortinet.fortios.fortios_dnsfilter_profile
+ fortios_endpoint_control_client:
+ redirect: fortinet.fortios.fortios_endpoint_control_client
+ fortios_endpoint_control_forticlient_ems:
+ redirect: fortinet.fortios.fortios_endpoint_control_forticlient_ems
+ fortios_endpoint_control_forticlient_registration_sync:
+ redirect: fortinet.fortios.fortios_endpoint_control_forticlient_registration_sync
+ fortios_endpoint_control_profile:
+ redirect: fortinet.fortios.fortios_endpoint_control_profile
+ fortios_endpoint_control_settings:
+ redirect: fortinet.fortios.fortios_endpoint_control_settings
+ fortios_extender_controller_extender:
+ redirect: fortinet.fortios.fortios_extender_controller_extender
+ fortios_facts:
+ redirect: fortinet.fortios.fortios_facts
+ fortios_firewall_address:
+ redirect: fortinet.fortios.fortios_firewall_address
+ fortios_firewall_address6:
+ redirect: fortinet.fortios.fortios_firewall_address6
+ fortios_firewall_address6_template:
+ redirect: fortinet.fortios.fortios_firewall_address6_template
+ fortios_firewall_addrgrp:
+ redirect: fortinet.fortios.fortios_firewall_addrgrp
+ fortios_firewall_addrgrp6:
+ redirect: fortinet.fortios.fortios_firewall_addrgrp6
+ fortios_firewall_auth_portal:
+ redirect: fortinet.fortios.fortios_firewall_auth_portal
+ fortios_firewall_central_snat_map:
+ redirect: fortinet.fortios.fortios_firewall_central_snat_map
+ fortios_firewall_DoS_policy:
+ redirect: fortinet.fortios.fortios_firewall_DoS_policy
+ fortios_firewall_DoS_policy6:
+ redirect: fortinet.fortios.fortios_firewall_DoS_policy6
+ fortios_firewall_dnstranslation:
+ redirect: fortinet.fortios.fortios_firewall_dnstranslation
+ fortios_firewall_identity_based_route:
+ redirect: fortinet.fortios.fortios_firewall_identity_based_route
+ fortios_firewall_interface_policy:
+ redirect: fortinet.fortios.fortios_firewall_interface_policy
+ fortios_firewall_interface_policy6:
+ redirect: fortinet.fortios.fortios_firewall_interface_policy6
+ fortios_firewall_internet_service:
+ redirect: fortinet.fortios.fortios_firewall_internet_service
+ fortios_firewall_internet_service_custom:
+ redirect: fortinet.fortios.fortios_firewall_internet_service_custom
+ fortios_firewall_internet_service_group:
+ redirect: fortinet.fortios.fortios_firewall_internet_service_group
+ fortios_firewall_ip_translation:
+ redirect: fortinet.fortios.fortios_firewall_ip_translation
+ fortios_firewall_ipmacbinding_setting:
+ redirect: fortinet.fortios.fortios_firewall_ipmacbinding_setting
+ fortios_firewall_ipmacbinding_table:
+ redirect: fortinet.fortios.fortios_firewall_ipmacbinding_table
+ fortios_firewall_ippool:
+ redirect: fortinet.fortios.fortios_firewall_ippool
+ fortios_firewall_ippool6:
+ redirect: fortinet.fortios.fortios_firewall_ippool6
+ fortios_firewall_ipv6_eh_filter:
+ redirect: fortinet.fortios.fortios_firewall_ipv6_eh_filter
+ fortios_firewall_ldb_monitor:
+ redirect: fortinet.fortios.fortios_firewall_ldb_monitor
+ fortios_firewall_local_in_policy:
+ redirect: fortinet.fortios.fortios_firewall_local_in_policy
+ fortios_firewall_local_in_policy6:
+ redirect: fortinet.fortios.fortios_firewall_local_in_policy6
+ fortios_firewall_multicast_address:
+ redirect: fortinet.fortios.fortios_firewall_multicast_address
+ fortios_firewall_multicast_address6:
+ redirect: fortinet.fortios.fortios_firewall_multicast_address6
+ fortios_firewall_multicast_policy:
+ redirect: fortinet.fortios.fortios_firewall_multicast_policy
+ fortios_firewall_multicast_policy6:
+ redirect: fortinet.fortios.fortios_firewall_multicast_policy6
+ fortios_firewall_policy:
+ redirect: fortinet.fortios.fortios_firewall_policy
+ fortios_firewall_policy46:
+ redirect: fortinet.fortios.fortios_firewall_policy46
+ fortios_firewall_policy6:
+ redirect: fortinet.fortios.fortios_firewall_policy6
+ fortios_firewall_policy64:
+ redirect: fortinet.fortios.fortios_firewall_policy64
+ fortios_firewall_profile_group:
+ redirect: fortinet.fortios.fortios_firewall_profile_group
+ fortios_firewall_profile_protocol_options:
+ redirect: fortinet.fortios.fortios_firewall_profile_protocol_options
+ fortios_firewall_proxy_address:
+ redirect: fortinet.fortios.fortios_firewall_proxy_address
+ fortios_firewall_proxy_addrgrp:
+ redirect: fortinet.fortios.fortios_firewall_proxy_addrgrp
+ fortios_firewall_proxy_policy:
+ redirect: fortinet.fortios.fortios_firewall_proxy_policy
+ fortios_firewall_schedule_group:
+ redirect: fortinet.fortios.fortios_firewall_schedule_group
+ fortios_firewall_schedule_onetime:
+ redirect: fortinet.fortios.fortios_firewall_schedule_onetime
+ fortios_firewall_schedule_recurring:
+ redirect: fortinet.fortios.fortios_firewall_schedule_recurring
+ fortios_firewall_service_category:
+ redirect: fortinet.fortios.fortios_firewall_service_category
+ fortios_firewall_service_custom:
+ redirect: fortinet.fortios.fortios_firewall_service_custom
+ fortios_firewall_service_group:
+ redirect: fortinet.fortios.fortios_firewall_service_group
+ fortios_firewall_shaper_per_ip_shaper:
+ redirect: fortinet.fortios.fortios_firewall_shaper_per_ip_shaper
+ fortios_firewall_shaper_traffic_shaper:
+ redirect: fortinet.fortios.fortios_firewall_shaper_traffic_shaper
+ fortios_firewall_shaping_policy:
+ redirect: fortinet.fortios.fortios_firewall_shaping_policy
+ fortios_firewall_shaping_profile:
+ redirect: fortinet.fortios.fortios_firewall_shaping_profile
+ fortios_firewall_sniffer:
+ redirect: fortinet.fortios.fortios_firewall_sniffer
+ fortios_firewall_ssh_host_key:
+ redirect: fortinet.fortios.fortios_firewall_ssh_host_key
+ fortios_firewall_ssh_local_ca:
+ redirect: fortinet.fortios.fortios_firewall_ssh_local_ca
+ fortios_firewall_ssh_local_key:
+ redirect: fortinet.fortios.fortios_firewall_ssh_local_key
+ fortios_firewall_ssh_setting:
+ redirect: fortinet.fortios.fortios_firewall_ssh_setting
+ fortios_firewall_ssl_server:
+ redirect: fortinet.fortios.fortios_firewall_ssl_server
+ fortios_firewall_ssl_setting:
+ redirect: fortinet.fortios.fortios_firewall_ssl_setting
+ fortios_firewall_ssl_ssh_profile:
+ redirect: fortinet.fortios.fortios_firewall_ssl_ssh_profile
+ fortios_firewall_ttl_policy:
+ redirect: fortinet.fortios.fortios_firewall_ttl_policy
+ fortios_firewall_vip:
+ redirect: fortinet.fortios.fortios_firewall_vip
+ fortios_firewall_vip46:
+ redirect: fortinet.fortios.fortios_firewall_vip46
+ fortios_firewall_vip6:
+ redirect: fortinet.fortios.fortios_firewall_vip6
+ fortios_firewall_vip64:
+ redirect: fortinet.fortios.fortios_firewall_vip64
+ fortios_firewall_vipgrp:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp
+ fortios_firewall_vipgrp46:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp46
+ fortios_firewall_vipgrp6:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp6
+ fortios_firewall_vipgrp64:
+ redirect: fortinet.fortios.fortios_firewall_vipgrp64
+ fortios_firewall_wildcard_fqdn_custom:
+ redirect: fortinet.fortios.fortios_firewall_wildcard_fqdn_custom
+ fortios_firewall_wildcard_fqdn_group:
+ redirect: fortinet.fortios.fortios_firewall_wildcard_fqdn_group
+ fortios_ftp_proxy_explicit:
+ redirect: fortinet.fortios.fortios_ftp_proxy_explicit
+ fortios_icap_profile:
+ redirect: fortinet.fortios.fortios_icap_profile
+ fortios_icap_server:
+ redirect: fortinet.fortios.fortios_icap_server
+ fortios_ips_custom:
+ redirect: fortinet.fortios.fortios_ips_custom
+ fortios_ips_decoder:
+ redirect: fortinet.fortios.fortios_ips_decoder
+ fortios_ips_global:
+ redirect: fortinet.fortios.fortios_ips_global
+ fortios_ips_rule:
+ redirect: fortinet.fortios.fortios_ips_rule
+ fortios_ips_rule_settings:
+ redirect: fortinet.fortios.fortios_ips_rule_settings
+ fortios_ips_sensor:
+ redirect: fortinet.fortios.fortios_ips_sensor
+ fortios_ips_settings:
+ redirect: fortinet.fortios.fortios_ips_settings
+ fortios_ipv4_policy:
+ redirect: fortinet.fortios.fortios_ipv4_policy
+ fortios_log_custom_field:
+ redirect: fortinet.fortios.fortios_log_custom_field
+ fortios_log_disk_filter:
+ redirect: fortinet.fortios.fortios_log_disk_filter
+ fortios_log_disk_setting:
+ redirect: fortinet.fortios.fortios_log_disk_setting
+ fortios_log_eventfilter:
+ redirect: fortinet.fortios.fortios_log_eventfilter
+ fortios_log_fortianalyzer2_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer2_filter
+ fortios_log_fortianalyzer2_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer2_setting
+ fortios_log_fortianalyzer3_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer3_filter
+ fortios_log_fortianalyzer3_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer3_setting
+ fortios_log_fortianalyzer_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_filter
+ fortios_log_fortianalyzer_override_filter:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_override_filter
+ fortios_log_fortianalyzer_override_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_override_setting
+ fortios_log_fortianalyzer_setting:
+ redirect: fortinet.fortios.fortios_log_fortianalyzer_setting
+ fortios_log_fortiguard_filter:
+ redirect: fortinet.fortios.fortios_log_fortiguard_filter
+ fortios_log_fortiguard_override_filter:
+ redirect: fortinet.fortios.fortios_log_fortiguard_override_filter
+ fortios_log_fortiguard_override_setting:
+ redirect: fortinet.fortios.fortios_log_fortiguard_override_setting
+ fortios_log_fortiguard_setting:
+ redirect: fortinet.fortios.fortios_log_fortiguard_setting
+ fortios_log_gui_display:
+ redirect: fortinet.fortios.fortios_log_gui_display
+ fortios_log_memory_filter:
+ redirect: fortinet.fortios.fortios_log_memory_filter
+ fortios_log_memory_global_setting:
+ redirect: fortinet.fortios.fortios_log_memory_global_setting
+ fortios_log_memory_setting:
+ redirect: fortinet.fortios.fortios_log_memory_setting
+ fortios_log_null_device_filter:
+ redirect: fortinet.fortios.fortios_log_null_device_filter
+ fortios_log_null_device_setting:
+ redirect: fortinet.fortios.fortios_log_null_device_setting
+ fortios_log_setting:
+ redirect: fortinet.fortios.fortios_log_setting
+ fortios_log_syslogd2_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd2_filter
+ fortios_log_syslogd2_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd2_setting
+ fortios_log_syslogd3_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd3_filter
+ fortios_log_syslogd3_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd3_setting
+ fortios_log_syslogd4_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd4_filter
+ fortios_log_syslogd4_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd4_setting
+ fortios_log_syslogd_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd_filter
+ fortios_log_syslogd_override_filter:
+ redirect: fortinet.fortios.fortios_log_syslogd_override_filter
+ fortios_log_syslogd_override_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd_override_setting
+ fortios_log_syslogd_setting:
+ redirect: fortinet.fortios.fortios_log_syslogd_setting
+ fortios_log_threat_weight:
+ redirect: fortinet.fortios.fortios_log_threat_weight
+ fortios_log_webtrends_filter:
+ redirect: fortinet.fortios.fortios_log_webtrends_filter
+ fortios_log_webtrends_setting:
+ redirect: fortinet.fortios.fortios_log_webtrends_setting
+ fortios_report_chart:
+ redirect: fortinet.fortios.fortios_report_chart
+ fortios_report_dataset:
+ redirect: fortinet.fortios.fortios_report_dataset
+ fortios_report_layout:
+ redirect: fortinet.fortios.fortios_report_layout
+ fortios_report_setting:
+ redirect: fortinet.fortios.fortios_report_setting
+ fortios_report_style:
+ redirect: fortinet.fortios.fortios_report_style
+ fortios_report_theme:
+ redirect: fortinet.fortios.fortios_report_theme
+ fortios_router_access_list:
+ redirect: fortinet.fortios.fortios_router_access_list
+ fortios_router_access_list6:
+ redirect: fortinet.fortios.fortios_router_access_list6
+ fortios_router_aspath_list:
+ redirect: fortinet.fortios.fortios_router_aspath_list
+ fortios_router_auth_path:
+ redirect: fortinet.fortios.fortios_router_auth_path
+ fortios_router_bfd:
+ redirect: fortinet.fortios.fortios_router_bfd
+ fortios_router_bfd6:
+ redirect: fortinet.fortios.fortios_router_bfd6
+ fortios_router_bgp:
+ redirect: fortinet.fortios.fortios_router_bgp
+ fortios_router_community_list:
+ redirect: fortinet.fortios.fortios_router_community_list
+ fortios_router_isis:
+ redirect: fortinet.fortios.fortios_router_isis
+ fortios_router_key_chain:
+ redirect: fortinet.fortios.fortios_router_key_chain
+ fortios_router_multicast:
+ redirect: fortinet.fortios.fortios_router_multicast
+ fortios_router_multicast6:
+ redirect: fortinet.fortios.fortios_router_multicast6
+ fortios_router_multicast_flow:
+ redirect: fortinet.fortios.fortios_router_multicast_flow
+ fortios_router_ospf:
+ redirect: fortinet.fortios.fortios_router_ospf
+ fortios_router_ospf6:
+ redirect: fortinet.fortios.fortios_router_ospf6
+ fortios_router_policy:
+ redirect: fortinet.fortios.fortios_router_policy
+ fortios_router_policy6:
+ redirect: fortinet.fortios.fortios_router_policy6
+ fortios_router_prefix_list:
+ redirect: fortinet.fortios.fortios_router_prefix_list
+ fortios_router_prefix_list6:
+ redirect: fortinet.fortios.fortios_router_prefix_list6
+ fortios_router_rip:
+ redirect: fortinet.fortios.fortios_router_rip
+ fortios_router_ripng:
+ redirect: fortinet.fortios.fortios_router_ripng
+ fortios_router_route_map:
+ redirect: fortinet.fortios.fortios_router_route_map
+ fortios_router_setting:
+ redirect: fortinet.fortios.fortios_router_setting
+ fortios_router_static:
+ redirect: fortinet.fortios.fortios_router_static
+ fortios_router_static6:
+ redirect: fortinet.fortios.fortios_router_static6
+ fortios_spamfilter_bwl:
+ redirect: fortinet.fortios.fortios_spamfilter_bwl
+ fortios_spamfilter_bword:
+ redirect: fortinet.fortios.fortios_spamfilter_bword
+ fortios_spamfilter_dnsbl:
+ redirect: fortinet.fortios.fortios_spamfilter_dnsbl
+ fortios_spamfilter_fortishield:
+ redirect: fortinet.fortios.fortios_spamfilter_fortishield
+ fortios_spamfilter_iptrust:
+ redirect: fortinet.fortios.fortios_spamfilter_iptrust
+ fortios_spamfilter_mheader:
+ redirect: fortinet.fortios.fortios_spamfilter_mheader
+ fortios_spamfilter_options:
+ redirect: fortinet.fortios.fortios_spamfilter_options
+ fortios_spamfilter_profile:
+ redirect: fortinet.fortios.fortios_spamfilter_profile
+ fortios_ssh_filter_profile:
+ redirect: fortinet.fortios.fortios_ssh_filter_profile
+ fortios_switch_controller_802_1X_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_802_1X_settings
+ fortios_switch_controller_custom_command:
+ redirect: fortinet.fortios.fortios_switch_controller_custom_command
+ fortios_switch_controller_global:
+ redirect: fortinet.fortios.fortios_switch_controller_global
+ fortios_switch_controller_igmp_snooping:
+ redirect: fortinet.fortios.fortios_switch_controller_igmp_snooping
+ fortios_switch_controller_lldp_profile:
+ redirect: fortinet.fortios.fortios_switch_controller_lldp_profile
+ fortios_switch_controller_lldp_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_lldp_settings
+ fortios_switch_controller_mac_sync_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_mac_sync_settings
+ fortios_switch_controller_managed_switch:
+ redirect: fortinet.fortios.fortios_switch_controller_managed_switch
+ fortios_switch_controller_network_monitor_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_network_monitor_settings
+ fortios_switch_controller_qos_dot1p_map:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_dot1p_map
+ fortios_switch_controller_qos_ip_dscp_map:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_ip_dscp_map
+ fortios_switch_controller_qos_qos_policy:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_qos_policy
+ fortios_switch_controller_qos_queue_policy:
+ redirect: fortinet.fortios.fortios_switch_controller_qos_queue_policy
+ fortios_switch_controller_quarantine:
+ redirect: fortinet.fortios.fortios_switch_controller_quarantine
+ fortios_switch_controller_security_policy_802_1X:
+ redirect: fortinet.fortios.fortios_switch_controller_security_policy_802_1X
+ fortios_switch_controller_security_policy_captive_portal:
+ redirect: fortinet.fortios.fortios_switch_controller_security_policy_captive_portal
+ fortios_switch_controller_sflow:
+ redirect: fortinet.fortios.fortios_switch_controller_sflow
+ fortios_switch_controller_storm_control:
+ redirect: fortinet.fortios.fortios_switch_controller_storm_control
+ fortios_switch_controller_stp_settings:
+ redirect: fortinet.fortios.fortios_switch_controller_stp_settings
+ fortios_switch_controller_switch_group:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_group
+ fortios_switch_controller_switch_interface_tag:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_interface_tag
+ fortios_switch_controller_switch_log:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_log
+ fortios_switch_controller_switch_profile:
+ redirect: fortinet.fortios.fortios_switch_controller_switch_profile
+ fortios_switch_controller_system:
+ redirect: fortinet.fortios.fortios_switch_controller_system
+ fortios_switch_controller_virtual_port_pool:
+ redirect: fortinet.fortios.fortios_switch_controller_virtual_port_pool
+ fortios_switch_controller_vlan:
+ redirect: fortinet.fortios.fortios_switch_controller_vlan
+ fortios_system_accprofile:
+ redirect: fortinet.fortios.fortios_system_accprofile
+ fortios_system_admin:
+ redirect: fortinet.fortios.fortios_system_admin
+ fortios_system_affinity_interrupt:
+ redirect: fortinet.fortios.fortios_system_affinity_interrupt
+ fortios_system_affinity_packet_redistribution:
+ redirect: fortinet.fortios.fortios_system_affinity_packet_redistribution
+ fortios_system_alarm:
+ redirect: fortinet.fortios.fortios_system_alarm
+ fortios_system_alias:
+ redirect: fortinet.fortios.fortios_system_alias
+ fortios_system_api_user:
+ redirect: fortinet.fortios.fortios_system_api_user
+ fortios_system_arp_table:
+ redirect: fortinet.fortios.fortios_system_arp_table
+ fortios_system_auto_install:
+ redirect: fortinet.fortios.fortios_system_auto_install
+ fortios_system_auto_script:
+ redirect: fortinet.fortios.fortios_system_auto_script
+ fortios_system_automation_action:
+ redirect: fortinet.fortios.fortios_system_automation_action
+ fortios_system_automation_destination:
+ redirect: fortinet.fortios.fortios_system_automation_destination
+ fortios_system_automation_stitch:
+ redirect: fortinet.fortios.fortios_system_automation_stitch
+ fortios_system_automation_trigger:
+ redirect: fortinet.fortios.fortios_system_automation_trigger
+ fortios_system_autoupdate_push_update:
+ redirect: fortinet.fortios.fortios_system_autoupdate_push_update
+ fortios_system_autoupdate_schedule:
+ redirect: fortinet.fortios.fortios_system_autoupdate_schedule
+ fortios_system_autoupdate_tunneling:
+ redirect: fortinet.fortios.fortios_system_autoupdate_tunneling
+ fortios_system_central_management:
+ redirect: fortinet.fortios.fortios_system_central_management
+ fortios_system_cluster_sync:
+ redirect: fortinet.fortios.fortios_system_cluster_sync
+ fortios_system_console:
+ redirect: fortinet.fortios.fortios_system_console
+ fortios_system_csf:
+ redirect: fortinet.fortios.fortios_system_csf
+ fortios_system_custom_language:
+ redirect: fortinet.fortios.fortios_system_custom_language
+ fortios_system_ddns:
+ redirect: fortinet.fortios.fortios_system_ddns
+ fortios_system_dedicated_mgmt:
+ redirect: fortinet.fortios.fortios_system_dedicated_mgmt
+ fortios_system_dhcp6_server:
+ redirect: fortinet.fortios.fortios_system_dhcp6_server
+ fortios_system_dhcp_server:
+ redirect: fortinet.fortios.fortios_system_dhcp_server
+ fortios_system_dns:
+ redirect: fortinet.fortios.fortios_system_dns
+ fortios_system_dns_database:
+ redirect: fortinet.fortios.fortios_system_dns_database
+ fortios_system_dns_server:
+ redirect: fortinet.fortios.fortios_system_dns_server
+ fortios_system_dscp_based_priority:
+ redirect: fortinet.fortios.fortios_system_dscp_based_priority
+ fortios_system_email_server:
+ redirect: fortinet.fortios.fortios_system_email_server
+ fortios_system_external_resource:
+ redirect: fortinet.fortios.fortios_system_external_resource
+ fortios_system_fips_cc:
+ redirect: fortinet.fortios.fortios_system_fips_cc
+ fortios_system_firmware_upgrade:
+ redirect: fortinet.fortios.fortios_system_firmware_upgrade
+ fortios_system_fm:
+ redirect: fortinet.fortios.fortios_system_fm
+ fortios_system_fortiguard:
+ redirect: fortinet.fortios.fortios_system_fortiguard
+ fortios_system_fortimanager:
+ redirect: fortinet.fortios.fortios_system_fortimanager
+ fortios_system_fortisandbox:
+ redirect: fortinet.fortios.fortios_system_fortisandbox
+ fortios_system_fsso_polling:
+ redirect: fortinet.fortios.fortios_system_fsso_polling
+ fortios_system_ftm_push:
+ redirect: fortinet.fortios.fortios_system_ftm_push
+ fortios_system_geoip_override:
+ redirect: fortinet.fortios.fortios_system_geoip_override
+ fortios_system_global:
+ redirect: fortinet.fortios.fortios_system_global
+ fortios_system_gre_tunnel:
+ redirect: fortinet.fortios.fortios_system_gre_tunnel
+ fortios_system_ha:
+ redirect: fortinet.fortios.fortios_system_ha
+ fortios_system_ha_monitor:
+ redirect: fortinet.fortios.fortios_system_ha_monitor
+ fortios_system_interface:
+ redirect: fortinet.fortios.fortios_system_interface
+ fortios_system_ipip_tunnel:
+ redirect: fortinet.fortios.fortios_system_ipip_tunnel
+ fortios_system_ips_urlfilter_dns:
+ redirect: fortinet.fortios.fortios_system_ips_urlfilter_dns
+ fortios_system_ips_urlfilter_dns6:
+ redirect: fortinet.fortios.fortios_system_ips_urlfilter_dns6
+ fortios_system_ipv6_neighbor_cache:
+ redirect: fortinet.fortios.fortios_system_ipv6_neighbor_cache
+ fortios_system_ipv6_tunnel:
+ redirect: fortinet.fortios.fortios_system_ipv6_tunnel
+ fortios_system_link_monitor:
+ redirect: fortinet.fortios.fortios_system_link_monitor
+ fortios_system_mac_address_table:
+ redirect: fortinet.fortios.fortios_system_mac_address_table
+ fortios_system_management_tunnel:
+ redirect: fortinet.fortios.fortios_system_management_tunnel
+ fortios_system_mobile_tunnel:
+ redirect: fortinet.fortios.fortios_system_mobile_tunnel
+ fortios_system_nat64:
+ redirect: fortinet.fortios.fortios_system_nat64
+ fortios_system_nd_proxy:
+ redirect: fortinet.fortios.fortios_system_nd_proxy
+ fortios_system_netflow:
+ redirect: fortinet.fortios.fortios_system_netflow
+ fortios_system_network_visibility:
+ redirect: fortinet.fortios.fortios_system_network_visibility
+ fortios_system_ntp:
+ redirect: fortinet.fortios.fortios_system_ntp
+ fortios_system_object_tagging:
+ redirect: fortinet.fortios.fortios_system_object_tagging
+ fortios_system_password_policy:
+ redirect: fortinet.fortios.fortios_system_password_policy
+ fortios_system_password_policy_guest_admin:
+ redirect: fortinet.fortios.fortios_system_password_policy_guest_admin
+ fortios_system_pppoe_interface:
+ redirect: fortinet.fortios.fortios_system_pppoe_interface
+ fortios_system_probe_response:
+ redirect: fortinet.fortios.fortios_system_probe_response
+ fortios_system_proxy_arp:
+ redirect: fortinet.fortios.fortios_system_proxy_arp
+ fortios_system_replacemsg_admin:
+ redirect: fortinet.fortios.fortios_system_replacemsg_admin
+ fortios_system_replacemsg_alertmail:
+ redirect: fortinet.fortios.fortios_system_replacemsg_alertmail
+ fortios_system_replacemsg_auth:
+ redirect: fortinet.fortios.fortios_system_replacemsg_auth
+ fortios_system_replacemsg_device_detection_portal:
+ redirect: fortinet.fortios.fortios_system_replacemsg_device_detection_portal
+ fortios_system_replacemsg_ec:
+ redirect: fortinet.fortios.fortios_system_replacemsg_ec
+ fortios_system_replacemsg_fortiguard_wf:
+ redirect: fortinet.fortios.fortios_system_replacemsg_fortiguard_wf
+ fortios_system_replacemsg_ftp:
+ redirect: fortinet.fortios.fortios_system_replacemsg_ftp
+ fortios_system_replacemsg_group:
+ redirect: fortinet.fortios.fortios_system_replacemsg_group
+ fortios_system_replacemsg_http:
+ redirect: fortinet.fortios.fortios_system_replacemsg_http
+ fortios_system_replacemsg_icap:
+ redirect: fortinet.fortios.fortios_system_replacemsg_icap
+ fortios_system_replacemsg_image:
+ redirect: fortinet.fortios.fortios_system_replacemsg_image
+ fortios_system_replacemsg_mail:
+ redirect: fortinet.fortios.fortios_system_replacemsg_mail
+ fortios_system_replacemsg_nac_quar:
+ redirect: fortinet.fortios.fortios_system_replacemsg_nac_quar
+ fortios_system_replacemsg_nntp:
+ redirect: fortinet.fortios.fortios_system_replacemsg_nntp
+ fortios_system_replacemsg_spam:
+ redirect: fortinet.fortios.fortios_system_replacemsg_spam
+ fortios_system_replacemsg_sslvpn:
+ redirect: fortinet.fortios.fortios_system_replacemsg_sslvpn
+ fortios_system_replacemsg_traffic_quota:
+ redirect: fortinet.fortios.fortios_system_replacemsg_traffic_quota
+ fortios_system_replacemsg_utm:
+ redirect: fortinet.fortios.fortios_system_replacemsg_utm
+ fortios_system_replacemsg_webproxy:
+ redirect: fortinet.fortios.fortios_system_replacemsg_webproxy
+ fortios_system_resource_limits:
+ redirect: fortinet.fortios.fortios_system_resource_limits
+ fortios_system_sdn_connector:
+ redirect: fortinet.fortios.fortios_system_sdn_connector
+ fortios_system_session_helper:
+ redirect: fortinet.fortios.fortios_system_session_helper
+ fortios_system_session_ttl:
+ redirect: fortinet.fortios.fortios_system_session_ttl
+ fortios_system_settings:
+ redirect: fortinet.fortios.fortios_system_settings
+ fortios_system_sflow:
+ redirect: fortinet.fortios.fortios_system_sflow
+ fortios_system_sit_tunnel:
+ redirect: fortinet.fortios.fortios_system_sit_tunnel
+ fortios_system_sms_server:
+ redirect: fortinet.fortios.fortios_system_sms_server
+ fortios_system_snmp_community:
+ redirect: fortinet.fortios.fortios_system_snmp_community
+ fortios_system_snmp_sysinfo:
+ redirect: fortinet.fortios.fortios_system_snmp_sysinfo
+ fortios_system_snmp_user:
+ redirect: fortinet.fortios.fortios_system_snmp_user
+ fortios_system_storage:
+ redirect: fortinet.fortios.fortios_system_storage
+ fortios_system_switch_interface:
+ redirect: fortinet.fortios.fortios_system_switch_interface
+ fortios_system_tos_based_priority:
+ redirect: fortinet.fortios.fortios_system_tos_based_priority
+ fortios_system_vdom:
+ redirect: fortinet.fortios.fortios_system_vdom
+ fortios_system_vdom_dns:
+ redirect: fortinet.fortios.fortios_system_vdom_dns
+ fortios_system_vdom_exception:
+ redirect: fortinet.fortios.fortios_system_vdom_exception
+ fortios_system_vdom_link:
+ redirect: fortinet.fortios.fortios_system_vdom_link
+ fortios_system_vdom_netflow:
+ redirect: fortinet.fortios.fortios_system_vdom_netflow
+ fortios_system_vdom_property:
+ redirect: fortinet.fortios.fortios_system_vdom_property
+ fortios_system_vdom_radius_server:
+ redirect: fortinet.fortios.fortios_system_vdom_radius_server
+ fortios_system_vdom_sflow:
+ redirect: fortinet.fortios.fortios_system_vdom_sflow
+ fortios_system_virtual_wan_link:
+ redirect: fortinet.fortios.fortios_system_virtual_wan_link
+ fortios_system_virtual_wire_pair:
+ redirect: fortinet.fortios.fortios_system_virtual_wire_pair
+ fortios_system_vxlan:
+ redirect: fortinet.fortios.fortios_system_vxlan
+ fortios_system_wccp:
+ redirect: fortinet.fortios.fortios_system_wccp
+ fortios_system_zone:
+ redirect: fortinet.fortios.fortios_system_zone
+ fortios_user_adgrp:
+ redirect: fortinet.fortios.fortios_user_adgrp
+ fortios_user_device:
+ redirect: fortinet.fortios.fortios_user_device
+ fortios_user_device_access_list:
+ redirect: fortinet.fortios.fortios_user_device_access_list
+ fortios_user_device_category:
+ redirect: fortinet.fortios.fortios_user_device_category
+ fortios_user_device_group:
+ redirect: fortinet.fortios.fortios_user_device_group
+ fortios_user_domain_controller:
+ redirect: fortinet.fortios.fortios_user_domain_controller
+ fortios_user_fortitoken:
+ redirect: fortinet.fortios.fortios_user_fortitoken
+ fortios_user_fsso:
+ redirect: fortinet.fortios.fortios_user_fsso
+ fortios_user_fsso_polling:
+ redirect: fortinet.fortios.fortios_user_fsso_polling
+ fortios_user_group:
+ redirect: fortinet.fortios.fortios_user_group
+ fortios_user_krb_keytab:
+ redirect: fortinet.fortios.fortios_user_krb_keytab
+ fortios_user_ldap:
+ redirect: fortinet.fortios.fortios_user_ldap
+ fortios_user_local:
+ redirect: fortinet.fortios.fortios_user_local
+ fortios_user_password_policy:
+ redirect: fortinet.fortios.fortios_user_password_policy
+ fortios_user_peer:
+ redirect: fortinet.fortios.fortios_user_peer
+ fortios_user_peergrp:
+ redirect: fortinet.fortios.fortios_user_peergrp
+ fortios_user_pop3:
+ redirect: fortinet.fortios.fortios_user_pop3
+ fortios_user_quarantine:
+ redirect: fortinet.fortios.fortios_user_quarantine
+ fortios_user_radius:
+ redirect: fortinet.fortios.fortios_user_radius
+ fortios_user_security_exempt_list:
+ redirect: fortinet.fortios.fortios_user_security_exempt_list
+ fortios_user_setting:
+ redirect: fortinet.fortios.fortios_user_setting
+ fortios_user_tacacsplus:
+ redirect: fortinet.fortios.fortios_user_tacacsplus
+ fortios_voip_profile:
+ redirect: fortinet.fortios.fortios_voip_profile
+ fortios_vpn_certificate_ca:
+ redirect: fortinet.fortios.fortios_vpn_certificate_ca
+ fortios_vpn_certificate_crl:
+ redirect: fortinet.fortios.fortios_vpn_certificate_crl
+ fortios_vpn_certificate_local:
+ redirect: fortinet.fortios.fortios_vpn_certificate_local
+ fortios_vpn_certificate_ocsp_server:
+ redirect: fortinet.fortios.fortios_vpn_certificate_ocsp_server
+ fortios_vpn_certificate_remote:
+ redirect: fortinet.fortios.fortios_vpn_certificate_remote
+ fortios_vpn_certificate_setting:
+ redirect: fortinet.fortios.fortios_vpn_certificate_setting
+ fortios_vpn_ipsec_concentrator:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_concentrator
+ fortios_vpn_ipsec_forticlient:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_forticlient
+ fortios_vpn_ipsec_manualkey:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_manualkey
+ fortios_vpn_ipsec_manualkey_interface:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_manualkey_interface
+ fortios_vpn_ipsec_phase1:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase1
+ fortios_vpn_ipsec_phase1_interface:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase1_interface
+ fortios_vpn_ipsec_phase2:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase2
+ fortios_vpn_ipsec_phase2_interface:
+ redirect: fortinet.fortios.fortios_vpn_ipsec_phase2_interface
+ fortios_vpn_l2tp:
+ redirect: fortinet.fortios.fortios_vpn_l2tp
+ fortios_vpn_pptp:
+ redirect: fortinet.fortios.fortios_vpn_pptp
+ fortios_vpn_ssl_settings:
+ redirect: fortinet.fortios.fortios_vpn_ssl_settings
+ fortios_vpn_ssl_web_host_check_software:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_host_check_software
+ fortios_vpn_ssl_web_portal:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_portal
+ fortios_vpn_ssl_web_realm:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_realm
+ fortios_vpn_ssl_web_user_bookmark:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_user_bookmark
+ fortios_vpn_ssl_web_user_group_bookmark:
+ redirect: fortinet.fortios.fortios_vpn_ssl_web_user_group_bookmark
+ fortios_waf_main_class:
+ redirect: fortinet.fortios.fortios_waf_main_class
+ fortios_waf_profile:
+ redirect: fortinet.fortios.fortios_waf_profile
+ fortios_waf_signature:
+ redirect: fortinet.fortios.fortios_waf_signature
+ fortios_waf_sub_class:
+ redirect: fortinet.fortios.fortios_waf_sub_class
+ fortios_wanopt_auth_group:
+ redirect: fortinet.fortios.fortios_wanopt_auth_group
+ fortios_wanopt_cache_service:
+ redirect: fortinet.fortios.fortios_wanopt_cache_service
+ fortios_wanopt_content_delivery_network_rule:
+ redirect: fortinet.fortios.fortios_wanopt_content_delivery_network_rule
+ fortios_wanopt_peer:
+ redirect: fortinet.fortios.fortios_wanopt_peer
+ fortios_wanopt_profile:
+ redirect: fortinet.fortios.fortios_wanopt_profile
+ fortios_wanopt_remote_storage:
+ redirect: fortinet.fortios.fortios_wanopt_remote_storage
+ fortios_wanopt_settings:
+ redirect: fortinet.fortios.fortios_wanopt_settings
+ fortios_wanopt_webcache:
+ redirect: fortinet.fortios.fortios_wanopt_webcache
+ fortios_web_proxy_debug_url:
+ redirect: fortinet.fortios.fortios_web_proxy_debug_url
+ fortios_web_proxy_explicit:
+ redirect: fortinet.fortios.fortios_web_proxy_explicit
+ fortios_web_proxy_forward_server:
+ redirect: fortinet.fortios.fortios_web_proxy_forward_server
+ fortios_web_proxy_forward_server_group:
+ redirect: fortinet.fortios.fortios_web_proxy_forward_server_group
+ fortios_web_proxy_global:
+ redirect: fortinet.fortios.fortios_web_proxy_global
+ fortios_web_proxy_profile:
+ redirect: fortinet.fortios.fortios_web_proxy_profile
+ fortios_web_proxy_url_match:
+ redirect: fortinet.fortios.fortios_web_proxy_url_match
+ fortios_web_proxy_wisp:
+ redirect: fortinet.fortios.fortios_web_proxy_wisp
+ fortios_webfilter:
+ redirect: fortinet.fortios.fortios_webfilter
+ fortios_webfilter_content:
+ redirect: fortinet.fortios.fortios_webfilter_content
+ fortios_webfilter_content_header:
+ redirect: fortinet.fortios.fortios_webfilter_content_header
+ fortios_webfilter_fortiguard:
+ redirect: fortinet.fortios.fortios_webfilter_fortiguard
+ fortios_webfilter_ftgd_local_cat:
+ redirect: fortinet.fortios.fortios_webfilter_ftgd_local_cat
+ fortios_webfilter_ftgd_local_rating:
+ redirect: fortinet.fortios.fortios_webfilter_ftgd_local_rating
+ fortios_webfilter_ips_urlfilter_cache_setting:
+ redirect: fortinet.fortios.fortios_webfilter_ips_urlfilter_cache_setting
+ fortios_webfilter_ips_urlfilter_setting:
+ redirect: fortinet.fortios.fortios_webfilter_ips_urlfilter_setting
+ fortios_webfilter_ips_urlfilter_setting6:
+ redirect: fortinet.fortios.fortios_webfilter_ips_urlfilter_setting6
+ fortios_webfilter_override:
+ redirect: fortinet.fortios.fortios_webfilter_override
+ fortios_webfilter_profile:
+ redirect: fortinet.fortios.fortios_webfilter_profile
+ fortios_webfilter_search_engine:
+ redirect: fortinet.fortios.fortios_webfilter_search_engine
+ fortios_webfilter_urlfilter:
+ redirect: fortinet.fortios.fortios_webfilter_urlfilter
+ fortios_wireless_controller_ap_status:
+ redirect: fortinet.fortios.fortios_wireless_controller_ap_status
+ fortios_wireless_controller_ble_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_ble_profile
+ fortios_wireless_controller_bonjour_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_bonjour_profile
+ fortios_wireless_controller_global:
+ redirect: fortinet.fortios.fortios_wireless_controller_global
+ fortios_wireless_controller_hotspot20_anqp_3gpp_cellular:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_3gpp_cellular
+ fortios_wireless_controller_hotspot20_anqp_ip_address_type:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_ip_address_type
+ fortios_wireless_controller_hotspot20_anqp_nai_realm:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_nai_realm
+ fortios_wireless_controller_hotspot20_anqp_network_auth_type:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_network_auth_type
+ fortios_wireless_controller_hotspot20_anqp_roaming_consortium:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_roaming_consortium
+ fortios_wireless_controller_hotspot20_anqp_venue_name:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_anqp_venue_name
+ fortios_wireless_controller_hotspot20_h2qp_conn_capability:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_conn_capability
+ fortios_wireless_controller_hotspot20_h2qp_operator_name:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_operator_name
+ fortios_wireless_controller_hotspot20_h2qp_osu_provider:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_osu_provider
+ fortios_wireless_controller_hotspot20_h2qp_wan_metric:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_h2qp_wan_metric
+ fortios_wireless_controller_hotspot20_hs_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_hs_profile
+ fortios_wireless_controller_hotspot20_icon:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_icon
+ fortios_wireless_controller_hotspot20_qos_map:
+ redirect: fortinet.fortios.fortios_wireless_controller_hotspot20_qos_map
+ fortios_wireless_controller_inter_controller:
+ redirect: fortinet.fortios.fortios_wireless_controller_inter_controller
+ fortios_wireless_controller_qos_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_qos_profile
+ fortios_wireless_controller_setting:
+ redirect: fortinet.fortios.fortios_wireless_controller_setting
+ fortios_wireless_controller_timers:
+ redirect: fortinet.fortios.fortios_wireless_controller_timers
+ fortios_wireless_controller_utm_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_utm_profile
+ fortios_wireless_controller_vap:
+ redirect: fortinet.fortios.fortios_wireless_controller_vap
+ fortios_wireless_controller_vap_group:
+ redirect: fortinet.fortios.fortios_wireless_controller_vap_group
+ fortios_wireless_controller_wids_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_wids_profile
+ fortios_wireless_controller_wtp:
+ redirect: fortinet.fortios.fortios_wireless_controller_wtp
+ fortios_wireless_controller_wtp_group:
+ redirect: fortinet.fortios.fortios_wireless_controller_wtp_group
+ fortios_wireless_controller_wtp_profile:
+ redirect: fortinet.fortios.fortios_wireless_controller_wtp_profile
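+ # Entire vendor namespaces such as fortios_* above move wholesale into a
+ # single collection. One way to confirm where a short name resolves,
+ # assuming ansible-core 2.10+ with fortinet.fortios installed (the module
+ # picked here is just an example from this list):
+ #
+ #   $ ansible-doc -t module fortios_facts
+ #
+ # prints the documentation of fortinet.fortios.fortios_facts, showing that
+ # the redirect was followed.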
+ netbox_device:
+ redirect: netbox.netbox.netbox_device
+ netbox_ip_address:
+ redirect: netbox.netbox.netbox_ip_address
+ netbox_interface:
+ redirect: netbox.netbox.netbox_interface
+ netbox_prefix:
+ redirect: netbox.netbox.netbox_prefix
+ netbox_site:
+ redirect: netbox.netbox.netbox_site
+ aws_netapp_cvs_FileSystems:
+ redirect: netapp.aws.aws_netapp_cvs_filesystems
+ aws_netapp_cvs_active_directory:
+ redirect: netapp.aws.aws_netapp_cvs_active_directory
+ aws_netapp_cvs_pool:
+ redirect: netapp.aws.aws_netapp_cvs_pool
+ aws_netapp_cvs_snapshots:
+ redirect: netapp.aws.aws_netapp_cvs_snapshots
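+ # Redirects can rename as well as relocate: aws_netapp_cvs_FileSystems
+ # above resolves to the all-lowercase netapp.aws.aws_netapp_cvs_filesystems,
+ # so the mixed-case spelling survives only as a routed alias.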
+ na_elementsw_access_group:
+ redirect: netapp.elementsw.na_elementsw_access_group
+ na_elementsw_account:
+ redirect: netapp.elementsw.na_elementsw_account
+ na_elementsw_admin_users:
+ redirect: netapp.elementsw.na_elementsw_admin_users
+ na_elementsw_backup:
+ redirect: netapp.elementsw.na_elementsw_backup
+ na_elementsw_check_connections:
+ redirect: netapp.elementsw.na_elementsw_check_connections
+ na_elementsw_cluster:
+ redirect: netapp.elementsw.na_elementsw_cluster
+ na_elementsw_cluster_config:
+ redirect: netapp.elementsw.na_elementsw_cluster_config
+ na_elementsw_cluster_pair:
+ redirect: netapp.elementsw.na_elementsw_cluster_pair
+ na_elementsw_cluster_snmp:
+ redirect: netapp.elementsw.na_elementsw_cluster_snmp
+ na_elementsw_drive:
+ redirect: netapp.elementsw.na_elementsw_drive
+ na_elementsw_initiators:
+ redirect: netapp.elementsw.na_elementsw_initiators
+ na_elementsw_ldap:
+ redirect: netapp.elementsw.na_elementsw_ldap
+ na_elementsw_network_interfaces:
+ redirect: netapp.elementsw.na_elementsw_network_interfaces
+ na_elementsw_node:
+ redirect: netapp.elementsw.na_elementsw_node
+ na_elementsw_snapshot:
+ redirect: netapp.elementsw.na_elementsw_snapshot
+ na_elementsw_snapshot_restore:
+ redirect: netapp.elementsw.na_elementsw_snapshot_restore
+ na_elementsw_snapshot_schedule:
+ redirect: netapp.elementsw.na_elementsw_snapshot_schedule
+ na_elementsw_vlan:
+ redirect: netapp.elementsw.na_elementsw_vlan
+ na_elementsw_volume:
+ redirect: netapp.elementsw.na_elementsw_volume
+ na_elementsw_volume_clone:
+ redirect: netapp.elementsw.na_elementsw_volume_clone
+ na_elementsw_volume_pair:
+ redirect: netapp.elementsw.na_elementsw_volume_pair
+ na_ontap_aggregate:
+ redirect: netapp.ontap.na_ontap_aggregate
+ na_ontap_autosupport:
+ redirect: netapp.ontap.na_ontap_autosupport
+ na_ontap_broadcast_domain:
+ redirect: netapp.ontap.na_ontap_broadcast_domain
+ na_ontap_broadcast_domain_ports:
+ redirect: netapp.ontap.na_ontap_broadcast_domain_ports
+ na_ontap_cg_snapshot:
+ redirect: netapp.ontap.na_ontap_cg_snapshot
+ na_ontap_cifs:
+ redirect: netapp.ontap.na_ontap_cifs
+ na_ontap_cifs_acl:
+ redirect: netapp.ontap.na_ontap_cifs_acl
+ na_ontap_cifs_server:
+ redirect: netapp.ontap.na_ontap_cifs_server
+ na_ontap_cluster:
+ redirect: netapp.ontap.na_ontap_cluster
+ na_ontap_cluster_ha:
+ redirect: netapp.ontap.na_ontap_cluster_ha
+ na_ontap_cluster_peer:
+ redirect: netapp.ontap.na_ontap_cluster_peer
+ na_ontap_command:
+ redirect: netapp.ontap.na_ontap_command
+ na_ontap_disks:
+ redirect: netapp.ontap.na_ontap_disks
+ na_ontap_dns:
+ redirect: netapp.ontap.na_ontap_dns
+ na_ontap_export_policy:
+ redirect: netapp.ontap.na_ontap_export_policy
+ na_ontap_export_policy_rule:
+ redirect: netapp.ontap.na_ontap_export_policy_rule
+ na_ontap_fcp:
+ redirect: netapp.ontap.na_ontap_fcp
+ na_ontap_firewall_policy:
+ redirect: netapp.ontap.na_ontap_firewall_policy
+ na_ontap_firmware_upgrade:
+ redirect: netapp.ontap.na_ontap_firmware_upgrade
+ na_ontap_flexcache:
+ redirect: netapp.ontap.na_ontap_flexcache
+ na_ontap_igroup:
+ redirect: netapp.ontap.na_ontap_igroup
+ na_ontap_igroup_initiator:
+ redirect: netapp.ontap.na_ontap_igroup_initiator
+ na_ontap_info:
+ redirect: netapp.ontap.na_ontap_info
+ na_ontap_interface:
+ redirect: netapp.ontap.na_ontap_interface
+ na_ontap_ipspace:
+ redirect: netapp.ontap.na_ontap_ipspace
+ na_ontap_iscsi:
+ redirect: netapp.ontap.na_ontap_iscsi
+ na_ontap_job_schedule:
+ redirect: netapp.ontap.na_ontap_job_schedule
+ na_ontap_kerberos_realm:
+ redirect: netapp.ontap.na_ontap_kerberos_realm
+ na_ontap_ldap:
+ redirect: netapp.ontap.na_ontap_ldap
+ na_ontap_ldap_client:
+ redirect: netapp.ontap.na_ontap_ldap_client
+ na_ontap_license:
+ redirect: netapp.ontap.na_ontap_license
+ na_ontap_lun:
+ redirect: netapp.ontap.na_ontap_lun
+ na_ontap_lun_copy:
+ redirect: netapp.ontap.na_ontap_lun_copy
+ na_ontap_lun_map:
+ redirect: netapp.ontap.na_ontap_lun_map
+ na_ontap_motd:
+ redirect: netapp.ontap.na_ontap_motd
+ na_ontap_ndmp:
+ redirect: netapp.ontap.na_ontap_ndmp
+ na_ontap_net_ifgrp:
+ redirect: netapp.ontap.na_ontap_net_ifgrp
+ na_ontap_net_port:
+ redirect: netapp.ontap.na_ontap_net_port
+ na_ontap_net_routes:
+ redirect: netapp.ontap.na_ontap_net_routes
+ na_ontap_net_subnet:
+ redirect: netapp.ontap.na_ontap_net_subnet
+ na_ontap_net_vlan:
+ redirect: netapp.ontap.na_ontap_net_vlan
+ na_ontap_nfs:
+ redirect: netapp.ontap.na_ontap_nfs
+ na_ontap_node:
+ redirect: netapp.ontap.na_ontap_node
+ na_ontap_ntp:
+ redirect: netapp.ontap.na_ontap_ntp
+ na_ontap_nvme:
+ redirect: netapp.ontap.na_ontap_nvme
+ na_ontap_nvme_namespace:
+ redirect: netapp.ontap.na_ontap_nvme_namespace
+ na_ontap_nvme_subsystem:
+ redirect: netapp.ontap.na_ontap_nvme_subsystem
+ na_ontap_object_store:
+ redirect: netapp.ontap.na_ontap_object_store
+ na_ontap_ports:
+ redirect: netapp.ontap.na_ontap_ports
+ na_ontap_portset:
+ redirect: netapp.ontap.na_ontap_portset
+ na_ontap_qos_adaptive_policy_group:
+ redirect: netapp.ontap.na_ontap_qos_adaptive_policy_group
+ na_ontap_qos_policy_group:
+ redirect: netapp.ontap.na_ontap_qos_policy_group
+ na_ontap_qtree:
+ redirect: netapp.ontap.na_ontap_qtree
+ na_ontap_quotas:
+ redirect: netapp.ontap.na_ontap_quotas
+ na_ontap_security_key_manager:
+ redirect: netapp.ontap.na_ontap_security_key_manager
+ na_ontap_service_processor_network:
+ redirect: netapp.ontap.na_ontap_service_processor_network
+ na_ontap_snapmirror:
+ redirect: netapp.ontap.na_ontap_snapmirror
+ na_ontap_snapshot:
+ redirect: netapp.ontap.na_ontap_snapshot
+ na_ontap_snapshot_policy:
+ redirect: netapp.ontap.na_ontap_snapshot_policy
+ na_ontap_snmp:
+ redirect: netapp.ontap.na_ontap_snmp
+ na_ontap_software_update:
+ redirect: netapp.ontap.na_ontap_software_update
+ na_ontap_svm:
+ redirect: netapp.ontap.na_ontap_svm
+ na_ontap_svm_options:
+ redirect: netapp.ontap.na_ontap_svm_options
+ na_ontap_ucadapter:
+ redirect: netapp.ontap.na_ontap_ucadapter
+ na_ontap_unix_group:
+ redirect: netapp.ontap.na_ontap_unix_group
+ na_ontap_unix_user:
+ redirect: netapp.ontap.na_ontap_unix_user
+ na_ontap_user:
+ redirect: netapp.ontap.na_ontap_user
+ na_ontap_user_role:
+ redirect: netapp.ontap.na_ontap_user_role
+ na_ontap_volume:
+ redirect: netapp.ontap.na_ontap_volume
+ na_ontap_volume_autosize:
+ redirect: netapp.ontap.na_ontap_volume_autosize
+ na_ontap_volume_clone:
+ redirect: netapp.ontap.na_ontap_volume_clone
+ na_ontap_vscan:
+ redirect: netapp.ontap.na_ontap_vscan
+ na_ontap_vscan_on_access_policy:
+ redirect: netapp.ontap.na_ontap_vscan_on_access_policy
+ na_ontap_vscan_on_demand_task:
+ redirect: netapp.ontap.na_ontap_vscan_on_demand_task
+ na_ontap_vscan_scanner_pool:
+ redirect: netapp.ontap.na_ontap_vscan_scanner_pool
+ na_ontap_vserver_cifs_security:
+ redirect: netapp.ontap.na_ontap_vserver_cifs_security
+ na_ontap_vserver_peer:
+ redirect: netapp.ontap.na_ontap_vserver_peer
+ cp_mgmt_access_layer:
+ redirect: check_point.mgmt.cp_mgmt_access_layer
+ cp_mgmt_access_layer_facts:
+ redirect: check_point.mgmt.cp_mgmt_access_layer_facts
+ cp_mgmt_access_role:
+ redirect: check_point.mgmt.cp_mgmt_access_role
+ cp_mgmt_access_role_facts:
+ redirect: check_point.mgmt.cp_mgmt_access_role_facts
+ cp_mgmt_access_rule:
+ redirect: check_point.mgmt.cp_mgmt_access_rule
+ cp_mgmt_access_rule_facts:
+ redirect: check_point.mgmt.cp_mgmt_access_rule_facts
+ cp_mgmt_address_range:
+ redirect: check_point.mgmt.cp_mgmt_address_range
+ cp_mgmt_address_range_facts:
+ redirect: check_point.mgmt.cp_mgmt_address_range_facts
+ cp_mgmt_administrator:
+ redirect: check_point.mgmt.cp_mgmt_administrator
+ cp_mgmt_administrator_facts:
+ redirect: check_point.mgmt.cp_mgmt_administrator_facts
+ cp_mgmt_application_site:
+ redirect: check_point.mgmt.cp_mgmt_application_site
+ cp_mgmt_application_site_category:
+ redirect: check_point.mgmt.cp_mgmt_application_site_category
+ cp_mgmt_application_site_category_facts:
+ redirect: check_point.mgmt.cp_mgmt_application_site_category_facts
+ cp_mgmt_application_site_facts:
+ redirect: check_point.mgmt.cp_mgmt_application_site_facts
+ cp_mgmt_application_site_group:
+ redirect: check_point.mgmt.cp_mgmt_application_site_group
+ cp_mgmt_application_site_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_application_site_group_facts
+ cp_mgmt_assign_global_assignment:
+ redirect: check_point.mgmt.cp_mgmt_assign_global_assignment
+ cp_mgmt_discard:
+ redirect: check_point.mgmt.cp_mgmt_discard
+ cp_mgmt_dns_domain:
+ redirect: check_point.mgmt.cp_mgmt_dns_domain
+ cp_mgmt_dns_domain_facts:
+ redirect: check_point.mgmt.cp_mgmt_dns_domain_facts
+ cp_mgmt_dynamic_object:
+ redirect: check_point.mgmt.cp_mgmt_dynamic_object
+ cp_mgmt_dynamic_object_facts:
+ redirect: check_point.mgmt.cp_mgmt_dynamic_object_facts
+ cp_mgmt_exception_group:
+ redirect: check_point.mgmt.cp_mgmt_exception_group
+ cp_mgmt_exception_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_exception_group_facts
+ cp_mgmt_global_assignment:
+ redirect: check_point.mgmt.cp_mgmt_global_assignment
+ cp_mgmt_global_assignment_facts:
+ redirect: check_point.mgmt.cp_mgmt_global_assignment_facts
+ cp_mgmt_group:
+ redirect: check_point.mgmt.cp_mgmt_group
+ cp_mgmt_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_group_facts
+ cp_mgmt_group_with_exclusion:
+ redirect: check_point.mgmt.cp_mgmt_group_with_exclusion
+ cp_mgmt_group_with_exclusion_facts:
+ redirect: check_point.mgmt.cp_mgmt_group_with_exclusion_facts
+ cp_mgmt_host:
+ redirect: check_point.mgmt.cp_mgmt_host
+ cp_mgmt_host_facts:
+ redirect: check_point.mgmt.cp_mgmt_host_facts
+ cp_mgmt_install_policy:
+ redirect: check_point.mgmt.cp_mgmt_install_policy
+ cp_mgmt_mds_facts:
+ redirect: check_point.mgmt.cp_mgmt_mds_facts
+ cp_mgmt_multicast_address_range:
+ redirect: check_point.mgmt.cp_mgmt_multicast_address_range
+ cp_mgmt_multicast_address_range_facts:
+ redirect: check_point.mgmt.cp_mgmt_multicast_address_range_facts
+ cp_mgmt_network:
+ redirect: check_point.mgmt.cp_mgmt_network
+ cp_mgmt_network_facts:
+ redirect: check_point.mgmt.cp_mgmt_network_facts
+ cp_mgmt_package:
+ redirect: check_point.mgmt.cp_mgmt_package
+ cp_mgmt_package_facts:
+ redirect: check_point.mgmt.cp_mgmt_package_facts
+ cp_mgmt_publish:
+ redirect: check_point.mgmt.cp_mgmt_publish
+ cp_mgmt_put_file:
+ redirect: check_point.mgmt.cp_mgmt_put_file
+ cp_mgmt_run_ips_update:
+ redirect: check_point.mgmt.cp_mgmt_run_ips_update
+ cp_mgmt_run_script:
+ redirect: check_point.mgmt.cp_mgmt_run_script
+ cp_mgmt_security_zone:
+ redirect: check_point.mgmt.cp_mgmt_security_zone
+ cp_mgmt_security_zone_facts:
+ redirect: check_point.mgmt.cp_mgmt_security_zone_facts
+ cp_mgmt_service_dce_rpc:
+ redirect: check_point.mgmt.cp_mgmt_service_dce_rpc
+ cp_mgmt_service_dce_rpc_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_dce_rpc_facts
+ cp_mgmt_service_group:
+ redirect: check_point.mgmt.cp_mgmt_service_group
+ cp_mgmt_service_group_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_group_facts
+ cp_mgmt_service_icmp:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp
+ cp_mgmt_service_icmp6:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp6
+ cp_mgmt_service_icmp6_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp6_facts
+ cp_mgmt_service_icmp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_icmp_facts
+ cp_mgmt_service_other:
+ redirect: check_point.mgmt.cp_mgmt_service_other
+ cp_mgmt_service_other_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_other_facts
+ cp_mgmt_service_rpc:
+ redirect: check_point.mgmt.cp_mgmt_service_rpc
+ cp_mgmt_service_rpc_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_rpc_facts
+ cp_mgmt_service_sctp:
+ redirect: check_point.mgmt.cp_mgmt_service_sctp
+ cp_mgmt_service_sctp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_sctp_facts
+ cp_mgmt_service_tcp:
+ redirect: check_point.mgmt.cp_mgmt_service_tcp
+ cp_mgmt_service_tcp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_tcp_facts
+ cp_mgmt_service_udp:
+ redirect: check_point.mgmt.cp_mgmt_service_udp
+ cp_mgmt_service_udp_facts:
+ redirect: check_point.mgmt.cp_mgmt_service_udp_facts
+ cp_mgmt_session_facts:
+ redirect: check_point.mgmt.cp_mgmt_session_facts
+ cp_mgmt_simple_gateway:
+ redirect: check_point.mgmt.cp_mgmt_simple_gateway
+ cp_mgmt_simple_gateway_facts:
+ redirect: check_point.mgmt.cp_mgmt_simple_gateway_facts
+ cp_mgmt_tag:
+ redirect: check_point.mgmt.cp_mgmt_tag
+ cp_mgmt_tag_facts:
+ redirect: check_point.mgmt.cp_mgmt_tag_facts
+ cp_mgmt_threat_exception:
+ redirect: check_point.mgmt.cp_mgmt_threat_exception
+ cp_mgmt_threat_exception_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_exception_facts
+ cp_mgmt_threat_indicator:
+ redirect: check_point.mgmt.cp_mgmt_threat_indicator
+ cp_mgmt_threat_indicator_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_indicator_facts
+ cp_mgmt_threat_layer:
+ redirect: check_point.mgmt.cp_mgmt_threat_layer
+ cp_mgmt_threat_layer_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_layer_facts
+ cp_mgmt_threat_profile:
+ redirect: check_point.mgmt.cp_mgmt_threat_profile
+ cp_mgmt_threat_profile_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_profile_facts
+ cp_mgmt_threat_protection_override:
+ redirect: check_point.mgmt.cp_mgmt_threat_protection_override
+ cp_mgmt_threat_rule:
+ redirect: check_point.mgmt.cp_mgmt_threat_rule
+ cp_mgmt_threat_rule_facts:
+ redirect: check_point.mgmt.cp_mgmt_threat_rule_facts
+ cp_mgmt_time:
+ redirect: check_point.mgmt.cp_mgmt_time
+ cp_mgmt_time_facts:
+ redirect: check_point.mgmt.cp_mgmt_time_facts
+ cp_mgmt_verify_policy:
+ redirect: check_point.mgmt.cp_mgmt_verify_policy
+ cp_mgmt_vpn_community_meshed:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_meshed
+ cp_mgmt_vpn_community_meshed_facts:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_meshed_facts
+ cp_mgmt_vpn_community_star:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_star
+ cp_mgmt_vpn_community_star_facts:
+ redirect: check_point.mgmt.cp_mgmt_vpn_community_star_facts
+ cp_mgmt_wildcard:
+ redirect: check_point.mgmt.cp_mgmt_wildcard
+ cp_mgmt_wildcard_facts:
+ redirect: check_point.mgmt.cp_mgmt_wildcard_facts
+ eos_ospfv2:
+ redirect: arista.eos.eos_ospfv2
+ eos_static_route:
+ redirect: arista.eos.eos_static_route
+ eos_acls:
+ redirect: arista.eos.eos_acls
+ eos_interfaces:
+ redirect: arista.eos.eos_interfaces
+ eos_facts:
+ redirect: arista.eos.eos_facts
+ eos_logging:
+ redirect: arista.eos.eos_logging
+ eos_lag_interfaces:
+ redirect: arista.eos.eos_lag_interfaces
+ eos_l2_interfaces:
+ redirect: arista.eos.eos_l2_interfaces
+ eos_l3_interface:
+ redirect: arista.eos.eos_l3_interface
+ eos_lacp:
+ redirect: arista.eos.eos_lacp
+ eos_lldp_global:
+ redirect: arista.eos.eos_lldp_global
+ eos_static_routes:
+ redirect: arista.eos.eos_static_routes
+ eos_lacp_interfaces:
+ redirect: arista.eos.eos_lacp_interfaces
+ eos_system:
+ redirect: arista.eos.eos_system
+ eos_vlan:
+ redirect: arista.eos.eos_vlan
+ eos_eapi:
+ redirect: arista.eos.eos_eapi
+ eos_acl_interfaces:
+ redirect: arista.eos.eos_acl_interfaces
+ eos_l2_interface:
+ redirect: arista.eos.eos_l2_interface
+ eos_lldp_interfaces:
+ redirect: arista.eos.eos_lldp_interfaces
+ eos_command:
+ redirect: arista.eos.eos_command
+ eos_linkagg:
+ redirect: arista.eos.eos_linkagg
+ eos_l3_interfaces:
+ redirect: arista.eos.eos_l3_interfaces
+ eos_vlans:
+ redirect: arista.eos.eos_vlans
+ eos_user:
+ redirect: arista.eos.eos_user
+ eos_banner:
+ redirect: arista.eos.eos_banner
+ eos_lldp:
+ redirect: arista.eos.eos_lldp
+ eos_interface:
+ redirect: arista.eos.eos_interface
+ eos_config:
+ redirect: arista.eos.eos_config
+ eos_bgp:
+ redirect: arista.eos.eos_bgp
+ eos_vrf:
+ redirect: arista.eos.eos_vrf
+ aci_aaa_user:
+ redirect: cisco.aci.aci_aaa_user
+ aci_aaa_user_certificate:
+ redirect: cisco.aci.aci_aaa_user_certificate
+ aci_access_port_block_to_access_port:
+ redirect: cisco.aci.aci_access_port_block_to_access_port
+ aci_access_port_to_interface_policy_leaf_profile:
+ redirect: cisco.aci.aci_access_port_to_interface_policy_leaf_profile
+ aci_access_sub_port_block_to_access_port:
+ redirect: cisco.aci.aci_access_sub_port_block_to_access_port
+ aci_aep:
+ redirect: cisco.aci.aci_aep
+ aci_aep_to_domain:
+ redirect: cisco.aci.aci_aep_to_domain
+ aci_ap:
+ redirect: cisco.aci.aci_ap
+ aci_bd:
+ redirect: cisco.aci.aci_bd
+ aci_bd_subnet:
+ redirect: cisco.aci.aci_bd_subnet
+ aci_bd_to_l3out:
+ redirect: cisco.aci.aci_bd_to_l3out
+ aci_config_rollback:
+ redirect: cisco.aci.aci_config_rollback
+ aci_config_snapshot:
+ redirect: cisco.aci.aci_config_snapshot
+ aci_contract:
+ redirect: cisco.aci.aci_contract
+ aci_contract_subject:
+ redirect: cisco.aci.aci_contract_subject
+ aci_contract_subject_to_filter:
+ redirect: cisco.aci.aci_contract_subject_to_filter
+ aci_domain:
+ redirect: cisco.aci.aci_domain
+ aci_domain_to_encap_pool:
+ redirect: cisco.aci.aci_domain_to_encap_pool
+ aci_domain_to_vlan_pool:
+ redirect: cisco.aci.aci_domain_to_vlan_pool
+ aci_encap_pool:
+ redirect: cisco.aci.aci_encap_pool
+ aci_encap_pool_range:
+ redirect: cisco.aci.aci_encap_pool_range
+ aci_epg:
+ redirect: cisco.aci.aci_epg
+ aci_epg_monitoring_policy:
+ redirect: cisco.aci.aci_epg_monitoring_policy
+ aci_epg_to_contract:
+ redirect: cisco.aci.aci_epg_to_contract
+ aci_epg_to_domain:
+ redirect: cisco.aci.aci_epg_to_domain
+ aci_fabric_node:
+ redirect: cisco.aci.aci_fabric_node
+ aci_fabric_scheduler:
+ redirect: cisco.aci.aci_fabric_scheduler
+ aci_filter:
+ redirect: cisco.aci.aci_filter
+ aci_filter_entry:
+ redirect: cisco.aci.aci_filter_entry
+ aci_firmware_group:
+ redirect: cisco.aci.aci_firmware_group
+ aci_firmware_group_node:
+ redirect: cisco.aci.aci_firmware_group_node
+ aci_firmware_policy:
+ redirect: cisco.aci.aci_firmware_policy
+ aci_firmware_source:
+ redirect: cisco.aci.aci_firmware_source
+ aci_interface_policy_cdp:
+ redirect: cisco.aci.aci_interface_policy_cdp
+ aci_interface_policy_fc:
+ redirect: cisco.aci.aci_interface_policy_fc
+ aci_interface_policy_l2:
+ redirect: cisco.aci.aci_interface_policy_l2
+ aci_interface_policy_leaf_policy_group:
+ redirect: cisco.aci.aci_interface_policy_leaf_policy_group
+ aci_interface_policy_leaf_profile:
+ redirect: cisco.aci.aci_interface_policy_leaf_profile
+ aci_interface_policy_lldp:
+ redirect: cisco.aci.aci_interface_policy_lldp
+ aci_interface_policy_mcp:
+ redirect: cisco.aci.aci_interface_policy_mcp
+ aci_interface_policy_ospf:
+ redirect: cisco.aci.aci_interface_policy_ospf
+ aci_interface_policy_port_channel:
+ redirect: cisco.aci.aci_interface_policy_port_channel
+ aci_interface_policy_port_security:
+ redirect: cisco.aci.aci_interface_policy_port_security
+ aci_interface_selector_to_switch_policy_leaf_profile:
+ redirect: cisco.aci.aci_interface_selector_to_switch_policy_leaf_profile
+ aci_l3out:
+ redirect: cisco.aci.aci_l3out
+ aci_l3out_extepg:
+ redirect: cisco.aci.aci_l3out_extepg
+ aci_l3out_extsubnet:
+ redirect: cisco.aci.aci_l3out_extsubnet
+ aci_l3out_route_tag_policy:
+ redirect: cisco.aci.aci_l3out_route_tag_policy
+ aci_maintenance_group:
+ redirect: cisco.aci.aci_maintenance_group
+ aci_maintenance_group_node:
+ redirect: cisco.aci.aci_maintenance_group_node
+ aci_maintenance_policy:
+ redirect: cisco.aci.aci_maintenance_policy
+ aci_rest:
+ redirect: cisco.aci.aci_rest
+ aci_static_binding_to_epg:
+ redirect: cisco.aci.aci_static_binding_to_epg
+ aci_switch_leaf_selector:
+ redirect: cisco.aci.aci_switch_leaf_selector
+ aci_switch_policy_leaf_profile:
+ redirect: cisco.aci.aci_switch_policy_leaf_profile
+ aci_switch_policy_vpc_protection_group:
+ redirect: cisco.aci.aci_switch_policy_vpc_protection_group
+ aci_taboo_contract:
+ redirect: cisco.aci.aci_taboo_contract
+ aci_tenant:
+ redirect: cisco.aci.aci_tenant
+ aci_tenant_action_rule_profile:
+ redirect: cisco.aci.aci_tenant_action_rule_profile
+ aci_tenant_ep_retention_policy:
+ redirect: cisco.aci.aci_tenant_ep_retention_policy
+ aci_tenant_span_dst_group:
+ redirect: cisco.aci.aci_tenant_span_dst_group
+ aci_tenant_span_src_group:
+ redirect: cisco.aci.aci_tenant_span_src_group
+ aci_tenant_span_src_group_to_dst_group:
+ redirect: cisco.aci.aci_tenant_span_src_group_to_dst_group
+ aci_vlan_pool:
+ redirect: cisco.aci.aci_vlan_pool
+ aci_vlan_pool_encap_block:
+ redirect: cisco.aci.aci_vlan_pool_encap_block
+ aci_vmm_credential:
+ redirect: cisco.aci.aci_vmm_credential
+ aci_vrf:
+ redirect: cisco.aci.aci_vrf
+ asa_acl:
+ redirect: cisco.asa.asa_acl
+ asa_config:
+ redirect: cisco.asa.asa_config
+ asa_og:
+ redirect: cisco.asa.asa_og
+ asa_command:
+ redirect: cisco.asa.asa_command
+ intersight_facts:
+ redirect: cisco.intersight.intersight_info
+ intersight_info:
+ redirect: cisco.intersight.intersight_info
+ intersight_rest_api:
+ redirect: cisco.intersight.intersight_rest_api
+ ios_ospfv2:
+ redirect: cisco.ios.ios_ospfv2
+ ios_l3_interfaces:
+ redirect: cisco.ios.ios_l3_interfaces
+ ios_lldp:
+ redirect: cisco.ios.ios_lldp
+ ios_interface:
+ redirect: cisco.ios.ios_interface
+ ios_lldp_interfaces:
+ redirect: cisco.ios.ios_lldp_interfaces
+ ios_l3_interface:
+ redirect: cisco.ios.ios_l3_interface
+ ios_acl_interfaces:
+ redirect: cisco.ios.ios_acl_interfaces
+ ios_static_routes:
+ redirect: cisco.ios.ios_static_routes
+ ios_l2_interfaces:
+ redirect: cisco.ios.ios_l2_interfaces
+ ios_logging:
+ redirect: cisco.ios.ios_logging
+ ios_vlan:
+ redirect: cisco.ios.ios_vlan
+ ios_command:
+ redirect: cisco.ios.ios_command
+ ios_static_route:
+ redirect: cisco.ios.ios_static_route
+ ios_lldp_global:
+ redirect: cisco.ios.ios_lldp_global
+ ios_banner:
+ redirect: cisco.ios.ios_banner
+ ios_lag_interfaces:
+ redirect: cisco.ios.ios_lag_interfaces
+ ios_linkagg:
+ redirect: cisco.ios.ios_linkagg
+ ios_user:
+ redirect: cisco.ios.ios_user
+ ios_system:
+ redirect: cisco.ios.ios_system
+ ios_facts:
+ redirect: cisco.ios.ios_facts
+ ios_ping:
+ redirect: cisco.ios.ios_ping
+ ios_vlans:
+ redirect: cisco.ios.ios_vlans
+ ios_vrf:
+ redirect: cisco.ios.ios_vrf
+ ios_bgp:
+ redirect: cisco.ios.ios_bgp
+ ios_ntp:
+ redirect: cisco.ios.ios_ntp
+ ios_lacp_interfaces:
+ redirect: cisco.ios.ios_lacp_interfaces
+ ios_lacp:
+ redirect: cisco.ios.ios_lacp
+ ios_config:
+ redirect: cisco.ios.ios_config
+ ios_l2_interface:
+ redirect: cisco.ios.ios_l2_interface
+ ios_acls:
+ redirect: cisco.ios.ios_acls
+ ios_interfaces:
+ redirect: cisco.ios.ios_interfaces
+ iosxr_ospfv2:
+ redirect: cisco.iosxr.iosxr_ospfv2
+ iosxr_bgp:
+ redirect: cisco.iosxr.iosxr_bgp
+ iosxr_lldp_interfaces:
+ redirect: cisco.iosxr.iosxr_lldp_interfaces
+ iosxr_l3_interfaces:
+ redirect: cisco.iosxr.iosxr_l3_interfaces
+ iosxr_netconf:
+ redirect: cisco.iosxr.iosxr_netconf
+ iosxr_static_routes:
+ redirect: cisco.iosxr.iosxr_static_routes
+ iosxr_lldp_global:
+ redirect: cisco.iosxr.iosxr_lldp_global
+ iosxr_config:
+ redirect: cisco.iosxr.iosxr_config
+ iosxr_lag_interfaces:
+ redirect: cisco.iosxr.iosxr_lag_interfaces
+ iosxr_interface:
+ redirect: cisco.iosxr.iosxr_interface
+ iosxr_user:
+ redirect: cisco.iosxr.iosxr_user
+ iosxr_facts:
+ redirect: cisco.iosxr.iosxr_facts
+ iosxr_interfaces:
+ redirect: cisco.iosxr.iosxr_interfaces
+ iosxr_acl_interfaces:
+ redirect: cisco.iosxr.iosxr_acl_interfaces
+ iosxr_l2_interfaces:
+ redirect: cisco.iosxr.iosxr_l2_interfaces
+ iosxr_logging:
+ redirect: cisco.iosxr.iosxr_logging
+ iosxr_lacp:
+ redirect: cisco.iosxr.iosxr_lacp
+ iosxr_acls:
+ redirect: cisco.iosxr.iosxr_acls
+ iosxr_system:
+ redirect: cisco.iosxr.iosxr_system
+ iosxr_command:
+ redirect: cisco.iosxr.iosxr_command
+ iosxr_lacp_interfaces:
+ redirect: cisco.iosxr.iosxr_lacp_interfaces
+ iosxr_banner:
+ redirect: cisco.iosxr.iosxr_banner
+ meraki_admin:
+ redirect: cisco.meraki.meraki_admin
+ meraki_config_template:
+ redirect: cisco.meraki.meraki_config_template
+ meraki_content_filtering:
+ redirect: cisco.meraki.meraki_content_filtering
+ meraki_device:
+ redirect: cisco.meraki.meraki_device
+ meraki_firewalled_services:
+ redirect: cisco.meraki.meraki_firewalled_services
+ meraki_malware:
+ redirect: cisco.meraki.meraki_malware
+ meraki_mr_l3_firewall:
+ redirect: cisco.meraki.meraki_mr_l3_firewall
+ meraki_mx_l3_firewall:
+ redirect: cisco.meraki.meraki_mx_l3_firewall
+ meraki_mx_l7_firewall:
+ redirect: cisco.meraki.meraki_mx_l7_firewall
+ meraki_nat:
+ redirect: cisco.meraki.meraki_nat
+ meraki_network:
+ redirect: cisco.meraki.meraki_network
+ meraki_organization:
+ redirect: cisco.meraki.meraki_organization
+ meraki_snmp:
+ redirect: cisco.meraki.meraki_snmp
+ meraki_ssid:
+ redirect: cisco.meraki.meraki_ssid
+ meraki_static_route:
+ redirect: cisco.meraki.meraki_static_route
+ meraki_switchport:
+ redirect: cisco.meraki.meraki_switchport
+ meraki_syslog:
+ redirect: cisco.meraki.meraki_syslog
+ meraki_vlan:
+ redirect: cisco.meraki.meraki_vlan
+ meraki_webhook:
+ redirect: cisco.meraki.meraki_webhook
+ mso_label:
+ redirect: cisco.mso.mso_label
+ mso_role:
+ redirect: cisco.mso.mso_role
+ mso_schema:
+ redirect: cisco.mso.mso_schema
+ mso_schema_site:
+ redirect: cisco.mso.mso_schema_site
+ mso_schema_site_anp:
+ redirect: cisco.mso.mso_schema_site_anp
+ mso_schema_site_anp_epg:
+ redirect: cisco.mso.mso_schema_site_anp_epg
+ mso_schema_site_anp_epg_domain:
+ redirect: cisco.mso.mso_schema_site_anp_epg_domain
+ mso_schema_site_anp_epg_staticleaf:
+ redirect: cisco.mso.mso_schema_site_anp_epg_staticleaf
+ mso_schema_site_anp_epg_staticport:
+ redirect: cisco.mso.mso_schema_site_anp_epg_staticport
+ mso_schema_site_anp_epg_subnet:
+ redirect: cisco.mso.mso_schema_site_anp_epg_subnet
+ mso_schema_site_bd:
+ redirect: cisco.mso.mso_schema_site_bd
+ mso_schema_site_bd_l3out:
+ redirect: cisco.mso.mso_schema_site_bd_l3out
+ mso_schema_site_bd_subnet:
+ redirect: cisco.mso.mso_schema_site_bd_subnet
+ mso_schema_site_vrf:
+ redirect: cisco.mso.mso_schema_site_vrf
+ mso_schema_site_vrf_region:
+ redirect: cisco.mso.mso_schema_site_vrf_region
+ mso_schema_site_vrf_region_cidr:
+ redirect: cisco.mso.mso_schema_site_vrf_region_cidr
+ mso_schema_site_vrf_region_cidr_subnet:
+ redirect: cisco.mso.mso_schema_site_vrf_region_cidr_subnet
+ mso_schema_template:
+ redirect: cisco.mso.mso_schema_template
+ mso_schema_template_anp:
+ redirect: cisco.mso.mso_schema_template_anp
+ mso_schema_template_anp_epg:
+ redirect: cisco.mso.mso_schema_template_anp_epg
+ mso_schema_template_anp_epg_contract:
+ redirect: cisco.mso.mso_schema_template_anp_epg_contract
+ mso_schema_template_anp_epg_subnet:
+ redirect: cisco.mso.mso_schema_template_anp_epg_subnet
+ mso_schema_template_bd:
+ redirect: cisco.mso.mso_schema_template_bd
+ mso_schema_template_bd_subnet:
+ redirect: cisco.mso.mso_schema_template_bd_subnet
+ mso_schema_template_contract_filter:
+ redirect: cisco.mso.mso_schema_template_contract_filter
+ mso_schema_template_deploy:
+ redirect: cisco.mso.mso_schema_template_deploy
+ mso_schema_template_externalepg:
+ redirect: cisco.mso.mso_schema_template_externalepg
+ mso_schema_template_filter_entry:
+ redirect: cisco.mso.mso_schema_template_filter_entry
+ mso_schema_template_l3out:
+ redirect: cisco.mso.mso_schema_template_l3out
+ mso_schema_template_vrf:
+ redirect: cisco.mso.mso_schema_template_vrf
+ mso_site:
+ redirect: cisco.mso.mso_site
+ mso_tenant:
+ redirect: cisco.mso.mso_tenant
+ mso_user:
+ redirect: cisco.mso.mso_user
+ nxos_telemetry:
+ redirect: cisco.nxos.nxos_telemetry
+ nxos_user:
+ redirect: cisco.nxos.nxos_user
+ nxos_bfd_interfaces:
+ redirect: cisco.nxos.nxos_bfd_interfaces
+ nxos_ospf:
+ redirect: cisco.nxos.nxos_ospf
+ nxos_ospfv2:
+ redirect: cisco.nxos.nxos_ospfv2
+ nxos_system:
+ redirect: cisco.nxos.nxos_system
+ nxos_l3_interface:
+ redirect: cisco.nxos.nxos_l3_interface
+ nxos_smu:
+ redirect: cisco.nxos.nxos_smu
+ nxos_reboot:
+ redirect: cisco.nxos.nxos_reboot
+ nxos_static_routes:
+ redirect: cisco.nxos.nxos_static_routes
+ nxos_static_route:
+ redirect: cisco.nxos.nxos_static_route
+ nxos_acl_interfaces:
+ redirect: cisco.nxos.nxos_acl_interfaces
+ nxos_vpc:
+ redirect: cisco.nxos.nxos_vpc
+ nxos_linkagg:
+ redirect: cisco.nxos.nxos_linkagg
+ nxos_vxlan_vtep_vni:
+ redirect: cisco.nxos.nxos_vxlan_vtep_vni
+ nxos_vrrp:
+ redirect: cisco.nxos.nxos_vrrp
+ nxos_lldp:
+ redirect: cisco.nxos.nxos_lldp
+ nxos_interface:
+ redirect: cisco.nxos.nxos_interface
+ nxos_lacp_interfaces:
+ redirect: cisco.nxos.nxos_lacp_interfaces
+ nxos_gir_profile_management:
+ redirect: cisco.nxos.nxos_gir_profile_management
+ nxos_snmp_community:
+ redirect: cisco.nxos.nxos_snmp_community
+ nxos_lag_interfaces:
+ redirect: cisco.nxos.nxos_lag_interfaces
+ nxos_acl:
+ redirect: cisco.nxos.nxos_acl
+ nxos_hsrp_interfaces:
+ redirect: cisco.nxos.nxos_hsrp_interfaces
+ nxos_lldp_global:
+ redirect: cisco.nxos.nxos_lldp_global
+ nxos_snmp_contact:
+ redirect: cisco.nxos.nxos_snmp_contact
+ nxos_vrf_interface:
+ redirect: cisco.nxos.nxos_vrf_interface
+ nxos_rpm:
+ redirect: cisco.nxos.nxos_rpm
+ nxos_ntp_options:
+ redirect: cisco.nxos.nxos_ntp_options
+ nxos_ospf_vrf:
+ redirect: cisco.nxos.nxos_ospf_vrf
+ nxos_vtp_version:
+ redirect: cisco.nxos.nxos_vtp_version
+ nxos_igmp_interface:
+ redirect: cisco.nxos.nxos_igmp_interface
+ nxos_bgp_neighbor:
+ redirect: cisco.nxos.nxos_bgp_neighbor
+ nxos_bgp:
+ redirect: cisco.nxos.nxos_bgp
+ nxos_rollback:
+ redirect: cisco.nxos.nxos_rollback
+ nxos_aaa_server:
+ redirect: cisco.nxos.nxos_aaa_server
+ nxos_udld_interface:
+ redirect: cisco.nxos.nxos_udld_interface
+ nxos_bgp_af:
+ redirect: cisco.nxos.nxos_bgp_af
+ nxos_feature:
+ redirect: cisco.nxos.nxos_feature
+ nxos_snmp_traps:
+ redirect: cisco.nxos.nxos_snmp_traps
+ nxos_evpn_global:
+ redirect: cisco.nxos.nxos_evpn_global
+ nxos_igmp:
+ redirect: cisco.nxos.nxos_igmp
+ nxos_aaa_server_host:
+ redirect: cisco.nxos.nxos_aaa_server_host
+ nxos_vrf_af:
+ redirect: cisco.nxos.nxos_vrf_af
+ nxos_snapshot:
+ redirect: cisco.nxos.nxos_snapshot
+ nxos_gir:
+ redirect: cisco.nxos.nxos_gir
+ nxos_command:
+ redirect: cisco.nxos.nxos_command
+ nxos_vxlan_vtep:
+ redirect: cisco.nxos.nxos_vxlan_vtep
+ nxos_snmp_location:
+ redirect: cisco.nxos.nxos_snmp_location
+ nxos_evpn_vni:
+ redirect: cisco.nxos.nxos_evpn_vni
+ nxos_vpc_interface:
+ redirect: cisco.nxos.nxos_vpc_interface
+ nxos_logging:
+ redirect: cisco.nxos.nxos_logging
+ nxos_pim:
+ redirect: cisco.nxos.nxos_pim
+ nxos_ping:
+ redirect: cisco.nxos.nxos_ping
+ nxos_pim_rp_address:
+ redirect: cisco.nxos.nxos_pim_rp_address
+ nxos_pim_interface:
+ redirect: cisco.nxos.nxos_pim_interface
+ nxos_install_os:
+ redirect: cisco.nxos.nxos_install_os
+ nxos_nxapi:
+ redirect: cisco.nxos.nxos_nxapi
+ nxos_l2_interface:
+ redirect: cisco.nxos.nxos_l2_interface
+ nxos_bgp_neighbor_af:
+ redirect: cisco.nxos.nxos_bgp_neighbor_af
+ nxos_lacp:
+ redirect: cisco.nxos.nxos_lacp
+ nxos_lldp_interfaces:
+ redirect: cisco.nxos.nxos_lldp_interfaces
+ nxos_acl_interface:
+ redirect: cisco.nxos.nxos_acl_interface
+ nxos_vrf:
+ redirect: cisco.nxos.nxos_vrf
+ nxos_interface_ospf:
+ redirect: cisco.nxos.nxos_interface_ospf
+ nxos_acls:
+ redirect: cisco.nxos.nxos_acls
+ nxos_vtp_password:
+ redirect: cisco.nxos.nxos_vtp_password
+ nxos_l3_interfaces:
+ redirect: cisco.nxos.nxos_l3_interfaces
+ nxos_igmp_snooping:
+ redirect: cisco.nxos.nxos_igmp_snooping
+ nxos_banner:
+ redirect: cisco.nxos.nxos_banner
+ nxos_bfd_global:
+ redirect: cisco.nxos.nxos_bfd_global
+ nxos_udld:
+ redirect: cisco.nxos.nxos_udld
+ nxos_vtp_domain:
+ redirect: cisco.nxos.nxos_vtp_domain
+ nxos_snmp_host:
+ redirect: cisco.nxos.nxos_snmp_host
+ nxos_l2_interfaces:
+ redirect: cisco.nxos.nxos_l2_interfaces
+ nxos_hsrp:
+ redirect: cisco.nxos.nxos_hsrp
+ nxos_interfaces:
+ redirect: cisco.nxos.nxos_interfaces
+ nxos_overlay_global:
+ redirect: cisco.nxos.nxos_overlay_global
+ nxos_snmp_user:
+ redirect: cisco.nxos.nxos_snmp_user
+ nxos_vlans:
+ redirect: cisco.nxos.nxos_vlans
+ nxos_ntp:
+ redirect: cisco.nxos.nxos_ntp
+ nxos_file_copy:
+ redirect: cisco.nxos.nxos_file_copy
+ nxos_ntp_auth:
+ redirect: cisco.nxos.nxos_ntp_auth
+ nxos_config:
+ redirect: cisco.nxos.nxos_config
+ nxos_vlan:
+ redirect: cisco.nxos.nxos_vlan
+ nxos_facts:
+ redirect: cisco.nxos.nxos_facts
+ nxos_zone_zoneset:
+ redirect: cisco.nxos.nxos_zone_zoneset
+ nxos_vsan:
+ redirect: cisco.nxos.nxos_vsan
+ nxos_devicealias:
+ redirect: cisco.nxos.nxos_devicealias
+ ucs_managed_objects:
+ redirect: cisco.ucs.ucs_managed_objects
+ ucs_vnic_template:
+ redirect: cisco.ucs.ucs_vnic_template
+ ucs_query:
+ redirect: cisco.ucs.ucs_query
+ ucs_dns_server:
+ redirect: cisco.ucs.ucs_dns_server
+ ucs_lan_connectivity:
+ redirect: cisco.ucs.ucs_lan_connectivity
+ ucs_vhba_template:
+ redirect: cisco.ucs.ucs_vhba_template
+ ucs_san_connectivity:
+ redirect: cisco.ucs.ucs_san_connectivity
+ ucs_disk_group_policy:
+ redirect: cisco.ucs.ucs_disk_group_policy
+ ucs_uuid_pool:
+ redirect: cisco.ucs.ucs_uuid_pool
+ ucs_vlan_find:
+ redirect: cisco.ucs.ucs_vlan_find
+ ucs_vlans:
+ redirect: cisco.ucs.ucs_vlans
+ ucs_service_profile_template:
+ redirect: cisco.ucs.ucs_service_profile_template
+ ucs_ip_pool:
+ redirect: cisco.ucs.ucs_ip_pool
+ ucs_timezone:
+ redirect: cisco.ucs.ucs_timezone
+ ucs_ntp_server:
+ redirect: cisco.ucs.ucs_ntp_server
+ ucs_mac_pool:
+ redirect: cisco.ucs.ucs_mac_pool
+ ucs_storage_profile:
+ redirect: cisco.ucs.ucs_storage_profile
+ ucs_org:
+ redirect: cisco.ucs.ucs_org
+ ucs_vsans:
+ redirect: cisco.ucs.ucs_vsans
+ ucs_wwn_pool:
+ redirect: cisco.ucs.ucs_wwn_pool
+ bigip_apm_acl:
+ redirect: f5networks.f5_modules.bigip_apm_acl
+ bigip_apm_network_access:
+ redirect: f5networks.f5_modules.bigip_apm_network_access
+ bigip_apm_policy_fetch:
+ redirect: f5networks.f5_modules.bigip_apm_policy_fetch
+ bigip_apm_policy_import:
+ redirect: f5networks.f5_modules.bigip_apm_policy_import
+ bigip_appsvcs_extension:
+ redirect: f5networks.f5_modules.bigip_appsvcs_extension
+ bigip_asm_dos_application:
+ redirect: f5networks.f5_modules.bigip_asm_dos_application
+ bigip_asm_policy_fetch:
+ redirect: f5networks.f5_modules.bigip_asm_policy_fetch
+ bigip_asm_policy_import:
+ redirect: f5networks.f5_modules.bigip_asm_policy_import
+ bigip_asm_policy_manage:
+ redirect: f5networks.f5_modules.bigip_asm_policy_manage
+ bigip_asm_policy_server_technology:
+ redirect: f5networks.f5_modules.bigip_asm_policy_server_technology
+ bigip_asm_policy_signature_set:
+ redirect: f5networks.f5_modules.bigip_asm_policy_signature_set
+ bigip_cli_alias:
+ redirect: f5networks.f5_modules.bigip_cli_alias
+ bigip_cli_script:
+ redirect: f5networks.f5_modules.bigip_cli_script
+ bigip_command:
+ redirect: f5networks.f5_modules.bigip_command
+ bigip_config:
+ redirect: f5networks.f5_modules.bigip_config
+ bigip_configsync_action:
+ redirect: f5networks.f5_modules.bigip_configsync_action
+ bigip_data_group:
+ redirect: f5networks.f5_modules.bigip_data_group
+ bigip_device_auth:
+ redirect: f5networks.f5_modules.bigip_device_auth
+ bigip_device_auth_ldap:
+ redirect: f5networks.f5_modules.bigip_device_auth_ldap
+ bigip_device_certificate:
+ redirect: f5networks.f5_modules.bigip_device_certificate
+ bigip_device_connectivity:
+ redirect: f5networks.f5_modules.bigip_device_connectivity
+ bigip_device_dns:
+ redirect: f5networks.f5_modules.bigip_device_dns
+ bigip_device_group:
+ redirect: f5networks.f5_modules.bigip_device_group
+ bigip_device_group_member:
+ redirect: f5networks.f5_modules.bigip_device_group_member
+ bigip_device_ha_group:
+ redirect: f5networks.f5_modules.bigip_device_ha_group
+ bigip_device_httpd:
+ redirect: f5networks.f5_modules.bigip_device_httpd
+ bigip_device_info:
+ redirect: f5networks.f5_modules.bigip_device_info
+ bigip_device_license:
+ redirect: f5networks.f5_modules.bigip_device_license
+ bigip_device_ntp:
+ redirect: f5networks.f5_modules.bigip_device_ntp
+ bigip_device_sshd:
+ redirect: f5networks.f5_modules.bigip_device_sshd
+ bigip_device_syslog:
+ redirect: f5networks.f5_modules.bigip_device_syslog
+ bigip_device_traffic_group:
+ redirect: f5networks.f5_modules.bigip_device_traffic_group
+ bigip_device_trust:
+ redirect: f5networks.f5_modules.bigip_device_trust
+ bigip_dns_cache_resolver:
+ redirect: f5networks.f5_modules.bigip_dns_cache_resolver
+ bigip_dns_nameserver:
+ redirect: f5networks.f5_modules.bigip_dns_nameserver
+ bigip_dns_resolver:
+ redirect: f5networks.f5_modules.bigip_dns_resolver
+ bigip_dns_zone:
+ redirect: f5networks.f5_modules.bigip_dns_zone
+ bigip_file_copy:
+ redirect: f5networks.f5_modules.bigip_file_copy
+ bigip_firewall_address_list:
+ redirect: f5networks.f5_modules.bigip_firewall_address_list
+ bigip_firewall_dos_profile:
+ redirect: f5networks.f5_modules.bigip_firewall_dos_profile
+ bigip_firewall_dos_vector:
+ redirect: f5networks.f5_modules.bigip_firewall_dos_vector
+ bigip_firewall_global_rules:
+ redirect: f5networks.f5_modules.bigip_firewall_global_rules
+ bigip_firewall_log_profile:
+ redirect: f5networks.f5_modules.bigip_firewall_log_profile
+ bigip_firewall_log_profile_network:
+ redirect: f5networks.f5_modules.bigip_firewall_log_profile_network
+ bigip_firewall_policy:
+ redirect: f5networks.f5_modules.bigip_firewall_policy
+ bigip_firewall_port_list:
+ redirect: f5networks.f5_modules.bigip_firewall_port_list
+ bigip_firewall_rule:
+ redirect: f5networks.f5_modules.bigip_firewall_rule
+ bigip_firewall_rule_list:
+ redirect: f5networks.f5_modules.bigip_firewall_rule_list
+ bigip_firewall_schedule:
+ redirect: f5networks.f5_modules.bigip_firewall_schedule
+ bigip_gtm_datacenter:
+ redirect: f5networks.f5_modules.bigip_gtm_datacenter
+ bigip_gtm_global:
+ redirect: f5networks.f5_modules.bigip_gtm_global
+ bigip_gtm_monitor_bigip:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_bigip
+ bigip_gtm_monitor_external:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_external
+ bigip_gtm_monitor_firepass:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_firepass
+ bigip_gtm_monitor_http:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_http
+ bigip_gtm_monitor_https:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_https
+ bigip_gtm_monitor_tcp:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_tcp
+ bigip_gtm_monitor_tcp_half_open:
+ redirect: f5networks.f5_modules.bigip_gtm_monitor_tcp_half_open
+ bigip_gtm_pool:
+ redirect: f5networks.f5_modules.bigip_gtm_pool
+ bigip_gtm_pool_member:
+ redirect: f5networks.f5_modules.bigip_gtm_pool_member
+ bigip_gtm_server:
+ redirect: f5networks.f5_modules.bigip_gtm_server
+ bigip_gtm_topology_record:
+ redirect: f5networks.f5_modules.bigip_gtm_topology_record
+ bigip_gtm_topology_region:
+ redirect: f5networks.f5_modules.bigip_gtm_topology_region
+ bigip_gtm_virtual_server:
+ redirect: f5networks.f5_modules.bigip_gtm_virtual_server
+ bigip_gtm_wide_ip:
+ redirect: f5networks.f5_modules.bigip_gtm_wide_ip
+ bigip_hostname:
+ redirect: f5networks.f5_modules.bigip_hostname
+ bigip_iapp_service:
+ redirect: f5networks.f5_modules.bigip_iapp_service
+ bigip_iapp_template:
+ redirect: f5networks.f5_modules.bigip_iapp_template
+ bigip_ike_peer:
+ redirect: f5networks.f5_modules.bigip_ike_peer
+ bigip_imish_config:
+ redirect: f5networks.f5_modules.bigip_imish_config
+ bigip_ipsec_policy:
+ redirect: f5networks.f5_modules.bigip_ipsec_policy
+ bigip_irule:
+ redirect: f5networks.f5_modules.bigip_irule
+ bigip_log_destination:
+ redirect: f5networks.f5_modules.bigip_log_destination
+ bigip_log_publisher:
+ redirect: f5networks.f5_modules.bigip_log_publisher
+ bigip_lx_package:
+ redirect: f5networks.f5_modules.bigip_lx_package
+ bigip_management_route:
+ redirect: f5networks.f5_modules.bigip_management_route
+ bigip_message_routing_peer:
+ redirect: f5networks.f5_modules.bigip_message_routing_peer
+ bigip_message_routing_protocol:
+ redirect: f5networks.f5_modules.bigip_message_routing_protocol
+ bigip_message_routing_route:
+ redirect: f5networks.f5_modules.bigip_message_routing_route
+ bigip_message_routing_router:
+ redirect: f5networks.f5_modules.bigip_message_routing_router
+ bigip_message_routing_transport_config:
+ redirect: f5networks.f5_modules.bigip_message_routing_transport_config
+ bigip_monitor_dns:
+ redirect: f5networks.f5_modules.bigip_monitor_dns
+ bigip_monitor_external:
+ redirect: f5networks.f5_modules.bigip_monitor_external
+ bigip_monitor_gateway_icmp:
+ redirect: f5networks.f5_modules.bigip_monitor_gateway_icmp
+ bigip_monitor_http:
+ redirect: f5networks.f5_modules.bigip_monitor_http
+ bigip_monitor_https:
+ redirect: f5networks.f5_modules.bigip_monitor_https
+ bigip_monitor_ldap:
+ redirect: f5networks.f5_modules.bigip_monitor_ldap
+ bigip_monitor_snmp_dca:
+ redirect: f5networks.f5_modules.bigip_monitor_snmp_dca
+ bigip_monitor_tcp:
+ redirect: f5networks.f5_modules.bigip_monitor_tcp
+ bigip_monitor_tcp_echo:
+ redirect: f5networks.f5_modules.bigip_monitor_tcp_echo
+ bigip_monitor_tcp_half_open:
+ redirect: f5networks.f5_modules.bigip_monitor_tcp_half_open
+ bigip_monitor_udp:
+ redirect: f5networks.f5_modules.bigip_monitor_udp
+ bigip_node:
+ redirect: f5networks.f5_modules.bigip_node
+ bigip_partition:
+ redirect: f5networks.f5_modules.bigip_partition
+ bigip_password_policy:
+ redirect: f5networks.f5_modules.bigip_password_policy
+ bigip_policy:
+ redirect: f5networks.f5_modules.bigip_policy
+ bigip_policy_rule:
+ redirect: f5networks.f5_modules.bigip_policy_rule
+ bigip_pool:
+ redirect: f5networks.f5_modules.bigip_pool
+ bigip_pool_member:
+ redirect: f5networks.f5_modules.bigip_pool_member
+ bigip_profile_analytics:
+ redirect: f5networks.f5_modules.bigip_profile_analytics
+ bigip_profile_client_ssl:
+ redirect: f5networks.f5_modules.bigip_profile_client_ssl
+ bigip_profile_dns:
+ redirect: f5networks.f5_modules.bigip_profile_dns
+ bigip_profile_fastl4:
+ redirect: f5networks.f5_modules.bigip_profile_fastl4
+ bigip_profile_http:
+ redirect: f5networks.f5_modules.bigip_profile_http
+ bigip_profile_http2:
+ redirect: f5networks.f5_modules.bigip_profile_http2
+ bigip_profile_http_compression:
+ redirect: f5networks.f5_modules.bigip_profile_http_compression
+ bigip_profile_oneconnect:
+ redirect: f5networks.f5_modules.bigip_profile_oneconnect
+ bigip_profile_persistence_cookie:
+ redirect: f5networks.f5_modules.bigip_profile_persistence_cookie
+ bigip_profile_persistence_src_addr:
+ redirect: f5networks.f5_modules.bigip_profile_persistence_src_addr
+ bigip_profile_server_ssl:
+ redirect: f5networks.f5_modules.bigip_profile_server_ssl
+ bigip_profile_tcp:
+ redirect: f5networks.f5_modules.bigip_profile_tcp
+ bigip_profile_udp:
+ redirect: f5networks.f5_modules.bigip_profile_udp
+ bigip_provision:
+ redirect: f5networks.f5_modules.bigip_provision
+ bigip_qkview:
+ redirect: f5networks.f5_modules.bigip_qkview
+ bigip_remote_role:
+ redirect: f5networks.f5_modules.bigip_remote_role
+ bigip_remote_syslog:
+ redirect: f5networks.f5_modules.bigip_remote_syslog
+ bigip_remote_user:
+ redirect: f5networks.f5_modules.bigip_remote_user
+ bigip_routedomain:
+ redirect: f5networks.f5_modules.bigip_routedomain
+ bigip_selfip:
+ redirect: f5networks.f5_modules.bigip_selfip
+ bigip_service_policy:
+ redirect: f5networks.f5_modules.bigip_service_policy
+ bigip_smtp:
+ redirect: f5networks.f5_modules.bigip_smtp
+ bigip_snat_pool:
+ redirect: f5networks.f5_modules.bigip_snat_pool
+ bigip_snat_translation:
+ redirect: f5networks.f5_modules.bigip_snat_translation
+ bigip_snmp:
+ redirect: f5networks.f5_modules.bigip_snmp
+ bigip_snmp_community:
+ redirect: f5networks.f5_modules.bigip_snmp_community
+ bigip_snmp_trap:
+ redirect: f5networks.f5_modules.bigip_snmp_trap
+ bigip_software_image:
+ redirect: f5networks.f5_modules.bigip_software_image
+ bigip_software_install:
+ redirect: f5networks.f5_modules.bigip_software_install
+ bigip_software_update:
+ redirect: f5networks.f5_modules.bigip_software_update
+ bigip_ssl_certificate:
+ redirect: f5networks.f5_modules.bigip_ssl_certificate
+ bigip_ssl_key:
+ redirect: f5networks.f5_modules.bigip_ssl_key
+ bigip_ssl_ocsp:
+ redirect: f5networks.f5_modules.bigip_ssl_ocsp
+ bigip_static_route:
+ redirect: f5networks.f5_modules.bigip_static_route
+ bigip_sys_daemon_log_tmm:
+ redirect: f5networks.f5_modules.bigip_sys_daemon_log_tmm
+ bigip_sys_db:
+ redirect: f5networks.f5_modules.bigip_sys_db
+ bigip_sys_global:
+ redirect: f5networks.f5_modules.bigip_sys_global
+ bigip_timer_policy:
+ redirect: f5networks.f5_modules.bigip_timer_policy
+ bigip_traffic_selector:
+ redirect: f5networks.f5_modules.bigip_traffic_selector
+ bigip_trunk:
+ redirect: f5networks.f5_modules.bigip_trunk
+ bigip_tunnel:
+ redirect: f5networks.f5_modules.bigip_tunnel
+ bigip_ucs:
+ redirect: f5networks.f5_modules.bigip_ucs
+ bigip_ucs_fetch:
+ redirect: f5networks.f5_modules.bigip_ucs_fetch
+ bigip_user:
+ redirect: f5networks.f5_modules.bigip_user
+ bigip_vcmp_guest:
+ redirect: f5networks.f5_modules.bigip_vcmp_guest
+ bigip_virtual_address:
+ redirect: f5networks.f5_modules.bigip_virtual_address
+ bigip_virtual_server:
+ redirect: f5networks.f5_modules.bigip_virtual_server
+ bigip_vlan:
+ redirect: f5networks.f5_modules.bigip_vlan
+ bigip_wait:
+ redirect: f5networks.f5_modules.bigip_wait
+ bigiq_application_fasthttp:
+ redirect: f5networks.f5_modules.bigiq_application_fasthttp
+ bigiq_application_fastl4_tcp:
+ redirect: f5networks.f5_modules.bigiq_application_fastl4_tcp
+ bigiq_application_fastl4_udp:
+ redirect: f5networks.f5_modules.bigiq_application_fastl4_udp
+ bigiq_application_http:
+ redirect: f5networks.f5_modules.bigiq_application_http
+ bigiq_application_https_offload:
+ redirect: f5networks.f5_modules.bigiq_application_https_offload
+ bigiq_application_https_waf:
+ redirect: f5networks.f5_modules.bigiq_application_https_waf
+ bigiq_device_discovery:
+ redirect: f5networks.f5_modules.bigiq_device_discovery
+ bigiq_device_info:
+ redirect: f5networks.f5_modules.bigiq_device_info
+ bigiq_regkey_license:
+ redirect: f5networks.f5_modules.bigiq_regkey_license
+ bigiq_regkey_license_assignment:
+ redirect: f5networks.f5_modules.bigiq_regkey_license_assignment
+ bigiq_regkey_pool:
+ redirect: f5networks.f5_modules.bigiq_regkey_pool
+ bigiq_utility_license:
+ redirect: f5networks.f5_modules.bigiq_utility_license
+ bigiq_utility_license_assignment:
+ redirect: f5networks.f5_modules.bigiq_utility_license_assignment
+ os_auth:
+ redirect: openstack.cloud.auth
+ os_client_config:
+ redirect: openstack.cloud.config
+ os_coe_cluster:
+ redirect: openstack.cloud.coe_cluster
+ os_coe_cluster_template:
+ redirect: openstack.cloud.coe_cluster_template
+ os_flavor_info:
+ redirect: openstack.cloud.compute_flavor_info
+ os_floating_ip:
+ redirect: openstack.cloud.floating_ip
+ os_group:
+ redirect: openstack.cloud.identity_group
+ os_group_info:
+ redirect: openstack.cloud.identity_group_info
+ os_image:
+ redirect: openstack.cloud.image
+ os_image_info:
+ redirect: openstack.cloud.image_info
+ os_ironic:
+ redirect: openstack.cloud.baremetal_node
+ os_ironic_inspect:
+ redirect: openstack.cloud.baremetal_inspect
+ os_ironic_node:
+ redirect: openstack.cloud.baremetal_node_action
+ os_keypair:
+ redirect: openstack.cloud.keypair
+ os_keystone_domain:
+ redirect: openstack.cloud.identity_domain
+ os_keystone_domain_info:
+ redirect: openstack.cloud.identity_domain_info
+ os_keystone_endpoint:
+ redirect: openstack.cloud.endpoint
+ os_keystone_role:
+ redirect: openstack.cloud.identity_role
+ os_keystone_service:
+ redirect: openstack.cloud.catalog_service
+ os_listener:
+ redirect: openstack.cloud.lb_listener
+ os_loadbalancer:
+ redirect: openstack.cloud.loadbalancer
+ os_member:
+ redirect: openstack.cloud.lb_member
+ os_network:
+ redirect: openstack.cloud.network
+ os_networks_info:
+ redirect: openstack.cloud.networks_info
+ os_nova_flavor:
+ redirect: openstack.cloud.compute_flavor
+ os_nova_host_aggregate:
+ redirect: openstack.cloud.host_aggregate
+ os_object:
+ redirect: openstack.cloud.object
+ os_pool:
+ redirect: openstack.cloud.lb_pool
+ os_port:
+ redirect: openstack.cloud.port
+ os_port_info:
+ redirect: openstack.cloud.port_info
+ os_project:
+ redirect: openstack.cloud.project
+ os_project_access:
+ redirect: openstack.cloud.project_access
+ os_project_info:
+ redirect: openstack.cloud.project_info
+ os_quota:
+ redirect: openstack.cloud.quota
+ os_recordset:
+ redirect: openstack.cloud.recordset
+ os_router:
+ redirect: openstack.cloud.router
+ os_security_group:
+ redirect: openstack.cloud.security_group
+ os_security_group_rule:
+ redirect: openstack.cloud.security_group_rule
+ os_server:
+ redirect: openstack.cloud.server
+ os_server_action:
+ redirect: openstack.cloud.server_action
+ os_server_group:
+ redirect: openstack.cloud.server_group
+ os_server_info:
+ redirect: openstack.cloud.server_info
+ os_server_metadata:
+ redirect: openstack.cloud.server_metadata
+ os_server_volume:
+ redirect: openstack.cloud.server_volume
+ os_stack:
+ redirect: openstack.cloud.stack
+ os_subnet:
+ redirect: openstack.cloud.subnet
+ os_subnets_info:
+ redirect: openstack.cloud.subnets_info
+ os_user:
+ redirect: openstack.cloud.identity_user
+ os_user_group:
+ redirect: openstack.cloud.group_assignment
+ os_user_info:
+ redirect: openstack.cloud.identity_user_info
+ os_user_role:
+ redirect: openstack.cloud.role_assignment
+ os_volume:
+ redirect: openstack.cloud.volume
+ os_volume_snapshot:
+ redirect: openstack.cloud.volume_snapshot
+ os_zone:
+ redirect: openstack.cloud.dns_zone
+ junos_acls:
+ redirect: junipernetworks.junos.junos_acls
+ junos_acl_interfaces:
+ redirect: junipernetworks.junos.junos_acl_interfaces
+ junos_ospfv2:
+ redirect: junipernetworks.junos.junos_ospfv2
+ junos_user:
+ redirect: junipernetworks.junos.junos_user
+ junos_l2_interface:
+ redirect: junipernetworks.junos.junos_l2_interface
+ junos_lldp:
+ redirect: junipernetworks.junos.junos_lldp
+ junos_rpc:
+ redirect: junipernetworks.junos.junos_rpc
+ junos_l2_interfaces:
+ redirect: junipernetworks.junos.junos_l2_interfaces
+ junos_lldp_interface:
+ redirect: junipernetworks.junos.junos_lldp_interface
+ junos_static_route:
+ redirect: junipernetworks.junos.junos_static_route
+ junos_lacp:
+ redirect: junipernetworks.junos.junos_lacp
+ junos_lacp_interfaces:
+ redirect: junipernetworks.junos.junos_lacp_interfaces
+ junos_vlans:
+ redirect: junipernetworks.junos.junos_vlans
+ junos_linkagg:
+ redirect: junipernetworks.junos.junos_linkagg
+ junos_scp:
+ redirect: junipernetworks.junos.junos_scp
+ junos_banner:
+ redirect: junipernetworks.junos.junos_banner
+ junos_l3_interface:
+ redirect: junipernetworks.junos.junos_l3_interface
+ junos_logging:
+ redirect: junipernetworks.junos.junos_logging
+ junos_package:
+ redirect: junipernetworks.junos.junos_package
+ junos_netconf:
+ redirect: junipernetworks.junos.junos_netconf
+ junos_facts:
+ redirect: junipernetworks.junos.junos_facts
+ junos_ping:
+ redirect: junipernetworks.junos.junos_ping
+ junos_interface:
+ redirect: junipernetworks.junos.junos_interface
+ junos_lldp_global:
+ redirect: junipernetworks.junos.junos_lldp_global
+ junos_config:
+ redirect: junipernetworks.junos.junos_config
+ junos_static_routes:
+ redirect: junipernetworks.junos.junos_static_routes
+ junos_command:
+ redirect: junipernetworks.junos.junos_command
+ junos_lag_interfaces:
+ redirect: junipernetworks.junos.junos_lag_interfaces
+ junos_l3_interfaces:
+ redirect: junipernetworks.junos.junos_l3_interfaces
+ junos_lldp_interfaces:
+ redirect: junipernetworks.junos.junos_lldp_interfaces
+ junos_vlan:
+ redirect: junipernetworks.junos.junos_vlan
+ junos_system:
+ redirect: junipernetworks.junos.junos_system
+ junos_interfaces:
+ redirect: junipernetworks.junos.junos_interfaces
+ junos_vrf:
+ redirect: junipernetworks.junos.junos_vrf
+ tower_credential:
+ redirect: awx.awx.tower_credential
+ tower_credential_type:
+ redirect: awx.awx.tower_credential_type
+ tower_group:
+ redirect: awx.awx.tower_group
+ tower_host:
+ redirect: awx.awx.tower_host
+ tower_inventory:
+ redirect: awx.awx.tower_inventory
+ tower_inventory_source:
+ redirect: awx.awx.tower_inventory_source
+ tower_job_cancel:
+ redirect: awx.awx.tower_job_cancel
+ tower_job_launch:
+ redirect: awx.awx.tower_job_launch
+ tower_job_list:
+ redirect: awx.awx.tower_job_list
+ tower_job_template:
+ redirect: awx.awx.tower_job_template
+ tower_job_wait:
+ redirect: awx.awx.tower_job_wait
+ tower_label:
+ redirect: awx.awx.tower_label
+ tower_notification:
+ redirect: awx.awx.tower_notification
+ tower_organization:
+ redirect: awx.awx.tower_organization
+ tower_project:
+ redirect: awx.awx.tower_project
+ tower_receive:
+ redirect: awx.awx.tower_receive
+ tower_role:
+ redirect: awx.awx.tower_role
+ tower_send:
+ redirect: awx.awx.tower_send
+ tower_settings:
+ redirect: awx.awx.tower_settings
+ tower_team:
+ redirect: awx.awx.tower_team
+ tower_user:
+ redirect: awx.awx.tower_user
+ tower_workflow_launch:
+ redirect: awx.awx.tower_workflow_launch
+ tower_workflow_template:
+ redirect: awx.awx.tower_workflow_template
+ ovirt_affinity_group:
+ redirect: ovirt.ovirt.ovirt_affinity_group
+ ovirt_affinity_label:
+ redirect: ovirt.ovirt.ovirt_affinity_label
+ ovirt_affinity_label_info:
+ redirect: ovirt.ovirt.ovirt_affinity_label_info
+ ovirt_api_info:
+ redirect: ovirt.ovirt.ovirt_api_info
+ ovirt_auth:
+ redirect: ovirt.ovirt.ovirt_auth
+ ovirt_cluster:
+ redirect: ovirt.ovirt.ovirt_cluster
+ ovirt_cluster_info:
+ redirect: ovirt.ovirt.ovirt_cluster_info
+ ovirt_datacenter:
+ redirect: ovirt.ovirt.ovirt_datacenter
+ ovirt_datacenter_info:
+ redirect: ovirt.ovirt.ovirt_datacenter_info
+ ovirt_disk:
+ redirect: ovirt.ovirt.ovirt_disk
+ ovirt_disk_info:
+ redirect: ovirt.ovirt.ovirt_disk_info
+ ovirt_event:
+ redirect: ovirt.ovirt.ovirt_event
+ ovirt_event_info:
+ redirect: ovirt.ovirt.ovirt_event_info
+ ovirt_external_provider:
+ redirect: ovirt.ovirt.ovirt_external_provider
+ ovirt_external_provider_info:
+ redirect: ovirt.ovirt.ovirt_external_provider_info
+ ovirt_group:
+ redirect: ovirt.ovirt.ovirt_group
+ ovirt_group_info:
+ redirect: ovirt.ovirt.ovirt_group_info
+ ovirt_host:
+ redirect: ovirt.ovirt.ovirt_host
+ ovirt_host_info:
+ redirect: ovirt.ovirt.ovirt_host_info
+ ovirt_host_network:
+ redirect: ovirt.ovirt.ovirt_host_network
+ ovirt_host_pm:
+ redirect: ovirt.ovirt.ovirt_host_pm
+ ovirt_host_storage_info:
+ redirect: ovirt.ovirt.ovirt_host_storage_info
+ ovirt_instance_type:
+ redirect: ovirt.ovirt.ovirt_instance_type
+ ovirt_job:
+ redirect: ovirt.ovirt.ovirt_job
+ ovirt_mac_pool:
+ redirect: ovirt.ovirt.ovirt_mac_pool
+ ovirt_network:
+ redirect: ovirt.ovirt.ovirt_network
+ ovirt_network_info:
+ redirect: ovirt.ovirt.ovirt_network_info
+ ovirt_nic:
+ redirect: ovirt.ovirt.ovirt_nic
+ ovirt_nic_info:
+ redirect: ovirt.ovirt.ovirt_nic_info
+ ovirt_permission:
+ redirect: ovirt.ovirt.ovirt_permission
+ ovirt_permission_info:
+ redirect: ovirt.ovirt.ovirt_permission_info
+ ovirt_quota:
+ redirect: ovirt.ovirt.ovirt_quota
+ ovirt_quota_info:
+ redirect: ovirt.ovirt.ovirt_quota_info
+ ovirt_role:
+ redirect: ovirt.ovirt.ovirt_role
+ ovirt_scheduling_policy_info:
+ redirect: ovirt.ovirt.ovirt_scheduling_policy_info
+ ovirt_snapshot:
+ redirect: ovirt.ovirt.ovirt_snapshot
+ ovirt_snapshot_info:
+ redirect: ovirt.ovirt.ovirt_snapshot_info
+ ovirt_storage_connection:
+ redirect: ovirt.ovirt.ovirt_storage_connection
+ ovirt_storage_domain:
+ redirect: ovirt.ovirt.ovirt_storage_domain
+ ovirt_storage_domain_info:
+ redirect: ovirt.ovirt.ovirt_storage_domain_info
+ ovirt_storage_template_info:
+ redirect: ovirt.ovirt.ovirt_storage_template_info
+ ovirt_storage_vm_info:
+ redirect: ovirt.ovirt.ovirt_storage_vm_info
+ ovirt_tag:
+ redirect: ovirt.ovirt.ovirt_tag
+ ovirt_tag_info:
+ redirect: ovirt.ovirt.ovirt_tag_info
+ ovirt_template:
+ redirect: ovirt.ovirt.ovirt_template
+ ovirt_template_info:
+ redirect: ovirt.ovirt.ovirt_template_info
+ ovirt_user:
+ redirect: ovirt.ovirt.ovirt_user
+ ovirt_user_info:
+ redirect: ovirt.ovirt.ovirt_user_info
+ ovirt_vm:
+ redirect: ovirt.ovirt.ovirt_vm
+ ovirt_vm_info:
+ redirect: ovirt.ovirt.ovirt_vm_info
+ ovirt_vmpool:
+ redirect: ovirt.ovirt.ovirt_vmpool
+ ovirt_vmpool_info:
+ redirect: ovirt.ovirt.ovirt_vmpool_info
+ ovirt_vnic_profile:
+ redirect: ovirt.ovirt.ovirt_vnic_profile
+ ovirt_vnic_profile_info:
+ redirect: ovirt.ovirt.ovirt_vnic_profile_info
+ dellos10_command:
+ redirect: dellemc.os10.os10_command
+ dellos10_facts:
+ redirect: dellemc.os10.os10_facts
+ dellos10_config:
+ redirect: dellemc.os10.os10_config
+ dellos9_facts:
+ redirect: dellemc.os9.os9_facts
+ dellos9_command:
+ redirect: dellemc.os9.os9_command
+ dellos9_config:
+ redirect: dellemc.os9.os9_config
+ dellos6_facts:
+ redirect: dellemc.os6.os6_facts
+ dellos6_config:
+ redirect: dellemc.os6.os6_config
+ dellos6_command:
+ redirect: dellemc.os6.os6_command
+ hcloud_location_facts:
+ redirect: hetzner.hcloud.hcloud_location_facts
+ hcloud_server_info:
+ redirect: hetzner.hcloud.hcloud_server_info
+ hcloud_server_network:
+ redirect: hetzner.hcloud.hcloud_server_network
+ hcloud_server_type_info:
+ redirect: hetzner.hcloud.hcloud_server_type_info
+ hcloud_route:
+ redirect: hetzner.hcloud.hcloud_route
+ hcloud_server:
+ redirect: hetzner.hcloud.hcloud_server
+ hcloud_volume_info:
+ redirect: hetzner.hcloud.hcloud_volume_info
+ hcloud_server_type_facts:
+ redirect: hetzner.hcloud.hcloud_server_type_facts
+ hcloud_ssh_key_info:
+ redirect: hetzner.hcloud.hcloud_ssh_key_info
+ hcloud_network_info:
+ redirect: hetzner.hcloud.hcloud_network_info
+ hcloud_datacenter_info:
+ redirect: hetzner.hcloud.hcloud_datacenter_info
+ hcloud_image_facts:
+ redirect: hetzner.hcloud.hcloud_image_facts
+ hcloud_volume_facts:
+ redirect: hetzner.hcloud.hcloud_volume_facts
+ hcloud_floating_ip_info:
+ redirect: hetzner.hcloud.hcloud_floating_ip_info
+ hcloud_floating_ip_facts:
+ redirect: hetzner.hcloud.hcloud_floating_ip_facts
+ hcloud_image_info:
+ redirect: hetzner.hcloud.hcloud_image_info
+ hcloud_ssh_key_facts:
+ redirect: hetzner.hcloud.hcloud_ssh_key_facts
+ hcloud_location_info:
+ redirect: hetzner.hcloud.hcloud_location_info
+ hcloud_network:
+ redirect: hetzner.hcloud.hcloud_network
+ hcloud_volume:
+ redirect: hetzner.hcloud.hcloud_volume
+ hcloud_ssh_key:
+ redirect: hetzner.hcloud.hcloud_ssh_key
+ hcloud_datacenter_facts:
+ redirect: hetzner.hcloud.hcloud_datacenter_facts
+ hcloud_rdns:
+ redirect: hetzner.hcloud.hcloud_rdns
+ hcloud_floating_ip:
+ redirect: hetzner.hcloud.hcloud_floating_ip
+ hcloud_server_facts:
+ redirect: hetzner.hcloud.hcloud_server_facts
+ hcloud_subnetwork:
+ redirect: hetzner.hcloud.hcloud_subnetwork
+ skydive_capture:
+ redirect: community.skydive.skydive_capture
+ skydive_edge:
+ redirect: community.skydive.skydive_edge
+ skydive_node:
+ redirect: community.skydive.skydive_node
+ cyberark_authentication:
+ redirect: cyberark.pas.cyberark_authentication
+ cyberark_user:
+ redirect: cyberark.pas.cyberark_user
+ gcp_appengine_firewall_rule:
+ redirect: google.cloud.gcp_appengine_firewall_rule
+ gcp_appengine_firewall_rule_info:
+ redirect: google.cloud.gcp_appengine_firewall_rule_info
+ gcp_bigquery_dataset:
+ redirect: google.cloud.gcp_bigquery_dataset
+ gcp_bigquery_dataset_info:
+ redirect: google.cloud.gcp_bigquery_dataset_info
+ gcp_bigquery_table:
+ redirect: google.cloud.gcp_bigquery_table
+ gcp_bigquery_table_info:
+ redirect: google.cloud.gcp_bigquery_table_info
+ gcp_cloudbuild_trigger:
+ redirect: google.cloud.gcp_cloudbuild_trigger
+ gcp_cloudbuild_trigger_info:
+ redirect: google.cloud.gcp_cloudbuild_trigger_info
+ gcp_cloudfunctions_cloud_function:
+ redirect: google.cloud.gcp_cloudfunctions_cloud_function
+ gcp_cloudfunctions_cloud_function_info:
+ redirect: google.cloud.gcp_cloudfunctions_cloud_function_info
+ gcp_cloudscheduler_job:
+ redirect: google.cloud.gcp_cloudscheduler_job
+ gcp_cloudscheduler_job_info:
+ redirect: google.cloud.gcp_cloudscheduler_job_info
+ gcp_cloudtasks_queue:
+ redirect: google.cloud.gcp_cloudtasks_queue
+ gcp_cloudtasks_queue_info:
+ redirect: google.cloud.gcp_cloudtasks_queue_info
+ gcp_compute_address:
+ redirect: google.cloud.gcp_compute_address
+ gcp_compute_address_info:
+ redirect: google.cloud.gcp_compute_address_info
+ gcp_compute_autoscaler:
+ redirect: google.cloud.gcp_compute_autoscaler
+ gcp_compute_autoscaler_info:
+ redirect: google.cloud.gcp_compute_autoscaler_info
+ gcp_compute_backend_bucket:
+ redirect: google.cloud.gcp_compute_backend_bucket
+ gcp_compute_backend_bucket_info:
+ redirect: google.cloud.gcp_compute_backend_bucket_info
+ gcp_compute_backend_service:
+ redirect: google.cloud.gcp_compute_backend_service
+ gcp_compute_backend_service_info:
+ redirect: google.cloud.gcp_compute_backend_service_info
+ gcp_compute_disk:
+ redirect: google.cloud.gcp_compute_disk
+ gcp_compute_disk_info:
+ redirect: google.cloud.gcp_compute_disk_info
+ gcp_compute_firewall:
+ redirect: google.cloud.gcp_compute_firewall
+ gcp_compute_firewall_info:
+ redirect: google.cloud.gcp_compute_firewall_info
+ gcp_compute_forwarding_rule:
+ redirect: google.cloud.gcp_compute_forwarding_rule
+ gcp_compute_forwarding_rule_info:
+ redirect: google.cloud.gcp_compute_forwarding_rule_info
+ gcp_compute_global_address:
+ redirect: google.cloud.gcp_compute_global_address
+ gcp_compute_global_address_info:
+ redirect: google.cloud.gcp_compute_global_address_info
+ gcp_compute_global_forwarding_rule:
+ redirect: google.cloud.gcp_compute_global_forwarding_rule
+ gcp_compute_global_forwarding_rule_info:
+ redirect: google.cloud.gcp_compute_global_forwarding_rule_info
+ gcp_compute_health_check:
+ redirect: google.cloud.gcp_compute_health_check
+ gcp_compute_health_check_info:
+ redirect: google.cloud.gcp_compute_health_check_info
+ gcp_compute_http_health_check:
+ redirect: google.cloud.gcp_compute_http_health_check
+ gcp_compute_http_health_check_info:
+ redirect: google.cloud.gcp_compute_http_health_check_info
+ gcp_compute_https_health_check:
+ redirect: google.cloud.gcp_compute_https_health_check
+ gcp_compute_https_health_check_info:
+ redirect: google.cloud.gcp_compute_https_health_check_info
+ gcp_compute_image:
+ redirect: google.cloud.gcp_compute_image
+ gcp_compute_image_info:
+ redirect: google.cloud.gcp_compute_image_info
+ gcp_compute_instance:
+ redirect: google.cloud.gcp_compute_instance
+ gcp_compute_instance_group:
+ redirect: google.cloud.gcp_compute_instance_group
+ gcp_compute_instance_group_info:
+ redirect: google.cloud.gcp_compute_instance_group_info
+ gcp_compute_instance_group_manager:
+ redirect: google.cloud.gcp_compute_instance_group_manager
+ gcp_compute_instance_group_manager_info:
+ redirect: google.cloud.gcp_compute_instance_group_manager_info
+ gcp_compute_instance_info:
+ redirect: google.cloud.gcp_compute_instance_info
+ gcp_compute_instance_template:
+ redirect: google.cloud.gcp_compute_instance_template
+ gcp_compute_instance_template_info:
+ redirect: google.cloud.gcp_compute_instance_template_info
+ gcp_compute_interconnect_attachment:
+ redirect: google.cloud.gcp_compute_interconnect_attachment
+ gcp_compute_interconnect_attachment_info:
+ redirect: google.cloud.gcp_compute_interconnect_attachment_info
+ gcp_compute_network:
+ redirect: google.cloud.gcp_compute_network
+ gcp_compute_network_endpoint_group:
+ redirect: google.cloud.gcp_compute_network_endpoint_group
+ gcp_compute_network_endpoint_group_info:
+ redirect: google.cloud.gcp_compute_network_endpoint_group_info
+ gcp_compute_network_info:
+ redirect: google.cloud.gcp_compute_network_info
+ gcp_compute_node_group:
+ redirect: google.cloud.gcp_compute_node_group
+ gcp_compute_node_group_info:
+ redirect: google.cloud.gcp_compute_node_group_info
+ gcp_compute_node_template:
+ redirect: google.cloud.gcp_compute_node_template
+ gcp_compute_node_template_info:
+ redirect: google.cloud.gcp_compute_node_template_info
+ gcp_compute_region_backend_service:
+ redirect: google.cloud.gcp_compute_region_backend_service
+ gcp_compute_region_backend_service_info:
+ redirect: google.cloud.gcp_compute_region_backend_service_info
+ gcp_compute_region_disk:
+ redirect: google.cloud.gcp_compute_region_disk
+ gcp_compute_region_disk_info:
+ redirect: google.cloud.gcp_compute_region_disk_info
+ gcp_compute_reservation:
+ redirect: google.cloud.gcp_compute_reservation
+ gcp_compute_reservation_info:
+ redirect: google.cloud.gcp_compute_reservation_info
+ gcp_compute_route:
+ redirect: google.cloud.gcp_compute_route
+ gcp_compute_route_info:
+ redirect: google.cloud.gcp_compute_route_info
+ gcp_compute_router:
+ redirect: google.cloud.gcp_compute_router
+ gcp_compute_router_info:
+ redirect: google.cloud.gcp_compute_router_info
+ gcp_compute_snapshot:
+ redirect: google.cloud.gcp_compute_snapshot
+ gcp_compute_snapshot_info:
+ redirect: google.cloud.gcp_compute_snapshot_info
+ gcp_compute_ssl_certificate:
+ redirect: google.cloud.gcp_compute_ssl_certificate
+ gcp_compute_ssl_certificate_info:
+ redirect: google.cloud.gcp_compute_ssl_certificate_info
+ gcp_compute_ssl_policy:
+ redirect: google.cloud.gcp_compute_ssl_policy
+ gcp_compute_ssl_policy_info:
+ redirect: google.cloud.gcp_compute_ssl_policy_info
+ gcp_compute_subnetwork:
+ redirect: google.cloud.gcp_compute_subnetwork
+ gcp_compute_subnetwork_info:
+ redirect: google.cloud.gcp_compute_subnetwork_info
+ gcp_compute_target_http_proxy:
+ redirect: google.cloud.gcp_compute_target_http_proxy
+ gcp_compute_target_http_proxy_info:
+ redirect: google.cloud.gcp_compute_target_http_proxy_info
+ gcp_compute_target_https_proxy:
+ redirect: google.cloud.gcp_compute_target_https_proxy
+ gcp_compute_target_https_proxy_info:
+ redirect: google.cloud.gcp_compute_target_https_proxy_info
+ gcp_compute_target_instance:
+ redirect: google.cloud.gcp_compute_target_instance
+ gcp_compute_target_instance_info:
+ redirect: google.cloud.gcp_compute_target_instance_info
+ gcp_compute_target_pool:
+ redirect: google.cloud.gcp_compute_target_pool
+ gcp_compute_target_pool_info:
+ redirect: google.cloud.gcp_compute_target_pool_info
+ gcp_compute_target_ssl_proxy:
+ redirect: google.cloud.gcp_compute_target_ssl_proxy
+ gcp_compute_target_ssl_proxy_info:
+ redirect: google.cloud.gcp_compute_target_ssl_proxy_info
+ gcp_compute_target_tcp_proxy:
+ redirect: google.cloud.gcp_compute_target_tcp_proxy
+ gcp_compute_target_tcp_proxy_info:
+ redirect: google.cloud.gcp_compute_target_tcp_proxy_info
+ gcp_compute_target_vpn_gateway:
+ redirect: google.cloud.gcp_compute_target_vpn_gateway
+ gcp_compute_target_vpn_gateway_info:
+ redirect: google.cloud.gcp_compute_target_vpn_gateway_info
+ gcp_compute_url_map:
+ redirect: google.cloud.gcp_compute_url_map
+ gcp_compute_url_map_info:
+ redirect: google.cloud.gcp_compute_url_map_info
+ gcp_compute_vpn_tunnel:
+ redirect: google.cloud.gcp_compute_vpn_tunnel
+ gcp_compute_vpn_tunnel_info:
+ redirect: google.cloud.gcp_compute_vpn_tunnel_info
+ gcp_container_cluster:
+ redirect: google.cloud.gcp_container_cluster
+ gcp_container_cluster_info:
+ redirect: google.cloud.gcp_container_cluster_info
+ gcp_container_node_pool:
+ redirect: google.cloud.gcp_container_node_pool
+ gcp_container_node_pool_info:
+ redirect: google.cloud.gcp_container_node_pool_info
+ gcp_dns_managed_zone:
+ redirect: google.cloud.gcp_dns_managed_zone
+ gcp_dns_managed_zone_info:
+ redirect: google.cloud.gcp_dns_managed_zone_info
+ gcp_dns_resource_record_set:
+ redirect: google.cloud.gcp_dns_resource_record_set
+ gcp_dns_resource_record_set_info:
+ redirect: google.cloud.gcp_dns_resource_record_set_info
+ gcp_filestore_instance:
+ redirect: google.cloud.gcp_filestore_instance
+ gcp_filestore_instance_info:
+ redirect: google.cloud.gcp_filestore_instance_info
+ gcp_iam_role:
+ redirect: google.cloud.gcp_iam_role
+ gcp_iam_role_info:
+ redirect: google.cloud.gcp_iam_role_info
+ gcp_iam_service_account:
+ redirect: google.cloud.gcp_iam_service_account
+ gcp_iam_service_account_info:
+ redirect: google.cloud.gcp_iam_service_account_info
+ gcp_iam_service_account_key:
+ redirect: google.cloud.gcp_iam_service_account_key
+ gcp_kms_crypto_key:
+ redirect: google.cloud.gcp_kms_crypto_key
+ gcp_kms_crypto_key_info:
+ redirect: google.cloud.gcp_kms_crypto_key_info
+ gcp_kms_key_ring:
+ redirect: google.cloud.gcp_kms_key_ring
+ gcp_kms_key_ring_info:
+ redirect: google.cloud.gcp_kms_key_ring_info
+ gcp_logging_metric:
+ redirect: google.cloud.gcp_logging_metric
+ gcp_logging_metric_info:
+ redirect: google.cloud.gcp_logging_metric_info
+ gcp_mlengine_model:
+ redirect: google.cloud.gcp_mlengine_model
+ gcp_mlengine_model_info:
+ redirect: google.cloud.gcp_mlengine_model_info
+ gcp_mlengine_version:
+ redirect: google.cloud.gcp_mlengine_version
+ gcp_mlengine_version_info:
+ redirect: google.cloud.gcp_mlengine_version_info
+ gcp_pubsub_subscription:
+ redirect: google.cloud.gcp_pubsub_subscription
+ gcp_pubsub_subscription_info:
+ redirect: google.cloud.gcp_pubsub_subscription_info
+ gcp_pubsub_topic:
+ redirect: google.cloud.gcp_pubsub_topic
+ gcp_pubsub_topic_info:
+ redirect: google.cloud.gcp_pubsub_topic_info
+ gcp_redis_instance:
+ redirect: google.cloud.gcp_redis_instance
+ gcp_redis_instance_info:
+ redirect: google.cloud.gcp_redis_instance_info
+ gcp_resourcemanager_project:
+ redirect: google.cloud.gcp_resourcemanager_project
+ gcp_resourcemanager_project_info:
+ redirect: google.cloud.gcp_resourcemanager_project_info
+ gcp_runtimeconfig_config:
+ redirect: google.cloud.gcp_runtimeconfig_config
+ gcp_runtimeconfig_config_info:
+ redirect: google.cloud.gcp_runtimeconfig_config_info
+ gcp_runtimeconfig_variable:
+ redirect: google.cloud.gcp_runtimeconfig_variable
+ gcp_runtimeconfig_variable_info:
+ redirect: google.cloud.gcp_runtimeconfig_variable_info
+ gcp_serviceusage_service:
+ redirect: google.cloud.gcp_serviceusage_service
+ gcp_serviceusage_service_info:
+ redirect: google.cloud.gcp_serviceusage_service_info
+ gcp_sourcerepo_repository:
+ redirect: google.cloud.gcp_sourcerepo_repository
+ gcp_sourcerepo_repository_info:
+ redirect: google.cloud.gcp_sourcerepo_repository_info
+ gcp_spanner_database:
+ redirect: google.cloud.gcp_spanner_database
+ gcp_spanner_database_info:
+ redirect: google.cloud.gcp_spanner_database_info
+ gcp_spanner_instance:
+ redirect: google.cloud.gcp_spanner_instance
+ gcp_spanner_instance_info:
+ redirect: google.cloud.gcp_spanner_instance_info
+ gcp_sql_database:
+ redirect: google.cloud.gcp_sql_database
+ gcp_sql_database_info:
+ redirect: google.cloud.gcp_sql_database_info
+ gcp_sql_instance:
+ redirect: google.cloud.gcp_sql_instance
+ gcp_sql_instance_info:
+ redirect: google.cloud.gcp_sql_instance_info
+ gcp_sql_user:
+ redirect: google.cloud.gcp_sql_user
+ gcp_sql_user_info:
+ redirect: google.cloud.gcp_sql_user_info
+ gcp_storage_bucket:
+ redirect: google.cloud.gcp_storage_bucket
+ gcp_storage_bucket_access_control:
+ redirect: google.cloud.gcp_storage_bucket_access_control
+ gcp_storage_object:
+ redirect: google.cloud.gcp_storage_object
+ gcp_tpu_node:
+ redirect: google.cloud.gcp_tpu_node
+ gcp_tpu_node_info:
+ redirect: google.cloud.gcp_tpu_node_info
+ purefa_alert:
+ redirect: purestorage.flasharray.purefa_alert
+ purefa_arrayname:
+ redirect: purestorage.flasharray.purefa_arrayname
+ purefa_banner:
+ redirect: purestorage.flasharray.purefa_banner
+ purefa_connect:
+ redirect: purestorage.flasharray.purefa_connect
+ purefa_dns:
+ redirect: purestorage.flasharray.purefa_dns
+ purefa_ds:
+ redirect: purestorage.flasharray.purefa_ds
+ purefa_dsrole:
+ redirect: purestorage.flasharray.purefa_dsrole
+ purefa_hg:
+ redirect: purestorage.flasharray.purefa_hg
+ purefa_host:
+ redirect: purestorage.flasharray.purefa_host
+ purefa_info:
+ redirect: purestorage.flasharray.purefa_info
+ purefa_ntp:
+ redirect: purestorage.flasharray.purefa_ntp
+ purefa_offload:
+ redirect: purestorage.flasharray.purefa_offload
+ purefa_pg:
+ redirect: purestorage.flasharray.purefa_pg
+ purefa_pgsnap:
+ redirect: purestorage.flasharray.purefa_pgsnap
+ purefa_phonehome:
+ redirect: purestorage.flasharray.purefa_phonehome
+ purefa_ra:
+ redirect: purestorage.flasharray.purefa_ra
+ purefa_smtp:
+ redirect: purestorage.flasharray.purefa_smtp
+ purefa_snap:
+ redirect: purestorage.flasharray.purefa_snap
+ purefa_snmp:
+ redirect: purestorage.flasharray.purefa_snmp
+ purefa_syslog:
+ redirect: purestorage.flasharray.purefa_syslog
+ purefa_user:
+ redirect: purestorage.flasharray.purefa_user
+ purefa_vg:
+ redirect: purestorage.flasharray.purefa_vg
+ purefa_volume:
+ redirect: purestorage.flasharray.purefa_volume
+ purefb_bucket:
+ redirect: purestorage.flashblade.purefb_bucket
+ purefb_ds:
+ redirect: purestorage.flashblade.purefb_ds
+ purefb_dsrole:
+ redirect: purestorage.flashblade.purefb_dsrole
+ purefb_fs:
+ redirect: purestorage.flashblade.purefb_fs
+ purefb_info:
+ redirect: purestorage.flashblade.purefb_info
+ purefb_network:
+ redirect: purestorage.flashblade.purefb_network
+ purefb_ra:
+ redirect: purestorage.flashblade.purefb_ra
+ purefb_s3acc:
+ redirect: purestorage.flashblade.purefb_s3acc
+ purefb_s3user:
+ redirect: purestorage.flashblade.purefb_s3user
+ purefb_smtp:
+ redirect: purestorage.flashblade.purefb_smtp
+ purefb_snap:
+ redirect: purestorage.flashblade.purefb_snap
+ purefb_subnet:
+ redirect: purestorage.flashblade.purefb_subnet
+ azure_rm_acs:
+ redirect: azure.azcollection.azure_rm_acs
+ azure_rm_virtualmachine_info:
+ redirect: azure.azcollection.azure_rm_virtualmachine_info
+ azure_rm_dnsrecordset_info:
+ redirect: azure.azcollection.azure_rm_dnsrecordset_info
+ azure_rm_dnszone_info:
+ redirect: azure.azcollection.azure_rm_dnszone_info
+ azure_rm_networkinterface_info:
+ redirect: azure.azcollection.azure_rm_networkinterface_info
+ azure_rm_publicipaddress_info:
+ redirect: azure.azcollection.azure_rm_publicipaddress_info
+ azure_rm_securitygroup_info:
+ redirect: azure.azcollection.azure_rm_securitygroup_info
+ azure_rm_storageaccount_info:
+ redirect: azure.azcollection.azure_rm_storageaccount_info
+ azure_rm_virtualnetwork_info:
+ redirect: azure.azcollection.azure_rm_virtualnetwork_info
+ azure_rm_deployment:
+ redirect: azure.azcollection.azure_rm_deployment
+ azure_rm_dnsrecordset:
+ redirect: azure.azcollection.azure_rm_dnsrecordset
+ azure_rm_dnszone:
+ redirect: azure.azcollection.azure_rm_dnszone
+ azure_rm_networkinterface:
+ redirect: azure.azcollection.azure_rm_networkinterface
+ azure_rm_publicipaddress:
+ redirect: azure.azcollection.azure_rm_publicipaddress
+ azure_rm_securitygroup:
+ redirect: azure.azcollection.azure_rm_securitygroup
+ azure_rm_storageaccount:
+ redirect: azure.azcollection.azure_rm_storageaccount
+ azure_rm_subnet:
+ redirect: azure.azcollection.azure_rm_subnet
+ azure_rm_virtualmachine:
+ redirect: azure.azcollection.azure_rm_virtualmachine
+ azure_rm_virtualnetwork:
+ redirect: azure.azcollection.azure_rm_virtualnetwork
+ azure_rm_aks:
+ redirect: azure.azcollection.azure_rm_aks
+ azure_rm_aks_info:
+ redirect: azure.azcollection.azure_rm_aks_info
+ azure_rm_aksversion_info:
+ redirect: azure.azcollection.azure_rm_aksversion_info
+ azure_rm_appgateway:
+ redirect: azure.azcollection.azure_rm_appgateway
+ azure_rm_applicationsecuritygroup:
+ redirect: azure.azcollection.azure_rm_applicationsecuritygroup
+ azure_rm_applicationsecuritygroup_info:
+ redirect: azure.azcollection.azure_rm_applicationsecuritygroup_info
+ azure_rm_appserviceplan:
+ redirect: azure.azcollection.azure_rm_appserviceplan
+ azure_rm_appserviceplan_info:
+ redirect: azure.azcollection.azure_rm_appserviceplan_info
+ azure_rm_availabilityset:
+ redirect: azure.azcollection.azure_rm_availabilityset
+ azure_rm_availabilityset_info:
+ redirect: azure.azcollection.azure_rm_availabilityset_info
+ azure_rm_containerinstance:
+ redirect: azure.azcollection.azure_rm_containerinstance
+ azure_rm_containerinstance_info:
+ redirect: azure.azcollection.azure_rm_containerinstance_info
+ azure_rm_containerregistry:
+ redirect: azure.azcollection.azure_rm_containerregistry
+ azure_rm_containerregistry_info:
+ redirect: azure.azcollection.azure_rm_containerregistry_info
+ azure_rm_deployment_info:
+ redirect: azure.azcollection.azure_rm_deployment_info
+ azure_rm_functionapp:
+ redirect: azure.azcollection.azure_rm_functionapp
+ azure_rm_functionapp_info:
+ redirect: azure.azcollection.azure_rm_functionapp_info
+ azure_rm_gallery:
+ redirect: azure.azcollection.azure_rm_gallery
+ azure_rm_gallery_info:
+ redirect: azure.azcollection.azure_rm_gallery_info
+ azure_rm_galleryimage:
+ redirect: azure.azcollection.azure_rm_galleryimage
+ azure_rm_galleryimage_info:
+ redirect: azure.azcollection.azure_rm_galleryimage_info
+ azure_rm_galleryimageversion:
+ redirect: azure.azcollection.azure_rm_galleryimageversion
+ azure_rm_galleryimageversion_info:
+ redirect: azure.azcollection.azure_rm_galleryimageversion_info
+ azure_rm_image:
+ redirect: azure.azcollection.azure_rm_image
+ azure_rm_image_info:
+ redirect: azure.azcollection.azure_rm_image_info
+ azure_rm_keyvault:
+ redirect: azure.azcollection.azure_rm_keyvault
+ azure_rm_keyvault_info:
+ redirect: azure.azcollection.azure_rm_keyvault_info
+ azure_rm_keyvaultkey:
+ redirect: azure.azcollection.azure_rm_keyvaultkey
+ azure_rm_keyvaultkey_info:
+ redirect: azure.azcollection.azure_rm_keyvaultkey_info
+ azure_rm_keyvaultsecret:
+ redirect: azure.azcollection.azure_rm_keyvaultsecret
+ azure_rm_manageddisk:
+ redirect: azure.azcollection.azure_rm_manageddisk
+ azure_rm_manageddisk_info:
+ redirect: azure.azcollection.azure_rm_manageddisk_info
+ azure_rm_resource:
+ redirect: azure.azcollection.azure_rm_resource
+ azure_rm_resource_info:
+ redirect: azure.azcollection.azure_rm_resource_info
+ azure_rm_resourcegroup:
+ redirect: azure.azcollection.azure_rm_resourcegroup
+ azure_rm_resourcegroup_info:
+ redirect: azure.azcollection.azure_rm_resourcegroup_info
+ azure_rm_snapshot:
+ redirect: azure.azcollection.azure_rm_snapshot
+ azure_rm_storageblob:
+ redirect: azure.azcollection.azure_rm_storageblob
+ azure_rm_subnet_info:
+ redirect: azure.azcollection.azure_rm_subnet_info
+ azure_rm_virtualmachineextension:
+ redirect: azure.azcollection.azure_rm_virtualmachineextension
+ azure_rm_virtualmachineextension_info:
+ redirect: azure.azcollection.azure_rm_virtualmachineextension_info
+ azure_rm_virtualmachineimage_info:
+ redirect: azure.azcollection.azure_rm_virtualmachineimage_info
+ azure_rm_virtualmachinescaleset:
+ redirect: azure.azcollection.azure_rm_virtualmachinescaleset
+ azure_rm_virtualmachinescaleset_info:
+ redirect: azure.azcollection.azure_rm_virtualmachinescaleset_info
+ azure_rm_virtualmachinescalesetextension:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetextension
+ azure_rm_virtualmachinescalesetextension_info:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetextension_info
+ azure_rm_virtualmachinescalesetinstance:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetinstance
+ azure_rm_virtualmachinescalesetinstance_info:
+ redirect: azure.azcollection.azure_rm_virtualmachinescalesetinstance_info
+ azure_rm_webapp:
+ redirect: azure.azcollection.azure_rm_webapp
+ azure_rm_webapp_info:
+ redirect: azure.azcollection.azure_rm_webapp_info
+ azure_rm_webappslot:
+ redirect: azure.azcollection.azure_rm_webappslot
+ azure_rm_automationaccount:
+ redirect: azure.azcollection.azure_rm_automationaccount
+ azure_rm_automationaccount_info:
+ redirect: azure.azcollection.azure_rm_automationaccount_info
+ azure_rm_autoscale:
+ redirect: azure.azcollection.azure_rm_autoscale
+ azure_rm_autoscale_info:
+ redirect: azure.azcollection.azure_rm_autoscale_info
+ azure_rm_azurefirewall:
+ redirect: azure.azcollection.azure_rm_azurefirewall
+ azure_rm_azurefirewall_info:
+ redirect: azure.azcollection.azure_rm_azurefirewall_info
+ azure_rm_batchaccount:
+ redirect: azure.azcollection.azure_rm_batchaccount
+ azure_rm_cdnendpoint:
+ redirect: azure.azcollection.azure_rm_cdnendpoint
+ azure_rm_cdnendpoint_info:
+ redirect: azure.azcollection.azure_rm_cdnendpoint_info
+ azure_rm_cdnprofile:
+ redirect: azure.azcollection.azure_rm_cdnprofile
+ azure_rm_cdnprofile_info:
+ redirect: azure.azcollection.azure_rm_cdnprofile_info
+ azure_rm_iotdevice:
+ redirect: azure.azcollection.azure_rm_iotdevice
+ azure_rm_iotdevice_info:
+ redirect: azure.azcollection.azure_rm_iotdevice_info
+ azure_rm_iotdevicemodule:
+ redirect: azure.azcollection.azure_rm_iotdevicemodule
+ azure_rm_iothub:
+ redirect: azure.azcollection.azure_rm_iothub
+ azure_rm_iothub_info:
+ redirect: azure.azcollection.azure_rm_iothub_info
+ azure_rm_iothubconsumergroup:
+ redirect: azure.azcollection.azure_rm_iothubconsumergroup
+ azure_rm_loadbalancer:
+ redirect: azure.azcollection.azure_rm_loadbalancer
+ azure_rm_loadbalancer_info:
+ redirect: azure.azcollection.azure_rm_loadbalancer_info
+ azure_rm_lock:
+ redirect: azure.azcollection.azure_rm_lock
+ azure_rm_lock_info:
+ redirect: azure.azcollection.azure_rm_lock_info
+ azure_rm_loganalyticsworkspace:
+ redirect: azure.azcollection.azure_rm_loganalyticsworkspace
+ azure_rm_loganalyticsworkspace_info:
+ redirect: azure.azcollection.azure_rm_loganalyticsworkspace_info
+ azure_rm_monitorlogprofile:
+ redirect: azure.azcollection.azure_rm_monitorlogprofile
+ azure_rm_rediscache:
+ redirect: azure.azcollection.azure_rm_rediscache
+ azure_rm_rediscache_info:
+ redirect: azure.azcollection.azure_rm_rediscache_info
+ azure_rm_rediscachefirewallrule:
+ redirect: azure.azcollection.azure_rm_rediscachefirewallrule
+ azure_rm_roleassignment:
+ redirect: azure.azcollection.azure_rm_roleassignment
+ azure_rm_roleassignment_info:
+ redirect: azure.azcollection.azure_rm_roleassignment_info
+ azure_rm_roledefinition:
+ redirect: azure.azcollection.azure_rm_roledefinition
+ azure_rm_roledefinition_info:
+ redirect: azure.azcollection.azure_rm_roledefinition_info
+ azure_rm_route:
+ redirect: azure.azcollection.azure_rm_route
+ azure_rm_routetable:
+ redirect: azure.azcollection.azure_rm_routetable
+ azure_rm_routetable_info:
+ redirect: azure.azcollection.azure_rm_routetable_info
+ azure_rm_servicebus:
+ redirect: azure.azcollection.azure_rm_servicebus
+ azure_rm_servicebus_info:
+ redirect: azure.azcollection.azure_rm_servicebus_info
+ azure_rm_servicebusqueue:
+ redirect: azure.azcollection.azure_rm_servicebusqueue
+ azure_rm_servicebussaspolicy:
+ redirect: azure.azcollection.azure_rm_servicebussaspolicy
+ azure_rm_servicebustopic:
+ redirect: azure.azcollection.azure_rm_servicebustopic
+ azure_rm_servicebustopicsubscription:
+ redirect: azure.azcollection.azure_rm_servicebustopicsubscription
+ azure_rm_trafficmanagerendpoint:
+ redirect: azure.azcollection.azure_rm_trafficmanagerendpoint
+ azure_rm_trafficmanagerendpoint_info:
+ redirect: azure.azcollection.azure_rm_trafficmanagerendpoint_info
+ azure_rm_trafficmanagerprofile:
+ redirect: azure.azcollection.azure_rm_trafficmanagerprofile
+ azure_rm_trafficmanagerprofile_info:
+ redirect: azure.azcollection.azure_rm_trafficmanagerprofile_info
+ azure_rm_virtualnetworkgateway:
+ redirect: azure.azcollection.azure_rm_virtualnetworkgateway
+ azure_rm_virtualnetworkpeering:
+ redirect: azure.azcollection.azure_rm_virtualnetworkpeering
+ azure_rm_virtualnetworkpeering_info:
+ redirect: azure.azcollection.azure_rm_virtualnetworkpeering_info
+ azure_rm_cosmosdbaccount:
+ redirect: azure.azcollection.azure_rm_cosmosdbaccount
+ azure_rm_cosmosdbaccount_info:
+ redirect: azure.azcollection.azure_rm_cosmosdbaccount_info
+ azure_rm_devtestlab:
+ redirect: azure.azcollection.azure_rm_devtestlab
+ azure_rm_devtestlab_info:
+ redirect: azure.azcollection.azure_rm_devtestlab_info
+ azure_rm_devtestlabarmtemplate_info:
+ redirect: azure.azcollection.azure_rm_devtestlabarmtemplate_info
+ azure_rm_devtestlabartifact_info:
+ redirect: azure.azcollection.azure_rm_devtestlabartifact_info
+ azure_rm_devtestlabartifactsource:
+ redirect: azure.azcollection.azure_rm_devtestlabartifactsource
+ azure_rm_devtestlabartifactsource_info:
+ redirect: azure.azcollection.azure_rm_devtestlabartifactsource_info
+ azure_rm_devtestlabcustomimage:
+ redirect: azure.azcollection.azure_rm_devtestlabcustomimage
+ azure_rm_devtestlabcustomimage_info:
+ redirect: azure.azcollection.azure_rm_devtestlabcustomimage_info
+ azure_rm_devtestlabenvironment:
+ redirect: azure.azcollection.azure_rm_devtestlabenvironment
+ azure_rm_devtestlabenvironment_info:
+ redirect: azure.azcollection.azure_rm_devtestlabenvironment_info
+ azure_rm_devtestlabpolicy:
+ redirect: azure.azcollection.azure_rm_devtestlabpolicy
+ azure_rm_devtestlabpolicy_info:
+ redirect: azure.azcollection.azure_rm_devtestlabpolicy_info
+ azure_rm_devtestlabschedule:
+ redirect: azure.azcollection.azure_rm_devtestlabschedule
+ azure_rm_devtestlabschedule_info:
+ redirect: azure.azcollection.azure_rm_devtestlabschedule_info
+ azure_rm_devtestlabvirtualmachine:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualmachine
+ azure_rm_devtestlabvirtualmachine_info:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualmachine_info
+ azure_rm_devtestlabvirtualnetwork:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualnetwork
+ azure_rm_devtestlabvirtualnetwork_info:
+ redirect: azure.azcollection.azure_rm_devtestlabvirtualnetwork_info
+ azure_rm_hdinsightcluster:
+ redirect: azure.azcollection.azure_rm_hdinsightcluster
+ azure_rm_hdinsightcluster_info:
+ redirect: azure.azcollection.azure_rm_hdinsightcluster_info
+ azure_rm_mariadbconfiguration:
+ redirect: azure.azcollection.azure_rm_mariadbconfiguration
+ azure_rm_mariadbconfiguration_info:
+ redirect: azure.azcollection.azure_rm_mariadbconfiguration_info
+ azure_rm_mariadbdatabase:
+ redirect: azure.azcollection.azure_rm_mariadbdatabase
+ azure_rm_mariadbdatabase_info:
+ redirect: azure.azcollection.azure_rm_mariadbdatabase_info
+ azure_rm_mariadbfirewallrule:
+ redirect: azure.azcollection.azure_rm_mariadbfirewallrule
+ azure_rm_mariadbfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_mariadbfirewallrule_info
+ azure_rm_mariadbserver:
+ redirect: azure.azcollection.azure_rm_mariadbserver
+ azure_rm_mariadbserver_info:
+ redirect: azure.azcollection.azure_rm_mariadbserver_info
+ azure_rm_mysqlconfiguration:
+ redirect: azure.azcollection.azure_rm_mysqlconfiguration
+ azure_rm_mysqlconfiguration_info:
+ redirect: azure.azcollection.azure_rm_mysqlconfiguration_info
+ azure_rm_mysqldatabase:
+ redirect: azure.azcollection.azure_rm_mysqldatabase
+ azure_rm_mysqldatabase_info:
+ redirect: azure.azcollection.azure_rm_mysqldatabase_info
+ azure_rm_mysqlfirewallrule:
+ redirect: azure.azcollection.azure_rm_mysqlfirewallrule
+ azure_rm_mysqlfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_mysqlfirewallrule_info
+ azure_rm_mysqlserver:
+ redirect: azure.azcollection.azure_rm_mysqlserver
+ azure_rm_mysqlserver_info:
+ redirect: azure.azcollection.azure_rm_mysqlserver_info
+ azure_rm_postgresqlconfiguration:
+ redirect: azure.azcollection.azure_rm_postgresqlconfiguration
+ azure_rm_postgresqlconfiguration_info:
+ redirect: azure.azcollection.azure_rm_postgresqlconfiguration_info
+ azure_rm_postgresqldatabase:
+ redirect: azure.azcollection.azure_rm_postgresqldatabase
+ azure_rm_postgresqldatabase_info:
+ redirect: azure.azcollection.azure_rm_postgresqldatabase_info
+ azure_rm_postgresqlfirewallrule:
+ redirect: azure.azcollection.azure_rm_postgresqlfirewallrule
+ azure_rm_postgresqlfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_postgresqlfirewallrule_info
+ azure_rm_postgresqlserver:
+ redirect: azure.azcollection.azure_rm_postgresqlserver
+ azure_rm_postgresqlserver_info:
+ redirect: azure.azcollection.azure_rm_postgresqlserver_info
+ azure_rm_sqldatabase:
+ redirect: azure.azcollection.azure_rm_sqldatabase
+ azure_rm_sqldatabase_info:
+ redirect: azure.azcollection.azure_rm_sqldatabase_info
+ azure_rm_sqlfirewallrule:
+ redirect: azure.azcollection.azure_rm_sqlfirewallrule
+ azure_rm_sqlfirewallrule_info:
+ redirect: azure.azcollection.azure_rm_sqlfirewallrule_info
+ azure_rm_sqlserver:
+ redirect: azure.azcollection.azure_rm_sqlserver
+ azure_rm_sqlserver_info:
+ redirect: azure.azcollection.azure_rm_sqlserver_info
+ openvswitch_port:
+ redirect: openvswitch.openvswitch.openvswitch_port
+ openvswitch_db:
+ redirect: openvswitch.openvswitch.openvswitch_db
+ openvswitch_bridge:
+ redirect: openvswitch.openvswitch.openvswitch_bridge
+ vyos_ospfv2:
+ redirect: vyos.vyos.vyos_ospfv2
+ vyos_l3_interface:
+ redirect: vyos.vyos.vyos_l3_interface
+ vyos_banner:
+ redirect: vyos.vyos.vyos_banner
+ vyos_firewall_rules:
+ redirect: vyos.vyos.vyos_firewall_rules
+ vyos_static_route:
+ redirect: vyos.vyos.vyos_static_route
+ vyos_lldp_interface:
+ redirect: vyos.vyos.vyos_lldp_interface
+ vyos_vlan:
+ redirect: vyos.vyos.vyos_vlan
+ vyos_user:
+ redirect: vyos.vyos.vyos_user
+ vyos_firewall_interfaces:
+ redirect: vyos.vyos.vyos_firewall_interfaces
+ vyos_interface:
+ redirect: vyos.vyos.vyos_interface
+ vyos_firewall_global:
+ redirect: vyos.vyos.vyos_firewall_global
+ vyos_config:
+ redirect: vyos.vyos.vyos_config
+ vyos_facts:
+ redirect: vyos.vyos.vyos_facts
+ vyos_linkagg:
+ redirect: vyos.vyos.vyos_linkagg
+ vyos_ping:
+ redirect: vyos.vyos.vyos_ping
+ vyos_lag_interfaces:
+ redirect: vyos.vyos.vyos_lag_interfaces
+ vyos_lldp:
+ redirect: vyos.vyos.vyos_lldp
+ vyos_lldp_global:
+ redirect: vyos.vyos.vyos_lldp_global
+ vyos_l3_interfaces:
+ redirect: vyos.vyos.vyos_l3_interfaces
+ vyos_lldp_interfaces:
+ redirect: vyos.vyos.vyos_lldp_interfaces
+ vyos_interfaces:
+ redirect: vyos.vyos.vyos_interfaces
+ vyos_logging:
+ redirect: vyos.vyos.vyos_logging
+ vyos_static_routes:
+ redirect: vyos.vyos.vyos_static_routes
+ vyos_command:
+ redirect: vyos.vyos.vyos_command
+ vyos_system:
+ redirect: vyos.vyos.vyos_system
+ cpm_plugconfig:
+ redirect: wti.remote.cpm_plugconfig
+ cpm_plugcontrol:
+ redirect: wti.remote.cpm_plugcontrol
+ cpm_serial_port_config:
+ redirect: wti.remote.cpm_serial_port_config
+ cpm_serial_port_info:
+ redirect: wti.remote.cpm_serial_port_info
+ cpm_user:
+ redirect: wti.remote.cpm_user
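+ # The module_utils entries below route legacy ansible.module_utils import
+ # paths to their new collection homes, mirroring the module redirects above.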
+ module_utils:
+ # test entries
+ formerly_core:
+ redirect: ansible_collections.testns.testcoll.plugins.module_utils.base
+ sub1.sub2.formerly_core:
+ redirect: ansible_collections.testns.testcoll.plugins.module_utils.base
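+ # note: redirect targets may be written fully qualified (as in the test
+ # entries above) or in the short <namespace>.<collection>.<util> form used
+ # by the real entries below.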
+ # real
+ acme:
+ redirect: community.crypto.acme
+ alicloud_ecs:
+ redirect: community.general.alicloud_ecs
+ ansible_tower:
+ redirect: awx.awx.ansible_tower
+ aws.batch:
+ redirect: amazon.aws.batch
+ aws.cloudfront_facts:
+ redirect: amazon.aws.cloudfront_facts
+ aws.core:
+ redirect: amazon.aws.core
+ aws.direct_connect:
+ redirect: amazon.aws.direct_connect
+ aws.elb_utils:
+ redirect: amazon.aws.elb_utils
+ aws.elbv2:
+ redirect: amazon.aws.elbv2
+ aws.iam:
+ redirect: amazon.aws.iam
+ aws.rds:
+ redirect: amazon.aws.rds
+ aws.s3:
+ redirect: amazon.aws.s3
+ aws.urls:
+ redirect: amazon.aws.urls
+ aws.waf:
+ redirect: amazon.aws.waf
+ aws.waiters:
+ redirect: amazon.aws.waiters
+ azure_rm_common:
+ redirect: azure.azcollection.azure_rm_common
+ azure_rm_common_ext:
+ redirect: azure.azcollection.azure_rm_common_ext
+ azure_rm_common_rest:
+ redirect: azure.azcollection.azure_rm_common_rest
+ cloud:
+ redirect: community.general.cloud
+ cloudscale:
+ redirect: cloudscale_ch.cloud.api
+ cloudstack:
+ redirect: ngine_io.cloudstack.cloudstack
+ compat.ipaddress:
+ redirect: ansible.netcommon.compat.ipaddress
+ crypto:
+ redirect: community.crypto.crypto
+ database:
+ redirect: community.general.database
+ digital_ocean:
+ redirect: community.digitalocean.digital_ocean
+ dimensiondata:
+ redirect: community.general.dimensiondata
+ docker:
+ redirect: community.general.docker
+ docker.common:
+ redirect: community.general.docker.common
+ docker.swarm:
+ redirect: community.general.docker.swarm
+ ec2:
+ redirect: amazon.aws.ec2
+ ecs:
+ redirect: community.crypto.ecs
+ ecs.api:
+ redirect: community.crypto.ecs.api
+ exoscale:
+ redirect: ngine_io.exoscale.exoscale
+ f5_utils:
+ tombstone:
+ removal_date: 2019-11-06
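+ # a tombstone (rather than a redirect) marks a util that was removed with
+ # no replacement; resolving it raises a removal error instead of rerouting.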
+ firewalld:
+ redirect: ansible.posix.firewalld
+ gcdns:
+ redirect: community.general.gcdns
+ gce:
+ redirect: community.general.gce
+ gcp:
+ redirect: community.general.gcp
+ gcp_utils:
+ redirect: google.cloud.gcp_utils
+ gitlab:
+ redirect: community.general.gitlab
+ hcloud:
+ redirect: hetzner.hcloud.hcloud
+ heroku:
+ redirect: community.general.heroku
+ hetzner:
+ redirect: community.general.hetzner
+ hwc_utils:
+ redirect: community.general.hwc_utils
+ ibm_sa_utils:
+ redirect: community.general.ibm_sa_utils
+ identity:
+ redirect: community.general.identity
+ identity.keycloak:
+ redirect: community.general.identity.keycloak
+ identity.keycloak.keycloak:
+ redirect: community.general.identity.keycloak.keycloak
+ infinibox:
+ redirect: infinidat.infinibox.infinibox
+ influxdb:
+ redirect: community.general.influxdb
+ ipa:
+ redirect: community.general.ipa
+ ismount:
+ redirect: ansible.posix.mount
+ k8s.common:
+ redirect: community.kubernetes.common
+ k8s.raw:
+ redirect: community.kubernetes.raw
+ k8s.scale:
+ redirect: community.kubernetes.scale
+ known_hosts:
+ redirect: community.general.known_hosts
+ kubevirt:
+ redirect: community.general.kubevirt
+ ldap:
+ redirect: community.general.ldap
+ linode:
+ redirect: community.general.linode
+ lxd:
+ redirect: community.general.lxd
+ manageiq:
+ redirect: community.general.manageiq
+ memset:
+ redirect: community.general.memset
+ mysql:
+ redirect: community.mysql.mysql
+ net_tools.netbox.netbox_utils:
+ redirect: netbox.netbox.netbox_utils
+ net_tools.nios:
+ redirect: community.general.net_tools.nios
+ net_tools.nios.api:
+ redirect: community.general.net_tools.nios.api
+ netapp:
+ redirect: netapp.ontap.netapp
+ netapp_elementsw_module:
+ redirect: netapp.ontap.netapp_elementsw_module
+ netapp_module:
+ redirect: netapp.ontap.netapp_module
+ network.a10.a10:
+ redirect: community.network.network.a10.a10
+ network.aci.aci:
+ redirect: cisco.aci.aci
+ network.aci.mso:
+ redirect: cisco.mso.mso
+ network.aireos.aireos:
+ redirect: community.network.network.aireos.aireos
+ network.aos.aos:
+ redirect: community.network.network.aos.aos
+ network.aruba.aruba:
+ redirect: community.network.network.aruba.aruba
+ network.asa.asa:
+ redirect: cisco.asa.network.asa.asa
+ network.avi.ansible_utils:
+ redirect: community.network.network.avi.ansible_utils
+ network.avi.avi:
+ redirect: community.network.network.avi.avi
+ network.avi.avi_api:
+ redirect: community.network.network.avi.avi_api
+ network.bigswitch.bigswitch:
+ redirect: community.network.network.bigswitch.bigswitch
+ network.checkpoint.checkpoint:
+ redirect: check_point.mgmt.checkpoint
+ network.cloudengine.ce:
+ redirect: community.network.network.cloudengine.ce
+ network.cnos.cnos:
+ redirect: community.network.network.cnos.cnos
+ network.cnos.cnos_devicerules:
+ redirect: community.network.network.cnos.cnos_devicerules
+ network.cnos.cnos_errorcodes:
+ redirect: community.network.network.cnos.cnos_errorcodes
+ network.common.cfg.base:
+ redirect: ansible.netcommon.network.common.cfg.base
+ network.common.config:
+ redirect: ansible.netcommon.network.common.config
+ network.common.facts.facts:
+ redirect: ansible.netcommon.network.common.facts.facts
+ network.common.netconf:
+ redirect: ansible.netcommon.network.common.netconf
+ network.common.network:
+ redirect: ansible.netcommon.network.common.network
+ network.common.parsing:
+ redirect: ansible.netcommon.network.common.parsing
+ network.common.utils:
+ redirect: ansible.netcommon.network.common.utils
+ network.dellos10.dellos10:
+ redirect: dellemc.os10.network.os10
+ network.dellos9.dellos9:
+ redirect: dellemc.os9.network.os9
+ network.dellos6.dellos6:
+ redirect: dellemc.os6.network.os6
+ network.edgeos.edgeos:
+ redirect: community.network.network.edgeos.edgeos
+ network.edgeswitch.edgeswitch:
+ redirect: community.network.network.edgeswitch.edgeswitch
+ network.edgeswitch.edgeswitch_interface:
+ redirect: community.network.network.edgeswitch.edgeswitch_interface
+ network.enos.enos:
+ redirect: community.network.network.enos.enos
+ network.eos.argspec.facts:
+ redirect: arista.eos.network.eos.argspec.facts
+ network.eos.argspec.facts.facts:
+ redirect: arista.eos.network.eos.argspec.facts.facts
+ network.eos.argspec.interfaces:
+ redirect: arista.eos.network.eos.argspec.interfaces
+ network.eos.argspec.interfaces.interfaces:
+ redirect: arista.eos.network.eos.argspec.interfaces.interfaces
+ network.eos.argspec.l2_interfaces:
+ redirect: arista.eos.network.eos.argspec.l2_interfaces
+ network.eos.argspec.l2_interfaces.l2_interfaces:
+ redirect: arista.eos.network.eos.argspec.l2_interfaces.l2_interfaces
+ network.eos.argspec.l3_interfaces:
+ redirect: arista.eos.network.eos.argspec.l3_interfaces
+ network.eos.argspec.l3_interfaces.l3_interfaces:
+ redirect: arista.eos.network.eos.argspec.l3_interfaces.l3_interfaces
+ network.eos.argspec.lacp:
+ redirect: arista.eos.network.eos.argspec.lacp
+ network.eos.argspec.lacp.lacp:
+ redirect: arista.eos.network.eos.argspec.lacp.lacp
+ network.eos.argspec.lacp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lacp_interfaces
+ network.eos.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lacp_interfaces.lacp_interfaces
+ network.eos.argspec.lag_interfaces:
+ redirect: arista.eos.network.eos.argspec.lag_interfaces
+ network.eos.argspec.lag_interfaces.lag_interfaces:
+ redirect: arista.eos.network.eos.argspec.lag_interfaces.lag_interfaces
+ network.eos.argspec.lldp_global:
+ redirect: arista.eos.network.eos.argspec.lldp_global
+ network.eos.argspec.lldp_global.lldp_global:
+ redirect: arista.eos.network.eos.argspec.lldp_global.lldp_global
+ network.eos.argspec.lldp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lldp_interfaces
+ network.eos.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: arista.eos.network.eos.argspec.lldp_interfaces.lldp_interfaces
+ network.eos.argspec.vlans:
+ redirect: arista.eos.network.eos.argspec.vlans
+ network.eos.argspec.vlans.vlans:
+ redirect: arista.eos.network.eos.argspec.vlans.vlans
+ network.eos.config:
+ redirect: arista.eos.network.eos.config
+ network.eos.config.interfaces:
+ redirect: arista.eos.network.eos.config.interfaces
+ network.eos.config.interfaces.interfaces:
+ redirect: arista.eos.network.eos.config.interfaces.interfaces
+ network.eos.config.l2_interfaces:
+ redirect: arista.eos.network.eos.config.l2_interfaces
+ network.eos.config.l2_interfaces.l2_interfaces:
+ redirect: arista.eos.network.eos.config.l2_interfaces.l2_interfaces
+ network.eos.config.l3_interfaces:
+ redirect: arista.eos.network.eos.config.l3_interfaces
+ network.eos.config.l3_interfaces.l3_interfaces:
+ redirect: arista.eos.network.eos.config.l3_interfaces.l3_interfaces
+ network.eos.config.lacp:
+ redirect: arista.eos.network.eos.config.lacp
+ network.eos.config.lacp.lacp:
+ redirect: arista.eos.network.eos.config.lacp.lacp
+ network.eos.config.lacp_interfaces:
+ redirect: arista.eos.network.eos.config.lacp_interfaces
+ network.eos.config.lacp_interfaces.lacp_interfaces:
+ redirect: arista.eos.network.eos.config.lacp_interfaces.lacp_interfaces
+ network.eos.config.lag_interfaces:
+ redirect: arista.eos.network.eos.config.lag_interfaces
+ network.eos.config.lag_interfaces.lag_interfaces:
+ redirect: arista.eos.network.eos.config.lag_interfaces.lag_interfaces
+ network.eos.config.lldp_global:
+ redirect: arista.eos.network.eos.config.lldp_global
+ network.eos.config.lldp_global.lldp_global:
+ redirect: arista.eos.network.eos.config.lldp_global.lldp_global
+ network.eos.config.lldp_interfaces:
+ redirect: arista.eos.network.eos.config.lldp_interfaces
+ network.eos.config.lldp_interfaces.lldp_interfaces:
+ redirect: arista.eos.network.eos.config.lldp_interfaces.lldp_interfaces
+ network.eos.config.vlans:
+ redirect: arista.eos.network.eos.config.vlans
+ network.eos.config.vlans.vlans:
+ redirect: arista.eos.network.eos.config.vlans.vlans
+ network.eos.eos:
+ redirect: arista.eos.network.eos.eos
+ network.eos.facts:
+ redirect: arista.eos.network.eos.facts
+ network.eos.facts.facts:
+ redirect: arista.eos.network.eos.facts.facts
+ network.eos.facts.interfaces:
+ redirect: arista.eos.network.eos.facts.interfaces
+ network.eos.facts.interfaces.interfaces:
+ redirect: arista.eos.network.eos.facts.interfaces.interfaces
+ network.eos.facts.l2_interfaces:
+ redirect: arista.eos.network.eos.facts.l2_interfaces
+ network.eos.facts.l2_interfaces.l2_interfaces:
+ redirect: arista.eos.network.eos.facts.l2_interfaces.l2_interfaces
+ network.eos.facts.l3_interfaces:
+ redirect: arista.eos.network.eos.facts.l3_interfaces
+ network.eos.facts.l3_interfaces.l3_interfaces:
+ redirect: arista.eos.network.eos.facts.l3_interfaces.l3_interfaces
+ network.eos.facts.lacp:
+ redirect: arista.eos.network.eos.facts.lacp
+ network.eos.facts.lacp.lacp:
+ redirect: arista.eos.network.eos.facts.lacp.lacp
+ network.eos.facts.lacp_interfaces:
+ redirect: arista.eos.network.eos.facts.lacp_interfaces
+ network.eos.facts.lacp_interfaces.lacp_interfaces:
+ redirect: arista.eos.network.eos.facts.lacp_interfaces.lacp_interfaces
+ network.eos.facts.lag_interfaces:
+ redirect: arista.eos.network.eos.facts.lag_interfaces
+ network.eos.facts.lag_interfaces.lag_interfaces:
+ redirect: arista.eos.network.eos.facts.lag_interfaces.lag_interfaces
+ network.eos.facts.legacy:
+ redirect: arista.eos.network.eos.facts.legacy
+ network.eos.facts.legacy.base:
+ redirect: arista.eos.network.eos.facts.legacy.base
+ network.eos.facts.lldp_global:
+ redirect: arista.eos.network.eos.facts.lldp_global
+ network.eos.facts.lldp_global.lldp_global:
+ redirect: arista.eos.network.eos.facts.lldp_global.lldp_global
+ network.eos.facts.lldp_interfaces:
+ redirect: arista.eos.network.eos.facts.lldp_interfaces
+ network.eos.facts.lldp_interfaces.lldp_interfaces:
+ redirect: arista.eos.network.eos.facts.lldp_interfaces.lldp_interfaces
+ network.eos.facts.vlans:
+ redirect: arista.eos.network.eos.facts.vlans
+ network.eos.facts.vlans.vlans:
+ redirect: arista.eos.network.eos.facts.vlans.vlans
+ network.eos.providers:
+ redirect: arista.eos.network.eos.providers
+ network.eos.providers.cli:
+ redirect: arista.eos.network.eos.providers.cli
+ network.eos.providers.cli.config:
+ redirect: arista.eos.network.eos.providers.cli.config
+ network.eos.providers.cli.config.bgp:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp
+ network.eos.providers.cli.config.bgp.address_family:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp.address_family
+ network.eos.providers.cli.config.bgp.neighbors:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp.neighbors
+ network.eos.providers.cli.config.bgp.process:
+ redirect: arista.eos.network.eos.providers.cli.config.bgp.process
+ network.eos.providers.module:
+ redirect: arista.eos.network.eos.providers.module
+ network.eos.providers.providers:
+ redirect: arista.eos.network.eos.providers.providers
+ network.eos.utils:
+ redirect: arista.eos.network.eos.utils
+ network.eos.utils.utils:
+ redirect: arista.eos.network.eos.utils.utils
+ network.eric_eccli.eric_eccli:
+ redirect: community.network.network.eric_eccli.eric_eccli
+ network.exos.argspec.facts.facts:
+ redirect: community.network.network.exos.argspec.facts.facts
+ network.exos.argspec.lldp_global:
+ redirect: community.network.network.exos.argspec.lldp_global
+ network.exos.argspec.lldp_global.lldp_global:
+ redirect: community.network.network.exos.argspec.lldp_global.lldp_global
+ network.exos.config.lldp_global:
+ redirect: community.network.network.exos.config.lldp_global
+ network.exos.config.lldp_global.lldp_global:
+ redirect: community.network.network.exos.config.lldp_global.lldp_global
+ network.exos.exos:
+ redirect: community.network.network.exos.exos
+ network.exos.facts.facts:
+ redirect: community.network.network.exos.facts.facts
+ network.exos.facts.legacy:
+ redirect: community.network.network.exos.facts.legacy
+ network.exos.facts.legacy.base:
+ redirect: community.network.network.exos.facts.legacy.base
+ network.exos.facts.lldp_global:
+ redirect: community.network.network.exos.facts.lldp_global
+ network.exos.facts.lldp_global.lldp_global:
+ redirect: community.network.network.exos.facts.lldp_global.lldp_global
+ network.exos.utils.utils:
+ redirect: community.network.network.exos.utils.utils
+ network.f5.bigip:
+ redirect: f5networks.f5_modules.bigip
+ network.f5.bigiq:
+ redirect: f5networks.f5_modules.bigiq
+ network.f5.common:
+ redirect: f5networks.f5_modules.common
+ network.f5.compare:
+ redirect: f5networks.f5_modules.compare
+ network.f5.icontrol:
+ redirect: f5networks.f5_modules.icontrol
+ network.f5.ipaddress:
+ redirect: f5networks.f5_modules.ipaddress
+ # FIXME: missing
+ #network.f5.iworkflow:
+ # redirect: f5networks.f5_modules.iworkflow
+ #network.f5.legacy:
+ # redirect: f5networks.f5_modules.legacy
+ network.f5.urls:
+ redirect: f5networks.f5_modules.urls
+ network.fortianalyzer.common:
+ redirect: community.network.network.fortianalyzer.common
+ network.fortianalyzer.fortianalyzer:
+ redirect: community.network.network.fortianalyzer.fortianalyzer
+ network.fortimanager.common:
+ redirect: fortinet.fortimanager.common
+ network.fortimanager.fortimanager:
+ redirect: fortinet.fortimanager.fortimanager
+ network.fortios.argspec:
+ redirect: fortinet.fortios.fortios.argspec
+ network.fortios.argspec.facts:
+ redirect: fortinet.fortios.fortios.argspec.facts
+ network.fortios.argspec.facts.facts:
+ redirect: fortinet.fortios.fortios.argspec.facts.facts
+ network.fortios.argspec.system:
+ redirect: fortinet.fortios.fortios.argspec.system
+ network.fortios.argspec.system.system:
+ redirect: fortinet.fortios.fortios.argspec.system.system
+ network.fortios.facts:
+ redirect: fortinet.fortios.fortios.facts
+ network.fortios.facts.facts:
+ redirect: fortinet.fortios.fortios.facts.facts
+ network.fortios.facts.system:
+ redirect: fortinet.fortios.fortios.facts.system
+ network.fortios.facts.system.system:
+ redirect: fortinet.fortios.fortios.facts.system.system
+ network.fortios.fortios:
+ redirect: fortinet.fortios.fortios.fortios
+ network.frr:
+ redirect: frr.frr.network.frr
+ network.frr.frr:
+ redirect: frr.frr.network.frr.frr
+ network.frr.providers:
+ redirect: frr.frr.network.frr.providers
+ network.frr.providers.cli:
+ redirect: frr.frr.network.frr.providers.cli
+ network.frr.providers.cli.config:
+ redirect: frr.frr.network.frr.providers.cli.config
+ network.frr.providers.cli.config.base:
+ redirect: frr.frr.network.frr.providers.cli.config.base
+ network.frr.providers.cli.config.bgp:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp
+ network.frr.providers.cli.config.bgp.address_family:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp.address_family
+ network.frr.providers.cli.config.bgp.neighbors:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp.neighbors
+ network.frr.providers.cli.config.bgp.process:
+ redirect: frr.frr.network.frr.providers.cli.config.bgp.process
+ network.frr.providers.module:
+ redirect: frr.frr.network.frr.providers.module
+ network.frr.providers.providers:
+ redirect: frr.frr.network.frr.providers.providers
+ network.ftd:
+ redirect: community.network.network.ftd
+ network.ftd.common:
+ redirect: community.network.network.ftd.common
+ network.ftd.configuration:
+ redirect: community.network.network.ftd.configuration
+ network.ftd.device:
+ redirect: community.network.network.ftd.device
+ network.ftd.fdm_swagger_client:
+ redirect: community.network.network.ftd.fdm_swagger_client
+ network.ftd.operation:
+ redirect: community.network.network.ftd.operation
+ network.icx:
+ redirect: community.network.network.icx
+ network.icx.icx:
+ redirect: community.network.network.icx.icx
+ network.ingate:
+ redirect: community.network.network.ingate
+ network.ingate.common:
+ redirect: community.network.network.ingate.common
+ network.ios:
+ redirect: cisco.ios.network.ios
+ network.ios.argspec:
+ redirect: cisco.ios.network.ios.argspec
+ network.ios.argspec.facts:
+ redirect: cisco.ios.network.ios.argspec.facts
+ network.ios.argspec.facts.facts:
+ redirect: cisco.ios.network.ios.argspec.facts.facts
+ network.ios.argspec.interfaces:
+ redirect: cisco.ios.network.ios.argspec.interfaces
+ network.ios.argspec.interfaces.interfaces:
+ redirect: cisco.ios.network.ios.argspec.interfaces.interfaces
+ network.ios.argspec.l2_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l2_interfaces
+ network.ios.argspec.l2_interfaces.l2_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l2_interfaces.l2_interfaces
+ network.ios.argspec.l3_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l3_interfaces
+ network.ios.argspec.l3_interfaces.l3_interfaces:
+ redirect: cisco.ios.network.ios.argspec.l3_interfaces.l3_interfaces
+ network.ios.argspec.lacp:
+ redirect: cisco.ios.network.ios.argspec.lacp
+ network.ios.argspec.lacp.lacp:
+ redirect: cisco.ios.network.ios.argspec.lacp.lacp
+ network.ios.argspec.lacp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lacp_interfaces
+ network.ios.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lacp_interfaces.lacp_interfaces
+ network.ios.argspec.lag_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lag_interfaces
+ network.ios.argspec.lag_interfaces.lag_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lag_interfaces.lag_interfaces
+ network.ios.argspec.lldp_global:
+ redirect: cisco.ios.network.ios.argspec.lldp_global
+ network.ios.argspec.lldp_global.lldp_global:
+ redirect: cisco.ios.network.ios.argspec.lldp_global.lldp_global
+ network.ios.argspec.lldp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lldp_interfaces
+ network.ios.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.ios.network.ios.argspec.lldp_interfaces.lldp_interfaces
+ network.ios.argspec.vlans:
+ redirect: cisco.ios.network.ios.argspec.vlans
+ network.ios.argspec.vlans.vlans:
+ redirect: cisco.ios.network.ios.argspec.vlans.vlans
+ network.ios.config:
+ redirect: cisco.ios.network.ios.config
+ network.ios.config.interfaces:
+ redirect: cisco.ios.network.ios.config.interfaces
+ network.ios.config.interfaces.interfaces:
+ redirect: cisco.ios.network.ios.config.interfaces.interfaces
+ network.ios.config.l2_interfaces:
+ redirect: cisco.ios.network.ios.config.l2_interfaces
+ network.ios.config.l2_interfaces.l2_interfaces:
+ redirect: cisco.ios.network.ios.config.l2_interfaces.l2_interfaces
+ network.ios.config.l3_interfaces:
+ redirect: cisco.ios.network.ios.config.l3_interfaces
+ network.ios.config.l3_interfaces.l3_interfaces:
+ redirect: cisco.ios.network.ios.config.l3_interfaces.l3_interfaces
+ network.ios.config.lacp:
+ redirect: cisco.ios.network.ios.config.lacp
+ network.ios.config.lacp.lacp:
+ redirect: cisco.ios.network.ios.config.lacp.lacp
+ network.ios.config.lacp_interfaces:
+ redirect: cisco.ios.network.ios.config.lacp_interfaces
+ network.ios.config.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.ios.network.ios.config.lacp_interfaces.lacp_interfaces
+ network.ios.config.lag_interfaces:
+ redirect: cisco.ios.network.ios.config.lag_interfaces
+ network.ios.config.lag_interfaces.lag_interfaces:
+ redirect: cisco.ios.network.ios.config.lag_interfaces.lag_interfaces
+ network.ios.config.lldp_global:
+ redirect: cisco.ios.network.ios.config.lldp_global
+ network.ios.config.lldp_global.lldp_global:
+ redirect: cisco.ios.network.ios.config.lldp_global.lldp_global
+ network.ios.config.lldp_interfaces:
+ redirect: cisco.ios.network.ios.config.lldp_interfaces
+ network.ios.config.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.ios.network.ios.config.lldp_interfaces.lldp_interfaces
+ network.ios.config.vlans:
+ redirect: cisco.ios.network.ios.config.vlans
+ network.ios.config.vlans.vlans:
+ redirect: cisco.ios.network.ios.config.vlans.vlans
+ network.ios.facts:
+ redirect: cisco.ios.network.ios.facts
+ network.ios.facts.facts:
+ redirect: cisco.ios.network.ios.facts.facts
+ network.ios.facts.interfaces:
+ redirect: cisco.ios.network.ios.facts.interfaces
+ network.ios.facts.interfaces.interfaces:
+ redirect: cisco.ios.network.ios.facts.interfaces.interfaces
+ network.ios.facts.l2_interfaces:
+ redirect: cisco.ios.network.ios.facts.l2_interfaces
+ network.ios.facts.l2_interfaces.l2_interfaces:
+ redirect: cisco.ios.network.ios.facts.l2_interfaces.l2_interfaces
+ network.ios.facts.l3_interfaces:
+ redirect: cisco.ios.network.ios.facts.l3_interfaces
+ network.ios.facts.l3_interfaces.l3_interfaces:
+ redirect: cisco.ios.network.ios.facts.l3_interfaces.l3_interfaces
+ network.ios.facts.lacp:
+ redirect: cisco.ios.network.ios.facts.lacp
+ network.ios.facts.lacp.lacp:
+ redirect: cisco.ios.network.ios.facts.lacp.lacp
+ network.ios.facts.lacp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lacp_interfaces
+ network.ios.facts.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lacp_interfaces.lacp_interfaces
+ network.ios.facts.lag_interfaces:
+ redirect: cisco.ios.network.ios.facts.lag_interfaces
+ network.ios.facts.lag_interfaces.lag_interfaces:
+ redirect: cisco.ios.network.ios.facts.lag_interfaces.lag_interfaces
+ network.ios.facts.legacy:
+ redirect: cisco.ios.network.ios.facts.legacy
+ network.ios.facts.legacy.base:
+ redirect: cisco.ios.network.ios.facts.legacy.base
+ network.ios.facts.lldp_global:
+ redirect: cisco.ios.network.ios.facts.lldp_global
+ network.ios.facts.lldp_global.lldp_global:
+ redirect: cisco.ios.network.ios.facts.lldp_global.lldp_global
+ network.ios.facts.lldp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lldp_interfaces
+ network.ios.facts.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.ios.network.ios.facts.lldp_interfaces.lldp_interfaces
+ network.ios.facts.vlans:
+ redirect: cisco.ios.network.ios.facts.vlans
+ network.ios.facts.vlans.vlans:
+ redirect: cisco.ios.network.ios.facts.vlans.vlans
+ network.ios.ios:
+ redirect: cisco.ios.network.ios.ios
+ network.ios.providers:
+ redirect: cisco.ios.network.ios.providers
+ network.ios.providers.cli:
+ redirect: cisco.ios.network.ios.providers.cli
+ network.ios.providers.cli.config:
+ redirect: cisco.ios.network.ios.providers.cli.config
+ network.ios.providers.cli.config.base:
+ redirect: cisco.ios.network.ios.providers.cli.config.base
+ network.ios.providers.cli.config.bgp:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp
+ network.ios.providers.cli.config.bgp.address_family:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp.address_family
+ network.ios.providers.cli.config.bgp.neighbors:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp.neighbors
+ network.ios.providers.cli.config.bgp.process:
+ redirect: cisco.ios.network.ios.providers.cli.config.bgp.process
+ network.ios.providers.module:
+ redirect: cisco.ios.network.ios.providers.module
+ network.ios.providers.providers:
+ redirect: cisco.ios.network.ios.providers.providers
+ network.ios.utils:
+ redirect: cisco.ios.network.ios.utils
+ network.ios.utils.utils:
+ redirect: cisco.ios.network.ios.utils.utils
+ network.iosxr:
+ redirect: cisco.iosxr.network.iosxr
+ network.iosxr.argspec:
+ redirect: cisco.iosxr.network.iosxr.argspec
+ network.iosxr.argspec.facts:
+ redirect: cisco.iosxr.network.iosxr.argspec.facts
+ network.iosxr.argspec.facts.facts:
+ redirect: cisco.iosxr.network.iosxr.argspec.facts.facts
+ network.iosxr.argspec.interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.interfaces
+ network.iosxr.argspec.interfaces.interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.interfaces.interfaces
+ network.iosxr.argspec.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l2_interfaces
+ network.iosxr.argspec.l2_interfaces.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l2_interfaces.l2_interfaces
+ network.iosxr.argspec.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l3_interfaces
+ network.iosxr.argspec.l3_interfaces.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.l3_interfaces.l3_interfaces
+ network.iosxr.argspec.lacp:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp
+ network.iosxr.argspec.lacp.lacp:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp.lacp
+ network.iosxr.argspec.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp_interfaces
+ network.iosxr.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lacp_interfaces.lacp_interfaces
+ network.iosxr.argspec.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lag_interfaces
+ network.iosxr.argspec.lag_interfaces.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lag_interfaces.lag_interfaces
+ network.iosxr.argspec.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_global
+ network.iosxr.argspec.lldp_global.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_global.lldp_global
+ network.iosxr.argspec.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_interfaces
+ network.iosxr.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.argspec.lldp_interfaces.lldp_interfaces
+ network.iosxr.config:
+ redirect: cisco.iosxr.network.iosxr.config
+ network.iosxr.config.interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.interfaces
+ network.iosxr.config.interfaces.interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.interfaces.interfaces
+ network.iosxr.config.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l2_interfaces
+ network.iosxr.config.l2_interfaces.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l2_interfaces.l2_interfaces
+ network.iosxr.config.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l3_interfaces
+ network.iosxr.config.l3_interfaces.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.l3_interfaces.l3_interfaces
+ network.iosxr.config.lacp:
+ redirect: cisco.iosxr.network.iosxr.config.lacp
+ network.iosxr.config.lacp.lacp:
+ redirect: cisco.iosxr.network.iosxr.config.lacp.lacp
+ network.iosxr.config.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lacp_interfaces
+ network.iosxr.config.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lacp_interfaces.lacp_interfaces
+ network.iosxr.config.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lag_interfaces
+ network.iosxr.config.lag_interfaces.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lag_interfaces.lag_interfaces
+ network.iosxr.config.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_global
+ network.iosxr.config.lldp_global.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_global.lldp_global
+ network.iosxr.config.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_interfaces
+ network.iosxr.config.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.config.lldp_interfaces.lldp_interfaces
+ network.iosxr.facts:
+ redirect: cisco.iosxr.network.iosxr.facts
+ network.iosxr.facts.facts:
+ redirect: cisco.iosxr.network.iosxr.facts.facts
+ network.iosxr.facts.interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.interfaces
+ network.iosxr.facts.interfaces.interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.interfaces.interfaces
+ network.iosxr.facts.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l2_interfaces
+ network.iosxr.facts.l2_interfaces.l2_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l2_interfaces.l2_interfaces
+ network.iosxr.facts.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l3_interfaces
+ network.iosxr.facts.l3_interfaces.l3_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.l3_interfaces.l3_interfaces
+ network.iosxr.facts.lacp:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp
+ network.iosxr.facts.lacp.lacp:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp.lacp
+ network.iosxr.facts.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp_interfaces
+ network.iosxr.facts.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lacp_interfaces.lacp_interfaces
+ network.iosxr.facts.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lag_interfaces
+ network.iosxr.facts.lag_interfaces.lag_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lag_interfaces.lag_interfaces
+ network.iosxr.facts.legacy:
+ redirect: cisco.iosxr.network.iosxr.facts.legacy
+ network.iosxr.facts.legacy.base:
+ redirect: cisco.iosxr.network.iosxr.facts.legacy.base
+ network.iosxr.facts.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_global
+ network.iosxr.facts.lldp_global.lldp_global:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_global.lldp_global
+ network.iosxr.facts.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_interfaces
+ network.iosxr.facts.lldp_interfaces.lldp_interfaces:
+ redirect: cisco.iosxr.network.iosxr.facts.lldp_interfaces.lldp_interfaces
+ network.iosxr.iosxr:
+ redirect: cisco.iosxr.network.iosxr.iosxr
+ network.iosxr.providers:
+ redirect: cisco.iosxr.network.iosxr.providers
+ network.iosxr.providers.cli:
+ redirect: cisco.iosxr.network.iosxr.providers.cli
+ network.iosxr.providers.cli.config:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config
+ network.iosxr.providers.cli.config.bgp:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp
+ network.iosxr.providers.cli.config.bgp.address_family:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp.address_family
+ network.iosxr.providers.cli.config.bgp.neighbors:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp.neighbors
+ network.iosxr.providers.cli.config.bgp.process:
+ redirect: cisco.iosxr.network.iosxr.providers.cli.config.bgp.process
+ network.iosxr.providers.module:
+ redirect: cisco.iosxr.network.iosxr.providers.module
+ network.iosxr.providers.providers:
+ redirect: cisco.iosxr.network.iosxr.providers.providers
+ network.iosxr.utils:
+ redirect: cisco.iosxr.network.iosxr.utils
+ network.iosxr.utils.utils:
+ redirect: cisco.iosxr.network.iosxr.utils.utils
+ network.ironware:
+ redirect: community.network.network.ironware
+ network.ironware.ironware:
+ redirect: community.network.network.ironware.ironware
+ network.junos:
+ redirect: junipernetworks.junos.network.junos
+ network.junos.argspec:
+ redirect: junipernetworks.junos.network.junos.argspec
+ network.junos.argspec.facts:
+ redirect: junipernetworks.junos.network.junos.argspec.facts
+ network.junos.argspec.facts.facts:
+ redirect: junipernetworks.junos.network.junos.argspec.facts.facts
+ network.junos.argspec.interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.interfaces
+ network.junos.argspec.interfaces.interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.interfaces.interfaces
+ network.junos.argspec.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l2_interfaces
+ network.junos.argspec.l2_interfaces.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l2_interfaces.l2_interfaces
+ network.junos.argspec.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l3_interfaces
+ network.junos.argspec.l3_interfaces.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.l3_interfaces.l3_interfaces
+ network.junos.argspec.lacp:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp
+ network.junos.argspec.lacp.lacp:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp.lacp
+ network.junos.argspec.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp_interfaces
+ network.junos.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lacp_interfaces.lacp_interfaces
+ network.junos.argspec.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lag_interfaces
+ network.junos.argspec.lag_interfaces.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lag_interfaces.lag_interfaces
+ network.junos.argspec.lldp_global:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_global
+ network.junos.argspec.lldp_global.lldp_global:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_global.lldp_global
+ network.junos.argspec.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_interfaces
+ network.junos.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.argspec.lldp_interfaces.lldp_interfaces
+ network.junos.argspec.vlans:
+ redirect: junipernetworks.junos.network.junos.argspec.vlans
+ network.junos.argspec.vlans.vlans:
+ redirect: junipernetworks.junos.network.junos.argspec.vlans.vlans
+ network.junos.config:
+ redirect: junipernetworks.junos.network.junos.config
+ network.junos.config.interfaces:
+ redirect: junipernetworks.junos.network.junos.config.interfaces
+ network.junos.config.interfaces.interfaces:
+ redirect: junipernetworks.junos.network.junos.config.interfaces.interfaces
+ network.junos.config.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l2_interfaces
+ network.junos.config.l2_interfaces.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l2_interfaces.l2_interfaces
+ network.junos.config.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l3_interfaces
+ network.junos.config.l3_interfaces.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.l3_interfaces.l3_interfaces
+ network.junos.config.lacp:
+ redirect: junipernetworks.junos.network.junos.config.lacp
+ network.junos.config.lacp.lacp:
+ redirect: junipernetworks.junos.network.junos.config.lacp.lacp
+ network.junos.config.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lacp_interfaces
+ network.junos.config.lacp_interfaces.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lacp_interfaces.lacp_interfaces
+ network.junos.config.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lag_interfaces
+ network.junos.config.lag_interfaces.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lag_interfaces.lag_interfaces
+ network.junos.config.lldp_global:
+ redirect: junipernetworks.junos.network.junos.config.lldp_global
+ network.junos.config.lldp_global.lldp_global:
+ redirect: junipernetworks.junos.network.junos.config.lldp_global.lldp_global
+ network.junos.config.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lldp_interfaces
+ network.junos.config.lldp_interfaces.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.config.lldp_interfaces.lldp_interfaces
+ network.junos.config.vlans:
+ redirect: junipernetworks.junos.network.junos.config.vlans
+ network.junos.config.vlans.vlans:
+ redirect: junipernetworks.junos.network.junos.config.vlans.vlans
+ network.junos.facts:
+ redirect: junipernetworks.junos.network.junos.facts
+ network.junos.facts.facts:
+ redirect: junipernetworks.junos.network.junos.facts.facts
+ network.junos.facts.interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.interfaces
+ network.junos.facts.interfaces.interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.interfaces.interfaces
+ network.junos.facts.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l2_interfaces
+ network.junos.facts.l2_interfaces.l2_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l2_interfaces.l2_interfaces
+ network.junos.facts.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l3_interfaces
+ network.junos.facts.l3_interfaces.l3_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.l3_interfaces.l3_interfaces
+ network.junos.facts.lacp:
+ redirect: junipernetworks.junos.network.junos.facts.lacp
+ network.junos.facts.lacp.lacp:
+ redirect: junipernetworks.junos.network.junos.facts.lacp.lacp
+ network.junos.facts.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lacp_interfaces
+ network.junos.facts.lacp_interfaces.lacp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lacp_interfaces.lacp_interfaces
+ network.junos.facts.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lag_interfaces
+ network.junos.facts.lag_interfaces.lag_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lag_interfaces.lag_interfaces
+ network.junos.facts.legacy:
+ redirect: junipernetworks.junos.network.junos.facts.legacy
+ network.junos.facts.legacy.base:
+ redirect: junipernetworks.junos.network.junos.facts.legacy.base
+ network.junos.facts.lldp_global:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_global
+ network.junos.facts.lldp_global.lldp_global:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_global.lldp_global
+ network.junos.facts.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_interfaces
+ network.junos.facts.lldp_interfaces.lldp_interfaces:
+ redirect: junipernetworks.junos.network.junos.facts.lldp_interfaces.lldp_interfaces
+ network.junos.facts.vlans:
+ redirect: junipernetworks.junos.network.junos.facts.vlans
+ network.junos.facts.vlans.vlans:
+ redirect: junipernetworks.junos.network.junos.facts.vlans.vlans
+ network.junos.junos:
+ redirect: junipernetworks.junos.network.junos.junos
+ network.junos.utils:
+ redirect: junipernetworks.junos.network.junos.utils
+ network.junos.utils.utils:
+ redirect: junipernetworks.junos.network.junos.utils.utils
+ network.meraki:
+ redirect: cisco.meraki.network.meraki
+ network.meraki.meraki:
+ redirect: cisco.meraki.network.meraki.meraki
+ network.netconf:
+ redirect: ansible.netcommon.network.netconf
+ network.netconf.netconf:
+ redirect: ansible.netcommon.network.netconf.netconf
+ network.netscaler:
+ redirect: community.network.network.netscaler
+ network.netscaler.netscaler:
+ redirect: community.network.network.netscaler.netscaler
+ network.netvisor:
+ redirect: community.network.network.netvisor
+ network.netvisor.netvisor:
+ redirect: community.network.network.netvisor.netvisor
+ network.netvisor.pn_nvos:
+ redirect: community.network.network.netvisor.pn_nvos
+ network.nos:
+ redirect: community.network.network.nos
+ network.nos.nos:
+ redirect: community.network.network.nos.nos
+ network.nso:
+ redirect: community.network.network.nso
+ network.nso.nso:
+ redirect: community.network.network.nso.nso
+ network.nxos:
+ redirect: cisco.nxos.network.nxos
+ network.nxos.argspec:
+ redirect: cisco.nxos.network.nxos.argspec
+ network.nxos.argspec.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.bfd_interfaces
+ network.nxos.argspec.bfd_interfaces.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.bfd_interfaces.bfd_interfaces
+ network.nxos.argspec.facts:
+ redirect: cisco.nxos.network.nxos.argspec.facts
+ network.nxos.argspec.facts.facts:
+ redirect: cisco.nxos.network.nxos.argspec.facts.facts
+ network.nxos.argspec.interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.interfaces
+ network.nxos.argspec.interfaces.interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.interfaces.interfaces
+ network.nxos.argspec.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l2_interfaces
+ network.nxos.argspec.l2_interfaces.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l2_interfaces.l2_interfaces
+ network.nxos.argspec.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l3_interfaces
+ network.nxos.argspec.l3_interfaces.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.l3_interfaces.l3_interfaces
+ network.nxos.argspec.lacp:
+ redirect: cisco.nxos.network.nxos.argspec.lacp
+ network.nxos.argspec.lacp.lacp:
+ redirect: cisco.nxos.network.nxos.argspec.lacp.lacp
+ network.nxos.argspec.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lacp_interfaces
+ network.nxos.argspec.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lacp_interfaces.lacp_interfaces
+ network.nxos.argspec.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lag_interfaces
+ network.nxos.argspec.lag_interfaces.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.argspec.lag_interfaces.lag_interfaces
+ network.nxos.argspec.lldp_global:
+ redirect: cisco.nxos.network.nxos.argspec.lldp_global
+ network.nxos.argspec.lldp_global.lldp_global:
+ redirect: cisco.nxos.network.nxos.argspec.lldp_global.lldp_global
+ network.nxos.argspec.telemetry:
+ redirect: cisco.nxos.network.nxos.argspec.telemetry
+ network.nxos.argspec.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.argspec.telemetry.telemetry
+ network.nxos.argspec.vlans:
+ redirect: cisco.nxos.network.nxos.argspec.vlans
+ network.nxos.argspec.vlans.vlans:
+ redirect: cisco.nxos.network.nxos.argspec.vlans.vlans
+ network.nxos.cmdref:
+ redirect: cisco.nxos.network.nxos.cmdref
+ network.nxos.cmdref.telemetry:
+ redirect: cisco.nxos.network.nxos.cmdref.telemetry
+ network.nxos.cmdref.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.cmdref.telemetry.telemetry
+ network.nxos.config:
+ redirect: cisco.nxos.network.nxos.config
+ network.nxos.config.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.config.bfd_interfaces
+ network.nxos.config.bfd_interfaces.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.config.bfd_interfaces.bfd_interfaces
+ network.nxos.config.interfaces:
+ redirect: cisco.nxos.network.nxos.config.interfaces
+ network.nxos.config.interfaces.interfaces:
+ redirect: cisco.nxos.network.nxos.config.interfaces.interfaces
+ network.nxos.config.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l2_interfaces
+ network.nxos.config.l2_interfaces.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l2_interfaces.l2_interfaces
+ network.nxos.config.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l3_interfaces
+ network.nxos.config.l3_interfaces.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.config.l3_interfaces.l3_interfaces
+ network.nxos.config.lacp:
+ redirect: cisco.nxos.network.nxos.config.lacp
+ network.nxos.config.lacp.lacp:
+ redirect: cisco.nxos.network.nxos.config.lacp.lacp
+ network.nxos.config.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lacp_interfaces
+ network.nxos.config.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lacp_interfaces.lacp_interfaces
+ network.nxos.config.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lag_interfaces
+ network.nxos.config.lag_interfaces.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.config.lag_interfaces.lag_interfaces
+ network.nxos.config.lldp_global:
+ redirect: cisco.nxos.network.nxos.config.lldp_global
+ network.nxos.config.lldp_global.lldp_global:
+ redirect: cisco.nxos.network.nxos.config.lldp_global.lldp_global
+ network.nxos.config.telemetry:
+ redirect: cisco.nxos.network.nxos.config.telemetry
+ network.nxos.config.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.config.telemetry.telemetry
+ network.nxos.config.vlans:
+ redirect: cisco.nxos.network.nxos.config.vlans
+ network.nxos.config.vlans.vlans:
+ redirect: cisco.nxos.network.nxos.config.vlans.vlans
+ network.nxos.facts:
+ redirect: cisco.nxos.network.nxos.facts
+ network.nxos.facts.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.bfd_interfaces
+ network.nxos.facts.bfd_interfaces.bfd_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.bfd_interfaces.bfd_interfaces
+ network.nxos.facts.facts:
+ redirect: cisco.nxos.network.nxos.facts.facts
+ network.nxos.facts.interfaces:
+ redirect: cisco.nxos.network.nxos.facts.interfaces
+ network.nxos.facts.interfaces.interfaces:
+ redirect: cisco.nxos.network.nxos.facts.interfaces.interfaces
+ network.nxos.facts.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l2_interfaces
+ network.nxos.facts.l2_interfaces.l2_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l2_interfaces.l2_interfaces
+ network.nxos.facts.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l3_interfaces
+ network.nxos.facts.l3_interfaces.l3_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.l3_interfaces.l3_interfaces
+ network.nxos.facts.lacp:
+ redirect: cisco.nxos.network.nxos.facts.lacp
+ network.nxos.facts.lacp.lacp:
+ redirect: cisco.nxos.network.nxos.facts.lacp.lacp
+ network.nxos.facts.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lacp_interfaces
+ network.nxos.facts.lacp_interfaces.lacp_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lacp_interfaces.lacp_interfaces
+ network.nxos.facts.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lag_interfaces
+ network.nxos.facts.lag_interfaces.lag_interfaces:
+ redirect: cisco.nxos.network.nxos.facts.lag_interfaces.lag_interfaces
+ network.nxos.facts.legacy:
+ redirect: cisco.nxos.network.nxos.facts.legacy
+ network.nxos.facts.legacy.base:
+ redirect: cisco.nxos.network.nxos.facts.legacy.base
+ network.nxos.facts.lldp_global:
+ redirect: cisco.nxos.network.nxos.facts.lldp_global
+ network.nxos.facts.lldp_global.lldp_global:
+ redirect: cisco.nxos.network.nxos.facts.lldp_global.lldp_global
+ network.nxos.facts.telemetry:
+ redirect: cisco.nxos.network.nxos.facts.telemetry
+ network.nxos.facts.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.facts.telemetry.telemetry
+ network.nxos.facts.vlans:
+ redirect: cisco.nxos.network.nxos.facts.vlans
+ network.nxos.facts.vlans.vlans:
+ redirect: cisco.nxos.network.nxos.facts.vlans.vlans
+ network.nxos.nxos:
+ redirect: cisco.nxos.network.nxos.nxos
+ network.nxos.utils:
+ redirect: cisco.nxos.network.nxos.utils
+ network.nxos.utils.telemetry:
+ redirect: cisco.nxos.network.nxos.utils.telemetry
+ network.nxos.utils.telemetry.telemetry:
+ redirect: cisco.nxos.network.nxos.utils.telemetry.telemetry
+ network.nxos.utils.utils:
+ redirect: cisco.nxos.network.nxos.utils.utils
+ network.onyx:
+ redirect: mellanox.onyx.network.onyx
+ network.onyx.onyx:
+ redirect: mellanox.onyx.network.onyx.onyx
+ network.ordnance:
+ redirect: community.network.network.ordnance
+ network.ordnance.ordnance:
+ redirect: community.network.network.ordnance.ordnance
+ network.panos:
+ redirect: community.network.network.panos
+ network.panos.panos:
+ redirect: community.network.network.panos.panos
+ network.restconf:
+ redirect: ansible.netcommon.network.restconf
+ network.restconf.restconf:
+ redirect: ansible.netcommon.network.restconf.restconf
+ network.routeros:
+ redirect: community.network.network.routeros
+ network.routeros.routeros:
+ redirect: community.network.network.routeros.routeros
+ network.skydive:
+ redirect: community.skydive.network.skydive
+ network.skydive.api:
+ redirect: community.skydive.network.skydive.api
+ network.slxos:
+ redirect: community.network.network.slxos
+ network.slxos.slxos:
+ redirect: community.network.network.slxos.slxos
+ network.sros:
+ redirect: community.network.network.sros
+ network.sros.sros:
+ redirect: community.network.network.sros.sros
+ network.voss:
+ redirect: community.network.network.voss
+ network.voss.voss:
+ redirect: community.network.network.voss.voss
+ network.vyos:
+ redirect: vyos.vyos.network.vyos
+ network.vyos.argspec:
+ redirect: vyos.vyos.network.vyos.argspec
+ network.vyos.argspec.facts:
+ redirect: vyos.vyos.network.vyos.argspec.facts
+ network.vyos.argspec.facts.facts:
+ redirect: vyos.vyos.network.vyos.argspec.facts.facts
+ network.vyos.argspec.interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.interfaces
+ network.vyos.argspec.interfaces.interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.interfaces.interfaces
+ network.vyos.argspec.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.l3_interfaces
+ network.vyos.argspec.l3_interfaces.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.l3_interfaces.l3_interfaces
+ network.vyos.argspec.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lag_interfaces
+ network.vyos.argspec.lag_interfaces.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lag_interfaces.lag_interfaces
+ network.vyos.argspec.lldp_global:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_global
+ network.vyos.argspec.lldp_global.lldp_global:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_global.lldp_global
+ network.vyos.argspec.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_interfaces
+ network.vyos.argspec.lldp_interfaces.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.argspec.lldp_interfaces.lldp_interfaces
+ network.vyos.config:
+ redirect: vyos.vyos.network.vyos.config
+ network.vyos.config.interfaces:
+ redirect: vyos.vyos.network.vyos.config.interfaces
+ network.vyos.config.interfaces.interfaces:
+ redirect: vyos.vyos.network.vyos.config.interfaces.interfaces
+ network.vyos.config.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.config.l3_interfaces
+ network.vyos.config.l3_interfaces.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.config.l3_interfaces.l3_interfaces
+ network.vyos.config.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lag_interfaces
+ network.vyos.config.lag_interfaces.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lag_interfaces.lag_interfaces
+ network.vyos.config.lldp_global:
+ redirect: vyos.vyos.network.vyos.config.lldp_global
+ network.vyos.config.lldp_global.lldp_global:
+ redirect: vyos.vyos.network.vyos.config.lldp_global.lldp_global
+ network.vyos.config.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lldp_interfaces
+ network.vyos.config.lldp_interfaces.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.config.lldp_interfaces.lldp_interfaces
+ network.vyos.facts:
+ redirect: vyos.vyos.network.vyos.facts
+ network.vyos.facts.facts:
+ redirect: vyos.vyos.network.vyos.facts.facts
+ network.vyos.facts.interfaces:
+ redirect: vyos.vyos.network.vyos.facts.interfaces
+ network.vyos.facts.interfaces.interfaces:
+ redirect: vyos.vyos.network.vyos.facts.interfaces.interfaces
+ network.vyos.facts.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.l3_interfaces
+ network.vyos.facts.l3_interfaces.l3_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.l3_interfaces.l3_interfaces
+ network.vyos.facts.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lag_interfaces
+ network.vyos.facts.lag_interfaces.lag_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lag_interfaces.lag_interfaces
+ network.vyos.facts.legacy:
+ redirect: vyos.vyos.network.vyos.facts.legacy
+ network.vyos.facts.legacy.base:
+ redirect: vyos.vyos.network.vyos.facts.legacy.base
+ network.vyos.facts.lldp_global:
+ redirect: vyos.vyos.network.vyos.facts.lldp_global
+ network.vyos.facts.lldp_global.lldp_global:
+ redirect: vyos.vyos.network.vyos.facts.lldp_global.lldp_global
+ network.vyos.facts.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lldp_interfaces
+ network.vyos.facts.lldp_interfaces.lldp_interfaces:
+ redirect: vyos.vyos.network.vyos.facts.lldp_interfaces.lldp_interfaces
+ network.vyos.utils:
+ redirect: vyos.vyos.network.vyos.utils
+ network.vyos.utils.utils:
+ redirect: vyos.vyos.network.vyos.utils.utils
+ network.vyos.vyos:
+ redirect: vyos.vyos.network.vyos.vyos
+ oneandone:
+ redirect: community.general.oneandone
+ oneview:
+ redirect: community.general.oneview
+ online:
+ redirect: community.general.online
+ opennebula:
+ redirect: community.general.opennebula
+ openstack:
+ redirect: openstack.cloud.openstack
+ oracle:
+ redirect: community.general.oracle
+ oracle.oci_utils:
+ redirect: community.general.oracle.oci_utils
+ ovirt:
+ redirect: community.general._ovirt
+ podman:
+ redirect: containers.podman.podman
+ podman.common:
+ redirect: containers.podman.podman.common
+ postgres:
+ redirect: community.general.postgres
+ pure:
+ redirect: community.general.pure
+ rabbitmq:
+ redirect: community.rabbitmq.rabbitmq
+ rax:
+ redirect: community.general.rax
+ redfish_utils:
+ redirect: community.general.redfish_utils
+ redhat:
+ redirect: community.general.redhat
+ remote_management.dellemc:
+ redirect: community.general.remote_management.dellemc
+ remote_management.dellemc.dellemc_idrac:
+ redirect: community.general.remote_management.dellemc.dellemc_idrac
+ remote_management.dellemc.ome:
+ redirect: community.general.remote_management.dellemc.ome
+ remote_management.intersight:
+ redirect: cisco.intersight.intersight
+ remote_management.lxca:
+ redirect: community.general.remote_management.lxca
+ remote_management.lxca.common:
+ redirect: community.general.remote_management.lxca.common
+ remote_management.ucs:
+ redirect: cisco.ucs.ucs
+ scaleway:
+ redirect: community.general.scaleway
+ service_now:
+ redirect: servicenow.servicenow.service_now
+ source_control:
+ redirect: community.general.source_control
+ source_control.bitbucket:
+ redirect: community.general.source_control.bitbucket
+ storage:
+ redirect: community.general.storage
+ storage.emc:
+ redirect: community.general.storage.emc
+ storage.emc.emc_vnx:
+ redirect: community.general.storage.emc.emc_vnx
+ storage.hpe3par:
+ redirect: community.general.storage.hpe3par
+ storage.hpe3par.hpe3par:
+ redirect: community.general.storage.hpe3par.hpe3par
+ univention_umc:
+ redirect: community.general.univention_umc
+ utm_utils:
+ redirect: community.general.utm_utils
+ vca:
+ redirect: community.vmware.vca
+ vexata:
+ redirect: community.general.vexata
+ vmware:
+ redirect: community.vmware.vmware
+ vmware_rest_client:
+ redirect: community.vmware.vmware_rest_client
+ vmware_spbm:
+ redirect: community.vmware.vmware_spbm
+ vultr:
+ redirect: ngine_io.vultr.vultr
+ xenserver:
+ redirect: community.general.xenserver
+ # end module_utils
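+ # Editorial sketch, not upstream data: a module_utils redirect value names
+ # the owning collection followed by the module_utils path inside it. So,
+ # assuming the vyos.vyos collection is installed, legacy module code like
+ #
+ #   from ansible.module_utils.network.vyos.vyos import run_commands  # assumed symbol
+ #
+ # is loaded from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos.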
+ cliconf:
+ frr:
+ redirect: frr.frr.frr
+ aireos:
+ redirect: community.network.aireos
+ apconos:
+ redirect: community.network.apconos
+ aruba:
+ redirect: community.network.aruba
+ ce:
+ redirect: community.network.ce
+ cnos:
+ redirect: community.network.cnos
+ edgeos:
+ redirect: community.network.edgeos
+ edgeswitch:
+ redirect: community.network.edgeswitch
+ enos:
+ redirect: community.network.enos
+ eric_eccli:
+ redirect: community.network.eric_eccli
+ exos:
+ redirect: community.network.exos
+ icx:
+ redirect: community.network.icx
+ ironware:
+ redirect: community.network.ironware
+ netvisor:
+ redirect: community.network.netvisor
+ nos:
+ redirect: community.network.nos
+ onyx:
+ redirect: mellanox.onyx.onyx
+ routeros:
+ redirect: community.network.routeros
+ slxos:
+ redirect: community.network.slxos
+ voss:
+ redirect: community.network.voss
+ eos:
+ redirect: arista.eos.eos
+ asa:
+ redirect: cisco.asa.asa
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ nxos:
+ redirect: cisco.nxos.nxos
+ junos:
+ redirect: junipernetworks.junos.junos
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ vyos:
+ redirect: vyos.vyos.vyos
+ terminal:
+ frr:
+ redirect: frr.frr.frr
+ aireos:
+ redirect: community.network.aireos
+ apconos:
+ redirect: community.network.apconos
+ aruba:
+ redirect: community.network.aruba
+ ce:
+ redirect: community.network.ce
+ cnos:
+ redirect: community.network.cnos
+ edgeos:
+ redirect: community.network.edgeos
+ edgeswitch:
+ redirect: community.network.edgeswitch
+ enos:
+ redirect: community.network.enos
+ eric_eccli:
+ redirect: community.network.eric_eccli
+ exos:
+ redirect: community.network.exos
+ icx:
+ redirect: community.network.icx
+ ironware:
+ redirect: community.network.ironware
+ netvisor:
+ redirect: community.network.netvisor
+ nos:
+ redirect: community.network.nos
+ onyx:
+ redirect: mellanox.onyx.onyx
+ routeros:
+ redirect: community.network.routeros
+ slxos:
+ redirect: community.network.slxos
+ sros:
+ redirect: community.network.sros
+ voss:
+ redirect: community.network.voss
+ eos:
+ redirect: arista.eos.eos
+ asa:
+ redirect: cisco.asa.asa
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ nxos:
+ redirect: cisco.nxos.nxos
+ bigip:
+ redirect: f5networks.f5_modules.bigip
+ junos:
+ redirect: junipernetworks.junos.junos
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ vyos:
+ redirect: vyos.vyos.vyos
+ action:
+ # test entry, overloaded with module of same name to use a different base action (i.e. not "normal.py")
+ uses_redirected_action:
+ redirect: testns.testcoll.subclassed_norm
+ aireos:
+ redirect: community.network.aireos
+ aruba:
+ redirect: community.network.aruba
+ ce:
+ redirect: community.network.ce
+ ce_template:
+ redirect: community.network.ce_template
+ cnos:
+ redirect: community.network.cnos
+ edgeos_config:
+ redirect: community.network.edgeos_config
+ enos:
+ redirect: community.network.enos
+ exos:
+ redirect: community.network.exos
+ ironware:
+ redirect: community.network.ironware
+ nos_config:
+ redirect: community.network.nos_config
+ onyx_config:
+ redirect: mellanox.onyx.onyx_config
+ slxos:
+ redirect: community.network.slxos
+ sros:
+ redirect: community.network.sros
+ voss:
+ redirect: community.network.voss
+ aws_s3:
+ redirect: amazon.aws.aws_s3
+ cli_command:
+ redirect: ansible.netcommon.cli_command
+ cli_config:
+ redirect: ansible.netcommon.cli_config
+ net_base:
+ redirect: ansible.netcommon.net_base
+ net_user:
+ redirect: ansible.netcommon.net_user
+ net_vlan:
+ redirect: ansible.netcommon.net_vlan
+ net_static_route:
+ redirect: ansible.netcommon.net_static_route
+ net_lldp:
+ redirect: ansible.netcommon.net_lldp
+ net_vrf:
+ redirect: ansible.netcommon.net_vrf
+ net_ping:
+ redirect: ansible.netcommon.net_ping
+ net_l3_interface:
+ redirect: ansible.netcommon.net_l3_interface
+ net_l2_interface:
+ redirect: ansible.netcommon.net_l2_interface
+ net_interface:
+ redirect: ansible.netcommon.net_interface
+ net_system:
+ redirect: ansible.netcommon.net_system
+ net_lldp_interface:
+ redirect: ansible.netcommon.net_lldp_interface
+ net_put:
+ redirect: ansible.netcommon.net_put
+ net_get:
+ redirect: ansible.netcommon.net_get
+ net_logging:
+ redirect: ansible.netcommon.net_logging
+ net_banner:
+ redirect: ansible.netcommon.net_banner
+ net_linkagg:
+ redirect: ansible.netcommon.net_linkagg
+ netconf:
+ redirect: ansible.netcommon.netconf
+ network:
+ redirect: ansible.netcommon.network
+ telnet:
+ redirect: ansible.netcommon.telnet
+ patch:
+ redirect: ansible.posix.patch
+ synchronize:
+ redirect: ansible.posix.synchronize
+ win_copy:
+ redirect: ansible.windows.win_copy
+ win_reboot:
+ redirect: ansible.windows.win_reboot
+ win_template:
+ redirect: ansible.windows.win_template
+ win_updates:
+ redirect: ansible.windows.win_updates
+ fortios_config:
+ redirect: fortinet.fortios.fortios_config
+ eos:
+ redirect: arista.eos.eos
+ asa:
+ redirect: cisco.asa.asa
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ nxos:
+ redirect: cisco.nxos.nxos
+ nxos_file_copy:
+ redirect: cisco.nxos.nxos_file_copy
+ bigip:
+ redirect: f5networks.f5_modules.bigip
+ bigiq:
+ redirect: f5networks.f5_modules.bigiq
+ junos:
+ redirect: junipernetworks.junos.junos
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ vyos:
+ redirect: vyos.vyos.vyos
+ become:
+ doas:
+ redirect: community.general.doas
+ dzdo:
+ redirect: community.general.dzdo
+ ksu:
+ redirect: community.general.ksu
+ machinectl:
+ redirect: community.general.machinectl
+ pbrun:
+ redirect: community.general.pbrun
+ pfexec:
+ redirect: community.general.pfexec
+ pmrun:
+ redirect: community.general.pmrun
+ sesu:
+ redirect: community.general.sesu
+ enable:
+ redirect: ansible.netcommon.enable
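+ # Editorial sketch, not upstream data: these redirects keep legacy become
+ # method names working in playbooks, e.g.:
+ #
+ #   - name: check effective user
+ #     command: whoami
+ #     become: yes
+ #     become_method: doas    # transparently resolves to community.general.doas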
+ cache:
+ memcached:
+ redirect: community.general.memcached
+ pickle:
+ redirect: community.general.pickle
+ redis:
+ redirect: community.general.redis
+ yaml:
+ redirect: community.general.yaml
+ mongodb:
+ redirect: community.mongodb.mongodb
+ callback:
+ actionable:
+ redirect: community.general.actionable
+ cgroup_memory_recap:
+ redirect: community.general.cgroup_memory_recap
+ context_demo:
+ redirect: community.general.context_demo
+ counter_enabled:
+ redirect: community.general.counter_enabled
+ dense:
+ redirect: community.general.dense
+ full_skip:
+ redirect: community.general.full_skip
+ hipchat:
+ redirect: community.general.hipchat
+ jabber:
+ redirect: community.general.jabber
+ log_plays:
+ redirect: community.general.log_plays
+ logdna:
+ redirect: community.general.logdna
+ logentries:
+ redirect: community.general.logentries
+ logstash:
+ redirect: community.general.logstash
+ mail:
+ redirect: community.general.mail
+ nrdp:
+ redirect: community.general.nrdp
+ 'null':
+ redirect: community.general.null
+ osx_say:
+ redirect: community.general.osx_say
+ say:
+ redirect: community.general.say
+ selective:
+ redirect: community.general.selective
+ slack:
+ redirect: community.general.slack
+ splunk:
+ redirect: community.general.splunk
+ stderr:
+ redirect: community.general.stderr
+ sumologic:
+ redirect: community.general.sumologic
+ syslog_json:
+ redirect: community.general.syslog_json
+ unixy:
+ redirect: community.general.unixy
+ yaml:
+ redirect: community.general.yaml
+ grafana_annotations:
+ redirect: community.grafana.grafana_annotations
+ aws_resource_actions:
+ redirect: amazon.aws.aws_resource_actions
+ cgroup_perf_recap:
+ redirect: ansible.posix.cgroup_perf_recap
+ debug:
+ redirect: ansible.posix.debug
+ json:
+ redirect: ansible.posix.json
+ profile_roles:
+ redirect: ansible.posix.profile_roles
+ profile_tasks:
+ redirect: ansible.posix.profile_tasks
+ skippy:
+ redirect: ansible.posix.skippy
+ timer:
+ redirect: ansible.posix.timer
+ foreman:
+ redirect: theforeman.foreman.foreman
+ # 'collections' integration test entries, do not remove
+ formerly_core_callback:
+ redirect: testns.testcoll.usercallback
+ formerly_core_removed_callback:
+ redirect: testns.testcoll.removedcallback
+ formerly_core_missing_callback:
+ redirect: bogusns.boguscoll.boguscallback
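+ # Editorial sketch, not upstream data: redirected callbacks can still be
+ # enabled under their legacy short names, e.g. in ansible.cfg:
+ #
+ #   [defaults]
+ #   callback_whitelist = profile_tasks    # resolves to ansible.posix.profile_tasks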
+ doc_fragments:
+ a10:
+ redirect: community.network.a10
+ aireos:
+ redirect: community.network.aireos
+ alicloud:
+ redirect: community.general.alicloud
+ aruba:
+ redirect: community.network.aruba
+ auth_basic:
+ redirect: community.general.auth_basic
+ avi:
+ redirect: community.network.avi
+ ce:
+ redirect: community.network.ce
+ cloudscale:
+ redirect: cloudscale_ch.cloud.api_parameters
+ cloudstack:
+ redirect: ngine_io.cloudstack.cloudstack
+ cnos:
+ redirect: community.network.cnos
+ digital_ocean:
+ redirect: community.digitalocean.digital_ocean
+ dimensiondata:
+ redirect: community.general.dimensiondata
+ dimensiondata_wait:
+ redirect: community.general.dimensiondata_wait
+ docker:
+ redirect: community.general.docker
+ emc:
+ redirect: community.general.emc
+ enos:
+ redirect: community.network.enos
+ exoscale:
+ redirect: ngine_io.exoscale.exoscale
+ gcp:
+ redirect: google.cloud.gcp
+ hetzner:
+ redirect: community.general.hetzner
+ hpe3par:
+ redirect: community.general.hpe3par
+ hwc:
+ redirect: community.general.hwc
+ ibm_storage:
+ redirect: community.general.ibm_storage
+ infinibox:
+ redirect: infinidat.infinibox.infinibox
+ influxdb:
+ redirect: community.general.influxdb
+ ingate:
+ redirect: community.network.ingate
+ ipa:
+ redirect: community.general.ipa
+ ironware:
+ redirect: community.network.ironware
+ keycloak:
+ redirect: community.general.keycloak
+ kubevirt_common_options:
+ redirect: community.general.kubevirt_common_options
+ kubevirt_vm_options:
+ redirect: community.general.kubevirt_vm_options
+ ldap:
+ redirect: community.general.ldap
+ lxca_common:
+ redirect: community.general.lxca_common
+ manageiq:
+ redirect: community.general.manageiq
+ mysql:
+ redirect: community.mysql.mysql
+ netscaler:
+ redirect: community.network.netscaler
+ nios:
+ redirect: community.general.nios
+ nso:
+ redirect: community.network.nso
+ oneview:
+ redirect: community.general.oneview
+ online:
+ redirect: community.general.online
+ onyx:
+ redirect: mellanox.onyx.onyx
+ opennebula:
+ redirect: community.general.opennebula
+ openswitch:
+ redirect: community.general.openswitch
+ oracle:
+ redirect: community.general.oracle
+ oracle_creatable_resource:
+ redirect: community.general.oracle_creatable_resource
+ oracle_display_name_option:
+ redirect: community.general.oracle_display_name_option
+ oracle_name_option:
+ redirect: community.general.oracle_name_option
+ oracle_tags:
+ redirect: community.general.oracle_tags
+ oracle_wait_options:
+ redirect: community.general.oracle_wait_options
+ ovirt_facts:
+ redirect: community.general.ovirt_facts
+ panos:
+ redirect: community.network.panos
+ postgres:
+ redirect: community.general.postgres
+ proxysql:
+ redirect: community.proxysql.proxysql
+ purestorage:
+ redirect: community.general.purestorage
+ rabbitmq:
+ redirect: community.rabbitmq.rabbitmq
+ rackspace:
+ redirect: community.general.rackspace
+ scaleway:
+ redirect: community.general.scaleway
+ sros:
+ redirect: community.network.sros
+ utm:
+ redirect: community.general.utm
+ vexata:
+ redirect: community.general.vexata
+ vultr:
+ redirect: ngine_io.vultr.vultr
+ xenserver:
+ redirect: community.general.xenserver
+ zabbix:
+ redirect: community.zabbix.zabbix
+ k8s_auth_options:
+ redirect: community.kubernetes.k8s_auth_options
+ k8s_name_options:
+ redirect: community.kubernetes.k8s_name_options
+ k8s_resource_options:
+ redirect: community.kubernetes.k8s_resource_options
+ k8s_scale_options:
+ redirect: community.kubernetes.k8s_scale_options
+ k8s_state_options:
+ redirect: community.kubernetes.k8s_state_options
+ acme:
+ redirect: community.crypto.acme
+ ecs_credential:
+ redirect: community.crypto.ecs_credential
+ vca:
+ redirect: community.vmware.vca
+ vmware:
+ redirect: community.vmware.vmware
+ vmware_rest_client:
+ redirect: community.vmware.vmware_rest_client
+ service_now:
+ redirect: servicenow.servicenow.service_now
+ aws:
+ redirect: amazon.aws.aws
+ aws_credentials:
+ redirect: amazon.aws.aws_credentials
+ aws_region:
+ redirect: amazon.aws.aws_region
+ ec2:
+ redirect: amazon.aws.ec2
+ netconf:
+ redirect: ansible.netcommon.netconf
+ network_agnostic:
+ redirect: ansible.netcommon.network_agnostic
+ fortios:
+ redirect: fortinet.fortios.fortios
+ netapp:
+ redirect: netapp.ontap.netapp
+ checkpoint_commands:
+ redirect: check_point.mgmt.checkpoint_commands
+ checkpoint_facts:
+ redirect: check_point.mgmt.checkpoint_facts
+ checkpoint_objects:
+ redirect: check_point.mgmt.checkpoint_objects
+ eos:
+ redirect: arista.eos.eos
+ aci:
+ redirect: cisco.aci.aci
+ asa:
+ redirect: cisco.asa.asa
+ intersight:
+ redirect: cisco.intersight.intersight
+ ios:
+ redirect: cisco.ios.ios
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ meraki:
+ redirect: cisco.meraki.meraki
+ mso:
+ redirect: cisco.mso.modules
+ nxos:
+ redirect: cisco.nxos.nxos
+ ucs:
+ redirect: cisco.ucs.ucs
+ f5:
+ redirect: f5networks.f5_modules.f5
+ openstack:
+ redirect: openstack.cloud.openstack
+ junos:
+ redirect: junipernetworks.junos.junos
+ tower:
+ redirect: awx.awx.auth
+ ovirt:
+ redirect: ovirt.ovirt.ovirt
+ ovirt_info:
+ redirect: ovirt.ovirt.ovirt_info
+ dellos10:
+ redirect: dellemc.os10.os10
+ dellos9:
+ redirect: dellemc.os9.os9
+ dellos6:
+ redirect: dellemc.os6.os6
+ hcloud:
+ redirect: hetzner.hcloud.hcloud
+ skydive:
+ redirect: community.skydive.skydive
+ azure:
+ redirect: azure.azcollection.azure
+ azure_tags:
+ redirect: azure.azcollection.azure_tags
+ vyos:
+ redirect: vyos.vyos.vyos
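+ # Editorial sketch, not upstream data: a module DOCUMENTATION block that
+ # extends a legacy fragment name now picks up the collection's fragment, e.g.:
+ #
+ #   extends_documentation_fragment:
+ #   - vyos    # resolves to vyos.vyos.vyos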
+ filter:
+ # test entries
+ formerly_core_filter:
+ redirect: ansible.builtin.bool
+ formerly_core_masked_filter:
+ redirect: ansible.builtin.bool
+ gcp_kms_encrypt:
+ redirect: google.cloud.gcp_kms_encrypt
+ gcp_kms_decrypt:
+ redirect: google.cloud.gcp_kms_decrypt
+ json_query:
+ redirect: community.general.json_query
+ random_mac:
+ redirect: community.general.random_mac
+ k8s_config_resource_name:
+ redirect: community.kubernetes.k8s_config_resource_name
+ cidr_merge:
+ redirect: ansible.netcommon.cidr_merge
+ ipaddr:
+ redirect: ansible.netcommon.ipaddr
+ ipmath:
+ redirect: ansible.netcommon.ipmath
+ ipwrap:
+ redirect: ansible.netcommon.ipwrap
+ ip4_hex:
+ redirect: ansible.netcommon.ip4_hex
+ ipv4:
+ redirect: ansible.netcommon.ipv4
+ ipv6:
+ redirect: ansible.netcommon.ipv6
+ ipsubnet:
+ redirect: ansible.netcommon.ipsubnet
+ next_nth_usable:
+ redirect: ansible.netcommon.next_nth_usable
+ network_in_network:
+ redirect: ansible.netcommon.network_in_network
+ network_in_usable:
+ redirect: ansible.netcommon.network_in_usable
+ reduce_on_network:
+ redirect: ansible.netcommon.reduce_on_network
+ nthhost:
+ redirect: ansible.netcommon.nthhost
+ previous_nth_usable:
+ redirect: ansible.netcommon.previous_nth_usable
+ slaac:
+ redirect: ansible.netcommon.slaac
+ hwaddr:
+ redirect: ansible.netcommon.hwaddr
+ parse_cli:
+ redirect: ansible.netcommon.parse_cli
+ parse_cli_textfsm:
+ redirect: ansible.netcommon.parse_cli_textfsm
+ parse_xml:
+ redirect: ansible.netcommon.parse_xml
+ type5_pw:
+ redirect: ansible.netcommon.type5_pw
+ hash_salt:
+ redirect: ansible.netcommon.hash_salt
+ comp_type5:
+ redirect: ansible.netcommon.comp_type5
+ vlan_parser:
+ redirect: ansible.netcommon.vlan_parser
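+ # Editorial sketch, not upstream data: templates keep using the legacy
+ # filter names, which now route to collections, e.g.:
+ #
+ #   {{ '192.0.2.1/24' | ipaddr('address') }}    # ansible.netcommon.ipaddr
+ #   {{ mydata | json_query('a[0].b') }}         # community.general.json_query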
+ httpapi:
+ exos:
+ redirect: community.network.exos
+ fortianalyzer:
+ redirect: community.network.fortianalyzer
+ fortimanager:
+ redirect: fortinet.fortimanager.fortimanager
+ ftd:
+ redirect: community.network.ftd
+ vmware:
+ redirect: community.vmware.vmware
+ restconf:
+ redirect: ansible.netcommon.restconf
+ fortios:
+ redirect: fortinet.fortios.fortios
+ checkpoint:
+ redirect: check_point.mgmt.checkpoint
+ eos:
+ redirect: arista.eos.eos
+ nxos:
+ redirect: cisco.nxos.nxos
+ splunk:
+ redirect: splunk.es.splunk
+ qradar:
+ redirect: ibm.qradar.qradar
+ inventory:
+ # test entry
+ formerly_core_inventory:
+ redirect: testns.content_adj.statichost
+ cloudscale:
+ redirect: cloudscale_ch.cloud.inventory
+ docker_machine:
+ redirect: community.general.docker_machine
+ docker_swarm:
+ redirect: community.general.docker_swarm
+ gitlab_runners:
+ redirect: community.general.gitlab_runners
+ kubevirt:
+ redirect: community.general.kubevirt
+ linode:
+ redirect: community.general.linode
+ nmap:
+ redirect: community.general.nmap
+ online:
+ redirect: community.general.online
+ scaleway:
+ redirect: community.general.scaleway
+ virtualbox:
+ redirect: community.general.virtualbox
+ vultr:
+ redirect: ngine_io.vultr.vultr
+ k8s:
+ redirect: community.kubernetes.k8s
+ openshift:
+ redirect: community.kubernetes.openshift
+ vmware_vm_inventory:
+ redirect: community.vmware.vmware_vm_inventory
+ aws_ec2:
+ redirect: amazon.aws.aws_ec2
+ aws_rds:
+ redirect: amazon.aws.aws_rds
+ foreman:
+ redirect: theforeman.foreman.foreman
+ netbox:
+ redirect: netbox.netbox.nb_inventory
+ openstack:
+ redirect: openstack.cloud.openstack
+ tower:
+ redirect: awx.awx.tower
+ hcloud:
+ redirect: hetzner.hcloud.hcloud
+ gcp_compute:
+ redirect: google.cloud.gcp_compute
+ azure_rm:
+ redirect: azure.azcollection.azure_rm
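+ # Editorial sketch, not upstream data: an inventory source file can keep
+ # the legacy plugin name, e.g. an aws_ec2.yml beginning with:
+ #
+ #   plugin: aws_ec2    # resolves to amazon.aws.aws_ec2
+ #   regions:
+ #     - us-east-1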
+ lookup:
+ # test entry
+ formerly_core_lookup:
+ redirect: testns.testcoll.mylookup
+ avi:
+ redirect: community.network.avi
+ cartesian:
+ redirect: community.general.cartesian
+ chef_databag:
+ redirect: community.general.chef_databag
+ conjur_variable:
+ redirect: cyberark.conjur.conjur_variable
+ consul_kv:
+ redirect: community.general.consul_kv
+ credstash:
+ redirect: community.general.credstash
+ cyberarkpassword:
+ redirect: community.general.cyberarkpassword
+ dig:
+ redirect: community.general.dig
+ dnstxt:
+ redirect: community.general.dnstxt
+ etcd:
+ redirect: community.general.etcd
+ filetree:
+ redirect: community.general.filetree
+ flattened:
+ redirect: community.general.flattened
+ gcp_storage_file:
+ redirect: community.general.gcp_storage_file
+ hashi_vault:
+ redirect: community.general.hashi_vault
+ hiera:
+ redirect: community.general.hiera
+ keyring:
+ redirect: community.general.keyring
+ lastpass:
+ redirect: community.general.lastpass
+ lmdb_kv:
+ redirect: community.general.lmdb_kv
+ manifold:
+ redirect: community.general.manifold
+ nios:
+ redirect: community.general.nios
+ nios_next_ip:
+ redirect: community.general.nios_next_ip
+ nios_next_network:
+ redirect: community.general.nios_next_network
+ onepassword:
+ redirect: community.general.onepassword
+ onepassword_raw:
+ redirect: community.general.onepassword_raw
+ passwordstore:
+ redirect: community.general.passwordstore
+ rabbitmq:
+ redirect: community.rabbitmq.rabbitmq
+ redis:
+ redirect: community.general.redis
+ shelvefile:
+ redirect: community.general.shelvefile
+ grafana_dashboard:
+ redirect: community.grafana.grafana_dashboard
+ openshift:
+ redirect: community.kubernetes.openshift
+ k8s:
+ redirect: community.kubernetes.k8s
+ mongodb:
+ redirect: community.mongodb.mongodb
+ laps_password:
+ redirect: community.windows.laps_password
+ aws_account_attribute:
+ redirect: amazon.aws.aws_account_attribute
+ aws_secret:
+ redirect: amazon.aws.aws_secret
+ aws_service_ip_ranges:
+ redirect: amazon.aws.aws_service_ip_ranges
+ aws_ssm:
+ redirect: amazon.aws.aws_ssm
+ skydive:
+ redirect: community.skydive.skydive
+ cpm_metering:
+ redirect: wti.remote.cpm_metering
+ cpm_status:
+ redirect: wti.remote.cpm_status
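+ # Editorial sketch, not upstream data: lookups invoked by legacy short
+ # names are routed the same way, e.g.:
+ #
+ #   - debug:
+ #       msg: "{{ lookup('dig', 'example.com') }}"    # community.general.dig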
+ netconf:
+ ce:
+ redirect: community.network.ce
+ sros:
+ redirect: community.network.sros
+ default:
+ redirect: ansible.netcommon.default
+ iosxr:
+ redirect: cisco.iosxr.iosxr
+ junos:
+ redirect: junipernetworks.junos.junos
+ shell:
+ # test entry
+ formerly_core_powershell:
+ redirect: ansible.builtin.powershell
+ csh:
+ redirect: ansible.posix.csh
+ fish:
+ redirect: ansible.posix.fish
+ test:
+ # test entries
+ formerly_core_test:
+ redirect: ansible.builtin.search
+ formerly_core_masked_test:
+ redirect: ansible.builtin.search
+import_redirection:
+ # test entry
+ ansible.module_utils.formerly_core:
+ redirect: ansible_collections.testns.testcoll.plugins.module_utils.base
+ ansible.module_utils.known_hosts:
+ redirect: ansible_collections.community.general.plugins.module_utils.known_hosts
+ # ansible.builtin synthetic collection redirection hackery
+ ansible_collections.ansible.builtin.plugins.modules:
+ redirect: ansible.modules
+ ansible_collections.ansible.builtin.plugins.module_utils:
+ redirect: ansible.module_utils
+ ansible_collections.ansible.builtin.plugins:
+ redirect: ansible.plugins
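+ # Editorial sketch, not upstream data: with community.general installed,
+ # the legacy import
+ #
+ #   import ansible.module_utils.known_hosts
+ #
+ # is transparently served from
+ # ansible_collections.community.general.plugins.module_utils.known_hosts.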
diff --git a/lib/ansible/config/base.yml b/lib/ansible/config/base.yml
new file mode 100644
index 00000000..2ad9d72f
--- /dev/null
+++ b/lib/ansible/config/base.yml
@@ -0,0 +1,2002 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+ALLOW_WORLD_READABLE_TMPFILES:
+ name: Allow world-readable temporary files
+ deprecated:
+ why: moved to a per plugin approach that is more flexible
+ version: "2.14"
+ alternatives: mostly the same config will work, but now controlled from the plugin itself and not using the general constant.
+ default: False
+ description:
+ - This makes the temporary files created on the machine world-readable and will issue a warning instead of failing the task.
+ - It is useful when becoming an unprivileged user.
+ env: []
+ ini:
+ - {key: allow_world_readable_tmpfiles, section: defaults}
+ type: boolean
+ yaml: {key: defaults.allow_world_readable_tmpfiles}
+ version_added: "2.1"
+ANSIBLE_CONNECTION_PATH:
+ name: Path of ansible-connection script
+ default: null
+ description:
+ - Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
+ - If null, ansible will start by looking in the same directory as the ansible script.
+ type: path
+ env: [{name: ANSIBLE_CONNECTION_PATH}]
+ ini:
+ - {key: ansible_connection_path, section: persistent_connection}
+ yaml: {key: persistent_connection.ansible_connection_path}
+ version_added: "2.8"
+ANSIBLE_COW_SELECTION:
+ name: Cowsay filter selection
+ default: default
+ description: This allows you to choose a specific cowsay stencil for the banners or use 'random' to cycle through them.
+ env: [{name: ANSIBLE_COW_SELECTION}]
+ ini:
+ - {key: cow_selection, section: defaults}
+ANSIBLE_COW_WHITELIST:
+ name: Cowsay filter whitelist
+ default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www']
+ description: Whitelist of cowsay templates that are 'safe' to use. Set to an empty list to enable all installed templates.
+ env: [{name: ANSIBLE_COW_WHITELIST}]
+ ini:
+ - {key: cow_whitelist, section: defaults}
+ type: list
+ yaml: {key: display.cowsay_whitelist}
+ANSIBLE_FORCE_COLOR:
+ name: Force color output
+ default: False
+ description: This option forces color mode even when running without a TTY or when the "nocolor" setting is True.
+ env: [{name: ANSIBLE_FORCE_COLOR}]
+ ini:
+ - {key: force_color, section: defaults}
+ type: boolean
+ yaml: {key: display.force_color}
+ANSIBLE_NOCOLOR:
+ name: Suppress color output
+ default: False
+ description: This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
+ env: [{name: ANSIBLE_NOCOLOR}]
+ ini:
+ - {key: nocolor, section: defaults}
+ type: boolean
+ yaml: {key: display.nocolor}
+ANSIBLE_NOCOWS:
+ name: Suppress cowsay output
+ default: False
+ description: If you have cowsay installed but want to avoid the 'cows' (why????), use this.
+ env: [{name: ANSIBLE_NOCOWS}]
+ ini:
+ - {key: nocows, section: defaults}
+ type: boolean
+ yaml: {key: display.i_am_no_fun}
+ANSIBLE_COW_PATH:
+ name: Set path to cowsay command
+ default: null
+ description: Specify a custom cowsay path or swap in your cowsay implementation of choice
+ env: [{name: ANSIBLE_COW_PATH}]
+ ini:
+ - {key: cowpath, section: defaults}
+ type: string
+ yaml: {key: display.cowpath}
+ANSIBLE_PIPELINING:
+ name: Connection pipelining
+ default: False
+ description:
+ - Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server,
+ by executing many Ansible modules without actual file transfer.
+ - This can result in a very significant performance improvement when enabled.
+ - "However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first
+ disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default."
+ - This option is disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
+ env:
+ - name: ANSIBLE_PIPELINING
+ - name: ANSIBLE_SSH_PIPELINING
+ ini:
+ - section: connection
+ key: pipelining
+ - section: ssh_connection
+ key: pipelining
+ type: boolean
+ yaml: {key: plugins.connection.pipelining}
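+# Editorial usage sketch derived from the keys above: either ini section or
+# the env var enables pipelining, e.g. in ansible.cfg:
+#
+#   [ssh_connection]
+#   pipelining = True
+#
+# or: export ANSIBLE_PIPELINING=True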
+ANSIBLE_SSH_ARGS:
+ # TODO: move to ssh plugin
+ default: -C -o ControlMaster=auto -o ControlPersist=60s
+ description:
+ - If set, this will override the Ansible default ssh arguments.
+ - In particular, users may wish to raise the ControlPersist time to encourage performance. A value of 30 minutes may be appropriate.
+ - Be aware that if `-o ControlPath` is set in ssh_args, the control path setting is not used.
+ env: [{name: ANSIBLE_SSH_ARGS}]
+ ini:
+ - {key: ssh_args, section: ssh_connection}
+ yaml: {key: ssh_connection.ssh_args}
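+# Editorial usage sketch: raising ControlPersist as the description above
+# suggests, while keeping the remaining defaults:
+#
+#   [ssh_connection]
+#   ssh_args = -C -o ControlMaster=auto -o ControlPersist=30m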
+ANSIBLE_SSH_CONTROL_PATH:
+ # TODO: move to ssh plugin
+ default: null
+ description:
+ - This is the location to save ssh's ControlPath sockets; it uses ssh's variable substitution.
+ - Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
+ - Before 2.3 it defaulted to `control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r`.
+ - Be aware that this setting is ignored if `-o ControlPath` is set in ssh args.
+ env: [{name: ANSIBLE_SSH_CONTROL_PATH}]
+ ini:
+ - {key: control_path, section: ssh_connection}
+ yaml: {key: ssh_connection.control_path}
+ANSIBLE_SSH_CONTROL_PATH_DIR:
+ # TODO: move to ssh plugin
+ default: ~/.ansible/cp
+ description:
+ - This sets the directory to use for ssh control path if the control path setting is null.
+ - It also provides the `%(directory)s` variable for the control path setting.
+ env: [{name: ANSIBLE_SSH_CONTROL_PATH_DIR}]
+ ini:
+ - {key: control_path_dir, section: ssh_connection}
+ yaml: {key: ssh_connection.control_path_dir}
+ANSIBLE_SSH_EXECUTABLE:
+ # TODO: move to ssh plugin, note that ssh_utils refs this and needs to be updated if removed
+ default: ssh
+ description:
+ - This defines the location of the ssh binary. It defaults to `ssh` which will use the first ssh binary available in $PATH.
+ - This option is usually not required; it might be useful when access to system ssh is restricted,
+ or when using ssh wrappers to connect to remote hosts.
+ env: [{name: ANSIBLE_SSH_EXECUTABLE}]
+ ini:
+ - {key: ssh_executable, section: ssh_connection}
+ yaml: {key: ssh_connection.ssh_executable}
+ version_added: "2.2"
+ANSIBLE_SSH_RETRIES:
+ # TODO: move to ssh plugin
+ default: 0
+ description: Number of attempts to establish a connection before we give up and report the host as 'UNREACHABLE'
+ env: [{name: ANSIBLE_SSH_RETRIES}]
+ ini:
+ - {key: retries, section: ssh_connection}
+ type: integer
+ yaml: {key: ssh_connection.retries}
+ANY_ERRORS_FATAL:
+ name: Make Task failures fatal
+ default: False
+ description: Sets the default value for the any_errors_fatal keyword; if True, task failures will be considered fatal errors.
+ env:
+ - name: ANSIBLE_ANY_ERRORS_FATAL
+ ini:
+ - section: defaults
+ key: any_errors_fatal
+ type: boolean
+ yaml: {key: errors.any_task_errors_fatal}
+ version_added: "2.4"
+BECOME_ALLOW_SAME_USER:
+ name: Allow becoming the same user
+ default: False
+ description: This setting controls whether become is skipped when the remote user and the become user are the same, i.e. root using sudo to become root.
+ env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}]
+ ini:
+ - {key: become_allow_same_user, section: privilege_escalation}
+ type: boolean
+ yaml: {key: privilege_escalation.become_allow_same_user}
+AGNOSTIC_BECOME_PROMPT:
+ name: Display an agnostic become prompt
+ default: True
+ type: boolean
+ description: Display an agnostic become prompt instead of a prompt containing the become method supplied on the command line
+ env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}]
+ ini:
+ - {key: agnostic_become_prompt, section: privilege_escalation}
+ yaml: {key: privilege_escalation.agnostic_become_prompt}
+ version_added: "2.5"
+CACHE_PLUGIN:
+ name: Persistent Cache plugin
+ default: memory
+ description: Chooses which cache plugin to use; the default 'memory' is ephemeral.
+ env: [{name: ANSIBLE_CACHE_PLUGIN}]
+ ini:
+ - {key: fact_caching, section: defaults}
+ yaml: {key: facts.cache.plugin}
+CACHE_PLUGIN_CONNECTION:
+ name: Cache Plugin URI
+ default: ~
+ description: Defines connection or path information for the cache plugin
+ env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}]
+ ini:
+ - {key: fact_caching_connection, section: defaults}
+ yaml: {key: facts.cache.uri}
+CACHE_PLUGIN_PREFIX:
+ name: Cache Plugin table prefix
+ default: ansible_facts
+ description: Prefix to use for cache plugin files/tables
+ env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}]
+ ini:
+ - {key: fact_caching_prefix, section: defaults}
+ yaml: {key: facts.cache.prefix}
+CACHE_PLUGIN_TIMEOUT:
+ name: Cache Plugin expiration timeout
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}]
+ ini:
+ - {key: fact_caching_timeout, section: defaults}
+ type: integer
+ yaml: {key: facts.cache.timeout}
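+# Editorial usage sketch tying the four fact-cache settings above together
+# in ansible.cfg (the redis connection string is an assumed example):
+#
+#   [defaults]
+#   fact_caching = redis
+#   fact_caching_connection = localhost:6379:0
+#   fact_caching_prefix = ansible_facts
+#   fact_caching_timeout = 86400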
+COLLECTIONS_SCAN_SYS_PATH:
+ name: enable/disable scanning sys.path for installed collections
+ default: true
+ type: boolean
+ env:
+ - {name: ANSIBLE_COLLECTIONS_SCAN_SYS_PATH}
+ ini:
+ - {key: collections_scan_sys_path, section: defaults}
+COLLECTIONS_PATHS:
+ name: ordered list of root paths for loading installed Ansible collections content
+ description: Colon separated paths in which Ansible will search for collections content.
+ default: ~/.ansible/collections:/usr/share/ansible/collections
+ type: pathspec
+ env:
+ - name: ANSIBLE_COLLECTIONS_PATHS # TODO: Deprecate this and ini once PATH has been in a few releases.
+ - name: ANSIBLE_COLLECTIONS_PATH
+ version_added: '2.10'
+ ini:
+ - key: collections_paths
+ section: defaults
+ - key: collections_path
+ section: defaults
+ version_added: '2.10'
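+# Editorial usage sketch: both spellings documented above work, e.g.:
+#
+#   [defaults]
+#   collections_path = ~/.ansible/collections:/usr/share/ansible/collections
+#
+# or: export ANSIBLE_COLLECTIONS_PATH=~/.ansible/collections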
+COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
+ name: Defines behavior when loading a collection that does not support the current Ansible version
+ description:
+ - When a collection is loaded that does not support the running Ansible version (via the collection metadata key
+ `requires_ansible`), the default behavior is to issue a warning and continue anyway. Setting this value to `ignore`
+ skips the warning entirely, while setting it to `fatal` will immediately halt Ansible execution.
+ env: [{name: ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH}]
+ ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
+ choices: [error, warning, ignore]
+ default: warning
+COLOR_CHANGED:
+ name: Color for 'changed' task status
+ default: yellow
+ description: Defines the color to use on 'Changed' task status
+ env: [{name: ANSIBLE_COLOR_CHANGED}]
+ ini:
+ - {key: changed, section: colors}
+ yaml: {key: display.colors.changed}
+COLOR_CONSOLE_PROMPT:
+ name: "Color for ansible-console's prompt task status"
+ default: white
+ description: Defines the default color to use for ansible-console
+ env: [{name: ANSIBLE_COLOR_CONSOLE_PROMPT}]
+ ini:
+ - {key: console_prompt, section: colors}
+ version_added: "2.7"
+COLOR_DEBUG:
+ name: Color for debug statements
+ default: dark gray
+ description: Defines the color to use when emitting debug messages
+ env: [{name: ANSIBLE_COLOR_DEBUG}]
+ ini:
+ - {key: debug, section: colors}
+ yaml: {key: display.colors.debug}
+COLOR_DEPRECATE:
+ name: Color for deprecation messages
+ default: purple
+ description: Defines the color to use when emitting deprecation messages
+ env: [{name: ANSIBLE_COLOR_DEPRECATE}]
+ ini:
+ - {key: deprecate, section: colors}
+ yaml: {key: display.colors.deprecate}
+COLOR_DIFF_ADD:
+ name: Color for diff added display
+ default: green
+ description: Defines the color to use when showing added lines in diffs
+ env: [{name: ANSIBLE_COLOR_DIFF_ADD}]
+ ini:
+ - {key: diff_add, section: colors}
+ yaml: {key: display.colors.diff.add}
+COLOR_DIFF_LINES:
+ name: Color for diff lines display
+ default: cyan
+ description: Defines the color to use when showing diffs
+ env: [{name: ANSIBLE_COLOR_DIFF_LINES}]
+ ini:
+ - {key: diff_lines, section: colors}
+COLOR_DIFF_REMOVE:
+ name: Color for diff removed display
+ default: red
+ description: Defines the color to use when showing removed lines in diffs
+ env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}]
+ ini:
+ - {key: diff_remove, section: colors}
+COLOR_ERROR:
+ name: Color for error messages
+ default: red
+ description: Defines the color to use when emitting error messages
+ env: [{name: ANSIBLE_COLOR_ERROR}]
+ ini:
+ - {key: error, section: colors}
+ yaml: {key: colors.error}
+COLOR_HIGHLIGHT:
+ name: Color for highlighting
+ default: white
+ description: Defines the color to use for highlighting
+ env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
+ ini:
+ - {key: highlight, section: colors}
+COLOR_OK:
+ name: Color for 'ok' task status
+ default: green
+ description: Defines the color to use when showing 'OK' task status
+ env: [{name: ANSIBLE_COLOR_OK}]
+ ini:
+ - {key: ok, section: colors}
+COLOR_SKIP:
+ name: Color for 'skip' task status
+ default: cyan
+ description: Defines the color to use when showing 'Skipped' task status
+ env: [{name: ANSIBLE_COLOR_SKIP}]
+ ini:
+ - {key: skip, section: colors}
+COLOR_UNREACHABLE:
+ name: Color for 'unreachable' host state
+ default: bright red
+ description: Defines the color to use on 'Unreachable' status
+ env: [{name: ANSIBLE_COLOR_UNREACHABLE}]
+ ini:
+ - {key: unreachable, section: colors}
+COLOR_VERBOSE:
+ name: Color for verbose messages
+ default: blue
+ description: Defines the color to use when emitting verbose messages, that is, those shown with '-v'.
+ env: [{name: ANSIBLE_COLOR_VERBOSE}]
+ ini:
+ - {key: verbose, section: colors}
+COLOR_WARN:
+ name: Color for warning messages
+ default: bright purple
+ description: Defines the color to use when emitting warning messages
+ env: [{name: ANSIBLE_COLOR_WARN}]
+ ini:
+ - {key: warn, section: colors}
+CONDITIONAL_BARE_VARS:
+ name: Allow bare variable evaluation in conditionals
+ default: False
+ type: boolean
+ description:
+ - With this setting on (True), conditional evaluation of 'var' is treated differently than 'var.subkey': the first is evaluated
+ directly while the second goes through the Jinja2 parser. However, 'false' strings in 'var' get evaluated as booleans.
+ - With this setting off, both evaluate the same way, but when 'var' is the string 'false' it is no longer evaluated as a boolean.
+ - Currently this setting defaults to 'True', but it will soon change to 'False' and the setting itself will be removed in the future.
+ - Expect this setting to be deprecated after 2.12.
+ env: [{name: ANSIBLE_CONDITIONAL_BARE_VARS}]
+ ini:
+ - {key: conditional_bare_variables, section: defaults}
+ version_added: "2.8"
+COVERAGE_REMOTE_OUTPUT:
+ name: Sets the output directory and filename prefix to generate coverage run info.
+ description:
+ - Sets the output directory on the remote host to generate coverage reports to.
+ - Currently only used for remote coverage on PowerShell modules.
+ - This is for internal use only.
+ env:
+ - {name: _ANSIBLE_COVERAGE_REMOTE_OUTPUT}
+ vars:
+ - {name: _ansible_coverage_remote_output}
+ type: str
+ version_added: '2.9'
+COVERAGE_REMOTE_WHITELIST:
+ name: Sets the list of paths to run coverage for.
+ description:
+ - A list of paths for files on the Ansible controller to run coverage for when executing on the remote host.
+ - Only files that match the path glob will have their coverage collected.
+ - Multiple path globs can be specified and are separated by ``:``.
+ - Currently only used for remote coverage on PowerShell modules.
+ - This is for internal use only.
+ default: '*'
+ env:
+ - {name: _ANSIBLE_COVERAGE_REMOTE_WHITELIST}
+ type: str
+ version_added: '2.9'
+ACTION_WARNINGS:
+ name: Toggle action warnings
+ default: True
+ description:
+ - By default Ansible will issue a warning when a warning is received from a task action (module or action plugin).
+ - These warnings can be silenced by adjusting this setting to False.
+ env: [{name: ANSIBLE_ACTION_WARNINGS}]
+ ini:
+ - {key: action_warnings, section: defaults}
+ type: boolean
+ version_added: "2.5"
+COMMAND_WARNINGS:
+ name: Command module warnings
+ default: True
+ description:
+ - By default Ansible will issue a warning when the shell or command module is used and the command appears to be similar to an existing Ansible module.
+ - These warnings can be silenced by adjusting this setting to False. You can also control this at the task level with the module option ``warn``.
+ env: [{name: ANSIBLE_COMMAND_WARNINGS}]
+ ini:
+ - {key: command_warnings, section: defaults}
+ type: boolean
+ version_added: "1.8"
+LOCALHOST_WARNING:
+ name: Warning when using implicit inventory with only localhost
+ default: True
+ description:
+ - By default Ansible will issue a warning when there are no hosts in the
+ inventory.
+ - These warnings can be silenced by adjusting this setting to False.
+ env: [{name: ANSIBLE_LOCALHOST_WARNING}]
+ ini:
+ - {key: localhost_warning, section: defaults}
+ type: boolean
+ version_added: "2.6"
+DOC_FRAGMENT_PLUGIN_PATH:
+ name: documentation fragment plugins path
+ default: ~/.ansible/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments
+ description: Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
+ env: [{name: ANSIBLE_DOC_FRAGMENT_PLUGINS}]
+ ini:
+ - {key: doc_fragment_plugins, section: defaults}
+ type: pathspec
+DEFAULT_ACTION_PLUGIN_PATH:
+ name: Action plugins path
+ default: ~/.ansible/plugins/action:/usr/share/ansible/plugins/action
+ description: Colon separated paths in which Ansible will search for Action Plugins.
+ env: [{name: ANSIBLE_ACTION_PLUGINS}]
+ ini:
+ - {key: action_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: plugins.action.path}
+DEFAULT_ALLOW_UNSAFE_LOOKUPS:
+ name: Allow unsafe lookups
+ default: False
+ description:
+ - "When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo)
+ to return data that is not marked 'unsafe'."
+ - By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language,
+ as this could represent a security risk. This option is provided to allow for backwards-compatibility,
+ however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data that could be run
+ through the templating engine later
+ env: []
+ ini:
+ - {key: allow_unsafe_lookups, section: defaults}
+ type: boolean
+ version_added: "2.2.3"
+DEFAULT_ASK_PASS:
+ name: Ask for the login password
+ default: False
+ description:
+ - This controls whether an Ansible playbook should prompt for a login password.
+ If using SSH keys for authentication, you probably do not need to change this setting.
+ env: [{name: ANSIBLE_ASK_PASS}]
+ ini:
+ - {key: ask_pass, section: defaults}
+ type: boolean
+ yaml: {key: defaults.ask_pass}
+DEFAULT_ASK_VAULT_PASS:
+ name: Ask for the vault password(s)
+ default: False
+ description:
+ - This controls whether an Ansible playbook should prompt for a vault password.
+ env: [{name: ANSIBLE_ASK_VAULT_PASS}]
+ ini:
+ - {key: ask_vault_pass, section: defaults}
+ type: boolean
+DEFAULT_BECOME:
+ name: Enable privilege escalation (become)
+ default: False
+ description: Toggles the use of privilege escalation, allowing you to 'become' another user after login.
+ env: [{name: ANSIBLE_BECOME}]
+ ini:
+ - {key: become, section: privilege_escalation}
+ type: boolean
+DEFAULT_BECOME_ASK_PASS:
+ name: Ask for the privilege escalation (become) password
+ default: False
+ description: Toggle to prompt for privilege escalation password.
+ env: [{name: ANSIBLE_BECOME_ASK_PASS}]
+ ini:
+ - {key: become_ask_pass, section: privilege_escalation}
+ type: boolean
+DEFAULT_BECOME_METHOD:
+ name: Choose privilege escalation method
+ default: 'sudo'
+ description: Privilege escalation method to use when `become` is enabled.
+ env: [{name: ANSIBLE_BECOME_METHOD}]
+ ini:
+ - {section: privilege_escalation, key: become_method}
+DEFAULT_BECOME_EXE:
+ name: Choose 'become' executable
+ default: ~
+ description: 'Executable to use for privilege escalation; if not set, Ansible will rely on PATH.'
+ env: [{name: ANSIBLE_BECOME_EXE}]
+ ini:
+ - {key: become_exe, section: privilege_escalation}
+DEFAULT_BECOME_FLAGS:
+ name: Set 'become' executable options
+ default: ''
+ description: Flags to pass to the privilege escalation executable.
+ env: [{name: ANSIBLE_BECOME_FLAGS}]
+ ini:
+ - {key: become_flags, section: privilege_escalation}
+BECOME_PLUGIN_PATH:
+ name: Become plugins path
+ default: ~/.ansible/plugins/become:/usr/share/ansible/plugins/become
+ description: Colon separated paths in which Ansible will search for Become Plugins.
+ env: [{name: ANSIBLE_BECOME_PLUGINS}]
+ ini:
+ - {key: become_plugins, section: defaults}
+ type: pathspec
+ version_added: "2.8"
+DEFAULT_BECOME_USER:
+ # FIXME: should really be blank and make -u passing optional depending on it
+ name: Set the user you 'become' via privilege escalation
+ default: root
+ description: The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
+ env: [{name: ANSIBLE_BECOME_USER}]
+ ini:
+ - {key: become_user, section: privilege_escalation}
+ yaml: {key: become.user}
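+# Editor's sketch of how the become_* keys above line up in ansible.cfg (values are the
+# documented defaults, shown for illustration only):
+#   [privilege_escalation]
+#   become = True
+#   become_method = sudo
+#   become_user = root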
+DEFAULT_CACHE_PLUGIN_PATH:
+ name: Cache Plugins Path
+ default: ~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache
+ description: Colon separated paths in which Ansible will search for Cache Plugins.
+ env: [{name: ANSIBLE_CACHE_PLUGINS}]
+ ini:
+ - {key: cache_plugins, section: defaults}
+ type: pathspec
+DEFAULT_CALLABLE_WHITELIST:
+ name: Template 'callable' whitelist
+ default: []
+ description: Whitelist of callable methods to be made available to template evaluation
+ env: [{name: ANSIBLE_CALLABLE_WHITELIST}]
+ ini:
+ - {key: callable_whitelist, section: defaults}
+ type: list
+DEFAULT_CALLBACK_PLUGIN_PATH:
+ name: Callback Plugins Path
+ default: ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback
+ description: Colon separated paths in which Ansible will search for Callback Plugins.
+ env: [{name: ANSIBLE_CALLBACK_PLUGINS}]
+ ini:
+ - {key: callback_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: plugins.callback.path}
+DEFAULT_CALLBACK_WHITELIST:
+ name: Callback Whitelist
+ default: []
+ description:
+ - "List of whitelisted callbacks, not all callbacks need whitelisting,
+ but many of those shipped with Ansible do as we don't want them activated by default."
+ env: [{name: ANSIBLE_CALLBACK_WHITELIST}]
+ ini:
+ - {key: callback_whitelist, section: defaults}
+ type: list
+ yaml: {key: plugins.callback.whitelist}
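+# Example usage (assumes the stock 'timer' and 'profile_tasks' callbacks are available):
+#   export ANSIBLE_CALLBACK_WHITELIST=timer,profile_tasks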
+DEFAULT_CLICONF_PLUGIN_PATH:
+ name: Cliconf Plugins Path
+ default: ~/.ansible/plugins/cliconf:/usr/share/ansible/plugins/cliconf
+ description: Colon separated paths in which Ansible will search for Cliconf Plugins.
+ env: [{name: ANSIBLE_CLICONF_PLUGINS}]
+ ini:
+ - {key: cliconf_plugins, section: defaults}
+ type: pathspec
+DEFAULT_CONNECTION_PLUGIN_PATH:
+ name: Connection Plugins Path
+ default: ~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection
+ description: Colon separated paths in which Ansible will search for Connection Plugins.
+ env: [{name: ANSIBLE_CONNECTION_PLUGINS}]
+ ini:
+ - {key: connection_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: plugins.connection.path}
+DEFAULT_DEBUG:
+ name: Debug mode
+ default: False
+ description:
+ - "Toggles debug output in Ansible. This is *very* verbose and can hinder
+ multiprocessing. Debug output can also include secret information
+ despite no_log settings being enabled, which means debug mode should not be used in
+ production."
+ env: [{name: ANSIBLE_DEBUG}]
+ ini:
+ - {key: debug, section: defaults}
+ type: boolean
+DEFAULT_EXECUTABLE:
+ name: Target shell executable
+ default: /bin/sh
+ description:
+ - "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target.
+ Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is."
+ env: [{name: ANSIBLE_EXECUTABLE}]
+ ini:
+ - {key: executable, section: defaults}
+DEFAULT_FACT_PATH:
+ name: local fact path
+ default: ~
+ description:
+ - "This option allows you to globally configure a custom path for 'local_facts' for the implied M(ansible.builtin.setup) task when using fact gathering."
+ - "If not set, it will fallback to the default from the M(ansible.builtin.setup) module: ``/etc/ansible/facts.d``."
+ - "This does **not** affect user defined tasks that use the M(ansible.builtin.setup) module."
+ env: [{name: ANSIBLE_FACT_PATH}]
+ ini:
+ - {key: fact_path, section: defaults}
+ type: string
+ yaml: {key: facts.gathering.fact_path}
+DEFAULT_FILTER_PLUGIN_PATH:
+ name: Jinja2 Filter Plugins Path
+ default: ~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter
+ description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
+ env: [{name: ANSIBLE_FILTER_PLUGINS}]
+ ini:
+ - {key: filter_plugins, section: defaults}
+ type: pathspec
+DEFAULT_FORCE_HANDLERS:
+ name: Force handlers to run after failure
+ default: False
+ description:
+ - This option controls if notified handlers run on a host even if a failure occurs on that host.
+ - When false, the handlers will not run if a failure has occurred on a host.
+ - This can also be set per play or on the command line. See Handlers and Failure for more details.
+ env: [{name: ANSIBLE_FORCE_HANDLERS}]
+ ini:
+ - {key: force_handlers, section: defaults}
+ type: boolean
+ version_added: "1.9.1"
+DEFAULT_FORKS:
+ name: Number of task forks
+ default: 5
+ description: Maximum number of forks Ansible will use to execute tasks on target hosts.
+ env: [{name: ANSIBLE_FORKS}]
+ ini:
+ - {key: forks, section: defaults}
+ type: integer
+DEFAULT_GATHERING:
+ name: Gathering behaviour
+ default: 'implicit'
+ description:
+ - This setting controls the default policy of fact gathering (facts discovered about remote systems).
+ - "When 'implicit' (the default), the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set."
+ - "When 'explicit' the inverse is true, facts will not be gathered unless directly requested in the play."
+ - "The 'smart' value means each new host that has no facts discovered will be scanned,
+ but if the same host is addressed in multiple plays it will not be contacted again in the playbook run."
+ - "This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin."
+ env: [{name: ANSIBLE_GATHERING}]
+ ini:
+ - key: gathering
+ section: defaults
+ version_added: "1.6"
+ choices: ['smart', 'explicit', 'implicit']
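+# Example (editor's sketch): make fact gathering opt-in via ansible.cfg:
+#   [defaults]
+#   gathering = explicit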
+DEFAULT_GATHER_SUBSET:
+ name: Gather facts subset
+ default: ['all']
+ description:
+ - Set the `gather_subset` option for the M(ansible.builtin.setup) task in the implicit fact gathering.
+ See the module documentation for specifics.
+ - "It does **not** apply to user defined M(ansible.builtin.setup) tasks."
+ env: [{name: ANSIBLE_GATHER_SUBSET}]
+ ini:
+ - key: gather_subset
+ section: defaults
+ version_added: "2.1"
+ type: list
+DEFAULT_GATHER_TIMEOUT:
+ name: Gather facts timeout
+ default: 10
+ description:
+ - Set the timeout in seconds for the implicit fact gathering.
+ - "It does **not** apply to user defined M(ansible.builtin.setup) tasks."
+ env: [{name: ANSIBLE_GATHER_TIMEOUT}]
+ ini:
+ - {key: gather_timeout, section: defaults}
+ type: integer
+ yaml: {key: defaults.gather_timeout}
+DEFAULT_HANDLER_INCLUDES_STATIC:
+ name: Make handler M(ansible.builtin.include) static
+ default: False
+ description:
+ - "Since 2.0 M(ansible.builtin.include) can be 'dynamic', this setting (if True) forces that if the include appears in a ``handlers`` section to be 'static'."
+ env: [{name: ANSIBLE_HANDLER_INCLUDES_STATIC}]
+ ini:
+ - {key: handler_includes_static, section: defaults}
+ type: boolean
+ deprecated:
+ why: include itself is deprecated and this setting will not matter in the future
+ version: "2.12"
+ alternatives: none, as it's already built into the decision between include_tasks and import_tasks
+DEFAULT_HASH_BEHAVIOUR:
+ name: Hash merge behaviour
+ default: replace
+ type: string
+ choices: ["replace", "merge"]
+ description:
+ - This setting controls how variables merge in Ansible.
+ By default Ansible will override variables in specific precedence orders, as described in Variables.
+ When a variable of higher precedence wins, it will replace the other value.
+ - "Some users prefer that variables that are hashes (aka 'dictionaries' in Python terms) are merged.
+ This setting is called 'merge'. This is not the default behavior and it does not affect variables whose values are scalars
+ (integers, strings) or arrays. We generally recommend not using this setting unless you think you have an absolute need for it,
+ and playbooks in the official examples repos do not use this setting"
+ - In version 2.0 a ``combine`` filter was added to allow doing this for a particular variable (described in Filters).
+ env: [{name: ANSIBLE_HASH_BEHAVIOUR}]
+ ini:
+ - {key: hash_behaviour, section: defaults}
+ deprecated:
+ why: this feature is fragile and not portable, leading to continual confusion and misuse
+ version: "2.13"
+ alternatives: the ``combine`` filter explicitly
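+# Editor's sketch of the recommended per-variable alternative, the ``combine`` filter
+# (hypothetical variable names):
+#   merged_conf: "{{ default_conf | combine(override_conf) }}"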
+DEFAULT_HOST_LIST:
+ name: Inventory Source
+ default: /etc/ansible/hosts
+ description: Comma separated list of Ansible inventory sources
+ env:
+ - name: ANSIBLE_INVENTORY
+ expand_relative_paths: True
+ ini:
+ - key: inventory
+ section: defaults
+ type: pathlist
+ yaml: {key: defaults.inventory}
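+# Example (hypothetical paths): multiple comma separated inventory sources:
+#   export ANSIBLE_INVENTORY=./staging.ini,./prod_plugin.yml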
+DEFAULT_HTTPAPI_PLUGIN_PATH:
+ name: HttpApi Plugins Path
+ default: ~/.ansible/plugins/httpapi:/usr/share/ansible/plugins/httpapi
+ description: Colon separated paths in which Ansible will search for HttpApi Plugins.
+ env: [{name: ANSIBLE_HTTPAPI_PLUGINS}]
+ ini:
+ - {key: httpapi_plugins, section: defaults}
+ type: pathspec
+DEFAULT_INTERNAL_POLL_INTERVAL:
+ name: Internal poll interval
+ default: 0.001
+ env: []
+ ini:
+ - {key: internal_poll_interval, section: defaults}
+ type: float
+ version_added: "2.2"
+ description:
+ - This sets the interval (in seconds) of Ansible internal processes polling each other.
+ Lower values improve performance with large playbooks at the expense of extra CPU load.
+ Higher values are more suitable for Ansible usage in automation scenarios,
+ when UI responsiveness is not required but CPU usage might be a concern.
+ - "The default corresponds to the value hardcoded in Ansible <= 2.1"
+DEFAULT_INVENTORY_PLUGIN_PATH:
+ name: Inventory Plugins Path
+ default: ~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory
+ description: Colon separated paths in which Ansible will search for Inventory Plugins.
+ env: [{name: ANSIBLE_INVENTORY_PLUGINS}]
+ ini:
+ - {key: inventory_plugins, section: defaults}
+ type: pathspec
+DEFAULT_JINJA2_EXTENSIONS:
+ name: Enabled Jinja2 extensions
+ default: []
+ description:
+ - This is a developer-specific feature that allows enabling additional Jinja2 extensions.
+ - "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)"
+ env: [{name: ANSIBLE_JINJA2_EXTENSIONS}]
+ ini:
+ - {key: jinja2_extensions, section: defaults}
+DEFAULT_JINJA2_NATIVE:
+ name: Use Jinja2's NativeEnvironment for templating
+ default: False
+ description: This option preserves variable types during template operations. This requires Jinja2 >= 2.10.
+ env: [{name: ANSIBLE_JINJA2_NATIVE}]
+ ini:
+ - {key: jinja2_native, section: defaults}
+ type: boolean
+ yaml: {key: jinja2_native}
+ version_added: 2.7
+DEFAULT_KEEP_REMOTE_FILES:
+ name: Keep remote files
+ default: False
+ description:
+ - When enabled, Ansible will keep (not clean up) the temporary files it used to execute the tasks on the remote.
+ - If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
+ env: [{name: ANSIBLE_KEEP_REMOTE_FILES}]
+ ini:
+ - {key: keep_remote_files, section: defaults}
+ type: boolean
+DEFAULT_LIBVIRT_LXC_NOSECLABEL:
+ # TODO: move to plugin
+ name: No security label on Lxc
+ default: False
+ description:
+ - "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh.
+ This is necessary when running on systems which do not have SELinux."
+ env:
+ - name: LIBVIRT_LXC_NOSECLABEL
+ deprecated:
+ why: environment variables without ``ANSIBLE_`` prefix are deprecated
+ version: "2.12"
+ alternatives: the ``ANSIBLE_LIBVIRT_LXC_NOSECLABEL`` environment variable
+ - name: ANSIBLE_LIBVIRT_LXC_NOSECLABEL
+ ini:
+ - {key: libvirt_lxc_noseclabel, section: selinux}
+ type: boolean
+ version_added: "2.1"
+DEFAULT_LOAD_CALLBACK_PLUGINS:
+ name: Load callbacks for adhoc
+ default: False
+ description:
+ - Controls whether callback plugins are loaded when running /usr/bin/ansible.
+ This may be used to log activity from the command line, send notifications, and so on.
+ Callback plugins are always loaded for ``ansible-playbook``.
+ env: [{name: ANSIBLE_LOAD_CALLBACK_PLUGINS}]
+ ini:
+ - {key: bin_ansible_callbacks, section: defaults}
+ type: boolean
+ version_added: "1.8"
+DEFAULT_LOCAL_TMP:
+ name: Controller temporary directory
+ default: ~/.ansible/tmp
+ description: Temporary directory for Ansible to use on the controller.
+ env: [{name: ANSIBLE_LOCAL_TEMP}]
+ ini:
+ - {key: local_tmp, section: defaults}
+ type: tmppath
+DEFAULT_LOG_PATH:
+ name: Ansible log file path
+ default: ~
+ description: File to which Ansible will log on the controller. When empty, logging is disabled.
+ env: [{name: ANSIBLE_LOG_PATH}]
+ ini:
+ - {key: log_path, section: defaults}
+ type: path
+DEFAULT_LOG_FILTER:
+ name: Name filters for python logger
+ default: []
+ description: List of logger names to filter out of the log file
+ env: [{name: ANSIBLE_LOG_FILTER}]
+ ini:
+ - {key: log_filter, section: defaults}
+ type: list
+DEFAULT_LOOKUP_PLUGIN_PATH:
+ name: Lookup Plugins Path
+ description: Colon separated paths in which Ansible will search for Lookup Plugins.
+ default: ~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup
+ env: [{name: ANSIBLE_LOOKUP_PLUGINS}]
+ ini:
+ - {key: lookup_plugins, section: defaults}
+ type: pathspec
+ yaml: {key: defaults.lookup_plugins}
+DEFAULT_MANAGED_STR:
+ name: Ansible managed
+ default: 'Ansible managed'
+ description: Sets the macro for the 'ansible_managed' variable available for M(ansible.builtin.template) and M(ansible.windows.win_template) modules. This is only relevant for those two modules.
+ env: []
+ ini:
+ - {key: ansible_managed, section: defaults}
+ yaml: {key: defaults.ansible_managed}
+DEFAULT_MODULE_ARGS:
+ name: Adhoc default arguments
+ default: ''
+ description:
+ - This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
+ env: [{name: ANSIBLE_MODULE_ARGS}]
+ ini:
+ - {key: module_args, section: defaults}
+DEFAULT_MODULE_COMPRESSION:
+ name: Python module compression
+ default: ZIP_DEFLATED
+ description: Compression scheme to use when transferring Python modules to the target.
+ env: []
+ ini:
+ - {key: module_compression, section: defaults}
+# vars:
+# - name: ansible_module_compression
+DEFAULT_MODULE_NAME:
+ name: Default adhoc module
+ default: command
+ description: "Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``."
+ env: []
+ ini:
+ - {key: module_name, section: defaults}
+DEFAULT_MODULE_PATH:
+ name: Modules Path
+ description: Colon separated paths in which Ansible will search for Modules.
+ default: ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
+ env: [{name: ANSIBLE_LIBRARY}]
+ ini:
+ - {key: library, section: defaults}
+ type: pathspec
+DEFAULT_MODULE_UTILS_PATH:
+ name: Module Utils Path
+ description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
+ default: ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
+ env: [{name: ANSIBLE_MODULE_UTILS}]
+ ini:
+ - {key: module_utils, section: defaults}
+ type: pathspec
+DEFAULT_NETCONF_PLUGIN_PATH:
+ name: Netconf Plugins Path
+ default: ~/.ansible/plugins/netconf:/usr/share/ansible/plugins/netconf
+ description: Colon separated paths in which Ansible will search for Netconf Plugins.
+ env: [{name: ANSIBLE_NETCONF_PLUGINS}]
+ ini:
+ - {key: netconf_plugins, section: defaults}
+ type: pathspec
+DEFAULT_NO_LOG:
+ name: No log
+ default: False
+ description: "Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures."
+ env: [{name: ANSIBLE_NO_LOG}]
+ ini:
+ - {key: no_log, section: defaults}
+ type: boolean
+DEFAULT_NO_TARGET_SYSLOG:
+ name: No syslog on target
+ default: False
+ description:
+ - Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will prevent newer-style
+ PowerShell modules from writing to the event log.
+ env: [{name: ANSIBLE_NO_TARGET_SYSLOG}]
+ ini:
+ - {key: no_target_syslog, section: defaults}
+ vars:
+ - name: ansible_no_target_syslog
+ version_added: '2.10'
+ type: boolean
+ yaml: {key: defaults.no_target_syslog}
+DEFAULT_NULL_REPRESENTATION:
+ name: Represent a null
+ default: ~
+ description: What templating should return as a 'null' value. When not set it will let Jinja2 decide.
+ env: [{name: ANSIBLE_NULL_REPRESENTATION}]
+ ini:
+ - {key: null_representation, section: defaults}
+ type: none
+DEFAULT_POLL_INTERVAL:
+ name: Async poll interval
+ default: 15
+ description:
+ - For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
+ this is how often to check back on the status of those tasks when an explicit poll interval is not supplied.
+ The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and
+ providing a quick turnaround when something may have completed.
+ env: [{name: ANSIBLE_POLL_INTERVAL}]
+ ini:
+ - {key: poll_interval, section: defaults}
+ type: integer
+DEFAULT_PRIVATE_KEY_FILE:
+ name: Private key file
+ default: ~
+ description:
+ - For connections that use a certificate or key file to authenticate, rather than an agent or passwords,
+ you can set the default value here to avoid re-specifying --private-key with every invocation.
+ env: [{name: ANSIBLE_PRIVATE_KEY_FILE}]
+ ini:
+ - {key: private_key_file, section: defaults}
+ type: path
+DEFAULT_PRIVATE_ROLE_VARS:
+ name: Private role variables
+ default: False
+ description:
+ - Makes role variables inaccessible from other roles.
+ - This was introduced as a way to reset role variables to default values if
+ a role is used more than once in a playbook.
+ env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
+ ini:
+ - {key: private_role_vars, section: defaults}
+ type: boolean
+ yaml: {key: defaults.private_role_vars}
+DEFAULT_REMOTE_PORT:
+ name: Remote port
+ default: ~
+ description: Port to use in remote connections, when blank it will use the connection plugin default.
+ env: [{name: ANSIBLE_REMOTE_PORT}]
+ ini:
+ - {key: remote_port, section: defaults}
+ type: integer
+ yaml: {key: defaults.remote_port}
+DEFAULT_REMOTE_USER:
+ name: Login/Remote User
+ default:
+ description:
+ - Sets the login user for the target machines
+ - "When blank it uses the connection plugin's default, normally the user currently executing Ansible."
+ env: [{name: ANSIBLE_REMOTE_USER}]
+ ini:
+ - {key: remote_user, section: defaults}
+DEFAULT_ROLES_PATH:
+ name: Roles path
+ default: ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
+ description: Colon separated paths in which Ansible will search for Roles.
+ env: [{name: ANSIBLE_ROLES_PATH}]
+ expand_relative_paths: True
+ ini:
+ - {key: roles_path, section: defaults}
+ type: pathspec
+ yaml: {key: defaults.roles_path}
+DEFAULT_SCP_IF_SSH:
+ # TODO: move to ssh plugin
+ default: smart
+ description:
+ - "Preferred method to use when transferring files over ssh."
+ - When set to smart, Ansible will try them until one succeeds or they all fail.
+ - If set to True, it will force 'scp'; if False, it will use 'sftp'.
+ env: [{name: ANSIBLE_SCP_IF_SSH}]
+ ini:
+ - {key: scp_if_ssh, section: ssh_connection}
+DEFAULT_SELINUX_SPECIAL_FS:
+ name: Problematic file systems
+ default: fuse, nfs, vboxsf, ramfs, 9p, vfat
+ description:
+ - "Some filesystems do not support safe operations and/or return inconsistent errors,
+ this setting makes Ansible 'tolerate' those in the list without causing fatal errors."
+ - Data corruption may occur and writes are not always verified when a filesystem is in the list.
+ env:
+ - name: ANSIBLE_SELINUX_SPECIAL_FS
+ version_added: "2.9"
+ ini:
+ - {key: special_context_filesystems, section: selinux}
+ type: list
+DEFAULT_SFTP_BATCH_MODE:
+ # TODO: move to ssh plugin
+ default: True
+ description: Toggles the use of sftp batch mode ('-b') when the ssh connection plugin transfers files.
+ env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
+ ini:
+ - {key: sftp_batch_mode, section: ssh_connection}
+ type: boolean
+ yaml: {key: ssh_connection.sftp_batch_mode}
+DEFAULT_SQUASH_ACTIONS:
+ name: Squashable actions
+ default: apk, apt, dnf, homebrew, openbsd_pkg, pacman, pip, pkgng, yum, zypper
+ description:
+ - Ansible can optimise actions that call modules that support list parameters when using ``with_`` looping.
+ Instead of calling the module once for each item, the module is called once with the full list.
+ - The default value for this setting is only for certain package managers, but it can be used for any module.
+ - Currently, this is only supported for modules that have a name or pkg parameter, and only when the item is the only thing being passed to the parameter.
+ env: [{name: ANSIBLE_SQUASH_ACTIONS}]
+ ini:
+ - {key: squash_actions, section: defaults}
+ type: list
+ version_added: "2.0"
+ deprecated:
+ why: Loop squashing is deprecated and this configuration will no longer be used
+ version: "2.11"
+ alternatives: a list directly with the module argument
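+# Editor's sketch of the recommended alternative - pass the list to the module argument
+# directly (hypothetical packages):
+#   - yum:
+#       name: [httpd, mariadb-server]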
+DEFAULT_SSH_TRANSFER_METHOD:
+ # TODO: move to ssh plugin
+ default:
+ description: 'unused?'
+ # - "Preferred method to use when transferring files over ssh"
+ # - Setting to smart will try them until one succeeds or they all fail
+ #choices: ['sftp', 'scp', 'dd', 'smart']
+ env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
+ ini:
+ - {key: transfer_method, section: ssh_connection}
+DEFAULT_STDOUT_CALLBACK:
+ name: Main display callback plugin
+ default: default
+ description:
+ - "Set the main callback used to display Ansible output, you can only have one at a time."
+ - You can have many other callbacks, but just one can be in charge of stdout.
+ env: [{name: ANSIBLE_STDOUT_CALLBACK}]
+ ini:
+ - {key: stdout_callback, section: defaults}
+ENABLE_TASK_DEBUGGER:
+ name: Whether to enable the task debugger
+ default: False
+ description:
+ - Whether or not to enable the task debugger, this previously was done as a strategy plugin.
+ - Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
+ a task fails or a host is unreachable. Use the debugger keyword for more flexibility.
+ type: boolean
+ env: [{name: ANSIBLE_ENABLE_TASK_DEBUGGER}]
+ ini:
+ - {key: enable_task_debugger, section: defaults}
+ version_added: "2.5"
+TASK_DEBUGGER_IGNORE_ERRORS:
+ name: Whether a failed task with ignore_errors=True will still invoke the debugger
+ default: True
+ description:
+ - This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True
+ is specified.
+ - True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
+ type: boolean
+ env: [{name: ANSIBLE_TASK_DEBUGGER_IGNORE_ERRORS}]
+ ini:
+ - {key: task_debugger_ignore_errors, section: defaults}
+ version_added: "2.7"
+DEFAULT_STRATEGY:
+ name: Implied strategy
+ default: 'linear'
+ description: Set the default strategy used for plays.
+ env: [{name: ANSIBLE_STRATEGY}]
+ ini:
+ - {key: strategy, section: defaults}
+ version_added: "2.3"
+DEFAULT_STRATEGY_PLUGIN_PATH:
+ name: Strategy Plugins Path
+ description: Colon separated paths in which Ansible will search for Strategy Plugins.
+ default: ~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy
+ env: [{name: ANSIBLE_STRATEGY_PLUGINS}]
+ ini:
+ - {key: strategy_plugins, section: defaults}
+ type: pathspec
+DEFAULT_SU:
+ default: False
+ description: 'Toggle the use of "su" for tasks.'
+ env: [{name: ANSIBLE_SU}]
+ ini:
+ - {key: su, section: defaults}
+ type: boolean
+ yaml: {key: defaults.su}
+DEFAULT_SYSLOG_FACILITY:
+ name: syslog facility
+ default: LOG_USER
+ description: Syslog facility to use when Ansible logs to the remote target
+ env: [{name: ANSIBLE_SYSLOG_FACILITY}]
+ ini:
+ - {key: syslog_facility, section: defaults}
+DEFAULT_TASK_INCLUDES_STATIC:
+ name: Task include static
+ default: False
+ description:
+ - The `include` tasks can be static or dynamic; this toggles the default expected behaviour if autodetection fails and it is not explicitly set in the task.
+ env: [{name: ANSIBLE_TASK_INCLUDES_STATIC}]
+ ini:
+ - {key: task_includes_static, section: defaults}
+ type: boolean
+ version_added: "2.1"
+ deprecated:
+ why: include itself is deprecated and this setting will not matter in the future
+ version: "2.12"
+ alternatives: None, as it's already built into the decision between include_tasks and import_tasks
+DEFAULT_TERMINAL_PLUGIN_PATH:
+ name: Terminal Plugins Path
+ default: ~/.ansible/plugins/terminal:/usr/share/ansible/plugins/terminal
+ description: Colon separated paths in which Ansible will search for Terminal Plugins.
+ env: [{name: ANSIBLE_TERMINAL_PLUGINS}]
+ ini:
+ - {key: terminal_plugins, section: defaults}
+ type: pathspec
+DEFAULT_TEST_PLUGIN_PATH:
+ name: Jinja2 Test Plugins Path
+ description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
+ default: ~/.ansible/plugins/test:/usr/share/ansible/plugins/test
+ env: [{name: ANSIBLE_TEST_PLUGINS}]
+ ini:
+ - {key: test_plugins, section: defaults}
+ type: pathspec
+DEFAULT_TIMEOUT:
+ name: Connection timeout
+ default: 10
+ description: This is the default timeout for connection plugins to use.
+ env: [{name: ANSIBLE_TIMEOUT}]
+ ini:
+ - {key: timeout, section: defaults}
+ type: integer
+DEFAULT_TRANSPORT:
+ # note that ssh_utils refs this and needs to be updated if removed
+ name: Connection plugin
+ default: smart
+ description: "Default connection plugin to use, the 'smart' option will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions"
+ env: [{name: ANSIBLE_TRANSPORT}]
+ ini:
+ - {key: transport, section: defaults}
+DEFAULT_UNDEFINED_VAR_BEHAVIOR:
+ name: Jinja2 fail on undefined
+ default: True
+ version_added: "1.3"
+ description:
+ - When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
+ - "Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written."
+ env: [{name: ANSIBLE_ERROR_ON_UNDEFINED_VARS}]
+ ini:
+ - {key: error_on_undefined_vars, section: defaults}
+ type: boolean
+DEFAULT_VARS_PLUGIN_PATH:
+ name: Vars Plugins Path
+ default: ~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars
+ description: Colon separated paths in which Ansible will search for Vars Plugins.
+ env: [{name: ANSIBLE_VARS_PLUGINS}]
+ ini:
+ - {key: vars_plugins, section: defaults}
+ type: pathspec
+# TODO: unused?
+#DEFAULT_VAR_COMPRESSION_LEVEL:
+# default: 0
+# description: 'TODO: write it'
+# env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}]
+# ini:
+# - {key: var_compression_level, section: defaults}
+# type: integer
+# yaml: {key: defaults.var_compression_level}
+DEFAULT_VAULT_ID_MATCH:
+ name: Force vault id match
+ default: False
+ description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id'
+ env: [{name: ANSIBLE_VAULT_ID_MATCH}]
+ ini:
+ - {key: vault_id_match, section: defaults}
+ yaml: {key: defaults.vault_id_match}
+DEFAULT_VAULT_IDENTITY:
+ name: Vault id label
+ default: default
+ description: 'The label to use for the default vault id in cases where a vault id label is not provided'
+ env: [{name: ANSIBLE_VAULT_IDENTITY}]
+ ini:
+ - {key: vault_identity, section: defaults}
+ yaml: {key: defaults.vault_identity}
+DEFAULT_VAULT_ENCRYPT_IDENTITY:
+ name: Vault id to use for encryption
+ default:
+ description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.'
+ env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}]
+ ini:
+ - {key: vault_encrypt_identity, section: defaults}
+ yaml: {key: defaults.vault_encrypt_identity}
+DEFAULT_VAULT_IDENTITY_LIST:
+ name: Default vault ids
+ default: []
+ description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.'
+ env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}]
+ ini:
+ - {key: vault_identity_list, section: defaults}
+ type: list
+ yaml: {key: defaults.vault_identity_list}
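+# Example (hypothetical labels and password file paths):
+#   export ANSIBLE_VAULT_IDENTITY_LIST='dev@~/.vault_pass_dev,prod@~/.vault_pass_prod'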
+DEFAULT_VAULT_PASSWORD_FILE:
+ name: Vault password file
+ default: ~
+ description: 'The vault password file to use. Equivalent to --vault-password-file or --vault-id'
+ env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}]
+ ini:
+ - {key: vault_password_file, section: defaults}
+ type: path
+ yaml: {key: defaults.vault_password_file}
+DEFAULT_VERBOSITY:
+ name: Verbosity
+ default: 0
+ description: Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
+ env: [{name: ANSIBLE_VERBOSITY}]
+ ini:
+ - {key: verbosity, section: defaults}
+ type: integer
+DEPRECATION_WARNINGS:
+ name: Deprecation messages
+ default: True
+ description: "Toggle to control the showing of deprecation warnings"
+ env: [{name: ANSIBLE_DEPRECATION_WARNINGS}]
+ ini:
+ - {key: deprecation_warnings, section: defaults}
+ type: boolean
+DEVEL_WARNING:
+ name: Running devel warning
+ default: True
+ description: Toggle to control showing warnings related to running devel
+ env: [{name: ANSIBLE_DEVEL_WARNING}]
+ ini:
+ - {key: devel_warning, section: defaults}
+ type: boolean
+DIFF_ALWAYS:
+ name: Show differences
+ default: False
+ description: Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
+ env: [{name: ANSIBLE_DIFF_ALWAYS}]
+ ini:
+ - {key: always, section: diff}
+ type: bool
+DIFF_CONTEXT:
+ name: Difference context
+ default: 3
+ description: How many lines of context to show when displaying the differences between files.
+ env: [{name: ANSIBLE_DIFF_CONTEXT}]
+ ini:
+ - {key: context, section: diff}
+ type: integer
+DISPLAY_ARGS_TO_STDOUT:
+ name: Show task arguments
+ default: False
+ description:
+ - "Normally ``ansible-playbook`` will print a header for each task that is run.
+ These headers will contain the name: field from the task if you specified one.
+ If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running.
+ Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action.
+ If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header."
+ - "This setting defaults to False because there is a chance that you have sensitive values in your parameters and
+ you do not want those to be printed."
+ - "If you set this to True you should be sure that you have secured your environment's stdout
+ (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or
+ made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values.
+ See How do I keep secret data in my playbook? for more information."
+ env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}]
+ ini:
+ - {key: display_args_to_stdout, section: defaults}
+ type: boolean
+ version_added: "2.1"
+DISPLAY_SKIPPED_HOSTS:
+ name: Show skipped results
+ default: True
+ description: "Toggle to control displaying skipped task/host entries in a task in the default callback"
+ env:
+ - name: DISPLAY_SKIPPED_HOSTS
+ deprecated:
+ why: environment variables without ``ANSIBLE_`` prefix are deprecated
+ version: "2.12"
+ alternatives: the ``ANSIBLE_DISPLAY_SKIPPED_HOSTS`` environment variable
+ - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+ ini:
+ - {key: display_skipped_hosts, section: defaults}
+ type: boolean
+DOCSITE_ROOT_URL:
+ name: Root docsite URL
+ default: https://docs.ansible.com/ansible/
+ description: Root docsite URL used to generate docs URLs in warning/error text;
+ must be an absolute URL with valid scheme and trailing slash.
+ ini:
+ - {key: docsite_root_url, section: defaults}
+ version_added: "2.8"
+DUPLICATE_YAML_DICT_KEY:
+ name: Controls ansible behaviour when finding duplicate keys in YAML.
+ default: warn
+ description:
+ - By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
+ - These warnings can be silenced by setting this to 'ignore', or turned into fatal errors by setting it to 'error'.
+ env: [{name: ANSIBLE_DUPLICATE_YAML_DICT_KEY}]
+ ini:
+ - {key: duplicate_dict_key, section: defaults}
+ type: string
+ choices: ['warn', 'error', 'ignore']
+ version_added: "2.9"
+ERROR_ON_MISSING_HANDLER:
+ name: Missing handler error
+ default: True
+ description: "Toggle to allow missing handlers to become a warning instead of an error when notifying."
+ env: [{name: ANSIBLE_ERROR_ON_MISSING_HANDLER}]
+ ini:
+ - {key: error_on_missing_handler, section: defaults}
+ type: boolean
+CONNECTION_FACTS_MODULES:
+ name: Map of connections to fact modules
+ default:
+ # use ansible.legacy names on unqualified facts modules to allow library/ overrides
+ asa: ansible.legacy.asa_facts
+ cisco.asa.asa: cisco.asa.asa_facts
+ eos: ansible.legacy.eos_facts
+ arista.eos.eos: arista.eos.eos_facts
+ frr: ansible.legacy.frr_facts
+ frr.frr.frr: frr.frr.frr_facts
+ ios: ansible.legacy.ios_facts
+ cisco.ios.ios: cisco.ios.ios_facts
+ iosxr: ansible.legacy.iosxr_facts
+ cisco.iosxr.iosxr: cisco.iosxr.iosxr_facts
+ junos: ansible.legacy.junos_facts
+ junipernetworks.junos.junos: junipernetworks.junos.junos_facts
+ nxos: ansible.legacy.nxos_facts
+ cisco.nxos.nxos: cisco.nxos.nxos_facts
+ vyos: ansible.legacy.vyos_facts
+ vyos.vyos.vyos: vyos.vyos.vyos_facts
+ exos: ansible.legacy.exos_facts
+ extreme.exos.exos: extreme.exos.exos_facts
+ slxos: ansible.legacy.slxos_facts
+ extreme.slxos.slxos: extreme.slxos.slxos_facts
+ voss: ansible.legacy.voss_facts
+ extreme.voss.voss: extreme.voss.voss_facts
+ ironware: ansible.legacy.ironware_facts
+ community.network.ironware: community.network.ironware_facts
+ description: "Which modules to run during a play's fact gathering stage based on connection"
+ env: [{name: ANSIBLE_CONNECTION_FACTS_MODULES}]
+ ini:
+ - {key: connection_facts_modules, section: defaults}
+ type: dict
+FACTS_MODULES:
+ name: Gather Facts Modules
+ default:
+ - smart
+ description: "Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type."
+ env: [{name: ANSIBLE_FACTS_MODULES}]
+ ini:
+ - {key: facts_modules, section: defaults}
+ type: list
+ vars:
+ - name: ansible_facts_modules
+GALAXY_IGNORE_CERTS:
+ name: Galaxy validate certs
+ default: False
+ description:
+ - If set to yes, ansible-galaxy will not validate TLS certificates.
+ This can be useful for testing against a server with a self-signed certificate.
+ env: [{name: ANSIBLE_GALAXY_IGNORE}]
+ ini:
+ - {key: ignore_certs, section: galaxy}
+ type: boolean
+GALAXY_ROLE_SKELETON:
+ name: Galaxy role or collection skeleton directory
+ default:
+ description: Role or collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``, same as ``--role-skeleton``.
+ env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON}]
+ ini:
+ - {key: role_skeleton, section: galaxy}
+ type: path
+GALAXY_ROLE_SKELETON_IGNORE:
+ name: Galaxy skeleton ignore
+ default: ["^.git$", "^.*/.git_keep$"]
+ description: patterns of files to ignore inside a Galaxy role or collection skeleton directory
+ env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}]
+ ini:
+ - {key: role_skeleton_ignore, section: galaxy}
+ type: list
+# TODO: unused?
+#GALAXY_SCMS:
+# name: Galaxy SCMS
+# default: git, hg
+# description: Available galaxy source control management systems.
+# env: [{name: ANSIBLE_GALAXY_SCMS}]
+# ini:
+# - {key: scms, section: galaxy}
+# type: list
+GALAXY_SERVER:
+ default: https://galaxy.ansible.com
+ description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source."
+ env: [{name: ANSIBLE_GALAXY_SERVER}]
+ ini:
+ - {key: server, section: galaxy}
+ yaml: {key: galaxy.server}
+GALAXY_SERVER_LIST:
+ description:
+ - A list of Galaxy servers to use when installing a collection.
+ - The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
+ - 'See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.'
+ - The order of servers in this list is used as the order in which a collection is resolved.
+ - Setting this config option will ignore the :ref:`galaxy_server` config option.
+ env: [{name: ANSIBLE_GALAXY_SERVER_LIST}]
+ ini:
+ - {key: server_list, section: galaxy}
+ type: list
+ version_added: "2.9"
+GALAXY_TOKEN_PATH:
+ default: ~/.ansible/galaxy_token
+ description: "Local path to galaxy access token file"
+ env: [{name: ANSIBLE_GALAXY_TOKEN_PATH}]
+ ini:
+ - {key: token_path, section: galaxy}
+ type: path
+ version_added: "2.9"
+GALAXY_DISPLAY_PROGRESS:
+ default: ~
+ description:
+ - Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when
+ outputting stdout to a file.
+ - This config option controls whether the display wheel is shown or not.
+ - The default is to show the display wheel if stdout has a tty.
+ env: [{name: ANSIBLE_GALAXY_DISPLAY_PROGRESS}]
+ ini:
+ - {key: display_progress, section: galaxy}
+ type: bool
+ version_added: "2.10"
+HOST_KEY_CHECKING:
+ name: Check host keys
+ default: True
+ description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
+ env: [{name: ANSIBLE_HOST_KEY_CHECKING}]
+ ini:
+ - {key: host_key_checking, section: defaults}
+ type: boolean
+HOST_PATTERN_MISMATCH:
+ name: Control host pattern mismatch behaviour
+ default: 'warning'
+ description: This setting changes the behaviour of mismatched host patterns; it allows you to force a fatal error, a warning, or to just ignore it.
+ env: [{name: ANSIBLE_HOST_PATTERN_MISMATCH}]
+ ini:
+ - {key: host_pattern_mismatch, section: inventory}
+ choices: ['warning', 'error', 'ignore']
+ version_added: "2.8"
+INTERPRETER_PYTHON:
+ name: Python interpreter path (or automatic discovery behavior) used for module execution
+ default: auto_legacy
+ env: [{name: ANSIBLE_PYTHON_INTERPRETER}]
+ ini:
+ - {key: interpreter_python, section: defaults}
+ vars:
+ - {name: ansible_python_interpreter}
+ version_added: "2.8"
+ description:
+ - Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode.
+ Supported discovery modes are ``auto``, ``auto_silent``, and ``auto_legacy`` (the default). All discovery modes
+ employ a lookup table to use the included system Python (on distributions known to include one), falling back to a
+ fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The
+ fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed
+ later may change which one is used). This warning behavior can be disabled by setting ``auto_silent``. The default
+ value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility with older Ansible releases
+ that always defaulted to ``/usr/bin/python``, will use that interpreter if present (and issue a warning that the
+ default behavior will change to that of ``auto`` in a future Ansible release).
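+# Example (editor's sketch): skip discovery entirely by pinning the interpreter, e.g. in an
+# ini inventory (hypothetical host):
+#   myhost ansible_python_interpreter=/usr/bin/python3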
+INTERPRETER_PYTHON_DISTRO_MAP:
+ name: Mapping of known included platform pythons for various Linux distros
+ default:
+ centos: &rhelish
+ '6': /usr/bin/python
+ '8': /usr/libexec/platform-python
+ debian:
+ '10': /usr/bin/python3
+ fedora:
+ '23': /usr/bin/python3
+ redhat: *rhelish
+ rhel: *rhelish
+ ubuntu:
+ '14': /usr/bin/python
+ '16': /usr/bin/python3
+ version_added: "2.8"
+ # FUTURE: add inventory override once we're sure it can't be abused by a rogue target
+ # FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
+INTERPRETER_PYTHON_FALLBACK:
+ name: Ordered list of Python interpreters to check for in discovery
+ default:
+ - /usr/bin/python
+ - python3.7
+ - python3.6
+ - python3.5
+ - python2.7
+ - python2.6
+ - /usr/libexec/platform-python
+ - /usr/bin/python3
+ - python
+ # FUTURE: add inventory override once we're sure it can't be abused by a rogue target
+ version_added: "2.8"
+TRANSFORM_INVALID_GROUP_CHARS:
+ name: Transform invalid characters in group names
+ default: 'never'
+ description:
+ - Make ansible transform invalid characters in group names supplied by inventory sources.
+ - When 'never', it will allow the invalid group name but warn about the issue.
+ - When 'ignore', it does the same as 'never', without issuing a warning.
+ - When 'always', it will replace any invalid characters with '_' (underscore) and warn the user.
+ - When 'silently', it does the same as 'always', without issuing a warning.
+ env: [{name: ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS}]
+ ini:
+ - {key: force_valid_group_names, section: defaults}
+ type: string
+ choices: ['always', 'never', 'ignore', 'silently']
+ version_added: '2.8'
+INVALID_TASK_ATTRIBUTE_FAILED:
+ name: Controls whether invalid attributes for a task result in errors instead of warnings
+ default: True
+ description: If 'false', invalid attributes for a task will result in warnings instead of errors
+ type: boolean
+ env:
+ - name: ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED
+ ini:
+ - key: invalid_task_attribute_failed
+ section: defaults
+ version_added: "2.7"
+INVENTORY_ANY_UNPARSED_IS_FAILED:
+ name: Controls whether any unparseable inventory source is a fatal error
+ default: False
+ description: >
+ If 'true', it is a fatal error when any given inventory source
+ cannot be successfully parsed by any available inventory plugin;
+ otherwise, this situation only attracts a warning.
+ type: boolean
+ env: [{name: ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED}]
+ ini:
+ - {key: any_unparsed_is_failed, section: inventory}
+ version_added: "2.7"
+INVENTORY_CACHE_ENABLED:
+ name: Inventory caching enabled
+ default: False
+ description: Toggle to turn on inventory caching
+ env: [{name: ANSIBLE_INVENTORY_CACHE}]
+ ini:
+ - {key: cache, section: inventory}
+ type: bool
+INVENTORY_CACHE_PLUGIN:
+ name: Inventory cache plugin
+ description: The plugin for caching inventory. If INVENTORY_CACHE_PLUGIN is not provided CACHE_PLUGIN can be used instead.
+ env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN}]
+ ini:
+ - {key: cache_plugin, section: inventory}
+INVENTORY_CACHE_PLUGIN_CONNECTION:
+ name: Inventory cache plugin URI to override the defaults section
+ description: The inventory cache connection. If INVENTORY_CACHE_PLUGIN_CONNECTION is not provided CACHE_PLUGIN_CONNECTION can be used instead.
+ env: [{name: ANSIBLE_INVENTORY_CACHE_CONNECTION}]
+ ini:
+ - {key: cache_connection, section: inventory}
+INVENTORY_CACHE_PLUGIN_PREFIX:
+ name: Inventory cache plugin table prefix
+ description: The table prefix for the cache plugin. If INVENTORY_CACHE_PLUGIN_PREFIX is not provided CACHE_PLUGIN_PREFIX can be used instead.
+ env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX}]
+ default: ansible_facts
+ ini:
+ - {key: cache_prefix, section: inventory}
+INVENTORY_CACHE_TIMEOUT:
+ name: Inventory cache plugin expiration timeout
+ description: Expiration timeout for the inventory cache plugin data. If INVENTORY_CACHE_TIMEOUT is not provided CACHE_TIMEOUT can be used instead.
+ default: 3600
+ env: [{name: ANSIBLE_INVENTORY_CACHE_TIMEOUT}]
+ ini:
+ - {key: cache_timeout, section: inventory}
+INVENTORY_ENABLED:
+ name: Active Inventory plugins
+ default: ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml']
+ description: List of enabled inventory plugins, it also determines the order in which they are used.
+ env: [{name: ANSIBLE_INVENTORY_ENABLED}]
+ ini:
+ - {key: enable_plugins, section: inventory}
+ type: list
+INVENTORY_EXPORT:
+ name: Set ansible-inventory into export mode
+ default: False
+ description: Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it is optimized for exporting.
+ env: [{name: ANSIBLE_INVENTORY_EXPORT}]
+ ini:
+ - {key: export, section: inventory}
+ type: bool
+INVENTORY_IGNORE_EXTS:
+ name: Inventory ignore extensions
+ default: "{{(BLACKLIST_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
+ description: List of extensions to ignore when using a directory as an inventory source
+ env: [{name: ANSIBLE_INVENTORY_IGNORE}]
+ ini:
+ - {key: inventory_ignore_extensions, section: defaults}
+ - {key: ignore_extensions, section: inventory}
+ type: list
+INVENTORY_IGNORE_PATTERNS:
+ name: Inventory ignore patterns
+ default: []
+ description: List of patterns to ignore when using a directory as an inventory source
+ env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}]
+ ini:
+ - {key: inventory_ignore_patterns, section: defaults}
+ - {key: ignore_patterns, section: inventory}
+ type: list
+INVENTORY_UNPARSED_IS_FAILED:
+ name: Unparsed Inventory failure
+ default: False
+ description: >
+ If 'true' it is a fatal error if every single potential inventory
+ source fails to parse, otherwise this situation will only attract a
+ warning.
+ env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}]
+ ini:
+ - {key: unparsed_is_failed, section: inventory}
+ type: bool
+MAX_FILE_SIZE_FOR_DIFF:
+ name: Diff maximum file size
+ default: 104448
+ description: Maximum size of files to be considered for diff display
+ env: [{name: ANSIBLE_MAX_DIFF_SIZE}]
+ ini:
+ - {key: max_diff_size, section: defaults}
+ type: int
+NETWORK_GROUP_MODULES:
+ name: Network module families
+ default: [eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos]
+ description: 'TODO: write it'
+ env:
+ - name: NETWORK_GROUP_MODULES
+ deprecated:
+ why: environment variables without ``ANSIBLE_`` prefix are deprecated
+ version: "2.12"
+ alternatives: the ``ANSIBLE_NETWORK_GROUP_MODULES`` environment variable
+ - name: ANSIBLE_NETWORK_GROUP_MODULES
+ ini:
+ - {key: network_group_modules, section: defaults}
+ type: list
+ yaml: {key: defaults.network_group_modules}
+INJECT_FACTS_AS_VARS:
+ default: True
+ description:
+ - Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
+ - Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
+ env: [{name: ANSIBLE_INJECT_FACT_VARS}]
+ ini:
+ - {key: inject_facts_as_vars, section: defaults}
+ type: boolean
+ version_added: "2.5"
+MODULE_IGNORE_EXTS:
+ name: Module ignore extensions
+ default: "{{(BLACKLIST_EXTS + ('.yaml', '.yml', '.ini'))}}"
+ description:
+ - List of extensions to ignore when looking for modules to load
+ - This is for blacklisting script and binary module fallback extensions
+ env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}]
+ ini:
+ - {key: module_ignore_exts, section: defaults}
+ type: list
+OLD_PLUGIN_CACHE_CLEARING:
+ description: Previously Ansible would only clear some of the plugin loading caches when loading new roles, which led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows you to return to that behaviour.
+ env: [{name: ANSIBLE_OLD_PLUGIN_CACHE_CLEAR}]
+ ini:
+ - {key: old_plugin_cache_clear, section: defaults}
+ type: boolean
+ default: False
+ version_added: "2.8"
+PARAMIKO_HOST_KEY_AUTO_ADD:
+ # TODO: move to plugin
+ default: False
+ description: When enabled, the paramiko connection plugin will automatically add host keys for unknown hosts.
+ env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
+ ini:
+ - {key: host_key_auto_add, section: paramiko_connection}
+ type: boolean
+PARAMIKO_LOOK_FOR_KEYS:
+ name: look for keys
+ default: True
+ description: Toggles whether the paramiko connection plugin searches ~/.ssh/ for discoverable private key files.
+ env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
+ ini:
+ - {key: look_for_keys, section: paramiko_connection}
+ type: boolean
+PERSISTENT_CONTROL_PATH_DIR:
+ name: Persistence socket path
+ default: ~/.ansible/pc
+ description: Path to socket to be used by the connection persistence system.
+ env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}]
+ ini:
+ - {key: control_path_dir, section: persistent_connection}
+ type: path
+PERSISTENT_CONNECT_TIMEOUT:
+ name: Persistence timeout
+ default: 30
+ description: This controls how long the persistent connection will remain idle before it is destroyed.
+ env: [{name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT}]
+ ini:
+ - {key: connect_timeout, section: persistent_connection}
+ type: integer
+PERSISTENT_CONNECT_RETRY_TIMEOUT:
+ name: Persistence connection retry timeout
+ default: 15
+ description: This controls the retry timeout for the persistent connection when connecting to the local domain socket.
+ env: [{name: ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT}]
+ ini:
+ - {key: connect_retry_timeout, section: persistent_connection}
+ type: integer
+PERSISTENT_COMMAND_TIMEOUT:
+ name: Persistence command timeout
+ default: 30
+ description: This controls the amount of time to wait for a response from the remote device before timing out the persistent connection.
+ env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}]
+ ini:
+ - {key: command_timeout, section: persistent_connection}
+ type: int
+PLAYBOOK_DIR:
+ name: playbook dir override for non-playbook CLIs (ala --playbook-dir)
+ version_added: "2.9"
+ description:
+ - A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
+ env: [{name: ANSIBLE_PLAYBOOK_DIR}]
+ ini: [{key: playbook_dir, section: defaults}]
+ type: path
+PLAYBOOK_VARS_ROOT:
+ name: playbook vars files root
+ default: top
+ version_added: "2.4.1"
+ description:
+ - This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
+ - The ``top`` option follows the traditional behaviour of using the top playbook in the chain to find the root directory.
+ - The ``bottom`` option follows the 2.4.0 behaviour of using the current playbook to find the root directory.
+ - The ``all`` option examines from the first parent to the current playbook.
+ env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}]
+ ini:
+ - {key: playbook_vars_root, section: defaults}
+ choices: [ top, bottom, all ]
+PLUGIN_FILTERS_CFG:
+ name: Config file for limiting valid plugins
+ default: null
+ version_added: "2.5.0"
+ description:
+ - "A path to configuration for filtering which plugins installed on the system are allowed to be used."
+ - "See :ref:`plugin_filtering_config` for details of the filter file's format."
+ - " The default is /etc/ansible/plugin_filters.yml"
+ ini:
+ - key: plugin_filters_cfg
+ section: default
+ deprecated:
+ why: specifying "plugin_filters_cfg" under the "default" section is deprecated
+ version: "2.12"
+ alternatives: the "defaults" section instead
+ - key: plugin_filters_cfg
+ section: defaults
+ type: path
+PYTHON_MODULE_RLIMIT_NOFILE:
+ name: Adjust maximum file descriptor soft limit during Python module execution
+ description:
+ - Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (this can speed up subprocess usage on
+ Python 2.x; see https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. The default
+ value of 0 does not attempt to adjust existing system-defined limits.
+ default: 0
+ env:
+ - {name: ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE}
+ ini:
+ - {key: python_module_rlimit_nofile, section: defaults}
+ vars:
+ - {name: ansible_python_module_rlimit_nofile}
+ version_added: '2.8'
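+# Illustrative usage (not part of the shipped defaults): raise the soft limit
+# via "python_module_rlimit_nofile = 4096" under [defaults] in ansible.cfg,
+# or with ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=4096 in the environment.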
+RETRY_FILES_ENABLED:
+ name: Retry files
+ default: False
+ description: This controls whether a failed Ansible playbook should create a .retry file.
+ env: [{name: ANSIBLE_RETRY_FILES_ENABLED}]
+ ini:
+ - {key: retry_files_enabled, section: defaults}
+ type: bool
+RETRY_FILES_SAVE_PATH:
+ name: Retry files path
+ default: ~
+ description:
+ - This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
+ - This file will be overwritten after each run with the list of failed hosts from all plays.
+ env: [{name: ANSIBLE_RETRY_FILES_SAVE_PATH}]
+ ini:
+ - {key: retry_files_save_path, section: defaults}
+ type: path
+RUN_VARS_PLUGINS:
+ name: When should vars plugins run relative to inventory
+ default: demand
+ description:
+ - This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection.
+ - Setting to C(demand) will run vars_plugins relative to inventory sources anytime vars are 'demanded' by tasks.
+ - Setting to C(start) will run vars_plugins relative to inventory sources after importing that inventory source.
+ env: [{name: ANSIBLE_RUN_VARS_PLUGINS}]
+ ini:
+ - {key: run_vars_plugins, section: defaults}
+ type: str
+ choices: ['demand', 'start']
+ version_added: "2.10"
+SHOW_CUSTOM_STATS:
+ name: Display custom stats
+ default: False
+ description: 'This adds the custom stats set via the set_stats plugin to the default output'
+ env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}]
+ ini:
+ - {key: show_custom_stats, section: defaults}
+ type: bool
+STRING_TYPE_FILTERS:
+ name: Filters to preserve strings
+ default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json]
+ description:
+ - "This list of filters avoids 'type conversion' when templating variables"
+ - Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
+ env: [{name: ANSIBLE_STRING_TYPE_FILTERS}]
+ ini:
+ - {key: dont_type_filters, section: jinja2}
+ type: list
+SYSTEM_WARNINGS:
+ name: System warnings
+ default: True
+ description:
+ - Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
+ - These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
+ env: [{name: ANSIBLE_SYSTEM_WARNINGS}]
+ ini:
+ - {key: system_warnings, section: defaults}
+ type: boolean
+TAGS_RUN:
+ name: Run Tags
+ default: []
+ type: list
+ description: default list of tags to run in your plays; Skip Tags takes precedence.
+ env: [{name: ANSIBLE_RUN_TAGS}]
+ ini:
+ - {key: run, section: tags}
+ version_added: "2.5"
+TAGS_SKIP:
+ name: Skip Tags
+ default: []
+ type: list
+ description: default list of tags to skip in your plays; takes precedence over Run Tags.
+ env: [{name: ANSIBLE_SKIP_TAGS}]
+ ini:
+ - {key: skip, section: tags}
+ version_added: "2.5"
+TASK_TIMEOUT:
+ name: Task Timeout
+ default: 0
+ description:
+ - Set the maximum time (in seconds) that a task can run for.
+ - If set to 0 (the default) there is no timeout.
+ env: [{name: ANSIBLE_TASK_TIMEOUT}]
+ ini:
+ - {key: task_timeout, section: defaults}
+ type: integer
+ version_added: '2.10'
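+# Illustrative usage (not part of the shipped defaults): exporting
+# ANSIBLE_TASK_TIMEOUT=300 (or setting "task_timeout = 300" under [defaults])
+# fails any single task that runs longer than five minutes.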
+WORKER_SHUTDOWN_POLL_COUNT:
+ name: Worker Shutdown Poll Count
+ default: 0
+ description:
+ - The maximum number of times to check Task Queue Manager worker processes to verify they have exited cleanly.
+ - After this limit is reached any worker processes still running will be terminated.
+ - This is for internal use only.
+ env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT}]
+ type: integer
+ version_added: '2.10'
+WORKER_SHUTDOWN_POLL_DELAY:
+ name: Worker Shutdown Poll Delay
+ default: 0.1
+ description:
+ - The number of seconds to sleep between polling loops when checking Task Queue Manager worker processes to verify they have exited cleanly.
+ - This is for internal use only.
+ env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY}]
+ type: float
+ version_added: '2.10'
+USE_PERSISTENT_CONNECTIONS:
+ name: Persistence
+ default: False
+ description: Toggles the use of persistence for connections.
+ env: [{name: ANSIBLE_USE_PERSISTENT_CONNECTIONS}]
+ ini:
+ - {key: use_persistent_connections, section: defaults}
+ type: boolean
+VARIABLE_PLUGINS_ENABLED:
+ name: Vars plugin whitelist
+ default: ['host_group_vars']
+ description: Whitelist for variable plugins that require it.
+ env: [{name: ANSIBLE_VARS_ENABLED}]
+ ini:
+ - {key: vars_plugins_enabled, section: defaults}
+ type: list
+ version_added: "2.10"
+VARIABLE_PRECEDENCE:
+ name: Group variable precedence
+ default: ['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play', 'groups_plugins_inventory', 'groups_plugins_play']
+ description: Allows you to change the group variable precedence merge order.
+ env: [{name: ANSIBLE_PRECEDENCE}]
+ ini:
+ - {key: precedence, section: defaults}
+ type: list
+ version_added: "2.4"
+WIN_ASYNC_STARTUP_TIMEOUT:
+ name: Windows Async Startup Timeout
+ default: 5
+ description:
+ - For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
+ this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used
+ on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
+ - This is not the total time an async command can run for, but is a separate timeout to wait for an async command to
+ start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the
+ overall maximum duration the task can take will be extended by the amount specified here.
+ env: [{name: ANSIBLE_WIN_ASYNC_STARTUP_TIMEOUT}]
+ ini:
+ - {key: win_async_startup_timeout, section: defaults}
+ type: integer
+ vars:
+ - {name: ansible_win_async_startup_timeout}
+ version_added: '2.10'
+YAML_FILENAME_EXTENSIONS:
+ name: Valid YAML extensions
+ default: [".yml", ".yaml", ".json"]
+ description:
+ - "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
+ - 'This affects vars_files, include_vars, inventory and vars plugins among others.'
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ ini:
+ - section: defaults
+ key: yaml_valid_extensions
+ type: list
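+# Illustrative note (not part of the shipped defaults): since the type is
+# "list", a comma-separated value such as ANSIBLE_YAML_FILENAME_EXT=".yml, .yaml"
+# is split and stripped by ensure_type() in the config manager (added later
+# in this patch).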
+NETCONF_SSH_CONFIG:
+ description: This variable is used to enable a bastion/jump host with a netconf connection. If set to True, the bastion/jump
+ host ssh settings should be present in the ~/.ssh/config file; alternatively, it can be set
+ to a custom ssh configuration file path from which to read the bastion/jump host settings.
+ env: [{name: ANSIBLE_NETCONF_SSH_CONFIG}]
+ ini:
+ - {key: ssh_config, section: netconf_connection}
+ yaml: {key: netconf_connection.ssh_config}
+ default: null
+STRING_CONVERSION_ACTION:
+ version_added: '2.8'
+ description:
+ - Action to take when a module parameter value is converted to a string (this does not affect variables).
+ For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc.
+ will be converted by the YAML parser unless fully quoted.
+ - Valid options are 'error', 'warn', and 'ignore'.
+ - Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
+ default: 'warn'
+ env:
+ - name: ANSIBLE_STRING_CONVERSION_ACTION
+ ini:
+ - section: defaults
+ key: string_conversion_action
+ type: string
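+# Illustrative usage (not part of the shipped defaults): setting
+# "string_conversion_action = error" under [defaults] turns the conversion
+# warning (e.g. an unquoted yes passed to a string parameter) into an error.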
+VERBOSE_TO_STDERR:
+ version_added: '2.8'
+ description:
+ - Force 'verbose' option to use stderr instead of stdout
+ default: False
+ env:
+ - name: ANSIBLE_VERBOSE_TO_STDERR
+ ini:
+ - section: defaults
+ key: verbose_to_stderr
+ type: bool
+...
diff --git a/lib/ansible/config/data.py b/lib/ansible/config/data.py
new file mode 100644
index 00000000..6a5bb391
--- /dev/null
+++ b/lib/ansible/config/data.py
@@ -0,0 +1,43 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ConfigData(object):
+
+ def __init__(self):
+ self._global_settings = {}
+ self._plugins = {}
+
+ def get_setting(self, name, plugin=None):
+
+ setting = None
+ if plugin is None:
+ setting = self._global_settings.get(name)
+ elif plugin.type in self._plugins and plugin.name in self._plugins[plugin.type]:
+ setting = self._plugins[plugin.type][plugin.name].get(name)
+
+ return setting
+
+ def get_settings(self, plugin=None):
+
+ settings = []
+ if plugin is None:
+ settings = [self._global_settings[k] for k in self._global_settings]
+ elif plugin.type in self._plugins and plugin.name in self._plugins[plugin.type]:
+ settings = [self._plugins[plugin.type][plugin.name][k] for k in self._plugins[plugin.type][plugin.name]]
+
+ return settings
+
+ def update_setting(self, setting, plugin=None):
+
+ if plugin is None:
+ self._global_settings[setting.name] = setting
+ else:
+ if plugin.type not in self._plugins:
+ self._plugins[plugin.type] = {}
+ if plugin.name not in self._plugins[plugin.type]:
+ self._plugins[plugin.type][plugin.name] = {}
+ self._plugins[plugin.type][plugin.name][setting.name] = setting
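+
+# Illustrative usage (not part of the original file; Setting is the namedtuple
+# defined in ansible/config/manager.py later in this patch):
+# data = ConfigData()
+# data.update_setting(Setting('DEFAULT_FORKS', 5, 'default', 'integer'))
+# data.get_setting('DEFAULT_FORKS').value # -> 5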
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
new file mode 100644
index 00000000..858dc4c6
--- /dev/null
+++ b/lib/ansible/config/manager.py
@@ -0,0 +1,588 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import io
+import os
+import os.path
+import sys
+import stat
+import tempfile
+import traceback
+from collections import namedtuple
+
+from yaml import load as yaml_load
+try:
+ # use C version if possible for speedup
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+from ansible.config.data import ConfigData
+from ansible.errors import AnsibleOptionsError, AnsibleError
+from ansible.module_utils._text import to_text, to_bytes, to_native
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.parsing.quoting import unquote
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils import py3compat
+from ansible.utils.path import cleanup_tmp_file, makedirs_safe, unfrackpath
+
+
+Plugin = namedtuple('Plugin', 'name type')
+Setting = namedtuple('Setting', 'name value origin type')
+
+INTERNAL_DEFS = {'lookup': ('_terms',)}
+
+
+def _get_entry(plugin_type, plugin_name, config):
+ ''' construct entry for requested config '''
+ entry = ''
+ if plugin_type:
+ entry += 'plugin_type: %s ' % plugin_type
+ if plugin_name:
+ entry += 'plugin: %s ' % plugin_name
+ entry += 'setting: %s ' % config
+ return entry
+
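+# Illustrative (not part of the original file):
+# _get_entry('connection', 'ssh', 'host_key_checking')
+# -> 'plugin_type: connection plugin: ssh setting: host_key_checking '
+# (note the trailing space produced by the format strings above)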
+
+# FIXME: see if we can unify in module_utils with similar function used by argspec
+def ensure_type(value, value_type, origin=None):
+ ''' return a configuration variable with casting
+ :arg value: The value to ensure correct typing of
+ :kwarg value_type: The type of the value. This can be any of the following strings:
+ :boolean: sets the value to a True or False value
+ :bool: Same as 'boolean'
+ :integer: Sets the value to an integer or raises a ValueError
+ :int: Same as 'integer'
+ :float: Sets the value to a float or raises a ValueError
+ :list: Treats the value as a comma separated list. Split the value
+ and return it as a python list.
+ :none: Sets the value to None
+ :path: Expands any environment variables and tildes in the value.
+ :tmppath: Create a unique temporary directory inside of the directory
+ specified by value and return its path.
+ :temppath: Same as 'tmppath'
+ :tmp: Same as 'tmppath'
+ :pathlist: Treat the value as a typical PATH string. (On POSIX, this
+ means colon separated strings.) Split the value and then expand
+ each part for environment variables and tildes.
+ :pathspec: Treat the value as a PATH string. Expands any environment variables
+ and tildes in the value.
+ :str: Sets the value to string types.
+ :string: Same as 'str'
+ '''
+
+ errmsg = ''
+ basedir = None
+ if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
+ basedir = origin
+
+ if value_type:
+ value_type = value_type.lower()
+
+ if value is not None:
+ if value_type in ('boolean', 'bool'):
+ value = boolean(value, strict=False)
+
+ elif value_type in ('integer', 'int'):
+ value = int(value)
+
+ elif value_type == 'float':
+ value = float(value)
+
+ elif value_type == 'list':
+ if isinstance(value, string_types):
+ value = [x.strip() for x in value.split(',')]
+ elif not isinstance(value, Sequence):
+ errmsg = 'list'
+
+ elif value_type == 'none':
+ if value == "None":
+ value = None
+
+ if value is not None:
+ errmsg = 'None'
+
+ elif value_type == 'path':
+ if isinstance(value, string_types):
+ value = resolve_path(value, basedir=basedir)
+ else:
+ errmsg = 'path'
+
+ elif value_type in ('tmp', 'temppath', 'tmppath'):
+ if isinstance(value, string_types):
+ value = resolve_path(value, basedir=basedir)
+ if not os.path.exists(value):
+ makedirs_safe(value, 0o700)
+ prefix = 'ansible-local-%s' % os.getpid()
+ value = tempfile.mkdtemp(prefix=prefix, dir=value)
+ atexit.register(cleanup_tmp_file, value, warn=True)
+ else:
+ errmsg = 'temppath'
+
+ elif value_type == 'pathspec':
+ if isinstance(value, string_types):
+ value = value.split(os.pathsep)
+
+ if isinstance(value, Sequence):
+ value = [resolve_path(x, basedir=basedir) for x in value]
+ else:
+ errmsg = 'pathspec'
+
+ elif value_type == 'pathlist':
+ if isinstance(value, string_types):
+ value = [x.strip() for x in value.split(',')]
+
+ if isinstance(value, Sequence):
+ value = [resolve_path(x, basedir=basedir) for x in value]
+ else:
+ errmsg = 'pathlist'
+
+ elif value_type in ('str', 'string'):
+ if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
+ value = unquote(to_text(value, errors='surrogate_or_strict'))
+ else:
+ errmsg = 'string'
+
+ # defaults to string type
+ elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
+ value = unquote(to_text(value, errors='surrogate_or_strict'))
+
+ if errmsg:
+ raise ValueError('Invalid type provided for "%s": %s' % (errmsg, to_native(value)))
+
+ return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
+
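+# Illustrative behaviour (not part of the original file):
+# ensure_type('yes', 'bool') -> True
+# ensure_type('a, b ,c', 'list') -> ['a', 'b', 'c']
+# ensure_type('~/cfg', 'path') -> expanded absolute path via resolve_path()
+# ensure_type('x', 'int') -> raises ValueError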
+
+# FIXME: see if this can live in utils/path
+def resolve_path(path, basedir=None):
+ ''' resolve relative or 'variable' paths '''
+ if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}}
+ path = path.replace('{{CWD}}', os.getcwd())
+
+ return unfrackpath(path, follow=False, basedir=basedir)
+
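+# Illustrative (not part of the original file): resolve_path('{{CWD}}/cfg')
+# expands to os.getcwd() + '/cfg' before being normalized by unfrackpath().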
+
+# FIXME: generic file type?
+def get_config_type(cfile):
+
+ ftype = None
+ if cfile is not None:
+ ext = os.path.splitext(cfile)[-1]
+ if ext in ('.ini', '.cfg'):
+ ftype = 'ini'
+ elif ext in ('.yaml', '.yml'):
+ ftype = 'yaml'
+ else:
+ raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext)))
+
+ return ftype
+
+
+# FIXME: can move to module_utils for use for ini plugins also?
+def get_ini_config_value(p, entry):
+ ''' returns the value of the last ini entry found '''
+ value = None
+ if p is not None:
+ try:
+ value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True)
+ except Exception: # FIXME: actually report issues here
+ pass
+ return value
+
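+# Illustrative (not part of the original file):
+# get_ini_config_value(parser, {'section': 'defaults', 'key': 'forks'})
+# returns the raw string from the [defaults] section, or None if unset.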
+
+def find_ini_config_file(warnings=None):
+ ''' Load INI config file in order (first found is used): ENV, CWD, HOME, /etc/ansible '''
+ # FIXME: eventually deprecate ini configs
+
+ if warnings is None:
+ # Note: In this case, warnings does nothing
+ warnings = set()
+
+ # A value that can never be a valid path so that we can tell if ANSIBLE_CONFIG was set later
+ # We can't use None because we could set path to None.
+ SENTINEL = object
+
+ potential_paths = []
+
+ # Environment setting
+ path_from_env = os.getenv("ANSIBLE_CONFIG", SENTINEL)
+ if path_from_env is not SENTINEL:
+ path_from_env = unfrackpath(path_from_env, follow=False)
+ if os.path.isdir(to_bytes(path_from_env)):
+ path_from_env = os.path.join(path_from_env, "ansible.cfg")
+ potential_paths.append(path_from_env)
+
+ # Current working directory
+ warn_cmd_public = False
+ try:
+ cwd = os.getcwd()
+ perms = os.stat(cwd)
+ cwd_cfg = os.path.join(cwd, "ansible.cfg")
+ if perms.st_mode & stat.S_IWOTH:
+ # Working directory is world writable so we'll skip it.
+ # Still have to look for a file here, though, so that we know if we have to warn
+ if os.path.exists(cwd_cfg):
+ warn_cmd_public = True
+ else:
+ potential_paths.append(to_text(cwd_cfg, errors='surrogate_or_strict'))
+ except OSError:
+ # If we can't access cwd, we'll simply skip it as a possible config source
+ pass
+
+ # Per user location
+ potential_paths.append(unfrackpath("~/.ansible.cfg", follow=False))
+
+ # System location
+ potential_paths.append("/etc/ansible/ansible.cfg")
+
+ for path in potential_paths:
+ b_path = to_bytes(path)
+ if os.path.exists(b_path) and os.access(b_path, os.R_OK):
+ break
+ else:
+ path = None
+
+ # Emit a warning if all the following are true:
+ # * We did not use a config from ANSIBLE_CONFIG
+ # * There's an ansible.cfg in the current working directory that we skipped
+ if path_from_env != path and warn_cmd_public:
+ warnings.add(u"Ansible is being run in a world writable directory (%s),"
+ u" ignoring it as an ansible.cfg source."
+ u" For more information see"
+ u" https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir"
+ % to_text(cwd))
+
+ return path
+
+
+def _add_base_defs_deprecations(base_defs):
+ '''Add deprecation source 'ansible.builtin' to deprecations in base.yml'''
+ def process(entry):
+ if 'deprecated' in entry:
+ entry['deprecated']['collection_name'] = 'ansible.builtin'
+
+ for dummy, data in base_defs.items():
+ process(data)
+ for section in ('ini', 'env', 'vars'):
+ if section in data:
+ for entry in data[section]:
+ process(entry)
+
+
+class ConfigManager(object):
+
+ DEPRECATED = []
+ WARNINGS = set()
+
+ def __init__(self, conf_file=None, defs_file=None):
+
+ self._base_defs = {}
+ self._plugins = {}
+ self._parsers = {}
+
+ self._config_file = conf_file
+ self.data = ConfigData()
+
+ self._base_defs = self._read_config_yaml_file(defs_file or ('%s/base.yml' % os.path.dirname(__file__)))
+ _add_base_defs_deprecations(self._base_defs)
+
+ if self._config_file is None:
+ # set config using ini
+ self._config_file = find_ini_config_file(self.WARNINGS)
+
+ # consume configuration
+ if self._config_file:
+ # initialize parser and read config
+ self._parse_config_file()
+
+ # update constants
+ self.update_config_data()
+
+ def _read_config_yaml_file(self, yml_file):
+ # TODO: handle relative paths as relative to the directory containing the current playbook instead of CWD
+ # Currently this is only used with absolute paths to the `ansible/config` directory
+ yml_file = to_bytes(yml_file)
+ if os.path.exists(yml_file):
+ with open(yml_file, 'rb') as config_def:
+ return yaml_load(config_def, Loader=SafeLoader) or {}
+ raise AnsibleError(
+ "Missing base YAML definition file (bad install?): %s" % to_native(yml_file))
+
+ def _parse_config_file(self, cfile=None):
+ ''' return flat configuration settings from file(s) '''
+ # TODO: take list of files with merge/nomerge
+
+ if cfile is None:
+ cfile = self._config_file
+
+ ftype = get_config_type(cfile)
+ if cfile is not None:
+ if ftype == 'ini':
+ self._parsers[cfile] = configparser.ConfigParser()
+ with open(to_bytes(cfile), 'rb') as f:
+ try:
+ cfg_text = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleOptionsError("Error reading config file(%s) because the config file was not utf8 encoded: %s" % (cfile, to_native(e)))
+ try:
+ if PY3:
+ self._parsers[cfile].read_string(cfg_text)
+ else:
+ cfg_file = io.StringIO(cfg_text)
+ self._parsers[cfile].readfp(cfg_file)
+ except configparser.Error as e:
+ raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e)))
+ # FIXME: this should eventually handle yaml config files
+ # elif ftype == 'yaml':
+ # with open(cfile, 'rb') as config_stream:
+ # self._parsers[cfile] = yaml.safe_load(config_stream)
+ else:
+ raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype))
+
+ def _find_yaml_config_files(self):
+ ''' Load YAML Config Files in order, check merge flags, keep origin of settings'''
+ pass
+
+ def get_plugin_options(self, plugin_type, name, keys=None, variables=None, direct=None):
+
+ options = {}
+ defs = self.get_configuration_definitions(plugin_type, name)
+ for option in defs:
+ options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables, direct=direct)
+
+ return options
+
+ def get_plugin_vars(self, plugin_type, name):
+
+ pvars = []
+ for pdef in self.get_configuration_definitions(plugin_type, name).values():
+ if 'vars' in pdef and pdef['vars']:
+ for var_entry in pdef['vars']:
+ pvars.append(var_entry['name'])
+ return pvars
+
+ def get_configuration_definition(self, name, plugin_type=None, plugin_name=None):
+
+ ret = {}
+ if plugin_type is None:
+ ret = self._base_defs.get(name, None)
+ elif plugin_name is None:
+ ret = self._plugins.get(plugin_type, {}).get(name, None)
+ else:
+ ret = self._plugins.get(plugin_type, {}).get(plugin_name, {}).get(name, None)
+
+ return ret
+
+ def get_configuration_definitions(self, plugin_type=None, name=None):
+ ''' list the possible settings, either the base ones or those for a specific plugin type or plugin '''
+
+ ret = {}
+ if plugin_type is None:
+ ret = self._base_defs
+ elif name is None:
+ ret = self._plugins.get(plugin_type, {})
+ else:
+ ret = self._plugins.get(plugin_type, {}).get(name, {})
+
+ return ret
+
+ def _loop_entries(self, container, entry_list):
+ ''' shared code for value entry assignment '''
+
+ value = None
+ origin = None
+ for entry in entry_list:
+ name = entry.get('name')
+ try:
+ temp_value = container.get(name, None)
+ except UnicodeEncodeError:
+ self.WARNINGS.add(u'value for config entry {0} contains invalid characters, ignoring...'.format(to_text(name)))
+ continue
+ if temp_value is not None: # only set if entry is defined in container
+ # inline vault variables should be converted to a text string
+ if isinstance(temp_value, AnsibleVaultEncryptedUnicode):
+ temp_value = to_text(temp_value, errors='surrogate_or_strict')
+
+ value = temp_value
+ origin = name
+
+ # deal with deprecation of setting source, if used
+ if 'deprecated' in entry:
+ self.DEPRECATED.append((entry['name'], entry['deprecated']))
+
+ return value, origin
+
+ def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
+ ''' wrapper '''
+
+ try:
+ value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name,
+ keys=keys, variables=variables, direct=direct)
+ except AnsibleError:
+ raise
+ except Exception as e:
+ raise AnsibleError("Unhandled exception when retrieving %s:\n%s" % (config, to_native(e)), orig_exc=e)
+ return value
+
+ def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
+ ''' Given a config key figure out the actual value and report on the origin of the settings '''
+ if cfile is None:
+ # use default config
+ cfile = self._config_file
+
+ # Note: sources that are lists are ordered from low to high precedence (the last one wins)
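+ # Overall resolution order implemented below, highest precedence first:
+ # direct plugin arguments, play variables, playbook keywords, environment
+ # variables, config file entries, then the definition's default.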
+ value = None
+ origin = None
+
+ defs = self.get_configuration_definitions(plugin_type, plugin_name)
+ if config in defs:
+
+ aliases = defs[config].get('aliases', [])
+
+ # direct setting via plugin arguments, can set to None so we bypass rest of processing/defaults
+ direct_aliases = []
+ if direct:
+ direct_aliases = [direct[alias] for alias in aliases if alias in direct]
+ if direct and config in direct:
+ value = direct[config]
+ origin = 'Direct'
+ elif direct and direct_aliases:
+ value = direct_aliases[0]
+ origin = 'Direct'
+
+ else:
+ # Use 'variable overrides' if present, highest precedence, but only present when querying running play
+ if variables and defs[config].get('vars'):
+ value, origin = self._loop_entries(variables, defs[config]['vars'])
+ origin = 'var: %s' % origin
+
+ # use playbook keywords if you have em
+ if value is None and keys:
+ if config in keys:
+ value = keys[config]
+ keyword = config
+
+ elif aliases:
+ for alias in aliases:
+ if alias in keys:
+ value = keys[alias]
+ keyword = alias
+ break
+
+ if value is not None:
+ origin = 'keyword: %s' % keyword
+
+ # env vars are next precedence
+ if value is None and defs[config].get('env'):
+ value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
+ origin = 'env: %s' % origin
+
+ # try config file entries next, if we have one
+ if self._parsers.get(cfile, None) is None:
+ self._parse_config_file(cfile)
+
+ if value is None and cfile is not None:
+ ftype = get_config_type(cfile)
+ if ftype and defs[config].get(ftype):
+ if ftype == 'ini':
+ # load from ini config
+ try: # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe
+ for ini_entry in defs[config]['ini']:
+ temp_value = get_ini_config_value(self._parsers[cfile], ini_entry)
+ if temp_value is not None:
+ value = temp_value
+ origin = cfile
+ if 'deprecated' in ini_entry:
+ self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated']))
+ except Exception as e:
+ sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
+ elif ftype == 'yaml':
+ # FIXME: implement; also break down key from defs (dot notation???)
+ origin = cfile
+
+ # set default if we got here w/o a value
+ if value is None:
+ if defs[config].get('required', False):
+ if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
+ raise AnsibleError("No setting was provided for required configuration %s" %
+ to_native(_get_entry(plugin_type, plugin_name, config)))
+ else:
+ value = defs[config].get('default')
+ origin = 'default'
+ # skip typing as this is a templated default that will be resolved later in constants, which has the needed vars
+ if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')):
+ return value, origin
+
+ # ensure correct type, can raise exceptions on mismatched types
+ try:
+ value = ensure_type(value, defs[config].get('type'), origin=origin)
+ except ValueError as e:
+ if origin.startswith('env:') and value == '':
+ # this is an empty env var for a non-string type, so we can fall back to the default
+ origin = 'default'
+ value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin)
+ else:
+ raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
+ (to_native(_get_entry(plugin_type, plugin_name, config)), to_native(e)))
+
+ # deal with deprecation of the setting
+ if 'deprecated' in defs[config] and origin != 'default':
+ self.DEPRECATED.append((config, defs[config].get('deprecated')))
+ else:
+ raise AnsibleError('Requested entry (%s) was not defined in configuration.' % to_native(_get_entry(plugin_type, plugin_name, config)))
+
+ return value, origin
+
+ def initialize_plugin_configuration_definitions(self, plugin_type, name, defs):
+
+ if plugin_type not in self._plugins:
+ self._plugins[plugin_type] = {}
+
+ self._plugins[plugin_type][name] = defs
+
+ def update_config_data(self, defs=None, configfile=None):
+ ''' really: update constants '''
+
+ if defs is None:
+ defs = self._base_defs
+
+ if configfile is None:
+ configfile = self._config_file
+
+ if not isinstance(defs, dict):
+ raise AnsibleOptionsError("Invalid configuration definition type: %s for %s" % (type(defs), defs))
+
+ # update the constant for config file
+ self.data.update_setting(Setting('CONFIG_FILE', configfile, '', 'string'))
+
+ origin = None
+ # env and config defs can have several entries, ordered in list from lowest to highest precedence
+ for config in defs:
+ if not isinstance(defs[config], dict):
+ raise AnsibleOptionsError("Invalid configuration definition '%s': type is %s" % (to_native(config), type(defs[config])))
+
+ # get value and origin
+ try:
+ value, origin = self.get_config_value_and_origin(config, configfile)
+ except Exception as e:
+ # Printing the problem here because, in the current code:
+ # (1) we can't reach the error handler for AnsibleError before we
+ # hit a different error due to lack of working config.
+ # (2) We don't have access to display yet because display depends on config
+ # being properly loaded.
+ #
+ # If we start getting double errors printed from this section of code, then the
+ # above problem #1 has been fixed. Revamp this to be more like the try: except
+ # in get_config_value() at that time.
+ sys.stderr.write("Unhandled error:\n %s\n\n" % traceback.format_exc())
+ raise AnsibleError("Invalid settings supplied for %s: %s\n" % (config, to_native(e)), orig_exc=e)
+
+ # set the constant
+ self.data.update_setting(Setting(config, value, origin, defs[config].get('type', 'string')))
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
new file mode 100644
index 00000000..575bed52
--- /dev/null
+++ b/lib/ansible/constants.py
@@ -0,0 +1,227 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ast import literal_eval
+from jinja2 import Template
+from string import ascii_letters, digits
+
+from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.collections import Sequence
+from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_TRUE
+from ansible.module_utils.six import string_types
+from ansible.utils.fqcn import add_internal_fqcns
+
+
+def _warning(msg):
+ ''' display is not guaranteed here, nor is it the full class, but try anyway; fall back to sys.stderr.write '''
+ try:
+ from ansible.utils.display import Display
+ Display().warning(msg)
+ except Exception:
+ import sys
+ sys.stderr.write(' [WARNING] %s\n' % (msg))
+
+
+def _deprecated(msg, version='2.8'):
+ ''' display is not guaranteed here, nor is it the full class, but try anyway; fall back to sys.stderr.write '''
+ try:
+ from ansible.utils.display import Display
+ Display().deprecated(msg, version=version)
+ except Exception:
+ import sys
+ sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
+
+
+def mk_boolean(value):
+ ''' moved to module_utils'''
+ _deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
+ return boolean(value, strict=False)
+
+
+def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
+ ''' kept for backwards compatibility, but deprecated '''
+ _deprecated('ansible.constants.get_config() is deprecated. There is a new config API; see the porting docs.')
+
+ value = None
+ # small reconstruction of the old code env/ini/default
+ value = os.environ.get(env_var, None)
+ if value is None:
+ try:
+ value = get_ini_config_value(parser, {'key': key, 'section': section})
+ except Exception:
+ pass
+ if value is None:
+ value = default_value
+
+ value = ensure_type(value, value_type)
+
+ return value
+
+
+def set_constant(name, value, export=vars()):
+ ''' sets constants and returns resolved options dict '''
+ export[name] = value
+
+
+class _DeprecatedSequenceConstant(Sequence):
+ def __init__(self, value, msg, version):
+ self._value = value
+ self._msg = msg
+ self._version = version
+
+ def __len__(self):
+ _deprecated(self._msg, version=self._version)
+ return len(self._value)
+
+ def __getitem__(self, y):
+ _deprecated(self._msg, version=self._version)
+ return self._value[y]
+
+
+# Deprecated constants
+BECOME_METHODS = _DeprecatedSequenceConstant(
+ ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun', 'enable', 'machinectl'],
+ ('ansible.constants.BECOME_METHODS is deprecated, please use '
+ 'ansible.plugins.loader.become_loader. This list is statically '
+ 'defined and may not include all become methods'),
+ '2.10'
+)
+
+# CONSTANTS ### yes, actual ones
+BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
+BOOL_TRUE = BOOLEANS_TRUE
+COLLECTION_PTYPE_COMPAT = {'module': 'modules'}
+DEFAULT_BECOME_PASS = None
+DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
+DEFAULT_REMOTE_PASS = None
+DEFAULT_SUBSET = None
+# FIXME: expand to other plugins, but never doc fragments
+CONFIGURABLE_PLUGINS = ('become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'shell', 'vars')
+# NOTE: always update the docs/docsite/Makefile to match
+DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy')
+IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES") # ignore during module search
+INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
+LOCALHOST = ('127.0.0.1', 'localhost', '::1')
+MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
+ 'ansible.windows.win_shell', 'raw', 'script')))
+MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
+ 'ansible.windows.win_shell', 'raw')))
+RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts')
+TREE_DIR = None
+VAULT_VERSION_MIN = 1.0
+VAULT_VERSION_MAX = 1.0
+
+# This matches a string that cannot be used as a valid python variable name, i.e. 'not-valid', 'not!valid@either', '1_nor_This'
+INVALID_VARIABLE_NAMES = re.compile(r'^[\d\W]|[^\w]')
+
+
+# FIXME: remove once play_context mangling is removed
+# the magic variable mapping dictionary below is used to translate
+# host/inventory variables to fields in the PlayContext
+# object. The dictionary values are tuples, to account for aliases
+# in variable names.
+
+COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
+ 'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
+ 'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))
+
+MAGIC_VARIABLE_MAPPING = dict(
+
+ # base
+ connection=('ansible_connection', ),
+ module_compression=('ansible_module_compression', ),
+ shell=('ansible_shell_type', ),
+ executable=('ansible_shell_executable', ),
+
+ # connection common
+ remote_addr=('ansible_ssh_host', 'ansible_host'),
+ remote_user=('ansible_ssh_user', 'ansible_user'),
+ password=('ansible_ssh_pass', 'ansible_password'),
+ port=('ansible_ssh_port', 'ansible_port'),
+ pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
+ timeout=('ansible_ssh_timeout', 'ansible_timeout'),
+ private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
+
+ # networking modules
+ network_os=('ansible_network_os', ),
+ connection_user=('ansible_connection_user',),
+
+ # ssh TODO: remove
+ ssh_executable=('ansible_ssh_executable', ),
+ ssh_common_args=('ansible_ssh_common_args', ),
+ sftp_extra_args=('ansible_sftp_extra_args', ),
+ scp_extra_args=('ansible_scp_extra_args', ),
+ ssh_extra_args=('ansible_ssh_extra_args', ),
+ ssh_transfer_method=('ansible_ssh_transfer_method', ),
+
+ # docker TODO: remove
+ docker_extra_args=('ansible_docker_extra_args', ),
+
+ # become
+ become=('ansible_become', ),
+ become_method=('ansible_become_method', ),
+ become_user=('ansible_become_user', ),
+ become_pass=('ansible_become_password', 'ansible_become_pass'),
+ become_exe=('ansible_become_exe', ),
+ become_flags=('ansible_become_flags', ),
+)
+
+# POPULATE SETTINGS FROM CONFIG ###
+config = ConfigManager()
+
+# Generate constants from config
+for setting in config.data.get_settings():
+
+ value = setting.value
+ if setting.origin == 'default' and \
+ isinstance(setting.value, string_types) and \
+ (setting.value.startswith('{{') and setting.value.endswith('}}')):
+ try:
+ t = Template(setting.value)
+ value = t.render(vars())
+ try:
+ value = literal_eval(value)
+ except ValueError:
+ pass # not a python data structure
+ except Exception:
+ pass # not templatable
+
+ value = ensure_type(value, setting.type)
+
+ set_constant(setting.name, value)
+
+for warn in config.WARNINGS:
+ _warning(warn)
+
+
+# The following are hard-coded action names
+_ACTION_DEBUG = add_internal_fqcns(('debug', ))
+_ACTION_IMPORT_PLAYBOOK = add_internal_fqcns(('import_playbook', ))
+_ACTION_IMPORT_ROLE = add_internal_fqcns(('import_role', ))
+_ACTION_IMPORT_TASKS = add_internal_fqcns(('import_tasks', ))
+_ACTION_INCLUDE = add_internal_fqcns(('include', ))
+_ACTION_INCLUDE_ROLE = add_internal_fqcns(('include_role', ))
+_ACTION_INCLUDE_TASKS = add_internal_fqcns(('include_tasks', ))
+_ACTION_INCLUDE_VARS = add_internal_fqcns(('include_vars', ))
+_ACTION_META = add_internal_fqcns(('meta', ))
+_ACTION_SET_FACT = add_internal_fqcns(('set_fact', ))
+_ACTION_SETUP = add_internal_fqcns(('setup', ))
+_ACTION_HAS_CMD = add_internal_fqcns(('command', 'shell', 'script'))
+_ACTION_ALLOWS_RAW_ARGS = _ACTION_HAS_CMD + add_internal_fqcns(('raw', ))
+_ACTION_ALL_INCLUDES = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_INCLUDE_ROLE
+_ACTION_ALL_IMPORT_PLAYBOOKS = _ACTION_INCLUDE + _ACTION_IMPORT_PLAYBOOK
+_ACTION_ALL_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
+_ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES = _ACTION_INCLUDE_ROLE + _ACTION_IMPORT_ROLE
+_ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
+_ACTION_ALL_INCLUDE_ROLE_TASKS = _ACTION_INCLUDE_ROLE + _ACTION_INCLUDE_TASKS
+_ACTION_ALL_INCLUDE_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS
+_ACTION_FACT_GATHERING = _ACTION_SETUP + add_internal_fqcns(('gather_facts', ))
+_ACTION_WITH_CLEAN_FACTS = _ACTION_SET_FACT + _ACTION_INCLUDE_VARS
diff --git a/lib/ansible/context.py b/lib/ansible/context.py
new file mode 100644
index 00000000..96f13fea
--- /dev/null
+++ b/lib/ansible/context.py
@@ -0,0 +1,56 @@
+# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Context of the running Ansible.
+
+In the future we *may* create Context objects to allow running multiple Ansible plays in parallel
+with different contexts but that is currently out of scope as the Ansible library is just for
+running the ansible command line tools.
+
+These APIs are still in flux so do not use them unless you are willing to update them with every Ansible release
+"""
+
+from ansible.module_utils.common._collections_compat import Mapping, Set
+from ansible.module_utils.common.collections import is_sequence
+from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs
+
+
+__all__ = ('CLIARGS',)
+
+# Note: this is not the singleton version. The Singleton is only created once the program has
+# actually parsed the args
+CLIARGS = CLIArgs({})
+
+
+# This should be called immediately after cli_args are processed (parsed, validated, and any
+# normalization performed on them). No other code should call it
+def _init_global_context(cli_args):
+ """Initialize the global context objects"""
+ global CLIARGS
+ CLIARGS = GlobalCLIArgs.from_options(cli_args)
+
+
+def cliargs_deferred_get(key, default=None, shallowcopy=False):
+ """Closure over getting a key from CLIARGS with shallow copy functionality
+
+ Primarily used in ``FieldAttribute`` where we need to defer setting the default
+ until after the CLI arguments have been parsed
+
+ This function is not directly bound to ``CliArgs`` so that it works with
+ ``CLIARGS`` being replaced
+ """
+ def inner():
+ value = CLIARGS.get(key, default=default)
+ if not shallowcopy:
+ return value
+ elif is_sequence(value):
+ return value[:]
+ elif isinstance(value, (Mapping, Set)):
+ return value.copy()
+ return value
+ return inner
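+
+# Illustrative usage (not part of the original file; 'tags' is one real CLI
+# option, but any CLIARGS key works):
+# get_tags = cliargs_deferred_get('tags', default=[], shallowcopy=True)
+# get_tags() # looks up CLIARGS['tags'] at call time and returns a shallow copy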
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
new file mode 100644
index 00000000..563c5d25
--- /dev/null
+++ b/lib/ansible/errors/__init__.py
@@ -0,0 +1,341 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.errors.yaml_strings import (
+ YAML_COMMON_DICT_ERROR,
+ YAML_COMMON_LEADING_TAB_ERROR,
+ YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
+ YAML_COMMON_UNBALANCED_QUOTES_ERROR,
+ YAML_COMMON_UNQUOTED_COLON_ERROR,
+ YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
+ YAML_POSITION_DETAILS,
+ YAML_AND_SHORTHAND_ERROR,
+)
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Sequence
+
+
+class AnsibleError(Exception):
+ '''
+ This is the base class for all errors raised from Ansible code,
+ and can be instantiated with two optional parameters beyond the
+ error message to control whether detailed information is displayed
+ when the error occurred while parsing a data file of some kind.
+
+ Usage:
+
+ raise AnsibleError('some message here', obj=obj, show_content=True)
+
+ Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
+ which should be returned by the DataLoader() class.
+ '''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
+ super(AnsibleError, self).__init__(message)
+
+ # we import this here to prevent an import loop problem,
+ # since the objects code also imports ansible.errors
+ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+
+ self._obj = obj
+ self._show_content = show_content
+ if obj and isinstance(obj, AnsibleBaseYAMLObject):
+ extended_error = self._get_extended_error()
+ if extended_error and not suppress_extended_error:
+ self.message = '%s\n\n%s' % (to_native(message), to_native(extended_error))
+ else:
+ self.message = '%s' % to_native(message)
+ else:
+ self.message = '%s' % to_native(message)
+ if orig_exc:
+ self.orig_exc = orig_exc
+
+ def __str__(self):
+ return self.message
+
+ def __repr__(self):
+ return self.message
+
+ def _get_error_lines_from_file(self, file_name, line_number):
+ '''
+ Returns the line in the file which corresponds to the reported error
+ location, as well as the line preceding it (if the error did not
+ occur on the first line), to provide context to the error.
+ '''
+
+ target_line = ''
+ prev_line = ''
+
+ with open(file_name, 'r') as f:
+ lines = f.readlines()
+
+ target_line = lines[line_number]
+ if line_number > 0:
+ prev_line = lines[line_number - 1]
+
+ return (target_line, prev_line)
+
+ def _get_extended_error(self):
+ '''
+ Given an object reporting the location of the exception in a file, return
+ detailed information regarding it including:
+
+ * the line which caused the error as well as the one preceding it
+ * causes and suggested remedies for common syntax errors
+
+ If this error was created with show_content=False, the reporting of content
+ is suppressed, as the file contents may be sensitive (i.e. vault data).
+ '''
+
+ error_message = ''
+
+ try:
+ (src_file, line_number, col_number) = self._obj.ansible_pos
+ error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
+ if src_file not in ('<string>', '<unicode>') and self._show_content:
+ (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
+ target_line = to_text(target_line)
+ prev_line = to_text(prev_line)
+ if target_line:
+ stripped_line = target_line.replace(" ", "")
+
+ # Check for k=v syntax in addition to YAML syntax and set the appropriate error position,
+ # arrow index
+ if re.search(r'\w+(\s+)?=(\s+)?[\w/-]+', prev_line):
+ error_position = prev_line.rstrip().find('=')
+ arrow_line = (" " * error_position) + "^ here"
+ error_message = YAML_POSITION_DETAILS % (src_file, line_number - 1, error_position + 1)
+ error_message += "\nThe offending line appears to be:\n\n%s\n%s\n\n" % (prev_line.rstrip(), arrow_line)
+ error_message += YAML_AND_SHORTHAND_ERROR
+ else:
+ arrow_line = (" " * (col_number - 1)) + "^ here"
+ error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
+
+ # TODO: There may be cases where there is a valid tab in a line that has other errors.
+ if '\t' in target_line:
+ error_message += YAML_COMMON_LEADING_TAB_ERROR
+ # common error/remediation checking here:
+ # check for unquoted vars starting lines
+ if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
+ error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
+ # check for common dictionary mistakes
+ elif ":{{" in stripped_line and "}}" in stripped_line:
+ error_message += YAML_COMMON_DICT_ERROR
+ # check for common unquoted colon mistakes
+ elif (len(target_line) and
+ len(target_line) > 1 and
+ len(target_line) > col_number and
+ target_line[col_number] == ":" and
+ target_line.count(':') > 1):
+ error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
+ # otherwise, check for some common quoting mistakes
+ else:
+ # FIXME: This needs to split on the first ':' to account for modules like lineinfile
+ # that may have lines that contain legitimate colons, e.g., line: 'i ALL= (ALL) NOPASSWD: ALL'
+ # and throw off the quote matching logic.
+ parts = target_line.split(":")
+ if len(parts) > 1:
+ middle = parts[1].strip()
+ match = False
+ unbalanced = False
+
+ if middle.startswith("'") and not middle.endswith("'"):
+ match = True
+ elif middle.startswith('"') and not middle.endswith('"'):
+ match = True
+
+ if (len(middle) > 0 and
+ middle[0] in ['"', "'"] and
+ middle[-1] in ['"', "'"] and
+ target_line.count("'") > 2 or
+ target_line.count('"') > 2):
+ unbalanced = True
+
+ if match:
+ error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
+ if unbalanced:
+ error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
+
+ except (IOError, TypeError):
+ error_message += '\n(could not open file to display line)'
+ except IndexError:
+ error_message += '\n(specified line no longer in file, maybe it changed?)'
+
+ return error_message
+
+
+class AnsibleAssertionError(AnsibleError, AssertionError):
+ '''Invalid assertion'''
+ pass
+
+
+class AnsibleOptionsError(AnsibleError):
+ ''' bad or incomplete options passed '''
+ pass
+
+
+class AnsibleParserError(AnsibleError):
+ ''' something was detected early that is wrong about a playbook or data file '''
+ pass
+
+
+class AnsibleInternalError(AnsibleError):
+ ''' internal safeguards tripped, something happened in the code that should never happen '''
+ pass
+
+
+class AnsibleRuntimeError(AnsibleError):
+ ''' ansible had a problem while running a playbook '''
+ pass
+
+
+class AnsibleModuleError(AnsibleRuntimeError):
+ ''' a module failed somehow '''
+ pass
+
+
+class AnsibleConnectionFailure(AnsibleRuntimeError):
+ ''' the transport / connection_plugin had a fatal error '''
+ pass
+
+
+class AnsibleAuthenticationFailure(AnsibleConnectionFailure):
+ '''invalid username/password/key'''
+ pass
+
+
+class AnsibleCallbackError(AnsibleRuntimeError):
+ ''' a callback failure '''
+ pass
+
+
+class AnsibleTemplateError(AnsibleRuntimeError):
+ '''A template related error'''
+ pass
+
+
+class AnsibleFilterError(AnsibleTemplateError):
+ ''' a templating failure '''
+ pass
+
+
+class AnsibleLookupError(AnsibleTemplateError):
+ ''' a lookup failure '''
+ pass
+
+
+class AnsibleUndefinedVariable(AnsibleTemplateError):
+ ''' a templating failure '''
+ pass
+
+
+class AnsibleFileNotFound(AnsibleRuntimeError):
+ ''' a file missing failure '''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None):
+
+ self.file_name = file_name
+ self.paths = paths
+
+ if message:
+ message += "\n"
+ if self.file_name:
+ message += "Could not find or access '%s'" % to_text(self.file_name)
+ else:
+ message += "Could not find file"
+
+ if self.paths and isinstance(self.paths, Sequence):
+ searched = to_text('\n\t'.join(self.paths))
+ if message:
+ message += "\n"
+ message += "Searched in:\n\t%s" % searched
+
+ message += " on the Ansible Controller.\nIf you are using a module and expect the file to exist on the remote, see the remote_src option"
+
+ super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
+
+
+# These Exceptions are temporary, using them as flow control until we can get a better solution.
+# DO NOT USE as they will probably be removed soon.
+# We will port the action modules in our tree to use a context manager instead.
+class AnsibleAction(AnsibleRuntimeError):
+ ''' Base Exception for Action plugin flow control '''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+
+ super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
+ if result is None:
+ self.result = {}
+ else:
+ self.result = result
+
+
+class AnsibleActionSkip(AnsibleAction):
+ ''' an action runtime skip'''
+
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+ super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
+ self.result.update({'skipped': True, 'msg': message})
+
+
+class AnsibleActionFail(AnsibleAction):
+ ''' an action runtime failure'''
+ def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
+ super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
+ suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
+ self.result.update({'failed': True, 'msg': message})
+
+
+class _AnsibleActionDone(AnsibleAction):
+ ''' an action runtime early exit'''
+ pass
+
+
+class AnsiblePluginError(AnsibleError):
+ ''' base class for Ansible plugin-related errors that do not need AnsibleError contextual data '''
+ def __init__(self, message=None, plugin_load_context=None):
+ super(AnsiblePluginError, self).__init__(message)
+ self.plugin_load_context = plugin_load_context
+
+
+class AnsiblePluginRemovedError(AnsiblePluginError):
+ ''' a requested plugin has been removed '''
+ pass
+
+
+class AnsiblePluginCircularRedirect(AnsiblePluginError):
+ '''a cycle was detected in plugin redirection'''
+ pass
+
+
+class AnsibleCollectionUnsupportedVersionError(AnsiblePluginError):
+ '''a collection is not supported by this version of Ansible'''
+ pass
+
+
+class AnsibleFilterTypeError(AnsibleTemplateError, TypeError):
+ ''' a Jinja filter templating failure due to bad type'''
+ pass
diff --git a/lib/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py
new file mode 100644
index 00000000..e10a3f9d
--- /dev/null
+++ b/lib/ansible/errors/yaml_strings.py
@@ -0,0 +1,140 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__all__ = [
+ 'YAML_SYNTAX_ERROR',
+ 'YAML_POSITION_DETAILS',
+ 'YAML_COMMON_DICT_ERROR',
+ 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
+ 'YAML_COMMON_UNQUOTED_COLON_ERROR',
+ 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
+ 'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
+]
+
+YAML_SYNTAX_ERROR = """\
+Syntax Error while loading YAML.
+ %s"""
+
+YAML_POSITION_DETAILS = """\
+The error appears to be in '%s': line %s, column %s, but may
+be elsewhere in the file depending on the exact syntax problem.
+"""
+
+YAML_COMMON_DICT_ERROR = """\
+This one looks easy to fix. YAML thought it was looking for the start of a
+hash/dictionary and was confused to see a second "{". Most likely this was
+meant to be an ansible template evaluation instead, so we have to give the
+parser a small hint that we wanted a string instead. The solution here is to
+just quote the entire value.
+
+For instance, if the original line was:
+
+ app_path: {{ base_path }}/foo
+
+It should be written as:
+
+ app_path: "{{ base_path }}/foo"
+"""
+
+YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+missing quotes. Always quote template expression brackets when they
+start a value. For instance:
+
+ with_items:
+ - {{ foo }}
+
+Should be written as:
+
+ with_items:
+ - "{{ foo }}"
+"""
+
+YAML_COMMON_UNQUOTED_COLON_ERROR = """\
+This one looks easy to fix. There seems to be an extra unquoted colon in the line
+and this is confusing the parser. It was only expecting to find one free
+colon. The solution is to add some quotes around the colon, or quote the
+entire line after the first colon.
+
+For instance, if the original line was:
+
+ copy: src=file.txt dest=/path/filename:with_colon.txt
+
+It can be written as:
+
+ copy: src=file.txt dest='/path/filename:with_colon.txt'
+
+Or:
+
+ copy: 'src=file.txt dest=/path/filename:with_colon.txt'
+"""
+
+YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
+This one looks easy to fix. It seems that a value starts with a quote,
+and the YAML parser is expecting to see the line end
+with the same kind of quote. For instance:
+
+ when: "ok" in result.stdout
+
+Could be written as:
+
+ when: '"ok" in result.stdout'
+
+Or equivalently:
+
+ when: "'ok' in result.stdout"
+"""
+
+YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+unbalanced quotes. If starting a value with a quote, make sure the
+line ends with the same set of quotes. For instance this arbitrary
+example:
+
+ foo: "bad" "wolf"
+
+Could be written as:
+
+ foo: '"bad" "wolf"'
+"""
+
+YAML_COMMON_LEADING_TAB_ERROR = """\
+There appears to be a tab character at the start of the line.
+
+YAML does not use tabs for formatting. Tabs should be replaced with spaces.
+
+For example:
+ - name: update tooling
+ vars:
+ version: 1.2.3
+# ^--- there is a tab there.
+
+Should be written as:
+ - name: update tooling
+ vars:
+ version: 1.2.3
+# ^--- all spaces here.
+"""
+
+YAML_AND_SHORTHAND_ERROR = """\
+There appears to be both 'k=v' shorthand syntax and YAML in this task. \
+Only one syntax may be used.
+"""
diff --git a/lib/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py
new file mode 100644
index 00000000..ae8ccff5
--- /dev/null
+++ b/lib/ansible/executor/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/executor/action_write_locks.py b/lib/ansible/executor/action_write_locks.py
new file mode 100644
index 00000000..dfc7f9c9
--- /dev/null
+++ b/lib/ansible/executor/action_write_locks.py
@@ -0,0 +1,44 @@
+# (c) 2016 - Red Hat, Inc. <info@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from multiprocessing import Lock
+
+from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
+
+if 'action_write_locks' not in globals():
+ # Do not initialize this more than once, because doing so appears to
+ # clobber the existing locks; multiprocessing presumably re-imports the
+ # module when it forks.
+ action_write_locks = dict()
+
+ # Below is a Lock for use when we weren't expecting a named module. It gets used when an action
+ # plugin invokes a module whose name does not match the action's name. This is slightly less
+ # efficient, as all processes with unexpected module names will wait on this one lock.
+ action_write_locks[None] = Lock()
+
+ # These plugins are known to be called directly by action plugins with names differing from the
+ # action plugin name. We precreate them here as an optimization.
+ # If a list of service managers is created in the future we can do the same for them.
+ mods = set(p['name'] for p in PKG_MGRS)
+
+ mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
+ for mod_name in mods:
+ action_write_locks[mod_name] = Lock()
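+
+# Illustrative usage sketch: consumers are expected to pick the per-module
+# lock and fall back to the shared action_write_locks[None] entry when the
+# module name was not pre-created (module_common.py does essentially this):
+#
+#     lock = action_write_locks.get(module_name, action_write_locks[None])
+#     with lock:
+#         pass  # write the cached AnsiballZ payload for module_name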
diff --git a/lib/ansible/executor/discovery/__init__.py b/lib/ansible/executor/discovery/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/executor/discovery/__init__.py
diff --git a/lib/ansible/executor/discovery/python_target.py b/lib/ansible/executor/discovery/python_target.py
new file mode 100644
index 00000000..71377332
--- /dev/null
+++ b/lib/ansible/executor/discovery/python_target.py
@@ -0,0 +1,48 @@
+# Copyright: (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform
+# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import platform
+import io
+import os
+
+
+def read_utf8_file(path, encoding='utf-8'):
+ if not os.access(path, os.R_OK):
+ return None
+ with io.open(path, 'r', encoding=encoding) as fd:
+ content = fd.read()
+
+ return content
+
+
+def get_platform_info():
+ result = dict(platform_dist_result=[])
+
+ if hasattr(platform, 'dist'):
+ result['platform_dist_result'] = platform.dist()
+
+ osrelease_content = read_utf8_file('/etc/os-release')
+ # try to fall back to /usr/lib/os-release
+ if not osrelease_content:
+ osrelease_content = read_utf8_file('/usr/lib/os-release')
+
+ result['osrelease_content'] = osrelease_content
+
+ return result
+
+
+def main():
+ info = get_platform_info()
+
+ print(json.dumps(info))
+
+
+if __name__ == '__main__':
+ main()
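+
+# Illustrative only: on a hypothetical systemd-era Linux host this prints a
+# single JSON object along the lines of
+#
+#     {"platform_dist_result": [], "osrelease_content": "NAME=Fedora\nVERSION_ID=23\n"}
+#
+# platform.dist() was removed in Python 3.8, so platform_dist_result is often
+# empty and the controller falls back to parsing osrelease_content.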
diff --git a/lib/ansible/executor/interpreter_discovery.py b/lib/ansible/executor/interpreter_discovery.py
new file mode 100644
index 00000000..d387180b
--- /dev/null
+++ b/lib/ansible/executor/interpreter_discovery.py
@@ -0,0 +1,203 @@
+# Copyright: (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import bisect
+import json
+import pkgutil
+import re
+
+from ansible import constants as C
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.distro import LinuxDistribution
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import get_versioned_doclink
+from distutils.version import LooseVersion
+from traceback import format_exc
+
+display = Display()
+foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND')
+
+
+class InterpreterDiscoveryRequiredError(Exception):
+ def __init__(self, message, interpreter_name, discovery_mode):
+ super(InterpreterDiscoveryRequiredError, self).__init__(message)
+ # store explicitly; Python 3 exceptions have no .message attribute,
+ # which __str__/__repr__ below rely on
+ self.message = message
+ self.interpreter_name = interpreter_name
+ self.discovery_mode = discovery_mode
+
+ def __str__(self):
+ return self.message
+
+ def __repr__(self):
+ # TODO: proper repr impl
+ return self.message
+
+
+def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
+ # interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
+ # get the system type from uname, and find any random Python that can get us the info we need. For supported
+ # target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
+ # and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
+ # distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
+ # default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
+
+ # FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
+ if interpreter_name != 'python':
+ raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
+
+ host = task_vars.get('inventory_hostname', 'unknown')
+ res = None
+ platform_type = 'unknown'
+ found_interpreters = [u'/usr/bin/python'] # fallback value
+ is_auto_legacy = discovery_mode.startswith('auto_legacy')
+ is_silent = discovery_mode.endswith('_silent')
+
+ try:
+ platform_python_map = C.config.get_config_value('INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
+ bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
+
+ display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
+
+ # not all command -v impls accept a list of commands, so we have to call it once per python
+ command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
+ shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
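+ # Illustrative only: with a hypothetical fallback list of
+ # ['python3', 'python'], shell_bootstrap becomes the single shell line
+ #   echo PLATFORM; uname; echo FOUND; command -v 'python3'; command -v 'python'; echo ENDFOUND
+ # and foundre (defined above) later splits its output into the platform
+ # name and the discovered interpreter paths.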
+
+ # FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
+ res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
+
+ raw_stdout = res.get('stdout', u'')
+
+ match = foundre.match(raw_stdout)
+
+ if not match:
+ display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
+ raise ValueError('unexpected output from Python interpreter discovery')
+
+ platform_type = match.groups()[0].lower().strip()
+
+ found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
+
+ display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
+
+ if not found_interpreters:
+ action._discovery_warnings.append(u'No python interpreters found for host {0} (tried {1})'.format(host, bootstrap_python_list))
+ # this is lame, but returning None or throwing an exception is uglier
+ return u'/usr/bin/python'
+
+ if platform_type != 'linux':
+ raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
+
+ platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
+
+ # FUTURE: respect pipelining setting instead of just if the connection supports it?
+ if action._connection.has_pipelining:
+ res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
+ else:
+ # FUTURE: implement on-disk case (via script action or ?)
+ raise NotImplementedError('pipelining support required for extended interpreter discovery')
+
+ platform_info = json.loads(res.get('stdout'))
+
+ distro, version = _get_linux_distro(platform_info)
+
+ if not distro or not version:
+ raise NotImplementedError('unable to get Linux distribution/version info')
+
+ version_map = platform_python_map.get(distro.lower().strip())
+ if not version_map:
+ raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro))
+
+ platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict')
+
+ # provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
+ if is_auto_legacy:
+ if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters:
+ # FIXME: support comments in sivel's deprecation scanner so we can get reminded on this
+ if not is_silent:
+ action._discovery_deprecation_warnings.append(dict(
+ msg=u"Distribution {0} {1} on host {2} should use {3}, but is using "
+ u"/usr/bin/python for backward compatibility with prior Ansible releases. "
+ u"A future Ansible release will default to using the discovered platform "
+ u"python for this host. See {4} for more information"
+ .format(distro, version, host, platform_interpreter,
+ get_versioned_doclink('reference_appendices/interpreter_discovery.html')),
+ version='2.12'))
+ return u'/usr/bin/python'
+
+ if platform_interpreter not in found_interpreters:
+ if platform_interpreter not in bootstrap_python_list:
+ # sanity check to make sure we looked for it
+ if not is_silent:
+ action._discovery_warnings \
+ .append(u"Platform interpreter {0} on host {1} is missing from bootstrap list"
+ .format(platform_interpreter, host))
+
+ if not is_silent:
+ action._discovery_warnings \
+ .append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the "
+ u"discovered platform python interpreter was not present. See {5} "
+ u"for more information."
+ .format(distro, version, host, platform_interpreter, found_interpreters[0],
+ get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
+ return found_interpreters[0]
+
+ return platform_interpreter
+ except NotImplementedError as ex:
+ display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host)
+ except Exception as ex:
+ if not is_silent:
+ display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex)))
+ display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host)
+ if res and res.get('stderr'):
+ display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host)
+
+ if not is_silent:
+ action._discovery_warnings \
+ .append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of "
+ u"another Python interpreter could change the meaning of that path. See {3} "
+ u"for more information."
+ .format(platform_type, host, found_interpreters[0],
+ get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
+ return found_interpreters[0]
+
+
+def _get_linux_distro(platform_info):
+ dist_result = platform_info.get('platform_dist_result', [])
+
+ if len(dist_result) == 3 and any(dist_result):
+ return dist_result[0], dist_result[1]
+
+ osrelease_content = platform_info.get('osrelease_content')
+
+ if not osrelease_content:
+ return u'', u''
+
+ osr = LinuxDistribution._parse_os_release_content(osrelease_content)
+
+ return osr.get('id', u''), osr.get('version_id', u'')
+
+
+def _version_fuzzy_match(version, version_map):
+ # try exact match first
+ res = version_map.get(version)
+ if res:
+ return res
+
+ sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()])
+
+ find_looseversion = LooseVersion(version)
+
+ # slot match; return nearest previous version we're newer than
+ kpos = bisect.bisect(sorted_looseversions, find_looseversion)
+
+ if kpos == 0:
+ # older than everything in the list, return the oldest version
+ # TODO: warning-worthy?
+ return version_map.get(sorted_looseversions[0].vstring)
+
+ # TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)?
+
+ # return the next-oldest entry that we're newer than...
+ return version_map.get(sorted_looseversions[kpos - 1].vstring)
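+
+# Illustrative only: given a hypothetical version_map of
+#
+#     {'6': u'/usr/bin/python', '8': u'/usr/libexec/platform-python'}
+#
+# _version_fuzzy_match('7.4', version_map) bisects to the nearest key not
+# newer than 7.4 and returns u'/usr/bin/python', while '9' (past the end of
+# the list) returns u'/usr/libexec/platform-python'.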
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
new file mode 100644
index 00000000..15f2506a
--- /dev/null
+++ b/lib/ansible/executor/module_common.py
@@ -0,0 +1,1390 @@
+# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import base64
+import datetime
+import json
+import os
+import shlex
+import zipfile
+import re
+import pkgutil
+from io import BytesIO
+
+from ansible.release import __version__, __author__
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsiblePluginRemovedError
+from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
+from ansible.executor.powershell import module_manifest as ps_manifest
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+from ansible.plugins.loader import module_utils_loader
+from ansible.utils.collection_loader._collection_finder import _get_collection_metadata, _nested_dict_get
+
+# Must import the action_write_locks module and reference the locks through it.
+# If we imported the write locks dict directly then we would end up binding a
+# local name to the object and it would never get updated.
+from ansible.executor import action_write_locks
+
+from ansible.utils.display import Display
+from collections import namedtuple
+
+
+try:
+ import importlib.util
+ import importlib.machinery
+ imp = None
+except ImportError:
+ import imp
+
+# if we're on a Python that doesn't have FNFError, redefine it as IOError (since that's what we'll see)
+try:
+ FileNotFoundError
+except NameError:
+ FileNotFoundError = IOError
+
+display = Display()
+
+ModuleUtilsProcessEntry = namedtuple('ModuleUtilsProcessEntry', ['name_parts', 'is_ambiguous', 'has_redirected_child'])
+
+REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
+REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
+REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
+REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
+
+# We could end up writing out parameters with unicode characters so we need to
+# specify an encoding for the python source file
+ENCODING_STRING = u'# -*- coding: utf-8 -*-'
+b_ENCODING_STRING = b'# -*- coding: utf-8 -*-'
+
+# module_common is relative to module_utils, so fix the path
+_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
+
+# ******************************************************************************
+
+ANSIBALLZ_TEMPLATE = u'''%(shebang)s
+%(coding)s
+_ANSIBALLZ_WRAPPER = True # So the test-module.py script can tell this is an ANSIBALLZ_WRAPPER
+# This code is part of Ansible, but is an independent component.
+# The code in this particular templatable string, and this templatable string
+# only, is BSD licensed. Modules which end up using this snippet, which is
+# dynamically combined by Ansible, still belong to the author of the
+# module, and they may assign their own license to the complete work.
+#
+# Copyright (c), James Cammarata, 2016
+# Copyright (c), Toshio Kuratomi, 2016
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+def _ansiballz_main():
+%(rlimit)s
+ import os
+ import os.path
+ import sys
+ import __main__
+
+ # For some distros and python versions we pick up this script in the temporary
+ # directory. This leads to problems when the ansible module masks a python
+ # library that another import needs. We have not figured out what about the
+ # specific distros and python versions causes this to behave differently.
+ #
+ # Tested distros:
+ # Fedora23 with python3.4 Works
+ # Ubuntu15.10 with python2.7 Works
+ # Ubuntu15.10 with python3.4 Fails without this
+ # Ubuntu16.04.1 with python3.5 Fails without this
+ # To test on another platform:
+ # * use the copy module (since this shadows the stdlib copy module)
+ # * Turn off pipelining
+ # * Make sure that the destination file does not exist
+ # * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
+ # This will traceback in shutil. Looking at the complete traceback will show
+ # that shutil is importing copy which finds the ansible module instead of the
+ # stdlib module
+ scriptdir = None
+ try:
+ scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))
+ except (AttributeError, OSError):
+ # Some platforms don't set __file__ when reading from stdin
+ # OSX raises OSError if using abspath() in a directory we don't have
+ # permission to read (realpath calls abspath)
+ pass
+
+ # Strip cwd from sys.path to avoid potential permissions issues
+ excludes = set(('', '.', scriptdir))
+ sys.path = [p for p in sys.path if p not in excludes]
+
+ import base64
+ import runpy
+ import shutil
+ import tempfile
+ import zipfile
+
+ if sys.version_info < (3,):
+ PY3 = False
+ else:
+ PY3 = True
+
+ ZIPDATA = """%(zipdata)s"""
+
+ # Note: temp_path isn't needed once we switch to zipimport
+ def invoke_module(modlib_path, temp_path, json_params):
+ # When installed via setuptools (including python setup.py install),
+ # ansible may be installed with an easy-install.pth file. That file
+ # may load the system-wide install of ansible rather than the one in
+ # the module. sitecustomize is the only way to override that setting.
+ z = zipfile.ZipFile(modlib_path, mode='a')
+
+ # py3: modlib_path will be text, py2: it's bytes. Need bytes at the end
+ sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% modlib_path
+ sitecustomize = sitecustomize.encode('utf-8')
+ # Use a ZipInfo to work around zipfile limitation on hosts with
+ # clocks set to a pre-1980 year (for instance, Raspberry Pi)
+ zinfo = zipfile.ZipInfo()
+ zinfo.filename = 'sitecustomize.py'
+ zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
+ z.writestr(zinfo, sitecustomize)
+ z.close()
+
+ # Put the zipped up module_utils we got from the controller first in the python path so that we
+ # can monkeypatch the right basic
+ sys.path.insert(0, modlib_path)
+
+ # Monkeypatch the parameters into basic
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = json_params
+%(coverage)s
+ # Run the module! By importing it as '__main__', it thinks it is executing as a script
+ runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
+
+ # Ansible modules must exit themselves
+ print('{"msg": "New-style module did not handle its own exit", "failed": true}')
+ sys.exit(1)
+
+ def debug(command, zipped_mod, json_params):
+ # The code here normally doesn't run. It's only used for debugging on the
+ # remote machine.
+ #
+ # The subcommands in this function make it easier to debug ansiballz
+ # modules. Here are the basic steps:
+ #
+ # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
+ # to save the module file remotely::
+ # $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
+ #
+ # Part of the verbose output will tell you where on the remote machine the
+ # module was written to::
+ # [...]
+ # <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
+ # PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
+ # ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
+ # LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
+ # [...]
+ #
+ # Log in to the remote machine and run the module file from the previous
+ # step with the explode subcommand to extract the module payload into
+ # source files::
+ # $ ssh host1
+ # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
+ # Module expanded into:
+ # /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
+ #
+ # You can now edit the source files to instrument the code or experiment with
+ # different parameter values. When you're ready to run the code you've modified
+ # (instead of the code from the actual zipped module), use the execute subcommand like this::
+ # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
+
+ # Okay to use __file__ here because we're running from a kept file
+ basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
+ args_path = os.path.join(basedir, 'args')
+
+ if command == 'excommunicate':
+ print('The excommunicate debug command is deprecated and will be removed in 2.11. Use execute instead.')
+ command = 'execute'
+
+ if command == 'explode':
+ # transform the ZIPDATA into an exploded directory of code and then
+ # print the path to the code. This is an easy way for people to look
+ # at the code on the remote machine for debugging it in that
+ # environment
+ z = zipfile.ZipFile(zipped_mod)
+ for filename in z.namelist():
+ if filename.startswith('/'):
+ raise Exception('Something wrong with this module zip file: should not contain absolute paths')
+
+ dest_filename = os.path.join(basedir, filename)
+ if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
+ os.makedirs(dest_filename)
+ else:
+ directory = os.path.dirname(dest_filename)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ f = open(dest_filename, 'wb')
+ f.write(z.read(filename))
+ f.close()
+
+ # write the args file
+ f = open(args_path, 'wb')
+ f.write(json_params)
+ f.close()
+
+ print('Module expanded into:')
+ print('%%s' %% basedir)
+ exitcode = 0
+
+ elif command == 'execute':
+ # Execute the exploded code instead of executing the module from the
+ # embedded ZIPDATA. This allows people to easily run their modified
+ # code on the remote machine to see how changes will affect it.
+
+ # Set pythonpath to the debug dir
+ sys.path.insert(0, basedir)
+
+ # read in the args file which the user may have modified
+ with open(args_path, 'rb') as f:
+ json_params = f.read()
+
+ # Monkeypatch the parameters into basic
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = json_params
+
+ # Run the module! By importing it as '__main__', it thinks it is executing as a script
+ runpy.run_module(mod_name='%(module_fqn)s', init_globals=None, run_name='__main__', alter_sys=True)
+
+ # Ansible modules must exit themselves
+ print('{"msg": "New-style module did not handle its own exit", "failed": true}')
+ sys.exit(1)
+
+ else:
+ print('WARNING: Unknown debug command. Doing nothing.')
+ exitcode = 0
+
+ return exitcode
+
+ #
+ # See comments in the debug() method for information on debugging
+ #
+
+ ANSIBALLZ_PARAMS = %(params)s
+ if PY3:
+ ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
+ try:
+ # There's a race condition with the controller removing the
+ # remote_tmpdir and this module executing under async. So we cannot
+ # store this in remote_tmpdir (use system tempdir instead)
+ # Only need to use [ansible_module]_payload_ in the temp_path until we move to zipimport
+ # (this helps ansible-test produce coverage stats)
+ temp_path = tempfile.mkdtemp(prefix='ansible_%(ansible_module)s_payload_')
+
+ zipped_mod = os.path.join(temp_path, 'ansible_%(ansible_module)s_payload.zip')
+ with open(zipped_mod, 'wb') as modlib:
+ modlib.write(base64.b64decode(ZIPDATA))
+
+ if len(sys.argv) == 2:
+ exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
+ else:
+ # Note: temp_path isn't needed once we switch to zipimport
+ invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
+ finally:
+ try:
+ shutil.rmtree(temp_path)
+ except (NameError, OSError):
+ # tempdir creation probably failed
+ pass
+ sys.exit(exitcode)
+
+if __name__ == '__main__':
+ _ansiballz_main()
+'''
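+
+# Illustrative only: ANSIBALLZ_TEMPLATE is an old-style %-format template; a
+# hedged sketch of the substitution performed further down (names match the
+# %(...)s fields above, values here are hypothetical):
+#
+#     wrapper = ACTIVE_ANSIBALLZ_TEMPLATE % dict(
+#         shebang=u'#!/usr/bin/python', coding=ENCODING_STRING,
+#         rlimit='', coverage='', zipdata=zipdata,
+#         ansible_module='ping', module_fqn='ansible.modules.system.ping',
+#         params=python_repred_params,
+#         year=now.year, month=now.month, day=now.day,
+#         hour=now.hour, minute=now.minute, second=now.second)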
+
+ANSIBALLZ_COVERAGE_TEMPLATE = '''
+ # Access to the working directory is required by coverage.
+ # Some platforms, such as macOS, may not allow querying the working directory when using become to drop privileges.
+ try:
+ os.getcwd()
+ except OSError:
+ os.chdir('/')
+
+ os.environ['COVERAGE_FILE'] = '%(coverage_output)s'
+
+ import atexit
+
+ try:
+ import coverage
+ except ImportError:
+ print('{"msg": "Could not import `coverage` module.", "failed": true}')
+ sys.exit(1)
+
+ cov = coverage.Coverage(config_file='%(coverage_config)s')
+
+ def atexit_coverage():
+ cov.stop()
+ cov.save()
+
+ atexit.register(atexit_coverage)
+
+ cov.start()
+'''
+
+ANSIBALLZ_COVERAGE_CHECK_TEMPLATE = '''
+ try:
+ if PY3:
+ import importlib.util
+ if importlib.util.find_spec('coverage') is None:
+ raise ImportError
+ else:
+ import imp
+ imp.find_module('coverage')
+ except ImportError:
+ print('{"msg": "Could not find `coverage` module.", "failed": true}')
+ sys.exit(1)
+'''
+
+ANSIBALLZ_RLIMIT_TEMPLATE = '''
+ import resource
+
+ existing_soft, existing_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ # adjust soft limit subject to existing hard limit
+ requested_soft = min(existing_hard, %(rlimit_nofile)d)
+
+ if requested_soft != existing_soft:
+ try:
+ resource.setrlimit(resource.RLIMIT_NOFILE, (requested_soft, existing_hard))
+ except ValueError:
+ # some platforms (eg macOS) lie about their hard limit
+ pass
+'''
+
+
+def _strip_comments(source):
+ # Strip comments and blank lines from the wrapper
+ buf = []
+ for line in source.splitlines():
+ l = line.strip()
+ if not l or l.startswith(u'#'):
+ continue
+ buf.append(line)
+ return u'\n'.join(buf)
+
+
+if C.DEFAULT_KEEP_REMOTE_FILES:
+ # Keep comments when KEEP_REMOTE_FILES is set. That way users will see
+ # the comments with some nice usage instructions
+ ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
+else:
+ # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
+ ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
+
+# dirname(dirname(dirname(site-packages/ansible/executor/module_common.py))) == site-packages
+# Do this instead of getting site-packages from distutils.sysconfig so we work when we
+# haven't been installed
+site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.(py|ps1)$' % site_packages)
+COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$')
+
+# Detect new-style Python modules by looking for required imports:
+# import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util]
+# from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util]
+# import ansible.module_utils[.basic]
+# from ansible.module_utils[ import basic]
+# from ansible.module_utils[.basic import AnsibleModule]
+# from ..module_utils[ import basic]
+# from ..module_utils[.basic import AnsibleModule]
+NEW_STYLE_PYTHON_MODULE_RE = re.compile(
+ # Relative imports
+ br'(?:from +\.{2,} *module_utils.* +import |'
+ # Collection absolute imports:
+ br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |'
+ br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|'
+ # Core absolute imports
+ br'from +ansible\.module_utils.* +import |'
+ br'import +ansible\.module_utils\.)'
+)
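+
+# Illustrative only: lines the regex above is meant to classify. Matches
+# (module is treated as new-style):
+#
+#     b'from ansible.module_utils.basic import AnsibleModule'
+#     b'import ansible.module_utils.basic'
+#     b'from ..module_utils import basic'
+#
+# Non-matches (module stays old-style): b'import ansible', b'from ansible import errors'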
+
+
+class ModuleDepFinder(ast.NodeVisitor):
+ def __init__(self, module_fqn, is_pkg_init=False, *args, **kwargs):
+ """
+ Walk the ast tree for the python module.
+ :arg module_fqn: The fully qualified name to reach this module in dotted notation.
+ example: ansible.module_utils.basic
+ :arg is_pkg_init: Inform the finder it's looking at a package init (eg __init__.py) to allow
+ relative import expansion to use the proper package level without having imported it locally first.
+
+ Save submodule[.submoduleN][.identifier] into self.submodules
+ when they are from ansible.module_utils or ansible_collections packages
+
+ self.submodules will end up with tuples like:
+ - ('ansible', 'module_utils', 'basic',)
+ - ('ansible', 'module_utils', 'urls', 'fetch_url')
+ - ('ansible', 'module_utils', 'database', 'postgres')
+ - ('ansible', 'module_utils', 'database', 'postgres', 'quote')
+ - ('ansible', 'module_utils', 'database', 'postgres', 'quote')
+ - ('ansible_collections', 'my_ns', 'my_col', 'plugins', 'module_utils', 'foo')
+
+ It's up to calling code to determine whether the final element of the
+ tuple are module names or something else (function, class, or variable names)
+ .. seealso:: :python3:class:`ast.NodeVisitor`
+ """
+ super(ModuleDepFinder, self).__init__(*args, **kwargs)
+ self.submodules = set()
+ self.module_fqn = module_fqn
+ self.is_pkg_init = is_pkg_init
+
+ def visit_Import(self, node):
+ """
+ Handle import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
+
+ We save these as interesting submodules when the imported library is in ansible.module_utils
+ or ansible.collections
+ """
+ for alias in node.names:
+ if (alias.name.startswith('ansible.module_utils.') or
+ alias.name.startswith('ansible_collections.')):
+ py_mod = tuple(alias.name.split('.'))
+ self.submodules.add(py_mod)
+ self.generic_visit(node)
+
+ def visit_ImportFrom(self, node):
+ """
+ Handle from ansible.module_utils.MODLIB import [.MODLIBn] [as asname]
+
+ Also has to handle relative imports
+
+ We save these as interesting submodules when the imported library is in ansible.module_utils
+ or ansible.collections
+ """
+
+ # FIXME: These should all get skipped:
+ # from ansible.executor import module_common
+ # from ...executor import module_common
+ # from ... import executor (Currently it gives a non-helpful error)
+ if node.level > 0:
+ # if we're in a package init, we have to add one to the node level (and make it None if 0 to preserve the right slicing behavior)
+ level_slice_offset = -node.level + 1 or None if self.is_pkg_init else -node.level
+ if self.module_fqn:
+ parts = tuple(self.module_fqn.split('.'))
+ if node.module:
+ # relative import: from .module import x
+ node_module = '.'.join(parts[:level_slice_offset] + (node.module,))
+ else:
+ # relative import: from . import x
+ node_module = '.'.join(parts[:level_slice_offset])
+ else:
+ # fall back to an absolute import
+ node_module = node.module
+ else:
+ # absolute import: from module import x
+ node_module = node.module
+
+ # Specialcase: six is a special case because of its
+ # import logic
+ py_mod = None
+ if node.names[0].name == '_six':
+ self.submodules.add(('_six',))
+ elif node_module.startswith('ansible.module_utils'):
+ # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
+ # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
+ # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
+ # from ansible.module_utils import MODULE1 [,MODULEn] [as asname]
+ py_mod = tuple(node_module.split('.'))
+
+ elif node_module.startswith('ansible_collections.'):
+ if node_module.endswith('plugins.module_utils') or '.plugins.module_utils.' in node_module:
+ # from ansible_collections.ns.coll.plugins.module_utils import MODULE [as aname] [,MODULE2] [as aname]
+ # from ansible_collections.ns.coll.plugins.module_utils.MODULE import IDENTIFIER [as aname]
+ # FIXME: Unhandled cornercase (needs to be ignored):
+ # from ansible_collections.ns.coll.plugins.[!module_utils].[FOO].plugins.module_utils import IDENTIFIER
+ py_mod = tuple(node_module.split('.'))
+ else:
+ # Not from module_utils so ignore. for instance:
+ # from ansible_collections.ns.coll.plugins.lookup import IDENTIFIER
+ pass
+
+ if py_mod:
+ for alias in node.names:
+ self.submodules.add(py_mod + (alias.name,))
+
+ self.generic_visit(node)
+
+
+def _slurp(path):
+ if not os.path.exists(path):
+ raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
+ with open(path, 'rb') as fd:
+ data = fd.read()
+ return data
+
+
+def _get_shebang(interpreter, task_vars, templar, args=tuple()):
+ """
+ Note not stellar API:
+ Returns None instead of always returning a shebang line. Doing it this
+ way allows the caller to decide to use the shebang it read from the
+ file rather than trust that we reformatted what they already have
+ correctly.
+ """
+ interpreter_name = os.path.basename(interpreter).strip()
+
+ # FUTURE: add logical equivalence for python3 in the case of py3-only modules
+
+ # check for first-class interpreter config
+ interpreter_config_key = "INTERPRETER_%s" % interpreter_name.upper()
+
+ if C.config.get_configuration_definitions().get(interpreter_config_key):
+ # a config def exists for this interpreter type; consult config for the value
+ interpreter_out = C.config.get_config_value(interpreter_config_key, variables=task_vars)
+ discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name
+
+ interpreter_out = templar.template(interpreter_out.strip())
+
+ facts_from_task_vars = task_vars.get('ansible_facts', {})
+
+ # handle interpreter discovery if requested
+ if interpreter_out in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
+ if discovered_interpreter_config not in facts_from_task_vars:
+ # interpreter discovery is desired, but has not been run for this host
+ raise InterpreterDiscoveryRequiredError("interpreter discovery needed",
+ interpreter_name=interpreter_name,
+ discovery_mode=interpreter_out)
+ else:
+ interpreter_out = facts_from_task_vars[discovered_interpreter_config]
+ else:
+ # a config def does not exist for this interpreter type; consult vars for a possible direct override
+ interpreter_config = u'ansible_%s_interpreter' % interpreter_name
+
+ if interpreter_config not in task_vars:
+ return None, interpreter
+
+ interpreter_out = templar.template(task_vars[interpreter_config].strip())
+
+ shebang = u'#!' + interpreter_out
+
+ if args:
+ shebang = shebang + u' ' + u' '.join(args)
+
+ return shebang, interpreter_out
+
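+# Illustrative only: the two outcomes callers of _get_shebang handle. With
+# task_vars = {'ansible_python_interpreter': '/opt/py/bin/python'} (a
+# hypothetical override) it returns ('#!/opt/py/bin/python', '/opt/py/bin/python');
+# with no config or variable override it returns (None, interpreter) so the
+# caller can keep the shebang already present in the module file.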
+
+class ModuleUtilLocatorBase:
+ def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False):
+ self._is_ambiguous = is_ambiguous
+ # a child package redirection could cause intermediate package levels to be missing, eg
+ # from ansible.module_utils.x.y.z import foo; if x.y.z.foo is redirected, we may not have packages on disk for
+ # the intermediate packages x.y.z, so we'll need to supply empty packages for those
+ self._child_is_redirected = child_is_redirected
+ self.found = False
+ self.redirected = False
+ self.fq_name_parts = fq_name_parts
+ self.source_code = ''
+ self.output_path = ''
+ self.is_package = False
+ self._collection_name = None
+ # for ambiguous imports, we should only test for things more than one level below module_utils
+ # this lets us detect erroneous imports and redirections earlier
+ if is_ambiguous and len(self._get_module_utils_remainder_parts(fq_name_parts)) > 1:
+ self.candidate_names = [fq_name_parts, fq_name_parts[:-1]]
+ else:
+ self.candidate_names = [fq_name_parts]
+
+ @property
+ def candidate_names_joined(self):
+ return ['.'.join(n) for n in self.candidate_names]
+
+ def _handle_redirect(self, name_parts):
+ module_utils_relative_parts = self._get_module_utils_remainder_parts(name_parts)
+
+ # only allow redirects from below module_utils; if above that, bail out (eg, parent package names)
+ if not module_utils_relative_parts:
+ return False
+
+ try:
+ collection_metadata = _get_collection_metadata(self._collection_name)
+ except ValueError as ve: # collection not found or some other error related to collection load
+ raise AnsibleError('error processing module_util {0} loading redirected collection {1}: {2}'
+ .format('.'.join(name_parts), self._collection_name, to_native(ve)))
+
+ routing_entry = _nested_dict_get(collection_metadata, ['plugin_routing', 'module_utils', '.'.join(module_utils_relative_parts)])
+ if not routing_entry:
+ return False
+ # FIXME: add deprecation warning support
+
+ dep_or_ts = routing_entry.get('tombstone')
+ removed = dep_or_ts is not None
+ if not removed:
+ dep_or_ts = routing_entry.get('deprecation')
+
+ if dep_or_ts:
+ removal_date = dep_or_ts.get('removal_date')
+ removal_version = dep_or_ts.get('removal_version')
+ warning_text = dep_or_ts.get('warning_text')
+
+ msg = 'module_util {0} has been removed'.format('.'.join(name_parts))
+ if warning_text:
+ msg += ' ({0})'.format(warning_text)
+ else:
+ msg += '.'
+
+ display.deprecated(msg, removal_version, removed, removal_date, self._collection_name)
+ if 'redirect' in routing_entry:
+ self.redirected = True
+ source_pkg = '.'.join(name_parts)
+ self.is_package = True # treat all redirects as packages
+ redirect_target_pkg = routing_entry['redirect']
+
+ # expand FQCN redirects
+ if not redirect_target_pkg.startswith('ansible_collections'):
+ split_fqcn = redirect_target_pkg.split('.')
+ if len(split_fqcn) < 3:
+ raise Exception('invalid redirect for {0}: {1}'.format(source_pkg, redirect_target_pkg))
+ # assume it's an FQCN, expand it
+ redirect_target_pkg = 'ansible_collections.{0}.{1}.plugins.module_utils.{2}'.format(
+ split_fqcn[0], # ns
+ split_fqcn[1], # coll
+ '.'.join(split_fqcn[2:]) # sub-module_utils remainder
+ )
+ display.vvv('redirecting module_util {0} to {1}'.format(source_pkg, redirect_target_pkg))
+ self.source_code = self._generate_redirect_shim_source(source_pkg, redirect_target_pkg)
+ return True
+ return False
+
+ def _get_module_utils_remainder_parts(self, name_parts):
+ # subclasses should override to return the name parts after module_utils
+ return []
+
+ def _get_module_utils_remainder(self, name_parts):
+ # return the remainder parts as a package string
+ return '.'.join(self._get_module_utils_remainder_parts(name_parts))
+
+ def _find_module(self, name_parts):
+ return False
+
+ def _locate(self, redirect_first=True):
+ for candidate_name_parts in self.candidate_names:
+ if redirect_first and self._handle_redirect(candidate_name_parts):
+ break
+
+ if self._find_module(candidate_name_parts):
+ break
+
+ if not redirect_first and self._handle_redirect(candidate_name_parts):
+ break
+
+ else: # didn't find what we were looking for; last chance for packages whose parents were redirected
+ if self._child_is_redirected: # make fake packages
+ self.is_package = True
+ self.source_code = ''
+ else: # nope, just bail
+ return
+
+ if self.is_package:
+ path_parts = candidate_name_parts + ('__init__',)
+ else:
+ path_parts = candidate_name_parts
+ self.found = True
+ self.output_path = os.path.join(*path_parts) + '.py'
+ self.fq_name_parts = candidate_name_parts
+
+ def _generate_redirect_shim_source(self, fq_source_module, fq_target_module):
+ return """
+import sys
+import {1} as mod
+
+sys.modules['{0}'] = mod
+""".format(fq_source_module, fq_target_module)
+
+ # FIXME: add __repr__ impl
+
+
+class LegacyModuleUtilLocator(ModuleUtilLocatorBase):
+ def __init__(self, fq_name_parts, is_ambiguous=False, mu_paths=None, child_is_redirected=False):
+ super(LegacyModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected)
+
+ if fq_name_parts[0:2] != ('ansible', 'module_utils'):
+ raise Exception('this class can only locate from ansible.module_utils, got {0}'.format(fq_name_parts))
+
+ if fq_name_parts[2] == 'six':
+ # FIXME: handle the ansible.module_utils.six._six case with a redirect or an internal _six attr on six itself?
+ # six creates its submodules at runtime; convert all these to just 'ansible.module_utils.six'
+ fq_name_parts = ('ansible', 'module_utils', 'six')
+ self.candidate_names = [fq_name_parts]
+
+ self._mu_paths = mu_paths
+ self._collection_name = 'ansible.builtin' # legacy module utils always look in ansible.builtin for redirects
+ self._locate(redirect_first=False) # let local stuff override redirects for legacy
+
+ def _get_module_utils_remainder_parts(self, name_parts):
+ return name_parts[2:] # eg, foo.bar for ansible.module_utils.foo.bar
+
+ def _find_module(self, name_parts):
+ rel_name_parts = self._get_module_utils_remainder_parts(name_parts)
+
+ # no redirection; try to find the module
+ if len(rel_name_parts) == 1: # direct child of module_utils, just search the top-level dirs we were given
+ paths = self._mu_paths
+ else: # a nested submodule of module_utils, extend the paths given with the intermediate package names
+ paths = [os.path.join(p, *rel_name_parts[:-1]) for p in
+ self._mu_paths] # extend the MU paths with the relative bit
+
+ if imp is None: # python3 find module
+ # find_spec needs the full module name
+ self._info = info = importlib.machinery.PathFinder.find_spec('.'.join(name_parts), paths)
+ if info is not None and os.path.splitext(info.origin)[1] in importlib.machinery.SOURCE_SUFFIXES:
+ self.is_package = info.origin.endswith('/__init__.py')
+ path = info.origin
+ else:
+ return False
+ self.source_code = _slurp(path)
+ else: # python2 find module
+ try:
+ # imp just wants the leaf module/package name being searched for
+ info = imp.find_module(name_parts[-1], paths)
+ except ImportError:
+ return False
+
+ if info[2][2] == imp.PY_SOURCE:
+ fd = info[0]
+ elif info[2][2] == imp.PKG_DIRECTORY:
+ self.is_package = True
+ fd = open(os.path.join(info[1], '__init__.py'))
+ else:
+ return False
+
+ try:
+ self.source_code = fd.read()
+ finally:
+ fd.close()
+
+ return True
+
+
+class CollectionModuleUtilLocator(ModuleUtilLocatorBase):
+ def __init__(self, fq_name_parts, is_ambiguous=False, child_is_redirected=False):
+ super(CollectionModuleUtilLocator, self).__init__(fq_name_parts, is_ambiguous, child_is_redirected)
+
+ if fq_name_parts[0] != 'ansible_collections':
+ raise Exception('CollectionModuleUtilLocator can only locate from ansible_collections, got {0}'.format(fq_name_parts))
+ elif len(fq_name_parts) >= 6 and fq_name_parts[3:5] != ('plugins', 'module_utils'):
+ raise Exception('CollectionModuleUtilLocator can only locate below ansible_collections.(ns).(coll).plugins.module_utils, got {0}'
+ .format(fq_name_parts))
+
+ self._collection_name = '.'.join(fq_name_parts[1:3])
+
+ self._locate()
+
+ def _find_module(self, name_parts):
+ # synthesize empty inits for packages down through module_utils; we don't want to allow those to be shipped over, but the
+ # package hierarchy needs to exist
+ if len(name_parts) < 6:
+ self.source_code = ''
+ self.is_package = True
+ return True
+
+ # NB: we can't use pkgutil.get_data safely here, since we don't want to import/execute package/module code on
+ # the controller while analyzing/assembling the module, so we'll have to manually import the collection's
+ # Python package to locate it (import root collection, reassemble resource path beneath, fetch source)
+
+ collection_pkg_name = '.'.join(name_parts[0:3])
+ resource_base_path = os.path.join(*name_parts[3:])
+
+ src = None
+ # look for package_dir first, then module
+ try:
+ src = pkgutil.get_data(collection_pkg_name, to_native(os.path.join(resource_base_path, '__init__.py')))
+ except ImportError:
+ pass
+
+ # TODO: we might want to synthesize fake inits for py3-style packages, for now they're required beneath module_utils
+
+ if src is not None: # empty string is OK
+ self.is_package = True
+ else:
+ try:
+ src = pkgutil.get_data(collection_pkg_name, to_native(resource_base_path + '.py'))
+ except ImportError:
+ pass
+
+ if src is None: # empty string is OK
+ return False
+
+ self.source_code = src
+ return True
+
+ def _get_module_utils_remainder_parts(self, name_parts):
+ return name_parts[5:] # eg, foo.bar for ansible_collections.ns.coll.plugins.module_utils.foo.bar
+
+
+def recursive_finder(name, module_fqn, module_data, zf):
+ """
+ Using ModuleDepFinder, make sure we have all of the module_utils files that
+ the module and its module_utils files need. (No longer actually recursive.)
+ :arg name: Name of the python module we're examining
+ :arg module_fqn: Fully qualified name of the python module we're scanning
+ :arg module_data: string Python code of the module we're scanning
+ :arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
+ which we're assembling
+ """
+
+ # py_module_cache maps python module names to a tuple of the code in the module
+ # and the pathname to the module.
+ # Here we pre-load it with modules which we create without bothering to
+ # read from actual files (In some cases, these need to differ from what ansible
+ # ships because they're namespace packages in the module)
+ # FIXME: do we actually want ns pkg behavior for these? Seems like they should just be forced to emptyish pkg stubs
+ py_module_cache = {
+ ('ansible',): (
+ b'from pkgutil import extend_path\n'
+ b'__path__=extend_path(__path__,__name__)\n'
+ b'__version__="' + to_bytes(__version__) +
+ b'"\n__author__="' + to_bytes(__author__) + b'"\n',
+ 'ansible/__init__.py'),
+ ('ansible', 'module_utils'): (
+ b'from pkgutil import extend_path\n'
+ b'__path__=extend_path(__path__,__name__)\n',
+ 'ansible/module_utils/__init__.py')}
+
+ module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
+ module_utils_paths.append(_MODULE_UTILS_PATH)
+
+ # Parse the module code and find the imports of ansible.module_utils
+ try:
+ tree = compile(module_data, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
+ except (SyntaxError, IndentationError) as e:
+ raise AnsibleError("Unable to import %s due to %s" % (name, e.msg))
+
+ finder = ModuleDepFinder(module_fqn)
+ finder.visit(tree)
+
+ # each entry is a ModuleUtilsProcessEntry of the module name parts, whether the import is ambiguous as a module name
+ # or an attribute of a module (eg from x.y import z <-- is z a module or an attribute of x.y?), and whether a child was redirected
+ modules_to_process = [ModuleUtilsProcessEntry(m, True, False) for m in finder.submodules]
+
+ # HACK: basic is currently always required since module global init is currently tied up with AnsiballZ arg input
+ modules_to_process.append(ModuleUtilsProcessEntry(('ansible', 'module_utils', 'basic'), False, False))
+
+ # we'll be adding new modules inline as we discover them, so just keep going til we've processed them all
+ while modules_to_process:
+ modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order
+ py_module_name, is_ambiguous, child_is_redirected = modules_to_process.pop(0)
+
+ if py_module_name in py_module_cache:
+ # this is normal; we'll often see the same module imported many times, but we only need to process it once
+ continue
+
+ if py_module_name[0:2] == ('ansible', 'module_utils'):
+ module_info = LegacyModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous,
+ mu_paths=module_utils_paths, child_is_redirected=child_is_redirected)
+ elif py_module_name[0] == 'ansible_collections':
+ module_info = CollectionModuleUtilLocator(py_module_name, is_ambiguous=is_ambiguous, child_is_redirected=child_is_redirected)
+ else:
+ # FIXME: dot-joined result
+ display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
+ % [py_module_name])
+ continue
+
+ # Could not find the module. Construct a helpful error message.
+ if not module_info.found:
+ # FIXME: use dot-joined candidate names
+ msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
+ raise AnsibleError(msg)
+
+ # check the cache one more time with the module we actually found, since the name could be different than the input
+ # eg, imported name vs module
+ if module_info.fq_name_parts in py_module_cache:
+ continue
+
+ # compile the source, process all relevant imported modules
+ try:
+ tree = compile(module_info.source_code, '<unknown>', 'exec', ast.PyCF_ONLY_AST)
+ except (SyntaxError, IndentationError) as e:
+ raise AnsibleError("Unable to import %s due to %s" % (module_info.fq_name_parts, e.msg))
+
+ finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), module_info.is_package)
+ finder.visit(tree)
+ modules_to_process.extend(ModuleUtilsProcessEntry(m, True, False) for m in finder.submodules if m not in py_module_cache)
+
+ # we've processed this item, add it to the output list
+ py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path)
+
+ # ensure we process all ancestor package inits
+ accumulated_pkg_name = []
+ for pkg in module_info.fq_name_parts[:-1]:
+ accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
+ normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not)
+ if normalized_name not in py_module_cache:
+ modules_to_process.append(ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected))
+
+ for py_module_name in py_module_cache:
+ py_module_file_name = py_module_cache[py_module_name][1]
+
+ zf.writestr(py_module_file_name, py_module_cache[py_module_name][0])
+ mu_file = to_text(py_module_file_name, errors='surrogate_or_strict')
+ display.vvvvv("Including module_utils file %s" % mu_file)
+
+
+def _is_binary(b_module_data):
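+ # Heuristic: build the set of byte values plain text may contain
+ # (BEL/BS/TAB/LF/FF/CR/ESC plus 0x20-0xFF, minus DEL) and treat the payload
+ # as binary if the first 1024 bytes contain anything outside that set.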
+ textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
+ start = b_module_data[:1024]
+ return bool(start.translate(None, textchars))
+
+
+def _get_ansible_module_fqn(module_path):
+ """
+ Get the fully qualified name for an ansible module based on its pathname
+
+ remote_module_fqn is the fully qualified name. Like ansible.modules.system.ping
+ Or ansible_collections.Namespace.Collection_name.plugins.modules.ping
+ .. warning:: This function is for ansible modules only. It won't work for other things
+ (non-module plugins, etc)
+ """
+ remote_module_fqn = None
+
+ # Is this a core module?
+ match = CORE_LIBRARY_PATH_RE.search(module_path)
+ if not match:
+ # Is this a module in a collection?
+ match = COLLECTION_PATH_RE.search(module_path)
+
+ # We can tell the FQN for core modules and collection modules
+ if match:
+ path = match.group('path')
+ if '.' in path:
+ # FQNs must be valid as python identifiers. This sanity check has failed.
+ # we could check other things as well
+ raise ValueError('Module name (or path) was not a valid python identifier')
+
+ remote_module_fqn = '.'.join(path.split('/'))
+ else:
+ # Currently we do not handle modules in roles so we can end up here for that reason
+ raise ValueError("Unable to determine module's fully qualified name")
+
+ return remote_module_fqn
+
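+# Illustrative only, with hypothetical install paths:
+#
+#     .../site-packages/ansible/modules/system/ping.py
+#         -> 'ansible.modules.system.ping'
+#     .../ansible_collections/my_ns/my_col/plugins/modules/ping.py
+#         -> 'ansible_collections.my_ns.my_col.plugins.modules.ping'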
+
+def _add_module_to_zip(zf, remote_module_fqn, b_module_data):
+ """Add a module from ansible or from an ansible collection into the module zip"""
+ module_path_parts = remote_module_fqn.split('.')
+
+ # Write the module
+ module_path = '/'.join(module_path_parts) + '.py'
+ zf.writestr(module_path, b_module_data)
+
+ # Write the __init__.py's necessary to get there
+ if module_path_parts[0] == 'ansible':
+ # The ansible namespace is setup as part of the module_utils setup...
+ start = 2
+ existing_paths = frozenset()
+ else:
+ # ... but ansible_collections and other toplevels are not
+ start = 1
+ existing_paths = frozenset(zf.namelist())
+
+ for idx in range(start, len(module_path_parts)):
+ package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
+ # If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
+ if package_path in existing_paths:
+ continue
+ # Note: We don't want to include more than one ansible module in a payload at this time
+ # so no need to fill the __init__.py with namespace code
+ zf.writestr(package_path, b'')
+
+
+def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become,
+ become_method, become_user, become_password, become_flags, environment):
+ """
+ Given the source of the module, convert it to a Jinja2 template to insert
+ module code and return whether it's a new or old style module.
+ """
+ module_substyle = module_style = 'old'
+
+ # module_style is something important to calling code (ActionBase). It
+ # determines how arguments are formatted (json vs k=v) and whether
+ # a separate arguments file needs to be sent over the wire.
+ # module_substyle is extra information that's useful internally. It tells
+ # us what we have to look to substitute in the module files and whether
+ # we're using module replacer or ansiballz to format the module itself.
+ if _is_binary(b_module_data):
+ module_substyle = module_style = 'binary'
+ elif REPLACER in b_module_data:
+ # Handle REPLACER before the new-style module check because we need to make
+ # sure we substitute "from ansible.module_utils.basic import *" for REPLACER
+ module_style = 'new'
+ module_substyle = 'python'
+ b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
+ elif NEW_STYLE_PYTHON_MODULE_RE.search(b_module_data):
+ module_style = 'new'
+ module_substyle = 'python'
+ elif REPLACER_WINDOWS in b_module_data:
+ module_style = 'new'
+ module_substyle = 'powershell'
+ b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy')
+ elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \
+ or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\
+ or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE) \
+ or re.search(b'#AnsibleRequires -Powershell', b_module_data, re.IGNORECASE) \
+ or re.search(b'#AnsibleRequires -CSharpUtil', b_module_data, re.IGNORECASE):
+ module_style = 'new'
+ module_substyle = 'powershell'
+ elif REPLACER_JSONARGS in b_module_data:
+ module_style = 'new'
+ module_substyle = 'jsonargs'
+ elif b'WANT_JSON' in b_module_data:
+ module_substyle = module_style = 'non_native_want_json'
+
+ shebang = None
+ # Neither old-style, non_native_want_json nor binary modules should be modified
+ # except for the shebang line (done by modify_module)
+ if module_style in ('old', 'non_native_want_json', 'binary'):
+ return b_module_data, module_style, shebang
+
+ output = BytesIO()
+ py_module_names = set()
+
+ try:
+ remote_module_fqn = _get_ansible_module_fqn(module_path)
+ except ValueError:
+ # Modules in roles currently are not found by the fqn heuristic so we
+ # fall back to this. This means that relative imports inside a module from
+ # a role may fail. Absolute imports should be used for future-proofing.
+ # People should start writing collections instead of modules in roles, so we
+ # may never fix this.
+ display.debug('ANSIBALLZ: Could not determine module FQN')
+ remote_module_fqn = 'ansible.modules.%s' % module_name
+
+ if module_substyle == 'python':
+ params = dict(ANSIBLE_MODULE_ARGS=module_args,)
+ try:
+ python_repred_params = repr(json.dumps(params, cls=AnsibleJSONEncoder, vault_to_text=True))
+ except TypeError as e:
+ raise AnsibleError("Unable to pass options to module, they must be JSON serializable: %s" % to_native(e))
+
+ try:
+ compression_method = getattr(zipfile, module_compression)
+ except AttributeError:
+ display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
+ compression_method = zipfile.ZIP_STORED
+
+ lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
+ cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
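+ # e.g. <local tmp>/ansiballz_cache/ping-ZIP_DEFLATED (illustrative name;
+ # the local tmp directory varies per run)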
+
+ zipdata = None
+ # Optimization -- don't lock if the module has already been cached
+ if os.path.exists(cached_module_filename):
+ display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
+ with open(cached_module_filename, 'rb') as module_data:
+ zipdata = module_data.read()
+ else:
+ if module_name in action_write_locks.action_write_locks:
+ display.debug('ANSIBALLZ: Using lock for %s' % module_name)
+ lock = action_write_locks.action_write_locks[module_name]
+ else:
+ # If the action plugin directly invokes the module (instead of
+ # going through a strategy) then we don't have a cross-process
+ # lock specifically for this module. Use the "unexpected
+ # module" lock instead
+ display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
+ lock = action_write_locks.action_write_locks[None]
+
+ display.debug('ANSIBALLZ: Acquiring lock')
+ with lock:
+ display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
+ # Check that no other process has created this while we were
+ # waiting for the lock
+ if not os.path.exists(cached_module_filename):
+ display.debug('ANSIBALLZ: Creating module')
+ # Create the module zip data
+ zipoutput = BytesIO()
+ zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
+
+ # walk the module imports, looking for module_utils to send - they'll be added to the zipfile
+ recursive_finder(module_name, remote_module_fqn, b_module_data, zf)
+
+ display.debug('ANSIBALLZ: Writing module into payload')
+ _add_module_to_zip(zf, remote_module_fqn, b_module_data)
+
+ zf.close()
+ zipdata = base64.b64encode(zipoutput.getvalue())
+
+ # Write the assembled module to a temp file (write to temp
+ # so that no one looking for the file reads a partially
+ # written file)
+ if not os.path.exists(lookup_path):
+ # Note -- if we had a global setup function, that would
+ # be a better place to run this
+ os.makedirs(lookup_path)
+ display.debug('ANSIBALLZ: Writing module')
+ with open(cached_module_filename + '-part', 'wb') as f:
+ f.write(zipdata)
+
+ # Rename the file into its final position in the cache so
+ # future users of this module can read it off the
+ # filesystem instead of constructing from scratch.
+ display.debug('ANSIBALLZ: Renaming module')
+ os.rename(cached_module_filename + '-part', cached_module_filename)
+ display.debug('ANSIBALLZ: Done creating module')
+
+ if zipdata is None:
+ display.debug('ANSIBALLZ: Reading module after lock')
+ # Another process wrote the file while we were waiting for
+ # the write lock. Go ahead and read the data from disk
+ # instead of re-creating it.
+ try:
+ with open(cached_module_filename, 'rb') as f:
+ zipdata = f.read()
+ except IOError:
+ raise AnsibleError('A different worker process failed to create module file. '
+ 'Look at traceback for that process for debugging information.')
+ zipdata = to_text(zipdata, errors='surrogate_or_strict')
+
+ shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar)
+ if shebang is None:
+ shebang = u'#!/usr/bin/python'
+
+ # FUTURE: the module cache entry should be invalidated if we got this value from a host-dependent source
+ rlimit_nofile = C.config.get_config_value('PYTHON_MODULE_RLIMIT_NOFILE', variables=task_vars)
+
+ if not isinstance(rlimit_nofile, int):
+ rlimit_nofile = int(templar.template(rlimit_nofile))
+
+ if rlimit_nofile:
+ rlimit = ANSIBALLZ_RLIMIT_TEMPLATE % dict(
+ rlimit_nofile=rlimit_nofile,
+ )
+ else:
+ rlimit = ''
+
+ coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
+
+ if coverage_config:
+ coverage_output = os.environ['_ANSIBLE_COVERAGE_OUTPUT']
+
+ if coverage_output:
+ # Enable code coverage analysis of the module.
+ # This feature is for internal testing and may change without notice.
+ coverage = ANSIBALLZ_COVERAGE_TEMPLATE % dict(
+ coverage_config=coverage_config,
+ coverage_output=coverage_output,
+ )
+ else:
+ # Verify coverage is available without importing it.
+ # This will detect when a module would fail with coverage enabled with minimal overhead.
+ coverage = ANSIBALLZ_COVERAGE_CHECK_TEMPLATE
+ else:
+ coverage = ''
+
+ now = datetime.datetime.utcnow()
+ output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
+ zipdata=zipdata,
+ ansible_module=module_name,
+ module_fqn=remote_module_fqn,
+ params=python_repred_params,
+ shebang=shebang,
+ coding=ENCODING_STRING,
+ year=now.year,
+ month=now.month,
+ day=now.day,
+ hour=now.hour,
+ minute=now.minute,
+ second=now.second,
+ coverage=coverage,
+ rlimit=rlimit,
+ )))
+ b_module_data = output.getvalue()
+
+ elif module_substyle == 'powershell':
+ # Powershell/winrm don't actually make use of shebang so we can
+ # safely set this here. If we let the fallback code handle this
+ # it can fail in the presence of the UTF8 BOM commonly added by
+ # Windows text editors
+ shebang = u'#!powershell'
+ # create the common exec wrapper payload and set that as the module_data
+ # bytes
+ b_module_data = ps_manifest._create_powershell_wrapper(
+ b_module_data, module_path, module_args, environment,
+ async_timeout, become, become_method, become_user, become_password,
+ become_flags, module_substyle, task_vars, remote_module_fqn
+ )
+
+ elif module_substyle == 'jsonargs':
+ module_args_json = to_bytes(json.dumps(module_args, cls=AnsibleJSONEncoder, vault_to_text=True))
+
+ # these strings could be included in a third-party module but
+ # officially they were included in the 'basic' snippet for new-style
+ # python modules (which has been replaced with something else in
+ # ansiballz). If we remove them from jsonargs-style module replacer
+ # then we can remove them everywhere.
+ python_repred_args = to_bytes(repr(module_args_json))
+ b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
+ b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
+ b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
+
+ # The main event -- substitute the JSON args string into the module
+ b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
+
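+ # e.g. with ansible_syslog_facility=LOG_LOCAL3 (an illustrative value), the
+ # module's references to syslog.LOG_USER are rewritten to syslog.LOG_LOCAL3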
+ facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
+ b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
+
+ return (b_module_data, module_style, shebang)
+
+
+def modify_module(module_name, module_path, module_args, templar, task_vars=None, module_compression='ZIP_STORED', async_timeout=0, become=False,
+ become_method=None, become_user=None, become_password=None, become_flags=None, environment=None):
+ """
+ Used to insert chunks of code into modules before transfer rather than
+ doing regular python imports. This allows for more efficient transfer in
+ a non-bootstrapping scenario by not moving extra files over the wire and
+ also takes care of embedding arguments in the transferred modules.
+
+ This version is done in such a way that local imports can still be
+ used in the module code, so IDEs don't have to be aware of what is going on.
+
+ Example:
+
+ from ansible.module_utils.basic import *
+
+ ... will result in the insertion of basic.py into the module
+ from the module_utils/ directory in the source tree.
+
+ For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
+ properties not available here.
+
+ """
+ task_vars = {} if task_vars is None else task_vars
+ environment = {} if environment is None else environment
+
+ with open(module_path, 'rb') as f:
+
+ # read in the module source
+ b_module_data = f.read()
+
+ (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression,
+ async_timeout=async_timeout, become=become, become_method=become_method,
+ become_user=become_user, become_password=become_password, become_flags=become_flags,
+ environment=environment)
+
+ if module_style == 'binary':
+ return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
+ elif shebang is None:
+ b_lines = b_module_data.split(b"\n", 1)
+ if b_lines[0].startswith(b"#!"):
+ b_shebang = b_lines[0].strip()
+ # shlex.split on python-2.6 needs bytes. On python-3.x it needs text
+ args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict'))
+
+ # _get_shebang() takes text strings
+ args = [to_text(a, errors='surrogate_or_strict') for a in args]
+ interpreter = args[0]
+ b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0],
+ errors='surrogate_or_strict', nonstring='passthru')
+
+ if b_new_shebang:
+ b_lines[0] = b_shebang = b_new_shebang
+
+ if os.path.basename(interpreter).startswith(u'python'):
+ b_lines.insert(1, b_ENCODING_STRING)
+
+ shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict')
+ else:
+ # No shebang, assume a binary module?
+ pass
+
+ b_module_data = b"\n".join(b_lines)
+
+ return (b_module_data, module_style, shebang)
+
+
+def get_action_args_with_defaults(action, args, defaults, templar, redirected_names=None):
+ group_collection_map = {
+ 'acme': ['community.crypto'],
+ 'aws': ['amazon.aws', 'community.aws'],
+ 'azure': ['azure.azcollection'],
+ 'cpm': ['wti.remote'],
+ 'docker': ['community.general', 'community.docker'],
+ 'gcp': ['google.cloud'],
+ 'k8s': ['community.kubernetes', 'community.general', 'community.kubevirt', 'community.okd', 'kubernetes.core'],
+ 'os': ['openstack.cloud'],
+ 'ovirt': ['ovirt.ovirt', 'community.general'],
+ 'vmware': ['community.vmware'],
+ 'testgroup': ['testns.testcoll', 'testns.othercoll', 'testns.boguscoll']
+ }
+
+ if not redirected_names:
+ redirected_names = [action]
+
+ tmp_args = {}
+ module_defaults = {}
+
+ # Merge latest defaults into dict, since they are a list of dicts
+ if isinstance(defaults, list):
+ for default in defaults:
+ module_defaults.update(default)
+
+ # if we actually have defaults, template and merge
+ if module_defaults:
+ module_defaults = templar.template(module_defaults)
+
+ # deal with configured group defaults first
+ for default in module_defaults:
+ if not default.startswith('group/'):
+ continue
+
+ group_name = default.split('group/')[-1]
+
+ for collection_name in group_collection_map.get(group_name, []):
+ try:
+ action_group = _get_collection_metadata(collection_name).get('action_groups', {})
+ except ValueError:
+ # The collection may not be installed
+ continue
+
+ if any(name for name in redirected_names if name in action_group):
+ tmp_args.update((module_defaults.get('group/%s' % group_name) or {}).copy())
+
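+ # Precedence sketch: group/<name> defaults apply first (above), then the
+ # action-specific defaults below, and finally the task's own args win out.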
+ # handle specific action defaults
+ for action in redirected_names:
+ if action in module_defaults:
+ tmp_args.update(module_defaults[action].copy())
+
+ # direct args override all
+ tmp_args.update(args)
+
+ return tmp_args
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
new file mode 100644
index 00000000..1a53f3e7
--- /dev/null
+++ b/lib/ansible/executor/play_iterator.py
@@ -0,0 +1,567 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+
+from ansible import constants as C
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+__all__ = ['PlayIterator']
+
+
+class HostState:
+ def __init__(self, blocks):
+ self._blocks = blocks[:]
+
+ self.cur_block = 0
+ self.cur_regular_task = 0
+ self.cur_rescue_task = 0
+ self.cur_always_task = 0
+ self.cur_dep_chain = None
+ self.run_state = PlayIterator.ITERATING_SETUP
+ self.fail_state = PlayIterator.FAILED_NONE
+ self.pending_setup = False
+ self.tasks_child_state = None
+ self.rescue_child_state = None
+ self.always_child_state = None
+ self.did_rescue = False
+ self.did_start_at_task = False
+
+ def __repr__(self):
+ return "HostState(%r)" % self._blocks
+
+ def __str__(self):
+ def _run_state_to_string(n):
+ states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
+ try:
+ return states[n]
+ except IndexError:
+ return "UNKNOWN STATE"
+
+ def _failed_state_to_string(n):
+ states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
+ if n == 0:
+ return "FAILED_NONE"
+ else:
+ ret = []
+ for i in (1, 2, 4, 8):
+ if n & i:
+ ret.append(states[i])
+ return "|".join(ret)
+
+ return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
+ "rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
+ self.cur_block,
+ self.cur_regular_task,
+ self.cur_rescue_task,
+ self.cur_always_task,
+ _run_state_to_string(self.run_state),
+ _failed_state_to_string(self.fail_state),
+ self.pending_setup,
+ self.tasks_child_state,
+ self.rescue_child_state,
+ self.always_child_state,
+ self.did_rescue,
+ self.did_start_at_task,
+ ))
+
+ def __eq__(self, other):
+ if not isinstance(other, HostState):
+ return False
+
+ for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
+ 'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
+ 'tasks_child_state', 'rescue_child_state', 'always_child_state'):
+ if getattr(self, attr) != getattr(other, attr):
+ return False
+
+ return True
+
+ def get_current_block(self):
+ return self._blocks[self.cur_block]
+
+ def copy(self):
+ new_state = HostState(self._blocks)
+ new_state.cur_block = self.cur_block
+ new_state.cur_regular_task = self.cur_regular_task
+ new_state.cur_rescue_task = self.cur_rescue_task
+ new_state.cur_always_task = self.cur_always_task
+ new_state.run_state = self.run_state
+ new_state.fail_state = self.fail_state
+ new_state.pending_setup = self.pending_setup
+ new_state.did_rescue = self.did_rescue
+ new_state.did_start_at_task = self.did_start_at_task
+ if self.cur_dep_chain is not None:
+ new_state.cur_dep_chain = self.cur_dep_chain[:]
+ if self.tasks_child_state is not None:
+ new_state.tasks_child_state = self.tasks_child_state.copy()
+ if self.rescue_child_state is not None:
+ new_state.rescue_child_state = self.rescue_child_state.copy()
+ if self.always_child_state is not None:
+ new_state.always_child_state = self.always_child_state.copy()
+ return new_state
+
+
+class PlayIterator:
+
+ # the primary running states for the play iteration
+ ITERATING_SETUP = 0
+ ITERATING_TASKS = 1
+ ITERATING_RESCUE = 2
+ ITERATING_ALWAYS = 3
+ ITERATING_COMPLETE = 4
+
+ # the failure states for the play iteration, which are powers
+ # of 2 as they may be or'ed together in certain circumstances
+ FAILED_NONE = 0
+ FAILED_SETUP = 1
+ FAILED_TASKS = 2
+ FAILED_RESCUE = 4
+ FAILED_ALWAYS = 8
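+ # e.g. a host that failed in a task and then also failed in its always
+ # portion has fail_state == FAILED_TASKS | FAILED_ALWAYS == 10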
+
+ def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
+ self._play = play
+ self._blocks = []
+ self._variable_manager = variable_manager
+
+ # Default options to gather
+ gather_subset = self._play.gather_subset
+ gather_timeout = self._play.gather_timeout
+ fact_path = self._play.fact_path
+
+ setup_block = Block(play=self._play)
+ # Gathering facts with run_once would copy the facts from one host to
+ # the others.
+ setup_block.run_once = False
+ setup_task = Task(block=setup_block)
+ setup_task.action = 'gather_facts'
+ setup_task.name = 'Gathering Facts'
+ setup_task.args = {
+ 'gather_subset': gather_subset,
+ }
+
+ # Unless the play is specifically tagged, gathering should 'always' run
+ if not self._play.tags:
+ setup_task.tags = ['always']
+
+ if gather_timeout:
+ setup_task.args['gather_timeout'] = gather_timeout
+ if fact_path:
+ setup_task.args['fact_path'] = fact_path
+ setup_task.set_loader(self._play._loader)
+ # short circuit fact gathering if the entire playbook is conditional
+ if self._play._included_conditional is not None:
+ setup_task.when = self._play._included_conditional[:]
+ setup_block.block = [setup_task]
+
+ setup_block = setup_block.filter_tagged_tasks(all_vars)
+ self._blocks.append(setup_block)
+
+ for block in self._play.compile():
+ new_block = block.filter_tagged_tasks(all_vars)
+ if new_block.has_tasks():
+ self._blocks.append(new_block)
+
+ self._host_states = {}
+ start_at_matched = False
+ batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
+ self.batch_size = len(batch)
+ for host in batch:
+ self._host_states[host.name] = HostState(blocks=self._blocks)
+ # if we're looking to start at a specific task, iterate through
+ # the tasks for this host until we find the specified task
+ if play_context.start_at_task is not None and not start_at_done:
+ while True:
+ (s, task) = self.get_next_task_for_host(host, peek=True)
+ if s.run_state == self.ITERATING_COMPLETE:
+ break
+ if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
+ task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
+ start_at_matched = True
+ break
+ else:
+ self.get_next_task_for_host(host)
+
+ # finally, reset the host's state to ITERATING_SETUP
+ if start_at_matched:
+ self._host_states[host.name].did_start_at_task = True
+ self._host_states[host.name].run_state = self.ITERATING_SETUP
+
+ if start_at_matched:
+ # we have our match, so clear the start_at_task field on the
+ # play context to flag that we've started at a task (and future
+ # plays won't try to advance)
+ play_context.start_at_task = None
+
+ def get_host_state(self, host):
+ # Since we're using the PlayIterator to carry forward failed hosts,
+ # in the event that a previous host was not in the current inventory
+ # we create a stub state for it now
+ if host.name not in self._host_states:
+ self._host_states[host.name] = HostState(blocks=[])
+
+ return self._host_states[host.name].copy()
+
+ def cache_block_tasks(self, block):
+ # now a noop, we've changed the way we do caching and finding of
+ # original task entries, but just in case any 3rd party strategies
+ # are using this we're leaving it here for now
+ return
+
+ def get_next_task_for_host(self, host, peek=False):
+
+ display.debug("getting the next task for host %s" % host.name)
+ s = self.get_host_state(host)
+
+ task = None
+ if s.run_state == self.ITERATING_COMPLETE:
+ display.debug("host %s is done iterating, returning" % host.name)
+ return (s, None)
+
+ (s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
+
+ if not peek:
+ self._host_states[host.name] = s
+
+ display.debug("done getting next task for host %s" % host.name)
+ display.debug(" ^ task is: %s" % task)
+ display.debug(" ^ state is: %s" % s)
+ return (s, task)
+
+ def _get_next_task_from_state(self, state, host, peek, in_child=False):
+
+ task = None
+
+ # try to find the next task, given the current state.
+ while True:
+ # try to get the current block from the list of blocks, and
+ # if we run past the end of the list we know we're done
+ # iterating for this host
+ try:
+ block = state._blocks[state.cur_block]
+ except IndexError:
+ state.run_state = self.ITERATING_COMPLETE
+ return (state, None)
+
+ if state.run_state == self.ITERATING_SETUP:
+ # First, we check to see if we were pending setup. If not, this is
+ # the first trip through ITERATING_SETUP, so we set the pending_setup
+ # flag and try to determine if we do in fact want to gather facts for
+ # the specified host.
+ if not state.pending_setup:
+ state.pending_setup = True
+
+ # Gather facts if the default is 'smart' and we have not yet
+ # done it for this host; or if 'explicit' and the play sets
+ # gather_facts to True; or if 'implicit' and the play does
+ # NOT explicitly set gather_facts to False.
+
+ gathering = C.DEFAULT_GATHERING
+ implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
+
+ if (gathering == 'implicit' and implied) or \
+ (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
+ (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
+ # The setup block is always self._blocks[0], as we inject it
+ # during the play compilation in __init__ above.
+ setup_block = self._blocks[0]
+ if setup_block.has_tasks() and len(setup_block.block) > 0:
+ task = setup_block.block[0]
+ else:
+ # This is the second trip through ITERATING_SETUP, so we clear
+ # the flag and move onto the next block in the list while setting
+ # the run state to ITERATING_TASKS
+ state.pending_setup = False
+
+ state.run_state = self.ITERATING_TASKS
+ if not state.did_start_at_task:
+ state.cur_block += 1
+ state.cur_regular_task = 0
+ state.cur_rescue_task = 0
+ state.cur_always_task = 0
+ state.tasks_child_state = None
+ state.rescue_child_state = None
+ state.always_child_state = None
+
+ elif state.run_state == self.ITERATING_TASKS:
+ # clear the pending setup flag, since we're past that and it didn't fail
+ if state.pending_setup:
+ state.pending_setup = False
+
+ # First, we check for a child task state that is not failed, and if we
+ # have one recurse into it for the next task. If we're done with the child
+ # state, we clear it and drop back to getting the next task from the list.
+ if state.tasks_child_state:
+ (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
+ if self._check_failed_state(state.tasks_child_state):
+ # failed child state, so clear it and move into the rescue portion
+ state.tasks_child_state = None
+ self._set_failed_state(state)
+ else:
+ # get the next task recursively
+ if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
+ # we're done with the child state, so clear it and continue
+ # back to the top of the loop to get the next task
+ state.tasks_child_state = None
+ continue
+ else:
+ # First here, we check to see if we've failed anywhere down the chain
+ # of states we have, and if so we move onto the rescue portion. Otherwise,
+ # we check to see if we've moved past the end of the list of tasks. If so,
+ # we move into the always portion of the block, otherwise we get the next
+ # task from the list.
+ if self._check_failed_state(state):
+ state.run_state = self.ITERATING_RESCUE
+ elif state.cur_regular_task >= len(block.block):
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ task = block.block[state.cur_regular_task]
+ # if the current task is actually a child block, create a child
+ # state for us to recurse into on the next pass
+ if isinstance(task, Block):
+ state.tasks_child_state = HostState(blocks=[task])
+ state.tasks_child_state.run_state = self.ITERATING_TASKS
+ # since we've created the child state, clear the task
+ # so we can pick up the child state on the next pass
+ task = None
+ state.cur_regular_task += 1
+
+ elif state.run_state == self.ITERATING_RESCUE:
+ # The process here is identical to ITERATING_TASKS, except that on
+ # failure we move into the always portion of the block instead.
+ if host.name in self._play._removed_hosts:
+ self._play._removed_hosts.remove(host.name)
+
+ if state.rescue_child_state:
+ (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
+ if self._check_failed_state(state.rescue_child_state):
+ state.rescue_child_state = None
+ self._set_failed_state(state)
+ else:
+ if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
+ state.rescue_child_state = None
+ continue
+ else:
+ if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
+ state.run_state = self.ITERATING_ALWAYS
+ elif state.cur_rescue_task >= len(block.rescue):
+ if len(block.rescue) > 0:
+ state.fail_state = self.FAILED_NONE
+ state.run_state = self.ITERATING_ALWAYS
+ state.did_rescue = True
+ else:
+ task = block.rescue[state.cur_rescue_task]
+ if isinstance(task, Block):
+ state.rescue_child_state = HostState(blocks=[task])
+ state.rescue_child_state.run_state = self.ITERATING_TASKS
+ task = None
+ state.cur_rescue_task += 1
+
+ elif state.run_state == self.ITERATING_ALWAYS:
+ # And again, the process here is identical to ITERATING_TASKS, except
+ # that once this portion is exhausted we either move on to the next
+ # block in the list or, in the event of any errors, set the run state
+ # to ITERATING_COMPLETE.
+ if state.always_child_state:
+ (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
+ if self._check_failed_state(state.always_child_state):
+ state.always_child_state = None
+ self._set_failed_state(state)
+ else:
+ if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
+ state.always_child_state = None
+ continue
+ else:
+ if state.cur_always_task >= len(block.always):
+ if state.fail_state != self.FAILED_NONE:
+ state.run_state = self.ITERATING_COMPLETE
+ else:
+ state.cur_block += 1
+ state.cur_regular_task = 0
+ state.cur_rescue_task = 0
+ state.cur_always_task = 0
+ state.run_state = self.ITERATING_TASKS
+ state.tasks_child_state = None
+ state.rescue_child_state = None
+ state.always_child_state = None
+ state.did_rescue = False
+
+ # we're advancing blocks, so if this was an end-of-role block we
+ # mark the current role complete
+ if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
+ block._role._completed[host.name] = True
+ else:
+ task = block.always[state.cur_always_task]
+ if isinstance(task, Block):
+ state.always_child_state = HostState(blocks=[task])
+ state.always_child_state.run_state = self.ITERATING_TASKS
+ task = None
+ state.cur_always_task += 1
+
+ elif state.run_state == self.ITERATING_COMPLETE:
+ return (state, None)
+
+ # if something above set the task, break out of the loop now
+ if task:
+ break
+
+ return (state, task)
+
+ def _set_failed_state(self, state):
+ if state.run_state == self.ITERATING_SETUP:
+ state.fail_state |= self.FAILED_SETUP
+ state.run_state = self.ITERATING_COMPLETE
+ elif state.run_state == self.ITERATING_TASKS:
+ if state.tasks_child_state is not None:
+ state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
+ else:
+ state.fail_state |= self.FAILED_TASKS
+ if state._blocks[state.cur_block].rescue:
+ state.run_state = self.ITERATING_RESCUE
+ elif state._blocks[state.cur_block].always:
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ state.run_state = self.ITERATING_COMPLETE
+ elif state.run_state == self.ITERATING_RESCUE:
+ if state.rescue_child_state is not None:
+ state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
+ else:
+ state.fail_state |= self.FAILED_RESCUE
+ if state._blocks[state.cur_block].always:
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ state.run_state = self.ITERATING_COMPLETE
+ elif state.run_state == self.ITERATING_ALWAYS:
+ if state.always_child_state is not None:
+ state.always_child_state = self._set_failed_state(state.always_child_state)
+ else:
+ state.fail_state |= self.FAILED_ALWAYS
+ state.run_state = self.ITERATING_COMPLETE
+ return state
+
+ def mark_host_failed(self, host):
+ s = self.get_host_state(host)
+ display.debug("marking host %s failed, current state: %s" % (host, s))
+ s = self._set_failed_state(s)
+ display.debug("^ failed state is now: %s" % s)
+ self._host_states[host.name] = s
+ self._play._removed_hosts.append(host.name)
+
+ def get_failed_hosts(self):
+ return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
+
+ def _check_failed_state(self, state):
+ if state is None:
+ return False
+ elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
+ return True
+ elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
+ return True
+ elif state.fail_state != self.FAILED_NONE:
+ if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
+ return False
+ elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
+ return False
+ else:
+ return not (state.did_rescue and state.fail_state & self.FAILED_ALWAYS == 0)
+ elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
+ cur_block = state._blocks[state.cur_block]
+ if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
+ return False
+ else:
+ return True
+ return False
+
+ def is_failed(self, host):
+ s = self.get_host_state(host)
+ return self._check_failed_state(s)
+
+ def get_active_state(self, state):
+ '''
+ Finds the active state, recursively if necessary when there are child states.
+ '''
+ if state.run_state == self.ITERATING_TASKS and state.tasks_child_state is not None:
+ return self.get_active_state(state.tasks_child_state)
+ elif state.run_state == self.ITERATING_RESCUE and state.rescue_child_state is not None:
+ return self.get_active_state(state.rescue_child_state)
+ elif state.run_state == self.ITERATING_ALWAYS and state.always_child_state is not None:
+ return self.get_active_state(state.always_child_state)
+ return state
+
+ def is_any_block_rescuing(self, state):
+ '''
+ Given the current HostState state, determines if the current block, or any child blocks,
+ are in rescue mode.
+ '''
+ if state.run_state == self.ITERATING_RESCUE:
+ return True
+ if state.tasks_child_state is not None:
+ return self.is_any_block_rescuing(state.tasks_child_state)
+ return False
+
+ def get_original_task(self, host, task):
+ # now a noop because we've changed the way we do caching
+ return (None, None)
+
+ def _insert_tasks_into_state(self, state, task_list):
+ # if we've failed at all, or if the task list is empty, just return the current state
+ if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
+ return state
+
+ if state.run_state == self.ITERATING_TASKS:
+ if state.tasks_child_state:
+ state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy()
+ before = target_block.block[:state.cur_regular_task]
+ after = target_block.block[state.cur_regular_task:]
+ target_block.block = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == self.ITERATING_RESCUE:
+ if state.rescue_child_state:
+ state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy()
+ before = target_block.rescue[:state.cur_rescue_task]
+ after = target_block.rescue[state.cur_rescue_task:]
+ target_block.rescue = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ elif state.run_state == self.ITERATING_ALWAYS:
+ if state.always_child_state:
+ state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
+ else:
+ target_block = state._blocks[state.cur_block].copy()
+ before = target_block.always[:state.cur_always_task]
+ after = target_block.always[state.cur_always_task:]
+ target_block.always = before + task_list + after
+ state._blocks[state.cur_block] = target_block
+ return state
+
+ def add_tasks(self, host, task_list):
+ self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
new file mode 100644
index 00000000..aacf1353
--- /dev/null
+++ b/lib/ansible/executor/playbook_executor.py
@@ -0,0 +1,311 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible import context
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.module_utils._text import to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.loader import become_loader, connection_loader, shell_loader
+from ansible.playbook import Playbook
+from ansible.template import Templar
+from ansible.utils.helpers import pct_to_int
+from ansible.utils.path import makedirs_safe
+from ansible.utils.ssh_functions import set_default_transport
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class PlaybookExecutor:
+
+ '''
+ This is the primary class for executing playbooks, and thus the
+ basis for bin/ansible-playbook operation.
+ '''
+
+ def __init__(self, playbooks, inventory, variable_manager, loader, passwords):
+ self._playbooks = playbooks
+ self._inventory = inventory
+ self._variable_manager = variable_manager
+ self._loader = loader
+ self.passwords = passwords
+ self._unreachable_hosts = dict()
+
+ if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \
+ context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'):
+ self._tqm = None
+ else:
+ self._tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ passwords=self.passwords,
+ forks=context.CLIARGS.get('forks'),
+ )
+
+ # Note: We run this here to cache whether the default ansible ssh
+ # executable supports control persist. Sometime in the future we may
+ # need to enhance this to check that ansible_ssh_executable specified
+ # in inventory is also cached. We can't do this caching at the point
+ # where it is used (in task_executor) because that is post-fork and
+ # therefore would be discarded after every task.
+ set_default_transport()
+
+ def run(self):
+ '''
+ Run the given playbook, based on the settings in the play which
+ may limit the runs to serialized groups, etc.
+ '''
+
+ result = 0
+ entrylist = []
+ entry = {}
+ try:
+ # preload become/connection/shell plugins so their config definitions get cached
+ list(connection_loader.all(class_only=True))
+ list(shell_loader.all(class_only=True))
+ list(become_loader.all(class_only=True))
+
+ for playbook_path in self._playbooks:
+ pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
+ # FIXME: move out of inventory
+ # self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
+
+ if self._tqm is None: # we are doing a listing
+ entry = {'playbook': playbook_path}
+ entry['plays'] = []
+ else:
+ # make sure the tqm has callbacks loaded
+ self._tqm.load_callbacks()
+ self._tqm.send_callback('v2_playbook_on_start', pb)
+
+ i = 1
+ plays = pb.get_plays()
+ display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
+
+ for play in plays:
+ if play._included_path is not None:
+ self._loader.set_basedir(play._included_path)
+ else:
+ self._loader.set_basedir(pb._basedir)
+
+ # clear any filters which may have been applied to the inventory
+ self._inventory.remove_restriction()
+
+ # Allow variables to be used in vars_prompt fields.
+ all_vars = self._variable_manager.get_vars(play=play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ setattr(play, 'vars_prompt', templar.template(play.vars_prompt))
+
+ # FIXME: this should be a play 'sub object' like loop_control
+ if play.vars_prompt:
+ for var in play.vars_prompt:
+ vname = var['name']
+ prompt = var.get("prompt", vname)
+ default = var.get("default", None)
+ private = boolean(var.get("private", True))
+ confirm = boolean(var.get("confirm", False))
+ encrypt = var.get("encrypt", None)
+ salt_size = var.get("salt_size", None)
+ salt = var.get("salt", None)
+ unsafe = var.get("unsafe", None)
+
+ if vname not in self._variable_manager.extra_vars:
+ if self._tqm:
+ self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt,
+ default, unsafe)
+ play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
+ else: # we are either in --list-<option> or syntax check
+ play.vars[vname] = default
+
+ # Post validate so any play level variables are templated
+ all_vars = self._variable_manager.get_vars(play=play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ play.post_validate(templar)
+
+ if context.CLIARGS['syntax']:
+ continue
+
+ if self._tqm is None:
+ # we are just doing a listing
+ entry['plays'].append(play)
+
+ else:
+ self._tqm._unreachable_hosts.update(self._unreachable_hosts)
+
+ previously_failed = len(self._tqm._failed_hosts)
+ previously_unreachable = len(self._tqm._unreachable_hosts)
+
+ break_play = False
+ # we are actually running plays
+ batches = self._get_serialized_batches(play)
+ if len(batches) == 0:
+ self._tqm.send_callback('v2_playbook_on_play_start', play)
+ self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
+ for batch in batches:
+ # restrict the inventory to the hosts in the serialized batch
+ self._inventory.restrict_to_hosts(batch)
+ # and run it...
+ result = self._tqm.run(play=play)
+
+ # break the play if the result equals the special return code
+ if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
+ result = self._tqm.RUN_FAILED_HOSTS
+ break_play = True
+
+ # check the number of failures here to see if it's above the maximum
+ # failure percentage allowed, or if any errors are fatal. If either of those
+ # conditions is met, we break out; otherwise we only break out if the entire
+ # batch failed
+ failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
+ (previously_failed + previously_unreachable)
+
+ if len(batch) == failed_hosts_count:
+ break_play = True
+ break
+
+ # update the previous counts so they don't accumulate incorrectly
+ # over multiple serial batches
+ previously_failed += len(self._tqm._failed_hosts) - previously_failed
+ previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
+
+ # save the unreachable hosts from this batch
+ self._unreachable_hosts.update(self._tqm._unreachable_hosts)
+
+ if break_play:
+ break
+
+ i = i + 1 # per play
+
+ if entry:
+ entrylist.append(entry) # per playbook
+
+ # send the stats callback for this playbook
+ if self._tqm is not None:
+ if C.RETRY_FILES_ENABLED:
+ retries = set(self._tqm._failed_hosts.keys())
+ retries.update(self._tqm._unreachable_hosts.keys())
+ retries = sorted(retries)
+ if len(retries) > 0:
+ if C.RETRY_FILES_SAVE_PATH:
+ basedir = C.RETRY_FILES_SAVE_PATH
+ elif playbook_path:
+ basedir = os.path.dirname(os.path.abspath(playbook_path))
+ else:
+ basedir = '~/'
+
+ (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
+ filename = os.path.join(basedir, "%s.retry" % retry_name)
+ if self._generate_retry_inventory(filename, retries):
+ display.display("\tto retry, use: --limit @%s\n" % filename)
+
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+
+ # if the last result wasn't zero, break out of the playbook file name loop
+ if result != 0:
+ break
+
+ if entrylist:
+ return entrylist
+
+ finally:
+ if self._tqm is not None:
+ self._tqm.cleanup()
+ if self._loader:
+ self._loader.cleanup_all_tmp_files()
+
+ if context.CLIARGS['syntax']:
+ display.display("No issues encountered")
+ return result
+
+ if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
+ display.error(
+ "No matching task \"%s\" found."
+ " Note: --start-at-task can only follow static includes."
+ % context.CLIARGS['start_at_task']
+ )
+
+ return result
+
+ def _get_serialized_batches(self, play):
+ '''
+ Returns a list of hosts, subdivided into batches based on
+ the serial size specified in the play.
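+
+ Illustrative example: 12 hosts with "serial: [1, 5]" yield batches of
+ 1, 5, 5, and 1 hosts; the last list entry is reused until the
+ inventory is exhausted.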
+ '''
+
+ # make sure we have a unique list of hosts
+ all_hosts = self._inventory.get_hosts(play.hosts, order=play.order)
+ all_hosts_len = len(all_hosts)
+
+ # the serial value can be listed as a scalar or a list of
+ # scalars, so we make sure it's a list here
+ serial_batch_list = play.serial
+ if len(serial_batch_list) == 0:
+ serial_batch_list = [-1]
+
+ cur_item = 0
+ serialized_batches = []
+
+ while len(all_hosts) > 0:
+ # get the serial value from current item in the list
+ serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
+
+ # if the serial count was not specified or is invalid, default to
+ # a list of all hosts, otherwise grab a chunk of the hosts equal
+ # to the current serial item size
+ if serial <= 0:
+ serialized_batches.append(all_hosts)
+ break
+ else:
+ play_hosts = []
+ for x in range(serial):
+ if len(all_hosts) > 0:
+ play_hosts.append(all_hosts.pop(0))
+
+ serialized_batches.append(play_hosts)
+
+ # increment the current batch list item number, and if we've hit
+ # the end keep using the last element until we've consumed all of
+ # the hosts in the inventory
+ cur_item += 1
+ if cur_item > len(serial_batch_list) - 1:
+ cur_item = len(serial_batch_list) - 1
+
+ return serialized_batches
+
+ def _generate_retry_inventory(self, retry_path, replay_hosts):
+ '''
+ Called when a playbook run fails. It generates an inventory which allows
+ re-running on ONLY the failed hosts. This may duplicate some variable
+ information in group_vars/host_vars but that is ok, and expected.
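+
+ The file is a plain list of host names, one per line, usable with
+ e.g. "ansible-playbook site.yml --limit @site.retry" (names shown are
+ illustrative).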
+ '''
+ try:
+ makedirs_safe(os.path.dirname(retry_path))
+ with open(retry_path, 'w') as fd:
+ for x in replay_hosts:
+ fd.write("%s\n" % x)
+ except Exception as e:
+ display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_text(e)))
+ return False
+
+ return True
diff --git a/lib/ansible/executor/powershell/__init__.py b/lib/ansible/executor/powershell/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/executor/powershell/__init__.py
diff --git a/lib/ansible/executor/powershell/async_watchdog.ps1 b/lib/ansible/executor/powershell/async_watchdog.ps1
new file mode 100644
index 00000000..cd3de81b
--- /dev/null
+++ b/lib/ansible/executor/powershell/async_watchdog.ps1
@@ -0,0 +1,110 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Payload
+)
+
+# help with debugging errors as we don't have visibility of this running process
+trap {
+ $watchdog_path = "$($env:TEMP)\ansible-async-watchdog-error-$(Get-Date -Format "yyyy-MM-ddTHH-mm-ss.ffffZ").txt"
+ $error_msg = "Error while running the async exec wrapper`r`n$(Format-AnsibleException -ErrorRecord $_)"
+ Set-Content -Path $watchdog_path -Value $error_msg
+ break
+}
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting async_watchdog" "async_watchdog"
+
+# pop 0th action as entrypoint
+$payload.actions = $payload.actions[1..99]
+
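+# after the pop, $actions[0] names the next wrapper in the chain (e.g. the
+# module wrapper); its base64 script in the payload becomes the entrypoint
+# invoked inside the runspace below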
+$actions = $Payload.actions
+$entrypoint = $payload.($actions[0])
+$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
+
+$resultfile_path = $payload.async_results_path
+$max_exec_time_sec = $payload.async_timeout_sec
+
+Write-AnsibleLog "INFO - deserializing existing result file args at: '$resultfile_path'" "async_watchdog"
+if (-not (Test-Path -Path $resultfile_path)) {
+ $msg = "result file at '$resultfile_path' does not exist"
+ Write-AnsibleLog "ERROR - $msg" "async_watchdog"
+ throw $msg
+}
+$result_json = Get-Content -Path $resultfile_path -Raw
+Write-AnsibleLog "INFO - result file json is: $result_json" "async_watchdog"
+$result = ConvertFrom-AnsibleJson -InputObject $result_json
+
+Write-AnsibleLog "INFO - creating async runspace" "async_watchdog"
+$rs = [RunspaceFactory]::CreateRunspace()
+$rs.Open()
+
+Write-AnsibleLog "INFO - creating async PowerShell pipeline" "async_watchdog"
+$ps = [PowerShell]::Create()
+$ps.Runspace = $rs
+
+# these functions are set in exec_wrapper
+Write-AnsibleLog "INFO - adding global functions to PowerShell pipeline script" "async_watchdog"
+$ps.AddScript($script:common_functions).AddStatement() > $null
+$ps.AddScript($script:wrapper_functions).AddStatement() > $null
+$ps.AddCommand("Set-Variable").AddParameters(@{Name="common_functions"; Value=$script:common_functions; Scope="script"}).AddStatement() > $null
+
+Write-AnsibleLog "INFO - adding $($actions[0]) to PowerShell pipeline script" "async_watchdog"
+$ps.AddScript($entrypoint).AddArgument($payload) > $null
+
+Write-AnsibleLog "INFO - async job start, calling BeginInvoke()" "async_watchdog"
+$job_async_result = $ps.BeginInvoke()
+
+Write-AnsibleLog "INFO - waiting '$max_exec_time_sec' seconds for async job to complete" "async_watchdog"
+$job_async_result.AsyncWaitHandle.WaitOne($max_exec_time_sec * 1000) > $null
+$result.finished = 1
+
+if ($job_async_result.IsCompleted) {
+ Write-AnsibleLog "INFO - async job completed, calling EndInvoke()" "async_watchdog"
+
+ $job_output = $ps.EndInvoke($job_async_result)
+ $job_error = $ps.Streams.Error
+
+ Write-AnsibleLog "INFO - raw module stdout:`r`n$($job_output | Out-String)" "async_watchdog"
+ if ($job_error) {
+ Write-AnsibleLog "WARN - raw module stderr:`r`n$($job_error | Out-String)" "async_watchdog"
+ }
+
+ # write success/output/error to result object
+ # TODO: cleanse leading/trailing junk
+ try {
+ Write-AnsibleLog "INFO - deserializing Ansible stdout" "async_watchdog"
+ $module_result = ConvertFrom-AnsibleJson -InputObject $job_output
+ # TODO: check for conflicting keys
+ $result = $result + $module_result
+ } catch {
+ $result.failed = $true
+ $result.msg = "failed to parse module output: $($_.Exception.Message)"
+ # return output back to Ansible to help with debugging errors
+ $result.stdout = $job_output | Out-String
+ $result.stderr = $job_error | Out-String
+ }
+
+ $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content -Path $resultfile_path -Value $result_json
+
+ Write-AnsibleLog "INFO - wrote output to $resultfile_path" "async_watchdog"
+} else {
+ Write-AnsibleLog "ERROR - reached timeout on async job, stopping job" "async_watchdog"
+ $ps.BeginStop($null, $null) > $null # best effort stop
+
+ # write timeout to result object
+ $result.failed = $true
+ $result.msg = "timed out waiting for module completion"
+ $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content -Path $resultfile_path -Value $result_json
+
+ Write-AnsibleLog "INFO - wrote timeout to '$resultfile_path'" "async_watchdog"
+}
+
+# in the case of a hung pipeline, this will cause the process to stay alive until it's un-hung...
+#$rs.Close() | Out-Null
+
+Write-AnsibleLog "INFO - ending async_watchdog" "async_watchdog"
diff --git a/lib/ansible/executor/powershell/async_wrapper.ps1 b/lib/ansible/executor/powershell/async_wrapper.ps1
new file mode 100644
index 00000000..b9b991a3
--- /dev/null
+++ b/lib/ansible/executor/powershell/async_wrapper.ps1
@@ -0,0 +1,172 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Payload
+)
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting async_wrapper" "async_wrapper"
+
+if (-not $Payload.environment.ContainsKey("ANSIBLE_ASYNC_DIR")) {
+ Write-AnsibleError -Message "internal error: the environment variable ANSIBLE_ASYNC_DIR is not set and is required for an async task"
+ $host.SetShouldExit(1)
+ return
+}
+$async_dir = [System.Environment]::ExpandEnvironmentVariables($Payload.environment.ANSIBLE_ASYNC_DIR)
+
+# calculate the result path so we can include it in the worker payload
+$jid = $Payload.async_jid
+$local_jid = $jid + "." + $pid
+
+$results_path = [System.IO.Path]::Combine($async_dir, $local_jid)
+
+Write-AnsibleLog "INFO - creating async results path at '$results_path'" "async_wrapper"
+
+$Payload.async_results_path = $results_path
+[System.IO.Directory]::CreateDirectory([System.IO.Path]::GetDirectoryName($results_path)) > $null
+
+# we use Win32_Process to escape the current process job; CreateProcess with a
+# breakaway flag won't work for psrp as the psrp process does not have breakaway
+# rights. Unfortunately we can't read/write to the spawned process as we can't
+# inherit the handles. We use a locked down named pipe to send the exec_wrapper
+# payload. Anonymous pipes won't work as the spawned process will not be a child
+# of the current one and will not be able to inherit the handles.
+
+# pop the async_wrapper action so we don't get stuck in a loop creating a new
+# exec_wrapper for our async process
+$Payload.actions = $Payload.actions[1..99]
+$payload_json = ConvertTo-Json -InputObject $Payload -Depth 99 -Compress
+
+$exec_wrapper = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.exec_wrapper))
+$exec_wrapper += "`0`0`0`0" + $payload_json
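+# the four null chars delimit the wrapper script from its JSON payload; the
+# bootstrap script below splits on the same marker to recover both halves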
+$payload_bytes = [System.Text.Encoding]::UTF8.GetBytes($exec_wrapper)
+$pipe_name = "ansible-async-$jid-$([guid]::NewGuid())"
+
+# template the async process command line with the payload details
+$bootstrap_wrapper = {
+ # help with debugging errors as we lose visibility of the process output
+ # from here on
+ trap {
+ $wrapper_path = "$($env:TEMP)\ansible-async-wrapper-error-$(Get-Date -Format "yyyy-MM-ddTHH-mm-ss.ffffZ").txt"
+ $error_msg = "Error while running the async exec wrapper`r`n$($_ | Out-String)`r`n$($_.ScriptStackTrace)"
+ Set-Content -Path $wrapper_path -Value $error_msg
+ break
+ }
+
+ &chcp.com 65001 > $null
+
+ # store the pipe name and no. of bytes to read; these are populated before
+ # the process is created - do not remove or change them
+ $pipe_name = ""
+ $bytes_length = 0
+
+ $input_bytes = New-Object -TypeName byte[] -ArgumentList $bytes_length
+ $pipe = New-Object -TypeName System.IO.Pipes.NamedPipeClientStream -ArgumentList @(
+ ".", # localhost
+ $pipe_name,
+ [System.IO.Pipes.PipeDirection]::In,
+ [System.IO.Pipes.PipeOptions]::None,
+ [System.Security.Principal.TokenImpersonationLevel]::Anonymous
+ )
+ try {
+ $pipe.Connect()
+ $pipe.Read($input_bytes, 0, $bytes_length) > $null
+ } finally {
+ $pipe.Close()
+ }
+ $exec = [System.Text.Encoding]::UTF8.GetString($input_bytes)
+ $exec_parts = $exec.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+ Set-Variable -Name json_raw -Value $exec_parts[1]
+ $exec = [ScriptBlock]::Create($exec_parts[0])
+ &$exec
+}
+
+$bootstrap_wrapper = $bootstrap_wrapper.ToString().Replace('$pipe_name = ""', "`$pipe_name = `"$pipe_name`"")
+$bootstrap_wrapper = $bootstrap_wrapper.Replace('$bytes_length = 0', "`$bytes_length = $($payload_bytes.Count)")
+$encoded_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper))
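+# -EncodedCommand expects base64 over UTF-16LE bytes, hence the use of
+# [System.Text.Encoding]::Unicode rather than UTF8 above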
+$pwsh_path = "$env:SystemRoot\System32\WindowsPowerShell\v1.0\powershell.exe"
+$exec_args = "`"$pwsh_path`" -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encoded_command"
+
+# create a named pipe that is set to allow only the current user read access
+$current_user = ([Security.Principal.WindowsIdentity]::GetCurrent()).User
+$pipe_sec = New-Object -TypeName System.IO.Pipes.PipeSecurity
+$pipe_ar = New-Object -TypeName System.IO.Pipes.PipeAccessRule -ArgumentList @(
+ $current_user,
+ [System.IO.Pipes.PipeAccessRights]::Read,
+ [System.Security.AccessControl.AccessControlType]::Allow
+)
+$pipe_sec.AddAccessRule($pipe_ar)
+
+Write-AnsibleLog "INFO - creating named pipe '$pipe_name'" "async_wrapper"
+$pipe = New-Object -TypeName System.IO.Pipes.NamedPipeServerStream -ArgumentList @(
+ $pipe_name,
+ [System.IO.Pipes.PipeDirection]::Out,
+ 1,
+ [System.IO.Pipes.PipeTransmissionMode]::Byte,
+ [System.IO.Pipes.PipeOptions]::Asynchronous,
+ 0,
+ 0,
+ $pipe_sec
+)
+
+try {
+ Write-AnsibleLog "INFO - creating async process '$exec_args'" "async_wrapper"
+ $process = Invoke-CimMethod -ClassName Win32_Process -Name Create -Arguments @{CommandLine=$exec_args}
+ $rc = $process.ReturnValue
+
+ Write-AnsibleLog "INFO - return value from async process exec: $rc" "async_wrapper"
+ if ($rc -ne 0) {
+ $error_msg = switch($rc) {
+ 2 { "Access denied" }
+ 3 { "Insufficient privilege" }
+ 8 { "Unknown failure" }
+ 9 { "Path not found" }
+ 21 { "Invalid parameter" }
+ default { "Other" }
+ }
+ throw "Failed to start async process: $rc ($error_msg)"
+ }
+ $watchdog_pid = $process.ProcessId
+ Write-AnsibleLog "INFO - created async process PID: $watchdog_pid" "async_wrapper"
+
+ # populate initial results before we send the async data to avoid result race
+ $result = @{
+ started = 1;
+ finished = 0;
+ results_file = $results_path;
+ ansible_job_id = $local_jid;
+ _ansible_suppress_tmpdir_delete = $true;
+ ansible_async_watchdog_pid = $watchdog_pid
+ }
+
+ Write-AnsibleLog "INFO - writing initial async results to '$results_path'" "async_wrapper"
+ $result_json = ConvertTo-Json -InputObject $result -Depth 99 -Compress
+ Set-Content $results_path -Value $result_json
+
+ $np_timeout = $Payload.async_startup_timeout * 1000
+ Write-AnsibleLog "INFO - waiting for async process to connect to named pipe for $np_timeout milliseconds" "async_wrapper"
+ $wait_async = $pipe.BeginWaitForConnection($null, $null)
+ $wait_async.AsyncWaitHandle.WaitOne($np_timeout) > $null
+ if (-not $wait_async.IsCompleted) {
+ $msg = "Ansible encountered a timeout while waiting for the async task to start and connect to the named"
+ $msg += "pipe. This can be affected by the performance of the target - you can increase this timeout using"
+ $msg += "WIN_ASYNC_STARTUP_TIMEOUT or just for this host using the win_async_startup_timeout hostvar if "
+ $msg += "this keeps happening."
+ throw $msg
+ }
+ $pipe.EndWaitForConnection($wait_async)
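BeginWaitForConnection returns an IAsyncResult whose wait handle lets the wrapper bound the wait:
if no client connects within the timeout, WaitOne returns and IsCompleted stays false. The pattern
in isolation (a sketch where no client ever connects, so the timeout path is taken; the pipe name
is hypothetical):

    $server = New-Object -TypeName System.IO.Pipes.NamedPipeServerStream -ArgumentList @(
        'demo-pipe', [System.IO.Pipes.PipeDirection]::Out, 1,
        [System.IO.Pipes.PipeTransmissionMode]::Byte, [System.IO.Pipes.PipeOptions]::Asynchronous
    )
    $wait = $server.BeginWaitForConnection($null, $null)
    $wait.AsyncWaitHandle.WaitOne(2000) > $null  # wait up to 2 seconds
    if (-not $wait.IsCompleted) {
        $server.Close()  # nobody connected - this is where the wrapper throws its timeout error
    }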
+
+ Write-AnsibleLog "INFO - writing exec_wrapper and payload to async process" "async_wrapper"
+ $pipe.Write($payload_bytes, 0, $payload_bytes.Count)
+ $pipe.Flush()
+ $pipe.WaitForPipeDrain()
+} finally {
+ $pipe.Close()
+}
+
+Write-AnsibleLog "INFO - outputting initial async result: $result_json" "async_wrapper"
+Write-Output -InputObject $result_json
+Write-AnsibleLog "INFO - ending async_wrapper" "async_wrapper"
diff --git a/lib/ansible/executor/powershell/become_wrapper.ps1 b/lib/ansible/executor/powershell/become_wrapper.ps1
new file mode 100644
index 00000000..00f4d4fd
--- /dev/null
+++ b/lib/ansible/executor/powershell/become_wrapper.ps1
@@ -0,0 +1,155 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Payload
+)
+
+#Requires -Module Ansible.ModuleUtils.AddType
+#AnsibleRequires -CSharpUtil Ansible.AccessToken
+#AnsibleRequires -CSharpUtil Ansible.Become
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting become_wrapper" "become_wrapper"
+
+Function Get-EnumValue($enum, $flag_type, $value) {
+ $raw_enum_value = $value.Replace('_', '')
+ try {
+ $enum_value = [Enum]::Parse($enum, $raw_enum_value, $true)
+ } catch [System.ArgumentException] {
+ $valid_options = [Enum]::GetNames($enum) | ForEach-Object -Process {
+ (($_ -creplace "(.)([A-Z][a-z]+)", '$1_$2') -creplace "([a-z0-9])([A-Z])", '$1_$2').ToString().ToLower()
+ }
+ throw "become_flags $flag_type value '$value' is not valid, valid values are: $($valid_options -join ", ")"
+ }
+ return $enum_value
+}
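Get-EnumValue lets become_flags use Ansible's snake_case spelling by stripping underscores before
the case-insensitive Enum parse. A quick illustration with a built-in enum (the real callers pass
the Ansible.AccessToken and Ansible.Become types):

    Get-EnumValue -enum ([System.IO.FileShare]) -flag_type 'demo' -value 'read_write'
    # -> ReadWrite; an invalid value throws and lists the valid snake_case options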
+
+Function Get-BecomeFlags($flags) {
+ $logon_type = [Ansible.AccessToken.LogonType]::Interactive
+ $logon_flags = [Ansible.Become.LogonFlags]::WithProfile
+
+ if ($null -eq $flags -or $flags -eq "") {
+ $flag_split = @()
+ } elseif ($flags -is [string]) {
+ $flag_split = $flags.Split(" ")
+ } else {
+ throw "become_flags must be a string, was $($flags.GetType())"
+ }
+
+ foreach ($flag in $flag_split) {
+ $split = $flag.Split("=")
+ if ($split.Count -ne 2) {
+ throw "become_flags entry '$flag' is in an invalid format, must be a key=value pair"
+ }
+ $flag_key = $split[0]
+ $flag_value = $split[1]
+ if ($flag_key -eq "logon_type") {
+ $enum_details = @{
+ enum = [Ansible.AccessToken.LogonType]
+ flag_type = $flag_key
+ value = $flag_value
+ }
+ $logon_type = Get-EnumValue @enum_details
+ } elseif ($flag_key -eq "logon_flags") {
+ $logon_flag_values = $flag_value.Split(",")
+ $logon_flags = 0 -as [Ansible.Become.LogonFlags]
+ foreach ($logon_flag_value in $logon_flag_values) {
+ if ($logon_flag_value -eq "") {
+ continue
+ }
+ $enum_details = @{
+ enum = [Ansible.Become.LogonFlags]
+ flag_type = $flag_key
+ value = $logon_flag_value
+ }
+ $logon_flag = Get-EnumValue @enum_details
+ $logon_flags = $logon_flags -bor $logon_flag
+ }
+ } else {
+ throw "become_flags key '$flag_key' is not a valid runas flag, must be 'logon_type' or 'logon_flags'"
+ }
+ }
+
+ return $logon_type, [Ansible.Become.LogonFlags]$logon_flags
+}
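Assuming the Ansible.AccessToken and Ansible.Become C# utils are loaded and expose the usual
Batch/WithProfile/NetcredentialsOnly members, a become_flags string parses like this:

    $type, $flags = Get-BecomeFlags -flags 'logon_type=batch logon_flags=with_profile,netcredentials_only'
    # $type  -> Batch
    # $flags -> WithProfile, NetcredentialsOnly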
+
+Write-AnsibleLog "INFO - loading C# become code" "become_wrapper"
+$add_type_b64 = $Payload.powershell_modules["Ansible.ModuleUtils.AddType"]
+$add_type = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($add_type_b64))
+New-Module -Name Ansible.ModuleUtils.AddType -ScriptBlock ([ScriptBlock]::Create($add_type)) | Import-Module > $null
+
+$new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
+$access_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.AccessToken"]))
+$become_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.Become"]))
+$process_def = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils["Ansible.Process"]))
+Add-CSharpType -References $access_def, $become_def, $process_def -TempPath $new_tmp -IncludeDebugInfo
+
+$username = $Payload.become_user
+$password = $Payload.become_password
+# We need to set password to the value of NullString so a null password is preserved when crossing the .NET
+# boundary. If we pass $null it will automatically be converted to "" and we need to keep the distinction for
+# accounts that don't have a password and when someone wants to become without knowing the password.
+if ($null -eq $password) {
+ $password = [NullString]::Value
+}
+
+try {
+ $logon_type, $logon_flags = Get-BecomeFlags -flags $Payload.become_flags
+} catch {
+ Write-AnsibleError -Message "internal error: failed to parse become_flags '$($Payload.become_flags)'" -ErrorRecord $_
+ $host.SetShouldExit(1)
+ return
+}
+Write-AnsibleLog "INFO - parsed become input, user: '$username', type: '$logon_type', flags: '$logon_flags'" "become_wrapper"
+
+# NB: the CreateProcessWithTokenW commandline maxes out at 1024 chars, so we
+# must bootstrap via a small wrapper which reads the exec_wrapper from the
+# stdin pipe. We cannot use 'powershell -' as the $ErrorActionPreference is
+# always set to Stop and cannot be changed. We also need to split the payload from the wrapper to prevent
+# potentially sensitive content from being logged by the scriptblock logger.
+$bootstrap_wrapper = {
+ &chcp.com 65001 > $null
+ $exec_wrapper_str = [System.Console]::In.ReadToEnd()
+ $split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+ Set-Variable -Name json_raw -Value $split_parts[1]
+ $exec_wrapper = [ScriptBlock]::Create($split_parts[0])
+ &$exec_wrapper
+}
+$exec_command = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($bootstrap_wrapper.ToString()))
+$lp_command_line = "powershell.exe -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $exec_command"
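-EncodedCommand expects base64 over the UTF-16LE bytes of the script, which is why
[System.Text.Encoding]::Unicode is used here rather than UTF8. A round-trip sketch:

    $cmd = 'Write-Output "hello"'
    $b64 = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($cmd))
    powershell.exe -NonInteractive -NoProfile -EncodedCommand $b64   # prints: hello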
+$lp_current_directory = $env:SystemRoot # TODO: should this be set to the become user's profile dir?
+
+# pop the become_wrapper action so we don't get stuck in a loop
+$Payload.actions = $Payload.actions[1..99]
+# we want the output from the exec_wrapper to be base64 encoded to preserve unicode chars
+$Payload.encoded_output = $true
+
+$payload_json = ConvertTo-Json -InputObject $Payload -Depth 99 -Compress
+# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
+$exec_wrapper = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.exec_wrapper))
+$exec_wrapper += "`0`0`0`0" + $payload_json
+
+try {
+ Write-AnsibleLog "INFO - starting become process '$lp_command_line'" "become_wrapper"
+ $result = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($username, $password, $logon_flags, $logon_type,
+ $null, $lp_command_line, $lp_current_directory, $null, $exec_wrapper)
+ Write-AnsibleLog "INFO - become process complete with rc: $($result.ExitCode)" "become_wrapper"
+ $stdout = $result.StandardOut
+ try {
+ $stdout = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($stdout))
+ } catch [FormatException] {
+ # output wasn't Base64, ignore as it may contain an error message we want to pass to Ansible
+ Write-AnsibleLog "WARN - become process stdout was not base64 encoded as expected: $stdout"
+ }
+
+ $host.UI.WriteLine($stdout)
+ $host.UI.WriteErrorLine($result.StandardError.Trim())
+ $host.SetShouldExit($result.ExitCode)
+} catch {
+ Write-AnsibleError -Message "internal error: failed to become user '$username'" -ErrorRecord $_
+ $host.SetShouldExit(1)
+}
+
+Write-AnsibleLog "INFO - ending become_wrapper" "become_wrapper"
diff --git a/lib/ansible/executor/powershell/bootstrap_wrapper.ps1 b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
new file mode 100644
index 00000000..cdba80cb
--- /dev/null
+++ b/lib/ansible/executor/powershell/bootstrap_wrapper.ps1
@@ -0,0 +1,13 @@
+&chcp.com 65001 > $null
+
+if ($PSVersionTable.PSVersion -lt [Version]"3.0") {
+ '{"failed":true,"msg":"Ansible requires PowerShell v3.0 or newer"}'
+ exit 1
+}
+
+$exec_wrapper_str = $input | Out-String
+$split_parts = $exec_wrapper_str.Split(@("`0`0`0`0"), 2, [StringSplitOptions]::RemoveEmptyEntries)
+if ($split_parts.Length -ne 2) { throw "invalid payload" }
+Set-Variable -Name json_raw -Value $split_parts[1]
+$exec_wrapper = [ScriptBlock]::Create($split_parts[0])
+&$exec_wrapper
diff --git a/lib/ansible/executor/powershell/coverage_wrapper.ps1 b/lib/ansible/executor/powershell/coverage_wrapper.ps1
new file mode 100644
index 00000000..5044ab92
--- /dev/null
+++ b/lib/ansible/executor/powershell/coverage_wrapper.ps1
@@ -0,0 +1,196 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Payload
+)
+
+#AnsibleRequires -Wrapper module_wrapper
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting coverage_wrapper" "coverage_wrapper"
+
+# Required to be set for psrp so we can set a breakpoint in the remote runspace
+if ($PSVersionTable.PSVersion -ge [Version]'4.0') {
+ $host.Runspace.Debugger.SetDebugMode([System.Management.Automation.DebugModes]::RemoteScript)
+}
+
+Function New-CoverageBreakpoint {
+ Param (
+ [String]$Path,
+ [ScriptBlock]$Code,
+ [String]$AnsiblePath
+ )
+
+ # It is quicker to pass in the code as a string instead of calling ParseFile as we already know the contents
+ $predicate = {
+ $args[0] -is [System.Management.Automation.Language.CommandBaseAst]
+ }
+ $script_cmds = $Code.Ast.FindAll($predicate, $true)
+
+ # Create an object that tracks the Ansible path of the file and the breakpoints that have been set in it
+ $info = [PSCustomObject]@{
+ Path = $AnsiblePath
+ Breakpoints = [System.Collections.Generic.List`1[System.Management.Automation.Breakpoint]]@()
+ }
+
+    # Keep track of lines that are already scanned. PowerShell can contain multiple commands on one line
+ $scanned_lines = [System.Collections.Generic.HashSet`1[System.Int32]]@()
+ foreach ($cmd in $script_cmds) {
+ if (-not $scanned_lines.Add($cmd.Extent.StartLineNumber)) {
+ continue
+ }
+
+ # Do not add any -Action value, even if it is $null or {}. Doing so will balloon the runtime.
+ $params = @{
+ Script = $Path
+ Line = $cmd.Extent.StartLineNumber
+ Column = $cmd.Extent.StartColumnNumber
+ }
+ $info.Breakpoints.Add((Set-PSBreakpoint @params))
+ }
+
+ $info
+}
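The AST scan is what makes per-line coverage cheap: one breakpoint per line that starts a command,
with no -Action attached (per the comment above, an action balloons the runtime), so a hit only
increments HitCount. The discovery step on its own:

    $code = {
        $date = Get-Date
        Write-Output $date
    }
    $predicate = { $args[0] -is [System.Management.Automation.Language.CommandBaseAst] }
    $code.Ast.FindAll($predicate, $true) | ForEach-Object { $_.Extent.StartLineNumber }
    # -> the start line of every command in the scriptblock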
+
+Function Compare-WhitelistPattern {
+ Param (
+ [String[]]$Patterns,
+ [String]$Path
+ )
+
+ foreach ($pattern in $Patterns) {
+ if ($Path -like $pattern) {
+ return $true
+ }
+ }
+ return $false
+}
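The whitelist entries are ordinary PowerShell wildcard patterns matched with -like, e.g.
(hypothetical patterns and path):

    Compare-WhitelistPattern -Patterns @('ansible.windows.*', '*/module_utils/*') -Path 'ansible.windows.win_ping'
    # -> $true (the first pattern matches)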
+
+$module_name = $Payload.module_args["_ansible_module_name"]
+Write-AnsibleLog "INFO - building coverage payload for '$module_name'" "coverage_wrapper"
+
+# A PS breakpoint needs an actual path to work properly, so we create a temp directory that will store the module and
+# module_util code during execution
+$temp_path = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath "ansible-coverage-$([System.IO.Path]::GetRandomFileName())"
+Write-AnsibleLog "INFO - Creating temp path for coverage files '$temp_path'" "coverage_wrapper"
+New-Item -Path $temp_path -ItemType Directory > $null
+$breakpoint_info = [System.Collections.Generic.List`1[PSObject]]@()
+
+# Ensures we create files with UTF-8 encoding and a BOM. This is critical to force the powershell engine to read files
+# as UTF-8 and not as the system's codepage.
+$file_encoding = 'UTF8'
+
+try {
+ $scripts = [System.Collections.Generic.List`1[System.Object]]@($script:common_functions)
+
+ $coverage_whitelist = $Payload.coverage.whitelist.Split(":", [StringSplitOptions]::RemoveEmptyEntries)
+
+ # We need to track what utils have already been added to the script for loading. This is because the load
+ # order is important and can have module_utils that rely on other utils.
+ $loaded_utils = [System.Collections.Generic.HashSet`1[System.String]]@()
+ $parse_util = {
+ $util_name = $args[0]
+ if (-not $loaded_utils.Add($util_name)) {
+ return
+ }
+
+ $util_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.powershell_modules.$util_name))
+ $util_sb = [ScriptBlock]::Create($util_code)
+ $util_path = Join-Path -Path $temp_path -ChildPath "$($util_name).psm1"
+
+ Write-AnsibleLog "INFO - Outputting module_util $util_name to temp file '$util_path'" "coverage_wrapper"
+ Set-Content -LiteralPath $util_path -Value $util_code -Encoding $file_encoding
+
+ $ansible_path = $Payload.coverage.module_util_paths.$util_name
+ if ((Compare-WhitelistPattern -Patterns $coverage_whitelist -Path $ansible_path)) {
+ $cov_params = @{
+ Path = $util_path
+ Code = $util_sb
+ AnsiblePath = $ansible_path
+ }
+ $breakpoints = New-CoverageBreakpoint @cov_params
+ $breakpoint_info.Add($breakpoints)
+ }
+
+ if ($null -ne $util_sb.Ast.ScriptRequirements) {
+ foreach ($required_util in $util_sb.Ast.ScriptRequirements.RequiredModules) {
+ &$parse_util $required_util.Name
+ }
+ }
+ Write-AnsibleLog "INFO - Adding util $util_name to scripts to run" "coverage_wrapper"
+ $scripts.Add("Import-Module -Name '$util_path'")
+ }
+ foreach ($util in $Payload.powershell_modules.Keys) {
+ &$parse_util $util
+ }
+
+ $module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
+ $module_path = Join-Path -Path $temp_path -ChildPath "$($module_name).ps1"
+ Write-AnsibleLog "INFO - Ouputting module $module_name to temp file '$module_path'" "coverage_wrapper"
+ Set-Content -LiteralPath $module_path -Value $module -Encoding $file_encoding
+ $scripts.Add($module_path)
+
+ $ansible_path = $Payload.coverage.module_path
+ if ((Compare-WhitelistPattern -Patterns $coverage_whitelist -Path $ansible_path)) {
+ $cov_params = @{
+ Path = $module_path
+ Code = [ScriptBlock]::Create($module)
+ AnsiblePath = $Payload.coverage.module_path
+ }
+ $breakpoints = New-CoverageBreakpoint @cov_params
+ $breakpoint_info.Add($breakpoints)
+ }
+
+ $variables = [System.Collections.ArrayList]@(@{ Name = "complex_args"; Value = $Payload.module_args; Scope = "Global" })
+ $entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.module_wrapper))
+ $entrypoint = [ScriptBlock]::Create($entrypoint)
+
+ $params = @{
+ Scripts = $scripts
+ Variables = $variables
+ Environment = $Payload.environment
+ ModuleName = $module_name
+ }
+ if ($breakpoint_info) {
+ $params.Breakpoints = $breakpoint_info.Breakpoints
+ }
+
+ try {
+ &$entrypoint @params
+ } finally {
+ # Processing here is kept to an absolute minimum to make sure each task runtime is kept as small as
+ # possible. Once all the tests have been run ansible-test will collect this info and process it locally in
+ # one go.
+ Write-AnsibleLog "INFO - Creating coverage result output" "coverage_wrapper"
+ $coverage_info = @{}
+ foreach ($info in $breakpoint_info) {
+ $coverage_info.($info.Path) = $info.Breakpoints | Select-Object -Property Line, HitCount
+ }
+
+ # The coverage.output value is a filename set by the Ansible controller. We append some more remote side
+ # info to the filename to make it unique and identify the remote host a bit more.
+ $ps_version = "$($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)"
+ $coverage_output_path = "$($Payload.coverage.output)=powershell-$ps_version=coverage.$($env:COMPUTERNAME).$PID.$(Get-Random)"
+ $code_cov_json = ConvertTo-Json -InputObject $coverage_info -Compress
+
+ Write-AnsibleLog "INFO - Outputting coverage json to '$coverage_output_path'" "coverage_wrapper"
+ # Ansible controller expects these files to be UTF-8 without a BOM, use .NET for this.
+ $utf8_no_bom = New-Object -TypeName System.Text.UTF8Encoding -ArgumentList $false
+        [System.IO.File]::WriteAllBytes($coverage_output_path, $utf8_no_bom.GetBytes($code_cov_json))
+ }
+} finally {
+ try {
+ if ($breakpoint_info) {
+ foreach ($b in $breakpoint_info.Breakpoints) {
+ Remove-PSBreakpoint -Breakpoint $b
+ }
+ }
+ } finally {
+ Write-AnsibleLog "INFO - Remove temp coverage folder '$temp_path'" "coverage_wrapper"
+ Remove-Item -LiteralPath $temp_path -Force -Recurse
+ }
+}
+
+Write-AnsibleLog "INFO - ending coverage_wrapper" "coverage_wrapper"
diff --git a/lib/ansible/executor/powershell/exec_wrapper.ps1 b/lib/ansible/executor/powershell/exec_wrapper.ps1
new file mode 100644
index 00000000..59ce2e88
--- /dev/null
+++ b/lib/ansible/executor/powershell/exec_wrapper.ps1
@@ -0,0 +1,229 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+begin {
+ $DebugPreference = "Continue"
+ $ProgressPreference = "SilentlyContinue"
+ $ErrorActionPreference = "Stop"
+ Set-StrictMode -Version 2
+
+ # common functions that are loaded in exec and module context, this is set
+ # as a script scoped variable so async_watchdog and module_wrapper can
+ # access the functions when creating their Runspaces
+ $script:common_functions = {
+ Function ConvertFrom-AnsibleJson {
+ <#
+ .SYNOPSIS
+ Converts a JSON string to a Hashtable/Array in the fastest way
+ possible. Unfortunately ConvertFrom-Json is still faster but outputs
+            a PSCustomObject which is cumbersome for module consumption.
+
+ .PARAMETER InputObject
+ [String] The JSON string to deserialize.
+ #>
+ param(
+ [Parameter(Mandatory=$true, Position=0)][String]$InputObject
+ )
+
+ # we can use -AsHashtable to get PowerShell to convert the JSON to
+ # a Hashtable and not a PSCustomObject. This was added in PowerShell
+ # 6.0, fall back to a manual conversion for older versions
+ $cmdlet = Get-Command -Name ConvertFrom-Json -CommandType Cmdlet
+ if ("AsHashtable" -in $cmdlet.Parameters.Keys) {
+ return ,(ConvertFrom-Json -InputObject $InputObject -AsHashtable)
+ } else {
+ # get the PSCustomObject and then manually convert from there
+ $raw_obj = ConvertFrom-Json -InputObject $InputObject
+
+ Function ConvertTo-Hashtable {
+ param($InputObject)
+
+ if ($null -eq $InputObject) {
+ return $null
+ }
+
+ if ($InputObject -is [PSCustomObject]) {
+ $new_value = @{}
+ foreach ($prop in $InputObject.PSObject.Properties.GetEnumerator()) {
+ $new_value.($prop.Name) = (ConvertTo-Hashtable -InputObject $prop.Value)
+ }
+ return ,$new_value
+ } elseif ($InputObject -is [Array]) {
+ $new_value = [System.Collections.ArrayList]@()
+ foreach ($val in $InputObject) {
+ $new_value.Add((ConvertTo-Hashtable -InputObject $val)) > $null
+ }
+ return ,$new_value.ToArray()
+ } else {
+ return ,$InputObject
+ }
+ }
+ return ,(ConvertTo-Hashtable -InputObject $raw_obj)
+ }
+ }
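The leading comma in each return wraps the value in a one-element array; PowerShell unrolls that
outer array on return, leaving the inner hashtable or array intact instead of enumerating it away.
The difference in isolation (hypothetical function names):

    Function Get-Plain   { return @() }
    Function Get-Wrapped { return ,@() }
    $null -eq (Get-Plain)       # True  - the empty array was enumerated into nothing
    (Get-Wrapped) -is [Array]   # True  - the inner array survived the return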
+
+ Function Format-AnsibleException {
+ <#
+ .SYNOPSIS
+ Formats a PowerShell ErrorRecord to a string that's fit for human
+ consumption.
+
+ .NOTES
+ Using Out-String can give us the first part of the exception but it
+ also wraps the messages at 80 chars which is not ideal. We also
+ append the ScriptStackTrace and the .NET StackTrace if present.
+ #>
+ param([System.Management.Automation.ErrorRecord]$ErrorRecord)
+
+ $exception = @"
+$($ErrorRecord.ToString())
+$($ErrorRecord.InvocationInfo.PositionMessage)
+ + CategoryInfo : $($ErrorRecord.CategoryInfo.ToString())
+ + FullyQualifiedErrorId : $($ErrorRecord.FullyQualifiedErrorId.ToString())
+"@
+            # module_common strips comments and empty newlines, so we need to
+            # manually add a preceding newline using `r`n
+ $exception += "`r`n`r`nScriptStackTrace:`r`n$($ErrorRecord.ScriptStackTrace)`r`n"
+
+ # exceptions from C# will also have a StackTrace which we
+ # append if found
+ if ($null -ne $ErrorRecord.Exception.StackTrace) {
+ $exception += "`r`n$($ErrorRecord.Exception.ToString())"
+ }
+
+ return $exception
+ }
+ }
+ .$common_functions
+
+ # common wrapper functions used in the exec wrappers, this is defined in a
+ # script scoped variable so async_watchdog can pass them into the async job
+ $script:wrapper_functions = {
+ Function Write-AnsibleError {
+ <#
+ .SYNOPSIS
+ Writes an error message to a JSON string in the format that Ansible
+ understands. Also optionally adds an exception record if the
+ ErrorRecord is passed through.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][String]$Message,
+ [System.Management.Automation.ErrorRecord]$ErrorRecord = $null
+ )
+ $result = @{
+ msg = $Message
+ failed = $true
+ }
+ if ($null -ne $ErrorRecord) {
+ $result.msg += ": $($ErrorRecord.Exception.Message)"
+ $result.exception = (Format-AnsibleException -ErrorRecord $ErrorRecord)
+ }
+ Write-Output -InputObject (ConvertTo-Json -InputObject $result -Depth 99 -Compress)
+ }
+
+ Function Write-AnsibleLog {
+ <#
+ .SYNOPSIS
+ Used as a debugging tool to log events to a file as they run in the
+            exec wrappers. By default this is a noop; set the ANSIBLE_EXEC_DEBUG
+            environment variable to a log file path on the Windows host this
+            runs on to enable it.
+ #>
+ param(
+ [Parameter(Mandatory=$true, Position=0)][String]$Message,
+ [Parameter(Position=1)][String]$Wrapper
+ )
+
+ $log_path = $env:ANSIBLE_EXEC_DEBUG
+ if ($log_path) {
+ $log_path = [System.Environment]::ExpandEnvironmentVariables($log_path)
+ $parent_path = [System.IO.Path]::GetDirectoryName($log_path)
+ if (Test-Path -LiteralPath $parent_path -PathType Container) {
+ $msg = "{0:u} - {1} - {2} - " -f (Get-Date), $pid, ([System.Security.Principal.WindowsIdentity]::GetCurrent().Name)
+                    if ($Wrapper) {
+ $msg += "$Wrapper - "
+ }
+ $msg += $Message + "`r`n"
+ $msg_bytes = [System.Text.Encoding]::UTF8.GetBytes($msg)
+
+ $fs = [System.IO.File]::Open($log_path, [System.IO.FileMode]::Append,
+ [System.IO.FileAccess]::Write, [System.IO.FileShare]::ReadWrite)
+ try {
+ $fs.Write($msg_bytes, 0, $msg_bytes.Length)
+ } finally {
+ $fs.Close()
+ }
+ }
+ }
+ }
+ }
+ .$wrapper_functions
+
+ # only init and stream in $json_raw if it wasn't set by the enclosing scope
+ if (-not $(Get-Variable "json_raw" -ErrorAction SilentlyContinue)) {
+ $json_raw = ''
+ }
+} process {
+ $json_raw += [String]$input
+} end {
+ Write-AnsibleLog "INFO - starting exec_wrapper" "exec_wrapper"
+ if (-not $json_raw) {
+ Write-AnsibleError -Message "internal error: no input given to PowerShell exec wrapper"
+ exit 1
+ }
+
+ Write-AnsibleLog "INFO - converting json raw to a payload" "exec_wrapper"
+ $payload = ConvertFrom-AnsibleJson -InputObject $json_raw
+
+ # TODO: handle binary modules
+ # TODO: handle persistence
+
+ if ($payload.min_os_version) {
+ $min_os_version = [Version]$payload.min_os_version
+ # Environment.OSVersion.Version is deprecated and may not return the
+ # right version
+ $actual_os_version = [Version](Get-Item -Path $env:SystemRoot\System32\kernel32.dll).VersionInfo.ProductVersion
+
+ Write-AnsibleLog "INFO - checking if actual os version '$actual_os_version' is less than the min os version '$min_os_version'" "exec_wrapper"
+ if ($actual_os_version -lt $min_os_version) {
+ Write-AnsibleError -Message "internal error: This module cannot run on this OS as it requires a minimum version of $min_os_version, actual was $actual_os_version"
+ exit 1
+ }
+ }
+ if ($payload.min_ps_version) {
+ $min_ps_version = [Version]$payload.min_ps_version
+ $actual_ps_version = $PSVersionTable.PSVersion
+
+ Write-AnsibleLog "INFO - checking if actual PS version '$actual_ps_version' is less than the min PS version '$min_ps_version'" "exec_wrapper"
+ if ($actual_ps_version -lt $min_ps_version) {
+ Write-AnsibleError -Message "internal error: This module cannot run as it requires a minimum PowerShell version of $min_ps_version, actual was $actual_ps_version"
+ exit 1
+ }
+ }
+
+ # pop 0th action as entrypoint
+ $action = $payload.actions[0]
+ Write-AnsibleLog "INFO - running action $action" "exec_wrapper"
+
+ $entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.($action)))
+ $entrypoint = [ScriptBlock]::Create($entrypoint)
+    # some wrappers want the output to be in base64 form so formatting is
+    # preserved and locale issues are avoided. We store the value here in
+    # case the wrapper changes it when creating a payload for its own
+    # exec_wrapper
+ $encoded_output = $payload.encoded_output
+
+ try {
+ $output = &$entrypoint -Payload $payload
+ if ($encoded_output -and $null -ne $output) {
+ $b64_output = [System.Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes($output))
+ Write-Output -InputObject $b64_output
+ } else {
+ $output
+ }
+ } catch {
+ Write-AnsibleError -Message "internal error: failed to run exec_wrapper action $action" -ErrorRecord $_
+ exit 1
+ }
+ Write-AnsibleLog "INFO - ending exec_wrapper" "exec_wrapper"
+}
diff --git a/lib/ansible/executor/powershell/module_manifest.py b/lib/ansible/executor/powershell/module_manifest.py
new file mode 100644
index 00000000..83a1c3a7
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_manifest.py
@@ -0,0 +1,389 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import errno
+import json
+import os
+import pkgutil
+import random
+import re
+
+from distutils.version import LooseVersion
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.compat.importlib import import_module
+from ansible.plugins.loader import ps_module_utils_loader
+from ansible.utils.collection_loader import resource_from_fqcr
+
+
+class PSModuleDepFinder(object):
+
+ def __init__(self):
+ # This is also used by validate-modules to get a module's required utils in base and a collection.
+ self.ps_modules = dict()
+ self.exec_scripts = dict()
+
+ # by defining an explicit dict of cs utils and where they are used, we
+ # can potentially save time by not adding the type multiple times if it
+ # isn't needed
+ self.cs_utils_wrapper = dict()
+ self.cs_utils_module = dict()
+
+ self.ps_version = None
+ self.os_version = None
+ self.become = False
+
+ self._re_cs_module = [
+ # Reference C# module_util in another C# util, this must always be the fully qualified name.
+ # 'using ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
+ re.compile(to_bytes(r'(?i)^using\s((Ansible\..+)|'
+ r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+));\s*$')),
+ ]
+
+ self._re_cs_in_ps_module = [
+ # Reference C# module_util in a PowerShell module
+ # '#AnsibleRequires -CSharpUtil Ansible.{name}'
+ # '#AnsibleRequires -CSharpUtil ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
+ # '#AnsibleRequires -CSharpUtil ..module_utils.{name}'
+ re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((Ansible\..+)|'
+ r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
+ r'(\.[\w\.]+))')),
+ ]
+
+ self._re_ps_module = [
+ # Original way of referencing a builtin module_util
+            # '#Requires -Module Ansible.ModuleUtils.{name}'
+ re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)')),
+ # New way of referencing a builtin and collection module_util
+ # '#AnsibleRequires -PowerShell Ansible.ModuleUtils.{name}'
+ # '#AnsibleRequires -PowerShell ansible_collections.{namespace}.{collection}.plugins.module_utils.{name}'
+ # '#AnsibleRequires -PowerShell ..module_utils.{name}'
+ re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-powershell\s+((Ansible\.ModuleUtils\..+)|'
+ r'(ansible_collections\.\w+\.\w+\.plugins\.module_utils\.[\w\.]+)|'
+ r'(\.[\w\.]+))')),
+ ]
+
+ self._re_wrapper = re.compile(to_bytes(r'(?i)^#\s*ansiblerequires\s+-wrapper\s+(\w*)'))
+ self._re_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
+ self._re_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
+ self._re_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))
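These patterns recognise the dependency directives that module authors place at the top of their
PowerShell and C# source, for example (MyUtil and the ns.coll collection are hypothetical):

    #Requires -Module Ansible.ModuleUtils.Legacy
    #Requires -Version 3.0
    #AnsibleRequires -CSharpUtil Ansible.Basic
    #AnsibleRequires -PowerShell ansible_collections.ns.coll.plugins.module_utils.MyUtil
    #AnsibleRequires -OSVersion 6.2
    #AnsibleRequires -Become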
+
+ def scan_module(self, module_data, fqn=None, wrapper=False, powershell=True):
+ lines = module_data.split(b'\n')
+ module_utils = set()
+ if wrapper:
+ cs_utils = self.cs_utils_wrapper
+ else:
+ cs_utils = self.cs_utils_module
+
+ if powershell:
+ checks = [
+ # PS module contains '#Requires -Module Ansible.ModuleUtils.*'
+ # PS module contains '#AnsibleRequires -Powershell Ansible.*' (or collections module_utils ref)
+ (self._re_ps_module, self.ps_modules, ".psm1"),
+ # PS module contains '#AnsibleRequires -CSharpUtil Ansible.*' (or collections module_utils ref)
+ (self._re_cs_in_ps_module, cs_utils, ".cs"),
+ ]
+ else:
+ checks = [
+ # CS module contains 'using Ansible.*;' or 'using ansible_collections.ns.coll.plugins.module_utils.*;'
+ (self._re_cs_module, cs_utils, ".cs"),
+ ]
+
+ for line in lines:
+ for check in checks:
+ for pattern in check[0]:
+ match = pattern.match(line)
+ if match:
+ # tolerate windows line endings by stripping any remaining
+ # newline chars
+ module_util_name = to_text(match.group(1).rstrip())
+
+ if module_util_name not in check[1].keys():
+ module_utils.add((module_util_name, check[2], fqn))
+
+ break
+
+ if powershell:
+ ps_version_match = self._re_ps_version.match(line)
+ if ps_version_match:
+ self._parse_version_match(ps_version_match, "ps_version")
+
+ os_version_match = self._re_os_version.match(line)
+ if os_version_match:
+ self._parse_version_match(os_version_match, "os_version")
+
+ # once become is set, no need to keep on checking recursively
+ if not self.become:
+ become_match = self._re_become.match(line)
+ if become_match:
+ self.become = True
+
+ if wrapper:
+ wrapper_match = self._re_wrapper.match(line)
+ if wrapper_match:
+ self.scan_exec_script(wrapper_match.group(1).rstrip())
+
+ # recursively drill into each Requires to see if there are any more
+ # requirements
+ for m in set(module_utils):
+ self._add_module(m, wrapper=wrapper)
+
+ def scan_exec_script(self, name):
+ # scans lib/ansible/executor/powershell for scripts used in the module
+ # exec side. It also scans these scripts for any dependencies
+ name = to_text(name)
+ if name in self.exec_scripts.keys():
+ return
+
+ data = pkgutil.get_data("ansible.executor.powershell", to_native(name + ".ps1"))
+ if data is None:
+ raise AnsibleError("Could not find executor powershell script "
+ "for '%s'" % name)
+
+ b_data = to_bytes(data)
+
+ # remove comments to reduce the payload size in the exec wrappers
+ if C.DEFAULT_DEBUG:
+ exec_script = b_data
+ else:
+ exec_script = _strip_comments(b_data)
+ self.exec_scripts[name] = to_bytes(exec_script)
+ self.scan_module(b_data, wrapper=True, powershell=True)
+
+ def _add_module(self, name, wrapper=False):
+ m, ext, fqn = name
+ m = to_text(m)
+
+ util_fqn = None
+
+ if m.startswith("Ansible."):
+ # Builtin util, use plugin loader to get the data
+ mu_path = ps_module_utils_loader.find_plugin(m, ext)
+
+ if not mu_path:
+ raise AnsibleError('Could not find imported module support code '
+ 'for \'%s\'' % m)
+
+ module_util_data = to_bytes(_slurp(mu_path))
+ else:
+ # Collection util, load the package data based on the util import.
+
+ submodules = m.split(".")
+ if m.startswith('.'):
+ fqn_submodules = fqn.split('.')
+ for submodule in submodules:
+ if submodule:
+ break
+ del fqn_submodules[-1]
+
+ submodules = fqn_submodules + [s for s in submodules if s]
+
+ n_package_name = to_native('.'.join(submodules[:-1]), errors='surrogate_or_strict')
+ n_resource_name = to_native(submodules[-1] + ext, errors='surrogate_or_strict')
+
+ try:
+ module_util = import_module(n_package_name)
+ module_util_data = to_bytes(pkgutil.get_data(n_package_name, n_resource_name),
+ errors='surrogate_or_strict')
+ util_fqn = to_text("%s.%s " % (n_package_name, submodules[-1]), errors='surrogate_or_strict')
+
+ # Get the path of the util which is required for coverage collection.
+ resource_paths = list(module_util.__path__)
+ if len(resource_paths) != 1:
+ # This should never happen with a collection but we are just being defensive about it.
+ raise AnsibleError("Internal error: Referenced module_util package '%s' contains 0 or multiple "
+ "import locations when we only expect 1." % n_package_name)
+ mu_path = os.path.join(resource_paths[0], n_resource_name)
+ except OSError as err:
+ if err.errno == errno.ENOENT:
+ raise AnsibleError('Could not find collection imported module support code for \'%s\''
+ % to_native(m))
+ else:
+ raise
+
+ util_info = {
+ 'data': module_util_data,
+ 'path': to_text(mu_path),
+ }
+ if ext == ".psm1":
+ self.ps_modules[m] = util_info
+ else:
+ if wrapper:
+ self.cs_utils_wrapper[m] = util_info
+ else:
+ self.cs_utils_module[m] = util_info
+ self.scan_module(module_util_data, fqn=util_fqn, wrapper=wrapper, powershell=(ext == ".psm1"))
+
+ def _parse_version_match(self, match, attribute):
+ new_version = to_text(match.group(1)).rstrip()
+
+ # PowerShell cannot cast a string of "1" to Version, it must have at
+ # least the major.minor for it to be valid so we append 0
+ if match.group(2) is None:
+ new_version = "%s.0" % new_version
+
+ existing_version = getattr(self, attribute, None)
+ if existing_version is None:
+ setattr(self, attribute, new_version)
+ else:
+ # determine which is the latest version and set that
+ if LooseVersion(new_version) > LooseVersion(existing_version):
+ setattr(self, attribute, new_version)
+
+
+def _slurp(path):
+ if not os.path.exists(path):
+ raise AnsibleError("imported module support code does not exist at %s"
+ % os.path.abspath(path))
+ fd = open(path, 'rb')
+ data = fd.read()
+ fd.close()
+ return data
+
+
+def _strip_comments(source):
+ # Strip comments and blank lines from the wrapper
+ buf = []
+ start_block = False
+ for line in source.splitlines():
+ l = line.strip()
+
+ if start_block and l.endswith(b'#>'):
+ start_block = False
+ continue
+ elif start_block:
+ continue
+ elif l.startswith(b'<#'):
+ start_block = True
+ continue
+ elif not l or l.startswith(b'#'):
+ continue
+
+ buf.append(line)
+ return b'\n'.join(buf)
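On a wrapper source this keeps only executable lines; block comments, line comments, and blank
lines all disappear. A sketch of what it does to PowerShell input:

    <# this block comment
       is dropped #>
    # this line comment and the blank line below are dropped too

    Write-Output 'only executable lines like this one survive'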
+
+
+def _create_powershell_wrapper(b_module_data, module_path, module_args,
+ environment, async_timeout, become,
+ become_method, become_user, become_password,
+ become_flags, substyle, task_vars, module_fqn):
+ # creates the manifest/wrapper used in PowerShell/C# modules to enable
+ # things like become and async - this is also called in action/script.py
+
+ # FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
+ # if running under a persistent connection and substyle is C# so we
+ # don't have type conflicts
+ finder = PSModuleDepFinder()
+ if substyle != 'script':
+ # don't scan the module for util dependencies and other Ansible related
+ # flags if the substyle is 'script' which is set by action/script
+ finder.scan_module(b_module_data, fqn=module_fqn, powershell=(substyle == "powershell"))
+
+ module_wrapper = "module_%s_wrapper" % substyle
+ exec_manifest = dict(
+ module_entry=to_text(base64.b64encode(b_module_data)),
+ powershell_modules=dict(),
+ csharp_utils=dict(),
+ csharp_utils_module=list(), # csharp_utils only required by a module
+ module_args=module_args,
+ actions=[module_wrapper],
+ environment=environment,
+ encoded_output=False,
+ )
+ finder.scan_exec_script(module_wrapper)
+
+ if async_timeout > 0:
+ finder.scan_exec_script('exec_wrapper')
+ finder.scan_exec_script('async_watchdog')
+ finder.scan_exec_script('async_wrapper')
+
+ exec_manifest["actions"].insert(0, 'async_watchdog')
+ exec_manifest["actions"].insert(0, 'async_wrapper')
+ exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
+ exec_manifest["async_timeout_sec"] = async_timeout
+ exec_manifest["async_startup_timeout"] = C.config.get_config_value("WIN_ASYNC_STARTUP_TIMEOUT", variables=task_vars)
+
+ if become and resource_from_fqcr(become_method) == 'runas': # runas and namespace.collection.runas
+ finder.scan_exec_script('exec_wrapper')
+ finder.scan_exec_script('become_wrapper')
+
+ exec_manifest["actions"].insert(0, 'become_wrapper')
+ exec_manifest["become_user"] = become_user
+ exec_manifest["become_password"] = become_password
+ exec_manifest['become_flags'] = become_flags
+
+ exec_manifest['min_ps_version'] = finder.ps_version
+ exec_manifest['min_os_version'] = finder.os_version
+ if finder.become and 'become_wrapper' not in exec_manifest['actions']:
+ finder.scan_exec_script('exec_wrapper')
+ finder.scan_exec_script('become_wrapper')
+
+ exec_manifest['actions'].insert(0, 'become_wrapper')
+ exec_manifest['become_user'] = 'SYSTEM'
+ exec_manifest['become_password'] = None
+ exec_manifest['become_flags'] = None
+
+ coverage_manifest = dict(
+ module_path=module_path,
+ module_util_paths=dict(),
+ output=None,
+ )
+ coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
+ if coverage_output and substyle == 'powershell':
+ finder.scan_exec_script('coverage_wrapper')
+ coverage_manifest['output'] = coverage_output
+
+ coverage_whitelist = C.config.get_config_value('COVERAGE_REMOTE_WHITELIST', variables=task_vars)
+ coverage_manifest['whitelist'] = coverage_whitelist
+
+ # make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
+ if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
+ finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None),
+ wrapper=False)
+
+    # exec_wrapper is only required to be part of the payload if using
+    # become or async. To save on payload space we record whether exec_wrapper
+    # was already required and remove it from the payload later if it was not
+ exec_required = "exec_wrapper" in finder.exec_scripts.keys()
+ finder.scan_exec_script("exec_wrapper")
+    # must end with an empty line so the begin/process/end blocks are run
+ finder.exec_scripts["exec_wrapper"] += b"\n\n"
+
+ exec_wrapper = finder.exec_scripts["exec_wrapper"]
+ if not exec_required:
+ finder.exec_scripts.pop("exec_wrapper")
+
+ for name, data in finder.exec_scripts.items():
+ b64_data = to_text(base64.b64encode(data))
+ exec_manifest[name] = b64_data
+
+ for name, data in finder.ps_modules.items():
+ b64_data = to_text(base64.b64encode(data['data']))
+ exec_manifest['powershell_modules'][name] = b64_data
+ coverage_manifest['module_util_paths'][name] = data['path']
+
+ cs_utils = {}
+ for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
+ for name, data in cs_util.items():
+ cs_utils[name] = data['data']
+
+ for name, data in cs_utils.items():
+ b64_data = to_text(base64.b64encode(data))
+ exec_manifest['csharp_utils'][name] = b64_data
+ exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
+
+ # To save on the data we are sending across we only add the coverage info if coverage is being run
+ if 'coverage_wrapper' in exec_manifest:
+ exec_manifest['coverage'] = coverage_manifest
+
+ b_json = to_bytes(json.dumps(exec_manifest))
+ # delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
+ b_data = exec_wrapper + b'\0\0\0\0' + b_json
+ return b_data
diff --git a/lib/ansible/executor/powershell/module_powershell_wrapper.ps1 b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
new file mode 100644
index 00000000..70069c02
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_powershell_wrapper.ps1
@@ -0,0 +1,73 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Payload
+)
+
+#AnsibleRequires -Wrapper module_wrapper
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting module_powershell_wrapper" "module_powershell_wrapper"
+
+$module_name = $Payload.module_args["_ansible_module_name"]
+Write-AnsibleLog "INFO - building module payload for '$module_name'" "module_powershell_wrapper"
+
+# compile any C# module utils passed in from the controller, Add-CSharpType is
+# automatically added to the payload manifest if any csharp util is set
+$csharp_utils = [System.Collections.ArrayList]@()
+foreach ($csharp_util in $Payload.csharp_utils_module) {
+ Write-AnsibleLog "INFO - adding $csharp_util to list of C# references to compile" "module_powershell_wrapper"
+ $util_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.csharp_utils[$csharp_util]))
+ $csharp_utils.Add($util_code) > $null
+}
+if ($csharp_utils.Count -gt 0) {
+ $add_type_b64 = $Payload.powershell_modules["Ansible.ModuleUtils.AddType"]
+ $add_type = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($add_type_b64))
+ New-Module -Name Ansible.ModuleUtils.AddType -ScriptBlock ([ScriptBlock]::Create($add_type)) | Import-Module > $null
+
+ # add any C# references so the module does not have to do so
+ $new_tmp = [System.Environment]::ExpandEnvironmentVariables($Payload.module_args["_ansible_remote_tmp"])
+ Add-CSharpType -References $csharp_utils -TempPath $new_tmp -IncludeDebugInfo
+}
+
+if ($Payload.ContainsKey("coverage") -and $null -ne $host.Runspace -and $null -ne $host.Runspace.Debugger) {
+ $entrypoint = $payload.coverage_wrapper
+
+ $params = @{
+ Payload = $Payload
+ }
+} else {
+ # get the common module_wrapper code and invoke that to run the module
+ $module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
+ $variables = [System.Collections.ArrayList]@(@{ Name = "complex_args"; Value = $Payload.module_args; Scope = "Global" })
+ $entrypoint = $Payload.module_wrapper
+
+ $params = @{
+ Scripts = @($script:common_functions, $module)
+ Variables = $variables
+ Environment = $Payload.environment
+ Modules = $Payload.powershell_modules
+ ModuleName = $module_name
+ }
+}
+
+$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
+$entrypoint = [ScriptBlock]::Create($entrypoint)
+
+try {
+ &$entrypoint @params
+} catch {
+ # failed to invoke the PowerShell module, capture the exception and
+ # output a pretty error for Ansible to parse
+ $result = @{
+ msg = "Failed to invoke PowerShell module: $($_.Exception.Message)"
+ failed = $true
+ exception = (Format-AnsibleException -ErrorRecord $_)
+ }
+ Write-Output -InputObject (ConvertTo-Json -InputObject $result -Depth 99 -Compress)
+ $host.SetShouldExit(1)
+}
+
+Write-AnsibleLog "INFO - ending module_powershell_wrapper" "module_powershell_wrapper"
diff --git a/lib/ansible/executor/powershell/module_script_wrapper.ps1 b/lib/ansible/executor/powershell/module_script_wrapper.ps1
new file mode 100644
index 00000000..7a2e4ba4
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_script_wrapper.ps1
@@ -0,0 +1,22 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+param(
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Payload
+)
+
+#AnsibleRequires -Wrapper module_wrapper
+
+$ErrorActionPreference = "Stop"
+
+Write-AnsibleLog "INFO - starting module_script_wrapper" "module_script_wrapper"
+
+$script = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Payload.module_entry))
+
+# get the common module_wrapper code and invoke that to run the module
+$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($payload.module_wrapper))
+$entrypoint = [ScriptBlock]::Create($entrypoint)
+
+&$entrypoint -Scripts $script -Environment $Payload.environment -ModuleName "script"
+
+Write-AnsibleLog "INFO - ending module_script_wrapper" "module_script_wrapper"
diff --git a/lib/ansible/executor/powershell/module_wrapper.ps1 b/lib/ansible/executor/powershell/module_wrapper.ps1
new file mode 100644
index 00000000..68e425ff
--- /dev/null
+++ b/lib/ansible/executor/powershell/module_wrapper.ps1
@@ -0,0 +1,221 @@
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+<#
+.SYNOPSIS
+Invokes an Ansible module in a new Runspace. This cmdlet will output the
+module's output and write any errors to the error stream of the current
+host.
+
+.PARAMETER Scripts
+[Object[]] String or ScriptBlocks to execute.
+
+.PARAMETER Variables
+[System.Collections.ArrayList] The variables to set in the new Pipeline.
+Each value is a hashtable that contains the parameters to use with
+Set-Variable;
+ Name: the name of the variable to set
+ Value: the value of the variable to set
+ Scope: the scope of the variable
+
+.PARAMETER Environment
+[System.Collections.IDictionary] A Dictionary of environment key/values to
+set in the new Pipeline.
+
+.PARAMETER Modules
+[System.Collections.IDictionary] A Dictionary of PowerShell modules to
+import into the new Pipeline. The key is the name of the module and the
+value is a base64 string of the module util code.
+
+.PARAMETER ModuleName
+[String] The name of the module that is being executed.
+
+.PARAMETER Breakpoints
+A list of line breakpoints to add to the runspace debugger. This is used to
+track module and module_utils coverage.
+#>
+param(
+ [Object[]]$Scripts,
+ [System.Collections.ArrayList][AllowEmptyCollection()]$Variables,
+ [System.Collections.IDictionary]$Environment,
+ [System.Collections.IDictionary]$Modules,
+ [String]$ModuleName,
+ [System.Management.Automation.LineBreakpoint[]]$Breakpoints = @()
+)
+
+Write-AnsibleLog "INFO - creating new PowerShell pipeline for $ModuleName" "module_wrapper"
+$ps = [PowerShell]::Create()
+
+# do not set ErrorActionPreference for script
+if ($ModuleName -ne "script") {
+ $ps.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop")
+}
+
+# force input encoding to preamble-free UTF8 so PS sub-processes (eg,
+# Start-Job) don't blow up. This is only required for WinRM; a PSRP
+# runspace doesn't have a host console and setting it there would bomb out
+if ($host.Name -eq "ConsoleHost") {
+ Write-AnsibleLog "INFO - setting console input encoding to UTF8 for $ModuleName" "module_wrapper"
+ $ps.AddScript('[Console]::InputEncoding = New-Object Text.UTF8Encoding $false').AddStatement() > $null
+}
+
+# set the variables
+foreach ($variable in $Variables) {
+ Write-AnsibleLog "INFO - setting variable '$($variable.Name)' for $ModuleName" "module_wrapper"
+ $ps.AddCommand("Set-Variable").AddParameters($variable).AddStatement() > $null
+}
+
+# set the environment vars
+if ($Environment) {
+ # Escaping quotes can be problematic, instead just pass the string to the runspace and set it directly.
+ Write-AnsibleLog "INFO - setting environment vars for $ModuleName" "module_wrapper"
+ $ps.Runspace.SessionStateProxy.SetVariable("_AnsibleEnvironment", $Environment)
+ $ps.AddScript(@'
+foreach ($env_kv in $_AnsibleEnvironment.GetEnumerator()) {
+ [System.Environment]::SetEnvironmentVariable($env_kv.Key, $env_kv.Value)
+}
+'@).AddStatement() > $null
+}
+
+# import the PS modules
+if ($Modules) {
+ foreach ($module in $Modules.GetEnumerator()) {
+ Write-AnsibleLog "INFO - create module util '$($module.Key)' for $ModuleName" "module_wrapper"
+ $module_name = $module.Key
+ $module_code = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($module.Value))
+ $ps.AddCommand("New-Module").AddParameters(@{Name=$module_name; ScriptBlock=[ScriptBlock]::Create($module_code)}) > $null
+ $ps.AddCommand("Import-Module").AddParameter("WarningAction", "SilentlyContinue") > $null
+ $ps.AddCommand("Out-Null").AddStatement() > $null
+ }
+}
+
+# redefine Write-Host to dump to output instead of failing
+# lots of scripts still use it
+$ps.AddScript('Function Write-Host($msg) { Write-Output -InputObject $msg }').AddStatement() > $null
+
+# add the scripts and run
+foreach ($script in $Scripts) {
+ $ps.AddScript($script).AddStatement() > $null
+}
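Each AddScript/AddStatement pair appends a separate statement to the one pipeline, and all of
them share the runspace's session state, which is how variables and modules set up earlier are
visible to the module script. The pattern in isolation:

    $ps = [PowerShell]::Create()
    $ps.AddScript('$x = 40').AddStatement() > $null
    $ps.AddScript('$x + 2').AddStatement() > $null
    $ps.Invoke()   # -> 42, both statements ran in the same session state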
+
+if ($Breakpoints.Count -gt 0) {
+ Write-AnsibleLog "INFO - adding breakpoint to runspace that will run the modules" "module_wrapper"
+ if ($PSVersionTable.PSVersion.Major -eq 3) {
+ # The SetBreakpoints method was only added in PowerShell v4+. We need to rely on a private method to
+ # achieve the same functionality in this older PowerShell version. This should be removed once we drop
+ # support for PowerShell v3.
+ $set_method = $ps.Runspace.Debugger.GetType().GetMethod(
+ 'AddLineBreakpoint', [System.Reflection.BindingFlags]'Instance, NonPublic'
+ )
+ foreach ($b in $Breakpoints) {
+ $set_method.Invoke($ps.Runspace.Debugger, [Object[]]@(,$b)) > $null
+ }
+ } else {
+ $ps.Runspace.Debugger.SetBreakpoints($Breakpoints)
+ }
+}
+
+Write-AnsibleLog "INFO - start module exec with Invoke() - $ModuleName" "module_wrapper"
+
+# temporarily override the stdout stream and create our own in a StringBuilder
+# we use this to ensure there's always an Out pipe and that we capture the
+# output for things like async or psrp
+$orig_out = [System.Console]::Out
+$sb = New-Object -TypeName System.Text.StringBuilder
+$new_out = New-Object -TypeName System.IO.StringWriter -ArgumentList $sb
+try {
+ [System.Console]::SetOut($new_out)
+ $module_output = $ps.Invoke()
+} catch {
+ # uncaught exception while executing module, present a prettier error for
+ # Ansible to parse
+ $error_params = @{
+ Message = "Unhandled exception while executing module"
+ ErrorRecord = $_
+ }
+
+ # Be more defensive when trying to find the InnerException in case it isn't
+ # set. This shouldn't ever be the case but if it is then it makes it more
+ # difficult to track down the problem.
+ if ($_.Exception.PSObject.Properties.Name -contains "InnerException") {
+ $inner_exception = $_.Exception.InnerException
+ if ($inner_exception.PSObject.Properties.Name -contains "ErrorRecord") {
+ $error_params.ErrorRecord = $inner_exception.ErrorRecord
+ }
+ }
+
+ Write-AnsibleError @error_params
+ $host.SetShouldExit(1)
+ return
+} finally {
+ [System.Console]::SetOut($orig_out)
+ $new_out.Dispose()
+}
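Swapping [Console]::Out for a StringWriter is what guarantees module output is captured even when
there is no real console attached (async, psrp). The capture pattern on its own:

    $sb = New-Object -TypeName System.Text.StringBuilder
    $writer = New-Object -TypeName System.IO.StringWriter -ArgumentList $sb
    $orig = [System.Console]::Out
    try {
        [System.Console]::SetOut($writer)
        [System.Console]::WriteLine('captured')
    } finally {
        [System.Console]::SetOut($orig)
        $writer.Dispose()
    }
    $sb.ToString()   # -> "captured" plus a trailing newline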
+
+# other types of errors may not throw an exception in Invoke but rather just
+# set the pipeline state to failed
+if ($ps.InvocationStateInfo.State -eq "Failed" -and $ModuleName -ne "script") {
+ $reason = $ps.InvocationStateInfo.Reason
+ $error_params = @{
+ Message = "Unhandled exception while executing module"
+ }
+
+ # The error record should always be set on the reason but this does not
+ # always happen on Server 2008 R2 for some reason (probably memory hotfix).
+ # Be defensive when trying to get the error record and fall back to other
+ # options.
+ if ($null -eq $reason) {
+ $error_params.Message += ": Unknown error"
+ } elseif ($reason.PSObject.Properties.Name -contains "ErrorRecord") {
+ $error_params.ErrorRecord = $reason.ErrorRecord
+ } else {
+ $error_params.Message += ": $($reason.ToString())"
+ }
+
+ Write-AnsibleError @error_params
+ $host.SetShouldExit(1)
+ return
+}
+
+Write-AnsibleLog "INFO - module exec ended $ModuleName" "module_wrapper"
+$stdout = $sb.ToString()
+if ($stdout) {
+ Write-Output -InputObject $stdout
+}
+if ($module_output.Count -gt 0) {
+ # do not output if empty collection
+ Write-AnsibleLog "INFO - using the output stream for module output - $ModuleName" "module_wrapper"
+ Write-Output -InputObject ($module_output -join "`r`n")
+}
+
+# we attempt to get the return code from the LASTEXITCODE variable
+# this is set explicitly by newer style modules when calling
+# ExitJson and FailJson. If set we set the current host's exit code
+# to that same value
+$rc = $ps.Runspace.SessionStateProxy.GetVariable("LASTEXITCODE")
+if ($null -ne $rc) {
+ Write-AnsibleLog "INFO - got an rc of $rc from $ModuleName exec" "module_wrapper"
+ $host.SetShouldExit($rc)
+}
+
+# PS3 doesn't properly set HadErrors in many cases, so inspect the error stream as a fallback.
+# With the trap handler that's now in place, this should only write to the output if
+# $ErrorActionPreference != "Stop". That's ok because this is sent to the stderr output
+# for a user to manually debug if something went horribly wrong
+if ($ps.HadErrors -or ($PSVersionTable.PSVersion.Major -lt 4 -and $ps.Streams.Error.Count -gt 0)) {
+ Write-AnsibleLog "WARN - module had errors, outputting error info $ModuleName" "module_wrapper"
+ # if the rc wasn't explicitly set, we return an exit code of 1
+ if ($null -eq $rc) {
+ $host.SetShouldExit(1)
+ }
+
+ # output each error to the error stream of the current pipeline
+ foreach ($err in $ps.Streams.Error) {
+ $error_msg = Format-AnsibleException -ErrorRecord $err
+
+        # need to use the current host's UI class as we may not have
+        # a console to write the stderr to, e.g. psrp
+        Write-AnsibleLog "WARN - error msg for $($ModuleName):`r`n$error_msg" "module_wrapper"
+ $host.UI.WriteErrorLine($error_msg)
+ }
+}
diff --git a/lib/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py
new file mode 100644
index 00000000..ae8ccff5
--- /dev/null
+++ b/lib/ansible/executor/process/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
new file mode 100644
index 00000000..0b18fc35
--- /dev/null
+++ b/lib/ansible/executor/process/worker.py
@@ -0,0 +1,223 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import traceback
+
+from jinja2.exceptions import TemplateNotFound
+
+HAS_PYCRYPTO_ATFORK = False
+try:
+ from Crypto.Random import atfork
+ HAS_PYCRYPTO_ATFORK = True
+except Exception:
+ # We only need to call atfork if pycrypto is used because it will need to
+ # reinitialize its RNG. Since old paramiko could be using pycrypto, we
+ # need to take charge of calling it.
+ pass
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.executor.task_executor import TaskExecutor
+from ansible.executor.task_result import TaskResult
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+from ansible.utils.multiprocessing import context as multiprocessing_context
+
+__all__ = ['WorkerProcess']
+
+display = Display()
+
+
+class WorkerProcess(multiprocessing_context.Process):
+ '''
+ The worker thread class, which uses TaskExecutor to run tasks
+ read from a job queue and pushes results into a results queue
+ for reading later.
+ '''
+
+ def __init__(self, final_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
+
+ super(WorkerProcess, self).__init__()
+ # takes a task queue manager as the sole param:
+ self._final_q = final_q
+ self._task_vars = task_vars
+ self._host = host
+ self._task = task
+ self._play_context = play_context
+ self._loader = loader
+ self._variable_manager = variable_manager
+ self._shared_loader_obj = shared_loader_obj
+
+ # NOTE: this works due to fork; if switching to threads this should change to per-thread storage of temp files
+ # clear var to ensure we only delete files for this child
+ self._loader._tempfiles = set()
+
+ def _save_stdin(self):
+ self._new_stdin = os.devnull
+ try:
+ if sys.stdin.isatty() and sys.stdin.fileno() is not None:
+ try:
+ self._new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
+ except OSError:
+ # couldn't dupe stdin, most likely because it's
+ # not a valid file descriptor, so we just rely on
+ # using the one that was passed in
+ pass
+ except (AttributeError, ValueError):
+ # couldn't get stdin's fileno, so we just carry on
+ pass
+
+ def start(self):
+ '''
+ multiprocessing.Process replaces the worker's stdin with a new file
+ opened on os.devnull, but we wish to preserve it if it is connected to
+ a terminal. Therefore dup a copy prior to calling the real start(),
+ ensuring the descriptor is preserved somewhere in the new child, and
+ make sure it is closed in the parent when start() completes.
+ '''
+
+ self._save_stdin()
+ try:
+ return super(WorkerProcess, self).start()
+ finally:
+ if self._new_stdin != os.devnull:
+ self._new_stdin.close()
+
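`start()` above exists because `multiprocessing.Process` rebinds the child's stdin to `os.devnull`; the parent therefore dups its own stdin first and closes the duplicate once the fork has inherited it. A self-contained sketch of the same pattern, assuming the POSIX fork start method (class and names are illustrative, not Ansible's API):

```python
import multiprocessing
import os
import sys

_ctx = multiprocessing.get_context('fork')  # fork: the child inherits open fds

class StdinPreservingProcess(_ctx.Process):
    def __init__(self):
        super().__init__()
        self._new_stdin = os.devnull  # sentinel: no usable stdin

    def start(self):
        try:
            if sys.stdin.isatty():
                # dup() the descriptor so the forked child keeps the terminal
                self._new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
        except (AttributeError, ValueError, OSError):
            pass  # stdin is not a real descriptor; fall back to devnull
        try:
            return super().start()
        finally:
            # the child now owns its copy; close the parent's duplicate
            if self._new_stdin != os.devnull:
                self._new_stdin.close()

    def run(self):
        print('child has a usable stdin:', self._new_stdin != os.devnull)

if __name__ == '__main__':
    p = StdinPreservingProcess()
    p.start()
    p.join()
```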
+ def _hard_exit(self, e):
+ '''
+ There is no safe exception to return to higher level code that does not
+ risk an innocent try/except finding itself executing in the wrong
+ process. All code executing above WorkerProcess.run() on the stack
+ conceptually belongs to another program.
+ '''
+
+ try:
+ display.debug(u"WORKER HARD EXIT: %s" % to_text(e))
+ except BaseException:
+ # If the cause of the fault is IOError being generated by stdio,
+ # attempting to log a debug message may trigger another IOError.
+ # Try printing once then give up.
+ pass
+
+ os._exit(1)
+
+ def run(self):
+ '''
+ Wrap _run() to ensure no possibility an errant exception can cause
+ control to return to the StrategyBase task loop, or any other code
+ higher in the stack.
+
+ As multiprocessing in Python 2.x provides no protection, it is possible
+ a try/except added in far-away code can cause a crashed child process
+ to suddenly assume the role and prior state of its parent.
+ '''
+ try:
+ return self._run()
+ except BaseException as e:
+ self._hard_exit(e)
+
+ def _run(self):
+ '''
+ Called when the process is started. Pushes the result onto the
+ results queue; reading that result is what lets the strategy unblock
+ the host for its next task.
+ '''
+
+ # import cProfile, pstats, StringIO
+ # pr = cProfile.Profile()
+ # pr.enable()
+
+ if HAS_PYCRYPTO_ATFORK:
+ atfork()
+
+ try:
+ # execute the task and build a TaskResult from the result
+ display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
+ executor_result = TaskExecutor(
+ self._host,
+ self._task,
+ self._task_vars,
+ self._play_context,
+ self._new_stdin,
+ self._loader,
+ self._shared_loader_obj,
+ self._final_q
+ ).run()
+
+ display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid))
+ self._host.vars = dict()
+ self._host.groups = []
+ task_result = TaskResult(
+ self._host.name,
+ self._task._uuid,
+ executor_result,
+ task_fields=self._task.dump_attrs(),
+ )
+
+ # put the result on the result queue
+ display.debug("sending task result for task %s" % self._task._uuid)
+ self._final_q.put(task_result)
+ display.debug("done sending task result for task %s" % self._task._uuid)
+
+ except AnsibleConnectionFailure:
+ self._host.vars = dict()
+ self._host.groups = []
+ task_result = TaskResult(
+ self._host.name,
+ self._task._uuid,
+ dict(unreachable=True),
+ task_fields=self._task.dump_attrs(),
+ )
+ self._final_q.put(task_result, block=False)
+
+ except Exception as e:
+ if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
+ try:
+ self._host.vars = dict()
+ self._host.groups = []
+ task_result = TaskResult(
+ self._host.name,
+ self._task._uuid,
+ dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''),
+ task_fields=self._task.dump_attrs(),
+ )
+ self._final_q.put(task_result, block=False)
+ except Exception:
+ display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
+ display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))
+ finally:
+ self._clean_up()
+
+ display.debug("WORKER PROCESS EXITING")
+
+ # pr.disable()
+ # s = StringIO.StringIO()
+ # sortby = 'time'
+ # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+ # ps.print_stats()
+ # with open('worker_%06d.stats' % os.getpid(), 'w') as f:
+ # f.write(s.getvalue())
+
+ def _clean_up(self):
+ # NOTE: see note in init about forks
+ # ensure we cleanup all temp files for this worker
+ self._loader.cleanup_all_tmp_files()
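The `run()`/`_hard_exit()` pairing above implements a containment pattern worth knowing on its own: in a forked child, every stack frame above `run()` belongs to the parent program, so letting an exception unwind past `run()` could execute parent-side `except`/`finally` blocks in the child. A stripped-down sketch (names are illustrative):

```python
import multiprocessing
import os
import traceback

_ctx = multiprocessing.get_context('fork')

class ContainedWorker(_ctx.Process):
    def run(self):
        # never let anything escape run(); even KeyboardInterrupt/SystemExit
        # must not unwind into frames the child shares with its parent
        try:
            self._do_work()
        except BaseException:
            try:
                traceback.print_exc()
            except BaseException:
                pass  # stdio itself may be broken; give up on logging
            os._exit(1)  # exit immediately, skipping atexit and outer finally

    def _do_work(self):
        raise RuntimeError('boom')

if __name__ == '__main__':
    w = ContainedWorker()
    w.start()
    w.join()
    print('child exit code:', w.exitcode)  # 1, not a propagated traceback
```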
diff --git a/lib/ansible/executor/stats.py b/lib/ansible/executor/stats.py
new file mode 100644
index 00000000..30ecc5e7
--- /dev/null
+++ b/lib/ansible/executor/stats.py
@@ -0,0 +1,99 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.utils.vars import merge_hash
+
+
+class AggregateStats:
+ ''' holds stats about per-host activity during playbook runs '''
+
+ def __init__(self):
+
+ self.processed = {}
+ self.failures = {}
+ self.ok = {}
+ self.dark = {}
+ self.changed = {}
+ self.skipped = {}
+ self.rescued = {}
+ self.ignored = {}
+
+ # user defined stats, which can be per host or global
+ self.custom = {}
+
+ def increment(self, what, host):
+ ''' helper function to bump a statistic '''
+
+ self.processed[host] = 1
+ prev = (getattr(self, what)).get(host, 0)
+ getattr(self, what)[host] = prev + 1
+
+ def decrement(self, what, host):
+ _what = getattr(self, what)
+ try:
+ if _what[host] - 1 < 0:
+ # This should never happen, but let's be safe
+ raise KeyError("Don't be so negative")
+ _what[host] -= 1
+ except KeyError:
+ _what[host] = 0
+
+ def summarize(self, host):
+ ''' return information about a particular host '''
+
+ return dict(
+ ok=self.ok.get(host, 0),
+ failures=self.failures.get(host, 0),
+ unreachable=self.dark.get(host, 0),
+ changed=self.changed.get(host, 0),
+ skipped=self.skipped.get(host, 0),
+ rescued=self.rescued.get(host, 0),
+ ignored=self.ignored.get(host, 0),
+ )
+
+ def set_custom_stats(self, which, what, host=None):
+ ''' allow setting of a custom stat'''
+
+ if host is None:
+ host = '_run'
+ if host not in self.custom:
+ self.custom[host] = {which: what}
+ else:
+ self.custom[host][which] = what
+
+ def update_custom_stats(self, which, what, host=None):
+ ''' allow aggregation of a custom stat'''
+
+ if host is None:
+ host = '_run'
+ if host not in self.custom or which not in self.custom[host]:
+ return self.set_custom_stats(which, what, host)
+
+ # mismatching types
+ if not isinstance(what, type(self.custom[host][which])):
+ return None
+
+ if isinstance(what, MutableMapping):
+ self.custom[host][which] = merge_hash(self.custom[host][which], what)
+ else:
+ # let overloaded + take care of other types
+ self.custom[host][which] += what
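Exercising the class above makes the custom-stats semantics concrete: dict-valued stats merge via `merge_hash`, anything else accumulates through `+`, and global stats live under the `'_run'` pseudo-host. A hypothetical session (host names invented):

```python
from ansible.executor.stats import AggregateStats

stats = AggregateStats()
stats.increment('ok', 'web01')
stats.increment('ok', 'web01')
stats.increment('changed', 'web01')
stats.increment('failures', 'db01')

print(stats.summarize('web01'))
# {'ok': 2, 'failures': 0, 'unreachable': 0, 'changed': 1,
#  'skipped': 0, 'rescued': 0, 'ignored': 0}

# dict-valued custom stats merge; scalars accumulate via '+'
stats.update_custom_stats('timings', {'setup': 1.2}, host='web01')
stats.update_custom_stats('timings', {'deploy': 3.4}, host='web01')
stats.update_custom_stats('restarts', 1)       # global, stored under '_run'
stats.update_custom_stats('restarts', 2)
print(stats.custom['web01']['timings'])        # {'setup': 1.2, 'deploy': 3.4}
print(stats.custom['_run']['restarts'])        # 3
```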
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
new file mode 100644
index 00000000..ec1fc976
--- /dev/null
+++ b/lib/ansible/executor/task_executor.py
@@ -0,0 +1,1178 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import pty
+import time
+import json
+import signal
+import subprocess
+import sys
+import termios
+import traceback
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure, AnsibleActionFail, AnsibleActionSkip
+from ansible.executor.task_result import TaskResult
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import iteritems, string_types, binary_type
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.connection import write_to_file_descriptor
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.task import Task
+from ansible.plugins.loader import become_loader, cliconf_loader, connection_loader, httpapi_loader, netconf_loader, terminal_loader
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.unsafe_proxy import to_unsafe_text, wrap_var
+from ansible.vars.clean import namespace_facts, clean_facts
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars, isidentifier
+
+display = Display()
+
+
+RETURN_VARS = [x for x in C.MAGIC_VARIABLE_MAPPING.items() if 'become' not in x and '_pass' not in x]
+
+__all__ = ['TaskExecutor']
+
+
+class TaskTimeoutError(BaseException):
+ pass
+
+
+def task_timeout(signum, frame):
+ raise TaskTimeoutError
+
+
+def remove_omit(task_args, omit_token):
+ '''
+ Recursively remove args whose value equals the ``omit_token``; recursion
+ is needed because suboptions in the argument_spec can also carry it
+ '''
+
+ if not isinstance(task_args, dict):
+ return task_args
+
+ new_args = {}
+ for i in iteritems(task_args):
+ if i[1] == omit_token:
+ continue
+ elif isinstance(i[1], dict):
+ new_args[i[0]] = remove_omit(i[1], omit_token)
+ elif isinstance(i[1], list):
+ new_args[i[0]] = [remove_omit(v, omit_token) for v in i[1]]
+ else:
+ new_args[i[0]] = i[1]
+
+ return new_args
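The recursion above matters because the omit token can hide inside suboptions and list items, not just at the top level. A quick illustration using the function as defined above, with a made-up token (real tokens are generated per run):

```python
OMIT = '__omit_place_holder__deadbeef'  # illustrative; Ansible generates one per run

task_args = {
    'name': 'httpd',
    'state': OMIT,
    'conf': {'owner': 'root', 'mode': OMIT},
    'mounts': [{'src': '/srv', 'opts': OMIT}],
}

print(remove_omit(task_args, OMIT))
# {'name': 'httpd', 'conf': {'owner': 'root'}, 'mounts': [{'src': '/srv'}]}
```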
+
+
+class TaskExecutor:
+
+ '''
+ This is the main worker class for the executor pipeline, which
+ handles loading an action plugin to actually dispatch the task to
+ a given host. This class roughly corresponds to the old Runner()
+ class.
+ '''
+
+ # Modules that we optimize by squashing loop items into a single call to
+ # the module
+ SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
+
+ def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, final_q):
+ self._host = host
+ self._task = task
+ self._job_vars = job_vars
+ self._play_context = play_context
+ self._new_stdin = new_stdin
+ self._loader = loader
+ self._shared_loader_obj = shared_loader_obj
+ self._connection = None
+ self._final_q = final_q
+ self._loop_eval_error = None
+
+ self._task.squash()
+
+ def run(self):
+ '''
+ The main executor entrypoint, where we determine if the specified
+ task requires looping and either runs the task with self._run_loop()
+ or self._execute(). After that, the returned results are parsed and
+ returned as a dict.
+ '''
+
+ display.debug("in run() - task %s" % self._task._uuid)
+
+ try:
+ try:
+ items = self._get_loop_items()
+ except AnsibleUndefinedVariable as e:
+ # save the error raised here for use later
+ items = None
+ self._loop_eval_error = e
+
+ if items is not None:
+ if len(items) > 0:
+ item_results = self._run_loop(items)
+
+ # create the overall result item
+ res = dict(results=item_results)
+
+ # loop through the item results, and set the global changed/failed result flags based on any item.
+ for item in item_results:
+ if 'changed' in item and item['changed'] and not res.get('changed'):
+ res['changed'] = True
+ if 'failed' in item and item['failed']:
+ item_ignore = item.pop('_ansible_ignore_errors')
+ if not res.get('failed'):
+ res['failed'] = True
+ res['msg'] = 'One or more items failed'
+ self._task.ignore_errors = item_ignore
+ elif self._task.ignore_errors and not item_ignore:
+ self._task.ignore_errors = item_ignore
+
+ # ensure to accumulate these
+ for array in ['warnings', 'deprecations']:
+ if array in item and item[array]:
+ if array not in res:
+ res[array] = []
+ if not isinstance(item[array], list):
+ item[array] = [item[array]]
+ res[array] = res[array] + item[array]
+ del item[array]
+
+ if not res.get('failed', False):
+ res['msg'] = 'All items completed'
+ else:
+ res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
+ else:
+ display.debug("calling self._execute()")
+ res = self._execute()
+ display.debug("_execute() done")
+
+ # make sure changed is set in the result, if it's not present
+ if 'changed' not in res:
+ res['changed'] = False
+
+ def _clean_res(res, errors='surrogate_or_strict'):
+ if isinstance(res, binary_type):
+ return to_unsafe_text(res, errors=errors)
+ elif isinstance(res, dict):
+ for k in res:
+ try:
+ res[k] = _clean_res(res[k], errors=errors)
+ except UnicodeError:
+ if k == 'diff':
+ # If this is a diff, substitute a replacement character if the value
+ # is undecodable as utf8. (Fix #21804)
+ display.warning("We were unable to decode all characters in the module return data."
+ " Replaced some in an effort to return as much as possible")
+ res[k] = _clean_res(res[k], errors='surrogate_then_replace')
+ else:
+ raise
+ elif isinstance(res, list):
+ for idx, item in enumerate(res):
+ res[idx] = _clean_res(item, errors=errors)
+ return res
+
+ display.debug("dumping result to json")
+ res = _clean_res(res)
+ display.debug("done dumping result, returning")
+ return res
+ except AnsibleError as e:
+ return dict(failed=True, msg=wrap_var(to_text(e, nonstring='simplerepr')), _ansible_no_log=self._play_context.no_log)
+ except Exception as e:
+ return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_text(traceback.format_exc()),
+ stdout='', _ansible_no_log=self._play_context.no_log)
+ finally:
+ try:
+ self._connection.close()
+ except AttributeError:
+ pass
+ except Exception as e:
+ display.debug(u"error closing connection: %s" % to_text(e))
+
+ def _get_loop_items(self):
+ '''
+ Loads a lookup plugin to handle the with_* portion of a task (if specified),
+ and returns the items result.
+ '''
+
+ # get search path for this task to pass to lookup plugins
+ self._job_vars['ansible_search_path'] = self._task.get_search_path()
+
+ # ensure basedir is always in (dwim already searches here but we need to display it)
+ if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
+ self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
+
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
+ items = None
+ loop_cache = self._job_vars.get('_ansible_loop_cache')
+ if loop_cache is not None:
+ # _ansible_loop_cache may be set in `get_vars` when calculating `delegate_to`
+ # to avoid reprocessing the loop
+ items = loop_cache
+ elif self._task.loop_with:
+ if self._task.loop_with in self._shared_loader_obj.lookup_loader:
+ fail = True
+ if self._task.loop_with == 'first_found':
+ # first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing.
+ fail = False
+
+ loop_terms = listify_lookup_plugin_terms(terms=self._task.loop, templar=templar, loader=self._loader, fail_on_undefined=fail,
+ convert_bare=False)
+ if not fail:
+ loop_terms = [t for t in loop_terms if not templar.is_template(t)]
+
+ # get lookup
+ mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop_with, loader=self._loader, templar=templar)
+
+ # give lookup task 'context' for subdir (mostly needed for first_found)
+ for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
+ if subdir in self._task.action:
+ break
+ setattr(mylookup, '_subdir', subdir + 's')
+
+ # run lookup
+ items = wrap_var(mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True))
+ else:
+ raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop_with)
+
+ elif self._task.loop is not None:
+ items = templar.template(self._task.loop)
+ if not isinstance(items, list):
+ raise AnsibleError(
+ "Invalid data passed to 'loop', it requires a list, got this instead: %s."
+ " Hint: If you passed a list/dict of just one element,"
+ " try adding wantlist=True to your lookup invocation or use q/query instead of lookup." % items
+ )
+
+ return items
+
+ def _run_loop(self, items):
+ '''
+ Runs the task with the loop items specified and collates the result
+ into an array named 'results' which is inserted into the final result
+ along with the item for which the loop ran.
+ '''
+
+ results = []
+
+ # make copies of the job vars and task so we can add the item to
+ # the variables and re-validate the task with the item variable
+ # task_vars = self._job_vars.copy()
+ task_vars = self._job_vars
+
+ loop_var = 'item'
+ index_var = None
+ label = None
+ loop_pause = 0
+ extended = False
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
+
+ # FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
+ if self._task.loop_control:
+ loop_var = templar.template(self._task.loop_control.loop_var)
+ index_var = templar.template(self._task.loop_control.index_var)
+ loop_pause = templar.template(self._task.loop_control.pause)
+ extended = templar.template(self._task.loop_control.extended)
+
+ # This may be 'None', so it is templated below after we ensure a value and an item is assigned
+ label = self._task.loop_control.label
+
+ # ensure we always have a label
+ if label is None:
+ label = '{{' + loop_var + '}}'
+
+ if loop_var in task_vars:
+ display.warning(u"The loop variable '%s' is already in use. "
+ u"You should set the `loop_var` value in the `loop_control` option for the task"
+ u" to something else to avoid variable collisions and unexpected behavior." % loop_var)
+
+ ran_once = False
+ if self._task.loop_with:
+ # Only squash with 'with_:', not with 'loop:'; 'magic' squashing can be removed once with_ loops are deprecated
+ items = self._squash_items(items, loop_var, task_vars)
+
+ no_log = False
+ items_len = len(items)
+ for item_index, item in enumerate(items):
+ task_vars['ansible_loop_var'] = loop_var
+
+ task_vars[loop_var] = item
+ if index_var:
+ task_vars['ansible_index_var'] = index_var
+ task_vars[index_var] = item_index
+
+ if extended:
+ task_vars['ansible_loop'] = {
+ 'allitems': items,
+ 'index': item_index + 1,
+ 'index0': item_index,
+ 'first': item_index == 0,
+ 'last': item_index + 1 == items_len,
+ 'length': items_len,
+ 'revindex': items_len - item_index,
+ 'revindex0': items_len - item_index - 1,
+ }
+ try:
+ task_vars['ansible_loop']['nextitem'] = items[item_index + 1]
+ except IndexError:
+ pass
+ if item_index - 1 >= 0:
+ task_vars['ansible_loop']['previtem'] = items[item_index - 1]
+
+ # Update template vars to reflect current loop iteration
+ templar.available_variables = task_vars
+
+ # pause between loop iterations
+ if loop_pause and ran_once:
+ try:
+ time.sleep(float(loop_pause))
+ except ValueError as e:
+ raise AnsibleError('Invalid pause value: %s, produced error: %s' % (loop_pause, to_native(e)))
+ else:
+ ran_once = True
+
+ try:
+ tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
+ tmp_task._parent = self._task._parent
+ tmp_play_context = self._play_context.copy()
+ except AnsibleParserError as e:
+ results.append(dict(failed=True, msg=to_text(e)))
+ continue
+
+ # now we swap the internal task and play context with their copies,
+ # execute, and swap them back so we can do the next iteration cleanly
+ (self._task, tmp_task) = (tmp_task, self._task)
+ (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
+ res = self._execute(variables=task_vars)
+ task_fields = self._task.dump_attrs()
+ (self._task, tmp_task) = (tmp_task, self._task)
+ (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
+
+ # update 'general no_log' based on specific no_log
+ no_log = no_log or tmp_task.no_log
+
+ # now update the result with the item info, and append the result
+ # to the list of results
+ res[loop_var] = item
+ res['ansible_loop_var'] = loop_var
+ if index_var:
+ res[index_var] = item_index
+ res['ansible_index_var'] = index_var
+ if extended:
+ res['ansible_loop'] = task_vars['ansible_loop']
+
+ res['_ansible_item_result'] = True
+ res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')
+
+ # gets templated here unlike rest of loop_control fields, depends on loop_var above
+ try:
+ res['_ansible_item_label'] = templar.template(label, cache=False)
+ except AnsibleUndefinedVariable as e:
+ res.update({
+ 'failed': True,
+ 'msg': 'Failed to template loop_control.label: %s' % to_text(e)
+ })
+
+ self._final_q.put(
+ TaskResult(
+ self._host.name,
+ self._task._uuid,
+ res,
+ task_fields=task_fields,
+ ),
+ block=False,
+ )
+ results.append(res)
+ del task_vars[loop_var]
+
+ # clear 'connection related' plugin variables for next iteration
+ if self._connection:
+ clear_plugins = {
+ 'connection': self._connection._load_name,
+ 'shell': self._connection._shell._load_name
+ }
+ if self._connection.become:
+ clear_plugins['become'] = self._connection.become._load_name
+
+ for plugin_type, plugin_name in iteritems(clear_plugins):
+ for var in C.config.get_plugin_vars(plugin_type, plugin_name):
+ if var in task_vars and var not in self._job_vars:
+ del task_vars[var]
+
+ self._task.no_log = no_log
+
+ return results
+
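Most of `_run_loop()` is plumbing, but the `ansible_loop` block is pure index arithmetic and easy to reproduce standalone. A sketch that yields the same fields for any list (helper name is illustrative):

```python
def extended_loop_vars(items):
    """Yield (item, info) pairs mirroring the 'ansible_loop' dict built above."""
    n = len(items)
    for i, item in enumerate(items):
        info = {
            'allitems': items,
            'index': i + 1,           # 1-based
            'index0': i,              # 0-based
            'first': i == 0,
            'last': i + 1 == n,
            'length': n,
            'revindex': n - i,
            'revindex0': n - i - 1,
        }
        if i + 1 < n:
            info['nextitem'] = items[i + 1]
        if i > 0:
            info['previtem'] = items[i - 1]
        yield item, info

for item, info in extended_loop_vars(['a', 'b', 'c']):
    print(item, info['index'], info['first'], info['last'])
```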
+ def _squash_items(self, items, loop_var, variables):
+ '''
+ Squash items down to a comma-separated list for certain modules which support it
+ (typically package management modules).
+ '''
+ name = None
+ try:
+ # _task.action could contain templatable strings (via action: and
+ # local_action:) Template it before comparing. If we don't end up
+ # optimizing it here, the templatable string might use template vars
+ # that aren't available until later (it could even use vars from the
+ # with_items loop) so don't make the templated string permanent yet.
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
+ task_action = self._task.action
+ if templar.is_template(task_action):
+ task_action = templar.template(task_action, fail_on_undefined=False)
+
+ if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
+ if all(isinstance(o, string_types) for o in items):
+ final_items = []
+
+ found = None
+ for allowed in ['name', 'pkg', 'package']:
+ name = self._task.args.pop(allowed, None)
+ if name is not None:
+ found = allowed
+ break
+
+ # This gets the information to check whether the name field
+ # contains a template that we can squash for
+ template_no_item = template_with_item = None
+ if name:
+ if templar.is_template(name):
+ variables[loop_var] = '\0$'
+ template_no_item = templar.template(name, variables, cache=False)
+ variables[loop_var] = '\0@'
+ template_with_item = templar.template(name, variables, cache=False)
+ del variables[loop_var]
+
+ # Check if the user is doing some operation that doesn't take
+ # name/pkg or the name/pkg field doesn't have any variables
+ # and thus the items can't be squashed
+ if template_no_item != template_with_item:
+ if self._task.loop_with and self._task.loop_with not in ('items', 'list'):
+ value_text = "\"{{ query('%s', %r) }}\"" % (self._task.loop_with, self._task.loop)
+ else:
+ value_text = '%r' % self._task.loop
+ # Without knowing the data structure well, it's easiest to strip python2 unicode
+ # literals after stringifying
+ value_text = re.sub(r"\bu'", "'", value_text)
+
+ display.deprecated(
+ 'Invoking "%s" only once while using a loop via squash_actions is deprecated. '
+ 'Instead of using a loop to supply multiple items and specifying `%s: "%s"`, '
+ 'please use `%s: %s` and remove the loop' % (self._task.action, found, name, found, value_text),
+ version='2.11', collection_name='ansible.builtin'
+ )
+ for item in items:
+ variables[loop_var] = item
+ if self._task.evaluate_conditional(templar, variables):
+ new_item = templar.template(name, cache=False)
+ final_items.append(new_item)
+ self._task.args['name'] = final_items
+ # Wrap this in a list so that the calling function loop
+ # executes exactly once
+ return [final_items]
+ else:
+ # Restore the name parameter
+ self._task.args['name'] = name
+ # elif:
+ # Right now we only optimize single entries. In the future we
+ # could optimize more types:
+ # * lists can be squashed together
+ # * dicts could squash entries that match in all cases except the
+ # name or pkg field.
+ except Exception:
+ # Squashing is an optimization. If it fails for any reason,
+ # simply use the unoptimized list of items.
+
+ # Restore the name parameter
+ if name is not None:
+ self._task.args['name'] = name
+ return items
+
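Stripped of templating and per-item conditionals, `_squash_items()` performs a simple rewrite: move the loop's items into a single list-valued `name`/`pkg`/`package` argument so the module runs once. A deliberately simplified sketch of that core transformation, not the full logic above:

```python
def squash_items(items, args, squashable_keys=('name', 'pkg', 'package')):
    """Collapse a loop over package names into a single list-valued arg.

    Simplified: assumes args[key] is the bare loop variable, with no
    conditionals or templating to evaluate per item.
    """
    for key in squashable_keys:
        if key in args:
            args[key] = list(items)      # one call with the whole list
            return [args[key]]           # outer loop now runs exactly once
    return items                         # nothing to squash; loop as normal

args = {'name': '{{ item }}', 'state': 'present'}
print(squash_items(['httpd', 'vim', 'git'], args))  # [['httpd', 'vim', 'git']]
print(args['name'])                                 # ['httpd', 'vim', 'git']
```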
+ def _execute(self, variables=None):
+ '''
+ The primary workhorse of the executor system, this runs the task
+ on the specified host (which may be the delegated_to host) and handles
+ the retry/until and block rescue/always execution
+ '''
+
+ if variables is None:
+ variables = self._job_vars
+
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
+
+ context_validation_error = None
+ try:
+ # TODO: remove play_context as this does not take delegation into account, task itself should hold values
+ # for connection/shell/become/terminal plugin options to finalize.
+ # Kept for now for backwards compatibility and a few functions that are still exclusive to it.
+
+ # apply the given task's information to the connection info,
+ # which may override some fields already set by the play or
+ # the options specified on the command line
+ self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
+
+ # fields set from the play/task may be based on variables, so we have to
+ # do the same kind of post validation step on it here before we use it.
+ self._play_context.post_validate(templar=templar)
+
+ # now that the play context is finalized, if the remote_addr is not set
+ # default to using the host's address field as the remote address
+ if not self._play_context.remote_addr:
+ self._play_context.remote_addr = self._host.address
+
+ # We also add "magic" variables back into the variables dict to make sure
+ # a certain subset of variables exist.
+ self._play_context.update_vars(variables)
+
+ except AnsibleError as e:
+ # save the error, which we'll raise later if we don't end up
+ # skipping this task during the conditional evaluation step
+ context_validation_error = e
+
+ # Evaluate the conditional (if any) for this task, which we do before running
+ # the final task post-validation. We do this before the post validation due to
+ # the fact that the conditional may specify that the task be skipped due to a
+ # variable not being present which would otherwise cause validation to fail
+ try:
+ if not self._task.evaluate_conditional(templar, variables):
+ display.debug("when evaluation is False, skipping this task")
+ return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
+ except AnsibleError as e:
+ # loop error takes precedence
+ if self._loop_eval_error is not None:
+ # Display the error from the conditional as well to prevent
+ # losing information useful for debugging.
+ display.v(to_text(e))
+ raise self._loop_eval_error # pylint: disable=raising-bad-type
+ raise
+
+ # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
+ if self._loop_eval_error is not None:
+ raise self._loop_eval_error # pylint: disable=raising-bad-type
+
+ # if we ran into an error while setting up the PlayContext, raise it now
+ if context_validation_error is not None:
+ raise context_validation_error # pylint: disable=raising-bad-type
+
+ # if this task is a TaskInclude, we just return now with a success code so the
+ # main thread can expand the task list for the given host
+ if self._task.action in C._ACTION_ALL_INCLUDE_TASKS:
+ include_args = self._task.args.copy()
+ include_file = include_args.pop('_raw_params', None)
+ if not include_file:
+ return dict(failed=True, msg="No include file was specified to the include")
+
+ include_file = templar.template(include_file)
+ return dict(include=include_file, include_args=include_args)
+
+ # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
+ elif self._task.action in C._ACTION_INCLUDE_ROLE:
+ include_args = self._task.args.copy()
+ return dict(include_args=include_args)
+
+ # Now we do final validation on the task, which sets all fields to their final values.
+ try:
+ self._task.post_validate(templar=templar)
+ except AnsibleError:
+ raise
+ except Exception:
+ return dict(changed=False, failed=True, _ansible_no_log=self._play_context.no_log, exception=to_text(traceback.format_exc()))
+
+ if '_variable_params' in self._task.args:
+ variable_params = self._task.args.pop('_variable_params')
+ if isinstance(variable_params, dict):
+ if C.INJECT_FACTS_AS_VARS:
+ display.warning("Using a variable for a task's 'args' is unsafe in some situations "
+ "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
+ variable_params.update(self._task.args)
+ self._task.args = variable_params
+
+ if self._task.delegate_to:
+ # use vars from delegated host (which already include task vars) instead of original host
+ cvars = variables.get('ansible_delegated_vars', {}).get(self._task.delegate_to, {})
+ orig_vars = templar.available_variables
+ else:
+ # just use normal host vars
+ cvars = orig_vars = variables
+
+ templar.available_variables = cvars
+
+ # get the connection and the handler for this execution
+ if (not self._connection or
+ not getattr(self._connection, 'connected', False) or
+ self._play_context.remote_addr != self._connection._play_context.remote_addr):
+ self._connection = self._get_connection(cvars, templar)
+ else:
+ # if connection is reused, its _play_context is no longer valid and needs
+ # to be replaced with the one templated above, in case other data changed
+ self._connection._play_context = self._play_context
+
+ plugin_vars = self._set_connection_options(cvars, templar)
+ templar.available_variables = orig_vars
+
+ # get handler
+ self._handler = self._get_action_handler(connection=self._connection, templar=templar)
+
+ # Apply default params for action/module, if present
+ self._task.args = get_action_args_with_defaults(
+ self._task.action, self._task.args, self._task.module_defaults, templar, self._task._ansible_internal_redirect_list
+ )
+
+ # And filter out any fields which were set to default(omit), and got the omit token value
+ omit_token = variables.get('omit')
+ if omit_token is not None:
+ self._task.args = remove_omit(self._task.args, omit_token)
+
+ # Read some values from the task, so that we can modify them if need be
+ if self._task.until:
+ retries = self._task.retries
+ if retries is None:
+ retries = 3
+ elif retries <= 0:
+ retries = 1
+ else:
+ retries += 1
+ else:
+ retries = 1
+
+ delay = self._task.delay
+ if delay < 0:
+ delay = 1
+
+ # make a copy of the job vars here, in case we need to update them
+ # with the registered variable value later on when testing conditions
+ vars_copy = variables.copy()
+
+ display.debug("starting attempt loop")
+ result = None
+ for attempt in xrange(1, retries + 1):
+ display.debug("running the handler")
+ try:
+ if self._task.timeout:
+ old_sig = signal.signal(signal.SIGALRM, task_timeout)
+ signal.alarm(self._task.timeout)
+ result = self._handler.run(task_vars=variables)
+ except AnsibleActionSkip as e:
+ return dict(skipped=True, msg=to_text(e))
+ except AnsibleActionFail as e:
+ return dict(failed=True, msg=to_text(e))
+ except AnsibleConnectionFailure as e:
+ return dict(unreachable=True, msg=to_text(e))
+ except TaskTimeoutError as e:
+ msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
+ return dict(failed=True, msg=msg)
+ finally:
+ if self._task.timeout:
+ signal.alarm(0)
+ old_sig = signal.signal(signal.SIGALRM, old_sig)
+ self._handler.cleanup()
+ display.debug("handler run complete")
+
+ # preserve no log
+ result["_ansible_no_log"] = self._play_context.no_log
+
+ # update the local copy of vars with the registered value, if specified,
+ # or any facts which may have been generated by the module execution
+ if self._task.register:
+ if not isidentifier(self._task.register):
+ raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)
+
+ vars_copy[self._task.register] = result = wrap_var(result)
+
+ if self._task.async_val > 0:
+ if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
+ result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
+ # FIXME callback 'v2_runner_on_async_poll' here
+
+ # ensure no log is preserved
+ result["_ansible_no_log"] = self._play_context.no_log
+
+ # helper methods for use below in evaluating changed/failed_when
+ def _evaluate_changed_when_result(result):
+ if self._task.changed_when is not None and self._task.changed_when:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.changed_when
+ result['changed'] = cond.evaluate_conditional(templar, vars_copy)
+
+ def _evaluate_failed_when_result(result):
+ if self._task.failed_when:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.failed_when
+ failed_when_result = cond.evaluate_conditional(templar, vars_copy)
+ result['failed_when_result'] = result['failed'] = failed_when_result
+ else:
+ failed_when_result = False
+ return failed_when_result
+
+ if 'ansible_facts' in result:
+ if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
+ vars_copy.update(result['ansible_facts'])
+ else:
+ # TODO: cleaning of facts should eventually become part of taskresults instead of vars
+ af = wrap_var(result['ansible_facts'])
+ vars_copy['ansible_facts'] = combine_vars(vars_copy.get('ansible_facts', {}), namespace_facts(af))
+ if C.INJECT_FACTS_AS_VARS:
+ vars_copy.update(clean_facts(af))
+
+ # set the failed property if it was missing.
+ if 'failed' not in result:
+ # rc is here for backwards compatibility and modules that use it instead of 'failed'
+ if 'rc' in result and result['rc'] not in [0, "0"]:
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ # Make attempts and retries available early to allow their use in changed/failed_when
+ if self._task.until:
+ result['attempts'] = attempt
+
+ # set the changed property if it was missing.
+ if 'changed' not in result:
+ result['changed'] = False
+
+ # re-update the local copy of vars with the registered value, if specified,
+ # or any facts which may have been generated by the module execution
+ # This gives changed/failed_when access to additional recently modified
+ # attributes of result
+ if self._task.register:
+ vars_copy[self._task.register] = result = wrap_var(result)
+
+ # if we didn't skip this task, use the helpers to evaluate the changed/
+ # failed_when properties
+ if 'skipped' not in result:
+ _evaluate_changed_when_result(result)
+ _evaluate_failed_when_result(result)
+
+ if retries > 1:
+ cond = Conditional(loader=self._loader)
+ cond.when = self._task.until
+ if cond.evaluate_conditional(templar, vars_copy):
+ break
+ else:
+ # no conditional check, or it failed, so sleep for the specified time
+ if attempt < retries:
+ result['_ansible_retry'] = True
+ result['retries'] = retries
+ display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
+ self._final_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
+ time.sleep(delay)
+ self._handler = self._get_action_handler(connection=self._connection, templar=templar)
+ else:
+ if retries > 1:
+ # we ran out of attempts, so mark the result as failed
+ result['attempts'] = retries - 1
+ result['failed'] = True
+
+ # do the final update of the local variables here, for both registered
+ # values and any facts which may have been created
+ if self._task.register:
+ variables[self._task.register] = result = wrap_var(result)
+
+ if 'ansible_facts' in result:
+ if self._task.action in C._ACTION_WITH_CLEAN_FACTS:
+ variables.update(result['ansible_facts'])
+ else:
+ # TODO: cleaning of facts should eventually become part of taskresults instead of vars
+ af = wrap_var(result['ansible_facts'])
+ variables['ansible_facts'] = combine_vars(variables.get('ansible_facts', {}), namespace_facts(af))
+ if C.INJECT_FACTS_AS_VARS:
+ variables.update(clean_facts(af))
+
+ # save the notification target in the result, if it was specified, as
+ # this task may be running in a loop in which case the notification
+ # may be item-specific, i.e. "notify: service {{item}}"
+ if self._task.notify is not None:
+ result['_ansible_notify'] = self._task.notify
+
+ # add the delegated vars to the result, so we can reference them
+ # on the results side without having to do any further templating
+ # also add connection vars results when delegating
+ if self._task.delegate_to:
+ result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
+ for k in plugin_vars:
+ result["_ansible_delegated_vars"][k] = cvars.get(k)
+
+ # and return
+ display.debug("attempt loop complete, returning result")
+ return result
+
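The retries arithmetic near the top of `_execute()` is easy to misread: with `until` set, an explicit `retries: N` allows N+1 handler runs, an unset value defaults to three runs in total, and non-positive values mean a single run. A minimal standalone sketch of that attempt loop (simplified; no result registration or conditional evaluation):

```python
import time

def run_with_until(run_once, until, retries=None, delay=5):
    """Rerun run_once() until the `until` predicate holds.

    Mirrors the quirk above: an explicit retries=N allows N+1 attempts,
    retries=None defaults to 3 attempts, and non-positive values mean 1.
    """
    if retries is None:
        attempts = 3
    elif retries <= 0:
        attempts = 1
    else:
        attempts = retries + 1
    delay = 1 if delay < 0 else delay

    result = None
    for attempt in range(1, attempts + 1):
        result = run_once()
        result['attempts'] = attempt
        if until(result):
            return result
        if attempt < attempts:
            time.sleep(delay)
    result['failed'] = True  # attempts exhausted
    return result

calls = iter([{'rc': 1}, {'rc': 1}, {'rc': 0}])
print(run_with_until(lambda: dict(next(calls)),
                     lambda r: r['rc'] == 0, retries=3, delay=0))
# {'rc': 0, 'attempts': 3}
```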
+ def _poll_async_result(self, result, templar, task_vars=None):
+ '''
+ Polls for the specified JID to be complete
+ '''
+
+ if task_vars is None:
+ task_vars = self._job_vars
+
+ async_jid = result.get('ansible_job_id')
+ if async_jid is None:
+ return dict(failed=True, msg="No job id was returned by the async task")
+
+ # Create a new pseudo-task to run the async_status module, and run
+ # that (with a sleep for "poll" seconds between each retry) until the
+ # async time limit is exceeded.
+
+ async_task = Task().load(dict(action='async_status jid=%s' % async_jid, environment=self._task.environment))
+
+ # FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized
+ # Because this is an async task, the action handler is async. However,
+ # we need the 'normal' action handler for the status check, so get it
+ # now via the action_loader
+ async_handler = self._shared_loader_obj.action_loader.get(
+ 'ansible.legacy.async_status',
+ task=async_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=templar,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+
+ time_left = self._task.async_val
+ while time_left > 0:
+ time.sleep(self._task.poll)
+
+ try:
+ async_result = async_handler.run(task_vars=task_vars)
+ # We do not bail out of the loop in cases where the failure
+ # is associated with a parsing error. The async_runner can
+ # have issues which result in a half-written/unparseable result
+ # file on disk, which manifests to the user as a timeout happening
+ # before it's time to timeout.
+ if (int(async_result.get('finished', 0)) == 1 or
+ ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
+ 'skipped' in async_result):
+ break
+ except Exception as e:
+ # Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
+ # On an exception, call the connection's reset method if it has one
+ # (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
+ display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
+ display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
+ try:
+ async_handler._connection.reset()
+ except AttributeError:
+ pass
+
+ # Little hack to raise the exception if we've exhausted the timeout period
+ time_left -= self._task.poll
+ if time_left <= 0:
+ raise
+ else:
+ time_left -= self._task.poll
+
+ if int(async_result.get('finished', 0)) != 1:
+ if async_result.get('_ansible_parsed'):
+ return dict(failed=True, msg="async task did not complete within the requested time - %ss" % self._task.async_val)
+ else:
+ return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
+ else:
+ async_handler.cleanup(force=True)
+ return async_result
+
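The loop above generalizes to any fire-and-poll job API: sleep `poll` seconds, query status, tolerate transient errors, and re-raise only once the `async` time budget is spent. A library-agnostic sketch of that shape (illustrative names, not Ansible's API):

```python
import time

def poll_async(check_status, async_timeout, poll_interval):
    """Poll check_status() until it reports finished or the budget runs out."""
    time_left = async_timeout
    while time_left > 0:
        time.sleep(poll_interval)
        try:
            status = check_status()
            if int(status.get('finished', 0)) == 1:
                return status
        except Exception:
            # transient failure (network bounce, half-written result file):
            # keep polling, but re-raise once the budget is exhausted
            time_left -= poll_interval
            if time_left <= 0:
                raise
            continue
        time_left -= poll_interval
    return {'failed': True,
            'msg': 'async task did not complete within %ss' % async_timeout}

ticks = iter([{'finished': 0}, Exception('bounce'), {'finished': 1}])

def check():
    t = next(ticks)
    if isinstance(t, Exception):
        raise t
    return t

print(poll_async(check, async_timeout=10, poll_interval=1))  # {'finished': 1}
```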
+ def _get_become(self, name):
+ become = become_loader.get(name)
+ if not become:
+ raise AnsibleError("Invalid become method specified, could not find matching plugin: '%s'. "
+ "Use `ansible-doc -t become -l` to list available plugins." % name)
+ return become
+
+ def _get_connection(self, cvars, templar):
+ '''
+ Reads the connection property for the host, and returns the
+ correct connection object from the list of connection plugins
+ '''
+
+ # use magic var if it exists, if not, let task inheritance do its thing.
+ if cvars.get('ansible_connection') is not None:
+ self._play_context.connection = templar.template(cvars['ansible_connection'])
+ else:
+ self._play_context.connection = self._task.connection
+
+ # TODO: play context has logic to update the connection for 'smart'
+ # (default value, will choose between ssh and paramiko) and 'persistent'
+ # (really paramiko), eventually this should move to task object itself.
+ connection_name = self._play_context.connection
+
+ # load connection
+ conn_type = connection_name
+ connection, plugin_load_context = self._shared_loader_obj.connection_loader.get_with_context(
+ conn_type,
+ self._play_context,
+ self._new_stdin,
+ task_uuid=self._task._uuid,
+ ansible_playbook_pid=to_text(os.getppid())
+ )
+
+ if not connection:
+ raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
+
+ # load become plugin if needed
+ if cvars.get('ansible_become') is not None:
+ become = boolean(templar.template(cvars['ansible_become']))
+ else:
+ become = self._task.become
+
+ if become:
+ if cvars.get('ansible_become_method'):
+ become_plugin = self._get_become(templar.template(cvars['ansible_become_method']))
+ else:
+ become_plugin = self._get_become(self._task.become_method)
+
+ try:
+ connection.set_become_plugin(become_plugin)
+ except AttributeError:
+ # Older connection plugin that does not support set_become_plugin
+ pass
+
+ if getattr(connection.become, 'require_tty', False) and not getattr(connection, 'has_tty', False):
+ raise AnsibleError(
+ "The '%s' connection does not provide a TTY which is required for the selected "
+ "become plugin: %s." % (conn_type, become_plugin.name)
+ )
+
+ # Backwards compat for connection plugins that don't support become plugins
+ # Just do this unconditionally for now, we could move it inside of the
+ # AttributeError above later
+ self._play_context.set_become_plugin(become_plugin.name)
+
+ # Also backwards compat call for those still using play_context
+ self._play_context.set_attributes_from_plugin(connection)
+
+ if any(((connection.supports_persistence and C.USE_PERSISTENT_CONNECTIONS), connection.force_persistence)):
+ self._play_context.timeout = connection.get_option('persistent_command_timeout')
+ display.vvvv('attempting to start connection', host=self._play_context.remote_addr)
+ display.vvvv('using connection plugin %s' % connection.transport, host=self._play_context.remote_addr)
+
+ options = self._get_persistent_connection_options(connection, cvars, templar)
+ socket_path = start_connection(self._play_context, options, self._task._uuid)
+ display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr)
+ setattr(connection, '_socket_path', socket_path)
+
+ return connection
+
+ def _get_persistent_connection_options(self, connection, final_vars, templar):
+
+ option_vars = C.config.get_plugin_vars('connection', connection._load_name)
+ plugin = connection._sub_plugin
+ if plugin.get('type'):
+ option_vars.extend(C.config.get_plugin_vars(plugin['type'], plugin['name']))
+
+ options = {}
+ for k in option_vars:
+ if k in final_vars:
+ options[k] = templar.template(final_vars[k])
+
+ return options
+
+ def _set_plugin_options(self, plugin_type, variables, templar, task_keys):
+ try:
+ plugin = getattr(self._connection, '_%s' % plugin_type)
+ except AttributeError:
+ # Some plugins are assigned to private attrs, ``become`` is not
+ plugin = getattr(self._connection, plugin_type)
+
+ option_vars = C.config.get_plugin_vars(plugin_type, plugin._load_name)
+ options = {}
+ for k in option_vars:
+ if k in variables:
+ options[k] = templar.template(variables[k])
+ # TODO move to task method?
+ plugin.set_options(task_keys=task_keys, var_options=options)
+
+ return option_vars
+
+ def _set_connection_options(self, variables, templar):
+
+ # keep list of variable names possibly consumed
+ varnames = []
+
+ # grab list of usable vars for this plugin
+ option_vars = C.config.get_plugin_vars('connection', self._connection._load_name)
+ varnames.extend(option_vars)
+
+ # create dict of 'templated vars'
+ options = {'_extras': {}}
+ for k in option_vars:
+ if k in variables:
+ options[k] = templar.template(variables[k])
+
+ # add extras if plugin supports them
+ if getattr(self._connection, 'allow_extras', False):
+ for k in variables:
+ if k.startswith('ansible_%s_' % self._connection._load_name) and k not in options:
+ options['_extras'][k] = templar.template(variables[k])
+
+ task_keys = self._task.dump_attrs()
+
+ # The task_keys 'timeout' attr is the task's timeout, not the connection timeout.
+ # The connection timeout is threaded through the play_context for now.
+ task_keys['timeout'] = self._play_context.timeout
+
+ if self._play_context.password:
+ # The connection password is threaded through the play_context for
+ # now. This is something we ultimately want to avoid, but the first
+ # step is to get connection plugins pulling the password through the
+ # config system instead of directly accessing play_context.
+ task_keys['password'] = self._play_context.password
+
+ # set options with 'templated vars' specific to this plugin and dependent ones
+ self._connection.set_options(task_keys=task_keys, var_options=options)
+ varnames.extend(self._set_plugin_options('shell', variables, templar, task_keys))
+
+ if self._connection.become is not None:
+ if self._play_context.become_pass:
+ # FIXME: eventually remove from task and play_context, here for backwards compat
+ # keep out of play objects to avoid accidental disclosure, only become plugin should have
+ # The become pass is already in the play_context if given on
+ # the CLI (-K). Make the plugin aware of it in this case.
+ task_keys['become_pass'] = self._play_context.become_pass
+
+ varnames.extend(self._set_plugin_options('become', variables, templar, task_keys))
+
+ # FOR BACKWARDS COMPAT:
+ for option in ('become_user', 'become_flags', 'become_exe', 'become_pass'):
+ try:
+ setattr(self._play_context, option, self._connection.become.get_option(option))
+ except KeyError:
+ pass # some plugins don't support all base flags
+ self._play_context.prompt = self._connection.become.prompt
+
+ return varnames
+
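Both `_set_connection_options()` and `_set_plugin_options()` follow one recipe: ask the config system which variables the plugin declares, template the ones actually present in the host's vars, and hand the result to the plugin. The filtering step in isolation (stand-in templating, not Ansible's Templar):

```python
def collect_plugin_options(option_vars, variables, template=lambda v: v):
    """Build the var_options dict: only vars the plugin declares, templated."""
    return {k: template(variables[k]) for k in option_vars if k in variables}

host_vars = {
    'ansible_ssh_timeout': '30',
    'ansible_user': 'deploy',
    'unrelated_var': 'ignored',
}
declared = ['ansible_ssh_timeout', 'ansible_user', 'ansible_ssh_executable']
print(collect_plugin_options(declared, host_vars))
# {'ansible_ssh_timeout': '30', 'ansible_user': 'deploy'}
```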
+ def _get_action_handler(self, connection, templar):
+ '''
+ Returns the correct action plugin to handle the requested task action
+ '''
+
+ module_collection, separator, module_name = self._task.action.rpartition(".")
+ module_prefix = module_name.split('_')[0]
+ if module_collection:
+ # For network modules, which look for one action plugin per platform, look for the
+ # action plugin in the same collection as the module by prefixing the action plugin
+ # with the same collection.
+ network_action = "{0}.{1}".format(module_collection, module_prefix)
+ else:
+ network_action = module_prefix
+
+ collections = self._task.collections
+
+ # let action plugin override module, fallback to 'normal' action plugin otherwise
+ if self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections):
+ handler_name = self._task.action
+ elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))):
+ handler_name = network_action
+ else:
+ # use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search
+ handler_name = 'ansible.legacy.normal'
+ collections = None # until then, we don't want the task's collection list to be consulted; use the builtin
+
+ handler = self._shared_loader_obj.action_loader.get(
+ handler_name,
+ task=self._task,
+ connection=connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=templar,
+ shared_loader_obj=self._shared_loader_obj,
+ collection_list=collections
+ )
+
+ if not handler:
+ raise AnsibleError("the handler '%s' was not found" % handler_name)
+
+ return handler
+
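The fallback chain above (dedicated action plugin, then a per-platform network action, then `ansible.legacy.normal`) reduces to a small pure function. A sketch with a stand-in `has_plugin` predicate (names are illustrative):

```python
def resolve_handler_name(action, network_modules, has_plugin):
    """Mirror the fallback chain above; has_plugin is a name -> bool predicate."""
    collection, _, module = action.rpartition('.')
    prefix = module.split('_')[0]
    network_action = '%s.%s' % (collection, prefix) if collection else prefix

    if has_plugin(action):
        return action                      # dedicated action plugin wins
    if prefix in network_modules and has_plugin(network_action):
        return network_action              # per-platform network action
    return 'ansible.legacy.normal'         # generic module runner

available = {'cisco.ios.ios'}
print(resolve_handler_name('cisco.ios.ios_config', {'ios'}, available.__contains__))
# cisco.ios.ios
print(resolve_handler_name('ansible.builtin.copy', {'ios'}, available.__contains__))
# ansible.legacy.normal
```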
+
+def start_connection(play_context, variables, task_uuid):
+ '''
+ Starts the persistent connection
+ '''
+ candidate_paths = [C.ANSIBLE_CONNECTION_PATH or os.path.dirname(sys.argv[0])]
+ candidate_paths.extend(os.environ['PATH'].split(os.pathsep))
+ for dirname in candidate_paths:
+ ansible_connection = os.path.join(dirname, 'ansible-connection')
+ if os.path.isfile(ansible_connection):
+ display.vvvv("Found ansible-connection at path {0}".format(ansible_connection))
+ break
+ else:
+ raise AnsibleError("Unable to find location of 'ansible-connection'. "
+ "Please set or check the value of ANSIBLE_CONNECTION_PATH")
+
+ env = os.environ.copy()
+ env.update({
+ # HACK; most of these paths may change during the controller's lifetime
+ # (eg, due to late dynamic role includes, multi-playbook execution), without a way
+ # to invalidate/update, ansible-connection won't always see the same plugins the controller
+ # can.
+ 'ANSIBLE_BECOME_PLUGINS': become_loader.print_paths(),
+ 'ANSIBLE_CLICONF_PLUGINS': cliconf_loader.print_paths(),
+ 'ANSIBLE_COLLECTIONS_PATH': to_native(os.pathsep.join(AnsibleCollectionConfig.collection_paths)),
+ 'ANSIBLE_CONNECTION_PLUGINS': connection_loader.print_paths(),
+ 'ANSIBLE_HTTPAPI_PLUGINS': httpapi_loader.print_paths(),
+ 'ANSIBLE_NETCONF_PLUGINS': netconf_loader.print_paths(),
+ 'ANSIBLE_TERMINAL_PLUGINS': terminal_loader.print_paths(),
+ })
+ python = sys.executable
+ master, slave = pty.openpty()
+ p = subprocess.Popen(
+ [python, ansible_connection, to_text(os.getppid()), to_text(task_uuid)],
+ stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
+ )
+ os.close(slave)
+
+ # We need to set the pty into noncanonical mode. This ensures that we
+ # can receive lines longer than 4095 characters (plus newline) without
+ # truncating.
+ old = termios.tcgetattr(master)
+ new = termios.tcgetattr(master)
+ new[3] = new[3] & ~termios.ICANON
+
+ try:
+ termios.tcsetattr(master, termios.TCSANOW, new)
+ write_to_file_descriptor(master, variables)
+ write_to_file_descriptor(master, play_context.serialize())
+
+ (stdout, stderr) = p.communicate()
+ finally:
+ termios.tcsetattr(master, termios.TCSANOW, old)
+ os.close(master)
+
+ if p.returncode == 0:
+ result = json.loads(to_text(stdout, errors='surrogate_then_replace'))
+ else:
+ try:
+ result = json.loads(to_text(stderr, errors='surrogate_then_replace'))
+ except getattr(json.decoder, 'JSONDecodeError', ValueError):
+ # JSONDecodeError only available on Python 3.5+
+ result = {'error': to_text(stderr, errors='surrogate_then_replace')}
+
+ if 'messages' in result:
+ for level, message in result['messages']:
+ if level == 'log':
+ display.display(message, log_only=True)
+ elif level in ('debug', 'v', 'vv', 'vvv', 'vvvv', 'vvvvv', 'vvvvvv'):
+ getattr(display, level)(message, host=play_context.remote_addr)
+ else:
+ if hasattr(display, level):
+ getattr(display, level)(message)
+ else:
+ display.vvvv(message, host=play_context.remote_addr)
+
+ if 'error' in result:
+ if play_context.verbosity > 2:
+ if result.get('exception'):
+ msg = "The full traceback is:\n" + result['exception']
+ display.display(msg, color=C.COLOR_ERROR)
+ raise AnsibleError(result['error'])
+
+ return result['socket_path']
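The termios handling in `start_connection()` exists because canonical-mode tty line discipline truncates input lines at 4095 bytes, which would corrupt the long JSON blobs written to `ansible-connection`. A standalone POSIX demonstration of the same setup (echo is disabled too, so the demo stays self-contained):

```python
import os
import pty
import termios
import threading

master, slave = pty.openpty()

old = termios.tcgetattr(master)
new = termios.tcgetattr(master)
new[3] = new[3] & ~(termios.ICANON | termios.ECHO)  # lflags live at index 3

try:
    termios.tcsetattr(master, termios.TCSANOW, new)
    payload = b'x' * 8192 + b'\n'  # > 4095 bytes: canonical mode would truncate

    # write from a thread: a tty's input queue is small, so an unread write
    # of this size could otherwise block a single thread working both ends
    writer = threading.Thread(target=os.write, args=(master, payload))
    writer.start()

    received = b''
    while len(received) < len(payload):
        received += os.read(slave, 4096)
    writer.join()
    print(len(received))  # 8193: nothing was truncated
finally:
    termios.tcsetattr(master, termios.TCSANOW, old)
    os.close(master)
    os.close(slave)
```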
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
new file mode 100644
index 00000000..f43bdc78
--- /dev/null
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -0,0 +1,395 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import tempfile
+import time
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.executor.play_iterator import PlayIterator
+from ansible.executor.stats import AggregateStats
+from ansible.executor.task_result import TaskResult
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text, to_native
+from ansible.playbook.block import Block
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import callback_loader, strategy_loader, module_loader
+from ansible.plugins.callback import CallbackBase
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.helpers import pct_to_int
+from ansible.vars.hostvars import HostVars
+from ansible.vars.reserved import warn_if_reserved
+from ansible.utils.display import Display
+from ansible.utils.multiprocessing import context as multiprocessing_context
+
+
+__all__ = ['TaskQueueManager']
+
+display = Display()
+
+
+class TaskQueueManager:
+
+ '''
+ This class handles the multiprocessing requirements of Ansible by
+ creating a pool of worker forks, a result handler fork, and a
+ manager object with shared datastructures/queues for coordinating
+ work between all processes.
+
+ The queue manager is responsible for loading the play strategy plugin,
+ which dispatches the Play's tasks to hosts.
+ '''
+
+ RUN_OK = 0
+ RUN_ERROR = 1
+ RUN_FAILED_HOSTS = 2
+ RUN_UNREACHABLE_HOSTS = 4
+ RUN_FAILED_BREAK_PLAY = 8
+ RUN_UNKNOWN_ERROR = 255
+
+ def __init__(self, inventory, variable_manager, loader, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False, forks=None):
+
+ self._inventory = inventory
+ self._variable_manager = variable_manager
+ self._loader = loader
+ self._stats = AggregateStats()
+ self.passwords = passwords
+ self._stdout_callback = stdout_callback
+ self._run_additional_callbacks = run_additional_callbacks
+ self._run_tree = run_tree
+ self._forks = forks or 5
+
+ self._callbacks_loaded = False
+ self._callback_plugins = []
+ self._start_at_done = False
+
+ # make sure any module paths (if specified) are added to the module_loader
+ if context.CLIARGS.get('module_path', False):
+ for path in context.CLIARGS['module_path']:
+ if path:
+ module_loader.add_directory(path)
+
+ # a special flag to help us exit cleanly
+ self._terminated = False
+
+ # dictionaries to keep track of failed/unreachable hosts
+ self._failed_hosts = dict()
+ self._unreachable_hosts = dict()
+
+ try:
+ self._final_q = multiprocessing_context.Queue()
+ except OSError as e:
+ raise AnsibleError("Unable to use multiprocessing, this is normally caused by lack of access to /dev/shm: %s" % to_native(e))
+
+ # A temporary file (opened pre-fork) used by connection
+ # plugins for inter-process locking.
+ self._connection_lockfile = tempfile.TemporaryFile()
+
+ def _initialize_processes(self, num):
+ self._workers = []
+
+ for i in range(num):
+ self._workers.append(None)
+
+ def load_callbacks(self):
+ '''
+ Loads all available callbacks, with the exception of those which
+ utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
+ only one such callback plugin will be loaded.
+ '''
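+        # For example (illustrative), a TaskQueueManager constructed with
+        # stdout_callback='minimal' loads the 'minimal' stdout plugin here and
+        # then skips any other plugin whose CALLBACK_TYPE is 'stdout'.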
+
+ if self._callbacks_loaded:
+ return
+
+ stdout_callback_loaded = False
+ if self._stdout_callback is None:
+ self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
+
+ if isinstance(self._stdout_callback, CallbackBase):
+ stdout_callback_loaded = True
+ elif isinstance(self._stdout_callback, string_types):
+ if self._stdout_callback not in callback_loader:
+ raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
+ else:
+ self._stdout_callback = callback_loader.get(self._stdout_callback)
+ self._stdout_callback.set_options()
+ stdout_callback_loaded = True
+ else:
+ raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
+
+ # get all configured loadable callbacks (adjacent, builtin)
+ callback_list = list(callback_loader.all(class_only=True))
+
+ # add whitelisted callbacks that refer to collections, which might not appear in normal listing
+ for c in C.DEFAULT_CALLBACK_WHITELIST:
+            # load all, as collection ones might be using short/redirected names and not an FQCN
+ plugin = callback_loader.get(c, class_only=True)
+
+ # TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
+ if plugin:
+                # avoids incorrect entries and duplicates that are possible due to collections
+ if plugin not in callback_list:
+ callback_list.append(plugin)
+ else:
+ display.warning("Skipping callback plugin '%s', unable to load" % c)
+
+ # for each callback in the list see if we should add it to 'active callbacks' used in the play
+ for callback_plugin in callback_list:
+
+ callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
+ callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
+
+            # try to get the collection-world (redirected) name first
+ cnames = getattr(callback_plugin, '_redirected_names', [])
+ if cnames:
+ # store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
+ callback_name = cnames[0]
+ else:
+ # fallback to 'old loader name'
+ (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
+
+ display.vvvvv("Attempting to use '%s' callback." % (callback_name))
+ if callback_type == 'stdout':
+                # we only allow one callback of type 'stdout' to be loaded
+ if callback_name != self._stdout_callback or stdout_callback_loaded:
+ display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
+ continue
+ stdout_callback_loaded = True
+ elif callback_name == 'tree' and self._run_tree:
+ # TODO: remove special case for tree, which is an adhoc cli option --tree
+ pass
+ elif not self._run_additional_callbacks or (callback_needs_whitelist and (
+ # only run if not adhoc, or adhoc was specifically configured to run + check enabled list
+ C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
+                # 2.x plugins shipped with ansible should require whitelisting; older or non-shipped plugins should load automatically
+ continue
+
+ try:
+ callback_obj = callback_plugin()
+                # guard against a bad plugin not returning an object; only needed because we do a class_only load and bypass loader checks.
+                # This is really a bug in the plugin itself, which we ignore since callback errors are not supposed to be fatal.
+ if callback_obj:
+ # skip initializing if we already did the work for the same plugin (even with diff names)
+ if callback_obj not in self._callback_plugins:
+ callback_obj.set_options()
+ self._callback_plugins.append(callback_obj)
+ else:
+ display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
+ else:
+ display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
+ continue
+ except Exception as e:
+ display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
+ continue
+
+ self._callbacks_loaded = True
+
+ def run(self, play):
+ '''
+ Iterates over the roles/tasks in a play, using the given (or default)
+ strategy for queueing tasks. The default is the linear strategy, which
+ operates like classic Ansible by keeping all hosts in lock-step with
+ a given task (meaning no hosts move on to the next task until all hosts
+ are done with the current task).
+ '''
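+        # e.g. with the default 'linear' strategy and hosts A and B, task 1
+        # must finish on both A and B before task 2 starts on either host; a
+        # play may select another strategy (such as 'free') via its 'strategy' keyword.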
+
+ if not self._callbacks_loaded:
+ self.load_callbacks()
+
+ all_vars = self._variable_manager.get_vars(play=play)
+ warn_if_reserved(all_vars)
+ templar = Templar(loader=self._loader, variables=all_vars)
+
+ new_play = play.copy()
+ new_play.post_validate(templar)
+ new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers
+
+ self.hostvars = HostVars(
+ inventory=self._inventory,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+
+ play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
+ if (self._stdout_callback and
+ hasattr(self._stdout_callback, 'set_play_context')):
+ self._stdout_callback.set_play_context(play_context)
+
+ for callback_plugin in self._callback_plugins:
+ if hasattr(callback_plugin, 'set_play_context'):
+ callback_plugin.set_play_context(play_context)
+
+ self.send_callback('v2_playbook_on_play_start', new_play)
+
+ # build the iterator
+ iterator = PlayIterator(
+ inventory=self._inventory,
+ play=new_play,
+ play_context=play_context,
+ variable_manager=self._variable_manager,
+ all_vars=all_vars,
+ start_at_done=self._start_at_done,
+ )
+
+        # adjust the number of workers to the configured forks or the size of the batch, whichever is lower
+ self._initialize_processes(min(self._forks, iterator.batch_size))
+
+ # load the specified strategy (or the default linear one)
+ strategy = strategy_loader.get(new_play.strategy, self)
+ if strategy is None:
+ raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
+
+ # Because the TQM may survive multiple play runs, we start by marking
+ # any hosts as failed in the iterator here which may have been marked
+ # as failed in previous runs. Then we clear the internal list of failed
+ # hosts so we know what failed this round.
+ for host_name in self._failed_hosts.keys():
+ host = self._inventory.get_host(host_name)
+ iterator.mark_host_failed(host)
+
+ self.clear_failed_hosts()
+
+ # during initialization, the PlayContext will clear the start_at_task
+ # field to signal that a matching task was found, so check that here
+ # and remember it so we don't try to skip tasks on future plays
+ if context.CLIARGS.get('start_at_task') is not None and play_context.start_at_task is None:
+ self._start_at_done = True
+
+ # and run the play using the strategy and cleanup on way out
+ play_return = strategy.run(iterator, play_context)
+
+ # now re-save the hosts that failed from the iterator to our internal list
+ for host_name in iterator.get_failed_hosts():
+ self._failed_hosts[host_name] = True
+
+ strategy.cleanup()
+ self._cleanup_processes()
+ return play_return
+
+ def cleanup(self):
+ display.debug("RUNNING CLEANUP")
+ self.terminate()
+ self._final_q.close()
+ self._cleanup_processes()
+
+ # A bug exists in Python 2.6 that causes an exception to be raised during
+ # interpreter shutdown. This is only an issue in our CI testing but we
+ # hit it frequently enough to add a small sleep to avoid the issue.
+ # This can be removed once we have split controller available in CI.
+ #
+ # Further information:
+ # Issue: https://bugs.python.org/issue4106
+ # Fix: https://hg.python.org/cpython/rev/d316315a8781
+ #
+ try:
+ if (2, 6) == (sys.version_info[0:2]):
+ time.sleep(0.0001)
+ except (IndexError, AttributeError):
+ # In case there is an issue getting the version info, don't raise an Exception
+ pass
+
+ def _cleanup_processes(self):
+ if hasattr(self, '_workers'):
+ for attempts_remaining in range(C.WORKER_SHUTDOWN_POLL_COUNT - 1, -1, -1):
+ if not any(worker_prc and worker_prc.is_alive() for worker_prc in self._workers):
+ break
+
+ if attempts_remaining:
+ time.sleep(C.WORKER_SHUTDOWN_POLL_DELAY)
+ else:
+ display.warning('One or more worker processes are still running and will be terminated.')
+
+ for worker_prc in self._workers:
+ if worker_prc and worker_prc.is_alive():
+ try:
+ worker_prc.terminate()
+ except AttributeError:
+ pass
+
+ def clear_failed_hosts(self):
+ self._failed_hosts = dict()
+
+ def get_inventory(self):
+ return self._inventory
+
+ def get_variable_manager(self):
+ return self._variable_manager
+
+ def get_loader(self):
+ return self._loader
+
+ def get_workers(self):
+ return self._workers[:]
+
+ def terminate(self):
+ self._terminated = True
+
+ def has_dead_workers(self):
+
+ # [<WorkerProcess(WorkerProcess-2, stopped[SIGKILL])>,
+ # <WorkerProcess(WorkerProcess-2, stopped[SIGTERM])>
+
+ defunct = False
+ for x in self._workers:
+ if getattr(x, 'exitcode', None):
+ defunct = True
+ return defunct
+
+ def send_callback(self, method_name, *args, **kwargs):
+ for callback_plugin in [self._stdout_callback] + self._callback_plugins:
+ # a plugin that set self.disabled to True will not be called
+ # see osx_say.py example for such a plugin
+ if getattr(callback_plugin, 'disabled', False):
+ continue
+
+ # try to find v2 method, fallback to v1 method, ignore callback if no method found
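+            # e.g. method_name 'v2_playbook_on_play_start' falls back to
+            # 'playbook_on_play_start' for old-style plugins, and 'v2_on_any'
+            # falls back to 'on_any'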
+ methods = []
+ for possible in [method_name, 'v2_on_any']:
+ gotit = getattr(callback_plugin, possible, None)
+ if gotit is None:
+ gotit = getattr(callback_plugin, possible.replace('v2_', ''), None)
+ if gotit is not None:
+ methods.append(gotit)
+
+ # send clean copies
+ new_args = []
+ for arg in args:
+ # FIXME: add play/task cleaners
+ if isinstance(arg, TaskResult):
+ new_args.append(arg.clean_copy())
+ # elif isinstance(arg, Play):
+ # elif isinstance(arg, Task):
+ else:
+ new_args.append(arg)
+
+ for method in methods:
+ try:
+ method(*new_args, **kwargs)
+ except Exception as e:
+ # TODO: add config toggle to make this fatal or not?
+ display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_text(method_name), to_text(callback_plugin), to_text(e)))
+ from traceback import format_tb
+ from sys import exc_info
+ display.vvv('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
new file mode 100644
index 00000000..543b860e
--- /dev/null
+++ b/lib/ansible/executor/task_result.py
@@ -0,0 +1,154 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.parsing.dataloader import DataLoader
+from ansible.vars.clean import module_response_deepcopy, strip_internal_keys
+
+_IGNORE = ('failed', 'skipped')
+_PRESERVE = ('attempts', 'changed', 'retries')
+_SUB_PRESERVE = {'_ansible_delegated_vars': ('ansible_host', 'ansible_port', 'ansible_user', 'ansible_connection')}
+
+# stuff callbacks need
+CLEAN_EXCEPTIONS = (
+ '_ansible_verbose_always', # for debug and other actions, to always expand data (pretty jsonification)
+ '_ansible_item_label', # to know actual 'item' variable
+    '_ansible_item_label',  # to know actual 'item' variable
+    '_ansible_no_log',  # just in case we didn't clean up well enough, DON'T LOG
+    '_ansible_verbose_override',  # controls display of ansible_facts, gathering would be very noisy with -v otherwise
+)
+
+
+class TaskResult:
+ '''
+ This class is responsible for interpreting the resulting data
+ from an executed task, and provides helper methods for determining
+ the result of a given task.
+ '''
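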
+
+ def __init__(self, host, task, return_data, task_fields=None):
+ self._host = host
+ self._task = task
+
+ if isinstance(return_data, dict):
+ self._result = return_data.copy()
+ else:
+ self._result = DataLoader().load(return_data)
+
+ if task_fields is None:
+ self._task_fields = dict()
+ else:
+ self._task_fields = task_fields
+
+ @property
+ def task_name(self):
+ return self._task_fields.get('name', None) or self._task.get_name()
+
+ def is_changed(self):
+ return self._check_key('changed')
+
+ def is_skipped(self):
+ # loop results
+ if 'results' in self._result:
+ results = self._result['results']
+ # Loop tasks are only considered skipped if all items were skipped.
+            # some squashed results (e.g., yum) are not dicts and can't be skipped individually
+ if results and all(isinstance(res, dict) and res.get('skipped', False) for res in results):
+ return True
+
+ # regular tasks and squashed non-dict results
+ return self._result.get('skipped', False)
+
+ def is_failed(self):
+        if 'failed_when_result' in self._result or \
+           'results' in self._result and any('failed_when_result' in x for x in self._result['results']):
+ return self._check_key('failed_when_result')
+ else:
+ return self._check_key('failed')
+
+ def is_unreachable(self):
+ return self._check_key('unreachable')
+
+ def needs_debugger(self, globally_enabled=False):
+ _debugger = self._task_fields.get('debugger')
+ _ignore_errors = C.TASK_DEBUGGER_IGNORE_ERRORS and self._task_fields.get('ignore_errors')
+
+ ret = False
+ if globally_enabled and ((self.is_failed() and not _ignore_errors) or self.is_unreachable()):
+ ret = True
+
+ if _debugger in ('always',):
+ ret = True
+ elif _debugger in ('never',):
+ ret = False
+ elif _debugger in ('on_failed',) and self.is_failed() and not _ignore_errors:
+ ret = True
+ elif _debugger in ('on_unreachable',) and self.is_unreachable():
+ ret = True
+ elif _debugger in ('on_skipped',) and self.is_skipped():
+ ret = True
+
+ return ret
+
+ def _check_key(self, key):
+ '''get a specific key from the result or its items'''
+
+ if isinstance(self._result, dict) and key in self._result:
+ return self._result.get(key, False)
+ else:
+ flag = False
+ for res in self._result.get('results', []):
+ if isinstance(res, dict):
+ flag |= res.get(key, False)
+ return flag
+
+ def clean_copy(self):
+
+        ''' returns a 'clean' TaskResult object '''
+
+ # FIXME: clean task_fields, _task and _host copies
+ result = TaskResult(self._host, self._task, {}, self._task_fields)
+
+ # statuses are already reflected on the event type
+ if result._task and result._task.action in C._ACTION_DEBUG:
+ # debug is verbose by default to display vars, no need to add invocation
+ ignore = _IGNORE + ('invocation',)
+ else:
+ ignore = _IGNORE
+
+ subset = {}
+ # preserve subset for later
+ for sub in _SUB_PRESERVE:
+ if sub in self._result:
+ subset[sub] = {}
+ for key in _SUB_PRESERVE[sub]:
+ if key in self._result[sub]:
+ subset[sub][key] = self._result[sub][key]
+
+        if (isinstance(self._task.no_log, bool) and self._task.no_log) or self._result.get('_ansible_no_log', False):
+ x = {"censored": "the output has been hidden due to the fact that 'no_log: true' was specified for this result"}
+
+ # preserve full
+ for preserve in _PRESERVE:
+ if preserve in self._result:
+ x[preserve] = self._result[preserve]
+
+ result._result = x
+ elif self._result:
+ result._result = module_response_deepcopy(self._result)
+
+        # actually remove the ignored keys
+ for remove_key in ignore:
+ if remove_key in result._result:
+ del result._result[remove_key]
+
+ # remove almost ALL internal keys, keep ones relevant to callback
+ strip_internal_keys(result._result, exceptions=CLEAN_EXCEPTIONS)
+
+ # keep subset
+ result._result.update(subset)
+
+ return result
diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
new file mode 100644
index 00000000..a73baac8
--- /dev/null
+++ b/lib/ansible/galaxy/__init__.py
@@ -0,0 +1,72 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+''' This manages remote shared Ansible objects, mainly roles'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import yaml
+
+import ansible.constants as C
+from ansible import context
+from ansible.module_utils._text import to_bytes
+
+# default_readme_template
+# default_meta_template
+
+
+def get_collections_galaxy_meta_info():
+ meta_path = os.path.join(os.path.dirname(__file__), 'data', 'collections_galaxy_meta.yml')
+ with open(to_bytes(meta_path, errors='surrogate_or_strict'), 'rb') as galaxy_obj:
+ return yaml.safe_load(galaxy_obj)
+
+
+class Galaxy(object):
+ ''' Keeps global galaxy info '''
+
+ def __init__(self):
+        # TODO: eventually remove this as it contains a mishmash of properties that aren't really global
+
+ # roles_path needs to be a list and will be by default
+ roles_path = context.CLIARGS.get('roles_path', C.DEFAULT_ROLES_PATH)
+ # cli option handling is responsible for splitting roles_path
+ self.roles_paths = roles_path
+
+ self.roles = {}
+
+ # load data path for resource usage
+ this_dir, this_filename = os.path.split(__file__)
+ type_path = context.CLIARGS.get('role_type', 'default')
+ if type_path == 'default':
+ type_path = os.path.join(type_path, context.CLIARGS.get('type'))
+
+ self.DATA_PATH = os.path.join(this_dir, 'data', type_path)
+
+ @property
+ def default_role_skeleton_path(self):
+ return self.DATA_PATH
+
+ def add_role(self, role):
+ self.roles[role.name] = role
+
+ def remove_role(self, role_name):
+ del self.roles[role_name]
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
new file mode 100644
index 00000000..4dd3cded
--- /dev/null
+++ b/lib/ansible/galaxy/api.py
@@ -0,0 +1,581 @@
+# (C) 2013, James Cammarata <jcammarata@ansible.com>
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import hashlib
+import json
+import os
+import tarfile
+import uuid
+import time
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlencode, urlparse
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.urls import open_url, prepare_multipart
+from ansible.utils.display import Display
+from ansible.utils.hashing import secure_hash_s
+
+
+display = Display()
+
+
+def g_connect(versions):
+ """
+ Wrapper to lazily initialize connection info to Galaxy and verify the API versions required are available on the
+ endpoint.
+
+ :param versions: A list of API versions that the function supports.
+ """
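+    # Illustrative use, mirroring the GalaxyAPI methods defined below:
+    #
+    #   @g_connect(['v2', 'v3'])
+    #   def publish_collection(self, collection_path):
+    #       ...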
+ def decorator(method):
+ def wrapped(self, *args, **kwargs):
+ if not self._available_api_versions:
+ display.vvvv("Initial connection to galaxy_server: %s" % self.api_server)
+
+ # Determine the type of Galaxy server we are talking to. First try it unauthenticated then with Bearer
+ # auth for Automation Hub.
+ n_url = self.api_server
+ error_context_msg = 'Error when finding available api versions from %s (%s)' % (self.name, n_url)
+
+ if self.api_server == 'https://galaxy.ansible.com' or self.api_server == 'https://galaxy.ansible.com/':
+ n_url = 'https://galaxy.ansible.com/api/'
+
+ try:
+ data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg)
+ except (AnsibleError, GalaxyError, ValueError, KeyError) as err:
+                    # Either the URL doesn't exist, or some other error occurred. Or the URL exists but isn't a galaxy
+                    # API root (not JSON, no 'available_versions'), so try appending '/api/'
+ if n_url.endswith('/api') or n_url.endswith('/api/'):
+ raise
+
+ # Let exceptions here bubble up but raise the original if this returns a 404 (/api/ wasn't found).
+ n_url = _urljoin(n_url, '/api/')
+ try:
+ data = self._call_galaxy(n_url, method='GET', error_context_msg=error_context_msg)
+ except GalaxyError as new_err:
+ if new_err.http_code == 404:
+ raise err
+ raise
+
+ if 'available_versions' not in data:
+ raise AnsibleError("Tried to find galaxy API root at %s but no 'available_versions' are available "
+ "on %s" % (n_url, self.api_server))
+
+ # Update api_server to point to the "real" API root, which in this case could have been the configured
+ # url + '/api/' appended.
+ self.api_server = n_url
+
+ # Default to only supporting v1, if only v1 is returned we also assume that v2 is available even though
+ # it isn't returned in the available_versions dict.
+ available_versions = data.get('available_versions', {u'v1': u'v1/'})
+ if list(available_versions.keys()) == [u'v1']:
+ available_versions[u'v2'] = u'v2/'
+
+ self._available_api_versions = available_versions
+ display.vvvv("Found API version '%s' with Galaxy server %s (%s)"
+ % (', '.join(available_versions.keys()), self.name, self.api_server))
+
+ # Verify that the API versions the function works with are available on the server specified.
+ available_versions = set(self._available_api_versions.keys())
+ common_versions = set(versions).intersection(available_versions)
+ if not common_versions:
+ raise AnsibleError("Galaxy action %s requires API versions '%s' but only '%s' are available on %s %s"
+ % (method.__name__, ", ".join(versions), ", ".join(available_versions),
+ self.name, self.api_server))
+
+ return method(self, *args, **kwargs)
+ return wrapped
+ return decorator
+
+
+def _urljoin(*args):
+ return '/'.join(to_native(a, errors='surrogate_or_strict').strip('/') for a in args + ('',) if a)
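+# Illustrative behavior:
+#   _urljoin('https://galaxy.ansible.com/', '/api/', 'v2') -> 'https://galaxy.ansible.com/api/v2'
+# Note that no trailing slash is produced; callers append '/' where the API expects one.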
+
+
+class GalaxyError(AnsibleError):
+ """ Error for bad Galaxy server responses. """
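+    # An illustrative (hypothetical) v2 error body that the parsing below handles:
+    #   {'code': 'conflict.collection_exists', 'message': 'Collection "ns-name-1.0.0" already exists.'}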
+
+ def __init__(self, http_error, message):
+ super(GalaxyError, self).__init__(message)
+ self.http_code = http_error.code
+ self.url = http_error.geturl()
+
+ try:
+ http_msg = to_text(http_error.read())
+ err_info = json.loads(http_msg)
+ except (AttributeError, ValueError):
+ err_info = {}
+
+ url_split = self.url.split('/')
+ if 'v2' in url_split:
+ galaxy_msg = err_info.get('message', http_error.reason)
+ code = err_info.get('code', 'Unknown')
+ full_error_msg = u"%s (HTTP Code: %d, Message: %s Code: %s)" % (message, self.http_code, galaxy_msg, code)
+ elif 'v3' in url_split:
+ errors = err_info.get('errors', [])
+ if not errors:
+ errors = [{}] # Defaults are set below, we just need to make sure 1 error is present.
+
+ message_lines = []
+ for error in errors:
+ error_msg = error.get('detail') or error.get('title') or http_error.reason
+ error_code = error.get('code') or 'Unknown'
+ message_line = u"(HTTP Code: %d, Message: %s Code: %s)" % (self.http_code, error_msg, error_code)
+ message_lines.append(message_line)
+
+ full_error_msg = "%s %s" % (message, ', '.join(message_lines))
+ else:
+ # v1 and unknown API endpoints
+ galaxy_msg = err_info.get('default', http_error.reason)
+ full_error_msg = u"%s (HTTP Code: %d, Message: %s)" % (message, self.http_code, galaxy_msg)
+
+ self.message = to_native(full_error_msg)
+
+
+class CollectionVersionMetadata:
+
+ def __init__(self, namespace, name, version, download_url, artifact_sha256, dependencies):
+ """
+        Contains common information about a collection on a Galaxy server, smoothing over API differences between
+        Galaxy servers and defining standard metadata for a collection.
+
+ :param namespace: The namespace name.
+ :param name: The collection name.
+ :param version: The version that the metadata refers to.
+ :param download_url: The URL to download the collection.
+ :param artifact_sha256: The SHA256 of the collection artifact for later verification.
+ :param dependencies: A dict of dependencies of the collection.
+ """
+ self.namespace = namespace
+ self.name = name
+ self.version = version
+ self.download_url = download_url
+ self.artifact_sha256 = artifact_sha256
+ self.dependencies = dependencies
+
+
+class GalaxyAPI:
+    """ This class is meant to be used as an API client for an Ansible Galaxy server """
+
+ def __init__(self, galaxy, name, url, username=None, password=None, token=None, validate_certs=True):
+ self.galaxy = galaxy
+ self.name = name
+ self.username = username
+ self.password = password
+ self.token = token
+ self.api_server = url
+ self.validate_certs = validate_certs
+ self._available_api_versions = {}
+
+ display.debug('Validate TLS certificates for %s: %s' % (self.api_server, self.validate_certs))
+
+ @property
+ @g_connect(['v1', 'v2', 'v3'])
+ def available_api_versions(self):
+ # Calling g_connect will populate self._available_api_versions
+ return self._available_api_versions
+
+ def _call_galaxy(self, url, args=None, headers=None, method=None, auth_required=False, error_context_msg=None):
+ headers = headers or {}
+ self._add_auth_token(headers, url, required=auth_required)
+
+ try:
+ display.vvvv("Calling Galaxy at %s" % url)
+ resp = open_url(to_native(url), data=args, validate_certs=self.validate_certs, headers=headers,
+ method=method, timeout=20, http_agent=user_agent(), follow_redirects='safe')
+ except HTTPError as e:
+ raise GalaxyError(e, error_context_msg)
+ except Exception as e:
+ raise AnsibleError("Unknown error when attempting to call Galaxy at '%s': %s" % (url, to_native(e)))
+
+ resp_data = to_text(resp.read(), errors='surrogate_or_strict')
+ try:
+ data = json.loads(resp_data)
+ except ValueError:
+ raise AnsibleError("Failed to parse Galaxy response from '%s' as JSON:\n%s"
+ % (resp.url, to_native(resp_data)))
+
+ return data
+
+ def _add_auth_token(self, headers, url, token_type=None, required=False):
+ # Don't add the auth token if one is already present
+ if 'Authorization' in headers:
+ return
+
+ if not self.token and required:
+ raise AnsibleError("No access token or username set. A token can be set with --api-key "
+ "or at {0}.".format(to_native(C.GALAXY_TOKEN_PATH)))
+
+ if self.token:
+ headers.update(self.token.headers())
+
+ @g_connect(['v1'])
+ def authenticate(self, github_token):
+ """
+ Retrieve an authentication token
+ """
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "tokens") + '/'
+ args = urlencode({"github_token": github_token})
+ resp = open_url(url, data=args, validate_certs=self.validate_certs, method="POST", http_agent=user_agent())
+ data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+ return data
+
+ @g_connect(['v1'])
+ def create_import_task(self, github_user, github_repo, reference=None, role_name=None):
+ """
+ Post an import request
+ """
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports") + '/'
+ args = {
+ "github_user": github_user,
+ "github_repo": github_repo,
+ "github_reference": reference if reference else ""
+ }
+ if role_name:
+ args['alternate_role_name'] = role_name
+ elif github_repo.startswith('ansible-role'):
+ args['alternate_role_name'] = github_repo[len('ansible-role') + 1:]
+ data = self._call_galaxy(url, args=urlencode(args), method="POST")
+ if data.get('results', None):
+ return data['results']
+ return data
+
+ @g_connect(['v1'])
+ def get_import_task(self, task_id=None, github_user=None, github_repo=None):
+ """
+ Check the status of an import task.
+ """
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "imports")
+ if task_id is not None:
+ url = "%s?id=%d" % (url, task_id)
+ elif github_user is not None and github_repo is not None:
+ url = "%s?github_user=%s&github_repo=%s" % (url, github_user, github_repo)
+ else:
+ raise AnsibleError("Expected task_id or github_user and github_repo")
+
+ data = self._call_galaxy(url)
+ return data['results']
+
+ @g_connect(['v1'])
+ def lookup_role_by_name(self, role_name, notify=True):
+ """
+ Find a role by name.
+ """
+ role_name = to_text(urlquote(to_bytes(role_name)))
+
+ try:
+ parts = role_name.split(".")
+ user_name = ".".join(parts[0:-1])
+ role_name = parts[-1]
+ if notify:
+ display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
+ except Exception:
+ raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
+
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles",
+ "?owner__username=%s&name=%s" % (user_name, role_name))
+ data = self._call_galaxy(url)
+ if len(data["results"]) != 0:
+ return data["results"][0]
+ return None
+
+ @g_connect(['v1'])
+ def fetch_role_related(self, related, role_id):
+ """
+ Fetch the list of related items for the given role.
+ The url comes from the 'related' field of the role.
+ """
+
+ results = []
+ try:
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "roles", role_id, related,
+ "?page_size=50")
+ data = self._call_galaxy(url)
+ results = data['results']
+ done = (data.get('next_link', None) is None)
+
+ # https://github.com/ansible/ansible/issues/64355
+ # api_server contains part of the API path but next_link includes the /api part so strip it out.
+ url_info = urlparse(self.api_server)
+ base_url = "%s://%s/" % (url_info.scheme, url_info.netloc)
+
+ while not done:
+ url = _urljoin(base_url, data['next_link'])
+ data = self._call_galaxy(url)
+ results += data['results']
+ done = (data.get('next_link', None) is None)
+ except Exception as e:
+ display.warning("Unable to retrieve role (id=%s) data (%s), but this is not fatal so we continue: %s"
+ % (role_id, related, to_text(e)))
+ return results
+
+ @g_connect(['v1'])
+ def get_list(self, what):
+ """
+ Fetch the list of items specified.
+ """
+ try:
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], what, "?page_size")
+ data = self._call_galaxy(url)
+ if "results" in data:
+ results = data['results']
+ else:
+ results = data
+ done = True
+ if "next" in data:
+ done = (data.get('next_link', None) is None)
+ while not done:
+ url = _urljoin(self.api_server, data['next_link'])
+ data = self._call_galaxy(url)
+ results += data['results']
+ done = (data.get('next_link', None) is None)
+ return results
+ except Exception as error:
+ raise AnsibleError("Failed to download the %s list: %s" % (what, to_native(error)))
+
+ @g_connect(['v1'])
+ def search_roles(self, search, **kwargs):
+
+ search_url = _urljoin(self.api_server, self.available_api_versions['v1'], "search", "roles", "?")
+
+ if search:
+ search_url += '&autocomplete=' + to_text(urlquote(to_bytes(search)))
+
+ tags = kwargs.get('tags', None)
+ platforms = kwargs.get('platforms', None)
+ page_size = kwargs.get('page_size', None)
+ author = kwargs.get('author', None)
+
+ if tags and isinstance(tags, string_types):
+ tags = tags.split(',')
+ search_url += '&tags_autocomplete=' + '+'.join(tags)
+
+ if platforms and isinstance(platforms, string_types):
+ platforms = platforms.split(',')
+ search_url += '&platforms_autocomplete=' + '+'.join(platforms)
+
+ if page_size:
+ search_url += '&page_size=%s' % page_size
+
+ if author:
+ search_url += '&username_autocomplete=%s' % author
+
+ data = self._call_galaxy(search_url)
+ return data
+
+ @g_connect(['v1'])
+ def add_secret(self, source, github_user, github_repo, secret):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets") + '/'
+ args = urlencode({
+ "source": source,
+ "github_user": github_user,
+ "github_repo": github_repo,
+ "secret": secret
+ })
+ data = self._call_galaxy(url, args=args, method="POST")
+ return data
+
+ @g_connect(['v1'])
+ def list_secrets(self):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets")
+ data = self._call_galaxy(url, auth_required=True)
+ return data
+
+ @g_connect(['v1'])
+ def remove_secret(self, secret_id):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "notification_secrets", secret_id) + '/'
+ data = self._call_galaxy(url, auth_required=True, method='DELETE')
+ return data
+
+ @g_connect(['v1'])
+ def delete_role(self, github_user, github_repo):
+ url = _urljoin(self.api_server, self.available_api_versions['v1'], "removerole",
+ "?github_user=%s&github_repo=%s" % (github_user, github_repo))
+ data = self._call_galaxy(url, auth_required=True, method='DELETE')
+ return data
+
+ # Collection APIs #
+
+ @g_connect(['v2', 'v3'])
+ def publish_collection(self, collection_path):
+ """
+ Publishes a collection to a Galaxy server and returns the import task URI.
+
+ :param collection_path: The path to the collection tarball to publish.
+ :return: The import task URI that contains the import results.
+ """
+ display.display("Publishing collection artifact '%s' to %s %s" % (collection_path, self.name, self.api_server))
+
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_collection_path):
+ raise AnsibleError("The collection path specified '%s' does not exist." % to_native(collection_path))
+ elif not tarfile.is_tarfile(b_collection_path):
+ raise AnsibleError("The collection path specified '%s' is not a tarball, use 'ansible-galaxy collection "
+ "build' to create a proper release artifact." % to_native(collection_path))
+
+ with open(b_collection_path, 'rb') as collection_tar:
+ sha256 = secure_hash_s(collection_tar.read(), hash_func=hashlib.sha256)
+
+ content_type, b_form_data = prepare_multipart(
+ {
+ 'sha256': sha256,
+ 'file': {
+ 'filename': b_collection_path,
+ 'mime_type': 'application/octet-stream',
+ },
+ }
+ )
+
+ headers = {
+ 'Content-type': content_type,
+ 'Content-length': len(b_form_data),
+ }
+
+ if 'v3' in self.available_api_versions:
+ n_url = _urljoin(self.api_server, self.available_api_versions['v3'], 'artifacts', 'collections') + '/'
+ else:
+ n_url = _urljoin(self.api_server, self.available_api_versions['v2'], 'collections') + '/'
+
+ resp = self._call_galaxy(n_url, args=b_form_data, headers=headers, method='POST', auth_required=True,
+ error_context_msg='Error when publishing collection to %s (%s)'
+ % (self.name, self.api_server))
+
+ return resp['task']
+
+ @g_connect(['v2', 'v3'])
+ def wait_import_task(self, task_id, timeout=0):
+ """
+ Waits until the import process on the Galaxy server has completed or the timeout is reached.
+
+ :param task_id: The id of the import task to wait for. This can be parsed out of the return
+ value for GalaxyAPI.publish_collection.
+ :param timeout: The timeout in seconds, 0 is no timeout.
+ """
+ state = 'waiting'
+ data = None
+
+ # Construct the appropriate URL per version
+ if 'v3' in self.available_api_versions:
+ full_url = _urljoin(self.api_server, self.available_api_versions['v3'],
+ 'imports/collections', task_id, '/')
+ else:
+ full_url = _urljoin(self.api_server, self.available_api_versions['v2'],
+ 'collection-imports', task_id, '/')
+
+ display.display("Waiting until Galaxy import task %s has completed" % full_url)
+ start = time.time()
+ wait = 2
+
+ while timeout == 0 or (time.time() - start) < timeout:
+ data = self._call_galaxy(full_url, method='GET', auth_required=True,
+ error_context_msg='Error when getting import task results at %s' % full_url)
+
+ state = data.get('state', 'waiting')
+
+ if data.get('finished_at', None):
+ break
+
+ display.vvv('Galaxy import process has a status of %s, wait %d seconds before trying again'
+ % (state, wait))
+ time.sleep(wait)
+
+ # poor man's exponential backoff algo so we don't flood the Galaxy API, cap at 30 seconds.
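+                # with the initial wait of 2 the successive delays are roughly
+                # 2, 3, 4.5, 6.75, 10.1, 15.2, 22.8 and then capped at 30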
+ wait = min(30, wait * 1.5)
+ if state == 'waiting':
+ raise AnsibleError("Timeout while waiting for the Galaxy import process to finish, check progress at '%s'"
+ % to_native(full_url))
+
+ for message in data.get('messages', []):
+ level = message['level']
+ if level == 'error':
+ display.error("Galaxy import error message: %s" % message['message'])
+ elif level == 'warning':
+ display.warning("Galaxy import warning message: %s" % message['message'])
+ else:
+ display.vvv("Galaxy import message: %s - %s" % (level, message['message']))
+
+ if state == 'failed':
+ code = to_native(data['error'].get('code', 'UNKNOWN'))
+ description = to_native(
+ data['error'].get('description', "Unknown error, see %s for more details" % full_url))
+ raise AnsibleError("Galaxy import process failed: %s (Code: %s)" % (description, code))
+
+ @g_connect(['v2', 'v3'])
+ def get_collection_version_metadata(self, namespace, name, version):
+ """
+ Gets the collection information from the Galaxy server about a specific Collection version.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :param version: Version of the collection to get the information for.
+ :return: CollectionVersionMetadata about the collection at the version requested.
+ """
+ api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))
+ url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']
+
+ n_collection_url = _urljoin(*url_paths)
+ error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \
+ % (namespace, name, version, self.name, self.api_server)
+ data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg)
+
+ return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],
+ data['download_url'], data['artifact']['sha256'],
+ data['metadata']['dependencies'])
+
+ @g_connect(['v2', 'v3'])
+ def get_collection_versions(self, namespace, name):
+ """
+ Gets a list of available versions for a collection on a Galaxy server.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :return: A list of versions that are available.
+ """
+ relative_link = False
+ if 'v3' in self.available_api_versions:
+ api_path = self.available_api_versions['v3']
+ results_key = 'data'
+ pagination_path = ['links', 'next']
+            relative_link = True  # AH pagination results are relative and not absolute URIs.
+ else:
+ api_path = self.available_api_versions['v2']
+ results_key = 'results'
+ pagination_path = ['next']
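+        # Illustrative response shapes the pagination loop below expects
+        # (keys taken from the branches above, values hypothetical):
+        #   v2: {'results': [{'version': '1.0.0'}], 'next': 'https://host/api/v2/...'}
+        #   v3: {'data': [{'version': '1.0.0'}], 'links': {'next': '/api/v3/...'}}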
+
+ n_url = _urljoin(self.api_server, api_path, 'collections', namespace, name, 'versions', '/')
+
+ error_context_msg = 'Error when getting available collection versions for %s.%s from %s (%s)' \
+ % (namespace, name, self.name, self.api_server)
+ data = self._call_galaxy(n_url, error_context_msg=error_context_msg)
+
+ versions = []
+ while True:
+ versions += [v['version'] for v in data[results_key]]
+
+ next_link = data
+ for path in pagination_path:
+ next_link = next_link.get(path, {})
+
+ if not next_link:
+ break
+ elif relative_link:
+ # TODO: This assumes the pagination result is relative to the root server. Will need to be verified
+ # with someone who knows the AH API.
+ next_link = n_url.replace(urlparse(n_url).path, next_link)
+
+ data = self._call_galaxy(to_native(next_link, errors='surrogate_or_strict'),
+ error_context_msg=error_context_msg)
+
+ return versions
diff --git a/lib/ansible/galaxy/collection.py b/lib/ansible/galaxy/collection.py
new file mode 100644
index 00000000..054a8a57
--- /dev/null
+++ b/lib/ansible/galaxy/collection.py
@@ -0,0 +1,1551 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import fnmatch
+import json
+import operator
+import os
+import shutil
+import stat
+import sys
+import tarfile
+import tempfile
+import threading
+import time
+import yaml
+
+from collections import namedtuple
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+from hashlib import sha256
+from io import BytesIO
+from yaml.error import YAMLError
+
+try:
+ import queue
+except ImportError:
+ import Queue as queue # Python 2
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.galaxy import get_collections_galaxy_meta_info
+from ansible.galaxy.api import CollectionVersionMetadata, GalaxyError
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils import six
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.galaxy import scm_archive_collection
+from ansible.utils.hashing import secure_hash, secure_hash_s
+from ansible.utils.version import SemanticVersion
+from ansible.module_utils.urls import open_url
+
+urlparse = six.moves.urllib.parse.urlparse
+urldefrag = six.moves.urllib.parse.urldefrag
+urllib_error = six.moves.urllib.error
+
+
+display = Display()
+
+MANIFEST_FORMAT = 1
+
+ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
+
+
+class CollectionRequirement:
+
+ _FILE_MAPPING = [(b'MANIFEST.json', 'manifest_file'), (b'FILES.json', 'files_file')]
+
+ def __init__(self, namespace, name, b_path, api, versions, requirement, force, parent=None, metadata=None,
+ files=None, skip=False, allow_pre_releases=False):
+ """Represents a collection requirement, the versions that are available to be installed as well as any
+ dependencies the collection has.
+
+ :param namespace: The collection namespace.
+ :param name: The collection name.
+ :param b_path: Byte str of the path to the collection tarball if it has already been downloaded.
+ :param api: The GalaxyAPI to use if the collection is from Galaxy.
+ :param versions: A list of versions of the collection that are available.
+ :param requirement: The version requirement string used to verify the list of versions fit the requirements.
+ :param force: Whether the force flag applied to the collection.
+ :param parent: The name of the parent the collection is a dependency of.
+ :param metadata: The galaxy.api.CollectionVersionMetadata that has already been retrieved from the Galaxy
+ server.
+ :param files: The files that exist inside the collection. This is based on the FILES.json file inside the
+ collection artifact.
+ :param skip: Whether to skip installing the collection. Should be set if the collection is already installed
+ and force is not set.
+        :param allow_pre_releases: Whether to allow pre-release versions of collections.
+ """
+ self.namespace = namespace
+ self.name = name
+ self.b_path = b_path
+ self.api = api
+ self._versions = set(versions)
+ self.force = force
+ self.skip = skip
+ self.required_by = []
+ self.allow_pre_releases = allow_pre_releases
+
+ self._metadata = metadata
+ self._files = files
+
+ self.add_requirement(parent, requirement)
+
+ def __str__(self):
+ return to_native("%s.%s" % (self.namespace, self.name))
+
+ def __unicode__(self):
+ return u"%s.%s" % (self.namespace, self.name)
+
+ @property
+ def metadata(self):
+ self._get_metadata()
+ return self._metadata
+
+ @property
+ def versions(self):
+ if self.allow_pre_releases:
+ return self._versions
+ return set(v for v in self._versions if v == '*' or not SemanticVersion(v).is_prerelease)
+
+ @versions.setter
+ def versions(self, value):
+ self._versions = set(value)
+
+ @property
+ def pre_releases(self):
+ return set(v for v in self._versions if SemanticVersion(v).is_prerelease)
+
+ @property
+ def latest_version(self):
+ try:
+ return max([v for v in self.versions if v != '*'], key=SemanticVersion)
+ except ValueError: # ValueError: max() arg is an empty sequence
+ return '*'
+
+ @property
+ def dependencies(self):
+ if not self._metadata:
+ if len(self.versions) > 1:
+ return {}
+ self._get_metadata()
+
+ dependencies = self._metadata.dependencies
+
+ if dependencies is None:
+ return {}
+
+ return dependencies
+
+ @staticmethod
+ def artifact_info(b_path):
+        """Load the manifest data from the MANIFEST.json and FILES.json.
+        If the files exist, return a dict containing the keys 'files_file' and 'manifest_file'.
+
+        :param b_path: The directory of a collection.
+        """
+ info = {}
+ for b_file_name, property_name in CollectionRequirement._FILE_MAPPING:
+ b_file_path = os.path.join(b_path, b_file_name)
+ if not os.path.exists(b_file_path):
+ continue
+ with open(b_file_path, 'rb') as file_obj:
+ try:
+ info[property_name] = json.loads(to_text(file_obj.read(), errors='surrogate_or_strict'))
+ except ValueError:
+ raise AnsibleError("Collection file at '%s' does not contain a valid json string." % to_native(b_file_path))
+ return info
+
+ @staticmethod
+ def galaxy_metadata(b_path):
+ """Generate the manifest data from the galaxy.yml file.
+ If the galaxy.yml exists, return a dictionary containing the keys 'files_file' and 'manifest_file'.
+
+ :param b_path: The directory of a collection.
+ """
+ b_galaxy_path = get_galaxy_metadata_path(b_path)
+ info = {}
+ if os.path.exists(b_galaxy_path):
+ collection_meta = _get_galaxy_yml(b_galaxy_path)
+ info['files_file'] = _build_files_manifest(b_path, collection_meta['namespace'], collection_meta['name'], collection_meta['build_ignore'])
+ info['manifest_file'] = _build_manifest(**collection_meta)
+ return info
+
+ @staticmethod
+ def collection_info(b_path, fallback_metadata=False):
+ info = CollectionRequirement.artifact_info(b_path)
+ if info or not fallback_metadata:
+ return info
+ return CollectionRequirement.galaxy_metadata(b_path)
+
+ def add_requirement(self, parent, requirement):
+ self.required_by.append((parent, requirement))
+ new_versions = set(v for v in self.versions if self._meets_requirements(v, requirement, parent))
+ if len(new_versions) == 0:
+ if self.skip:
+ force_flag = '--force-with-deps' if parent else '--force'
+ version = self.latest_version if self.latest_version != '*' else 'unknown'
+ msg = "Cannot meet requirement %s:%s as it is already installed at version '%s'. Use %s to overwrite" \
+ % (to_text(self), requirement, version, force_flag)
+ raise AnsibleError(msg)
+ elif parent is None:
+ msg = "Cannot meet requirement %s for dependency %s" % (requirement, to_text(self))
+ else:
+ msg = "Cannot meet dependency requirement '%s:%s' for collection %s" \
+ % (to_text(self), requirement, parent)
+
+ collection_source = to_text(self.b_path, nonstring='passthru') or self.api.api_server
+ req_by = "\n".join(
+ "\t%s - '%s:%s'" % (to_text(p) if p else 'base', to_text(self), r)
+ for p, r in self.required_by
+ )
+
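+    # Illustrative: a TaskResult wrapping {'changed': True, 'rc': 0} reports
+    # is_changed() -> True and is_failed() -> False.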
+ versions = ", ".join(sorted(self.versions, key=SemanticVersion))
+ if not self.versions and self.pre_releases:
+ pre_release_msg = (
+ '\nThis collection only contains pre-releases. Utilize `--pre` to install pre-releases, or '
+ 'explicitly provide the pre-release version.'
+ )
+ else:
+ pre_release_msg = ''
+
+ raise AnsibleError(
+ "%s from source '%s'. Available versions before last requirement added: %s\nRequirements from:\n%s%s"
+ % (msg, collection_source, versions, req_by, pre_release_msg)
+ )
+
+ self.versions = new_versions
+
+ def download(self, b_path):
+ download_url = self._metadata.download_url
+ artifact_hash = self._metadata.artifact_sha256
+ headers = {}
+ self.api._add_auth_token(headers, download_url, required=False)
+
+ b_collection_path = _download_file(download_url, b_path, artifact_hash, self.api.validate_certs,
+ headers=headers)
+
+ return to_text(b_collection_path, errors='surrogate_or_strict')
+
+ def install(self, path, b_temp_path):
+ if self.skip:
+ display.display("Skipping '%s' as it is already installed" % to_text(self))
+ return
+
+        # not already installed, so install it
+ collection_path = os.path.join(path, self.namespace, self.name)
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+ display.display("Installing '%s:%s' to '%s'" % (to_text(self), self.latest_version, collection_path))
+
+ if self.b_path is None:
+ self.b_path = self.download(b_temp_path)
+
+ if os.path.exists(b_collection_path):
+ shutil.rmtree(b_collection_path)
+
+ if os.path.isfile(self.b_path):
+ self.install_artifact(b_collection_path, b_temp_path)
+ else:
+ self.install_scm(b_collection_path)
+
+ display.display("%s (%s) was installed successfully" % (to_text(self), self.latest_version))
+
+ def install_artifact(self, b_collection_path, b_temp_path):
+
+ try:
+ with tarfile.open(self.b_path, mode='r') as collection_tar:
+ files_member_obj = collection_tar.getmember('FILES.json')
+ with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
+ files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))
+
+ _extract_tar_file(collection_tar, 'MANIFEST.json', b_collection_path, b_temp_path)
+ _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)
+
+ for file_info in files['files']:
+ file_name = file_info['name']
+ if file_name == '.':
+ continue
+
+ if file_info['ftype'] == 'file':
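+        # e.g. for key 'changed', a loop result like
+        # {'results': [{'changed': False}, {'changed': True}]} yields True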
+ _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
+ expected_hash=file_info['chksum_sha256'])
+
+ else:
+ _extract_tar_dir(collection_tar, file_name, b_collection_path)
+
+ except Exception:
+ # Ensure we don't leave the dir behind in case of a failure.
+ shutil.rmtree(b_collection_path)
+
+ b_namespace_path = os.path.dirname(b_collection_path)
+ if not os.listdir(b_namespace_path):
+ os.rmdir(b_namespace_path)
+
+ raise
+
+ def install_scm(self, b_collection_output_path):
+ """Install the collection from source control into given dir.
+
+ Generates the Ansible collection artifact data from a galaxy.yml and installs the artifact to a directory.
+ This should follow the same pattern as build_collection, but instead of creating an artifact, install it.
+ :param b_collection_output_path: The installation directory for the collection artifact.
+ :raises AnsibleError: If no collection metadata found.
+ """
+ b_collection_path = self.b_path
+
+ b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
+ if not os.path.exists(b_galaxy_path):
+ raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
+
+ info = CollectionRequirement.galaxy_metadata(b_collection_path)
+
+ collection_manifest = info['manifest_file']
+ collection_meta = collection_manifest['collection_info']
+ file_manifest = info['files_file']
+
+ _build_collection_dir(b_collection_path, b_collection_output_path, collection_manifest, file_manifest)
+
+ collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
+ collection_manifest['collection_info']['name'])
+ display.display('Created collection for %s at %s' % (collection_name, to_text(b_collection_output_path)))
+
+ def set_latest_version(self):
+ self.versions = set([self.latest_version])
+ self._get_metadata()
+
+ def verify(self, remote_collection, path, b_temp_tar_path):
+ if not self.skip:
+ display.display("'%s' has not been installed, nothing to verify" % (to_text(self)))
+ return
+
+ collection_path = os.path.join(path, self.namespace, self.name)
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+
+ display.vvv("Verifying '%s:%s'." % (to_text(self), self.latest_version))
+ display.vvv("Installed collection found at '%s'" % collection_path)
+ display.vvv("Remote collection found at '%s'" % remote_collection.metadata.download_url)
+
+ # Compare installed version versus requirement version
+ if self.latest_version != remote_collection.latest_version:
+ err = "%s has the version '%s' but is being compared to '%s'" % (to_text(self), self.latest_version, remote_collection.latest_version)
+ display.display(err)
+ return
+
+ modified_content = []
+
+ # Verify the manifest hash matches before verifying the file manifest
+ expected_hash = _get_tar_file_hash(b_temp_tar_path, 'MANIFEST.json')
+ self._verify_file_hash(b_collection_path, 'MANIFEST.json', expected_hash, modified_content)
+ manifest = _get_json_from_tar_file(b_temp_tar_path, 'MANIFEST.json')
+
+ # Use the manifest to verify the file manifest checksum
+ file_manifest_data = manifest['file_manifest_file']
+ file_manifest_filename = file_manifest_data['name']
+ expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]
+
+ # Verify the file manifest before using it to verify individual files
+ self._verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
+ file_manifest = _get_json_from_tar_file(b_temp_tar_path, file_manifest_filename)
+
+ # Use the file manifest to verify individual file checksums
+ for manifest_data in file_manifest['files']:
+ if manifest_data['ftype'] == 'file':
+ expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
+ self._verify_file_hash(b_collection_path, manifest_data['name'], expected_hash, modified_content)
+
+ if modified_content:
+ display.display("Collection %s contains modified content in the following files:" % to_text(self))
+ display.display(to_text(self))
+ display.vvv(to_text(self.b_path))
+ for content_change in modified_content:
+ display.display(' %s' % content_change.filename)
+ display.vvv(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed))
+ else:
+ display.vvv("Successfully verified that checksums for '%s:%s' match the remote collection" % (to_text(self), self.latest_version))
+
+ def _verify_file_hash(self, b_path, filename, expected_hash, error_queue):
+ b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')
+
+ if not os.path.isfile(b_file_path):
+ actual_hash = None
+ else:
+ with open(b_file_path, mode='rb') as file_object:
+ actual_hash = _consume_file(file_object)
+
+ if expected_hash != actual_hash:
+ error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))
+
+ def _get_metadata(self):
+ if self._metadata:
+ return
+ self._metadata = self.api.get_collection_version_metadata(self.namespace, self.name, self.latest_version)
+
+ def _meets_requirements(self, version, requirements, parent):
+ """
+        Supported version identifiers are '==', '!=', '>', '>=', '<', '<=', and '*'. Multiple requirements are delimited by ','.
+ """
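+        # Illustrative: '>=1.0.0,<2.0.0' splits into two clauses that must all
+        # be satisfied, so (hypothetical calls):
+        #   self._meets_requirements('1.5.0', '>=1.0.0,<2.0.0', None) -> True
+        #   self._meets_requirements('2.1.0', '>=1.0.0,<2.0.0', None) -> False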
+ op_map = {
+ '!=': operator.ne,
+ '==': operator.eq,
+ '=': operator.eq,
+ '>=': operator.ge,
+ '>': operator.gt,
+ '<=': operator.le,
+ '<': operator.lt,
+ }
+
+ for req in list(requirements.split(',')):
+ op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
+ op = op_map.get(req[:op_pos])
+
+ requirement = req[op_pos:]
+ if not op:
+ requirement = req
+ op = operator.eq
+
+ # In the case we are checking a new requirement on a base requirement (parent != None) we can't accept
+ # version as '*' (unknown version) unless the requirement is also '*'.
+ if parent and version == '*' and requirement != '*':
+ display.warning("Failed to validate the collection requirement '%s:%s' for %s when the existing "
+ "install does not have a version set, the collection may not work."
+ % (to_text(self), req, parent))
+ continue
+ elif requirement == '*' or version == '*':
+ continue
+
+ if not op(SemanticVersion(version), SemanticVersion.from_loose_version(LooseVersion(requirement))):
+ break
+ else:
+ return True
+
+ # The loop was broken early, it does not meet all the requirements
+ return False
+
+ @staticmethod
+ def from_tar(b_path, force, parent=None):
+ if not tarfile.is_tarfile(b_path):
+ raise AnsibleError("Collection artifact at '%s' is not a valid tar file." % to_native(b_path))
+
+ info = {}
+ with tarfile.open(b_path, mode='r') as collection_tar:
+ for b_member_name, property_name in CollectionRequirement._FILE_MAPPING:
+ n_member_name = to_native(b_member_name)
+ try:
+ member = collection_tar.getmember(n_member_name)
+ except KeyError:
+ raise AnsibleError("Collection at '%s' does not contain the required file %s."
+ % (to_native(b_path), n_member_name))
+
+ with _tarfile_extract(collection_tar, member) as (dummy, member_obj):
+ try:
+ info[property_name] = json.loads(to_text(member_obj.read(), errors='surrogate_or_strict'))
+ except ValueError:
+ raise AnsibleError("Collection tar file member %s does not contain a valid json string."
+ % n_member_name)
+
+ meta = info['manifest_file']['collection_info']
+ files = info['files_file']['files']
+
+ namespace = meta['namespace']
+ name = meta['name']
+ version = meta['version']
+ meta = CollectionVersionMetadata(namespace, name, version, None, None, meta['dependencies'])
+
+        allow_pre_release = SemanticVersion(version).is_prerelease
+
+ return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
+ metadata=meta, files=files, allow_pre_releases=allow_pre_release)
+
+ @staticmethod
+ def from_path(b_path, force, parent=None, fallback_metadata=False, skip=True):
+ info = CollectionRequirement.collection_info(b_path, fallback_metadata)
+
+ allow_pre_release = False
+ if 'manifest_file' in info:
+ manifest = info['manifest_file']['collection_info']
+ namespace = manifest['namespace']
+ name = manifest['name']
+ version = to_text(manifest['version'], errors='surrogate_or_strict')
+
+ try:
+ _v = SemanticVersion()
+ _v.parse(version)
+ if _v.is_prerelease:
+ allow_pre_release = True
+ except ValueError:
+ display.warning("Collection at '%s' does not have a valid version set, falling back to '*'. Found "
+ "version: '%s'" % (to_text(b_path), version))
+ version = '*'
+
+ dependencies = manifest['dependencies']
+ else:
+ if fallback_metadata:
+ warning = "Collection at '%s' does not have a galaxy.yml or a MANIFEST.json file, cannot detect version."
+ else:
+ warning = "Collection at '%s' does not have a MANIFEST.json file, cannot detect version."
+ display.warning(warning % to_text(b_path))
+ parent_dir, name = os.path.split(to_text(b_path, errors='surrogate_or_strict'))
+ namespace = os.path.split(parent_dir)[1]
+
+ version = '*'
+ dependencies = {}
+
+ meta = CollectionVersionMetadata(namespace, name, version, None, None, dependencies)
+
+ files = info.get('files_file', {}).get('files', {})
+
+ return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
+ metadata=meta, files=files, skip=skip, allow_pre_releases=allow_pre_release)
+
+ @staticmethod
+ def from_name(collection, apis, requirement, force, parent=None, allow_pre_release=False):
+ namespace, name = collection.split('.', 1)
+ galaxy_meta = None
+
+ for api in apis:
+ try:
+ if not (requirement == '*' or requirement.startswith('<') or requirement.startswith('>') or
+ requirement.startswith('!=')):
+ # Exact requirement
+ allow_pre_release = True
+
+ if requirement.startswith('='):
+ requirement = requirement.lstrip('=')
+
+ resp = api.get_collection_version_metadata(namespace, name, requirement)
+
+ galaxy_meta = resp
+ versions = [resp.version]
+ else:
+ versions = api.get_collection_versions(namespace, name)
+ except GalaxyError as err:
+ if err.http_code != 404:
+ raise
+
+ versions = []
+
+            # Automation Hub doesn't return a 404 but an empty version list, so we check for that to align
+            # AH and Galaxy behavior when the collection is not available on that server.
+ if not versions:
+ display.vvv("Collection '%s' is not available from server %s %s" % (collection, api.name,
+ api.api_server))
+ continue
+
+ display.vvv("Collection '%s' obtained from server %s %s" % (collection, api.name, api.api_server))
+ break
+ else:
+ raise AnsibleError("Failed to find collection %s:%s" % (collection, requirement))
+
+ req = CollectionRequirement(namespace, name, None, api, versions, requirement, force, parent=parent,
+ metadata=galaxy_meta, allow_pre_releases=allow_pre_release)
+ return req
+
+
+def build_collection(collection_path, output_path, force):
+ """Creates the Ansible collection artifact in a .tar.gz file.
+
+ :param collection_path: The path to the collection to build. This should be the directory that contains the
+ galaxy.yml file.
+ :param output_path: The path to create the collection build artifact. This should be a directory.
+ :param force: Whether to overwrite an existing collection build artifact or fail.
+ :return: The path to the collection build artifact.
+ """
+ b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
+ b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
+ if not os.path.exists(b_galaxy_path):
+ raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))
+
+ info = CollectionRequirement.galaxy_metadata(b_collection_path)
+
+ collection_manifest = info['manifest_file']
+ collection_meta = collection_manifest['collection_info']
+ file_manifest = info['files_file']
+
+ collection_output = os.path.join(output_path, "%s-%s-%s.tar.gz" % (collection_meta['namespace'],
+ collection_meta['name'],
+ collection_meta['version']))
+
+ b_collection_output = to_bytes(collection_output, errors='surrogate_or_strict')
+ if os.path.exists(b_collection_output):
+ if os.path.isdir(b_collection_output):
+ raise AnsibleError("The output collection artifact '%s' already exists, "
+ "but is a directory - aborting" % to_native(collection_output))
+ elif not force:
+ raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
+ "the collection artifact." % to_native(collection_output))
+
+ _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
+ return collection_output
+
+
+def download_collections(collections, output_path, apis, validate_certs, no_deps, allow_pre_release):
+ """Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
+ file of the downloaded requirements to be used for an install.
+
+ :param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
+ :param output_path: The path to download the collections to.
+    :param apis: A list of GalaxyAPIs to query when searching for a collection.
+ :param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
+ :param no_deps: Ignore any collection dependencies and only download the base requirements.
+ :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
+ """
+ with _tempdir() as b_temp_path:
+ display.display("Process install dependency map")
+ with _display_progress():
+ dep_map = _build_dependency_map(collections, [], b_temp_path, apis, validate_certs, True, True, no_deps,
+ allow_pre_release=allow_pre_release)
+
+ requirements = []
+ display.display("Starting collection download process to '%s'" % output_path)
+ with _display_progress():
+ for name, requirement in dep_map.items():
+ collection_filename = "%s-%s-%s.tar.gz" % (requirement.namespace, requirement.name,
+ requirement.latest_version)
+ dest_path = os.path.join(output_path, collection_filename)
+ requirements.append({'name': collection_filename, 'version': requirement.latest_version})
+
+ display.display("Downloading collection '%s' to '%s'" % (name, dest_path))
+
+ if requirement.api is None and requirement.b_path and os.path.isfile(requirement.b_path):
+ shutil.copy(requirement.b_path, to_bytes(dest_path, errors='surrogate_or_strict'))
+ elif requirement.api is None and requirement.b_path:
+                temp_path = to_text(b_temp_path, errors='surrogate_or_strict')
+ temp_download_path = build_collection(requirement.b_path, temp_path, True)
+ shutil.move(to_bytes(temp_download_path, errors='surrogate_or_strict'),
+ to_bytes(dest_path, errors='surrogate_or_strict'))
+ else:
+ b_temp_download_path = requirement.download(b_temp_path)
+ shutil.move(b_temp_download_path, to_bytes(dest_path, errors='surrogate_or_strict'))
+
+ display.display("%s (%s) was downloaded successfully" % (name, requirement.latest_version))
+
+ requirements_path = os.path.join(output_path, 'requirements.yml')
+ display.display("Writing requirements.yml file of downloaded collections to '%s'" % requirements_path)
+ with open(to_bytes(requirements_path, errors='surrogate_or_strict'), mode='wb') as req_fd:
+ req_fd.write(to_bytes(yaml.safe_dump({'collections': requirements}), errors='surrogate_or_strict'))
+
+
+def publish_collection(collection_path, api, wait, timeout):
+ """Publish an Ansible collection tarball into an Ansible Galaxy server.
+
+ :param collection_path: The path to the collection tarball to publish.
+ :param api: A GalaxyAPI to publish the collection to.
+ :param wait: Whether to wait until the import process is complete.
+ :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
+ """
+ import_uri = api.publish_collection(collection_path)
+
+ if wait:
+ # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
+ # always the task_id, though.
+ # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
+ # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
+ task_id = None
+ for path_segment in reversed(import_uri.split('/')):
+ if path_segment:
+ task_id = path_segment
+ break
+
+ if not task_id:
+ raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)
+
+ display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
+ with _display_progress():
+ api.wait_import_task(task_id, timeout)
+ display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
+ % (api.name, api.api_server))
+ else:
+ display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
+ "completed due to --no-wait being set. Import task results can be found at %s"
+ % (api.name, api.api_server, import_uri))
+
+
+def install_collections(collections, output_path, apis, validate_certs, ignore_errors, no_deps, force, force_deps,
+ allow_pre_release=False):
+ """Install Ansible collections to the path specified.
+
+    :param collections: The collections to install, a list of tuples with (name, requirement, source server, requirement type).
+ :param output_path: The path to install the collections to.
+ :param apis: A list of GalaxyAPIs to query when searching for a collection.
+ :param validate_certs: Whether to validate the certificates if downloading a tarball.
+ :param ignore_errors: Whether to ignore any errors when installing the collection.
+ :param no_deps: Ignore any collection dependencies and only install the base requirements.
+ :param force: Re-install a collection if it has already been installed.
+ :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
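+    :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.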
+ """
+ existing_collections = find_existing_collections(output_path, fallback_metadata=True)
+
+ with _tempdir() as b_temp_path:
+ display.display("Process install dependency map")
+ with _display_progress():
+ dependency_map = _build_dependency_map(collections, existing_collections, b_temp_path, apis,
+ validate_certs, force, force_deps, no_deps,
+ allow_pre_release=allow_pre_release)
+
+ display.display("Starting collection install process")
+ with _display_progress():
+ for collection in dependency_map.values():
+ try:
+ collection.install(output_path, b_temp_path)
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning("Failed to install collection %s but skipping due to --ignore-errors being set. "
+ "Error: %s" % (to_text(collection), to_text(err)))
+ else:
+ raise
+
+
+def validate_collection_name(name):
+ """Validates the collection name as an input from the user or a requirements file fit the requirements.
+
+ :param name: The input name with optional range specifier split by ':'.
+ :return: The input value, required for argparse validation.
+ """
+ collection, dummy, dummy = name.partition(':')
+ if AnsibleCollectionRef.is_valid_collection_name(collection):
+ return name
+
+ raise AnsibleError("Invalid collection name '%s', "
+ "name must be in the format <namespace>.<collection>. \n"
+ "Please make sure namespace and collection name contains "
+ "characters from [a-zA-Z0-9_] only." % name)
+
+
+def validate_collection_path(collection_path):
+ """Ensure a given path ends with 'ansible_collections'
+
+ :param collection_path: The path that should end in 'ansible_collections'
+ :return: collection_path ending in 'ansible_collections' if it does not already.
+ """
+
+ if os.path.split(collection_path)[1] != 'ansible_collections':
+ return os.path.join(collection_path, 'ansible_collections')
+
+ return collection_path
+
+
+def verify_collections(collections, search_paths, apis, validate_certs, ignore_errors, allow_pre_release=False):
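+    """Verify the integrity of locally installed collections against their source on a Galaxy server.
+
+    :param collections: The collections to verify, a list of tuples with the collection name and version requirement.
+    :param search_paths: The paths to search for the locally installed collections.
+    :param apis: A list of GalaxyAPIs to query when searching for a collection.
+    :param validate_certs: Whether to validate the certificate when downloading the remote tarball.
+    :param ignore_errors: Whether to ignore any errors when verifying the collection.
+    :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
+    """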
+
+ with _display_progress():
+ with _tempdir() as b_temp_path:
+ for collection in collections:
+ try:
+
+ local_collection = None
+ b_collection = to_bytes(collection[0], errors='surrogate_or_strict')
+
+ if os.path.isfile(b_collection) or urlparse(collection[0]).scheme.lower() in ['http', 'https'] or len(collection[0].split('.')) != 2:
+ raise AnsibleError(message="'%s' is not a valid collection name. The format namespace.name is expected." % collection[0])
+
+ collection_name = collection[0]
+ namespace, name = collection_name.split('.')
+ collection_version = collection[1]
+
+ # Verify local collection exists before downloading it from a galaxy server
+ for search_path in search_paths:
+ b_search_path = to_bytes(os.path.join(search_path, namespace, name), errors='surrogate_or_strict')
+ if os.path.isdir(b_search_path):
+ if not os.path.isfile(os.path.join(to_text(b_search_path, errors='surrogate_or_strict'), 'MANIFEST.json')):
+ raise AnsibleError(
+ message="Collection %s does not appear to have a MANIFEST.json. " % collection_name +
+ "A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy."
+ )
+ local_collection = CollectionRequirement.from_path(b_search_path, False)
+ break
+ if local_collection is None:
+ raise AnsibleError(message='Collection %s is not installed in any of the collection paths.' % collection_name)
+
+ # Download collection on a galaxy server for comparison
+ try:
+ remote_collection = CollectionRequirement.from_name(collection_name, apis, collection_version, False, parent=None,
+ allow_pre_release=allow_pre_release)
+ except AnsibleError as e:
+ if e.message == 'Failed to find collection %s:%s' % (collection[0], collection[1]):
+ raise AnsibleError('Failed to find remote collection %s:%s on any of the galaxy servers' % (collection[0], collection[1]))
+ raise
+
+ download_url = remote_collection.metadata.download_url
+ headers = {}
+ remote_collection.api._add_auth_token(headers, download_url, required=False)
+ b_temp_tar_path = _download_file(download_url, b_temp_path, None, validate_certs, headers=headers)
+
+ local_collection.verify(remote_collection, search_path, b_temp_tar_path)
+
+ except AnsibleError as err:
+ if ignore_errors:
+ display.warning("Failed to verify collection %s but skipping due to --ignore-errors being set. "
+ "Error: %s" % (collection[0], to_text(err)))
+ else:
+ raise
+
+
+@contextmanager
+def _tempdir():
+ b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
+ yield b_temp_path
+ shutil.rmtree(b_temp_path)
+
+
+@contextmanager
+def _tarfile_extract(tar, member):
+ tar_obj = tar.extractfile(member)
+ yield member, tar_obj
+ tar_obj.close()
+
+
+@contextmanager
+def _display_progress():
+ config_display = C.GALAXY_DISPLAY_PROGRESS
+ display_wheel = sys.stdout.isatty() if config_display is None else config_display
+
+ if not display_wheel:
+ yield
+ return
+
+ def progress(display_queue, actual_display):
+ actual_display.debug("Starting display_progress display thread")
+ t = threading.current_thread()
+
+ while True:
+ for c in "|/-\\":
+ actual_display.display(c + "\b", newline=False)
+ time.sleep(0.1)
+
+ # Display a message from the main thread
+ while True:
+ try:
+ method, args, kwargs = display_queue.get(block=False, timeout=0.1)
+ except queue.Empty:
+ break
+ else:
+ func = getattr(actual_display, method)
+ func(*args, **kwargs)
+
+ if getattr(t, "finish", False):
+ actual_display.debug("Received end signal for display_progress display thread")
+ return
+
+ class DisplayThread(object):
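+        # A stand-in for Display that queues every call so the spinner thread can replay
+        # it without interleaving output with the progress wheel.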
+
+ def __init__(self, display_queue):
+ self.display_queue = display_queue
+
+ def __getattr__(self, attr):
+ def call_display(*args, **kwargs):
+ self.display_queue.put((attr, args, kwargs))
+
+ return call_display
+
+    # Temporarily override the global display class with our own which adds the calls to a queue for the thread to call.
+ global display
+ old_display = display
+ try:
+ display_queue = queue.Queue()
+ display = DisplayThread(display_queue)
+ t = threading.Thread(target=progress, args=(display_queue, old_display))
+ t.daemon = True
+ t.start()
+
+ try:
+ yield
+ finally:
+ t.finish = True
+ t.join()
+ except Exception:
+        # The exception is re-raised so we can be sure the thread is finished and not using the display anymore
+ raise
+ finally:
+ display = old_display
+
+
+def _get_galaxy_yml(b_galaxy_yml_path):
+ meta_info = get_collections_galaxy_meta_info()
+
+ mandatory_keys = set()
+ string_keys = set()
+ list_keys = set()
+ dict_keys = set()
+
+ for info in meta_info:
+ if info.get('required', False):
+ mandatory_keys.add(info['key'])
+
+ key_list_type = {
+ 'str': string_keys,
+ 'list': list_keys,
+ 'dict': dict_keys,
+ }[info.get('type', 'str')]
+ key_list_type.add(info['key'])
+
+ all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))
+
+ try:
+ with open(b_galaxy_yml_path, 'rb') as g_yaml:
+ galaxy_yml = yaml.safe_load(g_yaml)
+ except YAMLError as err:
+ raise AnsibleError("Failed to parse the galaxy.yml at '%s' with the following error:\n%s"
+ % (to_native(b_galaxy_yml_path), to_native(err)))
+
+ set_keys = set(galaxy_yml.keys())
+ missing_keys = mandatory_keys.difference(set_keys)
+ if missing_keys:
+ raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
+ % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))
+
+ extra_keys = set_keys.difference(all_keys)
+ if len(extra_keys) > 0:
+ display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
+ % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))
+
+ # Add the defaults if they have not been set
+ for optional_string in string_keys:
+ if optional_string not in galaxy_yml:
+ galaxy_yml[optional_string] = None
+
+ for optional_list in list_keys:
+ list_val = galaxy_yml.get(optional_list, None)
+
+ if list_val is None:
+ galaxy_yml[optional_list] = []
+ elif not isinstance(list_val, list):
+ galaxy_yml[optional_list] = [list_val]
+
+ for optional_dict in dict_keys:
+ if optional_dict not in galaxy_yml:
+ galaxy_yml[optional_dict] = {}
+
+ # license is a builtin var in Python, to avoid confusion we just rename it to license_ids
+ galaxy_yml['license_ids'] = galaxy_yml['license']
+ del galaxy_yml['license']
+
+ return galaxy_yml
+
+
+def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
+ # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
+ # patterns can be extended by the build_ignore key in galaxy.yml
+ b_ignore_patterns = [
+ b'galaxy.yml',
+ b'galaxy.yaml',
+ b'.git',
+ b'*.pyc',
+ b'*.retry',
+ b'tests/output', # Ignore ansible-test result output directory.
+ to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)), # Ignores previously built artifacts in the root dir.
+ ]
+ b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
+ b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])
+
+ entry_template = {
+ 'name': None,
+ 'ftype': None,
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': MANIFEST_FORMAT
+ }
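+    # Every file and directory in the collection becomes one FILES.json entry based on this
+    # template; a sha256 checksum is recorded for regular files only.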
+ manifest = {
+ 'files': [
+ {
+ 'name': '.',
+ 'ftype': 'dir',
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': MANIFEST_FORMAT,
+ },
+ ],
+ 'format': MANIFEST_FORMAT,
+ }
+
+ def _walk(b_path, b_top_level_dir):
+ for b_item in os.listdir(b_path):
+ b_abs_path = os.path.join(b_path, b_item)
+ b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
+ b_rel_path = os.path.join(b_rel_base_dir, b_item)
+ rel_path = to_text(b_rel_path, errors='surrogate_or_strict')
+
+ if os.path.isdir(b_abs_path):
+                if b_item in b_ignore_dirs or \
+ any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+ if os.path.islink(b_abs_path):
+ b_link_target = os.path.realpath(b_abs_path)
+
+ if not _is_child_path(b_link_target, b_top_level_dir):
+ display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
+ % to_text(b_abs_path))
+ continue
+
+ manifest_entry = entry_template.copy()
+ manifest_entry['name'] = rel_path
+ manifest_entry['ftype'] = 'dir'
+
+ manifest['files'].append(manifest_entry)
+
+ if not os.path.islink(b_abs_path):
+ _walk(b_abs_path, b_top_level_dir)
+ else:
+ if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
+ display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
+ continue
+
+                # Handling of file symlinks occurs in _build_collection_tar; the manifest entry for a symlink
+                # is the same as for a normal file.
+ manifest_entry = entry_template.copy()
+ manifest_entry['name'] = rel_path
+ manifest_entry['ftype'] = 'file'
+ manifest_entry['chksum_type'] = 'sha256'
+ manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)
+
+ manifest['files'].append(manifest_entry)
+
+ _walk(b_collection_path, b_collection_path)
+
+ return manifest
+
+
+def _build_manifest(namespace, name, version, authors, readme, tags, description, license_ids, license_file,
+ dependencies, repository, documentation, homepage, issues, **kwargs):
+
+ manifest = {
+ 'collection_info': {
+ 'namespace': namespace,
+ 'name': name,
+ 'version': version,
+ 'authors': authors,
+ 'readme': readme,
+ 'tags': tags,
+ 'description': description,
+ 'license': license_ids,
+ 'license_file': license_file if license_file else None, # Handle galaxy.yml having an empty string (None)
+ 'dependencies': dependencies,
+ 'repository': repository,
+ 'documentation': documentation,
+ 'homepage': homepage,
+ 'issues': issues,
+ },
+ 'file_manifest_file': {
+ 'name': 'FILES.json',
+ 'ftype': 'file',
+ 'chksum_type': 'sha256',
+ 'chksum_sha256': None, # Filled out in _build_collection_tar
+ 'format': MANIFEST_FORMAT
+ },
+ 'format': MANIFEST_FORMAT,
+ }
+
+ return manifest
+
+
+def _build_collection_tar(b_collection_path, b_tar_path, collection_manifest, file_manifest):
+ """Build a tar.gz collection artifact from the manifest data."""
+ files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
+ collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
+ collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
+
+ with _tempdir() as b_temp_path:
+ b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))
+
+ with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
+ # Add the MANIFEST.json and FILES.json file to the archive
+ for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
+ b_io = BytesIO(b)
+ tar_info = tarfile.TarInfo(name)
+ tar_info.size = len(b)
+ tar_info.mtime = time.time()
+ tar_info.mode = 0o0644
+ tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ for file_info in file_manifest['files']:
+ if file_info['name'] == '.':
+ continue
+
+ # arcname expects a native string, cannot be bytes
+ filename = to_native(file_info['name'], errors='surrogate_or_strict')
+ b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))
+
+ def reset_stat(tarinfo):
+ if tarinfo.type != tarfile.SYMTYPE:
+ existing_is_exec = tarinfo.mode & stat.S_IXUSR
+ tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
+ tarinfo.uid = tarinfo.gid = 0
+ tarinfo.uname = tarinfo.gname = ''
+
+ return tarinfo
+
+ if os.path.islink(b_src_path):
+ b_link_target = os.path.realpath(b_src_path)
+ if _is_child_path(b_link_target, b_collection_path):
+ b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))
+
+ tar_info = tarfile.TarInfo(filename)
+ tar_info.type = tarfile.SYMTYPE
+ tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
+ tar_info = reset_stat(tar_info)
+ tar_file.addfile(tarinfo=tar_info)
+
+ continue
+
+ # Dealing with a normal file, just add it by name.
+ tar_file.add(os.path.realpath(b_src_path), arcname=filename, recursive=False, filter=reset_stat)
+
+ shutil.copy(b_tar_filepath, b_tar_path)
+ collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
+ collection_manifest['collection_info']['name'])
+ display.display('Created collection for %s at %s' % (collection_name, to_text(b_tar_path)))
+
+
+def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
+ """Build a collection directory from the manifest data.
+
+ This should follow the same pattern as _build_collection_tar.
+ """
+ os.makedirs(b_collection_output, mode=0o0755)
+
+ files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
+ collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
+ collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')
+
+ # Write contents to the files
+ for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
+ b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
+ with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
+ shutil.copyfileobj(b_io, file_obj)
+
+ os.chmod(b_path, 0o0644)
+
+ base_directories = []
+ for file_info in file_manifest['files']:
+ if file_info['name'] == '.':
+ continue
+
+ src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
+ dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))
+
+ if any(src_file.startswith(directory) for directory in base_directories):
+ continue
+
+ existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR
+ mode = 0o0755 if existing_is_exec else 0o0644
+
+ if os.path.isdir(src_file):
+ mode = 0o0755
+ base_directories.append(src_file)
+ shutil.copytree(src_file, dest_file)
+ else:
+ shutil.copyfile(src_file, dest_file)
+
+ os.chmod(dest_file, mode)
+
+
+def find_existing_collections(path, fallback_metadata=False):
+ collections = []
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ for b_namespace in os.listdir(b_path):
+ b_namespace_path = os.path.join(b_path, b_namespace)
+ if os.path.isfile(b_namespace_path):
+ continue
+
+ for b_collection in os.listdir(b_namespace_path):
+ b_collection_path = os.path.join(b_namespace_path, b_collection)
+ if os.path.isdir(b_collection_path):
+ req = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=fallback_metadata)
+ display.vvv("Found installed collection %s:%s at '%s'" % (to_text(req), req.latest_version,
+ to_text(b_collection_path)))
+ collections.append(req)
+
+ return collections
+
+
+def _build_dependency_map(collections, existing_collections, b_temp_path, apis, validate_certs, force, force_deps,
+ no_deps, allow_pre_release=False):
+ dependency_map = {}
+
+ # First build the dependency map on the actual requirements
+ for name, version, source, req_type in collections:
+ _get_collection_info(dependency_map, existing_collections, name, version, source, b_temp_path, apis,
+ validate_certs, (force or force_deps), allow_pre_release=allow_pre_release, req_type=req_type)
+
+ checked_parents = set([to_text(c) for c in dependency_map.values() if c.skip])
+ while len(dependency_map) != len(checked_parents):
+ while not no_deps: # Only parse dependencies if no_deps was not set
+ parents_to_check = set(dependency_map.keys()).difference(checked_parents)
+
+ deps_exhausted = True
+ for parent in parents_to_check:
+ parent_info = dependency_map[parent]
+
+ if parent_info.dependencies:
+ deps_exhausted = False
+ for dep_name, dep_requirement in parent_info.dependencies.items():
+ _get_collection_info(dependency_map, existing_collections, dep_name, dep_requirement,
+ None, b_temp_path, apis, validate_certs, force_deps,
+ parent=parent, allow_pre_release=allow_pre_release)
+
+ checked_parents.add(parent)
+
+ # No extra dependencies were resolved, exit loop
+ if deps_exhausted:
+ break
+
+ # Now we have resolved the deps to our best extent, now select the latest version for collections with
+ # multiple versions found and go from there
+ deps_not_checked = set(dependency_map.keys()).difference(checked_parents)
+ for collection in deps_not_checked:
+ dependency_map[collection].set_latest_version()
+ if no_deps or len(dependency_map[collection].dependencies) == 0:
+ checked_parents.add(collection)
+
+ return dependency_map
+
+
+def _collections_from_scm(collection, requirement, b_temp_path, force, parent=None):
+ """Returns a list of collections found in the repo. If there is a galaxy.yml in the collection then just return
+ the specific collection. Otherwise, check each top-level directory for a galaxy.yml.
+
+ :param collection: URI to a git repo
+ :param requirement: The version of the artifact
+ :param b_temp_path: The temporary path to the archive of a collection
+ :param force: Whether to overwrite an existing collection or fail
+ :param parent: The name of the parent collection
+ :raises AnsibleError: if nothing found
+ :return: List of CollectionRequirement objects
+ :rtype: list
+ """
+
+ reqs = []
+ name, version, path, fragment = parse_scm(collection, requirement)
+ b_repo_root = to_bytes(name, errors='surrogate_or_strict')
+
+ b_collection_path = os.path.join(b_temp_path, b_repo_root)
+ if fragment:
+ b_fragment = to_bytes(fragment, errors='surrogate_or_strict')
+ b_collection_path = os.path.join(b_collection_path, b_fragment)
+
+ b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
+
+ err = ("%s appears to be an SCM collection source, but the required galaxy.yml was not found. "
+ "Append #path/to/collection/ to your URI (before the comma separated version, if one is specified) "
+ "to point to a directory containing the galaxy.yml or directories of collections" % collection)
+
+ display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy_path)
+ if os.path.exists(b_galaxy_path):
+ return [CollectionRequirement.from_path(b_collection_path, force, parent, fallback_metadata=True, skip=False)]
+
+ if not os.path.isdir(b_collection_path) or not os.listdir(b_collection_path):
+ raise AnsibleError(err)
+
+ for b_possible_collection in os.listdir(b_collection_path):
+ b_collection = os.path.join(b_collection_path, b_possible_collection)
+ if not os.path.isdir(b_collection):
+ continue
+ b_galaxy = get_galaxy_metadata_path(b_collection)
+ display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy)
+ if os.path.exists(b_galaxy):
+ reqs.append(CollectionRequirement.from_path(b_collection, force, parent, fallback_metadata=True, skip=False))
+ if not reqs:
+ raise AnsibleError(err)
+
+ return reqs
+
+
+def _get_collection_info(dep_map, existing_collections, collection, requirement, source, b_temp_path, apis,
+ validate_certs, force, parent=None, allow_pre_release=False, req_type=None):
+ dep_msg = ""
+ if parent:
+ dep_msg = " - as dependency of %s" % parent
+ display.vvv("Processing requirement collection '%s'%s" % (to_text(collection), dep_msg))
+
+ b_tar_path = None
+
+ is_file = (
+ req_type == 'file' or
+ (not req_type and os.path.isfile(to_bytes(collection, errors='surrogate_or_strict')))
+ )
+
+ is_url = (
+ req_type == 'url' or
+ (not req_type and urlparse(collection).scheme.lower() in ['http', 'https'])
+ )
+
+ is_scm = (
+ req_type == 'git' or
+ (not req_type and not b_tar_path and collection.startswith(('git+', 'git@')))
+ )
+
+ if is_file:
+ display.vvvv("Collection requirement '%s' is a tar artifact" % to_text(collection))
+ b_tar_path = to_bytes(collection, errors='surrogate_or_strict')
+ elif is_url:
+ display.vvvv("Collection requirement '%s' is a URL to a tar artifact" % collection)
+ try:
+ b_tar_path = _download_file(collection, b_temp_path, None, validate_certs)
+ except urllib_error.URLError as err:
+ raise AnsibleError("Failed to download collection tar from '%s': %s"
+ % (to_native(collection), to_native(err)))
+
+ if is_scm:
+ if not collection.startswith('git'):
+ collection = 'git+' + collection
+
+ name, version, path, fragment = parse_scm(collection, requirement)
+ b_tar_path = scm_archive_collection(path, name=name, version=version)
+
+ with tarfile.open(b_tar_path, mode='r') as collection_tar:
+ collection_tar.extractall(path=to_text(b_temp_path))
+
+ # Ignore requirement if it is set (it must follow semantic versioning, unlike a git version, which is any tree-ish)
+ # If the requirement was the only place version was set, requirement == version at this point
+ if requirement not in {"*", ""} and requirement != version:
+ display.warning(
+ "The collection {0} appears to be a git repository and two versions were provided: '{1}', and '{2}'. "
+ "The version {2} is being disregarded.".format(collection, version, requirement)
+ )
+ requirement = "*"
+
+ reqs = _collections_from_scm(collection, requirement, b_temp_path, force, parent)
+ for req in reqs:
+ collection_info = get_collection_info_from_req(dep_map, req)
+ update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
+ else:
+ if b_tar_path:
+ req = CollectionRequirement.from_tar(b_tar_path, force, parent=parent)
+ collection_info = get_collection_info_from_req(dep_map, req)
+ else:
+ validate_collection_name(collection)
+
+ display.vvvv("Collection requirement '%s' is the name of a collection" % collection)
+ if collection in dep_map:
+ collection_info = dep_map[collection]
+ collection_info.add_requirement(parent, requirement)
+ else:
+ apis = [source] if source else apis
+ collection_info = CollectionRequirement.from_name(collection, apis, requirement, force, parent=parent,
+ allow_pre_release=allow_pre_release)
+
+ update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
+
+
+def get_collection_info_from_req(dep_map, collection):
+ collection_name = to_text(collection)
+ if collection_name in dep_map:
+ collection_info = dep_map[collection_name]
+ collection_info.add_requirement(None, collection.latest_version)
+ else:
+ collection_info = collection
+ return collection_info
+
+
+def update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement):
+ existing = [c for c in existing_collections if to_text(c) == to_text(collection_info)]
+ if existing and not collection_info.force:
+ # Test that the installed collection fits the requirement
+ existing[0].add_requirement(parent, requirement)
+ collection_info = existing[0]
+
+ dep_map[to_text(collection_info)] = collection_info
+
+
+def parse_scm(collection, version):
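+    # For example (hypothetical URI), parse_scm('git+https://github.com/org/repo.git#collection/,1.0.0', '*')
+    # returns ('repo', '1.0.0', 'https://github.com/org/repo.git', 'collection').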
+ if ',' in collection:
+ collection, version = collection.split(',', 1)
+ elif version == '*' or not version:
+ version = 'HEAD'
+
+ if collection.startswith('git+'):
+ path = collection[4:]
+ else:
+ path = collection
+
+ path, fragment = urldefrag(path)
+ fragment = fragment.strip(os.path.sep)
+
+ if path.endswith(os.path.sep + '.git'):
+ name = path.split(os.path.sep)[-2]
+ elif '://' not in path and '@' not in path:
+ name = path
+ else:
+ name = path.split('/')[-1]
+ if name.endswith('.git'):
+ name = name[:-4]
+
+ return name, version, path, fragment
+
+
+def _download_file(url, b_path, expected_hash, validate_certs, headers=None):
+ urlsplit = os.path.splitext(to_text(url.rsplit('/', 1)[1]))
+ b_file_name = to_bytes(urlsplit[0], errors='surrogate_or_strict')
+ b_file_ext = to_bytes(urlsplit[1], errors='surrogate_or_strict')
+ b_file_path = tempfile.NamedTemporaryFile(dir=b_path, prefix=b_file_name, suffix=b_file_ext, delete=False).name
+
+ display.display("Downloading %s to %s" % (url, to_text(b_path)))
+    # Galaxy redirects downloads to S3, which rejects the request if an Authorization header is attached,
+    # so don't forward that header on redirect
+ resp = open_url(to_native(url, errors='surrogate_or_strict'), validate_certs=validate_certs, headers=headers,
+ unredirected_headers=['Authorization'], http_agent=user_agent())
+
+ with open(b_file_path, 'wb') as download_file:
+ actual_hash = _consume_file(resp, download_file)
+
+ if expected_hash:
+ display.vvvv("Validating downloaded file hash %s with expected hash %s" % (actual_hash, expected_hash))
+ if expected_hash != actual_hash:
+ raise AnsibleError("Mismatch artifact hash with downloaded file")
+
+ return b_file_path
+
+
+def _extract_tar_dir(tar, dirname, b_dest):
+ """ Extracts a directory from a collection tar. """
+ member_names = [to_native(dirname, errors='surrogate_or_strict')]
+
+ # Create list of members with and without trailing separator
+ if not member_names[-1].endswith(os.path.sep):
+ member_names.append(member_names[-1] + os.path.sep)
+
+    # Try all of the member names and stop on the first one we are able to successfully get
+ for member in member_names:
+ try:
+ tar_member = tar.getmember(member)
+ except KeyError:
+ continue
+ break
+ else:
+ # If we still can't find the member, raise a nice error.
+ raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict'))
+
+ b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))
+
+ b_parent_path = os.path.dirname(b_dir_path)
+ try:
+ os.makedirs(b_parent_path, mode=0o0755)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ if tar_member.type == tarfile.SYMTYPE:
+ b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
+ if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
+ raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
+ "collection '%s'" % (to_native(dirname), b_link_path))
+
+ os.symlink(b_link_path, b_dir_path)
+
+ else:
+ if not os.path.isdir(b_dir_path):
+ os.mkdir(b_dir_path, 0o0755)
+
+
+def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
+ """ Extracts a file from a collection tar. """
+ with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
+ if tar_member.type == tarfile.SYMTYPE:
+ actual_hash = _consume_file(tar_obj)
+
+ else:
+ with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
+ actual_hash = _consume_file(tar_obj, tmpfile_obj)
+
+ if expected_hash and actual_hash != expected_hash:
+ raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
+ % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))
+
+ b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
+ b_parent_dir = os.path.dirname(b_dest_filepath)
+ if not _is_child_path(b_parent_dir, b_dest):
+ raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
+ % to_native(filename, errors='surrogate_or_strict'))
+
+ if not os.path.exists(b_parent_dir):
+ # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
+ # makes sure we create the parent directory even if it wasn't set in the metadata.
+ os.makedirs(b_parent_dir, mode=0o0755)
+
+ if tar_member.type == tarfile.SYMTYPE:
+ b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
+ if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
+ raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
+ "collection '%s'" % (to_native(filename), b_link_path))
+
+ os.symlink(b_link_path, b_dest_filepath)
+
+ else:
+ shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)
+
+ # Default to rw-r--r-- and only add execute if the tar file has execute.
+ tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
+ new_mode = 0o644
+ if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
+ new_mode |= 0o0111
+
+ os.chmod(b_dest_filepath, new_mode)
+
+
+def _get_tar_file_member(tar, filename):
+ n_filename = to_native(filename, errors='surrogate_or_strict')
+ try:
+ member = tar.getmember(n_filename)
+ except KeyError:
+ raise AnsibleError("Collection tar at '%s' does not contain the expected file '%s'." % (
+ to_native(tar.name),
+ n_filename))
+
+ return _tarfile_extract(tar, member)
+
+
+def _get_json_from_tar_file(b_path, filename):
+ file_contents = ''
+
+ with tarfile.open(b_path, mode='r') as collection_tar:
+ with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
+ bufsize = 65536
+ data = tar_obj.read(bufsize)
+ while data:
+ file_contents += to_text(data)
+ data = tar_obj.read(bufsize)
+
+ return json.loads(file_contents)
+
+
+def _get_tar_file_hash(b_path, filename):
+ with tarfile.open(b_path, mode='r') as collection_tar:
+ with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
+ return _consume_file(tar_obj)
+
+
+def _is_child_path(path, parent_path, link_name=None):
+ """ Checks that path is a path within the parent_path specified. """
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if link_name and not os.path.isabs(b_path):
+ # If link_name is specified, path is the source of the link and we need to resolve the absolute path.
+ b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
+ b_path = os.path.abspath(os.path.join(b_link_dir, b_path))
+
+ b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
+ return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))
+
+
+def _consume_file(read_from, write_to=None):
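+    # Stream in 64 KiB chunks so large artifacts are hashed, and optionally copied,
+    # without loading the whole file into memory.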
+ bufsize = 65536
+ sha256_digest = sha256()
+ data = read_from.read(bufsize)
+ while data:
+ if write_to is not None:
+ write_to.write(data)
+ write_to.flush()
+ sha256_digest.update(data)
+ data = read_from.read(bufsize)
+
+ return sha256_digest.hexdigest()
+
+
+def get_galaxy_metadata_path(b_path):
+ return os.path.join(b_path, b'galaxy.yml')
diff --git a/lib/ansible/galaxy/data/apb/.travis.yml b/lib/ansible/galaxy/data/apb/.travis.yml
new file mode 100644
index 00000000..44c0ba40
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/.travis.yml
@@ -0,0 +1,25 @@
+---
+services: docker
+sudo: required
+language: python
+python:
+ - '2.7'
+
+env:
+ - OPENSHIFT_VERSION=v3.9.0
+ - KUBERNETES_VERSION=v1.9.0
+
+script:
+ # Configure test values
+ - export apb_name=APB_NAME
+
+ # Download test shim.
+ - wget -O ${PWD}/apb-test.sh https://raw.githubusercontent.com/ansibleplaybookbundle/apb-test-shim/master/apb-test.sh
+ - chmod +x ${PWD}/apb-test.sh
+
+ # Run tests.
+ - ${PWD}/apb-test.sh
+
+# Uncomment to allow travis to notify galaxy
+# notifications:
+# webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/lib/ansible/galaxy/data/apb/Dockerfile.j2 b/lib/ansible/galaxy/data/apb/Dockerfile.j2
new file mode 100644
index 00000000..4d99a8b0
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/Dockerfile.j2
@@ -0,0 +1,9 @@
+FROM ansibleplaybookbundle/apb-base
+
+LABEL "com.redhat.apb.spec"=\
+""
+
+COPY playbooks /opt/apb/actions
+COPY . /opt/ansible/roles/{{ role_name }}
+RUN chmod -R g=u /opt/{ansible,apb}
+USER apb
diff --git a/lib/ansible/galaxy/data/apb/Makefile.j2 b/lib/ansible/galaxy/data/apb/Makefile.j2
new file mode 100644
index 00000000..ebeaa61f
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/Makefile.j2
@@ -0,0 +1,21 @@
+DOCKERHOST = DOCKERHOST
+DOCKERORG = DOCKERORG
+IMAGENAME = {{ role_name }}
+TAG = latest
+USER=$(shell id -u)
+PWD=$(shell pwd)
+build_and_push: apb_build docker_push apb_push
+
+.PHONY: apb_build
+apb_build:
+ docker run --rm --privileged -v $(PWD):/mnt:z -v $(HOME)/.kube:/.kube -v /var/run/docker.sock:/var/run/docker.sock -u $(USER) docker.io/ansibleplaybookbundle/apb-tools:latest prepare
+ docker build -t $(DOCKERHOST)/$(DOCKERORG)/$(IMAGENAME):$(TAG) .
+
+.PHONY: docker_push
+docker_push:
+ docker push $(DOCKERHOST)/$(DOCKERORG)/$(IMAGENAME):$(TAG)
+
+.PHONY: apb_push
+apb_push:
+ docker run --rm --privileged -v $(PWD):/mnt:z -v $(HOME)/.kube:/.kube -v /var/run/docker.sock:/var/run/docker.sock -u $(USER) docker.io/ansibleplaybookbundle/apb-tools:latest push
+
diff --git a/lib/ansible/galaxy/data/apb/README.md b/lib/ansible/galaxy/data/apb/README.md
new file mode 100644
index 00000000..2e350a03
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/README.md
@@ -0,0 +1,38 @@
+APB Name
+=========
+
+A brief description of the APB goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+APB Variables
+--------------
+
+A description of the settable variables for this APB should go here, including any variables that are in defaults/main.yml, vars/main.yml, apb.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other APBs/roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your APB (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/data/apb/apb.yml.j2 b/lib/ansible/galaxy/data/apb/apb.yml.j2
new file mode 100644
index 00000000..f9688019
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/apb.yml.j2
@@ -0,0 +1,13 @@
+version: '1.0.0'
+name: {{ role_name }}
+description: {{ description }}
+bindable: False
+async: optional
+metadata:
+ displayName: {{ role_name }}
+plans:
+ - name: default
+ description: This default plan deploys {{ role_name }}
+ free: True
+ metadata: {}
+ parameters: []
diff --git a/lib/ansible/galaxy/data/apb/defaults/main.yml.j2 b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
new file mode 100644
index 00000000..3818e64c
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/files/.git_keep b/lib/ansible/galaxy/data/apb/files/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/files/.git_keep
diff --git a/lib/ansible/galaxy/data/apb/handlers/main.yml.j2 b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
new file mode 100644
index 00000000..3f4c4967
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/meta/main.yml.j2 b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
new file mode 100644
index 00000000..862f8ef8
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/meta/main.yml.j2
@@ -0,0 +1,44 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ #
+ # platforms is a list of platforms, and each platform has a name and a list of versions.
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags:
+ - apb
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
new file mode 100644
index 00000000..19527310
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/playbooks/deprovision.yml.j2
@@ -0,0 +1,8 @@
+- name: "{{ role_name }} playbook to deprovision the application"
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ vars:
+ apb_action: deprovision
+ roles:
+ - role: {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2 b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
new file mode 100644
index 00000000..7b08605e
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/playbooks/provision.yml.j2
@@ -0,0 +1,8 @@
+- name: "{{ role_name }} playbook to provision the application"
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ vars:
+ apb_action: provision
+ roles:
+ - role: {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/tasks/main.yml.j2 b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
new file mode 100644
index 00000000..a9880650
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/apb/templates/.git_keep b/lib/ansible/galaxy/data/apb/templates/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/apb/tests/ansible.cfg b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
new file mode 100644
index 00000000..2f74f1b2
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tests/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+inventory=./inventory
diff --git a/lib/ansible/galaxy/data/apb/tests/inventory b/lib/ansible/galaxy/data/apb/tests/inventory
new file mode 100644
index 00000000..ea69cbf1
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tests/inventory
@@ -0,0 +1,3 @@
+localhost
+
+
diff --git a/lib/ansible/galaxy/data/apb/tests/test.yml.j2 b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
new file mode 100644
index 00000000..fb14f85c
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/tests/test.yml.j2
@@ -0,0 +1,7 @@
+---
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ tasks:
+
+ # Add tasks and assertions for testing the service here.
diff --git a/lib/ansible/galaxy/data/apb/vars/main.yml.j2 b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
new file mode 100644
index 00000000..092d511a
--- /dev/null
+++ b/lib/ansible/galaxy/data/apb/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/collections_galaxy_meta.yml b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
new file mode 100644
index 00000000..75137234
--- /dev/null
+++ b/lib/ansible/galaxy/data/collections_galaxy_meta.yml
@@ -0,0 +1,110 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# key: The name of the key as defined in galaxy.yml
+# description: Comment/info on the key to be used as the generated doc and auto generated skeleton galaxy.yml file
+# required: Whether the key is required (default is no)
+# type: The type of value that can be set, aligns to the values in the plugin formatter
+---
+- key: namespace
+ description:
+ - The namespace of the collection.
+ - This can be a company/brand/organization or product namespace under which all content lives.
+ - May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with underscores or
+ numbers and cannot contain consecutive underscores.
+ required: yes
+ type: str
+
+- key: name
+ description:
+ - The name of the collection.
+ - Has the same character restrictions as C(namespace).
+ required: yes
+ type: str
+
+- key: version
+ description:
+ - The version of the collection.
+ - Must be compatible with semantic versioning.
+ required: yes
+ type: str
+
+- key: readme
+ description:
+ - The path to the Markdown (.md) readme file.
+ - This path is relative to the root of the collection.
+ required: yes
+ type: str
+
+- key: authors
+ description:
+ - A list of the collection's content authors.
+ - Can be just the name or in the format 'Full Name <email> (url) @nicks:irc/im.site#channel'.
+ required: yes
+ type: list
+
+- key: description
+ description:
+ - A short summary description of the collection.
+ type: str
+
+- key: license
+ description:
+ - Either a single license or a list of licenses for content inside of a collection.
+    - Ansible Galaxy currently only accepts L(SPDX,https://spdx.org/licenses/) licenses.
+ - This key is mutually exclusive with C(license_file).
+ type: list
+
+- key: license_file
+ description:
+ - The path to the license file for the collection.
+ - This path is relative to the root of the collection.
+ - This key is mutually exclusive with C(license).
+ type: str
+
+- key: tags
+ description:
+ - A list of tags you want to associate with the collection for indexing/searching.
+ - A tag name has the same character requirements as C(namespace) and C(name).
+ type: list
+
+- key: dependencies
+ description:
+ - Collections that this collection requires to be installed for it to be usable.
+ - The key of the dict is the collection label C(namespace.name).
+    - The value is a version range
+      L(specifier,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification).
+ - Multiple version range specifiers can be set and are separated by C(,).
+ type: dict
+
+- key: repository
+ description:
+ - The URL of the originating SCM repository.
+ type: str
+
+- key: documentation
+ description:
+ - The URL to any online docs.
+ type: str
+
+- key: homepage
+ description:
+ - The URL to the homepage of the collection/project.
+ type: str
+
+- key: issues
+ description:
+ - The URL to the collection issue tracker.
+ type: str
+
+- key: build_ignore
+ description:
+ - A list of file glob-like patterns used to filter any files or directories
+ that should not be included in the build artifact.
+  - A pattern is matched against the relative path of the file or directory within the
+    collection directory.
+ - This uses C(fnmatch) to match the files or directories.
+ - Some directories and files like C(galaxy.yml), C(*.pyc), C(*.retry), and
+ C(.git) are always filtered.
+ type: list
+ version_added: '2.10'
diff --git a/lib/ansible/galaxy/data/container/.travis.yml b/lib/ansible/galaxy/data/container/.travis.yml
new file mode 100644
index 00000000..a3370b7d
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/.travis.yml
@@ -0,0 +1,45 @@
+language: python
+dist: trusty
+sudo: required
+
+services:
+ - docker
+
+before_install:
+ - sudo apt-add-repository 'deb http://archive.ubuntu.com/ubuntu trusty-backports universe'
+ - sudo apt-get update -qq
+ - sudo apt-get install -y -o Dpkg::Options::="--force-confold" --force-yes docker-engine
+
+install:
+ # Install the latest Ansible Container and Ansible
+ - pip install git+https://github.com/ansible/ansible-container.git
+ - pip install ansible
+
+script:
+ # Make sure docker is functioning
+ - docker version
+ - docker-compose version
+ - docker info
+
+ # Create an Ansible Container project
+ - mkdir -p tests
+ - cd tests
+ - ansible-container init
+
+ # Install the role into the project
+ - echo "Installing and testing git+https://github.com/${TRAVIS_REPO_SLUG},${TRAVIS_COMMIT}"
+ - ansible-container install git+https://github.com/${TRAVIS_REPO_SLUG},${TRAVIS_COMMIT}
+
+ # Build the service image
+ - ansible-container build
+
+ # Start the service
+ - ansible-container run -d
+ - docker ps
+
+ # Run tests
+ - ansible-playbook test.yml
+
+notifications:
+ email: false
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/lib/ansible/galaxy/data/container/README.md b/lib/ansible/galaxy/data/container/README.md
new file mode 100644
index 00000000..1b66bdb5
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/README.md
@@ -0,0 +1,49 @@
+# Role Name
+
+Adds a <SERVICE_NAME> service to your [Ansible Container](https://github.com/ansible/ansible-container) project. Run the following commands
+to install the service:
+
+```
+# Set the working directory to your Ansible Container project root
+$ cd myproject
+
+# Install the service
+$ ansible-container install <USERNAME.ROLE_NAME>
+```
+
+## Requirements
+
+- [Ansible Container](https://github.com/ansible/ansible-container)
+- An existing Ansible Container project. To create a project, simply run the following:
+ ```
+ # Create an empty project directory
+ $ mkdir myproject
+
+ # Set the working directory to the new directory
+ $ cd myproject
+
+ # Initialize the project
+ $ ansible-container init
+ ```
+
+- Continue listing any prerequisites here...
+
+
+## Role Variables
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set
+via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+## Dependencies
+
+A list of other roles hosted on Galaxy should go here, plus any details regarding parameters that may need to be set for other roles, or variables that are used from other roles.
+
+## License
+
+BSD
+
+## Author Information
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+
+
diff --git a/lib/ansible/galaxy/data/container/defaults/main.yml.j2 b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
new file mode 100644
index 00000000..3818e64c
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/files/.git_keep b/lib/ansible/galaxy/data/container/files/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/files/.git_keep
diff --git a/lib/ansible/galaxy/data/container/handlers/main.yml.j2 b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
new file mode 100644
index 00000000..3f4c4967
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/meta/container.yml.j2 b/lib/ansible/galaxy/data/container/meta/container.yml.j2
new file mode 100644
index 00000000..f033d341
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/meta/container.yml.j2
@@ -0,0 +1,11 @@
+# Add your Ansible Container service definitions here.
+# For example:
+#
+# web:
+#   image: ubuntu:trusty
+#   ports:
+#     - "80:80"
+#   command: ['/usr/bin/dumb-init', '/usr/sbin/apache2ctl', '-D', 'FOREGROUND']
+#   dev_overrides:
+#     environment:
+#       - "DEBUG=1"
diff --git a/lib/ansible/galaxy/data/container/meta/main.yml.j2 b/lib/ansible/galaxy/data/container/meta/main.yml.j2
new file mode 100644
index 00000000..72fc9a22
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/meta/main.yml.j2
@@ -0,0 +1,52 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ min_ansible_container_version: 0.2.0
+
+ # If Ansible is required outside of the build container, provide the minimum version:
+ # min_ansible_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags:
+ - container
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags.
+ #
+ # NOTE: A tag is limited to a single word consisting of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above
+ # if you add dependencies to this list.
diff --git a/lib/ansible/galaxy/data/container/tasks/main.yml.j2 b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
new file mode 100644
index 00000000..a9880650
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/container/templates/.git_keep b/lib/ansible/galaxy/data/container/templates/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/container/tests/ansible.cfg b/lib/ansible/galaxy/data/container/tests/ansible.cfg
new file mode 100644
index 00000000..2f74f1b2
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tests/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+inventory=./inventory
diff --git a/lib/ansible/galaxy/data/container/tests/inventory b/lib/ansible/galaxy/data/container/tests/inventory
new file mode 100644
index 00000000..ea69cbf1
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tests/inventory
@@ -0,0 +1,3 @@
+localhost
+
+
diff --git a/lib/ansible/galaxy/data/container/tests/test.yml.j2 b/lib/ansible/galaxy/data/container/tests/test.yml.j2
new file mode 100644
index 00000000..fb14f85c
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/tests/test.yml.j2
@@ -0,0 +1,7 @@
+---
+- hosts: localhost
+ gather_facts: no
+ connection: local
+ tasks:
+
+ # Add tasks and assertions for testing the service here.
diff --git a/lib/ansible/galaxy/data/container/vars/main.yml.j2 b/lib/ansible/galaxy/data/container/vars/main.yml.j2
new file mode 100644
index 00000000..092d511a
--- /dev/null
+++ b/lib/ansible/galaxy/data/container/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/collection/README.md.j2 b/lib/ansible/galaxy/data/default/collection/README.md.j2
new file mode 100644
index 00000000..5e516220
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/README.md.j2
@@ -0,0 +1,3 @@
+# Ansible Collection - {{ namespace }}.{{ collection_name }}
+
+Documentation for the collection.
diff --git a/lib/ansible/galaxy/data/default/collection/docs/.git_keep b/lib/ansible/galaxy/data/default/collection/docs/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/docs/.git_keep
diff --git a/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2 b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
new file mode 100644
index 00000000..a95008fc
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/galaxy.yml.j2
@@ -0,0 +1,11 @@
+### REQUIRED
+{% for option in required_config %}
+{{ option.description | comment_ify }}
+{{ {option.key: option.value} | to_nice_yaml }}
+{% endfor %}
+
+### OPTIONAL but strongly recommended
+{% for option in optional_config %}
+{{ option.description | comment_ify }}
+{{ {option.key: option.value} | to_nice_yaml }}
+{% endfor %}
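The template above relies on two custom Jinja2 filters, `comment_ify` and `to_nice_yaml`, which the ansible-galaxy CLI supplies when it renders a collection skeleton. A rough, self-contained sketch of such a render; the stand-in filter bodies here are assumptions that only approximate the real implementations:

```
import textwrap

import yaml
from jinja2 import Environment


def comment_ify(data):
    # Join list-form descriptions and prefix each wrapped line with '# '.
    text = ' '.join(data) if isinstance(data, list) else data
    return '\n'.join('# %s' % line for line in textwrap.wrap(text, width=117))


env = Environment()
env.filters['comment_ify'] = comment_ify
env.filters['to_nice_yaml'] = lambda value: yaml.safe_dump(value, default_flow_style=False)

template = env.from_string(
    '{{ option.description | comment_ify }}\n'
    '{{ {option.key: option.value} | to_nice_yaml }}'
)
print(template.render(option={'key': 'namespace', 'value': 'my_namespace',
                              'description': ['The namespace of the collection.']}))
# # The namespace of the collection.
# namespace: my_namespace
```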
diff --git a/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2 b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
new file mode 100644
index 00000000..7c006cfa
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/plugins/README.md.j2
@@ -0,0 +1,31 @@
+# Collections Plugins Directory
+
+This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder
+named after its plugin type. It can also include the `module_utils` and `modules` directories, which
+contain module utilities and modules respectively.
+
+Here is an example directory of the majority of plugins currently supported by Ansible:
+
+```
+└── plugins
+ ├── action
+ ├── become
+ ├── cache
+ ├── callback
+ ├── cliconf
+ ├── connection
+ ├── filter
+ ├── httpapi
+ ├── inventory
+ ├── lookup
+ ├── module_utils
+ ├── modules
+ ├── netconf
+ ├── shell
+ ├── strategy
+ ├── terminal
+ ├── test
+ └── vars
+```
+
+A full list of plugin types can be found at [Working With Plugins]({{ ansible_plugin_list_dir }}).
diff --git a/lib/ansible/galaxy/data/default/collection/roles/.git_keep b/lib/ansible/galaxy/data/default/collection/roles/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/collection/roles/.git_keep
diff --git a/lib/ansible/galaxy/data/default/role/.travis.yml b/lib/ansible/galaxy/data/default/role/.travis.yml
new file mode 100644
index 00000000..36bbf620
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/.travis.yml
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+ apt:
+ packages:
+ - python-pip
+
+install:
+ # Install ansible
+ - pip install ansible
+
+ # Check ansible version
+ - ansible --version
+
+ # Create ansible.cfg with correct roles_path
+ - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+ # Basic role syntax check
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file
diff --git a/lib/ansible/galaxy/data/default/role/README.md b/lib/ansible/galaxy/data/default/role/README.md
new file mode 100644
index 00000000..225dd44b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details regarding parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2 b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
new file mode 100644
index 00000000..3818e64c
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/files/.git_keep b/lib/ansible/galaxy/data/default/role/files/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/files/.git_keep
diff --git a/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2 b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
new file mode 100644
index 00000000..3f4c4967
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/meta/main.yml.j2 b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
new file mode 100644
index 00000000..4891a68b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/meta/main.yml.j2
@@ -0,0 +1,55 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ min_ansible_version: {{ min_ansible_version }}
+
+ # If this is a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word consisting of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above
+ # if you add dependencies to this list.
+{% for dependency in dependencies %}
+ #- {{ dependency }}
+{%- endfor %}
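The dependency loop that closes this template uses Jinja2 whitespace control: `{%- endfor %}` trims the newline before the tag (the network role skeleton later in this diff trims before the `for` tag as well). A small sketch of how that block renders; the role names are made up:

```
from jinja2 import Template

block = (
    'dependencies: []\n'
    '{% for dependency in dependencies %}\n'
    '  #- {{ dependency }}\n'
    '{%- endfor %}\n'
)
# The '-' in '{%- endfor %}' strips the newline after each rendered
# dependency line, so the commented entries stay on consecutive lines.
print(Template(block).render(dependencies=['example.one', 'example.two']))
```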
diff --git a/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2 b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
new file mode 100644
index 00000000..a9880650
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/templates/.git_keep b/lib/ansible/galaxy/data/default/role/templates/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/default/role/tests/inventory b/lib/ansible/galaxy/data/default/role/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/lib/ansible/galaxy/data/default/role/tests/test.yml.j2 b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
new file mode 100644
index 00000000..0c40f95a
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/tests/test.yml.j2
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - {{ role_name }}
diff --git a/lib/ansible/galaxy/data/default/role/vars/main.yml.j2 b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
new file mode 100644
index 00000000..092d511a
--- /dev/null
+++ b/lib/ansible/galaxy/data/default/role/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/.travis.yml b/lib/ansible/galaxy/data/network/.travis.yml
new file mode 100644
index 00000000..36bbf620
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/.travis.yml
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+ apt:
+ packages:
+ - python-pip
+
+install:
+ # Install ansible
+ - pip install ansible
+
+ # Check ansible version
+ - ansible --version
+
+ # Create ansible.cfg with correct roles_path
+ - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+ # Basic role syntax check
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file
diff --git a/lib/ansible/galaxy/data/network/README.md b/lib/ansible/galaxy/data/network/README.md
new file mode 100644
index 00000000..84533c63
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses any vendor-specific SDKs or modules with specific dependencies, it may be a good idea to mention in this section that the package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details regarding parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
new file mode 100644
index 00000000..02f234ac
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/cliconf_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+ from ansible.plugins.cliconf import CliconfBase
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class Cliconf(CliconfBase):
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/cliconf/junos.py
+ """
+ raise AnsibleError("Cliconf Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/defaults/main.yml.j2 b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
new file mode 100644
index 00000000..3818e64c
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/files/.git_keep b/lib/ansible/galaxy/data/network/files/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/files/.git_keep
diff --git a/lib/ansible/galaxy/data/network/library/example_command.py.j2 b/lib/ansible/galaxy/data/network/library/example_command.py.j2
new file mode 100644
index 00000000..0f3dac2d
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/library/example_command.py.j2
@@ -0,0 +1,66 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+"""
+
+EXAMPLES = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+"""
+
+
+RETURN = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+"""
+
+### Imports
+try:
+ from ansible.module_utils.basic import AnsibleModule
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+ """
+except ImportError:
+ raise AnsibleError("[ {{ role_name }}_command ]: Dependency not satisfied")
+
+### Implementation
+def main():
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_command.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_command.py
+ """
+ raise AnsibleError(" [ {{ role_name }}_command ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/galaxy/data/network/library/example_config.py.j2 b/lib/ansible/galaxy/data/network/library/example_config.py.j2
new file mode 100644
index 00000000..2c2c72be
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/library/example_config.py.j2
@@ -0,0 +1,66 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+"""
+
+EXAMPLES = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+"""
+
+
+RETURN = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+"""
+
+### Imports
+try:
+ from ansible.module_utils.basic import AnsibleModule
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+ """
+except ImportError:
+ raise AnsibleError("[ {{ role_name }}_config ]: Dependency not satisfied")
+
+### Implementation
+def main():
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_config.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_config.py
+ """
+ raise AnsibleError(" [ {{ role_name }}_config ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/galaxy/data/network/library/example_facts.py.j2 b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
new file mode 100644
index 00000000..9f7608c3
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/library/example_facts.py.j2
@@ -0,0 +1,66 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+
+### Documentation
+DOCUMENTATION = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+EXAMPLES = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+
+RETURN = """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+"""
+
+### Imports
+try:
+ from ansible.module_utils.basic import AnsibleModule
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+ """
+except ImportError:
+ raise AnsibleError("[ {{ role_name }}_facts ]: Dependency not satisfied")
+
+### Implementation
+def main():
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/iosxr/iosxr_facts.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/network/junos/junos_facts.py
+ """
+ raise AnsibleError(" [ {{ role_name }}_facts ]: Not Implemented")
+
+### Entrypoint
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/galaxy/data/network/meta/main.yml.j2 b/lib/ansible/galaxy/data/network/meta/main.yml.j2
new file mode 100644
index 00000000..d0184ae8
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/meta/main.yml.j2
@@ -0,0 +1,52 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: {{ license }}
+
+ min_ansible_version: {{ min_ansible_version }}
+
+ # If this is a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # platforms is a list of platforms, and each platform has a name and a list of versions.
+ #
+ # platforms:
+ # - name: VYOS
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word consisting of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above
+ # if you add dependencies to this list.
+{%- for dependency in dependencies %}
+ #- {{ dependency }}
+{%- endfor %}
diff --git a/lib/ansible/galaxy/data/network/module_utils/example.py.j2 b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
new file mode 100644
index 00000000..9bf2d3f6
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/module_utils/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+### Imports
+try:
+ from ansible.module_utils.basic import env_fallback, return_values
+ from ansible.module_utils.connection import Connection
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+### Implementation
+"""
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/iosxr/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/network/junos/junos.py
+"""
diff --git a/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
new file mode 100644
index 00000000..e3a1ce61
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/netconf_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+ from ansible.plugins.netconf import NetconfBase
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class Netconf(NetconfBase):
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
+ """
+ raise AnsibleError("Netconf Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/tasks/main.yml.j2 b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
new file mode 100644
index 00000000..a9880650
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/templates/.git_keep b/lib/ansible/galaxy/data/network/templates/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/templates/.git_keep
diff --git a/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2 b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
new file mode 100644
index 00000000..621a140c
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/terminal_plugins/example.py.j2
@@ -0,0 +1,40 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+from ansible.errors import AnsibleError
+__metaclass__ = type
+
+try:
+ from ansible.plugins.terminal import TerminalBase
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py
+ """
+except ImportError:
+ raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Dependency not satisfied")
+
+
+class TerminalModule(TerminalBase):
+ """
+ Examples:
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/iosxr.py
+ https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/terminal/junos.py
+ """
+ raise AnsibleError("Terminal Plugin [ {{ role_name }} ]: Not implemented")
diff --git a/lib/ansible/galaxy/data/network/tests/inventory b/lib/ansible/galaxy/data/network/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/lib/ansible/galaxy/data/network/tests/test.yml.j2 b/lib/ansible/galaxy/data/network/tests/test.yml.j2
new file mode 100644
index 00000000..11284eb5
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/tests/test.yml.j2
@@ -0,0 +1,14 @@
+---
+- hosts: localhost
+ connection: network_cli
+ gather_facts: False
+
+ roles:
+ - {{ role_name }}
+
+- hosts: localhost
+ connection: netconf
+ gather_facts: False
+
+ roles:
+ - {{ role_name }}
diff --git a/lib/ansible/galaxy/data/network/vars/main.yml.j2 b/lib/ansible/galaxy/data/network/vars/main.yml.j2
new file mode 100644
index 00000000..092d511a
--- /dev/null
+++ b/lib/ansible/galaxy/data/network/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
new file mode 100644
index 00000000..7de44ded
--- /dev/null
+++ b/lib/ansible/galaxy/role.py
@@ -0,0 +1,399 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import datetime
+import os
+import tarfile
+import tempfile
+import yaml
+from distutils.version import LooseVersion
+from shutil import rmtree
+
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import open_url
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class GalaxyRole(object):
+
+ SUPPORTED_SCMS = set(['git', 'hg'])
+ META_MAIN = (os.path.join('meta', 'main.yml'), os.path.join('meta', 'main.yaml'))
+ META_INSTALL = os.path.join('meta', '.galaxy_install_info')
+ META_REQUIREMENTS = (os.path.join('meta', 'requirements.yml'), os.path.join('meta', 'requirements.yaml'))
+ ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
+
+ def __init__(self, galaxy, api, name, src=None, version=None, scm=None, path=None):
+
+ self._metadata = None
+ self._requirements = None
+ self._install_info = None
+ self._validate_certs = not context.CLIARGS['ignore_certs']
+
+ display.debug('Validate TLS certificates: %s' % self._validate_certs)
+
+ self.galaxy = galaxy
+ self.api = api
+
+ self.name = name
+ self.version = version
+ self.src = src or name
+ self.scm = scm
+ self.paths = [os.path.join(x, self.name) for x in galaxy.roles_paths]
+
+ if path is not None:
+ if not path.endswith(os.path.join(os.path.sep, self.name)):
+ path = os.path.join(path, self.name)
+ else:
+ # Look for a meta/main.ya?ml inside the potential role dir in case
+ # the role name is the same as the parent directory of the role.
+ #
+ # Example:
+ # ./roles/testing/testing/meta/main.yml
+ for meta_main in self.META_MAIN:
+ if os.path.exists(os.path.join(path, name, meta_main)):
+ path = os.path.join(path, self.name)
+ break
+ self.path = path
+ else:
+ # use the first path by default
+ self.path = os.path.join(galaxy.roles_paths[0], self.name)
+
+ def __repr__(self):
+ """
+ Returns "rolename (version)" if version is not null
+ Returns "rolename" otherwise
+ """
+ if self.version:
+ return "%s (%s)" % (self.name, self.version)
+ else:
+ return self.name
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ @property
+ def metadata(self):
+ """
+ Returns role metadata
+ """
+ if self._metadata is None:
+ for path in self.paths:
+ for meta_main in self.META_MAIN:
+ meta_path = os.path.join(path, meta_main)
+ if os.path.isfile(meta_path):
+ try:
+ with open(meta_path, 'r') as f:
+ self._metadata = yaml.safe_load(f)
+ except Exception:
+ display.vvvvv("Unable to load metadata for %s" % self.name)
+ return False
+ break
+
+ return self._metadata
+
+ @property
+ def install_info(self):
+ """
+ Returns role install info
+ """
+ if self._install_info is None:
+
+ info_path = os.path.join(self.path, self.META_INSTALL)
+ if os.path.isfile(info_path):
+ try:
+ # a context manager closes the file and avoids referencing
+ # an unbound 'f' when open() itself fails
+ with open(info_path, 'r') as f:
+ self._install_info = yaml.safe_load(f)
+ except Exception:
+ display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
+ return False
+ return self._install_info
+
+ @property
+ def _exists(self):
+ for path in self.paths:
+ if os.path.isdir(path):
+ return True
+
+ return False
+
+ def _write_galaxy_install_info(self):
+ """
+ Writes a YAML-formatted file to the role's meta/ directory
+ (named .galaxy_install_info) which contains some information
+ we can use later for commands like 'list' and 'info'.
+ """
+
+ info = dict(
+ version=self.version,
+ install_date=datetime.datetime.utcnow().strftime("%c"),
+ )
+ if not os.path.exists(os.path.join(self.path, 'meta')):
+ os.makedirs(os.path.join(self.path, 'meta'))
+ info_path = os.path.join(self.path, self.META_INSTALL)
+ with open(info_path, 'w+') as f:
+ try:
+ # safe_dump returns None when writing to a stream; keep the dict itself
+ yaml.safe_dump(info, f)
+ self._install_info = info
+ except Exception:
+ return False
+
+ return True
+
+ def remove(self):
+ """
+ Removes the specified role from the roles path.
+ There is a sanity check to make sure there's a meta/main.yml file at this
+ path so the user doesn't blow away random directories.
+ """
+ if self.metadata:
+ try:
+ rmtree(self.path)
+ return True
+ except Exception:
+ pass
+
+ return False
+
+ def fetch(self, role_data):
+ """
+ Downloads the archived role to a temp location based on role data
+ """
+ if role_data:
+
+ # first grab the file and save it to a temp location
+ if "github_user" in role_data and "github_repo" in role_data:
+ archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
+ else:
+ archive_url = self.src
+
+ display.display("- downloading role from %s" % archive_url)
+
+ try:
+ url_file = open_url(archive_url, validate_certs=self._validate_certs, http_agent=user_agent())
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
+ data = url_file.read()
+ while data:
+ temp_file.write(data)
+ data = url_file.read()
+ temp_file.close()
+ return temp_file.name
+ except Exception as e:
+ display.error(u"failed to download the file: %s" % to_text(e))
+
+ return False
+
+ def install(self):
+
+ if self.scm:
+ # create tar file from scm url
+ tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=context.CLIARGS['keep_scm_meta'], **self.spec)
+ elif self.src:
+ if os.path.isfile(self.src):
+ tmp_file = self.src
+ elif '://' in self.src:
+ role_data = self.src
+ tmp_file = self.fetch(role_data)
+ else:
+ role_data = self.api.lookup_role_by_name(self.src)
+ if not role_data:
+ raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.api.api_server))
+
+ if role_data.get('role_type') == 'APP':
+ # Container Role
+ display.warning("%s is a Container App role, and should only be installed using Ansible "
+ "Container" % self.name)
+
+ role_versions = self.api.fetch_role_related('versions', role_data['id'])
+ if not self.version:
+ # convert the version names to LooseVersion objects
+ # and sort them to get the latest version. If there
+ # are no versions in the list, we'll grab the head
+ # of the master branch
+ if len(role_versions) > 0:
+ loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
+ try:
+ loose_versions.sort()
+ except TypeError:
+ raise AnsibleError(
+ 'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
+ 'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
+ 'install.' % ', '.join([v.vstring for v in loose_versions])
+ )
+ self.version = to_text(loose_versions[-1])
+ elif role_data.get('github_branch', None):
+ self.version = role_data['github_branch']
+ else:
+ self.version = 'master'
+ elif self.version != 'master':
+ if role_versions and to_text(self.version) not in [a.get('name', None) for a in role_versions]:
+ raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
+ self.name,
+ role_versions))
+
+ # check if there's a source link for our role_version
+ for role_version in role_versions:
+ if role_version['name'] == self.version and 'source' in role_version:
+ self.src = role_version['source']
+
+ tmp_file = self.fetch(role_data)
+
+ else:
+ raise AnsibleError("No valid role data found")
+
+ if tmp_file:
+
+ display.debug("installing from %s" % tmp_file)
+
+ if not tarfile.is_tarfile(tmp_file):
+ raise AnsibleError("the downloaded file does not appear to be a valid tar archive.")
+ else:
+ role_tar_file = tarfile.open(tmp_file, "r")
+ # verify the role's meta file
+ meta_file = None
+ members = role_tar_file.getmembers()
+ # next find the metadata file
+ for member in members:
+ for meta_main in self.META_MAIN:
+ if meta_main in member.name:
+ # Look for parent of meta/main.yml
+ # Due to possibility of sub roles each containing meta/main.yml
+ # look for shortest length parent
+ meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
+ if not meta_file:
+ archive_parent_dir = meta_parent_dir
+ meta_file = member
+ else:
+ if len(meta_parent_dir) < len(archive_parent_dir):
+ archive_parent_dir = meta_parent_dir
+ meta_file = member
+ if not meta_file:
+ raise AnsibleError("this role does not appear to have a meta/main.yml file.")
+ else:
+ try:
+ self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
+ except Exception:
+ raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
+
+ # we strip off any higher-level directories for all of the files contained within
+ # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
+ # hand, do not have a parent directory at all.
+ installed = False
+ while not installed:
+ display.display("- extracting %s to %s" % (self.name, self.path))
+ try:
+ if os.path.exists(self.path):
+ if not os.path.isdir(self.path):
+ raise AnsibleError("the specified roles path exists and is not a directory.")
+ elif not context.CLIARGS.get("force", False):
+ raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
+ else:
+ # using --force, remove the old path
+ if not self.remove():
+ raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
+ "want to put the role here." % self.path)
+ else:
+ os.makedirs(self.path)
+
+ # now we do the actual extraction to the path
+ for member in members:
+ # we only extract files, and remove any relative path
+ # bits that might be in the file for security purposes
+ # and drop any containing directory, as mentioned above
+ if member.isreg() or member.issym():
+ n_member_name = to_native(member.name)
+ n_archive_parent_dir = to_native(archive_parent_dir)
+ n_parts = n_member_name.replace(n_archive_parent_dir, "", 1).split(os.sep)
+ n_final_parts = []
+ for n_part in n_parts:
+ if n_part != '..' and '~' not in n_part and '$' not in n_part:
+ n_final_parts.append(n_part)
+ member.name = os.path.join(*n_final_parts)
+ role_tar_file.extract(member, to_native(self.path))
+
+ # write out the install info file for later use
+ self._write_galaxy_install_info()
+ installed = True
+ except OSError as e:
+ error = True
+ if e.errno == errno.EACCES and len(self.paths) > 1:
+ current = self.paths.index(self.path)
+ # only fall through to the next path if one actually exists
+ if len(self.paths) > current + 1:
+ self.path = self.paths[current + 1]
+ error = False
+ if error:
+ raise AnsibleError("Could not update files in %s: %s" % (self.path, to_native(e)))
+
+ # return the parsed yaml metadata
+ display.display("- %s was installed successfully" % str(self))
+ if not (self.src and os.path.isfile(self.src)):
+ try:
+ os.unlink(tmp_file)
+ except (OSError, IOError) as e:
+ display.warning(u"Unable to remove tmp file (%s): %s" % (tmp_file, to_text(e)))
+ return True
+
+ return False
+
+ @property
+ def spec(self):
+ """
+ Returns role spec info
+ {
+ 'scm': 'git',
+ 'src': 'http://git.example.com/repos/repo.git',
+ 'version': 'v1.0',
+ 'name': 'repo'
+ }
+ """
+ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
+
+ @property
+ def requirements(self):
+ """
+ Returns role requirements
+ """
+ if self._requirements is None:
+ self._requirements = []
+ for meta_requirements in self.META_REQUIREMENTS:
+ meta_path = os.path.join(self.path, meta_requirements)
+ if os.path.isfile(meta_path):
+ try:
+ # a context manager closes the file and avoids referencing
+ # an unbound 'f' when open() itself fails
+ with open(meta_path, 'r') as f:
+ self._requirements = yaml.safe_load(f)
+ except Exception:
+ display.vvvvv("Unable to load requirements for %s" % self.name)
+
+ break
+
+ return self._requirements
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
new file mode 100644
index 00000000..7231c8f9
--- /dev/null
+++ b/lib/ansible/galaxy/token.py
@@ -0,0 +1,180 @@
+########################################################################
+#
+# (C) 2015, Chris Houseknecht <chouse@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import json
+from stat import S_IRUSR, S_IWUSR
+
+import yaml
+
+from ansible import constants as C
+from ansible.galaxy.user_agent import user_agent
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.urls import open_url
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class NoTokenSentinel(object):
+ """ Represents an ansible.cfg server with not token defined (will ignore cmdline and GALAXY_TOKEN_PATH. """
+ def __new__(cls, *args, **kwargs):
+ return cls
+
+
+class KeycloakToken(object):
+ '''A token granted by a Keycloak server.
+
+ Like sso.redhat.com as used by cloud.redhat.com,
+ i.e. Automation Hub.'''
+
+ token_type = 'Bearer'
+
+ def __init__(self, access_token=None, auth_url=None, validate_certs=True):
+ self.access_token = access_token
+ self.auth_url = auth_url
+ self._token = None
+ self.validate_certs = validate_certs
+
+ def _form_payload(self):
+ return 'grant_type=refresh_token&client_id=cloud-services&refresh_token=%s' % self.access_token
+
+ def get(self):
+ if self._token:
+ return self._token
+
+ # - build a request to POST to auth_url
+ # - body is form encoded
+ # - 'refresh_token' is the offline token stored in ansible.cfg
+ # - 'grant_type' is 'refresh_token'
+ # - 'client_id' is 'cloud-services'
+ # - should probably be based on the contents of the
+ # offline_ticket's JWT payload 'aud' (audience)
+ # or 'azp' (Authorized party - the party to which the ID Token was issued)
+ payload = self._form_payload()
+
+ resp = open_url(to_native(self.auth_url),
+ data=payload,
+ validate_certs=self.validate_certs,
+ method='POST',
+ http_agent=user_agent())
+
+ # TODO: handle auth errors
+
+ data = json.loads(to_text(resp.read(), errors='surrogate_or_strict'))
+
+ # - extract 'access_token'
+ self._token = data.get('access_token')
+
+ return self._token
+
+ def headers(self):
+ headers = {}
+ headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+ return headers
+
+
+class GalaxyToken(object):
+ ''' Class for storing and retrieving the local Galaxy token '''
+
+ token_type = 'Token'
+
+ def __init__(self, token=None):
+ self.b_file = to_bytes(C.GALAXY_TOKEN_PATH, errors='surrogate_or_strict')
+ # Done so the config file is only opened when set/get/save is called
+ self._config = None
+ self._token = token
+
+ @property
+ def config(self):
+ if self._config is None:
+ self._config = self._read()
+
+ # Prioritise the token passed into the constructor
+ if self._token:
+ self._config['token'] = None if self._token is NoTokenSentinel else self._token
+
+ return self._config
+
+ def _read(self):
+ action = 'Opened'
+ if not os.path.isfile(self.b_file):
+ # token file not found, create it and chmod u+rw
+ open(self.b_file, 'w').close()
+ os.chmod(self.b_file, S_IRUSR | S_IWUSR) # owner has +rw
+ action = 'Created'
+
+ with open(self.b_file, 'r') as f:
+ config = yaml.safe_load(f)
+
+ display.vvv('%s %s' % (action, to_text(self.b_file)))
+
+ return config or {}
+
+ def set(self, token):
+ self._token = token
+ self.save()
+
+ def get(self):
+ return self.config.get('token', None)
+
+ def save(self):
+ with open(self.b_file, 'w') as f:
+ yaml.safe_dump(self.config, f, default_flow_style=False)
+
+ def headers(self):
+ headers = {}
+ token = self.get()
+ if token:
+ headers['Authorization'] = '%s %s' % (self.token_type, token)
+ return headers
+
+
+class BasicAuthToken(object):
+ token_type = 'Basic'
+
+ def __init__(self, username, password=None):
+ self.username = username
+ self.password = password
+ self._token = None
+
+ @staticmethod
+ def _encode_token(username, password):
+ token = "%s:%s" % (to_text(username, errors='surrogate_or_strict'),
+ to_text(password, errors='surrogate_or_strict', nonstring='passthru') or '')
+ b64_val = base64.b64encode(to_bytes(token, encoding='utf-8', errors='surrogate_or_strict'))
+ return to_text(b64_val)
+
+ def get(self):
+ if self._token:
+ return self._token
+
+ self._token = self._encode_token(self.username, self.password)
+
+ return self._token
+
+ def headers(self):
+ headers = {}
+ headers['Authorization'] = '%s %s' % (self.token_type, self.get())
+ return headers
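`BasicAuthToken` above produces a standard HTTP Basic Authorization header: the username and password are joined with ':' and base64-encoded. A quick standalone check with made-up credentials:

```
import base64

username, password = 'galaxy_user', 's3cret'  # made-up credentials
token = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('utf-8')
print({'Authorization': 'Basic %s' % token})
# {'Authorization': 'Basic Z2FsYXh5X3VzZXI6czNjcmV0'}
```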
diff --git a/lib/ansible/galaxy/user_agent.py b/lib/ansible/galaxy/user_agent.py
new file mode 100644
index 00000000..c860bcdb
--- /dev/null
+++ b/lib/ansible/galaxy/user_agent.py
@@ -0,0 +1,23 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import platform
+import sys
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+
+
+def user_agent():
+ """Returns a user agent used by ansible-galaxy to include the Ansible version, platform and python version."""
+
+ python_version = sys.version_info
+ return u"ansible-galaxy/{ansible_version} ({platform}; python:{py_major}.{py_minor}.{py_micro})".format(
+ ansible_version=ansible_version,
+ platform=platform.system(),
+ py_major=python_version.major,
+ py_minor=python_version.minor,
+ py_micro=python_version.micro,
+ )
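`user_agent()` builds the string ansible-galaxy sends with its HTTP requests. A quick usage check (requires this source tree on the import path; the exact output depends on the local Ansible, Python, and platform versions):

```
# Output varies by host; the version shown below is only an example.
from ansible.galaxy.user_agent import user_agent

print(user_agent())
# e.g. ansible-galaxy/2.10.4 (Linux; python:3.8.6)
```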
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/inventory/__init__.py
diff --git a/lib/ansible/inventory/data.py b/lib/ansible/inventory/data.py
new file mode 100644
index 00000000..df4af766
--- /dev/null
+++ b/lib/ansible/inventory/data.py
@@ -0,0 +1,280 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.inventory.group import Group
+from ansible.inventory.host import Host
+from ansible.module_utils.six import iteritems, string_types
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+from ansible.utils.path import basedir
+
+display = Display()
+
+
+class InventoryData(object):
+ """
+ Holds inventory data (host and group objects).
+    Using its methods should guarantee expected relationships and data.
+ """
+
+ def __init__(self):
+
+ self.groups = {}
+ self.hosts = {}
+
+ # provides 'groups' magic var, host object has group_names
+ self._groups_dict_cache = {}
+
+ # current localhost, implicit or explicit
+ self.localhost = None
+
+ self.current_source = None
+
+        # Always create the 'all' and 'ungrouped' groups
+ for group in ('all', 'ungrouped'):
+ self.add_group(group)
+ self.add_child('all', 'ungrouped')
+
+ def serialize(self):
+ self._groups_dict_cache = None
+ data = {
+ 'groups': self.groups,
+ 'hosts': self.hosts,
+ 'local': self.localhost,
+ 'source': self.current_source,
+ }
+ return data
+
+ def deserialize(self, data):
+ self._groups_dict_cache = {}
+ self.hosts = data.get('hosts')
+ self.groups = data.get('groups')
+ self.localhost = data.get('local')
+ self.current_source = data.get('source')
+
+ def _create_implicit_localhost(self, pattern):
+
+ if self.localhost:
+ new_host = self.localhost
+ else:
+ new_host = Host(pattern)
+
+ new_host.address = "127.0.0.1"
+ new_host.implicit = True
+
+ # set localhost defaults
+ py_interp = sys.executable
+ if not py_interp:
+                # sys.executable is not set in some corner cases. See issue #13585
+ py_interp = '/usr/bin/python'
+ display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. '
+ 'You can correct this by setting ansible_python_interpreter for localhost')
+ new_host.set_variable("ansible_python_interpreter", py_interp)
+ new_host.set_variable("ansible_connection", 'local')
+
+ self.localhost = new_host
+
+ return new_host
+
+ def reconcile_inventory(self):
+        ''' Ensure basic inventory rules hold; run after updates '''
+
+ display.debug('Reconcile groups and hosts in inventory.')
+ self.current_source = None
+
+ group_names = set()
+ # set group vars from group_vars/ files and vars plugins
+ for g in self.groups:
+ group = self.groups[g]
+ group_names.add(group.name)
+
+ # ensure all groups inherit from 'all'
+ if group.name != 'all' and not group.get_ancestors():
+ self.add_child('all', group.name)
+
+ host_names = set()
+ # get host vars from host_vars/ files and vars plugins
+ for host in self.hosts.values():
+ host_names.add(host.name)
+
+ mygroups = host.get_groups()
+
+ if self.groups['ungrouped'] in mygroups:
+                    # clear 'ungrouped' of any hosts incorrectly stored there by the parser
+ if set(mygroups).difference(set([self.groups['all'], self.groups['ungrouped']])):
+ self.groups['ungrouped'].remove_host(host)
+
+ elif not host.implicit:
+ # add ungrouped hosts to ungrouped, except implicit
+ length = len(mygroups)
+ if length == 0 or (length == 1 and self.groups['all'] in mygroups):
+ self.add_child('ungrouped', host.name)
+
+ # special case for implicit hosts
+ if host.implicit:
+ host.vars = combine_vars(self.groups['all'].get_vars(), host.vars)
+
+ # warn if overloading identifier as both group and host
+ for conflict in group_names.intersection(host_names):
+ display.warning("Found both group and host with same name: %s" % conflict)
+
+ self._groups_dict_cache = {}
+
+ def get_host(self, hostname):
+        ''' fetch host object by name, dealing with implicit localhost '''
+
+ matching_host = self.hosts.get(hostname, None)
+
+ # if host is not in hosts dict
+ if matching_host is None and hostname in C.LOCALHOST:
+ # might need to create implicit localhost
+ matching_host = self._create_implicit_localhost(hostname)
+
+ return matching_host
+
+ def add_group(self, group):
+        ''' adds a group to inventory if not already there, returns the name actually used '''
+
+ if group:
+ if not isinstance(group, string_types):
+ raise AnsibleError("Invalid group name supplied, expected a string but got %s for %s" % (type(group), group))
+ if group not in self.groups:
+ g = Group(group)
+ if g.name not in self.groups:
+ self.groups[g.name] = g
+ self._groups_dict_cache = {}
+ display.debug("Added group %s to inventory" % group)
+ group = g.name
+ else:
+ display.debug("group %s already in inventory" % group)
+ else:
+ raise AnsibleError("Invalid empty/false group name provided: %s" % group)
+
+ return group
+
+ def remove_group(self, group):
+
+ if group in self.groups:
+ del self.groups[group]
+ display.debug("Removed group %s from inventory" % group)
+ self._groups_dict_cache = {}
+
+ for host in self.hosts:
+ h = self.hosts[host]
+ h.remove_group(group)
+
+ def add_host(self, host, group=None, port=None):
+ ''' adds a host to inventory and possibly a group if not there already '''
+
+ if host:
+ if not isinstance(host, string_types):
+ raise AnsibleError("Invalid host name supplied, expected a string but got %s for %s" % (type(host), host))
+
+ # TODO: add to_safe_host_name
+ g = None
+ if group:
+ if group in self.groups:
+ g = self.groups[group]
+ else:
+ raise AnsibleError("Could not find group %s in inventory" % group)
+
+ if host not in self.hosts:
+ h = Host(host, port)
+ self.hosts[host] = h
+ if self.current_source: # set to 'first source' in which host was encountered
+ self.set_variable(host, 'inventory_file', self.current_source)
+ self.set_variable(host, 'inventory_dir', basedir(self.current_source))
+ else:
+ self.set_variable(host, 'inventory_file', None)
+ self.set_variable(host, 'inventory_dir', None)
+ display.debug("Added host %s to inventory" % (host))
+
+ # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
+ if host in C.LOCALHOST:
+ if self.localhost is None:
+ self.localhost = self.hosts[host]
+ display.vvvv("Set default localhost to %s" % h)
+ else:
+ display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
+ else:
+ h = self.hosts[host]
+
+ if g:
+ g.add_host(h)
+ self._groups_dict_cache = {}
+ display.debug("Added host %s to group %s" % (host, group))
+ else:
+ raise AnsibleError("Invalid empty host name provided: %s" % host)
+
+ return host
+
+ def remove_host(self, host):
+
+ if host.name in self.hosts:
+ del self.hosts[host.name]
+
+ for group in self.groups:
+ g = self.groups[group]
+ g.remove_host(host)
+
+ def set_variable(self, entity, varname, value):
+ ''' sets a variable for an inventory object '''
+
+ if entity in self.groups:
+ inv_object = self.groups[entity]
+ elif entity in self.hosts:
+ inv_object = self.hosts[entity]
+ else:
+ raise AnsibleError("Could not identify group or host named %s" % entity)
+
+ inv_object.set_variable(varname, value)
+ display.debug('set %s for %s' % (varname, entity))
+
+ def add_child(self, group, child):
+ ''' Add host or group to group '''
+ added = False
+ if group in self.groups:
+ g = self.groups[group]
+ if child in self.groups:
+ added = g.add_child_group(self.groups[child])
+ elif child in self.hosts:
+ added = g.add_host(self.hosts[child])
+ else:
+ raise AnsibleError("%s is not a known host nor group" % child)
+ self._groups_dict_cache = {}
+ display.debug('Group %s now contains %s' % (group, child))
+ else:
+ raise AnsibleError("%s is not a known group" % group)
+ return added
+
+ def get_groups_dict(self):
+ """
+ We merge a 'magic' var 'groups' with group name keys and hostname list values into every host variable set. Cache for speed.
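+        e.g. (illustrative) {'all': ['db1', 'web1'], 'webservers': ['web1']}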
+ """
+ if not self._groups_dict_cache:
+ for (group_name, group) in iteritems(self.groups):
+ self._groups_dict_cache[group_name] = [h.name for h in group.get_hosts()]
+
+ return self._groups_dict_cache
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
new file mode 100644
index 00000000..0dd91bb7
--- /dev/null
+++ b/lib/ansible/inventory/group.py
@@ -0,0 +1,289 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from itertools import chain
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Mapping, MutableMapping
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
+
+def to_safe_group_name(name, replacer="_", force=False, silent=False):
+ # Converts 'bad' characters in a string to underscores (or provided replacer) so they can be used as Ansible hosts or groups
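+    # Illustrative example (depends on C.INVALID_VARIABLE_NAMES and the
+    # TRANSFORM_INVALID_GROUP_CHARS setting): with the default replacer '_',
+    # a name like 'web-servers.nyc' would typically become 'web_servers_nyc'.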
+
+ warn = ''
+ if name: # when deserializing we might not have name yet
+ invalid_chars = C.INVALID_VARIABLE_NAMES.findall(name)
+ if invalid_chars:
+ msg = 'invalid character(s) "%s" in group name (%s)' % (to_text(set(invalid_chars)), to_text(name))
+ if C.TRANSFORM_INVALID_GROUP_CHARS not in ('never', 'ignore') or force:
+ name = C.INVALID_VARIABLE_NAMES.sub(replacer, name)
+ if not (silent or C.TRANSFORM_INVALID_GROUP_CHARS == 'silently'):
+ display.vvvv('Replacing ' + msg)
+ warn = 'Invalid characters were found in group names and automatically replaced, use -vvvv to see details'
+ else:
+ if C.TRANSFORM_INVALID_GROUP_CHARS == 'never':
+ display.vvvv('Not replacing %s' % msg)
+                    warn = 'Invalid characters were found in group names but not replaced, use -vvvv to see details'
+
+ if warn:
+ display.warning(warn)
+
+ return name
+
+
+class Group:
+ ''' a group of ansible hosts '''
+
+ # __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+
+ def __init__(self, name=None):
+
+ self.depth = 0
+ self.name = to_safe_group_name(name)
+ self.hosts = []
+ self._hosts = None
+ self.vars = {}
+ self.child_groups = []
+ self.parent_groups = []
+ self._hosts_cache = None
+ self.priority = 1
+
+ def __repr__(self):
+ return self.get_name()
+
+ def __str__(self):
+ return self.get_name()
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def serialize(self):
+ parent_groups = []
+ for parent in self.parent_groups:
+ parent_groups.append(parent.serialize())
+
+ self._hosts = None
+
+ result = dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ parent_groups=parent_groups,
+ depth=self.depth,
+ hosts=self.hosts,
+ )
+
+ return result
+
+ def deserialize(self, data):
+ self.__init__()
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.depth = data.get('depth', 0)
+ self.hosts = data.get('hosts', [])
+ self._hosts = None
+
+ parent_groups = data.get('parent_groups', [])
+ for parent_data in parent_groups:
+ g = Group()
+ g.deserialize(parent_data)
+ self.parent_groups.append(g)
+
+ def _walk_relationship(self, rel, include_self=False, preserve_ordering=False):
+ '''
+ Given `rel` that is an iterable property of Group,
+        constituting a directed acyclic graph among all groups,
+        returns a set of all groups in the full tree.
+ A B C
+ | / | /
+ | / | /
+ D -> E
+ | / vertical connections
+ | / are directed upward
+ F
+ Called on F, returns set of (A, B, C, D, E)
+ '''
+ seen = set([])
+ unprocessed = set(getattr(self, rel))
+ if include_self:
+ unprocessed.add(self)
+ if preserve_ordering:
+ ordered = [self] if include_self else []
+ ordered.extend(getattr(self, rel))
+
+ while unprocessed:
+ seen.update(unprocessed)
+ new_unprocessed = set([])
+
+ for new_item in chain.from_iterable(getattr(g, rel) for g in unprocessed):
+ new_unprocessed.add(new_item)
+ if preserve_ordering:
+ if new_item not in seen:
+ ordered.append(new_item)
+
+ new_unprocessed.difference_update(seen)
+ unprocessed = new_unprocessed
+
+ if preserve_ordering:
+ return ordered
+ return seen
+
+ def get_ancestors(self):
+ return self._walk_relationship('parent_groups')
+
+ def get_descendants(self, **kwargs):
+ return self._walk_relationship('child_groups', **kwargs)
+
+ @property
+ def host_names(self):
+ if self._hosts is None:
+ self._hosts = set(self.hosts)
+ return self._hosts
+
+ def get_name(self):
+ return self.name
+
+ def add_child_group(self, group):
+ added = False
+ if self == group:
+ raise Exception("can't add group to itself")
+
+ # don't add if it's already there
+ if group not in self.child_groups:
+
+ # prepare list of group's new ancestors this edge creates
+ start_ancestors = group.get_ancestors()
+ new_ancestors = self.get_ancestors()
+ if group in new_ancestors:
+ raise AnsibleError("Adding group '%s' as child to '%s' creates a recursive dependency loop." % (to_native(group.name), to_native(self.name)))
+ new_ancestors.add(self)
+ new_ancestors.difference_update(start_ancestors)
+
+ added = True
+ self.child_groups.append(group)
+
+ # update the depth of the child
+ group.depth = max([self.depth + 1, group.depth])
+
+ # update the depth of the grandchildren
+ group._check_children_depth()
+
+ # now add self to child's parent_groups list, but only if there
+ # isn't already a group with the same name
+ if self.name not in [g.name for g in group.parent_groups]:
+ group.parent_groups.append(self)
+ for h in group.get_hosts():
+ h.populate_ancestors(additions=new_ancestors)
+
+ self.clear_hosts_cache()
+ return added
+
+ def _check_children_depth(self):
+
+ depth = self.depth
+ start_depth = self.depth # self.depth could change over loop
+ seen = set([])
+ unprocessed = set(self.child_groups)
+
+ while unprocessed:
+ seen.update(unprocessed)
+ depth += 1
+ to_process = unprocessed.copy()
+ unprocessed = set([])
+ for g in to_process:
+ if g.depth < depth:
+ g.depth = depth
+ unprocessed.update(g.child_groups)
+ if depth - start_depth > len(seen):
+ raise AnsibleError("The group named '%s' has a recursive dependency loop." % to_native(self.name))
+
+ def add_host(self, host):
+ added = False
+ if host.name not in self.host_names:
+ self.hosts.append(host)
+ self._hosts.add(host.name)
+ host.add_group(self)
+ self.clear_hosts_cache()
+ added = True
+ return added
+
+ def remove_host(self, host):
+ removed = False
+ if host.name in self.host_names:
+ self.hosts.remove(host)
+ self._hosts.remove(host.name)
+ host.remove_group(self)
+ self.clear_hosts_cache()
+ removed = True
+ return removed
+
+ def set_variable(self, key, value):
+
+ if key == 'ansible_group_priority':
+ self.set_priority(int(value))
+ else:
+ if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
+ self.vars[key] = combine_vars(self.vars[key], value)
+ else:
+ self.vars[key] = value
+
+ def clear_hosts_cache(self):
+
+ self._hosts_cache = None
+ for g in self.get_ancestors():
+ g._hosts_cache = None
+
+ def get_hosts(self):
+
+ if self._hosts_cache is None:
+ self._hosts_cache = self._get_hosts()
+ return self._hosts_cache
+
+ def _get_hosts(self):
+
+ hosts = []
+ seen = {}
+ for kid in self.get_descendants(include_self=True, preserve_ordering=True):
+ kid_hosts = kid.hosts
+ for kk in kid_hosts:
+ if kk not in seen:
+ seen[kk] = 1
+ if self.name == 'all' and kk.implicit:
+ continue
+ hosts.append(kk)
+ return hosts
+
+ def get_vars(self):
+ return self.vars.copy()
+
+ def set_priority(self, priority):
+ try:
+ self.priority = int(priority)
+ except TypeError:
+ # FIXME: warn about invalid priority
+ pass
diff --git a/lib/ansible/inventory/helpers.py b/lib/ansible/inventory/helpers.py
new file mode 100644
index 00000000..39c72210
--- /dev/null
+++ b/lib/ansible/inventory/helpers.py
@@ -0,0 +1,40 @@
+# (c) 2017, Ansible by Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.utils.vars import combine_vars
+
+
+def sort_groups(groups):
+ return sorted(groups, key=lambda g: (g.depth, g.priority, g.name))
+
+
+def get_group_vars(groups):
+ """
+ Combine all the group vars from a list of inventory groups.
+
+ :param groups: list of ansible.inventory.group.Group objects
+ :rtype: dict
+ """
+ results = {}
+ for group in sort_groups(groups):
+ results = combine_vars(results, group.get_vars())
+
+ return results
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
new file mode 100644
index 00000000..7ad30079
--- /dev/null
+++ b/lib/ansible/inventory/host.py
@@ -0,0 +1,162 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.inventory.group import Group
+from ansible.module_utils.common._collections_compat import Mapping, MutableMapping
+from ansible.utils.vars import combine_vars, get_unique_id
+
+__all__ = ['Host']
+
+
+class Host:
+ ''' a single ansible host '''
+
+ # __slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def __eq__(self, other):
+ if not isinstance(other, Host):
+ return False
+ return self._uuid == other._uuid
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __str__(self):
+ return self.get_name()
+
+ def __repr__(self):
+ return self.get_name()
+
+ def serialize(self):
+ groups = []
+ for group in self.groups:
+ groups.append(group.serialize())
+
+ return dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ address=self.address,
+ uuid=self._uuid,
+ groups=groups,
+ implicit=self.implicit,
+ )
+
+ def deserialize(self, data):
+ self.__init__(gen_uuid=False)
+
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.address = data.get('address', '')
+ self._uuid = data.get('uuid', None)
+ self.implicit = data.get('implicit', False)
+
+ groups = data.get('groups', [])
+ for group_data in groups:
+ g = Group()
+ g.deserialize(group_data)
+ self.groups.append(g)
+
+ def __init__(self, name=None, port=None, gen_uuid=True):
+
+ self.vars = {}
+ self.groups = []
+ self._uuid = None
+
+ self.name = name
+ self.address = name
+
+ if port:
+ self.set_variable('ansible_port', int(port))
+
+ if gen_uuid:
+ self._uuid = get_unique_id()
+ self.implicit = False
+
+ def get_name(self):
+ return self.name
+
+ def populate_ancestors(self, additions=None):
+ # populate ancestors
+ if additions is None:
+ for group in self.groups:
+ self.add_group(group)
+ else:
+ for group in additions:
+ if group not in self.groups:
+ self.groups.append(group)
+
+ def add_group(self, group):
+ added = False
+ # populate ancestors first
+ for oldg in group.get_ancestors():
+ if oldg not in self.groups:
+ self.groups.append(oldg)
+
+ # actually add group
+ if group not in self.groups:
+ self.groups.append(group)
+ added = True
+ return added
+
+ def remove_group(self, group):
+ removed = False
+ if group in self.groups:
+ self.groups.remove(group)
+ removed = True
+
+        # remove exclusive ancestors, except 'all'
+ for oldg in group.get_ancestors():
+ if oldg.name != 'all':
+ for childg in self.groups:
+ if oldg in childg.get_ancestors():
+ break
+ else:
+ self.remove_group(oldg)
+ return removed
+
+ def set_variable(self, key, value):
+ if key in self.vars and isinstance(self.vars[key], MutableMapping) and isinstance(value, Mapping):
+ self.vars[key] = combine_vars(self.vars[key], value)
+ else:
+ self.vars[key] = value
+
+ def get_groups(self):
+ return self.groups
+
+ def get_magic_vars(self):
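+        # Illustrative: for host 'db1.example.com' in groups 'all' and 'dbservers',
+        # this returns {'inventory_hostname': 'db1.example.com',
+        # 'inventory_hostname_short': 'db1', 'group_names': ['dbservers']}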
+ results = {}
+ results['inventory_hostname'] = self.name
+ results['inventory_hostname_short'] = self.name.split('.')[0]
+ results['group_names'] = sorted([g.name for g in self.get_groups() if g.name != 'all'])
+
+ return results
+
+ def get_vars(self):
+ return combine_vars(self.vars, self.get_magic_vars())
diff --git a/lib/ansible/inventory/manager.py b/lib/ansible/inventory/manager.py
new file mode 100644
index 00000000..5606b265
--- /dev/null
+++ b/lib/ansible/inventory/manager.py
@@ -0,0 +1,641 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+import os
+import sys
+import re
+import itertools
+import traceback
+
+from operator import attrgetter
+from random import shuffle
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
+from ansible.inventory.data import InventoryData
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins.loader import inventory_loader
+from ansible.utils.helpers import deduplicate_list
+from ansible.utils.path import unfrackpath
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+from ansible.vars.plugins import get_vars_from_inventory_sources
+
+display = Display()
+
+IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
+IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
+IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
+
+IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
+
+PATTERN_WITH_SUBSCRIPT = re.compile(
+ r'''^
+ (.+) # A pattern expression ending with...
+ \[(?: # A [subscript] expression comprising:
+ (-?[0-9]+)| # A single positive or negative number
+ ([0-9]+)([:-]) # Or an x:y or x: range.
+ ([0-9]*)
+ )\]
+ $
+ ''', re.X
+)
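+# Illustrative matches: 'webservers[0]' captures ('webservers', '0', None, None, None)
+# and 'webservers[1:3]' captures ('webservers', None, '1', ':', '3').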
+
+
+def order_patterns(patterns):
+ ''' takes a list of patterns and reorders them by modifier to apply them consistently '''
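+    # Illustrative: order_patterns(['!down', 'web', '&staging'])
+    # returns ['web', '&staging', '!down']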
+
+ # FIXME: this goes away if we apply patterns incrementally or by groups
+ pattern_regular = []
+ pattern_intersection = []
+ pattern_exclude = []
+ for p in patterns:
+ if not p:
+ continue
+
+ if p[0] == "!":
+ pattern_exclude.append(p)
+ elif p[0] == "&":
+ pattern_intersection.append(p)
+ else:
+ pattern_regular.append(p)
+
+    # if no regular pattern was given (i.e. only exclude and/or intersection
+    # patterns), implicitly start from 'all' so they still apply to something
+ if pattern_regular == []:
+ pattern_regular = ['all']
+
+ # when applying the host selectors, run those without the "&" or "!"
+ # first, then the &s, then the !s.
+ return pattern_regular + pattern_intersection + pattern_exclude
+
+
+def split_host_pattern(pattern):
+ """
+ Takes a string containing host patterns separated by commas (or a list
+ thereof) and returns a list of single patterns (which may not contain
+ commas). Whitespace is ignored.
+
+ Also accepts ':' as a separator for backwards compatibility, but it is
+ not recommended due to the conflict with IPv6 addresses and host ranges.
+
+ Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
+ """
+
+ if isinstance(pattern, list):
+ results = (split_host_pattern(p) for p in pattern)
+ # flatten the results
+ return list(itertools.chain.from_iterable(results))
+ elif not isinstance(pattern, string_types):
+ pattern = to_text(pattern, errors='surrogate_or_strict')
+
+ # If it's got commas in it, we'll treat it as a straightforward
+ # comma-separated list of patterns.
+ if u',' in pattern:
+ patterns = pattern.split(u',')
+
+ # If it doesn't, it could still be a single pattern. This accounts for
+ # non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
+ else:
+ try:
+ (base, port) = parse_address(pattern, allow_ranges=True)
+ patterns = [pattern]
+ except Exception:
+ # The only other case we accept is a ':'-separated list of patterns.
+ # This mishandles IPv6 addresses, and is retained only for backwards
+ # compatibility.
+ patterns = re.findall(
+ to_text(r'''(?: # We want to match something comprising:
+ [^\s:\[\]] # (anything other than whitespace or ':[]'
+ | # ...or...
+ \[[^\]]*\] # a single complete bracketed expression)
+ )+ # occurring once or more
+ '''), pattern, re.X
+ )
+
+ return [p.strip() for p in patterns if p.strip()]
+
+
+class InventoryManager(object):
+ ''' Creates and manages inventory '''
+
+ def __init__(self, loader, sources=None):
+
+ # base objects
+ self._loader = loader
+ self._inventory = InventoryData()
+
+        # a list of host names to constrain current inquiries to
+ self._restriction = None
+ self._subset = None
+
+ # caches
+ self._hosts_patterns_cache = {} # resolved full patterns
+ self._pattern_cache = {} # resolved individual patterns
+
+ # the inventory dirs, files, script paths or lists of hosts
+ if sources is None:
+ self._sources = []
+ elif isinstance(sources, string_types):
+ self._sources = [sources]
+ else:
+ self._sources = sources
+
+ # get to work!
+ self.parse_sources(cache=True)
+
+ @property
+ def localhost(self):
+ return self._inventory.localhost
+
+ @property
+ def groups(self):
+ return self._inventory.groups
+
+ @property
+ def hosts(self):
+ return self._inventory.hosts
+
+ def add_host(self, host, group=None, port=None):
+ return self._inventory.add_host(host, group, port)
+
+ def add_group(self, group):
+ return self._inventory.add_group(group)
+
+ def get_groups_dict(self):
+ return self._inventory.get_groups_dict()
+
+ def reconcile_inventory(self):
+ self.clear_caches()
+ return self._inventory.reconcile_inventory()
+
+ def get_host(self, hostname):
+ return self._inventory.get_host(hostname)
+
+ def _fetch_inventory_plugins(self):
+ ''' sets up loaded inventory plugins for usage '''
+
+ display.vvvv('setting up inventory plugins')
+
+ plugins = []
+ for name in C.INVENTORY_ENABLED:
+ plugin = inventory_loader.get(name)
+ if plugin:
+ plugins.append(plugin)
+ else:
+ display.warning('Failed to load inventory plugin, skipping %s' % name)
+
+ if not plugins:
+ raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.")
+
+ return plugins
+
+ def parse_sources(self, cache=False):
+ ''' iterate over inventory sources and parse each one to populate it'''
+
+ parsed = False
+ # allow for multiple inventory parsing
+ for source in self._sources:
+
+ if source:
+ if ',' not in source:
+ source = unfrackpath(source, follow=False)
+ parse = self.parse_source(source, cache=cache)
+ if parse and not parsed:
+ parsed = True
+
+ if parsed:
+ # do post processing
+ self._inventory.reconcile_inventory()
+ else:
+ if C.INVENTORY_UNPARSED_IS_FAILED:
+ raise AnsibleError("No inventory was parsed, please check your configuration and options.")
+ else:
+ display.warning("No inventory was parsed, only implicit localhost is available")
+
+ for group in self.groups.values():
+ group.vars = combine_vars(group.vars, get_vars_from_inventory_sources(self._loader, self._sources, [group], 'inventory'))
+ for host in self.hosts.values():
+ host.vars = combine_vars(host.vars, get_vars_from_inventory_sources(self._loader, self._sources, [host], 'inventory'))
+
+ def parse_source(self, source, cache=False):
+ ''' Generate or update inventory for the source provided '''
+
+ parsed = False
+ display.debug(u'Examining possible inventory source: %s' % source)
+
+ # use binary for path functions
+ b_source = to_bytes(source)
+
+ # process directories as a collection of inventories
+ if os.path.isdir(b_source):
+ display.debug(u'Searching for inventory files in directory: %s' % source)
+ for i in sorted(os.listdir(b_source)):
+
+ display.debug(u'Considering %s' % i)
+ # Skip hidden files and stuff we explicitly ignore
+ if IGNORED.search(i):
+ continue
+
+ # recursively deal with directory entries
+ fullpath = to_text(os.path.join(b_source, i), errors='surrogate_or_strict')
+ parsed_this_one = self.parse_source(fullpath, cache=cache)
+ display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
+ if not parsed:
+ parsed = parsed_this_one
+ else:
+ # left with strings or files, let plugins figure it out
+
+ # set so new hosts can use for inventory_file/dir vars
+ self._inventory.current_source = source
+
+ # try source with each plugin
+ failures = []
+ for plugin in self._fetch_inventory_plugins():
+
+ plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
+ display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))
+
+ # initialize and figure out if plugin wants to attempt parsing this file
+ try:
+ plugin_wants = bool(plugin.verify_file(source))
+ except Exception:
+ plugin_wants = False
+
+ if plugin_wants:
+ try:
+ # FIXME in case plugin fails 1/2 way we have partial inventory
+ plugin.parse(self._inventory, self._loader, source, cache=cache)
+ try:
+ plugin.update_cache_if_changed()
+ except AttributeError:
+ # some plugins might not implement caching
+ pass
+ parsed = True
+ display.vvv('Parsed %s inventory source with %s plugin' % (source, plugin_name))
+ break
+ except AnsibleParserError as e:
+ display.debug('%s was not parsable by %s' % (source, plugin_name))
+ tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
+ failures.append({'src': source, 'plugin': plugin_name, 'exc': e, 'tb': tb})
+ except Exception as e:
+ display.debug('%s failed while attempting to parse %s' % (plugin_name, source))
+ tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
+ failures.append({'src': source, 'plugin': plugin_name, 'exc': AnsibleError(e), 'tb': tb})
+ else:
+ display.vvv("%s declined parsing %s as it did not pass its verify_file() method" % (plugin_name, source))
+ else:
+ if not parsed and failures:
+ # only if no plugin processed files should we show errors.
+ for fail in failures:
+ display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc'])))
+ if 'tb' in fail:
+ display.vvv(to_text(fail['tb']))
+ if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
+ raise AnsibleError(u'Completely failed to parse inventory source %s' % (source))
+ if not parsed:
+ if source != '/etc/ansible/hosts' or os.path.exists(source):
+ # only warn if NOT using the default and if using it, only if the file is present
+ display.warning("Unable to parse %s as an inventory source" % source)
+
+            # clean up, just in case
+ self._inventory.current_source = None
+
+ return parsed
+
+ def clear_caches(self):
+ ''' clear all caches '''
+ self._hosts_patterns_cache = {}
+ self._pattern_cache = {}
+ # FIXME: flush inventory cache
+
+ def refresh_inventory(self):
+ ''' recalculate inventory '''
+
+ self.clear_caches()
+ self._inventory = InventoryData()
+ self.parse_sources(cache=False)
+
+ def _match_list(self, items, pattern_str):
+ # compile patterns
+ try:
+            if pattern_str[0] != '~':
+ pattern = re.compile(fnmatch.translate(pattern_str))
+ else:
+ pattern = re.compile(pattern_str[1:])
+ except Exception:
+ raise AnsibleError('Invalid host list pattern: %s' % pattern_str)
+
+ # apply patterns
+ results = []
+ for item in items:
+ if pattern.match(item):
+ results.append(item)
+ return results
+
+ def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
+ """
+ Takes a pattern or list of patterns and returns a list of matching
+ inventory host names, taking into account any active restrictions
+ or applied subsets
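+
+        e.g. (illustrative) get_hosts('webservers:&staging:!down') returns the
+        hosts in 'webservers' that are also in 'staging' and not in 'down'.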
+ """
+
+ hosts = []
+
+ # Check if pattern already computed
+ if isinstance(pattern, list):
+ pattern_list = pattern[:]
+ else:
+ pattern_list = [pattern]
+
+ if pattern_list:
+ if not ignore_limits and self._subset:
+ pattern_list.extend(self._subset)
+
+ if not ignore_restrictions and self._restriction:
+ pattern_list.extend(self._restriction)
+
+ # This is only used as a hash key in the self._hosts_patterns_cache dict
+ # a tuple is faster than stringifying
+ pattern_hash = tuple(pattern_list)
+
+ if pattern_hash not in self._hosts_patterns_cache:
+
+ patterns = split_host_pattern(pattern)
+ hosts = self._evaluate_patterns(patterns)
+
+ # mainly useful for hostvars[host] access
+ if not ignore_limits and self._subset:
+ # exclude hosts not in a subset, if defined
+ subset_uuids = set(s._uuid for s in self._evaluate_patterns(self._subset))
+ hosts = [h for h in hosts if h._uuid in subset_uuids]
+
+ if not ignore_restrictions and self._restriction:
+ # exclude hosts mentioned in any restriction (ex: failed hosts)
+ hosts = [h for h in hosts if h.name in self._restriction]
+
+ self._hosts_patterns_cache[pattern_hash] = deduplicate_list(hosts)
+
+ # sort hosts list if needed (should only happen when called from strategy)
+ if order in ['sorted', 'reverse_sorted']:
+ hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
+ elif order == 'reverse_inventory':
+ hosts = self._hosts_patterns_cache[pattern_hash][::-1]
+ else:
+ hosts = self._hosts_patterns_cache[pattern_hash][:]
+ if order == 'shuffle':
+ shuffle(hosts)
+ elif order not in [None, 'inventory']:
+ raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)
+
+ return hosts
+
+ def _evaluate_patterns(self, patterns):
+ """
+ Takes a list of patterns and returns a list of matching host names,
+ taking into account any negative and intersection patterns.
+ """
+
+ patterns = order_patterns(patterns)
+ hosts = []
+
+ for p in patterns:
+ # avoid resolving a pattern that is a plain host
+ if p in self._inventory.hosts:
+ hosts.append(self._inventory.get_host(p))
+ else:
+ that = self._match_one_pattern(p)
+ if p[0] == "!":
+ that = set(that)
+ hosts = [h for h in hosts if h not in that]
+ elif p[0] == "&":
+ that = set(that)
+ hosts = [h for h in hosts if h in that]
+ else:
+ existing_hosts = set(y.name for y in hosts)
+ hosts.extend([h for h in that if h.name not in existing_hosts])
+ return hosts
+
+ def _match_one_pattern(self, pattern):
+ """
+ Takes a single pattern and returns a list of matching host names.
+ Ignores intersection (&) and exclusion (!) specifiers.
+
+ The pattern may be:
+
+ 1. A regex starting with ~, e.g. '~[abc]*'
+ 2. A shell glob pattern with ?/*/[chars]/[!chars], e.g. 'foo*'
+ 3. An ordinary word that matches itself only, e.g. 'foo'
+
+ The pattern is matched using the following rules:
+
+ 1. If it's 'all', it matches all hosts in all groups.
+ 2. Otherwise, for each known group name:
+ (a) if it matches the group name, the results include all hosts
+ in the group or any of its children.
+ (b) otherwise, if it matches any hosts in the group, the results
+ include the matching hosts.
+
+ This means that 'foo*' may match one or more groups (thus including all
+ hosts therein) but also hosts in other groups.
+
+ The built-in groups 'all' and 'ungrouped' are special. No pattern can
+ match these group names (though 'all' behaves as though it matches, as
+ described above). The word 'ungrouped' can match a host of that name,
+ and patterns like 'ungr*' and 'al*' can match either hosts or groups
+ other than all and ungrouped.
+
+ If the pattern matches one or more group names according to these rules,
+ it may have an optional range suffix to select a subset of the results.
+ This is allowed only if the pattern is not a regex, i.e. '~foo[1]' does
+ not work (the [1] is interpreted as part of the regex), but 'foo*[1]'
+ would work if 'foo*' matched the name of one or more groups.
+
+ Duplicate matches are always eliminated from the results.
+ """
+
+ if pattern[0] in ("&", "!"):
+ pattern = pattern[1:]
+
+ if pattern not in self._pattern_cache:
+ (expr, slice) = self._split_subscript(pattern)
+ hosts = self._enumerate_matches(expr)
+ try:
+ hosts = self._apply_subscript(hosts, slice)
+ except IndexError:
+ raise AnsibleError("No hosts matched the subscripted pattern '%s'" % pattern)
+ self._pattern_cache[pattern] = hosts
+
+ return self._pattern_cache[pattern]
+
+ def _split_subscript(self, pattern):
+ """
+ Takes a pattern, checks if it has a subscript, and returns the pattern
+ without the subscript and a (start,end) tuple representing the given
+ subscript (or None if there is no subscript).
+
+ Validates that the subscript is in the right syntax, but doesn't make
+ sure the actual indices make sense in context.
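+
+        For example (illustrative): 'foo[1]' -> ('foo', (1, None)) and
+        'foo[2:5]' -> ('foo', (2, 5)).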
+ """
+
+ # Do not parse regexes for enumeration info
+ if pattern[0] == '~':
+ return (pattern, None)
+
+ # We want a pattern followed by an integer or range subscript.
+ # (We can't be more restrictive about the expression because the
+ # fnmatch semantics permit [\[:\]] to occur.)
+
+ subscript = None
+ m = PATTERN_WITH_SUBSCRIPT.match(pattern)
+ if m:
+ (pattern, idx, start, sep, end) = m.groups()
+ if idx:
+ subscript = (int(idx), None)
+ else:
+ if not end:
+ end = -1
+ subscript = (int(start), int(end))
+ if sep == '-':
+ display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
+
+ return (pattern, subscript)
+
+ def _apply_subscript(self, hosts, subscript):
+ """
+ Takes a list of hosts and a (start,end) tuple and returns the subset of
+ hosts based on the subscript (which may be None to return all hosts).
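+
+        e.g. (illustrative) subscript (1, 3) returns hosts[1:4] (an inclusive
+        range), while (1, None) returns just [hosts[1]].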
+ """
+
+ if not hosts or not subscript:
+ return hosts
+
+ (start, end) = subscript
+
+ if end:
+ if end == -1:
+ end = len(hosts) - 1
+ return hosts[start:end + 1]
+ else:
+ return [hosts[start]]
+
+ def _enumerate_matches(self, pattern):
+ """
+ Returns a list of host names matching the given pattern according to the
+ rules explained above in _match_one_pattern.
+ """
+
+ results = []
+ # check if pattern matches group
+ matching_groups = self._match_list(self._inventory.groups, pattern)
+ if matching_groups:
+ for groupname in matching_groups:
+ results.extend(self._inventory.groups[groupname].get_hosts())
+
+ # check hosts if no groups matched or it is a regex/glob pattern
+ if not matching_groups or pattern[0] == '~' or any(special in pattern for special in ('.', '?', '*', '[')):
+ # pattern might match host
+ matching_hosts = self._match_list(self._inventory.hosts, pattern)
+ if matching_hosts:
+ for hostname in matching_hosts:
+ results.append(self._inventory.hosts[hostname])
+
+ if not results and pattern in C.LOCALHOST:
+ # get_host autocreates implicit when needed
+ implicit = self._inventory.get_host(pattern)
+ if implicit:
+ results.append(implicit)
+
+ # Display warning if specified host pattern did not match any groups or hosts
+ if not results and not matching_groups and pattern != 'all':
+ msg = "Could not match supplied host pattern, ignoring: %s" % pattern
+ display.debug(msg)
+ if C.HOST_PATTERN_MISMATCH == 'warning':
+ display.warning(msg)
+ elif C.HOST_PATTERN_MISMATCH == 'error':
+ raise AnsibleError(msg)
+ # no need to write 'ignore' state
+
+ return results
+
+ def list_hosts(self, pattern="all"):
+ """ return a list of hostnames for a pattern """
+ # FIXME: cache?
+ result = self.get_hosts(pattern)
+
+ # allow implicit localhost if pattern matches and no other results
+ if len(result) == 0 and pattern in C.LOCALHOST:
+ result = [pattern]
+
+ return result
+
+ def list_groups(self):
+ # FIXME: cache?
+ return sorted(self._inventory.groups.keys())
+
+ def restrict_to_hosts(self, restriction):
+ """
+ Restrict list operations to the hosts given in restriction. This is used
+        to batch serial operations in main playbook code; don't use it for other
+        purposes.
+ """
+ if restriction is None:
+ return
+ elif not isinstance(restriction, list):
+ restriction = [restriction]
+ self._restriction = set(to_text(h.name) for h in restriction)
+
+ def subset(self, subset_pattern):
+ """
+ Limits inventory results to a subset of inventory that matches a given
+        pattern, for example to select a given geographic or numeric slice from
+        a previous 'hosts' selection that only selected roles, or vice versa.
+ Corresponds to --limit parameter to ansible-playbook
+ """
+ if subset_pattern is None:
+ self._subset = None
+ else:
+ subset_patterns = split_host_pattern(subset_pattern)
+ results = []
+ # allow Unix style @filename data
+ for x in subset_patterns:
+ if not x:
+ continue
+
+ if x[0] == "@":
+ b_limit_file = to_bytes(x[1:])
+ if not os.path.exists(b_limit_file):
+ raise AnsibleError(u'Unable to find limit file %s' % b_limit_file)
+ with open(b_limit_file) as fd:
+ results.extend([to_text(l.strip()) for l in fd.read().split("\n")])
+ else:
+ results.append(to_text(x))
+ self._subset = results
+
+ def remove_restriction(self):
+ """ Do not restrict list operations """
+ self._restriction = None
+
+ def clear_pattern_cache(self):
+ self._pattern_cache = {}
diff --git a/lib/ansible/module_utils/__init__.py b/lib/ansible/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/__init__.py
diff --git a/lib/ansible/module_utils/_text.py b/lib/ansible/module_utils/_text.py
new file mode 100644
index 00000000..6cd77217
--- /dev/null
+++ b/lib/ansible/module_utils/_text.py
@@ -0,0 +1,15 @@
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+.. warning:: Use ansible.module_utils.common.text.converters instead.
+"""
+
+# Backwards compat for people still calling it from this package
+import codecs
+
+from ansible.module_utils.six import PY3, text_type, binary_type
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
diff --git a/lib/ansible/module_utils/ansible_release.py b/lib/ansible/module_utils/ansible_release.py
new file mode 120000
index 00000000..fee41085
--- /dev/null
+++ b/lib/ansible/module_utils/ansible_release.py
@@ -0,0 +1 @@
+../release.py
\ No newline at end of file
diff --git a/lib/ansible/module_utils/api.py b/lib/ansible/module_utils/api.py
new file mode 100644
index 00000000..46a036d3
--- /dev/null
+++ b/lib/ansible/module_utils/api.py
@@ -0,0 +1,116 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2015, Brian Coca, <bcoca@ansible.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""
+This module adds shared support for generic api modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+The 'api' module provides the following common argument specs:
+
+ * rate limit spec
+ - rate: number of requests per time unit (int)
+ - rate_limit: time window in which the limit is applied in seconds
+
+ * retry spec
+ - retries: number of attempts
+ - retry_pause: delay between attempts in seconds
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+
+def rate_limit_argument_spec(spec=None):
+ """Creates an argument spec for working with rate limiting"""
+ arg_spec = (dict(
+ rate=dict(type='int'),
+ rate_limit=dict(type='int'),
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def retry_argument_spec(spec=None):
+ """Creates an argument spec for working with retrying"""
+ arg_spec = (dict(
+ retries=dict(type='int'),
+ retry_pause=dict(type='float', default=1),
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def basic_auth_argument_spec(spec=None):
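+    # Illustrative usage: merge these common args into a module's own spec, e.g.
+    #   argument_spec = basic_auth_argument_spec(dict(state=dict(type='str')))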
+ arg_spec = (dict(
+ api_username=dict(type='str'),
+ api_password=dict(type='str', no_log=True),
+ api_url=dict(type='str'),
+ validate_certs=dict(type='bool', default=True)
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def rate_limit(rate=None, rate_limit=None):
+ """rate limiting decorator"""
+ minrate = None
+ if rate is not None and rate_limit is not None:
+ minrate = float(rate_limit) / float(rate)
+
+ def wrapper(f):
+ last = [0.0]
+
+ def ratelimited(*args, **kwargs):
+ if sys.version_info >= (3, 8):
+ real_time = time.process_time
+ else:
+ real_time = time.clock
+ if minrate is not None:
+ elapsed = real_time() - last[0]
+ left = minrate - elapsed
+ if left > 0:
+ time.sleep(left)
+ last[0] = real_time()
+ ret = f(*args, **kwargs)
+ return ret
+
+ return ratelimited
+ return wrapper
+
+
+def retry(retries=None, retry_pause=1):
+ """Retry decorator"""
+ def wrapper(f):
+
+ def retried(*args, **kwargs):
+ retry_count = 0
+ if retries is not None:
+ ret = None
+ while True:
+ retry_count += 1
+ if retry_count >= retries:
+ raise Exception("Retry limit exceeded: %d" % retries)
+ try:
+ ret = f(*args, **kwargs)
+ except Exception:
+ pass
+ if ret:
+ break
+ time.sleep(retry_pause)
+ return ret
+
+ return retried
+ return wrapper
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
new file mode 100644
index 00000000..24d225f5
--- /dev/null
+++ b/lib/ansible/module_utils/basic.py
@@ -0,0 +1,2853 @@
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+FILE_ATTRIBUTES = {
+ 'A': 'noatime',
+ 'a': 'append',
+ 'c': 'compressed',
+ 'C': 'nocow',
+ 'd': 'nodump',
+ 'D': 'dirsync',
+ 'e': 'extents',
+ 'E': 'encrypted',
+ 'h': 'blocksize',
+ 'i': 'immutable',
+ 'I': 'indexed',
+ 'j': 'journalled',
+ 'N': 'inline',
+ 's': 'zero',
+ 'S': 'synchronous',
+ 't': 'notail',
+ 'T': 'blockroot',
+ 'u': 'undelete',
+ 'X': 'compressedraw',
+ 'Z': 'compresseddirty',
+}
+
+# Ansible modules can be written in any language.
+# The functions available here can be used to do many common tasks,
+# to simplify development of Python modules.
+
+import __main__
+import atexit
+import errno
+import datetime
+import grp
+import fcntl
+import locale
+import os
+import pwd
+import platform
+import re
+import select
+import shlex
+import shutil
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import types
+
+from collections import deque
+from itertools import chain, repeat
+
+try:
+ import syslog
+ HAS_SYSLOG = True
+except ImportError:
+ HAS_SYSLOG = False
+
+try:
+ from systemd import journal
+    # Double check that journal has method sendv() (some packages don't)
+ has_journal = hasattr(journal, 'sendv')
+except ImportError:
+ has_journal = False
+
+HAVE_SELINUX = False
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ pass
+
+# Python2 & 3 way to get NoneType
+NoneType = type(None)
+
+from ansible.module_utils.compat import selectors
+
+from ._text import to_native, to_bytes, to_text
+from ansible.module_utils.common.text.converters import (
+ jsonify,
+ container_to_bytes as json_dict_unicode_to_bytes,
+ container_to_text as json_dict_bytes_to_unicode,
+)
+
+from ansible.module_utils.common.text.formatters import (
+ lenient_lowercase,
+ bytes_to_human,
+ human_to_bytes,
+ SIZE_RANGES,
+)
+
+try:
+ from ansible.module_utils.common._json_compat import json
+except ImportError as e:
+ print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e)))
+ sys.exit(1)
+
+
+AVAILABLE_HASH_ALGORITHMS = dict()
+try:
+ import hashlib
+
+ # python 2.7.9+ and 2.7.0+
+ for attribute in ('available_algorithms', 'algorithms'):
+ algorithms = getattr(hashlib, attribute, None)
+ if algorithms:
+ break
+ if algorithms is None:
+ # python 2.5+
+ algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
+ for algorithm in algorithms:
+ AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
+
+ # we may have been able to import md5 but it could still not be available
+ try:
+ hashlib.md5()
+ except ValueError:
+ AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
+except Exception:
+ import sha
+ AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
+ try:
+ import md5
+ AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
+ except Exception:
+ pass
+
+from ansible.module_utils.common._collections_compat import (
+ KeysView,
+ Mapping, MutableMapping,
+ Sequence, MutableSequence,
+ Set, MutableSet,
+)
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.file import (
+ _PERM_BITS as PERM_BITS,
+ _EXEC_PERM_BITS as EXEC_PERM_BITS,
+ _DEFAULT_PERM as DEFAULT_PERM,
+ is_executable,
+ format_attributes,
+ get_flags_from_attributes,
+)
+from ansible.module_utils.common.sys_info import (
+ get_distribution,
+ get_distribution_version,
+ get_platform_subclass,
+)
+from ansible.module_utils.pycompat24 import get_exception, literal_eval
+from ansible.module_utils.common.parameters import (
+ handle_aliases,
+ list_deprecations,
+ list_no_log_values,
+ PASS_VARS,
+ PASS_BOOLS,
+)
+
+from ansible.module_utils.six import (
+ PY2,
+ PY3,
+ b,
+ binary_type,
+ integer_types,
+ iteritems,
+ string_types,
+ text_type,
+)
+from ansible.module_utils.six.moves import map, reduce, shlex_quote
+from ansible.module_utils.common.validation import (
+ check_missing_parameters,
+ check_mutually_exclusive,
+ check_required_arguments,
+ check_required_by,
+ check_required_if,
+ check_required_one_of,
+ check_required_together,
+ count_terms,
+ check_type_bool,
+ check_type_bits,
+ check_type_bytes,
+ check_type_float,
+ check_type_int,
+ check_type_jsonarg,
+ check_type_list,
+ check_type_dict,
+ check_type_path,
+ check_type_raw,
+ check_type_str,
+ safe_eval,
+)
+from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
+from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
+from ansible.module_utils.common.warnings import (
+ deprecate,
+ get_deprecation_messages,
+ get_warning_messages,
+ warn,
+)
+
+# Note: When getting Sequence from collections, it matches with strings. If
+# this matters, make sure to check for strings before checking for sequencetype
+SEQUENCETYPE = frozenset, KeysView, Sequence
+
+PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
+
+imap = map
+
+try:
+ # Python 2
+ unicode
+except NameError:
+ # Python 3
+ unicode = text_type
+
+try:
+ # Python 2
+ basestring
+except NameError:
+ # Python 3
+ basestring = string_types
+
+_literal_eval = literal_eval
+
+# End of deprecated names
+
+# Internal global holding passed in params. This is consulted in case
+# multiple AnsibleModules are created. Otherwise each AnsibleModule would
+# attempt to read from stdin. Other code should not use this directly as it
+# is an internal implementation detail
+_ANSIBLE_ARGS = None
+
+FILE_COMMON_ARGUMENTS = dict(
+    # Arguments for setting metadata (mode, ownership, permissions in general) on
+ # created files (these are used by set_fs_attributes_if_different and included in
+ # load_file_common_arguments)
+ mode=dict(type='raw'),
+ owner=dict(type='str'),
+ group=dict(type='str'),
+ seuser=dict(type='str'),
+ serole=dict(type='str'),
+ selevel=dict(type='str'),
+ setype=dict(type='str'),
+ attributes=dict(type='str', aliases=['attr']),
+ unsafe_writes=dict(type='bool', default=False), # should be available to any module using atomic_move
+)
+
+PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
+
+# Used for parsing symbolic file perms
+MODE_OPERATOR_RE = re.compile(r'[+=-]')
+USERS_RE = re.compile(r'[^ugo]')
+PERMS_RE = re.compile(r'[^rwxXstugo]')
+
+# Used for determining if the system is running a new enough python version
+# and should only restrict on our documented minimum versions
+_PY3_MIN = sys.version_info[:2] >= (3, 5)
+_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
+_PY_MIN = _PY3_MIN or _PY2_MIN
+if not _PY_MIN:
+ print(
+ '\n{"failed": true, '
+ '"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
+ )
+ sys.exit(1)
+
+
+#
+# Deprecated functions
+#
+
+def get_platform():
+ '''
+ **Deprecated** Use :py:func:`platform.system` directly.
+
+ :returns: Name of the platform the module is running on in a native string
+
+ Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
+ the result of calling :py:func:`platform.system`.
+ '''
+ return platform.system()
+
+# End deprecated functions
+
+
+#
+# Compat shims
+#
+
+def load_platform_subclass(cls, *args, **kwargs):
+ """**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
+ platform_cls = get_platform_subclass(cls)
+ return super(cls, platform_cls).__new__(platform_cls)
+
+
+def get_all_subclasses(cls):
+ """**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
+ return list(_get_all_subclasses(cls))
+
+
+# End compat shims
+
+
+def _remove_values_conditions(value, no_log_strings, deferred_removals):
+ """
+ Helper function for :meth:`remove_values`.
+
+ :arg value: The value to check for strings that need to be stripped
+ :arg no_log_strings: set of strings which must be stripped out of any values
+ :arg deferred_removals: List which holds information about nested
+ containers that have to be iterated for removals. It is passed into
+ this function so that more entries can be added to it if value is
+ a container type. The format of each entry is a 2-tuple where the first
+ element is the ``value`` parameter and the second value is a new
+ container to copy the elements of ``value`` into once iterated.
+ :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
+ 1. :class:`~datetime.datetime` objects which are changed into a string representation.
+ 2. objects which are in no_log_strings are replaced with a placeholder
+ so that no sensitive data is leaked.
+ If ``value`` is a container type, returns a new empty container.
+
+ ``deferred_removals`` is added to as a side-effect of this function.
+
+ .. warning:: It is up to the caller to make sure the order in which value
+ is passed in is correct. For instance, higher level containers need
+ to be passed in before lower level containers. For example, given
+ ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
+ dictionary for ``level1``, then the dict for ``level2``, and finally
+ the list for ``level3``.
+ """
+ if isinstance(value, (text_type, binary_type)):
+ # Need native str type
+ native_str_value = value
+ if isinstance(value, text_type):
+ value_is_text = True
+ if PY2:
+ native_str_value = to_bytes(value, errors='surrogate_or_strict')
+ elif isinstance(value, binary_type):
+ value_is_text = False
+ if PY3:
+ native_str_value = to_text(value, errors='surrogate_or_strict')
+
+ if native_str_value in no_log_strings:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ for omit_me in no_log_strings:
+ native_str_value = native_str_value.replace(omit_me, '*' * 8)
+
+ if value_is_text and isinstance(native_str_value, binary_type):
+ value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
+ elif not value_is_text and isinstance(native_str_value, text_type):
+ value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
+ else:
+ value = native_str_value
+
+ elif isinstance(value, Sequence):
+ if isinstance(value, MutableSequence):
+ new_value = type(value)()
+ else:
+ new_value = [] # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, Set):
+ if isinstance(value, MutableSet):
+ new_value = type(value)()
+ else:
+ new_value = set() # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, Mapping):
+ if isinstance(value, MutableMapping):
+ new_value = type(value)()
+ else:
+ new_value = {} # Need a mutable value
+ deferred_removals.append((value, new_value))
+ value = new_value
+
+ elif isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
+ if stringy_value in no_log_strings:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ for omit_me in no_log_strings:
+ if omit_me in stringy_value:
+ return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+
+ elif isinstance(value, datetime.datetime):
+ value = value.isoformat()
+ else:
+ raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+
+ return value
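+
+# Illustrative sketch of the deferred_removals contract (not executed here):
+#
+#     deferred = deque()
+#     _remove_values_conditions('secret', ['secret'], deferred)
+#     # -> 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+#     _remove_values_conditions({'k': 'secret'}, ['secret'], deferred)
+#     # -> {}; deferred now holds ({'k': 'secret'}, {}) for the caller to walk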
+
+
+def remove_values(value, no_log_strings):
+ """ Remove strings in no_log_strings from value. If value is a container
+ type, then remove a lot more.
+
+ Use of deferred_removals exists, rather than a pure recursive solution,
+ because of the potential to hit the maximum recursion depth when dealing with
+ large amounts of data (see issue #24560).
+ """
+
+ deferred_removals = deque()
+
+ no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
+ new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
+
+ while deferred_removals:
+ old_data, new_data = deferred_removals.popleft()
+ if isinstance(new_data, Mapping):
+ for old_key, old_elem in old_data.items():
+ new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
+ new_data[old_key] = new_elem
+ else:
+ for elem in old_data:
+ new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
+ if isinstance(new_data, MutableSequence):
+ new_data.append(new_elem)
+ elif isinstance(new_data, MutableSet):
+ new_data.add(new_elem)
+ else:
+ raise TypeError('Unknown container type encountered when removing private values from output')
+
+ return new_value
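+
+# For example (illustrative):
+#
+#     remove_values('password=hunter2', ['hunter2'])
+#     # -> 'password=********'
+#     remove_values({'out': ['hunter2'], 'rc': 0}, ['hunter2'])
+#     # -> {'out': ['VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'], 'rc': 0}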
+
+
+def _sanitize_keys_conditions(value, no_log_strings, ignore_keys, deferred_removals):
+ """ Helper method to sanitize_keys() to build deferred_removals and avoid deep recursion. """
+ if isinstance(value, (text_type, binary_type)):
+ return value
+
+ if isinstance(value, Sequence):
+ if isinstance(value, MutableSequence):
+ new_value = type(value)()
+ else:
+ new_value = [] # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, Set):
+ if isinstance(value, MutableSet):
+ new_value = type(value)()
+ else:
+ new_value = set() # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, Mapping):
+ if isinstance(value, MutableMapping):
+ new_value = type(value)()
+ else:
+ new_value = {} # Need a mutable value
+ deferred_removals.append((value, new_value))
+ return new_value
+
+ if isinstance(value, tuple(chain(integer_types, (float, bool, NoneType)))):
+ return value
+
+ if isinstance(value, (datetime.datetime, datetime.date)):
+ return value
+
+ raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
+
+
+def sanitize_keys(obj, no_log_strings, ignore_keys=frozenset()):
+ """ Sanitize the keys in a container object by removing no_log values from key names.
+
+ This is a companion function to the `remove_values()` function. Similar to that function,
+ we make use of deferred_removals to avoid hitting maximum recursion depth in cases of
+ large data structures.
+
+ :param obj: The container object to sanitize. Non-container objects are returned unmodified.
+ :param no_log_strings: A set of string values we do not want logged.
+ :param ignore_keys: A set of string values of keys to not sanitize.
+
+ :returns: An object with sanitized keys.
+ """
+
+ deferred_removals = deque()
+
+ no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
+ new_value = _sanitize_keys_conditions(obj, no_log_strings, ignore_keys, deferred_removals)
+
+ while deferred_removals:
+ old_data, new_data = deferred_removals.popleft()
+
+ if isinstance(new_data, Mapping):
+ for old_key, old_elem in old_data.items():
+ if old_key in ignore_keys or old_key.startswith('_ansible'):
+ new_data[old_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
+ else:
+ # Sanitize the old key. We take advantage of the sanitizing code in
+ # _remove_values_conditions() rather than recreating it here.
+ new_key = _remove_values_conditions(old_key, no_log_strings, None)
+ new_data[new_key] = _sanitize_keys_conditions(old_elem, no_log_strings, ignore_keys, deferred_removals)
+ else:
+ for elem in old_data:
+ new_elem = _sanitize_keys_conditions(elem, no_log_strings, ignore_keys, deferred_removals)
+ if isinstance(new_data, MutableSequence):
+ new_data.append(new_elem)
+ elif isinstance(new_data, MutableSet):
+ new_data.add(new_elem)
+ else:
+ raise TypeError('Unknown container type encountered when removing private values from keys')
+
+ return new_value
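+
+# For example (illustrative) -- note that only the keys are sanitized here;
+# values are handled separately by remove_values():
+#
+#     sanitize_keys({'secret-key': 'my-value'}, ['secret'])
+#     # -> {'********-key': 'my-value'}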
+
+
+def heuristic_log_sanitize(data, no_log_values=None):
+ ''' Remove strings that look like passwords from log messages '''
+ # Currently filters:
+ # user:pass@foo/whatever and http://username:pass@wherever/foo
+ # This code has false positives and consumes parts of logs that are
+ # not passwds
+
+ # begin: start of a passwd containing string
+ # end: end of a passwd containing string
+ # sep: char between user and passwd
+ # prev_begin: where in the overall string to start a search for
+ # a passwd
+ # sep_search_end: where in the string to end a search for the sep
+ data = to_native(data)
+
+ output = []
+ begin = len(data)
+ prev_begin = begin
+ sep = 1
+ while sep:
+ # Find the potential end of a passwd
+ try:
+ end = data.rindex('@', 0, begin)
+ except ValueError:
+ # No passwd in the rest of the data
+ output.insert(0, data[0:begin])
+ break
+
+ # Search for the beginning of a passwd
+ sep = None
+ sep_search_end = end
+ while not sep:
+ # URL-style username+password
+ try:
+ begin = data.rindex('://', 0, sep_search_end)
+ except ValueError:
+ # No url style in the data, check for ssh style in the
+ # rest of the string
+ begin = 0
+ # Search for separator
+ try:
+ sep = data.index(':', begin + 3, end)
+ except ValueError:
+ # No separator; choices:
+ if begin == 0:
+ # Searched the whole string so there's no password
+ # here. Return the remaining data (begin is 0 at this
+ # point, so the slice must use prev_begin or the rest
+ # of the message would be dropped).
+ output.insert(0, data[0:prev_begin])
+ break
+ # Search for a different beginning of the password field.
+ sep_search_end = begin
+ continue
+ if sep:
+ # Password was found; remove it.
+ output.insert(0, data[end:prev_begin])
+ output.insert(0, '********')
+ output.insert(0, data[begin:sep + 1])
+ prev_begin = begin
+
+ output = ''.join(output)
+ if no_log_values:
+ output = remove_values(output, no_log_values)
+ return output
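+
+# For example (illustrative):
+#
+#     heuristic_log_sanitize('http://user:hunter2@example.com/path')
+#     # -> 'http://user:********@example.com/path'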
+
+
+def _load_params():
+ ''' read the modules parameters and store them globally.
+
+ This function may be needed for certain very dynamic custom modules which
+ want to process the parameters that are being handed to the module. Since
+ this is so closely tied to the implementation of modules we cannot
+ guarantee API stability for it (it may change between versions) however we
+ will try not to break it gratuitously. It is certainly more future-proof
+ to call this function and consume its outputs than to implement the logic
+ inside it as a copy in your own code.
+ '''
+ global _ANSIBLE_ARGS
+ if _ANSIBLE_ARGS is not None:
+ buffer = _ANSIBLE_ARGS
+ else:
+ # debug overrides to read args from file or cmdline
+
+ # Avoid tracebacks when locale is non-utf8
+ # We control the args and we pass them as utf8
+ if len(sys.argv) > 1:
+ if os.path.isfile(sys.argv[1]):
+ fd = open(sys.argv[1], 'rb')
+ buffer = fd.read()
+ fd.close()
+ else:
+ buffer = sys.argv[1]
+ if PY3:
+ buffer = buffer.encode('utf-8', errors='surrogateescape')
+ # default case, read from stdin
+ else:
+ if PY2:
+ buffer = sys.stdin.read()
+ else:
+ buffer = sys.stdin.buffer.read()
+ _ANSIBLE_ARGS = buffer
+
+ try:
+ params = json.loads(buffer.decode('utf-8'))
+ except ValueError:
+ # This helper used too early for fail_json to work.
+ print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
+ sys.exit(1)
+
+ if PY2:
+ params = json_dict_unicode_to_bytes(params)
+
+ try:
+ return params['ANSIBLE_MODULE_ARGS']
+ except KeyError:
+ # This helper does not have access to fail_json so we have to print
+ # json output on our own.
+ print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
+ '"failed": true}')
+ sys.exit(1)
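+
+# Illustrative sketch of the payload this function expects on stdin (or from
+# a file/argument when debugging):
+#
+#     {"ANSIBLE_MODULE_ARGS": {"path": "/tmp/foo", "state": "present"}}
+#
+# for which _load_params() returns {"path": "/tmp/foo", "state": "present"}.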
+
+
+def env_fallback(*args, **kwargs):
+ ''' Load value from environment '''
+ for arg in args:
+ if arg in os.environ:
+ return os.environ[arg]
+ raise AnsibleFallbackNotFound
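+
+# For example (illustrative; FOO_URL is a hypothetical variable name), in an
+# argument_spec entry:
+#
+#     url=dict(type='str', fallback=(env_fallback, ['FOO_URL']))
+#
+# If 'url' is not supplied, its value is read from the FOO_URL environment
+# variable; if that is unset too, AnsibleFallbackNotFound is raised and the
+# normal default handling takes over.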
+
+
+def missing_required_lib(library, reason=None, url=None):
+ hostname = platform.node()
+ msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
+ if reason:
+ msg += " This is required %s." % reason
+ if url:
+ msg += " See %s for more info." % url
+
+ msg += (" Please read the module documentation and install it in the appropriate location."
+ " If the required library is installed, but Ansible is using the wrong Python interpreter,"
+ " please consult the documentation on ansible_python_interpreter")
+ return msg
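+
+# Typical usage sketch (illustrative; 'requests' stands in for any library):
+#
+#     try:
+#         import requests
+#         HAS_REQUESTS = True
+#     except ImportError:
+#         HAS_REQUESTS = False
+#     ...
+#     if not HAS_REQUESTS:
+#         module.fail_json(msg=missing_required_lib('requests'))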
+
+
+class AnsibleFallbackNotFound(Exception):
+ pass
+
+
+class AnsibleModule(object):
+ def __init__(self, argument_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None,
+ required_one_of=None, add_file_common_args=False,
+ supports_check_mode=False, required_if=None, required_by=None):
+
+ '''
+ Common code for quickly building an ansible module in Python
+ (although you can write modules with anything that can return JSON).
+
+ See :ref:`developing_modules_general` for a general introduction
+ and :ref:`developing_program_flow_modules` for more detailed explanation.
+ '''
+
+ self._name = os.path.basename(__file__) # initialize name until we can parse from options
+ self.argument_spec = argument_spec
+ self.supports_check_mode = supports_check_mode
+ self.check_mode = False
+ self.bypass_checks = bypass_checks
+ self.no_log = no_log
+
+ self.mutually_exclusive = mutually_exclusive
+ self.required_together = required_together
+ self.required_one_of = required_one_of
+ self.required_if = required_if
+ self.required_by = required_by
+ self.cleanup_files = []
+ self._debug = False
+ self._diff = False
+ self._socket_path = None
+ self._shell = None
+ self._verbosity = 0
+ # May be used to set modifications to the environment for any
+ # run_command invocation
+ self.run_command_environ_update = {}
+ self._clean = {}
+ self._string_conversion_action = ''
+
+ self.aliases = {}
+ self._legal_inputs = []
+ self._options_context = list()
+ self._tmpdir = None
+
+ self._created_files = set()
+
+ if add_file_common_args:
+ self._uses_common_file_args = True
+ for k, v in FILE_COMMON_ARGUMENTS.items():
+ if k not in self.argument_spec:
+ self.argument_spec[k] = v
+
+ self._load_params()
+ self._set_fallbacks()
+
+ # append to legal_inputs and then possibly check against them
+ try:
+ self.aliases = self._handle_aliases()
+ except (ValueError, TypeError) as e:
+ # Use exceptions here because it isn't safe to call fail_json until no_log is processed
+ print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
+ sys.exit(1)
+
+ # Save parameter values that should never be logged
+ self.no_log_values = set()
+ self._handle_no_log_values()
+
+ # check the locale as set by the current environment, and reset to
+ # a known valid (LANG=C) if it's an invalid/unavailable locale
+ self._check_locale()
+
+ self._check_arguments()
+
+ # check exclusive early
+ if not bypass_checks:
+ self._check_mutually_exclusive(mutually_exclusive)
+
+ self._set_defaults(pre=True)
+
+ self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
+ 'str': self._check_type_str,
+ 'list': self._check_type_list,
+ 'dict': self._check_type_dict,
+ 'bool': self._check_type_bool,
+ 'int': self._check_type_int,
+ 'float': self._check_type_float,
+ 'path': self._check_type_path,
+ 'raw': self._check_type_raw,
+ 'jsonarg': self._check_type_jsonarg,
+ 'json': self._check_type_jsonarg,
+ 'bytes': self._check_type_bytes,
+ 'bits': self._check_type_bits,
+ }
+ if not bypass_checks:
+ self._check_required_arguments()
+ self._check_argument_types()
+ self._check_argument_values()
+ self._check_required_together(required_together)
+ self._check_required_one_of(required_one_of)
+ self._check_required_if(required_if)
+ self._check_required_by(required_by)
+
+ self._set_defaults(pre=False)
+
+ # deal with options sub-spec
+ self._handle_options()
+
+ if not self.no_log:
+ self._log_invocation()
+
+ # finally, make sure we're in a sane working dir
+ self._set_cwd()
+
+ @property
+ def tmpdir(self):
+ # if _ansible_tmpdir was not set and we have a remote_tmp,
+ # the module needs to create it and clean it up once finished.
+ # otherwise we create our own module tmp dir from the system defaults
+ if self._tmpdir is None:
+ basedir = None
+
+ if self._remote_tmp is not None:
+ basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
+
+ if basedir is not None and not os.path.exists(basedir):
+ try:
+ os.makedirs(basedir, mode=0o700)
+ except (OSError, IOError) as e:
+ self.warn("Unable to use %s as temporary directory, "
+ "failing back to system: %s" % (basedir, to_native(e)))
+ basedir = None
+ else:
+ self.warn("Module remote_tmp %s did not exist and was "
+ "created with a mode of 0700, this may cause"
+ " issues when running as another user. To "
+ "avoid this, create the remote_tmp dir with "
+ "the correct permissions manually" % basedir)
+
+ basefile = "ansible-moduletmp-%s-" % time.time()
+ try:
+ tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
+ except (OSError, IOError) as e:
+ self.fail_json(
+ msg="Failed to create remote module tmp path at dir %s "
+ "with prefix %s: %s" % (basedir, basefile, to_native(e))
+ )
+ if not self._keep_remote_files:
+ atexit.register(shutil.rmtree, tmpdir)
+ self._tmpdir = tmpdir
+
+ return self._tmpdir
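+
+    # Illustrative usage: module.tmpdir provides a per-invocation scratch
+    # directory that is removed on exit unless the user asked to keep remote
+    # files, e.g.:
+    #
+    #     scratch = os.path.join(module.tmpdir, 'download.tmp')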
+
+ def warn(self, warning):
+ warn(warning)
+ self.log('[WARNING] %s' % warning)
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ if version is not None and date is not None:
+ raise AssertionError("implementation error -- version and date must not both be set")
+ deprecate(msg, version=version, date=date, collection_name=collection_name)
+ # For compatibility, we accept that neither version nor date is set,
+ # and treat that the same as if version had been set
+ if date is not None:
+ self.log('[DEPRECATION WARNING] %s %s' % (msg, date))
+ else:
+ self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
+
+ def load_file_common_arguments(self, params, path=None):
+ '''
+ many modules deal with files; this encapsulates common
+ options that the file module accepts so that they are directly
+ available to all modules and the modules can share code.
+
+ Allows overriding the path/dest module argument by providing path.
+ '''
+
+ if path is None:
+ path = params.get('path', params.get('dest', None))
+ if path is None:
+ return {}
+ else:
+ path = os.path.expanduser(os.path.expandvars(path))
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ # if the path is a symlink, and we're following links, get
+ # the target of the link instead for testing
+ if params.get('follow', False) and os.path.islink(b_path):
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path)
+
+ mode = params.get('mode', None)
+ owner = params.get('owner', None)
+ group = params.get('group', None)
+
+ # selinux related options
+ seuser = params.get('seuser', None)
+ serole = params.get('serole', None)
+ setype = params.get('setype', None)
+ selevel = params.get('selevel', None)
+ secontext = [seuser, serole, setype]
+
+ if self.selinux_mls_enabled():
+ secontext.append(selevel)
+
+ default_secontext = self.selinux_default_context(path)
+ for i in range(len(default_secontext)):
+ # 'i' comes from range() and is never None; only the '_default'
+ # check is meaningful here
+ if secontext[i] == '_default':
+ secontext[i] = default_secontext[i]
+
+ attributes = params.get('attributes', None)
+ return dict(
+ path=path, mode=mode, owner=owner, group=group,
+ seuser=seuser, serole=serole, setype=setype,
+ selevel=selevel, secontext=secontext, attributes=attributes,
+ )
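+
+    # Typical pattern in a file-handling module (illustrative):
+    #
+    #     file_args = module.load_file_common_arguments(module.params)
+    #     changed = module.set_fs_attributes_if_different(file_args, changed)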
+
+ # Detect whether using selinux that is MLS-aware.
+ # While this means you can set the level/range with
+ # selinux.lsetfilecon(), it may or may not mean that you
+ # will get the selevel as part of the context returned
+ # by selinux.lgetfilecon().
+
+ def selinux_mls_enabled(self):
+ if not HAVE_SELINUX:
+ return False
+ if selinux.is_selinux_mls_enabled() == 1:
+ return True
+ else:
+ return False
+
+ def selinux_enabled(self):
+ if not HAVE_SELINUX:
+ seenabled = self.get_bin_path('selinuxenabled')
+ if seenabled is not None:
+ (rc, out, err) = self.run_command(seenabled)
+ if rc == 0:
+ self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
+ return False
+ if selinux.is_selinux_enabled() == 1:
+ return True
+ else:
+ return False
+
+ # Determine whether we need a placeholder for selevel/mls
+ def selinux_initial_context(self):
+ context = [None, None, None]
+ if self.selinux_mls_enabled():
+ context.append(None)
+ return context
+
+ # If selinux fails to find a default, return an array of None
+ def selinux_default_context(self, path, mode=0):
+ context = self.selinux_initial_context()
+ if not HAVE_SELINUX or not self.selinux_enabled():
+ return context
+ try:
+ ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
+ except OSError:
+ return context
+ if ret[0] == -1:
+ return context
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
+
+ def selinux_context(self, path):
+ context = self.selinux_initial_context()
+ if not HAVE_SELINUX or not self.selinux_enabled():
+ return context
+ try:
+ ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ self.fail_json(path=path, msg='path %s does not exist' % path)
+ else:
+ self.fail_json(path=path, msg='failed to retrieve selinux context')
+ if ret[0] == -1:
+ return context
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
+
+ def user_and_group(self, path, expand=True):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+ st = os.lstat(b_path)
+ uid = st.st_uid
+ gid = st.st_gid
+ return (uid, gid)
+
+ def find_mount_point(self, path):
+ path_is_bytes = False
+ if isinstance(path, binary_type):
+ path_is_bytes = True
+
+ b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
+ while not os.path.ismount(b_path):
+ b_path = os.path.dirname(b_path)
+
+ if path_is_bytes:
+ return b_path
+
+ return to_text(b_path, errors='surrogate_or_strict')
+
+ def is_special_selinux_path(self, path):
+ """
+ Returns a tuple containing (True, selinux_context) if the given path is on an
+ NFS or other 'special' fs mount point, otherwise the return will be (False, None).
+ """
+ try:
+ f = open('/proc/mounts', 'r')
+ mount_data = f.readlines()
+ f.close()
+ except Exception:
+ return (False, None)
+ path_mount_point = self.find_mount_point(path)
+ for line in mount_data:
+ (device, mount_point, fstype, options, rest) = line.split(' ', 4)
+
+ if path_mount_point == mount_point:
+ for fs in self._selinux_special_fs:
+ if fs in fstype:
+ special_context = self.selinux_context(path_mount_point)
+ return (True, special_context)
+
+ return (False, None)
+
+ def set_default_selinux_context(self, path, changed):
+ if not HAVE_SELINUX or not self.selinux_enabled():
+ return changed
+ context = self.selinux_default_context(path)
+ return self.set_context_if_different(path, context, False)
+
+ def set_context_if_different(self, path, context, changed, diff=None):
+
+ if not HAVE_SELINUX or not self.selinux_enabled():
+ return changed
+
+ if self.check_file_absent_if_check_mode(path):
+ return True
+
+ cur_context = self.selinux_context(path)
+ new_context = list(cur_context)
+ # Iterate over the current context instead of the
+ # argument context, which may have selevel.
+
+ (is_special_se, sp_context) = self.is_special_selinux_path(path)
+ if is_special_se:
+ new_context = sp_context
+ else:
+ for i in range(len(cur_context)):
+ if len(context) > i:
+ if context[i] is not None and context[i] != cur_context[i]:
+ new_context[i] = context[i]
+ elif context[i] is None:
+ new_context[i] = cur_context[i]
+
+ if cur_context != new_context:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['secontext'] = cur_context
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['secontext'] = new_context
+
+ try:
+ if self.check_mode:
+ return True
+ rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
+ except OSError as e:
+ self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
+ new_context=new_context, cur_context=cur_context, input_was=context)
+ if rc != 0:
+ self.fail_json(path=path, msg='set selinux context failed')
+ changed = True
+ return changed
+
+ def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
+
+ if owner is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ orig_uid, orig_gid = self.user_and_group(b_path, expand)
+ try:
+ uid = int(owner)
+ except ValueError:
+ try:
+ uid = pwd.getpwnam(owner).pw_uid
+ except KeyError:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
+
+ if orig_uid != uid:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['owner'] = orig_uid
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['owner'] = uid
+
+ if self.check_mode:
+ return True
+ try:
+ os.lchown(b_path, uid, -1)
+ except (IOError, OSError) as e:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
+ changed = True
+ return changed
+
+ def set_group_if_different(self, path, group, changed, diff=None, expand=True):
+
+ if group is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ orig_uid, orig_gid = self.user_and_group(b_path, expand)
+ try:
+ gid = int(group)
+ except ValueError:
+ try:
+ gid = grp.getgrnam(group).gr_gid
+ except KeyError:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
+
+ if orig_gid != gid:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['group'] = orig_gid
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['group'] = gid
+
+ if self.check_mode:
+ return True
+ try:
+ os.lchown(b_path, -1, gid)
+ except OSError:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chgrp failed')
+ changed = True
+ return changed
+
+ def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
+
+ # Remove paths so we do not warn about creating with default permissions
+ # since we are calling this method on the path and setting the specified mode.
+ try:
+ self._created_files.remove(path)
+ except KeyError:
+ pass
+
+ if mode is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+ path_stat = os.lstat(b_path)
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ if not isinstance(mode, int):
+ try:
+ mode = int(mode, 8)
+ except Exception:
+ try:
+ mode = self._symbolic_mode_to_octal(path_stat, mode)
+ except Exception as e:
+ path = to_text(b_path)
+ self.fail_json(path=path,
+ msg="mode must be in octal or symbolic form",
+ details=to_native(e))
+
+ if mode != stat.S_IMODE(mode):
+ # prevent mode from having extra info or being an invalid long number
+ path = to_text(b_path)
+ self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
+
+ prev_mode = stat.S_IMODE(path_stat.st_mode)
+
+ if prev_mode != mode:
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['mode'] = '0%03o' % prev_mode
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['mode'] = '0%03o' % mode
+
+ if self.check_mode:
+ return True
+ # FIXME: comparison against string above will cause this to be executed
+ # every time
+ try:
+ if hasattr(os, 'lchmod'):
+ os.lchmod(b_path, mode)
+ else:
+ if not os.path.islink(b_path):
+ os.chmod(b_path, mode)
+ else:
+ # Attempt to set the perms of the symlink but be
+ # careful not to change the perms of the underlying
+ # file while trying
+ underlying_stat = os.stat(b_path)
+ os.chmod(b_path, mode)
+ new_underlying_stat = os.stat(b_path)
+ if underlying_stat.st_mode != new_underlying_stat.st_mode:
+ os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
+ except OSError as e:
+ if os.path.islink(b_path) and e.errno in (
+ errno.EACCES, # can't access symlink in sticky directory (stat)
+ errno.EPERM, # can't set mode on symbolic links (chmod)
+ errno.EROFS, # can't set mode on read-only filesystem
+ ):
+ pass
+ elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
+ pass
+ else:
+ raise
+ except Exception as e:
+ path = to_text(b_path)
+ self.fail_json(path=path, msg='chmod failed', details=to_native(e),
+ exception=traceback.format_exc())
+
+ path_stat = os.lstat(b_path)
+ new_mode = stat.S_IMODE(path_stat.st_mode)
+
+ if new_mode != prev_mode:
+ changed = True
+ return changed
+
+ def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
+
+ if attributes is None:
+ return changed
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if expand:
+ b_path = os.path.expanduser(os.path.expandvars(b_path))
+
+ if self.check_file_absent_if_check_mode(b_path):
+ return True
+
+ existing = self.get_file_attributes(b_path)
+
+ attr_mod = '='
+ if attributes.startswith(('-', '+')):
+ attr_mod = attributes[0]
+ attributes = attributes[1:]
+
+ if existing.get('attr_flags', '') != attributes or attr_mod == '-':
+ attrcmd = self.get_bin_path('chattr')
+ if attrcmd:
+ attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
+ changed = True
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['attributes'] = existing.get('attr_flags')
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
+
+ if not self.check_mode:
+ try:
+ rc, out, err = self.run_command(attrcmd)
+ if rc != 0 or err:
+ raise Exception("Error while setting attributes: %s" % (out + err))
+ except Exception as e:
+ self.fail_json(path=to_text(b_path), msg='chattr failed',
+ details=to_native(e), exception=traceback.format_exc())
+ return changed
+
+ def get_file_attributes(self, path):
+ output = {}
+ attrcmd = self.get_bin_path('lsattr', False)
+ if attrcmd:
+ attrcmd = [attrcmd, '-vd', path]
+ try:
+ rc, out, err = self.run_command(attrcmd)
+ if rc == 0:
+ res = out.split()
+ output['attr_flags'] = res[1].replace('-', '').strip()
+ output['version'] = res[0].strip()
+ output['attributes'] = format_attributes(output['attr_flags'])
+ except Exception:
+ pass
+ return output
+
+ @classmethod
+ def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
+ """
+ This enables symbolic chmod string parsing as stated in the chmod man-page
+
+ This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
+ """
+
+ new_mode = stat.S_IMODE(path_stat.st_mode)
+
+ # Now parse all symbolic modes
+ for mode in symbolic_mode.split(','):
+ # Per single mode. This always contains a '+', '-' or '='
+ # Split it on that
+ permlist = MODE_OPERATOR_RE.split(mode)
+
+ # And find all the operators
+ opers = MODE_OPERATOR_RE.findall(mode)
+
+ # The user(s) the mode applies to come first in the 'permlist'
+ # list. Take that element and remove it from the list.
+ # An empty user or 'a' means 'all'.
+ users = permlist.pop(0)
+ use_umask = (users == '')
+ if users == 'a' or users == '':
+ users = 'ugo'
+
+ # Check if there are illegal characters in the user list
+ # They can end up in 'users' because they are not split
+ if USERS_RE.match(users):
+ raise ValueError("bad symbolic permission for mode: %s" % mode)
+
+ # Now we have two lists of equal length: one contains the requested
+ # permissions and the other the corresponding operators.
+ for idx, perms in enumerate(permlist):
+ # Check if there are illegal characters in the permissions
+ if PERMS_RE.match(perms):
+ raise ValueError("bad symbolic permission for mode: %s" % mode)
+
+ for user in users:
+ mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
+ new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
+
+ return new_mode
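+
+    # For example (illustrative), starting from a file mode of 0o644:
+    #
+    #     'u+x'       -> 0o744  (add execute for the owner)
+    #     'go='       -> 0o600  (clear all group and other permissions)
+    #     'u=rw,go=r' -> 0o644  (explicit spelling of 644)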
+
+ @staticmethod
+ def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
+ if operator == '=':
+ if user == 'u':
+ mask = stat.S_IRWXU | stat.S_ISUID
+ elif user == 'g':
+ mask = stat.S_IRWXG | stat.S_ISGID
+ elif user == 'o':
+ mask = stat.S_IRWXO | stat.S_ISVTX
+
+ # mask out u, g, or o permissions from current_mode and apply new permissions
+ inverse_mask = mask ^ PERM_BITS
+ new_mode = (current_mode & inverse_mask) | mode_to_apply
+ elif operator == '+':
+ new_mode = current_mode | mode_to_apply
+ elif operator == '-':
+ new_mode = current_mode - (current_mode & mode_to_apply)
+ return new_mode
+
+ @staticmethod
+ def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
+ prev_mode = stat.S_IMODE(path_stat.st_mode)
+
+ is_directory = stat.S_ISDIR(path_stat.st_mode)
+ has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
+ apply_X_permission = is_directory or has_x_permissions
+
+ # Get the umask, if the 'user' part is empty, the effect is as if (a) were
+ # given, but bits that are set in the umask are not affected.
+ # We also need the "reversed umask" for masking
+ umask = os.umask(0)
+ os.umask(umask)
+ rev_umask = umask ^ PERM_BITS
+
+ # Permission bits constants documented at:
+ # http://docs.python.org/2/library/stat.html#stat.S_ISUID
+ if apply_X_permission:
+ X_perms = {
+ 'u': {'X': stat.S_IXUSR},
+ 'g': {'X': stat.S_IXGRP},
+ 'o': {'X': stat.S_IXOTH},
+ }
+ else:
+ X_perms = {
+ 'u': {'X': 0},
+ 'g': {'X': 0},
+ 'o': {'X': 0},
+ }
+
+ user_perms_to_modes = {
+ 'u': {
+ 'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
+ 'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
+ 'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
+ 's': stat.S_ISUID,
+ 't': 0,
+ 'u': prev_mode & stat.S_IRWXU,
+ 'g': (prev_mode & stat.S_IRWXG) << 3,
+ 'o': (prev_mode & stat.S_IRWXO) << 6},
+ 'g': {
+ 'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
+ 'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
+ 'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
+ 's': stat.S_ISGID,
+ 't': 0,
+ 'u': (prev_mode & stat.S_IRWXU) >> 3,
+ 'g': prev_mode & stat.S_IRWXG,
+ 'o': (prev_mode & stat.S_IRWXO) << 3},
+ 'o': {
+ 'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
+ 'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
+ 'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
+ 's': 0,
+ 't': stat.S_ISVTX,
+ 'u': (prev_mode & stat.S_IRWXU) >> 6,
+ 'g': (prev_mode & stat.S_IRWXG) >> 3,
+ 'o': prev_mode & stat.S_IRWXO},
+ }
+
+ # Insert X_perms into user_perms_to_modes
+ for key, value in X_perms.items():
+ user_perms_to_modes[key].update(value)
+
+ def or_reduce(mode, perm):
+ return mode | user_perms_to_modes[user][perm]
+
+ return reduce(or_reduce, perms, 0)
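+
+    # Note on 'X' (illustrative): unlike 'x', the capital 'X' permission is
+    # only granted when the target is a directory or already carries at least
+    # one execute bit, e.g.:
+    #
+    #     'a+X' on a 0o644 regular file -> unchanged (no execute bit present)
+    #     'a+X' on a 0o640 directory    -> 0o751 (execute added for everyone)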
+
+ def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
+ # set modes owners and context as needed
+ changed = self.set_context_if_different(
+ file_args['path'], file_args['secontext'], changed, diff
+ )
+ changed = self.set_owner_if_different(
+ file_args['path'], file_args['owner'], changed, diff, expand
+ )
+ changed = self.set_group_if_different(
+ file_args['path'], file_args['group'], changed, diff, expand
+ )
+ changed = self.set_mode_if_different(
+ file_args['path'], file_args['mode'], changed, diff, expand
+ )
+ changed = self.set_attributes_if_different(
+ file_args['path'], file_args['attributes'], changed, diff, expand
+ )
+ return changed
+
+ def check_file_absent_if_check_mode(self, file_path):
+ return self.check_mode and not os.path.exists(file_path)
+
+ def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
+ return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
+
+ def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
+ return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
+
+ def add_atomic_move_warnings(self):
+ for path in sorted(self._created_files):
+ self.warn("File '{0}' created with default permissions '{1:o}'. The previous default was '666'. "
+ "Specify 'mode' to avoid this warning.".format(to_native(path), DEFAULT_PERM))
+
+ def add_path_info(self, kwargs):
+ '''
+ for results that are files, supplement the info about the file
+ in the return path with stats about the file path.
+ '''
+
+ path = kwargs.get('path', kwargs.get('dest', None))
+ if path is None:
+ return kwargs
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if os.path.exists(b_path):
+ (uid, gid) = self.user_and_group(path)
+ kwargs['uid'] = uid
+ kwargs['gid'] = gid
+ try:
+ user = pwd.getpwuid(uid)[0]
+ except KeyError:
+ user = str(uid)
+ try:
+ group = grp.getgrgid(gid)[0]
+ except KeyError:
+ group = str(gid)
+ kwargs['owner'] = user
+ kwargs['group'] = group
+ st = os.lstat(b_path)
+ kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
+ # secontext not yet supported
+ if os.path.islink(b_path):
+ kwargs['state'] = 'link'
+ elif os.path.isdir(b_path):
+ kwargs['state'] = 'directory'
+ elif os.stat(b_path).st_nlink > 1:
+ kwargs['state'] = 'hard'
+ else:
+ kwargs['state'] = 'file'
+ if HAVE_SELINUX and self.selinux_enabled():
+ kwargs['secontext'] = ':'.join(self.selinux_context(path))
+ kwargs['size'] = st[stat.ST_SIZE]
+ return kwargs
+
+ def _check_locale(self):
+ '''
+ Uses the locale module to test the currently set locale
+ (per the LANG and LC_CTYPE environment settings)
+ '''
+ try:
+ # setting the locale to '' uses the default locale
+ # as it would be returned by locale.getdefaultlocale()
+ locale.setlocale(locale.LC_ALL, '')
+ except locale.Error:
+ # fallback to the 'C' locale, which may cause unicode
+ # issues but is preferable to simply failing because
+ # of an unknown locale
+ locale.setlocale(locale.LC_ALL, 'C')
+ os.environ['LANG'] = 'C'
+ os.environ['LC_ALL'] = 'C'
+ os.environ['LC_MESSAGES'] = 'C'
+ except Exception as e:
+ self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def _handle_aliases(self, spec=None, param=None, option_prefix=''):
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+
+ # this uses exceptions as it happens before we can safely call fail_json
+ alias_warnings = []
+ alias_results, self._legal_inputs = handle_aliases(spec, param, alias_warnings=alias_warnings)
+ for option, alias in alias_warnings:
+ warn('Both option %s and its alias %s are set.' % (option_prefix + option, option_prefix + alias))
+
+ deprecated_aliases = []
+ for i in spec.keys():
+ if 'deprecated_aliases' in spec[i].keys():
+ for alias in spec[i]['deprecated_aliases']:
+ deprecated_aliases.append(alias)
+
+ for deprecation in deprecated_aliases:
+ if deprecation['name'] in param.keys():
+ deprecate("Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
+ version=deprecation.get('version'), date=deprecation.get('date'),
+ collection_name=deprecation.get('collection_name'))
+ return alias_results
+
+ def _handle_no_log_values(self, spec=None, param=None):
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+
+ try:
+ self.no_log_values.update(list_no_log_values(spec, param))
+ except TypeError as te:
+ self.fail_json(msg="Failure when processing no_log parameters. Module invocation will be hidden. "
+ "%s" % to_native(te), invocation={'module_args': 'HIDDEN DUE TO FAILURE'})
+
+ for message in list_deprecations(spec, param):
+ deprecate(message['msg'], version=message.get('version'), date=message.get('date'),
+ collection_name=message.get('collection_name'))
+
+ def _check_arguments(self, spec=None, param=None, legal_inputs=None):
+ self._syslog_facility = 'LOG_USER'
+ unsupported_parameters = set()
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+ if legal_inputs is None:
+ legal_inputs = self._legal_inputs
+
+ for k in list(param.keys()):
+
+ if k not in legal_inputs:
+ unsupported_parameters.add(k)
+
+ for k in PASS_VARS:
+ # handle setting internal properties from internal ansible vars
+ param_key = '_ansible_%s' % k
+ if param_key in param:
+ if k in PASS_BOOLS:
+ setattr(self, PASS_VARS[k][0], self.boolean(param[param_key]))
+ else:
+ setattr(self, PASS_VARS[k][0], param[param_key])
+
+ # clean up internal top level params:
+ if param_key in self.params:
+ del self.params[param_key]
+ else:
+ # use defaults if not already set
+ if not hasattr(self, PASS_VARS[k][0]):
+ setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
+
+ if unsupported_parameters:
+ msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
+ if self._options_context:
+ msg += " found in %s." % " -> ".join(self._options_context)
+ msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
+ self.fail_json(msg=msg)
+ if self.check_mode and not self.supports_check_mode:
+ self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
+
+ def _count_terms(self, check, param=None):
+ if param is None:
+ param = self.params
+ return count_terms(check, param)
+
+ def _check_mutually_exclusive(self, spec, param=None):
+ if param is None:
+ param = self.params
+
+ try:
+ check_mutually_exclusive(spec, param)
+ except TypeError as e:
+ msg = to_native(e)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+
+ def _check_required_one_of(self, spec, param=None):
+ if spec is None:
+ return
+
+ if param is None:
+ param = self.params
+
+ try:
+ check_required_one_of(spec, param)
+ except TypeError as e:
+ msg = to_native(e)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+
+ def _check_required_together(self, spec, param=None):
+ if spec is None:
+ return
+ if param is None:
+ param = self.params
+
+ try:
+ check_required_together(spec, param)
+ except TypeError as e:
+ msg = to_native(e)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+
+ def _check_required_by(self, spec, param=None):
+ if spec is None:
+ return
+ if param is None:
+ param = self.params
+
+ try:
+ check_required_by(spec, param)
+ except TypeError as e:
+ self.fail_json(msg=to_native(e))
+
+ def _check_required_arguments(self, spec=None, param=None):
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+
+ try:
+ check_required_arguments(spec, param)
+ except TypeError as e:
+ msg = to_native(e)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+
+ def _check_required_if(self, spec, param=None):
+ ''' ensure that parameters which are conditionally required are present '''
+ if spec is None:
+ return
+ if param is None:
+ param = self.params
+
+ try:
+ check_required_if(spec, param)
+ except TypeError as e:
+ msg = to_native(e)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+
+ def _check_argument_values(self, spec=None, param=None):
+ ''' ensure all arguments have the requested values, and there are no stray arguments '''
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+ for (k, v) in spec.items():
+ choices = v.get('choices', None)
+ if choices is None:
+ continue
+ if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
+ if k in param:
+ # Allow one or more values when the param is a list with choices
+ if isinstance(param[k], list):
+ diff_list = ", ".join([item for item in param[k] if item not in choices])
+ if diff_list:
+ choices_str = ", ".join([to_native(c) for c in choices])
+ msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+ elif param[k] not in choices:
+ # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
+ # the value. If we can't figure this out, module author is responsible.
+ lowered_choices = None
+ if param[k] == 'False':
+ lowered_choices = lenient_lowercase(choices)
+ overlap = BOOLEANS_FALSE.intersection(choices)
+ if len(overlap) == 1:
+ # Extract from a set
+ (param[k],) = overlap
+
+ if param[k] == 'True':
+ if lowered_choices is None:
+ lowered_choices = lenient_lowercase(choices)
+ overlap = BOOLEANS_TRUE.intersection(choices)
+ if len(overlap) == 1:
+ (param[k],) = overlap
+
+ if param[k] not in choices:
+ choices_str = ", ".join([to_native(c) for c in choices])
+ msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+ else:
+ msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
+ if self._options_context:
+ msg += " found in %s" % " -> ".join(self._options_context)
+ self.fail_json(msg=msg)
+
+ def safe_eval(self, value, locals=None, include_exceptions=False):
+ return safe_eval(value, locals, include_exceptions)
+
+ def _check_type_str(self, value, param=None, prefix=''):
+ opts = {
+ 'error': False,
+ 'warn': False,
+ 'ignore': True
+ }
+
+ # Ignore, warn, or error when converting to a string.
+ allow_conversion = opts.get(self._string_conversion_action, True)
+ try:
+ return check_type_str(value, allow_conversion)
+ except TypeError:
+ common_msg = 'quote the entire value to ensure it does not change.'
+ from_msg = '{0!r}'.format(value)
+ to_msg = '{0!r}'.format(to_text(value))
+
+ if param is not None:
+ if prefix:
+ param = '{0}{1}'.format(prefix, param)
+
+ from_msg = '{0}: {1!r}'.format(param, value)
+ to_msg = '{0}: {1!r}'.format(param, to_text(value))
+
+ if self._string_conversion_action == 'error':
+ msg = common_msg.capitalize()
+ raise TypeError(to_native(msg))
+ elif self._string_conversion_action == 'warn':
+ msg = ('The value "{0}" (type {1.__class__.__name__}) was converted to "{2}" (type string). '
+ 'If this does not look like what you expect, {3}').format(from_msg, value, to_msg, common_msg)
+ self.warn(to_native(msg))
+ return to_native(value, errors='surrogate_or_strict')
+
+ def _check_type_list(self, value):
+ return check_type_list(value)
+
+ def _check_type_dict(self, value):
+ return check_type_dict(value)
+
+ def _check_type_bool(self, value):
+ return check_type_bool(value)
+
+ def _check_type_int(self, value):
+ return check_type_int(value)
+
+ def _check_type_float(self, value):
+ return check_type_float(value)
+
+ def _check_type_path(self, value):
+ return check_type_path(value)
+
+ def _check_type_jsonarg(self, value):
+ return check_type_jsonarg(value)
+
+ def _check_type_raw(self, value):
+ return check_type_raw(value)
+
+ def _check_type_bytes(self, value):
+ return check_type_bytes(value)
+
+ def _check_type_bits(self, value):
+ return check_type_bits(value)
+
+ def _handle_options(self, argument_spec=None, params=None, prefix=''):
+ ''' deal with options to create sub spec '''
+ if argument_spec is None:
+ argument_spec = self.argument_spec
+ if params is None:
+ params = self.params
+
+ for (k, v) in argument_spec.items():
+ wanted = v.get('type', None)
+ if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
+ spec = v.get('options', None)
+ if v.get('apply_defaults', False):
+ if spec is not None:
+ if params.get(k) is None:
+ params[k] = {}
+ else:
+ continue
+ elif spec is None or k not in params or params[k] is None:
+ continue
+
+ self._options_context.append(k)
+
+ if isinstance(params[k], dict):
+ elements = [params[k]]
+ else:
+ elements = params[k]
+
+ for idx, param in enumerate(elements):
+ if not isinstance(param, dict):
+ self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
+
+ new_prefix = prefix + k
+ if wanted == 'list':
+ new_prefix += '[%d]' % idx
+ new_prefix += '.'
+
+ self._set_fallbacks(spec, param)
+ options_aliases = self._handle_aliases(spec, param, option_prefix=new_prefix)
+
+ options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
+
+ self._check_arguments(spec, param, options_legal_inputs)
+
+ # check exclusive early
+ if not self.bypass_checks:
+ self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
+
+ self._set_defaults(pre=True, spec=spec, param=param)
+
+ if not self.bypass_checks:
+ self._check_required_arguments(spec, param)
+ self._check_argument_types(spec, param, new_prefix)
+ self._check_argument_values(spec, param)
+
+ self._check_required_together(v.get('required_together', None), param)
+ self._check_required_one_of(v.get('required_one_of', None), param)
+ self._check_required_if(v.get('required_if', None), param)
+ self._check_required_by(v.get('required_by', None), param)
+
+ self._set_defaults(pre=False, spec=spec, param=param)
+
+ # handle multi level options (sub argspec)
+ self._handle_options(spec, param, new_prefix)
+ self._options_context.pop()
+
+ def _get_wanted_type(self, wanted, k):
+ if not callable(wanted):
+ if wanted is None:
+ # Mostly we want to default to str.
+ # For values set to None explicitly, return None instead as
+ # that allows a user to unset a parameter
+ wanted = 'str'
+ try:
+ type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
+ except KeyError:
+ self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
+ else:
+ # set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
+ type_checker = wanted
+ wanted = getattr(wanted, '__name__', to_native(type(wanted)))
+
+ return type_checker, wanted
+
+ def _handle_elements(self, wanted, param, values):
+ type_checker, wanted_name = self._get_wanted_type(wanted, param)
+ validated_params = []
+ # Get param name for strings so we can later display this value in a useful error message if needed
+ # Only pass 'kwargs' to our checkers and ignore custom callable checkers
+ kwargs = {}
+ if wanted_name == 'str' and isinstance(wanted, string_types):
+ if isinstance(param, string_types):
+ kwargs['param'] = param
+ elif isinstance(param, dict):
+ kwargs['param'] = list(param.keys())[0]
+ for value in values:
+ try:
+ validated_params.append(type_checker(value, **kwargs))
+ except (TypeError, ValueError) as e:
+ msg = "Elements value for option %s" % param
+ if self._options_context:
+ msg += " found in '%s'" % " -> ".join(self._options_context)
+ msg += " is of type %s and we were unable to convert to %s: %s" % (type(value), wanted_name, to_native(e))
+ self.fail_json(msg=msg)
+ return validated_params
+
+ def _check_argument_types(self, spec=None, param=None, prefix=''):
+ ''' ensure all arguments have the requested type '''
+
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+
+ for (k, v) in spec.items():
+ wanted = v.get('type', None)
+ if k not in param:
+ continue
+
+ value = param[k]
+ if value is None:
+ continue
+
+ type_checker, wanted_name = self._get_wanted_type(wanted, k)
+ # Get param name for strings so we can later display this value in a useful error message if needed
+ # Only pass 'kwargs' to our checkers and ignore custom callable checkers
+ kwargs = {}
+ if wanted_name == 'str' and isinstance(type_checker, string_types):
+ kwargs['param'] = list(param.keys())[0]
+
+ # Get the name of the parent key if this is a nested option
+ if prefix:
+ kwargs['prefix'] = prefix
+
+ try:
+ param[k] = type_checker(value, **kwargs)
+ wanted_elements = v.get('elements', None)
+ if wanted_elements:
+ if wanted != 'list' or not isinstance(param[k], list):
+ msg = "Invalid type %s for option '%s'" % (wanted_name, param)
+ if self._options_context:
+ msg += " found in '%s'." % " -> ".join(self._options_context)
+ msg += ", elements value check is supported only with 'list' type"
+ self.fail_json(msg=msg)
+ param[k] = self._handle_elements(wanted_elements, k, param[k])
+
+ except (TypeError, ValueError) as e:
+ msg = "argument %s is of type %s" % (k, type(value))
+ if self._options_context:
+ msg += " found in '%s'." % " -> ".join(self._options_context)
+ msg += " and we were unable to convert to %s: %s" % (wanted_name, to_native(e))
+ self.fail_json(msg=msg)
+
+ def _set_defaults(self, pre=True, spec=None, param=None):
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+ for (k, v) in spec.items():
+ default = v.get('default', None)
+ if pre is True:
+ # this prevents setting defaults on required items
+ if default is not None and k not in param:
+ param[k] = default
+ else:
+ # make sure things without a default still get set None
+ if k not in param:
+ param[k] = default
+
+ def _set_fallbacks(self, spec=None, param=None):
+ if spec is None:
+ spec = self.argument_spec
+ if param is None:
+ param = self.params
+
+ for (k, v) in spec.items():
+ fallback = v.get('fallback', (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if k not in param and fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
+ except AnsibleFallbackNotFound:
+ continue
+
+ def _load_params(self):
+ ''' read the input and set the params attribute.
+
+ This method is for backwards compatibility. The guts of the function
+ were moved out in 2.1 so that custom modules could read the parameters.
+ '''
+ # debug overrides to read args from file or cmdline
+ self.params = _load_params()
+
+ def _log_to_syslog(self, msg):
+ if HAS_SYSLOG:
+ module = 'ansible-%s' % self._name
+ facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
+ syslog.openlog(str(module), 0, facility)
+ syslog.syslog(syslog.LOG_INFO, msg)
+
+ def debug(self, msg):
+ if self._debug:
+ self.log('[debug] %s' % msg)
+
+ def log(self, msg, log_args=None):
+
+ if not self.no_log:
+
+ if log_args is None:
+ log_args = dict()
+
+ module = 'ansible-%s' % self._name
+ if isinstance(module, binary_type):
+ module = module.decode('utf-8', 'replace')
+
+ # 6655 - allow for accented characters
+ if not isinstance(msg, (binary_type, text_type)):
+ raise TypeError("msg should be a string (got %s)" % type(msg))
+
+ # We want journal to always take text type
+ # syslog takes bytes on py2, text type on py3
+ if isinstance(msg, binary_type):
+ journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
+ else:
+ # TODO: surrogateescape is a danger here on Py3
+ journal_msg = remove_values(msg, self.no_log_values)
+
+ if PY3:
+ syslog_msg = journal_msg
+ else:
+ syslog_msg = journal_msg.encode('utf-8', 'replace')
+
+ if has_journal:
+ journal_args = [("MODULE", os.path.basename(__file__))]
+ for arg in log_args:
+ journal_args.append((arg.upper(), str(log_args[arg])))
+ try:
+ if HAS_SYSLOG:
+ # If syslog_facility specified, it needs to convert
+ # from the facility name to the facility code, and
+ # set it as SYSLOG_FACILITY argument of journal.send()
+ facility = getattr(syslog,
+ self._syslog_facility,
+ syslog.LOG_USER) >> 3
+ journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
+ SYSLOG_FACILITY=facility,
+ **dict(journal_args))
+ else:
+ journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
+ **dict(journal_args))
+ except IOError:
+ # fall back to syslog since logging to journal failed
+ self._log_to_syslog(syslog_msg)
+ else:
+ self._log_to_syslog(syslog_msg)
+
+ def _log_invocation(self):
+ ''' log that ansible ran the module '''
+ # TODO: generalize a separate log function and make log_invocation use it
+ # Sanitize possible password argument when logging.
+ log_args = dict()
+
+ for param in self.params:
+ canon = self.aliases.get(param, param)
+ arg_opts = self.argument_spec.get(canon, {})
+ no_log = arg_opts.get('no_log', None)
+
+ # try to proactively capture password/passphrase fields
+ if no_log is None and PASSWORD_MATCH.search(param):
+ log_args[param] = 'NOT_LOGGING_PASSWORD'
+ self.warn('Module did not set no_log for %s' % param)
+ elif self.boolean(no_log):
+ log_args[param] = 'NOT_LOGGING_PARAMETER'
+ else:
+ param_val = self.params[param]
+ if not isinstance(param_val, (text_type, binary_type)):
+ param_val = str(param_val)
+ elif isinstance(param_val, text_type):
+ param_val = param_val.encode('utf-8')
+ log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
+
+ msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
+ if msg:
+ msg = 'Invoked with %s' % ' '.join(msg)
+ else:
+ msg = 'Invoked'
+
+ self.log(msg, log_args=log_args)
+
+ def _set_cwd(self):
+ try:
+ cwd = os.getcwd()
+ if not os.access(cwd, os.F_OK | os.R_OK):
+ raise Exception()
+ return cwd
+ except Exception:
+ # we don't have access to the cwd, probably because of sudo.
+ # Try and move to a neutral location to prevent errors
+ for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
+ try:
+ if os.access(cwd, os.F_OK | os.R_OK):
+ os.chdir(cwd)
+ return cwd
+ except Exception:
+ pass
+ # we won't error here, as it may *not* be a problem,
+ # and we don't want to break modules unnecessarily
+ return None
+
+ def get_bin_path(self, arg, required=False, opt_dirs=None):
+ '''
+ Find system executable in PATH.
+
+ :param arg: The executable to find.
+ :param required: if executable is not found and required is ``True``, fail_json
+ :param opt_dirs: optional list of directories to search in addition to ``PATH``
+ :returns: if found return full path; otherwise return None
+ '''
+
+ bin_path = None
+ try:
+ bin_path = get_bin_path(arg=arg, opt_dirs=opt_dirs)
+ except ValueError as e:
+ if required:
+ self.fail_json(msg=to_text(e))
+ else:
+ return bin_path
+
+ return bin_path
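+ # Example (illustrative sketch; 'module' stands for an AnsibleModule instance):
+ # git_path = module.get_bin_path('git', required=True, opt_dirs=['/usr/local/bin'])
+ # returns e.g. '/usr/bin/git', or calls fail_json() when the executable is missing.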
+
+ def boolean(self, arg):
+ '''Convert the argument to a boolean'''
+ if arg is None:
+ return arg
+
+ try:
+ return boolean(arg)
+ except TypeError as e:
+ self.fail_json(msg=to_native(e))
+
+ def jsonify(self, data):
+ try:
+ return jsonify(data)
+ except UnicodeError as e:
+ self.fail_json(msg=to_text(e))
+
+ def from_json(self, data):
+ return json.loads(data)
+
+ def add_cleanup_file(self, path):
+ if path not in self.cleanup_files:
+ self.cleanup_files.append(path)
+
+ def do_cleanup_files(self):
+ for path in self.cleanup_files:
+ self.cleanup(path)
+
+ def _return_formatted(self, kwargs):
+
+ self.add_atomic_move_warnings()
+ self.add_path_info(kwargs)
+
+ if 'invocation' not in kwargs:
+ kwargs['invocation'] = {'module_args': self.params}
+
+ if 'warnings' in kwargs:
+ if isinstance(kwargs['warnings'], list):
+ for w in kwargs['warnings']:
+ self.warn(w)
+ else:
+ self.warn(kwargs['warnings'])
+
+ warnings = get_warning_messages()
+ if warnings:
+ kwargs['warnings'] = warnings
+
+ if 'deprecations' in kwargs:
+ if isinstance(kwargs['deprecations'], list):
+ for d in kwargs['deprecations']:
+ if isinstance(d, SEQUENCETYPE) and len(d) == 2:
+ self.deprecate(d[0], version=d[1])
+ elif isinstance(d, Mapping):
+ self.deprecate(d['msg'], version=d.get('version'), date=d.get('date'),
+ collection_name=d.get('collection_name'))
+ else:
+ self.deprecate(d) # pylint: disable=ansible-deprecated-no-version
+ else:
+ self.deprecate(kwargs['deprecations']) # pylint: disable=ansible-deprecated-no-version
+
+ deprecations = get_deprecation_messages()
+ if deprecations:
+ kwargs['deprecations'] = deprecations
+
+ kwargs = remove_values(kwargs, self.no_log_values)
+ print('\n%s' % self.jsonify(kwargs))
+
+ def exit_json(self, **kwargs):
+ ''' return from the module, without error '''
+
+ self.do_cleanup_files()
+ self._return_formatted(kwargs)
+ sys.exit(0)
+
+ def fail_json(self, msg, **kwargs):
+ ''' return from the module, with an error message '''
+
+ kwargs['failed'] = True
+ kwargs['msg'] = msg
+
+ # Add traceback if debug or high verbosity and it is missing
+ # NOTE: badly named as 'exception'; it has really always been a traceback
+ if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
+ if PY2:
+ # On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
+ kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
+ ''.join(traceback.format_tb(sys.exc_info()[2]))
+ else:
+ kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
+
+ self.do_cleanup_files()
+ self._return_formatted(kwargs)
+ sys.exit(1)
+
+ def fail_on_missing_params(self, required_params=None):
+ if not required_params:
+ return
+ try:
+ check_missing_parameters(self.params, required_params)
+ except TypeError as e:
+ self.fail_json(msg=to_native(e))
+
+ def digest_from_file(self, filename, algorithm):
+ ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
+ b_filename = to_bytes(filename, errors='surrogate_or_strict')
+
+ if not os.path.exists(b_filename):
+ return None
+ if os.path.isdir(b_filename):
+ self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
+
+ # preserve old behaviour where the third parameter was a hash algorithm object
+ if hasattr(algorithm, 'hexdigest'):
+ digest_method = algorithm
+ else:
+ try:
+ digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
+ except KeyError:
+ self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
+ (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
+
+ blocksize = 64 * 1024
+ infile = open(os.path.realpath(b_filename), 'rb')
+ block = infile.read(blocksize)
+ while block:
+ digest_method.update(block)
+ block = infile.read(blocksize)
+ infile.close()
+ return digest_method.hexdigest()
+
+ def md5(self, filename):
+ ''' Return MD5 hex digest of local file using digest_from_file().
+
+ Do not use this function unless you have no other choice for:
+ 1) Optional backwards compatibility
+ 2) Compatibility with a third party protocol
+
+ This function will not work on systems complying with FIPS-140-2.
+
+ Most uses of this function can use the module.sha1 function instead.
+ '''
+ if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
+ raise ValueError('MD5 not available. Possibly running in FIPS mode')
+ return self.digest_from_file(filename, 'md5')
+
+ def sha1(self, filename):
+ ''' Return SHA1 hex digest of local file using digest_from_file(). '''
+ return self.digest_from_file(filename, 'sha1')
+
+ def sha256(self, filename):
+ ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
+ return self.digest_from_file(filename, 'sha256')
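+ # Example (illustrative sketch; 'module' stands for an AnsibleModule instance):
+ # checksum = module.sha256('/etc/hosts')
+ # returns the hex digest string, or None when the file does not exist;
+ # an unknown algorithm name passed to digest_from_file() calls fail_json().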
+
+ def backup_local(self, fn):
+ '''make a date-marked backup of the specified file, returning the backup file name on success; returns an empty string if the source file does not exist, and calls fail_json() if the copy fails'''
+
+ backupdest = ''
+ if os.path.exists(fn):
+ # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
+ ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
+ backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
+
+ try:
+ self.preserved_copy(fn, backupdest)
+ except (shutil.Error, IOError) as e:
+ self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
+
+ return backupdest
+
+ def cleanup(self, tmpfile):
+ if os.path.exists(tmpfile):
+ try:
+ os.unlink(tmpfile)
+ except OSError as e:
+ sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
+
+ def preserved_copy(self, src, dest):
+ """Copy a file with preserved ownership, permissions and context"""
+
+ # shutil.copy2(src, dst)
+ # Similar to shutil.copy(), but metadata is copied as well - in fact,
+ # this is just shutil.copy() followed by copystat(). This is similar
+ # to the Unix command cp -p.
+ #
+ # shutil.copystat(src, dst)
+ # Copy the permission bits, last access time, last modification time,
+ # and flags from src to dst. The file contents, owner, and group are
+ # unaffected. src and dst are path names given as strings.
+
+ shutil.copy2(src, dest)
+
+ # Set the context
+ if self.selinux_enabled():
+ context = self.selinux_context(src)
+ self.set_context_if_different(dest, context, False)
+
+ # chown it
+ try:
+ dest_stat = os.stat(src)
+ tmp_stat = os.stat(dest)
+ if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
+ os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
+ except OSError as e:
+ if e.errno != errno.EPERM:
+ raise
+
+ # Set the attributes
+ current_attribs = self.get_file_attributes(src)
+ current_attribs = current_attribs.get('attr_flags', '')
+ self.set_attributes_if_different(dest, current_attribs, True)
+
+ def atomic_move(self, src, dest, unsafe_writes=False):
+ '''atomically move src to dest, copying attributes from dest.
+ os.rename is used where possible since it is atomic; the rest of the function
+ works around limitations and corner cases, and preserves the selinux context
+ if possible. Failures are reported via fail_json().'''
+ context = None
+ dest_stat = None
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if os.path.exists(b_dest):
+ try:
+ dest_stat = os.stat(b_dest)
+
+ # copy mode and ownership
+ os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
+ os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
+
+ # try to copy flags if possible
+ if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
+ try:
+ os.chflags(b_src, dest_stat.st_flags)
+ except OSError as e:
+ for err in 'EOPNOTSUPP', 'ENOTSUP':
+ if hasattr(errno, err) and e.errno == getattr(errno, err):
+ break
+ else:
+ raise
+ except OSError as e:
+ if e.errno != errno.EPERM:
+ raise
+ if self.selinux_enabled():
+ context = self.selinux_context(dest)
+ else:
+ if self.selinux_enabled():
+ context = self.selinux_default_context(dest)
+
+ creating = not os.path.exists(b_dest)
+
+ try:
+ # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
+ os.rename(b_src, b_dest)
+ except (IOError, OSError) as e:
+ if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
+ # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
+ # 16 (device or resource busy) and 26 (text file busy), the latter of which happens on vagrant
+ # synced folders and other 'exotic' non-posix file systems
+ self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
+ exception=traceback.format_exc())
+ else:
+ # Use bytes here. In the shippable CI, this fails with
+ # a UnicodeError with surrogateescape'd strings for an unknown
+ # reason (doesn't happen in a local Ubuntu 16.04 VM)
+ b_dest_dir = os.path.dirname(b_dest)
+ b_suffix = os.path.basename(b_dest)
+ error_msg = None
+ tmp_dest_name = None
+ try:
+ tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
+ dir=b_dest_dir, suffix=b_suffix)
+ except (OSError, IOError) as e:
+ error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
+ except TypeError:
+ # We expect that this is happening because python3.4.x and
+ # below can't handle byte strings in mkstemp(). Traceback
+ # would end in something like:
+ # file = _os.path.join(dir, pre + name + suf)
+ # TypeError: can't concat bytes to str
+ error_msg = ('Failed creating tmp file for atomic move. This usually happens when using a Python3 '
+ 'version older than Python3.5. Please use Python2.x or Python3.5 or greater.')
+ finally:
+ if error_msg:
+ if unsafe_writes:
+ self._unsafe_writes(b_src, b_dest)
+ else:
+ self.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+ if tmp_dest_name:
+ b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
+
+ try:
+ try:
+ # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
+ os.close(tmp_dest_fd)
+ # leaves tmp file behind when sudo and not root
+ try:
+ shutil.move(b_src, b_tmp_dest_name)
+ except OSError:
+ # cleanup will happen by 'rm' of tmpdir
+ # copy2 will preserve some metadata
+ shutil.copy2(b_src, b_tmp_dest_name)
+
+ if self.selinux_enabled():
+ self.set_context_if_different(
+ b_tmp_dest_name, context, False)
+ try:
+ tmp_stat = os.stat(b_tmp_dest_name)
+ if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
+ os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
+ except OSError as e:
+ if e.errno != errno.EPERM:
+ raise
+ try:
+ os.rename(b_tmp_dest_name, b_dest)
+ except (shutil.Error, OSError, IOError) as e:
+ if unsafe_writes and e.errno == errno.EBUSY:
+ self._unsafe_writes(b_tmp_dest_name, b_dest)
+ else:
+ self.fail_json(msg='Unable to move %s to %s, failed final rename from %s: %s' %
+ (src, dest, b_tmp_dest_name, to_native(e)),
+ exception=traceback.format_exc())
+ except (shutil.Error, OSError, IOError) as e:
+ self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
+ exception=traceback.format_exc())
+ finally:
+ self.cleanup(b_tmp_dest_name)
+
+ if creating:
+ # Keep track of what files we create here with default permissions so later we can see if the permissions
+ # are explicitly set with a follow up call to set_mode_if_different().
+ #
+ # Only warn if the module accepts 'mode' parameter so the user can take action.
+ # If the module does not allow the user to set 'mode', then the warning is useless to the
+ # user since it provides no actionable information.
+ #
+ if self.argument_spec.get('mode') and self.params.get('mode') is None:
+ self._created_files.add(dest)
+
+ # make sure the file has the correct permissions
+ # based on the current value of umask
+ umask = os.umask(0)
+ os.umask(umask)
+ os.chmod(b_dest, DEFAULT_PERM & ~umask)
+ try:
+ os.chown(b_dest, os.geteuid(), os.getegid())
+ except OSError:
+ # We're okay with trying our best here. If the user is not
+ # root (or old Unices) they won't be able to chown.
+ pass
+
+ if self.selinux_enabled():
+ # rename might not preserve context
+ self.set_context_if_different(dest, context, False)
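+ # Example (illustrative sketch; the destination path is hypothetical):
+ # write to a temp file first, then move it into place, so readers never
+ # observe a partially written destination:
+ # fd, tmp = tempfile.mkstemp(dir=module.tmpdir)
+ # os.write(fd, b'new contents')
+ # os.close(fd)
+ # module.atomic_move(tmp, '/etc/myapp.conf')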
+
+ def _unsafe_writes(self, src, dest):
+ # sadly there are some situations where we cannot ensure atomicity; only if
+ # the user insists, and we get the appropriate error, do we update the file unsafely
+ try:
+ out_dest = in_src = None
+ try:
+ out_dest = open(dest, 'wb')
+ in_src = open(src, 'rb')
+ shutil.copyfileobj(in_src, out_dest)
+ finally: # ensure files are closed, in a 2.4 compatible way
+ if out_dest:
+ out_dest.close()
+ if in_src:
+ in_src.close()
+ except (shutil.Error, OSError, IOError) as e:
+ self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
+ exception=traceback.format_exc())
+
+ def _clean_args(self, args):
+
+ if not self._clean:
+ # create a printable version of the command for use in reporting later,
+ # which strips out things like passwords from the args list
+ to_clean_args = args
+ if PY2:
+ if isinstance(args, text_type):
+ to_clean_args = to_bytes(args)
+ else:
+ if isinstance(args, binary_type):
+ to_clean_args = to_text(args)
+ if isinstance(args, (text_type, binary_type)):
+ to_clean_args = shlex.split(to_clean_args)
+
+ clean_args = []
+ is_passwd = False
+ for arg in (to_native(a) for a in to_clean_args):
+ if is_passwd:
+ is_passwd = False
+ clean_args.append('********')
+ continue
+ if PASSWD_ARG_RE.match(arg):
+ sep_idx = arg.find('=')
+ if sep_idx > -1:
+ clean_args.append('%s=********' % arg[:sep_idx])
+ continue
+ else:
+ is_passwd = True
+ arg = heuristic_log_sanitize(arg, self.no_log_values)
+ clean_args.append(arg)
+ self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
+
+ return self._clean
+
+ def _restore_signal_handlers(self):
+ # Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
+ if PY2 and sys.platform != 'win32':
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
+ use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
+ expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None, ignore_invalid_cwd=True):
+ '''
+ Execute a command, returns rc, stdout, and stderr.
+
+ :arg args: is the command to run
+ * If args is a list, the command will be run with shell=False.
+ * If args is a string and use_unsafe_shell=False, it will be split into a list and run with shell=False.
+ * If args is a string and use_unsafe_shell=True, it runs with shell=True.
+ :kw check_rc: Whether to call fail_json in case of a non-zero RC.
+ Default False
+ :kw close_fds: See documentation for subprocess.Popen(). Default True
+ :kw executable: See documentation for subprocess.Popen(). Default None
+ :kw data: If given, information to write to the stdin of the command
+ :kw binary_data: If False, append a newline to the data. Default False
+ :kw path_prefix: If given, additional path to find the command in.
+ This adds to the PATH environment variable so helper commands in
+ the same directory can also be found
+ :kw cwd: If given, working directory to run the command inside
+ :kw use_unsafe_shell: See `args` parameter. Default False
+ :kw prompt_regex: Regex string (not a compiled regex) which can be
+ used to detect prompts in the stdout which would otherwise cause
+ the execution to hang (especially if no input data is specified)
+ :kw environ_update: dictionary to *update* os.environ with
+ :kw umask: Umask to be used when running the command. Default None
+ :kw encoding: Since we return native strings, on python3 we need to
+ know the encoding to use to transform from bytes to text. If you
+ want to always get bytes back, use encoding=None. The default is
+ "utf-8". This does not affect transformation of strings given as
+ args.
+ :kw errors: Since we return native strings, on python3 we need to
+ transform stdout and stderr from bytes to text. If the bytes are
+ undecodable in the ``encoding`` specified, then use this error
+ handler to deal with them. The default is ``surrogate_or_strict``
+ which means that the bytes will be decoded using the
+ surrogateescape error handler if available (available on all
+ python3 versions we support) otherwise a UnicodeError traceback
+ will be raised. This does not affect transformations of strings
+ given as args.
+ :kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
+ dictates whether ``~`` is expanded in paths and environment variables
+ are expanded before running the command. When ``True`` a string such as
+ ``$SHELL`` will be expanded regardless of escaping. When ``False`` and
+ ``use_unsafe_shell=False`` no path or variable expansion will be done.
+ :kw pass_fds: When running on Python 3 this argument
+ dictates which file descriptors should be passed
+ to an underlying ``Popen`` constructor. On Python 2, this will
+ set ``close_fds`` to False.
+ :kw before_communicate_callback: This function will be called
+ after the ``Popen`` object is created
+ but before communicating with the process.
+ (The ``Popen`` object is passed to the callback as its first argument.)
+ :kw ignore_invalid_cwd: This flag indicates whether an invalid ``cwd``
+ (non-existent or not a directory) should be ignored or should raise
+ an exception.
+ :returns: A 3-tuple of return code (integer), stdout (native string),
+ and stderr (native string). On python2, stdout and stderr are both
+ byte strings. On python3, stdout and stderr are text strings converted
+ according to the encoding and errors parameters. If you want byte
+ strings on python3, use encoding=None to turn decoding to text off.
+ '''
+ # used by clean args later on
+ self._clean = None
+
+ if not isinstance(args, (list, binary_type, text_type)):
+ msg = "Argument 'args' to run_command must be list or string"
+ self.fail_json(rc=257, cmd=args, msg=msg)
+
+ shell = False
+ if use_unsafe_shell:
+
+ # stringify args for unsafe/direct shell usage
+ if isinstance(args, list):
+ args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args])
+ else:
+ args = to_bytes(args, errors='surrogate_or_strict')
+
+ # not set explicitly, check if set by controller
+ if executable:
+ executable = to_bytes(executable, errors='surrogate_or_strict')
+ args = [executable, b'-c', args]
+ elif self._shell not in (None, '/bin/sh'):
+ args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args]
+ else:
+ shell = True
+ else:
+ # ensure args are a list
+ if isinstance(args, (binary_type, text_type)):
+ # On python2.6 and below, shlex has problems with text type
+ # On python3, shlex needs a text type.
+ if PY2:
+ args = to_bytes(args, errors='surrogate_or_strict')
+ elif PY3:
+ args = to_text(args, errors='surrogateescape')
+ args = shlex.split(args)
+
+ # expand ``~`` in paths, and all environment vars
+ if expand_user_and_vars:
+ args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None]
+ else:
+ args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None]
+
+ prompt_re = None
+ if prompt_regex:
+ if isinstance(prompt_regex, text_type):
+ if PY3:
+ prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
+ elif PY2:
+ prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
+ try:
+ prompt_re = re.compile(prompt_regex, re.MULTILINE)
+ except re.error:
+ self.fail_json(msg="invalid prompt regular expression given to run_command")
+
+ rc = 0
+ msg = None
+ st_in = None
+
+ # Manipulate the environ we'll send to the new process
+ old_env_vals = {}
+ # We can set this from both an attribute and per call
+ for key, val in self.run_command_environ_update.items():
+ old_env_vals[key] = os.environ.get(key, None)
+ os.environ[key] = val
+ if environ_update:
+ for key, val in environ_update.items():
+ old_env_vals[key] = os.environ.get(key, None)
+ os.environ[key] = val
+ if path_prefix:
+ old_env_vals['PATH'] = os.environ['PATH']
+ os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
+
+ # If using test-module.py and explode, the remote lib path will resemble:
+ # /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
+ # If using ansible or ansible-playbook with a remote system:
+ # /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
+
+ # Clean out python paths set by ansiballz
+ if 'PYTHONPATH' in os.environ:
+ pypaths = os.environ['PYTHONPATH'].split(':')
+ pypaths = [x for x in pypaths
+ if not x.endswith('/ansible_modlib.zip') and
+ not x.endswith('/debug_dir')]
+ os.environ['PYTHONPATH'] = ':'.join(pypaths)
+ if not os.environ['PYTHONPATH']:
+ del os.environ['PYTHONPATH']
+
+ if data:
+ st_in = subprocess.PIPE
+
+ kwargs = dict(
+ executable=executable,
+ shell=shell,
+ close_fds=close_fds,
+ stdin=st_in,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=self._restore_signal_handlers,
+ )
+ if PY3 and pass_fds:
+ kwargs["pass_fds"] = pass_fds
+ elif PY2 and pass_fds:
+ kwargs['close_fds'] = False
+
+ # store the pwd
+ prev_dir = os.getcwd()
+
+ # make sure we're in the right working directory
+ if cwd:
+ if os.path.isdir(cwd):
+ cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict')
+ kwargs['cwd'] = cwd
+ try:
+ os.chdir(cwd)
+ except (OSError, IOError) as e:
+ self.fail_json(rc=e.errno, msg="Could not chdir to %s, %s" % (cwd, to_native(e)),
+ exception=traceback.format_exc())
+ elif not ignore_invalid_cwd:
+ self.fail_json(msg="Provided cwd is not a valid directory: %s" % cwd)
+
+ old_umask = None
+ if umask:
+ old_umask = os.umask(umask)
+
+ try:
+ if self._debug:
+ self.log('Executing: ' + self._clean_args(args))
+ cmd = subprocess.Popen(args, **kwargs)
+ if before_communicate_callback:
+ before_communicate_callback(cmd)
+
+ # the communication logic here is essentially taken from that
+ # of the _communicate() function in ssh.py
+
+ stdout = b''
+ stderr = b''
+ try:
+ selector = selectors.DefaultSelector()
+ except OSError:
+ # Failed to detect the default selector for the given platform;
+ # fall back to PollSelector, which is supported on major platforms
+ selector = selectors.PollSelector()
+
+ selector.register(cmd.stdout, selectors.EVENT_READ)
+ selector.register(cmd.stderr, selectors.EVENT_READ)
+ if os.name == 'posix':
+ fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ if data:
+ if not binary_data:
+ data += '\n'
+ if isinstance(data, text_type):
+ data = to_bytes(data)
+ cmd.stdin.write(data)
+ cmd.stdin.close()
+
+ while True:
+ events = selector.select(1)
+ for key, event in events:
+ b_chunk = key.fileobj.read()
+ if b_chunk == b(''):
+ selector.unregister(key.fileobj)
+ if key.fileobj == cmd.stdout:
+ stdout += b_chunk
+ elif key.fileobj == cmd.stderr:
+ stderr += b_chunk
+ # if we're checking for prompts, do it now
+ if prompt_re:
+ if prompt_re.search(stdout) and not data:
+ if encoding:
+ stdout = to_native(stdout, encoding=encoding, errors=errors)
+ return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
+ # only break out if no pipes are left to read or
+ # the pipes are completely read and
+ # the process is terminated
+ if (not events or not selector.get_map()) and cmd.poll() is not None:
+ break
+ # No pipes are left to read but process is not yet terminated
+ # Only then it is safe to wait for the process to be finished
+ # NOTE: Actually cmd.poll() is always None here if no selectors are left
+ elif not selector.get_map() and cmd.poll() is None:
+ cmd.wait()
+ # The process is terminated. Since no pipes to read from are
+ # left, there is no need to call select() again.
+ break
+
+ cmd.stdout.close()
+ cmd.stderr.close()
+ selector.close()
+
+ rc = cmd.returncode
+ except (OSError, IOError) as e:
+ self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
+ self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
+ except Exception as e:
+ self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
+ self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
+
+ # Restore env settings
+ for key, val in old_env_vals.items():
+ if val is None:
+ del os.environ[key]
+ else:
+ os.environ[key] = val
+
+ if old_umask:
+ os.umask(old_umask)
+
+ if rc != 0 and check_rc:
+ msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
+ self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
+
+ # reset the pwd
+ os.chdir(prev_dir)
+
+ if encoding is not None:
+ return (rc, to_native(stdout, encoding=encoding, errors=errors),
+ to_native(stderr, encoding=encoding, errors=errors))
+
+ return (rc, stdout, stderr)
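+ # Example (illustrative sketch; 'module' stands for an AnsibleModule instance):
+ # rc, out, err = module.run_command(['/bin/ls', '-l', '/tmp'])
+ # rc, out, err = module.run_command('echo $HOME', use_unsafe_shell=True)
+ # rc, out, err = module.run_command(['grep', 'root'], data='root:x:0:0\n')
+ # With check_rc=True a non-zero rc calls fail_json() instead of returning.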
+
+ def append_to_file(self, filename, text):
+ filename = os.path.expandvars(os.path.expanduser(filename))
+ fh = open(filename, 'a')
+ fh.write(text)
+ fh.close()
+
+ def bytes_to_human(self, size):
+ return bytes_to_human(size)
+
+ # for backwards compatibility
+ pretty_bytes = bytes_to_human
+
+ def human_to_bytes(self, number, isbits=False):
+ return human_to_bytes(number, isbits)
+
+ #
+ # Backwards compat
+ #
+
+ # In 2.0, moved from inside the module to the toplevel
+ is_executable = is_executable
+
+ @staticmethod
+ def get_buffer_size(fd):
+ try:
+ # 1032 == F_GETPIPE_SZ (Linux fcntl constant)
+ buffer_size = fcntl.fcntl(fd, 1032)
+ except Exception:
+ try:
+ # not as exact as above, but should be good enough for most platforms that fail the previous call
+ buffer_size = select.PIPE_BUF
+ except Exception:
+ buffer_size = 9000 # use a sane default just in case
+
+ return buffer_size
+
+
+def get_module_path():
+ return os.path.dirname(os.path.realpath(__file__))
diff --git a/lib/ansible/module_utils/common/__init__.py b/lib/ansible/module_utils/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/common/__init__.py
diff --git a/lib/ansible/module_utils/common/_collections_compat.py b/lib/ansible/module_utils/common/_collections_compat.py
new file mode 100644
index 00000000..3197eef6
--- /dev/null
+++ b/lib/ansible/module_utils/common/_collections_compat.py
@@ -0,0 +1,46 @@
+# Copyright (c), Sviatoslav Sydorenko <ssydoren@redhat.com> 2018
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""Collections ABC import shim.
+
+This module is intended only for internal use.
+It will go away once the bundled copy of six includes equivalent functionality.
+Third parties should not use this.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+try:
+ """Python 3.3+ branch."""
+ from collections.abc import (
+ MappingView,
+ ItemsView,
+ KeysView,
+ ValuesView,
+ Mapping, MutableMapping,
+ Sequence, MutableSequence,
+ Set, MutableSet,
+ Container,
+ Hashable,
+ Sized,
+ Callable,
+ Iterable,
+ Iterator,
+ )
+except ImportError:
+ """Use old lib location under 2.6-3.2."""
+ from collections import (
+ MappingView,
+ ItemsView,
+ KeysView,
+ ValuesView,
+ Mapping, MutableMapping,
+ Sequence, MutableSequence,
+ Set, MutableSet,
+ Container,
+ Hashable,
+ Sized,
+ Callable,
+ Iterable,
+ Iterator,
+ )
diff --git a/lib/ansible/module_utils/common/_json_compat.py b/lib/ansible/module_utils/common/_json_compat.py
new file mode 100644
index 00000000..787af0ff
--- /dev/null
+++ b/lib/ansible/module_utils/common/_json_compat.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import types
+import json
+
+# Detect the python-json library which is incompatible
+try:
+ if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
+ raise ImportError('json.loads or json.dumps were not found in the imported json library.')
+except AttributeError:
+ raise ImportError('python-json was detected, which is incompatible.')
diff --git a/lib/ansible/module_utils/common/_utils.py b/lib/ansible/module_utils/common/_utils.py
new file mode 100644
index 00000000..66df3167
--- /dev/null
+++ b/lib/ansible/module_utils/common/_utils.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""
+Modules in _utils are waiting to find a better home. If you need to use them, be prepared for them
+to move to a different location in the future.
+"""
+
+
+def get_all_subclasses(cls):
+ '''
+ Recursively search and find all subclasses of a given class
+
+ :arg cls: A python class
+ :rtype: set
+ :returns: The set of python classes which are the subclasses of `cls`.
+
+ In python, you can use a class's :py:meth:`__subclasses__` method to determine what subclasses
+ of a class exist. However, `__subclasses__` only goes one level deep. This function searches
+ each child class's `__subclasses__` method to find all of the descendant classes. It then
+ returns a set of the descendant classes.
+ '''
+ # Retrieve direct subclasses
+ subclasses = set(cls.__subclasses__())
+ to_visit = list(subclasses)
+ # Then visit all subclasses
+ while to_visit:
+ # Pop the next unvisited class; iterating over the list while
+ # removing from it would skip elements
+ sc = to_visit.pop()
+ # Queue any of its subclasses we have not seen yet and record them
+ for ssc in sc.__subclasses__():
+ if ssc not in subclasses:
+ to_visit.append(ssc)
+ subclasses.add(ssc)
+ return subclasses
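+
+
+# Example (illustrative sketch):
+#
+# class Base(object): pass
+# class Child(Base): pass
+# class GrandChild(Child): pass
+#
+# get_all_subclasses(Base) == set([Child, GrandChild])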
diff --git a/lib/ansible/module_utils/common/collections.py b/lib/ansible/module_utils/common/collections.py
new file mode 100644
index 00000000..1b120b8d
--- /dev/null
+++ b/lib/ansible/module_utils/common/collections.py
@@ -0,0 +1,112 @@
+# Copyright: (c) 2018, Sviatoslav Sydorenko <ssydoren@redhat.com>
+# Copyright: (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""Collection of low-level utility functions."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common._collections_compat import Hashable, Mapping, Sequence
+
+
+class ImmutableDict(Hashable, Mapping):
+ """Dictionary that cannot be updated"""
+ def __init__(self, *args, **kwargs):
+ self._store = dict(*args, **kwargs)
+
+ def __getitem__(self, key):
+ return self._store[key]
+
+ def __iter__(self):
+ return self._store.__iter__()
+
+ def __len__(self):
+ return self._store.__len__()
+
+ def __hash__(self):
+ return hash(frozenset(self.items()))
+
+ def __eq__(self, other):
+ try:
+ if self.__hash__() == hash(other):
+ return True
+ except TypeError:
+ pass
+
+ return False
+
+ def __repr__(self):
+ return 'ImmutableDict({0})'.format(repr(self._store))
+
+ def union(self, overriding_mapping):
+ """
+ Create an ImmutableDict as a combination of the original and overriding_mapping
+
+ :arg overriding_mapping: A Mapping of replacement and additional items
+ :return: A copy of the ImmutableDict with key-value pairs from the overriding_mapping added
+
+ If any of the keys in overriding_mapping are already present in the original ImmutableDict,
+ the overriding_mapping item replaces the one in the original ImmutableDict.
+ """
+ return ImmutableDict(self._store, **overriding_mapping)
+
+ def difference(self, subtractive_iterable):
+ """
+ Create an ImmutableDict as a combination of the original minus keys in subtractive_iterable
+
+ :arg subtractive_iterable: Any iterable containing keys that should not be present in the
+ new ImmutableDict
+ :return: A copy of the ImmutableDict with keys from the subtractive_iterable removed
+ """
+ remove_keys = frozenset(subtractive_iterable)
+ keys = (k for k in self._store.keys() if k not in remove_keys)
+ return ImmutableDict((k, self._store[k]) for k in keys)
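+ # Example (illustrative sketch):
+ # base = ImmutableDict(a=1, b=2)
+ # base.union({'b': 3}) -> ImmutableDict({'a': 1, 'b': 3})
+ # base.difference(['a']) -> ImmutableDict({'b': 2})
+ # base['a'] = 5 raises TypeError: item assignment is not supported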
+
+
+def is_string(seq):
+ """Identify whether the input has a string-like type (inclding bytes)."""
+ # AnsibleVaultEncryptedUnicode inherits from Sequence, but is expected to be a string like object
+ return isinstance(seq, (text_type, binary_type)) or getattr(seq, '__ENCRYPTED__', False)
+
+
+def is_iterable(seq, include_strings=False):
+ """Identify whether the input is an iterable."""
+ if not include_strings and is_string(seq):
+ return False
+
+ try:
+ iter(seq)
+ return True
+ except TypeError:
+ return False
+
+
+def is_sequence(seq, include_strings=False):
+ """Identify whether the input is a sequence.
+
+ Strings and bytes are not sequences here,
+ unless ``include_strings`` is ``True``.
+
+ Non-indexable things are never of a sequence type.
+ """
+ if not include_strings and is_string(seq):
+ return False
+
+ return isinstance(seq, Sequence)
+
+
+def count(seq):
+ """Returns a dictionary with the number of appearances of each element of the iterable.
+
+ Resembles the collections.Counter class functionality. It is meant to be used when the
+ code is run on Python 2.6.* where collections.Counter is not available. It should be
+ deprecated and replaced when support for Python < 2.7 is dropped.
+ """
+ if not is_iterable(seq):
+ raise Exception('Argument provided is not an iterable')
+ counters = dict()
+ for elem in seq:
+ counters[elem] = counters.get(elem, 0) + 1
+ return counters
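+
+
+# Example (illustrative sketch):
+# count(['a', 'b', 'a']) -> {'a': 2, 'b': 1}
+# mirroring collections.Counter(['a', 'b', 'a']) on Python 2.7+.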
diff --git a/lib/ansible/module_utils/common/dict_transformations.py b/lib/ansible/module_utils/common/dict_transformations.py
new file mode 100644
index 00000000..831fbc53
--- /dev/null
+++ b/lib/ansible/module_utils/common/dict_transformations.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import re
+from copy import deepcopy
+
+
+def camel_dict_to_snake_dict(camel_dict, reversible=False, ignore_list=()):
+ """
+ reversible allows two way conversion of a camelized dict
+ such that snake_dict_to_camel_dict(camel_dict_to_snake_dict(x)) == x
+
+ This is achieved through mapping e.g. HTTPEndpoint to h_t_t_p_endpoint
+ where the default would be simply http_endpoint, which gets turned into
+ HttpEndpoint if recamelized.
+
+ ignore_list is used to avoid converting a sub-tree of a dict. This is
+ particularly important for tags, where keys are case-sensitive. We convert
+ the 'Tags' key but nothing below.
+ """
+
+ def value_is_list(camel_list):
+
+ checked_list = []
+ for item in camel_list:
+ if isinstance(item, dict):
+ checked_list.append(camel_dict_to_snake_dict(item, reversible))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ return checked_list
+
+ snake_dict = {}
+ for k, v in camel_dict.items():
+ if isinstance(v, dict) and k not in ignore_list:
+ snake_dict[_camel_to_snake(k, reversible=reversible)] = camel_dict_to_snake_dict(v, reversible)
+ elif isinstance(v, list) and k not in ignore_list:
+ snake_dict[_camel_to_snake(k, reversible=reversible)] = value_is_list(v)
+ else:
+ snake_dict[_camel_to_snake(k, reversible=reversible)] = v
+
+ return snake_dict
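+
+
+# Example (illustrative sketch):
+# camel_dict_to_snake_dict({'HTTPEndpoint': 1}) -> {'http_endpoint': 1}
+# camel_dict_to_snake_dict({'HTTPEndpoint': 1}, reversible=True)
+# -> {'h_t_t_p_endpoint': 1}, which snake_dict_to_camel_dict(...,
+# capitalize_first=True) turns back into {'HTTPEndpoint': 1}.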
+
+
+def snake_dict_to_camel_dict(snake_dict, capitalize_first=False):
+ """
+ Perhaps unexpectedly, snake_dict_to_camel_dict returns dromedaryCase
+ rather than true CamelCase. Passing capitalize_first=True returns
+ CamelCase. The default remains False as that was the original implementation
+ """
+
+ def camelize(complex_type, capitalize_first=False):
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ new_type[_snake_to_camel(key, capitalize_first)] = camelize(complex_type[key], capitalize_first)
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(camelize(complex_type[i], capitalize_first))
+ else:
+ return complex_type
+ return new_type
+
+ return camelize(snake_dict, capitalize_first)
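+
+
+# Example (illustrative sketch):
+# snake_dict_to_camel_dict({'http_endpoint': 1}) -> {'httpEndpoint': 1}
+# snake_dict_to_camel_dict({'http_endpoint': 1}, capitalize_first=True) -> {'HttpEndpoint': 1}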
+
+
+def _snake_to_camel(snake, capitalize_first=False):
+ if capitalize_first:
+ return ''.join(x.capitalize() or '_' for x in snake.split('_'))
+ else:
+ return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:])
+
+
+def _camel_to_snake(name, reversible=False):
+
+ def prepend_underscore_and_lower(m):
+ return '_' + m.group(0).lower()
+
+ if reversible:
+ upper_pattern = r'[A-Z]'
+ else:
+ # Cope with pluralized abbreviations such as TargetGroupARNs
+ # that would otherwise be rendered target_group_ar_ns
+ upper_pattern = r'[A-Z]{3,}s$'
+
+ s1 = re.sub(upper_pattern, prepend_underscore_and_lower, name)
+ # Handle when there was nothing before the matched pattern
+ if s1.startswith("_") and not name.startswith("_"):
+ s1 = s1[1:]
+ if reversible:
+ return s1
+
+ # Remainder of solution seems to be https://stackoverflow.com/a/1176023
+ first_cap_pattern = r'(.)([A-Z][a-z]+)'
+ all_cap_pattern = r'([a-z0-9])([A-Z]+)'
+ s2 = re.sub(first_cap_pattern, r'\1_\2', s1)
+ return re.sub(all_cap_pattern, r'\1_\2', s2).lower()
+
+
+def dict_merge(a, b):
+ '''Recursively merge dicts. Unlike a simple a['key'] = b['key'], if
+ both a and b have a key whose value is a dict, then dict_merge is called
+ on both values and the result is stored in the returned dictionary.'''
+ if not isinstance(b, dict):
+ return b
+ result = deepcopy(a)
+ for k, v in b.items():
+ if k in result and isinstance(result[k], dict):
+ result[k] = dict_merge(result[k], v)
+ else:
+ result[k] = deepcopy(v)
+ return result
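+
+
+# Example (illustrative sketch):
+# dict_merge({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}, 'b': 2})
+# -> {'a': {'x': 1, 'y': 2}, 'b': 2}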
+
+
+def recursive_diff(dict1, dict2):
+ left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
+ right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
+ for k in (set(dict1.keys()) & set(dict2.keys())):
+ if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
+ result = recursive_diff(dict1[k], dict2[k])
+ if result:
+ left[k] = result[0]
+ right[k] = result[1]
+ elif dict1[k] != dict2[k]:
+ left[k] = dict1[k]
+ right[k] = dict2[k]
+ if left or right:
+ return left, right
+ else:
+ return None
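+
+
+# Example (illustrative sketch):
+# recursive_diff({'a': 1, 'b': {'c': 2}}, {'a': 1, 'b': {'c': 3}})
+# -> ({'b': {'c': 2}}, {'b': {'c': 3}})
+# while identical inputs return None.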
diff --git a/lib/ansible/module_utils/common/file.py b/lib/ansible/module_utils/common/file.py
new file mode 100644
index 00000000..9703ea78
--- /dev/null
+++ b/lib/ansible/module_utils/common/file.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import stat
+import re
+import pwd
+import grp
+import time
+import shutil
+import traceback
+import fcntl
+import sys
+
+from contextlib import contextmanager
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import b, binary_type
+
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ HAVE_SELINUX = False
+
+
+FILE_ATTRIBUTES = {
+ 'A': 'noatime',
+ 'a': 'append',
+ 'c': 'compressed',
+ 'C': 'nocow',
+ 'd': 'nodump',
+ 'D': 'dirsync',
+ 'e': 'extents',
+ 'E': 'encrypted',
+ 'h': 'blocksize',
+ 'i': 'immutable',
+ 'I': 'indexed',
+ 'j': 'journalled',
+ 'N': 'inline',
+ 's': 'zero',
+ 'S': 'synchronous',
+ 't': 'notail',
+ 'T': 'blockroot',
+ 'u': 'undelete',
+ 'X': 'compressedraw',
+ 'Z': 'compresseddirty',
+}
+
+
+# Used for parsing symbolic file perms
+MODE_OPERATOR_RE = re.compile(r'[+=-]')
+USERS_RE = re.compile(r'[^ugo]')
+PERMS_RE = re.compile(r'[^rwxXstugo]')
+
+
+_PERM_BITS = 0o7777 # file mode permission bits
+_EXEC_PERM_BITS = 0o0111 # execute permission bits
+_DEFAULT_PERM = 0o0666 # default file permission bits
+
+
+def is_executable(path):
+ # This function's signature needs to be repeated
+ # as the first line of its docstring.
+ # This method is reused by the basic module,
+ # the repetition helps the basic module's html documentation come out right.
+ # http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_docstring_signature
+ '''is_executable(path)
+
+ is the given path executable?
+
+ :arg path: The path of the file to check.
+
+ Limitations:
+
+ * Does not account for FSACLs.
+ * Most times we really want to know "Can the current user execute this
+ file". This function does not tell us that, only if any execute bit is set.
+ '''
+ # These are all bitfields so first bitwise-or all the permissions we're
+ # looking for, then bitwise-and with the file's mode to determine if any
+ # execute bits are set.
+ return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
+
+
+def format_attributes(attributes):
+ attribute_list = [FILE_ATTRIBUTES.get(attr) for attr in attributes if attr in FILE_ATTRIBUTES]
+ return attribute_list
+
+
+def get_flags_from_attributes(attributes):
+ flags = [key for key, attr in FILE_ATTRIBUTES.items() if attr in attributes]
+ return ''.join(flags)
+
+
+def get_file_arg_spec():
+ arg_spec = dict(
+ mode=dict(type='raw'),
+ owner=dict(),
+ group=dict(),
+ seuser=dict(),
+ serole=dict(),
+ selevel=dict(),
+ setype=dict(),
+ attributes=dict(aliases=['attr']),
+ )
+ return arg_spec
+
+
+class LockTimeout(Exception):
+ pass
+
+
+class FileLock:
+ '''
+ Currently FileLock is implemented via fcntl.flock on a lock file; this
+ behaviour may change in the future. Avoid mixing the lock types fcntl.flock,
+ fcntl.lockf and module_utils.common.file.FileLock, as doing so will certainly cause
+ unwanted and/or unexpected behaviour.
+ '''
+ def __init__(self):
+ self.lockfd = None
+
+ @contextmanager
+ def lock_file(self, path, tmpdir, lock_timeout=None):
+ '''
+ Context for lock acquisition
+ '''
+ try:
+ self.set_lock(path, tmpdir, lock_timeout)
+ yield
+ finally:
+ self.unlock()
+
+ def set_lock(self, path, tmpdir, lock_timeout=None):
+ '''
+ Create a lock file based on path with flock to prevent other processes
+ using given path.
+ Please note that currently file locking only works when it's executed by
+ the same user, i.e. single-user scenarios.
+
+ :kw path: Path (file) to lock
+ :kw tmpdir: Path where to place the temporary .lock file
+ :kw lock_timeout:
+ Wait n seconds for lock acquisition, fail if timeout is reached.
+ 0 = Do not wait; fail if the lock cannot be acquired immediately.
+ Default is None: wait indefinitely until the lock is released.
+ :returns: True
+ '''
+ lock_path = os.path.join(tmpdir, 'ansible-{0}.lock'.format(os.path.basename(path)))
+ l_wait = 0.1
+ r_exception = IOError
+ if sys.version_info[0] == 3:
+ r_exception = BlockingIOError
+
+ self.lockfd = open(lock_path, 'w')
+
+ # lock_timeout may be None (wait indefinitely), so guard the comparison
+ if lock_timeout is not None and lock_timeout <= 0:
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+ return True
+
+ if lock_timeout:
+ e_secs = 0
+ while e_secs < lock_timeout:
+ try:
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+ return True
+ except r_exception:
+ time.sleep(l_wait)
+ e_secs += l_wait
+ continue
+
+ self.lockfd.close()
+ raise LockTimeout('{0} sec'.format(lock_timeout))
+
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+
+ return True
+
+ def unlock(self):
+ '''
+ Make sure lock file is available for everyone and Unlock the file descriptor
+ locked by set_lock
+
+ :returns: True
+ '''
+ if not self.lockfd:
+ return True
+
+ try:
+ fcntl.flock(self.lockfd, fcntl.LOCK_UN)
+ self.lockfd.close()
+ except ValueError: # file wasn't opened, let context manager fail gracefully
+ pass
+
+ return True
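+
+
+# Example (illustrative sketch; the paths are hypothetical):
+# lock = FileLock()
+# with lock.lock_file('/var/lib/myapp/state', '/tmp', lock_timeout=5):
+# pass # exclusive access to the path inside this block
+# LockTimeout is raised if the lock is not acquired within 5 seconds.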
diff --git a/lib/ansible/module_utils/common/json.py b/lib/ansible/module_utils/common/json.py
new file mode 100644
index 00000000..866a8966
--- /dev/null
+++ b/lib/ansible/module_utils/common/json.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import datetime
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.common.collections import is_sequence
+
+
+def _is_unsafe(value):
+ return getattr(value, '__UNSAFE__', False) and not getattr(value, '__ENCRYPTED__', False)
+
+
+def _is_vault(value):
+ return getattr(value, '__ENCRYPTED__', False)
+
+
+def _preprocess_unsafe_encode(value):
+ """Recursively preprocess a data structure converting instances of ``AnsibleUnsafe``
+ into their JSON dict representations
+
+ Used in ``AnsibleJSONEncoder.iterencode``
+ """
+ if _is_unsafe(value):
+ value = {'__ansible_unsafe': to_text(value, errors='surrogate_or_strict', nonstring='strict')}
+ elif is_sequence(value):
+ value = [_preprocess_unsafe_encode(v) for v in value]
+ elif isinstance(value, Mapping):
+ value = dict((k, _preprocess_unsafe_encode(v)) for k, v in value.items())
+
+ return value
+
+
+class AnsibleJSONEncoder(json.JSONEncoder):
+ '''
+ Simple encoder class to deal with JSON encoding of Ansible internal types
+ '''
+
+ def __init__(self, preprocess_unsafe=False, vault_to_text=False, **kwargs):
+ self._preprocess_unsafe = preprocess_unsafe
+ self._vault_to_text = vault_to_text
+ super(AnsibleJSONEncoder, self).__init__(**kwargs)
+
+ # NOTE: ALWAYS inform AWS/Tower when new items get added as they consume them downstream via a callback
+ def default(self, o):
+ if getattr(o, '__ENCRYPTED__', False):
+ # vault object
+ if self._vault_to_text:
+ value = to_text(o, errors='surrogate_or_strict')
+ else:
+ value = {'__ansible_vault': to_text(o._ciphertext, errors='surrogate_or_strict', nonstring='strict')}
+ elif getattr(o, '__UNSAFE__', False):
+ # unsafe object, this will never be triggered, see ``AnsibleJSONEncoder.iterencode``
+ value = {'__ansible_unsafe': to_text(o, errors='surrogate_or_strict', nonstring='strict')}
+ elif isinstance(o, Mapping):
+ # hostvars and other objects
+ value = dict(o)
+ elif isinstance(o, (datetime.date, datetime.datetime)):
+ # date object
+ value = o.isoformat()
+ else:
+ # use default encoder
+ value = super(AnsibleJSONEncoder, self).default(o)
+ return value
+
+ def iterencode(self, o, **kwargs):
+ """Custom iterencode, primarily design to handle encoding ``AnsibleUnsafe``
+ as the ``AnsibleUnsafe`` subclasses inherit from string types and
+ ``json.JSONEncoder`` does not support custom encoders for string types
+ """
+ if self._preprocess_unsafe:
+ o = _preprocess_unsafe_encode(o)
+
+ return super(AnsibleJSONEncoder, self).iterencode(o, **kwargs)
diff --git a/lib/ansible/module_utils/common/network.py b/lib/ansible/module_utils/common/network.py
new file mode 100644
index 00000000..9e1c1ab7
--- /dev/null
+++ b/lib/ansible/module_utils/common/network.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2016 Red Hat Inc
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# General networking tools that may be used by all modules
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from struct import pack
+from socket import inet_ntoa
+
+from ansible.module_utils.six.moves import zip
+
+
+VALID_MASKS = [2**8 - 2**i for i in range(0, 9)]
+
+
+def is_netmask(val):
+ parts = str(val).split('.')
+ if not len(parts) == 4:
+ return False
+ for part in parts:
+ try:
+ if int(part) not in VALID_MASKS:
+ raise ValueError
+ except ValueError:
+ return False
+ return True
+
+
+def is_masklen(val):
+ try:
+ return 0 <= int(val) <= 32
+ except ValueError:
+ return False
+
+
+def to_netmask(val):
+ """ converts a masklen to a netmask """
+ if not is_masklen(val):
+ raise ValueError('invalid value for masklen')
+
+ bits = 0
+ for i in range(32 - int(val), 32):
+ bits |= (1 << i)
+
+ return inet_ntoa(pack('>I', bits))
+
+
+def to_masklen(val):
+ """ converts a netmask to a masklen """
+ if not is_netmask(val):
+ raise ValueError('invalid value for netmask: %s' % val)
+
+ bits = list()
+ for x in val.split('.'):
+ octet = bin(int(x)).count('1')
+ bits.append(octet)
+
+ return sum(bits)
+
+
+def to_subnet(addr, mask, dotted_notation=False):
+ """ coverts an addr / mask pair to a subnet in cidr notation """
+ try:
+ if not is_masklen(mask):
+ raise ValueError
+ cidr = int(mask)
+ mask = to_netmask(mask)
+ except ValueError:
+ cidr = to_masklen(mask)
+
+ addr = addr.split('.')
+ mask = mask.split('.')
+
+ network = list()
+ for s_addr, s_mask in zip(addr, mask):
+ network.append(str(int(s_addr) & int(s_mask)))
+
+ if dotted_notation:
+ return '%s %s' % ('.'.join(network), to_netmask(cidr))
+ return '%s/%s' % ('.'.join(network), cidr)
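+
+
+# Example (illustrative sketch):
+# to_subnet('192.168.1.15', 24) -> '192.168.1.0/24'
+# to_subnet('192.168.1.15', '255.255.255.0', dotted_notation=True)
+# -> '192.168.1.0 255.255.255.0'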
+
+
+def to_ipv6_subnet(addr):
+ """ IPv6 addresses are eight groupings. The first four groupings (64 bits) comprise the subnet address. """
+
+ # https://tools.ietf.org/rfc/rfc2374.txt
+
+ # Split by :: to identify omitted zeros
+ ipv6_prefix = addr.split('::')[0]
+
+ # Get the first four groups, or as many as are found + ::
+ found_groups = []
+ for group in ipv6_prefix.split(':'):
+ found_groups.append(group)
+ if len(found_groups) == 4:
+ break
+ if len(found_groups) < 4:
+ found_groups.append('::')
+
+ # Concatenate network address parts
+ network_addr = ''
+ for group in found_groups:
+ if group != '::':
+ network_addr += str(group)
+ network_addr += str(':')
+
+ # Ensure network address ends with ::
+ if not network_addr.endswith('::'):
+ network_addr += str(':')
+ return network_addr
+
+
+def to_ipv6_network(addr):
+ """ IPv6 addresses are eight groupings. The first three groupings (48 bits) comprise the network address. """
+
+ # Split by :: to identify omitted zeros
+ ipv6_prefix = addr.split('::')[0]
+
+ # Get the first three groups, or as many as are found + ::
+ found_groups = []
+ for group in ipv6_prefix.split(':'):
+ found_groups.append(group)
+ if len(found_groups) == 3:
+ break
+ if len(found_groups) < 3:
+ found_groups.append('::')
+
+ # Concatenate network address parts
+ network_addr = ''
+ for group in found_groups:
+ if group != '::':
+ network_addr += str(group)
+ network_addr += str(':')
+
+ # Ensure network address ends with ::
+ if not network_addr.endswith('::'):
+ network_addr += str(':')
+ return network_addr
+
+
+def to_bits(val):
+ """ converts a netmask to bits """
+ bits = ''
+ for octet in val.split('.'):
+ bits += bin(int(octet))[2:].zfill(8)
+ return bits
+
+
+def is_mac(mac_address):
+ """
+ Validate MAC address for given string
+ Args:
+ mac_address: string to validate as MAC address
+
+ Returns: (Boolean) True if string is valid MAC address, otherwise False
+ """
+ mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
+ return bool(mac_addr_regex.match(mac_address.lower()))
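+
+
+# Example (illustrative sketch):
+# is_mac('00:1B:44:11:3A:B7') -> True (case-insensitive, ':' or '-' separators)
+# is_mac('001b.4411.3ab7') -> False (Cisco dotted notation is not matched)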
diff --git a/lib/ansible/module_utils/common/parameters.py b/lib/ansible/module_utils/common/parameters.py
new file mode 100644
index 00000000..4cf631e1
--- /dev/null
+++ b/lib/ansible/module_utils/common/parameters.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.common.validation import check_type_dict
+
+from ansible.module_utils.six import (
+ binary_type,
+ integer_types,
+ string_types,
+ text_type,
+)
+
+# Python2 & 3 way to get NoneType
+NoneType = type(None)
+
+# if adding a boolean attribute, also add it to PASS_BOOLS
+# some of this duplicates defaults from the controller config
+PASS_VARS = {
+ 'check_mode': ('check_mode', False),
+ 'debug': ('_debug', False),
+ 'diff': ('_diff', False),
+ 'keep_remote_files': ('_keep_remote_files', False),
+ 'module_name': ('_name', None),
+ 'no_log': ('no_log', False),
+ 'remote_tmp': ('_remote_tmp', None),
+ 'selinux_special_fs': ('_selinux_special_fs', ['fuse', 'nfs', 'vboxsf', 'ramfs', '9p', 'vfat']),
+ 'shell_executable': ('_shell', '/bin/sh'),
+ 'socket': ('_socket_path', None),
+ 'string_conversion_action': ('_string_conversion_action', 'warn'),
+ 'syslog_facility': ('_syslog_facility', 'INFO'),
+ 'tmpdir': ('_tmpdir', None),
+ 'verbosity': ('_verbosity', 0),
+ 'version': ('ansible_version', '0.0'),
+}
+
+PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'no_log')
+
+
+def _return_datastructure_name(obj):
+ """ Return native stringified values from datastructures.
+
+ For use with removing sensitive values pre-jsonification."""
+ if isinstance(obj, (text_type, binary_type)):
+ if obj:
+ yield to_native(obj, errors='surrogate_or_strict')
+ return
+ elif isinstance(obj, Mapping):
+ for element in obj.items():
+ for subelement in _return_datastructure_name(element[1]):
+ yield subelement
+ elif is_iterable(obj):
+ for element in obj:
+ for subelement in _return_datastructure_name(element):
+ yield subelement
+ elif isinstance(obj, (bool, NoneType)):
+ # This must come before int because bools are also ints
+ return
+ elif isinstance(obj, tuple(list(integer_types) + [float])):
+ yield to_native(obj, nonstring='simplerepr')
+ else:
+ raise TypeError('Unknown parameter type: %s' % (type(obj)))
+
+
+def list_no_log_values(argument_spec, params):
+ """Return set of no log values
+
+ :arg argument_spec: An argument spec dictionary from a module
+ :arg params: Dictionary of all module parameters
+
+ :returns: Set of strings that should be hidden from output::
+
+ {'secret_dict_value', 'secret_list_item_one', 'secret_list_item_two', 'secret_string'}
+ """
+
+ no_log_values = set()
+ for arg_name, arg_opts in argument_spec.items():
+ if arg_opts.get('no_log', False):
+ # Find the value for the no_log'd param
+ no_log_object = params.get(arg_name, None)
+
+ if no_log_object:
+ try:
+ no_log_values.update(_return_datastructure_name(no_log_object))
+ except TypeError as e:
+ raise TypeError('Failed to convert "%s": %s' % (arg_name, to_native(e)))
+
+ # Get no_log values from suboptions
+ sub_argument_spec = arg_opts.get('options')
+ if sub_argument_spec is not None:
+ wanted_type = arg_opts.get('type')
+ sub_parameters = params.get(arg_name)
+
+ if sub_parameters is not None:
+ if wanted_type == 'dict' or (wanted_type == 'list' and arg_opts.get('elements', '') == 'dict'):
+ # Sub parameters can be a dict or list of dicts. Ensure parameters are always a list.
+ if not isinstance(sub_parameters, list):
+ sub_parameters = [sub_parameters]
+
+ for sub_param in sub_parameters:
+ # Validate dict fields in case they came in as strings
+
+ if isinstance(sub_param, string_types):
+ sub_param = check_type_dict(sub_param)
+
+ if not isinstance(sub_param, Mapping):
+ raise TypeError("Value '{1}' in the sub parameter field '{0}' must by a {2}, "
+ "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type))
+
+ no_log_values.update(list_no_log_values(sub_argument_spec, sub_param))
+
+ return no_log_values
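+
+
+# Example (illustrative sketch):
+# spec = {'token': {'no_log': True}, 'user': {}}
+# list_no_log_values(spec, {'token': 's3cret', 'user': 'admin'}) -> set(['s3cret'])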
+
+
+def list_deprecations(argument_spec, params, prefix=''):
+ """Return a list of deprecations
+
+ :arg argument_spec: An argument spec dictionary from a module
+ :arg params: Dictionary of all module parameters
+
+ :returns: List of dictionaries containing a message and version in which
+ the deprecated parameter will be removed, or an empty list::
+
+ [{'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9'}]
+ """
+
+ deprecations = []
+ for arg_name, arg_opts in argument_spec.items():
+ if arg_name in params:
+ if prefix:
+ sub_prefix = '%s["%s"]' % (prefix, arg_name)
+ else:
+ sub_prefix = arg_name
+ if arg_opts.get('removed_at_date') is not None:
+ deprecations.append({
+ 'msg': "Param '%s' is deprecated. See the module docs for more information" % sub_prefix,
+ 'date': arg_opts.get('removed_at_date'),
+ 'collection_name': arg_opts.get('removed_from_collection'),
+ })
+ elif arg_opts.get('removed_in_version') is not None:
+ deprecations.append({
+ 'msg': "Param '%s' is deprecated. See the module docs for more information" % sub_prefix,
+ 'version': arg_opts.get('removed_in_version'),
+ 'collection_name': arg_opts.get('removed_from_collection'),
+ })
+ # Check sub-argument spec
+ sub_argument_spec = arg_opts.get('options')
+ if sub_argument_spec is not None:
+ sub_arguments = params[arg_name]
+ if isinstance(sub_arguments, Mapping):
+ sub_arguments = [sub_arguments]
+ if isinstance(sub_arguments, list):
+ for sub_params in sub_arguments:
+ if isinstance(sub_params, Mapping):
+ deprecations.extend(list_deprecations(sub_argument_spec, sub_params, prefix=sub_prefix))
+
+ return deprecations
+
+
+def handle_aliases(argument_spec, params, alias_warnings=None):
+ """Return a two item tuple. The first is a dictionary of aliases, the second is
+ a list of legal inputs.
+
+ If a list is provided to the alias_warnings parameter, it will be filled with tuples
+ (option, alias) in every case where both an option and its alias are specified.
+ """
+
+ legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
+ aliases_results = {} # alias:canon
+
+ for (k, v) in argument_spec.items():
+ legal_inputs.append(k)
+ aliases = v.get('aliases', None)
+ default = v.get('default', None)
+ required = v.get('required', False)
+ if default is not None and required:
+ # not alias specific but this is a good place to check this
+ raise ValueError("internal error: required and default are mutually exclusive for %s" % k)
+ if aliases is None:
+ continue
+ if not is_iterable(aliases) or isinstance(aliases, (binary_type, text_type)):
+ raise TypeError('internal error: aliases must be a list or tuple')
+ for alias in aliases:
+ legal_inputs.append(alias)
+ aliases_results[alias] = k
+ if alias in params:
+ if k in params and alias_warnings is not None:
+ alias_warnings.append((k, alias))
+ params[k] = params[alias]
+
+ return aliases_results, legal_inputs
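+
+# A minimal usage sketch (illustrative only, not part of the module); the
+# spec and params are hypothetical:
+#
+# spec = {'path': {'aliases': ['dest', 'name']}}
+# params = {'dest': '/tmp/foo'}
+# aliases, legal = handle_aliases(spec, params)
+# # aliases == {'dest': 'path', 'name': 'path'}; params now also has 'path'
+# # legal contains 'path', both aliases, and the '_ansible_*' pass-through keys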
diff --git a/lib/ansible/module_utils/common/process.py b/lib/ansible/module_utils/common/process.py
new file mode 100644
index 00000000..91e818a0
--- /dev/null
+++ b/lib/ansible/module_utils/common/process.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.common.file import is_executable
+
+
+def get_bin_path(arg, opt_dirs=None, required=None):
+ '''
+ Find system executable in PATH. Raises ValueError if executable is not found.
+ Optional arguments:
+ - required: [Deprecated] Prior to 2.10, if executable is not found and required is true it raises an Exception.
+ In 2.10 and later, an Exception is always raised. This parameter will be removed in 2.14.
+ - opt_dirs: optional list of directories to search in addition to PATH
+ If found return full path, otherwise raise ValueError.
+ '''
+ opt_dirs = [] if opt_dirs is None else opt_dirs
+
+ sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ paths = []
+ for d in opt_dirs:
+ if d is not None and os.path.exists(d):
+ paths.append(d)
+ paths += os.environ.get('PATH', '').split(os.pathsep)
+ bin_path = None
+ # mangle PATH to include /sbin dirs
+ for p in sbin_paths:
+ if p not in paths and os.path.exists(p):
+ paths.append(p)
+ for d in paths:
+ if not d:
+ continue
+ path = os.path.join(d, arg)
+ if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
+ bin_path = path
+ break
+ if bin_path is None:
+ raise ValueError('Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
+
+ return bin_path
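+
+# Illustrative usage (not part of the module):
+#
+# try:
+#     git = get_bin_path('git', opt_dirs=['/usr/local/bin'])
+# except ValueError:
+#     git = None  # not found in opt_dirs, PATH, or the sbin fallbacks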
diff --git a/lib/ansible/module_utils/common/removed.py b/lib/ansible/module_utils/common/removed.py
new file mode 100644
index 00000000..45725b0f
--- /dev/null
+++ b/lib/ansible/module_utils/common/removed.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils._text import to_native
+
+
+def removed_module(removed_in, msg='This module has been removed. The module documentation for'
+ ' Ansible-%(version)s may contain hints for porting'):
+ """
+ This function is deprecated and should not be used. Instead the module should just be
+ actually removed. This function is scheduled for removal for the 2.12 release.
+
+ Returns module failure along with a message about the module being removed
+
+ :arg removed_in: The version that the module was removed in
+ :kwarg msg: Message to use in the module's failure message. The default says that the module
+ has been removed and what version of the Ansible documentation to search for porting help.
+
+ Remove the actual code and instead have boilerplate like this::
+
+ from ansible.module_utils.common.removed import removed_module
+
+ if __name__ == '__main__':
+ removed_module("2.4")
+ """
+ results = {
+ 'failed': True,
+ 'deprecations': [
+ {
+ 'msg': 'The removed_module function is deprecated',
+ 'version': '2.12',
+ },
+ ]
+ }
+
+ # Convert numbers into strings
+ removed_in = to_native(removed_in)
+
+ version = removed_in.split('.')
+ try:
+ numeric_minor = int(version[-1])
+ except Exception:
+ last_version = None
+ else:
+ version = version[:-1]
+ version.append(to_native(numeric_minor - 1))
+ last_version = '.'.join(version)
+
+ if last_version is None:
+ results['warnings'] = ['removed modules should specify the version they were removed in']
+ results['msg'] = 'This module has been removed'
+ else:
+ results['msg'] = msg % {'version': last_version}
+
+ print('\n{0}\n'.format(json.dumps(results)))
+ sys.exit(1)
diff --git a/lib/ansible/module_utils/common/sys_info.py b/lib/ansible/module_utils/common/sys_info.py
new file mode 100644
index 00000000..f0f4e99b
--- /dev/null
+++ b/lib/ansible/module_utils/common/sys_info.py
@@ -0,0 +1,159 @@
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import platform
+
+from ansible.module_utils import distro
+from ansible.module_utils.common._utils import get_all_subclasses
+
+
+__all__ = ('get_distribution', 'get_distribution_version', 'get_platform_subclass')
+
+
+def get_distribution():
+ '''
+ Return the name of the distribution the module is running on
+
+ :rtype: NativeString or None
+ :returns: Name of the distribution the module is running on
+
+ This function attempts to determine what Linux distribution the code is running on and return
+ a string representing that value. If the distribution cannot be determined, it returns
+ ``OtherLinux``. If not run on Linux it returns None.
+ '''
+ distribution = None
+
+ if platform.system() == 'Linux':
+ distribution = distro.id().capitalize()
+
+ if distribution == 'Amzn':
+ distribution = 'Amazon'
+ elif distribution == 'Rhel':
+ distribution = 'Redhat'
+ elif not distribution:
+ distribution = 'OtherLinux'
+
+ return distribution
+
+
+def get_distribution_version():
+ '''
+ Get the version of the Linux distribution the code is running on
+
+ :rtype: NativeString or None
+ :returns: A string representation of the version of the distribution. If it cannot determine
+ the version, it returns empty string. If this is not run on a Linux machine it returns None
+ '''
+ version = None
+
+ needs_best_version = frozenset((
+ u'centos',
+ u'debian',
+ ))
+
+ if platform.system() == 'Linux':
+ version = distro.version()
+ distro_id = distro.id()
+
+ if version is not None:
+ if distro_id in needs_best_version:
+ version_best = distro.version(best=True)
+
+ # CentOS maintainers believe only the major version is appropriate
+ # but Ansible users desire minor version information, e.g., 7.5.
+ # https://github.com/ansible/ansible/issues/50141#issuecomment-449452781
+ if distro_id == u'centos':
+ version = u'.'.join(version_best.split(u'.')[:2])
+
+ # Debian does not include minor version in /etc/os-release.
+ # Bug report filed upstream requesting this be added to /etc/os-release
+ # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=931197
+ if distro_id == u'debian':
+ version = version_best
+
+ else:
+ version = u''
+
+ return version
+
+
+def get_distribution_codename():
+ '''
+ Return the code name for this Linux Distribution
+
+ :rtype: NativeString or None
+ :returns: A string representation of the distribution's codename or None if not a Linux distro
+ '''
+ codename = None
+ if platform.system() == 'Linux':
+ # Until this gets merged and we update our bundled copy of distro:
+ # https://github.com/nir0s/distro/pull/230
+ # Fixes Fedora 28+ not having a code name and Ubuntu Xenial Xerus needing to be "xenial"
+ os_release_info = distro.os_release_info()
+ codename = os_release_info.get('version_codename')
+
+ if codename is None:
+ codename = os_release_info.get('ubuntu_codename')
+
+ if codename is None and distro.id() == 'ubuntu':
+ lsb_release_info = distro.lsb_release_info()
+ codename = lsb_release_info.get('codename')
+
+ if codename is None:
+ codename = distro.codename()
+ if codename == u'':
+ codename = None
+
+ return codename
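+
+# Illustrative only (not part of the module); actual values depend on the
+# host. On an Ubuntu 20.04 machine the three calls would typically return
+# 'Ubuntu' / '20.04' / 'focal'; on non-Linux systems each returns None:
+#
+# name = get_distribution()
+# version = get_distribution_version()
+# codename = get_distribution_codename()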
+
+
+def get_platform_subclass(cls):
+ '''
+ Finds a subclass implementing desired functionality on the platform the code is running on
+
+ :arg cls: Class to find an appropriate subclass for
+ :returns: A class that implements the functionality on this platform
+
+ Some Ansible modules have different implementations depending on the platform they run on. This
+ function is used to select between the various implementations and choose one. You can look at
+ the implementation of the Ansible :ref:`User module<user_module>` module for an example of how to use this.
+
+ This function replaces ``basic.load_platform_subclass()``. When you port code, you need to
+ change the callers to be explicit about instantiating the class. For instance, code in the
+ Ansible User module changed from::
+
+ # Old
+ class User:
+ def __new__(cls, args, kwargs):
+ return load_platform_subclass(User, args, kwargs)
+
+ # New
+ class User:
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(User)
+ return super(cls, new_cls).__new__(new_cls)
+ '''
+
+ this_platform = platform.system()
+ distribution = get_distribution()
+ subclass = None
+
+ # get the most specific superclass for this platform
+ if distribution is not None:
+ for sc in get_all_subclasses(cls):
+ if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
+ subclass = sc
+ if subclass is None:
+ for sc in get_all_subclasses(cls):
+ if sc.platform == this_platform and sc.distribution is None:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return subclass
diff --git a/lib/ansible/module_utils/common/text/__init__.py b/lib/ansible/module_utils/common/text/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/common/text/__init__.py
diff --git a/lib/ansible/module_utils/common/text/converters.py b/lib/ansible/module_utils/common/text/converters.py
new file mode 100644
index 00000000..b3fe99a0
--- /dev/null
+++ b/lib/ansible/module_utils/common/text/converters.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import codecs
+import datetime
+import json
+
+from ansible.module_utils.common._collections_compat import Set
+from ansible.module_utils.six import (
+ PY3,
+ binary_type,
+ iteritems,
+ text_type,
+)
+
+try:
+ codecs.lookup_error('surrogateescape')
+ HAS_SURROGATEESCAPE = True
+except LookupError:
+ HAS_SURROGATEESCAPE = False
+
+
+_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_replace',
+ 'surrogate_or_strict',
+ 'surrogate_then_replace'))
+
+
+def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
+ """Make sure that a string is a byte string
+
+ :arg obj: An object to make sure is a byte string. In most cases this
+ will be either a text string or a byte string. However, with
+ ``nonstring='simplerepr'``, this can be used as a traceback-free
+ version of ``str(obj)``.
+ :kwarg encoding: The encoding to use to transform from a text string to
+ a byte string. Defaults to using 'utf-8'.
+ :kwarg errors: The error handler to use if the text string is not
+ encodable using the specified encoding. Any valid `codecs error
+ handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
+ may be specified. There are three additional error strategies
+ specifically aimed at helping people to port code. The first two are:
+
+ :surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
+ handler, otherwise it will use ``strict``
+ :surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
+ handler, otherwise it will use ``replace``.
+
+ Because ``surrogateescape`` was added in Python3 this usually means that
+ Python3 will use ``surrogateescape`` and Python2 will use the fallback
+ error handler. Note that the code checks for ``surrogateescape`` when the
+ module is imported. If you have a backport of ``surrogateescape`` for
+ Python2, be sure to register the error handler prior to importing this
+ module.
+
+ The last error handler is:
+
+ :surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
+ handler. If encoding with ``surrogateescape`` would traceback,
+ surrogates are first replaced with a replacement character
+ and then the string is encoded using ``replace`` (which replaces
+ the rest of the nonencodable bytes). If ``surrogateescape`` is
+ not present it will simply use ``replace``. (Added in Ansible 2.3)
+ This strategy is designed to never traceback when it attempts
+ to encode a string.
+
+ The default until Ansible-2.2 was ``surrogate_or_replace``
+ From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
+
+ :kwarg nonstring: The strategy to use if a nonstring is specified in
+ ``obj``. Default is 'simplerepr'. Valid values are:
+
+ :simplerepr: The default. This takes the ``str`` of the object and
+ then returns the bytes version of that string.
+ :empty: Return an empty byte string
+ :passthru: Return the object passed in
+ :strict: Raise a :exc:`TypeError`
+
+ :returns: Typically this returns a byte string. If a nonstring object is
+ passed in this may be a different type depending on the strategy
+ specified by nonstring. This will never return a text string.
+
+ .. note:: If passed a byte string, this function does not check that the
+ string is valid in the specified encoding. If it's important that the
+ byte string is in the specified encoding do::
+
+ encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
+
+ .. versionchanged:: 2.3
+
+ Added the ``surrogate_then_replace`` error handler and made it the default error handler.
+ """
+ if isinstance(obj, binary_type):
+ return obj
+
+ # We're given a text string
+ # If it has surrogates, we know because it will decode
+ original_errors = errors
+ if errors in _COMPOSED_ERROR_HANDLERS:
+ if HAS_SURROGATEESCAPE:
+ errors = 'surrogateescape'
+ elif errors == 'surrogate_or_strict':
+ errors = 'strict'
+ else:
+ errors = 'replace'
+
+ if isinstance(obj, text_type):
+ try:
+ # Try this first as it's the fastest
+ return obj.encode(encoding, errors)
+ except UnicodeEncodeError:
+ if original_errors in (None, 'surrogate_then_replace'):
+ # We should only reach this if the encoding was not utf-8, original_errors
+ # was surrogate_then_replace, and errors was surrogateescape
+
+ # Slow but works
+ return_string = obj.encode('utf-8', 'surrogateescape')
+ return_string = return_string.decode('utf-8', 'replace')
+ return return_string.encode(encoding, 'replace')
+ raise
+
+ # Note: We do these last even though we have to call to_bytes again on the
+ # value because we're optimizing the common case
+ if nonstring == 'simplerepr':
+ try:
+ value = str(obj)
+ except UnicodeError:
+ try:
+ value = repr(obj)
+ except UnicodeError:
+ # Giving up
+ return to_bytes('')
+ elif nonstring == 'passthru':
+ return obj
+ elif nonstring == 'empty':
+ # python2.4 doesn't have b''
+ return to_bytes('')
+ elif nonstring == 'strict':
+ raise TypeError('obj must be a string type')
+ else:
+ raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
+
+ return to_bytes(value, encoding, errors)
+
+
+def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
+ """Make sure that a string is a text string
+
+ :arg obj: An object to make sure is a text string. In most cases this
+ will be either a text string or a byte string. However, with
+ ``nonstring='simplerepr'``, this can be used as a traceback-free
+ version of ``str(obj)``.
+ :kwarg encoding: The encoding to use to transform from a byte string to
+ a text string. Defaults to using 'utf-8'.
+ :kwarg errors: The error handler to use if the byte string is not
+ decodable using the specified encoding. Any valid `codecs error
+ handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
+ may be specified. We support three additional error strategies
+ specifically aimed at helping people to port code:
+
+ :surrogate_or_strict: Will use surrogateescape if it is a valid
+ handler, otherwise it will use strict
+ :surrogate_or_replace: Will use surrogateescape if it is a valid
+ handler, otherwise it will use replace.
+ :surrogate_then_replace: Does the same as surrogate_or_replace but
+ was added for symmetry with the error handlers in
+ :func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)
+
+ Because surrogateescape was added in Python3 this usually means that
+ Python3 will use `surrogateescape` and Python2 will use the fallback
+ error handler. Note that the code checks for surrogateescape when the
+ module is imported. If you have a backport of `surrogateescape` for
+ python2, be sure to register the error handler prior to importing this
+ module.
+
+ The default until Ansible-2.2 was `surrogate_or_replace`
+ In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
+ with :func:`ansible.module_utils._text.to_bytes` .
+ :kwarg nonstring: The strategy to use if a nonstring is specified in
+ ``obj``. Default is 'simplerepr'. Valid values are:
+
+ :simplerepr: The default. This takes the ``str`` of the object and
+ then returns the text version of that string.
+ :empty: Return an empty text string
+ :passthru: Return the object passed in
+ :strict: Raise a :exc:`TypeError`
+
+ :returns: Typically this returns a text string. If a nonstring object is
+ passed in this may be a different type depending on the strategy
+ specified by nonstring. This will never return a byte string.
+
+ .. versionchanged:: 2.3
+
+ Added the surrogate_then_replace error handler and made it the default error handler.
+ """
+ if isinstance(obj, text_type):
+ return obj
+
+ if errors in _COMPOSED_ERROR_HANDLERS:
+ if HAS_SURROGATEESCAPE:
+ errors = 'surrogateescape'
+ elif errors == 'surrogate_or_strict':
+ errors = 'strict'
+ else:
+ errors = 'replace'
+
+ if isinstance(obj, binary_type):
+ # Note: We don't need special handling for surrogate_then_replace
+ # because all bytes will either be made into surrogates or are valid
+ # to decode.
+ return obj.decode(encoding, errors)
+
+ # Note: We do these last even though we have to call to_text again on the
+ # value because we're optimizing the common case
+ if nonstring == 'simplerepr':
+ try:
+ value = str(obj)
+ except UnicodeError:
+ try:
+ value = repr(obj)
+ except UnicodeError:
+ # Giving up
+ return u''
+ elif nonstring == 'passthru':
+ return obj
+ elif nonstring == 'empty':
+ return u''
+ elif nonstring == 'strict':
+ raise TypeError('obj must be a string type')
+ else:
+ raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
+
+ return to_text(value, encoding, errors)
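+
+# Illustrative round trip (not part of the module):
+#
+# to_bytes(u'caf\xe9')     # -> b'caf\xc3\xa9' (utf-8 by default)
+# to_text(b'caf\xc3\xa9')  # -> u'caf\xe9'
+# to_text(b'\xff', errors='surrogate_or_strict')
+# # -> u'\udcff' where surrogateescape exists; raises UnicodeDecodeError otherwise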
+
+
+#: :py:func:`to_native`
+#: Transform a variable into the native str type for the python version
+#:
+#: On Python2, this is an alias for
+#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
+#: :func:`~ansible.module_utils.to_text`. It makes it easier to
+#: transform a variable into the native str type for the python version
+#: the code is running on. Use this when constructing the message to
+#: send to exceptions or when dealing with an API that needs to take
+#: a native string. Example::
+#:
+#: try:
+#: 1//0
+#: except ZeroDivisionError as e:
+#: raise MyException('Encountered an error: %s' % to_native(e))
+if PY3:
+ to_native = to_text
+else:
+ to_native = to_bytes
+
+
+def _json_encode_fallback(obj):
+ if isinstance(obj, Set):
+ return list(obj)
+ elif isinstance(obj, datetime.datetime):
+ return obj.isoformat()
+ raise TypeError("Cannot json serialize %s" % to_native(obj))
+
+
+def jsonify(data, **kwargs):
+ for encoding in ("utf-8", "latin-1"):
+ try:
+ return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
+ # Old systems using the old simplejson module do not support the encoding keyword.
+ except TypeError:
+ try:
+ new_data = container_to_text(data, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
+ except UnicodeDecodeError:
+ continue
+ raise UnicodeError('Invalid unicode encoding encountered')
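+
+# Illustrative only (not part of the module); the fallback encoder covers
+# types that json.dumps() rejects by default (key order may vary on older
+# Pythons):
+#
+# jsonify({'when': datetime.datetime(2020, 1, 1), 'tags': set(['a'])})
+# # -> '{"when": "2020-01-01T00:00:00", "tags": ["a"]}'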
+
+
+def container_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
+ ''' Recursively convert dict keys and values to byte str
+
+ Specialized for json return because this only handles lists, tuples,
+ and dict container types (the containers that the json module returns)
+ '''
+
+ if isinstance(d, text_type):
+ return to_bytes(d, encoding=encoding, errors=errors)
+ elif isinstance(d, dict):
+ return dict(container_to_bytes(o, encoding, errors) for o in iteritems(d))
+ elif isinstance(d, list):
+ return [container_to_bytes(o, encoding, errors) for o in d]
+ elif isinstance(d, tuple):
+ return tuple(container_to_bytes(o, encoding, errors) for o in d)
+ else:
+ return d
+
+
+def container_to_text(d, encoding='utf-8', errors='surrogate_or_strict'):
+ """Recursively convert dict keys and values to text str
+
+ Specialized for json return because this only handles lists, tuples,
+ and dict container types (the containers that the json module returns)
+ """
+
+ if isinstance(d, binary_type):
+ # Warning, can traceback
+ return to_text(d, encoding=encoding, errors=errors)
+ elif isinstance(d, dict):
+ return dict(container_to_text(o, encoding, errors) for o in iteritems(d))
+ elif isinstance(d, list):
+ return [container_to_text(o, encoding, errors) for o in d]
+ elif isinstance(d, tuple):
+ return tuple(container_to_text(o, encoding, errors) for o in d)
+ else:
+ return d
diff --git a/lib/ansible/module_utils/common/text/formatters.py b/lib/ansible/module_utils/common/text/formatters.py
new file mode 100644
index 00000000..971247a0
--- /dev/null
+++ b/lib/ansible/module_utils/common/text/formatters.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.six import iteritems
+
+SIZE_RANGES = {
+ 'Y': 1 << 80,
+ 'Z': 1 << 70,
+ 'E': 1 << 60,
+ 'P': 1 << 50,
+ 'T': 1 << 40,
+ 'G': 1 << 30,
+ 'M': 1 << 20,
+ 'K': 1 << 10,
+ 'B': 1,
+}
+
+
+def lenient_lowercase(lst):
+ """Lowercase elements of a list.
+
+ If an element is not a string, pass it through untouched.
+ """
+ lowered = []
+ for value in lst:
+ try:
+ lowered.append(value.lower())
+ except AttributeError:
+ lowered.append(value)
+ return lowered
+
+
+def human_to_bytes(number, default_unit=None, isbits=False):
+ """Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument.
+
+ example: human_to_bytes('10M') <=> human_to_bytes(10, 'M').
+
+ When isbits is False (default), converts bytes from a human-readable format to integer.
+ example: human_to_bytes('1MB') returns 1048576 (int).
+ The function expects 'B' (uppercase) as the byte identifier, passed
+ as part of the 'number' string or via 'default_unit', e.g. 'MB'/'KB'/etc.
+ (a lone 'b' is also perceived as a byte identifier).
+ If 'Mb'/'Kb'/... is passed, a ValueError is raised.
+
+ When isbits is True, converts bits from a human-readable format to integer.
+ example: human_to_bytes('1Mb', isbits=True) returns 1048576 (int) -
+ a string bit representation was passed and is returned as a number of bits.
+ The function expects 'b' (lowercase) as the bit identifier, e.g. 'Mb'/'Kb'/etc.
+ If 'MB'/'KB'/... is passed, a ValueError is raised.
+ """
+ m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
+ if m is None:
+ raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
+ try:
+ num = float(m.group(1))
+ except Exception:
+ raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
+
+ unit = m.group(2)
+ if unit is None:
+ unit = default_unit
+
+ if unit is None:
+ # No unit given, returning raw number
+ return int(round(num))
+ range_key = unit[0].upper()
+ try:
+ limit = SIZE_RANGES[range_key]
+ except Exception:
+ raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
+
+ # default value
+ unit_class = 'B'
+ unit_class_name = 'byte'
+ # handling bits case
+ if isbits:
+ unit_class = 'b'
+ unit_class_name = 'bit'
+ # check unit value if more than one character (KB, MB)
+ if len(unit) > 1:
+ expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
+ if range_key == 'B':
+ expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
+
+ if unit_class_name in unit.lower():
+ pass
+ elif unit[1] != unit_class:
+ raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
+
+ return int(round(num * limit))
+
+
+def bytes_to_human(size, isbits=False, unit=None):
+ base = 'Bytes'
+ if isbits:
+ base = 'bits'
+ suffix = ''
+
+ for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
+ if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
+ break
+
+ if limit != 1:
+ suffix += base[0]
+ else:
+ suffix = base
+
+ return '%.2f %s' % (size / limit, suffix)
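+
+# Illustrative only (not part of the module):
+#
+# bytes_to_human(1048576)               # -> '1.00 MB'
+# bytes_to_human(1048576, isbits=True)  # -> '1.00 Mb'
+# bytes_to_human(512)                   # -> '512.00 Bytes'
+# bytes_to_human(1073741824, unit='M')  # -> '1024.00 MB'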
diff --git a/lib/ansible/module_utils/common/validation.py b/lib/ansible/module_utils/common/validation.py
new file mode 100644
index 00000000..fc13f4d0
--- /dev/null
+++ b/lib/ansible/module_utils/common/validation.py
@@ -0,0 +1,547 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import re
+
+from ast import literal_eval
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._json_compat import json
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.common.text.converters import jsonify
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import (
+ binary_type,
+ integer_types,
+ string_types,
+ text_type,
+)
+
+
+def count_terms(terms, module_parameters):
+ """Count the number of occurrences of a key in a given dictionary
+
+ :arg terms: String or iterable of values to check
+ :arg module_parameters: Dictionary of module parameters
+
+ :returns: An integer that is the number of occurrences of the terms values
+ in the provided dictionary.
+ """
+
+ if not is_iterable(terms):
+ terms = [terms]
+
+ return len(set(terms).intersection(module_parameters))
+
+
+def check_mutually_exclusive(terms, module_parameters):
+ """Check mutually exclusive terms against argument parameters
+
+ Accepts a single list or list of lists that are groups of terms that should be
+ mutually exclusive with one another
+
+ :arg terms: List of mutually exclusive module parameters
+ :arg module_parameters: Dictionary of module parameters
+
+ :returns: Empty list or raises TypeError if the check fails.
+ """
+
+ results = []
+ if terms is None:
+ return results
+
+ for check in terms:
+ count = count_terms(check, module_parameters)
+ if count > 1:
+ results.append(check)
+
+ if results:
+ full_list = ['|'.join(check) for check in results]
+ msg = "parameters are mutually exclusive: %s" % ', '.join(full_list)
+ raise TypeError(to_native(msg))
+
+ return results
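+
+# Illustrative only (not part of the module); 'src' and 'content' are
+# hypothetical parameter names:
+#
+# params = {'src': '/tmp/a', 'content': 'abc'}
+# check_mutually_exclusive([['src', 'content']], params)
+# # raises TypeError: parameters are mutually exclusive: src|content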
+
+
+def check_required_one_of(terms, module_parameters):
+ """Check each list of terms to ensure at least one exists in the given module
+ parameters
+
+ Accepts a list of lists or tuples
+
+ :arg terms: List of lists of terms to check. For each list of terms, at
+ least one is required.
+ :arg module_parameters: Dictionary of module parameters
+
+ :returns: Empty list or raises TypeError if the check fails.
+ """
+
+ results = []
+ if terms is None:
+ return results
+
+ for term in terms:
+ count = count_terms(term, module_parameters)
+ if count == 0:
+ results.append(term)
+
+ if results:
+ for term in results:
+ msg = "one of the following is required: %s" % ', '.join(term)
+ raise TypeError(to_native(msg))
+
+ return results
+
+
+def check_required_together(terms, module_parameters):
+ """Check each list of terms to ensure every parameter in each list exists
+ in the given module parameters
+
+ Accepts a list of lists or tuples
+
+ :arg terms: List of lists of terms to check. Each list should include
+ parameters that are all required when at least one is specified
+ in the module_parameters.
+ :arg module_parameters: Dictionary of module parameters
+
+ :returns: Empty list or raises TypeError if the check fails.
+ """
+
+ results = []
+ if terms is None:
+ return results
+
+ for term in terms:
+ counts = [count_terms(field, module_parameters) for field in term]
+ non_zero = [c for c in counts if c > 0]
+ if len(non_zero) > 0:
+ if 0 in counts:
+ results.append(term)
+ if results:
+ for term in results:
+ msg = "parameters are required together: %s" % ', '.join(term)
+ raise TypeError(to_native(msg))
+
+ return results
+
+
+def check_required_by(requirements, module_parameters):
+ """For each key in requirements, check the corresponding list to see if they
+ exist in module_parameters
+
+ Accepts a single string or list of values for each key
+
+ :arg requirements: Dictionary of requirements
+ :arg module_parameters: Dictionary of module parameters
+
+ :returns: Empty dictionary or raises TypeError if the
+ """
+
+ result = {}
+ if requirements is None:
+ return result
+
+ for (key, value) in requirements.items():
+ if key not in module_parameters or module_parameters[key] is None:
+ continue
+ result[key] = []
+ # Support strings (single-item lists)
+ if isinstance(value, string_types):
+ value = [value]
+ for required in value:
+ if required not in module_parameters or module_parameters[required] is None:
+ result[key].append(required)
+
+ if result:
+ for key, missing in result.items():
+ if len(missing) > 0:
+ msg = "missing parameter(s) required by '%s': %s" % (key, ', '.join(missing))
+ raise TypeError(to_native(msg))
+
+ return result
+
+
+def check_required_arguments(argument_spec, module_parameters):
+ """Check all paramaters in argument_spec and return a list of parameters
+ that are required but not present in module_parameters
+
+ Raises TypeError if the check fails
+
+ :arg argument_spec: Argument spec dicitionary containing all parameters
+ and their specification
+ :arg module_paramaters: Dictionary of module parameters
+
+ :returns: Empty list or raises TypeError if the check fails.
+ """
+
+ missing = []
+ if argument_spec is None:
+ return missing
+
+ for (k, v) in argument_spec.items():
+ required = v.get('required', False)
+ if required and k not in module_parameters:
+ missing.append(k)
+
+ if missing:
+ msg = "missing required arguments: %s" % ", ".join(sorted(missing))
+ raise TypeError(to_native(msg))
+
+ return missing
+
+
+def check_required_if(requirements, module_parameters):
+ """Check parameters that are conditionally required
+
+ Raises TypeError if the check fails
+
+ :arg requirements: List of lists specifying a parameter, value, parameters
+ required when the given parameter is the specified value, and optionally
+ a boolean indicating any or all parameters are required.
+
+ Example:
+ required_if=[
+ ['state', 'present', ('path',), True],
+ ['someint', 99, ('bool_param', 'string_param')],
+ ]
+
+ :arg module_parameters: Dictionary of module parameters
+
+ :returns: Empty list or raises TypeError if the check fails.
+ The results attribute of the exception contains a list of dictionaries.
+ Each dictionary is the result of evaluating each item in requirements.
+ Each return dictionary contains the following keys:
+
+ :key missing: List of parameters that are required but missing
+ :key requires: 'any' or 'all'
+ :key parameter: Parameter name that has the requirement
+ :key value: Original value of the parameter
+ :key requirements: Original required parameters
+
+ Example:
+ [
+ {
+ 'parameter': 'someint',
+ 'value': 99,
+ 'requirements': ('bool_param', 'string_param'),
+ 'missing': ['string_param'],
+ 'requires': 'all',
+ }
+ ]
+
+ """
+ results = []
+ if requirements is None:
+ return results
+
+ for req in requirements:
+ missing = {}
+ missing['missing'] = []
+ max_missing_count = 0
+ is_one_of = False
+ if len(req) == 4:
+ key, val, requirements, is_one_of = req
+ else:
+ key, val, requirements = req
+
+ # If is_one_of is True, at least one requirement should be
+ # present; otherwise all requirements should be present.
+ if is_one_of:
+ max_missing_count = len(requirements)
+ missing['requires'] = 'any'
+ else:
+ missing['requires'] = 'all'
+
+ if key in module_parameters and module_parameters[key] == val:
+ for check in requirements:
+ count = count_terms(check, module_parameters)
+ if count == 0:
+ missing['missing'].append(check)
+ if len(missing['missing']) and len(missing['missing']) >= max_missing_count:
+ missing['parameter'] = key
+ missing['value'] = val
+ missing['requirements'] = requirements
+ results.append(missing)
+
+ if results:
+ for missing in results:
+ msg = "%s is %s but %s of the following are missing: %s" % (
+ missing['parameter'], missing['value'], missing['requires'], ', '.join(missing['missing']))
+ raise TypeError(to_native(msg))
+
+ return results
+
+
+def check_missing_parameters(module_parameters, required_parameters=None):
+ """This is for checking for required params when we can not check via
+ argspec because we need more information than is simply given in the argspec.
+
+ Raises TypeError if any required parameters are missing
+
+ :arg module_parameters: Dictionary of module parameters
+ :arg required_parameters: List of parameters to look for in the given module
+ parameters
+
+ :returns: Empty list or raises TypeError if the check fails.
+ """
+ missing_params = []
+ if required_parameters is None:
+ return missing_params
+
+ for param in required_parameters:
+ if not module_parameters.get(param):
+ missing_params.append(param)
+
+ if missing_params:
+ msg = "missing required arguments: %s" % ', '.join(missing_params)
+ raise TypeError(to_native(msg))
+
+ return missing_params
+
+
+def safe_eval(value, locals=None, include_exceptions=False):
+ # do not allow method calls to modules
+ if not isinstance(value, string_types):
+ # already templated to a data structure, perhaps?
+ if include_exceptions:
+ return (value, None)
+ return value
+ if re.search(r'\w\.\w+\(', value):
+ if include_exceptions:
+ return (value, None)
+ return value
+ # do not allow imports
+ if re.search(r'import \w+', value):
+ if include_exceptions:
+ return (value, None)
+ return value
+ try:
+ result = literal_eval(value)
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except Exception as e:
+ if include_exceptions:
+ return (value, e)
+ return value
+
+
+def check_type_str(value, allow_conversion=True):
+ """Verify that the value is a string or convert to a string.
+
+ Since unexpected changes can sometimes happen when converting to a string,
+ ``allow_conversion`` controls whether or not the value will be converted or a
+ TypeError will be raised if the value is not a string and would be converted
+
+ :arg value: Value to validate or convert to a string
+ :arg allow_conversion: Whether to convert the string and return it or raise
+ a TypeError
+
+ :returns: Original value if it is a string, the value converted to a string
+ if allow_conversion=True, or raises a TypeError if allow_conversion=False.
+ """
+ if isinstance(value, string_types):
+ return value
+
+ if allow_conversion:
+ return to_native(value, errors='surrogate_or_strict')
+
+ msg = "'{0!r}' is not a string and conversion is not allowed".format(value)
+ raise TypeError(to_native(msg))
+
+
+def check_type_list(value):
+ """Verify that the value is a list or convert to a list
+
+ A comma separated string will be split into a list. Raises a TypeError if
+ unable to convert to a list.
+
+ :arg value: Value to validate or convert to a list
+
+ :returns: Original value if it is already a list, single item list if a
+ float, int or string without commas, or a multi-item list if a
+ comma-delimited string.
+ """
+ if isinstance(value, list):
+ return value
+
+ if isinstance(value, string_types):
+ return value.split(",")
+ elif isinstance(value, int) or isinstance(value, float):
+ return [str(value)]
+
+ raise TypeError('%s cannot be converted to a list' % type(value))
+
+
+def check_type_dict(value):
+ """Verify that value is a dict or convert it to a dict and return it.
+
+ Raises TypeError if unable to convert to a dict
+
+ :arg value: Dict or string to convert to a dict. Accepts 'k1=v2, k2=v2'.
+
+ :returns: value converted to a dictionary
+ """
+ if isinstance(value, dict):
+ return value
+
+ if isinstance(value, string_types):
+ if value.startswith("{"):
+ try:
+ return json.loads(value)
+ except Exception:
+ (result, exc) = safe_eval(value, dict(), include_exceptions=True)
+ if exc is not None:
+ raise TypeError('unable to evaluate string as dictionary')
+ return result
+ elif '=' in value:
+ fields = []
+ field_buffer = []
+ in_quote = False
+ in_escape = False
+ for c in value.strip():
+ if in_escape:
+ field_buffer.append(c)
+ in_escape = False
+ elif c == '\\':
+ in_escape = True
+ elif not in_quote and c in ('\'', '"'):
+ in_quote = c
+ elif in_quote and in_quote == c:
+ in_quote = False
+ elif not in_quote and c in (',', ' '):
+ field = ''.join(field_buffer)
+ if field:
+ fields.append(field)
+ field_buffer = []
+ else:
+ field_buffer.append(c)
+
+ field = ''.join(field_buffer)
+ if field:
+ fields.append(field)
+ return dict(x.split("=", 1) for x in fields)
+ else:
+ raise TypeError("dictionary requested, could not parse JSON or key=value")
+
+ raise TypeError('%s cannot be converted to a dict' % type(value))
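+
+# Illustrative only (not part of the module):
+#
+# check_type_dict('{"a": 1}')      # -> {'a': 1} (parsed as JSON)
+# check_type_dict('k1=v1, k2=v2')  # -> {'k1': 'v1', 'k2': 'v2'}
+# check_type_dict('no-delimiter')  # raises TypeError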
+
+
+def check_type_bool(value):
+ """Verify that the value is a bool or convert it to a bool and return it.
+
+ Raises TypeError if unable to convert to a bool
+
+ :arg value: String, int, or float to convert to bool. Valid booleans include:
+ '1', 'on', 1, '0', 0, 'n', 'f', 'false', 'true', 'y', 't', 'yes', 'no', 'off'
+
+ :returns: Boolean True or False
+ """
+ if isinstance(value, bool):
+ return value
+
+ if isinstance(value, string_types) or isinstance(value, (int, float)):
+ return boolean(value)
+
+ raise TypeError('%s cannot be converted to a bool' % type(value))
+
+
+def check_type_int(value):
+ """Verify that the value is an integer and return it or convert the value
+ to an integer and return it
+
+ Raises TypeError if unable to convert to an int
+
+ :arg value: String or int to convert or verify
+
+ :return: Int of given value
+ """
+ if isinstance(value, integer_types):
+ return value
+
+ if isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ pass
+
+ raise TypeError('%s cannot be converted to an int' % type(value))
+
+
+def check_type_float(value):
+ """Verify that value is a float or convert it to a float and return it
+
+ Raises TypeError if unable to convert to a float
+
+ :arg value: Float, int, str, or bytes to verify or convert and return.
+
+ :returns: Float of given value.
+ """
+ if isinstance(value, float):
+ return value
+
+ if isinstance(value, (binary_type, text_type, int)):
+ try:
+ return float(value)
+ except ValueError:
+ pass
+
+ raise TypeError('%s cannot be converted to a float' % type(value))
+
+
+def check_type_path(value):
+ """Verify the provided value is a string or convert it to a string,
+ then return the expanded path
+ """
+ value = check_type_str(value)
+ return os.path.expanduser(os.path.expandvars(value))
+
+
+def check_type_raw(value):
+ """Returns the raw value
+ """
+ return value
+
+
+def check_type_bytes(value):
+ """Convert a human-readable string value to bytes
+
+ Raises TypeError if unable to convert the value
+ """
+ try:
+ return human_to_bytes(value)
+ except ValueError:
+ raise TypeError('%s cannot be converted to a Byte value' % type(value))
+
+
+def check_type_bits(value):
+ """Convert a human-readable string bits value to bits in integer.
+
+ Example: check_type_bits('1Mb') returns integer 1048576.
+
+ Raises TypeError if unable to convert the value.
+ """
+ try:
+ return human_to_bytes(value, isbits=True)
+ except ValueError:
+ raise TypeError('%s cannot be converted to a Bit value' % type(value))
+
+
+def check_type_jsonarg(value):
+ """Return a jsonified string. Sometimes the controller turns a json string
+ into a dict/list so transform it back into json here
+
+ Raises TypeError if unable to convert the value
+
+ """
+ if isinstance(value, (text_type, binary_type)):
+ return value.strip()
+ elif isinstance(value, (list, tuple, dict)):
+ return jsonify(value)
+ raise TypeError('%s cannot be converted to a json string' % type(value))
diff --git a/lib/ansible/module_utils/common/warnings.py b/lib/ansible/module_utils/common/warnings.py
new file mode 100644
index 00000000..9423e6a4
--- /dev/null
+++ b/lib/ansible/module_utils/common/warnings.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+
+_global_warnings = []
+_global_deprecations = []
+
+
+def warn(warning):
+ if isinstance(warning, string_types):
+ _global_warnings.append(warning)
+ else:
+ raise TypeError("warn requires a string not a %s" % type(warning))
+
+
+def deprecate(msg, version=None, date=None, collection_name=None):
+ if isinstance(msg, string_types):
+ # For compatibility, we accept that neither version nor date is set,
+ # and treat that the same as if version had been set
+ if date is not None:
+ _global_deprecations.append({'msg': msg, 'date': date, 'collection_name': collection_name})
+ else:
+ _global_deprecations.append({'msg': msg, 'version': version, 'collection_name': collection_name})
+ else:
+ raise TypeError("deprecate requires a string not a %s" % type(msg))
+
+
+def get_warning_messages():
+ """Return a tuple of warning messages accumulated over this run"""
+ return tuple(_global_warnings)
+
+
+def get_deprecation_messages():
+ """Return a tuple of deprecations accumulated over this run"""
+ return tuple(_global_deprecations)
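+
+# Illustrative only (not part of the module); AnsibleModule.warn() and
+# AnsibleModule.deprecate() funnel into these helpers. Used directly:
+#
+# warn('something looks off')
+# deprecate('old_param is going away', version='2.14')
+# get_warning_messages()      # -> ('something looks off',)
+# get_deprecation_messages()
+# # -> ({'msg': 'old_param is going away', 'version': '2.14', 'collection_name': None},)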
diff --git a/lib/ansible/module_utils/compat/__init__.py b/lib/ansible/module_utils/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/compat/__init__.py
diff --git a/lib/ansible/module_utils/compat/_selectors2.py b/lib/ansible/module_utils/compat/_selectors2.py
new file mode 100644
index 00000000..be44b4b3
--- /dev/null
+++ b/lib/ansible/module_utils/compat/_selectors2.py
@@ -0,0 +1,655 @@
+# This file is from the selectors2.py package. It backports the PSF Licensed
+# selectors module from the Python-3.5 stdlib to older versions of Python.
+# The author, Seth Michael Larson, dual licenses his modifications under the
+# PSF License and MIT License:
+# https://github.com/SethMichaelLarson/selectors2#license
+#
+# Copyright (c) 2016 Seth Michael Larson
+#
+# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
+#
+
+
+# Backport of selectors.py from Python 3.5+ to support Python < 3.4
+# Also has the behavior specified in PEP 475 which is to retry syscalls
+# in the case of an EINTR error. This module is required because selectors34
+# does not follow this behavior and instead returns that no file descriptor
+# events have occurred rather than retry the syscall. Upstream selectors2
+# dropped select.devpoll support to maintain 100% test coverage; this bundled
+# copy does ship a DevpollSelector (defined below).
+
+import errno
+import math
+import select
+import socket
+import sys
+import time
+from collections import namedtuple
+from ansible.module_utils.common._collections_compat import Mapping
+
+try:
+ monotonic = time.monotonic
+except (AttributeError, ImportError): # Python < 3.3
+ monotonic = time.time
+
+__author__ = 'Seth Michael Larson'
+__email__ = 'sethmichaellarson@protonmail.com'
+__version__ = '1.1.1'
+__license__ = 'MIT'
+
+__all__ = [
+ 'EVENT_READ',
+ 'EVENT_WRITE',
+ 'SelectorError',
+ 'SelectorKey',
+ 'DefaultSelector'
+]
+
+EVENT_READ = (1 << 0)
+EVENT_WRITE = (1 << 1)
+
+HAS_SELECT = True # Variable that shows whether the platform has a selector.
+_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
+
+
+class SelectorError(Exception):
+ def __init__(self, errcode):
+ super(SelectorError, self).__init__()
+ self.errno = errcode
+
+ def __repr__(self):
+ return "<SelectorError errno={0}>".format(self.errno)
+
+ def __str__(self):
+ return self.__repr__()
+
+
+def _fileobj_to_fd(fileobj):
+ """ Return a file descriptor from a file object. If
+ given an integer will simply return that integer back. """
+ if isinstance(fileobj, int):
+ fd = fileobj
+ else:
+ try:
+ fd = int(fileobj.fileno())
+ except (AttributeError, TypeError, ValueError):
+ raise ValueError("Invalid file object: {0!r}".format(fileobj))
+ if fd < 0:
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
+ return fd
+
+
+# Python 3.5 uses a more direct route to wrap system calls to increase speed.
+if sys.version_info >= (3, 5):
+ def _syscall_wrapper(func, _, *args, **kwargs):
+ """ This is the short-circuit version of the below logic
+ because in Python 3.5+ all selectors restart system calls. """
+ try:
+ return func(*args, **kwargs)
+ except (OSError, IOError, select.error) as e:
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+ raise SelectorError(errcode)
+else:
+ def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
+ """ Wrapper function for syscalls that could fail due to EINTR.
+ All functions should be retried if there is time left in the timeout
+ in accordance with PEP 475. """
+ timeout = kwargs.get("timeout", None)
+ if timeout is None:
+ expires = None
+ recalc_timeout = False
+ else:
+ timeout = float(timeout)
+ if timeout < 0.0: # Timeout less than 0 treated as no timeout.
+ expires = None
+ else:
+ expires = monotonic() + timeout
+
+ args = list(args)
+ if recalc_timeout and "timeout" not in kwargs:
+ raise ValueError(
+ "Timeout must be in args or kwargs to be recalculated")
+
+ result = _SYSCALL_SENTINEL
+ while result is _SYSCALL_SENTINEL:
+ try:
+ result = func(*args, **kwargs)
+ # OSError is thrown by select.select
+ # IOError is thrown by select.epoll.poll
+ # select.error is thrown by select.poll.poll
+ # Aren't we thankful for Python 3.x rework for exceptions?
+ except (OSError, IOError, select.error) as e:
+ # select.error wasn't a subclass of OSError in the past.
+ errcode = None
+ if hasattr(e, "errno"):
+ errcode = e.errno
+ elif hasattr(e, "args"):
+ errcode = e.args[0]
+
+ # Also test for the Windows equivalent of EINTR.
+ is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
+ errcode == errno.WSAEINTR))
+
+ if is_interrupt:
+ if expires is not None:
+ current_time = monotonic()
+ if current_time > expires:
+ raise OSError(errno.ETIMEDOUT)
+ if recalc_timeout:
+ if "timeout" in kwargs:
+ kwargs["timeout"] = expires - current_time
+ continue
+ if errcode:
+ raise SelectorError(errcode)
+ else:
+ raise
+ return result
+
+
+SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
+
+
+class _SelectorMapping(Mapping):
+ """ Mapping of file objects to selector keys """
+
+ def __init__(self, selector):
+ self._selector = selector
+
+ def __len__(self):
+ return len(self._selector._fd_to_key)
+
+ def __getitem__(self, fileobj):
+ try:
+ fd = self._selector._fileobj_lookup(fileobj)
+ return self._selector._fd_to_key[fd]
+ except KeyError:
+ raise KeyError("{0!r} is not registered.".format(fileobj))
+
+ def __iter__(self):
+ return iter(self._selector._fd_to_key)
+
+
+class BaseSelector(object):
+ """ Abstract Selector class
+
+ A selector supports registering file objects to be monitored
+ for specific I/O events.
+
+ A file object is a file descriptor or any object with a
+ `fileno()` method. An arbitrary object can be attached to the
+ file object which can be used for example to store context info,
+ a callback, etc.
+
+ A selector can use various implementations (select(), poll(), epoll(),
+ and kqueue()) depending on the platform. The 'DefaultSelector' class uses
+ the most efficient implementation for the current platform.
+ """
+ def __init__(self):
+ # Maps file descriptors to keys.
+ self._fd_to_key = {}
+
+ # Read-only mapping returned by get_map()
+ self._map = _SelectorMapping(self)
+
+ def _fileobj_lookup(self, fileobj):
+ """ Return a file descriptor from a file object.
+ This wraps _fileobj_to_fd() to do an exhaustive
+ search in case the object is invalid but we still
+ have it in our map. Used by unregister() so we can
+ unregister an object that was previously registered
+ even if it is closed. It is also used by _SelectorMapping
+ """
+ try:
+ return _fileobj_to_fd(fileobj)
+ except ValueError:
+
+ # Search through all our mapped keys.
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ return key.fd
+
+ # Raise ValueError after all.
+ raise
+
+ def register(self, fileobj, events, data=None):
+ """ Register a file object for a set of events to monitor. """
+ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
+ raise ValueError("Invalid events: {0!r}".format(events))
+
+ key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
+
+ if key.fd in self._fd_to_key:
+ raise KeyError("{0!r} (FD {1}) is already registered"
+ .format(fileobj, key.fd))
+
+ self._fd_to_key[key.fd] = key
+ return key
+
+ def unregister(self, fileobj):
+ """ Unregister a file object from being monitored. """
+ try:
+ key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ # Getting the fileno of a closed socket on Windows errors with EBADF.
+ except socket.error as err:
+ if err.errno != errno.EBADF:
+ raise
+ else:
+ for key in self._fd_to_key.values():
+ if key.fileobj is fileobj:
+ self._fd_to_key.pop(key.fd)
+ break
+ else:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+ return key
+
+ def modify(self, fileobj, events, data=None):
+ """ Change a registered file object monitored events and data. """
+ # NOTE: Some subclasses optimize this operation even further.
+ try:
+ key = self._fd_to_key[self._fileobj_lookup(fileobj)]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ if events != key.events:
+ self.unregister(fileobj)
+ key = self.register(fileobj, events, data)
+
+ elif data != key.data:
+ # Use a shortcut to update the data.
+ key = key._replace(data=data)
+ self._fd_to_key[key.fd] = key
+
+ return key
+
+ def select(self, timeout=None):
+ """ Perform the actual selection until some monitored file objects
+ are ready or the timeout expires. """
+ raise NotImplementedError()
+
+ def close(self):
+ """ Close the selector. This must be called to ensure that all
+ underlying resources are freed. """
+ self._fd_to_key.clear()
+ self._map = None
+
+ def get_key(self, fileobj):
+ """ Return the key associated with a registered file object. """
+ mapping = self.get_map()
+ if mapping is None:
+ raise RuntimeError("Selector is closed")
+ try:
+ return mapping[fileobj]
+ except KeyError:
+ raise KeyError("{0!r} is not registered".format(fileobj))
+
+ def get_map(self):
+ """ Return a mapping of file objects to selector keys """
+ return self._map
+
+ def _key_from_fd(self, fd):
+ """ Return the key associated to a given file descriptor
+ Return None if it is not found. """
+ try:
+ return self._fd_to_key[fd]
+ except KeyError:
+ return None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
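+
+# Illustrative event loop (not part of the module), assuming `sock` is a
+# connected socket and `on_readable` a callable; DefaultSelector is the
+# platform-appropriate alias this module exports:
+#
+# sel = DefaultSelector()
+# sel.register(sock, EVENT_READ, data=on_readable)
+# while True:
+#     for key, events in sel.select(timeout=1.0):
+#         key.data(key.fileobj)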
+
+
+# Almost all platforms have select.select()
+if hasattr(select, "select"):
+ class SelectSelector(BaseSelector):
+ """ Select-based selector. """
+ def __init__(self):
+ super(SelectSelector, self).__init__()
+ self._readers = set()
+ self._writers = set()
+
+ def register(self, fileobj, events, data=None):
+ key = super(SelectSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ self._readers.add(key.fd)
+ if events & EVENT_WRITE:
+ self._writers.add(key.fd)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(SelectSelector, self).unregister(fileobj)
+ self._readers.discard(key.fd)
+ self._writers.discard(key.fd)
+ return key
+
+ def _select(self, r, w, timeout=None):
+ """ Wrapper for select.select because timeout is a positional arg """
+ return select.select(r, w, [], timeout)
+
+ def select(self, timeout=None):
+ # Selecting on empty lists on Windows errors out.
+ if not len(self._readers) and not len(self._writers):
+ return []
+
+ timeout = None if timeout is None else max(timeout, 0.0)
+ ready = []
+ r, w, _ = _syscall_wrapper(self._select, True, self._readers,
+ self._writers, timeout=timeout)
+ r = set(r)
+ w = set(w)
+ for fd in r | w:
+ events = 0
+ if fd in r:
+ events |= EVENT_READ
+ if fd in w:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ __all__.append('SelectSelector')
+
+
+if hasattr(select, "poll"):
+ class PollSelector(BaseSelector):
+ """ Poll-based selector """
+ def __init__(self):
+ super(PollSelector, self).__init__()
+ self._poll = select.poll()
+
+ def register(self, fileobj, events, data=None):
+ key = super(PollSelector, self).register(fileobj, events, data)
+ event_mask = 0
+ if events & EVENT_READ:
+ event_mask |= select.POLLIN
+ if events & EVENT_WRITE:
+ event_mask |= select.POLLOUT
+ self._poll.register(key.fd, event_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(PollSelector, self).unregister(fileobj)
+ self._poll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.poll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._poll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
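+ # Any flag besides POLLIN (e.g. POLLOUT, POLLERR, POLLHUP) marks
+ # the fd writable, and any flag besides POLLOUT marks it readable,
+ # so error conditions surface as readiness to the caller.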
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+ __all__.append('PollSelector')
+
+if hasattr(select, "epoll"):
+ class EpollSelector(BaseSelector):
+ """ Epoll-based selector """
+ def __init__(self):
+ super(EpollSelector, self).__init__()
+ self._epoll = select.epoll()
+
+ def fileno(self):
+ return self._epoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(EpollSelector, self).register(fileobj, events, data)
+ events_mask = 0
+ if events & EVENT_READ:
+ events_mask |= select.EPOLLIN
+ if events & EVENT_WRITE:
+ events_mask |= select.EPOLLOUT
+ _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(EpollSelector, self).unregister(fileobj)
+ try:
+ _syscall_wrapper(self._epoll.unregister, False, key.fd)
+ except SelectorError:
+ # This can occur if the fd was closed after it was registered.
+ pass
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0.0
+ else:
+ # select.epoll.poll() has a resolution of 1 millisecond but
+ # conveniently takes seconds, so no wrapper like PollSelector's
+ # is needed; round up so we wait *at least* timeout seconds.
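+ # e.g. timeout=0.0005 s -> ceil(0.5 ms) = 1 ms -> 0.001 s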
+ timeout = math.ceil(timeout * 1e3) * 1e-3
+ timeout = float(timeout)
+ else:
+ timeout = -1.0 # epoll.poll() must have a float.
+
+ # We always want at least 1 so that select() can be called even with
+ # no file descriptors registered; otherwise epoll.poll() will fail.
+ max_events = max(len(self._fd_to_key), 1)
+
+ ready = []
+ fd_events = _syscall_wrapper(self._epoll.poll, True,
+ timeout=timeout,
+ maxevents=max_events)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.EPOLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.EPOLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+ return ready
+
+ def close(self):
+ self._epoll.close()
+ super(EpollSelector, self).close()
+
+ __all__.append('EpollSelector')
+
+
+if hasattr(select, "devpoll"):
+ class DevpollSelector(BaseSelector):
+ """Solaris /dev/poll selector."""
+
+ def __init__(self):
+ super(DevpollSelector, self).__init__()
+ self._devpoll = select.devpoll()
+
+ def fileno(self):
+ return self._devpoll.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(DevpollSelector, self).register(fileobj, events, data)
+ poll_events = 0
+ if events & EVENT_READ:
+ poll_events |= select.POLLIN
+ if events & EVENT_WRITE:
+ poll_events |= select.POLLOUT
+ self._devpoll.register(key.fd, poll_events)
+ return key
+
+ def unregister(self, fileobj):
+ key = super(DevpollSelector, self).unregister(fileobj)
+ self._devpoll.unregister(key.fd)
+ return key
+
+ def _wrap_poll(self, timeout=None):
+ """ Wrapper function for select.poll.poll() so that
+ _syscall_wrapper can work with only seconds. """
+ if timeout is not None:
+ if timeout <= 0:
+ timeout = 0
+ else:
+ # select.devpoll.poll() has a resolution of 1 millisecond,
+ # round away from zero to wait *at least* timeout seconds.
+ timeout = math.ceil(timeout * 1e3)
+
+ result = self._devpoll.poll(timeout)
+ return result
+
+ def select(self, timeout=None):
+ ready = []
+ fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
+ for fd, event_mask in fd_events:
+ events = 0
+ if event_mask & ~select.POLLIN:
+ events |= EVENT_WRITE
+ if event_mask & ~select.POLLOUT:
+ events |= EVENT_READ
+
+ key = self._key_from_fd(fd)
+ if key:
+ ready.append((key, events & key.events))
+
+ return ready
+
+ def close(self):
+ self._devpoll.close()
+ super(DevpollSelector, self).close()
+
+ __all__.append('DevpollSelector')
+
+
+if hasattr(select, "kqueue"):
+ class KqueueSelector(BaseSelector):
+ """ Kqueue / Kevent-based selector """
+ def __init__(self):
+ super(KqueueSelector, self).__init__()
+ self._kqueue = select.kqueue()
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def register(self, fileobj, events, data=None):
+ key = super(KqueueSelector, self).register(fileobj, events, data)
+ if events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+
+ if events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_ADD)
+
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+
+ return key
+
+ def unregister(self, fileobj):
+ key = super(KqueueSelector, self).unregister(fileobj)
+ if key.events & EVENT_READ:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_READ,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+ if key.events & EVENT_WRITE:
+ kevent = select.kevent(key.fd,
+ select.KQ_FILTER_WRITE,
+ select.KQ_EV_DELETE)
+ try:
+ _syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
+ except SelectorError:
+ pass
+
+ return key
+
+ def select(self, timeout=None):
+ if timeout is not None:
+ timeout = max(timeout, 0)
+
+ max_events = len(self._fd_to_key) * 2
+ ready_fds = {}
+
+ kevent_list = _syscall_wrapper(self._wrap_control, True,
+ None, max_events, timeout=timeout)
+
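+ # kqueue reports read and write readiness as separate kevents
+ # for the same fd, so merge them into one (key, events) entry.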
+ for kevent in kevent_list:
+ fd = kevent.ident
+ event_mask = kevent.filter
+ events = 0
+ if event_mask == select.KQ_FILTER_READ:
+ events |= EVENT_READ
+ if event_mask == select.KQ_FILTER_WRITE:
+ events |= EVENT_WRITE
+
+ key = self._key_from_fd(fd)
+ if key:
+ if key.fd not in ready_fds:
+ ready_fds[key.fd] = (key, events & key.events)
+ else:
+ old_events = ready_fds[key.fd][1]
+ ready_fds[key.fd] = (key, (events | old_events) & key.events)
+
+ return list(ready_fds.values())
+
+ def close(self):
+ self._kqueue.close()
+ super(KqueueSelector, self).close()
+
+ def _wrap_control(self, changelist, max_events, timeout):
+ return self._kqueue.control(changelist, max_events, timeout)
+
+ __all__.append('KqueueSelector')
+
+
+# Choose the best implementation, roughly:
+# kqueue == epoll == devpoll > poll > select.
+# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
+if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
+ DefaultSelector = KqueueSelector
+elif 'DevpollSelector' in globals():
+ DefaultSelector = DevpollSelector
+elif 'EpollSelector' in globals(): # Platform-specific: Linux
+ DefaultSelector = EpollSelector
+elif 'PollSelector' in globals(): # Platform-specific: Linux
+ DefaultSelector = PollSelector
+elif 'SelectSelector' in globals(): # Platform-specific: Windows
+ DefaultSelector = SelectSelector
+else: # Platform-specific: AppEngine
+ def no_selector(_):
+ raise ValueError("Platform does not have a selector")
+ DefaultSelector = no_selector
+ HAS_SELECT = False
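+
+# Editor's note: a minimal, hypothetical sketch (not part of the bundled
+# source; `conn` and `handle()` are assumed) of how callers are expected to
+# consume this module -- check HAS_SELECT, then use DefaultSelector as a
+# context manager:
+#
+#   if HAS_SELECT:
+#       with DefaultSelector() as sel:
+#           sel.register(conn, EVENT_READ)
+#           for key, events in sel.select(timeout=0.5):
+#               handle(key.fileobj, events)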
diff --git a/lib/ansible/module_utils/compat/importlib.py b/lib/ansible/module_utils/compat/importlib.py
new file mode 100644
index 00000000..eee0ddf7
--- /dev/null
+++ b/lib/ansible/module_utils/compat/importlib.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2020 Matt Martz <matt@sivel.net>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+try:
+ from importlib import import_module
+except ImportError:
+ # importlib.import_module returns the tail module of a dotted
+ # name, whereas __import__ returns the head; this fallback makes
+ # __import__ behave like importlib.import_module.
+ def import_module(name):
+ __import__(name)
+ return sys.modules[name]
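+
+# Editor's note: a hypothetical illustration (not part of this file) of the
+# head/tail difference the fallback compensates for:
+#
+#   __import__('xml.etree.ElementTree')      # returns the head: xml
+#   import_module('xml.etree.ElementTree')   # returns the tail:
+#                                            # xml.etree.ElementTree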
diff --git a/lib/ansible/module_utils/compat/paramiko.py b/lib/ansible/module_utils/compat/paramiko.py
new file mode 100644
index 00000000..8e09b1b8
--- /dev/null
+++ b/lib/ansible/module_utils/compat/paramiko.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+PARAMIKO_IMPORT_ERR = None
+
+paramiko = None
+try:
+ import paramiko
+# paramiko and gssapi are incompatible and raise AttributeError, not ImportError
+# When running in FIPS mode, cryptography raises InternalError
+# https://bugzilla.redhat.com/show_bug.cgi?id=1778939
+except Exception as err:
+ PARAMIKO_IMPORT_ERR = err
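+
+# Editor's note: a hypothetical consumer-side sketch (not part of this file;
+# `module` is an assumed AnsibleModule instance) of the guarded import
+# pattern above:
+#
+#   from ansible.module_utils.compat.paramiko import paramiko, PARAMIKO_IMPORT_ERR
+#
+#   if paramiko is None:
+#       module.fail_json(msg='paramiko is required: %s' % PARAMIKO_IMPORT_ERR)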
diff --git a/lib/ansible/module_utils/compat/selectors.py b/lib/ansible/module_utils/compat/selectors.py
new file mode 100644
index 00000000..53996d7e
--- /dev/null
+++ b/lib/ansible/module_utils/compat/selectors.py
@@ -0,0 +1,56 @@
+# (c) 2014, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat selectors library. Python-3.5 has this builtin. The selectors2
+package exists on pypi to backport the functionality as far as python-2.6.
+'''
+# The following makes it easier for us to script updates of the bundled code
+_BUNDLED_METADATA = {"pypi_name": "selectors2", "version": "1.1.1", "version_constraints": ">1.0,<2.0"}
+
+# Added these bugfix commits from 2.1.0:
+# * https://github.com/SethMichaelLarson/selectors2/commit/3bd74f2033363b606e1e849528ccaa76f5067590
+# Wrap kqueue.control so that timeout is a keyword arg
+# * https://github.com/SethMichaelLarson/selectors2/commit/6f6a26f42086d8aab273b30be492beecb373646b
+# Fix formatting of the kqueue.control patch for pylint
+# * https://github.com/SethMichaelLarson/selectors2/commit/f0c2c6c66cfa7662bc52beaf4e2d65adfa25e189
+# Fix use of OSError exception for py3 and use the wrapper of kqueue.control so retries of
+# interrupted syscalls work with kqueue
+
+import os.path
+import sys
+
+try:
+ # Python 3.4+
+ import selectors as _system_selectors
+except ImportError:
+ try:
+ # backport package installed in the system
+ import selectors2 as _system_selectors
+ except ImportError:
+ _system_selectors = None
+
+if _system_selectors:
+ selectors = _system_selectors
+else:
+ # Our bundled copy
+ from ansible.module_utils.compat import _selectors2 as selectors
+sys.modules['ansible.module_utils.compat.selectors'] = selectors
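+
+# Editor's note: the sys.modules assignment above aliases this module name to
+# the chosen implementation, so a later (hypothetical) import such as
+#
+#   import ansible.module_utils.compat.selectors as selectors
+#
+# yields the stdlib module, the selectors2 backport, or the bundled copy,
+# whichever was selected above.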
diff --git a/lib/ansible/module_utils/connection.py b/lib/ansible/module_utils/connection.py
new file mode 100644
index 00000000..a76fdb6b
--- /dev/null
+++ b/lib/ansible/module_utils/connection.py
@@ -0,0 +1,217 @@
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import hashlib
+import json
+import socket
+import struct
+import traceback
+import uuid
+
+from functools import partial
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import cPickle
+
+
+def write_to_file_descriptor(fd, obj):
+ """Handles making sure all data is properly written to file descriptor fd.
+
+ In particular, that data is encoded in a character stream-friendly way and
+ that all data gets written before returning.
+ """
+ # Need to force a protocol that is compatible with both py2 and py3.
+ # That would be protocol=2 or less.
+ # Also need to force a protocol that excludes certain control chars, as
+ # stdin in this case is a pty and control chars will cause problems.
+ # That means only protocol=0 will work.
+ src = cPickle.dumps(obj, protocol=0)
+
+ # Raw \r characters will not survive the pty round-trip;
+ # they should be rehydrated on the receiving end.
+ src = src.replace(b'\r', br'\r')
+ data_hash = to_bytes(hashlib.sha1(src).hexdigest())
+
+ os.write(fd, b'%d\n' % len(src))
+ os.write(fd, src)
+ os.write(fd, b'%s\n' % data_hash)
+
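+# Editor's note: a hypothetical reader-side sketch (no paired reader exists
+# in this file; `fh` is an assumed binary stream on the receiving pty) of the
+# frame written above -- a decimal length line, the pickled payload, then a
+# sha1 hexdigest line for integrity checking:
+#
+#   length = int(fh.readline())
+#   src = fh.read(length)
+#   assert fh.readline().strip() == to_bytes(hashlib.sha1(src).hexdigest())
+#   obj = cPickle.loads(src.replace(br'\r', b'\r'))  # rehydrate raw \r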
+
+def send_data(s, data):
+ packed_len = struct.pack('!Q', len(data))
+ return s.sendall(packed_len + data)
+
+
+def recv_data(s):
+ header_len = 8 # size of a packed unsigned long long
+ data = to_bytes("")
+ while len(data) < header_len:
+ d = s.recv(header_len - len(data))
+ if not d:
+ return None
+ data += d
+ data_len = struct.unpack('!Q', data[:header_len])[0]
+ data = data[header_len:]
+ while len(data) < data_len:
+ d = s.recv(data_len - len(data))
+ if not d:
+ return None
+ data += d
+ return data
+
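+# Editor's note: a minimal, hypothetical round trip (not part of this file)
+# of the framing implemented by send_data()/recv_data() -- an 8-byte
+# big-endian unsigned length header followed by the payload:
+#
+#   a, b = socket.socketpair()
+#   send_data(a, b'hello')
+#   assert recv_data(b) == b'hello'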
+
+def exec_command(module, command):
+ connection = Connection(module._socket_path)
+ try:
+ out = connection.exec_command(command)
+ except ConnectionError as exc:
+ code = getattr(exc, 'code', 1)
+ message = getattr(exc, 'err', exc)
+ return code, '', to_text(message, errors='surrogate_then_replace')
+ return 0, out, ''
+
+
+def request_builder(method_, *args, **kwargs):
+ reqid = str(uuid.uuid4())
+ req = {'jsonrpc': '2.0', 'method': method_, 'id': reqid}
+ req['params'] = (args, kwargs)
+
+ return req
+
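+# Editor's note: request_builder() emits a JSON-RPC 2.0 envelope whose
+# 'params' is an Ansible-specific (args, kwargs) pair; with hypothetical
+# arguments:
+#
+#   request_builder('exec_command', 'show version', check_rc=True)
+#   # -> {'jsonrpc': '2.0', 'method': 'exec_command', 'id': '<uuid4>',
+#   #     'params': (('show version',), {'check_rc': True})}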
+
+class ConnectionError(Exception):
+
+ def __init__(self, message, *args, **kwargs):
+ super(ConnectionError, self).__init__(message)
+ for k, v in iteritems(kwargs):
+ setattr(self, k, v)
+
+
+class Connection(object):
+
+ def __init__(self, socket_path):
+ if socket_path is None:
+ raise AssertionError('socket_path must not be None')
+ self.socket_path = socket_path
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if name.startswith('_'):
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
+ return partial(self.__rpc__, name)
+
+ def _exec_jsonrpc(self, name, *args, **kwargs):
+
+ req = request_builder(name, *args, **kwargs)
+ reqid = req['id']
+
+ if not os.path.exists(self.socket_path):
+ raise ConnectionError(
+ 'socket path %s does not exist or cannot be found. See Troubleshooting socket '
+ 'path issues in the Network Debug and Troubleshooting Guide' % self.socket_path
+ )
+
+ try:
+ data = json.dumps(req, cls=AnsibleJSONEncoder)
+ except TypeError as exc:
+ raise ConnectionError(
+ "Failed to encode some variables as JSON for communication with ansible-connection. "
+ "The original exception was: %s" % to_text(exc)
+ )
+
+ try:
+ out = self.send(data)
+ except socket.error as e:
+ raise ConnectionError(
+ 'unable to connect to socket %s. See Troubleshooting socket path issues '
+ 'in the Network Debug and Troubleshooting Guide' % self.socket_path,
+ err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
+ )
+
+ try:
+ response = json.loads(out)
+ except ValueError:
+ params = [repr(arg) for arg in args] + ['{0}={1!r}'.format(k, v) for k, v in iteritems(kwargs)]
+ params = ', '.join(params)
+ raise ConnectionError(
+ "Unable to decode JSON from response to {0}({1}). Received '{2}'.".format(name, params, out)
+ )
+
+ if response['id'] != reqid:
+ raise ConnectionError('invalid json-rpc id received')
+ if "result_type" in response:
+ response["result"] = cPickle.loads(to_bytes(response["result"]))
+
+ return response
+
+ def __rpc__(self, name, *args, **kwargs):
+ """Executes the json-rpc and returns the output received
+ from remote device.
+ :name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
+ :args: Ordered list of params passed as arguments to rpc method
+ :kwargs: Dict of valid key, value pairs passed as arguments to rpc method
+
+ For usage refer the respective connection plugin docs.
+ """
+
+ response = self._exec_jsonrpc(name, *args, **kwargs)
+
+ if 'error' in response:
+ err = response.get('error')
+ msg = err.get('data') or err['message']
+ code = err['code']
+ raise ConnectionError(to_text(msg, errors='surrogate_then_replace'), code=code)
+
+ return response['result']
+
+ def send(self, data):
+ try:
+ sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sf.connect(self.socket_path)
+
+ send_data(sf, to_bytes(data))
+ response = recv_data(sf)
+
+ except socket.error as e:
+ sf.close()
+ raise ConnectionError(
+ 'unable to connect to socket %s. See the socket path issue category in '
+ 'Network Debug and Troubleshooting Guide' % self.socket_path,
+ err=to_text(e, errors='surrogate_then_replace'), exception=traceback.format_exc()
+ )
+
+ sf.close()
+
+ return to_text(response, errors='surrogate_or_strict')
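+
+# Editor's note: a hypothetical module-side usage sketch (not part of this
+# file; the `get` method is assumed to be exposed by the connection plugin).
+# Connection.__getattr__ turns unknown attributes into JSON-RPC calls via
+# __rpc__, so plugin methods can be invoked directly:
+#
+#   conn = Connection(module._socket_path)
+#   output = conn.get('show version')  # proxied as a json-rpc request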
diff --git a/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
new file mode 100644
index 00000000..676991e2
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.AccessToken.cs
@@ -0,0 +1,460 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible.AccessToken
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID_AND_ATTRIBUTES
+ {
+ public Luid Luid;
+ public UInt32 Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SID_AND_ATTRIBUTES
+ {
+ public IntPtr Sid;
+ public int Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_PRIVILEGES
+ {
+ public UInt32 PrivilegeCount;
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
+ public LUID_AND_ATTRIBUTES[] Privileges;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_USER
+ {
+ public SID_AND_ATTRIBUTES User;
+ }
+
+ public enum TokenInformationClass : uint
+ {
+ TokenUser = 1,
+ TokenPrivileges = 3,
+ TokenStatistics = 10,
+ TokenElevationType = 18,
+ TokenLinkedToken = 19,
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool CloseHandle(
+ IntPtr hObject);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool DuplicateTokenEx(
+ SafeNativeHandle hExistingToken,
+ TokenAccessLevels dwDesiredAccess,
+ IntPtr lpTokenAttributes,
+ SecurityImpersonationLevel ImpersonationLevel,
+ TokenType TokenType,
+ out SafeNativeHandle phNewToken);
+
+ [DllImport("kernel32.dll")]
+ public static extern SafeNativeHandle GetCurrentProcess();
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool GetTokenInformation(
+ SafeNativeHandle TokenHandle,
+ NativeHelpers.TokenInformationClass TokenInformationClass,
+ SafeMemoryBuffer TokenInformation,
+ UInt32 TokenInformationLength,
+ out UInt32 ReturnLength);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool ImpersonateLoggedOnUser(
+ SafeNativeHandle hToken);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LogonUserW(
+ string lpszUsername,
+ string lpszDomain,
+ string lpszPassword,
+ LogonType dwLogonType,
+ LogonProvider dwLogonProvider,
+ out SafeNativeHandle phToken);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupPrivilegeNameW(
+ string lpSystemName,
+ ref Luid lpLuid,
+ StringBuilder lpName,
+ ref UInt32 cchName);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern SafeNativeHandle OpenProcess(
+ ProcessAccessFlags dwDesiredAccess,
+ bool bInheritHandle,
+ UInt32 dwProcessId);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool OpenProcessToken(
+ SafeNativeHandle ProcessHandle,
+ TokenAccessLevels DesiredAccess,
+ out SafeNativeHandle TokenHandle);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool RevertToSelf();
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ public enum LogonProvider
+ {
+ Default,
+ WinNT35,
+ WinNT40,
+ WinNT50,
+ }
+
+ public enum LogonType
+ {
+ Interactive = 2,
+ Network = 3,
+ Batch = 4,
+ Service = 5,
+ Unlock = 7,
+ NetworkCleartext = 8,
+ NewCredentials = 9,
+ }
+
+ [Flags]
+ public enum PrivilegeAttributes : uint
+ {
+ Disabled = 0x00000000,
+ EnabledByDefault = 0x00000001,
+ Enabled = 0x00000002,
+ Removed = 0x00000004,
+ UsedForAccess = 0x80000000,
+ }
+
+ [Flags]
+ public enum ProcessAccessFlags : uint
+ {
+ Terminate = 0x00000001,
+ CreateThread = 0x00000002,
+ VmOperation = 0x00000008,
+ VmRead = 0x00000010,
+ VmWrite = 0x00000020,
+ DupHandle = 0x00000040,
+ CreateProcess = 0x00000080,
+ SetQuota = 0x00000100,
+ SetInformation = 0x00000200,
+ QueryInformation = 0x00000400,
+ SuspendResume = 0x00000800,
+ QueryLimitedInformation = 0x00001000,
+ Delete = 0x00010000,
+ ReadControl = 0x00020000,
+ WriteDac = 0x00040000,
+ WriteOwner = 0x00080000,
+ Synchronize = 0x00100000,
+ }
+
+ public enum SecurityImpersonationLevel
+ {
+ Anonymous,
+ Identification,
+ Impersonation,
+ Delegation,
+ }
+
+ public enum TokenElevationType
+ {
+ Default = 1,
+ Full,
+ Limited,
+ }
+
+ public enum TokenType
+ {
+ Primary = 1,
+ Impersonation,
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct Luid
+ {
+ public UInt32 LowPart;
+ public Int32 HighPart;
+
+ public static explicit operator UInt64(Luid l)
+ {
+ return (UInt64)((UInt64)l.HighPart << 32) | (UInt64)l.LowPart;
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TokenStatistics
+ {
+ public Luid TokenId;
+ public Luid AuthenticationId;
+ public Int64 ExpirationTime;
+ public TokenType TokenType;
+ public SecurityImpersonationLevel ImpersonationLevel;
+ public UInt32 DynamicCharged;
+ public UInt32 DynamicAvailable;
+ public UInt32 GroupCount;
+ public UInt32 PrivilegeCount;
+ public Luid ModifiedId;
+ }
+
+ public class PrivilegeInfo
+ {
+ public string Name;
+ public PrivilegeAttributes Attributes;
+
+ internal PrivilegeInfo(NativeHelpers.LUID_AND_ATTRIBUTES la)
+ {
+ Name = TokenUtil.GetPrivilegeName(la.Luid);
+ Attributes = (PrivilegeAttributes)la.Attributes;
+ }
+ }
+
+ public class SafeNativeHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeNativeHandle() : base(true) { }
+ public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ return NativeMethods.CloseHandle(handle);
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2} - 0x{2:X8})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class TokenUtil
+ {
+ public static SafeNativeHandle DuplicateToken(SafeNativeHandle hToken, TokenAccessLevels access,
+ SecurityImpersonationLevel impersonationLevel, TokenType tokenType)
+ {
+ SafeNativeHandle dupToken;
+ if (!NativeMethods.DuplicateTokenEx(hToken, access, IntPtr.Zero, impersonationLevel, tokenType, out dupToken))
+ throw new Win32Exception("Failed to duplicate token");
+ return dupToken;
+ }
+
+ public static SecurityIdentifier GetTokenUser(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenUser))
+ {
+ NativeHelpers.TOKEN_USER tokenUser = (NativeHelpers.TOKEN_USER)Marshal.PtrToStructure(
+ tokenInfo.DangerousGetHandle(),
+ typeof(NativeHelpers.TOKEN_USER));
+ return new SecurityIdentifier(tokenUser.User.Sid);
+ }
+ }
+
+ public static List<PrivilegeInfo> GetTokenPrivileges(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenPrivileges))
+ {
+ NativeHelpers.TOKEN_PRIVILEGES tokenPrivs = (NativeHelpers.TOKEN_PRIVILEGES)Marshal.PtrToStructure(
+ tokenInfo.DangerousGetHandle(),
+ typeof(NativeHelpers.TOKEN_PRIVILEGES));
+
+ NativeHelpers.LUID_AND_ATTRIBUTES[] luidAttrs =
+ new NativeHelpers.LUID_AND_ATTRIBUTES[tokenPrivs.PrivilegeCount];
+ PtrToStructureArray(luidAttrs, IntPtr.Add(tokenInfo.DangerousGetHandle(),
+ Marshal.SizeOf(tokenPrivs.PrivilegeCount)));
+
+ return luidAttrs.Select(la => new PrivilegeInfo(la)).ToList();
+ }
+ }
+
+ public static TokenStatistics GetTokenStatistics(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenStatistics))
+ {
+ TokenStatistics tokenStats = (TokenStatistics)Marshal.PtrToStructure(
+ tokenInfo.DangerousGetHandle(),
+ typeof(TokenStatistics));
+ return tokenStats;
+ }
+ }
+
+ public static TokenElevationType GetTokenElevationType(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenElevationType))
+ {
+ return (TokenElevationType)Marshal.ReadInt32(tokenInfo.DangerousGetHandle());
+ }
+ }
+
+ public static SafeNativeHandle GetTokenLinkedToken(SafeNativeHandle hToken)
+ {
+ using (SafeMemoryBuffer tokenInfo = GetTokenInformation(hToken,
+ NativeHelpers.TokenInformationClass.TokenLinkedToken))
+ {
+ return new SafeNativeHandle(Marshal.ReadIntPtr(tokenInfo.DangerousGetHandle()));
+ }
+ }
+
+ public static IEnumerable<SafeNativeHandle> EnumerateUserTokens(SecurityIdentifier sid,
+ TokenAccessLevels access = TokenAccessLevels.Query)
+ {
+ foreach (System.Diagnostics.Process process in System.Diagnostics.Process.GetProcesses())
+ {
+ // We always need the Query access level so we can query the TokenUser
+ using (process)
+ using (SafeNativeHandle hToken = TryOpenAccessToken(process, access | TokenAccessLevels.Query))
+ {
+ if (hToken == null)
+ continue;
+
+ if (!sid.Equals(GetTokenUser(hToken)))
+ continue;
+
+ yield return hToken;
+ }
+ }
+ }
+
+ public static void ImpersonateToken(SafeNativeHandle hToken)
+ {
+ if (!NativeMethods.ImpersonateLoggedOnUser(hToken))
+ throw new Win32Exception("Failed to impersonate token");
+ }
+
+ public static SafeNativeHandle LogonUser(string username, string domain, string password, LogonType logonType,
+ LogonProvider logonProvider)
+ {
+ SafeNativeHandle hToken;
+ if (!NativeMethods.LogonUserW(username, domain, password, logonType, logonProvider, out hToken))
+ throw new Win32Exception(String.Format("Failed to logon {0}",
+ String.IsNullOrEmpty(domain) ? username : domain + "\\" + username));
+
+ return hToken;
+ }
+
+ public static SafeNativeHandle OpenProcess()
+ {
+ return NativeMethods.GetCurrentProcess();
+ }
+
+ public static SafeNativeHandle OpenProcess(Int32 pid, ProcessAccessFlags access, bool inherit)
+ {
+ SafeNativeHandle hProcess = NativeMethods.OpenProcess(access, inherit, (UInt32)pid);
+ if (hProcess.IsInvalid)
+ throw new Win32Exception(String.Format("Failed to open process {0} with access {1}",
+ pid, access.ToString()));
+
+ return hProcess;
+ }
+
+ public static SafeNativeHandle OpenProcessToken(SafeNativeHandle hProcess, TokenAccessLevels access)
+ {
+ SafeNativeHandle hToken;
+ if (!NativeMethods.OpenProcessToken(hProcess, access, out hToken))
+ throw new Win32Exception(String.Format("Failed to open proces token with access {0}",
+ access.ToString()));
+
+ return hToken;
+ }
+
+ public static void RevertToSelf()
+ {
+ if (!NativeMethods.RevertToSelf())
+ throw new Win32Exception("Failed to revert thread impersonation");
+ }
+
+ internal static string GetPrivilegeName(Luid luid)
+ {
+ UInt32 nameLen = 0;
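+ // First call with a null buffer only retrieves the required buffer
+ // length into nameLen; the second call below fills in the name.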
+ NativeMethods.LookupPrivilegeNameW(null, ref luid, null, ref nameLen);
+
+ StringBuilder name = new StringBuilder((int)(nameLen + 1));
+ if (!NativeMethods.LookupPrivilegeNameW(null, ref luid, name, ref nameLen))
+ throw new Win32Exception("LookupPrivilegeName() failed");
+
+ return name.ToString();
+ }
+
+ private static SafeMemoryBuffer GetTokenInformation(SafeNativeHandle hToken,
+ NativeHelpers.TokenInformationClass infoClass)
+ {
+ UInt32 tokenLength;
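+ // Call once with a zero-length buffer to learn the required size,
+ // then allocate that much and call again to fetch the data.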
+ bool res = NativeMethods.GetTokenInformation(hToken, infoClass, new SafeMemoryBuffer(IntPtr.Zero), 0,
+ out tokenLength);
+ int errCode = Marshal.GetLastWin32Error();
+ if (!res && errCode != 24 && errCode != 122) // ERROR_INSUFFICIENT_BUFFER, ERROR_BAD_LENGTH
+ throw new Win32Exception(errCode, String.Format("GetTokenInformation({0}) failed to get buffer length",
+ infoClass.ToString()));
+
+ SafeMemoryBuffer tokenInfo = new SafeMemoryBuffer((int)tokenLength);
+ if (!NativeMethods.GetTokenInformation(hToken, infoClass, tokenInfo, tokenLength, out tokenLength))
+ throw new Win32Exception(String.Format("GetTokenInformation({0}) failed", infoClass.ToString()));
+
+ return tokenInfo;
+ }
+
+ private static void PtrToStructureArray<T>(T[] array, IntPtr ptr)
+ {
+ IntPtr ptrOffset = ptr;
+ for (int i = 0; i < array.Length; i++, ptrOffset = IntPtr.Add(ptrOffset, Marshal.SizeOf(typeof(T))))
+ array[i] = (T)Marshal.PtrToStructure(ptrOffset, typeof(T));
+ }
+
+ private static SafeNativeHandle TryOpenAccessToken(System.Diagnostics.Process process, TokenAccessLevels access)
+ {
+ try
+ {
+ using (SafeNativeHandle hProcess = OpenProcess(process.Id, ProcessAccessFlags.QueryInformation, false))
+ return OpenProcessToken(hProcess, access);
+ }
+ catch (Win32Exception)
+ {
+ return null;
+ }
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Basic.cs b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
new file mode 100644
index 00000000..51a543bc
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Basic.cs
@@ -0,0 +1,1476 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using System.Management.Automation;
+using System.Management.Automation.Runspaces;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Security.AccessControl;
+using System.Security.Principal;
+#if CORECLR
+using Newtonsoft.Json;
+#else
+using System.Web.Script.Serialization;
+#endif
+
+// System.Diagnostics.EventLog.dll references differently versioned dlls that
+// are loaded in PSCore; disable CS1702 so the compiler ignores this warning
+//NoWarn -Name CS1702 -CLR Core
+
+//AssemblyReference -Type Newtonsoft.Json.JsonConvert -CLR Core
+//AssemblyReference -Type System.Diagnostics.EventLog -CLR Core
+//AssemblyReference -Type System.Security.AccessControl.NativeObjectSecurity -CLR Core
+//AssemblyReference -Type System.Security.AccessControl.DirectorySecurity -CLR Core
+//AssemblyReference -Type System.Security.Principal.IdentityReference -CLR Core
+
+//AssemblyReference -Name System.Web.Extensions.dll -CLR Framework
+
+namespace Ansible.Basic
+{
+ public class AnsibleModule
+ {
+ public delegate void ExitHandler(int rc);
+ public static ExitHandler Exit = new ExitHandler(ExitModule);
+
+ public delegate void WriteLineHandler(string line);
+ public static WriteLineHandler WriteLine = new WriteLineHandler(WriteLineModule);
+
+ public static bool _DebugArgSpec = false;
+
+ private static List<string> BOOLEANS_TRUE = new List<string>() { "y", "yes", "on", "1", "true", "t", "1.0" };
+ private static List<string> BOOLEANS_FALSE = new List<string>() { "n", "no", "off", "0", "false", "f", "0.0" };
+
+ private string remoteTmp = Path.GetTempPath();
+ private string tmpdir = null;
+ private HashSet<string> noLogValues = new HashSet<string>();
+ private List<string> optionsContext = new List<string>();
+ private List<string> warnings = new List<string>();
+ private List<Dictionary<string, string>> deprecations = new List<Dictionary<string, string>>();
+ private List<string> cleanupFiles = new List<string>();
+
+ private Dictionary<string, string> passVars = new Dictionary<string, string>()
+ {
+ // a null value means no mapping; the option is not used by Ansible.Basic.AnsibleModule
+ { "check_mode", "CheckMode" },
+ { "debug", "DebugMode" },
+ { "diff", "DiffMode" },
+ { "keep_remote_files", "KeepRemoteFiles" },
+ { "module_name", "ModuleName" },
+ { "no_log", "NoLog" },
+ { "remote_tmp", "remoteTmp" },
+ { "selinux_special_fs", null },
+ { "shell_executable", null },
+ { "socket", null },
+ { "string_conversion_action", null },
+ { "syslog_facility", null },
+ { "tmpdir", "tmpdir" },
+ { "verbosity", "Verbosity" },
+ { "version", "AnsibleVersion" },
+ };
+ private List<string> passBools = new List<string>() { "check_mode", "debug", "diff", "keep_remote_files", "no_log" };
+ private List<string> passInts = new List<string>() { "verbosity" };
+ private Dictionary<string, List<object>> specDefaults = new Dictionary<string, List<object>>()
+ {
+ // key - (default, type) - null is freeform
+ { "apply_defaults", new List<object>() { false, typeof(bool) } },
+ { "aliases", new List<object>() { typeof(List<string>), typeof(List<string>) } },
+ { "choices", new List<object>() { typeof(List<object>), typeof(List<object>) } },
+ { "default", new List<object>() { null, null } },
+ { "deprecated_aliases", new List<object>() { typeof(List<Hashtable>), typeof(List<Hashtable>) } },
+ { "elements", new List<object>() { null, null } },
+ { "mutually_exclusive", new List<object>() { typeof(List<List<string>>), typeof(List<object>) } },
+ { "no_log", new List<object>() { false, typeof(bool) } },
+ { "options", new List<object>() { typeof(Hashtable), typeof(Hashtable) } },
+ { "removed_in_version", new List<object>() { null, typeof(string) } },
+ { "removed_at_date", new List<object>() { null, typeof(DateTime) } },
+ { "removed_from_collection", new List<object>() { null, typeof(string) } },
+ { "required", new List<object>() { false, typeof(bool) } },
+ { "required_by", new List<object>() { typeof(Hashtable), typeof(Hashtable) } },
+ { "required_if", new List<object>() { typeof(List<List<object>>), typeof(List<object>) } },
+ { "required_one_of", new List<object>() { typeof(List<List<string>>), typeof(List<object>) } },
+ { "required_together", new List<object>() { typeof(List<List<string>>), typeof(List<object>) } },
+ { "supports_check_mode", new List<object>() { false, typeof(bool) } },
+ { "type", new List<object>() { "str", null } },
+ };
+ private Dictionary<string, Delegate> optionTypes = new Dictionary<string, Delegate>()
+ {
+ { "bool", new Func<object, bool>(ParseBool) },
+ { "dict", new Func<object, Dictionary<string, object>>(ParseDict) },
+ { "float", new Func<object, float>(ParseFloat) },
+ { "int", new Func<object, int>(ParseInt) },
+ { "json", new Func<object, string>(ParseJson) },
+ { "list", new Func<object, List<object>>(ParseList) },
+ { "path", new Func<object, string>(ParsePath) },
+ { "raw", new Func<object, object>(ParseRaw) },
+ { "sid", new Func<object, SecurityIdentifier>(ParseSid) },
+ { "str", new Func<object, string>(ParseStr) },
+ };
+
+ public Dictionary<string, object> Diff = new Dictionary<string, object>();
+ public IDictionary Params = null;
+ public Dictionary<string, object> Result = new Dictionary<string, object>() { { "changed", false } };
+
+ public bool CheckMode { get; private set; }
+ public bool DebugMode { get; private set; }
+ public bool DiffMode { get; private set; }
+ public bool KeepRemoteFiles { get; private set; }
+ public string ModuleName { get; private set; }
+ public bool NoLog { get; private set; }
+ public int Verbosity { get; private set; }
+ public string AnsibleVersion { get; private set; }
+
+ public string Tmpdir
+ {
+ get
+ {
+ if (tmpdir == null)
+ {
+ SecurityIdentifier user = WindowsIdentity.GetCurrent().User;
+ DirectorySecurity dirSecurity = new DirectorySecurity();
+ dirSecurity.SetOwner(user);
+ dirSecurity.SetAccessRuleProtection(true, false); // disable inheritance rules
+ FileSystemAccessRule ace = new FileSystemAccessRule(user, FileSystemRights.FullControl,
+ InheritanceFlags.ContainerInherit | InheritanceFlags.ObjectInherit,
+ PropagationFlags.None, AccessControlType.Allow);
+ dirSecurity.AddAccessRule(ace);
+
+ string baseDir = Path.GetFullPath(Environment.ExpandEnvironmentVariables(remoteTmp));
+ if (!Directory.Exists(baseDir))
+ {
+ string failedMsg = null;
+ try
+ {
+#if CORECLR
+ DirectoryInfo createdDir = Directory.CreateDirectory(baseDir);
+ FileSystemAclExtensions.SetAccessControl(createdDir, dirSecurity);
+#else
+ Directory.CreateDirectory(baseDir, dirSecurity);
+#endif
+ }
+ catch (Exception e)
+ {
+ failedMsg = String.Format("Failed to create base tmpdir '{0}': {1}", baseDir, e.Message);
+ }
+
+ if (failedMsg != null)
+ {
+ string envTmp = Path.GetTempPath();
+ Warn(String.Format("Unable to use '{0}' as temporary directory, falling back to system tmp '{1}': {2}", baseDir, envTmp, failedMsg));
+ baseDir = envTmp;
+ }
+ else
+ {
+ NTAccount currentUser = (NTAccount)user.Translate(typeof(NTAccount));
+ string warnMsg = String.Format("Module remote_tmp {0} did not exist and was created with FullControl to {1}, ", baseDir, currentUser.ToString());
+ warnMsg += "this may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct permissions manually";
+ Warn(warnMsg);
+ }
+ }
+
+ string dateTime = DateTime.Now.ToFileTime().ToString();
+ string dirName = String.Format("ansible-moduletmp-{0}-{1}", dateTime, new Random().Next(0, int.MaxValue));
+ string newTmpdir = Path.Combine(baseDir, dirName);
+#if CORECLR
+ DirectoryInfo tmpdirInfo = Directory.CreateDirectory(newTmpdir);
+ FileSystemAclExtensions.SetAccessControl(tmpdirInfo, dirSecurity);
+#else
+ Directory.CreateDirectory(newTmpdir, dirSecurity);
+#endif
+ tmpdir = newTmpdir;
+
+ if (!KeepRemoteFiles)
+ cleanupFiles.Add(tmpdir);
+ }
+ return tmpdir;
+ }
+ }
+
+ public AnsibleModule(string[] args, IDictionary argumentSpec, IDictionary[] fragments = null)
+ {
+ // NoLog is not set yet, so we cannot rely on FailJson to sanitize the output
+ // Do the minimum amount to get this running before we actually parse the params
+ Dictionary<string, string> aliases = new Dictionary<string, string>();
+ try
+ {
+ ValidateArgumentSpec(argumentSpec);
+
+ // Merge the fragments if present into the main arg spec.
+ if (fragments != null)
+ {
+ foreach (IDictionary fragment in fragments)
+ {
+ ValidateArgumentSpec(fragment);
+ MergeFragmentSpec(argumentSpec, fragment);
+ }
+ }
+
+ // Used by ansible-test to retrieve the module argument spec, not designed for public use.
+ if (_DebugArgSpec)
+ {
+ // Cannot call exit here because it will be caught with the catch (Exception e) below. Instead
+ // just throw a new exception with a specific message and the exception block will handle it.
+ ScriptBlock.Create("Set-Variable -Name ansibleTestArgSpec -Value $args[0] -Scope Global"
+ ).Invoke(argumentSpec);
+ throw new Exception("ansible-test validate-modules check");
+ }
+
+ // Now make sure all the metadata keys are set to their defaults; this must be done after we've
+ // potentially output the arg spec for ansible-test.
+ SetArgumentSpecDefaults(argumentSpec);
+
+ Params = GetParams(args);
+ aliases = GetAliases(argumentSpec, Params);
+ SetNoLogValues(argumentSpec, Params);
+ }
+ catch (Exception e)
+ {
+ if (e.Message == "ansible-test validate-modules check")
+ Exit(0);
+
+ Dictionary<string, object> result = new Dictionary<string, object>
+ {
+ { "failed", true },
+ { "msg", String.Format("internal error: {0}", e.Message) },
+ { "exception", e.ToString() }
+ };
+ WriteLine(ToJson(result));
+ Exit(1);
+ }
+
+ // Initialise public properties to the defaults before we parse the actual inputs
+ CheckMode = false;
+ DebugMode = false;
+ DiffMode = false;
+ KeepRemoteFiles = false;
+ ModuleName = "undefined win module";
+ NoLog = (bool)argumentSpec["no_log"];
+ Verbosity = 0;
+ AppDomain.CurrentDomain.ProcessExit += CleanupFiles;
+
+ List<string> legalInputs = passVars.Keys.Select(v => "_ansible_" + v).ToList();
+ legalInputs.AddRange(((IDictionary)argumentSpec["options"]).Keys.Cast<string>().ToList());
+ legalInputs.AddRange(aliases.Keys.Cast<string>().ToList());
+ CheckArguments(argumentSpec, Params, legalInputs);
+
+ // Set an Ansible-friendly invocation value in the result object
+ Dictionary<string, object> invocation = new Dictionary<string, object>() { { "module_args", Params } };
+ Result["invocation"] = RemoveNoLogValues(invocation, noLogValues);
+
+ if (!NoLog)
+ LogEvent(String.Format("Invoked with:\r\n {0}", FormatLogData(Params, 2)), sanitise: false);
+ }
+
+ public static AnsibleModule Create(string[] args, IDictionary argumentSpec, IDictionary[] fragments = null)
+ {
+ return new AnsibleModule(args, argumentSpec, fragments);
+ }
+
+ public void Debug(string message)
+ {
+ if (DebugMode)
+ LogEvent(String.Format("[DEBUG] {0}", message));
+ }
+
+ public void Deprecate(string message, string version)
+ {
+ Deprecate(message, version, null);
+ }
+
+ public void Deprecate(string message, string version, string collectionName)
+ {
+ deprecations.Add(new Dictionary<string, string>() {
+ { "msg", message }, { "version", version }, { "collection_name", collectionName } });
+ LogEvent(String.Format("[DEPRECATION WARNING] {0} {1}", message, version));
+ }
+
+ public void Deprecate(string message, DateTime date)
+ {
+ Deprecate(message, date, null);
+ }
+
+ public void Deprecate(string message, DateTime date, string collectionName)
+ {
+ string isoDate = date.ToString("yyyy-MM-dd");
+ deprecations.Add(new Dictionary<string, string>() {
+ { "msg", message }, { "date", isoDate }, { "collection_name", collectionName } });
+ LogEvent(String.Format("[DEPRECATION WARNING] {0} {1}", message, isoDate));
+ }
+
+ public void ExitJson()
+ {
+ WriteLine(GetFormattedResults(Result));
+ CleanupFiles(null, null);
+ Exit(0);
+ }
+
+ public void FailJson(string message) { FailJson(message, null, null); }
+ public void FailJson(string message, ErrorRecord psErrorRecord) { FailJson(message, psErrorRecord, null); }
+ public void FailJson(string message, Exception exception) { FailJson(message, null, exception); }
+ private void FailJson(string message, ErrorRecord psErrorRecord, Exception exception)
+ {
+ Result["failed"] = true;
+ Result["msg"] = RemoveNoLogValues(message, noLogValues);
+
+
+ if (!Result.ContainsKey("exception") && (Verbosity > 2 || DebugMode))
+ {
+ if (psErrorRecord != null)
+ {
+ string traceback = String.Format("{0}\r\n{1}", psErrorRecord.ToString(), psErrorRecord.InvocationInfo.PositionMessage);
+ traceback += String.Format("\r\n + CategoryInfo : {0}", psErrorRecord.CategoryInfo.ToString());
+ traceback += String.Format("\r\n + FullyQualifiedErrorId : {0}", psErrorRecord.FullyQualifiedErrorId.ToString());
+ traceback += String.Format("\r\n\r\nScriptStackTrace:\r\n{0}", psErrorRecord.ScriptStackTrace);
+ Result["exception"] = traceback;
+ }
+ else if (exception != null)
+ Result["exception"] = exception.ToString();
+ }
+
+ WriteLine(GetFormattedResults(Result));
+ CleanupFiles(null, null);
+ Exit(1);
+ }
+
+ public void LogEvent(string message, EventLogEntryType logEntryType = EventLogEntryType.Information, bool sanitise = true)
+ {
+ if (NoLog)
+ return;
+
+ string logSource = "Ansible";
+ bool logSourceExists = false;
+ try
+ {
+ logSourceExists = EventLog.SourceExists(logSource);
+ }
+ catch (System.Security.SecurityException) { } // non admin users may not have permission
+
+ if (!logSourceExists)
+ {
+ try
+ {
+ EventLog.CreateEventSource(logSource, "Application");
+ }
+ catch (System.Security.SecurityException)
+ {
+ // Cannot call Warn as that calls LogEvent and we get stuck in a loop
+ warnings.Add(String.Format("Access error when creating EventLog source {0}, logging to the Application source instead", logSource));
+ logSource = "Application";
+ }
+ }
+ if (sanitise)
+ message = (string)RemoveNoLogValues(message, noLogValues);
+ message = String.Format("{0} - {1}", ModuleName, message);
+
+ using (EventLog eventLog = new EventLog("Application"))
+ {
+ eventLog.Source = logSource;
+ try
+ {
+ eventLog.WriteEntry(message, logEntryType, 0);
+ }
+ catch (System.InvalidOperationException) { } // Ignore permission errors on the Application event log
+ catch (System.Exception e)
+ {
+ // Cannot call Warn as that calls LogEvent and we get stuck in a loop
+ warnings.Add(String.Format("Unknown error when creating event log entry: {0}", e.Message));
+ }
+ }
+ }
+
+ public void Warn(string message)
+ {
+ warnings.Add(message);
+ LogEvent(String.Format("[WARNING] {0}", message), EventLogEntryType.Warning);
+ }
+
+ public static object FromJson(string json) { return FromJson<object>(json); }
+ public static T FromJson<T>(string json)
+ {
+#if CORECLR
+ return JsonConvert.DeserializeObject<T>(json);
+#else
+ JavaScriptSerializer jss = new JavaScriptSerializer();
+ jss.MaxJsonLength = int.MaxValue;
+ jss.RecursionLimit = int.MaxValue;
+ return jss.Deserialize<T>(json);
+#endif
+ }
+
+ public static string ToJson(object obj)
+ {
+ // Using PowerShell to serialize the JSON is preferable to the native .NET libraries as it handles
+ // PS Objects a lot better than the alternatives. When debugging in Visual Studio we fall back to
+ // the other libraries, as we won't be dealing with PowerShell objects there.
+ if (Runspace.DefaultRunspace != null)
+ {
+ PSObject rawOut = ScriptBlock.Create("ConvertTo-Json -InputObject $args[0] -Depth 99 -Compress").Invoke(obj)[0];
+ return rawOut.BaseObject as string;
+ }
+ else
+ {
+#if CORECLR
+ return JsonConvert.SerializeObject(obj);
+#else
+ JavaScriptSerializer jss = new JavaScriptSerializer();
+ jss.MaxJsonLength = int.MaxValue;
+ jss.RecursionLimit = int.MaxValue;
+ return jss.Serialize(obj);
+#endif
+ }
+ }
+
+ public static IDictionary GetParams(string[] args)
+ {
+ if (args.Length > 0)
+ {
+ string inputJson = File.ReadAllText(args[0]);
+ Dictionary<string, object> rawParams = FromJson<Dictionary<string, object>>(inputJson);
+ if (!rawParams.ContainsKey("ANSIBLE_MODULE_ARGS"))
+ throw new ArgumentException("Module was unable to get ANSIBLE_MODULE_ARGS value from the argument path json");
+ return (IDictionary)rawParams["ANSIBLE_MODULE_ARGS"];
+ }
+ else
+ {
+ // $complex_args is already a Hashtable, no need to waste time converting to a dictionary
+ PSObject rawArgs = ScriptBlock.Create("$complex_args").Invoke()[0];
+ return rawArgs.BaseObject as Hashtable;
+ }
+ }
+
+ public static bool ParseBool(object value)
+ {
+ if (value.GetType() == typeof(bool))
+ return (bool)value;
+
+ List<string> booleans = new List<string>();
+ booleans.AddRange(BOOLEANS_TRUE);
+ booleans.AddRange(BOOLEANS_FALSE);
+
+ string stringValue = ParseStr(value).ToLowerInvariant().Trim();
+ if (BOOLEANS_TRUE.Contains(stringValue))
+ return true;
+ else if (BOOLEANS_FALSE.Contains(stringValue))
+ return false;
+
+ string msg = String.Format("The value '{0}' is not a valid boolean. Valid booleans include: {1}",
+ stringValue, String.Join(", ", booleans));
+ throw new ArgumentException(msg);
+ }
+
+ public static Dictionary<string, object> ParseDict(object value)
+ {
+ Type valueType = value.GetType();
+ if (valueType == typeof(Dictionary<string, object>))
+ return (Dictionary<string, object>)value;
+ else if (value is IDictionary)
+ return ((IDictionary)value).Cast<DictionaryEntry>().ToDictionary(kvp => (string)kvp.Key, kvp => kvp.Value);
+ else if (valueType == typeof(string))
+ {
+ string stringValue = (string)value;
+ if (stringValue.StartsWith("{") && stringValue.EndsWith("}"))
+ return FromJson<Dictionary<string, object>>((string)value);
+ else if (stringValue.IndexOfAny(new char[1] { '=' }) != -1)
+ {
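+ // Tokenize a key=value string, honouring single/double quotes and
+ // backslash escapes, e.g. a='1 2' b="3" c=4 -> three fields.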
+ List<string> fields = new List<string>();
+ List<char> fieldBuffer = new List<char>();
+ char? inQuote = null;
+ bool inEscape = false;
+ string field;
+
+ foreach (char c in stringValue.ToCharArray())
+ {
+ if (inEscape)
+ {
+ fieldBuffer.Add(c);
+ inEscape = false;
+ }
+ else if (c == '\\')
+ inEscape = true;
+ else if (inQuote == null && (c == '\'' || c == '"'))
+ inQuote = c;
+ else if (inQuote != null && c == inQuote)
+ inQuote = null;
+ else if (inQuote == null && (c == ',' || c == ' '))
+ {
+ field = String.Join("", fieldBuffer);
+ if (field != "")
+ fields.Add(field);
+ fieldBuffer = new List<char>();
+ }
+ else
+ fieldBuffer.Add(c);
+ }
+
+ field = String.Join("", fieldBuffer);
+ if (field != "")
+ fields.Add(field);
+
+ return fields.Distinct().Select(i => i.Split(new[] { '=' }, 2)).ToDictionary(i => i[0], i => i.Length > 1 ? (object)i[1] : null);
+ }
+ else
+ throw new ArgumentException("string cannot be converted to a dict, must either be a JSON string or in the key=value form");
+ }
+
+ throw new ArgumentException(String.Format("{0} cannot be converted to a dict", valueType.FullName));
+ }
+
+ public static float ParseFloat(object value)
+ {
+ if (value.GetType() == typeof(float))
+ return (float)value;
+
+ string valueStr = ParseStr(value);
+ return float.Parse(valueStr);
+ }
+
+ public static int ParseInt(object value)
+ {
+ Type valueType = value.GetType();
+ if (valueType == typeof(int))
+ return (int)value;
+ else
+ return Int32.Parse(ParseStr(value));
+ }
+
+ public static string ParseJson(object value)
+ {
+ // mostly used to ensure a dict becomes a json string, as it may
+ // have been converted on the controller side
+ Type valueType = value.GetType();
+ if (value is IDictionary)
+ return ToJson(value);
+ else if (valueType == typeof(string))
+ return (string)value;
+ else
+ throw new ArgumentException(String.Format("{0} cannot be converted to json", valueType.FullName));
+ }
+
+ public static List<object> ParseList(object value)
+ {
+ if (value == null)
+ return null;
+
+ Type valueType = value.GetType();
+ if (valueType.IsGenericType && valueType.GetGenericTypeDefinition() == typeof(List<>))
+ return (List<object>)value;
+ else if (valueType == typeof(ArrayList))
+ return ((ArrayList)value).Cast<object>().ToList();
+ else if (valueType.IsArray)
+ return ((object[])value).ToList();
+ else if (valueType == typeof(string))
+ return ((string)value).Split(',').Select(s => s.Trim()).ToList<object>();
+ else if (valueType == typeof(int))
+ return new List<object>() { value };
+ else
+ throw new ArgumentException(String.Format("{0} cannot be converted to a list", valueType.FullName));
+ }
+
+ public static string ParsePath(object value)
+ {
+ string stringValue = ParseStr(value);
+
+ // do not validate or expand the env vars if it starts with \\?\ as
+ // it is a special path designed for the NT kernel to interpret
+ if (stringValue.StartsWith(@"\\?\"))
+ return stringValue;
+
+ stringValue = Environment.ExpandEnvironmentVariables(stringValue);
+ if (stringValue.IndexOfAny(Path.GetInvalidPathChars()) != -1)
+ throw new ArgumentException("string value contains invalid path characters, cannot convert to path");
+
+ // will throw an exception if it contains any invalid chars
+ Path.GetFullPath(stringValue);
+ return stringValue;
+ }
+
+ public static object ParseRaw(object value) { return value; }
+
+ public static SecurityIdentifier ParseSid(object value)
+ {
+ string stringValue = ParseStr(value);
+
+ try
+ {
+ return new SecurityIdentifier(stringValue);
+ }
+ catch (ArgumentException) { } // ignore failures; the string may not have been a SID
+
+ NTAccount account = new NTAccount(stringValue);
+ return (SecurityIdentifier)account.Translate(typeof(SecurityIdentifier));
+ }
+
+ public static string ParseStr(object value) { return value.ToString(); }
+
+ private void ValidateArgumentSpec(IDictionary argumentSpec)
+ {
+ Dictionary<string, object> changedValues = new Dictionary<string, object>();
+ foreach (DictionaryEntry entry in argumentSpec)
+ {
+ string key = (string)entry.Key;
+
+ // validate the key is a valid argument spec key
+ if (!specDefaults.ContainsKey(key))
+ {
+ string msg = String.Format("argument spec entry contains an invalid key '{0}', valid keys: {1}",
+ key, String.Join(", ", specDefaults.Keys));
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+
+ // ensure the value is cast to the type we expect
+ Type optionType = null;
+ if (entry.Value != null)
+ optionType = (Type)specDefaults[key][1];
+ if (optionType != null)
+ {
+ Type actualType = entry.Value.GetType();
+ bool invalid = false;
+ if (optionType.IsGenericType && optionType.GetGenericTypeDefinition() == typeof(List<>))
+ {
+ // verify the actual type is not just a single value of the list type
+ Type entryType = optionType.GetGenericArguments()[0];
+ object[] arrayElementTypes = new object[]
+ {
+ null, // ArrayList does not have an ElementType
+ entryType,
+ typeof(object), // Hope the object is actually entryType or it can at least be cast.
+ };
+
+ bool isArray = entry.Value is IList && arrayElementTypes.Contains(actualType.GetElementType());
+ if (actualType == entryType || isArray)
+ {
+ object rawArray;
+ if (isArray)
+ rawArray = entry.Value;
+ else
+ rawArray = new object[1] { entry.Value };
+
+ MethodInfo castMethod = typeof(Enumerable).GetMethod("Cast").MakeGenericMethod(entryType);
+ MethodInfo toListMethod = typeof(Enumerable).GetMethod("ToList").MakeGenericMethod(entryType);
+
+ var enumerable = castMethod.Invoke(null, new object[1] { rawArray });
+ var newList = toListMethod.Invoke(null, new object[1] { enumerable });
+ changedValues.Add(key, newList);
+ }
+ else if (actualType != optionType && !(actualType == typeof(List<object>)))
+ invalid = true;
+ }
+ else
+ invalid = actualType != optionType;
+
+ if (invalid)
+ {
+ string msg = String.Format("argument spec for '{0}' did not match expected type {1}: actual type {2}",
+ key, optionType.FullName, actualType.FullName);
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ }
+
+ // recursively validate the spec
+ if (key == "options" && entry.Value != null)
+ {
+ IDictionary optionsSpec = (IDictionary)entry.Value;
+ foreach (DictionaryEntry optionEntry in optionsSpec)
+ {
+ optionsContext.Add((string)optionEntry.Key);
+ IDictionary optionMeta = (IDictionary)optionEntry.Value;
+ ValidateArgumentSpec(optionMeta);
+ optionsContext.RemoveAt(optionsContext.Count - 1);
+ }
+ }
+
+ // validate the type and elements key type values are known types
+ if (key == "type" || key == "elements" && entry.Value != null)
+ {
+ Type valueType = entry.Value.GetType();
+ if (valueType == typeof(string))
+ {
+ string typeValue = (string)entry.Value;
+ if (!optionTypes.ContainsKey(typeValue))
+ {
+ string msg = String.Format("{0} '{1}' is unsupported", key, typeValue);
+ msg = String.Format("{0}. Valid types are: {1}", FormatOptionsContext(msg, " - "), String.Join(", ", optionTypes.Keys));
+ throw new ArgumentException(msg);
+ }
+ }
+ else if (!(entry.Value is Delegate))
+ {
+ string msg = String.Format("{0} must either be a string or delegate, was: {1}", key, valueType.FullName);
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ }
+ }
+
+            // Outside of the spec iterator, change the values that were cast above
+ foreach (KeyValuePair<string, object> changedValue in changedValues)
+ argumentSpec[changedValue.Key] = changedValue.Value;
+ }
+
+ private void MergeFragmentSpec(IDictionary argumentSpec, IDictionary fragment)
+ {
+ foreach (DictionaryEntry fragmentEntry in fragment)
+ {
+ string fragmentKey = fragmentEntry.Key.ToString();
+
+ if (argumentSpec.Contains(fragmentKey))
+ {
+ // We only want to add new list entries and merge dictionary new keys and values. Leave the other
+ // values as is in the argument spec as that takes priority over the fragment.
+ if (fragmentEntry.Value is IDictionary)
+ {
+ MergeFragmentSpec((IDictionary)argumentSpec[fragmentKey], (IDictionary)fragmentEntry.Value);
+ }
+ else if (fragmentEntry.Value is IList)
+ {
+ IList specValue = (IList)argumentSpec[fragmentKey];
+ foreach (object fragmentValue in (IList)fragmentEntry.Value)
+ specValue.Add(fragmentValue);
+ }
+ }
+ else
+ argumentSpec[fragmentKey] = fragmentEntry.Value;
+ }
+ }
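+
+        // Merge behaviour sketch (illustrative values): given
+        //   spec:     { "options": { "a": {...} }, "required_together": [["x", "y"]] }
+        //   fragment: { "options": { "b": {...} }, "required_together": [["z", "w"]], "supports_check_mode": true }
+        // dictionaries gain the fragment's new keys ("b"), lists gain the
+        // fragment's entries (["z", "w"]), brand new keys are copied across,
+        // and any scalar already present in the spec keeps priority.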
+
+ private void SetArgumentSpecDefaults(IDictionary argumentSpec)
+ {
+ foreach (KeyValuePair<string, List<object>> metadataEntry in specDefaults)
+ {
+ List<object> defaults = metadataEntry.Value;
+ object defaultValue = defaults[0];
+ if (defaultValue != null && defaultValue.GetType() == typeof(Type).GetType())
+ defaultValue = Activator.CreateInstance((Type)defaultValue);
+
+ if (!argumentSpec.Contains(metadataEntry.Key))
+ argumentSpec[metadataEntry.Key] = defaultValue;
+ }
+
+ // Recursively set the defaults for any inner options.
+ foreach (DictionaryEntry entry in argumentSpec)
+ {
+ if (entry.Value == null || entry.Key.ToString() != "options")
+ continue;
+
+ IDictionary optionsSpec = (IDictionary)entry.Value;
+ foreach (DictionaryEntry optionEntry in optionsSpec)
+ {
+ optionsContext.Add((string)optionEntry.Key);
+ IDictionary optionMeta = (IDictionary)optionEntry.Value;
+ SetArgumentSpecDefaults(optionMeta);
+ optionsContext.RemoveAt(optionsContext.Count - 1);
+ }
+ }
+ }
+
+ private Dictionary<string, string> GetAliases(IDictionary argumentSpec, IDictionary parameters)
+ {
+ Dictionary<string, string> aliasResults = new Dictionary<string, string>();
+
+ foreach (DictionaryEntry entry in (IDictionary)argumentSpec["options"])
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ List<string> aliases = (List<string>)v["aliases"];
+ object defaultValue = v["default"];
+ bool required = (bool)v["required"];
+
+ if (defaultValue != null && required)
+ throw new ArgumentException(String.Format("required and default are mutually exclusive for {0}", k));
+
+ foreach (string alias in aliases)
+ {
+ aliasResults.Add(alias, k);
+ if (parameters.Contains(alias))
+ parameters[k] = parameters[alias];
+ }
+
+ List<Hashtable> deprecatedAliases = (List<Hashtable>)v["deprecated_aliases"];
+ foreach (Hashtable depInfo in deprecatedAliases)
+ {
+ foreach (string keyName in new List<string> { "name" })
+ {
+ if (!depInfo.ContainsKey(keyName))
+ {
+ string msg = String.Format("{0} is required in a deprecated_aliases entry", keyName);
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ }
+ if (!depInfo.ContainsKey("version") && !depInfo.ContainsKey("date"))
+ {
+ string msg = "One of version or date is required in a deprecated_aliases entry";
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ if (depInfo.ContainsKey("version") && depInfo.ContainsKey("date"))
+ {
+ string msg = "Only one of version or date is allowed in a deprecated_aliases entry";
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ if (depInfo.ContainsKey("date") && depInfo["date"].GetType() != typeof(DateTime))
+ {
+ string msg = "A deprecated_aliases date must be a DateTime object";
+ throw new ArgumentException(FormatOptionsContext(msg, " - "));
+ }
+ string collectionName = null;
+ if (depInfo.ContainsKey("collection_name"))
+ {
+ collectionName = (string)depInfo["collection_name"];
+ }
+ string aliasName = (string)depInfo["name"];
+
+ if (parameters.Contains(aliasName))
+ {
+ string msg = String.Format("Alias '{0}' is deprecated. See the module docs for more information", aliasName);
+ if (depInfo.ContainsKey("version"))
+ {
+ string depVersion = (string)depInfo["version"];
+ Deprecate(FormatOptionsContext(msg, " - "), depVersion, collectionName);
+ }
+ if (depInfo.ContainsKey("date"))
+ {
+ DateTime depDate = (DateTime)depInfo["date"];
+ Deprecate(FormatOptionsContext(msg, " - "), depDate, collectionName);
+ }
+ }
+ }
+ }
+
+ return aliasResults;
+ }
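+
+        // Illustrative alias handling (hypothetical option spec):
+        //   options:    { "path": { "aliases": ["dest", "name"] } }
+        //   parameters: { "dest": "C:\\temp" }
+        // GetAliases returns { "dest": "path", "name": "path" } and copies the
+        // supplied value so parameters["path"] == "C:\\temp".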
+
+ private void SetNoLogValues(IDictionary argumentSpec, IDictionary parameters)
+ {
+ foreach (DictionaryEntry entry in (IDictionary)argumentSpec["options"])
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ if ((bool)v["no_log"])
+ {
+ object noLogObject = parameters.Contains(k) ? parameters[k] : null;
+ string noLogString = noLogObject == null ? "" : noLogObject.ToString();
+ if (!String.IsNullOrEmpty(noLogString))
+ noLogValues.Add(noLogString);
+ }
+ string collectionName = null;
+ if (v.ContainsKey("removed_from_collection"))
+ {
+ collectionName = (string)v["removed_from_collection"];
+ }
+
+ object removedInVersion = v["removed_in_version"];
+ if (removedInVersion != null && parameters.Contains(k))
+ Deprecate(String.Format("Param '{0}' is deprecated. See the module docs for more information", k),
+ removedInVersion.ToString(), collectionName);
+
+ object removedAtDate = v["removed_at_date"];
+ if (removedAtDate != null && parameters.Contains(k))
+ Deprecate(String.Format("Param '{0}' is deprecated. See the module docs for more information", k),
+ (DateTime)removedAtDate, collectionName);
+ }
+ }
+
+ private void CheckArguments(IDictionary spec, IDictionary param, List<string> legalInputs)
+ {
+ // initially parse the params and check for unsupported ones and set internal vars
+ CheckUnsupportedArguments(param, legalInputs);
+
+ // Only run this check if we are at the root argument (optionsContext.Count == 0)
+ if (CheckMode && !(bool)spec["supports_check_mode"] && optionsContext.Count == 0)
+ {
+ Result["skipped"] = true;
+ Result["msg"] = String.Format("remote module ({0}) does not support check mode", ModuleName);
+ ExitJson();
+ }
+ IDictionary optionSpec = (IDictionary)spec["options"];
+
+ CheckMutuallyExclusive(param, (IList)spec["mutually_exclusive"]);
+ CheckRequiredArguments(optionSpec, param);
+
+ // set the parameter types based on the type spec value
+ foreach (DictionaryEntry entry in optionSpec)
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ object value = param.Contains(k) ? param[k] : null;
+ if (value != null)
+ {
+ // convert the current value to the wanted type
+ Delegate typeConverter;
+ string type;
+ if (v["type"].GetType() == typeof(string))
+ {
+ type = (string)v["type"];
+ typeConverter = optionTypes[type];
+ }
+ else
+ {
+ type = "delegate";
+ typeConverter = (Delegate)v["type"];
+ }
+
+ try
+ {
+ value = typeConverter.DynamicInvoke(value);
+ param[k] = value;
+ }
+ catch (Exception e)
+ {
+ string msg = String.Format("argument for {0} is of type {1} and we were unable to convert to {2}: {3}",
+ k, value.GetType(), type, e.InnerException.Message);
+ FailJson(FormatOptionsContext(msg));
+ }
+
+ // ensure it matches the choices if there are choices set
+ List<string> choices = ((List<object>)v["choices"]).Select(x => x.ToString()).Cast<string>().ToList();
+ if (choices.Count > 0)
+ {
+ List<string> values;
+ string choiceMsg;
+ if (type == "list")
+ {
+ values = ((List<object>)value).Select(x => x.ToString()).Cast<string>().ToList();
+ choiceMsg = "one or more of";
+ }
+ else
+ {
+ values = new List<string>() { value.ToString() };
+ choiceMsg = "one of";
+ }
+
+ List<string> diffList = values.Except(choices, StringComparer.OrdinalIgnoreCase).ToList();
+ List<string> caseDiffList = values.Except(choices).ToList();
+ if (diffList.Count > 0)
+ {
+ string msg = String.Format("value of {0} must be {1}: {2}. Got no match for: {3}",
+ k, choiceMsg, String.Join(", ", choices), String.Join(", ", diffList));
+ FailJson(FormatOptionsContext(msg));
+ }
+ /*
+ For now we will just silently accept case insensitive choices, uncomment this if we want to add it back in
+ else if (caseDiffList.Count > 0)
+ {
+ // For backwards compatibility with Legacy.psm1 we need to be matching choices that are not case sensitive.
+ // We will warn the user it was case insensitive and tell them this will become case sensitive in the future.
+ string msg = String.Format(
+ "value of {0} was a case insensitive match of {1}: {2}. Checking of choices will be case sensitive in a future Ansible release. Case insensitive matches were: {3}",
+ k, choiceMsg, String.Join(", ", choices), String.Join(", ", caseDiffList.Select(x => RemoveNoLogValues(x, noLogValues)))
+ );
+ Warn(FormatOptionsContext(msg));
+ }*/
+ }
+ }
+ }
+
+ CheckRequiredTogether(param, (IList)spec["required_together"]);
+ CheckRequiredOneOf(param, (IList)spec["required_one_of"]);
+ CheckRequiredIf(param, (IList)spec["required_if"]);
+ CheckRequiredBy(param, (IDictionary)spec["required_by"]);
+
+ // finally ensure all missing parameters are set to null and handle sub options
+ foreach (DictionaryEntry entry in optionSpec)
+ {
+ string k = (string)entry.Key;
+ IDictionary v = (IDictionary)entry.Value;
+
+ if (!param.Contains(k))
+ param[k] = null;
+
+ CheckSubOption(param, k, v);
+ }
+ }
+
+ private void CheckUnsupportedArguments(IDictionary param, List<string> legalInputs)
+ {
+ HashSet<string> unsupportedParameters = new HashSet<string>();
+ HashSet<string> caseUnsupportedParameters = new HashSet<string>();
+ List<string> removedParameters = new List<string>();
+
+ foreach (DictionaryEntry entry in param)
+ {
+ string paramKey = (string)entry.Key;
+ if (!legalInputs.Contains(paramKey, StringComparer.OrdinalIgnoreCase))
+ unsupportedParameters.Add(paramKey);
+ else if (!legalInputs.Contains(paramKey))
+ // For backwards compatibility we do not care about the case but we need to warn the users as this will
+ // change in a future Ansible release.
+ caseUnsupportedParameters.Add(paramKey);
+ else if (paramKey.StartsWith("_ansible_"))
+ {
+ removedParameters.Add(paramKey);
+ string key = paramKey.Replace("_ansible_", "");
+ // skip setting NoLog if NoLog is already set to true (set by the module)
+ // or there's no mapping for this key
+ if ((key == "no_log" && NoLog == true) || (passVars[key] == null))
+ continue;
+
+ object value = entry.Value;
+ if (passBools.Contains(key))
+ value = ParseBool(value);
+ else if (passInts.Contains(key))
+ value = ParseInt(value);
+
+ string propertyName = passVars[key];
+ PropertyInfo property = typeof(AnsibleModule).GetProperty(propertyName);
+ FieldInfo field = typeof(AnsibleModule).GetField(propertyName, BindingFlags.NonPublic | BindingFlags.Instance);
+ if (property != null)
+ property.SetValue(this, value, null);
+ else if (field != null)
+ field.SetValue(this, value);
+ else
+ FailJson(String.Format("implementation error: unknown AnsibleModule property {0}", propertyName));
+ }
+ }
+ foreach (string parameter in removedParameters)
+ param.Remove(parameter);
+
+ if (unsupportedParameters.Count > 0)
+ {
+ legalInputs.RemoveAll(x => passVars.Keys.Contains(x.Replace("_ansible_", "")));
+ string msg = String.Format("Unsupported parameters for ({0}) module: {1}", ModuleName, String.Join(", ", unsupportedParameters));
+ msg = String.Format("{0}. Supported parameters include: {1}", FormatOptionsContext(msg), String.Join(", ", legalInputs));
+ FailJson(msg);
+ }
+
+ /*
+ // Uncomment when we want to start warning users around options that are not a case sensitive match to the spec
+ if (caseUnsupportedParameters.Count > 0)
+ {
+ legalInputs.RemoveAll(x => passVars.Keys.Contains(x.Replace("_ansible_", "")));
+ string msg = String.Format("Parameters for ({0}) was a case insensitive match: {1}", ModuleName, String.Join(", ", caseUnsupportedParameters));
+ msg = String.Format("{0}. Module options will become case sensitive in a future Ansible release. Supported parameters include: {1}",
+ FormatOptionsContext(msg), String.Join(", ", legalInputs));
+ Warn(msg);
+ }*/
+
+ // Make sure we convert all the incorrect case params to the ones set by the module spec
+ foreach (string key in caseUnsupportedParameters)
+ {
+ string correctKey = legalInputs[legalInputs.FindIndex(s => s.Equals(key, StringComparison.OrdinalIgnoreCase))];
+ object value = param[key];
+ param.Remove(key);
+ param.Add(correctKey, value);
+ }
+ }
+
+ private void CheckMutuallyExclusive(IDictionary param, IList mutuallyExclusive)
+ {
+ if (mutuallyExclusive == null)
+ return;
+
+ foreach (object check in mutuallyExclusive)
+ {
+ List<string> mutualCheck = ((IList)check).Cast<string>().ToList();
+ int count = 0;
+ foreach (string entry in mutualCheck)
+ if (param.Contains(entry))
+ count++;
+
+ if (count > 1)
+ {
+ string msg = String.Format("parameters are mutually exclusive: {0}", String.Join(", ", mutualCheck));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredArguments(IDictionary spec, IDictionary param)
+ {
+ List<string> missing = new List<string>();
+ foreach (DictionaryEntry entry in spec)
+ {
+ string k = (string)entry.Key;
+ Hashtable v = (Hashtable)entry.Value;
+
+ // set defaults for values not already set
+ object defaultValue = v["default"];
+ if (defaultValue != null && !param.Contains(k))
+ param[k] = defaultValue;
+
+ // check required arguments
+ bool required = (bool)v["required"];
+ if (required && !param.Contains(k))
+ missing.Add(k);
+ }
+ if (missing.Count > 0)
+ {
+ string msg = String.Format("missing required arguments: {0}", String.Join(", ", missing));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+
+ private void CheckRequiredTogether(IDictionary param, IList requiredTogether)
+ {
+ if (requiredTogether == null)
+ return;
+
+ foreach (object check in requiredTogether)
+ {
+ List<string> requiredCheck = ((IList)check).Cast<string>().ToList();
+ List<bool> found = new List<bool>();
+ foreach (string field in requiredCheck)
+ if (param.Contains(field))
+ found.Add(true);
+ else
+ found.Add(false);
+
+ if (found.Contains(true) && found.Contains(false))
+ {
+ string msg = String.Format("parameters are required together: {0}", String.Join(", ", requiredCheck));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredOneOf(IDictionary param, IList requiredOneOf)
+ {
+ if (requiredOneOf == null)
+ return;
+
+ foreach (object check in requiredOneOf)
+ {
+ List<string> requiredCheck = ((IList)check).Cast<string>().ToList();
+ int count = 0;
+ foreach (string field in requiredCheck)
+ if (param.Contains(field))
+ count++;
+
+ if (count == 0)
+ {
+ string msg = String.Format("one of the following is required: {0}", String.Join(", ", requiredCheck));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckRequiredIf(IDictionary param, IList requiredIf)
+ {
+ if (requiredIf == null)
+ return;
+
+ foreach (object check in requiredIf)
+ {
+ IList requiredCheck = (IList)check;
+ List<string> missing = new List<string>();
+ List<string> missingFields = new List<string>();
+ int maxMissingCount = 1;
+ bool oneRequired = false;
+
+            if (requiredCheck.Count < 3 || requiredCheck.Count > 4)
+                FailJson(String.Format("internal error: invalid required_if value count of {0}, expecting 3 or 4 entries", requiredCheck.Count));
+ else if (requiredCheck.Count == 4)
+ oneRequired = (bool)requiredCheck[3];
+
+ string key = (string)requiredCheck[0];
+ object val = requiredCheck[1];
+ IList requirements = (IList)requiredCheck[2];
+
+ if (ParseStr(param[key]) != ParseStr(val))
+ continue;
+
+ string term = "all";
+ if (oneRequired)
+ {
+ maxMissingCount = requirements.Count;
+ term = "any";
+ }
+
+ foreach (string required in requirements.Cast<string>())
+ if (!param.Contains(required))
+ missing.Add(required);
+
+ if (missing.Count >= maxMissingCount)
+ {
+ string msg = String.Format("{0} is {1} but {2} of the following are missing: {3}",
+ key, val.ToString(), term, String.Join(", ", missing));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
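+
+        // required_if entry shapes (illustrative values):
+        //   ["state", "present", ["path"]]
+        //       path is required when state == present (all listed are required)
+        //   ["state", "present", ["path", "src"], true]
+        //       the trailing true means any one of path/src satisfies the check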
+
+ private void CheckRequiredBy(IDictionary param, IDictionary requiredBy)
+ {
+ foreach (DictionaryEntry entry in requiredBy)
+ {
+ string key = (string)entry.Key;
+ if (!param.Contains(key))
+ continue;
+
+ List<string> missing = new List<string>();
+ List<string> requires = ParseList(entry.Value).Cast<string>().ToList();
+ foreach (string required in requires)
+ if (!param.Contains(required))
+ missing.Add(required);
+
+ if (missing.Count > 0)
+ {
+ string msg = String.Format("missing parameter(s) required by '{0}': {1}", key, String.Join(", ", missing));
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ private void CheckSubOption(IDictionary param, string key, IDictionary spec)
+ {
+ object value = param[key];
+
+ string type;
+ if (spec["type"].GetType() == typeof(string))
+ type = (string)spec["type"];
+ else
+ type = "delegate";
+
+ string elements = null;
+ Delegate typeConverter = null;
+ if (spec["elements"] != null && spec["elements"].GetType() == typeof(string))
+ {
+ elements = (string)spec["elements"];
+ typeConverter = optionTypes[elements];
+ }
+ else if (spec["elements"] != null)
+ {
+ elements = "delegate";
+ typeConverter = (Delegate)spec["elements"];
+ }
+
+ if (!(type == "dict" || (type == "list" && elements != null)))
+                // neither a dict nor a list with elements set, so nothing to convert
+ return;
+ else if (type == "list")
+ {
+ // cast each list element to the type specified
+ if (value == null)
+ return;
+
+ List<object> newValue = new List<object>();
+ foreach (object element in (List<object>)value)
+ {
+ if (elements == "dict")
+ newValue.Add(ParseSubSpec(spec, element, key));
+ else
+ {
+ try
+ {
+ object newElement = typeConverter.DynamicInvoke(element);
+ newValue.Add(newElement);
+ }
+ catch (Exception e)
+ {
+ string msg = String.Format("argument for list entry {0} is of type {1} and we were unable to convert to {2}: {3}",
+ key, element.GetType(), elements, e.Message);
+ FailJson(FormatOptionsContext(msg));
+ }
+ }
+ }
+
+ param[key] = newValue;
+ }
+ else
+ param[key] = ParseSubSpec(spec, value, key);
+ }
+
+ private object ParseSubSpec(IDictionary spec, object value, string context)
+ {
+ bool applyDefaults = (bool)spec["apply_defaults"];
+
+ // set entry to an empty dict if apply_defaults is set
+ IDictionary optionsSpec = (IDictionary)spec["options"];
+ if (applyDefaults && optionsSpec.Keys.Count > 0 && value == null)
+ value = new Dictionary<string, object>();
+ else if (optionsSpec.Keys.Count == 0 || value == null)
+ return value;
+
+ optionsContext.Add(context);
+ Dictionary<string, object> newValue = (Dictionary<string, object>)ParseDict(value);
+ Dictionary<string, string> aliases = GetAliases(spec, newValue);
+ SetNoLogValues(spec, newValue);
+
+ List<string> subLegalInputs = optionsSpec.Keys.Cast<string>().ToList();
+ subLegalInputs.AddRange(aliases.Keys.Cast<string>().ToList());
+
+ CheckArguments(spec, newValue, subLegalInputs);
+ optionsContext.RemoveAt(optionsContext.Count - 1);
+ return newValue;
+ }
+
+ private string GetFormattedResults(Dictionary<string, object> result)
+ {
+ if (!result.ContainsKey("invocation"))
+ result["invocation"] = new Dictionary<string, object>() { { "module_args", RemoveNoLogValues(Params, noLogValues) } };
+
+ if (warnings.Count > 0)
+ result["warnings"] = warnings;
+
+ if (deprecations.Count > 0)
+ result["deprecations"] = deprecations;
+
+ if (Diff.Count > 0 && DiffMode)
+ result["diff"] = Diff;
+
+ return ToJson(result);
+ }
+
+ private string FormatLogData(object data, int indentLevel)
+ {
+ if (data == null)
+ return "$null";
+
+ string msg = "";
+ if (data is IList)
+ {
+ string newMsg = "";
+ foreach (object value in (IList)data)
+ {
+ string entryValue = FormatLogData(value, indentLevel + 2);
+ newMsg += String.Format("\r\n{0}- {1}", new String(' ', indentLevel), entryValue);
+ }
+ msg += newMsg;
+ }
+ else if (data is IDictionary)
+ {
+ bool start = true;
+ foreach (DictionaryEntry entry in (IDictionary)data)
+ {
+ string newMsg = FormatLogData(entry.Value, indentLevel + 2);
+ if (!start)
+ msg += String.Format("\r\n{0}", new String(' ', indentLevel));
+ msg += String.Format("{0}: {1}", (string)entry.Key, newMsg);
+ start = false;
+ }
+ }
+ else
+ msg = (string)RemoveNoLogValues(ParseStr(data), noLogValues);
+
+ return msg;
+ }
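+
+        // Rough output sketch (illustrative, modulo Hashtable ordering):
+        //   FormatLogData(new Hashtable { { "user", "bob" }, { "ids", new List<object> { 1, 2 } } }, 0)
+        // renders something like:
+        //   user: bob
+        //   ids:
+        //     - 1
+        //     - 2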
+
+ private object RemoveNoLogValues(object value, HashSet<string> noLogStrings)
+ {
+ Queue<Tuple<object, object>> deferredRemovals = new Queue<Tuple<object, object>>();
+ object newValue = RemoveValueConditions(value, noLogStrings, deferredRemovals);
+
+ while (deferredRemovals.Count > 0)
+ {
+ Tuple<object, object> data = deferredRemovals.Dequeue();
+ object oldData = data.Item1;
+ object newData = data.Item2;
+
+ if (oldData is IDictionary)
+ {
+ foreach (DictionaryEntry entry in (IDictionary)oldData)
+ {
+ object newElement = RemoveValueConditions(entry.Value, noLogStrings, deferredRemovals);
+ ((IDictionary)newData).Add((string)entry.Key, newElement);
+ }
+ }
+ else
+ {
+ foreach (object element in (IList)oldData)
+ {
+ object newElement = RemoveValueConditions(element, noLogStrings, deferredRemovals);
+ ((IList)newData).Add(newElement);
+ }
+ }
+ }
+
+ return newValue;
+ }
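+
+        // Masking sketch (illustrative): with noLogValues = { "secret" },
+        //   RemoveNoLogValues("my secret value", noLogValues) -> "my ******** value"
+        // Nested dicts/lists are walked iteratively via the deferredRemovals
+        // queue rather than by deep recursion.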
+
+ private object RemoveValueConditions(object value, HashSet<string> noLogStrings, Queue<Tuple<object, object>> deferredRemovals)
+ {
+ if (value == null)
+ return value;
+
+ Type valueType = value.GetType();
+ HashSet<Type> numericTypes = new HashSet<Type>
+ {
+ typeof(byte), typeof(sbyte), typeof(short), typeof(ushort), typeof(int), typeof(uint),
+ typeof(long), typeof(ulong), typeof(decimal), typeof(double), typeof(float)
+ };
+
+ if (numericTypes.Contains(valueType) || valueType == typeof(bool))
+ {
+ string valueString = ParseStr(value);
+ if (noLogStrings.Contains(valueString))
+ return "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER";
+ foreach (string omitMe in noLogStrings)
+ if (valueString.Contains(omitMe))
+ return "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER";
+ }
+ else if (valueType == typeof(DateTime))
+ value = ((DateTime)value).ToString("o");
+ else if (value is IList)
+ {
+ List<object> newValue = new List<object>();
+ deferredRemovals.Enqueue(new Tuple<object, object>((IList)value, newValue));
+ value = newValue;
+ }
+ else if (value is IDictionary)
+ {
+ Hashtable newValue = new Hashtable();
+ deferredRemovals.Enqueue(new Tuple<object, object>((IDictionary)value, newValue));
+ value = newValue;
+ }
+ else
+ {
+ string stringValue = value.ToString();
+ if (noLogStrings.Contains(stringValue))
+ return "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER";
+ foreach (string omitMe in noLogStrings)
+ if (stringValue.Contains(omitMe))
+ return (stringValue).Replace(omitMe, "********");
+ value = stringValue;
+ }
+ return value;
+ }
+
+ private void CleanupFiles(object s, EventArgs ev)
+ {
+ foreach (string path in cleanupFiles)
+ {
+ if (File.Exists(path))
+ File.Delete(path);
+ else if (Directory.Exists(path))
+ Directory.Delete(path, true);
+ }
+ cleanupFiles = new List<string>();
+ }
+
+ private string FormatOptionsContext(string msg, string prefix = " ")
+ {
+ if (optionsContext.Count > 0)
+ msg += String.Format("{0}found in {1}", prefix, String.Join(" -> ", optionsContext));
+ return msg;
+ }
+
+ [DllImport("kernel32.dll")]
+ private static extern IntPtr GetConsoleWindow();
+
+ private static void ExitModule(int rc)
+ {
+            // When running in a Runspace, Environment.Exit will kill the entire
+            // process, which is not what we want; detect if we are in a
+            // Runspace and call a ScriptBlock with exit instead.
+ if (Runspace.DefaultRunspace != null)
+ ScriptBlock.Create("Set-Variable -Name LASTEXITCODE -Value $args[0] -Scope Global; exit $args[0]").Invoke(rc);
+ else
+ {
+ // Used for local debugging in Visual Studio
+ if (System.Diagnostics.Debugger.IsAttached)
+ {
+ Console.WriteLine("Press enter to continue...");
+ Console.ReadLine();
+ }
+ Environment.Exit(rc);
+ }
+ }
+
+ private static void WriteLineModule(string line)
+ {
+ Console.WriteLine(line);
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Become.cs b/lib/ansible/module_utils/csharp/Ansible.Become.cs
new file mode 100644
index 00000000..a7434a76
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Become.cs
@@ -0,0 +1,655 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.AccessControl;
+using System.Security.Principal;
+using System.Text;
+using Ansible.AccessToken;
+using Ansible.Process;
+
+namespace Ansible.Become
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct KERB_S4U_LOGON
+ {
+ public UInt32 MessageType;
+ public UInt32 Flags;
+ public LSA_UNICODE_STRING ClientUpn;
+ public LSA_UNICODE_STRING ClientRealm;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Ansi)]
+ public struct LSA_STRING
+ {
+ public UInt16 Length;
+ public UInt16 MaximumLength;
+ [MarshalAs(UnmanagedType.LPStr)] public string Buffer;
+
+ public static implicit operator string(LSA_STRING s)
+ {
+ return s.Buffer;
+ }
+
+ public static implicit operator LSA_STRING(string s)
+ {
+ if (s == null)
+ s = "";
+
+ LSA_STRING lsaStr = new LSA_STRING
+ {
+ Buffer = s,
+ Length = (UInt16)s.Length,
+ MaximumLength = (UInt16)(s.Length + 1),
+ };
+ return lsaStr;
+ }
+ }
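+
+        // Conversion sketch (illustrative): the implicit operator lets LSA_STRING
+        // arguments be written as plain strings in the P/Invoke calls below, e.g.
+        //   NativeHelpers.LSA_STRING name = "ansible";  // Length 7, MaximumLength 8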
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct LSA_UNICODE_STRING
+ {
+ public UInt16 Length;
+ public UInt16 MaximumLength;
+ public IntPtr Buffer;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SECURITY_LOGON_SESSION_DATA
+ {
+ public UInt32 Size;
+ public Luid LogonId;
+ public LSA_UNICODE_STRING UserName;
+ public LSA_UNICODE_STRING LogonDomain;
+ public LSA_UNICODE_STRING AuthenticationPackage;
+ public SECURITY_LOGON_TYPE LogonType;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_SOURCE
+ {
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 8)] public char[] SourceName;
+ public Luid SourceIdentifier;
+ }
+
+ public enum SECURITY_LOGON_TYPE
+ {
+            System = 0, // Used only by the System account
+ Interactive = 2,
+ Network,
+ Batch,
+ Service,
+ Proxy,
+ Unlock,
+ NetworkCleartext,
+ NewCredentials,
+ RemoteInteractive,
+ CachedInteractive,
+ CachedRemoteInteractive,
+ CachedUnlock
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool AllocateLocallyUniqueId(
+ out Luid Luid);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool CreateProcessWithTokenW(
+ SafeNativeHandle hToken,
+ LogonFlags dwLogonFlags,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpApplicationName,
+ StringBuilder lpCommandLine,
+ Process.NativeHelpers.ProcessCreationFlags dwCreationFlags,
+ Process.SafeMemoryBuffer lpEnvironment,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpCurrentDirectory,
+ Process.NativeHelpers.STARTUPINFOEX lpStartupInfo,
+ out Process.NativeHelpers.PROCESS_INFORMATION lpProcessInformation);
+
+ [DllImport("kernel32.dll")]
+ public static extern UInt32 GetCurrentThreadId();
+
+ [DllImport("user32.dll", SetLastError = true)]
+ public static extern NoopSafeHandle GetProcessWindowStation();
+
+ [DllImport("user32.dll", SetLastError = true)]
+ public static extern NoopSafeHandle GetThreadDesktop(
+ UInt32 dwThreadId);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaDeregisterLogonProcess(
+ IntPtr LsaHandle);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaFreeReturnBuffer(
+ IntPtr Buffer);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaGetLogonSessionData(
+ ref Luid LogonId,
+ out SafeLsaMemoryBuffer ppLogonSessionData);
+
+ [DllImport("secur32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern UInt32 LsaLogonUser(
+ SafeLsaHandle LsaHandle,
+ NativeHelpers.LSA_STRING OriginName,
+ LogonType LogonType,
+ UInt32 AuthenticationPackage,
+ IntPtr AuthenticationInformation,
+ UInt32 AuthenticationInformationLength,
+ IntPtr LocalGroups,
+ NativeHelpers.TOKEN_SOURCE SourceContext,
+ out SafeLsaMemoryBuffer ProfileBuffer,
+ out UInt32 ProfileBufferLength,
+ out Luid LogonId,
+ out SafeNativeHandle Token,
+ out IntPtr Quotas,
+ out UInt32 SubStatus);
+
+ [DllImport("secur32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern UInt32 LsaLookupAuthenticationPackage(
+ SafeLsaHandle LsaHandle,
+ NativeHelpers.LSA_STRING PackageName,
+ out UInt32 AuthenticationPackage);
+
+ [DllImport("advapi32.dll")]
+ public static extern UInt32 LsaNtStatusToWinError(
+ UInt32 Status);
+
+ [DllImport("secur32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern UInt32 LsaRegisterLogonProcess(
+ NativeHelpers.LSA_STRING LogonProcessName,
+ out SafeLsaHandle LsaHandle,
+ out IntPtr SecurityMode);
+ }
+
+ internal class SafeLsaHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeLsaHandle() : base(true) { }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ UInt32 res = NativeMethods.LsaDeregisterLogonProcess(handle);
+ return res == 0;
+ }
+ }
+
+ internal class SafeLsaMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeLsaMemoryBuffer() : base(true) { }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ UInt32 res = NativeMethods.LsaFreeReturnBuffer(handle);
+ return res == 0;
+ }
+ }
+
+ internal class NoopSafeHandle : SafeHandle
+ {
+ public NoopSafeHandle() : base(IntPtr.Zero, false) { }
+ public override bool IsInvalid { get { return false; } }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle() { return true; }
+ }
+
+ [Flags]
+ public enum LogonFlags
+ {
+ WithProfile = 0x00000001,
+ NetcredentialsOnly = 0x00000002
+ }
+
+ public class BecomeUtil
+ {
+ private static List<string> SERVICE_SIDS = new List<string>()
+ {
+ "S-1-5-18", // NT AUTHORITY\SYSTEM
+ "S-1-5-19", // NT AUTHORITY\LocalService
+ "S-1-5-20" // NT AUTHORITY\NetworkService
+ };
+ private static int WINDOWS_STATION_ALL_ACCESS = 0x000F037F;
+ private static int DESKTOP_RIGHTS_ALL_ACCESS = 0x000F01FF;
+
+ public static Result CreateProcessAsUser(string username, string password, string command)
+ {
+ return CreateProcessAsUser(username, password, LogonFlags.WithProfile, LogonType.Interactive,
+ null, command, null, null, "");
+ }
+
+ public static Result CreateProcessAsUser(string username, string password, LogonFlags logonFlags, LogonType logonType,
+ string lpApplicationName, string lpCommandLine, string lpCurrentDirectory, IDictionary environment,
+ string stdin)
+ {
+ byte[] stdinBytes;
+ if (String.IsNullOrEmpty(stdin))
+ stdinBytes = new byte[0];
+ else
+ {
+ if (!stdin.EndsWith(Environment.NewLine))
+ stdin += Environment.NewLine;
+ stdinBytes = new UTF8Encoding(false).GetBytes(stdin);
+ }
+ return CreateProcessAsUser(username, password, logonFlags, logonType, lpApplicationName, lpCommandLine,
+ lpCurrentDirectory, environment, stdinBytes);
+ }
+
+ /// <summary>
+ /// Creates a process as another user account. This method will attempt to run as another user with the
+        /// highest possible permissions available. The main privilege required is SeDebugPrivilege; without
+        /// this privilege you can only run as a local or domain user if the username and password are specified.
+ /// </summary>
+ /// <param name="username">The username of the runas user</param>
+ /// <param name="password">The password of the runas user</param>
+ /// <param name="logonFlags">LogonFlags to control how to logon a user when the password is specified</param>
+ /// <param name="logonType">Controls what type of logon is used, this only applies when the password is specified</param>
+ /// <param name="lpApplicationName">The name of the executable or batch file to executable</param>
+ /// <param name="lpCommandLine">The command line to execute, typically this includes lpApplication as the first argument</param>
+ /// <param name="lpCurrentDirectory">The full path to the current directory for the process, null will have the same cwd as the calling process</param>
+ /// <param name="environment">A dictionary of key/value pairs to define the new process environment</param>
+ /// <param name="stdin">Bytes sent to the stdin pipe</param>
+ /// <returns>Ansible.Process.Result object that contains the command output and return code</returns>
+ public static Result CreateProcessAsUser(string username, string password, LogonFlags logonFlags, LogonType logonType,
+ string lpApplicationName, string lpCommandLine, string lpCurrentDirectory, IDictionary environment, byte[] stdin)
+ {
+            // While we use STARTUPINFOEX, setting EXTENDED_STARTUPINFO_PRESENT causes a parameter validation error
+ Process.NativeHelpers.ProcessCreationFlags creationFlags = Process.NativeHelpers.ProcessCreationFlags.CREATE_UNICODE_ENVIRONMENT;
+ Process.NativeHelpers.PROCESS_INFORMATION pi = new Process.NativeHelpers.PROCESS_INFORMATION();
+ Process.NativeHelpers.STARTUPINFOEX si = new Process.NativeHelpers.STARTUPINFOEX();
+ si.startupInfo.dwFlags = Process.NativeHelpers.StartupInfoFlags.USESTDHANDLES;
+
+ SafeFileHandle stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinRead, stdinWrite;
+ ProcessUtil.CreateStdioPipes(si, out stdoutRead, out stdoutWrite, out stderrRead, out stderrWrite,
+ out stdinRead, out stdinWrite);
+ FileStream stdinStream = new FileStream(stdinWrite, FileAccess.Write);
+
+ // $null from PowerShell ends up as an empty string, we need to convert back as an empty string doesn't
+ // make sense for these parameters
+ if (lpApplicationName == "")
+ lpApplicationName = null;
+
+ if (lpCurrentDirectory == "")
+ lpCurrentDirectory = null;
+
+            // A user may have 2 tokens, 1 limited and 1 elevated. GetUserTokens will return both tokens to ensure
+ // we don't close one of the pairs while the process is still running. If the process tries to retrieve
+ // one of the pairs and the token handle is closed then it will fail with ERROR_NO_SUCH_LOGON_SESSION.
+ List<SafeNativeHandle> userTokens = GetUserTokens(username, password, logonType);
+ try
+ {
+ using (Process.SafeMemoryBuffer lpEnvironment = ProcessUtil.CreateEnvironmentPointer(environment))
+ {
+ bool launchSuccess = false;
+ StringBuilder commandLine = new StringBuilder(lpCommandLine);
+ foreach (SafeNativeHandle token in userTokens)
+ {
+ // GetUserTokens could return null if an elevated token could not be retrieved.
+ if (token == null)
+ continue;
+
+ if (NativeMethods.CreateProcessWithTokenW(token, logonFlags, lpApplicationName,
+ commandLine, creationFlags, lpEnvironment, lpCurrentDirectory, si, out pi))
+ {
+ launchSuccess = true;
+ break;
+ }
+ }
+
+ if (!launchSuccess)
+ throw new Process.Win32Exception("CreateProcessWithTokenW() failed");
+ }
+ return ProcessUtil.WaitProcess(stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinStream, stdin,
+ pi.hProcess);
+ }
+ finally
+ {
+ userTokens.Where(t => t != null).ToList().ForEach(t => t.Dispose());
+ }
+ }
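+
+        // Usage sketch (hypothetical credentials; assumes Ansible.Process.Result
+        // exposes StandardOut/StandardError/ExitCode):
+        //   Result res = BecomeUtil.CreateProcessAsUser("MYDOMAIN\\user", "Password123!",
+        //       "whoami.exe /all");
+        //   // res.StandardOut, res.StandardError and res.ExitCode describe the run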
+
+ private static List<SafeNativeHandle> GetUserTokens(string username, string password, LogonType logonType)
+ {
+ List<SafeNativeHandle> userTokens = new List<SafeNativeHandle>();
+
+ SafeNativeHandle systemToken = null;
+ bool impersonated = false;
+ string becomeSid = username;
+ if (logonType != LogonType.NewCredentials)
+ {
+ // If prefixed with .\, we are becoming a local account, strip the prefix
+ if (username.StartsWith(".\\"))
+ username = username.Substring(2);
+
+ NTAccount account = new NTAccount(username);
+ becomeSid = ((SecurityIdentifier)account.Translate(typeof(SecurityIdentifier))).Value;
+
+ // Grant access to the current Windows Station and Desktop to the become user
+ GrantAccessToWindowStationAndDesktop(account);
+
+ // Try and impersonate a SYSTEM token, we need a SYSTEM token to either become a well known service
+ // account or have administrative rights on the become access token.
+ // If we ultimately are becoming the SYSTEM account we want the token with the most privileges available.
+ // https://github.com/ansible/ansible/issues/71453
+ bool mostPrivileges = becomeSid == "S-1-5-18";
+ systemToken = GetPrimaryTokenForUser(new SecurityIdentifier("S-1-5-18"),
+ new List<string>() { "SeTcbPrivilege" }, mostPrivileges);
+ if (systemToken != null)
+ {
+ try
+ {
+ TokenUtil.ImpersonateToken(systemToken);
+ impersonated = true;
+ }
+ catch (Process.Win32Exception) { } // We tried, just rely on current user's permissions.
+ }
+ }
+
+ // We require impersonation if becoming a service sid or becoming a user without a password
+ if (!impersonated && (SERVICE_SIDS.Contains(becomeSid) || String.IsNullOrEmpty(password)))
+ throw new Exception("Failed to get token for NT AUTHORITY\\SYSTEM required for become as a service account or an account without a password");
+
+ try
+ {
+ if (becomeSid == "S-1-5-18")
+ userTokens.Add(systemToken);
+                    // Cannot use String.IsNullOrEmpty() as an empty string is an account that doesn't have a password.
+ // We only use S4U if no password was defined or it was null
+ else if (!SERVICE_SIDS.Contains(becomeSid) && password == null && logonType != LogonType.NewCredentials)
+ {
+ // If no password was specified, try and duplicate an existing token for that user or use S4U to
+ // generate one without network credentials
+ SecurityIdentifier sid = new SecurityIdentifier(becomeSid);
+ SafeNativeHandle becomeToken = GetPrimaryTokenForUser(sid);
+ if (becomeToken != null)
+ {
+ userTokens.Add(GetElevatedToken(becomeToken));
+ userTokens.Add(becomeToken);
+ }
+ else
+ {
+ becomeToken = GetS4UTokenForUser(sid, logonType);
+ userTokens.Add(null);
+ userTokens.Add(becomeToken);
+ }
+ }
+ else
+ {
+ string domain = null;
+ switch (becomeSid)
+ {
+ case "S-1-5-19":
+ logonType = LogonType.Service;
+ domain = "NT AUTHORITY";
+ username = "LocalService";
+ break;
+ case "S-1-5-20":
+ logonType = LogonType.Service;
+ domain = "NT AUTHORITY";
+ username = "NetworkService";
+ break;
+ default:
+ // Trying to become a local or domain account
+ if (username.Contains(@"\"))
+ {
+ string[] userSplit = username.Split(new char[1] { '\\' }, 2);
+ domain = userSplit[0];
+ username = userSplit[1];
+ }
+ else if (!username.Contains("@"))
+ domain = ".";
+ break;
+ }
+
+ SafeNativeHandle hToken = TokenUtil.LogonUser(username, domain, password, logonType,
+ LogonProvider.Default);
+
+ // Get the elevated token for a local/domain accounts only
+ if (!SERVICE_SIDS.Contains(becomeSid))
+ userTokens.Add(GetElevatedToken(hToken));
+ userTokens.Add(hToken);
+ }
+ }
+ finally
+ {
+ if (impersonated)
+ TokenUtil.RevertToSelf();
+ }
+
+ return userTokens;
+ }
+
+ private static SafeNativeHandle GetPrimaryTokenForUser(SecurityIdentifier sid,
+ List<string> requiredPrivileges = null, bool mostPrivileges = false)
+ {
+ // According to CreateProcessWithTokenW we require a token with
+ // TOKEN_QUERY, TOKEN_DUPLICATE and TOKEN_ASSIGN_PRIMARY
+ // Also add in TOKEN_IMPERSONATE so we can get an impersonated token
+ TokenAccessLevels dwAccess = TokenAccessLevels.Query |
+ TokenAccessLevels.Duplicate |
+ TokenAccessLevels.AssignPrimary |
+ TokenAccessLevels.Impersonate;
+
+ SafeNativeHandle userToken = null;
+ int privilegeCount = 0;
+
+ foreach (SafeNativeHandle hToken in TokenUtil.EnumerateUserTokens(sid, dwAccess))
+ {
+ // Filter out any Network logon tokens, using become with that is useless when S4U
+ // can give us a Batch logon
+ NativeHelpers.SECURITY_LOGON_TYPE tokenLogonType = GetTokenLogonType(hToken);
+ if (tokenLogonType == NativeHelpers.SECURITY_LOGON_TYPE.Network)
+ continue;
+
+ List<string> actualPrivileges = TokenUtil.GetTokenPrivileges(hToken).Select(x => x.Name).ToList();
+
+                // If the token has fewer privileges than, or the same number as, the current token, skip it.
+ if (mostPrivileges && privilegeCount >= actualPrivileges.Count)
+ continue;
+
+ // Check that the required privileges are on the token
+ if (requiredPrivileges != null)
+ {
+ int missing = requiredPrivileges.Where(x => !actualPrivileges.Contains(x)).Count();
+ if (missing > 0)
+ continue;
+ }
+
+ // Duplicate the token to convert it to a primary token with the access level required.
+ try
+ {
+ userToken = TokenUtil.DuplicateToken(hToken, TokenAccessLevels.MaximumAllowed,
+ SecurityImpersonationLevel.Anonymous, TokenType.Primary);
+ privilegeCount = actualPrivileges.Count;
+ }
+ catch (Process.Win32Exception)
+ {
+ continue;
+ }
+
+ // If we don't care about getting the token with the most privileges, escape the loop as we already
+ // have a token.
+ if (!mostPrivileges)
+ break;
+ }
+
+ return userToken;
+ }
+
+ private static SafeNativeHandle GetS4UTokenForUser(SecurityIdentifier sid, LogonType logonType)
+ {
+ NTAccount becomeAccount = (NTAccount)sid.Translate(typeof(NTAccount));
+ string[] userSplit = becomeAccount.Value.Split(new char[1] { '\\' }, 2);
+ string domainName = userSplit[0];
+ string username = userSplit[1];
+ bool domainUser = domainName.ToLowerInvariant() != Environment.MachineName.ToLowerInvariant();
+
+ NativeHelpers.LSA_STRING logonProcessName = "ansible";
+ SafeLsaHandle lsaHandle;
+ IntPtr securityMode;
+ UInt32 res = NativeMethods.LsaRegisterLogonProcess(logonProcessName, out lsaHandle, out securityMode);
+ if (res != 0)
+ throw new Process.Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res), "LsaRegisterLogonProcess() failed");
+
+ using (lsaHandle)
+ {
+ NativeHelpers.LSA_STRING packageName = domainUser ? "Kerberos" : "MICROSOFT_AUTHENTICATION_PACKAGE_V1_0";
+ UInt32 authPackage;
+ res = NativeMethods.LsaLookupAuthenticationPackage(lsaHandle, packageName, out authPackage);
+ if (res != 0)
+ throw new Process.Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res),
+ String.Format("LsaLookupAuthenticationPackage({0}) failed", (string)packageName));
+
+ int usernameLength = username.Length * sizeof(char);
+ int domainLength = domainName.Length * sizeof(char);
+ int authInfoLength = (Marshal.SizeOf(typeof(NativeHelpers.KERB_S4U_LOGON)) + usernameLength + domainLength);
+ IntPtr authInfo = Marshal.AllocHGlobal((int)authInfoLength);
+ try
+ {
+ IntPtr usernamePtr = IntPtr.Add(authInfo, Marshal.SizeOf(typeof(NativeHelpers.KERB_S4U_LOGON)));
+ IntPtr domainPtr = IntPtr.Add(usernamePtr, usernameLength);
+
+ // KERB_S4U_LOGON has the same structure as MSV1_0_S4U_LOGON (local accounts)
+ NativeHelpers.KERB_S4U_LOGON s4uLogon = new NativeHelpers.KERB_S4U_LOGON
+ {
+ MessageType = 12, // KerbS4ULogon
+ Flags = 0,
+ ClientUpn = new NativeHelpers.LSA_UNICODE_STRING
+ {
+ Length = (UInt16)usernameLength,
+ MaximumLength = (UInt16)usernameLength,
+ Buffer = usernamePtr,
+ },
+ ClientRealm = new NativeHelpers.LSA_UNICODE_STRING
+ {
+ Length = (UInt16)domainLength,
+ MaximumLength = (UInt16)domainLength,
+ Buffer = domainPtr,
+ },
+ };
+ Marshal.StructureToPtr(s4uLogon, authInfo, false);
+ Marshal.Copy(username.ToCharArray(), 0, usernamePtr, username.Length);
+ Marshal.Copy(domainName.ToCharArray(), 0, domainPtr, domainName.Length);
+
+ Luid sourceLuid;
+ if (!NativeMethods.AllocateLocallyUniqueId(out sourceLuid))
+ throw new Process.Win32Exception("AllocateLocallyUniqueId() failed");
+
+ NativeHelpers.TOKEN_SOURCE tokenSource = new NativeHelpers.TOKEN_SOURCE
+ {
+ SourceName = "ansible\0".ToCharArray(),
+ SourceIdentifier = sourceLuid,
+ };
+
+ // Only Batch or Network will work with S4U, prefer Batch but use Network if asked
+ LogonType lsaLogonType = logonType == LogonType.Network
+ ? LogonType.Network
+ : LogonType.Batch;
+ SafeLsaMemoryBuffer profileBuffer;
+ UInt32 profileBufferLength;
+ Luid logonId;
+ SafeNativeHandle hToken;
+ IntPtr quotas;
+ UInt32 subStatus;
+
+ res = NativeMethods.LsaLogonUser(lsaHandle, logonProcessName, lsaLogonType, authPackage,
+ authInfo, (UInt32)authInfoLength, IntPtr.Zero, tokenSource, out profileBuffer, out profileBufferLength,
+ out logonId, out hToken, out quotas, out subStatus);
+ if (res != 0)
+ throw new Process.Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res),
+ String.Format("LsaLogonUser() failed with substatus {0}", subStatus));
+
+ profileBuffer.Dispose();
+ return hToken;
+ }
+ finally
+ {
+ Marshal.FreeHGlobal(authInfo);
+ }
+ }
+ }
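+
+        // Buffer layout sketch for the LsaLogonUser call above:
+        //   authInfo -> [KERB_S4U_LOGON][username UTF-16 chars][domain UTF-16 chars]
+        // ClientUpn.Buffer points at the username chars and ClientRealm.Buffer at
+        // the domain chars, which is why authInfoLength is the struct size plus
+        // both string lengths.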
+
+ private static SafeNativeHandle GetElevatedToken(SafeNativeHandle hToken)
+ {
+ TokenElevationType tet = TokenUtil.GetTokenElevationType(hToken);
+ // We already have the best token we can get, no linked token is really available.
+ if (tet != TokenElevationType.Limited)
+ return null;
+
+ SafeNativeHandle linkedToken = TokenUtil.GetTokenLinkedToken(hToken);
+ TokenStatistics tokenStats = TokenUtil.GetTokenStatistics(linkedToken);
+
+ // We can only use a token if it's a primary one (we had the SeTcbPrivilege set)
+ if (tokenStats.TokenType == TokenType.Primary)
+ return linkedToken;
+ else
+ return null;
+ }
+
+ private static NativeHelpers.SECURITY_LOGON_TYPE GetTokenLogonType(SafeNativeHandle hToken)
+ {
+ TokenStatistics stats = TokenUtil.GetTokenStatistics(hToken);
+
+ SafeLsaMemoryBuffer sessionDataPtr;
+ UInt32 res = NativeMethods.LsaGetLogonSessionData(ref stats.AuthenticationId, out sessionDataPtr);
+ if (res != 0)
+                // Default to Network; if we weren't able to get the actual type, treat it as an error and assume
+                // we don't want to run a process with the token
+ return NativeHelpers.SECURITY_LOGON_TYPE.Network;
+
+ using (sessionDataPtr)
+ {
+ NativeHelpers.SECURITY_LOGON_SESSION_DATA sessionData = (NativeHelpers.SECURITY_LOGON_SESSION_DATA)Marshal.PtrToStructure(
+ sessionDataPtr.DangerousGetHandle(), typeof(NativeHelpers.SECURITY_LOGON_SESSION_DATA));
+ return sessionData.LogonType;
+ }
+ }
+
+ private static void GrantAccessToWindowStationAndDesktop(IdentityReference account)
+ {
+ GrantAccess(account, NativeMethods.GetProcessWindowStation(), WINDOWS_STATION_ALL_ACCESS);
+ GrantAccess(account, NativeMethods.GetThreadDesktop(NativeMethods.GetCurrentThreadId()), DESKTOP_RIGHTS_ALL_ACCESS);
+ }
+
+ private static void GrantAccess(IdentityReference account, NoopSafeHandle handle, int accessMask)
+ {
+ GenericSecurity security = new GenericSecurity(false, ResourceType.WindowObject, handle, AccessControlSections.Access);
+ security.AddAccessRule(new GenericAccessRule(account, accessMask, AccessControlType.Allow));
+ security.Persist(handle, AccessControlSections.Access);
+ }
+
+ private class GenericSecurity : NativeObjectSecurity
+ {
+ public GenericSecurity(bool isContainer, ResourceType resType, SafeHandle objectHandle, AccessControlSections sectionsRequested)
+ : base(isContainer, resType, objectHandle, sectionsRequested) { }
+ public new void Persist(SafeHandle handle, AccessControlSections includeSections) { base.Persist(handle, includeSections); }
+ public new void AddAccessRule(AccessRule rule) { base.AddAccessRule(rule); }
+ public override Type AccessRightType { get { throw new NotImplementedException(); } }
+ public override AccessRule AccessRuleFactory(System.Security.Principal.IdentityReference identityReference, int accessMask, bool isInherited,
+ InheritanceFlags inheritanceFlags, PropagationFlags propagationFlags, AccessControlType type)
+ { throw new NotImplementedException(); }
+ public override Type AccessRuleType { get { return typeof(AccessRule); } }
+ public override AuditRule AuditRuleFactory(System.Security.Principal.IdentityReference identityReference, int accessMask, bool isInherited,
+ InheritanceFlags inheritanceFlags, PropagationFlags propagationFlags, AuditFlags flags)
+ { throw new NotImplementedException(); }
+ public override Type AuditRuleType { get { return typeof(AuditRule); } }
+ }
+
+ private class GenericAccessRule : AccessRule
+ {
+ public GenericAccessRule(IdentityReference identity, int accessMask, AccessControlType type) :
+ base(identity, accessMask, false, InheritanceFlags.None, PropagationFlags.None, type)
+ { }
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/Ansible.Privilege.cs b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
new file mode 100644
index 00000000..2c0b266b
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Privilege.cs
@@ -0,0 +1,443 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible.Privilege
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID
+ {
+ public UInt32 LowPart;
+ public Int32 HighPart;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID_AND_ATTRIBUTES
+ {
+ public LUID Luid;
+ public PrivilegeAttributes Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_PRIVILEGES
+ {
+ public UInt32 PrivilegeCount;
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
+ public LUID_AND_ATTRIBUTES[] Privileges;
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool AdjustTokenPrivileges(
+ SafeNativeHandle TokenHandle,
+ [MarshalAs(UnmanagedType.Bool)] bool DisableAllPrivileges,
+ SafeMemoryBuffer NewState,
+ UInt32 BufferLength,
+ SafeMemoryBuffer PreviousState,
+ out UInt32 ReturnLength);
+
+ [DllImport("kernel32.dll")]
+ public static extern bool CloseHandle(
+ IntPtr hObject);
+
+ [DllImport("kernel32")]
+ public static extern SafeWaitHandle GetCurrentProcess();
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool GetTokenInformation(
+ SafeNativeHandle TokenHandle,
+ UInt32 TokenInformationClass,
+ SafeMemoryBuffer TokenInformation,
+ UInt32 TokenInformationLength,
+ out UInt32 ReturnLength);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupPrivilegeName(
+ string lpSystemName,
+ ref NativeHelpers.LUID lpLuid,
+ StringBuilder lpName,
+ ref UInt32 cchName);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupPrivilegeValue(
+ string lpSystemName,
+ string lpName,
+ out NativeHelpers.LUID lpLuid);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool OpenProcessToken(
+ SafeHandle ProcessHandle,
+ TokenAccessLevels DesiredAccess,
+ out SafeNativeHandle TokenHandle);
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ internal class SafeNativeHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeNativeHandle() : base(true) { }
+ public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; }
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ return NativeMethods.CloseHandle(handle);
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ [Flags]
+ public enum PrivilegeAttributes : uint
+ {
+ Disabled = 0x00000000,
+ EnabledByDefault = 0x00000001,
+ Enabled = 0x00000002,
+ Removed = 0x00000004,
+ UsedForAccess = 0x80000000,
+ }
+
+ public class PrivilegeEnabler : IDisposable
+ {
+ private SafeHandle process;
+ private Dictionary<string, bool?> previousState;
+
+ /// <summary>
+ /// Temporarily enables the privileges specified and reverts once the class is disposed.
+ /// </summary>
+ /// <param name="strict">Whether to fail if any privilege failed to be enabled, if false then this will continue silently</param>
+ /// <param name="privileges">A list of privileges to enable</param>
+ public PrivilegeEnabler(bool strict, params string[] privileges)
+ {
+ if (privileges.Length > 0)
+ {
+ process = PrivilegeUtil.GetCurrentProcess();
+ Dictionary<string, bool?> newState = new Dictionary<string, bool?>();
+ for (int i = 0; i < privileges.Length; i++)
+ newState.Add(privileges[i], true);
+ try
+ {
+ previousState = PrivilegeUtil.SetTokenPrivileges(process, newState, strict);
+ }
+ catch (Win32Exception e)
+ {
+ throw new Win32Exception(e.NativeErrorCode, String.Format("Failed to enable privilege(s) {0}", String.Join(", ", privileges)));
+ }
+ }
+ }
+
+ public void Dispose()
+ {
+ // disables any privileges that were enabled by this class
+ if (previousState != null)
+ PrivilegeUtil.SetTokenPrivileges(process, previousState);
+ GC.SuppressFinalize(this);
+ }
+ ~PrivilegeEnabler() { this.Dispose(); }
+ }
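+
+    // Usage sketch (illustrative): scope an elevated privilege to a block.
+    //   using (new PrivilegeEnabler(true, "SeBackupPrivilege", "SeRestorePrivilege"))
+    //   {
+    //       // privileged work here; Dispose() reverts to the previous state
+    //   }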
+
+ public class PrivilegeUtil
+ {
+ private static readonly UInt32 TOKEN_PRIVILEGES = 3;
+
+ /// <summary>
+ /// Checks if the specific privilege constant is a valid privilege name
+ /// </summary>
+ /// <param name="name">The privilege constant (Se*Privilege) is valid</param>
+ /// <returns>true if valid, else false</returns>
+ public static bool CheckPrivilegeName(string name)
+ {
+ NativeHelpers.LUID luid;
+ if (!NativeMethods.LookupPrivilegeValue(null, name, out luid))
+ {
+ int errCode = Marshal.GetLastWin32Error();
+ if (errCode != 1313) // ERROR_NO_SUCH_PRIVILEGE
+ throw new Win32Exception(errCode, String.Format("LookupPrivilegeValue({0}) failed", name));
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+ }
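+
+        // e.g. CheckPrivilegeName("SeDebugPrivilege") returns true, while an
+        // unknown constant such as "SeMadeUpPrivilege" returns false (illustrative).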
+
+ /// <summary>
+ /// Disables the privilege specified
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to disable</param>
+ /// <param name="privilege">The privilege constant to disable</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> DisablePrivilege(SafeHandle token, string privilege)
+ {
+ return SetTokenPrivileges(token, new Dictionary<string, bool?>() { { privilege, false } });
+ }
+
+ /// <summary>
+ /// Disables all the privileges
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to disable</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> DisableAllPrivileges(SafeHandle token)
+ {
+ return AdjustTokenPrivileges(token, null, false);
+ }
+
+ /// <summary>
+ /// Enables the privilege specified
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to enable</param>
+ /// <param name="privilege">The privilege constant to enable</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> EnablePrivilege(SafeHandle token, string privilege)
+ {
+ return SetTokenPrivileges(token, new Dictionary<string, bool?>() { { privilege, true } });
+ }
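+
+ // Example (illustrative only): enable a privilege, then revert by passing the
+ // returned previous state back into SetTokenPrivileges.
+ //
+ // SafeHandle proc = PrivilegeUtil.GetCurrentProcess();
+ // Dictionary<string, bool?> previous = PrivilegeUtil.EnablePrivilege(proc, "SeDebugPrivilege");
+ // try { /* privileged work */ }
+ // finally { PrivilegeUtil.SetTokenPrivileges(proc, previous); }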
+
+ /// <summary>
+ /// Gets the status of all the privileges on the token specified
+ /// </summary>
+ /// <param name="token">The process token to get the privilege status on</param>
+ /// <returns>Dictionary where the key is the privilege constant and the value is the PrivilegeAttributes flags</returns>
+ public static Dictionary<String, PrivilegeAttributes> GetAllPrivilegeInfo(SafeHandle token)
+ {
+ SafeNativeHandle hToken = null;
+ if (!NativeMethods.OpenProcessToken(token, TokenAccessLevels.Query, out hToken))
+ throw new Win32Exception("OpenProcessToken() failed");
+
+ using (hToken)
+ {
+ UInt32 tokenLength = 0;
+ NativeMethods.GetTokenInformation(hToken, TOKEN_PRIVILEGES, new SafeMemoryBuffer(0), 0, out tokenLength);
+
+ NativeHelpers.LUID_AND_ATTRIBUTES[] privileges;
+ using (SafeMemoryBuffer privilegesPtr = new SafeMemoryBuffer((int)tokenLength))
+ {
+ if (!NativeMethods.GetTokenInformation(hToken, TOKEN_PRIVILEGES, privilegesPtr, tokenLength, out tokenLength))
+ throw new Win32Exception("GetTokenInformation() for TOKEN_PRIVILEGES failed");
+
+ NativeHelpers.TOKEN_PRIVILEGES privilegeInfo = (NativeHelpers.TOKEN_PRIVILEGES)Marshal.PtrToStructure(
+ privilegesPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_PRIVILEGES));
+ privileges = new NativeHelpers.LUID_AND_ATTRIBUTES[privilegeInfo.PrivilegeCount];
+ PtrToStructureArray(privileges, IntPtr.Add(privilegesPtr.DangerousGetHandle(), Marshal.SizeOf(privilegeInfo.PrivilegeCount)));
+ }
+
+ return privileges.ToDictionary(p => GetPrivilegeName(p.Luid), p => p.Attributes);
+ }
+ }
+
+ /// <summary>
+ /// Get a handle to the current process for use with the methods above
+ /// </summary>
+ /// <returns>SafeWaitHandle of the current process</returns>
+ public static SafeWaitHandle GetCurrentProcess()
+ {
+ return NativeMethods.GetCurrentProcess();
+ }
+
+ /// <summary>
+ /// Removes a privilege from the token. This operation is irreversible
+ /// </summary>
+ /// <param name="token">The process token to that contains the privilege to remove</param>
+ /// <param name="privilege">The privilege constant to remove</param>
+ public static void RemovePrivilege(SafeHandle token, string privilege)
+ {
+ SetTokenPrivileges(token, new Dictionary<string, bool?>() { { privilege, null } });
+ }
+
+ /// <summary>
+ /// Do a bulk set of multiple privileges
+ /// </summary>
+ /// <param name="token">The process token to use when setting the privilege state</param>
+ /// <param name="state">A dictionary that contains the privileges to set, the key is the constant name and the value can be;
+ /// true - enable the privilege
+ /// false - disable the privilege
+ /// null - remove the privilege (this cannot be reversed)
+ /// </param>
+ /// <param name="strict">When true, will fail if one privilege failed to be set, otherwise it will silently continue</param>
+ /// <returns>The previous state that can be passed to SetTokenPrivileges to revert the action</returns>
+ public static Dictionary<string, bool?> SetTokenPrivileges(SafeHandle token, IDictionary state, bool strict = true)
+ {
+ NativeHelpers.LUID_AND_ATTRIBUTES[] privilegeAttr = new NativeHelpers.LUID_AND_ATTRIBUTES[state.Count];
+ int i = 0;
+
+ foreach (DictionaryEntry entry in state)
+ {
+ string key = (string)entry.Key;
+ NativeHelpers.LUID luid;
+ if (!NativeMethods.LookupPrivilegeValue(null, key, out luid))
+ throw new Win32Exception(String.Format("LookupPrivilegeValue({0}) failed", key));
+
+ PrivilegeAttributes attributes;
+ switch ((bool?)entry.Value)
+ {
+ case true:
+ attributes = PrivilegeAttributes.Enabled;
+ break;
+ case false:
+ attributes = PrivilegeAttributes.Disabled;
+ break;
+ default:
+ attributes = PrivilegeAttributes.Removed;
+ break;
+ }
+
+ privilegeAttr[i].Luid = luid;
+ privilegeAttr[i].Attributes = attributes;
+ i++;
+ }
+
+ return AdjustTokenPrivileges(token, privilegeAttr, strict);
+ }
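+
+ // Example (illustrative only) of a bulk change: true enables, false disables,
+ // and null removes the privilege (removal cannot be reverted).
+ //
+ // Dictionary<string, bool?> state = new Dictionary<string, bool?>()
+ // {
+ // { "SeBackupPrivilege", true },
+ // { "SeRestorePrivilege", false },
+ // { "SeUndockPrivilege", null },
+ // };
+ // PrivilegeUtil.SetTokenPrivileges(PrivilegeUtil.GetCurrentProcess(), state);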
+
+ private static Dictionary<string, bool?> AdjustTokenPrivileges(SafeHandle token, NativeHelpers.LUID_AND_ATTRIBUTES[] newState, bool strict)
+ {
+ bool disableAllPrivileges;
+ SafeMemoryBuffer newStatePtr;
+ NativeHelpers.LUID_AND_ATTRIBUTES[] oldStatePrivileges;
+ UInt32 returnLength;
+
+ if (newState == null)
+ {
+ disableAllPrivileges = true;
+ newStatePtr = new SafeMemoryBuffer(0);
+ }
+ else
+ {
+ disableAllPrivileges = false;
+
+ // Need to manually marshal the bytes required for newState as the constant size
+ // of LUID_AND_ATTRIBUTES is set to 1 and can't be overridden at runtime. TOKEN_PRIVILEGES
+ // always contains at least 1 entry, so we need to calculate the extra size if there is
+ // more than 1 LUID_AND_ATTRIBUTES entry
+ int tokenPrivilegesSize = Marshal.SizeOf(typeof(NativeHelpers.TOKEN_PRIVILEGES));
+ int luidAttrSize = 0;
+ if (newState.Length > 1)
+ luidAttrSize = Marshal.SizeOf(typeof(NativeHelpers.LUID_AND_ATTRIBUTES)) * (newState.Length - 1);
+ int totalSize = tokenPrivilegesSize + luidAttrSize;
+ byte[] newStateBytes = new byte[totalSize];
+
+ // get the first entry that includes the struct details
+ NativeHelpers.TOKEN_PRIVILEGES tokenPrivileges = new NativeHelpers.TOKEN_PRIVILEGES()
+ {
+ PrivilegeCount = (UInt32)newState.Length,
+ Privileges = new NativeHelpers.LUID_AND_ATTRIBUTES[1],
+ };
+ if (newState.Length > 0)
+ tokenPrivileges.Privileges[0] = newState[0];
+ int offset = StructureToBytes(tokenPrivileges, newStateBytes, 0);
+
+ // copy the remaining LUID_AND_ATTRIBUTES (if any)
+ for (int i = 1; i < newState.Length; i++)
+ offset += StructureToBytes(newState[i], newStateBytes, offset);
+
+ // finally create the pointer to the byte array we just created
+ newStatePtr = new SafeMemoryBuffer(newStateBytes.Length);
+ Marshal.Copy(newStateBytes, 0, newStatePtr.DangerousGetHandle(), newStateBytes.Length);
+ }
+
+ using (newStatePtr)
+ {
+ SafeNativeHandle hToken;
+ if (!NativeMethods.OpenProcessToken(token, TokenAccessLevels.Query | TokenAccessLevels.AdjustPrivileges, out hToken))
+ throw new Win32Exception("OpenProcessToken() failed with Query and AdjustPrivileges");
+
+ using (hToken)
+ {
+ if (!NativeMethods.AdjustTokenPrivileges(hToken, disableAllPrivileges, newStatePtr, 0, new SafeMemoryBuffer(0), out returnLength))
+ {
+ int errCode = Marshal.GetLastWin32Error();
+ if (errCode != 122) // ERROR_INSUFFICIENT_BUFFER
+ throw new Win32Exception(errCode, "AdjustTokenPrivileges() failed to get old state size");
+ }
+
+ using (SafeMemoryBuffer oldStatePtr = new SafeMemoryBuffer((int)returnLength))
+ {
+ bool res = NativeMethods.AdjustTokenPrivileges(hToken, disableAllPrivileges, newStatePtr, returnLength, oldStatePtr, out returnLength);
+ int errCode = Marshal.GetLastWin32Error();
+
+ // even when res == true, ERROR_NOT_ALL_ASSIGNED may be set as the last error code
+ // fail if we are running with strict, otherwise ignore those privileges
+ if (!res || ((strict && errCode != 0) || (!strict && !(errCode == 0 || errCode == 0x00000514))))
+ throw new Win32Exception(errCode, "AdjustTokenPrivileges() failed");
+
+ // Marshal the oldStatePtr to the struct
+ NativeHelpers.TOKEN_PRIVILEGES oldState = (NativeHelpers.TOKEN_PRIVILEGES)Marshal.PtrToStructure(
+ oldStatePtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_PRIVILEGES));
+ oldStatePrivileges = new NativeHelpers.LUID_AND_ATTRIBUTES[oldState.PrivilegeCount];
+ PtrToStructureArray(oldStatePrivileges, IntPtr.Add(oldStatePtr.DangerousGetHandle(), Marshal.SizeOf(oldState.PrivilegeCount)));
+ }
+ }
+ }
+
+ return oldStatePrivileges.ToDictionary(p => GetPrivilegeName(p.Luid), p => (bool?)p.Attributes.HasFlag(PrivilegeAttributes.Enabled));
+ }
+
+ private static string GetPrivilegeName(NativeHelpers.LUID luid)
+ {
+ UInt32 nameLen = 0;
+ NativeMethods.LookupPrivilegeName(null, ref luid, null, ref nameLen);
+
+ StringBuilder name = new StringBuilder((int)(nameLen + 1));
+ if (!NativeMethods.LookupPrivilegeName(null, ref luid, name, ref nameLen))
+ throw new Win32Exception("LookupPrivilegeName() failed");
+
+ return name.ToString();
+ }
+
+ private static void PtrToStructureArray<T>(T[] array, IntPtr ptr)
+ {
+ IntPtr ptrOffset = ptr;
+ for (int i = 0; i < array.Length; i++, ptrOffset = IntPtr.Add(ptrOffset, Marshal.SizeOf(typeof(T))))
+ array[i] = (T)Marshal.PtrToStructure(ptrOffset, typeof(T));
+ }
+
+ private static int StructureToBytes<T>(T structure, byte[] array, int offset)
+ {
+ int size = Marshal.SizeOf(structure);
+ using (SafeMemoryBuffer structPtr = new SafeMemoryBuffer(size))
+ {
+ Marshal.StructureToPtr(structure, structPtr.DangerousGetHandle(), false);
+ Marshal.Copy(structPtr.DangerousGetHandle(), array, offset, size);
+ }
+
+ return size;
+ }
+ }
+}
+
diff --git a/lib/ansible/module_utils/csharp/Ansible.Process.cs b/lib/ansible/module_utils/csharp/Ansible.Process.cs
new file mode 100644
index 00000000..f4c68f05
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/Ansible.Process.cs
@@ -0,0 +1,461 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections;
+using System.IO;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading;
+
+namespace Ansible.Process
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ public class SECURITY_ATTRIBUTES
+ {
+ public UInt32 nLength;
+ public IntPtr lpSecurityDescriptor;
+ public bool bInheritHandle = false;
+ public SECURITY_ATTRIBUTES()
+ {
+ nLength = (UInt32)Marshal.SizeOf(this);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public class STARTUPINFO
+ {
+ public UInt32 cb;
+ public IntPtr lpReserved;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpDesktop;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpTitle;
+ public UInt32 dwX;
+ public UInt32 dwY;
+ public UInt32 dwXSize;
+ public UInt32 dwYSize;
+ public UInt32 dwXCountChars;
+ public UInt32 dwYCountChars;
+ public UInt32 dwFillAttribute;
+ public StartupInfoFlags dwFlags;
+ public UInt16 wShowWindow;
+ public UInt16 cbReserved2;
+ public IntPtr lpReserved2;
+ public SafeFileHandle hStdInput;
+ public SafeFileHandle hStdOutput;
+ public SafeFileHandle hStdError;
+ public STARTUPINFO()
+ {
+ cb = (UInt32)Marshal.SizeOf(this);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public class STARTUPINFOEX
+ {
+ public STARTUPINFO startupInfo;
+ public IntPtr lpAttributeList;
+ public STARTUPINFOEX()
+ {
+ startupInfo = new STARTUPINFO();
+ startupInfo.cb = (UInt32)Marshal.SizeOf(this);
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct PROCESS_INFORMATION
+ {
+ public IntPtr hProcess;
+ public IntPtr hThread;
+ public int dwProcessId;
+ public int dwThreadId;
+ }
+
+ [Flags]
+ public enum ProcessCreationFlags : uint
+ {
+ CREATE_NEW_CONSOLE = 0x00000010,
+ CREATE_UNICODE_ENVIRONMENT = 0x00000400,
+ EXTENDED_STARTUPINFO_PRESENT = 0x00080000
+ }
+
+ [Flags]
+ public enum StartupInfoFlags : uint
+ {
+ USESTDHANDLES = 0x00000100
+ }
+
+ [Flags]
+ public enum HandleFlags : uint
+ {
+ None = 0,
+ INHERIT = 1
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool AllocConsole();
+
+ [DllImport("shell32.dll", SetLastError = true)]
+ public static extern SafeMemoryBuffer CommandLineToArgvW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpCmdLine,
+ out int pNumArgs);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool CreatePipe(
+ out SafeFileHandle hReadPipe,
+ out SafeFileHandle hWritePipe,
+ NativeHelpers.SECURITY_ATTRIBUTES lpPipeAttributes,
+ UInt32 nSize);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool CreateProcessW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpApplicationName,
+ StringBuilder lpCommandLine,
+ IntPtr lpProcessAttributes,
+ IntPtr lpThreadAttributes,
+ bool bInheritHandles,
+ NativeHelpers.ProcessCreationFlags dwCreationFlags,
+ SafeMemoryBuffer lpEnvironment,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpCurrentDirectory,
+ NativeHelpers.STARTUPINFOEX lpStartupInfo,
+ out NativeHelpers.PROCESS_INFORMATION lpProcessInformation);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool FreeConsole();
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern IntPtr GetConsoleWindow();
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool GetExitCodeProcess(
+ SafeWaitHandle hProcess,
+ out UInt32 lpExitCode);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern uint SearchPathW(
+ [MarshalAs(UnmanagedType.LPWStr)] string lpPath,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpFileName,
+ [MarshalAs(UnmanagedType.LPWStr)] string lpExtension,
+ UInt32 nBufferLength,
+ [MarshalAs(UnmanagedType.LPTStr)] StringBuilder lpBuffer,
+ out IntPtr lpFilePart);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool SetConsoleCP(
+ UInt32 wCodePageID);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool SetConsoleOutputCP(
+ UInt32 wCodePageID);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool SetHandleInformation(
+ SafeFileHandle hObject,
+ NativeHelpers.HandleFlags dwMask,
+ NativeHelpers.HandleFlags dwFlags);
+
+ [DllImport("kernel32.dll")]
+ public static extern UInt32 WaitForSingleObject(
+ SafeWaitHandle hHandle,
+ UInt32 dwMilliseconds);
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class Result
+ {
+ public string StandardOut { get; internal set; }
+ public string StandardError { get; internal set; }
+ public uint ExitCode { get; internal set; }
+ }
+
+ public class ProcessUtil
+ {
+ /// <summary>
+ /// Parses a command line string into an argv array according to the Windows rules
+ /// </summary>
+ /// <param name="lpCommandLine">The command line to parse</param>
+ /// <returns>An array of arguments interpreted by Windows</returns>
+ public static string[] ParseCommandLine(string lpCommandLine)
+ {
+ int numArgs;
+ using (SafeMemoryBuffer buf = NativeMethods.CommandLineToArgvW(lpCommandLine, out numArgs))
+ {
+ if (buf.IsInvalid)
+ throw new Win32Exception("Error parsing command line");
+ IntPtr[] strptrs = new IntPtr[numArgs];
+ Marshal.Copy(buf.DangerousGetHandle(), strptrs, 0, numArgs);
+ return strptrs.Select(s => Marshal.PtrToStringUni(s)).ToArray();
+ }
+ }
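+
+ // For example (illustrative): ParseCommandLine("C:\\temp\\app.exe arg1 \"arg 2\"")
+ // returns { "C:\\temp\\app.exe", "arg1", "arg 2" } per the Windows quoting rules.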
+
+ /// <summary>
+ /// Searches the path for the executable specified. Throws a FileNotFoundException if the file could not be found.
+ /// </summary>
+ /// <param name="lpFileName">The executable to search for</param>
+ /// <returns>The full path of the executable that was found</returns>
+ public static string SearchPath(string lpFileName)
+ {
+ StringBuilder sbOut = new StringBuilder(0);
+ IntPtr filePartOut = IntPtr.Zero;
+ UInt32 res = NativeMethods.SearchPathW(null, lpFileName, null, (UInt32)sbOut.Capacity, sbOut, out filePartOut);
+ if (res == 0)
+ {
+ int lastErr = Marshal.GetLastWin32Error();
+ if (lastErr == 2) // ERROR_FILE_NOT_FOUND
+ throw new FileNotFoundException(String.Format("Could not find file '{0}'.", lpFileName));
+ else
+ throw new Win32Exception(String.Format("SearchPathW({0}) failed to get buffer length", lpFileName));
+ }
+
+ sbOut.EnsureCapacity((int)res);
+ if (NativeMethods.SearchPathW(null, lpFileName, null, (UInt32)sbOut.Capacity, sbOut, out filePartOut) == 0)
+ throw new Win32Exception(String.Format("SearchPathW({0}) failed", lpFileName));
+
+ return sbOut.ToString();
+ }
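+
+ // For example (illustrative): SearchPath("cmd.exe") would typically resolve to
+ // something like "C:\\Windows\\System32\\cmd.exe" on a default install.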
+
+ public static Result CreateProcess(string command)
+ {
+ return CreateProcess(null, command, null, null, String.Empty);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment)
+ {
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, String.Empty);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, string stdin)
+ {
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, stdin, null);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, byte[] stdin)
+ {
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, stdin, null);
+ }
+
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, string stdin, string outputEncoding)
+ {
+ byte[] stdinBytes;
+ if (String.IsNullOrEmpty(stdin))
+ stdinBytes = new byte[0];
+ else
+ {
+ if (!stdin.EndsWith(Environment.NewLine))
+ stdin += Environment.NewLine;
+ stdinBytes = new UTF8Encoding(false).GetBytes(stdin);
+ }
+ return CreateProcess(lpApplicationName, lpCommandLine, lpCurrentDirectory, environment, stdinBytes, outputEncoding);
+ }
+
+ /// <summary>
+ /// Creates a process based on the CreateProcess API call.
+ /// </summary>
+ /// <param name="lpApplicationName">The name of the executable or batch file to execute</param>
+ /// <param name="lpCommandLine">The command line to execute, typically this includes lpApplication as the first argument</param>
+ /// <param name="lpCurrentDirectory">The full path to the current directory for the process, null will have the same cwd as the calling process</param>
+ /// <param name="environment">A dictionary of key/value pairs to define the new process environment</param>
+ /// <param name="stdin">A byte array to send over the stdin pipe</param>
+ /// <param name="outputEncoding">The character encoding for decoding stdout/stderr output of the process.</param>
+ /// <returns>Result object that contains the command output and return code</returns>
+ public static Result CreateProcess(string lpApplicationName, string lpCommandLine, string lpCurrentDirectory,
+ IDictionary environment, byte[] stdin, string outputEncoding)
+ {
+ NativeHelpers.ProcessCreationFlags creationFlags = NativeHelpers.ProcessCreationFlags.CREATE_UNICODE_ENVIRONMENT |
+ NativeHelpers.ProcessCreationFlags.EXTENDED_STARTUPINFO_PRESENT;
+ NativeHelpers.PROCESS_INFORMATION pi = new NativeHelpers.PROCESS_INFORMATION();
+ NativeHelpers.STARTUPINFOEX si = new NativeHelpers.STARTUPINFOEX();
+ si.startupInfo.dwFlags = NativeHelpers.StartupInfoFlags.USESTDHANDLES;
+
+ SafeFileHandle stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinRead, stdinWrite;
+ CreateStdioPipes(si, out stdoutRead, out stdoutWrite, out stderrRead, out stderrWrite, out stdinRead,
+ out stdinWrite);
+ FileStream stdinStream = new FileStream(stdinWrite, FileAccess.Write);
+
+ // $null from PowerShell ends up as an empty string; convert back to null as an
+ // empty string doesn't make sense for these parameters
+ if (lpApplicationName == "")
+ lpApplicationName = null;
+
+ if (lpCurrentDirectory == "")
+ lpCurrentDirectory = null;
+
+ using (SafeMemoryBuffer lpEnvironment = CreateEnvironmentPointer(environment))
+ {
+ // Create console with utf-8 CP if no existing console is present
+ bool isConsole = false;
+ if (NativeMethods.GetConsoleWindow() == IntPtr.Zero)
+ {
+ isConsole = NativeMethods.AllocConsole();
+
+ // Set console input/output codepage to UTF-8
+ NativeMethods.SetConsoleCP(65001);
+ NativeMethods.SetConsoleOutputCP(65001);
+ }
+
+ try
+ {
+ StringBuilder commandLine = new StringBuilder(lpCommandLine);
+ if (!NativeMethods.CreateProcessW(lpApplicationName, commandLine, IntPtr.Zero, IntPtr.Zero,
+ true, creationFlags, lpEnvironment, lpCurrentDirectory, si, out pi))
+ {
+ throw new Win32Exception("CreateProcessW() failed");
+ }
+ }
+ finally
+ {
+ if (isConsole)
+ NativeMethods.FreeConsole();
+ }
+ }
+
+ return WaitProcess(stdoutRead, stdoutWrite, stderrRead, stderrWrite, stdinStream, stdin, pi.hProcess,
+ outputEncoding);
+ }
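+
+ // Illustrative usage only (not part of this file): run a command and inspect
+ // the captured output and exit code.
+ //
+ // Result res = ProcessUtil.CreateProcess("whoami.exe /all");
+ // Console.WriteLine("rc={0}\n{1}", res.ExitCode, res.StandardOut);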
+
+ internal static void CreateStdioPipes(NativeHelpers.STARTUPINFOEX si, out SafeFileHandle stdoutRead,
+ out SafeFileHandle stdoutWrite, out SafeFileHandle stderrRead, out SafeFileHandle stderrWrite,
+ out SafeFileHandle stdinRead, out SafeFileHandle stdinWrite)
+ {
+ NativeHelpers.SECURITY_ATTRIBUTES pipesec = new NativeHelpers.SECURITY_ATTRIBUTES();
+ pipesec.bInheritHandle = true;
+
+ if (!NativeMethods.CreatePipe(out stdoutRead, out stdoutWrite, pipesec, 0))
+ throw new Win32Exception("STDOUT pipe setup failed");
+ if (!NativeMethods.SetHandleInformation(stdoutRead, NativeHelpers.HandleFlags.INHERIT, 0))
+ throw new Win32Exception("STDOUT pipe handle setup failed");
+
+ if (!NativeMethods.CreatePipe(out stderrRead, out stderrWrite, pipesec, 0))
+ throw new Win32Exception("STDERR pipe setup failed");
+ if (!NativeMethods.SetHandleInformation(stderrRead, NativeHelpers.HandleFlags.INHERIT, 0))
+ throw new Win32Exception("STDERR pipe handle setup failed");
+
+ if (!NativeMethods.CreatePipe(out stdinRead, out stdinWrite, pipesec, 0))
+ throw new Win32Exception("STDIN pipe setup failed");
+ if (!NativeMethods.SetHandleInformation(stdinWrite, NativeHelpers.HandleFlags.INHERIT, 0))
+ throw new Win32Exception("STDIN pipe handle setup failed");
+
+ si.startupInfo.hStdOutput = stdoutWrite;
+ si.startupInfo.hStdError = stderrWrite;
+ si.startupInfo.hStdInput = stdinRead;
+ }
+
+ internal static SafeMemoryBuffer CreateEnvironmentPointer(IDictionary environment)
+ {
+ IntPtr lpEnvironment = IntPtr.Zero;
+ if (environment != null && environment.Count > 0)
+ {
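+ // The Win32 environment block is a sequence of null-terminated
+ // "key=value" strings, terminated by an additional null character.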
+ StringBuilder environmentString = new StringBuilder();
+ foreach (DictionaryEntry kv in environment)
+ environmentString.AppendFormat("{0}={1}\0", kv.Key, kv.Value);
+ environmentString.Append('\0');
+
+ lpEnvironment = Marshal.StringToHGlobalUni(environmentString.ToString());
+ }
+ return new SafeMemoryBuffer(lpEnvironment);
+ }
+
+ internal static Result WaitProcess(SafeFileHandle stdoutRead, SafeFileHandle stdoutWrite, SafeFileHandle stderrRead,
+ SafeFileHandle stderrWrite, FileStream stdinStream, byte[] stdin, IntPtr hProcess, string outputEncoding = null)
+ {
+ // Default to UTF-8 as the output encoding; this should be a sane default for most scenarios.
+ outputEncoding = String.IsNullOrEmpty(outputEncoding) ? "utf-8" : outputEncoding;
+ Encoding encodingInstance = Encoding.GetEncoding(outputEncoding);
+
+ FileStream stdoutFS = new FileStream(stdoutRead, FileAccess.Read, 4096);
+ StreamReader stdout = new StreamReader(stdoutFS, encodingInstance, true, 4096);
+ stdoutWrite.Close();
+
+ FileStream stderrFS = new FileStream(stderrRead, FileAccess.Read, 4096);
+ StreamReader stderr = new StreamReader(stderrFS, encodingInstance, true, 4096);
+ stderrWrite.Close();
+
+ stdinStream.Write(stdin, 0, stdin.Length);
+ stdinStream.Close();
+
+ string stdoutStr, stderrStr = null;
+ GetProcessOutput(stdout, stderr, out stdoutStr, out stderrStr);
+ UInt32 rc = GetProcessExitCode(hProcess);
+
+ return new Result
+ {
+ StandardOut = stdoutStr,
+ StandardError = stderrStr,
+ ExitCode = rc
+ };
+ }
+
+ internal static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
+ {
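+ // Drain stdout and stderr on separate thread pool threads; reading them
+ // sequentially could deadlock if the child fills one pipe while we are
+ // blocked reading the other.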
+ var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
+ var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
+ string so = null, se = null;
+ ThreadPool.QueueUserWorkItem((s) =>
+ {
+ so = stdoutStream.ReadToEnd();
+ sowait.Set();
+ });
+ ThreadPool.QueueUserWorkItem((s) =>
+ {
+ se = stderrStream.ReadToEnd();
+ sewait.Set();
+ });
+ foreach (var wh in new WaitHandle[] { sowait, sewait })
+ wh.WaitOne();
+ stdout = so;
+ stderr = se;
+ }
+
+ internal static UInt32 GetProcessExitCode(IntPtr processHandle)
+ {
+ SafeWaitHandle hProcess = new SafeWaitHandle(processHandle, true);
+ NativeMethods.WaitForSingleObject(hProcess, 0xFFFFFFFF);
+
+ UInt32 exitCode;
+ if (!NativeMethods.GetExitCodeProcess(hProcess, out exitCode))
+ throw new Win32Exception("GetExitCodeProcess() failed");
+ return exitCode;
+ }
+ }
+}
diff --git a/lib/ansible/module_utils/csharp/__init__.py b/lib/ansible/module_utils/csharp/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/csharp/__init__.py
diff --git a/lib/ansible/module_utils/distro/__init__.py b/lib/ansible/module_utils/distro/__init__.py
new file mode 100644
index 00000000..25b148a1
--- /dev/null
+++ b/lib/ansible/module_utils/distro/__init__.py
@@ -0,0 +1,46 @@
+# (c) 2018 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat distro library.
+'''
+# The following makes it easier for us to script updates of the bundled code
+_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.5.0"}
+
+# The following additional changes have been made:
+# * Remove optparse since it is not needed for our use.
+# * A format string including {} has been changed to {0} (py2.6 compat)
+# * Port two calls from subprocess.check_output to subprocess.Popen().communicate() (py2.6 compat)
+
+
+import sys
+
+try:
+ import distro as _system_distro
+except ImportError:
+ _system_distro = None
+
+if _system_distro:
+ distro = _system_distro
+else:
+ # Our bundled copy
+ from ansible.module_utils.distro import _distro as distro
+sys.modules['ansible.module_utils.distro'] = distro
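+
+# Example (illustrative only): callers import this module and transparently get
+# either the system `distro` package or the bundled copy:
+#
+# from ansible.module_utils import distro
+# print(distro.id(), distro.version())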
diff --git a/lib/ansible/module_utils/distro/_distro.py b/lib/ansible/module_utils/distro/_distro.py
new file mode 100644
index 00000000..3a6b486a
--- /dev/null
+++ b/lib/ansible/module_utils/distro/_distro.py
@@ -0,0 +1,1271 @@
+# Copyright 2015,2016,2017 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A local copy of the license can be found in licenses/Apache-License.txt
+#
+# Modifications to this code have been made by Ansible Project
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is the recommended replacement for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.8 will remove it altogether.
+Its predecessor function :py:func:`platform.dist` has been
+deprecated since Python 2.6 and will also be removed in Python 3.8.
+Still, there are many cases in which access to OS distribution information
+is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
+more information.
+"""
+
+import os
+import re
+import sys
+import shlex
+import logging
+import subprocess
+
+
+_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
+_OS_RELEASE_BASENAME = 'os-release'
+
+#: Translation table for normalizing the "ID" attribute defined in os-release
+#: files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as defined in the os-release file, translated to lower case,
+#: with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_OS_ID = {
+ 'ol': 'oracle', # Oracle Linux
+}
+
+#: Translation table for normalizing the "Distributor ID" attribute returned by
+#: the lsb_release command, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as returned by the lsb_release command, translated to lower
+#: case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_LSB_ID = {
+ 'enterpriseenterpriseas': 'oracle', # Oracle Enterprise Linux 4
+ 'enterpriseenterpriseserver': 'oracle', # Oracle Linux 5
+ 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation
+ 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server
+ 'redhatenterprisecomputenode': 'rhel', # RHEL 6 ComputeNode
+}
+
+#: Translation table for normalizing the distro ID derived from the file name
+#: of distro release files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as derived from the file name of a distro release file,
+#: translated to lower case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_DISTRO_ID = {
+ 'redhat': 'rhel', # RHEL 6.x, 7.x
+}
+
+# Pattern for content of distro release file (reversed)
+_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
+ r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
+
+# Pattern for base file name of distro release file
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
+ r'(\w+)[-_](release|version)$')
+
+# Base file names to be ignored when searching for distro release file
+_DISTRO_RELEASE_IGNORE_BASENAMES = (
+ 'debian_version',
+ 'lsb-release',
+ 'oem-release',
+ _OS_RELEASE_BASENAME,
+ 'system-release',
+ 'plesk-release',
+)
+
+
+#
+# Python 2.6 does not have subprocess.check_output so replicate it here
+#
+def _my_check_output(*popenargs, **kwargs):
+ r"""Run command with arguments and return its output as a byte string.
+
+ If the exit code was non-zero it raises a CalledProcessError. The
+ CalledProcessError object will have the return code in the returncode
+ attribute and output in the output attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ >>> check_output(["ls", "-l", "/dev/null"])
+ 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
+
+ The stdout argument is not allowed as it is used internally.
+ To capture standard error in the result, use stderr=STDOUT.
+
+ >>> check_output(["/bin/sh", "-c",
+ ... "ls -l non_existent_file ; exit 0"],
+ ... stderr=STDOUT)
+ 'ls: non_existent_file: No such file or directory\n'
+
+ This is a backport of Python-2.7's check output to Python-2.6
+ """
+ if 'stdout' in kwargs:
+ raise ValueError(
+ 'stdout argument not allowed, it will be overridden.'
+ )
+ process = subprocess.Popen(
+ stdout=subprocess.PIPE, *popenargs, **kwargs
+ )
+ output, unused_err = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ # Deviation from Python-2.7: Python-2.6's CalledProcessError does not
+ # have an argument for the stdout so simply omit it.
+ raise subprocess.CalledProcessError(retcode, cmd)
+ return output
+
+
+try:
+ _check_output = subprocess.check_output
+except AttributeError:
+ _check_output = _my_check_output
+
+
+def linux_distribution(full_distribution_name=True):
+ """
+ Return information about the current OS distribution as a tuple
+ ``(id_name, version, codename)`` with items as follows:
+
+ * ``id_name``: If *full_distribution_name* is false, the result of
+ :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ The interface of this function is compatible with the original
+ :py:func:`platform.linux_distribution` function, supporting a subset of
+ its parameters.
+
+ The data it returns may not exactly be the same, because it uses more data
+ sources than the original function, and that may lead to different data if
+ the OS distribution is not consistent across multiple data sources it
+ provides (there are indeed such distributions ...).
+
+ Another reason for differences is the fact that the :func:`distro.id`
+ method normalizes the distro ID string to a reliable machine-readable value
+ for a number of popular OS distributions.
+ """
+ return _distro.linux_distribution(full_distribution_name)
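+
+# For example (illustrative), on a CentOS 7 host this might return
+# ('CentOS Linux', '7.1.1503', 'Core').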
+
+
+def id():
+ """
+ Return the distro ID of the current distribution, as a
+ machine-readable string.
+
+ For a number of OS distributions, the returned distro ID value is
+ *reliable*, in the sense that it is documented and that it does not change
+ across releases of the distribution.
+
+ This package maintains the following reliable distro ID values:
+
+ ============== =========================================
+ Distro ID Distribution
+ ============== =========================================
+ "ubuntu" Ubuntu
+ "debian" Debian
+ "rhel" RedHat Enterprise Linux
+ "centos" CentOS
+ "fedora" Fedora
+ "sles" SUSE Linux Enterprise Server
+ "opensuse" openSUSE
+ "amazon" Amazon Linux
+ "arch" Arch Linux
+ "cloudlinux" CloudLinux OS
+ "exherbo" Exherbo Linux
+ "gentoo" GenToo Linux
+ "ibm_powerkvm" IBM PowerKVM
+ "kvmibm" KVM for IBM z Systems
+ "linuxmint" Linux Mint
+ "mageia" Mageia
+ "mandriva" Mandriva Linux
+ "parallels" Parallels
+ "pidora" Pidora
+ "raspbian" Raspbian
+ "oracle" Oracle Linux (and Oracle Enterprise Linux)
+ "scientific" Scientific Linux
+ "slackware" Slackware
+ "xenserver" XenServer
+ "openbsd" OpenBSD
+ "netbsd" NetBSD
+ "freebsd" FreeBSD
+ "midnightbsd" MidnightBSD
+ ============== =========================================
+
+ If you have a need to get distros for reliable IDs added into this set,
+ or if you find that the :func:`distro.id` function returns a different
+ distro ID for one of the listed distros, please create an issue in the
+ `distro issue tracker`_.
+
+ **Lookup hierarchy and transformations:**
+
+ First, the ID is obtained from the following sources, in the specified
+ order. The first available and non-empty value is used:
+
+ * the value of the "ID" attribute of the os-release file,
+
+ * the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ * the first part of the file name of the distro release file,
+
+ The ID value determined this way then undergoes the following transformations,
+ before it is returned by this method:
+
+ * it is translated to lower case,
+
+ * blanks (which should not be there anyway) are translated to underscores,
+
+ * a normalization of the ID is performed, based upon
+ `normalization tables`_. The purpose of this normalization is to ensure
+ that the ID is as reliable as possible, even across incompatible changes
+ in the OS distributions. A common reason for an incompatible change is
+ the addition of an os-release file, or the addition of the lsb_release
+ command, with ID values that differ from what was previously determined
+ from the distro release file name.
+ """
+ return _distro.id()
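+
+# For example (illustrative): on Oracle Linux the os-release "ID" value is
+# "ol", which the NORMALIZED_OS_ID table above maps to "oracle".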
+
+
+def name(pretty=False):
+ """
+ Return the name of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the name is returned without version or codename.
+ (e.g. "CentOS Linux")
+
+ If *pretty* is true, the version and codename are appended.
+ (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+ **Lookup hierarchy:**
+
+ The name is obtained from the following sources, in the specified order.
+ The first available and non-empty value is used:
+
+ * If *pretty* is false:
+
+ - the value of the "NAME" attribute of the os-release file,
+
+ - the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file.
+
+ * If *pretty* is true:
+
+ - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+ - the value of the "Description" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file, appended
+ with the value of the pretty version ("<version_id>" and "<codename>"
+ fields) of the distro release file, if available.
+ """
+ return _distro.name(pretty)
+
+
+def version(pretty=False, best=False):
+ """
+ Return the version of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the version is returned without codename (e.g.
+ "7.0").
+
+ If *pretty* is true, the codename in parenthesis is appended, if the
+ codename is non-empty (e.g. "7.0 (Maipo)").
+
+ Some distributions provide version numbers with different precisions in
+ the different sources of distribution information. Examining the different
+ sources in a fixed priority order does not always yield the most precise
+ version (e.g. for Debian 8.2, or CentOS 7.1).
+
+ The *best* parameter can be used to control the approach for the returned
+ version:
+
+ If *best* is false, the first non-empty version number in priority order of
+ the examined sources is returned.
+
+ If *best* is true, the most precise version number out of all examined
+ sources is returned.
+
+ **Lookup hierarchy:**
+
+ In all cases, the version number is obtained from the following sources.
+ If *best* is false, this order represents the priority order:
+
+ * the value of the "VERSION_ID" attribute of the os-release file,
+ * the value of the "Release" attribute returned by the lsb_release
+ command,
+ * the version number parsed from the "<version_id>" field of the first line
+ of the distro release file,
+ * the version number parsed from the "PRETTY_NAME" attribute of the
+ os-release file, if it follows the format of the distro release files.
+ * the version number parsed from the "Description" attribute returned by
+ the lsb_release command, if it follows the format of the distro release
+ files.
+ """
+ return _distro.version(pretty, best)
+
+
+def version_parts(best=False):
+ """
+ Return the version of the current OS distribution as a tuple
+ ``(major, minor, build_number)`` with items as follows:
+
+ * ``major``: The result of :func:`distro.major_version`.
+
+ * ``minor``: The result of :func:`distro.minor_version`.
+
+ * ``build_number``: The result of :func:`distro.build_number`.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.version_parts(best)
+
+
+def major_version(best=False):
+ """
+ Return the major version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The major version is the first
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.major_version(best)
+
+
+def minor_version(best=False):
+ """
+ Return the minor version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The minor version is the second
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.minor_version(best)
+
+
+def build_number(best=False):
+ """
+ Return the build number of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The build number is the third part
+ of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.build_number(best)
+
+
+def like():
+ """
+ Return a space-separated list of distro IDs of distributions that are
+ closely related to the current OS distribution with regard to packaging
+ and programming interfaces, for example distributions the current
+ distribution is a derivative of.
+
+ **Lookup hierarchy:**
+
+ This information item is only provided by the os-release file.
+ For details, see the description of the "ID_LIKE" attribute in the
+ `os-release man page
+ <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+ """
+ return _distro.like()
+
+
+def codename():
+ """
+ Return the codename for the release of the current OS distribution,
+ as a string.
+
+ If the distribution does not have a codename, an empty string is returned.
+
+ Note that the returned codename is not always really a codename. For
+ example, openSUSE returns "x86_64". This function does not handle such
+ cases in any special way and just returns the string it finds, if any.
+
+ **Lookup hierarchy:**
+
+ * the codename within the "VERSION" attribute of the os-release file, if
+ provided,
+
+ * the value of the "Codename" attribute returned by the lsb_release
+ command,
+
+ * the value of the "<codename>" field of the distro release file.
+ """
+ return _distro.codename()
+
+
+def info(pretty=False, best=False):
+ """
+ Return certain machine-readable information items about the current OS
+ distribution in a dictionary, as shown in the following example:
+
+ .. sourcecode:: python
+
+ {
+ 'id': 'rhel',
+ 'version': '7.0',
+ 'version_parts': {
+ 'major': '7',
+ 'minor': '0',
+ 'build_number': ''
+ },
+ 'like': 'fedora',
+ 'codename': 'Maipo'
+ }
+
+ The dictionary structure and keys are always the same, regardless of which
+ information items are available in the underlying data sources. The values
+ for the various keys are as follows:
+
+ * ``id``: The result of :func:`distro.id`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``version_parts -> major``: The result of :func:`distro.major_version`.
+
+ * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
+
+ * ``version_parts -> build_number``: The result of
+ :func:`distro.build_number`.
+
+ * ``like``: The result of :func:`distro.like`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ For a description of the *pretty* and *best* parameters, see the
+ :func:`distro.version` method.
+ """
+ return _distro.info(pretty, best)
+
+
+def os_release_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the os-release file data source of the current OS distribution.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_info()
+
+
+def lsb_release_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the lsb_release command data source of the current OS distribution.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_info()
+
+
+def distro_release_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the distro release file data source of the current OS distribution.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_info()
+
+
+def uname_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the uname command data source of the current OS distribution.
+ """
+ return _distro.uname_info()
+
+
+def os_release_attr(attribute):
+ """
+ Return a single named information item from the os-release file data source
+ of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute):
+ """
+ Return a single named information item from the lsb_release command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute):
+ """
+ Return a single named information item from the distro release file
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute):
+ """
+ Return a single named information item from the uname command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+ """
+ return _distro.uname_attr(attribute)
+
+
+class cached_property(object):
+ """A version of @property which caches the value. On access, it calls the
+ underlying function and sets the value in `__dict__` so future accesses
+ will not re-call the property.
+ """
+ def __init__(self, f):
+ self._fname = f.__name__
+ self._f = f
+
+ def __get__(self, obj, owner):
+ assert obj is not None, 'call {0} on an instance'.format(self._fname)
+ ret = obj.__dict__[self._fname] = self._f(obj)
+ return ret
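+
+# Illustrative usage only (not part of this file):
+#
+# class Example(object):
+# @cached_property
+# def value(self):
+# return expensive_computation() # computed once, then cached in __dict__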
+
+
+class LinuxDistribution(object):
+ """
+ Provides information about an OS distribution.
+
+ This package creates a private module-global instance of this class with
+ default initialization arguments, that is used by the
+ `consolidated accessor functions`_ and `single source accessor functions`_.
+ By using default initialization arguments, that module-global instance
+ returns data about the current OS distribution (i.e. the distro this
+ package runs on).
+
+ Normally, it is not necessary to create additional instances of this class.
+ However, in situations where control is needed over the exact data sources
+ that are used, instances of this class can be created with a specific
+ distro release file, or a specific os-release file, or without invoking the
+ lsb_release command.
+ """
+
+ def __init__(self,
+ include_lsb=True,
+ os_release_file='',
+ distro_release_file='',
+ include_uname=True):
+ """
+ The initialization method of this class gathers information from the
+ available data sources, and stores that in private instance attributes.
+ Subsequent access to the information items uses these private instance
+ attributes, so that the data sources are read only once.
+
+ Parameters:
+
+ * ``include_lsb`` (bool): Controls whether the
+ `lsb_release command output`_ is included as a data source.
+
+ If the lsb_release command is not available in the program execution
+ path, the data source for the lsb_release command will be empty.
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause the default path name to
+ be used (see `os-release file`_ for details).
+
+ If the specified or defaulted os-release file does not exist, the
+ data source for the os-release file will be empty.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause a default search algorithm
+ to be used (see `distro release file`_ for details).
+
+ If the specified distro release file does not exist, or if no default
+ distro release file can be found, the data source for the distro
+ release file will be empty.
+
+ * ``include_uname`` (bool): Controls whether uname command output is
+ included as a data source. If the uname command is not available in
+ the program execution path the data source for the uname command will
+ be empty.
+
+ Public instance attributes:
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+ This controls whether the lsb information will be loaded.
+
+ * ``include_uname`` (bool): The result of the ``include_uname``
+ parameter. This controls whether the uname information will
+ be loaded.
+
+ Raises:
+
+ * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
+ release file.
+
+ * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
+ some issue (other than not being available in the program execution
+ path).
+
+ * :py:exc:`UnicodeError`: A data source has unexpected characters or
+ uses an unexpected encoding.
+ """
+ self.os_release_file = os_release_file or \
+ os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
+ self.distro_release_file = distro_release_file or '' # updated later
+ self.include_lsb = include_lsb
+ self.include_uname = include_uname
+
+ def __repr__(self):
+ """Return repr of all info
+ """
+ return \
+ "LinuxDistribution(" \
+ "os_release_file={self.os_release_file!r}, " \
+ "distro_release_file={self.distro_release_file!r}, " \
+ "include_lsb={self.include_lsb!r}, " \
+ "include_uname={self.include_uname!r}, " \
+ "_os_release_info={self._os_release_info!r}, " \
+ "_lsb_release_info={self._lsb_release_info!r}, " \
+ "_distro_release_info={self._distro_release_info!r}, " \
+ "_uname_info={self._uname_info!r})".format(
+ self=self)
+
+ def linux_distribution(self, full_distribution_name=True):
+ """
+ Return information about the OS distribution that is compatible
+ with Python's :func:`platform.linux_distribution`, supporting a subset
+ of its parameters.
+
+ For details, see :func:`distro.linux_distribution`.
+ """
+ return (
+ self.name() if full_distribution_name else self.id(),
+ self.version(),
+ self.codename()
+ )
+
+ def id(self):
+ """Return the distro ID of the OS distribution, as a string.
+
+ For details, see :func:`distro.id`.
+ """
+ def normalize(distro_id, table):
+ distro_id = distro_id.lower().replace(' ', '_')
+ return table.get(distro_id, distro_id)
+
+ distro_id = self.os_release_attr('id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_OS_ID)
+
+ distro_id = self.lsb_release_attr('distributor_id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_LSB_ID)
+
+ distro_id = self.distro_release_attr('id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ distro_id = self.uname_attr('id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ return ''
+
+ def name(self, pretty=False):
+ """
+ Return the name of the OS distribution, as a string.
+
+ For details, see :func:`distro.name`.
+ """
+ name = self.os_release_attr('name') \
+ or self.lsb_release_attr('distributor_id') \
+ or self.distro_release_attr('name') \
+ or self.uname_attr('name')
+ if pretty:
+ name = self.os_release_attr('pretty_name') \
+ or self.lsb_release_attr('description')
+ if not name:
+ name = self.distro_release_attr('name') \
+ or self.uname_attr('name')
+ version = self.version(pretty=True)
+ if version:
+ name = name + ' ' + version
+ return name or ''
+
+ def version(self, pretty=False, best=False):
+ """
+ Return the version of the OS distribution, as a string.
+
+ For details, see :func:`distro.version`.
+ """
+ versions = [
+ self.os_release_attr('version_id'),
+ self.lsb_release_attr('release'),
+ self.distro_release_attr('version_id'),
+ self._parse_distro_release_content(
+ self.os_release_attr('pretty_name')).get('version_id', ''),
+ self._parse_distro_release_content(
+ self.lsb_release_attr('description')).get('version_id', ''),
+ self.uname_attr('release')
+ ]
+ version = ''
+ if best:
+ # This algorithm uses the last version in priority order that has
+ # the best precision. If the versions are not in conflict, that
+ # does not matter; otherwise, using the last one instead of the
+ # first one might be considered a surprise.
+ for v in versions:
+ if v.count(".") > version.count(".") or version == '':
+ version = v
+ else:
+ for v in versions:
+ if v != '':
+ version = v
+ break
+ if pretty and version and self.codename():
+ version = u'{0} ({1})'.format(version, self.codename())
+ return version
+
+ def version_parts(self, best=False):
+ """
+ Return the version of the OS distribution, as a tuple of version
+ numbers.
+
+ For details, see :func:`distro.version_parts`.
+ """
+ version_str = self.version(best=best)
+ if version_str:
+ version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
+ matches = version_regex.match(version_str)
+ if matches:
+ major, minor, build_number = matches.groups()
+ return major, minor or '', build_number or ''
+ return '', '', ''
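+
+ # For example (illustrative): a version string of "7.1.1503" yields
+ # ('7', '1', '1503'), while "8" yields ('8', '', '').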
+
+ def major_version(self, best=False):
+ """
+ Return the major version number of the current distribution.
+
+ For details, see :func:`distro.major_version`.
+ """
+ return self.version_parts(best)[0]
+
+ def minor_version(self, best=False):
+ """
+ Return the minor version number of the current distribution.
+
+ For details, see :func:`distro.minor_version`.
+ """
+ return self.version_parts(best)[1]
+
+ def build_number(self, best=False):
+ """
+ Return the build number of the current distribution.
+
+ For details, see :func:`distro.build_number`.
+ """
+ return self.version_parts(best)[2]
+
+ def like(self):
+ """
+ Return the IDs of distributions that are like the OS distribution.
+
+ For details, see :func:`distro.like`.
+ """
+ return self.os_release_attr('id_like') or ''
+
+ def codename(self):
+ """
+ Return the codename of the OS distribution.
+
+ For details, see :func:`distro.codename`.
+ """
+ try:
+ # Handle os_release specially since distros might purposefully set
+ # this to empty string to have no codename
+ return self._os_release_info['codename']
+ except KeyError:
+ return self.lsb_release_attr('codename') \
+ or self.distro_release_attr('codename') \
+ or ''
+
+ def info(self, pretty=False, best=False):
+ """
+ Return certain machine-readable information about the OS
+ distribution.
+
+ For details, see :func:`distro.info`.
+ """
+ return dict(
+ id=self.id(),
+ version=self.version(pretty, best),
+ version_parts=dict(
+ major=self.major_version(best),
+ minor=self.minor_version(best),
+ build_number=self.build_number(best)
+ ),
+ like=self.like(),
+ codename=self.codename(),
+ )
+
+ def os_release_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the os-release file data source of the OS distribution.
+
+ For details, see :func:`distro.os_release_info`.
+ """
+ return self._os_release_info
+
+ def lsb_release_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the lsb_release command data source of the OS
+ distribution.
+
+ For details, see :func:`distro.lsb_release_info`.
+ """
+ return self._lsb_release_info
+
+ def distro_release_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the distro release file data source of the OS
+ distribution.
+
+ For details, see :func:`distro.distro_release_info`.
+ """
+ return self._distro_release_info
+
+ def uname_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the uname command data source of the OS distribution.
+
+ For details, see :func:`distro.uname_info`.
+ """
+ return self._uname_info
+
+ def os_release_attr(self, attribute):
+ """
+ Return a single named information item from the os-release file data
+ source of the OS distribution.
+
+ For details, see :func:`distro.os_release_attr`.
+ """
+ return self._os_release_info.get(attribute, '')
+
+ def lsb_release_attr(self, attribute):
+ """
+ Return a single named information item from the lsb_release command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.lsb_release_attr`.
+ """
+ return self._lsb_release_info.get(attribute, '')
+
+ def distro_release_attr(self, attribute):
+ """
+ Return a single named information item from the distro release file
+ data source of the OS distribution.
+
+ For details, see :func:`distro.distro_release_attr`.
+ """
+ return self._distro_release_info.get(attribute, '')
+
+ def uname_attr(self, attribute):
+ """
+ Return a single named information item from the uname command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.uname_attr`.
+ """
+ return self._uname_info.get(attribute, '')
+
+ @cached_property
+ def _os_release_info(self):
+ """
+ Get the information items from the specified os-release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if os.path.isfile(self.os_release_file):
+ with open(self.os_release_file) as release_file:
+ return self._parse_os_release_content(release_file)
+ return {}
+
+ @staticmethod
+ def _parse_os_release_content(lines):
+ """
+ Parse the lines of an os-release file.
+
+ Parameters:
+
+ * lines: Iterable through the lines in the os-release file.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ lexer = shlex.shlex(lines, posix=True)
+ lexer.whitespace_split = True
+
+ # The shlex module defines its `wordchars` variable using literals,
+ # making it dependent on the encoding of the Python source file.
+ # In Python 2.6 and 2.7, the shlex source file is encoded in
+ # 'iso-8859-1', and the `wordchars` variable is defined as a byte
+ # string. This causes a UnicodeDecodeError to be raised when the
+ # parsed content is a unicode object. The following fix resolves that
+ # (... but it should be fixed in shlex...):
+ if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
+ lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
+
+ tokens = list(lexer)
+ for token in tokens:
+ # At this point, all shell-like parsing has been done (i.e.
+ # comments processed, quotes and backslash escape sequences
+ # processed, multi-line values assembled, trailing newlines
+ # stripped, etc.), so the tokens are now either:
+ # * variable assignments: var=value
+ # * commands or their arguments (not allowed in os-release)
+ if '=' in token:
+ k, v = token.split('=', 1)
+ props[k.lower()] = v
+ else:
+ # Ignore any tokens that are not variable assignments
+ pass
+
+ if 'version_codename' in props:
+ # os-release added a version_codename field. Use that in
+ # preference to anything else. Note that some distros
+ # purposefully do not have code names; they should set
+ # version_codename="".
+ props['codename'] = props['version_codename']
+ elif 'ubuntu_codename' in props:
+ # Same as above but a non-standard field name used on older Ubuntus
+ props['codename'] = props['ubuntu_codename']
+ elif 'version' in props:
+ # If there is no version_codename, parse it from the version;
+ # the codename usually appears within parentheses.
+ codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
+ if codename:
+ codename = codename.group()
+ codename = codename.strip('()')
+ codename = codename.strip(',')
+ codename = codename.strip()
+ props['codename'] = codename
+
+ return props
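+ # Example (illustrative): parsing the two os-release lines
+ # NAME="Ubuntu"
+ # VERSION="16.04.3 LTS (Xenial Xerus)"
+ # yields {'name': 'Ubuntu', 'version': '16.04.3 LTS (Xenial Xerus)',
+ # 'codename': 'Xenial Xerus'}; the codename is extracted from the
+ # parenthesized part of VERSION because VERSION_CODENAME is absent.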
+
+ @cached_property
+ def _lsb_release_info(self):
+ """
+ Get the information items from the lsb_release command output.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if not self.include_lsb:
+ return {}
+ with open(os.devnull, 'w') as devnull:
+ try:
+ cmd = ('lsb_release', '-a')
+ stdout = _check_output(cmd, stderr=devnull)
+ except OSError: # Command not found
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_lsb_release_content(content)
+
+ @staticmethod
+ def _parse_lsb_release_content(lines):
+ """
+ Parse the output of the lsb_release command.
+
+ Parameters:
+
+ * lines: Iterable through the lines of the lsb_release output.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ for line in lines:
+ kv = line.strip('\n').split(':', 1)
+ if len(kv) != 2:
+ # Ignore lines without colon.
+ continue
+ k, v = kv
+ props.update({k.replace(' ', '_').lower(): v.strip()})
+ return props
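+ # Example (illustrative): the line 'Distributor ID:\tUbuntu' becomes
+ # {'distributor_id': 'Ubuntu'}; keys are lower-cased and spaces are
+ # replaced with underscores.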
+
+ @cached_property
+ def _uname_info(self):
+ with open(os.devnull, 'w') as devnull:
+ try:
+ cmd = ('uname', '-rs')
+ stdout = _check_output(cmd, stderr=devnull)
+ except OSError:
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_uname_content(content)
+
+ @staticmethod
+ def _parse_uname_content(lines):
+ props = {}
+ match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip())
+ if match:
+ name, version = match.groups()
+
+ # This is to prevent the Linux kernel version from
+ # appearing as the 'best' version on otherwise
+ # identifiable distributions.
+ if name == 'Linux':
+ return {}
+ props['id'] = name.lower()
+ props['name'] = name
+ props['release'] = version
+ return props
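+ # Example (illustrative): a first line of 'FreeBSD 12.1' yields
+ # {'id': 'freebsd', 'name': 'FreeBSD', 'release': '12.1'}, while
+ # 'Linux 4.15.0' yields {}, so a kernel version never masks a
+ # distribution version.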
+
+ @staticmethod
+ def _to_str(text):
+ encoding = sys.getfilesystemencoding()
+ encoding = 'utf-8' if encoding == 'ascii' else encoding
+
+ if sys.version_info[0] >= 3:
+ if isinstance(text, bytes):
+ return text.decode(encoding)
+ else:
+ if isinstance(text, unicode): # noqa pylint: disable=undefined-variable
+ return text.encode(encoding)
+
+ return text
+
+ @cached_property
+ def _distro_release_info(self):
+ """
+ Get the information items from the specified distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if self.distro_release_file:
+ # If it was specified, we use it and parse what we can, even if
+ # its file name or content does not match the expected pattern.
+ distro_info = self._parse_distro_release_file(
+ self.distro_release_file)
+ basename = os.path.basename(self.distro_release_file)
+ # The file name pattern for user-specified distro release files
+ # is somewhat more tolerant (compared to when searching for the
+ # file), because we want to use what was specified as best as
+ # possible.
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if 'name' in distro_info \
+ and 'cloudlinux' in distro_info['name'].lower():
+ distro_info['id'] = 'cloudlinux'
+ elif match:
+ distro_info['id'] = match.group(1)
+ return distro_info
+ else:
+ try:
+ basenames = os.listdir(_UNIXCONFDIR)
+ # We sort for repeatability in cases where there are multiple
+ # distro specific files; e.g. CentOS, Oracle, Enterprise all
+ # containing `redhat-release` on top of their own.
+ basenames.sort()
+ except OSError:
+ # This may occur when /etc is not readable but we can't be
+ # sure about the *-release files. Check common entries of
+ # /etc for information. If they turn out not to be there,
+ # the error is handled in `_parse_distro_release_file()`.
+ basenames = ['SuSE-release',
+ 'arch-release',
+ 'base-release',
+ 'centos-release',
+ 'fedora-release',
+ 'gentoo-release',
+ 'mageia-release',
+ 'mandrake-release',
+ 'mandriva-release',
+ 'mandrivalinux-release',
+ 'manjaro-release',
+ 'oracle-release',
+ 'redhat-release',
+ 'sl-release',
+ 'slackware-version']
+ for basename in basenames:
+ if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
+ continue
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if match:
+ filepath = os.path.join(_UNIXCONFDIR, basename)
+ distro_info = self._parse_distro_release_file(filepath)
+ if 'name' in distro_info:
+ # The name is always present if the pattern matches
+ self.distro_release_file = filepath
+ distro_info['id'] = match.group(1)
+ if 'cloudlinux' in distro_info['name'].lower():
+ distro_info['id'] = 'cloudlinux'
+ return distro_info
+ return {}
+
+ def _parse_distro_release_file(self, filepath):
+ """
+ Parse a distro release file.
+
+ Parameters:
+
+ * filepath: Path name of the distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ try:
+ with open(filepath) as fp:
+ # Only parse the first line. For instance, on SLES there
+ # are multiple lines. We don't want them...
+ return self._parse_distro_release_content(fp.readline())
+ except (OSError, IOError):
+ # Ignore not being able to read a specific, seemingly version
+ # related file.
+ # See https://github.com/nir0s/distro/issues/162
+ return {}
+
+ @staticmethod
+ def _parse_distro_release_content(line):
+ """
+ Parse a line from a distro release file.
+
+ Parameters:
+ * line: Line from the distro release file. Must be a unicode string
+ or a UTF-8 encoded byte string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
+ line.strip()[::-1])
+ distro_info = {}
+ if matches:
+ # regexp ensures non-None
+ distro_info['name'] = matches.group(3)[::-1]
+ if matches.group(2):
+ distro_info['version_id'] = matches.group(2)[::-1]
+ if matches.group(1):
+ distro_info['codename'] = matches.group(1)[::-1]
+ elif line:
+ distro_info['name'] = line.strip()
+ return distro_info
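+ # Example (illustrative): the line 'CentOS Linux release 7.4.1708 (Core)'
+ # should parse to {'name': 'CentOS Linux', 'version_id': '7.4.1708',
+ # 'codename': 'Core'} via the reversed-pattern match.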
+
+
+_distro = LinuxDistribution()
+
+
+def main():
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ logger.info('Name: %s', name(pretty=True))
+ distribution_version = version(pretty=True)
+ logger.info('Version: %s', distribution_version)
+ distribution_codename = codename()
+ logger.info('Codename: %s', distribution_codename)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/module_utils/facts/__init__.py b/lib/ansible/module_utils/facts/__init__.py
new file mode 100644
index 00000000..96ab778b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/__init__.py
@@ -0,0 +1,34 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# import from the compat api because 2.0-2.3 had a module_utils.facts.ansible_facts
+# and get_all_facts in top level namespace
+from ansible.module_utils.facts.compat import ansible_facts, get_all_facts # noqa
diff --git a/lib/ansible/module_utils/facts/ansible_collector.py b/lib/ansible/module_utils/facts/ansible_collector.py
new file mode 100644
index 00000000..8ca0089e
--- /dev/null
+++ b/lib/ansible/module_utils/facts/ansible_collector.py
@@ -0,0 +1,142 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+import sys
+
+from ansible.module_utils.facts import timeout
+from ansible.module_utils.facts import collector
+
+
+class AnsibleFactCollector(collector.BaseFactCollector):
+ '''A FactCollector that returns results under the 'ansible_facts' top-level key.
+
+ If a namespace is provided, facts will be collected under that namespace.
+ For example, an ansible.module_utils.facts.namespace.PrefixFactNamespace(prefix='ansible_')
+
+ Has a 'from_gather_subset()' constructor that populates collectors based on a
+ gather_subset specifier.'''
+
+ def __init__(self, collectors=None, namespace=None, filter_spec=None):
+
+ super(AnsibleFactCollector, self).__init__(collectors=collectors,
+ namespace=namespace)
+
+ self.filter_spec = filter_spec
+
+ def _filter(self, facts_dict, filter_spec):
+ # assume filter_spec='' is equivalent to filter_spec='*'
+ if not filter_spec or filter_spec == '*':
+ return facts_dict
+
+ return [(x, y) for x, y in facts_dict.items() if fnmatch.fnmatch(x, filter_spec)]
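+ # Note: for a non-trivial filter_spec this returns a list of (key, value)
+ # pairs rather than a dict; dict.update() in collect() accepts either
+ # form. For example (illustrative), filter_spec='ansible_dns' keeps only
+ # the keys that fnmatch that pattern.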
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+
+ facts_dict = {}
+
+ for collector_obj in self.collectors:
+ info_dict = {}
+
+ try:
+
+ # Note: this collects with namespaces, so collected_facts also includes namespaces
+ info_dict = collector_obj.collect_with_namespace(module=module,
+ collected_facts=collected_facts)
+ except Exception as e:
+ sys.stderr.write(repr(e))
+ sys.stderr.write('\n')
+
+ # shallow copy of the new facts to pass to each collector in collected_facts so facts
+ # can reference other facts they depend on.
+ collected_facts.update(info_dict.copy())
+
+ # NOTE: If we want complicated fact dict merging, this is where it would hook in
+ facts_dict.update(self._filter(info_dict, self.filter_spec))
+
+ return facts_dict
+
+
+class CollectorMetaDataCollector(collector.BaseFactCollector):
+ '''Collector that provides facts with the gather_subset metadata.'''
+
+ name = 'gather_subset'
+ _fact_ids = set([])
+
+ def __init__(self, collectors=None, namespace=None, gather_subset=None, module_setup=None):
+ super(CollectorMetaDataCollector, self).__init__(collectors, namespace)
+ self.gather_subset = gather_subset
+ self.module_setup = module_setup
+
+ def collect(self, module=None, collected_facts=None):
+ meta_facts = {'gather_subset': self.gather_subset}
+ if self.module_setup:
+ meta_facts['module_setup'] = self.module_setup
+ return meta_facts
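+ # Illustrative output: with gather_subset=['all'] and module_setup=True,
+ # collect() returns {'gather_subset': ['all'], 'module_setup': True}.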
+
+
+def get_ansible_collector(all_collector_classes,
+ namespace=None,
+ filter_spec=None,
+ gather_subset=None,
+ gather_timeout=None,
+ minimal_gather_subset=None):
+
+ filter_spec = filter_spec or '*'
+ gather_subset = gather_subset or ['all']
+ gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
+ minimal_gather_subset = minimal_gather_subset or frozenset()
+
+ collector_classes = \
+ collector.collector_classes_from_gather_subset(
+ all_collector_classes=all_collector_classes,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout)
+
+ collectors = []
+ for collector_class in collector_classes:
+ collector_obj = collector_class(namespace=namespace)
+ collectors.append(collector_obj)
+
+ # Add a collector that knows what gather_subset we used so it can provide a fact
+ collector_meta_data_collector = \
+ CollectorMetaDataCollector(gather_subset=gather_subset,
+ module_setup=True)
+ collectors.append(collector_meta_data_collector)
+
+ fact_collector = \
+ AnsibleFactCollector(collectors=collectors,
+ filter_spec=filter_spec,
+ namespace=namespace)
+
+ return fact_collector
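+
+
+# Minimal usage sketch (illustrative, not part of the upstream file; assumes
+# an AnsibleModule instance named 'module'):
+#
+# from ansible.module_utils.facts import default_collectors
+# fact_collector = get_ansible_collector(default_collectors.collectors,
+# gather_subset=['min'])
+# facts = fact_collector.collect(module=module)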
diff --git a/lib/ansible/module_utils/facts/collector.py b/lib/ansible/module_utils/facts/collector.py
new file mode 100644
index 00000000..47f1bec8
--- /dev/null
+++ b/lib/ansible/module_utils/facts/collector.py
@@ -0,0 +1,400 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+
+import platform
+
+from ansible.module_utils.facts import timeout
+
+
+class CycleFoundInFactDeps(Exception):
+ '''Indicates there is a cycle in fact collector deps
+
+ If collector-B requires collector-A, and collector-A requires
+ collector-B, that is a cycle. In that case, there is no ordering
+ that satisfies both B before A and A before B, and this error
+ is raised.
+ '''
+ pass
+
+
+class UnresolvedFactDep(ValueError):
+ pass
+
+
+class CollectorNotFoundError(KeyError):
+ pass
+
+
+class BaseFactCollector:
+ _fact_ids = set()
+
+ _platform = 'Generic'
+ name = None
+ required_facts = set()
+
+ def __init__(self, collectors=None, namespace=None):
+ '''Base class for things that collect facts.
+
+ 'collectors' is an optional list of other FactCollectors for composition.'''
+ self.collectors = collectors or []
+
+ # self.namespace is an object with a 'transform' method that transforms
+ # the name to indicate the namespace (i.e., adds a prefix or suffix).
+ self.namespace = namespace
+
+ self.fact_ids = set([self.name])
+ self.fact_ids.update(self._fact_ids)
+
+ @classmethod
+ def platform_match(cls, platform_info):
+ if platform_info.get('system', None) == cls._platform:
+ return cls
+ return None
+
+ def _transform_name(self, key_name):
+ if self.namespace:
+ return self.namespace.transform(key_name)
+ return key_name
+
+ def _transform_dict_keys(self, fact_dict):
+ '''Update a dict's keys to use new names as transformed by self._transform_name.'''
+
+ for old_key in list(fact_dict.keys()):
+ new_key = self._transform_name(old_key)
+ # pop the item by old_key and replace it using new_key
+ fact_dict[new_key] = fact_dict.pop(old_key)
+ return fact_dict
+
+ # TODO/MAYBE: rename to 'collect' and add 'collect_without_namespace'
+ def collect_with_namespace(self, module=None, collected_facts=None):
+ # collect, then transform the key names if needed
+ facts_dict = self.collect(module=module, collected_facts=collected_facts)
+ if self.namespace:
+ facts_dict = self._transform_dict_keys(facts_dict)
+ return facts_dict
+
+ def collect(self, module=None, collected_facts=None):
+ '''do the fact collection
+
+ 'collected_facts' is an object (likely a dict) that holds all previously
+ collected facts. It is intended to be used when a FactCollector needs to
+ reference another fact (for example, the system arch) and should not
+ (usually) be modified.
+
+ Returns a dict of facts.
+
+ '''
+ facts_dict = {}
+ return facts_dict
+
+
+def get_collector_names(valid_subsets=None,
+ minimal_gather_subset=None,
+ gather_subset=None,
+ aliases_map=None,
+ platform_info=None):
+ '''return a set of FactCollector names based on gather_subset spec.
+
+ gather_subset is a spec describing which facts to gather.
+ valid_subsets is a frozenset of potential matches for gather_subset ('all', 'network', etc.)
+ minimal_gather_subset is a frozenset of matches to always use, even for gather_subset='!all'
+ '''
+
+ # Retrieve module parameters
+ gather_subset = gather_subset or ['all']
+
+ # the list of everything that 'all' expands to
+ valid_subsets = valid_subsets or frozenset()
+
+ # if provided, minimal_gather_subset is always added, even after all negations
+ minimal_gather_subset = minimal_gather_subset or frozenset()
+
+ aliases_map = aliases_map or defaultdict(set)
+
+ # Retrieve all facts elements
+ additional_subsets = set()
+ exclude_subsets = set()
+
+ # The total always starts with the min set, then
+ # adds the additions in gather_subset, then
+ # removes all of the excludes, except for any
+ # explicitly requested subsets.
+ gather_subset_with_min = ['min']
+ gather_subset_with_min.extend(gather_subset)
+
+ # subsets we mention in gather_subset explicitly, except for 'all'/'min'
+ explicitly_added = set()
+
+ for subset in gather_subset_with_min:
+ subset_id = subset
+ if subset_id == 'min':
+ additional_subsets.update(minimal_gather_subset)
+ continue
+ if subset_id == 'all':
+ additional_subsets.update(valid_subsets)
+ continue
+ if subset_id.startswith('!'):
+ subset = subset[1:]
+ if subset == 'min':
+ exclude_subsets.update(minimal_gather_subset)
+ continue
+ if subset == 'all':
+ exclude_subsets.update(valid_subsets - minimal_gather_subset)
+ continue
+ exclude = True
+ else:
+ exclude = False
+
+ if exclude:
+ # include 'devices', 'dmi' etc for '!hardware'
+ exclude_subsets.update(aliases_map.get(subset, set()))
+ exclude_subsets.add(subset)
+ else:
+ # NOTE: only adding an unknown gather subset is considered an error. Asking to
+ # exclude an unknown gather subset is ignored.
+ if subset_id not in valid_subsets:
+ raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" %
+ (subset, ", ".join(sorted(valid_subsets))))
+
+ explicitly_added.add(subset)
+ additional_subsets.add(subset)
+
+ if not additional_subsets:
+ additional_subsets.update(valid_subsets)
+
+ additional_subsets.difference_update(exclude_subsets - explicitly_added)
+
+ return additional_subsets
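+ # Example (illustrative): with valid_subsets={'network', 'hardware'},
+ # minimal_gather_subset={'platform'} and gather_subset=['!all'], the
+ # result is {'platform'} -- the minimal set survives a blanket exclusion.
+ # gather_subset=['!all', 'network'] would instead give
+ # {'platform', 'network'}.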
+
+
+def find_collectors_for_platform(all_collector_classes, compat_platforms):
+ found_collectors = set()
+ found_collectors_names = set()
+
+ # start from specific platform, then try generic
+ for compat_platform in compat_platforms:
+ platform_match = None
+ for all_collector_class in all_collector_classes:
+
+ # ask the class if it is compatible with the platform info
+ platform_match = all_collector_class.platform_match(compat_platform)
+
+ if not platform_match:
+ continue
+
+ primary_name = all_collector_class.name
+
+ if primary_name not in found_collectors_names:
+ found_collectors.add(all_collector_class)
+ found_collectors_names.add(all_collector_class.name)
+
+ return found_collectors
+
+
+def build_fact_id_to_collector_map(collectors_for_platform):
+ fact_id_to_collector_map = defaultdict(list)
+ aliases_map = defaultdict(set)
+
+ for collector_class in collectors_for_platform:
+ primary_name = collector_class.name
+
+ fact_id_to_collector_map[primary_name].append(collector_class)
+
+ for fact_id in collector_class._fact_ids:
+ fact_id_to_collector_map[fact_id].append(collector_class)
+ aliases_map[primary_name].add(fact_id)
+
+ return fact_id_to_collector_map, aliases_map
+
+
+def select_collector_classes(collector_names, all_fact_subsets):
+ seen_collector_classes = set()
+
+ selected_collector_classes = []
+
+ for collector_name in collector_names:
+ collector_classes = all_fact_subsets.get(collector_name, [])
+ for collector_class in collector_classes:
+ if collector_class not in seen_collector_classes:
+ selected_collector_classes.append(collector_class)
+ seen_collector_classes.add(collector_class)
+
+ return selected_collector_classes
+
+
+def _get_requires_by_collector_name(collector_name, all_fact_subsets):
+ required_facts = set()
+
+ try:
+ collector_classes = all_fact_subsets[collector_name]
+ except KeyError:
+ raise CollectorNotFoundError('Fact collector "%s" not found' % collector_name)
+ for collector_class in collector_classes:
+ required_facts.update(collector_class.required_facts)
+ return required_facts
+
+
+def find_unresolved_requires(collector_names, all_fact_subsets):
+ '''Find any collector names that have unresolved requires
+
+ Returns a set of required fact names that are not provided by any
+ name in collector_names (i.e. unmet entries from the collector
+ classes' required_facts).
+ '''
+ unresolved = set()
+
+ for collector_name in collector_names:
+ required_facts = _get_requires_by_collector_name(collector_name, all_fact_subsets)
+ for required_fact in required_facts:
+ if required_fact not in collector_names:
+ unresolved.add(required_fact)
+
+ return unresolved
+
+
+def resolve_requires(unresolved_requires, all_fact_subsets):
+ new_names = set()
+ failed = []
+ for unresolved in unresolved_requires:
+ if unresolved in all_fact_subsets:
+ new_names.add(unresolved)
+ else:
+ failed.append(unresolved)
+
+ if failed:
+ raise UnresolvedFactDep('unresolved fact dep %s' % ','.join(failed))
+ return new_names
+
+
+def build_dep_data(collector_names, all_fact_subsets):
+ dep_map = defaultdict(set)
+ for collector_name in collector_names:
+ collector_deps = set()
+ for collector in all_fact_subsets[collector_name]:
+ for dep in collector.required_facts:
+ collector_deps.add(dep)
+ dep_map[collector_name] = collector_deps
+ return dep_map
+
+
+def tsort(dep_map):
+ sorted_list = []
+
+ unsorted_map = dep_map.copy()
+
+ while unsorted_map:
+ acyclic = False
+ for node, edges in list(unsorted_map.items()):
+ for edge in edges:
+ if edge in unsorted_map:
+ break
+ else:
+ acyclic = True
+ del unsorted_map[node]
+ sorted_list.append((node, edges))
+
+ if not acyclic:
+ raise CycleFoundInFactDeps('Unable to tsort deps, there was a cycle in the graph. sorted=%s' % sorted_list)
+
+ return sorted_list
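+ # Example (illustrative): tsort({'network': {'platform'}, 'platform': set()})
+ # returns [('platform', set()), ('network', {'platform'})], i.e. 'platform'
+ # is ordered before the collector that requires it.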
+
+
+def _solve_deps(collector_names, all_fact_subsets):
+ unresolved = collector_names.copy()
+ solutions = collector_names.copy()
+
+ while True:
+ unresolved = find_unresolved_requires(solutions, all_fact_subsets)
+ if unresolved == set():
+ break
+
+ new_names = resolve_requires(unresolved, all_fact_subsets)
+ solutions.update(new_names)
+
+ return solutions
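+ # Example (illustrative): if 'distribution' requires the 'platform' fact
+ # and both appear in all_fact_subsets, _solve_deps({'distribution'}, ...)
+ # returns {'distribution', 'platform'}, pulling the dependency in
+ # transitively.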
+
+
+def collector_classes_from_gather_subset(all_collector_classes=None,
+ valid_subsets=None,
+ minimal_gather_subset=None,
+ gather_subset=None,
+ gather_timeout=None,
+ platform_info=None):
+ '''return a list of collector classes that match the args'''
+
+ # use gather_subset etc. to get the list of collectors
+
+ all_collector_classes = all_collector_classes or []
+
+ minimal_gather_subset = minimal_gather_subset or frozenset()
+
+ platform_info = platform_info or {'system': platform.system()}
+
+ gather_timeout = gather_timeout or timeout.DEFAULT_GATHER_TIMEOUT
+
+ # tweak the modules GATHER_TIMEOUT
+ timeout.GATHER_TIMEOUT = gather_timeout
+
+ valid_subsets = valid_subsets or frozenset()
+
+ # maps alias names like 'hardware' to the list of names that are part of hardware
+ # like 'devices' and 'dmi'
+ aliases_map = defaultdict(set)
+
+ compat_platforms = [platform_info, {'system': 'Generic'}]
+
+ collectors_for_platform = find_collectors_for_platform(all_collector_classes, compat_platforms)
+
+ # all_fact_subsets maps the subset name ('hardware') to the classes that provide it.
+
+ # TODO: name collisions here? are there facts with the same name as a gather_subset (all, network, hardware, virtual, ohai, facter)
+ all_fact_subsets, aliases_map = build_fact_id_to_collector_map(collectors_for_platform)
+
+ all_valid_subsets = frozenset(all_fact_subsets.keys())
+
+ # expand any fact_id/collectorname/gather_subset term ('all', 'env', etc) to the list of names it represents
+ collector_names = get_collector_names(valid_subsets=all_valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset,
+ aliases_map=aliases_map,
+ platform_info=platform_info)
+
+ complete_collector_names = _solve_deps(collector_names, all_fact_subsets)
+
+ dep_map = build_dep_data(complete_collector_names, all_fact_subsets)
+
+ ordered_deps = tsort(dep_map)
+ ordered_collector_names = [x[0] for x in ordered_deps]
+
+ selected_collector_classes = select_collector_classes(ordered_collector_names,
+ all_fact_subsets)
+
+ return selected_collector_classes
diff --git a/lib/ansible/module_utils/facts/compat.py b/lib/ansible/module_utils/facts/compat.py
new file mode 100644
index 00000000..a69fee37
--- /dev/null
+++ b/lib/ansible/module_utils/facts/compat.py
@@ -0,0 +1,87 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+from ansible.module_utils.facts import default_collectors
+from ansible.module_utils.facts import ansible_collector
+
+
+def get_all_facts(module):
+ '''compat api for ansible 2.2/2.3 module_utils.facts.get_all_facts method
+
+ Expects module to be an instance of AnsibleModule, with a 'gather_subset' param.
+
+ returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
+ the fact value.'''
+
+ gather_subset = module.params['gather_subset']
+ return ansible_facts(module, gather_subset=gather_subset)
+
+
+def ansible_facts(module, gather_subset=None):
+ '''Compat api for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts method
+
+ 2.2/2.3 expect a gather_subset arg.
+ 2.0/2.1 do not accept a gather_subset arg.
+
+ So make gather_subset an optional arg, defaulting to the module's
+ 'gather_subset' param (or ['all'] if that is unset).
+
+ 'module' should be an instance of an AnsibleModule.
+
+ returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
+ the fact value.
+ '''
+
+ gather_subset = gather_subset or module.params.get('gather_subset', ['all'])
+ gather_timeout = module.params.get('gather_timeout', 10)
+ filter_spec = module.params.get('filter', '*')
+
+ minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
+ 'distribution', 'dns', 'env', 'fips', 'local',
+ 'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
+ 'service_mgr', 'ssh_pub_keys', 'user'])
+
+ all_collector_classes = default_collectors.collectors
+
+ # don't add a prefix
+ namespace = PrefixFactNamespace(namespace_name='ansible', prefix='')
+
+ fact_collector = \
+ ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
+ namespace=namespace,
+ filter_spec=filter_spec,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ minimal_gather_subset=minimal_gather_subset)
+
+ facts_dict = fact_collector.collect(module=module)
+
+ return facts_dict
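+
+
+# Compat usage sketch (illustrative; 'module' is an AnsibleModule whose params
+# may include 'gather_subset', 'gather_timeout' and 'filter'):
+#
+# facts = ansible_facts(module, gather_subset=['min'])
+# distro_name = facts.get('distribution')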
diff --git a/lib/ansible/module_utils/facts/default_collectors.py b/lib/ansible/module_utils/facts/default_collectors.py
new file mode 100644
index 00000000..3ac35704
--- /dev/null
+++ b/lib/ansible/module_utils/facts/default_collectors.py
@@ -0,0 +1,172 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.facts.other.facter import FacterFactCollector
+from ansible.module_utils.facts.other.ohai import OhaiFactCollector
+
+from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
+from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
+from ansible.module_utils.facts.system.chroot import ChrootFactCollector
+from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector
+from ansible.module_utils.facts.system.distribution import DistributionFactCollector
+from ansible.module_utils.facts.system.date_time import DateTimeFactCollector
+from ansible.module_utils.facts.system.env import EnvFactCollector
+from ansible.module_utils.facts.system.dns import DnsFactCollector
+from ansible.module_utils.facts.system.fips import FipsFactCollector
+from ansible.module_utils.facts.system.local import LocalFactCollector
+from ansible.module_utils.facts.system.lsb import LSBFactCollector
+from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector
+from ansible.module_utils.facts.system.pkg_mgr import OpenBSDPkgMgrFactCollector
+from ansible.module_utils.facts.system.platform import PlatformFactCollector
+from ansible.module_utils.facts.system.python import PythonFactCollector
+from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
+from ansible.module_utils.facts.system.user import UserFactCollector
+
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.hardware.aix import AIXHardwareCollector
+from ansible.module_utils.facts.hardware.darwin import DarwinHardwareCollector
+from ansible.module_utils.facts.hardware.dragonfly import DragonFlyHardwareCollector
+from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardwareCollector
+from ansible.module_utils.facts.hardware.hpux import HPUXHardwareCollector
+from ansible.module_utils.facts.hardware.hurd import HurdHardwareCollector
+from ansible.module_utils.facts.hardware.linux import LinuxHardwareCollector
+from ansible.module_utils.facts.hardware.netbsd import NetBSDHardwareCollector
+from ansible.module_utils.facts.hardware.openbsd import OpenBSDHardwareCollector
+from ansible.module_utils.facts.hardware.sunos import SunOSHardwareCollector
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.aix import AIXNetworkCollector
+from ansible.module_utils.facts.network.darwin import DarwinNetworkCollector
+from ansible.module_utils.facts.network.dragonfly import DragonFlyNetworkCollector
+from ansible.module_utils.facts.network.fc_wwn import FcWwnInitiatorFactCollector
+from ansible.module_utils.facts.network.freebsd import FreeBSDNetworkCollector
+from ansible.module_utils.facts.network.hpux import HPUXNetworkCollector
+from ansible.module_utils.facts.network.hurd import HurdNetworkCollector
+from ansible.module_utils.facts.network.linux import LinuxNetworkCollector
+from ansible.module_utils.facts.network.iscsi import IscsiInitiatorNetworkCollector
+from ansible.module_utils.facts.network.nvme import NvmeInitiatorNetworkCollector
+from ansible.module_utils.facts.network.netbsd import NetBSDNetworkCollector
+from ansible.module_utils.facts.network.openbsd import OpenBSDNetworkCollector
+from ansible.module_utils.facts.network.sunos import SunOSNetworkCollector
+
+from ansible.module_utils.facts.virtual.base import VirtualCollector
+from ansible.module_utils.facts.virtual.dragonfly import DragonFlyVirtualCollector
+from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtualCollector
+from ansible.module_utils.facts.virtual.hpux import HPUXVirtualCollector
+from ansible.module_utils.facts.virtual.linux import LinuxVirtualCollector
+from ansible.module_utils.facts.virtual.netbsd import NetBSDVirtualCollector
+from ansible.module_utils.facts.virtual.openbsd import OpenBSDVirtualCollector
+from ansible.module_utils.facts.virtual.sunos import SunOSVirtualCollector
+
+ # these should always be first, as most other facts depend on them
+_base = [
+ PlatformFactCollector,
+ DistributionFactCollector,
+ LSBFactCollector
+]
+
+# These restrict what is possible in others
+_restrictive = [
+ SelinuxFactCollector,
+ ApparmorFactCollector,
+ ChrootFactCollector,
+ FipsFactCollector
+]
+
+# general info, not required but probably useful for other facts
+_general = [
+ PythonFactCollector,
+ SystemCapabilitiesFactCollector,
+ PkgMgrFactCollector,
+ OpenBSDPkgMgrFactCollector,
+ ServiceMgrFactCollector,
+ CmdLineFactCollector,
+ DateTimeFactCollector,
+ EnvFactCollector,
+ SshPubKeyFactCollector,
+ UserFactCollector
+]
+
+# virtual, this might also limit hardware/networking
+_virtual = [
+ VirtualCollector,
+ DragonFlyVirtualCollector,
+ FreeBSDVirtualCollector,
+ LinuxVirtualCollector,
+ OpenBSDVirtualCollector,
+ NetBSDVirtualCollector,
+ SunOSVirtualCollector,
+ HPUXVirtualCollector
+]
+
+_hardware = [
+ HardwareCollector,
+ AIXHardwareCollector,
+ DarwinHardwareCollector,
+ DragonFlyHardwareCollector,
+ FreeBSDHardwareCollector,
+ HPUXHardwareCollector,
+ HurdHardwareCollector,
+ LinuxHardwareCollector,
+ NetBSDHardwareCollector,
+ OpenBSDHardwareCollector,
+ SunOSHardwareCollector
+]
+
+_network = [
+ DnsFactCollector,
+ FcWwnInitiatorFactCollector,
+ NetworkCollector,
+ AIXNetworkCollector,
+ DarwinNetworkCollector,
+ DragonFlyNetworkCollector,
+ FreeBSDNetworkCollector,
+ HPUXNetworkCollector,
+ HurdNetworkCollector,
+ IscsiInitiatorNetworkCollector,
+ NvmeInitiatorNetworkCollector,
+ LinuxNetworkCollector,
+ NetBSDNetworkCollector,
+ OpenBSDNetworkCollector,
+ SunOSNetworkCollector
+]
+
+# other fact sources
+_extra_facts = [
+ LocalFactCollector,
+ FacterFactCollector,
+ OhaiFactCollector
+]
+
+# TODO: make config driven
+collectors = _base + _restrictive + _general + _virtual + _hardware + _network + _extra_facts
diff --git a/lib/ansible/module_utils/facts/hardware/__init__.py b/lib/ansible/module_utils/facts/hardware/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/__init__.py
diff --git a/lib/ansible/module_utils/facts/hardware/aix.py b/lib/ansible/module_utils/facts/hardware/aix.py
new file mode 100644
index 00000000..20f09232
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/aix.py
@@ -0,0 +1,252 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.utils import get_mount_size
+
+
+class AIXHardware(Hardware):
+ """
+ AIX-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ """
+ platform = 'AIX'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ vgs_facts = self.get_vgs_facts()
+ mount_facts = self.get_mount_facts()
+ devices_facts = self.get_device_facts()
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(vgs_facts)
+ hardware_facts.update(mount_facts)
+ hardware_facts.update(devices_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+ cpu_facts['processor'] = []
+
+ rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
+ if out:
+ i = 0
+ for line in out.splitlines():
+
+ if 'Available' in line:
+ if i == 0:
+ data = line.split(' ')
+ cpudev = data[0]
+
+ i += 1
+ cpu_facts['processor_count'] = int(i)
+
+ rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
+
+ data = out.split(' ')
+ cpu_facts['processor'] = data[1]
+
+ rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
+ if out:
+ data = out.split(' ')
+ cpu_facts['processor_cores'] = int(data[1])
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ pagesize = 4096
+ rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
+ for line in out.splitlines():
+ data = line.split()
+ if 'memory pages' in line:
+ pagecount = int(data[0])
+ if 'free pages' in line:
+ freecount = int(data[0])
+ memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
+ memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
+ # Get swap info via lsps -s, whose output looks like:
+ # Total Paging Space Percent Used
+ # 512MB 1%
+ #
+ rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
+ if out:
+ lines = out.splitlines()
+ data = lines[1].split()
+ swaptotal_mb = int(data[0].rstrip('MB'))
+ percused = int(data[1].rstrip('%'))
+ memory_facts['swaptotal_mb'] = swaptotal_mb
+ memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
+
+ return memory_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+
+ rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
+ data = out.split()
+ dmi_facts['firmware_version'] = data[1].strip('IBM,')
+ lsconf_path = self.module.get_bin_path("lsconf")
+ if lsconf_path:
+ rc, out, err = self.module.run_command(lsconf_path)
+ if rc == 0 and out:
+ for line in out.splitlines():
+ data = line.split(':')
+ if 'Machine Serial Number' in line:
+ dmi_facts['product_serial'] = data[1].strip()
+ if 'LPAR Info' in line:
+ dmi_facts['lpar_info'] = data[1].strip()
+ if 'System Model' in line:
+ dmi_facts['product_name'] = data[1].strip()
+ return dmi_facts
+
+ def get_vgs_facts(self):
+ """
+ Get vg and pv Facts
+ rootvg:
+ PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
+ hdisk0 active 546 0 00..00..00..00..00
+ hdisk1 active 546 113 00..00..00..21..92
+ realsyncvg:
+ PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
+ hdisk74 active 1999 6 00..00..00..00..06
+ testvg:
+ PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
+ hdisk105 active 999 838 200..39..199..200..200
+ hdisk106 active 999 599 200..00..00..199..200
+ """
+
+ vgs_facts = {}
+ lsvg_path = self.module.get_bin_path("lsvg")
+ xargs_path = self.module.get_bin_path("xargs")
+ cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
+ if lsvg_path and xargs_path:
+ rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc == 0 and out:
+ vgs_facts['vgs'] = {}
+ for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
+ vgs_facts['vgs'][m.group(1)] = []
+ pp_size = 0
+ cmd = "%s %s" % (lsvg_path, m.group(1))
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0 and out:
+ pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
+ for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
+ pv_info = {'pv_name': n.group(1),
+ 'pv_state': n.group(2),
+ 'total_pps': n.group(3),
+ 'free_pps': n.group(4),
+ 'pp_size': pp_size
+ }
+ vgs_facts['vgs'][m.group(1)].append(pv_info)
+
+ return vgs_facts
+
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+
+ mounts = []
+
+ # AIX does not have mtab, so the mount command is the only source of this
+ # info (short of using API calls to get the same data)
+ mount_path = self.module.get_bin_path('mount')
+ rc, mount_out, err = self.module.run_command(mount_path)
+ if mount_out:
+ for line in mount_out.split('\n'):
+ fields = line.split()
+ if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
+ if re.match('^/', fields[0]):
+ # normal mount
+ mount = fields[1]
+ mount_info = {'mount': mount,
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[6],
+ 'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
+ mount_info.update(get_mount_size(mount))
+ else:
+ # nfs or cifs based mount
+ # in the NFS case, if no mount options were provided on the command
+ # line, pad fields with an empty string...
+ if len(fields) < 8:
+ fields.append("")
+
+ mount_info = {'mount': fields[2],
+ 'device': '%s:%s' % (fields[0], fields[1]),
+ 'fstype': fields[3],
+ 'options': fields[7],
+ 'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
+
+ mounts.append(mount_info)
+
+ mount_facts['mounts'] = mounts
+
+ return mount_facts
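+ # Example (illustrative): a local AIX mount line such as
+ # '/dev/hd4 / jfs2 Jul 17 13:09 rw,log=/dev/hd8'
+ # produces {'mount': '/', 'device': '/dev/hd4', 'fstype': 'jfs2',
+ # 'options': 'rw,log=/dev/hd8', 'time': 'Jul 17 13:09'} plus the
+ # sizes merged in by get_mount_size().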
+
+ def get_device_facts(self):
+ device_facts = {}
+ device_facts['devices'] = {}
+
+ lsdev_cmd = self.module.get_bin_path('lsdev', True)
+ lsattr_cmd = self.module.get_bin_path('lsattr', True)
+ rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
+
+ for line in out_lsdev.splitlines():
+ field = line.split()
+
+ device_attrs = {}
+ device_name = field[0]
+ device_state = field[1]
+ device_type = field[2:]
+ lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
+ rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
+ for attr in out_lsattr.splitlines():
+ attr_fields = attr.split()
+ attr_name = attr_fields[0]
+ attr_parameter = attr_fields[1]
+ device_attrs[attr_name] = attr_parameter
+
+ device_facts['devices'][device_name] = {
+ 'state': device_state,
+ 'type': ' '.join(device_type),
+ 'attributes': device_attrs
+ }
+
+ return device_facts
+
+
+class AIXHardwareCollector(HardwareCollector):
+ _platform = 'AIX'
+ _fact_class = AIXHardware
diff --git a/lib/ansible/module_utils/facts/hardware/base.py b/lib/ansible/module_utils/facts/hardware/base.py
new file mode 100644
index 00000000..ce7ca7e4
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/base.py
@@ -0,0 +1,66 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class Hardware:
+ platform = 'Generic'
+
+ # FIXME: remove load_on_init when we can
+ def __init__(self, module, load_on_init=False):
+ self.module = module
+
+ def populate(self, collected_facts=None):
+ return {}
+
+
+class HardwareCollector(BaseFactCollector):
+ name = 'hardware'
+ _fact_ids = set(['processor',
+ 'processor_cores',
+ 'processor_count',
+ # TODO: mounts isn't exactly hardware
+ 'mounts',
+ 'devices'])
+ _fact_class = Hardware
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ if not module:
+ return {}
+
+ # Instantiate the platform-specific fact class and let it populate the facts
+ facts_obj = self._fact_class(module)
+
+ facts_dict = facts_obj.populate(collected_facts=collected_facts)
+
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/hardware/darwin.py b/lib/ansible/module_utils/facts/hardware/darwin.py
new file mode 100644
index 00000000..f2cb6d68
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/darwin.py
@@ -0,0 +1,131 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.sysctl import get_sysctl
+
+
+class DarwinHardware(Hardware):
+ """
+ Darwin-specific subclass of Hardware. Defines memory and CPU facts:
+ - processor
+ - processor_cores
+ - memtotal_mb
+ - memfree_mb
+ - model
+ - osversion
+ - osrevision
+ """
+ platform = 'Darwin'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
+ mac_facts = self.get_mac_facts()
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+
+ hardware_facts.update(mac_facts)
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+
+ return hardware_facts
+
+ def get_system_profile(self):
+ rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
+ if rc != 0:
+ return dict()
+ system_profile = dict()
+ for line in out.splitlines():
+ if ': ' in line:
+ (key, value) = line.split(': ', 1)
+ system_profile[key.strip()] = ' '.join(value.strip().split())
+ return system_profile
+
+ def get_mac_facts(self):
+ mac_facts = {}
+ rc, out, err = self.module.run_command("sysctl hw.model")
+ if rc == 0:
+ mac_facts['model'] = mac_facts['product_name'] = out.splitlines()[-1].split()[1]
+ mac_facts['osversion'] = self.sysctl['kern.osversion']
+ mac_facts['osrevision'] = self.sysctl['kern.osrevision']
+
+ return mac_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+ if 'machdep.cpu.brand_string' in self.sysctl: # Intel
+ cpu_facts['processor'] = self.sysctl['machdep.cpu.brand_string']
+ cpu_facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
+ else: # PowerPC
+ system_profile = self.get_system_profile()
+ cpu_facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
+ cpu_facts['processor_cores'] = self.sysctl['hw.physicalcpu']
+ cpu_facts['processor_vcpus'] = self.sysctl.get('hw.logicalcpu') or self.sysctl.get('hw.ncpu') or ''
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {
+ 'memtotal_mb': int(self.sysctl['hw.memsize']) // 1024 // 1024,
+ 'memfree_mb': 0,
+ }
+
+ total_used = 0
+ page_size = 4096
+ try:
+ vm_stat_command = get_bin_path('vm_stat')
+ except ValueError:
+ return memory_facts
+
+ rc, out, err = self.module.run_command(vm_stat_command)
+ if rc == 0:
+ # Free = Total - (Wired + active + inactive)
+ # Get a generator of tuples from the command output so we can later
+ # turn it into a dictionary
+ memory_stats = (line.rstrip('.').split(':', 1) for line in out.splitlines())
+
+ # Strip extra left spaces from the value
+ memory_stats = dict((k, v.lstrip()) for k, v in memory_stats)
+
+ for k, v in memory_stats.items():
+ try:
+ memory_stats[k] = int(v)
+ except ValueError:
+                    # Most values convert cleanly to integers; if a field
+                    # does not, just leave it alone.
+ pass
+
+ if memory_stats.get('Pages wired down'):
+ total_used += memory_stats['Pages wired down'] * page_size
+ if memory_stats.get('Pages active'):
+ total_used += memory_stats['Pages active'] * page_size
+ if memory_stats.get('Pages inactive'):
+ total_used += memory_stats['Pages inactive'] * page_size
+
+ memory_facts['memfree_mb'] = memory_facts['memtotal_mb'] - (total_used // 1024 // 1024)
+
+ return memory_facts
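+
+    # Worked example (hypothetical vm_stat numbers): with page_size = 4096,
+    # 'Pages wired down' = 100000, 'Pages active' = 200000 and
+    # 'Pages inactive' = 150000, total_used is 450000 * 4096 bytes, so on a
+    # 16384 MB host memfree_mb = 16384 - (450000 * 4096 // 1024 // 1024) = 14627.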
+
+
+class DarwinHardwareCollector(HardwareCollector):
+ _fact_class = DarwinHardware
+ _platform = 'Darwin'
diff --git a/lib/ansible/module_utils/facts/hardware/dragonfly.py b/lib/ansible/module_utils/facts/hardware/dragonfly.py
new file mode 100644
index 00000000..ea24151f
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/dragonfly.py
@@ -0,0 +1,26 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.hardware.freebsd import FreeBSDHardware
+
+
+class DragonFlyHardwareCollector(HardwareCollector):
+    # Note: this reuses the FreeBSD fact class; there is no DragonFly-specific hardware fact class
+ _fact_class = FreeBSDHardware
+ _platform = 'DragonFly'
diff --git a/lib/ansible/module_utils/facts/hardware/freebsd.py b/lib/ansible/module_utils/facts/hardware/freebsd.py
new file mode 100644
index 00000000..3078b383
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/freebsd.py
@@ -0,0 +1,214 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+import re
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.timeout import TimeoutError, timeout
+
+from ansible.module_utils.facts.utils import get_file_content, get_mount_size
+
+
+class FreeBSDHardware(Hardware):
+ """
+ FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ - devices
+ """
+ platform = 'FreeBSD'
+ DMESG_BOOT = '/var/run/dmesg.boot'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ device_facts = self.get_device_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except TimeoutError:
+ pass
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+ cpu_facts['processor'] = []
+ sysctl = self.module.get_bin_path('sysctl')
+ if sysctl:
+ rc, out, err = self.module.run_command("%s -n hw.ncpu" % sysctl, check_rc=False)
+ cpu_facts['processor_count'] = out.strip()
+
+ dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
+ if not dmesg_boot:
+ try:
+ rc, dmesg_boot, err = self.module.run_command(self.module.get_bin_path("dmesg"), check_rc=False)
+ except Exception:
+ dmesg_boot = ''
+
+ for line in dmesg_boot.splitlines():
+ if 'CPU:' in line:
+ cpu = re.sub(r'CPU:\s+', r"", line)
+ cpu_facts['processor'].append(cpu.strip())
+ if 'Logical CPUs per core' in line:
+ cpu_facts['processor_cores'] = line.split()[4]
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+
+ sysctl = self.module.get_bin_path('sysctl')
+ if sysctl:
+ rc, out, err = self.module.run_command("%s vm.stats" % sysctl, check_rc=False)
+ for line in out.splitlines():
+ data = line.split()
+ if 'vm.stats.vm.v_page_size' in line:
+ pagesize = int(data[1])
+ if 'vm.stats.vm.v_page_count' in line:
+ pagecount = int(data[1])
+ if 'vm.stats.vm.v_free_count' in line:
+ freecount = int(data[1])
+ memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
+ memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
+
+ swapinfo = self.module.get_bin_path('swapinfo')
+ if swapinfo:
+            # Get swapinfo. With -k, swapinfo output looks like:
+            # Device          1K-blocks     Used    Avail Capacity
+            # /dev/ada0p3      314368        0      314368     0%
+ #
+ rc, out, err = self.module.run_command("%s -k" % swapinfo)
+ lines = out.splitlines()
+ if len(lines[-1]) == 0:
+ lines.pop()
+ data = lines[-1].split()
+ if data[0] != 'Device':
+ memory_facts['swaptotal_mb'] = int(data[1]) // 1024
+ memory_facts['swapfree_mb'] = int(data[3]) // 1024
+
+ return memory_facts
+
+ @timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+ fstab = get_file_content('/etc/fstab')
+ if fstab:
+ for line in fstab.splitlines():
+ if line.startswith('#') or line.strip() == '':
+ continue
+ fields = re.sub(r'\s+', ' ', line).split()
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+
+ return mount_facts
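+
+    # Example: a hypothetical /etc/fstab line such as
+    #     /dev/ada0p2  /  ufs  rw  1  1
+    # produces {'mount': '/', 'device': '/dev/ada0p2', 'fstype': 'ufs',
+    # 'options': 'rw'}, plus whatever statvfs fields get_mount_size() adds.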
+
+ def get_device_facts(self):
+ device_facts = {}
+
+ sysdir = '/dev'
+ device_facts['devices'] = {}
+ drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)') # TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
+ slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)')
+ if os.path.isdir(sysdir):
+ dirlist = sorted(os.listdir(sysdir))
+ for device in dirlist:
+ d = drives.match(device)
+ if d:
+ device_facts['devices'][d.group(1)] = []
+ s = slices.match(device)
+ if s:
+ device_facts['devices'][d.group(1)].append(s.group(1))
+
+ return device_facts
+
+ def get_dmi_facts(self):
+        '''Learn DMI facts from the system.
+
+        Use the dmidecode executable, if available.'''
+
+ dmi_facts = {}
+
+        # Query dmidecode, if available (FreeBSD has no sysfs DMI tree)
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ DMI_DICT = {
+ 'bios_date': 'bios-release-date',
+ 'bios_vendor': 'bios-vendor',
+ 'bios_version': 'bios-version',
+ 'board_asset_tag': 'baseboard-asset-tag',
+ 'board_name': 'baseboard-product-name',
+ 'board_serial': 'baseboard-serial-number',
+ 'board_vendor': 'baseboard-manufacturer',
+ 'board_version': 'baseboard-version',
+ 'chassis_asset_tag': 'chassis-asset-tag',
+ 'chassis_serial': 'chassis-serial-number',
+ 'chassis_vendor': 'chassis-manufacturer',
+ 'chassis_version': 'chassis-version',
+ 'form_factor': 'chassis-type',
+ 'product_name': 'system-product-name',
+ 'product_serial': 'system-serial-number',
+ 'product_uuid': 'system-uuid',
+ 'product_version': 'system-version',
+ 'system_vendor': 'system-manufacturer',
+ }
+ for (k, v) in DMI_DICT.items():
+ if dmi_bin is not None:
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+                    # Strip out commented lines (dmidecode-specific output)
+                    # FIXME: why set the fact first and only then check that it serializes?
+ dmi_facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(dmi_facts[k])
+ except UnicodeDecodeError:
+ dmi_facts[k] = 'NA'
+ else:
+ dmi_facts[k] = 'NA'
+ else:
+ dmi_facts[k] = 'NA'
+
+ return dmi_facts
+
+
+class FreeBSDHardwareCollector(HardwareCollector):
+ _fact_class = FreeBSDHardware
+ _platform = 'FreeBSD'
diff --git a/lib/ansible/module_utils/facts/hardware/hpux.py b/lib/ansible/module_utils/facts/hardware/hpux.py
new file mode 100644
index 00000000..ae72ed8e
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/hpux.py
@@ -0,0 +1,165 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+
+
+class HPUXHardware(Hardware):
+ """
+ HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor
+ - processor_cores
+ - processor_count
+ - model
+ - firmware
+ """
+
+ platform = 'HP-UX'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
+ memory_facts = self.get_memory_facts()
+ hw_facts = self.get_hw_facts()
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(hw_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self, collected_facts=None):
+ cpu_facts = {}
+ collected_facts = collected_facts or {}
+
+ if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
+ rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
+ cpu_facts['processor_count'] = int(out.strip())
+ # Working with machinfo mess
+ elif collected_facts.get('ansible_architecture') == 'ia64':
+ if collected_facts.get('ansible_distribution_version') == "B.11.23":
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
+ if out:
+ cpu_facts['processor_count'] = int(out.strip().split('=')[1])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
+ if out:
+ cpu_facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
+ rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
+ cpu_facts['processor_cores'] = int(out.strip())
+ if collected_facts.get('ansible_distribution_version') == "B.11.31":
+                # machinfo only reports 'core' strings on B.11.31 releases newer than 1204
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
+ if out.strip() == '0':
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
+ cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
+                    # If hyperthreading is active, divide cores by 2
+ rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
+ data = re.sub(' +', ' ', out).strip().split(' ')
+ if len(data) == 1:
+ hyperthreading = 'OFF'
+ else:
+ hyperthreading = data[1]
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
+ data = out.strip().split(" ")
+ if hyperthreading == 'ON':
+ cpu_facts['processor_cores'] = int(data[0]) / 2
+ else:
+ if len(data) == 1:
+ cpu_facts['processor_cores'] = cpu_facts['processor_count']
+ else:
+ cpu_facts['processor_cores'] = int(data[0])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
+ cpu_facts['processor'] = out.strip()
+ else:
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
+ cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
+ cpu_facts['processor_cores'] = int(out.strip().split(" ")[0])
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
+ cpu_facts['processor'] = out.strip()
+
+ return cpu_facts
+
+ def get_memory_facts(self, collected_facts=None):
+ memory_facts = {}
+ collected_facts = collected_facts or {}
+
+ pagesize = 4096
+ rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
+ data = int(re.sub(' +', ' ', out).split(' ')[5].strip())
+ memory_facts['memfree_mb'] = pagesize * data // 1024 // 1024
+ if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
+ try:
+ rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
+ data = re.search('.*Physical: ([0-9]*) Kbytes.*', out).groups()[0].strip()
+ memory_facts['memtotal_mb'] = int(data) // 1024
+ except AttributeError:
+ # For systems where memory details aren't sent to syslog or the log has rotated, use parsed
+ # adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
+ if os.access("/dev/kmem", os.R_OK):
+ rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
+ use_unsafe_shell=True)
+ if not err:
+ data = out
+ memory_facts['memtotal_mb'] = int(data) / 256
+ else:
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
+ data = re.search(r'Memory[\ :=]*([0-9]*).*MB.*', out).groups()[0].strip()
+ memory_facts['memtotal_mb'] = int(data)
+ rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
+ memory_facts['swaptotal_mb'] = int(out.strip())
+ rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
+ swap = 0
+ for line in out.strip().splitlines():
+ swap += int(re.sub(' +', ' ', line).split(' ')[3].strip())
+ memory_facts['swapfree_mb'] = swap
+
+ return memory_facts
+
+ def get_hw_facts(self, collected_facts=None):
+ hw_facts = {}
+ collected_facts = collected_facts or {}
+
+ rc, out, err = self.module.run_command("model")
+ hw_facts['model'] = out.strip()
+ if collected_facts.get('ansible_architecture') == 'ia64':
+ separator = ':'
+ if collected_facts.get('ansible_distribution_version') == "B.11.23":
+ separator = '='
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
+ hw_facts['firmware_version'] = out.split(separator)[1].strip()
+ rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ", use_unsafe_shell=True)
+ if rc == 0 and out:
+ hw_facts['product_serial'] = out.split(separator)[1].strip()
+
+ return hw_facts
+
+
+class HPUXHardwareCollector(HardwareCollector):
+ _fact_class = HPUXHardware
+ _platform = 'HP-UX'
+
+ required_facts = set(['platform', 'distribution'])
diff --git a/lib/ansible/module_utils/facts/hardware/hurd.py b/lib/ansible/module_utils/facts/hardware/hurd.py
new file mode 100644
index 00000000..306e13c1
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/hurd.py
@@ -0,0 +1,53 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.timeout import TimeoutError
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.hardware.linux import LinuxHardware
+
+
+class HurdHardware(LinuxHardware):
+ """
+    GNU Hurd-specific subclass of Hardware. Defines memory and mount facts
+    based on the procfs compatibility translator, which mimics the interface
+    of the Linux kernel.
+ """
+
+ platform = 'GNU'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ uptime_facts = self.get_uptime_facts()
+ memory_facts = self.get_memory_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except TimeoutError:
+ pass
+
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+
+class HurdHardwareCollector(HardwareCollector):
+ _fact_class = HurdHardware
+ _platform = 'GNU'
diff --git a/lib/ansible/module_utils/facts/hardware/linux.py b/lib/ansible/module_utils/facts/hardware/linux.py
new file mode 100644
index 00000000..c468e685
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/linux.py
@@ -0,0 +1,847 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import errno
+import glob
+import json
+import os
+import re
+import sys
+import time
+
+from multiprocessing import cpu_count
+from multiprocessing.pool import ThreadPool
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.formatters import bytes_to_human
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
+
+# import this as a module to ensure we get the same module instance
+from ansible.module_utils.facts import timeout
+
+
+def get_partition_uuid(partname):
+ try:
+ uuids = os.listdir("/dev/disk/by-uuid")
+ except OSError:
+ return
+
+ for uuid in uuids:
+ dev = os.path.realpath("/dev/disk/by-uuid/" + uuid)
+ if dev == ("/dev/" + partname):
+ return uuid
+
+ return None
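+
+
+# Example (hypothetical): if /dev/disk/by-uuid/3ae6-17cb is a symlink that
+# resolves to /dev/sda1, get_partition_uuid('sda1') returns '3ae6-17cb';
+# for a partition with no by-uuid entry it returns None.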
+
+
+class LinuxHardware(Hardware):
+ """
+ Linux-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+
+    In addition, it defines a number of DMI facts and device facts.
+ """
+
+ platform = 'Linux'
+
+    # Originally only these four were top-level facts
+ ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
+ # Now we have all of these in a dict structure
+ MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
+
+ # regex used against findmnt output to detect bind mounts
+ BIND_MOUNT_RE = re.compile(r'.*\]')
+
+ # regex used against mtab content to find entries that are bind mounts
+ MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
+
+ # regex used for replacing octal escape sequences
+ OCTAL_ESCAPE_RE = re.compile(r'\\[0-9]{3}')
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ self.module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_NUMERIC': 'C'}
+
+ cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ device_facts = self.get_device_facts()
+ uptime_facts = self.get_uptime_facts()
+ lvm_facts = self.get_lvm_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except timeout.TimeoutError:
+ pass
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(lvm_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ if not os.access("/proc/meminfo", os.R_OK):
+ return memory_facts
+
+ memstats = {}
+ for line in get_file_lines("/proc/meminfo"):
+ data = line.split(":", 1)
+ key = data[0]
+ if key in self.ORIGINAL_MEMORY_FACTS:
+ val = data[1].strip().split(' ')[0]
+ memory_facts["%s_mb" % key.lower()] = int(val) // 1024
+
+ if key in self.MEMORY_FACTS:
+ val = data[1].strip().split(' ')[0]
+ memstats[key.lower()] = int(val) // 1024
+
+ if None not in (memstats.get('memtotal'), memstats.get('memfree')):
+ memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
+ if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
+ memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
+ if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
+ memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
+ if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
+ memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
+
+ memory_facts['memory_mb'] = {
+ 'real': {
+ 'total': memstats.get('memtotal'),
+ 'used': memstats.get('real:used'),
+ 'free': memstats.get('memfree'),
+ },
+ 'nocache': {
+ 'free': memstats.get('nocache:free'),
+ 'used': memstats.get('nocache:used'),
+ },
+ 'swap': {
+ 'total': memstats.get('swaptotal'),
+ 'free': memstats.get('swapfree'),
+ 'used': memstats.get('swap:used'),
+ 'cached': memstats.get('swapcached'),
+ },
+ }
+
+ return memory_facts
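+
+    # The resulting fact layout looks like this (values are illustrative):
+    #     {'memtotal_mb': 3954, 'memfree_mb': 1304,
+    #      'memory_mb': {'real': {'total': 3954, 'used': 2650, 'free': 1304},
+    #                    'nocache': {'free': 2540, 'used': 1414},
+    #                    'swap': {'total': 2047, 'free': 2047, 'used': 0, 'cached': 0}}}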
+
+ def get_cpu_facts(self, collected_facts=None):
+ cpu_facts = {}
+ collected_facts = collected_facts or {}
+
+ i = 0
+ vendor_id_occurrence = 0
+ model_name_occurrence = 0
+        processor_occurrence = 0
+ physid = 0
+ coreid = 0
+ sockets = {}
+ cores = {}
+
+ xen = False
+ xen_paravirt = False
+ try:
+ if os.path.exists('/proc/xen'):
+ xen = True
+ else:
+ for line in get_file_lines('/sys/hypervisor/type'):
+ if line.strip() == 'xen':
+ xen = True
+ # Only interested in the first line
+ break
+ except IOError:
+ pass
+
+ if not os.access("/proc/cpuinfo", os.R_OK):
+ return cpu_facts
+
+ cpu_facts['processor'] = []
+ for line in get_file_lines('/proc/cpuinfo'):
+ data = line.split(":", 1)
+ key = data[0].strip()
+
+ try:
+ val = data[1].strip()
+ except IndexError:
+ val = ""
+
+ if xen:
+ if key == 'flags':
+                    # Check for the vme cpu flag; Xen paravirt does not expose it.
+ # Need to detect Xen paravirt because it exposes cpuinfo
+ # differently than Xen HVM or KVM and causes reporting of
+ # only a single cpu core.
+ if 'vme' not in val:
+ xen_paravirt = True
+
+ # model name is for Intel arch, Processor (mind the uppercase P)
+ # works for some ARM devices, like the Sheevaplug.
+            # 'ncpus active' is a SPARC attribute
+ if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor', 'processor']:
+ if 'processor' not in cpu_facts:
+ cpu_facts['processor'] = []
+ cpu_facts['processor'].append(val)
+ if key == 'vendor_id':
+ vendor_id_occurrence += 1
+ if key == 'model name':
+ model_name_occurrence += 1
+ if key == 'processor':
+                        processor_occurrence += 1
+ i += 1
+ elif key == 'physical id':
+ physid = val
+ if physid not in sockets:
+ sockets[physid] = 1
+ elif key == 'core id':
+ coreid = val
+ if coreid not in sockets:
+ cores[coreid] = 1
+ elif key == 'cpu cores':
+ sockets[physid] = int(val)
+ elif key == 'siblings':
+ cores[coreid] = int(val)
+ elif key == '# processors':
+ cpu_facts['processor_cores'] = int(val)
+ elif key == 'ncpus active':
+ i = int(val)
+
+        # Skip for platforms without vendor_id/model_name in cpuinfo (e.g. ppc64le)
+ if vendor_id_occurrence > 0:
+ if vendor_id_occurrence == model_name_occurrence:
+ i = vendor_id_occurrence
+
+ # The fields for ARM CPUs do not always include 'vendor_id' or 'model name',
+        # and sometimes include both 'processor' and 'Processor'.
+ # The fields for Power CPUs include 'processor' and 'cpu'.
+ # Always use 'processor' count for ARM and Power systems
+ if collected_facts.get('ansible_architecture', '').startswith(('armv', 'aarch', 'ppc')):
+            i = processor_occurrence
+
+ # FIXME
+ if collected_facts.get('ansible_architecture') != 's390x':
+ if xen_paravirt:
+ cpu_facts['processor_count'] = i
+ cpu_facts['processor_cores'] = i
+ cpu_facts['processor_threads_per_core'] = 1
+ cpu_facts['processor_vcpus'] = i
+ else:
+ if sockets:
+ cpu_facts['processor_count'] = len(sockets)
+ else:
+ cpu_facts['processor_count'] = i
+
+ socket_values = list(sockets.values())
+ if socket_values and socket_values[0]:
+ cpu_facts['processor_cores'] = socket_values[0]
+ else:
+ cpu_facts['processor_cores'] = 1
+
+ core_values = list(cores.values())
+ if core_values:
+ cpu_facts['processor_threads_per_core'] = core_values[0] // cpu_facts['processor_cores']
+ else:
+ cpu_facts['processor_threads_per_core'] = 1 // cpu_facts['processor_cores']
+
+ cpu_facts['processor_vcpus'] = (cpu_facts['processor_threads_per_core'] *
+ cpu_facts['processor_count'] * cpu_facts['processor_cores'])
+
+        # If the number of processors available to this process
+        # cannot be determined, the processor count reported by
+        # /proc is used as the default:
+        cpu_facts['processor_nproc'] = processor_occurrence
+
+ try:
+ cpu_facts['processor_nproc'] = len(
+ os.sched_getaffinity(0)
+ )
+ except AttributeError:
+ # In Python < 3.3, os.sched_getaffinity() is not available
+ try:
+ cmd = get_bin_path('nproc')
+ except ValueError:
+ pass
+ else:
+ rc, out, _err = self.module.run_command(cmd)
+ if rc == 0:
+ cpu_facts['processor_nproc'] = int(out)
+
+ return cpu_facts
+
+ def get_dmi_facts(self):
+ ''' learn dmi facts from system
+
+ Try /sys first for dmi related facts.
+ If that is not available, fall back to dmidecode executable '''
+
+ dmi_facts = {}
+
+ if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
+ # Use kernel DMI info, if available
+
+ # DMI SPEC -- https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_3.2.0.pdf
+ FORM_FACTOR = ["Unknown", "Other", "Unknown", "Desktop",
+ "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
+ "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
+ "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
+ "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
+ "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
+ "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
+ "CompactPCI", "AdvancedTCA", "Blade", "Blade Enclosure",
+ "Tablet", "Convertible", "Detachable", "IoT Gateway",
+ "Embedded PC", "Mini PC", "Stick PC"]
+
+ DMI_DICT = {
+ 'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
+ 'bios_vendor': '/sys/devices/virtual/dmi/id/bios_vendor',
+ 'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
+ 'board_asset_tag': '/sys/devices/virtual/dmi/id/board_asset_tag',
+ 'board_name': '/sys/devices/virtual/dmi/id/board_name',
+ 'board_serial': '/sys/devices/virtual/dmi/id/board_serial',
+ 'board_vendor': '/sys/devices/virtual/dmi/id/board_vendor',
+ 'board_version': '/sys/devices/virtual/dmi/id/board_version',
+ 'chassis_asset_tag': '/sys/devices/virtual/dmi/id/chassis_asset_tag',
+ 'chassis_serial': '/sys/devices/virtual/dmi/id/chassis_serial',
+ 'chassis_vendor': '/sys/devices/virtual/dmi/id/chassis_vendor',
+ 'chassis_version': '/sys/devices/virtual/dmi/id/chassis_version',
+ 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
+ 'product_name': '/sys/devices/virtual/dmi/id/product_name',
+ 'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
+ 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
+ 'product_version': '/sys/devices/virtual/dmi/id/product_version',
+ 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor',
+ }
+
+ for (key, path) in DMI_DICT.items():
+ data = get_file_content(path)
+ if data is not None:
+ if key == 'form_factor':
+ try:
+ dmi_facts['form_factor'] = FORM_FACTOR[int(data)]
+ except IndexError:
+ dmi_facts['form_factor'] = 'unknown (%s)' % data
+ else:
+ dmi_facts[key] = data
+ else:
+ dmi_facts[key] = 'NA'
+
+ else:
+ # Fall back to using dmidecode, if available
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ DMI_DICT = {
+ 'bios_date': 'bios-release-date',
+ 'bios_vendor': 'bios-vendor',
+ 'bios_version': 'bios-version',
+ 'board_asset_tag': 'baseboard-asset-tag',
+ 'board_name': 'baseboard-product-name',
+ 'board_serial': 'baseboard-serial-number',
+ 'board_vendor': 'baseboard-manufacturer',
+ 'board_version': 'baseboard-version',
+ 'chassis_asset_tag': 'chassis-asset-tag',
+ 'chassis_serial': 'chassis-serial-number',
+ 'chassis_vendor': 'chassis-manufacturer',
+ 'chassis_version': 'chassis-version',
+ 'form_factor': 'chassis-type',
+ 'product_name': 'system-product-name',
+ 'product_serial': 'system-serial-number',
+ 'product_uuid': 'system-uuid',
+ 'product_version': 'system-version',
+ 'system_vendor': 'system-manufacturer',
+ }
+ for (k, v) in DMI_DICT.items():
+ if dmi_bin is not None:
+ (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
+ if rc == 0:
+                        # Strip out commented lines (dmidecode-specific output)
+ thisvalue = ''.join([line for line in out.splitlines() if not line.startswith('#')])
+ try:
+ json.dumps(thisvalue)
+ except UnicodeDecodeError:
+ thisvalue = "NA"
+
+ dmi_facts[k] = thisvalue
+ else:
+ dmi_facts[k] = 'NA'
+ else:
+ dmi_facts[k] = 'NA'
+
+ return dmi_facts
+
+ def _run_lsblk(self, lsblk_path):
+ # call lsblk and collect all uuids
+        # --exclude 2 makes lsblk ignore floppy disks, which can take longer to answer than typical timeouts allow
+ # this uses the linux major device number
+ # for details see https://www.kernel.org/doc/Documentation/devices.txt
+ args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
+ cmd = [lsblk_path] + args
+ rc, out, err = self.module.run_command(cmd)
+ return rc, out, err
+
+ def _lsblk_uuid(self):
+ uuids = {}
+ lsblk_path = self.module.get_bin_path("lsblk")
+ if not lsblk_path:
+ return uuids
+
+ rc, out, err = self._run_lsblk(lsblk_path)
+ if rc != 0:
+ return uuids
+
+ # each line will be in format:
+ # <devicename><some whitespace><uuid>
+ # /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
+ for lsblk_line in out.splitlines():
+ if not lsblk_line:
+ continue
+
+ line = lsblk_line.strip()
+ fields = line.rsplit(None, 1)
+
+ if len(fields) < 2:
+ continue
+
+ device_name, uuid = fields[0].strip(), fields[1].strip()
+ if device_name in uuids:
+ continue
+ uuids[device_name] = uuid
+
+ return uuids
+
+ def _udevadm_uuid(self, device):
+ # fallback for versions of lsblk <= 2.23 that don't have --paths, see _run_lsblk() above
+ uuid = 'N/A'
+
+ udevadm_path = self.module.get_bin_path('udevadm')
+ if not udevadm_path:
+ return uuid
+
+ cmd = [udevadm_path, 'info', '--query', 'property', '--name', device]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ return uuid
+
+        # a snippet of the output of the udevadm command above looks like:
+ # ...
+ # ID_FS_TYPE=ext4
+ # ID_FS_USAGE=filesystem
+ # ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179
+ # ...
+ m = re.search('ID_FS_UUID=(.*)\n', out)
+ if m:
+ uuid = m.group(1)
+
+ return uuid
+
+ def _run_findmnt(self, findmnt_path):
+ args = ['--list', '--noheadings', '--notruncate']
+ cmd = [findmnt_path] + args
+ rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
+ return rc, out, err
+
+ def _find_bind_mounts(self):
+ bind_mounts = set()
+ findmnt_path = self.module.get_bin_path("findmnt")
+ if not findmnt_path:
+ return bind_mounts
+
+ rc, out, err = self._run_findmnt(findmnt_path)
+ if rc != 0:
+ return bind_mounts
+
+ # find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
+ for line in out.splitlines():
+ fields = line.split()
+ # fields[0] is the TARGET, fields[1] is the SOURCE
+ if len(fields) < 2:
+ continue
+
+ # bind mounts will have a [/directory_name] in the SOURCE column
+ if self.BIND_MOUNT_RE.match(fields[1]):
+ bind_mounts.add(fields[0])
+
+ return bind_mounts
+
+ def _mtab_entries(self):
+ mtab_file = '/etc/mtab'
+ if not os.path.exists(mtab_file):
+ mtab_file = '/proc/mounts'
+
+ mtab = get_file_content(mtab_file, '')
+ mtab_entries = []
+ for line in mtab.splitlines():
+ fields = line.split()
+ if len(fields) < 4:
+ continue
+ mtab_entries.append(fields)
+ return mtab_entries
+
+ @staticmethod
+ def _replace_octal_escapes_helper(match):
+ # Convert to integer using base8 and then convert to character
+ return chr(int(match.group()[1:], 8))
+
+ def _replace_octal_escapes(self, value):
+ return self.OCTAL_ESCAPE_RE.sub(self._replace_octal_escapes_helper, value)
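+
+    # Example: mount points containing spaces are octal-escaped in mtab, so
+    # self._replace_octal_escapes('/mnt/foo\\040bar') returns '/mnt/foo bar',
+    # since chr(int('040', 8)) == ' '.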
+
+ def get_mount_info(self, mount, device, uuids):
+
+ mount_size = get_mount_size(mount)
+
+ # _udevadm_uuid is a fallback for versions of lsblk <= 2.23 that don't have --paths
+ # see _run_lsblk() above
+ # https://github.com/ansible/ansible/issues/36077
+ uuid = uuids.get(device, self._udevadm_uuid(device))
+
+ return mount_size, uuid
+
+ def get_mount_facts(self):
+
+ mounts = []
+
+ # gather system lists
+ bind_mounts = self._find_bind_mounts()
+ uuids = self._lsblk_uuid()
+ mtab_entries = self._mtab_entries()
+
+ # start threads to query each mount
+ results = {}
+ pool = ThreadPool(processes=min(len(mtab_entries), cpu_count()))
+ maxtime = globals().get('GATHER_TIMEOUT') or timeout.DEFAULT_GATHER_TIMEOUT
+ for fields in mtab_entries:
+ # Transform octal escape sequences
+ fields = [self._replace_octal_escapes(field) for field in fields]
+
+ device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
+
+ if not device.startswith('/') and ':/' not in device or fstype == 'none':
+ continue
+
+ mount_info = {'mount': mount,
+ 'device': device,
+ 'fstype': fstype,
+ 'options': options}
+
+ if mount in bind_mounts:
+                # only add if not already there; we might have a plain /etc/mtab
+ if not self.MTAB_BIND_MOUNT_RE.match(options):
+ mount_info['options'] += ",bind"
+
+ results[mount] = {'info': mount_info,
+ 'extra': pool.apply_async(self.get_mount_info, (mount, device, uuids)),
+ 'timelimit': time.time() + maxtime}
+
+ pool.close() # done with new workers, start gc
+
+ # wait for workers and get results
+ while results:
+ for mount in results:
+ res = results[mount]['extra']
+ if res.ready():
+ if res.successful():
+ mount_size, uuid = res.get()
+ if mount_size:
+ results[mount]['info'].update(mount_size)
+ results[mount]['info']['uuid'] = uuid or 'N/A'
+ else:
+ # give incomplete data
+ errmsg = to_text(res.get())
+ self.module.warn("Error prevented getting extra info for mount %s: %s." % (mount, errmsg))
+ results[mount]['info']['note'] = 'Could not get extra information: %s.' % (errmsg)
+
+ mounts.append(results[mount]['info'])
+ del results[mount]
+ break
+ elif time.time() > results[mount]['timelimit']:
+ results[mount]['info']['note'] = 'Timed out while attempting to get extra information.'
+ mounts.append(results[mount]['info'])
+ del results[mount]
+ break
+ else:
+ # avoid cpu churn
+ time.sleep(0.1)
+
+ return {'mounts': mounts}
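+
+    # The per-mount pattern above, reduced to a standalone sketch (assumed
+    # names, not the Ansible public API):
+    #
+    #     pool = ThreadPool(processes=4)
+    #     res = pool.apply_async(get_mount_size, ('/',))
+    #     deadline = time.time() + timeout.DEFAULT_GATHER_TIMEOUT
+    #     while not res.ready() and time.time() < deadline:
+    #         time.sleep(0.1)
+    #     info = res.get() if res.ready() else None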
+
+ def get_device_links(self, link_dir):
+ if not os.path.exists(link_dir):
+ return {}
+ try:
+ retval = collections.defaultdict(set)
+ for entry in os.listdir(link_dir):
+ try:
+ target = os.path.basename(os.readlink(os.path.join(link_dir, entry)))
+ retval[target].add(entry)
+ except OSError:
+ continue
+ return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
+ except OSError:
+ return {}
+
+ def get_all_device_owners(self):
+ try:
+ retval = collections.defaultdict(set)
+ for path in glob.glob('/sys/block/*/slaves/*'):
+ elements = path.split('/')
+ device = elements[3]
+ target = elements[5]
+ retval[target].add(device)
+ return dict((k, list(sorted(v))) for (k, v) in iteritems(retval))
+ except OSError:
+ return {}
+
+ def get_all_device_links(self):
+ return {
+ 'ids': self.get_device_links('/dev/disk/by-id'),
+ 'uuids': self.get_device_links('/dev/disk/by-uuid'),
+ 'labels': self.get_device_links('/dev/disk/by-label'),
+ 'masters': self.get_all_device_owners(),
+ }
+
+ def get_holders(self, block_dev_dict, sysdir):
+ block_dev_dict['holders'] = []
+ if os.path.isdir(sysdir + "/holders"):
+ for folder in os.listdir(sysdir + "/holders"):
+ if not folder.startswith("dm-"):
+ continue
+ name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
+ if name:
+ block_dev_dict['holders'].append(name)
+ else:
+ block_dev_dict['holders'].append(folder)
+
+ def get_device_facts(self):
+ device_facts = {}
+
+ device_facts['devices'] = {}
+ lspci = self.module.get_bin_path('lspci')
+ if lspci:
+ rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
+ else:
+ pcidata = None
+
+ try:
+ block_devs = os.listdir("/sys/block")
+ except OSError:
+ return device_facts
+
+ devs_wwn = {}
+ try:
+ devs_by_id = os.listdir("/dev/disk/by-id")
+ except OSError:
+ pass
+ else:
+ for link_name in devs_by_id:
+ if link_name.startswith("wwn-"):
+ try:
+ wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
+ except OSError:
+ continue
+ devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
+
+ links = self.get_all_device_links()
+ device_facts['device_links'] = links
+
+ for block in block_devs:
+ virtual = 1
+ sysfs_no_links = 0
+ try:
+ path = os.readlink(os.path.join("/sys/block/", block))
+ except OSError:
+ e = sys.exc_info()[1]
+ if e.errno == errno.EINVAL:
+ path = block
+ sysfs_no_links = 1
+ else:
+ continue
+ sysdir = os.path.join("/sys/block", path)
+ if sysfs_no_links == 1:
+ for folder in os.listdir(sysdir):
+ if "device" in folder:
+ virtual = 0
+ break
+ d = {}
+ d['virtual'] = virtual
+ d['links'] = {}
+ for (link_type, link_values) in iteritems(links):
+ d['links'][link_type] = link_values.get(block, [])
+ diskname = os.path.basename(sysdir)
+ for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
+ d[key] = get_file_content(sysdir + "/device/" + key)
+
+ sg_inq = self.module.get_bin_path('sg_inq')
+
+ if sg_inq:
+ device = "/dev/%s" % (block)
+ rc, drivedata, err = self.module.run_command([sg_inq, device])
+ if rc == 0:
+ serial = re.search(r"Unit serial number:\s+(\w+)", drivedata)
+ if serial:
+ d['serial'] = serial.group(1)
+
+ for key, test in [('removable', '/removable'),
+ ('support_discard', '/queue/discard_granularity'),
+ ]:
+ d[key] = get_file_content(sysdir + test)
+
+ if diskname in devs_wwn:
+ d['wwn'] = devs_wwn[diskname]
+
+ d['partitions'] = {}
+ for folder in os.listdir(sysdir):
+ m = re.search("(" + diskname + r"[p]?\d+)", folder)
+ if m:
+ part = {}
+ partname = m.group(1)
+ part_sysdir = sysdir + "/" + partname
+
+ part['links'] = {}
+ for (link_type, link_values) in iteritems(links):
+ part['links'][link_type] = link_values.get(partname, [])
+
+ part['start'] = get_file_content(part_sysdir + "/start", 0)
+ part['sectors'] = get_file_content(part_sysdir + "/size", 0)
+
+ part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
+ if not part['sectorsize']:
+ part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size", 512)
+ part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
+ part['uuid'] = get_partition_uuid(partname)
+ self.get_holders(part, part_sysdir)
+
+ d['partitions'][partname] = part
+
+ d['rotational'] = get_file_content(sysdir + "/queue/rotational")
+ d['scheduler_mode'] = ""
+ scheduler = get_file_content(sysdir + "/queue/scheduler")
+ if scheduler is not None:
+ m = re.match(r".*?(\[(.*)\])", scheduler)
+ if m:
+ d['scheduler_mode'] = m.group(2)
+
+ d['sectors'] = get_file_content(sysdir + "/size")
+ if not d['sectors']:
+ d['sectors'] = 0
+ d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
+ if not d['sectorsize']:
+ d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size", 512)
+ d['size'] = bytes_to_human(float(d['sectors']) * 512.0)
+
+ d['host'] = ""
+
+            # domains are numbered 0 to ffff, buses 0 to ff, slots 0 to 1f, and functions 0 to 7.
+ m = re.match(r".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
+ if m and pcidata:
+ pciid = m.group(1)
+ did = re.escape(pciid)
+ m = re.search("^" + did + r"\s(.*)$", pcidata, re.MULTILINE)
+ if m:
+ d['host'] = m.group(1)
+
+ self.get_holders(d, sysdir)
+
+ device_facts['devices'][diskname] = d
+
+ return device_facts
+
+ def get_uptime_facts(self):
+ uptime_facts = {}
+ uptime_file_content = get_file_content('/proc/uptime')
+ if uptime_file_content:
+ uptime_seconds_string = uptime_file_content.split(' ')[0]
+ uptime_facts['uptime_seconds'] = int(float(uptime_seconds_string))
+
+ return uptime_facts
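+
+    # Example: a /proc/uptime content of '354.89 1063.93' yields
+    # uptime_facts['uptime_seconds'] == 354.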
+
+ def _find_mapper_device_name(self, dm_device):
+ dm_prefix = '/dev/dm-'
+ mapper_device = dm_device
+ if dm_device.startswith(dm_prefix):
+ dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc == 0:
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
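+
+    # Example (hypothetical): for '/dev/dm-0', dmsetup might report the name
+    # 'vg00-root', in which case this returns '/dev/mapper/vg00-root'; any
+    # path not starting with /dev/dm- is returned unchanged.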
+
+ def get_lvm_facts(self):
+ """ Get LVM Facts if running as root and lvm utils are available """
+
+ lvm_facts = {}
+
+ if os.getuid() == 0 and self.module.get_bin_path('vgs'):
+ lvm_util_options = '--noheadings --nosuffix --units g --separator ,'
+
+ vgs_path = self.module.get_bin_path('vgs')
+ # vgs fields: VG #PV #LV #SN Attr VSize VFree
+ vgs = {}
+ if vgs_path:
+ rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))
+ for vg_line in vg_lines.splitlines():
+ items = vg_line.strip().split(',')
+ vgs[items[0]] = {'size_g': items[-2],
+ 'free_g': items[-1],
+ 'num_lvs': items[2],
+ 'num_pvs': items[1]}
+
+ lvs_path = self.module.get_bin_path('lvs')
+ # lvs fields:
+ # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
+ lvs = {}
+ if lvs_path:
+ rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))
+ for lv_line in lv_lines.splitlines():
+ items = lv_line.strip().split(',')
+ lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
+
+ pvs_path = self.module.get_bin_path('pvs')
+ # pvs fields: PV VG #Fmt #Attr PSize PFree
+ pvs = {}
+ if pvs_path:
+ rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))
+ for pv_line in pv_lines.splitlines():
+ items = pv_line.strip().split(',')
+ pvs[self._find_mapper_device_name(items[0])] = {
+ 'size_g': items[4],
+ 'free_g': items[5],
+ 'vg': items[1]}
+
+ lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
+
+ return lvm_facts
+
+
+class LinuxHardwareCollector(HardwareCollector):
+ _platform = 'Linux'
+ _fact_class = LinuxHardware
+
+ required_facts = set(['platform'])
diff --git a/lib/ansible/module_utils/facts/hardware/netbsd.py b/lib/ansible/module_utils/facts/hardware/netbsd.py
new file mode 100644
index 00000000..84b544ce
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/netbsd.py
@@ -0,0 +1,162 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible.module_utils.six.moves import reduce
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts.timeout import TimeoutError, timeout
+
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines, get_mount_size
+from ansible.module_utils.facts.sysctl import get_sysctl
+
+
+class NetBSDHardware(Hardware):
+ """
+ NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ - devices
+ """
+ platform = 'NetBSD'
+ MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ self.sysctl = get_sysctl(self.module, ['machdep'])
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except TimeoutError:
+ pass
+
+ dmi_facts = self.get_dmi_facts()
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(mount_facts)
+ hardware_facts.update(dmi_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self):
+ cpu_facts = {}
+
+ i = 0
+ physid = 0
+ sockets = {}
+ if not os.access("/proc/cpuinfo", os.R_OK):
+ return cpu_facts
+ cpu_facts['processor'] = []
+ for line in get_file_lines("/proc/cpuinfo"):
+ data = line.split(":", 1)
+ key = data[0].strip()
+ # model name is for Intel arch, Processor (mind the uppercase P)
+ # works for some ARM devices, like the Sheevaplug.
+ if key == 'model name' or key == 'Processor':
+ if 'processor' not in cpu_facts:
+ cpu_facts['processor'] = []
+ cpu_facts['processor'].append(data[1].strip())
+ i += 1
+ elif key == 'physical id':
+ physid = data[1].strip()
+ if physid not in sockets:
+ sockets[physid] = 1
+ elif key == 'cpu cores':
+ sockets[physid] = int(data[1].strip())
+ if len(sockets) > 0:
+ cpu_facts['processor_count'] = len(sockets)
+ cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
+ else:
+ cpu_facts['processor_count'] = i
+ cpu_facts['processor_cores'] = 'NA'
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ if not os.access("/proc/meminfo", os.R_OK):
+ return memory_facts
+ for line in get_file_lines("/proc/meminfo"):
+ data = line.split(":", 1)
+ key = data[0]
+ if key in NetBSDHardware.MEMORY_FACTS:
+ val = data[1].strip().split(' ')[0]
+ memory_facts["%s_mb" % key.lower()] = int(val) // 1024
+
+ return memory_facts
+
+ @timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+ fstab = get_file_content('/etc/fstab')
+
+ if not fstab:
+ return mount_facts
+
+ for line in fstab.splitlines():
+ if line.startswith('#') or line.strip() == '':
+ continue
+ fields = re.sub(r'\s+', ' ', line).split()
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+ return mount_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+ # We don't use dmidecode(8) here because:
+        # - it would add a dependency on an external package
+        # - dmidecode(8) can only be run as root
+        # So instead we rely on sysctl(8) to provide the information on a
+ # best-effort basis. As a bonus we also get facts on non-amd64/i386
+ # platforms this way.
+ sysctl_to_dmi = {
+ 'machdep.dmi.system-product': 'product_name',
+ 'machdep.dmi.system-version': 'product_version',
+ 'machdep.dmi.system-uuid': 'product_uuid',
+ 'machdep.dmi.system-serial': 'product_serial',
+ 'machdep.dmi.system-vendor': 'system_vendor',
+ }
+
+ for mib in sysctl_to_dmi:
+ if mib in self.sysctl:
+ dmi_facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
+
+ return dmi_facts
+
+
+class NetBSDHardwareCollector(HardwareCollector):
+ _fact_class = NetBSDHardware
+ _platform = 'NetBSD'
diff --git a/lib/ansible/module_utils/facts/hardware/openbsd.py b/lib/ansible/module_utils/facts/hardware/openbsd.py
new file mode 100644
index 00000000..6b666047
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/openbsd.py
@@ -0,0 +1,170 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils._text import to_text
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts import timeout
+
+from ansible.module_utils.facts.utils import get_file_content, get_mount_size
+from ansible.module_utils.facts.sysctl import get_sysctl
+
+
+class OpenBSDHardware(Hardware):
+ """
+ OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
+ - memfree_mb
+ - memtotal_mb
+ - swapfree_mb
+ - swaptotal_mb
+ - processor (a list)
+ - processor_cores
+ - processor_count
+ - processor_speed
+
+    In addition, it defines a number of DMI facts.
+ """
+ platform = 'OpenBSD'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+ self.sysctl = get_sysctl(self.module, ['hw'])
+
+ # TODO: change name
+ cpu_facts = self.get_processor_facts()
+ memory_facts = self.get_memory_facts()
+ device_facts = self.get_device_facts()
+ dmi_facts = self.get_dmi_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except timeout.TimeoutError:
+ pass
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ @timeout.timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+
+ mount_facts['mounts'] = []
+ fstab = get_file_content('/etc/fstab')
+ if fstab:
+ for line in fstab.splitlines():
+ if line.startswith('#') or line.strip() == '':
+ continue
+ fields = re.sub(r'\s+', ' ', line).split()
+ if fields[1] == 'none' or fields[3] == 'xx':
+ continue
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+ return mount_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+ # Get free memory. vmstat output looks like:
+ # procs memory page disks traps cpu
+ # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
+ # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
+ rc, out, err = self.module.run_command("/usr/bin/vmstat")
+ if rc == 0:
+ memory_facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
+ memory_facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024
+
+ # Get swapctl info. swapctl output looks like:
+ # total: 69268 1K-blocks allocated, 0 used, 69268 available
+ # And for older OpenBSD:
+ # total: 69268k bytes allocated = 0k used, 69268k available
+ rc, out, err = self.module.run_command("/sbin/swapctl -sk")
+ if rc == 0:
+ swaptrans = {ord(u'k'): None,
+ ord(u'm'): None,
+ ord(u'g'): None}
+ data = to_text(out, errors='surrogate_or_strict').split()
+ memory_facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
+ memory_facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024
+
+ return memory_facts
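+
+    # Worked example: for the older-style output
+    #     total: 69268k bytes allocated = 0k used, 69268k available
+    # translate() strips the k/m/g suffixes, so
+    # swaptotal_mb == int('69268') // 1024 == 67.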
+
+ def get_processor_facts(self):
+ cpu_facts = {}
+ processor = []
+ for i in range(int(self.sysctl['hw.ncpu'])):
+ processor.append(self.sysctl['hw.model'])
+
+ cpu_facts['processor'] = processor
+ # The following is partly a lie because there is no reliable way to
+ # determine the number of physical CPUs in the system. We can only
+ # query the number of logical CPUs, which hides the number of cores.
+ # On amd64/i386 we could try to inspect the smt/core/package lines in
+ # dmesg, however even those have proven to be unreliable.
+ # So take a shortcut and report the logical number of processors in
+ # 'processor_count' and 'processor_cores' and leave it at that.
+ cpu_facts['processor_count'] = self.sysctl['hw.ncpu']
+ cpu_facts['processor_cores'] = self.sysctl['hw.ncpu']
+
+ return cpu_facts
+
+ def get_device_facts(self):
+ device_facts = {}
+ devices = []
+ devices.extend(self.sysctl['hw.disknames'].split(','))
+ device_facts['devices'] = devices
+
+ return device_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+ # We don't use dmidecode(8) here because:
+        # - it would add a dependency on an external package
+        # - dmidecode(8) can only be run as root
+        # So instead we rely on sysctl(8) to provide the information on a
+ # best-effort basis. As a bonus we also get facts on non-amd64/i386
+ # platforms this way.
+ sysctl_to_dmi = {
+ 'hw.product': 'product_name',
+ 'hw.version': 'product_version',
+ 'hw.uuid': 'product_uuid',
+ 'hw.serialno': 'product_serial',
+ 'hw.vendor': 'system_vendor',
+ }
+
+ for mib in sysctl_to_dmi:
+ if mib in self.sysctl:
+ dmi_facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
+
+ return dmi_facts
+
+
+class OpenBSDHardwareCollector(HardwareCollector):
+ _fact_class = OpenBSDHardware
+ _platform = 'OpenBSD'
diff --git a/lib/ansible/module_utils/facts/hardware/sunos.py b/lib/ansible/module_utils/facts/hardware/sunos.py
new file mode 100644
index 00000000..90696bee
--- /dev/null
+++ b/lib/ansible/module_utils/facts/hardware/sunos.py
@@ -0,0 +1,287 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import time
+
+from ansible.module_utils.six.moves import reduce
+
+from ansible.module_utils.common.text.formatters import bytes_to_human
+
+from ansible.module_utils.facts.utils import get_file_content, get_mount_size
+
+from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
+from ansible.module_utils.facts import timeout
+
+
+class SunOSHardware(Hardware):
+ """
+ In addition to the generic memory and cpu facts, this also sets
+ swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
+ """
+ platform = 'SunOS'
+
+ def populate(self, collected_facts=None):
+ hardware_facts = {}
+
+ # FIXME: could pass to run_command(environ_update), but it also tweaks the env
+ # of the parent process instead of altering an env provided to Popen()
+ # Use C locale for hardware collection helpers to avoid locale specific number formatting (#24542)
+ self.module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_NUMERIC': 'C'}
+
+ cpu_facts = self.get_cpu_facts()
+ memory_facts = self.get_memory_facts()
+ dmi_facts = self.get_dmi_facts()
+ device_facts = self.get_device_facts()
+ uptime_facts = self.get_uptime_facts()
+
+ mount_facts = {}
+ try:
+ mount_facts = self.get_mount_facts()
+ except timeout.TimeoutError:
+ pass
+
+ hardware_facts.update(cpu_facts)
+ hardware_facts.update(memory_facts)
+ hardware_facts.update(dmi_facts)
+ hardware_facts.update(device_facts)
+ hardware_facts.update(uptime_facts)
+ hardware_facts.update(mount_facts)
+
+ return hardware_facts
+
+ def get_cpu_facts(self, collected_facts=None):
+ physid = 0
+ sockets = {}
+
+ cpu_facts = {}
+ collected_facts = collected_facts or {}
+
+ rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
+
+ cpu_facts['processor'] = []
+
+ for line in out.splitlines():
+ if len(line) < 1:
+ continue
+
+ data = line.split(None, 1)
+ key = data[0].strip()
+
+ # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
+ if key == 'module:':
+ brand = ''
+ elif key == 'brand':
+ brand = data[1].strip()
+ elif key == 'clock_MHz':
+ clock_mhz = data[1].strip()
+ elif key == 'implementation':
+ processor = brand or data[1].strip()
+ # Add clock speed to description for SPARC CPU
+ # FIXME
+ if collected_facts.get('ansible_machine') != 'i86pc':
+ processor += " @ " + clock_mhz + "MHz"
+ if 'ansible_processor' not in collected_facts:
+ cpu_facts['processor'] = []
+ cpu_facts['processor'].append(processor)
+ elif key == 'chip_id':
+ physid = data[1].strip()
+ if physid not in sockets:
+ sockets[physid] = 1
+ else:
+ sockets[physid] += 1
+
+ # Counting cores on Solaris can be complicated.
+ # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
+ # Treat 'processor_count' as physical sockets and 'processor_cores' as
+        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC, as
+ # these processors have: sockets -> cores -> threads/virtual CPU.
+ if len(sockets) > 0:
+ cpu_facts['processor_count'] = len(sockets)
+ cpu_facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
+ else:
+ cpu_facts['processor_cores'] = 'NA'
+ cpu_facts['processor_count'] = len(cpu_facts['processor'])
+
+ return cpu_facts
+
+ def get_memory_facts(self):
+ memory_facts = {}
+
+ rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
+
+ for line in out.splitlines():
+ if 'Memory size' in line:
+ memory_facts['memtotal_mb'] = int(line.split()[2])
+
+ rc, out, err = self.module.run_command("/usr/sbin/swap -s")
+
+ allocated = int(out.split()[1][:-1])
+ reserved = int(out.split()[5][:-1])
+ used = int(out.split()[8][:-1])
+ free = int(out.split()[10][:-1])
+
+ memory_facts['swapfree_mb'] = free // 1024
+ memory_facts['swaptotal_mb'] = (free + used) // 1024
+ memory_facts['swap_allocated_mb'] = allocated // 1024
+ memory_facts['swap_reserved_mb'] = reserved // 1024
+
+ return memory_facts
+
+ @timeout.timeout()
+ def get_mount_facts(self):
+ mount_facts = {}
+ mount_facts['mounts'] = []
+
+ # For a detailed format description see mnttab(4)
+ # special mount_point fstype options time
+ fstab = get_file_content('/etc/mnttab')
+
+ if fstab:
+ for line in fstab.splitlines():
+ fields = line.split('\t')
+ mount_statvfs_info = get_mount_size(fields[1])
+ mount_info = {'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3],
+ 'time': fields[4]}
+ mount_info.update(mount_statvfs_info)
+ mount_facts['mounts'].append(mount_info)
+
+ return mount_facts
+
+ def get_dmi_facts(self):
+ dmi_facts = {}
+
+ # On Solaris 8 the prtdiag wrapper is absent from /usr/sbin,
+ # but that's okay, because we know where to find the real thing:
+ rc, platform, err = self.module.run_command('/usr/bin/uname -i')
+ platform_sbin = '/usr/platform/' + platform.rstrip() + '/sbin'
+
+ prtdiag_path = self.module.get_bin_path("prtdiag", opt_dirs=[platform_sbin])
+ rc, out, err = self.module.run_command(prtdiag_path)
+ """
+ rc returns 1
+ """
+ if out:
+ system_conf = out.split('\n')[0]
+
+ # If you know of any other manufacturers whose names appear in
+ # the first line of prtdiag's output, please add them here:
+ vendors = [
+ "Fujitsu",
+ "Oracle Corporation",
+ "QEMU",
+ "Sun Microsystems",
+ "VMware, Inc.",
+ ]
+ vendor_regexp = "|".join(map(re.escape, vendors))
+ system_conf_regexp = (r'System Configuration:\s+'
+ + r'(' + vendor_regexp + r')\s+'
+ + r'(?:sun\w+\s+)?'
+ + r'(.+)')
+
+ found = re.match(system_conf_regexp, system_conf)
+ if found:
+ dmi_facts['system_vendor'] = found.group(1)
+ dmi_facts['product_name'] = found.group(2)
+
+ return dmi_facts
+
+ def get_device_facts(self):
+        # Device facts are derived from sderr kstats. This code does not use the
+ # full output, but rather queries for specific stats.
+ # Example output:
+ # sderr:0:sd0,err:Hard Errors 0
+ # sderr:0:sd0,err:Illegal Request 6
+ # sderr:0:sd0,err:Media Error 0
+ # sderr:0:sd0,err:Predictive Failure Analysis 0
+ # sderr:0:sd0,err:Product VBOX HARDDISK 9
+ # sderr:0:sd0,err:Revision 1.0
+ # sderr:0:sd0,err:Serial No VB0ad2ec4d-074a
+ # sderr:0:sd0,err:Size 53687091200
+ # sderr:0:sd0,err:Soft Errors 0
+ # sderr:0:sd0,err:Transport Errors 0
+ # sderr:0:sd0,err:Vendor ATA
+
+ device_facts = {}
+ device_facts['devices'] = {}
+
+ disk_stats = {
+ 'Product': 'product',
+ 'Revision': 'revision',
+ 'Serial No': 'serial',
+ 'Size': 'size',
+ 'Vendor': 'vendor',
+ 'Hard Errors': 'hard_errors',
+ 'Soft Errors': 'soft_errors',
+ 'Transport Errors': 'transport_errors',
+ 'Media Error': 'media_errors',
+ 'Predictive Failure Analysis': 'predictive_failure_analysis',
+ 'Illegal Request': 'illegal_request',
+ }
+
+ cmd = ['/usr/bin/kstat', '-p']
+
+ for ds in disk_stats:
+ cmd.append('sderr:::%s' % ds)
+
+ d = {}
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ return device_facts
+
+ sd_instances = frozenset(line.split(':')[1] for line in out.split('\n') if line.startswith('sderr'))
+ for instance in sd_instances:
+ lines = (line for line in out.split('\n') if ':' in line and line.split(':')[1] == instance)
+ for line in lines:
+ text, value = line.split('\t')
+ stat = text.split(':')[3]
+
+ if stat == 'Size':
+ d[disk_stats.get(stat)] = bytes_to_human(float(value))
+ else:
+ d[disk_stats.get(stat)] = value.rstrip()
+
+ diskname = 'sd' + instance
+ device_facts['devices'][diskname] = d
+ d = {}
+
+ return device_facts
+
+ def get_uptime_facts(self):
+ uptime_facts = {}
+ # sample kstat output:
+ # unix:0:system_misc:boot_time 1548249689
+ rc, out, err = self.module.run_command('/usr/bin/kstat -p unix:0:system_misc:boot_time')
+
+ if rc != 0:
+ return
+
+ # uptime = $current_time - $boot_time
+ uptime_facts['uptime_seconds'] = int(time.time() - int(out.split('\t')[1]))
+
+ return uptime_facts
+
+
+class SunOSHardwareCollector(HardwareCollector):
+ _fact_class = SunOSHardware
+ _platform = 'SunOS'
+
+ required_facts = set(['platform'])
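
The `swap -s` arithmetic in get_memory_facts() above, as a standalone sketch;
the sample line is a typical Solaris output and is illustrative only:

    sample = ("total: 41635k bytes allocated + 2788k reserved = "
              "44423k used, 1158568k available")
    fields = sample.split()
    allocated = int(fields[1][:-1])  # strip the trailing 'k'
    reserved = int(fields[5][:-1])
    used = int(fields[8][:-1])
    free = int(fields[10][:-1])
    print(free // 1024)           # swapfree_mb  -> 1131
    print((free + used) // 1024)  # swaptotal_mb -> 1174
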
diff --git a/lib/ansible/module_utils/facts/namespace.py b/lib/ansible/module_utils/facts/namespace.py
new file mode 100644
index 00000000..2d6bf8a5
--- /dev/null
+++ b/lib/ansible/module_utils/facts/namespace.py
@@ -0,0 +1,51 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class FactNamespace:
+ def __init__(self, namespace_name):
+ self.namespace_name = namespace_name
+
+ def transform(self, name):
+ '''Take a text name, and transforms it as needed (add a namespace prefix, etc)'''
+ return name
+
+ def _underscore(self, name):
+ return name.replace('-', '_')
+
+
+class PrefixFactNamespace(FactNamespace):
+ def __init__(self, namespace_name, prefix=None):
+ super(PrefixFactNamespace, self).__init__(namespace_name)
+ self.prefix = prefix
+
+ def transform(self, name):
+ new_name = self._underscore(name)
+ return '%s%s' % (self.prefix, new_name)
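
A minimal usage sketch of the classes above, assuming the module is importable
at the path this diff adds:

    from ansible.module_utils.facts.namespace import PrefixFactNamespace

    ns = PrefixFactNamespace('ansible', prefix='ansible_')
    # transform() replaces '-' with '_' and then prepends the prefix
    print(ns.transform('default-ipv4'))  # -> 'ansible_default_ipv4'
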
diff --git a/lib/ansible/module_utils/facts/network/__init__.py b/lib/ansible/module_utils/facts/network/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/__init__.py
diff --git a/lib/ansible/module_utils/facts/network/aix.py b/lib/ansible/module_utils/facts/network/aix.py
new file mode 100644
index 00000000..e9c90c64
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/aix.py
@@ -0,0 +1,145 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class AIXNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the AIX Network Class.
+    It is based on GenericBsdIfconfigNetwork, with AIX-specific parsing of
+    the default route and of 'ifconfig -a' output.
+ """
+ platform = 'AIX'
+
+ def get_default_interfaces(self, route_path):
+ interface = dict(v4={}, v6={})
+
+ netstat_path = self.module.get_bin_path('netstat')
+
+ if netstat_path:
+ rc, out, err = self.module.run_command([netstat_path, '-nr'])
+
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ if len(words) > 1 and words[0] == 'default':
+ if '.' in words[1]:
+ interface['v4']['gateway'] = words[1]
+ interface['v4']['interface'] = words[5]
+ elif ':' in words[1]:
+ interface['v6']['gateway'] = words[1]
+ interface['v6']['interface'] = words[5]
+
+ return interface['v4'], interface['v6']
+
+ # AIX 'ifconfig -a' does not have three words in the interface line
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
+ interfaces = {}
+ current_if = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+
+ uname_rc = None
+ uname_out = None
+ uname_err = None
+ uname_path = self.module.get_bin_path('uname')
+ if uname_path:
+ uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
+
+ rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
+
+ for line in out.splitlines():
+
+ if line:
+ words = line.split()
+
+ # only this condition differs from GenericBsdIfconfigNetwork
+ if re.match(r'^\w*\d*:', line):
+ current_if = self.parse_interface_line(words)
+ interfaces[current_if['device']] = current_if
+ elif words[0].startswith('options='):
+ self.parse_options_line(words, current_if, ips)
+ elif words[0] == 'nd6':
+ self.parse_nd6_line(words, current_if, ips)
+ elif words[0] == 'ether':
+ self.parse_ether_line(words, current_if, ips)
+ elif words[0] == 'media:':
+ self.parse_media_line(words, current_if, ips)
+ elif words[0] == 'status:':
+ self.parse_status_line(words, current_if, ips)
+ elif words[0] == 'lladdr':
+ self.parse_lladdr_line(words, current_if, ips)
+ elif words[0] == 'inet':
+ self.parse_inet_line(words, current_if, ips)
+ elif words[0] == 'inet6':
+ self.parse_inet6_line(words, current_if, ips)
+ else:
+ self.parse_unknown_line(words, current_if, ips)
+
+        # don't bother with WPARs; this does not work inside them
+        # (`uname -W` returning 0 means we are not in a WPAR)
+ if not uname_rc and uname_out.split()[0] == '0':
+
+ if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
+ entstat_path = self.module.get_bin_path('entstat')
+ if entstat_path:
+ rc, out, err = self.module.run_command([entstat_path, current_if['device']])
+ if rc != 0:
+ break
+ for line in out.splitlines():
+                    if not line:
+                        continue
+ buff = re.match('^Hardware Address: (.*)', line)
+ if buff:
+ current_if['macaddress'] = buff.group(1)
+
+ buff = re.match('^Device Type:', line)
+ if buff and re.match('.*Ethernet', line):
+ current_if['type'] = 'ether'
+
+ # device must have mtu attribute in ODM
+ if 'mtu' not in current_if:
+ lsattr_path = self.module.get_bin_path('lsattr')
+ if lsattr_path:
+ rc, out, err = self.module.run_command([lsattr_path, '-El', current_if['device']])
+ if rc != 0:
+ break
+ for line in out.splitlines():
+ if line:
+ words = line.split()
+ if words[0] == 'mtu':
+ current_if['mtu'] = words[1]
+ return interfaces, ips
+
+ # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
+ def parse_interface_line(self, words):
+ device = words[0][0:-1]
+ current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
+ current_if['flags'] = self.get_options(words[1])
+ current_if['macaddress'] = 'unknown' # will be overwritten later
+ return current_if
+
+
+class AIXNetworkCollector(NetworkCollector):
+ _fact_class = AIXNetwork
+ _platform = 'AIX'
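
A standalone sketch of the default-route extraction in
get_default_interfaces() above; the `netstat -nr` line is illustrative, not
captured from a real AIX host:

    line = "default            10.0.2.2          UG        2     63849 en0      -      -"
    words = line.split()
    v4 = {}
    if len(words) > 1 and words[0] == 'default' and '.' in words[1]:
        v4 = {'gateway': words[1], 'interface': words[5]}
    print(v4)  # -> {'gateway': '10.0.2.2', 'interface': 'en0'}
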
diff --git a/lib/ansible/module_utils/facts/network/base.py b/lib/ansible/module_utils/facts/network/base.py
new file mode 100644
index 00000000..fe38ba82
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/base.py
@@ -0,0 +1,70 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class Network:
+ """
+    This is a generic Network subclass of Facts. This should be further
+    subclassed per platform to implement the actual collection. If you subclass this,
+ you must define:
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4, ipv6, and mac address information.
+
+ All subclasses MUST define platform.
+ """
+ platform = 'Generic'
+
+ # FIXME: remove load_on_init when we can
+ def __init__(self, module, load_on_init=False):
+ self.module = module
+
+ # TODO: more or less abstract/NotImplemented
+ def populate(self, collected_facts=None):
+ return {}
+
+
+class NetworkCollector(BaseFactCollector):
+ # MAYBE: we could try to build this based on the arch specific implementation of Network() or its kin
+ name = 'network'
+ _fact_class = Network
+ _fact_ids = set(['interfaces',
+ 'default_ipv4',
+ 'default_ipv6',
+ 'all_ipv4_addresses',
+ 'all_ipv6_addresses'])
+
+ IPV6_SCOPE = {'0': 'global',
+ '10': 'host',
+ '20': 'link',
+ '40': 'admin',
+ '50': 'site',
+ '80': 'organization'}
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ if not module:
+ return {}
+
+ # Network munges cached_facts by side effect, so give it a copy
+ facts_obj = self._fact_class(module)
+
+ facts_dict = facts_obj.populate(collected_facts=collected_facts)
+
+ return facts_dict
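
A hypothetical minimal platform pairing, sketching the contract the
Network/NetworkCollector pair above defines (the 'Null' names are invented
for illustration):

    from ansible.module_utils.facts.network.base import Network, NetworkCollector

    class NullNetwork(Network):
        platform = 'Null'

        def populate(self, collected_facts=None):
            # a real subclass fills in interfaces and per-interface facts
            return {'interfaces': []}

    class NullNetworkCollector(NetworkCollector):
        _platform = 'Null'
        _fact_class = NullNetwork
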
diff --git a/lib/ansible/module_utils/facts/network/darwin.py b/lib/ansible/module_utils/facts/network/darwin.py
new file mode 100644
index 00000000..90117e53
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/darwin.py
@@ -0,0 +1,49 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class DarwinNetwork(GenericBsdIfconfigNetwork):
+ """
+    This is the macOS (Darwin) Network Class.
+    It is based on GenericBsdIfconfigNetwork, with a Darwin-specific media line parser.
+ """
+ platform = 'Darwin'
+
+    # the media line is different from the default FreeBSD one
+ def parse_media_line(self, words, current_if, ips):
+ # not sure if this is useful - we also drop information
+ current_if['media'] = 'Unknown' # Mac does not give us this
+ current_if['media_select'] = words[1]
+ if len(words) > 2:
+            # macOS sets the media to '<unknown type>' for bridge interfaces,
+            # and parsing splits this into two words; this if/else handles that
+ if words[1] == '<unknown' and words[2] == 'type>':
+ current_if['media_select'] = 'Unknown'
+ current_if['media_type'] = 'unknown type'
+ else:
+ current_if['media_type'] = words[2][1:-1]
+ if len(words) > 3:
+ current_if['media_options'] = self.get_options(words[3])
+
+
+class DarwinNetworkCollector(NetworkCollector):
+ _fact_class = DarwinNetwork
+ _platform = 'Darwin'
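
The '<unknown type>' special case above, as a standalone sketch; the media
line is an illustrative macOS bridge-interface example:

    words = "media: <unknown type>".split()  # -> ['media:', '<unknown', 'type>']
    if len(words) > 2 and words[1] == '<unknown' and words[2] == 'type>':
        media_select, media_type = 'Unknown', 'unknown type'
    else:
        media_select, media_type = words[1], words[2][1:-1]
    print(media_select, media_type)  # -> Unknown unknown type
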
diff --git a/lib/ansible/module_utils/facts/network/dragonfly.py b/lib/ansible/module_utils/facts/network/dragonfly.py
new file mode 100644
index 00000000..e43bbb28
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/dragonfly.py
@@ -0,0 +1,33 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class DragonFlyNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the DragonFly Network Class.
+ It uses the GenericBsdIfconfigNetwork unchanged.
+ """
+ platform = 'DragonFly'
+
+
+class DragonFlyNetworkCollector(NetworkCollector):
+ _fact_class = DragonFlyNetwork
+ _platform = 'DragonFly'
diff --git a/lib/ansible/module_utils/facts/network/fc_wwn.py b/lib/ansible/module_utils/facts/network/fc_wwn.py
new file mode 100644
index 00000000..9d9bfc5b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/fc_wwn.py
@@ -0,0 +1,83 @@
+# Fibre Channel WWN initiator related facts collection for ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import glob
+
+from ansible.module_utils.facts.utils import get_file_lines
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class FcWwnInitiatorFactCollector(BaseFactCollector):
+ name = 'fibre_channel_wwn'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ """
+ Example contents /sys/class/fc_host/*/port_name:
+
+ 0x21000014ff52a9bb
+
+ """
+
+ fc_facts = {}
+ fc_facts['fibre_channel_wwn'] = []
+ if sys.platform.startswith('linux'):
+ for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
+ for line in get_file_lines(fcfile):
+ fc_facts['fibre_channel_wwn'].append(line.rstrip()[2:])
+ elif sys.platform.startswith('sunos'):
+ """
+ on solaris 10 or solaris 11 should use `fcinfo hba-port`
+ TBD (not implemented): on solaris 9 use `prtconf -pv`
+ """
+ cmd = module.get_bin_path('fcinfo')
+ cmd = cmd + " hba-port"
+ rc, fcinfo_out, err = module.run_command(cmd)
+ """
+ # fcinfo hba-port | grep "Port WWN"
+ HBA Port WWN: 10000090fa1658de
+ """
+ if fcinfo_out:
+ for line in fcinfo_out.splitlines():
+ if 'Port WWN' in line:
+ data = line.split(' ')
+ fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
+ elif sys.platform.startswith('aix'):
+ # get list of available fibre-channel devices (fcs)
+ cmd = module.get_bin_path('lsdev')
+ cmd = cmd + " -Cc adapter -l fcs*"
+ rc, lsdev_out, err = module.run_command(cmd)
+ if lsdev_out:
+ lscfg_cmd = module.get_bin_path('lscfg')
+ for line in lsdev_out.splitlines():
+ # if device is available (not in defined state), get its WWN
+ if 'Available' in line:
+ data = line.split(' ')
+ cmd = lscfg_cmd + " -vl %s" % data[0]
+ rc, lscfg_out, err = module.run_command(cmd)
+ # example output
+ # lscfg -vpl fcs3 | grep "Network Address"
+ # Network Address.............10000090FA551509
+ for line in lscfg_out.splitlines():
+ if 'Network Address' in line:
+ data = line.split('.')
+ fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
+ return fc_facts
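
The two WWN extractions above in isolation; both sample lines come from the
examples embedded in the code:

    # Linux: /sys/class/fc_host/*/port_name holds an '0x'-prefixed hex WWN
    print("0x21000014ff52a9bb".rstrip()[2:])        # -> 21000014ff52a9bb

    # Solaris: `fcinfo hba-port` reports 'HBA Port WWN: <wwn>'
    line = "HBA Port WWN: 10000090fa1658de"
    if 'Port WWN' in line:
        print(line.split(' ')[-1].rstrip())         # -> 10000090fa1658de
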
diff --git a/lib/ansible/module_utils/facts/network/freebsd.py b/lib/ansible/module_utils/facts/network/freebsd.py
new file mode 100644
index 00000000..36f6eec7
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/freebsd.py
@@ -0,0 +1,33 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class FreeBSDNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the FreeBSD Network Class.
+ It uses the GenericBsdIfconfigNetwork unchanged.
+ """
+ platform = 'FreeBSD'
+
+
+class FreeBSDNetworkCollector(NetworkCollector):
+ _fact_class = FreeBSDNetwork
+ _platform = 'FreeBSD'
diff --git a/lib/ansible/module_utils/facts/network/generic_bsd.py b/lib/ansible/module_utils/facts/network/generic_bsd.py
new file mode 100644
index 00000000..8f4d145f
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/generic_bsd.py
@@ -0,0 +1,310 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import socket
+import struct
+
+from ansible.module_utils.facts.network.base import Network
+
+
+class GenericBsdIfconfigNetwork(Network):
+ """
+ This is a generic BSD subclass of Network using the ifconfig command.
+ It defines
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4, ipv6, and mac address information.
+ - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
+ """
+ platform = 'Generic_BSD_Ifconfig'
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+ ifconfig_path = self.module.get_bin_path('ifconfig')
+
+ if ifconfig_path is None:
+ return network_facts
+
+ route_path = self.module.get_bin_path('route')
+
+ if route_path is None:
+ return network_facts
+
+ default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
+ interfaces, ips = self.get_interfaces_info(ifconfig_path)
+ interfaces = self.detect_type_media(interfaces)
+
+ self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
+ self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
+ network_facts['interfaces'] = sorted(list(interfaces.keys()))
+
+ for iface in interfaces:
+ network_facts[iface] = interfaces[iface]
+
+ network_facts['default_ipv4'] = default_ipv4
+ network_facts['default_ipv6'] = default_ipv6
+ network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
+ network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
+
+ return network_facts
+
+ def detect_type_media(self, interfaces):
+ for iface in interfaces:
+ if 'media' in interfaces[iface]:
+ if 'ether' in interfaces[iface]['media'].lower():
+ interfaces[iface]['type'] = 'ether'
+ return interfaces
+
+ def get_default_interfaces(self, route_path):
+
+ # Use the commands:
+ # route -n get default
+ # route -n get -inet6 default
+ # to find out the default outgoing interface, address, and gateway
+
+ command = dict(v4=[route_path, '-n', 'get', 'default'],
+ v6=[route_path, '-n', 'get', '-inet6', 'default'])
+
+ interface = dict(v4={}, v6={})
+
+ for v in 'v4', 'v6':
+
+ if v == 'v6' and not socket.has_ipv6:
+ continue
+ rc, out, err = self.module.run_command(command[v])
+ if not out:
+ # v6 routing may result in
+ # RTNETLINK answers: Invalid argument
+ continue
+ for line in out.splitlines():
+ words = line.strip().split(': ')
+ # Collect output from route command
+ if len(words) > 1:
+ if words[0] == 'interface':
+ interface[v]['interface'] = words[1]
+ if words[0] == 'gateway':
+ interface[v]['gateway'] = words[1]
+ # help pick the right interface address on OpenBSD
+ if words[0] == 'if address':
+ interface[v]['address'] = words[1]
+ # help pick the right interface address on NetBSD
+ if words[0] == 'local addr':
+ interface[v]['address'] = words[1]
+
+ return interface['v4'], interface['v6']
+
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
+ interfaces = {}
+ current_if = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+ # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and macOS all implicitly add '-a'
+ # when running the command 'ifconfig'.
+ # Solaris must explicitly run the command 'ifconfig -a'.
+ rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
+
+ for line in out.splitlines():
+
+ if line:
+ words = line.split()
+
+ if words[0] == 'pass':
+ continue
+ elif re.match(r'^\S', line) and len(words) > 3:
+ current_if = self.parse_interface_line(words)
+ interfaces[current_if['device']] = current_if
+ elif words[0].startswith('options='):
+ self.parse_options_line(words, current_if, ips)
+ elif words[0] == 'nd6':
+ self.parse_nd6_line(words, current_if, ips)
+ elif words[0] == 'ether':
+ self.parse_ether_line(words, current_if, ips)
+ elif words[0] == 'media:':
+ self.parse_media_line(words, current_if, ips)
+ elif words[0] == 'status:':
+ self.parse_status_line(words, current_if, ips)
+ elif words[0] == 'lladdr':
+ self.parse_lladdr_line(words, current_if, ips)
+ elif words[0] == 'inet':
+ self.parse_inet_line(words, current_if, ips)
+ elif words[0] == 'inet6':
+ self.parse_inet6_line(words, current_if, ips)
+ elif words[0] == 'tunnel':
+ self.parse_tunnel_line(words, current_if, ips)
+ else:
+ self.parse_unknown_line(words, current_if, ips)
+
+ return interfaces, ips
+
+ def parse_interface_line(self, words):
+ device = words[0][0:-1]
+ current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
+ current_if['flags'] = self.get_options(words[1])
+ if 'LOOPBACK' in current_if['flags']:
+ current_if['type'] = 'loopback'
+ current_if['macaddress'] = 'unknown' # will be overwritten later
+
+ if len(words) >= 5: # Newer FreeBSD versions
+ current_if['metric'] = words[3]
+ current_if['mtu'] = words[5]
+ else:
+ current_if['mtu'] = words[3]
+
+ return current_if
+
+ def parse_options_line(self, words, current_if, ips):
+ # Mac has options like this...
+ current_if['options'] = self.get_options(words[0])
+
+ def parse_nd6_line(self, words, current_if, ips):
+ # FreeBSD has options like this...
+ current_if['options'] = self.get_options(words[1])
+
+ def parse_ether_line(self, words, current_if, ips):
+ current_if['macaddress'] = words[1]
+ current_if['type'] = 'ether'
+
+ def parse_media_line(self, words, current_if, ips):
+ # not sure if this is useful - we also drop information
+ current_if['media'] = words[1]
+ if len(words) > 2:
+ current_if['media_select'] = words[2]
+ if len(words) > 3:
+ current_if['media_type'] = words[3][1:]
+ if len(words) > 4:
+ current_if['media_options'] = self.get_options(words[4])
+
+ def parse_status_line(self, words, current_if, ips):
+ current_if['status'] = words[1]
+
+ def parse_lladdr_line(self, words, current_if, ips):
+ current_if['lladdr'] = words[1]
+
+ def parse_inet_line(self, words, current_if, ips):
+ # netbsd show aliases like this
+ # lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184
+ # inet 127.0.0.1 netmask 0xff000000
+ # inet alias 127.1.1.1 netmask 0xff000000
+ if words[1] == 'alias':
+ del words[1]
+
+ address = {'address': words[1]}
+ # cidr style ip address (eg, 127.0.0.1/24) in inet line
+ # used in netbsd ifconfig -e output after 7.1
+ if '/' in address['address']:
+ ip_address, cidr_mask = address['address'].split('/')
+
+ address['address'] = ip_address
+
+ netmask_length = int(cidr_mask)
+ netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
+ address['netmask'] = socket.inet_ntoa(struct.pack('!L', netmask_bin))
+
+ if len(words) > 5:
+ address['broadcast'] = words[3]
+
+ else:
+ # deal with hex netmask
+ if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
+ words[3] = '0x' + words[3]
+ if words[3].startswith('0x'):
+ address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
+ else:
+ # otherwise assume this is a dotted quad
+ address['netmask'] = words[3]
+ # calculate the network
+ address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
+ netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
+ address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
+ if 'broadcast' not in address:
+ # broadcast may be given or we need to calculate
+ if len(words) > 5:
+ address['broadcast'] = words[5]
+ else:
+ address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
+
+ # add to our list of addresses
+ if not words[1].startswith('127.'):
+ ips['all_ipv4_addresses'].append(address['address'])
+ current_if['ipv4'].append(address)
+
+ def parse_inet6_line(self, words, current_if, ips):
+ address = {'address': words[1]}
+
+ # using cidr style addresses, ala NetBSD ifconfig post 7.1
+ if '/' in address['address']:
+ ip_address, cidr_mask = address['address'].split('/')
+
+ address['address'] = ip_address
+ address['prefix'] = cidr_mask
+
+ if len(words) > 5:
+ address['scope'] = words[5]
+ else:
+ if (len(words) >= 4) and (words[2] == 'prefixlen'):
+ address['prefix'] = words[3]
+ if (len(words) >= 6) and (words[4] == 'scopeid'):
+ address['scope'] = words[5]
+
+ localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
+ if address['address'] not in localhost6:
+ ips['all_ipv6_addresses'].append(address['address'])
+ current_if['ipv6'].append(address)
+
+ def parse_tunnel_line(self, words, current_if, ips):
+ current_if['type'] = 'tunnel'
+
+ def parse_unknown_line(self, words, current_if, ips):
+ # we are going to ignore unknown lines here - this may be
+ # a bad idea - but you can override it in your subclass
+ pass
+
+ # TODO: these are module scope static function candidates
+ # (most of the class is really...)
+ def get_options(self, option_string):
+ start = option_string.find('<') + 1
+ end = option_string.rfind('>')
+ if (start > 0) and (end > 0) and (end > start + 1):
+ option_csv = option_string[start:end]
+ return option_csv.split(',')
+ else:
+ return []
+
+ def merge_default_interface(self, defaults, interfaces, ip_type):
+ if 'interface' not in defaults:
+ return
+ if not defaults['interface'] in interfaces:
+ return
+ ifinfo = interfaces[defaults['interface']]
+ # copy all the interface values across except addresses
+ for item in ifinfo:
+ if item != 'ipv4' and item != 'ipv6':
+ defaults[item] = ifinfo[item]
+
+ ipinfo = []
+ if 'address' in defaults:
+ ipinfo = [x for x in ifinfo[ip_type] if x['address'] == defaults['address']]
+
+ if len(ipinfo) == 0:
+ ipinfo = ifinfo[ip_type]
+
+ if len(ipinfo) > 0:
+ for item in ipinfo[0]:
+ defaults[item] = ipinfo[0][item]
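
The netmask/network/broadcast arithmetic used by parse_inet_line() above,
extracted into a runnable sketch (192.0.2.0/24 is a documentation range used
here for illustration):

    import socket
    import struct

    def cidr_to_netmask(length):
        # e.g. 24 -> 0xffffff00 -> '255.255.255.0'
        netmask_bin = (1 << 32) - (1 << 32 >> length)
        return socket.inet_ntoa(struct.pack('!L', netmask_bin))

    address = '192.0.2.10'
    netmask = cidr_to_netmask(24)
    address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
    netmask_bin = struct.unpack('!L', socket.inet_aton(netmask))[0]
    network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
    broadcast = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
    print(netmask, network, broadcast)  # -> 255.255.255.0 192.0.2.0 192.0.2.255
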
diff --git a/lib/ansible/module_utils/facts/network/hpux.py b/lib/ansible/module_utils/facts/network/hpux.py
new file mode 100644
index 00000000..6e87ee92
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/hpux.py
@@ -0,0 +1,82 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import Network, NetworkCollector
+
+
+class HPUXNetwork(Network):
+ """
+    HP-UX-specific subclass of Network. Defines networking facts:
+ - default_interface
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4 address information.
+ """
+ platform = 'HP-UX'
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+ netstat_path = self.module.get_bin_path('netstat')
+
+ if netstat_path is None:
+ return network_facts
+
+ default_interfaces_facts = self.get_default_interfaces()
+ network_facts.update(default_interfaces_facts)
+
+ interfaces = self.get_interfaces_info()
+ network_facts['interfaces'] = interfaces.keys()
+ for iface in interfaces:
+ network_facts[iface] = interfaces[iface]
+
+ return network_facts
+
+ def get_default_interfaces(self):
+ default_interfaces = {}
+ rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ if len(words) > 1:
+ if words[0] == 'default':
+ default_interfaces['default_interface'] = words[4]
+ default_interfaces['default_gateway'] = words[1]
+
+ return default_interfaces
+
+ def get_interfaces_info(self):
+ interfaces = {}
+ rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
+ lines = out.splitlines()
+ for line in lines:
+ words = line.split()
+ for i in range(len(words) - 1):
+ if words[i][:3] == 'lan':
+ device = words[i]
+ interfaces[device] = {'device': device}
+ address = words[i + 3]
+ interfaces[device]['ipv4'] = {'address': address}
+ network = words[i + 2]
+ interfaces[device]['ipv4'] = {'network': network,
+ 'interface': device,
+ 'address': address}
+ return interfaces
+
+
+class HPUXNetworkCollector(NetworkCollector):
+ _fact_class = HPUXNetwork
+ _platform = 'HP-UX'
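
A standalone sketch of the `netstat -ni` scan in get_interfaces_info() above;
the sample line is illustrative of HP-UX output:

    line = "lan0  1500  192.0.2.0  192.0.2.33  252453  0  48291  0  0"
    words = line.split()
    interfaces = {}
    for i in range(len(words) - 1):
        if words[i][:3] == 'lan':
            device = words[i]
            interfaces[device] = {'ipv4': {'network': words[i + 2],
                                           'interface': device,
                                           'address': words[i + 3]}}
    print(interfaces['lan0']['ipv4']['address'])  # -> 192.0.2.33
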
diff --git a/lib/ansible/module_utils/facts/network/hurd.py b/lib/ansible/module_utils/facts/network/hurd.py
new file mode 100644
index 00000000..518df390
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/hurd.py
@@ -0,0 +1,87 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.network.base import Network, NetworkCollector
+
+
+class HurdPfinetNetwork(Network):
+ """
+    This is a GNU Hurd specific subclass of Network. It uses fsysopts to
+    get the IP address and supports only pfinet.
+ """
+ platform = 'GNU'
+ _socket_dir = '/servers/socket/'
+
+ def assign_network_facts(self, network_facts, fsysopts_path, socket_path):
+ rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
+ # FIXME: build up a interfaces datastructure, then assign into network_facts
+ network_facts['interfaces'] = []
+ for i in out.split():
+ if '=' in i and i.startswith('--'):
+ k, v = i.split('=', 1)
+ # remove '--'
+ k = k[2:]
+ if k == 'interface':
+ # remove /dev/ from /dev/eth0
+ v = v[5:]
+ network_facts['interfaces'].append(v)
+ network_facts[v] = {
+ 'active': True,
+ 'device': v,
+ 'ipv4': {},
+ 'ipv6': [],
+ }
+ current_if = v
+ elif k == 'address':
+ network_facts[current_if]['ipv4']['address'] = v
+ elif k == 'netmask':
+ network_facts[current_if]['ipv4']['netmask'] = v
+ elif k == 'address6':
+ address, prefix = v.split('/')
+ network_facts[current_if]['ipv6'].append({
+ 'address': address,
+ 'prefix': prefix,
+ })
+ return network_facts
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+
+ fsysopts_path = self.module.get_bin_path('fsysopts')
+ if fsysopts_path is None:
+ return network_facts
+
+ socket_path = None
+
+ for l in ('inet', 'inet6'):
+ link = os.path.join(self._socket_dir, l)
+ if os.path.exists(link):
+ socket_path = link
+ break
+
+ if socket_path is None:
+ return network_facts
+
+ return self.assign_network_facts(network_facts, fsysopts_path, socket_path)
+
+
+class HurdNetworkCollector(NetworkCollector):
+ _platform = 'GNU'
+ _fact_class = HurdPfinetNetwork
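
A standalone sketch of the fsysopts token parsing in assign_network_facts()
above; the sample string follows the '--key=value' form the code expects and
is illustrative only:

    sample = ('/hurd/pfinet --interface=/dev/eth0 '
              '--address=10.0.2.15 --netmask=255.255.255.0')
    for tok in sample.split():
        if '=' in tok and tok.startswith('--'):
            k, v = tok.split('=', 1)
            k = k[2:]           # drop the leading '--'
            if k == 'interface':
                v = v[5:]       # drop the leading '/dev/'
            print(k, v)
    # -> interface eth0 / address 10.0.2.15 / netmask 255.255.255.0
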
diff --git a/lib/ansible/module_utils/facts/network/iscsi.py b/lib/ansible/module_utils/facts/network/iscsi.py
new file mode 100644
index 00000000..33b2737b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/iscsi.py
@@ -0,0 +1,113 @@
+# iSCSI initiator related facts collection for Ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import subprocess
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.network.base import NetworkCollector
+
+
+class IscsiInitiatorNetworkCollector(NetworkCollector):
+ name = 'iscsi'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ """
+ Example of contents of /etc/iscsi/initiatorname.iscsi:
+
+ ## DO NOT EDIT OR REMOVE THIS FILE!
+ ## If you remove this file, the iSCSI daemon will not start.
+ ## If you change the InitiatorName, existing access control lists
+ ## may reject this initiator. The InitiatorName must be unique
+ ## for each iSCSI initiator. Do NOT duplicate iSCSI InitiatorNames.
+ InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b
+
+ Example of output from the AIX lsattr command:
+
+ # lsattr -E -l iscsi0
+ disc_filename /etc/iscsi/targets Configuration file False
+ disc_policy file Discovery Policy True
+ initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name True
+ isns_srvnames auto iSNS Servers IP Addresses True
+ isns_srvports iSNS Servers Port Numbers True
+ max_targets 16 Maximum Targets Allowed True
+ num_cmd_elems 200 Maximum number of commands to queue to driver True
+
+ Example of output from the HP-UX iscsiutil command:
+
+ #iscsiutil -l
+ Initiator Name : iqn.1986-03.com.hp:mcel_VMhost3.1f355cf6-e2db-11e0-a999-b44c0aef5537
+ Initiator Alias :
+
+ Authentication Method : None
+ CHAP Method : CHAP_UNI
+ Initiator CHAP Name :
+ CHAP Secret :
+ NAS Hostname :
+ NAS Secret :
+ Radius Server Hostname :
+ Header Digest : None, CRC32C (default)
+ Data Digest : None, CRC32C (default)
+ SLP Scope list for iSLPD :
+ """
+
+ iscsi_facts = {}
+ iscsi_facts['iscsi_iqn'] = ""
+ if sys.platform.startswith('linux') or sys.platform.startswith('sunos'):
+ for line in get_file_content('/etc/iscsi/initiatorname.iscsi', '').splitlines():
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ if line.startswith('InitiatorName='):
+ iscsi_facts['iscsi_iqn'] = line.split('=', 1)[1]
+ break
+ elif sys.platform.startswith('aix'):
+ try:
+ cmd = get_bin_path('lsattr')
+ except ValueError:
+ return iscsi_facts
+
+ cmd += " -E -l iscsi0"
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and out:
+ line = self.findstr(out, 'initiator_name')
+ iscsi_facts['iscsi_iqn'] = line.split()[1].rstrip()
+
+ elif sys.platform.startswith('hp-ux'):
+ # try to find it in the default PATH and opt_dirs
+ try:
+ cmd = get_bin_path('iscsiutil', opt_dirs=['/opt/iscsi/bin'])
+ except ValueError:
+ return iscsi_facts
+
+ cmd += " -l"
+ rc, out, err = module.run_command(cmd)
+ if out:
+ line = self.findstr(out, 'Initiator Name')
+ iscsi_facts['iscsi_iqn'] = line.split(":", 1)[1].rstrip()
+
+ return iscsi_facts
+
+    def findstr(self, text, match):
+        found = None
+        for line in text.splitlines():
+            if match in line:
+                found = line
+        return found
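
The /etc/iscsi/initiatorname.iscsi parsing above, as a standalone sketch;
the sample content mirrors the docstring example:

    lines = [
        "## DO NOT EDIT OR REMOVE THIS FILE!",
        "InitiatorName=iqn.1993-08.org.debian:01:44a42c8ddb8b",
    ]
    iqn = ""
    for line in lines:
        if line.startswith('#') or line.startswith(';') or line.strip() == '':
            continue
        if line.startswith('InitiatorName='):
            iqn = line.split('=', 1)[1]
            break
    print(iqn)  # -> iqn.1993-08.org.debian:01:44a42c8ddb8b
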
diff --git a/lib/ansible/module_utils/facts/network/linux.py b/lib/ansible/module_utils/facts/network/linux.py
new file mode 100644
index 00000000..93965f2d
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/linux.py
@@ -0,0 +1,322 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import re
+import socket
+import struct
+
+from ansible.module_utils.facts.network.base import Network, NetworkCollector
+
+from ansible.module_utils.facts.utils import get_file_content
+
+
+class LinuxNetwork(Network):
+ """
+ This is a Linux-specific subclass of Network. It defines
+ - interfaces (a list of interface names)
+ - interface_<name> dictionary of ipv4, ipv6, and mac address information.
+ - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
+ - ipv4_address and ipv6_address: the first non-local address for each family.
+ """
+ platform = 'Linux'
+ INTERFACE_TYPE = {
+ '1': 'ether',
+ '32': 'infiniband',
+ '512': 'ppp',
+ '772': 'loopback',
+ '65534': 'tunnel',
+ }
+
+ def populate(self, collected_facts=None):
+ network_facts = {}
+ ip_path = self.module.get_bin_path('ip')
+ if ip_path is None:
+ return network_facts
+ default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path,
+ collected_facts=collected_facts)
+ interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
+ network_facts['interfaces'] = interfaces.keys()
+ for iface in interfaces:
+ network_facts[iface] = interfaces[iface]
+ network_facts['default_ipv4'] = default_ipv4
+ network_facts['default_ipv6'] = default_ipv6
+ network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
+ network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
+ return network_facts
+
+ def get_default_interfaces(self, ip_path, collected_facts=None):
+ collected_facts = collected_facts or {}
+ # Use the commands:
+ # ip -4 route get 8.8.8.8 -> Google public DNS
+ # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
+ # to find out the default outgoing interface, address, and gateway
+ command = dict(
+ v4=[ip_path, '-4', 'route', 'get', '8.8.8.8'],
+ v6=[ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
+ )
+ interface = dict(v4={}, v6={})
+
+ for v in 'v4', 'v6':
+ if (v == 'v6' and collected_facts.get('ansible_os_family') == 'RedHat' and
+ collected_facts.get('ansible_distribution_version', '').startswith('4.')):
+ continue
+ if v == 'v6' and not socket.has_ipv6:
+ continue
+ rc, out, err = self.module.run_command(command[v], errors='surrogate_then_replace')
+ if not out:
+ # v6 routing may result in
+ # RTNETLINK answers: Invalid argument
+ continue
+ words = out.splitlines()[0].split()
+ # A valid output starts with the queried address on the first line
+ if len(words) > 0 and words[0] == command[v][-1]:
+ for i in range(len(words) - 1):
+ if words[i] == 'dev':
+ interface[v]['interface'] = words[i + 1]
+ elif words[i] == 'src':
+ interface[v]['address'] = words[i + 1]
+ elif words[i] == 'via' and words[i + 1] != command[v][-1]:
+ interface[v]['gateway'] = words[i + 1]
+ return interface['v4'], interface['v6']
+
+ def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
+ interfaces = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+
+ # FIXME: maybe split into smaller methods?
+ # FIXME: this is pretty much a constructor
+
+ for path in glob.glob('/sys/class/net/*'):
+ if not os.path.isdir(path):
+ continue
+ device = os.path.basename(path)
+ interfaces[device] = {'device': device}
+ if os.path.exists(os.path.join(path, 'address')):
+ macaddress = get_file_content(os.path.join(path, 'address'), default='')
+ if macaddress and macaddress != '00:00:00:00:00:00':
+ interfaces[device]['macaddress'] = macaddress
+ if os.path.exists(os.path.join(path, 'mtu')):
+ interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
+ if os.path.exists(os.path.join(path, 'operstate')):
+ interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
+ if os.path.exists(os.path.join(path, 'device', 'driver', 'module')):
+ interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
+ if os.path.exists(os.path.join(path, 'type')):
+ _type = get_file_content(os.path.join(path, 'type'))
+ interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
+ if os.path.exists(os.path.join(path, 'bridge')):
+ interfaces[device]['type'] = 'bridge'
+ interfaces[device]['interfaces'] = [os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*'))]
+ if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
+ interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
+ if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
+ interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
+ if os.path.exists(os.path.join(path, 'bonding')):
+ interfaces[device]['type'] = 'bonding'
+ interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
+ interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
+ interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
+ interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
+ primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
+ if primary:
+ interfaces[device]['primary'] = primary
+ path = os.path.join(path, 'bonding', 'all_slaves_active')
+ if os.path.exists(path):
+ interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
+ if os.path.exists(os.path.join(path, 'bonding_slave')):
+ interfaces[device]['perm_macaddress'] = get_file_content(os.path.join(path, 'bonding_slave', 'perm_hwaddr'), default='')
+ if os.path.exists(os.path.join(path, 'device')):
+ interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path, 'device')))
+ if os.path.exists(os.path.join(path, 'speed')):
+ speed = get_file_content(os.path.join(path, 'speed'))
+ if speed is not None:
+ interfaces[device]['speed'] = int(speed)
+
+ # Check whether an interface is in promiscuous mode
+ if os.path.exists(os.path.join(path, 'flags')):
+ promisc_mode = False
+                # Bit 0x0100 (IFF_PROMISC) of the flags bitmask indicates
+                # whether the interface is in promiscuous mode.
+ data = int(get_file_content(os.path.join(path, 'flags')), 16)
+ promisc_mode = (data & 0x0100 > 0)
+ interfaces[device]['promisc'] = promisc_mode
+
+ # TODO: determine if this needs to be in a nested scope/closure
+ def parse_ip_output(output, secondary=False):
+ for line in output.splitlines():
+ if not line:
+ continue
+ words = line.split()
+ broadcast = ''
+ if words[0] == 'inet':
+ if '/' in words[1]:
+ address, netmask_length = words[1].split('/')
+ if len(words) > 3:
+ if words[2] == 'brd':
+ broadcast = words[3]
+ else:
+ # pointopoint interfaces do not have a prefix
+ address = words[1]
+ netmask_length = "32"
+ address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
+ netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
+ netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
+ network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
+ iface = words[-1]
+ # NOTE: device is ref to outside scope
+ # NOTE: interfaces is also ref to outside scope
+ if iface != device:
+ interfaces[iface] = {}
+ if not secondary and "ipv4" not in interfaces[iface]:
+ interfaces[iface]['ipv4'] = {'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network}
+ else:
+ if "ipv4_secondaries" not in interfaces[iface]:
+ interfaces[iface]["ipv4_secondaries"] = []
+ interfaces[iface]["ipv4_secondaries"].append({
+ 'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ })
+
+ # add this secondary IP to the main device
+ if secondary:
+ if "ipv4_secondaries" not in interfaces[device]:
+ interfaces[device]["ipv4_secondaries"] = []
+ if device != iface:
+ interfaces[device]["ipv4_secondaries"].append({
+ 'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ })
+
+ # NOTE: default_ipv4 is ref to outside scope
+ # If this is the default address, update default_ipv4
+ if 'address' in default_ipv4 and default_ipv4['address'] == address:
+ default_ipv4['broadcast'] = broadcast
+ default_ipv4['netmask'] = netmask
+ default_ipv4['network'] = network
+ # NOTE: macaddress is ref from outside scope
+ default_ipv4['macaddress'] = macaddress
+ default_ipv4['mtu'] = interfaces[device]['mtu']
+ default_ipv4['type'] = interfaces[device].get("type", "unknown")
+ default_ipv4['alias'] = words[-1]
+ if not address.startswith('127.'):
+ ips['all_ipv4_addresses'].append(address)
+ elif words[0] == 'inet6':
+ if 'peer' == words[2]:
+ address = words[1]
+ _, prefix = words[3].split('/')
+ scope = words[5]
+ else:
+ address, prefix = words[1].split('/')
+ scope = words[3]
+ if 'ipv6' not in interfaces[device]:
+ interfaces[device]['ipv6'] = []
+ interfaces[device]['ipv6'].append({
+ 'address': address,
+ 'prefix': prefix,
+ 'scope': scope
+ })
+ # If this is the default address, update default_ipv6
+ if 'address' in default_ipv6 and default_ipv6['address'] == address:
+ default_ipv6['prefix'] = prefix
+ default_ipv6['scope'] = scope
+ default_ipv6['macaddress'] = macaddress
+ default_ipv6['mtu'] = interfaces[device]['mtu']
+ default_ipv6['type'] = interfaces[device].get("type", "unknown")
+ if not address == '::1':
+ ips['all_ipv6_addresses'].append(address)
+
+ ip_path = self.module.get_bin_path("ip")
+
+ args = [ip_path, 'addr', 'show', 'primary', device]
+ rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ parse_ip_output(primary_data)
+ else:
+            # possibly busybox; fall back to running without the "primary" arg
+ # https://github.com/ansible/ansible/issues/50871
+ args = [ip_path, 'addr', 'show', device]
+ rc, data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ parse_ip_output(data)
+
+ args = [ip_path, 'addr', 'show', 'secondary', device]
+ rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ parse_ip_output(secondary_data, secondary=True)
+
+ interfaces[device].update(self.get_ethtool_data(device))
+
+        # replace ':' with '_' in interface names, since colons are hard to use in templates
+ new_interfaces = {}
+ # i is a dict key (string) not an index int
+ for i in interfaces:
+ if ':' in i:
+ new_interfaces[i.replace(':', '_')] = interfaces[i]
+ else:
+ new_interfaces[i] = interfaces[i]
+ return new_interfaces, ips
+
+ def get_ethtool_data(self, device):
+
+ data = {}
+ ethtool_path = self.module.get_bin_path("ethtool")
+ # FIXME: exit early on falsey ethtool_path and un-indent
+ if ethtool_path:
+ args = [ethtool_path, '-k', device]
+ rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ # FIXME: exit early on falsey if we can
+ if rc == 0:
+ features = {}
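+                # map lines like 'rx-checksumming: on' to features['rx_checksumming'] = 'on'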
+ for line in stdout.strip().splitlines():
+ if not line or line.endswith(":"):
+ continue
+ key, value = line.split(": ")
+ if not value:
+ continue
+ features[key.strip().replace('-', '_')] = value.strip()
+ data['features'] = features
+
+ args = [ethtool_path, '-T', device]
+ rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
+ if rc == 0:
+ data['timestamping'] = [m.lower() for m in re.findall(r'SOF_TIMESTAMPING_(\w+)', stdout)]
+ data['hw_timestamp_filters'] = [m.lower() for m in re.findall(r'HWTSTAMP_FILTER_(\w+)', stdout)]
+ m = re.search(r'PTP Hardware Clock: (\d+)', stdout)
+ if m:
+ data['phc_index'] = int(m.groups()[0])
+
+ return data
+
+
+class LinuxNetworkCollector(NetworkCollector):
+ _platform = 'Linux'
+ _fact_class = LinuxNetwork
+ required_facts = set(['distribution', 'platform'])
diff --git a/lib/ansible/module_utils/facts/network/netbsd.py b/lib/ansible/module_utils/facts/network/netbsd.py
new file mode 100644
index 00000000..de8ceff6
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/netbsd.py
@@ -0,0 +1,48 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class NetBSDNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the NetBSD Network Class.
+    It uses the GenericBsdIfconfigNetwork.
+ """
+ platform = 'NetBSD'
+
+ def parse_media_line(self, words, current_if, ips):
+ # example of line:
+ # $ ifconfig
+ # ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
+ # ec_capabilities=1<VLAN_MTU>
+ # ec_enabled=0
+ # address: 00:20:91:45:00:78
+ # media: Ethernet 10baseT full-duplex
+ # inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
+ current_if['media'] = words[1]
+ if len(words) > 2:
+ current_if['media_type'] = words[2]
+ if len(words) > 3:
+ current_if['media_options'] = words[3].split(',')
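+        # for the example line above this yields: media='Ethernet',
+        # media_type='10baseT', media_options=['full-duplex']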
+
+
+class NetBSDNetworkCollector(NetworkCollector):
+ _fact_class = NetBSDNetwork
+ _platform = 'NetBSD'
diff --git a/lib/ansible/module_utils/facts/network/nvme.py b/lib/ansible/module_utils/facts/network/nvme.py
new file mode 100644
index 00000000..5111a90b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/nvme.py
@@ -0,0 +1,55 @@
+# NVMe initiator related facts collection for Ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.network.base import NetworkCollector
+
+
+class NvmeInitiatorNetworkCollector(NetworkCollector):
+ name = 'nvme'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ """
+ Currently NVMe is only supported in some Linux distributions.
+ If NVMe is configured on the host then a file will have been created
+ during the NVMe driver installation. This file holds the unique NQN
+ of the host.
+
+ Example of contents of /etc/nvme/hostnqn:
+
+ # cat /etc/nvme/hostnqn
+ nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d
+
+ """
+
+ nvme_facts = {}
+ nvme_facts['hostnqn'] = ""
+ if sys.platform.startswith('linux'):
+ for line in get_file_content('/etc/nvme/hostnqn', '').splitlines():
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ if line.startswith('nqn.'):
+ nvme_facts['hostnqn'] = line
+ break
+ return nvme_facts
diff --git a/lib/ansible/module_utils/facts/network/openbsd.py b/lib/ansible/module_utils/facts/network/openbsd.py
new file mode 100644
index 00000000..9e11d82f
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/openbsd.py
@@ -0,0 +1,42 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class OpenBSDNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the OpenBSD Network Class.
+ It uses the GenericBsdIfconfigNetwork.
+ """
+ platform = 'OpenBSD'
+
+    # OpenBSD 'ifconfig -a' does not include alias information, so '-A' is passed as well
+ def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
+ return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
+
+ # Return macaddress instead of lladdr
+ def parse_lladdr_line(self, words, current_if, ips):
+ current_if['macaddress'] = words[1]
+ current_if['type'] = 'ether'
+
+
+class OpenBSDNetworkCollector(NetworkCollector):
+ _fact_class = OpenBSDNetwork
+ _platform = 'OpenBSD'
diff --git a/lib/ansible/module_utils/facts/network/sunos.py b/lib/ansible/module_utils/facts/network/sunos.py
new file mode 100644
index 00000000..adba14c6
--- /dev/null
+++ b/lib/ansible/module_utils/facts/network/sunos.py
@@ -0,0 +1,116 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
+
+
+class SunOSNetwork(GenericBsdIfconfigNetwork):
+ """
+ This is the SunOS Network Class.
+ It uses the GenericBsdIfconfigNetwork.
+
+ Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
+ so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
+ """
+ platform = 'SunOS'
+
+ # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
+ # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
+ # 'parse_interface_line()' checks for previously seen interfaces before defining
+ # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
+ def get_interfaces_info(self, ifconfig_path):
+ interfaces = {}
+ current_if = {}
+ ips = dict(
+ all_ipv4_addresses=[],
+ all_ipv6_addresses=[],
+ )
+ rc, out, err = self.module.run_command([ifconfig_path, '-a'])
+
+ for line in out.splitlines():
+
+ if line:
+ words = line.split()
+
+ if re.match(r'^\S', line) and len(words) > 3:
+ current_if = self.parse_interface_line(words, current_if, interfaces)
+ interfaces[current_if['device']] = current_if
+ elif words[0].startswith('options='):
+ self.parse_options_line(words, current_if, ips)
+ elif words[0] == 'nd6':
+ self.parse_nd6_line(words, current_if, ips)
+ elif words[0] == 'ether':
+ self.parse_ether_line(words, current_if, ips)
+ elif words[0] == 'media:':
+ self.parse_media_line(words, current_if, ips)
+ elif words[0] == 'status:':
+ self.parse_status_line(words, current_if, ips)
+ elif words[0] == 'lladdr':
+ self.parse_lladdr_line(words, current_if, ips)
+ elif words[0] == 'inet':
+ self.parse_inet_line(words, current_if, ips)
+ elif words[0] == 'inet6':
+ self.parse_inet6_line(words, current_if, ips)
+ else:
+ self.parse_unknown_line(words, current_if, ips)
+
+ # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
+ # ipv4/ipv6 lists which is ugly and hard to read.
+ # This quick hack merges the dictionaries. Purely cosmetic.
+ for iface in interfaces:
+ for v in 'ipv4', 'ipv6':
+ combined_facts = {}
+ for facts in interfaces[iface][v]:
+ combined_facts.update(facts)
+ if len(combined_facts.keys()) > 0:
+ interfaces[iface][v] = [combined_facts]
+
+ return interfaces, ips
+
+ def parse_interface_line(self, words, current_if, interfaces):
+ device = words[0][0:-1]
+ if device not in interfaces:
+ current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
+ else:
+ current_if = interfaces[device]
+ flags = self.get_options(words[1])
+ v = 'ipv4'
+ if 'IPv6' in flags:
+ v = 'ipv6'
+ if 'LOOPBACK' in flags:
+ current_if['type'] = 'loopback'
+ current_if[v].append({'flags': flags, 'mtu': words[3]})
+ current_if['macaddress'] = 'unknown' # will be overwritten later
+ return current_if
+
+ # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
+ # Add leading zero to each octet where needed.
+ def parse_ether_line(self, words, current_if, ips):
+ macaddress = ''
+ for octet in words[1].split(':'):
+ octet = ('0' + octet)[-2:None]
+ macaddress += (octet + ':')
+ current_if['macaddress'] = macaddress[0:-1]
+
+
+class SunOSNetworkCollector(NetworkCollector):
+ _fact_class = SunOSNetwork
+ _platform = 'SunOS'
diff --git a/lib/ansible/module_utils/facts/other/__init__.py b/lib/ansible/module_utils/facts/other/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/other/__init__.py
diff --git a/lib/ansible/module_utils/facts/other/facter.py b/lib/ansible/module_utils/facts/other/facter.py
new file mode 100644
index 00000000..899fcc41
--- /dev/null
+++ b/lib/ansible/module_utils/facts/other/facter.py
@@ -0,0 +1,85 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class FacterFactCollector(BaseFactCollector):
+ name = 'facter'
+ _fact_ids = set(['facter'])
+
+ def __init__(self, collectors=None, namespace=None):
+ namespace = PrefixFactNamespace(namespace_name='facter',
+ prefix='facter_')
+ super(FacterFactCollector, self).__init__(collectors=collectors,
+ namespace=namespace)
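+        # facts returned by collect() are re-keyed into this namespace,
+        # e.g. 'osfamily' -> 'facter_osfamily'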
+
+ def find_facter(self, module):
+ facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
+ cfacter_path = module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
+
+ # Prefer to use cfacter if available
+ if cfacter_path is not None:
+ facter_path = cfacter_path
+
+ return facter_path
+
+ def run_facter(self, module, facter_path):
+        # if facter is installed, run it with --json (which requires that
+        # ruby-json is also installed) and include its data in the JSON
+ rc, out, err = module.run_command(facter_path + " --puppet --json")
+ return rc, out, err
+
+ def get_facter_output(self, module):
+ facter_path = self.find_facter(module)
+ if not facter_path:
+ return None
+
+ rc, out, err = self.run_facter(module, facter_path)
+
+ if rc != 0:
+ return None
+
+ return out
+
+ def collect(self, module=None, collected_facts=None):
+        # Note that this mirrors previous facter behavior, where there isn't
+        # an 'ansible_facter' key in the main fact dict; instead, 'facter_whatever'
+        # items are added to the main dict.
+ facter_dict = {}
+
+ if not module:
+ return facter_dict
+
+ facter_output = self.get_facter_output(module)
+
+        # TODO: if we fail, should we add an empty facter key or nothing?
+ if facter_output is None:
+ return facter_dict
+
+ try:
+ facter_dict = json.loads(facter_output)
+ except Exception:
+ # FIXME: maybe raise a FactCollectorError with some info attrs?
+ pass
+
+ return facter_dict
diff --git a/lib/ansible/module_utils/facts/other/ohai.py b/lib/ansible/module_utils/facts/other/ohai.py
new file mode 100644
index 00000000..df292376
--- /dev/null
+++ b/lib/ansible/module_utils/facts/other/ohai.py
@@ -0,0 +1,72 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class OhaiFactCollector(BaseFactCollector):
+    '''Collects facts from Ohai (Chef's system profiling tool).'''
+ name = 'ohai'
+ _fact_ids = set()
+
+ def __init__(self, collectors=None, namespace=None):
+ namespace = PrefixFactNamespace(namespace_name='ohai',
+ prefix='ohai_')
+ super(OhaiFactCollector, self).__init__(collectors=collectors,
+ namespace=namespace)
+
+ def find_ohai(self, module):
+ ohai_path = module.get_bin_path('ohai')
+ return ohai_path
+
+    def run_ohai(self, module, ohai_path):
+ rc, out, err = module.run_command(ohai_path)
+ return rc, out, err
+
+ def get_ohai_output(self, module):
+ ohai_path = self.find_ohai(module)
+ if not ohai_path:
+ return None
+
+ rc, out, err = self.run_ohai(module, ohai_path)
+ if rc != 0:
+ return None
+
+ return out
+
+ def collect(self, module=None, collected_facts=None):
+ ohai_facts = {}
+ if not module:
+ return ohai_facts
+
+ ohai_output = self.get_ohai_output(module)
+
+ if ohai_output is None:
+ return ohai_facts
+
+ try:
+ ohai_facts = json.loads(ohai_output)
+ except Exception:
+ # FIXME: useful error, logging, something...
+ pass
+
+ return ohai_facts
diff --git a/lib/ansible/module_utils/facts/packages.py b/lib/ansible/module_utils/facts/packages.py
new file mode 100644
index 00000000..808a41b6
--- /dev/null
+++ b/lib/ansible/module_utils/facts/packages.py
@@ -0,0 +1,86 @@
+# (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from abc import ABCMeta, abstractmethod
+
+from ansible.module_utils.six import with_metaclass
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common._utils import get_all_subclasses
+
+
+def get_all_pkg_managers():
+
+ return dict([(obj.__name__.lower(), obj) for obj in get_all_subclasses(PkgMgr) if obj not in (CLIMgr, LibMgr)])
+
+
+class PkgMgr(with_metaclass(ABCMeta, object)):
+
+ @abstractmethod
+ def is_available(self):
+ # This method is supposed to return True/False if the package manager is currently installed/usable
+ # It can also 'prep' the required systems in the process of detecting availability
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ # This method should return a list of installed packages, each list item will be passed to get_package_details
+ pass
+
+ @abstractmethod
+ def get_package_details(self, package):
+        # This takes a 'package' item and returns a dictionary with the package information; 'name' and 'version' are the minimal required keys
+ pass
+
+ def get_packages(self):
+ # Take all of the above and return a dictionary of lists of dictionaries (package = list of installed versions)
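+        # e.g. (illustrative) {'openssl': [{'name': 'openssl', 'version': '1.1.1k', 'source': 'rpm'}]}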
+
+ installed_packages = {}
+ for package in self.list_installed():
+ package_details = self.get_package_details(package)
+ if 'source' not in package_details:
+ package_details['source'] = self.__class__.__name__.lower()
+ name = package_details['name']
+ if name not in installed_packages:
+ installed_packages[name] = [package_details]
+ else:
+ installed_packages[name].append(package_details)
+ return installed_packages
+
+
+class LibMgr(PkgMgr):
+
+ LIB = None
+
+ def __init__(self):
+
+ self._lib = None
+ super(LibMgr, self).__init__()
+
+ def is_available(self):
+ found = False
+ try:
+ self._lib = __import__(self.LIB)
+ found = True
+ except ImportError:
+ pass
+ return found
+
+
+class CLIMgr(PkgMgr):
+
+ CLI = None
+
+ def __init__(self):
+
+ self._cli = None
+ super(CLIMgr, self).__init__()
+
+ def is_available(self):
+ try:
+ self._cli = get_bin_path(self.CLI)
+ except ValueError:
+ return False
+ return True
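+
+
+# Illustrative sketch only (not part of this patch): a concrete CLI-backed
+# manager would subclass CLIMgr roughly as below. The 'rpm' query format is
+# an assumption for demonstration, and 'module' (an AnsibleModule) would have
+# to be supplied by the surrounding code:
+#
+#   class Rpm(CLIMgr):
+#       CLI = 'rpm'
+#
+#       def list_installed(self):
+#           rc, out, err = module.run_command([self._cli, '-qa', '--qf', '%{NAME}\t%{VERSION}\n'])
+#           return out.splitlines()
+#
+#       def get_package_details(self, package):
+#           name, version = package.split('\t')
+#           return {'name': name, 'version': version}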
diff --git a/lib/ansible/module_utils/facts/sysctl.py b/lib/ansible/module_utils/facts/sysctl.py
new file mode 100644
index 00000000..4c82dc22
--- /dev/null
+++ b/lib/ansible/module_utils/facts/sysctl.py
@@ -0,0 +1,38 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+def get_sysctl(module, prefixes):
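+    # Parse 'sysctl <prefix>' output into a flat dict. Lines may use '=' or
+    # ': ' as the separator, e.g. (illustrative):
+    #   'kern.ostype = FreeBSD' -> {'kern.ostype': 'FreeBSD'}
+    #   'kern.ostype: Darwin'   -> {'kern.ostype': 'Darwin'}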
+ sysctl_cmd = module.get_bin_path('sysctl')
+ cmd = [sysctl_cmd]
+ cmd.extend(prefixes)
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ return dict()
+
+ sysctl = dict()
+ for line in out.splitlines():
+ if not line:
+ continue
+ (key, value) = re.split(r'\s?=\s?|: ', line, maxsplit=1)
+ sysctl[key] = value.strip()
+
+ return sysctl
diff --git a/lib/ansible/module_utils/facts/system/__init__.py b/lib/ansible/module_utils/facts/system/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/__init__.py
diff --git a/lib/ansible/module_utils/facts/system/apparmor.py b/lib/ansible/module_utils/facts/system/apparmor.py
new file mode 100644
index 00000000..53c3ed18
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/apparmor.py
@@ -0,0 +1,39 @@
+# Collect facts related to apparmor
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class ApparmorFactCollector(BaseFactCollector):
+ name = 'apparmor'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ apparmor_facts = {}
+ if os.path.exists('/sys/kernel/security/apparmor'):
+ apparmor_facts['status'] = 'enabled'
+ else:
+ apparmor_facts['status'] = 'disabled'
+
+ facts_dict['apparmor'] = apparmor_facts
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/caps.py b/lib/ansible/module_utils/facts/system/caps.py
new file mode 100644
index 00000000..057eeda4
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/caps.py
@@ -0,0 +1,55 @@
+# Collect facts related to the system's 'capabilities' via capsh
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class SystemCapabilitiesFactCollector(BaseFactCollector):
+ name = 'caps'
+ _fact_ids = set(['system_capabilities',
+ 'system_capabilities_enforced'])
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ if not module:
+ return facts_dict
+
+ capsh_path = module.get_bin_path('capsh')
+        # NOTE: early exit 'if not capsh_path' and unindent rest of method -akl
+ if capsh_path:
+ # NOTE: -> get_caps_data()/parse_caps_data() for easier mocking -akl
+ rc, out, err = module.run_command([capsh_path, "--print"], errors='surrogate_then_replace')
+ enforced_caps = []
+ enforced = 'NA'
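+            # e.g. 'Current: =ep' (the full capability set) -> enforced='False';
+            # 'Current: = cap_chown+ep' -> enforced='True',
+            # enforced_caps=['cap_chown+ep']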
+ for line in out.splitlines():
+ if len(line) < 1:
+ continue
+ if line.startswith('Current:'):
+ if line.split(':')[1].strip() == '=ep':
+ enforced = 'False'
+ else:
+ enforced = 'True'
+ enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
+
+ facts_dict['system_capabilities_enforced'] = enforced
+ facts_dict['system_capabilities'] = enforced_caps
+
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/chroot.py b/lib/ansible/module_utils/facts/system/chroot.py
new file mode 100644
index 00000000..25c61125
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/chroot.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+def is_chroot(module=None):
+
+ is_chroot = None
+
+ if os.environ.get('debian_chroot', False):
+ is_chroot = True
+ else:
+ my_root = os.stat('/')
+ try:
+ # check if my file system is the root one
+ proc_root = os.stat('/proc/1/root/.')
+ is_chroot = my_root.st_ino != proc_root.st_ino or my_root.st_dev != proc_root.st_dev
+ except Exception:
+            # not root, or no /proc; fall back to checking whether root is inode #2
+ fs_root_ino = 2
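+            # 2 is the root inode on ext2/3/4; the checks below use 256 for
+            # btrfs and 128 for xfs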
+
+ if module is not None:
+ stat_path = module.get_bin_path('stat')
+ if stat_path:
+ cmd = [stat_path, '-f', '--format=%T', '/']
+ rc, out, err = module.run_command(cmd)
+ if 'btrfs' in out:
+ fs_root_ino = 256
+ elif 'xfs' in out:
+ fs_root_ino = 128
+
+ is_chroot = (my_root.st_ino != fs_root_ino)
+
+ return is_chroot
+
+
+class ChrootFactCollector(BaseFactCollector):
+ name = 'chroot'
+ _fact_ids = set(['is_chroot'])
+
+ def collect(self, module=None, collected_facts=None):
+ return {'is_chroot': is_chroot(module)}
diff --git a/lib/ansible/module_utils/facts/system/cmdline.py b/lib/ansible/module_utils/facts/system/cmdline.py
new file mode 100644
index 00000000..1b1b71e7
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/cmdline.py
@@ -0,0 +1,79 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import shlex
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class CmdLineFactCollector(BaseFactCollector):
+ name = 'cmdline'
+ _fact_ids = set()
+
+ def _get_proc_cmdline(self):
+ return get_file_content('/proc/cmdline')
+
+ def _parse_proc_cmdline(self, data):
+ cmdline_dict = {}
+ try:
+ for piece in shlex.split(data, posix=False):
+ item = piece.split('=', 1)
+ if len(item) == 1:
+ cmdline_dict[item[0]] = True
+ else:
+ cmdline_dict[item[0]] = item[1]
+ except ValueError:
+ pass
+
+ return cmdline_dict
+
+ def _parse_proc_cmdline_facts(self, data):
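+        # unlike _parse_proc_cmdline, repeated keys accumulate into a list, e.g.
+        # 'console=tty0 console=ttyS0' -> {'console': ['tty0', 'ttyS0']}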
+ cmdline_dict = {}
+ try:
+ for piece in shlex.split(data, posix=False):
+ item = piece.split('=', 1)
+ if len(item) == 1:
+ cmdline_dict[item[0]] = True
+ else:
+ if item[0] in cmdline_dict:
+ if isinstance(cmdline_dict[item[0]], list):
+ cmdline_dict[item[0]].append(item[1])
+ else:
+ new_list = [cmdline_dict[item[0]], item[1]]
+ cmdline_dict[item[0]] = new_list
+ else:
+ cmdline_dict[item[0]] = item[1]
+ except ValueError:
+ pass
+
+ return cmdline_dict
+
+ def collect(self, module=None, collected_facts=None):
+ cmdline_facts = {}
+
+ data = self._get_proc_cmdline()
+
+ if not data:
+ return cmdline_facts
+
+ cmdline_facts['cmdline'] = self._parse_proc_cmdline(data)
+ cmdline_facts['proc_cmdline'] = self._parse_proc_cmdline_facts(data)
+
+ return cmdline_facts
diff --git a/lib/ansible/module_utils/facts/system/date_time.py b/lib/ansible/module_utils/facts/system/date_time.py
new file mode 100644
index 00000000..aa59d5bc
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/date_time.py
@@ -0,0 +1,62 @@
+# Date and time related facts collection for Ansible.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import time
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class DateTimeFactCollector(BaseFactCollector):
+ name = 'date_time'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ date_time_facts = {}
+
+ # Store the timestamp once, then get local and UTC versions from that
+ epoch_ts = time.time()
+ now = datetime.datetime.fromtimestamp(epoch_ts)
+ utcnow = datetime.datetime.utcfromtimestamp(epoch_ts)
+
+ date_time_facts['year'] = now.strftime('%Y')
+ date_time_facts['month'] = now.strftime('%m')
+ date_time_facts['weekday'] = now.strftime('%A')
+ date_time_facts['weekday_number'] = now.strftime('%w')
+ date_time_facts['weeknumber'] = now.strftime('%W')
+ date_time_facts['day'] = now.strftime('%d')
+ date_time_facts['hour'] = now.strftime('%H')
+ date_time_facts['minute'] = now.strftime('%M')
+ date_time_facts['second'] = now.strftime('%S')
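+        # strftime('%s') is a non-portable extension; fall back to the captured
+        # timestamp on platforms where it is not expanded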
+ date_time_facts['epoch'] = now.strftime('%s')
+ if date_time_facts['epoch'] == '' or date_time_facts['epoch'][0] == '%':
+ date_time_facts['epoch'] = str(int(epoch_ts))
+ date_time_facts['date'] = now.strftime('%Y-%m-%d')
+ date_time_facts['time'] = now.strftime('%H:%M:%S')
+ date_time_facts['iso8601_micro'] = utcnow.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+ date_time_facts['iso8601'] = utcnow.strftime("%Y-%m-%dT%H:%M:%SZ")
+ date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
+ date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
+ date_time_facts['tz'] = time.strftime("%Z")
+ date_time_facts['tz_offset'] = time.strftime("%z")
+
+ facts_dict['date_time'] = date_time_facts
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/distribution.py b/lib/ansible/module_utils/facts/system/distribution.py
new file mode 100644
index 00000000..8c2c7b42
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/distribution.py
@@ -0,0 +1,681 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import re
+
+from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
+ get_distribution_codename
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+def get_uname(module, flags=('-v',)):
+ if isinstance(flags, str):
+ flags = flags.split()
+ command = ['uname']
+ command.extend(flags)
+ rc, out, err = module.run_command(command)
+ if rc == 0:
+ return out
+ return None
+
+
+def _file_exists(path, allow_empty=False):
+    # the file does not exist, exit early
+ if not os.path.exists(path):
+ return False
+
+    # if the path just needs to exist (i.e., it can be empty), we are done
+ if allow_empty:
+ return True
+
+    # file exists but is empty, and we don't allow_empty
+ if os.path.getsize(path) == 0:
+ return False
+
+ # file exists with some content
+ return True
+
+
+class DistributionFiles:
+    '''Has-a collection of distro file parsers (os-release, etc.) and logic for finding the right one.'''
+    # every distribution name mentioned here must have one of
+    # - allowempty == True
+    # - be listed in SEARCH_STRING
+    # - have a parse_distribution_file_DISTNAME function implemented
+    # keep names in sync with Conditionals page of docs
+ OSDIST_LIST = (
+ {'path': '/etc/altlinux-release', 'name': 'Altlinux'},
+ {'path': '/etc/oracle-release', 'name': 'OracleLinux'},
+ {'path': '/etc/slackware-version', 'name': 'Slackware'},
+ {'path': '/etc/redhat-release', 'name': 'RedHat'},
+ {'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
+ {'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
+ {'path': '/etc/system-release', 'name': 'Amazon'},
+ {'path': '/etc/alpine-release', 'name': 'Alpine'},
+ {'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
+ {'path': '/etc/os-release', 'name': 'Archlinux'},
+ {'path': '/etc/os-release', 'name': 'SUSE'},
+ {'path': '/etc/SuSE-release', 'name': 'SUSE'},
+ {'path': '/etc/gentoo-release', 'name': 'Gentoo'},
+ {'path': '/etc/os-release', 'name': 'Debian'},
+ {'path': '/etc/lsb-release', 'name': 'Debian'},
+ {'path': '/etc/lsb-release', 'name': 'Mandriva'},
+ {'path': '/etc/sourcemage-release', 'name': 'SMGL'},
+ {'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
+ {'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
+ {'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
+ {'path': '/etc/os-release', 'name': 'NA'},
+ )
+
+ SEARCH_STRING = {
+ 'OracleLinux': 'Oracle Linux',
+ 'RedHat': 'Red Hat',
+ 'Altlinux': 'ALT',
+ 'SMGL': 'Source Mage GNU/Linux',
+ }
+
+    # We can't include this in SEARCH_STRING because a name match on its keys
+    # causes a fallback to using the first whitespace-separated item from the
+    # file content as the name. For os-release, that is in the form 'NAME=Arch'.
+ OS_RELEASE_ALIAS = {
+ 'Archlinux': 'Arch Linux'
+ }
+
+ STRIP_QUOTES = r'\'\"\\'
+
+ def __init__(self, module):
+ self.module = module
+
+ def _get_file_content(self, path):
+ return get_file_content(path)
+
+ def _get_dist_file_content(self, path, allow_empty=False):
+        # can't find that dist file, or it is unexpectedly empty
+ if not _file_exists(path, allow_empty=allow_empty):
+ return False, None
+
+ data = self._get_file_content(path)
+ return True, data
+
+ def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
+ dist_file_dict = {}
+ dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
+ if name in self.SEARCH_STRING:
+            # look for the distribution string in the data
+            # only the distribution name is set; the version is assumed to be correct from distro.linux_distribution()
+ if self.SEARCH_STRING[name] in dist_file_content:
+ # this sets distribution=RedHat if 'Red Hat' shows up in data
+ dist_file_dict['distribution'] = name
+ dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
+ else:
+ # this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
+ dist_file_dict['distribution'] = dist_file_content.split()[0]
+
+ return True, dist_file_dict
+
+ if name in self.OS_RELEASE_ALIAS:
+ if self.OS_RELEASE_ALIAS[name] in dist_file_content:
+ dist_file_dict['distribution'] = name
+ return True, dist_file_dict
+ return False, dist_file_dict
+
+ # call a dedicated function for parsing the file content
+ # TODO: replace with a map or a class
+ try:
+            # FIXME: most of these don't actually look at the dist file contents, but random other stuff
+ distfunc_name = 'parse_distribution_file_' + name
+ distfunc = getattr(self, distfunc_name)
+ parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
+ return parsed, dist_file_dict
+        except AttributeError:
+            # this should never happen, but if it does, fail quietly rather than with a traceback
+            return False, dist_file_dict
+
+ # to debug multiple matching release files, one can use:
+ # self.facts['distribution_debug'].append({path + ' ' + name:
+ # (parsed,
+ # self.facts['distribution'],
+ # self.facts['distribution_version'],
+ # self.facts['distribution_release'],
+ # )})
+
+ def _guess_distribution(self):
+ # try to find out which linux distribution this is
+ dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
+ distribution_guess = {
+ 'distribution': dist[0] or 'NA',
+ 'distribution_version': dist[1] or 'NA',
+ # distribution_release can be the empty string
+ 'distribution_release': 'NA' if dist[2] is None else dist[2]
+ }
+
+ distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
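+        # e.g. dist = ('Ubuntu', '20.04', 'focal') yields
+        # distribution_major_version '20'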
+ return distribution_guess
+
+ def process_dist_files(self):
+        # Now handle the special cases: check the known distro release files in order ...
+ # self.facts['distribution_debug'] = []
+ dist_file_facts = {}
+
+ dist_guess = self._guess_distribution()
+ dist_file_facts.update(dist_guess)
+
+ for ddict in self.OSDIST_LIST:
+ name = ddict['name']
+ path = ddict['path']
+ allow_empty = ddict.get('allowempty', False)
+
+ has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)
+
+            # if the dist file is allowed to be empty, trust the name from OSDIST_LIST.
+            # For example, Archlinux has an empty /etc/arch-release alongside an
+            # /etc/os-release with a different name.
+ if has_dist_file and allow_empty:
+ dist_file_facts['distribution'] = name
+ dist_file_facts['distribution_file_path'] = path
+ dist_file_facts['distribution_file_variety'] = name
+ break
+
+ if not has_dist_file:
+ # keep looking
+ continue
+
+ parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)
+
+ # finally found the right os dist file and were able to parse it
+ if parsed_dist_file:
+ dist_file_facts['distribution'] = name
+ dist_file_facts['distribution_file_path'] = path
+ # distribution and file_variety are the same here, but distribution
+ # will be changed/mapped to a more specific name.
+ # ie, dist=Fedora, file_variety=RedHat
+ dist_file_facts['distribution_file_variety'] = name
+ dist_file_facts['distribution_file_parsed'] = parsed_dist_file
+ dist_file_facts.update(parsed_dist_file_facts)
+ break
+
+ return dist_file_facts
+
+ # TODO: FIXME: split distro file parsing into its own module or class
+ def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
+ slackware_facts = {}
+ if 'Slackware' not in data:
+ return False, slackware_facts # TODO: remove
+ slackware_facts['distribution'] = name
+ version = re.findall(r'\w+[.]\w+\+?', data)
+ if version:
+ slackware_facts['distribution_version'] = version[0]
+ return True, slackware_facts
+
+ def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
+ amazon_facts = {}
+ if 'Amazon' not in data:
+ return False, amazon_facts
+ amazon_facts['distribution'] = 'Amazon'
+ version = [n for n in data.split() if n.isdigit()]
+ version = version[0] if version else 'NA'
+ amazon_facts['distribution_version'] = version
+ return True, amazon_facts
+
+ def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
+ openwrt_facts = {}
+ if 'OpenWrt' not in data:
+ return False, openwrt_facts # TODO: remove
+ openwrt_facts['distribution'] = name
+ version = re.search('DISTRIB_RELEASE="(.*)"', data)
+ if version:
+ openwrt_facts['distribution_version'] = version.groups()[0]
+ release = re.search('DISTRIB_CODENAME="(.*)"', data)
+ if release:
+ openwrt_facts['distribution_release'] = release.groups()[0]
+ return True, openwrt_facts
+
+ def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
+ alpine_facts = {}
+ alpine_facts['distribution'] = 'Alpine'
+ alpine_facts['distribution_version'] = data
+ return True, alpine_facts
+
+ def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
+ suse_facts = {}
+ if 'suse' not in data.lower():
+ return False, suse_facts # TODO: remove if tested without this
+ if path == '/etc/os-release':
+ for line in data.splitlines():
+ distribution = re.search("^NAME=(.*)", line)
+ if distribution:
+ suse_facts['distribution'] = distribution.group(1).strip('"')
+            # example version patterns: 13.04, 13.0, 13
+ distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
+ if distribution_version:
+ suse_facts['distribution_version'] = distribution_version.group(1)
+ suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
+ if 'open' in data.lower():
+ release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
+ if release:
+ suse_facts['distribution_release'] = release.groups()[0]
+ elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
+                    # SLES doesn't have funny release names
+ release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
+ if release.group(1):
+ release = release.group(1)
+ else:
+ release = "0" # no minor number, so it is the first release
+ suse_facts['distribution_release'] = release
+ elif path == '/etc/SuSE-release':
+ if 'open' in data.lower():
+ data = data.splitlines()
+ distdata = get_file_content(path).splitlines()[0]
+ suse_facts['distribution'] = distdata.split()[0]
+ for line in data:
+ release = re.search('CODENAME *= *([^\n]+)', line)
+ if release:
+ suse_facts['distribution_release'] = release.groups()[0].strip()
+ elif 'enterprise' in data.lower():
+ lines = data.splitlines()
+ distribution = lines[0].split()[0]
+ if "Server" in data:
+ suse_facts['distribution'] = "SLES"
+ elif "Desktop" in data:
+ suse_facts['distribution'] = "SLED"
+ for line in lines:
+                release = re.search('PATCHLEVEL = ([0-9]+)', line)  # SLES doesn't have funny release names
+ if release:
+ suse_facts['distribution_release'] = release.group(1)
+ suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
+
+ # See https://www.suse.com/support/kb/doc/?id=000019341 for SLES for SAP
+ if os.path.islink('/etc/products.d/baseproduct') and os.path.realpath('/etc/products.d/baseproduct').endswith('SLES_SAP.prod'):
+ suse_facts['distribution'] = 'SLES_SAP'
+
+ return True, suse_facts
+
+ def parse_distribution_file_Debian(self, name, data, path, collected_facts):
+ debian_facts = {}
+ if 'Debian' in data or 'Raspbian' in data:
+ debian_facts['distribution'] = 'Debian'
+ release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+
+            # Last resort: try to find the release from tzdata, as either lsb is missing or this is a very old Debian
+ if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
+ dpkg_cmd = self.module.get_bin_path('dpkg')
+ if dpkg_cmd:
+ cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
+                rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc == 0:
+ debian_facts['distribution_release'] = out.strip()
+ elif 'Ubuntu' in data:
+ debian_facts['distribution'] = 'Ubuntu'
+ # nothing else to do, Ubuntu gets correct info from python functions
+ elif 'SteamOS' in data:
+ debian_facts['distribution'] = 'SteamOS'
+ # nothing else to do, SteamOS gets correct info from python functions
+ elif path in ('/etc/lsb-release', '/etc/os-release') and 'Kali' in data:
+ # Kali does not provide /etc/lsb-release anymore
+ debian_facts['distribution'] = 'Kali'
+ release = re.search('DISTRIB_RELEASE=(.*)', data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ elif 'Devuan' in data:
+ debian_facts['distribution'] = 'Devuan'
+ release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = version.group(1)
+ elif 'Cumulus' in data:
+ debian_facts['distribution'] = 'Cumulus Linux'
+ version = re.search(r"VERSION_ID=(.*)", data)
+ if version:
+ major, _minor, _dummy_ver = version.group(1).split(".")
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = major
+
+ release = re.search(r'VERSION="(.*)"', data)
+ if release:
+ debian_facts['distribution_release'] = release.groups()[0]
+ elif "Mint" in data:
+ debian_facts['distribution'] = 'Linux Mint'
+ version = re.search(r"VERSION_ID=\"(.*)\"", data)
+ if version:
+ debian_facts['distribution_version'] = version.group(1)
+ debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
+ else:
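+    # e.g. '0:1:2:d:e:f' -> '00:01:02:0d:0e:0f'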
+ return False, debian_facts
+
+ return True, debian_facts
+
+ def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
+ mandriva_facts = {}
+ if 'Mandriva' in data:
+ mandriva_facts['distribution'] = 'Mandriva'
+ version = re.search('DISTRIB_RELEASE="(.*)"', data)
+ if version:
+ mandriva_facts['distribution_version'] = version.groups()[0]
+ release = re.search('DISTRIB_CODENAME="(.*)"', data)
+ if release:
+ mandriva_facts['distribution_release'] = release.groups()[0]
+ mandriva_facts['distribution'] = name
+ else:
+ return False, mandriva_facts
+
+ return True, mandriva_facts
+
+ def parse_distribution_file_NA(self, name, data, path, collected_facts):
+ na_facts = {}
+ for line in data.splitlines():
+ distribution = re.search("^NAME=(.*)", line)
+ if distribution and name == 'NA':
+ na_facts['distribution'] = distribution.group(1).strip('"')
+ version = re.search("^VERSION=(.*)", line)
+ if version and collected_facts['distribution_version'] == 'NA':
+ na_facts['distribution_version'] = version.group(1).strip('"')
+ return True, na_facts
+
+ def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
+ coreos_facts = {}
+ # FIXME: pass in ro copy of facts for this kind of thing
+ distro = get_distribution()
+
+ if distro.lower() == 'coreos':
+ if not data:
+ # include fix from #15230, #15228
+ # TODO: verify this is ok for above bugs
+ return False, coreos_facts
+ release = re.search("^GROUP=(.*)", data)
+ if release:
+ coreos_facts['distribution_release'] = release.group(1).strip('"')
+ else:
+ return False, coreos_facts # TODO: remove if tested without this
+
+ return True, coreos_facts
+
+ def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
+ flatcar_facts = {}
+ distro = get_distribution()
+
+ if distro.lower() == 'flatcar':
+ if not data:
+ return False, flatcar_facts
+ release = re.search("^GROUP=(.*)", data)
+ if release:
+ flatcar_facts['distribution_release'] = release.group(1).strip('"')
+ else:
+ return False, flatcar_facts
+
+ return True, flatcar_facts
+
+ def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
+ clear_facts = {}
+ if "clearlinux" not in name.lower():
+ return False, clear_facts
+
+ pname = re.search('NAME="(.*)"', data)
+ if pname:
+ if 'Clear Linux' not in pname.groups()[0]:
+ return False, clear_facts
+ clear_facts['distribution'] = pname.groups()[0]
+ version = re.search('VERSION_ID=(.*)', data)
+ if version:
+ clear_facts['distribution_major_version'] = version.groups()[0]
+ clear_facts['distribution_version'] = version.groups()[0]
+ release = re.search('ID=(.*)', data)
+ if release:
+ clear_facts['distribution_release'] = release.groups()[0]
+ return True, clear_facts
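+
+# Illustrative flow (sketch): DistributionFiles(module).process_dist_files()
+# walks OSDIST_LIST in order, reads each release file, and either trusts the
+# listed name (allowempty), matches SEARCH_STRING / OS_RELEASE_ALIAS, or
+# dispatches to the matching parse_distribution_file_<Name> method.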
+
+
+class Distribution(object):
+ """
+    This class fills in the distribution, distribution_version and distribution_release facts
+
+ To do so it checks the existence and content of typical files in /etc containing distribution information
+
+ This is unit tested. Please extend the tests to cover all distributions if you have them available.
+ """
+
+ # every distribution name mentioned here, must have one of
+ # - allowempty == True
+ # - be listed in SEARCH_STRING
+ # - have a function get_distribution_DISTNAME implemented
+ OSDIST_LIST = (
+ {'path': '/etc/oracle-release', 'name': 'OracleLinux'},
+ {'path': '/etc/slackware-version', 'name': 'Slackware'},
+ {'path': '/etc/redhat-release', 'name': 'RedHat'},
+ {'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
+ {'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
+ {'path': '/etc/system-release', 'name': 'Amazon'},
+ {'path': '/etc/alpine-release', 'name': 'Alpine'},
+ {'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
+ {'path': '/etc/os-release', 'name': 'SUSE'},
+ {'path': '/etc/SuSE-release', 'name': 'SUSE'},
+ {'path': '/etc/gentoo-release', 'name': 'Gentoo'},
+ {'path': '/etc/os-release', 'name': 'Debian'},
+ {'path': '/etc/lsb-release', 'name': 'Mandriva'},
+ {'path': '/etc/altlinux-release', 'name': 'Altlinux'},
+ {'path': '/etc/sourcemage-release', 'name': 'SMGL'},
+ {'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
+ {'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
+ {'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
+ {'path': '/etc/os-release', 'name': 'NA'},
+ )
+
+ SEARCH_STRING = {
+ 'OracleLinux': 'Oracle Linux',
+ 'RedHat': 'Red Hat',
+ 'Altlinux': 'ALT Linux',
+ 'ClearLinux': 'Clear Linux Software for Intel Architecture',
+ 'SMGL': 'Source Mage GNU/Linux',
+ }
+
+ # keep keys in sync with Conditionals page of docs
+ OS_FAMILY_MAP = {'RedHat': ['RedHat', 'Fedora', 'CentOS', 'Scientific', 'SLC',
+ 'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
+ 'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba',
+ 'EulerOS', 'openEuler'],
+ 'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
+ 'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
+ 'Pop!_OS', ],
+ 'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
+ 'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'],
+ 'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
+ 'Mandrake': ['Mandrake', 'Mandriva'],
+ 'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
+ 'Slackware': ['Slackware'],
+ 'Altlinux': ['Altlinux'],
+                     'SMGL': ['SMGL'],
+ 'Gentoo': ['Gentoo', 'Funtoo'],
+ 'Alpine': ['Alpine'],
+ 'AIX': ['AIX'],
+ 'HP-UX': ['HPUX'],
+ 'Darwin': ['MacOSX'],
+ 'FreeBSD': ['FreeBSD', 'TrueOS'],
+ 'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix']}
+
+ OS_FAMILY = {}
+ for family, names in OS_FAMILY_MAP.items():
+ for name in names:
+ OS_FAMILY[name] = family
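+    # e.g. OS_FAMILY['Ubuntu'] == 'Debian'; OS_FAMILY['CentOS'] == 'RedHat'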
+
+ def __init__(self, module):
+ self.module = module
+
+ def get_distribution_facts(self):
+ distribution_facts = {}
+
+ # The platform module provides information about the running
+ # system/distribution. Use this as a baseline and fix buggy systems
+ # afterwards
+ system = platform.system()
+ distribution_facts['distribution'] = system
+ distribution_facts['distribution_release'] = platform.release()
+ distribution_facts['distribution_version'] = platform.version()
+
+ systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
+
+ if system in systems_implemented:
+ cleanedname = system.replace('-', '')
+ distfunc = getattr(self, 'get_distribution_' + cleanedname)
+ dist_func_facts = distfunc()
+ distribution_facts.update(dist_func_facts)
+ elif system == 'Linux':
+
+ distribution_files = DistributionFiles(module=self.module)
+
+ # linux_distribution_facts = LinuxDistribution(module).get_distribution_facts()
+ dist_file_facts = distribution_files.process_dist_files()
+
+ distribution_facts.update(dist_file_facts)
+
+ distro = distribution_facts['distribution']
+
+        # look for an os family alias for the 'distribution'; if there isn't one, use 'distribution'
+ distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro
+
+ return distribution_facts
+
+ def get_distribution_AIX(self):
+ aix_facts = {}
+ rc, out, err = self.module.run_command("/usr/bin/oslevel")
+ data = out.split('.')
+ aix_facts['distribution_major_version'] = data[0]
+ if len(data) > 1:
+ aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1])
+ aix_facts['distribution_release'] = data[1]
+ else:
+ aix_facts['distribution_version'] = data[0]
+ return aix_facts
+
+ def get_distribution_HPUX(self):
+ hpux_facts = {}
+ rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
+ data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
+ if data:
+ hpux_facts['distribution_version'] = data.groups()[0]
+ hpux_facts['distribution_release'] = data.groups()[1]
+ return hpux_facts
+
+ def get_distribution_Darwin(self):
+ darwin_facts = {}
+ darwin_facts['distribution'] = 'MacOSX'
+ rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
+ data = out.split()[-1]
+ if data:
+ darwin_facts['distribution_major_version'] = data.split('.')[0]
+ darwin_facts['distribution_version'] = data
+ return darwin_facts
+
+ def get_distribution_FreeBSD(self):
+ freebsd_facts = {}
+ freebsd_facts['distribution_release'] = platform.release()
+ data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', freebsd_facts['distribution_release'])
+ if 'trueos' in platform.version():
+ freebsd_facts['distribution'] = 'TrueOS'
+ if data:
+ freebsd_facts['distribution_major_version'] = data.group(1)
+ freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
+ return freebsd_facts
+
+ def get_distribution_OpenBSD(self):
+ openbsd_facts = {}
+ openbsd_facts['distribution_version'] = platform.release()
+ rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
+ match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
+ if match:
+ openbsd_facts['distribution_release'] = match.groups()[0]
+ else:
+ openbsd_facts['distribution_release'] = 'release'
+ return openbsd_facts
+
+ def get_distribution_DragonFly(self):
+ return {}
+
+ def get_distribution_NetBSD(self):
+ netbsd_facts = {}
+ # FIXME: poking at self.facts, should eventually make these each a collector
+ platform_release = platform.release()
+ netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
+ return netbsd_facts
+
+ def get_distribution_SMGL(self):
+ smgl_facts = {}
+ smgl_facts['distribution'] = 'Source Mage GNU/Linux'
+ return smgl_facts
+
+ def get_distribution_SunOS(self):
+ sunos_facts = {}
+
+ data = get_file_content('/etc/release').splitlines()[0]
+
+ if 'Solaris' in data:
+            # for Solaris 10 uname_r will contain 5.10; for Solaris 11 it will have 5.11
+ uname_r = get_uname(self.module, flags=['-r'])
+ ora_prefix = ''
+ if 'Oracle Solaris' in data:
+ data = data.replace('Oracle ', '')
+ ora_prefix = 'Oracle '
+ sunos_facts['distribution'] = data.split()[0]
+ sunos_facts['distribution_version'] = data.split()[1]
+ sunos_facts['distribution_release'] = ora_prefix + data
+ sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip()
+ return sunos_facts
+
+ uname_v = get_uname(self.module, flags=['-v'])
+ distribution_version = None
+
+ if 'SmartOS' in data:
+ sunos_facts['distribution'] = 'SmartOS'
+ if _file_exists('/etc/product'):
+ product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
+ if 'Image' in product_data:
+ distribution_version = product_data.get('Image').split()[-1]
+ elif 'OpenIndiana' in data:
+ sunos_facts['distribution'] = 'OpenIndiana'
+ elif 'OmniOS' in data:
+ sunos_facts['distribution'] = 'OmniOS'
+ distribution_version = data.split()[-1]
+ elif uname_v is not None and 'NexentaOS_' in uname_v:
+ sunos_facts['distribution'] = 'Nexenta'
+ distribution_version = data.split()[-1].lstrip('v')
+
+ if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
+ sunos_facts['distribution_release'] = data.strip()
+ if distribution_version is not None:
+ sunos_facts['distribution_version'] = distribution_version
+ elif uname_v is not None:
+ sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip()
+ return sunos_facts
+
+ return sunos_facts
+
+
+class DistributionFactCollector(BaseFactCollector):
+ name = 'distribution'
+ _fact_ids = set(['distribution_version',
+ 'distribution_release',
+ 'distribution_major_version',
+ 'os_family'])
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ facts_dict = {}
+ if not module:
+ return facts_dict
+
+ distribution = Distribution(module=module)
+ distro_facts = distribution.get_distribution_facts()
+
+ return distro_facts
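+# A minimal usage sketch (host and values hypothetical): given an
+# AnsibleModule instance 'module',
+# DistributionFactCollector().collect(module=module) on an Ubuntu 20.04
+# host would return something like:
+# {'distribution': 'Ubuntu', 'distribution_version': '20.04',
+# 'distribution_release': 'focal', 'distribution_major_version': '20',
+# 'os_family': 'Debian'}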
diff --git a/lib/ansible/module_utils/facts/system/dns.py b/lib/ansible/module_utils/facts/system/dns.py
new file mode 100644
index 00000000..bd385e9d
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/dns.py
@@ -0,0 +1,67 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class DnsFactCollector(BaseFactCollector):
+ name = 'dns'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ dns_facts = {}
+
+ # TODO: flatten
+ dns_facts['dns'] = {}
+
+ for line in get_file_content('/etc/resolv.conf', '').splitlines():
+ if line.startswith('#') or line.startswith(';') or line.strip() == '':
+ continue
+ tokens = line.split()
+ if len(tokens) == 0:
+ continue
+ if tokens[0] == 'nameserver':
+ if 'nameservers' not in dns_facts['dns']:
+ dns_facts['dns']['nameservers'] = []
+ for nameserver in tokens[1:]:
+ dns_facts['dns']['nameservers'].append(nameserver)
+ elif tokens[0] == 'domain':
+ if len(tokens) > 1:
+ dns_facts['dns']['domain'] = tokens[1]
+ elif tokens[0] == 'search':
+ dns_facts['dns']['search'] = []
+ for suffix in tokens[1:]:
+ dns_facts['dns']['search'].append(suffix)
+ elif tokens[0] == 'sortlist':
+ dns_facts['dns']['sortlist'] = []
+ for address in tokens[1:]:
+ dns_facts['dns']['sortlist'].append(address)
+ elif tokens[0] == 'options':
+ dns_facts['dns']['options'] = {}
+ if len(tokens) > 1:
+ for option in tokens[1:]:
+ option_tokens = option.split(':', 1)
+ # a valued option such as 'timeout:2' maps to '2';
+ # a bare flag such as 'rotate' maps to True
+ val = option_tokens[1] if len(option_tokens) == 2 else True
+ dns_facts['dns']['options'][option_tokens[0]] = val
+
+ return dns_facts
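+# For example, a hypothetical /etc/resolv.conf containing:
+# nameserver 10.0.0.1
+# nameserver 10.0.0.2
+# search example.com corp.example.com
+# options timeout:2 rotate
+# would produce:
+# {'dns': {'nameservers': ['10.0.0.1', '10.0.0.2'],
+# 'search': ['example.com', 'corp.example.com'],
+# 'options': {'timeout': '2', 'rotate': True}}}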
diff --git a/lib/ansible/module_utils/facts/system/env.py b/lib/ansible/module_utils/facts/system/env.py
new file mode 100644
index 00000000..279aad61
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/env.py
@@ -0,0 +1,37 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.six import iteritems
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class EnvFactCollector(BaseFactCollector):
+ name = 'env'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ env_facts = {}
+ env_facts['env'] = {}
+
+ for k, v in iteritems(os.environ):
+ env_facts['env'][k] = v
+
+ return env_facts
diff --git a/lib/ansible/module_utils/facts/system/fips.py b/lib/ansible/module_utils/facts/system/fips.py
new file mode 100644
index 00000000..20ada639
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/fips.py
@@ -0,0 +1,37 @@
+# Determine if a system is in 'fips' mode
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class FipsFactCollector(BaseFactCollector):
+ name = 'fips'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ # NOTE: the fact is always populated; it defaults to False when FIPS mode is not enabled
+ fips_facts = {}
+ fips_facts['fips'] = False
+ data = get_file_content('/proc/sys/crypto/fips_enabled')
+ if data and data == '1':
+ fips_facts['fips'] = True
+ return fips_facts
diff --git a/lib/ansible/module_utils/facts/system/local.py b/lib/ansible/module_utils/facts/system/local.py
new file mode 100644
index 00000000..fe33a323
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/local.py
@@ -0,0 +1,92 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import json
+import os
+import stat
+
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils.six.moves import StringIO
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class LocalFactCollector(BaseFactCollector):
+ name = 'local'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ local_facts = {}
+ local_facts['local'] = {}
+
+ if not module:
+ return local_facts
+
+ fact_path = module.params.get('fact_path', None)
+
+ if not fact_path or not os.path.exists(fact_path):
+ return local_facts
+
+ local = {}
+ for fn in sorted(glob.glob(fact_path + '/*.fact')):
+ # where it will sit under local facts
+ fact_base = os.path.basename(fn).replace('.fact', '')
+ if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
+ # run it
+ # try to read it as json first
+ # if that fails read it with ConfigParser
+ # if that fails, skip it
+ try:
+ rc, out, err = module.run_command(fn)
+ except UnicodeError:
+ fact = 'error loading fact - output of running %s was not utf-8' % fn
+ local[fact_base] = fact
+ local_facts['local'] = local
+ module.warn(fact)
+ return local_facts
+ else:
+ out = get_file_content(fn, default='')
+
+ # load raw json
+ fact = 'loading %s' % fact_base
+ try:
+ fact = json.loads(out)
+ except ValueError:
+ # load raw ini
+ cp = configparser.ConfigParser()
+ try:
+ cp.readfp(StringIO(out))
+ except configparser.Error:
+ fact = "error loading fact - please check content"
+ module.warn(fact)
+ else:
+ fact = {}
+ for sect in cp.sections():
+ if sect not in fact:
+ fact[sect] = {}
+ for opt in cp.options(sect):
+ val = cp.get(sect, opt)
+ fact[sect][opt] = val
+
+ local[fact_base] = fact
+
+ local_facts['local'] = local
+ return local_facts
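+# As a sketch of the expected input (path and values hypothetical): a
+# non-executable file <fact_path>/general.fact containing the INI text
+# [owner]
+# name=web team
+# would surface as {'local': {'general': {'owner': {'name': 'web team'}}}};
+# an executable general.fact is run instead and its stdout parsed the same
+# way (JSON first, then INI).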
diff --git a/lib/ansible/module_utils/facts/system/lsb.py b/lib/ansible/module_utils/facts/system/lsb.py
new file mode 100644
index 00000000..596e7256
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/lsb.py
@@ -0,0 +1,106 @@
+# Collect facts related to LSB (Linux Standard Base)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.utils import get_file_lines
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class LSBFactCollector(BaseFactCollector):
+ name = 'lsb'
+ _fact_ids = set()
+ STRIP_QUOTES = r'\'\"\\'
+
+ def _lsb_release_bin(self, lsb_path, module):
+ lsb_facts = {}
+
+ if not lsb_path:
+ return lsb_facts
+
+ rc, out, err = module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
+ if rc != 0:
+ return lsb_facts
+
+ for line in out.splitlines():
+ if len(line) < 1 or ':' not in line:
+ continue
+ value = line.split(':', 1)[1].strip()
+
+ if 'LSB Version:' in line:
+ lsb_facts['release'] = value
+ elif 'Distributor ID:' in line:
+ lsb_facts['id'] = value
+ elif 'Description:' in line:
+ lsb_facts['description'] = value
+ elif 'Release:' in line:
+ lsb_facts['release'] = value
+ elif 'Codename:' in line:
+ lsb_facts['codename'] = value
+
+ return lsb_facts
+
+ def _lsb_release_file(self, etc_lsb_release_location):
+ lsb_facts = {}
+
+ if not os.path.exists(etc_lsb_release_location):
+ return lsb_facts
+
+ for line in get_file_lines(etc_lsb_release_location):
+ # skip lines (e.g. blanks) that cannot be split on '='
+ if '=' not in line:
+ continue
+ value = line.split('=', 1)[1].strip()
+
+ if 'DISTRIB_ID' in line:
+ lsb_facts['id'] = value
+ elif 'DISTRIB_RELEASE' in line:
+ lsb_facts['release'] = value
+ elif 'DISTRIB_DESCRIPTION' in line:
+ lsb_facts['description'] = value
+ elif 'DISTRIB_CODENAME' in line:
+ lsb_facts['codename'] = value
+
+ return lsb_facts
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ lsb_facts = {}
+
+ if not module:
+ return facts_dict
+
+ lsb_path = module.get_bin_path('lsb_release')
+
+ # try the 'lsb_release' script first
+ if lsb_path:
+ lsb_facts = self._lsb_release_bin(lsb_path,
+ module=module)
+
+ # no lsb_release, try looking in /etc/lsb-release
+ if not lsb_facts:
+ lsb_facts = self._lsb_release_file('/etc/lsb-release')
+
+ if lsb_facts and 'release' in lsb_facts:
+ lsb_facts['major_release'] = lsb_facts['release'].split('.')[0]
+
+ for k, v in lsb_facts.items():
+ if v:
+ lsb_facts[k] = v.strip(LSBFactCollector.STRIP_QUOTES)
+
+ facts_dict['lsb'] = lsb_facts
+ return facts_dict
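+# For example (output illustrative), 'lsb_release -a' reporting:
+# Distributor ID: Ubuntu
+# Description: Ubuntu 20.04.1 LTS
+# Release: 20.04
+# Codename: focal
+# would yield {'lsb': {'id': 'Ubuntu', 'description': 'Ubuntu 20.04.1 LTS',
+# 'release': '20.04', 'codename': 'focal', 'major_release': '20'}}.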
diff --git a/lib/ansible/module_utils/facts/system/pkg_mgr.py b/lib/ansible/module_utils/facts/system/pkg_mgr.py
new file mode 100644
index 00000000..be9f029f
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/pkg_mgr.py
@@ -0,0 +1,141 @@
+# Collect facts related to the system package manager
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import subprocess
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+# A list of dicts. If a platform has more than one package manager,
+# put the preferred one last: the detection loop keeps the last match.
+# If there is an ansible module, use that as the value for the 'name' key.
+PKG_MGRS = [{'path': '/usr/bin/yum', 'name': 'yum'},
+ {'path': '/usr/bin/dnf', 'name': 'dnf'},
+ {'path': '/usr/bin/apt-get', 'name': 'apt'},
+ {'path': '/usr/bin/zypper', 'name': 'zypper'},
+ {'path': '/usr/sbin/urpmi', 'name': 'urpmi'},
+ {'path': '/usr/bin/pacman', 'name': 'pacman'},
+ {'path': '/bin/opkg', 'name': 'opkg'},
+ {'path': '/usr/pkg/bin/pkgin', 'name': 'pkgin'},
+ {'path': '/opt/local/bin/pkgin', 'name': 'pkgin'},
+ {'path': '/opt/tools/bin/pkgin', 'name': 'pkgin'},
+ {'path': '/opt/local/bin/port', 'name': 'macports'},
+ {'path': '/usr/local/bin/brew', 'name': 'homebrew'},
+ {'path': '/sbin/apk', 'name': 'apk'},
+ {'path': '/usr/sbin/pkg', 'name': 'pkgng'},
+ {'path': '/usr/sbin/swlist', 'name': 'swdepot'},
+ {'path': '/usr/bin/emerge', 'name': 'portage'},
+ {'path': '/usr/sbin/pkgadd', 'name': 'svr4pkg'},
+ {'path': '/usr/bin/pkg', 'name': 'pkg5'},
+ {'path': '/usr/bin/xbps-install', 'name': 'xbps'},
+ {'path': '/usr/local/sbin/pkg', 'name': 'pkgng'},
+ {'path': '/usr/bin/swupd', 'name': 'swupd'},
+ {'path': '/usr/sbin/sorcery', 'name': 'sorcery'},
+ {'path': '/usr/bin/rpm-ostree', 'name': 'atomic_container'},
+ {'path': '/usr/bin/installp', 'name': 'installp'},
+ {'path': '/QOpenSys/pkgs/bin/yum', 'name': 'yum'},
+ ]
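+# For example, a hypothetical host that has both /usr/bin/yum and
+# /usr/bin/dnf would be reported as 'dnf' by the base detection loop,
+# since 'dnf' is the later (preferred) entry; the distro-specific
+# overrides in PkgMgrFactCollector.collect() may still adjust the result.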
+
+
+class OpenBSDPkgMgrFactCollector(BaseFactCollector):
+ name = 'pkg_mgr'
+ _fact_ids = set()
+ _platform = 'OpenBSD'
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+
+ facts_dict['pkg_mgr'] = 'openbsd_pkg'
+ return facts_dict
+
+
+# the fact ends up being 'pkg_mgr' so stick with that naming/spelling
+class PkgMgrFactCollector(BaseFactCollector):
+ name = 'pkg_mgr'
+ _fact_ids = set()
+ _platform = 'Generic'
+ required_facts = set(['distribution'])
+
+ def _check_rh_versions(self, pkg_mgr_name, collected_facts):
+ if collected_facts['ansible_distribution'] == 'Fedora':
+ if os.path.exists('/run/ostree-booted'):
+ return "atomic_container"
+ try:
+ if int(collected_facts['ansible_distribution_major_version']) < 23:
+ for yum in [pkg_mgr for pkg_mgr in PKG_MGRS if pkg_mgr['name'] == 'yum']:
+ if os.path.exists(yum['path']):
+ pkg_mgr_name = 'yum'
+ break
+ else:
+ for dnf in [pkg_mgr for pkg_mgr in PKG_MGRS if pkg_mgr['name'] == 'dnf']:
+ if os.path.exists(dnf['path']):
+ pkg_mgr_name = 'dnf'
+ break
+ except ValueError:
+ # If there's some new magical Fedora version in the future,
+ # just default to dnf
+ pkg_mgr_name = 'dnf'
+ elif collected_facts['ansible_distribution'] == 'Amazon':
+ pkg_mgr_name = 'yum'
+ else:
+ # If it's not one of the above and it's Red Hat family of distros, assume
+ # RHEL or a clone. For versions of RHEL < 8 that Ansible supports, the
+ # vendor supported official package manager is 'yum' and in RHEL 8+
+ # (as far as we know at the time of this writing) it is 'dnf'.
+ # If anyone wants to force a non-official package manager then they
+ # can define a provider to either the package or yum action plugins.
+ if int(collected_facts['ansible_distribution_major_version']) < 8:
+ pkg_mgr_name = 'yum'
+ else:
+ pkg_mgr_name = 'dnf'
+ return pkg_mgr_name
+
+ def _check_apt_flavor(self, pkg_mgr_name):
+ # Check if '/usr/bin/apt' is APT-RPM or an ordinary (dpkg-based) APT.
+ # There's an rpm package on Debian, so checking whether /usr/bin/rpm
+ # exists is not enough. Instead, ask RPM whether /usr/bin/apt-get
+ # belongs to some RPM package.
+ rpm_query = '/usr/bin/rpm -q --whatprovides /usr/bin/apt-get'.split()
+ if os.path.exists('/usr/bin/rpm'):
+ with open(os.devnull, 'w') as null:
+ try:
+ subprocess.check_call(rpm_query, stdout=null, stderr=null)
+ pkg_mgr_name = 'apt_rpm'
+ except subprocess.CalledProcessError:
+ # No apt-get in RPM database. Looks like Debian/Ubuntu
+ # with rpm package installed
+ pkg_mgr_name = 'apt'
+ return pkg_mgr_name
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ collected_facts = collected_facts or {}
+
+ pkg_mgr_name = 'unknown'
+ for pkg in PKG_MGRS:
+ if os.path.exists(pkg['path']):
+ pkg_mgr_name = pkg['name']
+
+ # Handle distro family defaults when more than one package manager is
+ # installed or available to the distro, the ansible_fact entry should be
+ # the default package manager officially supported by the distro.
+ if collected_facts['ansible_os_family'] == "RedHat":
+ pkg_mgr_name = self._check_rh_versions(pkg_mgr_name, collected_facts)
+ elif collected_facts['ansible_os_family'] == 'Debian' and pkg_mgr_name != 'apt':
+ # It's possible to install yum, dnf, zypper, rpm, etc inside of
+ # Debian. Doing so does not mean the system wants to use them.
+ pkg_mgr_name = 'apt'
+ elif collected_facts['ansible_os_family'] == 'Altlinux':
+ if pkg_mgr_name == 'apt':
+ pkg_mgr_name = 'apt_rpm'
+
+ # Check if /usr/bin/apt-get is ordinary (dpkg-based) APT or APT-RPM
+ if pkg_mgr_name == 'apt':
+ pkg_mgr_name = self._check_apt_flavor(pkg_mgr_name)
+
+ facts_dict['pkg_mgr'] = pkg_mgr_name
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/platform.py b/lib/ansible/module_utils/facts/system/platform.py
new file mode 100644
index 00000000..beac7ccb
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/platform.py
@@ -0,0 +1,97 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import socket
+import platform
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+# i86pc is specific to Solaris and its derivatives
+SOLARIS_I86_RE_PATTERN = r'i([3456]86|86pc)'
+solaris_i86_re = re.compile(SOLARIS_I86_RE_PATTERN)
+
+
+class PlatformFactCollector(BaseFactCollector):
+ name = 'platform'
+ _fact_ids = set(['system',
+ 'kernel',
+ 'kernel_version',
+ 'machine',
+ 'python_version',
+ 'architecture',
+ 'machine_id'])
+
+ def collect(self, module=None, collected_facts=None):
+ platform_facts = {}
+ # platform.system() can be Linux, Darwin, Java, or Windows
+ platform_facts['system'] = platform.system()
+ platform_facts['kernel'] = platform.release()
+ platform_facts['kernel_version'] = platform.version()
+ platform_facts['machine'] = platform.machine()
+
+ platform_facts['python_version'] = platform.python_version()
+
+ platform_facts['fqdn'] = socket.getfqdn()
+ platform_facts['hostname'] = platform.node().split('.')[0]
+ platform_facts['nodename'] = platform.node()
+
+ platform_facts['domain'] = '.'.join(platform_facts['fqdn'].split('.')[1:])
+
+ arch_bits = platform.architecture()[0]
+
+ platform_facts['userspace_bits'] = arch_bits.replace('bit', '')
+ if platform_facts['machine'] == 'x86_64':
+ platform_facts['architecture'] = platform_facts['machine']
+ if platform_facts['userspace_bits'] == '64':
+ platform_facts['userspace_architecture'] = 'x86_64'
+ elif platform_facts['userspace_bits'] == '32':
+ platform_facts['userspace_architecture'] = 'i386'
+ elif solaris_i86_re.search(platform_facts['machine']):
+ platform_facts['architecture'] = 'i386'
+ if platform_facts['userspace_bits'] == '64':
+ platform_facts['userspace_architecture'] = 'x86_64'
+ elif platform_facts['userspace_bits'] == '32':
+ platform_facts['userspace_architecture'] = 'i386'
+ else:
+ platform_facts['architecture'] = platform_facts['machine']
+
+ if platform_facts['system'] == 'AIX':
+ # Attempt to use getconf to figure out architecture
+ # fall back to bootinfo if needed
+ getconf_bin = module.get_bin_path('getconf')
+ if getconf_bin:
+ rc, out, err = module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
+ data = out.splitlines()
+ platform_facts['architecture'] = data[0]
+ else:
+ bootinfo_bin = module.get_bin_path('bootinfo')
+ rc, out, err = module.run_command([bootinfo_bin, '-p'])
+ data = out.splitlines()
+ platform_facts['architecture'] = data[0]
+ elif platform_facts['system'] == 'OpenBSD':
+ platform_facts['architecture'] = platform.uname()[5]
+
+ machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
+ if machine_id:
+ machine_id = machine_id.splitlines()[0]
+ platform_facts["machine_id"] = machine_id
+
+ return platform_facts
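+# For example (host hypothetical), a socket.getfqdn() of 'web1.example.com'
+# yields fqdn 'web1.example.com', hostname 'web1' and domain 'example.com';
+# an unqualified fqdn of just 'web1' leaves domain as the empty string.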
diff --git a/lib/ansible/module_utils/facts/system/python.py b/lib/ansible/module_utils/facts/system/python.py
new file mode 100644
index 00000000..172a0913
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/python.py
@@ -0,0 +1,60 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+try:
+ # Check if we have SSLContext support
+ from ssl import create_default_context, SSLContext
+ del create_default_context
+ del SSLContext
+ HAS_SSLCONTEXT = True
+except ImportError:
+ HAS_SSLCONTEXT = False
+
+
+class PythonFactCollector(BaseFactCollector):
+ name = 'python'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ python_facts = {}
+ python_facts['python'] = {
+ 'version': {
+ 'major': sys.version_info[0],
+ 'minor': sys.version_info[1],
+ 'micro': sys.version_info[2],
+ 'releaselevel': sys.version_info[3],
+ 'serial': sys.version_info[4]
+ },
+ 'version_info': list(sys.version_info),
+ 'executable': sys.executable,
+ 'has_sslcontext': HAS_SSLCONTEXT
+ }
+
+ try:
+ python_facts['python']['type'] = sys.subversion[0]
+ except AttributeError:
+ try:
+ python_facts['python']['type'] = sys.implementation.name
+ except AttributeError:
+ python_facts['python']['type'] = None
+
+ return python_facts
diff --git a/lib/ansible/module_utils/facts/system/selinux.py b/lib/ansible/module_utils/facts/system/selinux.py
new file mode 100644
index 00000000..c3f88fa9
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/selinux.py
@@ -0,0 +1,91 @@
+# Collect facts related to selinux
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ HAVE_SELINUX = False
+
+SELINUX_MODE_DICT = {
+ 1: 'enforcing',
+ 0: 'permissive',
+ -1: 'disabled'
+}
+
+
+class SelinuxFactCollector(BaseFactCollector):
+ name = 'selinux'
+ _fact_ids = set()
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+ selinux_facts = {}
+
+ # If the selinux Python library is missing, only set the status and selinux_python_present, since
+ # there is no way to tell if SELinux is enabled or disabled on the system
+ # without the library.
+ if not HAVE_SELINUX:
+ selinux_facts['status'] = 'Missing selinux Python library'
+ facts_dict['selinux'] = selinux_facts
+ facts_dict['selinux_python_present'] = False
+ return facts_dict
+
+ # Set a boolean for testing whether the Python library is present
+ facts_dict['selinux_python_present'] = True
+
+ if not selinux.is_selinux_enabled():
+ selinux_facts['status'] = 'disabled'
+ else:
+ selinux_facts['status'] = 'enabled'
+
+ try:
+ selinux_facts['policyvers'] = selinux.security_policyvers()
+ except (AttributeError, OSError):
+ selinux_facts['policyvers'] = 'unknown'
+
+ try:
+ (rc, configmode) = selinux.selinux_getenforcemode()
+ if rc == 0:
+ selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown')
+ else:
+ selinux_facts['config_mode'] = 'unknown'
+ except (AttributeError, OSError):
+ selinux_facts['config_mode'] = 'unknown'
+
+ try:
+ mode = selinux.security_getenforce()
+ selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown')
+ except (AttributeError, OSError):
+ selinux_facts['mode'] = 'unknown'
+
+ try:
+ (rc, policytype) = selinux.selinux_getpolicytype()
+ if rc == 0:
+ selinux_facts['type'] = policytype
+ else:
+ selinux_facts['type'] = 'unknown'
+ except (AttributeError, OSError):
+ selinux_facts['type'] = 'unknown'
+
+ facts_dict['selinux'] = selinux_facts
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/service_mgr.py b/lib/ansible/module_utils/facts/system/service_mgr.py
new file mode 100644
index 00000000..dc8df68e
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/service_mgr.py
@@ -0,0 +1,152 @@
+# Collect facts related to system service manager and init.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import re
+
+from ansible.module_utils._text import to_native
+
+from ansible.module_utils.facts.utils import get_file_content
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+# The distutils module is not shipped with SUNWPython on Solaris.
+# It's in the SUNWPython-devel package which also contains development files
+# that don't belong on production boxes. Since our Solaris code doesn't
+# depend on LooseVersion, do not import it on Solaris.
+if platform.system() != 'SunOS':
+ from distutils.version import LooseVersion
+
+
+class ServiceMgrFactCollector(BaseFactCollector):
+ name = 'service_mgr'
+ _fact_ids = set()
+ required_facts = set(['platform', 'distribution'])
+
+ @staticmethod
+ def is_systemd_managed(module):
+ # tools must be installed
+ if module.get_bin_path('systemctl'):
+
+ # this should show whether systemd is the boot init system, for cases where
+ # checking init (PID 1) failed to identify it; these canaries mirror
+ # systemd's own sd_booted() test http://www.freedesktop.org/software/systemd/man/sd_booted.html
+ for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
+ if os.path.exists(canary):
+ return True
+ return False
+
+ @staticmethod
+ def is_systemd_managed_offline(module):
+ # tools must be installed
+ if module.get_bin_path('systemctl'):
+ # check if /sbin/init is a symlink to systemd
+ # on SUSE, /sbin/init may be missing if the systemd-sysvinit package is not installed.
+ if os.path.islink('/sbin/init') and os.path.basename(os.readlink('/sbin/init')) == 'systemd':
+ return True
+ return False
+
+ def collect(self, module=None, collected_facts=None):
+ facts_dict = {}
+
+ if not module:
+ return facts_dict
+
+ collected_facts = collected_facts or {}
+ service_mgr_name = None
+
+ # TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
+ # also other OSs other than linux might need to check across several possible candidates
+
+ # Mapping of proc_1 values to more useful names
+ proc_1_map = {
+ 'procd': 'openwrt_init',
+ 'runit-init': 'runit',
+ 'svscan': 'svc',
+ 'openrc-init': 'openrc',
+ }
+
+ # try various forms of querying pid 1
+ proc_1 = get_file_content('/proc/1/comm')
+ if proc_1 is None:
+ # FIXME: return code isn't checked
+ # FIXME: if stdout is an empty string, odd things happen
+ # FIXME: other code seems to think we could get proc_1 == None past this point
+ rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
+ # If the output of the command starts with what looks like a PID, then the 'ps' command
+ # probably didn't work the way we wanted, probably because it's busybox
+ if re.match(r' *[0-9]+ ', proc_1):
+ proc_1 = None
+
+ # The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
+ if proc_1 == "COMMAND\n":
+ proc_1 = None
+
+ # FIXME: an empty-string proc_1 stays an empty string
+ if proc_1 is not None:
+ proc_1 = os.path.basename(proc_1)
+ proc_1 = to_native(proc_1)
+ proc_1 = proc_1.strip()
+
+ if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
+ # many systems return init, so this cannot be trusted; if it ends in 'sh' it is probably a shell in a container
+ proc_1 = None
+
+ # if not init/None it should be an identifiable or custom init, so we are done!
+ if proc_1 is not None:
+ # Lookup proc_1 value in map and use proc_1 value itself if no match
+ # FIXME: empty string still falls through
+ service_mgr_name = proc_1_map.get(proc_1, proc_1)
+
+ # FIXME: replace with a system->service_mgr_name map?
+ # start with the easy ones
+ elif collected_facts.get('ansible_distribution', None) == 'MacOSX':
+ # FIXME: find way to query executable, version matching is not ideal
+ if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
+ service_mgr_name = 'launchd'
+ else:
+ service_mgr_name = 'systemstarter'
+ elif 'BSD' in collected_facts.get('ansible_system', '') or collected_facts.get('ansible_system') in ['Bitrig', 'DragonFly']:
+ # FIXME: we might want to break out to individual BSDs or 'rc'
+ service_mgr_name = 'bsdinit'
+ elif collected_facts.get('ansible_system') == 'AIX':
+ service_mgr_name = 'src'
+ elif collected_facts.get('ansible_system') == 'SunOS':
+ service_mgr_name = 'smf'
+ elif collected_facts.get('ansible_distribution') == 'OpenWrt':
+ service_mgr_name = 'openwrt_init'
+ elif collected_facts.get('ansible_system') == 'Linux':
+ # FIXME: move is_systemd_managed elsewhere
+ if self.is_systemd_managed(module=module):
+ service_mgr_name = 'systemd'
+ elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
+ service_mgr_name = 'upstart'
+ elif os.path.exists('/sbin/openrc'):
+ service_mgr_name = 'openrc'
+ elif self.is_systemd_managed_offline(module=module):
+ service_mgr_name = 'systemd'
+ elif os.path.exists('/etc/init.d/'):
+ service_mgr_name = 'sysvinit'
+
+ if not service_mgr_name:
+ # if we cannot detect, fallback to generic 'service'
+ service_mgr_name = 'service'
+
+ facts_dict['service_mgr'] = service_mgr_name
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/system/ssh_pub_keys.py b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
new file mode 100644
index 00000000..7c9bcb37
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/ssh_pub_keys.py
@@ -0,0 +1,54 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.utils import get_file_content
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class SshPubKeyFactCollector(BaseFactCollector):
+ name = 'ssh_pub_keys'
+ _fact_ids = set(['ssh_host_pub_keys',
+ 'ssh_host_key_dsa_public',
+ 'ssh_host_key_rsa_public',
+ 'ssh_host_key_ecdsa_public',
+ 'ssh_host_key_ed25519_public'])
+
+ def collect(self, module=None, collected_facts=None):
+ ssh_pub_key_facts = {}
+ algos = ('dsa', 'rsa', 'ecdsa', 'ed25519')
+
+ # list of directories to check for ssh keys
+ # checked in the order listed here; the first one that yields keys is used
+ keydirs = ['/etc/ssh', '/etc/openssh', '/etc']
+
+ for keydir in keydirs:
+ for algo in algos:
+ factname = 'ssh_host_key_%s_public' % algo
+ if factname in ssh_pub_key_facts:
+ # a previous keydir was already successful, stop looking
+ # for keys
+ return ssh_pub_key_facts
+ key_filename = '%s/ssh_host_%s_key.pub' % (keydir, algo)
+ keydata = get_file_content(key_filename)
+ if keydata is not None:
+ (keytype, key) = keydata.split()[0:2]
+ ssh_pub_key_facts[factname] = key
+ ssh_pub_key_facts[factname + '_keytype'] = keytype
+
+ return ssh_pub_key_facts
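+# For example, a hypothetical /etc/ssh/ssh_host_ed25519_key.pub line of
+# 'ssh-ed25519 AAAAC3NzExampleKey root@host' (key made up) would yield
+# ssh_host_key_ed25519_public = 'AAAAC3NzExampleKey' and
+# ssh_host_key_ed25519_public_keytype = 'ssh-ed25519'.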
diff --git a/lib/ansible/module_utils/facts/system/user.py b/lib/ansible/module_utils/facts/system/user.py
new file mode 100644
index 00000000..745b5db3
--- /dev/null
+++ b/lib/ansible/module_utils/facts/system/user.py
@@ -0,0 +1,50 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import getpass
+import os
+import pwd
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class UserFactCollector(BaseFactCollector):
+ name = 'user'
+ _fact_ids = set(['user_id', 'user_uid', 'user_gid',
+ 'user_gecos', 'user_dir', 'user_shell',
+ 'real_user_id', 'effective_user_id',
+ 'effective_group_ids'])
+
+ def collect(self, module=None, collected_facts=None):
+ user_facts = {}
+
+ user_facts['user_id'] = getpass.getuser()
+
+ pwent = pwd.getpwnam(getpass.getuser())
+
+ user_facts['user_uid'] = pwent.pw_uid
+ user_facts['user_gid'] = pwent.pw_gid
+ user_facts['user_gecos'] = pwent.pw_gecos
+ user_facts['user_dir'] = pwent.pw_dir
+ user_facts['user_shell'] = pwent.pw_shell
+ user_facts['real_user_id'] = os.getuid()
+ user_facts['effective_user_id'] = os.geteuid()
+ user_facts['real_group_id'] = os.getgid()
+ # use getegid() so the effective group id is actually the effective one
+ user_facts['effective_group_id'] = os.getegid()
+
+ return user_facts
diff --git a/lib/ansible/module_utils/facts/timeout.py b/lib/ansible/module_utils/facts/timeout.py
new file mode 100644
index 00000000..934e7aff
--- /dev/null
+++ b/lib/ansible/module_utils/facts/timeout.py
@@ -0,0 +1,68 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing
+import multiprocessing.pool as mp
+
+# timeout function to make sure some fact gathering
+# steps do not exceed a time limit
+
+GATHER_TIMEOUT = None
+DEFAULT_GATHER_TIMEOUT = 10
+
+
+class TimeoutError(Exception):
+ pass
+
+
+def timeout(seconds=None, error_message="Timer expired"):
+ """
+ Timeout decorator to expire after a set number of seconds. This raises an
+ ansible.module_utils.facts.TimeoutError if the timeout is hit before the
+ function completes.
+ """
+ def decorator(func):
+ def wrapper(*args, **kwargs):
+ timeout_value = seconds
+ if timeout_value is None:
+ timeout_value = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
+
+ pool = mp.ThreadPool(processes=1)
+ res = pool.apply_async(func, args, kwargs)
+ pool.close()
+ try:
+ return res.get(timeout_value)
+ except multiprocessing.TimeoutError:
+ # Re-raise as the local ansible.module_utils.facts.timeout.TimeoutError
+ raise TimeoutError('Timer expired after %s seconds' % timeout_value)
+
+ return wrapper
+
+ # If we were called as @timeout, then the first parameter will be the
+ # function we are to wrap instead of the number of seconds. Detect this
+ # and correct it by setting seconds to our default value and return the
+ # inner decorator function manually wrapped around the function
+ if callable(seconds):
+ func = seconds
+ seconds = None
+ return decorator(func)
+
+ # If we were called as @timeout(...) then Python itself will take
+ # care of wrapping the inner decorator around the function
+
+ return decorator
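+# A minimal usage sketch (probe function hypothetical):
+#
+# @timeout(5)
+# def get_slow_fact():
+# return run_expensive_probe() # raises TimeoutError after 5 seconds
+#
+# A bare '@timeout' (no arguments) is also accepted and falls back to
+# GATHER_TIMEOUT or, failing that, DEFAULT_GATHER_TIMEOUT.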
diff --git a/lib/ansible/module_utils/facts/utils.py b/lib/ansible/module_utils/facts/utils.py
new file mode 100644
index 00000000..9fd00afd
--- /dev/null
+++ b/lib/ansible/module_utils/facts/utils.py
@@ -0,0 +1,79 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+
+def get_file_content(path, default=None, strip=True):
+ data = default
+ if os.path.exists(path) and os.access(path, os.R_OK):
+ try:
+ try:
+ datafile = open(path)
+ data = datafile.read()
+ if strip:
+ data = data.strip()
+ if len(data) == 0:
+ data = default
+ finally:
+ datafile.close()
+ except Exception:
+ # ignore errors, as some jails/containers might grant read permission but still refuse reads from /proc
+ # done in 2 blocks for 2.4 compat
+ pass
+ return data
+
+
+def get_file_lines(path, strip=True, line_sep=None):
+ '''get list of lines from file'''
+ data = get_file_content(path, strip=strip)
+ if data:
+ if line_sep is None:
+ ret = data.splitlines()
+ else:
+ if len(line_sep) == 1:
+ ret = data.rstrip(line_sep).split(line_sep)
+ else:
+ ret = data.split(line_sep)
+ else:
+ ret = []
+ return ret
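+# For example, get_file_lines('/proc/1/environ', line_sep='\x00') splits the
+# NUL-delimited environment block into one 'KEY=value' entry per element,
+# which is how the Linux virtualization detection in this patch consumes it.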
+
+
+def get_mount_size(mountpoint):
+ mount_size = {}
+
+ try:
+ statvfs_result = os.statvfs(mountpoint)
+ mount_size['size_total'] = statvfs_result.f_frsize * statvfs_result.f_blocks
+ mount_size['size_available'] = statvfs_result.f_frsize * (statvfs_result.f_bavail)
+
+ # Block total/available/used
+ mount_size['block_size'] = statvfs_result.f_bsize
+ mount_size['block_total'] = statvfs_result.f_blocks
+ mount_size['block_available'] = statvfs_result.f_bavail
+ mount_size['block_used'] = mount_size['block_total'] - mount_size['block_available']
+
+ # Inode total/available/used
+ mount_size['inode_total'] = statvfs_result.f_files
+ mount_size['inode_available'] = statvfs_result.f_favail
+ mount_size['inode_used'] = mount_size['inode_total'] - mount_size['inode_available']
+ except OSError:
+ pass
+
+ return mount_size
diff --git a/lib/ansible/module_utils/facts/virtual/__init__.py b/lib/ansible/module_utils/facts/virtual/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/__init__.py
diff --git a/lib/ansible/module_utils/facts/virtual/base.py b/lib/ansible/module_utils/facts/virtual/base.py
new file mode 100644
index 00000000..02da049e
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/base.py
@@ -0,0 +1,70 @@
+# base classes for virtualization facts
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.collector import BaseFactCollector
+
+
+class Virtual:
+ """
+ This is the generic base class for virtualization facts. It should be
+ further subclassed per platform. If you subclass this,
+ you should define:
+ - virtualization_type
+ - virtualization_role
+ - container (e.g. solaris zones, freebsd jails, linux containers)
+
+ All subclasses MUST define platform.
+ """
+ platform = 'Generic'
+
+ # FIXME: remove load_on_init if we can
+ def __init__(self, module, load_on_init=False):
+ self.module = module
+
+ # FIXME: just here for existing test cases till they are updated
+ def populate(self, collected_facts=None):
+ virtual_facts = self.get_virtual_facts()
+
+ return virtual_facts
+
+ def get_virtual_facts(self):
+ virtual_facts = {'virtualization_type': '',
+ 'virtualization_role': ''}
+ return virtual_facts
+
+
+class VirtualCollector(BaseFactCollector):
+ name = 'virtual'
+ _fact_class = Virtual
+ _fact_ids = set(['virtualization_type',
+ 'virtualization_role'])
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ if not module:
+ return {}
+
+ # Network munges cached_facts by side effect, so give it a copy
+ facts_obj = self._fact_class(module)
+
+ facts_dict = facts_obj.populate(collected_facts=collected_facts)
+
+ return facts_dict
diff --git a/lib/ansible/module_utils/facts/virtual/dragonfly.py b/lib/ansible/module_utils/facts/virtual/dragonfly.py
new file mode 100644
index 00000000..b176f8bf
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/dragonfly.py
@@ -0,0 +1,25 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.facts.virtual.freebsd import FreeBSDVirtual, VirtualCollector
+
+
+class DragonFlyVirtualCollector(VirtualCollector):
+ # Note the _fact_class impl is actually the FreeBSDVirtual impl
+ _fact_class = FreeBSDVirtual
+ _platform = 'DragonFly'
diff --git a/lib/ansible/module_utils/facts/virtual/freebsd.py b/lib/ansible/module_utils/facts/virtual/freebsd.py
new file mode 100644
index 00000000..cfaf880e
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/freebsd.py
@@ -0,0 +1,57 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
+
+
+class FreeBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
+ """
+ This is a FreeBSD-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'FreeBSD'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ # Set empty values as default
+ virtual_facts['virtualization_type'] = ''
+ virtual_facts['virtualization_role'] = ''
+
+ if os.path.exists('/dev/xen/xenstore'):
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_product_facts = self.detect_virt_product('kern.vm_guest') or self.detect_virt_product(
+ 'hw.hv_vendor') or self.detect_virt_product('security.jail.jailed')
+ virtual_facts.update(virtual_product_facts)
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_vendor_facts = self.detect_virt_vendor('hw.model')
+ virtual_facts.update(virtual_vendor_facts)
+
+ return virtual_facts
+
+
+class FreeBSDVirtualCollector(VirtualCollector):
+ _fact_class = FreeBSDVirtual
+ _platform = 'FreeBSD'
diff --git a/lib/ansible/module_utils/facts/virtual/hpux.py b/lib/ansible/module_utils/facts/virtual/hpux.py
new file mode 100644
index 00000000..94ea6a1a
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/hpux.py
@@ -0,0 +1,62 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+
+
+class HPUXVirtual(Virtual):
+ """
+ This is a HP-UX specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'HP-UX'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ if os.path.exists('/usr/sbin/vecheck'):
+ rc, out, err = self.module.run_command("/usr/sbin/vecheck")
+ if rc == 0:
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HP vPar'
+ if os.path.exists('/opt/hpvm/bin/hpvminfo'):
+ rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
+ if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HPVM vPar'
+ elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HPVM IVM'
+ elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
+ virtual_facts['virtualization_type'] = 'host'
+ virtual_facts['virtualization_role'] = 'HPVM'
+ if os.path.exists('/usr/sbin/parstatus'):
+ rc, out, err = self.module.run_command("/usr/sbin/parstatus")
+ if rc == 0:
+ virtual_facts['virtualization_type'] = 'guest'
+ virtual_facts['virtualization_role'] = 'HP nPar'
+
+ return virtual_facts
+
+
+class HPUXVirtualCollector(VirtualCollector):
+ _fact_class = HPUXVirtual
+ _platform = 'HP-UX'
diff --git a/lib/ansible/module_utils/facts/virtual/linux.py b/lib/ansible/module_utils/facts/virtual/linux.py
new file mode 100644
index 00000000..e133df42
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/linux.py
@@ -0,0 +1,251 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import re
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.utils import get_file_content, get_file_lines
+
+
+class LinuxVirtual(Virtual):
+ """
+ This is a Linux-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'Linux'
+
+ # For more information, check: http://people.redhat.com/~rjones/virt-what/
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ # lxc/docker
+ if os.path.exists('/proc/1/cgroup'):
+ for line in get_file_lines('/proc/1/cgroup'):
+ if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
+ virtual_facts['virtualization_type'] = 'docker'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+ if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
+ virtual_facts['virtualization_type'] = 'lxc'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ # lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
+ if os.path.exists('/proc/1/environ'):
+ for line in get_file_lines('/proc/1/environ', line_sep='\x00'):
+ if re.search('container=lxc', line):
+ virtual_facts['virtualization_type'] = 'lxc'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+ if re.search('container=podman', line):
+ virtual_facts['virtualization_type'] = 'podman'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+ if re.search('^container=.', line):
+ virtual_facts['virtualization_type'] = 'container'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
+ virtual_facts['virtualization_type'] = 'openvz'
+ if os.path.exists('/proc/bc'):
+ virtual_facts['virtualization_role'] = 'host'
+ else:
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ systemd_container = get_file_content('/run/systemd/container')
+ if systemd_container:
+ virtual_facts['virtualization_type'] = systemd_container
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ if os.path.exists("/proc/xen"):
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+ try:
+ for line in get_file_lines('/proc/xen/capabilities'):
+ if "control_d" in line:
+ virtual_facts['virtualization_role'] = 'host'
+ except IOError:
+ pass
+ return virtual_facts
+
+ # assume guest for this block
+ virtual_facts['virtualization_role'] = 'guest'
+
+ product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
+
+ if product_name in ('KVM', 'KVM Server', 'Bochs', 'AHV'):
+ virtual_facts['virtualization_type'] = 'kvm'
+ return virtual_facts
+
+ if product_name == 'RHEV Hypervisor':
+ virtual_facts['virtualization_type'] = 'RHEV'
+ return virtual_facts
+
+ if product_name in ('VMware Virtual Platform', 'VMware7,1'):
+ virtual_facts['virtualization_type'] = 'VMware'
+ return virtual_facts
+
+ if product_name in ('OpenStack Compute', 'OpenStack Nova'):
+ virtual_facts['virtualization_type'] = 'openstack'
+ return virtual_facts
+
+ bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+
+ if bios_vendor == 'Xen':
+ virtual_facts['virtualization_type'] = 'xen'
+ return virtual_facts
+
+ if bios_vendor == 'innotek GmbH':
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ return virtual_facts
+
+ if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
+ virtual_facts['virtualization_type'] = 'kvm'
+ return virtual_facts
+
+ sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
+
+ KVM_SYS_VENDORS = ('QEMU', 'oVirt', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway', 'Nutanix')
+ if sys_vendor in KVM_SYS_VENDORS:
+ virtual_facts['virtualization_type'] = 'kvm'
+ return virtual_facts
+
+ # FIXME: this also matches Hyper-V
+ if sys_vendor == 'Microsoft Corporation':
+ virtual_facts['virtualization_type'] = 'VirtualPC'
+ return virtual_facts
+
+ if sys_vendor == 'Parallels Software International Inc.':
+ virtual_facts['virtualization_type'] = 'parallels'
+ return virtual_facts
+
+ if sys_vendor == 'OpenStack Foundation':
+ virtual_facts['virtualization_type'] = 'openstack'
+ return virtual_facts
+
+ # undo the guest assumption made above
+ del virtual_facts['virtualization_role']
+
+ if os.path.exists('/proc/self/status'):
+ for line in get_file_lines('/proc/self/status'):
+ if re.match(r'^VxID:\s+\d+', line):
+ virtual_facts['virtualization_type'] = 'linux_vserver'
+ if re.match(r'^VxID:\s+0', line):
+ virtual_facts['virtualization_role'] = 'host'
+ else:
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ if os.path.exists('/proc/cpuinfo'):
+ for line in get_file_lines('/proc/cpuinfo'):
+ if re.match('^model name.*QEMU Virtual CPU', line):
+ virtual_facts['virtualization_type'] = 'kvm'
+ elif re.match('^vendor_id.*User Mode Linux', line):
+ virtual_facts['virtualization_type'] = 'uml'
+ elif re.match('^model name.*UML', line):
+ virtual_facts['virtualization_type'] = 'uml'
+ elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
+ virtual_facts['virtualization_type'] = 'kvm'
+ elif re.match('^vendor_id.*PowerVM Lx86', line):
+ virtual_facts['virtualization_type'] = 'powervm_lx86'
+ elif re.match('^vendor_id.*IBM/S390', line):
+ virtual_facts['virtualization_type'] = 'PR/SM'
+ lscpu = self.module.get_bin_path('lscpu')
+ if lscpu:
+ rc, out, err = self.module.run_command(["lscpu"])
+ if rc == 0:
+ for line in out.splitlines():
+ data = line.split(":", 1)
+ key = data[0].strip()
+ if key == 'Hypervisor':
+ virtual_facts['virtualization_type'] = data[1].strip()
+ else:
+ virtual_facts['virtualization_type'] = 'ibm_systemz'
+ else:
+ continue
+ if virtual_facts['virtualization_type'] == 'PR/SM':
+ virtual_facts['virtualization_role'] = 'LPAR'
+ else:
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ # Beware that we can have both kvm and virtualbox running on a single system
+ if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
+ modules = []
+ for line in get_file_lines("/proc/modules"):
+ data = line.split(" ", 1)
+ modules.append(data[0])
+
+ if 'kvm' in modules:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'host'
+
+ if os.path.isdir('/rhev/'):
+            # Check whether this is a RHEV hypervisor (is vdsm running?)
+ for f in glob.glob('/proc/[0-9]*/comm'):
+ try:
+ with open(f) as virt_fh:
+ comm_content = virt_fh.read().rstrip()
+ if comm_content == 'vdsm':
+ virtual_facts['virtualization_type'] = 'RHEV'
+ break
+ except Exception:
+ pass
+
+ return virtual_facts
+
+ if 'vboxdrv' in modules:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'host'
+ return virtual_facts
+
+ if 'virtio' in modules:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+            # In older Linux kernel versions the /sys filesystem is not available;
+            # dmidecode is the safest option for parsing virtualization-related values
+ dmi_bin = self.module.get_bin_path('dmidecode')
+ # We still want to continue even if dmidecode is not available
+ if dmi_bin is not None:
+ (rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin)
+ if rc == 0:
+ # Strip out commented lines (specific dmidecode output)
+ vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
+ if vendor_name.startswith('VMware'):
+ virtual_facts['virtualization_type'] = 'VMware'
+ virtual_facts['virtualization_role'] = 'guest'
+ return virtual_facts
+
+ # If none of the above matches, return 'NA' for virtualization_type
+ # and virtualization_role. This allows for proper grouping.
+ virtual_facts['virtualization_type'] = 'NA'
+ virtual_facts['virtualization_role'] = 'NA'
+
+ return virtual_facts
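+
+# (Editor's note, not part of the original file) The dict returned above surfaces
+# as the ansible_virtualization_type / ansible_virtualization_role facts, e.g.
+# {'virtualization_type': 'kvm', 'virtualization_role': 'guest'} on a KVM guest.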
+
+
+class LinuxVirtualCollector(VirtualCollector):
+ _fact_class = LinuxVirtual
+ _platform = 'Linux'
diff --git a/lib/ansible/module_utils/facts/virtual/netbsd.py b/lib/ansible/module_utils/facts/virtual/netbsd.py
new file mode 100644
index 00000000..514ef859
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/netbsd.py
@@ -0,0 +1,50 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
+
+
+class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
+ platform = 'NetBSD'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ # Set empty values as default
+ virtual_facts['virtualization_type'] = ''
+ virtual_facts['virtualization_role'] = ''
+
+ virtual_product_facts = self.detect_virt_product('machdep.dmi.system-product')
+ virtual_facts.update(virtual_product_facts)
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_vendor_facts = self.detect_virt_vendor('machdep.dmi.system-vendor')
+ virtual_facts.update(virtual_vendor_facts)
+
+ if os.path.exists('/dev/xencons'):
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ return virtual_facts
+
+
+class NetBSDVirtualCollector(VirtualCollector):
+ _fact_class = NetBSDVirtual
+ _platform = 'NetBSD'
diff --git a/lib/ansible/module_utils/facts/virtual/openbsd.py b/lib/ansible/module_utils/facts/virtual/openbsd.py
new file mode 100644
index 00000000..42daa337
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/openbsd.py
@@ -0,0 +1,64 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+from ansible.module_utils.facts.virtual.sysctl import VirtualSysctlDetectionMixin
+
+from ansible.module_utils.facts.utils import get_file_content
+
+
+class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
+ """
+    This is an OpenBSD-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ """
+ platform = 'OpenBSD'
+ DMESG_BOOT = '/var/run/dmesg.boot'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+
+ # Set empty values as default
+ virtual_facts['virtualization_type'] = ''
+ virtual_facts['virtualization_role'] = ''
+
+ virtual_product_facts = self.detect_virt_product('hw.product')
+ virtual_facts.update(virtual_product_facts)
+
+ if virtual_facts['virtualization_type'] == '':
+ virtual_vendor_facts = self.detect_virt_vendor('hw.vendor')
+ virtual_facts.update(virtual_vendor_facts)
+
+        # Check dmesg to see whether vmm(4) attached, indicating the host is
+        # capable of virtualization.
+ dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
+ for line in dmesg_boot.splitlines():
+ match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
+ if match:
+ virtual_facts['virtualization_type'] = 'vmm'
+ virtual_facts['virtualization_role'] = 'host'
+
+ return virtual_facts
+
+
+class OpenBSDVirtualCollector(VirtualCollector):
+ _fact_class = OpenBSDVirtual
+ _platform = 'OpenBSD'
diff --git a/lib/ansible/module_utils/facts/virtual/sunos.py b/lib/ansible/module_utils/facts/virtual/sunos.py
new file mode 100644
index 00000000..06ce661a
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/sunos.py
@@ -0,0 +1,120 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
+
+
+class SunOSVirtual(Virtual):
+ """
+ This is a SunOS-specific subclass of Virtual. It defines
+ - virtualization_type
+ - virtualization_role
+ - container
+ """
+ platform = 'SunOS'
+
+ def get_virtual_facts(self):
+ virtual_facts = {}
+ # Check if it's a zone
+
+ zonename = self.module.get_bin_path('zonename')
+ if zonename:
+ rc, out, err = self.module.run_command(zonename)
+ if rc == 0 and out.rstrip() != "global":
+ virtual_facts['container'] = 'zone'
+ # Check if it's a branded zone (i.e. Solaris 8/9 zone)
+ if os.path.isdir('/.SUNWnative'):
+ virtual_facts['container'] = 'zone'
+        # If it's a zone, check whether the global zone is itself virtualized.
+        # Relies on the "guest tools" (e.g. vmware tools) being installed
+
+ if 'container' in virtual_facts and virtual_facts['container'] == 'zone':
+ modinfo = self.module.get_bin_path('modinfo')
+ if modinfo:
+ rc, out, err = self.module.run_command(modinfo)
+ if rc == 0:
+ for line in out.splitlines():
+ if 'VMware' in line:
+ virtual_facts['virtualization_type'] = 'vmware'
+ virtual_facts['virtualization_role'] = 'guest'
+ if 'VirtualBox' in line:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ if os.path.exists('/proc/vz'):
+ virtual_facts['virtualization_type'] = 'virtuozzo'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ # Detect domaining on Sparc hardware
+ virtinfo = self.module.get_bin_path('virtinfo')
+ if virtinfo:
+            # The output of virtinfo differs depending on whether we are on a machine with logical
+            # domains ('LDoms') on a T-series or domains ('Domains') on an M-series. Try LDoms first.
+ rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
+ # The output contains multiple lines with different keys like this:
+ # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
+            # The output may also be unformatted, and the return code is set to 0 regardless of the error condition:
+ # virtinfo can only be run from the global zone
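+            # (Editor's note, not in the original file) e.g. for the line
+            # 'DOMAINROLE|impl=LDoms|control=true|io=false|service=false|root=false'
+            # the parsing below yields virtualization_role = 'host (control)'.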
+ if rc == 0:
+ try:
+ for line in out.splitlines():
+ fields = line.split('|')
+ if fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms':
+ virtual_facts['virtualization_type'] = 'ldom'
+ virtual_facts['virtualization_role'] = 'guest'
+ hostfeatures = []
+ for field in fields[2:]:
+ arg = field.split('=')
+ if arg[1] == 'true':
+ hostfeatures.append(arg[0])
+ if len(hostfeatures) > 0:
+ virtual_facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
+ except ValueError:
+ pass
+
+ else:
+ smbios = self.module.get_bin_path('smbios')
+ if not smbios:
+ return
+ rc, out, err = self.module.run_command(smbios)
+ if rc == 0:
+ for line in out.splitlines():
+ if 'VMware' in line:
+ virtual_facts['virtualization_type'] = 'vmware'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'Parallels' in line:
+ virtual_facts['virtualization_type'] = 'parallels'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'VirtualBox' in line:
+ virtual_facts['virtualization_type'] = 'virtualbox'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'HVM domU' in line:
+ virtual_facts['virtualization_type'] = 'xen'
+ virtual_facts['virtualization_role'] = 'guest'
+ elif 'KVM' in line:
+ virtual_facts['virtualization_type'] = 'kvm'
+ virtual_facts['virtualization_role'] = 'guest'
+
+ return virtual_facts
+
+
+class SunOSVirtualCollector(VirtualCollector):
+ _fact_class = SunOSVirtual
+ _platform = 'SunOS'
diff --git a/lib/ansible/module_utils/facts/virtual/sysctl.py b/lib/ansible/module_utils/facts/virtual/sysctl.py
new file mode 100644
index 00000000..a159cc15
--- /dev/null
+++ b/lib/ansible/module_utils/facts/virtual/sysctl.py
@@ -0,0 +1,69 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+class VirtualSysctlDetectionMixin(object):
+ def detect_sysctl(self):
+ self.sysctl_path = self.module.get_bin_path('sysctl')
+
+ def detect_virt_product(self, key):
+ virtual_product_facts = {}
+ self.detect_sysctl()
+ if self.sysctl_path:
+ rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
+ if rc == 0:
+ if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
+ virtual_product_facts['virtualization_type'] = 'kvm'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ elif re.match('.*VMware.*', out):
+ virtual_product_facts['virtualization_type'] = 'VMware'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ elif out.rstrip() == 'VirtualBox':
+ virtual_product_facts['virtualization_type'] = 'virtualbox'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ elif out.rstrip() == 'HVM domU':
+ virtual_product_facts['virtualization_type'] = 'xen'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ elif out.rstrip() == 'Parallels':
+ virtual_product_facts['virtualization_type'] = 'parallels'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ elif out.rstrip() == 'RHEV Hypervisor':
+ virtual_product_facts['virtualization_type'] = 'RHEV'
+ virtual_product_facts['virtualization_role'] = 'guest'
+ elif (key == 'security.jail.jailed') and (out.rstrip() == '1'):
+ virtual_product_facts['virtualization_type'] = 'jails'
+ virtual_product_facts['virtualization_role'] = 'guest'
+
+ return virtual_product_facts
+
+ def detect_virt_vendor(self, key):
+ virtual_vendor_facts = {}
+ self.detect_sysctl()
+ if self.sysctl_path:
+ rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
+ if rc == 0:
+ if out.rstrip() == 'QEMU':
+ virtual_vendor_facts['virtualization_type'] = 'kvm'
+ virtual_vendor_facts['virtualization_role'] = 'guest'
+ if out.rstrip() == 'OpenBSD':
+ virtual_vendor_facts['virtualization_type'] = 'vmm'
+ virtual_vendor_facts['virtualization_role'] = 'guest'
+
+ return virtual_vendor_facts
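+
+# Illustrative usage (editor's note, not part of the original file): a platform
+# subclass that mixes this in calls e.g. self.detect_virt_product('hw.product')
+# and merges the returned dict into its virtual_facts, as the BSD subclasses do.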
diff --git a/lib/ansible/module_utils/json_utils.py b/lib/ansible/module_utils/json_utils.py
new file mode 100644
index 00000000..d5639fa3
--- /dev/null
+++ b/lib/ansible/module_utils/json_utils.py
@@ -0,0 +1,79 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
+# changes are propagated there.
+def _filter_non_json_lines(data):
+ '''
+ Used to filter unrelated output around module JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+    Filters leading lines before the first line-starting occurrence of '{' or '[', and filters all
+    trailing lines after the matching close character (working from the bottom of the output).
+ '''
+ warnings = []
+
+ # Filter initial junk
+ lines = data.splitlines()
+
+ for start, line in enumerate(lines):
+ line = line.strip()
+ if line.startswith(u'{'):
+ endchar = u'}'
+ break
+ elif line.startswith(u'['):
+ endchar = u']'
+ break
+ else:
+ raise ValueError('No start of json char found')
+
+ # Filter trailing junk
+ lines = lines[start:]
+
+ for reverse_end_offset, line in enumerate(reversed(lines)):
+ if line.strip().endswith(endchar):
+ break
+ else:
+ raise ValueError('No end of json char found')
+
+ if reverse_end_offset > 0:
+ # Trailing junk is uncommon and can point to things the user might
+ # want to change. So print a warning if we find any
+ trailing_junk = lines[len(lines) - reverse_end_offset:]
+ for line in trailing_junk:
+ if line.strip():
+ warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+ break
+
+ lines = lines[:(len(lines) - reverse_end_offset)]
+
+ return ('\n'.join(lines), warnings)
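+
+# Illustrative behaviour (editor's note, not part of the original file):
+#   _filter_non_json_lines('banner\n{"changed": false}\ntrailing junk')
+#   -> ('{"changed": false}',
+#       ['Module invocation had junk after the JSON data: trailing junk'])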
diff --git a/lib/ansible/module_utils/parsing/__init__.py b/lib/ansible/module_utils/parsing/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/parsing/__init__.py
diff --git a/lib/ansible/module_utils/parsing/convert_bool.py b/lib/ansible/module_utils/parsing/convert_bool.py
new file mode 100644
index 00000000..7eea875f
--- /dev/null
+++ b/lib/ansible/module_utils/parsing/convert_bool.py
@@ -0,0 +1,29 @@
+# Copyright: 2017, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause )
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils._text import to_text
+
+
+BOOLEANS_TRUE = frozenset(('y', 'yes', 'on', '1', 'true', 't', 1, 1.0, True))
+BOOLEANS_FALSE = frozenset(('n', 'no', 'off', '0', 'false', 'f', 0, 0.0, False))
+BOOLEANS = BOOLEANS_TRUE.union(BOOLEANS_FALSE)
+
+
+def boolean(value, strict=True):
+ if isinstance(value, bool):
+ return value
+
+ normalized_value = value
+ if isinstance(value, (text_type, binary_type)):
+ normalized_value = to_text(value, errors='surrogate_or_strict').lower().strip()
+
+ if normalized_value in BOOLEANS_TRUE:
+ return True
+ elif normalized_value in BOOLEANS_FALSE or not strict:
+ return False
+
+ raise TypeError("The value '%s' is not a valid boolean. Valid booleans include: %s" % (to_text(value), ', '.join(repr(i) for i in BOOLEANS)))
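+
+# Illustrative behaviour (editor's note, not part of the original file):
+#   boolean('yes') -> True; boolean('off') -> False; boolean(1.0) -> True
+#   boolean('maybe') raises TypeError, but boolean('maybe', strict=False) -> False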
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
new file mode 100644
index 00000000..ba38159d
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.AddType.psm1
@@ -0,0 +1,370 @@
+# Copyright (c) 2018 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Add-CSharpType {
+ <#
+ .SYNOPSIS
+ Compiles one or more C# scripts similar to Add-Type. This exposes
+    more configuration options that are usable within Ansible and it
+ also allows multiple C# sources to be compiled together.
+
+ .PARAMETER References
+ [String[]] A collection of C# scripts to compile together.
+
+ .PARAMETER IgnoreWarnings
+ [Switch] Whether to compile code that contains compiler warnings, by
+ default warnings will cause a compiler error.
+
+ .PARAMETER PassThru
+ [Switch] Whether to return the loaded Assembly
+
+ .PARAMETER AnsibleModule
+ [Ansible.Basic.AnsibleModule] used to derive the TempPath and Debug values.
+ TempPath is set to the Tmpdir property of the class
+ IncludeDebugInfo is set when the Ansible verbosity is >= 3
+
+ .PARAMETER TempPath
+    [String] The temporary directory that the dynamic assembly is
+ compiled to. This file is deleted once compilation is complete.
+ Cannot be used when AnsibleModule is set. This is a no-op when
+ running on PSCore.
+
+ .PARAMETER IncludeDebugInfo
+ [Switch] Whether to include debug information in the compiled
+ assembly. Cannot be used when AnsibleModule is set. This is a no-op
+ when running on PSCore.
+
+ .PARAMETER CompileSymbols
+ [String[]] A list of symbols to be defined during compile time. These are
+    added to the existing symbols, 'CORECLR', 'WINDOWS', 'UNIX', that are set
+    conditionally in this cmdlet.
+
+ .NOTES
+ The following features were added to control the compiling options from the
+ code itself.
+
+ * Predefined compiler SYMBOLS
+
+ * CORECLR - Added when running on PowerShell Core.
+ * WINDOWS - Added when running on Windows.
+ * UNIX - Added when running on non-Windows.
+ * X86 - Added when running on a 32-bit process (Ansible 2.10+)
+ * AMD64 - Added when running on a 64-bit process (Ansible 2.10+)
+
+    * Ignore compiler warnings with the following inline comment
+
+ //NoWarn -Name <rule code> [-CLR Core|Framework]
+
+ * Specify custom assembly references inline
+
+ //AssemblyReference -Name Dll.Location.dll [-CLR Core|Framework]
+
+ # Added in Ansible 2.10
+ //AssemblyReference -Type System.Type.Name [-CLR Core|Framework]
+
+ * Create automatic type accelerators to simplify long namespace names (Ansible 2.9+)
+
+ //TypeAccelerator -Name <AcceleratorName> -TypeName <Name of compiled type>
+ #>
+ param(
+ [Parameter(Mandatory=$true)][AllowEmptyCollection()][String[]]$References,
+ [Switch]$IgnoreWarnings,
+ [Switch]$PassThru,
+ [Parameter(Mandatory=$true, ParameterSetName="Module")][Object]$AnsibleModule,
+ [Parameter(ParameterSetName="Manual")][String]$TempPath = $env:TMP,
+ [Parameter(ParameterSetName="Manual")][Switch]$IncludeDebugInfo,
+ [String[]]$CompileSymbols = @()
+ )
+ if ($null -eq $References -or $References.Length -eq 0) {
+ return
+ }
+
+ # define special symbols CORECLR, WINDOWS, UNIX if required
+ # the Is* variables are defined on PSCore, if absent we assume an
+ # older version of PowerShell under .NET Framework and Windows
+ $defined_symbols = [System.Collections.ArrayList]$CompileSymbols
+
+ if ([System.IntPtr]::Size -eq 4) {
+ $defined_symbols.Add('X86') > $null
+ } else {
+ $defined_symbols.Add('AMD64') > $null
+ }
+
+ $is_coreclr = Get-Variable -Name IsCoreCLR -ErrorAction SilentlyContinue
+ if ($null -ne $is_coreclr) {
+ if ($is_coreclr.Value) {
+ $defined_symbols.Add("CORECLR") > $null
+ }
+ }
+ $is_windows = Get-Variable -Name IsWindows -ErrorAction SilentlyContinue
+ if ($null -ne $is_windows) {
+ if ($is_windows.Value) {
+ $defined_symbols.Add("WINDOWS") > $null
+ } else {
+ $defined_symbols.Add("UNIX") > $null
+ }
+ } else {
+ $defined_symbols.Add("WINDOWS") > $null
+ }
+
+ # Store any TypeAccelerators shortcuts the util wants us to set
+ $type_accelerators = [System.Collections.Generic.List`1[Hashtable]]@()
+
+ # pattern used to find referenced assemblies in the code
+ $assembly_pattern = [Regex]"//\s*AssemblyReference\s+-(?<Parameter>(Name)|(Type))\s+(?<Name>[\w.]*)(\s+-CLR\s+(?<CLR>Core|Framework))?"
+ $no_warn_pattern = [Regex]"//\s*NoWarn\s+-Name\s+(?<Name>[\w\d]*)(\s+-CLR\s+(?<CLR>Core|Framework))?"
+ $type_pattern = [Regex]"//\s*TypeAccelerator\s+-Name\s+(?<Name>[\w.]*)\s+-TypeName\s+(?<TypeName>[\w.]*)"
+
+ # PSCore vs PSDesktop use different methods to compile the code,
+ # PSCore uses Roslyn and can compile the code purely in memory
+ # without touching the disk while PSDesktop uses CodeDom and csc.exe
+ # to compile the code. We branch out here and run each
+ # distribution's method to add our C# code.
+ if ($is_coreclr) {
+ # compile the code using Roslyn on PSCore
+
+ # Include the default assemblies using the logic in Add-Type
+ # https://github.com/PowerShell/PowerShell/blob/master/src/Microsoft.PowerShell.Commands.Utility/commands/utility/AddType.cs
+ $assemblies = [System.Collections.Generic.HashSet`1[Microsoft.CodeAnalysis.MetadataReference]]@(
+ [Microsoft.CodeAnalysis.CompilationReference]::CreateFromFile(([System.Reflection.Assembly]::GetAssembly([PSObject])).Location)
+ )
+ $netcore_app_ref_folder = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName([PSObject].Assembly.Location), "ref")
+ $lib_assembly_location = [System.IO.Path]::GetDirectoryName([object].Assembly.Location)
+ foreach ($file in [System.IO.Directory]::EnumerateFiles($netcore_app_ref_folder, "*.dll", [System.IO.SearchOption]::TopDirectoryOnly)) {
+ $assemblies.Add([Microsoft.CodeAnalysis.MetadataReference]::CreateFromFile($file)) > $null
+ }
+
+ # loop through the references, parse as a SyntaxTree and get
+ # referenced assemblies
+ $ignore_warnings = New-Object -TypeName 'System.Collections.Generic.Dictionary`2[[String], [Microsoft.CodeAnalysis.ReportDiagnostic]]'
+ $parse_options = ([Microsoft.CodeAnalysis.CSharp.CSharpParseOptions]::Default).WithPreprocessorSymbols($defined_symbols)
+ $syntax_trees = [System.Collections.Generic.List`1[Microsoft.CodeAnalysis.SyntaxTree]]@()
+ foreach ($reference in $References) {
+ # scan through code and add any assemblies that match
+ # //AssemblyReference -Name ... [-CLR Core]
+ # //NoWarn -Name ... [-CLR Core]
+ # //TypeAccelerator -Name ... -TypeName ...
+ $assembly_matches = $assembly_pattern.Matches($reference)
+ foreach ($match in $assembly_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Core") {
+ continue
+ }
+
+ $parameter_type = $match.Groups["Parameter"].Value
+ $assembly_path = $match.Groups["Name"].Value
+ if ($parameter_type -eq "Type") {
+ $assembly_path = ([Type]$assembly_path).Assembly.Location
+ } else {
+ if (-not ([System.IO.Path]::IsPathRooted($assembly_path))) {
+ $assembly_path = Join-Path -Path $lib_assembly_location -ChildPath $assembly_path
+ }
+ }
+ $assemblies.Add([Microsoft.CodeAnalysis.MetadataReference]::CreateFromFile($assembly_path)) > $null
+ }
+ $warn_matches = $no_warn_pattern.Matches($reference)
+ foreach ($match in $warn_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Core") {
+ continue
+ }
+ $ignore_warnings.Add($match.Groups["Name"], [Microsoft.CodeAnalysis.ReportDiagnostic]::Suppress)
+ }
+ $syntax_trees.Add([Microsoft.CodeAnalysis.CSharp.CSharpSyntaxTree]::ParseText($reference, $parse_options)) > $null
+
+ $type_matches = $type_pattern.Matches($reference)
+ foreach ($match in $type_matches) {
+ $type_accelerators.Add(@{Name=$match.Groups["Name"].Value; TypeName=$match.Groups["TypeName"].Value})
+ }
+ }
+
+ # Release seems to contain the correct line numbers compared to
+        # debug; may need to keep a closer eye on this in the future
+ $compiler_options = (New-Object -TypeName Microsoft.CodeAnalysis.CSharp.CSharpCompilationOptions -ArgumentList @(
+ [Microsoft.CodeAnalysis.OutputKind]::DynamicallyLinkedLibrary
+ )).WithOptimizationLevel([Microsoft.CodeAnalysis.OptimizationLevel]::Release)
+
+ # set warnings to error out if IgnoreWarnings is not set
+ if (-not $IgnoreWarnings.IsPresent) {
+ $compiler_options = $compiler_options.WithGeneralDiagnosticOption([Microsoft.CodeAnalysis.ReportDiagnostic]::Error)
+ $compiler_options = $compiler_options.WithSpecificDiagnosticOptions($ignore_warnings)
+ }
+
+ # create compilation object
+ $compilation = [Microsoft.CodeAnalysis.CSharp.CSharpCompilation]::Create(
+ [System.Guid]::NewGuid().ToString(),
+ $syntax_trees,
+ $assemblies,
+ $compiler_options
+ )
+
+ # Load the compiled code and pdb info, we do this so we can
+        # include line numbers in a stacktrace
+ $code_ms = New-Object -TypeName System.IO.MemoryStream
+ $pdb_ms = New-Object -TypeName System.IO.MemoryStream
+ try {
+ $emit_result = $compilation.Emit($code_ms, $pdb_ms)
+ if (-not $emit_result.Success) {
+ $errors = [System.Collections.ArrayList]@()
+
+ foreach ($e in $emit_result.Diagnostics) {
+ # builds the error msg, based on logic in Add-Type
+ # https://github.com/PowerShell/PowerShell/blob/master/src/Microsoft.PowerShell.Commands.Utility/commands/utility/AddType.cs#L1239
+ if ($null -eq $e.Location.SourceTree) {
+ $errors.Add($e.ToString()) > $null
+ continue
+ }
+
+ $cancel_token = New-Object -TypeName System.Threading.CancellationToken -ArgumentList $false
+ $text_lines = $e.Location.SourceTree.GetText($cancel_token).Lines
+ $line_span = $e.Location.GetLineSpan()
+
+ $diagnostic_message = $e.ToString()
+ $error_line_string = $text_lines[$line_span.StartLinePosition.Line].ToString()
+ $error_position = $line_span.StartLinePosition.Character
+
+ $sb = New-Object -TypeName System.Text.StringBuilder -ArgumentList ($diagnostic_message.Length + $error_line_string.Length * 2 + 4)
+ $sb.AppendLine($diagnostic_message)
+ $sb.AppendLine($error_line_string)
+
+ for ($i = 0; $i -lt $error_line_string.Length; $i++) {
+ if ([System.Char]::IsWhiteSpace($error_line_string[$i])) {
+ continue
+ }
+ $sb.Append($error_line_string, 0, $i)
+ $sb.Append(' ', [Math]::Max(0, $error_position - $i))
+ $sb.Append("^")
+ break
+ }
+
+ $errors.Add($sb.ToString()) > $null
+ }
+
+ throw [InvalidOperationException]"Failed to compile C# code:`r`n$($errors -join "`r`n")"
+ }
+
+ $code_ms.Seek(0, [System.IO.SeekOrigin]::Begin) > $null
+ $pdb_ms.Seek(0, [System.IO.SeekOrigin]::Begin) > $null
+ $compiled_assembly = [System.Runtime.Loader.AssemblyLoadContext]::Default.LoadFromStream($code_ms, $pdb_ms)
+ } finally {
+ $code_ms.Close()
+ $pdb_ms.Close()
+ }
+ } else {
+ # compile the code using CodeDom on PSDesktop
+
+ # configure compile options based on input
+ if ($PSCmdlet.ParameterSetName -eq "Module") {
+ $temp_path = $AnsibleModule.Tmpdir
+ $include_debug = $AnsibleModule.Verbosity -ge 3
+ } else {
+ $temp_path = $TempPath
+ $include_debug = $IncludeDebugInfo.IsPresent
+ }
+ $compiler_options = [System.Collections.ArrayList]@("/optimize")
+ if ($defined_symbols.Count -gt 0) {
+ $compiler_options.Add("/define:" + ([String]::Join(";", $defined_symbols.ToArray()))) > $null
+ }
+
+ $compile_parameters = New-Object -TypeName System.CodeDom.Compiler.CompilerParameters
+ $compile_parameters.GenerateExecutable = $false
+ $compile_parameters.GenerateInMemory = $true
+ $compile_parameters.TreatWarningsAsErrors = (-not $IgnoreWarnings.IsPresent)
+ $compile_parameters.IncludeDebugInformation = $include_debug
+ $compile_parameters.TempFiles = (New-Object -TypeName System.CodeDom.Compiler.TempFileCollection -ArgumentList $temp_path, $false)
+
+ # Add-Type automatically references System.dll, System.Core.dll,
+ # and System.Management.Automation.dll which we replicate here
+ $assemblies = [System.Collections.Generic.HashSet`1[String]]@(
+ "System.dll",
+ "System.Core.dll",
+ ([System.Reflection.Assembly]::GetAssembly([PSObject])).Location
+ )
+
+ # create a code snippet for each reference and check if we need
+ # to reference any extra assemblies
+ $ignore_warnings = [System.Collections.ArrayList]@()
+ $compile_units = [System.Collections.Generic.List`1[System.CodeDom.CodeSnippetCompileUnit]]@()
+ foreach ($reference in $References) {
+ # scan through code and add any assemblies that match
+ # //AssemblyReference -Name ... [-CLR Framework]
+ # //NoWarn -Name ... [-CLR Framework]
+ # //TypeAccelerator -Name ... -TypeName ...
+ $assembly_matches = $assembly_pattern.Matches($reference)
+ foreach ($match in $assembly_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Framework") {
+ continue
+ }
+
+ $parameter_type = $match.Groups["Parameter"].Value
+ $assembly_path = $match.Groups["Name"].Value
+ if ($parameter_type -eq "Type") {
+ $assembly_path = ([Type]$assembly_path).Assembly.Location
+ }
+ $assemblies.Add($assembly_path) > $null
+ }
+ $warn_matches = $no_warn_pattern.Matches($reference)
+ foreach ($match in $warn_matches) {
+ $clr = $match.Groups["CLR"].Value
+ if ($clr -and $clr -ne "Framework") {
+ continue
+ }
+ $warning_id = $match.Groups["Name"].Value
+ # /nowarn should only contain the numeric part
+ if ($warning_id.StartsWith("CS")) {
+ $warning_id = $warning_id.Substring(2)
+ }
+ $ignore_warnings.Add($warning_id) > $null
+ }
+ $compile_units.Add((New-Object -TypeName System.CodeDom.CodeSnippetCompileUnit -ArgumentList $reference)) > $null
+
+ $type_matches = $type_pattern.Matches($reference)
+ foreach ($match in $type_matches) {
+ $type_accelerators.Add(@{Name=$match.Groups["Name"].Value; TypeName=$match.Groups["TypeName"].Value})
+ }
+ }
+ if ($ignore_warnings.Count -gt 0) {
+ $compiler_options.Add("/nowarn:" + ([String]::Join(",", $ignore_warnings.ToArray()))) > $null
+ }
+ $compile_parameters.ReferencedAssemblies.AddRange($assemblies)
+ $compile_parameters.CompilerOptions = [String]::Join(" ", $compiler_options.ToArray())
+
+ # compile the code together and check for errors
+ $provider = New-Object -TypeName Microsoft.CSharp.CSharpCodeProvider
+ $compile = $provider.CompileAssemblyFromDom($compile_parameters, $compile_units)
+ if ($compile.Errors.HasErrors) {
+ $msg = "Failed to compile C# code: "
+ foreach ($e in $compile.Errors) {
+ $msg += "`r`n" + $e.ToString()
+ }
+ throw [InvalidOperationException]$msg
+ }
+ $compiled_assembly = $compile.CompiledAssembly
+ }
+
+ $type_accelerator = [PSObject].Assembly.GetType("System.Management.Automation.TypeAccelerators")
+ foreach ($accelerator in $type_accelerators) {
+ $type_name = $accelerator.TypeName
+ $found = $false
+
+ foreach ($assembly_type in $compiled_assembly.GetTypes()) {
+ if ($assembly_type.Name -eq $type_name) {
+ $type_accelerator::Add($accelerator.Name, $assembly_type)
+ $found = $true
+ break
+ }
+ }
+ if (-not $found) {
+ throw "Failed to find compiled class '$type_name' for custom TypeAccelerator."
+ }
+ }
+
+ # return the compiled assembly if PassThru is set.
+ if ($PassThru) {
+ return $compiled_assembly
+ }
+}
+
+Export-ModuleMember -Function Add-CSharpType
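+
+# Illustrative usage (editor's sketch, not part of the original file; the class
+# name Demo is hypothetical):
+#   $code = 'public class Demo { public static int Add(int a, int b) { return a + b; } }'
+#   $asm = Add-CSharpType -References @($code) -PassThru -TempPath $env:TMP
+#   [Demo]::Add(1, 2)   # -> 3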
+
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1
new file mode 100644
index 00000000..2c6bfb0a
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1
@@ -0,0 +1,75 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# The rules used in these functions are derived from the below
+# https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
+# https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
+
+Function Escape-Argument($argument, $force_quote=$false) {
+    # this converts a single argument to an escaped version, use Argv-ToString
+    # instead of this function as this only escapes a single string.
+
+ # check if argument contains a space, \n, \t, \v or "
+ if ($force_quote -eq $false -and $argument.Length -gt 0 -and $argument -notmatch "[ \n\t\v`"]") {
+ # argument does not need escaping (and we don't want to force it),
+ # return as is
+ return $argument
+ } else {
+ # we need to quote the arg so start with "
+ $new_argument = '"'
+
+ for ($i = 0; $i -lt $argument.Length; $i++) {
+ $num_backslashes = 0
+
+ # get the number of \ from current char until end or not a \
+ while ($i -ne ($argument.Length - 1) -and $argument[$i] -eq "\") {
+ $num_backslashes++
+ $i++
+ }
+
+ $current_char = $argument[$i]
+ if ($i -eq ($argument.Length -1) -and $current_char -eq "\") {
+                # We are at the end of the string, so double all the backslashes
+                # (counting this final one) as the next char will be the closing "
+ $new_argument += ("\" * ($num_backslashes + 1) * 2)
+ } elseif ($current_char -eq '"') {
+ # we have a inline ", we need to add the existing \ but * by 2
+ # plus another 1
+ $new_argument += ("\" * (($num_backslashes * 2) + 1))
+ $new_argument += $current_char
+ } else {
+ # normal character so no need to escape the \ we have counted
+ $new_argument += ("\" * $num_backslashes)
+ $new_argument += $current_char
+ }
+ }
+
+ # we need to close the special arg with a "
+ $new_argument += '"'
+ return $new_argument
+ }
+}
+
+Function Argv-ToString($arguments, $force_quote=$false) {
+    # Takes in a list of unescaped arguments and converts it to a single string
+    # that can be used when starting a new process. It will escape the
+    # characters as necessary in the list.
+    # While there is a CommandLineToArgvW function, there is no
+    # ArgvToCommandLineW that we can call to convert a list to an escaped
+ # string.
+ # You can also pass in force_quote so that each argument is quoted even
+ # when not necessary, by default only arguments with certain characters are
+ # quoted.
+ # TODO: add in another switch which will escape the args for cmd.exe
+
+ $escaped_arguments = @()
+ foreach ($argument in $arguments) {
+ $escaped_argument = Escape-Argument -argument $argument -force_quote $force_quote
+ $escaped_arguments += $escaped_argument
+ }
+
+ return ($escaped_arguments -join ' ')
+}
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1
new file mode 100644
index 00000000..246341cb
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Backup.psm1
@@ -0,0 +1,33 @@
+# Copyright (c): 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Backup-File {
+<#
+ .SYNOPSIS
+ Helper function to make a backup of a file.
+ .EXAMPLE
+ Backup-File -path $path -WhatIf:$check_mode
+#>
+ [CmdletBinding(SupportsShouldProcess=$true)]
+
+ Param (
+ [Parameter(Mandatory = $true, ValueFromPipeline = $true)]
+ [string] $path
+ )
+
+ Process {
+ $backup_path = $null
+ if (Test-Path -LiteralPath $path -PathType Leaf) {
+ $backup_path = "$path.$pid." + [DateTime]::Now.ToString("yyyyMMdd-HHmmss") + ".bak";
+ Try {
+ Copy-Item -LiteralPath $path -Destination $backup_path
+ } Catch {
+ throw "Failed to create backup file '$backup_path' from '$path'. ($($_.Exception.Message))"
+ }
+ }
+ return $backup_path
+ }
+}
+
+# This line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Function Backup-File
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
new file mode 100644
index 00000000..cb08eda5
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CamelConversion.psm1
@@ -0,0 +1,65 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# used by Convert-DictToSnakeCase to convert a string in camelCase
+# format to snake_case
+Function Convert-StringToSnakeCase($string) {
+    # cope with pluralized abbreviations such as TargetGroupARNs
+ if ($string -cmatch "[A-Z]{3,}s") {
+ $replacement_string = $string -creplace $matches[0], "_$($matches[0].ToLower())"
+
+ # handle when there was nothing before the plural pattern
+ if ($replacement_string.StartsWith("_") -and -not $string.StartsWith("_")) {
+ $replacement_string = $replacement_string.Substring(1)
+ }
+ $string = $replacement_string
+ }
+ $string = $string -creplace "(.)([A-Z][a-z]+)", '$1_$2'
+ $string = $string -creplace "([a-z0-9])([A-Z])", '$1_$2'
+ $string = $string.ToLower()
+
+ return $string
+}
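+
+# Illustrative conversions (editor's note, not part of the original file):
+#   Convert-StringToSnakeCase "TargetGroupARNs"  # -> target_group_arns
+#   Convert-StringToSnakeCase "camelCaseKey"     # -> camel_case_key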
+
+# used by Convert-DictToSnakeCase to convert list entries from camelCase
+# to snake_case
+Function Convert-ListToSnakeCase($list) {
+ $snake_list = [System.Collections.ArrayList]@()
+ foreach ($value in $list) {
+ if ($value -is [Hashtable]) {
+ $new_value = Convert-DictToSnakeCase -dict $value
+ } elseif ($value -is [Array] -or $value -is [System.Collections.ArrayList]) {
+ $new_value = Convert-ListToSnakeCase -list $value
+ } else {
+ $new_value = $value
+ }
+ [void]$snake_list.Add($new_value)
+ }
+
+ return ,$snake_list
+}
+
+# converts a dict/hashtable keys from camelCase to snake_case
+# this is to keep the return values consistent with the Ansible
+# way of working.
+Function Convert-DictToSnakeCase($dict) {
+ $snake_dict = @{}
+ foreach ($dict_entry in $dict.GetEnumerator()) {
+ $key = $dict_entry.Key
+ $snake_key = Convert-StringToSnakeCase -string $key
+
+ $value = $dict_entry.Value
+ if ($value -is [Hashtable]) {
+ $snake_dict.$snake_key = Convert-DictToSnakeCase -dict $value
+ } elseif ($value -is [Array] -or $value -is [System.Collections.ArrayList]) {
+ $snake_dict.$snake_key = Convert-ListToSnakeCase -list $value
+ } else {
+ $snake_dict.$snake_key = $value
+ }
+ }
+
+ return ,$snake_dict
+}
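+
+# Illustrative usage (editor's note, not part of the original file):
+#   Convert-DictToSnakeCase @{TaskName = "x"; RetryCount = 3}
+#   # -> @{task_name = "x"; retry_count = 3}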
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
new file mode 100644
index 00000000..0e037e57
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1
@@ -0,0 +1,122 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+#AnsibleRequires -CSharpUtil Ansible.Process
+
+Function Load-CommandUtils {
+ <#
+ .SYNOPSIS
+ No-op, as the C# types are automatically loaded.
+ #>
+ Param()
+ $msg = "Load-CommandUtils is deprecated and no longer needed, this cmdlet will be removed in a future version"
+ if ((Get-Command -Name Add-DeprecationWarning -ErrorAction SilentlyContinue) -and (Get-Variable -Name result -ErrorAction SilentlyContinue)) {
+ Add-DeprecationWarning -obj $result.Value -message $msg -version 2.12
+ } else {
+ $module = Get-Variable -Name module -ErrorAction SilentlyContinue
+ if ($null -ne $module -and $module.Value.GetType().FullName -eq "Ansible.Basic.AnsibleModule") {
+ $module.Value.Deprecate($msg, "2.12")
+ }
+ }
+}
+
+Function Get-ExecutablePath {
+ <#
+ .SYNOPSIS
+    Gets the full path to an executable; searches the directory specified or the ones in the PATH env var.
+
+ .PARAMETER executable
+    [String] The executable to search for.
+
+ .PARAMETER directory
+ [String] If set, the directory to search in.
+
+ .OUTPUT
+    [String] The full path to the specified executable.
+ #>
+ Param(
+ [String]$executable,
+ [String]$directory = $null
+ )
+
+ # we need to add .exe if it doesn't have an extension already
+ if (-not [System.IO.Path]::HasExtension($executable)) {
+ $executable = "$($executable).exe"
+ }
+ $full_path = [System.IO.Path]::GetFullPath($executable)
+
+ if ($full_path -ne $executable -and $directory -ne $null) {
+ $file = Get-Item -LiteralPath "$directory\$executable" -Force -ErrorAction SilentlyContinue
+ } else {
+ $file = Get-Item -LiteralPath $executable -Force -ErrorAction SilentlyContinue
+ }
+
+ if ($null -ne $file) {
+ $executable_path = $file.FullName
+ } else {
+ $executable_path = [Ansible.Process.ProcessUtil]::SearchPath($executable)
+ }
+ return $executable_path
+}
+
+Function Run-Command {
+ <#
+ .SYNOPSIS
+ Run a command with the CreateProcess API and return the stdout/stderr and return code.
+
+ .PARAMETER command
+ The full command, including the executable, to run.
+
+ .PARAMETER working_directory
+    The working directory to set on the new process; defaults to the current working dir.
+
+ .PARAMETER stdin
+    A string to send over the stdin pipe to the new process.
+
+ .PARAMETER environment
+ A hashtable of key/value pairs to run with the command. If set, it will replace all other env vars.
+
+ .PARAMETER output_encoding_override
+ The character encoding name for decoding stdout/stderr output of the process.
+
+ .OUTPUT
+ [Hashtable]
+ [String]executable - The full path to the executable that was run
+ [String]stdout - The stdout stream of the process
+ [String]stderr - The stderr stream of the process
+ [Int32]rc - The return code of the process
+ #>
+ Param(
+ [string]$command,
+ [string]$working_directory = $null,
+ [string]$stdin = "",
+ [hashtable]$environment = @{},
+ [string]$output_encoding_override = $null
+ )
+
+ # need to validate the working directory if it is set
+ if ($working_directory) {
+ # validate working directory is a valid path
+ if (-not (Test-Path -LiteralPath $working_directory)) {
+ throw "invalid working directory path '$working_directory'"
+ }
+ }
+
+ # lpApplicationName needs to be the full path to an executable, we do this
+ # by getting the executable as the first arg and then getting the full path
+ $arguments = [Ansible.Process.ProcessUtil]::ParseCommandLine($command)
+ $executable = Get-ExecutablePath -executable $arguments[0] -directory $working_directory
+
+ # run the command and get the results
+ $command_result = [Ansible.Process.ProcessUtil]::CreateProcess($executable, $command, $working_directory, $environment, $stdin, $output_encoding_override)
+
+ return ,@{
+ executable = $executable
+ stdout = $command_result.StandardOut
+ stderr = $command_result.StandardError
+ rc = $command_result.ExitCode
+ }
+}
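+
+# Illustrative usage (editor's sketch, not part of the original file; requires
+# the Ansible.Process C# util to be loaded):
+#   $res = Run-Command -command 'cmd.exe /c echo hello'
+#   $res.stdout   # -> "hello`r`n"; $res.rc -> 0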
+
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Function Get-ExecutablePath, Load-CommandUtils, Run-Command
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1
new file mode 100644
index 00000000..81a29ac0
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1
@@ -0,0 +1,60 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+<#
+Test-Path/Get-Item cannot find/return info on files that are locked like
+C:\pagefile.sys. These 2 functions are designed to work with these files and
+provide similar functionality to the normal cmdlets with as little overhead
+as possible. They work by querying the file attributes directly through
+System.IO.File and return the result from that.
+#>
+
+Function Test-AnsiblePath {
+ [CmdletBinding()]
+ Param(
+ [Parameter(Mandatory=$true)][string]$Path
+ )
+ # Replacement for Test-Path
+ try {
+ $file_attributes = [System.IO.File]::GetAttributes($Path)
+ } catch [System.IO.FileNotFoundException], [System.IO.DirectoryNotFoundException] {
+ return $false
+ } catch [NotSupportedException] {
+ # When testing a path like Cert:\LocalMachine\My, System.IO.File will
+        # not work, so we just fall back to using Test-Path for this
+ return Test-Path -Path $Path
+ }
+
+ if ([Int32]$file_attributes -eq -1) {
+ return $false
+ } else {
+ return $true
+ }
+}
+
+Function Get-AnsibleItem {
+ [CmdletBinding()]
+ Param(
+ [Parameter(Mandatory=$true)][string]$Path
+ )
+ # Replacement for Get-Item
+ try {
+ $file_attributes = [System.IO.File]::GetAttributes($Path)
+ } catch {
+        # if -ErrorAction SilentlyContinue is set on the cmdlet and we failed to
+ # get the attributes, just return $null, otherwise throw the error
+ if ($ErrorActionPreference -ne "SilentlyContinue") {
+ throw $_
+ }
+ return $null
+ }
+ if ([Int32]$file_attributes -eq -1) {
+ throw New-Object -TypeName System.Management.Automation.ItemNotFoundException -ArgumentList "Cannot find path '$Path' because it does not exist."
+ } elseif ($file_attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
+ return New-Object -TypeName System.IO.DirectoryInfo -ArgumentList $Path
+ } else {
+ return New-Object -TypeName System.IO.FileInfo -ArgumentList $Path
+ }
+}
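+
+# Illustrative usage (editor's note, not part of the original file):
+#   Test-AnsiblePath -Path C:\pagefile.sys   # -> $true even while the file is locked
+#   Get-AnsibleItem -Path C:\pagefile.sys    # -> a System.IO.FileInfo object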
+
+Export-ModuleMember -Function Test-AnsiblePath, Get-AnsibleItem
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1
new file mode 100644
index 00000000..de2c7756
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1
@@ -0,0 +1,377 @@
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2014, and others
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+
+Function Set-Attr($obj, $name, $value)
+{
+<#
+ .SYNOPSIS
+ Helper function to set an "attribute" on a psobject instance in PowerShell.
+ This is a convenience to make adding Members to the object easier and
+ slightly more pythonic
+ .EXAMPLE
+ Set-Attr $result "changed" $true
+#>
+
+ # If the provided $obj is undefined, define one to be nice
+ If (-not $obj.GetType)
+ {
+ $obj = @{ }
+ }
+
+ Try
+ {
+ $obj.$name = $value
+ }
+ Catch
+ {
+ $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
+ }
+}
+
+Function Exit-Json($obj)
+{
+<#
+ .SYNOPSIS
+ Helper function to convert a PowerShell object to JSON and output it, exiting
+ the script
+ .EXAMPLE
+ Exit-Json $result
+#>
+
+ # If the provided $obj is undefined, define one to be nice
+ If (-not $obj.GetType)
+ {
+ $obj = @{ }
+ }
+
+ if (-not $obj.ContainsKey('changed')) {
+ Set-Attr -obj $obj -name "changed" -value $false
+ }
+
+ Write-Output $obj | ConvertTo-Json -Compress -Depth 99
+ Exit
+}
+
+Function Fail-Json($obj, $message = $null)
+{
+<#
+ .SYNOPSIS
+ Helper function to add the "msg" property and "failed" property, convert the
+ PowerShell Hashtable to JSON and output it, exiting the script
+ .EXAMPLE
+ Fail-Json $result "This is the failure message"
+#>
+
+ if ($obj -is [hashtable] -or $obj -is [psobject]) {
+ # Nothing to do
+ } elseif ($obj -is [string] -and $null -eq $message) {
+ # If we weren't given 2 args, and the only arg was a string,
+ # create a new Hashtable and use the arg as the failure message
+ $message = $obj
+ $obj = @{ }
+ } else {
+ # If the first argument is undefined or a different type,
+ # make it a Hashtable
+ $obj = @{ }
+ }
+
+ # Still using Set-Attr for PSObject compatibility
+ Set-Attr -obj $obj -name "msg" -value $message
+ Set-Attr -obj $obj -name "failed" -value $true
+
+ if (-not $obj.ContainsKey('changed')) {
+ Set-Attr -obj $obj -name "changed" -value $false
+ }
+
+ Write-Output $obj | ConvertTo-Json -Compress -Depth 99
+ Exit 1
+}
+
+Function Add-Warning($obj, $message)
+{
+<#
+ .SYNOPSIS
+ Helper function to add warnings, even if the warnings attribute was
+ not already set up. This is a convenience for the module developer
+ so they do not have to check for the attribute prior to adding.
+#>
+
+ if (-not $obj.ContainsKey("warnings")) {
+ $obj.warnings = @()
+ } elseif ($obj.warnings -isnot [array]) {
+ throw "Add-Warning: warnings attribute is not an array"
+ }
+
+ $obj.warnings += $message
+}
+
+Function Add-DeprecationWarning($obj, $message, $version = $null)
+{
+<#
+ .SYNOPSIS
+ Helper function to add deprecations, even if the deprecations attribute was
+ not already set up. This is a convenience for the module developer
+ so they do not have to check for the attribute prior to adding.
+#>
+ if (-not $obj.ContainsKey("deprecations")) {
+ $obj.deprecations = @()
+ } elseif ($obj.deprecations -isnot [array]) {
+ throw "Add-DeprecationWarning: deprecations attribute is not a list"
+ }
+
+ $obj.deprecations += @{
+ msg = $message
+ version = $version
+ }
+}
+
+Function Expand-Environment($value)
+{
+<#
+ .SYNOPSIS
+ Helper function to expand environment variables in values. By default
+ it turns any type to a string, but we ensure $null remains $null.
+#>
+ if ($null -ne $value) {
+ [System.Environment]::ExpandEnvironmentVariables($value)
+ } else {
+ $value
+ }
+}
+
+Function Get-AnsibleParam($obj, $name, $default = $null, $resultobj = @{}, $failifempty = $false, $emptyattributefailmessage, $ValidateSet, $ValidateSetErrorMessage, $type = $null, $aliases = @())
+{
+<#
+ .SYNOPSIS
+ Helper function to get an "attribute" from a psobject instance in PowerShell.
+ This is a convenience to make getting Members from an object easier and
+ slightly more pythonic
+ .EXAMPLE
+ $attr = Get-AnsibleParam $response "code" -default "1"
+ .EXAMPLE
+ Get-AnsibleParam -obj $params -name "State" -default "Present" -ValidateSet "Present","Absent" -resultobj $resultobj -failifempty $true
+ Get-AnsibleParam also supports Parameter validation to save you from coding that manually
+ Note that if you use the failifempty option, you do need to specify resultobject as well.
+#>
+ # Check if the provided Member $name or aliases exist in $obj and return it or the default.
+ try {
+
+ $found = $null
+ # First try to find preferred parameter $name
+ $aliases = @($name) + $aliases
+
+ # Iterate over aliases to find acceptable Member $name
+ foreach ($alias in $aliases) {
+ if ($obj.ContainsKey($alias)) {
+ $found = $alias
+ break
+ }
+ }
+
+ if ($null -eq $found) {
+ throw
+ }
+ $name = $found
+
+ if ($ValidateSet) {
+
+ if ($ValidateSet -contains ($obj.$name)) {
+ $value = $obj.$name
+ } else {
+ if ($null -eq $ValidateSetErrorMessage) {
+ #Auto-generated error should be sufficient in most use cases
+ $ValidateSetErrorMessage = "Get-AnsibleParam: Argument $name needs to be one of $($ValidateSet -join ",") but was $($obj.$name)."
+ }
+ Fail-Json -obj $resultobj -message $ValidateSetErrorMessage
+ }
+ } else {
+ $value = $obj.$name
+ }
+ } catch {
+ if ($failifempty -eq $false) {
+ $value = $default
+ } else {
+ if (-not $emptyattributefailmessage) {
+ $emptyattributefailmessage = "Get-AnsibleParam: Missing required argument: $name"
+ }
+ Fail-Json -obj $resultobj -message $emptyattributefailmessage
+ }
+ }
+
+ # If $null -eq $value, the parameter was unspecified by the user (deliberately or not)
+ # Please leave $null-values intact, modules need to know if a parameter was specified
+ if ($null -eq $value) {
+ return $null
+ }
+
+ if ($type -eq "path") {
+ # Expand environment variables on path-type
+ $value = Expand-Environment($value)
+ # Test if a valid path is provided
+ if (-not (Test-Path -IsValid $value)) {
+ $path_invalid = $true
+ # could still be a valid-shaped path with a nonexistent drive letter
+ if ($value -match "^\w:") {
+            # rewrite path with a valid drive letter and recheck the shape; this might still fail, e.g. a nonexistent non-filesystem PS path
+ if (Test-Path -IsValid $(@(Get-PSDrive -PSProvider Filesystem)[0].Name + $value.Substring(1))) {
+ $path_invalid = $false
+ }
+ }
+ if ($path_invalid) {
+ Fail-Json -obj $resultobj -message "Get-AnsibleParam: Parameter '$name' has an invalid path '$value' specified."
+ }
+ }
+ } elseif ($type -eq "str") {
+ # Convert str types to real Powershell strings
+ $value = $value.ToString()
+ } elseif ($type -eq "bool") {
+ # Convert boolean types to real Powershell booleans
+ $value = $value | ConvertTo-Bool
+ } elseif ($type -eq "int") {
+ # Convert int types to real Powershell integers
+ $value = $value -as [int]
+ } elseif ($type -eq "float") {
+ # Convert float types to real Powershell floats
+ $value = $value -as [float]
+ } elseif ($type -eq "list") {
+ if ($value -is [array]) {
+ # Nothing to do
+ } elseif ($value -is [string]) {
+ # Convert string type to real Powershell array
+ $value = $value.Split(",").Trim()
+ } elseif ($value -is [int]) {
+ $value = @($value)
+ } else {
+ Fail-Json -obj $resultobj -message "Get-AnsibleParam: Parameter '$name' is not a YAML list."
+ }
+        # The leading comma is not a typo; it forces the value to be returned as a list even when it is empty or has only one entry
+ return ,$value
+ }
+
+ return $value
+}
+
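+# Illustrative usage sketch (not upstream code; assumes $params came from Parse-Args and
+# $result is the module's result hashtable):
+#     $path = Get-AnsibleParam -obj $params -name "path" -type "path" -failifempty $true -resultobj $result
+#     $state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -ValidateSet "present","absent"
+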
+# Alias Get-attr --> Get-AnsibleParam for backwards compatibility. Only added when needed, to ease debugging of scripts
+If (-not(Get-Alias -Name "Get-attr" -ErrorAction SilentlyContinue))
+{
+ New-Alias -Name Get-attr -Value Get-AnsibleParam
+}
+
+Function ConvertTo-Bool
+{
+<#
+ .SYNOPSIS
+ Helper filter/pipeline function to convert a value to boolean following current
+ Ansible practices
+ .EXAMPLE
+ $is_true = "true" | ConvertTo-Bool
+#>
+ param(
+ [parameter(valuefrompipeline=$true)]
+ $obj
+ )
+
+ $boolean_strings = "yes", "on", "1", "true", 1
+ $obj_string = [string]$obj
+
+ if (($obj -is [boolean] -and $obj) -or $boolean_strings -contains $obj_string.ToLower()) {
+ return $true
+ } else {
+ return $false
+ }
+}
+
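+# Illustrative sketch: ConvertTo-Bool accepts pipeline input, so values can be normalised
+# inline (sample values are assumptions, not upstream test data):
+#     "yes" | ConvertTo-Bool    # $true
+#     "0" | ConvertTo-Bool      # $false
+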
+Function Parse-Args($arguments, $supports_check_mode = $false)
+{
+<#
+ .SYNOPSIS
+ Helper function to parse Ansible JSON arguments from a "file" passed as
+ the single argument to the module.
+ .EXAMPLE
+ $params = Parse-Args $args
+#>
+ $params = New-Object psobject
+ If ($arguments.Length -gt 0)
+ {
+ $params = Get-Content $arguments[0] | ConvertFrom-Json
+ }
+ Else {
+ $params = $complex_args
+ }
+ $check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+ If ($check_mode -and -not $supports_check_mode)
+ {
+ Exit-Json @{
+ skipped = $true
+ changed = $false
+ msg = "remote module does not support check mode"
+ }
+ }
+ return $params
+}
+
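+# Illustrative module skeleton tying the helpers together (a sketch, not an upstream module):
+#     $params = Parse-Args $args -supports_check_mode $true
+#     $name = Get-AnsibleParam -obj $params -name "name" -type "str" -failifempty $true
+#     Exit-Json @{ changed = $false; name = $name }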
+
+Function Get-FileChecksum($path, $algorithm = 'sha1')
+{
+<#
+ .SYNOPSIS
+ Helper function to calculate a hash of a file in a way which PowerShell 3
+ and above can handle
+#>
+ If (Test-Path -LiteralPath $path -PathType Leaf)
+ {
+ switch ($algorithm)
+ {
+ 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
+ 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
+ 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
+ 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
+ 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
+ default { Fail-Json @{} "Unsupported hash algorithm supplied '$algorithm'" }
+ }
+
+ If ($PSVersionTable.PSVersion.Major -ge 4) {
+ $raw_hash = Get-FileHash -LiteralPath $path -Algorithm $algorithm
+ $hash = $raw_hash.Hash.ToLower()
+ } Else {
+ $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite);
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
+ $fp.Dispose();
+ }
+ }
+ ElseIf (Test-Path -LiteralPath $path -PathType Container)
+ {
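+        # sentinel value: "3" means the path is a directory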
+ $hash = "3";
+ }
+ Else
+ {
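+        # sentinel value: "1" means the path does not exist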
+ $hash = "1";
+ }
+ return $hash
+}
+
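+# Illustrative sketch (the path is hypothetical); returns the lowercase hex digest, or the
+# sentinel strings "1" (path missing) / "3" (directory) noted above:
+#     $checksum = Get-FileChecksum -path "C:\temp\file.txt" -algorithm "sha256"
+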
+Function Get-PendingRebootStatus
+{
+<#
+ .SYNOPSIS
+    Check if a reboot is required; if so, notify CA.
+    Returns $true if the computer has a pending reboot
+#>
+ $featureData = Invoke-CimMethod -EA Ignore -Name GetServerFeature -Namespace root\microsoft\windows\servermanager -Class MSFT_ServerManagerTasks
+ $regData = Get-ItemProperty "HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager" "PendingFileRenameOperations" -EA Ignore
+ $CBSRebootStatus = Get-ChildItem "HKLM:\\SOFTWARE\Microsoft\Windows\CurrentVersion\Component Based Servicing" -ErrorAction SilentlyContinue| Where-Object {$_.PSChildName -eq "RebootPending"}
+ if(($featureData -and $featureData.RequiresReboot) -or $regData -or $CBSRebootStatus)
+ {
+ return $True
+ }
+ else
+ {
+ return $False
+ }
+}
+
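+# Illustrative sketch: gate a reboot action on the pending state ($result wiring is assumed):
+#     if (Get-PendingRebootStatus) {
+#         $result.msg = "a reboot is pending"
+#     }
+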
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1
new file mode 100644
index 00000000..78aa4eac
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1
@@ -0,0 +1,454 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+
+Function Load-LinkUtils() {
+ $link_util = @'
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace Ansible
+{
+ public enum LinkType
+ {
+ SymbolicLink,
+ JunctionPoint,
+ HardLink
+ }
+
+ public class LinkUtilWin32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public LinkUtilWin32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+
+ public LinkUtilWin32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator LinkUtilWin32Exception(string message) { return new LinkUtilWin32Exception(message); }
+ }
+
+ public class LinkInfo
+ {
+ public LinkType Type { get; internal set; }
+ public string PrintName { get; internal set; }
+ public string SubstituteName { get; internal set; }
+ public string AbsolutePath { get; internal set; }
+ public string TargetPath { get; internal set; }
+ public string[] HardTargets { get; internal set; }
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct REPARSE_DATA_BUFFER
+ {
+ public UInt32 ReparseTag;
+ public UInt16 ReparseDataLength;
+ public UInt16 Reserved;
+ public UInt16 SubstituteNameOffset;
+ public UInt16 SubstituteNameLength;
+ public UInt16 PrintNameOffset;
+ public UInt16 PrintNameLength;
+
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = LinkUtil.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)]
+ public char[] PathBuffer;
+ }
+
+ public class LinkUtil
+ {
+ public const int MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 1024 * 16;
+
+ private const UInt32 FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
+ private const UInt32 FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000;
+
+ private const UInt32 FSCTL_GET_REPARSE_POINT = 0x000900A8;
+ private const UInt32 FSCTL_SET_REPARSE_POINT = 0x000900A4;
+ private const UInt32 FILE_DEVICE_FILE_SYSTEM = 0x00090000;
+
+ private const UInt32 IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003;
+ private const UInt32 IO_REPARSE_TAG_SYMLINK = 0xA000000C;
+
+ private const UInt32 SYMLINK_FLAG_RELATIVE = 0x00000001;
+
+ private const Int64 INVALID_HANDLE_VALUE = -1;
+
+ private const UInt32 SIZE_OF_WCHAR = 2;
+
+ private const UInt32 SYMBOLIC_LINK_FLAG_FILE = 0x00000000;
+ private const UInt32 SYMBOLIC_LINK_FLAG_DIRECTORY = 0x00000001;
+
+ [DllImport("kernel32.dll", CharSet = CharSet.Auto)]
+ private static extern SafeFileHandle CreateFile(
+ string lpFileName,
+ [MarshalAs(UnmanagedType.U4)] FileAccess dwDesiredAccess,
+ [MarshalAs(UnmanagedType.U4)] FileShare dwShareMode,
+ IntPtr lpSecurityAttributes,
+ [MarshalAs(UnmanagedType.U4)] FileMode dwCreationDisposition,
+ UInt32 dwFlagsAndAttributes,
+ IntPtr hTemplateFile);
+
+ // Used by GetReparsePointInfo()
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool DeviceIoControl(
+ SafeFileHandle hDevice,
+ UInt32 dwIoControlCode,
+ IntPtr lpInBuffer,
+ UInt32 nInBufferSize,
+ out REPARSE_DATA_BUFFER lpOutBuffer,
+ UInt32 nOutBufferSize,
+ out UInt32 lpBytesReturned,
+ IntPtr lpOverlapped);
+
+ // Used by CreateJunctionPoint()
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool DeviceIoControl(
+ SafeFileHandle hDevice,
+ UInt32 dwIoControlCode,
+ REPARSE_DATA_BUFFER lpInBuffer,
+ UInt32 nInBufferSize,
+ IntPtr lpOutBuffer,
+ UInt32 nOutBufferSize,
+ out UInt32 lpBytesReturned,
+ IntPtr lpOverlapped);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool GetVolumePathName(
+ string lpszFileName,
+ StringBuilder lpszVolumePathName,
+ ref UInt32 cchBufferLength);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern IntPtr FindFirstFileNameW(
+ string lpFileName,
+ UInt32 dwFlags,
+ ref UInt32 StringLength,
+ StringBuilder LinkName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool FindNextFileNameW(
+ IntPtr hFindStream,
+ ref UInt32 StringLength,
+ StringBuilder LinkName);
+
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool FindClose(
+ IntPtr hFindFile);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool RemoveDirectory(
+ string lpPathName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool DeleteFile(
+ string lpFileName);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool CreateSymbolicLink(
+ string lpSymlinkFileName,
+ string lpTargetFileName,
+ UInt32 dwFlags);
+
+ [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
+ private static extern bool CreateHardLink(
+ string lpFileName,
+ string lpExistingFileName,
+ IntPtr lpSecurityAttributes);
+
+ public static LinkInfo GetLinkInfo(string linkPath)
+ {
+ FileAttributes attr = File.GetAttributes(linkPath);
+ if (attr.HasFlag(FileAttributes.ReparsePoint))
+ return GetReparsePointInfo(linkPath);
+
+ if (!attr.HasFlag(FileAttributes.Directory))
+ return GetHardLinkInfo(linkPath);
+
+ return null;
+ }
+
+ public static void DeleteLink(string linkPath)
+ {
+ bool success;
+ FileAttributes attr = File.GetAttributes(linkPath);
+ if (attr.HasFlag(FileAttributes.Directory))
+ {
+ success = RemoveDirectory(linkPath);
+ }
+ else
+ {
+ success = DeleteFile(linkPath);
+ }
+
+ if (!success)
+ throw new LinkUtilWin32Exception(String.Format("Failed to delete link at {0}", linkPath));
+ }
+
+ public static void CreateLink(string linkPath, String linkTarget, LinkType linkType)
+ {
+ switch (linkType)
+ {
+ case LinkType.SymbolicLink:
+ UInt32 linkFlags;
+ FileAttributes attr = File.GetAttributes(linkTarget);
+ if (attr.HasFlag(FileAttributes.Directory))
+ linkFlags = SYMBOLIC_LINK_FLAG_DIRECTORY;
+ else
+ linkFlags = SYMBOLIC_LINK_FLAG_FILE;
+
+ if (!CreateSymbolicLink(linkPath, linkTarget, linkFlags))
+ throw new LinkUtilWin32Exception(String.Format("CreateSymbolicLink({0}, {1}, {2}) failed", linkPath, linkTarget, linkFlags));
+ break;
+ case LinkType.JunctionPoint:
+ CreateJunctionPoint(linkPath, linkTarget);
+ break;
+ case LinkType.HardLink:
+ if (!CreateHardLink(linkPath, linkTarget, IntPtr.Zero))
+ throw new LinkUtilWin32Exception(String.Format("CreateHardLink({0}, {1}) failed", linkPath, linkTarget));
+ break;
+ }
+ }
+
+ private static LinkInfo GetHardLinkInfo(string linkPath)
+ {
+ UInt32 maxPath = 260;
+ List<string> result = new List<string>();
+
+ StringBuilder sb = new StringBuilder((int)maxPath);
+ UInt32 stringLength = maxPath;
+ if (!GetVolumePathName(linkPath, sb, ref stringLength))
+ throw new LinkUtilWin32Exception("GetVolumePathName() failed");
+ string volume = sb.ToString();
+
+ stringLength = maxPath;
+ IntPtr findHandle = FindFirstFileNameW(linkPath, 0, ref stringLength, sb);
+ if (findHandle.ToInt64() != INVALID_HANDLE_VALUE)
+ {
+ try
+ {
+ do
+ {
+ string hardLinkPath = sb.ToString();
+ if (hardLinkPath.StartsWith("\\"))
+ hardLinkPath = hardLinkPath.Substring(1, hardLinkPath.Length - 1);
+
+ result.Add(Path.Combine(volume, hardLinkPath));
+ stringLength = maxPath;
+
+ } while (FindNextFileNameW(findHandle, ref stringLength, sb));
+ }
+ finally
+ {
+ FindClose(findHandle);
+ }
+ }
+
+ if (result.Count > 1)
+ return new LinkInfo
+ {
+ Type = LinkType.HardLink,
+ HardTargets = result.ToArray()
+ };
+
+ return null;
+ }
+
+ private static LinkInfo GetReparsePointInfo(string linkPath)
+ {
+ SafeFileHandle fileHandle = CreateFile(
+ linkPath,
+ FileAccess.Read,
+ FileShare.None,
+ IntPtr.Zero,
+ FileMode.Open,
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS,
+ IntPtr.Zero);
+
+ if (fileHandle.IsInvalid)
+ throw new LinkUtilWin32Exception(String.Format("CreateFile({0}) failed", linkPath));
+
+ REPARSE_DATA_BUFFER buffer = new REPARSE_DATA_BUFFER();
+ UInt32 bytesReturned;
+ try
+ {
+ if (!DeviceIoControl(
+ fileHandle,
+ FSCTL_GET_REPARSE_POINT,
+ IntPtr.Zero,
+ 0,
+ out buffer,
+ MAXIMUM_REPARSE_DATA_BUFFER_SIZE,
+ out bytesReturned,
+ IntPtr.Zero))
+ throw new LinkUtilWin32Exception(String.Format("DeviceIoControl() failed for file at {0}", linkPath));
+ }
+ finally
+ {
+ fileHandle.Dispose();
+ }
+
+ bool isRelative = false;
+ int pathOffset = 0;
+ LinkType linkType;
+ if (buffer.ReparseTag == IO_REPARSE_TAG_SYMLINK)
+ {
+ UInt32 bufferFlags = Convert.ToUInt32(buffer.PathBuffer[0]) + Convert.ToUInt32(buffer.PathBuffer[1]);
+ if (bufferFlags == SYMLINK_FLAG_RELATIVE)
+ isRelative = true;
+ pathOffset = 2;
+ linkType = LinkType.SymbolicLink;
+ }
+ else if (buffer.ReparseTag == IO_REPARSE_TAG_MOUNT_POINT)
+ {
+ linkType = LinkType.JunctionPoint;
+ }
+ else
+ {
+ string errorMessage = String.Format("Invalid Reparse Tag: {0}", buffer.ReparseTag.ToString());
+ throw new Exception(errorMessage);
+ }
+
+ string printName = new string(buffer.PathBuffer, (int)(buffer.PrintNameOffset / SIZE_OF_WCHAR) + pathOffset, (int)(buffer.PrintNameLength / SIZE_OF_WCHAR));
+ string substituteName = new string(buffer.PathBuffer, (int)(buffer.SubstituteNameOffset / SIZE_OF_WCHAR) + pathOffset, (int)(buffer.SubstituteNameLength / SIZE_OF_WCHAR));
+
+                // TODO: should we check for \??\UNC\server and convert it to the NT style \\server path?
+                // Remove the leading Windows object directory prefix \??\ from the path if present
+ string targetPath = substituteName;
+ if (targetPath.StartsWith("\\??\\"))
+ targetPath = targetPath.Substring(4, targetPath.Length - 4);
+
+ string absolutePath = targetPath;
+ if (isRelative)
+ absolutePath = Path.GetFullPath(Path.Combine(new FileInfo(linkPath).Directory.FullName, targetPath));
+
+ return new LinkInfo
+ {
+ Type = linkType,
+ PrintName = printName,
+ SubstituteName = substituteName,
+ AbsolutePath = absolutePath,
+ TargetPath = targetPath
+ };
+ }
+
+ private static void CreateJunctionPoint(string linkPath, string linkTarget)
+ {
+ // We need to create the link as a dir beforehand
+ Directory.CreateDirectory(linkPath);
+ SafeFileHandle fileHandle = CreateFile(
+ linkPath,
+ FileAccess.Write,
+ FileShare.Read | FileShare.Write | FileShare.None,
+ IntPtr.Zero,
+ FileMode.Open,
+ FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
+ IntPtr.Zero);
+
+ if (fileHandle.IsInvalid)
+ throw new LinkUtilWin32Exception(String.Format("CreateFile({0}) failed", linkPath));
+
+ try
+ {
+ string substituteName = "\\??\\" + Path.GetFullPath(linkTarget);
+ string printName = linkTarget;
+
+ REPARSE_DATA_BUFFER buffer = new REPARSE_DATA_BUFFER();
+ buffer.SubstituteNameOffset = 0;
+ buffer.SubstituteNameLength = (UInt16)(substituteName.Length * SIZE_OF_WCHAR);
+ buffer.PrintNameOffset = (UInt16)(buffer.SubstituteNameLength + 2);
+ buffer.PrintNameLength = (UInt16)(printName.Length * SIZE_OF_WCHAR);
+
+ buffer.ReparseTag = IO_REPARSE_TAG_MOUNT_POINT;
+ buffer.ReparseDataLength = (UInt16)(buffer.SubstituteNameLength + buffer.PrintNameLength + 12);
+ buffer.PathBuffer = new char[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+
+ byte[] unicodeBytes = Encoding.Unicode.GetBytes(substituteName + "\0" + printName);
+ char[] pathBuffer = Encoding.Unicode.GetChars(unicodeBytes);
+ Array.Copy(pathBuffer, buffer.PathBuffer, pathBuffer.Length);
+
+ UInt32 bytesReturned;
+ if (!DeviceIoControl(
+ fileHandle,
+ FSCTL_SET_REPARSE_POINT,
+ buffer,
+ (UInt32)(buffer.ReparseDataLength + 8),
+ IntPtr.Zero, 0,
+ out bytesReturned,
+ IntPtr.Zero))
+ throw new LinkUtilWin32Exception(String.Format("DeviceIoControl() failed to create junction point at {0} to {1}", linkPath, linkTarget));
+ }
+ finally
+ {
+ fileHandle.Dispose();
+ }
+ }
+ }
+}
+'@
+
+ # FUTURE: find a better way to get the _ansible_remote_tmp variable
+ $original_tmp = $env:TMP
+
+ $remote_tmp = $original_tmp
+ $module_params = Get-Variable -Name complex_args -ErrorAction SilentlyContinue
+ if ($module_params) {
+ if ($module_params.Value.ContainsKey("_ansible_remote_tmp") ) {
+ $remote_tmp = $module_params.Value["_ansible_remote_tmp"]
+ $remote_tmp = [System.Environment]::ExpandEnvironmentVariables($remote_tmp)
+ }
+ }
+
+ $env:TMP = $remote_tmp
+ Add-Type -TypeDefinition $link_util
+ $env:TMP = $original_tmp
+
+ # enable the SeBackupPrivilege if it is disabled
+ $state = Get-AnsiblePrivilege -Name SeBackupPrivilege
+ if ($state -eq $false) {
+ Set-AnsiblePrivilege -Name SeBackupPrivilege -Value $true
+ }
+}
+
+Function Get-Link($link_path) {
+ $link_info = [Ansible.LinkUtil]::GetLinkInfo($link_path)
+ return $link_info
+}
+
+Function Remove-Link($link_path) {
+ [Ansible.LinkUtil]::DeleteLink($link_path)
+}
+
+Function New-Link($link_path, $link_target, $link_type) {
+ if (-not (Test-Path -LiteralPath $link_target)) {
+ throw "link_target '$link_target' does not exist, cannot create link"
+ }
+
+ switch($link_type) {
+ "link" {
+ $type = [Ansible.LinkType]::SymbolicLink
+ }
+ "junction" {
+ if (Test-Path -LiteralPath $link_target -PathType Leaf) {
+ throw "cannot set the target for a junction point to a file"
+ }
+ $type = [Ansible.LinkType]::JunctionPoint
+ }
+ "hard" {
+ if (Test-Path -LiteralPath $link_target -PathType Container) {
+ throw "cannot set the target for a hard link to a directory"
+ }
+ $type = [Ansible.LinkType]::HardLink
+ }
+ default { throw "invalid link_type option $($link_type): expecting link, junction, hard" }
+ }
+ [Ansible.LinkUtil]::CreateLink($link_path, $link_target, $type)
+}
+
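+# Illustrative sketch of the expected call order (paths are hypothetical):
+#     Load-LinkUtils                                  # compiles the C# helper and enables SeBackupPrivilege
+#     New-Link -link_path "C:\app\current" -link_target "C:\app\v1.2" -link_type "junction"
+#     $info = Get-Link -link_path "C:\app\current"    # $info.Type, $info.TargetPath, ...
+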
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1
new file mode 100644
index 00000000..03cebe75
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.PrivilegeUtil.psm1
@@ -0,0 +1,99 @@
+# Copyright (c) 2018 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+#AnsibleRequires -CSharpUtil Ansible.Privilege
+
+Function Import-PrivilegeUtil {
+ <#
+ .SYNOPSIS
+ No-op, as the C# types are automatically loaded.
+ #>
+ [CmdletBinding()]
+ Param()
+ $msg = "Import-PrivilegeUtil is deprecated and no longer needed, this cmdlet will be removed in a future version"
+ if ((Get-Command -Name Add-DeprecationWarning -ErrorAction SilentlyContinue) -and (Get-Variable -Name result -ErrorAction SilentlyContinue)) {
+ Add-DeprecationWarning -obj $result.Value -message $msg -version 2.12
+ } else {
+ $module = Get-Variable -Name module -ErrorAction SilentlyContinue
+ if ($null -ne $module -and $module.Value.GetType().FullName -eq "Ansible.Basic.AnsibleModule") {
+ $module.Value.Deprecate($msg, "2.12")
+ }
+ }
+}
+
+Function Get-AnsiblePrivilege {
+ <#
+ .SYNOPSIS
+ Get the status of a privilege for the current process. This returns
+ $true - the privilege is enabled
+ $false - the privilege is disabled
+ $null - the privilege is removed from the token
+
+ If Name is not a valid privilege name, this will throw an
+ ArgumentException.
+
+ .EXAMPLE
+ Get-AnsiblePrivilege -Name SeDebugPrivilege
+ #>
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)][String]$Name
+ )
+
+ if (-not [Ansible.Privilege.PrivilegeUtil]::CheckPrivilegeName($Name)) {
+ throw [System.ArgumentException] "Invalid privilege name '$Name'"
+ }
+
+ $process_token = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess()
+ $privilege_info = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process_token)
+ if ($privilege_info.ContainsKey($Name)) {
+ $status = $privilege_info.$Name
+ return $status.HasFlag([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ } else {
+ return $null
+ }
+}
+
+Function Set-AnsiblePrivilege {
+ <#
+ .SYNOPSIS
+ Enables/Disables a privilege on the current process' token. If a privilege
+ has been removed from the process token, this will throw an
+ InvalidOperationException.
+
+ .EXAMPLE
+ # enable a privilege
+ Set-AnsiblePrivilege -Name SeCreateSymbolicLinkPrivilege -Value $true
+
+ # disable a privilege
+ Set-AnsiblePrivilege -Name SeCreateSymbolicLinkPrivilege -Value $false
+ #>
+ [CmdletBinding(SupportsShouldProcess)]
+ param(
+ [Parameter(Mandatory=$true)][String]$Name,
+ [Parameter(Mandatory=$true)][bool]$Value
+ )
+
+ $action = switch($Value) {
+ $true { "Enable" }
+ $false { "Disable" }
+ }
+
+ $current_state = Get-AnsiblePrivilege -Name $Name
+ if ($current_state -eq $Value) {
+ return # no change needs to occur
+ } elseif ($null -eq $current_state) {
+ # once a privilege is removed from a token we cannot do anything with it
+ throw [System.InvalidOperationException] "Cannot $($action.ToLower()) the privilege '$Name' as it has been removed from the token"
+ }
+
+ $process_token = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess()
+ if ($PSCmdlet.ShouldProcess($Name, "$action the privilege $Name")) {
+ $new_state = New-Object -TypeName 'System.Collections.Generic.Dictionary`2[[System.String], [System.Nullable`1[System.Boolean]]]'
+ $new_state.Add($Name, $Value)
+ [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process_token, $new_state) > $null
+ }
+}
+
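+# Illustrative sketch: enable a privilege only when it is currently disabled (a $null state
+# means the privilege was removed from the token and cannot be re-enabled):
+#     if ((Get-AnsiblePrivilege -Name SeBackupPrivilege) -eq $false) {
+#         Set-AnsiblePrivilege -Name SeBackupPrivilege -Value $true
+#     }
+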
+Export-ModuleMember -Function Import-PrivilegeUtil, Get-AnsiblePrivilege, Set-AnsiblePrivilege
+
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1
new file mode 100644
index 00000000..38c02cc2
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.SID.psm1
@@ -0,0 +1,93 @@
+# Copyright (c) 2017 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Convert-FromSID($sid) {
+ # Converts a SID to a Down-Level Logon name in the form of DOMAIN\UserName
+ # If the SID is for a local user or group then DOMAIN would be the server
+ # name.
+
+ $account_object = New-Object System.Security.Principal.SecurityIdentifier($sid)
+ try {
+ $nt_account = $account_object.Translate([System.Security.Principal.NTAccount])
+ } catch {
+ Fail-Json -obj @{} -message "failed to convert sid '$sid' to a logon name: $($_.Exception.Message)"
+ }
+
+ return $nt_account.Value
+}
+
+Function Convert-ToSID {
+ [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingEmptyCatchBlock", "", Justification="We don't care if converting to a SID fails, just that it failed or not")]
+ param($account_name)
+    # Converts an account name to a SID; it can take the following forms
+ # SID: Will just return the SID value that was passed in
+ # UPN:
+ # principal@domain (Domain users only)
+ # Down-Level Login Name
+ # DOMAIN\principal (Domain)
+ # SERVERNAME\principal (Local)
+ # .\principal (Local)
+ # NT AUTHORITY\SYSTEM (Local Service Accounts)
+ # Login Name
+ # principal (Local/Local Service Accounts)
+
+ try {
+ $sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList $account_name
+ return $sid.Value
+ } catch {}
+
+ if ($account_name -like "*\*") {
+ $account_name_split = $account_name -split "\\"
+ if ($account_name_split[0] -eq ".") {
+ $domain = $env:COMPUTERNAME
+ } else {
+ $domain = $account_name_split[0]
+ }
+ $username = $account_name_split[1]
+ } elseif ($account_name -like "*@*") {
+ $account_name_split = $account_name -split "@"
+ $domain = $account_name_split[1]
+ $username = $account_name_split[0]
+ } else {
+ $domain = $null
+ $username = $account_name
+ }
+
+ if ($domain) {
+        # searching for a local group with the server name prefixed will fail,
+        # so we need to check for this situation and only use NTAccount(String)
+ if ($domain -eq $env:COMPUTERNAME) {
+ $adsi = [ADSI]("WinNT://$env:COMPUTERNAME,computer")
+ $group = $adsi.psbase.children | Where-Object { $_.schemaClassName -eq "group" -and $_.Name -eq $username }
+ } else {
+ $group = $null
+ }
+ if ($group) {
+ $account = New-Object System.Security.Principal.NTAccount($username)
+ } else {
+ $account = New-Object System.Security.Principal.NTAccount($domain, $username)
+ }
+ } else {
+        # when in a domain, NTAccount(String) will favour domain lookups; check
+        # if the username is a local user and explicitly search on the localhost
+        # for that account
+ $adsi = [ADSI]("WinNT://$env:COMPUTERNAME,computer")
+ $user = $adsi.psbase.children | Where-Object { $_.schemaClassName -eq "user" -and $_.Name -eq $username }
+ if ($user) {
+ $account = New-Object System.Security.Principal.NTAccount($env:COMPUTERNAME, $username)
+ } else {
+ $account = New-Object System.Security.Principal.NTAccount($username)
+ }
+ }
+
+ try {
+ $account_sid = $account.Translate([System.Security.Principal.SecurityIdentifier])
+ } catch {
+ Fail-Json @{} "account_name $account_name is not a valid account, cannot get SID: $($_.Exception.Message)"
+ }
+
+ return $account_sid.Value
+}
+
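+# Illustrative round-trip sketch (the account name is just an example):
+#     $sid = Convert-ToSID -account_name "BUILTIN\Administrators"   # e.g. "S-1-5-32-544"
+#     $name = Convert-FromSID -sid $sid                             # back to DOMAIN\UserName form
+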
+# this line must stay at the bottom to ensure all defined module parts are exported
+Export-ModuleMember -Alias * -Function * -Cmdlet *
diff --git a/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1 b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
new file mode 100644
index 00000000..f346c6b6
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/Ansible.ModuleUtils.WebRequest.psm1
@@ -0,0 +1,514 @@
+# Copyright (c) 2019 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Get-AnsibleWebRequest {
+ <#
+ .SYNOPSIS
+ Creates a System.Net.WebRequest object based on common URL module options in Ansible.
+
+ .DESCRIPTION
+ Will create a WebRequest based on common input options within Ansible. This can be used manually or with
+ Invoke-WithWebRequest.
+
+ .PARAMETER Uri
+ The URI to create the web request for.
+
+ .PARAMETER Method
+    The protocol method to use; if omitted, the default value for the specified URI protocol will be used.
+
+ .PARAMETER FollowRedirects
+    Whether to follow redirect responses. This is only valid when using an HTTP URI.
+ all - Will follow all redirects
+ none - Will follow no redirects
+ safe - Will only follow redirects when GET or HEAD is used as the Method
+
+ .PARAMETER Headers
+    A hashtable or dictionary of header values to set on the request. This is only valid for an HTTP URI.
+
+ .PARAMETER HttpAgent
+    A string to set for the 'User-Agent' header. This is only valid for an HTTP URI.
+
+ .PARAMETER MaximumRedirection
+    The maximum number of redirections that will be followed. This is only valid for an HTTP URI.
+
+ .PARAMETER Timeout
+ The timeout in seconds that defines how long to wait until the request times out.
+
+ .PARAMETER ValidateCerts
+    Whether to validate SSL certificates; defaults to $true.
+
+ .PARAMETER ClientCert
+    The path to a PFX file to use for X509 authentication. This is only valid for an HTTP URI. This path can either
+    be a filesystem path (C:\folder\cert.pfx) or a PSPath to a certificate (Cert:\CurrentUser\My\<thumbprint>).
+
+ .PARAMETER ClientCertPassword
+    The password for the PFX certificate if required. This is only valid for an HTTP URI.
+
+ .PARAMETER ForceBasicAuth
+    Whether to set the Basic auth header on the first request instead of when required. This is only valid for an
+    HTTP URI.
+
+ .PARAMETER UrlUsername
+ The username to use for authenticating with the target.
+
+ .PARAMETER UrlPassword
+ The password to use for authenticating with the target.
+
+ .PARAMETER UseDefaultCredential
+ Whether to use the current user's credentials if available. This will only work when using Become, using SSH with
+ password auth, or WinRM with CredSSP or Kerberos with credential delegation.
+
+ .PARAMETER UseProxy
+ Whether to use the default proxy defined in IE (WinINet) for the user or set no proxy at all. This should not
+ be set to True when ProxyUrl is also defined.
+
+ .PARAMETER ProxyUrl
+    An explicit proxy server to use for the request instead of relying on the default proxy in IE. This is only
+    valid for an HTTP URI.
+
+ .PARAMETER ProxyUsername
+ An optional username to use for proxy authentication.
+
+ .PARAMETER ProxyPassword
+ The password for ProxyUsername.
+
+ .PARAMETER ProxyUseDefaultCredential
+ Whether to use the current user's credentials for proxy authentication if available. This will only work when
+ using Become, using SSH with password auth, or WinRM with CredSSP or Kerberos with credential delegation.
+
+ .PARAMETER Module
+ The AnsibleBasic module that can be used as a backup parameter source or a way to return warnings back to the
+ Ansible controller.
+
+ .EXAMPLE
+ $spec = @{
+ options = @{}
+ }
+    $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+
+ $web_request = Get-AnsibleWebRequest -Module $module
+ #>
+ [CmdletBinding()]
+ [OutputType([System.Net.WebRequest])]
+ Param (
+ [Alias("url")]
+ [System.Uri]
+ $Uri,
+
+ [System.String]
+ $Method,
+
+ [Alias("follow_redirects")]
+ [ValidateSet("all", "none", "safe")]
+ [System.String]
+ $FollowRedirects = "safe",
+
+ [System.Collections.IDictionary]
+ $Headers,
+
+ [Alias("http_agent")]
+ [System.String]
+ $HttpAgent = "ansible-httpget",
+
+ [Alias("maximum_redirection")]
+ [System.Int32]
+ $MaximumRedirection = 50,
+
+ [System.Int32]
+ $Timeout = 30,
+
+ [Alias("validate_certs")]
+ [System.Boolean]
+ $ValidateCerts = $true,
+
+ # Credential params
+ [Alias("client_cert")]
+ [System.String]
+ $ClientCert,
+
+ [Alias("client_cert_password")]
+ [System.String]
+ $ClientCertPassword,
+
+ [Alias("force_basic_auth")]
+ [Switch]
+ $ForceBasicAuth,
+
+ [Alias("url_username")]
+ [System.String]
+ $UrlUsername,
+
+ [Alias("url_password")]
+ [System.String]
+ $UrlPassword,
+
+ [Alias("use_default_credential")]
+ [Switch]
+ $UseDefaultCredential,
+
+ # Proxy params
+ [Alias("use_proxy")]
+ [System.Boolean]
+ $UseProxy = $true,
+
+ [Alias("proxy_url")]
+ [System.String]
+ $ProxyUrl,
+
+ [Alias("proxy_username")]
+ [System.String]
+ $ProxyUsername,
+
+ [Alias("proxy_password")]
+ [System.String]
+ $ProxyPassword,
+
+ [Alias("proxy_use_default_credential")]
+ [Switch]
+ $ProxyUseDefaultCredential,
+
+ [ValidateScript({ $_.GetType().FullName -eq 'Ansible.Basic.AnsibleModule' })]
+ [System.Object]
+ $Module
+ )
+
+ # Set module options for parameters unless they were explicitly passed in.
+ if ($Module) {
+ foreach ($param in $PSCmdlet.MyInvocation.MyCommand.Parameters.GetEnumerator()) {
+ if ($PSBoundParameters.ContainsKey($param.Key)) {
+                # Was set explicitly, so we want to use that value
+ continue
+ }
+
+ foreach ($alias in @($Param.Key) + $param.Value.Aliases) {
+ if ($Module.Params.ContainsKey($alias)) {
+ $var_value = $Module.Params.$alias -as $param.Value.ParameterType
+ Set-Variable -Name $param.Key -Value $var_value
+ break
+ }
+ }
+ }
+ }
+
+ # Disable certificate validation if requested
+ # FUTURE: set this on ServerCertificateValidationCallback of the HttpWebRequest once .NET 4.5 is the minimum
+ if (-not $ValidateCerts) {
+ [System.Net.ServicePointManager]::ServerCertificateValidationCallback = { $true }
+ }
+
+    # Enable TLS1.1/TLS1.2 if they're available but disabled (e.g. on .NET 4.5)
+ $security_protocols = [System.Net.ServicePointManager]::SecurityProtocol -bor [System.Net.SecurityProtocolType]::SystemDefault
+ if ([System.Net.SecurityProtocolType].GetMember("Tls11").Count -gt 0) {
+ $security_protocols = $security_protocols -bor [System.Net.SecurityProtocolType]::Tls11
+ }
+ if ([System.Net.SecurityProtocolType].GetMember("Tls12").Count -gt 0) {
+ $security_protocols = $security_protocols -bor [System.Net.SecurityProtocolType]::Tls12
+ }
+ [System.Net.ServicePointManager]::SecurityProtocol = $security_protocols
+
+ $web_request = [System.Net.WebRequest]::Create($Uri)
+ if ($Method) {
+ $web_request.Method = $Method
+ }
+ $web_request.Timeout = $Timeout * 1000
+
+ if ($UseDefaultCredential -and $web_request -is [System.Net.HttpWebRequest]) {
+ $web_request.UseDefaultCredentials = $true
+ } elseif ($UrlUsername) {
+ if ($ForceBasicAuth) {
+ $auth_value = [System.Convert]::ToBase64String([System.Text.Encoding]::ASCII.GetBytes(("{0}:{1}" -f $UrlUsername, $UrlPassword)))
+ $web_request.Headers.Add("Authorization", "Basic $auth_value")
+ } else {
+ $credential = New-Object -TypeName System.Net.NetworkCredential -ArgumentList $UrlUsername, $UrlPassword
+ $web_request.Credentials = $credential
+ }
+ }
+
+ if ($ClientCert) {
+ # Expecting either a filepath or PSPath (Cert:\CurrentUser\My\<thumbprint>)
+ $cert = Get-Item -LiteralPath $ClientCert -ErrorAction SilentlyContinue
+ if ($null -eq $cert) {
+ Write-Error -Message "Client certificate '$ClientCert' does not exist" -Category ObjectNotFound
+ return
+ }
+
+ $crypto_ns = 'System.Security.Cryptography.X509Certificates'
+ if ($cert.PSProvider.Name -ne 'Certificate') {
+ try {
+ $cert = New-Object -TypeName "$crypto_ns.X509Certificate2" -ArgumentList @(
+ $ClientCert, $ClientCertPassword
+ )
+ } catch [System.Security.Cryptography.CryptographicException] {
+ Write-Error -Message "Failed to read client certificate at '$ClientCert'" -Exception $_.Exception -Category SecurityError
+ return
+ }
+ }
+ $web_request.ClientCertificates = New-Object -TypeName "$crypto_ns.X509Certificate2Collection" -ArgumentList @(
+ $cert
+ )
+ }
+
+ if (-not $UseProxy) {
+ $proxy = $null
+ } elseif ($ProxyUrl) {
+ $proxy = New-Object -TypeName System.Net.WebProxy -ArgumentList $ProxyUrl, $true
+ } else {
+ $proxy = $web_request.Proxy
+ }
+
+    # $web_request.Proxy may return $null for an FTP web request. We only set the credentials if we have an actual
+    # proxy to work with, otherwise just ignore the credentials property.
+ if ($null -ne $proxy) {
+ if ($ProxyUseDefaultCredential) {
+            # Weird hack: $web_request.Proxy returns an IWebProxy object which only guarantees the Credentials
+            # property. We cannot set UseDefaultCredentials, so we just set the Credentials to the
+            # DefaultCredentials in the CredentialCache, which does the same thing.
+ $proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials
+ } elseif ($ProxyUsername) {
+ $proxy.Credentials = New-Object -TypeName System.Net.NetworkCredential -ArgumentList @(
+ $ProxyUsername, $ProxyPassword
+ )
+ } else {
+ $proxy.Credentials = $null
+ }
+ }
+
+ $web_request.Proxy = $proxy
+
+ # Some parameters only apply when dealing with a HttpWebRequest
+ if ($web_request -is [System.Net.HttpWebRequest]) {
+ if ($Headers) {
+ foreach ($header in $Headers.GetEnumerator()) {
+ switch ($header.Key) {
+ Accept { $web_request.Accept = $header.Value }
+ Connection { $web_request.Connection = $header.Value }
+ Content-Length { $web_request.ContentLength = $header.Value }
+ Content-Type { $web_request.ContentType = $header.Value }
+ Expect { $web_request.Expect = $header.Value }
+ Date { $web_request.Date = $header.Value }
+ Host { $web_request.Host = $header.Value }
+ If-Modified-Since { $web_request.IfModifiedSince = $header.Value }
+ Range { $web_request.AddRange($header.Value) }
+ Referer { $web_request.Referer = $header.Value }
+ Transfer-Encoding {
+ $web_request.SendChunked = $true
+ $web_request.TransferEncoding = $header.Value
+ }
+ User-Agent { continue }
+ default { $web_request.Headers.Add($header.Key, $header.Value) }
+ }
+ }
+ }
+
+        # For backwards compatibility we need to support setting the User-Agent if the header was set in the task.
+        # We just need to make sure that if an explicit http_agent module option was set then that takes priority.
+ if ($Headers -and $Headers.ContainsKey("User-Agent")) {
+ if ($HttpAgent -eq $ansible_web_request_options.http_agent.default) {
+ $HttpAgent = $Headers['User-Agent']
+ } elseif ($null -ne $Module) {
+            $Module.Warn("The 'User-Agent' header and the 'http_agent' were set, using the 'http_agent' for web request")
+ }
+ }
+ $web_request.UserAgent = $HttpAgent
+
+ switch ($FollowRedirects) {
+ none { $web_request.AllowAutoRedirect = $false }
+ safe {
+ if ($web_request.Method -in @("GET", "HEAD")) {
+ $web_request.AllowAutoRedirect = $true
+ } else {
+ $web_request.AllowAutoRedirect = $false
+ }
+ }
+ all { $web_request.AllowAutoRedirect = $true }
+ }
+
+ if ($MaximumRedirection -eq 0) {
+ $web_request.AllowAutoRedirect = $false
+ } else {
+ $web_request.MaximumAutomaticRedirections = $MaximumRedirection
+ }
+ }
+
+ return $web_request
+}
+
+Function Invoke-WithWebRequest {
+ <#
+ .SYNOPSIS
+ Invokes a ScriptBlock with the WebRequest.
+
+ .DESCRIPTION
+    Invokes the ScriptBlock and handles extra information like accessing the response stream, closing those streams
+    safely, as well as setting common module return values.
+
+ .PARAMETER Module
+    The Ansible.Basic module to set the return values for. This will set the following return values:
+ elapsed - The total time, in seconds, that it took to send the web request and process the response
+ msg - The human readable description of the response status code
+ status_code - An int that is the response status code
+
+ .PARAMETER Request
+ The System.Net.WebRequest to call. This can either be manually crafted or created with Get-AnsibleWebRequest.
+
+ .PARAMETER Script
+ The ScriptBlock to invoke during the web request. This ScriptBlock should take in the params
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+    This scriptblock should manage the response based on what it needs to do.
+
+ .PARAMETER Body
+ An optional Stream to send to the target during the request.
+
+ .PARAMETER IgnoreBadResponse
+    By default a WebException will be raised for a non-2xx status code and the Script will not be invoked. This
+    parameter can be set to process all responses regardless of the status code.
+
+ .EXAMPLE Basic module that downloads a file
+ $spec = @{
+ options = @{
+ path = @{ type = "path"; required = $true }
+ }
+ }
+    $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+
+ $web_request = Get-AnsibleWebRequest -Module $module
+
+ Invoke-WithWebRequest -Module $module -Request $web_request -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $fs = [System.IO.File]::Create($module.Params.path)
+ try {
+ $Stream.CopyTo($fs)
+ $fs.Flush()
+ } finally {
+ $fs.Dispose()
+ }
+ }
+ #>
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [System.Object]
+ [ValidateScript({ $_.GetType().FullName -eq 'Ansible.Basic.AnsibleModule' })]
+ $Module,
+
+ [Parameter(Mandatory=$true)]
+ [System.Net.WebRequest]
+ $Request,
+
+ [Parameter(Mandatory=$true)]
+ [ScriptBlock]
+ $Script,
+
+ [AllowNull()]
+ [System.IO.Stream]
+ $Body,
+
+ [Switch]
+ $IgnoreBadResponse
+ )
+
+ $start = Get-Date
+ if ($null -ne $Body) {
+ $request_st = $Request.GetRequestStream()
+ try {
+ $Body.CopyTo($request_st)
+ $request_st.Flush()
+ } finally {
+ $request_st.Close()
+ }
+ }
+
+ try {
+ try {
+ $web_response = $Request.GetResponse()
+ } catch [System.Net.WebException] {
+ # A WebResponse with a status code not in the 200 range will raise a WebException. We check if the
+ # exception raised contains the actual response and continue on if IgnoreBadResponse is set. We also
+ # make sure we set the status_code return value on the Module object if possible
+
+ if ($_.Exception.PSObject.Properties.Name -match "Response") {
+ $web_response = $_.Exception.Response
+
+ if (-not $IgnoreBadResponse -or $null -eq $web_response) {
+ $Module.Result.msg = $_.Exception.StatusDescription
+ $Module.Result.status_code = $_.Exception.Response.StatusCode
+ throw $_
+ }
+ } else {
+ throw $_
+ }
+ }
+
+ if ($Request.RequestUri.IsFile) {
+ # A FileWebResponse won't have these properties set
+ $Module.Result.msg = "OK"
+ $Module.Result.status_code = 200
+ } else {
+ $Module.Result.msg = $web_response.StatusDescription
+ $Module.Result.status_code = $web_response.StatusCode
+ }
+
+ $response_stream = $web_response.GetResponseStream()
+ try {
+ # Invoke the ScriptBlock and pass in WebResponse and ResponseStream
+ &$Script -Response $web_response -Stream $response_stream
+ } finally {
+ $response_stream.Dispose()
+ }
+ } finally {
+ if ($web_response) {
+ $web_response.Close()
+ }
+ $Module.Result.elapsed = ((Get-date) - $start).TotalSeconds
+ }
+}
+
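+# Illustrative sketch combining the two helpers above (the URL is hypothetical; $module is
+# assumed to be an Ansible.Basic.AnsibleModule instance):
+#     $request = Get-AnsibleWebRequest -Uri "https://example.com/file.bin" -Timeout 60
+#     Invoke-WithWebRequest -Module $module -Request $request -Script {
+#         Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+#         $module.Result.content_length = $Response.ContentLength
+#     }
+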
+Function Get-AnsibleWebRequestSpec {
+ <#
+ .SYNOPSIS
+ Used by modules to get the argument spec fragment for AnsibleModule.
+
+    .EXAMPLE
+ $spec = @{
+ options = @{}
+ }
+ $module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+ #>
+ @{ options = $ansible_web_request_options }
+}
+
+# See lib/ansible/plugins/doc_fragments/url_windows.py
+# Kept here for backwards compat as this variable was added in Ansible 2.9. Ultimately this util should be removed
+# once the deprecation period has ended.
+$ansible_web_request_options = @{
+ method = @{ type="str" }
+ follow_redirects = @{ type="str"; choices=@("all","none","safe"); default="safe" }
+ headers = @{ type="dict" }
+ http_agent = @{ type="str"; default="ansible-httpget" }
+ maximum_redirection = @{ type="int"; default=50 }
+ timeout = @{ type="int"; default=30 } # Was defaulted to 10 in win_get_url but 30 in win_uri so we use 30
+ validate_certs = @{ type="bool"; default=$true }
+
+ # Credential options
+ client_cert = @{ type="str" }
+ client_cert_password = @{ type="str"; no_log=$true }
+ force_basic_auth = @{ type="bool"; default=$false }
+ url_username = @{ type="str" }
+ url_password = @{ type="str"; no_log=$true }
+ use_default_credential = @{ type="bool"; default=$false }
+
+ # Proxy options
+ use_proxy = @{ type="bool"; default=$true }
+ proxy_url = @{ type="str" }
+ proxy_username = @{ type="str" }
+ proxy_password = @{ type="str"; no_log=$true }
+ proxy_use_default_credential = @{ type="bool"; default=$false }
+}
+
+$export_members = @{
+ Function = "Get-AnsibleWebRequest", "Get-AnsibleWebRequestSpec", "Invoke-WithWebRequest"
+ Variable = "ansible_web_request_options"
+}
+Export-ModuleMember @export_members
diff --git a/lib/ansible/module_utils/powershell/__init__.py b/lib/ansible/module_utils/powershell/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/module_utils/powershell/__init__.py
diff --git a/lib/ansible/module_utils/pycompat24.py b/lib/ansible/module_utils/pycompat24.py
new file mode 100644
index 00000000..0c050dff
--- /dev/null
+++ b/lib/ansible/module_utils/pycompat24.py
@@ -0,0 +1,91 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2015, Marius Gedminas
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+
+def get_exception():
+ """Get the current exception.
+
+ This code needs to work on Python 2.4 through 3.x, so we cannot use
+ "except Exception, e:" (SyntaxError on Python 3.x) nor
+ "except Exception as e:" (SyntaxError on Python 2.4-2.5).
+ Instead we must use ::
+
+ except Exception:
+ e = get_exception()
+
+ """
+ return sys.exc_info()[1]
+
+
+try:
+ # Python 2.6+
+ from ast import literal_eval
+except ImportError:
+    # A replacement for literal_eval that works with Python 2.4, from:
+    # https://mail.python.org/pipermail/python-list/2009-September/551880.html
+    # which is essentially a cut/paste from an earlier (2.6) version of Python's
+    # ast.py
+ from compiler import ast, parse
+ from ansible.module_utils.six import binary_type, integer_types, string_types, text_type
+
+ def literal_eval(node_or_string):
+ """
+ Safely evaluate an expression node or a string containing a Python
+ expression. The string or node provided may only consist of the following
+ Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
+ and None.
+ """
+ _safe_names = {'None': None, 'True': True, 'False': False}
+ if isinstance(node_or_string, string_types):
+ node_or_string = parse(node_or_string, mode='eval')
+ if isinstance(node_or_string, ast.Expression):
+ node_or_string = node_or_string.node
+
+ def _convert(node):
+ if isinstance(node, ast.Const) and isinstance(node.value, (text_type, binary_type, float, complex) + integer_types):
+ return node.value
+ elif isinstance(node, ast.Tuple):
+ return tuple(map(_convert, node.nodes))
+ elif isinstance(node, ast.List):
+ return list(map(_convert, node.nodes))
+ elif isinstance(node, ast.Dict):
+ return dict((_convert(k), _convert(v)) for k, v in node.items())
+ elif isinstance(node, ast.Name):
+ if node.name in _safe_names:
+ return _safe_names[node.name]
+ elif isinstance(node, ast.UnarySub):
+ return -_convert(node.expr) # pylint: disable=invalid-unary-operand-type
+ raise ValueError('malformed string')
+ return _convert(node_or_string)
+
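+# Illustrative sketch: literal_eval safely parses literal expressions without executing
+# arbitrary code (same behaviour on the Python 2.4 fallback and the stdlib version):
+#     literal_eval("{'a': 1, 'b': [True, None]}")   # -> {'a': 1, 'b': [True, None]}
+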
+__all__ = ('get_exception', 'literal_eval')
diff --git a/lib/ansible/module_utils/service.py b/lib/ansible/module_utils/service.py
new file mode 100644
index 00000000..7369bd78
--- /dev/null
+++ b/lib/ansible/module_utils/service.py
@@ -0,0 +1,274 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) Ansible Inc, 2016
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import pickle
+import platform
+import select
+import shlex
+import subprocess
+import traceback
+
+from ansible.module_utils.six import PY2, b
+from ansible.module_utils._text import to_bytes, to_text
+
+
+def sysv_is_enabled(name, runlevel=None):
+ '''
+ This function will check if the service name supplied
+ is enabled in any of the sysv runlevels
+
+ :arg name: name of the service to test for
+ :kw runlevel: runlevel to check (default: None)
+ '''
+ if runlevel:
+ if not os.path.isdir('/etc/rc0.d/'):
+ return bool(glob.glob('/etc/init.d/rc%s.d/S??%s' % (runlevel, name)))
+ return bool(glob.glob('/etc/rc%s.d/S??%s' % (runlevel, name)))
+ else:
+ if not os.path.isdir('/etc/rc0.d/'):
+ return bool(glob.glob('/etc/init.d/rc?.d/S??%s' % name))
+ return bool(glob.glob('/etc/rc?.d/S??%s' % name))
+
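+# Illustrative sketch (the service name is hypothetical):
+#     sysv_is_enabled('apache2', runlevel=3)   # looks for /etc/rc3.d/S??apache2 (or the init.d/rc3.d variant)
+#     sysv_is_enabled('apache2')               # looks in any runlevel directory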
+
+def get_sysv_script(name):
+ '''
+ This function will return the expected path for an init script
+ corresponding to the service name supplied.
+
+ :arg name: name or path of the service to test for
+ '''
+ if name.startswith('/'):
+ result = name
+ else:
+ result = '/etc/init.d/%s' % name
+
+ return result
+
+
+def sysv_exists(name):
+ '''
+ This function will return True or False depending on
+ the existence of an init script corresponding to the service name supplied.
+
+ :arg name: name of the service to test for
+ '''
+ return os.path.exists(get_sysv_script(name))
+
+
+def get_ps(module, pattern):
+ '''
+ Last resort to find a service by trying to match pattern to programs in memory
+ '''
+ found = False
+ if platform.system() == 'SunOS':
+ flags = '-ef'
+ else:
+ flags = 'auxww'
+ psbin = module.get_bin_path('ps', True)
+
+ (rc, psout, pserr) = module.run_command([psbin, flags])
+ if rc == 0:
+ for line in psout.splitlines():
+ if pattern in line:
+                # FIXME: should add logic to prevent matching 'self', though that should be extremely rare
+ found = True
+ break
+ return found
+
+
+def fail_if_missing(module, found, service, msg=''):
+ '''
+    This function will return an error or exit gracefully depending on check mode status
+    and whether the service is missing or not.
+
+    :arg module: is an AnsibleModule object, used for its utility methods
+    :arg found: boolean indicating if the service was found or not
+ :arg service: name of service
+ :kw msg: extra info to append to error/success msg when missing
+ '''
+ if not found:
+ module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
+
+
+def fork_process():
+ '''
+ This function performs the double fork process to detach from the
+ parent process and execute.
+ '''
+ pid = os.fork()
+
+ if pid == 0:
+ # Set stdin/stdout/stderr to /dev/null
+ fd = os.open(os.devnull, os.O_RDWR)
+
+ # clone stdin/out/err
+ for num in range(3):
+ if fd != num:
+ os.dup2(fd, num)
+
+ # close otherwise
+ if fd not in range(3):
+ os.close(fd)
+
+ # Make us a daemon
+ pid = os.fork()
+
+ # end if not in child
+ if pid > 0:
+ os._exit(0)
+
+ # get new process session and detach
+ sid = os.setsid()
+ if sid == -1:
+ raise Exception("Unable to detach session while daemonizing")
+
+ # avoid possible problems with cwd being removed
+ os.chdir("/")
+
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+
+ return pid
+
+
+def daemonize(module, cmd):
+ '''
+ Execute a command while detaching as a daemon, returns rc, stdout, and stderr.
+
+    :arg module: is an AnsibleModule object, used for its utility methods
+ :arg cmd: is a list or string representing the command and options to run
+
+ This is complex because daemonization is hard for people.
+ What we do is daemonize a part of this module, the daemon runs the command,
+ picks up the return code and output, and returns it to the main process.
+ '''
+
+ # init some vars
+ chunk = 4096 # FIXME: pass in as arg?
+ errors = 'surrogate_or_strict'
+
+ # start it!
+ try:
+ pipe = os.pipe()
+ pid = fork_process()
+    except OSError as e:
+        module.fail_json(msg="Error while attempting to fork: %s" % to_text(e), exception=traceback.format_exc())
+ except Exception as exc:
+ module.fail_json(msg=to_text(exc), exception=traceback.format_exc())
+
+ # we don't do any locking as this should be a unique module/process
+ if pid == 0:
+ os.close(pipe[0])
+
+ # if command is string deal with py2 vs py3 conversions for shlex
+ if not isinstance(cmd, list):
+ if PY2:
+ cmd = shlex.split(to_bytes(cmd, errors=errors))
+ else:
+ cmd = shlex.split(to_text(cmd, errors=errors))
+
+ # make sure we always use byte strings
+ run_cmd = []
+ for c in cmd:
+ run_cmd.append(to_bytes(c, errors=errors))
+
+ # execute the command in forked process
+ p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
+ fds = [p.stdout, p.stderr]
+
+        # loop reading output till it's done
+ output = {p.stdout: b(""), p.stderr: b("")}
+ while fds:
+ rfd, wfd, efd = select.select(fds, [], fds, 1)
+ if (rfd + wfd + efd) or p.poll():
+ for out in fds:
+ if out in rfd:
+ data = os.read(out.fileno(), chunk)
+ if not data:
+ fds.remove(out)
+ output[out] += b(data)
+
+ # even after fds close, we might want to wait for pid to die
+ p.wait()
+
+        # Return pickled [rc, stdout, stderr] data to the parent over the pipe
+ return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
+ os.write(pipe[1], to_bytes(return_data, errors=errors))
+
+ # clean up
+ os.close(pipe[1])
+ os._exit(0)
+
+ elif pid == -1:
+ module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")
+
+ else:
+ # in parent
+ os.close(pipe[1])
+ os.waitpid(pid, 0)
+
+ # Grab response data after child finishes
+ return_data = b("")
+ while True:
+ rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
+ if pipe[0] in rfd:
+ data = os.read(pipe[0], chunk)
+ if not data:
+ break
+ return_data += b(data)
+
+ # Note: no need to specify encoding on py3 as this module sends the
+ # pickle to itself (thus same python interpreter so we aren't mixing
+ # py2 and py3)
+ return pickle.loads(to_bytes(return_data, errors=errors))
+
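+# Illustrative sketch of daemonize() (the command is hypothetical; module is an AnsibleModule):
+#     rc, out, err = daemonize(module, '/usr/sbin/mydaemon --foreground')
+#     if rc != 0:
+#         module.fail_json(msg='failed to daemonize command: %s' % err)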
+
+def check_ps(module, pattern):
+
+ # Set ps flags
+ if platform.system() == 'SunOS':
+ psflags = '-ef'
+ else:
+ psflags = 'auxww'
+
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ (rc, out, err) = module.run_command('%s %s' % (psbin, psflags))
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ for line in out.split('\n'):
+ if pattern in line:
+ return True
+ return False
diff --git a/lib/ansible/module_utils/six/__init__.py b/lib/ansible/module_utils/six/__init__.py
new file mode 100644
index 00000000..d2d9a45a
--- /dev/null
+++ b/lib/ansible/module_utils/six/__init__.py
@@ -0,0 +1,962 @@
+# This code is strewn with things that are not defined on Python3 (unicode,
+# long, etc) but they are all shielded by version checks. This is also an
+# upstream vendored file that we're not going to modify on our own
+# pylint: disable=undefined-variable
+
+# Copyright (c) 2010-2018 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+# The following makes it easier for us to script updates of the bundled code. It is not part of
+# upstream six
+_BUNDLED_METADATA = {"pypi_name": "six", "version": "1.12.0"}
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.12.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ try:
+ if from_value is None:
+ raise value
+ raise value from from_value
+ finally:
+ value = None
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
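
Both metaclass helpers are easiest to see side by side. A short usage sketch (Meta and the class names are hypothetical; the import path assumes this file's location under ansible.module_utils):

    from ansible.module_utils.six import add_metaclass, with_metaclass

    class Meta(type):
        pass

    # with_metaclass: the temporary_class trick above means Meta is applied
    # exactly once, when the real class is created, on Python 2 and 3 alike.
    class ViaBase(with_metaclass(Meta, object)):
        pass

    # add_metaclass: re-creates the decorated class with Meta as its metaclass.
    @add_metaclass(Meta)
    class ViaDecorator(object):
        pass

    assert type(ViaBase) is Meta
    assert type(ViaDecorator) is Meta
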
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
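
The three ensure_* helpers differ only in their target type. A quick illustration of the Python 3 behaviour (a sketch; UTF-8 defaults assumed):

    from ansible.module_utils.six import ensure_binary, ensure_str, ensure_text

    assert ensure_binary(u'caf\xe9') == b'caf\xc3\xa9'  # text -> bytes
    assert ensure_text(b'caf\xc3\xa9') == u'caf\xe9'    # bytes -> text
    assert ensure_str(b'abc') == 'abc'                  # native str on Python 3
    # any non-string input raises TypeError in all three helpers
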
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
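
With the meta path importer registered, six.moves behaves like a real package whose submodules resolve lazily to the right stdlib location for the running interpreter. A brief sketch of typical use:

    # both imports work unchanged on Python 2 and Python 3
    from ansible.module_utils.six.moves import configparser
    from ansible.module_utils.six.moves.urllib.parse import urlparse

    parts = urlparse('https://example.com/path?q=1')
    print(parts.netloc)  # example.com
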
diff --git a/lib/ansible/module_utils/splitter.py b/lib/ansible/module_utils/splitter.py
new file mode 100644
index 00000000..c170b1cf
--- /dev/null
+++ b/lib/ansible/module_utils/splitter.py
@@ -0,0 +1,219 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def _get_quote_state(token, quote_char):
+ '''
+ the goal of this function is to determine if the quoted string
+ is unterminated, in which case it needs to be put back together
+ '''
+ # the char before the current one, used to see if
+ # the current character is escaped
+ prev_char = None
+ for idx, cur_char in enumerate(token):
+ if idx > 0:
+ prev_char = token[idx - 1]
+ if cur_char in '"\'' and prev_char != '\\':
+ if quote_char:
+ if cur_char == quote_char:
+ quote_char = None
+ else:
+ quote_char = cur_char
+ return quote_char
+
+
+def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
+ '''
+ this function counts the number of opening/closing blocks for a
+ given opening/closing type and adjusts the current depth for that
+ block based on the difference
+ '''
+ num_open = token.count(open_token)
+ num_close = token.count(close_token)
+ if num_open != num_close:
+ cur_depth += (num_open - num_close)
+ if cur_depth < 0:
+ cur_depth = 0
+ return cur_depth
+
+
+def split_args(args):
+ '''
+ Splits args on whitespace, but intelligently reassembles
+ those that may have been split over a jinja2 block or quotes.
+
+ When used in a remote module, we won't ever have to be concerned about
+ jinja2 blocks; however, this function is also used in the core portions
+ before the args are templated.
+
+ example input: a=b c="foo bar"
+ example output: ['a=b', 'c="foo bar"']
+
+ Basically this is a variation of shlex that has some more intelligence
+ about how Ansible needs to use it.
+ '''
+
+ # the list of params parsed out of the arg string
+ # this is going to be the result value when we are done
+ params = []
+
+ # here we encode the args, so we have a uniform charset to
+ # work with, and split on white space
+ args = args.strip()
+ try:
+ args = args.encode('utf-8')
+ do_decode = True
+ except UnicodeDecodeError:
+ do_decode = False
+ items = args.split('\n')
+
+ # iterate over the tokens, and reassemble any that may have been
+ # split on a space inside a jinja2 block.
+ # ex if tokens are "{{", "foo", "}}" these go together
+
+ # These variables are used
+ # to keep track of the state of the parsing, since blocks and quotes
+ # may be nested within each other.
+
+ quote_char = None
+ inside_quotes = False
+ print_depth = 0 # used to count nested jinja2 {{ }} blocks
+ block_depth = 0 # used to count nested jinja2 {% %} blocks
+ comment_depth = 0 # used to count nested jinja2 {# #} blocks
+
+ # now we loop over each split chunk, coalescing tokens if the white space
+ # split occurred within quotes or a jinja2 block of some kind
+ for itemidx, item in enumerate(items):
+
+ # we split on spaces and newlines separately, so that we
+ # can tell which character we split on for reassembly
+ # inside quotation characters
+ tokens = item.strip().split(' ')
+
+ line_continuation = False
+ for idx, token in enumerate(tokens):
+
+ # if we hit a line continuation character, but
+ # we're not inside quotes, ignore it and continue
+ # on to the next token while setting a flag
+ if token == '\\' and not inside_quotes:
+ line_continuation = True
+ continue
+
+ # store the previous quoting state for checking later
+ was_inside_quotes = inside_quotes
+ quote_char = _get_quote_state(token, quote_char)
+ inside_quotes = quote_char is not None
+
+ # multiple conditions may append a token to the list of params,
+ # so we keep track with this flag to make sure it only happens once
+ # append means add to the end of the list, don't append means concatenate
+ # it to the end of the last token
+ appended = False
+
+ # if we're inside quotes now, but weren't before, append the token
+ # to the end of the list, since we'll tack on more to it later
+ # otherwise, if we're inside any jinja2 block, inside quotes, or we were
+ # inside quotes (but aren't now) concat this token to the last param
+ if inside_quotes and not was_inside_quotes:
+ params.append(token)
+ appended = True
+ elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
+ if idx == 0 and not inside_quotes and was_inside_quotes:
+ params[-1] = "%s%s" % (params[-1], token)
+ elif len(tokens) > 1:
+ spacer = ''
+ if idx > 0:
+ spacer = ' '
+ params[-1] = "%s%s%s" % (params[-1], spacer, token)
+ else:
+ spacer = ''
+ if not params[-1].endswith('\n') and idx == 0:
+ spacer = '\n'
+ params[-1] = "%s%s%s" % (params[-1], spacer, token)
+ appended = True
+
+ # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
+ # and may append the current token to the params (if we haven't previously done so)
+ prev_print_depth = print_depth
+ print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
+ if print_depth != prev_print_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_block_depth = block_depth
+ block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
+ if block_depth != prev_block_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_comment_depth = comment_depth
+ comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
+ if comment_depth != prev_comment_depth and not appended:
+ params.append(token)
+ appended = True
+
+ # finally, if we're at zero depth for all blocks and not inside quotes, and have not
+ # yet appended anything to the list of params, we do so now
+ if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
+ params.append(token)
+
+ # if this was the last token in the list, and we have more than
+ # one item (meaning we split on newlines), add a newline back here
+ # to preserve the original structure
+ if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
+ if not params[-1].endswith('\n') or item == '':
+ params[-1] += '\n'
+
+ # always clear the line continuation flag
+ line_continuation = False
+
+ # If we're done and things are not at zero depth or we're still inside quotes,
+ # raise an error to indicate that the args were unbalanced
+ if print_depth or block_depth or comment_depth or inside_quotes:
+ raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
+
+ # finally, we decode each param back to the unicode it was in the arg string
+ if do_decode:
+ params = [x.decode('utf-8') for x in params]
+
+ return params
+
+
+def is_quoted(data):
+ return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
+
+
+def unquote(data):
+ ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
+ if is_quoted(data):
+ return data[1:-1]
+ return data
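
A quick illustration of split_args and unquote; the first case is the docstring's own example, the second shows a jinja2 block being reassembled after the whitespace split:

    from ansible.module_utils.splitter import split_args, unquote

    print(split_args('a=b c="foo bar"'))
    # ['a=b', 'c="foo bar"']

    print(split_args('msg={{ item | upper }}'))
    # ['msg={{ item | upper }}']

    print(unquote('"foo bar"'))
    # foo bar
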
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
new file mode 100644
index 00000000..2502df09
--- /dev/null
+++ b/lib/ansible/module_utils/urls.py
@@ -0,0 +1,1721 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+#
+# The match_hostname function and supporting code is under the terms and
+# conditions of the Python Software Foundation License. They were taken from
+# the Python3 standard library and adapted for use in Python2. See comments in the
+# source for which code precisely is under this License.
+#
+# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
+
+
+'''
+The **urls** utils module offers a replacement for the urllib2 python library.
+
+urllib2 is the python stdlib way to retrieve files from the Internet but it
+lacks some security features (around verifying SSL certificates) that users
+should care about in most situations. Using the functions in this module corrects
+deficiencies in the urllib2 module wherever possible.
+
+There are also third-party libraries (for instance, requests) which can be used
+to replace urllib2 with a more secure library. However, all third party libraries
+require that the library be installed on the managed machine. That is an extra step
+for users making use of a module. If possible, avoid third party libraries by using
+this code instead.
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import base64
+import email.mime.multipart
+import email.mime.nonmultipart
+import email.mime.application
+import email.parser
+import email.utils
+import functools
+import mimetypes
+import netrc
+import os
+import platform
+import re
+import socket
+import sys
+import tempfile
+import traceback
+
+from contextlib import contextmanager
+
+try:
+ import email.policy
+except ImportError:
+ # Py2
+ import email.generator
+
+try:
+ import httplib
+except ImportError:
+ # Python 3
+ import http.client as httplib
+
+import ansible.module_utils.six.moves.http_cookiejar as cookiejar
+import ansible.module_utils.six.moves.urllib.request as urllib_request
+import ansible.module_utils.six.moves.urllib.error as urllib_error
+
+from ansible.module_utils.common.collections import Mapping
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils.six.moves import cStringIO
+from ansible.module_utils.basic import get_distribution
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+try:
+ # python3
+ import urllib.request as urllib_request
+ from urllib.request import AbstractHTTPHandler
+except ImportError:
+ # python2
+ import urllib2 as urllib_request
+ from urllib2 import AbstractHTTPHandler
+
+urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+
+try:
+ import ssl
+ HAS_SSL = True
+except Exception:
+ HAS_SSL = False
+
+try:
+ # SNI Handling needs python2.7.9's SSLContext
+ from ssl import create_default_context, SSLContext
+ HAS_SSLCONTEXT = True
+except ImportError:
+ HAS_SSLCONTEXT = False
+
+# SNI Handling for python < 2.7.9 with urllib3 support
+try:
+ # urllib3>=1.15
+ HAS_URLLIB3_SSL_WRAP_SOCKET = False
+ try:
+ from urllib3.contrib.pyopenssl import PyOpenSSLContext
+ except ImportError:
+ from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
+ HAS_URLLIB3_PYOPENSSLCONTEXT = True
+except ImportError:
+ # urllib3<1.15,>=1.6
+ HAS_URLLIB3_PYOPENSSLCONTEXT = False
+ try:
+ try:
+ from urllib3.contrib.pyopenssl import ssl_wrap_socket
+ except ImportError:
+ from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
+ HAS_URLLIB3_SSL_WRAP_SOCKET = True
+ except ImportError:
+ pass
+
+# Select a protocol that includes all secure tls protocols
+# Exclude insecure ssl protocols if possible
+
+if HAS_SSL:
+ # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
+ PROTOCOL = ssl.PROTOCOL_TLSv1
+if not HAS_SSLCONTEXT and HAS_SSL:
+ try:
+ import ctypes
+ import ctypes.util
+ except ImportError:
+ # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
+ pass
+ else:
+ libssl_name = ctypes.util.find_library('ssl')
+ libssl = ctypes.CDLL(libssl_name)
+ for method in ('TLSv1_1_method', 'TLSv1_2_method'):
+ try:
+ libssl[method]
+ # Found something - we'll let openssl autonegotiate and hope
+ # the server has disabled sslv2 and 3. best we can do.
+ PROTOCOL = ssl.PROTOCOL_SSLv23
+ break
+ except AttributeError:
+ pass
+ del libssl
+
+
+# The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname
+# The bundled backports.ssl_match_hostname should really be moved into its own file for processing
+_BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"}
+
+LOADED_VERIFY_LOCATIONS = set()
+
+HAS_MATCH_HOSTNAME = True
+try:
+ from ssl import match_hostname, CertificateError
+except ImportError:
+ try:
+ from backports.ssl_match_hostname import match_hostname, CertificateError
+ except ImportError:
+ HAS_MATCH_HOSTNAME = False
+
+
+try:
+ import urllib_gssapi
+ HAS_GSSAPI = True
+except ImportError:
+ HAS_GSSAPI = False
+
+if not HAS_MATCH_HOSTNAME:
+ # The following block of code is under the terms and conditions of the
+ # Python Software Foundation License
+
+ """The match_hostname() function from Python 3.4, essential when using SSL."""
+
+ try:
+ # Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not
+ from _ssl import SSLCertVerificationError
+ CertificateError = SSLCertVerificationError
+ except ImportError:
+ class CertificateError(ValueError):
+ pass
+
+ def _dnsname_match(dn, hostname):
+ """Matching according to RFC 6125, section 6.4.3
+
+ - Hostnames are compared lower case.
+ - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
+ - Partial wildcards like 'www*.example.org', multiple wildcards, sole
+ wildcard or wildcards in labels other than the left-most label are not
+ supported and a CertificateError is raised.
+ - A wildcard must match at least one character.
+ """
+ if not dn:
+ return False
+
+ wildcards = dn.count('*')
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ if wildcards > 1:
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: %s" % repr(dn))
+
+ dn_leftmost, sep, dn_remainder = dn.partition('.')
+
+ if '*' in dn_remainder:
+ # Only match wildcard in leftmost segment.
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "wildcard can only be present in the leftmost label: "
+ "%s." % repr(dn))
+
+ if not sep:
+ # no right side
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "sole wildcard without additional labels are not support: "
+ "%s." % repr(dn))
+
+ if dn_leftmost != '*':
+ # no partial wildcard matching
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise CertificateError(
+ "partial wildcards in leftmost label are not supported: "
+ "%s." % repr(dn))
+
+ hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
+ if not hostname_leftmost or not sep:
+ # wildcard must match at least one char
+ return False
+ return dn_remainder.lower() == hostname_remainder.lower()
+
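
A few concrete cases for the wildcard rules spelled out in the docstring above (a sketch; _dnsname_match is internal to this fallback block):

    _dnsname_match('example.com', 'EXAMPLE.com')            # True (case-insensitive)
    _dnsname_match('*.example.com', 'www.example.com')      # True
    _dnsname_match('*.example.com', 'foo.bar.example.com')  # False: matches one label only
    # partial or misplaced wildcards raise CertificateError:
    # _dnsname_match('www*.example.com', 'www1.example.com')
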
+ def _inet_paton(ipname):
+ """Try to convert an IP address to packed binary form
+
+ Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
+ support.
+ """
+ # inet_aton() also accepts strings like '1'
+ # Divergence: We make sure we have native string type for all python versions
+ try:
+ b_ipname = to_bytes(ipname, errors='strict')
+ except UnicodeError:
+ raise ValueError("%s must be an all-ascii string." % repr(ipname))
+
+ # Set ipname in native string format
+ if sys.version_info < (3,):
+ n_ipname = b_ipname
+ else:
+ n_ipname = ipname
+
+ if n_ipname.count('.') == 3:
+ try:
+ return socket.inet_aton(n_ipname)
+ # Divergence: OSError on late python3. socket.error earlier.
+ # Null bytes generate ValueError on python3(we want to raise
+ # ValueError anyway), TypeError # earlier
+ except (OSError, socket.error, TypeError):
+ pass
+
+ try:
+ return socket.inet_pton(socket.AF_INET6, n_ipname)
+ # Divergence: OSError on late python3. socket.error earlier.
+ # Null bytes generate ValueError on python3(we want to raise
+ # ValueError anyway), TypeError # earlier
+ except (OSError, socket.error, TypeError):
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise ValueError("%s is neither an IPv4 nor an IPv6 "
+ "address." % repr(ipname))
+ except AttributeError:
+ # AF_INET6 not available
+ pass
+
+ # Divergence .format() to percent formatting for Python < 2.6
+ raise ValueError("%s is not an IPv4 address." % repr(ipname))
+
+ def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ ip = _inet_paton(ipname.rstrip())
+ return ip == host_ip
+
+ def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed.
+
+ The function matches IP addresses rather than dNSNames if hostname is a
+ valid ipaddress string. IPv4 addresses are supported on all platforms.
+ IPv6 addresses are supported on platforms with IPv6 support (AF_INET6
+ and inet_pton).
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ try:
+ # Divergence: Deal with hostname as bytes
+ host_ip = _inet_paton(to_text(hostname, errors='strict'))
+ except UnicodeError:
+ # Divergence: Deal with hostname as byte strings.
+ # IP addresses should be all ascii, so we consider it not
+ # an IP address if this fails
+ host_ip = None
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == 'IP Address':
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or subjectAltName fields were found")
+
+ # End of Python Software Foundation Licensed code
+
+ HAS_MATCH_HOSTNAME = True
+
+
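
The backported match_hostname operates on the dict shape returned by SSLSocket.getpeercert(). A sketch with a hand-built certificate dict (values illustrative):

    cert = {
        'subject': ((('commonName', 'example.com'),),),
        'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.com')),
    }

    match_hostname(cert, 'www.example.com')  # returns None on success
    try:
        match_hostname(cert, 'www.invalid.test')
    except CertificateError as e:
        print(e)  # hostname 'www.invalid.test' doesn't match either of ...
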
+# This is a dummy cacert provided for macOS since you need at least 1
+# ca cert, regardless of validity, for Python on macOS to use the
+# keychain functionality in OpenSSL for validating SSL certificates.
+# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
+b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
+MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
+BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
+MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
+MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
+VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
+gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
+gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
+4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
+gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
+FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
+CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
+aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
+MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
+qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
+zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
+-----END CERTIFICATE-----
+"""
+
+#
+# Exceptions
+#
+
+
+class ConnectionError(Exception):
+ """Failed to connect to the server"""
+ pass
+
+
+class ProxyError(ConnectionError):
+ """Failure to connect because of a proxy"""
+ pass
+
+
+class SSLValidationError(ConnectionError):
+ """Failure to connect due to SSL validation failing"""
+ pass
+
+
+class NoSSLError(SSLValidationError):
+ """Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
+ pass
+
+
+# Some environments (Google Compute Engine's CoreOS deploys) do not compile
+# against openssl and thus do not have any HTTPS support.
+CustomHTTPSConnection = None
+CustomHTTPSHandler = None
+HTTPSClientAuthHandler = None
+UnixHTTPSConnection = None
+if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
+ class CustomHTTPSConnection(httplib.HTTPSConnection):
+ def __init__(self, *args, **kwargs):
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+ self.context = None
+ if HAS_SSLCONTEXT:
+ self.context = self._context
+ elif HAS_URLLIB3_PYOPENSSLCONTEXT:
+ self.context = self._context = PyOpenSSLContext(PROTOCOL)
+ if self.context and self.cert_file:
+ self.context.load_cert_chain(self.cert_file, self.key_file)
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ if hasattr(self, 'source_address'):
+ sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
+ else:
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+
+ server_hostname = self.host
+ # Note: self._tunnel_host is not available on py < 2.6 but this code
+ # isn't used on py < 2.6 (lack of create_connection)
+ if self._tunnel_host:
+ self.sock = sock
+ self._tunnel()
+ server_hostname = self._tunnel_host
+
+ if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
+ self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
+ elif HAS_URLLIB3_SSL_WRAP_SOCKET:
+ self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL,
+ server_hostname=server_hostname)
+ else:
+ self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
+
+ class CustomHTTPSHandler(urllib_request.HTTPSHandler):
+
+ def https_open(self, req):
+ kwargs = {}
+ if HAS_SSLCONTEXT:
+ kwargs['context'] = self._context
+ return self.do_open(
+ functools.partial(
+ CustomHTTPSConnection,
+ **kwargs
+ ),
+ req
+ )
+
+ https_request = AbstractHTTPHandler.do_request_
+
+ class HTTPSClientAuthHandler(urllib_request.HTTPSHandler):
+ '''Handles client authentication via cert/key
+
+ This is a fairly lightweight extension on HTTPSHandler, and can be used
+ in place of HTTPSHandler
+ '''
+
+ def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs):
+ urllib_request.HTTPSHandler.__init__(self, **kwargs)
+ self.client_cert = client_cert
+ self.client_key = client_key
+ self._unix_socket = unix_socket
+
+ def https_open(self, req):
+ return self.do_open(self._build_https_connection, req)
+
+ def _build_https_connection(self, host, **kwargs):
+ kwargs.update({
+ 'cert_file': self.client_cert,
+ 'key_file': self.client_key,
+ })
+ try:
+ kwargs['context'] = self._context
+ except AttributeError:
+ pass
+ if self._unix_socket:
+ return UnixHTTPSConnection(self._unix_socket)(host, **kwargs)
+ return httplib.HTTPSConnection(host, **kwargs)
+
+ @contextmanager
+ def unix_socket_patch_httpconnection_connect():
+ '''Monkey patch ``httplib.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect``
+ so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the
+ correct behavior of creating self.sock for the unix socket
+ '''
+ _connect = httplib.HTTPConnection.connect
+ httplib.HTTPConnection.connect = UnixHTTPConnection.connect
+ yield
+ httplib.HTTPConnection.connect = _connect
+
+ class UnixHTTPSConnection(httplib.HTTPSConnection):
+ def __init__(self, unix_socket):
+ self._unix_socket = unix_socket
+
+ def connect(self):
+ # This method exists simply to ensure we monkeypatch
+ # httplib.HTTPConnection.connect to call UnixHTTPConnection.connect
+ with unix_socket_patch_httpconnection_connect():
+ # Disable pylint check for the super() call. It complains about UnixHTTPSConnection
+ # being a NoneType because of the initial definition above, but it won't actually
+ # be a NoneType when this code runs
+ # pylint: disable=bad-super-call
+ super(UnixHTTPSConnection, self).connect()
+
+ def __call__(self, *args, **kwargs):
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+ return self
+
+
+class UnixHTTPConnection(httplib.HTTPConnection):
+ '''Handles http requests to a unix socket file'''
+
+ def __init__(self, unix_socket):
+ self._unix_socket = unix_socket
+
+ def connect(self):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ self.sock.connect(self._unix_socket)
+ except OSError as e:
+ raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e))
+ if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ self.sock.settimeout(self.timeout)
+
+ def __call__(self, *args, **kwargs):
+ httplib.HTTPConnection.__init__(self, *args, **kwargs)
+ return self
+
+
+class UnixHTTPHandler(urllib_request.HTTPHandler):
+ '''Handler for Unix urls'''
+
+ def __init__(self, unix_socket, **kwargs):
+ urllib_request.HTTPHandler.__init__(self, **kwargs)
+ self._unix_socket = unix_socket
+
+ def http_open(self, req):
+ return self.do_open(UnixHTTPConnection(self._unix_socket), req)
+
+
+class ParseResultDottedDict(dict):
+ '''
+ A dict that acts similarly to the ParseResult named tuple from urllib
+ '''
+ def __init__(self, *args, **kwargs):
+ super(ParseResultDottedDict, self).__init__(*args, **kwargs)
+ self.__dict__ = self
+
+ def as_list(self):
+ '''
+ Generate a list from this dict, that looks like the ParseResult named tuple
+ '''
+ return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')]
+
+
+def generic_urlparse(parts):
+ '''
+ Returns a dictionary of url parts as parsed by urlparse,
+ but accounts for the fact that older versions of that
+    library do not support named attributes (i.e. .netloc)
+ '''
+ generic_parts = ParseResultDottedDict()
+ if hasattr(parts, 'netloc'):
+ # urlparse is newer, just read the fields straight
+ # from the parts object
+ generic_parts['scheme'] = parts.scheme
+ generic_parts['netloc'] = parts.netloc
+ generic_parts['path'] = parts.path
+ generic_parts['params'] = parts.params
+ generic_parts['query'] = parts.query
+ generic_parts['fragment'] = parts.fragment
+ generic_parts['username'] = parts.username
+ generic_parts['password'] = parts.password
+ hostname = parts.hostname
+ if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc:
+ # Py2.6 doesn't parse IPv6 addresses correctly
+ hostname = parts.netloc.split(']')[0][1:].lower()
+ generic_parts['hostname'] = hostname
+
+ try:
+ port = parts.port
+ except ValueError:
+ # Py2.6 doesn't parse IPv6 addresses correctly
+ netloc = parts.netloc.split('@')[-1].split(']')[-1]
+ if ':' in netloc:
+ port = netloc.split(':')[1]
+ if port:
+ port = int(port)
+ else:
+ port = None
+ generic_parts['port'] = port
+ else:
+ # we have to use indexes, and then parse out
+ # the other parts not supported by indexing
+ generic_parts['scheme'] = parts[0]
+ generic_parts['netloc'] = parts[1]
+ generic_parts['path'] = parts[2]
+ generic_parts['params'] = parts[3]
+ generic_parts['query'] = parts[4]
+ generic_parts['fragment'] = parts[5]
+ # get the username, password, etc.
+ try:
+ netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
+ match = netloc_re.match(parts[1])
+ auth = match.group(1)
+ hostname = match.group(2)
+ port = match.group(3)
+ if port:
+ # the capture group for the port will include the ':',
+ # so remove it and convert the port to an integer
+ port = int(port[1:])
+ if auth:
+ # the capture group above includes the @, so remove it
+ # and then split it up based on the first ':' found
+ auth = auth[:-1]
+ username, password = auth.split(':', 1)
+ else:
+ username = password = None
+ generic_parts['username'] = username
+ generic_parts['password'] = password
+ generic_parts['hostname'] = hostname
+ generic_parts['port'] = port
+ except Exception:
+ generic_parts['username'] = None
+ generic_parts['password'] = None
+ generic_parts['hostname'] = parts[1]
+ generic_parts['port'] = None
+ return generic_parts
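+
+# Illustrative sketch (not executed; the URL is an assumed placeholder):
+#
+#     parts = generic_urlparse(urlparse('https://user:pw@example.com:8443/p?q=1'))
+#     parts.hostname   # -> 'example.com'
+#     parts.port       # -> 8443
+#     parts.as_list()  # -> ParseResult-style ordering (scheme, netloc, path, ...)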
+
+
+class RequestWithMethod(urllib_request.Request):
+ '''
+ Workaround for using DELETE/PUT/etc with urllib2
+ Originally contained in library/net_infrastructure/dnsmadeeasy
+ '''
+
+ def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True):
+ if headers is None:
+ headers = {}
+ self._method = method.upper()
+ urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
+
+ def get_method(self):
+ if self._method:
+ return self._method
+ else:
+ return urllib_request.Request.get_method(self)
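+
+# Illustrative sketch (not executed; the URL is an assumed placeholder):
+#
+#     req = RequestWithMethod('https://api.example.com/item/1', 'delete')
+#     req.get_method()  # -> 'DELETE'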
+
+
+def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None):
+ """This is a class factory that closes over the value of
+ ``follow_redirects`` so that the RedirectHandler class has access to
+    that value without having to use globals, which could otherwise cause problems
+ where ``open_url`` or ``fetch_url`` are used multiple times in a module.
+ """
+
+ class RedirectHandler(urllib_request.HTTPRedirectHandler):
+ """This is an implementation of a RedirectHandler to match the
+ functionality provided by httplib2. It will utilize the value of
+ ``follow_redirects`` that is passed into ``RedirectHandlerFactory``
+ to determine how redirects should be handled in urllib2.
+ """
+
+ def redirect_request(self, req, fp, code, msg, hdrs, newurl):
+ if not HAS_SSLCONTEXT:
+ handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path)
+ if handler:
+ urllib_request._opener.add_handler(handler)
+
+ # Preserve urllib2 compatibility
+ if follow_redirects == 'urllib2':
+ return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
+
+ # Handle disabled redirects
+ elif follow_redirects in ['no', 'none', False]:
+ raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)
+
+ method = req.get_method()
+
+ # Handle non-redirect HTTP status or invalid follow_redirects
+ if follow_redirects in ['all', 'yes', True]:
+ if code < 300 or code >= 400:
+ raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+ elif follow_redirects == 'safe':
+ if code < 300 or code >= 400 or method not in ('GET', 'HEAD'):
+ raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+ else:
+ raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
+
+ try:
+ # Python 2-3.3
+ data = req.get_data()
+ origin_req_host = req.get_origin_req_host()
+ except AttributeError:
+ # Python 3.4+
+ data = req.data
+ origin_req_host = req.origin_req_host
+
+            # Be lenient with URIs containing a space
+ newurl = newurl.replace(' ', '%20')
+
+            # Support redirects with payload and original headers
+ if code in (307, 308):
+ # Preserve payload and headers
+ headers = req.headers
+ else:
+ # Do not preserve payload and filter headers
+ data = None
+ headers = dict((k, v) for k, v in req.headers.items()
+ if k.lower() not in ("content-length", "content-type", "transfer-encoding"))
+
+ # http://tools.ietf.org/html/rfc7231#section-6.4.4
+ if code == 303 and method != 'HEAD':
+ method = 'GET'
+
+ # Do what the browsers do, despite standards...
+ # First, turn 302s into GETs.
+ if code == 302 and method != 'HEAD':
+ method = 'GET'
+
+ # Second, if a POST is responded to with a 301, turn it into a GET.
+ if code == 301 and method == 'POST':
+ method = 'GET'
+
+ return RequestWithMethod(newurl,
+ method=method,
+ headers=headers,
+ data=data,
+ origin_req_host=origin_req_host,
+ unverifiable=True,
+ )
+
+ return RedirectHandler
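+
+# Illustrative sketch (not executed): the factory returns a handler *class*,
+# which urllib openers accept and instantiate themselves:
+#
+#     handler_cls = RedirectHandlerFactory(follow_redirects='safe', validate_certs=True)
+#     opener = urllib_request.build_opener(handler_cls)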
+
+
+def build_ssl_validation_error(hostname, port, paths, exc=None):
+    '''Intelligently build out the SSLValidationError based on what support
+ you have installed
+ '''
+
+ msg = [
+ ('Failed to validate the SSL certificate for %s:%s.'
+ ' Make sure your managed systems have a valid CA'
+ ' certificate installed.')
+ ]
+ if not HAS_SSLCONTEXT:
+ msg.append('If the website serving the url uses SNI you need'
+ ' python >= 2.7.9 on your managed machine')
+ msg.append(' (the python executable used (%s) is version: %s)' %
+ (sys.executable, ''.join(sys.version.splitlines())))
+ if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET:
+ msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
+ ' `ndg-httpsclient`, and `pyasn1` python modules')
+
+ msg.append('to perform SNI verification in python >= 2.6.')
+
+ msg.append('You can use validate_certs=False if you do'
+               ' not need to confirm the identity of the server, but this is'
+ ' unsafe and not recommended.'
+ ' Paths checked for this platform: %s.')
+
+ if exc:
+ msg.append('The exception msg was: %s.' % to_native(exc))
+
+ raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
+
+
+def atexit_remove_file(filename):
+ if os.path.exists(filename):
+ try:
+ os.unlink(filename)
+ except Exception:
+ # just ignore if we cannot delete, things should be ok
+ pass
+
+
+class SSLValidationHandler(urllib_request.BaseHandler):
+ '''
+ A custom handler class for SSL validation.
+
+ Based on:
+ http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
+ http://techknack.net/python-urllib2-handlers/
+ '''
+ CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n"
+
+ def __init__(self, hostname, port, ca_path=None):
+ self.hostname = hostname
+ self.port = port
+ self.ca_path = ca_path
+
+ def get_ca_certs(self):
+ # tries to find a valid CA cert in one of the
+ # standard locations for the current distribution
+
+ ca_certs = []
+ cadata = bytearray()
+ paths_checked = []
+
+ if self.ca_path:
+ paths_checked = [self.ca_path]
+ with open(to_bytes(self.ca_path, errors='surrogate_or_strict'), 'rb') as f:
+ if HAS_SSLCONTEXT:
+ cadata.extend(
+ ssl.PEM_cert_to_DER_cert(
+ to_native(f.read(), errors='surrogate_or_strict')
+ )
+ )
+ else:
+ ca_certs.append(f.read())
+ return ca_certs, cadata, paths_checked
+
+ if not HAS_SSLCONTEXT:
+ paths_checked.append('/etc/ssl/certs')
+
+ system = to_text(platform.system(), errors='surrogate_or_strict')
+ # build a list of paths to check for .crt/.pem files
+ # based on the platform type
+ if system == u'Linux':
+ paths_checked.append('/etc/pki/ca-trust/extracted/pem')
+ paths_checked.append('/etc/pki/tls/certs')
+ paths_checked.append('/usr/share/ca-certificates/cacert.org')
+ elif system == u'FreeBSD':
+ paths_checked.append('/usr/local/share/certs')
+ elif system == u'OpenBSD':
+ paths_checked.append('/etc/ssl')
+ elif system == u'NetBSD':
+            paths_checked.append('/etc/openssl/certs')
+ elif system == u'SunOS':
+ paths_checked.append('/opt/local/etc/openssl/certs')
+
+ # fall back to a user-deployed cert in a standard
+ # location if the OS platform one is not available
+ paths_checked.append('/etc/ansible')
+
+ tmp_path = None
+ if not HAS_SSLCONTEXT:
+ tmp_fd, tmp_path = tempfile.mkstemp()
+ atexit.register(atexit_remove_file, tmp_path)
+
+ # Write the dummy ca cert if we are running on macOS
+ if system == u'Darwin':
+ if HAS_SSLCONTEXT:
+ cadata.extend(
+ ssl.PEM_cert_to_DER_cert(
+ to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict')
+ )
+ )
+ else:
+ os.write(tmp_fd, b_DUMMY_CA_CERT)
+ # Default Homebrew path for OpenSSL certs
+ paths_checked.append('/usr/local/etc/openssl')
+
+ # for all of the paths, find any .crt or .pem files
+ # and compile them into single temp file for use
+ # in the ssl check to speed up the test
+ for path in paths_checked:
+ if os.path.exists(path) and os.path.isdir(path):
+ dir_contents = os.listdir(path)
+ for f in dir_contents:
+ full_path = os.path.join(path, f)
+ if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
+ try:
+ if full_path not in LOADED_VERIFY_LOCATIONS:
+ with open(full_path, 'rb') as cert_file:
+ b_cert = cert_file.read()
+ if HAS_SSLCONTEXT:
+ try:
+ cadata.extend(
+ ssl.PEM_cert_to_DER_cert(
+ to_native(b_cert, errors='surrogate_or_strict')
+ )
+ )
+ except Exception:
+ continue
+ else:
+ os.write(tmp_fd, b_cert)
+ os.write(tmp_fd, b'\n')
+ except (OSError, IOError):
+ pass
+
+ if HAS_SSLCONTEXT:
+ default_verify_paths = ssl.get_default_verify_paths()
+ paths_checked[:0] = [default_verify_paths.capath]
+
+ return (tmp_path, cadata, paths_checked)
+
+ def validate_proxy_response(self, response, valid_codes=None):
+ '''
+ make sure we get back a valid code from the proxy
+ '''
+ valid_codes = [200] if valid_codes is None else valid_codes
+
+ try:
+ (http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
+ if int(resp_code) not in valid_codes:
+ raise Exception
+ except Exception:
+ raise ProxyError('Connection to proxy failed')
+
+ def detect_no_proxy(self, url):
+ '''
+ Detect if the 'no_proxy' environment variable is set and honor those locations.
+ '''
+ env_no_proxy = os.environ.get('no_proxy')
+ if env_no_proxy:
+ env_no_proxy = env_no_proxy.split(',')
+ netloc = urlparse(url).netloc
+
+ for host in env_no_proxy:
+ if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
+ # Our requested URL matches something in no_proxy, so don't
+ # use the proxy for this
+ return False
+ return True
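+
+    # Illustrative behavior (not executed; the env value is an assumption):
+    # with no_proxy='.internal.example', a request to https://svc.internal.example/x
+    # matches and detect_no_proxy() returns False, i.e. the proxy is bypassed.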
+
+ def make_context(self, cafile, cadata):
+ cafile = self.ca_path or cafile
+ if self.ca_path:
+ cadata = None
+ else:
+ cadata = cadata or None
+
+ if HAS_SSLCONTEXT:
+ context = create_default_context(cafile=cafile)
+ elif HAS_URLLIB3_PYOPENSSLCONTEXT:
+ context = PyOpenSSLContext(PROTOCOL)
+ else:
+ raise NotImplementedError('Host libraries are too old to support creating an sslcontext')
+
+ if cafile or cadata:
+ context.load_verify_locations(cafile=cafile, cadata=cadata)
+ return context
+
+ def http_request(self, req):
+ tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs()
+
+ # Detect if 'no_proxy' environment variable is set and if our URL is included
+ use_proxy = self.detect_no_proxy(req.get_full_url())
+ https_proxy = os.environ.get('https_proxy')
+
+ context = None
+ try:
+ context = self.make_context(tmp_ca_cert_path, cadata)
+ except NotImplementedError:
+ # We'll make do with no context below
+ pass
+
+ try:
+ if use_proxy and https_proxy:
+ proxy_parts = generic_urlparse(urlparse(https_proxy))
+ port = proxy_parts.get('port') or 443
+ proxy_hostname = proxy_parts.get('hostname', None)
+ if proxy_hostname is None or proxy_parts.get('scheme') == '':
+ raise ProxyError("Failed to parse https_proxy environment variable."
+ " Please make sure you export https proxy as 'https_proxy=<SCHEME>://<IP_ADDRESS>:<PORT>'")
+
+ s = socket.create_connection((proxy_hostname, port))
+ if proxy_parts.get('scheme') == 'http':
+ s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict'))
+ if proxy_parts.get('username'):
+ credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
+ s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
+ s.sendall(b'\r\n')
+ connect_result = b""
+ while connect_result.find(b"\r\n\r\n") <= 0:
+ connect_result += s.recv(4096)
+ # 128 kilobytes of headers should be enough for everyone.
+ if len(connect_result) > 131072:
+                        raise ProxyError('Proxy sent an overly large header block. Only 128KiB allowed.')
+ self.validate_proxy_response(connect_result)
+ if context:
+ ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
+ elif HAS_URLLIB3_SSL_WRAP_SOCKET:
+ ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
+ else:
+ ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
+ else:
+ raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
+ else:
+ s = socket.create_connection((self.hostname, self.port))
+ if context:
+ ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
+ elif HAS_URLLIB3_SSL_WRAP_SOCKET:
+ ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
+ else:
+ ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
+ match_hostname(ssl_s.getpeercert(), self.hostname)
+ # close the ssl connection
+ # ssl_s.unwrap()
+ s.close()
+ except (ssl.SSLError, CertificateError) as e:
+ build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
+ except socket.error as e:
+ raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))
+
+ return req
+
+ https_request = http_request
+
+
+def maybe_add_ssl_handler(url, validate_certs, ca_path=None):
+ parsed = generic_urlparse(urlparse(url))
+ if parsed.scheme == 'https' and validate_certs:
+ if not HAS_SSL:
+ raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
+ ' however this is unsafe and not recommended')
+
+ # create the SSL validation handler and
+ # add it to the list of handlers
+ return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path)
+
+
+def rfc2822_date_string(timetuple, zone='-0000'):
+ """Accepts a timetuple and optional zone which defaults to ``-0000``
+ and returns a date string as specified by RFC 2822, e.g.:
+
+ Fri, 09 Nov 2001 01:08:47 -0000
+
+ Copied from email.utils.formatdate and modified for separate use
+ """
+ return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
+ ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
+ timetuple[2],
+ ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
+ timetuple[0], timetuple[3], timetuple[4], timetuple[5],
+ zone)
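+
+# Illustrative sketch (not executed):
+#
+#     import datetime
+#     rfc2822_date_string(datetime.datetime(2001, 11, 9, 1, 8, 47).timetuple())
+#     # -> 'Fri, 09 Nov 2001 01:08:47 -0000'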
+
+
+class Request:
+ def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=False,
+ follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None,
+ ca_path=None):
+ """This class works somewhat similarly to the ``Session`` class of from requests
+ by defining a cookiejar that an be used across requests as well as cascaded defaults that
+ can apply to repeated requests
+
+ For documentation of params, see ``Request.open``
+
+ >>> from ansible.module_utils.urls import Request
+ >>> r = Request()
+ >>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read()
+ '{\n "cookies": {\n "k1": "v1"\n }\n}\n'
+ >>> r = Request(url_username='user', url_password='passwd')
+ >>> r.open('GET', 'http://httpbin.org/basic-auth/user/passwd').read()
+ '{\n "authenticated": true, \n "user": "user"\n}\n'
+ >>> r = Request(headers=dict(foo='bar'))
+ >>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read()
+
+ """
+
+ self.headers = headers or {}
+ if not isinstance(self.headers, dict):
+ raise ValueError("headers must be a dict: %r" % self.headers)
+ self.use_proxy = use_proxy
+ self.force = force
+ self.timeout = timeout
+ self.validate_certs = validate_certs
+ self.url_username = url_username
+ self.url_password = url_password
+ self.http_agent = http_agent
+ self.force_basic_auth = force_basic_auth
+ self.follow_redirects = follow_redirects
+ self.client_cert = client_cert
+ self.client_key = client_key
+ self.unix_socket = unix_socket
+ self.ca_path = ca_path
+ if isinstance(cookies, cookiejar.CookieJar):
+ self.cookies = cookies
+ else:
+ self.cookies = cookiejar.CookieJar()
+
+ def _fallback(self, value, fallback):
+ if value is None:
+ return fallback
+ return value
+
+ def open(self, method, url, data=None, headers=None, use_proxy=None,
+ force=None, last_mod_time=None, timeout=None, validate_certs=None,
+ url_username=None, url_password=None, http_agent=None,
+ force_basic_auth=None, follow_redirects=None,
+ client_cert=None, client_key=None, cookies=None, use_gssapi=False,
+ unix_socket=None, ca_path=None, unredirected_headers=None):
+ """
+ Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
+
+ Does not require the module environment
+
+ Returns :class:`HTTPResponse` object.
+
+ :arg method: method for the request
+ :arg url: URL to request
+
+ :kwarg data: (optional) bytes, or file-like object to send
+ in the body of the request
+ :kwarg headers: (optional) Dictionary of HTTP Headers to send with the
+ request
+ :kwarg use_proxy: (optional) Boolean of whether or not to use proxy
+ :kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header
+ :kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header
+ :kwarg timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float
+        :kwarg validate_certs: (optional) Boolean that controls whether we verify
+ the server's TLS certificate
+ :kwarg url_username: (optional) String of the user to use when authenticating
+ :kwarg url_password: (optional) String of the password to use when authenticating
+ :kwarg http_agent: (optional) String of the User-Agent to use in the request
+ :kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
+ :kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
+ followed, see RedirectHandlerFactory for more information
+ :kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication.
+ This file can also include the key as well, and if the key is included, client_key is not required
+ :kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
+ authentication. If client_cert contains both the certificate and key, this option is not required
+ :kwarg cookies: (optional) CookieJar object to send with the
+ request
+        :kwarg use_gssapi: (optional) Use a GSSAPI authentication handler for the request.
+ :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
+ connection to the provided url
+ :kwarg ca_path: (optional) String of file system path to CA cert bundle to use
+ :kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
+ :returns: HTTPResponse. Added in Ansible 2.9
+ """
+
+ method = method.upper()
+
+ if headers is None:
+ headers = {}
+ elif not isinstance(headers, dict):
+ raise ValueError("headers must be a dict")
+ headers = dict(self.headers, **headers)
+
+ use_proxy = self._fallback(use_proxy, self.use_proxy)
+ force = self._fallback(force, self.force)
+ timeout = self._fallback(timeout, self.timeout)
+ validate_certs = self._fallback(validate_certs, self.validate_certs)
+ url_username = self._fallback(url_username, self.url_username)
+ url_password = self._fallback(url_password, self.url_password)
+ http_agent = self._fallback(http_agent, self.http_agent)
+ force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth)
+ follow_redirects = self._fallback(follow_redirects, self.follow_redirects)
+ client_cert = self._fallback(client_cert, self.client_cert)
+ client_key = self._fallback(client_key, self.client_key)
+ cookies = self._fallback(cookies, self.cookies)
+ unix_socket = self._fallback(unix_socket, self.unix_socket)
+ ca_path = self._fallback(ca_path, self.ca_path)
+
+ handlers = []
+
+ if unix_socket:
+ handlers.append(UnixHTTPHandler(unix_socket))
+
+ ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path)
+ if ssl_handler and not HAS_SSLCONTEXT:
+ handlers.append(ssl_handler)
+ if HAS_GSSAPI and use_gssapi:
+ handlers.append(urllib_gssapi.HTTPSPNEGOAuthHandler())
+
+ parsed = generic_urlparse(urlparse(url))
+ if parsed.scheme != 'ftp':
+ username = url_username
+
+ if username:
+ password = url_password
+ netloc = parsed.netloc
+ elif '@' in parsed.netloc:
+ credentials, netloc = parsed.netloc.split('@', 1)
+ if ':' in credentials:
+ username, password = credentials.split(':', 1)
+ else:
+ username = credentials
+ password = ''
+
+ parsed_list = parsed.as_list()
+ parsed_list[1] = netloc
+
+ # reconstruct url without credentials
+ url = urlunparse(parsed_list)
+
+ if username and not force_basic_auth:
+ passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+
+ # this creates a password manager
+ passman.add_password(None, netloc, username, password)
+
+ # because we have put None at the start it will always
+ # use this username/password combination for urls
+                # for which `netloc` is a super-url
+ authhandler = urllib_request.HTTPBasicAuthHandler(passman)
+ digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
+
+ # create the AuthHandler
+ handlers.append(authhandler)
+ handlers.append(digest_authhandler)
+
+ elif username and force_basic_auth:
+ headers["Authorization"] = basic_auth_header(username, password)
+
+ else:
+ try:
+ rc = netrc.netrc(os.environ.get('NETRC'))
+ login = rc.authenticators(parsed.hostname)
+ except IOError:
+ login = None
+
+ if login:
+ username, _, password = login
+ if username and password:
+ headers["Authorization"] = basic_auth_header(username, password)
+
+ if not use_proxy:
+ proxyhandler = urllib_request.ProxyHandler({})
+ handlers.append(proxyhandler)
+
+ context = None
+ if HAS_SSLCONTEXT and not validate_certs:
+ # In 2.7.9, the default context validates certificates
+ context = SSLContext(ssl.PROTOCOL_SSLv23)
+ if ssl.OP_NO_SSLv2:
+ context.options |= ssl.OP_NO_SSLv2
+ context.options |= ssl.OP_NO_SSLv3
+ context.verify_mode = ssl.CERT_NONE
+ context.check_hostname = False
+ handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
+ client_key=client_key,
+ context=context,
+ unix_socket=unix_socket))
+ elif client_cert or unix_socket:
+ handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
+ client_key=client_key,
+ unix_socket=unix_socket))
+
+ if ssl_handler and HAS_SSLCONTEXT and validate_certs:
+ tmp_ca_path, cadata, paths_checked = ssl_handler.get_ca_certs()
+ try:
+ context = ssl_handler.make_context(tmp_ca_path, cadata)
+ except NotImplementedError:
+ pass
+
+ # pre-2.6 versions of python cannot use the custom https
+ # handler, since the socket class is lacking create_connection.
+ # Some python builds lack HTTPS support.
+ if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
+ kwargs = {}
+ if HAS_SSLCONTEXT:
+ kwargs['context'] = context
+ handlers.append(CustomHTTPSHandler(**kwargs))
+
+ handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path))
+
+ # add some nicer cookie handling
+ if cookies is not None:
+ handlers.append(urllib_request.HTTPCookieProcessor(cookies))
+
+ opener = urllib_request.build_opener(*handlers)
+ urllib_request.install_opener(opener)
+
+ data = to_bytes(data, nonstring='passthru')
+ request = RequestWithMethod(url, method, data)
+
+ # add the custom agent header, to help prevent issues
+ # with sites that block the default urllib agent string
+ if http_agent:
+ request.add_header('User-agent', http_agent)
+
+ # Cache control
+ # Either we directly force a cache refresh
+ if force:
+ request.add_header('cache-control', 'no-cache')
+ # or we do it if the original is more recent than our copy
+ elif last_mod_time:
+ tstamp = rfc2822_date_string(last_mod_time.timetuple(), 'GMT')
+ request.add_header('If-Modified-Since', tstamp)
+
+ # user defined headers now, which may override things we've set above
+ unredirected_headers = unredirected_headers or []
+ for header in headers:
+ if header in unredirected_headers:
+ request.add_unredirected_header(header, headers[header])
+ else:
+ request.add_header(header, headers[header])
+
+ return urllib_request.urlopen(request, None, timeout)
+
+ def get(self, url, **kwargs):
+ r"""Sends a GET request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('GET', url, **kwargs)
+
+ def options(self, url, **kwargs):
+ r"""Sends a OPTIONS request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('OPTIONS', url, **kwargs)
+
+ def head(self, url, **kwargs):
+ r"""Sends a HEAD request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('HEAD', url, **kwargs)
+
+ def post(self, url, data=None, **kwargs):
+ r"""Sends a POST request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request.
+ :kwarg data: (optional) bytes, or file-like object to send in the body of the request.
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('POST', url, data=data, **kwargs)
+
+ def put(self, url, data=None, **kwargs):
+ r"""Sends a PUT request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request.
+ :kwarg data: (optional) bytes, or file-like object to send in the body of the request.
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('PUT', url, data=data, **kwargs)
+
+ def patch(self, url, data=None, **kwargs):
+ r"""Sends a PATCH request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request.
+ :kwarg data: (optional) bytes, or file-like object to send in the body of the request.
+ :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('PATCH', url, data=data, **kwargs)
+
+ def delete(self, url, **kwargs):
+ r"""Sends a DELETE request. Returns :class:`HTTPResponse` object.
+
+ :arg url: URL to request
+        :kwarg \*\*kwargs: Optional arguments that ``open`` takes.
+ :returns: HTTPResponse
+ """
+
+ return self.open('DELETE', url, **kwargs)
+
+
+def open_url(url, data=None, headers=None, method=None, use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None,
+ force_basic_auth=False, follow_redirects='urllib2',
+ client_cert=None, client_key=None, cookies=None,
+ use_gssapi=False, unix_socket=None, ca_path=None,
+ unredirected_headers=None):
+ '''
+ Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
+
+ Does not require the module environment
+ '''
+ method = method or ('POST' if data else 'GET')
+ return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth, follow_redirects=follow_redirects,
+ client_cert=client_cert, client_key=client_key, cookies=cookies,
+ use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path,
+ unredirected_headers=unredirected_headers)
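+
+# Illustrative usage (not executed; the URL is an assumed placeholder):
+#
+#     resp = open_url('https://example.com/api', timeout=30)
+#     body = resp.read()
+#     status = resp.code  # e.g. 200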
+
+
+def prepare_multipart(fields):
+ """Takes a mapping, and prepares a multipart/form-data body
+
+ :arg fields: Mapping
+ :returns: tuple of (content_type, body) where ``content_type`` is
+ the ``multipart/form-data`` ``Content-Type`` header including
+ ``boundary`` and ``body`` is the prepared bytestring body
+
+ Payload content from a file will be base64 encoded and will include
+ the appropriate ``Content-Transfer-Encoding`` and ``Content-Type``
+ headers.
+
+ Example:
+ {
+ "file1": {
+ "filename": "/bin/true",
+ "mime_type": "application/octet-stream"
+ },
+ "file2": {
+ "content": "text based file content",
+ "filename": "fake.txt",
+ "mime_type": "text/plain",
+ },
+ "text_form_field": "value"
+ }
+ """
+
+ if not isinstance(fields, Mapping):
+ raise TypeError(
+ 'Mapping is required, cannot be type %s' % fields.__class__.__name__
+ )
+
+ m = email.mime.multipart.MIMEMultipart('form-data')
+ for field, value in sorted(fields.items()):
+ if isinstance(value, string_types):
+ main_type = 'text'
+ sub_type = 'plain'
+ content = value
+ filename = None
+ elif isinstance(value, Mapping):
+ filename = value.get('filename')
+ content = value.get('content')
+ if not any((filename, content)):
+ raise ValueError('at least one of filename or content must be provided')
+
+ mime = value.get('mime_type')
+ if not mime:
+ try:
+ mime = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
+ except Exception:
+ mime = 'application/octet-stream'
+ main_type, sep, sub_type = mime.partition('/')
+ else:
+ raise TypeError(
+ 'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__
+ )
+
+ if not content and filename:
+ with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
+ part = email.mime.application.MIMEApplication(f.read())
+ del part['Content-Type']
+ part.add_header('Content-Type', '%s/%s' % (main_type, sub_type))
+ else:
+ part = email.mime.nonmultipart.MIMENonMultipart(main_type, sub_type)
+ part.set_payload(to_bytes(content))
+
+ part.add_header('Content-Disposition', 'form-data')
+ del part['MIME-Version']
+ part.set_param(
+ 'name',
+ field,
+ header='Content-Disposition'
+ )
+ if filename:
+ part.set_param(
+ 'filename',
+ to_native(os.path.basename(filename)),
+ header='Content-Disposition'
+ )
+
+ m.attach(part)
+
+ if PY3:
+ # Ensure headers are not split over multiple lines
+ # The HTTP policy also uses CRLF by default
+ b_data = m.as_bytes(policy=email.policy.HTTP)
+ else:
+ # Py2
+ # We cannot just call ``as_string`` since it provides no way
+ # to specify ``maxheaderlen``
+ fp = cStringIO() # cStringIO seems to be required here
+ # Ensure headers are not split over multiple lines
+ g = email.generator.Generator(fp, maxheaderlen=0)
+ g.flatten(m)
+ # ``fix_eols`` switches from ``\n`` to ``\r\n``
+ b_data = email.utils.fix_eols(fp.getvalue())
+ del m
+
+ headers, sep, b_content = b_data.partition(b'\r\n\r\n')
+ del b_data
+
+ if PY3:
+ parser = email.parser.BytesHeaderParser().parsebytes
+ else:
+ # Py2
+ parser = email.parser.HeaderParser().parsestr
+
+ return (
+ parser(headers)['content-type'], # Message converts to native strings
+ b_content
+ )
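+
+# Illustrative sketch (not executed; the field name and URL are assumptions):
+#
+#     content_type, b_body = prepare_multipart({'field1': 'value1'})
+#     Request().post('https://example.com/upload', data=b_body,
+#                    headers={'Content-Type': content_type})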
+
+
+#
+# Module-related functions
+#
+
+
+def basic_auth_header(username, password):
+ """Takes a username and password and returns a byte string suitable for
+ using as value of an Authorization header to do basic auth.
+ """
+ return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))
+
+
+def url_argument_spec():
+ '''
+ Creates an argument spec that can be used with any module
+ that will be requesting content via urllib/urllib2
+ '''
+ return dict(
+ url=dict(type='str'),
+ force=dict(type='bool', default=False, aliases=['thirsty'],
+ deprecated_aliases=[dict(name='thirsty', version='2.13', collection_name='ansible.builtin')]),
+ http_agent=dict(type='str', default='ansible-httpget'),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ url_username=dict(type='str'),
+ url_password=dict(type='str', no_log=True),
+ force_basic_auth=dict(type='bool', default=False),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ )
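+
+# Illustrative sketch (not executed; the extra 'dest' option belongs to a
+# hypothetical consumer module):
+#
+#     argument_spec = url_argument_spec()
+#     argument_spec.update(dest=dict(type='path', required=True))
+#     module = AnsibleModule(argument_spec=argument_spec)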
+
+
+def fetch_url(module, url, data=None, headers=None, method=None,
+ use_proxy=True, force=False, last_mod_time=None, timeout=10,
+ use_gssapi=False, unix_socket=None, ca_path=None, cookies=None):
+ """Sends a request via HTTP(S) or FTP (needs the module as parameter)
+
+    :arg module: The AnsibleModule (used to get username, password, etc.).
+ :arg url: The url to use.
+
+ :kwarg data: The data to be sent (in case of POST/PUT).
+ :kwarg headers: A dict with the request headers.
+ :kwarg method: "POST", "PUT", etc.
+ :kwarg boolean use_proxy: Default: True
+ :kwarg boolean force: If True: Do not get a cached copy (Default: False)
+ :kwarg last_mod_time: Default: None
+ :kwarg int timeout: Default: 10
+ :kwarg boolean use_gssapi: Default: False
+ :kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
+ connection to the provided url
+ :kwarg ca_path: (optional) String of file system path to CA cert bundle to use
+
+ :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
+        The **info** contains the 'status' and other meta data. When an HTTPError (status >= 400)
+        occurs, ``info['body']`` contains the error response data.
+
+        Example::
+
+ data={...}
+ resp, info = fetch_url(module,
+ "http://example.com",
+ data=module.jsonify(data),
+ headers={'Content-type': 'application/json'},
+ method="POST")
+ status_code = info["status"]
+ body = resp.read()
+            if status_code >= 400:
+ body = info['body']
+ """
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ # ensure we use proper tempdir
+ old_tempdir = tempfile.tempdir
+ tempfile.tempdir = module.tmpdir
+
+ # Get validate_certs from the module params
+ validate_certs = module.params.get('validate_certs', True)
+
+ username = module.params.get('url_username', '')
+ password = module.params.get('url_password', '')
+ http_agent = module.params.get('http_agent', 'ansible-httpget')
+ force_basic_auth = module.params.get('force_basic_auth', '')
+
+ follow_redirects = module.params.get('follow_redirects', 'urllib2')
+
+ client_cert = module.params.get('client_cert')
+ client_key = module.params.get('client_key')
+
+ if not isinstance(cookies, cookiejar.CookieJar):
+ cookies = cookiejar.LWPCookieJar()
+
+ r = None
+ info = dict(url=url, status=-1)
+ try:
+ r = open_url(url, data=data, headers=headers, method=method,
+ use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
+ validate_certs=validate_certs, url_username=username,
+ url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
+ follow_redirects=follow_redirects, client_cert=client_cert,
+ client_key=client_key, cookies=cookies, use_gssapi=use_gssapi,
+ unix_socket=unix_socket, ca_path=ca_path)
+ # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
+ info.update(dict((k.lower(), v) for k, v in r.info().items()))
+
+ # Don't be lossy, append header values for duplicate headers
+        # In Py2 there is nothing that needs to be done; py2 does this for us
+ if PY3:
+ temp_headers = {}
+ for name, value in r.headers.items():
+ # The same as above, lower case keys to match py2 behavior, and create more consistent results
+ name = name.lower()
+ if name in temp_headers:
+ temp_headers[name] = ', '.join((temp_headers[name], value))
+ else:
+ temp_headers[name] = value
+ info.update(temp_headers)
+
+ # parse the cookies into a nice dictionary
+ cookie_list = []
+ cookie_dict = dict()
+ # Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs``
+ # Cookies with the same path are reversed from response order.
+ # This code makes no assumptions about that, and accepts the order given by python
+ for cookie in cookies:
+ cookie_dict[cookie.name] = cookie.value
+ cookie_list.append((cookie.name, cookie.value))
+ info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list)
+
+ info['cookies'] = cookie_dict
+ # finally update the result with a message about the fetch
+ info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
+ except NoSSLError as e:
+ distribution = get_distribution()
+ if distribution is not None and distribution.lower() == 'redhat':
+ module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e), **info)
+ else:
+ module.fail_json(msg='%s' % to_native(e), **info)
+ except (ConnectionError, ValueError) as e:
+ module.fail_json(msg=to_native(e), **info)
+ except urllib_error.HTTPError as e:
+ try:
+ body = e.read()
+ except AttributeError:
+ body = ''
+
+ # Try to add exception info to the output but don't fail if we can't
+ try:
+ # Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
+ info.update(dict((k.lower(), v) for k, v in e.info().items()))
+ except Exception:
+ pass
+
+ info.update({'msg': to_native(e), 'body': body, 'status': e.code})
+
+ except urllib_error.URLError as e:
+ code = int(getattr(e, 'code', -1))
+ info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
+ except socket.error as e:
+ info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
+ except httplib.BadStatusLine as e:
+ info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1))
+ except Exception as e:
+ info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
+ exception=traceback.format_exc())
+ finally:
+ tempfile.tempdir = old_tempdir
+
+ return r, info
+
+
+def fetch_file(module, url, data=None, headers=None, method=None,
+ use_proxy=True, force=False, last_mod_time=None, timeout=10):
+    '''Download and save a file via HTTP(S) or FTP (needs the module as a parameter).
+ This is basically a wrapper around fetch_url().
+
+    :arg module: The AnsibleModule (used to get username, password, etc.).
+ :arg url: The url to use.
+
+ :kwarg data: The data to be sent (in case of POST/PUT).
+ :kwarg headers: A dict with the request headers.
+ :kwarg method: "POST", "PUT", etc.
+ :kwarg boolean use_proxy: Default: True
+ :kwarg boolean force: If True: Do not get a cached copy (Default: False)
+ :kwarg last_mod_time: Default: None
+ :kwarg int timeout: Default: 10
+
+ :returns: A string, the path to the downloaded file.
+ '''
+ # download file
+ bufsize = 65536
+ file_name, file_ext = os.path.splitext(str(url.rsplit('/', 1)[1]))
+ fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_name, suffix=file_ext, delete=False)
+ module.add_cleanup_file(fetch_temp_file.name)
+ try:
+ rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout)
+ if not rsp:
+ module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg']))
+ data = rsp.read(bufsize)
+ while data:
+ fetch_temp_file.write(data)
+ data = rsp.read(bufsize)
+ fetch_temp_file.close()
+ except Exception as e:
+ module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e)))
+ return fetch_temp_file.name
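+
+# Illustrative sketch (not executed; the URL is an assumed placeholder):
+#
+#     path = fetch_file(module, 'https://example.com/archive.tar.gz')
+#     # ``path`` names a temp file already registered for cleanup via
+#     # module.add_cleanup_file()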
diff --git a/lib/ansible/module_utils/yumdnf.py b/lib/ansible/module_utils/yumdnf.py
new file mode 100644
index 00000000..0d715bfc
--- /dev/null
+++ b/lib/ansible/module_utils/yumdnf.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+# Contributing Authors:
+# - Ansible Core Team
+# - Eduard Snesarev (@verm666)
+# - Berend De Schouwer (@berenddeschouwer)
+# - Abhijeet Kasurde (@Akasurde)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+import glob
+import tempfile
+from abc import ABCMeta, abstractmethod
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import with_metaclass
+
+yumdnf_argument_spec = dict(
+ argument_spec=dict(
+ allow_downgrade=dict(type='bool', default=False),
+ autoremove=dict(type='bool', default=False),
+ bugfix=dict(required=False, type='bool', default=False),
+ conf_file=dict(type='str'),
+ disable_excludes=dict(type='str', default=None),
+ disable_gpg_check=dict(type='bool', default=False),
+ disable_plugin=dict(type='list', default=[]),
+ disablerepo=dict(type='list', default=[]),
+ download_only=dict(type='bool', default=False),
+ download_dir=dict(type='str', default=None),
+ enable_plugin=dict(type='list', default=[]),
+ enablerepo=dict(type='list', default=[]),
+ exclude=dict(type='list', default=[]),
+ installroot=dict(type='str', default="/"),
+ install_repoquery=dict(type='bool', default=True),
+ install_weak_deps=dict(type='bool', default=True),
+ list=dict(type='str'),
+ name=dict(type='list', elements='str', aliases=['pkg'], default=[]),
+ releasever=dict(default=None),
+ security=dict(type='bool', default=False),
+ skip_broken=dict(type='bool', default=False),
+ # removed==absent, installed==present, these are accepted as aliases
+ state=dict(type='str', default=None, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
+ update_only=dict(required=False, default="no", type='bool'),
+ validate_certs=dict(type='bool', default=True),
+ lock_timeout=dict(type='int', default=30),
+ ),
+ required_one_of=[['name', 'list', 'update_cache']],
+ mutually_exclusive=[['name', 'list']],
+ supports_check_mode=True,
+)
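+
+# Illustrative sketch (not executed): a consumer module can unpack this spec
+# straight into AnsibleModule:
+#
+#     module = AnsibleModule(**yumdnf_argument_spec)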
+
+
+class YumDnf(with_metaclass(ABCMeta, object)):
+ """
+ Abstract class that handles the population of instance variables that should
+ be identical between both YUM and DNF modules because of the feature parity
+ and shared argument spec
+ """
+
+ def __init__(self, module):
+
+ self.module = module
+
+ self.allow_downgrade = self.module.params['allow_downgrade']
+ self.autoremove = self.module.params['autoremove']
+ self.bugfix = self.module.params['bugfix']
+ self.conf_file = self.module.params['conf_file']
+ self.disable_excludes = self.module.params['disable_excludes']
+ self.disable_gpg_check = self.module.params['disable_gpg_check']
+ self.disable_plugin = self.module.params['disable_plugin']
+ self.disablerepo = self.module.params.get('disablerepo', [])
+ self.download_only = self.module.params['download_only']
+ self.download_dir = self.module.params['download_dir']
+ self.enable_plugin = self.module.params['enable_plugin']
+ self.enablerepo = self.module.params.get('enablerepo', [])
+ self.exclude = self.module.params['exclude']
+ self.installroot = self.module.params['installroot']
+ self.install_repoquery = self.module.params['install_repoquery']
+ self.install_weak_deps = self.module.params['install_weak_deps']
+ self.list = self.module.params['list']
+ self.names = [p.strip() for p in self.module.params['name']]
+ self.releasever = self.module.params['releasever']
+ self.security = self.module.params['security']
+ self.skip_broken = self.module.params['skip_broken']
+ self.state = self.module.params['state']
+ self.update_only = self.module.params['update_only']
+ self.update_cache = self.module.params['update_cache']
+ self.validate_certs = self.module.params['validate_certs']
+ self.lock_timeout = self.module.params['lock_timeout']
+
+ # It's possible someone passed a comma separated string since it used
+ # to be a string type, so we should handle that
+ self.names = self.listify_comma_sep_strings_in_list(self.names)
+ self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo)
+ self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo)
+ self.exclude = self.listify_comma_sep_strings_in_list(self.exclude)
+
+ # Fail if someone passed a space separated string
+ # https://github.com/ansible/ansible/issues/46301
+ for name in self.names:
+ if ' ' in name and not any(spec in name for spec in ['@', '>', '<', '=']):
+ module.fail_json(
+ msg='It appears that a space separated string of packages was passed in '
+ 'as an argument. To operate on several packages, pass a comma separated '
+ 'string of packages or a list of packages.'
+ )
+
+ # Sanity checking for autoremove
+ if self.state is None:
+ if self.autoremove:
+ self.state = "absent"
+ else:
+ self.state = "present"
+
+ if self.autoremove and (self.state != "absent"):
+ self.module.fail_json(
+ msg="Autoremove should be used alone or with state=absent",
+ results=[],
+ )
+
+ # This should really be redefined by both the yum and dnf module but a
+ # default isn't a bad idea
+ self.lockfile = '/var/run/yum.pid'
+
+ @abstractmethod
+ def is_lockfile_pid_valid(self):
+ return
+
+ def _is_lockfile_present(self):
+ return (os.path.isfile(self.lockfile) or glob.glob(self.lockfile)) and self.is_lockfile_pid_valid()
+
+ def wait_for_lock(self):
+ '''Poll until the lock is removed if timeout is a positive number'''
+
+ if not self._is_lockfile_present():
+ return
+
+ if self.lock_timeout > 0:
+ for iteration in range(0, self.lock_timeout):
+ time.sleep(1)
+ if not self._is_lockfile_present():
+ return
+
+ self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name))
+
+ def listify_comma_sep_strings_in_list(self, some_list):
+ """
+ method to accept a list of strings as the parameter, find any strings
+        in that list that are comma separated, remove them from the list, and add
+        their comma separated elements back to the original list
+ """
+ new_list = []
+ remove_from_original_list = []
+ for element in some_list:
+ if ',' in element:
+ remove_from_original_list.append(element)
+ new_list.extend([e.strip() for e in element.split(',')])
+
+ for element in remove_from_original_list:
+ some_list.remove(element)
+
+ some_list.extend(new_list)
+
+ if some_list == [""]:
+ return []
+
+ return some_list
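+
+    # Illustrative behavior (not executed):
+    #
+    #     self.listify_comma_sep_strings_in_list(['a,b', 'c'])  # -> ['c', 'a', 'b']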
+
+ @abstractmethod
+ def run(self):
+ raise NotImplementedError
diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/modules/__init__.py
diff --git a/lib/ansible/modules/add_host.py b/lib/ansible/modules/add_host.py
new file mode 100644
index 00000000..3d8f8f74
--- /dev/null
+++ b/lib/ansible/modules/add_host.py
@@ -0,0 +1,87 @@
+# -*- mode: python -*-
+
+# Copyright: (c) 2012, Seth Vidal (@skvidal)
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: add_host
+short_description: Add a host (and alternatively a group) to the ansible-playbook in-memory inventory
+description:
+- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
+- Takes variables so you can define the new hosts more fully.
+- This module is also supported for Windows targets.
+version_added: "0.9"
+options:
+ name:
+ description:
+    - The hostname/ip of the host to add to the inventory; can include a colon and a port number.
+ type: str
+ required: true
+ aliases: [ host, hostname ]
+ groups:
+ description:
+ - The groups to add the hostname to.
+ type: list
+ aliases: [ group, groupname ]
+notes:
+- This module bypasses the play host loop and only runs once for all the hosts in the play. If you need it
+  to iterate, use a C(loop) construct. If you need to dynamically add all hosts targeted by a playbook for
+ later use, the C(group_by) module is potentially a better choice.
+- The alias C(host) of the parameter C(name) is only available on Ansible 2.4 and newer.
+- Since Ansible 2.4, the C(inventory_dir) variable is now set to C(None) instead of the 'global inventory source',
+ because you can now have multiple sources. An example was added that shows how to partially restore the previous behaviour.
+- Windows targets are supported by this module.
+seealso:
+- module: ansible.builtin.group_by
+author:
+- Ansible Core Team
+- Seth Vidal (@skvidal)
+'''
+
+EXAMPLES = r'''
+- name: Add host to group 'just_created' with variable foo=42
+ add_host:
+ name: '{{ ip_from_ec2 }}'
+ groups: just_created
+ foo: 42
+
+- name: Add host to multiple groups
+ add_host:
+ hostname: '{{ new_ip }}'
+ groups:
+ - group1
+ - group2
+
+- name: Add a host with a non-standard port local to your machines
+ add_host:
+ name: '{{ new_ip }}:{{ new_port }}'
+
+- name: Add a host alias that we reach through a tunnel (Ansible 1.9 and older)
+ add_host:
+ hostname: '{{ new_ip }}'
+ ansible_ssh_host: '{{ inventory_hostname }}'
+ ansible_ssh_port: '{{ new_port }}'
+
+- name: Add a host alias that we reach through a tunnel (Ansible 2.0 and newer)
+ add_host:
+ hostname: '{{ new_ip }}'
+ ansible_host: '{{ inventory_hostname }}'
+ ansible_port: '{{ new_port }}'
+
+- name: Ensure inventory vars are set to the same value as the inventory_hostname has (close to pre Ansible 2.4 behaviour)
+ add_host:
+ hostname: charlie
+ inventory_dir: '{{ inventory_dir }}'
+
+- name: Add all hosts running this playbook to the done group
+ add_host:
+ name: '{{ item }}'
+ groups: done
+ loop: "{{ ansible_play_hosts }}"
+'''
diff --git a/lib/ansible/modules/apt.py b/lib/ansible/modules/apt.py
new file mode 100644
index 00000000..7e9fed6e
--- /dev/null
+++ b/lib/ansible/modules/apt.py
@@ -0,0 +1,1229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Flowroute LLC
+# Written by Matthew Williams <matthew@flowroute.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt
+short_description: Manages apt packages
+description:
+ - Manages I(apt) packages (such as for Debian/Ubuntu).
+version_added: "0.0.2"
+options:
+ name:
+ description:
+ - A list of package names, like C(foo), or package specifier with version, like C(foo=1.0).
+ Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported.
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
+        are installed. C(fixed) attempts to correct a system with broken dependencies in place.
+ default: present
+ choices: [ absent, build-dep, latest, present, fixed ]
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
+ - Default is not to update the cache.
+ type: bool
+ update_cache_retries:
+ description:
+ - Amount of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
+ type: int
+ default: 5
+ version_added: '2.10'
+ update_cache_retry_max_delay:
+ description:
+ - Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
+ type: int
+ default: 12
+ version_added: '2.10'
+ cache_valid_time:
+ description:
+      - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
+ - As of Ansible 2.4, if explicitly set, this sets I(update_cache=yes).
+ default: 0
+ purge:
+ description:
+ - Will force purging of configuration files if the module state is set to I(absent).
+ type: bool
+ default: 'no'
+ default_release:
+ description:
+      - Corresponds to the C(-t) option for I(apt) and sets pin priorities.
+ install_recommends:
+ description:
+ - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install
+ recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
+ aliases: ['install-recommends']
+ type: bool
+ force:
+ description:
+      - 'Corresponds to the C(--force-yes) option of I(apt-get) and implies C(allow_unauthenticated: yes)'
+ - "This option will disable checking both the packages' signatures and the certificates of the
+ web servers they are downloaded from."
+ - 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
+ - '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
+ Please also see C(man apt-get) for more information.'
+ type: bool
+ default: 'no'
+ allow_unauthenticated:
+ description:
+ - Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
+ - 'C(allow_unauthenticated) is only supported with state: I(install)/I(present)'
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ upgrade:
+ description:
+ - If yes or safe, performs an aptitude safe-upgrade.
+ - If full, performs an aptitude full-upgrade.
+ - If dist, performs an apt-get dist-upgrade.
+ - 'Note: This does not upgrade a specific package, use state=latest for that.'
+ - 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.'
+ version_added: "1.1"
+ choices: [ dist, full, 'no', safe, 'yes' ]
+ default: 'no'
+ dpkg_options:
+ description:
+ - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
+ - Options should be supplied as comma separated list
+ default: force-confdef,force-confold
+ deb:
+ description:
+ - Path to a .deb package on the remote machine.
+      - If C(://) is in the path, ansible will attempt to download the deb before installing. (Version added 2.1)
+ - Requires the C(xz-utils) package to extract the control file of the deb package to install.
+ required: false
+ version_added: "1.6"
+ autoremove:
+ description:
+ - If C(yes), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option.
+ - Previous to version 2.4, autoclean was also an alias for autoremove, now it is its own separate command. See documentation for further information.
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ autoclean:
+ description:
+ - If C(yes), cleans the local repository of retrieved package files that can no longer be downloaded.
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ policy_rc_d:
+ description:
+ - Force the exit code of /usr/sbin/policy-rc.d.
+ - For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
+ - If /usr/sbin/policy-rc.d already exist, it is backed up and restored after the package installation.
+ - If C(null), the /usr/sbin/policy-rc.d isn't created/changed.
+ type: int
+ default: null
+ version_added: "2.8"
+ only_upgrade:
+ description:
+ - Only upgrade a package if it is already installed.
+ type: bool
+ default: 'no'
+ version_added: "2.1"
+ force_apt_get:
+ description:
+ - Force usage of apt-get instead of aptitude
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+ - aptitude (before 2.4)
+author: "Matthew Williams (@mgwilliams)"
+notes:
+ - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) required C(aptitude) up to 2.3; since 2.4, C(apt-get) is used as a fall-back.
+ - In most cases, packages installed with apt will start newly installed services by default. Most distributions have mechanisms to avoid this.
+ For example, when installing PostgreSQL 9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that returns
+ an exit code of 101 will stop PostgreSQL 9.5 from starting up after install. Remove the file or remove its execute permission afterwards.
+ - The apt-get command line supports implicit regex matches here but we do not because it can let typos through more easily
+ (if you typo C(foo) as C(fo), apt-get would install packages that have "fo" in their name with a warning and a prompt for the user;
+ since we don't have warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding).
+ - When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+- name: Install apache httpd (state=present is optional)
+ apt:
+ name: apache2
+ state: present
+
+- name: Update repositories cache and install "foo" package
+ apt:
+ name: foo
+ update_cache: yes
+
+- name: Remove "foo" package
+ apt:
+ name: foo
+ state: absent
+
+- name: Install the package "foo"
+ apt:
+ name: foo
+
+- name: Install a list of packages
+ apt:
+ pkg:
+ - foo
+ - foo-tools
+
+- name: Install the version '1.00' of package "foo"
+ apt:
+ name: foo=1.00
+
+- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backports
+ apt:
+ name: nginx
+ state: latest
+ default_release: squeeze-backports
+ update_cache: yes
+
+- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
+ apt:
+ name: openjdk-6-jdk
+ state: latest
+ install_recommends: no
+
+- name: Update all packages to their latest version
+ apt:
+ name: "*"
+ state: latest
+
+- name: Upgrade the OS (apt-get dist-upgrade)
+ apt:
+ upgrade: dist
+
+- name: Run the equivalent of "apt-get update" as a separate step
+ apt:
+ update_cache: yes
+
+- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
+ apt:
+ update_cache: yes
+ cache_valid_time: 3600
+
+- name: Pass options to dpkg on run
+ apt:
+ upgrade: dist
+ update_cache: yes
+ dpkg_options: 'force-confold,force-confdef'
+
+- name: Install a .deb package
+ apt:
+ deb: /tmp/mypackage.deb
+
+- name: Install the build dependencies for package "foo"
+ apt:
+ pkg: foo
+ state: build-dep
+
+- name: Install a .deb package from the internet
+ apt:
+ deb: https://example.com/python-ppq_0.1-1_all.deb
+
+- name: Remove useless packages from the cache
+ apt:
+ autoclean: yes
+
+- name: Remove dependencies that are no longer required
+ apt:
+ autoremove: yes
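+
+# A minimal sketch using the policy_rc_d option documented above; 101 is the
+# policy-rc.d code for "action forbidden", so no service is started on install.
+- name: Install "foo" without starting its services
+ apt:
+ name: foo
+ policy_rc_d: 101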
+
+'''
+
+RETURN = '''
+cache_updated:
+ description: whether the cache was updated or not
+ returned: success, in some cases
+ type: bool
+ sample: True
+cache_update_time:
+ description: time of the last cache update (0 if unknown)
+ returned: success, in some cases
+ type: int
+ sample: 1425828348000
+stdout:
+ description: output from apt
+ returned: success, when needed
+ type: str
+ sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..."
+stderr:
+ description: error output from apt
+ returned: success, when needed
+ type: str
+ sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
+''' # NOQA
+
+# added to stave off future warnings about apt api
+import warnings
+warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
+
+import datetime
+import fnmatch
+import itertools
+import os
+import random
+import re
+import shutil
+import sys
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.urls import fetch_file
+
+# APT related constants
+APT_ENV_VARS = dict(
+ DEBIAN_FRONTEND='noninteractive',
+ DEBIAN_PRIORITY='critical',
+ # We screenscrape apt-get and aptitude output for information so we need
+ # to make sure we use the C locale when running commands
+ LANG='C',
+ LC_ALL='C',
+ LC_MESSAGES='C',
+ LC_CTYPE='C',
+)
+
+DPKG_OPTIONS = 'force-confdef,force-confold'
+APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
+APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
+APT_LISTS_PATH = "/var/lib/apt/lists"
+APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
+APT_MARK_INVALID_OP = 'Invalid operation'
+APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
+
+CLEAN_OP_CHANGED_STR = dict(
+ autoremove='The following packages will be REMOVED',
+ # "Del python3-q 2.4-1 [24 kB]"
+ autoclean='Del ',
+)
+
+HAS_PYTHON_APT = True
+try:
+ import apt
+ import apt.debfile
+ import apt_pkg
+except ImportError:
+ HAS_PYTHON_APT = False
+
+if sys.version_info[0] < 3:
+ PYTHON_APT = 'python-apt'
+else:
+ PYTHON_APT = 'python3-apt'
+
+
+class PolicyRcD(object):
+ """
+ This class is a context manager for the /usr/sbin/policy-rc.d file.
+ It allows the user to prevent dpkg from starting the corresponding service when installing
+ a package.
+ https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
+ """
+
+ def __init__(self, module):
+ # we need the module for later use (eg. fail_json)
+ self.m = module
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ # if the /usr/sbin/policy-rc.d already exist
+ # we will back it up during package installation
+ # then restore it
+ if os.path.exists('/usr/sbin/policy-rc.d'):
+ self.backup_dir = tempfile.mkdtemp(prefix="ansible")
+ else:
+ self.backup_dir = None
+
+ def __enter__(self):
+ """
+ This method will be called when we enter the context, before we run `apt-get …`
+ """
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ # if the /usr/sbin/policy-rc.d already exist we back it up
+ if self.backup_dir:
+ try:
+ shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
+ except Exception:
+ self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
+
+ # we write /usr/sbin/policy-rc.d so it always exit with code policy_rc_d
+ try:
+ with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
+ policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
+
+ os.chmod('/usr/sbin/policy-rc.d', 0o0755)
+ except Exception:
+ self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
+
+ def __exit__(self, type, value, traceback):
+ """
+ This method will be called when we exit the context, after `apt-get …` has run
+ """
+
+ # if policy_rc_d is null then we don't need to modify policy-rc.d
+ if self.m.params['policy_rc_d'] is None:
+ return
+
+ if self.backup_dir:
+ # if /usr/sbin/policy-rc.d already exists before the call to __enter__
+ # we restore it (from the backup done in __enter__)
+ try:
+ shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
+ '/usr/sbin/policy-rc.d')
+ os.rmdir(self.backup_dir)
+ except Exception:
+ self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
+ % os.path.join(self.backup_dir, 'policy-rc.d'))
+ else:
+ # if there wasn't any /usr/sbin/policy-rc.d file before the call to __enter__
+ # we just remove the file
+ try:
+ os.remove('/usr/sbin/policy-rc.d')
+ except Exception:
+ self.m.fail_json(msg="Fail to remove /usr/sbin/policy-rc.d (after package manipulation)")
+
+
+def package_split(pkgspec):
+ parts = pkgspec.split('=', 1)
+ version = None
+ if len(parts) > 1:
+ version = parts[1]
+ return parts[0], version
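+
+# A quick sketch of the behaviour above: package_split('foo=1.00') returns
+# ('foo', '1.00'), while package_split('foo') returns ('foo', None).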
+
+
+def package_versions(pkgname, pkg, pkg_cache):
+ try:
+ versions = set(p.version for p in pkg.versions)
+ except AttributeError:
+ # assume older version of python-apt is installed
+ # apt.package.Package#versions requires python-apt >= 0.7.9.
+ pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname)
+ pkg_versions = (p.VersionList for p in pkg_cache_list)
+ versions = set(p.VerStr for p in itertools.chain(*pkg_versions))
+
+ return versions
+
+
+def package_version_compare(version, other_version):
+ try:
+ return apt_pkg.version_compare(version, other_version)
+ except AttributeError:
+ return apt_pkg.VersionCompare(version, other_version)
+
+
+def package_status(m, pkgname, version, cache, state):
+ try:
+ # get the package from the cache, as well as the
+ # low-level apt_pkg.Package object which contains
+ # state fields not directly accessible from the
+ # higher-level apt.package.Package object.
+ pkg = cache[pkgname]
+ ll_pkg = cache._cache[pkgname] # the low-level package object
+ except KeyError:
+ if state == 'install':
+ try:
+ provided_packages = cache.get_providing_packages(pkgname)
+ if provided_packages:
+ is_installed = False
+ upgradable = False
+ version_ok = False
+ # when a virtual package provides only one package, look up the status of the target package
+ if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
+ package = provided_packages[0]
+ installed, version_ok, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
+ if installed:
+ is_installed = True
+ return is_installed, version_ok, upgradable, False
+ m.fail_json(msg="No package matching '%s' is available" % pkgname)
+ except AttributeError:
+ # python-apt version too old to detect virtual packages
+ # mark as upgradable and let apt-get install deal with it
+ return False, False, True, False
+ else:
+ return False, False, False, False
+ try:
+ has_files = len(pkg.installed_files) > 0
+ except UnicodeDecodeError:
+ has_files = True
+ except AttributeError:
+ has_files = False # older python-apt cannot be used to determine non-purged
+
+ try:
+ package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
+ except AttributeError: # python-apt 0.7.X has very weak low-level object
+ try:
+ # might not be necessary as python-apt post-0.7.X should have current_state property
+ package_is_installed = pkg.is_installed
+ except AttributeError:
+ # assume older version of python-apt is installed
+ package_is_installed = pkg.isInstalled
+
+ version_is_installed = package_is_installed
+ if version:
+ versions = package_versions(pkgname, pkg, cache._cache)
+ avail_upgrades = fnmatch.filter(versions, version)
+
+ if package_is_installed:
+ try:
+ installed_version = pkg.installed.version
+ except AttributeError:
+ installed_version = pkg.installedVersion
+
+ # check if the version is matched as well
+ version_is_installed = fnmatch.fnmatch(installed_version, version)
+
+ # Only claim the package is upgradable if a candidate matches the version
+ package_is_upgradable = False
+ for candidate in avail_upgrades:
+ if package_version_compare(candidate, installed_version) > 0:
+ package_is_upgradable = True
+ break
+ else:
+ package_is_upgradable = bool(avail_upgrades)
+ else:
+ try:
+ package_is_upgradable = pkg.is_upgradable
+ except AttributeError:
+ # assume older version of python-apt is installed
+ package_is_upgradable = pkg.isUpgradable
+
+ return package_is_installed, version_is_installed, package_is_upgradable, has_files
+
+
+def expand_dpkg_options(dpkg_options_compressed):
+ options_list = dpkg_options_compressed.split(',')
+ dpkg_options = ""
+ for dpkg_option in options_list:
+ dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
+ % (dpkg_options, dpkg_option)
+ return dpkg_options.strip()
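+
+# For example, expand_dpkg_options('force-confdef,force-confold') returns
+# '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'.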
+
+
+def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
+ # Note: apt-get does implicit regex matching when an exact package name
+ # match is not found. Something like this:
+ # matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
+ # (Should also deal with the ':' for multiarch like the fnmatch code below)
+ #
+ # We have decided not to do similar implicit regex matching but might take
+ # a PR to add some sort of explicit regex matching:
+ # https://github.com/ansible/ansible-modules-core/issues/1258
+ new_pkgspec = []
+ if pkgspec:
+ for pkgspec_pattern in pkgspec:
+ pkgname_pattern, version = package_split(pkgspec_pattern)
+
+ # note that none of these chars is allowed in a (debian) pkgname
+ if frozenset('*?[]!').intersection(pkgname_pattern):
+ # handle multiarch pkgnames, the idea is that "apt*" should
+ # only select native packages. But "apt*:i386" should still work
+ if ":" not in pkgname_pattern:
+ # Filter the multiarch packages from the cache only once
+ try:
+ pkg_name_cache = _non_multiarch
+ except NameError:
+ pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
+ else:
+ # Create a cache of pkg_names including multiarch only once
+ try:
+ pkg_name_cache = _all_pkg_names
+ except NameError:
+ pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
+
+ matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
+
+ if not matches:
+ m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern))
+ else:
+ new_pkgspec.extend(matches)
+ else:
+ # No wildcards in name
+ new_pkgspec.append(pkgspec_pattern)
+ return new_pkgspec
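+
+# Illustrative behaviour: ['apt*'] expands to every native package name in the
+# cache matching 'apt*', ['apt*:i386'] also matches multiarch names, and a
+# plain spec such as ['foo=1.00'] is passed through unchanged.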
+
+
+def parse_diff(output):
+ diff = to_native(output).splitlines()
+ try:
+ # check for start marker from aptitude
+ diff_start = diff.index('Resolving dependencies...')
+ except ValueError:
+ try:
+ # check for start marker from apt-get
+ diff_start = diff.index('Reading state information...')
+ except ValueError:
+ # show everything
+ diff_start = -1
+ try:
+ # check for end marker line from both apt-get and aptitude
+ diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
+ except StopIteration:
+ diff_end = len(diff)
+ diff_start += 1
+ diff_end += 1
+ return {'prepared': '\n'.join(diff[diff_start:diff_end])}
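+
+# A sketch of the slicing above: for apt-get output, 'prepared' runs from just
+# after the "Reading state information..." line through the "N upgraded, ..."
+# summary line; if no marker is found, the whole output is kept.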
+
+
+def mark_installed_manually(m, packages):
+ if not packages:
+ return
+
+ apt_mark_cmd_path = m.get_bin_path("apt-mark")
+
+ # https://github.com/ansible/ansible/issues/40531
+ if apt_mark_cmd_path is None:
+ m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
+ return
+
+ cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
+ rc, out, err = m.run_command(cmd)
+
+ if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
+ cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
+ rc, out, err = m.run_command(cmd)
+
+ if rc != 0:
+ m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+
+
+def install(m, pkgspec, cache, upgrade=False, default_release=None,
+ install_recommends=None, force=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
+ build_dep=False, fixed=False, autoremove=False, only_upgrade=False,
+ allow_unauthenticated=False):
+ pkg_list = []
+ packages = ""
+ pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
+ package_names = []
+ for package in pkgspec:
+ if build_dep:
+ # Let apt decide what to install
+ pkg_list.append("'%s'" % package)
+ continue
+
+ name, version = package_split(package)
+ package_names.append(name)
+ installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
+ if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
+ pkg_list.append("'%s'" % package)
+ if installed_version and upgradable and version:
+ # This happens when the package is installed, a newer version is
+ # available, and the version is a wildcard that matches both
+ #
+ # We do not apply the upgrade flag because we cannot specify both
+ # a version and state=latest. (This behaviour mirrors how apt
+ # treats a version with wildcard in the package)
+ pkg_list.append("'%s'" % package)
+ packages = ' '.join(pkg_list)
+
+ if packages:
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if only_upgrade:
+ only_upgrade = '--only-upgrade'
+ else:
+ only_upgrade = ''
+
+ if fixed:
+ fixed = '--fix-broken'
+ else:
+ fixed = ''
+
+ if build_dep:
+ cmd = "%s -y %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, check_arg, packages)
+ else:
+ cmd = "%s -y %s %s %s %s %s %s install %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, check_arg, packages)
+
+ if default_release:
+ cmd += " -t '%s'" % (default_release,)
+
+ if install_recommends is False:
+ cmd += " -o APT::Install-Recommends=no"
+ elif install_recommends is True:
+ cmd += " -o APT::Install-Recommends=yes"
+ # install_recommends is None uses the OS default
+
+ if allow_unauthenticated:
+ cmd += " --allow-unauthenticated"
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ status = True
+
+ changed = True
+ if build_dep:
+ changed = APT_GET_ZERO not in out
+
+ data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
+ if rc:
+ status = False
+ data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
+ else:
+ status = True
+ data = dict(changed=False)
+
+ if not build_dep:
+ mark_installed_manually(m, package_names)
+
+ return (status, data)
+
+
+def get_field_of_deb(m, deb_file, field="Version"):
+ cmd_dpkg = m.get_bin_path("dpkg", True)
+ cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
+ rc, stdout, stderr = m.run_command(cmd)
+ if rc != 0:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+ return to_native(stdout).strip('\n')
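+
+# For example, get_field_of_deb(m, '/tmp/mypackage.deb', 'Package') runs
+# "dpkg --field /tmp/mypackage.deb Package" and returns that field's value.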
+
+
+def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options):
+ changed = False
+ deps_to_install = []
+ pkgs_to_install = []
+ for deb_file in debs.split(','):
+ try:
+ pkg = apt.debfile.DebPackage(deb_file)
+ pkg_name = get_field_of_deb(m, deb_file, "Package")
+ pkg_version = get_field_of_deb(m, deb_file, "Version")
+ if len(apt_pkg.get_architectures()) > 1:
+ pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
+ pkg_key = "%s:%s" % (pkg_name, pkg_arch)
+ else:
+ pkg_key = pkg_name
+ try:
+ installed_pkg = apt.Cache()[pkg_key]
+ installed_version = installed_pkg.installed.version
+ if package_version_compare(pkg_version, installed_version) == 0:
+ # No down-/upgrade needed, move on to the next package
+ continue
+ except Exception:
+ # Must not be installed, continue with installation
+ pass
+ # Check if package is installable
+ if not pkg.check() and not force:
+ m.fail_json(msg=pkg._failure_string)
+
+ # add any missing deps to the list of deps we need
+ # to install so they're all done in one shot
+ deps_to_install.extend(pkg.missing_deps)
+
+ except Exception as e:
+ m.fail_json(msg="Unable to install package: %s" % to_native(e))
+
+ # and add this deb to the list of packages to install
+ pkgs_to_install.append(deb_file)
+
+ # install the deps through apt
+ retvals = {}
+ if deps_to_install:
+ (success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
+ install_recommends=install_recommends,
+ allow_unauthenticated=allow_unauthenticated,
+ dpkg_options=expand_dpkg_options(dpkg_options))
+ if not success:
+ m.fail_json(**retvals)
+ changed = retvals.get('changed', False)
+
+ if pkgs_to_install:
+ options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
+ if m.check_mode:
+ options += " --simulate"
+ if force:
+ options += " --force-all"
+
+ cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if "stdout" in retvals:
+ stdout = retvals["stdout"] + out
+ else:
+ stdout = out
+ if "diff" in retvals:
+ diff = retvals["diff"]
+ if 'prepared' in diff:
+ diff['prepared'] += '\n\n' + out
+ else:
+ diff = parse_diff(out)
+ if "stderr" in retvals:
+ stderr = retvals["stderr"] + err
+ else:
+ stderr = err
+
+ if rc == 0:
+ m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
+ else:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
+
+
+def remove(m, pkgspec, cache, purge=False, force=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
+ pkg_list = []
+ pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
+ for package in pkgspec:
+ name, version = package_split(package)
+ installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='remove')
+ if installed_version or (has_files and purge):
+ pkg_list.append("'%s'" % package)
+ packages = ' '.join(pkg_list)
+
+ if not packages:
+ m.exit_json(changed=False)
+ else:
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if purge:
+ purge = '--purge'
+ else:
+ purge = ''
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc)
+ m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
+
+
+def cleanup(m, purge=False, force=False, operation=None,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
+
+ if operation not in frozenset(['autoremove', 'autoclean']):
+ raise AssertionError('Expected "autoremove" or "autoclean" cleanup operation, got %s' % operation)
+
+ if force:
+ force_yes = '--force-yes'
+ else:
+ force_yes = ''
+
+ if purge:
+ purge = '--purge'
+ else:
+ purge = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc)
+
+ changed = CLEAN_OP_CHANGED_STR[operation] in out
+
+ m.exit_json(changed=changed, stdout=out, stderr=err, diff=diff)
+
+
+def upgrade(m, mode="yes", force=False, default_release=None,
+ use_apt_get=False,
+ dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False,
+ allow_unauthenticated=False,
+ ):
+
+ if autoremove:
+ autoremove = '--auto-remove'
+ else:
+ autoremove = ''
+
+ if m.check_mode:
+ check_arg = '--simulate'
+ else:
+ check_arg = ''
+
+ apt_cmd = None
+ prompt_regex = None
+ if mode == "dist" or (mode == "full" and use_apt_get):
+ # apt-get dist-upgrade
+ apt_cmd = APT_GET_CMD
+ upgrade_command = "dist-upgrade %s" % (autoremove)
+ elif mode == "full" and not use_apt_get:
+ # aptitude full-upgrade
+ apt_cmd = APTITUDE_CMD
+ upgrade_command = "full-upgrade"
+ else:
+ if use_apt_get:
+ apt_cmd = APT_GET_CMD
+ upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
+ else:
+ # aptitude safe-upgrade # mode=yes # default
+ apt_cmd = APTITUDE_CMD
+ upgrade_command = "safe-upgrade"
+ prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
+
+ if force:
+ if apt_cmd == APT_GET_CMD:
+ force_yes = '--force-yes'
+ else:
+ force_yes = '--assume-yes --allow-untrusted'
+ else:
+ force_yes = ''
+
+ allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
+
+ if apt_cmd is None:
+ if use_apt_get:
+ apt_cmd = APT_GET_CMD
+ else:
+ m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
+ "to have APTITUDE in path or use 'force_apt_get=True'")
+ apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
+
+ cmd = '%s -y %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, allow_unauthenticated,
+ check_arg, upgrade_command)
+
+ if default_release:
+ cmd += " -t '%s'" % (default_release,)
+
+ with PolicyRcD(m):
+ rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
+
+ if m._diff:
+ diff = parse_diff(out)
+ else:
+ diff = {}
+ if rc:
+ m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
+ if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
+ m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
+ m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
+
+
+def get_cache_mtime():
+ """Return mtime of a valid apt cache file.
+ Stat the apt cache file; if no cache file is found, return 0
+ :returns: ``int``
+ """
+ cache_time = 0
+ if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
+ cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
+ elif os.path.exists(APT_LISTS_PATH):
+ cache_time = os.stat(APT_LISTS_PATH).st_mtime
+ return cache_time
+
+
+def get_updated_cache_time():
+ """Return the mtime time stamp and the updated cache time.
+ Always retrieve the mtime of the apt cache or set the `cache_mtime`
+ variable to 0
+ :returns: ``tuple``
+ """
+ cache_mtime = get_cache_mtime()
+ mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
+ updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
+ return mtimestamp, updated_cache_time
+
+
+# https://github.com/ansible/ansible-modules-core/issues/2951
+def get_cache(module):
+ '''Attempt to get the cache object and update until it works'''
+ cache = None
+ try:
+ cache = apt.Cache()
+ except SystemError as e:
+ if '/var/lib/apt/lists/' in to_native(e).lower():
+ # update cache until files are fixed or retries exceeded
+ retries = 0
+ while retries < 2:
+ (rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
+ retries += 1
+ if rc == 0:
+ break
+ if rc != 0:
+ module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (to_native(e), so + se), rc=rc)
+ # try again
+ cache = apt.Cache()
+ else:
+ module.fail_json(msg=to_native(e))
+ return cache
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
+ update_cache=dict(type='bool', aliases=['update-cache']),
+ update_cache_retries=dict(type='int', default=5),
+ update_cache_retry_max_delay=dict(type='int', default=12),
+ cache_valid_time=dict(type='int', default=0),
+ purge=dict(type='bool', default=False),
+ package=dict(type='list', elements='str', aliases=['pkg', 'name']),
+ deb=dict(type='path'),
+ default_release=dict(type='str', aliases=['default-release']),
+ install_recommends=dict(type='bool', aliases=['install-recommends']),
+ force=dict(type='bool', default=False),
+ upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
+ dpkg_options=dict(type='str', default=DPKG_OPTIONS),
+ autoremove=dict(type='bool', default=False),
+ autoclean=dict(type='bool', default=False),
+ policy_rc_d=dict(type='int', default=None),
+ only_upgrade=dict(type='bool', default=False),
+ force_apt_get=dict(type='bool', default=False),
+ allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
+ ),
+ mutually_exclusive=[['deb', 'package', 'upgrade']],
+ required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = APT_ENV_VARS
+
+ if not HAS_PYTHON_APT:
+ if module.check_mode:
+ module.fail_json(msg="%s must be installed to use check mode. "
+ "If run normally this module can auto-install it." % PYTHON_APT)
+ try:
+ # We skip the cache update when auto-installing the dependency if the
+ # user explicitly declared it with update_cache=no.
+ if module.params.get('update_cache') is False:
+ module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
+ else:
+ module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
+ module.run_command(['apt-get', 'update'], check_rc=True)
+
+ module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
+ global apt, apt_pkg
+ import apt
+ import apt.debfile
+ import apt_pkg
+ except ImportError:
+ module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
+ "Please install %s package." % PYTHON_APT)
+
+ global APTITUDE_CMD
+ APTITUDE_CMD = module.get_bin_path("aptitude", False)
+ global APT_GET_CMD
+ APT_GET_CMD = module.get_bin_path("apt-get")
+
+ p = module.params
+
+ if p['upgrade'] == 'no':
+ p['upgrade'] = None
+
+ use_apt_get = p['force_apt_get']
+
+ if not use_apt_get and not APTITUDE_CMD:
+ use_apt_get = True
+
+ updated_cache = False
+ updated_cache_time = 0
+ install_recommends = p['install_recommends']
+ allow_unauthenticated = p['allow_unauthenticated']
+ dpkg_options = expand_dpkg_options(p['dpkg_options'])
+ autoremove = p['autoremove']
+ autoclean = p['autoclean']
+
+ # Get the cache object
+ cache = get_cache(module)
+
+ try:
+ if p['default_release']:
+ try:
+ apt_pkg.config['APT::Default-Release'] = p['default_release']
+ except AttributeError:
+ apt_pkg.Config['APT::Default-Release'] = p['default_release']
+ # reopen cache w/ modified config
+ cache.open(progress=None)
+
+ mtimestamp, updated_cache_time = get_updated_cache_time()
+ # cache_valid_time defaults to 0, in which case the cache is updated
+ # whenever `update_cache` is set to true
+ updated_cache = False
+ if p['update_cache'] or p['cache_valid_time']:
+ now = datetime.datetime.now()
+ tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
+ if not mtimestamp + tdelta >= now:
+ # Retry to update the cache with exponential backoff
+ err = ''
+ update_cache_retries = module.params.get('update_cache_retries')
+ update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
+ randomize = random.randint(0, 1000) / 1000.0
+
+ for retry in range(update_cache_retries):
+ try:
+ cache.update()
+ break
+ except apt.cache.FetchFailedException as e:
+ err = to_native(e)
+
+ # Use exponential backoff plus a little bit of randomness
+ delay = 2 ** retry + randomize
+ if delay > update_cache_retry_max_delay:
+ delay = update_cache_retry_max_delay + randomize
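+ # with the defaults (5 retries, 12s cap) the sleeps are roughly
+ # 1, 2, 4, 8 and 12 seconds, each plus up to 1s of jitter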
+ time.sleep(delay)
+ else:
+ module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+
+ cache.open(progress=None)
+ mtimestamp, post_cache_update_time = get_updated_cache_time()
+ if updated_cache_time != post_cache_update_time:
+ updated_cache = True
+ updated_cache_time = post_cache_update_time
+
+ # If there is nothing else to do exit. This will set state as
+ # changed based on if the cache was updated.
+ if not p['package'] and not p['upgrade'] and not p['deb']:
+ module.exit_json(
+ changed=updated_cache,
+ cache_updated=updated_cache,
+ cache_update_time=updated_cache_time
+ )
+
+ force_yes = p['force']
+
+ if p['upgrade']:
+ upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
+
+ if p['deb']:
+ if p['state'] != 'present':
+ module.fail_json(msg="deb only supports state=present")
+ if '://' in p['deb']:
+ p['deb'] = fetch_file(module, p['deb'])
+ install_deb(module, p['deb'], cache,
+ install_recommends=install_recommends,
+ allow_unauthenticated=allow_unauthenticated,
+ force=force_yes, dpkg_options=p['dpkg_options'])
+
+ unfiltered_packages = p['package'] or ()
+ packages = [package.strip() for package in unfiltered_packages if package != '*']
+ all_installed = '*' in unfiltered_packages
+ latest = p['state'] == 'latest'
+
+ if latest and all_installed:
+ if packages:
+ module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
+ upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
+
+ if packages:
+ for package in packages:
+ if package.count('=') > 1:
+ module.fail_json(msg="invalid package spec: %s" % package)
+ if latest and '=' in package:
+ module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
+
+ if not packages:
+ if autoclean:
+ cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
+ if autoremove:
+ cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
+
+ if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
+ state_upgrade = False
+ state_builddep = False
+ state_fixed = False
+ if p['state'] == 'latest':
+ state_upgrade = True
+ if p['state'] == 'build-dep':
+ state_builddep = True
+ if p['state'] == 'fixed':
+ state_fixed = True
+
+ success, retvals = install(
+ module,
+ packages,
+ cache,
+ upgrade=state_upgrade,
+ default_release=p['default_release'],
+ install_recommends=install_recommends,
+ force=force_yes,
+ dpkg_options=dpkg_options,
+ build_dep=state_builddep,
+ fixed=state_fixed,
+ autoremove=autoremove,
+ only_upgrade=p['only_upgrade'],
+ allow_unauthenticated=allow_unauthenticated
+ )
+
+ # Store if the cache has been updated
+ retvals['cache_updated'] = updated_cache
+ # Store when the update time was last
+ retvals['cache_update_time'] = updated_cache_time
+
+ if success:
+ module.exit_json(**retvals)
+ else:
+ module.fail_json(**retvals)
+ elif p['state'] == 'absent':
+ remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
+
+ except apt.cache.LockFailedException as lockFailedException:
+ module.fail_json(msg="Failed to lock apt for exclusive operation: %s" % lockFailedException)
+ except apt.cache.FetchFailedException as fetchFailedException:
+ module.fail_json(msg="Could not fetch updated apt files: %s" % fetchFailedException)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/apt_key.py b/lib/ansible/modules/apt_key.py
new file mode 100644
index 00000000..d8bb6e15
--- /dev/null
+++ b/lib/ansible/modules/apt_key.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt_key
+author:
+- Jayson Vantuyl (@jvantuyl)
+version_added: "1.0"
+short_description: Add or remove an apt key
+description:
+ - Add or remove an I(apt) key, optionally downloading it.
+notes:
+ - Doesn't download the key unless it really needs it.
+ - As a sanity check, the downloaded key id must match the one specified.
+ - "Use full fingerprint (40 characters) key ids to avoid key collisions.
+ To generate a full-fingerprint imported key: C(apt-key adv --list-public-keys --with-fingerprint --with-colons)."
+ - If you specify both the key id and the URL with C(state=present), the task can verify or add the key as needed.
+ - Adding a new key requires an apt cache update (e.g. using the apt module's update_cache option)
+requirements:
+ - gpg
+options:
+ id:
+ description:
+ - The identifier of the key.
+ - Including this allows check mode to correctly report the changed state.
+ - If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead.
+ - This parameter is required when C(state) is set to C(absent).
+ data:
+ description:
+ - The keyfile contents to add to the keyring.
+ file:
+ description:
+ - The path to a keyfile on the remote server to add to the keyring.
+ keyring:
+ description:
+ - The full path to a specific keyring file in /etc/apt/trusted.gpg.d/
+ version_added: "1.3"
+ url:
+ description:
+ - The URL to retrieve key from.
+ keyserver:
+ description:
+ - The keyserver to retrieve key from.
+ version_added: "1.6"
+ state:
+ description:
+ - Ensures that the key is present (added) or absent (revoked).
+ choices: [ absent, present ]
+ default: present
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Add an apt key by id from a keyserver
+ apt_key:
+ keyserver: keyserver.ubuntu.com
+ id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
+
+- name: Add an Apt signing key, uses whichever key is at the URL
+ apt_key:
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ state: present
+
+- name: Add an Apt signing key, will not download if present
+ apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ state: present
+
+- name: Remove an Apt specific signing key, leading 0x is valid
+ apt_key:
+ id: 0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ state: absent
+
+# Use armored file since utf-8 string is expected. Must be of "PGP PUBLIC KEY BLOCK" type.
+- name: Add a key from a file on the Ansible server.
+ apt_key:
+ data: "{{ lookup('file', 'apt.asc') }}"
+ state: present
+
+- name: Add an Apt signing key to a specific keyring file
+ apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ url: https://ftp-master.debian.org/keys/archive-key-6.0.asc
+ keyring: /etc/apt/trusted.gpg.d/debian.gpg
+
+- name: Add Apt signing key on remote server to keyring
+ apt_key:
+ id: 9FED2BCBDCD29CDF762678CBAED4B06F473041FA
+ file: /tmp/apt.gpg
+ state: present
+'''
+
+
+# FIXME: standardize into module_common
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+apt_key_bin = None
+
+
+def find_needed_binaries(module):
+ global apt_key_bin
+
+ apt_key_bin = module.get_bin_path('apt-key', required=True)
+
+ # FIXME: Is there a reason that gpg and grep are checked? Is it just
+ # cruft or does the apt .deb package not require them (and if they're not
+ # installed, /usr/bin/apt-key fails?)
+ module.get_bin_path('gpg', required=True)
+ module.get_bin_path('grep', required=True)
+
+
+def parse_key_id(key_id):
+ """validate the key_id and break it into segments
+
+ :arg key_id: The key_id as supplied by the user. A valid key_id will be
+ 8, 16, or more hexadecimal chars with an optional leading ``0x``.
+ :returns: The portion of key_id suitable for apt-key del, the portion
+ suitable for comparisons with --list-public-keys, and the portion that
+ can be used with --recv-key. If key_id is long enough, these will be
+ the last 8 characters of key_id, the last 16 characters, and all of
+ key_id. If key_id is not long enough, some of the values will be the
+ same.
+
+ * apt-key del <= 1.10 has a bug with key_id != 8 chars
+ * apt-key adv --list-public-keys prints 16 chars
+ * apt-key adv --recv-key can take more chars
+
+ """
+ # Make sure the key_id is valid hexadecimal
+ int(key_id, 16)
+
+ key_id = key_id.upper()
+ if key_id.startswith('0X'):
+ key_id = key_id[2:]
+
+ key_id_len = len(key_id)
+ if key_id_len not in (8, 16) and key_id_len <= 16:
+ raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
+
+ short_key_id = key_id[-8:]
+
+ fingerprint = key_id
+ if key_id_len > 16:
+ fingerprint = key_id[-16:]
+
+ return short_key_id, fingerprint, key_id
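+
+# A worked sketch, using the key id from the examples above:
+# parse_key_id('0x9FED2BCBDCD29CDF762678CBAED4B06F473041FA') returns
+# ('473041FA', 'AED4B06F473041FA', '9FED2BCBDCD29CDF762678CBAED4B06F473041FA').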
+
+
+def all_keys(module, keyring, short_format):
+ if keyring:
+ cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
+ else:
+ cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
+ (rc, out, err) = module.run_command(cmd)
+ results = []
+ lines = to_native(out).split('\n')
+ for line in lines:
+ if (line.startswith("pub") or line.startswith("sub")) and "expired" not in line:
+ tokens = line.split()
+ code = tokens[1]
+ (len_type, real_code) = code.split("/")
+ results.append(real_code)
+ if short_format:
+ results = shorten_key_ids(results)
+ return results
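+
+# Sketch of the parsing above (exact layout varies with the gpg version):
+# a line such as "pub rsa4096/AED4B06F473041FA 2012-05-08 [SC]" splits into
+# tokens, and tokens[1] ("rsa4096/AED4B06F473041FA") yields "AED4B06F473041FA".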
+
+
+def shorten_key_ids(key_id_list):
+ """
+ Takes a list of key ids, and converts them to the 'short' format,
+ by reducing them to their last 8 characters.
+ """
+ short = []
+ for key in key_id_list:
+ short.append(key[-8:])
+ return short
+
+
+def download_key(module, url):
+ # FIXME: move get_url code to common, allow for in-memory D/L, support proxies
+ # and reuse here
+ if url is None:
+ module.fail_json(msg="needed a URL but was not specified")
+
+ try:
+ rsp, info = fetch_url(module, url)
+ if info['status'] != 200:
+ module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
+
+ return rsp.read()
+ except Exception:
+ module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+
+
+def import_key(module, keyring, keyserver, key_id):
+ if keyring:
+ cmd = "%s --keyring %s adv --no-tty --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
+ else:
+ cmd = "%s adv --no-tty --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
+ for retry in range(5):
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ (rc, out, err) = module.run_command(cmd, environ_update=lang_env)
+ if rc == 0:
+ break
+ else:
+ # Out of retries
+ if rc == 2 and 'not found on keyserver' in out:
+ msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg)
+ else:
+ msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
+ module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err)
+ return True
+
+
+def add_key(module, keyfile, keyring, data=None):
+ if data is not None:
+ if keyring:
+ cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
+ else:
+ cmd = "%s add -" % apt_key_bin
+ (rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
+ else:
+ if keyring:
+ cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
+ else:
+ cmd = "%s add %s" % (apt_key_bin, keyfile)
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+ return True
+
+
+def remove_key(module, key_id, keyring):
+ # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
+ if keyring:
+ cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
+ else:
+ cmd = '%s del %s' % (apt_key_bin, key_id)
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str'),
+ url=dict(type='str'),
+ data=dict(type='str'),
+ file=dict(type='path'),
+ key=dict(type='str'),
+ keyring=dict(type='path'),
+ validate_certs=dict(type='bool', default=True),
+ keyserver=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('data', 'file', 'keyserver', 'url'),),
+ )
+
+ key_id = module.params['id']
+ url = module.params['url']
+ data = module.params['data']
+ filename = module.params['file']
+ keyring = module.params['keyring']
+ state = module.params['state']
+ keyserver = module.params['keyserver']
+ changed = False
+
+ fingerprint = short_key_id = key_id
+ short_format = False
+ if key_id:
+ try:
+ short_key_id, fingerprint, key_id = parse_key_id(key_id)
+ except ValueError:
+ module.fail_json(msg='Invalid key_id', id=key_id)
+
+ if len(fingerprint) == 8:
+ short_format = True
+
+ find_needed_binaries(module)
+
+ keys = all_keys(module, keyring, short_format)
+ return_values = {}
+
+ if state == 'present':
+ if fingerprint and fingerprint in keys:
+ module.exit_json(changed=False)
+ elif fingerprint and fingerprint not in keys and module.check_mode:
+ # TODO: Someday we could go further -- write keys out to
+ # a temporary file and then extract the key id from there via gpg
+ # to decide if the key is installed or not.
+ module.exit_json(changed=True)
+ else:
+ if not filename and not data and not keyserver:
+ data = download_key(module, url)
+
+ if filename:
+ add_key(module, filename, keyring)
+ elif keyserver:
+ import_key(module, keyring, keyserver, key_id)
+ else:
+ add_key(module, "-", keyring, data)
+
+ changed = False
+ keys2 = all_keys(module, keyring, short_format)
+ if len(keys) != len(keys2):
+ changed = True
+
+ if fingerprint and fingerprint not in keys2:
+ module.fail_json(msg="key does not seem to have been added", id=key_id)
+ module.exit_json(changed=changed)
+
+ elif state == 'absent':
+ if not key_id:
+ module.fail_json(msg="key is required")
+ if fingerprint in keys:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # we use the "short" id: key_id[-8:], short_format=True
+ # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
+ if remove_key(module, short_key_id, keyring):
+ keys = all_keys(module, keyring, short_format)
+ if fingerprint in keys:
+ module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)",
+ id=key_id)
+ changed = True
+ else:
+ # FIXME: module.fail_json or exit-json immediately at point of failure
+ module.fail_json(msg="error removing key_id", **return_values)
+
+ module.exit_json(changed=changed, **return_values)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/apt_repository.py b/lib/ansible/modules/apt_repository.py
new file mode 100644
index 00000000..834bdec1
--- /dev/null
+++ b/lib/ansible/modules/apt_repository.py
@@ -0,0 +1,600 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
+# Copyright: (c) 2013, Alexander Saltanov <asd@mokote.com>
+# Copyright: (c) 2014, Rutger Spiertz <rutger@kumina.nl>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apt_repository
+short_description: Add and remove APT repositories
+description:
+ - Add or remove an APT repository in Ubuntu and Debian.
+notes:
+ - This module works on Debian, Ubuntu and their derivatives.
+ - This module supports Debian Squeeze (version 6) as well as its successors.
+options:
+ repo:
+ description:
+ - A source string for the repository.
+ required: true
+ state:
+ description:
+ - A source string state.
+ choices: [ absent, present ]
+ default: "present"
+ mode:
+ description:
+ - The octal mode for newly created files in sources.list.d
+ default: '0644'
+ version_added: "1.6"
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
+ type: bool
+ default: "yes"
+ update_cache_retries:
+ description:
+ - Number of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
+ type: int
+ default: 5
+ version_added: '2.10'
+ update_cache_retry_max_delay:
+ description:
+ - Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
+ type: int
+ default: 12
+ version_added: '2.10'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ version_added: '1.8'
+ filename:
+ description:
+ - Sets the name of the source list file in sources.list.d.
+ Defaults to a file name based on the repository source url.
+ The .list extension will be automatically added.
+ version_added: '2.1'
+ codename:
+ description:
+ - Override the distribution codename to use for PPA repositories.
+ Should usually only be set when working with a PPA on a non-Ubuntu target (e.g. Debian or Mint)
+ version_added: '2.3'
+author:
+- Alexander Saltanov (@sashka)
+version_added: "0.7"
+requirements:
+ - python-apt (python 2)
+ - python3-apt (python 3)
+'''
+
+EXAMPLES = '''
+- name: Add specified repository into sources list
+ apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: present
+
+- name: Add specified repository into sources list using specified filename
+ apt_repository:
+ repo: deb http://dl.google.com/linux/chrome/deb/ stable main
+ state: present
+ filename: google-chrome
+
+- name: Add source repository into sources list
+ apt_repository:
+ repo: deb-src http://archive.canonical.com/ubuntu hardy partner
+ state: present
+
+- name: Remove specified repository from sources list
+ apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ state: absent
+
+- name: Add nginx stable repository from PPA and install its signing key on Ubuntu target
+ apt_repository:
+ repo: ppa:nginx/stable
+
+- name: Add nginx stable repository from PPA and install its signing key on Debian target
+ apt_repository:
+ repo: 'ppa:nginx/stable'
+ codename: trusty
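+
+# Illustrative values for the retry options documented above.
+- name: Add specified repository, retrying a flaky cache update for longer
+ apt_repository:
+ repo: deb http://archive.canonical.com/ubuntu hardy partner
+ update_cache_retries: 10
+ update_cache_retry_max_delay: 30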
+'''
+
+import glob
+import json
+import os
+import re
+import sys
+import tempfile
+import copy
+import random
+import time
+
+try:
+ import apt
+ import apt_pkg
+ import aptsources.distro as aptsources_distro
+ distro = aptsources_distro.get_distro()
+ HAVE_PYTHON_APT = True
+except ImportError:
+ distro = None
+ HAVE_PYTHON_APT = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+if sys.version_info[0] < 3:
+ PYTHON_APT = 'python-apt'
+else:
+ PYTHON_APT = 'python3-apt'
+
+DEFAULT_SOURCES_PERM = 0o0644
+
+VALID_SOURCE_TYPES = ('deb', 'deb-src')
+
+
+def install_python_apt(module):
+
+ if not module.check_mode:
+ apt_get_path = module.get_bin_path('apt-get')
+ if apt_get_path:
+ rc, so, se = module.run_command([apt_get_path, 'update'])
+ if rc != 0:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
+ rc, so, se = module.run_command([apt_get_path, 'install', PYTHON_APT, '-y', '-q'])
+ if rc == 0:
+ global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
+ import apt
+ import apt_pkg
+ import aptsources.distro as aptsources_distro
+ distro = aptsources_distro.get_distro()
+ HAVE_PYTHON_APT = True
+ else:
+ module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
+ else:
+ module.fail_json(msg="%s must be installed to use check mode" % PYTHON_APT)
+
+
+class InvalidSource(Exception):
+ pass
+
+
+# Simple version of aptsources.sourceslist.SourcesList.
+# No advanced logic and no backups inside.
+class SourcesList(object):
+ def __init__(self, module):
+ self.module = module
+ self.files = {} # group sources by file
+ # Repositories that we're adding -- used to implement mode param
+ self.new_repos = set()
+ self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
+
+ # read sources.list if it exists
+ if os.path.isfile(self.default_file):
+ self.load(self.default_file)
+
+ # read sources.list.d
+ for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
+ self.load(file)
+
+ def __iter__(self):
+ '''Simple iterator to go over all sources. Empty, non-source, and other invalid lines will be skipped.'''
+ for file, sources in self.files.items():
+ for n, valid, enabled, source, comment in sources:
+ if valid:
+ yield file, n, enabled, source, comment
+
+ def _expand_path(self, filename):
+ if '/' in filename:
+ return filename
+ else:
+ return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
+
+ def _suggest_filename(self, line):
+ def _cleanup_filename(s):
+ filename = self.module.params['filename']
+ if filename is not None:
+ return filename
+ return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
+
+ def _strip_username_password(s):
+ if '@' in s:
+ s = s.split('@', 1)
+ s = s[-1]
+ return s
+
+ # Drop options and protocols.
+ line = re.sub(r'\[[^\]]+\]', '', line)
+ line = re.sub(r'\w+://', '', line)
+
+ # split line into valid keywords
+ parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
+
+ # Drop usernames and passwords
+ parts[0] = _strip_username_password(parts[0])
+
+ return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
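+
+ # For example, 'deb http://dl.google.com/linux/chrome/deb/ stable main'
+ # suggests 'dl_google_com_linux_chrome_deb.list' when no filename is set.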
+
+ def _parse(self, line, raise_if_invalid_or_disabled=False):
+ valid = False
+ enabled = True
+ source = ''
+ comment = ''
+
+ line = line.strip()
+ if line.startswith('#'):
+ enabled = False
+ line = line[1:]
+
+ # Check for another "#" in the line and treat a part after it as a comment.
+ i = line.find('#')
+ if i > 0:
+ comment = line[i + 1:].strip()
+ line = line[:i]
+
+ # Split a source into substrings to make sure that it is a source spec.
+ # Duplicated whitespaces in a valid source spec will be removed.
+ source = line.strip()
+ if source:
+ chunks = source.split()
+ if chunks[0] in VALID_SOURCE_TYPES:
+ valid = True
+ source = ' '.join(chunks)
+
+ if raise_if_invalid_or_disabled and (not valid or not enabled):
+ raise InvalidSource(line)
+
+ return valid, enabled, source, comment
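+
+ # Illustrative parse: '# deb http://archive.canonical.com/ubuntu hardy partner # mirror'
+ # yields (True, False, 'deb http://archive.canonical.com/ubuntu hardy partner', 'mirror').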
+
+ @staticmethod
+ def _apt_cfg_file(filespec):
+ '''
+ Wrapper for `apt_pkg` module for running with Python 2.5
+ '''
+ try:
+ result = apt_pkg.config.find_file(filespec)
+ except AttributeError:
+ result = apt_pkg.Config.FindFile(filespec)
+ return result
+
+ @staticmethod
+ def _apt_cfg_dir(dirspec):
+ '''
+ Wrapper for `apt_pkg` module for running with Python 2.5
+ '''
+ try:
+ result = apt_pkg.config.find_dir(dirspec)
+ except AttributeError:
+ result = apt_pkg.Config.FindDir(dirspec)
+ return result
+
+ def load(self, file):
+ group = []
+ f = open(file, 'r')
+ for n, line in enumerate(f):
+ valid, enabled, source, comment = self._parse(line)
+ group.append((n, valid, enabled, source, comment))
+ self.files[file] = group
+
+ def save(self):
+ for filename, sources in list(self.files.items()):
+ if sources:
+ d, fn = os.path.split(filename)
+ try:
+ os.makedirs(d)
+ except OSError as err:
+ if not os.path.isdir(d):
+ self.module.fail_json("Failed to create directory %s: %s" % (d, to_native(err)))
+ fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
+
+ f = os.fdopen(fd, 'w')
+ for n, valid, enabled, source, comment in sources:
+ chunks = []
+ if not enabled:
+ chunks.append('# ')
+ chunks.append(source)
+ if comment:
+ chunks.append(' # ')
+ chunks.append(comment)
+ chunks.append('\n')
+ line = ''.join(chunks)
+
+ try:
+ f.write(line)
+ except IOError as err:
+ self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(err)))
+ self.module.atomic_move(tmp_path, filename)
+
+ # allow the user to override the default mode
+ if filename in self.new_repos:
+ this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
+ self.module.set_mode_if_different(filename, this_mode, False)
+ else:
+ del self.files[filename]
+ if os.path.exists(filename):
+ os.remove(filename)
+
+ def dump(self):
+ dumpstruct = {}
+ for filename, sources in self.files.items():
+ if sources:
+ lines = []
+ for n, valid, enabled, source, comment in sources:
+ chunks = []
+ if not enabled:
+ chunks.append('# ')
+ chunks.append(source)
+ if comment:
+ chunks.append(' # ')
+ chunks.append(comment)
+ chunks.append('\n')
+ lines.append(''.join(chunks))
+ dumpstruct[filename] = ''.join(lines)
+ return dumpstruct
+
+ def _choice(self, new, old):
+ if new is None:
+ return old
+ return new
+
+ def modify(self, file, n, enabled=None, source=None, comment=None):
+ '''
+ This function is to be used with the iterator, so we don't care about invalid sources.
+ If source, enabled, or comment is None, the original value from line ``n`` will be preserved.
+ '''
+ valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
+ self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
+
+ def _add_valid_source(self, source_new, comment_new, file):
+ # We'll try to reuse a disabled source if we have one.
+ # If we have more than one entry, we will enable them all - no advanced logic, remember.
+ found = False
+ for filename, n, enabled, source, comment in self:
+ if source == source_new:
+ self.modify(filename, n, enabled=True)
+ found = True
+
+ if not found:
+ if file is None:
+ file = self.default_file
+ else:
+ file = self._expand_path(file)
+
+ if file not in self.files:
+ self.files[file] = []
+
+ files = self.files[file]
+ files.append((len(files), True, True, source_new, comment_new))
+ self.new_repos.add(file)
+
+ def add_source(self, line, comment='', file=None):
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+
+ # Prefer separate files for new sources.
+ self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
+
+ def _remove_valid_source(self, source):
+ # If we have more than one matching entry, we will remove them all (not comment out - remove!)
+ for filename, n, enabled, src, comment in self:
+ if source == src and enabled:
+ self.files[filename].pop(n)
+
+ def remove_source(self, line):
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ self._remove_valid_source(source)
+
+
+class UbuntuSourcesList(SourcesList):
+
+ LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
+
+ def __init__(self, module, add_ppa_signing_keys_callback=None):
+ self.module = module
+ self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
+ self.codename = module.params['codename'] or distro.codename
+ super(UbuntuSourcesList, self).__init__(module)
+
+ def _get_ppa_info(self, owner_name, ppa_name):
+ lp_api = self.LP_API % (owner_name, ppa_name)
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(self.module, lp_api, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
+ return json.loads(to_native(response.read()))
+
+ def _expand_ppa(self, path):
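+ # Expand a 'ppa:' shorthand into a full deb line. For instance, assuming
+ # codename 'focal', the hypothetical input 'ppa:someowner/someppa' yields:
+ # ('deb http://ppa.launchpad.net/someowner/someppa/ubuntu focal main', 'someowner', 'someppa')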
+ ppa = path.split(':')[1]
+ ppa_owner = ppa.split('/')[0]
+ try:
+ ppa_name = ppa.split('/')[1]
+ except IndexError:
+ ppa_name = 'ppa'
+
+ line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
+ return line, ppa_owner, ppa_name
+
+ def _key_already_exists(self, key_fingerprint):
+ rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True)
+ return len(err) == 0
+
+ def add_source(self, line, comment='', file=None):
+ if line.startswith('ppa:'):
+ source, ppa_owner, ppa_name = self._expand_ppa(line)
+
+ if source in self.repos_urls:
+ # repository already exists
+ return
+
+ if self.add_ppa_signing_keys_callback is not None:
+ info = self._get_ppa_info(ppa_owner, ppa_name)
+ if not self._key_already_exists(info['signing_key_fingerprint']):
+ command = ['apt-key', 'adv', '--recv-keys', '--no-tty', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']]
+ self.add_ppa_signing_keys_callback(command)
+
+ file = file or self._suggest_filename('%s_%s' % (line, self.codename))
+ else:
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ file = file or self._suggest_filename(source)
+ self._add_valid_source(source, comment, file)
+
+ def remove_source(self, line):
+ if line.startswith('ppa:'):
+ source = self._expand_ppa(line)[0]
+ else:
+ source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
+ self._remove_valid_source(source)
+
+ @property
+ def repos_urls(self):
+ _repositories = []
+ for parsed_repos in self.files.values():
+ for parsed_repo in parsed_repos:
+ valid = parsed_repo[1]
+ enabled = parsed_repo[2]
+ source_line = parsed_repo[3]
+
+ if not valid or not enabled:
+ continue
+
+ if source_line.startswith('ppa:'):
+ source, ppa_owner, ppa_name = self._expand_ppa(source_line)
+ _repositories.append(source)
+ else:
+ _repositories.append(source_line)
+
+ return _repositories
+
+
+def get_add_ppa_signing_key_callback(module):
+ def _run_command(command):
+ module.run_command(command, check_rc=True)
+
+ if module.check_mode:
+ return None
+ else:
+ return _run_command
+
+
+def revert_sources_list(sources_before, sources_after, sourceslist_before):
+ '''Revert the sources list files to their previous state.'''
+
+ # First remove any new files that were created:
+ for filename in set(sources_after.keys()).difference(sources_before.keys()):
+ if os.path.exists(filename):
+ os.remove(filename)
+ # Now revert the existing files to their former state:
+ sourceslist_before.save()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ mode=dict(type='raw'),
+ update_cache=dict(type='bool', default=True, aliases=['update-cache']),
+ update_cache_retries=dict(type='int', default=5),
+ update_cache_retry_max_delay=dict(type='int', default=12),
+ filename=dict(type='str'),
+ # This should not be needed, but exists as a failsafe
+ install_python_apt=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ codename=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ repo = module.params['repo']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+ # Note: mode is referenced in SourcesList class via the passed in module (self here)
+
+ sourceslist = None
+
+ if not HAVE_PYTHON_APT:
+ if params['install_python_apt']:
+ install_python_apt(module)
+ else:
+ module.fail_json(msg='%s is not installed, and install_python_apt is False' % PYTHON_APT)
+
+ if not repo:
+ module.fail_json(msg='Please set argument \'repo\' to a non-empty value')
+
+ if isinstance(distro, aptsources_distro.Distribution):
+ sourceslist = UbuntuSourcesList(module, add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
+ else:
+ module.fail_json(msg='Module apt_repository is not supported on target.')
+
+ sourceslist_before = copy.deepcopy(sourceslist)
+ sources_before = sourceslist.dump()
+
+ try:
+ if state == 'present':
+ sourceslist.add_source(repo)
+ elif state == 'absent':
+ sourceslist.remove_source(repo)
+ except InvalidSource as err:
+ module.fail_json(msg='Invalid repository string: %s' % to_native(err))
+
+ sources_after = sourceslist.dump()
+ changed = sources_before != sources_after
+
+ if changed and module._diff:
+ diff = []
+ for filename in set(sources_before.keys()).union(sources_after.keys()):
+ diff.append({'before': sources_before.get(filename, ''),
+ 'after': sources_after.get(filename, ''),
+ 'before_header': (filename, '/dev/null')[filename not in sources_before],
+ 'after_header': (filename, '/dev/null')[filename not in sources_after]})
+ else:
+ diff = {}
+
+ if changed and not module.check_mode:
+ try:
+ sourceslist.save()
+ if update_cache:
+ err = ''
+ update_cache_retries = module.params.get('update_cache_retries')
+ update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
+ randomize = random.randint(0, 1000) / 1000.0
+
+ for retry in range(update_cache_retries):
+ try:
+ cache = apt.Cache()
+ cache.update()
+ break
+ except apt.cache.FetchFailedException as e:
+ err = to_native(e)
+
+ # Use exponential backoff with a max fail count, plus a little bit of randomness
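+ # e.g. with the default update_cache_retries=5 the successive delays are
+ # roughly 1, 2, 4, 8 and 12 seconds (2**retry plus jitter, capped at
+ # update_cache_retry_max_delay=12)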
+ delay = 2 ** retry + randomize
+ if delay > update_cache_retry_max_delay:
+ delay = update_cache_retry_max_delay + randomize
+ time.sleep(delay)
+ else:
+ revert_sources_list(sources_before, sources_after, sourceslist_before)
+ module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
+
+ except (OSError, IOError) as err:
+ revert_sources_list(sources_before, sources_after, sourceslist_before)
+ module.fail_json(msg=to_native(err))
+
+ module.exit_json(changed=changed, repo=repo, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/assemble.py b/lib/ansible/modules/assemble.py
new file mode 100644
index 00000000..81814eda
--- /dev/null
+++ b/lib/ansible/modules/assemble.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: assemble
+short_description: Assemble configuration files from fragments
+description:
+- Assembles a configuration file from fragments.
+- Often a particular program takes a single configuration file and does not support a
+ C(conf.d) style structure where it is easy to build up the configuration
+ from multiple sources. C(assemble) will take a directory of files that are either
+ local or have already been transferred to the system, and concatenate them
+ together to produce a destination file.
+- Files are assembled in string sorting order.
+- Puppet calls this idea I(fragments).
+version_added: '0.5'
+options:
+ src:
+ description:
+ - An already existing directory full of source files.
+ type: path
+ required: true
+ dest:
+ description:
+ - A file to create using the concatenation of all of the source files.
+ type: path
+ required: true
+ backup:
+ description:
+ - Create a backup file (if C(yes)), including the timestamp information so
+ you can get the original file back if you somehow clobbered it
+ incorrectly.
+ type: bool
+ default: no
+ delimiter:
+ description:
+ - A delimiter to separate the file contents.
+ type: str
+ version_added: '1.4'
+ remote_src:
+ description:
+ - If C(no), it will search for C(src) on the originating/master machine.
+ - If C(yes), it will go to the remote/target machine for the C(src).
+ type: bool
+ default: yes
+ version_added: '1.4'
+ regexp:
+ description:
+ - Assemble files only if C(regexp) matches the filename.
+ - If not set, all files are assembled.
+ - Every "\" (backslash) must be escaped as "\\" to comply with YAML syntax.
+ - Uses L(Python regular expressions,http://docs.python.org/2/library/re.html).
+ type: str
+ ignore_hidden:
+ description:
+ - A boolean that controls if files that start with a '.' will be included or not.
+ type: bool
+ default: no
+ version_added: '2.0'
+ validate:
+ description:
+ - The validation command to run before copying into place.
+ - The path to the file to validate is passed in via '%s' which must be present as in the sshd example below.
+ - The command is passed securely so shell features like expansion and pipes won't work.
+ type: str
+ version_added: '2.0'
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.builtin.template
+- module: ansible.windows.win_copy
+author:
+- Stephen Fromm (@sfromm)
+extends_documentation_fragment:
+- decrypt
+- files
+'''
+
+EXAMPLES = r'''
+- name: Assemble from fragments from a directory
+ assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+
+- name: Insert the provided delimiter in between each fragment
+ assemble:
+ src: /etc/someapp/fragments
+ dest: /etc/someapp/someapp.conf
+ delimiter: '### START FRAGMENT ###'
+
+- name: Assemble a new "sshd_config" file into place, after passing validation with sshd
+ assemble:
+ src: /etc/ssh/conf.d/
+ dest: /etc/ssh/sshd_config
+ validate: /usr/sbin/sshd -t -f %s
+'''
+
+import codecs
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import b, indexbytes
+from ansible.module_utils._text import to_native
+
+
+def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
+ ''' assemble a file from a directory of fragments '''
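+ # A sketch of the expected behaviour, assuming a hypothetical directory
+ # containing fragments '00-base' ('a\n') and '10-extra' ('b\n'):
+ # assemble_from_fragments('/tmp/frags') writes 'a\nb\n' to a temp file and
+ # returns its path; fragments are concatenated in sorted filename order.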
+ tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
+
+ for f in sorted(os.listdir(src_path)):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = os.path.join(src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+ with open(fragment, 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b('\n'))
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+
+ # byte indexing differs on Python 2 and 3,
+ # use indexbytes for compat
+ # chr(10) == '\n'
+ if indexbytes(delimiter, -1) != 10:
+ tmp.write(b('\n'))
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b('\n')):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
+
+
+def cleanup(path, result=None):
+ # cleanup just in case
+ if os.path.exists(path):
+ try:
+ os.remove(path)
+ except (IOError, OSError) as e:
+ # don't error on possible race conditions, but keep warning
+ if result is not None:
+ result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
+
+
+def main():
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ delimiter=dict(type='str'),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ remote_src=dict(type='bool', default=True),
+ regexp=dict(type='str'),
+ ignore_hidden=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ add_file_common_args=True,
+ )
+
+ changed = False
+ path_hash = None
+ dest_hash = None
+ src = module.params['src']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ delimiter = module.params['delimiter']
+ regexp = module.params['regexp']
+ compiled_regexp = None
+ ignore_hidden = module.params['ignore_hidden']
+ validate = module.params.get('validate', None)
+
+ result = dict(src=src, dest=dest)
+ if not os.path.exists(src):
+ module.fail_json(msg="Source (%s) does not exist" % src)
+
+ if not os.path.isdir(src):
+ module.fail_json(msg="Source (%s) is not a directory" % src)
+
+ if regexp is not None:
+ try:
+ compiled_regexp = re.compile(regexp)
+ except re.error as e:
+ module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
+
+ if validate and "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % validate)
+
+ path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
+ path_hash = module.sha1(path)
+ result['checksum'] = path_hash
+
+ # Backwards compat. This won't return data if FIPS mode is active
+ try:
+ pathmd5 = module.md5(path)
+ except ValueError:
+ pathmd5 = None
+ result['md5sum'] = pathmd5
+
+ if os.path.exists(dest):
+ dest_hash = module.sha1(dest)
+
+ if path_hash != dest_hash:
+ if validate:
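+ # e.g. with validate='/usr/sbin/sshd -t -f %s' (from the example above),
+ # this runs sshd against the assembled temp file before it is moved into place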
+ (rc, out, err) = module.run_command(validate % path)
+ result['validation'] = dict(rc=rc, stdout=out, stderr=err)
+ if rc != 0:
+ cleanup(path)
+ module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
+ if backup and dest_hash is not None:
+ result['backup_file'] = module.backup_local(dest)
+
+ module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
+ changed = True
+
+ cleanup(path, result)
+
+ # handle file permissions
+ file_args = module.load_file_common_arguments(module.params)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Mission complete
+ result['msg'] = "OK"
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/assert.py b/lib/ansible/modules/assert.py
new file mode 100644
index 00000000..32a0dd6f
--- /dev/null
+++ b/lib/ansible/modules/assert.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: assert
+short_description: Asserts given expressions are true
+description:
+ - This module asserts that given expressions are true with an optional custom message.
+ - This module is also supported for Windows targets.
+version_added: "1.5"
+options:
+ that:
+ description:
+ - A list of string expressions of the same form that can be passed to the 'when' statement.
+ type: list
+ required: true
+ fail_msg:
+ description:
+ - The customized message used for a failing assertion.
+ - This argument was called 'msg' before Ansible 2.7; it was renamed to 'fail_msg' with the alias 'msg'.
+ type: str
+ aliases: [ msg ]
+ version_added: "2.7"
+ success_msg:
+ description:
+ - The customized message used for a successful assertion.
+ type: str
+ version_added: "2.7"
+ quiet:
+ description:
+ - Set this to C(yes) to avoid verbose output.
+ type: bool
+ default: no
+ version_added: "2.8"
+notes:
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.debug
+- module: ansible.builtin.fail
+- module: ansible.builtin.meta
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- assert: { that: "ansible_os_family != 'RedHat'" }
+
+- assert:
+ that:
+ - "'foo' in some_command_result.stdout"
+ - number_of_the_counting == 3
+
+- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
+ assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ fail_msg: "'my_param' must be between 0 and 100"
+ success_msg: "'my_param' is between 0 and 100"
+
+- name: Please use 'msg' when the Ansible version is older than 2.7
+ assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ msg: "'my_param' must be between 0 and 100"
+
+- name: Use quiet to avoid verbose output
+ assert:
+ that:
+ - my_param <= 100
+ - my_param >= 0
+ quiet: true
+'''
diff --git a/lib/ansible/modules/async_status.py b/lib/ansible/modules/async_status.py
new file mode 100644
index 00000000..03e0bc16
--- /dev/null
+++ b/lib/ansible/modules/async_status.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: async_status
+short_description: Obtain status of asynchronous task
+description:
+- This module gets the status of an asynchronous task.
+- This module is also supported for Windows targets.
+version_added: "0.5"
+options:
+ jid:
+ description:
+ - Job or task identifier
+ type: str
+ required: true
+ mode:
+ description:
+ - If C(status), obtain the status.
+ - If C(cleanup), clean up the async job cache (by default in C(~/.ansible_async/)) for the specified job I(jid).
+ type: str
+ choices: [ cleanup, status ]
+ default: status
+notes:
+- This module is also supported for Windows targets.
+seealso:
+- ref: playbooks_async
+ description: Detailed information on how to use asynchronous actions and polling.
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+---
+- name: Asynchronous yum task
+ yum:
+ name: docker-io
+ state: present
+ async: 1000
+ poll: 0
+ register: yum_sleeper
+
+- name: Wait for asynchronous job to end
+ async_status:
+ jid: '{{ yum_sleeper.ansible_job_id }}'
+ register: job_result
+ until: job_result.finished
+ retries: 100
+ delay: 10
+'''
+
+RETURN = r'''
+ansible_job_id:
+ description: The asynchronous job id
+ returned: success
+ type: str
+ sample: '360874038559.4169'
+finished:
+ description: Whether the asynchronous job has finished (C(1)) or not (C(0))
+ returned: success
+ type: int
+ sample: 1
+started:
+ description: Whether the asynchronous job has started (C(1)) or not (C(0))
+ returned: success
+ type: int
+ sample: 1
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(argument_spec=dict(
+ jid=dict(type='str', required=True),
+ mode=dict(type='str', default='status', choices=['cleanup', 'status']),
+ # passed in from the async_status action plugin
+ _async_dir=dict(type='path', required=True),
+ ))
+
+ mode = module.params['mode']
+ jid = module.params['jid']
+ async_dir = module.params['_async_dir']
+
+ # setup logging directory
+ logdir = os.path.expanduser(async_dir)
+ log_path = os.path.join(logdir, jid)
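+ # e.g. with the default async dir and the sample jid from RETURN above,
+ # log_path becomes ~/.ansible_async/360874038559.4169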
+
+ if not os.path.exists(log_path):
+ module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
+
+ if mode == 'cleanup':
+ os.unlink(log_path)
+ module.exit_json(ansible_job_id=jid, erased=log_path)
+
+ # NOT in cleanup mode, assume regular status mode
+ # no remote kill mode currently exists, but probably should
+ # consider log_path + ".pid" file and also unlink that above
+
+ data = None
+ try:
+ with open(log_path) as f:
+ data = json.loads(f.read())
+ except Exception:
+ if not data:
+ # file not written yet? That means it is running
+ module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
+ else:
+ module.fail_json(ansible_job_id=jid, results_file=log_path,
+ msg="Could not parse job output: %s" % data, started=1, finished=1)
+
+ if 'started' not in data:
+ data['finished'] = 1
+ data['ansible_job_id'] = jid
+ elif 'finished' not in data:
+ data['finished'] = 0
+
+ # Fix error: TypeError: exit_json() keywords must be strings
+ data = dict([(to_native(k), v) for k, v in iteritems(data)])
+
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/async_wrapper.py b/lib/ansible/modules/async_wrapper.py
new file mode 100644
index 00000000..640e74cf
--- /dev/null
+++ b/lib/ansible/modules/async_wrapper.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import errno
+import json
+import shlex
+import shutil
+import os
+import subprocess
+import sys
+import traceback
+import signal
+import time
+import syslog
+import multiprocessing
+
+from ansible.module_utils._text import to_text
+
+PY3 = sys.version_info[0] == 3
+
+syslog.openlog('ansible-%s' % os.path.basename(__file__))
+syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
+
+# pipe for communication between forked process and parent
+ipc_watcher, ipc_notifier = multiprocessing.Pipe()
+
+
+def notice(msg):
+ syslog.syslog(syslog.LOG_NOTICE, msg)
+
+
+def daemonize_self():
+ # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # exit first parent
+ sys.exit(0)
+ except OSError:
+ e = sys.exc_info()[1]
+ sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
+
+ # decouple from parent environment (we do not chdir to / so the directory context stays the same as for non-async tasks)
+ os.setsid()
+ os.umask(int('022', 8))
+
+ # do second fork
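+ # (the first fork plus setsid detached us from the controlling terminal;
+ # this second fork ensures the daemon can never reacquire one)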
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # print "Daemon PID %d" % pid
+ sys.exit(0)
+ except OSError:
+ e = sys.exc_info()[1]
+ sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
+
+ dev_null = open('/dev/null', 'w')
+ os.dup2(dev_null.fileno(), sys.stdin.fileno())
+ os.dup2(dev_null.fileno(), sys.stdout.fileno())
+ os.dup2(dev_null.fileno(), sys.stderr.fileno())
+
+
+# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
+# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
+def _filter_non_json_lines(data):
+ '''
+ Used to filter unrelated output around module JSON output, like messages from
+ tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
+
+ Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
+ trailing lines after matching close character (working from the bottom of output).
+ '''
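+ # For instance, given the hypothetical input
+ # 'spurious MOTD line\n{"changed": false}\ntrailing junk\n'
+ # this returns ('{"changed": false}', [<warning about the trailing junk>]).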
+ warnings = []
+
+ # Filter initial junk
+ lines = data.splitlines()
+
+ for start, line in enumerate(lines):
+ line = line.strip()
+ if line.startswith(u'{'):
+ endchar = u'}'
+ break
+ elif line.startswith(u'['):
+ endchar = u']'
+ break
+ else:
+ raise ValueError('No start of json char found')
+
+ # Filter trailing junk
+ lines = lines[start:]
+
+ for reverse_end_offset, line in enumerate(reversed(lines)):
+ if line.strip().endswith(endchar):
+ break
+ else:
+ raise ValueError('No end of json char found')
+
+ if reverse_end_offset > 0:
+ # Trailing junk is uncommon and can point to things the user might
+ # want to change. So print a warning if we find any
+ trailing_junk = lines[len(lines) - reverse_end_offset:]
+ warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
+
+ lines = lines[:(len(lines) - reverse_end_offset)]
+
+ return ('\n'.join(lines), warnings)
+
+
+def _get_interpreter(module_path):
+ with open(module_path, 'rb') as module_fd:
+ head = module_fd.read(1024)
+ # compare as bytes - the file was opened in binary mode
+ if head[0:2] != b'#!':
+ return None
+ return head[2:head.index(b'\n')].strip().split(b' ')
+
+
+def _make_temp_dir(path):
+ # TODO: Add checks for permissions on path.
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def _run_module(wrapped_cmd, jid, job_path):
+
+ tmp_job_path = job_path + ".tmp"
+ jobfile = open(tmp_job_path, "w")
+ jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
+ jobfile.close()
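+ # atomic rename: async_status will either see the 'started' stub or the
+ # final result, never a partially written file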
+ os.rename(tmp_job_path, job_path)
+ jobfile = open(tmp_job_path, "w")
+ result = {}
+
+ # signal grandchild process started and isolated from being terminated
+ # by the connection being closed sending a signal to the job group
+ ipc_notifier.send(True)
+ ipc_notifier.close()
+
+ outdata = ''
+ filtered_outdata = ''
+ stderr = ''
+ try:
+ cmd = shlex.split(wrapped_cmd)
+ # call the module interpreter directly (for non-binary modules)
+ # this permits use of a script for an interpreter on non-Linux platforms
+ interpreter = _get_interpreter(cmd[0])
+ if interpreter:
+ cmd = interpreter + cmd
+ script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ (outdata, stderr) = script.communicate()
+ if PY3:
+ outdata = outdata.decode('utf-8', 'surrogateescape')
+ stderr = stderr.decode('utf-8', 'surrogateescape')
+
+ (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
+
+ result = json.loads(filtered_outdata)
+
+ if json_warnings:
+ # merge JSON junk warnings with any existing module warnings
+ module_warnings = result.get('warnings', [])
+ if not isinstance(module_warnings, list):
+ module_warnings = [module_warnings]
+ module_warnings.extend(json_warnings)
+ result['warnings'] = module_warnings
+
+ if stderr:
+ result['stderr'] = stderr
+ jobfile.write(json.dumps(result))
+
+ except (OSError, IOError):
+ e = sys.exc_info()[1]
+ result = {
+ "failed": 1,
+ "cmd": wrapped_cmd,
+ "msg": to_text(e),
+ "outdata": outdata, # temporary notice only
+ "stderr": stderr
+ }
+ result['ansible_job_id'] = jid
+ jobfile.write(json.dumps(result))
+
+ except (ValueError, Exception):
+ result = {
+ "failed": 1,
+ "cmd": wrapped_cmd,
+ "data": outdata, # temporary notice only
+ "stderr": stderr,
+ "msg": traceback.format_exc()
+ }
+ result['ansible_job_id'] = jid
+ jobfile.write(json.dumps(result))
+
+ jobfile.close()
+ os.rename(tmp_job_path, job_path)
+
+
+def main():
+ if len(sys.argv) < 5:
+ print(json.dumps({
+ "failed": True,
+ "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
+ "Humans, do not call directly!"
+ }))
+ sys.exit(1)
+
+ jid = "%s.%d" % (sys.argv[1], os.getpid())
+ time_limit = sys.argv[2]
+ wrapped_module = sys.argv[3]
+ argsfile = sys.argv[4]
+ if '-tmp-' not in os.path.dirname(wrapped_module):
+ preserve_tmp = True
+ elif len(sys.argv) > 5:
+ preserve_tmp = sys.argv[5] == '-preserve_tmp'
+ else:
+ preserve_tmp = False
+ # treat a lone underscore as 'no argsfile' so we can support passing additional positional parameters
+ if argsfile != '_':
+ cmd = "%s %s" % (wrapped_module, argsfile)
+ else:
+ cmd = wrapped_module
+ step = 5
+
+ async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
+
+ # setup job output directory
+ jobdir = os.path.expanduser(async_dir)
+ job_path = os.path.join(jobdir, jid)
+
+ try:
+ _make_temp_dir(jobdir)
+ except Exception as e:
+ print(json.dumps({
+ "failed": 1,
+ "msg": "could not create: %s - %s" % (jobdir, to_text(e)),
+ "exception": to_text(traceback.format_exc()),
+ }))
+ sys.exit(1)
+
+ # immediately exit this process, leaving an orphaned process
+ # running which immediately forks a supervisory timing process
+
+ try:
+ pid = os.fork()
+ if pid:
+ # Notify the overlord that the async process started
+
+ # we need to not return immediately such that the launched command has an attempt
+ # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
+ # this probably could be done with some IPC later. Modules should always read
+ # the argsfile at the very first start of their execution anyway
+
+ # close off notifier handle in grandparent, probably unnecessary as
+ # this process doesn't hang around long enough
+ ipc_notifier.close()
+
+ # allow waiting up to 2.5 seconds in total, which should be long enough
+ # for even the most heavily loaded environment in practice.
+ retries = 25
+ while retries > 0:
+ if ipc_watcher.poll(0.1):
+ break
+ else:
+ retries = retries - 1
+ continue
+
+ notice("Return async_wrapper task started.")
+ print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
+ "_ansible_suppress_tmpdir_delete": not preserve_tmp}))
+ sys.stdout.flush()
+ sys.exit(0)
+ else:
+ # The actual wrapper process
+
+ # close off the receiving end of the pipe from child process
+ ipc_watcher.close()
+
+ # Daemonize, so we keep on running
+ daemonize_self()
+
+ # we are now daemonized, create a supervisory process
+ notice("Starting module and watcher")
+
+ sub_pid = os.fork()
+ if sub_pid:
+ # close off inherited pipe handles
+ ipc_watcher.close()
+ ipc_notifier.close()
+
+ # the parent stops the process after the time limit
+ remaining = int(time_limit)
+
+ # set the child process group id to kill all children
+ os.setpgid(sub_pid, sub_pid)
+
+ notice("Start watching %s (%s)" % (sub_pid, remaining))
+ time.sleep(step)
+ while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
+ notice("%s still running (%s)" % (sub_pid, remaining))
+ time.sleep(step)
+ remaining = remaining - step
+ if remaining <= 0:
+ notice("Now killing %s" % (sub_pid))
+ os.killpg(sub_pid, signal.SIGKILL)
+ notice("Sent kill to group %s " % sub_pid)
+ time.sleep(1)
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
+ sys.exit(0)
+ notice("Done in kid B.")
+ if not preserve_tmp:
+ shutil.rmtree(os.path.dirname(wrapped_module), True)
+ sys.exit(0)
+ else:
+ # the child process runs the actual module
+ notice("Start module (%s)" % os.getpid())
+ _run_module(cmd, jid, job_path)
+ notice("Module complete (%s)" % os.getpid())
+ sys.exit(0)
+
+ except SystemExit:
+ # On python2.4, SystemExit is a subclass of Exception.
+ # This block makes python2.4 behave the same as python2.5+
+ raise
+
+ except Exception:
+ e = sys.exc_info()[1]
+ notice("error: %s" % e)
+ print(json.dumps({
+ "failed": True,
+ "msg": "FATAL ERROR: %s" % e
+ }))
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/blockinfile.py b/lib/ansible/modules/blockinfile.py
new file mode 100644
index 00000000..2f80a65e
--- /dev/null
+++ b/lib/ansible/modules/blockinfile.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: blockinfile
+short_description: Insert/update/remove a text block surrounded by marker lines
+version_added: '2.0'
+description:
+- This module will insert/update/remove a block of multi-line text surrounded by customizable marker lines.
+author:
+- Yaegashi Takeshi (@yaegashi)
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: yes
+ aliases: [ dest, destfile, name ]
+ state:
+ description:
+ - Whether the block should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ marker:
+ description:
+ - The marker line template.
+ - C({mark}) will be replaced with the values in C(marker_begin) (default="BEGIN") and C(marker_end) (default="END").
+ - Using a custom marker without the C({mark}) variable may result in the block being repeatedly inserted on subsequent playbook runs.
+ type: str
+ default: '# {mark} ANSIBLE MANAGED BLOCK'
+ block:
+ description:
+ - The text to insert inside the marker lines.
+ - If it is missing or an empty string, the block will be removed as if C(state) were set to C(absent).
+ type: str
+ default: ''
+ aliases: [ content ]
+ insertafter:
+ description:
+ - If specified and no begin/ending C(marker) lines are found, the block will be inserted after the last match of the specified regular expression.
+ - A special value is available: C(EOF) for inserting the block at the end of the file.
+ - If the specified regular expression has no matches, C(EOF) will be used instead.
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+ - If specified and no begin/ending C(marker) lines are found, the block will be inserted before the last match of the specified regular expression.
+ - A special value is available: C(BOF) for inserting the block at the beginning of the file.
+ - If the specified regular expression has no matches, the block will be inserted at the end of the file.
+ type: str
+ choices: [ BOF, '*regex*' ]
+ create:
+ description:
+ - Create a new file if it does not exist.
+ type: bool
+ default: no
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ marker_begin:
+ description:
+ - This will be inserted at C({mark}) in the opening ansible block marker.
+ type: str
+ default: BEGIN
+ version_added: '2.5'
+ marker_end:
+ required: false
+ description:
+ - This will be inserted at C({mark}) in the closing ansible block marker.
+ type: str
+ default: END
+ version_added: '2.5'
+notes:
+ - This module supports check mode.
+ - When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
+ - When more than one block should be handled in one file you must change the I(marker) per task.
+extends_documentation_fragment:
+- files
+- validate
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' or 'name' was used instead of 'path'
+- name: Insert/Update "Match User" configuration block in /etc/ssh/sshd_config
+ blockinfile:
+ path: /etc/ssh/sshd_config
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+
+- name: Insert/Update eth0 configuration stanza in /etc/network/interfaces
+ (it might be better to copy files into /etc/network/interfaces.d/)
+ blockinfile:
+ path: /etc/network/interfaces
+ block: |
+ iface eth0 inet static
+ address 192.0.2.23
+ netmask 255.255.255.0
+
+- name: Insert/Update configuration using a local file and validate it
+ blockinfile:
+ block: "{{ lookup('file', './local/sshd_config') }}"
+ path: /etc/ssh/sshd_config
+ backup: yes
+ validate: /usr/sbin/sshd -T -f %s
+
+- name: Insert/Update HTML surrounded by custom markers after <body> line
+ blockinfile:
+ path: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ insertafter: "<body>"
+ block: |
+ <h1>Welcome to {{ ansible_hostname }}</h1>
+ <p>Last updated on {{ ansible_date_time.iso8601 }}</p>
+
+- name: Remove HTML as well as surrounding markers
+ blockinfile:
+ path: /var/www/html/index.html
+ marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
+ block: ""
+
+- name: Add mappings to /etc/hosts
+ blockinfile:
+ path: /etc/hosts
+ block: |
+ {{ item.ip }} {{ item.name }}
+ marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
+ loop:
+ - { name: host1, ip: 10.10.1.10 }
+ - { name: host2, ip: 10.10.1.11 }
+ - { name: host3, ip: 10.10.1.12 }
+'''
+
+import re
+import os
+import tempfile
+from ansible.module_utils.six import b
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def write_changes(module, contents, path):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message, diff):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'),
+ block=dict(type='str', default='', aliases=['content']),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ create=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ marker_begin=dict(type='str', default='BEGIN'),
+ marker_end=dict(type='str', default='END'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ params = module.params
+ path = params['path']
+
+ if os.path.isdir(path):
+ module.fail_json(rc=256,
+ msg='Path %s is a directory !' % path)
+
+ path_exists = os.path.exists(path)
+ if not path_exists:
+ if not module.boolean(params['create']):
+ module.fail_json(rc=257,
+ msg='Path %s does not exist !' % path)
+ destpath = os.path.dirname(path)
+ if not os.path.exists(destpath) and not module.check_mode:
+ try:
+ os.makedirs(destpath)
+ except Exception as e:
+ module.fail_json(msg='Error creating %s: %s' % (destpath, to_native(e)))
+ original = None
+ lines = []
+ else:
+ f = open(path, 'rb')
+ original = f.read()
+ f.close()
+ lines = original.splitlines(True)
+
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % path,
+ 'after_header': '%s (content)' % path}
+
+ if module._diff and original:
+ diff['before'] = original
+
+ insertbefore = params['insertbefore']
+ insertafter = params['insertafter']
+ block = to_bytes(params['block'])
+ marker = to_bytes(params['marker'])
+ present = params['state'] == 'present'
+
+ if not present and not path_exists:
+ module.exit_json(changed=False, msg="File %s not present" % path)
+
+ if insertbefore is None and insertafter is None:
+ insertafter = 'EOF'
+
+ if insertafter not in (None, 'EOF'):
+ insertre = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
+ elif insertbefore not in (None, 'BOF'):
+ insertre = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
+ else:
+ insertre = None
+
+ marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker) + b(os.linesep)
+ marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker) + b(os.linesep)
+ if present and block:
+ # Escape sequences like '\n' need to be handled in Ansible 1.x
+ if module.ansible_version.startswith('1.'):
+ block = re.sub('', block, '')
+ if not block.endswith(b(os.linesep)):
+ block += b(os.linesep)
+ blocklines = [marker0] + block.splitlines(True) + [marker1]
+ else:
+ blocklines = []
+
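+ # n0/n1 track the line indexes of the begin/end marker lines if they are
+ # already present in the file; when a marker is missing we fall back to
+ # the insertafter/insertbefore logic below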
+ n0 = n1 = None
+ for i, line in enumerate(lines):
+ if line == marker0:
+ n0 = i
+ if line == marker1:
+ n1 = i
+
+ if None in (n0, n1):
+ n0 = None
+ if insertre is not None:
+ for i, line in enumerate(lines):
+ if insertre.search(line):
+ n0 = i
+ if n0 is None:
+ n0 = len(lines)
+ elif insertafter is not None:
+ n0 += 1
+ elif insertbefore is not None:
+ n0 = 0 # insertbefore=BOF
+ else:
+ n0 = len(lines) # insertafter=EOF
+ elif n0 < n1:
+ lines[n0:n1 + 1] = []
+ else:
+ lines[n1:n0 + 1] = []
+ n0 = n1
+
+ # Ensure there is a line separator before the block of lines to be inserted
+ if n0 > 0:
+ if not lines[n0 - 1].endswith(b(os.linesep)):
+ lines[n0 - 1] += b(os.linesep)
+
+ lines[n0:n0] = blocklines
+ if lines:
+ result = b''.join(lines)
+ else:
+ result = b''
+
+ if module._diff:
+ diff['after'] = result
+
+ if original == result:
+ msg = ''
+ changed = False
+ elif original is None:
+ msg = 'File created'
+ changed = True
+ elif not blocklines:
+ msg = 'Block removed'
+ changed = True
+ else:
+ msg = 'Block inserted'
+ changed = True
+
+ if changed and not module.check_mode:
+ if module.boolean(params['backup']) and path_exists:
+ module.backup_local(path)
+ # We should always follow symlinks so that we change the real file
+ real_path = os.path.realpath(params['path'])
+ write_changes(module, result, real_path)
+
+ if module.check_mode and not path_exists:
+ module.exit_json(changed=changed, msg=msg, diff=diff)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % path
+ attr_diff['after_header'] = '%s (file attributes)' % path
+
+ difflist = [diff, attr_diff]
+ module.exit_json(changed=changed, msg=msg, diff=difflist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/command.py b/lib/ansible/modules/command.py
new file mode 100644
index 00000000..dcecbe64
--- /dev/null
+++ b/lib/ansible/modules/command.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: command
+short_description: Execute commands on targets
+version_added: historical
+description:
+ - The C(command) module takes the command name followed by a list of space-delimited arguments.
+ - The given command will be executed on all selected nodes.
+ - The command(s) will not be
+ processed through the shell, so variables like C($HOSTNAME) and operations
+ like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
+ Use the M(ansible.builtin.shell) module if you need these features.
+ - To create C(command) tasks that are easier to read than the ones using space-delimited
+ arguments, pass parameters using the C(args) L(task keyword,../reference_appendices/playbooks_keywords.html#task)
+ or use C(cmd) parameter.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - For Windows targets, use the M(ansible.windows.win_command) module instead.
+options:
+ free_form:
+ description:
+ - The command module takes a free form string as a command to run.
+ - There is no actual parameter named 'free form'.
+ cmd:
+ type: str
+ description:
+ - The command to run.
+ argv:
+ type: list
+ description:
+ - Passes the command as a list rather than a string.
+ - Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
+ - Only the string (free form) or the list (argv) form can be provided, not both. One or the other must be provided.
+ version_added: "2.6"
+ creates:
+ type: path
+ description:
+ - A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(won't) be run.
+ removes:
+ type: path
+ description:
+ - A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
+ version_added: "0.8"
+ chdir:
+ type: path
+ description:
+ - Change into this directory before running the command.
+ version_added: "0.6"
+ warn:
+ description:
+ - Enable or disable task warnings.
+ type: bool
+ default: yes
+ version_added: "1.8"
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ version_added: "2.4"
+ stdin_add_newline:
+ type: bool
+ default: yes
+ description:
+ - If set to C(yes), append a newline to stdin data.
+ version_added: "2.8"
+ strip_empty_ends:
+ description:
+ - Strip empty lines from the end of stdout/stderr in result.
+ version_added: "2.8"
+ type: bool
+ default: yes
+notes:
+ - If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you actually want the M(ansible.builtin.shell) module instead.
+ Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly so it is more secure to
+ use the C(command) module when possible.
+ - " C(creates), C(removes), and C(chdir) can be specified after the command.
+ For instance, if you only want to run a command if a certain file does not exist, use this."
+ - Check mode is supported when passing C(creates) or C(removes). If running in check mode and either of these are specified, the module will
+ check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
+ - The C(executable) parameter is removed since version 2.4. If you have a need for this parameter, use the M(ansible.builtin.shell) module instead.
+ - For Windows targets, use the M(ansible.windows.win_command) module instead.
+ - For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
+seealso:
+- module: ansible.builtin.raw
+- module: ansible.builtin.script
+- module: ansible.builtin.shell
+- module: ansible.windows.win_command
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Return motd to registered var
+ command: cat /etc/motd
+ register: mymotd
+
+# free-form (string) arguments, all arguments on one line
+- name: Run command if /path/to/database does not exist (without 'args')
+ command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
+
+# free-form (string) arguments, some arguments on separate lines with the 'args' keyword
+# 'args' is a task keyword, passed at the same level as the module
+- name: Run command if /path/to/database does not exist (with 'args' keyword)
+ command: /usr/bin/make_database.sh db_user db_name
+ args:
+ creates: /path/to/database
+
+# 'cmd' is module parameter
+- name: Run command if /path/to/database does not exist (with 'cmd' parameter)
+ command:
+ cmd: /usr/bin/make_database.sh db_user db_name
+ creates: /path/to/database
+
+- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist
+ command: /usr/bin/make_database.sh db_user db_name
+ become: yes
+ become_user: db_owner
+ args:
+ chdir: somedir/
+ creates: /path/to/database
+
+# argv (list) arguments, each argument on a separate line, 'args' keyword not necessary
+# 'argv' is a parameter, indented one level from the module
+- name: Use 'argv' to send a command as a list - leave 'command' empty
+ command:
+ argv:
+ - /usr/bin/make_database.sh
+ - Username with whitespace
+ - dbname with whitespace
+ creates: /path/to/database
+
+- name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues
+ command: cat {{ myfile|quote }}
+ register: myoutput
+'''
+
+RETURN = r'''
+msg:
+ description: changed
+ returned: always
+ type: bool
+ sample: True
+start:
+ description: The command execution start time
+ returned: always
+ type: str
+ sample: '2017-09-29 22:03:48.083128'
+end:
+ description: The command execution end time
+ returned: always
+ type: str
+ sample: '2017-09-29 22:03:48.084657'
+delta:
+ description: The command execution delta time
+ returned: always
+ type: str
+ sample: '0:00:00.001529'
+stdout:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master …'
+stderr:
+ description: The command standard error
+ returned: always
+ type: str
+ sample: 'ls cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task
+ returned: always
+ type: list
+ sample:
+ - echo
+ - hello
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
+stderr_lines:
+ description: The command standard error split in lines
+ returned: always
+ type: list
+ sample: [u'ls cannot access foo: No such file or directory', u'ls …']
+'''
+
+import datetime
+import glob
+import os
+import shlex
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native, to_bytes, to_text
+from ansible.module_utils.common.collections import is_iterable
+
+
+def check_command(module, commandline):
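+ # Warn when a raw command duplicates an existing module. For instance, a
+ # hypothetical task running 'curl https://example.com' triggers a warning
+ # suggesting the get_url or uri module instead.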
+ arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
+ 'ln': 'state=link', 'mkdir': 'state=directory',
+ 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
+ commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri',
+ 'svn': 'subversion', 'service': 'service',
+ 'yum': 'yum', 'apt-get': 'apt',
+ 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template',
+ 'dnf': 'dnf'}
+ become = ['sudo', 'su', 'runas']
+ if isinstance(commandline, list):
+ command = commandline[0]
+ else:
+ command = commandline.split()[0]
+ command = os.path.basename(command)
+
+ disable_suffix = "If you need to use command because {mod} is insufficient you can add" \
+ " 'warn: false' to this command task or set 'command_warnings=False' in" \
+ " ansible.cfg to get rid of this message."
+ substitutions = {'mod': None, 'cmd': command}
+
+ if command in arguments:
+ msg = "Consider using the {mod} module with {subcmd} rather than running '{cmd}'. " + disable_suffix
+ substitutions['mod'] = 'file'
+ substitutions['subcmd'] = arguments[command]
+ module.warn(msg.format(**substitutions))
+
+ if command in commands:
+ msg = "Consider using the {mod} module rather than running '{cmd}'. " + disable_suffix
+ substitutions['mod'] = commands[command]
+ module.warn(msg.format(**substitutions))
+
+ if command in become:
+ module.warn("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (command,))
+
+
+def main():
+
+ # the command module is the one ansible module that does not take key=value args
+ # hence don't copy this one if you are looking to build others!
+ module = AnsibleModule(
+ argument_spec=dict(
+ _raw_params=dict(),
+ _uses_shell=dict(type='bool', default=False),
+ argv=dict(type='list'),
+ chdir=dict(type='path'),
+ executable=dict(),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ # The default for this really comes from the action plugin
+ warn=dict(type='bool', default=True),
+ stdin=dict(required=False),
+ stdin_add_newline=dict(type='bool', default=True),
+ strip_empty_ends=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ shell = module.params['_uses_shell']
+ chdir = module.params['chdir']
+ executable = module.params['executable']
+ args = module.params['_raw_params']
+ argv = module.params['argv']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ warn = module.params['warn']
+ stdin = module.params['stdin']
+ stdin_add_newline = module.params['stdin_add_newline']
+ strip = module.params['strip_empty_ends']
+
+ if not shell and executable:
+ module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
+ executable = None
+
+ if (not args or args.strip() == '') and not argv:
+ module.fail_json(rc=256, msg="no command given")
+
+ if args and argv:
+ module.fail_json(rc=256, msg="only command or argv can be given, not both")
+
+ if not shell and args:
+ args = shlex.split(args)
+
+ args = args or argv
+
+ # All args must be strings
+ if is_iterable(args, include_strings=False):
+ args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]
+
+ if chdir:
+ try:
+ chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
+ except ValueError as e:
+ module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))
+
+ try:
+ os.chdir(chdir)
+ except (IOError, OSError) as e:
+ module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ if glob.glob(creates):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s exists" % creates,
+ changed=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ if not glob.glob(removes):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s does not exist" % removes,
+ changed=False,
+ rc=0
+ )
+
+ if warn:
+ check_command(module, args)
+
+ startd = datetime.datetime.now()
+
+ if not module.check_mode:
+ rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
+ elif creates or removes:
+ rc = 0
+ out = err = b'Command would have run if not in check mode'
+ else:
+ module.exit_json(msg="skipped, running in check mode", skipped=True)
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if strip:
+ out = out.rstrip(b"\r\n")
+ err = err.rstrip(b"\r\n")
+
+ result = dict(
+ cmd=args,
+ stdout=out,
+ stderr=err,
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ )
+
+ if rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/copy.py b/lib/ansible/modules/copy.py
new file mode 100644
index 00000000..0dddb3ff
--- /dev/null
+++ b/lib/ansible/modules/copy.py
@@ -0,0 +1,799 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: copy
+version_added: historical
+short_description: Copy files to remote locations
+description:
+ - The C(copy) module copies a file from the local or remote machine to a location on the remote machine.
+ - Use the M(ansible.builtin.fetch) module to copy files from remote locations to the local box.
+ - If you need variable interpolation in copied files, use the M(ansible.builtin.template) module.
+ Using a variable in the C(content) field will result in unpredictable output.
+ - For Windows targets, use the M(ansible.windows.win_copy) module instead.
+options:
+ src:
+ description:
+ - Local path to a file to copy to the remote server.
+ - This can be absolute or relative.
+ - If path is a directory, it is copied recursively. In this case, if path ends
+ with "/", only inside contents of that directory are copied to destination.
+ Otherwise, if it does not end with "/", the directory itself with all contents
+ is copied. This behavior is similar to the C(rsync) command line tool.
+ type: path
+ content:
+ description:
+ - When used instead of C(src), sets the contents of a file directly to the specified value.
+ - Works only when C(dest) is a file. Creates the file if it does not exist.
+ - For advanced formatting or if C(content) contains a variable, use the
+ M(ansible.builtin.template) module.
+ type: str
+ version_added: '1.1'
+ dest:
+ description:
+ - Remote absolute path where the file should be copied to.
+ - If C(src) is a directory, this must be a directory too.
+ - If C(dest) is a non-existent path and if either C(dest) ends with "/" or C(src) is a directory, C(dest) is created.
+ - If I(dest) is a relative path, the starting directory is determined by the remote host.
+ - If C(src) and C(dest) are files, the parent directory of C(dest) is not created and the task fails if it does not already exist.
+ type: path
+ required: yes
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '0.7'
+ force:
+ description:
+ - Influence whether the remote file must always be replaced.
+ - If C(yes), the remote file will be replaced when contents are different than the source.
+ - If C(no), the file will only be transferred if the destination does not exist.
+ - Alias C(thirsty) has been deprecated and will be removed in 2.13.
+ type: bool
+ default: yes
+ aliases: [ thirsty ]
+ version_added: '1.1'
+ mode:
+ description:
+ - The permissions of the destination file or directory.
+ - For those used to C(/usr/bin/chmod) remember that modes are actually octal numbers.
+ You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
+      (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives a string
+ and can do its own conversion from string into number. Giving Ansible a number without following
+ one of these rules will end up with a decimal number which will have unexpected results.
+ - As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
+ - As of Ansible 2.3, the mode may also be the special string C(preserve).
+ - C(preserve) means that the file will be given the same permissions as the source file.
+ - When doing a recursive copy, see also C(directory_mode).
+    type: raw
+ directory_mode:
+ description:
+ - When doing a recursive copy set the mode for the directories.
+ - If this is not set we will use the system defaults.
+ - The mode is only set on directories which are newly created, and will not affect those that already existed.
+ type: raw
+ version_added: '1.5'
+ remote_src:
+ description:
+ - Influence whether C(src) needs to be transferred or already is present remotely.
+    - If C(no), it will search for C(src) on the originating/master machine.
+    - If C(yes), it will go to the remote/target machine for C(src).
+ - C(remote_src) supports recursive copying as of version 2.8.
+ - C(remote_src) only works with C(mode=preserve) as of version 2.6.
+ type: bool
+ default: no
+ version_added: '2.0'
+ follow:
+ description:
+ - This flag indicates that filesystem links in the destination, if they exist, should be followed.
+ type: bool
+ default: no
+ version_added: '1.8'
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree, if they exist, should be followed.
+ type: bool
+ default: yes
+ version_added: '2.4'
+ checksum:
+ description:
+ - SHA1 checksum of the file being transferred.
+ - Used to validate that the copy of the file was successful.
+    - If this is not provided, Ansible will use the locally calculated checksum of the src file.
+ type: str
+ version_added: '2.5'
+extends_documentation_fragment:
+- decrypt
+- files
+- validate
+notes:
+- The M(ansible.builtin.copy) module's recursive copy facility does not scale to lots (>hundreds) of files.
+seealso:
+- module: ansible.builtin.assemble
+- module: ansible.builtin.fetch
+- module: ansible.builtin.file
+- module: ansible.builtin.template
+- module: ansible.posix.synchronize
+- module: ansible.windows.win_copy
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Copy file with owner and permissions
+ copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: '0644'
+
+- name: Copy file with owner and permission, using symbolic representation
+ copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: u=rw,g=r,o=r
+
+- name: Another symbolic mode example, adding some permissions and removing others
+ copy:
+ src: /srv/myfiles/foo.conf
+ dest: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: u+rw,g-wx,o-rwx
+
+- name: Copy a new "ntp.conf" file into place, backing up the original if it differs from the copied version
+ copy:
+ src: /mine/ntp.conf
+ dest: /etc/ntp.conf
+ owner: root
+ group: root
+ mode: '0644'
+ backup: yes
+
+- name: Copy a new "sudoers" file into place, after passing validation with visudo
+ copy:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: /usr/sbin/visudo -csf %s
+
+- name: Copy a "sudoers" file on the remote machine for editing
+ copy:
+ src: /etc/sudoers
+ dest: /etc/sudoers.edit
+ remote_src: yes
+ validate: /usr/sbin/visudo -csf %s
+
+- name: Copy using inline content
+ copy:
+ content: '# This file was moved to /etc/other.conf'
+ dest: /etc/mine.conf
+
+- name: If follow=yes, /path/to/file will be overwritten by contents of foo.conf
+ copy:
+ src: /etc/foo.conf
+ dest: /path/to/link # link to /path/to/file
+ follow: yes
+
+- name: If follow=no, /path/to/link will become a file and be overwritten by contents of foo.conf
+ copy:
+ src: /etc/foo.conf
+ dest: /path/to/link # link to /path/to/file
+ follow: no
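+
+# The next two tasks are illustrative sketches only (the paths are hypothetical):
+# the trailing "/" on src copies just the directory contents, and mode=preserve
+# keeps each source file's permissions, as described in the option docs above.
+- name: Recursively copy the contents of a directory (note the trailing slash on src)
+  copy:
+    src: /srv/myfiles/
+    dest: /opt/myfiles
+
+- name: Copy a file and preserve the permissions of the source file
+  copy:
+    src: /mine/ntp.conf
+    dest: /etc/ntp.conf
+    mode: preserve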
+'''
+
+RETURN = r'''
+dest:
+ description: Destination file/path
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+src:
+ description: Source file used for the copy on the target machine
+ returned: changed
+ type: str
+ sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
+md5sum:
+ description: MD5 checksum of the file after running copy
+ returned: when supported
+ type: str
+ sample: 2a5aeecc61dc98c4d780b14b330e3282
+checksum:
+ description: SHA1 checksum of the file after running copy
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+backup_file:
+ description: Name of backup file created
+ returned: changed and if backup=yes
+ type: str
+ sample: /path/to/file.txt.2015-02-12@22:09~
+gid:
+ description: Group id of the file, after execution
+ returned: success
+ type: int
+ sample: 100
+group:
+ description: Group of the file, after execution
+ returned: success
+ type: str
+ sample: httpd
+owner:
+ description: Owner of the file, after execution
+ returned: success
+ type: str
+ sample: httpd
+uid:
+ description: Owner id of the file, after execution
+ returned: success
+ type: int
+ sample: 100
+mode:
+ description: Permissions of the target, after execution
+ returned: success
+ type: str
+ sample: 0644
+size:
+ description: Size of the target, after execution
+ returned: success
+ type: int
+ sample: 1220
+state:
+ description: State of the target, after execution
+ returned: success
+ type: str
+ sample: file
+'''
+
+import errno
+import filecmp
+import grp
+import os
+import os.path
+import platform
+import pwd
+import shutil
+import stat
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
+
+
+# The AnsibleModule object
+module = None
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+
+# Once we get run_command moved into common, we can move this into a common/files module. We can't
+# until then because of the module.run_command() method. We may need to move it into
+# basic::AnsibleModule() until then but if so, make it a private function so that we don't have to
+# keep it for backwards compatibility later.
+def clear_facls(path):
+ setfacl = get_bin_path('setfacl')
+ # FIXME "setfacl -b" is available on Linux and FreeBSD. There is "setfacl -D e" on z/OS. Others?
+ acl_command = [setfacl, '-b', path]
+ b_acl_command = [to_bytes(x) for x in acl_command]
+ rc, out, err = module.run_command(b_acl_command, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ if rc != 0:
+        raise RuntimeError('Error running "{0}": stdout: "{1}"; stderr: "{2}"'.format(' '.join(acl_command), out, err))
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
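+    For example, assuming only /usr exists, split_pre_existing_dir('/usr/local/share')
+    returns ('/usr', ['local', 'share']).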
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if head == '':
+ return ('.', [tail])
+ if not os.path.exists(b_head):
+ if head == '/':
+ raise AnsibleModuleError(results={'msg': "The '/' directory doesn't exist on this machine."})
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return (head, [tail])
+ new_directory_list.append(tail)
+ return (pre_existing_dir, new_directory_list)
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+
+ if new_directory_list:
+ working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+def chown_recursive(path, module):
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+
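+    # In check mode, only report whether ownership would change by comparing
+    # the current uid/gid from os.stat() with the requested owner/group;
+    # otherwise delegate to the module helpers, which apply the change and
+    # report whether anything was modified.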
+ if owner is not None:
+ if not module.check_mode:
+ for dirpath, dirnames, filenames in os.walk(path):
+ owner_changed = module.set_owner_if_different(dirpath, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ owner_changed = module.set_owner_if_different(dir, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ owner_changed = module.set_owner_if_different(file, owner, False)
+ if owner_changed is True:
+ changed = owner_changed
+ else:
+ uid = pwd.getpwnam(owner).pw_uid
+ for dirpath, dirnames, filenames in os.walk(path):
+ owner_changed = (os.stat(dirpath).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ owner_changed = (os.stat(dir).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ owner_changed = (os.stat(file).st_uid != uid)
+ if owner_changed is True:
+ changed = owner_changed
+ if group is not None:
+ if not module.check_mode:
+ for dirpath, dirnames, filenames in os.walk(path):
+ group_changed = module.set_group_if_different(dirpath, group, False)
+ if group_changed is True:
+ changed = group_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ group_changed = module.set_group_if_different(dir, group, False)
+ if group_changed is True:
+ changed = group_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ group_changed = module.set_group_if_different(file, group, False)
+ if group_changed is True:
+ changed = group_changed
+ else:
+ gid = grp.getgrnam(group).gr_gid
+ for dirpath, dirnames, filenames in os.walk(path):
+ group_changed = (os.stat(dirpath).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+ for dir in [os.path.join(dirpath, d) for d in dirnames]:
+ group_changed = (os.stat(dir).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+ for file in [os.path.join(dirpath, f) for f in filenames]:
+ group_changed = (os.stat(file).st_gid != gid)
+ if group_changed is True:
+ changed = group_changed
+
+ return changed
+
+
+def copy_diff_files(src, dest, module):
+ """Copy files that are different between `src` directory and `dest` directory."""
+
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+ local_follow = module.params['local_follow']
+ diff_files = filecmp.dircmp(src, dest).diff_files
+ if len(diff_files):
+ changed = True
+ if not module.check_mode:
+ for item in diff_files:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+ if os.path.islink(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+ else:
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ shutil.copymode(b_src_item_path, b_dest_item_path)
+
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+ changed = True
+ return changed
+
+
+def copy_left_only(src, dest, module):
+ """Copy files that exist in `src` directory only to the `dest` directory."""
+
+ changed = False
+ owner = module.params['owner']
+ group = module.params['group']
+ local_follow = module.params['local_follow']
+ left_only = filecmp.dircmp(src, dest).left_only
+ if len(left_only):
+ changed = True
+ if not module.check_mode:
+ for item in left_only:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+
+ if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is True:
+                    shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
+ chown_recursive(b_dest_item_path, module)
+
+ if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+
+ if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is True:
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+
+ if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is False:
+ linkto = os.readlink(b_src_item_path)
+ os.symlink(linkto, b_dest_item_path)
+
+ if not os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path):
+ shutil.copyfile(b_src_item_path, b_dest_item_path)
+ shutil.copymode(b_src_item_path, b_dest_item_path)
+
+ if owner is not None:
+ module.set_owner_if_different(b_dest_item_path, owner, False)
+ if group is not None:
+ module.set_group_if_different(b_dest_item_path, group, False)
+
+ if not os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path):
+                    shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
+ chown_recursive(b_dest_item_path, module)
+
+ changed = True
+ return changed
+
+
+def copy_common_dirs(src, dest, module):
+ changed = False
+ common_dirs = filecmp.dircmp(src, dest).common_dirs
+ for item in common_dirs:
+ src_item_path = os.path.join(src, item)
+ dest_item_path = os.path.join(dest, item)
+ b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
+ b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')
+ diff_files_changed = copy_diff_files(b_src_item_path, b_dest_item_path, module)
+ left_only_changed = copy_left_only(b_src_item_path, b_dest_item_path, module)
+ if diff_files_changed or left_only_changed:
+ changed = True
+
+ # recurse into subdirectory
+ changed = changed or copy_common_dirs(os.path.join(src, item), os.path.join(dest, item), module)
+ return changed
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path'),
+ _original_basename=dict(type='str'), # used to handle 'dest is a directory' via template, a slight hack
+ content=dict(type='str', no_log=True),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ force=dict(type='bool', default=True, aliases=['thirsty']),
+ validate=dict(type='str'),
+ directory_mode=dict(type='raw'),
+ remote_src=dict(type='bool'),
+ local_follow=dict(type='bool'),
+ checksum=dict(type='str'),
+ follow=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='2.13', collection_name='ansible.builtin')
+
+ src = module.params['src']
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ dest = module.params['dest']
+ # Make sure we always have a directory component for later processing
+ if os.path.sep not in dest:
+ dest = '.{0}{1}'.format(os.path.sep, dest)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ backup = module.params['backup']
+ force = module.params['force']
+ _original_basename = module.params.get('_original_basename', None)
+ validate = module.params.get('validate', None)
+ follow = module.params['follow']
+ local_follow = module.params['local_follow']
+ mode = module.params['mode']
+ owner = module.params['owner']
+ group = module.params['group']
+ remote_src = module.params['remote_src']
+ checksum = module.params['checksum']
+
+ if not os.path.exists(b_src):
+ module.fail_json(msg="Source %s not found" % (src))
+ if not os.access(b_src, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % (src))
+
+ # Preserve is usually handled in the action plugin but mode + remote_src has to be done on the
+ # remote host
+ if module.params['mode'] == 'preserve':
+ module.params['mode'] = '0%03o' % stat.S_IMODE(os.stat(b_src).st_mode)
+ mode = module.params['mode']
+
+ checksum_dest = None
+
+ if os.path.isfile(src):
+ checksum_src = module.sha1(src)
+ else:
+ checksum_src = None
+
+ # Backwards compat only. This will be None in FIPS mode
+ try:
+ if os.path.isfile(src):
+ md5sum_src = module.md5(src)
+ else:
+ md5sum_src = None
+ except ValueError:
+ md5sum_src = None
+
+ changed = False
+
+ if checksum and checksum_src != checksum:
+ module.fail_json(
+ msg='Copied file does not match the expected checksum. Transfer failed.',
+ checksum=checksum_src,
+ expected_checksum=checksum
+ )
+
+ # Special handling for recursive copy - create intermediate dirs
+ if dest.endswith(os.sep):
+ if _original_basename:
+ dest = os.path.join(dest, _original_basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ dirname = os.path.dirname(dest)
+ b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
+ if not os.path.exists(b_dirname):
+ try:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
+ except AnsibleModuleError as e:
+                e.results['msg'] += ' Could not copy to {0}'.format(dest)
+ module.fail_json(**e.results)
+
+ os.makedirs(b_dirname)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ basename = os.path.basename(src)
+ if _original_basename:
+ basename = _original_basename
+ dest = os.path.join(dest, basename)
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.exists(b_dest):
+ if os.path.islink(b_dest) and follow:
+ b_dest = os.path.realpath(b_dest)
+ dest = to_native(b_dest, errors='surrogate_or_strict')
+ if not force:
+ module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
+ if os.access(b_dest, os.R_OK) and os.path.isfile(b_dest):
+ checksum_dest = module.sha1(dest)
+ else:
+ if not os.path.exists(os.path.dirname(b_dest)):
+ try:
+ # os.path.exists() can return false in some
+ # circumstances where the directory does not have
+ # the execute bit for the current user set, in
+ # which case the stat() call will raise an OSError
+ os.stat(os.path.dirname(b_dest))
+ except OSError as e:
+ if "permission denied" in to_native(e).lower():
+ module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
+ module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
+
+ if not os.access(os.path.dirname(b_dest), os.W_OK) and not module.params['unsafe_writes']:
+ module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
+
+ backup_file = None
+ if checksum_src != checksum_dest or os.path.islink(b_dest):
+ if not module.check_mode:
+ try:
+ if backup:
+ if os.path.exists(b_dest):
+ backup_file = module.backup_local(dest)
+ # allow for conversion from symlink.
+ if os.path.islink(b_dest):
+ os.unlink(b_dest)
+ open(b_dest, 'w').close()
+ if validate:
+ # if we have a mode, make sure we set it on the temporary
+ # file source as some validations may require it
+ if mode is not None:
+ module.set_mode_if_different(src, mode, False)
+ if owner is not None:
+ module.set_owner_if_different(src, owner, False)
+ if group is not None:
+ module.set_group_if_different(src, group, False)
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % src)
+ if rc != 0:
+ module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
+ b_mysrc = b_src
+ if remote_src and os.path.isfile(b_src):
+ _, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
+
+ shutil.copyfile(b_src, b_mysrc)
+ try:
+ shutil.copystat(b_src, b_mysrc)
+ except OSError as err:
+ if err.errno == errno.ENOSYS and mode == "preserve":
+ module.warn("Unable to copy stats {0}".format(to_native(b_src)))
+ else:
+ raise
+
+ # might be needed below
+ if PY3 and hasattr(os, 'listxattr'):
+ try:
+ src_has_acls = 'system.posix_acl_access' in os.listxattr(src)
+ except Exception as e:
+ # assume unwanted ACLs by default
+ src_has_acls = True
+
+ module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
+
+ if PY3 and hasattr(os, 'listxattr') and platform.system() == 'Linux' and not remote_src:
+ # atomic_move used above to copy src into dest might, in some cases,
+ # use shutil.copy2 which in turn uses shutil.copystat.
+ # Since Python 3.3, shutil.copystat copies file extended attributes:
+ # https://docs.python.org/3/library/shutil.html#shutil.copystat
+ # os.listxattr (along with others) was added to handle the operation.
+
+ # This means that on Python 3 we are copying the extended attributes which includes
+ # the ACLs on some systems - further limited to Linux as the documentation above claims
+ # that the extended attributes are copied only on Linux. Also, os.listxattr is only
+ # available on Linux.
+
+ # If not remote_src, then the file was copied from the controller. In that
+ # case, any filesystem ACLs are artifacts of the copy rather than preservation
+ # of existing attributes. Get rid of them:
+
+ if src_has_acls:
+                    # FIXME If dest has any default ACLs, they are not applied to src now because
+ # they were overridden by copystat. Should/can we do anything about this?
+ # 'system.posix_acl_default' in os.listxattr(os.path.dirname(b_dest))
+
+ try:
+ clear_facls(dest)
+ except ValueError as e:
+ if 'setfacl' in to_native(e):
+ # No setfacl so we're okay. The controller couldn't have set a facl
+ # without the setfacl command
+ pass
+ else:
+ raise
+ except RuntimeError as e:
+ # setfacl failed.
+ if 'Operation not supported' in to_native(e):
+ # The file system does not support ACLs.
+ pass
+ else:
+ raise
+
+ except (IOError, OSError):
+ module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
+ changed = True
+ else:
+ changed = False
+
+ # If neither have checksums, both src and dest are directories.
+ if checksum_src is None and checksum_dest is None:
+ if remote_src and os.path.isdir(module.params['src']):
+ b_src = to_bytes(module.params['src'], errors='surrogate_or_strict')
+ b_dest = to_bytes(module.params['dest'], errors='surrogate_or_strict')
+
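+            # Four cases follow, mirroring rsync-like semantics: a trailing
+            # separator on src copies the directory *contents*, while no
+            # trailing separator copies the directory itself; each case is
+            # handled for both an existing and a missing dest.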
+ if src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+
+ if src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ if not module.check_mode:
+                    shutil.copytree(b_src, b_dest, symlinks=not local_follow)
+ chown_recursive(dest, module)
+ changed = True
+
+ if not src.endswith(os.path.sep) and os.path.isdir(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(src), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ if not module.check_mode and not os.path.exists(b_dest):
+                    shutil.copytree(b_src, b_dest, symlinks=not local_follow)
+ changed = True
+ chown_recursive(dest, module)
+ if module.check_mode and not os.path.exists(b_dest):
+ changed = True
+ if os.path.exists(b_dest):
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+
+ if not src.endswith(os.path.sep) and not os.path.exists(module.params['dest']):
+ b_basename = to_bytes(os.path.basename(module.params['src']), errors='surrogate_or_strict')
+ b_dest = to_bytes(os.path.join(b_dest, b_basename), errors='surrogate_or_strict')
+ if not module.check_mode and not os.path.exists(b_dest):
+ os.makedirs(b_dest)
+ b_src = to_bytes(os.path.join(module.params['src'], ""), errors='surrogate_or_strict')
+ diff_files_changed = copy_diff_files(b_src, b_dest, module)
+ left_only_changed = copy_left_only(b_src, b_dest, module)
+ common_dirs_changed = copy_common_dirs(b_src, b_dest, module)
+ owner_group_changed = chown_recursive(b_dest, module)
+ if diff_files_changed or left_only_changed or common_dirs_changed or owner_group_changed:
+ changed = True
+ if module.check_mode and not os.path.exists(b_dest):
+ changed = True
+
+ res_args = dict(
+ dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
+ )
+ if backup_file:
+ res_args['backup_file'] = backup_file
+
+ if not module.check_mode:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cron.py b/lib/ansible/modules/cron.py
new file mode 100644
index 00000000..93277a49
--- /dev/null
+++ b/lib/ansible/modules/cron.py
@@ -0,0 +1,773 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
+# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
+# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
+# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: cron
+short_description: Manage cron.d and crontab entries
+description:
+  - Use this module to manage crontab and environment variable entries. This module allows
+    you to create, update, or delete environment variables and named crontab entries.
+ - 'When crontab jobs are managed: the module includes one line with the description of the
+ crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
+ which is used by future ansible/module calls to find/check the state. The "name"
+ parameter should be unique, and changing the "name" value will result in a new cron
+ task being created (or a different one being removed).'
+ - When environment variables are managed, no comment line is added, but, when the module
+ needs to find/check the state, it uses the "name" parameter to find the environment
+ variable definition line.
+ - When using symbols such as %, they must be properly escaped.
+version_added: "0.9"
+options:
+ name:
+ description:
+ - Description of a crontab entry or, if env is set, the name of environment variable.
+ - Required if C(state=absent).
+ - Note that if name is not set and C(state=present), then a
+ new crontab entry will always be created, regardless of existing ones.
+ - This parameter will always be required in future releases.
+ type: str
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - When unset, this parameter defaults to the current user.
+ type: str
+ job:
+ description:
+ - The command to execute or, if env is set, the value of environment variable.
+ - The command should not contain line breaks.
+ - Required if C(state=present).
+ type: str
+ aliases: [ value ]
+ state:
+ description:
+ - Whether to ensure the job or environment variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ - If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
+ - If it is absolute, it will typically be I(/etc/crontab).
+    - Many Linux distros expect (and some require) the filename portion to consist solely
+ of upper- and lower-case letters, digits, underscores, and hyphens.
+ - To use the C(cron_file) parameter you must specify the C(user) as well.
+ type: str
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup_file) variable by this module.
+ type: bool
+ default: no
+ minute:
+ description:
+ - Minute when the job should run ( 0-59, *, */2, etc )
+ type: str
+ default: "*"
+ hour:
+ description:
+ - Hour when the job should run ( 0-23, *, */2, etc )
+ type: str
+ default: "*"
+ day:
+ description:
+ - Day of the month the job should run ( 1-31, *, */2, etc )
+ type: str
+ default: "*"
+ aliases: [ dom ]
+ month:
+ description:
+ - Month of the year the job should run ( 1-12, *, */2, etc )
+ type: str
+ default: "*"
+ weekday:
+ description:
+ - Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
+ type: str
+ default: "*"
+ aliases: [ dow ]
+ reboot:
+ description:
+ - If the job should be run at reboot. This option is deprecated. Users should use special_time.
+ version_added: "1.0"
+ type: bool
+ default: no
+ special_time:
+ description:
+ - Special time specification nickname.
+ type: str
+ choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
+ version_added: "1.3"
+ disabled:
+ description:
+ - If the job should be disabled (commented out) in the crontab.
+ - Only has effect if C(state=present).
+ type: bool
+ default: no
+ version_added: "2.0"
+ env:
+ description:
+ - If set, manages a crontab's environment variable.
+ - New variables are added on top of crontab.
+ - C(name) and C(value) parameters are the name and the value of environment variable.
+ type: bool
+ default: false
+ version_added: "2.1"
+ insertafter:
+ description:
+ - Used with C(state=present) and C(env).
+ - If specified, the environment variable will be inserted after the declaration of specified environment variable.
+ type: str
+ version_added: "2.1"
+ insertbefore:
+ description:
+ - Used with C(state=present) and C(env).
+ - If specified, the environment variable will be inserted before the declaration of specified environment variable.
+ type: str
+ version_added: "2.1"
+requirements:
+ - cron (or cronie on CentOS)
+author:
+ - Dane Summers (@dsummersl)
+ - Mike Grozak (@rhaido)
+ - Patrick Callahan (@dirtyharrycallahan)
+ - Evan Kaufman (@EvanK)
+ - Luca Berruti (@lberruti)
+'''
+
+EXAMPLES = r'''
+- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * * ls -alh > /dev/null"
+ cron:
+ name: "check dirs"
+ minute: "0"
+ hour: "5,2"
+ job: "ls -alh > /dev/null"
+
+- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
+ cron:
+ name: "an old job"
+ state: absent
+
+- name: Creates an entry like "@reboot /some/job.sh"
+ cron:
+ name: "a job for reboot"
+ special_time: reboot
+ job: "/some/job.sh"
+
+- name: Creates an entry like "PATH=/opt/bin" on top of crontab
+ cron:
+ name: PATH
+ env: yes
+ job: /opt/bin
+
+- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
+ cron:
+ name: APP_HOME
+ env: yes
+ job: /srv/app
+ insertafter: PATH
+
+- name: Creates a cron file under /etc/cron.d
+ cron:
+ name: yum autoupdate
+ weekday: "2"
+ minute: "0"
+ hour: "12"
+ user: root
+ job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
+ cron_file: ansible_yum-autoupdate
+
+- name: Removes a cron file from under /etc/cron.d
+ cron:
+ name: "yum autoupdate"
+ cron_file: ansible_yum-autoupdate
+ state: absent
+
+- name: Removes "APP_HOME" environment variable from crontab
+ cron:
+ name: APP_HOME
+ env: yes
+ state: absent
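+
+# Illustrative sketch only (the script path is hypothetical): disabled=yes keeps
+# the entry in the crontab but commented out, so it can be re-enabled later
+# without retyping the schedule.
+- name: Keep a job in the crontab but commented out
+  cron:
+    name: "nightly cleanup"
+    minute: "30"
+    hour: "3"
+    job: "/usr/local/bin/cleanup.sh"
+    disabled: yes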
+'''
+
+import os
+import platform
+import pwd
+import re
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronTabError(Exception):
+ pass
+
+
+class CronTab(object):
+ """
+ CronTab object to write time based crontab file
+
+ user - the user of the crontab (defaults to current user)
+ cron_file - a cron file under /etc/cron.d, or an absolute path
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.root = (os.getuid() == 0)
+ self.lines = None
+ self.ansible = "#Ansible: "
+ self.n_existing = ''
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.b_cron_file, 'rb')
+ self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
+ self.lines = self.n_existing.splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronTabError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronTabError("Unable to read crontab")
+
+ self.n_existing = out
+
+ lines = out.splitlines()
+ count = 0
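+            # Some crontab implementations prepend auto-generated header lines
+            # ('DO NOT EDIT THIS FILE ...'); strip up to three of them from both
+            # the working copy and n_existing so they are not duplicated when
+            # the crontab is written back.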
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
+ not re.match(r'# \(/tmp/.*installed on.*\)', l) and
+ not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ else:
+ pattern = re.escape(l) + '[\r\n]?'
+ self.n_existing = re.sub(pattern, '', self.n_existing, 1)
+ count += 1
+
+ def is_empty(self):
+        return len(self.lines) == 0
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'wb')
+ elif self.cron_file:
+ fileh = open(self.b_cron_file, 'wb')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ os.chmod(path, int('0644', 8))
+ fileh = os.fdopen(filed, 'wb')
+
+ fileh.write(to_bytes(self.render()))
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ # set SELinux permissions
+ if self.module.selinux_enabled() and self.cron_file:
+ self.module.set_default_selinux_context(self.cron_file, False)
+
+ def do_comment(self, name):
+ return "%s%s" % (self.ansible, name)
+
+ def add_job(self, name, job):
+ # Add the comment
+ self.lines.append(self.do_comment(name))
+
+ # Add the job
+ self.lines.append("%s" % (job))
+
+ def update_job(self, name, job):
+ return self._update_job(name, job, self.do_add_job)
+
+ def do_add_job(self, lines, comment, job):
+ lines.append(comment)
+
+ lines.append("%s" % (job))
+
+ def remove_job(self, name):
+ return self._update_job(name, "", self.do_remove_job)
+
+ def do_remove_job(self, lines, comment, job):
+ return None
+
+ def add_env(self, decl, insertafter=None, insertbefore=None):
+ if not (insertafter or insertbefore):
+ self.lines.insert(0, decl)
+ return
+
+ if insertafter:
+ other_name = insertafter
+ elif insertbefore:
+ other_name = insertbefore
+ other_decl = self.find_env(other_name)
+ if len(other_decl) > 0:
+ if insertafter:
+ index = other_decl[0] + 1
+ elif insertbefore:
+ index = other_decl[0]
+ self.lines.insert(index, decl)
+ return
+
+ self.module.fail_json(msg="Variable named '%s' not found." % other_name)
+
+ def update_env(self, name, decl):
+ return self._update_env(name, decl, self.do_add_env)
+
+ def do_add_env(self, lines, decl):
+ lines.append(decl)
+
+ def remove_env(self, name):
+ return self._update_env(name, '', self.do_remove_env)
+
+ def do_remove_env(self, lines, decl):
+ return None
+
+ def remove_job_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronTabError("Unexpected error:", sys.exc_info()[0])
+
+ def find_job(self, name, job=None):
+ # attempt to find job by 'Ansible:' header comment
+ comment = None
+ for l in self.lines:
+ if comment is not None:
+ if comment == name:
+ return [comment, l]
+ else:
+ comment = None
+ elif re.match(r'%s' % self.ansible, l):
+ comment = re.sub(r'%s' % self.ansible, '', l)
+
+ # failing that, attempt to find job by exact match
+ if job:
+ for i, l in enumerate(self.lines):
+ if l == job:
+ # if no leading ansible header, insert one
+ if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
+ self.lines.insert(i, self.do_comment(name))
+ return [self.lines[i], l, True]
+ # if a leading blank ansible header AND job has a name, update header
+ elif name and self.lines[i - 1] == self.do_comment(None):
+ self.lines[i - 1] = self.do_comment(name)
+ return [self.lines[i - 1], l, True]
+
+ return []
+
+ def find_env(self, name):
+ for index, l in enumerate(self.lines):
+ if re.match(r'^%s=' % name, l):
+ return [index, l]
+
+ return []
+
+ def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
+ # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
+ job = job.strip('\r\n')
+
+ if disabled:
+ disable_prefix = '#'
+ else:
+ disable_prefix = ''
+
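+        # The rendered line is either '@<special> <job>' or the usual five
+        # time fields followed by the job; entries written to a cron.d file
+        # additionally carry the user column between the schedule and the job.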
+ if special:
+ if self.cron_file:
+ return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
+ else:
+ return "%s@%s %s" % (disable_prefix, special, job)
+ else:
+ if self.cron_file:
+ return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
+ else:
+ return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)
+
+ def get_jobnames(self):
+ jobnames = []
+
+ for l in self.lines:
+ if re.match(r'%s' % self.ansible, l):
+ jobnames.append(re.sub(r'%s' % self.ansible, '', l))
+
+ return jobnames
+
+ def get_envnames(self):
+ envnames = []
+
+ for l in self.lines:
+ if re.match(r'^\S+=', l):
+ envnames.append(l.split('=')[0])
+
+ return envnames
+
+ def _update_job(self, name, job, addlinesfunction):
+ ansiblename = self.do_comment(name)
+ newlines = []
+ comment = None
+
+ for l in self.lines:
+ if comment is not None:
+ addlinesfunction(newlines, comment, job)
+ comment = None
+ elif l == ansiblename:
+ comment = l
+ else:
+ newlines.append(l)
+
+ self.lines = newlines
+
+        return len(newlines) == 0  # TODO add some more error testing
+
+ def _update_env(self, name, decl, addenvfunction):
+ newlines = []
+
+ for l in self.lines:
+ if re.match(r'^%s=' % name, l):
+ addenvfunction(newlines, decl)
+ else:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render this crontab as it would be in the crontab.
+ """
+ crons = []
+ for cron in self.lines:
+ crons.append(cron)
+
+ result = '\n'.join(crons)
+ if result:
+ result = result.rstrip('\r\n') + '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+def main():
+ # The following example playbooks:
+ #
+ # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
+ #
+ # - name: do the job
+ # cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
+ #
+ # - name: no job
+ # cron: name="an old job" state=absent
+ #
+ # - name: sets env
+ # cron: name="PATH" env=yes value="/bin:/usr/bin"
+ #
+ # Would produce:
+ # PATH=/bin:/usr/bin
+ # # Ansible: check dirs
+    # * 5,2 * * * ls -alh > /dev/null
+ # # Ansible: do the job
+    # * 5,2 * * * /some/dir/job.sh
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ user=dict(type='str'),
+ job=dict(type='str', aliases=['value']),
+ cron_file=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ backup=dict(type='bool', default=False),
+ minute=dict(type='str', default='*'),
+ hour=dict(type='str', default='*'),
+ day=dict(type='str', default='*', aliases=['dom']),
+ month=dict(type='str', default='*'),
+ weekday=dict(type='str', default='*', aliases=['dow']),
+ reboot=dict(type='bool', default=False),
+ special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
+ disabled=dict(type='bool', default=False),
+ env=dict(type='bool', default=False),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['reboot', 'special_time'],
+ ['insertafter', 'insertbefore'],
+ ],
+ )
+
+ name = module.params['name']
+ user = module.params['user']
+ job = module.params['job']
+ cron_file = module.params['cron_file']
+ state = module.params['state']
+ backup = module.params['backup']
+ minute = module.params['minute']
+ hour = module.params['hour']
+ day = module.params['day']
+ month = module.params['month']
+ weekday = module.params['weekday']
+ reboot = module.params['reboot']
+ special_time = module.params['special_time']
+ disabled = module.params['disabled']
+ env = module.params['env']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ do_install = state == 'present'
+
+ changed = False
+ res_args = dict()
+ warnings = list()
+
+ if cron_file:
+ cron_file_basename = os.path.basename(cron_file)
+ if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
+ warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
+ ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ crontab = CronTab(module, user, cron_file)
+
+ module.debug('cron instantiated - name: "%s"' % name)
+
+ if not name:
+ module.deprecate(
+ msg="The 'name' parameter will be required in future releases.",
+ version='2.12', collection_name='ansible.builtin'
+ )
+ if reboot:
+ module.deprecate(
+ msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
+ version='2.12', collection_name='ansible.builtin'
+ )
+
+ if module._diff:
+ diff = dict()
+ diff['before'] = crontab.n_existing
+ if crontab.cron_file:
+ diff['before_header'] = crontab.cron_file
+ else:
+ if crontab.user:
+ diff['before_header'] = 'crontab for user "%s"' % crontab.user
+ else:
+ diff['before_header'] = 'crontab'
+
+ # --- user input validation ---
+
+ if env and not name:
+ module.fail_json(msg="You must specify 'name' while working with environment variables (env=yes)")
+
+    if (special_time or reboot) and any(x != '*' for x in [minute, hour, day, month, weekday]):
+ module.fail_json(msg="You must specify time and date fields or special time.")
+
+ # cannot support special_time on solaris
+ if (special_time or reboot) and platform.system() == 'SunOS':
+ module.fail_json(msg="Solaris does not support special_time=... or @reboot")
+
+ if cron_file and do_install:
+ if not user:
+ module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
+
+ if job is None and do_install:
+ module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
+
+ if (insertafter or insertbefore) and not env and do_install:
+ module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
+
+ if reboot:
+ special_time = "reboot"
+
+ # if requested make a backup before making a change
+ if backup and not module.check_mode:
+ (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
+ crontab.write(backup_file)
+
+ if crontab.cron_file and not do_install:
+ if module._diff:
+ diff['after'] = ''
+ diff['after_header'] = '/dev/null'
+ else:
+ diff = dict()
+ if module.check_mode:
+ changed = os.path.isfile(crontab.cron_file)
+ else:
+ changed = crontab.remove_job_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
+
+ if env:
+ if ' ' in name:
+ module.fail_json(msg="Invalid name for environment variable")
+ decl = '%s="%s"' % (name, job)
+ old_decl = crontab.find_env(name)
+
+ if do_install:
+ if len(old_decl) == 0:
+ crontab.add_env(decl, insertafter, insertbefore)
+ changed = True
+ if len(old_decl) > 0 and old_decl[1] != decl:
+ crontab.update_env(name, decl)
+ changed = True
+ else:
+ if len(old_decl) > 0:
+ crontab.remove_env(name)
+ changed = True
+ else:
+ if do_install:
+ for char in ['\r', '\n']:
+ if char in job.strip('\r\n'):
+ warnings.append('Job should not contain line breaks')
+ break
+
+ job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
+ old_job = crontab.find_job(name, job)
+
+ if len(old_job) == 0:
+ crontab.add_job(name, job)
+ changed = True
+ if len(old_job) > 0 and old_job[1] != job:
+ crontab.update_job(name, job)
+ changed = True
+ if len(old_job) > 2:
+ crontab.update_job(name, job)
+ changed = True
+ else:
+ old_job = crontab.find_job(name)
+
+ if len(old_job) > 0:
+ crontab.remove_job(name)
+ changed = True
+
+ # no changes to env/job, but existing crontab needs a terminating newline
+ if not changed and crontab.n_existing != '':
+ if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
+ changed = True
+
+ res_args = dict(
+ jobs=crontab.get_jobnames(),
+ envs=crontab.get_envnames(),
+ warnings=warnings,
+ changed=changed
+ )
+
+ if changed:
+ if not module.check_mode:
+ crontab.write()
+ if module._diff:
+ diff['after'] = crontab.render()
+ if crontab.cron_file:
+ diff['after_header'] = crontab.cron_file
+ else:
+ if crontab.user:
+ diff['after_header'] = 'crontab for user "%s"' % crontab.user
+ else:
+ diff['after_header'] = 'crontab'
+
+ res_args['diff'] = diff
+
+ # retain the backup only if crontab or cron file have changed
+ if backup and not module.check_mode:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+ # --- should never get here
+ module.exit_json(msg="Unable to execute cron task.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/debconf.py b/lib/ansible/modules/debconf.py
new file mode 100644
index 00000000..8dbb30d3
--- /dev/null
+++ b/lib/ansible/modules/debconf.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: debconf
+short_description: Configure a .deb package
+description:
+ - Configure a .deb package using debconf-set-selections.
+ - Or just query existing selections.
+version_added: "1.6"
+notes:
+ - This module requires the command line debconf tools.
+ - A number of questions have to be answered (depending on the package).
+ Use 'debconf-show <package>' on any Debian or derivative with the package
+ installed to see questions/settings available.
+ - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
+  - It is highly recommended to add I(no_log=True) to the task while handling sensitive information using this module.
+requirements:
+- debconf
+- debconf-utils
+options:
+ name:
+ description:
+ - Name of package to configure.
+ type: str
+ required: true
+ aliases: [ pkg ]
+ question:
+ description:
+ - A debconf configuration setting.
+ type: str
+ aliases: [ selection, setting ]
+ vtype:
+ description:
+ - The type of the value supplied.
+      - It is highly recommended to add I(no_log=True) to the task while specifying I(vtype=password).
+ - C(seen) was added in Ansible 2.2.
+ type: str
+ choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
+ value:
+ description:
+ - Value to set the configuration to.
+ type: str
+ aliases: [ answer ]
+ unseen:
+ description:
+ - Do not set 'seen' flag when pre-seeding.
+ type: bool
+ default: false
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = r'''
+- name: Set default locale to fr_FR.UTF-8
+ debconf:
+ name: locales
+ question: locales/default_environment_locale
+ value: fr_FR.UTF-8
+ vtype: select
+
+- name: Set to generate locales
+ debconf:
+ name: locales
+ question: locales/locales_to_be_generated
+ value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
+ vtype: multiselect
+
+- name: Accept oracle license
+ debconf:
+ name: oracle-java7-installer
+ question: shared/accepted-oracle-license-v1-1
+ value: 'true'
+ vtype: select
+
+- name: Specifying the package name alone will register/return the list of questions and current values
+ debconf:
+ name: tzdata
+
+- name: Pre-configure tripwire site passphrase
+ debconf:
+ name: tripwire
+ question: tripwire/site-passphrase
+ value: "{{ site_passphrase }}"
+ vtype: password
+ no_log: True
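+
+# Illustrative sketch: with vtype=boolean the module normalizes the value to the
+# 'true'/'false' strings debconf expects. The question name is the one commonly
+# shipped by the popularity-contest package; verify it on your system with
+# 'debconf-show popularity-contest'.
+- name: Opt out of popularity-contest participation
+  debconf:
+    name: popularity-contest
+    question: popularity-contest/participate
+    value: 'false'
+    vtype: boolean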
+'''
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_selections(module, pkg):
+ cmd = [module.get_bin_path('debconf-show', True), pkg]
+    rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ selections = {}
+
+ for line in out.splitlines():
+ (key, value) = line.split(':', 1)
+ selections[key.strip('*').strip()] = value.strip()
+
+ return selections
+
+
+def set_selection(module, pkg, question, vtype, value, unseen):
+ setsel = module.get_bin_path('debconf-set-selections', True)
+ cmd = [setsel]
+ if unseen:
+ cmd.append('-u')
+
+ if vtype == 'boolean':
+ if value == 'True':
+ value = 'true'
+ elif value == 'False':
+ value = 'false'
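+    # debconf-set-selections consumes whitespace-separated lines of the form
+    # '<package> <question> <vtype> <value>', for example:
+    #   'locales locales/default_environment_locale select fr_FR.UTF-8'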
+ data = ' '.join([pkg, question, vtype, value])
+
+ return module.run_command(cmd, data=data)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['pkg']),
+ question=dict(type='str', aliases=['selection', 'setting']),
+ vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
+ value=dict(type='str', aliases=['answer']),
+ unseen=dict(type='bool', default=False),
+ ),
+ required_together=(['question', 'vtype', 'value'],),
+ supports_check_mode=True,
+ )
+
+ # TODO: enable passing array of options and/or debconf file from get-selections dump
+ pkg = module.params["name"]
+ question = module.params["question"]
+ vtype = module.params["vtype"]
+ value = module.params["value"]
+ unseen = module.params["unseen"]
+
+ prev = get_selections(module, pkg)
+
+ changed = False
+ msg = ""
+
+ if question is not None:
+ if vtype is None or value is None:
+ module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
+
+ # if question doesn't exist, value cannot match
+ if question not in prev:
+ changed = True
+ else:
+
+ existing = prev[question]
+
+ # ensure we compare booleans supplied to the way debconf sees them (true/false strings)
+ if vtype == 'boolean':
+ value = to_text(value).lower()
+ existing = to_text(prev[question]).lower()
+
+ if value != existing:
+ changed = True
+
+ if changed:
+ if not module.check_mode:
+ rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
+ if rc:
+ module.fail_json(msg=e)
+
+ curr = {question: value}
+ if question in prev:
+ prev = {question: prev[question]}
+ else:
+ prev[question] = ''
+ if module._diff:
+ after = prev.copy()
+ after.update(curr)
+ diff_dict = {'before': prev, 'after': after}
+ else:
+ diff_dict = {}
+
+ module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
+
+ module.exit_json(changed=changed, msg=msg, current=prev)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/debug.py b/lib/ansible/modules/debug.py
new file mode 100644
index 00000000..bd8eb7e2
--- /dev/null
+++ b/lib/ansible/modules/debug.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: debug
+short_description: Print statements during execution
+description:
+- This module prints statements during execution and can be useful
+ for debugging variables or expressions without necessarily halting
+ the playbook.
+- Useful for debugging together with the 'when:' directive.
+- This module is also supported for Windows targets.
+version_added: '0.8'
+options:
+ msg:
+ description:
+ - The customized message that is printed. If omitted, prints a generic message.
+ type: str
+ default: 'Hello world!'
+ var:
+ description:
+ - A variable name to debug.
+ - Mutually exclusive with the C(msg) option.
+ - Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
+ so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
+ type: str
+ verbosity:
+ description:
+ - A number that controls when the debug is run. If you set it to 3, it will only run the debug task when -vvv or above is used.
+ type: int
+ default: 0
+ version_added: '2.1'
+notes:
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.fail
+author:
+- Dag Wieers (@dagwieers)
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+# Example that prints the loopback address and gateway for each host
+- debug:
+ msg: System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}
+
+- debug:
+ msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
+ when: ansible_default_ipv4.gateway is defined
+
+# Example that prints return information from the previous task
+- shell: /usr/bin/uptime
+ register: result
+
+- debug:
+ var: result
+ verbosity: 2
+
+- name: Display all variables/facts known for a host
+ debug:
+ var: hostvars[inventory_hostname]
+ verbosity: 4
+
+# Example that prints two lines of messages, but only if there is an environment value set
+- debug:
+ msg:
+ - "Provisioning based on YOUR_KEY which is: {{ lookup('env', 'YOUR_KEY') }}"
+ - "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
+'''
diff --git a/lib/ansible/modules/dnf.py b/lib/ansible/modules/dnf.py
new file mode 100644
index 00000000..8ffe61ad
--- /dev/null
+++ b/lib/ansible/modules/dnf.py
@@ -0,0 +1,1330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Cristian van Ee <cristian at cvee.org>
+# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
+# Copyright 2018 Adam Miller <admiller@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnf
+version_added: 1.9
+short_description: Manages packages with the I(dnf) package manager
+description:
+ - Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
+options:
+ name:
+ description:
+ - "A package name or package specifier with version, like C(name-1.0).
+ When using state=latest, this can be '*' which means run: dnf -y update.
+ You can also pass a url or a local path to a rpm file.
+ To operate on several packages this can accept a comma separated string of packages or a list of packages."
+ required: true
+ aliases:
+ - pkg
+ type: list
+ elements: str
+
+ list:
+ description:
+ - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
+
+ state:
+ description:
+ - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ - Default is C(None); however, in effect the default action is C(present) unless the C(autoremove) option is
+ enabled for this module, in which case C(absent) is inferred.
+ choices: ['absent', 'present', 'installed', 'removed', 'latest']
+
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a ",".
+
+ conf_file:
+ description:
+ - The remote dnf configuration file to use for the transaction.
+
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ - This setting affects packages installed from a repository as well as
+ "local" packages installed from the filesystem or a URL.
+ type: bool
+ default: 'no'
+
+ installroot:
+ description:
+ - Specifies an alternative installroot, relative to which all packages
+ will be installed.
+ version_added: "2.3"
+ default: "/"
+
+ releasever:
+ description:
+ - Specifies an alternative release from which all packages will be
+ installed.
+ version_added: "2.6"
+
+ autoremove:
+ description:
+ - If C(yes), removes all "leaf" packages from the system that were originally
+ installed as dependencies of user-installed packages but which are no longer
+ required by any such package. Should be used alone or when state is I(absent).
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ exclude:
+ description:
+ - Package name(s) to exclude when state=present or latest. This can be a
+ list or a comma separated string.
+ version_added: "2.7"
+ skip_broken:
+ description:
+ - Skip packages with broken dependencies (depsolve) that are causing problems.
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ update_cache:
+ description:
+ - Force dnf to check if cache is out of date and redownload if needed.
+ Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ aliases: [ expire-cache ]
+ version_added: "2.7"
+ update_only:
+ description:
+ - When using latest, only update installed packages. Do not install packages.
+ - Has an effect only if state is I(latest).
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ security:
+ description:
+ - If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
+ - Note that, similar to ``dnf upgrade-minimal``, this filter applies to dependencies as well.
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ bugfix:
+ description:
+ - If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
+ - Note that, similar to ``dnf upgrade-minimal``, this filter applies to dependencies as well.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ enable_plugin:
+ description:
+ - I(Plugin) name to enable for the install/update operation.
+ The enabled plugin will not persist beyond the transaction.
+ version_added: "2.7"
+ disable_plugin:
+ description:
+ - I(Plugin) name to disable for the install/update operation.
+ The disabled plugins will not persist beyond the transaction.
+ version_added: "2.7"
+ disable_excludes:
+ description:
+ - Disable the excludes defined in DNF config files.
+ - If set to C(all), disables all excludes.
+ - If set to C(main), disable excludes defined in [main] in dnf.conf.
+ - If set to C(repoid), disable excludes defined for given repo id.
+ version_added: "2.7"
+ validate_certs:
+ description:
+ - This only applies if using an https url as the source of the rpm, e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
+ type: bool
+ default: "yes"
+ version_added: "2.7"
+ allow_downgrade:
+ description:
+ - Specify if the named package and version is allowed to downgrade
+ a possibly already installed higher version of that package.
+ Note that setting allow_downgrade=True can make this module
+ behave in a non-idempotent way. The task could end up with a set
+ of packages that does not match the complete list of specified
+ packages to install (because dependencies between the downgraded
+ package and others can cause changes to the packages which were
+ in the earlier transaction).
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ install_repoquery:
+ description:
+ - This is effectively a no-op in DNF as it is not needed, but it is an accepted parameter for feature
+ parity/compatibility with the I(yum) module.
+ type: bool
+ default: "yes"
+ version_added: "2.7"
+ download_only:
+ description:
+ - Only download the packages, do not install them.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ lock_timeout:
+ description:
+ - Amount of time to wait for the dnf lockfile to be freed.
+ required: false
+ default: 30
+ type: int
+ version_added: "2.8"
+ install_weak_deps:
+ description:
+ - Will also install all packages linked by a weak dependency relation.
+ type: bool
+ default: "yes"
+ version_added: "2.8"
+ download_dir:
+ description:
+ - Specifies an alternate directory to store packages.
+ - Has an effect only if I(download_only) is specified.
+ type: str
+ version_added: "2.8"
+ allowerasing:
+ description:
+ - If C(yes) it allows erasing of installed packages to resolve dependencies.
+ required: false
+ type: bool
+ default: "no"
+ version_added: "2.10"
+notes:
+ - When used with a `loop:` each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
+ - Group removal doesn't work if the group was installed with Ansible because
+ upstream dnf's API doesn't properly mark groups as installed, therefore upon
+ removal the module is unable to detect that the group is installed
+ (https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
+requirements:
+ - "python >= 2.6"
+ - python-dnf
+ - for the autoremove option you need dnf >= 2.0.1"
+author:
+ - Igor Gnatenko (@ignatenkobrain) <i.gnatenko.brain@gmail.com>
+ - Cristian van Ee (@DJMuggs) <cristian at cvee.org>
+ - Berend De Schouwer (@berenddeschouwer)
+ - Adam Miller (@maxamillion) <admiller@redhat.com>
+'''
+
+EXAMPLES = '''
+- name: Install the latest version of Apache
+ dnf:
+ name: httpd
+ state: latest
+
+- name: Install the latest version of Apache and MariaDB
+ dnf:
+ name:
+ - httpd
+ - mariadb-server
+ state: latest
+
+- name: Remove the Apache package
+ dnf:
+ name: httpd
+ state: absent
+
+- name: Install the latest version of Apache from the testing repo
+ dnf:
+ name: httpd
+ enablerepo: testing
+ state: present
+
+- name: Upgrade all packages
+ dnf:
+ name: "*"
+ state: latest
+
+- name: Install the nginx rpm from a remote repo
+ dnf:
+ name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
+ state: present
+
+- name: Install nginx rpm from a local file
+ dnf:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install the 'Development tools' package group
+ dnf:
+ name: '@Development tools'
+ state: present
+
+- name: Autoremove unneeded packages installed as dependencies
+ dnf:
+ autoremove: yes
+
+- name: Uninstall httpd but keep its dependencies
+ dnf:
+ name: httpd
+ state: absent
+ autoremove: no
+
+- name: Install a modularity appstream with defined stream and profile
+ dnf:
+ name: '@postgresql:9.6/client'
+ state: present
+
+- name: Install a modularity appstream with defined stream
+ dnf:
+ name: '@postgresql:9.6'
+ state: present
+
+- name: Install a modularity appstream with defined profile
+ dnf:
+ name: '@postgresql/client'
+ state: present
+'''
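+
+# A hypothetical task (not among the upstream examples) combining the
+# security and state=latest options documented above:
+#
+# - name: Apply security updates only
+#   dnf:
+#     name: "*"
+#     state: latest
+#     security: yes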
+
+import os
+import re
+import sys
+
+try:
+ import dnf
+ import dnf.cli
+ import dnf.const
+ import dnf.exceptions
+ import dnf.subject
+ import dnf.util
+ HAS_DNF = True
+except ImportError:
+ HAS_DNF = False
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_file
+from ansible.module_utils.six import PY2, text_type
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
+
+class DnfModule(YumDnf):
+ """
+ DNF Ansible module back-end implementation
+ """
+
+ def __init__(self, module):
+ # This populates instance vars for all argument spec params
+ super(DnfModule, self).__init__(module)
+
+ self._ensure_dnf()
+ self.lockfile = "/var/cache/dnf/*_lock.pid"
+ self.pkg_mgr_name = "dnf"
+
+ try:
+ self.with_modules = dnf.base.WITH_MODULES
+ except AttributeError:
+ self.with_modules = False
+
+ # DNF specific args that are not part of YumDnf
+ self.allowerasing = self.module.params['allowerasing']
+
+ def is_lockfile_pid_valid(self):
+ # FIXME? it looks like DNF takes care of invalid lock files itself?
+ # https://github.com/ansible/ansible/issues/57189
+ return True
+
+ def _sanitize_dnf_error_msg_install(self, spec, error):
+ """
+ For unhandled dnf.exceptions.Error scenarios, there are certain error
+ messages we want to filter in an install scenario. Do that here.
+ """
+ if (
+ to_text("no package matched") in to_text(error) or
+ to_text("No match for argument:") in to_text(error)
+ ):
+ return "No package {0} available.".format(spec)
+
+ return error
+
+ def _sanitize_dnf_error_msg_remove(self, spec, error):
+ """
+ For unhandled dnf.exceptions.Error scenarios, there are certain error
+ messages we want to ignore in a removal scenario as known benign
+ failures. Do that here.
+ """
+ if (
+ 'no package matched' in to_native(error) or
+ 'No match for argument:' in to_native(error)
+ ):
+ return (False, "{0} is not installed".format(spec))
+
+ # Return value is tuple of:
+ # ("Is this actually a failure?", "Error Message")
+ return (True, error)
+
+ def _package_dict(self, package):
+ """Return a dictionary of information for the package."""
+ # NOTE: This no longer contains the 'dnfstate' field because it is
+ # already known based on the query type.
+ result = {
+ 'name': package.name,
+ 'arch': package.arch,
+ 'epoch': str(package.epoch),
+ 'release': package.release,
+ 'version': package.version,
+ 'repo': package.repoid}
+ result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
+ **result)
+
+ if package.installtime == 0:
+ result['yumstate'] = 'available'
+ else:
+ result['yumstate'] = 'installed'
+
+ return result
+
+ def _packagename_dict(self, packagename):
+ """
+ Return a dictionary of information for a package name string or None
+ if the package name doesn't contain at least all NVR elements
+ """
+
+ if packagename[-4:] == '.rpm':
+ packagename = packagename[:-4]
+
+ # This list was auto generated on a Fedora 28 system with the following one-liner
+ # printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
+ redhat_rpm_arches = [
+ "aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
+ "alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
+ "armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
+ "geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
+ "mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
+ "noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
+ "ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
+ "riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
+ "sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
+ ]
+
+ rpm_arch_re = re.compile(r'(.*)\.(.*)')
+ rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.+]*)')
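+ # e.g. a hypothetical 'foo-1.0-1.fc28.x86_64' splits into arch 'x86_64'
+ # and NEVR 'foo-1.0-1.fc28' (name 'foo', no epoch, version '1.0',
+ # release '1.fc28')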
+ try:
+ arch = None
+ rpm_arch_match = rpm_arch_re.match(packagename)
+ if rpm_arch_match:
+ nevr, arch = rpm_arch_match.groups()
+ if arch in redhat_rpm_arches:
+ packagename = nevr
+ rpm_nevr_match = rpm_nevr_re.match(packagename)
+ if rpm_nevr_match:
+ name, epoch, version, release = rpm_nevr_match.groups()
+ if not version or not version.split('.')[0].isdigit():
+ return None
+ else:
+ return None
+ except AttributeError as e:
+ self.module.fail_json(
+ msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
+ rc=1,
+ results=[]
+ )
+
+ if not epoch:
+ epoch = "0"
+
+ if ':' in name:
+ epoch_name = name.split(":")
+
+ epoch = epoch_name[0]
+ name = ''.join(epoch_name[1:])
+
+ result = {
+ 'name': name,
+ 'epoch': epoch,
+ 'release': release,
+ 'version': version,
+ }
+
+ return result
+
+ # Original implementation from yum.rpmUtils.miscutils (GPLv2+)
+ # http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
+ def _compare_evr(self, e1, v1, r1, e2, v2, r2):
+ # return 1: a is newer than b
+ # 0: a and b are the same version
+ # -1: b is newer than a
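+ # e.g. (hypothetical values) e1='0', v1='1.0', r1='1' vs e2='0', v2='1.1',
+ # r2='1' yields -1, since version 1.1 is newer than 1.0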
+ if e1 is None:
+ e1 = '0'
+ else:
+ e1 = str(e1)
+ v1 = str(v1)
+ r1 = str(r1)
+ if e2 is None:
+ e2 = '0'
+ else:
+ e2 = str(e2)
+ v2 = str(v2)
+ r2 = str(r2)
+ # print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
+ rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
+ # print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
+ return rc
+
+ def _ensure_dnf(self):
+ if not HAS_DNF:
+ if PY2:
+ package = 'python2-dnf'
+ else:
+ package = 'python3-dnf'
+
+ if self.module.check_mode:
+ self.module.fail_json(
+ msg="`{0}` is not installed, but it is required"
+ "for the Ansible dnf module.".format(package),
+ results=[],
+ )
+
+ rc, stdout, stderr = self.module.run_command(['dnf', 'install', '-y', package])
+ global dnf
+ try:
+ import dnf
+ import dnf.cli
+ import dnf.const
+ import dnf.exceptions
+ import dnf.subject
+ import dnf.util
+ except ImportError:
+ self.module.fail_json(
+ msg="Could not import the dnf python module using {0} ({1}). "
+ "Please install `{2}` package or ensure you have specified the "
+ "correct ansible_python_interpreter.".format(sys.executable, sys.version.replace('\n', ''),
+ package),
+ results=[],
+ cmd='dnf install -y {0}'.format(package),
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ )
+
+ def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
+ """Configure the dnf Base object."""
+
+ conf = base.conf
+
+ # Change the configuration file path if provided, this must be done before conf.read() is called
+ if conf_file:
+ # Fail if we can't read the configuration file.
+ if not os.access(conf_file, os.R_OK):
+ self.module.fail_json(
+ msg="cannot read configuration file", conf_file=conf_file,
+ results=[],
+ )
+ else:
+ conf.config_file_path = conf_file
+
+ # Read the configuration file
+ conf.read()
+
+ # Turn off debug messages in the output
+ conf.debuglevel = 0
+
+ # Set whether to check gpg signatures
+ conf.gpgcheck = not disable_gpg_check
+ conf.localpkg_gpgcheck = not disable_gpg_check
+
+ # Don't prompt for user confirmations
+ conf.assumeyes = True
+
+ # Set installroot
+ conf.installroot = installroot
+
+ # Load substitutions from the filesystem
+ conf.substitutions.update_from_etc(installroot)
+
+ # Handle the different immutable/mutable datatypes across DNF versions
+ # (dnf v1/v2/v3)
+ #
+ # In DNF < 3.0 these are lists, and modifying them works
+ # In DNF >= 3.0, < 3.6 these are lists, but modifying them doesn't work
+ # In DNF >= 3.6 they have been turned into tuples, to communicate that modifying them doesn't work
+ #
+ # https://www.happyassassin.net/2018/06/27/adams-debugging-adventures-the-immutable-mutable-object/
+ #
+ # Set excludes
+ if self.exclude:
+ _excludes = list(conf.exclude)
+ _excludes.extend(self.exclude)
+ conf.exclude = _excludes
+ # Set disable_excludes
+ if self.disable_excludes:
+ _disable_excludes = list(conf.disable_excludes)
+ if self.disable_excludes not in _disable_excludes:
+ _disable_excludes.append(self.disable_excludes)
+ conf.disable_excludes = _disable_excludes
+
+ # Set releasever
+ if self.releasever is not None:
+ conf.substitutions['releasever'] = self.releasever
+
+ # Set skip_broken (in dnf this is strict=0)
+ if self.skip_broken:
+ conf.strict = 0
+
+ if self.download_only:
+ conf.downloadonly = True
+ if self.download_dir:
+ conf.destdir = self.download_dir
+
+ # Default in dnf upstream is true
+ conf.clean_requirements_on_remove = self.autoremove
+
+ # Default in dnf (and module default) is True
+ conf.install_weak_deps = self.install_weak_deps
+
+ def _specify_repositories(self, base, disablerepo, enablerepo):
+ """Enable and disable repositories matching the provided patterns."""
+ base.read_all_repos()
+ repos = base.repos
+
+ # Disable repositories
+ for repo_pattern in disablerepo:
+ if repo_pattern:
+ for repo in repos.get_matching(repo_pattern):
+ repo.disable()
+
+ # Enable repositories
+ for repo_pattern in enablerepo:
+ if repo_pattern:
+ for repo in repos.get_matching(repo_pattern):
+ repo.enable()
+
+ def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
+ """Return a fully configured dnf Base object."""
+ base = dnf.Base()
+ self._configure_base(base, conf_file, disable_gpg_check, installroot)
+ try:
+ # this method has been supported in dnf-4.2.17-6 or later
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1788212
+ base.setup_loggers()
+ except AttributeError:
+ pass
+ try:
+ base.init_plugins(set(self.disable_plugin), set(self.enable_plugin))
+ base.pre_configure_plugins()
+ except AttributeError:
+ pass # older versions of dnf didn't require this and don't have these methods
+ self._specify_repositories(base, disablerepo, enablerepo)
+ try:
+ base.configure_plugins()
+ except AttributeError:
+ pass # older versions of dnf didn't require this and don't have these methods
+
+ try:
+ if self.update_cache:
+ try:
+ base.update_cache()
+ except dnf.exceptions.RepoError as e:
+ self.module.fail_json(
+ msg="{0}".format(to_text(e)),
+ results=[],
+ rc=1
+ )
+ base.fill_sack(load_system_repo='auto')
+ except dnf.exceptions.RepoError as e:
+ self.module.fail_json(
+ msg="{0}".format(to_text(e)),
+ results=[],
+ rc=1
+ )
+
+ filters = []
+ if self.bugfix:
+ key = {'advisory_type__eq': 'bugfix'}
+ filters.append(base.sack.query().upgrades().filter(**key))
+ if self.security:
+ key = {'advisory_type__eq': 'security'}
+ filters.append(base.sack.query().upgrades().filter(**key))
+ if filters:
+ base._update_security_filters = filters
+
+ return base
+
+ def list_items(self, command):
+ """List package info based on the command."""
+ # Rename updates to upgrades
+ if command == 'updates':
+ command = 'upgrades'
+
+ # Return the corresponding packages
+ if command in ['installed', 'upgrades', 'available']:
+ results = [
+ self._package_dict(package)
+ for package in getattr(self.base.sack.query(), command)()]
+ # Return the enabled repository ids
+ elif command in ['repos', 'repositories']:
+ results = [
+ {'repoid': repo.id, 'state': 'enabled'}
+ for repo in self.base.repos.iter_enabled()]
+ # Return any matching packages
+ else:
+ packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
+ results = [self._package_dict(package) for package in packages]
+
+ self.module.exit_json(msg="", results=results)
+
+ def _is_installed(self, pkg):
+ installed = self.base.sack.query().installed()
+ if installed.filter(name=pkg):
+ return True
+ else:
+ return False
+
+ def _is_newer_version_installed(self, pkg_name):
+ candidate_pkg = self._packagename_dict(pkg_name)
+ if not candidate_pkg:
+ # The user didn't provide a versioned rpm, so version checking is
+ # not required
+ return False
+
+ installed = self.base.sack.query().installed()
+ installed_pkg = installed.filter(name=candidate_pkg['name']).run()
+ if installed_pkg:
+ installed_pkg = installed_pkg[0]
+
+ # this looks weird but one is a dict and the other is a dnf.Package
+ evr_cmp = self._compare_evr(
+ installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
+ candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
+ )
+
+ if evr_cmp == 1:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def _mark_package_install(self, pkg_spec, upgrade=False):
+ """Mark the package for install."""
+ is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
+ is_installed = self._is_installed(pkg_spec)
+ try:
+ if is_newer_version_installed:
+ if self.allow_downgrade:
+ # dnf only does allow_downgrade; we have to handle this ourselves
+ # because it opens the possibility of non-idempotent transactions
+ # on a system's package set (provided the repo has many old
+ # NVRs indexed)
+ if upgrade:
+ if is_installed:
+ self.base.upgrade(pkg_spec)
+ else:
+ self.base.install(pkg_spec)
+ else:
+ self.base.install(pkg_spec)
+ else: # Nothing to do, report back
+ pass
+ elif is_installed: # A potentially older (or same) version is installed
+ if upgrade:
+ self.base.upgrade(pkg_spec)
+ else: # Nothing to do, report back
+ pass
+ else: # The package is not installed, simply install it
+ self.base.install(pkg_spec)
+
+ return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
+
+ except dnf.exceptions.MarkingError as e:
+ return {
+ 'failed': True,
+ 'msg': "No package {0} available.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ except dnf.exceptions.DepsolveError as e:
+ return {
+ 'failed': True,
+ 'msg': "Depsolve Error occured for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ except dnf.exceptions.Error as e:
+ if to_text("already installed") in to_text(e):
+ return {'failed': False, 'msg': '', 'failure': ''}
+ else:
+ return {
+ 'failed': True,
+ 'msg': "Unknown Error occured for package {0}.".format(pkg_spec),
+ 'failure': " ".join((pkg_spec, to_native(e))),
+ 'rc': 1,
+ "results": []
+ }
+
+ def _whatprovides(self, filepath):
+ available = self.base.sack.query().available()
+ pkg_spec = available.filter(provides=filepath).run()
+
+ if pkg_spec:
+ return pkg_spec[0].name
+
+ def _parse_spec_group_file(self):
+ pkg_specs, grp_specs, module_specs, filenames = [], [], [], []
+ already_loaded_comps = False # Only load this if necessary, it's slow
+
+ for name in self.names:
+ if '://' in name:
+ name = fetch_file(self.module, name)
+ filenames.append(name)
+ elif name.endswith(".rpm"):
+ filenames.append(name)
+ elif name.startswith("@") or ('/' in name):
+ # like "dnf install /usr/bin/vi"
+ if '/' in name:
+ pkg_spec = self._whatprovides(name)
+ if pkg_spec:
+ pkg_specs.append(pkg_spec)
+ continue
+
+ if not already_loaded_comps:
+ self.base.read_comps()
+ already_loaded_comps = True
+
+ grp_env_mdl_candidate = name[1:].strip()
+
+ if self.with_modules:
+ mdl = self.module_base._get_modules(grp_env_mdl_candidate)
+ if mdl[0]:
+ module_specs.append(grp_env_mdl_candidate)
+ else:
+ grp_specs.append(grp_env_mdl_candidate)
+ else:
+ grp_specs.append(grp_env_mdl_candidate)
+ else:
+ pkg_specs.append(name)
+ return pkg_specs, grp_specs, module_specs, filenames
+
+ def _update_only(self, pkgs):
+ not_installed = []
+ for pkg in pkgs:
+ if self._is_installed(pkg):
+ try:
+ if isinstance(to_text(pkg), text_type):
+ self.base.upgrade(pkg)
+ else:
+ self.base.package_upgrade(pkg)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occured attempting update_only operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
+ else:
+ not_installed.append(pkg)
+
+ return not_installed
+
+ def _install_remote_rpms(self, filenames):
+ if int(dnf.__version__.split(".")[0]) >= 2:
+ pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
+ else:
+ pkgs = []
+ try:
+ for filename in filenames:
+ pkgs.append(self.base.add_remote_rpm(filename))
+ except IOError as e:
+ if to_text("Can not load RPM file") in to_text(e):
+ self.module.fail_json(
+ msg="Error occured attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
+ results=[],
+ rc=1,
+ )
+ if self.update_only:
+ self._update_only(pkgs)
+ else:
+ for pkg in pkgs:
+ try:
+ if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
+ if self.allow_downgrade:
+ self.base.package_install(pkg)
+ else:
+ self.base.package_install(pkg)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Error occured attempting remote rpm operation: {0}".format(to_native(e)),
+ results=[],
+ rc=1,
+ )
+
+ def _is_module_installed(self, module_spec):
+ if self.with_modules:
+ module_spec = module_spec.strip()
+ module_list, nsv = self.module_base._get_modules(module_spec)
+ enabled_streams = self.base._moduleContainer.getEnabledStream(nsv.name)
+
+ if enabled_streams:
+ if nsv.stream:
+ if nsv.stream in enabled_streams:
+ return True # The provided stream was found
+ else:
+ return False # The provided stream was not found
+ else:
+ return True # No stream provided, but module found
+
+ return False # seems like a sane default
+
+ def ensure(self):
+
+ response = {
+ 'msg': "",
+ 'changed': False,
+ 'results': [],
+ 'rc': 0
+ }
+
+ # Accumulate failures. Package management modules install what they can
+ # and fail with a message about what they can't.
+ failure_response = {
+ 'msg': "",
+ 'failures': [],
+ 'results': [],
+ 'rc': 1
+ }
+
+ # Autoremove is called alone
+ # Jump to remove path where base.autoremove() is run
+ if not self.names and self.autoremove:
+ self.names = []
+ self.state = 'absent'
+
+ if self.names == ['*'] and self.state == 'latest':
+ try:
+ self.base.upgrade_all()
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occured attempting to upgrade all packages"
+ self.module.fail_json(**failure_response)
+ else:
+ pkg_specs, group_specs, module_specs, filenames = self._parse_spec_group_file()
+
+ pkg_specs = [p.strip() for p in pkg_specs]
+ filenames = [f.strip() for f in filenames]
+ groups = []
+ environments = []
+ for group_spec in (g.strip() for g in group_specs):
+ group = self.base.comps.group_by_pattern(group_spec)
+ if group:
+ groups.append(group.id)
+ else:
+ environment = self.base.comps.environment_by_pattern(group_spec)
+ if environment:
+ environments.append(environment.id)
+ else:
+ self.module.fail_json(
+ msg="No group {0} available.".format(group_spec),
+ results=[],
+ )
+
+ if self.state in ['installed', 'present']:
+ # Install files.
+ self._install_remote_rpms(filenames)
+ for filename in filenames:
+ response['results'].append("Installed {0}".format(filename))
+
+ # Install modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if not self._is_module_installed(module):
+ response['results'].append("Module {0} installed.".format(module))
+ self.module_base.install([module])
+ self.module_base.enable([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ # Install groups.
+ for group in groups:
+ try:
+ group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ if group_pkg_count_installed == 0:
+ response['results'].append("Group {0} already installed.".format(group))
+ else:
+ response['results'].append("Group {0} installed.".format(group))
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occured attempting to install group: {0}".format(group)
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ # In dnf 2.0 if all the mandatory packages in a group do
+ # not install, an error is raised. We want to capture
+ # this but still install as much as possible.
+ failure_response['failures'].append(" ".join((group, to_native(e))))
+
+ for environment in environments:
+ try:
+ self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occured attempting to install environment: {0}".format(environment)
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((environment, to_native(e))))
+
+ if module_specs and not self.with_modules:
+ # This means that the group or env wasn't found in comps
+ self.module.fail_json(
+ msg="No group {0} available.".format(module_specs[0]),
+ results=[],
+ )
+
+ # Install packages.
+ if self.update_only:
+ not_installed = self._update_only(pkg_specs)
+ for spec in not_installed:
+ response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ else:
+ for pkg_spec in pkg_specs:
+ install_result = self._mark_package_install(pkg_spec)
+ if install_result['failed']:
+ if install_result['msg']:
+ failure_response['msg'] += install_result['msg']
+ failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
+ else:
+ if install_result['msg']:
+ response['results'].append(install_result['msg'])
+
+ elif self.state == 'latest':
+ # "latest" is same as "installed" for filenames.
+ self._install_remote_rpms(filenames)
+ for filename in filenames:
+ response['results'].append("Installed {0}".format(filename))
+
+ # Upgrade modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if self._is_module_installed(module):
+ response['results'].append("Module {0} upgraded.".format(module))
+ self.module_base.upgrade([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ for group in groups:
+ try:
+ try:
+ self.base.group_upgrade(group)
+ response['results'].append("Group {0} upgraded.".format(group))
+ except dnf.exceptions.CompsError:
+ if not self.update_only:
+ # If not already installed, try to install.
+ group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
+ if group_pkg_count_installed == 0:
+ response['results'].append("Group {0} already installed.".format(group))
+ else:
+ response['results'].append("Group {0} installed.".format(group))
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((group, to_native(e))))
+
+ for environment in environments:
+ try:
+ try:
+ self.base.environment_upgrade(environment)
+ except dnf.exceptions.CompsError:
+ # If not already installed, try to install.
+ self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occured attempting to install environment: {0}".format(environment)
+ except dnf.exceptions.Error as e:
+ failure_response['failures'].append(" ".join((environment, to_native(e))))
+
+ if self.update_only:
+ not_installed = self._update_only(pkg_specs)
+ for spec in not_installed:
+ response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ else:
+ for pkg_spec in pkg_specs:
+ # best effort causes the latest package to be installed
+ # even if not previously installed
+ self.base.conf.best = True
+ install_result = self._mark_package_install(pkg_spec, upgrade=True)
+ if install_result['failed']:
+ if install_result['msg']:
+ failure_response['msg'] += install_result['msg']
+ failure_response['failures'].append(self._sanitize_dnf_error_msg_install(pkg_spec, install_result['failure']))
+ else:
+ if install_result['msg']:
+ response['results'].append(install_result['msg'])
+
+ else:
+ # state == absent
+ if filenames:
+ self.module.fail_json(
+ msg="Cannot remove paths -- please specify package name.",
+ results=[],
+ )
+
+ # Remove modules
+ if module_specs and self.with_modules:
+ for module in module_specs:
+ try:
+ if self._is_module_installed(module):
+ response['results'].append("Module {0} removed.".format(module))
+ self.module_base.remove([module])
+ self.module_base.disable([module])
+ self.module_base.reset([module])
+ except dnf.exceptions.MarkingErrors as e:
+ failure_response['failures'].append(' '.join((module, to_native(e))))
+
+ for group in groups:
+ try:
+ self.base.group_remove(group)
+ except dnf.exceptions.CompsError:
+ # Group is already uninstalled.
+ pass
+ except AttributeError:
+ # Group either isn't installed or wasn't marked installed at install time
+ # because of DNF bug
+ #
+ # This is necessary until the upstream dnf API bug is fixed where installing
+ # a group via the dnf API doesn't actually mark the group as installed
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1620324
+ pass
+
+ for environment in environments:
+ try:
+ self.base.environment_remove(environment)
+ except dnf.exceptions.CompsError:
+ # Environment is already uninstalled.
+ pass
+
+ installed = self.base.sack.query().installed()
+ for pkg_spec in pkg_specs:
+ # short-circuit installed check for wildcard matching
+ if '*' in pkg_spec:
+ try:
+ self.base.remove(pkg_spec)
+ except dnf.exceptions.MarkingError as e:
+ is_failure, handled_remove_error = self._sanitize_dnf_error_msg_remove(pkg_spec, to_native(e))
+ if is_failure:
+ failure_response['failures'].append('{0} - {1}'.format(pkg_spec, to_native(e)))
+ else:
+ response['results'].append(handled_remove_error)
+ continue
+
+ installed_pkg = list(map(str, installed.filter(name=pkg_spec).run()))
+ if installed_pkg:
+ candidate_pkg = self._packagename_dict(installed_pkg[0])
+ installed_pkg = installed.filter(name=candidate_pkg['name']).run()
+ else:
+ candidate_pkg = self._packagename_dict(pkg_spec)
+ installed_pkg = installed.filter(nevra=pkg_spec).run()
+ if installed_pkg:
+ installed_pkg = installed_pkg[0]
+ evr_cmp = self._compare_evr(
+ installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
+ candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
+ )
+ if evr_cmp == 0:
+ self.base.remove(pkg_spec)
+
+ # Like the dnf CLI we want to allow recursive removal of dependent
+ # packages
+ self.allowerasing = True
+
+ if self.autoremove:
+ self.base.autoremove()
+
+ try:
+ if not self.base.resolve(allow_erasing=self.allowerasing):
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+
+ response['msg'] = "Nothing to do"
+ self.module.exit_json(**response)
+ else:
+ response['changed'] = True
+
+ # If packages got installed/removed, add them to the results.
+ # We do this early so we can use it for both check_mode and not.
+ if self.download_only:
+ install_action = 'Downloaded'
+ else:
+ install_action = 'Installed'
+ for package in self.base.transaction.install_set:
+ response['results'].append("{0}: {1}".format(install_action, package))
+ for package in self.base.transaction.remove_set:
+ response['results'].append("Removed: {0}".format(package))
+
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+ if self.module.check_mode:
+ response['msg'] = "Check mode: No changes made, but would have if not in check mode"
+ self.module.exit_json(**response)
+
+ try:
+ if self.download_only and self.download_dir and self.base.conf.destdir:
+ dnf.util.ensure_dir(self.base.conf.destdir)
+ self.base.repos.all().pkgdir = self.base.conf.destdir
+
+ self.base.download_packages(self.base.transaction.install_set)
+ except dnf.exceptions.DownloadError as e:
+ self.module.fail_json(
+ msg="Failed to download packages: {0}".format(to_text(e)),
+ results=[],
+ )
+
+ # Validate GPG. This is NOT done in dnf.Base (it's done in the
+ # upstream CLI subclass of dnf.Base)
+ if not self.disable_gpg_check:
+ for package in self.base.transaction.install_set:
+ fail = False
+ gpgres, gpgerr = self.base._sig_check_pkg(package)
+ if gpgres == 0: # validated successfully
+ continue
+ elif gpgres == 1: # validation failed, install cert?
+ try:
+ self.base._get_key_for_package(package)
+ except dnf.exceptions.Error as e:
+ fail = True
+ else: # fatal error
+ fail = True
+
+ if fail:
+ msg = 'Failed to validate GPG signature for {0}'.format(package)
+ self.module.fail_json(msg)
+
+ if self.download_only:
+ # No further work left to do, and the results were already updated above.
+ # Just return them.
+ self.module.exit_json(**response)
+ else:
+ self.base.do_transaction()
+
+ if failure_response['failures']:
+ failure_response['msg'] = 'Failed to install some of the specified packages'
+ self.module.fail_json(**failure_response)
+ self.module.exit_json(**response)
+ except dnf.exceptions.DepsolveError as e:
+ failure_response['msg'] = "Depsolve Error occured: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
+ except dnf.exceptions.Error as e:
+ if to_text("already installed") in to_text(e):
+ response['changed'] = False
+ response['results'].append("Package already installed: {0}".format(to_native(e)))
+ self.module.exit_json(**response)
+ else:
+ failure_response['msg'] = "Unknown Error occured: {0}".format(to_native(e))
+ self.module.fail_json(**failure_response)
+
+ @staticmethod
+ def has_dnf():
+ return HAS_DNF
+
+ def run(self):
+ """The main function."""
+
+ # Check if autoremove is called correctly
+ if self.autoremove:
+ if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
+ self.module.fail_json(
+ msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
+ results=[],
+ )
+
+ # Check if download_dir is called correctly
+ if self.download_dir:
+ if LooseVersion(dnf.__version__) < LooseVersion('2.6.2'):
+ self.module.fail_json(
+ msg="download_dir requires dnf>=2.6.2. Current dnf version is %s" % dnf.__version__,
+ results=[],
+ )
+
+ if self.update_cache and not self.names and not self.list:
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot
+ )
+ self.module.exit_json(
+ msg="Cache updated",
+ changed=False,
+ results=[],
+ rc=0
+ )
+
+ # Set state as installed by default
+ # This is not set in AnsibleModule() because the following shouldn't happen
+ # - dnf: autoremove=yes state=installed
+ if self.state is None:
+ self.state = 'installed'
+
+ if self.list:
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot
+ )
+ self.list_items(self.list)
+ else:
+ # Note: base takes a long time to run so we want to check for failure
+ # before running it.
+ if not dnf.util.am_i_root():
+ self.module.fail_json(
+ msg="This command has to be run under the root user.",
+ results=[],
+ )
+ self.base = self._base(
+ self.conf_file, self.disable_gpg_check, self.disablerepo,
+ self.enablerepo, self.installroot
+ )
+
+ if self.with_modules:
+ self.module_base = dnf.module.module_base.ModuleBase(self.base)
+
+ self.ensure()
+
+
+def main():
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ # Extend yumdnf_argument_spec with dnf-specific features that will never be
+ # backported to yum because yum is now in "maintenance mode" upstream
+ yumdnf_argument_spec['argument_spec']['allowerasing'] = dict(default=False, type='bool')
+
+ module = AnsibleModule(
+ **yumdnf_argument_spec
+ )
+
+ module_implementation = DnfModule(module)
+ try:
+ module_implementation.run()
+ except dnf.exceptions.RepoError as de:
+ module.fail_json(
+ msg="Failed to synchronize repodata: {0}".format(to_native(de)),
+ rc=1,
+ results=[],
+ changed=False
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/dpkg_selections.py b/lib/ansible/modules/dpkg_selections.py
new file mode 100644
index 00000000..9043786e
--- /dev/null
+++ b/lib/ansible/modules/dpkg_selections.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dpkg_selections
+short_description: Dpkg package selections
+description:
+ - Change dpkg package selection state via --get-selections and --set-selections.
+version_added: "2.0"
+author:
+- Brian Brazil (@brian-brazil) <brian.brazil@boxever.com>
+options:
+ name:
+ description:
+ - Name of the package.
+ required: true
+ selection:
+ description:
+ - The selection state to set the package to.
+ choices: [ 'install', 'hold', 'deinstall', 'purge' ]
+ required: true
+notes:
+ - This module won't cause any packages to be installed/removed/purged; use the C(apt) module for that.
+'''
+EXAMPLES = '''
+- name: Prevent python from being upgraded
+ dpkg_selections:
+ name: python
+ selection: hold
+'''
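+
+# For reference, `dpkg --get-selections <name>` prints the package name and
+# its selection state separated by whitespace (e.g. "python  hold"), which is
+# why the code below reads the current state from out.split()[1].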
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ selection=dict(required=True, choices=['install', 'hold', 'deinstall', 'purge'])
+ ),
+ supports_check_mode=True,
+ )
+
+ dpkg = module.get_bin_path('dpkg', True)
+
+ name = module.params['name']
+ selection = module.params['selection']
+
+ # Get current settings.
+ rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
+ if not out:
+ current = 'not present'
+ else:
+ current = out.split()[1]
+
+ changed = current != selection
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, before=current, after=selection)
+
+ module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
+ module.exit_json(changed=changed, before=current, after=selection)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/expect.py b/lib/ansible/modules/expect.py
new file mode 100644
index 00000000..0f262156
--- /dev/null
+++ b/lib/ansible/modules/expect.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: expect
+version_added: '2.0'
+short_description: Executes a command and responds to prompts
+description:
+ - The C(expect) module executes a command and responds to prompts.
+ - The given command will be executed on all selected nodes. It will not be
+ processed through the shell, so variables like C($HOME) and operations
+ like C("<"), C(">"), C("|"), and C("&") will not work.
+options:
+ command:
+ description:
+ - The command to run.
+ required: true
+ creates:
+ type: path
+ description:
+ - A filename; when it already exists, this step will B(not) be run.
+ removes:
+ type: path
+ description:
+ - A filename; when it does not exist, this step will B(not) be run.
+ chdir:
+ type: path
+ description:
+ - Change into this directory before running the command.
+ responses:
+ type: dict
+ description:
+ - Mapping of expected string/regex and string to respond with. If the
+ response is a list, successive matches return successive
+ responses. List functionality is new in 2.1.
+ required: true
+ timeout:
+ type: int
+ description:
+ - Amount of time in seconds to wait for the expected strings. Use
+ C(null) to disable timeout.
+ default: 30
+ echo:
+ description:
+ - Whether or not to echo out your response strings.
+ default: false
+ type: bool
+requirements:
+ - python >= 2.6
+ - pexpect >= 3.3
+notes:
+ - If you want to run a command through the shell (say you are using C(<),
+ C(>), C(|), etc), you must specify a shell in the command such as
+ C(/bin/bash -c "/path/to/something | grep else").
+ - The question, or key, under I(responses) is a Python regex match. Case
+ insensitive searches are indicated with a prefix of C(?i).
+ - By default, if a question is encountered multiple times, its string
+ response will be repeated. If you need different responses for successive
+ question matches, instead of a string response, use a list of strings as
+ the response. The list functionality is new in 2.1.
+ - The M(ansible.builtin.expect) module is designed for simple scenarios.
+ For more complex needs, consider the use of expect code with the M(ansible.builtin.shell)
+ or M(ansible.builtin.script) modules. (An example is part of the M(ansible.builtin.shell) module documentation)
+seealso:
+- module: ansible.builtin.script
+- module: ansible.builtin.shell
+author: "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = r'''
+- name: Case insensitive password string match
+ expect:
+ command: passwd username
+ responses:
+ (?i)password: "MySekretPa$$word"
+ # you don't want to show passwords in your logs
+ no_log: true
+
+- name: Generic question with multiple different responses
+ expect:
+ command: /path/to/custom/command
+ responses:
+ Question:
+ - response1
+ - response2
+ - response3
+'''
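+
+# A hypothetical example (not among the upstream examples) of running the
+# command through a shell, as the notes above describe, since pipes and
+# redirection are not processed otherwise:
+#
+# - name: Run a piped command under a shell
+#   expect:
+#     command: /bin/bash -c "/path/to/something | grep else"
+#     responses:
+#       Continue\?: "y"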
+
+import datetime
+import os
+import traceback
+
+PEXPECT_IMP_ERR = None
+try:
+ import pexpect
+ HAS_PEXPECT = True
+except ImportError:
+ PEXPECT_IMP_ERR = traceback.format_exc()
+ HAS_PEXPECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_text
+
+
+def response_closure(module, question, responses):
+ resp_gen = (u'%s\n' % to_text(r).rstrip(u'\n') for r in responses)
+
+ def wrapped(info):
+ try:
+ return next(resp_gen)
+ except StopIteration:
+ module.fail_json(msg="No remaining responses for '%s', "
+ "output was '%s'" %
+ (question,
+ info['child_result_list'][-1]))
+
+ return wrapped
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True),
+ chdir=dict(type='path'),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ responses=dict(type='dict', required=True),
+ timeout=dict(type='int', default=30),
+ echo=dict(type='bool', default=False),
+ )
+ )
+
+ if not HAS_PEXPECT:
+ module.fail_json(msg=missing_required_lib("pexpect"),
+ exception=PEXPECT_IMP_ERR)
+
+ chdir = module.params['chdir']
+ args = module.params['command']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ responses = module.params['responses']
+ timeout = module.params['timeout']
+ echo = module.params['echo']
+
+ events = dict()
+ for key, value in responses.items():
+ if isinstance(value, list):
+ response = response_closure(module, key, value)
+ else:
+ response = u'%s\n' % to_text(value).rstrip(u'\n')
+
+ events[to_text(key)] = response
+
+ if args.strip() == '':
+ module.fail_json(rc=256, msg="no command given")
+
+ if chdir:
+ chdir = os.path.abspath(chdir)
+ os.chdir(chdir)
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ if os.path.exists(creates):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s exists" % creates,
+ changed=False,
+ rc=0
+ )
+
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ if not os.path.exists(removes):
+ module.exit_json(
+ cmd=args,
+ stdout="skipped, since %s does not exist" % removes,
+ changed=False,
+ rc=0
+ )
+
+ startd = datetime.datetime.now()
+
+ try:
+ try:
+ # Prefer pexpect.run from pexpect>=4
+ out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo,
+ encoding='utf-8')
+ except TypeError:
+ # Use pexpect.runu in pexpect>=3.3,<4
+ out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
+ events=events, cwd=chdir, echo=echo)
+ except (TypeError, AttributeError) as e:
+ # This should catch all insufficient versions of pexpect
+ # We deem them insufficient for their lack of ability to specify
+ # to not echo responses via the run/runu functions, which would
+ # potentially leak sensitive information
+ module.fail_json(msg='Insufficient version of pexpect installed '
+ '(%s), this module requires pexpect>=3.3. '
+ 'Error was %s' % (pexpect.__version__, to_native(e)))
+ except pexpect.ExceptionPexpect as e:
+ module.fail_json(msg='%s' % to_native(e), exception=traceback.format_exc())
+
+ endd = datetime.datetime.now()
+ delta = endd - startd
+
+ if out is None:
+ out = ''
+
+ result = dict(
+ cmd=args,
+ stdout=out.rstrip('\r\n'),
+ rc=rc,
+ start=str(startd),
+ end=str(endd),
+ delta=str(delta),
+ changed=True,
+ )
+
+ if rc is None:
+ module.fail_json(msg='command exceeded timeout', **result)
+ elif rc != 0:
+ module.fail_json(msg='non-zero return code', **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/fail.py b/lib/ansible/modules/fail.py
new file mode 100644
index 00000000..fb34e086
--- /dev/null
+++ b/lib/ansible/modules/fail.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: fail
+short_description: Fail with custom message
+description:
+- This module fails the play's progress with a custom message.
+- It can be useful for bailing out when a certain condition is met using C(when).
+- This module is also supported for Windows targets.
+version_added: "0.8"
+options:
+ msg:
+ description:
+ - The customized message used for failing execution.
+ - If omitted, fail will simply bail out with a generic message.
+ type: str
+ default: Failed as requested from task
+notes:
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.debug
+- module: ansible.builtin.meta
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Example using fail and when together
+ fail:
+ msg: The system may not be provisioned according to the CMDB status.
+ when: cmdb_status != "to-be-staged"
+'''
diff --git a/lib/ansible/modules/fetch.py b/lib/ansible/modules/fetch.py
new file mode 100644
index 00000000..d2f886dc
--- /dev/null
+++ b/lib/ansible/modules/fetch.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: fetch
+short_description: Fetch files from remote nodes
+description:
+- This module works like M(ansible.builtin.copy), but in reverse.
+- It is used for fetching files from remote machines and storing them locally in a file tree, organized by hostname.
+- Files that already exist at I(dest) will be overwritten if they are different from the I(src).
+- This module is also supported for Windows targets.
+version_added: '0.2'
+options:
+ src:
+ description:
+ - The file on the remote system to fetch.
+ - This I(must) be a file, not a directory.
+ - Recursive fetching may be supported in a later release.
+ required: yes
+ dest:
+ description:
+ - A directory to save the file into.
+ - For example, if the I(dest) directory is C(/backup) a I(src) file named C(/etc/profile) on host
+ C(host.example.com), would be saved into C(/backup/host.example.com/etc/profile).
+ The host name is based on the inventory name.
+ required: yes
+ fail_on_missing:
+ version_added: '1.1'
+ description:
+ - When set to C(yes), the task will fail if the remote file cannot be read for any reason.
+ - Prior to Ansible 2.5, setting this would only fail if the source file was missing.
+ - The default was changed to C(yes) in Ansible 2.5.
+ type: bool
+ default: yes
+ validate_checksum:
+ version_added: '1.4'
+ description:
+ - Verify that the source and destination checksums match after the files are fetched.
+ type: bool
+ default: yes
+ flat:
+ version_added: '1.2'
+ description:
+ - Allows you to override the default behavior of appending hostname/path/to/file to the destination.
+ - If C(dest) ends with '/', it will use the basename of the source file, similar to the copy module.
+ - This can be useful if working with a single host, or if retrieving files that are uniquely named per host.
+ - If using multiple hosts with the same filename, the file will be overwritten for each host.
+ type: bool
+ default: no
+notes:
+- When running fetch with C(become), the M(ansible.builtin.slurp) module will also be
+ used to fetch the contents of the file for determining the remote
+ checksum. This effectively doubles the transfer size, and
+ depending on the file size can consume all available memory on the
+ remote or local hosts causing a C(MemoryError). Due to this it is
+ advisable to run this module without C(become) whenever possible.
+- Prior to Ansible 2.5 this module would not fail if reading the remote
+ file was impossible unless C(fail_on_missing) was set.
+- In Ansible 2.5 or later, playbook authors are encouraged to use
+ C(failed_when) or C(ignore_errors) to get this ability. They may
+ also explicitly set C(fail_on_missing) to C(no) to get the
+ non-failing behaviour.
+- This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.builtin.slurp
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Store file into /tmp/fetched/host.example.com/tmp/somefile
+ fetch:
+ src: /tmp/somefile
+ dest: /tmp/fetched
+
+- name: Specifying a path directly
+ fetch:
+ src: /tmp/somefile
+ dest: /tmp/prefix-{{ inventory_hostname }}
+ flat: yes
+
+- name: Specifying a destination path
+ fetch:
+ src: /tmp/uniquefile
+ dest: /tmp/special/
+ flat: yes
+
+- name: Storing in a path relative to the playbook
+ fetch:
+ src: /tmp/uniquefile
+ dest: special/prefix-{{ inventory_hostname }}
+ flat: yes
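+
+# An illustrative sketch (the path is hypothetical): per the fail_on_missing
+# documentation above, tolerate hosts where the file is absent or unreadable
+# instead of failing the task.
+- name: Fetch a log that may not exist on every host
+ fetch:
+ src: /var/log/myapp.log
+ dest: /tmp/fetched
+ fail_on_missing: no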
+'''
diff --git a/lib/ansible/modules/file.py b/lib/ansible/modules/file.py
new file mode 100644
index 00000000..76a3e130
--- /dev/null
+++ b/lib/ansible/modules/file.py
@@ -0,0 +1,926 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: file
+version_added: historical
+short_description: Manage files and file properties
+extends_documentation_fragment: files
+description:
+- Set attributes of files, symlinks or directories.
+- Alternatively, remove files, symlinks or directories.
+- Many other modules support the same options as the C(file) module - including M(ansible.builtin.copy),
+ M(ansible.builtin.template), and M(ansible.builtin.assemble).
+- For Windows targets, use the M(ansible.windows.win_file) module instead.
+options:
+ path:
+ description:
+ - Path to the file being managed.
+ type: path
+ required: yes
+ aliases: [ dest, name ]
+ state:
+ description:
+ - If C(absent), directories will be recursively deleted, and files or symlinks will
+ be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
+ under C(path_content). Note that C(absent) will not cause C(file) to fail if the C(path) does
+ not exist as the state did not change.
+ - If C(directory), all intermediate subdirectories will be created if they
+ do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
+ - If C(file), without any other options this works mostly as a 'stat' and will return the current state of C(path).
+ Even with other options (e.g. C(mode)), the file will be modified but will NOT be created if it does not exist;
+ see the C(touch) value or the M(ansible.builtin.copy) or M(ansible.builtin.template) module if you want that behavior.
+ - If C(hard), the hard link will be created or changed.
+ - If C(link), the symbolic link will be created or changed.
+ - If C(touch) (new in 1.4), an empty file will be created if the C(path) does not
+ exist, while an existing file or directory will receive updated file access and
+ modification times (similar to the way C(touch) works from the command line).
+ type: str
+ default: file
+ choices: [ absent, directory, file, hard, link, touch ]
+ src:
+ description:
+ - Path of the file to link to.
+ - This applies only to C(state=link) and C(state=hard).
+ - For C(state=link), this will also accept a non-existing path.
+ - Relative paths are relative to the file being created (C(path)) which is how
+ the Unix command C(ln -s SRC DEST) treats relative paths.
+ type: path
+ recurse:
+ description:
+ - Recursively set the specified file attributes on directory contents.
+ - This applies only when C(state) is set to C(directory).
+ type: bool
+ default: no
+ version_added: '1.1'
+ force:
+ description:
+ - >
+ Force the creation of the symlinks in two cases: the source file does
+ not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
+ C(path) file and create a symlink to the C(src) file in place of it).
+ type: bool
+ default: no
+ follow:
+ description:
+ - This flag indicates that filesystem links, if they exist, should be followed.
+ - Prior to Ansible 2.5, this was C(no) by default.
+ type: bool
+ default: yes
+ version_added: '1.8'
+ modification_time:
+ description:
+ - This parameter indicates the time the file's modification time should be set to.
+ - Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using the default time format, or C(now).
+ - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is the default for C(state=touch).
+ type: str
+ version_added: "2.7"
+ modification_time_format:
+ description:
+ - When used with C(modification_time), indicates the time format that must be used.
+ - Based on default Python format (see time.strftime doc).
+ type: str
+ default: "%Y%m%d%H%M.%S"
+ version_added: '2.7'
+ access_time:
+ description:
+ - This parameter indicates the time the file's access time should be set to.
+ - Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using the default time format, or C(now).
+ - Default is C(None), meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is the default for C(state=touch).
+ type: str
+ version_added: '2.7'
+ access_time_format:
+ description:
+ - When used with C(access_time), indicates the time format that must be used.
+ - Based on default Python format (see time.strftime doc).
+ type: str
+ default: "%Y%m%d%H%M.%S"
+ version_added: '2.7'
+seealso:
+- module: ansible.builtin.assemble
+- module: ansible.builtin.copy
+- module: ansible.builtin.stat
+- module: ansible.builtin.template
+- module: ansible.windows.win_file
+author:
+- Ansible Core Team
+- Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Change file ownership, group and permissions
+ file:
+ path: /etc/foo.conf
+ owner: foo
+ group: foo
+ mode: '0644'
+
+- name: Give insecure permissions to an existing file
+ file:
+ path: /work
+ owner: root
+ group: root
+ mode: '1777'
+
+- name: Create a symbolic link
+ file:
+ src: /file/to/link/to
+ dest: /path/to/symlink
+ owner: foo
+ group: foo
+ state: link
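+
+# An illustrative sketch (paths are hypothetical): per the force option
+# documented above, creating a link whose target does not exist yet
+# requires force.
+- name: Create a symlink to a target that will appear later
+ file:
+ src: /file/that/does/not/exist/yet
+ dest: /path/to/early-symlink
+ state: link
+ force: yes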
+
+- name: Create two hard links
+ file:
+ src: '/tmp/{{ item.src }}'
+ dest: '{{ item.dest }}'
+ state: hard
+ loop:
+ - { src: x, dest: y }
+ - { src: z, dest: k }
+
+- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
+ file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u=rw,g=r,o=r
+
+- name: Touch the same file, but add/remove some permissions
+ file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u+rw,g-wx,o-rwx
+
+- name: Touch the same file again, but don't change times; this makes the task idempotent
+ file:
+ path: /etc/foo.conf
+ state: touch
+ mode: u+rw,g-wx,o-rwx
+ modification_time: preserve
+ access_time: preserve
+
+- name: Create a directory if it does not exist
+ file:
+ path: /etc/some_directory
+ state: directory
+ mode: '0755'
+
+- name: Update modification and access time of given file
+ file:
+ path: /etc/some_file
+ state: file
+ modification_time: now
+ access_time: now
+
+- name: Set access time based on seconds from epoch value
+ file:
+ path: /etc/another_file
+ state: file
+ access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
+
+- name: Recursively change ownership of a directory
+ file:
+ path: /etc/foo
+ state: directory
+ recurse: yes
+ owner: foo
+ group: foo
+
+- name: Remove file (delete file)
+ file:
+ path: /etc/foo.txt
+ state: absent
+
+- name: Recursively remove directory
+ file:
+ path: /etc/foo
+ state: absent
+
+'''
+RETURN = r'''
+dest:
+ description: Destination file/path, equal to the value passed to I(path)
+ returned: state=touch, state=hard, state=link
+ type: str
+ sample: /path/to/file.txt
+path:
+ description: Destination file/path, equal to the value passed to I(path)
+ returned: state=absent, state=directory, state=file
+ type: str
+ sample: /path/to/file.txt
+'''
+
+import errno
+import os
+import shutil
+import sys
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+# There will only be a single AnsibleModule object per module
+module = None
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ # __repr__ must return the string, not print it
+ return 'AnsibleModuleError(results={0})'.format(self.results)
+
+
+class ParameterError(AnsibleModuleError):
+ pass
+
+
+class Sentinel(object):
+ def __new__(cls, *args, **kwargs):
+ return cls
+
+
+def _ansible_excepthook(exc_type, exc_value, tb):
+ # Using an exception allows us to catch it if the calling code knows it can recover
+ if issubclass(exc_type, AnsibleModuleError):
+ module.fail_json(**exc_value.results)
+ else:
+ sys.__excepthook__(exc_type, exc_value, tb)
+
+
+def additional_parameter_handling(params):
+ """Additional parameter validation and reformatting"""
+ # When path is a directory, rewrite the pathname to be the file inside of the directory
+ # TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
+ # I think this is where we want to be in the future:
+ # when isdir(path):
+ # if state == absent: Remove the directory
+ # if state == touch: Touch the directory
+ # if state == directory: Assert the directory is the same as the one specified
+ # if state == file: place inside of the directory (use _original_basename)
+ # if state == link: place inside of the directory (use _original_basename. Fallback to src?)
+ # if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
+ if (params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict'))):
+ basename = None
+
+ if params['_original_basename']:
+ basename = params['_original_basename']
+ elif params['src']:
+ basename = os.path.basename(params['src'])
+
+ if basename:
+ params['path'] = os.path.join(params['path'], basename)
+
+ # state should default to file, but since that creates many conflicts,
+ # default state to 'current' when it exists.
+ prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
+
+ if params['state'] is None:
+ if prev_state != 'absent':
+ params['state'] = prev_state
+ elif params['recurse']:
+ params['state'] = 'directory'
+ else:
+ params['state'] = 'file'
+
+ # make sure the target path is a directory when we're doing a recursive operation
+ if params['recurse'] and params['state'] != 'directory':
+ raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
+ "path": params["path"]})
+
+ # Fail if 'src' but no 'state' is specified
+ if params['src'] and params['state'] not in ('link', 'hard'):
+ raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
+ 'path': params['path']})
+
+
+def get_state(path):
+ ''' Find out current state '''
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ try:
+ if os.path.lexists(b_path):
+ if os.path.islink(b_path):
+ return 'link'
+ elif os.path.isdir(b_path):
+ return 'directory'
+ elif os.stat(b_path).st_nlink > 1:
+ return 'hard'
+
+ # could be many other things, but defaulting to file
+ return 'file'
+
+ return 'absent'
+ except OSError as e:
+ if e.errno == errno.ENOENT: # It may already have been removed
+ return 'absent'
+ else:
+ raise
+
+
+# This should be moved into the common file utilities
+def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
+ changed = False
+
+ try:
+ for b_root, b_dirs, b_files in os.walk(b_path):
+ for b_fsobj in b_dirs + b_files:
+ b_fsname = os.path.join(b_root, b_fsobj)
+ if not os.path.islink(b_fsname):
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+
+ else:
+ # Change perms on the link
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+
+ if follow:
+ b_fsname = os.path.join(b_root, os.readlink(b_fsname))
+ # The link target could be nonexistent
+ if os.path.exists(b_fsname):
+ if os.path.isdir(b_fsname):
+ # Link is a directory so change perms on the directory's contents
+ changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)
+
+ # Change perms on the file pointed to by the link
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
+ changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
+ changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
+ except RuntimeError as e:
+ # on Python3 "RecursionError" is raised which is derived from "RuntimeError"
+ # TODO once this function is moved into the common file utilities, this should probably raise more general exception
+ raise AnsibleModuleError(
+ results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
+ )
+
+ return changed
+
+
+def initial_diff(path, state, prev_state):
+ diff = {'before': {'path': path},
+ 'after': {'path': path},
+ }
+
+ if prev_state != state:
+ diff['before']['state'] = prev_state
+ diff['after']['state'] = state
+ if state == 'absent' and prev_state == 'directory':
+ walklist = {
+ 'directories': [],
+ 'files': [],
+ }
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ for base_path, sub_folders, files in os.walk(b_path):
+ for folder in sub_folders:
+ folderpath = os.path.join(base_path, folder)
+ walklist['directories'].append(folderpath)
+
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ walklist['files'].append(filepath)
+
+ diff['before']['path_content'] = walklist
+
+ return diff
+
+#
+# States
+#
+
+
+def get_timestamp_for_time(formatted_time, time_format):
+ if formatted_time == 'preserve':
+ return None
+ elif formatted_time == 'now':
+ return Sentinel
+ else:
+ try:
+ struct = time.strptime(formatted_time, time_format)
+ struct_time = time.mktime(struct)
+ except (ValueError, OverflowError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
+ % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
+
+ return struct_time
+
+
+def update_timestamp_for_file(path, mtime, atime, diff=None):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ try:
+ # When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
+ # https://github.com/ansible/ansible/issues/50943
+ if mtime is Sentinel and atime is Sentinel:
+ # It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
+ # not be updated. Just use the current time for the diff values
+ mtime = atime = time.time()
+
+ previous_mtime = os.stat(b_path).st_mtime
+ previous_atime = os.stat(b_path).st_atime
+
+ set_time = None
+ else:
+ # If both parameters are None ('preserve'), there is nothing to do
+ if mtime is None and atime is None:
+ return False
+
+ previous_mtime = os.stat(b_path).st_mtime
+ previous_atime = os.stat(b_path).st_atime
+
+ if mtime is None:
+ mtime = previous_mtime
+ elif mtime is Sentinel:
+ mtime = time.time()
+
+ if atime is None:
+ atime = previous_atime
+ elif atime is Sentinel:
+ atime = time.time()
+
+ # If both timestamps are already ok, nothing to do
+ if mtime == previous_mtime and atime == previous_atime:
+ return False
+
+ set_time = (atime, mtime)
+
+ os.utime(b_path, set_time)
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ if 'after' not in diff:
+ diff['after'] = {}
+ if mtime != previous_mtime:
+ diff['before']['mtime'] = previous_mtime
+ diff['after']['mtime'] = mtime
+ if atime != previous_atime:
+ diff['before']['atime'] = previous_atime
+ diff['after']['atime'] = atime
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
+ % to_native(e, nonstring='simplerepr'), 'path': path})
+ return True
+
+
+def keep_backward_compatibility_on_timestamps(parameter, state):
+ if state in ['file', 'hard', 'directory', 'link'] and parameter is None:
+ return 'preserve'
+ elif state == 'touch' and parameter is None:
+ return 'now'
+ else:
+ return parameter
+
+
+def execute_diff_peek(path):
+ """Take a guess as to whether a file is a binary file"""
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ appears_binary = False
+ try:
+ with open(b_path, 'rb') as f:
+ head = f.read(8192)
+ except Exception:
+ # If we can't read the file, we're okay assuming it's text
+ pass
+ else:
+ if b"\x00" in head:
+ appears_binary = True
+
+ return appears_binary
+
+
+def ensure_absent(path):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ result = {}
+
+ if prev_state != 'absent':
+ diff = initial_diff(path, 'absent', prev_state)
+
+ if not module.check_mode:
+ if prev_state == 'directory':
+ try:
+ shutil.rmtree(b_path, ignore_errors=False)
+ except Exception as e:
+ raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
+ else:
+ try:
+ os.unlink(b_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # It may already have been removed
+ raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
+ 'path': path})
+
+ result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
+ else:
+ result.update({'path': path, 'changed': False, 'state': 'absent'})
+
+ return result
+
+
+def execute_touch(path, follow, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ changed = False
+ result = {'dest': path}
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ if not module.check_mode:
+ if prev_state == 'absent':
+ # Create an empty file if the filename did not already exist
+ try:
+ open(b_path, 'wb').close()
+ changed = True
+ except (OSError, IOError) as e:
+ raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ # Update the attributes on the file
+ diff = initial_diff(path, 'touch', prev_state)
+ file_args = module.load_file_common_arguments(module.params)
+ try:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except SystemExit as e:
+ if e.code:
+ # We take this to mean that fail_json() was called from
+ # somewhere in basic.py
+ if prev_state == 'absent':
+ # If we just created the file we can safely remove it
+ os.remove(b_path)
+ raise
+
+ result['changed'] = changed
+ result['diff'] = diff
+ return result
+
+
+def ensure_file_attributes(path, follow, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ if prev_state != 'file':
+ if follow and prev_state == 'link':
+ # follow symlink and operate on original
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ prev_state = get_state(b_path)
+ file_args['path'] = path
+
+ if prev_state not in ('file', 'hard'):
+ # file is not absent and any other state is a conflict
+ raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
+ 'path': path, 'state': prev_state})
+
+ diff = initial_diff(path, 'file', prev_state)
+ changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+
+def ensure_directory(path, follow, recurse, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # For followed symlinks, we need to operate on the target of the link
+ if follow and prev_state == 'link':
+ b_path = os.path.realpath(b_path)
+ path = to_native(b_path, errors='strict')
+ file_args['path'] = path
+ prev_state = get_state(b_path)
+
+ changed = False
+ diff = initial_diff(path, 'directory', prev_state)
+
+ if prev_state == 'absent':
+ # Create directory and assign permissions to it
+ if module.check_mode:
+ return {'path': path, 'changed': True, 'diff': diff}
+ curpath = ''
+
+ try:
+ # Split the path so we can apply filesystem attributes recursively
+ # from the root (/) directory for absolute paths or the base path
+ # of a relative path. We can then walk the appropriate directory
+ # path to apply attributes.
+ # Something like mkdir -p with mode applied to all of the newly created directories
+ for dirname in path.strip('/').split('/'):
+ curpath = '/'.join([curpath, dirname])
+ # Remove leading slash if we're creating a relative path
+ if not os.path.isabs(path):
+ curpath = curpath.lstrip('/')
+ b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
+ if not os.path.exists(b_curpath):
+ try:
+ os.mkdir(b_curpath)
+ changed = True
+ except OSError as ex:
+ # Possibly something else created the dir since the os.path.exists
+ # check above. As long as it's a dir, we don't need to error out.
+ if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
+ raise
+ tmp_file_args = file_args.copy()
+ tmp_file_args['path'] = curpath
+ changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ except Exception as e:
+ raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
+ ' %s' % (curpath, to_native(e)),
+ 'path': path})
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+ elif prev_state != 'directory':
+ # We already know prev_state is not 'absent', therefore it exists in some form.
+ raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
+ 'path': path})
+
+ #
+ # previous state == directory
+ #
+
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+ if recurse:
+ changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
+
+ return {'path': path, 'changed': changed, 'diff': diff}
+
+
+def ensure_symlink(path, src, follow, force, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+ # src is either the source of a symlink or an informational value passed along for the template
+ # or copy module; even if this module never uses it, it is needed to key off some behaviours
+ if src is None:
+ if follow:
+ # use the current target of the link as the source
+ src = to_native(os.path.realpath(b_path), errors='strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+
+ if not os.path.islink(b_path) and os.path.isdir(b_path):
+ relpath = path
+ else:
+ b_relpath = os.path.dirname(b_path)
+ relpath = to_native(b_relpath, errors='strict')
+
+ absrc = os.path.join(relpath, src)
+ b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
+ if not force and not os.path.exists(b_absrc):
+ raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
+ ' really want to create the link: %s' % absrc,
+ 'path': path, 'src': src})
+
+ if prev_state == 'directory':
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
+ % (prev_state, path),
+ 'path': path})
+ elif os.listdir(b_path):
+ # refuse to replace a directory that has files in it
+ raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
+ ' convert it' % path,
+ 'path': path})
+ elif prev_state in ('file', 'hard') and not force:
+ raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
+ % (prev_state, path),
+ 'path': path})
+
+ diff = initial_diff(path, 'link', prev_state)
+ changed = False
+
+ if prev_state in ('hard', 'file', 'directory', 'absent'):
+ changed = True
+ elif prev_state == 'link':
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
+ diff['before']['src'] = to_native(b_old_src, errors='strict')
+ diff['after']['src'] = src
+ changed = True
+ else:
+ raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+
+ if changed and not module.check_mode:
+ if prev_state != 'absent':
+ # try to replace atomically
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
+ try:
+ if prev_state == 'directory':
+ os.rmdir(b_path)
+ os.symlink(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
+ except OSError as e:
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ else:
+ try:
+ os.symlink(b_src, b_path)
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ if module.check_mode and not os.path.exists(b_path):
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+ # Now that we might have created the symlink, get the arguments.
+ # We need to do it now so we can properly follow the symlink if needed
+ # because load_file_common_arguments sets 'path' according
+ # the value of follow and the symlink existence.
+ file_args = module.load_file_common_arguments(module.params)
+
+ # Whenever we create a link to a nonexistent target we know that the nonexistent target
+ # cannot have any permissions set on it. Skip setting those and emit a warning (the user
+ # can set follow=False to remove the warning)
+ if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
+ module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
+ ' set to False to avoid this.')
+ else:
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+
+def ensure_hardlink(path, src, follow, force, timestamps):
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_src = to_bytes(src, errors='surrogate_or_strict')
+ prev_state = get_state(b_path)
+ file_args = module.load_file_common_arguments(module.params)
+ mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
+ atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
+
+ # src is the source of a hardlink. We require it if we are creating a new hardlink.
+ # We require path in the argument_spec so we know it is present at this point.
+ if src is None:
+ raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
+
+ if not os.path.exists(b_src):
+ raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
+
+ diff = initial_diff(path, 'hard', prev_state)
+ changed = False
+
+ if prev_state == 'absent':
+ changed = True
+ elif prev_state == 'link':
+ b_old_src = os.readlink(b_path)
+ if b_old_src != b_src:
+ diff['before']['src'] = to_native(b_old_src, errors='strict')
+ diff['after']['src'] = src
+ changed = True
+ elif prev_state == 'hard':
+ if os.stat(b_path).st_ino != os.stat(b_src).st_ino:
+ changed = True
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
+ 'dest': path, 'src': src})
+ elif prev_state == 'file':
+ changed = True
+ if not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
+ 'dest': path, 'src': src})
+ elif prev_state == 'directory':
+ changed = True
+ if os.path.exists(b_path):
+ if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
+ return {'path': path, 'changed': False}
+ elif not force:
+ raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
+ 'dest': path, 'src': src})
+ else:
+ raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
+
+ if changed and not module.check_mode:
+ if prev_state != 'absent':
+ # try to replace atomically
+ b_tmppath = to_bytes(os.path.sep).join(
+ [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
+ )
+ try:
+ if prev_state == 'directory':
+ if os.path.exists(b_path):
+ try:
+ os.unlink(b_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT: # It may already have been removed
+ raise
+ os.link(b_src, b_tmppath)
+ os.rename(b_tmppath, b_path)
+ except OSError as e:
+ if os.path.exists(b_tmppath):
+ os.unlink(b_tmppath)
+ raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+ else:
+ try:
+ os.link(b_src, b_path)
+ except OSError as e:
+ raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
+ % to_native(e, nonstring='simplerepr'),
+ 'path': path})
+
+ if module.check_mode and not os.path.exists(b_path):
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+ changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
+ changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
+
+ return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
+ path=dict(type='path', required=True, aliases=['dest', 'name']),
+ _original_basename=dict(type='str'), # Internal use only, for recursive ops
+ recurse=dict(type='bool', default=False),
+ force=dict(type='bool', default=False), # Note: Should not be in file_common_args in future
+ follow=dict(type='bool', default=True), # Note: Different default than file_common_args
+ _diff_peek=dict(type='bool'), # Internal use only, for internal checks in the action plugins
+ src=dict(type='path'), # Note: Should not be in file_common_args in future
+ modification_time=dict(type='str'),
+ modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
+ access_time=dict(type='str'),
+ access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
+ sys.excepthook = _ansible_excepthook
+ additional_parameter_handling(module.params)
+ params = module.params
+
+ state = params['state']
+ recurse = params['recurse']
+ force = params['force']
+ follow = params['follow']
+ path = params['path']
+ src = params['src']
+
+ timestamps = {}
+ timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
+ timestamps['modification_time_format'] = params['modification_time_format']
+ timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
+ timestamps['access_time_format'] = params['access_time_format']
+
+ # short-circuit for diff_peek
+ if params['_diff_peek'] is not None:
+ appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
+ module.exit_json(path=path, changed=False, appears_binary=appears_binary)
+
+ if state == 'file':
+ result = ensure_file_attributes(path, follow, timestamps)
+ elif state == 'directory':
+ result = ensure_directory(path, follow, recurse, timestamps)
+ elif state == 'link':
+ result = ensure_symlink(path, src, follow, force, timestamps)
+ elif state == 'hard':
+ result = ensure_hardlink(path, src, follow, force, timestamps)
+ elif state == 'touch':
+ result = execute_touch(path, follow, timestamps)
+ elif state == 'absent':
+ result = ensure_absent(path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/find.py b/lib/ansible/modules/find.py
new file mode 100644
index 00000000..b6aad844
--- /dev/null
+++ b/lib/ansible/modules/find.py
@@ -0,0 +1,467 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: find
+author: Brian Coca (@bcoca)
+version_added: "2.0"
+short_description: Return a list of files based on specific criteria
+description:
+ - Return a list of files based on specific criteria. Multiple criteria are AND'd together.
+ - For Windows targets, use the M(ansible.windows.win_find) module instead.
+options:
+ age:
+ description:
+ - Select files whose age is equal to or greater than the specified time.
+ - Use a negative age to find files equal to or less than the specified time.
+ - You can choose seconds, minutes, hours, days, or weeks by specifying the
+ first letter of any of those words (e.g., "1w").
+ type: str
+ patterns:
+ default: '*'
+ description:
+ - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
+ - The patterns restrict the list of files to be returned to those whose basenames match at
+ least one of the patterns specified. Multiple patterns can be specified using a list.
+ - The pattern is matched against the file base name, excluding the directory.
+ - When using regexen, the pattern MUST match the ENTIRE file name, not just parts of it. So
+ if you are looking to match all files ending in .default, you'd need to use '.*\.default'
+ as a regexp and not just '\.default'.
+ - This parameter expects a list, which can be either comma separated or YAML. If any of the
+ patterns contain a comma, make sure to put them in a list to avoid splitting the patterns
+ in undesirable ways.
+ type: list
+ aliases: [ pattern ]
+ excludes:
+ description:
+ - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
+ - Items whose basenames match an C(excludes) pattern are culled from C(patterns) matches.
+ Multiple patterns can be specified using a list.
+ type: list
+ aliases: [ exclude ]
+ version_added: "2.5"
+ contains:
+ description:
+ - A regular expression or pattern which should be matched against the file content.
+ type: str
+ paths:
+ description:
+ - List of paths of directories to search. All paths must be fully qualified.
+ type: list
+ required: true
+ aliases: [ name, path ]
+ file_type:
+ description:
+ - Type of file to select.
+ - The 'link' and 'any' choices were added in Ansible 2.3.
+ type: str
+ choices: [ any, directory, file, link ]
+ default: file
+ recurse:
+ description:
+ - If target is a directory, recursively descend into the directory looking for files.
+ type: bool
+ default: no
+ size:
+ description:
+ - Select files whose size is equal to or greater than the specified size.
+ - Use a negative size to find files equal to or less than the specified size.
+ - Unqualified values are in bytes but b, k, m, g, and t can be appended to specify
+ bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
+ - Size is not evaluated for directories.
+ type: str
+ age_stamp:
+ description:
+ - Choose the file property against which we compare age.
+ type: str
+ choices: [ atime, ctime, mtime ]
+ default: mtime
+ hidden:
+ description:
+ - Set this to C(yes) to include hidden files, otherwise they will be ignored.
+ type: bool
+ default: no
+ follow:
+ description:
+ - Set this to C(yes) to follow symlinks in path for systems with Python 2.6+.
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Set this to C(yes) to retrieve a file's SHA1 checksum.
+ type: bool
+ default: no
+ use_regex:
+ description:
+ - If C(no), the patterns are file globs (shell).
+ - If C(yes), they are python regexes.
+ type: bool
+ default: no
+ depth:
+ description:
+ - Set the maximum number of levels to descend into.
+ - Setting C(recurse) to C(no) will override this value, limiting the search to a depth of 1.
+ - Default is unlimited depth.
+ type: int
+ version_added: "2.6"
+seealso:
+- module: ansible.windows.win_find
+'''
+
+
+EXAMPLES = r'''
+- name: Recursively find /tmp files older than 2 days
+ find:
+ paths: /tmp
+ age: 2d
+ recurse: yes
+
+- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
+ find:
+ paths: /tmp
+ age: 4w
+ size: 1m
+ recurse: yes
+
+- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
+ find:
+ paths: /var/tmp
+ age: 3600
+ age_stamp: atime
+ recurse: yes
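+
+# An illustrative sketch (path is hypothetical): a negative age, as
+# documented above, selects files modified within the last day.
+- name: Find /var/cache files modified within the last day
+ find:
+ paths: /var/cache
+ age: -1d
+ recurse: yes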
+
+- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
+ find:
+ paths: /var/log
+ patterns: '*.old,*.log.gz'
+ size: 10m
+
+# Note that YAML double quotes require escaping backslashes but YAML single quotes do not.
+- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
+ find:
+ paths: /var/log
+ patterns: "^.*?\\.(?:old|log\\.gz)$"
+ size: 10m
+ use_regex: yes
+
+- name: Find /var/log all directories, exclude nginx and mysql
+ find:
+ paths: /var/log
+ recurse: no
+ file_type: directory
+ excludes: 'nginx,mysql'
+
+# When using patterns that contain a comma, make sure they are formatted as lists to avoid splitting the pattern
+- name: Use a single pattern that contains a comma formatted as a list
+ find:
+ paths: /var/log
+ file_type: file
+ use_regex: yes
+ patterns: ['^_[0-9]{2,4}_.*.log$']
+
+- name: Use multiple patterns that contain a comma formatted as a YAML list
+ find:
+ paths: /var/log
+ file_type: file
+ use_regex: yes
+ patterns:
+ - '^_[0-9]{2,4}_.*.log$'
+ - '^[a-z]{1,5}_.*log$'
+
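+# An illustrative sketch (path and pattern are hypothetical): combine the
+# depth and contains options documented above to limit recursion and match
+# on file content.
+- name: Find .conf files mentioning "deprecated", descending at most 2 levels
+ find:
+ paths: /etc
+ patterns: '*.conf'
+ recurse: yes
+ depth: 2
+ contains: '.*deprecated.*'
+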
+'''
+
+RETURN = r'''
+files:
+ description: All matches found with the specified criteria (see stat module for full output of each dictionary)
+ returned: success
+ type: list
+ sample: [
+ { path: "/var/tmp/test1",
+ mode: "0644",
+ "...": "...",
+ checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
+ },
+ { path: "/var/tmp/test2",
+ "...": "..."
+ },
+ ]
+matched:
+ description: Number of matches
+ returned: success
+ type: int
+ sample: 14
+examined:
+ description: Number of filesystem objects looked at
+ returned: success
+ type: int
+ sample: 34
+'''
+
+import fnmatch
+import grp
+import os
+import pwd
+import re
+import stat
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def pfilter(f, patterns=None, excludes=None, use_regex=False):
+ '''filter using glob or regex patterns'''
+ if patterns is None and excludes is None:
+ return True
+
+ if use_regex:
+ if patterns and excludes is None:
+ for p in patterns:
+ r = re.compile(p)
+ if r.match(f):
+ return True
+
+ elif patterns and excludes:
+ for p in patterns:
+ r = re.compile(p)
+ if r.match(f):
+ for e in excludes:
+ r = re.compile(e)
+ if r.match(f):
+ return False
+ return True
+
+ else:
+ if patterns and excludes is None:
+ for p in patterns:
+ if fnmatch.fnmatch(f, p):
+ return True
+
+ elif patterns and excludes:
+ for p in patterns:
+ if fnmatch.fnmatch(f, p):
+ for e in excludes:
+ if fnmatch.fnmatch(f, e):
+ return False
+ return True
+
+ return False
+
+
+def agefilter(st, now, age, timestamp):
+ '''filter files older than age (or younger, when age is negative)'''
+ if age is None:
+ return True
+ elif age >= 0 and now - getattr(st, "st_%s" % timestamp) >= abs(age):
+ return True
+ elif age < 0 and now - getattr(st, "st_%s" % timestamp) <= abs(age):
+ return True
+ return False
+
+
+def sizefilter(st, size):
+ '''filter files greater than size (or smaller, when size is negative)'''
+ if size is None:
+ return True
+ elif size >= 0 and st.st_size >= abs(size):
+ return True
+ elif size < 0 and st.st_size <= abs(size):
+ return True
+ return False
+
+
+def contentfilter(fsname, pattern):
+ """
+ Filter files which contain the given expression
+ :arg fsname: Filename to scan for lines matching a pattern
+ :arg pattern: Pattern to look for inside of line
+ :rtype: bool
+ :returns: True if one of the lines in fsname matches the pattern. Otherwise False
+ """
+ if pattern is None:
+ return True
+
+ prog = re.compile(pattern)
+
+ try:
+ with open(fsname) as f:
+ for line in f:
+ if prog.match(line):
+ return True
+
+ except Exception:
+ pass
+
+ return False
+
+
+def statinfo(st):
+ pw_name = ""
+ gr_name = ""
+
+ try: # user data
+ pw_name = pwd.getpwuid(st.st_uid).pw_name
+ except Exception:
+ pass
+
+ try: # group data
+ gr_name = grp.getgrgid(st.st_gid).gr_name
+ except Exception:
+ pass
+
+ return {
+ 'mode': "%04o" % stat.S_IMODE(st.st_mode),
+ 'isdir': stat.S_ISDIR(st.st_mode),
+ 'ischr': stat.S_ISCHR(st.st_mode),
+ 'isblk': stat.S_ISBLK(st.st_mode),
+ 'isreg': stat.S_ISREG(st.st_mode),
+ 'isfifo': stat.S_ISFIFO(st.st_mode),
+ 'islnk': stat.S_ISLNK(st.st_mode),
+ 'issock': stat.S_ISSOCK(st.st_mode),
+ 'uid': st.st_uid,
+ 'gid': st.st_gid,
+ 'size': st.st_size,
+ 'inode': st.st_ino,
+ 'dev': st.st_dev,
+ 'nlink': st.st_nlink,
+ 'atime': st.st_atime,
+ 'mtime': st.st_mtime,
+ 'ctime': st.st_ctime,
+ 'gr_name': gr_name,
+ 'pw_name': pw_name,
+ 'wusr': bool(st.st_mode & stat.S_IWUSR),
+ 'rusr': bool(st.st_mode & stat.S_IRUSR),
+ 'xusr': bool(st.st_mode & stat.S_IXUSR),
+ 'wgrp': bool(st.st_mode & stat.S_IWGRP),
+ 'rgrp': bool(st.st_mode & stat.S_IRGRP),
+ 'xgrp': bool(st.st_mode & stat.S_IXGRP),
+ 'woth': bool(st.st_mode & stat.S_IWOTH),
+ 'roth': bool(st.st_mode & stat.S_IROTH),
+ 'xoth': bool(st.st_mode & stat.S_IXOTH),
+ 'isuid': bool(st.st_mode & stat.S_ISUID),
+ 'isgid': bool(st.st_mode & stat.S_ISGID),
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ paths=dict(type='list', required=True, aliases=['name', 'path']),
+ patterns=dict(type='list', default=['*'], aliases=['pattern']),
+ excludes=dict(type='list', aliases=['exclude']),
+ contains=dict(type='str'),
+ file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
+ age=dict(type='str'),
+ age_stamp=dict(type='str', default="mtime", choices=['atime', 'ctime', 'mtime']),
+ size=dict(type='str'),
+ recurse=dict(type='bool', default=False),
+ hidden=dict(type='bool', default=False),
+ follow=dict(type='bool', default=False),
+ get_checksum=dict(type='bool', default=False),
+ use_regex=dict(type='bool', default=False),
+ depth=dict(type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+
+ filelist = []
+
+ if params['age'] is None:
+ age = None
+ else:
+ # convert age to seconds:
+ m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
+ seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
+ if m:
+ age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
+ else:
+ module.fail_json(age=params['age'], msg="failed to process age")
+
+ if params['size'] is None:
+ size = None
+ else:
+ # convert size to bytes:
+ m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
+ bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
+ if m:
+ size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
+ else:
+ module.fail_json(size=params['size'], msg="failed to process size")
+
+ now = time.time()
+ msg = ''
+ looked = 0
+ for npath in params['paths']:
+ npath = os.path.expanduser(os.path.expandvars(npath))
+ if os.path.isdir(npath):
+ for root, dirs, files in os.walk(npath, followlinks=params['follow']):
+ looked = looked + len(files) + len(dirs)
+ for fsobj in (files + dirs):
+ fsname = os.path.normpath(os.path.join(root, fsobj))
+ if params['depth']:
+ wpath = npath.rstrip(os.path.sep) + os.path.sep
+ depth = int(fsname.count(os.path.sep)) - int(wpath.count(os.path.sep)) + 1
+ if depth > params['depth']:
+ continue
+ if os.path.basename(fsname).startswith('.') and not params['hidden']:
+ continue
+
+ try:
+ st = os.lstat(fsname)
+ except Exception:
+ msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
+ continue
+
+ r = {'path': fsname}
+ if params['file_type'] == 'any':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ if stat.S_ISREG(st.st_mode) and params['get_checksum']:
+ r['checksum'] = module.sha1(fsname)
+ filelist.append(r)
+
+ elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ filelist.append(r)
+
+ elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
+ agefilter(st, now, age, params['age_stamp']) and \
+ sizefilter(st, size) and contentfilter(fsname, params['contains']):
+
+ r.update(statinfo(st))
+ if params['get_checksum']:
+ r['checksum'] = module.sha1(fsname)
+ filelist.append(r)
+
+ elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
+ if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
+
+ r.update(statinfo(st))
+ filelist.append(r)
+
+ if not params['recurse']:
+ break
+ else:
+ msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
+
+ matched = len(filelist)
+ module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/gather_facts.py b/lib/ansible/modules/gather_facts.py
new file mode 100644
index 00000000..a019d835
--- /dev/null
+++ b/lib/ansible/modules/gather_facts.py
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gather_facts
+version_added: 2.8
+short_description: Gathers facts about remote hosts
+description:
+ - This module takes care of executing the configured facts modules, the default is to use the M(ansible.builtin.setup) module.
+ - This module is automatically called by playbooks to gather useful variables about remote hosts that can be used in playbooks.
+ - It can also be executed directly by C(/usr/bin/ansible) to check what variables are available to a host.
+ - Ansible provides many I(facts) about the system, automatically.
+options:
+ parallel:
+ description:
+ - A toggle that controls whether the fact modules are executed in parallel or serially and in order.
+ Running serially can guarantee the merge order of module facts at the expense of performance.
+ - By default it will be true if more than one fact module is used.
+ type: bool
+notes:
+ - This module is mostly a wrapper around other fact gathering modules.
+ - Options passed to this module must be supported by all the underlying fact modules configured.
+ - Facts returned by each module will be merged; conflicts will favor 'last merged'.
+ Order is not guaranteed when doing parallel gathering on multiple modules.
+author:
+ - "Ansible Core Team"
+'''
+
+RETURN = """
+# depends on the fact module called
+"""
+
+EXAMPLES = """
+# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
+# ansible all -m gather_facts --tree /tmp/facts
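+
+# An illustrative sketch (the task below is hypothetical): disable the
+# documented parallel option so module facts merge serially and in order.
+- name: Gather facts serially for a deterministic merge order
+ gather_facts:
+ parallel: no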
+"""
diff --git a/lib/ansible/modules/get_url.py b/lib/ansible/modules/get_url.py
new file mode 100644
index 00000000..c89a4401
--- /dev/null
+++ b/lib/ansible/modules/get_url.py
@@ -0,0 +1,650 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: get_url
+short_description: Downloads files from HTTP, HTTPS, or FTP to node
+description:
+ - Downloads files from HTTP, HTTPS, or FTP to the remote server. The remote
+ server I(must) have direct access to the remote resource.
+ - By default, if an environment variable C(<protocol>_proxy) is set on
+ the target host, requests will be sent through that proxy. This
+ behaviour can be overridden by setting a variable for this task
+ (see `setting the environment
+ <https://docs.ansible.com/playbooks_environment.html>`_),
+ or by using the use_proxy option.
+ - HTTP redirects can redirect from HTTP to HTTPS so you should be sure that
+ your proxy environment for both protocols is correct.
+ - From Ansible 2.4 when run with C(--check), it will do a HEAD request to validate the URL but
+ will not download the entire file or verify it against hashes.
+ - For Windows targets, use the M(ansible.windows.win_get_url) module instead.
+version_added: '0.6'
+options:
+ url:
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ type: str
+ required: true
+ dest:
+ description:
+ - Absolute path of where to download the file to.
+ - If C(dest) is a directory, either the server provided filename or, if
+ none provided, the base name of the URL on the remote server will be
+ used. If a directory, C(force) has no effect.
+ - If C(dest) is a directory, the file will always be downloaded
+ (regardless of the C(force) option), but replaced only if the contents changed.
+ type: path
+ required: true
+ tmp_dest:
+ description:
+ - Absolute path of where temporary file is downloaded to.
+ - When run on Ansible 2.5 or greater, path defaults to Ansible's C(remote_tmp) setting.
+ - When run on Ansible prior to 2.5, it defaults to C(TMPDIR), C(TEMP) or C(TMP) env variables or a platform specific value.
+ - U(https://docs.python.org/2/library/tempfile.html#tempfile.tempdir)
+ type: path
+ version_added: '2.1'
+ force:
+ description:
+ - If C(yes) and C(dest) is not a directory, will download the file every
+ time and replace the file if the contents change. If C(no), the file
+ will only be downloaded if the destination does not exist. Generally
+ should be C(yes) only for small local files.
+ - Prior to 0.6, this module behaved as if C(yes) was the default.
+ - Alias C(thirsty) has been deprecated and will be removed in 2.13.
+ type: bool
+ default: no
+ aliases: [ thirsty ]
+ version_added: '0.7'
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '2.1'
+ sha256sum:
+ description:
+ - If a SHA-256 checksum is passed to this parameter, the digest of the
+ destination file will be calculated after it is downloaded to ensure
+ its integrity and verify that the transfer completed successfully.
+ This option is deprecated and will be removed in version 2.14. Use
+ option C(checksum) instead.
+ default: ''
+ version_added: "1.3"
+ checksum:
+ description:
+ - 'If a checksum is passed to this parameter, the digest of the
+ destination file will be calculated after it is downloaded to ensure
+ its integrity and verify that the transfer completed successfully.
+ Format: <algorithm>:<checksum|url>, e.g. checksum="sha256:D98291AC[...]B6DC7B97",
+ checksum="sha256:http://example.com/path/sha256sum.txt"'
+ - If you are concerned about portability, only the sha1 algorithm is available
+ on all platforms and Python versions.
+ - The third party hashlib library can be installed for access to additional algorithms.
+ - Additionally, if a checksum is passed to this parameter, and the file exists under
+ the C(dest) location, the I(destination_checksum) will be calculated, and if the
+ checksum equals I(destination_checksum), the file download will be skipped
+ (unless C(force) is true). If the checksum does not equal I(destination_checksum),
+ the destination file is deleted.
+ type: str
+ default: ''
+ version_added: "2.0"
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: yes
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ timeout:
+ description:
+ - Timeout in seconds for URL request.
+ type: int
+ default: 10
+ version_added: '1.8'
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ - The hash/dict format was added in Ansible 2.6.
+ - Previous versions used a C("key:value,key:value") string format.
+ - The C("key:value,key:value") string format is deprecated and has been removed in version 2.10.
+ type: dict
+ version_added: '2.0'
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ - Since version 2.8 you can also use the C(username) alias for this option.
+ type: str
+ aliases: ['username']
+ version_added: '1.6'
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ - Since version 2.8 you can also use the C(password) alias for this option.
+ type: str
+ aliases: ['password']
+ version_added: '1.6'
+ force_basic_auth:
+ description:
+ - Force the sending of the Basic authentication header upon initial request.
+ - httplib2, the library used by the uri module, only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail.
+ type: bool
+ default: no
+ version_added: '2.0'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, C(client_key) is not required.
+ type: path
+ version_added: '2.4'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If C(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '2.4'
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+# informational: requirements for nodes
+extends_documentation_fragment:
+ - files
+notes:
+ - For Windows targets, use the M(ansible.windows.win_get_url) module instead.
+seealso:
+- module: ansible.builtin.uri
+- module: ansible.windows.win_get_url
+author:
+- Jan-Piet Mens (@jpmens)
+'''
+
+EXAMPLES = r'''
+- name: Download foo.conf
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ mode: '0440'
+
+- name: Download file and force basic auth
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ force_basic_auth: yes
+
+- name: Download file with custom HTTP headers
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ headers:
+ key1: one
+ key2: two
+
+- name: Download file with check (sha256)
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
+
+- name: Download file with check (md5)
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: md5:66dffb5228a211e61d6d7ef4a86f5758
+
+- name: Download file with checksum url (sha256)
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ checksum: sha256:http://example.com/path/sha256sum.txt
+
+- name: Download file from a file path
+ get_url:
+ url: file:///tmp/afile.txt
+ dest: /tmp/afilecopy.txt
+
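+# client_cert/client_key (documented above, added in 2.4) enable TLS client
+# authentication; the certificate paths below are illustrative assumptions.
+- name: Download file using SSL client authentication
+  get_url:
+    url: https://example.com/path/file.conf
+    dest: /etc/foo.conf
+    client_cert: /etc/pki/tls/certs/client.pem
+    client_key: /etc/pki/tls/private/client.key
+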
+- name: >-
+    Fetch file that requires authentication.
+    username/password aliases are only available since 2.8; in older versions use url_username/url_password
+ get_url:
+ url: http://example.com/path/file.conf
+ dest: /etc/foo.conf
+ username: bar
+ password: '{{ mysecret }}'
+'''
+
+RETURN = r'''
+backup_file:
+ description: name of backup file created after download
+ returned: changed and if backup=yes
+ type: str
+ sample: /path/to/file.txt.2015-02-12@22:09~
+checksum_dest:
+ description: sha1 checksum of the file after copy
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+checksum_src:
+ description: sha1 checksum of the file
+ returned: success
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: /path/to/file.txt
+elapsed:
+ description: The number of seconds that elapsed while performing the download
+ returned: always
+ type: int
+ sample: 23
+gid:
+ description: group id of the file
+ returned: success
+ type: int
+ sample: 100
+group:
+ description: group of the file
+ returned: success
+ type: str
+ sample: "httpd"
+md5sum:
+ description: md5 checksum of the file after download
+ returned: when supported
+ type: str
+ sample: "2a5aeecc61dc98c4d780b14b330e3282"
+mode:
+ description: permissions of the target
+ returned: success
+ type: str
+ sample: "0644"
+msg:
+ description: the HTTP message from the request
+ returned: always
+ type: str
+ sample: OK (unknown bytes)
+owner:
+ description: owner of the file
+ returned: success
+ type: str
+ sample: httpd
+secontext:
+ description: the SELinux security context of the file
+ returned: success
+ type: str
+ sample: unconfined_u:object_r:user_tmp_t:s0
+size:
+ description: size of the target
+ returned: success
+ type: int
+ sample: 1220
+src:
+ description: source file used after download
+ returned: always
+ type: str
+ sample: /tmp/tmpAdFLdV
+state:
+ description: state of the target
+ returned: success
+ type: str
+ sample: file
+status_code:
+ description: the HTTP status code from the request
+ returned: always
+ type: int
+ sample: 200
+uid:
+ description: owner id of the file, after execution
+ returned: success
+ type: int
+ sample: 100
+url:
+ description: the actual URL used for the request
+ returned: always
+ type: str
+ sample: https://www.ansible.com/
+'''
+
+import datetime
+import os
+import re
+import shutil
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+# ==============================================================
+# url handling
+
+
+def url_filename(url):
+ fn = os.path.basename(urlsplit(url)[2])
+ if fn == '':
+ return 'index.html'
+ return fn
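+# Illustrative behaviour of url_filename (example URLs are assumptions, not
+# taken from the module):
+#   url_filename('https://example.com/dir/file.conf')  -> 'file.conf'
+#   url_filename('https://example.com/')               -> 'index.html'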
+
+
+def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest=''):
+ """
+ Download data from the url and store in a temporary file.
+
+ Return (tempfile, info about the request)
+ """
+ if module.check_mode:
+ method = 'HEAD'
+ else:
+ method = 'GET'
+
+ start = datetime.datetime.utcnow()
+ rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method)
+ elapsed = (datetime.datetime.utcnow() - start).seconds
+
+ if info['status'] == 304:
+ module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''), status_code=info['status'], elapsed=elapsed)
+
+    # Exceptions in fetch_url may result in a status of -1; this ensures a proper error reaches the user in all cases
+ if info['status'] == -1:
+ module.fail_json(msg=info['msg'], url=url, dest=dest, elapsed=elapsed)
+
+ if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
+ module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest, elapsed=elapsed)
+
+ # create a temporary file and copy content to do checksum-based replacement
+ if tmp_dest:
+ # tmp_dest should be an existing dir
+ tmp_dest_is_dir = os.path.isdir(tmp_dest)
+ if not tmp_dest_is_dir:
+ if os.path.exists(tmp_dest):
+ module.fail_json(msg="%s is a file but should be a directory." % tmp_dest, elapsed=elapsed)
+ else:
+ module.fail_json(msg="%s directory does not exist." % tmp_dest, elapsed=elapsed)
+ else:
+ tmp_dest = module.tmpdir
+
+ fd, tempname = tempfile.mkstemp(dir=tmp_dest)
+
+ f = os.fdopen(fd, 'wb')
+ try:
+ shutil.copyfileobj(rsp, f)
+ except Exception as e:
+ os.remove(tempname)
+ module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), elapsed=elapsed, exception=traceback.format_exc())
+ f.close()
+ rsp.close()
+ return tempname, info
+
+
+def extract_filename_from_headers(headers):
+ """
+ Extracts a filename from the given dict of HTTP headers.
+
+ Looks for the content-disposition header and applies a regex.
+ Returns the filename if successful, else None."""
+ cont_disp_regex = 'attachment; ?filename="?([^"]+)'
+ res = None
+
+ if 'content-disposition' in headers:
+ cont_disp = headers['content-disposition']
+ match = re.match(cont_disp_regex, cont_disp)
+ if match:
+ res = match.group(1)
+ # Try preventing any funny business.
+ res = os.path.basename(res)
+
+ return res
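+# Illustrative call (the header value is an assumption; fetch_url returns
+# lower-cased header names, which is why the lookup key above is
+# 'content-disposition'):
+#   extract_filename_from_headers({'content-disposition': 'attachment; filename="foo.tar.gz"'})
+#   -> 'foo.tar.gz'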
+
+
+# ==============================================================
+# main
+
+def main():
+ argument_spec = url_argument_spec()
+
+ # setup aliases
+ argument_spec['url_username']['aliases'] = ['username']
+ argument_spec['url_password']['aliases'] = ['password']
+
+ argument_spec.update(
+ url=dict(type='str', required=True),
+ dest=dict(type='path', required=True),
+ backup=dict(type='bool', default=False),
+ sha256sum=dict(type='str', default=''),
+ checksum=dict(type='str', default=''),
+ timeout=dict(type='int', default=10),
+ headers=dict(type='dict'),
+ tmp_dest=dict(type='path'),
+ )
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ mutually_exclusive=[['checksum', 'sha256sum']],
+ )
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='2.13', collection_name='ansible.builtin')
+
+ if module.params.get('sha256sum'):
+ module.deprecate('The parameter "sha256sum" has been deprecated and will be removed, use "checksum" instead',
+ version='2.14', collection_name='ansible.builtin')
+
+ url = module.params['url']
+ dest = module.params['dest']
+ backup = module.params['backup']
+ force = module.params['force']
+ sha256sum = module.params['sha256sum']
+ checksum = module.params['checksum']
+ use_proxy = module.params['use_proxy']
+ timeout = module.params['timeout']
+ headers = module.params['headers']
+ tmp_dest = module.params['tmp_dest']
+
+ result = dict(
+ changed=False,
+ checksum_dest=None,
+ checksum_src=None,
+ dest=dest,
+ elapsed=0,
+ url=url,
+ )
+
+ dest_is_dir = os.path.isdir(dest)
+ last_mod_time = None
+
+ # workaround for usage of deprecated sha256sum parameter
+ if sha256sum:
+ checksum = 'sha256:%s' % (sha256sum)
+
+ # checksum specified, parse for algorithm and checksum
+ if checksum:
+ try:
+ algorithm, checksum = checksum.split(':', 1)
+ except ValueError:
+ module.fail_json(msg="The checksum parameter has to be in format <algorithm>:<checksum>", **result)
+
+ if checksum.startswith('http://') or checksum.startswith('https://') or checksum.startswith('ftp://'):
+ checksum_url = checksum
+ # download checksum file to checksum_tmpsrc
+ checksum_tmpsrc, checksum_info = url_get(module, checksum_url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
+ with open(checksum_tmpsrc) as f:
+ lines = [line.rstrip('\n') for line in f]
+ os.remove(checksum_tmpsrc)
+ checksum_map = {}
+ for line in lines:
+ parts = line.split(None, 1)
+ if len(parts) == 2:
+ checksum_map[parts[0]] = parts[1]
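+            # The checksum file is expected to follow the coreutils
+            # `sha256sum`/`md5sum` layout, one "<hash>  <path>" pair per line
+            # (a leading './' on the path is tolerated), e.g.:
+            #   b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c  ./file.conf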
+ filename = url_filename(url)
+
+ # Look through each line in the checksum file for a hash corresponding to
+ # the filename in the url, returning the first hash that is found.
+ for cksum in (s for (s, f) in checksum_map.items() if f.strip('./') == filename):
+ checksum = cksum
+ break
+ else:
+ checksum = None
+
+ if checksum is None:
+ module.fail_json(msg="Unable to find a checksum for file '%s' in '%s'" % (filename, checksum_url))
+ # Remove any non-alphanumeric characters, including the infamous
+ # Unicode zero-width space
+ checksum = re.sub(r'\W+', '', checksum).lower()
+ # Ensure the checksum portion is a hexdigest
+ try:
+ int(checksum, 16)
+ except ValueError:
+ module.fail_json(msg='The checksum format is invalid', **result)
+
+ if not dest_is_dir and os.path.exists(dest):
+ checksum_mismatch = False
+
+ # If the download is not forced and there is a checksum, allow
+ # checksum match to skip the download.
+ if not force and checksum != '':
+ destination_checksum = module.digest_from_file(dest, algorithm)
+
+ if checksum != destination_checksum:
+ checksum_mismatch = True
+
+ # Not forcing redownload, unless checksum does not match
+ if not force and checksum and not checksum_mismatch:
+            # allow file attribute changes
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, False)
+ if result['changed']:
+ module.exit_json(msg="file already exists but file attributes changed", **result)
+ module.exit_json(msg="file already exists", **result)
+
+ # If the file already exists, prepare the last modified time for the
+ # request.
+ mtime = os.path.getmtime(dest)
+ last_mod_time = datetime.datetime.utcfromtimestamp(mtime)
+
+ # If the checksum does not match we have to force the download
+ # because last_mod_time may be newer than on remote
+ if checksum_mismatch:
+ force = True
+
+ # download to tmpsrc
+ start = datetime.datetime.utcnow()
+ tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ result['src'] = tmpsrc
+
+ # Now the request has completed, we can finally generate the final
+ # destination file name from the info dict.
+
+ if dest_is_dir:
+ filename = extract_filename_from_headers(info)
+ if not filename:
+ # Fall back to extracting the filename from the URL.
+ # Pluck the URL from the info, since a redirect could have changed
+ # it.
+ filename = url_filename(info['url'])
+ dest = os.path.join(dest, filename)
+ result['dest'] = dest
+
+ # raise an error if there is no tmpsrc file
+ if not os.path.exists(tmpsrc):
+ module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], **result)
+ if not os.access(tmpsrc, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Source %s is not readable" % (tmpsrc), **result)
+ result['checksum_src'] = module.sha1(tmpsrc)
+
+ # check if there is no dest file
+ if os.path.exists(dest):
+ # raise an error if copy has no permission on dest
+ if not os.access(dest, os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not writable" % (dest), **result)
+ if not os.access(dest, os.R_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not readable" % (dest), **result)
+ result['checksum_dest'] = module.sha1(dest)
+ else:
+ if not os.path.exists(os.path.dirname(dest)):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s does not exist" % (os.path.dirname(dest)), **result)
+ if not os.access(os.path.dirname(dest), os.W_OK):
+ os.remove(tmpsrc)
+ module.fail_json(msg="Destination %s is not writable" % (os.path.dirname(dest)), **result)
+
+ if module.check_mode:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ result['changed'] = ('checksum_dest' not in result or
+ result['checksum_src'] != result['checksum_dest'])
+ module.exit_json(msg=info.get('msg', ''), **result)
+
+ backup_file = None
+ if result['checksum_src'] != result['checksum_dest']:
+ try:
+ if backup:
+ if os.path.exists(dest):
+ backup_file = module.backup_local(dest)
+ module.atomic_move(tmpsrc, dest)
+ except Exception as e:
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)),
+ exception=traceback.format_exc(), **result)
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ if os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+
+ if checksum != '':
+ destination_checksum = module.digest_from_file(dest, algorithm)
+
+ if checksum != destination_checksum:
+ os.remove(dest)
+ module.fail_json(msg="The checksum for %s did not match %s; it was %s." % (dest, checksum, destination_checksum), **result)
+
+ # allow file attribute changes
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'])
+
+ # Backwards compat only. We'll return None on FIPS enabled systems
+ try:
+ result['md5sum'] = module.md5(dest)
+ except ValueError:
+ result['md5sum'] = None
+
+ if backup_file:
+ result['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(msg=info.get('msg', ''), status_code=info.get('status', ''), **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/getent.py b/lib/ansible/modules/getent.py
new file mode 100644
index 00000000..1997aa0d
--- /dev/null
+++ b/lib/ansible/modules/getent.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Brian Coca <brian.coca+dev@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: getent
+short_description: A wrapper to the unix getent utility
+description:
+  - Runs getent against one of its various databases and returns information into
+ the host's facts, in a getent_<database> prefixed variable.
+version_added: "1.8"
+options:
+ database:
+ description:
+ - The name of a getent database supported by the target system (passwd, group,
+ hosts, etc).
+ required: True
+ key:
+ description:
+ - Key from which to return values from the specified database, otherwise the
+ full contents are returned.
+ default: ''
+ service:
+ description:
+      - Override all databases with the specified service.
+      - The underlying system must support the service flag, which is not always available.
+ version_added: "2.9"
+ split:
+ description:
+ - "Character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database."
+ fail_key:
+ description:
+      - If C(yes), the task will fail if the supplied key is missing.
+ type: bool
+ default: 'yes'
+
+notes:
+ - Not all databases support enumeration, check system documentation for details.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Get root user info
+ getent:
+ database: passwd
+ key: root
+- debug:
+ var: getent_passwd
+
+- name: Get all groups
+ getent:
+ database: group
+ split: ':'
+- debug:
+ var: getent_group
+
+- name: Get all hosts, split by tab
+ getent:
+ database: hosts
+- debug:
+ var: getent_hosts
+
+- name: Get http service info, no error if missing
+ getent:
+ database: services
+ key: http
+ fail_key: False
+- debug:
+ var: getent_services
+
+- name: Get user password hash (requires sudo/root)
+ getent:
+ database: shadow
+ key: www-data
+ split: ':'
+- debug:
+ var: getent_shadow
+
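+# The service option (added in 2.9) is only honoured where the underlying
+# getent supports the service flag; the 'files' service below is an
+# illustrative assumption.
+- name: Get hosts entries from the files service only
+  getent:
+    database: hosts
+    service: files
+- debug:
+    var: getent_hosts
+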
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ database=dict(type='str', required=True),
+ key=dict(type='str'),
+ service=dict(type='str'),
+ split=dict(type='str'),
+ fail_key=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ colon = ['passwd', 'shadow', 'group', 'gshadow']
+
+ database = module.params['database']
+ key = module.params.get('key')
+ split = module.params.get('split')
+ service = module.params.get('service')
+ fail_key = module.params.get('fail_key')
+
+ getent_bin = module.get_bin_path('getent', True)
+
+ if key is not None:
+ cmd = [getent_bin, database, key]
+ else:
+ cmd = [getent_bin, database]
+
+ if service is not None:
+ cmd.extend(['-s', service])
+
+ if split is None and database in colon:
+ split = ':'
+
+ try:
+ rc, out, err = module.run_command(cmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ msg = "Unexpected failure!"
+ dbtree = 'getent_%s' % database
+ results = {dbtree: {}}
+
+ if rc == 0:
+ for line in out.splitlines():
+ record = line.split(split)
+ results[dbtree][record[0]] = record[1:]
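+        # Illustrative shape of the returned facts for `database=passwd
+        # key=root` (field values are assumptions):
+        #   getent_passwd:
+        #     root: ['x', '0', '0', 'root', '/root', '/bin/bash']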
+
+ module.exit_json(ansible_facts=results)
+
+ elif rc == 1:
+ msg = "Missing arguments, or database unknown."
+ elif rc == 2:
+        msg = "One or more supplied keys could not be found in the database."
+ if not fail_key:
+ results[dbtree][key] = None
+ module.exit_json(ansible_facts=results, msg=msg)
+ elif rc == 3:
+ msg = "Enumeration not supported on this database."
+
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/git.py b/lib/ansible/modules/git.py
new file mode 100644
index 00000000..b47e9451
--- /dev/null
+++ b/lib/ansible/modules/git.py
@@ -0,0 +1,1277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+version_added: "0.0.1"
+short_description: Deploy software (or files) from git checkouts
+description:
+ - Manage I(git) checkouts of repositories to deploy files or software.
+options:
+ repo:
+ description:
+ - git, SSH, or HTTP(S) protocol address of the git repository.
+ required: true
+ aliases: [ name ]
+ dest:
+ description:
+      - The path where the repository should be checked out. This
+ parameter is required, unless C(clone) is set to C(no).
+ required: true
+ version:
+ description:
+ - What version of the repository to check out. This can be
+        the literal string C(HEAD), a branch name, or a tag name.
+ It can also be a I(SHA-1) hash, in which case C(refspec) needs
+ to be specified if the given revision is not already available.
+ default: "HEAD"
+ accept_hostkey:
+ description:
+ - if C(yes), ensure that "-o StrictHostKeyChecking=no" is
+ present as an ssh option.
+ type: bool
+ default: 'no'
+ version_added: "1.5"
+ ssh_opts:
+ description:
+      - Creates a wrapper script and exports the path as GIT_SSH,
+ which git then automatically uses to override ssh arguments.
+ An example value could be "-o StrictHostKeyChecking=no"
+ (although this particular option is better set via
+ C(accept_hostkey)).
+ version_added: "1.5"
+ key_file:
+ description:
+ - Specify an optional private key file path, on the target host, to use for the checkout.
+ version_added: "1.5"
+ reference:
+ description:
+ - Reference repository (see "git clone --reference ...")
+ version_added: "1.4"
+ remote:
+ description:
+ - Name of the remote.
+ default: "origin"
+ refspec:
+ description:
+ - Add an additional refspec to be fetched.
+ If version is set to a I(SHA-1) not reachable from any branch
+ or tag, this option may be necessary to specify the ref containing
+ the I(SHA-1).
+ Uses the same syntax as the 'git fetch' command.
+ An example value could be "refs/meta/config".
+ version_added: "1.9"
+ force:
+ description:
+ - If C(yes), any modified files in the working
+ repository will be discarded. Prior to 0.7, this was always
+        C(yes) and could not be disabled. Prior to 1.9, the default was
+        C(yes).
+ type: bool
+ default: 'no'
+ version_added: "0.7"
+ depth:
+ description:
+ - Create a shallow clone with a history truncated to the specified
+        number of revisions. The minimum possible value is C(1), otherwise
+ ignored. Needs I(git>=1.9.1) to work correctly.
+ version_added: "1.2"
+ clone:
+ description:
+      - If C(no), do not clone the repository even if it does not exist locally.
+ type: bool
+ default: 'yes'
+ version_added: "1.9"
+ update:
+ description:
+      - If C(no), do not retrieve new revisions from the origin repository.
+      - Operations like archive will work on the existing (old) repository and might
+        not respond to changes to the I(version) or I(remote) options.
+ type: bool
+ default: 'yes'
+ version_added: "1.2"
+ executable:
+ description:
+ - Path to git executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ version_added: "1.4"
+ bare:
+ description:
+ - if C(yes), repository will be created as a bare repo, otherwise
+ it will be a standard repo with a workspace.
+ type: bool
+ default: 'no'
+ version_added: "1.4"
+ umask:
+ description:
+ - The umask to set before doing any checkouts, or any other
+ repository maintenance.
+ version_added: "2.2"
+
+ recursive:
+ description:
+ - if C(no), repository will be cloned without the --recursive
+ option, skipping sub-modules.
+ type: bool
+ default: 'yes'
+ version_added: "1.6"
+
+ track_submodules:
+ description:
+ - if C(yes), submodules will track the latest commit on their
+ master branch (or other branch specified in .gitmodules). If
+ C(no), submodules will be kept at the revision specified by the
+ main project. This is equivalent to specifying the --remote flag
+ to git submodule update.
+ type: bool
+ default: 'no'
+ version_added: "1.8"
+
+ verify_commit:
+ description:
+ - if C(yes), when cloning or checking out a C(version) verify the
+ signature of a GPG signed commit. This requires C(git) version>=2.1.0
+ to be installed. The commit MUST be signed and the public key MUST
+ be present in the GPG keyring.
+ type: bool
+ default: 'no'
+ version_added: "2.0"
+
+ archive:
+ description:
+ - Specify archive file path with extension. If specified, creates an
+ archive file of the specified format containing the tree structure
+ for the source tree.
+ Allowed archive formats ["zip", "tar.gz", "tar", "tgz"]
+      - This will clone and perform git archive from the local directory, as not
+ all git servers support git archive.
+ version_added: "2.4"
+
+ archive_prefix:
+ description:
+ - Specify a prefix to add to each file path in archive. Requires C(archive) to be specified.
+ version_added: "2.10"
+ type: str
+
+ separate_git_dir:
+ description:
+ - The path to place the cloned repository. If specified, Git repository
+ can be separated from working tree.
+ version_added: "2.7"
+
+ gpg_whitelist:
+ description:
+ - A list of trusted GPG fingerprints to compare to the fingerprint of the
+ GPG-signed commit.
+ - Only used when I(verify_commit=yes).
+ type: list
+ default: []
+ version_added: "2.9"
+
+requirements:
+ - git>=1.7.1 (the command line tool)
+
+notes:
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to use the option accept_hostkey. Another solution is to
+ add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
+'''
+
+EXAMPLES = '''
+- name: Git checkout
+ git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ version: release-0.22
+
+- name: Read-write git checkout from github
+ git:
+ repo: git@github.com:mylogin/hello.git
+ dest: /home/mylogin/hello
+
+- name: Just ensuring the repo checkout exists
+ git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ update: no
+
+- name: Just get information about the repository whether or not it has already been cloned locally
+ git:
+ repo: 'https://foosball.example.org/path/to/repo.git'
+ dest: /srv/checkout
+ clone: no
+ update: no
+
+- name: Checkout a github repo and use refspec to fetch all pull requests
+ git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ refspec: '+refs/pull/*:refs/heads/*'
+
+- name: Create git archive from repo
+ git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ archive: /tmp/ansible-examples.zip
+
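+# archive_prefix (added in 2.10, requires archive) prepends a path to every
+# entry inside the archive; the prefix value below is an illustrative
+# assumption.
+- name: Create git archive from repo with a prefix
+  git:
+    repo: https://github.com/ansible/ansible-examples.git
+    dest: /src/ansible-examples
+    archive: /tmp/ansible-examples.tar.gz
+    archive_prefix: ansible-examples/
+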
+- name: Clone a repo with separate git directory
+ git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: /src/ansible-examples
+ separate_git_dir: /src/ansible-examples.git
+'''
+
+RETURN = '''
+after:
+ description: last commit revision of the repository retrieved during the update
+ returned: success
+ type: str
+ sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
+before:
+ description: commit revision before the repository was updated, "null" for new repository
+ returned: success
+ type: str
+ sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
+remote_url_changed:
+  description: Whether or not the remote URL was changed.
+ returned: success
+ type: bool
+ sample: True
+warnings:
+  description: List of warnings if requested features were not available due to a git version that is too old.
+ returned: error
+ type: str
+ sample: Your git version is too old to fully support the depth argument. Falling back to full checkouts.
+git_dir_now:
+  description: Contains the new path of the .git directory if it changed.
+ returned: success
+ type: str
+ sample: /path/to/new/git/dir
+git_dir_before:
+  description: Contains the original path of the .git directory if it changed.
+ returned: success
+ type: str
+ sample: /path/to/old/git/dir
+'''
+
+import filecmp
+import os
+import re
+import shlex
+import stat
+import sys
+import shutil
+import tempfile
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import b, string_types
+from ansible.module_utils._text import to_native, to_text
+
+
+def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
+ if os.path.exists(repo_dir):
+ module.fail_json(msg='Separate-git-dir path %s already exists.' % repo_dir)
+ if worktree_dir:
+ dot_git_file_path = os.path.join(worktree_dir, '.git')
+ try:
+ shutil.move(old_repo_dir, repo_dir)
+ with open(dot_git_file_path, 'w') as dot_git_file:
+ dot_git_file.write('gitdir: %s' % repo_dir)
+ result['git_dir_before'] = old_repo_dir
+ result['git_dir_now'] = repo_dir
+ except (IOError, OSError) as err:
+ # if we already moved the .git dir, roll it back
+ if os.path.exists(repo_dir):
+ shutil.move(repo_dir, old_repo_dir)
+ module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))
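+# The .git entry written above is git's standard one-line gitdir pointer
+# file, e.g. (the path is an illustrative assumption):
+#   gitdir: /src/ansible-examples.git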
+
+
+def head_splitter(headfile, remote, module=None, fail_on_error=False):
+ '''Extract the head reference'''
+ # https://github.com/ansible/ansible-modules-core/pull/907
+
+ res = None
+ if os.path.exists(headfile):
+ rawdata = None
+ try:
+ f = open(headfile, 'r')
+ rawdata = f.readline()
+ f.close()
+ except Exception:
+ if fail_on_error and module:
+ module.fail_json(msg="Unable to read %s" % headfile)
+ if rawdata:
+ try:
+ rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
+ refparts = rawdata.split(' ')
+ newref = refparts[-1]
+ nrefparts = newref.split('/', 2)
+ res = nrefparts[-1].rstrip('\n')
+ except Exception:
+ if fail_on_error and module:
+ module.fail_json(msg="Unable to split head from '%s'" % rawdata)
+ return res
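+# Illustrative inputs (file contents are assumptions): a HEAD file containing
+# 'ref: refs/remotes/origin/main' with remote='origin' yields 'main', and a
+# plain 'ref: refs/heads/master' yields 'master'.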
+
+
+def unfrackgitpath(path):
+ if path is None:
+ return None
+
+ # copied from ansible.utils.path
+ return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
+
+
+def get_submodule_update_params(module, git_path, cwd):
+ # or: git submodule [--quiet] update [--init] [-N|--no-fetch]
+ # [-f|--force] [--rebase] [--reference <repository>] [--merge]
+ # [--recursive] [--] [<path>...]
+
+ params = []
+
+ # run a bad submodule command to get valid params
+ cmd = "%s submodule update --help" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
+ lines = stderr.split('\n')
+ update_line = None
+ for line in lines:
+ if 'git submodule [--quiet] update ' in line:
+ update_line = line
+ if update_line:
+ update_line = update_line.replace('[', '')
+ update_line = update_line.replace(']', '')
+ update_line = update_line.replace('|', ' ')
+ parts = shlex.split(update_line)
+ for part in parts:
+ if part.startswith('--'):
+ part = part.replace('--', '')
+ params.append(part)
+
+ return params
+
+
+def write_ssh_wrapper(module_tmpdir):
+ try:
+ # make sure we have full permission to the module_dir, which
+ # may not be the case if we're sudo'ing to a non-root user
+ if os.access(module_tmpdir, os.W_OK | os.R_OK | os.X_OK):
+ fd, wrapper_path = tempfile.mkstemp(prefix=module_tmpdir + '/')
+ else:
+ raise OSError
+ except (IOError, OSError):
+ fd, wrapper_path = tempfile.mkstemp()
+ fh = os.fdopen(fd, 'w+b')
+ template = b("""#!/bin/sh
+if [ -z "$GIT_SSH_OPTS" ]; then
+ BASEOPTS=""
+else
+ BASEOPTS=$GIT_SSH_OPTS
+fi
+
+# Let ssh fail rather than prompt
+BASEOPTS="$BASEOPTS -o BatchMode=yes"
+
+if [ -z "$GIT_KEY" ]; then
+ ssh $BASEOPTS "$@"
+else
+ ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
+fi
+""")
+ fh.write(template)
+ fh.close()
+ st = os.stat(wrapper_path)
+ os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
+ return wrapper_path
+
+
+def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
+
+ if os.environ.get("GIT_SSH"):
+ del os.environ["GIT_SSH"]
+ os.environ["GIT_SSH"] = ssh_wrapper
+
+ if os.environ.get("GIT_KEY"):
+ del os.environ["GIT_KEY"]
+
+ if key_file:
+ os.environ["GIT_KEY"] = key_file
+
+ if os.environ.get("GIT_SSH_OPTS"):
+ del os.environ["GIT_SSH_OPTS"]
+
+ if ssh_opts:
+ os.environ["GIT_SSH_OPTS"] = ssh_opts
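+# Illustrative effect of set_git_ssh (argument values are assumptions): after
+# set_git_ssh(wrapper_path, '/home/user/.ssh/id_ed25519', '-o StrictHostKeyChecking=no'),
+# git subprocesses see GIT_SSH=<wrapper_path>, GIT_KEY=/home/user/.ssh/id_ed25519
+# and GIT_SSH_OPTS='-o StrictHostKeyChecking=no'; the wrapper script above
+# turns these into the corresponding ssh invocation.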
+
+
+def get_version(module, git_path, dest, ref="HEAD"):
+ ''' samples the version of the git repo '''
+
+ cmd = "%s rev-parse %s" % (git_path, ref)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ sha = to_native(stdout).rstrip('\n')
+ return sha
+
+
+def get_submodule_versions(git_path, module, dest, version='HEAD'):
+ cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(
+ msg='Unable to determine hashes of submodules',
+ stdout=out,
+ stderr=err,
+ rc=rc)
+ submodules = {}
+ subm_name = None
+ for line in out.splitlines():
+ if line.startswith("Entering '"):
+ subm_name = line[10:-1]
+ elif len(line.strip()) == 40:
+ if subm_name is None:
+ module.fail_json()
+ submodules[subm_name] = line.strip()
+ subm_name = None
+ else:
+ module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
+ if subm_name is not None:
+ module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
+
+ return submodules
+
+
+def clone(git_path, module, repo, dest, remote, depth, version, bare,
+ reference, refspec, verify_commit, separate_git_dir, result, gpg_whitelist):
+ ''' makes a new git repo if it does not already exist '''
+ dest_dirname = os.path.dirname(dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ cmd = [git_path, 'clone']
+
+ if bare:
+ cmd.append('--bare')
+ else:
+ cmd.extend(['--origin', remote])
+ if depth:
+ if version == 'HEAD' or refspec:
+ cmd.extend(['--depth', str(depth)])
+ elif is_remote_branch(git_path, module, dest, repo, version) \
+ or is_remote_tag(git_path, module, dest, repo, version):
+ cmd.extend(['--depth', str(depth)])
+ cmd.extend(['--branch', version])
+ else:
+ # only use depth if the remote object is branch or tag (i.e. fetchable)
+ module.warn("Ignoring depth argument. "
+ "Shallow clones are only available for "
+ "HEAD, branches, tags or in combination with refspec.")
+ if reference:
+ cmd.extend(['--reference', str(reference)])
+ needs_separate_git_dir_fallback = False
+
+ if separate_git_dir:
+ git_version_used = git_version(git_path, module)
+ if git_version_used is None:
+ module.fail_json(msg='Can not find git executable at %s' % git_path)
+ if git_version_used < LooseVersion('1.7.5'):
+ # git before 1.7.5 doesn't have separate-git-dir argument, do fallback
+ needs_separate_git_dir_fallback = True
+ else:
+ cmd.append('--separate-git-dir=%s' % separate_git_dir)
+
+ cmd.extend([repo, dest])
+ module.run_command(cmd, check_rc=True, cwd=dest_dirname)
+ if needs_separate_git_dir_fallback:
+ relocate_repo(module, result, separate_git_dir, os.path.join(dest, ".git"), dest)
+
+ if bare and remote != 'origin':
+ module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
+
+ if refspec:
+ cmd = [git_path, 'fetch']
+ if depth:
+ cmd.extend(['--depth', str(depth)])
+ cmd.extend([remote, refspec])
+ module.run_command(cmd, check_rc=True, cwd=dest)
+
+ if verify_commit:
+ verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+
+
+def has_local_mods(module, git_path, dest, bare):
+ if bare:
+ return False
+
+ cmd = "%s status --porcelain" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ lines = stdout.splitlines()
+ lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
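+    # `git status --porcelain` prints one 'XY path' record per change; the
+    # filter above drops untracked files ('?? path'), so e.g. ' M lib/foo.py'
+    # counts as a local modification while '?? new.txt' does not
+    # (paths are illustrative).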
+
+ return len(lines) > 0
+
+
+def reset(git_path, module, dest):
+ '''
+ Resets the index and working tree to HEAD.
+ Discards any changes to tracked files in working
+ tree since that commit.
+ '''
+ cmd = "%s reset --hard HEAD" % (git_path,)
+ return module.run_command(cmd, check_rc=True, cwd=dest)
+
+
+def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
+ ''' Return the difference between 2 versions '''
+ if before is None:
+ return {'prepared': '>> Newly checked out %s' % after}
+ elif before != after:
+ # Ensure we have the object we are referring to during git diff !
+ git_version_used = git_version(git_path, module)
+ fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
+ cmd = '%s diff %s %s' % (git_path, before, after)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc == 0 and out:
+ return {'prepared': out}
+ elif rc == 0:
+ return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
+ elif err:
+ return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
+ else:
+ return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
+ return {}
+
+
+def get_remote_head(git_path, module, dest, version, remote, bare):
+ cloning = False
+ cwd = None
+ tag = False
+ if remote == module.params['repo']:
+ cloning = True
+ elif remote == 'file://' + os.path.expanduser(module.params['repo']):
+ cloning = True
+ else:
+ cwd = dest
+ if version == 'HEAD':
+ if cloning:
+ # cloning the repo, just get the remote's HEAD version
+ cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
+ else:
+ head_branch = get_head_branch(git_path, module, dest, remote, bare)
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
+ elif is_remote_branch(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
+ elif is_remote_tag(git_path, module, dest, remote, version):
+ tag = True
+ cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
+ else:
+        # appears to be a sha1. return as-is since we cannot check
+        # for a specific sha1 on the remote
+ return version
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
+ if len(out) < 1:
+ module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
+
+ out = to_native(out)
+
+ if tag:
+ # Find the dereferenced tag if this is an annotated tag.
+ for tag in out.split('\n'):
+ if tag.endswith(version + '^{}'):
+ out = tag
+ break
+ elif tag.endswith(version):
+ out = tag
+
+ rev = out.split()[0]
+ return rev
+
+
+def is_remote_tag(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if to_native(version, errors='surrogate_or_strict') in out:
+ return True
+ else:
+ return False
+
+
+def get_branches(git_path, module, dest):
+ branches = []
+ cmd = '%s branch --no-color -a' % (git_path,)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
+ for line in out.split('\n'):
+ if line.strip():
+ branches.append(line.strip())
+ return branches
+
+
+def get_annotated_tags(git_path, module, dest):
+ tags = []
+ cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
+ for line in to_native(out).split('\n'):
+ if line.strip():
+ tagtype, tagname = line.strip().split(':')
+ if tagtype == 'tag':
+ tags.append(tagname)
+ return tags
+
+
+def is_remote_branch(git_path, module, dest, remote, version):
+ cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if to_native(version, errors='surrogate_or_strict') in out:
+ return True
+ else:
+ return False
+
+
+def is_local_branch(git_path, module, dest, branch):
+ branches = get_branches(git_path, module, dest)
+ lbranch = '%s' % branch
+ if lbranch in branches:
+ return True
+ elif '* %s' % branch in branches:
+ return True
+ else:
+ return False
+
+
+def is_not_a_branch(git_path, module, dest):
+ branches = get_branches(git_path, module, dest)
+ for branch in branches:
+ if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch or 'detached at' in branch):
+ return True
+ return False
+
+
+def get_repo_path(dest, bare):
+ if bare:
+ repo_path = dest
+ else:
+ repo_path = os.path.join(dest, '.git')
+        # Check if .git is a file. If it is, the repository lives in an external
+        # directory relative to the working copy (e.g. we are in a
+        # submodule structure).
+ if os.path.isfile(repo_path):
+ with open(repo_path, 'r') as gitfile:
+ data = gitfile.read()
+ ref_prefix, gitdir = data.rstrip().split('gitdir: ', 1)
+ if ref_prefix:
+ raise ValueError('.git file has invalid git dir reference format')
+
+        # The .git file may contain an absolute path.
+ if os.path.isabs(gitdir):
+ repo_path = gitdir
+ else:
+ repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
+ if not os.path.isdir(repo_path):
+ raise ValueError('%s is not a directory' % repo_path)
+ return repo_path
+
+
+def get_head_branch(git_path, module, dest, remote, bare=False):
+ '''
+ Determine what branch HEAD is associated with. This is partly
+ taken from lib/ansible/utils/__init__.py. It finds the correct
+ path to .git/HEAD and reads from that file the branch that HEAD is
+ associated with. In the case of a detached HEAD, this will look
+ up the branch in .git/refs/remotes/<remote>/HEAD.
+ '''
+ try:
+ repo_path = get_repo_path(dest, bare)
+ except (IOError, ValueError) as err:
+        # No repo path found: the ``.git`` file does not have a valid
+        # format for a detached Git dir.
+ module.fail_json(
+ msg='Current repo does not have a valid reference to a '
+ 'separate Git dir or it refers to the invalid path',
+ details=to_text(err),
+ )
+ # Read .git/HEAD for the name of the branch.
+ # If we're in a detached HEAD state, look up the branch associated with
+ # the remote HEAD in .git/refs/remotes/<remote>/HEAD
+ headfile = os.path.join(repo_path, "HEAD")
+ if is_not_a_branch(git_path, module, dest):
+ headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
+ branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
+ return branch
+
+
+def get_remote_url(git_path, module, dest, remote):
+ '''Return URL of remote source for repo.'''
+ command = [git_path, 'ls-remote', '--get-url', remote]
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ # There was an issue getting remote URL, most likely
+ # command is not available in this version of Git.
+ return None
+ return to_native(out).rstrip('\n')
+
+
+def set_remote_url(git_path, module, repo, dest, remote):
+ ''' updates repo from remote sources '''
+ # Return if remote URL isn't changing.
+ remote_url = get_remote_url(git_path, module, dest, remote)
+ if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
+ return False
+
+ command = [git_path, 'remote', 'set-url', remote, repo]
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ label = "set a new url %s for %s" % (repo, remote)
+ module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
+
+ # Return False if remote_url is None to maintain previous behavior
+ # for Git versions prior to 1.7.5 that lack required functionality.
+ return remote_url is not None
+
+
+def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False):
+ ''' updates repo from remote sources '''
+ set_remote_url(git_path, module, repo, dest, remote)
+ commands = []
+
+ fetch_str = 'download remote objects and refs'
+ fetch_cmd = [git_path, 'fetch']
+
+ refspecs = []
+ if depth:
+ # try to find the minimal set of refs we need to fetch to get a
+ # successful checkout
+ currenthead = get_head_branch(git_path, module, dest, remote)
+ if refspec:
+ refspecs.append(refspec)
+ elif version == 'HEAD':
+ refspecs.append(currenthead)
+ elif is_remote_branch(git_path, module, dest, repo, version):
+ if currenthead != version:
+ # this workaround is only needed for older git versions
+ # 1.8.3 is broken, 1.9.x works
+ # ensure that remote branch is available as both local and remote ref
+ refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
+ refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
+ elif is_remote_tag(git_path, module, dest, repo, version):
+ refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
+ if refspecs:
+            # version is a branch head, a tag, or covered by an explicit
+            # refspec, so a shallow fetch of just those refs is enough;
+            # if refspecs were empty (version is presumably a commit hash)
+            # we fall through to the full fetch below, since otherwise we
+            # might not be able to check out version
+ fetch_cmd.extend(['--depth', str(depth)])
+
+ if not depth or not refspecs:
+ # don't try to be minimalistic but do a full clone
+ # also do this if depth is given, but version is something that can't be fetched directly
+ if bare:
+ refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
+ else:
+ # ensure all tags are fetched
+ if git_version_used >= LooseVersion('1.9'):
+ fetch_cmd.append('--tags')
+ else:
+ # old git versions have a bug in --tags that prevents updating existing tags
+ commands.append((fetch_str, fetch_cmd + [remote]))
+ refspecs = ['+refs/tags/*:refs/tags/*']
+ if refspec:
+ refspecs.append(refspec)
+
+ if force:
+ fetch_cmd.append('--force')
+
+ fetch_cmd.extend([remote])
+
+ commands.append((fetch_str, fetch_cmd + refspecs))
+
+ for (label, command) in commands:
+ (rc, out, err) = module.run_command(command, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
+
+
+def submodules_fetch(git_path, module, remote, track_submodules, dest):
+ changed = False
+
+ if not os.path.exists(os.path.join(dest, '.gitmodules')):
+ # no submodules
+ return changed
+
+ gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
+ for line in gitmodules_file:
+ # Check for new submodules
+ if not changed and line.strip().startswith('path'):
+ path = line.split('=', 1)[1].strip()
+ # Check that dest/path/.git exists
+ if not os.path.exists(os.path.join(dest, path, '.git')):
+ changed = True
+
+ # Check for updates to existing modules
+ if not changed:
+ # Fetch updates
+ begin = get_submodule_versions(git_path, module, dest)
+ cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
+
+ if track_submodules:
+ # Compare against submodule HEAD
+ # FIXME: determine this from .gitmodules
+ version = 'master'
+ after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
+ if begin != after:
+ changed = True
+ else:
+ # Compare against the superproject's expectation
+ cmd = [git_path, 'submodule', 'status']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
+ for line in out.splitlines():
+ if line[0] != ' ':
+ changed = True
+ break
+ return changed
+
+
+def submodule_update(git_path, module, dest, track_submodules, force=False):
+ ''' init and update any submodules '''
+
+ # get the valid submodule params
+ params = get_submodule_update_params(module, git_path, dest)
+
+ # skip submodule commands if .gitmodules is not present
+ if not os.path.exists(os.path.join(dest, '.gitmodules')):
+ return (0, '', '')
+ cmd = [git_path, 'submodule', 'sync']
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if 'remote' in params and track_submodules:
+ cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
+ else:
+ cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
+ if force:
+ cmd.append('--force')
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
+ return (rc, out, err)
+
+
+def set_remote_branch(git_path, module, dest, remote, version, depth):
+ """set refs for the remote branch version
+
+ This assumes the branch does not yet exist locally and is therefore also not checked out.
+ Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
+ """
+
+ branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
+ branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
+ cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
+
+
+def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist):
+ cmd = ''
+ if version == 'HEAD':
+ branch = get_head_branch(git_path, module, dest, remote)
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % branch,
+ stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
+ else:
+ # FIXME check for local_branch first, should have been fetched already
+ if is_remote_branch(git_path, module, dest, remote, version):
+ if depth and not is_local_branch(git_path, module, dest, version):
+ # git clone --depth implies --single-branch, which makes
+ # the checkout fail if the version changes
+ # fetch the remote branch, to be able to check it out next
+ set_remote_branch(git_path, module, dest, remote, version, depth)
+ if not is_local_branch(git_path, module, dest, version):
+ cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
+ else:
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
+ cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
+ else:
+ cmd = "%s checkout --force %s" % (git_path, version)
+ (rc, out1, err1) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ if version != 'HEAD':
+ module.fail_json(msg="Failed to checkout %s" % (version),
+ stdout=out1, stderr=err1, rc=rc, cmd=cmd)
+ else:
+ module.fail_json(msg="Failed to checkout branch %s" % (branch),
+ stdout=out1, stderr=err1, rc=rc, cmd=cmd)
+
+ if verify_commit:
+ verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
+
+ return (rc, out1, err1)
+
+
+def verify_commit_sign(git_path, module, dest, version, gpg_whitelist):
+ if version in get_annotated_tags(git_path, module, dest):
+ git_sub = "verify-tag"
+ else:
+ git_sub = "verify-commit"
+ cmd = "%s %s %s --raw" % (git_path, git_sub, version)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
+ if gpg_whitelist:
+ fingerprint = get_gpg_fingerprint(err)
+ if fingerprint not in gpg_whitelist:
+ module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
+ return (rc, out, err)
+
+
+def get_gpg_fingerprint(output):
+ """Return a fingerprint of the primary key.
+
+ Ref:
+ https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;hb=HEAD#l482
+ """
+ for line in output.splitlines():
+ data = line.split()
+        if len(data) < 2 or data[1] != 'VALIDSIG':
+ continue
+
+ # if signed with a subkey, this contains the primary key fingerprint
+ data_id = 11 if len(data) == 11 else 2
+ return data[data_id]
+
+
+def git_version(git_path, module):
+ """return the installed version of git"""
+ cmd = "%s --version" % git_path
+ (rc, out, err) = module.run_command(cmd)
+ if rc != 0:
+ # one could fail_json here, but the version info is not that important,
+ # so let's try to fail only on actual git commands
+ return None
+ rematch = re.search('git version (.*)$', to_native(out))
+ if not rematch:
+ return None
+ return LooseVersion(rematch.groups()[0])
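+# Illustrative parse (the output line is an assumption): `git --version`
+# printing 'git version 2.25.1' yields LooseVersion('2.25.1'), which supports
+# ordered comparisons such as LooseVersion('2.25.1') >= LooseVersion('1.9.1')
+# as used elsewhere in this module.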
+
+
+def git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version):
+ """ Create git archive in given source directory """
+ cmd = [git_path, 'archive', '--format', archive_fmt, '--output', archive, version]
+ if archive_prefix is not None:
+ cmd.insert(-1, '--prefix')
+ cmd.insert(-1, archive_prefix)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
+ if rc != 0:
+ module.fail_json(msg="Failed to perform archive operation",
+ details="Git archive command failed to create "
+                                 "archive %s using %s directory. "
+ "Error: %s" % (archive, dest, err))
+ return rc, out, err
+
+
+def create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result):
+ """ Helper function for creating archive using git_archive """
+ all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
+ '.tgz': 'tgz'}
+ _, archive_ext = os.path.splitext(archive)
+ archive_fmt = all_archive_fmt.get(archive_ext, None)
+ if archive_fmt is None:
+ module.fail_json(msg="Unable to get file extension from "
+ "archive file name : %s" % archive,
+ details="Please specify archive as filename with "
+ "extension. File extension can be one "
+ "of ['tar', 'tar.gz', 'zip', 'tgz']")
+
+ repo_name = repo.split("/")[-1].replace(".git", "")
+
+ if os.path.exists(archive):
+ # If git archive file exists, then compare it with new git archive file.
+ # if match, do nothing
+ # if does not match, then replace existing with temp archive file.
+ tempdir = tempfile.mkdtemp()
+ new_archive_dest = os.path.join(tempdir, repo_name)
+ new_archive = new_archive_dest + '.' + archive_fmt
+ git_archive(git_path, module, dest, new_archive, archive_fmt, archive_prefix, version)
+
+        # filecmp is supposed to be more efficient than an md5sum checksum
+ if filecmp.cmp(new_archive, archive):
+ result.update(changed=False)
+ # Cleanup before exiting
+ try:
+ shutil.rmtree(tempdir)
+ except OSError:
+ pass
+ else:
+ try:
+ shutil.move(new_archive, archive)
+ shutil.rmtree(tempdir)
+ result.update(changed=True)
+ except OSError as e:
+ module.fail_json(msg="Failed to move %s to %s" %
+ (new_archive, archive),
+ details=u"Error occurred while moving : %s"
+ % to_text(e))
+ else:
+ # Perform archive from local directory
+ git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version)
+ result.update(changed=True)
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path'),
+ repo=dict(required=True, aliases=['name']),
+ version=dict(default='HEAD'),
+ remote=dict(default='origin'),
+ refspec=dict(default=None),
+ reference=dict(default=None),
+ force=dict(default='no', type='bool'),
+ depth=dict(default=None, type='int'),
+ clone=dict(default='yes', type='bool'),
+ update=dict(default='yes', type='bool'),
+ verify_commit=dict(default='no', type='bool'),
+ gpg_whitelist=dict(default=[], type='list'),
+ accept_hostkey=dict(default='no', type='bool'),
+ key_file=dict(default=None, type='path', required=False),
+ ssh_opts=dict(default=None, required=False),
+ executable=dict(default=None, type='path'),
+ bare=dict(default='no', type='bool'),
+ recursive=dict(default='yes', type='bool'),
+ track_submodules=dict(default='no', type='bool'),
+ umask=dict(default=None, type='raw'),
+ archive=dict(type='path'),
+ archive_prefix=dict(),
+ separate_git_dir=dict(type='path'),
+ ),
+ mutually_exclusive=[('separate_git_dir', 'bare')],
+ required_by={'archive_prefix': ['archive']},
+ supports_check_mode=True
+ )
+
+ dest = module.params['dest']
+ repo = module.params['repo']
+ version = module.params['version']
+ remote = module.params['remote']
+ refspec = module.params['refspec']
+ force = module.params['force']
+ depth = module.params['depth']
+ update = module.params['update']
+ allow_clone = module.params['clone']
+ bare = module.params['bare']
+ verify_commit = module.params['verify_commit']
+ gpg_whitelist = module.params['gpg_whitelist']
+ reference = module.params['reference']
+ git_path = module.params['executable'] or module.get_bin_path('git', True)
+ key_file = module.params['key_file']
+ ssh_opts = module.params['ssh_opts']
+ umask = module.params['umask']
+ archive = module.params['archive']
+ archive_prefix = module.params['archive_prefix']
+ separate_git_dir = module.params['separate_git_dir']
+
+ result = dict(changed=False, warnings=list())
+
+ if module.params['accept_hostkey']:
+ if ssh_opts is not None:
+ if "-o StrictHostKeyChecking=no" not in ssh_opts:
+ ssh_opts += " -o StrictHostKeyChecking=no"
+ else:
+ ssh_opts = "-o StrictHostKeyChecking=no"
+
+ # evaluate and set the umask before doing anything else
+ if umask is not None:
+ if not isinstance(umask, string_types):
+ module.fail_json(msg="umask must be defined as a quoted octal integer")
+ try:
+ umask = int(umask, 8)
+ except Exception:
+ module.fail_json(msg="umask must be an octal integer",
+ details=str(sys.exc_info()[1]))
+ os.umask(umask)
+
+ # Certain features such as depth require a file:/// protocol for path based urls
+ # so force a protocol here ...
+ if os.path.expanduser(repo).startswith('/'):
+ repo = 'file://' + os.path.expanduser(repo)
+
+ # We screenscrape a huge amount of git commands so use C locale anytime we
+ # call run_command()
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if separate_git_dir:
+ separate_git_dir = os.path.realpath(separate_git_dir)
+
+ gitconfig = None
+ if not dest and allow_clone:
+ module.fail_json(msg="the destination directory must be specified unless clone=no")
+ elif dest:
+ dest = os.path.abspath(dest)
+ try:
+ repo_path = get_repo_path(dest, bare)
+ if separate_git_dir and os.path.exists(repo_path) and separate_git_dir != repo_path:
+ result['changed'] = True
+ if not module.check_mode:
+ relocate_repo(module, result, separate_git_dir, repo_path, dest)
+ repo_path = separate_git_dir
+ except (IOError, ValueError) as err:
+            # No repo path found: the ``.git`` file does not have a valid
+            # format for a detached Git dir.
+ module.fail_json(
+ msg='Current repo does not have a valid reference to a '
+ 'separate Git dir or it refers to the invalid path',
+ details=to_text(err),
+ )
+ gitconfig = os.path.join(repo_path, 'config')
+
+ # create a wrapper script and export
+ # GIT_SSH=<path> as an environment variable
+ # for git to use the wrapper script
+ ssh_wrapper = write_ssh_wrapper(module.tmpdir)
+ set_git_ssh(ssh_wrapper, key_file, ssh_opts)
+ module.add_cleanup_file(path=ssh_wrapper)
+
+ git_version_used = git_version(git_path, module)
+
+ if depth is not None and git_version_used < LooseVersion('1.9.1'):
+ result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
+ depth = None
+
+ recursive = module.params['recursive']
+ track_submodules = module.params['track_submodules']
+
+ result.update(before=None)
+
+ local_mods = False
+ if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
+ # if there is no git configuration, do a clone operation unless:
+ # * the user requested no clone (they just want info)
+ # * we're doing a check mode test
+ # In those cases we do an ls-remote
+ if module.check_mode or not allow_clone:
+ remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
+ result.update(changed=True, after=remote_head)
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+ # there's no git config, so clone
+ clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit, separate_git_dir, result, gpg_whitelist)
+ elif not update:
+        # Just return having found a repo already in the dest path;
+ # this does no checking that the repo is the actual repo
+ # requested.
+ result['before'] = get_version(module, git_path, dest)
+ result.update(after=result['before'])
+ if archive:
+        # Git archive is not supported by all git servers, so
+        # we will first clone and perform git archive from the local directory
+ if module.check_mode:
+ result.update(changed=True)
+ module.exit_json(**result)
+
+ create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
+
+ module.exit_json(**result)
+ else:
+ # else do a pull
+ local_mods = has_local_mods(module, git_path, dest, bare)
+ result['before'] = get_version(module, git_path, dest)
+ if local_mods:
+ # failure should happen regardless of check mode
+ if not force:
+ module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
+ # if force and in non-check mode, do a reset
+ if not module.check_mode:
+ reset(git_path, module, dest)
+ result.update(changed=True, msg='Local modifications exist.')
+
+ # exit if already at desired sha version
+ if module.check_mode:
+ remote_url = get_remote_url(git_path, module, dest, remote)
+ remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
+ else:
+ remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
+ result.update(remote_url_changed=remote_url_changed)
+
+ if module.check_mode:
+ remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
+ result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
+ # FIXME: This diff should fail since the new remote_head is not fetched yet?!
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+ else:
+ fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=force)
+
+ result['after'] = get_version(module, git_path, dest)
+
+ # switch to version specified regardless of whether
+ # we got new revisions from the repository
+ if not bare:
+ switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist)
+
+ # Deal with submodules
+ submodules_updated = False
+ if recursive and not bare:
+ submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
+ if submodules_updated:
+ result.update(submodules_changed=submodules_updated)
+
+ if module.check_mode:
+ result.update(changed=True, after=remote_head)
+ module.exit_json(**result)
+
+ # Switch to version specified
+ submodule_update(git_path, module, dest, track_submodules, force=force)
+
+ # determine if we changed anything
+ result['after'] = get_version(module, git_path, dest)
+
+ if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
+ result.update(changed=True)
+ if module._diff:
+ diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
+ if diff:
+ result['diff'] = diff
+
+ if archive:
+        # Git archive is not supported by all git servers, so
+        # we will first clone and perform git archive from the local directory
+ if module.check_mode:
+ result.update(changed=True)
+ module.exit_json(**result)
+
+ create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/group.py b/lib/ansible/modules/group.py
new file mode 100644
index 00000000..5c25d86f
--- /dev/null
+++ b/lib/ansible/modules/group.py
@@ -0,0 +1,656 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: group
+version_added: "0.0.2"
+short_description: Add or remove groups
+requirements:
+- groupadd
+- groupdel
+- groupmod
+description:
+ - Manage presence of groups on a host.
+ - For Windows targets, use the M(ansible.windows.win_group) module instead.
+options:
+ name:
+ description:
+ - Name of the group to manage.
+ type: str
+ required: true
+ gid:
+ description:
+ - Optional I(GID) to set for the group.
+ type: int
+ state:
+ description:
+ - Whether the group should be present or not on the remote host.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ system:
+ description:
+ - If I(yes), indicates that the group created is a system group.
+ type: bool
+ default: no
+ local:
+ description:
+ - Forces the use of "local" command alternatives on platforms that implement it.
+        - This is useful in environments that use centralized authentication when you want to manipulate the local groups
+          (e.g. it uses C(lgroupadd) instead of C(groupadd)).
+ - This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
+ type: bool
+ default: no
+ version_added: "2.6"
+ non_unique:
+ description:
+        - This option allows changing the group ID to a non-unique value. Requires C(gid).
+ - Not supported on macOS or BusyBox distributions.
+ type: bool
+ default: no
+ version_added: "2.8"
+seealso:
+- module: ansible.builtin.user
+- module: ansible.windows.win_group
+author:
+- Stephen Fromm (@sfromm)
+'''
+
+EXAMPLES = '''
+- name: Ensure group "somegroup" exists
+ group:
+ name: somegroup
+ state: present
+
+- name: Ensure group "docker" exists with correct gid
+ group:
+ name: docker
+ state: present
+ gid: 1750
+
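+# Additional illustrative examples (the group names below are hypothetical,
+# using only options documented above):
+- name: Ensure group "somegroup" is absent
+  group:
+    name: somegroup
+    state: absent
+
+- name: Create a system group
+  group:
+    name: mysystemgroup
+    state: present
+    system: yes
+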
+'''
+
+RETURN = r'''
+gid:
+ description: Group ID of the group.
+ returned: When C(state) is 'present'
+ type: int
+ sample: 1001
+name:
+ description: Group name
+ returned: always
+ type: str
+ sample: users
+state:
+ description: Whether the group is present or not
+ returned: always
+ type: str
+ sample: 'absent'
+system:
+ description: Whether the group is a system group or not
+ returned: When C(state) is 'present'
+ type: bool
+ sample: False
+'''
+
+import grp
+import os
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.sys_info import get_platform_subclass
+
+
+class Group(object):
+ """
+ This is a generic Group manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to override the following action methods:-
+ - group_del()
+ - group_add()
+ - group_mod()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
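+    # Instantiation dispatches to the most specific registered subclass whose
+    # platform/distribution matches the host (e.g. FreeBsdGroup on FreeBSD),
+    # falling back to this generic class.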
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Group)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.gid = module.params['gid']
+ self.system = module.params['system']
+ self.local = module.params['local']
+ self.non_unique = module.params['non_unique']
+
+ def execute_command(self, cmd):
+ return self.module.run_command(cmd)
+
+ def group_del(self):
+ if self.local:
+ command_name = 'lgroupdel'
+ else:
+ command_name = 'groupdel'
+ cmd = [self.module.get_bin_path(command_name, True), self.name]
+ return self.execute_command(cmd)
+
+ def _local_check_gid_exists(self):
+ if self.gid:
+ for gr in grp.getgrall():
+ if self.gid == gr.gr_gid and self.name != gr.gr_name:
+ self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name))
+
+ def group_add(self, **kwargs):
+ if self.local:
+ command_name = 'lgroupadd'
+ self._local_check_gid_exists()
+ else:
+ command_name = 'groupadd'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ elif key == 'system' and kwargs[key] is True:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ if self.local:
+ command_name = 'lgroupmod'
+ self._local_check_gid_exists()
+ else:
+ command_name = 'groupmod'
+ cmd = [self.module.get_bin_path(command_name, True)]
+ info = self.group_info()
+ for key in kwargs:
+ if key == 'gid':
+ if kwargs[key] is not None and info[2] != int(kwargs[key]):
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_exists(self):
+ # The grp module does not distinguish between local and directory accounts.
+        # Its output cannot be used to determine whether or not a group exists locally.
+ # It returns True if the group exists locally or in the directory, so instead
+ # look in the local GROUP file for an existing account.
+ if self.local:
+ if not os.path.exists(self.GROUPFILE):
+ self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE))
+
+ exists = False
+ name_test = '{0}:'.format(self.name)
+ with open(self.GROUPFILE, 'rb') as f:
+ reversed_lines = f.readlines()[::-1]
+ for line in reversed_lines:
+ if line.startswith(to_bytes(name_test)):
+ exists = True
+ break
+
+ if not exists:
+ self.module.warn(
+ "'local: true' specified and group was not found in {file}. "
+ "The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE))
+
+ return exists
+
+ else:
+ try:
+ if grp.getgrnam(self.name):
+ return True
+ except KeyError:
+ return False
+
+ def group_info(self):
+ if not self.group_exists():
+ return False
+ try:
+ info = list(grp.getgrnam(self.name))
+ except KeyError:
+ return False
+ return info
+
+
+# ===========================================
+
+class SunOS(Group):
+ """
+ This is a SunOS Group manipulation class. Solaris doesn't have
+ the 'system' group concept.
+
+ This overrides the following methods from the generic class:-
+ - group_add()
+ """
+
+ platform = 'SunOS'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('-g')
+ cmd.append(str(kwargs[key]))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class AIX(Group):
+ """
+    This is an AIX Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'AIX'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('rmgroup', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('mkgroup', True)]
+ for key in kwargs:
+ if key == 'gid' and kwargs[key] is not None:
+ cmd.append('id=' + str(kwargs[key]))
+ elif key == 'system' and kwargs[key] is True:
+ cmd.append('-a')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('chgroup', True)]
+ info = self.group_info()
+ for key in kwargs:
+ if key == 'gid':
+ if kwargs[key] is not None and info[2] != int(kwargs[key]):
+ cmd.append('id=' + str(kwargs[key]))
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class FreeBsdGroup(Group):
+ """
+ This is a FreeBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
+ info = self.group_info()
+ cmd_len = len(cmd)
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ # modify the group if cmd will do anything
+ if cmd_len != len(cmd):
+ if self.module.check_mode:
+ return (0, '', '')
+ return self.execute_command(cmd)
+ return (None, '', '')
+
+
+class DragonFlyBsdGroup(FreeBsdGroup):
+ """
+ This is a DragonFlyBSD Group manipulation class.
+    It inherits all behaviors from the FreeBsdGroup class.
+ """
+
+ platform = 'DragonFly'
+
+
+# ===========================================
+
+class DarwinGroup(Group):
+ """
+    This is a macOS (Darwin) Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+
+    Group manipulation is done using dseditgroup(1).
+ """
+
+ platform = 'Darwin'
+ distribution = None
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'create']
+ if self.gid is not None:
+ cmd += ['-i', str(self.gid)]
+ elif 'system' in kwargs and kwargs['system'] is True:
+ gid = self.get_lowest_available_system_gid()
+ if gid is not False:
+ self.gid = str(gid)
+ cmd += ['-i', str(self.gid)]
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'delete']
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+
+ def group_mod(self, gid=None):
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd = [self.module.get_bin_path('dseditgroup', True)]
+ cmd += ['-o', 'edit']
+ if gid is not None:
+ cmd += ['-i', str(gid)]
+ cmd += ['-L', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ return (rc, out, err)
+ return (None, '', '')
+
+ def get_lowest_available_system_gid(self):
+ # check for lowest available system gid (< 500)
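+        # dscl lists PrimaryGroupID for local groups; we track the highest gid
+        # below 500 and propose highest + 1, or return False when nothing was
+        # found or the range is exhausted (highest == 499).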
+ try:
+ cmd = [self.module.get_bin_path('dscl', True)]
+ cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID']
+ (rc, out, err) = self.execute_command(cmd)
+ lines = out.splitlines()
+ highest = 0
+ for group_info in lines:
+ parts = group_info.split(' ')
+ if len(parts) > 1:
+ gid = int(parts[-1])
+ if gid > highest and gid < 500:
+ highest = gid
+ if highest == 0 or highest == 499:
+ return False
+ return (highest + 1)
+ except Exception:
+ return False
+
+
+class OpenBsdGroup(Group):
+ """
+    This is an OpenBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('groupdel', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupmod', True)]
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+class NetBsdGroup(Group):
+ """
+ This is a NetBSD Group manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - group_del()
+ - group_add()
+ - group_mod()
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+ GROUPFILE = '/etc/group'
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('groupdel', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupadd', True)]
+ if self.gid is not None:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ cmd = [self.module.get_bin_path('groupmod', True)]
+ info = self.group_info()
+ if self.gid is not None and int(self.gid) != info[2]:
+ cmd.append('-g')
+ cmd.append(str(self.gid))
+ if self.non_unique:
+ cmd.append('-o')
+ if len(cmd) == 1:
+ return (None, '', '')
+ if self.module.check_mode:
+ return (0, '', '')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+# ===========================================
+
+
+class BusyBoxGroup(Group):
+ """
+ BusyBox group manipulation class for systems that have addgroup and delgroup.
+
+ It overrides the following methods:
+ - group_add()
+ - group_del()
+ - group_mod()
+ """
+
+ def group_add(self, **kwargs):
+ cmd = [self.module.get_bin_path('addgroup', True)]
+ if self.gid is not None:
+ cmd.extend(['-g', str(self.gid)])
+
+ if self.system:
+ cmd.append('-S')
+
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def group_del(self):
+ cmd = [self.module.get_bin_path('delgroup', True), self.name]
+ return self.execute_command(cmd)
+
+ def group_mod(self, **kwargs):
+ # Since there is no groupmod command, modify /etc/group directly
+ info = self.group_info()
+ if self.gid is not None and self.gid != info[2]:
+ with open('/etc/group', 'rb') as f:
+ b_groups = f.read()
+
+ b_name = to_bytes(self.name)
+ b_current_group_string = b'%s:x:%d:' % (b_name, info[2])
+ b_new_group_string = b'%s:x:%d:' % (b_name, self.gid)
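+            # e.g. for a hypothetical group 'docker' moving from gid 1750 to
+            # 1800, b'docker:x:1750:' is replaced by b'docker:x:1800:' below.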
+
+ if b':%d:' % self.gid in b_groups:
+ self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid))
+
+ if self.module.check_mode:
+ return 0, '', ''
+ b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string)
+ with open('/etc/group', 'wb') as f:
+ f.write(b_new_groups)
+ return 0, '', ''
+
+ return None, '', ''
+
+
+class AlpineGroup(BusyBoxGroup):
+
+ platform = 'Linux'
+ distribution = 'Alpine'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ gid=dict(type='int'),
+ system=dict(type='bool', default=False),
+ local=dict(type='bool', default=False),
+ non_unique=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ['non_unique', True, ['gid']],
+ ],
+ )
+
+ group = Group(module)
+
+ module.debug('Group instantiated - platform %s' % group.platform)
+ if group.distribution:
+ module.debug('Group instantiated - distribution %s' % group.distribution)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = group.name
+ result['state'] = group.state
+
+ if group.state == 'absent':
+
+ if group.group_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = group.group_del()
+ if rc != 0:
+ module.fail_json(name=group.name, msg=err)
+
+ elif group.state == 'present':
+
+ if not group.group_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
+ else:
+ (rc, out, err) = group.group_mod(gid=group.gid)
+
+ if rc is not None and rc != 0:
+ module.fail_json(name=group.name, msg=err)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ if group.group_exists():
+ info = group.group_info()
+ result['system'] = group.system
+ result['gid'] = info[2]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/group_by.py b/lib/ansible/modules/group_by.py
new file mode 100644
index 00000000..4a709d28
--- /dev/null
+++ b/lib/ansible/modules/group_by.py
@@ -0,0 +1,58 @@
+# -*- mode: python -*-
+
+# Copyright: (c) 2012, Jeroen Hoekx (@jhoekx)
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: group_by
+short_description: Create Ansible groups based on facts
+description:
+- Use facts to create ad-hoc groups that can be used later in a playbook.
+- This module is also supported for Windows targets.
+version_added: "0.9"
+options:
+ key:
+ description:
+ - The variables whose values will be used as groups.
+ type: str
+ required: true
+ parents:
+ description:
+ - The list of the parent groups.
+ type: list
+ default: all
+ version_added: "2.4"
+notes:
+- Spaces in group names are converted to dashes '-'.
+- This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.add_host
+author:
+- Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r'''
+- name: Create groups based on the machine architecture
+ group_by:
+ key: machine_{{ ansible_machine }}
+
+- name: Create groups like 'virt_kvm_host'
+ group_by:
+ key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
+
+- name: Create nested groups
+ group_by:
+ key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
+ parents:
+ - el{{ ansible_distribution_major_version }}
+
+# Add all active hosts to a static group
+- group_by:
+ key: done
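+
+# Illustrative (per the note above, spaces become dashes): a distribution fact
+# of "Red Hat Enterprise Linux" would yield the group "Red-Hat-Enterprise-Linux"
+- group_by:
+    key: "{{ ansible_distribution }}"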
+'''
diff --git a/lib/ansible/modules/hostname.py b/lib/ansible/modules/hostname.py
new file mode 100644
index 00000000..c0ffe37a
--- /dev/null
+++ b/lib/ansible/modules/hostname.py
@@ -0,0 +1,863 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hostname
+author:
+ - Adrian Likins (@alikins)
+ - Hideki Saito (@saito-hideki)
+version_added: "1.4"
+short_description: Manage hostname
+requirements: [ hostname ]
+description:
+    - Set the system's hostname; supports most OSs/distributions, including those using systemd.
+ - Note, this module does *NOT* modify C(/etc/hosts). You need to modify it yourself using other modules like template or replace.
+ - Windows, HP-UX and AIX are not currently supported.
+options:
+ name:
+ description:
+ - Name of the host
+ required: true
+ use:
+ description:
+ - Which strategy to use to update the hostname.
+            - If not set, we try to autodetect, but this can be problematic, especially with containers, as they can present misleading information.
+        choices: ['generic', 'debian', 'sles', 'redhat', 'alpine', 'systemd', 'openrc', 'openbsd', 'solaris', 'freebsd']
+ version_added: '2.9'
+'''
+
+EXAMPLES = '''
+- name: Set a hostname
+ hostname:
+ name: web01
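+
+# An additional illustrative example: pin the strategy instead of relying on
+# autodetection (see the 'use' option above)
+- name: Set a hostname, forcing the systemd strategy
+  hostname:
+    name: web01
+    use: systemd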
+'''
+
+import os
+import platform
+import socket
+import traceback
+
+from ansible.module_utils.basic import (
+ AnsibleModule,
+ get_distribution,
+ get_distribution_version,
+)
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils._text import to_native
+
+STRATS = {'generic': 'Generic', 'debian': 'Debian', 'sles': 'SLES', 'redhat': 'RedHat', 'alpine': 'Alpine',
+ 'systemd': 'Systemd', 'openrc': 'OpenRC', 'openbsd': 'OpenBSD', 'solaris': 'Solaris', 'freebsd': 'FreeBSD'}
+
+
+class UnimplementedStrategy(object):
+ def __init__(self, module):
+ self.module = module
+
+ def update_current_and_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def update_current_hostname(self):
+ self.unimplemented_error()
+
+ def update_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def get_current_hostname(self):
+ self.unimplemented_error()
+
+ def set_current_hostname(self, name):
+ self.unimplemented_error()
+
+ def get_permanent_hostname(self):
+ self.unimplemented_error()
+
+ def set_permanent_hostname(self, name):
+ self.unimplemented_error()
+
+ def unimplemented_error(self):
+ system = platform.system()
+ distribution = get_distribution()
+ if distribution is not None:
+ msg_platform = '%s (%s)' % (system, distribution)
+ else:
+ msg_platform = system
+ self.module.fail_json(
+ msg='hostname module cannot be used on platform %s' % msg_platform)
+
+
+class Hostname(object):
+ """
+ This is a generic Hostname manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to set different strategy instance to self.strategy.
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+ strategy_class = UnimplementedStrategy
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Hostname)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.use = module.params['use']
+
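+        # Strategy resolution: an explicit 'use' wins, then systemd detection
+        # on Linux, then the platform subclass default.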
+ if self.use is not None:
+ strat = globals()['%sStrategy' % STRATS[self.use]]
+ self.strategy = strat(module)
+ elif self.platform == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
+ self.strategy = SystemdStrategy(module)
+ else:
+ self.strategy = self.strategy_class(module)
+
+ def update_current_and_permanent_hostname(self):
+ return self.strategy.update_current_and_permanent_hostname()
+
+ def get_current_hostname(self):
+ return self.strategy.get_current_hostname()
+
+ def set_current_hostname(self, name):
+ self.strategy.set_current_hostname(name)
+
+ def get_permanent_hostname(self):
+ return self.strategy.get_permanent_hostname()
+
+ def set_permanent_hostname(self, name):
+ self.strategy.set_permanent_hostname(name)
+
+
+class GenericStrategy(object):
+ """
+ This is a generic Hostname manipulation strategy class.
+
+ A subclass may wish to override some or all of these methods.
+ - get_current_hostname()
+ - get_permanent_hostname()
+ - set_current_hostname(name)
+ - set_permanent_hostname(name)
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.changed = False
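+        # Prefer hostnamectl when present; otherwise require the plain
+        # hostname binary (get_bin_path fails the module if it is missing).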
+ self.hostname_cmd = self.module.get_bin_path('hostnamectl', False)
+ if not self.hostname_cmd:
+ self.hostname_cmd = self.module.get_bin_path('hostname', True)
+
+ def update_current_and_permanent_hostname(self):
+ self.update_current_hostname()
+ self.update_permanent_hostname()
+ return self.changed
+
+ def update_current_hostname(self):
+ name = self.module.params['name']
+ current_name = self.get_current_hostname()
+ if current_name != name:
+ if not self.module.check_mode:
+ self.set_current_hostname(name)
+ self.changed = True
+
+ def update_permanent_hostname(self):
+ name = self.module.params['name']
+ permanent_name = self.get_permanent_hostname()
+ if permanent_name != name:
+ if not self.module.check_mode:
+ self.set_permanent_hostname(name)
+ self.changed = True
+
+ def get_current_hostname(self):
+ cmd = [self.hostname_cmd]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ return 'UNKNOWN'
+
+ def set_permanent_hostname(self, name):
+ pass
+
+
+class DebianStrategy(GenericStrategy):
+ """
+ This is a Debian family Hostname manipulation strategy class - it edits
+ the /etc/hostname file.
+ """
+
+ HOSTNAME_FILE = '/etc/hostname'
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.HOSTNAME_FILE):
+ try:
+ open(self.HOSTNAME_FILE, "a").write("")
+ except IOError as e:
+ self.module.fail_json(msg="failed to write file: %s" %
+ to_native(e), exception=traceback.format_exc())
+ try:
+ f = open(self.HOSTNAME_FILE)
+ try:
+ return f.read().strip()
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ f = open(self.HOSTNAME_FILE, 'w+')
+ try:
+ f.write("%s\n" % name)
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+
+class SLESStrategy(GenericStrategy):
+ """
+ This is a SLES Hostname strategy class - it edits the
+ /etc/HOSTNAME file.
+ """
+ HOSTNAME_FILE = '/etc/HOSTNAME'
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.HOSTNAME_FILE):
+ try:
+ open(self.HOSTNAME_FILE, "a").write("")
+ except IOError as e:
+ self.module.fail_json(msg="failed to write file: %s" %
+ to_native(e), exception=traceback.format_exc())
+ try:
+ f = open(self.HOSTNAME_FILE)
+ try:
+ return f.read().strip()
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ f = open(self.HOSTNAME_FILE, 'w+')
+ try:
+ f.write("%s\n" % name)
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+
+class RedHatStrategy(GenericStrategy):
+ """
+ This is a Redhat Hostname strategy class - it edits the
+ /etc/sysconfig/network file.
+ """
+ NETWORK_FILE = '/etc/sysconfig/network'
+
+ def get_permanent_hostname(self):
+ try:
+            # open as text so startswith('HOSTNAME') matches on Python 3
+            f = open(self.NETWORK_FILE, 'r')
+ try:
+ for line in f.readlines():
+ if line.startswith('HOSTNAME'):
+ k, v = line.split('=')
+ return v.strip()
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ lines = []
+ found = False
+            f = open(self.NETWORK_FILE, 'r')
+ try:
+ for line in f.readlines():
+ if line.startswith('HOSTNAME'):
+ lines.append("HOSTNAME=%s\n" % name)
+ found = True
+ else:
+ lines.append(line)
+ finally:
+ f.close()
+ if not found:
+ lines.append("HOSTNAME=%s\n" % name)
+ f = open(self.NETWORK_FILE, 'w+')
+ try:
+ f.writelines(lines)
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+
+class AlpineStrategy(GenericStrategy):
+ """
+    This is an Alpine Linux Hostname manipulation strategy class - it edits
+    the /etc/hostname file, then runs hostname -F /etc/hostname.
+ """
+
+ HOSTNAME_FILE = '/etc/hostname'
+
+ def update_current_and_permanent_hostname(self):
+ self.update_permanent_hostname()
+ self.update_current_hostname()
+ return self.changed
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.HOSTNAME_FILE):
+ try:
+ open(self.HOSTNAME_FILE, "a").write("")
+ except IOError as e:
+ self.module.fail_json(msg="failed to write file: %s" %
+ to_native(e), exception=traceback.format_exc())
+ try:
+ f = open(self.HOSTNAME_FILE)
+ try:
+ return f.read().strip()
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ f = open(self.HOSTNAME_FILE, 'w+')
+ try:
+ f.write("%s\n" % name)
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def set_current_hostname(self, name):
+ cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class SystemdStrategy(GenericStrategy):
+ """
+ This is a Systemd hostname manipulation strategy class - it uses
+ the hostnamectl command.
+ """
+
+ def get_current_hostname(self):
+ cmd = [self.hostname_cmd, '--transient', 'status']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_current_hostname(self, name):
+ if len(name) > 64:
+ self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
+ cmd = [self.hostname_cmd, '--transient', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ cmd = [self.hostname_cmd, '--static', 'status']
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ if len(name) > 64:
+ self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
+ cmd = [self.hostname_cmd, '--pretty', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ cmd = [self.hostname_cmd, '--static', 'set-hostname', name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class OpenRCStrategy(GenericStrategy):
+ """
+ This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
+ the /etc/conf.d/hostname file.
+ """
+
+ HOSTNAME_FILE = '/etc/conf.d/hostname'
+
+ def get_permanent_hostname(self):
+ name = 'UNKNOWN'
+ try:
+ try:
+ f = open(self.HOSTNAME_FILE, 'r')
+ for line in f:
+ line = line.strip()
+ if line.startswith('hostname='):
+ name = line[10:].strip('"')
+ break
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+ finally:
+ f.close()
+
+ return name
+
+ def set_permanent_hostname(self, name):
+ try:
+ try:
+ f = open(self.HOSTNAME_FILE, 'r')
+ lines = [x.strip() for x in f]
+
+ for i, line in enumerate(lines):
+ if line.startswith('hostname='):
+ lines[i] = 'hostname="%s"' % name
+ break
+ f.close()
+
+ f = open(self.HOSTNAME_FILE, 'w')
+ f.write('\n'.join(lines) + '\n')
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+ finally:
+ f.close()
+
+
+class OpenBSDStrategy(GenericStrategy):
+ """
+    This is an OpenBSD family Hostname manipulation strategy class - it edits
+ the /etc/myname file.
+ """
+
+ HOSTNAME_FILE = '/etc/myname'
+
+ def get_permanent_hostname(self):
+ if not os.path.isfile(self.HOSTNAME_FILE):
+ try:
+ open(self.HOSTNAME_FILE, "a").write("")
+ except IOError as e:
+ self.module.fail_json(msg="failed to write file: %s" %
+ to_native(e), exception=traceback.format_exc())
+ try:
+ f = open(self.HOSTNAME_FILE)
+ try:
+ return f.read().strip()
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def set_permanent_hostname(self, name):
+ try:
+ f = open(self.HOSTNAME_FILE, 'w+')
+ try:
+ f.write("%s\n" % name)
+ finally:
+ f.close()
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+
+class SolarisStrategy(GenericStrategy):
+ """
+    This is a Solaris 11 or later Hostname manipulation strategy class - it
+    executes the hostname command.
+ """
+
+ def set_current_hostname(self, name):
+ cmd_option = '-t'
+ cmd = [self.hostname_cmd, cmd_option, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+ def get_permanent_hostname(self):
+ fmri = 'svc:/system/identity:node'
+ pattern = 'config/nodename'
+ cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
+ rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+ return to_native(out).strip()
+
+ def set_permanent_hostname(self, name):
+ cmd = [self.hostname_cmd, name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
+
+
+class FreeBSDStrategy(GenericStrategy):
+ """
+ This is a FreeBSD hostname manipulation strategy class - it edits
+ the /etc/rc.conf.d/hostname file.
+ """
+
+ HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
+
+ def get_permanent_hostname(self):
+
+ name = 'UNKNOWN'
+ if not os.path.isfile(self.HOSTNAME_FILE):
+ try:
+ open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
+ except IOError as e:
+ self.module.fail_json(msg="failed to write file: %s" %
+ to_native(e), exception=traceback.format_exc())
+ try:
+ try:
+ f = open(self.HOSTNAME_FILE, 'r')
+ for line in f:
+ line = line.strip()
+ if line.startswith('hostname='):
+ name = line[10:].strip('"')
+ break
+ except Exception as e:
+ self.module.fail_json(msg="failed to read hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+ finally:
+ f.close()
+
+ return name
+
+ def set_permanent_hostname(self, name):
+ try:
+ try:
+ f = open(self.HOSTNAME_FILE, 'r')
+ lines = [x.strip() for x in f]
+
+ for i, line in enumerate(lines):
+ if line.startswith('hostname='):
+ lines[i] = 'hostname="%s"' % name
+ break
+ f.close()
+
+ f = open(self.HOSTNAME_FILE, 'w')
+ f.write('\n'.join(lines) + '\n')
+ except Exception as e:
+ self.module.fail_json(msg="failed to update hostname: %s" %
+ to_native(e), exception=traceback.format_exc())
+ finally:
+ f.close()
+
+
+class FedoraHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Fedora'
+ strategy_class = SystemdStrategy
+
+
+class SLESHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Sles'
+ try:
+ distribution_version = get_distribution_version()
+    # cast to float may raise ValueError on non-SLES systems; we use float for a little more safety over int
+ if distribution_version and 10 <= float(distribution_version) <= 12:
+ strategy_class = SLESStrategy
+ else:
+ raise ValueError()
+ except ValueError:
+ strategy_class = UnimplementedStrategy
+
+
+class OpenSUSEHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Opensuse'
+ strategy_class = SystemdStrategy
+
+
+class OpenSUSELeapHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Opensuse-leap'
+ strategy_class = SystemdStrategy
+
+
+class OpenSUSETumbleweedHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Opensuse-tumbleweed'
+ strategy_class = SystemdStrategy
+
+
+class AsteraHostname(Hostname):
+ platform = 'Linux'
+ distribution = '"astralinuxce"'
+ strategy_class = SystemdStrategy
+
+
+class ArchHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Arch'
+ strategy_class = SystemdStrategy
+
+
+class ArchARMHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Archarm'
+ strategy_class = SystemdStrategy
+
+
+class ManjaroHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Manjaro'
+ strategy_class = SystemdStrategy
+
+
+class RHELHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Redhat'
+ strategy_class = RedHatStrategy
+
+
+class CentOSHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Centos'
+ strategy_class = RedHatStrategy
+
+
+class ClearLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Clear-linux-os'
+ strategy_class = SystemdStrategy
+
+
+class CloudlinuxserverHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cloudlinuxserver'
+ strategy_class = RedHatStrategy
+
+
+class CloudlinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cloudlinux'
+ strategy_class = RedHatStrategy
+
+
+class CoreosHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Coreos'
+ strategy_class = SystemdStrategy
+
+
+class FlatcarHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Flatcar'
+ strategy_class = SystemdStrategy
+
+
+class ScientificHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Scientific'
+ strategy_class = RedHatStrategy
+
+
+class OracleLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Oracle'
+ strategy_class = RedHatStrategy
+
+
+class VirtuozzoLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Virtuozzo'
+ strategy_class = RedHatStrategy
+
+
+class AmazonLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Amazon'
+ strategy_class = RedHatStrategy
+
+
+class DebianHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Debian'
+ strategy_class = DebianStrategy
+
+
+class KylinHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Kylin'
+ strategy_class = DebianStrategy
+
+
+class CumulusHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Cumulus-linux'
+ strategy_class = DebianStrategy
+
+
+class KaliHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Kali'
+ strategy_class = DebianStrategy
+
+
+class UbuntuHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Ubuntu'
+ strategy_class = DebianStrategy
+
+
+class LinuxmintHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Linuxmint'
+ strategy_class = DebianStrategy
+
+
+class LinaroHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Linaro'
+ strategy_class = DebianStrategy
+
+
+class DevuanHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Devuan'
+ strategy_class = DebianStrategy
+
+
+class RaspbianHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Raspbian'
+ strategy_class = DebianStrategy
+
+
+class GentooHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Gentoo'
+ strategy_class = OpenRCStrategy
+
+
+class ALTLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Altlinux'
+ strategy_class = RedHatStrategy
+
+
+class AlpineLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Alpine'
+ strategy_class = AlpineStrategy
+
+
+class OpenBSDHostname(Hostname):
+ platform = 'OpenBSD'
+ distribution = None
+ strategy_class = OpenBSDStrategy
+
+
+class SolarisHostname(Hostname):
+ platform = 'SunOS'
+ distribution = None
+ strategy_class = SolarisStrategy
+
+
+class FreeBSDHostname(Hostname):
+ platform = 'FreeBSD'
+ distribution = None
+ strategy_class = FreeBSDStrategy
+
+
+class NetBSDHostname(Hostname):
+ platform = 'NetBSD'
+ distribution = None
+ strategy_class = FreeBSDStrategy
+
+
+class NeonHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Neon'
+ strategy_class = DebianStrategy
+
+
+class OsmcHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Osmc'
+ strategy_class = SystemdStrategy
+
+
+class VoidLinuxHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Void'
+ strategy_class = DebianStrategy
+
+
+class PopHostname(Hostname):
+ platform = 'Linux'
+ distribution = 'Pop'
+ strategy_class = DebianStrategy
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ use=dict(type='str', choices=STRATS.keys())
+ ),
+ supports_check_mode=True,
+ )
+
+ hostname = Hostname(module)
+ name = module.params['name']
+
+ current_hostname = hostname.get_current_hostname()
+ permanent_hostname = hostname.get_permanent_hostname()
+
+ changed = hostname.update_current_and_permanent_hostname()
+
+ if name != current_hostname:
+ name_before = current_hostname
+ elif name != permanent_hostname:
+ name_before = permanent_hostname
+
+ kw = dict(changed=changed, name=name,
+ ansible_facts=dict(ansible_hostname=name.split('.')[0],
+ ansible_nodename=name,
+ ansible_fqdn=socket.getfqdn(),
+ ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
+
+ if changed:
+ kw['diff'] = {'after': 'hostname = ' + name + '\n',
+ 'before': 'hostname = ' + name_before + '\n'}
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/import_playbook.py b/lib/ansible/modules/import_playbook.py
new file mode 100644
index 00000000..51ef23fa
--- /dev/null
+++ b/lib/ansible/modules/import_playbook.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_playbook
+short_description: Import a playbook
+description:
+ - Includes a file with a list of plays to be executed.
+ - Files with a list of plays can only be included at the top level.
+ - You cannot use this action inside a play.
+version_added: "2.4"
+options:
+ free-form:
+ description:
+ - The name of the imported playbook is specified directly without any other option.
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+seealso:
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ tasks:
+ - debug:
+ msg: play1
+
+- name: Include a play after another play
+ import_playbook: otherplays.yaml
+
+
+- name: This DOES NOT WORK
+ hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: This fails because I'm inside a play already
+ import_playbook: stuff.yaml
+'''
+
+RETURN = r'''
+# This module does not return anything except plays to execute.
+'''
diff --git a/lib/ansible/modules/import_role.py b/lib/ansible/modules/import_role.py
new file mode 100644
index 00000000..ea221d16
--- /dev/null
+++ b/lib/ansible/modules/import_role.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_role
+short_description: Import a role into a play
+description:
+ - Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
+ between other tasks of the play.
+ - Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
+ you want the opposite behavior, use M(ansible.builtin.include_role) instead.
+version_added: '2.4'
+options:
+ name:
+ description:
+ - The name of the role to be executed.
+ type: str
+ required: true
+ tasks_from:
+ description:
+ - File to load from a role's C(tasks/) directory.
+ type: str
+ default: main
+ vars_from:
+ description:
+ - File to load from a role's C(vars/) directory.
+ type: str
+ default: main
+ defaults_from:
+ description:
+ - File to load from a role's C(defaults/) directory.
+ type: str
+ default: main
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ type: bool
+ default: yes
+ handlers_from:
+ description:
+ - File to load from a role's C(handlers/) directory.
+ type: str
+ default: main
+ version_added: '2.8'
+notes:
+ - Handlers are made available to the whole play.
+ - Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
+ Due to this, these variables will be accessible to roles and tasks executed before the location of the
+ M(ansible.builtin.import_role) task.
+  - Unlike M(ansible.builtin.include_role), variable exposure is not configurable; variables are always exposed.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - import_role:
+ name: myrole
+
+ - name: Run tasks/other.yaml instead of 'main'
+ import_role:
+ name: myrole
+ tasks_from: other
+
+ - name: Pass variables to role
+ import_role:
+ name: myrole
+ vars:
+ rolevar1: value from task
+
+ - name: Apply condition to each task in role
+ import_role:
+ name: myrole
+ when: not idontwanttorun
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/import_tasks.py b/lib/ansible/modules/import_tasks.py
new file mode 100644
index 00000000..829e291a
--- /dev/null
+++ b/lib/ansible/modules/import_tasks.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: import_tasks
+short_description: Import a task list
+description:
+ - Imports a list of tasks to be added to the current playbook for subsequent execution.
+version_added: "2.4"
+options:
+ free-form:
+ description:
+ - The name of the imported file is specified directly without any other option.
+      - Most keywords, including loops and conditionals, are only applied to the imported tasks, not to this statement itself.
+ - If you need any of those to apply, use M(ansible.builtin.include_tasks) instead.
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: Include task list in play
+ import_tasks: stuff.yaml
+
+ - debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: Apply conditional to all imported tasks
+ import_tasks: stuff.yaml
+ when: hostvar is defined
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include.py b/lib/ansible/modules/include.py
new file mode 100644
index 00000000..d9d5d51b
--- /dev/null
+++ b/lib/ansible/modules/include.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include
+short_description: Include a play or task list
+description:
+ - Includes a file with a list of plays or tasks to be executed in the current playbook.
+ - Files with a list of plays can only be included at the top level. Lists of tasks can only be included where tasks
+ normally run (in play).
+ - Before Ansible 2.0, all includes were 'static' and were executed when the play was compiled.
+ - Static includes are not subject to most directives. For example, loops or conditionals are applied instead to each
+ inherited task.
+ - Since Ansible 2.0, task includes are dynamic and behave more like real tasks. This means they can be looped,
+    skipped and use variables from any source. Ansible tries to auto-detect this, but you can use the C(static)
+ directive (which was added in Ansible 2.1) to bypass autodetection.
+ - This module is also supported for Windows targets.
+version_added: "0.6"
+options:
+ free-form:
+ description:
+ - This module allows you to specify the name of the file directly without any other options.
+notes:
+ - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+  - Include has some unintuitive behaviours depending on whether it is running in a static or dynamic, in-play or in-playbook context;
+    in an effort to clarify behaviours we are moving to a new set of modules (M(ansible.builtin.include_tasks),
+    M(ansible.builtin.include_role), M(ansible.builtin.import_playbook), M(ansible.builtin.import_tasks))
+    that have well-established and clear behaviours.
+ - B(This module will still be supported for some time but we are looking at deprecating it in the near future.)
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: localhost
+ tasks:
+ - debug:
+ msg: play1
+
+- name: Include a play after another play
+ include: otherplays.yaml
+
+
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: Include task list in play
+ include: stuff.yaml
+
+ - debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: Include task list in play only if the condition is true
+ include: "{{ hostvar }}.yaml"
+ static: no
+ when: hostvar is defined
+'''
+
+RETURN = r'''
+# This module does not return anything except plays or tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_role.py b/lib/ansible/modules/include_role.py
new file mode 100644
index 00000000..f1a0cf6f
--- /dev/null
+++ b/lib/ansible/modules/include_role.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include_role
+short_description: Load and execute a role
+description:
+ - Dynamically loads and executes a specified role as a task.
+ - May be used only where Ansible tasks are allowed - inside C(pre_tasks), C(tasks), or C(post_tasks) playbook objects, or as a task inside a role.
+ - Task-level keywords, loops, and conditionals apply only to the C(include_role) statement itself.
+ - To apply keywords to the tasks within the role, pass them using the C(apply) option or use M(ansible.builtin.import_role) instead.
+ - Ignores some keywords, like C(until) and C(retries).
+ - This module is also supported for Windows targets.
+version_added: "2.2"
+options:
+ apply:
+ description:
+ - Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to all tasks within the included role.
+ version_added: '2.7'
+ name:
+ description:
+ - The name of the role to be executed.
+ type: str
+ required: True
+ tasks_from:
+ description:
+ - File to load from a role's C(tasks/) directory.
+ type: str
+ default: main
+ vars_from:
+ description:
+ - File to load from a role's C(vars/) directory.
+ type: str
+ default: main
+ defaults_from:
+ description:
+ - File to load from a role's C(defaults/) directory.
+ type: str
+ default: main
+ allow_duplicates:
+ description:
+ - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
+ type: bool
+ default: yes
+ public:
+ description:
+ - This option dictates whether the role's C(vars) and C(defaults) are exposed to the playbook. If set to C(yes)
+ the variables will be available to tasks following the C(include_role) task. This functionality differs from
+ standard variable exposure for roles listed under the C(roles) header or C(import_role) as they are exposed at
+ playbook parsing time, and available to earlier roles and tasks as well.
+ type: bool
+ default: no
+ version_added: '2.7'
+ handlers_from:
+ description:
+ - File to load from a role's C(handlers/) directory.
+ type: str
+ default: main
+ version_added: '2.8'
+notes:
+ - Handlers are made available to the whole play.
+  - Before Ansible 2.4, as with C(include), this task could be static or dynamic. If static, it implied that it would not
+    need templating, loops or conditionals and would show included tasks in the C(--list) options. Ansible would try to
+    autodetect what was needed, but you could set C(static) to C(yes) or C(no) at task level to control this.
+  - After Ansible 2.4, you can use M(ansible.builtin.import_role) for C(static) behaviour and this action for the C(dynamic) one.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_tasks
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- include_role:
+ name: myrole
+
+- name: Run tasks/other.yaml instead of 'main'
+ include_role:
+ name: myrole
+ tasks_from: other
+
+- name: Pass variables to role
+ include_role:
+ name: myrole
+ vars:
+ rolevar1: value from task
+
+- name: Use role in loop
+ include_role:
+ name: '{{ roleinputvar }}'
+ loop:
+ - '{{ roleinput1 }}'
+ - '{{ roleinput2 }}'
+ loop_control:
+ loop_var: roleinputvar
+
+- name: Conditional role
+ include_role:
+ name: myrole
+ when: not idontwanttorun
+
+- name: Apply tags to tasks within included file
+ include_role:
+ name: install
+ apply:
+ tags:
+ - install
+ tags:
+ - always
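+
+# NOTE: An illustrative sketch of the 'public' option described above;
+# the role name 'myrole' is hypothetical.
+- name: Expose the role's vars and defaults to the rest of the play
+  include_role:
+    name: myrole
+    public: yes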
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_tasks.py b/lib/ansible/modules/include_tasks.py
new file mode 100644
index 00000000..41768b34
--- /dev/null
+++ b/lib/ansible/modules/include_tasks.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Ansible Core Team (@ansible)
+module: include_tasks
+short_description: Dynamically include a task list
+description:
+ - Includes a file with a list of tasks to be executed in the current playbook.
+version_added: '2.4'
+options:
+ file:
+ description:
+ - The name of the imported file is specified directly without any other option.
+      - Unlike M(ansible.builtin.import_tasks), most keywords, including C(loop), C(with_items), and conditionals, apply to this statement.
+      - The do-until loop is not supported with M(ansible.builtin.include_tasks).
+ type: str
+ version_added: '2.7'
+ apply:
+ description:
+ - Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to the tasks within the include.
+ type: str
+ version_added: '2.7'
+ free-form:
+ description:
+ - |
+        Supplying a file name via free-form syntax, for example C(- include_tasks: file.yml), is the equivalent
+        of specifying an argument of I(file).
+notes:
+  - This is a core feature of Ansible, rather than a module, and cannot be overridden like a module.
+seealso:
+- module: ansible.builtin.import_playbook
+- module: ansible.builtin.import_role
+- module: ansible.builtin.import_tasks
+- module: ansible.builtin.include_role
+- ref: playbooks_reuse_includes
+ description: More information related to including and importing playbooks, roles and tasks.
+'''
+
+EXAMPLES = r'''
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: Include task list in play
+ include_tasks: stuff.yaml
+
+ - debug:
+ msg: task10
+
+- hosts: all
+ tasks:
+ - debug:
+ msg: task1
+
+ - name: Include task list in play only if the condition is true
+ include_tasks: "{{ hostvar }}.yaml"
+ when: hostvar is defined
+
+- name: Apply tags to tasks within included file
+ include_tasks:
+ file: install.yml
+ apply:
+ tags:
+ - install
+ tags:
+ - always
+
+- name: Apply tags to tasks within included file when using free-form
+ include_tasks: install.yml
+ args:
+ apply:
+ tags:
+ - install
+ tags:
+ - always
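+
+# NOTE: An illustrative sketch of looping over dynamic includes, as the
+# 'file' option above describes; the file names are hypothetical.
+- name: Include a task list once per item
+  include_tasks: "{{ item }}"
+  loop:
+    - common.yaml
+    - extra.yaml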
+'''
+
+RETURN = r'''
+# This module does not return anything except tasks to execute.
+'''
diff --git a/lib/ansible/modules/include_vars.py b/lib/ansible/modules/include_vars.py
new file mode 100644
index 00000000..019d7468
--- /dev/null
+++ b/lib/ansible/modules/include_vars.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author: Allen Sanabria (@linuxdynasty)
+module: include_vars
+short_description: Load variables from files, dynamically within a task
+description:
+ - Loads YAML/JSON variables dynamically from a file or directory, recursively, during task runtime.
+ - If loading a directory, the files are sorted alphabetically before being loaded.
+ - This module is also supported for Windows targets.
+ - To assign included variables to a different host than C(inventory_hostname),
+ use C(delegate_to) and set C(delegate_facts=yes).
+version_added: "1.4"
+options:
+ file:
+ description:
+ - The file name from which variables should be loaded.
+      - If the path is relative, it will look for the file in the vars/ subdirectory of a role or relative to the playbook.
+ type: path
+ version_added: "2.2"
+ dir:
+ description:
+ - The directory name from which the variables should be loaded.
+ - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory.
+ - If the path is relative and not inside a role, it will be parsed relative to the playbook.
+ type: path
+ version_added: "2.2"
+ name:
+ description:
+      - The name of a variable into which to assign the included vars.
+      - If omitted (null), they will be made top-level vars.
+ type: str
+ version_added: "2.2"
+ depth:
+ description:
+      - When using C(dir), this module will, by default, recursively go through each subdirectory and load up the
+        variables. By explicitly setting the depth, this module will descend only to the specified depth.
+ type: int
+ default: 0
+ version_added: "2.2"
+ files_matching:
+ description:
+ - Limit the files that are loaded within any directory to this regular expression.
+ type: str
+ version_added: "2.2"
+ ignore_files:
+ description:
+ - List of file names to ignore.
+ type: list
+ version_added: "2.2"
+ extensions:
+ description:
+ - List of file extensions to read when using C(dir).
+ type: list
+ default: [ json, yaml, yml ]
+ version_added: "2.3"
+ ignore_unknown_extensions:
+ description:
+ - Ignore unknown file extensions within the directory.
+      - This allows users to specify a directory containing vars files that are intermingled with files of other extension
+        types (e.g. a directory with a README in it and vars files).
+ type: bool
+ default: no
+ version_added: "2.7"
+ free-form:
+ description:
+ - This module allows you to specify the 'file' option directly without any other options.
+      - There is no 'free-form' option; this is just an indicator. See the examples below.
+notes:
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.set_fact
+- ref: playbooks_delegation
+ description: More information related to task delegation.
+'''
+
+EXAMPLES = r'''
+- name: Include vars of stuff.yaml into the 'stuff' variable (2.2).
+ include_vars:
+ file: stuff.yaml
+ name: stuff
+
+- name: Conditionally load variables into 'plans' when x is 0, otherwise do not (2.2)
+ include_vars:
+ file: contingency_plan.yaml
+ name: plans
+ when: x == 0
+
+- name: Load a variable file based on the OS type, or a default if not found. Using free-form to specify the file.
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+        - '{{ ansible_distribution }}.yaml'
+        - '{{ ansible_os_family }}.yaml'
+ - default.yaml
+ paths:
+ - 'vars'
+
+- name: Bare include (free-form)
+ include_vars: myvars.yaml
+
+- name: Include all .json and .jsn files in vars/all and all nested directories (2.3)
+ include_vars:
+ dir: vars/all
+ extensions:
+ - 'json'
+ - 'jsn'
+
+- name: Include all default extension files in vars/all and all nested directories and save the output in test. (2.2)
+ include_vars:
+ dir: vars/all
+ name: test
+
+- name: Include default extension files in vars/services (2.2)
+ include_vars:
+ dir: vars/services
+ depth: 1
+
+- name: Include only files matching bastion.yaml (2.2)
+ include_vars:
+ dir: vars
+ files_matching: bastion.yaml
+
+- name: Include all .yaml files except bastion.yaml (2.3)
+ include_vars:
+ dir: vars
+ ignore_files:
+ - 'bastion.yaml'
+ extensions:
+ - 'yaml'
+
+- name: Ignore warnings raised for files with unknown extensions while loading (2.7)
+ include_vars:
+ dir: vars
+ ignore_unknown_extensions: True
+ extensions:
+ - ''
+ - 'yaml'
+ - 'yml'
+ - 'json'
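+
+# NOTE: A minimal sketch of the delegation behaviour described above; the
+# host name and file name are hypothetical.
+- name: Assign the included variables to a different host
+  include_vars:
+    file: stuff.yaml
+  delegate_to: app01.example.com
+  delegate_facts: yes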
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Variables that were included and their values
+ returned: success
+ type: dict
+ sample: {'variable': 'value'}
+ansible_included_var_files:
+ description: A list of files that were successfully included
+ returned: success
+ type: list
+ sample: [ /path/to/file.json, /path/to/file.yaml ]
+ version_added: '2.4'
+'''
diff --git a/lib/ansible/modules/iptables.py b/lib/ansible/modules/iptables.py
new file mode 100644
index 00000000..efe31c60
--- /dev/null
+++ b/lib/ansible/modules/iptables.py
@@ -0,0 +1,794 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables
+short_description: Modify iptables rules
+version_added: "2.0"
+author:
+- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+- Sébastien DA ROCHA (@sebastiendarocha)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP packet
+ filter rules in the Linux kernel.
+ - This module does not handle the saving and/or loading of rules, but rather
+ only manipulates the current rules that are present in memory. This is the
+    same as the behaviour of the C(iptables) and C(ip6tables) commands which
+    this module uses internally.
+notes:
+  - This module just deals with individual rules. If you need advanced
+    chaining of rules, the recommended way is to template the iptables restore
+    file.
+options:
+ table:
+ description:
+ - This option specifies the packet matching table which the command should operate on.
+ - If the kernel is configured with automatic module loading, an attempt will be made
+ to load the appropriate module for that table if it is not already there.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ default: filter
+ state:
+ description:
+ - Whether the rule should be absent or present.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ action:
+ description:
+ - Whether the rule should be appended at the bottom or inserted at the top.
+ - If the rule already exists the chain will not be modified.
+ type: str
+ choices: [ append, insert ]
+ default: append
+ version_added: "2.2"
+ rule_num:
+ description:
+ - Insert the rule as the given rule number.
+ - This works only with C(action=insert).
+ type: str
+ version_added: "2.5"
+ ip_version:
+ description:
+ - Which version of the IP protocol this rule should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ chain:
+ description:
+ - Specify the iptables chain to modify.
+ - This could be a user-defined chain or one of the standard iptables chains, like
+ C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
+ type: str
+ protocol:
+ description:
+ - The protocol of the rule or of the packet to check.
+ - The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(ipv6-icmp) or C(icmpv6),
+ C(esp), C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
+ representing one of these protocols or a different one.
+ - A protocol name from I(/etc/protocols) is also allowed.
+ - A C(!) argument before the protocol inverts the test.
+ - The number zero is equivalent to all.
+ - C(all) will match with all protocols and is taken as default when this option is omitted.
+ type: str
+ source:
+ description:
+ - Source specification.
+ - Address can be either a network name, a hostname, a network IP address
+ (with /mask), or a plain IP address.
+ - Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea.
+ - The mask can be either a network mask or a plain number, specifying
+ the number of 1's at the left side of the network mask. Thus, a mask
+ of 24 is equivalent to 255.255.255.0. A C(!) argument before the
+ address specification inverts the sense of the address.
+ type: str
+ destination:
+ description:
+ - Destination specification.
+ - Address can be either a network name, a hostname, a network IP address
+ (with /mask), or a plain IP address.
+ - Hostnames will be resolved once only, before the rule is submitted to
+ the kernel. Please note that specifying any name to be resolved with
+ a remote query such as DNS is a really bad idea.
+ - The mask can be either a network mask or a plain number, specifying
+ the number of 1's at the left side of the network mask. Thus, a mask
+ of 24 is equivalent to 255.255.255.0. A C(!) argument before the
+ address specification inverts the sense of the address.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags specification.
+ - C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
+ type: dict
+ default: {}
+ version_added: "2.4"
+ suboptions:
+ flags:
+ description:
+ - List of flags you want to examine.
+ type: list
+ flags_set:
+ description:
+ - Flags to be set.
+ type: list
+ match:
+ description:
+ - Specifies a match to use, that is, an extension module that tests for
+ a specific property.
+      - The set of matches makes up the condition under which a target is invoked.
+ - Matches are evaluated first to last if specified as an array and work in short-circuit
+ fashion, i.e. if one extension yields false, evaluation will stop.
+ type: list
+ default: []
+ jump:
+ description:
+ - This specifies the target of the rule; i.e., what to do if the packet matches it.
+ - The target can be a user-defined chain (other than the one
+ this rule is in), one of the special builtin targets which decide the
+ fate of the packet immediately, or an extension (see EXTENSIONS
+ below).
+ - If this option is omitted in a rule (and the goto parameter
+ is not used), then matching the rule will have no effect on the
+ packet's fate, but the counters on the rule will be incremented.
+ type: str
+ gateway:
+ description:
+      - This specifies the IP address of the host to send the cloned packets to.
+ - This option is only valid when C(jump) is set to C(TEE).
+ type: str
+ version_added: "2.8"
+ log_prefix:
+ description:
+      - Specifies a log text for the rule. Only makes sense with a LOG jump.
+ type: str
+ version_added: "2.5"
+ log_level:
+ description:
+ - Logging level according to the syslogd-defined priorities.
+      - The value can be strings or numbers from 0 to 7.
+ - This parameter is only applicable if C(jump) is set to C(LOG).
+ type: str
+ version_added: "2.8"
+ choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
+ goto:
+ description:
+ - This specifies that the processing should continue in a user specified chain.
+      - Unlike the jump argument, return will not continue processing in
+        this chain but instead in the chain that called us via jump.
+ type: str
+ in_interface:
+ description:
+ - Name of an interface via which a packet was received (only for packets
+ entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
+ - When the C(!) argument is used before the interface name, the sense is inverted.
+ - If the interface name ends in a C(+), then any interface which begins with
+ this name will match.
+ - If this option is omitted, any interface name will match.
+ type: str
+ out_interface:
+ description:
+ - Name of an interface via which a packet is going to be sent (for
+ packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
+ - When the C(!) argument is used before the interface name, the sense is inverted.
+ - If the interface name ends in a C(+), then any interface which begins
+ with this name will match.
+ - If this option is omitted, any interface name will match.
+ type: str
+ fragment:
+ description:
+ - This means that the rule only refers to second and further fragments
+ of fragmented packets.
+ - Since there is no way to tell the source or destination ports of such
+ a packet (or ICMP type), such a packet will not match any rules which specify them.
+      - When the C(!) argument precedes the fragment argument, the rule will only match head fragments,
+        or unfragmented packets.
+ type: str
+ set_counters:
+ description:
+ - This enables the administrator to initialize the packet and byte
+ counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
+ type: str
+ source_port:
+ description:
+ - Source port or port range specification.
+ - This can either be a service name or a port number.
+ - An inclusive range can also be specified, using the format C(first:last).
+ - If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
+ - If the first port is greater than the second one they will be swapped.
+ type: str
+ destination_port:
+ description:
+      - Destination port or port range specification.
+      - This can either be a service name or a port number.
+      - An inclusive range can also be specified, using the format C(first:last).
+      - If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
+      - If the first port is greater than the second one they will be swapped.
+      - This is only valid if the rule also specifies one of the following protocols: C(tcp), C(udp), C(dccp) or C(sctp).
+ type: str
+ to_ports:
+ description:
+      - This specifies a destination port or range of ports to use; without
+        this, the destination port is never altered.
+      - This is only valid if the rule also specifies one of the protocols
+        C(tcp), C(udp), C(dccp) or C(sctp).
+ type: str
+ to_destination:
+ description:
+ - This specifies a destination address to use with C(DNAT).
+ - Without this, the destination address is never altered.
+ type: str
+ version_added: "2.1"
+ to_source:
+ description:
+ - This specifies a source address to use with C(SNAT).
+ - Without this, the source address is never altered.
+ type: str
+ version_added: "2.2"
+ syn:
+ description:
+ - This allows matching packets that have the SYN bit set and the ACK
+ and RST bits unset.
+ - When negated, this matches all packets with the RST or the ACK bits set.
+ type: str
+ choices: [ ignore, match, negate ]
+ default: ignore
+ version_added: "2.5"
+ set_dscp_mark:
+ description:
+ - This allows specifying a DSCP mark to be added to packets.
+ It takes either an integer or hex value.
+ - Mutually exclusive with C(set_dscp_mark_class).
+ type: str
+ version_added: "2.1"
+ set_dscp_mark_class:
+ description:
+ - This allows specifying a predefined DiffServ class which will be
+ translated to the corresponding DSCP mark.
+ - Mutually exclusive with C(set_dscp_mark).
+ type: str
+ version_added: "2.1"
+ comment:
+ description:
+ - This specifies a comment that will be added to the rule.
+ type: str
+ ctstate:
+ description:
+ - C(ctstate) is a list of the connection states to match in the conntrack module.
+      - Possible states are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
+ type: list
+ default: []
+ src_range:
+ description:
+ - Specifies the source IP range to match in the iprange module.
+ type: str
+ version_added: "2.8"
+ dst_range:
+ description:
+ - Specifies the destination IP range to match in the iprange module.
+ type: str
+ version_added: "2.8"
+ limit:
+ description:
+ - Specifies the maximum average number of matches to allow per second.
+      - The number can specify units explicitly, using C(/second), C(/minute),
+        C(/hour) or C(/day), or parts of them (so C(5/second) is the same as
+        C(5/s)).
+ type: str
+ limit_burst:
+ description:
+ - Specifies the maximum burst before the above limit kicks in.
+ type: str
+ version_added: "2.1"
+ uid_owner:
+ description:
+ - Specifies the UID or username to use in match by owner rule.
+      - From Ansible 2.6, when the C(!) argument is prepended, it inverts
+        the rule to apply instead to all users except the one specified.
+ type: str
+ version_added: "2.1"
+ gid_owner:
+ description:
+ - Specifies the GID or group to use in match by owner rule.
+ type: str
+ version_added: "2.9"
+ reject_with:
+ description:
+ - 'Specifies the error packet type to return while rejecting. It implies
+ "jump: REJECT"'
+ type: str
+ version_added: "2.1"
+ icmp_type:
+ description:
+      - This allows specification of the ICMP type, which can be a numeric
+        ICMP type, type/code pair, or one of the ICMP type names shown by the
+        command C(iptables -p icmp -h).
+ type: str
+ version_added: "2.2"
+ flush:
+ description:
+ - Flushes the specified table and chain of all rules.
+ - If no chain is specified then the entire table is purged.
+ - Ignores all other parameters.
+ type: bool
+ default: false
+ version_added: "2.2"
+ policy:
+ description:
+ - Set the policy for the chain to the given target.
+ - Only built-in chains can have policies.
+ - This parameter requires the C(chain) parameter.
+ - Ignores all other parameters.
+ type: str
+ choices: [ ACCEPT, DROP, QUEUE, RETURN ]
+ version_added: "2.2"
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent multiple instances of
+ the program from running concurrently.
+ type: str
+ version_added: "2.10"
+'''
+
+EXAMPLES = r'''
+- name: Block specific IP
+ iptables:
+ chain: INPUT
+ source: 8.8.8.8
+ jump: DROP
+ become: yes
+
+- name: Forward port 80 to 8600
+ iptables:
+ table: nat
+ chain: PREROUTING
+ in_interface: eth0
+ protocol: tcp
+ match: tcp
+ destination_port: 80
+ jump: REDIRECT
+ to_ports: 8600
+ comment: Redirect web traffic to port 8600
+ become: yes
+
+- name: Allow related and established connections
+ iptables:
+ chain: INPUT
+ ctstate: ESTABLISHED,RELATED
+ jump: ACCEPT
+ become: yes
+
+- name: Allow new incoming SYN packets on TCP port 22 (SSH)
+ iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_port: 22
+ ctstate: NEW
+ syn: match
+ jump: ACCEPT
+ comment: Accept new SSH connections.
+
+- name: Match on IP ranges
+ iptables:
+ chain: FORWARD
+ src_range: 192.168.1.100-192.168.1.199
+ dst_range: 10.0.0.1-10.0.0.50
+ jump: ACCEPT
+
+- name: Tag all outbound tcp packets with DSCP mark 8
+ iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark: 8
+ protocol: tcp
+
+- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
+ iptables:
+ chain: OUTPUT
+ jump: DSCP
+ table: mangle
+ set_dscp_mark_class: CS1
+ protocol: tcp
+
+- name: Insert a rule on line 5
+ iptables:
+ chain: INPUT
+ protocol: tcp
+ destination_port: 8080
+ jump: ACCEPT
+ action: insert
+ rule_num: 5
+
+- name: Set the policy for the INPUT chain to DROP
+ iptables:
+ chain: INPUT
+ policy: DROP
+
+- name: Reject tcp with tcp-reset
+ iptables:
+ chain: INPUT
+ protocol: tcp
+ reject_with: tcp-reset
+ ip_version: ipv4
+
+- name: Set tcp flags
+ iptables:
+ chain: OUTPUT
+ jump: DROP
+ protocol: tcp
+ tcp_flags:
+ flags: ALL
+ flags_set:
+ - ACK
+ - RST
+ - SYN
+ - FIN
+
+- name: Iptables flush filter
+ iptables:
+ chain: "{{ item }}"
+ flush: yes
+ with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
+
+- name: Iptables flush nat
+ iptables:
+ table: nat
+ chain: '{{ item }}'
+ flush: yes
+ with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
+
+- name: Log packets arriving into a user-defined chain
+ iptables:
+ chain: LOGGING
+ action: append
+ state: present
+ limit: 2/second
+ limit_burst: 20
+ log_prefix: "IPTABLES:INFO: "
+ log_level: info
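+
+# NOTE: A minimal sketch of the 'wait' option described above; the seconds
+# argument needs iptables >= 1.6.0, and older releases fall back as handled
+# in main() below.
+- name: Allow new SSH connections, waiting up to 10 seconds for the xtables lock
+  iptables:
+    chain: INPUT
+    protocol: tcp
+    destination_port: 22
+    jump: ACCEPT
+    wait: 10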
+'''
+
+import re
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
+
+IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
+
+BINS = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+ICMP_TYPE_OPTIONS = dict(
+ ipv4='--icmp-type',
+ ipv6='--icmpv6-type',
+)
+
+
+def append_param(rule, param, flag, is_list):
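+    # Append a single option to the rule; list-valued parameters are expanded
+    # recursively and a leading '!' is emitted as iptables negation syntax.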
+ if is_list:
+ for item in param:
+ append_param(rule, item, flag, False)
+ else:
+ if param is not None:
+ if param[0] == '!':
+ rule.extend(['!', flag, param[1:]])
+ else:
+ rule.extend([flag, param])
+
+
+def append_tcp_flags(rule, param, flag):
+ if param:
+ if 'flags' in param and 'flags_set' in param:
+ rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
+
+
+def append_match_flag(rule, param, flag, negatable):
+ if param == 'match':
+ rule.extend([flag])
+ elif negatable and param == 'negate':
+ rule.extend(['!', flag])
+
+
+def append_csv(rule, param, flag):
+ if param:
+ rule.extend([flag, ','.join(param)])
+
+
+def append_match(rule, param, match):
+ if param:
+ rule.extend(['-m', match])
+
+
+def append_jump(rule, param, jump):
+ if param:
+ rule.extend(['-j', jump])
+
+
+def append_wait(rule, param, flag):
+ if param:
+ rule.extend([flag, param])
+
+
+def construct_rule(params):
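+    # Translate the module parameters into a flat iptables argument list,
+    # implicitly loading match extensions ('-m ...') where a parameter needs one.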
+ rule = []
+ append_wait(rule, params['wait'], '-w')
+ append_param(rule, params['protocol'], '-p', False)
+ append_param(rule, params['source'], '-s', False)
+ append_param(rule, params['destination'], '-d', False)
+ append_param(rule, params['match'], '-m', True)
+ append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
+ append_param(rule, params['jump'], '-j', False)
+ if params.get('jump') and params['jump'].lower() == 'tee':
+ append_param(rule, params['gateway'], '--gateway', False)
+ append_param(rule, params['log_prefix'], '--log-prefix', False)
+ append_param(rule, params['log_level'], '--log-level', False)
+ append_param(rule, params['to_destination'], '--to-destination', False)
+ append_param(rule, params['to_source'], '--to-source', False)
+ append_param(rule, params['goto'], '-g', False)
+ append_param(rule, params['in_interface'], '-i', False)
+ append_param(rule, params['out_interface'], '-o', False)
+ append_param(rule, params['fragment'], '-f', False)
+ append_param(rule, params['set_counters'], '-c', False)
+ append_param(rule, params['source_port'], '--source-port', False)
+ append_param(rule, params['destination_port'], '--destination-port', False)
+ append_param(rule, params['to_ports'], '--to-ports', False)
+ append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
+ append_param(
+ rule,
+ params['set_dscp_mark_class'],
+ '--set-dscp-class',
+ False)
+ append_match_flag(rule, params['syn'], '--syn', True)
+ if 'conntrack' in params['match']:
+ append_csv(rule, params['ctstate'], '--ctstate')
+ elif 'state' in params['match']:
+ append_csv(rule, params['ctstate'], '--state')
+ elif params['ctstate']:
+ append_match(rule, params['ctstate'], 'conntrack')
+ append_csv(rule, params['ctstate'], '--ctstate')
+ if 'iprange' in params['match']:
+ append_param(rule, params['src_range'], '--src-range', False)
+ append_param(rule, params['dst_range'], '--dst-range', False)
+ elif params['src_range'] or params['dst_range']:
+ append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
+ append_param(rule, params['src_range'], '--src-range', False)
+ append_param(rule, params['dst_range'], '--dst-range', False)
+ append_match(rule, params['limit'] or params['limit_burst'], 'limit')
+ append_param(rule, params['limit'], '--limit', False)
+ append_param(rule, params['limit_burst'], '--limit-burst', False)
+ append_match(rule, params['uid_owner'], 'owner')
+ append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
+ append_param(rule, params['uid_owner'], '--uid-owner', False)
+ append_match(rule, params['gid_owner'], 'owner')
+ append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
+ append_param(rule, params['gid_owner'], '--gid-owner', False)
+ if params['jump'] is None:
+ append_jump(rule, params['reject_with'], 'REJECT')
+ append_param(rule, params['reject_with'], '--reject-with', False)
+ append_param(
+ rule,
+ params['icmp_type'],
+ ICMP_TYPE_OPTIONS[params['ip_version']],
+ False)
+ append_match(rule, params['comment'], 'comment')
+ append_param(rule, params['comment'], '--comment', False)
+ return rule
+
+
+def push_arguments(iptables_path, action, params, make_rule=True):
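+    # Build the full command line: binary, table, action plus chain, the rule
+    # number for inserts, and (optionally) the rule specification itself.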
+ cmd = [iptables_path]
+ cmd.extend(['-t', params['table']])
+ cmd.extend([action, params['chain']])
+ if action == '-I' and params['rule_num']:
+ cmd.extend([params['rule_num']])
+ if make_rule:
+ cmd.extend(construct_rule(params))
+ return cmd
+
+
+def check_present(iptables_path, module, params):
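+    # 'iptables -C' exits with 0 exactly when an identical rule already exists.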
+ cmd = push_arguments(iptables_path, '-C', params)
+ rc, _, __ = module.run_command(cmd, check_rc=False)
+ return (rc == 0)
+
+
+def append_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-A', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def insert_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-I', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def remove_rule(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-D', params)
+ module.run_command(cmd, check_rc=True)
+
+
+def flush_table(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
+ module.run_command(cmd, check_rc=True)
+
+
+def set_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
+ cmd.append(params['policy'])
+ module.run_command(cmd, check_rc=True)
+
+
+def get_chain_policy(iptables_path, module, params):
+ cmd = push_arguments(iptables_path, '-L', params)
+ rc, out, _ = module.run_command(cmd, check_rc=True)
+ chain_header = out.split("\n")[0]
+ result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
+ if result:
+ return result.group(1)
+ return None
+
+
+def get_iptables_version(iptables_path, module):
+ cmd = [iptables_path, '--version']
+ rc, out, _ = module.run_command(cmd, check_rc=True)
+ return out.split('v')[1].rstrip('\n')
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ action=dict(type='str', default='append', choices=['append', 'insert']),
+ ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
+ chain=dict(type='str'),
+ rule_num=dict(type='str'),
+ protocol=dict(type='str'),
+ wait=dict(type='str'),
+ source=dict(type='str'),
+ to_source=dict(type='str'),
+ destination=dict(type='str'),
+ to_destination=dict(type='str'),
+ match=dict(type='list', default=[]),
+ tcp_flags=dict(type='dict',
+ options=dict(
+ flags=dict(type='list'),
+ flags_set=dict(type='list'))
+ ),
+ jump=dict(type='str'),
+ gateway=dict(type='str'),
+ log_prefix=dict(type='str'),
+ log_level=dict(type='str',
+ choices=['0', '1', '2', '3', '4', '5', '6', '7',
+ 'emerg', 'alert', 'crit', 'error',
+ 'warning', 'notice', 'info', 'debug'],
+ default=None,
+ ),
+ goto=dict(type='str'),
+ in_interface=dict(type='str'),
+ out_interface=dict(type='str'),
+ fragment=dict(type='str'),
+ set_counters=dict(type='str'),
+ source_port=dict(type='str'),
+ destination_port=dict(type='str'),
+ to_ports=dict(type='str'),
+ set_dscp_mark=dict(type='str'),
+ set_dscp_mark_class=dict(type='str'),
+ comment=dict(type='str'),
+ ctstate=dict(type='list', default=[]),
+ src_range=dict(type='str'),
+ dst_range=dict(type='str'),
+ limit=dict(type='str'),
+ limit_burst=dict(type='str'),
+ uid_owner=dict(type='str'),
+ gid_owner=dict(type='str'),
+ reject_with=dict(type='str'),
+ icmp_type=dict(type='str'),
+ syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
+ flush=dict(type='bool', default=False),
+ policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
+ ),
+ mutually_exclusive=(
+ ['set_dscp_mark', 'set_dscp_mark_class'],
+ ['flush', 'policy'],
+ ),
+ required_if=[
+ ['jump', 'TEE', ['gateway']],
+ ['jump', 'tee', ['gateway']],
+ ]
+ )
+ args = dict(
+ changed=False,
+ failed=False,
+ ip_version=module.params['ip_version'],
+ table=module.params['table'],
+ chain=module.params['chain'],
+ flush=module.params['flush'],
+ rule=' '.join(construct_rule(module.params)),
+ state=module.params['state'],
+ )
+
+ ip_version = module.params['ip_version']
+ iptables_path = module.get_bin_path(BINS[ip_version], True)
+
+ # Check if chain option is required
+ if args['flush'] is False and args['chain'] is None:
+ module.fail_json(msg="Either chain or flush parameter must be specified.")
+
+ if module.params.get('log_prefix', None) or module.params.get('log_level', None):
+ if module.params['jump'] is None:
+ module.params['jump'] = 'LOG'
+ elif module.params['jump'] != 'LOG':
+ module.fail_json(msg="Logging options can only be used with the LOG jump target.")
+
+    # Check if the wait option is supported: drop it entirely on iptables
+    # releases without '-w', and blank the seconds value on releases that
+    # only support a bare '-w' without a timeout argument.
+ iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
+
+ if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
+ if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
+ module.params['wait'] = ''
+ else:
+ module.params['wait'] = None
+
+ # Flush the table
+ if args['flush'] is True:
+ args['changed'] = True
+ if not module.check_mode:
+ flush_table(iptables_path, module, module.params)
+
+ # Set the policy
+ elif module.params['policy']:
+ current_policy = get_chain_policy(iptables_path, module, module.params)
+ if not current_policy:
+ module.fail_json(msg='Can\'t detect current policy')
+
+ changed = current_policy != module.params['policy']
+ args['changed'] = changed
+ if changed and not module.check_mode:
+ set_chain_policy(iptables_path, module, module.params)
+
+ else:
+ insert = (module.params['action'] == 'insert')
+ rule_is_present = check_present(iptables_path, module, module.params)
+ should_be_present = (args['state'] == 'present')
+
+ # Check if target is up to date
+ args['changed'] = (rule_is_present != should_be_present)
+ if args['changed'] is False:
+ # Target is already up to date
+ module.exit_json(**args)
+
+        # Apply the change unless running in check mode
+ if not module.check_mode:
+ if should_be_present:
+ if insert:
+ insert_rule(iptables_path, module, module.params)
+ else:
+ append_rule(iptables_path, module, module.params)
+ else:
+ remove_rule(iptables_path, module, module.params)
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/known_hosts.py b/lib/ansible/modules/known_hosts.py
new file mode 100644
index 00000000..221e24cc
--- /dev/null
+++ b/lib/ansible/modules/known_hosts.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2014, Matthew Vernon <mcv21@cam.ac.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: known_hosts
+short_description: Add or remove a host from the C(known_hosts) file
+description:
+  - The C(known_hosts) module lets you add or remove host keys from the C(known_hosts) file.
+  - Starting with Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh.
+    This is useful if you want to use the M(ansible.builtin.git) module over ssh, for example.
+ - If you have a very large number of host keys to manage, you will find the M(ansible.builtin.template) module more useful.
+version_added: "1.9"
+options:
+ name:
+ aliases: [ 'host' ]
+ description:
+ - The host to add or remove (must match a host specified in key). It will be converted to lowercase so that ssh-keygen can find it.
+      - Must match the <hostname> or <ip> present in the key attribute.
+ - For custom SSH port, C(name) needs to specify port as well. See example section.
+ required: true
+ type: str
+ key:
+ description:
+ - The SSH public host key, as a string.
+ - Required if C(state=present), optional when C(state=absent), in which case all keys for the host are removed.
+ - The key must be in the right format for SSH (see sshd(8), section "SSH_KNOWN_HOSTS FILE FORMAT").
+ - Specifically, the key should not match the format that is found in an SSH pubkey file, but should rather have the hostname prepended to a
+ line that includes the pubkey, the same way that it would appear in the known_hosts file. The value prepended to the line must also match
+ the value of the name parameter.
+      - Should be of format C(<hostname[,IP]> ssh-rsa <pubkey>).
+ - For custom SSH port, C(key) needs to specify port as well. See example section.
+ type: str
+ path:
+ description:
+ - The known_hosts file to edit.
+ default: "~/.ssh/known_hosts"
+ type: path
+ hash_host:
+ description:
+ - Hash the hostname in the known_hosts file.
+ type: bool
+ default: "no"
+ version_added: "2.3"
+ state:
+ description:
+ - I(present) to add the host key.
+ - I(absent) to remove it.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+author:
+- Matthew Vernon (@mcv21)
+'''
+
+EXAMPLES = r'''
+- name: Tell the host about our servers it might want to ssh to
+ known_hosts:
+ path: /etc/ssh/ssh_known_hosts
+ name: foo.com.invalid
+ key: "{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
+
+- name: Another way to call known_hosts
+ known_hosts:
+ name: host1.example.com # or 10.9.8.77
+ key: host1.example.com,10.9.8.77 ssh-rsa ASDeararAIUHI324324 # some key gibberish
+ path: /etc/ssh/ssh_known_hosts
+ state: present
+
+- name: Add host with custom SSH port
+ known_hosts:
+ name: '[host1.example.com]:2222'
+ key: '[host1.example.com]:2222 ssh-rsa ASDeararAIUHI324324' # some key gibberish
+ path: /etc/ssh/ssh_known_hosts
+ state: present
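+
+# NOTE: An illustrative sketch of removal; with state=absent the 'key'
+# option may be omitted, in which case all keys for the host are removed.
+- name: Remove all host keys for a retired server
+  known_hosts:
+    name: retired.example.com
+    path: /etc/ssh/ssh_known_hosts
+    state: absent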
+'''
+
+# Makes sure public host keys are present or absent in the given known_hosts
+# file.
+#
+# Arguments
+# =========
+# name = hostname whose key should be added (alias: host)
+# key = line(s) to add to known_hosts file
+# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
+# hash_host = yes|no (default: no) hash the hostname in the known_hosts file
+# state = absent|present (default: present)
+
+import base64
+import errno
+import hashlib
+import hmac
+import os
+import os.path
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.file import FileLock
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def enforce_state(module, params):
+ """
+ Add or remove key.
+ """
+
+ host = params["name"].lower()
+ key = params.get("key", None)
+ path = params.get("path")
+ hash_host = params.get("hash_host")
+ state = params.get("state")
+ # Find the ssh-keygen binary
+ sshkeygen = module.get_bin_path("ssh-keygen", True)
+
+ if not key and state != "absent":
+ module.fail_json(msg="No key specified when adding a host")
+
+ if key and hash_host:
+ key = hash_host_key(host, key)
+
+ # Trailing newline in files gets lost, so re-add if necessary
+ if key and not key.endswith('\n'):
+ key += '\n'
+
+ sanity_check(module, host, key, sshkeygen)
+
+ found, replace_or_add, found_line = search_for_host_key(module, host, key, path, sshkeygen)
+
+ params['diff'] = compute_diff(path, found_line, replace_or_add, state, key)
+
+ # We will change state if found==True & state!="present"
+ # or found==False & state=="present"
+ # i.e found XOR (state=="present")
+ # Alternatively, if replace is true (i.e. key present, and we must change
+ # it)
+ if module.check_mode:
+ module.exit_json(changed=replace_or_add or (state == "present") != found,
+ diff=params['diff'])
+
+ # Now do the work.
+
+ # Only remove whole host if found and no key provided
+ if found and not key and state == "absent":
+ module.run_command([sshkeygen, '-R', host, '-f', path], check_rc=True)
+ params['changed'] = True
+
+ # Next, add a new (or replacing) entry
+ if replace_or_add or found != (state == "present"):
+ try:
+ inf = open(path, "r")
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ inf = None
+ else:
+ module.fail_json(msg="Failed to read %s: %s" % (path, str(e)))
+ try:
+ with tempfile.NamedTemporaryFile(mode='w+', dir=os.path.dirname(path), delete=False) as outf:
+ if inf is not None:
+ for line_number, line in enumerate(inf):
+ if found_line == (line_number + 1) and (replace_or_add or state == 'absent'):
+ continue # skip this line to replace its key
+ outf.write(line)
+ inf.close()
+ if state == 'present':
+ outf.write(key)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Failed to write to file %s: %s" % (path, to_native(e)))
+ else:
+ module.atomic_move(outf.name, path)
+
+ params['changed'] = True
+
+ return params
+
+
+def sanity_check(module, host, key, sshkeygen):
+ '''Check supplied key is sensible
+
+ host and key are parameters provided by the user; If the host
+ provided is inconsistent with the key supplied, then this function
+ quits, providing an error to the user.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ # If no key supplied, we're doing a removal, and have nothing to check here.
+ if not key:
+ return
+ # Rather than parsing the key ourselves, get ssh-keygen to do it
+ # (this is essential for hashed keys, but otherwise useful, as the
+ # key question is whether ssh-keygen thinks the key matches the host).
+
+ # The approach is to write the key to a temporary file,
+ # and then attempt to look up the specified host in that file.
+
+ if re.search(r'\S+(\s+)?,(\s+)?', host):
+ module.fail_json(msg="Comma separated list of names is not supported. "
+ "Please pass a single name to lookup in the known_hosts file.")
+
+ with tempfile.NamedTemporaryFile(mode='w+') as outf:
+ try:
+ outf.write(key)
+ outf.flush()
+ except IOError as e:
+ module.fail_json(msg="Failed to write to temporary file %s: %s" %
+ (outf.name, to_native(e)))
+
+ sshkeygen_command = [sshkeygen, '-F', host, '-f', outf.name]
+ rc, stdout, stderr = module.run_command(sshkeygen_command)
+
+ if stdout == '': # host not found
+ module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
+
+
+def search_for_host_key(module, host, key, path, sshkeygen):
+ '''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line)
+
+ Looks up host and keytype in the known_hosts file path; if it's there, looks to see
+ if one of those entries matches key. Returns:
+ found (Boolean): is host found in path?
+ replace_or_add (Boolean): is the key in path different to that supplied by user?
+ found_line (int or None): the line where a key of the same type was found
+ if found=False, then replace is always False.
+ sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
+ '''
+ if os.path.exists(path) is False:
+ return False, False, None
+
+ sshkeygen_command = [sshkeygen, '-F', host, '-f', path]
+
+ # openssh >=6.4 has changed ssh-keygen behaviour such that it returns
+ # 1 if no host is found, whereas previously it returned 0
+ rc, stdout, stderr = module.run_command(sshkeygen_command, check_rc=False)
+ if stdout == '' and stderr == '' and (rc == 0 or rc == 1):
+ return False, False, None # host not found, no other errors
+ if rc != 0: # something went wrong
+ module.fail_json(msg="ssh-keygen failed (rc=%d, stdout='%s',stderr='%s')" % (rc, stdout, stderr))
+
+ # If user supplied no key, we don't want to try and replace anything with it
+ if not key:
+ return True, False, None
+
+ lines = stdout.split('\n')
+ new_key = normalize_known_hosts_key(key)
+
+ for lnum, l in enumerate(lines):
+ if l == '':
+ continue
+ elif l[0] == '#': # info output from ssh-keygen; contains the line number where key was found
+ try:
+ # This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0
+ # It always outputs the non-localized comment before the found key
+ found_line = int(re.search(r'found: line (\d+)', l).group(1))
+            except (IndexError, AttributeError):  # pattern did not match this line
+ module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l)
+ else:
+ found_key = normalize_known_hosts_key(l)
+ if new_key['host'][:3] == '|1|' and found_key['host'][:3] == '|1|': # do not change host hash if already hashed
+ new_key['host'] = found_key['host']
+ if new_key == found_key: # found a match
+ return True, False, found_line # found exactly the same key, don't replace
+ elif new_key['type'] == found_key['type']: # found a different key for the same key type
+ return True, True, found_line
+
+ # No match found, return found and replace, but no line
+ return True, True, None
+
+
+def hash_host_key(host, key):
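+    # Hash the host part into the hashed known_hosts format,
+    # '|1|<b64 salt>|<b64 HMAC-SHA1 digest>', using a random 20-byte salt.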
+ hmac_key = os.urandom(20)
+ hashed_host = hmac.new(hmac_key, to_bytes(host), hashlib.sha1).digest()
+ parts = key.strip().split()
+ # @ indicates the optional marker field used for @cert-authority or @revoked
+ i = 1 if parts[0][0] == '@' else 0
+ parts[i] = '|1|%s|%s' % (to_native(base64.b64encode(hmac_key)), to_native(base64.b64encode(hashed_host)))
+ return ' '.join(parts)
+
+
+def normalize_known_hosts_key(key):
+ '''
+ Transform a key, either taken from a known_host file or provided by the
+ user, into a normalized form.
+ The host part (which might include multiple hostnames or be hashed) gets
+ replaced by the provided host. Also, any spurious information gets removed
+ from the end (like the username@host tag usually present in hostkeys, but
+ absent in known_hosts files)
+ '''
+ key = key.strip() # trim trailing newline
+ k = key.split()
+ d = dict()
+ # The optional "marker" field, used for @cert-authority or @revoked
+ if k[0][0] == '@':
+ d['options'] = k[0]
+ d['host'] = k[1]
+ d['type'] = k[2]
+ d['key'] = k[3]
+ else:
+ d['host'] = k[0]
+ d['type'] = k[1]
+ d['key'] = k[2]
+ return d
+
+
+def compute_diff(path, found_line, replace_or_add, state, key):
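+    # Build a before/after view of the known_hosts file that mirrors the
+    # removal and/or addition enforce_state() is about to perform.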
+ diff = {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': '',
+ 'after': '',
+ }
+ try:
+ inf = open(path, "r")
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ diff['before_header'] = '/dev/null'
+ else:
+ diff['before'] = inf.read()
+ inf.close()
+ lines = diff['before'].splitlines(1)
+ if (replace_or_add or state == 'absent') and found_line is not None and 1 <= found_line <= len(lines):
+ del lines[found_line - 1]
+ if state == 'present' and (replace_or_add or found_line is None):
+ lines.append(key)
+ diff['after'] = ''.join(lines)
+ return diff
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['host']),
+ key=dict(required=False, type='str'),
+ path=dict(default="~/.ssh/known_hosts", type='path'),
+ hash_host=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True
+ )
+
+ results = enforce_state(module, module.params)
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/lineinfile.py b/lib/ansible/modules/lineinfile.py
new file mode 100644
index 00000000..4e856e50
--- /dev/null
+++ b/lib/ansible/modules/lineinfile.py
@@ -0,0 +1,577 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lineinfile
+short_description: Manage lines in text files
+description:
+  - This module ensures a particular line is in a file, or replaces an
+    existing line using a back-referenced regular expression.
+ - This is primarily useful when you want to change a single line in a file only.
+ - See the M(ansible.builtin.replace) module if you want to change multiple, similar lines
+ or check M(ansible.builtin.blockinfile) if you want to insert/update/remove a block of lines in a file.
+ For other cases, see the M(ansible.builtin.copy) or M(ansible.builtin.template) modules.
+version_added: "0.7"
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: true
+ aliases: [ dest, destfile, name ]
+ regexp:
+ description:
+ - The regular expression to look for in every line of the file.
+ - For C(state=present), the pattern to replace if found. Only the last line found will be replaced.
+ - For C(state=absent), the pattern of the line(s) to remove.
+ - If the regular expression is not matched, the line will be
+ added to the file in keeping with C(insertbefore) or C(insertafter)
+ settings.
+ - When modifying a line the regexp should typically match both the initial state of
+ the line as well as its state after replacement by C(line) to ensure idempotence.
+ - Uses Python regular expressions. See U(https://docs.python.org/3/library/re.html).
+ type: str
+ aliases: [ regex ]
+ version_added: '1.7'
+ state:
+ description:
+ - Whether the line should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ line:
+ description:
+ - The line to insert/replace into the file.
+ - Required for C(state=present).
+ - If C(backrefs) is set, may contain backreferences that will get
+ expanded with the C(regexp) capture groups if the regexp matches.
+ type: str
+ aliases: [ value ]
+ backrefs:
+ description:
+ - Used with C(state=present).
+ - If set, C(line) can contain backreferences (both positional and named)
+ that will get populated if the C(regexp) matches.
+ - This parameter changes the operation of the module slightly;
+ C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
+ does not match anywhere in the file, the file will be left unchanged.
+ - If the C(regexp) does match, the last matching line will be replaced by
+ the expanded line parameter.
+ type: bool
+ default: no
+ version_added: "1.1"
+ insertafter:
+ description:
+ - Used with C(state=present).
+ - If specified, the line will be inserted after the last match of specified regular expression.
+      - If the first match is required, use C(firstmatch=yes).
+ - A special value is available; C(EOF) for inserting the line at the end of the file.
+ - If specified regular expression has no matches, EOF will be used instead.
+ - If C(insertbefore) is set, default value C(EOF) will be ignored.
+ - If regular expressions are passed to both C(regexp) and C(insertafter), C(insertafter) is only honored if no match for C(regexp) is found.
+ - May not be used with C(backrefs) or C(insertbefore).
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+ - Used with C(state=present).
+ - If specified, the line will be inserted before the last match of specified regular expression.
+ - If the first match is required, use C(firstmatch=yes).
+      - A special value is available; C(BOF) for inserting the line at the beginning of the file.
+ - If specified regular expression has no matches, the line will be inserted at the end of the file.
+ - If regular expressions are passed to both C(regexp) and C(insertbefore), C(insertbefore) is only honored if no match for C(regexp) is found.
+ - May not be used with C(backrefs) or C(insertafter).
+ type: str
+ choices: [ BOF, '*regex*' ]
+ version_added: "1.1"
+ create:
+ description:
+ - Used with C(state=present).
+ - If specified, the file will be created if it does not already exist.
+ - By default it will fail if the file is missing.
+ type: bool
+ default: no
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ firstmatch:
+ description:
+ - Used with C(insertafter) or C(insertbefore).
+ - If set, C(insertafter) and C(insertbefore) will work with the first line that matches the given regular expression.
+ type: bool
+ default: no
+ version_added: "2.5"
+ others:
+ description:
+ - All arguments accepted by the M(ansible.builtin.file) module also work here.
+ type: str
+extends_documentation_fragment:
+ - files
+ - validate
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+seealso:
+- module: ansible.builtin.blockinfile
+- module: ansible.builtin.copy
+- module: ansible.builtin.file
+- module: ansible.builtin.replace
+- module: ansible.builtin.template
+- module: community.windows.win_lineinfile
+author:
+    - Daniel Hokka Zakrisson (@dhozac)
+ - Ahti Kitsik (@ahtik)
+'''
+
+EXAMPLES = r'''
+# NOTE: Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
+- name: Ensure SELinux is set to enforcing mode
+ lineinfile:
+ path: /etc/selinux/config
+ regexp: '^SELINUX='
+ line: SELINUX=enforcing
+
+- name: Make sure group wheel is not in the sudoers configuration
+ lineinfile:
+ path: /etc/sudoers
+ state: absent
+ regexp: '^%wheel'
+
+- name: Replace a localhost entry with our own
+ lineinfile:
+ path: /etc/hosts
+ regexp: '^127\.0\.0\.1'
+ line: 127.0.0.1 localhost
+ owner: root
+ group: root
+ mode: '0644'
+
+- name: Ensure the default Apache port is 8080
+ lineinfile:
+ path: /etc/httpd/conf/httpd.conf
+ regexp: '^Listen '
+ insertafter: '^#Listen '
+ line: Listen 8080
+
+- name: Ensure we have our own comment added to /etc/services
+ lineinfile:
+ path: /etc/services
+ regexp: '^# port for http'
+ insertbefore: '^www.*80/tcp'
+ line: '# port for http by default'
+
+- name: Add a line to a file if the file does not exist, without passing regexp
+ lineinfile:
+ path: /tmp/testfile
+ line: 192.168.1.99 foo.lab.net foo
+ create: yes
+
+# NOTE: Yaml requires escaping backslashes in double quotes but not in single quotes
+- name: Ensure the JBoss memory settings are exactly as needed
+ lineinfile:
+ path: /opt/jboss-as/bin/standalone.conf
+ regexp: '^(.*)Xms(\d+)m(.*)$'
+ line: '\1Xms${xms}m\3'
+ backrefs: yes
+
+# NOTE: Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
+- name: Validate the sudoers file before saving
+ lineinfile:
+ path: /etc/sudoers
+ state: present
+ regexp: '^%ADMIN ALL='
+ line: '%ADMIN ALL=(ALL) NOPASSWD: ALL'
+ validate: /usr/sbin/visudo -cf %s
+
+# See https://docs.python.org/3/library/re.html for further details on syntax
+- name: Use backrefs with alternative group syntax to avoid conflicts with variable values
+ lineinfile:
+ path: /tmp/config
+ regexp: ^(host=).*
+ line: \g<1>{{ hostname }}
+ backrefs: yes
+'''
+
+import os
+import re
+import tempfile
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def write_changes(module, b_lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ with os.fdopen(tmpfd, 'wb') as f:
+ f.writelines(b_lines)
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
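+ # The validate command must contain a '%s' placeholder, which is replaced
+ # with the path of the temporary file, e.g. validate='/usr/sbin/visudo -cf %s'
+ # (see the sudoers example in EXAMPLES above).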
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict'))
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile,
+ to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'),
+ unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message, diff):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def present(module, dest, regexp, line, insertafter, insertbefore, create,
+ backup, backrefs, firstmatch):
+
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % dest,
+ 'after_header': '%s (content)' % dest}
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist!' % dest)
+ b_destpath = os.path.dirname(b_dest)
+ if b_destpath and not os.path.exists(b_destpath) and not module.check_mode:
+ try:
+ os.makedirs(b_destpath)
+ except Exception as e:
+ module.fail_json(msg='Error creating %s (%s)' % (to_text(b_destpath), to_text(e)))
+
+ b_lines = []
+ else:
+ with open(b_dest, 'rb') as f:
+ b_lines = f.readlines()
+
+ if module._diff:
+ diff['before'] = to_native(b''.join(b_lines))
+
+ if regexp is not None:
+ bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
+
+ if insertafter not in (None, 'BOF', 'EOF'):
+ bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
+ elif insertbefore not in (None, 'BOF'):
+ bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
+ else:
+ bre_ins = None
+
+ # index[0] is the line num where regexp has been found
+ # index[1] is the line num where insertafter/insertbefore has been found
+ index = [-1, -1]
+ match = None
+ exact_line_match = False
+ b_line = to_bytes(line, errors='surrogate_or_strict')
+
+ # The module's doc says
+ # "If regular expressions are passed to both regexp and
+ # insertafter, insertafter is only honored if no match for regexp is found."
+ # Therefore:
+ # 1. regexp was found -> ignore insertafter, replace the found line
+ # 2. regexp was not found -> insert the line after 'insertafter' or 'insertbefore' line
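+ # Editorial illustration, reusing the Apache example from EXAMPLES: with
+ # regexp='^Listen ' and insertafter='^#Listen ', an existing 'Listen 80'
+ # line is replaced in place, while a file containing only '#Listen 80'
+ # has the new line inserted directly after it.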
+
+ # Given the above:
+ # 1. First check that there is no match for regexp:
+ if regexp is not None:
+ for lineno, b_cur_line in enumerate(b_lines):
+ match_found = bre_m.search(b_cur_line)
+ if match_found:
+ index[0] = lineno
+ match = match_found
+ if firstmatch:
+ break
+
+ # 2. When no match found on the previous step,
+ # parse for searching insertafter/insertbefore:
+ if not match:
+ for lineno, b_cur_line in enumerate(b_lines):
+ if b_line == b_cur_line.rstrip(b'\r\n'):
+ index[0] = lineno
+ exact_line_match = True
+
+ elif bre_ins is not None and bre_ins.search(b_cur_line):
+ if insertafter:
+ # + 1 for the next line
+ index[1] = lineno + 1
+ if firstmatch:
+ break
+
+ if insertbefore:
+ # index[1] for the previous line
+ index[1] = lineno
+ if firstmatch:
+ break
+
+ msg = ''
+ changed = False
+ b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict')
+ # Exact line or Regexp matched a line in the file
+ if index[0] != -1:
+ if backrefs and match:
+ b_new_line = match.expand(b_line)
+ else:
+ # Don't do backref expansion if not asked.
+ b_new_line = b_line
+
+ if not b_new_line.endswith(b_linesep):
+ b_new_line += b_linesep
+
+ # If no regexp was given and no line match is found anywhere in the file,
+ # insert the line appropriately if using insertbefore or insertafter
+ if regexp is None and match is None and not exact_line_match:
+
+ # Insert lines
+ if insertafter and insertafter != 'EOF':
+ # Ensure there is a line separator after the found string
+ # at the end of the file.
+ if b_lines and b_lines[-1][-1:] not in (b'\n', b'\r'):
+ b_lines[-1] = b_lines[-1] + b_linesep
+
+ # If the line to insert after is at the end of the file
+ # use the appropriate index value.
+ if len(b_lines) == index[1]:
+ if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ elif b_lines[index[1]].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif insertbefore and insertbefore != 'BOF':
+ # If the line to insert before is at the beginning of the file
+ # use the appropriate index value.
+ if index[1] <= 0:
+ if b_lines[index[1]].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif b_lines[index[0]] != b_new_line:
+ b_lines[index[0]] = b_new_line
+ msg = 'line replaced'
+ changed = True
+
+ elif backrefs:
+ # Do absolutely nothing, since it's not safe generating the line
+ # without the regexp matching to populate the backrefs.
+ pass
+ # Add it to the beginning of the file
+ elif insertbefore == 'BOF' or insertafter == 'BOF':
+ b_lines.insert(0, b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ # Add it to the end of the file if requested or
+ # if insertafter/insertbefore didn't match anything
+ # (so default behaviour is to add at the end)
+ elif insertafter == 'EOF' or index[1] == -1:
+
+ # If the file is not empty then ensure there's a newline before the added line
+ if b_lines and b_lines[-1][-1:] not in (b'\n', b'\r'):
+ b_lines.append(b_linesep)
+
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ elif insertafter and index[1] != -1:
+
+ # Don't insert the line if it already matches at the index.
+ # If the line to insert after is at the end of the file use the appropriate index value.
+ if len(b_lines) == index[1]:
+ if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line:
+ b_lines.append(b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+ elif b_line != b_lines[index[1]].rstrip(b'\n\r'):
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ # insert matched, but not the regexp
+ else:
+ b_lines.insert(index[1], b_line + b_linesep)
+ msg = 'line added'
+ changed = True
+
+ if module._diff:
+ diff['after'] = to_native(b''.join(b_lines))
+
+ backupdest = ""
+ if changed and not module.check_mode:
+ if backup and os.path.exists(b_dest):
+ backupdest = module.backup_local(dest)
+ write_changes(module, b_lines, dest)
+
+ if module.check_mode and not os.path.exists(b_dest):
+ module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % dest
+ attr_diff['after_header'] = '%s (file attributes)' % dest
+
+ difflist = [diff, attr_diff]
+ module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
+
+
+def absent(module, dest, regexp, line, backup):
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ module.exit_json(changed=False, msg="file not present")
+
+ msg = ''
+ diff = {'before': '',
+ 'after': '',
+ 'before_header': '%s (content)' % dest,
+ 'after_header': '%s (content)' % dest}
+
+ with open(b_dest, 'rb') as f:
+ b_lines = f.readlines()
+
+ if module._diff:
+ diff['before'] = to_native(b''.join(b_lines))
+
+ if regexp is not None:
+ bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict'))
+ found = []
+
+ b_line = to_bytes(line, errors='surrogate_or_strict')
+
+ def matcher(b_cur_line):
+ if regexp is not None:
+ match_found = bre_c.search(b_cur_line)
+ else:
+ match_found = b_line == b_cur_line.rstrip(b'\r\n')
+ if match_found:
+ found.append(b_cur_line)
+ return not match_found
+
+ b_lines = [l for l in b_lines if matcher(l)]
+ changed = len(found) > 0
+
+ if module._diff:
+ diff['after'] = to_native(b''.join(b_lines))
+
+ backupdest = ""
+ if changed and not module.check_mode:
+ if backup:
+ backupdest = module.backup_local(dest)
+ write_changes(module, b_lines, dest)
+
+ if changed:
+ msg = "%s line(s) removed" % len(found)
+
+ attr_diff = {}
+ msg, changed = check_file_attrs(module, changed, msg, attr_diff)
+
+ attr_diff['before_header'] = '%s (file attributes)' % dest
+ attr_diff['after_header'] = '%s (file attributes)' % dest
+
+ difflist = [diff, attr_diff]
+
+ module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ regexp=dict(type='str', aliases=['regex']),
+ line=dict(type='str', aliases=['value']),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ backrefs=dict(type='bool', default=False),
+ create=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ firstmatch=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ create = params['create']
+ backup = params['backup']
+ backrefs = params['backrefs']
+ path = params['path']
+ firstmatch = params['firstmatch']
+ regexp = params['regexp']
+ line = params['line']
+
+ if regexp == '':
+ module.warn(
+ "The regular expression is an empty string, which will match every line in the file. "
+ "This may have unintended consequences, such as replacing the last line in the file rather than appending. "
+ "If this is desired, use '^' to match every line in the file and avoid this warning.")
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if os.path.isdir(b_path):
+ module.fail_json(rc=256, msg='Path %s is a directory!' % path)
+
+ if params['state'] == 'present':
+ if backrefs and regexp is None:
+ module.fail_json(msg='regexp is required with backrefs=true')
+
+ if line is None:
+ module.fail_json(msg='line is required with state=present')
+
+ # Deal with the insertafter default value manually, to avoid errors
+ # because of the mutually_exclusive mechanism.
+ ins_bef, ins_aft = params['insertbefore'], params['insertafter']
+ if ins_bef is None and ins_aft is None:
+ ins_aft = 'EOF'
+
+ present(module, path, regexp, line,
+ ins_aft, ins_bef, create, backup, backrefs, firstmatch)
+ else:
+ if regexp is None and line is None:
+ module.fail_json(msg='one of line or regexp is required with state=absent')
+
+ absent(module, path, regexp, line, backup)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/meta.py b/lib/ansible/modules/meta.py
new file mode 100644
index 00000000..6381636b
--- /dev/null
+++ b/lib/ansible/modules/meta.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, a Red Hat company
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: meta
+short_description: Execute Ansible 'actions'
+version_added: '1.2'
+description:
+ - Meta tasks are a special kind of task which can influence Ansible internal execution or state.
+ - Meta tasks can be used anywhere within your playbook.
+ - This module is also supported for Windows targets.
+options:
+ free_form:
+ description:
+ - This module takes a free form command, as a string. There is not an actual option named "free form". See the examples!
+ - C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain
+ points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
+ - C(refresh_inventory) (added in Ansible 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
+ re-executed. If the dynamic inventory script is using a cache, Ansible cannot know this and has no way of refreshing it (you can disable the cache
+ or, if available for your specific inventory datasource (e.g. aws), you can use an inventory plugin instead of an inventory script).
+ This is mainly useful when additional hosts are created and users wish to use them instead of using the M(ansible.builtin.add_host) module.
+ - C(noop) (added in Ansible 2.0) literally does 'nothing'. It is mainly used internally and not recommended for general use.
+ - C(clear_facts) (added in Ansible 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared,
+ including the fact cache.
+ - C(clear_host_errors) (added in Ansible 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts.
+ - C(end_play) (added in Ansible 2.2) causes the play to end without failing the host(s). Note that this affects all hosts.
+ - C(reset_connection) (added in Ansible 2.3) interrupts a persistent connection (i.e. ssh + control persist).
+ - C(end_host) (added in Ansible 2.8) is a per-host variation of C(end_play). Causes the play to end for the current host without failing it.
+ choices: [ clear_facts, clear_host_errors, end_host, end_play, flush_handlers, noop, refresh_inventory, reset_connection ]
+ required: true
+notes:
+ - C(meta) is not really a module nor an action plugin; as such, it cannot be overwritten.
+ - C(clear_facts) will remove the persistent facts from M(ansible.builtin.set_fact) using C(cacheable=True),
+ but not the current host variable it creates for the current run.
+ - Looping on meta tasks is not supported.
+ - Skipping C(meta) tasks with tags is not supported before Ansible 2.11.
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.assert
+- module: ansible.builtin.fail
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = r'''
+# Example showing flushing handlers on demand, not at end of play
+- template:
+ src: new.j2
+ dest: /etc/config.txt
+ notify: myhandler
+
+- name: Force all notified handlers to run at this point, not waiting for normal sync points
+ meta: flush_handlers
+
+# Example showing how to refresh inventory during play
+- name: Reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
+ cloud_guest: # this is fake module
+ name: newhost
+ state: present
+
+- name: Refresh inventory to ensure new instances exist in inventory
+ meta: refresh_inventory
+
+# Example showing how to clear all existing facts of targeted hosts
+- name: Clear gathered facts from all currently targeted hosts
+ meta: clear_facts
+
+# Example showing how to continue using a failed target
+- name: Bring host back to play after failure
+ copy:
+ src: file
+ dest: /etc/file
+ remote_user: imightnothavepermission
+
+- meta: clear_host_errors
+
+# Example showing how to reset an existing connection
+- user:
+ name: '{{ ansible_user }}'
+ groups: input
+
+- name: Reset ssh connection to allow user changes to affect 'current login user'
+ meta: reset_connection
+
+# Example showing how to end the play for specific targets
+- name: End the play for hosts that run CentOS 6
+ meta: end_host
+ when:
+ - ansible_distribution == 'CentOS'
+ - ansible_distribution_major_version == '6'
+'''
diff --git a/lib/ansible/modules/package.py b/lib/ansible/modules/package.py
new file mode 100644
index 00000000..a99dab1d
--- /dev/null
+++ b/lib/ansible/modules/package.py
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: package
+version_added: 2.0
+author:
+ - Ansible Core Team
+short_description: Generic OS package manager
+description:
+ - Installs, upgrades and removes packages using the underlying OS package manager.
+ - For Windows targets, use the M(ansible.windows.win_package) module instead.
+options:
+ name:
+ description:
+ - Package name, or package specifier with version.
+ - Syntax varies with package manager. For example C(name-1.0) or C(name=1.0).
+ - Package names also vary with package manager; this module will not "translate" them per distro. For example C(libyaml-dev), C(libyaml-devel).
+ required: true
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - You can use other states like C(latest) ONLY if they are supported by the underlying package module(s) executed.
+ required: true
+ use:
+ description:
+ - The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
+ - You should only use this field if the automatic selection is not working for some reason.
+ required: false
+ default: auto
+requirements:
+ - Whatever is required for the package plugins specific for each system.
+notes:
+ - This module actually calls the pertinent package modules for each system (apt, yum, etc).
+ - For Windows targets, use the M(ansible.windows.win_package) module instead.
+'''
+EXAMPLES = '''
+- name: Install ntpdate
+ package:
+ name: ntpdate
+ state: present
+
+# This uses a variable as this changes per distribution.
+- name: Remove the apache package
+ package:
+ name: "{{ apache }}"
+ state: absent
+
+- name: Install the latest version of Apache and MariaDB
+ package:
+ name:
+ - httpd
+ - mariadb-server
+ state: latest
+'''
diff --git a/lib/ansible/modules/package_facts.py b/lib/ansible/modules/package_facts.py
new file mode 100644
index 00000000..ed314b4a
--- /dev/null
+++ b/lib/ansible/modules/package_facts.py
@@ -0,0 +1,449 @@
+#!/usr/bin/python
+# (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# most of it copied from AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: package_facts
+short_description: Package information as facts
+description:
+ - Return information about installed packages as facts.
+options:
+ manager:
+ description:
+ - The package manager used by the system so we can query the package information.
+ - Since 2.8 this is a list and can support multiple package managers per system.
+ - The 'portage' and 'pkg' options were added in version 2.8.
+ default: ['auto']
+ choices: ['auto', 'rpm', 'apt', 'portage', 'pkg', 'pacman']
+ required: False
+ type: list
+ strategy:
+ description:
+ - This option controls how the module queries the package managers on the system.
+ C(first) means it will return only information for the first supported package manager available.
+ C(all) will return information for all supported and available package managers on the system.
+ choices: ['first', 'all']
+ default: 'first'
+ version_added: "2.8"
+version_added: "2.5"
+requirements:
+ - For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
+ - For Debian-based systems C(python-apt) package must be installed on targeted hosts.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Gather the package facts
+ package_facts:
+ manager: auto
+
+- name: Print the package facts
+ debug:
+ var: ansible_facts.packages
+
+- name: Check whether a package called foobar is installed
+ debug:
+ msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
+ when: "'foobar' in ansible_facts.packages"
+
+'''
+
+RETURN = '''
+ansible_facts:
+ description: facts to add to ansible_facts
+ returned: always
+ type: complex
+ contains:
+ packages:
+ description:
+ - Maps the package name to a non-empty list of dicts with package information.
+ - Every dict in the list corresponds to one installed version of the package.
+ - The fields described below are present for all package managers. Depending on the
+ package manager, there might be more fields for a package.
+ returned: when an operating system level package manager is specified or auto-detected
+ type: dict
+ contains:
+ name:
+ description: The package's name.
+ returned: always
+ type: str
+ version:
+ description: The package's version.
+ returned: always
+ type: str
+ source:
+ description: Where information on the package came from.
+ returned: always
+ type: str
+ sample: |-
+ {
+ "packages": {
+ "kernel": [
+ {
+ "name": "kernel",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ },
+ {
+ "name": "kernel",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ },
+ ...
+ ],
+ "kernel-tools": [
+ {
+ "name": "kernel-tools",
+ "source": "rpm",
+ "version": "3.10.0",
+ ...
+ }
+ ],
+ ...
+ }
+ }
+ # Sample rpm
+ {
+ "packages": {
+ "kernel": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.26.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.16.1.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.10.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "514.21.1.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ },
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ "kernel-tools": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel-tools",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ "kernel-tools-libs": [
+ {
+ "arch": "x86_64",
+ "epoch": null,
+ "name": "kernel-tools-libs",
+ "release": "693.2.2.el7",
+ "source": "rpm",
+ "version": "3.10.0"
+ }
+ ],
+ }
+ }
+ # Sample deb
+ {
+ "packages": {
+ "libbz2-1.0": [
+ {
+ "version": "1.0.6-5",
+ "source": "apt",
+ "arch": "amd64",
+ "name": "libbz2-1.0"
+ }
+ ],
+ "patch": [
+ {
+ "version": "2.7.1-4ubuntu1",
+ "source": "apt",
+ "arch": "amd64",
+ "name": "patch"
+ }
+ ],
+ }
+ }
+'''
+
+import re
+
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
+
+
+class RPM(LibMgr):
+
+ LIB = 'rpm'
+
+ def list_installed(self):
+ return self._lib.TransactionSet().dbMatch()
+
+ def get_package_details(self, package):
+ return dict(name=package[self._lib.RPMTAG_NAME],
+ version=package[self._lib.RPMTAG_VERSION],
+ release=package[self._lib.RPMTAG_RELEASE],
+ epoch=package[self._lib.RPMTAG_EPOCH],
+ arch=package[self._lib.RPMTAG_ARCH],)
+
+ def is_available(self):
+ '''We expect the Python bindings to be installed; warn if they are missing but the rpm CLI is present.'''
+ we_have_lib = super(RPM, self).is_available()
+
+ try:
+ get_bin_path('rpm')
+ if not we_have_lib:
+ module.warn('Found "rpm" but %s' % (missing_required_lib('rpm')))
+ except ValueError:
+ pass
+
+ return we_have_lib
+
+
+class APT(LibMgr):
+
+ LIB = 'apt'
+
+ def __init__(self):
+ self._cache = None
+ super(APT, self).__init__()
+
+ @property
+ def pkg_cache(self):
+ if self._cache is not None:
+ return self._cache
+
+ self._cache = self._lib.Cache()
+ return self._cache
+
+ def is_available(self):
+ '''We expect the Python bindings to be installed; if apt/apt-get is present without them, warn about the missing bindings.'''
+ we_have_lib = super(APT, self).is_available()
+ if not we_have_lib:
+ for exe in ('apt', 'apt-get', 'aptitude'):
+ try:
+ get_bin_path(exe)
+ except ValueError:
+ continue
+ else:
+ module.warn('Found "%s" but %s' % (exe, missing_required_lib('apt')))
+ break
+ return we_have_lib
+
+ def list_installed(self):
+ # Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
+ cache = self.pkg_cache
+ return [pk for pk in cache.keys() if cache[pk].is_installed]
+
+ def get_package_details(self, package):
+ ac_pkg = self.pkg_cache[package].installed
+ return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
+
+
+class PACMAN(CLIMgr):
+
+ CLI = 'pacman'
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, '-Qi'], environ_update=dict(LC_ALL='C'))
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.split("\n\n")[:-1]
+
+ def get_package_details(self, package):
+ # parse values of details that might extend over several lines
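+ # Hypothetical 'pacman -Qi' excerpt for illustration: a detail such as
+ # 'Optional Deps : libfoo: foo support' may wrap onto an indented
+ # continuation line ('libbar: bar support') that lacks the 'Key : value'
+ # shape and is therefore appended to the previous detail.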
+ raw_pkg_details = {}
+ last_detail = None
+ for line in package.splitlines():
+ m = re.match(r"([\w ]*[\w]) +: (.*)", line)
+ if m:
+ last_detail = m.group(1)
+ raw_pkg_details[last_detail] = m.group(2)
+ else:
+ # append value to previous detail
+ raw_pkg_details[last_detail] = raw_pkg_details[last_detail] + " " + line.lstrip()
+
+ provides = None
+ if raw_pkg_details['Provides'] != 'None':
+ provides = [
+ p.split('=')[0]
+ for p in raw_pkg_details['Provides'].split(' ')
+ ]
+
+ return {
+ 'name': raw_pkg_details['Name'],
+ 'version': raw_pkg_details['Version'],
+ 'arch': raw_pkg_details['Architecture'],
+ 'provides': provides,
+ }
+
+
+class PKG(CLIMgr):
+
+ CLI = 'pkg'
+ atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
+
+ def list_installed(self):
+ rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
+ if rc != 0 or err:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+
+ pkg = dict(zip(self.atoms, package.split('\t')))
+
+ if 'arch' in pkg:
+ try:
+ pkg['arch'] = pkg['arch'].split(':')[2]
+ except IndexError:
+ pass
+
+ if 'automatic' in pkg:
+ pkg['automatic'] = bool(int(pkg['automatic']))
+
+ if 'category' in pkg:
+ pkg['category'] = pkg['category'].split('/', 1)[0]
+
+ if 'version' in pkg:
+ if ',' in pkg['version']:
+ pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
+ else:
+ pkg['port_epoch'] = 0
+
+ if '_' in pkg['version']:
+ pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
+ else:
+ pkg['revision'] = '0'
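+ # Illustration (assumed FreeBSD convention): a raw version '1.2.3_4,5'
+ # yields version='1.2.3', revision='4', port_epoch='5'.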
+
+ if 'vital' in pkg:
+ pkg['vital'] = bool(int(pkg['vital']))
+
+ return pkg
+
+
+class PORTAGE(CLIMgr):
+
+ CLI = 'qlist'
+ atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
+
+ def list_installed(self):
+ rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
+ if rc != 0:
+ raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
+ return out.splitlines()
+
+ def get_package_details(self, package):
+ return dict(zip(self.atoms, package.split()))
+
+
+def main():
+
+ # get supported pkg managers
+ PKG_MANAGERS = get_all_pkg_managers()
+ PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
+
+ # start work
+ global module
+ module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'default': ['auto']},
+ strategy={'choices': ['first', 'all'], 'default': 'first'}),
+ supports_check_mode=True)
+ packages = {}
+ results = {'ansible_facts': {}}
+ managers = [x.lower() for x in module.params['manager']]
+ strategy = module.params['strategy']
+
+ if 'auto' in managers:
+ # keep order from user, we do dedupe below
+ managers.extend(PKG_MANAGER_NAMES)
+ managers.remove('auto')
+
+ unsupported = set(managers).difference(PKG_MANAGER_NAMES)
+ if unsupported:
+ if 'auto' in module.params['manager']:
+ msg = 'Could not auto detect a usable package manager, check warnings for details.'
+ else:
+ msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
+ module.fail_json(msg=msg)
+
+ found = 0
+ seen = set()
+ for pkgmgr in managers:
+
+ if found and strategy == 'first':
+ break
+
+ # dedupe as per above
+ if pkgmgr in seen:
+ continue
+ seen.add(pkgmgr)
+ try:
+ try:
+ # manager throws exception on init (calls self.test) if not usable.
+ manager = PKG_MANAGERS[pkgmgr]()
+ if manager.is_available():
+ found += 1
+ packages.update(manager.get_packages())
+
+ except Exception as e:
+ if pkgmgr in module.params['manager']:
+ module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
+ continue
+
+ except Exception as e:
+ if pkgmgr in module.params['manager']:
+ module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
+
+ if found == 0:
+ msg = ('Could not detect a supported package manager from the following list: %s, '
+ 'or the required Python library is not installed. Check warnings for details.' % managers)
+ module.fail_json(msg=msg)
+
+ # Set the facts, this will override the facts in ansible_facts that might exist from previous runs
+ # when using operating system level or distribution package managers
+ results['ansible_facts']['packages'] = packages
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/pause.py b/lib/ansible/modules/pause.py
new file mode 100644
index 00000000..be515a4b
--- /dev/null
+++ b/lib/ansible/modules/pause.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pause
+short_description: Pause playbook execution
+description:
+ - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged.
+ All parameters are optional. The default behavior is to pause with a prompt.
+ - To pause/wait/sleep per host, use the M(ansible.builtin.wait_for) module.
+ - You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely.
+ To continue early press C(ctrl+c) and then C(c). To abort a playbook press C(ctrl+c) and then C(a).
+ - The pause module integrates into async/parallelized playbooks without any special considerations (see Rolling Updates).
+ When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts.
+ - This module is also supported for Windows targets.
+version_added: "0.8"
+options:
+ minutes:
+ description:
+ - A positive number of minutes to pause for.
+ seconds:
+ description:
+ - A positive number of seconds to pause for.
+ prompt:
+ description:
+ - Optional text to use for the prompt message.
+ echo:
+ description:
+ - Controls whether or not keyboard input is shown when typing.
+ - Has no effect if 'seconds' or 'minutes' is set.
+ type: bool
+ default: 'yes'
+ version_added: 2.5
+author: "Tim Bielawa (@tbielawa)"
+notes:
+ - Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second; previously it would wait indefinitely.
+ - This module is also supported for Windows targets.
+ - User input is not captured or echoed, regardless of echo setting, when minutes or seconds is specified.
+'''
+
+EXAMPLES = '''
+- name: Pause for 5 minutes to build app cache
+ pause:
+ minutes: 5
+
+- name: Pause until you can verify updates to an application were successful
+ pause:
+
+- name: A helpful reminder of what to look out for post-update
+ pause:
+ prompt: "Make sure org.foo.FooOverload exception is not present"
+
+- name: Pause to get some sensitive input
+ pause:
+ prompt: "Enter a secret"
+ echo: no
+'''
+
+RETURN = '''
+user_input:
+ description: User input from interactive console
+ returned: if no waiting time set
+ type: str
+ sample: Example user input
+start:
+ description: Time when started pausing
+ returned: always
+ type: str
+ sample: "2017-02-23 14:35:07.298862"
+stop:
+ description: Time when ended pausing
+ returned: always
+ type: str
+ sample: "2017-02-23 14:35:09.552594"
+delta:
+ description: Time paused in seconds
+ returned: always
+ type: str
+ sample: 2
+stdout:
+ description: Output of pause module
+ returned: always
+ type: str
+ sample: Paused for 0.04 minutes
+echo:
+ description: Value of echo setting
+ returned: always
+ type: bool
+ sample: true
+'''
diff --git a/lib/ansible/modules/ping.py b/lib/ansible/modules/ping.py
new file mode 100644
index 00000000..fe9238f6
--- /dev/null
+++ b/lib/ansible/modules/ping.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success
+description:
+ - A trivial test module, this module always returns C(pong) on successful
+ contact. It does not make sense in playbooks, but it is useful from
+ C(/usr/bin/ansible) to verify the ability to log in and that a usable Python is configured.
+ - This is NOT ICMP ping, this is just a trivial test module that requires Python on the remote-node.
+ - For Windows targets, use the M(ansible.windows.win_ping) module instead.
+ - For Network targets, use the M(ansible.netcommon.net_ping) module instead.
+options:
+ data:
+ description:
+ - Data to return for the C(ping) return value.
+ - If this parameter is set to C(crash), the module will cause an exception.
+ type: str
+ default: pong
+seealso:
+- module: ansible.netcommon.net_ping
+- module: ansible.windows.win_ping
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Test we can log on to 'webservers' and execute python with json lib.
+# ansible webservers -m ping
+
+- name: Example from an Ansible Playbook
+ ping:
+
+- name: Induce an exception to see what happens
+ ping:
+ data: crash
+'''
+
+RETURN = '''
+ping:
+ description: Value provided with the data parameter
+ returned: success
+ type: str
+ sample: pong
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(type='str', default='pong'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+
+ result = dict(
+ ping=module.params['data'],
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/pip.py b/lib/ansible/modules/pip.py
new file mode 100644
index 00000000..97ae7d90
--- /dev/null
+++ b/lib/ansible/modules/pip.py
@@ -0,0 +1,782 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pip
+short_description: Manages Python library dependencies
+description:
+ - "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
+ or C(requirements)."
+version_added: "0.7"
+options:
+ name:
+ description:
+ - The name of a Python library to install or the URL (bzr+, hg+, git+, svn+) of the remote package.
+ - This can be a list (since 2.2) and contain version specifiers (since 2.7).
+ type: list
+ version:
+ description:
+ - The version number to install of the Python library specified in the I(name) parameter.
+ type: str
+ requirements:
+ description:
+ - The path to a pip requirements file, which should be local to the remote system.
+ File can be specified as a relative path if using the chdir option.
+ type: str
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) directory to install into.
+ It cannot be specified together with the 'executable' parameter
+ (added in 2.1).
+ If the virtualenv does not exist, it will be created before installing
+ packages. The optional virtualenv_site_packages, virtualenv_command,
+ and virtualenv_python options affect the creation of the virtualenv.
+ type: path
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect, the environment must be deleted and newly
+ created.
+ type: bool
+ default: "no"
+ version_added: "1.0"
+ virtualenv_command:
+ description:
+ - The command or a pathname to the command to create the virtual
+ environment with. For example C(pyvenv), C(virtualenv),
+ C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
+ type: path
+ default: virtualenv
+ version_added: "1.1"
+ virtualenv_python:
+ description:
+ - The Python executable used for creating the virtual environment.
+ For example C(python3.5), C(python2.7). When not specified, the
+ Python version used to run the ansible module is used. This parameter
+ should not be used when C(virtualenv_command) is using C(pyvenv) or
+ the C(-m venv) module.
+ type: str
+ version_added: "2.0"
+ state:
+ description:
+ - The state of the module.
+ - The 'forcereinstall' option is only available in Ansible 2.1 and above.
+ type: str
+ choices: [ absent, forcereinstall, latest, present ]
+ default: present
+ extra_args:
+ description:
+ - Extra arguments passed to pip.
+ type: str
+ version_added: "1.0"
+ editable:
+ description:
+ - Pass the editable flag.
+ type: bool
+ default: 'no'
+ version_added: "2.0"
+ chdir:
+ description:
+ - cd into this directory before running the command.
+ type: path
+ version_added: "1.3"
+ executable:
+ description:
+ - The explicit executable or pathname for the pip executable,
+ if different from the Ansible Python interpreter. For
+ example C(pip3.3), if there are both Python 2.7 and 3.3 installations
+ in the system and you want to run pip for the Python 3.3 installation.
+ - Mutually exclusive with I(virtualenv) (added in 2.1).
+ - Does not affect the Ansible Python interpreter.
+ - The setuptools package must be installed for both the Ansible Python interpreter
+ and for the version of Python specified by this option.
+ type: path
+ version_added: "1.3"
+ umask:
+ description:
+ - The system umask to apply before installing the pip package. This is
+ useful, for example, when installing on systems that have a very
+ restrictive umask by default (e.g., "0077") and you want to pip install
+ packages which are to be used by all users. Note that this requires you
+ to specify desired umask mode as an octal string, (e.g., "0022").
+ type: str
+ version_added: "2.1"
+notes:
+ - The virtualenv (U(http://www.virtualenv.org/)) must be
+ installed on the remote host if the virtualenv parameter is specified and
+ the virtualenv needs to be created.
+ - Although it executes using the Ansible Python interpreter, the pip module shells out to
+ run the actual pip command, so it can use any pip version you specify with I(executable).
+ By default, it uses the pip version for the Ansible Python interpreter. For example, pip3 on python 3, and pip2 or pip on python 2.
+ - The interpreter used by Ansible
+ (see :ref:`ansible_python_interpreter<ansible_python_interpreter>`)
+ requires the setuptools package, regardless of the version of pip set with
+ the I(executable) option.
+requirements:
+- pip
+- virtualenv
+- setuptools
+author:
+- Matt Wright (@mattupstate)
+'''
+
+EXAMPLES = '''
+- name: Install bottle python package
+ pip:
+ name: bottle
+
+- name: Install bottle python package on version 0.11
+ pip:
+ name: bottle==0.11
+
+- name: Install bottle python package with version specifiers
+ pip:
+ name: bottle>0.10,<0.20,!=0.11
+
+- name: Install multi python packages with version specifiers
+ pip:
+ name:
+ - django>1.11.0,<1.12.0
+ - bottle>0.10,<0.20,!=0.11
+
+- name: Install python package using a proxy
+ # Pip doesn't use the standard environment variables, please use the CAPITALIZED ones below
+ pip:
+ name: six
+ environment:
+ HTTP_PROXY: '127.0.0.1:8080'
+ HTTPS_PROXY: '127.0.0.1:8080'
+
+# You do not have to supply the '-e' option in extra_args
+- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+,svn+)
+ pip:
+ name: svn+http://myrepo/svn/MyApp#egg=MyApp
+
+- name: Install MyApp using one of the remote protocols (bzr+,hg+,git+)
+ pip:
+ name: git+http://myrepo/app/MyApp
+
+- name: Install MyApp from local tarball
+ pip:
+ name: file:///path/to/MyApp.tar.gz
+
+- name: Install bottle into the specified (virtualenv), inheriting none of the globally installed modules
+ pip:
+ name: bottle
+ virtualenv: /my_app/venv
+
+- name: Install bottle into the specified (virtualenv), inheriting globally installed modules
+ pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_site_packages: yes
+
+- name: Install bottle into the specified (virtualenv), using Python 2.7
+ pip:
+ name: bottle
+ virtualenv: /my_app/venv
+ virtualenv_command: virtualenv-2.7
+
+- name: Install bottle within a user home directory
+ pip:
+ name: bottle
+ extra_args: --user
+
+- name: Install specified python requirements
+ pip:
+ requirements: /my_app/requirements.txt
+
+- name: Install specified python requirements in indicated (virtualenv)
+ pip:
+ requirements: /my_app/requirements.txt
+ virtualenv: /my_app/venv
+
+- name: Install specified python requirements and custom Index URL
+ pip:
+ requirements: /my_app/requirements.txt
+ extra_args: -i https://example.com/pypi/simple
+
+- name: Install specified python requirements offline from a local directory with downloaded packages
+ pip:
+ requirements: /my_app/requirements.txt
+ extra_args: "--no-index --find-links=file:///my_downloaded_packages_dir"
+
+- name: Install bottle for Python 3.3 specifically, using the 'pip3.3' executable
+ pip:
+ name: bottle
+ executable: pip3.3
+
+- name: Install bottle, forcing reinstallation if it's already installed
+ pip:
+ name: bottle
+ state: forcereinstall
+
+- name: Install bottle while ensuring the umask is 0022 (to ensure other users can use it)
+ pip:
+ name: bottle
+ umask: "0022"
+ become: True
+'''
+
+RETURN = '''
+cmd:
+ description: pip command used by the module
+ returned: success
+ type: str
+ sample: pip2 install ansible six
+name:
+ description: list of python modules targeted by pip
+ returned: success
+ type: list
+ sample: ['ansible', 'six']
+requirements:
+ description: Path to the requirements file
+ returned: success, if a requirements file was provided
+ type: str
+ sample: "/srv/git/project/requirements.txt"
+version:
+ description: Version of the package specified in 'name'
+ returned: success, if a name and version were provided
+ type: str
+ sample: "2.5.1"
+virtualenv:
+ description: Path to the virtualenv
+ returned: success, if a virtualenv path was provided
+ type: str
+ sample: "/tmp/virtualenv"
+'''
+
+import os
+import re
+import sys
+import tempfile
+import operator
+import shlex
+import traceback
+from distutils.version import LooseVersion
+
+SETUPTOOLS_IMP_ERR = None
+try:
+ from pkg_resources import Requirement
+
+ HAS_SETUPTOOLS = True
+except ImportError:
+ HAS_SETUPTOOLS = False
+ SETUPTOOLS_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, is_executable, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import PY3
+
+
+#: Python one-liners to be run at the command line that will determine the
+# installed version for these special libraries. These are libraries that
+# don't end up in the output of pip freeze.
+_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
+ 'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
+
+_VCS_RE = re.compile(r'(svn|git|hg|bzr)\+')
+
+op_dict = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
+ "<": operator.lt, "==": operator.eq, "!=": operator.ne, "~=": operator.ge}
+
+
+def _is_vcs_url(name):
+ """Test whether a name is a vcs url or not."""
+ return re.match(_VCS_RE, name)
+
+
+def _is_package_name(name):
+ """Test whether the name is a package name or a version specifier."""
+ return not name.lstrip().startswith(tuple(op_dict.keys()))
+
+
+def _recover_package_name(names):
+ """Recover package names as list from user's raw input.
+
+ :input: a mixed and invalid list of names or version specifiers
+ :return: a list of valid package name
+
+ eg.
+ input: ['django>1.11.1', '<1.11.3', 'ipaddress', 'simpleproject>1.1.0', '<2.0.0']
+ return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
+
+ input: ['django>1.11.1,<1.11.3,ipaddress', 'simpleproject>1.1.0,<2.0.0']
+ return: ['django>1.11.1,<1.11.3', 'ipaddress', 'simpleproject>1.1.0,<2.0.0']
+ """
+ # rebuild input name to a flat list so we can tolerate any combination of input
+ tmp = []
+ for one_line in names:
+ tmp.extend(one_line.split(","))
+ names = tmp
+
+ # reconstruct the names
+ name_parts = []
+ package_names = []
+ in_brackets = False
+ for name in names:
+ if _is_package_name(name) and not in_brackets:
+ if name_parts:
+ package_names.append(",".join(name_parts))
+ name_parts = []
+ if "[" in name:
+ in_brackets = True
+ if in_brackets and "]" in name:
+ in_brackets = False
+ name_parts.append(name)
+ package_names.append(",".join(name_parts))
+ return package_names
+
+
+def _get_cmd_options(module, cmd):
+ thiscmd = cmd + " --help"
+ rc, stdout, stderr = module.run_command(thiscmd)
+ if rc != 0:
+ module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
+
+ words = stdout.strip().split()
+ cmd_options = [x for x in words if x.startswith('--')]
+ return cmd_options
+
+
+def _get_packages(module, pip, chdir):
+ '''Return results of pip command to get packages.'''
+ # Try 'pip list' command first.
+ command = '%s list --format=freeze' % pip
+ lang_env = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+ rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
+
+ # If there was an error (pip version too old) then use 'pip freeze'.
+ if rc != 0:
+ command = '%s freeze' % pip
+ rc, out, err = module.run_command(command, cwd=chdir)
+ if rc != 0:
+ _fail(module, command, out, err)
+
+ return command, out, err
+
+
+def _is_present(module, req, installed_pkgs, pkg_command):
+ '''Return whether or not package is installed.'''
+ for pkg in installed_pkgs:
+ if '==' in pkg:
+ pkg_name, pkg_version = pkg.split('==')
+ pkg_name = Package.canonicalize_name(pkg_name)
+ else:
+ continue
+
+ if pkg_name == req.package_name and req.is_satisfied_by(pkg_version):
+ return True
+
+ return False
+
+
+def _get_pip(module, env=None, executable=None):
+ # Older pip only installed under the "/usr/bin/pip" name. Many Linux
+ # distros install it there.
+ # By default, we try to use pip required for the current python
+ # interpreter, so people can use pip to install modules dependencies
+ candidate_pip_basenames = ('pip2', 'pip')
+ if PY3:
+ # pip under python3 installs the "/usr/bin/pip3" name
+ candidate_pip_basenames = ('pip3',)
+
+ pip = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ pip = executable
+ else:
+ # If you define your own executable that executable should be the only candidate.
+ # As noted in the docs, executable doesn't work with virtualenvs.
+ candidate_pip_basenames = (executable,)
+
+ if pip is None:
+ if env is None:
+ opt_dirs = []
+ for basename in candidate_pip_basenames:
+ pip = module.get_bin_path(basename, False, opt_dirs)
+ if pip is not None:
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find any of %s to use. pip'
+ ' needs to be installed.' % ', '.join(candidate_pip_basenames))
+ else:
+ # If we're using a virtualenv we must use the pip from the
+ # virtualenv
+ venv_dir = os.path.join(env, 'bin')
+ candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
+ for basename in candidate_pip_basenames:
+ candidate = os.path.join(venv_dir, basename)
+ if os.path.exists(candidate) and is_executable(candidate):
+ pip = candidate
+ break
+ else:
+ # For-else: Means that we did not break out of the loop
+ # (therefore, that pip was not found)
+ module.fail_json(msg='Unable to find pip in the virtualenv, %s, ' % env +
+ 'under any of these names: %s. ' % (', '.join(candidate_pip_basenames)) +
+ 'Make sure pip is present in the virtualenv.')
+
+ return pip
+
+
+def _fail(module, cmd, out, err):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+ msg += "\n:stderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg)
+
+
+def _get_package_info(module, package, env=None):
+ """This is only needed for special packages which do not show up in pip freeze
+
+ pip and setuptools fall into this category.
+
+ :returns: a string containing the version number if the package is
+ installed. None if the package is not installed.
+ """
+ if env:
+ opt_dirs = ['%s/bin' % env]
+ else:
+ opt_dirs = []
+ python_bin = module.get_bin_path('python', False, opt_dirs)
+
+ if python_bin is None:
+ formatted_dep = None
+ else:
+ rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
+ if rc:
+ formatted_dep = None
+ else:
+ formatted_dep = '%s==%s' % (package, out.strip())
+ return formatted_dep
+
+
+def setup_virtualenv(module, env, chdir, out, err):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = shlex.split(module.params['virtualenv_command'])
+
+ # Find the binary for the command in the PATH
+ # and switch the command for the explicit path.
+ if os.path.basename(cmd[0]) == cmd[0]:
+ cmd[0] = module.get_bin_path(cmd[0], True)
+
+ # Add the system-site-packages option if that
+ # is enabled, otherwise explicitly set the option
+ # to not use system-site-packages if that is an
+ # option provided by the command's help function.
+ if module.params['virtualenv_site_packages']:
+ cmd.append('--system-site-packages')
+ else:
+ cmd_opts = _get_cmd_options(module, cmd[0])
+ if '--no-site-packages' in cmd_opts:
+ cmd.append('--no-site-packages')
+
+ virtualenv_python = module.params['virtualenv_python']
+ # -p is a virtualenv option, not compatible with pyenv or venv
+ # this conditional validates if the command being used is not any of them
+ if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
+ if virtualenv_python:
+ cmd.append('-p%s' % virtualenv_python)
+ elif PY3:
+ # Ubuntu currently has a patch making virtualenv always
+ # try to use python2. Since Ubuntu16 works without
+ # python2 installed, this is a problem. This code mimics
+ # the upstream behaviour of using the python which invoked
+ # virtualenv to determine which python is used inside of
+ # the virtualenv (when none are specified).
+ cmd.append('-p%s' % sys.executable)
+
+ # if venv or pyvenv are used and virtualenv_python is defined, then
+ # virtualenv_python is ignored, this has to be acknowledged
+ elif module.params['virtualenv_python']:
+ module.fail_json(
+ msg='virtualenv_python should not be used when'
+ ' using the venv module or pyvenv as virtualenv_command'
+ )
+
+ cmd.append(env)
+ rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
+ out += out_venv
+ err += err_venv
+ if rc != 0:
+ _fail(module, cmd, out, err)
+ return out, err
+
+
+class Package:
+ """Python distribution package metadata wrapper.
+
+ A wrapper class for Requirement, which provides
+ API to parse package name, version specifier,
+ test whether a package is already satisfied.
+ """
+
+ _CANONICALIZE_RE = re.compile(r'[-_.]+')
+
+ def __init__(self, name_string, version_string=None):
+ self._plain_package = False
+ self.package_name = name_string
+ self._requirement = None
+
+ if version_string:
+ version_string = version_string.lstrip()
+ separator = '==' if version_string[0].isdigit() else ' '
+ name_string = separator.join((name_string, version_string))
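+ # e.g. ('bottle', '0.11') becomes 'bottle==0.11', while a specifier
+ # such as ('bottle', '>=0.10') joins with a space for Requirement.parse.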
+ try:
+ self._requirement = Requirement.parse(name_string)
+ # old pkg_resource will replace 'setuptools' with 'distribute' when it's already installed
+ if self._requirement.project_name == "distribute" and "setuptools" in name_string:
+ self.package_name = "setuptools"
+ self._requirement.project_name = "setuptools"
+ else:
+ self.package_name = Package.canonicalize_name(self._requirement.project_name)
+ self._plain_package = True
+ except ValueError:
+ pass
+
+ @property
+ def has_version_specifier(self):
+ if self._plain_package:
+ return bool(self._requirement.specs)
+ return False
+
+ def is_satisfied_by(self, version_to_test):
+ if not self._plain_package:
+ return False
+ try:
+ return self._requirement.specifier.contains(version_to_test, prereleases=True)
+ except AttributeError:
+ # old setuptools has no specifier, do fallback
+ version_to_test = LooseVersion(version_to_test)
+ return all(
+ op_dict[op](version_to_test, LooseVersion(ver))
+ for op, ver in self._requirement.specs
+ )
+
+ @staticmethod
+ def canonicalize_name(name):
+ # This is taken from PEP 503.
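+ # e.g. 'Foo_Bar.baz' -> 'foo-bar-baz' (runs of '-', '_' and '.' collapse to '-').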
+ return Package._CANONICALIZE_RE.sub("-", name).lower()
+
+ def __str__(self):
+ if self._plain_package:
+ return to_native(self._requirement)
+ return self.package_name
+
+
+def main():
+ state_map = dict(
+ present=['install'],
+ absent=['uninstall', '-y'],
+ latest=['install', '-U'],
+ forcereinstall=['install', '-U', '--force-reinstall'],
+ )
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=state_map.keys()),
+ name=dict(type='list', elements='str'),
+ version=dict(type='str'),
+ requirements=dict(type='str'),
+ virtualenv=dict(type='path'),
+ virtualenv_site_packages=dict(type='bool', default=False),
+ virtualenv_command=dict(type='path', default='virtualenv'),
+ virtualenv_python=dict(type='str'),
+ extra_args=dict(type='str'),
+ editable=dict(type='bool', default=False),
+ chdir=dict(type='path'),
+ executable=dict(type='path'),
+ umask=dict(type='str'),
+ ),
+ required_one_of=[['name', 'requirements']],
+ mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
+ supports_check_mode=True,
+ )
+
+ if not HAS_SETUPTOOLS:
+ module.fail_json(msg=missing_required_lib("setuptools"),
+ exception=SETUPTOOLS_IMP_ERR)
+
+ state = module.params['state']
+ name = module.params['name']
+ version = module.params['version']
+ requirements = module.params['requirements']
+ extra_args = module.params['extra_args']
+ chdir = module.params['chdir']
+ umask = module.params['umask']
+ env = module.params['virtualenv']
+
+ venv_created = False
+ if env and chdir:
+ env = os.path.join(chdir, env)
+
+ if umask and not isinstance(umask, int):
+ try:
+ umask = int(umask, 8)
+ except Exception:
+ module.fail_json(msg="umask must be an octal integer",
+ details=to_native(sys.exc_info()[1]))
+
+ old_umask = None
+ if umask is not None:
+ old_umask = os.umask(umask)
+ try:
+ if state == 'latest' and version is not None:
+ module.fail_json(msg='version is incompatible with state=latest')
+
+ if chdir is None:
+ # this is done to avoid permissions issues with privilege escalation and virtualenvs
+ chdir = tempfile.gettempdir()
+
+ err = ''
+ out = ''
+
+ if env:
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ venv_created = True
+ out, err = setup_virtualenv(module, env, chdir, out, err)
+
+ pip = _get_pip(module, env, module.params['executable'])
+
+ cmd = [pip] + state_map[state]
+
+ # If there's a virtualenv we want things we install to be able to use other
+ # installations that exist as binaries within this virtualenv. Example: we
+ # install cython and then gevent -- gevent needs to use the cython binary,
+ # not just a python package that will be found by calling the right python.
+ # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
+ # in run_command by setting path_prefix here.
+ path_prefix = None
+ if env:
+ path_prefix = "/".join(pip.split('/')[:-1])
+
+ # Automatically apply -e option to extra_args when source is a VCS url. VCS
+ # includes those beginning with svn+, git+, hg+ or bzr+
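+ # e.g. 'git+https://github.com/ansible/ansible.git#egg=ansible' (illustrative URL)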
+ has_vcs = False
+ if name:
+ for pkg in name:
+ if pkg and _is_vcs_url(pkg):
+ has_vcs = True
+ break
+
+ # convert raw input package names to Package instances
+ packages = [Package(pkg) for pkg in _recover_package_name(name)]
+ # check invalid combination of arguments
+ if version is not None:
+ if len(packages) > 1:
+ module.fail_json(
+ msg="'version' argument is ambiguous when installing multiple package distributions. "
+ "Please specify version restrictions next to each package in 'name' argument."
+ )
+ if packages[0].has_version_specifier:
+ module.fail_json(
+ msg="The 'version' argument conflicts with any version specifier provided along with a package name. "
+ "Please keep the version specifier, but remove the 'version' argument."
+ )
+ # if the version specifier is provided by version, append that into the package
+ packages[0] = Package(to_native(packages[0]), version)
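+ # e.g. name=['django'] with version='1.11' yields the single requirement
+ # 'django==1.11' (illustrative values)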
+
+ if module.params['editable']:
+ args_list = [] # used if extra_args is not used at all
+ if extra_args:
+ args_list = extra_args.split(' ')
+ if '-e' not in args_list:
+ args_list.append('-e')
+ # Ok, we will reconstruct the option string
+ extra_args = ' '.join(args_list)
+
+ if extra_args:
+ cmd.extend(shlex.split(extra_args))
+
+ if name:
+ cmd.extend(to_native(p) for p in packages)
+ elif requirements:
+ cmd.extend(['-r', requirements])
+ else:
+ module.exit_json(
+ changed=False,
+ warnings=["No valid name or requirements file found."],
+ )
+
+ if module.check_mode:
+ if extra_args or requirements or state == 'latest' or not name:
+ module.exit_json(changed=True)
+
+ pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
+
+ out += out_pip
+ err += err_pip
+
+ changed = False
+ if name:
+ pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
+
+ if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
+ # Older versions of pip (pre-1.3) do not have pip list.
+ # pip freeze does not list setuptools or pip in its output
+ # So we need to get those via a special case
+ for pkg in ('setuptools', 'pip'):
+ if pkg in name:
+ formatted_dep = _get_package_info(module, pkg, env)
+ if formatted_dep is not None:
+ pkg_list.append(formatted_dep)
+ out += '%s\n' % formatted_dep
+
+ for package in packages:
+ is_present = _is_present(module, package, pkg_list, pkg_cmd)
+ if (state == 'present' and not is_present) or (state == 'absent' and is_present):
+ changed = True
+ break
+ module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
+
+ out_freeze_before = None
+ if requirements or has_vcs:
+ _, out_freeze_before, _ = _get_packages(module, pip, chdir)
+
+ rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
+ out += out_pip
+ err += err_pip
+ if rc == 1 and state == 'absent' and \
+ ('not installed' in out_pip or 'not installed' in err_pip):
+ pass # rc is 1 when attempting to uninstall non-installed package
+ elif rc != 0:
+ _fail(module, cmd, out, err)
+
+ if state == 'absent':
+ changed = 'Successfully uninstalled' in out_pip
+ else:
+ if out_freeze_before is None:
+ changed = 'Successfully installed' in out_pip
+ else:
+ _, out_freeze_after, _ = _get_packages(module, pip, chdir)
+ changed = out_freeze_before != out_freeze_after
+
+ changed = changed or venv_created
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
+ state=state, requirements=requirements, virtualenv=env,
+ stdout=out, stderr=err)
+ finally:
+ if old_umask is not None:
+ os.umask(old_umask)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/raw.py b/lib/ansible/modules/raw.py
new file mode 100644
index 00000000..702ac814
--- /dev/null
+++ b/lib/ansible/modules/raw.py
@@ -0,0 +1,76 @@
+# This is a virtual module that is entirely implemented server side
+
+# Copyright: (c) 2012, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: raw
+short_description: Executes a low-down and dirty command
+version_added: historical
+options:
+ free_form:
+ description:
+ - The raw module takes a free form command to run.
+ - There is no parameter actually named 'free form'; see the examples!
+ required: true
+ executable:
+ description:
+ - Change the shell used to execute the command. Should be an absolute path to the executable.
+ - When using privilege escalation (C(become)), a default shell will be assigned if one is not provided,
+ as privilege escalation requires a shell.
+ version_added: "1.0"
+description:
+ - Executes a low-down and dirty SSH command, not going through the module
+ subsystem.
+ - This is useful and should only be done in a few cases. A common
+ case is installing C(python) on a system without python installed by default.
+ Another is speaking to any devices such as
+ routers that do not have any Python installed. In any other case, using
+ the M(ansible.builtin.shell) or M(ansible.builtin.command) module is much more appropriate.
+ - Arguments given to C(raw) are run directly through the configured remote shell.
+ - Standard output, error output and return code are returned when
+ available.
+ - There is no change handler support for this module.
+ - This module does not require python on the remote system, much like
+ the M(ansible.builtin.script) module.
+ - This module is also supported for Windows targets.
+notes:
+ - "If using raw from a playbook, you may need to disable fact gathering
+ using C(gather_facts: no) if you're using C(raw) to bootstrap python
+ onto the machine."
+ - If you want to execute a command securely and predictably, it may be
+ better to use the M(ansible.builtin.command) or M(ansible.builtin.shell) modules instead.
+ - The C(environment) keyword does not work with raw normally; it requires a shell,
+ which means it only works if C(executable) is set or when using the module
+ with privilege escalation (C(become)).
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.command
+- module: ansible.builtin.shell
+- module: ansible.windows.win_command
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Bootstrap a host without python2 installed
+ raw: dnf install -y python2 python2-dnf libselinux-python
+
+- name: Run a command that uses non-POSIX shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
+ raw: cat < /tmp/*txt
+ args:
+ executable: /bin/bash
+
+- name: Safely use templated variables. Always use quote filter to avoid injection issues.
+ raw: "{{ package_mgr|quote }} {{ pkg_flags|quote }} install {{ python|quote }}"
+
+- name: List user accounts on a Windows system
+ raw: Get-WmiObject -Class Win32_UserAccount
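+
+# A hedged sketch, not part of the upstream examples: when bootstrapping
+# python with raw, fact gathering must be disabled at the play level, since
+# the setup module cannot run without python (host pattern is illustrative).
+# - hosts: unprovisioned
+#   gather_facts: no
+#   tasks:
+#     - name: Bootstrap python3
+#       raw: dnf install -y python3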
+'''
diff --git a/lib/ansible/modules/reboot.py b/lib/ansible/modules/reboot.py
new file mode 100644
index 00000000..c5066560
--- /dev/null
+++ b/lib/ansible/modules/reboot.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: reboot
+short_description: Reboot a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use C(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+ - Reboot a machine, wait for it to go down, come back up, and respond to commands.
+ - For Windows targets, use the M(ansible.windows.win_reboot) module instead.
+version_added: "2.7"
+options:
+ pre_reboot_delay:
+ description:
+ - Seconds to wait before reboot. Passed as a parameter to the reboot command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ post_reboot_delay:
+ description:
+ - Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
+ - This is useful if you want to wait for something to settle despite your connection already working.
+ type: int
+ default: 0
+ reboot_timeout:
+ description:
+ - Maximum seconds to wait for machine to reboot and respond to a test command.
+ - This timeout is evaluated separately for both reboot verification and test command success so the
+ maximum execution time for the module is twice this amount.
+ type: int
+ default: 600
+ connect_timeout:
+ description:
+ - Maximum seconds to wait for a successful connection to the managed hosts before trying again.
+ - If unspecified, the default setting for the underlying connection plugin is used.
+ type: int
+ test_command:
+ description:
+ - Command to run on the rebooted host and expect success from to determine the machine is ready for
+ further tasks.
+ type: str
+ default: whoami
+ msg:
+ description:
+ - Message to display to users before reboot.
+ type: str
+ default: Reboot initiated by Ansible
+
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ version_added: '2.8'
+
+ boot_time_command:
+ description:
+ - Command to run that returns a unique string indicating the last time the system was booted.
+ - Setting this to a command that has different output each time it is run will cause the task to fail.
+ type: str
+ default: 'cat /proc/sys/kernel/random/boot_id'
+ version_added: '2.10'
+seealso:
+- module: ansible.windows.win_reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally reboot the machine with all defaults
+ reboot:
+
+- name: Reboot a slow machine that might have lots of updates to apply
+ reboot:
+ reboot_timeout: 3600
+
+- name: Reboot a machine with shutdown command in unusual place
+ reboot:
+ search_paths:
+ - '/lib/molly-guard'
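+
+# A hedged sketch, not from the upstream examples: any command whose output
+# changes only across reboots can serve as the boot-time marker.
+- name: Reboot using 'who -b' as the boot-time marker
+ reboot:
+ boot_time_command: who -b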
+'''
+
+RETURN = r'''
+rebooted:
+ description: true if the machine was rebooted
+ returned: always
+ type: bool
+ sample: true
+elapsed:
+ description: The number of seconds that elapsed waiting for the system to be rebooted.
+ returned: always
+ type: int
+ sample: 23
+'''
diff --git a/lib/ansible/modules/replace.py b/lib/ansible/modules/replace.py
new file mode 100644
index 00000000..a694d9e7
--- /dev/null
+++ b/lib/ansible/modules/replace.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evan Kaufman <evan@digitalflophouse.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: replace
+author: Evan Kaufman (@EvanK)
+extends_documentation_fragment:
+ - files
+ - validate
+short_description: Replace all instances of a particular string in a
+ file using a back-referenced regular expression
+description:
+ - This module will replace all instances of a pattern within a file.
+ - It is up to the user to maintain idempotence by ensuring that the
+ same pattern would never match any replacements made.
+version_added: "1.6"
+options:
+ path:
+ description:
+ - The file to modify.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: true
+ aliases: [ dest, destfile, name ]
+ regexp:
+ description:
+ - The regular expression to look for in the contents of the file.
+ - Uses Python regular expressions; see
+ U(http://docs.python.org/2/library/re.html).
+ - Uses MULTILINE mode, which means C(^) and C($) match the beginning
+ and end of the file, as well as the beginning and end respectively
+ of I(each line) of the file.
+ - Does not use DOTALL, which means the C(.) special character matches
+ any character I(except newlines). A common mistake is to assume that
+ a negated character set like C([^#]) will also not match newlines.
+ - In order to exclude newlines, they must be added to the set like C([^#\n]).
+ - Note that, as of Ansible 2.0, short form tasks should have any escape
+ sequences backslash-escaped in order to prevent them being parsed
+ as string literal escapes. See the examples.
+ type: str
+ required: true
+ replace:
+ description:
+ - The string to replace regexp matches.
+ - May contain backreferences that will get expanded with the regexp capture groups if the regexp matches.
+ - If not set, matches are removed entirely.
+ - Backreferences can be used ambiguously like C(\1), or explicitly like C(\g<1>).
+ type: str
+ after:
+ description:
+ - If specified, only content after this match will be replaced/removed.
+ - Can be used in combination with C(before).
+ - Uses Python regular expressions; see
+ U(http://docs.python.org/2/library/re.html).
+ - Uses DOTALL, which means the C(.) special character I(can match newlines).
+ type: str
+ version_added: "2.4"
+ before:
+ description:
+ - If specified, only content before this match will be replaced/removed.
+ - Can be used in combination with C(after).
+ - Uses Python regular expressions; see
+ U(http://docs.python.org/2/library/re.html).
+ - Uses DOTALL, which means the C(.) special character I(can match newlines).
+ type: str
+ version_added: "2.4"
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ others:
+ description:
+ - All arguments accepted by the M(ansible.builtin.file) module also work here.
+ type: str
+ encoding:
+ description:
+ - The character encoding for reading and writing the file.
+ type: str
+ default: utf-8
+ version_added: "2.4"
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - As of Ansible 2.7.10, the combined use of I(before) and I(after) works properly. If you were relying on the
+ previous incorrect behavior, you may need to adjust your tasks.
+ See U(https://github.com/ansible/ansible/issues/31354) for details.
+ - Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
+'''
+
+EXAMPLES = r'''
+- name: Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
+ replace:
+ path: /etc/hosts
+ regexp: '(\s+)old\.host\.name(\s+.*)?$'
+ replace: '\1new.host.name\2'
+
+- name: Replace after the expression till the end of the file (requires Ansible >= 2.4)
+ replace:
+ path: /etc/apache2/sites-available/default.conf
+ after: 'NameVirtualHost [*]'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+- name: Replace before the expression till the beginning of the file (requires Ansible >= 2.4)
+ replace:
+ path: /etc/apache2/sites-available/default.conf
+ before: '# live site config'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+# Prior to Ansible 2.7.10, using before and after in combination did the opposite of what was intended.
+# see https://github.com/ansible/ansible/issues/31354 for details.
+- name: Replace between the expressions (requires Ansible >= 2.4)
+ replace:
+ path: /etc/hosts
+ after: '<VirtualHost [*]>'
+ before: '</VirtualHost>'
+ regexp: '^(.+)$'
+ replace: '# \1'
+
+- name: Supports common file attributes
+ replace:
+ path: /home/jdoe/.ssh/known_hosts
+ regexp: '^old\.host\.name[^\n]*\n'
+ owner: jdoe
+ group: jdoe
+ mode: '0644'
+
+- name: Supports a validate command
+ replace:
+ path: /etc/apache/ports
+ regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
+ replace: '\1 127.0.0.1:8080'
+ validate: '/usr/sbin/apache2ctl -f %s -t'
+
+- name: Short form task (in ansible 2+) necessitates backslash-escaped sequences
+ replace: path=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
+
+- name: Long form task does not
+ replace:
+ path: /etc/hosts
+ regexp: '\b(localhost)(\d*)\b'
+ replace: '\1\2.localdomain\2 \1\2'
+
+- name: Explicitly specifying positional matched groups in replacement
+ replace:
+ path: /etc/ssh/sshd_config
+ regexp: '^(ListenAddress[ ]+)[^\n]+$'
+ replace: '\g<1>0.0.0.0'
+
+- name: Explicitly specifying named matched groups
+ replace:
+ path: /etc/ssh/sshd_config
+ regexp: '^(?P<dctv>ListenAddress[ ]+)(?P<host>[^\n]+)$'
+ replace: '#\g<dctv>\g<host>\n\g<dctv>0.0.0.0'
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def write_changes(module, contents, path):
+
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ if "%s" not in validate:
+ module.fail_json(msg="validate must contain %%s: %s" % (validate))
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc, err))
+ if valid:
+ module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
+ regexp=dict(type='str', required=True),
+ replace=dict(type='str', default=''),
+ after=dict(type='str'),
+ before=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ validate=dict(type='str'),
+ encoding=dict(type='str', default='utf-8'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ path = params['path']
+ encoding = params['encoding']
+ res_args = dict()
+
+ params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
+ params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
+ params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
+ params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')
+
+ if os.path.isdir(path):
+ module.fail_json(rc=256, msg='Path %s is a directory!' % path)
+
+ if not os.path.exists(path):
+ module.fail_json(rc=257, msg='Path %s does not exist!' % path)
+ else:
+ f = open(path, 'rb')
+ contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
+ f.close()
+
+ pattern = u''
+ if params['after'] and params['before']:
+ pattern = u'%s(?P<subsection>.*?)%s' % (params['after'], params['before'])
+ elif params['after']:
+ pattern = u'%s(?P<subsection>.*)' % params['after']
+ elif params['before']:
+ pattern = u'(?P<subsection>.*)%s' % params['before']
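+ # Illustrative: after='<VirtualHost' and before='</VirtualHost>' yield
+ # u'<VirtualHost(?P<subsection>.*?)</VirtualHost>', compiled with DOTALL
+ # below so that '.' also matches newlines.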
+
+ if pattern:
+ section_re = re.compile(pattern, re.DOTALL)
+ match = re.search(section_re, contents)
+ if match:
+ section = match.group('subsection')
+ indices = [match.start('subsection'), match.end('subsection')]
+ else:
+ res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
+ res_args['changed'] = False
+ module.exit_json(**res_args)
+ else:
+ section = contents
+
+ mre = re.compile(params['regexp'], re.MULTILINE)
+ result = re.subn(mre, params['replace'], section, 0)
+
+ if result[1] > 0 and section != result[0]:
+ if pattern:
+ result = (contents[:indices[0]] + result[0] + contents[indices[1]:], result[1])
+ msg = '%s replacements made' % result[1]
+ changed = True
+ if module._diff:
+ res_args['diff'] = {
+ 'before_header': path,
+ 'before': contents,
+ 'after_header': path,
+ 'after': result[0],
+ }
+ else:
+ msg = ''
+ changed = False
+
+ if changed and not module.check_mode:
+ if params['backup'] and os.path.exists(path):
+ res_args['backup_file'] = module.backup_local(path)
+ # We should always follow symlinks so that we change the real file
+ path = os.path.realpath(path)
+ write_changes(module, to_bytes(result[0], encoding=encoding), path)
+
+ res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/rpm_key.py b/lib/ansible/modules/rpm_key.py
new file mode 100644
index 00000000..350cf4e3
--- /dev/null
+++ b/lib/ansible/modules/rpm_key.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to import third party repo keys to your rpm db
+# Copyright: (c) 2013, Héctor Acosta <hector.acosta@gazzang.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rpm_key
+author:
+ - Hector Acosta (@hacosta) <hector.acosta@gazzang.com>
+short_description: Adds or removes a gpg key from the rpm db
+description:
+ - Adds or removes (rpm --import) a gpg key to your rpm database.
+version_added: "1.3"
+options:
+ key:
+ description:
+ - Key that will be modified. Can be a url, a file on the managed node, or a keyid if the key
+ already exists in the database.
+ required: true
+ state:
+ description:
+ - Whether the key will be imported or removed from the rpm db.
+ default: present
+ choices: [ absent, present ]
+ validate_certs:
+ description:
+ - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ fingerprint:
+ description:
+ - The long-form fingerprint of the key being imported.
+ - This will be used to verify the specified key.
+ type: str
+ version_added: 2.9
+'''
+
+EXAMPLES = '''
+- name: Import a key from a url
+ rpm_key:
+ state: present
+ key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
+
+- name: Import a key from a file
+ rpm_key:
+ state: present
+ key: /path/to/key.gpg
+
+- name: Ensure a key is not present in the db
+ rpm_key:
+ state: absent
+ key: DEADB33F
+
+- name: Verify the key, using a fingerprint, before import
+ rpm_key:
+ key: /path/to/RPM-GPG-KEY.dag.txt
+ fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
+'''
+import re
+import os.path
+import tempfile
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_pubkey(string):
+ """Verifies if string is a pubkey"""
+ pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
+ return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
+
+
+class RpmKey(object):
+
+ def __init__(self, module):
+ # If the key is a url, we need to check if it's present to be idempotent;
+ # to do that, we need to check the keyid, which we can get from the armor.
+ keyfile = None
+ should_cleanup_keyfile = False
+ self.module = module
+ self.rpm = self.module.get_bin_path('rpm', True)
+ state = module.params['state']
+ key = module.params['key']
+ fingerprint = module.params['fingerprint']
+ if fingerprint:
+ fingerprint = fingerprint.replace(' ', '').upper()
+
+ self.gpg = self.module.get_bin_path('gpg')
+ if not self.gpg:
+ self.gpg = self.module.get_bin_path('gpg2', required=True)
+
+ if '://' in key:
+ keyfile = self.fetch_key(key)
+ keyid = self.getkeyid(keyfile)
+ should_cleanup_keyfile = True
+ elif self.is_keyid(key):
+ keyid = key
+ elif os.path.isfile(key):
+ keyfile = key
+ keyid = self.getkeyid(keyfile)
+ else:
+ self.module.fail_json(msg="Not a valid key %s" % key)
+ keyid = self.normalize_keyid(keyid)
+
+ if state == 'present':
+ if self.is_key_imported(keyid):
+ module.exit_json(changed=False)
+ else:
+ if not keyfile:
+ self.module.fail_json(msg="When importing a key, a valid file must be given")
+ if fingerprint:
+ has_fingerprint = self.getfingerprint(keyfile)
+ if fingerprint != has_fingerprint:
+ self.module.fail_json(
+ msg="The specified fingerprint, '%s', does not match the key fingerprint '%s'" % (fingerprint, has_fingerprint)
+ )
+ self.import_key(keyfile)
+ if should_cleanup_keyfile:
+ self.module.cleanup(keyfile)
+ module.exit_json(changed=True)
+ else:
+ if self.is_key_imported(keyid):
+ self.drop_key(keyid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ def fetch_key(self, url):
+ """Downloads a key from url, returns a valid path to a gpg key"""
+ rsp, info = fetch_url(self.module, url)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
+
+ key = rsp.read()
+ if not is_pubkey(key):
+ self.module.fail_json(msg="Not a public key: %s" % url)
+ tmpfd, tmpname = tempfile.mkstemp()
+ self.module.add_cleanup_file(tmpname)
+ tmpfile = os.fdopen(tmpfd, "w+b")
+ tmpfile.write(key)
+ tmpfile.close()
+ return tmpname
+
+ def normalize_keyid(self, keyid):
+ """Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
+ ret = keyid.strip().upper()
+ if ret.startswith('0x'):
+ return ret[2:]
+ elif ret.startswith('0X'):
+ return ret[2:]
+ else:
+ return ret
+
+ def getkeyid(self, keyfile):
+ stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
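+ # Typical --with-colons output line (illustrative):
+ # pub:-:4096:1:A20E52146B8D79E6:... -> field 5 (index 4) is the long key ID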
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith('pub:'):
+ return line.split(':')[4]
+
+ self.module.fail_json(msg="Unexpected gpg output")
+
+ def getfingerprint(self, keyfile):
+ stdout, stderr = self.execute_command([
+ self.gpg, '--no-tty', '--batch', '--with-colons',
+ '--fixed-list-mode', '--with-fingerprint', keyfile
+ ])
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith('fpr:'):
+ # As mentioned here,
+ #
+ # https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob_plain;f=doc/DETAILS
+ #
+ # The description of the `fpr` field says
+ #
+ # "fpr :: Fingerprint (fingerprint is in field 10)"
+ #
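+ # e.g. fpr:::::::::EBC6E12C62B1C734026B2122A20E52146B8D79E6:
+ # (illustrative line; field 10, index 9, holds the fingerprint)
+ #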
+ return line.split(':')[9]
+
+ self.module.fail_json(msg="Unexpected gpg output")
+
+ def is_keyid(self, keystr):
+ """Verifies if a key, as provided by the user is a keyid"""
+ return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
+
+ def execute_command(self, cmd):
+ rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+ return stdout, stderr
+
+ def is_key_imported(self, keyid):
+ cmd = self.rpm + ' -q gpg-pubkey'
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0: # No key is installed on system
+ return False
+ cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
+ stdout, stderr = self.execute_command(cmd)
+ for line in stdout.splitlines():
+ if keyid in line.split(':')[4]:
+ return True
+ return False
+
+ def import_key(self, keyfile):
+ if not self.module.check_mode:
+ self.execute_command([self.rpm, '--import', keyfile])
+
+ def drop_key(self, keyid):
+ if not self.module.check_mode:
+ self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ key=dict(type='str', required=True),
+ fingerprint=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ RpmKey(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/script.py b/lib/ansible/modules/script.py
new file mode 100644
index 00000000..3e013820
--- /dev/null
+++ b/lib/ansible/modules/script.py
@@ -0,0 +1,87 @@
+# Copyright: (c) 2012, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: script
+version_added: "0.9"
+short_description: Runs a local script on a remote node after transferring it
+description:
+ - The C(script) module takes the script name followed by a list of space-delimited arguments.
+ - Either a free form command or C(cmd) parameter is required; see the examples.
+ - The local script at path will be transferred to the remote node and then executed.
+ - The given script will be processed through the shell environment on the remote node.
+ - This module does not require python on the remote system, much like the M(ansible.builtin.raw) module.
+ - This module is also supported for Windows targets.
+options:
+ free_form:
+ description:
+ - Path to the local script file followed by optional arguments.
+ cmd:
+ type: str
+ description:
+ - Path to the local script to run followed by optional arguments.
+ creates:
+ description:
+ - A filename on the remote node, when it already exists, this step will B(not) be run.
+ version_added: "1.5"
+ removes:
+ description:
+ - A filename on the remote node, when it does not exist, this step will B(not) be run.
+ version_added: "1.5"
+ chdir:
+ description:
+ - Change into this directory on the remote node before running the script.
+ version_added: "2.4"
+ executable:
+ description:
+ - Name or path of an executable to invoke the script with.
+ version_added: "2.6"
+notes:
+ - It is usually preferable to write Ansible modules rather than pushing scripts. Convert your script to an Ansible module for bonus points!
+ - The C(ssh) connection plugin will force pseudo-tty allocation via C(-tt) when scripts are executed. Pseudo-ttys do not have a stderr channel and all
+ stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
+ - If the path to the local script contains spaces, it needs to be quoted.
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.shell
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+extends_documentation_fragment:
+ - decrypt
+'''
+
+EXAMPLES = r'''
+- name: Run a script with arguments (free form)
+ script: /some/local/script.sh --some-argument 1234
+
+- name: Run a script with arguments (using 'cmd' parameter)
+ script:
+ cmd: /some/local/script.sh --some-argument 1234
+
+- name: Run a script only if file.txt does not exist on the remote node
+ script: /some/local/create_file.sh --some-argument 1234
+ args:
+ creates: /the/created/file.txt
+
+- name: Run a script only if file.txt exists on the remote node
+ script: /some/local/remove_file.sh --some-argument 1234
+ args:
+ removes: /the/removed/file.txt
+
+- name: Run a script using an executable in a non-system path
+ script: /some/local/script
+ args:
+ executable: /some/remote/executable
+
+- name: Run a script using an executable in a system path
+ script: /some/local/script.py
+ args:
+ executable: python3
+'''
diff --git a/lib/ansible/modules/service.py b/lib/ansible/modules/service.py
new file mode 100644
index 00000000..8ed03618
--- /dev/null
+++ b/lib/ansible/modules/service.py
@@ -0,0 +1,1671 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: service
+version_added: "0.1"
+short_description: Manage services
+description:
+ - Controls services on remote hosts. Supported init systems include BSD init,
+ OpenRC, SysV, Solaris SMF, systemd, upstart.
+ - For Windows targets, use the M(ansible.windows.win_service) module instead.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - C(restarted) will always bounce the service.
+ - C(reloaded) will always reload.
+ - B(At least one of state and enabled are required.)
+ - Note that reloaded will start the service if it is not already started,
+ even if your chosen init system wouldn't normally.
+ type: str
+ choices: [ reloaded, restarted, started, stopped ]
+ sleep:
+ description:
+ - If the service is being C(restarted) then sleep this many seconds
+ between the stop and start command.
+ - This helps to work around badly-behaving init scripts that exit immediately
+ after signaling a process to stop.
+ - Not all service managers support sleep, for example when using systemd this setting will be ignored.
+ type: int
+ version_added: "1.3"
+ pattern:
+ description:
+ - If the service does not respond to the status command, name a
+ substring to look for as would be found in the output of the I(ps)
+ command as a stand-in for a status result.
+ - If the string is found, the service will be assumed to be started.
+ - While using remote hosts with systemd this setting will be ignored.
+ type: str
+ version_added: "0.7"
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled are required.)
+ type: bool
+ runlevel:
+ description:
+ - For OpenRC init scripts (e.g. Gentoo) only.
+ - The runlevel that this service belongs to.
+ - While using remote hosts with systemd this setting will be ignored.
+ type: str
+ default: default
+ arguments:
+ description:
+ - Additional arguments provided on the command line.
+ - While using remote hosts with systemd this setting will be ignored.
+ type: str
+ aliases: [ args ]
+ use:
+ description:
+ - The service module actually uses system-specific modules, normally through auto detection; this setting can force a specific module.
+ - Normally it uses the value of the 'ansible_service_mgr' fact and falls back to the old 'service' module when none matching is found.
+ type: str
+ default: auto
+ version_added: 2.2
+notes:
+ - For AIX, group subsystem names can be used.
+seealso:
+- module: ansible.windows.win_service
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Start service httpd, if not started
+ service:
+ name: httpd
+ state: started
+
+- name: Stop service httpd, if started
+ service:
+ name: httpd
+ state: stopped
+
+- name: Restart service httpd, in all cases
+ service:
+ name: httpd
+ state: restarted
+
+- name: Reload service httpd, in all cases
+ service:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd, and do not touch the state
+ service:
+ name: httpd
+ enabled: yes
+
+- name: Start service foo, based on running process /usr/bin/foo
+ service:
+ name: foo
+ pattern: /usr/bin/foo
+ state: started
+
+- name: Restart network service for interface eth0
+ service:
+ name: network
+ state: restarted
+ args: eth0
+'''
+
+import glob
+import json
+import os
+import platform
+import re
+import select
+import shlex
+import string
+import subprocess
+import tempfile
+import time
+
+# The distutils module is not shipped with SUNWPython on Solaris.
+# It's in the SUNWPython-devel package which also contains development files
+# that don't belong on production boxes. Since our Solaris code doesn't
+# depend on LooseVersion, do not import it on Solaris.
+if platform.system() != 'SunOS':
+ from distutils.version import LooseVersion
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils.six import PY2, b
+
+
+class Service(object):
+ """
+ This is the generic Service manipulation class that is subclassed
+ based on platform.
+
+ A subclass should override the following action methods:
+ - get_service_tools
+ - service_enable
+ - get_service_status
+ - service_control
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(Service)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+ self.sleep = module.params['sleep']
+ self.pattern = module.params['pattern']
+ self.enable = module.params['enabled']
+ self.runlevel = module.params['runlevel']
+ self.changed = False
+ self.running = None
+ self.crashed = None
+ self.action = None
+ self.svc_cmd = None
+ self.svc_initscript = None
+ self.svc_initctl = None
+ self.enable_cmd = None
+ self.arguments = module.params.get('arguments', '')
+ self.rcconf_file = None
+ self.rcconf_key = None
+ self.rcconf_value = None
+ self.svc_change = False
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get_service_tools(self):
+ self.module.fail_json(msg="get_service_tools not implemented on target platform")
+
+ def service_enable(self):
+ self.module.fail_json(msg="service_enable not implemented on target platform")
+
+ def get_service_status(self):
+ self.module.fail_json(msg="get_service_status not implemented on target platform")
+
+ def service_control(self):
+ self.module.fail_json(msg="service_control not implemented on target platform")
+
+ # ===========================================
+ # Generic methods that should be used on all platforms.
+
+ def execute_command(self, cmd, daemonize=False):
+
+ # Most things don't need to be daemonized
+ if not daemonize:
+ # chkconfig localizes messages and we're screen scraping so make
+ # sure we use the C locale
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ return self.module.run_command(cmd, environ_update=lang_env)
+
+ # This is complex because daemonization is hard for people.
+ # What we do is daemonize a part of this module, the daemon runs the
+ # command, picks up the return code and output, and returns it to the
+ # main process.
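+ # The child reports results as a JSON blob over the pipe, e.g.
+ # '[0, "stdout text", "stderr text"]' (illustrative), parsed at the bottom.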
+ pipe = os.pipe()
+ pid = os.fork()
+ if pid == 0:
+ os.close(pipe[0])
+ # Set stdin/stdout/stderr to /dev/null
+ fd = os.open(os.devnull, os.O_RDWR)
+ if fd != 0:
+ os.dup2(fd, 0)
+ if fd != 1:
+ os.dup2(fd, 1)
+ if fd != 2:
+ os.dup2(fd, 2)
+ if fd not in (0, 1, 2):
+ os.close(fd)
+
+ # Make us a daemon. Yes, that's all it takes.
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+ os.setsid()
+ os.chdir("/")
+ pid = os.fork()
+ if pid > 0:
+ os._exit(0)
+
+ # Start the command
+ if PY2:
+ # Python 2.6's shlex.split can't handle text strings correctly
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+ cmd = shlex.split(cmd)
+ else:
+ # Python 3.x shlex.split handles text strings correctly.
+ cmd = to_text(cmd, errors='surrogate_or_strict')
+ cmd = [to_bytes(c, errors='surrogate_or_strict') for c in shlex.split(cmd)]
+ # In either of the above cases, pass a list of byte strings to Popen
+
+ # chkconfig localizes messages and we're screen scraping so make
+ # sure we use the C locale
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=lang_env, preexec_fn=lambda: os.close(pipe[1]))
+ stdout = b("")
+ stderr = b("")
+ fds = [p.stdout, p.stderr]
+ # Wait for all output, or until the main process is dead and its output is done.
+ while fds:
+ rfd, wfd, efd = select.select(fds, [], fds, 1)
+ if not (rfd + wfd + efd) and p.poll() is not None:
+ break
+ if p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), 4096)
+ if not dat:
+ fds.remove(p.stdout)
+ stdout += dat
+ if p.stderr in rfd:
+ dat = os.read(p.stderr.fileno(), 4096)
+ if not dat:
+ fds.remove(p.stderr)
+ stderr += dat
+ p.wait()
+ # Return a JSON blob to parent
+ blob = json.dumps([p.returncode, to_text(stdout), to_text(stderr)])
+ os.write(pipe[1], to_bytes(blob, errors='surrogate_or_strict'))
+ os.close(pipe[1])
+ os._exit(0)
+ elif pid == -1:
+ self.module.fail_json(msg="unable to fork")
+ else:
+ os.close(pipe[1])
+ os.waitpid(pid, 0)
+ # Wait for data from daemon process and process it.
+ data = b("")
+ while True:
+ rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
+ if pipe[0] in rfd:
+ dat = os.read(pipe[0], 4096)
+ if not dat:
+ break
+ data += dat
+ return json.loads(to_text(data, errors='surrogate_or_strict'))
+
+ def check_ps(self):
+ # Set ps flags
+ if platform.system() == 'SunOS':
+ psflags = '-ef'
+ else:
+ psflags = 'auxww'
+
+ # Find ps binary
+ psbin = self.module.get_bin_path('ps', True)
+
+ (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ self.running = False
+ lines = psout.split("\n")
+ for line in lines:
+ if self.pattern in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module.py
+ self.running = True
+ break
+
+ def check_service_changed(self):
+ if self.state and self.running is None:
+ self.module.fail_json(msg="failed determining service state, possible typo of service name?")
+ # Find out if state has changed
+ if not self.running and self.state in ["reloaded", "started"]:
+ self.svc_change = True
+ elif self.running and self.state in ["reloaded", "stopped"]:
+ self.svc_change = True
+ elif self.state == "restarted":
+ self.svc_change = True
+ if self.module.check_mode and self.svc_change:
+ self.module.exit_json(changed=True, msg='service state changed')
+
+ def modify_service_state(self):
+
+ # Only do something if state will change
+ if self.svc_change:
+ # Control service
+ if self.state in ['started']:
+ self.action = "start"
+ elif not self.running and self.state == 'reloaded':
+ self.action = "start"
+ elif self.state == 'stopped':
+ self.action = "stop"
+ elif self.state == 'reloaded':
+ self.action = "reload"
+ elif self.state == 'restarted':
+ self.action = "restart"
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='changing service state')
+
+ return self.service_control()
+
+ else:
+ # If nothing needs to change just say all is well
+ rc = 0
+ err = ''
+ out = ''
+ return rc, out, err
+
+ def service_enable_rcconf(self):
+ if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
+ self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")
+
+ self.changed = None
+ entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
+ RCFILE = open(self.rcconf_file, "r")
+ new_rc_conf = []
+
+ # Build a list containing the possibly modified file.
+ for rcline in RCFILE:
+ # Parse line removing whitespaces, quotes, etc.
+ rcarray = shlex.split(rcline, comments=True)
+ if len(rcarray) >= 1 and '=' in rcarray[0]:
+ (key, value) = rcarray[0].split("=", 1)
+ if key == self.rcconf_key:
+ if value.upper() == self.rcconf_value:
+ # Since the proper entry already exists we can stop iterating.
+ self.changed = False
+ break
+ else:
+ # We found the key but the value is wrong, replace with new entry.
+ rcline = entry
+ self.changed = True
+
+ # Add line to the list.
+ new_rc_conf.append(rcline.strip() + '\n')
+
+ # We are done with reading the current rc.conf, close it.
+ RCFILE.close()
+
+ # If we did not see any trace of our entry we need to add it.
+ if self.changed is None:
+ new_rc_conf.append(entry)
+ self.changed = True
+
+ if self.changed is True:
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ # Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
+ # This way the replacement operation is atomic.
+ rcconf_dir = os.path.dirname(self.rcconf_file)
+ rcconf_base = os.path.basename(self.rcconf_file)
+ (TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)
+
+ # Write out the contents of the list into our temporary file.
+ for rcline in new_rc_conf:
+ os.write(TMP_RCCONF, to_bytes(rcline))
+
+ # Close temporary file.
+ os.close(TMP_RCCONF)
+
+ # Replace previous rc.conf.
+ self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
+
+
+class LinuxService(Service):
+ """
+ This is the Linux Service manipulation class - it is currently supporting
+ a mixture of binaries and init scripts for controlling services started at
+ boot, as well as for controlling the current state.
+ """
+
+ platform = 'Linux'
+ distribution = None
+
+ def get_service_tools(self):
+
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv']
+ initpaths = ['/etc/init.d']
+ location = dict()
+
+ for binary in binaries:
+ location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
+
+ for initdir in initpaths:
+ initscript = "%s/%s" % (initdir, self.name)
+ if os.path.isfile(initscript):
+ self.svc_initscript = initscript
+
+ def check_systemd():
+
+ # tools must be installed
+ if location.get('systemctl', False):
+
+ # this should show if systemd is the boot init system
+ # these mirror systemd's own sd_booted test http://www.freedesktop.org/software/systemd/man/sd_booted.html
+ for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
+ if os.path.exists(canary):
+ return True
+
+ # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink
+ try:
+ f = open('/proc/1/comm', 'r')
+ except IOError:
+ # If comm doesn't exist, old kernel, no systemd
+ return False
+
+ for line in f:
+ if 'systemd' in line:
+ return True
+
+ return False
+
+ # Locate a tool to enable/disable a service
+ if check_systemd():
+ # service is managed by systemd
+ self.__systemd_unit = self.name
+ self.svc_cmd = location['systemctl']
+ self.enable_cmd = location['systemctl']
+
+ elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
+ # service is managed by upstart
+ self.enable_cmd = location['initctl']
+ # set the upstart version based on the output of 'initctl version'
+ self.upstart_version = LooseVersion('0.0.0')
+ try:
+ version_re = re.compile(r'\(upstart (.*)\)')
+ rc, stdout, stderr = self.module.run_command('%s version' % location['initctl'])
+ if rc == 0:
+ res = version_re.search(stdout)
+ if res:
+ self.upstart_version = LooseVersion(res.groups()[0])
+ except Exception:
+ pass # we'll use the default of 0.0.0
+
+ self.svc_cmd = location['initctl']
+
+ elif location.get('rc-service', False):
+ # service is managed by OpenRC
+ self.svc_cmd = location['rc-service']
+ self.enable_cmd = location['rc-update']
+ return # already have service start/stop tool too!
+
+ elif self.svc_initscript:
+ # service is managed with SysV init scripts
+ if location.get('update-rc.d', False):
+ # and uses update-rc.d
+ self.enable_cmd = location['update-rc.d']
+ elif location.get('insserv', None):
+ # and uses insserv
+ self.enable_cmd = location['insserv']
+ elif location.get('chkconfig', False):
+ # and uses chkconfig
+ self.enable_cmd = location['chkconfig']
+
+ if self.enable_cmd is None:
+ fail_if_missing(self.module, False, self.name, msg='host')
+
+ # If no service control tool selected yet, try to see if 'service' is available
+ if self.svc_cmd is None and location.get('service', False):
+ self.svc_cmd = location['service']
+
+ # couldn't find anything yet
+ if self.svc_cmd is None and not self.svc_initscript:
+ self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name? Aborting.')
+
+ if location.get('initctl', False):
+ self.svc_initctl = location['initctl']
+
+ def get_systemd_service_enabled(self):
+ def sysv_exists(name):
+ script = '/etc/init.d/' + name
+ return os.access(script, os.X_OK)
+
+ def sysv_is_enabled(name):
+ return bool(glob.glob('/etc/rc?.d/S??' + name))
+
+ service_name = self.__systemd_unit
+ (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
+ if rc == 0:
+ return True
+ elif out.startswith('disabled'):
+ return False
+ elif sysv_exists(service_name):
+ return sysv_is_enabled(service_name)
+ else:
+ return False
+
+ def get_systemd_status_dict(self):
+
+ # Check status first as show will not fail if service does not exist
+ (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
+ if rc != 0:
+ self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
+ elif 'LoadState=not-found' in out:
+ self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))
+
+ key = None
+ value_buffer = []
+ status_dict = {}
+ for line in out.splitlines():
+ if '=' in line:
+ if not key:
+ key, value = line.split('=', 1)
+ # systemd fields that are shell commands can be multi-line
+ # We take a value that begins with a "{" as the start of
+ # a shell command and a line that ends with "}" as the end of
+ # the command
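+ # e.g. ExecStart={ path=/usr/sbin/foo ; argv[]=/usr/sbin/foo ; ... }
+ # (illustrative; such values may span several lines)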
+ if value.lstrip().startswith('{'):
+ if value.rstrip().endswith('}'):
+ status_dict[key] = value
+ key = None
+ else:
+ value_buffer.append(value)
+ else:
+ status_dict[key] = value
+ key = None
+ else:
+ if line.rstrip().endswith('}'):
+ status_dict[key] = '\n'.join(value_buffer)
+ key = None
+ else:
+ value_buffer.append(line)
+ else:
+ if line.rstrip().endswith('}'):
+ status_dict[key] = '\n'.join(value_buffer)
+ key = None
+ else:
+ value_buffer.append(line)
+
+ return status_dict
+
+ def get_systemd_service_status(self):
+ d = self.get_systemd_status_dict()
+ if d.get('ActiveState') == 'active':
+ # run-once services (for which a single successful exit indicates
+ # that they are running as designed) should not be restarted here.
+ # Thus, we are not checking d['SubState'].
+ self.running = True
+ self.crashed = False
+ elif d.get('ActiveState') == 'failed':
+ self.running = False
+ self.crashed = True
+ elif d.get('ActiveState') is None:
+ self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
+ else:
+ self.running = False
+ self.crashed = False
+ return self.running
+
+ def get_service_status(self):
+ if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
+ return self.get_systemd_service_status()
+
+ self.action = "status"
+ rc, status_stdout, status_stderr = self.service_control()
+
+ # if we have decided the service is managed by upstart, we check for some additional output...
+ if self.svc_initctl and self.running is None:
+ # check the job status by upstart response
+ initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s %s" % (self.svc_initctl, self.name, self.arguments))
+ if "stop/waiting" in initctl_status_stdout:
+ self.running = False
+ elif "start/running" in initctl_status_stdout:
+ self.running = True
+
+ if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
+ openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
+ self.running = "started" in openrc_status_stdout
+ self.crashed = "crashed" in openrc_status_stderr
+
+ # Prefer a non-zero return code. For reference, see:
+ # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+ if self.running is None and rc in [1, 2, 3, 4, 69]:
+ self.running = False
+
+ # if the job status is still not known check it by status output keywords
+ # Only check keywords if there's only one line of output (some init
+ # scripts will output verbosely in case of error and those can emit
+ # keywords that are picked up as false positives)
+ if self.running is None and status_stdout.count('\n') <= 1:
+ # first remove the service name from the status output so it cannot confuse keyword matching
+ cleanout = status_stdout.lower().replace(self.name.lower(), '')
+ if "stop" in cleanout:
+ self.running = False
+ elif "run" in cleanout:
+ self.running = not ("not " in cleanout)
+ elif "start" in cleanout and "not " not in cleanout:
+ self.running = True
+ elif 'could not access pid file' in cleanout:
+ self.running = False
+ elif 'is dead and pid file exists' in cleanout:
+ self.running = False
+ elif 'dead but subsys locked' in cleanout:
+ self.running = False
+ elif 'dead but pid file exists' in cleanout:
+ self.running = False
+
+ # if the job status is still not known and we got a zero for the
+ # return code, assume here that the service is running
+ if self.running is None and rc == 0:
+ self.running = True
+
+ # if the job status is still not known check it by special conditions
+ if self.running is None:
+ if self.name == 'iptables' and "ACCEPT" in status_stdout:
+ # iptables status command output is lame
+ # TODO: lookup if we can use a return code for this instead?
+ self.running = True
+
+ return self.running
+
+ def service_enable(self):
+
+ if self.enable_cmd is None:
+ self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name)
+
+ self.changed = True
+ action = None
+
+ #
+ # Upstart's initctl
+ #
+ if self.enable_cmd.endswith("initctl"):
+ def write_to_override_file(file_name, file_contents, ):
+ override_file = open(file_name, 'w')
+ override_file.write(file_contents)
+ override_file.close()
+
+ initpath = '/etc/init'
+ if self.upstart_version >= LooseVersion('0.6.7'):
+ manreg = re.compile(r'^manual\s*$', re.M | re.I)
+ config_line = 'manual\n'
+ else:
+ manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
+ config_line = 'start on manual\n'
+ conf_file_name = "%s/%s.conf" % (initpath, self.name)
+ override_file_name = "%s/%s.override" % (initpath, self.name)
+
+ # Check to see if files contain the manual line in .conf and fail if True
+ with open(conf_file_name) as conf_file_fh:
+ conf_file_content = conf_file_fh.read()
+ if manreg.search(conf_file_content):
+ self.module.fail_json(msg="manual stanza not supported in a .conf file")
+
+ self.changed = False
+ if os.path.exists(override_file_name):
+ with open(override_file_name) as override_fh:
+ override_file_contents = override_fh.read()
+ # Remove manual stanza if present and service enabled
+ if self.enable and manreg.search(override_file_contents):
+ self.changed = True
+ override_state = manreg.sub('', override_file_contents)
+ # Add manual stanza if not present and service disabled
+ elif not (self.enable) and not (manreg.search(override_file_contents)):
+ self.changed = True
+ override_state = '\n'.join((override_file_contents, config_line))
+ # service already in desired state
+ else:
+ pass
+ # Add file with manual stanza if service disabled
+ elif not (self.enable):
+ self.changed = True
+ override_state = config_line
+ else:
+ # service already in desired state
+ pass
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ # The initctl method of enabling and disabling services is much
+ # different than for the other service methods. So actually
+ # committing the change is done in this conditional and then we
+ # skip the boilerplate at the bottom of the method
+ if self.changed:
+ try:
+ write_to_override_file(override_file_name, override_state)
+ except Exception:
+ self.module.fail_json(msg='Could not modify override file')
+
+ return
+
+ #
+ # SysV's chkconfig
+ #
+ if self.enable_cmd.endswith("chkconfig"):
+ if self.enable:
+ action = 'on'
+ else:
+ action = 'off'
+
+ (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
+ if 'chkconfig --add %s' % self.name in err:
+ self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
+ (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
+ if self.name not in out:
+ self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
+ # TODO: look back on why this is here
+ # state = out.split()[-1]
+
+ # Check if we're already in the correct state
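+            # a 'chkconfig --list <name>' line looks like: "<name>  0:off 1:off 2:on 3:on 4:on 5:on 6:off"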
+ if "3:%s" % action in out and "5:%s" % action in out:
+ self.changed = False
+ return
+
+ #
+ # Systemd's systemctl
+ #
+ if self.enable_cmd.endswith("systemctl"):
+ if self.enable:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # Check if we're already in the correct state
+ service_enabled = self.get_systemd_service_enabled()
+
+ # self.changed should already be true
+ if self.enable == service_enabled:
+ self.changed = False
+ return
+
+ #
+ # OpenRC's rc-update
+ #
+ if self.enable_cmd.endswith("rc-update"):
+ if self.enable:
+ action = 'add'
+ else:
+ action = 'delete'
+
+ (rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
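+            # each line of 'rc-update show' output looks like: "  <service> | <runlevel> [<runlevel> ...]"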
+ for line in out.splitlines():
+ service_name, runlevels = line.split('|')
+ service_name = service_name.strip()
+ if service_name != self.name:
+ continue
+ runlevels = re.split(r'\s+', runlevels)
+ # service already enabled for the runlevel
+ if self.enable and self.runlevel in runlevels:
+ self.changed = False
+ # service already disabled for the runlevel
+ elif not self.enable and self.runlevel not in runlevels:
+ self.changed = False
+ break
+ else:
+ # service already disabled altogether
+ if not self.enable:
+ self.changed = False
+
+ if not self.changed:
+ return
+
+ #
+ # update-rc.d style
+ #
+ if self.enable_cmd.endswith("update-rc.d"):
+
+ enabled = False
+ slinks = glob.glob('/etc/rc?.d/S??' + self.name)
+ if slinks:
+ enabled = True
+
+ if self.enable != enabled:
+ self.changed = True
+
+ if self.enable:
+ action = 'enable'
+ klinks = glob.glob('/etc/rc?.d/K??' + self.name)
+ if not klinks:
+ if not self.module.check_mode:
+ (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg=err)
+ else:
+                            self.module.fail_json(msg=out)
+ else:
+ action = 'disable'
+
+ if not self.module.check_mode:
+ (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg=err)
+ else:
+                        self.module.fail_json(msg=out)
+ else:
+ self.changed = False
+
+ return
+
+ #
+ # insserv (Debian <=7, SLES, others)
+ #
+ if self.enable_cmd.endswith("insserv"):
+ if self.enable:
+ (rc, out, err) = self.execute_command("%s -n -v %s" % (self.enable_cmd, self.name))
+ else:
+ (rc, out, err) = self.execute_command("%s -n -r -v %s" % (self.enable_cmd, self.name))
+
+ self.changed = False
+ for line in err.splitlines():
+ if self.enable and line.find('enable service') != -1:
+ self.changed = True
+ break
+ if not self.enable and line.find('remove service') != -1:
+ self.changed = True
+ break
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ if not self.changed:
+ return
+
+ if self.enable:
+ (rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
+ if (rc != 0) or (err != ''):
+ self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+ return (rc, out, err)
+ else:
+ (rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
+ if (rc != 0) or (err != ''):
+ self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+ return (rc, out, err)
+
+ #
+ # If we've gotten to the end, the service needs to be updated
+ #
+ self.changed = True
+
+ # we change argument order depending on real binary used:
+ # rc-update and systemctl need the argument order reversed
+
+ if self.enable_cmd.endswith("rc-update"):
+ args = (self.enable_cmd, action, self.name + " " + self.runlevel)
+ elif self.enable_cmd.endswith("systemctl"):
+ args = (self.enable_cmd, action, self.__systemd_unit)
+ else:
+ args = (self.enable_cmd, self.name, action)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=self.changed)
+
+ (rc, out, err) = self.execute_command("%s %s %s" % args)
+ if rc != 0:
+ if err:
+ self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
+ else:
+ self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
+
+ return (rc, out, err)
+
+ def service_control(self):
+
+ # Decide what command to run
+ svc_cmd = ''
+ arguments = self.arguments
+ if self.svc_cmd:
+ if not self.svc_cmd.endswith("systemctl"):
+ if self.svc_cmd.endswith("initctl"):
+ # initctl commands take the form <cmd> <action> <name>
+ svc_cmd = self.svc_cmd
+ arguments = "%s %s" % (self.name, arguments)
+ else:
+ # SysV and OpenRC take the form <cmd> <name> <action>
+ svc_cmd = "%s %s" % (self.svc_cmd, self.name)
+ else:
+ # systemd commands take the form <cmd> <action> <name>
+ svc_cmd = self.svc_cmd
+ arguments = "%s %s" % (self.__systemd_unit, arguments)
+ elif self.svc_cmd is None and self.svc_initscript:
+ # upstart
+ svc_cmd = "%s" % self.svc_initscript
+
+ # In OpenRC, if a service crashed, we need to reset its status to
+ # stopped with the zap command, before we can start it back.
+ if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
+ self.execute_command("%s zap" % svc_cmd, daemonize=True)
+
+ if self.action != "restart":
+ if svc_cmd != '':
+ # upstart or systemd or OpenRC
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
+ else:
+ # SysV
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
+ elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
+ # All services in OpenRC support restart.
+ rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
+ else:
+ # In other systems, not all services support restart. Do it the hard way.
+ if svc_cmd != '':
+ # upstart or systemd
+ rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
+ else:
+ # SysV
+ rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
+
+ if self.sleep:
+ time.sleep(self.sleep)
+
+ if svc_cmd != '':
+ # upstart or systemd
+ rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
+ else:
+ # SysV
+ rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
+
+ # merge return information
+ if rc1 != 0 and rc2 == 0:
+ rc_state = rc2
+ stdout = stdout2
+ stderr = stderr2
+ else:
+ rc_state = rc1 + rc2
+ stdout = stdout1 + stdout2
+ stderr = stderr1 + stderr2
+
+ return (rc_state, stdout, stderr)
+
+
+class FreeBsdService(Service):
+ """
+ This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot and the 'service' binary to
+ check status and perform direct service manipulation.
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ self.svc_cmd = self.module.get_bin_path('service', True)
+ if not self.svc_cmd:
+ self.module.fail_json(msg='unable to find service binary')
+
+ self.sysrc_cmd = self.module.get_bin_path('sysrc')
+
+ def get_service_status(self):
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments))
+ if self.name == "pf":
+ self.running = "Enabled" in stdout
+ else:
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf', '/etc/rc.conf.local', '/usr/local/etc/rc.conf']
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
+        rcvars = None
+        try:
+            rcvars = shlex.split(stdout, comments=True)
+        except Exception:
+            # TODO: add a warning to the output with the failure
+            pass
+
+ if not rcvars:
+ self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
+
+        # In rare cases, e.g. sendmail, rcvar can return several key=value pairs
+        # Usually there is just one, however. In other rare cases, e.g. uwsgi,
+        # rcvar can return extra uncommented data that is not at all related to
+        # the rcvar. We will just take the first key=value pair we come across
+        # and hope for the best.
+ for rcvar in rcvars:
+ if '=' in rcvar:
+ self.rcconf_key, default_rcconf_value = rcvar.split('=', 1)
+ break
+
+ if self.rcconf_key is None:
+ self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
+
+ if self.sysrc_cmd: # FreeBSD >= 9.2
+
+ rc, current_rcconf_value, stderr = self.execute_command("%s -n %s" % (self.sysrc_cmd, self.rcconf_key))
+ # it can happen that rcvar is not set (case of a system coming from the ports collection)
+ # so we will fallback on the default
+ if rc != 0:
+ current_rcconf_value = default_rcconf_value
+
+ if current_rcconf_value.strip().upper() != self.rcconf_value:
+
+ self.changed = True
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ rc, change_stdout, change_stderr = self.execute_command("%s %s=\"%s\"" % (self.sysrc_cmd, self.rcconf_key, self.rcconf_value))
+ if rc != 0:
+ self.module.fail_json(msg="unable to set rcvar using sysrc", stdout=change_stdout, stderr=change_stderr)
+
+ # sysrc does not exit with code 1 on permission error => validate successful change using service(8)
+ rc, check_stdout, check_stderr = self.execute_command("%s %s %s" % (self.svc_cmd, self.name, "enabled"))
+ if self.enable != (rc == 0): # rc = 0 indicates enabled service, rc = 1 indicates disabled service
+ self.module.fail_json(msg="unable to set rcvar: sysrc did not change value", stdout=change_stdout, stderr=change_stderr)
+
+ else:
+ self.changed = False
+
+ else: # Legacy (FreeBSD < 9.2)
+ try:
+ return self.service_enable_rcconf()
+ except Exception:
+ self.module.fail_json(msg='unable to set rcvar')
+
+ def service_control(self):
+
+ if self.action == "start":
+ self.action = "onestart"
+ if self.action == "stop":
+ self.action = "onestop"
+ if self.action == "reload":
+ self.action = "onereload"
+
+ ret = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))
+
+ if self.sleep:
+ time.sleep(self.sleep)
+
+ return ret
+
+
+class DragonFlyBsdService(FreeBsdService):
+ """
+ This is the DragonFly BSD Service manipulation class - it uses the /etc/rc.conf
+ file for controlling services started at boot and the 'service' binary to
+ check status and perform direct service manipulation.
+ """
+
+ platform = 'DragonFly'
+ distribution = None
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf'] # Overkill?
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ self.rcconf_key = "%s" % string.replace(self.name, "-", "_")
+
+ return self.service_enable_rcconf()
+
+
+class OpenBsdService(Service):
+ """
+ This is the OpenBSD Service manipulation class - it uses rcctl(8) or
+ /etc/rc.d scripts for service control. Enabling a service is
+ only supported if rcctl is present.
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ self.enable_cmd = self.module.get_bin_path('rcctl')
+
+ if self.enable_cmd:
+ self.svc_cmd = self.enable_cmd
+ else:
+ rcdir = '/etc/rc.d'
+
+ rc_script = "%s/%s" % (rcdir, self.name)
+ if os.path.isfile(rc_script):
+ self.svc_cmd = rc_script
+
+ if not self.svc_cmd:
+ self.module.fail_json(msg='unable to find svc_cmd')
+
+ def get_service_status(self):
+ if self.enable_cmd:
+ rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
+ else:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_control(self):
+ if self.enable_cmd:
+ return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name), daemonize=True)
+ else:
+ return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
+
+ def service_enable(self):
+ if not self.enable_cmd:
+ return super(OpenBsdService, self).service_enable()
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ getdef_string = stdout.rstrip()
+
+ # Depending on the service the string returned from 'getdef' may be
+ # either a set of flags or the boolean YES/NO
+ if getdef_string == "YES" or getdef_string == "NO":
+ default_flags = ''
+ else:
+ default_flags = getdef_string
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags'))
+
+ if stderr:
+ self.module.fail_json(msg=stderr)
+
+ get_string = stdout.rstrip()
+
+ # Depending on the service the string returned from 'get' may be
+ # either a set of flags or the boolean YES/NO
+ if get_string == "YES" or get_string == "NO":
+ current_flags = ''
+ else:
+ current_flags = get_string
+
+ # If there are arguments from the user we use these as flags unless
+ # they are already set.
+ if self.arguments and self.arguments != current_flags:
+ changed_flags = self.arguments
+ # If the user has not supplied any arguments and the current flags
+ # differ from the default we reset them.
+ elif not self.arguments and current_flags != default_flags:
+ changed_flags = ' '
+ # Otherwise there is no need to modify flags.
+ else:
+ changed_flags = ''
+
+ rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
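+        # 'rcctl get <name> status' exits 0 if the service is enabled and 1 if it is not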
+
+ if self.enable:
+ if rc == 0 and not changed_flags:
+ return
+
+ if rc != 0:
+ status_action = "set %s status on" % (self.name)
+ else:
+ status_action = ''
+ if changed_flags:
+ flags_action = "set %s flags %s" % (self.name, changed_flags)
+ else:
+ flags_action = ''
+ else:
+ if rc == 1:
+ return
+
+ status_action = "set %s status off" % self.name
+ flags_action = ''
+
+ # Verify state assumption
+ if not status_action and not flags_action:
+ self.module.fail_json(msg="neither status_action or status_flags is set, this should never happen")
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg="changing service enablement")
+
+ status_modified = 0
+ if status_action:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg="rcctl failed to modify service status")
+
+ status_modified = 1
+
+ if flags_action:
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action))
+
+ if rc != 0:
+ if stderr:
+ if status_modified:
+ error_message = "rcctl modified service status but failed to set flags: " + stderr
+ else:
+ error_message = stderr
+ else:
+ if status_modified:
+ error_message = "rcctl modified service status but failed to set flags"
+ else:
+ error_message = "rcctl failed to modify service flags"
+
+ self.module.fail_json(msg=error_message)
+
+ self.changed = True
+
+
+class NetBsdService(Service):
+ """
+    This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
+    file for controlling services started at boot. Init scripts in /etc/rc.d
+    are used for controlling services (start/stop) as well as for checking
+    their current state.
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+
+ def get_service_tools(self):
+ initpaths = ['/etc/rc.d'] # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories'
+
+ for initdir in initpaths:
+ initscript = "%s/%s" % (initdir, self.name)
+ if os.path.isfile(initscript):
+ self.svc_initscript = initscript
+
+ if not self.svc_initscript:
+ self.module.fail_json(msg='unable to find rc.d script')
+
+ def service_enable(self):
+ if self.enable:
+ self.rcconf_value = "YES"
+ else:
+ self.rcconf_value = "NO"
+
+ rcfiles = ['/etc/rc.conf'] # Overkill?
+ for rcfile in rcfiles:
+ if os.path.isfile(rcfile):
+ self.rcconf_file = rcfile
+
+ self.rcconf_key = "%s" % string.replace(self.name, "-", "_")
+
+ return self.service_enable_rcconf()
+
+ def get_service_status(self):
+ self.svc_cmd = "%s" % self.svc_initscript
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus'))
+ if rc == 1:
+ self.running = False
+ elif rc == 0:
+ self.running = True
+
+ def service_control(self):
+ if self.action == "start":
+ self.action = "onestart"
+ if self.action == "stop":
+ self.action = "onestop"
+
+ self.svc_cmd = "%s" % self.svc_initscript
+ return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True)
+
+
+class SunOSService(Service):
+ """
+ This is the SunOS Service manipulation class - it uses the svcadm
+ command for controlling services, and svcs command for checking status.
+ It also tries to be smart about taking the service out of maintenance
+ state if necessary.
+ """
+ platform = 'SunOS'
+ distribution = None
+
+ def get_service_tools(self):
+ self.svcs_cmd = self.module.get_bin_path('svcs', True)
+
+ if not self.svcs_cmd:
+ self.module.fail_json(msg='unable to find svcs binary')
+
+ self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
+
+ if not self.svcadm_cmd:
+ self.module.fail_json(msg='unable to find svcadm binary')
+
+ if self.svcadm_supports_sync():
+ self.svcadm_sync = '-s'
+ else:
+ self.svcadm_sync = ''
+
+    def svcadm_supports_sync(self):
+        # Support for synchronous restart/refresh is only available on
+        # Oracle Solaris >= 11.2; compare versions with LooseVersion rather
+        # than comparing a string against a float
+        with open('/etc/release', 'r') as release_fh:
+            for line in release_fh:
+                m = re.match(r'\s+Oracle Solaris (\d+\.\d+).*', line.rstrip())
+                if m and LooseVersion(m.groups()[0]) >= LooseVersion('11.2'):
+                    return True
+
+ def get_service_status(self):
+ status = self.get_sunos_svcs_status()
+ # Only 'online' is considered properly running. Everything else is off
+ # or has some sort of problem.
+ if status == 'online':
+ self.running = True
+ else:
+ self.running = False
+
+ def get_sunos_svcs_status(self):
+ rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
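+        # the last line of 'svcs' output looks like: "online         12:34:56 svc:/network/ssh:default"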
+ lines = stdout.rstrip("\n").split("\n")
+ status = lines[-1].split(" ")[0]
+ # status is one of: online, offline, degraded, disabled, maintenance, uninitialized
+ # see man svcs(1)
+ return status
+
+ def service_enable(self):
+ # Get current service enablement status
+ rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ enabled = False
+ temporary = False
+
+ # look for enabled line, which could be one of:
+ # enabled true (temporary)
+ # enabled false (temporary)
+ # enabled true
+ # enabled false
+ for line in stdout.split("\n"):
+ if line.startswith("enabled"):
+ if "true" in line:
+ enabled = True
+ if "temporary" in line:
+ temporary = True
+
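+        # a "(temporary)" state does not survive a reboot, so the boot-time
+        # setting is the inverse of the current one: for example "enabled
+        # false (temporary)" means the service is still enabled at boot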
+ startup_enabled = (enabled and not temporary) or (not enabled and temporary)
+
+ if self.enable and startup_enabled:
+ return
+ elif (not self.enable) and (not startup_enabled):
+ return
+
+ if not self.module.check_mode:
+ # Mark service as started or stopped (this will have the side effect of
+ # actually stopping or starting the service)
+ if self.enable:
+ subcmd = "enable -rs"
+ else:
+ subcmd = "disable -s"
+
+ rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
+
+ if rc != 0:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+
+ self.changed = True
+
+ def service_control(self):
+ status = self.get_sunos_svcs_status()
+
+ # if starting or reloading, clear maintenance states
+ if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
+ rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
+ if rc != 0:
+ return rc, stdout, stderr
+ status = self.get_sunos_svcs_status()
+
+ if status in ['maintenance', 'degraded']:
+ self.module.fail_json(msg="Failed to bring service out of %s status." % status)
+
+ if self.action == 'start':
+ subcmd = "enable -rst"
+ elif self.action == 'stop':
+ subcmd = "disable -st"
+ elif self.action == 'reload':
+ subcmd = "refresh %s" % (self.svcadm_sync)
+ elif self.action == 'restart' and status == 'online':
+ subcmd = "restart %s" % (self.svcadm_sync)
+ elif self.action == 'restart' and status != 'online':
+ subcmd = "enable -rst"
+
+ return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
+
+
+class AIX(Service):
+ """
+ This is the AIX Service (SRC) manipulation class - it uses lssrc, startsrc, stopsrc
+ and refresh for service control. Enabling a service is currently not supported.
+ Would require to add an entry in the /etc/inittab file (mkitab, chitab and rmitab
+ commands)
+ """
+
+ platform = 'AIX'
+ distribution = None
+
+ def get_service_tools(self):
+ self.lssrc_cmd = self.module.get_bin_path('lssrc', True)
+
+ if not self.lssrc_cmd:
+ self.module.fail_json(msg='unable to find lssrc binary')
+
+ self.startsrc_cmd = self.module.get_bin_path('startsrc', True)
+
+ if not self.startsrc_cmd:
+ self.module.fail_json(msg='unable to find startsrc binary')
+
+ self.stopsrc_cmd = self.module.get_bin_path('stopsrc', True)
+
+ if not self.stopsrc_cmd:
+ self.module.fail_json(msg='unable to find stopsrc binary')
+
+ self.refresh_cmd = self.module.get_bin_path('refresh', True)
+
+ if not self.refresh_cmd:
+ self.module.fail_json(msg='unable to find refresh binary')
+
+ def get_service_status(self):
+ status = self.get_aix_src_status()
+ # Only 'active' is considered properly running. Everything else is off
+ # or has some sort of problem.
+ if status == 'active':
+ self.running = True
+ else:
+ self.running = False
+
+ def get_aix_src_status(self):
+ # Check subsystem status
+ rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
+ if rc == 1:
+ # If check for subsystem is not ok, check if service name is a
+ # group subsystem
+ rc, stdout, stderr = self.execute_command("%s -g %s" % (self.lssrc_cmd, self.name))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+ else:
+            # Check the status of all subsystems in the group; if any one is
+            # not active, the group is considered not active.
+ lines = stdout.splitlines()
+ for state in lines[1:]:
+ if state.split()[-1].strip() != "active":
+ status = state.split()[-1].strip()
+ break
+ else:
+ status = "active"
+
+ # status is one of: active, inoperative
+ return status
+ else:
+ lines = stdout.rstrip("\n").split("\n")
+ status = lines[-1].split(" ")[-1]
+
+ # status is one of: active, inoperative
+ return status
+
+ def service_control(self):
+
+ # Check if service name is a subsystem of a group subsystem
+ rc, stdout, stderr = self.execute_command("%s -a" % (self.lssrc_cmd))
+ if rc == 1:
+ if stderr:
+ self.module.fail_json(msg=stderr)
+ else:
+ self.module.fail_json(msg=stdout)
+ else:
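+            # 'lssrc -a' output has the columns: Subsystem, Group, PID, Status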
+ lines = stdout.splitlines()
+ subsystems = []
+ groups = []
+ for line in lines[1:]:
+ subsystem = line.split()[0].strip()
+ group = line.split()[1].strip()
+ subsystems.append(subsystem)
+ if group:
+ groups.append(group)
+
+ # Define if service name parameter:
+ # -s subsystem or -g group subsystem
+ if self.name in subsystems:
+ srccmd_parameter = "-s"
+ elif self.name in groups:
+ srccmd_parameter = "-g"
+
+ if self.action == 'start':
+ srccmd = self.startsrc_cmd
+ elif self.action == 'stop':
+ srccmd = self.stopsrc_cmd
+ elif self.action == 'reload':
+ srccmd = self.refresh_cmd
+ elif self.action == 'restart':
+ self.execute_command("%s %s %s" % (self.stopsrc_cmd, srccmd_parameter, self.name))
+ srccmd = self.startsrc_cmd
+
+ if self.arguments and self.action == 'start':
+ return self.execute_command("%s -a \"%s\" %s %s" % (srccmd, self.arguments, srccmd_parameter, self.name))
+ else:
+ return self.execute_command("%s %s %s" % (srccmd, srccmd_parameter, self.name))
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reloaded', 'restarted']),
+ sleep=dict(type='int'),
+ pattern=dict(type='str'),
+ enabled=dict(type='bool'),
+ runlevel=dict(type='str', default='default'),
+ arguments=dict(type='str', default='', aliases=['args']),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ service = Service(module)
+
+ module.debug('Service instantiated - platform %s' % service.platform)
+ if service.distribution:
+ module.debug('Service instantiated - distribution %s' % service.distribution)
+
+ rc = 0
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = service.name
+
+ # Find service management tools
+ service.get_service_tools()
+
+ # Enable/disable service startup at boot if requested
+ if service.module.params['enabled'] is not None:
+ # FIXME: ideally this should detect if we need to toggle the enablement state, though
+ # it's unlikely the changed handler would need to fire in this case so it's a minor thing.
+ service.service_enable()
+ result['enabled'] = service.enable
+
+ if module.params['state'] is None:
+ # Not changing the running state, so bail out now.
+ result['changed'] = service.changed
+ module.exit_json(**result)
+
+ result['state'] = service.state
+
+ # Collect service status
+ if service.pattern:
+ service.check_ps()
+ else:
+ service.get_service_status()
+
+ # Calculate if request will change service state
+ service.check_service_changed()
+
+ # Modify service state if necessary
+ (rc, out, err) = service.modify_service_state()
+
+ if rc != 0:
+ if err and "Job is already running" in err:
+ # upstart got confused, one such possibility is MySQL on Ubuntu 12.04
+ # where status may report it has no start/stop links and we could
+ # not get accurate status
+ pass
+ else:
+ if err:
+ module.fail_json(msg=err)
+ else:
+ module.fail_json(msg=out)
+
+ result['changed'] = service.changed | service.svc_change
+ if service.module.params['enabled'] is not None:
+ result['enabled'] = service.module.params['enabled']
+
+ if not service.module.params['state']:
+ status = service.get_service_status()
+ if status is None:
+ result['state'] = 'absent'
+ elif status is False:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+ else:
+ # as we may have just bounced the service the service command may not
+ # report accurate state at this moment so just show what we ran
+ if service.module.params['state'] in ['reloaded', 'restarted', 'started']:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/service_facts.py b/lib/ansible/modules/service_facts.py
new file mode 100644
index 00000000..407be921
--- /dev/null
+++ b/lib/ansible/modules/service_facts.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# originally copied from AWX's scan_services module to bring this functionality
+# into Core
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: service_facts
+short_description: Return service state information as fact data
+description:
+ - Return service state information as fact data for various service management utilities
+version_added: "2.5"
+requirements: ["Any of the following supported init systems: systemd, sysv, upstart"]
+
+notes:
+  - When accessing the C(ansible_facts.services) facts collected by this module,
+    it is recommended to not use "dot notation" because services can have a C(-)
+    character in their name, which would result in invalid "dot notation", such as
+    C(ansible_facts.services.zuul-gateway). Instead, use the string value of the
+    service name as the key to obtain the fact data value, for example
+    C(ansible_facts.services['zuul-gateway']).
+
+author:
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = r'''
+- name: Populate service facts
+ service_facts:
+
+- debug:
+ var: ansible_facts.services
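+
+# Hypothetical example: a service name containing '-', such as 'zuul-gateway',
+# must be accessed with bracket notation rather than dot notation
+- debug:
+    var: ansible_facts.services['zuul-gateway']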
+
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Facts to add to ansible_facts about the services on the system
+ returned: always
+ type: complex
+ contains:
+ services:
+ description: States of the services with service name as key.
+ returned: always
+ type: complex
+ contains:
+ source:
+ description:
+ - Init system of the service.
+ - One of C(systemd), C(sysv), C(upstart).
+ returned: always
+ type: str
+ sample: sysv
+ state:
+ description:
+ - State of the service.
+ - Either C(running), C(stopped), or C(unknown).
+ returned: always
+ type: str
+ sample: running
+ status:
+ description:
+ - State of the service.
+ - Either C(enabled), C(disabled), C(static), C(indirect) or C(unknown).
+ returned: systemd systems or RedHat/SUSE flavored sysvinit/upstart
+ type: str
+ sample: enabled
+ name:
+ description: Name of the service.
+ returned: always
+ type: str
+ sample: arp-ethers.service
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BaseService(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.incomplete_warning = False
+
+
+class ServiceScanService(BaseService):
+
+ def gather_services(self):
+ services = {}
+ service_path = self.module.get_bin_path("service")
+ if service_path is None:
+ return None
+ initctl_path = self.module.get_bin_path("initctl")
+ chkconfig_path = self.module.get_bin_path("chkconfig")
+
+ # sysvinit
+ if service_path is not None and chkconfig_path is None:
+ rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
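+            # each matching line looks like "[ + ]  acpid": field 1 is the state flag, the name starts at field 3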
+ for line in stdout.split("\n"):
+ line_data = line.split()
+ if len(line_data) < 4:
+ continue # Skipping because we expected more data
+ service_name = " ".join(line_data[3:])
+ if line_data[1] == "+":
+ service_state = "running"
+ else:
+ service_state = "stopped"
+ services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
+
+ # Upstart
+ if initctl_path is not None and chkconfig_path is None:
+ p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
+ rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
+ real_stdout = stdout.replace("\r", "")
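+            # each line of 'initctl list' looks like: "avahi-daemon start/running, process 1234"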
+ for line in real_stdout.split("\n"):
+ m = p.match(line)
+ if not m:
+ continue
+ service_name = m.group('name')
+ service_goal = m.group('goal')
+ service_state = m.group('state')
+ if m.group('pid'):
+ pid = m.group('pid')
+ else:
+ pid = None # NOQA
+ payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
+ services[service_name] = payload
+
+ # RH sysvinit
+ elif chkconfig_path is not None:
+ # print '%s --status-all | grep -E "is (running|stopped)"' % service_path
+ p = re.compile(
+ r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
+ r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
+ rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
+ # Check for special cases where stdout does not fit pattern
+ match_any = False
+ for line in stdout.split('\n'):
+ if p.match(line):
+ match_any = True
+ if not match_any:
+ p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
+ match_any = False
+ for line in stdout.split('\n'):
+ if p_simple.match(line):
+ match_any = True
+ if match_any:
+ # Try extra flags " -l --allservices" needed for SLES11
+ rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
+ elif '--list' in stderr:
+ # Extra flag needed for RHEL5
+ rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
+ for line in stdout.split('\n'):
+ m = p.match(line)
+ if m:
+ service_name = m.group('service')
+ service_state = 'stopped'
+ service_status = "disabled"
+ if m.group('rl3') == 'on':
+ service_status = "enabled"
+ rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
+                if rc == 0:
+                    service_state = 'running'
+                else:
+                    if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
+                        self.incomplete_warning = True
+                        continue
+                    service_state = 'stopped'
+ service_data = {"name": service_name, "state": service_state, "status": service_status, "source": "sysv"}
+ services[service_name] = service_data
+ return services
+
+
+class SystemctlScanService(BaseService):
+
+    def systemd_enabled(self):
+        # Check if init is the systemd command; use comm since cmdline could be a symlink
+        try:
+            with open('/proc/1/comm', 'r') as init_proc:
+                for line in init_proc:
+                    if 'systemd' in line:
+                        return True
+        except IOError:
+            # If comm doesn't exist, old kernel, no systemd
+            pass
+        return False
+
+ def gather_services(self):
+ services = {}
+ if not self.systemd_enabled():
+ return None
+ systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
+ if systemctl_path is None:
+ return None
+ rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
+ for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
+ service_name = line.split()[0]
+ if "running" in line:
+ state_val = "running"
+ else:
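+                # failed units are prefixed with a marker glyph, which shifts the unit name to the second field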
+ if 'failed' in line:
+ service_name = line.split()[1]
+ state_val = "stopped"
+ services[service_name] = {"name": service_name, "state": state_val, "status": "unknown", "source": "systemd"}
+ rc, stdout, stderr = self.module.run_command("%s list-unit-files --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
+ for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
+ # there is one more column (VENDOR PRESET) from `systemctl list-unit-files` for systemd >= 245
+ try:
+ service_name, status_val = line.split()[:2]
+ except IndexError:
+ self.module.fail_json(msg="Malformed output discovered from systemd list-unit-files: {0}".format(line))
+ if service_name not in services:
+ rc, stdout, stderr = self.module.run_command("%s show %s --property=ActiveState" % (systemctl_path, service_name), use_unsafe_shell=True)
+ state = 'unknown'
+ if not rc and stdout != '':
+ state = stdout.replace('ActiveState=', '').rstrip()
+ services[service_name] = {"name": service_name, "state": state, "status": status_val, "source": "systemd"}
+ else:
+ services[service_name]["status"] = status_val
+ return services
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
+ module.run_command_environ_update = dict(LANG="C", LC_ALL="C")
+ service_modules = (ServiceScanService, SystemctlScanService)
+ all_services = {}
+ incomplete_warning = False
+ for svc_module in service_modules:
+ svcmod = svc_module(module)
+ svc = svcmod.gather_services()
+ if svc is not None:
+ all_services.update(svc)
+ if svcmod.incomplete_warning:
+ incomplete_warning = True
+ if len(all_services) == 0:
+ results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
+ else:
+ results = dict(ansible_facts=dict(services=all_services))
+ if incomplete_warning:
+ results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/set_fact.py b/lib/ansible/modules/set_fact.py
new file mode 100644
index 00000000..0f5300d0
--- /dev/null
+++ b/lib/ansible/modules/set_fact.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: set_fact
+short_description: Set host facts from a task
+version_added: "1.2"
+description:
+ - This module allows setting new variables.
+ - Variables are set on a host-by-host basis just like facts discovered by the setup module.
+ - These variables will be available to subsequent plays during an ansible-playbook run.
+ - Set C(cacheable) to C(yes) to save variables across executions
+ using a fact cache. Variables created with set_fact have different precedence depending on whether they are or are not cached.
+ - Per the standard Ansible variable precedence rules, many other types of variables have a higher priority, so this value may be overridden.
+ - This module is also supported for Windows targets.
+options:
+ key_value:
+ description:
+ - The C(set_fact) module takes key=value pairs as variables to set
+ in the playbook scope. Or alternatively, accepts complex arguments
+ using the C(args:) statement.
+ required: true
+ cacheable:
+ description:
+ - This boolean converts the variable into an actual 'fact' which will also be added to the fact cache, if fact caching is enabled.
+ - Normally this module creates 'host level variables' and has much higher precedence, this option changes the nature and precedence
+ (by 7 steps) of the variable created.
+ U(https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable)
+ - "This actually creates 2 copies of the variable, a normal 'set_fact' host variable with high precedence and
+ a lower 'ansible_fact' one that is available for persistance via the facts cache plugin.
+ This creates a possibly confusing interaction with C(meta: clear_facts) as it will remove the 'ansible_fact' but not the host variable."
+ type: bool
+ default: no
+ version_added: "2.4"
+notes:
+ - "The C(var=value) notation can only create strings or booleans.
+ If you want to create lists/arrays or dictionary/hashes use C(var: [val1, val2])."
+ - Since 'cacheable' is now a module param, 'cacheable' is no longer a valid fact name as of Ansible 2.4.
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.include_vars
+- ref: ansible_variable_precedence
+ description: More information related to variable precedence and which type of variable wins over others.
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Setting host facts using key=value pairs, note that this always creates strings or booleans
+ set_fact: one_fact="something" other_fact="{{ local_var }}"
+
+- name: Setting host facts using complex arguments
+ set_fact:
+ one_fact: something
+ other_fact: "{{ local_var * 2 }}"
+ another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
+
+- name: Setting facts so that they will be persisted in the fact cache
+ set_fact:
+ one_fact: something
+ other_fact: "{{ local_var * 2 }}"
+ cacheable: yes
+
+# As of Ansible 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
+# to proper boolean values when using the key=value syntax, however it is still
+# recommended that booleans be set using the complex argument style:
+- name: Setting booleans using complex argument style
+ set_fact:
+ one_fact: yes
+ other_fact: no
+
+'''
diff --git a/lib/ansible/modules/set_stats.py b/lib/ansible/modules/set_stats.py
new file mode 100644
index 00000000..65ae54a7
--- /dev/null
+++ b/lib/ansible/modules/set_stats.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible RedHat, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: set_stats
+short_description: Set stats for the current ansible run
+description:
+ - This module allows setting/accumulating stats on the current ansible run, either per host or for all hosts in the run.
+ - This module is also supported for Windows targets.
+author: Brian Coca (@bcoca)
+options:
+ data:
+ description:
+ - A dictionary of which each key represents a stat (or variable) you want to keep track of.
+ type: dict
+ required: true
+ per_host:
+ description:
+      - Whether the stats are per host or for all hosts in the run.
+ type: bool
+ default: no
+ aggregate:
+ description:
+ - Whether the provided value is aggregated to the existing stat C(yes) or will replace it C(no).
+ type: bool
+ default: yes
+notes:
+ - In order for custom stats to be displayed, you must set C(show_custom_stats) in C(ansible.cfg) or C(ANSIBLE_SHOW_CUSTOM_STATS) to C(yes).
+ - This module is also supported for Windows targets.
+version_added: "2.3"
+'''
+
+EXAMPLES = r'''
+- name: Aggregating packages_installed stat per host
+ set_stats:
+ data:
+ packages_installed: 31
+ per_host: yes
+
+- name: Aggregating random stats for all hosts using complex arguments
+ set_stats:
+ data:
+ one_stat: 11
+ other_stat: "{{ local_var * 2 }}"
+ another_stat: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
+ per_host: no
+
+
+- name: Setting stats (not aggregating)
+ set_stats:
+ data:
+ the_answer: 42
+ aggregate: no
+'''
diff --git a/lib/ansible/modules/setup.py b/lib/ansible/modules/setup.py
new file mode 100644
index 00000000..083d3d44
--- /dev/null
+++ b/lib/ansible/modules/setup.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: setup
+version_added: historical
+short_description: Gathers facts about remote hosts
+options:
+ gather_subset:
+ version_added: "2.1"
+ description:
+ - "If supplied, restrict the additional facts collected to the given subset.
+ Possible values: C(all), C(min), C(hardware), C(network), C(virtual), C(ohai), and
+        C(facter). A list of values can be given to specify a larger subset.
+        Values can also be given with an initial C(!) to specify that
+        a specific subset should not be collected. For instance:
+ C(!hardware,!network,!virtual,!ohai,!facter). If C(!all) is specified
+ then only the min subset is collected. To avoid collecting even the
+ min subset, specify C(!all,!min). To collect only specific facts,
+ use C(!all,!min), and specify the particular fact subsets.
+ Use the filter parameter if you do not want to display some collected
+ facts."
+ required: false
+ default: "all"
+ gather_timeout:
+ version_added: "2.2"
+ description:
+ - Set the default timeout in seconds for individual fact gathering.
+ required: false
+ default: 10
+ filter:
+ version_added: "1.1"
+ description:
+ - If supplied, only return facts that match this shell-style (fnmatch) wildcard.
+ required: false
+ default: "*"
+ fact_path:
+ version_added: "1.3"
+ description:
+ - Path used for local ansible facts (C(*.fact)) - files in this dir
+ will be run (if executable) and their results be added to C(ansible_local) facts.
+ If a file is not executable it is read instead.
+ File/results format can be JSON or INI-format. The default C(fact_path) can be
+ specified in C(ansible.cfg) for when setup is automatically called as part of
+ C(gather_facts).
+      NOTE - For Windows clients, the results will be added to a variable named after the
+      local file (without extension suffix), rather than C(ansible_local).
+    - Since Ansible 2.1, Windows hosts can use C(fact_path). Make sure that this path
+      exists on the target host. Files in this path MUST be PowerShell scripts C(.ps1)
+      which output an object. This object will be formatted by Ansible as json so the
+      script should be outputting a raw hashtable, array, or other primitive object.
+ required: false
+ default: /etc/ansible/facts.d
+description:
+ - This module is automatically called by playbooks to gather useful
+ variables about remote hosts that can be used in playbooks. It can also be
+ executed directly by C(/usr/bin/ansible) to check what variables are
+ available to a host. Ansible provides many I(facts) about the system,
+ automatically.
+ - This module is also supported for Windows targets.
+notes:
+  - More Ansible facts will be added with successive releases. If I(facter) or
+    I(ohai) are installed, variables from these programs will also be snapshotted
+    into the JSON file for usage in templating. These variables are prefixed
+    with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
+    bubbled up to the caller. Using the Ansible facts and choosing to not
+    install I(facter) and I(ohai) means you can avoid Ruby dependencies on your
+    remote systems. (See also M(community.general.facter) and M(community.general.ohai).)
+ - The filter option filters only the first level subkey below ansible_facts.
+ - If the target host is Windows, you will not currently have the ability to use
+ C(filter) as this is provided by a simpler implementation of the module.
+ - This module is also supported for Windows targets.
+ - This module should be run with elevated privileges on BSD systems to gather facts like ansible_product_version.
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = """
+# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
+# ansible all -m setup --tree /tmp/facts
+
+# Display only facts regarding memory found by ansible on all hosts and output them.
+# ansible all -m setup -a 'filter=ansible_*_mb'
+
+# Display only facts returned by facter.
+# ansible all -m setup -a 'filter=facter_*'
+
+# Collect only facts returned by facter.
+# ansible all -m setup -a 'gather_subset=!all,!any,facter'
+
+- name: Collect only facts returned by facter
+ setup:
+ gather_subset:
+ - '!all'
+ - '!any'
+ - facter
+
+# Display only facts about certain interfaces.
+# ansible all -m setup -a 'filter=ansible_eth[0-2]'
+
+# Restrict additional gathered facts to network and virtual (includes default minimum facts)
+# ansible all -m setup -a 'gather_subset=network,virtual'
+
+# Collect only network and virtual (excludes default minimum facts)
+# ansible all -m setup -a 'gather_subset=!all,!any,network,virtual'
+
+# Do not call puppet facter or ohai even if present.
+# ansible all -m setup -a 'gather_subset=!facter,!ohai'
+
+# Only collect the default minimum amount of facts:
+# ansible all -m setup -a 'gather_subset=!all'
+
+# Collect no facts, even the default minimum subset of facts:
+# ansible all -m setup -a 'gather_subset=!all,!min'
+
+# Display facts from Windows hosts with custom facts stored in C(C:\\custom_facts).
+# ansible windows -m setup -a "fact_path='c:\\custom_facts'"
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.facts.namespace import PrefixFactNamespace
+from ansible.module_utils.facts import ansible_collector
+
+from ansible.module_utils.facts import default_collectors
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ gather_subset=dict(default=["all"], required=False, type='list'),
+ gather_timeout=dict(default=10, required=False, type='int'),
+ filter=dict(default="*", required=False),
+ fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ gather_subset = module.params['gather_subset']
+ gather_timeout = module.params['gather_timeout']
+ filter_spec = module.params['filter']
+
+ # TODO: this mimics existing behavior where gather_subset=["!all"] actually means
+ # to collect nothing except for the below list
+ # TODO: decide what '!all' means, I lean towards making it mean none, but likely needs
+ # some tweaking on how gather_subset operations are performed
+ minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
+ 'distribution', 'dns', 'env', 'fips', 'local',
+ 'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
+ 'service_mgr', 'ssh_pub_keys', 'user'])
+
+ all_collector_classes = default_collectors.collectors
+
+ # rename namespace_name to root_key?
+ namespace = PrefixFactNamespace(namespace_name='ansible',
+ prefix='ansible_')
+
+ fact_collector = \
+ ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
+ namespace=namespace,
+ filter_spec=filter_spec,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ minimal_gather_subset=minimal_gather_subset)
+
+ facts_dict = fact_collector.collect(module=module)
+
+ module.exit_json(ansible_facts=facts_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/shell.py b/lib/ansible/modules/shell.py
new file mode 100644
index 00000000..17727352
--- /dev/null
+++ b/lib/ansible/modules/shell.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# There is no actual shell module source, when you use 'shell' in ansible,
+# it runs the 'command' module with special arguments and it behaves differently.
+# See the command source and the comment "#USE_SHELL".
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: shell
+short_description: Execute shell commands on targets
+description:
+ - The C(shell) module takes the command name followed by a list of space-delimited arguments.
+ - Either a free form command or C(cmd) parameter is required, see the examples.
+ - It is almost exactly like the M(ansible.builtin.command) module but runs
+ the command through a shell (C(/bin/sh)) on the remote node.
+ - For Windows targets, use the M(ansible.windows.win_shell) module instead.
+version_added: "0.2"
+options:
+ free_form:
+ description:
+ - The shell module takes a free form command to run, as a string.
+ - There is no actual parameter named 'free form'.
+ - See the examples on how to use this module.
+ type: str
+ cmd:
+ type: str
+ description:
+ - The command to run followed by optional arguments.
+ creates:
+ description:
+ - A filename, when it already exists, this step will B(not) be run.
+ type: path
+ removes:
+ description:
+ - A filename, when it does not exist, this step will B(not) be run.
+ type: path
+ version_added: "0.8"
+ chdir:
+ description:
+ - Change into this directory before running the command.
+ type: path
+ version_added: "0.6"
+ executable:
+ description:
+ - Change the shell used to execute the command.
+ - This expects an absolute path to the executable.
+ type: path
+ version_added: "0.9"
+ warn:
+ description:
+ - Whether to enable task warnings.
+ type: bool
+ default: yes
+ version_added: "1.8"
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: "2.4"
+ stdin_add_newline:
+ description:
+ - Whether to append a newline to stdin data.
+ type: bool
+ default: yes
+ version_added: "2.8"
+notes:
+ - If you want to execute a command securely and predictably, it may be
+ better to use the M(ansible.builtin.command) module instead. Best practices
+ when writing playbooks will follow the trend of using M(ansible.builtin.command)
+ unless the M(ansible.builtin.shell) module is explicitly required. When running ad-hoc
+ commands, use your best judgement.
+ - Check mode is supported when passing C(creates) or C(removes). If running
+ in check mode and either of these are specified, the module will check for
+ the existence of the file and report the correct changed status. If these
+ are not supplied, the task will be skipped.
+ - To sanitize any variables passed to the shell module, you should use
+ C({{ var | quote }}) instead of just C({{ var }}) to make sure they
+ do not include evil things like semicolons.
+ - An alternative to using inline shell scripts with this module is to use
+ the M(ansible.builtin.script) module possibly together with the M(ansible.builtin.template) module.
+ - For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
+seealso:
+- module: ansible.builtin.command
+- module: ansible.builtin.raw
+- module: ansible.builtin.script
+- module: ansible.windows.win_shell
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Execute the command in remote shell; stdout goes to the specified file on the remote
+ shell: somescript.sh >> somelog.txt
+
+- name: Change the working directory to somedir/ before executing the command
+ shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
+
+# You can also use the 'args' form to provide the options.
+- name: This command will change the working directory to somedir/ and will only run when somedir/somelog.txt doesn't exist
+ shell: somescript.sh >> somelog.txt
+ args:
+ chdir: somedir/
+ creates: somelog.txt
+
+# You can also use the 'cmd' parameter instead of free form format.
+- name: This command will change the working directory to somedir/
+ shell:
+ cmd: ls -l | grep log
+ chdir: somedir/
+
+- name: Run a command that uses non-POSIX shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
+ shell: cat < /tmp/*txt
+ args:
+ executable: /bin/bash
+
+- name: Run a command using a templated variable (always use quote filter to avoid injection)
+ shell: cat {{ myfile|quote }}
+
+# You can use shell to run other executables to perform actions inline
+- name: Run expect to wait for a successful PXE boot via out-of-band CIMC
+ shell: |
+ set timeout 300
+ spawn ssh admin@{{ cimc_host }}
+
+ expect "password:"
+ send "{{ cimc_password }}\n"
+
+ expect "\n{{ cimc_name }}"
+ send "connect host\n"
+
+ expect "pxeboot.n12"
+ send "\n"
+
+ exit 0
+ args:
+ executable: /usr/bin/expect
+ delegate_to: localhost
+
+# Disabling warnings
+- name: Using curl to connect to a host via SOCKS proxy (unsupported in uri). Ordinarily this would throw a warning
+ shell: curl --socks5 localhost:9000 http://www.ansible.com
+ args:
+ warn: no
+'''
+
+RETURN = r'''
+msg:
+ description: changed
+ returned: always
+ type: bool
+ sample: True
+start:
+ description: The command execution start time
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time
+ returned: always
+ type: str
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master …'
+stderr:
+ description: The command standard error
+ returned: always
+ type: str
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task
+ returned: always
+ type: str
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
+stderr_lines:
+ description: The command standard error split in lines
+ returned: always
+ type: list
+ sample: [u'ls cannot access foo: No such file or directory', u'ls …']
+'''
diff --git a/lib/ansible/modules/slurp.py b/lib/ansible/modules/slurp.py
new file mode 100644
index 00000000..0bc94015
--- /dev/null
+++ b/lib/ansible/modules/slurp.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: slurp
+version_added: historical
+short_description: Slurps a file from remote nodes
+description:
+ - This module works like M(ansible.builtin.fetch). It is used for fetching a base64-
+ encoded blob containing the data in a remote file.
+ - This module is also supported for Windows targets.
+options:
+ src:
+ description:
+ - The file on the remote system to fetch. This I(must) be a file, not a directory.
+ type: path
+ required: true
+ aliases: [ path ]
+notes:
+ - This module returns an 'in memory' base64 encoded version of the file; take
+ into account that this will require at least twice the RAM of the original file size.
+ - This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.fetch
+author:
+ - Ansible Core Team
+ - Michael DeHaan (@mpdehaan)
+'''
+
+EXAMPLES = r'''
+- name: Find out what the remote machine's mounts are
+ slurp:
+ src: /proc/mounts
+ register: mounts
+
+- debug:
+ msg: "{{ mounts['content'] | b64decode }}"
+
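+# A sketch of moving file content between hosts ('app_host' and the paths are
+# placeholder values): slurp the file on one host, then write it with copy.
+- name: Read a config file from another host
+ slurp:
+ src: /etc/myapp.conf
+ register: myapp_conf
+ delegate_to: app_host
+
+- name: Write the same content on the current host
+ copy:
+ content: "{{ myapp_conf['content'] | b64decode }}"
+ dest: /etc/myapp.conf
+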
+# From the commandline, find the pid of the remote machine's sshd
+# $ ansible host -m slurp -a 'src=/var/run/sshd.pid'
+# host | SUCCESS => {
+# "changed": false,
+# "content": "MjE3OQo=",
+# "encoding": "base64",
+# "source": "/var/run/sshd.pid"
+# }
+# $ echo MjE3OQo= | base64 -d
+# 2179
+'''
+
+import base64
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ ),
+ supports_check_mode=True,
+ )
+ source = module.params['src']
+
+ if not os.path.exists(source):
+ module.fail_json(msg="file not found: %s" % source)
+ if not os.access(source, os.R_OK):
+ module.fail_json(msg="file is not readable: %s" % source)
+
+ with open(source, 'rb') as source_fh:
+ source_content = source_fh.read()
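+ # b64encode() returns an ASCII-safe value (bytes on Python 3), which is
+ # what allows the file content to travel inside the JSON reply.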
+ data = base64.b64encode(source_content)
+
+ module.exit_json(content=data, source=source, encoding='base64')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/stat.py b/lib/ansible/modules/stat.py
new file mode 100644
index 00000000..2a5fbcde
--- /dev/null
+++ b/lib/ansible/modules/stat.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: stat
+version_added: "1.3"
+short_description: Retrieve file or file system status
+description:
+ - Retrieves facts for a file similar to the Linux/Unix 'stat' command.
+ - For Windows targets, use the M(ansible.windows.win_stat) module instead.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of.
+ type: path
+ required: true
+ aliases: [ dest, name ]
+ follow:
+ description:
+ - Whether to follow symlinks.
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Whether to return a checksum of the file.
+ type: bool
+ default: yes
+ version_added: "1.8"
+ checksum_algorithm:
+ description:
+ - Algorithm to determine checksum of file.
+ - Will throw an error if the host is unable to use the specified algorithm.
+ - The remote host has to support the hashing method specified, C(md5)
+ can be unavailable if the host is FIPS-140 compliant.
+ type: str
+ choices: [ md5, sha1, sha224, sha256, sha384, sha512 ]
+ default: sha1
+ aliases: [ checksum, checksum_algo ]
+ version_added: "2.0"
+ get_mime:
+ description:
+ - Use file magic and return data about the nature of the file. This uses
+ the 'file' utility found on most Linux/Unix systems.
+ - This will add both C(mime_type) and C(charset) fields to the return, if possible.
+ - In Ansible 2.3 this option changed from 'mime' to 'get_mime' and the default changed to C(yes).
+ type: bool
+ default: yes
+ aliases: [ mime, mime_type, mime-type ]
+ version_added: "2.1"
+ get_attributes:
+ description:
+ - Get file attributes using lsattr tool if present.
+ type: bool
+ default: yes
+ aliases: [ attr, attributes ]
+ version_added: "2.3"
+seealso:
+- module: ansible.builtin.file
+- module: ansible.windows.win_stat
+author: Bruce Pennypacker (@bpennypacker)
+'''
+
+EXAMPLES = r'''
+# Obtain the stats of /etc/foo.conf, and check that the file still belongs
+# to 'root'. Fail otherwise.
+- name: Get stats of a file
+ stat:
+ path: /etc/foo.conf
+ register: st
+- fail:
+ msg: "Whoops! file ownership has changed"
+ when: st.stat.pw_name != 'root'
+
+# Determine if a path exists and is a symlink. Note that if the path does
+# not exist and we test sym.stat.islnk, it will fail with an error, so
+# we must test whether it is defined.
+# Run this to understand the structure; the skipped ones do not pass the
+# check performed by 'when'.
+- name: Get stats of the FS object
+ stat:
+ path: /path/to/something
+ register: sym
+
+- debug:
+ msg: "islnk isn't defined (path doesn't exist)"
+ when: sym.stat.islnk is not defined
+
+- debug:
+ msg: "islnk is defined (path must exist)"
+ when: sym.stat.islnk is defined
+
+- debug:
+ msg: "Path exists and is a symlink"
+ when: sym.stat.islnk is defined and sym.stat.islnk
+
+- debug:
+ msg: "Path exists and isn't a symlink"
+ when: sym.stat.islnk is defined and not sym.stat.islnk
+
+
+# Determine if a path exists and is a directory. Note that we need to test
+# both that p.stat.isdir actually exists, and also that it's set to true.
+- name: Get stats of the FS object
+ stat:
+ path: /path/to/something
+ register: p
+- debug:
+ msg: "Path exists and is a directory"
+ when: p.stat.isdir is defined and p.stat.isdir
+
+- name: Don't do checksum
+ stat:
+ path: /path/to/myhugefile
+ get_checksum: no
+
+- name: Use sha256 to calculate checksum
+ stat:
+ path: /path/to/something
+ checksum_algorithm: sha256
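+
+# A sketch combining the registered result with assert ('0644' is an
+# arbitrary expected mode); note that mode is returned as a string.
+- name: Get stats of /etc/foo.conf
+ stat:
+ path: /etc/foo.conf
+ register: conf_stat
+
+- name: Verify the expected permissions
+ assert:
+ that:
+ - conf_stat.stat.mode == '0644'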
+'''
+
+RETURN = r'''
+stat:
+ description: Dictionary containing all the stat data; some platforms might add additional fields
+ returned: success
+ type: complex
+ contains:
+ exists:
+ description: If the destination path actually exists or not
+ returned: success
+ type: bool
+ sample: True
+ path:
+ description: The full path of the file/object to get the facts of
+ returned: success and if path exists
+ type: str
+ sample: '/path/to/file'
+ mode:
+ description: Unix permissions of the file in octal representation as a string
+ returned: success, path exists and user can read stats
+ type: str
+ sample: '1755'
+ isdir:
+ description: Tells you if the path is a directory
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ ischr:
+ description: Tells you if the path is a character device
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isblk:
+ description: Tells you if the path is a block device
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isreg:
+ description: Tells you if the path is a regular file
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ isfifo:
+ description: Tells you if the path is a named pipe
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ islnk:
+ description: Tells you if the path is a symbolic link
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ issock:
+ description: Tells you if the path is a unix domain socket
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ uid:
+ description: Numeric id representing the file owner
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1003
+ gid:
+ description: Numeric id representing the group of the owner
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1003
+ size:
+ description: Size in bytes for a plain file, amount of data for some special files
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 203
+ inode:
+ description: Inode number of the path
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 12758
+ dev:
+ description: Device the inode resides on
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 33
+ nlink:
+ description: Number of links to the inode (hard links)
+ returned: success, path exists and user can read stats
+ type: int
+ sample: 1
+ atime:
+ description: Time of last access
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ mtime:
+ description: Time of last modification
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ ctime:
+ description: Time of last metadata update or creation (depends on OS)
+ returned: success, path exists and user can read stats
+ type: float
+ sample: 1424348972.575
+ wusr:
+ description: Tells you if the owner has write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ rusr:
+ description: Tells you if the owner has read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xusr:
+ description: Tells you if the owner has execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ wgrp:
+ description: Tells you if the owner's group has write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ rgrp:
+ description: Tells you if the owner's group has read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xgrp:
+ description: Tells you if the owner's group has execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ woth:
+ description: Tells you if others have write permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ roth:
+ description: Tells you if others have read permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ xoth:
+ description: Tells you if others have execute permission
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: True
+ isuid:
+ description: Tells you if the setuid bit is set on the path (mode & S_ISUID)
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ isgid:
+ description: Tells you if the setgid bit is set on the path (mode & S_ISGID)
+ returned: success, path exists and user can read stats
+ type: bool
+ sample: False
+ lnk_source:
+ description: Target of the symlink normalized for the remote filesystem
+ returned: success, path exists and user can read stats and the path is a symbolic link
+ type: str
+ sample: /home/foobar/21102015-1445431274-908472971
+ lnk_target:
+ description: Target of the symlink. Note that relative paths remain relative
+ returned: success, path exists and user can read stats and the path is a symbolic link
+ type: str
+ sample: ../foobar/21102015-1445431274-908472971
+ version_added: 2.4
+ md5:
+ description: md5 hash of the path; this will be removed in Ansible 2.9 in
+ favor of the checksum return value
+ returned: success, path exists and user can read stats and path
+ supports hashing and md5 is supported
+ type: str
+ sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
+ checksum:
+ description: hash of the path
+ returned: success, path exists, user can read stats, path supports
+ hashing and supplied checksum algorithm is available
+ type: str
+ sample: 50ba294cdf28c0d5bcde25708df53346825a429f
+ pw_name:
+ description: User name of owner
+ returned: success, path exists and user can read stats and installed python supports it
+ type: str
+ sample: httpd
+ gr_name:
+ description: Group name of owner
+ returned: success, path exists and user can read stats and installed python supports it
+ type: str
+ sample: www-data
+ mimetype:
+ description: file magic data or mime-type
+ returned: success, path exists and user can read stats and
+ installed python supports it and the C(get_mime) option was true; will
+ return 'unknown' on error.
+ type: str
+ sample: application/pdf; charset=binary
+ charset:
+ description: file character set or encoding
+ returned: success, path exists and user can read stats and
+ installed python supports it and the C(get_mime) option was true; will
+ return 'unknown' on error.
+ type: str
+ sample: us-ascii
+ readable:
+ description: Tells you if the invoking user has the right to read the path
+ returned: success, path exists and user can read the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ writeable:
+ description: Tells you if the invoking user has the right to write the path
+ returned: success, path exists and user can write the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ executable:
+ description: Tells you if the invoking user has execute permission on the path
+ returned: success, path exists and user can execute the path
+ type: bool
+ sample: False
+ version_added: 2.2
+ attributes:
+ description: list of file attributes
+ returned: success, path exists and user can execute the path
+ type: list
+ sample: [ immutable, extent ]
+ version_added: 2.3
+'''
+
+import errno
+import grp
+import os
+import pwd
+import stat
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def format_output(module, path, st):
+ mode = st.st_mode
+
+ # back to ansible
+ output = dict(
+ exists=True,
+ path=path,
+ mode="%04o" % stat.S_IMODE(mode),
+ isdir=stat.S_ISDIR(mode),
+ ischr=stat.S_ISCHR(mode),
+ isblk=stat.S_ISBLK(mode),
+ isreg=stat.S_ISREG(mode),
+ isfifo=stat.S_ISFIFO(mode),
+ islnk=stat.S_ISLNK(mode),
+ issock=stat.S_ISSOCK(mode),
+ uid=st.st_uid,
+ gid=st.st_gid,
+ size=st.st_size,
+ inode=st.st_ino,
+ dev=st.st_dev,
+ nlink=st.st_nlink,
+ atime=st.st_atime,
+ mtime=st.st_mtime,
+ ctime=st.st_ctime,
+ wusr=bool(mode & stat.S_IWUSR),
+ rusr=bool(mode & stat.S_IRUSR),
+ xusr=bool(mode & stat.S_IXUSR),
+ wgrp=bool(mode & stat.S_IWGRP),
+ rgrp=bool(mode & stat.S_IRGRP),
+ xgrp=bool(mode & stat.S_IXGRP),
+ woth=bool(mode & stat.S_IWOTH),
+ roth=bool(mode & stat.S_IROTH),
+ xoth=bool(mode & stat.S_IXOTH),
+ isuid=bool(mode & stat.S_ISUID),
+ isgid=bool(mode & stat.S_ISGID),
+ )
+
+ # Platform dependent flags:
+ for other in [
+ # Some Linux
+ ('st_blocks', 'blocks'),
+ ('st_blksize', 'block_size'),
+ ('st_rdev', 'device_type'),
+ ('st_flags', 'flags'),
+ # Some Berkeley based
+ ('st_gen', 'generation'),
+ ('st_birthtime', 'birthtime'),
+ # RISCOS
+ ('st_ftype', 'file_type'),
+ ('st_attrs', 'attrs'),
+ ('st_obtype', 'object_type'),
+ # macOS
+ ('st_rsize', 'real_size'),
+ ('st_creator', 'creator'),
+ ('st_type', 'file_type'),
+ ]:
+ if hasattr(st, other[0]):
+ output[other[1]] = getattr(st, other[0])
+
+ return output
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest', 'name']),
+ follow=dict(type='bool', default=False),
+ get_md5=dict(type='bool', default=False),
+ get_checksum=dict(type='bool', default=True),
+ get_mime=dict(type='bool', default=True, aliases=['mime', 'mime_type', 'mime-type']),
+ get_attributes=dict(type='bool', default=True, aliases=['attr', 'attributes']),
+ checksum_algorithm=dict(type='str', default='sha1',
+ choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
+ aliases=['checksum', 'checksum_algo']),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params.get('path')
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ follow = module.params.get('follow')
+ get_mime = module.params.get('get_mime')
+ get_attr = module.params.get('get_attributes')
+ get_checksum = module.params.get('get_checksum')
+ checksum_algorithm = module.params.get('checksum_algorithm')
+
+ # NOTE: undocumented option since 2.9 to be removed at a later date if possible (3.0+)
+ # no real reason for keeping other than fear we may break older content.
+ get_md5 = module.params.get('get_md5')
+
+ # main stat data
+ try:
+ if follow:
+ st = os.stat(b_path)
+ else:
+ st = os.lstat(b_path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ output = {'exists': False}
+ module.exit_json(changed=False, stat=output)
+
+ module.fail_json(msg=e.strerror)
+
+ # process base results
+ output = format_output(module, path, st)
+
+ # resolved permissions
+ for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
+ output[perm[0]] = os.access(b_path, perm[1])
+
+ # symlink info
+ if output.get('islnk'):
+ output['lnk_source'] = os.path.realpath(b_path)
+ output['lnk_target'] = os.readlink(b_path)
+
+ try: # user data
+ pw = pwd.getpwuid(st.st_uid)
+ output['pw_name'] = pw.pw_name
+ except (TypeError, KeyError):
+ pass
+
+ try: # group data
+ grp_info = grp.getgrgid(st.st_gid)
+ output['gr_name'] = grp_info.gr_name
+ except (KeyError, ValueError, OverflowError):
+ pass
+
+ # checksums
+ if output.get('isreg') and output.get('readable'):
+
+ # NOTE: see above about get_md5
+ if get_md5:
+ # Will fail on FIPS-140 compliant systems
+ try:
+ output['md5'] = module.md5(b_path)
+ except ValueError:
+ output['md5'] = None
+
+ if get_checksum:
+ output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)
+
+ # try to get mime data if requested
+ if get_mime:
+ output['mimetype'] = output['charset'] = 'unknown'
+ mimecmd = module.get_bin_path('file')
+ if mimecmd:
+ mimecmd = [mimecmd, '-i', b_path]
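+ # 'file -i' prints e.g. "/etc/passwd: text/plain; charset=us-ascii",
+ # so the first ':' and ';' delimit the two fields parsed below.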
+ try:
+ rc, out, err = module.run_command(mimecmd)
+ if rc == 0:
+ mimetype, charset = out.split(':')[1].split(';')
+ output['mimetype'] = mimetype.strip()
+ output['charset'] = charset.split('=')[1].strip()
+ except Exception:
+ pass
+
+ # try to get attr data
+ if get_attr:
+ output['version'] = None
+ output['attributes'] = []
+ output['attr_flags'] = ''
+ out = module.get_file_attributes(b_path)
+ for x in ('version', 'attributes', 'attr_flags'):
+ if x in out:
+ output[x] = out[x]
+
+ module.exit_json(changed=False, stat=output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/subversion.py b/lib/ansible/modules/subversion.py
new file mode 100644
index 00000000..730d26f0
--- /dev/null
+++ b/lib/ansible/modules/subversion.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: subversion
+short_description: Deploys a subversion repository
+description:
+ - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout.
+version_added: "0.7"
+author:
+- Dane Summers (@dsummersl) <njharman@gmail.com>
+notes:
+ - Requires I(svn) to be installed on the client.
+ - This module does not handle externals.
+options:
+ repo:
+ description:
+ - The subversion URL to the repository.
+ required: true
+ aliases: [ name, repository ]
+ dest:
+ description:
+ - Absolute path where the repository should be deployed.
+ required: true
+ revision:
+ description:
+ - Specific revision to checkout.
+ default: HEAD
+ aliases: [ version ]
+ force:
+ description:
+ - If C(yes), modified files will be discarded. If C(no), the module will fail if it encounters modified files.
+ Prior to 1.9 the default was C(yes).
+ type: bool
+ default: "no"
+ in_place:
+ description:
+ - If the directory exists, then the working copy will be checked out over the top using
+ C(svn checkout --force); if force is specified then existing files with different content are reverted.
+ type: bool
+ default: "no"
+ version_added: "2.6"
+ username:
+ description:
+ - C(--username) parameter passed to svn.
+ password:
+ description:
+ - C(--password) parameter passed to svn when svn is less than version 1.10.0. This is not secure and
+ the password will be leaked to argv.
+ - C(--password-from-stdin) parameter when svn is greater than or equal to version 1.10.0.
+ executable:
+ description:
+ - Path to svn executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ version_added: "1.4"
+ checkout:
+ description:
+ - If C(no), do not check out the repository if it does not exist locally.
+ type: bool
+ default: "yes"
+ version_added: "2.3"
+ update:
+ description:
+ - If C(no), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: "yes"
+ version_added: "2.3"
+ export:
+ description:
+ - If C(yes), do export instead of checkout/update.
+ type: bool
+ default: "no"
+ version_added: "1.6"
+ switch:
+ description:
+ - If C(no), do not call svn switch before update.
+ default: "yes"
+ version_added: "2.0"
+ type: bool
+
+requirements:
+ - subversion (the command line tool with C(svn) entrypoint)
+'''
+
+EXAMPLES = '''
+- name: Checkout subversion repository to specified folder
+ subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
+
+- name: Export subversion directory to folder
+ subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/export
+ export: yes
+
+- name: Get information about the repository, whether or not it has already been cloned locally
+ subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout
+ checkout: no
+ update: no
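+
+# A sketch pinning the checkout to a fixed revision (the revision number is
+# an arbitrary example value).
+- name: Checkout subversion repository at a specific revision
+ subversion:
+ repo: svn+ssh://an.example.org/path/to/repo
+ dest: /src/checkout-1234
+ revision: 1234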
+'''
+
+import os
+import re
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Subversion(object):
+ def __init__(self, module, dest, repo, revision, username, password, svn_path):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.username = username
+ self.password = password
+ self.svn_path = svn_path
+
+ def has_option_password_from_stdin(self):
+ rc, version, err = self.module.run_command([self.svn_path, '--version', '--quiet'], check_rc=True)
+ return LooseVersion(version) >= LooseVersion('1.10.0')
+
+ def _exec(self, args, check_rc=True):
+ '''Execute a subversion command, and return output. If check_rc is False, returns the return code instead of the output.'''
+ bits = [
+ self.svn_path,
+ '--non-interactive',
+ '--trust-server-cert',
+ '--no-auth-cache',
+ ]
+ stdin_data = None
+ if self.username:
+ bits.extend(["--username", self.username])
+ if self.password:
+ if self.has_option_password_from_stdin():
+ bits.append("--password-from-stdin")
+ stdin_data = self.password
+ else:
+ self.module.warn("The authentication provided will be used on the svn command line and is not secure. "
+ "To securely pass credentials, upgrade svn to version 1.10.0 or greater.")
+ bits.extend(["--password", self.password])
+ bits.extend(args)
+ rc, out, err = self.module.run_command(bits, check_rc, data=stdin_data)
+
+ if check_rc:
+ return out.splitlines()
+ else:
+ return rc
+
+ def is_svn_repo(self):
+ '''Checks if path is a SVN Repo.'''
+ rc = self._exec(["info", self.dest], check_rc=False)
+ return rc == 0
+
+ def checkout(self, force=False):
+ '''Creates new svn working directory if it does not already exist.'''
+ cmd = ["checkout"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+ self._exec(cmd)
+
+ def export(self, force=False):
+ '''Export svn repo to directory'''
+ cmd = ["export"]
+ if force:
+ cmd.append("--force")
+ cmd.extend(["-r", self.revision, self.repo, self.dest])
+
+ self._exec(cmd)
+
+ def switch(self):
+ '''Change working directory's repo.'''
+ # switch to ensure we are pointing at correct repo.
+ # it also updates!
+ output = self._exec(["switch", "--revision", self.revision, self.repo, self.dest])
+ for line in output:
+ if re.search(r'^[ABDUCGE]\s', line):
+ return True
+ return False
+
+ def update(self):
+ '''Update existing svn working directory.'''
+ output = self._exec(["update", "-r", self.revision, self.dest])
+
+ for line in output:
+ if re.search(r'^[ABDUCGE]\s', line):
+ return True
+ return False
+
+ def revert(self):
+ '''Revert svn working directory.'''
+ output = self._exec(["revert", "-R", self.dest])
+ for line in output:
+ if re.search(r'^Reverted ', line) is None:
+ return True
+ return False
+
+ def get_revision(self):
+ '''Revision and URL of subversion working directory.'''
+ text = '\n'.join(self._exec(["info", self.dest]))
+ rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
+ url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0)
+ return rev, url
+
+ def get_remote_revision(self):
+ '''Revision and URL of subversion working directory.'''
+ text = '\n'.join(self._exec(["info", self.repo]))
+ rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0)
+ return rev
+
+ def has_local_mods(self):
+ '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
+ lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
+ # The --quiet option will return only modified files.
+ # Match only revisioned files, i.e. ignore status '?'.
+ regex = re.compile(r'^[^?X]')
+ # Has local mods if more than 0 modified revisioned files.
+ return len(list(filter(regex.match, lines))) > 0
+
+ def needs_update(self):
+ curr, url = self.get_revision()
+ out2 = '\n'.join(self._exec(["info", "-r", self.revision, self.dest]))
+ head = re.search(r'^Revision:.*$', out2, re.MULTILINE).group(0)
+ rev1 = int(curr.split(':')[1].strip())
+ rev2 = int(head.split(':')[1].strip())
+ change = False
+ if rev1 < rev2:
+ change = True
+ return change, curr, head
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path'),
+ repo=dict(type='str', required=True, aliases=['name', 'repository']),
+ revision=dict(type='str', default='HEAD', aliases=['rev', 'version']),
+ force=dict(type='bool', default=False),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ executable=dict(type='path'),
+ export=dict(type='bool', default=False),
+ checkout=dict(type='bool', default=True),
+ update=dict(type='bool', default=True),
+ switch=dict(type='bool', default=True),
+ in_place=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ dest = module.params['dest']
+ repo = module.params['repo']
+ revision = module.params['revision']
+ force = module.params['force']
+ username = module.params['username']
+ password = module.params['password']
+ svn_path = module.params['executable'] or module.get_bin_path('svn', True)
+ export = module.params['export']
+ switch = module.params['switch']
+ checkout = module.params['checkout']
+ update = module.params['update']
+ in_place = module.params['in_place']
+
+ # We screen-scrape the output of many svn commands, so use the C locale
+ # anytime we call run_command()
+ module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+
+ if not dest and (checkout or update or export):
+ module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no")
+
+ svn = Subversion(module, dest, repo, revision, username, password, svn_path)
+
+ if not export and not update and not checkout:
+ module.exit_json(changed=False, after=svn.get_remote_revision())
+ if export or not os.path.exists(dest):
+ before = None
+ local_mods = False
+ if module.check_mode:
+ module.exit_json(changed=True)
+ elif not export and not checkout:
+ module.exit_json(changed=False)
+ if not export and checkout:
+ svn.checkout()
+ files_changed = True
+ else:
+ svn.export(force=force)
+ files_changed = True
+ elif svn.is_svn_repo():
+ # Order matters. Need to get local mods before switch to avoid false
+ # positives. Need to switch before revert to ensure we are reverting to
+ # correct repo.
+ if not update:
+ module.exit_json(changed=False)
+ if module.check_mode:
+ if svn.has_local_mods() and not force:
+ module.fail_json(msg="ERROR: modified files exist in the repository.")
+ check, before, after = svn.needs_update()
+ module.exit_json(changed=check, before=before, after=after)
+ files_changed = False
+ before = svn.get_revision()
+ local_mods = svn.has_local_mods()
+ if switch:
+ files_changed = svn.switch() or files_changed
+ if local_mods:
+ if force:
+ files_changed = svn.revert() or files_changed
+ else:
+ module.fail_json(msg="ERROR: modified files exist in the repository.")
+ files_changed = svn.update() or files_changed
+ elif in_place:
+ before = None
+ svn.checkout(force=True)
+ files_changed = True
+ local_mods = svn.has_local_mods()
+ if local_mods and force:
+ svn.revert()
+ else:
+ module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest,))
+
+ if export:
+ module.exit_json(changed=True)
+ else:
+ after = svn.get_revision()
+ changed = files_changed or local_mods
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/systemd.py b/lib/ansible/modules/systemd.py
new file mode 100644
index 00000000..a0bf8057
--- /dev/null
+++ b/lib/ansible/modules/systemd.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: systemd
+author:
+ - Ansible Core Team
+version_added: "2.2"
+short_description: Manage services
+description:
+ - Controls systemd services on remote hosts.
+options:
+ name:
+ description:
+ - Name of the service. This parameter takes the name of exactly one service to work with.
+ - When using in a chroot environment you always need to specify the full name, for example C(crond.service).
+ aliases: [ service, unit ]
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ choices: [ reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled are required.)
+ type: bool
+ force:
+ description:
+ - Whether to override existing symlinks.
+ type: bool
+ version_added: 2.6
+ masked:
+ description:
+ - Whether the unit should be masked or not. A masked unit is impossible to start.
+ type: bool
+ daemon_reload:
+ description:
+ - Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
+ - When set to C(yes), runs daemon-reload even if the module does not start or stop anything.
+ type: bool
+ default: no
+ aliases: [ daemon-reload ]
+ daemon_reexec:
+ description:
+ - Run the daemon-reexec command before doing any other operations; the systemd manager will serialize the manager state.
+ type: bool
+ default: no
+ aliases: [ daemon-reexec ]
+ version_added: "2.8"
+ user:
+ description:
+ - (deprecated) run ``systemctl`` talking to the service manager of the calling user, rather than the service manager
+ of the system.
+ - This option is deprecated and will eventually be removed in 2.11. The ``scope`` option should be used instead.
+ - The default value is C(false).
+ type: bool
+ scope:
+ description:
+ - run systemctl within a given service manager scope, either as the default system scope (system),
+ the current user's scope (user), or the scope of all users (global).
+ - "For systemd to work with 'user', the executing user must have its own instance of dbus started (systemd requirement).
+ The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
+ Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
+ choices: [ system, user, global ]
+ default: system
+ version_added: "2.7"
+ no_block:
+ description:
+ - Do not synchronously wait for the requested operation to finish.
+ Enqueued job will continue without Ansible blocking on its completion.
+ type: bool
+ default: no
+ version_added: "2.3"
+notes:
+ - Since 2.4, one of the following options is required: 'state', 'enabled', 'masked', 'daemon_reload' ('daemon_reexec' since 2.8),
+ and all except 'daemon_reload' (and 'daemon_reexec' since 2.8) also require 'name'.
+ - Before 2.4, 'name' was always required.
+ - Globs are not supported in name, e.g. ``postgres*.service``.
+requirements:
+ - A system managed by systemd.
+'''
+
+EXAMPLES = '''
+- name: Make sure a service is running
+ systemd:
+ state: started
+ name: httpd
+
+- name: Stop service cron on debian, if running
+ systemd:
+ name: cron
+ state: stopped
+
+- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
+ systemd:
+ state: restarted
+ daemon_reload: yes
+ name: crond
+
+- name: Reload service httpd, in all cases
+ systemd:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd and ensure it is not masked
+ systemd:
+ name: httpd
+ enabled: yes
+ masked: no
+
+- name: Enable a timer for dnf-automatic
+ systemd:
+ name: dnf-automatic.timer
+ state: started
+ enabled: yes
+
+- name: Just force systemd to reread configs (2.4 and above)
+ systemd:
+ daemon_reload: yes
+
+- name: Just force systemd to re-execute itself (2.8 and above)
+ systemd:
+ daemon_reexec: yes
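+
+# A sketch of the user scope ('myapp.service' is a placeholder unit that must
+# exist for the calling user).
+- name: Start a unit via the calling user's service manager (2.7 and above)
+ systemd:
+ name: myapp.service
+ state: started
+ scope: user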
+'''
+
+RETURN = '''
+status:
+ description: A dictionary with the key=value pairs returned from C(systemctl show)
+ returned: success
+ type: complex
+ sample: {
+ "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ActiveEnterTimestampMonotonic": "8135942",
+ "ActiveExitTimestampMonotonic": "0",
+ "ActiveState": "active",
+ "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
+ "AllowIsolate": "no",
+ "Before": "shutdown.target multi-user.target",
+ "BlockIOAccounting": "no",
+ "BlockIOWeight": "1000",
+ "CPUAccounting": "no",
+ "CPUSchedulingPolicy": "0",
+ "CPUSchedulingPriority": "0",
+ "CPUSchedulingResetOnFork": "no",
+ "CPUShares": "1024",
+ "CanIsolate": "no",
+ "CanReload": "yes",
+ "CanStart": "yes",
+ "CanStop": "yes",
+ "CapabilityBoundingSet": "18446744073709551615",
+ "ConditionResult": "yes",
+ "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ConditionTimestampMonotonic": "7902742",
+ "Conflicts": "shutdown.target",
+ "ControlGroup": "/system.slice/crond.service",
+ "ControlPID": "0",
+ "DefaultDependencies": "yes",
+ "Delegate": "no",
+ "Description": "Command Scheduler",
+ "DevicePolicy": "auto",
+ "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
+ "ExecMainCode": "0",
+ "ExecMainExitTimestampMonotonic": "0",
+ "ExecMainPID": "595",
+ "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "ExecMainStartTimestampMonotonic": "8134990",
+ "ExecMainStatus": "0",
+ "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
+ "FragmentPath": "/usr/lib/systemd/system/crond.service",
+ "GuessMainPID": "yes",
+ "IOScheduling": "0",
+ "Id": "crond.service",
+ "IgnoreOnIsolate": "no",
+ "IgnoreOnSnapshot": "no",
+ "IgnoreSIGPIPE": "yes",
+ "InactiveEnterTimestampMonotonic": "0",
+ "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
+ "InactiveExitTimestampMonotonic": "8135942",
+ "JobTimeoutUSec": "0",
+ "KillMode": "process",
+ "KillSignal": "15",
+ "LimitAS": "18446744073709551615",
+ "LimitCORE": "18446744073709551615",
+ "LimitCPU": "18446744073709551615",
+ "LimitDATA": "18446744073709551615",
+ "LimitFSIZE": "18446744073709551615",
+ "LimitLOCKS": "18446744073709551615",
+ "LimitMEMLOCK": "65536",
+ "LimitMSGQUEUE": "819200",
+ "LimitNICE": "0",
+ "LimitNOFILE": "4096",
+ "LimitNPROC": "3902",
+ "LimitRSS": "18446744073709551615",
+ "LimitRTPRIO": "0",
+ "LimitRTTIME": "18446744073709551615",
+ "LimitSIGPENDING": "3902",
+ "LimitSTACK": "18446744073709551615",
+ "LoadState": "loaded",
+ "MainPID": "595",
+ "MemoryAccounting": "no",
+ "MemoryLimit": "18446744073709551615",
+ "MountFlags": "0",
+ "Names": "crond.service",
+ "NeedDaemonReload": "no",
+ "Nice": "0",
+ "NoNewPrivileges": "no",
+ "NonBlocking": "no",
+ "NotifyAccess": "none",
+ "OOMScoreAdjust": "0",
+ "OnFailureIsolate": "no",
+ "PermissionsStartOnly": "no",
+ "PrivateNetwork": "no",
+ "PrivateTmp": "no",
+ "RefuseManualStart": "no",
+ "RefuseManualStop": "no",
+ "RemainAfterExit": "no",
+ "Requires": "basic.target",
+ "Restart": "no",
+ "RestartUSec": "100ms",
+ "Result": "success",
+ "RootDirectoryStartOnly": "no",
+ "SameProcessGroup": "no",
+ "SecureBits": "0",
+ "SendSIGHUP": "no",
+ "SendSIGKILL": "yes",
+ "Slice": "system.slice",
+ "StandardError": "inherit",
+ "StandardInput": "null",
+ "StandardOutput": "journal",
+ "StartLimitAction": "none",
+ "StartLimitBurst": "5",
+ "StartLimitInterval": "10000000",
+ "StatusErrno": "0",
+ "StopWhenUnneeded": "no",
+ "SubState": "running",
+ "SyslogLevelPrefix": "yes",
+ "SyslogPriority": "30",
+ "TTYReset": "no",
+ "TTYVHangup": "no",
+ "TTYVTDisallocate": "no",
+ "TimeoutStartUSec": "1min 30s",
+ "TimeoutStopUSec": "1min 30s",
+ "TimerSlackNSec": "50000",
+ "Transient": "no",
+ "Type": "simple",
+ "UMask": "0022",
+ "UnitFileState": "enabled",
+ "WantedBy": "multi-user.target",
+ "Wants": "system.slice",
+ "WatchdogTimestampMonotonic": "0",
+ "WatchdogUSec": "0",
+ }
+''' # NOQA
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.system.chroot import is_chroot
+from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def is_running_service(service_status):
+ return service_status['ActiveState'] in set(['active', 'activating'])
+
+
+def is_deactivating_service(service_status):
+ return service_status['ActiveState'] in set(['deactivating'])
+
+
+def request_was_ignored(out):
+ return '=' not in out and ('ignoring request' in out or 'ignoring command' in out)
+
+
+def parse_systemctl_show(lines):
+ # The output of 'systemctl show' can contain values that span multiple lines. At first glance it
+ # appears that such values are always surrounded by {}, so the previous version of this code
+ # assumed that any value starting with { was a multi-line value; it would then consume lines
+ # until it saw a line that ended with }. However, it is possible to have a single-line value
+ # that starts with { but does not end with } (this could happen in the value for Description=,
+ # for example), and the previous version of this code would then consume all remaining lines as
+ # part of that value. Cryptically, this would lead to Ansible reporting that the service file
+ # couldn't be found.
+ #
+ # To avoid this issue, the following code only accepts multi-line values for keys whose names
+ # start with Exec (e.g., ExecStart=), since these are the only keys whose values are known to
+ # span multiple lines.
+ parsed = {}
+ multival = []
+ k = None
+ for line in lines:
+ if k is None:
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k.startswith('Exec') and v.lstrip().startswith('{'):
+ if not v.rstrip().endswith('}'):
+ multival.append(v)
+ continue
+ parsed[k] = v.strip()
+ k = None
+ else:
+ multival.append(line)
+ if line.rstrip().endswith('}'):
+ parsed[k] = '\n'.join(multival).strip()
+ multival = []
+ k = None
+ return parsed
+
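+# For illustration: given 'systemctl show' output such as
+#   Id=crond.service
+#   ExecStart={ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond
+#   -n $CRONDARGS ; status=0/0 }
+# the parser maps plain KEY=VALUE lines directly, while the multi-line
+# ExecStart value is reassembled (newline-joined) into a single entry.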
+
+# ===========================================
+# Main control flow
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', aliases=['service', 'unit']),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ force=dict(type='bool'),
+ masked=dict(type='bool'),
+ daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
+ daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
+ user=dict(type='bool'),
+ scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
+ no_block=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
+ required_by=dict(
+ state=('name', ),
+ enabled=('name', ),
+ masked=('name', ),
+ ),
+ mutually_exclusive=[['scope', 'user']],
+ )
+
+ unit = module.params['name']
+ if unit is not None:
+ for globpattern in (r"*", r"?", r"["):
+ if globpattern in unit:
+ module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
+
+ systemctl = module.get_bin_path('systemctl', True)
+
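+ # systemd's per-user managers locate their bus via XDG_RUNTIME_DIR;
+ # default it to the conventional /run/user/<uid> path when unset.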
+ if os.getenv('XDG_RUNTIME_DIR') is None:
+ os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
+
+ # Set CLI options depending on params
+ if module.params['user'] is not None:
+ # handle user deprecation, mutually exclusive with scope
+ module.deprecate("The 'user' option is being replaced by 'scope'", version='2.11', collection_name='ansible.builtin')
+ if module.params['user']:
+ module.params['scope'] = 'user'
+ else:
+ module.params['scope'] = 'system'
+
+ # if scope is 'system' or None, we can ignore as there is no extra switch.
+ # The other choices match the corresponding switch
+ if module.params['scope'] != 'system':
+ systemctl += " --%s" % module.params['scope']
+
+ if module.params['no_block']:
+ systemctl += " --no-block"
+
+ if module.params['force']:
+ systemctl += " --force"
+
+ rc = 0
+ out = err = ''
+ result = dict(
+ name=unit,
+ changed=False,
+ status=dict(),
+ )
+
+ # Run daemon-reload first, if requested
+ if module.params['daemon_reload'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
+
+ # Run daemon-reexec
+ if module.params['daemon_reexec'] and not module.check_mode:
+ (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
+ if rc != 0:
+ module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
+
+ if unit:
+ found = False
+ is_initd = sysv_exists(unit)
+ is_systemd = False
+
+ # check service data, cannot error out on rc as it changes across versions, assume not found
+ (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
+
+ if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
+ # load return of systemctl show into dictionary for easy access and return
+ if out:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
+
+ is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
+
+ # Check for loading error
+ if is_systemd and not is_masked and 'LoadError' in result['status']:
+ module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
+
+ # Workaround for https://github.com/ansible/ansible/issues/71528
+ elif err and rc == 1 and 'Failed to parse bus message' in err:
+ result['status'] = parse_systemctl_show(to_native(out).split('\n'))
+
+ unit, sep, suffix = unit.partition('@')
+ unit_search = '{unit}{sep}*'.format(unit=unit, sep=sep)
+ (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}'".format(systemctl=systemctl, unit_search=unit_search))
+ is_systemd = unit in out
+
+ (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
+ result['status']['ActiveState'] = out.rstrip('\n')
+
+ else:
+ # list taken from man systemctl(1) for systemd 244
+ valid_enabled_states = [
+ "enabled",
+ "enabled-runtime",
+ "linked",
+ "linked-runtime",
+ "masked",
+ "masked-runtime",
+ "static",
+ "indirect",
+ "disabled",
+ "generated",
+ "transient"]
+
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ if out.strip() in valid_enabled_states:
+ is_systemd = True
+ else:
+ # fallback list-unit-files as show does not work on some systems (chroot)
+ # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
+ (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
+ if rc == 0:
+ is_systemd = True
+ else:
+ # Check for systemctl command
+ module.run_command(systemctl, check_rc=True)
+
+ # Does service exist?
+ found = is_systemd or is_initd
+ if is_initd and not is_systemd:
+ module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
+
+ # mask/unmask the service, if requested, can operate on services before they are installed
+ if module.params['masked'] is not None:
+ # state is not masked unless systemd affirms otherwise
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+ masked = out.strip() == "masked"
+
+ if masked != module.params['masked']:
+ result['changed'] = True
+ if module.params['masked']:
+ action = 'mask'
+ else:
+ action = 'unmask'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ # some versions of systemd CAN mask/unmask non-existing services; we only fail on missing if they don't
+ fail_if_missing(module, found, unit, msg='host')
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ fail_if_missing(module, found, unit, msg='host')
+
+ # do we need to enable the service?
+ enabled = False
+ (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
+
+ # check systemctl result or if it is a init script
+ if rc == 0:
+ enabled = True
+ elif rc == 1:
+ # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
+ if module.params['scope'] == 'system' and \
+ not module.params['user'] and \
+ is_initd and \
+ not out.strip().endswith('disabled') and \
+ sysv_is_enabled(unit):
+ enabled = True
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
+
+ result['enabled'] = not enabled
+
+ # set service state if requested
+ if module.params['state'] is not None:
+ fail_if_missing(module, found, unit, msg="host")
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # What is current service state?
+ if 'ActiveState' in result['status']:
+ action = None
+ if module.params['state'] == 'started':
+ if not is_running_service(result['status']):
+ action = 'start'
+ elif module.params['state'] == 'stopped':
+ if is_running_service(result['status']) or is_deactivating_service(result['status']):
+ action = 'stop'
+ else:
+ if not is_running_service(result['status']):
+ action = 'start'
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
+ # check for chroot
+ elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
+ module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
+ else:
+ # this should not happen?
+ module.fail_json(msg="Service is in unknown state", status=result['status'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/sysvinit.py b/lib/ansible/modules/sysvinit.py
new file mode 100644
index 00000000..309bb054
--- /dev/null
+++ b/lib/ansible/modules/sysvinit.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Brian Coca <bcoca@ansible.com>
+# (c) 2017, Adam Miller <admiller@redhat.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: sysvinit
+author:
+ - "Ansible Core Team"
+version_added: "2.6"
+short_description: Manage SysV services
+description:
+ - Controls services on target hosts that use the SysV init system.
+options:
+ name:
+ required: true
+ description:
+ - Name of the service.
+ aliases: ['service']
+ state:
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ Not all init scripts support C(restarted) or C(reloaded) natively, so these will both trigger a stop and start as needed.
+ enabled:
+ type: bool
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled are required.)
+ sleep:
+ default: 1
+ description:
+ - If the service is being C(restarted) or C(reloaded) then sleep this many seconds between the stop and start command.
+ This helps to work around badly behaving services.
+ pattern:
+ description:
+ - A substring to look for as would be found in the output of the I(ps) command as a stand-in for a status result.
+ - If the string is found, the service will be assumed to be running.
+ - "This option is mainly for use with init scripts that don't support the 'status' option."
+ runlevels:
+ description:
+ - The runlevels this script should be enabled/disabled from.
+ - Use this to override the defaults set by the package or init script itself.
+ arguments:
+ description:
+ - Additional arguments provided on the command line that some init scripts accept.
+ aliases: [ 'args' ]
+ daemonize:
+ type: bool
+ description:
+ - Have the module daemonize as the service itself might not do so properly.
+ - This is useful with badly written init scripts or daemons, which
+ commonly manifest as the task hanging because the process is still
+ holding the tty, or as the service dying when the task is over because
+ the connection closes the session.
+ default: no
+notes:
+ - At least one option other than I(name) is required.
+requirements:
+ - The managed service must have a corresponding init script.
+'''
+
+EXAMPLES = '''
+- name: Make sure apache2 is started
+ sysvinit:
+ name: apache2
+ state: started
+ enabled: yes
+
+- name: Make sure apache2 is started on runlevels 3 and 5
+ sysvinit:
+ name: apache2
+ state: started
+ enabled: yes
+ runlevels:
+ - 3
+ - 5
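+
+# A sketch for init scripts without a usable 'status' action; the service
+# name and pattern are placeholder values.
+- name: Restart myservice, matching it in ps output and pausing between stop and start
+ sysvinit:
+ name: myservice
+ state: restarted
+ sleep: 5
+ pattern: /usr/sbin/myserviced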
+'''
+
+RETURN = r'''
+results:
+ description: results from actions taken
+ returned: always
+ type: complex
+ sample: {
+ "attempts": 1,
+ "changed": true,
+ "name": "apache2",
+ "status": {
+ "enabled": {
+ "changed": true,
+ "rc": 0,
+ "stderr": "",
+ "stdout": ""
+ },
+ "stopped": {
+ "changed": true,
+ "rc": 0,
+ "stderr": "",
+ "stdout": "Stopping web server: apache2.\n"
+ }
+ }
+ }
+'''
+
+import re
+from time import sleep
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import sysv_is_enabled, get_sysv_script, sysv_exists, fail_if_missing, get_ps, daemonize
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
+ enabled=dict(type='bool'),
+ sleep=dict(type='int', default=1),
+ pattern=dict(type='str'),
+ arguments=dict(type='str', aliases=['args']),
+ runlevels=dict(type='list'),
+ daemonize=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ name = module.params['name']
+ action = module.params['state']
+ enabled = module.params['enabled']
+ runlevels = module.params['runlevels']
+ pattern = module.params['pattern']
+ sleep_for = module.params['sleep']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': name,
+ 'changed': False,
+ 'status': {}
+ }
+
+ # ensure service exists, get script name
+ fail_if_missing(module, sysv_exists(name), name)
+ script = get_sysv_script(name)
+
+ # locate binaries for service management
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['chkconfig', 'update-rc.d', 'insserv', 'service']
+
+ # Keeps track of the service status for various runlevels because we can
+ # operate on multiple runlevels at once
+ runlevel_status = {}
+
+ location = {}
+ for binary in binaries:
+ location[binary] = module.get_bin_path(binary, opt_dirs=paths)
+
+ # figure out enable status
+ if runlevels:
+ for rl in runlevels:
+ runlevel_status.setdefault(rl, {})
+ runlevel_status[rl]["enabled"] = sysv_is_enabled(name, runlevel=rl)
+ else:
+ runlevel_status["enabled"] = sysv_is_enabled(name)
+
+ # figure out started status, everyone does it differently!
+ is_started = False
+ worked = False
+
+ # user knows other methods fail and supplied pattern
+ if pattern:
+ worked = is_started = get_ps(module, pattern)
+ else:
+ if location.get('service'):
+ # standard tool that has been 'destandardized' by reimplementation in other OS/distros
+ cmd = '%s %s status' % (location['service'], name)
+ elif script:
+ # maybe script implements status (not LSB)
+ cmd = '%s status' % script
+ else:
+ module.fail_json(msg="Unable to determine service status")
+
+ (rc, out, err) = module.run_command(cmd)
+ if rc != -1:
+ # special case
+ if name == 'iptables' and "ACCEPT" in out:
+ worked = True
+ is_started = True
+
+ # check output messages, messy but sadly more reliable than rc
+ if not worked and out.count('\n') <= 1:
+
+ cleanout = out.lower().replace(name.lower(), '')
+
+ for stopped in ['stop', 'is dead ', 'dead but ', 'could not access pid file', 'inactive']:
+ if stopped in cleanout:
+ worked = True
+ break
+
+ if not worked:
+ for started_status in ['run', 'start', 'active']:
+ if started_status in cleanout and "not " not in cleanout:
+ is_started = True
+ worked = True
+ break
+
+ # hope rc is not lying to us, use often used 'bad' returns
+ if not worked and rc in [1, 2, 3, 4, 69]:
+ worked = True
+
+ if not worked:
+ # hail mary
+ if rc == 0:
+ is_started = True
+ worked = True
+ # ps for luck, can only assure positive match
+ elif get_ps(module, name):
+ is_started = True
+ worked = True
+ module.warn("Used ps output to match service name and determine it is up, this is very unreliable")
+
+ if not worked:
+ module.warn("Unable to determine if service is up, assuming it is down")
+
+ ###########################################################################
+ # BEGIN: Enable/Disable
+ result['status'].setdefault('enabled', {})
+ result['status']['enabled']['changed'] = False
+ result['status']['enabled']['rc'] = None
+ result['status']['enabled']['stdout'] = None
+ result['status']['enabled']['stderr'] = None
+ if runlevels:
+ result['status']['enabled']['runlevels'] = runlevels
+ for rl in runlevels:
+ if enabled != runlevel_status[rl]["enabled"]:
+ result['changed'] = True
+ result['status']['enabled']['changed'] = True
+
+ if not module.check_mode and result['changed']:
+ # Perform enable/disable here
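+ # update-rc.d takes the runlevels space-separated, while
+ # chkconfig --level takes them concatenated (e.g. '35')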
+ if enabled:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s enable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s --level %s %s on" % (location['chkconfig'], ''.join(runlevels), name))
+ else:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s disable %s" % (location['update-rc.d'], name, ' '.join(runlevels)))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s --level %s %s off" % (location['chkconfig'], ''.join(runlevels), name))
+ else:
+ if enabled is not None and enabled != runlevel_status["enabled"]:
+ result['changed'] = True
+ result['status']['enabled']['changed'] = True
+
+ if not module.check_mode and result['changed']:
+ # Perform enable/disable here
+ if enabled:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s defaults" % (location['update-rc.d'], name))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s %s on" % (location['chkconfig'], name))
+ else:
+ if location.get('update-rc.d'):
+ (rc, out, err) = module.run_command("%s %s disable" % (location['update-rc.d'], name))
+ elif location.get('chkconfig'):
+ (rc, out, err) = module.run_command("%s %s off" % (location['chkconfig'], name))
+
+    # Assigned above, might be useful if something goes sideways
+ if not module.check_mode and result['status']['enabled']['changed']:
+ result['status']['enabled']['rc'] = rc
+ result['status']['enabled']['stdout'] = out
+ result['status']['enabled']['stderr'] = err
+ rc, out, err = None, None, None
+
+ if "illegal runlevel specified" in result['status']['enabled']['stderr']:
+ module.fail_json(msg="Illegal runlevel specified for enable operation on service %s" % name, **result)
+ # END: Enable/Disable
+ ###########################################################################
+
+ ###########################################################################
+ # BEGIN: state
+ result['status'].setdefault(module.params['state'], {})
+ result['status'][module.params['state']]['changed'] = False
+ result['status'][module.params['state']]['rc'] = None
+ result['status'][module.params['state']]['stdout'] = None
+ result['status'][module.params['state']]['stderr'] = None
+ if action:
+ action = re.sub(r'p?ed$', '', action.lower())
+
+ def runme(doit):
+
+ args = module.params['arguments']
+ cmd = "%s %s %s" % (script, doit, "" if args is None else args)
+
+ # how to run
+ if module.params['daemonize']:
+ (rc, out, err) = daemonize(module, cmd)
+ else:
+ (rc, out, err) = module.run_command(cmd)
+ # FIXME: ERRORS
+
+ if rc != 0:
+ module.fail_json(msg="Failed to %s service: %s" % (action, name), rc=rc, stdout=out, stderr=err)
+
+ return (rc, out, err)
+
+ if action == 'restart':
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+
+ # cannot rely on existing 'restart' in init script
+ for dothis in ['stop', 'start']:
+ (rc, out, err) = runme(dothis)
+ if sleep_for:
+ sleep(sleep_for)
+
+ elif is_started != (action == 'start'):
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+ rc, out, err = runme(action)
+
+ elif is_started == (action == 'stop'):
+ result['changed'] = True
+ result['status'][module.params['state']]['changed'] = True
+ if not module.check_mode:
+ rc, out, err = runme(action)
+
+ if not module.check_mode and result['status'][module.params['state']]['changed']:
+ result['status'][module.params['state']]['rc'] = rc
+ result['status'][module.params['state']]['stdout'] = out
+ result['status'][module.params['state']]['stderr'] = err
+ rc, out, err = None, None, None
+ # END: state
+ ###########################################################################
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/tempfile.py b/lib/ansible/modules/tempfile.py
new file mode 100644
index 00000000..706f4910
--- /dev/null
+++ b/lib/ansible/modules/tempfile.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: tempfile
+version_added: "2.3"
+short_description: Creates temporary files and directories
+description:
+  - The C(tempfile) module creates temporary files and directories. The C(mktemp) command takes different parameters on various systems, so this module
+    helps to avoid the problems related to that. Files and directories created by the module are accessible only to their creator. If you need to make
+    them world-accessible, use the M(ansible.builtin.file) module.
+ - For Windows targets, use the M(ansible.windows.win_tempfile) module instead.
+options:
+ state:
+ description:
+ - Whether to create file or directory.
+ type: str
+ choices: [ directory, file ]
+ default: file
+ path:
+ description:
+ - Location where temporary file or directory should be created.
+ - If path is not specified, the default system temporary directory will be used.
+ type: path
+ prefix:
+ description:
+ - Prefix of file/directory name created by module.
+ type: str
+ default: ansible.
+ suffix:
+ description:
+ - Suffix of file/directory name created by module.
+ type: str
+ default: ""
+seealso:
+- module: ansible.builtin.file
+- module: ansible.windows.win_tempfile
+author:
+ - Krzysztof Magosa (@krzysztof-magosa)
+'''
+
+EXAMPLES = """
+- name: Create temporary build directory
+ tempfile:
+ state: directory
+ suffix: build
+
+- name: Create temporary file
+ tempfile:
+ state: file
+ suffix: temp
+ register: tempfile_1
+
+- name: Use the registered var and the file module to remove the temporary file
+ file:
+ path: "{{ tempfile_1.path }}"
+ state: absent
+ when: tempfile_1.path is defined
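+
+# A minimal sketch of the pattern mentioned in the description above: files
+# created by this module are accessible only to their creator, so widen the
+# permissions afterwards with the file module (the mode value is illustrative)
+- name: Make the temporary file readable by everyone
+  file:
+    path: "{{ tempfile_1.path }}"
+    mode: '0644'
+  when: tempfile_1.path is defined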
+"""
+
+RETURN = '''
+path:
+ description: Path to created file or directory
+ returned: success
+ type: str
+ sample: "/tmp/ansible.bMlvdk"
+'''
+
+from os import close
+from tempfile import mkstemp, mkdtemp
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='file', choices=['file', 'directory']),
+ path=dict(type='path'),
+ prefix=dict(type='str', default='ansible.'),
+ suffix=dict(type='str', default=''),
+ ),
+ )
+
+ try:
+ if module.params['state'] == 'file':
+ handle, path = mkstemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path'],
+ )
+ close(handle)
+ elif module.params['state'] == 'directory':
+ path = mkdtemp(
+ prefix=module.params['prefix'],
+ suffix=module.params['suffix'],
+ dir=module.params['path'],
+ )
+
+ module.exit_json(changed=True, path=path)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/template.py b/lib/ansible/modules/template.py
new file mode 100644
index 00000000..bc1a2730
--- /dev/null
+++ b/lib/ansible/modules/template.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: template
+version_added: historical
+short_description: Template a file out to a remote server
+options:
+ follow:
+ description:
+ - Determine whether symbolic links should be followed.
+ - When set to C(yes) symbolic links will be followed, if they exist.
+ - When set to C(no) symbolic links will not be followed.
+ - Previous to Ansible 2.4, this was hardcoded as C(yes).
+ type: bool
+ default: no
+ version_added: '2.4'
+notes:
+- For Windows you can use M(ansible.windows.win_template) which uses '\\r\\n' as C(newline_sequence) by default.
+seealso:
+- module: ansible.builtin.copy
+- module: ansible.windows.win_copy
+- module: ansible.windows.win_template
+author:
+- Ansible Core Team
+- Michael DeHaan
+extends_documentation_fragment:
+- backup
+- files
+- template_common
+- validate
+'''
+
+EXAMPLES = r'''
+- name: Template a file to /etc/file.conf
+ template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: '0644'
+
+- name: Template a file, using symbolic modes (equivalent to 0644)
+ template:
+ src: /mytemplates/foo.j2
+ dest: /etc/file.conf
+ owner: bin
+ group: wheel
+ mode: u=rw,g=r,o=r
+
+- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
+ template:
+ src: named.conf_{{ ansible_os_family }}.j2
+ dest: /etc/named.conf
+ group: named
+ setype: named_conf_t
+ mode: 0640
+
+- name: Create a DOS-style text file from a template
+ template:
+ src: config.ini.j2
+ dest: /share/windows/config.ini
+ newline_sequence: '\r\n'
+
+- name: Copy a new sudoers file into place, after passing validation with visudo
+ template:
+ src: /mine/sudoers
+ dest: /etc/sudoers
+ validate: /usr/sbin/visudo -cf %s
+
+- name: Update sshd configuration safely, avoid locking yourself out
+ template:
+ src: etc/ssh/sshd_config.j2
+ dest: /etc/ssh/sshd_config
+ owner: root
+ group: root
+ mode: '0600'
+ validate: /usr/sbin/sshd -t -f %s
+ backup: yes
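+
+# An illustrative sketch of the follow option documented above; the paths are
+# assumptions, not part of the original examples
+- name: Template a file through an existing symlink at the destination
+  template:
+    src: /mytemplates/motd.j2
+    dest: /etc/motd
+    follow: yes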
+'''
diff --git a/lib/ansible/modules/unarchive.py b/lib/ansible/modules/unarchive.py
new file mode 100644
index 00000000..749d0288
--- /dev/null
+++ b/lib/ansible/modules/unarchive.py
@@ -0,0 +1,910 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
+# Copyright: (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: (c) 2016, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: unarchive
+version_added: '1.4'
+short_description: Unpacks an archive after (optionally) copying it from the local machine
+description:
+ - The C(unarchive) module unpacks an archive. It will not unpack a compressed file that does not contain an archive.
+ - By default, it will copy the source file from the local system to the target before unpacking.
+ - Set C(remote_src=yes) to unpack an archive which already exists on the target.
+ - If checksum validation is desired, use M(ansible.builtin.get_url) or M(ansible.builtin.uri) instead to fetch the file and set C(remote_src=yes).
+ - For Windows targets, use the M(community.windows.win_unzip) module instead.
+options:
+ src:
+ description:
+ - If C(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If C(remote_src=yes), path on the
+ target server to existing archive file to unpack.
+ - If C(remote_src=yes) and C(src) contains C(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
+ simple cases, for full download support use the M(ansible.builtin.get_url) module.
+ type: path
+ required: true
+ dest:
+ description:
+ - Remote absolute path where the archive should be unpacked.
+ type: path
+ required: true
+ copy:
+ description:
+      - If true, the file is copied from the local controller to the target machine; otherwise, the module looks for the C(src) archive on the target machine.
+ - This option has been deprecated in favor of C(remote_src).
+ - This option is mutually exclusive with C(remote_src).
+ type: bool
+ default: yes
+ creates:
+ description:
+ - If the specified absolute path (file or directory) already exists, this step will B(not) be run.
+ type: path
+ version_added: "1.6"
+ list_files:
+ description:
+ - If set to True, return the list of files that are contained in the tarball.
+ type: bool
+ default: no
+ version_added: "2.0"
+ exclude:
+ description:
+ - List the directory and file entries that you would like to exclude from the unarchive action.
+ type: list
+ version_added: "2.1"
+ keep_newer:
+ description:
+ - Do not replace existing files that are newer than files from the archive.
+ type: bool
+ default: no
+ version_added: "2.1"
+ extra_opts:
+ description:
+ - Specify additional options by passing in an array.
+ - Each space-separated command-line option should be a new element of the array. See examples.
+ - Command-line options with multiple elements must use multiple lines in the array, one for each element.
+ type: list
+    default: []
+ version_added: "2.1"
+ remote_src:
+ description:
+ - Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller.
+ - This option is mutually exclusive with C(copy).
+ type: bool
+ default: no
+ version_added: "2.2"
+ validate_certs:
+ description:
+      - This only applies if using an https URL as the source of the file.
+      - This should only be set to C(no) on personally controlled sites using a self-signed certificate.
+ - Prior to 2.2 the code worked as if this was set to C(yes).
+ type: bool
+ default: yes
+ version_added: "2.2"
+extends_documentation_fragment:
+- decrypt
+- files
+todo:
+ - Re-implement tar support using native tarfile module.
+ - Re-implement zip support using native zipfile module.
+notes:
+  - Requires C(zipinfo) and C(gtar)/C(unzip) commands on the target host.
+ - Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar).
+ - Does not handle I(.gz) files, I(.bz2) files or I(.xz) files that do not contain a I(.tar) archive.
+  - Uses gtar's C(--diff) argument to calculate if anything changed. If this argument is not
+    supported, it will always unpack the archive.
+ - Existing files/directories in the destination which are not in the archive
+ are not touched. This is the same behavior as a normal archive extraction.
+ - Existing files/directories in the destination which are not in the archive
+ are ignored for purposes of deciding if the archive should be unpacked or not.
+seealso:
+- module: community.general.archive
+- module: community.general.iso_extract
+- module: community.windows.win_unzip
+author: Michael DeHaan
+'''
+
+EXAMPLES = r'''
+- name: Extract foo.tgz into /var/lib/foo
+ unarchive:
+ src: foo.tgz
+ dest: /var/lib/foo
+
+- name: Unarchive a file that is already on the remote machine
+ unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ remote_src: yes
+
+- name: Unarchive a file that needs to be downloaded (added in 2.0)
+ unarchive:
+ src: https://example.com/example.zip
+ dest: /usr/local/bin
+ remote_src: yes
+
+- name: Unarchive a file with extra options
+ unarchive:
+ src: /tmp/foo.zip
+ dest: /usr/local/bin
+ extra_opts:
+ - --transform
+ - s/^xxx/yyy/
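+
+# A hedged sketch (paths are illustrative, not from the original examples)
+# combining the exclude and creates options documented above
+- name: Extract an archive on the remote host, skipping its docs directory, only once
+  unarchive:
+    src: /tmp/foo.tar.gz
+    dest: /var/lib/foo
+    remote_src: yes
+    exclude:
+      - docs
+    creates: /var/lib/foo/bin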
+'''
+
+import binascii
+import codecs
+import datetime
+import fnmatch
+import grp
+import os
+import platform
+import pwd
+import re
+import stat
+import time
+import traceback
+from zipfile import ZipFile, BadZipfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_file
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+try: # python 3.3+
+ from shlex import quote
+except ImportError: # older python
+ from pipes import quote
+
+# Strings from tar output that show the archive contents differ from the
+# filesystem
+OWNER_DIFF_RE = re.compile(r': Uid differs$')
+GROUP_DIFF_RE = re.compile(r': Gid differs$')
+MODE_DIFF_RE = re.compile(r': Mode differs$')
+MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
+# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
+EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
+MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
+ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
+INVALID_OWNER_RE = re.compile(r': Invalid owner')
+INVALID_GROUP_RE = re.compile(r': Invalid group')
+
+
+def crc32(path):
+ ''' Return a CRC32 checksum of a file '''
+ with open(path, 'rb') as f:
+ file_content = f.read()
+ return binascii.crc32(file_content) & 0xffffffff
+
+
+def shell_escape(string):
+ ''' Quote meta-characters in the args for the unix shell '''
+ return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string)
+
+
+class UnarchiveError(Exception):
+ pass
+
+
+class ZipArchive(object):
+
+ def __init__(self, src, b_dest, file_args, module):
+ self.src = src
+ self.b_dest = b_dest
+ self.file_args = file_args
+ self.opts = module.params['extra_opts']
+ self.module = module
+ self.excludes = module.params['exclude']
+ self.includes = []
+ self.cmd_path = self.module.get_bin_path('unzip')
+ self.zipinfocmd_path = self.module.get_bin_path('zipinfo')
+ self._files_in_archive = []
+ self._infodict = dict()
+
+ def _permstr_to_octal(self, modestr, umask):
+ ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
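+        # e.g. _permstr_to_octal('rw-r--r--', 0) == 0o644; a non-zero umask
+        # clears the corresponding permission bits from the result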
+ revstr = modestr[::-1]
+ mode = 0
+ for j in range(0, 3):
+ for i in range(0, 3):
+ if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
+ mode += 2 ** (i + 3 * j)
+ # The unzip utility does not support setting the stST bits
+# if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
+# mode += 2 ** (9 + j)
+ return (mode & ~umask)
+
+ def _legacy_file_list(self):
+ unzip_bin = self.module.get_bin_path('unzip')
+ if not unzip_bin:
+ raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)
+
+ rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
+ if rc:
+ raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
+
+ for line in out.splitlines()[3:-2]:
+ fields = line.split(None, 7)
+ self._files_in_archive.append(fields[7])
+ self._infodict[fields[7]] = int(fields[6])
+
+ def _crc32(self, path):
+ if self._infodict:
+ return self._infodict[path]
+
+ try:
+ archive = ZipFile(self.src)
+ except BadZipfile as e:
+ if e.args[0].lower().startswith('bad magic number'):
+ # Python2.4 can't handle zipfiles with > 64K files. Try using
+ # /usr/bin/unzip instead
+ self._legacy_file_list()
+ else:
+ raise
+ else:
+ try:
+ for item in archive.infolist():
+ self._infodict[item.filename] = int(item.CRC)
+ except Exception:
+ archive.close()
+ raise UnarchiveError('Unable to list files in the archive')
+
+ return self._infodict[path]
+
+ @property
+ def files_in_archive(self):
+ if self._files_in_archive:
+ return self._files_in_archive
+
+ self._files_in_archive = []
+ try:
+ archive = ZipFile(self.src)
+ except BadZipfile as e:
+ if e.args[0].lower().startswith('bad magic number'):
+ # Python2.4 can't handle zipfiles with > 64K files. Try using
+ # /usr/bin/unzip instead
+ self._legacy_file_list()
+ else:
+ raise
+ else:
+ try:
+ for member in archive.namelist():
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(member, exclude):
+ exclude_flag = True
+ break
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(member))
+ except Exception:
+ archive.close()
+ raise UnarchiveError('Unable to list files in the archive')
+
+ archive.close()
+ return self._files_in_archive
+
+ def is_unarchived(self):
+ # BSD unzip doesn't support zipinfo listings with timestamp.
+ cmd = [self.zipinfocmd_path, '-T', '-s', self.src]
+ if self.excludes:
+ cmd.extend(['-x', ] + self.excludes)
+ rc, out, err = self.module.run_command(cmd)
+
+ old_out = out
+ diff = ''
+ out = ''
+ if rc == 0:
+ unarchived = True
+ else:
+ unarchived = False
+
+ # Get some information related to user/group ownership
+ umask = os.umask(0)
+ os.umask(umask)
+ systemtype = platform.system()
+
+ # Get current user and group information
+ groups = os.getgroups()
+ run_uid = os.getuid()
+ run_gid = os.getgid()
+ try:
+ run_owner = pwd.getpwuid(run_uid).pw_name
+ except (TypeError, KeyError):
+ run_owner = run_uid
+ try:
+ run_group = grp.getgrgid(run_gid).gr_name
+ except (KeyError, ValueError, OverflowError):
+ run_group = run_gid
+
+ # Get future user ownership
+ fut_owner = fut_uid = None
+ if self.file_args['owner']:
+ try:
+ tpw = pwd.getpwnam(self.file_args['owner'])
+ except KeyError:
+ try:
+ tpw = pwd.getpwuid(self.file_args['owner'])
+ except (TypeError, KeyError):
+ tpw = pwd.getpwuid(run_uid)
+ fut_owner = tpw.pw_name
+ fut_uid = tpw.pw_uid
+ else:
+ try:
+ fut_owner = run_owner
+ except Exception:
+ pass
+ fut_uid = run_uid
+
+ # Get future group ownership
+ fut_group = fut_gid = None
+ if self.file_args['group']:
+ try:
+ tgr = grp.getgrnam(self.file_args['group'])
+ except (ValueError, KeyError):
+ try:
+ tgr = grp.getgrgid(self.file_args['group'])
+ except (KeyError, ValueError, OverflowError):
+ tgr = grp.getgrgid(run_gid)
+ fut_group = tgr.gr_name
+ fut_gid = tgr.gr_gid
+ else:
+ try:
+ fut_group = run_group
+ except Exception:
+ pass
+ fut_gid = run_gid
+
+ for line in old_out.splitlines():
+ change = False
+
+ pcs = line.split(None, 7)
+ if len(pcs) != 8:
+ # Too few fields... probably a piece of the header or footer
+ continue
+
+ # Check first and seventh field in order to skip header/footer
+ if len(pcs[0]) != 7 and len(pcs[0]) != 10:
+ continue
+ if len(pcs[6]) != 15:
+ continue
+
+ # Possible entries:
+ # -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
+ # -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
+ # -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
+ # --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
+ if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
+ continue
+
+ ztype = pcs[0][0]
+ permstr = pcs[0][1:]
+ version = pcs[1]
+ ostype = pcs[2]
+ size = int(pcs[3])
+ path = to_text(pcs[7], errors='surrogate_or_strict')
+
+ # Skip excluded files
+ if path in self.excludes:
+ out += 'Path %s is excluded on request\n' % path
+ continue
+
+ # Itemized change requires L for symlink
+ if path[-1] == '/':
+ if ztype != 'd':
+ err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
+ ftype = 'd'
+ elif ztype == 'l':
+ ftype = 'L'
+ elif ztype == '-':
+ ftype = 'f'
+ elif ztype == '?':
+ ftype = 'f'
+
+ # Some files may be storing FAT permissions, not Unix permissions
+ # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666.
+ # This permission will then be modified by the system UMask.
+ # BSD always applies the Umask, even to Unix permissions.
+ # For Unix style permissions on Linux or Mac, we want to use them directly.
+ # So we set the UMask for this file to zero. That permission set will then be unchanged when calling _permstr_to_octal
+
+ if len(permstr) == 6:
+ if path[-1] == '/':
+ permstr = 'rwxrwxrwx'
+ elif permstr == 'rwx---':
+ permstr = 'rwxrwxrwx'
+ else:
+ permstr = 'rw-rw-rw-'
+ file_umask = umask
+ elif 'bsd' in systemtype.lower():
+ file_umask = umask
+ else:
+ file_umask = 0
+
+ # Test string conformity
+ if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
+ raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
+
+ # DEBUG
+# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
+
+ b_dest = os.path.join(self.b_dest, to_bytes(path, errors='surrogate_or_strict'))
+ try:
+ st = os.lstat(b_dest)
+ except Exception:
+ change = True
+ self.includes.append(path)
+ err += 'Path %s is missing\n' % path
+ diff += '>%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ # Compare file types
+ if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
+ change = True
+ self.includes.append(path)
+ err += 'File %s already exists, but not as a directory\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ if ftype == 'f' and not stat.S_ISREG(st.st_mode):
+ change = True
+ unarchived = False
+ self.includes.append(path)
+ err += 'Directory %s already exists, but not as a regular file\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
+ change = True
+ self.includes.append(path)
+ err += 'Directory %s already exists, but not as a symlink\n' % path
+ diff += 'c%s++++++.?? %s\n' % (ftype, path)
+ continue
+
+ itemized = list('.%s.......??' % ftype)
+
+ # Note: this timestamp calculation has a rounding error
+ # somewhere... unzip and this timestamp can be one second off
+ # When that happens, we report a change and re-unzip the file
+ dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
+ timestamp = time.mktime(dt_object.timetuple())
+
+ # Compare file timestamps
+ if stat.S_ISREG(st.st_mode):
+ if self.module.params['keep_newer']:
+ if timestamp > st.st_mtime:
+ change = True
+ self.includes.append(path)
+ err += 'File %s is older, replacing file\n' % path
+ itemized[4] = 't'
+ elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
+ # Add to excluded files, ignore other changes
+ out += 'File %s is newer, excluding file\n' % path
+ self.excludes.append(path)
+ continue
+ else:
+ if timestamp != st.st_mtime:
+ change = True
+ self.includes.append(path)
+ err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
+ itemized[4] = 't'
+
+ # Compare file sizes
+ if stat.S_ISREG(st.st_mode) and size != st.st_size:
+ change = True
+ err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
+ itemized[3] = 's'
+
+ # Compare file checksums
+ if stat.S_ISREG(st.st_mode):
+ crc = crc32(b_dest)
+ if crc != self._crc32(path):
+ change = True
+ err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
+ itemized[2] = 'c'
+
+ # Compare file permissions
+
+ # Do not handle permissions of symlinks
+ if ftype != 'L':
+
+ # Use the new mode provided with the action, if there is one
+ if self.file_args['mode']:
+ if isinstance(self.file_args['mode'], int):
+ mode = self.file_args['mode']
+ else:
+ try:
+ mode = int(self.file_args['mode'], 8)
+ except Exception as e:
+ try:
+ mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
+ except ValueError as e:
+ self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
+ # Only special files require no umask-handling
+ elif ztype == '?':
+ mode = self._permstr_to_octal(permstr, 0)
+ else:
+ mode = self._permstr_to_octal(permstr, file_umask)
+
+ if mode != stat.S_IMODE(st.st_mode):
+ change = True
+ itemized[5] = 'p'
+ err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
+
+ # Compare file user ownership
+ owner = uid = None
+ try:
+ owner = pwd.getpwuid(st.st_uid).pw_name
+ except (TypeError, KeyError):
+ uid = st.st_uid
+
+ # If we are not root and requested owner is not our user, fail
+ if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
+ raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
+
+ if owner and owner != fut_owner:
+ change = True
+ err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
+ itemized[6] = 'o'
+ elif uid and uid != fut_uid:
+ change = True
+ err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
+ itemized[6] = 'o'
+
+ # Compare file group ownership
+ group = gid = None
+ try:
+ group = grp.getgrgid(st.st_gid).gr_name
+ except (KeyError, ValueError, OverflowError):
+ gid = st.st_gid
+
+ if run_uid != 0 and (fut_group != run_group or fut_gid != run_gid) and fut_gid not in groups:
+ raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
+
+ if group and group != fut_group:
+ change = True
+ err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
+ itemized[6] = 'g'
+ elif gid and gid != fut_gid:
+ change = True
+ err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
+ itemized[6] = 'g'
+
+ # Register changed files and finalize diff output
+ if change:
+ if path not in self.includes:
+ self.includes.append(path)
+ diff += '%s %s\n' % (''.join(itemized), path)
+
+ if self.includes:
+ unarchived = False
+
+ # DEBUG
+# out = old_out + out
+
+ return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
+
+ def unarchive(self):
+ cmd = [self.cmd_path, '-o']
+ if self.opts:
+ cmd.extend(self.opts)
+ cmd.append(self.src)
+ # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
+ # if self.includes:
+ # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
+ # cmd.extend(map(shell_escape, self.includes))
+ if self.excludes:
+ cmd.extend(['-x'] + self.excludes)
+ cmd.extend(['-d', self.b_dest])
+ rc, out, err = self.module.run_command(cmd)
+ return dict(cmd=cmd, rc=rc, out=out, err=err)
+
+ def can_handle_archive(self):
+ if not self.cmd_path:
+ return False, 'Command "unzip" not found.'
+ cmd = [self.cmd_path, '-l', self.src]
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ return True, None
+ return False, 'Command "%s" could not handle archive.' % self.cmd_path
+
+
+class TgzArchive(object):
+
+ def __init__(self, src, b_dest, file_args, module):
+ self.src = src
+ self.b_dest = b_dest
+ self.file_args = file_args
+ self.opts = module.params['extra_opts']
+ self.module = module
+ if self.module.check_mode:
+ self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
+ self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
+ # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
+ self.cmd_path = self.module.get_bin_path('gtar', None)
+ if not self.cmd_path:
+ # Fallback to tar
+ self.cmd_path = self.module.get_bin_path('tar')
+ self.zipflag = '-z'
+ self._files_in_archive = []
+
+ if self.cmd_path:
+ self.tar_type = self._get_tar_type()
+ else:
+ self.tar_type = None
+
+ def _get_tar_type(self):
+ cmd = [self.cmd_path, '--version']
+ (rc, out, err) = self.module.run_command(cmd)
+ tar_type = None
+ if out.startswith('bsdtar'):
+ tar_type = 'bsd'
+ elif out.startswith('tar') and 'GNU' in out:
+ tar_type = 'gnu'
+ return tar_type
+
+ @property
+ def files_in_archive(self):
+ if self._files_in_archive:
+ return self._files_in_archive
+
+ cmd = [self.cmd_path, '--list', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+
+ if rc != 0:
+ raise UnarchiveError('Unable to list files in the archive')
+
+ for filename in out.splitlines():
+ # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
+ # filename = filename.decode('string_escape')
+ filename = to_native(codecs.escape_decode(filename)[0])
+
+ # We don't allow absolute filenames. If the user wants to unarchive rooted in "/"
+ # they need to use "dest: '/'". This follows the defaults for gtar, pax, etc.
+ # Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
+ if filename.startswith('/'):
+ filename = filename[1:]
+
+ exclude_flag = False
+ if self.excludes:
+ for exclude in self.excludes:
+ if fnmatch.fnmatch(filename, exclude):
+ exclude_flag = True
+ break
+
+ if not exclude_flag:
+ self._files_in_archive.append(to_native(filename))
+
+ return self._files_in_archive
+
+ def is_unarchived(self):
+ cmd = [self.cmd_path, '--diff', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.file_args['owner']:
+ cmd.append('--owner=' + quote(self.file_args['owner']))
+ if self.file_args['group']:
+ cmd.append('--group=' + quote(self.file_args['group']))
+ if self.module.params['keep_newer']:
+ cmd.append('--keep-newer-files')
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+
+ # Check whether the differences are in something that we're
+ # setting anyway
+
+ # What is different
+ unarchived = True
+ old_out = out
+ out = ''
+ run_uid = os.getuid()
+        # When unarchiving as a user, or when owner/group/mode is supplied, --diff is insufficient
+        # The only way to be sure is to compare the request with what is on disk (as we do for zip)
+        # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
+ for line in old_out.splitlines() + err.splitlines():
+ # FIXME: Remove the bogus lines from error-output as well !
+ # Ignore bogus errors on empty filenames (when using --split-component)
+ if EMPTY_FILE_RE.search(line):
+ continue
+ if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
+ out += line + '\n'
+ if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
+ out += line + '\n'
+ if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
+ out += line + '\n'
+ if MOD_TIME_DIFF_RE.search(line):
+ out += line + '\n'
+ if MISSING_FILE_RE.search(line):
+ out += line + '\n'
+ if INVALID_OWNER_RE.search(line):
+ out += line + '\n'
+ if INVALID_GROUP_RE.search(line):
+ out += line + '\n'
+ if out:
+ unarchived = False
+ return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
+
+ def unarchive(self):
+ cmd = [self.cmd_path, '--extract', '-C', self.b_dest]
+ if self.zipflag:
+ cmd.append(self.zipflag)
+ if self.opts:
+ cmd.extend(['--show-transformed-names'] + self.opts)
+ if self.file_args['owner']:
+ cmd.append('--owner=' + quote(self.file_args['owner']))
+ if self.file_args['group']:
+ cmd.append('--group=' + quote(self.file_args['group']))
+ if self.module.params['keep_newer']:
+ cmd.append('--keep-newer-files')
+ if self.excludes:
+ cmd.extend(['--exclude=' + f for f in self.excludes])
+ cmd.extend(['-f', self.src])
+ rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ return dict(cmd=cmd, rc=rc, out=out, err=err)
+
+ def can_handle_archive(self):
+ if not self.cmd_path:
+ return False, 'Commands "gtar" and "tar" not found.'
+
+ if self.tar_type != 'gnu':
+ return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
+
+ try:
+ if self.files_in_archive:
+ return True, None
+ except UnarchiveError:
+ return False, 'Command "%s" could not handle archive.' % self.cmd_path
+        # Errors, or no files in the archive, mean that we were not able to
+        # read the archive properly
+ return False, 'Command "%s" found no files in archive. Empty archive files are not supported.' % self.cmd_path
+
+
+# Class to handle tar files that aren't compressed
+class TarArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarArchive, self).__init__(src, b_dest, file_args, module)
+ # argument to tar
+ self.zipflag = ''
+
+
+# Class to handle bzip2 compressed tar files
+class TarBzipArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarBzipArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipflag = '-j'
+
+
+# Class to handle xz compressed tar files
+class TarXzArchive(TgzArchive):
+ def __init__(self, src, b_dest, file_args, module):
+ super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
+ self.zipflag = '-J'
+
+
+# try handlers in order and return the one that works or bail if none work
+def pick_handler(src, dest, file_args, module):
+ handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
+ reasons = set()
+ for handler in handlers:
+ obj = handler(src, dest, file_args, module)
+ (can_handle, reason) = obj.can_handle_archive()
+ if can_handle:
+ return obj
+ reasons.add(reason)
+ reason_msg = ' '.join(reasons)
+ module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
+
+
+def main():
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ src=dict(type='path', required=True),
+ dest=dict(type='path', required=True),
+ remote_src=dict(type='bool', default=False),
+ creates=dict(type='path'),
+ list_files=dict(type='bool', default=False),
+ keep_newer=dict(type='bool', default=False),
+ exclude=dict(type='list', default=[]),
+ extra_opts=dict(type='list', default=[]),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ add_file_common_args=True,
+        # check mode only works for zip files; we cover that later
+ supports_check_mode=True,
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ remote_src = module.params['remote_src']
+ file_args = module.load_file_common_arguments(module.params)
+
+ # did tar file arrive?
+ if not os.path.exists(src):
+ if not remote_src:
+ module.fail_json(msg="Source '%s' failed to transfer" % src)
+    # If remote_src=true, and src= contains ://, try to download the file to a temp directory.
+ elif '://' in src:
+ src = fetch_file(module, src)
+ else:
+ module.fail_json(msg="Source '%s' does not exist" % src)
+ if not os.access(src, os.R_OK):
+ module.fail_json(msg="Source '%s' not readable" % src)
+
+ # skip working with 0 size archives
+ try:
+ if os.path.getsize(src) == 0:
+ module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
+ except Exception as e:
+ module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
+
+ # is dest OK to receive tar file?
+ if not os.path.isdir(b_dest):
+ module.fail_json(msg="Destination '%s' is not a directory" % dest)
+
+ handler = pick_handler(src, b_dest, file_args, module)
+
+ res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
+
+    # do we need to unpack?
+ check_results = handler.is_unarchived()
+
+ # DEBUG
+ # res_args['check_results'] = check_results
+
+ if module.check_mode:
+ res_args['changed'] = not check_results['unarchived']
+ elif check_results['unarchived']:
+ res_args['changed'] = False
+ else:
+ # do the unpack
+ try:
+ res_args['extract_results'] = handler.unarchive()
+ if res_args['extract_results']['rc'] != 0:
+ module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ except IOError:
+ module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
+ else:
+ res_args['changed'] = True
+
+ # Get diff if required
+ if check_results.get('diff', False):
+ res_args['diff'] = {'prepared': check_results['diff']}
+
+ # Run only if we found differences (idempotence) or diff was missing
+ if res_args.get('diff', True) and not module.check_mode:
+ # do we need to change perms?
+ for filename in handler.files_in_archive:
+ file_args['path'] = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
+
+ try:
+ res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
+
+ if module.params['list_files']:
+ res_args['files'] = handler.files_in_archive
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/uri.py b/lib/ansible/modules/uri.py
new file mode 100644
index 00000000..f13ddb47
--- /dev/null
+++ b/lib/ansible/modules/uri.py
@@ -0,0 +1,774 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Romeo Theriault <romeot () hawaii.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: uri
+short_description: Interacts with webservices
+description:
+ - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
+ HTTP authentication mechanisms.
+ - For Windows targets, use the M(ansible.windows.win_uri) module instead.
+version_added: "1.1"
+options:
+ url:
+ description:
+ - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
+ type: str
+ required: true
+ dest:
+ description:
+      - A path to download the file to (if desired). If I(dest) is a
+ directory, the basename of the file on the remote server will be used.
+ type: path
+ url_username:
+ description:
+ - A username for the module to use for Digest, Basic or WSSE authentication.
+ type: str
+ aliases: [ user ]
+ url_password:
+ description:
+ - A password for the module to use for Digest, Basic or WSSE authentication.
+ type: str
+ aliases: [ password ]
+ body:
+ description:
+ - The body of the http request/response to the web service. If C(body_format) is set
+ to 'json' it will take an already formatted JSON string or convert a data structure
+ into JSON.
+ - If C(body_format) is set to 'form-urlencoded' it will convert a dictionary
+ or list of tuples into an 'application/x-www-form-urlencoded' string. (Added in v2.7)
+ - If C(body_format) is set to 'form-multipart' it will convert a dictionary
+ into 'multipart/form-multipart' body. (Added in v2.10)
+ type: raw
+ body_format:
+ description:
+ - The serialization format of the body. When set to C(json), C(form-multipart), or C(form-urlencoded), encodes
+ the body argument, if needed, and automatically sets the Content-Type header accordingly.
+ - As of C(2.3) it is possible to override the `Content-Type` header, when
+ set to C(json) or C(form-urlencoded) via the I(headers) option.
+      - The 'Content-Type' header cannot be overridden when using C(form-multipart).
+ - C(form-urlencoded) was added in v2.7.
+ - C(form-multipart) was added in v2.10.
+ type: str
+ choices: [ form-urlencoded, json, raw, form-multipart ]
+ default: raw
+ version_added: "2.0"
+ method:
+ description:
+ - The HTTP method of the request or response.
+ - In more recent versions we do not restrict the method at the module level anymore
+ but it still must be a valid method accepted by the service handling the request.
+ type: str
+ default: GET
+ return_content:
+ description:
+ - Whether or not to return the body of the response as a "content" key in
+        the dictionary result, regardless of whether the request succeeded or failed.
+ - Independently of this option, if the reported Content-type is "application/json", then the JSON is
+ always loaded into a key called C(json) in the dictionary results.
+ type: bool
+ default: no
+ force_basic_auth:
+ description:
+ - Force the sending of the Basic authentication header upon initial request.
+ - The library used by the uri module only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail.
+ type: bool
+ default: no
+ follow_redirects:
+ description:
+ - Whether or not the URI module should follow redirects. C(all) will follow all redirects.
+ C(safe) will follow only "safe" redirects, where "safe" means that the client is only
+ doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
+ any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
+ where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
+ are deprecated and will be removed in some future version of Ansible.
+ type: str
+ choices: ['all', 'no', 'none', 'safe', 'urllib2', 'yes']
+ default: safe
+ creates:
+ description:
+      - A filename; when it already exists, this step will not be run.
+ type: path
+ removes:
+ description:
+      - A filename; when it does not exist, this step will not be run.
+ type: path
+ status_code:
+ description:
+      - A list of valid, numeric, HTTP status codes that signify success of the request.
+ type: list
+ default: [ 200 ]
+ timeout:
+ description:
+      - The socket-level timeout in seconds.
+ type: int
+ default: 30
+ headers:
+ description:
+ - Add custom HTTP headers to a request in the format of a YAML hash. As
+ of C(2.3) supplying C(Content-Type) here will override the header
+ generated by supplying C(json) or C(form-urlencoded) for I(body_format).
+ type: dict
+ version_added: '2.1'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+      - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ - Prior to 1.9.2 the code defaulted to C(no).
+ type: bool
+ default: yes
+ version_added: '1.9.2'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, I(client_key) is not required
+ type: path
+ version_added: '2.4'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '2.4'
+ src:
+ description:
+ - Path to file to be submitted to the remote server.
+ - Cannot be used with I(body).
+ type: path
+ version_added: '2.7'
+ remote_src:
+ description:
+      - If C(no), the module will search for C(src) on the originating/controller machine.
+      - If C(yes), the module will use the C(src) path on the remote/target machine.
+ type: bool
+ default: no
+ version_added: '2.7'
+ force:
+ description:
+      - If C(yes), do not get a cached copy.
+ - Alias C(thirsty) has been deprecated and will be removed in 2.13.
+ type: bool
+ default: no
+ aliases: [ thirsty ]
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ type: bool
+ default: yes
+ unix_socket:
+ description:
+      - Path to Unix domain socket to use for the connection.
+ version_added: '2.8'
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+notes:
+ - The dependency on httplib2 was removed in Ansible 2.1.
+ - The module returns all the HTTP headers in lower-case.
+ - For Windows targets, use the M(ansible.windows.win_uri) module instead.
+seealso:
+- module: ansible.builtin.get_url
+- module: ansible.windows.win_uri
+author:
+- Romeo Theriault (@romeotheriault)
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = r'''
+- name: Check that you can connect (GET) to a page and it returns a status 200
+ uri:
+ url: http://www.example.com
+
+- name: Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents
+ uri:
+ url: http://www.example.com
+ return_content: yes
+ register: this
+ failed_when: "'AWESOME' not in this.content"
+
+- name: Create a JIRA issue
+ uri:
+ url: https://your.jira.example.com/rest/api/2/issue/
+ user: your_username
+ password: your_pass
+ method: POST
+ body: "{{ lookup('file','issue.json') }}"
+ force_basic_auth: yes
+ status_code: 201
+ body_format: json
+
+- name: Login to a form based webpage, then use the returned cookie to access the app in later tasks
+ uri:
+ url: https://your.form.based.auth.example.com/index.php
+ method: POST
+ body_format: form-urlencoded
+ body:
+ name: your_username
+ password: your_password
+ enter: Sign in
+ status_code: 302
+ register: login
+
+- name: Login to a form based webpage using a list of tuples
+ uri:
+ url: https://your.form.based.auth.example.com/index.php
+ method: POST
+ body_format: form-urlencoded
+ body:
+ - [ name, your_username ]
+ - [ password, your_password ]
+ - [ enter, Sign in ]
+ status_code: 302
+ register: login
+
+- name: Upload a file via multipart/form-multipart
+ uri:
+ url: https://httpbin.org/post
+ method: POST
+ body_format: form-multipart
+ body:
+ file1:
+ filename: /bin/true
+ mime_type: application/octet-stream
+ file2:
+ content: text based file content
+ filename: fake.txt
+ mime_type: text/plain
+ text_form_field: value
+
+- name: Connect to website using a previously stored cookie
+ uri:
+ url: https://your.form.based.auth.example.com/dashboard.php
+ method: GET
+ return_content: yes
+ headers:
+ Cookie: "{{ login.cookies_string }}"
+
+- name: Queue build of a project in Jenkins
+ uri:
+ url: http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}
+ user: "{{ jenkins.user }}"
+ password: "{{ jenkins.password }}"
+ method: GET
+ force_basic_auth: yes
+ status_code: 201
+
+- name: POST from contents of local file
+ uri:
+ url: https://httpbin.org/post
+ method: POST
+ src: file.json
+
+- name: POST from contents of remote file
+ uri:
+ url: https://httpbin.org/post
+ method: POST
+ src: /path/to/my/file.json
+ remote_src: yes
+
+- name: Create workspaces in Log analytics Azure
+ uri:
+ url: https://www.mms.microsoft.com/Embedded/Api/ConfigDataSources/LogManagementData/Save
+ method: POST
+ body_format: json
+ status_code: [200, 202]
+ return_content: true
+ headers:
+ Content-Type: application/json
+ x-ms-client-workspace-path: /subscriptions/{{ sub_id }}/resourcegroups/{{ res_group }}/providers/microsoft.operationalinsights/workspaces/{{ w_spaces }}
+ x-ms-client-platform: ibiza
+ x-ms-client-auth-token: "{{ token_az }}"
+ body:
+
+- name: Pause play until a URL is reachable from this host
+ uri:
+ url: "http://192.0.2.1/some/test"
+ follow_redirects: none
+ method: GET
+ register: _result
+ until: _result.status == 200
+    retries: 720 # 720 * 5 seconds = 1 hour (60*60/5)
+ delay: 5 # Every 5 seconds
+
+# There are issues in a supporting Python library, discussed in
+# https://github.com/ansible/ansible/issues/52705, where a proxy is defined
+# but you want to bypass proxy use on CIDR masks by using no_proxy
+- name: Work around a python issue that doesn't support no_proxy envvar
+ uri:
+ follow_redirects: none
+ validate_certs: false
+ timeout: 5
+ url: "http://{{ ip_address }}:{{ port | default(80) }}"
+ register: uri_data
+ failed_when: false
+ changed_when: false
+ vars:
+ ip_address: 192.0.2.1
+ environment: |
+ {
+ {% for no_proxy in (lookup('env', 'no_proxy') | regex_replace('\s*,\s*', ' ') ).split() %}
+ {% if no_proxy | regex_search('\/') and
+ no_proxy | ipaddr('net') != '' and
+ no_proxy | ipaddr('net') != false and
+ ip_address | ipaddr(no_proxy) is not none and
+ ip_address | ipaddr(no_proxy) != false %}
+ 'no_proxy': '{{ ip_address }}'
+ {% elif no_proxy | regex_search(':') != '' and
+ no_proxy | regex_search(':') != false and
+ no_proxy == ip_address + ':' + (port | default(80)) %}
+ 'no_proxy': '{{ ip_address }}:{{ port | default(80) }}'
+ {% elif no_proxy | ipaddr('host') != '' and
+ no_proxy | ipaddr('host') != false and
+ no_proxy == ip_address %}
+ 'no_proxy': '{{ ip_address }}'
+ {% elif no_proxy | regex_search('^(\*|)\.') != '' and
+ no_proxy | regex_search('^(\*|)\.') != false and
+ no_proxy | regex_replace('\*', '') in ip_address %}
+ 'no_proxy': '{{ ip_address }}'
+ {% endif %}
+ {% endfor %}
+ }
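+
+# An illustrative sketch (URL and path are assumptions) of the dest option
+# documented above: write the response body to a file, and use creates to
+# skip the task once the file exists
+- name: Download the response body to a file
+  uri:
+    url: https://example.com/report.json
+    dest: /tmp/report.json
+    creates: /tmp/report.json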
+'''
+
+RETURN = r'''
+# The return information includes all the HTTP headers in lower-case.
+content:
+ description: The response body content.
+ returned: status not in status_code or return_content is true
+ type: str
+ sample: "{}"
+cookies:
+ description: The cookie values placed in cookie jar.
+ returned: on success
+ type: dict
+ sample: {"SESSIONID": "[SESSIONID]"}
+ version_added: "2.4"
+cookies_string:
+ description: The value for future request Cookie headers.
+ returned: on success
+ type: str
+ sample: "SESSIONID=[SESSIONID]"
+ version_added: "2.6"
+elapsed:
+ description: The number of seconds that elapsed while performing the download.
+ returned: on success
+ type: int
+ sample: 23
+msg:
+ description: The HTTP message from the request.
+ returned: always
+ type: str
+ sample: OK (unknown bytes)
+redirected:
+ description: Whether the request was redirected.
+ returned: on success
+ type: bool
+ sample: false
+status:
+ description: The HTTP status code from the request.
+ returned: always
+ type: int
+ sample: 200
+url:
+ description: The actual URL used for the request.
+ returned: always
+ type: str
+ sample: https://www.ansible.com/
+'''
+
+import cgi
+import datetime
+import json
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule, sanitize_keys
+from ansible.module_utils.six import PY2, iteritems, string_types
+from ansible.module_utils.six.moves.urllib.parse import urlencode, urlsplit
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.urls import fetch_url, prepare_multipart, url_argument_spec
+
+JSON_CANDIDATES = ('text', 'json', 'javascript')
+
+# List of response key names we do not want sanitize_keys() to change.
+NO_MODIFY_KEYS = frozenset(
+ ('msg', 'exception', 'warnings', 'deprecations', 'failed', 'skipped',
+ 'changed', 'rc', 'stdout', 'stderr', 'elapsed', 'path', 'location',
+ 'content_type')
+)
+
+
+def format_message(err, resp):
+ msg = resp.pop('msg')
+ return err + (' %s' % msg if msg else '')
+
+
+def write_file(module, url, dest, content, resp):
+    # write the response content to a tempfile
+    fd, tmpsrc = tempfile.mkstemp(dir=module.tmpdir)
+    os.close(fd)  # the raw descriptor is unused; the path is reopened below
+    f = open(tmpsrc, 'wb')
+ try:
+ f.write(content)
+ except Exception as e:
+ os.remove(tmpsrc)
+ msg = format_message("Failed to create temporary content file: %s" % to_native(e), resp)
+ module.fail_json(msg=msg, **resp)
+ f.close()
+
+ checksum_src = None
+ checksum_dest = None
+
+ # raise an error if there is no tmpsrc file
+ if not os.path.exists(tmpsrc):
+ os.remove(tmpsrc)
+ msg = format_message("Source '%s' does not exist" % tmpsrc, resp)
+ module.fail_json(msg=msg, **resp)
+ if not os.access(tmpsrc, os.R_OK):
+ os.remove(tmpsrc)
+ msg = format_message("Source '%s' not readable" % tmpsrc, resp)
+ module.fail_json(msg=msg, **resp)
+ checksum_src = module.sha1(tmpsrc)
+
+    # check whether the dest file already exists
+ if os.path.exists(dest):
+        # raise an error if we have no permission to write to dest
+ if not os.access(dest, os.W_OK):
+ os.remove(tmpsrc)
+ msg = format_message("Destination '%s' not writable" % dest, resp)
+ module.fail_json(msg=msg, **resp)
+ if not os.access(dest, os.R_OK):
+ os.remove(tmpsrc)
+ msg = format_message("Destination '%s' not readable" % dest, resp)
+ module.fail_json(msg=msg, **resp)
+ checksum_dest = module.sha1(dest)
+ else:
+ if not os.access(os.path.dirname(dest), os.W_OK):
+ os.remove(tmpsrc)
+ msg = format_message("Destination dir '%s' not writable" % os.path.dirname(dest), resp)
+ module.fail_json(msg=msg, **resp)
+
+ if checksum_src != checksum_dest:
+ try:
+ shutil.copyfile(tmpsrc, dest)
+ except Exception as e:
+ os.remove(tmpsrc)
+ msg = format_message("failed to copy %s to %s: %s" % (tmpsrc, dest, to_native(e)), resp)
+ module.fail_json(msg=msg, **resp)
+
+ os.remove(tmpsrc)
+
+
+def url_filename(url):
+ fn = os.path.basename(urlsplit(url)[2])
+ if fn == '':
+ return 'index.html'
+ return fn
+
+
+def absolute_location(url, location):
+ """Attempts to create an absolute URL based on initial URL, and
+ next URL, specifically in the case of a ``Location`` header.
+ """
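+    # e.g. ('http://host/a/b', '/c') -> 'http://host/c'
+    #      ('http://host/a/b', 'c')  -> 'http://host/a/c'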
+
+ if '://' in location:
+ return location
+
+ elif location.startswith('/'):
+ parts = urlsplit(url)
+ base = url.replace(parts[2], '')
+ return '%s%s' % (base, location)
+
+ elif not location.startswith('/'):
+ base = os.path.dirname(url)
+ return '%s/%s' % (base, location)
+
+ else:
+ return location
+
+
+def kv_list(data):
+ ''' Convert data into a list of key-value tuples '''
+ if data is None:
+ return None
+
+ if isinstance(data, Sequence):
+ return list(data)
+
+ if isinstance(data, Mapping):
+ return list(data.items())
+
+ raise TypeError('cannot form-urlencode body, expect list or dict')
+
+
+def form_urlencoded(body):
+ ''' Convert data into a form-urlencoded string '''
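+    # e.g. {'a': 1, 'b': [2, 3]} -> 'a=1&b=2&b=3' (pair order follows the input)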
+ if isinstance(body, string_types):
+ return body
+
+ if isinstance(body, (Mapping, Sequence)):
+ result = []
+ # Turn a list of lists into a list of tuples that urlencode accepts
+ for key, values in kv_list(body):
+ if isinstance(values, string_types) or not isinstance(values, (Mapping, Sequence)):
+ values = [values]
+ for value in values:
+ if value is not None:
+ result.append((to_text(key), to_text(value)))
+ return urlencode(result, doseq=True)
+
+ return body
+
+
+def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
+    # if dest is set and is a directory, check whether we get redirected and
+    # set the filename from that url
+ redirected = False
+ redir_info = {}
+ r = {}
+
+ src = module.params['src']
+ if src:
+ try:
+ headers.update({
+ 'Content-Length': os.stat(src).st_size
+ })
+ data = open(src, 'rb')
+ except OSError:
+ module.fail_json(msg='Unable to open source file %s' % src, elapsed=0)
+ else:
+ data = body
+
+ kwargs = {}
+ if dest is not None:
+        # Stash follow_redirects; in this block we don't want to follow.
+        # We'll reset back to the supplied value soon
+ follow_redirects = module.params['follow_redirects']
+ module.params['follow_redirects'] = False
+ if os.path.isdir(dest):
+ # first check if we are redirected to a file download
+ _, redir_info = fetch_url(module, url, data=body,
+ headers=headers,
+ method=method,
+ timeout=socket_timeout, unix_socket=module.params['unix_socket'])
+ # if we are redirected, update the url with the location header,
+ # and update dest with the new url filename
+ if redir_info['status'] in (301, 302, 303, 307):
+ url = redir_info['location']
+ redirected = True
+ dest = os.path.join(dest, url_filename(url))
+ # if the destination file already exists, only download if the remote file is newer
+ if os.path.exists(dest):
+ kwargs['last_mod_time'] = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
+
+ # Reset follow_redirects back to the stashed value
+ module.params['follow_redirects'] = follow_redirects
+
+ resp, info = fetch_url(module, url, data=data, headers=headers,
+ method=method, timeout=socket_timeout, unix_socket=module.params['unix_socket'],
+ **kwargs)
+
+ try:
+ content = resp.read()
+ except AttributeError:
+ # there was no content, but the body of the error
+ # response may have been stored in info as 'body'
+ content = info.pop('body', '')
+
+ if src:
+ # Try to close the open file handle
+ try:
+ data.close()
+ except Exception:
+ pass
+
+ r['redirected'] = redirected or info['url'] != url
+ r.update(redir_info)
+ r.update(info)
+
+ return r, content, dest
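The dest-is-a-directory branch above issues one request with redirects disabled so that the Location header can supply a filename. A rough standalone equivalent with urllib (a hypothetical helper, not the module's fetch_url):

    import os
    import urllib.error
    import urllib.request
    from urllib.parse import urlsplit

    class _NoRedirect(urllib.request.HTTPRedirectHandler):
        def redirect_request(self, req, fp, code, msg, headers, newurl):
            return None   # returning None makes urllib raise HTTPError instead

    def probe_filename(url):
        opener = urllib.request.build_opener(_NoRedirect)
        try:
            opener.open(url)
            final_url = url
        except urllib.error.HTTPError as e:
            if e.code in (301, 302, 303, 307) and e.headers.get('Location'):
                final_url = e.headers['Location']
            else:
                raise
        return os.path.basename(urlsplit(final_url).path) or 'index.html'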
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dest=dict(type='path'),
+ url_username=dict(type='str', aliases=['user']),
+ url_password=dict(type='str', aliases=['password'], no_log=True),
+ body=dict(type='raw'),
+ body_format=dict(type='str', default='raw', choices=['form-urlencoded', 'json', 'raw', 'form-multipart']),
+ src=dict(type='path'),
+ method=dict(type='str', default='GET'),
+ return_content=dict(type='bool', default=False),
+ follow_redirects=dict(type='str', default='safe', choices=['all', 'no', 'none', 'safe', 'urllib2', 'yes']),
+ creates=dict(type='path'),
+ removes=dict(type='path'),
+ status_code=dict(type='list', default=[200]),
+ timeout=dict(type='int', default=30),
+ headers=dict(type='dict', default={}),
+ unix_socket=dict(type='path'),
+ remote_src=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ mutually_exclusive=[['body', 'src']],
+ )
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='2.13', collection_name='ansible.builtin')
+
+ url = module.params['url']
+ body = module.params['body']
+ body_format = module.params['body_format'].lower()
+ method = module.params['method'].upper()
+ dest = module.params['dest']
+ return_content = module.params['return_content']
+ creates = module.params['creates']
+ removes = module.params['removes']
+ status_code = [int(x) for x in list(module.params['status_code'])]
+ socket_timeout = module.params['timeout']
+
+ dict_headers = module.params['headers']
+
+ if not re.match('^[A-Z]+$', method):
+ module.fail_json(msg="Parameter 'method' needs to be a single word in uppercase, like GET or POST.")
+
+ if body_format == 'json':
+ # Encode the body unless it is a string, in which case assume it is pre-formatted JSON
+ if not isinstance(body, string_types):
+ body = json.dumps(body)
+ if 'content-type' not in [header.lower() for header in dict_headers]:
+ dict_headers['Content-Type'] = 'application/json'
+ elif body_format == 'form-urlencoded':
+ if not isinstance(body, string_types):
+ try:
+ body = form_urlencoded(body)
+ except ValueError as e:
+ module.fail_json(msg='failed to parse body as form_urlencoded: %s' % to_native(e), elapsed=0)
+ if 'content-type' not in [header.lower() for header in dict_headers]:
+ dict_headers['Content-Type'] = 'application/x-www-form-urlencoded'
+ elif body_format == 'form-multipart':
+ try:
+ content_type, body = prepare_multipart(body)
+ except (TypeError, ValueError) as e:
+ module.fail_json(msg='failed to parse body as form-multipart: %s' % to_native(e))
+ dict_headers['Content-Type'] = content_type
+
+ if creates is not None:
+ # do not run the request if 'creates' is set and the file
+ # already exists. This allows idempotence of uri executions.
+ if os.path.exists(creates):
+ module.exit_json(stdout="skipped, since '%s' exists" % creates, changed=False)
+
+ if removes is not None:
+ # do not run the request if 'removes' is set and the file
+ # does not exist. This allows idempotence of uri executions.
+ if not os.path.exists(removes):
+ module.exit_json(stdout="skipped, since '%s' does not exist" % removes, changed=False)
+
+ # Make the request
+ start = datetime.datetime.utcnow()
+ resp, content, dest = uri(module, url, dest, body, body_format, method,
+ dict_headers, socket_timeout)
+ resp['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ resp['status'] = int(resp['status'])
+ resp['changed'] = False
+
+ # Write the file out if requested
+ if dest is not None:
+ if resp['status'] in status_code and resp['status'] != 304:
+ write_file(module, url, dest, content, resp)
+ # allow file attribute changes
+ resp['changed'] = True
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ resp['changed'] = module.set_fs_attributes_if_different(file_args, resp['changed'])
+ resp['path'] = dest
+
+ # Transmogrify the headers, replacing '-' with '_', since variables don't
+ # work with dashes.
+ # In python3, the headers are title cased. Lowercase them to be
+ # compatible with the python2 behaviour.
+ uresp = {}
+ for key, value in iteritems(resp):
+ ukey = key.replace("-", "_").lower()
+ uresp[ukey] = value
+
+ if 'location' in uresp:
+ uresp['location'] = absolute_location(url, uresp['location'])
+
+ # Default content_encoding to try
+ content_encoding = 'utf-8'
+ if 'content_type' in uresp:
+ # Handle multiple Content-Type headers
+ charsets = []
+ content_types = []
+ for value in uresp['content_type'].split(','):
+ ct, params = cgi.parse_header(value)
+ if ct not in content_types:
+ content_types.append(ct)
+ if 'charset' in params:
+ if params['charset'] not in charsets:
+ charsets.append(params['charset'])
+
+ if content_types:
+ content_type = content_types[0]
+ if len(content_types) > 1:
+ module.warn(
+ 'Received multiple conflicting Content-Type values (%s), using %s' % (', '.join(content_types), content_type)
+ )
+ if charsets:
+ content_encoding = charsets[0]
+ if len(charsets) > 1:
+ module.warn(
+ 'Received multiple conflicting charset values (%s), using %s' % (', '.join(charsets), content_encoding)
+ )
+
+ u_content = to_text(content, encoding=content_encoding)
+ if any(candidate in content_type for candidate in JSON_CANDIDATES):
+ try:
+ js = json.loads(u_content)
+ uresp['json'] = js
+ except Exception:
+ if PY2:
+ sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2
+ else:
+ u_content = to_text(content, encoding=content_encoding)
+
+ if module.no_log_values:
+ uresp = sanitize_keys(uresp, module.no_log_values, NO_MODIFY_KEYS)
+
+ if resp['status'] not in status_code:
+ uresp['msg'] = 'Status code was %s and not %s: %s' % (resp['status'], status_code, uresp.get('msg', ''))
+ if return_content:
+ module.fail_json(content=u_content, **uresp)
+ else:
+ module.fail_json(**uresp)
+ elif return_content:
+ module.exit_json(content=u_content, **uresp)
+ else:
+ module.exit_json(**uresp)
+
+
+if __name__ == '__main__':
+ main()
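main() above picks the text encoding out of the Content-Type header with cgi.parse_header. An illustration with hypothetical header values (cgi is what this 2.10-era code uses; the module was deprecated in later Pythons):

    import cgi

    header = 'application/json, text/html; charset=iso-8859-1'
    content_types, charsets = [], []
    for value in header.split(','):
        ct, params = cgi.parse_header(value)
        if ct not in content_types:
            content_types.append(ct)
        if 'charset' in params and params['charset'] not in charsets:
            charsets.append(params['charset'])
    print(content_types)   # ['application/json', 'text/html'] -> first one wins
    print(charsets)        # ['iso-8859-1'] -> overrides the utf-8 default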
diff --git a/lib/ansible/modules/user.py b/lib/ansible/modules/user.py
new file mode 100644
index 00000000..c57fadc9
--- /dev/null
+++ b/lib/ansible/modules/user.py
@@ -0,0 +1,3062 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: user
+version_added: "0.2"
+short_description: Manage user accounts
+description:
+ - Manage user accounts and user attributes.
+ - For Windows targets, use the M(ansible.windows.win_user) module instead.
+options:
+ name:
+ description:
+ - Name of the user to create, remove or modify.
+ type: str
+ required: true
+ aliases: [ user ]
+ uid:
+ description:
+ - Optionally sets the I(UID) of the user.
+ type: int
+ comment:
+ description:
+ - Optionally sets the description (aka I(GECOS)) of the user account.
+ type: str
+ hidden:
+ description:
+ - macOS only, optionally hide the user from the login window and system preferences.
+ - The default will be C(yes) if the I(system) option is used.
+ type: bool
+ version_added: "2.6"
+ non_unique:
+ description:
+ - Optionally, when used with I(uid), this allows changing the user ID to a non-unique value.
+ type: bool
+ default: no
+ version_added: "1.1"
+ seuser:
+ description:
+ - Optionally sets the seuser type (user_u) on SELinux-enabled systems.
+ type: str
+ version_added: "2.1"
+ group:
+ description:
+ - Optionally sets the user's primary group (takes a group name).
+ type: str
+ groups:
+ description:
+ - List of groups user will be added to. When set to an empty string C(''),
+ the user is removed from all groups except the primary group.
+ - Before Ansible 2.3, the only input format allowed was a comma-separated string.
+ type: list
+ append:
+ description:
+ - If C(yes), add the user to the groups specified in C(groups).
+ - If C(no), the user will be a member of only the groups specified in C(groups),
+ and will be removed from all other groups.
+ type: bool
+ default: no
+ shell:
+ description:
+ - Optionally set the user's shell.
+ - On macOS, before Ansible 2.5, the default shell for non-system users was C(/usr/bin/false).
+ Since Ansible 2.5, the default shell for non-system users on macOS is C(/bin/bash).
+ - On other operating systems, the default shell is determined by the underlying tool being
+ used. See Notes for details.
+ type: str
+ home:
+ description:
+ - Optionally set the user's home directory.
+ type: path
+ skeleton:
+ description:
+ - Optionally set a home skeleton directory.
+ - Requires C(create_home) option!
+ type: str
+ version_added: "2.0"
+ password:
+ description:
+ - Optionally set the user's password to this crypted value.
+ - On macOS systems, this value has to be cleartext. Beware of security issues.
+ - To create a disabled account on Linux systems, set this to C('!') or C('*').
+ - To create a disabled account on OpenBSD, set this to C('*************').
+ - See U(https://docs.ansible.com/ansible/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
+ for details on various ways to generate these password values.
+ type: str
+ state:
+ description:
+ - Whether the account should exist or not, taking action if the state is different from what is stated.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ create_home:
+ description:
+ - Unless set to C(no), a home directory will be made for the user
+ when the account is created or if the home directory does not exist.
+ - Changed from C(createhome) to C(create_home) in Ansible 2.5.
+ type: bool
+ default: yes
+ aliases: [ createhome ]
+ move_home:
+ description:
+ - "If set to C(yes) when used with C(home: ), attempt to move the user's old home
+ directory to the specified directory if it isn't there already and the old home exists."
+ type: bool
+ default: no
+ system:
+ description:
+ - When creating an account C(state=present), setting this to C(yes) makes the user a system account.
+ - This setting cannot be changed on existing users.
+ type: bool
+ default: no
+ force:
+ description:
+ - This only affects C(state=absent); it forces removal of the user and associated directories on supported platforms.
+ - The behavior is the same as C(userdel --force); check the man page for C(userdel) on your system for details and support.
+ - When used with C(generate_ssh_key=yes), this forces an existing key to be overwritten.
+ type: bool
+ default: no
+ remove:
+ description:
+ - This only affects C(state=absent); it attempts to remove directories associated with the user.
+ - The behavior is the same as C(userdel --remove); check the man page for details and support.
+ type: bool
+ default: no
+ login_class:
+ description:
+ - Optionally sets the user's login class, a feature of most BSD OSs.
+ type: str
+ generate_ssh_key:
+ description:
+ - Whether to generate an SSH key for the user in question.
+ - This will B(not) overwrite an existing SSH key unless used with C(force=yes).
+ type: bool
+ default: no
+ version_added: "0.9"
+ ssh_key_bits:
+ description:
+ - Optionally specify number of bits in SSH key to create.
+ type: int
+ default: default set by ssh-keygen
+ version_added: "0.9"
+ ssh_key_type:
+ description:
+ - Optionally specify the type of SSH key to generate.
+ - Available SSH key types will depend on the implementation
+ present on the target host.
+ type: str
+ default: rsa
+ version_added: "0.9"
+ ssh_key_file:
+ description:
+ - Optionally specify the SSH key filename.
+ - If this is a relative filename then it will be relative to the user's home directory.
+ - This parameter defaults to I(.ssh/id_rsa).
+ type: path
+ version_added: "0.9"
+ ssh_key_comment:
+ description:
+ - Optionally define the comment for the SSH key.
+ type: str
+ default: ansible-generated on $HOSTNAME
+ version_added: "0.9"
+ ssh_key_passphrase:
+ description:
+ - Set a passphrase for the SSH key.
+ - If no passphrase is provided, the SSH key will default to having no passphrase.
+ type: str
+ version_added: "0.9"
+ update_password:
+ description:
+ - C(always) will update passwords if they differ.
+ - C(on_create) will only set the password for newly created users.
+ type: str
+ choices: [ always, on_create ]
+ default: always
+ version_added: "1.3"
+ expires:
+ description:
+ - An expiry time for the user in epoch seconds; it will be ignored on platforms that do not support this.
+ - Currently supported on GNU/Linux, FreeBSD, and DragonFlyBSD.
+ - Since Ansible 2.6 you can remove the expiry time by specifying a negative value.
+ Currently supported on GNU/Linux and FreeBSD.
+ type: float
+ version_added: "1.9"
+ password_lock:
+ description:
+ - Lock the password (C(usermod -L), C(pw lock), C(usermod -C)).
+ - Implementations differ across platforms; this option does not always mean the user cannot log in using other methods.
+ - This option does not disable the user, it only locks the password. Do not change the password in the same task.
+ - Currently supported on Linux, FreeBSD, DragonFlyBSD, NetBSD, OpenBSD.
+ type: bool
+ version_added: "2.6"
+ local:
+ description:
+ - Forces the use of "local" command alternatives on platforms that implement it.
+ - This is useful in environments that use centralized authentication when you want to manipulate the local users
+ (i.e. it uses C(luseradd) instead of C(useradd)).
+ - This will check C(/etc/passwd) for an existing account before invoking commands. If the local account database
+ exists somewhere other than C(/etc/passwd), this setting will not work properly.
+ - This requires that these commands, as well as C(/etc/passwd), exist on the target host; otherwise a fatal error occurs.
+ type: bool
+ default: no
+ version_added: "2.4"
+ profile:
+ description:
+ - Sets the profile of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple profiles using comma separation.
+ - To delete all the profiles, use C(profile='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ authorization:
+ description:
+ - Sets the authorization of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple authorizations using comma separation.
+ - To delete all authorizations, use C(authorization='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+ role:
+ description:
+ - Sets the role of the user.
+ - Does nothing when used with other platforms.
+ - Can set multiple roles using comma separation.
+ - To delete all roles, use C(role='').
+ - Currently supported on Illumos/Solaris.
+ type: str
+ version_added: "2.8"
+notes:
+ - There are specific requirements per platform on user management utilities. However,
+ they generally come pre-installed with the system, and Ansible requires them to be
+ present at runtime. If they are not, a descriptive error message will be shown.
+ - On SunOS platforms, the shadow file is backed up automatically since this module edits it directly.
+ On other platforms, the shadow file is backed up by the underlying tools used by this module.
+ - On macOS, this module uses C(dscl) to create, modify, and delete accounts. C(dseditgroup) is used to
+ modify group membership. Accounts are hidden from the login window by modifying
+ C(/Library/Preferences/com.apple.loginwindow.plist).
+ - On FreeBSD, this module uses C(pw useradd) and C(chpass) to create, C(pw usermod) and C(chpass) to modify,
+ C(pw userdel) to remove, C(pw lock) to lock, and C(pw unlock) to unlock accounts.
+ - On all other platforms, this module uses C(useradd) to create, C(usermod) to modify, and
+ C(userdel) to remove accounts.
+seealso:
+- module: ansible.posix.authorized_key
+- module: ansible.builtin.group
+- module: ansible.windows.win_user
+author:
+- Stephen Fromm (@sfromm)
+'''
+
+EXAMPLES = r'''
+- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
+ user:
+ name: johnd
+ comment: John Doe
+ uid: 1040
+ group: admin
+
+- name: Add the user 'james' with a bash shell, appending the groups 'admins' and 'developers' to the user's groups
+ user:
+ name: james
+ shell: /bin/bash
+ groups: admins,developers
+ append: yes
+
+- name: Remove the user 'johnd'
+ user:
+ name: johnd
+ state: absent
+ remove: yes
+
+- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
+ user:
+ name: jsmith
+ generate_ssh_key: yes
+ ssh_key_bits: 2048
+ ssh_key_file: .ssh/id_rsa
+
+- name: Add a consultant whose account you want to expire
+ user:
+ name: james18
+ shell: /bin/zsh
+ groups: developers
+ expires: 1422403387
+
+- name: Modify the user to remove the expiry time (supported since Ansible 2.6)
+ user:
+ name: james18
+ expires: -1
+'''
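The C(expires) value in the consultant example above is plain epoch seconds. One way to compute such a value, shown here as an editorial aside using the same time/calendar calls the module itself relies on:

    import calendar
    import time

    # Midnight UTC on 2025-01-01 as seconds since the Epoch, usable as 'expires'.
    expires = calendar.timegm(time.strptime('2025-01-01', '%Y-%m-%d'))
    print(expires)   # 1735689600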
+
+RETURN = r'''
+append:
+ description: Whether or not to append the user to groups
+ returned: When state is 'present' and the user exists
+ type: bool
+ sample: True
+comment:
+ description: Comment section from passwd file, usually the user's full name
+ returned: When user exists
+ type: str
+ sample: Agent Smith
+create_home:
+ description: Whether or not to create the home directory
+ returned: When user does not exist and not check mode
+ type: bool
+ sample: True
+force:
+ description: Whether or not a user account was forcibly deleted
+ returned: When state is 'absent' and user exists
+ type: bool
+ sample: False
+group:
+ description: Primary user group ID
+ returned: When user exists
+ type: int
+ sample: 1001
+groups:
+ description: List of groups of which the user is a member
+ returned: When C(groups) is not empty and C(state) is 'present'
+ type: str
+ sample: 'chrony,apache'
+home:
+ description: "Path to user's home directory"
+ returned: When C(state) is 'present'
+ type: str
+ sample: '/home/asmith'
+move_home:
+ description: Whether or not to move an existing home directory
+ returned: When C(state) is 'present' and user exists
+ type: bool
+ sample: False
+name:
+ description: User account name
+ returned: always
+ type: str
+ sample: asmith
+password:
+ description: Masked value of the password
+ returned: When C(state) is 'present' and C(password) is not empty
+ type: str
+ sample: 'NOT_LOGGING_PASSWORD'
+remove:
+ description: Whether or not to remove the user account
+ returned: When C(state) is 'absent' and user exists
+ type: bool
+ sample: True
+shell:
+ description: User login shell
+ returned: When C(state) is 'present'
+ type: str
+ sample: '/bin/bash'
+ssh_fingerprint:
+ description: Fingerprint of generated SSH key
+ returned: When C(generate_ssh_key) is C(True)
+ type: str
+ sample: '2048 SHA256:aYNHYcyVm87Igh0IMEDMbvW0QDlRQfE0aJugp684ko8 ansible-generated on host (RSA)'
+ssh_key_file:
+ description: Path to generated SSH private key file
+ returned: When C(generate_ssh_key) is C(True)
+ type: str
+ sample: /home/asmith/.ssh/id_rsa
+ssh_public_key:
+ description: Generated SSH public key file
+ returned: When C(generate_ssh_key) is C(True)
+ type: str
+ sample: >
+ 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95opt4SPEC06tOYsJQJIuN23BbLMGmYo8ysVZQc4h2DZE9ugbjWWGS1/pweUGjVstgzMkBEeBCByaEf/RJKNecKRPeGd2Bw9DCj/bn5Z6rGfNENKBmo
+ 618mUJBvdlEgea96QGjOwSB7/gmonduC7gsWDMNcOdSE3wJMTim4lddiBx4RgC9yXsJ6Tkz9BHD73MXPpT5ETnse+A3fw3IGVSjaueVnlUyUmOBf7fzmZbhlFVXf2Zi2rFTXqvbdGHKkzpw1U8eB8xFPP7y
+ d5u1u0e6Acju/8aZ/l17IDFiLke5IzlqIMRTEbDwLNeO84YQKWTm9fODHzhYe0yvxqLiK07 ansible-generated on host'
+stderr:
+ description: Standard error from running commands
+ returned: When stderr is returned by a command that is run
+ type: str
+ sample: Group wheels does not exist
+stdout:
+ description: Standard output from running commands
+ returned: When standard output is returned by the command that is run
+ type: str
+ sample:
+system:
+ description: Whether or not the account is a system account
+ returned: When C(system) is passed to the module and the account does not exist
+ type: bool
+ sample: True
+uid:
+ description: User ID of the user account
+ returned: When C(UID) is passed to the module
+ type: int
+ sample: 1044
+'''
+
+
+import errno
+import grp
+import calendar
+import os
+import re
+import pty
+import pwd
+import select
+import shutil
+import socket
+import subprocess
+import time
+import math
+
+from ansible.module_utils import distro
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.sys_info import get_platform_subclass
+
+try:
+ import spwd
+ HAVE_SPWD = True
+except ImportError:
+ HAVE_SPWD = False
+
+
+_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
+
+
+class User(object):
+ """
+ This is a generic User manipulation class that is subclassed
+ based on platform.
+
+ A subclass may wish to override the following action methods:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - ssh_key_gen()
+ - ssh_key_fingerprint()
+ - user_exists()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+ PASSWORDFILE = '/etc/passwd'
+ SHADOWFILE = '/etc/shadow'
+ SHADOWFILE_EXPIRE_INDEX = 7
+ LOGIN_DEFS = '/etc/login.defs'
+ DATE_FORMAT = '%Y-%m-%d'
+
+ def __new__(cls, *args, **kwargs):
+ new_cls = get_platform_subclass(User)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.uid = module.params['uid']
+ self.hidden = module.params['hidden']
+ self.non_unique = module.params['non_unique']
+ self.seuser = module.params['seuser']
+ self.group = module.params['group']
+ self.comment = module.params['comment']
+ self.shell = module.params['shell']
+ self.password = module.params['password']
+ self.force = module.params['force']
+ self.remove = module.params['remove']
+ self.create_home = module.params['create_home']
+ self.move_home = module.params['move_home']
+ self.skeleton = module.params['skeleton']
+ self.system = module.params['system']
+ self.login_class = module.params['login_class']
+ self.append = module.params['append']
+ self.sshkeygen = module.params['generate_ssh_key']
+ self.ssh_bits = module.params['ssh_key_bits']
+ self.ssh_type = module.params['ssh_key_type']
+ self.ssh_comment = module.params['ssh_key_comment']
+ self.ssh_passphrase = module.params['ssh_key_passphrase']
+ self.update_password = module.params['update_password']
+ self.home = module.params['home']
+ self.expires = None
+ self.password_lock = module.params['password_lock']
+ self.groups = None
+ self.local = module.params['local']
+ self.profile = module.params['profile']
+ self.authorization = module.params['authorization']
+ self.role = module.params['role']
+
+ if module.params['groups'] is not None:
+ self.groups = ','.join(module.params['groups'])
+
+ if module.params['expires'] is not None:
+ try:
+ self.expires = time.gmtime(module.params['expires'])
+ except Exception as e:
+ module.fail_json(msg="Invalid value for 'expires' %s: %s" % (self.expires, to_native(e)))
+
+ if module.params['ssh_key_file'] is not None:
+ self.ssh_file = module.params['ssh_key_file']
+ else:
+ self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
+
+ if self.groups is None and self.append:
+ # Change the argument_spec in 2.14 and remove this warning
+ # required_by={'append': ['groups']}
+ module.warn("'append' is set, but no 'groups' are specified. Use 'groups' for appending new groups."
+ "This will change to an error in Ansible 2.14.")
+
+ def check_password_encrypted(self):
+ # Darwin needs cleartext password, so skip validation
+ if self.module.params['password'] and self.platform != 'Darwin':
+ maybe_invalid = False
+
+ # Allow setting certain passwords in order to disable the account
+ if self.module.params['password'] in set(['*', '!', '*************']):
+ maybe_invalid = False
+ else:
+ # : for delimiter, * for disable user, ! for lock user
+ # these characters are invalid in the password
+ if any(char in self.module.params['password'] for char in ':*!'):
+ maybe_invalid = True
+ if '$' not in self.module.params['password']:
+ maybe_invalid = True
+ else:
+ fields = self.module.params['password'].split("$")
+ if len(fields) >= 3:
+ # contains characters outside the crypt alphabet
+ if bool(_HASH_RE.search(fields[-1])):
+ maybe_invalid = True
+ # md5
+ if fields[1] == '1' and len(fields[-1]) != 22:
+ maybe_invalid = True
+ # sha256
+ if fields[1] == '5' and len(fields[-1]) != 43:
+ maybe_invalid = True
+ # sha512
+ if fields[1] == '6' and len(fields[-1]) != 86:
+ maybe_invalid = True
+ else:
+ maybe_invalid = True
+ if maybe_invalid:
+ self.module.warn("The input password appears not to have been hashed. "
+ "The 'password' argument must be encrypted for this module to work properly.")
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
+ if self.module.check_mode and obey_checkmode:
+ self.module.debug('In check mode, would have run: "%s"' % cmd)
+ return (0, '', '')
+ else:
+ # cast all args to strings ansible-modules-core/issues/4397
+ cmd = [str(x) for x in cmd]
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def backup_shadow(self):
+ if not self.module.check_mode and self.SHADOWFILE:
+ return self.module.backup_local(self.SHADOWFILE)
+
+ def remove_user_userdel(self):
+ if self.local:
+ command_name = 'luserdel'
+ else:
+ command_name = 'userdel'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+ if self.force and not self.local:
+ cmd.append('-f')
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user_useradd(self):
+
+ if self.local:
+ command_name = 'luseradd'
+ lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
+ lchage_cmd = self.module.get_bin_path('lchage', True)
+ else:
+ command_name = 'useradd'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.seuser is not None:
+ cmd.append('-Z')
+ cmd.append(self.seuser)
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+ elif self.group_exists(self.name):
+ # use the -N option (no user group) if a group already
+ # exists with the same name as the user to prevent
+ # errors from useradd trying to create a group when
+ # USERGROUPS_ENAB is set in /etc/login.defs.
+ if os.path.exists('/etc/redhat-release'):
+ dist = distro.linux_distribution(full_distribution_name=False)
+ major_release = int(dist[1].split('.')[0])
+ if major_release <= 5 or self.local:
+ cmd.append('-n')
+ else:
+ cmd.append('-N')
+ elif os.path.exists('/etc/SuSE-release'):
+ # -N did not exist in useradd before SLE 11 and did not
+ # automatically create a group
+ dist = distro.linux_distribution(full_distribution_name=False)
+ major_release = int(dist[1].split('.')[0])
+ if major_release >= 12:
+ cmd.append('-N')
+ else:
+ cmd.append('-N')
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ if not self.local:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ # If the specified path to the user home contains parent directories that
+ # do not exist and create_home is True, first create the parent directory,
+ # since useradd cannot create it.
+ if self.create_home:
+ parent = os.path.dirname(self.home)
+ if not os.path.isdir(parent):
+ self.create_homedir(self.home)
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.expires is not None and not self.local:
+ cmd.append('-e')
+ if self.expires < time.gmtime(0):
+ cmd.append('')
+ else:
+ cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ if not self.local:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+ else:
+ cmd.append('-M')
+
+ if self.system:
+ cmd.append('-r')
+
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+ if not self.local or rc != 0:
+ return (rc, out, err)
+
+ if self.expires is not None:
+ if self.expires < time.gmtime(0):
+ lexpires = -1
+ else:
+ # Convert seconds since Epoch to days since Epoch
+ lexpires = int(math.floor(self.module.params['expires'])) // 86400
+ (rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ if self.groups is None or len(self.groups) == 0:
+ return (rc, out, err)
+
+ for add_group in groups:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+ return (rc, out, err)
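lchage -E takes days since the Epoch rather than seconds, hence the floor division above. A worked example with a hypothetical expiry:

    import math

    expires = 1735689600                          # 2025-01-01T00:00:00Z, in seconds
    lexpires = int(math.floor(expires)) // 86400  # -> 20089 days since the Epoch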
+
+ def _check_usermod_append(self):
+ # check if this version of usermod can append groups
+
+ if self.local:
+ command_name = 'lusermod'
+ else:
+ command_name = 'usermod'
+
+ usermod_path = self.module.get_bin_path(command_name, True)
+
+ # for some reason, usermod --help cannot be used by non-root users
+ # on RH/Fedora, due to the lack of an execute bit for others
+ if not os.access(usermod_path, os.X_OK):
+ return False
+
+ cmd = [usermod_path, '--help']
+ (rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
+ helpout = data1 + data2
+
+ # check if --append exists
+ lines = to_native(helpout).split('\n')
+ for line in lines:
+ if line.strip().startswith('-a, --append'):
+ return True
+
+ return False
+
+ def modify_user_usermod(self):
+
+ if self.local:
+ command_name = 'lusermod'
+ lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
+ lgroupmod_add = set()
+ lgroupmod_del = set()
+ lchage_cmd = self.module.get_bin_path('lchage', True)
+ lexpires = None
+ else:
+ command_name = 'usermod'
+
+ cmd = [self.module.get_bin_path(command_name, True)]
+ info = self.user_info()
+ has_append = self._check_usermod_append()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ # get a list of all groups for the user, including the primary
+ current_groups = self.user_group_membership(exclude_primary=False)
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set(remove_existing=False)
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ if has_append:
+ cmd.append('-a')
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ if self.local:
+ if self.append:
+ lgroupmod_add = set(groups).difference(current_groups)
+ lgroupmod_del = set()
+ else:
+ lgroupmod_add = set(groups).difference(current_groups)
+ lgroupmod_del = set(current_groups).difference(groups)
+ else:
+ if self.append and not has_append:
+ cmd.append('-A')
+ cmd.append(','.join(group_diff))
+ else:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+ if self.move_home:
+ cmd.append('-m')
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.expires is not None:
+
+ current_expires = int(self.user_password()[1])
+
+ if self.expires < time.gmtime(0):
+ if current_expires >= 0:
+ if self.local:
+ lexpires = -1
+ else:
+ cmd.append('-e')
+ cmd.append('')
+ else:
+ # Convert days since Epoch to seconds since Epoch as struct_time
+ current_expire_date = time.gmtime(current_expires * 86400)
+
+ # Current expires is negative or we compare year, month, and day only
+ if current_expires < 0 or current_expire_date[:3] != self.expires[:3]:
+ if self.local:
+ # Convert seconds since Epoch to days since Epoch
+ lexpires = int(math.floor(self.module.params['expires'])) // 86400
+ else:
+ cmd.append('-e')
+ cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
+ # Lock if no password or unlocked, unlock only if locked
+ if self.password_lock and not info[1].startswith('!'):
+ cmd.append('-L')
+ elif self.password_lock is False and info[1].startswith('!'):
+ # usermod will refuse to unlock a user with no password, module shows 'changed' regardless
+ cmd.append('-U')
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ (rc, out, err) = (None, '', '')
+
+ # skip if no usermod changes to be made
+ if len(cmd) > 1:
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ if not self.local or not (rc is None or rc == 0):
+ return (rc, out, err)
+
+ if lexpires is not None:
+ (rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ if len(lgroupmod_add) == 0 and len(lgroupmod_del) == 0:
+ return (rc, out, err)
+
+ for add_group in lgroupmod_add:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+
+ for del_group in lgroupmod_del:
+ (rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-m', self.name, del_group])
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, out, err)
+ return (rc, out, err)
+
+ def group_exists(self, group):
+ try:
+ # Try group as a gid first
+ grp.getgrgid(int(group))
+ return True
+ except (ValueError, KeyError):
+ try:
+ grp.getgrnam(group)
+ return True
+ except KeyError:
+ return False
+
+ def group_info(self, group):
+ if not self.group_exists(group):
+ return False
+ try:
+ # Try group as a gid first
+ return list(grp.getgrgid(int(group)))
+ except (ValueError, KeyError):
+ return list(grp.getgrnam(group))
+
+ def get_groups_set(self, remove_existing=True):
+ if self.groups is None:
+ return None
+ info = self.user_info()
+ groups = set(x.strip() for x in self.groups.split(',') if x)
+ for g in groups.copy():
+ if not self.group_exists(g):
+ self.module.fail_json(msg="Group %s does not exist" % (g))
+ if info and remove_existing and self.group_info(g)[2] == info[3]:
+ groups.remove(g)
+ return groups
+
+ def user_group_membership(self, exclude_primary=True):
+ ''' Return a list of groups the user belongs to '''
+ groups = []
+ info = self.get_pwd_info()
+ for group in grp.getgrall():
+ if self.name in group.gr_mem:
+ # Exclude the user's primary group by default
+ if not exclude_primary:
+ groups.append(group[0])
+ else:
+ if info[3] != group.gr_gid:
+ groups.append(group[0])
+
+ return groups
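The group reconciliation in modify_user_usermod() above boils down to set arithmetic; a compact illustration with hypothetical group names:

    current = {'wheel', 'docker'}                  # from user_group_membership()
    wanted = {'docker', 'developers'}              # from get_groups_set()

    # Any difference in either direction means a modification is needed.
    assert current.symmetric_difference(wanted) == {'wheel', 'developers'}

    to_add = wanted - current                      # {'developers'}
    to_del = current - wanted                      # {'wheel'}, dropped only when append=no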
+
+ def user_exists(self):
+ # The pwd module does not distinguish between local and directory accounts.
+ # Its output cannot be used to determine whether or not an account exists locally.
+ # It returns True if the account exists locally or in the directory, so instead
+ # look in the local PASSWORD file for an existing account.
+ if self.local:
+ if not os.path.exists(self.PASSWORDFILE):
+ self.module.fail_json(msg="'local: true' specified but unable to find local account file {0} to parse.".format(self.PASSWORDFILE))
+
+ exists = False
+ name_test = '{0}:'.format(self.name)
+ with open(self.PASSWORDFILE, 'rb') as f:
+ reversed_lines = f.readlines()[::-1]
+ for line in reversed_lines:
+ if line.startswith(to_bytes(name_test)):
+ exists = True
+ break
+
+ if not exists:
+ self.module.warn(
+ "'local: true' specified and user '{name}' was not found in {file}. "
+ "The local user account may already exist if the local account database exists "
+ "somewhere other than {file}.".format(file=self.PASSWORDFILE, name=self.name))
+
+ return exists
+
+ else:
+ try:
+ if pwd.getpwnam(self.name):
+ return True
+ except KeyError:
+ return False
+
+ def get_pwd_info(self):
+ if not self.user_exists():
+ return False
+ return list(pwd.getpwnam(self.name))
+
+ def user_info(self):
+ if not self.user_exists():
+ return False
+ info = self.get_pwd_info()
+ if len(info[1]) == 1 or len(info[1]) == 0:
+ info[1] = self.user_password()[0]
+ return info
+
+ def user_password(self):
+ passwd = ''
+ expires = ''
+ if HAVE_SPWD:
+ try:
+ passwd = spwd.getspnam(self.name)[1]
+ expires = spwd.getspnam(self.name)[7]
+ return passwd, expires
+ except KeyError:
+ return passwd, expires
+ except OSError as e:
+ # Python 3.6 raises PermissionError instead of KeyError
+ # Due to absence of PermissionError in python2.7 need to check
+ # errno
+ if e.errno in (errno.EACCES, errno.EPERM, errno.ENOENT):
+ return passwd, expires
+ raise
+
+ if not self.user_exists():
+ return passwd, expires
+ elif self.SHADOWFILE:
+ passwd, expires = self.parse_shadow_file()
+
+ return passwd, expires
+
+ def parse_shadow_file(self):
+ passwd = ''
+ expires = ''
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'r') as f:
+ for line in f:
+ if line.startswith('%s:' % self.name):
+ passwd = line.split(':')[1]
+ expires = line.split(':')[self.SHADOWFILE_EXPIRE_INDEX] or -1
+ return passwd, expires
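parse_shadow_file() relies on the colon-separated shadow layout, which is why SHADOWFILE_EXPIRE_INDEX is 7 for /etc/shadow and 6 for the BSD master.passwd. A sketch against a hypothetical /etc/shadow line:

    line = 'asmith:$6$salt$hash:18000:0:99999:7::19000:'
    fields = line.split(':')
    passwd = fields[1]         # the crypted password field
    expires = fields[7] or -1  # expiry in days since the Epoch; '' means unset
    print(passwd, expires)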
+
+ def get_ssh_key_path(self):
+ info = self.user_info()
+ if os.path.isabs(self.ssh_file):
+ ssh_key_file = self.ssh_file
+ else:
+ if not os.path.exists(info[5]) and not self.module.check_mode:
+ raise Exception('User %s home directory does not exist' % self.name)
+ ssh_key_file = os.path.join(info[5], self.ssh_file)
+ return ssh_key_file
+
+ def ssh_key_gen(self):
+ info = self.user_info()
+ overwrite = None
+ try:
+ ssh_key_file = self.get_ssh_key_path()
+ except Exception as e:
+ return (1, '', to_native(e))
+ ssh_dir = os.path.dirname(ssh_key_file)
+ if not os.path.exists(ssh_dir):
+ if self.module.check_mode:
+ return (0, '', '')
+ try:
+ os.mkdir(ssh_dir, int('0700', 8))
+ os.chown(ssh_dir, info[2], info[3])
+ except OSError as e:
+ return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
+ if os.path.exists(ssh_key_file):
+ if self.force:
+ # ssh-keygen doesn't support overwriting the key interactively, so send 'y' to confirm
+ overwrite = 'y'
+ else:
+ return (None, 'Key already exists, use "force: yes" to overwrite', '')
+ cmd = [self.module.get_bin_path('ssh-keygen', True)]
+ cmd.append('-t')
+ cmd.append(self.ssh_type)
+ if self.ssh_bits > 0:
+ cmd.append('-b')
+ cmd.append(self.ssh_bits)
+ cmd.append('-C')
+ cmd.append(self.ssh_comment)
+ cmd.append('-f')
+ cmd.append(ssh_key_file)
+ if self.ssh_passphrase is not None:
+ if self.module.check_mode:
+ self.module.debug('In check mode, would have run: "%s"' % cmd)
+ return (0, '', '')
+
+ master_in_fd, slave_in_fd = pty.openpty()
+ master_out_fd, slave_out_fd = pty.openpty()
+ master_err_fd, slave_err_fd = pty.openpty()
+ env = os.environ.copy()
+ env['LC_ALL'] = 'C'
+ try:
+ p = subprocess.Popen([to_bytes(c) for c in cmd],
+ stdin=slave_in_fd,
+ stdout=slave_out_fd,
+ stderr=slave_err_fd,
+ preexec_fn=os.setsid,
+ env=env)
+ out_buffer = b''
+ err_buffer = b''
+ while p.poll() is None:
+ r, w, e = select.select([master_out_fd, master_err_fd], [], [], 1)
+ first_prompt = b'Enter passphrase (empty for no passphrase):'
+ second_prompt = b'Enter same passphrase again'
+ prompt = first_prompt
+ for fd in r:
+ if fd == master_out_fd:
+ chunk = os.read(master_out_fd, 10240)
+ out_buffer += chunk
+ if prompt in out_buffer:
+ os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
+ prompt = second_prompt
+ else:
+ chunk = os.read(master_err_fd, 10240)
+ err_buffer += chunk
+ if prompt in err_buffer:
+ os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
+ prompt = second_prompt
+ if b'Overwrite (y/n)?' in out_buffer or b'Overwrite (y/n)?' in err_buffer:
+ # The key was created between us checking for existence and now
+ return (None, 'Key already exists', '')
+
+ rc = p.returncode
+ out = to_native(out_buffer)
+ err = to_native(err_buffer)
+ except OSError as e:
+ return (1, '', to_native(e))
+ else:
+ cmd.append('-N')
+ cmd.append('')
+
+ (rc, out, err) = self.execute_command(cmd, data=overwrite)
+
+ if rc == 0 and not self.module.check_mode:
+ # If the keys were successfully created, we should be able
+ # to tweak ownership.
+ os.chown(ssh_key_file, info[2], info[3])
+ os.chown('%s.pub' % ssh_key_file, info[2], info[3])
+ return (rc, out, err)
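ssh_key_gen() drives ssh-keygen through a pty so the passphrase never appears on the command line or in the process list. A stripped-down, hypothetical helper showing the same prompt/answer loop:

    import os
    import pty
    import select
    import subprocess

    def run_with_answer(cmd, prompt, answer):
        # Run cmd on a pty and write `answer` whenever `prompt` appears.
        master, slave = pty.openpty()
        proc = subprocess.Popen(cmd, stdin=slave, stdout=slave, stderr=slave,
                                preexec_fn=os.setsid)
        os.close(slave)
        buf = b''
        while proc.poll() is None:
            readable, _, _ = select.select([master], [], [], 1)
            if master in readable:
                try:
                    buf += os.read(master, 1024)
                except OSError:    # the pty goes away when the child exits
                    break
                if prompt in buf:
                    os.write(master, answer + b'\r')
                    buf = b''
        os.close(master)
        return proc.returncode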
+
+ def ssh_key_fingerprint(self):
+ ssh_key_file = self.get_ssh_key_path()
+ if not os.path.exists(ssh_key_file):
+ return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
+ cmd = [self.module.get_bin_path('ssh-keygen', True)]
+ cmd.append('-l')
+ cmd.append('-f')
+ cmd.append(ssh_key_file)
+
+ return self.execute_command(cmd, obey_checkmode=False)
+
+ def get_ssh_public_key(self):
+ ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
+ try:
+ with open(ssh_public_key_file, 'r') as f:
+ ssh_public_key = f.read().strip()
+ except IOError:
+ return None
+ return ssh_public_key
+
+ def create_user(self):
+ # by default we use the create_user_useradd method
+ return self.create_user_useradd()
+
+ def remove_user(self):
+ # by default we use the remove_user_userdel method
+ return self.remove_user_userdel()
+
+ def modify_user(self):
+ # by default we use the modify_user_usermod method
+ return self.modify_user_usermod()
+
+ def create_homedir(self, path):
+ if not os.path.exists(path):
+ if self.skeleton is not None:
+ skeleton = self.skeleton
+ else:
+ skeleton = '/etc/skel'
+
+ if os.path.exists(skeleton):
+ try:
+ shutil.copytree(skeleton, path, symlinks=True)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ else:
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+ # get umask from /etc/login.defs and set correct home mode
+ if os.path.exists(self.LOGIN_DEFS):
+ with open(self.LOGIN_DEFS, 'r') as f:
+ for line in f:
+ m = re.match(r'^UMASK\s+(\d+)$', line)
+ if m:
+ umask = int(m.group(1), 8)
+ mode = 0o777 & ~umask
+ try:
+ os.chmod(path, mode)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+
+ def chown_homedir(self, uid, gid, path):
+ try:
+ os.chown(path, uid, gid)
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ os.chown(os.path.join(root, d), uid, gid)
+ for f in files:
+ os.chown(os.path.join(root, f), uid, gid)
+ except OSError as e:
+ self.module.exit_json(failed=True, msg="%s" % to_native(e))
+
+
+# ===========================================
+
+class FreeBsdUser(User):
+ """
+ This is a FreeBSD User manipulation class - it uses the pw command
+ to manipulate the user database, followed by the chpass command
+ to change the password.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'FreeBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+ SHADOWFILE_EXPIRE_INDEX = 6
+ DATE_FORMAT = '%d-%b-%Y'
+
+ def remove_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'userdel',
+ '-n',
+ self.name
+ ]
+ if self.remove:
+ cmd.append('-r')
+
+ return self.execute_command(cmd)
+
+ def create_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'useradd',
+ '-n',
+ self.name,
+ ]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.expires is not None:
+ cmd.append('-e')
+ if self.expires < time.gmtime(0):
+ cmd.append('0')
+ else:
+ cmd.append(str(calendar.timegm(self.expires)))
+
+ # system cannot be handled currently - should we error if it's requested?
+ # create the user
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # we have to set the password in a second command
+ if self.password is not None:
+ cmd = [
+ self.module.get_bin_path('chpass', True),
+ '-p',
+ self.password,
+ self.name
+ ]
+ return self.execute_command(cmd)
+
+ return (rc, out, err)
+
+ def modify_user(self):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'usermod',
+ '-n',
+ self.name
+ ]
+ cmd_len = len(cmd)
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ if (info[5] != self.home and self.move_home) or (not os.path.exists(self.home) and self.create_home):
+ cmd.append('-m')
+ if info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ # find current login class
+ user_login_class = None
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'r') as f:
+ for line in f:
+ if line.startswith('%s:' % self.name):
+ user_login_class = line.split(':')[4]
+
+ # act only if login_class changed
+ if self.login_class != user_login_class:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups = self.get_groups_set()
+
+ group_diff = set(current_groups).symmetric_difference(groups)
+ groups_need_mod = False
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups = groups | set(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.expires is not None:
+
+ current_expires = int(self.user_password()[1])
+
+ # If expiration is negative or zero and the current expiration is greater than zero, disable expiration.
+ # Setting the expiration to zero with pw disables expiration; it does not expire the account.
+ if self.expires <= time.gmtime(0):
+ if current_expires > 0:
+ cmd.append('-e')
+ cmd.append('0')
+ else:
+ # Convert days since Epoch to seconds since Epoch as struct_time
+ current_expire_date = time.gmtime(current_expires)
+
+ # Current expires is negative or we compare year, month, and day only
+ if current_expires <= 0 or current_expire_date[:3] != self.expires[:3]:
+ cmd.append('-e')
+ cmd.append(str(calendar.timegm(self.expires)))
+
+ # modify the user if cmd will do anything
+ if cmd_len != len(cmd):
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+ else:
+ (rc, out, err) = (None, '', '')
+
+ # we have to set the password in a second command
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = [
+ self.module.get_bin_path('chpass', True),
+ '-p',
+ self.password,
+ self.name
+ ]
+ return self.execute_command(cmd)
+
+ # we have to lock/unlock the password in a distinct command
+ if self.password_lock and not info[1].startswith('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'lock',
+ self.name
+ ]
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+ return self.execute_command(cmd)
+ elif self.password_lock is False and info[1].startswith('*LOCKED*'):
+ cmd = [
+ self.module.get_bin_path('pw', True),
+ 'unlock',
+ self.name
+ ]
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+ return self.execute_command(cmd)
+ return (rc, out, err)
+
+
+class DragonFlyBsdUser(FreeBsdUser):
+ """
+ This is a DragonFlyBSD User manipulation class - it inherits the
+ FreeBsdUser class behaviors, such as using the pw command to
+ manipulate the user database, followed by the chpass command
+ to change the password.
+ """
+
+ platform = 'DragonFly'
+
+
+class OpenBSDUser(User):
+ """
+ This is an OpenBSD User manipulation class.
+ Main differences are that OpenBSD:-
+ - has no concept of "system" account.
+ - has no force delete user
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'OpenBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password is not None and self.password != '*':
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user_userdel(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups_option = '-S'
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_option = '-G'
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append(groups_option)
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ # find current login class
+ user_login_class = None
+ userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
+ (rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
+
+ for line in out.splitlines():
+ tokens = line.split()
+
+ if tokens[0] == 'class' and len(tokens) == 2:
+ user_login_class = tokens[1]
+
+ # act only if login_class changed
+ if self.login_class != user_login_class:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password_lock and not info[1].startswith('*'):
+ cmd.append('-Z')
+ elif self.password_lock is False and info[1].startswith('*'):
+ cmd.append('-U')
+
+ if self.update_password == 'always' and self.password is not None \
+ and self.password != '*' and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class NetBSDUser(User):
+ """
+ This is a NetBSD User manipulation class.
+ Main differences are that NetBSD:-
+ - has no concept of "system" account.
+ - has no force delete user
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'NetBSD'
+ distribution = None
+ SHADOWFILE = '/etc/master.passwd'
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ if len(groups) > 16:
+ self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user_userdel(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups = set(current_groups).union(groups)
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ if len(groups) > 16:
+ self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.login_class is not None:
+ cmd.append('-L')
+ cmd.append(self.login_class)
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+        if self.password_lock and not info[1].startswith('*LOCKED*'):
+            cmd.append('-C')
+            cmd.append('yes')
+        elif self.password_lock is False and info[1].startswith('*LOCKED*'):
+            cmd.append('-C')
+            cmd.append('no')
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class SunOS(User):
+ """
+ This is a SunOS User manipulation class - The main difference between
+ this class and the generic user class is that Solaris-type distros
+ don't support the concept of a "system" account and we need to
+ edit the /etc/shadow file manually to set a password. (Ugh)
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - user_info()
+ """
+
+ platform = 'SunOS'
+ distribution = None
+ SHADOWFILE = '/etc/shadow'
+ USER_ATTR = '/etc/user_attr'
+
+ def get_password_defaults(self):
+ # Read password aging defaults
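+        # /etc/default/passwd typically contains lines such as:
+        #   MAXWEEKS=13
+        #   MINWEEKS=3
+        #   WARNWEEKS=2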
+ try:
+ minweeks = ''
+ maxweeks = ''
+ warnweeks = ''
+ with open("/etc/default/passwd", 'r') as f:
+ for line in f:
+ line = line.strip()
+ if (line.startswith('#') or line == ''):
+ continue
+ m = re.match(r'^([^#]*)#(.*)$', line)
+ if m: # The line contains a hash / comment
+ line = m.group(1)
+ key, value = line.split('=')
+ if key == "MINWEEKS":
+ minweeks = value.rstrip('\n')
+ elif key == "MAXWEEKS":
+ maxweeks = value.rstrip('\n')
+ elif key == "WARNWEEKS":
+ warnweeks = value.rstrip('\n')
+ except Exception as err:
+ self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
+
+ return (minweeks, maxweeks, warnweeks)
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('useradd', True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.profile is not None:
+ cmd.append('-P')
+ cmd.append(self.profile)
+
+ if self.authorization is not None:
+ cmd.append('-A')
+ cmd.append(self.authorization)
+
+ if self.role is not None:
+ cmd.append('-R')
+ cmd.append(self.role)
+
+ cmd.append(self.name)
+
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ if not self.module.check_mode:
+ # we have to set the password by editing the /etc/shadow file
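+            # shadow(4) fields: name:password:lastchg:min:max:warn:inactive:expire:flag
+            # (lastchg is days since the epoch; min, max and warn are in days)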
+ if self.password is not None:
+ self.backup_shadow()
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
+ try:
+ lines = []
+ with open(self.SHADOWFILE, 'rb') as f:
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ fields = line.strip().split(':')
+ if not fields[0] == self.name:
+ lines.append(line)
+ continue
+ fields[1] = self.password
+ fields[2] = str(int(time.time() // 86400))
+ if minweeks:
+ try:
+ fields[3] = str(int(minweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ if maxweeks:
+ try:
+ fields[4] = str(int(maxweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ if warnweeks:
+ try:
+ fields[5] = str(int(warnweeks) * 7)
+ except ValueError:
+ # mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
+ pass
+ line = ':'.join(fields)
+ lines.append('%s\n' % line)
+ with open(self.SHADOWFILE, 'w+') as f:
+ f.writelines(lines)
+ except Exception as err:
+ self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
+
+ return (rc, out, err)
+
+ def modify_user_usermod(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ cmd_len = len(cmd)
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+ groups_need_mod = False
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups.update(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.profile is not None and info[7] != self.profile:
+ cmd.append('-P')
+ cmd.append(self.profile)
+
+ if self.authorization is not None and info[8] != self.authorization:
+ cmd.append('-A')
+ cmd.append(self.authorization)
+
+ if self.role is not None and info[9] != self.role:
+ cmd.append('-R')
+ cmd.append(self.role)
+
+ # modify the user if cmd will do anything
+ if cmd_len != len(cmd):
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+ else:
+ (rc, out, err) = (None, '', '')
+
+ # we have to set the password by editing the /etc/shadow file
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ self.backup_shadow()
+ (rc, out, err) = (0, '', '')
+ if not self.module.check_mode:
+ minweeks, maxweeks, warnweeks = self.get_password_defaults()
+ try:
+ lines = []
+ with open(self.SHADOWFILE, 'rb') as f:
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ fields = line.strip().split(':')
+ if not fields[0] == self.name:
+ lines.append(line)
+ continue
+ fields[1] = self.password
+ fields[2] = str(int(time.time() // 86400))
+ if minweeks:
+ fields[3] = str(int(minweeks) * 7)
+ if maxweeks:
+ fields[4] = str(int(maxweeks) * 7)
+ if warnweeks:
+ fields[5] = str(int(warnweeks) * 7)
+ line = ':'.join(fields)
+ lines.append('%s\n' % line)
+ with open(self.SHADOWFILE, 'w+') as f:
+ f.writelines(lines)
+ rc = 0
+ except Exception as err:
+ self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
+
+ return (rc, out, err)
+
+ def user_info(self):
+ info = super(SunOS, self).user_info()
+ if info:
+ info += self._user_attr_info()
+ return info
+
+ def _user_attr_info(self):
+ info = [''] * 3
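+        # /etc/user_attr lines look like (see user_attr(4)):
+        #   jdoe::::profiles=System Administrator;roles=operator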
+ with open(self.USER_ATTR, 'r') as file_handler:
+ for line in file_handler:
+ lines = line.strip().split('::::')
+ if lines[0] == self.name:
+ tmp = dict(x.split('=') for x in lines[1].split(';'))
+ info[0] = tmp.get('profiles', '')
+ info[1] = tmp.get('auths', '')
+ info[2] = tmp.get('roles', '')
+ return info
+
+
+class DarwinUser(User):
+ """
+ This is a Darwin macOS User manipulation class.
+ Main differences are that Darwin:-
+ - Handles accounts in a database managed by dscl(1)
+ - Has no useradd/groupadd
+ - Does not create home directories
+ - User password must be cleartext
+ - UID must be given
+    - System user UIDs must be under 500
+
+ This overrides the following methods from the generic class:-
+ - user_exists()
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+ platform = 'Darwin'
+ distribution = None
+ SHADOWFILE = None
+
+ dscl_directory = '.'
+
+ fields = [
+ ('comment', 'RealName'),
+ ('home', 'NFSHomeDirectory'),
+ ('shell', 'UserShell'),
+ ('uid', 'UniqueID'),
+ ('group', 'PrimaryGroupID'),
+ ('hidden', 'IsHidden'),
+ ]
+
+ def __init__(self, module):
+
+ super(DarwinUser, self).__init__(module)
+
+        # make the user hidden if the option is set, or defer to the system option
+ if self.hidden is None:
+ if self.system:
+ self.hidden = 1
+ elif self.hidden:
+ self.hidden = 1
+ else:
+ self.hidden = 0
+
+        # add hidden to processing if set (it is already included in the
+        # class-level fields list, so avoid appending a duplicate entry)
+        if self.hidden is not None and ('hidden', 'IsHidden') not in self.fields:
+            self.fields.append(('hidden', 'IsHidden'))
+
+ def _get_dscl(self):
+ return [self.module.get_bin_path('dscl', True), self.dscl_directory]
+
+ def _list_user_groups(self):
+ cmd = self._get_dscl()
+ cmd += ['-search', '/Groups', 'GroupMembership', self.name]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ groups = []
+ for line in out.splitlines():
+ if line.startswith(' ') or line.startswith(')'):
+ continue
+ groups.append(line.split()[0])
+ return groups
+
+ def _get_user_property(self, property):
+        '''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
+ cmd = self._get_dscl()
+ cmd += ['-read', '/Users/%s' % self.name, property]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ if rc != 0:
+ return None
+ # from dscl(1)
+ # if property contains embedded spaces, the list will instead be
+ # displayed one entry per line, starting on the line after the key.
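+        # e.g. a value with embedded spaces such as RealName is returned as:
+        #   RealName:
+        #    John Doe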
+ lines = out.splitlines()
+ # sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
+ if len(lines) == 1:
+ return lines[0].split(': ')[1]
+ else:
+ if len(lines) > 2:
+ return '\n'.join([lines[1].strip()] + lines[2:])
+ else:
+ if len(lines) == 2:
+ return lines[1].strip()
+ else:
+ return None
+
+ def _get_next_uid(self, system=None):
+ '''
+ Return the next available uid. If system=True, then
+        the uid should be below 500, if possible.
+ '''
+ cmd = self._get_dscl()
+ cmd += ['-list', '/Users', 'UniqueID']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ if rc != 0:
+ self.module.fail_json(
+ msg="Unable to get the next available uid",
+ rc=rc,
+ out=out,
+ err=err
+ )
+
+ max_uid = 0
+ max_system_uid = 0
+ for line in out.splitlines():
+ current_uid = int(line.split(' ')[-1])
+ if max_uid < current_uid:
+ max_uid = current_uid
+ if max_system_uid < current_uid and current_uid < 500:
+ max_system_uid = current_uid
+
+ if system and (0 < max_system_uid < 499):
+ return max_system_uid + 1
+ return max_uid + 1
+
+ def _change_user_password(self):
+        '''Change the password for SELF.NAME to SELF.PASSWORD.
+
+ Please note that password must be cleartext.
+ '''
+        # some documentation on how passwords are stored on OSX:
+ # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
+ # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
+ # http://pastebin.com/RYqxi7Ca
+ # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
+ # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
+ # https://gist.github.com/nueh/8252572
+ cmd = self._get_dscl()
+ if self.password:
+ cmd += ['-passwd', '/Users/%s' % self.name, self.password]
+ else:
+ cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
+ return (rc, out, err)
+
+ def _make_group_numerical(self):
+        '''Convert SELF.GROUP to its numerical GID value, as a string suitable for dscl.'''
+ if self.group is None:
+ self.group = 'nogroup'
+ try:
+ self.group = grp.getgrnam(self.group).gr_gid
+ except KeyError:
+ self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
+ # We need to pass a string to dscl
+ self.group = str(self.group)
+
+ def __modify_group(self, group, action):
+ '''Add or remove SELF.NAME to or from GROUP depending on ACTION.
+ ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
+ if action == 'add':
+ option = '-a'
+ else:
+ option = '-d'
+ cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+            self.module.fail_json(msg='Cannot %s user "%s" to or from group "%s".'
+                                  % (action, self.name, group), err=err, out=out, rc=rc)
+ return (rc, out, err)
+
+ def _modify_group(self):
+        '''Synchronize SELF.NAME's group membership with SELF.GROUPS,
+        removing the user from any unlisted groups unless SELF.APPEND is set. '''
+
+ rc = 0
+ out = ''
+ err = ''
+ changed = False
+
+ current = set(self._list_user_groups())
+ if self.groups is not None:
+ target = set(self.groups.split(','))
+ else:
+ target = set([])
+
+ if self.append is False:
+ for remove in current - target:
+ (_rc, _out, _err) = self.__modify_group(remove, 'delete')
+                rc += _rc
+ out += _out
+ err += _err
+ changed = True
+
+ for add in target - current:
+ (_rc, _out, _err) = self.__modify_group(add, 'add')
+ rc += _rc
+ out += _out
+ err += _err
+ changed = True
+
+ return (rc, out, err, changed)
+
+ def _update_system_user(self):
+        '''Hide or show the user on the login window according to SELF.SYSTEM.
+
+ Returns 0 if a change has been made, None otherwise.'''
+
+ plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
+
+ # http://support.apple.com/kb/HT5017?viewlocale=en_US
+ cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ # returned value is
+ # (
+ # "_userA",
+ # "_UserB",
+ # userc
+ # )
+ hidden_users = []
+ for x in out.splitlines()[1:-1]:
+ try:
+ x = x.split('"')[1]
+ except IndexError:
+ x = x.strip()
+ hidden_users.append(x)
+
+ if self.system:
+ if self.name not in hidden_users:
+ cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+                    self.module.fail_json(msg='Cannot add user "%s" to the hidden user list.' % self.name, err=err, out=out, rc=rc)
+ return 0
+ else:
+ if self.name in hidden_users:
+                hidden_users.remove(self.name)
+
+ cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
+ return 0
+
+ def user_exists(self):
+        '''Check if SELF.NAME is a known user on the system.'''
+ cmd = self._get_dscl()
+ cmd += ['-list', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
+ return rc == 0
+
+ def remove_user(self):
+ '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
+ info = self.user_info()
+
+ cmd = self._get_dscl()
+ cmd += ['-delete', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
+
+ if self.force:
+ if os.path.exists(info[5]):
+ shutil.rmtree(info[5])
+ out += "Removed %s" % info[5]
+
+ return (rc, out, err)
+
+ def create_user(self, command_name='dscl'):
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name]
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
+
+ self._make_group_numerical()
+ if self.uid is None:
+ self.uid = str(self._get_next_uid(self.system))
+
+ # Homedir is not created by default
+ if self.create_home:
+ if self.home is None:
+ self.home = '/Users/%s' % self.name
+ if not self.module.check_mode:
+ if not os.path.exists(self.home):
+ os.makedirs(self.home)
+ self.chown_homedir(int(self.uid), int(self.group), self.home)
+
+ # dscl sets shell to /usr/bin/false when UserShell is not specified
+ # so set the shell to /bin/bash when the user is not a system user
+ if not self.system and self.shell is None:
+ self.shell = '/bin/bash'
+
+ for field in self.fields:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
+
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
+ (rc, _out, _err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
+
+ out += _out
+ err += _err
+ if rc != 0:
+ return (rc, _out, _err)
+
+ (rc, _out, _err) = self._change_user_password()
+ out += _out
+ err += _err
+
+ self._update_system_user()
+        # here we don't care about the change status since this is a creation,
+        # thus changed is always true.
+ if self.groups:
+ (rc, _out, _err, changed) = self._modify_group()
+ out += _out
+ err += _err
+ return (rc, out, err)
+
+ def modify_user(self):
+ changed = None
+ out = ''
+ err = ''
+
+ if self.group:
+ self._make_group_numerical()
+
+ for field in self.fields:
+ if field[0] in self.__dict__ and self.__dict__[field[0]]:
+ current = self._get_user_property(field[1])
+ if current is None or current != to_text(self.__dict__[field[0]]):
+ cmd = self._get_dscl()
+ cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
+ (rc, _out, _err) = self.execute_command(cmd)
+ if rc != 0:
+ self.module.fail_json(
+ msg='Cannot update property "%s" for user "%s".'
+ % (field[0], self.name), err=err, out=out, rc=rc)
+ changed = rc
+ out += _out
+ err += _err
+ if self.update_password == 'always' and self.password is not None:
+ (rc, _out, _err) = self._change_user_password()
+ out += _out
+ err += _err
+ changed = rc
+
+ if self.groups:
+ (rc, _out, _err, _changed) = self._modify_group()
+ out += _out
+ err += _err
+
+ if _changed is True:
+ changed = rc
+
+ rc = self._update_system_user()
+ if rc == 0:
+ changed = rc
+
+ return (changed, out, err)
+
+
+class AIX(User):
+ """
+    This is an AIX User manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ - parse_shadow_file()
+ """
+
+ platform = 'AIX'
+ distribution = None
+ SHADOWFILE = '/etc/security/passwd'
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('userdel', True)]
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+
+ return self.execute_command(cmd)
+
+ def create_user_useradd(self, command_name='useradd'):
+ cmd = [self.module.get_bin_path(command_name, True)]
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.create_home:
+ cmd.append('-m')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ # set password with chpasswd
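+        # '-e' indicates the supplied password is already encrypted; '-c' clears
+        # password flags (e.g. ADMCHG) so a change is not forced at next login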
+ if self.password is not None:
+ cmd = []
+ cmd.append(self.module.get_bin_path('chpasswd', True))
+ cmd.append('-e')
+ cmd.append('-c')
+ self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
+
+ return (rc, out, err)
+
+ def modify_user_usermod(self):
+ cmd = [self.module.get_bin_path('usermod', True)]
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ if self.move_home:
+ cmd.append('-m')
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ (rc, out, err) = (None, '', '')
+ else:
+ cmd.append(self.name)
+ (rc, out, err) = self.execute_command(cmd)
+
+ # set password with chpasswd
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = []
+ cmd.append(self.module.get_bin_path('chpasswd', True))
+ cmd.append('-e')
+ cmd.append('-c')
+ (rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
+ else:
+ (rc2, out2, err2) = (None, '', '')
+
+ if rc is not None:
+ return (rc, out + out2, err + err2)
+ else:
+ return (rc2, out + out2, err + err2)
+
+ def parse_shadow_file(self):
+ """Example AIX shadowfile data:
+ nobody:
+ password = *
+
+ operator1:
+ password = {ssha512}06$xxxxxxxxxxxx....
+ lastupdate = 1549558094
+
+ test1:
+ password = *
+ lastupdate = 1553695126
+
+ """
+
+ b_name = to_bytes(self.name)
+ b_passwd = b''
+ b_expires = b''
+ if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
+ with open(self.SHADOWFILE, 'rb') as bf:
+ b_lines = bf.readlines()
+
+ b_passwd_line = b''
+ b_expires_line = b''
+ try:
+ for index, b_line in enumerate(b_lines):
+ # Get password and lastupdate lines which come after the username
+ if b_line.startswith(b'%s:' % b_name):
+ b_passwd_line = b_lines[index + 1]
+ b_expires_line = b_lines[index + 2]
+ break
+
+ # Sanity check the lines because sometimes both are not present
+ if b' = ' in b_passwd_line:
+ b_passwd = b_passwd_line.split(b' = ', 1)[-1].strip()
+
+ if b' = ' in b_expires_line:
+ b_expires = b_expires_line.split(b' = ', 1)[-1].strip()
+
+ except IndexError:
+ self.module.fail_json(msg='Failed to parse shadow file %s' % self.SHADOWFILE)
+
+ passwd = to_native(b_passwd)
+ expires = to_native(b_expires) or -1
+ return passwd, expires
+
+
+class HPUX(User):
+ """
+    This is an HP-UX User manipulation class.
+
+ This overrides the following methods from the generic class:-
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ platform = 'HP-UX'
+ distribution = None
+ SHADOWFILE = '/etc/shadow'
+
+ def create_user(self):
+ cmd = ['/usr/sam/lbin/useradd.sam']
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ cmd.append('-G')
+ cmd.append(','.join(groups))
+
+ if self.comment is not None:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-d')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.password is not None:
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ if self.create_home:
+ cmd.append('-m')
+ else:
+ cmd.append('-M')
+
+ if self.system:
+ cmd.append('-r')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def remove_user(self):
+ cmd = ['/usr/sam/lbin/userdel.sam']
+ if self.force:
+ cmd.append('-F')
+ if self.remove:
+ cmd.append('-r')
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ cmd = ['/usr/sam/lbin/usermod.sam']
+ info = self.user_info()
+
+ if self.uid is not None and info[2] != int(self.uid):
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.non_unique:
+ cmd.append('-o')
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg="Group %s does not exist" % self.group)
+ ginfo = self.group_info(self.group)
+ if info[3] != ginfo[2]:
+ cmd.append('-g')
+ cmd.append(self.group)
+
+ if self.groups is not None:
+ current_groups = self.user_group_membership()
+ groups_need_mod = False
+ groups = []
+
+ if self.groups == '':
+ if current_groups and not self.append:
+ groups_need_mod = True
+ else:
+ groups = self.get_groups_set(remove_existing=False)
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ if self.append:
+ for g in groups:
+ if g in group_diff:
+ groups_need_mod = True
+ break
+ else:
+ groups_need_mod = True
+
+ if groups_need_mod:
+ cmd.append('-G')
+ new_groups = groups
+ if self.append:
+ new_groups = groups | set(current_groups)
+ cmd.append(','.join(new_groups))
+
+ if self.comment is not None and info[4] != self.comment:
+ cmd.append('-c')
+ cmd.append(self.comment)
+
+ if self.home is not None and info[5] != self.home:
+ cmd.append('-d')
+ cmd.append(self.home)
+ if self.move_home:
+ cmd.append('-m')
+
+ if self.shell is not None and info[6] != self.shell:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd.append('-F')
+ cmd.append('-p')
+ cmd.append(self.password)
+
+ # skip if no changes to be made
+ if len(cmd) == 1:
+ return (None, '', '')
+
+ cmd.append(self.name)
+ return self.execute_command(cmd)
+
+
+class BusyBox(User):
+ """
+ This is the BusyBox class for use on systems that have adduser, deluser,
+ and delgroup commands. It overrides the following methods:
+ - create_user()
+ - remove_user()
+ - modify_user()
+ """
+
+ def create_user(self):
+ cmd = [self.module.get_bin_path('adduser', True)]
+
+ cmd.append('-D')
+
+ if self.uid is not None:
+ cmd.append('-u')
+ cmd.append(self.uid)
+
+ if self.group is not None:
+ if not self.group_exists(self.group):
+ self.module.fail_json(msg='Group {0} does not exist'.format(self.group))
+ cmd.append('-G')
+ cmd.append(self.group)
+
+ if self.comment is not None:
+ cmd.append('-g')
+ cmd.append(self.comment)
+
+ if self.home is not None:
+ cmd.append('-h')
+ cmd.append(self.home)
+
+ if self.shell is not None:
+ cmd.append('-s')
+ cmd.append(self.shell)
+
+ if not self.create_home:
+ cmd.append('-H')
+
+ if self.skeleton is not None:
+ cmd.append('-k')
+ cmd.append(self.skeleton)
+
+ if self.system:
+ cmd.append('-S')
+
+ cmd.append(self.name)
+
+ rc, out, err = self.execute_command(cmd)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ if self.password is not None:
+ cmd = [self.module.get_bin_path('chpasswd', True)]
+ cmd.append('--encrypted')
+ data = '{name}:{password}'.format(name=self.name, password=self.password)
+ rc, out, err = self.execute_command(cmd, data=data)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # Add to additional groups
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ add_cmd_bin = self.module.get_bin_path('adduser', True)
+ for group in groups:
+ cmd = [add_cmd_bin, self.name, group]
+ rc, out, err = self.execute_command(cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ return rc, out, err
+
+ def remove_user(self):
+
+ cmd = [
+ self.module.get_bin_path('deluser', True),
+ self.name
+ ]
+
+ if self.remove:
+ cmd.append('--remove-home')
+
+ return self.execute_command(cmd)
+
+ def modify_user(self):
+ current_groups = self.user_group_membership()
+ groups = []
+ rc = None
+ out = ''
+ err = ''
+ info = self.user_info()
+ add_cmd_bin = self.module.get_bin_path('adduser', True)
+ remove_cmd_bin = self.module.get_bin_path('delgroup', True)
+
+ # Manage group membership
+ if self.groups is not None and len(self.groups):
+ groups = self.get_groups_set()
+ group_diff = set(current_groups).symmetric_difference(groups)
+
+ if group_diff:
+ for g in groups:
+ if g in group_diff:
+ add_cmd = [add_cmd_bin, self.name, g]
+ rc, out, err = self.execute_command(add_cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ for g in group_diff:
+ if g not in groups and not self.append:
+ remove_cmd = [remove_cmd_bin, self.name, g]
+ rc, out, err = self.execute_command(remove_cmd)
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ # Manage password
+ if self.update_password == 'always' and self.password is not None and info[1] != self.password:
+ cmd = [self.module.get_bin_path('chpasswd', True)]
+ cmd.append('--encrypted')
+ data = '{name}:{password}'.format(name=self.name, password=self.password)
+ rc, out, err = self.execute_command(cmd, data=data)
+
+ if rc is not None and rc != 0:
+ self.module.fail_json(name=self.name, msg=err, rc=rc)
+
+ return rc, out, err
+
+
+class Alpine(BusyBox):
+ """
+ This is the Alpine User manipulation class. It inherits the BusyBox class
+ behaviors such as using adduser and deluser commands.
+ """
+ platform = 'Linux'
+ distribution = 'Alpine'
+
+
+def main():
+ ssh_defaults = dict(
+ bits=0,
+ type='rsa',
+ passphrase=None,
+ comment='ansible-generated on %s' % socket.gethostname()
+ )
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True, aliases=['user']),
+ uid=dict(type='int'),
+ non_unique=dict(type='bool', default=False),
+ group=dict(type='str'),
+ groups=dict(type='list'),
+ comment=dict(type='str'),
+ home=dict(type='path'),
+ shell=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ login_class=dict(type='str'),
+ # following options are specific to macOS
+ hidden=dict(type='bool'),
+ # following options are specific to selinux
+ seuser=dict(type='str'),
+ # following options are specific to userdel
+ force=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ # following options are specific to useradd
+ create_home=dict(type='bool', default=True, aliases=['createhome']),
+ skeleton=dict(type='str'),
+ system=dict(type='bool', default=False),
+ # following options are specific to usermod
+ move_home=dict(type='bool', default=False),
+ append=dict(type='bool', default=False),
+ # following are specific to ssh key generation
+ generate_ssh_key=dict(type='bool'),
+ ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
+ ssh_key_type=dict(type='str', default=ssh_defaults['type']),
+ ssh_key_file=dict(type='path'),
+ ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
+ ssh_key_passphrase=dict(type='str', no_log=True),
+ update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
+ expires=dict(type='float'),
+ password_lock=dict(type='bool', no_log=False),
+ local=dict(type='bool'),
+ profile=dict(type='str'),
+ authorization=dict(type='str'),
+ role=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ user = User(module)
+ user.check_password_encrypted()
+
+ module.debug('User instantiated - platform %s' % user.platform)
+ if user.distribution:
+ module.debug('User instantiated - distribution %s' % user.distribution)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = user.name
+ result['state'] = user.state
+ if user.state == 'absent':
+ if user.user_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = user.remove_user()
+ if rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ result['force'] = user.force
+ result['remove'] = user.remove
+ elif user.state == 'present':
+ if not user.user_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Check to see if the provided home path contains parent directories
+ # that do not exist.
+ path_needs_parents = False
+ if user.home and user.create_home:
+ parent = os.path.dirname(user.home)
+ if not os.path.isdir(parent):
+ path_needs_parents = True
+
+ (rc, out, err) = user.create_user()
+
+ # If the home path had parent directories that needed to be created,
+ # make sure file permissions are correct in the created home directory.
+ if path_needs_parents:
+ info = user.user_info()
+ if info is not False:
+ user.chown_homedir(info[2], info[3], user.home)
+
+ if module.check_mode:
+                result['system'] = user.system
+ else:
+ result['system'] = user.system
+ result['create_home'] = user.create_home
+ else:
+ # modify user (note: this function is check mode aware)
+ (rc, out, err) = user.modify_user()
+ result['append'] = user.append
+ result['move_home'] = user.move_home
+ if rc is not None and rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ if user.password is not None:
+ result['password'] = 'NOT_LOGGING_PASSWORD'
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ if user.user_exists() and user.state == 'present':
+ info = user.user_info()
+        if info is False:
+            result['msg'] = "failed to look up user name: %s" % user.name
+            result['failed'] = True
+        else:
+            result['uid'] = info[2]
+            result['group'] = info[3]
+            result['comment'] = info[4]
+            result['home'] = info[5]
+            result['shell'] = info[6]
+ if user.groups is not None:
+ result['groups'] = user.groups
+
+ # handle missing homedirs
+ info = user.user_info()
+ if user.home is None:
+ user.home = info[5]
+ if not os.path.exists(user.home) and user.create_home:
+ if not module.check_mode:
+ user.create_homedir(user.home)
+ user.chown_homedir(info[2], info[3], user.home)
+ result['changed'] = True
+
+ # deal with ssh key
+ if user.sshkeygen:
+ # generate ssh key (note: this function is check mode aware)
+ (rc, out, err) = user.ssh_key_gen()
+ if rc is not None and rc != 0:
+ module.fail_json(name=user.name, msg=err, rc=rc)
+ if rc == 0:
+ result['changed'] = True
+ (rc, out, err) = user.ssh_key_fingerprint()
+ if rc == 0:
+ result['ssh_fingerprint'] = out.strip()
+ else:
+ result['ssh_fingerprint'] = err.strip()
+ result['ssh_key_file'] = user.get_ssh_key_path()
+ result['ssh_public_key'] = user.get_ssh_public_key()
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/wait_for.py b/lib/ansible/modules/wait_for.py
new file mode 100644
index 00000000..b855030c
--- /dev/null
+++ b/lib/ansible/modules/wait_for.py
@@ -0,0 +1,669 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wait_for
+short_description: Waits for a condition before continuing
+description:
+  - You can wait for a set amount of time C(timeout); this is the default if nothing else, or only C(timeout), is specified.
+ This does not produce an error.
+  - Waiting for a port to become available is useful when services are not immediately available after their init scripts return,
+    which is true of certain Java application servers.
+ - It is also useful when starting guests with the M(community.libvirt.virt) module and needing to pause until they are ready.
+  - This module can also be used to wait for a regex match on a string to be present in a file.
+ - In Ansible 1.6 and later, this module can also be used to wait for a file to be available or
+ absent on the filesystem.
+ - In Ansible 1.8 and later, this module can also be used to wait for active connections to be closed before continuing, useful if a node
+ is being rotated out of a load balancer pool.
+ - For Windows targets, use the M(ansible.windows.win_wait_for) module instead.
+version_added: "0.7"
+options:
+ host:
+ description:
+ - A resolvable hostname or IP address to wait for.
+ type: str
+ default: 127.0.0.1
+ timeout:
+ description:
+      - Maximum number of seconds to wait for; when used with another condition it will force an error.
+      - When used without other conditions it is equivalent to just sleeping.
+ type: int
+ default: 300
+ connect_timeout:
+ description:
+ - Maximum number of seconds to wait for a connection to happen before closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - Number of seconds to wait before starting to poll.
+ type: int
+ default: 0
+ port:
+ description:
+ - Port number to poll.
+ - C(path) and C(port) are mutually exclusive parameters.
+ type: int
+ active_connection_states:
+ description:
+ - The list of TCP connection states which are counted as active connections.
+ type: list
+ default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]
+ version_added: "2.3"
+ state:
+ description:
+      - Either C(present), C(started), C(stopped), C(absent), or C(drained).
+ - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections.
+ - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing,
+        C(absent) will check that the file is absent or removed.
+ type: str
+ choices: [ absent, drained, present, started, stopped ]
+ default: started
+ path:
+ description:
+ - Path to a file on the filesystem that must exist before continuing.
+ - C(path) and C(port) are mutually exclusive parameters.
+ type: path
+ version_added: "1.4"
+ search_regex:
+ description:
+ - Can be used to match a string in either a file or a socket connection.
+ - Defaults to a multiline regex.
+ type: str
+ version_added: "1.4"
+ exclude_hosts:
+ description:
+ - List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.
+ type: list
+ version_added: "1.8"
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ - Before Ansible 2.3 this was hardcoded to 1 second.
+ type: int
+ default: 1
+ version_added: "2.3"
+ msg:
+ description:
+ - This overrides the normal error message from a failure to meet the required conditions.
+ type: str
+ version_added: "2.4"
+notes:
+ - The ability to use search_regex with a port connection was added in Ansible 1.7.
+ - Prior to Ansible 2.4, testing for the absence of a directory or UNIX socket did not work correctly.
+ - Prior to Ansible 2.4, testing for the presence of a file did not work correctly if the remote user did not have read access to that file.
+  - Under some circumstances when using mandatory access control, a path may always be treated as being absent even if it exists but
+    cannot be modified or created by the remote user.
+ - When waiting for a path, symbolic links will be followed. Many other modules that manipulate files do not follow symbolic links,
+ so operations on the path using other modules may not work exactly as expected.
+seealso:
+- module: ansible.builtin.wait_for_connection
+- module: ansible.windows.win_wait_for
+- module: community.windows.win_wait_for_process
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - John Jarvis (@jarv)
+ - Andrii Radyk (@AnderEnder)
+'''
+
+EXAMPLES = r'''
+- name: Sleep for 300 seconds and continue with play
+ wait_for:
+ timeout: 300
+ delegate_to: localhost
+
+- name: Wait for port 8000 to become open on the host, don't start checking for 10 seconds
+ wait_for:
+ port: 8000
+ delay: 10
+
+- name: Wait for port 8000 of any IP to close active connections, don't start checking for 10 seconds
+ wait_for:
+ host: 0.0.0.0
+ port: 8000
+ delay: 10
+ state: drained
+
+- name: Wait for port 8000 of any IP to close active connections, ignoring connections for specified hosts
+ wait_for:
+ host: 0.0.0.0
+ port: 8000
+ state: drained
+ exclude_hosts: 10.2.1.2,10.2.1.3
+
+- name: Wait until the file /tmp/foo is present before continuing
+ wait_for:
+ path: /tmp/foo
+
+- name: Wait until the string "completed" is in the file /tmp/foo before continuing
+ wait_for:
+ path: /tmp/foo
+ search_regex: completed
+
+- name: Wait until regex pattern matches in the file /tmp/foo and print the matched group
+ wait_for:
+ path: /tmp/foo
+ search_regex: completed (?P<task>\w+)
+ register: waitfor
+- debug:
+ msg: Completed {{ waitfor['match_groupdict']['task'] }}
+
+- name: Wait until the lock file is removed
+ wait_for:
+ path: /var/lock/file.lock
+ state: absent
+
+- name: Wait until the process is finished and pid was destroyed
+ wait_for:
+ path: /proc/3466/status
+ state: absent
+
+- name: Output customized message when failed
+ wait_for:
+ path: /tmp/foo
+ state: present
+ msg: Timeout to find file /tmp/foo
+
+# Do not assume the inventory_hostname is resolvable and delay 10 seconds at start
+- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
+ wait_for:
+ port: 22
+ host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ search_regex: OpenSSH
+ delay: 10
+ connection: local
+
+# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'
+- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
+ wait_for:
+ port: 22
+ host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
+ search_regex: OpenSSH
+ delay: 10
+ vars:
+ ansible_connection: local
+'''
+
+RETURN = r'''
+elapsed:
+ description: The number of seconds that elapsed while waiting
+ returned: always
+ type: int
+ sample: 23
+match_groups:
+ description: Tuple containing all the subgroups of the match as returned by U(https://docs.python.org/2/library/re.html#re.MatchObject.groups)
+ returned: always
+ type: list
+ sample: ['match 1', 'match 2']
+match_groupdict:
+ description: Dictionary containing all the named subgroups of the match, keyed by the subgroup name,
+ as returned by U(https://docs.python.org/2/library/re.html#re.MatchObject.groupdict)
+ returned: always
+ type: dict
+ sample:
+ {
+ 'group': 'match'
+ }
+'''
+
+import binascii
+import datetime
+import errno
+import math
+import os
+import re
+import select
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.sys_info import get_platform_subclass
+from ansible.module_utils._text import to_native
+
+
+HAS_PSUTIL = False
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ HAS_PSUTIL = True
+ # just because we can import it on Linux doesn't mean we will use it
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+
+
+class TCPConnectionInfo(object):
+ """
+ This is a generic TCP Connection Info strategy class that relies
+ on the psutil module, which is not ideal for targets, but necessary
+ for cross platform support.
+
+ A subclass may wish to override some or all of these methods.
+ - _get_exclude_ips()
+ - get_active_connections()
+
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+ platform = 'Generic'
+ distribution = None
+
+ match_all_ips = {
+ socket.AF_INET: '0.0.0.0',
+ socket.AF_INET6: '::',
+ }
+ ipv4_mapped_ipv6_address = {
+ 'prefix': '::ffff',
+ 'match_all': '::ffff:0.0.0.0'
+ }
+
+ def __new__(cls, *args, **kwargs):
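+        # dispatch to the most specific platform subclass for this host
+        # (e.g. LinuxTCPConnectionInfo on Linux); fall back to this generic class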
+ new_cls = get_platform_subclass(TCPConnectionInfo)
+ return super(cls, new_cls).__new__(new_cls)
+
+ def __init__(self, module):
+ self.module = module
+ self.ips = _convert_host_to_ip(module.params['host'])
+ self.port = int(self.module.params['port'])
+ self.exclude_ips = self._get_exclude_ips()
+ if not HAS_PSUTIL:
+ module.fail_json(msg=missing_required_lib('psutil'), exception=PSUTIL_IMP_ERR)
+
+ def _get_exclude_ips(self):
+ exclude_hosts = self.module.params['exclude_hosts']
+ exclude_ips = []
+ if exclude_hosts is not None:
+ for host in exclude_hosts:
+ exclude_ips.extend(_convert_host_to_ip(host))
+ return exclude_ips
+
+ def get_active_connections_count(self):
+ active_connections = 0
+ for p in psutil.process_iter():
+ try:
+ if hasattr(p, 'get_connections'):
+ connections = p.get_connections(kind='inet')
+ else:
+ connections = p.connections(kind='inet')
+ except psutil.Error:
+ # Process is Zombie or other error state
+ continue
+ for conn in connections:
+ if conn.status not in self.module.params['active_connection_states']:
+ continue
+ if hasattr(conn, 'local_address'):
+ (local_ip, local_port) = conn.local_address
+ else:
+ (local_ip, local_port) = conn.laddr
+ if self.port != local_port:
+ continue
+ if hasattr(conn, 'remote_address'):
+ (remote_ip, remote_port) = conn.remote_address
+ else:
+ (remote_ip, remote_port) = conn.raddr
+ if (conn.family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (conn.family, local_ip) in self.ips,
+ (conn.family, self.match_all_ips[conn.family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+ return active_connections
+
+
+# ===========================================
+# Subclass: Linux
+
+class LinuxTCPConnectionInfo(TCPConnectionInfo):
+ """
+ This is a TCP Connection Info evaluation strategy class
+ that utilizes information from Linux's procfs. While less universal,
+ does allow Linux targets to not require an additional library.
+ """
+ platform = 'Linux'
+ distribution = None
+
+ source_file = {
+ socket.AF_INET: '/proc/net/tcp',
+ socket.AF_INET6: '/proc/net/tcp6'
+ }
+ match_all_ips = {
+ socket.AF_INET: '00000000',
+ socket.AF_INET6: '00000000000000000000000000000000',
+ }
+ ipv4_mapped_ipv6_address = {
+ 'prefix': '0000000000000000FFFF0000',
+ 'match_all': '0000000000000000FFFF000000000000'
+ }
+ local_address_field = 1
+ remote_address_field = 2
+ connection_state_field = 3
+
+ def __init__(self, module):
+ self.module = module
+ self.ips = _convert_host_to_hex(module.params['host'])
+ self.port = "%0.4X" % int(module.params['port'])
+ self.exclude_ips = self._get_exclude_ips()
+
+ def _get_exclude_ips(self):
+ exclude_hosts = self.module.params['exclude_hosts']
+ exclude_ips = []
+ if exclude_hosts is not None:
+ for host in exclude_hosts:
+ exclude_ips.extend(_convert_host_to_hex(host))
+ return exclude_ips
+
+ def get_active_connections_count(self):
+ active_connections = 0
+ for family in self.source_file.keys():
+ if not os.path.isfile(self.source_file[family]):
+ continue
+ f = open(self.source_file[family])
+ for tcp_connection in f.readlines():
+ tcp_connection = tcp_connection.strip().split()
+ if tcp_connection[self.local_address_field] == 'local_address':
+ continue
+ if (tcp_connection[self.connection_state_field] not in
+ [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
+ continue
+ (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
+ if self.port != local_port:
+ continue
+ (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
+ if (family, remote_ip) in self.exclude_ips:
+ continue
+ if any((
+ (family, local_ip) in self.ips,
+ (family, self.match_all_ips[family]) in self.ips,
+ local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
+ (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
+ )):
+ active_connections += 1
+ f.close()
+ return active_connections
+
+
+def _convert_host_to_ip(host):
+ """
+    Perform forward DNS resolution on host; an IP address resolves to itself
+
+ Args:
+ host: String with either hostname, IPv4, or IPv6 address
+
+ Returns:
+ List of tuples containing address family and IP
+ """
+ addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
+ ips = []
+ for family, socktype, proto, canonname, sockaddr in addrinfo:
+ ip = sockaddr[0]
+ ips.append((family, ip))
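+        # also record the IPv4-mapped IPv6 form (::ffff:a.b.c.d) so connections
+        # accepted on dual-stack sockets are matched as well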
+ if family == socket.AF_INET:
+ ips.append((socket.AF_INET6, "::ffff:" + ip))
+ return ips
+
+
+def _convert_host_to_hex(host):
+ """
+ Convert the provided host to the format in /proc/net/tcp*
+
+    /proc/net/tcp uses little-endian four-byte hex for IPv4
+    /proc/net/tcp6 uses little-endian hex per four-byte word for IPv6
+
+ Args:
+ host: String with either hostname, IPv4, or IPv6 address
+
+ Returns:
+ List of tuples containing address family and the
+ little-endian converted host
+ """
+ ips = []
+ if host is not None:
+ for family, ip in _convert_host_to_ip(host):
+ hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))
+ hexip_hf = ""
+ for i in range(0, len(hexip_nf), 8):
+ ipgroup_nf = hexip_nf[i:i + 8]
+ ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))
+ hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf)
+ ips.append((family, hexip_hf))
+ return ips
+
+
+def _timedelta_total_seconds(timedelta):
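+    # equivalent of timedelta.total_seconds(); kept because that method does
+    # not exist on Python 2.6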
+ return (
+ timedelta.microseconds + 0.0 +
+ (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
+
+
+def get_connection_state_id(state):
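+    # hex state codes as used in /proc/net/tcp (see the kernel's tcp_states.h)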
+ connection_state_id = {
+ 'ESTABLISHED': '01',
+ 'SYN_SENT': '02',
+ 'SYN_RECV': '03',
+ 'FIN_WAIT1': '04',
+ 'FIN_WAIT2': '05',
+ 'TIME_WAIT': '06',
+ }
+ return connection_state_id[state]
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ timeout=dict(type='int', default=300),
+ connect_timeout=dict(type='int', default=5),
+ delay=dict(type='int', default=0),
+ port=dict(type='int'),
+ active_connection_states=dict(type='list', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
+ path=dict(type='path'),
+ search_regex=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),
+ exclude_hosts=dict(type='list'),
+ sleep=dict(type='int', default=1),
+ msg=dict(type='str'),
+ ),
+ )
+
+ host = module.params['host']
+ timeout = module.params['timeout']
+ connect_timeout = module.params['connect_timeout']
+ delay = module.params['delay']
+ port = module.params['port']
+ state = module.params['state']
+ path = module.params['path']
+ search_regex = module.params['search_regex']
+ msg = module.params['msg']
+
+ if search_regex is not None:
+ compiled_search_re = re.compile(search_regex, re.MULTILINE)
+ else:
+ compiled_search_re = None
+
+ match_groupdict = {}
+ match_groups = ()
+
+ if port and path:
+ module.fail_json(msg="port and path parameter can not both be passed to wait_for", elapsed=0)
+ if path and state == 'stopped':
+ module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module", elapsed=0)
+ if path and state == 'drained':
+ module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module", elapsed=0)
+ if module.params['exclude_hosts'] is not None and state != 'drained':
+ module.fail_json(msg="exclude_hosts should only be with state=drained", elapsed=0)
+ for _connection_state in module.params['active_connection_states']:
+ try:
+ get_connection_state_id(_connection_state)
+ except Exception:
+ module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0)
+
+ start = datetime.datetime.utcnow()
+
+ if delay:
+ time.sleep(delay)
+
+ if not port and not path and state != 'drained':
+ time.sleep(timeout)
+ elif state in ['absent', 'stopped']:
+ # first wait for the stop condition
+ end = start + datetime.timedelta(seconds=timeout)
+
+ while datetime.datetime.utcnow() < end:
+ if path:
+ try:
+ if not os.access(path, os.F_OK):
+ break
+ except IOError:
+ break
+ elif port:
+ try:
+ s = socket.create_connection((host, port), connect_timeout)
+ s.shutdown(socket.SHUT_RDWR)
+ s.close()
+ except Exception:
+ break
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+ else:
+ elapsed = datetime.datetime.utcnow() - start
+ if port:
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ module.fail_json(msg=msg or "Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
+
+ elif state in ['started', 'present']:
+ # wait for start condition
+ end = start + datetime.timedelta(seconds=timeout)
+ while datetime.datetime.utcnow() < end:
+ if path:
+ try:
+ os.stat(path)
+ except OSError as e:
+ # If anything except file not present, throw an error
+                    if e.errno != errno.ENOENT:
+ elapsed = datetime.datetime.utcnow() - start
+ module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
+ # file doesn't exist yet, so continue
+ else:
+ # File exists. Are there additional things to check?
+ if not compiled_search_re:
+ # nope, succeed!
+ break
+ try:
+ f = open(path)
+ try:
+ search = re.search(compiled_search_re, f.read())
+ if search:
+ if search.groupdict():
+ match_groupdict = search.groupdict()
+ if search.groups():
+ match_groups = search.groups()
+
+ break
+ finally:
+ f.close()
+ except IOError:
+ pass
+ elif port:
+ alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
+ try:
+ s = socket.create_connection((host, port), min(connect_timeout, alt_connect_timeout))
+ except Exception:
+ # Failed to connect by connect_timeout. wait and try again
+ pass
+ else:
+ # Connected -- are there additional conditions?
+ if compiled_search_re:
+ data = ''
+ matched = False
+ while datetime.datetime.utcnow() < end:
+ max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
+ (readable, w, e) = select.select([s], [], [], max_timeout)
+ if not readable:
+ # No new data. Probably means our timeout
+ # expired
+ continue
+ response = s.recv(1024)
+ if not response:
+ # Server shutdown
+ break
+ data += to_native(response, errors='surrogate_or_strict')
+ if re.search(compiled_search_re, data):
+ matched = True
+ break
+
+ # Shutdown the client socket
+ try:
+ s.shutdown(socket.SHUT_RDWR)
+ except socket.error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # else, the server broke the connection on its end, assume it's not ready
+ else:
+ s.close()
+ if matched:
+ # Found our string, success!
+ break
+ else:
+ # Connection established, success!
+ try:
+ s.shutdown(socket.SHUT_RDWR)
+ except socket.error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # else, the server broke the connection on its end, assume it's not ready
+ else:
+ s.close()
+ break
+
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+
+ else: # while-else
+ # Timeout expired
+ elapsed = datetime.datetime.utcnow() - start
+ if port:
+ if search_regex:
+ module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
+ else:
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ if search_regex:
+ module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
+ else:
+ module.fail_json(msg=msg or "Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
+
+ elif state == 'drained':
+ # wait until all active connections are gone
+ end = start + datetime.timedelta(seconds=timeout)
+ tcpconns = TCPConnectionInfo(module)
+ while datetime.datetime.utcnow() < end:
+ try:
+ if tcpconns.get_active_connections_count() == 0:
+ break
+ except IOError:
+ pass
+ # Conditions not yet met, wait and try again
+ time.sleep(module.params['sleep'])
+ else:
+ elapsed = datetime.datetime.utcnow() - start
+ module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path,
+ elapsed=elapsed.seconds)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/wait_for_connection.py b/lib/ansible/modules/wait_for_connection.py
new file mode 100644
index 00000000..cbabebf1
--- /dev/null
+++ b/lib/ansible/modules/wait_for_connection.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wait_for_connection
+short_description: Waits until remote system is reachable/usable
+description:
+- Waits for a total of C(timeout) seconds.
+- Retries the transport connection after a timeout of C(connect_timeout).
+- Tests the transport connection every C(sleep) seconds.
+- This module makes use of internal ansible transport (and configuration) and the ping/win_ping module to guarantee correct end-to-end functioning.
+- This module is also supported for Windows targets.
+version_added: '2.3'
+options:
+ connect_timeout:
+ description:
+ - Maximum number of seconds to wait for a connection to happen before closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - Number of seconds to wait before starting to poll.
+ type: int
+ default: 0
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ type: int
+ default: 1
+ timeout:
+ description:
+ - Maximum number of seconds to wait for.
+ type: int
+ default: 600
+notes:
+- This module is also supported for Windows targets.
+seealso:
+- module: ansible.builtin.wait_for
+- module: ansible.windows.win_wait_for
+- module: community.windows.win_wait_for_process
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Wait 600 seconds for target connection to become reachable/usable
+ wait_for_connection:
+
+- name: Wait 300 seconds, but only start checking after 60 seconds
+ wait_for_connection:
+ delay: 60
+ timeout: 300
+
+# Wake desktops, wait for them to become ready and continue playbook
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: Send magic Wake-On-Lan packet to turn on individual systems
+ wakeonlan:
+ mac: '{{ mac }}'
+ broadcast: 192.168.0.255
+ delegate_to: localhost
+
+ - name: Wait for system to become reachable
+ wait_for_connection:
+
+ - name: Gather facts for first time
+ setup:
+
+# Build a new VM, wait for it to become ready and continue playbook
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: Clone new VM, if missing
+ vmware_guest:
+ hostname: '{{ vcenter_ipaddress }}'
+ name: '{{ inventory_hostname_short }}'
+ template: Windows 2012R2
+ customization:
+ hostname: '{{ vm_shortname }}'
+ runonce:
+ - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
+ delegate_to: localhost
+
+ - name: Wait for system to become reachable over WinRM
+ wait_for_connection:
+ timeout: 900
+
+ - name: Gather facts for first time
+ setup:
+'''
+
+RETURN = r'''
+elapsed:
+ description: The number of seconds that elapsed waiting for the connection to appear.
+ returned: always
+ type: float
+ sample: 23.1
+'''
diff --git a/lib/ansible/modules/yum.py b/lib/ansible/modules/yum.py
new file mode 100644
index 00000000..d63fd52a
--- /dev/null
+++ b/lib/ansible/modules/yum.py
@@ -0,0 +1,1698 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+# Copyright: (c) 2014, Epic Games, Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yum
+version_added: historical
+short_description: Manages packages with the I(yum) package manager
+description:
+ - Installs, upgrades, downgrades, removes, and lists packages and groups with the I(yum) package manager.
+ - This module only works on Python 2. If you require Python 3 support see the M(ansible.builtin.dnf) module.
+options:
+ use_backend:
+ description:
+ - This module supports C(yum) (as it always has); this is known as C(yum3)/C(YUM3)/C(yum-deprecated) by
+ upstream yum developers. As of Ansible 2.7+, this module also supports C(YUM4), which is the
+ "new yum" and has a C(dnf) backend.
+ - By default, this module will select the backend based on the C(ansible_pkg_mgr) fact.
+ default: "auto"
+ choices: [ auto, yum, yum4, dnf ]
+ version_added: "2.7"
+ name:
+ description:
+ - A package name or package specifier with version, like C(name-1.0).
+ - If a previous version is specified, the task also needs to turn C(allow_downgrade) on.
+ See the C(allow_downgrade) documentation for caveats with downgrading packages.
+ - When using state=latest, this can be C('*') which means run C(yum -y update).
+ - You can also pass a URL or a local path to an rpm file (using state=present).
+ To operate on several packages this can accept a comma-separated string of packages or (as of 2.0) a list of packages.
+ aliases: [ pkg ]
+ type: list
+ elements: str
+ exclude:
+ description:
+ - Package name(s) to exclude when state=present or state=latest.
+ version_added: "2.0"
+ list:
+ description:
+ - "Package name to run the equivalent of yum list --show-duplicates <package> against. In addition to listing packages,
+ use can also list the following: C(installed), C(updates), C(available) and C(repos)."
+ - This parameter is mutually exclusive with C(name).
+ state:
+ description:
+ - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
+ - C(present) and C(installed) will simply ensure that a desired package is installed.
+ - C(latest) will update the specified package if it's not of the latest available version.
+ - C(absent) and C(removed) will remove the specified package.
+ - Default is C(None); in effect the default action is C(present), unless the C(autoremove) option is
+ enabled for this module, in which case C(absent) is inferred.
+ choices: [ absent, installed, latest, present, removed ]
+ enablerepo:
+ description:
+ - I(Repoid) of repositories to enable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a C(",").
+ - As of Ansible 2.7, this can alternatively be a list instead of a C(",")
+ separated string.
+ version_added: "0.9"
+ disablerepo:
+ description:
+ - I(Repoid) of repositories to disable for the install/update operation.
+ These repos will not persist beyond the transaction.
+ When specifying multiple repos, separate them with a C(",").
+ - As of Ansible 2.7, this can alternatively be a list instead of a C(",")
+ separated string.
+ version_added: "0.9"
+ conf_file:
+ description:
+ - The remote yum configuration file to use for the transaction.
+ version_added: "0.6"
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG checking of signatures of packages being
+ installed. Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ version_added: "1.2"
+ skip_broken:
+ description:
+ - Skip packages with broken dependencies (depsolve) that are causing problems.
+ type: bool
+ default: "no"
+ version_added: "2.3"
+ update_cache:
+ description:
+ - Force yum to check if cache is out of date and redownload if needed.
+ Has an effect only if state is I(present) or I(latest).
+ type: bool
+ default: "no"
+ aliases: [ expire-cache ]
+ version_added: "1.9"
+ validate_certs:
+ description:
+ - This only applies if using an https url as the source of the rpm, e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
+ - Prior to 2.1 the code worked as if this was set to C(yes).
+ type: bool
+ default: "yes"
+ version_added: "2.1"
+
+ update_only:
+ description:
+ - When using latest, only update installed packages. Do not install packages.
+ - Has an effect only if state is I(latest)
+ default: "no"
+ type: bool
+ version_added: "2.5"
+
+ installroot:
+ description:
+ - Specifies an alternative installroot, relative to which all packages
+ will be installed.
+ default: "/"
+ version_added: "2.3"
+ security:
+ description:
+ - If set to C(yes) and C(state=latest), only install updates that have been marked security related.
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ bugfix:
+ description:
+ - If set to C(yes) and C(state=latest), only install updates that have been marked bugfix related.
+ default: "no"
+ version_added: "2.6"
+ allow_downgrade:
+ description:
+ - Specify if the named package and version is allowed to downgrade
+ a possibly already installed higher version of that package.
+ Note that setting allow_downgrade=True can make this module
+ behave in a non-idempotent way. The task could end up with a set
+ of packages that does not match the complete list of specified
+ packages to install (because dependencies between the downgraded
+ package and others can cause changes to the packages which were
+ in the earlier transaction).
+ type: bool
+ default: "no"
+ version_added: "2.4"
+ enable_plugin:
+ description:
+ - I(Plugin) name to enable for the install/update operation.
+ The enabled plugin will not persist beyond the transaction.
+ version_added: "2.5"
+ disable_plugin:
+ description:
+ - I(Plugin) name to disable for the install/update operation.
+ The disabled plugins will not persist beyond the transaction.
+ version_added: "2.5"
+ releasever:
+ description:
+ - Specifies an alternative release from which all packages will be
+ installed.
+ version_added: "2.7"
+ autoremove:
+ description:
+ - If C(yes), removes all "leaf" packages from the system that were originally
+ installed as dependencies of user-installed packages but which are no longer
+ required by any such package. Should be used alone or when state is I(absent).
+ - "NOTE: This feature requires yum >= 3.4.3 (RHEL/CentOS 7+)"
+ type: bool
+ default: "no"
+ version_added: "2.7"
+ disable_excludes:
+ description:
+ - Disable the excludes defined in YUM config files.
+ - If set to C(all), disables all excludes.
+ - If set to C(main), disable excludes defined in [main] in yum.conf.
+ - If set to C(repoid), disable excludes defined for given repo id.
+ version_added: "2.7"
+ download_only:
+ description:
+ - Only download the packages, do not install them.
+ default: "no"
+ type: bool
+ version_added: "2.7"
+ lock_timeout:
+ description:
+ - Amount of time to wait for the yum lockfile to be freed.
+ required: false
+ default: 30
+ type: int
+ version_added: "2.8"
+ install_weak_deps:
+ description:
+ - Will also install all packages linked by a weak dependency relation.
+ - "NOTE: This feature requires yum >= 4 (RHEL/CentOS 8+)"
+ type: bool
+ default: "yes"
+ version_added: "2.8"
+ download_dir:
+ description:
+ - Specifies an alternate directory to store packages.
+ - Has an effect only if I(download_only) is specified.
+ type: str
+ version_added: "2.8"
+ install_repoquery:
+ description:
+ - If repoquery is not available, install yum-utils. If the system is
+ registered to RHN or an RHN Satellite, repoquery allows for querying
+ all channels assigned to the system. It is also required to use the
+ 'list' parameter.
+ - "NOTE: This will run and be logged as a separate yum transation which
+ takes place before any other installation or removal."
+ - "NOTE: This will use the system's default enabled repositories without
+ regard for disablerepo/enablerepo given to the module."
+ required: false
+ version_added: "1.5"
+ default: "yes"
+ type: bool
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+ - In versions prior to 1.9.2 this module installed and removed each package
+ given to the yum module separately. This caused problems when packages
+ specified by filename or url had to be installed or removed together. In
+ 1.9.2 this was fixed so that packages are installed in one yum
+ transaction. However, if one of the packages adds a new yum repository
+ that the other packages come from (such as epel-release) then that package
+ needs to be installed in a separate task. This mimics yum's command line
+ behaviour.
+ - 'Yum itself has two types of groups. "Package groups" are specified in the
+ rpm itself while "environment groups" are specified in a separate file
+ (usually by the distribution). Unfortunately, this division becomes
+ apparent to ansible users because ansible needs to operate on the group
+ of packages in a single transaction and yum requires groups to be specified
+ in different ways when used in that way. Package groups are specified as
+ "@development-tools" and environment groups are "@^gnome-desktop-environment".
+ Use the "yum group list hidden ids" command to see which category of group the group
+ you want to install falls into.'
+ - 'The yum module does not support clearing the yum cache in an idempotent way, so it
+ was decided not to implement it; the only method is to use the command module and call the yum
+ command directly, namely "command: yum clean all"
+ https://github.com/ansible/ansible/pull/31450#issuecomment-352889579'
+# informational: requirements for nodes
+requirements:
+- yum
+author:
+ - Ansible Core Team
+ - Seth Vidal (@skvidal)
+ - Eduard Snesarev (@verm666)
+ - Berend De Schouwer (@berenddeschouwer)
+ - Abhijeet Kasurde (@Akasurde)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Install the latest version of Apache
+ yum:
+ name: httpd
+ state: latest
+
+- name: Install a list of packages (suitable replacement for 2.11 loop deprecation warning)
+ yum:
+ name:
+ - nginx
+ - postgresql
+ - postgresql-server
+ state: present
+
+- name: Install a list of packages with a list variable
+ yum:
+ name: "{{ packages }}"
+ vars:
+ packages:
+ - httpd
+ - httpd-tools
+
+- name: Remove the Apache package
+ yum:
+ name: httpd
+ state: absent
+
+- name: Install the latest version of Apache from the testing repo
+ yum:
+ name: httpd
+ enablerepo: testing
+ state: present
+
+- name: Install one specific version of Apache
+ yum:
+ name: httpd-2.2.29-1.4.amzn1
+ state: present
+
+- name: Upgrade all packages
+ yum:
+ name: '*'
+ state: latest
+
+- name: Upgrade all packages, excluding kernel & foo related packages
+ yum:
+ name: '*'
+ state: latest
+ exclude: kernel*,foo*
+
+- name: Install the nginx rpm from a remote repo
+ yum:
+ name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install nginx rpm from a local file
+ yum:
+ name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
+ state: present
+
+- name: Install the 'Development tools' package group
+ yum:
+ name: "@Development tools"
+ state: present
+
+- name: Install the 'Gnome desktop' environment group
+ yum:
+ name: "@^gnome-desktop-environment"
+ state: present
+
+- name: List ansible packages and register result to print with debug later
+ yum:
+ list: ansible
+ register: result
+
+- name: Install package with multiple repos enabled
+ yum:
+ name: sos
+ enablerepo: "epel,ol7_latest"
+
+- name: Install package with multiple repos disabled
+ yum:
+ name: sos
+ disablerepo: "epel,ol7_latest"
+
+- name: Download the nginx package but do not install it
+ yum:
+ name:
+ - nginx
+ state: latest
+ download_only: true
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
+
+import errno
+import os
+import re
+import tempfile
+
+try:
+ import rpm
+ HAS_RPM_PYTHON = True
+except ImportError:
+ HAS_RPM_PYTHON = False
+
+try:
+ import yum
+ HAS_YUM_PYTHON = True
+except ImportError:
+ HAS_YUM_PYTHON = False
+
+try:
+ from yum.misc import find_unfinished_transactions, find_ts_remaining
+ from rpmUtils.miscutils import splitFilename, compareEVR
+ transaction_helpers = True
+except ImportError:
+ transaction_helpers = False
+
+from contextlib import contextmanager
+from ansible.module_utils.urls import fetch_file
+
+def_qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}"
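+# Rendered ENVRA example (hypothetical package): "0:bash-4.2.46-34.el7.x86_64"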
+rpmbin = None
+
+
+class YumModule(YumDnf):
+ """
+ Yum Ansible module back-end implementation
+ """
+
+ def __init__(self, module):
+
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
+
+ # This populates instance vars for all argument spec params
+ super(YumModule, self).__init__(module)
+
+ self.pkg_mgr_name = "yum"
+ self.lockfile = '/var/run/yum.pid'
+ self._yum_base = None
+
+ def _enablerepos_with_error_checking(self):
+ # NOTE: This seems unintuitive, but it mirrors yum's CLI behavior
+ if len(self.enablerepo) == 1:
+ try:
+ self.yum_base.repos.enableRepo(self.enablerepo[0])
+ except yum.Errors.YumBaseError as e:
+ if u'repository not found' in to_text(e):
+ self.module.fail_json(msg="Repository %s not found." % self.enablerepo[0])
+ else:
+ raise e
+ else:
+ for rid in self.enablerepo:
+ try:
+ self.yum_base.repos.enableRepo(rid)
+ except yum.Errors.YumBaseError as e:
+ if u'repository not found' in to_text(e):
+ self.module.warn("Repository %s not found." % rid)
+ else:
+ raise e
+
+ def is_lockfile_pid_valid(self):
+ try:
+ try:
+ with open(self.lockfile, 'r') as f:
+ oldpid = int(f.readline())
+ except ValueError:
+ # invalid data
+ os.unlink(self.lockfile)
+ return False
+
+ if oldpid == os.getpid():
+ # that's us?
+ os.unlink(self.lockfile)
+ return False
+
+ try:
+ with open("/proc/%d/stat" % oldpid, 'r') as f:
+ stat = f.readline()
+
+ if stat.split()[2] == 'Z':
+ # Zombie
+ os.unlink(self.lockfile)
+ return False
+ except IOError:
+ # either /proc is not mounted or the process is already dead
+ try:
+ # check the state of the process
+ os.kill(oldpid, 0)
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ # No such process
+ os.unlink(self.lockfile)
+ return False
+
+ self.module.fail_json(msg="Unable to check PID %s in %s: %s" % (oldpid, self.lockfile, to_native(e)))
+ except (IOError, OSError) as e:
+ # lockfile disappeared?
+ return False
+
+ # another copy seems to be running
+ return True
+
+ @property
+ def yum_base(self):
+ if self._yum_base:
+ return self._yum_base
+ else:
+ # Only init once
+ self._yum_base = yum.YumBase()
+ self._yum_base.preconf.debuglevel = 0
+ self._yum_base.preconf.errorlevel = 0
+ self._yum_base.preconf.plugins = True
+ self._yum_base.preconf.enabled_plugins = self.enable_plugin
+ self._yum_base.preconf.disabled_plugins = self.disable_plugin
+ if self.releasever:
+ self._yum_base.preconf.releasever = self.releasever
+ if self.installroot != '/':
+ # do not set up installroot by default, because of the error
+ # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
+ # on old yum versions (such as CentOS 6.6)
+ self._yum_base.preconf.root = self.installroot
+ self._yum_base.conf.installroot = self.installroot
+ if self.conf_file and os.path.exists(self.conf_file):
+ self._yum_base.preconf.fn = self.conf_file
+ if os.geteuid() != 0:
+ if hasattr(self._yum_base, 'setCacheDir'):
+ self._yum_base.setCacheDir()
+ else:
+ cachedir = yum.misc.getCacheDir()
+ self._yum_base.repos.setCacheDir(cachedir)
+ self._yum_base.conf.cache = 0
+ if self.disable_excludes:
+ self._yum_base.conf.disable_excludes = self.disable_excludes
+
+ # A side effect of accessing conf is that the configuration is
+ # loaded and plugins are discovered
+ self.yum_base.conf
+
+ try:
+ for rid in self.disablerepo:
+ self.yum_base.repos.disableRepo(rid)
+
+ self._enablerepos_with_error_checking()
+
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return self._yum_base
+
+ def po_to_envra(self, po):
+ if hasattr(po, 'ui_envra'):
+ return po.ui_envra
+
+ return '%s:%s-%s-%s.%s' % (po.epoch, po.name, po.version, po.release, po.arch)
+
+ def is_group_env_installed(self, name):
+ name_lower = name.lower()
+
+ if yum.__version_info__ >= (3, 4):
+ groups_list = self.yum_base.doGroupLists(return_evgrps=True)
+ else:
+ groups_list = self.yum_base.doGroupLists()
+
+ # list of the installed groups on the first index
+ groups = groups_list[0]
+ for group in groups:
+ if name_lower.endswith(group.name.lower()) or name_lower.endswith(group.groupid.lower()):
+ return True
+
+ if yum.__version_info__ >= (3, 4):
+ # list of the installed env_groups on the third index
+ envs = groups_list[2]
+ for env in envs:
+ if name_lower.endswith(env.name.lower()) or name_lower.endswith(env.environmentid.lower()):
+ return True
+
+ return False
+
+ def is_installed(self, repoq, pkgspec, qf=None, is_pkg=False):
+ if qf is None:
+ qf = "%{epoch}:%{name}-%{version}-%{release}.%{arch}\n"
+
+ if not repoq:
+ pkgs = []
+ try:
+ e, m, _ = self.yum_base.rpmdb.matchPackageNames([pkgspec])
+ pkgs = e + m
+ if not pkgs and not is_pkg:
+ pkgs.extend(self.yum_base.returnInstalledPackagesByDep(pkgspec))
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return [self.po_to_envra(p) for p in pkgs]
+
+ else:
+ global rpmbin
+ if not rpmbin:
+ rpmbin = self.module.get_bin_path('rpm', required=True)
+
+ cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
+ if self.installroot != '/':
+ cmd.extend(['--root', self.installroot])
+ # rpm localizes messages and we're screen scraping so make sure we use
+ # the C locale
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ if rc != 0 and 'is not installed' not in out:
+ self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
+ if 'is not installed' in out:
+ out = ''
+
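+ # rpm prints '(none)' for an empty epoch; normalize it to '0' so the
+ # resulting ENVRA strings stay comparable, e.g. (hypothetically)
+ # '(none):tzdata-2020a-1.el7.noarch' becomes '0:tzdata-2020a-1.el7.noarch'.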
+ pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
+ if not pkgs and not is_pkg:
+ cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
+ if self.installroot != '/':
+ cmd.extend(['--root', self.installroot])
+ rc2, out2, err2 = self.module.run_command(cmd, environ_update=lang_env)
+ else:
+ rc2, out2, err2 = (0, '', '')
+
+ if rc2 != 0 and 'no package provides' not in out2:
+ self.module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
+ if 'no package provides' in out2:
+ out2 = ''
+ pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
+ return pkgs
+
+ return []
+
+ def is_available(self, repoq, pkgspec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ try:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
+ pkgs = e + m
+ if not pkgs:
+ pkgs.extend(self.yum_base.returnPackagesByDep(pkgspec))
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return [self.po_to_envra(p) for p in pkgs]
+
+ else:
+ myrepoq = list(repoq)
+
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+ if self.releasever:
+ myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--qf", qf, pkgspec]
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ return [p for p in out.split('\n') if p.strip()]
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
+
+ return []
+
+ def is_update(self, repoq, pkgspec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ updates = []
+
+ try:
+ pkgs = self.yum_base.returnPackagesByDep(pkgspec) + \
+ self.yum_base.returnInstalledPackagesByDep(pkgspec)
+ if not pkgs:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([pkgspec])
+ pkgs = e + m
+ updates = self.yum_base.doPackageLists(pkgnarrow='updates').updates
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ retpkgs = (pkg for pkg in pkgs if pkg in updates)
+
+ return set(self.po_to_envra(p) for p in retpkgs)
+
+ else:
+ myrepoq = list(repoq)
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+ if self.releasever:
+ myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ return set(p for p in out.split('\n') if p.strip())
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err))
+
+ return set()
+
+ def what_provides(self, repoq, req_spec, qf=def_qf):
+ if not repoq:
+
+ pkgs = []
+ try:
+ try:
+ pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
+ self.yum_base.returnInstalledPackagesByDep(req_spec)
+ except Exception as e:
+ # If a repo with `repo_gpgcheck=1` is added and the repo GPG
+ # key was never accepted, querying this repo will throw an
+ # error: 'repomd.xml signature could not be verified'. In that
+ # situation we need to run `yum -y makecache` which will accept
+ # the key and try again.
+ if 'repomd.xml signature could not be verified' in to_native(e):
+ if self.releasever:
+ self.module.run_command(self.yum_basecmd + ['makecache'] + ['--releasever=%s' % self.releasever])
+ else:
+ self.module.run_command(self.yum_basecmd + ['makecache'])
+ pkgs = self.yum_base.returnPackagesByDep(req_spec) + \
+ self.yum_base.returnInstalledPackagesByDep(req_spec)
+ else:
+ raise
+ if not pkgs:
+ e, m, _ = self.yum_base.pkgSack.matchPackageNames([req_spec])
+ pkgs.extend(e)
+ pkgs.extend(m)
+ e, m, _ = self.yum_base.rpmdb.matchPackageNames([req_spec])
+ pkgs.extend(e)
+ pkgs.extend(m)
+ except Exception as e:
+ self.module.fail_json(msg="Failure talking to yum: %s" % to_native(e))
+
+ return set(self.po_to_envra(p) for p in pkgs)
+
+ else:
+ myrepoq = list(repoq)
+ r_cmd = ['--disablerepo', ','.join(self.disablerepo)]
+ myrepoq.extend(r_cmd)
+
+ r_cmd = ['--enablerepo', ','.join(self.enablerepo)]
+ myrepoq.extend(r_cmd)
+
+ if self.releasever:
+ myrepoq.append('--releasever=%s' % self.releasever)
+
+ cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
+ rc, out, err = self.module.run_command(cmd)
+ cmd = myrepoq + ["--qf", qf, req_spec]
+ rc2, out2, err2 = self.module.run_command(cmd)
+ if rc == 0 and rc2 == 0:
+ out += out2
+ pkgs = set([p for p in out.split('\n') if p.strip()])
+ if not pkgs:
+ pkgs = self.is_installed(repoq, req_spec, qf=qf)
+ return pkgs
+ else:
+ self.module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))
+
+ return set()
+
+ def transaction_exists(self, pkglist):
+ """
+ checks the package list to see if any packages are
+ involved in an incomplete transaction
+ """
+
+ conflicts = []
+ if not transaction_helpers:
+ return conflicts
+
+ # first, we create a list of the package 'nvreas'
+ # so we can compare the pieces later more easily
+ pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
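+ # splitFilename returns (name, version, release, epoch, arch), e.g.
+ # splitFilename('bash-4.2.46-34.el7.x86_64') ->
+ # ('bash', '4.2.46', '34.el7', '', 'x86_64')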
+
+ # next, we build the list of packages that are
+ # contained within an unfinished transaction
+ unfinished_transactions = find_unfinished_transactions()
+ for trans in unfinished_transactions:
+ steps = find_ts_remaining(trans)
+ for step in steps:
+ # the action is install/erase/etc., but we only
+ # care about the package spec contained in the step
+ (action, step_spec) = step
+ (n, v, r, e, a) = splitFilename(step_spec)
+ # and see if that spec is in the list of packages
+ # requested for installation/updating
+ for pkg in pkglist_nvreas:
+ # if the name and arch match, we're going to assume
+ # this package is part of a pending transaction
+ # the label is just for display purposes
+ label = "%s-%s" % (n, a)
+ if n == pkg[0] and a == pkg[4]:
+ if label not in conflicts:
+ conflicts.append("%s-%s" % (n, a))
+ break
+ return conflicts
+
+ def local_envra(self, path):
+ """return envra of a local rpm passed in"""
+
+ ts = rpm.TransactionSet()
+ ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
+ fd = os.open(path, os.O_RDONLY)
+ try:
+ header = ts.hdrFromFdno(fd)
+ except rpm.error as e:
+ return None
+ finally:
+ os.close(fd)
+
+ return '%s:%s-%s-%s.%s' % (
+ header[rpm.RPMTAG_EPOCH] or '0',
+ header[rpm.RPMTAG_NAME],
+ header[rpm.RPMTAG_VERSION],
+ header[rpm.RPMTAG_RELEASE],
+ header[rpm.RPMTAG_ARCH]
+ )
+
+ @contextmanager
+ def set_env_proxy(self):
+ # set the system proxy environment variables, saving any existing values
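+ # e.g. a hypothetical yum.conf with proxy=http://proxy.example.com:3128,
+ # proxy_username=joe and proxy_password=secret exports http_proxy and
+ # https_proxy as 'http://joe:secret@proxy.example.com:3128' for the
+ # duration of the context.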
+ namepass = ""
+ scheme = ["http", "https"]
+ old_proxy_env = [os.getenv("http_proxy"), os.getenv("https_proxy")]
+ try:
+ # "_none_" is a special value to disable proxy in yum.conf/*.repo
+ if self.yum_base.conf.proxy and self.yum_base.conf.proxy not in ("_none_",):
+ if self.yum_base.conf.proxy_username:
+ namepass = namepass + self.yum_base.conf.proxy_username
+ proxy_url = self.yum_base.conf.proxy
+ if self.yum_base.conf.proxy_password:
+ namepass = namepass + ":" + self.yum_base.conf.proxy_password
+ elif '@' in self.yum_base.conf.proxy:
+ namepass = self.yum_base.conf.proxy.split('@')[0].split('//')[-1]
+ proxy_url = self.yum_base.conf.proxy.replace("{0}@".format(namepass), "")
+
+ if namepass:
+ namepass = namepass + '@'
+ for item in scheme:
+ os.environ[item + "_proxy"] = re.sub(
+ r"(http://)",
+ r"\g<1>" + namepass, proxy_url
+ )
+ else:
+ for item in scheme:
+ os.environ[item + "_proxy"] = self.yum_base.conf.proxy
+ yield
+ except yum.Errors.YumBaseError:
+ raise
+ finally:
+ # revert to the previous system configuration
+ for item in scheme:
+ if os.getenv("{0}_proxy".format(item)):
+ del os.environ["{0}_proxy".format(item)]
+ if old_proxy_env[0]:
+ os.environ["http_proxy"] = old_proxy_env[0]
+ if old_proxy_env[1]:
+ os.environ["https_proxy"] = old_proxy_env[1]
+
+ def pkg_to_dict(self, pkgstr):
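+ # Parses the pipe-separated repoquery format used in list_stuff, e.g.
+ # (hypothetically) 'httpd|0|2.4.6|93.el7|x86_64|base' ->
+ # {'name': 'httpd', 'epoch': '0', 'version': '2.4.6', 'release': '93.el7',
+ # 'arch': 'x86_64', 'repo': 'base', 'envra': '0:httpd-2.4.6-93.el7.x86_64',
+ # 'yumstate': 'available'}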
+ if pkgstr.strip() and pkgstr.count('|') == 5:
+ n, e, v, r, a, repo = pkgstr.split('|')
+ else:
+ return {'error_parsing': pkgstr}
+
+ d = {
+ 'name': n,
+ 'arch': a,
+ 'epoch': e,
+ 'release': r,
+ 'version': v,
+ 'repo': repo,
+ 'envra': '%s:%s-%s-%s.%s' % (e, n, v, r, a)
+ }
+
+ if repo == 'installed':
+ d['yumstate'] = 'installed'
+ else:
+ d['yumstate'] = 'available'
+
+ return d
+
+ def repolist(self, repoq, qf="%{repoid}"):
+ cmd = repoq + ["--qf", qf, "-a"]
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+ rc, out, _ = self.module.run_command(cmd)
+ if rc == 0:
+ return set(p for p in out.split('\n') if p.strip())
+ else:
+ return []
+
+ def list_stuff(self, repoquerybin, stuff):
+
+ qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
+ # is_installed goes through rpm instead of repoquery so it needs a slightly different format
+ is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
+ repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
+ if self.disablerepo:
+ repoq.extend(['--disablerepo', ','.join(self.disablerepo)])
+ if self.enablerepo:
+ repoq.extend(['--enablerepo', ','.join(self.enablerepo)])
+ if self.installroot != '/':
+ repoq.extend(['--installroot', self.installroot])
+ if self.conf_file and os.path.exists(self.conf_file):
+ repoq += ['-c', self.conf_file]
+
+ if stuff == 'installed':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_installed(repoq, '-a', qf=is_installed_qf)) if p.strip()]
+
+ if stuff == 'updates':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_update(repoq, '-a', qf=qf)) if p.strip()]
+
+ if stuff == 'available':
+ return [self.pkg_to_dict(p) for p in sorted(self.is_available(repoq, '-a', qf=qf)) if p.strip()]
+
+ if stuff == 'repos':
+ return [dict(repoid=name, state='enabled') for name in sorted(self.repolist(repoq)) if name.strip()]
+
+ return [
+ self.pkg_to_dict(p) for p in
+ sorted(self.is_installed(repoq, stuff, qf=is_installed_qf) + self.is_available(repoq, stuff, qf=qf))
+ if p.strip()
+ ]
+
+ def exec_install(self, items, action, pkgs, res):
+ cmd = self.yum_basecmd + [action] + pkgs
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))
+ else:
+ res['changes'] = dict(installed=pkgs)
+
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+
+ if rc == 1:
+ for spec in items:
+ # Fail on invalid urls:
+ if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
+ err = 'Package at %s could not be installed' % spec
+ self.module.fail_json(changed=False, msg=err, rc=rc)
+
+ res['rc'] = rc
+ res['results'].append(out)
+ res['msg'] += err
+ res['changed'] = True
+
+ if ('Nothing to do' in out and rc == 0) or ('does not have any packages' in err):
+ res['changed'] = False
+
+ if rc != 0:
+ res['changed'] = False
+ self.module.fail_json(**res)
+
+ # Fail if yum prints 'No space left on device' because that means some
+ # packages failed executing their post install scripts because of lack of
+ # free space (e.g. kernel package couldn't generate initramfs). Note that
+ # yum can still exit with rc=0 even if some post scripts didn't execute
+ # correctly.
+ if 'No space left on device' in (out or err):
+ res['changed'] = False
+ res['msg'] = 'No space left on device'
+ self.module.fail_json(**res)
+
+ # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
+ # look for each pkg in rpmdb
+ # look for each pkg via obsoletes
+
+ return res
+
+ def install(self, items, repoq):
+
+ pkgs = []
+ downgrade_pkgs = []
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['rc'] = 0
+ res['changed'] = False
+
+ for spec in items:
+ pkg = None
+ downgrade_candidate = False
+
+ # check if pkgspec is installed (if possible for idempotence)
+ if spec.endswith('.rpm') or '://' in spec:
+ if '://' not in spec and not os.path.exists(spec):
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ if '://' in spec:
+ with self.set_env_proxy():
+ package = fetch_file(self.module, spec)
+ if not package.endswith('.rpm'):
+ # yum requires a local file to have the extension of .rpm and we
+ # cannot guarantee that from a URL (redirects, proxies, etc)
+ new_package_path = '%s.rpm' % package
+ os.rename(package, new_package_path)
+ package = new_package_path
+ else:
+ package = spec
+
+ # most common case is the pkg is already installed
+ envra = self.local_envra(package)
+ if envra is None:
+ self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
+ installed_pkgs = self.is_installed(repoq, envra)
+ if installed_pkgs:
+ res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], package))
+ continue
+
+ (name, ver, rel, epoch, arch) = splitFilename(envra)
+ installed_pkgs = self.is_installed(repoq, name)
+
+ # case for two same envr but different archs like x86_64 and i686
+ if len(installed_pkgs) == 2:
+ (cur_name0, cur_ver0, cur_rel0, cur_epoch0, cur_arch0) = splitFilename(installed_pkgs[0])
+ (cur_name1, cur_ver1, cur_rel1, cur_epoch1, cur_arch1) = splitFilename(installed_pkgs[1])
+ cur_epoch0 = cur_epoch0 or '0'
+ cur_epoch1 = cur_epoch1 or '0'
+ compare = compareEVR((cur_epoch0, cur_ver0, cur_rel0), (cur_epoch1, cur_ver1, cur_rel1))
+ if compare == 0 and cur_arch0 != cur_arch1:
+ for installed_pkg in installed_pkgs:
+ if installed_pkg.endswith(arch):
+ installed_pkgs = [installed_pkg]
+
+ if len(installed_pkgs) == 1:
+ installed_pkg = installed_pkgs[0]
+ (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(installed_pkg)
+ cur_epoch = cur_epoch or '0'
+ compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
+
+ # compare > 0 -> higher version is installed
+ # compare == 0 -> exact version is installed
+ # compare < 0 -> lower version is installed
+ if compare > 0 and self.allow_downgrade:
+ downgrade_candidate = True
+ elif compare >= 0:
+ continue
+
+ # else: if there are more installed packages with the same name, that would mean
+ # kernel, gpg-pubkey or the like, so just let yum deal with it and try to install it
+
+ pkg = package
+
+ # groups
+ elif spec.startswith('@'):
+ if self.is_group_env_installed(spec):
+ continue
+
+ pkg = spec
+
+ # range requires or file-requires or pkgname :(
+ else:
+ # most common case is the pkg is already installed and done
+ # short circuit all the bs - and search for it as a pkg in is_installed
+ # if you find it then we're done
+ if not set(['*', '?']).intersection(set(spec)):
+ installed_pkgs = self.is_installed(repoq, spec, is_pkg=True)
+ if installed_pkgs:
+ res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
+ continue
+
+ # look up what pkgs provide this
+ pkglist = self.what_provides(repoq, spec)
+ if not pkglist:
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # if any of the packages are involved in a transaction, fail now
+ # so that we don't hang on the yum operation later
+ conflicts = self.transaction_exists(pkglist)
+ if conflicts:
+ res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['rc'] = 125 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # if any of them are installed
+ # then nothing to do
+
+ found = False
+ for this in pkglist:
+ if self.is_installed(repoq, this, is_pkg=True):
+ found = True
+ res['results'].append('%s providing %s is already installed' % (this, spec))
+ break
+
+ # if the version of the pkg you have installed is not in ANY repo, but there are
+ # other versions in the repos (both higher and lower) then the previous checks won't work.
+ # so we check one more time. This really only works for pkgname - not for file provides or virt provides
+ # but virt provides should be all caught in what_provides on its own.
+ # highly irritating
+ if not found:
+ if self.is_installed(repoq, spec):
+ found = True
+ res['results'].append('package providing %s is already installed' % (spec))
+
+ if found:
+ continue
+
+ # Downgrade - The yum install command will only install or upgrade to a spec version, it will
+ # not install an older version of an RPM even if specified by the install spec. So we need to
+ # determine if this is a downgrade, and then use the yum downgrade command to install the RPM.
+ if self.allow_downgrade:
+ for package in pkglist:
+ # Get the NEVRA of the requested package using pkglist instead of spec because pkglist
+ # contains consistently-formatted package names returned by yum, rather than user input
+ # that is often not parsed correctly by splitFilename().
+ (name, ver, rel, epoch, arch) = splitFilename(package)
+
+ # Check if any version of the requested package is installed
+ inst_pkgs = self.is_installed(repoq, name, is_pkg=True)
+ if inst_pkgs:
+ (cur_name, cur_ver, cur_rel, cur_epoch, cur_arch) = splitFilename(inst_pkgs[0])
+ compare = compareEVR((cur_epoch, cur_ver, cur_rel), (epoch, ver, rel))
+ if compare > 0:
+ downgrade_candidate = True
+ else:
+ downgrade_candidate = False
+ break
+
+ # If package needs to be installed/upgraded/downgraded, then pass in the spec
+ # we could get here if nothing provides it but that's not
+ # the error we're catching here
+ pkg = spec
+
+ if downgrade_candidate and self.allow_downgrade:
+ downgrade_pkgs.append(pkg)
+ else:
+ pkgs.append(pkg)
+
+ if downgrade_pkgs:
+ res = self.exec_install(items, 'downgrade', downgrade_pkgs, res)
+
+ if pkgs:
+ res = self.exec_install(items, 'install', pkgs, res)
+
+ return res
+
+ def remove(self, items, repoq):
+
+ pkgs = []
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['changed'] = False
+ res['rc'] = 0
+
+ for pkg in items:
+ if pkg.startswith('@'):
+ installed = self.is_group_env_installed(pkg)
+ else:
+ installed = self.is_installed(repoq, pkg)
+
+ if installed:
+ pkgs.append(pkg)
+ else:
+ res['results'].append('%s is not installed' % pkg)
+
+ if pkgs:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs))
+ else:
+ res['changes'] = dict(removed=pkgs)
+
+ # run an actual yum transaction
+ if self.autoremove:
+ cmd = self.yum_basecmd + ["autoremove"] + pkgs
+ else:
+ cmd = self.yum_basecmd + ["remove"] + pkgs
+ rc, out, err = self.module.run_command(cmd)
+
+ res['rc'] = rc
+ res['results'].append(out)
+ res['msg'] = err
+
+ if rc != 0:
+ if self.autoremove and 'No such command' in out:
+ self.module.fail_json(msg='Version of YUM too old for autoremove: Requires yum 3.4.3 (RHEL/CentOS 7+)')
+ else:
+ self.module.fail_json(**res)
+
+ # compile the results into one batch. If anything is changed
+ # then mark changed
+ # at the end - if we've end up failed then fail out of the rest
+ # of the process
+
+ # at this point we check to see if the pkg is no longer present
+ self._yum_base = None # previous YumBase package index is now invalid
+ for pkg in pkgs:
+ if pkg.startswith('@'):
+ installed = self.is_group_env_installed(pkg)
+ else:
+ installed = self.is_installed(repoq, pkg, is_pkg=True)
+
+ if installed:
+ # Return a message so it's obvious to the user why yum failed
+ # and which package couldn't be removed. More details:
+ # https://github.com/ansible/ansible/issues/35672
+ res['msg'] = "Package '%s' couldn't be removed!" % pkg
+ self.module.fail_json(**res)
+
+ res['changed'] = True
+
+ return res
+
+ def run_check_update(self):
+ # run check-update to see if we have packages pending
+ if self.releasever:
+ rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'] + ['--releasever=%s' % self.releasever])
+ else:
+ rc, out, err = self.module.run_command(self.yum_basecmd + ['check-update'])
+ return rc, out, err
+
+ @staticmethod
+ def parse_check_update(check_update_output):
+ updates = {}
+ obsoletes = {}
+
+ # remove incorrect new lines in longer columns in output from yum check-update
+ # yum line wrapping can move the repo to the next line
+ #
+ # Meant to filter out sets of lines like:
+ # some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7
+ # some-repo-label
+ #
+ # But it also needs to avoid catching lines like:
+ # Loading mirror speeds from cached hostfile
+ #
+ # ceph.x86_64 1:11.2.0-0.el7 ceph
+
+ # preprocess string and filter out empty lines so the regex below works
+ out = re.sub(r'\n[^\w]\W+(.*)', r' \1', check_update_output)
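+ # so the wrapped example above collapses to a single line:
+ # 'some_looooooooooooooooooooooooooooooooooooong_package_name 1:1.2.3-1.el7 some-repo-label'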
+
+ available_updates = out.split('\n')
+
+ # build update dictionary
+ for line in available_updates:
+ line = line.split()
+ # ignore irrelevant lines
+ # '*' in line matches lines like mirror lists:
+ # * base: mirror.corbina.net
+ # len(line) != 3 or 6 could be junk or a continuation
+ # len(line) = 6 is package obsoletes
+ #
+ # FIXME: what is the '.' not in line conditional for?
+
+ if '*' in line or len(line) not in [3, 6] or '.' not in line[0]:
+ continue
+
+ pkg, version, repo = line[0], line[1], line[2]
+ name, dist = pkg.rsplit('.', 1)
+ updates.update({name: {'version': version, 'dist': dist, 'repo': repo}})
+
+ if len(line) == 6:
+ obsolete_pkg, obsolete_version, obsolete_repo = line[3], line[4], line[5]
+ obsolete_name, obsolete_dist = obsolete_pkg.rsplit('.', 1)
+ obsoletes.update({obsolete_name: {'version': obsolete_version, 'dist': obsolete_dist, 'repo': obsolete_repo}})
+
+ return updates, obsoletes
+
+ def latest(self, items, repoq):
+
+ res = {}
+ res['results'] = []
+ res['msg'] = ''
+ res['changed'] = False
+ res['rc'] = 0
+ pkgs = {}
+ pkgs['update'] = []
+ pkgs['install'] = []
+ updates = {}
+ obsoletes = {}
+ update_all = False
+ cmd = None
+
+ # determine if we're doing an update all
+ if '*' in items:
+ update_all = True
+
+ rc, out, err = self.run_check_update()
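+ # yum check-update exit codes: 0 means no updates pending, 100 means
+ # updates are available, 1 means an error occurred (see yum(8))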
+
+ if rc == 0 and update_all:
+ res['results'].append('Nothing to do here, all packages are up to date')
+ return res
+ elif rc == 100:
+ updates, obsoletes = self.parse_check_update(out)
+ elif rc == 1:
+ res['msg'] = err
+ res['rc'] = rc
+ self.module.fail_json(**res)
+
+ if update_all:
+ cmd = self.yum_basecmd + ['update']
+ will_update = set(updates.keys())
+ will_update_from_other_package = dict()
+ else:
+ will_update = set()
+ will_update_from_other_package = dict()
+ for spec in items:
+ # some guess work involved with groups. update @<group> will install the group if missing
+ if spec.startswith('@'):
+ pkgs['update'].append(spec)
+ will_update.add(spec)
+ continue
+
+ # check if pkgspec is installed (if possible for idempotence)
+ # localpkg
+ if spec.endswith('.rpm') and '://' not in spec:
+ if not os.path.exists(spec):
+ res['msg'] += "No RPM file matching '%s' found on system" % spec
+ res['results'].append("No RPM file matching '%s' found on system" % spec)
+ res['rc'] = 127 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # get the pkg e:name-v-r.arch
+ envra = self.local_envra(spec)
+
+ if envra is None:
+ self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
+
+ # local rpm files can't be updated
+ if self.is_installed(repoq, envra):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ continue
+
+ # URL
+ if '://' in spec:
+ # download package so that we can check if it's already installed
+ with self.set_env_proxy():
+ package = fetch_file(self.module, spec)
+ envra = self.local_envra(package)
+
+ if envra is None:
+ self.module.fail_json(msg="Failed to get nevra information from RPM package: %s" % spec)
+
+ # local rpm files can't be updated
+ if self.is_installed(repoq, envra):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ continue
+
+ # dep/pkgname - find it
+ if self.is_installed(repoq, spec):
+ pkgs['update'].append(spec)
+ else:
+ pkgs['install'].append(spec)
+ pkglist = self.what_provides(repoq, spec)
+ # FIXME..? may not be desirable to throw an exception here if a single package is missing
+ if not pkglist:
+ res['msg'] += "No package matching '%s' found available, installed or updated" % spec
+ res['results'].append("No package matching '%s' found available, installed or updated" % spec)
+ res['rc'] = 126 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ nothing_to_do = True
+ for pkg in pkglist:
+ if spec in pkgs['install'] and self.is_available(repoq, pkg):
+ nothing_to_do = False
+ break
+
+ # this contains the full NVR and spec could contain wildcards
+ # or virtual provides (like "python-*" or "smtp-daemon") while
+ # updates contains name only.
+ pkgname, _, _, _, _ = splitFilename(pkg)
+ if spec in pkgs['update'] and pkgname in updates:
+ nothing_to_do = False
+ will_update.add(spec)
+ # Massage the updates list
+ if spec != pkgname:
+ # For reporting what packages would be updated more
+ # succinctly
+ will_update_from_other_package[spec] = pkgname
+ break
+
+ if not self.is_installed(repoq, spec) and self.update_only:
+ res['results'].append("Packages providing %s not installed due to update_only specified" % spec)
+ continue
+ if nothing_to_do:
+ res['results'].append("All packages providing %s are up to date" % spec)
+ continue
+
+ # if any of the packages are involved in a transaction, fail now
+ # so that we don't hang on the yum operation later
+ conflicts = self.transaction_exists(pkglist)
+ if conflicts:
+ res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
+ res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts))
+ res['rc'] = 128 # Ensure the task fails in with-loop
+ self.module.fail_json(**res)
+
+ # check_mode output
+ to_update = []
+ for w in will_update:
+ if w.startswith('@'):
+ to_update.append((w, None))
+ elif w not in updates:
+ other_pkg = will_update_from_other_package[w]
+ to_update.append(
+ (
+ w,
+ 'because of (at least) %s-%s.%s from %s' % (
+ other_pkg,
+ updates[other_pkg]['version'],
+ updates[other_pkg]['dist'],
+ updates[other_pkg]['repo']
+ )
+ )
+ )
+ else:
+ to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))
+
+ if self.update_only:
+ res['changes'] = dict(installed=[], updated=to_update)
+ else:
+ res['changes'] = dict(installed=pkgs['install'], updated=to_update)
+
+ if obsoletes:
+ res['obsoletes'] = obsoletes
+
+ # return results before we actually execute stuff
+ if self.module.check_mode:
+ if will_update or pkgs['install']:
+ res['changed'] = True
+ return res
+
+ if self.releasever:
+ cmd.extend(['--releasever=%s' % self.releasever])
+
+ # run commands
+ if cmd: # update all
+ rc, out, err = self.module.run_command(cmd)
+ res['changed'] = True
+ elif self.update_only:
+ if pkgs['update']:
+ cmd = self.yum_basecmd + ['update'] + pkgs['update']
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ out_lower = out.strip().lower()
+ if not out_lower.endswith("no packages marked for update") and \
+ not out_lower.endswith("nothing to do"):
+ res['changed'] = True
+ else:
+ rc, out, err = [0, '', '']
+ elif pkgs['install'] or will_update and not self.update_only:
+ cmd = self.yum_basecmd + ['install'] + pkgs['install'] + pkgs['update']
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = self.module.run_command(cmd, environ_update=lang_env)
+ out_lower = out.strip().lower()
+ if not out_lower.endswith("no packages marked for update") and \
+ not out_lower.endswith("nothing to do"):
+ res['changed'] = True
+ else:
+ rc, out, err = [0, '', '']
+
+ res['rc'] = rc
+ res['msg'] += err
+ res['results'].append(out)
+
+ if rc:
+ res['failed'] = True
+
+ return res
+
+ def ensure(self, repoq):
+ pkgs = self.names
+
+ # autoremove was provided without `name`
+ if not self.names and self.autoremove:
+ pkgs = []
+ self.state = 'absent'
+
+ if self.conf_file and os.path.exists(self.conf_file):
+ self.yum_basecmd += ['-c', self.conf_file]
+
+ if repoq:
+ repoq += ['-c', self.conf_file]
+
+ if self.skip_broken:
+ self.yum_basecmd.extend(['--skip-broken'])
+
+ if self.disablerepo:
+ self.yum_basecmd.extend(['--disablerepo=%s' % ','.join(self.disablerepo)])
+
+ if self.enablerepo:
+ self.yum_basecmd.extend(['--enablerepo=%s' % ','.join(self.enablerepo)])
+
+ if self.enable_plugin:
+ self.yum_basecmd.extend(['--enableplugin', ','.join(self.enable_plugin)])
+
+ if self.disable_plugin:
+ self.yum_basecmd.extend(['--disableplugin', ','.join(self.disable_plugin)])
+
+ if self.exclude:
+ e_cmd = ['--exclude=%s' % ','.join(self.exclude)]
+ self.yum_basecmd.extend(e_cmd)
+
+ if self.disable_excludes:
+ self.yum_basecmd.extend(['--disableexcludes=%s' % self.disable_excludes])
+
+ if self.download_only:
+ self.yum_basecmd.extend(['--downloadonly'])
+
+ if self.download_dir:
+ self.yum_basecmd.extend(['--downloaddir=%s' % self.download_dir])
+
+ if self.releasever:
+ self.yum_basecmd.extend(['--releasever=%s' % self.releasever])
+
+ if self.installroot != '/':
+ # do not set up installroot by default, because of the error
+ # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
+ # on old yum versions (such as CentOS 6.6)
+ e_cmd = ['--installroot=%s' % self.installroot]
+ self.yum_basecmd.extend(e_cmd)
+
+ if self.state in ('installed', 'present', 'latest'):
+ """ The need of this entire if conditional has to be changed
+ this function is the ensure function that is called
+ in the main section.
+
+ This conditional tends to disable/enable repo for
+ install present latest action, same actually
+ can be done for remove and absent action
+
+ As solution I would advice to cal
+ try: self.yum_base.repos.disableRepo(disablerepo)
+ and
+ try: self.yum_base.repos.enableRepo(enablerepo)
+ right before any yum_cmd is actually called regardless
+ of yum action.
+
+ Please note that enable/disablerepo options are general
+ options, this means that we can call those with any action
+ option. https://linux.die.net/man/8/yum
+
+ This docstring will be removed together when issue: #21619
+ will be solved.
+
+ This has been triggered by: #19587
+ """
+
+ if self.update_cache:
+ self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
+
+ try:
+ current_repos = self.yum_base.repos.repos.keys()
+ if self.enablerepo:
+ try:
+ new_repos = self.yum_base.repos.repos.keys()
+ for i in new_repos:
+ if i not in current_repos:
+ rid = self.yum_base.repos.getRepo(i)
+ a = rid.repoXML.repoid # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
+ current_repos = new_repos
+ except yum.Errors.YumBaseError as e:
+ self.module.fail_json(msg="Error setting/accessing repos: %s" % to_native(e))
+ except yum.Errors.YumBaseError as e:
+ self.module.fail_json(msg="Error accessing repos: %s" % to_native(e))
+ if self.state == 'latest' or self.update_only:
+ if self.disable_gpg_check:
+ self.yum_basecmd.append('--nogpgcheck')
+ if self.security:
+ self.yum_basecmd.append('--security')
+ if self.bugfix:
+ self.yum_basecmd.append('--bugfix')
+ res = self.latest(pkgs, repoq)
+ elif self.state in ('installed', 'present'):
+ if self.disable_gpg_check:
+ self.yum_basecmd.append('--nogpgcheck')
+ res = self.install(pkgs, repoq)
+ elif self.state in ('removed', 'absent'):
+ res = self.remove(pkgs, repoq)
+ else:
+ # should be caught by AnsibleModule argument_spec
+ self.module.fail_json(
+ msg="we should never get here unless this all failed",
+ changed=False,
+ results='',
+ errors='unexpected state'
+ )
+ return res
+
+ @staticmethod
+ def has_yum():
+ return HAS_YUM_PYTHON
+
+ def run(self):
+ """
+ actually execute the module code backend
+ """
+
+ error_msgs = []
+ if not HAS_RPM_PYTHON:
+ error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
+ if not HAS_YUM_PYTHON:
+ error_msgs.append('The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
+
+ self.wait_for_lock()
+
+ if error_msgs:
+ self.module.fail_json(msg='. '.join(error_msgs))
+
+ # fedora will redirect yum to dnf, which has incompatibilities
+ # with how this module expects yum to operate. If yum-deprecated
+ # is available, use that instead to emulate the old behaviors.
+ if self.module.get_bin_path('yum-deprecated'):
+ yumbin = self.module.get_bin_path('yum-deprecated')
+ else:
+ yumbin = self.module.get_bin_path('yum')
+
+ # need debug level 2 to get 'Nothing to do' for groupinstall.
+ self.yum_basecmd = [yumbin, '-d', '2', '-y']
+
+ if self.update_cache and not self.names and not self.list:
+ rc, stdout, stderr = self.module.run_command(self.yum_basecmd + ['clean', 'expire-cache'])
+ if rc == 0:
+ self.module.exit_json(
+ changed=False,
+ msg="Cache updated",
+ rc=rc,
+ results=[]
+ )
+ else:
+ self.module.exit_json(
+ changed=False,
+ msg="Failed to update cache",
+ rc=rc,
+ results=[stderr],
+ )
+
+ repoquerybin = self.module.get_bin_path('repoquery', required=False)
+
+ if self.install_repoquery and not repoquerybin and not self.module.check_mode:
+ yum_path = self.module.get_bin_path('yum')
+ if yum_path:
+ if self.releasever:
+ self.module.run_command('%s -y install yum-utils --releasever %s' % (yum_path, self.releasever))
+ else:
+ self.module.run_command('%s -y install yum-utils' % yum_path)
+ repoquerybin = self.module.get_bin_path('repoquery', required=False)
+
+ if self.list:
+ if not repoquerybin:
+ self.module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.")
+ results = {'results': self.list_stuff(repoquerybin, self.list)}
+ else:
+ # If rhn-plugin is installed and no rhn-certificate is available on
+ # the system then users will see an error message using the yum API.
+ # Use repoquery in those cases.
+
+ repoquery = None
+ try:
+ yum_plugins = self.yum_base.plugins._plugins
+ except AttributeError:
+ pass
+ else:
+ if 'rhnplugin' in yum_plugins:
+ if repoquerybin:
+ repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
+ if self.installroot != '/':
+ repoquery.extend(['--installroot', self.installroot])
+
+ if self.disable_excludes:
+ # repoquery does not support --disableexcludes,
+ # so make a temp copy of yum.conf and get rid of the 'exclude=' line there
+ try:
+ with open('/etc/yum.conf', 'r') as f:
+ content = f.readlines()
+
+ tmp_conf_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, delete=False)
+ self.module.add_cleanup_file(tmp_conf_file.name)
+
+ tmp_conf_file.writelines([c for c in content if not c.startswith("exclude=")])
+ tmp_conf_file.close()
+ except Exception as e:
+ self.module.fail_json(msg="Failure setting up repoquery: %s" % to_native(e))
+
+ repoquery.extend(['-c', tmp_conf_file.name])
+
+ results = self.ensure(repoquery)
+ if repoquery:
+ results['msg'] = '%s %s' % (
+ results.get('msg', ''),
+ 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.'
+ )
+
+ self.module.exit_json(**results)
+
+
+def main():
+ # state=installed name=pkgspec
+ # state=removed name=pkgspec
+ # state=latest name=pkgspec
+ #
+ # informational commands:
+ # list=installed
+ # list=updates
+ # list=available
+ # list=repos
+ # list=pkgspec
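+ #
+ # Illustrative ad-hoc invocation (hypothetical host and package):
+ # ansible web1 -m yum -a 'name=httpd state=latest'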
+
+ yumdnf_argument_spec['argument_spec']['use_backend'] = dict(default='auto', choices=['auto', 'yum', 'yum4', 'dnf'])
+
+ module = AnsibleModule(
+ **yumdnf_argument_spec
+ )
+
+ module_implementation = YumModule(module)
+ module_implementation.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/yum_repository.py b/lib/ansible/modules/yum_repository.py
new file mode 100644
index 00000000..71e67576
--- /dev/null
+++ b/lib/ansible/modules/yum_repository.py
@@ -0,0 +1,679 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yum_repository
+author: Jiri Tyr (@jtyr)
+version_added: '2.1'
+short_description: Add or remove YUM repositories
+description:
+ - Add or remove YUM repositories in RPM-based Linux distributions.
+ - If you wish to update an existing repository definition use M(community.general.ini_file) instead.
+
+options:
+ async:
+ description:
+ - If set to C(yes) Yum will download packages and metadata from this
+ repo in parallel, if possible.
+ type: bool
+ default: 'yes'
+ bandwidth:
+ description:
+ - Maximum available network bandwidth in bytes/second. Used with the
+ I(throttle) option.
+ - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
+ throttling will be disabled. If I(throttle) is expressed as a data rate
+ (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
+ throttling).
+ default: 0
+ baseurl:
+ description:
+ - URL to the directory where the yum repository's 'repodata' directory
+ lives.
+ - It can also be a list of multiple URLs.
+ - This, the I(metalink) or I(mirrorlist) parameters are required if I(state) is set to
+ C(present).
+ cost:
+ description:
+ - Relative cost of accessing this repository. Useful for weighing one
+ repo's packages as greater/less than any other.
+ default: 1000
+ deltarpm_metadata_percentage:
+ description:
+ - When the relative size of deltarpm metadata vs pkgs is larger than
+ this, deltarpm metadata is not downloaded from the repo. Note that you
+ can give values over C(100), so C(200) means that the metadata is
+ required to be half the size of the packages. Use C(0) to turn off
+ this check, and always download metadata.
+ default: 100
+ deltarpm_percentage:
+ description:
+ - When the relative size of delta vs pkg is larger than this, delta is
+ not used. Use C(0) to turn off delta rpm processing. Local repositories
+ (with file:// I(baseurl)) have delta rpms turned off by default.
+ default: 75
+ description:
+ description:
+ - A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
+ - This parameter is only required if I(state) is set to C(present).
+ enabled:
+ description:
+ - This tells yum whether or not to use this repository.
+ type: bool
+ default: 'yes'
+ enablegroups:
+ description:
+ - Determines whether yum will allow the use of package groups for this
+ repository.
+ type: bool
+ default: 'yes'
+ exclude:
+ description:
+ - List of packages to exclude from updates or installs. This should be a
+ space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed.
+ - The list can also be a regular YAML array.
+ failovermethod:
+ choices: [roundrobin, priority]
+ default: roundrobin
+ description:
+ - C(roundrobin) randomly selects a URL out of the list of URLs to start
+ with and proceeds through each of them as it encounters a failure
+ contacting the host.
+ - C(priority) starts from the first I(baseurl) listed and reads through
+ them sequentially.
+ file:
+ description:
+ - File name without the C(.repo) extension to save the repo in. Defaults
+ to the value of I(name).
+ gpgcakey:
+ description:
+ - A URL pointing to the ASCII-armored CA key file for the repository.
+ gpgcheck:
+ description:
+ - Tells yum whether or not it should perform a GPG signature check on
+ packages.
+ - No default setting. If the value is not set, the system setting from
+ C(/etc/yum.conf) or system default of C(no) will be used.
+ type: bool
+ gpgkey:
+ description:
+ - A URL pointing to the ASCII-armored GPG key file for the repository.
+ - It can also be a list of multiple URLs.
+ http_caching:
+ description:
+ - Determines how upstream HTTP caches are instructed to handle any HTTP
+ downloads that Yum does.
+ - C(all) means that all HTTP downloads should be cached.
+ - C(packages) means that only RPM package downloads should be cached (but
+ not repository metadata downloads).
+ - C(none) means that no HTTP downloads should be cached.
+ choices: [all, packages, none]
+ default: all
+ include:
+ description:
+ - Include an external configuration file. Both a local path and a URL are
+ supported. The configuration file will be inserted at the position of the
+ I(include=) line. Included files may contain further include lines.
+ Yum will abort with an error if an inclusion loop is detected.
+ includepkgs:
+ description:
+ - List of packages you want to only use from a repository. This should be
+ a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+ are allowed. Substitution variables (e.g. C($releasever)) are honored
+ here.
+ - The list can also be a regular YAML array.
+ ip_resolve:
+ description:
+ - Determines how yum resolves host names.
+ - C(4) or C(IPv4) - resolve to IPv4 addresses only.
+ - C(6) or C(IPv6) - resolve to IPv6 addresses only.
+ choices: [4, 6, IPv4, IPv6, whatever]
+ default: whatever
+ keepalive:
+ description:
+ - This tells yum whether or not HTTP/1.1 keepalive should be used with
+ this repository. This can improve transfer speeds by using one
+ connection when downloading multiple files from a repository.
+ type: bool
+ default: 'no'
+ keepcache:
+ description:
+ - Either C(1) or C(0). Determines whether or not yum keeps the cache of
+ headers and packages after successful installation.
+ choices: ['0', '1']
+ default: '1'
+ metadata_expire:
+ description:
+ - Time (in seconds) after which the metadata will expire.
+ - Default value is 6 hours.
+ default: 21600
+ metadata_expire_filter:
+ description:
+ - Filter the I(metadata_expire) time, allowing a trade of speed for
+ accuracy if a command doesn't require it. Each yum command can specify
+ that it requires a certain level of timeliness quality from the remote
+ repos, from "I'm about to install/upgrade, so this better be current"
+ to "Anything that's available is good enough".
+ - C(never) - Nothing is filtered, always obey I(metadata_expire).
+ - C(read-only:past) - Commands that only care about past information are
+ filtered from metadata expiring. Eg. I(yum history) info (if history
+ needs to lookup anything about a previous transaction, then by
+ definition the remote package was available in the past).
+ - C(read-only:present) - Commands that are balanced between past and
+ future. Eg. I(yum list yum).
+ - C(read-only:future) - Commands that are likely to result in running
+ other commands which will require the latest metadata. Eg.
+ I(yum check-update).
+ - Note that this option does not override "yum clean expire-cache".
+ choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
+ default: 'read-only:present'
+ metalink:
+ description:
+ - Specifies a URL to a metalink file for the repomd.xml. A list of
+ mirrors for the entire repository is generated by converting the
+ mirrors for the repomd.xml file to a I(baseurl).
+ - This, the I(baseurl) or I(mirrorlist) parameters are required if I(state) is set to
+ C(present).
+ mirrorlist:
+ description:
+ - Specifies a URL to a file containing a list of baseurls.
+ - This, the I(baseurl) or I(metalink) parameters are required if I(state) is set to
+ C(present).
+ mirrorlist_expire:
+ description:
+ - Time (in seconds) after which the locally cached mirrorlist will
+ expire.
+ - Default value is 6 hours.
+ default: 21600
+ name:
+ description:
+ - Unique repository ID. This option builds the section name of the repository in the repo file.
+ - This parameter is only required if I(state) is set to C(present) or
+ C(absent).
+ required: true
+ password:
+ description:
+ - Password to use with the username for basic authentication.
+ priority:
+ description:
+ - Enforce ordered protection of repositories. The value is an integer
+ from 1 to 99.
+ - This option only works if the YUM Priorities plugin is installed.
+ default: 99
+ protect:
+ description:
+ - Protect packages from updates from other repositories.
+ type: bool
+ default: 'no'
+ proxy:
+ description:
+ - URL to the proxy server that yum should use. Set to C(_none_) to
+ disable the global proxy setting.
+ proxy_password:
+ description:
+ - Password for this proxy.
+ proxy_username:
+ description:
+ - Username to use for proxy.
+ repo_gpgcheck:
+ description:
+ - This tells yum whether or not it should perform a GPG signature check
+ on the repodata from this repository.
+ type: bool
+ default: 'no'
+ reposdir:
+ description:
+ - Directory where the C(.repo) files will be stored.
+ default: /etc/yum.repos.d
+ retries:
+ description:
+ - Set the number of times any attempt to retrieve a file should retry
+ before returning an error. Setting this to C(0) makes yum try forever.
+ default: 10
+ s3_enabled:
+ description:
+ - Enables support for S3 repositories.
+ - This option only works if the YUM S3 plugin is installed.
+ type: bool
+ default: 'no'
+ skip_if_unavailable:
+ description:
+ - If set to C(yes) yum will continue running if this repository cannot be
+ contacted for any reason. This should be set carefully as all repos are
+ consulted for any given command.
+ type: bool
+ default: 'no'
+ ssl_check_cert_permissions:
+ description:
+ - Whether yum should check the permissions on the paths for the
+ certificates on the repository (both remote and local).
+ - If we can't read any of the files then yum will force
+ I(skip_if_unavailable) to be C(yes). This is most useful for non-root
+ processes which use yum on repos that have client cert files which are
+ readable only by root.
+ type: bool
+ default: 'no'
+ sslcacert:
+ description:
+ - Path to the directory containing the databases of the certificate
+ authorities yum should use to verify SSL certificates.
+ aliases: [ ca_cert ]
+ sslclientcert:
+ description:
+ - Path to the SSL client certificate yum should use to connect to
+ repos/remote sites.
+ aliases: [ client_cert ]
+ sslclientkey:
+ description:
+ - Path to the SSL client key yum should use to connect to repos/remote
+ sites.
+ aliases: [ client_key ]
+ sslverify:
+ description:
+ - Defines whether yum should verify SSL certificates/hosts at all.
+ type: bool
+ default: 'yes'
+ aliases: [ validate_certs ]
+ state:
+ description:
+ - State of the repo file.
+ choices: [absent, present]
+ default: present
+ throttle:
+ description:
+ - Enable bandwidth throttling for downloads.
+ - This option can be expressed as an absolute data rate in bytes/sec. An
+ SI prefix (k, M or G) may be appended to the bandwidth value.
+ timeout:
+ description:
+ - Number of seconds to wait for a connection before timing out.
+ default: 30
+ ui_repoid_vars:
+ description:
+ - When a repository id is displayed, append these yum variables to the
+ string if they are used in the I(baseurl)/etc. Variables are appended
+ in the order listed (and found).
+ default: releasever basearch
+ username:
+ description:
+ - Username to use for basic authentication to a repo or really any url.
+
+extends_documentation_fragment:
+ - files
+
+notes:
+ - All comments will be removed if modifying an existing repo file.
+ - Section order is preserved in an existing repo file.
+ - Parameters in a section are ordered alphabetically in an existing repo
+ file.
+ - The repo file will be automatically deleted if it contains no repository.
+ - When removing a repository, beware that the metadata cache may still remain
+ on disk until you run C(yum clean all). Use a notification handler for this.
+ - "The C(params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
+ handling"
+'''
+
+EXAMPLES = '''
+- name: Add repository
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+
+- name: Add multiple repositories into the same file (1/2)
+ yum_repository:
+ name: epel
+ description: EPEL YUM repo
+ file: external_repos
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ gpgcheck: no
+
+- name: Add multiple repositories into the same file (2/2)
+ yum_repository:
+ name: rpmforge
+ description: RPMforge YUM repo
+ file: external_repos
+ baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
+ mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
+ enabled: no
+
+# Handler showing how to clean yum metadata cache
+- name: yum-clean-metadata
+ command: yum clean metadata
+ args:
+ warn: no
+
+# Example removing a repository and cleaning up metadata cache
+- name: Remove repository (and clean up left-over metadata)
+ yum_repository:
+ name: epel
+ state: absent
+ notify: yum-clean-metadata
+
+- name: Remove repository from a specific repo file
+ yum_repository:
+ name: epel
+ file: external_repos
+ state: absent
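+
+# Illustrative extra example (not part of the original module docs); the
+# metalink URL below follows the common Fedora pattern and is an assumption here
+- name: Add repository via metalink
+  yum_repository:
+    name: fedora
+    description: Fedora $releasever - $basearch
+    metalink: https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch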
+'''
+
+RETURN = '''
+repo:
+ description: repository name
+ returned: success
+ type: str
+ sample: "epel"
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+class YumRepo(object):
+ # Class global variables
+ module = None
+ params = None
+ section = None
+ repofile = configparser.RawConfigParser()
+
+ # List of parameters which will be allowed in the repo file output
+ allowed_params = [
+ 'async',
+ 'bandwidth',
+ 'baseurl',
+ 'cost',
+ 'deltarpm_metadata_percentage',
+ 'deltarpm_percentage',
+ 'enabled',
+ 'enablegroups',
+ 'exclude',
+ 'failovermethod',
+ 'gpgcakey',
+ 'gpgcheck',
+ 'gpgkey',
+ 'http_caching',
+ 'include',
+ 'includepkgs',
+ 'ip_resolve',
+ 'keepalive',
+ 'keepcache',
+ 'metadata_expire',
+ 'metadata_expire_filter',
+ 'metalink',
+ 'mirrorlist',
+ 'mirrorlist_expire',
+ 'name',
+ 'password',
+ 'priority',
+ 'protect',
+ 'proxy',
+ 'proxy_password',
+ 'proxy_username',
+ 'repo_gpgcheck',
+ 'retries',
+ 's3_enabled',
+ 'skip_if_unavailable',
+ 'sslcacert',
+ 'ssl_check_cert_permissions',
+ 'sslclientcert',
+ 'sslclientkey',
+ 'sslverify',
+ 'throttle',
+ 'timeout',
+ 'ui_repoid_vars',
+ 'username']
+
+ # List of parameters which can be a list
+ list_params = ['exclude', 'includepkgs']
+
+ def __init__(self, module):
+ # To be able to use fail_json
+ self.module = module
+ # Shortcut for the params
+ self.params = self.module.params
+ # Section is always the repoid
+ self.section = self.params['repoid']
+
+ # Check if repo directory exists
+ repos_dir = self.params['reposdir']
+ if not os.path.isdir(repos_dir):
+ self.module.fail_json(
+ msg="Repo directory '%s' does not exist." % repos_dir)
+
+ # Set dest; also used to set dest parameter for the FS attributes
+ self.params['dest'] = os.path.join(
+ repos_dir, "%s.repo" % self.params['file'])
+
+ # Read the repo file if it exists
+ if os.path.isfile(self.params['dest']):
+ self.repofile.read(self.params['dest'])
+
+ def add(self):
+ # Remove already existing repo and create a new one
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ # Add section
+ self.repofile.add_section(self.section)
+
+ # Baseurl/metalink/mirrorlist is not required because for removal we
+ # need only the repo name. This is why we check whether at least one
+ # of them is defined.
+ req_params = (self.params['baseurl'], self.params['metalink'], self.params['mirrorlist'])
+ if req_params == (None, None, None):
+ self.module.fail_json(
+ msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required for "
+ "adding a new repo.")
+
+ # Set options
+ for key, value in sorted(self.params.items()):
+ if key in self.list_params and isinstance(value, list):
+ # Join items into one string for specific parameters
+ value = ' '.join(value)
+ elif isinstance(value, bool):
+ # Convert boolean value to integer
+ value = int(value)
+
+ # Set the value only if it was defined (default is None)
+ if value is not None and key in self.allowed_params:
+ self.repofile.set(self.section, key, value)
+
+ def save(self):
+ if len(self.repofile.sections()):
+ # Write data into the file
+ try:
+ with open(self.params['dest'], 'w') as fd:
+ self.repofile.write(fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Problems handling file %s." % self.params['dest'],
+ details=to_native(e))
+ else:
+ # Remove the file if there are no repos
+ try:
+ os.remove(self.params['dest'])
+ except OSError as e:
+ self.module.fail_json(
+ msg=(
+ "Cannot remove empty repo file %s." %
+ self.params['dest']),
+ details=to_native(e))
+
+ def remove(self):
+ # Remove section if exists
+ if self.repofile.has_section(self.section):
+ self.repofile.remove_section(self.section)
+
+ def dump(self):
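+ # Illustrative output for a file holding a single 'epel' section; the
+ # values are assumptions. Keys within a section are emitted sorted:
+ #
+ # [epel]
+ # baseurl = https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
+ # name = EPEL YUM repo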
+ repo_string = ""
+
+ # Compose the repo file
+ for section in sorted(self.repofile.sections()):
+ repo_string += "[%s]\n" % section
+
+ for key, value in sorted(self.repofile.items(section)):
+ repo_string += "%s = %s\n" % (key, value)
+
+ repo_string += "\n"
+
+ return repo_string
+
+
+def main():
+ # Module settings
+ argument_spec = dict(
+ bandwidth=dict(),
+ baseurl=dict(type='list'),
+ cost=dict(),
+ deltarpm_metadata_percentage=dict(),
+ deltarpm_percentage=dict(),
+ description=dict(),
+ enabled=dict(type='bool'),
+ enablegroups=dict(type='bool'),
+ exclude=dict(type='list'),
+ failovermethod=dict(choices=['roundrobin', 'priority']),
+ file=dict(),
+ gpgcakey=dict(),
+ gpgcheck=dict(type='bool'),
+ gpgkey=dict(type='list'),
+ http_caching=dict(choices=['all', 'packages', 'none']),
+ include=dict(),
+ includepkgs=dict(type='list'),
+ ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
+ keepalive=dict(type='bool'),
+ keepcache=dict(choices=['0', '1']),
+ metadata_expire=dict(),
+ metadata_expire_filter=dict(
+ choices=[
+ 'never',
+ 'read-only:past',
+ 'read-only:present',
+ 'read-only:future']),
+ metalink=dict(),
+ mirrorlist=dict(),
+ mirrorlist_expire=dict(),
+ name=dict(required=True),
+ params=dict(type='dict'),
+ password=dict(no_log=True),
+ priority=dict(),
+ protect=dict(type='bool'),
+ proxy=dict(),
+ proxy_password=dict(no_log=True),
+ proxy_username=dict(),
+ repo_gpgcheck=dict(type='bool'),
+ reposdir=dict(default='/etc/yum.repos.d', type='path'),
+ retries=dict(),
+ s3_enabled=dict(type='bool'),
+ skip_if_unavailable=dict(type='bool'),
+ sslcacert=dict(aliases=['ca_cert']),
+ ssl_check_cert_permissions=dict(type='bool'),
+ sslclientcert=dict(aliases=['client_cert']),
+ sslclientkey=dict(aliases=['client_key']),
+ sslverify=dict(type='bool', aliases=['validate_certs']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ throttle=dict(),
+ timeout=dict(),
+ ui_repoid_vars=dict(),
+ username=dict(),
+ )
+
+ argument_spec['async'] = dict(type='bool')
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Params was removed
+ # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
+ if module.params['params']:
+ module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # Check if required parameters are present
+ if state == 'present':
+ if (
+ module.params['baseurl'] is None and
+ module.params['metalink'] is None and
+ module.params['mirrorlist'] is None):
+ module.fail_json(
+ msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
+ if module.params['description'] is None:
+ module.fail_json(
+ msg="Parameter 'description' is required.")
+
+ # Rename "name" and "description" to ensure correct key sorting
+ module.params['repoid'] = module.params['name']
+ module.params['name'] = module.params['description']
+ del module.params['description']
+
+ # Change list type to string for baseurl and gpgkey
+ for list_param in ['baseurl', 'gpgkey']:
+ if (
+ list_param in module.params and
+ module.params[list_param] is not None):
+ module.params[list_param] = "\n".join(module.params[list_param])
+
+ # Define repo file name if it doesn't exist
+ if module.params['file'] is None:
+ module.params['file'] = module.params['repoid']
+
+ # Instantiate the YumRepo object
+ yumrepo = YumRepo(module)
+
+ # Get repo status before change
+ diff = {
+ 'before_header': yumrepo.params['dest'],
+ 'before': yumrepo.dump(),
+ 'after_header': yumrepo.params['dest'],
+ 'after': ''
+ }
+
+ # Perform action depending on the state
+ if state == 'present':
+ yumrepo.add()
+ elif state == 'absent':
+ yumrepo.remove()
+
+ # Get repo status after change
+ diff['after'] = yumrepo.dump()
+
+ # Compare repo states
+ changed = diff['before'] != diff['after']
+
+ # Save the file only if not in check mode and if there was a change
+ if not module.check_mode and changed:
+ yumrepo.save()
+
+ # Change file attributes if needed
+ if os.path.isfile(module.params['dest']):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ # Print status of the change
+ module.exit_json(changed=changed, repo=name, state=state, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py
new file mode 100644
index 00000000..28634b1b
--- /dev/null
+++ b/lib/ansible/parsing/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/parsing/ajson.py b/lib/ansible/parsing/ajson.py
new file mode 100644
index 00000000..526c36d8
--- /dev/null
+++ b/lib/ansible/parsing/ajson.py
@@ -0,0 +1,42 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+# Imported for backwards compat
+from ansible.module_utils.common.json import AnsibleJSONEncoder
+
+from ansible.parsing.vault import VaultLib
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import wrap_var
+
+
+class AnsibleJSONDecoder(json.JSONDecoder):
+
+ _vaults = {}
+
+ def __init__(self, *args, **kwargs):
+ kwargs['object_hook'] = self.object_hook
+ super(AnsibleJSONDecoder, self).__init__(*args, **kwargs)
+
+ @classmethod
+ def set_secrets(cls, secrets):
+ cls._vaults['default'] = VaultLib(secrets=secrets)
+
+ def object_hook(self, pairs):
+ for key in pairs:
+ value = pairs[key]
+
+ if key == '__ansible_vault':
+ value = AnsibleVaultEncryptedUnicode(value)
+ if self._vaults:
+ value.vault = self._vaults['default']
+ return value
+ elif key == '__ansible_unsafe':
+ return wrap_var(value)
+
+ return pairs
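+
+
+# Minimal illustrative usage; the payload and `secrets` below are
+# hypothetical placeholders, not values from this file:
+#
+# AnsibleJSONDecoder.set_secrets(secrets) # e.g. secrets built via ansible.parsing.vault
+# data = json.loads(
+# '{"pw": {"__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\\n..."}}',
+# cls=AnsibleJSONDecoder,
+# )
+# # data['pw'] is an AnsibleVaultEncryptedUnicode bound to the default vault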
diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py
new file mode 100644
index 00000000..4b7bddff
--- /dev/null
+++ b/lib/ansible/parsing/dataloader.py
@@ -0,0 +1,454 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import os
+import os.path
+import re
+import tempfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleFileNotFound, AnsibleParserError
+from ansible.module_utils.basic import is_executable
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.quoting import unquote
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file, parse_vaulttext_envelope
+from ansible.utils.path import unfrackpath
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# Tries to determine if a path is inside a role, last dir must be 'tasks'
+# this is not perfect but people should really avoid 'tasks' dirs outside roles when using Ansible.
+RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep))
+
+
+class DataLoader:
+
+ '''
+ The DataLoader class is used to load and parse YAML or JSON content,
+ either from a given file name or from a string that was previously
+ read in through other means. A Vault password can be specified, and
+ any vault-encrypted files will be decrypted.
+
+ Data read from files will also be cached, so the file will never be
+ read from disk more than once.
+
+ Usage:
+
+ dl = DataLoader()
+ # optionally: dl.set_vault_password('foo')
+ ds = dl.load('...')
+ ds = dl.load_from_file('/path/to/file')
+ '''
+
+ def __init__(self):
+
+ self._basedir = '.'
+
+ # NOTE: not effective with forks as the main copy does not get updated.
+ # avoids rereading files
+ self._FILE_CACHE = dict()
+
+ # NOTE: not thread safe, also issues with forks not returning data to main proc
+ # so they need to be cleaned independently. See WorkerProcess for example.
+ # used to keep track of temp files for cleaning
+ self._tempfiles = set()
+
+ # initialize the vault stuff with an empty password
+ # TODO: replace with a ref to something that can get the password
+ # a creds/auth provider
+ # self.set_vault_password(None)
+ self._vaults = {}
+ self._vault = VaultLib()
+ self.set_vault_secrets(None)
+
+ # TODO: since we can query vault_secrets late, we could provide this to DataLoader init
+ def set_vault_secrets(self, vault_secrets):
+ self._vault.secrets = vault_secrets
+
+ def load(self, data, file_name='<string>', show_content=True, json_only=False):
+ '''Backwards compat for now'''
+ return from_yaml(data, file_name, show_content, self._vault.secrets, json_only=json_only)
+
+ def load_from_file(self, file_name, cache=True, unsafe=False, json_only=False):
+ ''' Loads data from a file, which can contain either JSON or YAML. '''
+
+ file_name = self.path_dwim(file_name)
+ display.debug("Loading data from %s" % file_name)
+
+ # if the file has already been read in and cached, we'll
+ # return those results to avoid more file/vault operations
+ if cache and file_name in self._FILE_CACHE:
+ parsed_data = self._FILE_CACHE[file_name]
+ else:
+ # read the file contents and load the data structure from them
+ (b_file_data, show_content) = self._get_file_contents(file_name)
+
+ file_data = to_text(b_file_data, errors='surrogate_or_strict')
+ parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content, json_only=json_only)
+
+ # cache the file contents for next time
+ self._FILE_CACHE[file_name] = parsed_data
+
+ if unsafe:
+ return parsed_data
+ else:
+ # return a deep copy here, so the cache is not affected
+ return copy.deepcopy(parsed_data)
+
+ def path_exists(self, path):
+ path = self.path_dwim(path)
+ return os.path.exists(to_bytes(path, errors='surrogate_or_strict'))
+
+ def is_file(self, path):
+ path = self.path_dwim(path)
+ return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull
+
+ def is_directory(self, path):
+ path = self.path_dwim(path)
+ return os.path.isdir(to_bytes(path, errors='surrogate_or_strict'))
+
+ def list_directory(self, path):
+ path = self.path_dwim(path)
+ return os.listdir(path)
+
+ def is_executable(self, path):
+ '''is the given path executable?'''
+ path = self.path_dwim(path)
+ return is_executable(path)
+
+ def _decrypt_if_vault_data(self, b_vault_data, b_file_name=None):
+ '''Decrypt b_vault_data if encrypted and return b_data and the show_content flag'''
+
+ if not is_encrypted(b_vault_data):
+ show_content = True
+ return b_vault_data, show_content
+
+ b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data)
+ b_data = self._vault.decrypt(b_vault_data, filename=b_file_name)
+
+ show_content = False
+ return b_data, show_content
+
+ def _get_file_contents(self, file_name):
+ '''
+ Reads the file contents from the given file name
+
+ If the contents are vault-encrypted, it will decrypt them and return
+ the decrypted data
+
+ :arg file_name: The name of the file to read. If this is a relative
+ path, it will be expanded relative to the basedir
+ :raises AnsibleFileNotFound: if the file_name does not refer to a file
+ :raises AnsibleParserError: if we were unable to read the file
+ :return: Returns a byte string of the file contents
+ '''
+ if not file_name or not isinstance(file_name, (binary_type, text_type)):
+ raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
+
+ b_file_name = to_bytes(self.path_dwim(file_name))
+ # This is what we really want but have to fix unittests to make it pass
+ # if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
+ if not self.path_exists(b_file_name):
+ raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
+
+ try:
+ with open(b_file_name, 'rb') as f:
+ data = f.read()
+ return self._decrypt_if_vault_data(data, b_file_name)
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)), orig_exc=e)
+
+ def get_basedir(self):
+ ''' returns the current basedir '''
+ return self._basedir
+
+ def set_basedir(self, basedir):
+ ''' sets the base directory, used to find files when a relative path is given '''
+
+ if basedir is not None:
+ self._basedir = to_text(basedir)
+
+ def path_dwim(self, given):
+ '''
+ make relative paths work like folks expect.
+ '''
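+ # Illustrative behaviour, assuming self._basedir == '/srv/play' (paths
+ # are hypothetical):
+ # path_dwim('files/foo') -> '/srv/play/files/foo'
+ # path_dwim('/etc/hosts') -> '/etc/hosts' (absolute paths pass through)
+ # path_dwim('~/x') -> the home-expanded absolute path, via unfrackpath()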
+
+ given = unquote(given)
+ given = to_text(given, errors='surrogate_or_strict')
+
+ if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'):
+ path = given
+ else:
+ basedir = to_text(self._basedir, errors='surrogate_or_strict')
+ path = os.path.join(basedir, given)
+
+ return unfrackpath(path, follow=False)
+
+ def _is_role(self, path):
+ ''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc '''
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict')
+
+ for b_finddir in (b'meta', b'tasks'):
+ for b_suffix in (b'.yml', b'.yaml', b''):
+ b_main = b'main%s' % (b_suffix)
+ b_tasked = os.path.join(b_finddir, b_main)
+
+ if (
+ RE_TASKS.search(path) and
+ os.path.exists(os.path.join(b_path, b_main)) or
+ os.path.exists(os.path.join(b_upath, b_tasked)) or
+ os.path.exists(os.path.join(os.path.dirname(b_path), b_tasked))
+ ):
+ return True
+ return False
+
+ def path_dwim_relative(self, path, dirname, source, is_role=False):
+ '''
+ find one file in either a role or playbook dir with or without
+ explicitly named dirname subdirs
+
+ Used in action plugins and lookups to find supplemental files that
+ could be in either place.
+ '''
+
+ search = []
+ source = to_text(source, errors='surrogate_or_strict')
+
+ # I have full path, nothing else needs to be looked at
+ if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'):
+ search.append(unfrackpath(source, follow=False))
+ else:
+ # base role/play path + templates/files/vars + relative filename
+ search.append(os.path.join(path, dirname, source))
+ basedir = unfrackpath(path, follow=False)
+
+ # not told if role, but detect if it is a role and if so make sure you get correct base path
+ if not is_role:
+ is_role = self._is_role(path)
+
+ if is_role and RE_TASKS.search(path):
+ basedir = unfrackpath(os.path.dirname(path), follow=False)
+
+ cur_basedir = self._basedir
+ self.set_basedir(basedir)
+ # resolved base role/play path + templates/files/vars + relative filename
+ search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False))
+ self.set_basedir(cur_basedir)
+
+ if is_role and not source.endswith(dirname):
+ # look in role's tasks dir w/o dirname
+ search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False))
+
+ # try to create absolute path for loader basedir + templates/files/vars + filename
+ search.append(unfrackpath(os.path.join(dirname, source), follow=False))
+
+ # try to create absolute path for loader basedir
+ search.append(unfrackpath(os.path.join(basedir, source), follow=False))
+
+ # try to create absolute path for dirname + filename
+ search.append(self.path_dwim(os.path.join(dirname, source)))
+
+ # try to create absolute path for filename
+ search.append(self.path_dwim(source))
+
+ for candidate in search:
+ if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')):
+ break
+
+ return candidate
+
+ def path_dwim_relative_stack(self, paths, dirname, source, is_role=False):
+ '''
+ find one file in first path in stack taking roles into account and adding play basedir as fallback
+
+ :arg paths: A list of text strings which are the paths to look for the filename in.
+ :arg dirname: A text string representing a directory. The directory
+ is prepended to the source to form the path to search for.
+ :arg source: A text string which is the filename to search for
+ :rtype: A text string
+ :returns: An absolute path to the filename ``source`` if found
+ :raises: An AnsibleFileNotFound Exception if the file is not found in any of the search paths
+ '''
+ b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
+ b_source = to_bytes(source, errors='surrogate_or_strict')
+
+ result = None
+ search = []
+ if source is None:
+ display.warning('Invalid request to find a file that matches a "null" value')
+ elif source and (source.startswith('~') or source.startswith(os.path.sep)):
+ # path is absolute, no relative needed, check existence and return source
+ test_path = unfrackpath(b_source, follow=False)
+ if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
+ result = test_path
+ else:
+ display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
+ for path in paths:
+ upath = unfrackpath(path, follow=False)
+ b_upath = to_bytes(upath, errors='surrogate_or_strict')
+ b_pb_base_dir = os.path.dirname(b_upath)
+
+ # if path is in role and 'tasks' not there already, add it into the search
+ if (is_role or self._is_role(path)) and b_pb_base_dir.endswith(b'/tasks'):
+ search.append(os.path.join(os.path.dirname(b_pb_base_dir), b_dirname, b_source))
+ search.append(os.path.join(b_pb_base_dir, b_source))
+ else:
+ # don't add dirname if user already is using it in source
+ if b_source.split(b'/')[0] != b_dirname:
+ search.append(os.path.join(b_upath, b_dirname, b_source))
+ search.append(os.path.join(b_upath, b_source))
+
+ # always append basedir as last resort
+ # don't add dirname if user already is using it in source
+ if b_source.split(b'/')[0] != b_dirname:
+ search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_dirname, b_source))
+ search.append(os.path.join(to_bytes(self.get_basedir(), errors='surrogate_or_strict'), b_source))
+
+ display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
+ for b_candidate in search:
+ display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
+ if os.path.exists(b_candidate):
+ result = to_text(b_candidate)
+ break
+
+ if result is None:
+ raise AnsibleFileNotFound(file_name=source, paths=[to_native(p) for p in search])
+
+ return result
+
+ def _create_content_tempfile(self, content):
+ ''' Create a tempfile containing defined content '''
+ fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(content_tempfile)
+ raise Exception(err)
+ finally:
+ f.close()
+ return content_tempfile
+
+ def get_real_file(self, file_path, decrypt=True):
+ """
+ If the file is vault encrypted return a path to a temporary decrypted file
+ If the file is not encrypted then the path is returned
+ Temporary files are cleaned up via cleanup_tmp_file() / cleanup_all_tmp_files()
+ """
+
+ if not file_path or not isinstance(file_path, (binary_type, text_type)):
+ raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path))
+
+ b_file_path = to_bytes(file_path, errors='surrogate_or_strict')
+ if not self.path_exists(b_file_path) or not self.is_file(b_file_path):
+ raise AnsibleFileNotFound(file_name=file_path)
+
+ real_path = self.path_dwim(file_path)
+
+ try:
+ if decrypt:
+ with open(to_bytes(real_path), 'rb') as f:
+ # Limit how much of the file is read since we do not know
+ # whether this is a vault file and therefore it could be very
+ # large.
+ if is_encrypted_file(f, count=len(b_HEADER)):
+ # if the file is encrypted and no password was specified,
+ # the decrypt call would throw an error, but we check first
+ # since the decrypt function doesn't know the file name
+ data = f.read()
+ if not self._vault.secrets:
+ raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path))
+
+ data = self._vault.decrypt(data, filename=real_path)
+ # Make a temp file
+ real_path = self._create_content_tempfile(data)
+ self._tempfiles.add(real_path)
+
+ return real_path
+
+ except (IOError, OSError) as e:
+ raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e)
+
+ def cleanup_tmp_file(self, file_path):
+ """
+ Removes any temporary files created from a previous call to
+ get_real_file. file_path must be the path returned from a
+ previous call to get_real_file.
+ """
+ if file_path in self._tempfiles:
+ os.unlink(file_path)
+ self._tempfiles.remove(file_path)
+
+ def cleanup_all_tmp_files(self):
+ """
+ Removes all temporary files that DataLoader has created
+ NOTE: not thread safe, forks also need special handling see __init__ for details.
+ """
+ for f in self._tempfiles:
+ try:
+ self.cleanup_tmp_file(f)
+ except Exception as e:
+ display.warning("Unable to cleanup temp files: %s" % to_text(e))
+
+ def find_vars_files(self, path, name, extensions=None, allow_dir=True):
+ """
+ Find vars files in a given path with specified name. This will find
+ files in a dir named <name>/ or a file called <name> ending in known
+ extensions.
+ """
+
+ b_path = to_bytes(os.path.join(path, name))
+ found = []
+
+ if extensions is None:
+ # Look for file with no extension first to find dir before file
+ extensions = [''] + C.YAML_FILENAME_EXTENSIONS
+ # add valid extensions to name
+ for ext in extensions:
+
+ if '.' in ext:
+ full_path = b_path + to_bytes(ext)
+ elif ext:
+ full_path = b'.'.join([b_path, to_bytes(ext)])
+ else:
+ full_path = b_path
+
+ if self.path_exists(full_path):
+ if self.is_directory(full_path):
+ if allow_dir:
+ found.extend(self._get_dir_vars_files(to_text(full_path), extensions))
+ else:
+ continue
+ else:
+ found.append(full_path)
+ break
+ return found
+
+ def _get_dir_vars_files(self, path, extensions):
+ found = []
+ for spath in sorted(self.list_directory(path)):
+ if not spath.startswith(u'.') and not spath.endswith(u'~'): # skip hidden and backups
+
+ ext = os.path.splitext(spath)[-1]
+ full_spath = os.path.join(path, spath)
+
+ if self.is_directory(full_spath) and not ext: # recursive search if dir
+ found.extend(self._get_dir_vars_files(full_spath, extensions))
+ elif self.is_file(full_spath) and (not ext or to_text(ext) in extensions):
+ # only consider files with valid extensions or no extension
+ found.append(full_spath)
+
+ return found
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
new file mode 100644
index 00000000..ed9865cb
--- /dev/null
+++ b/lib/ansible/parsing/mod_args.py
@@ -0,0 +1,347 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError, AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_text
+from ansible.parsing.splitter import parse_kv, split_args
+from ansible.plugins.loader import module_loader, action_loader
+from ansible.template import Templar
+from ansible.utils.fqcn import add_internal_fqcns
+from ansible.utils.sentinel import Sentinel
+
+
+# For filtering out modules correctly below
+FREEFORM_ACTIONS = frozenset(C.MODULE_REQUIRE_ARGS)
+
+RAW_PARAM_MODULES = FREEFORM_ACTIONS.union(add_internal_fqcns((
+ 'include',
+ 'include_vars',
+ 'include_tasks',
+ 'include_role',
+ 'import_tasks',
+ 'import_role',
+ 'add_host',
+ 'group_by',
+ 'set_fact',
+ 'meta',
+)))
+
+BUILTIN_TASKS = frozenset(add_internal_fqcns((
+ 'meta',
+ 'include',
+ 'include_tasks',
+ 'include_role',
+ 'import_tasks',
+ 'import_role'
+)))
+
+
+class ModuleArgsParser:
+
+ """
+ There are several ways a module and argument set can be expressed:
+
+ # legacy form (for a shell command)
+ - action: shell echo hi
+
+ # common shorthand for local actions vs delegate_to
+ - local_action: shell echo hi
+
+ # most commonly:
+ - copy: src=a dest=b
+
+ # legacy form
+ - action: copy src=a dest=b
+
+ # complex args form, for passing structured data
+ - copy:
+ src: a
+ dest: b
+
+ # gross, but technically legal
+ - action:
+ module: copy
+ args:
+ src: a
+ dest: b
+
+ # Standard YAML form for command-type modules. In this case, the args specified
+ # will act as 'defaults' and will be overridden by any args specified
+ # in one of the other formats (complex args under the action, or
+ # parsed from the k=v string
+ - command: 'pwd'
+ args:
+ chdir: '/tmp'
+
+
+ This class has some of the logic to canonicalize these into the form
+
+ - module: <module_name>
+ delegate_to: <optional>
+ args: <args>
+
+ Args may also be munged for certain shell command parameters.
+ """
+
+ def __init__(self, task_ds=None, collection_list=None):
+ task_ds = {} if task_ds is None else task_ds
+
+ if not isinstance(task_ds, dict):
+ raise AnsibleAssertionError("the type of 'task_ds' should be a dict, but is a %s" % type(task_ds))
+ self._task_ds = task_ds
+ self._collection_list = collection_list
+ # delayed local imports to prevent circular import
+ from ansible.playbook.task import Task
+ from ansible.playbook.handler import Handler
+ # store the valid Task/Handler attrs for quick access
+ self._task_attrs = set(Task._valid_attrs.keys())
+ self._task_attrs.update(set(Handler._valid_attrs.keys()))
+ # HACK: why are these not FieldAttributes on task with a post-validate to check usage?
+ self._task_attrs.update(['local_action', 'static'])
+ self._task_attrs = frozenset(self._task_attrs)
+
+ self.internal_redirect_list = []
+
+ def _split_module_string(self, module_string):
+ '''
+ when module names are expressed like:
+ action: copy src=a dest=b
+ the first part of the string is the name of the module
+ and the rest are strings pertaining to the arguments.
+ '''
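+ # e.g. (illustrative): 'copy src=a dest=b' -> ('copy', 'src=a dest=b')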
+
+ tokens = split_args(module_string)
+ if len(tokens) > 1:
+ return (tokens[0], " ".join(tokens[1:]))
+ else:
+ return (tokens[0], "")
+
+ def _normalize_parameters(self, thing, action=None, additional_args=None):
+ '''
+ arguments can be fuzzy. Deal with all the forms.
+ '''
+
+ additional_args = {} if additional_args is None else additional_args
+
+ # final args are the ones we'll eventually return, so first update
+ # them with any additional args specified, which have lower priority
+ # than those which may be parsed/normalized next
+ final_args = dict()
+ if additional_args:
+ if isinstance(additional_args, string_types):
+ templar = Templar(loader=None)
+ if templar.is_template(additional_args):
+ final_args['_variable_params'] = additional_args
+ else:
+ raise AnsibleParserError("Complex args containing variables cannot use bare variables (without Jinja2 delimiters), "
+ "and must use the full variable style ('{{var_name}}')")
+ elif isinstance(additional_args, dict):
+ final_args.update(additional_args)
+ else:
+ raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
+
+ # how we normalize depends on whether we have figured out the module
+ # name yet. If we have already figured it out, it's a 'new style'
+ # invocation; otherwise, it's not
+
+ if action is not None:
+ args = self._normalize_new_style_args(thing, action)
+ else:
+ (action, args) = self._normalize_old_style_args(thing)
+
+ # this can occasionally happen, simplify
+ if args and 'args' in args:
+ tmp_args = args.pop('args')
+ if isinstance(tmp_args, string_types):
+ tmp_args = parse_kv(tmp_args)
+ args.update(tmp_args)
+
+ # only internal variables can start with an underscore, so
+ # we don't allow users to set them directly in arguments
+ if args and action not in FREEFORM_ACTIONS:
+ for arg in args:
+ arg = to_text(arg)
+ if arg.startswith('_ansible_'):
+ raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))
+
+ # finally, update the args we're going to return with the ones
+ # which were normalized above
+ if args:
+ final_args.update(args)
+
+ return (action, final_args)
+
+ def _normalize_new_style_args(self, thing, action):
+ '''
+ deals with fuzziness in new style module invocations
+ accepting key=value pairs and dictionaries, and returns
+ a dictionary of arguments
+
+ possible example inputs:
+ 'echo hi', 'shell'
+ {'region': 'xyz'}, 'ec2'
+ standardized outputs like:
+ { _raw_params: 'echo hi', _uses_shell: True }
+ '''
+
+ if isinstance(thing, dict):
+ # form is like: { xyz: { x: 2, y: 3 } }
+ args = thing
+ elif isinstance(thing, string_types):
+ # form is like: copy: src=a dest=b
+ check_raw = action in FREEFORM_ACTIONS
+ args = parse_kv(thing, check_raw=check_raw)
+ elif thing is None:
+ # this can happen with modules which take no params, like ping:
+ args = None
+ else:
+ raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
+ return args
+
+ def _normalize_old_style_args(self, thing):
+ '''
+ deals with fuzziness in old-style (action/local_action) module invocations
+ returns tuple of (module_name, dictionary_args)
+
+ possible example inputs:
+ { 'shell' : 'echo hi' }
+ 'shell echo hi'
+ {'module': 'ec2', 'x': 1 }
+ standardized outputs like:
+ ('ec2', { 'x': 1} )
+ '''
+
+ action = None
+ args = None
+
+ if isinstance(thing, dict):
+ # form is like: action: { module: 'copy', src: 'a', dest: 'b' }
+ thing = thing.copy()
+ if 'module' in thing:
+ action, module_args = self._split_module_string(thing['module'])
+ args = thing.copy()
+ check_raw = action in FREEFORM_ACTIONS
+ args.update(parse_kv(module_args, check_raw=check_raw))
+ del args['module']
+
+ elif isinstance(thing, string_types):
+ # form is like: action: copy src=a dest=b
+ (action, args) = self._split_module_string(thing)
+ check_raw = action in FREEFORM_ACTIONS
+ args = parse_kv(args, check_raw=check_raw)
+
+ else:
+ # need a dict or a string, so giving up
+ raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
+
+ return (action, args)
+
+ def parse(self, skip_action_validation=False):
+ '''
+ Given a task in one of the supported forms, parses and returns
+ the action, arguments, and delegate_to values for the
+ task, dealing with all sorts of levels of fuzziness.
+ '''
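+ # Illustrative: a task datastructure such as
+ # {'copy': 'src=a dest=b', 'delegate_to': 'web1'} (hypothetical)
+ # parses to ('copy', {'src': 'a', 'dest': 'b'}, 'web1').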
+
+ thing = None
+
+ action = None
+ delegate_to = self._task_ds.get('delegate_to', Sentinel)
+ args = dict()
+
+ self.internal_redirect_list = []
+
+ # This is the standard YAML form for command-type modules. We grab
+ # the args and pass them in as additional arguments, which can/will
+ # be overwritten via dict updates from the other arg sources below
+ additional_args = self._task_ds.get('args', dict())
+
+ # We can have one of action, local_action, or module specified
+ # action
+ if 'action' in self._task_ds:
+ # an old school 'action' statement
+ thing = self._task_ds['action']
+ action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+
+ # local_action
+ if 'local_action' in self._task_ds:
+ # local_action is similar but also implies a delegate_to
+ if action is not None:
+ raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
+ thing = self._task_ds.get('local_action', '')
+ delegate_to = 'localhost'
+ action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+
+ # module: <stuff> is the more new-style invocation
+
+ # filter out task attributes so we're only querying unrecognized keys as actions/modules
+ non_task_ds = dict((k, v) for k, v in iteritems(self._task_ds) if (k not in self._task_attrs) and (not k.startswith('with_')))
+
+ # walk the filtered input dictionary to see if we recognize a module name
+ for item, value in iteritems(non_task_ds):
+ is_action_candidate = False
+ if item in BUILTIN_TASKS:
+ is_action_candidate = True
+ elif skip_action_validation:
+ is_action_candidate = True
+ else:
+ # If the plugin is resolved and redirected, smuggle the list of candidate names via the task attribute 'internal_redirect_list'
+ context = action_loader.find_plugin_with_context(item, collection_list=self._collection_list)
+ if not context.resolved:
+ context = module_loader.find_plugin_with_context(item, collection_list=self._collection_list)
+ if context.resolved and context.redirect_list:
+ self.internal_redirect_list = context.redirect_list
+ elif context.redirect_list:
+ self.internal_redirect_list = context.redirect_list
+
+ is_action_candidate = bool(self.internal_redirect_list)
+
+ if is_action_candidate:
+ # finding more than one module name is a problem
+ if action is not None:
+ raise AnsibleParserError("conflicting action statements: %s, %s" % (action, item), obj=self._task_ds)
+ action = item
+ thing = value
+ action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
+
+ # if we didn't see any module in the task at all, it's not a task really
+ if action is None:
+ if non_task_ds: # there was one non-task action, but we couldn't find it
+ bad_action = list(non_task_ds.keys())[0]
+ raise AnsibleParserError("couldn't resolve module/action '{0}'. This often indicates a "
+ "misspelling, missing collection, or incorrect module path.".format(bad_action),
+ obj=self._task_ds)
+ else:
+ raise AnsibleParserError("no module/action detected in task.",
+ obj=self._task_ds)
+ elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
+ templar = Templar(loader=None)
+ raw_params = args.pop('_raw_params')
+ if templar.is_template(raw_params):
+ args['_variable_params'] = raw_params
+ else:
+ raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action,
+ ", ".join(RAW_PARAM_MODULES)),
+ obj=self._task_ds)
+
+ return (action, args, delegate_to)
diff --git a/lib/ansible/parsing/plugin_docs.py b/lib/ansible/parsing/plugin_docs.py
new file mode 100644
index 00000000..bdbde6eb
--- /dev/null
+++ b/lib/ansible/parsing/plugin_docs.py
@@ -0,0 +1,113 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+
+from ansible.module_utils._text import to_text
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# NOTE: should move to just reading the variable as we do in plugin_loader since we already load as a 'module'
+# which is much faster than ast parsing ourselves.
+def read_docstring(filename, verbose=True, ignore_errors=True):
+
+ """
+ Search for assignment of the DOCUMENTATION and EXAMPLES variables in the given file.
+ Parse DOCUMENTATION from YAML and return the YAML doc or None together with EXAMPLES, as plain text.
+ """
+
+ data = {
+ 'doc': None,
+ 'plainexamples': None,
+ 'returndocs': None,
+ 'metadata': None, # NOTE: not used anymore, kept for compat
+ 'seealso': None,
+ }
+
+ string_to_vars = {
+ 'DOCUMENTATION': 'doc',
+ 'EXAMPLES': 'plainexamples',
+ 'RETURN': 'returndocs',
+ 'ANSIBLE_METADATA': 'metadata', # NOTE: now unused, but kept for backwards compat
+ }
+
+ try:
+ with open(filename, 'rb') as b_module_data:
+ M = ast.parse(b_module_data.read())
+
+ for child in M.body:
+ if isinstance(child, ast.Assign):
+ for t in child.targets:
+ try:
+ theid = t.id
+ except AttributeError:
+                        # skip this target; t.id only exists on simple name nodes, so e.g. tuple assignment targets raise AttributeError
+ display.warning("Failed to assign id for %s on %s, skipping" % (t, filename))
+ continue
+
+ if theid in string_to_vars:
+ varkey = string_to_vars[theid]
+ if isinstance(child.value, ast.Dict):
+ data[varkey] = ast.literal_eval(child.value)
+ else:
+ if theid == 'EXAMPLES':
+                                # examples 'can' be yaml, but even if so, we don't want to parse as such here
+ # as it can create undesired 'objects' that don't display well as docs.
+ data[varkey] = to_text(child.value.s)
+ else:
+                                # the string should be YAML if it is not already a dict
+ data[varkey] = AnsibleLoader(child.value.s, file_name=filename).get_single_data()
+
+ display.debug('assigned: %s' % varkey)
+
+ except Exception:
+ if verbose:
+ display.error("unable to parse %s" % filename)
+ if not ignore_errors:
+ raise
+
+ return data
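
A usage sketch for read_docstring(), assuming the ansible package from this tree is importable; 'my_module.py' is a hypothetical module that assigns DOCUMENTATION and EXAMPLES:

    from ansible.parsing.plugin_docs import read_docstring

    info = read_docstring('my_module.py')  # hypothetical path
    print(info['doc'])                     # DOCUMENTATION parsed from YAML, or None
    print(info['plainexamples'])           # EXAMPLES kept as plain text
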
+
+
+def read_docstub(filename):
+ """
+ Quickly find short_description using string methods instead of node parsing.
+ This does not return a full set of documentation strings and is intended for
+ operations like ansible-doc -l.
+ """
+
+ in_documentation = False
+ capturing = False
+ indent_detection = ''
+ doc_stub = []
+
+ with open(filename, 'r') as t_module_data:
+ for line in t_module_data:
+ if in_documentation:
+ # start capturing the stub until indentation returns
+ if capturing and line.startswith(indent_detection):
+ doc_stub.append(line)
+
+ elif capturing and not line.startswith(indent_detection):
+ break
+
+ elif line.lstrip().startswith('short_description:'):
+ capturing = True
+ # Detect that the short_description continues on the next line if it's indented more
+ # than short_description itself.
+ indent_detection = ' ' * (len(line) - len(line.lstrip()) + 1)
+ doc_stub.append(line)
+
+ elif line.startswith('DOCUMENTATION') and '=' in line:
+ in_documentation = True
+
+ short_description = r''.join(doc_stub).strip().rstrip('.')
+ data = AnsibleLoader(short_description, file_name=filename).get_single_data()
+
+ return data
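
The quick variant can be exercised the same way; read_docstub() only recovers the short_description block, which is all that operations like 'ansible-doc -l' need:

    from ansible.parsing.plugin_docs import read_docstub

    stub = read_docstub('my_module.py')  # hypothetical path
    print(stub['short_description'])
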
diff --git a/lib/ansible/parsing/quoting.py b/lib/ansible/parsing/quoting.py
new file mode 100644
index 00000000..d3a38d94
--- /dev/null
+++ b/lib/ansible/parsing/quoting.py
@@ -0,0 +1,31 @@
+# (c) 2014 James Cammarata, <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def is_quoted(data):
+ return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
+
+
+def unquote(data):
+ ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
+ if is_quoted(data):
+ return data[1:-1]
+ return data
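
A few behavioral examples for these helpers (a sketch, assuming the ansible package is importable); note that an escaped closing quote is deliberately not treated as quoting:

    from ansible.parsing.quoting import is_quoted, unquote

    assert is_quoted('"foo bar"')
    assert unquote("'single'") == 'single'
    assert unquote('plain') == 'plain'                # not quoted: unchanged
    assert unquote('"trailing\\"') == '"trailing\\"'  # escaped closing quote: unchanged
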
diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
new file mode 100644
index 00000000..b5209b01
--- /dev/null
+++ b/lib/ansible/parsing/splitter.py
@@ -0,0 +1,287 @@
+# (c) 2014 James Cammarata, <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import codecs
+import re
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_text
+from ansible.parsing.quoting import unquote
+
+
+# Decode escapes adapted from rspeer's answer here:
+# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
+_HEXCHAR = '[a-fA-F0-9]'
+_ESCAPE_SEQUENCE_RE = re.compile(r'''
+ ( \\U{0} # 8-digit hex escapes
+ | \\u{1} # 4-digit hex escapes
+ | \\x{2} # 2-digit hex escapes
+ | \\N\{{[^}}]+\}} # Unicode characters by name
+ | \\[\\'"abfnrtv] # Single-character escapes
+ )'''.format(_HEXCHAR * 8, _HEXCHAR * 4, _HEXCHAR * 2), re.UNICODE | re.VERBOSE)
+
+
+def _decode_escapes(s):
+ def decode_match(match):
+ return codecs.decode(match.group(0), 'unicode-escape')
+
+ return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
+
+
+def parse_kv(args, check_raw=False):
+ '''
+ Convert a string of key/value items to a dict. If any free-form params
+ are found and the check_raw option is set to True, they will be added
+ to a new parameter called '_raw_params'. If check_raw is not enabled,
+ they will simply be ignored.
+ '''
+
+ args = to_text(args, nonstring='passthru')
+
+ options = {}
+ if args is not None:
+ try:
+ vargs = split_args(args)
+ except IndexError as e:
+ raise AnsibleParserError("Unable to parse argument string", orig_exc=e)
+ except ValueError as ve:
+ if 'no closing quotation' in str(ve).lower():
+ raise AnsibleParserError("error parsing argument string, try quoting the entire line.", orig_exc=ve)
+ else:
+ raise
+
+ raw_params = []
+ for orig_x in vargs:
+ x = _decode_escapes(orig_x)
+ if "=" in x:
+ pos = 0
+ try:
+ while True:
+ pos = x.index('=', pos + 1)
+ if pos > 0 and x[pos - 1] != '\\':
+ break
+ except ValueError:
+ # ran out of string, but we must have some escaped equals,
+ # so replace those and append this to the list of raw params
+ raw_params.append(x.replace('\\=', '='))
+ continue
+
+ k = x[:pos]
+ v = x[pos + 1:]
+
+ # FIXME: make the retrieval of this list of shell/command
+ # options a function, so the list is centralized
+ if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
+ raw_params.append(orig_x)
+ else:
+ options[k.strip()] = unquote(v.strip())
+ else:
+ raw_params.append(orig_x)
+
+ # recombine the free-form params, if any were found, and assign
+ # them to a special option for use later by the shell/command module
+ if len(raw_params) > 0:
+ options[u'_raw_params'] = join_args(raw_params)
+
+ return options
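
A usage sketch for parse_kv(), assuming the ansible package is importable:

    from ansible.parsing.splitter import parse_kv

    print(parse_kv('a=b c="foo bar"'))
    # -> {'a': 'b', 'c': 'foo bar'}

    # With check_raw=True, free-form text is collected into '_raw_params',
    # while the recognized shell/command options are still split out:
    print(parse_kv('echo hello creates=/tmp/done', check_raw=True))
    # -> {'creates': '/tmp/done', '_raw_params': 'echo hello'}
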
+
+
+def _get_quote_state(token, quote_char):
+ '''
+    the goal of this block is to determine whether the quoted string
+    is unterminated, in which case it needs to be put back together
+ '''
+ # the char before the current one, used to see if
+ # the current character is escaped
+ prev_char = None
+ for idx, cur_char in enumerate(token):
+ if idx > 0:
+ prev_char = token[idx - 1]
+ if cur_char in '"\'' and prev_char != '\\':
+ if quote_char:
+ if cur_char == quote_char:
+ quote_char = None
+ else:
+ quote_char = cur_char
+ return quote_char
+
+
+def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
+ '''
+ this function counts the number of opening/closing blocks for a
+ given opening/closing type and adjusts the current depth for that
+ block based on the difference
+ '''
+ num_open = token.count(open_token)
+ num_close = token.count(close_token)
+ if num_open != num_close:
+ cur_depth += (num_open - num_close)
+ if cur_depth < 0:
+ cur_depth = 0
+ return cur_depth
+
+
+def join_args(s):
+ '''
+ Join the original cmd based on manipulations by split_args().
+ This retains the original newlines and whitespaces.
+ '''
+ result = ''
+ for p in s:
+ if len(result) == 0 or result.endswith('\n'):
+ result += p
+ else:
+ result += ' ' + p
+ return result
+
+
+def split_args(args):
+ '''
+ Splits args on whitespace, but intelligently reassembles
+ those that may have been split over a jinja2 block or quotes.
+
+    When used in a remote module, we won't ever have to be concerned about
+    jinja2 blocks; however, this function is also used in the
+    core portions, before the args are templated.
+
+ example input: a=b c="foo bar"
+ example output: ['a=b', 'c="foo bar"']
+
+    Basically this is a variation of shlex that has some more intelligence about
+    how Ansible needs to use it.
+ '''
+
+ # the list of params parsed out of the arg string
+ # this is going to be the result value when we are done
+ params = []
+
+ # Initial split on newlines
+ items = args.split('\n')
+
+ # iterate over the tokens, and reassemble any that may have been
+ # split on a space inside a jinja2 block.
+ # ex if tokens are "{{", "foo", "}}" these go together
+
+ # These variables are used
+ # to keep track of the state of the parsing, since blocks and quotes
+ # may be nested within each other.
+
+ quote_char = None
+ inside_quotes = False
+ print_depth = 0 # used to count nested jinja2 {{ }} blocks
+ block_depth = 0 # used to count nested jinja2 {% %} blocks
+ comment_depth = 0 # used to count nested jinja2 {# #} blocks
+
+ # now we loop over each split chunk, coalescing tokens if the white space
+ # split occurred within quotes or a jinja2 block of some kind
+ for (itemidx, item) in enumerate(items):
+
+ # we split on spaces and newlines separately, so that we
+ # can tell which character we split on for reassembly
+ # inside quotation characters
+ tokens = item.split(' ')
+
+ line_continuation = False
+ for (idx, token) in enumerate(tokens):
+
+            # Empty entries mean we had consecutive spaces;
+            # hold onto them so we can reconstruct the spacing later
+ if len(token) == 0 and idx != 0:
+ params[-1] += ' '
+ continue
+
+ # if we hit a line continuation character, but
+ # we're not inside quotes, ignore it and continue
+ # on to the next token while setting a flag
+ if token == '\\' and not inside_quotes:
+ line_continuation = True
+ continue
+
+ # store the previous quoting state for checking later
+ was_inside_quotes = inside_quotes
+ quote_char = _get_quote_state(token, quote_char)
+ inside_quotes = quote_char is not None
+
+ # multiple conditions may append a token to the list of params,
+ # so we keep track with this flag to make sure it only happens once
+ # append means add to the end of the list, don't append means concatenate
+ # it to the end of the last token
+ appended = False
+
+            # if we're inside quotes now, but weren't before, append the token
+            # to the end of the list, since we'll tack on more to it later;
+            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
+            # inside quotes (but aren't now), concat this token to the last param
+            if inside_quotes and not was_inside_quotes and not (print_depth or block_depth or comment_depth):
+ params.append(token)
+ appended = True
+ elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
+ if idx == 0 and was_inside_quotes:
+ params[-1] = "%s%s" % (params[-1], token)
+ elif len(tokens) > 1:
+ spacer = ''
+ if idx > 0:
+ spacer = ' '
+ params[-1] = "%s%s%s" % (params[-1], spacer, token)
+ else:
+ params[-1] = "%s\n%s" % (params[-1], token)
+ appended = True
+
+ # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
+ # and may append the current token to the params (if we haven't previously done so)
+ prev_print_depth = print_depth
+ print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
+ if print_depth != prev_print_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_block_depth = block_depth
+ block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
+ if block_depth != prev_block_depth and not appended:
+ params.append(token)
+ appended = True
+
+ prev_comment_depth = comment_depth
+ comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
+ if comment_depth != prev_comment_depth and not appended:
+ params.append(token)
+ appended = True
+
+ # finally, if we're at zero depth for all blocks and not inside quotes, and have not
+ # yet appended anything to the list of params, we do so now
+ if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
+ params.append(token)
+
+ # if this was the last token in the list, and we have more than
+ # one item (meaning we split on newlines), add a newline back here
+ # to preserve the original structure
+ if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
+ params[-1] += '\n'
+
+ # always clear the line continuation flag
+ line_continuation = False
+
+ # If we're done and things are not at zero depth or we're still inside quotes,
+ # raise an error to indicate that the args were unbalanced
+ if print_depth or block_depth or comment_depth or inside_quotes:
+ raise AnsibleParserError(u"failed at splitting arguments, either an unbalanced jinja2 block or quotes: {0}".format(args))
+
+ return params
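
Two behavioral examples for split_args() (a sketch, assuming the ansible package is importable); whitespace inside quotes or inside a jinja2 block does not split a token:

    from ansible.parsing.splitter import split_args

    print(split_args('a=b c="foo bar"'))
    # -> ['a=b', 'c="foo bar"']

    print(split_args('msg={{ greeting | default("hi there") }} count=3'))
    # -> ['msg={{ greeting | default("hi there") }}', 'count=3']
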
diff --git a/lib/ansible/parsing/utils/__init__.py b/lib/ansible/parsing/utils/__init__.py
new file mode 100644
index 00000000..ae8ccff5
--- /dev/null
+++ b/lib/ansible/parsing/utils/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py
new file mode 100644
index 00000000..0096af44
--- /dev/null
+++ b/lib/ansible/parsing/utils/addresses.py
@@ -0,0 +1,216 @@
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.errors import AnsibleParserError, AnsibleError
+
+# Components that match a numeric or alphanumeric begin:end or begin:end:step
+# range expression inside square brackets.
+
+numeric_range = r'''
+ \[
+ (?:[0-9]+:[0-9]+) # numeric begin:end
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+hexadecimal_range = r'''
+ \[
+ (?:[0-9a-f]+:[0-9a-f]+) # hexadecimal begin:end
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+alphanumeric_range = r'''
+ \[
+ (?:
+ [a-z]:[a-z]| # one-char alphabetic range
+ [0-9]+:[0-9]+ # ...or a numeric one
+ )
+ (?::[0-9]+)? # numeric :step (optional)
+ \]
+'''
+
+# Components that match a 16-bit portion of an IPv6 address in hexadecimal
+# notation (0..ffff) or an 8-bit portion of an IPv4 address in decimal notation
+# (0..255) or an [x:y(:z)] numeric range.
+
+ipv6_component = r'''
+ (?:
+ [0-9a-f]{{1,4}}| # 0..ffff
+ {range} # or a numeric range
+ )
+'''.format(range=hexadecimal_range)
+
+ipv4_component = r'''
+ (?:
+ [01]?[0-9]{{1,2}}| # 0..199
+ 2[0-4][0-9]| # 200..249
+ 25[0-5]| # 250..255
+ {range} # or a numeric range
+ )
+'''.format(range=numeric_range)
+
+# A hostname label, e.g. 'foo' in 'foo.example.com'. Consists of alphanumeric
+# characters plus dashes (and underscores) or valid ranges. The label may not
+# start or end with a hyphen or an underscore. This is interpolated into the
+# hostname pattern below. We don't try to enforce the 63-char length limit.
+
+label = r'''
+ (?:[\w]|{range}) # Starts with an alphanumeric or a range
+ (?:[\w_-]|{range})* # Then zero or more of the same or [_-]
+ (?<![_-]) # ...as long as it didn't end with [_-]
+'''.format(range=alphanumeric_range)
+
+patterns = {
+ # This matches a square-bracketed expression with a port specification. What
+ # is inside the square brackets is validated later.
+
+ 'bracketed_hostport': re.compile(
+ r'''^
+ \[(.+)\] # [host identifier]
+ :([0-9]+) # :port number
+ $
+ ''', re.X
+ ),
+
+ # This matches a bare IPv4 address or hostname (or host pattern including
+ # [x:y(:z)] ranges) with a port specification.
+
+ 'hostport': re.compile(
+ r'''^
+ ((?: # We want to match:
+ [^:\[\]] # (a non-range character
+ | # ...or...
+ \[[^\]]*\] # a complete bracketed expression)
+ )*) # repeated as many times as possible
+ :([0-9]+) # followed by a port number
+ $
+ ''', re.X
+ ),
+
+ # This matches an IPv4 address, but also permits range expressions.
+
+ 'ipv4': re.compile(
+ r'''^
+ (?:{i4}\.){{3}}{i4} # Three parts followed by dots plus one
+ $
+ '''.format(i4=ipv4_component), re.X | re.I
+ ),
+
+ # This matches an IPv6 address, but also permits range expressions.
+ #
+ # This expression looks complex, but it really only spells out the various
+ # combinations in which the basic unit of an IPv6 address (0..ffff) can be
+ # written, from :: to 1:2:3:4:5:6:7:8, plus the IPv4-in-IPv6 variants such
+ # as ::ffff:192.0.2.3.
+ #
+ # Note that we can't just use ipaddress.ip_address() because we also have to
+ # accept ranges in place of each component.
+
+ 'ipv6': re.compile(
+ r'''^
+ (?:{0}:){{7}}{0}| # uncompressed: 1:2:3:4:5:6:7:8
+ (?:{0}:){{1,6}}:| # compressed variants, which are all
+ (?:{0}:)(?::{0}){{1,6}}| # a::b for various lengths of a,b
+ (?:{0}:){{2}}(?::{0}){{1,5}}|
+ (?:{0}:){{3}}(?::{0}){{1,4}}|
+ (?:{0}:){{4}}(?::{0}){{1,3}}|
+ (?:{0}:){{5}}(?::{0}){{1,2}}|
+ (?:{0}:){{6}}(?::{0})| # ...all with 2 <= a+b <= 7
+ :(?::{0}){{1,6}}| # ::ffff(:ffff...)
+ {0}?::| # ffff::, ::
+ # ipv4-in-ipv6 variants
+ (?:0:){{6}}(?:{0}\.){{3}}{0}|
+ ::(?:ffff:)?(?:{0}\.){{3}}{0}|
+ (?:0:){{5}}ffff:(?:{0}\.){{3}}{0}
+ $
+ '''.format(ipv6_component), re.X | re.I
+ ),
+
+ # This matches a hostname or host pattern including [x:y(:z)] ranges.
+ #
+ # We roughly follow DNS rules here, but also allow ranges (and underscores).
+ # In the past, no systematic rules were enforced about inventory hostnames,
+ # but the parsing context (e.g. shlex.split(), fnmatch.fnmatch()) excluded
+ # various metacharacters anyway.
+ #
+ # We don't enforce DNS length restrictions here (63 characters per label,
+ # 253 characters total) or make any attempt to process IDNs.
+
+ 'hostname': re.compile(
+ r'''^
+ {label} # We must have at least one label
+ (?:\.{label})* # Followed by zero or more .labels
+ $
+ '''.format(label=label), re.X | re.I | re.UNICODE
+ ),
+
+}
+
+
+def parse_address(address, allow_ranges=False):
+ """
+ Takes a string and returns a (host, port) tuple. If the host is None, then
+ the string could not be parsed as a host identifier with an optional port
+ specification. If the port is None, then no port was specified.
+
+ The host identifier may be a hostname (qualified or not), an IPv4 address,
+ or an IPv6 address. If allow_ranges is True, then any of those may contain
+    [x:y] range specifications, e.g. foo[1:3] or foo[0:5]-bar[x:z].
+
+ The port number is an optional :NN suffix on an IPv4 address or host name,
+ or a mandatory :NN suffix on any square-bracketed expression: IPv6 address,
+ IPv4 address, or host name. (This means the only way to specify a port for
+ an IPv6 address is to enclose it in square brackets.)
+ """
+
+ # First, we extract the port number if one is specified.
+
+ port = None
+ for matching in ['bracketed_hostport', 'hostport']:
+ m = patterns[matching].match(address)
+ if m:
+ (address, port) = m.groups()
+ port = int(port)
+ continue
+
+ # What we're left with now must be an IPv4 or IPv6 address, possibly with
+ # numeric ranges, or a hostname with alphanumeric ranges.
+
+ host = None
+ for matching in ['ipv4', 'ipv6', 'hostname']:
+ m = patterns[matching].match(address)
+ if m:
+ host = address
+ continue
+
+ # If it isn't any of the above, we don't understand it.
+ if not host:
+ raise AnsibleError("Not a valid network hostname: %s" % address)
+
+ # If we get to this point, we know that any included ranges are valid.
+ # If the caller is prepared to handle them, all is well.
+ # Otherwise we treat it as a parse failure.
+ if not allow_ranges and '[' in host:
+ raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
+
+ return (host, port)
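
A usage sketch for parse_address(), assuming the ansible package is importable; as the docstring notes, an IPv6 address can only carry a port when bracketed:

    from ansible.parsing.utils.addresses import parse_address

    print(parse_address('192.0.2.1:22'))       # -> ('192.0.2.1', 22)
    print(parse_address('[2001:db8::1]:22'))   # -> ('2001:db8::1', 22)
    print(parse_address('web[1:3].example.com', allow_ranges=True))
    # -> ('web[1:3].example.com', None)
    parse_address('web[1:3].example.com')      # raises AnsibleParserError
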
diff --git a/lib/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py
new file mode 100644
index 00000000..19ebc565
--- /dev/null
+++ b/lib/ansible/parsing/utils/jsonify.py
@@ -0,0 +1,38 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def jsonify(result, format=False):
+    ''' format JSON output (compressed or uncompressed) '''
+
+ if result is None:
+ return "{}"
+
+ indent = None
+ if format:
+ indent = 4
+
+ try:
+ return json.dumps(result, sort_keys=True, indent=indent, ensure_ascii=False)
+ except UnicodeDecodeError:
+ return json.dumps(result, sort_keys=True, indent=indent)
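
A usage sketch for jsonify(), assuming the package is importable:

    from ansible.parsing.utils.jsonify import jsonify

    print(jsonify(None))                   # -> {}
    print(jsonify({'b': 1, 'a': 2}))       # -> {"a": 2, "b": 1} (keys sorted, compact)
    print(jsonify({'a': 2}, format=True))  # same data, pretty-printed with indent=4
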
diff --git a/lib/ansible/parsing/utils/yaml.py b/lib/ansible/parsing/utils/yaml.py
new file mode 100644
index 00000000..8dd0550e
--- /dev/null
+++ b/lib/ansible/parsing/utils/yaml.py
@@ -0,0 +1,83 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from yaml import YAMLError
+
+from ansible.errors import AnsibleParserError
+from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
+from ansible.module_utils._text import to_native, to_text
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+from ansible.parsing.ajson import AnsibleJSONDecoder
+
+
+__all__ = ('from_yaml',)
+
+
+def _handle_error(json_exc, yaml_exc, file_name, show_content):
+ '''
+ Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
+ file name/position where a YAML exception occurred, and raises an AnsibleParserError
+ to display the syntax exception information.
+ '''
+
+ # if the YAML exception contains a problem mark, use it to construct
+ # an object the error class can use to display the faulty line
+ err_obj = None
+ if hasattr(yaml_exc, 'problem_mark'):
+ err_obj = AnsibleBaseYAMLObject()
+ err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
+
+    err_msg = 'We were unable to read the data as either JSON or YAML; these are the errors we got from each:\n' \
+ 'JSON: %s\n\n' % to_text(json_exc) + YAML_SYNTAX_ERROR % getattr(yaml_exc, 'problem', '')
+
+ raise AnsibleParserError(to_native(err_msg), obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
+
+
+def _safe_load(stream, file_name=None, vault_secrets=None):
+ ''' Implements yaml.safe_load(), except using our custom loader class. '''
+
+ loader = AnsibleLoader(stream, file_name, vault_secrets)
+ try:
+ return loader.get_single_data()
+ finally:
+ try:
+ loader.dispose()
+ except AttributeError:
+ pass # older versions of yaml don't have dispose function, ignore
+
+
+def from_yaml(data, file_name='<string>', show_content=True, vault_secrets=None, json_only=False):
+ '''
+ Creates a python datastructure from the given data, which can be either
+ a JSON or YAML string.
+ '''
+ new_data = None
+
+ try:
+ # in case we have to deal with vaults
+ AnsibleJSONDecoder.set_secrets(vault_secrets)
+
+ # we first try to load this data as JSON.
+ # Fixes issues with extra vars json strings not being parsed correctly by the yaml parser
+ new_data = json.loads(data, cls=AnsibleJSONDecoder)
+ except Exception as json_exc:
+
+ if json_only:
+ raise AnsibleParserError(to_native(json_exc), orig_exc=json_exc)
+
+ # must not be JSON, let the rest try
+ try:
+ new_data = _safe_load(data, file_name=file_name, vault_secrets=vault_secrets)
+ except YAMLError as yaml_exc:
+ _handle_error(json_exc, yaml_exc, file_name, show_content)
+
+ return new_data
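
A usage sketch for from_yaml(), assuming the package is importable; JSON is attempted first, then YAML:

    from ansible.parsing.utils.yaml import from_yaml

    print(from_yaml('{"a": 1}'))         # handled by the JSON decoder -> {'a': 1}
    print(from_yaml('a: 1'))             # falls back to YAML -> {'a': 1}
    from_yaml('a: 1', json_only=True)    # raises AnsibleParserError
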
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
new file mode 100644
index 00000000..6cf5dc72
--- /dev/null
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -0,0 +1,1380 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import fcntl
+import os
+import random
+import shlex
+import shutil
+import subprocess
+import sys
+import tempfile
+import warnings
+
+from binascii import hexlify
+from binascii import unhexlify
+from binascii import Error as BinasciiError
+
+HAS_CRYPTOGRAPHY = False
+HAS_PYCRYPTO = False
+HAS_SOME_PYCRYPTO = False
+CRYPTOGRAPHY_BACKEND = None
+try:
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ from cryptography.exceptions import InvalidSignature
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes, padding
+ from cryptography.hazmat.primitives.hmac import HMAC
+ from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
+ from cryptography.hazmat.primitives.ciphers import (
+ Cipher as C_Cipher, algorithms, modes
+ )
+ CRYPTOGRAPHY_BACKEND = default_backend()
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ pass
+
+try:
+ from Crypto.Cipher import AES as AES_pycrypto
+ HAS_SOME_PYCRYPTO = True
+
+ # Note: Only used for loading obsolete VaultAES files. All files are written
+ # using the newer VaultAES256 which does not require md5
+ from Crypto.Hash import SHA256 as SHA256_pycrypto
+ from Crypto.Hash import HMAC as HMAC_pycrypto
+
+ # Counter import fails for 2.0.1, requires >= 2.6.1 from pip
+ from Crypto.Util import Counter as Counter_pycrypto
+
+ # KDF import fails for 2.0.1, requires >= 2.6.1 from pip
+ from Crypto.Protocol.KDF import PBKDF2 as PBKDF2_pycrypto
+ HAS_PYCRYPTO = True
+except ImportError:
+ pass
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible import constants as C
+from ansible.module_utils.six import PY3, binary_type
+# Note: on py2, this zip is izip not the list based zip() builtin
+from ansible.module_utils.six.moves import zip
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.utils.display import Display
+from ansible.utils.path import makedirs_safe
+
+display = Display()
+
+
+b_HEADER = b'$ANSIBLE_VAULT'
+CIPHER_WHITELIST = frozenset((u'AES256',))
+CIPHER_WRITE_WHITELIST = frozenset((u'AES256',))
+# See also CIPHER_MAPPING at the bottom of the file which maps cipher strings
+# (used in VaultFile header) to a cipher class
+
+NEED_CRYPTO_LIBRARY = "ansible-vault requires either the cryptography library (preferred) or"
+if HAS_SOME_PYCRYPTO:
+ NEED_CRYPTO_LIBRARY += " a newer version of"
+NEED_CRYPTO_LIBRARY += " pycrypto in order to function."
+
+
+class AnsibleVaultError(AnsibleError):
+ pass
+
+
+class AnsibleVaultPasswordError(AnsibleVaultError):
+ pass
+
+
+class AnsibleVaultFormatError(AnsibleError):
+ pass
+
+
+def is_encrypted(data):
+    """ Test if this is a vault-encrypted data blob
+
+ :arg data: a byte or text string to test whether it is recognized as vault
+ encrypted data
+ :returns: True if it is recognized. Otherwise, False.
+ """
+ try:
+ # Make sure we have a byte string and that it only contains ascii
+ # bytes.
+ b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
+ except (UnicodeError, TypeError):
+ # The vault format is pure ascii so if we failed to encode to bytes
+ # via ascii we know that this is not vault data.
+ # Similarly, if it's not a string, it's not vault data
+ return False
+
+ if b_data.startswith(b_HEADER):
+ return True
+ return False
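
Behavioral examples for is_encrypted() (a sketch, assuming the package is importable); only the leading b'$ANSIBLE_VAULT' header is checked:

    from ansible.parsing.vault import is_encrypted

    assert is_encrypted(u'$ANSIBLE_VAULT;1.1;AES256\n61316231')
    assert not is_encrypted(u'plain text')
    assert not is_encrypted(b'\xc3\x28')  # not pure ascii, so not vault data
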
+
+
+def is_encrypted_file(file_obj, start_pos=0, count=-1):
+ """Test if the contents of a file obj are a vault encrypted data blob.
+
+ :arg file_obj: A file object that will be read from.
+ :kwarg start_pos: A byte offset in the file to start reading the header
+ from. Defaults to 0, the beginning of the file.
+ :kwarg count: Read up to this number of bytes from the file to determine
+ if it looks like encrypted vault data. The default is -1, read to the
+ end of file.
+ :returns: True if the file looks like a vault file. Otherwise, False.
+ """
+ # read the header and reset the file stream to where it started
+ current_position = file_obj.tell()
+ try:
+ file_obj.seek(start_pos)
+ return is_encrypted(file_obj.read(count))
+
+ finally:
+ file_obj.seek(current_position)
+
+
+def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
+
+ b_tmpdata = b_vaulttext_envelope.splitlines()
+ b_tmpheader = b_tmpdata[0].strip().split(b';')
+
+ b_version = b_tmpheader[1].strip()
+ cipher_name = to_text(b_tmpheader[2].strip())
+ vault_id = default_vault_id
+
+ # Only attempt to find vault_id if the vault file is version 1.2 or newer
+ # if self.b_version == b'1.2':
+ if len(b_tmpheader) >= 4:
+ vault_id = to_text(b_tmpheader[3].strip())
+
+ b_ciphertext = b''.join(b_tmpdata[1:])
+
+ return b_ciphertext, b_version, cipher_name, vault_id
+
+
+def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None):
+ """Parse the vaulttext envelope
+
+ When data is saved, it has a header prepended and is formatted into 80
+ character lines. This method extracts the information from the header
+ and then removes the header and the inserted newlines. The string returned
+ is suitable for processing by the Cipher classes.
+
+    :arg b_vaulttext_envelope: byte str containing the data from a save file
+ :kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
+ :kwarg filename: The filename that the data came from. This is only
+ used to make better error messages in case the data cannot be
+ decrypted. This is optional.
+    :returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaulttext,
+ a byte str of the vault format version,
+ the name of the cipher used, and the vault_id.
+ :raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid
+ """
+ # used by decrypt
+ default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY
+
+ try:
+ return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
+ except Exception as exc:
+ msg = "Vault envelope format error"
+ if filename:
+ msg += ' in %s' % (filename)
+ msg += ': %s' % exc
+ raise AnsibleVaultFormatError(msg)
+
+
+def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
+ """ Add header and format to 80 columns
+
+ :arg b_ciphertext: the encrypted and hexlified data as a byte string
+ :arg cipher_name: unicode cipher name (for ex, u'AES256')
+ :arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default)
+ :arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2.
+ :returns: a byte str that should be dumped into a file. It's
+ formatted to 80 char columns and has the header prepended
+ """
+
+ if not cipher_name:
+ raise AnsibleError("the cipher must be set before adding a header")
+
+ version = version or '1.1'
+
+ # If we specify a vault_id, use format version 1.2. For no vault_id, stick to 1.1
+ if vault_id and vault_id != u'default':
+ version = '1.2'
+
+ b_version = to_bytes(version, 'utf-8', errors='strict')
+ b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict')
+ b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict')
+
+ header_parts = [b_HEADER,
+ b_version,
+ b_cipher_name]
+
+ if b_version == b'1.2' and b_vault_id:
+ header_parts.append(b_vault_id)
+
+ header = b';'.join(header_parts)
+
+ b_vaulttext = [header]
+ b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
+ b_vaulttext += [b'']
+ b_vaulttext = b'\n'.join(b_vaulttext)
+
+ return b_vaulttext
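
A round trip through the two envelope helpers (a sketch, assuming the package is importable); supplying a vault_id bumps the header version to 1.2:

    from ansible.parsing.vault import (format_vaulttext_envelope,
                                       parse_vaulttext_envelope)

    envelope = format_vaulttext_envelope(b'61623132' * 30, u'AES256', vault_id=u'dev')
    b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(envelope)
    # b_version == b'1.2', cipher_name == 'AES256', vault_id == 'dev'
    # b_ciphertext is the payload with the 80-column newlines stripped back out
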
+
+
+def _unhexlify(b_data):
+ try:
+ return unhexlify(b_data)
+ except (BinasciiError, TypeError) as exc:
+ raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc)
+
+
+def _parse_vaulttext(b_vaulttext):
+ b_vaulttext = _unhexlify(b_vaulttext)
+ b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
+ b_salt = _unhexlify(b_salt)
+ b_ciphertext = _unhexlify(b_ciphertext)
+
+ return b_ciphertext, b_salt, b_crypted_hmac
+
+
+def parse_vaulttext(b_vaulttext):
+ """Parse the vaulttext
+
+ :arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac)
+ :returns: A tuple of byte str of the ciphertext suitable for passing to a
+ Cipher class's decrypt() function, a byte str of the salt,
+ and a byte str of the crypted_hmac
+ :raises: AnsibleVaultFormatError: if the vaulttext format is invalid
+ """
+ # SPLIT SALT, DIGEST, AND DATA
+ try:
+ return _parse_vaulttext(b_vaulttext)
+ except AnsibleVaultFormatError:
+ raise
+ except Exception as exc:
+ msg = "Vault vaulttext format error: %s" % exc
+ raise AnsibleVaultFormatError(msg)
+
+
+def verify_secret_is_not_empty(secret, msg=None):
+ '''Check the secret against minimal requirements.
+
+ Raises: AnsibleVaultPasswordError if the password does not meet requirements.
+
+    Currently, the only requirement is that the password is not None or an empty string.
+ '''
+ msg = msg or 'Invalid vault password was provided'
+ if not secret:
+ raise AnsibleVaultPasswordError(msg)
+
+
+class VaultSecret:
+    '''Opaque/abstract object for a single vault secret, i.e., a password or a key.'''
+
+ def __init__(self, _bytes=None):
+ # FIXME: ? that seems wrong... Unset etc?
+ self._bytes = _bytes
+
+ @property
+ def bytes(self):
+ '''The secret as a bytestring.
+
+        Subclasses that store text types will need to override to encode the text to bytes.
+ '''
+ return self._bytes
+
+ def load(self):
+ return self._bytes
+
+
+class PromptVaultSecret(VaultSecret):
+ default_prompt_formats = ["Vault password (%s): "]
+
+ def __init__(self, _bytes=None, vault_id=None, prompt_formats=None):
+ super(PromptVaultSecret, self).__init__(_bytes=_bytes)
+ self.vault_id = vault_id
+
+ if prompt_formats is None:
+ self.prompt_formats = self.default_prompt_formats
+ else:
+ self.prompt_formats = prompt_formats
+
+ @property
+ def bytes(self):
+ return self._bytes
+
+ def load(self):
+ self._bytes = self.ask_vault_passwords()
+
+ def ask_vault_passwords(self):
+ b_vault_passwords = []
+
+ for prompt_format in self.prompt_formats:
+ prompt = prompt_format % {'vault_id': self.vault_id}
+ try:
+ vault_pass = display.prompt(prompt, private=True)
+ except EOFError:
+ raise AnsibleVaultError('EOFError (ctrl-d) on prompt for (%s)' % self.vault_id)
+
+ verify_secret_is_not_empty(vault_pass)
+
+ b_vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
+ b_vault_passwords.append(b_vault_pass)
+
+ # Make sure the passwords match by comparing them all to the first password
+ for b_vault_password in b_vault_passwords:
+ self.confirm(b_vault_passwords[0], b_vault_password)
+
+ if b_vault_passwords:
+ return b_vault_passwords[0]
+
+ return None
+
+ def confirm(self, b_vault_pass_1, b_vault_pass_2):
+ # enforce no newline chars at the end of passwords
+
+ if b_vault_pass_1 != b_vault_pass_2:
+ # FIXME: more specific exception
+ raise AnsibleError("Passwords do not match")
+
+
+def script_is_client(filename):
+ '''Determine if a vault secret script is a client script that can be given --vault-id args'''
+
+ # if password script is 'something-client' or 'something-client.[sh|py|rb|etc]'
+ # script_name can still have '.' or could be entire filename if there is no ext
+ script_name, dummy = os.path.splitext(filename)
+
+ # TODO: for now, this is entirely based on filename
+ if script_name.endswith('-client'):
+ return True
+
+ return False
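
Behavioral examples for script_is_client(); as the TODO notes, the decision is purely name-based:

    from ansible.parsing.vault import script_is_client

    assert script_is_client('my-vault-client.py')
    assert not script_is_client('vault-pass.sh')
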
+
+
+def get_file_vault_secret(filename=None, vault_id=None, encoding=None, loader=None):
+ this_path = os.path.realpath(os.path.expanduser(filename))
+
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
+
+ if loader.is_executable(this_path):
+ if script_is_client(filename):
+ display.vvvv(u'The vault password file %s is a client script.' % to_text(filename))
+ # TODO: pass vault_id_name to script via cli
+ return ClientScriptVaultSecret(filename=this_path, vault_id=vault_id,
+ encoding=encoding, loader=loader)
+ # just a plain vault password script. No args, returns a byte array
+ return ScriptVaultSecret(filename=this_path, encoding=encoding, loader=loader)
+
+ return FileVaultSecret(filename=this_path, encoding=encoding, loader=loader)
+
+
+# TODO: mv these classes to a separate file so we don't pollute vault with 'subprocess' etc
+class FileVaultSecret(VaultSecret):
+ def __init__(self, filename=None, encoding=None, loader=None):
+ super(FileVaultSecret, self).__init__()
+ self.filename = filename
+ self.loader = loader
+
+ self.encoding = encoding or 'utf8'
+
+ # We could load from file here, but that is eventually a pain to test
+ self._bytes = None
+ self._text = None
+
+ @property
+ def bytes(self):
+ if self._bytes:
+ return self._bytes
+ if self._text:
+ return self._text.encode(self.encoding)
+ return None
+
+ def load(self):
+ self._bytes = self._read_file(self.filename)
+
+ def _read_file(self, filename):
+ """
+        Read a vault password from a file or, if the file is executable, execute
+        the script and retrieve the password from STDOUT
+ """
+
+ # TODO: replace with use of self.loader
+ try:
+ f = open(filename, "rb")
+ vault_pass = f.read().strip()
+ f.close()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (filename, e))
+
+ b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename)
+
+ vault_pass = b_vault_data.strip(b'\r\n')
+
+ verify_secret_is_not_empty(vault_pass,
+ msg='Invalid vault password was provided from file (%s)' % filename)
+
+ return vault_pass
+
+ def __repr__(self):
+ if self.filename:
+ return "%s(filename='%s')" % (self.__class__.__name__, self.filename)
+ return "%s()" % (self.__class__.__name__)
+
+
+class ScriptVaultSecret(FileVaultSecret):
+ def _read_file(self, filename):
+ if not self.loader.is_executable(filename):
+ raise AnsibleVaultError("The vault password script %s was not executable" % filename)
+
+ command = self._build_command()
+
+ stdout, stderr, p = self._run(command)
+
+ self._check_results(stdout, stderr, p)
+
+ vault_pass = stdout.strip(b'\r\n')
+
+ empty_password_msg = 'Invalid vault password was provided from script (%s)' % filename
+ verify_secret_is_not_empty(vault_pass,
+ msg=empty_password_msg)
+
+ return vault_pass
+
+ def _run(self, command):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(command, stdout=subprocess.PIPE)
+ except OSError as e:
+ msg_format = "Problem running vault password script %s (%s)." \
+ " If this is not a script, remove the executable bit from the file."
+ msg = msg_format % (self.filename, e)
+
+ raise AnsibleError(msg)
+
+ stdout, stderr = p.communicate()
+ return stdout, stderr, p
+
+ def _check_results(self, stdout, stderr, popen):
+ if popen.returncode != 0:
+ raise AnsibleError("Vault password script %s returned non-zero (%s): %s" %
+ (self.filename, popen.returncode, stderr))
+
+ def _build_command(self):
+ return [self.filename]
+
+
+class ClientScriptVaultSecret(ScriptVaultSecret):
+ VAULT_ID_UNKNOWN_RC = 2
+
+ def __init__(self, filename=None, encoding=None, loader=None, vault_id=None):
+ super(ClientScriptVaultSecret, self).__init__(filename=filename,
+ encoding=encoding,
+ loader=loader)
+ self._vault_id = vault_id
+ display.vvvv(u'Executing vault password client script: %s --vault-id %s' % (to_text(filename), to_text(vault_id)))
+
+ def _run(self, command):
+ try:
+ p = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ except OSError as e:
+ msg_format = "Problem running vault password client script %s (%s)." \
+ " If this is not a script, remove the executable bit from the file."
+ msg = msg_format % (self.filename, e)
+
+ raise AnsibleError(msg)
+
+ stdout, stderr = p.communicate()
+ return stdout, stderr, p
+
+ def _check_results(self, stdout, stderr, popen):
+ if popen.returncode == self.VAULT_ID_UNKNOWN_RC:
+ raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' %
+ (self.filename, self._vault_id, stderr))
+
+ if popen.returncode != 0:
+ raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" %
+ (self.filename, popen.returncode, self._vault_id, stderr))
+
+ def _build_command(self):
+ command = [self.filename]
+ if self._vault_id:
+ command.extend(['--vault-id', self._vault_id])
+
+ return command
+
+ def __repr__(self):
+ if self.filename:
+ return "%s(filename='%s', vault_id='%s')" % \
+ (self.__class__.__name__, self.filename, self._vault_id)
+ return "%s()" % (self.__class__.__name__)
+
+
+def match_secrets(secrets, target_vault_ids):
+ '''Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets'''
+ if not secrets:
+ return []
+
+ matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids]
+ return matches
+
+
+def match_best_secret(secrets, target_vault_ids):
+ '''Find the best secret from secrets that matches target_vault_ids
+
+ Since secrets should be ordered so the early secrets are 'better' than later ones, this
+ just finds all the matches, then returns the first secret'''
+ matches = match_secrets(secrets, target_vault_ids)
+ if matches:
+ return matches[0]
+ # raise exception?
+ return None
+
+
+def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None):
+ # See if the --encrypt-vault-id matches a vault-id
+ display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
+
+ if encrypt_vault_id is None:
+ raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id')
+
+ encrypt_vault_id_matchers = [encrypt_vault_id]
+ encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers)
+
+ # return the best match for --encrypt-vault-id
+ if encrypt_secret:
+ return encrypt_secret
+
+    # If we specified an encrypt_vault_id and we couldn't find it, don't
+    # fall back to using the first/best secret
+ raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id,
+ [_v for _v, _vs in secrets]))
+
+
+def match_encrypt_secret(secrets, encrypt_vault_id=None):
+ '''Find the best/first/only secret in secrets to use for encrypting'''
+
+ display.vvvv(u'encrypt_vault_id=%s' % to_text(encrypt_vault_id))
+ # See if the --encrypt-vault-id matches a vault-id
+ if encrypt_vault_id:
+ return match_encrypt_vault_id_secret(secrets,
+ encrypt_vault_id=encrypt_vault_id)
+
+    # Find the best/first secret from secrets since we didn't specify otherwise
+    # ie, consider all of the available secrets as matches
+ _vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
+ best_secret = match_best_secret(secrets, _vault_id_matchers)
+
+    # may be None if there were no matching secrets
+ return best_secret
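
A sketch of how the matching helpers compose (assuming the package is importable); secrets are (vault_id, VaultSecret) tuples, ordered best-first:

    from ansible.parsing.vault import VaultSecret, match_secrets, match_best_secret

    secrets = [(u'dev', VaultSecret(b'devpass')),
               (u'prod', VaultSecret(b'prodpass'))]

    print(match_secrets(secrets, [u'prod']))                 # -> [('prod', <VaultSecret>)]
    print(match_best_secret(secrets, [u'dev', u'prod'])[0])  # -> 'dev' (earlier entry wins)
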
+
+
+class VaultLib:
+ def __init__(self, secrets=None):
+ self.secrets = secrets or []
+ self.cipher_name = None
+ self.b_version = b'1.2'
+
+ @staticmethod
+ def is_encrypted(vaulttext):
+ return is_encrypted(vaulttext)
+
+ def encrypt(self, plaintext, secret=None, vault_id=None):
+ """Vault encrypt a piece of data.
+
+ :arg plaintext: a text or byte string to encrypt.
+ :returns: a utf-8 encoded byte str of encrypted data. The string
+ contains a header identifying this as vault encrypted data and
+ formatted to newline terminated lines of 80 characters. This is
+ suitable for dumping as is to a vault file.
+
+ If the string passed in is a text string, it will be encoded to UTF-8
+ before encryption.
+ """
+
+ if secret is None:
+ if self.secrets:
+ dummy, secret = match_encrypt_secret(self.secrets)
+ else:
+ raise AnsibleVaultError("A vault password must be specified to encrypt data")
+
+ b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict')
+
+ if is_encrypted(b_plaintext):
+ raise AnsibleError("input is already encrypted")
+
+ if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST:
+ self.cipher_name = u"AES256"
+
+ try:
+ this_cipher = CIPHER_MAPPING[self.cipher_name]()
+ except KeyError:
+ raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name))
+
+ # encrypt data
+ if vault_id:
+ display.vvvvv(u'Encrypting with vault_id "%s" and vault secret %s' % (to_text(vault_id), to_text(secret)))
+ else:
+ display.vvvvv(u'Encrypting without a vault_id using vault secret %s' % to_text(secret))
+
+ b_ciphertext = this_cipher.encrypt(b_plaintext, secret)
+
+ # format the data for output to the file
+ b_vaulttext = format_vaulttext_envelope(b_ciphertext,
+ self.cipher_name,
+ vault_id=vault_id)
+ return b_vaulttext
+
+ def decrypt(self, vaulttext, filename=None):
+ '''Decrypt a piece of vault encrypted data.
+
+ :arg vaulttext: a string to decrypt. Since vault encrypted data is an
+ ascii text format this can be either a byte str or unicode string.
+ :kwarg filename: a filename that the data came from. This is only
+ used to make better error messages in case the data cannot be
+ decrypted.
+        :returns: a byte string containing the decrypted data
+
+ '''
+ plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename)
+ return plaintext
+
+ def decrypt_and_get_vault_id(self, vaulttext, filename=None):
+ """Decrypt a piece of vault encrypted data.
+
+ :arg vaulttext: a string to decrypt. Since vault encrypted data is an
+ ascii text format this can be either a byte str or unicode string.
+ :kwarg filename: a filename that the data came from. This is only
+ used to make better error messages in case the data cannot be
+ decrypted.
+        :returns: a byte string containing the decrypted data, and the vault-id and vault-secret that were used
+
+ """
+ b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
+
+ if self.secrets is None:
+ raise AnsibleVaultError("A vault password must be specified to decrypt data")
+
+ if not is_encrypted(b_vaulttext):
+ msg = "input is not vault encrypted data"
+ if filename:
+                msg += "; %s is not a vault encrypted file" % to_native(filename)
+ raise AnsibleError(msg)
+
+ b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext,
+ filename=filename)
+
+ # create the cipher object, note that the cipher used for decrypt can
+ # be different than the cipher used for encrypt
+ if cipher_name in CIPHER_WHITELIST:
+ this_cipher = CIPHER_MAPPING[cipher_name]()
+ else:
+ raise AnsibleError("{0} cipher could not be found".format(cipher_name))
+
+ b_plaintext = None
+
+ if not self.secrets:
+ raise AnsibleVaultError('Attempting to decrypt but no vault secrets found')
+
+ # WARNING: Currently, the vault id is not required to match the vault id in the vault blob to
+ # decrypt a vault properly. The vault id in the vault blob is not part of the encrypted
+ # or signed vault payload. There is no cryptographic checking/verification/validation of the
+        # vault blob's vault id. It can be tampered with and changed. The vault id is just a
+        # nickname used to pick the best secret and provide some ux/ui info.
+
+ # iterate over all the applicable secrets (all of them by default) until one works...
+ # if we specify a vault_id, only the corresponding vault secret is checked and
+ # we check it first.
+
+ vault_id_matchers = []
+ vault_id_used = None
+ vault_secret_used = None
+
+ if vault_id:
+ display.vvvvv(u'Found a vault_id (%s) in the vaulttext' % to_text(vault_id))
+ vault_id_matchers.append(vault_id)
+ _matches = match_secrets(self.secrets, vault_id_matchers)
+ if _matches:
+ display.vvvvv(u'We have a secret associated with vault id (%s), will try to use to decrypt %s' % (to_text(vault_id), to_text(filename)))
+ else:
+                display.vvvvv(u'Found a vault_id (%s) in the vault text, but we do not have an associated secret (--vault-id)' % to_text(vault_id))
+
+ # Not adding the other secrets to vault_secret_ids enforces a match between the vault_id from the vault_text and
+ # the known vault secrets.
+ if not C.DEFAULT_VAULT_ID_MATCH:
+ # Add all of the known vault_ids as candidates for decrypting a vault.
+ vault_id_matchers.extend([_vault_id for _vault_id, _dummy in self.secrets if _vault_id != vault_id])
+
+ matched_secrets = match_secrets(self.secrets, vault_id_matchers)
+
+ # for vault_secret_id in vault_secret_ids:
+ for vault_secret_id, vault_secret in matched_secrets:
+ display.vvvvv(u'Trying to use vault secret=(%s) id=%s to decrypt %s' % (to_text(vault_secret), to_text(vault_secret_id), to_text(filename)))
+
+ try:
+ # secret = self.secrets[vault_secret_id]
+ display.vvvv(u'Trying secret %s for vault_id=%s' % (to_text(vault_secret), to_text(vault_secret_id)))
+ b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret)
+ if b_plaintext is not None:
+ vault_id_used = vault_secret_id
+ vault_secret_used = vault_secret
+ file_slug = ''
+ if filename:
+ file_slug = ' of "%s"' % filename
+ display.vvvvv(
+ u'Decrypt%s successful with secret=%s and vault_id=%s' % (to_text(file_slug), to_text(vault_secret), to_text(vault_secret_id))
+ )
+ break
+ except AnsibleVaultFormatError as exc:
+ msg = u"There was a vault format error"
+ if filename:
+ msg += u' in %s' % (to_text(filename))
+ msg += u': %s' % exc
+ display.warning(msg)
+ raise
+ except AnsibleError as e:
+ display.vvvv(u'Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
+ (to_text(vault_secret_id), to_text(filename), e))
+ continue
+ else:
+ msg = "Decryption failed (no vault secrets were found that could decrypt)"
+ if filename:
+ msg += " on %s" % to_native(filename)
+ raise AnsibleVaultError(msg)
+
+ if b_plaintext is None:
+ msg = "Decryption failed"
+ if filename:
+ msg += " on %s" % to_native(filename)
+ raise AnsibleError(msg)
+
+ return b_plaintext, vault_id_used, vault_secret_used
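
An end-to-end encrypt/decrypt sketch for VaultLib; this assumes the 'cryptography' package is installed so the AES256 cipher is available:

    from ansible.parsing.vault import VaultLib, VaultSecret

    secret = VaultSecret(b'hunter2')
    vault = VaultLib(secrets=[(u'default', secret)])

    b_vaulttext = vault.encrypt(u'supersecret')  # bytes: header plus 80-column body
    assert vault.decrypt(b_vaulttext) == b'supersecret'

    b_pt, vault_id, vault_secret = vault.decrypt_and_get_vault_id(b_vaulttext)
    # vault_id == 'default', vault_secret is the VaultSecret that worked
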
+
+
+class VaultEditor:
+
+ def __init__(self, vault=None):
+        # TODO: it may be more useful to just make VaultSecrets an index of VaultLib objects...
+ self.vault = vault or VaultLib()
+
+    # TODO: mv shred file stuff to its own class
+ def _shred_file_custom(self, tmp_path):
+        """Destroy a file, when shred (coreutils) is not available
+
+ Unix `shred' destroys files "so that they can be recovered only with great difficulty with
+ specialised hardware, if at all". It is based on the method from the paper
+ "Secure Deletion of Data from Magnetic and Solid-State Memory",
+ Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
+
+ We do not go to that length to re-implement shred in Python; instead, overwriting with a block
+ of random data should suffice.
+
+ See https://github.com/ansible/ansible/pull/13700 .
+ """
+
+ file_len = os.path.getsize(tmp_path)
+
+ if file_len > 0: # avoid work when file was empty
+ max_chunk_len = min(1024 * 1024 * 2, file_len)
+
+ passes = 3
+ with open(tmp_path, "wb") as fh:
+ for _ in range(passes):
+ fh.seek(0, 0)
+                    # get a random chunk of data, with a different length on each pass
+ chunk_len = random.randint(max_chunk_len // 2, max_chunk_len)
+ data = os.urandom(chunk_len)
+
+ for _ in range(0, file_len // chunk_len):
+ fh.write(data)
+ fh.write(data[:file_len % chunk_len])
+
+ # FIXME remove this assert once we have unittests to check its accuracy
+ if fh.tell() != file_len:
+ raise AnsibleAssertionError()
+
+ os.fsync(fh)
+
+ def _shred_file(self, tmp_path):
+ """Securely destroy a decrypted file
+
+        Note standard limitations of GNU shred apply (for flash, overwriting would have no effect
+        due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
+        guarantee data hits the disk; etc.). Furthermore, if your tmp dir is on tmpfs (a ramdisk),
+ it is a non-issue.
+
+ Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
+        a good idea. If shred is not available (e.g. on Windows, or coreutils is not installed), fall back on
+ a custom shredding method.
+ """
+
+ if not os.path.isfile(tmp_path):
+ # file is already gone
+ return
+
+ try:
+ r = subprocess.call(['shred', tmp_path])
+ except (OSError, ValueError):
+ # shred is not available on this system, or some other error occurred.
+ # ValueError caught because macOS El Capitan is raising an
+ # exception big enough to hit a limit in python2-2.7.11 and below.
+ # Symptom is ValueError: insecure pickle when shred is not
+ # installed there.
+ r = 1
+
+ if r != 0:
+ # we could not successfully execute unix shred; therefore, do custom shred.
+ self._shred_file_custom(tmp_path)
+
+ os.remove(tmp_path)
+
+ def _edit_file_helper(self, filename, secret, existing_data=None, force_save=False, vault_id=None):
+
+ # Create a tempfile
+ root, ext = os.path.splitext(os.path.realpath(filename))
+ fd, tmp_path = tempfile.mkstemp(suffix=ext, dir=C.DEFAULT_LOCAL_TMP)
+
+ cmd = self._editor_shell_command(tmp_path)
+ try:
+ if existing_data:
+ self.write_data(existing_data, fd, shred=False)
+ except Exception:
+ # if an error happens, destroy the decrypted file
+ self._shred_file(tmp_path)
+ raise
+ finally:
+ os.close(fd)
+
+ try:
+ # drop the user into an editor on the tmp file
+ subprocess.call(cmd)
+ except Exception as e:
+ # if an error happens, destroy the decrypted file
+ self._shred_file(tmp_path)
+ raise AnsibleError('Unable to execute the command "%s": %s' % (' '.join(cmd), to_native(e)))
+
+ b_tmpdata = self.read_data(tmp_path)
+
+ # Do nothing if the content has not changed
+ if force_save or existing_data != b_tmpdata:
+
+ # encrypt new data and write out to tmp
+ # An existing vaultfile will always be UTF-8,
+ # so decode to unicode here
+ b_ciphertext = self.vault.encrypt(b_tmpdata, secret, vault_id=vault_id)
+ self.write_data(b_ciphertext, tmp_path)
+
+ # shuffle tmp file into place
+ self.shuffle_files(tmp_path, filename)
+ display.vvvvv(u'Saved edited file "%s" encrypted using %s and vault id "%s"' % (to_text(filename), to_text(secret), to_text(vault_id)))
+
+ # always shred temp, jic
+ self._shred_file(tmp_path)
+
+ def _real_path(self, filename):
+ # '-' is special to VaultEditor, don't expand it.
+ if filename == '-':
+ return filename
+
+ real_path = os.path.realpath(filename)
+ return real_path
+
+ def encrypt_bytes(self, b_plaintext, secret, vault_id=None):
+
+ b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
+
+ return b_ciphertext
+
+ def encrypt_file(self, filename, secret, vault_id=None, output_file=None):
+
+ # A file to be encrypted into a vaultfile could be any encoding
+ # so treat the contents as a byte string.
+
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ b_plaintext = self.read_data(filename)
+ b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
+ self.write_data(b_ciphertext, output_file or filename)
+
+ def decrypt_file(self, filename, output_file=None):
+
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ ciphertext = self.read_data(filename)
+
+ try:
+ plaintext = self.vault.decrypt(ciphertext, filename=filename)
+ except AnsibleError as e:
+ raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
+ self.write_data(plaintext, output_file or filename, shred=False)
+
+ def create_file(self, filename, secret, vault_id=None):
+ """ create a new encrypted file """
+
+ dirname = os.path.dirname(filename)
+ if dirname and not os.path.exists(dirname):
+ display.warning(u"%s does not exist, creating..." % to_text(dirname))
+ makedirs_safe(dirname)
+
+ # FIXME: If we can raise an error here, we can probably just make it
+ # behave like edit instead.
+ if os.path.isfile(filename):
+ raise AnsibleError("%s exists, please use 'edit' instead" % filename)
+
+ self._edit_file_helper(filename, secret, vault_id=vault_id)
+
+ def edit_file(self, filename):
+ vault_id_used = None
+ vault_secret_used = None
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ b_vaulttext = self.read_data(filename)
+
+ # vault or yaml files are always utf8
+ vaulttext = to_text(b_vaulttext)
+
+ try:
+ # vaulttext gets converted back to bytes, but alas
+ # TODO: return the vault_id that worked?
+ plaintext, vault_id_used, vault_secret_used = self.vault.decrypt_and_get_vault_id(vaulttext)
+ except AnsibleError as e:
+ raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
+
+ # Figure out the vault id from the file, to select the right secret to re-encrypt it
+ # (duplicates parts of decrypt, but alas...)
+ dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext, filename=filename)
+
+ # The vault id here may not be the vault id actually used for decrypting, e.g. when
+ # the edited file has no vault-id in its header but was decrypted by a non-default
+ # id in secrets (vault_id=default, while a different vault-id did the decrypting)
+
+ # we want to get rid of files encrypted with the AES cipher
+ force_save = (cipher_name not in CIPHER_WRITE_WHITELIST)
+
+ # Keep the same vault-id (and version) as in the header
+ self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext, force_save=force_save, vault_id=vault_id)
+
+ def plaintext(self, filename):
+
+ b_vaulttext = self.read_data(filename)
+ vaulttext = to_text(b_vaulttext)
+
+ try:
+ plaintext = self.vault.decrypt(vaulttext, filename=filename)
+ return plaintext
+ except AnsibleError as e:
+ raise AnsibleVaultError("%s for %s" % (to_native(e), to_native(filename)))
+
+ # FIXME/TODO: make this use VaultSecret
+ def rekey_file(self, filename, new_vault_secret, new_vault_id=None):
+
+ # follow the symlink
+ filename = self._real_path(filename)
+
+ prev = os.stat(filename)
+ b_vaulttext = self.read_data(filename)
+ vaulttext = to_text(b_vaulttext)
+
+ display.vvvvv(u'Rekeying file "%s" with new vault-id "%s" and vault secret %s' %
+ (to_text(filename), to_text(new_vault_id), to_text(new_vault_secret)))
+ try:
+ plaintext, vault_id_used, _dummy = self.vault.decrypt_and_get_vault_id(vaulttext)
+ except AnsibleError as e:
+ raise AnsibleError("%s for %s" % (to_native(e), to_native(filename)))
+
+ # This is more or less an assert, see #18247
+ if new_vault_secret is None:
+ raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)
+
+ # FIXME: VaultContext...? could rekey to a different vault_id in the same VaultSecrets
+
+ # Need a new VaultLib because the new vault data can be a different
+ # vault lib format or cipher (for example, when we migrate 1.0 style vault data
+ # to 1.1 style data we change the version and the cipher). This is where a VaultContext might help
+
+ # the new vault will only be used for encrypting, so it doesn't need the vault secrets
+ # (we will pass one in directly to encrypt)
+ new_vault = VaultLib(secrets={})
+ b_new_vaulttext = new_vault.encrypt(plaintext, new_vault_secret, vault_id=new_vault_id)
+
+ self.write_data(b_new_vaulttext, filename)
+
+ # preserve permissions
+ os.chmod(filename, prev.st_mode)
+ os.chown(filename, prev.st_uid, prev.st_gid)
+
+ display.vvvvv(u'Rekeyed file "%s" (decrypted with vault id "%s") was encrypted with new vault-id "%s" and vault secret %s' %
+ (to_text(filename), to_text(vault_id_used), to_text(new_vault_id), to_text(new_vault_secret)))
+
+ def read_data(self, filename):
+
+ try:
+ if filename == '-':
+ data = sys.stdin.read()
+ else:
+ with open(filename, "rb") as fh:
+ data = fh.read()
+ except Exception as e:
+ msg = to_native(e)
+ if not msg:
+ msg = repr(e)
+ raise AnsibleError('Unable to read source file (%s): %s' % (to_native(filename), msg))
+
+ return data
+
+ def write_data(self, data, thefile, shred=True, mode=0o600):
+ # TODO: add docstrings for arg types since this code is picky about that
+ """Write the data bytes to given path
+
+ This is used to write a byte string to a file or stdout. It saves the
+ ciphertext after encryption and the plaintext after decrypting a vault.
+ The type of the 'data' arg should be bytes, since in the plaintext case
+ the original contents can be of any text encoding or arbitrary binary data.
+
+ When used to write the result of vault encryption, the value of the 'data'
+ arg should be a utf-8 encoded byte string and not a text type.
+
+ When used to write the result of vault decryption, the value of the 'data'
+ arg should likewise be a byte string and not a text type.
+
+ :arg data: the byte string (bytes) data
+ :arg thefile: file descriptor or filename to save 'data' to.
+ :arg shred: if shred==True, make sure that the original data is first shredded so that it cannot be recovered.
+ :returns: None
+ """
+ # FIXME: do we need this now? data_bytes should always be a utf-8 byte string
+ b_file_data = to_bytes(data, errors='strict')
+
+ # check if we have a file descriptor instead of a path
+ is_fd = False
+ try:
+ is_fd = (isinstance(thefile, int) and fcntl.fcntl(thefile, fcntl.F_GETFD) != -1)
+ except Exception:
+ pass
+
+ if is_fd:
+ # if passed descriptor, use that to ensure secure access, otherwise it is a string.
+ # assumes the fd is securely opened by caller (mkstemp)
+ os.ftruncate(thefile, 0)
+ os.write(thefile, b_file_data)
+ elif thefile == '-':
+ # get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2
+ # We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext
+ # of the vaulted object could be anything/binary/etc
+ output = getattr(sys.stdout, 'buffer', sys.stdout)
+ output.write(b_file_data)
+ else:
+ # file names are insecure and prone to race conditions, so remove and create securely
+ if os.path.isfile(thefile):
+ if shred:
+ self._shred_file(thefile)
+ else:
+ os.remove(thefile)
+
+ # os.umask returns the previous umask when setting a new one
+ current_umask = os.umask(0o077)
+ try:
+ try:
+ # create file with secure permissions
+ fd = os.open(thefile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC, mode)
+ except OSError as ose:
+ # Want to catch FileExistsError, which doesn't exist in Python 2, so catch OSError
+ # and compare the error number to get equivalent behavior in Python 2/3
+ if ose.errno == errno.EEXIST:
+ raise AnsibleError('Vault file got recreated while we were operating on it: %s' % to_native(ose))
+
+ raise AnsibleError('Problem creating temporary vault file: %s' % to_native(ose))
+
+ try:
+ # now write to the file and ensure ours is the only data in it
+ os.ftruncate(fd, 0)
+ os.write(fd, b_file_data)
+ except OSError as e:
+ raise AnsibleError('Unable to write to temporary vault file: %s' % to_native(e))
+ finally:
+ # Make sure the file descriptor is always closed (the umask is reset in the outer finally)
+ os.close(fd)
+ finally:
+ os.umask(current_umask)
+
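+ # Call-style sketch (illustrative; 'editor' stands for a VaultEditor
+ # instance): both forms write the same bytes.
+ #
+ # editor.write_data(b'payload', '/tmp/out.yml') # path: recreated with mode 0600
+ # fd, tmp = tempfile.mkstemp()
+ # editor.write_data(b'payload', fd) # fd: truncated and reused as-is
+ # os.close(fd)
+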
+ def shuffle_files(self, src, dest):
+ prev = None
+ # overwrite dest with src
+ if os.path.isfile(dest):
+ prev = os.stat(dest)
+ # old file 'dest' was encrypted, no need to _shred_file
+ os.remove(dest)
+ shutil.move(src, dest)
+
+ # reset permissions if needed
+ if prev is not None:
+ # TODO: selinux, ACLs, xattr?
+ os.chmod(dest, prev.st_mode)
+ os.chown(dest, prev.st_uid, prev.st_gid)
+
+ def _editor_shell_command(self, filename):
+ env_editor = os.environ.get('EDITOR', 'vi')
+ editor = shlex.split(env_editor)
+ editor.append(filename)
+
+ return editor
+
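+ # For example (illustrative; 'editor' stands for a VaultEditor instance):
+ # with EDITOR='code --wait', shlex.split keeps the flag as its own argv
+ # element, so subprocess.call receives a proper argument vector:
+ #
+ # os.environ['EDITOR'] = 'code --wait'
+ # editor._editor_shell_command('/tmp/f.yml') # -> ['code', '--wait', '/tmp/f.yml']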
+
+########################################
+# CIPHERS #
+########################################
+
+class VaultAES256:
+
+ """
+ Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
+ Keys are derived using PBKDF2.
+ """
+
+ # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
+
+ # Note: strings in this class should be byte strings by default.
+
+ def __init__(self):
+ if not HAS_CRYPTOGRAPHY and not HAS_PYCRYPTO:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY)
+
+ @staticmethod
+ def _create_key_cryptography(b_password, b_salt, key_length, iv_length):
+ kdf = PBKDF2HMAC(
+ algorithm=hashes.SHA256(),
+ length=2 * key_length + iv_length,
+ salt=b_salt,
+ iterations=10000,
+ backend=CRYPTOGRAPHY_BACKEND)
+ b_derivedkey = kdf.derive(b_password)
+
+ return b_derivedkey
+
+ @staticmethod
+ def _pbkdf2_prf(p, s):
+ hash_function = SHA256_pycrypto
+ return HMAC_pycrypto.new(p, s, hash_function).digest()
+
+ @classmethod
+ def _create_key_pycrypto(cls, b_password, b_salt, key_length, iv_length):
+
+ # make two keys and one iv
+
+ b_derivedkey = PBKDF2_pycrypto(b_password, b_salt, dkLen=(2 * key_length) + iv_length,
+ count=10000, prf=cls._pbkdf2_prf)
+ return b_derivedkey
+
+ @classmethod
+ def _gen_key_initctr(cls, b_password, b_salt):
+ # 16 for AES 128, 32 for AES256
+ key_length = 32
+
+ if HAS_CRYPTOGRAPHY:
+ # AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes
+ iv_length = algorithms.AES.block_size // 8
+
+ b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length)
+ b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]
+ elif HAS_PYCRYPTO:
+ # match the size used for counter.new to avoid extra work
+ iv_length = 16
+
+ b_derivedkey = cls._create_key_pycrypto(b_password, b_salt, key_length, iv_length)
+ b_iv = hexlify(b_derivedkey[(key_length * 2):(key_length * 2) + iv_length])
+ else:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)')
+
+ b_key1 = b_derivedkey[:key_length]
+ b_key2 = b_derivedkey[key_length:(key_length * 2)]
+
+ return b_key1, b_key2, b_iv
+
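+ # The derived-key layout, sketched with the standard library (illustrative;
+ # it mirrors the cryptography code path above: 32-byte key1, 32-byte key2,
+ # 16-byte counter IV):
+ #
+ # import hashlib
+ # b_salt = os.urandom(32)
+ # dk = hashlib.pbkdf2_hmac('sha256', b'password', b_salt, 10000, dklen=80)
+ # b_key1, b_key2, b_iv = dk[:32], dk[32:64], dk[64:80]
+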
+ @staticmethod
+ def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv):
+ cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
+ encryptor = cipher.encryptor()
+ padder = padding.PKCS7(algorithms.AES.block_size).padder()
+ b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize())
+ b_ciphertext += encryptor.finalize()
+
+ # COMPUTE THE HMAC OF THE CIPHERTEXT (salt, digest and data are combined in encrypt())
+ hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
+ hmac.update(b_ciphertext)
+ b_hmac = hmac.finalize()
+
+ return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext)
+
+ @staticmethod
+ def _encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv):
+ # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
+ bs = AES_pycrypto.block_size
+ padding_length = (bs - len(b_plaintext) % bs) or bs
+ b_plaintext += to_bytes(padding_length * chr(padding_length), encoding='ascii', errors='strict')
+
+ # COUNTER.new PARAMETERS
+ # 1) nbits (integer) - Length of the counter, in bits.
+ # 2) initial_value (integer) - initial value of the counter. "iv" from _gen_key_initctr
+
+ ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16))
+
+ # AES.new PARAMETERS
+ # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from _gen_key_initctr
+ # 2) MODE_CTR, is the recommended mode
+ # 3) counter=<CounterObject>
+
+ cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr)
+
+ # ENCRYPT PADDED DATA
+ b_ciphertext = cipher.encrypt(b_plaintext)
+
+ # COMPUTE THE HMAC OF THE CIPHERTEXT (salt, digest and data are combined in encrypt())
+ hmac = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto)
+
+ return to_bytes(hmac.hexdigest(), errors='surrogate_or_strict'), hexlify(b_ciphertext)
+
+ @classmethod
+ def encrypt(cls, b_plaintext, secret):
+ if secret is None:
+ raise AnsibleVaultError('The secret passed to encrypt() was None')
+ b_salt = os.urandom(32)
+ b_password = secret.bytes
+ b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
+
+ if HAS_CRYPTOGRAPHY:
+ b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv)
+ elif HAS_PYCRYPTO:
+ b_hmac, b_ciphertext = cls._encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv)
+ else:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)')
+
+ b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext])
+ # Unnecessary but getting rid of it is a backwards incompatible vault
+ # format change
+ b_vaulttext = hexlify(b_vaulttext)
+ return b_vaulttext
+
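+ # Once unhexlified, the payload is three newline-joined hex fields, mirroring
+ # what parse_vaulttext() takes apart (illustrative):
+ #
+ # from binascii import unhexlify
+ # b_salt_hex, b_hmac_hex, b_ciphertext_hex = unhexlify(b_vaulttext).split(b'\n')
+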
+ @classmethod
+ def _decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
+ # b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
+ # EXIT EARLY IF DIGEST DOESN'T MATCH
+ hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
+ hmac.update(b_ciphertext)
+ try:
+ hmac.verify(_unhexlify(b_crypted_hmac))
+ except InvalidSignature as e:
+ raise AnsibleVaultError('HMAC verification failed: %s' % e)
+
+ cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
+ decryptor = cipher.decryptor()
+ unpadder = padding.PKCS7(128).unpadder()
+ b_plaintext = unpadder.update(
+ decryptor.update(b_ciphertext) + decryptor.finalize()
+ ) + unpadder.finalize()
+
+ return b_plaintext
+
+ @staticmethod
+ def _is_equal(b_a, b_b):
+ """
+ Compare two byte arrays in constant time to avoid timing attacks.
+
+ It would be nice if there were a library for this, but hey.
+ """
+ if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)):
+ raise TypeError('_is_equal can only be used to compare two byte strings')
+
+ # http://codahale.com/a-lesson-in-timing-attacks/
+ if len(b_a) != len(b_b):
+ return False
+
+ result = 0
+ for b_x, b_y in zip(b_a, b_b):
+ if PY3:
+ result |= b_x ^ b_y
+ else:
+ result |= ord(b_x) ^ ord(b_y)
+ return result == 0
+
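+ # On Python 2.7.7+ and 3.3+, the standard library provides the same
+ # constant-time guarantee (illustrative equivalent):
+ #
+ # import hmac as hmac_stdlib
+ # hmac_stdlib.compare_digest(b'abc', b'abc') # -> True
+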
+ @classmethod
+ def _decrypt_pycrypto(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
+ # EXIT EARLY IF DIGEST DOESN'T MATCH
+ hmac_decrypt = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto)
+ if not cls._is_equal(b_crypted_hmac, to_bytes(hmac_decrypt.hexdigest())):
+ return None
+
+ # SET THE COUNTER AND THE CIPHER
+ ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16))
+ cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr)
+
+ # DECRYPT PADDED DATA
+ b_plaintext = cipher.decrypt(b_ciphertext)
+
+ # UNPAD DATA
+ if PY3:
+ padding_length = b_plaintext[-1]
+ else:
+ padding_length = ord(b_plaintext[-1])
+
+ b_plaintext = b_plaintext[:-padding_length]
+ return b_plaintext
+
+ @classmethod
+ def decrypt(cls, b_vaulttext, secret):
+
+ b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
+
+ # TODO: would be nice if a VaultSecret could be passed directly to _decrypt_*
+ # (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?)
+ # though, likely needs to be python cryptography specific impl that basically
+ # creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2
+ b_password = secret.bytes
+
+ b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
+
+ if HAS_CRYPTOGRAPHY:
+ b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
+ elif HAS_PYCRYPTO:
+ b_plaintext = cls._decrypt_pycrypto(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
+ else:
+ raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)')
+
+ return b_plaintext
+
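+ # Cipher-level round trip (illustrative; VaultSecret is defined earlier in
+ # this module and exposes the password via its .bytes property):
+ #
+ # secret = VaultSecret(b'password')
+ # b_vaulttext = VaultAES256.encrypt(b'some plaintext', secret)
+ # assert VaultAES256.decrypt(b_vaulttext, secret) == b'some plaintext'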
+
+# Keys could be made bytes later if the code that gets the data is more
+# naturally byte-oriented
+CIPHER_MAPPING = {
+ u'AES256': VaultAES256,
+}
diff --git a/lib/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py
new file mode 100644
index 00000000..ae8ccff5
--- /dev/null
+++ b/lib/ansible/parsing/yaml/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
new file mode 100644
index 00000000..208286e4
--- /dev/null
+++ b/lib/ansible/parsing/yaml/constructor.py
@@ -0,0 +1,169 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from yaml.constructor import SafeConstructor, ConstructorError
+from yaml.nodes import MappingNode
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import wrap_var
+from ansible.parsing.vault import VaultLib
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class AnsibleConstructor(SafeConstructor):
+ def __init__(self, file_name=None, vault_secrets=None):
+ self._ansible_file_name = file_name
+ super(AnsibleConstructor, self).__init__()
+ self._vaults = {}
+ self.vault_secrets = vault_secrets or []
+ self._vaults['default'] = VaultLib(secrets=self.vault_secrets)
+
+ def construct_yaml_map(self, node):
+ data = AnsibleMapping()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+ data.ansible_pos = self._node_position_info(node)
+
+ def construct_mapping(self, node, deep=False):
+ # Most of this is from yaml.constructor.SafeConstructor. We replicate
+ # it here so that we can warn users when they have duplicate dict keys
+ # (pyyaml silently allows overwriting keys)
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ self.flatten_mapping(node)
+ mapping = AnsibleMapping()
+
+ # Add our extra information to the returned value
+ mapping.ansible_pos = self._node_position_info(node)
+
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+
+ if key in mapping:
+ msg = (u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
+ u' Using last defined value only.'.format(key, *mapping.ansible_pos))
+ if C.DUPLICATE_YAML_DICT_KEY == 'warn':
+ display.warning(msg)
+ elif C.DUPLICATE_YAML_DICT_KEY == 'error':
+ raise ConstructorError(context=None, context_mark=None,
+ problem=to_native(msg),
+ problem_mark=node.start_mark,
+ note=None)
+ else:
+ # when 'ignore'
+ display.debug(msg)
+
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+
+ return mapping
+
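+ # For example (illustrative, via the AnsibleLoader defined in loader.py):
+ #
+ # data = yaml.load('{a: 1, a: 2}', Loader=AnsibleLoader)
+ # # warns "found a duplicate dict key (a)" (or errors, per
+ # # C.DUPLICATE_YAML_DICT_KEY); data == {'a': 2}
+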
+ def construct_yaml_str(self, node):
+ # Override the default string handling function
+ # to always return unicode objects
+ value = self.construct_scalar(node)
+ ret = AnsibleUnicode(value)
+
+ ret.ansible_pos = self._node_position_info(node)
+
+ return ret
+
+ def construct_vault_encrypted_unicode(self, node):
+ value = self.construct_scalar(node)
+ b_ciphertext_data = to_bytes(value)
+ # could pass in a key id here to choose the vault to associate with
+ # TODO/FIXME: plugin vault selector
+ vault = self._vaults['default']
+ if vault.secrets is None:
+ raise ConstructorError(context=None, context_mark=None,
+ problem="found !vault but no vault password provided",
+ problem_mark=node.start_mark,
+ note=None)
+ ret = AnsibleVaultEncryptedUnicode(b_ciphertext_data)
+ ret.vault = vault
+ return ret
+
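+ # A scalar tagged !vault arrives here as the raw vault envelope text,
+ # e.g. (illustrative, ciphertext elided):
+ #
+ # secret_var: !vault |
+ #   $ANSIBLE_VAULT;1.1;AES256
+ #   <hex-encoded payload>
+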
+ def construct_yaml_seq(self, node):
+ data = AnsibleSequence()
+ yield data
+ data.extend(self.construct_sequence(node))
+ data.ansible_pos = self._node_position_info(node)
+
+ def construct_yaml_unsafe(self, node):
+ return wrap_var(self.construct_yaml_str(node))
+
+ def _node_position_info(self, node):
+ # the line number where the previous token has ended (plus empty lines)
+ # Add one so that the first line is line 1 rather than line 0
+ column = node.start_mark.column + 1
+ line = node.start_mark.line + 1
+
+ # in some cases, we may have pre-read the data and then
+ # passed it to the load() call for YAML, in which case we
+ # want to override the default datasource (which would be
+ # '<string>') to the actual filename we read in
+ datasource = self._ansible_file_name or node.start_mark.name
+
+ return (datasource, line, column)
+
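+# For example (illustrative; the file_name is a caller-supplied label):
+#
+# loader = AnsibleLoader('foo: bar', file_name='play.yml')
+# data = loader.get_single_data()
+# data.ansible_pos # -> ('play.yml', 1, 1)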
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ AnsibleConstructor.construct_yaml_map)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ AnsibleConstructor.construct_yaml_map)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ AnsibleConstructor.construct_yaml_seq)
+
+AnsibleConstructor.add_constructor(
+ u'!unsafe',
+ AnsibleConstructor.construct_yaml_unsafe)
+
+AnsibleConstructor.add_constructor(
+ u'!vault',
+ AnsibleConstructor.construct_vault_encrypted_unicode)
+
+AnsibleConstructor.add_constructor(u'!vault-encrypted', AnsibleConstructor.construct_vault_encrypted_unicode)
diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py
new file mode 100644
index 00000000..67a2efb3
--- /dev/null
+++ b/lib/ansible/parsing/yaml/dumper.py
@@ -0,0 +1,92 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import yaml
+
+from ansible.module_utils.six import PY3
+from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes
+from ansible.vars.hostvars import HostVars, HostVarsVars
+
+
+class AnsibleDumper(yaml.SafeDumper):
+ '''
+ A simple stub class that allows us to add representers
+ for our overridden object types.
+ '''
+ pass
+
+
+def represent_hostvars(self, data):
+ return self.represent_dict(dict(data))
+
+
+# Note: only want to represent the encrypted data
+def represent_vault_encrypted_unicode(self, data):
+ return self.represent_scalar(u'!vault', data._ciphertext.decode(), style='|')
+
+
+if PY3:
+ represent_unicode = yaml.representer.SafeRepresenter.represent_str
+ represent_binary = yaml.representer.SafeRepresenter.represent_binary
+else:
+ represent_unicode = yaml.representer.SafeRepresenter.represent_unicode
+ represent_binary = yaml.representer.SafeRepresenter.represent_str
+
+AnsibleDumper.add_representer(
+ AnsibleUnicode,
+ represent_unicode,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleUnsafeText,
+ represent_unicode,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleUnsafeBytes,
+ represent_binary,
+)
+
+AnsibleDumper.add_representer(
+ HostVars,
+ represent_hostvars,
+)
+
+AnsibleDumper.add_representer(
+ HostVarsVars,
+ represent_hostvars,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleSequence,
+ yaml.representer.SafeRepresenter.represent_list,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleMapping,
+ yaml.representer.SafeRepresenter.represent_dict,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleVaultEncryptedUnicode,
+ represent_vault_encrypted_unicode,
+)
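+
+# Usage sketch (illustrative): AnsibleDumper turns the wrapped types back into
+# plain YAML scalars and containers:
+#
+# yaml.dump({'x': AnsibleUnicode(u'y')}, Dumper=AnsibleDumper) # -> 'x: y\n'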
diff --git a/lib/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py
new file mode 100644
index 00000000..b6650041
--- /dev/null
+++ b/lib/ansible/parsing/yaml/loader.py
@@ -0,0 +1,52 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from _yaml import CParser, CEmitter
+ HAVE_PYYAML_C = True
+except ImportError:
+ HAVE_PYYAML_C = False
+
+from yaml.resolver import Resolver
+
+from ansible.parsing.yaml.constructor import AnsibleConstructor
+
+if HAVE_PYYAML_C:
+
+ class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
+ def __init__(self, stream, file_name=None, vault_secrets=None):
+ CParser.__init__(self, stream)
+ AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
+ Resolver.__init__(self)
+else:
+ from yaml.composer import Composer
+ from yaml.reader import Reader
+ from yaml.scanner import Scanner
+ from yaml.parser import Parser
+
+ class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
+ def __init__(self, stream, file_name=None, vault_secrets=None):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
+ Resolver.__init__(self)
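+
+# Either variant is used the same way (illustrative; the file_name is just a
+# label used for error and position reporting):
+#
+# loader = AnsibleLoader('foo: bar', file_name='inventory.yml')
+# data = loader.get_single_data() # -> {'foo': 'bar'}, with ansible_pos set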
diff --git a/lib/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
new file mode 100644
index 00000000..9c93006d
--- /dev/null
+++ b/lib/ansible/parsing/yaml/objects.py
@@ -0,0 +1,379 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import string
+import sys as _sys
+
+import sys
+import yaml
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.module_utils.six import text_type
+from ansible.module_utils._text import to_bytes, to_text, to_native
+
+
+class AnsibleBaseYAMLObject(object):
+ '''
+ the base class used to sub-class python built-in objects
+ so that we can add attributes to them during yaml parsing
+
+ '''
+ _data_source = None
+ _line_number = 0
+ _column_number = 0
+
+ def _get_ansible_position(self):
+ return (self._data_source, self._line_number, self._column_number)
+
+ def _set_ansible_position(self, obj):
+ try:
+ (src, line, col) = obj
+ except (TypeError, ValueError):
+ raise AssertionError(
+ 'ansible_pos can only be set with a tuple/list '
+ 'of three values: source, line number, column number'
+ )
+ self._data_source = src
+ self._line_number = line
+ self._column_number = col
+
+ ansible_pos = property(_get_ansible_position, _set_ansible_position)
+
+
+# try to always use an ordered dict with yaml; from python 3.7 on, the built-in dict already preserves insertion order
+odict = dict
+if sys.version_info[:2] < (3, 7):
+ # if python 2.7 or py3 < 3.7
+ try:
+ from collections import OrderedDict as odict
+ except ImportError:
+ pass
+
+
+class AnsibleMapping(AnsibleBaseYAMLObject, odict):
+ ''' sub class for dictionaries '''
+ pass
+
+
+class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
+ ''' sub class for unicode objects '''
+ pass
+
+
+class AnsibleSequence(AnsibleBaseYAMLObject, list):
+ ''' sub class for lists '''
+ pass
+
+
+class AnsibleVaultEncryptedUnicode(Sequence, AnsibleBaseYAMLObject):
+ '''Unicode-like object that is not evaluated (decrypted) until it needs to be'''
+ __UNSAFE__ = True
+ __ENCRYPTED__ = True
+ yaml_tag = u'!vault'
+
+ @classmethod
+ def from_plaintext(cls, seq, vault, secret):
+ if not vault:
+ raise AnsibleError('Error creating AnsibleVaultEncryptedUnicode, invalid vault (%s) provided' % vault)
+
+ ciphertext = vault.encrypt(seq, secret)
+ avu = cls(ciphertext)
+ avu.vault = vault
+ return avu
+
+ def __init__(self, ciphertext):
+ '''An AnsibleUnicode with a Vault attribute that can decrypt it.
+
+ ciphertext is a byte string (str on PY2, bytes on PY3).
+
+ The .data attribute is a property that returns the decrypted plaintext
+ of the ciphertext as a PY2 unicode or PY3 string object.
+ '''
+ super(AnsibleVaultEncryptedUnicode, self).__init__()
+
+ # after construction, calling code has to set the .vault attribute to a vaultlib object
+ self.vault = None
+ self._ciphertext = to_bytes(ciphertext)
+
+ @property
+ def data(self):
+ if not self.vault:
+ return to_text(self._ciphertext)
+ return to_text(self.vault.decrypt(self._ciphertext))
+
+ @data.setter
+ def data(self, value):
+ self._ciphertext = to_bytes(value)
+
+ def is_encrypted(self):
+ return self.vault and self.vault.is_encrypted(self._ciphertext)
+
+ def __eq__(self, other):
+ if self.vault:
+ return other == self.data
+ return False
+
+ def __ne__(self, other):
+ if self.vault:
+ return other != self.data
+ return True
+
+ def __reversed__(self):
+ # This gets inherited from ``collections.Sequence``, which returns a generator;
+ # make this act more like the string implementation
+ return to_text(self[::-1], errors='surrogate_or_strict')
+
+ def __str__(self):
+ return to_native(self.data, errors='surrogate_or_strict')
+
+ def __unicode__(self):
+ return to_text(self.data, errors='surrogate_or_strict')
+
+ def encode(self, encoding=None, errors=None):
+ return to_bytes(self.data, encoding=encoding, errors=errors)
+
+ # Methods below are a copy from ``collections.UserString``
+ # Some are copied as is, where others are modified to not
+ # auto wrap with ``self.__class__``
+ def __repr__(self):
+ return repr(self.data)
+
+ def __int__(self, base=10):
+ return int(self.data, base=base)
+
+ def __float__(self):
+ return float(self.data)
+
+ def __complex__(self):
+ return complex(self.data)
+
+ def __hash__(self):
+ return hash(self.data)
+
+ # This breaks vault, do not define it, we cannot satisfy this
+ # def __getnewargs__(self):
+ # return (self.data[:],)
+
+ def __lt__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data < string.data
+ return self.data < string
+
+ def __le__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data <= string.data
+ return self.data <= string
+
+ def __gt__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data > string.data
+ return self.data > string
+
+ def __ge__(self, string):
+ if isinstance(string, AnsibleVaultEncryptedUnicode):
+ return self.data >= string.data
+ return self.data >= string
+
+ def __contains__(self, char):
+ if isinstance(char, AnsibleVaultEncryptedUnicode):
+ char = char.data
+ return char in self.data
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, index):
+ return self.data[index]
+
+ def __getslice__(self, start, end):
+ start = max(start, 0)
+ end = max(end, 0)
+ return self.data[start:end]
+
+ def __add__(self, other):
+ if isinstance(other, AnsibleVaultEncryptedUnicode):
+ return self.data + other.data
+ elif isinstance(other, text_type):
+ return self.data + other
+ return self.data + to_text(other)
+
+ def __radd__(self, other):
+ if isinstance(other, text_type):
+ return other + self.data
+ return to_text(other) + self.data
+
+ def __mul__(self, n):
+ return self.data * n
+
+ __rmul__ = __mul__
+
+ def __mod__(self, args):
+ return self.data % args
+
+ def __rmod__(self, template):
+ return to_text(template) % self
+
+ # the following methods are defined in alphabetical order:
+ def capitalize(self):
+ return self.data.capitalize()
+
+ def casefold(self):
+ return self.data.casefold()
+
+ def center(self, width, *args):
+ return self.data.center(width, *args)
+
+ def count(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, AnsibleVaultEncryptedUnicode):
+ sub = sub.data
+ return self.data.count(sub, start, end)
+
+ def endswith(self, suffix, start=0, end=_sys.maxsize):
+ return self.data.endswith(suffix, start, end)
+
+ def expandtabs(self, tabsize=8):
+ return self.data.expandtabs(tabsize)
+
+ def find(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, AnsibleVaultEncryptedUnicode):
+ sub = sub.data
+ return self.data.find(sub, start, end)
+
+ def format(self, *args, **kwds):
+ return self.data.format(*args, **kwds)
+
+ def format_map(self, mapping):
+ return self.data.format_map(mapping)
+
+ def index(self, sub, start=0, end=_sys.maxsize):
+ return self.data.index(sub, start, end)
+
+ def isalpha(self):
+ return self.data.isalpha()
+
+ def isalnum(self):
+ return self.data.isalnum()
+
+ def isascii(self):
+ return self.data.isascii()
+
+ def isdecimal(self):
+ return self.data.isdecimal()
+
+ def isdigit(self):
+ return self.data.isdigit()
+
+ def isidentifier(self):
+ return self.data.isidentifier()
+
+ def islower(self):
+ return self.data.islower()
+
+ def isnumeric(self):
+ return self.data.isnumeric()
+
+ def isprintable(self):
+ return self.data.isprintable()
+
+ def isspace(self):
+ return self.data.isspace()
+
+ def istitle(self):
+ return self.data.istitle()
+
+ def isupper(self):
+ return self.data.isupper()
+
+ def join(self, seq):
+ return self.data.join(seq)
+
+ def ljust(self, width, *args):
+ return self.data.ljust(width, *args)
+
+ def lower(self):
+ return self.data.lower()
+
+ def lstrip(self, chars=None):
+ return self.data.lstrip(chars)
+
+ try:
+ # PY3
+ maketrans = str.maketrans
+ except AttributeError:
+ # PY2
+ maketrans = string.maketrans
+
+ def partition(self, sep):
+ return self.data.partition(sep)
+
+ def replace(self, old, new, maxsplit=-1):
+ if isinstance(old, AnsibleVaultEncryptedUnicode):
+ old = old.data
+ if isinstance(new, AnsibleVaultEncryptedUnicode):
+ new = new.data
+ return self.data.replace(old, new, maxsplit)
+
+ def rfind(self, sub, start=0, end=_sys.maxsize):
+ if isinstance(sub, AnsibleVaultEncryptedUnicode):
+ sub = sub.data
+ return self.data.rfind(sub, start, end)
+
+ def rindex(self, sub, start=0, end=_sys.maxsize):
+ return self.data.rindex(sub, start, end)
+
+ def rjust(self, width, *args):
+ return self.data.rjust(width, *args)
+
+ def rpartition(self, sep):
+ return self.data.rpartition(sep)
+
+ def rstrip(self, chars=None):
+ return self.data.rstrip(chars)
+
+ def split(self, sep=None, maxsplit=-1):
+ return self.data.split(sep, maxsplit)
+
+ def rsplit(self, sep=None, maxsplit=-1):
+ return self.data.rsplit(sep, maxsplit)
+
+ def splitlines(self, keepends=False):
+ return self.data.splitlines(keepends)
+
+ def startswith(self, prefix, start=0, end=_sys.maxsize):
+ return self.data.startswith(prefix, start, end)
+
+ def strip(self, chars=None):
+ return self.data.strip(chars)
+
+ def swapcase(self):
+ return self.data.swapcase()
+
+ def title(self):
+ return self.data.title()
+
+ def translate(self, *args):
+ return self.data.translate(*args)
+
+ def upper(self):
+ return self.data.upper()
+
+ def zfill(self, width):
+ return self.data.zfill(width)
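+
+# Construction sketch (illustrative; VaultLib and VaultSecret come from
+# ansible.parsing.vault, and b_vaulttext stands for the raw vault envelope
+# bytes): the ciphertext is stored as-is and only decrypted when .data is read:
+#
+# avu = AnsibleVaultEncryptedUnicode(b_vaulttext)
+# avu.vault = VaultLib(secrets=[('default', VaultSecret(b'password'))])
+# str(avu) # decrypts on access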
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
new file mode 100644
index 00000000..8c4ed65f
--- /dev/null
+++ b/lib/ansible/playbook/__init__.py
@@ -0,0 +1,119 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_text, to_native
+from ansible.playbook.play import Play
+from ansible.playbook.playbook_include import PlaybookInclude
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.display import Display
+
+display = Display()
+
+
+__all__ = ['Playbook']
+
+
+class Playbook:
+
+ def __init__(self, loader):
+ # Entries in the datastructure of a playbook may
+ # be either a play or an include statement
+ self._entries = []
+ self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
+ self._loader = loader
+ self._file_name = None
+
+ @staticmethod
+ def load(file_name, variable_manager=None, loader=None):
+ pb = Playbook(loader=loader)
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
+ return pb
+
+ def _load_playbook_data(self, file_name, variable_manager, vars=None):
+
+ if os.path.isabs(file_name):
+ self._basedir = os.path.dirname(file_name)
+ else:
+ self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
+
+ # set the loaders basedir
+ cur_basedir = self._loader.get_basedir()
+ self._loader.set_basedir(self._basedir)
+
+ add_all_plugin_dirs(self._basedir)
+
+ self._file_name = file_name
+
+ try:
+ ds = self._loader.load_from_file(os.path.basename(file_name))
+ except UnicodeDecodeError as e:
+ raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))
+
+ # check for errors and restore the basedir in case this error is caught and handled
+ if ds is None:
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("Empty playbook, nothing to do", obj=ds)
+ elif not isinstance(ds, list):
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("A playbook must be a list of plays, got a %s instead" % type(ds), obj=ds)
+ elif not ds:
+ display.deprecated("Empty plays will currently be skipped, in the future they will cause a syntax error",
+ version='2.12', collection_name='ansible.builtin')
+
+ # Parse the playbook entries. For plays, we simply parse them
+ # using the Play() object, and includes are parsed using the
+ # PlaybookInclude() object
+ for entry in ds:
+ if not isinstance(entry, dict):
+ # restore the basedir in case this error is caught and handled
+ self._loader.set_basedir(cur_basedir)
+ raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
+
+ if any(action in entry for action in C._ACTION_ALL_IMPORT_PLAYBOOKS):
+ if any(action in entry for action in C._ACTION_INCLUDE):
+ display.deprecated("'include' for playbook includes. You should use 'import_playbook' instead",
+ version="2.12", collection_name='ansible.builtin')
+ pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
+ if pb is not None:
+ self._entries.extend(pb._entries)
+ else:
+ which = entry
+ for k in C._ACTION_IMPORT_PLAYBOOK + C._ACTION_INCLUDE:
+ if k in entry:
+ which = entry[k]
+ break
+ display.display("skipping playbook '%s' due to conditional test failure" % which, color=C.COLOR_SKIP)
+ else:
+ entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader, vars=vars)
+ self._entries.append(entry_obj)
+
+ # we're done, so restore the old basedir in the loader
+ self._loader.set_basedir(cur_basedir)
+
+ def get_loader(self):
+ return self._loader
+
+ def get_plays(self):
+ return self._entries[:]
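+
+# Usage sketch (illustrative; 'site.yml' stands for any playbook path):
+#
+# from ansible.parsing.dataloader import DataLoader
+# pb = Playbook.load('site.yml', loader=DataLoader())
+# for play in pb.get_plays():
+#     print(play)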
diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
new file mode 100644
index 00000000..36f7c792
--- /dev/null
+++ b/lib/ansible/playbook/attribute.py
@@ -0,0 +1,119 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import copy, deepcopy
+
+
+_CONTAINERS = frozenset(('list', 'dict', 'set'))
+
+
+class Attribute:
+
+ def __init__(
+ self,
+ isa=None,
+ private=False,
+ default=None,
+ required=False,
+ listof=None,
+ priority=0,
+ class_type=None,
+ always_post_validate=False,
+ inherit=True,
+ alias=None,
+ extend=False,
+ prepend=False,
+ static=False,
+ ):
+
+ """
+ :class:`Attribute` specifies constraints for attributes of objects which
+ derive from playbook data. The attributes of the object are basically
+ a schema for the yaml playbook.
+
+ :kwarg isa: The type of the attribute. Allowable values are a string
+ representation of any yaml basic datatype, a python class, or 'percent'.
+ (Enforced at post-validation time.)
+ :kwarg private: Not used at runtime. The docs playbook keyword dumper uses it to determine
+ that a keyword should not be documented. mpdehaan had plans to remove attributes marked
+ private from the ds so they would not have been available at all.
+ :kwarg default: Default value if unspecified in the YAML document.
+ :kwarg required: Whether or not the YAML document must contain this field.
+ If the attribute is None when post-validated, an error will be raised.
+ :kwarg listof: If isa is set to "list", this can optionally be set to
+ ensure that all elements in the list are of the given type. Valid
+ values here are the same as those for isa.
+ :kwarg priority: The order in which the fields should be parsed. Generally
+ this does not need to be set; it is for rare situations where one field
+ depends on another field having been parsed first.
+ :kwarg class_type: If isa is set to "class", this can be optionally set to
+ a class (not a string name). The YAML data for this field will be
+ passed to the __init__ method of that class during post validation and
+ the field will be an instance of that class.
+ :kwarg always_post_validate: Controls whether a field should be post
+ validated or not (default: False).
+ :kwarg inherit: A boolean value, which controls whether the object
+ containing this field should attempt to inherit the value from its
+ parent object if the local value is None.
+ :kwarg alias: An alias to use for the attribute name, for situations where
+ the attribute name may conflict with a Python reserved word.
+ """
+
+ self.isa = isa
+ self.private = private
+ self.default = default
+ self.required = required
+ self.listof = listof
+ self.priority = priority
+ self.class_type = class_type
+ self.always_post_validate = always_post_validate
+ self.inherit = inherit
+ self.alias = alias
+ self.extend = extend
+ self.prepend = prepend
+ self.static = static
+
+ if default is not None and self.isa in _CONTAINERS and not callable(default):
+ raise TypeError('defaults for FieldAttribute may not be mutable, please provide a callable instead')
+
+ def __eq__(self, other):
+ return other.priority == self.priority
+
+ def __ne__(self, other):
+ return other.priority != self.priority
+
+ # NB: higher priority numbers sort first
+
+ def __lt__(self, other):
+ return other.priority < self.priority
+
+ def __gt__(self, other):
+ return other.priority > self.priority
+
+ def __le__(self, other):
+ return other.priority <= self.priority
+
+ def __ge__(self, other):
+ return other.priority >= self.priority
+
+
+class FieldAttribute(Attribute):
+ pass
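+
+# Playbook classes declare their schema with these, e.g. (illustrative):
+#
+# class Example(Base):
+#     _name = FieldAttribute(isa='string')
+#     _tags = FieldAttribute(isa='list', default=list)  # container defaults must be callable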
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
new file mode 100644
index 00000000..df045928
--- /dev/null
+++ b/lib/ansible/playbook/base.py
@@ -0,0 +1,630 @@
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import itertools
+import operator
+
+from copy import copy as shallowcopy
+from functools import partial
+
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible import context
+from ansible.module_utils.six import iteritems, string_types, with_metaclass
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
+from ansible.module_utils._text import to_text, to_native
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.parsing.dataloader import DataLoader
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+from ansible.utils.vars import combine_vars, isidentifier, get_unique_id
+
+display = Display()
+
+
+def _generic_g(prop_name, self):
+ try:
+ value = self._attributes[prop_name]
+ except KeyError:
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
+
+ if value is Sentinel:
+ value = self._attr_defaults[prop_name]
+
+ return value
+
+
+def _generic_g_method(prop_name, self):
+ try:
+ if self._squashed:
+ return self._attributes[prop_name]
+ method = "_get_attr_%s" % prop_name
+ return getattr(self, method)()
+ except KeyError:
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
+
+
+def _generic_g_parent(prop_name, self):
+ try:
+ if self._squashed or self._finalized:
+ value = self._attributes[prop_name]
+ else:
+ try:
+ value = self._get_parent_attribute(prop_name)
+ except AttributeError:
+ value = self._attributes[prop_name]
+ except KeyError:
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, prop_name))
+
+ if value is Sentinel:
+ value = self._attr_defaults[prop_name]
+
+ return value
+
+
+def _generic_s(prop_name, self, value):
+ self._attributes[prop_name] = value
+
+
+def _generic_d(prop_name, self):
+ del self._attributes[prop_name]
+
+
+class BaseMeta(type):
+
+ """
+ Metaclass for the Base object, which is used to construct the class
+ attributes based on the FieldAttributes available.
+ """
+
+ def __new__(cls, name, parents, dct):
+ def _create_attrs(src_dict, dst_dict):
+ '''
+ Helper method which creates the attributes based on those in the
+ source dictionary of attributes. It also populates the bookkeeping
+ dictionaries used to keep track of these attributes, wiring each one up
+ to getter/setter/deleter methods.
+ '''
+ keys = list(src_dict.keys())
+ for attr_name in keys:
+ value = src_dict[attr_name]
+ if isinstance(value, Attribute):
+ if attr_name.startswith('_'):
+ attr_name = attr_name[1:]
+
+ # here we selectively assign the getter based on a few
+ # things, such as whether we have a _get_attr_<name>
+ # method, or if the attribute is marked as not inheriting
+ # its value from a parent object
+ method = "_get_attr_%s" % attr_name
+ if method in src_dict or method in dst_dict:
+ getter = partial(_generic_g_method, attr_name)
+ elif ('_get_parent_attribute' in dst_dict or '_get_parent_attribute' in src_dict) and value.inherit:
+ getter = partial(_generic_g_parent, attr_name)
+ else:
+ getter = partial(_generic_g, attr_name)
+
+ setter = partial(_generic_s, attr_name)
+ deleter = partial(_generic_d, attr_name)
+
+ dst_dict[attr_name] = property(getter, setter, deleter)
+ dst_dict['_valid_attrs'][attr_name] = value
+ dst_dict['_attributes'][attr_name] = Sentinel
+ dst_dict['_attr_defaults'][attr_name] = value.default
+
+ if value.alias is not None:
+ dst_dict[value.alias] = property(getter, setter, deleter)
+ dst_dict['_valid_attrs'][value.alias] = value
+ dst_dict['_alias_attrs'][value.alias] = attr_name
+
+ def _process_parents(parents, dst_dict):
+ '''
+ Helper method which creates attributes from all parent objects
+ recursively, walking up through grandparent objects as well
+ '''
+ for parent in parents:
+ if hasattr(parent, '__dict__'):
+ _create_attrs(parent.__dict__, dst_dict)
+ new_dst_dict = parent.__dict__.copy()
+ new_dst_dict.update(dst_dict)
+ _process_parents(parent.__bases__, new_dst_dict)
+
+ # create some additional class attributes
+ dct['_attributes'] = {}
+ dct['_attr_defaults'] = {}
+ dct['_valid_attrs'] = {}
+ dct['_alias_attrs'] = {}
+
+ # now create the attributes based on the FieldAttributes
+ # available, including from parent (and grandparent) objects
+ _create_attrs(dct, dct)
+ _process_parents(parents, dct)
+
+ return super(BaseMeta, cls).__new__(cls, name, parents, dct)
+
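+# Net effect (illustrative): given a declaration like
+#
+# class Example(FieldAttributeBase):
+#     _name = FieldAttribute(isa='string')
+#
+# the metaclass strips the leading underscore and adds a 'name' property backed
+# by self._attributes['name'], records the FieldAttribute in _valid_attrs, and
+# stores its default in _attr_defaults.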
+
+class FieldAttributeBase(with_metaclass(BaseMeta, object)):
+
+ def __init__(self):
+
+ # initialize the data loader and variable manager, which will be provided
+ # later when the object is actually loaded
+ self._loader = None
+ self._variable_manager = None
+
+ # other internal params
+ self._validated = False
+ self._squashed = False
+ self._finalized = False
+
+ # every object gets a random uuid:
+ self._uuid = get_unique_id()
+
+ # we create a copy of the attributes here because they were initialized
+ # as class attributes in the metaclass, so each instance needs its own
+ # unique object here (all members contained within are unique already).
+ self._attributes = self.__class__._attributes.copy()
+ self._attr_defaults = self.__class__._attr_defaults.copy()
+ for key, value in self._attr_defaults.items():
+ if callable(value):
+ self._attr_defaults[key] = value()
+
+ # and init vars, avoid using defaults in field declaration as it lives across plays
+ self.vars = dict()
+
+ def dump_me(self, depth=0):
+ ''' this is never called from production code; it is here to be used when debugging, as a 'complex print' '''
+ if depth == 0:
+ display.debug("DUMPING OBJECT ------------------------------------------------------")
+ display.debug("%s- %s (%s, id=%s)" % (" " * depth, self.__class__.__name__, self, id(self)))
+ if hasattr(self, '_parent') and self._parent:
+ self._parent.dump_me(depth + 2)
+ dep_chain = self._parent.get_dep_chain()
+ if dep_chain:
+ for dep in dep_chain:
+ dep.dump_me(depth + 2)
+ if hasattr(self, '_play') and self._play:
+ self._play.dump_me(depth + 2)
+
+ def preprocess_data(self, ds):
+ ''' infrequently used method to do some pre-processing of legacy terms '''
+ return ds
+
+ def load_data(self, ds, variable_manager=None, loader=None):
+ ''' walk the input datastructure and assign any values '''
+
+ if ds is None:
+ raise AnsibleAssertionError('ds (%s) should not be None but it is.' % ds)
+
+ # cache the datastructure internally
+ setattr(self, '_ds', ds)
+
+ # the variable manager class is used to manage and merge variables
+ # down to a single dictionary for reference in templating, etc.
+ self._variable_manager = variable_manager
+
+ # the data loader class is used to parse data from strings and files
+ if loader is not None:
+ self._loader = loader
+ else:
+ self._loader = DataLoader()
+
+ # call the preprocess_data() function to massage the data into
+ # something we can more easily parse, and then call the validation
+ # function on it to ensure there are no incorrect key values
+ ds = self.preprocess_data(ds)
+ self._validate_attributes(ds)
+
+ # Walk all attributes in the class. We sort them based on their priority
+ # so that certain fields can be loaded before others, if they are dependent.
+ for name, attr in sorted(iteritems(self._valid_attrs), key=operator.itemgetter(1)):
+ # copy the value over unless a _load_field method is defined
+ target_name = name
+ if name in self._alias_attrs:
+ target_name = self._alias_attrs[name]
+ if name in ds:
+ method = getattr(self, '_load_%s' % name, None)
+ if method:
+ self._attributes[target_name] = method(name, ds[name])
+ else:
+ self._attributes[target_name] = ds[name]
+
+ # run early, non-critical validation
+ self.validate()
+
+ # return the constructed object
+ return self
+
+ def get_ds(self):
+ try:
+ return getattr(self, '_ds')
+ except AttributeError:
+ return None
+
+ def get_loader(self):
+ return self._loader
+
+ def get_variable_manager(self):
+ return self._variable_manager
+
+ def _post_validate_debugger(self, attr, value, templar):
+ value = templar.template(value)
+ valid_values = frozenset(('always', 'on_failed', 'on_unreachable', 'on_skipped', 'never'))
+ if value and isinstance(value, string_types) and value not in valid_values:
+ raise AnsibleParserError("'%s' is not a valid value for debugger. Must be one of %s" % (value, ', '.join(valid_values)), obj=self.get_ds())
+ return value
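+
+ # Illustrative example for _post_validate_debugger() (not upstream code):
+ #
+ #   self._post_validate_debugger(attr, 'on_failed', templar)  # -> 'on_failed'
+ #   self._post_validate_debugger(attr, 'sometimes', templar)  # AnsibleParserError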
+
+ def _validate_attributes(self, ds):
+ '''
+ Ensures that there are no keys in the datastructure which do
+ not map to attributes for this object.
+ '''
+
+ valid_attrs = frozenset(self._valid_attrs.keys())
+ for key in ds:
+ if key not in valid_attrs:
+ raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
+
+ def validate(self, all_vars=None):
+ ''' validation that is done at parse time, not load time '''
+ all_vars = {} if all_vars is None else all_vars
+
+ if not self._validated:
+ # walk all fields in the object
+ for (name, attribute) in iteritems(self._valid_attrs):
+
+ if name in self._alias_attrs:
+ name = self._alias_attrs[name]
+
+ # run validator only if present
+ method = getattr(self, '_validate_%s' % name, None)
+ if method:
+ method(attribute, name, getattr(self, name))
+ else:
+ # and make sure the attribute is of the type it should be
+ value = self._attributes[name]
+ if value is not None:
+ if attribute.isa == 'string' and isinstance(value, (list, dict)):
+ raise AnsibleParserError(
+ "The field '%s' is supposed to be a string type,"
+ " however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds()
+ )
+
+ self._validated = True
+
+ def squash(self):
+ '''
+ Evaluates all attributes and sets them to the evaluated version,
+ so that all future accesses of attributes do not need to evaluate
+ parent attributes.
+ '''
+ if not self._squashed:
+ for name in self._valid_attrs.keys():
+ self._attributes[name] = getattr(self, name)
+ self._squashed = True
+
+ def copy(self):
+ '''
+ Create a copy of this object and return it.
+ '''
+
+ new_me = self.__class__()
+
+ for name in self._valid_attrs.keys():
+ if name in self._alias_attrs:
+ continue
+ new_me._attributes[name] = shallowcopy(self._attributes[name])
+ new_me._attr_defaults[name] = shallowcopy(self._attr_defaults[name])
+
+ new_me._loader = self._loader
+ new_me._variable_manager = self._variable_manager
+ new_me._validated = self._validated
+ new_me._finalized = self._finalized
+ new_me._uuid = self._uuid
+
+ # if the ds value was set on the object, copy it to the new copy too
+ if hasattr(self, '_ds'):
+ new_me._ds = self._ds
+
+ return new_me
+
+ def get_validated_value(self, name, attribute, value, templar):
+ if attribute.isa == 'string':
+ value = to_text(value)
+ elif attribute.isa == 'int':
+ value = int(value)
+ elif attribute.isa == 'float':
+ value = float(value)
+ elif attribute.isa == 'bool':
+ value = boolean(value, strict=True)
+ elif attribute.isa == 'percent':
+ # special value, which may be an integer or float
+ # with an optional '%' at the end
+ if isinstance(value, string_types) and '%' in value:
+ value = value.replace('%', '')
+ value = float(value)
+ elif attribute.isa == 'list':
+ if value is None:
+ value = []
+ elif not isinstance(value, list):
+ value = [value]
+ if attribute.listof is not None:
+ for item in value:
+ if not isinstance(item, attribute.listof):
+ raise AnsibleParserError("the field '%s' should be a list of %s, "
+ "but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
+ elif attribute.required and attribute.listof == string_types:
+ if item is None or item.strip() == "":
+ raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
+ elif attribute.isa == 'set':
+ if value is None:
+ value = set()
+ elif not isinstance(value, (list, set)):
+ if isinstance(value, string_types):
+ value = value.split(',')
+ else:
+ # Making a list like this handles strings of
+ # text and bytes properly
+ value = [value]
+ if not isinstance(value, set):
+ value = set(value)
+ elif attribute.isa == 'dict':
+ if value is None:
+ value = dict()
+ elif not isinstance(value, dict):
+ raise TypeError("%s is not a dictionary" % value)
+ elif attribute.isa == 'class':
+ if not isinstance(value, attribute.class_type):
+ raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
+ value.post_validate(templar=templar)
+ return value
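+
+ # Illustrative coercion examples for get_validated_value() (not upstream
+ # code; each line assumes a FieldAttribute with the given isa):
+ #
+ #   isa='percent': '80%'  -> 80.0, '0.5' -> 0.5
+ #   isa='list':    'foo'  -> ['foo'], None -> []
+ #   isa='set':     'a,b'  -> {'a', 'b'}
+ #   isa='bool':    'yes'  -> True (strict parsing via boolean())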
+
+ def post_validate(self, templar):
+ '''
+ we can't tell that everything is of the right type until we have
+ all the variables. Run basic type checks (from isa) as well as
+ any _post_validate_<foo> functions.
+ '''
+
+ # save the omit value for later checking
+ omit_value = templar.available_variables.get('omit')
+
+ for (name, attribute) in iteritems(self._valid_attrs):
+
+ if attribute.static:
+ value = getattr(self, name)
+
+ # we don't template 'vars' but allow templates as values for later use
+ if name not in ('vars',) and templar.is_template(value):
+ display.warning('"%s" is not templatable, but we found: %s, '
+ 'it will not be templated and will be used "as is".' % (name, value))
+ continue
+
+ if getattr(self, name) is None:
+ if not attribute.required:
+ continue
+ else:
+ raise AnsibleParserError("the field '%s' is required but was not set" % name)
+ elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
+ # Intermediate objects like Play() won't have their fields validated by
+ # default, as their values are often inherited by other objects and validated
+ # later, so we don't want them to fail out early
+ continue
+
+ try:
+ # Run the post-validator if present. These methods are responsible for
+ # using the given templar to template the values, if required.
+ method = getattr(self, '_post_validate_%s' % name, None)
+ if method:
+ value = method(attribute, getattr(self, name), templar)
+ elif attribute.isa == 'class':
+ value = getattr(self, name)
+ else:
+ # if the attribute contains a variable, template it now
+ value = templar.template(getattr(self, name))
+
+ # if this evaluated to the omit value, set the value back to
+ # the default specified in the FieldAttribute and move on
+ if omit_value is not None and value == omit_value:
+ if callable(attribute.default):
+ setattr(self, name, attribute.default())
+ else:
+ setattr(self, name, attribute.default)
+ continue
+
+ # and make sure the attribute is of the type it should be
+ if value is not None:
+ value = self.get_validated_value(name, attribute, value, templar)
+
+ # and assign the massaged value back to the attribute field
+ setattr(self, name, value)
+ except (TypeError, ValueError) as e:
+ value = getattr(self, name)
+ raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
+ "The error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ if templar._fail_on_undefined_errors and name != 'name':
+ if name == 'args':
+ msg = "The task includes an option with an undefined variable. The error was: %s" % (to_native(e))
+ else:
+ msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (name, to_native(e))
+ raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)
+
+ self._finalized = True
+
+ def _load_vars(self, attr, ds):
+ '''
+ Vars in a play can be specified either as a dictionary directly, or
+ as a list of dictionaries. If the latter, this method will turn the
+ list into a single dictionary.
+ '''
+
+ def _validate_variable_keys(ds):
+ for key in ds:
+ if not isidentifier(key):
+ raise TypeError("'%s' is not a valid variable name" % key)
+
+ try:
+ if isinstance(ds, dict):
+ _validate_variable_keys(ds)
+ return combine_vars(self.vars, ds)
+ elif isinstance(ds, list):
+ all_vars = self.vars
+ for item in ds:
+ if not isinstance(item, dict):
+ raise ValueError
+ _validate_variable_keys(item)
+ all_vars = combine_vars(all_vars, item)
+ return all_vars
+ elif ds is None:
+ return {}
+ else:
+ raise ValueError
+ except ValueError as e:
+ raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__,
+ obj=ds, orig_exc=e)
+ except TypeError as e:
+ raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds, orig_exc=e)
+
+ def _extend_value(self, value, new_value, prepend=False):
+ '''
+ Will extend the value given with new_value (and will turn both
+ into lists if they are not so already). Consecutive duplicates and
+ None values are removed from the combined result.
+ '''
+
+ if not isinstance(value, list):
+ value = [value]
+ if not isinstance(new_value, list):
+ new_value = [new_value]
+
+ # Due to where _extend_value may run for some attributes,
+ # it is possible to end up with Sentinel in the list of values;
+ # ensure we strip them out
+ value = [v for v in value if v is not Sentinel]
+ new_value = [v for v in new_value if v is not Sentinel]
+
+ if prepend:
+ combined = new_value + value
+ else:
+ combined = value + new_value
+
+ return [i for i, _ in itertools.groupby(combined) if i is not None]
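+
+ # Illustrative example for _extend_value() (not upstream code):
+ #
+ #   self._extend_value(['a'], ['b', 'b'])           # -> ['a', 'b']
+ #   self._extend_value(['a'], ['b'], prepend=True)  # -> ['b', 'a']
+ #
+ # Only consecutive duplicates are collapsed (itertools.groupby), and
+ # None values are dropped from the combined list.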
+
+ def dump_attrs(self):
+ '''
+ Dumps all attributes to a dictionary
+ '''
+ attrs = {}
+ for (name, attribute) in iteritems(self._valid_attrs):
+ attr = getattr(self, name)
+ if attribute.isa == 'class' and hasattr(attr, 'serialize'):
+ attrs[name] = attr.serialize()
+ else:
+ attrs[name] = attr
+ return attrs
+
+ def from_attrs(self, attrs):
+ '''
+ Loads attributes from a dictionary
+ '''
+ for (attr, value) in iteritems(attrs):
+ if attr in self._valid_attrs:
+ attribute = self._valid_attrs[attr]
+ if attribute.isa == 'class' and isinstance(value, dict):
+ obj = attribute.class_type()
+ obj.deserialize(value)
+ setattr(self, attr, obj)
+ else:
+ setattr(self, attr, value)
+
+ def serialize(self):
+ '''
+ Serializes the object derived from the base object into
+ a dictionary of values. This only serializes the field
+ attributes for the object, so this may need to be overridden
+ for any classes which wish to add additional items not stored
+ as field attributes.
+ '''
+
+ repr = self.dump_attrs()
+
+ # serialize the uuid field
+ repr['uuid'] = self._uuid
+ repr['finalized'] = self._finalized
+ repr['squashed'] = self._squashed
+
+ return repr
+
+ def deserialize(self, data):
+ '''
+ Given a dictionary of values, load up the field attributes for
+ this object. As with serialize(), if there are any non-field
+ attribute data members, this method will need to be overridden
+ and extended.
+ '''
+
+ if not isinstance(data, dict):
+ raise AnsibleAssertionError('data (%s) should be a dict but is a %s' % (data, type(data)))
+
+ for (name, attribute) in iteritems(self._valid_attrs):
+ if name in data:
+ setattr(self, name, data[name])
+ else:
+ if callable(attribute.default):
+ setattr(self, name, attribute.default())
+ else:
+ setattr(self, name, attribute.default)
+
+ # restore the UUID field
+ setattr(self, '_uuid', data.get('uuid'))
+ self._finalized = data.get('finalized', False)
+ self._squashed = data.get('squashed', False)
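+
+ # Illustrative round trip for serialize()/deserialize() (not upstream code):
+ #
+ #   data = obj.serialize()   # field attrs plus 'uuid'/'finalized'/'squashed'
+ #   clone = obj.__class__()
+ #   clone.deserialize(data)  # restores the attrs, uuid and state flags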
+
+
+class Base(FieldAttributeBase):
+
+ _name = FieldAttribute(isa='string', default='', always_post_validate=True, inherit=False)
+
+ # connection/transport
+ _connection = FieldAttribute(isa='string', default=context.cliargs_deferred_get('connection'))
+ _port = FieldAttribute(isa='int')
+ _remote_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('remote_user'))
+
+ # variables
+ _vars = FieldAttribute(isa='dict', priority=100, inherit=False, static=True)
+
+ # module default params
+ _module_defaults = FieldAttribute(isa='list', extend=True, prepend=True)
+
+ # flags and misc. settings
+ _environment = FieldAttribute(isa='list', extend=True, prepend=True)
+ _no_log = FieldAttribute(isa='bool')
+ _run_once = FieldAttribute(isa='bool')
+ _ignore_errors = FieldAttribute(isa='bool')
+ _ignore_unreachable = FieldAttribute(isa='bool')
+ _check_mode = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('check'))
+ _diff = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('diff'))
+ _any_errors_fatal = FieldAttribute(isa='bool', default=C.ANY_ERRORS_FATAL)
+ _throttle = FieldAttribute(isa='int', default=0)
+ _timeout = FieldAttribute(isa='int', default=C.TASK_TIMEOUT)
+
+ # explicitly invoke a debugger on tasks
+ _debugger = FieldAttribute(isa='string')
+
+ # Privilege escalation
+ _become = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('become'))
+ _become_method = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_method'))
+ _become_user = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_user'))
+ _become_flags = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_flags'))
+ _become_exe = FieldAttribute(isa='string', default=context.cliargs_deferred_get('become_exe'))
+
+ # used to hold sudo/su stuff
+ DEPRECATED_ATTRIBUTES = []
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
new file mode 100644
index 00000000..e3a4e1c7
--- /dev/null
+++ b/lib/ansible/playbook/block.py
@@ -0,0 +1,424 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.helpers import load_list_of_tasks
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.utils.sentinel import Sentinel
+
+
+class Block(Base, Conditional, CollectionSearch, Taggable):
+
+ # main block fields containing the task lists
+ _block = FieldAttribute(isa='list', default=list, inherit=False)
+ _rescue = FieldAttribute(isa='list', default=list, inherit=False)
+ _always = FieldAttribute(isa='list', default=list, inherit=False)
+
+ # other fields
+ _delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool')
+
+ # for future consideration? this would be functionally
+ # similar to the 'else' clause for exceptions
+ # _otherwise = FieldAttribute(isa='list')
+
+ def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
+ self._play = play
+ self._role = role
+ self._parent = None
+ self._dep_chain = None
+ self._use_handlers = use_handlers
+ self._implicit = implicit
+
+ # end of role flag
+ self._eor = False
+
+ if task_include:
+ self._parent = task_include
+ elif parent_block:
+ self._parent = parent_block
+
+ super(Block, self).__init__()
+
+ def __repr__(self):
+ return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)
+
+ def __eq__(self, other):
+ '''object comparison based on _uuid'''
+ return self._uuid == other._uuid
+
+ def __ne__(self, other):
+ '''object comparison based on _uuid'''
+ return self._uuid != other._uuid
+
+ def get_vars(self):
+ '''
+ Blocks do not store variables directly, however they may be a member
+ of a role or task include which does, so return those if present.
+ '''
+
+ all_vars = self.vars.copy()
+
+ if self._parent:
+ all_vars.update(self._parent.get_vars())
+
+ return all_vars
+
+ @staticmethod
+ def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ implicit = not Block.is_block(data)
+ b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
+ return b.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ @staticmethod
+ def is_block(ds):
+ is_block = False
+ if isinstance(ds, dict):
+ for attr in ('block', 'rescue', 'always'):
+ if attr in ds:
+ is_block = True
+ break
+ return is_block
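+
+ # Illustrative example for is_block() (not upstream code): a dict counts
+ # as a block if it has any of the three task-list keywords:
+ #
+ #   Block.is_block({'block': [...]})        # -> True
+ #   Block.is_block({'command': 'uptime'})   # -> False (bare task)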
+
+ def preprocess_data(self, ds):
+ '''
+ If a simple task is given, an implicit block for that single task
+ is created, which goes in the main portion of the block
+ '''
+
+ if not Block.is_block(ds):
+ if isinstance(ds, list):
+ return super(Block, self).preprocess_data(dict(block=ds))
+ else:
+ return super(Block, self).preprocess_data(dict(block=[ds]))
+
+ return super(Block, self).preprocess_data(ds)
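+
+ # Illustrative example for preprocess_data() (not upstream code): bare
+ # task data is wrapped into an implicit block:
+ #
+ #   {'command': 'uptime'}  -> {'block': [{'command': 'uptime'}]}
+ #   [task1, task2]         -> {'block': [task1, task2]}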
+
+ def _load_block(self, attr, ds):
+ try:
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=None,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds, orig_exc=e)
+
+ def _load_rescue(self, attr, ds):
+ try:
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=None,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds, orig_exc=e)
+
+ def _load_always(self, attr, ds):
+ try:
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=None,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds, orig_exc=e)
+
+ def _validate_always(self, attr, name, value):
+ if value and not self.block:
+ raise AnsibleParserError("'%s' keyword cannot be used without 'block'" % name, obj=self._ds)
+
+ _validate_rescue = _validate_always
+
+ def get_dep_chain(self):
+ if self._dep_chain is None:
+ if self._parent:
+ return self._parent.get_dep_chain()
+ else:
+ return None
+ else:
+ return self._dep_chain[:]
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+ def _dupe_task_list(task_list, new_block):
+ new_task_list = []
+ for task in task_list:
+ new_task = task.copy(exclude_parent=True)
+ if task._parent:
+ new_task._parent = task._parent.copy(exclude_tasks=True)
+ if task._parent == new_block:
+ # If task._parent is the same as new_block, just replace it
+ new_task._parent = new_block
+ else:
+ # task may not be a direct child of new_block; search for the correct place to insert new_block
+ cur_obj = new_task._parent
+ while cur_obj._parent and cur_obj._parent != new_block:
+ cur_obj = cur_obj._parent
+
+ cur_obj._parent = new_block
+ else:
+ new_task._parent = new_block
+ new_task_list.append(new_task)
+ return new_task_list
+
+ new_me = super(Block, self).copy()
+ new_me._play = self._play
+ new_me._use_handlers = self._use_handlers
+ new_me._eor = self._eor
+
+ if self._dep_chain is not None:
+ new_me._dep_chain = self._dep_chain[:]
+
+ new_me._parent = None
+ if self._parent and not exclude_parent:
+ new_me._parent = self._parent.copy(exclude_tasks=True)
+
+ if not exclude_tasks:
+ new_me.block = _dupe_task_list(self.block or [], new_me)
+ new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
+ new_me.always = _dupe_task_list(self.always or [], new_me)
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ new_me.validate()
+ return new_me
+
+ def serialize(self):
+ '''
+ Override of the default serialize method, since when we're serializing
+ a block we don't want to include the attribute list of tasks.
+ '''
+
+ data = dict()
+ for attr in self._valid_attrs:
+ if attr not in ('block', 'rescue', 'always'):
+ data[attr] = getattr(self, attr)
+
+ data['dep_chain'] = self.get_dep_chain()
+ data['eor'] = self._eor
+
+ if self._role is not None:
+ data['role'] = self._role.serialize()
+ if self._parent is not None:
+ data['parent'] = self._parent.copy(exclude_tasks=True).serialize()
+ data['parent_type'] = self._parent.__class__.__name__
+
+ return data
+
+ def deserialize(self, data):
+ '''
+ Override of the default deserialize method, to match the above overridden
+ serialize method.
+ '''
+
+ # import is here to avoid import loops
+ from ansible.playbook.task_include import TaskInclude
+ from ansible.playbook.handler_task_include import HandlerTaskInclude
+
+ # we don't want the full set of attributes (the task lists), as that
+ # would lead to a serialize/deserialize loop
+ for attr in self._valid_attrs:
+ if attr in data and attr not in ('block', 'rescue', 'always'):
+ setattr(self, attr, data.get(attr))
+
+ self._dep_chain = data.get('dep_chain', None)
+ self._eor = data.get('eor', False)
+
+ # if there was a serialized role, unpack it too
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+
+ parent_data = data.get('parent')
+ if parent_data:
+ parent_type = data.get('parent_type')
+ if parent_type == 'Block':
+ p = Block()
+ elif parent_type == 'TaskInclude':
+ p = TaskInclude()
+ elif parent_type == 'HandlerTaskInclude':
+ p = HandlerTaskInclude()
+ p.deserialize(parent_data)
+ self._parent = p
+ self._dep_chain = self._parent.get_dep_chain()
+
+ def set_loader(self, loader):
+ self._loader = loader
+ if self._parent:
+ self._parent.set_loader(loader)
+ elif self._role:
+ self._role.set_loader(loader)
+
+ dep_chain = self.get_dep_chain()
+ if dep_chain:
+ for dep in dep_chain:
+ dep.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, extend=False, prepend=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a block value.
+ '''
+
+ extend = self._valid_attrs[attr].extend
+ prepend = self._valid_attrs[attr].prepend
+ try:
+ value = self._attributes[attr]
+ # If parent is static, we can grab attrs from the parent;
+ # otherwise, defer to the grandparent
+ if getattr(self._parent, 'statically_loaded', True):
+ _parent = self._parent
+ else:
+ _parent = self._parent._parent
+
+ if _parent and (value is Sentinel or extend):
+ try:
+ if getattr(_parent, 'statically_loaded', True):
+ if hasattr(_parent, '_get_parent_attribute'):
+ parent_value = _parent._get_parent_attribute(attr)
+ else:
+ parent_value = _parent._attributes.get(attr, Sentinel)
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+ except AttributeError:
+ pass
+ if self._role and (value is Sentinel or extend):
+ try:
+ parent_value = self._role._attributes.get(attr, Sentinel)
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+
+ dep_chain = self.get_dep_chain()
+ if dep_chain and (value is Sentinel or extend):
+ dep_chain.reverse()
+ for dep in dep_chain:
+ dep_value = dep._attributes.get(attr, Sentinel)
+ if extend:
+ value = self._extend_value(value, dep_value, prepend)
+ else:
+ value = dep_value
+
+ if value is not Sentinel and not extend:
+ break
+ except AttributeError:
+ pass
+ if self._play and (value is Sentinel or extend):
+ try:
+ play_value = self._play._attributes.get(attr, Sentinel)
+ if play_value is not Sentinel:
+ if extend:
+ value = self._extend_value(value, play_value, prepend)
+ else:
+ value = play_value
+ except AttributeError:
+ pass
+ except KeyError:
+ pass
+
+ return value
+
+ def filter_tagged_tasks(self, all_vars):
+ '''
+ Creates a new block, with task lists filtered based on the tags.
+ '''
+
+ def evaluate_and_append_task(target):
+ tmp_list = []
+ for task in target:
+ if isinstance(task, Block):
+ filtered_block = evaluate_block(task)
+ if filtered_block.has_tasks():
+ tmp_list.append(filtered_block)
+ elif (task.action in C._ACTION_META or
+ (task.action in C._ACTION_INCLUDE and task.evaluate_tags([], self._play.skip_tags, all_vars=all_vars)) or
+ task.evaluate_tags(self._play.only_tags, self._play.skip_tags, all_vars=all_vars)):
+ tmp_list.append(task)
+ return tmp_list
+
+ def evaluate_block(block):
+ new_block = block.copy(exclude_parent=True, exclude_tasks=True)
+ new_block._parent = block._parent
+ new_block.block = evaluate_and_append_task(block.block)
+ new_block.rescue = evaluate_and_append_task(block.rescue)
+ new_block.always = evaluate_and_append_task(block.always)
+ return new_block
+
+ return evaluate_block(self)
+
+ def has_tasks(self):
+ return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
+
+ def get_include_params(self):
+ if self._parent:
+ return self._parent.get_include_params()
+ else:
+ return dict()
+
+ def all_parents_static(self):
+ '''
+ Determine if all of the parents of this block were statically loaded
+ or not. Since Task/TaskInclude objects may be in the chain, they simply
+ call their parent's all_parents_static() method. Only Block objects in
+ the chain check the statically_loaded value of the parent.
+ '''
+ from ansible.playbook.task_include import TaskInclude
+ if self._parent:
+ if isinstance(self._parent, TaskInclude) and not self._parent.statically_loaded:
+ return False
+ return self._parent.all_parents_static()
+
+ return True
+
+ def get_first_parent_include(self):
+ from ansible.playbook.task_include import TaskInclude
+ if self._parent:
+ if isinstance(self._parent, TaskInclude):
+ return self._parent
+ return self._parent.get_first_parent_include()
+ return None
diff --git a/lib/ansible/playbook/collectionsearch.py b/lib/ansible/playbook/collectionsearch.py
new file mode 100644
index 00000000..fb69519b
--- /dev/null
+++ b/lib/ansible/playbook/collectionsearch.py
@@ -0,0 +1,61 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.template import is_template, Environment
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def _ensure_default_collection(collection_list=None):
+ default_collection = AnsibleCollectionConfig.default_collection
+
+ # Will be None when used as the default
+ if collection_list is None:
+ collection_list = []
+
+ # FIXME: exclude role tasks?
+ if default_collection and default_collection not in collection_list:
+ collection_list.insert(0, default_collection)
+
+ # if there's something in the list, ensure that builtin or legacy is always there too
+ if collection_list and 'ansible.builtin' not in collection_list and 'ansible.legacy' not in collection_list:
+ collection_list.append('ansible.legacy')
+
+ return collection_list
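+
+# Illustrative example for _ensure_default_collection() (not upstream code),
+# assuming the default collection is 'my_ns.my_coll':
+#
+#   _ensure_default_collection(['community.general'])
+#   # -> ['my_ns.my_coll', 'community.general', 'ansible.legacy']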
+
+
+class CollectionSearch:
+
+ # this needs to be populated before we can resolve tasks/roles/etc
+ _collections = FieldAttribute(isa='list', listof=string_types, priority=100, default=_ensure_default_collection,
+ always_post_validate=True, static=True)
+
+ def _load_collections(self, attr, ds):
+ # We are always a mixin with Base, so we can validate this untemplated
+ # field early on to guarantee we are dealing with a list.
+ ds = self.get_validated_value('collections', self._collections, ds, None)
+
+ # this will only be called if someone specified a value; call the shared helper to ensure the default collection is present
+ _ensure_default_collection(collection_list=ds)
+
+ if not ds: # don't return an empty collection list, just return None
+ return None
+
+ # This duplicates static attr checking logic from post_validate()
+ # because if the user attempts to template a collection name, it may
+ # error before it ever gets to the post_validate() warning (e.g. trying
+ # to import a role from the collection).
+ env = Environment()
+ for collection_name in ds:
+ if is_template(collection_name, env):
+ display.warning('"collections" is not templatable, but we found: %s, '
+ 'it will not be templated and will be used "as is".' % (collection_name))
+
+ return ds
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
new file mode 100644
index 00000000..a969d1a7
--- /dev/null
+++ b/lib/ansible/playbook/conditional.py
@@ -0,0 +1,224 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import re
+
+from jinja2.compiler import generate
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils.six import text_type
+from ansible.module_utils._text import to_native
+from ansible.playbook.attribute import FieldAttribute
+from ansible.utils.display import Display
+
+display = Display()
+
+DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
+LOOKUP_REGEX = re.compile(r'lookup\s*\(')
+VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
+
+
+class Conditional:
+
+ '''
+ This is a mix-in class, to be used with Base to allow the object
+ to be run when its conditions are met, and skipped otherwise.
+ '''
+
+ _when = FieldAttribute(isa='list', default=list, extend=True, prepend=True)
+
+ def __init__(self, loader=None):
+ # when used directly, this class needs a loader, but we want to
+ # make sure we don't trample on the existing one if this class
+ # is used as a mix-in with a playbook base class
+ if not hasattr(self, '_loader'):
+ if loader is None:
+ raise AnsibleError("a loader must be specified when using Conditional() directly")
+ else:
+ self._loader = loader
+ super(Conditional, self).__init__()
+
+ def _validate_when(self, attr, name, value):
+ if not isinstance(value, list):
+ setattr(self, name, [value])
+
+ def extract_defined_undefined(self, conditional):
+ results = []
+
+ cond = conditional
+ m = DEFINED_REGEX.search(cond)
+ while m:
+ results.append(m.groups())
+ cond = cond[m.end():]
+ m = DEFINED_REGEX.search(cond)
+
+ return results
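+
+ # Illustrative example for extract_defined_undefined() (not upstream code):
+ # DEFINED_REGEX pulls out every defined/undefined test in the string:
+ #
+ #   self.extract_defined_undefined("foo is defined and bar is not defined")
+ #   # -> [('foo', 'is', 'defined'), ('bar', 'is not', 'defined')]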
+
+ def evaluate_conditional(self, templar, all_vars):
+ '''
+ Loops through the conditionals set on this object, returning
+ False if any of them evaluate to False.
+ '''
+
+ # since this is a mix-in, it may not have an underlying datastructure
+ # associated with it, so we pull it out now in case we need it for
+ # error reporting below
+ ds = None
+ if hasattr(self, '_ds'):
+ ds = getattr(self, '_ds')
+
+ try:
+ for conditional in self.when:
+ if not self._check_conditional(conditional, templar, all_vars):
+ return False
+ except Exception as e:
+ raise AnsibleError(
+ "The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
+ )
+
+ return True
+
+ def _check_conditional(self, conditional, templar, all_vars):
+ '''
+ This method does the low-level evaluation of each conditional
+ set on this object, using jinja2 to wrap the conditionals for
+ evaluation.
+ '''
+
+ original = conditional
+ if conditional is None or conditional == '':
+ return True
+
+ # this allows for direct boolean assignments to conditionals "when: False"
+ if isinstance(conditional, bool):
+ return conditional
+
+ if templar.is_template(conditional):
+ display.warning('conditional statements should not include jinja2 '
+ 'templating delimiters such as {{ }} or {%% %%}. '
+ 'Found: %s' % conditional)
+
+ bare_vars_warning = False
+ if C.CONDITIONAL_BARE_VARS:
+ if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
+ conditional = all_vars[conditional]
+ bare_vars_warning = True
+
+ # make sure the templar is using the variables specified with this method
+ templar.available_variables = all_vars
+
+ try:
+ # if the conditional is "unsafe", disable lookups
+ disable_lookups = hasattr(conditional, '__UNSAFE__')
+ conditional = templar.template(conditional, disable_lookups=disable_lookups)
+ if bare_vars_warning and not isinstance(conditional, bool):
+ display.deprecated('evaluating %r as a bare variable, this behaviour will go away and you might need to add |bool'
+ ' to the expression in the future. Also see CONDITIONAL_BARE_VARS configuration toggle' % original,
+ version="2.12", collection_name='ansible.builtin')
+ if not isinstance(conditional, text_type) or conditional == "":
+ return conditional
+
+ # update the lookups flag, as the string returned above may now be unsafe
+ # and we don't want future templating calls to do unsafe things
+ disable_lookups |= hasattr(conditional, '__UNSAFE__')
+
+ # First, we do some low-level jinja2 parsing involving the AST format of the
+ # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
+ class CleansingNodeVisitor(ast.NodeVisitor):
+ def generic_visit(self, node, inside_call=False, inside_yield=False):
+ if isinstance(node, ast.Call):
+ inside_call = True
+ elif isinstance(node, ast.Yield):
+ inside_yield = True
+ elif isinstance(node, ast.Str):
+ if disable_lookups:
+ if inside_call and node.s.startswith("__"):
+ # calling things with a dunder is generally bad at this point...
+ raise AnsibleError(
+ "Invalid access found in the conditional: '%s'" % conditional
+ )
+ elif inside_yield:
+ # we're inside a yield, so recursively parse and traverse the AST
+ # of the result to catch forbidden syntax from executing
+ parsed = ast.parse(node.s, mode='exec')
+ cnv = CleansingNodeVisitor()
+ cnv.visit(parsed)
+ # iterate over all child nodes
+ for child_node in ast.iter_child_nodes(node):
+ self.generic_visit(
+ child_node,
+ inside_call=inside_call,
+ inside_yield=inside_yield
+ )
+ try:
+ e = templar.environment.overlay()
+ e.filters.update(templar.environment.filters)
+ e.tests.update(templar.environment.tests)
+
+ res = e._parse(conditional, None, None)
+ res = generate(res, e, None, None)
+ parsed = ast.parse(res, mode='exec')
+
+ cnv = CleansingNodeVisitor()
+ cnv.visit(parsed)
+ except Exception as e:
+ raise AnsibleError("Invalid conditional detected: %s" % to_native(e))
+
+ # and finally we generate and template the presented string and look at the resulting string
+ presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+ val = templar.template(presented, disable_lookups=disable_lookups).strip()
+ if val == "True":
+ return True
+ elif val == "False":
+ return False
+ else:
+ raise AnsibleError("unable to evaluate conditional: %s" % original)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ # the templating failed, meaning most likely a variable was undefined. If we happened
+ # to be looking for an undefined variable, return True, otherwise fail
+ try:
+ # first we extract the variable name from the error message
+ var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
+ # next we extract all defined/undefined tests from the conditional string
+ def_undef = self.extract_defined_undefined(conditional)
+ # then we loop through these, comparing the error variable name against
+ # each def/undef test we found above. If there is a match, we determine
+ # whether the logic/state mean the variable should exist or not and return
+ # the corresponding True/False
+ for (du_var, logic, state) in def_undef:
+ # when we compare the var names, normalize quotes because something
+ # like hostvars['foo'] may be tested against hostvars["foo"]
+ if var_name.replace("'", '"') == du_var.replace("'", '"'):
+ # whether the variable should exist is an xor test between a negation
+ # in the logic portion and the state (defined or undefined)
+ should_exist = ('not' in logic) != (state == 'defined')
+ if should_exist:
+ return False
+ else:
+ return True
+ # as nothing above matched the failed var name, re-raise here to
+ # trigger the AnsibleUndefinedVariable exception again below
+ raise
+ except Exception:
+ raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
diff --git a/lib/ansible/playbook/handler.py b/lib/ansible/playbook/handler.py
new file mode 100644
index 00000000..79eaf3fe
--- /dev/null
+++ b/lib/ansible/playbook/handler.py
@@ -0,0 +1,59 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.task import Task
+from ansible.module_utils.six import string_types
+
+
+class Handler(Task):
+
+ _listen = FieldAttribute(isa='list', default=list, listof=string_types, static=True)
+
+ def __init__(self, block=None, role=None, task_include=None):
+ self.notified_hosts = []
+
+ self.cached_name = False
+
+ super(Handler, self).__init__(block=block, role=role, task_include=task_include)
+
+ def __repr__(self):
+ ''' returns a human-readable representation of the handler '''
+ return "HANDLER: %s" % self.get_name()
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Handler(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def notify_host(self, host):
+ if not self.is_host_notified(host):
+ self.notified_hosts.append(host)
+ return True
+ return False
+
+ def is_host_notified(self, host):
+ return host in self.notified_hosts
+
+ def serialize(self):
+ result = super(Handler, self).serialize()
+ result['is_handler'] = True
+ return result
diff --git a/lib/ansible/playbook/handler_task_include.py b/lib/ansible/playbook/handler_task_include.py
new file mode 100644
index 00000000..1c779f85
--- /dev/null
+++ b/lib/ansible/playbook/handler_task_include.py
@@ -0,0 +1,39 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# from ansible.inventory.host import Host
+from ansible.playbook.handler import Handler
+from ansible.playbook.task_include import TaskInclude
+
+
+class HandlerTaskInclude(Handler, TaskInclude):
+
+ VALID_INCLUDE_KEYWORDS = TaskInclude.VALID_INCLUDE_KEYWORDS.union(('listen',))
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
+ handler = t.check_options(
+ t.load_data(data, variable_manager=variable_manager, loader=loader),
+ data
+ )
+
+ return handler
diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
new file mode 100644
index 00000000..892ce158
--- /dev/null
+++ b/lib/ansible/playbook/helpers.py
@@ -0,0 +1,396 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ '''
+ Given a list of mixed task/block data (parsed from YAML),
+ return a list of Block() objects, where implicit blocks
+ are created for each bare Task.
+ '''
+
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.block import Block
+
+ if not isinstance(ds, (list, type(None))):
+ raise AnsibleAssertionError('%s should be a list or None but is %s' % (ds, type(ds)))
+
+ block_list = []
+ if ds:
+ count = iter(range(len(ds)))
+ for i in count:
+ block_ds = ds[i]
+ # Implicit blocks are created by bare tasks listed in a play without
+ # an explicit block statement. If we have two implicit blocks in a row,
+ # squash them down to a single block to save processing time later.
+ implicit_blocks = []
+ while block_ds is not None and not Block.is_block(block_ds):
+ implicit_blocks.append(block_ds)
+ i += 1
+ # Advance the iterator, so we don't repeat
+ next(count, None)
+ try:
+ block_ds = ds[i]
+ except IndexError:
+ block_ds = None
+
+ # Loop over both the implicit blocks and block_ds, as block_ds is the next item in the list
+ for b in (implicit_blocks, block_ds):
+ if b:
+ block_list.append(
+ Block.load(
+ b,
+ play=play,
+ parent_block=parent_block,
+ role=role,
+ task_include=task_include,
+ use_handlers=use_handlers,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+ )
+
+ return block_list
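+
+# Illustrative example for load_list_of_blocks() (not upstream code): given
+# mixed parsed YAML like
+#
+#   [task1, task2, {'block': [...]}, task3]
+#
+# task1 and task2 are squashed into a single implicit Block, the explicit
+# block becomes its own Block, and task3 becomes another implicit Block.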
+
+
+def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ '''
+ Given a list of task datastructures (parsed from YAML),
+ return a list of Task() or TaskInclude() objects.
+ '''
+
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.block import Block
+ from ansible.playbook.handler import Handler
+ from ansible.playbook.task import Task
+ from ansible.playbook.task_include import TaskInclude
+ from ansible.playbook.role_include import IncludeRole
+ from ansible.playbook.handler_task_include import HandlerTaskInclude
+ from ansible.template import Templar
+
+ if not isinstance(ds, list):
+ raise AnsibleAssertionError('The ds (%s) should be a list but was a %s' % (ds, type(ds)))
+
+ task_list = []
+ for task_ds in ds:
+ if not isinstance(task_ds, dict):
+ raise AnsibleAssertionError('The task_ds (%s) should be a dict but was a %s' % (task_ds, type(task_ds)))
+
+ if 'block' in task_ds:
+ t = Block.load(
+ task_ds,
+ play=play,
+ parent_block=block,
+ role=role,
+ task_include=task_include,
+ use_handlers=use_handlers,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+ task_list.append(t)
+ else:
+ args_parser = ModuleArgsParser(task_ds)
+ try:
+ (action, args, delegate_to) = args_parser.parse(skip_action_validation=True)
+ except AnsibleParserError as e:
+ # if the raised exception was created with obj=ds args, then it includes the detail,
+ # so we don't need to add it and can just re-raise
+ if e._obj:
+ raise
+ # But if it wasn't, we can add the yaml object now to get more detail
+ raise AnsibleParserError(to_native(e), obj=task_ds, orig_exc=e)
+
+ if action in C._ACTION_ALL_INCLUDE_IMPORT_TASKS:
+
+ if use_handlers:
+ include_class = HandlerTaskInclude
+ else:
+ include_class = TaskInclude
+
+ t = include_class.load(
+ task_ds,
+ block=block,
+ role=role,
+ task_include=None,
+ variable_manager=variable_manager,
+ loader=loader
+ )
+
+ all_vars = variable_manager.get_vars(play=play, task=t)
+ templar = Templar(loader=loader, variables=all_vars)
+
+ # check to see if this include is dynamic or static:
+ # 1. the user has set the 'static' option to false or true
+ # 2. one of the appropriate config options was set
+ if action in C._ACTION_INCLUDE_TASKS:
+ is_static = False
+ elif action in C._ACTION_IMPORT_TASKS:
+ is_static = True
+ elif t.static is not None:
+ display.deprecated("The use of 'static' has been deprecated. "
+ "Use 'import_tasks' for static inclusion, or 'include_tasks' for dynamic inclusion",
+ version='2.12', collection_name='ansible.builtin')
+ is_static = t.static
+ else:
+ is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
+ (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
+ (not templar.is_template(t.args['_raw_params']) and t.all_parents_static() and not t.loop)
+
+ if is_static:
+ if t.loop is not None:
+ if action in C._ACTION_IMPORT_TASKS:
+ raise AnsibleParserError("You cannot use loops on 'import_tasks' statements. You should use 'include_tasks' instead.", obj=task_ds)
+ else:
+ raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)
+
+ # we set a flag to indicate this include was static
+ t.statically_loaded = True
+
+ # handle relative includes by walking up the list of parent include
+ # tasks and checking the relative result to see if it exists
+ parent_include = block
+ cumulative_path = None
+
+ found = False
+ subdir = 'tasks'
+ if use_handlers:
+ subdir = 'handlers'
+ while parent_include is not None:
+ if not isinstance(parent_include, TaskInclude):
+ parent_include = parent_include._parent
+ continue
+ try:
+ parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
+ except AnsibleUndefinedVariable as e:
+ if not parent_include.statically_loaded:
+ raise AnsibleParserError(
+ "Error when evaluating variable in dynamic parent include path: %s. "
+ "When using static imports, the parent dynamic include cannot utilize host facts "
+ "or variables from inventory" % parent_include.args.get('_raw_params'),
+ obj=task_ds,
+ suppress_extended_error=True,
+ orig_exc=e
+ )
+ raise
+ if cumulative_path is None:
+ cumulative_path = parent_include_dir
+ elif not os.path.isabs(cumulative_path):
+ cumulative_path = os.path.join(parent_include_dir, cumulative_path)
+ include_target = templar.template(t.args['_raw_params'])
+ if t._role:
+ new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
+ include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
+ else:
+ include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
+
+ if os.path.exists(include_file):
+ found = True
+ break
+ else:
+ parent_include = parent_include._parent
+
+ if not found:
+ try:
+ include_target = templar.template(t.args['_raw_params'])
+ except AnsibleUndefinedVariable as e:
+ raise AnsibleParserError(
+ "Error when evaluating variable in import path: %s.\n\n"
+ "When using static imports, ensure that any variables used in their names are defined in vars/vars_files\n"
+ "or extra-vars passed in from the command line. Static imports cannot use variables from facts or inventory\n"
+ "sources like group or host vars." % t.args['_raw_params'],
+ obj=task_ds,
+ suppress_extended_error=True,
+ orig_exc=e)
+ if t._role:
+ include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
+ else:
+ include_file = loader.path_dwim(include_target)
+
+ try:
+ data = loader.load_from_file(include_file)
+ if data is None:
+ display.warning('file %s is empty and had no tasks to include' % include_file)
+ continue
+ elif not isinstance(data, list):
+ raise AnsibleParserError("included task files must contain a list of tasks", obj=data)
+
+ # since we can't send callbacks here, we display a message directly in
+ # the same fashion used by the on_include callback. We also do it here,
+ # because the recursive nature of helper methods means we may be loading
+ # nested includes, and we want the include order printed correctly
+ display.vv("statically imported: %s" % include_file)
+ except AnsibleFileNotFound:
+ if action not in C._ACTION_INCLUDE or t.static or \
+ C.DEFAULT_TASK_INCLUDES_STATIC or \
+ C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers:
+ raise
+ display.deprecated(
+ "Included file '%s' not found, however since this include is not "
+ "explicitly marked as 'static: yes', we will try and include it dynamically "
+ "later. In the future, this will be an error unless 'static: no' is used "
+ "on the include task. If you do not want missing includes to be considered "
+ "dynamic, use 'static: yes' on the include or set the global ansible.cfg "
+ "options to make all includes static for tasks and/or handlers" % include_file,
+ version="2.12", collection_name='ansible.builtin'
+ )
+ task_list.append(t)
+ continue
+
+ ti_copy = t.copy(exclude_parent=True)
+ ti_copy._parent = block
+ included_blocks = load_list_of_blocks(
+ data,
+ play=play,
+ parent_block=None,
+ task_include=ti_copy,
+ role=role,
+ use_handlers=use_handlers,
+ loader=loader,
+ variable_manager=variable_manager,
+ )
+
+ # FIXME: remove once 'include' is removed
+ # pop tags out of the include args, if they were specified there, and assign
+ # them to the include. If the include already had tags specified, we raise an
+ # error so that users know not to specify them both ways
+ tags = ti_copy.vars.pop('tags', [])
+ if isinstance(tags, string_types):
+ tags = tags.split(',')
+
+ if len(tags) > 0:
+ if action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS:
+ raise AnsibleParserError('You cannot specify "tags" inline to the task, it is a task keyword')
+ if len(ti_copy.tags) > 0:
+ raise AnsibleParserError(
+ "Include tasks should not specify tags in more than one way (both via args and directly on the task). "
+ "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement",
+ obj=task_ds,
+ suppress_extended_error=True,
+ )
+ display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option",
+ version="2.12", collection_name='ansible.builtin')
+ else:
+ tags = ti_copy.tags[:]
+
+ # now we extend the tags on each of the included blocks
+ for b in included_blocks:
+ b.tags = list(set(b.tags).union(tags))
+ # END FIXME
+
+ # FIXME: handlers shouldn't need this special handling, but do
+ # right now because they don't iterate blocks correctly
+ if use_handlers:
+ for b in included_blocks:
+ task_list.extend(b.block)
+ else:
+ task_list.extend(included_blocks)
+ else:
+ t.is_static = False
+ task_list.append(t)
+
+ elif action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES:
+ ir = IncludeRole.load(
+ task_ds,
+ block=block,
+ role=role,
+ task_include=None,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+
+ # check to see if this role include is dynamic or static:
+ # 1. the user has set the 'static' option to false or true
+ # 2. one of the appropriate config options was set
+ is_static = False
+ if action in C._ACTION_IMPORT_ROLE:
+ is_static = True
+
+ elif ir.static is not None:
+ display.deprecated("The use of 'static' for 'include_role' has been deprecated. "
+ "Use 'import_role' for static inclusion, or 'include_role' for dynamic inclusion",
+ version='2.12', collection_name='ansible.builtin')
+ is_static = ir.static
+
+ if is_static:
+ if ir.loop is not None:
+ if action in C._ACTION_IMPORT_ROLE:
+ raise AnsibleParserError("You cannot use loops on 'import_role' statements. You should use 'include_role' instead.", obj=task_ds)
+ else:
+ raise AnsibleParserError("You cannot use 'static' on an include_role with a loop", obj=task_ds)
+
+ # we set a flag to indicate this include was static
+ ir.statically_loaded = True
+
+ # template the role name now, if needed
+ all_vars = variable_manager.get_vars(play=play, task=ir)
+ templar = Templar(loader=loader, variables=all_vars)
+ ir._role_name = templar.template(ir._role_name)
+
+ # uses compiled list from object
+ blocks, _ = ir.get_block_list(variable_manager=variable_manager, loader=loader)
+ task_list.extend(blocks)
+ else:
+ # passes the task object itself for later generation of the list
+ task_list.append(ir)
+ else:
+ if use_handlers:
+ t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+ else:
+ t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+
+ task_list.append(t)
+
+ return task_list
+
+
+def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None, collection_search_list=None):
+ """
+ Loads and returns a list of RoleInclude objects from the ds list of role definitions
+ :param ds: list of roles to load
+ :param play: calling Play object
+ :param current_role_path: path of the owning role, if any
+ :param variable_manager: varmgr to use for templating
+ :param loader: loader to use for DS parsing/services
+ :param collection_search_list: list of collections to search for unqualified role names
+ :return: list of RoleInclude objects
+ """
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.role.include import RoleInclude
+
+ if not isinstance(ds, list):
+ raise AnsibleAssertionError('ds (%s) should be a list but was a %s' % (ds, type(ds)))
+
+ roles = []
+ for role_def in ds:
+ i = RoleInclude.load(role_def, play=play, current_role_path=current_role_path, variable_manager=variable_manager,
+ loader=loader, collection_list=collection_search_list)
+ roles.append(i)
+
+ return roles
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
new file mode 100644
index 00000000..2d209deb
--- /dev/null
+++ b/lib/ansible/playbook/included_file.py
@@ -0,0 +1,211 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text
+from ansible.playbook.task_include import TaskInclude
+from ansible.playbook.role_include import IncludeRole
+from ansible.template import Templar
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class IncludedFile:
+
+ def __init__(self, filename, args, vars, task, is_role=False):
+ self._filename = filename
+ self._args = args
+ self._vars = vars
+ self._task = task
+ self._hosts = []
+ self._is_role = is_role
+
+ def add_host(self, host):
+ if host not in self._hosts:
+ self._hosts.append(host)
+ return
+ # the host is already tracked for this include; signal the caller
+ raise ValueError()
+
+ def __eq__(self, other):
+ return (other._filename == self._filename and
+ other._args == self._args and
+ other._vars == self._vars and
+ other._task._uuid == self._task._uuid and
+ other._task._parent._uuid == self._task._parent._uuid)
+
+ def __repr__(self):
+ return "%s (args=%s vars=%s): %s" % (self._filename, self._args, self._vars, self._hosts)
+
+ @staticmethod
+ def process_include_results(results, iterator, loader, variable_manager):
+ included_files = []
+ task_vars_cache = {}
+
+ for res in results:
+
+ original_host = res._host
+ original_task = res._task
+
+ if original_task.action in C._ACTION_ALL_INCLUDES:
+ if original_task.loop:
+ if 'results' not in res._result:
+ continue
+ include_results = res._result['results']
+ else:
+ include_results = [res._result]
+
+ for include_result in include_results:
+ # if the task result was skipped or failed, continue
+ if include_result.get('skipped') or include_result.get('failed'):
+ continue
+
+ cache_key = (iterator._play, original_host, original_task)
+ try:
+ task_vars = task_vars_cache[cache_key]
+ except KeyError:
+ task_vars = task_vars_cache[cache_key] = variable_manager.get_vars(play=iterator._play, host=original_host, task=original_task)
+
+ include_args = include_result.get('include_args', dict())
+ special_vars = {}
+ loop_var = include_result.get('ansible_loop_var', 'item')
+ index_var = include_result.get('ansible_index_var')
+ if loop_var in include_result:
+ task_vars[loop_var] = special_vars[loop_var] = include_result[loop_var]
+ if index_var and index_var in include_result:
+ task_vars[index_var] = special_vars[index_var] = include_result[index_var]
+ if '_ansible_item_label' in include_result:
+ task_vars['_ansible_item_label'] = special_vars['_ansible_item_label'] = include_result['_ansible_item_label']
+ if 'ansible_loop' in include_result:
+ task_vars['ansible_loop'] = special_vars['ansible_loop'] = include_result['ansible_loop']
+ if original_task.no_log and '_ansible_no_log' not in include_args:
+ task_vars['_ansible_no_log'] = special_vars['_ansible_no_log'] = original_task.no_log
+
+ # get the search path for this task, to pass to lookup plugins that may be
+ # used to resolve the path to the included file
+ task_vars['ansible_search_path'] = original_task.get_search_path()
+
+ # ensure the basedir is always in the search path (dwim already searches there, but we need it present for display)
+ if loader.get_basedir() not in task_vars['ansible_search_path']:
+ task_vars['ansible_search_path'].append(loader.get_basedir())
+
+ templar = Templar(loader=loader, variables=task_vars)
+
+ if original_task.action in C._ACTION_ALL_INCLUDE_TASKS:
+ include_file = None
+ if original_task:
+ if original_task.static:
+ continue
+
+ if original_task._parent:
+ # handle relative includes by walking up the list of parent include
+ # tasks and checking the relative result to see if it exists
+ parent_include = original_task._parent
+ cumulative_path = None
+ while parent_include is not None:
+ if not isinstance(parent_include, TaskInclude):
+ parent_include = parent_include._parent
+ continue
+ if isinstance(parent_include, IncludeRole):
+ parent_include_dir = parent_include._role_path
+ else:
+ try:
+ parent_include_dir = os.path.dirname(templar.template(parent_include.args.get('_raw_params')))
+ except AnsibleError as e:
+ parent_include_dir = ''
+ display.warning(
+ 'Templating the path of the parent %s failed. The path to the '
+ 'included file may not be found. '
+ 'The error was: %s.' % (original_task.action, to_text(e))
+ )
+ if cumulative_path is not None and not os.path.isabs(cumulative_path):
+ cumulative_path = os.path.join(parent_include_dir, cumulative_path)
+ else:
+ cumulative_path = parent_include_dir
+ include_target = templar.template(include_result['include'])
+ if original_task._role:
+ new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
+ candidates = [loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target),
+ loader.path_dwim_relative(new_basedir, 'tasks', include_target)]
+ for include_file in candidates:
+ try:
+ # os.stat raises OSError if the candidate does not exist
+ os.stat(include_file)
+ # the candidate exists; select it
+ break
+ except OSError:
+ pass
+ else:
+ include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
+
+ if os.path.exists(include_file):
+ break
+ else:
+ parent_include = parent_include._parent
+
+ if include_file is None:
+ if original_task._role:
+ include_target = templar.template(include_result['include'])
+ include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_target)
+ else:
+ include_file = loader.path_dwim(include_result['include'])
+
+ include_file = templar.template(include_file)
+ inc_file = IncludedFile(include_file, include_args, special_vars, original_task)
+ else:
+ # template the included role's name here
+ role_name = include_args.pop('name', include_args.pop('role', None))
+ if role_name is not None:
+ role_name = templar.template(role_name)
+
+ new_task = original_task.copy()
+ new_task._role_name = role_name
+ for from_arg in new_task.FROM_ARGS:
+ if from_arg in include_args:
+ from_key = from_arg.replace('_from', '')
+ new_task._from_files[from_key] = templar.template(include_args.pop(from_arg))
+
+ inc_file = IncludedFile(role_name, include_args, special_vars, new_task, is_role=True)
+
+ idx = 0
+ orig_inc_file = inc_file
+ while True:
+ try:
+ pos = included_files[idx:].index(orig_inc_file)
+ # pos is relative to idx since we are slicing, so index with idx + pos
+ inc_file = included_files[idx + pos]
+ except ValueError:
+ included_files.append(orig_inc_file)
+ inc_file = orig_inc_file
+
+ try:
+ inc_file.add_host(original_host)
+ except ValueError:
+ # The host already exists for this include; advance forward, as this is a new include
+ idx += pos + 1
+ else:
+ break
+
+ return included_files
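
The idx/pos scan that closes process_include_results is the subtle part: equal IncludedFile entries share one slot and accumulate hosts, while a host that reappears forces a fresh entry further down the list. The self-contained toy below reproduces just that scan, with (key, host) tuples standing in for task results and a stripped-down Entry in place of IncludedFile.

# Toy reproduction of the de-duplication loop above (not Ansible API).
class Entry:
    def __init__(self, key):
        self.key = key
        self.hosts = []

    def __eq__(self, other):
        return other.key == self.key

    def add_host(self, host):
        if host not in self.hosts:
            self.hosts.append(host)
            return
        raise ValueError()

def dedup(results):
    entries = []
    for key, host in results:
        idx = 0
        orig = Entry(key)
        while True:
            try:
                # find the next equal entry at or after idx
                pos = entries[idx:].index(orig)
                entry = entries[idx + pos]
            except ValueError:
                entries.append(orig)
                entry = orig
            try:
                entry.add_host(host)
            except ValueError:
                # host already present; keep scanning further down
                idx += pos + 1
            else:
                break
    return entries

print([(e.key, e.hosts) for e in dedup(
    [('a.yml', 'h1'), ('a.yml', 'h2'), ('a.yml', 'h1')])])
# -> [('a.yml', ['h1', 'h2']), ('a.yml', ['h1'])]
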
diff --git a/lib/ansible/playbook/loop_control.py b/lib/ansible/playbook/loop_control.py
new file mode 100644
index 00000000..d840644f
--- /dev/null
+++ b/lib/ansible/playbook/loop_control.py
@@ -0,0 +1,40 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import FieldAttributeBase
+
+
+class LoopControl(FieldAttributeBase):
+
+ _loop_var = FieldAttribute(isa='str', default='item')
+ _index_var = FieldAttribute(isa='str')
+ _label = FieldAttribute(isa='str')
+ _pause = FieldAttribute(isa='float', default=0)
+ _extended = FieldAttribute(isa='bool')
+
+ def __init__(self):
+ super(LoopControl, self).__init__()
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ t = LoopControl()
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
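
Because LoopControl is a plain FieldAttributeBase container, it can be exercised directly. A minimal sketch, assuming ansible-base 2.10 is importable; the dict mirrors what a task-level loop_control: mapping parses to.

from ansible.playbook.loop_control import LoopControl

# equivalent of:  loop_control: {loop_var: inner, label: demo, pause: 0.5}
lc = LoopControl.load({'loop_var': 'inner', 'label': 'demo', 'pause': 0.5})
print(lc.loop_var, lc.label, lc.pause)   # -> inner demo 0.5
print(lc.index_var, lc.extended)         # -> None None (unset defaults)
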
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
new file mode 100644
index 00000000..35dfc558
--- /dev/null
+++ b/lib/ansible/playbook/play.py
@@ -0,0 +1,343 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.block import Block
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.vars.manager import preprocess_vars
+from ansible.utils.display import Display
+
+display = Display()
+
+
+__all__ = ['Play']
+
+
+class Play(Base, Taggable, CollectionSearch):
+
+ """
+ A play is a language feature that represents a list of roles and/or
+ task/handler blocks to execute on a given set of hosts.
+
+ Usage:
+
+ Play.load(datastructure) -> Play
+ Play.something(...)
+ """
+
+ # =================================================================================
+ _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True, priority=-1)
+
+ # Facts
+ _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
+ _gather_subset = FieldAttribute(isa='list', default=(lambda: C.DEFAULT_GATHER_SUBSET), listof=string_types, always_post_validate=True)
+ _gather_timeout = FieldAttribute(isa='int', default=C.DEFAULT_GATHER_TIMEOUT, always_post_validate=True)
+ _fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
+
+ # Variable Attributes
+ _vars_files = FieldAttribute(isa='list', default=list, priority=99)
+ _vars_prompt = FieldAttribute(isa='list', default=list, always_post_validate=False)
+
+ # Role Attributes
+ _roles = FieldAttribute(isa='list', default=list, priority=90)
+
+ # Block (Task) Lists Attributes
+ _handlers = FieldAttribute(isa='list', default=list)
+ _pre_tasks = FieldAttribute(isa='list', default=list)
+ _post_tasks = FieldAttribute(isa='list', default=list)
+ _tasks = FieldAttribute(isa='list', default=list)
+
+ # Flag/Setting Attributes
+ _force_handlers = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
+ _max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
+ _serial = FieldAttribute(isa='list', default=list, always_post_validate=True)
+ _strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
+ _order = FieldAttribute(isa='string', always_post_validate=True)
+
+ # =================================================================================
+
+ def __init__(self):
+ super(Play, self).__init__()
+
+ self._included_conditional = None
+ self._included_path = None
+ self._removed_hosts = []
+ self.ROLE_CACHE = {}
+
+ self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
+ self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
+
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self):
+ ''' return the name of the Play '''
+ return self.name
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None, vars=None):
+ if ('name' not in data or data['name'] is None) and 'hosts' in data:
+ if data['hosts'] is None or all(host is None for host in data['hosts']):
+ raise AnsibleParserError("Hosts list cannot be empty - please check your playbook")
+ if isinstance(data['hosts'], list):
+ data['name'] = ','.join(data['hosts'])
+ else:
+ data['name'] = data['hosts']
+ p = Play()
+ if vars:
+ p.vars = vars.copy()
+ return p.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def preprocess_data(self, ds):
+ '''
+ Adjusts play datastructure to cleanup old/legacy items
+ '''
+
+ if not isinstance(ds, dict):
+ raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
+
+ # The use of 'user' in the Play datastructure was deprecated to
+ # line up with the same change for Tasks, due to the fact that
+ # 'user' conflicted with the user module.
+ if 'user' in ds:
+ # this should never happen, but error out with a helpful message
+ # to the user if it does...
+ if 'remote_user' in ds:
+ raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. "
+ "The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
+
+ ds['remote_user'] = ds['user']
+ del ds['user']
+
+ return super(Play, self).preprocess_data(ds)
+
+ def _load_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ try:
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
+
+ def _load_pre_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ try:
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
+
+ def _load_post_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ try:
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
+
+ def _load_handlers(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed handlers/blocks.
+ Bare handlers outside of a block are given an implicit block.
+ '''
+ try:
+ return self._extend_value(
+ self.handlers,
+ load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
+ prepend=True
+ )
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
+
+ def _load_roles(self, attr, ds):
+ '''
+ Loads and returns a list of RoleInclude objects from the datastructure
+ list of role definitions and creates the Role from those objects
+ '''
+
+ if ds is None:
+ ds = []
+
+ try:
+ role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
+ loader=self._loader, collection_search_list=self.collections)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
+
+ roles = []
+ for ri in role_includes:
+ roles.append(Role.load(ri, play=self))
+
+ self.roles[:0] = roles
+
+ return self.roles
+
+ def _load_vars_prompt(self, attr, ds):
+ new_ds = preprocess_vars(ds)
+ vars_prompts = []
+ if new_ds is not None:
+ for prompt_data in new_ds:
+ if 'name' not in prompt_data:
+ raise AnsibleParserError("Invalid vars_prompt data structure, missing 'name' key", obj=ds)
+ for key in prompt_data:
+ if key not in ('name', 'prompt', 'default', 'private', 'confirm', 'encrypt', 'salt_size', 'salt', 'unsafe'):
+ raise AnsibleParserError("Invalid vars_prompt data structure, found unsupported key '%s'" % key, obj=ds)
+ vars_prompts.append(prompt_data)
+ return vars_prompts
+
+ def _compile_roles(self):
+ '''
+ Handles the role compilation step, returning a flat list of tasks
+ with the lowest level dependencies first. For example, if a role R
+ has a dependency D1, which also has a dependency D2, the tasks from
+ D2 are merged first, followed by those from D1, and finally the
+ tasks from the parent role R. This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ # Don't insert tasks from ``import/include_role``, preventing
+ # duplicate execution at the wrong time
+ if r.from_include:
+ continue
+ block_list.extend(r.compile(play=self))
+
+ return block_list
+
+ def compile_roles_handlers(self):
+ '''
+ Handles the role handler compilation step, returning a flat list of Handlers
+ This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ if r.from_include:
+ continue
+ block_list.extend(r.get_handler_blocks(play=self))
+
+ return block_list
+
+ def compile(self):
+ '''
+ Compiles and returns the task list for this play, compiled from the
+ roles (which are themselves compiled recursively) and/or the list of
+ tasks specified in the play.
+ '''
+
+ # create a block containing a single flush handlers meta
+ # task, so we can be sure to run handlers at certain points
+ # of the playbook execution
+ flush_block = Block.load(
+ data={'meta': 'flush_handlers'},
+ play=self,
+ variable_manager=self._variable_manager,
+ loader=self._loader
+ )
+
+ block_list = []
+
+ block_list.extend(self.pre_tasks)
+ block_list.append(flush_block)
+ block_list.extend(self._compile_roles())
+ block_list.extend(self.tasks)
+ block_list.append(flush_block)
+ block_list.extend(self.post_tasks)
+ block_list.append(flush_block)
+
+ return block_list
+
+ def get_vars(self):
+ return self.vars.copy()
+
+ def get_vars_files(self):
+ if self.vars_files is None:
+ return []
+ elif not isinstance(self.vars_files, list):
+ return [self.vars_files]
+ return self.vars_files
+
+ def get_handlers(self):
+ return self.handlers[:]
+
+ def get_roles(self):
+ return self.roles[:]
+
+ def get_tasks(self):
+ tasklist = []
+ for task in self.pre_tasks + self.tasks + self.post_tasks:
+ if isinstance(task, Block):
+ tasklist.append(task.block + task.rescue + task.always)
+ else:
+ tasklist.append(task)
+ return tasklist
+
+ def serialize(self):
+ data = super(Play, self).serialize()
+
+ roles = []
+ for role in self.get_roles():
+ roles.append(role.serialize())
+ data['roles'] = roles
+ data['included_path'] = self._included_path
+
+ return data
+
+ def deserialize(self, data):
+ super(Play, self).deserialize(data)
+
+ self._included_path = data.get('included_path', None)
+ if 'roles' in data:
+ role_data = data.get('roles', [])
+ roles = []
+ for role in role_data:
+ r = Role()
+ r.deserialize(role)
+ roles.append(r)
+
+ setattr(self, 'roles', roles)
+ del data['roles']
+
+ def copy(self):
+ new_me = super(Play, self).copy()
+ new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
+ new_me._included_conditional = self._included_conditional
+ new_me._included_path = self._included_path
+ return new_me
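
The flush-handlers bracketing in compile() can be verified by loading a bare play and counting the compiled blocks. A minimal sketch, assuming ansible-base 2.10: with one debug task in each list and no roles, the six blocks come out as pre_tasks, flush, tasks, flush, post_tasks, flush.

from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.vars.manager import VariableManager

loader = DataLoader()
vm = VariableManager(loader=loader)
play = Play.load({'hosts': 'localhost',
                  'gather_facts': False,
                  'pre_tasks': [{'debug': {'msg': 'pre'}}],
                  'tasks': [{'debug': {'msg': 'main'}}],
                  'post_tasks': [{'debug': {'msg': 'post'}}]},
                 variable_manager=vm, loader=loader)

# pre_tasks, flush, (roles), tasks, flush, post_tasks, flush
print(len(play.compile()))   # -> 6 when no roles are defined
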
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
new file mode 100644
index 00000000..d9bdc2fd
--- /dev/null
+++ b/lib/ansible/playbook/play_context.py
@@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.module_utils.compat.paramiko import paramiko
+from ansible.module_utils.six import iteritems
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.plugins import get_plugin_class
+from ansible.utils.display import Display
+from ansible.plugins.loader import get_shell_plugin
+from ansible.utils.ssh_functions import check_for_controlpersist
+
+
+display = Display()
+
+
+__all__ = ['PlayContext']
+
+
+TASK_ATTRIBUTE_OVERRIDES = (
+ 'become',
+ 'become_user',
+ 'become_pass',
+ 'become_method',
+ 'become_flags',
+ 'connection',
+ 'docker_extra_args', # TODO: remove
+ 'delegate_to',
+ 'no_log',
+ 'remote_user',
+)
+
+RESET_VARS = (
+ 'ansible_connection',
+ 'ansible_user',
+ 'ansible_host',
+ 'ansible_port',
+
+ # TODO: ???
+ 'ansible_docker_extra_args',
+ 'ansible_ssh_host',
+ 'ansible_ssh_pass',
+ 'ansible_ssh_port',
+ 'ansible_ssh_user',
+ 'ansible_ssh_private_key_file',
+ 'ansible_ssh_pipelining',
+ 'ansible_ssh_executable',
+)
+
+
+class PlayContext(Base):
+
+ '''
+ This class is used to consolidate the connection information for
+ hosts in a play and child tasks, where the task may override some
+ connection/authentication information.
+ '''
+
+ # base
+ _module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
+ _shell = FieldAttribute(isa='string')
+ _executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
+
+ # connection fields, some are inherited from Base:
+ # (connection, port, remote_user, environment, no_log)
+ _remote_addr = FieldAttribute(isa='string')
+ _password = FieldAttribute(isa='string')
+ _timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
+ _connection_user = FieldAttribute(isa='string')
+ _private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
+ _pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
+
+ # networking modules
+ _network_os = FieldAttribute(isa='string')
+
+ # docker FIXME: remove these
+ _docker_extra_args = FieldAttribute(isa='string')
+
+ # ssh # FIXME: remove these
+ _ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
+ _ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
+ _ssh_common_args = FieldAttribute(isa='string')
+ _sftp_extra_args = FieldAttribute(isa='string')
+ _scp_extra_args = FieldAttribute(isa='string')
+ _ssh_extra_args = FieldAttribute(isa='string')
+ _ssh_transfer_method = FieldAttribute(isa='string', default=C.DEFAULT_SSH_TRANSFER_METHOD)
+
+ # ???
+ _connection_lockfd = FieldAttribute(isa='int')
+
+ # privilege escalation fields
+ _become = FieldAttribute(isa='bool')
+ _become_method = FieldAttribute(isa='string')
+ _become_user = FieldAttribute(isa='string')
+ _become_pass = FieldAttribute(isa='string')
+ _become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
+ _become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
+ _prompt = FieldAttribute(isa='string')
+
+ # general flags
+ _verbosity = FieldAttribute(isa='int', default=0)
+ _only_tags = FieldAttribute(isa='set', default=set)
+ _skip_tags = FieldAttribute(isa='set', default=set)
+
+ _start_at_task = FieldAttribute(isa='string')
+ _step = FieldAttribute(isa='bool', default=False)
+
+ # "PlayContext.force_handlers should not be used, the calling code should be using play itself instead"
+ _force_handlers = FieldAttribute(isa='bool', default=False)
+
+ def __init__(self, play=None, passwords=None, connection_lockfd=None):
+ # Note: play is really not optional. The only time it could be omitted is when we create
+ # a PlayContext just so we can invoke its deserialize method to load it from a serialized
+ # data source.
+
+ super(PlayContext, self).__init__()
+
+ if passwords is None:
+ passwords = {}
+
+ self.password = passwords.get('conn_pass', '')
+ self.become_pass = passwords.get('become_pass', '')
+
+ self._become_plugin = None
+
+ self.prompt = ''
+ self.success_key = ''
+
+ # a file descriptor to be used during locking operations
+ self.connection_lockfd = connection_lockfd
+
+ # set options before play to allow play to override them
+ if context.CLIARGS:
+ self.set_attributes_from_cli()
+
+ if play:
+ self.set_attributes_from_play(play)
+
+ def set_attributes_from_plugin(self, plugin):
+ # generic options derived from the connection plugin; temporary for backwards compat, as eventually we should not set play_context properties at all
+
+ # get options for plugins
+ options = C.config.get_configuration_definitions(get_plugin_class(plugin), plugin._load_name)
+ for option in options:
+ if option:
+ flag = options[option].get('name')
+ if flag:
+ setattr(self, flag, self.connection.get_option(flag))
+
+ def set_attributes_from_play(self, play):
+ self.force_handlers = play.force_handlers
+
+ def set_attributes_from_cli(self):
+ '''
+ Configures this connection information instance with data from
+ options specified by the user on the command line. These have a
+ lower precedence than those set on the play or host.
+ '''
+ if context.CLIARGS.get('timeout', False):
+ self.timeout = int(context.CLIARGS['timeout'])
+
+ # From the command line. These should probably be used directly by plugins instead
+ # For now, they are likely to be moved to FieldAttribute defaults
+ self.private_key_file = context.CLIARGS.get('private_key_file') # Else default
+ self.verbosity = context.CLIARGS.get('verbosity') # Else default
+ self.ssh_common_args = context.CLIARGS.get('ssh_common_args') # Else default
+ self.ssh_extra_args = context.CLIARGS.get('ssh_extra_args') # Else default
+ self.sftp_extra_args = context.CLIARGS.get('sftp_extra_args') # Else default
+ self.scp_extra_args = context.CLIARGS.get('scp_extra_args') # Else default
+
+ # Not every cli that uses PlayContext has these command line args so have a default
+ self.start_at_task = context.CLIARGS.get('start_at_task', None) # Else default
+
+ def set_task_and_variable_override(self, task, variables, templar):
+ '''
+ Sets attributes from the task if they are set, which will override
+ those from the play.
+
+ :arg task: the task object with the parameters that were set on it
+ :arg variables: variables from inventory
+ :arg templar: templar instance if templating variables is needed
+ '''
+
+ new_info = self.copy()
+
+ # loop through a subset of attributes on the task object and set
+ # connection fields based on their values
+ for attr in TASK_ATTRIBUTE_OVERRIDES:
+ if hasattr(task, attr):
+ attr_val = getattr(task, attr)
+ if attr_val is not None:
+ setattr(new_info, attr, attr_val)
+
+ # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
+ # connection info object with 'magic' variables from the variable list.
+ # If the value 'ansible_delegated_vars' is in the variables, it means
+ # we have a delegated-to host, so we check there first before looking
+ # at the variables in general
+ if task.delegate_to is not None:
+ # In the case of a loop, the delegated_to host may have been
+ # templated based on the loop variable, so we try and locate
+ # the host name in the delegated variable dictionary here
+ delegated_host_name = templar.template(task.delegate_to)
+ delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
+
+ delegated_transport = C.DEFAULT_TRANSPORT
+ for transport_var in C.MAGIC_VARIABLE_MAPPING.get('connection'):
+ if transport_var in delegated_vars:
+ delegated_transport = delegated_vars[transport_var]
+ break
+
+ # make sure this delegated_to host has something set for its remote
+ # address, otherwise we default to connecting to it by name. This
+ # may happen when users put an IP entry into their inventory, or if
+ # they rely on DNS for a non-inventory hostname
+ for address_var in ('ansible_%s_host' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_addr'):
+ if address_var in delegated_vars:
+ break
+ else:
+ display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
+ delegated_vars['ansible_host'] = delegated_host_name
+
+ # reset the port back to the default if none was specified, to prevent
+ # the delegated host from inheriting the original host's setting
+ for port_var in ('ansible_%s_port' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('port'):
+ if port_var in delegated_vars:
+ break
+ else:
+ if delegated_transport == 'winrm':
+ delegated_vars['ansible_port'] = 5986
+ else:
+ delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
+
+ # and likewise for the remote user
+ for user_var in ('ansible_%s_user' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_user'):
+ if user_var in delegated_vars and delegated_vars[user_var]:
+ break
+ else:
+ delegated_vars['ansible_user'] = task.remote_user or self.remote_user
+ else:
+ delegated_vars = dict()
+
+ # setup shell
+ for exe_var in C.MAGIC_VARIABLE_MAPPING.get('executable'):
+ if exe_var in variables:
+ setattr(new_info, 'executable', variables.get(exe_var))
+
+ attrs_considered = []
+ for (attr, variable_names) in iteritems(C.MAGIC_VARIABLE_MAPPING):
+ for variable_name in variable_names:
+ if attr in attrs_considered:
+ continue
+ # for a delegated task, ONLY use the delegated host's vars, avoiding the vars of the host being delegated for
+ if task.delegate_to is not None:
+ if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
+ setattr(new_info, attr, delegated_vars[variable_name])
+ attrs_considered.append(attr)
+ elif variable_name in variables:
+ setattr(new_info, attr, variables[variable_name])
+ attrs_considered.append(attr)
+ # no else, as no other vars should be considered
+
+ # become legacy updates -- from inventory file (inventory overrides
+ # commandline)
+ for become_pass_name in C.MAGIC_VARIABLE_MAPPING.get('become_pass'):
+ if become_pass_name in variables:
+ break
+
+ # make sure we get port defaults if needed
+ if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
+ new_info.port = int(C.DEFAULT_REMOTE_PORT)
+
+ # special overrides for the connection setting
+ if len(delegated_vars) > 0:
+ # in the event that we were using local before make sure to reset the
+ # connection type to the default transport for the delegated-to host,
+ # if not otherwise specified
+ for connection_type in C.MAGIC_VARIABLE_MAPPING.get('connection'):
+ if connection_type in delegated_vars:
+ break
+ else:
+ remote_addr_local = new_info.remote_addr in C.LOCALHOST
+ inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
+ if remote_addr_local and inv_hostname_local:
+ setattr(new_info, 'connection', 'local')
+ elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
+ setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
+
+ # we store the original in 'connection_user' for network/other modules that fall back to it as the login user
+ # connection_user is to be deprecated once connection=local is removed, as 'local' resets remote_user
+ if new_info.connection == 'local':
+ if not new_info.connection_user:
+ new_info.connection_user = new_info.remote_user
+
+ # set no_log to default if it was not previously set
+ if new_info.no_log is None:
+ new_info.no_log = C.DEFAULT_NO_LOG
+
+ if task.check_mode is not None:
+ new_info.check_mode = task.check_mode
+
+ if task.diff is not None:
+ new_info.diff = task.diff
+
+ return new_info
+
+ def set_become_plugin(self, plugin):
+ self._become_plugin = plugin
+
+ def make_become_cmd(self, cmd, executable=None):
+ """ helper function to create privilege escalation commands """
+ display.deprecated(
+ "PlayContext.make_become_cmd should not be used, the calling code should be using become plugins instead",
+ version="2.12", collection_name='ansible.builtin'
+ )
+
+ if not cmd or not self.become:
+ return cmd
+
+ become_method = self.become_method
+
+ # load/call become plugins here
+ plugin = self._become_plugin
+
+ if plugin:
+ options = {
+ 'become_exe': self.become_exe or become_method,
+ 'become_flags': self.become_flags or '',
+ 'become_user': self.become_user,
+ 'become_pass': self.become_pass
+ }
+ plugin.set_options(direct=options)
+
+ if not executable:
+ executable = self.executable
+
+ shell = get_shell_plugin(executable=executable)
+ cmd = plugin.build_become_command(cmd, shell)
+ # for backwards compat:
+ if self.become_pass:
+ self.prompt = plugin.prompt
+ else:
+ raise AnsibleError("Privilege escalation method not found: %s" % become_method)
+
+ return cmd
+
+ def update_vars(self, variables):
+ '''
+ Adds 'magic' variables relating to connections to the variable dictionary provided.
+ This is a legacy carried over from runner, kept in case users need to access these variables from the play.
+ '''
+
+ for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items():
+ try:
+ if 'become' in prop:
+ continue
+
+ var_val = getattr(self, prop)
+ for var_opt in var_list:
+ if var_opt not in variables and var_val is not None:
+ variables[var_opt] = var_val
+ except AttributeError:
+ continue
+
+ def _get_attr_connection(self):
+ ''' connections are special, this takes care of responding correctly '''
+ conn_type = None
+ if self._attributes['connection'] == 'smart':
+ conn_type = 'ssh'
+ # see if SSH can support ControlPersist if not use paramiko
+ if not check_for_controlpersist(self.ssh_executable) and paramiko is not None:
+ conn_type = "paramiko"
+
+ # if someone did `connection: persistent`, default it to using a persistent paramiko connection to avoid problems
+ elif self._attributes['connection'] == 'persistent' and paramiko is not None:
+ conn_type = 'paramiko'
+
+ if conn_type:
+ self.connection = conn_type
+
+ return self._attributes['connection']
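
Much of set_task_and_variable_override above is table-driven by C.MAGIC_VARIABLE_MAPPING, which maps each PlayContext attribute to an ordered tuple of variable names, the earliest match winning. A quick sketch for inspecting the entries the method relies on:

from ansible import constants as C

for attr in ('connection', 'remote_addr', 'port', 'remote_user', 'become_pass'):
    print('%-12s <- %s' % (attr, C.MAGIC_VARIABLE_MAPPING.get(attr)))
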
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
new file mode 100644
index 00000000..4aeecb1e
--- /dev/null
+++ b/lib/ansible/playbook/playbook_include.py
@@ -0,0 +1,161 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils.six import iteritems, string_types
+from ansible.parsing.splitter import split_args, parse_kv
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable
+from ansible.template import Templar
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class PlaybookInclude(Base, Conditional, Taggable):
+
+ _import_playbook = FieldAttribute(isa='string')
+ _vars = FieldAttribute(isa='dict', default=dict)
+
+ @staticmethod
+ def load(data, basedir, variable_manager=None, loader=None):
+ return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
+
+ def load_data(self, ds, basedir, variable_manager=None, loader=None):
+ '''
+ Overrides the base load_data(), as we're actually going to return a new
+ Playbook() object rather than a PlaybookInclude object
+ '''
+
+ # import here to avoid a dependency loop
+ from ansible.playbook import Playbook
+ from ansible.playbook.play import Play
+
+ # first, we use the original parent method to correctly load the object
+ # via the load_data/preprocess_data system we normally use for other
+ # playbook objects
+ new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
+
+ all_vars = self.vars.copy()
+ if variable_manager:
+ all_vars.update(variable_manager.get_vars())
+
+ templar = Templar(loader=loader, variables=all_vars)
+
+ # then we use the object to load a Playbook
+ pb = Playbook(loader=loader)
+
+ file_name = templar.template(new_obj.import_playbook)
+ if not os.path.isabs(file_name):
+ file_name = os.path.join(basedir, file_name)
+
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager, vars=self.vars.copy())
+
+ # finally, update each loaded playbook entry with any variables specified
+ # on the included playbook and/or any tags which may have been set
+ for entry in pb._entries:
+
+ # conditional includes on a playbook need a marker to skip gathering
+ if new_obj.when and isinstance(entry, Play):
+ entry._included_conditional = new_obj.when[:]
+
+ temp_vars = entry.vars.copy()
+ temp_vars.update(new_obj.vars)
+ param_tags = temp_vars.pop('tags', None)
+ if param_tags is not None:
+ entry.tags.extend(param_tags.split(','))
+ entry.vars = temp_vars
+ entry.tags = list(set(entry.tags).union(new_obj.tags))
+ if entry._included_path is None:
+ entry._included_path = os.path.dirname(file_name)
+
+ # Check to see if we need to forward the conditionals on to the included
+ # plays. If so, we can take a shortcut here and simply prepend them to
+ # those attached to each block (if any)
+ if new_obj.when:
+ for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
+ task_block._attributes['when'] = new_obj.when[:] + task_block.when[:]
+
+ return pb
+
+ def preprocess_data(self, ds):
+ '''
+ Reorganizes the data for a PlaybookInclude datastructure to line
+ up with what we expect the proper attributes to be
+ '''
+
+ if not isinstance(ds, dict):
+ raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ for (k, v) in iteritems(ds):
+ if k in C._ACTION_ALL_IMPORT_PLAYBOOKS:
+ self._preprocess_import(ds, new_ds, k, v)
+ else:
+ # some basic error checking, to make sure vars are properly
+ # formatted and do not conflict with k=v parameters
+ if k == 'vars':
+ if 'vars' in new_ds:
+ raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
+ elif not isinstance(v, dict):
+ raise AnsibleParserError("vars for import_playbook statements must be specified as a dictionary", obj=ds)
+ new_ds[k] = v
+
+ return super(PlaybookInclude, self).preprocess_data(new_ds)
+
+ def _preprocess_import(self, ds, new_ds, k, v):
+ '''
+ Splits the playbook import line up into filename and parameters
+ '''
+
+ if v is None:
+ raise AnsibleParserError("playbook import parameter is missing", obj=ds)
+ elif not isinstance(v, string_types):
+ raise AnsibleParserError("playbook import parameter must be a string indicating a file path, got %s instead" % type(v), obj=ds)
+
+ # The import_playbook line must include at least one item, which is the filename
+ # to import. Anything after that should be regarded as a parameter to the import
+ items = split_args(v)
+ if len(items) == 0:
+ raise AnsibleParserError("import_playbook statements must specify the file name to import", obj=ds)
+ else:
+ new_ds['import_playbook'] = items[0].strip()
+ if len(items) > 1:
+ display.warning('Additional parameters in import_playbook statements are not supported. This will be an error in version 2.14')
+ # rejoin the parameter portion of the arguments and
+ # then use parse_kv() to get a dict of params back
+ params = parse_kv(" ".join(items[1:]))
+ if 'tags' in params:
+ new_ds['tags'] = params.pop('tags')
+ if 'vars' in new_ds:
+ raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
+ new_ds['vars'] = params
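
The splitting inside _preprocess_import is plain string handling and can be reproduced in isolation with the same parsing helpers; as the code above warns, trailing parameters on import_playbook are slated to become an error in 2.14. A sketch:

from ansible.parsing.splitter import split_args, parse_kv

items = split_args('site.yml tags=web,db')
new_ds = {'import_playbook': items[0].strip()}
params = parse_kv(' '.join(items[1:]))
if 'tags' in params:
    new_ds['tags'] = params.pop('tags')
new_ds['vars'] = params
print(new_ds)   # -> {'import_playbook': 'site.yml', 'tags': 'web,db', 'vars': {}}
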
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
new file mode 100644
index 00000000..b7456afc
--- /dev/null
+++ b/lib/ansible/playbook/role/__init__.py
@@ -0,0 +1,528 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
+from ansible.module_utils.six import iteritems, binary_type, text_type
+from ansible.module_utils.common._collections_compat import Container, Mapping, Set, Sequence
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.role.metadata import RoleMetadata
+from ansible.playbook.taggable import Taggable
+from ansible.plugins.loader import add_all_plugin_dirs
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.vars import combine_vars
+
+
+__all__ = ['Role', 'hash_params']
+
+# TODO: this should be a utility function, but it can't be a member of
+# the Role class, because that would require the use of self in a
+# static method. This is also used in the base class for
+# strategies (ansible/plugins/strategy/__init__.py)
+
+
+def hash_params(params):
+ """
+ Construct a data structure of parameters that is hashable.
+
+ This requires changing any mutable data structures into immutable ones.
+ We chose a frozenset because role parameters have to be unique.
+
+ .. warning:: this does not handle unhashable scalars. Two things
+ mitigate that limitation:
+
+ 1) There shouldn't be any unhashable scalars specified in the yaml
+ 2) Our only choice would be to raise an error anyway.
+ """
+ # Any container is unhashable if it contains unhashable items (for
+ # instance, tuple() is a Hashable subclass but if it contains a dict, it
+ # cannot be hashed)
+ if isinstance(params, Container) and not isinstance(params, (text_type, binary_type)):
+ if isinstance(params, Mapping):
+ try:
+ # Optimistically hope the contents are all hashable
+ new_params = frozenset(params.items())
+ except TypeError:
+ new_params = set()
+ for k, v in params.items():
+ # Hash each entry individually
+ new_params.add((k, hash_params(v)))
+ new_params = frozenset(new_params)
+
+ elif isinstance(params, (Set, Sequence)):
+ try:
+ # Optimistically hope the contents are all hashable
+ new_params = frozenset(params)
+ except TypeError:
+ new_params = set()
+ for v in params:
+ # Hash each entry individually
+ new_params.add(hash_params(v))
+ new_params = frozenset(new_params)
+ else:
+ # This is just a guess.
+ new_params = frozenset(params)
+ return new_params
+
+ # Note: We do not handle unhashable scalars but our only choice would be
+ # to raise an error there anyway.
+ return frozenset((params,))
+
+
+class Role(Base, Conditional, Taggable, CollectionSearch):
+
+ _delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool')
+
+ def __init__(self, play=None, from_files=None, from_include=False):
+ self._role_name = None
+ self._role_path = None
+ self._role_collection = None
+ self._role_params = dict()
+ self._loader = None
+
+ self._metadata = None
+ self._play = play
+ self._parents = []
+ self._dependencies = []
+ self._task_blocks = []
+ self._handler_blocks = []
+ self._compiled_handler_blocks = None
+ self._default_vars = dict()
+ self._role_vars = dict()
+ self._had_task_run = dict()
+ self._completed = dict()
+
+ if from_files is None:
+ from_files = {}
+ self._from_files = from_files
+
+ # Indicates whether this role was included via include/import_role
+ self.from_include = from_include
+
+ super(Role, self).__init__()
+
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self, include_role_fqcn=True):
+ if include_role_fqcn:
+ return '.'.join(x for x in (self._role_collection, self._role_name) if x)
+ return self._role_name
+
+ @staticmethod
+ def load(role_include, play, parent_role=None, from_files=None, from_include=False):
+
+ if from_files is None:
+ from_files = {}
+ try:
+ # The ROLE_CACHE is a dictionary keyed by role name, with each entry
+ # containing another dictionary that maps a hashed set of parameters
+ # specified for the role to the Role() object itself.
+ # We use frozenset to make those parameters hashable for use as keys.
+
+ params = role_include.get_role_params()
+ if role_include.when is not None:
+ params['when'] = role_include.when
+ if role_include.tags is not None:
+ params['tags'] = role_include.tags
+ if from_files is not None:
+ params['from_files'] = from_files
+ if role_include.vars:
+ params['vars'] = role_include.vars
+
+ params['from_include'] = from_include
+
+ hashed_params = hash_params(params)
+ if role_include.get_name() in play.ROLE_CACHE:
+ for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.get_name()]):
+ if hashed_params == entry:
+ if parent_role:
+ role_obj.add_parent(parent_role)
+ return role_obj
+
+ # TODO: need to fix cycle detection in role load (maybe use an empty dict
+ # for in-flight roles in the role cache as a sentinel that we're already trying to load
+ # that role?)
+ # see https://github.com/ansible/ansible/issues/61527
+ r = Role(play=play, from_files=from_files, from_include=from_include)
+ r._load_role_data(role_include, parent_role=parent_role)
+
+ if role_include.get_name() not in play.ROLE_CACHE:
+ play.ROLE_CACHE[role_include.get_name()] = dict()
+
+ # FIXME: how to handle cache keys for collection-based roles, since they're technically adjustable per task?
+ play.ROLE_CACHE[role_include.get_name()][hashed_params] = r
+ return r
+
+ except RuntimeError:
+ raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
+ obj=role_include._ds)
+
+ def _load_role_data(self, role_include, parent_role=None):
+ self._role_name = role_include.role
+ self._role_path = role_include.get_role_path()
+ self._role_collection = role_include._role_collection
+ self._role_params = role_include.get_role_params()
+ self._variable_manager = role_include.get_variable_manager()
+ self._loader = role_include.get_loader()
+
+ if parent_role:
+ self.add_parent(parent_role)
+
+ # copy over all field attributes from the RoleInclude
+ # update self._attributes directly, to avoid squashing
+ for (attr_name, _) in iteritems(self._valid_attrs):
+ if attr_name in ('when', 'tags'):
+ self._attributes[attr_name] = self._extend_value(
+ self._attributes[attr_name],
+ role_include._attributes[attr_name],
+ )
+ else:
+ self._attributes[attr_name] = role_include._attributes[attr_name]
+
+ # vars and default vars are regular dictionaries
+ self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'), allow_dir=True)
+ if self._role_vars is None:
+ self._role_vars = dict()
+ elif not isinstance(self._role_vars, dict):
+ raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
+
+ self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'), allow_dir=True)
+ if self._default_vars is None:
+ self._default_vars = dict()
+ elif not isinstance(self._default_vars, dict):
+ raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
+
+ # load the role's other files, if they exist
+ metadata = self._load_role_yaml('meta')
+ if metadata:
+ self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
+ self._dependencies = self._load_dependencies()
+ else:
+ self._metadata = RoleMetadata()
+
+ # reset collections list; roles do not inherit collections from parents, just use the defaults
+ # FUTURE: use a private config default for this so we can allow it to be overridden later
+ self.collections = []
+
+ # configure plugin/collection loading; either prepend the current role's collection or configure legacy plugin loading
+ # FIXME: need exception for explicit ansible.legacy?
+ if self._role_collection: # this is a collection-hosted role
+ self.collections.insert(0, self._role_collection)
+ else: # this is a legacy role, but set the default collection if there is one
+ default_collection = AnsibleCollectionConfig.default_collection
+ if default_collection:
+ self.collections.insert(0, default_collection)
+ # legacy role, ensure all plugin dirs under the role are added to plugin search path
+ add_all_plugin_dirs(self._role_path)
+
+ # collections can be specified in metadata for legacy or collection-hosted roles
+ if self._metadata.collections:
+ self.collections.extend((c for c in self._metadata.collections if c not in self.collections))
+
+ # if any collections were specified, ensure that core or legacy synthetic collections are always included
+ if self.collections:
+ # default append collection is core for collection-hosted roles, legacy for others
+ default_append_collection = 'ansible.builtin' if self._role_collection else 'ansible.legacy'
+ if 'ansible.builtin' not in self.collections and 'ansible.legacy' not in self.collections:
+ self.collections.append(default_append_collection)
+
+ task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
+ if task_data:
+ try:
+ self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
+ except AssertionError as e:
+ raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
+ obj=task_data, orig_exc=e)
+
+ handler_data = self._load_role_yaml('handlers', main=self._from_files.get('handlers'))
+ if handler_data:
+ try:
+ self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
+ variable_manager=self._variable_manager)
+ except AssertionError as e:
+ raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
+ obj=handler_data, orig_exc=e)
+
+ def _load_role_yaml(self, subdir, main=None, allow_dir=False):
+ file_path = os.path.join(self._role_path, subdir)
+ if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
+ # Valid extensions and their ordering for roles are hard-coded to maintain
+ # role portability
+ extensions = ['.yml', '.yaml', '.json']
+ # If no <main> is specified by the user, look for files with
+ # extensions before bare name. Otherwise, look for bare name first.
+ if main is None:
+ _main = 'main'
+ extensions.append('')
+ else:
+ _main = main
+ extensions.insert(0, '')
+ found_files = self._loader.find_vars_files(file_path, _main, extensions, allow_dir)
+ if found_files:
+ data = {}
+ for found in found_files:
+ new_data = self._loader.load_from_file(found)
+ if new_data and allow_dir:
+ data = combine_vars(data, new_data)
+ else:
+ data = new_data
+ return data
+ elif main is not None:
+ raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir, main))
+ return None
+
+ def _load_dependencies(self):
+ '''
+ Recursively loads role dependencies from the metadata list of
+ dependencies, if it exists
+ '''
+
+ deps = []
+ if self._metadata:
+ for role_include in self._metadata.dependencies:
+ r = Role.load(role_include, play=self._play, parent_role=self)
+ deps.append(r)
+
+ return deps
+
+ # other functions
+
+ def add_parent(self, parent_role):
+ ''' adds a role to the list of this role's parents '''
+ if not isinstance(parent_role, Role):
+ raise AnsibleAssertionError()
+
+ if parent_role not in self._parents:
+ self._parents.append(parent_role)
+
+ def get_parents(self):
+ return self._parents
+
+ def get_default_vars(self, dep_chain=None):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ default_vars = dict()
+ for dep in self.get_all_dependencies():
+ default_vars = combine_vars(default_vars, dep.get_default_vars())
+ if dep_chain:
+ for parent in dep_chain:
+ default_vars = combine_vars(default_vars, parent._default_vars)
+ default_vars = combine_vars(default_vars, self._default_vars)
+ return default_vars
+
+ def get_inherited_vars(self, dep_chain=None):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ inherited_vars = dict()
+
+ if dep_chain:
+ for parent in dep_chain:
+ inherited_vars = combine_vars(inherited_vars, parent._role_vars)
+ return inherited_vars
+
+ def get_role_params(self, dep_chain=None):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ params = {}
+ if dep_chain:
+ for parent in dep_chain:
+ params = combine_vars(params, parent._role_params)
+ params = combine_vars(params, self._role_params)
+ return params
+
+ def get_vars(self, dep_chain=None, include_params=True):
+ dep_chain = [] if dep_chain is None else dep_chain
+
+ all_vars = self.get_inherited_vars(dep_chain)
+
+ for dep in self.get_all_dependencies():
+ all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params))
+
+ all_vars = combine_vars(all_vars, self.vars)
+ all_vars = combine_vars(all_vars, self._role_vars)
+ if include_params:
+ all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))
+
+ return all_vars
+
+ def get_direct_dependencies(self):
+ return self._dependencies[:]
+
+ def get_all_dependencies(self):
+ '''
+ Returns a list of all deps, built recursively from all child dependencies,
+ in the proper order in which they should be executed or evaluated.
+ '''
+
+ child_deps = []
+
+ for dep in self.get_direct_dependencies():
+ for child_dep in dep.get_all_dependencies():
+ child_deps.append(child_dep)
+ child_deps.append(dep)
+
+ return child_deps
+
+ def get_task_blocks(self):
+ return self._task_blocks[:]
+
+ def get_handler_blocks(self, play, dep_chain=None):
+ # Do not recreate this list each time ``get_handler_blocks`` is called.
+ # Cache the results so that we don't potentially overwrite with copied duplicates
+ #
+ # ``get_handler_blocks`` may be called when handling ``import_role`` during parsing
+ # as well as with ``Play.compile_roles_handlers`` from ``TaskExecutor``
+ if self._compiled_handler_blocks:
+ return self._compiled_handler_blocks
+
+ self._compiled_handler_blocks = block_list = []
+
+ # update the dependency chain here
+ if dep_chain is None:
+ dep_chain = []
+ new_dep_chain = dep_chain + [self]
+
+ for dep in self.get_direct_dependencies():
+ dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
+ block_list.extend(dep_blocks)
+
+ for task_block in self._handler_blocks:
+ new_task_block = task_block.copy()
+ new_task_block._dep_chain = new_dep_chain
+ new_task_block._play = play
+ block_list.append(new_task_block)
+
+ return block_list
+
+ def has_run(self, host):
+ '''
+ Returns true if this role has been iterated over completely and
+ at least one task was run
+ '''
+
+ return host.name in self._completed and not self._metadata.allow_duplicates
+
+ def compile(self, play, dep_chain=None):
+ '''
+ Returns the task list for this role, which is created by first
+ recursively compiling the tasks for all direct dependencies, and
+ then adding on the tasks for this role.
+
+ The role compile() also remembers and saves the dependency chain
+ with each task, so tasks know by which route they were found, and
+ can correctly take their parent's tags/conditionals into account.
+ '''
+
+ block_list = []
+
+ # update the dependency chain here
+ if dep_chain is None:
+ dep_chain = []
+ new_dep_chain = dep_chain + [self]
+
+ deps = self.get_direct_dependencies()
+ for dep in deps:
+ dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
+ block_list.extend(dep_blocks)
+
+ for idx, task_block in enumerate(self._task_blocks):
+ new_task_block = task_block.copy()
+ new_task_block._dep_chain = new_dep_chain
+ new_task_block._play = play
+ if idx == len(self._task_blocks) - 1:
+ new_task_block._eor = True
+ block_list.append(new_task_block)
+
+ return block_list
+
+ def serialize(self, include_deps=True):
+ res = super(Role, self).serialize()
+
+ res['_role_name'] = self._role_name
+ res['_role_path'] = self._role_path
+ res['_role_vars'] = self._role_vars
+ res['_role_params'] = self._role_params
+ res['_default_vars'] = self._default_vars
+ res['_had_task_run'] = self._had_task_run.copy()
+ res['_completed'] = self._completed.copy()
+
+ if self._metadata:
+ res['_metadata'] = self._metadata.serialize()
+
+ if include_deps:
+ deps = []
+ for role in self.get_direct_dependencies():
+ deps.append(role.serialize())
+ res['_dependencies'] = deps
+
+ parents = []
+ for parent in self._parents:
+ parents.append(parent.serialize(include_deps=False))
+ res['_parents'] = parents
+
+ return res
+
+ def deserialize(self, data, include_deps=True):
+ self._role_name = data.get('_role_name', '')
+ self._role_path = data.get('_role_path', '')
+ self._role_vars = data.get('_role_vars', dict())
+ self._role_params = data.get('_role_params', dict())
+ self._default_vars = data.get('_default_vars', dict())
+ self._had_task_run = data.get('_had_task_run', dict())
+ self._completed = data.get('_completed', dict())
+
+ if include_deps:
+ deps = []
+ for dep in data.get('_dependencies', []):
+ r = Role()
+ r.deserialize(dep)
+ deps.append(r)
+ setattr(self, '_dependencies', deps)
+
+ parent_data = data.get('_parents', [])
+ parents = []
+ for parent in parent_data:
+ r = Role()
+ r.deserialize(parent, include_deps=False)
+ parents.append(r)
+ setattr(self, '_parents', parents)
+
+ metadata_data = data.get('_metadata')
+ if metadata_data:
+ m = RoleMetadata()
+ m.deserialize(metadata_data)
+ self._metadata = m
+
+ super(Role, self).deserialize(data)
+
+ def set_loader(self, loader):
+ self._loader = loader
+ for parent in self._parents:
+ parent.set_loader(loader)
+ for dep in self.get_direct_dependencies():
+ dep.set_loader(loader)
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
new file mode 100644
index 00000000..20d69ebd
--- /dev/null
+++ b/lib/ansible/playbook/role/definition.py
@@ -0,0 +1,240 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import iteritems, string_types
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable
+from ansible.template import Templar
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import _get_collection_role_path
+from ansible.utils.path import unfrackpath
+from ansible.utils.display import Display
+
+__all__ = ['RoleDefinition']
+
+display = Display()
+
+
+class RoleDefinition(Base, Conditional, Taggable, CollectionSearch):
+
+ _role = FieldAttribute(isa='string')
+
+ def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
+
+ super(RoleDefinition, self).__init__()
+
+ self._play = play
+ self._variable_manager = variable_manager
+ self._loader = loader
+
+ self._role_path = None
+ self._role_collection = None
+ self._role_basedir = role_basedir
+ self._role_params = dict()
+ self._collection_list = collection_list
+
+ # def __repr__(self):
+ # return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')
+
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ raise AnsibleError("not implemented")
+
+ def preprocess_data(self, ds):
+ # role names that are simply numbers can be parsed by PyYAML
+ # as integers even when quoted, so turn it into a string type
+ if isinstance(ds, int):
+ ds = "%s" % ds
+
+ if not isinstance(ds, dict) and not isinstance(ds, string_types) and not isinstance(ds, AnsibleBaseYAMLObject):
+ raise AnsibleAssertionError()
+
+ if isinstance(ds, dict):
+ ds = super(RoleDefinition, self).preprocess_data(ds)
+
+ # save the original ds for use later
+ self._ds = ds
+
+ # we create a new data structure here, using the same
+ # object used internally by the YAML parsing code so we
+ # can preserve file:line:column information if it exists
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # first we pull the role name out of the data structure,
+ # and then use that to determine the role path (which may
+ # result in a new role name, if it was a file path)
+ role_name = self._load_role_name(ds)
+ (role_name, role_path) = self._load_role_path(role_name)
+
+ # next, we split the role params out from the valid role
+ # attributes and update the new datastructure with that
+ # result and the role name
+ if isinstance(ds, dict):
+ (new_role_def, role_params) = self._split_role_params(ds)
+ new_ds.update(new_role_def)
+ self._role_params = role_params
+
+ # set the role name in the new ds
+ new_ds['role'] = role_name
+
+ # we store the role path internally
+ self._role_path = role_path
+
+ # and return the cleaned-up data structure
+ return new_ds
+
+ def _load_role_name(self, ds):
+ '''
+ Returns the role name (either the role: or name: field) from
+ the role definition, or (when the role definition is a simple
+ string), just that string
+ '''
+
+ if isinstance(ds, string_types):
+ return ds
+
+ role_name = ds.get('role', ds.get('name'))
+ if not role_name or not isinstance(role_name, string_types):
+ raise AnsibleError('role definitions must contain a role name', obj=ds)
+
+ # if we have the required datastructures, and if the role_name
+ # contains a variable, try to template it now
+ if self._variable_manager:
+ all_vars = self._variable_manager.get_vars(play=self._play)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ role_name = templar.template(role_name)
+
+ return role_name
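+
+ # Illustrative examples (editor's note, not upstream code):
+ # _load_role_name('common') -> 'common'
+ # _load_role_name({'role': 'common', 'app_port': 8080}) -> 'common'
+ # where 'app_port' is a made-up role param used only for illustration.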
+
+ def _load_role_path(self, role_name):
+ '''
+ the 'role', as specified in the ds (or as a bare string), can either
+ be a simple name or a full path. If it is a full path, we use the
+ basename as the role name; otherwise we take the name as given and
+ append it to the default role path
+ '''
+
+ # create a templar class to template the dependency names, in
+ # case they contain variables
+ if self._variable_manager is not None:
+ all_vars = self._variable_manager.get_vars(play=self._play)
+ else:
+ all_vars = dict()
+
+ templar = Templar(loader=self._loader, variables=all_vars)
+ role_name = templar.template(role_name)
+
+ role_tuple = None
+
+ # try to load as a collection-based role first
+ if self._collection_list or AnsibleCollectionRef.is_valid_fqcr(role_name):
+ role_tuple = _get_collection_role_path(role_name, self._collection_list)
+
+ if role_tuple:
+ # we found it, stash collection data and return the name/path tuple
+ self._role_collection = role_tuple[2]
+ return role_tuple[0:2]
+
+ # We didn't find a collection role, look in defined role paths
+ # FUTURE: refactor this to be callable from internal so we can properly order
+ # ansible.legacy searches with the collections keyword
+
+ # we always start the search for roles in the base directory of the playbook
+ role_search_paths = [
+ os.path.join(self._loader.get_basedir(), u'roles'),
+ ]
+
+ # also search in the configured roles path
+ if C.DEFAULT_ROLES_PATH:
+ role_search_paths.extend(C.DEFAULT_ROLES_PATH)
+
+ # next, append the roles basedir, if it was set, so we can
+ # search relative to that directory for dependent roles
+ if self._role_basedir:
+ role_search_paths.append(self._role_basedir)
+
+ # finally as a last resort we look in the current basedir as set
+ # in the loader (which should be the playbook dir itself) but without
+ # the roles/ dir appended
+ role_search_paths.append(self._loader.get_basedir())
+
+ # now iterate through the possible paths and return the first one we find
+ for path in role_search_paths:
+ path = templar.template(path)
+ role_path = unfrackpath(os.path.join(path, role_name))
+ if self._loader.path_exists(role_path):
+ return (role_name, role_path)
+
+ # if not found elsewhere try to extract path from name
+ role_path = unfrackpath(role_name)
+ if self._loader.path_exists(role_path):
+ role_name = os.path.basename(role_name)
+ return (role_name, role_path)
+
+ searches = (self._collection_list or []) + role_search_paths
+ raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(searches)), obj=self._ds)
+
+ def _split_role_params(self, ds):
+ '''
+ Splits any random role params off from the role spec and stores
+ them in a dictionary of params for parsing later
+ '''
+
+ role_def = dict()
+ role_params = dict()
+ base_attribute_names = frozenset(self._valid_attrs.keys())
+ for (key, value) in iteritems(ds):
+ # use the list of FieldAttribute values to determine what is and is not
+ # an extra parameter for this role (or sub-class of this role)
+ # FIXME: hard-coded list of exception key names here corresponds to the
+ # connection fields in the Base class. There may need to be some
+ # other mechanism where we exclude certain kinds of field attributes,
+ # or make this list more automatic in some way so we don't have to
+ # remember to update it manually.
+ if key not in base_attribute_names:
+ # this key does not match a field attribute, so it must be a role param
+ role_params[key] = value
+ else:
+ # this is a field attribute, so copy it over directly
+ role_def[key] = value
+
+ return (role_def, role_params)
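+
+ # Example (editor's note): for ds = {'role': 'common', 'tags': ['web'],
+ # 'app_port': 8080}, 'role' and 'tags' are field attributes and go into
+ # role_def, while the made-up 'app_port' is not, so it becomes a role param.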
+
+ def get_role_params(self):
+ return self._role_params.copy()
+
+ def get_role_path(self):
+ return self._role_path
+
+ def get_name(self, include_role_fqcn=True):
+ if include_role_fqcn:
+ return '.'.join(x for x in (self._role_collection, self.role) if x)
+ return self.role
diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
new file mode 100644
index 00000000..1e5d901d
--- /dev/null
+++ b/lib/ansible/playbook/role/include.py
@@ -0,0 +1,60 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import iteritems, string_types
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.role.definition import RoleDefinition
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.module_utils._text import to_native
+
+
+__all__ = ['RoleInclude']
+
+
+class RoleInclude(RoleDefinition):
+
+ """
+ A derivative of RoleDefinition, used by playbook code when a role
+ is included for execution in a play.
+ """
+
+ _delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool', default=False)
+
+ def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None, collection_list=None):
+ super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager,
+ loader=loader, collection_list=collection_list)
+
+ @staticmethod
+ def load(data, play, current_role_path=None, parent_role=None, variable_manager=None, loader=None, collection_list=None):
+
+ if not (isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject)):
+ raise AnsibleParserError("Invalid role definition: %s" % to_native(data))
+
+ if isinstance(data, string_types) and ',' in data:
+ raise AnsibleError("Invalid old style role requirement: %s" % data)
+
+ ri = RoleInclude(play=play, role_basedir=current_role_path, variable_manager=variable_manager, loader=loader, collection_list=collection_list)
+ return ri.load_data(data, variable_manager=variable_manager, loader=loader)
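+
+ # Usage sketch (editor's note): RoleInclude.load() accepts a bare role name
+ # or a dict, e.g. RoleInclude.load('common', play=play) or
+ # RoleInclude.load({'role': 'common', 'app_port': 8080}, play=play), where
+ # 'app_port' is an illustrative role param; old comma-separated requirement
+ # strings such as 'src,version' are rejected above.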
diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
new file mode 100644
index 00000000..1c5c5203
--- /dev/null
+++ b/lib/ansible/playbook/role/metadata.py
@@ -0,0 +1,128 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError, AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.helpers import load_list_of_roles
+from ansible.playbook.role.requirement import RoleRequirement
+
+__all__ = ['RoleMetadata']
+
+
+class RoleMetadata(Base, CollectionSearch):
+ '''
+ This class wraps the parsing and validation of the optional metadata
+ within each Role (meta/main.yml).
+ '''
+
+ _allow_duplicates = FieldAttribute(isa='bool', default=False)
+ _dependencies = FieldAttribute(isa='list', default=list)
+ _galaxy_info = FieldAttribute(isa='GalaxyInfo')
+
+ def __init__(self, owner=None):
+ self._owner = owner
+ super(RoleMetadata, self).__init__()
+
+ @staticmethod
+ def load(data, owner, variable_manager=None, loader=None):
+ '''
+ Returns a new RoleMetadata object based on the datastructure passed in.
+ '''
+
+ if not isinstance(data, dict):
+ raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
+
+ m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
+ return m
+
+ def _load_dependencies(self, attr, ds):
+ '''
+ This is a helper loading function for the dependencies list,
+ which returns a list of RoleInclude objects
+ '''
+
+ roles = []
+ if ds:
+ if not isinstance(ds, list):
+ raise AnsibleParserError("Expected role dependencies to be a list.", obj=self._ds)
+
+ for role_def in ds:
+ if isinstance(role_def, string_types) or 'role' in role_def or 'name' in role_def:
+ roles.append(role_def)
+ continue
+ try:
+ # role_def is new style: { src: 'galaxy.role,version,name', other_vars: "here" }
+ def_parsed = RoleRequirement.role_yaml_parse(role_def)
+ if def_parsed.get('name'):
+ role_def['name'] = def_parsed['name']
+ roles.append(role_def)
+ except AnsibleError as exc:
+ raise AnsibleParserError(to_native(exc), obj=role_def, orig_exc=exc)
+
+ current_role_path = None
+ collection_search_list = None
+
+ if self._owner:
+ current_role_path = os.path.dirname(self._owner._role_path)
+
+ # if the calling role has a collections search path defined, consult it
+ collection_search_list = self._owner.collections[:] or []
+
+ # if the calling role is a collection role, ensure that its containing collection is searched first
+ owner_collection = self._owner._role_collection
+ if owner_collection:
+ collection_search_list = [c for c in collection_search_list if c != owner_collection]
+ collection_search_list.insert(0, owner_collection)
+ # ensure fallback role search works
+ if 'ansible.legacy' not in collection_search_list:
+ collection_search_list.append('ansible.legacy')
+
+ try:
+ return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path,
+ variable_manager=self._variable_manager, loader=self._loader,
+ collection_search_list=collection_search_list)
+ except AssertionError as e:
+ raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds, orig_exc=e)
+
+ def _load_galaxy_info(self, attr, ds):
+ '''
+ This is a helper loading function for the galaxy info entry
+ in the metadata. It is intended to return a GalaxyInfo object rather
+ than a plain dictionary, though it currently returns the data unchanged.
+ '''
+
+ return ds
+
+ def serialize(self):
+ return dict(
+ allow_duplicates=self._allow_duplicates,
+ dependencies=self._dependencies
+ )
+
+ def deserialize(self, data):
+ setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
+ setattr(self, 'dependencies', data.get('dependencies', []))
diff --git a/lib/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
new file mode 100644
index 00000000..18cea8ff
--- /dev/null
+++ b/lib/ansible/playbook/role/requirement.py
@@ -0,0 +1,130 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.playbook.role.definition import RoleDefinition
+from ansible.utils.display import Display
+from ansible.utils.galaxy import scm_archive_resource
+
+__all__ = ['RoleRequirement']
+
+VALID_SPEC_KEYS = [
+ 'name',
+ 'role',
+ 'scm',
+ 'src',
+ 'version',
+]
+
+display = Display()
+
+
+class RoleRequirement(RoleDefinition):
+
+ """
+ Helper class for Galaxy, used to parse role dependencies specified
+ in both meta/main.yml and requirements.yml files.
+ """
+
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def repo_url_to_role_name(repo_url):
+ # gets the role name out of a repo URL like
+ # "http://git.example.com/repos/repo.git" => "repo"
+
+ if '://' not in repo_url and '@' not in repo_url:
+ return repo_url
+ trailing_path = repo_url.split('/')[-1]
+ if trailing_path.endswith('.git'):
+ trailing_path = trailing_path[:-4]
+ if trailing_path.endswith('.tar.gz'):
+ trailing_path = trailing_path[:-7]
+ if ',' in trailing_path:
+ trailing_path = trailing_path.split(',')[0]
+ return trailing_path
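+
+ # Examples (editor's note):
+ # repo_url_to_role_name('http://git.example.com/repos/repo.git') -> 'repo'
+ # repo_url_to_role_name('git@example.com:user/role.tar.gz') -> 'role'
+ # repo_url_to_role_name('plain_name') -> 'plain_name' (no '://' or '@')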
+
+ @staticmethod
+ def role_yaml_parse(role):
+
+ if isinstance(role, string_types):
+ name = None
+ scm = None
+ src = None
+ version = None
+ if ',' in role:
+ if role.count(',') == 1:
+ (src, version) = role.strip().split(',', 1)
+ elif role.count(',') == 2:
+ (src, version, name) = role.strip().split(',', 2)
+ else:
+ raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
+ else:
+ src = role
+
+ if name is None:
+ name = RoleRequirement.repo_url_to_role_name(src)
+ if '+' in src:
+ (scm, src) = src.split('+', 1)
+
+ return dict(name=name, src=src, scm=scm, version=version)
+
+ if 'role' in role:
+ name = role['role']
+ if ',' in name:
+ raise AnsibleError("Invalid old style role requirement: %s" % name)
+ else:
+ del role['role']
+ role['name'] = name
+ else:
+ role = role.copy()
+
+ if 'src' in role:
+ # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
+ if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
+ role["src"] = "git+" + role["src"]
+
+ if '+' in role["src"]:
+ (scm, src) = role["src"].split('+')
+ role["scm"] = scm
+ role["src"] = src
+
+ if 'name' not in role:
+ role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])
+
+ if 'version' not in role:
+ role['version'] = ''
+
+ if 'scm' not in role:
+ role['scm'] = None
+
+ for key in list(role.keys()):
+ if key not in VALID_SPEC_KEYS:
+ role.pop(key)
+
+ return role
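+
+ # Examples of the string form handled above (editor's note):
+ # 'geerlingguy.apache' -> {'name': 'geerlingguy.apache',
+ # 'src': 'geerlingguy.apache', 'scm': None, 'version': None}
+ # 'git+https://host/repo.git,v1.0' -> {'name': 'repo',
+ # 'src': 'https://host/repo.git', 'scm': 'git', 'version': 'v1.0'}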
+
+ @staticmethod
+ def scm_archive_role(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
+
+ return scm_archive_resource(src, scm=scm, name=name, version=version, keep_scm_meta=keep_scm_meta)
diff --git a/lib/ansible/playbook/role_include.py b/lib/ansible/playbook/role_include.py
new file mode 100644
index 00000000..2ae80ca6
--- /dev/null
+++ b/lib/ansible/playbook/role_include.py
@@ -0,0 +1,176 @@
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from os.path import basename
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.block import Block
+from ansible.playbook.task_include import TaskInclude
+from ansible.playbook.role import Role
+from ansible.playbook.role.include import RoleInclude
+from ansible.utils.display import Display
+from ansible.module_utils.six import string_types
+
+__all__ = ['IncludeRole']
+
+display = Display()
+
+
+class IncludeRole(TaskInclude):
+
+ """
+ A role include is derived from a regular task include to handle the
+ special circumstances related to the `- include_role: ...` task.
+ """
+
+ BASE = ('name', 'role') # directly assigned
+ FROM_ARGS = ('tasks_from', 'vars_from', 'defaults_from', 'handlers_from') # used to populate the _from_files dict in the role
+ OTHER_ARGS = ('apply', 'public', 'allow_duplicates') # assigned to matching property
+ VALID_ARGS = tuple(frozenset(BASE + FROM_ARGS + OTHER_ARGS)) # all valid args
+
+ # =================================================================================
+ # ATTRIBUTES
+
+ # private as these are 'module options' rather than task properties
+ _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
+ _public = FieldAttribute(isa='bool', default=False, private=True)
+
+ def __init__(self, block=None, role=None, task_include=None):
+
+ super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
+
+ self._from_files = {}
+ self._parent_role = role
+ self._role_name = None
+ self._role_path = None
+
+ def get_name(self):
+ ''' return the name of the task '''
+ return self.name or "%s : %s" % (self.action, self._role_name)
+
+ def get_block_list(self, play=None, variable_manager=None, loader=None):
+
+ # only need play passed in when dynamic
+ if play is None:
+ myplay = self._parent._play
+ else:
+ myplay = play
+
+ ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader, collection_list=self.collections)
+ ri.vars.update(self.vars)
+
+ # build role
+ actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files,
+ from_include=True)
+ actual_role._metadata.allow_duplicates = self.allow_duplicates
+
+ if self.statically_loaded or self.public:
+ myplay.roles.append(actual_role)
+
+ # save this for later use
+ self._role_path = actual_role._role_path
+
+ # compile role with parent roles as dependencies to ensure they inherit
+ # variables
+ if not self._parent_role:
+ dep_chain = []
+ else:
+ dep_chain = list(self._parent_role._parents)
+ dep_chain.append(self._parent_role)
+
+ p_block = self.build_parent_block()
+
+ # collections value is not inherited; override with the value we calculated during role setup
+ p_block.collections = actual_role.collections
+
+ blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
+ for b in blocks:
+ b._parent = p_block
+ # HACK: parent inheritance doesn't seem to have a way to handle this intermediate override until squashed/finalized
+ b.collections = actual_role.collections
+
+ # update the available handlers in the play
+ handlers = actual_role.get_handler_blocks(play=myplay)
+ for h in handlers:
+ h._parent = p_block
+ myplay.handlers = myplay.handlers + handlers
+ return blocks, handlers
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+
+ ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
+
+ # Validate options
+ my_arg_names = frozenset(ir.args.keys())
+
+ # name is needed, or use role as alias
+ ir._role_name = ir.args.get('name', ir.args.get('role'))
+ if ir._role_name is None:
+ raise AnsibleParserError("'name' is a required field for %s." % ir.action, obj=data)
+
+ if 'public' in ir.args and ir.action not in C._ACTION_INCLUDE_ROLE:
+ raise AnsibleParserError('Invalid options for %s: public' % ir.action, obj=data)
+
+ # reject invalid args, otherwise we would silently ignore them
+ bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
+ if bad_opts:
+ raise AnsibleParserError('Invalid options for %s: %s' % (ir.action, ','.join(list(bad_opts))), obj=data)
+
+ # build options for role includes
+ for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
+ from_key = key.replace('_from', '')
+ args_value = ir.args.get(key)
+ if not isinstance(args_value, string_types):
+ raise AnsibleParserError('Expected a string for %s but got %s instead' % (key, type(args_value)))
+ ir._from_files[from_key] = basename(args_value)
+
+ apply_attrs = ir.args.get('apply', {})
+ if apply_attrs and ir.action not in C._ACTION_INCLUDE_ROLE:
+ raise AnsibleParserError('Invalid options for %s: apply' % ir.action, obj=data)
+ elif not isinstance(apply_attrs, dict):
+ raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
+
+ # manual list as otherwise the options would set other task parameters we don't want.
+ for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
+ setattr(ir, option, ir.args.get(option))
+
+ return ir
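+
+ # Example (editor's note): for a task
+ # - include_role: {name: common, tasks_from: setup.yml}
+ # load() sets ir._role_name = 'common' and ir._from_files =
+ # {'tasks': 'setup.yml'}; only the basename of a *_from argument is kept.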
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+
+ new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
+ new_me.statically_loaded = self.statically_loaded
+ new_me._from_files = self._from_files.copy()
+ new_me._parent_role = self._parent_role
+ new_me._role_name = self._role_name
+ new_me._role_path = self._role_path
+
+ return new_me
+
+ def get_include_params(self):
+ v = super(IncludeRole, self).get_include_params()
+ if self._parent_role:
+ v.update(self._parent_role.get_role_params())
+ v.setdefault('ansible_parent_role_names', []).insert(0, self._parent_role.get_name())
+ v.setdefault('ansible_parent_role_paths', []).insert(0, self._parent_role._role_path)
+ return v
diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
new file mode 100644
index 00000000..d8a71582
--- /dev/null
+++ b/lib/ansible/playbook/taggable.py
@@ -0,0 +1,89 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.template import Templar
+
+
+class Taggable:
+
+ untagged = frozenset(['untagged'])
+ _tags = FieldAttribute(isa='list', default=list, listof=(string_types, int), extend=True)
+
+ def _load_tags(self, attr, ds):
+ if isinstance(ds, list):
+ return ds
+ elif isinstance(ds, string_types):
+ # str.split() always returns a list, so the result can be used directly
+ return [x.strip() for x in ds.split(',')]
+ else:
+ raise AnsibleError('tags must be specified as a list', obj=ds)
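+
+ # Examples (editor's note): _load_tags(attr, 'web, db') -> ['web', 'db'],
+ # _load_tags(attr, ['web']) -> ['web']; any other type raises AnsibleError.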
+
+ def evaluate_tags(self, only_tags, skip_tags, all_vars):
+ ''' this checks if the current item should be executed depending on tag options '''
+
+ if self.tags:
+ templar = Templar(loader=self._loader, variables=all_vars)
+ tags = templar.template(self.tags)
+
+ _temp_tags = set()
+ for tag in tags:
+ if isinstance(tag, list):
+ _temp_tags.update(tag)
+ else:
+ _temp_tags.add(tag)
+ tags = _temp_tags
+ self.tags = list(tags)
+ else:
+ # this makes isdisjoint work for untagged
+ tags = self.untagged
+
+ should_run = True # default, tasks to run
+
+ if only_tags:
+ if 'always' in tags:
+ should_run = True
+ elif ('all' in only_tags and 'never' not in tags):
+ should_run = True
+ elif not tags.isdisjoint(only_tags):
+ should_run = True
+ elif 'tagged' in only_tags and tags != self.untagged and 'never' not in tags:
+ should_run = True
+ else:
+ should_run = False
+
+ if should_run and skip_tags:
+
+ # Check for tags that we need to skip
+ if 'all' in skip_tags:
+ if 'always' not in tags or 'always' in skip_tags:
+ should_run = False
+ elif not tags.isdisjoint(skip_tags):
+ should_run = False
+ elif 'tagged' in skip_tags and tags != self.untagged:
+ should_run = False
+
+ return should_run
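+
+ # Examples (editor's note), for a task with tags = ['deploy']:
+ # evaluate_tags({'deploy'}, set(), all_vars) -> True
+ # evaluate_tags({'all'}, {'deploy'}, all_vars) -> False (skipped)
+ # and an untagged task runs under --tags all but not under --tags tagged.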
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
new file mode 100644
index 00000000..9762505c
--- /dev/null
+++ b/lib/ansible/playbook/task.py
@@ -0,0 +1,543 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleAssertionError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems, string_types
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.plugins.loader import lookup_loader
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.block import Block
+from ansible.playbook.collectionsearch import CollectionSearch
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.loop_control import LoopControl
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.utils.collection_loader import AnsibleCollectionConfig
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+
+__all__ = ['Task']
+
+display = Display()
+
+
+class Task(Base, Conditional, Taggable, CollectionSearch):
+
+ """
+ A task is a language feature that represents a call to a module, with given arguments and other parameters.
+ A handler is a subclass of a task.
+
+ Usage:
+
+ Task.load(datastructure) -> Task
+ Task.something(...)
+ """
+
+ # =================================================================================
+ # ATTRIBUTES
+ # load_<attribute_name> and
+ # validate_<attribute_name>
+ # will be used if defined
+ # might be possible to define others
+
+ # NOTE: ONLY set defaults on task attributes that are not inheritable;
+ # inheritance is only triggered if the 'current value' is None.
+ # A default can be set at the play/top-level object and inheritance will take its course.
+
+ _args = FieldAttribute(isa='dict', default=dict)
+ _action = FieldAttribute(isa='string')
+
+ _async_val = FieldAttribute(isa='int', default=0, alias='async')
+ _changed_when = FieldAttribute(isa='list', default=list)
+ _delay = FieldAttribute(isa='int', default=5)
+ _delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool')
+ _failed_when = FieldAttribute(isa='list', default=list)
+ _loop = FieldAttribute()
+ _loop_control = FieldAttribute(isa='class', class_type=LoopControl, inherit=False)
+ _notify = FieldAttribute(isa='list')
+ _poll = FieldAttribute(isa='int', default=C.DEFAULT_POLL_INTERVAL)
+ _register = FieldAttribute(isa='string', static=True)
+ _retries = FieldAttribute(isa='int', default=3)
+ _until = FieldAttribute(isa='list', default=list)
+
+ # deprecated, used to be loop and loop_args but loop has been repurposed
+ _loop_with = FieldAttribute(isa='string', private=True, inherit=False)
+
+ def __init__(self, block=None, role=None, task_include=None):
+ ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
+
+ # This is a list of all the candidate action names for transparent execution of module_defaults with redirected content
+ # This isn't a FieldAttribute to prevent it from being set via the playbook
+ self._ansible_internal_redirect_list = []
+
+ self._role = role
+ self._parent = None
+
+ if task_include:
+ self._parent = task_include
+ else:
+ self._parent = block
+
+ super(Task, self).__init__()
+
+ def get_path(self):
+ ''' return the absolute path of the task with its line number '''
+
+ path = ""
+ if hasattr(self, '_ds') and hasattr(self._ds, '_data_source') and hasattr(self._ds, '_line_number'):
+ path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
+ elif hasattr(self._parent._play, '_ds') and hasattr(self._parent._play._ds, '_data_source') and hasattr(self._parent._play._ds, '_line_number'):
+ path = "%s:%s" % (self._parent._play._ds._data_source, self._parent._play._ds._line_number)
+ return path
+
+ def get_name(self, include_role_fqcn=True):
+ ''' return the name of the task '''
+
+ if self._role:
+ role_name = self._role.get_name(include_role_fqcn=include_role_fqcn)
+
+ if self._role and self.name and role_name not in self.name:
+ return "%s : %s" % (role_name, self.name)
+ elif self.name:
+ return self.name
+ else:
+ if self._role:
+ return "%s : %s" % (role_name, self.action)
+ else:
+ return "%s" % (self.action,)
+
+ def _merge_kv(self, ds):
+ if ds is None:
+ return ""
+ elif isinstance(ds, string_types):
+ return ds
+ elif isinstance(ds, dict):
+ buf = ""
+ for (k, v) in iteritems(ds):
+ if k.startswith('_'):
+ continue
+ buf = buf + "%s=%s " % (k, v)
+ buf = buf.strip()
+ return buf
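+
+ # Example (editor's note): _merge_kv({'src': 'a', 'dest': 'b'}) -> 'src=a dest=b'
+ # (dict insertion order; keys starting with '_' are skipped).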
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Task(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def __repr__(self):
+ ''' returns a human readable representation of the task '''
+ if self.get_name() in C._ACTION_META:
+ return "TASK: meta (%s)" % self.args['_raw_params']
+ else:
+ return "TASK: %s" % self.get_name()
+
+ def _preprocess_with_loop(self, ds, new_ds, k, v):
+ ''' take a lookup plugin name and store it correctly '''
+
+ loop_name = k.replace("with_", "")
+ if new_ds.get('loop') is not None or new_ds.get('loop_with') is not None:
+ raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
+ if v is None:
+ raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
+ new_ds['loop_with'] = loop_name
+ new_ds['loop'] = v
+ # display.deprecated("with_ type loops are being phased out, use the 'loop' keyword instead",
+ # version="2.10", collection_name='ansible.builtin')
+
+ def preprocess_data(self, ds):
+ '''
+ Tasks are especially complex data structures, so they need pre-processing;
+ keep it short.
+ '''
+
+ if not isinstance(ds, dict):
+ raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure suitable for the
+ # attributes of the task class
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
+ default_collection = AnsibleCollectionConfig.default_collection
+
+ collections_list = ds.get('collections')
+ if collections_list is None:
+ # use the parent value if our ds doesn't define it
+ collections_list = self.collections
+ else:
+ # Validate this untemplated field early on to guarantee we are dealing with a list.
+ # This is also done in CollectionSearch._load_collections() but this runs before that call.
+ collections_list = self.get_validated_value('collections', self._collections, collections_list, None)
+
+ if default_collection and not self._role: # FIXME: and not a collections role
+ if collections_list:
+ if default_collection not in collections_list:
+ collections_list.insert(0, default_collection)
+ else:
+ collections_list = [default_collection]
+
+ if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:
+ collections_list.append('ansible.legacy')
+
+ if collections_list:
+ ds['collections'] = collections_list
+
+ # use the args parsing class to determine the action, args,
+ # and the delegate_to value from the various possible forms
+ # supported as legacy
+ args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
+ try:
+ (action, args, delegate_to) = args_parser.parse()
+ except AnsibleParserError as e:
+ # if the raised exception was created with obj=ds args, then it includes the detail,
+ # so we don't need to add it and can simply re-raise.
+ if e._obj:
+ raise
+ # But if it wasn't, we can add the yaml object now to get more detail
+ raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
+ else:
+ self._ansible_internal_redirect_list = args_parser.internal_redirect_list[:]
+
+ # the command/shell/script modules used to support the `cmd` arg,
+ # which corresponds to what we now call _raw_params, so move that
+ # value over to _raw_params (assuming it is empty)
+ if action in C._ACTION_HAS_CMD:
+ if 'cmd' in args:
+ if args.get('_raw_params', '') != '':
+ raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
+ " Please put everything in one or the other place.", obj=ds)
+ args['_raw_params'] = args.pop('cmd')
+
+ new_ds['action'] = action
+ new_ds['args'] = args
+ new_ds['delegate_to'] = delegate_to
+
+ # we handle any 'vars' specified in the ds here, as we may
+ # be adding things to them below (special handling for includes).
+ # When that deprecated feature is removed, this can be too.
+ if 'vars' in ds:
+ # _load_vars is defined in Base, and is used to load a dictionary
+ # or list of dictionaries in a standard way
+ new_ds['vars'] = self._load_vars(None, ds.get('vars'))
+ else:
+ new_ds['vars'] = dict()
+
+ for (k, v) in iteritems(ds):
+ if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
+ # we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
+ continue
+ elif k.startswith('with_') and k.replace("with_", "") in lookup_loader:
+ # transform into loop property
+ self._preprocess_with_loop(ds, new_ds, k, v)
+ else:
+ # pre-2.0 syntax allowed variables for include statements at the top level of the task,
+ # so we move those into the 'vars' dictionary here, and show a deprecation message
+ # as we will remove this at some point in the future.
+ if action in C._ACTION_INCLUDE and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
+ display.deprecated("Specifying include variables at the top-level of the task is deprecated."
+ " Please see:\nhttps://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
+ " for currently supported syntax regarding included files and variables",
+ version="2.12", collection_name='ansible.builtin')
+ new_ds['vars'][k] = v
+ elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self._valid_attrs:
+ new_ds[k] = v
+ else:
+ display.warning("Ignoring invalid attribute: %s" % k)
+
+ return super(Task, self).preprocess_data(new_ds)
+
+ def _load_loop_control(self, attr, ds):
+ if not isinstance(ds, dict):
+ raise AnsibleParserError(
+ "the `loop_control` value must be specified as a dictionary and cannot "
+ "be a variable itself (though it can contain variables)",
+ obj=ds,
+ )
+
+ return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _validate_attributes(self, ds):
+ try:
+ super(Task, self)._validate_attributes(ds)
+ except AnsibleParserError as e:
+ e.message += '\nThis error can be suppressed as a warning using the "invalid_task_attribute_failed" configuration'
+ raise e
+
+ def post_validate(self, templar):
+ '''
+ Override of base class post_validate, to also do final validation on
+ the block and task include (if any) to which this task belongs.
+ '''
+
+ if self._parent:
+ self._parent.post_validate(templar)
+
+ if AnsibleCollectionConfig.default_collection:
+ pass
+
+ super(Task, self).post_validate(templar)
+
+ def _post_validate_loop(self, attr, value, templar):
+ '''
+ Override post validation for the loop field, which is templated
+ specially in the TaskExecutor class when evaluating loops.
+ '''
+ return value
+
+ def _post_validate_environment(self, attr, value, templar):
+ '''
+ Override post validation of the environment field, as we don't want
+ to template it too early.
+ '''
+ env = {}
+ if value is not None:
+
+ def _parse_env_kv(k, v):
+ try:
+ env[k] = templar.template(v, convert_bare=False)
+ except AnsibleUndefinedVariable as e:
+ error = to_native(e)
+ if self.action in C._ACTION_FACT_GATHERING and ('ansible_facts.env' in error or 'ansible_env' in error):
+ # ignore as fact gathering is required for 'env' facts
+ return
+ raise
+
+ if isinstance(value, list):
+ for env_item in value:
+ if isinstance(env_item, dict):
+ for k in env_item:
+ _parse_env_kv(k, env_item[k])
+ else:
+ isdict = templar.template(env_item, convert_bare=False)
+ if isinstance(isdict, dict):
+ env.update(isdict)
+ else:
+ display.warning("could not parse environment value, skipping: %s" % value)
+
+ elif isinstance(value, dict):
+ # should not really happen
+ env = dict()
+ for env_item in value:
+ _parse_env_kv(env_item, value[env_item])
+ else:
+ # at this point it should be a simple string, also should not happen
+ env = templar.template(value, convert_bare=False)
+
+ return env
+
+ def _post_validate_changed_when(self, attr, value, templar):
+ '''
+ changed_when is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def _post_validate_failed_when(self, attr, value, templar):
+ '''
+ failed_when is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def _post_validate_until(self, attr, value, templar):
+ '''
+ until is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def get_vars(self):
+ all_vars = dict()
+ if self._parent:
+ all_vars.update(self._parent.get_vars())
+
+ all_vars.update(self.vars)
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+
+ return all_vars
+
+ def get_include_params(self):
+ all_vars = dict()
+ if self._parent:
+ all_vars.update(self._parent.get_include_params())
+ if self.action in C._ACTION_ALL_INCLUDES:
+ all_vars.update(self.vars)
+ return all_vars
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+ new_me = super(Task, self).copy()
+
+ # if the task has an associated list of candidate names, copy it to the new object too
+ new_me._ansible_internal_redirect_list = self._ansible_internal_redirect_list[:]
+
+ new_me._parent = None
+ if self._parent and not exclude_parent:
+ new_me._parent = self._parent.copy(exclude_tasks=exclude_tasks)
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ return new_me
+
+ def serialize(self):
+ data = super(Task, self).serialize()
+
+ if not self._squashed and not self._finalized:
+ if self._parent:
+ data['parent'] = self._parent.serialize()
+ data['parent_type'] = self._parent.__class__.__name__
+
+ if self._role:
+ data['role'] = self._role.serialize()
+
+ if self._ansible_internal_redirect_list:
+ data['_ansible_internal_redirect_list'] = self._ansible_internal_redirect_list[:]
+
+ return data
+
+ def deserialize(self, data):
+
+ # import is here to avoid import loops
+ from ansible.playbook.task_include import TaskInclude
+ from ansible.playbook.handler_task_include import HandlerTaskInclude
+
+ parent_data = data.get('parent', None)
+ if parent_data:
+ parent_type = data.get('parent_type')
+ if parent_type == 'Block':
+ p = Block()
+ elif parent_type == 'TaskInclude':
+ p = TaskInclude()
+ elif parent_type == 'HandlerTaskInclude':
+ p = HandlerTaskInclude()
+ p.deserialize(parent_data)
+ self._parent = p
+ del data['parent']
+
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+ del data['role']
+
+ self._ansible_internal_redirect_list = data.get('_ansible_internal_redirect_list', [])
+
+ super(Task, self).deserialize(data)
+
+ def set_loader(self, loader):
+ '''
+ Sets the loader on this object and recursively on parent, child objects.
+ This is used primarily after the Task has been serialized/deserialized, which
+ does not preserve the loader.
+ '''
+
+ self._loader = loader
+
+ if self._parent:
+ self._parent.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, extend=False, prepend=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a task value.
+ '''
+
+ extend = self._valid_attrs[attr].extend
+ prepend = self._valid_attrs[attr].prepend
+ try:
+ value = self._attributes[attr]
+ # If parent is static, we can grab attrs from the parent
+ # otherwise, defer to the grandparent
+ if getattr(self._parent, 'statically_loaded', True):
+ _parent = self._parent
+ else:
+ _parent = self._parent._parent
+
+ if _parent and (value is Sentinel or extend):
+ if getattr(_parent, 'statically_loaded', True):
+ # vars are always inheritable; other attributes might not be inheritable from the parent but should still be from other ancestors
+ if attr != 'vars' and hasattr(_parent, '_get_parent_attribute'):
+ parent_value = _parent._get_parent_attribute(attr)
+ else:
+ parent_value = _parent._attributes.get(attr, Sentinel)
+
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+ except KeyError:
+ pass
+
+ return value
+
+ def get_dep_chain(self):
+ if self._parent:
+ return self._parent.get_dep_chain()
+ else:
+ return None
+
+ def get_search_path(self):
+ '''
+ Return the list of paths you should search for files, in order.
+ This follows role/playbook dependency chain.
+ '''
+ path_stack = []
+
+ dep_chain = self.get_dep_chain()
+ # inside role: add the dependency chain from current to dependent
+ if dep_chain:
+ path_stack.extend(reversed([x._role_path for x in dep_chain]))
+
+ # add path of task itself, unless it is already in the list
+ task_dir = os.path.dirname(self.get_path())
+ if task_dir not in path_stack:
+ path_stack.append(task_dir)
+
+ return path_stack
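+
+ # Sketch (editor's note): for a task in role C reached via A -> B -> C, the
+ # dep chain is [A, B, C], so the stack is [C_path, B_path, A_path] followed
+ # by the directory of the task file itself.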
+
+ def all_parents_static(self):
+ if self._parent:
+ return self._parent.all_parents_static()
+ return True
+
+ def get_first_parent_include(self):
+ from ansible.playbook.task_include import TaskInclude
+ if self._parent:
+ if isinstance(self._parent, TaskInclude):
+ return self._parent
+ return self._parent.get_first_parent_include()
+ return None
diff --git a/lib/ansible/playbook/task_include.py b/lib/ansible/playbook/task_include.py
new file mode 100644
index 00000000..e2dc92e9
--- /dev/null
+++ b/lib/ansible/playbook/task_include.py
@@ -0,0 +1,156 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible.constants as C
+from ansible.errors import AnsibleParserError
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+from ansible.utils.sentinel import Sentinel
+
+__all__ = ['TaskInclude']
+
+display = Display()
+
+
+class TaskInclude(Task):
+
+ """
+ A task include is derived from a regular task to handle the special
+ circumstances related to the `- include: ...` task.
+ """
+
+ BASE = frozenset(('file', '_raw_params')) # directly assigned
+ OTHER_ARGS = frozenset(('apply',)) # assigned to matching property
+ VALID_ARGS = BASE.union(OTHER_ARGS) # all valid args
+ VALID_INCLUDE_KEYWORDS = frozenset(('action', 'args', 'collections', 'debugger', 'ignore_errors', 'loop', 'loop_control',
+ 'loop_with', 'name', 'no_log', 'register', 'run_once', 'tags', 'vars',
+ 'when'))
+
+ # =================================================================================
+ # ATTRIBUTES
+
+ _static = FieldAttribute(isa='bool', default=None)
+
+ def __init__(self, block=None, role=None, task_include=None):
+ super(TaskInclude, self).__init__(block=block, role=role, task_include=task_include)
+ self.statically_loaded = False
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ ti = TaskInclude(block=block, role=role, task_include=task_include)
+ task = ti.check_options(
+ ti.load_data(data, variable_manager=variable_manager, loader=loader),
+ data
+ )
+
+ return task
+
+ def check_options(self, task, data):
+ '''
+ Method for options validation to use in 'load_data' for TaskInclude and HandlerTaskInclude
+ since they share the same validations. It is not named 'validate_options' on purpose
+ to prevent confusion with '_validate_*' methods. Note that the task passed might be changed
+ as a side-effect of this method.
+ '''
+ my_arg_names = frozenset(task.args.keys())
+
+ # reject invalid args, otherwise we would silently ignore them
+ bad_opts = my_arg_names.difference(self.VALID_ARGS)
+ if bad_opts and task.action in C._ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS:
+ raise AnsibleParserError('Invalid options for %s: %s' % (task.action, ','.join(list(bad_opts))), obj=data)
+
+ if not task.args.get('_raw_params'):
+ task.args['_raw_params'] = task.args.pop('file', None)
+ if not task.args['_raw_params']:
+ raise AnsibleParserError('No file specified for %s' % task.action)
+
+ apply_attrs = task.args.get('apply', {})
+ if apply_attrs and task.action not in C._ACTION_INCLUDE_TASKS:
+ raise AnsibleParserError('Invalid options for %s: apply' % task.action, obj=data)
+ elif not isinstance(apply_attrs, dict):
+ raise AnsibleParserError('Expected a dict for apply but got %s instead' % type(apply_attrs), obj=data)
+
+ return task
+
+ def preprocess_data(self, ds):
+ ds = super(TaskInclude, self).preprocess_data(ds)
+
+ diff = set(ds.keys()).difference(self.VALID_INCLUDE_KEYWORDS)
+ for k in diff:
+ # This check doesn't handle ``include`` as we have no idea at this point if it is static or not
+ if ds[k] is not Sentinel and ds['action'] in C._ACTION_ALL_INCLUDE_ROLE_TASKS:
+ if C.INVALID_TASK_ATTRIBUTE_FAILED:
+ raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (k, self.__class__.__name__), obj=ds)
+ else:
+ display.warning("Ignoring invalid attribute: %s" % k)
+
+ return ds
+
+ def copy(self, exclude_parent=False, exclude_tasks=False):
+ new_me = super(TaskInclude, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
+ new_me.statically_loaded = self.statically_loaded
+ return new_me
+
+ def get_vars(self):
+ '''
+ We override the parent Task() classes get_vars here because
+ we need to include the args of the include into the vars as
+ they are params to the included tasks. But ONLY for 'include'
+ '''
+ if self.action not in C._ACTION_INCLUDE:
+ all_vars = super(TaskInclude, self).get_vars()
+ else:
+ all_vars = dict()
+ if self._parent:
+ all_vars.update(self._parent.get_vars())
+
+ all_vars.update(self.vars)
+ all_vars.update(self.args)
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+
+ return all_vars
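+
+ # e.g. '- include: tasks.yml foo=bar' surfaces {'foo': 'bar'} in the vars
+ # returned here, making 'foo' visible inside tasks.yml; 'tags' and 'when' are
+ # stripped because they control the include task itself, not the included tasks.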
+
+ def build_parent_block(self):
+ '''
+ This method is used to create the parent block for the included tasks
+ when ``apply`` is specified
+ '''
+ apply_attrs = self.args.pop('apply', {})
+ if apply_attrs:
+ apply_attrs['block'] = []
+ p_block = Block.load(
+ apply_attrs,
+ play=self._parent._play,
+ task_include=self,
+ role=self._role,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+ else:
+ p_block = self
+
+ return p_block
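+
+ # e.g. an include such as:
+ # - include_tasks: tasks.yml
+ # apply:
+ # become: yes
+ # loads the 'apply' keywords into a synthetic parent Block, so every task in
+ # tasks.yml inherits them.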
diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
new file mode 100644
index 00000000..73857f45
--- /dev/null
+++ b/lib/ansible/plugins/__init__.py
@@ -0,0 +1,89 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import ABCMeta
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import with_metaclass, string_types
+from ansible.utils.display import Display
+
+display = Display()
+
+# Global so that all instances of a PluginLoader will share the caches
+MODULE_CACHE = {}
+PATH_CACHE = {}
+PLUGIN_PATH_CACHE = {}
+
+
+def get_plugin_class(obj):
+ if isinstance(obj, string_types):
+ return obj.lower().replace('module', '')
+ else:
+ return obj.__class__.__name__.lower().replace('module', '')
+
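+# e.g. get_plugin_class('ActionModule') returns 'action', and an instance of a
+# class named 'Connection' yields 'connection' (illustrative class names).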
+
+class AnsiblePlugin(with_metaclass(ABCMeta, object)):
+
+ # allow extra passthrough parameters
+ allow_extras = False
+
+ def __init__(self):
+ self._options = {}
+
+ def get_option(self, option, hostvars=None):
+ if option not in self._options:
+ try:
+ option_value = C.config.get_config_value(option, plugin_type=get_plugin_class(self), plugin_name=self._load_name, variables=hostvars)
+ except AnsibleError as e:
+ raise KeyError(to_native(e))
+ self.set_option(option, option_value)
+ return self._options.get(option)
+
+ def set_option(self, option, value):
+ self._options[option] = value
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ '''
+ Sets the _options attribute with the configuration/keyword information for this plugin
+
+ :arg task_keys: Dict with playbook keywords that affect this option
+ :arg var_options: Dict with 'connection variables'
+ :arg direct: Dict with 'direct assignment'
+ '''
+ self._options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)
+
+ # allow extras/wildcards from vars that are not directly consumed in configuration
+ # this is needed to support things like winrm that can have extended protocol options we don't directly handle
+ if self.allow_extras and var_options and '_extras' in var_options:
+ self.set_option('_extras', var_options['_extras'])
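+
+ # e.g. the winrm connection plugin enables allow_extras so that
+ # 'ansible_winrm_*' variables it does not declare as options still reach it
+ # through this '_extras' bucket.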
+
+ def has_option(self, option):
+ if not self._options:
+ self.set_options()
+ return option in self._options
+
+ def _check_required(self):
+ # FIXME: standardize required check based on config
+ pass
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
new file mode 100644
index 00000000..4e5e82ad
--- /dev/null
+++ b/lib/ansible/plugins/action/__init__.py
@@ -0,0 +1,1232 @@
+# coding: utf-8
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import random
+import re
+import stat
+import tempfile
+import time
+from abc import ABCMeta, abstractmethod
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail, AnsiblePluginRemovedError
+from ansible.executor.module_common import modify_module
+from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.module_utils.json_utils import _filter_non_json_lines
+from ansible.module_utils.six import binary_type, string_types, text_type, iteritems, with_metaclass
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.utils.jsonify import jsonify
+from ansible.release import __version__
+from ansible.utils.collection_loader import resource_from_fqcr
+from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText
+from ansible.vars.clean import remove_internal_keys
+
+display = Display()
+
+
+class ActionBase(with_metaclass(ABCMeta, object)):
+
+ '''
+ This class is the base class for all action plugins, and defines
+ code common to all actions. The base class handles the connection
+ by putting/getting files and executing commands based on the current
+ action in use.
+ '''
+
+ # A set of valid arguments
+ _VALID_ARGS = frozenset([])
+
+ def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
+ self._task = task
+ self._connection = connection
+ self._play_context = play_context
+ self._loader = loader
+ self._templar = templar
+ self._shared_loader_obj = shared_loader_obj
+ self._cleanup_remote_tmp = False
+
+ self._supports_check_mode = True
+ self._supports_async = False
+
+ # interpreter discovery state
+ self._discovered_interpreter_key = None
+ self._discovered_interpreter = False
+ self._discovery_deprecation_warnings = []
+ self._discovery_warnings = []
+
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+
+ self._used_interpreter = None
+
+ @abstractmethod
+ def run(self, tmp=None, task_vars=None):
+ """ Action Plugins should implement this method to perform their
+ tasks. Everything else in this base class is a helper method for the
+ action plugin to do that.
+
+ :kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
+ another one and wants to use the same remote tmp for both should set
+ self._connection._shell.tmpdir rather than this parameter.
+ :kwarg task_vars: The variables (host vars, group vars, config vars,
+ etc) associated with this task.
+ :returns: dictionary of results from the module
+
+ Implementors of action modules may find the following variables especially useful:
+
+ * Module parameters. These are stored in self._task.args
+ """
+
+ result = {}
+
+ if tmp is not None:
+ result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
+ ' plugins should set self._connection._shell.tmpdir to share'
+ ' the tmpdir']
+ del tmp
+
+ if self._task.async_val and not self._supports_async:
+ raise AnsibleActionFail('async is not supported for this task.')
+ elif self._play_context.check_mode and not self._supports_check_mode:
+ raise AnsibleActionSkip('check mode is not supported for this task.')
+ elif self._task.async_val and self._play_context.check_mode:
+ raise AnsibleActionFail('check mode and async cannot be used on the same task.')
+
+ # Error if invalid argument is passed
+ if self._VALID_ARGS:
+ task_opts = frozenset(self._task.args.keys())
+ bad_opts = task_opts.difference(self._VALID_ARGS)
+ if bad_opts:
+ raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
+
+ if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
+ self._make_tmp_path()
+
+ return result
+
+ def cleanup(self, force=False):
+ """Method to perform a clean up at the end of an action plugin execution
+
+ By default this is designed to clean up the shell tmpdir, and is toggled based on whether
+ async is in use
+
+ Action plugins may override this if they deem necessary, but should still call this method
+ via super
+ """
+ if force or not self._task.async_val:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ def get_plugin_option(self, plugin, option, default=None):
+ """Helper to get an option from a plugin without having to use
+ the try/except dance everywhere to set a default
+ """
+ try:
+ return plugin.get_option(option)
+ except (AttributeError, KeyError):
+ return default
+
+ def get_become_option(self, option, default=None):
+ return self.get_plugin_option(self._connection.become, option, default=default)
+
+ def get_connection_option(self, option, default=None):
+ return self.get_plugin_option(self._connection, option, default=default)
+
+ def get_shell_option(self, option, default=None):
+ return self.get_plugin_option(self._connection._shell, option, default=default)
+
+ def _remote_file_exists(self, path):
+ cmd = self._connection._shell.exists(path)
+ result = self._low_level_execute_command(cmd=cmd, sudoable=True)
+ if result['rc'] == 0:
+ return True
+ return False
+
+ def _configure_module(self, module_name, module_args, task_vars):
+ '''
+ Handles the loading and templating of the module code through the
+ modify_module() function.
+ '''
+ if self._task.delegate_to:
+ use_vars = task_vars.get('ansible_delegated_vars')[self._task.delegate_to]
+ else:
+ use_vars = task_vars
+
+ split_module_name = module_name.split('.')
+ collection_name = '.'.join(split_module_name[0:2]) if len(split_module_name) > 2 else ''
+ leaf_module_name = resource_from_fqcr(module_name)
+
+ # Search module path(s) for named module.
+ for mod_type in self._connection.module_implementation_preferences:
+ # Check to determine if PowerShell modules are supported, and apply
+ # some fixes (hacks) to module name + args.
+ if mod_type == '.ps1':
+ # FIXME: This should be temporary and moved to an exec subsystem plugin where we can define the mapping
+ # for each subsystem.
+ win_collection = 'ansible.windows'
+ rewrite_collection_names = ['ansible.builtin', 'ansible.legacy', '']
+ # async_status, win_stat, win_file, win_copy, and win_ping are not identical
+ # to their python counterparts, but they are compatible enough for our
+ # internal usage
+ # NB: we only rewrite the module if it's not being called by the user (eg, an action calling something else)
+ # and if it's unqualified or FQ to a builtin
+ if leaf_module_name in ('stat', 'file', 'copy', 'ping') and \
+ collection_name in rewrite_collection_names and self._task.action != module_name:
+ module_name = '%s.win_%s' % (win_collection, leaf_module_name)
+ elif leaf_module_name == 'async_status' and collection_name in rewrite_collection_names:
+ module_name = '%s.%s' % (win_collection, leaf_module_name)
+
+ # TODO: move this tweak down to the modules, not extensible here
+ # Remove extra quotes surrounding path parameters before sending to module.
+ if leaf_module_name in ['win_stat', 'win_file', 'win_copy', 'slurp'] and module_args and \
+ hasattr(self._connection._shell, '_unquote'):
+ for key in ('src', 'dest', 'path'):
+ if key in module_args:
+ module_args[key] = self._connection._shell._unquote(module_args[key])
+
+ result = self._shared_loader_obj.module_loader.find_plugin_with_context(module_name, mod_type, collection_list=self._task.collections)
+
+ if not result.resolved:
+ if result.redirect_list and len(result.redirect_list) > 1:
+ # take the last one in the redirect list, we may have successfully jumped through N other redirects
+ target_module_name = result.redirect_list[-1]
+
+ raise AnsibleError("The module {0} was redirected to {1}, which could not be loaded.".format(module_name, target_module_name))
+
+ module_path = result.plugin_resolved_path
+ if module_path:
+ break
+ else: # This is a for-else: http://bit.ly/1ElPkyg
+ raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
+
+ # insert shared code and arguments into the module
+ final_environment = dict()
+ self._compute_environment_string(final_environment)
+
+ become_kwargs = {}
+ if self._connection.become:
+ become_kwargs['become'] = True
+ become_kwargs['become_method'] = self._connection.become.name
+ become_kwargs['become_user'] = self._connection.become.get_option('become_user',
+ playcontext=self._play_context)
+ become_kwargs['become_password'] = self._connection.become.get_option('become_pass',
+ playcontext=self._play_context)
+ become_kwargs['become_flags'] = self._connection.become.get_option('become_flags',
+ playcontext=self._play_context)
+
+ # modify_module will exit early if interpreter discovery is required; re-run after if necessary
+ for dummy in (1, 2):
+ try:
+ (module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
+ task_vars=use_vars,
+ module_compression=self._play_context.module_compression,
+ async_timeout=self._task.async_val,
+ environment=final_environment,
+ **become_kwargs)
+ break
+ except InterpreterDiscoveryRequiredError as idre:
+ self._discovered_interpreter = AnsibleUnsafeText(discover_interpreter(
+ action=self,
+ interpreter_name=idre.interpreter_name,
+ discovery_mode=idre.discovery_mode,
+ task_vars=use_vars))
+
+ # update the local task_vars with the discovered interpreter (which might be None);
+ # we'll propagate back to the controller in the task result
+ discovered_key = 'discovered_interpreter_%s' % idre.interpreter_name
+
+ # update the local vars copy for the retry
+ use_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
+
+ # TODO: this condition prevents 'wrong host' from being updated
+ # but in future we would want to be able to update 'delegated host facts'
+ # irrespective of task settings
+ if not self._task.delegate_to or self._task.delegate_facts:
+ # store in local task_vars facts collection for the retry and any other usages in this worker
+ task_vars['ansible_facts'][discovered_key] = self._discovered_interpreter
+ # preserve this so _execute_module can propagate back to controller as a fact
+ self._discovered_interpreter_key = discovered_key
+ else:
+ task_vars['ansible_delegated_vars'][self._task.delegate_to]['ansible_facts'][discovered_key] = self._discovered_interpreter
+
+ return (module_style, module_shebang, module_data, module_path)
+
+ def _compute_environment_string(self, raw_environment_out=None):
+ '''
+ Builds the environment string to be used when executing the remote task.
+ '''
+
+ final_environment = dict()
+ if self._task.environment is not None:
+ environments = self._task.environment
+ if not isinstance(environments, list):
+ environments = [environments]
+
+ # The order of environments matters: we merge in the parent's
+ # values first so that values set on the block, and then the
+ # task, 'win' in precedence
+ for environment in environments:
+ if environment is None or len(environment) == 0:
+ continue
+ temp_environment = self._templar.template(environment)
+ if not isinstance(temp_environment, dict):
+ raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
+ # very deliberately using update here instead of combine_vars, as
+ # these environment settings should not need to merge sub-dicts
+ final_environment.update(temp_environment)
+
+ if len(final_environment) > 0:
+ final_environment = self._templar.template(final_environment)
+
+ if isinstance(raw_environment_out, dict):
+ raw_environment_out.clear()
+ raw_environment_out.update(final_environment)
+
+ return self._connection._shell.env_prefix(**final_environment)
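+
+ # e.g. a task environment of {'http_proxy': 'http://proxy:3128'} is rendered
+ # by the sh shell plugin as a prefix along the lines of
+ # http_proxy='http://proxy:3128' prepended to the remote command; the exact
+ # quoting is up to the shell plugin.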
+
+ def _early_needs_tmp_path(self):
+ '''
+ Determines if a tmp path should be created before the action is executed.
+ '''
+
+ return getattr(self, 'TRANSFERS_FILES', False)
+
+ def _is_pipelining_enabled(self, module_style, wrap_async=False):
+ '''
+ Determines whether pipelining is required and possible for this module execution
+ '''
+
+ try:
+ is_enabled = self._connection.get_option('pipelining')
+ except (KeyError, AttributeError, ValueError):
+ is_enabled = self._play_context.pipelining
+
+ # winrm supports async pipeline
+ # TODO: make other class property 'has_async_pipelining' to separate cases
+ always_pipeline = self._connection.always_pipeline_modules
+
+ # su does not work with pipelining
+ # TODO: add has_pipelining class prop to become plugins
+ become_exception = (self._connection.become.name if self._connection.become else '') != 'su'
+
+ # all of these conditions must be true for pipelining to be used
+ conditions = [
+ self._connection.has_pipelining, # connection class supports it
+ is_enabled or always_pipeline, # enabled via config or forced via connection (eg winrm)
+ module_style == "new", # old style modules do not support pipelining
+ not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files
+ not wrap_async or always_pipeline, # async does not normally support pipelining unless it does (eg winrm)
+ become_exception,
+ ]
+
+ return all(conditions)
+
+ def _get_admin_users(self):
+ '''
+ Returns a list of admin users that are configured for the current shell
+ plugin
+ '''
+
+ return self.get_shell_option('admin_users', ['root'])
+
+ def _get_remote_user(self):
+ ''' consistently get the 'remote_user' for the action plugin '''
+ # TODO: use 'current user running ansible' as fallback when moving away from play_context
+ # pwd.getpwuid(os.getuid()).pw_name
+ remote_user = None
+ try:
+ remote_user = self._connection.get_option('remote_user')
+ except KeyError:
+ # plugin does not have remote_user option, fallback to default and/or play_context
+ remote_user = getattr(self._connection, 'default_user', None) or self._play_context.remote_user
+ except AttributeError:
+ # plugin does not use config system, fallback to old play_context
+ remote_user = self._play_context.remote_user
+ return remote_user
+
+ def _is_become_unprivileged(self):
+ '''
+ Returns True when the become user is not the connection user and is not
+ one of the shell-configured admin users
+ '''
+ # if we don't use become then we know we aren't switching to a
+ # different unprivileged user
+ if not self._connection.become:
+ return False
+
+ # if we use become and the user is not an admin (or same user) then
+ # we need to return become_unprivileged as True
+ admin_users = self._get_admin_users()
+ remote_user = self._get_remote_user()
+ become_user = self.get_become_option('become_user')
+ return bool(become_user and become_user not in admin_users + [remote_user])
+
+ def _make_tmp_path(self, remote_user=None):
+ '''
+ Create and return a temporary path on a remote box.
+ '''
+
+ # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
+ # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
+ # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
+ if getattr(self._connection, '_remote_is_local', False):
+ tmpdir = C.DEFAULT_LOCAL_TMP
+ else:
+ # NOTE: shell plugins should populate this setting anyway, but they don't do remote expansion, which
+ # we need for 'non posix' systems like cloud-init and solaris
+ tmpdir = self._remote_expand_user(self.get_shell_option('remote_tmp', default='~/.ansible/tmp'), sudoable=False)
+
+ become_unprivileged = self._is_become_unprivileged()
+ basefile = self._connection._shell._generate_temp_dir_name()
+ cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
+ result = self._low_level_execute_command(cmd, sudoable=False)
+
+ # error handling on this seems a little aggressive?
+ if result['rc'] != 0:
+ if result['rc'] == 5:
+ output = 'Authentication failure.'
+ elif result['rc'] == 255 and self._connection.transport in ('ssh',):
+
+ if self._play_context.verbosity > 3:
+ output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
+ else:
+ output = (u'SSH encountered an unknown error during the connection. '
+ 'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
+
+ elif u'No space left on device' in result['stderr']:
+ output = result['stderr']
+ else:
+ output = ('Failed to create temporary directory. '
+ 'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
+ 'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp", for more error information use -vvv. '
+ 'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
+ if 'stdout' in result and result['stdout'] != u'':
+ output = output + u", stdout output: %s" % result['stdout']
+ if self._play_context.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
+ output += u", stderr output: %s" % result['stderr']
+ raise AnsibleConnectionFailure(output)
+ else:
+ self._cleanup_remote_tmp = True
+
+ try:
+ stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
+ rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
+ except IndexError:
+ # stdout was empty or just space, set to / to trigger error in next if
+ rc = '/'
+
+ # Catch failure conditions, files should never be
+ # written to locations in /.
+ if rc == '/':
+ raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
+
+ self._connection._shell.tmpdir = rc
+
+ return rc
+
+ def _should_remove_tmp_path(self, tmp_path):
+ '''Determine if temporary path should be deleted or kept by user request/config'''
+ return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
+
+ def _remove_tmp_path(self, tmp_path):
+ '''Remove a temporary path we created. '''
+
+ if tmp_path is None and self._connection._shell.tmpdir:
+ tmp_path = self._connection._shell.tmpdir
+
+ if self._should_remove_tmp_path(tmp_path):
+ cmd = self._connection._shell.remove(tmp_path, recurse=True)
+ # If we have gotten here we have a working ssh configuration.
+ # If ssh breaks we could leave tmp directories out on the remote system.
+ tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
+
+ if tmp_rm_res.get('rc', 0) != 0:
+ display.warning('Error deleting remote temporary files (rc: %s, stderr: %s)'
+ % (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
+ else:
+ self._connection._shell.tmpdir = None
+
+ def _transfer_file(self, local_path, remote_path):
+ """
+ Copy a file from the controller to a remote path
+
+ :arg local_path: Path on controller to transfer
+ :arg remote_path: Path on the remote system to transfer into
+
+ .. warning::
+ * When you use this function you likely want to use fixup_perms2() on the
+ remote_path to make sure that the remote file is readable when the user becomes
+ a non-privileged user.
+ * If you use fixup_perms2() on the file and copy or move the file into place, you will
+ need to then remove filesystem acls on the file once it has been copied into place by
+ the module. See how the copy module implements this for help.
+ """
+ self._connection.put_file(local_path, remote_path)
+ return remote_path
+
+ def _transfer_data(self, remote_path, data):
+ '''
+ Copies the module data out to the temporary module path.
+ '''
+
+ if isinstance(data, dict):
+ data = jsonify(data)
+
+ afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ afo = os.fdopen(afd, 'wb')
+ try:
+ data = to_bytes(data, errors='surrogate_or_strict')
+ afo.write(data)
+ except Exception as e:
+ raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
+
+ afo.flush()
+ afo.close()
+
+ try:
+ self._transfer_file(afile, remote_path)
+ finally:
+ os.unlink(afile)
+
+ return remote_path
+
+ def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
+ """
+ We need the files we upload to be readable (and sometimes executable)
+ by the user being sudo'd to but we want to limit other people's access
+ (because the files could contain passwords or other private
+ information). We achieve this in one of these ways:
+
+ * If no sudo is performed or the remote_user is sudo'ing to
+ themselves, we don't have to change permissions.
+ * If the remote_user sudo's to a privileged user (for instance, root),
+ we don't have to change permissions
+ * If the remote_user sudo's to an unprivileged user then we attempt to
+ grant the unprivileged user access via file system acls.
+ * If granting file system acls fails we try to change the owner of the
+ file with chown which only works in case the remote_user is
+ privileged or the remote systems allows chown calls by unprivileged
+ users (e.g. HP-UX)
+ * If the chown fails we can set the file to be world readable so that
+ the second unprivileged user can read the file.
+ Since this could allow other users to get access to private
+ information we only do this if ansible is configured with
+ "allow_world_readable_tmpfiles" in the ansible.cfg
+ """
+ if remote_user is None:
+ remote_user = self._get_remote_user()
+
+ if getattr(self._connection._shell, "_IS_WINDOWS", False):
+ # This won't work on Powershell as-is, so we'll just completely skip until
+ # we have a need for it, at which point we'll have to do something different.
+ return remote_paths
+
+ if self._is_become_unprivileged():
+ # Unprivileged user that's different than the ssh user. Let's get
+ # to work!
+
+ # Try to use file system acls to make the files readable for sudo'd
+ # user
+ if execute:
+ chmod_mode = 'rx'
+ setfacl_mode = 'r-x'
+ else:
+ chmod_mode = 'rX'
+ # NOTE: this form fails silently on freebsd. We currently
+ # never call _fixup_perms2() with execute=False but if we
+ # start to we'll have to fix this.
+ setfacl_mode = 'r-X'
+
+ res = self._remote_set_user_facl(remote_paths, self.get_become_option('become_user'), setfacl_mode)
+ if res['rc'] != 0:
+ # File system acls failed; let's try to use chown next
+ # Set executable bit first as on some systems an
+ # unprivileged user can use chown
+ if execute:
+ res = self._remote_chmod(remote_paths, 'u+x')
+ if res['rc'] != 0:
+ raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
+
+ res = self._remote_chown(remote_paths, self.get_become_option('become_user'))
+ if res['rc'] != 0 and remote_user in self._get_admin_users():
+ # chown failed even if remote_user is administrator/root
+ raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as a privileged user. '
+ 'Unprivileged become user would be unable to read the file.')
+ elif res['rc'] != 0:
+ if self.get_shell_option('world_readable_temp', C.ALLOW_WORLD_READABLE_TMPFILES):
+ # chown and fs acls failed -- do things this insecure
+ # way only if the user opted in in the config file
+ display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user. '
+ 'This may be insecure. For information on securing this, see '
+ 'https://docs.ansible.com/ansible/user_guide/become.html#risks-of-becoming-an-unprivileged-user')
+ res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
+ if res['rc'] != 0:
+ raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
+ else:
+ raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user '
+ '(rc: %s, err: %s). For information on working around this, see '
+ 'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
+ % (res['rc'], to_native(res['stderr'])))
+ elif execute:
+ # Can't depend on the file being transferred with execute permissions.
+ # Only need user perms because no become was used here
+ res = self._remote_chmod(remote_paths, 'u+x')
+ if res['rc'] != 0:
+ raise AnsibleError('Failed to set execute bit on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
+
+ return remote_paths
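+
+ # Typical call site, as used in _execute_module below:
+ # self._fixup_perms2(remote_files, self._get_remote_user())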
+
+ def _remote_chmod(self, paths, mode, sudoable=False):
+ '''
+ Issue a remote chmod command
+ '''
+ cmd = self._connection._shell.chmod(paths, mode)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
+ def _remote_chown(self, paths, user, sudoable=False):
+ '''
+ Issue a remote chown command
+ '''
+ cmd = self._connection._shell.chown(paths, user)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
+ def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
+ '''
+ Issue a remote call to setfacl
+ '''
+ cmd = self._connection._shell.set_user_facl(paths, user, mode)
+ res = self._low_level_execute_command(cmd, sudoable=sudoable)
+ return res
+
+ def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
+ '''
+ Get information from remote file.
+ '''
+ if tmp is not None:
+ display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
+ ' plugins should set self._connection._shell.tmpdir to share'
+ ' the tmpdir')
+ del tmp # No longer used
+
+ module_args = dict(
+ path=path,
+ follow=follow,
+ get_checksum=checksum,
+ checksum_algorithm='sha1',
+ )
+ mystat = self._execute_module(module_name='ansible.legacy.stat', module_args=module_args, task_vars=all_vars,
+ wrap_async=False)
+
+ if mystat.get('failed'):
+ msg = mystat.get('module_stderr')
+ if not msg:
+ msg = mystat.get('module_stdout')
+ if not msg:
+ msg = mystat.get('msg')
+ raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
+
+ if not mystat['stat']['exists']:
+ # an empty string might match a real checksum; '1' never will, and it keeps backwards compatibility
+ mystat['stat']['checksum'] = '1'
+
+ # happens sometimes when it is a dir and not on bsd
+ if 'checksum' not in mystat['stat']:
+ mystat['stat']['checksum'] = ''
+ elif not isinstance(mystat['stat']['checksum'], string_types):
+ raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
+
+ return mystat['stat']
+
+ def _remote_checksum(self, path, all_vars, follow=False):
+ '''
+ Produces a remote checksum given a path.
+ Returns a number 0-5 for specific errors instead of a checksum, each chosen so it cannot collide with a real checksum:
+ 0 = unknown error
+ 1 = file does not exist, this might not be an error
+ 2 = permissions issue
+ 3 = it's a directory, not a file
+ 4 = stat module failed, likely due to not finding python
+ 5 = appropriate json module not found
+ '''
+ x = "0" # unknown error has occurred
+ try:
+ remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
+ if remote_stat['exists'] and remote_stat['isdir']:
+ x = "3" # its a directory not a file
+ else:
+ x = remote_stat['checksum'] # if 1, file is missing
+ except AnsibleError as e:
+ errormsg = to_text(e)
+ if errormsg.endswith(u'Permission denied'):
+ x = "2" # cannot read file
+ elif errormsg.endswith(u'MODULE FAILURE'):
+ x = "4" # python not found or module uncaught exception
+ elif 'json' in errormsg:
+ x = "5" # json module needed
+ finally:
+ return x # pylint: disable=lost-exception
+
+ def _remote_expand_user(self, path, sudoable=True, pathsep=None):
+ ''' takes a remote path and performs tilde/$HOME expansion on the remote host '''
+
+ # We only expand ~/path and ~username/path
+ if not path.startswith('~'):
+ return path
+
+ # Per Jborean, we don't have to worry about Windows as we don't have a notion of a user's home
+ # dir there.
+ split_path = path.split(os.path.sep, 1)
+ expand_path = split_path[0]
+
+ if expand_path == '~':
+ # Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
+ # As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
+ # This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
+ become_user = self.get_become_option('become_user')
+ if getattr(self._connection, '_remote_is_local', False):
+ pass
+ elif sudoable and self._connection.become and become_user:
+ expand_path = '~%s' % become_user
+ else:
+ # use remote user instead, if none set default to current user
+ expand_path = '~%s' % (self._get_remote_user() or '')
+
+ # use shell to construct appropriate command and execute
+ cmd = self._connection._shell.expand_user(expand_path)
+ data = self._low_level_execute_command(cmd, sudoable=False)
+
+ try:
+ initial_fragment = data['stdout'].strip().splitlines()[-1]
+ except IndexError:
+ initial_fragment = None
+
+ if not initial_fragment:
+ # Something went wrong trying to expand the path remotely. Try using pwd;
+ # if that also fails, return the original string
+ cmd = self._connection._shell.pwd()
+ pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
+ if pwd:
+ expanded = pwd
+ else:
+ expanded = path
+
+ elif len(split_path) > 1:
+ expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
+ else:
+ expanded = initial_fragment
+
+ if '..' in os.path.dirname(expanded).split('/'):
+ raise AnsibleError("'%s' returned an invalid relative home directory path containing '..'" % self._play_context.remote_addr)
+
+ return expanded
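+
+ # e.g. '~/.ansible/tmp' typically expands to something like
+ # '/home/<remote user>/.ansible/tmp'; the value always comes from the remote
+ # shell, never from the controller's filesystem.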
+
+ def _strip_success_message(self, data):
+ '''
+ Removes the BECOME-SUCCESS message from the data.
+ '''
+ if data.strip().startswith('BECOME-SUCCESS-'):
+ data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
+ return data
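+
+ # e.g. u'BECOME-SUCCESS-abc123\nreal output' is reduced to u'real output'; the
+ # marker is emitted by the become machinery so success can be detected before
+ # any module output (abc123 is an illustrative suffix).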
+
+ def _update_module_args(self, module_name, module_args, task_vars):
+
+ # set check mode in the module arguments, if required
+ if self._play_context.check_mode:
+ if not self._supports_check_mode:
+ raise AnsibleError("check mode is not supported for this operation")
+ module_args['_ansible_check_mode'] = True
+ else:
+ module_args['_ansible_check_mode'] = False
+
+ # set no log in the module arguments, if required
+ no_target_syslog = C.config.get_config_value('DEFAULT_NO_TARGET_SYSLOG', variables=task_vars)
+ module_args['_ansible_no_log'] = self._play_context.no_log or no_target_syslog
+
+ # set debug in the module arguments, if required
+ module_args['_ansible_debug'] = C.DEFAULT_DEBUG
+
+ # let module know we are in diff mode
+ module_args['_ansible_diff'] = self._play_context.diff
+
+ # let module know our verbosity
+ module_args['_ansible_verbosity'] = display.verbosity
+
+ # give the module information about the ansible version
+ module_args['_ansible_version'] = __version__
+
+ # give the module information about its name
+ module_args['_ansible_module_name'] = module_name
+
+ # set the syslog facility to be used in the module
+ module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
+
+ # let module know about filesystems that selinux treats specially
+ module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
+
+ # what to do when parameter values are converted to strings
+ module_args['_ansible_string_conversion_action'] = C.STRING_CONVERSION_ACTION
+
+ # give the module the socket for persistent connections
+ module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
+ if not module_args['_ansible_socket']:
+ module_args['_ansible_socket'] = task_vars.get('ansible_socket')
+
+ # make sure all commands use the designated shell executable
+ module_args['_ansible_shell_executable'] = self._play_context.executable
+
+ # make sure modules are aware if they need to keep the remote files
+ module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
+
+ # make sure all commands use the designated temporary directory if created
+ if self._is_become_unprivileged(): # force fallback on remote_tmp as user cannot normally write to dir
+ module_args['_ansible_tmpdir'] = None
+ else:
+ module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
+
+ # make sure the remote_tmp value is sent through in case modules needs to create their own
+ module_args['_ansible_remote_tmp'] = self.get_shell_option('remote_tmp', default='~/.ansible/tmp')
+
+ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False):
+ '''
+ Transfer and run a module along with its arguments.
+ '''
+ if tmp is not None:
+ display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
+ ' should set self._connection._shell.tmpdir to share the tmpdir')
+ del tmp # No longer used
+ if delete_remote_tmp is not None:
+ display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
+ ' Action plugins should check self._connection._shell.tmpdir to'
+ ' see if a tmpdir existed before they were called to determine'
+ ' if they are responsible for removing it.')
+ del delete_remote_tmp # No longer used
+
+ tmpdir = self._connection._shell.tmpdir
+
+ # We set the module_style to new here so the remote_tmp is created
+ # before the module args are built if remote_tmp is needed (async).
+ # If the module_style turns out to not be new and we didn't create the
+ # remote tmp here, it will still be created. This must be done before
+ # calling self._update_module_args() so the module wrapper has the
+ # correct remote_tmp value set
+ if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
+ self._make_tmp_path()
+ tmpdir = self._connection._shell.tmpdir
+
+ if task_vars is None:
+ task_vars = dict()
+
+ # if a module name was not specified for this execution, use the action from the task
+ if module_name is None:
+ module_name = self._task.action
+ if module_args is None:
+ module_args = self._task.args
+
+ self._update_module_args(module_name, module_args, task_vars)
+
+ # FIXME: convert async_wrapper.py to not rely on environment variables
+ # make sure we get the right async_dir variable, backwards compatibility
+ # means we need to lookup the env value ANSIBLE_ASYNC_DIR first
+ remove_async_dir = None
+ if wrap_async or self._task.async_val:
+ env_async_dir = [e for e in self._task.environment if
+ "ANSIBLE_ASYNC_DIR" in e]
+ if len(env_async_dir) > 0:
+ msg = "Setting the async dir from the environment keyword " \
+ "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+ "shell option instead"
+ self._display.deprecated(msg, "2.12", collection_name='ansible.builtin')
+ else:
+ # ANSIBLE_ASYNC_DIR is not set on the task, we get the value
+ # from the shell option and temporarily add to the environment
+ # list for async_wrapper to pick up
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+ remove_async_dir = len(self._task.environment)
+ self._task.environment.append({"ANSIBLE_ASYNC_DIR": async_dir})
+
+ # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
+ (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
+ display.vvv("Using module file %s" % module_path)
+ if not shebang and module_style != 'binary':
+ raise AnsibleError("module (%s) is missing interpreter line" % module_name)
+
+ self._used_interpreter = shebang
+ remote_module_path = None
+
+ if not self._is_pipelining_enabled(module_style, wrap_async):
+ # we might need remote tmp dir
+ if tmpdir is None:
+ self._make_tmp_path()
+ tmpdir = self._connection._shell.tmpdir
+
+ remote_module_filename = self._connection._shell.get_remote_filename(module_path)
+ remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
+
+ args_file_path = None
+ if module_style in ('old', 'non_native_want_json', 'binary'):
+ # we'll also need a tmp file to hold our module arguments
+ args_file_path = self._connection._shell.join_path(tmpdir, 'args')
+
+ if remote_module_path or module_style != 'new':
+ display.debug("transferring module to remote %s" % remote_module_path)
+ if module_style == 'binary':
+ self._transfer_file(module_path, remote_module_path)
+ else:
+ self._transfer_data(remote_module_path, module_data)
+ if module_style == 'old':
+ # we need to dump the module args to a k=v string in a file on
+ # the remote system, which can be read and parsed by the module
+ args_data = ""
+ for k, v in iteritems(module_args):
+ args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
+ self._transfer_data(args_file_path, args_data)
+ elif module_style in ('non_native_want_json', 'binary'):
+ self._transfer_data(args_file_path, json.dumps(module_args))
+ display.debug("done transferring module to remote")
+
+ environment_string = self._compute_environment_string()
+
+ # remove the ANSIBLE_ASYNC_DIR env entry if we added a temporary one for
+ # the async_wrapper task - this is so the async_status plugin doesn't
+ # fire a deprecation warning when it runs after this task
+ if remove_async_dir is not None:
+ del self._task.environment[remove_async_dir]
+
+ remote_files = []
+ if tmpdir and remote_module_path:
+ remote_files = [tmpdir, remote_module_path]
+
+ if args_file_path:
+ remote_files.append(args_file_path)
+
+ sudoable = True
+ in_data = None
+ cmd = ""
+
+ if wrap_async and not self._connection.always_pipeline_modules:
+ # configure, upload, and chmod the async_wrapper module
+ (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(
+ module_name='ansible.legacy.async_wrapper', module_args=dict(), task_vars=task_vars)
+ async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
+ remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
+ self._transfer_data(remote_async_module_path, async_module_data)
+ remote_files.append(remote_async_module_path)
+
+ async_limit = self._task.async_val
+ async_jid = str(random.randint(0, 999999999999))
+
+ # call the interpreter for async_wrapper directly
+ # this permits use of a script for an interpreter on non-Linux platforms
+ # TODO: re-implement async_wrapper as a regular module to avoid this special case
+ interpreter = shebang.replace('#!', '').strip()
+ async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
+
+ if environment_string:
+ async_cmd.insert(0, environment_string)
+
+ if args_file_path:
+ async_cmd.append(args_file_path)
+ else:
+ # maintain a fixed number of positional parameters for async_wrapper
+ async_cmd.append('_')
+
+ if not self._should_remove_tmp_path(tmpdir):
+ async_cmd.append("-preserve_tmp")
+
+ cmd = " ".join(to_text(x) for x in async_cmd)
+
+ else:
+
+ if self._is_pipelining_enabled(module_style):
+ in_data = module_data
+ display.vvv("Pipelining is enabled.")
+ else:
+ cmd = remote_module_path
+
+ cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
+
+ # Fix permissions of the tmpdir path and tmpdir files. This should be called after all
+ # files have been transferred.
+ if remote_files:
+ # remove none/empty
+ remote_files = [x for x in remote_files if x]
+ self._fixup_perms2(remote_files, self._get_remote_user())
+
+ # actually execute
+ res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
+
+ # parse the main result
+ data = self._parse_returned_data(res)
+
+ # NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
+ # get internal info before cleaning
+ if data.pop("_ansible_suppress_tmpdir_delete", False):
+ self._cleanup_remote_tmp = False
+
+ # NOTE: yum returns results... but that made it 'compatible' with squashing, so we allow mappings, for now
+ if 'results' in data and (not isinstance(data['results'], Sequence) or isinstance(data['results'], string_types)):
+ data['ansible_module_results'] = data['results']
+ del data['results']
+ display.warning("Found internal 'results' key in module return, renamed to 'ansible_module_results'.")
+
+ # remove internal keys
+ remove_internal_keys(data)
+
+ if wrap_async:
+ # async_wrapper will clean up its tmpdir on its own so we want the controller side to
+ # forget about it now
+ self._connection._shell.tmpdir = None
+
+ # FIXME: for backwards compat, figure out if still makes sense
+ data['changed'] = True
+
+ # pre-split stdout/stderr into lines if needed
+ if 'stdout' in data and 'stdout_lines' not in data:
+ # if the value is 'False', a default won't catch it.
+ txt = data.get('stdout', None) or u''
+ data['stdout_lines'] = txt.splitlines()
+ if 'stderr' in data and 'stderr_lines' not in data:
+ # if the value is 'False', a default won't catch it.
+ txt = data.get('stderr', None) or u''
+ data['stderr_lines'] = txt.splitlines()
+
+ # propagate interpreter discovery results back to the controller
+ if self._discovered_interpreter_key:
+ if data.get('ansible_facts') is None:
+ data['ansible_facts'] = {}
+
+ data['ansible_facts'][self._discovered_interpreter_key] = self._discovered_interpreter
+
+ if self._discovery_warnings:
+ if data.get('warnings') is None:
+ data['warnings'] = []
+ data['warnings'].extend(self._discovery_warnings)
+
+ if self._discovery_deprecation_warnings:
+ if data.get('deprecations') is None:
+ data['deprecations'] = []
+ data['deprecations'].extend(self._discovery_deprecation_warnings)
+
+ # mark the entire module results untrusted as a template right here, since the current action could
+ # possibly template one of these values.
+ data = wrap_var(data)
+
+ display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
+ return data
+
+ def _parse_returned_data(self, res):
+ try:
+ filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
+ for w in warnings:
+ display.warning(w)
+
+ data = json.loads(filtered_output)
+ data['_ansible_parsed'] = True
+ except ValueError:
+ # not valid json, lets try to capture error
+ data = dict(failed=True, _ansible_parsed=False)
+ data['module_stdout'] = res.get('stdout', u'')
+ if 'stderr' in res:
+ data['module_stderr'] = res['stderr']
+ if res['stderr'].startswith(u'Traceback'):
+ data['exception'] = res['stderr']
+
+ # in some cases a traceback will arrive on stdout instead of stderr, such as when using ssh with -tt
+ if 'exception' not in data and data['module_stdout'].startswith(u'Traceback'):
+ data['exception'] = data['module_stdout']
+
+ # The default
+ data['msg'] = "MODULE FAILURE"
+
+ # try to figure out if we are missing interpreter
+ if self._used_interpreter is not None:
+ match = re.compile('%s: (?:No such file or directory|not found)' % self._used_interpreter.lstrip('!#'))
+ if match.search(data['module_stderr']) or match.search(data['module_stdout']):
+ data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
+
+ # always append hint
+ data['msg'] += '\nSee stdout/stderr for the exact error'
+
+ if 'rc' in res:
+ data['rc'] = res['rc']
+ return data
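+
+ # Failure-path sketch: for res = {'stdout': 'garbage', 'stderr': '', 'rc': 1}
+ # the except branch above returns {'failed': True, '_ansible_parsed': False,
+ # 'module_stdout': 'garbage', 'module_stderr': '',
+ # 'msg': 'MODULE FAILURE\nSee stdout/stderr for the exact error', 'rc': 1}.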
+
+ # FIXME: move to connection base
+ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
+ '''
+ This is the function which executes the low level shell command, which
+ may be commands to create/remove directories for temporary files, or to
+ run the module code or python directly when pipelining.
+
+ :kwarg encoding_errors: If the value returned by the command isn't
+ utf-8 then we have to figure out how to transform it to unicode.
+ If the value is just going to be displayed to the user (or
+ discarded) then the default of 'replace' is fine. If the data is
+ used as a key or is going to be written back out to a file
+ verbatim, then this won't work. May have to use some sort of
+ replacement strategy (python3 could use surrogateescape)
+ :kwarg chdir: cd into this directory before executing the command.
+ '''
+
+ display.debug("_low_level_execute_command(): starting")
+ # if not cmd:
+ # # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
+ # display.debug("_low_level_execute_command(): no command, exiting")
+ # return dict(stdout='', stderr='', rc=254)
+
+ if chdir:
+ display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
+ cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
+
+ # https://github.com/ansible/ansible/issues/68054
+ if executable:
+ self._connection._shell.executable = executable
+
+ ruser = self._get_remote_user()
+ buser = self.get_become_option('become_user')
+ if (sudoable and self._connection.become and # if sudoable and have become
+ resource_from_fqcr(self._connection.transport) != 'network_cli' and # if not using network_cli
+ (C.BECOME_ALLOW_SAME_USER or (buser != ruser or not any((ruser, buser))))): # if we allow same user PE or users are different and either is set
+ display.debug("_low_level_execute_command(): using become for this command")
+ cmd = self._connection.become.build_become_command(cmd, self._connection._shell)
+
+ if self._connection.allow_executable:
+ if executable is None:
+ executable = self._play_context.executable
+ # mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
+ # only applied for the default executable to avoid interfering with the raw action
+ cmd = self._connection._shell.append_command(cmd, 'sleep 0')
+ if executable:
+ cmd = executable + ' -c ' + shlex_quote(cmd)
+
+ display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
+
+ # Change directory to basedir of task for command execution when connection is local
+ if self._connection.transport == 'local':
+ self._connection.cwd = to_bytes(self._loader.get_basedir(), errors='surrogate_or_strict')
+
+ rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ # stdout and stderr may be either a file-like or a bytes object.
+ # Convert either one to a text type
+ if isinstance(stdout, binary_type):
+ out = to_text(stdout, errors=encoding_errors)
+ elif not isinstance(stdout, text_type):
+ out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
+ else:
+ out = stdout
+
+ if isinstance(stderr, binary_type):
+ err = to_text(stderr, errors=encoding_errors)
+ elif not isinstance(stderr, text_type):
+ err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
+ else:
+ err = stderr
+
+ if rc is None:
+ rc = 0
+
+ # be sure to remove the BECOME-SUCCESS message now
+ out = self._strip_success_message(out)
+
+ display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
+ return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
+
+ def _get_diff_data(self, destination, source, task_vars, source_file=True):
+
+ # Note: Since we do not diff the source and destination before we transform from bytes into
+ # text the diff between source and destination may not be accurate. To fix this, we'd need
+ # to move the diffing from the callback plugins into here.
+ #
+ # Example of data which would cause trouble is src_content == b'\xff' and dest_content ==
+ # b'\xfe'. Neither of those are valid utf-8 so both get turned into the replacement
+ # character: diff['before'] = u'�' ; diff['after'] = u'�' When the callback plugin later
+ # diffs before and after it shows an empty diff.
+
+ diff = {}
+ display.debug("Going to peek to see if file has changed permissions")
+ peek_result = self._execute_module(
+ module_name='ansible.legacy.file', module_args=dict(path=destination, _diff_peek=True),
+ task_vars=task_vars, persist_files=True)
+
+ if peek_result.get('failed', False):
+ display.warning(u"Failed to get diff between '%s' and '%s': %s" % (os.path.basename(source), destination, to_text(peek_result.get(u'msg', u''))))
+ return diff
+
+ if peek_result.get('rc', 0) == 0:
+
+ if peek_result.get('state') in (None, 'absent'):
+ diff['before'] = u''
+ elif peek_result.get('appears_binary'):
+ diff['dst_binary'] = 1
+ elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
+ diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
+ else:
+ display.debug(u"Slurping the file %s" % source)
+ dest_result = self._execute_module(
+ module_name='ansible.legacy.slurp', module_args=dict(path=destination),
+ task_vars=task_vars, persist_files=True)
+ if 'content' in dest_result:
+ dest_contents = dest_result['content']
+ if dest_result['encoding'] == u'base64':
+ dest_contents = base64.b64decode(dest_contents)
+ else:
+ raise AnsibleError("unknown encoding in content option, failed: %s" % to_native(dest_result))
+ diff['before_header'] = destination
+ diff['before'] = to_text(dest_contents)
+
+ if source_file:
+ st = os.stat(source)
+ if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
+ diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
+ else:
+ display.debug("Reading local copy of the file %s" % source)
+ try:
+ with open(source, 'rb') as src:
+ src_contents = src.read()
+ except Exception as e:
+ raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, to_native(e)))
+
+ if b"\x00" in src_contents:
+ diff['src_binary'] = 1
+ else:
+ diff['after_header'] = source
+ diff['after'] = to_text(src_contents)
+ else:
+ display.debug(u"source of file passed in")
+ diff['after_header'] = u'dynamically generated'
+ diff['after'] = source
+
+ if self._play_context.no_log:
+ if 'before' in diff:
+ diff["before"] = u""
+ if 'after' in diff:
+ diff["after"] = u" [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
+
+ return diff
+
+ def _find_needle(self, dirname, needle):
+ '''
+ find a needle in a haystack of paths, optionally using 'dirname' as a subdir.
+ This will build the ordered list of paths to search and pass them to dwim
+ to get back the first existing file found.
+ '''
+
+ # dwim already deals with playbook basedirs
+ path_stack = self._task.get_search_path()
+
+ # if missing it will return a file not found exception
+ return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
new file mode 100644
index 00000000..e418563e
--- /dev/null
+++ b/lib/ansible/plugins/action/add_host.py
@@ -0,0 +1,97 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleActionFail
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import string_types
+from ansible.plugins.action import ActionBase
+from ansible.parsing.utils.addresses import parse_address
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
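+ # Example task (illustrative; values are hypothetical):
+ #   - add_host:
+ #       name: web01.example.com:2222
+ #       groups: webservers,staging
+ #       ansible_user: deploy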
+
+class ActionModule(ActionBase):
+ ''' Create inventory hosts and groups in the memory inventory'''
+
+ # We need to be able to modify the inventory
+ BYPASS_HOST_LOOP = True
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ args = self._task.args
+ raw = args.pop('_raw_params', {})
+ if isinstance(raw, Mapping):
+ # TODO: create 'conflict' detection in base class to deal with repeats and aliases and warn user
+ args = combine_vars(raw, args)
+ else:
+ raise AnsibleActionFail('Invalid raw parameters passed, requires a dictionary/mapping, got a %s' % type(raw))
+
+ # Parse out any hostname:port patterns
+ new_name = args.get('name', args.get('hostname', args.get('host', None)))
+ if new_name is None:
+ raise AnsibleActionFail('name, host or hostname needs to be provided')
+
+ display.vv("creating host via 'add_host': hostname=%s" % new_name)
+
+ try:
+ name, port = parse_address(new_name, allow_ranges=False)
+ except Exception:
+ # not a parsable hostname, but might still be usable
+ name = new_name
+ port = None
+
+ if port:
+ args['ansible_ssh_port'] = port
+
+ groups = args.get('groupname', args.get('groups', args.get('group', '')))
+ # add it to the group if that was specified
+ new_groups = []
+ if groups:
+ if isinstance(groups, list):
+ group_list = groups
+ elif isinstance(groups, string_types):
+ group_list = groups.split(",")
+ else:
+ raise AnsibleActionFail("Groups must be specified as a list.", obj=self._task)
+
+ for group_name in group_list:
+ if group_name not in new_groups:
+ new_groups.append(group_name.strip())
+
+ # Add any variables to the new_host
+ host_vars = dict()
+ special_args = frozenset(('name', 'hostname', 'groupname', 'groups'))
+ for k in args.keys():
+ if k not in special_args:
+ host_vars[k] = args[k]
+
+ result['changed'] = False
+ result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars)
+ return result
diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py
new file mode 100644
index 00000000..06fa2df3
--- /dev/null
+++ b/lib/ansible/plugins/action/assemble.py
@@ -0,0 +1,166 @@
+# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
+# Stephen Fromm <sfromm@gmail.com>
+# Brian Coca <briancoca+dev@gmail.com>
+# Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import codecs
+import os
+import os.path
+import re
+import tempfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum_s
+
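+ # Example task (illustrative; paths are hypothetical):
+ #   - assemble:
+ #       src: files/sshd_fragments
+ #       dest: /etc/ssh/sshd_config
+ #       delimiter: '# --- fragment boundary ---'
+ #       remote_src: no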
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
+ ''' assemble a file from a directory of fragments '''
+
+ tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ tmp = os.fdopen(tmpfd, 'wb')
+ delimit_me = False
+ add_newline = False
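+ # delimit_me: the delimiter is only written once at least one fragment
+ # has been emitted; add_newline: set when the previous fragment lacked a
+ # trailing newline, so consecutive fragments never run together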
+
+ for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
+ if compiled_regexp and not compiled_regexp.search(f):
+ continue
+ fragment = u"%s/%s" % (src_path, f)
+ if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
+ continue
+
+ with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
+ fragment_content = fragment_fh.read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write(b'\n')
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = codecs.escape_decode(delimiter)[0]
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+ if not delimiter.endswith(b'\n'):
+ tmp.write(b'\n')
+
+ tmp.write(fragment_content)
+ delimit_me = True
+ if fragment_content.endswith(b'\n'):
+ add_newline = False
+ else:
+ add_newline = True
+
+ tmp.close()
+ return temp_path
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = False
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if task_vars is None:
+ task_vars = dict()
+
+ src = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ delimiter = self._task.args.get('delimiter', None)
+ remote_src = self._task.args.get('remote_src', 'yes')
+ regexp = self._task.args.get('regexp', None)
+ follow = self._task.args.get('follow', False)
+ ignore_hidden = self._task.args.get('ignore_hidden', False)
+ decrypt = self._task.args.pop('decrypt', True)
+
+ try:
+ if src is None or dest is None:
+ raise AnsibleActionFail("src and dest are required")
+
+ if boolean(remote_src, strict=False):
+ # call assemble via ansible.legacy to allow library/ overrides of the module without collection search
+ result.update(self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars))
+ raise _AnsibleActionDone()
+ else:
+ try:
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ if not os.path.isdir(src):
+ raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
+
+ _re = None
+ if regexp is not None:
+ _re = re.compile(regexp)
+
+ # Does all work assembling the file
+ path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
+
+ path_checksum = checksum_s(path)
+ dest = self._remote_expand_user(dest)
+ dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
+
+ diff = {}
+
+ # setup args for running modules
+ new_module_args = self._task.args.copy()
+
+ # clean assemble specific options
+ for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
+ if opt in new_module_args:
+ del new_module_args[opt]
+ new_module_args['dest'] = dest
+
+ if path_checksum != dest_stat['checksum']:
+
+ if self._play_context.diff:
+ diff = self._get_diff_data(dest, path, task_vars)
+
+ remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
+ xfered = self._transfer_file(path, remote_path)
+
+ # fix file permissions when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
+
+ new_module_args.update(dict(src=xfered,))
+
+ res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
+ if diff:
+ res['diff'] = diff
+ result.update(res)
+ else:
+ result.update(self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars))
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/lib/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py
new file mode 100644
index 00000000..7721a6b4
--- /dev/null
+++ b/lib/ansible/plugins/action/assert.py
@@ -0,0 +1,94 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.playbook.conditional import Conditional
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+
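+ # Example task (illustrative):
+ #   - assert:
+ #       that:
+ #         - ansible_facts['os_family'] == 'Debian'
+ #       fail_msg: 'unsupported OS family'
+ #       quiet: true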
+
+class ActionModule(ActionBase):
+ ''' Assert that the given expressions evaluate to true '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('fail_msg', 'msg', 'quiet', 'success_msg', 'that'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if 'that' not in self._task.args:
+ raise AnsibleError('conditional required in "that" string')
+
+ fail_msg = None
+ success_msg = None
+
+ fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
+ if fail_msg is None:
+ fail_msg = 'Assertion failed'
+ elif isinstance(fail_msg, list):
+ if not all(isinstance(x, string_types) for x in fail_msg):
+ raise AnsibleError('Type of one of the elements in fail_msg or msg list is not string type')
+ elif not isinstance(fail_msg, (string_types, list)):
+ raise AnsibleError('Incorrect type for fail_msg or msg, expected a string or list and got %s' % type(fail_msg))
+
+ success_msg = self._task.args.get('success_msg')
+ if success_msg is None:
+ success_msg = 'All assertions passed'
+ elif isinstance(success_msg, list):
+ if not all(isinstance(x, string_types) for x in success_msg):
+ raise AnsibleError('Type of one of the elements in success_msg list is not string type')
+ elif not isinstance(success_msg, (string_types, list)):
+ raise AnsibleError('Incorrect type for success_msg, expected a string or list and got %s' % type(success_msg))
+
+ quiet = boolean(self._task.args.get('quiet', False), strict=False)
+
+ # make sure the 'that' items are a list
+ thats = self._task.args['that']
+ if not isinstance(thats, list):
+ thats = [thats]
+
+ # Now we iterate over the that items, temporarily assigning them
+ # to the task's when value so we can evaluate the conditional using
+ # the built in evaluate function. The when has already been evaluated
+ # by this point, and is not used again, so we don't care about mangling
+ # that value now
+ cond = Conditional(loader=self._loader)
+ if not quiet:
+ result['_ansible_verbose_always'] = True
+
+ for that in thats:
+ cond.when = [that]
+ test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
+ if not test_result:
+ result['failed'] = True
+ result['evaluated_to'] = test_result
+ result['assertion'] = that
+
+ result['msg'] = fail_msg
+
+ return result
+
+ result['changed'] = False
+ result['msg'] = success_msg
+ return result
diff --git a/lib/ansible/plugins/action/async_status.py b/lib/ansible/plugins/action/async_status.py
new file mode 100644
index 00000000..7b69f624
--- /dev/null
+++ b/lib/ansible/plugins/action/async_status.py
@@ -0,0 +1,46 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
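+ # Example task (illustrative):
+ #   - async_status:
+ #       jid: "{{ long_job.ansible_job_id }}"
+ #     register: job_result
+ #     until: job_result.finished
+ #     retries: 30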
+
+class ActionModule(ActionBase):
+
+ _VALID_ARGS = frozenset(('jid', 'mode'))
+
+ def run(self, tmp=None, task_vars=None):
+ results = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if "jid" not in self._task.args:
+ raise AnsibleError("jid is required")
+ jid = self._task.args["jid"]
+ mode = self._task.args.get("mode", "status")
+
+ env_async_dir = [e for e in self._task.environment if
+ "ANSIBLE_ASYNC_DIR" in e]
+ if len(env_async_dir) > 0:
+ # for backwards compatibility we need to get the dir from
+ # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
+ # deprecated and will be removed in favour of shell options
+ async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
+
+ msg = "Setting the async dir from the environment keyword " \
+ "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+ "shell option instead"
+ self._display.deprecated(msg, "2.12", collection_name='ansible.builtin')
+ else:
+ # inject the async directory based on the shell option into the
+ # module args
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+
+ module_args = dict(jid=jid, mode=mode, _async_dir=async_dir)
+ status = self._execute_module(module_name='ansible.legacy.async_status', task_vars=task_vars,
+ module_args=module_args)
+ results = merge_hash(results, status)
+ return results
diff --git a/lib/ansible/plugins/action/command.py b/lib/ansible/plugins/action/command.py
new file mode 100644
index 00000000..53187ec8
--- /dev/null
+++ b/lib/ansible/plugins/action/command.py
@@ -0,0 +1,32 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
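+ # Example task (illustrative; the script path is hypothetical):
+ #   - command: /usr/local/bin/backup_db.sh
+ #     async: 120
+ #     poll: 10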
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_async = True
+ results = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # Command module has a special config option to turn off the command nanny warnings
+ if 'warn' not in self._task.args:
+ self._task.args['warn'] = C.COMMAND_WARNINGS
+
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+ # explicitly call `ansible.legacy.command` for backcompat to allow library/ override of `command` while not allowing
+ # collections search for an unqualified `command` module
+ results = merge_hash(results, self._execute_module(module_name='ansible.legacy.command', task_vars=task_vars, wrap_async=wrap_async))
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return results
diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py
new file mode 100644
index 00000000..cb3d15b3
--- /dev/null
+++ b/lib/ansible/plugins/action/copy.py
@@ -0,0 +1,599 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Toshio Kuratomi <tkuraotmi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import os.path
+import stat
+import tempfile
+import traceback
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum
+
+
+# Supplement the FILE_COMMON_ARGUMENTS with arguments that are specific to file
+REAL_FILE_ARGS = frozenset(FILE_COMMON_ARGUMENTS.keys()).union(
+ ('state', 'path', '_original_basename', 'recurse', 'force',
+ '_diff_peek', 'src'))
+
+
+def _create_remote_file_args(module_args):
+ """remove keys that are not relevant to file"""
+ return dict((k, v) for k, v in module_args.items() if k in REAL_FILE_ARGS)
+
+
+def _create_remote_copy_args(module_args):
+ """remove action plugin only keys"""
+ return dict((k, v) for k, v in module_args.items() if k not in ('content', 'decrypt'))
+
+
+def _walk_dirs(topdir, base_path=None, local_follow=False, trailing_slash_detector=None):
+ """
+ Walk a filesystem tree returning enough information to copy the files
+
+ :arg topdir: The directory that the filesystem tree is rooted at
+ :kwarg base_path: The initial directory structure to strip off of the
+ files for the destination directory. If this is None (the default),
+ the base_path is set to ``topdir``.
+ :kwarg local_follow: Whether to follow symlinks on the source. When set
+ to False, no symlinks are dereferenced. When set to True (the
+ default), the code will dereference most symlinks. However, symlinks
+ can still be present if needed to break a circular link.
+ :kwarg trailing_slash_detector: Function to determine if a path has
+ a trailing directory separator. Only needed when dealing with paths on
+ a remote machine (in which case, pass in a function that is aware of the
+ directory separator conventions on the remote machine).
+ :returns: dictionary of tuples. All of the path elements in the structure are text strings.
+ This separates all the files, directories, and symlinks along with
+ important information about each::
+
+ { 'files': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
+ 'directories': [('/absolute/path/to/copy/from', 'relative/path/to/copy/to'), ...],
+ 'symlinks': [('/symlink/target/path', 'relative/path/to/copy/to'), ...],
+ }
+
+ The ``symlinks`` field is only populated if ``local_follow`` is set to False
+ *or* a circular symlink cannot be dereferenced.
+
+ """
+ # Convert the path segments into byte strings
+
+ r_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ def _recurse(topdir, rel_offset, parent_dirs, rel_base=u''):
+ """
+ This is a closure (function utilizing variables from its parent
+ function's scope) so that we only need one copy of all the containers.
+ Note that this function uses side effects (See the Variables used from
+ outer scope).
+
+ :arg topdir: The directory we are walking for files
+ :arg rel_offset: Integer defining how many characters to strip off of
+ the beginning of a path
+ :arg parent_dirs: Directories that we're copying that this directory is in.
+ :kwarg rel_base: String to prepend to the path after ``rel_offset`` is
+ applied to form the relative path.
+
+ Variables used from the outer scope
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :r_files: Dictionary of files in the hierarchy. See the return value
+ for :func:`walk` for the structure of this dictionary.
+ :local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
+ """
+ for base_path, sub_folders, files in os.walk(topdir):
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
+
+ if os.path.islink(filepath):
+ # Dereference the symlink
+ real_file = os.path.realpath(filepath)
+ if local_follow and os.path.isfile(real_file):
+ # Add the file pointed to by the symlink
+ r_files['files'].append((real_file, dest_filepath))
+ else:
+ # Mark this file as a symlink to copy
+ r_files['symlinks'].append((os.readlink(filepath), dest_filepath))
+ else:
+ # Just a normal file
+ r_files['files'].append((filepath, dest_filepath))
+
+ for dirname in sub_folders:
+ dirpath = os.path.join(base_path, dirname)
+ dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
+ real_dir = os.path.realpath(dirpath)
+ dir_stats = os.stat(real_dir)
+
+ if os.path.islink(dirpath):
+ if local_follow:
+ if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
+ # Just insert the symlink if the target directory
+ # exists inside of the copy already
+ r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
+ else:
+ # Walk the dirpath to find all parent directories.
+ new_parents = set()
+ parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
+ for parent in range(len(parent_dir_list), 0, -1):
+ parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
+ if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
+ # Reached the point at which the directory
+ # tree is already known. Don't add any
+ # more or we might go to an ancestor that
+ # isn't being copied.
+ break
+ new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
+
+ if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
+ # This was a circular symlink. So add it as
+ # a symlink
+ r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
+ else:
+ # Walk the directory pointed to by the symlink
+ r_files['directories'].append((real_dir, dest_dirpath))
+ offset = len(real_dir) + 1
+ _recurse(real_dir, offset, parent_dirs.union(new_parents), rel_base=dest_dirpath)
+ else:
+ # Add the symlink to the destination
+ r_files['symlinks'].append((os.readlink(dirpath), dest_dirpath))
+ else:
+ # Just a normal directory
+ r_files['directories'].append((dirpath, dest_dirpath))
+
+ # Check if the source ends with a "/" so that we know which directory
+ # level to work at (similar to rsync)
+ source_trailing_slash = False
+ if trailing_slash_detector:
+ source_trailing_slash = trailing_slash_detector(topdir)
+ else:
+ source_trailing_slash = topdir.endswith(os.path.sep)
+
+ # Calculate the offset needed to strip the base_path to make relative
+ # paths
+ if base_path is None:
+ base_path = topdir
+ if not source_trailing_slash:
+ base_path = os.path.dirname(base_path)
+ if topdir.startswith(base_path):
+ offset = len(base_path)
+
+ # Make sure we're making the new paths relative
+ if trailing_slash_detector and not trailing_slash_detector(base_path):
+ offset += 1
+ elif not base_path.endswith(os.path.sep):
+ offset += 1
+
+ if os.path.islink(topdir) and not local_follow:
+ r_files['symlinks'] = (os.readlink(topdir), os.path.basename(topdir))
+ return r_files
+
+ dir_stats = os.stat(topdir)
+ parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
+ # Actually walk the directory hierarchy
+ _recurse(topdir, offset, parents)
+
+ return r_files
+
+
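+ # Example task (illustrative; paths are hypothetical):
+ #   - copy:
+ #       src: files/app.conf
+ #       dest: /etc/app/app.conf
+ #       mode: preserve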
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def _ensure_invocation(self, result):
+ # NOTE: adding invocation arguments here needs to be kept in sync with
+ # any no_log specified in the argument_spec in the module.
+ # This is not automatic.
+ # NOTE: do not add to this. This should be made a generic function for action plugins.
+ # This should also use the same argspec as the module instead of keeping it in sync.
+ if 'invocation' not in result:
+ if self._play_context.no_log:
+ result['invocation'] = "CENSORED: no_log is set"
+ else:
+ # NOTE: Should be removed in the future. For now keep this broken
+ # behaviour; have a look at PR 51582 for details
+ result['invocation'] = self._task.args.copy()
+ result['invocation']['module_args'] = self._task.args.copy()
+
+ if isinstance(result['invocation'], dict):
+ if 'content' in result['invocation']:
+ result['invocation']['content'] = 'CENSORED: content is a no_log parameter'
+ if result['invocation'].get('module_args', {}).get('content') is not None:
+ result['invocation']['module_args']['content'] = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+
+ return result
+
+ def _copy_file(self, source_full, source_rel, content, content_tempfile,
+ dest, task_vars, follow):
+ decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
+ force = boolean(self._task.args.get('force', 'yes'), strict=False)
+ raw = boolean(self._task.args.get('raw', 'no'), strict=False)
+
+ result = {}
+ result['diff'] = []
+
+ # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
+ try:
+ source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
+ except AnsibleFileNotFound as e:
+ result['failed'] = True
+ result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
+ return result
+
+ # Get the local mode and set if user wanted it preserved
+ # https://github.com/ansible/ansible-modules-core/issues/1124
+ lmode = None
+ if self._task.args.get('mode', None) == 'preserve':
+ lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)
+
+ # This is a kind of optimization - if the user told us the destination is
+ # a directory, do the path manipulation right away; otherwise we still
+ # check whether dest is a directory via a remote call below.
+ if self._connection._shell.path_has_trailing_slash(dest):
+ dest_file = self._connection._shell.join_path(dest, source_rel)
+ else:
+ dest_file = dest
+
+ # Attempt to get remote file info
+ dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)
+
+ if dest_status['exists'] and dest_status['isdir']:
+ # The dest is a directory.
+ if content is not None:
+ # If source was defined as content remove the temporary file and fail out.
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ result['failed'] = True
+ result['msg'] = "can not use content with a dir as dest"
+ return result
+ else:
+ # Append the relative source location to the destination and get remote stats again
+ dest_file = self._connection._shell.join_path(dest, source_rel)
+ dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, checksum=force)
+
+ if dest_status['exists'] and not force:
+ # remote_file exists so continue to next iteration.
+ return None
+
+ # Generate a hash of the local file.
+ local_checksum = checksum(source_full)
+
+ if local_checksum != dest_status['checksum']:
+ # The checksums don't match and we will change or error out.
+
+ if self._play_context.diff and not raw:
+ result['diff'].append(self._get_diff_data(dest_file, source_full, task_vars))
+
+ if self._play_context.check_mode:
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ result['changed'] = True
+ return result
+
+ # Define a remote directory that we will copy the file to.
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
+
+ remote_path = None
+
+ if not raw:
+ remote_path = self._transfer_file(source_full, tmp_src)
+ else:
+ self._transfer_file(source_full, dest_file)
+
+ # We have copied the file remotely and no longer require our content_tempfile
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ self._loader.cleanup_tmp_file(source_full)
+
+ # FIXME: I don't think this is needed when PIPELINING=0 because the source is created
+ # world readable. Access to the directory itself is controlled via fixup_perms2() as
+ # part of executing the module. Check that umask with scp/sftp/piped doesn't cause
+ # a problem before acting on this idea. (This idea would save a round-trip)
+ # fix file permissions when the copy is done as a different user
+ if remote_path:
+ self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
+
+ if raw:
+ # Continue to next iteration if raw is defined.
+ return None
+
+ # Run the copy module
+
+ # src and dest here come after original and override them
+ # we pass dest only to make sure it includes trailing slash in case of recursive copy
+ new_module_args = _create_remote_copy_args(self._task.args)
+ new_module_args.update(
+ dict(
+ src=tmp_src,
+ dest=dest,
+ _original_basename=source_rel,
+ follow=follow
+ )
+ )
+ if not self._task.args.get('checksum'):
+ new_module_args['checksum'] = local_checksum
+
+ if lmode:
+ new_module_args['mode'] = lmode
+
+ module_return = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
+
+ else:
+ # no need to transfer the file, already correct hash, but still need to call
+ # the file module in case we want to change attributes
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ self._loader.cleanup_tmp_file(source_full)
+
+ if raw:
+ return None
+
+ # Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
+ # If checksums match, and follow = True, find out if 'dest' is a link. If so,
+ # change it to point to the source of the link.
+ if follow:
+ dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=False)
+ if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow.keys():
+ dest = dest_status_nofollow['lnk_source']
+
+ # Build temporary module_args.
+ new_module_args = _create_remote_file_args(self._task.args)
+ new_module_args.update(
+ dict(
+ dest=dest,
+ _original_basename=source_rel,
+ recurse=False,
+ state='file',
+ )
+ )
+ # src is sent to the file module in _original_basename, not in src
+ try:
+ del new_module_args['src']
+ except KeyError:
+ pass
+
+ if lmode:
+ new_module_args['mode'] = lmode
+
+ # Execute the file module.
+ module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
+
+ if not module_return.get('checksum'):
+ module_return['checksum'] = local_checksum
+
+ result.update(module_return)
+ return result
+
+ def _create_content_tempfile(self, content):
+ ''' Create a tempfile containing defined content '''
+ fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(content_tempfile)
+ raise Exception(err)
+ finally:
+ f.close()
+ return content_tempfile
+
+ def _remove_tempfile_if_content_defined(self, content, content_tempfile):
+ if content is not None:
+ os.remove(content_tempfile)
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for file transfer operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+ content = self._task.args.get('content', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
+ local_follow = boolean(self._task.args.get('local_follow', True), strict=False)
+
+ result['failed'] = True
+ if not source and content is None:
+ result['msg'] = 'src (or content) is required'
+ elif not dest:
+ result['msg'] = 'dest is required'
+ elif source and content is not None:
+ result['msg'] = 'src and content are mutually exclusive'
+ elif content is not None and dest is not None and dest.endswith("/"):
+ result['msg'] = "can not use content with a dir as dest"
+ else:
+ del result['failed']
+
+ if result.get('failed'):
+ return self._ensure_invocation(result)
+
+ # Define content_tempfile in case we set it after finding content populated.
+ content_tempfile = None
+
+ # If content is defined make a tmp file and write the content into it.
+ if content is not None:
+ try:
+ # If content comes to us as a dict it should be decoded json.
+ # We need to encode it back into a string to write it out.
+ if isinstance(content, dict) or isinstance(content, list):
+ content_tempfile = self._create_content_tempfile(json.dumps(content))
+ else:
+ content_tempfile = self._create_content_tempfile(content)
+ source = content_tempfile
+ except Exception as err:
+ result['failed'] = True
+ result['msg'] = "could not write content temp file: %s" % to_native(err)
+ return self._ensure_invocation(result)
+
+ # if remote_src is set, run the copy module on the remote host and let it
+ # read the source there; nothing is transferred from the controller
+ elif remote_src:
+ result.update(self._execute_module(module_name='ansible.legacy.copy', task_vars=task_vars))
+ return self._ensure_invocation(result)
+ else:
+ # find_needle returns a path that may not have a trailing slash on
+ # a directory so we need to determine that now (we use it just
+ # like rsync does to figure out whether to include the directory
+ # or only the files inside the directory)
+ trailing_slash = source.endswith(os.path.sep)
+ try:
+ # find in expected paths
+ source = self._find_needle('files', source)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return self._ensure_invocation(result)
+
+ if trailing_slash != source.endswith(os.path.sep):
+ if source[-1] == os.path.sep:
+ source = source[:-1]
+ else:
+ source = source + os.path.sep
+
+ # A list of source file tuples (full_path, relative_path) that we will try to copy to the destination
+ source_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ # If source is a directory populate our list else source is a file and translate it to a tuple.
+ if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
+ # Get a list of the files we want to replicate on the remote side
+ source_files = _walk_dirs(source, local_follow=local_follow,
+ trailing_slash_detector=self._connection._shell.path_has_trailing_slash)
+
+ # If it's recursive copy, destination is always a dir,
+ # explicitly mark it so (note - copy module relies on this).
+ if not self._connection._shell.path_has_trailing_slash(dest):
+ dest = self._connection._shell.join_path(dest, '')
+ # FIXME: Can we optimize cases where there's only one file, no
+ # symlinks and any number of directories? In the original code,
+ # empty directories are not copied....
+ else:
+ source_files['files'] = [(source, os.path.basename(source))]
+
+ changed = False
+ module_return = dict(changed=False)
+
+ # A register for if we executed a module.
+ # Used to cut down on command calls when not recursive.
+ module_executed = False
+
+ # expand any user home dir specifier
+ dest = self._remote_expand_user(dest)
+
+ implicit_directories = set()
+ for source_full, source_rel in source_files['files']:
+ # copy files over. This happens first as directories that have
+ # a file do not need to be created later
+
+ # We only follow symlinks for files in the non-recursive case
+ if source_files['directories']:
+ follow = False
+ else:
+ follow = boolean(self._task.args.get('follow', False), strict=False)
+
+ module_return = self._copy_file(source_full, source_rel, content, content_tempfile, dest, task_vars, follow)
+ if module_return is None:
+ continue
+
+ if module_return.get('failed'):
+ result.update(module_return)
+ return self._ensure_invocation(result)
+
+ paths = os.path.split(source_rel)
+ dir_path = ''
+ for dir_component in paths:
+ dir_path = os.path.join(dir_path, dir_component)
+ implicit_directories.add(dir_path)
+ if 'diff' in result and not result['diff']:
+ del result['diff']
+ module_executed = True
+ changed = changed or module_return.get('changed', False)
+
+ for src, dest_path in source_files['directories']:
+ # Find directories that are leaves as they might not have been
+ # created yet.
+ if dest_path in implicit_directories:
+ continue
+
+ # Use file module to create these
+ new_module_args = _create_remote_file_args(self._task.args)
+ new_module_args['path'] = os.path.join(dest, dest_path)
+ new_module_args['state'] = 'directory'
+ new_module_args['mode'] = self._task.args.get('directory_mode', None)
+ new_module_args['recurse'] = False
+ del new_module_args['src']
+
+ module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
+
+ if module_return.get('failed'):
+ result.update(module_return)
+ return self._ensure_invocation(result)
+
+ module_executed = True
+ changed = changed or module_return.get('changed', False)
+
+ for target_path, dest_path in source_files['symlinks']:
+ # Copy symlinks over
+ new_module_args = _create_remote_file_args(self._task.args)
+ new_module_args['path'] = os.path.join(dest, dest_path)
+ new_module_args['src'] = target_path
+ new_module_args['state'] = 'link'
+ new_module_args['force'] = True
+
+ # Only follow remote symlinks in the non-recursive case
+ if source_files['directories']:
+ new_module_args['follow'] = False
+
+ # the file module cannot deal with 'preserve' mode, which is meaningless
+ # for symlinks anyway, so just don't pass it.
+ if new_module_args.get('mode', None) == 'preserve':
+ new_module_args.pop('mode')
+
+ module_return = self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
+ module_executed = True
+
+ if module_return.get('failed'):
+ result.update(module_return)
+ return self._ensure_invocation(result)
+
+ changed = changed or module_return.get('changed', False)
+
+ if module_executed and len(source_files['files']) == 1:
+ result.update(module_return)
+
+ # the file module returns the file path as 'path', but
+ # the copy module uses 'dest', so add it if it's not there
+ if 'path' in result and 'dest' not in result:
+ result['dest'] = result['path']
+ else:
+ result.update(dict(dest=dest, src=source, changed=changed))
+
+ # Delete tmp path
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return self._ensure_invocation(result)
diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
new file mode 100644
index 00000000..2584fd3d
--- /dev/null
+++ b/lib/ansible/plugins/action/debug.py
@@ -0,0 +1,80 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+# Copyright 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+from ansible.plugins.action import ActionBase
+
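+ # Example task (illustrative):
+ #   - debug:
+ #       var: ansible_facts['default_ipv4']
+ #       verbosity: 2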
+
+class ActionModule(ActionBase):
+ ''' Print statements during execution '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('msg', 'var', 'verbosity'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ if 'msg' in self._task.args and 'var' in self._task.args:
+ return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # get task verbosity
+ verbosity = int(self._task.args.get('verbosity', 0))
+
+ if verbosity <= self._display.verbosity:
+ if 'msg' in self._task.args:
+ result['msg'] = self._task.args['msg']
+
+ elif 'var' in self._task.args:
+ try:
+ results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True)
+ if results == self._task.args['var']:
+ # if results is not str/unicode type, raise an exception
+ if not isinstance(results, string_types):
+ raise AnsibleUndefinedVariable
+ # If var name is same as result, try to template it
+ results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
+ except AnsibleUndefinedVariable as e:
+ results = u"VARIABLE IS NOT DEFINED!"
+ if self._display.verbosity > 0:
+ results += u": %s" % to_text(e)
+
+ if isinstance(self._task.args['var'], (list, dict)):
+ # If var is a list or dict, use the type as key to display
+ result[to_text(type(self._task.args['var']))] = results
+ else:
+ result[self._task.args['var']] = results
+ else:
+ result['msg'] = 'Hello world!'
+
+ # force flag to make debug output module always verbose
+ result['_ansible_verbose_always'] = True
+ else:
+ result['skipped_reason'] = "Verbosity threshold not met."
+ result['skipped'] = True
+
+ result['failed'] = False
+
+ return result
diff --git a/lib/ansible/plugins/action/fail.py b/lib/ansible/plugins/action/fail.py
new file mode 100644
index 00000000..8d3450c8
--- /dev/null
+++ b/lib/ansible/plugins/action/fail.py
@@ -0,0 +1,43 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
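+ # Example task (illustrative; the variable is hypothetical):
+ #   - fail:
+ #       msg: 'deployment aborted, prerequisites not met'
+ #     when: not prerequisites_ok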
+
+class ActionModule(ActionBase):
+ ''' Fail with custom message '''
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('msg',))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ msg = 'Failed as requested from task'
+ if self._task.args and 'msg' in self._task.args:
+ msg = self._task.args.get('msg')
+
+ result['failed'] = True
+ result['msg'] = msg
+ return result
diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
new file mode 100644
index 00000000..4f05d2bf
--- /dev/null
+++ b/lib/ansible/plugins/action/fetch.py
@@ -0,0 +1,199 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import base64
+
+from ansible.errors import AnsibleActionFail, AnsibleActionSkip
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
+from ansible.utils.path import makedirs_safe, is_subpath
+
+display = Display()
+
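+ # Example task (illustrative; paths are hypothetical):
+ #   - fetch:
+ #       src: /var/log/app/app.log
+ #       dest: logs/
+ #       flat: no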
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for fetch operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ try:
+ if self._play_context.check_mode:
+ raise AnsibleActionSkip('check mode not (yet) supported for this module')
+
+ source = self._task.args.get('src', None)
+ original_dest = dest = self._task.args.get('dest', None)
+ flat = boolean(self._task.args.get('flat'), strict=False)
+ fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
+ validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)
+
+ msg = ''
+ # validate source and dest are strings FIXME: use basic.py and module specs
+ if not isinstance(source, string_types):
+ msg = "Invalid type supplied for source option, it must be a string"
+
+ if not isinstance(dest, string_types):
+ msg = "Invalid type supplied for dest option, it must be a string"
+
+ if source is None or dest is None:
+ msg = "src and dest are required"
+
+ if msg:
+ raise AnsibleActionFail(msg)
+
+ source = self._connection._shell.join_path(source)
+ source = self._remote_expand_user(source)
+
+ remote_checksum = None
+ if not self._connection.become:
+ # calculate checksum for the remote file, don't bother if using become as slurp will be used
+ # Force remote_checksum to follow symlinks because fetch always follows symlinks
+ remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)
+
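+ # _remote_checksum() reports failures as one-character codes rather than
+ # raising ('1' = file missing, '2' = unreadable); those cases fall through
+ # to the slurp path here or the error handling below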
+ # use slurp if permissions are lacking or privilege escalation is needed
+ remote_data = None
+ if remote_checksum in ('1', '2', None):
+ slurpres = self._execute_module(module_name='ansible.legacy.slurp', module_args=dict(src=source), task_vars=task_vars)
+ if slurpres.get('failed'):
+ if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
+ result['msg'] = "the remote file does not exist, not transferring, ignored"
+ result['file'] = source
+ result['changed'] = False
+ else:
+ result.update(slurpres)
+ return result
+ else:
+ if slurpres['encoding'] == 'base64':
+ remote_data = base64.b64decode(slurpres['content'])
+ if remote_data is not None:
+ remote_checksum = checksum_s(remote_data)
+
+ # calculate the destination name
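+ # (join_path('a', '') exposes the remote shell's path separator; if it
+ # does not contain the local os.path.sep the remote side uses
+ # Windows-style paths, so unquote and normalize backslashes)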
+ if os.path.sep not in self._connection._shell.join_path('a', ''):
+ source = self._connection._shell._unquote(source)
+ source_local = source.replace('\\', '/')
+ else:
+ source_local = source
+
+ # ensure we only use file name, avoid relative paths
+ if not is_subpath(dest, original_dest):
+ # TODO: ? dest = os.path.expanduser(dest.replace(('../','')))
+ raise AnsibleActionFail("Detected directory traversal, expected to be contained in '%s' but got '%s'" % (original_dest, dest))
+
+ if flat:
+ if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
+ raise AnsibleActionFail("dest is an existing directory, use a trailing slash if you want to fetch src into that directory")
+ if dest.endswith(os.sep):
+ # if the path ends with "/", we'll use the source filename as the
+ # destination filename
+ base = os.path.basename(source_local)
+ dest = os.path.join(dest, base)
+ if not dest.startswith("/"):
+ # if dest does not start with "/", we'll assume a relative path
+ dest = self._loader.path_dwim(dest)
+ else:
+ # files are saved in dest dir, with a subdir for each host, then the filename
+ if 'inventory_hostname' in task_vars:
+ target_name = task_vars['inventory_hostname']
+ else:
+ target_name = self._play_context.remote_addr
+ dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
+
+ if remote_checksum in ('0', '1', '2', '3', '4', '5'):
+ result['changed'] = False
+ result['file'] = source
+ if remote_checksum == '0':
+ result['msg'] = "unable to calculate the checksum of the remote file"
+ elif remote_checksum == '1':
+ result['msg'] = "the remote file does not exist"
+ elif remote_checksum == '2':
+ result['msg'] = "no read permission on remote file"
+ elif remote_checksum == '3':
+ result['msg'] = "remote file is a directory, fetch cannot work on directories"
+ elif remote_checksum == '4':
+ result['msg'] = "python isn't present on the system. Unable to compute checksum"
+ elif remote_checksum == '5':
+ result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without those installed"
+ # Historically, these don't fail because you may want to transfer
+ # a log file that possibly MAY exist but keep going to fetch other
+ # log files. Today, this is better achieved by adding
+ # ignore_errors or failed_when to the task. Control the behaviour
+ # via fail_on_missing
+ if fail_on_missing:
+ result['failed'] = True
+ del result['changed']
+ else:
+ result['msg'] += ", not transferring, ignored"
+ return result
+
+ dest = os.path.normpath(dest)
+
+ # calculate checksum for the local file
+ local_checksum = checksum(dest)
+
+ if remote_checksum != local_checksum:
+ # create the containing directories, if needed
+ makedirs_safe(os.path.dirname(dest))
+
+ # fetch the file and check for changes
+ if remote_data is None:
+ self._connection.fetch_file(source, dest)
+ else:
+ try:
+ f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
+ f.write(remote_data)
+ f.close()
+ except (IOError, OSError) as e:
+ raise AnsibleActionFail("Failed to fetch the file: %s" % e)
+ new_checksum = secure_hash(dest)
+ # For backwards compatibility. We'll return None on FIPS enabled systems
+ try:
+ new_md5 = md5(dest)
+ except ValueError:
+ new_md5 = None
+
+ if validate_checksum and new_checksum != remote_checksum:
+ result.update(dict(failed=True, md5sum=new_md5,
+ msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
+ checksum=new_checksum, remote_checksum=remote_checksum))
+ else:
+ result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
+ 'remote_md5sum': None, 'checksum': new_checksum,
+ 'remote_checksum': remote_checksum})
+ else:
+ # For backwards compatibility. We'll return None on FIPS enabled systems
+ try:
+ local_md5 = md5(dest)
+ except ValueError:
+ local_md5 = None
+ result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
+
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/lib/ansible/plugins/action/gather_facts.py b/lib/ansible/plugins/action/gather_facts.py
new file mode 100644
index 00000000..eac63e17
--- /dev/null
+++ b/lib/ansible/plugins/action/gather_facts.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from ansible import constants as C
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
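+ # Example task (illustrative):
+ #   - gather_facts:
+ #       parallel: yes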
+
+class ActionModule(ActionBase):
+
+ def _get_module_args(self, fact_module, task_vars):
+
+ mod_args = self._task.args.copy()
+
+ # deal with 'setup specific arguments'
+ if fact_module not in C._ACTION_SETUP:
+ # network facts modules must support gather_subset
+ if self._connection._load_name not in ('network_cli', 'httpapi', 'netconf'):
+ subset = mod_args.pop('gather_subset', None)
+ if subset not in ('all', ['all']):
+ self._display.warning('Ignoring subset(%s) for %s' % (subset, fact_module))
+
+ timeout = mod_args.pop('gather_timeout', None)
+ if timeout is not None:
+ self._display.warning('Ignoring timeout(%s) for %s' % (timeout, fact_module))
+
+ fact_filter = mod_args.pop('filter', None)
+ if fact_filter is not None:
+ self._display.warning('Ignoring filter(%s) for %s' % (fact_filter, fact_module))
+
+ # Strip out keys with ``None`` values, effectively mimicking ``omit`` behavior
+ # This ensures we don't pass a ``None`` value as an argument expecting a specific type
+ mod_args = dict((k, v) for k, v in mod_args.items() if v is not None)
+
+ # handle module defaults
+ mod_args = get_action_args_with_defaults(fact_module, mod_args, self._task.module_defaults, self._templar, self._task._ansible_internal_redirect_list)
+
+ return mod_args
+
+ def _combine_task_result(self, result, task_result):
+ filtered_res = {
+ 'ansible_facts': task_result.get('ansible_facts', {}),
+ 'warnings': task_result.get('warnings', []),
+ 'deprecations': task_result.get('deprecations', []),
+ }
+
+ # on conflict the last plugin processed wins, but try to do deep merge and append to lists.
+ return merge_hash(result, filtered_res, list_merge='append_rp')
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ result['ansible_facts'] = {}
+
+ modules = C.config.get_config_value('FACTS_MODULES', variables=task_vars)
+ parallel = task_vars.pop('ansible_facts_parallel', self._task.args.pop('parallel', None))
+ if 'smart' in modules:
+ connection_map = C.config.get_config_value('CONNECTION_FACTS_MODULES', variables=task_vars)
+ network_os = self._task.args.get('network_os', task_vars.get('ansible_network_os', task_vars.get('ansible_facts', {}).get('network_os')))
+ modules.extend([connection_map.get(network_os or self._connection._load_name, 'ansible.legacy.setup')])
+ modules.pop(modules.index('smart'))
+
+ failed = {}
+ skipped = {}
+
+ if parallel is None:
+ # default to async only when there is more than one module to run
+ parallel = len(modules) > 1
+ else:
+ parallel = boolean(parallel)
+
+ if not parallel:
+ # serially execute each module
+ for fact_module in modules:
+ # just one module, no need for fancy async
+ mod_args = self._get_module_args(fact_module, task_vars)
+ res = self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=False)
+ if res.get('failed', False):
+ failed[fact_module] = res
+ elif res.get('skipped', False):
+ skipped[fact_module] = res
+ else:
+ result = self._combine_task_result(result, res)
+
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ else:
+ # do it async
+ jobs = {}
+ for fact_module in modules:
+ mod_args = self._get_module_args(fact_module, task_vars)
+ self._display.vvvv("Running %s" % fact_module)
+ jobs[fact_module] = (self._execute_module(module_name=fact_module, module_args=mod_args, task_vars=task_vars, wrap_async=True))
+
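+ # poll the async jobs round-robin; when one finishes, merge its result
+ # and restart the iteration, since the jobs dict changed size (hence the
+ # break after del); the for-else sleeps when a full pass found nothing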
+ while jobs:
+ for module in jobs:
+ poll_args = {'jid': jobs[module]['ansible_job_id'], '_async_dir': os.path.dirname(jobs[module]['results_file'])}
+ res = self._execute_module(module_name='ansible.legacy.async_status', module_args=poll_args, task_vars=task_vars, wrap_async=False)
+ if res.get('finished', 0) == 1:
+ if res.get('failed', False):
+ failed[module] = res
+ elif res.get('skipped', False):
+ skipped[module] = res
+ else:
+ result = self._combine_task_result(result, res)
+ del jobs[module]
+ break
+ else:
+ time.sleep(0.1)
+ else:
+ time.sleep(0.5)
+
+ if skipped:
+ result['msg'] = "The following modules were skipped: %s\n" % (', '.join(skipped.keys()))
+ result['skipped_modules'] = skipped
+ if len(skipped) == len(modules):
+ result['skipped'] = True
+
+ if failed:
+ result['failed'] = True
+ result['msg'] = "The following modules failed to execute: %s\n" % (', '.join(failed.keys()))
+ result['failed_modules'] = failed
+
+ # tell executor facts were gathered
+ result['ansible_facts']['_ansible_facts_gathered'] = True
+
+ # hack to keep --verbose from showing all the setup module result
+ result['_ansible_verbose_override'] = True
+
+ return result
diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py
new file mode 100644
index 00000000..0958ad80
--- /dev/null
+++ b/lib/ansible/plugins/action/group_by.py
@@ -0,0 +1,51 @@
+# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six import string_types
+
+
+class ActionModule(ActionBase):
+ ''' Create inventory groups based on variables '''
+
+ # We need to be able to modify the inventory
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('key', 'parents'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if 'key' not in self._task.args:
+ result['failed'] = True
+ result['msg'] = "the 'key' param is required when using group_by"
+ return result
+
+ group_name = self._task.args.get('key')
+ parent_groups = self._task.args.get('parents', ['all'])
+ if isinstance(parent_groups, string_types):
+ parent_groups = [parent_groups]
+
+ result['changed'] = False
+ result['add_group'] = group_name.replace(' ', '-')
+ result['parent_groups'] = [name.replace(' ', '-') for name in parent_groups]
+ return result
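Note that group_by itself only normalizes the names; the actual groups are created later by the executor from the add_group and parent_groups keys. A rough sketch of that data contract, with illustrative inputs:

def group_by_result(key, parents=('all',)):
    """Mirror the dict returned above; spaces in group names become dashes."""
    return {
        'changed': False,
        'add_group': key.replace(' ', '-'),
        'parent_groups': [name.replace(' ', '-') for name in parents],
    }

print(group_by_result('web servers', parents=['dc one']))
# {'changed': False, 'add_group': 'web-servers', 'parent_groups': ['dc-one']}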
diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
new file mode 100644
index 00000000..07234537
--- /dev/null
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -0,0 +1,278 @@
+# Copyright: (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from os import path, walk
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
+ VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions', 'ignore_unknown_extensions']
+ VALID_FILE_ARGUMENTS = ['file', '_raw_params']
+ VALID_ALL = ['name']
+
+ def _set_dir_defaults(self):
+ if not self.depth:
+ self.depth = 0
+
+ if self.files_matching:
+ self.matcher = re.compile(r'{0}'.format(self.files_matching))
+ else:
+ self.matcher = None
+
+ if not self.ignore_files:
+ self.ignore_files = list()
+
+ if isinstance(self.ignore_files, string_types):
+ self.ignore_files = self.ignore_files.split()
+
+ elif isinstance(self.ignore_files, dict):
+ # a returned error dict would be ignored by the caller, so fail loudly
+ raise AnsibleError(
+ '{0} must be a list'.format(self.ignore_files)
+ )
+
+ def _set_args(self):
+ """ Set instance variables based on the arguments that were passed """
+
+ self.return_results_as_name = self._task.args.get('name', None)
+ self.source_dir = self._task.args.get('dir', None)
+ self.source_file = self._task.args.get('file', None)
+ if not self.source_dir and not self.source_file:
+ self.source_file = self._task.args.get('_raw_params')
+ if self.source_file:
+ self.source_file = self.source_file.rstrip('\n')
+
+ self.depth = self._task.args.get('depth', None)
+ self.files_matching = self._task.args.get('files_matching', None)
+ self.ignore_unknown_extensions = self._task.args.get('ignore_unknown_extensions', False)
+ self.ignore_files = self._task.args.get('ignore_files', None)
+ self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
+
+ # convert/validate extensions list; split on whitespace so a string
+ # such as "yaml yml json" is not exploded into single characters
+ if isinstance(self.valid_extensions, string_types):
+ self.valid_extensions = self.valid_extensions.split()
+ if not isinstance(self.valid_extensions, list):
+ raise AnsibleError('Invalid type for "extensions" option, it must be a list')
+
+ def run(self, tmp=None, task_vars=None):
+ """ Load yml files recursively from a directory.
+ """
+ del tmp # tmp no longer has any effect
+
+ if task_vars is None:
+ task_vars = dict()
+
+ self.show_content = True
+ self.included_files = []
+
+ # Validate arguments
+ dirs = 0
+ files = 0
+ for arg in self._task.args:
+ if arg in self.VALID_DIR_ARGUMENTS:
+ dirs += 1
+ elif arg in self.VALID_FILE_ARGUMENTS:
+ files += 1
+ elif arg in self.VALID_ALL:
+ pass
+ else:
+ raise AnsibleError('{0} is not a valid option in include_vars'.format(to_native(arg)))
+
+ if dirs and files:
+ raise AnsibleError("You are mixing file only and dir only arguments, these are incompatible")
+
+ # set internal vars from args
+ self._set_args()
+
+ results = dict()
+ if self.source_dir:
+ self._set_dir_defaults()
+ self._set_root_dir()
+ if not path.exists(self.source_dir):
+ failed = True
+ err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
+ elif not path.isdir(self.source_dir):
+ failed = True
+ err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
+ else:
+ for root_dir, filenames in self._traverse_dir_depth():
+ failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
+ if failed:
+ break
+ results.update(updated_results)
+ else:
+ try:
+ self.source_file = self._find_needle('vars', self.source_file)
+ failed, err_msg, updated_results = (
+ self._load_files(self.source_file)
+ )
+ if not failed:
+ results.update(updated_results)
+
+ except AnsibleError as e:
+ failed = True
+ err_msg = to_native(e)
+
+ if self.return_results_as_name:
+ scope = dict()
+ scope[self.return_results_as_name] = results
+ results = scope
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+
+ if failed:
+ result['failed'] = failed
+ result['message'] = err_msg
+
+ result['ansible_included_var_files'] = self.included_files
+ result['ansible_facts'] = results
+ result['_ansible_no_log'] = not self.show_content
+
+ return result
+
+ def _set_root_dir(self):
+ if self._task._role:
+ if self.source_dir.split('/')[0] == 'vars':
+ path_to_use = (
+ path.join(self._task._role._role_path, self.source_dir)
+ )
+ if path.exists(path_to_use):
+ self.source_dir = path_to_use
+ else:
+ path_to_use = (
+ path.join(
+ self._task._role._role_path, 'vars', self.source_dir
+ )
+ )
+ self.source_dir = path_to_use
+ else:
+ if hasattr(self._task._ds, '_data_source'):
+ current_dir = (
+ "/".join(self._task._ds._data_source.split('/')[:-1])
+ )
+ self.source_dir = path.join(current_dir, self.source_dir)
+
+ def _traverse_dir_depth(self):
+ """ Recursively iterate over a directory and sort the files in
+ alphabetical order. Do not iterate past the set depth.
+ The default depth is unlimited.
+ """
+ current_depth = 0
+ sorted_walk = list(walk(self.source_dir))
+ sorted_walk.sort(key=lambda x: x[0])
+ for current_root, current_dir, current_files in sorted_walk:
+ current_depth += 1
+ if current_depth <= self.depth or self.depth == 0:
+ current_files.sort()
+ yield (current_root, current_files)
+ else:
+ break
+
+ def _ignore_file(self, filename):
+ """ Return True if a file matches the list of ignore_files.
+ Args:
+ filename (str): The filename that is being matched against.
+
+ Returns:
+ Boolean
+ """
+ for file_type in self.ignore_files:
+ try:
+ if re.search(r'{0}$'.format(file_type), filename):
+ return True
+ except Exception:
+ err_msg = 'Invalid regular expression: {0}'.format(file_type)
+ raise AnsibleError(err_msg)
+ return False
+
+ def _is_valid_file_ext(self, source_file):
+ """ Verify if source file has a valid extension
+ Args:
+ source_file (str): The full path of the source file or the bare file name.
+ Returns:
+ Bool
+ """
+ file_ext = path.splitext(source_file)
+ return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
+
+ def _load_files(self, filename, validate_extensions=False):
+ """ Loads a file and converts the output into a valid Python dict.
+ Args:
+ filename (str): The source file.
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ results = dict()
+ failed = False
+ err_msg = ''
+ if validate_extensions and not self._is_valid_file_ext(filename):
+ failed = True
+ err_msg = ('{0} does not have a valid extension: {1}'.format(to_native(filename), ', '.join(self.valid_extensions)))
+ else:
+ b_data, show_content = self._loader._get_file_contents(filename)
+ data = to_text(b_data, errors='surrogate_or_strict')
+
+ self.show_content = show_content
+ data = self._loader.load(data, file_name=filename, show_content=show_content)
+ if not data:
+ data = dict()
+ if not isinstance(data, dict):
+ failed = True
+ err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
+ else:
+ self.included_files.append(filename)
+ results.update(data)
+
+ return failed, err_msg, results
+
+ def _load_files_in_dir(self, root_dir, var_files):
+ """ Load the found yml files and update/overwrite the dictionary.
+ Args:
+ root_dir (str): The base directory of the list of files that is being passed.
+ var_files: (list): List of files to iterate over and load into a dictionary.
+
+ Returns:
+ Tuple (bool, str, dict)
+ """
+ results = dict()
+ failed = False
+ err_msg = ''
+ for filename in var_files:
+ stop_iter = False
+ # Never include main.yml from a role, as that is the default included by the role
+ if self._task._role:
+ if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
+ stop_iter = True
+ continue
+
+ filepath = path.join(root_dir, filename)
+ if self.files_matching:
+ if not self.matcher.search(filename):
+ stop_iter = True
+
+ if not stop_iter and not failed:
+ if self.ignore_unknown_extensions:
+ if path.exists(filepath) and not self._ignore_file(filename) and self._is_valid_file_ext(filename):
+ failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
+ if not failed:
+ results.update(loaded_data)
+ else:
+ if path.exists(filepath) and not self._ignore_file(filename):
+ failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
+ if not failed:
+ results.update(loaded_data)
+
+ return failed, err_msg, results
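For reference, _traverse_dir_depth counts directories visited in sorted order rather than true nesting depth, and depth=0 means unlimited. A self-contained sketch of the same traversal outside Ansible:

from os import walk

def traverse_dir_depth(source_dir, depth=0):
    """Yield (root, sorted files), visiting at most `depth` directories
    in sorted path order; depth=0 means no limit (mirrors the plugin's counter)."""
    sorted_walk = sorted(walk(source_dir), key=lambda entry: entry[0])
    for visited, (root, _subdirs, files) in enumerate(sorted_walk, start=1):
        if depth and visited > depth:
            break
        yield root, sorted(files)

for root, files in traverse_dir_depth('.', depth=2):
    print(root, files)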
diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py
new file mode 100644
index 00000000..cb91521a
--- /dev/null
+++ b/lib/ansible/plugins/action/normal.py
@@ -0,0 +1,59 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+
+ # individual modules might disagree, but as the generic action plugin we pass at this point.
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ if result.get('invocation', {}).get('module_args'):
+ # avoid passing to modules in case of no_log
+ # should not be set anymore but here for backwards compatibility
+ del result['invocation']['module_args']
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # do work!
+ result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
+
+ # hack to keep --verbose from showing all the setup module result
+ # moved from setup module as now we filter out all _ansible_ from result
+ # FIXME: is this still accurate with gather_facts etc, or does it need support for FQ and other names?
+ if self._task.action in C._ACTION_SETUP:
+ result['_ansible_verbose_override'] = True
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
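The result handling above hinges on merge_hash combining the superclass result with the module result. A simplified stand-in for ansible.utils.vars.merge_hash is sketched below; the real function also supports list-merge strategies such as the 'append_rp' used by gather_facts.

def merge_hash(a, b):
    """Recursively merge b into a copy of a; right-hand scalars win."""
    merged = dict(a)
    for key, value in b.items():
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = merge_hash(merged[key], value)
        else:
            merged[key] = value
    return merged

print(merge_hash({'ansible_facts': {'os': 'linux'}, 'rc': 1},
                 {'ansible_facts': {'pkg_mgr': 'apt'}, 'rc': 0}))
# {'ansible_facts': {'os': 'linux', 'pkg_mgr': 'apt'}, 'rc': 0}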
diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py
new file mode 100644
index 00000000..c1638658
--- /dev/null
+++ b/lib/ansible/plugins/action/package.py
@@ -0,0 +1,94 @@
+# (c) 2015, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleAction, AnsibleActionFail
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ BUILTIN_PKG_MGR_MODULES = set(['apk', 'apt', 'dnf', 'homebrew', 'installp', 'macports', 'opkg', 'portage', 'pacman',
+ 'pkg5', 'pkgin', 'pkgng', 'sorcery', 'svr4pkg', 'swdepot', 'swupd', 'urpmi', 'xbps', 'yum', 'zypper'])
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for package operations '''
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ module = self._task.args.get('use', 'auto')
+
+ if module == 'auto':
+ try:
+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts
+ module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
+ else:
+ module = self._templar.template('{{ansible_facts.pkg_mgr}}')
+ except Exception:
+ pass # could not get it from template!
+
+ try:
+ if module == 'auto':
+ facts = self._execute_module(
+ module_name='ansible.legacy.setup',
+ module_args=dict(filter='ansible_pkg_mgr', gather_subset='!all'),
+ task_vars=task_vars)
+ display.debug("Facts %s" % facts)
+ module = facts.get('ansible_facts', {}).get('ansible_pkg_mgr', 'auto')
+
+ if module != 'auto':
+ if not self._shared_loader_obj.module_loader.has_plugin(module):
+ raise AnsibleActionFail('Could not find a module for %s.' % module)
+ else:
+ # run the 'package' module
+ new_module_args = self._task.args.copy()
+ if 'use' in new_module_args:
+ del new_module_args['use']
+
+ # get defaults for specific module
+ new_module_args = get_action_args_with_defaults(
+ module, new_module_args, self._task.module_defaults, self._templar, self._task._ansible_internal_redirect_list
+ )
+
+ if module in self.BUILTIN_PKG_MGR_MODULES:
+ # prefix with ansible.legacy to eliminate external collisions while still allowing library/ override
+ module = 'ansible.legacy.' + module
+
+ display.vvvv("Running %s" % module)
+ result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ else:
+ raise AnsibleActionFail('Could not detect which package manager to use. Try gathering facts or setting the "use" option.')
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ if not self._task.async_val:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
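The 'auto' dispatch reduces to: resolve the module name from a fact, then pin known built-ins to the ansible.legacy namespace so collection search cannot shadow them. A hedged sketch of that resolution, with a made-up facts dict:

BUILTIN_PKG_MGR_MODULES = {'apk', 'apt', 'dnf', 'pacman', 'yum', 'zypper'}

def resolve_pkg_module(requested, facts):
    """Resolve 'auto' to a concrete module name from a facts dict."""
    module = requested
    if module == 'auto':
        module = facts.get('ansible_pkg_mgr', 'auto')
    if module == 'auto':
        raise RuntimeError('could not detect which package manager to use')
    if module in BUILTIN_PKG_MGR_MODULES:
        # pin to ansible.legacy so collection search cannot shadow it
        module = 'ansible.legacy.' + module
    return module

print(resolve_pkg_module('auto', {'ansible_pkg_mgr': 'apt'}))  # ansible.legacy.apt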
diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
new file mode 100644
index 00000000..5dbaa020
--- /dev/null
+++ b/lib/ansible/plugins/action/pause.py
@@ -0,0 +1,301 @@
+# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import signal
+import sys
+import termios
+import time
+import tty
+
+from os import (
+ getpgrp,
+ isatty,
+ tcgetpgrp,
+)
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import PY3
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+try:
+ import curses
+
+ # Nest the try except since curses.error is not available if curses did not import
+ try:
+ curses.setupterm()
+ HAS_CURSES = True
+ except (curses.error, TypeError):
+ HAS_CURSES = False
+except ImportError:
+ HAS_CURSES = False
+
+if HAS_CURSES:
+ MOVE_TO_BOL = curses.tigetstr('cr')
+ CLEAR_TO_EOL = curses.tigetstr('el')
+else:
+ MOVE_TO_BOL = b'\r'
+ CLEAR_TO_EOL = b'\x1b[K'
+
+
+class AnsibleTimeoutExceeded(Exception):
+ pass
+
+
+def timeout_handler(signum, frame):
+ raise AnsibleTimeoutExceeded
+
+
+ def clear_line(stdout):
+ # MOVE_TO_BOL and CLEAR_TO_EOL are already complete escape sequences
+ stdout.write(MOVE_TO_BOL)
+ stdout.write(CLEAR_TO_EOL)
+
+
+def is_interactive(fd=None):
+ if fd is None:
+ return False
+
+ if isatty(fd):
+ # Compare the current process group to the process group associated
+ # with terminal of the given file descriptor to determine if the process
+ # is running in the background.
+ return getpgrp() == tcgetpgrp(fd)
+ else:
+ return False
+
+
+class ActionModule(ActionBase):
+ ''' pauses execution for a length of time, or until input is received '''
+
+ BYPASS_HOST_LOOP = True
+ _VALID_ARGS = frozenset(('echo', 'minutes', 'prompt', 'seconds'))
+
+ def run(self, tmp=None, task_vars=None):
+ ''' run the pause action module '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ duration_unit = 'minutes'
+ prompt = None
+ seconds = None
+ echo = True
+ echo_prompt = ''
+ result.update(dict(
+ changed=False,
+ rc=0,
+ stderr='',
+ stdout='',
+ start=None,
+ stop=None,
+ delta=None,
+ echo=echo
+ ))
+
+ # Should keystrokes be echoed to stdout?
+ if 'echo' in self._task.args:
+ try:
+ echo = boolean(self._task.args['echo'])
+ result['echo'] = echo
+ except TypeError as e:
+ result['failed'] = True
+ result['msg'] = to_native(e)
+ return result
+
+ # Add a note saying the output is hidden if echo is disabled
+ if not echo:
+ echo_prompt = ' (output is hidden)'
+
+ # Is 'prompt' a key in 'args'?
+ if 'prompt' in self._task.args:
+ prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), self._task.args['prompt'], echo_prompt)
+ else:
+ # If no custom prompt is specified, set a default prompt
+ prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
+
+ # Are 'minutes' or 'seconds' keys present in 'args'?
+ if 'minutes' in self._task.args or 'seconds' in self._task.args:
+ try:
+ if 'minutes' in self._task.args:
+ # The time() command operates in seconds so we need to
+ # recalculate for minutes=X values.
+ seconds = int(self._task.args['minutes']) * 60
+ else:
+ seconds = int(self._task.args['seconds'])
+ duration_unit = 'seconds'
+
+ except ValueError as e:
+ result['failed'] = True
+ result['msg'] = u"non-integer value given for prompt duration:\n%s" % to_text(e)
+ return result
+
+ ########################################################################
+ # Begin the hard work!
+
+ start = time.time()
+ result['start'] = to_text(datetime.datetime.now())
+ result['user_input'] = b''
+
+ stdin_fd = None
+ old_settings = None
+ try:
+ if seconds is not None:
+ if seconds < 1:
+ seconds = 1
+
+ # setup the alarm handler
+ signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(seconds)
+
+ # show the timer and control prompts
+ display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
+ display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
+
+ # show the prompt specified in the task
+ if 'prompt' in self._task.args:
+ display.display(prompt)
+
+ else:
+ display.display(prompt)
+
+ # save the attributes on the existing (duped) stdin so
+ # that we can restore them later after we set raw mode
+ stdin_fd = None
+ stdout_fd = None
+ try:
+ if PY3:
+ stdin = self._connection._new_stdin.buffer
+ stdout = sys.stdout.buffer
+ else:
+ stdin = self._connection._new_stdin
+ stdout = sys.stdout
+ stdin_fd = stdin.fileno()
+ stdout_fd = stdout.fileno()
+ except (ValueError, AttributeError):
+ # ValueError: someone is using a closed file descriptor as stdin
+ # AttributeError: someone is using a null file descriptor as stdin on Windows
+ stdin = None
+ interactive = is_interactive(stdin_fd)
+ if interactive:
+ # grab actual Ctrl+C sequence
+ try:
+ intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
+ except Exception:
+ # unsupported/not present, use default
+ intr = b'\x03' # value for Ctrl+C
+
+ # get backspace sequences
+ try:
+ backspace = termios.tcgetattr(stdin_fd)[6][termios.VERASE]
+ except Exception:
+ backspace = [b'\x7f', b'\x08']
+
+ old_settings = termios.tcgetattr(stdin_fd)
+ tty.setraw(stdin_fd)
+
+ # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
+ # stdout to a file since a file cannot be set to raw mode.
+ if isatty(stdout_fd):
+ tty.setraw(stdout_fd)
+
+ # Only echo input if no timeout is specified
+ if not seconds and echo:
+ new_settings = termios.tcgetattr(stdin_fd)
+ new_settings[3] = new_settings[3] | termios.ECHO
+ termios.tcsetattr(stdin_fd, termios.TCSANOW, new_settings)
+
+ # flush the buffer to make sure no previous key presses
+ # are read in below
+ termios.tcflush(stdin, termios.TCIFLUSH)
+
+ while True:
+ if not interactive:
+ display.warning("Not waiting for response to prompt as stdin is not interactive")
+ if seconds is not None:
+ # Give the signal handler enough time to timeout
+ time.sleep(seconds + 1)
+ break
+
+ try:
+ key_pressed = stdin.read(1)
+
+ if key_pressed == intr: # value for Ctrl+C
+ clear_line(stdout)
+ raise KeyboardInterrupt
+
+ # read key presses and act accordingly
+ if key_pressed in (b'\r', b'\n'):
+ clear_line(stdout)
+ break
+ elif key_pressed in backspace:
+ # delete a character if backspace is pressed
+ result['user_input'] = result['user_input'][:-1]
+ clear_line(stdout)
+ if echo:
+ stdout.write(result['user_input'])
+ stdout.flush()
+ else:
+ result['user_input'] += key_pressed
+
+ except KeyboardInterrupt:
+ signal.alarm(0)
+ display.display("Press 'C' to continue the play or 'A' to abort \r"),
+ if self._c_or_a(stdin):
+ clear_line(stdout)
+ break
+
+ clear_line(stdout)
+
+ raise AnsibleError('user requested abort!')
+
+ except AnsibleTimeoutExceeded:
+ # this is the exception we expect when the alarm signal
+ # fires, so we simply ignore it to move into the cleanup
+ pass
+ finally:
+ # cleanup and save some information
+ # restore the old settings on the duped stdin fd
+ if None not in (stdin_fd, old_settings) and isatty(stdin_fd):
+ termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)
+
+ duration = time.time() - start
+ result['stop'] = to_text(datetime.datetime.now())
+ result['delta'] = int(duration)
+
+ if duration_unit == 'minutes':
+ duration = round(duration / 60.0, 2)
+ else:
+ duration = round(duration, 2)
+ result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
+
+ result['user_input'] = to_text(result['user_input'], errors='surrogate_or_strict')
+ return result
+
+ def _c_or_a(self, stdin):
+ while True:
+ key_pressed = stdin.read(1)
+ if key_pressed.lower() == b'a':
+ return False
+ elif key_pressed.lower() == b'c':
+ return True
diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py
new file mode 100644
index 00000000..b82ed340
--- /dev/null
+++ b/lib/ansible/plugins/action/raw.py
@@ -0,0 +1,50 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ if self._task.environment and any(self._task.environment):
+ self._display.warning('raw module does not support the environment keyword')
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if self._play_context.check_mode:
+ # in --check mode, always skip this module execution
+ result['skipped'] = True
+ return result
+
+ executable = self._task.args.get('executable', False)
+ result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))
+
+ result['changed'] = True
+
+ if 'rc' in result and result['rc'] != 0:
+ result['failed'] = True
+ result['msg'] = 'non-zero return code'
+
+ return result
diff --git a/lib/ansible/plugins/action/reboot.py b/lib/ansible/plugins/action/reboot.py
new file mode 100644
index 00000000..d898a1ae
--- /dev/null
+++ b/lib/ansible/plugins/action/reboot.py
@@ -0,0 +1,446 @@
+# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import random
+import time
+
+from datetime import datetime, timedelta
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.module_utils.common.validation import check_type_str
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'boot_time_command',
+ 'connect_timeout',
+ 'msg',
+ 'post_reboot_delay',
+ 'pre_reboot_delay',
+ 'test_command',
+ 'reboot_timeout',
+ 'search_paths'
+ ))
+
+ DEFAULT_REBOOT_TIMEOUT = 600
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_REBOOT_DELAY = 0
+ DEFAULT_POST_REBOOT_DELAY = 0
+ DEFAULT_TEST_COMMAND = 'whoami'
+ DEFAULT_BOOT_TIME_COMMAND = 'cat /proc/sys/kernel/random/boot_id'
+ DEFAULT_REBOOT_MESSAGE = 'Reboot initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-r {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ DEPRECATED_ARGS = {}
+
+ BOOT_TIME_COMMANDS = {
+ 'freebsd': '/sbin/sysctl kern.boottime',
+ 'openbsd': '/sbin/sysctl kern.boottime',
+ 'macosx': 'who -b',
+ 'solaris': 'who -b',
+ 'sunos': 'who -b',
+ 'vmkernel': 'grep booted /var/log/vmksummary.log | tail -n 1',
+ 'aix': 'who -b',
+ }
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'reboot',
+ 'vmkernel': 'reboot',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-r +{delay_min} "{message}"',
+ 'freebsd': '-r +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-r +{delay_min} "{message}"',
+ 'openbsd': '-r +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 6 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 6 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fr',
+ }
+
+ TEST_COMMANDS = {
+ 'solaris': 'who',
+ 'vmkernel': 'who',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def pre_reboot_delay(self):
+ return self._check_delay('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY)
+
+ @property
+ def post_reboot_delay(self):
+ return self._check_delay('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, self._task.args.get(key + '_sec', default)))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
+
+ def get_shutdown_command_args(self, distribution):
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+ # Convert seconds to minutes. If less than 60, this ends up as 0.
+ delay_min = self.pre_reboot_delay // 60
+ reboot_message = self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE)
+ return args.format(delay_sec=self.pre_reboot_delay, delay_min=delay_min, message=reboot_message)
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+ default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ # FIXME: switch all this to user arg spec validation methods when they are available
+ # Convert bare strings to a list
+ if is_string(search_paths):
+ search_paths = [search_paths]
+
+ # Error if we didn't get a list
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ try:
+ incorrect_type = any(not is_string(x) for x in search_paths)
+ if not isinstance(search_paths, list) or incorrect_type:
+ raise TypeError
+ except TypeError:
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ self._shutdown_command = full_path[0]
+ return self._shutdown_command
+
+ def deprecated_args(self):
+ for arg, version in self.DEPRECATED_ARGS.items():
+ if self._task.args.get(arg) is not None:
+ display.warning("Since Ansible {version}, {arg} is no longer a valid option for {action}".format(
+ version=version,
+ arg=arg,
+ action=self._task.action))
+
+ def get_system_boot_time(self, distribution):
+ boot_time_command = self._get_value_from_facts('BOOT_TIME_COMMANDS', distribution, 'DEFAULT_BOOT_TIME_COMMAND')
+ if self._task.args.get('boot_time_command'):
+ boot_time_command = self._task.args.get('boot_time_command')
+
+ try:
+ check_type_str(boot_time_command, allow_conversion=False)
+ except TypeError as e:
+ raise AnsibleError("Invalid value given for 'boot_time_command': %s." % to_native(e))
+
+ display.debug("{action}: getting boot time with command: '{command}'".format(action=self._task.action, command=boot_time_command))
+ command_result = self._low_level_execute_command(boot_time_command, sudoable=self.DEFAULT_SUDOABLE)
+
+ if command_result['rc'] != 0:
+ stdout = command_result['stdout']
+ stderr = command_result['stderr']
+ raise AnsibleError("{action}: failed to get host boot time info, rc: {rc}, stdout: {out}, stderr: {err}".format(
+ action=self._task.action,
+ rc=command_result['rc'],
+ out=to_native(stdout),
+ err=to_native(stderr)))
+ display.debug("{action}: last boot time: {boot}".format(action=self._task.action, boot=command_result['stdout'].strip()))
+ return command_result['stdout'].strip()
+
+ def check_boot_time(self, distribution, previous_boot_time):
+ display.vvv("{action}: attempting to get system boot time".format(action=self._task.action))
+ connect_timeout = self._task.args.get('connect_timeout', self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))
+
+ # override connection timeout from defaults to custom value
+ if connect_timeout:
+ try:
+ display.debug("{action}: setting connect_timeout to {value}".format(action=self._task.action, value=connect_timeout))
+ self._connection.set_option("connection_timeout", connect_timeout)
+ self._connection.reset()
+ except AttributeError:
+ display.warning("Connection plugin does not allow the connection timeout to be overridden")
+
+ # try and get boot time
+ current_boot_time = self.get_system_boot_time(distribution)
+
+ # FreeBSD returns an empty string immediately before reboot so adding a length
+ # check to prevent prematurely assuming system has rebooted
+ if len(current_boot_time) == 0 or current_boot_time == previous_boot_time:
+ raise ValueError("boot time has not changed")
+
+ def run_test_command(self, distribution, **kwargs):
+ test_command = self._task.args.get('test_command', self._get_value_from_facts('TEST_COMMANDS', distribution, 'DEFAULT_TEST_COMMAND'))
+ display.vvv("{action}: attempting post-reboot test command".format(action=self._task.action))
+ display.debug("{action}: attempting post-reboot test command '{command}'".format(action=self._task.action, command=test_command))
+ try:
+ command_result = self._low_level_execute_command(test_command, sudoable=self.DEFAULT_SUDOABLE)
+ except Exception:
+ # may need to reset the connection in case another reboot occurred
+ # which has invalidated our connection
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+ raise
+
+ if command_result['rc'] != 0:
+ msg = 'Test command failed: {err} {out}'.format(
+ err=to_native(command_result['stderr']),
+ out=to_native(command_result['stdout']))
+ raise RuntimeError(msg)
+
+ display.vvv("{action}: system successfully rebooted".format(action=self._task.action))
+
+ def do_until_success_or_timeout(self, action, reboot_timeout, action_desc, distribution, action_kwargs=None):
+ max_end_time = datetime.utcnow() + timedelta(seconds=reboot_timeout)
+ if action_kwargs is None:
+ action_kwargs = {}
+
+ fail_count = 0
+ max_fail_sleep = 12
+
+ while datetime.utcnow() < max_end_time:
+ try:
+ action(distribution=distribution, **action_kwargs)
+ if action_desc:
+ display.debug('{action}: {desc} success'.format(action=self._task.action, desc=action_desc))
+ return
+ except Exception as e:
+ if isinstance(e, AnsibleConnectionFailure):
+ try:
+ self._connection.reset()
+ except AnsibleConnectionFailure:
+ pass
+ # Use exponential backoff with a max timeout, plus a little bit of randomness
+ random_int = random.randint(0, 1000) / 1000
+ fail_sleep = 2 ** fail_count + random_int
+ if fail_sleep > max_fail_sleep:
+ fail_sleep = max_fail_sleep + random_int
+ if action_desc:
+ try:
+ error = to_text(e).splitlines()[-1]
+ except IndexError:
+ # fall back to the full text of the original exception
+ error = to_text(e)
+ display.debug("{action}: {desc} fail '{err}', retrying in {sleep:.4} seconds...".format(
+ action=self._task.action,
+ desc=action_desc,
+ err=error,
+ sleep=fail_sleep))
+ fail_count += 1
+ time.sleep(fail_sleep)
+
+ raise TimedOutException('Timed out waiting for {desc} (timeout={timeout})'.format(desc=action_desc, timeout=reboot_timeout))
+
+ def perform_reboot(self, task_vars, distribution):
+ result = {}
+ reboot_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ reboot_command = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ try:
+ display.vvv("{action}: rebooting server...".format(action=self._task.action))
+ display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
+ reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+ # If the connection is closed too quickly due to the system being shutdown, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ reboot_result['rc'] = 0
+
+ result['start'] = datetime.utcnow()
+
+ if reboot_result['rc'] != 0:
+ result['failed'] = True
+ result['rebooted'] = False
+ result['msg'] = "Reboot command failed. Error was {stdout}, {stderr}".format(
+ stdout=to_native(reboot_result['stdout'].strip()),
+ stderr=to_native(reboot_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ return result
+
+ def validate_reboot(self, distribution, original_connection_timeout=None, action_kwargs=None):
+ display.vvv('{action}: validating reboot'.format(action=self._task.action))
+ result = {}
+
+ try:
+ # keep on checking system boot_time with short connection responses
+ reboot_timeout = int(self._task.args.get('reboot_timeout', self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT)))
+
+ self.do_until_success_or_timeout(
+ action=self.check_boot_time,
+ action_desc="last boot time check",
+ reboot_timeout=reboot_timeout,
+ distribution=distribution,
+ action_kwargs=action_kwargs)
+
+ # Get the connect_timeout set on the connection to compare to the original
+ try:
+ connect_timeout = self._connection.get_option('connection_timeout')
+ except KeyError:
+ pass
+ else:
+ if original_connection_timeout != connect_timeout:
+ try:
+ display.debug("{action}: setting connect_timeout back to original value of {value}".format(
+ action=self._task.action,
+ value=original_connection_timeout))
+ self._connection.set_option("connection_timeout", original_connection_timeout)
+ self._connection.reset()
+ except (AnsibleError, AttributeError) as e:
+ # reset the connection to clear the custom connection timeout
+ display.debug("{action}: failed to reset connection_timeout back to default: {error}".format(action=self._task.action,
+ error=to_text(e)))
+
+ # finally run test command to ensure everything is working
+ # FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates
+ self.do_until_success_or_timeout(
+ action=self.run_test_command,
+ action_desc="post-reboot test command",
+ reboot_timeout=reboot_timeout,
+ distribution=distribution,
+ action_kwargs=action_kwargs)
+
+ result['rebooted'] = True
+ result['changed'] = True
+
+ except TimedOutException as toex:
+ result['failed'] = True
+ result['rebooted'] = True
+ result['msg'] = to_text(toex)
+ return result
+
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ # If running with local connection, fail so we don't reboot ourselves
+ if self._connection.transport == 'local':
+ msg = 'Running {0} with local connection would reboot the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'rebooted': False, 'failed': True, 'msg': msg}
+
+ if self._play_context.check_mode:
+ return {'changed': True, 'elapsed': 0, 'rebooted': True}
+
+ if task_vars is None:
+ task_vars = {}
+
+ self.deprecated_args()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Get current boot time
+ try:
+ previous_boot_time = self.get_system_boot_time(distribution)
+ except Exception as e:
+ result['failed'] = True
+ result['rebooted'] = False
+ result['msg'] = to_text(e)
+ return result
+
+ # Get the original connection_timeout option var so it can be reset after
+ original_connection_timeout = None
+ try:
+ original_connection_timeout = self._connection.get_option('connection_timeout')
+ display.debug("{action}: saving original connect_timeout of {timeout}".format(action=self._task.action, timeout=original_connection_timeout))
+ except KeyError:
+ display.debug("{action}: connect_timeout connection option has not been set".format(action=self._task.action))
+ # Initiate reboot
+ reboot_result = self.perform_reboot(task_vars, distribution)
+
+ if reboot_result['failed']:
+ result = reboot_result
+ elapsed = datetime.utcnow() - reboot_result['start']
+ result['elapsed'] = elapsed.seconds
+ return result
+
+ if self.post_reboot_delay != 0:
+ display.debug("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
+ display.vvv("{action}: waiting an additional {delay} seconds".format(action=self._task.action, delay=self.post_reboot_delay))
+ time.sleep(self.post_reboot_delay)
+
+ # Make sure reboot was successful
+ result = self.validate_reboot(distribution, original_connection_timeout, action_kwargs={'previous_boot_time': previous_boot_time})
+
+ elapsed = datetime.utcnow() - reboot_result['start']
+ result['elapsed'] = elapsed.seconds
+
+ return result
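do_until_success_or_timeout implements capped exponential backoff with jitter: sleep 2 ** failures seconds, capped at max_fail_sleep, plus up to one second of randomness, until a deadline passes. A standalone sketch of that retry loop (names are illustrative):

import random
import time
from datetime import datetime, timedelta

def retry_until(action, timeout_sec, max_fail_sleep=12):
    """Retry action() until it succeeds or timeout_sec elapses."""
    deadline = datetime.utcnow() + timedelta(seconds=timeout_sec)
    fail_count = 0
    while datetime.utcnow() < deadline:
        try:
            return action()
        except Exception:
            jitter = random.randint(0, 1000) / 1000.0
            fail_sleep = min(2 ** fail_count, max_fail_sleep) + jitter
            fail_count += 1
            time.sleep(fail_sleep)
    raise TimeoutError('timed out after %s seconds' % timeout_sec)

print(retry_until(lambda: 'ok', timeout_sec=5))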
diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py
new file mode 100644
index 00000000..ad73be88
--- /dev/null
+++ b/lib/ansible/plugins/action/script.py
@@ -0,0 +1,152 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shlex
+
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail, AnsibleActionSkip
+from ansible.executor.powershell import module_manifest as ps_manifest
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ # On Windows platform, absolute paths begin with a (back)slash
+ # after chopping off a potential drive letter.
+ windows_absolute_path_detection = re.compile(r'^(?:[a-zA-Z]\:)?(\\|\/)')
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for file transfer operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ try:
+ creates = self._task.args.get('creates')
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ if self._remote_file_exists(creates):
+ raise AnsibleActionSkip("%s exists, matching creates option" % creates)
+
+ removes = self._task.args.get('removes')
+ if removes:
+ # do not run the command if the line contains removes=filename
+ # and the filename does not exist. This allows idempotence
+ # of command executions.
+ if not self._remote_file_exists(removes):
+ raise AnsibleActionSkip("%s does not exist, matching removes option" % removes)
+
+ # The chdir must be absolute, because a relative path would rely on
+ # remote node behaviour & user config.
+ chdir = self._task.args.get('chdir')
+ if chdir:
+ # Powershell is the only Windows-path aware shell
+ if getattr(self._connection._shell, "_IS_WINDOWS", False) and \
+ not self.windows_absolute_path_detection.match(chdir):
+ raise AnsibleActionFail('chdir %s must be an absolute path for a Windows remote node' % chdir)
+ # Every other shell is unix-path-aware.
+ if not getattr(self._connection._shell, "_IS_WINDOWS", False) and not chdir.startswith('/'):
+ raise AnsibleActionFail('chdir %s must be an absolute path for a Unix-aware remote node' % chdir)
+
+ # Split out the script as the first item in raw_params using
+ # shlex.split() in order to support paths and files with spaces in the name.
+ # Any arguments passed to the script will be added back later.
+ raw_params = to_native(self._task.args.get('_raw_params', ''), errors='surrogate_or_strict')
+ parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
+ source = parts[0]
+
+ # Support executable paths and files with spaces in the name.
+ executable = to_native(self._task.args.get('executable', ''), errors='surrogate_or_strict')
+
+ try:
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=self._task.args.get('decrypt', True))
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ # now we execute script, always assume changed.
+ result['changed'] = True
+
+ if not self._play_context.check_mode:
+ # transfer the file to a remote tmp location
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
+ os.path.basename(source))
+
+ # Convert raw_params to text for the purpose of replacing the script since
+ # parts and tmp_src are both unicode strings and raw_params will be different
+ # depending on Python version.
+ #
+ # Once everything is encoded consistently, replace the script path on the remote
+ # system with the remainder of the raw_params. This preserves quoting in parameters
+ # that would have been removed by shlex.split().
+ target_command = to_text(raw_params).strip().replace(parts[0], tmp_src)
+
+ self._transfer_file(source, tmp_src)
+
+ # set file permissions, more permissive when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)
+
+ # add preparation steps to one ssh roundtrip executing the script
+ env_dict = dict()
+ env_string = self._compute_environment_string(env_dict)
+
+ if executable:
+ script_cmd = ' '.join([env_string, executable, target_command])
+ else:
+ script_cmd = ' '.join([env_string, target_command])
+
+ if self._play_context.check_mode:
+ raise _AnsibleActionDone()
+
+ script_cmd = self._connection._shell.wrap_for_exec(script_cmd)
+
+ exec_data = None
+ # PowerShell runs the script in a special wrapper to enable things
+ # like become and environment args
+ if getattr(self._connection._shell, "_IS_WINDOWS", False):
+ # FUTURE: use a more public method to get the exec payload
+ pc = self._play_context
+ exec_data = ps_manifest._create_powershell_wrapper(
+ to_bytes(script_cmd), source, {}, env_dict, self._task.async_val,
+ pc.become, pc.become_method, pc.become_user,
+ pc.become_pass, pc.become_flags, "script", task_vars, None
+ )
+ # build the necessary exec wrapper command
+ # FUTURE: this still doesn't let script work on Windows with non-pipelined connections or
+ # full manual exec of KEEP_REMOTE_FILES
+ script_cmd = self._connection._shell.build_module_command(env_string='', shebang='#!powershell', cmd='')
+
+ result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data, sudoable=True, chdir=chdir))
+
+ if 'rc' in result and result['rc'] != 0:
+ raise AnsibleActionFail('non-zero return code')
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
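The argument handling above uses shlex.split() only to locate the script path (the first token, even when quoted with spaces), then substitutes the remote tmp path inside the raw string so the quoting of the remaining arguments survives. A small sketch with made-up paths:

import shlex

raw_params = "'/opt/my scripts/deploy.sh' --env 'prod east' --force"
parts = shlex.split(raw_params)
source = parts[0]                        # /opt/my scripts/deploy.sh
tmp_src = '/tmp/ansible-tmp/deploy.sh'   # made-up remote location
# replace inside the raw string so the original quoting survives
target_command = raw_params.strip().replace(source, tmp_src)
print(target_command)
# '/tmp/ansible-tmp/deploy.sh' --env 'prod east' --force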
diff --git a/lib/ansible/plugins/action/service.py b/lib/ansible/plugins/action/service.py
new file mode 100644
index 00000000..42d44361
--- /dev/null
+++ b/lib/ansible/plugins/action/service.py
@@ -0,0 +1,101 @@
+# (c) 2015, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleAction, AnsibleActionFail
+from ansible.executor.module_common import get_action_args_with_defaults
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ UNUSED_PARAMS = {
+ 'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
+ }
+
+ # HACK: list of unqualified service manager names that are/were built-in; we prefix these with `ansible.legacy` to
+ # avoid collisions with collections search
+ BUILTIN_SVC_MGR_MODULES = set(['openwrt_init', 'service', 'systemd', 'sysvinit'])
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for service operations '''
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ module = self._task.args.get('use', 'auto').lower()
+
+ if module == 'auto':
+ try:
+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts
+ module = self._templar.template("{{hostvars['%s']['ansible_facts']['service_mgr']}}" % self._task.delegate_to)
+ else:
+ module = self._templar.template('{{ansible_facts.service_mgr}}')
+ except Exception:
+ pass # could not get it from template!
+
+ try:
+ if module == 'auto':
+ facts = self._execute_module(
+ module_name='ansible.legacy.setup',
+ module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
+ self._display.debug("Facts %s" % facts)
+ module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
+
+ if not module or module == 'auto' or not self._shared_loader_obj.module_loader.has_plugin(module):
+ module = 'ansible.legacy.service'
+
+ if module != 'auto':
+ # run the 'service' module
+ new_module_args = self._task.args.copy()
+ if 'use' in new_module_args:
+ del new_module_args['use']
+
+ if module in self.UNUSED_PARAMS:
+ for unused in self.UNUSED_PARAMS[module]:
+ if unused in new_module_args:
+ del new_module_args[unused]
+ self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
+
+ # get defaults for specific module
+ new_module_args = get_action_args_with_defaults(
+ module, new_module_args, self._task.module_defaults, self._templar, self._task._ansible_internal_redirect_list
+ )
+
+ # collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
+ if module in self.BUILTIN_SVC_MGR_MODULES:
+ module = 'ansible.legacy.' + module
+
+ self._display.vvvv("Running %s" % module)
+ result.update(self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ else:
+ raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ if not self._task.async_val:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
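Before handing off to a concrete backend, the plugin scrubs arguments that the backend does not understand (UNUSED_PARAMS) and warns about each. A tiny sketch of that scrubbing:

UNUSED_PARAMS = {'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args']}

def scrub_args(module, args):
    """Drop arguments the chosen backend does not understand, warning on each."""
    args = dict(args)
    for unused in UNUSED_PARAMS.get(module, ()):
        if unused in args:
            del args[unused]
            print('Ignoring "%s" as it is not used in "%s"' % (unused, module))
    return args

print(scrub_args('systemd', {'name': 'sshd', 'state': 'started', 'sleep': 5}))
# Ignoring "sleep" as it is not used in "systemd"
# {'name': 'sshd', 'state': 'started'}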
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
new file mode 100644
index 00000000..d7fe573c
--- /dev/null
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -0,0 +1,61 @@
+# Copyright 2013 Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import isidentifier
+
+import ansible.constants as C
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ facts = dict()
+
+ cacheable = boolean(self._task.args.pop('cacheable', False))
+
+ if self._task.args:
+ for (k, v) in iteritems(self._task.args):
+ k = self._templar.template(k)
+
+ if not isidentifier(k):
+ result['failed'] = True
+ result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
+ "letters, numbers and underscores." % k)
+ return result
+
+ if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
+ v = boolean(v, strict=False)
+ facts[k] = v
+
+ result['changed'] = False
+ result['ansible_facts'] = facts
+ result['_ansible_facts_cacheable'] = cacheable
+ return result
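
The per-key handling in set_fact reduces to: template the key, require a valid identifier, and coerce bare boolean-looking strings unless Jinja2 native types are enabled. A rough standalone sketch, with str.isidentifier standing in for ansible.utils.vars.isidentifier (which additionally rejects Python keywords):

    def normalize_fact(key, value, jinja2_native=False):
        if not key.isidentifier():  # stand-in; the real check also rejects keywords
            raise ValueError("The variable name '%s' is not valid." % key)
        if not jinja2_native and isinstance(value, str) and value.lower() in ('true', 'false', 'yes', 'no'):
            value = value.lower() in ('true', 'yes')  # stand-in for boolean(value, strict=False)
        return key, value

    print(normalize_fact('is_prod', 'yes'))  # ('is_prod', True)
    print(normalize_fact('port', '8080'))    # ('port', '8080') -- left untouched
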
diff --git a/lib/ansible/plugins/action/set_stats.py b/lib/ansible/plugins/action/set_stats.py
new file mode 100644
index 00000000..f9fe8b30
--- /dev/null
+++ b/lib/ansible/plugins/action/set_stats.py
@@ -0,0 +1,77 @@
+# Copyright 2016 Ansible (RedHat, Inc)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import isidentifier
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('aggregate', 'data', 'per_host'))
+
+ # TODO: document this in non-empty set_stats.py module
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ stats = {'data': {}, 'per_host': False, 'aggregate': True}
+
+ if self._task.args:
+ data = self._task.args.get('data', {})
+
+ if not isinstance(data, dict):
+ data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)
+
+ if not isinstance(data, dict):
+ result['failed'] = True
+ result['msg'] = "The 'data' option needs to be a dictionary/hash"
+ return result
+
+ # set boolean options, defaults are set above in stats init
+ for opt in ['per_host', 'aggregate']:
+ val = self._task.args.get(opt, None)
+ if val is not None:
+ if not isinstance(val, bool):
+ stats[opt] = boolean(self._templar.template(val), strict=False)
+ else:
+ stats[opt] = val
+
+ for (k, v) in iteritems(data):
+
+ k = self._templar.template(k)
+
+ if not isidentifier(k):
+ result['failed'] = True
+ result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
+ "letters, numbers and underscores." % k)
+ return result
+
+ stats['data'][k] = self._templar.template(v)
+
+ result['changed'] = False
+ result['ansible_stats'] = stats
+
+ return result
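
The boolean option handling above follows a defaults-then-override pattern; a simplified sketch, where the string coercion only approximates boolean(..., strict=False):

    def resolve_stats_options(task_args):
        stats = {'data': {}, 'per_host': False, 'aggregate': True}  # defaults
        for opt in ('per_host', 'aggregate'):
            val = task_args.get(opt)
            if val is None:
                continue
            stats[opt] = val if isinstance(val, bool) else str(val).lower() in ('yes', 'on', '1', 'true')
        return stats

    print(resolve_stats_options({'per_host': 'yes'}))
    # -> {'data': {}, 'per_host': True, 'aggregate': True}
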
diff --git a/lib/ansible/plugins/action/shell.py b/lib/ansible/plugins/action/shell.py
new file mode 100644
index 00000000..617a373d
--- /dev/null
+++ b/lib/ansible/plugins/action/shell.py
@@ -0,0 +1,27 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ del tmp # tmp no longer has any effect
+
+ # Shell module is implemented via command with a special arg
+ self._task.args['_uses_shell'] = True
+
+ command_action = self._shared_loader_obj.action_loader.get('ansible.legacy.command',
+ task=self._task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj)
+ result = command_action.run(task_vars=task_vars)
+
+ return result
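
The whole shell plugin is a delegation shim: set one internal flag, then hand the task to the command action. A toy version of that pattern, where CommandAction is a hypothetical stand-in for the loaded 'ansible.legacy.command' action:

    class CommandAction:  # hypothetical stand-in for the real action plugin
        def __init__(self, task_args):
            self.task_args = task_args

        def run(self):
            return {'cmd': self.task_args['cmd'],
                    'uses_shell': self.task_args.get('_uses_shell', False)}

    def run_shell(task_args):
        task_args['_uses_shell'] = True        # the special arg set by the shell action
        return CommandAction(task_args).run()  # delegate everything else

    print(run_shell({'cmd': 'echo hi'}))  # -> {'cmd': 'echo hi', 'uses_shell': True}
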
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
new file mode 100644
index 00000000..645afff2
--- /dev/null
+++ b/lib/ansible/plugins/action/template.py
@@ -0,0 +1,188 @@
+# Copyright: (c) 2015, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import shutil
+import stat
+import tempfile
+
+from ansible import constants as C
+from ansible.config.manager import ensure_type
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleAction, AnsibleActionFail
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.plugins.action import ActionBase
+from ansible.template import generate_ansible_template_vars
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+ DEFAULT_NEWLINE_SEQUENCE = "\n"
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for template operations '''
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # Options type validation
+        # strings
+ for s_type in ('src', 'dest', 'state', 'newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
+ 'block_end_string'):
+ if s_type in self._task.args:
+ value = ensure_type(self._task.args[s_type], 'string')
+ if value is not None and not isinstance(value, string_types):
+ raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
+ self._task.args[s_type] = value
+
+ # booleans
+ try:
+ follow = boolean(self._task.args.get('follow', False), strict=False)
+ trim_blocks = boolean(self._task.args.get('trim_blocks', True), strict=False)
+ lstrip_blocks = boolean(self._task.args.get('lstrip_blocks', False), strict=False)
+ except TypeError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ # assign to local vars for ease of use
+ source = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ state = self._task.args.get('state', None)
+ newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
+ variable_start_string = self._task.args.get('variable_start_string', None)
+ variable_end_string = self._task.args.get('variable_end_string', None)
+ block_start_string = self._task.args.get('block_start_string', None)
+ block_end_string = self._task.args.get('block_end_string', None)
+ output_encoding = self._task.args.get('output_encoding', 'utf-8') or 'utf-8'
+
+ # Option `lstrip_blocks' was added in Jinja2 version 2.7.
+ if lstrip_blocks:
+ try:
+ import jinja2.defaults
+ except ImportError:
+ raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')
+
+ try:
+ jinja2.defaults.LSTRIP_BLOCKS
+ except AttributeError:
+ raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")
+
+ wrong_sequences = ["\\n", "\\r", "\\r\\n"]
+ allowed_sequences = ["\n", "\r", "\r\n"]
+
+ # We need to convert unescaped sequences to proper escaped sequences for Jinja2
+ if newline_sequence in wrong_sequences:
+ newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]
+
+ try:
+ # logical validation
+ if state is not None:
+ raise AnsibleActionFail("'state' cannot be specified on a template")
+ elif source is None or dest is None:
+ raise AnsibleActionFail("src and dest are required")
+ elif newline_sequence not in allowed_sequences:
+ raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
+ else:
+ try:
+ source = self._find_needle('templates', source)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ mode = self._task.args.get('mode', None)
+ if mode == 'preserve':
+ mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)
+
+ # Get vault decrypted tmp file
+ try:
+ tmp_source = self._loader.get_real_file(source)
+ except AnsibleFileNotFound as e:
+ raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e)))
+ b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')
+
+ # template the source data locally & get ready to transfer
+ try:
+ with open(b_tmp_source, 'rb') as f:
+ try:
+ template_data = to_text(f.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleActionFail("Template source files must be utf-8 encoded")
+
+ # set jinja2 internal search path for includes
+ searchpath = task_vars.get('ansible_search_path', [])
+ searchpath.extend([self._loader._basedir, os.path.dirname(source)])
+
+ # We want to search into the 'templates' subdir of each search path in
+ # addition to our original search paths.
+ newsearchpath = []
+ for p in searchpath:
+ newsearchpath.append(os.path.join(p, 'templates'))
+ newsearchpath.append(p)
+ searchpath = newsearchpath
+
+ # add ansible 'template' vars
+ temp_vars = task_vars.copy()
+ temp_vars.update(generate_ansible_template_vars(source, dest))
+
+ with self._templar.set_temporary_context(searchpath=searchpath, newline_sequence=newline_sequence,
+ block_start_string=block_start_string, block_end_string=block_end_string,
+ variable_start_string=variable_start_string, variable_end_string=variable_end_string,
+ trim_blocks=trim_blocks, lstrip_blocks=lstrip_blocks,
+ available_variables=temp_vars):
+ resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
+ except AnsibleAction:
+ raise
+ except Exception as e:
+ raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
+ finally:
+ self._loader.cleanup_tmp_file(b_tmp_source)
+
+ new_task = self._task.copy()
+ # mode is either the mode from task.args or the mode of the source file if the task.args
+ # mode == 'preserve'
+ new_task.args['mode'] = mode
+
+ # remove 'template only' options:
+ for remove in ('newline_sequence', 'block_start_string', 'block_end_string', 'variable_start_string', 'variable_end_string',
+ 'trim_blocks', 'lstrip_blocks', 'output_encoding'):
+ new_task.args.pop(remove, None)
+
+ local_tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
+
+ try:
+ result_file = os.path.join(local_tempdir, os.path.basename(source))
+ with open(to_bytes(result_file, errors='surrogate_or_strict'), 'wb') as f:
+ f.write(to_bytes(resultant, encoding=output_encoding, errors='surrogate_or_strict'))
+
+ new_task.args.update(
+ dict(
+ src=result_file,
+ dest=dest,
+ follow=follow,
+ ),
+ )
+ # call with ansible.legacy prefix to eliminate collisions with collections while still allowing local override
+ copy_action = self._shared_loader_obj.action_loader.get('ansible.legacy.copy',
+ task=new_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj)
+ result.update(copy_action.run(task_vars=task_vars))
+ finally:
+ shutil.rmtree(to_bytes(local_tempdir, errors='surrogate_or_strict'))
+
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
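
The search-path expansion above is worth seeing in isolation: every existing search directory is tried via its 'templates' subdirectory first, then directly. A minimal sketch:

    import os

    def expand_searchpath(searchpath):
        newsearchpath = []
        for p in searchpath:
            newsearchpath.append(os.path.join(p, 'templates'))
            newsearchpath.append(p)
        return newsearchpath

    print(expand_searchpath(['/play', '/play/roles/web']))
    # -> ['/play/templates', '/play', '/play/roles/web/templates', '/play/roles/web']
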
diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
new file mode 100644
index 00000000..4d188e3d
--- /dev/null
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -0,0 +1,111 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleActionSkip
+from ansible.module_utils._text import to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for unarchive operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
+ creates = self._task.args.get('creates', None)
+ decrypt = self._task.args.get('decrypt', True)
+
+ try:
+ # "copy" is deprecated in favor of "remote_src".
+ if 'copy' in self._task.args:
+ # They are mutually exclusive.
+ if 'remote_src' in self._task.args:
+ raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
+ # We will take the information from copy and store it in
+ # the remote_src var to use later in this file.
+ self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)
+
+ if source is None or dest is None:
+ raise AnsibleActionFail("src (or content) and dest are required")
+
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ creates = self._remote_expand_user(creates)
+ if self._remote_file_exists(creates):
+ raise AnsibleActionSkip("skipped, since %s exists" % creates)
+
+ dest = self._remote_expand_user(dest) # CCTODO: Fix path for Windows hosts.
+ source = os.path.expanduser(source)
+
+ if not remote_src:
+ try:
+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ try:
+ remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_text(e))
+
+ if not remote_stat['exists'] or not remote_stat['isdir']:
+ raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)
+
+ if not remote_src:
+ # transfer the file to a remote tmp location
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
+ self._transfer_file(source, tmp_src)
+
+ # handle diff mode client side
+ # handle check mode client side
+
+ # remove action plugin only keys
+ new_module_args = self._task.args.copy()
+ for key in ('decrypt',):
+ if key in new_module_args:
+ del new_module_args[key]
+
+ if not remote_src:
+ # fix file permissions when the copy is done as a different user
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
+ new_module_args['src'] = tmp_src
+
+            # execute the unarchive module now, with the updated args (using the ansible.legacy prefix to eliminate
+            # collisions with collections while still allowing local override)
+ result.update(self._execute_module(module_name='ansible.legacy.unarchive', module_args=new_module_args, task_vars=task_vars))
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ return result
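
The deprecated 'copy' to 'remote_src' translation above inverts the flag after checking mutual exclusivity; a standalone sketch (bool() here only approximates the loose boolean(..., strict=False) used by the real code):

    def translate_copy_arg(task_args):
        if 'copy' in task_args:
            if 'remote_src' in task_args:
                raise ValueError("parameters are mutually exclusive: ('copy', 'remote_src')")
            # remote_src is simply the negation of copy
            task_args['remote_src'] = not bool(task_args.pop('copy'))
        return task_args

    print(translate_copy_arg({'src': 'a.tgz', 'dest': '/opt', 'copy': False}))
    # -> {'src': 'a.tgz', 'dest': '/opt', 'remote_src': True}
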
diff --git a/lib/ansible/plugins/action/uri.py b/lib/ansible/plugins/action/uri.py
new file mode 100644
index 00000000..5ad04dee
--- /dev/null
+++ b/lib/ansible/plugins/action/uri.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.collections import Mapping
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import text_type
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = True
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_async = True
+
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ body_format = self._task.args.get('body_format', 'raw')
+ body = self._task.args.get('body')
+ src = self._task.args.get('src', None)
+ remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)
+
+ try:
+ if remote_src:
+ # everything is remote, so we just execute the module
+ # without changing any of the module arguments
+ # call with ansible.legacy prefix to prevent collections collisions while allowing local override
+ raise _AnsibleActionDone(result=self._execute_module(module_name='ansible.legacy.uri',
+ task_vars=task_vars, wrap_async=self._task.async_val))
+
+ kwargs = {}
+
+ if src:
+ try:
+ src = self._find_needle('files', src)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
+ kwargs['src'] = tmp_src
+ self._transfer_file(src, tmp_src)
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
+ elif body_format == 'form-multipart':
+ if not isinstance(body, Mapping):
+ raise AnsibleActionFail(
+ 'body must be mapping, cannot be type %s' % body.__class__.__name__
+ )
+ for field, value in body.items():
+ if isinstance(value, text_type):
+ continue
+ content = value.get('content')
+ filename = value.get('filename')
+ if not filename or content:
+ continue
+
+ try:
+ filename = self._find_needle('files', filename)
+ except AnsibleError as e:
+ raise AnsibleActionFail(to_native(e))
+
+ tmp_src = self._connection._shell.join_path(
+ self._connection._shell.tmpdir,
+ os.path.basename(filename)
+ )
+ value['filename'] = tmp_src
+ self._transfer_file(filename, tmp_src)
+ self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
+ kwargs['body'] = body
+
+ new_module_args = self._task.args.copy()
+ new_module_args.update(kwargs)
+
+ # call with ansible.legacy prefix to prevent collections collisions while allowing local override
+ result.update(self._execute_module('ansible.legacy.uri', module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+ except AnsibleAction as e:
+ result.update(e.result)
+ finally:
+ if not self._task.async_val:
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ return result
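
The form-multipart triage above can be summarized as: plain strings pass through, fields with inline 'content' need no transfer, and fields that only name a 'filename' must have the file shipped to the remote tmp dir first. A minimal sketch of that decision:

    def fields_needing_transfer(body):
        """Return the multipart fields whose backing file must be copied to the remote."""
        needs_transfer = []
        for field, value in body.items():
            if isinstance(value, str):
                continue  # plain text field, sent inline
            if not value.get('filename') or value.get('content'):
                continue  # nothing to ship: inline content or no file reference
            needs_transfer.append(field)
        return needs_transfer

    body = {
        'note': 'plain text field',
        'inline': {'content': 'abc', 'mime_type': 'text/plain'},
        'upload': {'filename': 'report.pdf'},
    }
    print(fields_needing_transfer(body))  # -> ['upload']
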
diff --git a/lib/ansible/plugins/action/wait_for_connection.py b/lib/ansible/plugins/action/wait_for_connection.py
new file mode 100644
index 00000000..8489c767
--- /dev/null
+++ b/lib/ansible/plugins/action/wait_for_connection.py
@@ -0,0 +1,120 @@
+# (c) 2017, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# CI-required python3 boilerplate
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+from datetime import datetime, timedelta
+
+from ansible.module_utils._text import to_text
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('connect_timeout', 'delay', 'sleep', 'timeout'))
+
+ DEFAULT_CONNECT_TIMEOUT = 5
+ DEFAULT_DELAY = 0
+ DEFAULT_SLEEP = 1
+ DEFAULT_TIMEOUT = 600
+
+ def do_until_success_or_timeout(self, what, timeout, connect_timeout, what_desc, sleep=1):
+ max_end_time = datetime.utcnow() + timedelta(seconds=timeout)
+
+        error = None  # last exception seen; reported if we time out
+ while datetime.utcnow() < max_end_time:
+ try:
+ what(connect_timeout)
+ if what_desc:
+ display.debug("wait_for_connection: %s success" % what_desc)
+ return
+ except Exception as e:
+ error = e # PY3 compatibility to store exception for use outside of this block
+ if what_desc:
+ display.debug("wait_for_connection: %s fail (expected), retrying in %d seconds..." % (what_desc, sleep))
+ time.sleep(sleep)
+
+ raise TimedOutException("timed out waiting for %s: %s" % (what_desc, error))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
+ delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
+ sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
+ timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))
+
+ if self._play_context.check_mode:
+ display.vvv("wait_for_connection: skipping for check_mode")
+ return dict(skipped=True)
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ def ping_module_test(connect_timeout):
+ ''' Test ping module, if available '''
+ display.vvv("wait_for_connection: attempting ping module test")
+ # re-run interpreter discovery if we ran it in the first iteration
+ if self._discovered_interpreter_key:
+ task_vars['ansible_facts'].pop(self._discovered_interpreter_key, None)
+ # call connection reset between runs if it's there
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ ping_result = self._execute_module(module_name='ansible.legacy.ping', module_args=dict(), task_vars=task_vars)
+
+ # Test module output
+ if ping_result['ping'] != 'pong':
+ raise Exception('ping test failed')
+
+ start = datetime.now()
+
+ if delay:
+ time.sleep(delay)
+
+ try:
+ # If the connection has a transport_test method, use it first
+ if hasattr(self._connection, 'transport_test'):
+ self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout, what_desc="connection port up", sleep=sleep)
+
+ # Use the ping module test to determine end-to-end connectivity
+ self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout, what_desc="ping module test", sleep=sleep)
+
+ except TimedOutException as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+
+ elapsed = datetime.now() - start
+ result['elapsed'] = elapsed.seconds
+
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
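
The retry loop above is a generic call-until-success-or-deadline pattern. A standalone version with illustrative names:

    import time
    from datetime import datetime, timedelta

    def until_success_or_timeout(what, timeout, sleep=1):
        deadline = datetime.utcnow() + timedelta(seconds=timeout)
        error = None  # remember the last failure for the timeout message
        while datetime.utcnow() < deadline:
            try:
                return what()
            except Exception as e:
                error = e
                time.sleep(sleep)
        raise TimeoutError('timed out: %s' % error)

    attempts = []
    def flaky():
        attempts.append(1)
        if len(attempts) < 3:
            raise RuntimeError('not up yet')
        return 'pong'

    print(until_success_or_timeout(flaky, timeout=10))  # -> 'pong' on the third try
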
diff --git a/lib/ansible/plugins/action/yum.py b/lib/ansible/plugins/action/yum.py
new file mode 100644
index 00000000..9d3e1454
--- /dev/null
+++ b/lib/ansible/plugins/action/yum.py
@@ -0,0 +1,103 @@
+# (c) 2018, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))
+
+
+class ActionModule(ActionBase):
+
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ '''
+        Action plugin handler for yum3 vs yum4 (dnf) operations.
+
+        Enables the yum module to use yum3 and/or yum4. Yum4 is a yum
+        command-line compatibility layer on top of dnf. Since the Ansible
+        modules for yum (aka yum3) and dnf (aka yum4) each call the yum3 or
+        yum4 Python APIs natively on the backend, we need to handle this here
+        and pass off to the correct Ansible module to execute on the remote
+        system.
+ '''
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ # Carry-over concept from the package action plugin
+ module = self._task.args.get('use_backend', "auto")
+
+ if module == 'auto':
+ try:
+ if self._task.delegate_to: # if we delegate, we should use delegated host's facts
+ module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
+ else:
+ module = self._templar.template("{{ansible_facts.pkg_mgr}}")
+ except Exception:
+ pass # could not get it from template!
+
+ if module not in VALID_BACKENDS:
+ facts = self._execute_module(
+ module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
+ task_vars=task_vars)
+ display.debug("Facts %s" % facts)
+ module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
+ if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
+ result['ansible_facts'] = {'pkg_mgr': module}
+
+ if module not in VALID_BACKENDS:
+ result.update(
+ {
+ 'failed': True,
+                    'msg': ("Could not detect which major revision of yum is in use, which is required to determine module backend.",
+                            "You should manually specify use_backend to tell the module whether to use the yum (yum3) or dnf (yum4) backend"),
+ }
+ )
+
+ else:
+ if module == "yum4":
+ module = "dnf"
+
+ # eliminate collisions with collections search while still allowing local override
+ module = 'ansible.legacy.' + module
+
+ if not self._shared_loader_obj.module_loader.has_plugin(module):
+ result.update({'failed': True, 'msg': "Could not find a yum module backend for %s." % module})
+ else:
+ # run either the yum (yum3) or dnf (yum4) backend module
+ new_module_args = self._task.args.copy()
+ if 'use_backend' in new_module_args:
+ del new_module_args['use_backend']
+
+ display.vvvv("Running %s as the backend for the yum action plugin" % module)
+ result.update(self._execute_module(
+ module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
+
+ # Cleanup
+ if not self._task.async_val:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
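
The backend-selection logic above boils down to: honor an explicit use_backend, fall back to discovered pkg_mgr facts, and map the 'yum4' alias onto the dnf module. A condensed sketch:

    VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))

    def pick_backend(use_backend='auto', discovered_pkg_mgr=None):
        module = use_backend
        if module == 'auto' and discovered_pkg_mgr:
            module = discovered_pkg_mgr  # from templated/gathered facts
        if module not in VALID_BACKENDS:
            raise ValueError('could not detect yum backend; specify use_backend')
        if module == 'yum4':
            module = 'dnf'  # yum4 is the dnf-based implementation
        return 'ansible.legacy.' + module

    print(pick_backend(discovered_pkg_mgr='dnf'))  # -> 'ansible.legacy.dnf'
    print(pick_backend(use_backend='yum4'))        # -> 'ansible.legacy.dnf'
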
diff --git a/lib/ansible/plugins/become/__init__.py b/lib/ansible/plugins/become/__init__.py
new file mode 100644
index 00000000..f20326c6
--- /dev/null
+++ b/lib/ansible/plugins/become/__init__.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+from random import choice
+from string import ascii_lowercase
+from gettext import dgettext
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes
+from ansible.plugins import AnsiblePlugin
+
+
+def _gen_id(length=32):
+ ''' return random string used to identify the current privilege escalation '''
+ return ''.join(choice(ascii_lowercase) for x in range(length))
+
+
+class BecomeBase(AnsiblePlugin):
+
+ name = None
+
+ # messages for detecting prompted password issues
+ fail = tuple()
+ missing = tuple()
+
+ # many connection plugins cannot provide tty, set to True if your become
+    # plugin requires a tty, e.g. su
+ require_tty = False
+
+ # prompt to match
+ prompt = ''
+
+ def __init__(self):
+ super(BecomeBase, self).__init__()
+ self._id = ''
+ self.success = ''
+
+ def get_option(self, option, hostvars=None, playcontext=None):
+ """ Overrides the base get_option to provide a fallback to playcontext vars in case a 3rd party plugin did not
+ implement the base become options required in Ansible. """
+ # TODO: add deprecation warning for ValueError in devel that removes the playcontext fallback
+ try:
+ return super(BecomeBase, self).get_option(option, hostvars=hostvars)
+ except KeyError:
+ pc_fallback = ['become_user', 'become_pass', 'become_flags', 'become_exe']
+ if option not in pc_fallback:
+ raise
+
+ return getattr(playcontext, option, None)
+
+ def expect_prompt(self):
+ """This function assists connection plugins in determining if they need to wait for
+ a prompt. Both a prompt and a password are required.
+ """
+ return self.prompt and self.get_option('become_pass')
+
+ def _build_success_command(self, cmd, shell, noexe=False):
+ if not all((cmd, shell, self.success)):
+ return cmd
+
+ try:
+ cmd = shlex_quote('%s %s %s %s' % (shell.ECHO, self.success, shell.COMMAND_SEP, cmd))
+ except AttributeError:
+            # TODO: This should probably become some more robust functionality used to detect incompatibilities
+ raise AnsibleError('The %s shell family is incompatible with the %s become plugin' % (shell.SHELL_FAMILY, self.name))
+ exe = getattr(shell, 'executable', None)
+ if exe and not noexe:
+ cmd = '%s -c %s' % (exe, cmd)
+ return cmd
+
+ @abstractmethod
+ def build_become_command(self, cmd, shell):
+ self._id = _gen_id()
+ self.success = 'BECOME-SUCCESS-%s' % self._id
+
+ def check_success(self, b_output):
+ b_success = to_bytes(self.success)
+ return any(b_success in l.rstrip() for l in b_output.splitlines(True))
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+ if self.prompt:
+ b_prompt = to_bytes(self.prompt).strip()
+ return any(l.strip().startswith(b_prompt) for l in b_output.splitlines())
+ return False
+
+ def _check_password_error(self, b_out, msg):
+ ''' returns True/False if domain specific i18n version of msg is found in b_out '''
+ b_fail = to_bytes(dgettext(self.name, msg))
+ return b_fail and b_fail in b_out
+
+ def check_incorrect_password(self, b_output):
+ for errstring in self.fail:
+ if self._check_password_error(b_output, errstring):
+ return True
+ return False
+
+ def check_missing_password(self, b_output):
+ for errstring in self.missing:
+ if self._check_password_error(b_output, errstring):
+ return True
+ return False
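
The BECOME-SUCCESS handshake above wraps the real command so it first echoes a random marker; scanning output for that marker lets the connection plugin tell a successful escalation apart from, say, a lingering password prompt. A self-contained sketch (shlex.quote stands in for the six-based shlex_quote):

    from random import choice
    from string import ascii_lowercase
    from shlex import quote

    success = 'BECOME-SUCCESS-' + ''.join(choice(ascii_lowercase) for _ in range(32))

    def build_success_command(cmd, shell_exe='/bin/sh'):
        return '%s -c %s' % (shell_exe, quote('echo %s; %s' % (success, cmd)))

    def check_success(b_output):
        b_success = success.encode()
        return any(b_success in line for line in b_output.splitlines())

    print(build_success_command('id -u'))
    print(check_success(('%s\n0\n' % success).encode()))  # -> True
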
diff --git a/lib/ansible/plugins/become/runas.py b/lib/ansible/plugins/become/runas.py
new file mode 100644
index 00000000..c8ae881c
--- /dev/null
+++ b/lib/ansible/plugins/become/runas.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ become: runas
+ short_description: Run As user
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the Windows runas facility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: runas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_runas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_RUNAS_USER
+ required: True
+ become_flags:
+ description: Options to pass to runas, a space delimited list of k=v pairs
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: runas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_runas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_RUNAS_FLAGS
+ become_pass:
+ description: password
+ ini:
+ - section: runas_become_plugin
+ key: password
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_runas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_RUNAS_PASS
+ notes:
+        - runas is really implemented in the PowerShell module handler and as such can only be used with WinRM connections.
+        - This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
+        - The Secondary Logon service (seclogon) must be running to use runas.
+"""
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'runas'
+
+ def build_become_command(self, cmd, shell):
+ # runas is implemented inside the winrm connection plugin
+ return cmd
diff --git a/lib/ansible/plugins/become/su.py b/lib/ansible/plugins/become/su.py
new file mode 100644
index 00000000..e2001655
--- /dev/null
+++ b/lib/ansible/plugins/become/su.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ become: su
+ short_description: Substitute User
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the su utility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: su_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_su_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SU_USER
+ become_exe:
+ description: Su executable
+ default: su
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: su_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_su_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SU_EXE
+ become_flags:
+ description: Options to pass to su
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: su_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_su_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SU_FLAGS
+ become_pass:
+ description: Password to pass to su
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_su_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SU_PASS
+ ini:
+ - section: su_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+ - If empty we'll use the built in one
+ default: []
+ ini:
+ - section: su_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_su_prompt_l10n
+ env:
+ - name: ANSIBLE_SU_PROMPT_L10N
+"""
+
+import re
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'su'
+
+ # messages for detecting prompted password issues
+ fail = ('Authentication failure',)
+
+ SU_PROMPT_LOCALIZATIONS = [
+ 'Password',
+ '암호',
+ 'パスワード',
+ 'Adgangskode',
+ 'Contraseña',
+ 'Contrasenya',
+ 'Hasło',
+ 'Heslo',
+ 'Jelszó',
+ 'Lösenord',
+ 'Mật khẩu',
+ 'Mot de passe',
+ 'Parola',
+ 'Parool',
+ 'Pasahitza',
+ 'Passord',
+ 'Passwort',
+ 'Salasana',
+ 'Sandi',
+ 'Senha',
+ 'Wachtwoord',
+ 'ססמה',
+ 'Лозинка',
+ 'Парола',
+ 'Пароль',
+        'गुप्तशब्द',
+        'शब्दकूट',
+        'సంకేతపదము',
+        'හස්පදය',
+        '密码',
+        '密碼',
+        '口令',
+ ]
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ prompts = self.get_option('prompt_l10n') or self.SU_PROMPT_LOCALIZATIONS
+ b_password_string = b"|".join((br'(\w+\'s )?' + to_bytes(p)) for p in prompts)
+ # Colon or unicode fullwidth colon
+        b_password_string = b_password_string + to_bytes(u' ?(:|：) ?')
+ b_su_prompt_localizations_re = re.compile(b_password_string, flags=re.IGNORECASE)
+ return bool(b_su_prompt_localizations_re.match(b_output))
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ # Prompt handling for ``su`` is more complicated, this
+ # is used to satisfy the connection plugin
+ self.prompt = True
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe') or self.name
+ flags = self.get_option('become_flags') or ''
+ user = self.get_option('become_user') or ''
+ success_cmd = self._build_success_command(cmd, shell)
+
+ return "%s %s %s -c %s" % (exe, flags, user, shlex_quote(success_cmd))
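
The localized prompt matching above builds one alternation across the known translations, allows an optional "<user>'s " prefix, and appends a regular-or-fullwidth colon suffix. A runnable sketch with an abbreviated prompt list:

    import re

    prompts = ['Password', 'Passwort', 'Mot de passe']  # abbreviated list
    pattern = b'|'.join(br"(\w+'s )?" + p.encode('utf-8') for p in prompts)
    pattern += " ?(:|：) ?".encode('utf-8')  # regular or fullwidth colon
    prompt_re = re.compile(pattern, flags=re.IGNORECASE)

    print(bool(prompt_re.match(b"Password: ")))         # -> True
    print(bool(prompt_re.match(b"root's password: ")))  # -> True
    print(bool(prompt_re.match(b"Last login: Mon")))    # -> False
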
diff --git a/lib/ansible/plugins/become/sudo.py b/lib/ansible/plugins/become/sudo.py
new file mode 100644
index 00000000..a7593cce
--- /dev/null
+++ b/lib/ansible/plugins/become/sudo.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ become: sudo
+ short_description: Substitute User DO
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the sudo utility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sudo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sudo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SUDO_USER
+ become_exe:
+ description: Sudo executable
+ default: sudo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sudo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sudo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SUDO_EXE
+ become_flags:
+ description: Options to pass to sudo
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sudo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sudo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SUDO_FLAGS
+ become_pass:
+ description: Password to pass to sudo
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sudo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SUDO_PASS
+ ini:
+ - section: sudo_become_plugin
+ key: password
+"""
+
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'sudo'
+
+ # messages for detecting prompted password issues
+ fail = ('Sorry, try again.',)
+ missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ becomecmd = self.get_option('become_exe') or self.name
+
+ flags = self.get_option('become_flags') or ''
+ prompt = ''
+ if self.get_option('become_pass'):
+ self.prompt = '[sudo via ansible, key=%s] password:' % self._id
+ if flags: # this could be simplified, but kept as is for now for backwards string matching
+ flags = flags.replace('-n', '')
+ prompt = '-p "%s"' % (self.prompt)
+
+ user = self.get_option('become_user') or ''
+ if user:
+ user = '-u %s' % (user)
+
+ return ' '.join([becomecmd, flags, prompt, user, self._build_success_command(cmd, shell)])
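
The command assembly above has one subtlety: when a password is expected, the non-interactive -n flag is stripped and a distinctive prompt is injected so the connection plugin can recognize it unambiguously. A simplified sketch with illustrative defaults:

    def build_sudo_command(cmd, become_pass=False, flags='-H -S -n', user='root', key='abc123'):
        prompt = ''
        if become_pass:
            flags = flags.replace('-n', '')  # -n would make sudo fail instead of prompting
            prompt = '-p "[sudo via ansible, key=%s] password:"' % key
        user_opt = '-u %s' % user if user else ''
        return ' '.join(['sudo', flags, prompt, user_opt, cmd])

    print(build_sudo_command('/bin/true'))
    # -> sudo -H -S -n  -u root /bin/true
    print(build_sudo_command('/bin/true', become_pass=True))
    # -> sudo -H -S  -p "[sudo via ansible, key=abc123] password:" -u root /bin/true
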
diff --git a/lib/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
new file mode 100644
index 00000000..68b960e1
--- /dev/null
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -0,0 +1,376 @@
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2018, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import os
+import time
+import errno
+from abc import ABCMeta, abstractmethod
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import with_metaclass
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins import AnsiblePlugin
+from ansible.plugins.loader import cache_loader
+from ansible.utils.collection_loader import resource_from_fqcr
+from ansible.utils.display import Display
+from ansible.vars.fact_cache import FactCache as RealFactCache
+
+display = Display()
+
+
+class FactCache(RealFactCache):
+ """
+    This is for backwards compatibility and will be removed after deprecation. It was moved out
+    of this module because it wasn't actually part of the cache plugin API: it is the code that
+    makes use of cache plugins, not a cache plugin itself. Subclassing it wouldn't yield a usable
+    cache plugin, and there was no facility to use it as anything else.
+ """
+ def __init__(self, *args, **kwargs):
+ display.deprecated('ansible.plugins.cache.FactCache has been moved to'
+ ' ansible.vars.fact_cache.FactCache. If you are looking for the class'
+ ' to subclass for a cache plugin, you want'
+ ' ansible.plugins.cache.BaseCacheModule or one of its subclasses.',
+ version='2.12', collection_name='ansible.builtin')
+ super(FactCache, self).__init__(*args, **kwargs)
+
+
+class BaseCacheModule(AnsiblePlugin):
+
+ # Backwards compat only. Just import the global display instead
+ _display = display
+
+ def __init__(self, *args, **kwargs):
+ # Third party code is not using cache_loader to load plugin - fall back to previous behavior
+ if not hasattr(self, '_load_name'):
+ display.deprecated('Rather than importing custom CacheModules directly, use ansible.plugins.loader.cache_loader',
+ version='2.14', collection_name='ansible.builtin')
+ self._load_name = self.__module__.split('.')[-1]
+ self._load_name = resource_from_fqcr(self.__module__)
+ super(BaseCacheModule, self).__init__()
+ self.set_options(var_options=args, direct=kwargs)
+
+ @abstractmethod
+ def get(self, key):
+ pass
+
+ @abstractmethod
+ def set(self, key, value):
+ pass
+
+ @abstractmethod
+ def keys(self):
+ pass
+
+ @abstractmethod
+ def contains(self, key):
+ pass
+
+ @abstractmethod
+ def delete(self, key):
+ pass
+
+ @abstractmethod
+ def flush(self):
+ pass
+
+ @abstractmethod
+ def copy(self):
+ pass
+
+
+class BaseFileCacheModule(BaseCacheModule):
+ """
+ A caching module backed by file based storage.
+ """
+ def __init__(self, *args, **kwargs):
+
+ try:
+ super(BaseFileCacheModule, self).__init__(*args, **kwargs)
+ self._cache_dir = self._get_cache_connection(self.get_option('_uri'))
+ self._timeout = float(self.get_option('_timeout'))
+ except KeyError:
+ self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
+ self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
+ self.plugin_name = resource_from_fqcr(self.__module__)
+ self._cache = {}
+ self.validate_cache_connection()
+
+ def _get_cache_connection(self, source):
+ if source:
+ try:
+ return os.path.expanduser(os.path.expandvars(source))
+ except TypeError:
+ pass
+
+ def validate_cache_connection(self):
+ if not self._cache_dir:
+ raise AnsibleError("error, '%s' cache plugin requires the 'fact_caching_connection' config option "
+ "to be set (to a writeable directory path)" % self.plugin_name)
+
+ if not os.path.exists(self._cache_dir):
+ try:
+ os.makedirs(self._cache_dir)
+ except (OSError, IOError) as e:
+ raise AnsibleError("error in '%s' cache plugin while trying to create cache dir %s : %s" % (self.plugin_name, self._cache_dir, to_bytes(e)))
+ else:
+ for x in (os.R_OK, os.W_OK, os.X_OK):
+ if not os.access(self._cache_dir, x):
+ raise AnsibleError("error in '%s' cache, configured path (%s) does not have necessary permissions (rwx), disabling plugin" % (
+ self.plugin_name, self._cache_dir))
+
+ def _get_cache_file_name(self, key):
+ prefix = self.get_option('_prefix')
+ if prefix:
+ cachefile = "%s/%s%s" % (self._cache_dir, prefix, key)
+ else:
+ cachefile = "%s/%s" % (self._cache_dir, key)
+ return cachefile
+
+ def get(self, key):
+        """ Check the in-memory cache first, since the fact was not expired at 'gather time';
+        it would be problematic if the key expired after some long-running tasks and the
+        user got an 'undefined' error later in the same play """
+
+ if key not in self._cache:
+
+ if self.has_expired(key) or key == "":
+ raise KeyError
+
+ cachefile = self._get_cache_file_name(key)
+ try:
+ value = self._load(cachefile)
+ self._cache[key] = value
+ except ValueError as e:
+ display.warning("error in '%s' cache plugin while trying to read %s : %s. "
+ "Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
+ self.delete(key)
+ raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
+ "It has been removed, so you can re-run your command now." % cachefile)
+ except (OSError, IOError) as e:
+ display.warning("error in '%s' cache plugin while trying to read %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+ raise KeyError
+ except Exception as e:
+ raise AnsibleError("Error while decoding the cache file %s: %s" % (cachefile, to_bytes(e)))
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+
+ self._cache[key] = value
+
+ cachefile = self._get_cache_file_name(key)
+ try:
+ self._dump(value, cachefile)
+ except (OSError, IOError) as e:
+ display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+
+ def has_expired(self, key):
+
+ if self._timeout == 0:
+ return False
+
+ cachefile = self._get_cache_file_name(key)
+ try:
+ st = os.stat(cachefile)
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+ return False
+
+ if time.time() - st.st_mtime <= self._timeout:
+ return False
+
+ if key in self._cache:
+ del self._cache[key]
+ return True
+
+ def keys(self):
+ keys = []
+ for k in os.listdir(self._cache_dir):
+ if not (k.startswith('.') or self.has_expired(k)):
+ keys.append(k)
+ return keys
+
+ def contains(self, key):
+ cachefile = self._get_cache_file_name(key)
+
+ if key in self._cache:
+ return True
+
+ if self.has_expired(key):
+ return False
+ try:
+ os.stat(cachefile)
+ return True
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ return False
+ else:
+ display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
+
+ def delete(self, key):
+ try:
+ del self._cache[key]
+ except KeyError:
+ pass
+ try:
+ os.remove(self._get_cache_file_name(key))
+ except (OSError, IOError):
+ pass # TODO: only pass on non existing?
+
+ def flush(self):
+ self._cache = {}
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ ret = dict()
+ for key in self.keys():
+ ret[key] = self.get(key)
+ return ret
+
+ @abstractmethod
+ def _load(self, filepath):
+ """
+ Read data from a filepath and return it as a value
+
+ :arg filepath: The filepath to read from.
+ :returns: The value stored in the filepath
+
+ This method reads from the file on disk and takes care of any parsing
+ and transformation of the data before returning it. The value
+ returned should be what Ansible would expect if it were uncached data.
+
+ .. note:: Filehandles have advantages but calling code doesn't know
+ whether this file is text or binary, should be decoded, or accessed via
+ a library function. Therefore the API uses a filepath and opens
+ the file inside of the method.
+ """
+ pass
+
+ @abstractmethod
+ def _dump(self, value, filepath):
+ """
+ Write data to a filepath
+
+ :arg value: The value to store
+ :arg filepath: The filepath to store it at
+ """
+ pass
+
+
+class CachePluginAdjudicator(MutableMapping):
+ """
+ Intermediary between a cache dictionary and a CacheModule
+ """
+ def __init__(self, plugin_name='memory', **kwargs):
+ self._cache = {}
+ self._retrieved = {}
+
+ self._plugin = cache_loader.get(plugin_name, **kwargs)
+ if not self._plugin:
+ raise AnsibleError('Unable to load the cache plugin (%s).' % plugin_name)
+
+ self._plugin_name = plugin_name
+
+ def update_cache_if_changed(self):
+ if self._retrieved != self._cache:
+ self.set_cache()
+
+ def set_cache(self):
+ for top_level_cache_key in self._cache.keys():
+ self._plugin.set(top_level_cache_key, self._cache[top_level_cache_key])
+ self._retrieved = copy.deepcopy(self._cache)
+
+ def load_whole_cache(self):
+ for key in self._plugin.keys():
+ self._cache[key] = self._plugin.get(key)
+
+ def __repr__(self):
+ return to_text(self._cache)
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __len__(self):
+ return len(self.keys())
+
+ def _do_load_key(self, key):
+ load = False
+ if key not in self._cache and key not in self._retrieved and self._plugin_name != 'memory':
+ if isinstance(self._plugin, BaseFileCacheModule):
+ load = True
+ elif not isinstance(self._plugin, BaseFileCacheModule) and self._plugin.contains(key):
+ # Database-backed caches don't raise KeyError for expired keys, so only load if the key is valid by checking contains()
+ load = True
+ return load
+
+ def __getitem__(self, key):
+ if self._do_load_key(key):
+ try:
+ self._cache[key] = self._plugin.get(key)
+ except KeyError:
+ pass
+ else:
+ self._retrieved[key] = self._cache[key]
+ return self._cache[key]
+
+ def get(self, key, default=None):
+ if self._do_load_key(key):
+ try:
+ self._cache[key] = self._plugin.get(key)
+            except KeyError:
+ pass
+ else:
+ self._retrieved[key] = self._cache[key]
+ return self._cache.get(key, default)
+
+ def items(self):
+ return self._cache.items()
+
+ def values(self):
+ return self._cache.values()
+
+ def keys(self):
+ return self._cache.keys()
+
+ def pop(self, key, *args):
+ if args:
+ return self._cache.pop(key, args[0])
+ return self._cache.pop(key)
+
+ def __delitem__(self, key):
+ del self._cache[key]
+
+ def __setitem__(self, key, value):
+ self._cache[key] = value
+
+ def flush(self):
+ for key in self._cache.keys():
+ self._plugin.delete(key)
+ self._cache = {}
+
+ def update(self, value):
+ self._cache.update(value)
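
The adjudicator's core idea is a write-back cache: keep a local working dict plus a snapshot of what was last exchanged with the backend, and only push when the two differ. A standalone sketch of that pattern (WriteBackCache and DictBackend are illustrative, not the real classes):

    import copy

    class WriteBackCache:
        """Working dict plus a snapshot of what the backend last saw."""
        def __init__(self, backend):
            self._backend = backend  # anything with a set(key, value) method
            self._cache = {}
            self._retrieved = {}

        def __setitem__(self, key, value):
            self._cache[key] = value

        def update_cache_if_changed(self):
            if self._retrieved != self._cache:
                for key, value in self._cache.items():
                    self._backend.set(key, value)
                self._retrieved = copy.deepcopy(self._cache)

    class DictBackend(dict):
        def set(self, key, value):
            self[key] = value

    backend = DictBackend()
    cache = WriteBackCache(backend)
    cache['host1'] = {'distro': 'Debian'}
    cache.update_cache_if_changed()  # pushes once
    cache.update_cache_if_changed()  # no-op: nothing changed
    print(backend)  # -> {'host1': {'distro': 'Debian'}}
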
diff --git a/lib/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py
new file mode 100644
index 00000000..692b1b37
--- /dev/null
+++ b/lib/ansible/plugins/cache/base.py
@@ -0,0 +1,21 @@
+# (c) 2017, ansible by Red Hat
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# actual classes moved to __init__; kept here for backward compat with 3rd parties
+from ansible.plugins.cache import BaseCacheModule, BaseFileCacheModule
diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py
new file mode 100644
index 00000000..7605dc41
--- /dev/null
+++ b/lib/ansible/plugins/cache/jsonfile.py
@@ -0,0 +1,63 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: jsonfile
+ short_description: JSON formatted files.
+ description:
+        - This cache uses JSON-formatted, per-host files saved to the filesystem.
+ version_added: "1.9"
+ author: Ansible Core (@ansible-core)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import codecs
+import json
+
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by json files.
+ """
+
+ def _load(self, filepath):
+ # Valid JSON is always UTF-8 encoded.
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return json.load(f, cls=AnsibleJSONDecoder)
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
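
A roundtrip of the _dump/_load pair above, using the stdlib encoder where the real plugin swaps in AnsibleJSONEncoder/AnsibleJSONDecoder (the facts dict and path are illustrative):

    import codecs
    import json
    import os
    import tempfile

    value = {'ansible_distribution': 'Debian', 'ansible_ports': [22, 80]}
    path = os.path.join(tempfile.mkdtemp(), 'host1')

    with codecs.open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(value, sort_keys=True, indent=4))
    with codecs.open(path, 'r', encoding='utf-8') as f:
        print(json.load(f) == value)  # -> True
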
diff --git a/lib/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py
new file mode 100644
index 00000000..1bccd544
--- /dev/null
+++ b/lib/ansible/plugins/cache/memory.py
@@ -0,0 +1,53 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: memory
+    short_description: RAM backed, non-persistent
+ description:
+ - RAM backed cache that is not persistent.
+ - This is the default used if no other plugin is specified.
+ - There are no options to configure.
+ version_added: historical
+ author: core team (@ansible-core)
+'''
+
+from ansible.plugins.cache import BaseCacheModule
+
+
+class CacheModule(BaseCacheModule):
+
+ def __init__(self, *args, **kwargs):
+ self._cache = {}
+
+ def get(self, key):
+ return self._cache.get(key)
+
+ def set(self, key, value):
+ self._cache[key] = value
+
+ def keys(self):
+ return self._cache.keys()
+
+ def contains(self, key):
+ return key in self._cache
+
+ def delete(self, key):
+ del self._cache[key]
+
+ def flush(self):
+ self._cache = {}
+
+ def copy(self):
+ return self._cache.copy()
+
+ def __getstate__(self):
+ return self.copy()
+
+ def __setstate__(self, data):
+ self._cache = data
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
new file mode 100644
index 00000000..f7089e39
--- /dev/null
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -0,0 +1,441 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import difflib
+import json
+import os
+import sys
+import warnings
+
+from copy import deepcopy
+
+from ansible import constants as C
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_text
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins import AnsiblePlugin, get_plugin_class
+from ansible.utils.color import stringc
+from ansible.utils.display import Display
+from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
+
+if PY3:
+ # OrderedDict is needed for a backwards compat shim on Python3.x only
+ # https://github.com/ansible/ansible/pull/49512
+ from collections import OrderedDict
+else:
+ OrderedDict = None
+
+global_display = Display()
+
+
+__all__ = ["CallbackBase"]
+
+
+_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
+
+
+class CallbackBase(AnsiblePlugin):
+
+ '''
+ This is a base ansible callback class that does nothing. New callbacks should
+ use this class as a base and override any callback methods for which they wish
+ to execute custom actions.
+ '''
+
+ def __init__(self, display=None, options=None):
+ if display:
+ self._display = display
+ else:
+ self._display = global_display
+
+ if self._display.verbosity >= 4:
+ name = getattr(self, 'CALLBACK_NAME', 'unnamed')
+ ctype = getattr(self, 'CALLBACK_TYPE', 'old')
+ version = getattr(self, 'CALLBACK_VERSION', '1.0')
+ self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
+
+ self.disabled = False
+
+ self._plugin_options = {}
+ if options is not None:
+ self.set_options(options)
+
+ self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
+
+ ''' helper for callbacks, so they don't all have to include deepcopy '''
+ _copy_result = deepcopy
+
+ def set_option(self, k, v):
+ self._plugin_options[k] = v
+
+ def get_option(self, k):
+ return self._plugin_options[k]
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ ''' This is different from the normal plugin method, as callbacks get called early and really don't accept keywords.
+ Also, _options was already taken for CLI args, so callbacks use _plugin_options instead.
+ '''
+
+ # load from config
+ self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)
+
+ def _run_is_verbose(self, result, verbosity=0):
+ return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
+ and result._result.get('_ansible_verbose_override', False) is False)
+
+ def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
+
+ if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
+ indent = 4
+
+ # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
+ abridged_result = strip_internal_keys(module_response_deepcopy(result))
+
+ # remove invocation unless specifically wanting it
+ if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
+ del abridged_result['invocation']
+
+ # remove diff information from screen output
+ if self._display.verbosity < 3 and 'diff' in result:
+ del abridged_result['diff']
+
+ # remove exception from screen output
+ if 'exception' in abridged_result:
+ del abridged_result['exception']
+
+ try:
+ jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
+ except TypeError:
+ # Python3 bug: throws an exception when keys are non-homogeneous types:
+ # https://bugs.python.org/issue25457
+ # sort into an OrderedDict and then json.dumps() that instead
+ if not OrderedDict:
+ raise
+ jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
+ cls=AnsibleJSONEncoder, indent=indent,
+ ensure_ascii=False, sort_keys=False)
+ return jsonified_results
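+
+ # Editor's note (illustrative, not part of the upstream file): the TypeError
+ # branch above covers results such as {1: 'a', 'b': 2}, where json.dumps()
+ # with sort_keys=True raises on Python 3 because mixed key types cannot be
+ # compared; sorting the items with to_text() as the key restores a total
+ # ordering before dumping.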
+
+ def _handle_warnings(self, res):
+ ''' display warnings, if enabled and any exist in the result '''
+ if C.ACTION_WARNINGS:
+ if 'warnings' in res and res['warnings']:
+ for warning in res['warnings']:
+ self._display.warning(warning)
+ del res['warnings']
+ if 'deprecations' in res and res['deprecations']:
+ for warning in res['deprecations']:
+ self._display.deprecated(**warning)
+ del res['deprecations']
+
+ def _handle_exception(self, result, use_stderr=False):
+
+ if 'exception' in result:
+ msg = "An exception occurred during task execution. "
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result['exception'].strip().split('\n')[-1]
+ msg += "To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "The full traceback is:\n" + result['exception']
+ del result['exception']
+
+ self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
+
+ def _serialize_diff(self, diff):
+ return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
+
+ def _get_diff(self, difflist):
+
+ if not isinstance(difflist, list):
+ difflist = [difflist]
+
+ ret = []
+ for diff in difflist:
+ if 'dst_binary' in diff:
+ ret.append(u"diff skipped: destination file appears to be binary\n")
+ if 'src_binary' in diff:
+ ret.append(u"diff skipped: source file appears to be binary\n")
+ if 'dst_larger' in diff:
+ ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
+ if 'src_larger' in diff:
+ ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
+ if 'before' in diff and 'after' in diff:
+ # format complex structures into 'files'
+ for x in ['before', 'after']:
+ if isinstance(diff[x], MutableMapping):
+ diff[x] = self._serialize_diff(diff[x])
+ elif diff[x] is None:
+ diff[x] = ''
+ if 'before_header' in diff:
+ before_header = u"before: %s" % diff['before_header']
+ else:
+ before_header = u'before'
+ if 'after_header' in diff:
+ after_header = u"after: %s" % diff['after_header']
+ else:
+ after_header = u'after'
+ before_lines = diff['before'].splitlines(True)
+ after_lines = diff['after'].splitlines(True)
+ if before_lines and not before_lines[-1].endswith(u'\n'):
+ before_lines[-1] += u'\n\\ No newline at end of file\n'
+ if after_lines and not after_lines[-1].endswith('\n'):
+ after_lines[-1] += u'\n\\ No newline at end of file\n'
+ differ = difflib.unified_diff(before_lines,
+ after_lines,
+ fromfile=before_header,
+ tofile=after_header,
+ fromfiledate=u'',
+ tofiledate=u'',
+ n=C.DIFF_CONTEXT)
+ difflines = list(differ)
+ if len(difflines) >= 3 and sys.version_info[:2] == (2, 6):
+ # difflib in Python 2.6 adds trailing spaces after
+ # filenames in the -- before/++ after headers.
+ difflines[0] = difflines[0].replace(u' \n', u'\n')
+ difflines[1] = difflines[1].replace(u' \n', u'\n')
+ # it also treats empty files differently
+ difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0')
+ has_diff = False
+ for line in difflines:
+ has_diff = True
+ if line.startswith(u'+'):
+ line = stringc(line, C.COLOR_DIFF_ADD)
+ elif line.startswith(u'-'):
+ line = stringc(line, C.COLOR_DIFF_REMOVE)
+ elif line.startswith(u'@@'):
+ line = stringc(line, C.COLOR_DIFF_LINES)
+ ret.append(line)
+ if has_diff:
+ ret.append('\n')
+ if 'prepared' in diff:
+ ret.append(diff['prepared'])
+ return u''.join(ret)
+
+ def _get_item_label(self, result):
+ ''' retrieves the value to be displayed as a label for an item entry from a result object'''
+ if result.get('_ansible_no_log', False):
+ item = "(censored due to no_log)"
+ else:
+ item = result.get('_ansible_item_label', result.get('item'))
+ return item
+
+ def _get_item(self, result):
+ ''' here for backwards compat, really should have always been named: _get_item_label'''
+ cback = getattr(self, 'NAME', os.path.basename(__file__))
+ self._display.deprecated("The %s callback plugin should be updated to use the _get_item_label method instead" % cback,
+ version="2.11", collection_name='ansible.builtin')
+ return self._get_item_label(result)
+
+ def _process_items(self, result):
+ # just remove them as now they get handled by individual callbacks
+ del result._result['results']
+
+ def _clean_results(self, result, task_name):
+ ''' removes data from results for display '''
+
+ # mostly controls that debug only outputs what it was meant to
+ if task_name in C._ACTION_DEBUG:
+ if 'msg' in result:
+ # msg should be alone
+ for key in list(result.keys()):
+ if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
+ result.pop(key)
+ else:
+ # the 'var' value is used as the field name, so eliminate the others; what is left should be the varname
+ for hidme in self._hide_in_debug:
+ result.pop(hidme, None)
+
+ def set_play_context(self, play_context):
+ pass
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ pass
+
+ def runner_on_ok(self, host, res):
+ pass
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res, jid, clock):
+ pass
+
+ def runner_on_async_ok(self, host, res, jid):
+ pass
+
+ def runner_on_async_failed(self, host, res, jid):
+ pass
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ pass
+
+ def on_file_diff(self, host, diff):
+ pass
+
+ # V2 METHODS, by default they call v1 counterparts if possible
+ def v2_on_any(self, *args, **kwargs):
+ self.on_any(args, kwargs)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ host = result._host.get_name()
+ self.runner_on_failed(host, result._result, ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ host = result._host.get_name()
+ self.runner_on_ok(host, result._result)
+
+ def v2_runner_on_skipped(self, result):
+ if C.DISPLAY_SKIPPED_HOSTS:
+ host = result._host.get_name()
+ self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
+
+ def v2_runner_on_unreachable(self, result):
+ host = result._host.get_name()
+ self.runner_on_unreachable(host, result._result)
+
+ # FIXME: not called
+ def v2_runner_on_async_poll(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ # FIXME, get real clock
+ clock = 0
+ self.runner_on_async_poll(host, result._result, jid, clock)
+
+ # FIXME: not called
+ def v2_runner_on_async_ok(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self.runner_on_async_ok(host, result._result, jid)
+
+ # FIXME: not called
+ def v2_runner_on_async_failed(self, result):
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self.runner_on_async_failed(host, result._result, jid)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook_on_start()
+
+ def v2_playbook_on_notify(self, handler, host):
+ self.playbook_on_notify(host, handler)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self.playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self.playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.playbook_on_task_start(task.name, is_conditional)
+
+ # FIXME: not called
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass # no v1 correspondence
+
+ def v2_playbook_on_handler_task_start(self, task):
+ pass # no v1 correspondence
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
+ self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
+
+ # FIXME: not called
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ host = result._host.get_name()
+ self.playbook_on_import_for_host(host, imported_file)
+
+ # FIXME: not called
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ host = result._host.get_name()
+ self.playbook_on_not_import_for_host(host, missing_file)
+
+ def v2_playbook_on_play_start(self, play):
+ self.playbook_on_play_start(play.name)
+
+ def v2_playbook_on_stats(self, stats):
+ self.playbook_on_stats(stats)
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result:
+ host = result._host.get_name()
+ self.on_file_diff(host, result._result['diff'])
+
+ def v2_playbook_on_include(self, included_file):
+ pass # no v1 correspondence
+
+ def v2_runner_item_on_ok(self, result):
+ pass
+
+ def v2_runner_item_on_failed(self, result):
+ pass
+
+ def v2_runner_item_on_skipped(self, result):
+ pass
+
+ def v2_runner_retry(self, result):
+ pass
+
+ def v2_runner_on_start(self, host, task):
+ """Event used when host begins execution of a task
+
+ .. versionadded:: 2.8
+ """
+ pass
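CallbackBase is designed for subclassing: a derivative plugin only overrides the v2_* hooks it cares about and inherits no-op behavior for the rest. A minimal sketch of such a plugin (the name and type are illustrative, not part of this changeset):

    from ansible.plugins.callback import CallbackBase

    class CallbackModule(CallbackBase):
        # read by CallbackBase.__init__ when verbosity >= 4
        CALLBACK_VERSION = 2.0
        CALLBACK_TYPE = 'notification'
        CALLBACK_NAME = 'ok_counter'  # hypothetical name

        def __init__(self, display=None, options=None):
            super(CallbackModule, self).__init__(display=display, options=options)
            self._ok = 0

        def v2_runner_on_ok(self, result):
            self._ok += 1
            self._display.display('ok #%d: %s' % (self._ok, result._host.get_name()))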
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
new file mode 100644
index 00000000..e735e0ad
--- /dev/null
+++ b/lib/ansible/plugins/callback/default.py
@@ -0,0 +1,426 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: default
+ type: stdout
+ short_description: default Ansible screen output
+ version_added: historical
+ description:
+ - This is the default output callback for ansible-playbook.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
+
+
+from ansible import constants as C
+from ansible import context
+from ansible.playbook.task_include import TaskInclude
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+
+# These values use ansible.constants for historical reasons, mostly to allow
+# unmodified derivative plugins to work. However, newer options added to the
+# plugin are not also added to ansible.constants, so authors of derivative
+# callback plugins will eventually need to add a reference to the common docs
+# fragment for the 'default' callback plugin
+
+# these are used to provide backwards compat with old plugins that subclass from default
+# but still don't use the new config system and/or fail to document the options
+# TODO: Change the default of check_mode_markers to True in a future release (2.13)
+COMPAT_OPTIONS = (('display_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS),
+ ('display_ok_hosts', True),
+ ('show_custom_stats', C.SHOW_CUSTOM_STATS),
+ ('display_failed_stderr', False),
+ ('check_mode_markers', False),
+ ('show_per_host_start', False))
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'default'
+
+ def __init__(self):
+
+ self._play = None
+ self._last_task_banner = None
+ self._last_task_name = None
+ self._task_type_cache = {}
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # for backwards compat with plugins subclassing default, fallback to constants
+ for option, constant in COMPAT_OPTIONS:
+ try:
+ value = self.get_option(option)
+ except (AttributeError, KeyError):
+ self._display.deprecated("'%s' is subclassing DefaultCallback without the corresponding doc_fragment." % self._load_name,
+ version='2.14', collection_name='ansible.builtin')
+ value = constant
+ setattr(self, option, value)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_ERROR, stderr=self.display_failed_stderr)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_ERROR, stderr=self.display_failed_stderr)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_on_ok(self, result):
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+
+ if isinstance(result._task, TaskInclude):
+ return
+ elif result._result.get('changed', False):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if delegated_vars:
+ msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "changed: [%s]" % result._host.get_name()
+ color = C.COLOR_CHANGED
+ else:
+ if not self.display_ok_hosts:
+ return
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if delegated_vars:
+ msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = C.COLOR_OK
+
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._clean_results(result._result, result._task.action)
+
+ if self._run_is_verbose(result):
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color)
+
+ def v2_runner_on_skipped(self, result):
+
+ if self.display_skipped_hosts:
+
+ self._clean_results(result._result, result._task.action)
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg = "skipping: [%s]" % result._host.get_name()
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ msg = "fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result))
+ else:
+ msg = "fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result))
+ self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.display_failed_stderr)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.banner("NO MORE HOSTS LEFT")
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._task_start(task, prefix='TASK')
+
+ def _task_start(self, task, prefix=None):
+ # Cache output prefix for task if provided
+ # This is needed to properly display 'RUNNING HANDLER' and similar
+ # when hiding skipped/ok task results
+ if prefix is not None:
+ self._task_type_cache[task._uuid] = prefix
+
+ # Preserve task name, as all vars may not be available for templating
+ # when we need it later
+ if self._play.strategy == 'free':
+ # Explicitly set to None for strategy 'free' to account for any cached
+ # task title from a previous non-free play
+ self._last_task_name = None
+ else:
+ self._last_task_name = task.get_name().strip()
+
+ # Display the task banner immediately if we're not doing any filtering based on task result
+ if self.display_skipped_hosts and self.display_ok_hosts:
+ self._print_task_banner(task)
+
+ def _print_task_banner(self, task):
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be checked, because that is only run on the target
+ # machine and we haven't run it there yet.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ args = ''
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = u', '.join(u'%s=%s' % a for a in task.args.items())
+ args = u' %s' % args
+
+ prefix = self._task_type_cache.get(task._uuid, 'TASK')
+
+ # Use cached task name
+ task_name = self._last_task_name
+ if task_name is None:
+ task_name = task.get_name().strip()
+
+ if task.check_mode and self.check_mode_markers:
+ checkmsg = " [CHECK MODE]"
+ else:
+ checkmsg = ""
+ self._display.banner(u"%s [%s%s]%s" % (prefix, task_name, args, checkmsg))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
+
+ self._last_task_banner = task._uuid
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._task_start(task, prefix='CLEANUP TASK')
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._task_start(task, prefix='RUNNING HANDLER')
+
+ def v2_runner_on_start(self, host, task):
+ if self.get_option('show_per_host_start'):
+ self._display.display(" [started %s on %s]" % (task, host), color=C.COLOR_OK)
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if play.check_mode and self.check_mode_markers:
+ checkmsg = " [CHECK MODE]"
+ else:
+ checkmsg = ""
+ if not name:
+ msg = u"PLAY%s" % checkmsg
+ else:
+ msg = u"PLAY [%s]%s" % (name, checkmsg)
+
+ self._play = play
+
+ self._display.banner(msg)
+
+ def v2_on_file_diff(self, result):
+ if result._task.loop and 'results' in result._result:
+ for res in result._result['results']:
+ if 'diff' in res and res['diff'] and res.get('changed', False):
+ diff = self._get_diff(res['diff'])
+ if diff:
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+ self._display.display(diff)
+ elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
+ diff = self._get_diff(result._result['diff'])
+ if diff:
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+ self._display.display(diff)
+
+ def v2_runner_item_on_ok(self, result):
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if isinstance(result._task, TaskInclude):
+ return
+ elif result._result.get('changed', False):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ msg = 'changed'
+ color = C.COLOR_CHANGED
+ else:
+ if not self.display_ok_hosts:
+ return
+
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ msg = 'ok'
+ color = C.COLOR_OK
+
+ if delegated_vars:
+ msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg += ": [%s]" % result._host.get_name()
+
+ msg += " => (item=%s)" % (self._get_item_label(result._result),)
+
+ self._clean_results(result._result, result._task.action)
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color)
+
+ def v2_runner_item_on_failed(self, result):
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+ self._handle_exception(result._result)
+
+ msg = "failed: "
+ if delegated_vars:
+ msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg += "[%s]" % (result._host.get_name())
+
+ self._handle_warnings(result._result)
+ self._display.display(msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
+
+ def v2_runner_item_on_skipped(self, result):
+ if self.display_skipped_hosts:
+ if self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._clean_results(result._result, result._task.action)
+ msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item_label(result._result))
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_playbook_on_include(self, included_file):
+ msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
+ label = self._get_item_label(included_file._vars)
+ if label:
+ msg += " => (item=%s)" % label
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+
+ self._display.display(
+ u"%s : %s %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'skipped', t['skipped'], C.COLOR_SKIP),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN),
+ ),
+ screen_only=True
+ )
+
+ self._display.display(
+ u"%s : %s %s %s %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None),
+ colorize(u'skipped', t['skipped'], None),
+ colorize(u'rescued', t['rescued'], None),
+ colorize(u'ignored', t['ignored'], None),
+ ),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+
+ # print custom stats if required
+ if stats.custom and self.show_custom_stats:
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ if context.CLIARGS['check'] and self.check_mode_markers:
+ self._display.banner("DRY RUN")
+
+ def v2_playbook_on_start(self, playbook):
+ if self._display.verbosity > 1:
+ from os.path import basename
+ self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
+
+ # show CLI arguments
+ if self._display.verbosity > 3:
+ if context.CLIARGS.get('args'):
+ self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ color=C.COLOR_VERBOSE, screen_only=True)
+
+ for argument in (a for a in context.CLIARGS if a != 'args'):
+ val = context.CLIARGS[argument]
+ if val:
+ self._display.display('%s: %s' % (argument, val), color=C.COLOR_VERBOSE, screen_only=True)
+
+ if context.CLIARGS['check'] and self.check_mode_markers:
+ self._display.banner("DRY RUN")
+
+ def v2_runner_retry(self, result):
+ task_name = result.task_name or result._task
+ msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name, result._result['retries'] - result._result['attempts'])
+ if self._run_is_verbose(result, verbosity=2):
+ msg += "Result was: %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_DEBUG)
+
+ def v2_playbook_on_notify(self, handler, host):
+ if self._display.verbosity > 1:
+ self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
diff --git a/lib/ansible/plugins/callback/junit.py b/lib/ansible/plugins/callback/junit.py
new file mode 100644
index 00000000..556724d1
--- /dev/null
+++ b/lib/ansible/plugins/callback/junit.py
@@ -0,0 +1,382 @@
+# (c) 2016 Matt Clay <matt@mystile.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: junit
+ type: aggregate
+ short_description: write playbook output to a JUnit file.
+ version_added: historical
+ description:
+ - This callback writes playbook output to a JUnit formatted XML file.
+ - "Tasks show up in the report as follows:
+ 'ok': pass
+ 'failed' with 'EXPECTED FAILURE' in the task name: pass
+ 'failed' with 'TOGGLE RESULT' in the task name: pass
+ 'ok' with 'TOGGLE RESULT' in the task name: failure
+ 'failed' due to an exception: error
+ 'failed' for other reasons: failure
+ 'skipped': skipped"
+ options:
+ output_dir:
+ name: JUnit output dir
+ default: ~/.ansible.log
+ description: Directory to write XML files to.
+ env:
+ - name: JUNIT_OUTPUT_DIR
+ task_class:
+ name: JUnit Task class
+ default: False
+ description: Configure the output to be one class per yaml file
+ env:
+ - name: JUNIT_TASK_CLASS
+ task_relative_path:
+ name: JUnit Task relative path
+ default: none
+ description: Configure the output to use relative paths to given directory
+ version_added: "2.8"
+ env:
+ - name: JUNIT_TASK_RELATIVE_PATH
+ fail_on_change:
+ name: JUnit fail on change
+ default: False
+ description: Consider any tasks reporting "changed" as a junit test failure
+ env:
+ - name: JUNIT_FAIL_ON_CHANGE
+ fail_on_ignore:
+ name: JUnit fail on ignore
+ default: False
+ description: Consider failed tasks as a junit test failure even if ignore_errors is set
+ env:
+ - name: JUNIT_FAIL_ON_IGNORE
+ include_setup_tasks_in_report:
+ name: JUnit include setup tasks in report
+ default: True
+ description: Should the setup tasks be included in the final report
+ env:
+ - name: JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT
+ hide_task_arguments:
+ name: Hide the arguments for a task
+ default: False
+ description: Hide the arguments for a task
+ version_added: "2.8"
+ env:
+ - name: JUNIT_HIDE_TASK_ARGUMENTS
+ test_case_prefix:
+ name: Prefix to find actual test cases
+ default: <empty>
+ description: Consider a task as a test case only if it has this value as prefix. Additionally, failing tasks are recorded as failed test cases.
+ version_added: "2.8"
+ env:
+ - name: JUNIT_TEST_CASE_PREFIX
+ requirements:
+ - whitelist in configuration
+ - junit_xml (python lib)
+'''
+
+import os
+import time
+import re
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+
+try:
+ from junit_xml import TestSuite, TestCase
+
+ # the junit_xml API is changing in version 2.0.0
+ # TestSuite.to_xml_string is being replaced with to_xml_report_string
+ # see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
+ try:
+ from junit_xml import to_xml_report_string
+ except ImportError:
+ to_xml_report_string = TestSuite.to_xml_string
+
+ HAS_JUNIT_XML = True
+except ImportError:
+ HAS_JUNIT_XML = False
+
+try:
+ from collections import OrderedDict
+ HAS_ORDERED_DICT = True
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ HAS_ORDERED_DICT = True
+ except ImportError:
+ HAS_ORDERED_DICT = False
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback writes playbook output to a JUnit formatted XML file.
+
+ Tasks show up in the report as follows:
+ 'ok': pass
+ 'failed' with 'EXPECTED FAILURE' in the task name: pass
+ 'failed' with 'TOGGLE RESULT' in the task name: pass
+ 'ok' with 'TOGGLE RESULT' in the task name: failure
+ 'failed' due to an exception: error
+ 'failed' for other reasons: failure
+ 'skipped': skipped
+
+ This plugin makes use of the following environment variables:
+ JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
+ Default: ~/.ansible.log
+ JUNIT_TASK_CLASS (optional): Configure the output to be one class per yaml file
+ Default: False
+ JUNIT_TASK_RELATIVE_PATH (optional): Configure the output to use relative paths to given directory
+ Default: none
+ JUNIT_FAIL_ON_CHANGE (optional): Consider any tasks reporting "changed" as a junit test failure
+ Default: False
+ JUNIT_FAIL_ON_IGNORE (optional): Consider failed tasks as a junit test failure even if ignore_errors is set
+ Default: False
+ JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT (optional): Should the setup tasks be included in the final report
+ Default: True
+ JUNIT_HIDE_TASK_ARGUMENTS (optional): Hide the arguments for a task
+ Default: False
+ JUNIT_TEST_CASE_PREFIX (optional): Consider a task as a test case only if it has this value as prefix. Additionally, failing tasks are recorded
+ as failed test cases.
+ Default: <empty>
+
+ Requires:
+ junit_xml
+
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'junit'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
+ self._task_class = os.getenv('JUNIT_TASK_CLASS', 'False').lower()
+ self._task_relative_path = os.getenv('JUNIT_TASK_RELATIVE_PATH', '')
+ self._fail_on_change = os.getenv('JUNIT_FAIL_ON_CHANGE', 'False').lower()
+ self._fail_on_ignore = os.getenv('JUNIT_FAIL_ON_IGNORE', 'False').lower()
+ self._include_setup_tasks_in_report = os.getenv('JUNIT_INCLUDE_SETUP_TASKS_IN_REPORT', 'True').lower()
+ self._hide_task_arguments = os.getenv('JUNIT_HIDE_TASK_ARGUMENTS', 'False').lower()
+ self._test_case_prefix = os.getenv('JUNIT_TEST_CASE_PREFIX', '')
+ self._playbook_path = None
+ self._playbook_name = None
+ self._play_name = None
+ self._task_data = None
+
+ self.disabled = False
+
+ if not HAS_JUNIT_XML:
+ self.disabled = True
+ self._display.warning('The `junit_xml` python module is not installed. '
+ 'Disabling the `junit` callback plugin.')
+
+ if HAS_ORDERED_DICT:
+ self._task_data = OrderedDict()
+ else:
+ self.disabled = True
+ self._display.warning('The `ordereddict` python module is not installed. '
+ 'Disabling the `junit` callback plugin.')
+
+ if not os.path.exists(self._output_dir):
+ os.makedirs(self._output_dir)
+
+ def _start_task(self, task):
+ """ record the start of a task for one or more hosts """
+
+ uuid = task._uuid
+
+ if uuid in self._task_data:
+ return
+
+ play = self._play_name
+ name = task.get_name().strip()
+ path = task.get_path()
+ action = task.action
+
+ if not task.no_log and self._hide_task_arguments == 'false':
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ if args:
+ name += ' ' + args
+
+ self._task_data[uuid] = TaskData(uuid, name, path, play, action)
+
+ def _finish_task(self, status, result):
+ """ record the results of a task for a single host """
+
+ task_uuid = result._task._uuid
+
+ if hasattr(result, '_host'):
+ host_uuid = result._host._uuid
+ host_name = result._host.name
+ else:
+ host_uuid = 'include'
+ host_name = 'include'
+
+ task_data = self._task_data[task_uuid]
+
+ if self._fail_on_change == 'true' and status == 'ok' and result._result.get('changed', False):
+ status = 'failed'
+
+ # ignore failure if expected and toggle result if asked for
+ if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
+ status = 'ok'
+ elif 'TOGGLE RESULT' in task_data.name:
+ if status == 'failed':
+ status = 'ok'
+ elif status == 'ok':
+ status = 'failed'
+
+ if task_data.name.startswith(self._test_case_prefix) or status == 'failed':
+ task_data.add_host(HostData(host_uuid, host_name, status, result))
+
+ def _build_test_case(self, task_data, host_data):
+ """ build a TestCase from the given TaskData and HostData """
+
+ name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+ duration = host_data.finish - task_data.start
+
+ if self._task_relative_path:
+ junit_classname = os.path.relpath(task_data.path, self._task_relative_path)
+ else:
+ junit_classname = task_data.path
+
+ if self._task_class == 'true':
+ junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)
+
+ if host_data.status == 'included':
+ return TestCase(name, junit_classname, duration, host_data.result)
+
+ res = host_data.result._result
+ rc = res.get('rc', 0)
+ dump = self._dump_results(res, indent=0)
+ dump = self._cleanse_string(dump)
+
+ if host_data.status == 'ok':
+ return TestCase(name, junit_classname, duration, dump)
+
+ test_case = TestCase(name, junit_classname, duration)
+
+ if host_data.status == 'failed':
+ if 'exception' in res:
+ message = res['exception'].strip().split('\n')[-1]
+ output = res['exception']
+ test_case.add_error_info(message, output)
+ elif 'msg' in res:
+ message = res['msg']
+ test_case.add_failure_info(message, dump)
+ else:
+ test_case.add_failure_info('rc=%s' % rc, dump)
+ elif host_data.status == 'skipped':
+ if 'skip_reason' in res:
+ message = res['skip_reason']
+ else:
+ message = 'skipped'
+ test_case.add_skipped_info(message)
+
+ return test_case
+
+ def _cleanse_string(self, value):
+ """ convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
+ return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
+
+ def _generate_report(self):
+ """ generate a TestSuite report from the collected TaskData and HostData """
+
+ test_cases = []
+
+ for task_uuid, task_data in self._task_data.items():
+ if task_data.action in C._ACTION_SETUP and self._include_setup_tasks_in_report == 'false':
+ continue
+
+ for host_uuid, host_data in task_data.host_data.items():
+ test_cases.append(self._build_test_case(task_data, host_data))
+
+ test_suite = TestSuite(self._playbook_name, test_cases)
+ report = to_xml_report_string([test_suite])
+
+ output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
+
+ with open(output_file, 'wb') as xml:
+ xml.write(to_bytes(report, errors='surrogate_or_strict'))
+
+ def v2_playbook_on_start(self, playbook):
+ self._playbook_path = playbook._file_name
+ self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
+
+ def v2_playbook_on_play_start(self, play):
+ self._play_name = play.get_name()
+
+ def v2_runner_on_no_hosts(self, task):
+ self._start_task(task)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._start_task(task)
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._start_task(task)
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._start_task(task)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if ignore_errors and self._fail_on_ignore != 'true':
+ self._finish_task('ok', result)
+ else:
+ self._finish_task('failed', result)
+
+ def v2_runner_on_ok(self, result):
+ self._finish_task('ok', result)
+
+ def v2_runner_on_skipped(self, result):
+ self._finish_task('skipped', result)
+
+ def v2_playbook_on_include(self, included_file):
+ self._finish_task('included', included_file)
+
+ def v2_playbook_on_stats(self, stats):
+ self._generate_report()
+
+
+class TaskData:
+ """
+ Data about an individual task.
+ """
+
+ def __init__(self, uuid, name, path, play, action):
+ self.uuid = uuid
+ self.name = name
+ self.path = path
+ self.play = play
+ self.host_data = OrderedDict()
+ self.start = time.time()
+ self.action = action
+
+ def add_host(self, host):
+ if host.uuid in self.host_data:
+ if host.status == 'included':
+ # concatenate task include output from multiple items
+ host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ else:
+ raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
+
+ self.host_data[host.uuid] = host
+
+
+class HostData:
+ """
+ Data about an individual host.
+ """
+
+ def __init__(self, uuid, name, status, result):
+ self.uuid = uuid
+ self.name = name
+ self.status = status
+ self.result = result
+ self.finish = time.time()
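The status flipping that _finish_task() applies for 'EXPECTED FAILURE' and 'TOGGLE RESULT' task names can be distilled to a few lines; a sketch for illustration:

    def resolve_status(status, task_name):
        # mirrors the branch in CallbackModule._finish_task above
        if status == 'failed' and 'EXPECTED FAILURE' in task_name:
            return 'ok'
        if 'TOGGLE RESULT' in task_name:
            return {'failed': 'ok', 'ok': 'failed'}.get(status, status)
        return status

    assert resolve_status('failed', 'EXPECTED FAILURE - bad input') == 'ok'
    assert resolve_status('ok', 'TOGGLE RESULT - must fail') == 'failed'
    assert resolve_status('skipped', 'ordinary task') == 'skipped'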
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
new file mode 100644
index 00000000..8d3aef5b
--- /dev/null
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -0,0 +1,78 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: minimal
+ type: stdout
+ short_description: minimal Ansible screen output
+ version_added: historical
+ description:
+ - This is the default output callback used by the ansible command (ad-hoc)
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'minimal'
+
+ def _command_generic_msg(self, host, result, caption):
+ ''' output the result of a command run '''
+
+ buf = "%s | %s | rc=%s >>\n" % (host, caption, result.get('rc', -1))
+ buf += result.get('stdout', '')
+ buf += result.get('stderr', '')
+ buf += result.get('msg', '')
+
+ return buf + "\n"
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ self._handle_exception(result._result)
+ self._handle_warnings(result._result)
+
+ if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
+ else:
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
+
+ def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
+
+ self._handle_warnings(result._result)
+
+ if result._result.get('changed', False):
+ color = C.COLOR_CHANGED
+ state = 'CHANGED'
+ else:
+ color = C.COLOR_OK
+ state = 'SUCCESS'
+
+ if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
+ else:
+ self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=4)), color=color)
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
+
+ def v2_on_file_diff(self, result):
+ if 'diff' in result._result and result._result['diff']:
+ self._display.display(self._get_diff(result._result['diff']))
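For C.MODULE_NO_JSON actions (raw command-style modules), _command_generic_msg() formats the raw streams instead of a JSON dump. Roughly what a successful ad-hoc run prints, with a made-up host name and output:

    result = {'rc': 0, 'stdout': 'hello\n', 'stderr': ''}
    buf = "%s | %s | rc=%s >>\n" % ('host1', 'SUCCESS', result.get('rc', -1))
    buf += result.get('stdout', '') + result.get('stderr', '') + result.get('msg', '')
    print(buf)
    # host1 | SUCCESS | rc=0 >>
    # hello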
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
new file mode 100644
index 00000000..20b5be7f
--- /dev/null
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -0,0 +1,77 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: oneline
+ type: stdout
+ short_description: oneline Ansible screen output
+ version_added: historical
+ description:
+ - This is the output callback used by the -o/--one-line command line option.
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'oneline'
+
+ def _command_generic_msg(self, hostname, result, caption):
+ stdout = result.get('stdout', '').replace('\n', '\\n').replace('\r', '\\r')
+ if 'stderr' in result and result['stderr']:
+ stderr = result.get('stderr', '').replace('\n', '\\n').replace('\r', '\\r')
+ return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc', -1), stdout, stderr)
+ else:
+ return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc', -1), stdout)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n', '')
+
+ if result._task.action in C.MODULE_NO_JSON and 'module_stderr' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, 'FAILED'), color=C.COLOR_ERROR)
+ else:
+ self._display.display(msg, color=C.COLOR_ERROR)
+
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n', '')),
+ color=C.COLOR_ERROR)
+
+ def v2_runner_on_ok(self, result):
+
+ if result._result.get('changed', False):
+ color = C.COLOR_CHANGED
+ state = 'CHANGED'
+ else:
+ color = C.COLOR_OK
+ state = 'SUCCESS'
+
+ if result._task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result._result:
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, state), color=color)
+ else:
+ self._display.display("%s | %s => %s" % (result._host.get_name(), state, self._dump_results(result._result, indent=0).replace('\n', '')),
+ color=color)
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE!: %s" % (result._host.get_name(), result._result.get('msg', '')), color=C.COLOR_UNREACHABLE)
+
+ def v2_runner_on_skipped(self, result):
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py
new file mode 100644
index 00000000..f86a3cf5
--- /dev/null
+++ b/lib/ansible/plugins/callback/tree.py
@@ -0,0 +1,69 @@
+# (c) 2012-2014, Ansible, Inc
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: tree
+ callback_type: notification
+ requirements:
+ - invoked in the command line
+ short_description: Save host events to files
+ version_added: "2.0"
+ description:
+ - "This callback is used by the Ansible (adhoc) command line option `-t|--tree`"
+ - This produces a JSON dump of events in a directory, a file for each host, the directory used MUST be passed as a command line option.
+'''
+
+import os
+
+from ansible.constants import TREE_DIR
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.path import makedirs_safe
+
+
+class CallbackModule(CallbackBase):
+ '''
+ This callback puts results into a host-specific file in a directory, in JSON format.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'tree'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self.tree = TREE_DIR
+ if not self.tree:
+ self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree)
+ self.tree = os.path.expanduser("~/.ansible/tree")
+
+ def write_tree_file(self, hostname, buf):
+ ''' write something into treedir/hostname '''
+
+ buf = to_bytes(buf)
+ try:
+ makedirs_safe(self.tree)
+ path = os.path.join(self.tree, hostname)
+ with open(path, 'wb+') as fd:
+ fd.write(buf)
+ except (OSError, IOError) as e:
+ self._display.warning(u"Unable to write to %s's file: %s" % (hostname, to_text(e)))
+
+ def result_to_tree(self, result):
+ if self.tree:
+ self.write_tree_file(result._host.get_name(), self._dump_results(result._result))
+
+ def v2_runner_on_ok(self, result):
+ self.result_to_tree(result)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.result_to_tree(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self.result_to_tree(result)
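End result: one JSON file per host under the tree directory. A stand-in for write_tree_file() on Python 3, with a hypothetical path and payload:

    import json
    import os

    tree = os.path.expanduser('~/.ansible/tree')
    os.makedirs(tree, exist_ok=True)  # rough equivalent of makedirs_safe()
    with open(os.path.join(tree, 'host1'), 'wb') as fd:
        fd.write(json.dumps({'changed': False, 'rc': 0}).encode('utf-8'))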
diff --git a/lib/ansible/plugins/cliconf/__init__.py b/lib/ansible/plugins/cliconf/__init__.py
new file mode 100644
index 00000000..be2df78d
--- /dev/null
+++ b/lib/ansible/plugins/cliconf/__init__.py
@@ -0,0 +1,477 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+from functools import wraps
+
+from ansible.plugins import AnsiblePlugin
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from scp import SCPClient
+ HAS_SCP = True
+except ImportError:
+ HAS_SCP = False
+
+
+def enable_mode(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ prompt = self._connection.get_prompt()
+ if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'):
+ raise AnsibleError('operation requires privilege escalation')
+ return func(self, *args, **kwargs)
+ return wrapped
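+
+ # Editor's sketch (not part of the upstream file): platform cliconf plugins
+ # decorate privileged RPCs with enable_mode so they fail fast unless the
+ # session prompt ends with '#', e.g.:
+ #
+ # class Cliconf(CliconfBase): # hypothetical subclass
+ # @enable_mode
+ # def edit_config(self, candidate=None, **kwargs):
+ # ...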
+
+
+class CliconfBase(AnsiblePlugin):
+ """
+ A base class for implementing cli connections
+
+ .. note:: String inputs to :meth:`send_command` will be cast to byte strings
+ within this method and as such are not required to be made byte strings
+ beforehand. Please avoid using literal byte strings (``b'string'``) in
+ :class:`CliConfBase` plugins as this can lead to unexpected errors when
+ running on Python 3
+
+ List of supported RPCs:
+ :get_config: Retrieves the specified configuration from the device
+ :edit_config: Loads the specified commands into the remote device
+ :get: Execute specified command on remote device
+ :get_capabilities: Retrieves device information and supported rpc methods
+ :commit: Load configuration from candidate to running
+ :discard_changes: Discard changes to candidate datastore
+
+ Note: The list of supported RPCs for the remote device can be extracted from
+ the output of get_capabilities()
+
+ :returns: Returns output received from remote device as byte string
+
+ Usage:
+ from ansible.module_utils.connection import Connection
+
+ conn = Connection()
+ conn.get('show lldp neighbors detail')
+ conn.get_config('running')
+ conn.edit_config(['hostname test', 'netconf ssh'])
+ """
+
+ __rpc__ = ['get_config', 'edit_config', 'get_capabilities', 'get', 'enable_response_logging', 'disable_response_logging']
+
+ def __init__(self, connection):
+ super(CliconfBase, self).__init__()
+ self._connection = connection
+ self.history = list()
+ self.response_logging = False
+
+ def _alarm_handler(self, signum, frame):
+ """Alarm handler raised in case of command timeout """
+ self._connection.queue_message('log', 'closing shell due to command timeout (%s seconds).' % self._connection._play_context.timeout)
+ self.close()
+
+ def send_command(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, prompt_retry_check=False, check_all=False):
+ """Executes a command over the device connection
+
+ This method will execute a command over the device connection and
+ return the results to the caller. This method will also perform
+ logging of any commands based on the `nolog` argument.
+
+ :param command: The command to send over the connection to the device
+ :param prompt: A single regex pattern or a sequence of patterns to evaluate the expected prompt from the command
+ :param answer: The answer to respond with if the prompt is matched.
+ :param sendonly: Bool value that will send the command but not wait for a result.
+ :param newline: Bool value that will append the newline character to the command
+ :param prompt_retry_check: Bool value for trying to detect more prompts
+ :param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
+ given prompt.
+ :returns: The output from the device after executing the command
+ """
+ kwargs = {
+ 'command': to_bytes(command),
+ 'sendonly': sendonly,
+ 'newline': newline,
+ 'prompt_retry_check': prompt_retry_check,
+ 'check_all': check_all
+ }
+
+ if prompt is not None:
+ if isinstance(prompt, list):
+ kwargs['prompt'] = [to_bytes(p) for p in prompt]
+ else:
+ kwargs['prompt'] = to_bytes(prompt)
+ if answer is not None:
+ if isinstance(answer, list):
+ kwargs['answer'] = [to_bytes(p) for p in answer]
+ else:
+ kwargs['answer'] = to_bytes(answer)
+
+ resp = self._connection.send(**kwargs)
+
+ if not self.response_logging:
+ self.history.append(('*****', '*****'))
+ else:
+ self.history.append((kwargs['command'], resp))
+
+ return resp
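+
+ # Editor's sketch (not part of the upstream file): a caller answering a
+ # confirmation prompt via send_command; command and prompt are illustrative.
+ #
+ # cliconf.send_command(command='reload', prompt=r'\[confirm\]', answer='y')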
+
+ def get_base_rpc(self):
+ """Returns list of base rpc method supported by remote device"""
+ return self.__rpc__
+
+ def get_history(self):
+ """ Returns the history file for all commands
+
+ This will return a log of all the commands that have been sent to
+ the device and all of the output received. By default, all commands
+ and output will be redacted unless explicitly configured otherwise.
+
+ :return: An ordered list of command, output pairs
+ """
+ return self.history
+
+ def reset_history(self):
+ """ Resets the history of run commands
+ :return: None
+ """
+ self.history = list()
+
+ def enable_response_logging(self):
+ """Enable logging command response"""
+ self.response_logging = True
+
+ def disable_response_logging(self):
+ """Disable logging command response"""
+ self.response_logging = False
+
+ @abstractmethod
+ def get_config(self, source='running', flags=None, format=None):
+ """Retrieves the specified configuration from the device
+
+ This method will retrieve the configuration specified by source and
+ return it to the caller as a string. Subsequent calls to this method
+ will retrieve a new configuration from the device
+
+ :param source: The configuration source to return from the device.
+ This argument accepts either `running` or `startup` as valid values.
+
+ :param flags: For devices that support configuration filtering, this
+ keyword argument is used to filter the returned configuration.
+ The use of this keyword argument is device dependent and will be
+ silently ignored on devices that do not support it.
+
+ :param format: For devices that support fetching the configuration in
+ different formats, this keyword argument specifies the format in which
+ the configuration is to be retrieved.
+
+ :return: The device configuration as specified by the source argument.
+ """
+ pass
+
+ @abstractmethod
+ def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
+ """Loads the candidate configuration into the network device
+
+ This method will load the specified candidate config into the device
+ and merge it with the current configuration unless replace is set to
+ True. If the device does not support config replace, an error
+ is returned.
+
+ :param candidate: The configuration to load into the device and merge
+ with the current running configuration
+
+ :param commit: Boolean value that indicates if the device candidate
+ configuration should be pushed into the running configuration or discarded.
+
+ :param replace: A True/False value indicating whether the running configuration should be completely
+ replaced by the candidate configuration. It can also take a configuration file path as value;
+ in that case the file must already be present on the remote host at the mentioned path as a
+ prerequisite.
+ :param comment: Commit comment, provided it is supported by the remote host
+ :return: Returns a JSON string that contains the configuration applied on the remote host, the
+ responses from executing the configuration commands, and platform-relevant data.
+ {
+ "diff": "",
+ "response": [],
+ "request": []
+ }
+
+ """
+ pass
+
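+ # Usage sketch against a concrete platform implementation (illustrative;
+ # the candidate lines are platform-specific):
+ #
+ # result = conn.edit_config(
+ # candidate=['hostname test', 'netconf ssh'],
+ # commit=True,
+ # )
+ # # expected shape: '{"diff": "...", "response": [...], "request": [...]}'
+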
+ @abstractmethod
+ def get(self, command=None, prompt=None, answer=None, sendonly=False, newline=True, output=None, check_all=False):
+ """Execute specified command on remote device
+ This method will retrieve the specified data and
+ return it to the caller as a string.
+ :param command: command in string format to be executed on remote device
+ :param prompt: the expected prompt generated by executing command, this can
+ be a string or a list of strings
+ :param answer: the string to respond to the prompt with
+ :param sendonly: bool to disable waiting for response, default is false
+ :param newline: bool to indicate if newline should be added at end of answer or not
+ :param output: For devices that support fetching command output in
+ different formats, this keyword argument specifies the format in which
+ the response is to be retrieved.
+ :param check_all: Bool value to indicate if all the values in the prompt sequence should be matched
+ or any one of the given prompts.
+ :return: The output from the device after executing the command
+ """
+ pass
+
+ @abstractmethod
+ def get_capabilities(self):
+ """Returns the basic capabilities of the network device
+ This method will provide some basic facts about the device and
+ what capabilities it has to modify the configuration. The minimum
+ return from this method takes the following format.
+ eg:
+ {
+
+ 'rpc': [list of supported rpcs],
+ 'network_api': <str>, # the name of the transport
+ 'device_info': {
+ 'network_os': <str>,
+ 'network_os_version': <str>,
+ 'network_os_model': <str>,
+ 'network_os_hostname': <str>,
+ 'network_os_image': <str>,
+ 'network_os_platform': <str>,
+ },
+ 'device_operations': {
+ 'supports_diff_replace': <bool>, # identify if merging or replacing the config via a generated diff is supported
+ 'supports_commit': <bool>, # identify if commit is supported by device or not
+ 'supports_rollback': <bool>, # identify if rollback is supported or not
+ 'supports_defaults': <bool>, # identify if fetching running config with default is supported
+ 'supports_commit_comment': <bool>, # identify if adding comment to commit is supported or not
+ 'supports_onbox_diff': <bool>, # identify if on box diff capability is supported or not
+ 'supports_generate_diff': <bool>, # identify if diff capability is supported within plugin
+ 'supports_multiline_delimiter': <bool>, # identify if multiline delimiter is supported within config
+ 'supports_diff_match': <bool>, # identify if match is supported
+ 'supports_diff_ignore_lines': <bool>, # identify if ignore line in diff is supported
+ 'supports_config_replace': <bool>, # identify if running config replace with candidate config is supported
+ 'supports_admin': <bool>, # identify if admin configure mode is supported or not
+ 'supports_commit_label': <bool>, # identify if commit label is supported or not
+ },
+ 'format': [list of supported configuration formats],
+ 'diff_match': [list of supported match values],
+ 'diff_replace': [list of supported replace values],
+ 'output': [list of supported command output format]
+ }
+ :return: capability as json string
+ """
+ result = {}
+ result['rpc'] = self.get_base_rpc()
+ result['device_info'] = self.get_device_info()
+ result['network_api'] = 'cliconf'
+ return result
+
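+ # Subclasses typically extend this base result and serialize it, roughly
+ # as follows (illustrative sketch; the class name and extra keys are
+ # platform-specific):
+ #
+ # import json
+ #
+ # def get_capabilities(self):
+ # result = super(Cliconf, self).get_capabilities()
+ # result['device_operations'] = {'supports_commit': False}
+ # return json.dumps(result)
+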
+ @abstractmethod
+ def get_device_info(self):
+ """Returns basic information about the network device.
+
+ This method will provide basic information about the device such as OS version and model
+ name. This data is expected to be used to fill the 'device_info' key in get_capabilities()
+ above.
+
+ :return: dictionary of device information
+ """
+ pass
+
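+ # A minimal concrete implementation might look like this (illustrative;
+ # parsing the version out of device output is platform-specific):
+ #
+ # def get_device_info(self):
+ # reply = self.get('show version')
+ # return {
+ # 'network_os': 'myos',
+ # 'network_os_version': to_text(reply).strip(),
+ # }
+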
+ def commit(self, comment=None):
+ """Commit configuration changes
+
+ This method will perform the commit operation on a previously loaded
+ candidate configuration that was loaded using `edit_config()`. If
+ there is a candidate configuration, it will be committed to the
+ active configuration. If there is not a candidate configuration, this
+ method should just silently return.
+
+ :return: None
+ """
+ return self._connection.method_not_found("commit is not supported by network_os %s" % self._play_context.network_os)
+
+ def discard_changes(self):
+ """Discard candidate configuration
+
+ This method will discard the current candidate configuration if one
+ is present. If there is no candidate configuration currently loaded,
+ then this method should just silently return
+
+ :returns: None
+ """
+ return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._play_context.network_os)
+
+ def rollback(self, rollback_id, commit=True):
+ """
+
+ :param rollback_id: The commit id to which configuration should be rollbacked
+ :param commit: Flag to indicate if changes should be committed or not
+ :return: Returns diff between before and after change.
+ """
+ pass
+
+ def copy_file(self, source=None, destination=None, proto='scp', timeout=30):
+ """Copies file over scp/sftp to remote device
+
+ :param source: Source file path
+ :param destination: Destination file path on remote device
+ :param proto: Protocol to be used for file transfer;
+ supported protocols: scp and sftp
+ :param timeout: Specifies the wait time to receive response from
+ remote host before triggering timeout exception
+ :return: None
+ """
+ ssh = self._connection.paramiko_conn._connect_uncached()
+ if proto == 'scp':
+ if not HAS_SCP:
+ raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
+ with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
+ out = scp.put(source, destination)
+ elif proto == 'sftp':
+ with ssh.open_sftp() as sftp:
+ sftp.put(source, destination)
+
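+ # Usage sketch (illustrative paths; proto='scp' additionally requires the
+ # scp python library):
+ #
+ # conn.copy_file(source='/tmp/firmware.bin',
+ # destination='flash:/firmware.bin',
+ # proto='sftp', timeout=60)
+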
+ def get_file(self, source=None, destination=None, proto='scp', timeout=30):
+ """Fetch file over scp/sftp from remote device
+ :param source: Source file path
+ :param destination: Destination file path
+ :param proto: Protocol to be used for file transfer;
+ supported protocols: scp and sftp
+ :param timeout: Specifies the wait time to receive response from
+ remote host before triggering timeout exception
+ :return: None
+ """
+ """Fetch file over scp/sftp from remote device"""
+ ssh = self._connection.paramiko_conn._connect_uncached()
+ if proto == 'scp':
+ if not HAS_SCP:
+ raise AnsibleError("Required library scp is not installed. Please install it using `pip install scp`")
+ try:
+ with SCPClient(ssh.get_transport(), socket_timeout=timeout) as scp:
+ scp.get(source, destination)
+ except EOFError:
+ # This appears to be benign.
+ pass
+ elif proto == 'sftp':
+ with ssh.open_sftp() as sftp:
+ sftp.get(source, destination)
+
+ def get_diff(self, candidate=None, running=None, diff_match=None, diff_ignore_lines=None, path=None, diff_replace=None):
+ """
+ Generate diff between candidate and running configuration. If the
+ remote host supports onbox diff capabilities (supports_onbox_diff),
+ the candidate and running configurations are not required to be passed as arguments.
+ If onbox diff capability is not supported, the candidate argument is mandatory
+ and the running argument is optional.
+ :param candidate: The configuration which is expected to be present on remote host.
+ :param running: The base configuration which is used to generate diff.
+ :param diff_match: Instructs how to match the candidate configuration with current device configuration
+ Valid values are 'line', 'strict', 'exact', 'none'.
+ 'line' - commands are matched line by line
+ 'strict' - command lines are matched with respect to position
+ 'exact' - command lines must be an equal match
+ 'none' - will not compare the candidate configuration with the running configuration
+ :param diff_ignore_lines: Use this argument to specify one or more lines that should be
+ ignored during the diff. This is used for lines in the configuration
+ that are automatically updated by the system. This argument takes
+ a list of regular expressions or exact line matches.
+ :param path: The ordered set of parents that uniquely identify the section or hierarchy
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ :param diff_replace: Instructs on the way to perform the configuration on the device.
+ If the replace argument is set to I(line) then the modified lines are
+ pushed to the device in configuration mode. If the replace argument is
+ set to I(block) then the entire command block is pushed to the device in
+ configuration mode if any line is not correct.
+ :return: Configuration and/or banner diff in json format.
+ {
+ 'config_diff': ''
+ }
+
+ """
+ pass
+
+ def run_commands(self, commands=None, check_rc=True):
+ """
+ Execute a list of commands on the remote host and return the list of responses
+ :param commands: The list of commands to be executed on the remote host.
+ Each command in the list can either be a command string or a command dict.
+ If the command is a dict, the valid keys are
+ {
+ 'command': <command to be executed>
+ 'prompt': <expected prompt on executing the command>,
+ 'answer': <answer for the prompt>,
+ 'output': <the format in which command output should be rendered eg: 'json', 'text'>,
+ 'sendonly': <boolean flag to indicate if the command execution response should be ignored or not>
+ }
+ :param check_rc: Boolean flag indicating whether the returned responses should be checked for errors.
+ If check_rc is False the error output is appended to the returned response list; if the
+ value is True an exception is raised.
+ :return: List of returned response
+ """
+ pass
+
+ def check_edit_config_capability(self, operations, candidate=None, commit=True, replace=None, comment=None):
+
+ if not candidate and not replace:
+ raise ValueError("must provide a candidate or replace to load configuration")
+
+ if commit not in (True, False):
+ raise ValueError("'commit' must be a bool, got %s" % commit)
+
+ if replace and not operations.get('supports_replace', False):
+ raise ValueError("configuration replace is not supported")
+
+ if comment and not operations.get('supports_commit_comment', False):
+ raise ValueError("commit comment is not supported")
+
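+ # Usage sketch (illustrative): callers pass the platform's advertised
+ # device operations so unsupported options fail early:
+ #
+ # operations = {'supports_replace': False, 'supports_commit_comment': False}
+ # self.check_edit_config_capability(operations, candidate=['hostname r1'])
+ # # raises ValueError if replace/comment are requested but unsupported
+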
+ def set_cli_prompt_context(self):
+ """
+ Ensure the command prompt on the device is in the right mode
+ :return: None
+ """
+ pass
+
+ def _update_cli_prompt_context(self, config_context=None, exit_command='exit'):
+ """
+ Update the cli prompt context to ensure it is in operational mode
+ :param config_context: A string value used to identify if the current cli prompt ends with the config mode prompt
+ :param exit_command: Command to execute to exit the config mode
+ :return: None
+ """
+ out = self._connection.get_prompt()
+ if out is None:
+ raise AnsibleConnectionFailure(message=u'cli prompt is not identified from the last received'
+ u' response window: %s' % self._connection._last_recv_window)
+
+ while True:
+ out = to_text(out, errors='surrogate_then_replace').strip()
+ if config_context and out.endswith(config_context):
+ self._connection.queue_message('vvvv', 'wrong context, sending exit to device')
+ self.send_command(exit_command)
+ out = self._connection.get_prompt()
+ else:
+ break
diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py
new file mode 100644
index 00000000..b172c4ce
--- /dev/null
+++ b/lib/ansible/plugins/connection/__init__.py
@@ -0,0 +1,383 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017, Peter Sprygada <psprygad@redhat.com>
+# (c) 2017 Ansible Project
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fcntl
+import os
+import shlex
+
+from abc import abstractmethod, abstractproperty
+from functools import wraps
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.display import Display
+from ansible.plugins.loader import connection_loader, get_shell_plugin
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+__all__ = ['ConnectionBase', 'ensure_connect']
+
+BUFSIZE = 65536
+
+
+def ensure_connect(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ if not self._connected:
+ self._connect()
+ return func(self, *args, **kwargs)
+ return wrapped
+
+
+class ConnectionBase(AnsiblePlugin):
+ '''
+ A base class for connections to contain common code.
+ '''
+
+ has_pipelining = False
+ has_native_async = False # eg, winrm
+ always_pipeline_modules = False # eg, winrm
+ has_tty = True # for interacting with become plugins
+ # When running over this connection type, prefer modules written in a certain language
+ # as discovered by the specified file extension. An empty string as the
+ # language means any language.
+ module_implementation_preferences = ('',)
+ allow_executable = True
+
+ # the following control whether or not the connection supports the
+ # persistent connection framework or not
+ supports_persistence = False
+ force_persistence = False
+
+ default_user = None
+
+ def __init__(self, play_context, new_stdin, shell=None, *args, **kwargs):
+
+ super(ConnectionBase, self).__init__()
+
+ # All these hasattrs allow subclasses to override these parameters
+ if not hasattr(self, '_play_context'):
+ # Backwards compat: self._play_context isn't really needed, using set_options/get_option
+ self._play_context = play_context
+ if not hasattr(self, '_new_stdin'):
+ self._new_stdin = new_stdin
+ if not hasattr(self, '_display'):
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+ if not hasattr(self, '_connected'):
+ self._connected = False
+
+ self.success_key = None
+ self.prompt = None
+ self._connected = False
+ self._socket_path = None
+
+ # helper plugins
+ self._shell = shell
+
+ # we always must have shell
+ if not self._shell:
+ shell_type = play_context.shell if play_context.shell else getattr(self, '_shell_type', None)
+ self._shell = get_shell_plugin(shell_type=shell_type, executable=self._play_context.executable)
+
+ self.become = None
+
+ def set_become_plugin(self, plugin):
+ self.become = plugin
+
+ @property
+ def connected(self):
+ '''Read-only property holding whether the connection to the remote host is active or closed.'''
+ return self._connected
+
+ @property
+ def socket_path(self):
+ '''Read-only property holding the connection socket path for this remote host'''
+ return self._socket_path
+
+ @staticmethod
+ def _split_ssh_args(argstring):
+ """
+ Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
+ list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
+ the argument list. The list will not contain any empty elements.
+ """
+ try:
+ # Python 2.6.x shlex doesn't handle unicode type so we have to
+ # convert args to byte string for that case. More efficient to
+ # try without conversion first but python2.6 doesn't throw an
+ # exception, it merely mangles the output:
+ # >>> shlex.split(u't e')
+ # ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
+ return [to_text(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
+ except AttributeError:
+ # In Python3, shlex.split doesn't work on a byte string.
+ return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]
+
+ @abstractproperty
+ def transport(self):
+ """String used to identify this Connection class from other classes"""
+ pass
+
+ @abstractmethod
+ def _connect(self):
+ """Connect to the host we've been initialized with"""
+
+ @ensure_connect
+ @abstractmethod
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """Run a command on the remote host.
+
+ :arg cmd: byte string containing the command
+ :kwarg in_data: If set, this data is passed to the command's stdin.
+ This is used to implement pipelining. Currently not all
+ connection plugins implement pipelining.
+ :kwarg sudoable: Tell the connection plugin if we're executing
+ a command via a privilege escalation mechanism. This may affect
+ how the connection plugin returns data. Note that not all
+ connections can handle privilege escalation.
+ :returns: a tuple of (return code, stdout, stderr) The return code is
+ an int while stdout and stderr are both byte strings.
+
+ When a command is executed, it goes through multiple commands to get
+ there. It looks approximately like this::
+
+ [LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
+ :LocalShell: Is optional. It is run locally to invoke the
+ ``Connection Command``. In most instances, the
+ ``ConnectionCommand`` can be invoked directly instead. The ssh
+ connection plugin, which can have values specified via ssh_args
+ that need expanding locally, is the sole known exception to
+ this. Shell metacharacters in the command itself should be
+ processed on the remote machine, not on the local machine so no
+ shell is needed on the local machine. (Example, ``/bin/sh``)
+ :ConnectionCommand: This is the command that connects us to the remote
+ machine to run the rest of the command. ``ansible_user``,
+ ``ansible_ssh_host`` and so forth are fed to this piece of the
+ command to connect to the correct host (Examples ``ssh``,
+ ``chroot``)
+ :UsersLoginShell: This shell may or may not be created depending on
+ the ConnectionCommand used by the connection plugin. This is the
+ shell that the ``ansible_user`` has configured as their login
+ shell. In traditional UNIX parlance, this is the last field of
+ a user's ``/etc/passwd`` entry. We do not specifically try to run
+ the ``UsersLoginShell`` when we connect. Instead it is implicit
+ in the actions that the ``ConnectionCommand`` takes when it
+ connects to a remote machine. ``ansible_shell_type`` may be set
+ to inform ansible of differences in how the ``UsersLoginShell``
+ handles things like quoting if a shell has different semantics
+ than the Bourne shell.
+ :ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
+ ``ansible_shell_executable`` or via
+ ``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
+ We explicitly invoke this shell so that we have predictable
+ quoting rules at this point. ``ANSIBLE_SHELL_EXECUTABLE`` is only
+ settable by the user because some sudo setups may only allow
+ invoking a specific shell. (For instance, ``/bin/bash`` may be
+ allowed but ``/bin/sh``, our default, may not). We invoke this
+ twice, once after the ``ConnectionCommand`` and once after the
+ ``BecomeCommand``. After the ConnectionCommand, this is run by
+ the ``UsersLoginShell``. After the ``BecomeCommand`` we specify
+ that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
+ :BecomeCommand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
+ privilege escalation. Setting this up is performed by the action
+ plugin prior to running ``exec_command``. So we just get passed
+ :param:`cmd` which has the BecomeCommand already added.
+ (Examples: sudo, su) If we have a BecomeCommand then we will
+ invoke an ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
+ have a consistent view of quoting.
+ :Command: Is the command we're actually trying to run remotely.
+ (Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
+ """
+ pass
+
+ @ensure_connect
+ @abstractmethod
+ def put_file(self, in_path, out_path):
+ """Transfer a file from local to remote"""
+ pass
+
+ @ensure_connect
+ @abstractmethod
+ def fetch_file(self, in_path, out_path):
+ """Fetch a file from remote to local; callers are expected to have pre-created the directory chain for out_path"""
+ pass
+
+ @abstractmethod
+ def close(self):
+ """Terminate the connection"""
+ pass
+
+ def connection_lock(self):
+ f = self._play_context.connection_lockfd
+ display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
+
+ def connection_unlock(self):
+ f = self._play_context.connection_lockfd
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
+
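+ # Usage sketch (illustrative): lock and unlock should always be paired so
+ # the play-wide connection lockfile is released on error:
+ #
+ # self.connection_lock()
+ # try:
+ # pass # critical section, e.g. prompting the user for host key approval
+ # finally:
+ # self.connection_unlock()
+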
+ def reset(self):
+ display.warning("Reset is not implemented for this connection")
+
+ # NOTE: these password functions are all become specific, the name is
+ # confusing as it does not handle 'protocol passwords'
+ # DEPRECATED:
+ # These are kept for backwards compatibility
+ # Use the methods provided by the become plugins instead
+ def check_become_success(self, b_output):
+ display.deprecated(
+ "Connection.check_become_success is deprecated, calling code should be using become plugins instead",
+ version="2.12", collection_name='ansible.builtin'
+ )
+ return self.become.check_success(b_output)
+
+ def check_password_prompt(self, b_output):
+ display.deprecated(
+ "Connection.check_password_prompt is deprecated, calling code should be using become plugins instead",
+ version="2.12", collection_name='ansible.builtin'
+ )
+ return self.become.check_password_prompt(b_output)
+
+ def check_incorrect_password(self, b_output):
+ display.deprecated(
+ "Connection.check_incorrect_password is deprecated, calling code should be using become plugins instead",
+ version="2.12", collection_name='ansible.builtin'
+ )
+ return self.become.check_incorrect_password(b_output)
+
+ def check_missing_password(self, b_output):
+ display.deprecated(
+ "Connection.check_missing_password is deprecated, calling code should be using become plugins instead",
+ version="2.12", collection_name='ansible.builtin'
+ )
+ return self.become.check_missing_password(b_output)
+
+
+class NetworkConnectionBase(ConnectionBase):
+ """
+ A base class for network-style connections.
+ """
+
+ force_persistence = True
+ # Do not use _remote_is_local in other connections
+ _remote_is_local = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(NetworkConnectionBase, self).__init__(play_context, new_stdin, *args, **kwargs)
+ self._messages = []
+ self._conn_closed = False
+
+ self._network_os = self._play_context.network_os
+
+ self._local = connection_loader.get('local', play_context, '/dev/null')
+ self._local.set_options()
+
+ self._sub_plugin = {}
+ self._cached_variables = (None, None, None)
+
+ # reconstruct the socket_path and set instance values accordingly
+ self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid')
+ self._update_connection_state()
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if not name.startswith('_'):
+ plugin = self._sub_plugin.get('obj')
+ if plugin:
+ method = getattr(plugin, name, None)
+ if method is not None:
+ return method
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
+
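+ # Delegation sketch (illustrative): once a sub-plugin such as a cliconf
+ # instance is attached, public attribute lookups fall through to it, e.g.
+ # connection.get_capabilities() resolves to the sub-plugin's method even
+ # though NetworkConnectionBase does not define it; names starting with '_'
+ # are never delegated.
+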
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ return self._local.exec_command(cmd, in_data, sudoable)
+
+ def queue_message(self, level, message):
+ """
+ Adds a message to the queue of messages waiting to be pushed back to the controller process.
+
+ :arg level: A string which can either be the name of a method in display, or 'log'. When
+ the messages are returned to task_executor, a value of log will correspond to
+ ``display.display(message, log_only=True)``, while another value will call ``display.[level](message)``
+ """
+ self._messages.append((level, message))
+
+ def pop_messages(self):
+ messages, self._messages = self._messages, []
+ return messages
+
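+ # Usage sketch (illustrative): messages accumulate until the task executor
+ # drains them:
+ #
+ # self.queue_message('vvvv', 'negotiated terminal parameters')
+ # self.queue_message('log', 'raw response received')
+ # for level, msg in self.pop_messages():
+ # pass # forwarded to display.<level>(), or display.display(log_only=True) for 'log'
+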
+ def put_file(self, in_path, out_path):
+ """Transfer a file from local to remote"""
+ return self._local.put_file(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ """Fetch a file from remote to local"""
+ return self._local.fetch_file(in_path, out_path)
+
+ def reset(self):
+ '''
+ Reset the connection
+ '''
+ if self._socket_path:
+ self.queue_message('vvvv', 'resetting persistent connection for socket_path %s' % self._socket_path)
+ self.close()
+ self.queue_message('vvvv', 'reset call on connection instance')
+
+ def close(self):
+ self._conn_closed = True
+ if self._connected:
+ self._connected = False
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(NetworkConnectionBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+ if self.get_option('persistent_log_messages'):
+ warning = "Persistent connection logging is enabled for %s. This will log ALL interactions" % self._play_context.remote_addr
+ logpath = getattr(C, 'DEFAULT_LOG_PATH')
+ if logpath is not None:
+ warning += " to %s" % logpath
+ self.queue_message('warning', "%s and WILL NOT redact sensitive configuration like passwords. USE WITH CAUTION!" % warning)
+
+ if self._sub_plugin.get('obj') and self._sub_plugin.get('type') != 'external':
+ try:
+ self._sub_plugin['obj'].set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+ except AttributeError:
+ pass
+
+ def _update_connection_state(self):
+ '''
+ Reconstruct the connection socket_path and check if it exists
+
+ If the socket path exists then the connection is active: set
+ both the _socket_path value to the path and the _connected value
+ to True. If the socket path doesn't exist, leave the socket path
+ value as None and the _connected value as False.
+ '''
+ ssh = connection_loader.get('ssh', class_only=True)
+ control_path = ssh._create_control_path(
+ self._play_context.remote_addr, self._play_context.port,
+ self._play_context.remote_user, self._play_context.connection,
+ self._ansible_playbook_pid
+ )
+
+ tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
+ socket_path = unfrackpath(control_path % dict(directory=tmp_path))
+
+ if os.path.exists(socket_path):
+ self._connected = True
+ self._socket_path = socket_path
+
+ def _log_messages(self, message):
+ if self.get_option('persistent_log_messages'):
+ self.queue_message('log', message)
diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py
new file mode 100644
index 00000000..29505cc2
--- /dev/null
+++ b/lib/ansible/plugins/connection/local.py
@@ -0,0 +1,162 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ connection: local
+ short_description: execute on controller
+ description:
+ - This connection plugin allows ansible to execute tasks on the Ansible 'controller' instead of on a remote host.
+ author: ansible (@core)
+ version_added: historical
+ notes:
+ - The remote user is ignored, the user with which the ansible CLI was executed is used instead.
+'''
+
+import os
+import shutil
+import subprocess
+import fcntl
+import getpass
+
+import ansible.constants as C
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local based connections '''
+
+ transport = 'local'
+ has_pipelining = True
+
+ def __init__(self, *args, **kwargs):
+
+ super(Connection, self).__init__(*args, **kwargs)
+ self.cwd = None
+ self.default_user = getpass.getuser()
+
+ def _connect(self):
+ ''' connect to the local host; nothing to do here '''
+
+ # Because we haven't made any remote connection we're running as
+ # the local user, rather than as whatever is configured in remote_user.
+ self._play_context.remote_user = self.default_user
+
+ if not self._connected:
+ display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the local host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.debug("in local.exec_command()")
+
+ executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
+
+ if executable is not None and not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
+ raise AnsibleError("failed to find the executable specified %s."
+ " Please verify if the executable exists and re-try." % executable)
+
+ display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd = to_bytes(cmd)
+ else:
+ cmd = map(to_bytes, cmd)
+
+ p = subprocess.Popen(
+ cmd,
+ shell=isinstance(cmd, (text_type, binary_type)),
+ executable=executable,
+ cwd=self.cwd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ display.debug("done with local.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to local '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ in_path = unfrackpath(in_path, basedir=self.cwd)
+ out_path = unfrackpath(out_path, basedir=self.cwd)
+
+ display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
+ try:
+ shutil.copyfile(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
+ except shutil.Error:
+ raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_native(in_path), to_native(out_path)))
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from local to local -- for compatibility '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ self.put_file(in_path, out_path)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
new file mode 100644
index 00000000..96a76d67
--- /dev/null
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -0,0 +1,607 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author: Ansible Core Team
+ connection: paramiko
+ short_description: Run tasks via python ssh (paramiko)
+ description:
+ - Use the python ssh implementation (Paramiko) to connect to targets
+ - The paramiko transport is provided because many distributions, in particular EL6 and before, do not support ControlPersist
+ in their SSH implementations.
+ - ControlPersist is needed on the Ansible control machine to be reasonably efficient with connections,
+ so paramiko is faster for most users on these platforms.
+ Users with ControlPersist capability can consider using -c ssh or configuring the transport in the configuration file.
+ - This plugin also borrows a lot of settings from the ssh plugin as they both cover the same protocol.
+ version_added: "0.1"
+ options:
+ remote_addr:
+ description:
+ - Address of the remote target
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_ssh_host
+ - name: ansible_paramiko_host
+ remote_user:
+ description:
+ - User to login/authenticate as
+ - Can be set from the CLI via the C(--user) or C(-u) options.
+ vars:
+ - name: ansible_user
+ - name: ansible_ssh_user
+ - name: ansible_paramiko_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ - name: ANSIBLE_PARAMIKO_REMOTE_USER
+ version_added: '2.5'
+ ini:
+ - section: defaults
+ key: remote_user
+ - section: paramiko_connection
+ key: remote_user
+ version_added: '2.5'
+ password:
+ description:
+ - Secret used to either log in to the ssh server or as a passphrase for ssh keys that require it
+ - Can be set from the CLI via the C(--ask-pass) option.
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ - name: ansible_paramiko_pass
+ - name: ansible_paramiko_password
+ version_added: '2.5'
+ host_key_auto_add:
+ description: 'TODO: write it'
+ env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
+ ini:
+ - {key: host_key_auto_add, section: paramiko_connection}
+ type: boolean
+ look_for_keys:
+ default: True
+ description: 'TODO: write it'
+ env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
+ ini:
+ - {key: look_for_keys, section: paramiko_connection}
+ type: boolean
+ proxy_command:
+ default: ''
+ description:
+ - Proxy information for running the connection via a jumphost
+ - Also this plugin will scan 'ssh_args', 'ssh_extra_args' and 'ssh_common_args' from the 'ssh' plugin settings for proxy information if set.
+ env: [{name: ANSIBLE_PARAMIKO_PROXY_COMMAND}]
+ ini:
+ - {key: proxy_command, section: paramiko_connection}
+ pty:
+ default: True
+ description: 'TODO: write it'
+ env:
+ - name: ANSIBLE_PARAMIKO_PTY
+ ini:
+ - section: paramiko_connection
+ key: pty
+ type: boolean
+ record_host_keys:
+ default: True
+ description: 'TODO: write it'
+ env: [{name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS}]
+ ini:
+ - section: paramiko_connection
+ key: record_host_keys
+ type: boolean
+ host_key_checking:
+ description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
+ type: boolean
+ default: True
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ version_added: '2.5'
+ - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
+ version_added: '2.5'
+ ini:
+ - section: defaults
+ key: host_key_checking
+ - section: paramiko_connection
+ key: host_key_checking
+ version_added: '2.5'
+ vars:
+ - name: ansible_host_key_checking
+ version_added: '2.5'
+ - name: ansible_ssh_host_key_checking
+ version_added: '2.5'
+ - name: ansible_paramiko_host_key_checking
+ version_added: '2.5'
+ use_persistent_connections:
+ description: 'Toggles the use of persistence for connections'
+ type: boolean
+ default: False
+ env:
+ - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
+ ini:
+ - section: defaults
+ key: use_persistent_connections
+# TODO:
+#timeout=self._play_context.timeout,
+"""
+
+import os
+import socket
+import tempfile
+import traceback
+import fcntl
+import sys
+import re
+
+from termios import tcflush, TCIFLUSH
+from distutils.version import LooseVersion
+from binascii import hexlify
+
+from ansible.errors import (
+ AnsibleAuthenticationFailure,
+ AnsibleConnectionFailure,
+ AnsibleError,
+ AnsibleFileNotFound,
+)
+from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import input
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import makedirs_safe
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+display = Display()
+
+
+AUTHENTICITY_MSG = """
+paramiko: The authenticity of host '%s' can't be established.
+The %s key fingerprint is %s.
+Are you sure you want to continue connecting (yes/no)?
+"""
+
+# SSH Options Regex
+SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
+
+
+class MyAddPolicy(object):
+ """
+ Based on AutoAddPolicy in paramiko so we can determine when keys are added
+
+ and also prompt for input.
+
+ Policy for automatically adding the hostname and new host key to the
+ local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
+ """
+
+ def __init__(self, new_stdin, connection):
+ self._new_stdin = new_stdin
+ self.connection = connection
+ self._options = connection._options
+
+ def missing_host_key(self, client, hostname, key):
+
+ if all((self._options['host_key_checking'], not self._options['host_key_auto_add'])):
+
+ fingerprint = hexlify(key.get_fingerprint())
+ ktype = key.get_name()
+
+ if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
+ # don't print the prompt string since the user cannot respond
+ # to the question anyway
+ raise AnsibleError(AUTHENTICITY_MSG[1:92] % (hostname, ktype, fingerprint))
+
+ self.connection.connection_lock()
+
+ old_stdin = sys.stdin
+ sys.stdin = self._new_stdin
+
+ # clear out any premature input on sys.stdin
+ tcflush(sys.stdin, TCIFLUSH)
+
+ inp = input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
+ sys.stdin = old_stdin
+
+ self.connection.connection_unlock()
+
+ if inp not in ['yes', 'y', '']:
+ raise AnsibleError("host connection rejected by user")
+
+ key._added_by_ansible_this_time = True
+
+ # existing implementation below:
+ client._host_keys.add(hostname, key.get_name(), key)
+
+ # host keys are actually saved in close() function below
+ # in order to control ordering.
+
+
+# keep connection objects on a per host basis to avoid repeated attempts to reconnect
+
+SSH_CONNECTION_CACHE = {}
+SFTP_CONNECTION_CACHE = {}
+
+
+class Connection(ConnectionBase):
+ ''' SSH based connections with Paramiko '''
+
+ transport = 'paramiko'
+ _log_channel = None
+
+ def _cache_key(self):
+ return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
+
+ def _connect(self):
+ cache_key = self._cache_key()
+ if cache_key in SSH_CONNECTION_CACHE:
+ self.ssh = SSH_CONNECTION_CACHE[cache_key]
+ else:
+ self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
+ return self
+
+ def _set_log_channel(self, name):
+ '''Mimic paramiko.SSHClient.set_log_channel'''
+ self._log_channel = name
+
+ def _parse_proxy_command(self, port=22):
+ proxy_command = None
+ # Parse ansible_ssh_common_args, specifically looking for ProxyCommand
+ ssh_args = [
+ getattr(self._play_context, 'ssh_extra_args', '') or '',
+ getattr(self._play_context, 'ssh_common_args', '') or '',
+ getattr(self._play_context, 'ssh_args', '') or '',
+ ]
+
+ args = self._split_ssh_args(' '.join(ssh_args))
+ for i, arg in enumerate(args):
+ if arg.lower() == 'proxycommand':
+ # _split_ssh_args split ProxyCommand from the command itself
+ proxy_command = args[i + 1]
+ else:
+ # ProxyCommand and the command itself are a single string
+ match = SETTINGS_REGEX.match(arg)
+ if match:
+ if match.group(1).lower() == 'proxycommand':
+ proxy_command = match.group(2)
+
+ if proxy_command:
+ break
+
+ proxy_command = proxy_command or self.get_option('proxy_command')
+
+ sock_kwarg = {}
+ if proxy_command:
+ replacers = {
+ '%h': self._play_context.remote_addr,
+ '%p': port,
+ '%r': self._play_context.remote_user
+ }
+ for find, replace in replacers.items():
+ proxy_command = proxy_command.replace(find, str(replace))
+ try:
+ sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
+ display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr)
+ except AttributeError:
+ display.warning('Paramiko ProxyCommand support unavailable. '
+ 'Please upgrade to Paramiko 1.9.0 or newer. '
+ 'Not using configured ProxyCommand')
+
+ return sock_kwarg
+
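+ # Example substitution (illustrative jumphost): with proxy_command set to
+ # 'ssh -W %h:%p jumphost.example.com', connecting to 192.0.2.10:22 yields
+ # 'ssh -W 192.0.2.10:22 jumphost.example.com', which is wrapped in
+ # paramiko.ProxyCommand and handed to SSHClient.connect() as 'sock'.
+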
+ def _connect_uncached(self):
+ ''' activates the connection object '''
+
+ if paramiko is None:
+ raise AnsibleError("paramiko is not installed: %s" % to_native(PARAMIKO_IMPORT_ERR))
+
+ port = self._play_context.port or 22
+ display.vvv("ESTABLISH PARAMIKO SSH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr),
+ host=self._play_context.remote_addr)
+
+ ssh = paramiko.SSHClient()
+
+ # override paramiko's default logger name
+ if self._log_channel is not None:
+ ssh.set_log_channel(self._log_channel)
+
+ self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
+
+ if self.get_option('host_key_checking'):
+ for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
+ try:
+ # TODO: check if we need to look at several possible locations, possible for loop
+ ssh.load_system_host_keys(ssh_known_hosts)
+ break
+ except IOError:
+ pass # file was not found, but not required to function
+ ssh.load_system_host_keys()
+
+ ssh_connect_kwargs = self._parse_proxy_command(port)
+
+ ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))
+
+ conn_password = self.get_option('password') or self._play_context.password
+
+ allow_agent = True
+
+ if conn_password is not None:
+ allow_agent = False
+
+ try:
+ key_filename = None
+ if self._play_context.private_key_file:
+ key_filename = os.path.expanduser(self._play_context.private_key_file)
+
+ # paramiko 2.2 introduced auth_timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'):
+ ssh_connect_kwargs['auth_timeout'] = self._play_context.timeout
+
+ ssh.connect(
+ self._play_context.remote_addr.lower(),
+ username=self._play_context.remote_user,
+ allow_agent=allow_agent,
+ look_for_keys=self.get_option('look_for_keys'),
+ key_filename=key_filename,
+ password=conn_password,
+ timeout=self._play_context.timeout,
+ port=port,
+ **ssh_connect_kwargs
+ )
+ except paramiko.ssh_exception.BadHostKeyException as e:
+ raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
+ except paramiko.ssh_exception.AuthenticationException as e:
+ msg = 'Failed to authenticate: {0}'.format(to_text(e))
+ raise AnsibleAuthenticationFailure(msg)
+ except Exception as e:
+ msg = to_text(e)
+ if u"PID check failed" in msg:
+ raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
+ elif u"Private key file is encrypted" in msg:
+ msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
+ self._play_context.remote_user, self._play_context.remote_addr, port, msg)
+ raise AnsibleConnectionFailure(msg)
+ else:
+ raise AnsibleConnectionFailure(msg)
+
+ return ssh
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the remote host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ bufsize = 4096
+
+ try:
+ self.ssh.get_transport().set_keepalive(5)
+ chan = self.ssh.get_transport().open_session()
+ except Exception as e:
+ text_e = to_text(e)
+ msg = u"Failed to open session"
+ if text_e:
+ msg += u": %s" % text_e
+ raise AnsibleConnectionFailure(to_native(msg))
+
+ # sudo usually requires a PTY (cf. requiretty option), therefore
+ # we give it one by default (pty=True in ansible.cfg), and we try
+ # to initialise from the calling environment when sudoable is enabled
+ if self.get_option('pty') and sudoable:
+ chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
+
+ display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
+
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+
+ no_prompt_out = b''
+ no_prompt_err = b''
+ become_output = b''
+
+ try:
+ chan.exec_command(cmd)
+ if self.become and self.become.expect_prompt():
+ passprompt = False
+ become_success = False
+ while not (become_success or passprompt):
+ display.debug('Waiting for Privilege Escalation input')
+
+ chunk = chan.recv(bufsize)
+ display.debug("chunk is: %s" % chunk)
+ if not chunk:
+ if b'unknown user' in become_output:
+ n_become_user = to_native(self.become.get_option('become_user',
+ playcontext=self._play_context))
+ raise AnsibleError('user %s does not exist' % n_become_user)
+ else:
+ break
+ # raise AnsibleError('ssh connection closed waiting for password prompt')
+ become_output += chunk
+
+ # need to check every line because we might get lectured
+ # and we might get the middle of a line in a chunk
+ for l in become_output.splitlines(True):
+ if self.become.check_success(l):
+ become_success = True
+ break
+ elif self.become.check_password_prompt(l):
+ passprompt = True
+ break
+
+ if passprompt:
+ if self.become:
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ chan.sendall(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ else:
+ raise AnsibleError("A password is required but none was supplied")
+ else:
+ no_prompt_out += become_output
+ no_prompt_err += become_output
+ except socket.timeout:
+ raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + to_native(become_output))
+
+ stdout = b''.join(chan.makefile('rb', bufsize))
+ stderr = b''.join(chan.makefile_stderr('rb', bufsize))
+
+ return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_err + stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+
+ try:
+ self.sftp = self.ssh.open_sftp()
+ except Exception as e:
+ raise AnsibleError("failed to open a SFTP connection (%s)" % e)
+
+ try:
+ self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
+ except IOError:
+ raise AnsibleError("failed to transfer file to %s" % out_path)
+
+ def _connect_sftp(self):
+
+ cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
+ if cache_key in SFTP_CONNECTION_CACHE:
+ return SFTP_CONNECTION_CACHE[cache_key]
+ else:
+ result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
+ return result
+
+ def fetch_file(self, in_path, out_path):
+ ''' save a remote file to the specified path '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ try:
+ self.sftp = self._connect_sftp()
+ except Exception as e:
+ raise AnsibleError("failed to open a SFTP connection (%s)" % to_native(e))
+
+ try:
+ self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
+ except IOError:
+ raise AnsibleError("failed to transfer file from %s" % in_path)
+
+ def _any_keys_added(self):
+
+ for hostname, keys in iteritems(self.ssh._host_keys):
+ for keytype, key in iteritems(keys):
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ return True
+ return False
+
+ def _save_ssh_host_keys(self, filename):
+ '''
+ not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
+ don't complain about it :)
+ '''
+
+ if not self._any_keys_added():
+ return False
+
+ path = os.path.expanduser("~/.ssh")
+ makedirs_safe(path)
+
+ with open(filename, 'w') as f:
+
+ for hostname, keys in iteritems(self.ssh._host_keys):
+
+ for keytype, key in iteritems(keys):
+
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if not added_this_time:
+ f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
+
+ for hostname, keys in iteritems(self.ssh._host_keys):
+
+ for keytype, key in iteritems(keys):
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
+
+ def reset(self):
+ self.close()
+ self._connect()
+
+ def close(self):
+ ''' terminate the connection '''
+
+ cache_key = self._cache_key()
+ SSH_CONNECTION_CACHE.pop(cache_key, None)
+ SFTP_CONNECTION_CACHE.pop(cache_key, None)
+
+ if hasattr(self, 'sftp'):
+ if self.sftp is not None:
+ self.sftp.close()
+
+ if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
+
+ # add any new SSH host keys -- warning -- this could be slow
+ # (This doesn't acquire the connection lock because it needs
+ # to exclude only other known_hosts writers, not connections
+ # that are starting up.)
+ lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
+ dirname = os.path.dirname(self.keyfile)
+ makedirs_safe(dirname)
+
+ KEY_LOCK = open(lockfile, 'w')
+ fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
+
+ try:
+ # just in case any were added recently
+
+ self.ssh.load_system_host_keys()
+ self.ssh._host_keys.update(self.ssh._system_host_keys)
+
+ # gather information about the current key file, so
+ # we can ensure the new file has the correct mode/owner
+
+ key_dir = os.path.dirname(self.keyfile)
+ if os.path.exists(self.keyfile):
+ key_stat = os.stat(self.keyfile)
+ mode = key_stat.st_mode
+ uid = key_stat.st_uid
+ gid = key_stat.st_gid
+ else:
+ mode = 33188 # 0o100644, i.e. a regular file with rw-r--r-- permissions
+ uid = os.getuid()
+ gid = os.getgid()
+
+ # Save the new keys to a temporary file and move it into place
+ # rather than rewriting the file. We set delete=False because
+ # the file will be moved into place rather than cleaned up.
+
+ tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
+ os.chmod(tmp_keyfile.name, mode & 0o7777)
+ os.chown(tmp_keyfile.name, uid, gid)
+
+ self._save_ssh_host_keys(tmp_keyfile.name)
+ tmp_keyfile.close()
+
+ os.rename(tmp_keyfile.name, self.keyfile)
+
+ except Exception:
+
+ # unable to save keys, including scenario when key was invalid
+ # and caught earlier
+ traceback.print_exc()
+ fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
+
+ self.ssh.close()
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/psrp.py b/lib/ansible/plugins/connection/psrp.py
new file mode 100644
index 00000000..9fab9693
--- /dev/null
+++ b/lib/ansible/plugins/connection/psrp.py
@@ -0,0 +1,954 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+author: Ansible Core Team
+connection: psrp
+short_description: Run tasks over Microsoft PowerShell Remoting Protocol
+description:
+- Run commands or put/fetch on a target via PSRP (WinRM plugin)
+- This is similar to the I(winrm) connection plugin which uses the same
+ underlying transport but instead runs in a PowerShell interpreter.
+version_added: "2.7"
+requirements:
+- pypsrp (Python library)
+options:
+ # transport options
+ remote_addr:
+ description:
+ - The hostname or IP address of the remote host.
+ default: inventory_hostname
+ type: str
+ vars:
+ - name: ansible_host
+ - name: ansible_psrp_host
+ remote_user:
+ description:
+ - The user to log in as.
+ type: str
+ vars:
+ - name: ansible_user
+ - name: ansible_psrp_user
+ remote_password:
+ description: Authentication password for the C(remote_user). Can be supplied as CLI option.
+ type: str
+ vars:
+ - name: ansible_password
+ - name: ansible_winrm_pass
+ - name: ansible_winrm_password
+ aliases:
+ - password # Needed for --ask-pass to come through on delegation
+ port:
+ description:
+ - The port for PSRP to connect on the remote target.
+ - Default is C(5986) if I(protocol) is not defined or is C(https),
+ otherwise the port is C(5985).
+ type: int
+ vars:
+ - name: ansible_port
+ - name: ansible_psrp_port
+ protocol:
+ description:
+ - Set the protocol to use for the connection.
+ - Default is C(https) if I(port) is not defined or I(port) is not C(5985).
+ choices:
+ - http
+ - https
+ type: str
+ vars:
+ - name: ansible_psrp_protocol
+ path:
+ description:
+ - The URI path to connect to.
+ type: str
+ vars:
+ - name: ansible_psrp_path
+ default: 'wsman'
+ auth:
+ description:
+ - The authentication protocol to use when authenticating the remote user.
+ - The default, C(negotiate), will attempt to use C(Kerberos) if it is
+ available and fall back to C(NTLM) if it isn't.
+ type: str
+ vars:
+ - name: ansible_psrp_auth
+ choices:
+ - basic
+ - certificate
+ - negotiate
+ - kerberos
+ - ntlm
+ - credssp
+ default: negotiate
+ cert_validation:
+ description:
+ - Whether to validate the remote server's certificate or not.
+ - Set to C(ignore) to not validate any certificates.
+ - I(ca_cert) can be set to the path of a PEM certificate chain to
+ use in the validation.
+ choices:
+ - validate
+ - ignore
+ default: validate
+ type: str
+ vars:
+ - name: ansible_psrp_cert_validation
+ ca_cert:
+ description:
+ - The path to a PEM certificate chain to use when validating the server's
+ certificate.
+ - This value is ignored if I(cert_validation) is set to C(ignore).
+ type: path
+ vars:
+ - name: ansible_psrp_cert_trust_path
+ - name: ansible_psrp_ca_cert
+ aliases: [ cert_trust_path ]
+ connection_timeout:
+ description:
+ - The connection timeout for making the request to the remote host.
+ - This is measured in seconds.
+ type: int
+ vars:
+ - name: ansible_psrp_connection_timeout
+ default: 30
+ read_timeout:
+ description:
+ - The read timeout for receiving data from the remote host.
+ - This value must always be greater than I(operation_timeout).
+ - This option requires pypsrp >= 0.3.
+ - This is measured in seconds.
+ type: int
+ vars:
+ - name: ansible_psrp_read_timeout
+ default: 30
+ version_added: '2.8'
+ reconnection_retries:
+ description:
+ - The number of retries on connection errors.
+ type: int
+ vars:
+ - name: ansible_psrp_reconnection_retries
+ default: 0
+ version_added: '2.8'
+ reconnection_backoff:
+ description:
+ - The backoff time to use in between reconnection attempts.
+ (First sleeps X, then sleeps 2*X, then sleeps 4*X, ...)
+ - This is measured in seconds.
+ - The C(ansible_psrp_reconnection_backoff) variable was added in Ansible
+ 2.9.
+ type: int
+ vars:
+ - name: ansible_psrp_connection_backoff
+ - name: ansible_psrp_reconnection_backoff
+ default: 2
+ version_added: '2.8'
+ message_encryption:
+ description:
+ - Controls the message encryption settings; this is different from TLS
+ encryption when I(ansible_psrp_protocol) is C(https).
+ - Only the auth protocols C(negotiate), C(kerberos), C(ntlm), and
+ C(credssp) can do message encryption. The other authentication protocols
+ only support encryption when C(protocol) is set to C(https).
+ - C(auto) means message encryption is only used when not using
+ TLS/HTTPS.
+ - C(always) is the same as C(auto) but message encryption is always used
+ even when running over TLS/HTTPS.
+ - C(never) disables any encryption checks that are in place when running
+ over HTTP and disables any authentication encryption processes.
+ type: str
+ vars:
+ - name: ansible_psrp_message_encryption
+ choices:
+ - auto
+ - always
+ - never
+ default: auto
+ proxy:
+ description:
+ - Set the proxy URL to use when connecting to the remote host.
+ vars:
+ - name: ansible_psrp_proxy
+ type: str
+ ignore_proxy:
+ description:
+ - Will disable any environment proxy settings and connect directly to the
+ remote host.
+ - This option is ignored if C(proxy) is set.
+ vars:
+ - name: ansible_psrp_ignore_proxy
+ type: bool
+ default: 'no'
+
+ # auth options
+ certificate_key_pem:
+ description:
+ - The local path to an X509 certificate key to use with certificate auth.
+ type: path
+ vars:
+ - name: ansible_psrp_certificate_key_pem
+ certificate_pem:
+ description:
+ - The local path to an X509 certificate to use with certificate auth.
+ type: path
+ vars:
+ - name: ansible_psrp_certificate_pem
+ credssp_auth_mechanism:
+ description:
+ - The sub authentication mechanism to use with CredSSP auth.
+ - When C(auto), both Kerberos and NTLM are attempted, with Kerberos
+ being preferred.
+ type: str
+ choices:
+ - auto
+ - kerberos
+ - ntlm
+ default: auto
+ vars:
+ - name: ansible_psrp_credssp_auth_mechanism
+ credssp_disable_tlsv1_2:
+ description:
+ - Disables the use of TLSv1.2 on the CredSSP authentication channel.
+ - This should not be set to C(yes) unless dealing with a host that does not
+ have TLSv1.2.
+ default: no
+ type: bool
+ vars:
+ - name: ansible_psrp_credssp_disable_tlsv1_2
+ credssp_minimum_version:
+ description:
+ - The minimum CredSSP server authentication version that will be accepted.
+ - Set to C(5) to ensure the server has been patched and is not vulnerable
+ to CVE-2018-0886.
+ default: 2
+ type: int
+ vars:
+ - name: ansible_psrp_credssp_minimum_version
+ negotiate_delegate:
+ description:
+ - Allow the remote user to delegate its credentials to another server,
+ i.e. credential delegation.
+ - Only valid when Kerberos was the negotiated auth or was explicitly set as
+ the authentication.
+ - Ignored when NTLM was the negotiated auth.
+ type: bool
+ vars:
+ - name: ansible_psrp_negotiate_delegate
+ negotiate_hostname_override:
+ description:
+ - Override the remote hostname when searching for the host in the Kerberos
+ lookup.
+ - This allows Ansible to connect over IP but authenticate with the remote
+ server using its DNS name.
+ - Only valid when Kerberos was the negotiated auth or was explicitly set as
+ the authentication.
+ - Ignored when NTLM was the negotiated auth.
+ type: str
+ vars:
+ - name: ansible_psrp_negotiate_hostname_override
+ negotiate_send_cbt:
+ description:
+ - Send the Channel Binding Token (CBT) structure when authenticating.
+ - CBT is used to provide extra protection against Man-in-the-Middle C(MitM)
+ attacks by binding the outer transport channel to the auth channel.
+ - CBT is not used when using just C(HTTP), only C(HTTPS).
+ default: yes
+ type: bool
+ vars:
+ - name: ansible_psrp_negotiate_send_cbt
+ negotiate_service:
+ description:
+ - Override the service part of the SPN used during Kerberos authentication.
+ - Only valid when Kerberos was the negotiated auth or was explicitly set as
+ the authentication.
+ - Ignored when NTLM was the negotiated auth.
+ default: WSMAN
+ type: str
+ vars:
+ - name: ansible_psrp_negotiate_service
+
+ # protocol options
+ operation_timeout:
+ description:
+ - Sets the WSMan timeout for each operation.
+ - This is measured in seconds.
+ - This should not exceed the value for C(connection_timeout).
+ type: int
+ vars:
+ - name: ansible_psrp_operation_timeout
+ default: 20
+ max_envelope_size:
+ description:
+ - Sets the maximum size of each WSMan message sent to the remote host.
+ - This is measured in bytes.
+ - Defaults to C(150KiB) for compatibility with older hosts.
+ type: int
+ vars:
+ - name: ansible_psrp_max_envelope_size
+ default: 153600
+ configuration_name:
+ description:
+ - The name of the PowerShell configuration endpoint to connect to.
+ type: str
+ vars:
+ - name: ansible_psrp_configuration_name
+ default: Microsoft.PowerShell
+"""
+
+import base64
+import json
+import logging
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleConnectionFailure, AnsibleError
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.shell.powershell import _common_args
+from ansible.utils.display import Display
+from ansible.utils.hashing import sha1
+
+HAS_PYPSRP = True
+PYPSRP_IMP_ERR = None
+try:
+ import pypsrp
+ from pypsrp.complex_objects import GenericComplexObject, PSInvocationState, RunspacePoolState
+ from pypsrp.exceptions import AuthenticationError, WinRMError
+ from pypsrp.host import PSHost, PSHostUserInterface
+ from pypsrp.powershell import PowerShell, RunspacePool
+ from pypsrp.shell import Process, SignalCode, WinRS
+ from pypsrp.wsman import WSMan, AUTH_KWARGS
+ from requests.exceptions import ConnectionError, ConnectTimeout
+except ImportError as err:
+ HAS_PYPSRP = False
+ PYPSRP_IMP_ERR = err
+
+NEWER_PYPSRP = True
+try:
+ import pypsrp.pwsh_scripts
+except ImportError:
+ NEWER_PYPSRP = False
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+
+ transport = 'psrp'
+ module_implementation_preferences = ('.ps1', '.exe', '')
+ allow_executable = False
+ has_pipelining = True
+ allow_extras = True
+
+ def __init__(self, *args, **kwargs):
+ self.always_pipeline_modules = True
+ self.has_native_async = True
+
+ self.runspace = None
+ self.host = None
+
+ self._shell_type = 'powershell'
+ super(Connection, self).__init__(*args, **kwargs)
+
+ if not C.DEFAULT_DEBUG:
+ logging.getLogger('pypsrp').setLevel(logging.WARNING)
+ logging.getLogger('requests_credssp').setLevel(logging.INFO)
+ logging.getLogger('urllib3').setLevel(logging.INFO)
+
+ def _connect(self):
+ if not HAS_PYPSRP:
+ raise AnsibleError("pypsrp or dependencies are not installed: %s"
+ % to_native(PYPSRP_IMP_ERR))
+ super(Connection, self)._connect()
+ self._build_kwargs()
+ display.vvv("ESTABLISH PSRP CONNECTION FOR USER: %s ON PORT %s TO %s" %
+ (self._psrp_user, self._psrp_port, self._psrp_host),
+ host=self._psrp_host)
+
+ if not self.runspace:
+ connection = WSMan(**self._psrp_conn_kwargs)
+
+ # create our pseudo host to capture the exit code and host output
+ host_ui = PSHostUserInterface()
+ self.host = PSHost(None, None, False, "Ansible PSRP Host", None,
+ host_ui, None)
+
+ self.runspace = RunspacePool(
+ connection, host=self.host,
+ configuration_name=self._psrp_configuration_name
+ )
+ display.vvvvv(
+ "PSRP OPEN RUNSPACE: auth=%s configuration=%s endpoint=%s" %
+ (self._psrp_auth, self._psrp_configuration_name,
+ connection.transport.endpoint), host=self._psrp_host
+ )
+ try:
+ self.runspace.open()
+ except AuthenticationError as e:
+ raise AnsibleConnectionFailure("failed to authenticate with "
+ "the server: %s" % to_native(e))
+ except WinRMError as e:
+ raise AnsibleConnectionFailure(
+ "psrp connection failure during runspace open: %s"
+ % to_native(e)
+ )
+ except (ConnectionError, ConnectTimeout) as e:
+ raise AnsibleConnectionFailure(
+ "Failed to connect to the host via PSRP: %s"
+ % to_native(e)
+ )
+
+ self._connected = True
+ return self
+
+ def reset(self):
+ display.vvvvv("PSRP: Reset Connection", host=self._psrp_host)
+ self.runspace = None
+ self._connect()
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data=in_data,
+ sudoable=sudoable)
+
+ if cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
+ # This is a PowerShell script encoded by the shell plugin; we will
+ # decode the script and execute it in the runspace instead of
+ # starting a new interpreter to save time
+ b_command = base64.b64decode(cmd.split(" ")[-1])
+ script = to_text(b_command, 'utf-16-le')
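+ # For reference, a doctest-style sketch of the decode above; PowerShell
+ # -EncodedCommand payloads are base64 over UTF-16-LE encoded text:
+ #   >>> import base64
+ #   >>> raw = u'Write-Output "hi"'.encode('utf-16-le')
+ #   >>> base64.b64decode(base64.b64encode(raw)).decode('utf-16-le') == u'Write-Output "hi"'
+ #   True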
+ in_data = to_text(in_data, errors="surrogate_or_strict", nonstring="passthru")
+
+ if in_data and in_data.startswith(u"#!"):
+ # ANSIBALLZ wrapper, we need to get the interpreter and execute
+ # that as the script - note this won't work as basic.py relies
+ # on packages not available on Windows, once fixed we can enable
+ # this path
+ interpreter = to_native(in_data.splitlines()[0][2:])
+ # script = "$input | &'%s' -" % interpreter
+ # in_data = to_text(in_data)
+ raise AnsibleError("cannot run the interpreter '%s' on the psrp "
+ "connection plugin" % interpreter)
+
+ # call build_module_command to get the bootstrap wrapper text
+ bootstrap_wrapper = self._shell.build_module_command('', '', '')
+ if bootstrap_wrapper == cmd:
+ # Do not display to the user each invocation of the bootstrap wrapper
+ display.vvv("PSRP: EXEC (via pipeline wrapper)")
+ else:
+ display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host)
+ else:
+ # In other cases we want to execute the cmd as the script. We append 'exit $LASTEXITCODE' to ensure the
+ # rc is propagated back to the connection plugin.
+ script = to_text(u"%s\nexit $LASTEXITCODE" % cmd)
+ display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host)
+
+ rc, stdout, stderr = self._exec_psrp_script(script, in_data)
+ return rc, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+
+ out_path = self._shell._unquote(out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host)
+
+ # The new method that uses PSRP directly relies on a feature added in pypsrp 0.4.0 (released 2019-09-19). In
+ # case someone still has an older version present we warn them, asking to update their library to a newer
+ # release, and fall back to the old WSMV shell.
+ if NEWER_PYPSRP:
+ rc, stdout, stderr, local_sha1 = self._put_file_new(in_path, out_path)
+
+ else:
+ rc, stdout, stderr, local_sha1 = self._put_file_old(in_path, out_path)
+
+ if rc != 0:
+ raise AnsibleError(to_native(stderr))
+
+ put_output = json.loads(stdout)
+ remote_sha1 = put_output.get("sha1")
+
+ if not remote_sha1:
+ raise AnsibleError("Remote sha1 was not returned, stdout: '%s', stderr: '%s'"
+ % (to_native(stdout), to_native(stderr)))
+
+ if remote_sha1 != local_sha1:
+ raise AnsibleError("Remote sha1 hash %s does not match local hash %s"
+ % (to_native(remote_sha1), to_native(local_sha1)))
+
+ def _put_file_old(self, in_path, out_path):
+ script = u'''begin {
+ $ErrorActionPreference = "Stop"
+ $ProgressPreference = 'SilentlyContinue'
+
+ $path = '%s'
+ $fd = [System.IO.File]::Create($path)
+ $algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
+ $bytes = @()
+} process {
+ $bytes = [System.Convert]::FromBase64String($input)
+ $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null
+ $fd.Write($bytes, 0, $bytes.Length)
+} end {
+ $fd.Close()
+ $algo.TransformFinalBlock($bytes, 0, 0) > $null
+ $hash = [System.BitConverter]::ToString($algo.Hash)
+ $hash = $hash.Replace("-", "").ToLowerInvariant()
+
+ Write-Output -InputObject "{`"sha1`":`"$hash`"}"
+}''' % out_path
+
+ cmd_parts = self._shell._encode_script(script, as_list=True,
+ strict_mode=False,
+ preserve_rc=False)
+ b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_in_path):
+ raise AnsibleFileNotFound('file or module does not exist: "%s"'
+ % to_native(in_path))
+
+ in_size = os.path.getsize(b_in_path)
+ buffer_size = int(self.runspace.connection.max_payload_size / 4 * 3)
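+ # e.g. a hypothetical 1 MiB max_payload_size allows 786432 raw bytes per
+ # chunk, since base64 encodes every 3 input bytes as 4 output characters:
+ #   >>> int(1048576 / 4 * 3)
+ #   786432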
+ sha1_hash = sha1()
+
+ # copying files is faster when using the raw WinRM shell and not PSRP
+ # we will create a WinRS shell just for this process
+ # TODO: speed this up as there is overhead creating a shell for this
+ with WinRS(self.runspace.connection, codepage=65001) as shell:
+ process = Process(shell, cmd_parts[0], cmd_parts[1:])
+ process.begin_invoke()
+
+ offset = 0
+ with open(b_in_path, 'rb') as src_file:
+ for data in iter((lambda: src_file.read(buffer_size)), b""):
+ offset += len(data)
+ display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" %
+ (in_path, out_path, offset, len(data)),
+ host=self._psrp_host)
+ b64_data = base64.b64encode(data) + b"\r\n"
+ process.send(b64_data, end=(src_file.tell() == in_size))
+ sha1_hash.update(data)
+
+ # the file was empty, return empty buffer
+ if offset == 0:
+ process.send(b"", end=True)
+
+ process.end_invoke()
+ process.signal(SignalCode.CTRL_C)
+
+ return process.rc, process.stdout, process.stderr, sha1_hash.hexdigest()
+
+ def _put_file_new(self, in_path, out_path):
+ copy_script = '''begin {
+ $ErrorActionPreference = "Stop"
+ $WarningPreference = "Continue"
+ $path = $MyInvocation.UnboundArguments[0]
+ $fd = [System.IO.File]::Create($path)
+ $algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
+ $bytes = @()
+
+ $bindingFlags = [System.Reflection.BindingFlags]'NonPublic, Instance'
+ Function Get-Property {
+ <#
+ .SYNOPSIS
+ Gets the private/internal property specified of the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
+ [System.Object]
+ $Object,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [System.String]
+ $Name
+ )
+
+ $Object.GetType().GetProperty($Name, $bindingFlags).GetValue($Object, $null)
+ }
+
+ Function Set-Property {
+ <#
+ .SYNOPSIS
+ Sets the private/internal property specified on the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
+ [System.Object]
+ $Object,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [System.String]
+ $Name,
+
+ [Parameter(Mandatory=$true, Position=2)]
+ [AllowNull()]
+ [System.Object]
+ $Value
+ )
+
+ $Object.GetType().GetProperty($Name, $bindingFlags).SetValue($Object, $Value, $null)
+ }
+
+ Function Get-Field {
+ <#
+ .SYNOPSIS
+ Gets the private/internal field specified of the object passed in.
+ #>
+ Param (
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)]
+ [System.Object]
+ $Object,
+
+ [Parameter(Mandatory=$true, Position=1)]
+ [System.String]
+ $Name
+ )
+
+ $Object.GetType().GetField($Name, $bindingFlags).GetValue($Object)
+ }
+
+ # MaximumAllowedMemory is required to be set so we can send input data that exceeds the limit on a PS
+ # Runspace. We use reflection to access/set this property as it is not accessible publicly. This is not ideal
+ # but works on all PowerShell versions I've tested with. We originally used WinRS to send the raw bytes to the
+ # host but this falls flat if someone is using a custom PS configuration name so this is a workaround. This
+ # isn't required for smaller files so if it fails we ignore the error and hope it wasn't needed.
+ # https://github.com/PowerShell/PowerShell/blob/c8e72d1e664b1ee04a14f226adf655cced24e5f0/src/System.Management.Automation/engine/serialization.cs#L325
+ try {
+ $Host | Get-Property 'ExternalHost' | `
+ Get-Field '_transportManager' | `
+ Get-Property 'Fragmentor' | `
+ Get-Property 'DeserializationContext' | `
+ Set-Property 'MaximumAllowedMemory' $null
+ } catch {}
+}
+process {
+ $bytes = [System.Convert]::FromBase64String($input)
+ $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null
+ $fd.Write($bytes, 0, $bytes.Length)
+}
+end {
+ $fd.Close()
+
+ $algo.TransformFinalBlock($bytes, 0, 0) > $null
+ $hash = [System.BitConverter]::ToString($algo.Hash).Replace('-', '').ToLowerInvariant()
+ Write-Output -InputObject "{`"sha1`":`"$hash`"}"
+}
+'''
+
+ # Get the buffer size of each fragment to send, subtract 82 for the fragment, message, and other header info
+ # fields that PSRP adds. Adjust to size of the base64 encoded bytes length.
+ buffer_size = int((self.runspace.connection.max_payload_size - 82) / 4 * 3)
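+ # e.g. with a hypothetical 1 MiB max_payload_size:
+ #   >>> int((1048576 - 82) / 4 * 3)
+ #   786370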
+
+ sha1_hash = sha1()
+
+ b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ if not os.path.exists(b_in_path):
+ raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
+
+ def read_gen():
+ offset = 0
+
+ with open(b_in_path, 'rb') as src_fd:
+ for b_data in iter((lambda: src_fd.read(buffer_size)), b""):
+ data_len = len(b_data)
+ offset += data_len
+ sha1_hash.update(b_data)
+
+ # PSRP technically supports sending raw bytes but that method requires a larger CLIXML message.
+ # Sending base64 is still more efficient here.
+ display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" % (in_path, out_path, offset, data_len),
+ host=self._psrp_host)
+ b64_data = base64.b64encode(b_data)
+ yield [to_text(b64_data)]
+
+ if offset == 0: # empty file
+ yield [""]
+
+ rc, stdout, stderr = self._exec_psrp_script(copy_script, read_gen(), arguments=[out_path], force_stop=True)
+
+ return rc, stdout, stderr, sha1_hash.hexdigest()
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path),
+ host=self._psrp_host)
+
+ in_path = self._shell._unquote(in_path)
+ out_path = out_path.replace('\\', '/')
+
+ # because we are dealing with base64 data we need to work out the
+ # maximum size of the base64 chunk that fits within the max payload
+ max_b64_size = int(self.runspace.connection.max_payload_size -
+ (self.runspace.connection.max_payload_size / 4 * 3))
+ buffer_size = max_b64_size - (max_b64_size % 1024)
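+ # e.g. for a hypothetical 1 MiB max payload: max_b64_size is
+ # 1048576 - 786432 = 262144, which is already a multiple of 1024, so
+ # buffer_size stays at 262144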
+
+ # set up the file stream in read-only mode
+ setup_script = '''$ErrorActionPreference = "Stop"
+$path = '%s'
+
+if (Test-Path -Path $path -PathType Leaf) {
+ $fs = New-Object -TypeName System.IO.FileStream -ArgumentList @(
+ $path,
+ [System.IO.FileMode]::Open,
+ [System.IO.FileAccess]::Read,
+ [System.IO.FileShare]::Read
+ )
+ $buffer_size = %d
+} elseif (Test-Path -Path $path -PathType Container) {
+ Write-Output -InputObject "[DIR]"
+} else {
+ Write-Error -Message "$path does not exist"
+ $host.SetShouldExit(1)
+}''' % (self._shell._escape(in_path), buffer_size)
+
+ # read the file stream at the offset and return the b64 string
+ read_script = '''$ErrorActionPreference = "Stop"
+$fs.Seek(%d, [System.IO.SeekOrigin]::Begin) > $null
+$buffer = New-Object -TypeName byte[] -ArgumentList $buffer_size
+$bytes_read = $fs.Read($buffer, 0, $buffer_size)
+
+if ($bytes_read -gt 0) {
+ $bytes = $buffer[0..($bytes_read - 1)]
+ Write-Output -InputObject ([System.Convert]::ToBase64String($bytes))
+}'''
+
+ # need to run the setup script outside of the local scope so the
+ # file stream stays active between fetch operations
+ rc, stdout, stderr = self._exec_psrp_script(setup_script,
+ use_local_scope=False,
+ force_stop=True)
+ if rc != 0:
+ raise AnsibleError("failed to setup file stream for fetch '%s': %s"
+ % (out_path, to_native(stderr)))
+ elif stdout.strip() == '[DIR]':
+ # to be consistent with other connection plugins, we assume the caller has created the target dir
+ return
+
+ b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
+ # to be consistent with other connection plugins, we assume the caller has created the target dir
+ offset = 0
+ with open(b_out_path, 'wb') as out_file:
+ while True:
+ display.vvvvv("PSRP FETCH %s to %s (offset=%d" %
+ (in_path, out_path, offset), host=self._psrp_host)
+ rc, stdout, stderr = self._exec_psrp_script(read_script % offset, force_stop=True)
+ if rc != 0:
+ raise AnsibleError("failed to transfer file to '%s': %s"
+ % (out_path, to_native(stderr)))
+
+ data = base64.b64decode(stdout.strip())
+ out_file.write(data)
+ if len(data) < buffer_size:
+ break
+ offset += len(data)
+
+ rc, stdout, stderr = self._exec_psrp_script("$fs.Close()", force_stop=True)
+ if rc != 0:
+ display.warning("failed to close remote file stream of file "
+ "'%s': %s" % (in_path, to_native(stderr)))
+
+ def close(self):
+ if self.runspace and self.runspace.state == RunspacePoolState.OPENED:
+ display.vvvvv("PSRP CLOSE RUNSPACE: %s" % (self.runspace.id),
+ host=self._psrp_host)
+ self.runspace.close()
+ self.runspace = None
+ self._connected = False
+
+ def _build_kwargs(self):
+ self._psrp_host = self.get_option('remote_addr')
+ self._psrp_user = self.get_option('remote_user')
+ self._psrp_pass = self.get_option('remote_password')
+
+ protocol = self.get_option('protocol')
+ port = self.get_option('port')
+ if protocol is None and port is None:
+ protocol = 'https'
+ port = 5986
+ elif protocol is None:
+ protocol = 'https' if int(port) != 5985 else 'http'
+ elif port is None:
+ port = 5986 if protocol == 'https' else 5985
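+ # Worked examples of the defaulting logic above:
+ #   protocol=None,   port=None -> ('https', 5986)
+ #   protocol=None,   port=5985 -> ('http', 5985)
+ #   protocol=None,   port=5986 -> ('https', 5986)
+ #   protocol='http', port=None -> ('http', 5985)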
+
+ self._psrp_protocol = protocol
+ self._psrp_port = int(port)
+
+ self._psrp_path = self.get_option('path')
+ self._psrp_auth = self.get_option('auth')
+ # cert validation can either be a bool or a path to the cert
+ cert_validation = self.get_option('cert_validation')
+ cert_trust_path = self.get_option('ca_cert')
+ if cert_validation == 'ignore':
+ self._psrp_cert_validation = False
+ elif cert_trust_path is not None:
+ self._psrp_cert_validation = cert_trust_path
+ else:
+ self._psrp_cert_validation = True
+
+ self._psrp_connection_timeout = self.get_option('connection_timeout') # Can be None
+ self._psrp_read_timeout = self.get_option('read_timeout') # Can be None
+ self._psrp_message_encryption = self.get_option('message_encryption')
+ self._psrp_proxy = self.get_option('proxy')
+ self._psrp_ignore_proxy = boolean(self.get_option('ignore_proxy'))
+ self._psrp_operation_timeout = int(self.get_option('operation_timeout'))
+ self._psrp_max_envelope_size = int(self.get_option('max_envelope_size'))
+ self._psrp_configuration_name = self.get_option('configuration_name')
+ self._psrp_reconnection_retries = int(self.get_option('reconnection_retries'))
+ self._psrp_reconnection_backoff = float(self.get_option('reconnection_backoff'))
+
+ self._psrp_certificate_key_pem = self.get_option('certificate_key_pem')
+ self._psrp_certificate_pem = self.get_option('certificate_pem')
+ self._psrp_credssp_auth_mechanism = self.get_option('credssp_auth_mechanism')
+ self._psrp_credssp_disable_tlsv1_2 = self.get_option('credssp_disable_tlsv1_2')
+ self._psrp_credssp_minimum_version = self.get_option('credssp_minimum_version')
+ self._psrp_negotiate_send_cbt = self.get_option('negotiate_send_cbt')
+ self._psrp_negotiate_delegate = self.get_option('negotiate_delegate')
+ self._psrp_negotiate_hostname_override = self.get_option('negotiate_hostname_override')
+ self._psrp_negotiate_service = self.get_option('negotiate_service')
+
+ supported_args = []
+ for auth_kwarg in AUTH_KWARGS.values():
+ supported_args.extend(auth_kwarg)
+ extra_args = set([v.replace('ansible_psrp_', '') for v in
+ self.get_option('_extras')])
+ unsupported_args = extra_args.difference(supported_args)
+
+ for arg in unsupported_args:
+ display.warning("ansible_psrp_%s is unsupported by the current "
+ "psrp version installed" % arg)
+
+ self._psrp_conn_kwargs = dict(
+ server=self._psrp_host, port=self._psrp_port,
+ username=self._psrp_user, password=self._psrp_pass,
+ ssl=self._psrp_protocol == 'https', path=self._psrp_path,
+ auth=self._psrp_auth, cert_validation=self._psrp_cert_validation,
+ connection_timeout=self._psrp_connection_timeout,
+ encryption=self._psrp_message_encryption, proxy=self._psrp_proxy,
+ no_proxy=self._psrp_ignore_proxy,
+ max_envelope_size=self._psrp_max_envelope_size,
+ operation_timeout=self._psrp_operation_timeout,
+ certificate_key_pem=self._psrp_certificate_key_pem,
+ certificate_pem=self._psrp_certificate_pem,
+ credssp_auth_mechanism=self._psrp_credssp_auth_mechanism,
+ credssp_disable_tlsv1_2=self._psrp_credssp_disable_tlsv1_2,
+ credssp_minimum_version=self._psrp_credssp_minimum_version,
+ negotiate_send_cbt=self._psrp_negotiate_send_cbt,
+ negotiate_delegate=self._psrp_negotiate_delegate,
+ negotiate_hostname_override=self._psrp_negotiate_hostname_override,
+ negotiate_service=self._psrp_negotiate_service,
+ )
+
+ # Check if PSRP version supports newer read_timeout argument (needs pypsrp 0.3.0+)
+ if hasattr(pypsrp, 'FEATURES') and 'wsman_read_timeout' in pypsrp.FEATURES:
+ self._psrp_conn_kwargs['read_timeout'] = self._psrp_read_timeout
+ elif self._psrp_read_timeout is not None:
+ display.warning("ansible_psrp_read_timeout is unsupported by the current psrp version installed, "
+ "using ansible_psrp_connection_timeout value for read_timeout instead.")
+
+ # Check if PSRP version supports newer reconnection_retries argument (needs pypsrp 0.3.0+)
+ if hasattr(pypsrp, 'FEATURES') and 'wsman_reconnections' in pypsrp.FEATURES:
+ self._psrp_conn_kwargs['reconnection_retries'] = self._psrp_reconnection_retries
+ self._psrp_conn_kwargs['reconnection_backoff'] = self._psrp_reconnection_backoff
+ else:
+ if self._psrp_reconnection_retries is not None:
+ display.warning("ansible_psrp_reconnection_retries is unsupported by the current psrp version installed.")
+ if self._psrp_reconnection_backoff is not None:
+ display.warning("ansible_psrp_reconnection_backoff is unsupported by the current psrp version installed.")
+
+ # add in the extra args that were set
+ for arg in extra_args.intersection(supported_args):
+ option = self.get_option('_extras')['ansible_psrp_%s' % arg]
+ self._psrp_conn_kwargs[arg] = option
+
+ def _exec_psrp_script(self, script, input_data=None, use_local_scope=True, force_stop=False, arguments=None):
+ ps = PowerShell(self.runspace)
+ ps.add_script(script, use_local_scope=use_local_scope)
+ if arguments:
+ for arg in arguments:
+ ps.add_argument(arg)
+
+ ps.invoke(input=input_data)
+
+ rc, stdout, stderr = self._parse_pipeline_result(ps)
+
+ if force_stop:
+ # This is usually not needed because we close the Runspace after our exec, and we skip the manual call to
+ # close the pipeline to save some time. Set to True when running multiple exec calls in the same runspace.
+
+ # Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we
+ # can call stop without any issues.
+ ps.state = PSInvocationState.RUNNING
+ ps.stop()
+
+ return rc, stdout, stderr
+
+ def _parse_pipeline_result(self, pipeline):
+ """
+ PSRP doesn't model its output the same way as other protocols do.
+ We need some extra logic to convert the pipeline streams and host
+ output into the format that Ansible understands.
+
+ :param pipeline: The finished PowerShell pipeline that invoked our
+ commands
+ :return: rc, stdout, stderr based on the pipeline output
+ """
+ # we try to get the rc from our host implementation; this is set if
+ # exit or $host.SetShouldExit() is called in our pipeline. If not, we
+ # set it to 0 if the pipeline had no errors and 1 if it did
+ rc = self.host.rc or (1 if pipeline.had_errors else 0)
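+ # e.g. host.rc == 2 -> rc 2; host.rc == 0 with pipeline errors -> rc 1;
+ # host.rc == 0 and no errors -> rc 0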
+
+ # TODO: figure out a better way of merging this with the host output
+ stdout_list = []
+ for output in pipeline.output:
+ # Not all pipeline outputs are a string or contain a __str__ value,
+ # we will create our own output based on the properties of the
+ # complex object if that is the case.
+ if isinstance(output, GenericComplexObject) and output.to_string is None:
+ obj_lines = output.property_sets
+ for key, value in output.adapted_properties.items():
+ obj_lines.append(u"%s: %s" % (key, value))
+ for key, value in output.extended_properties.items():
+ obj_lines.append(u"%s: %s" % (key, value))
+ output_msg = u"\n".join(obj_lines)
+ else:
+ output_msg = to_text(output, nonstring='simplerepr')
+
+ stdout_list.append(output_msg)
+
+ if len(self.host.ui.stdout) > 0:
+ stdout_list += self.host.ui.stdout
+ stdout = u"\r\n".join(stdout_list)
+
+ stderr_list = []
+ for error in pipeline.streams.error:
+ # the error record is not as fully fleshed out as what we usually get
+ # in PS, so we manually create it here
+ command_name = "%s : " % error.command_name if error.command_name else ''
+ position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else ''
+ error_msg = "%s%s\r\n%s" \
+ " + CategoryInfo : %s\r\n" \
+ " + FullyQualifiedErrorId : %s" \
+ % (command_name, str(error), position,
+ error.message, error.fq_error)
+ stacktrace = error.script_stacktrace
+ if self._play_context.verbosity >= 3 and stacktrace is not None:
+ error_msg += "\r\nStackTrace:\r\n%s" % stacktrace
+ stderr_list.append(error_msg)
+
+ if len(self.host.ui.stderr) > 0:
+ stderr_list += self.host.ui.stderr
+ stderr = u"\r\n".join([to_text(o) for o in stderr_list])
+
+ display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host)
+ display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host)
+ display.vvvvv("PSRP STDERR: %s" % stderr, host=self._psrp_host)
+
+ # reset the host output back to defaults, needed if running
+ # multiple pipelines on the same RunspacePool
+ self.host.rc = 0
+ self.host.ui.stdout = []
+ self.host.ui.stderr = []
+
+ return rc, to_bytes(stdout, encoding='utf-8'), to_bytes(stderr, encoding='utf-8')
diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py
new file mode 100644
index 00000000..ed44a035
--- /dev/null
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -0,0 +1,1285 @@
+# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+# Copyright 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ connection: ssh
+ short_description: connect via ssh client binary
+ description:
+ - This connection plugin allows Ansible to communicate with the target machines via the normal ssh command line tool.
+ - Ansible does not expose a channel to allow communication between the user and the ssh process to accept
+ a password manually to decrypt an ssh key when using this connection plugin (which is the default). The
+ use of ``ssh-agent`` is highly recommended.
+ author: ansible (@core)
+ version_added: historical
+ options:
+ host:
+ description: Hostname/ip to connect to.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_ssh_host
+ host_key_checking:
+ description: Determines if ssh should check host keys
+ type: boolean
+ ini:
+ - section: defaults
+ key: 'host_key_checking'
+ - section: ssh_connection
+ key: 'host_key_checking'
+ version_added: '2.5'
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ version_added: '2.5'
+ vars:
+ - name: ansible_host_key_checking
+ version_added: '2.5'
+ - name: ansible_ssh_host_key_checking
+ version_added: '2.5'
+ password:
+ description: Authentication password for the C(remote_user). Can be supplied as CLI option.
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ sshpass_prompt:
+ description: Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
+ default: ''
+ ini:
+ - section: 'ssh_connection'
+ key: 'sshpass_prompt'
+ env:
+ - name: ANSIBLE_SSHPASS_PROMPT
+ vars:
+ - name: ansible_sshpass_prompt
+ version_added: '2.10'
+ ssh_args:
+ description: Arguments to pass to all ssh cli tools
+ default: '-C -o ControlMaster=auto -o ControlPersist=60s'
+ ini:
+ - section: 'ssh_connection'
+ key: 'ssh_args'
+ env:
+ - name: ANSIBLE_SSH_ARGS
+ vars:
+ - name: ansible_ssh_args
+ version_added: '2.7'
+ ssh_common_args:
+ description: Common extra args for all ssh CLI tools
+ ini:
+ - section: 'ssh_connection'
+ key: 'ssh_common_args'
+ version_added: '2.7'
+ env:
+ - name: ANSIBLE_SSH_COMMON_ARGS
+ version_added: '2.7'
+ vars:
+ - name: ansible_ssh_common_args
+ ssh_executable:
+ default: ssh
+ description:
+ - This defines the location of the ssh binary. It defaults to ``ssh`` which will use the first ssh binary available in $PATH.
+ - This option is usually not required; it might be useful when access to system ssh is restricted,
+ or when using ssh wrappers to connect to remote hosts.
+ env: [{name: ANSIBLE_SSH_EXECUTABLE}]
+ ini:
+ - {key: ssh_executable, section: ssh_connection}
+ #const: ANSIBLE_SSH_EXECUTABLE
+ version_added: "2.2"
+ vars:
+ - name: ansible_ssh_executable
+ version_added: '2.7'
+ sftp_executable:
+ default: sftp
+ description:
+ - This defines the location of the sftp binary. It defaults to ``sftp`` which will use the first binary available in $PATH.
+ env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
+ ini:
+ - {key: sftp_executable, section: ssh_connection}
+ version_added: "2.6"
+ vars:
+ - name: ansible_sftp_executable
+ version_added: '2.7'
+ scp_executable:
+ default: scp
+ description:
+ - This defines the location of the scp binary. It defaults to `scp` which will use the first binary available in $PATH.
+ env: [{name: ANSIBLE_SCP_EXECUTABLE}]
+ ini:
+ - {key: scp_executable, section: ssh_connection}
+ version_added: "2.6"
+ vars:
+ - name: ansible_scp_executable
+ version_added: '2.7'
+ scp_extra_args:
+ description: Extra arguments exclusive to the ``scp`` CLI
+ vars:
+ - name: ansible_scp_extra_args
+ env:
+ - name: ANSIBLE_SCP_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: scp_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ sftp_extra_args:
+ description: Extra arguments exclusive to the ``sftp`` CLI
+ vars:
+ - name: ansible_sftp_extra_args
+ env:
+ - name: ANSIBLE_SFTP_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: sftp_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ ssh_extra_args:
+ description: Extra arguments exclusive to the 'ssh' CLI
+ vars:
+ - name: ansible_ssh_extra_args
+ env:
+ - name: ANSIBLE_SSH_EXTRA_ARGS
+ version_added: '2.7'
+ ini:
+ - key: ssh_extra_args
+ section: ssh_connection
+ version_added: '2.7'
+ retries:
+ # constant: ANSIBLE_SSH_RETRIES
+ description: Number of attempts to connect.
+ default: 3
+ type: integer
+ env:
+ - name: ANSIBLE_SSH_RETRIES
+ ini:
+ - section: connection
+ key: retries
+ - section: ssh_connection
+ key: retries
+ vars:
+ - name: ansible_ssh_retries
+ version_added: '2.7'
+ port:
+ description: Remote port to connect to.
+ type: int
+ default: 22
+ ini:
+ - section: defaults
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ vars:
+ - name: ansible_port
+ - name: ansible_ssh_port
+ remote_user:
+ description:
+ - User name with which to login to the remote server, normally set by the remote_user keyword.
+ - If no user is supplied, Ansible will let the ssh client binary choose the user as it normally would.
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ vars:
+ - name: ansible_user
+ - name: ansible_ssh_user
+ pipelining:
+ default: ANSIBLE_PIPELINING
+ description:
+ - Pipelining reduces the number of SSH operations required to execute a module on the remote server,
+ by executing many Ansible modules without actual file transfer.
+ - This can result in a very significant performance improvement when enabled.
+ - However this conflicts with privilege escalation (become).
+ For example, when using sudo operations you must first disable 'requiretty' in the sudoers file for the target hosts,
+ which is why this feature is disabled by default.
+ env:
+ - name: ANSIBLE_PIPELINING
+ - name: ANSIBLE_SSH_PIPELINING
+ ini:
+ - section: defaults
+ key: pipelining
+ - section: ssh_connection
+ key: pipelining
+ type: boolean
+ vars:
+ - name: ansible_pipelining
+ - name: ansible_ssh_pipelining
+ private_key_file:
+ description:
+ - Path to private key file to use for authentication
+ ini:
+ - section: defaults
+ key: private_key_file
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_FILE
+ vars:
+ - name: ansible_private_key_file
+ - name: ansible_ssh_private_key_file
+
+ control_path:
+ description:
+ - This is the location to save ssh's ControlPath sockets; it uses ssh's variable substitution.
+ - Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting.
+ env:
+ - name: ANSIBLE_SSH_CONTROL_PATH
+ ini:
+ - key: control_path
+ section: ssh_connection
+ vars:
+ - name: ansible_control_path
+ version_added: '2.7'
+ control_path_dir:
+ default: ~/.ansible/cp
+ description:
+ - This sets the directory to use for ssh control path if the control path setting is null.
+ - Also, provides the `%(directory)s` variable for the control path setting.
+ env:
+ - name: ANSIBLE_SSH_CONTROL_PATH_DIR
+ ini:
+ - section: ssh_connection
+ key: control_path_dir
+ vars:
+ - name: ansible_control_path_dir
+ version_added: '2.7'
+ sftp_batch_mode:
+ default: 'yes'
+ description: Determines whether sftp is run in batch mode, which allows failed transfers to be detected reliably.
+ env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
+ ini:
+ - {key: sftp_batch_mode, section: ssh_connection}
+ type: bool
+ vars:
+ - name: ansible_sftp_batch_mode
+ version_added: '2.7'
+ scp_if_ssh:
+ default: smart
+ description:
+ - "Preferred method to use when transfering files over ssh"
+ - When set to smart, Ansible will try them until one succeeds or they all fail
+ - If set to True, it will force 'scp', if False it will use 'sftp'
+ env: [{name: ANSIBLE_SCP_IF_SSH}]
+ ini:
+ - {key: scp_if_ssh, section: ssh_connection}
+ vars:
+ - name: ansible_scp_if_ssh
+ version_added: '2.7'
+ use_tty:
+ version_added: '2.5'
+ default: 'yes'
+ description: add -tt to ssh commands to force tty allocation
+ env: [{name: ANSIBLE_SSH_USETTY}]
+ ini:
+ - {key: usetty, section: ssh_connection}
+ type: bool
+ vars:
+ - name: ansible_ssh_use_tty
+ version_added: '2.7'
+'''
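+
+# A minimal inventory sketch (host, address, and paths are hypothetical)
+# mapping the variables documented above onto a host entry:
+#
+#   [linux]
+#   web1 ansible_host=203.0.113.5 ansible_port=2222 ansible_user=deploy
+#
+#   [linux:vars]
+#   ansible_ssh_private_key_file=~/.ssh/id_ed25519
+#   ansible_ssh_common_args=-o StrictHostKeyChecking=accept-new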
+
+import errno
+import fcntl
+import hashlib
+import os
+import pty
+import re
+import subprocess
+import time
+
+from functools import wraps
+from ansible import constants as C
+from ansible.errors import (
+ AnsibleAuthenticationFailure,
+ AnsibleConnectionFailure,
+ AnsibleError,
+ AnsibleFileNotFound,
+)
+from ansible.errors import AnsibleOptionsError
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import PY3, text_type, binary_type
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.plugins.shell.powershell import _parse_clixml
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath, makedirs_safe
+
+display = Display()
+
+
+b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
+ # while invoking a script via -m
+ b'PHP Parse error:', # Php always returns error 255
+ )
+
+SSHPASS_AVAILABLE = None
+
+
+class AnsibleControlPersistBrokenPipeError(AnsibleError):
+ ''' ControlPersist broken pipe '''
+ pass
+
+
+def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display):
+
+ # sshpass errors
+ if command == b'sshpass':
+ # Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
+ if return_tuple[0] == 5:
+ msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
+ if remaining_retries <= 0:
+ msg = 'Invalid/incorrect password:'
+ if no_log:
+ msg = '{0} <error censored due to no log>'.format(msg)
+ else:
+ msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
+ raise AnsibleAuthenticationFailure(msg)
+
+ # sshpass return codes are 1-6. We handled 5 above, so this catches the other scenarios.
+ # No exception is raised, so the connection is retried - except when attempting to use
+ # sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
+ elif return_tuple[0] in [1, 2, 3, 4, 6]:
+ msg = 'sshpass error:'
+ if no_log:
+ msg = '{0} <error censored due to no log>'.format(msg)
+ else:
+ details = to_native(return_tuple[2]).rstrip()
+ if "sshpass: invalid option -- 'P'" in details:
+ details = 'Installed sshpass version does not support customized password prompts. ' \
+ 'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
+ raise AnsibleError('{0} {1}'.format(msg, details))
+ msg = '{0} {1}'.format(msg, details)
+
+ if return_tuple[0] == 255:
+ SSH_ERROR = True
+ for signature in b_NOT_SSH_ERRORS:
+ if signature in return_tuple[1]:
+ SSH_ERROR = False
+ break
+
+ if SSH_ERROR:
+ msg = "Failed to connect to the host via ssh:"
+ if no_log:
+ msg = '{0} <error censored due to no log>'.format(msg)
+ else:
+ msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
+ raise AnsibleConnectionFailure(msg)
+
+ # For other errors, no exception is raised so the connection is retried and we only log the messages
+ if 1 <= return_tuple[0] <= 254:
+ msg = u"Failed to connect to the host via ssh:"
+ if no_log:
+ msg = u'{0} <error censored due to no log>'.format(msg)
+ else:
+ msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
+ display.vvv(msg, host=host)
+
+
+def _ssh_retry(func):
+ """
+ Decorator to retry ssh/scp/sftp in the case of a connection failure
+
+ Will retry if:
+ * an exception is caught
+ * ssh returns 255
+ Will not retry if
+ * sshpass returns 5 (invalid password, to prevent account lockouts)
+ * remaining_tries is < 2
+ * retries limit reached
+ """
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
+ cmd_summary = u"%s..." % to_text(args[0])
+ conn_password = self.get_option('password') or self._play_context.password
+ for attempt in range(remaining_tries):
+ cmd = args[0]
+ if attempt != 0 and conn_password and isinstance(cmd, list):
+ # If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
+ self.sshpass_pipe = os.pipe()
+ cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
+
+ try:
+ try:
+ return_tuple = func(self, *args, **kwargs)
+ if self._play_context.no_log:
+ display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
+ else:
+ display.vvv(return_tuple, host=self.host)
+ # 0 = success
+ # 1-254 = remote command return code
+ # 255 could be a failure from the ssh command itself
+ except (AnsibleControlPersistBrokenPipeError):
+ # Retry one more time because of the ControlPersist broken pipe (see #16731)
+ cmd = args[0]
+ if conn_password and isinstance(cmd, list):
+ # This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
+ self.sshpass_pipe = os.pipe()
+ cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
+ display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
+ return_tuple = func(self, *args, **kwargs)
+
+ remaining_retries = remaining_tries - attempt - 1
+ _handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
+
+ break
+
+ # 5 = Invalid/incorrect password from sshpass
+ except AnsibleAuthenticationFailure:
+ # Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
+ raise
+
+ except (AnsibleConnectionFailure, Exception) as e:
+
+ if attempt == remaining_tries - 1:
+ raise
+ else:
+ pause = 2 ** attempt - 1
+ if pause > 30:
+ pause = 30
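+ # resulting backoff sequence (seconds), capped at 30:
+ #   attempt 0 -> 0, 1 -> 1, 2 -> 3, 3 -> 7, 4 -> 15, 5 -> 30, ...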
+
+ if isinstance(e, AnsibleConnectionFailure):
+ msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
+ else:
+ msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
+ u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
+
+ display.vv(msg, host=self.host)
+
+ time.sleep(pause)
+ continue
+
+ return return_tuple
+ return wrapped
+
+
+class Connection(ConnectionBase):
+ ''' ssh based connections '''
+
+ transport = 'ssh'
+ has_pipelining = True
+
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+
+ self.host = self._play_context.remote_addr
+ self.port = self._play_context.port
+ self.user = self._play_context.remote_user
+ self.control_path = C.ANSIBLE_SSH_CONTROL_PATH
+ self.control_path_dir = C.ANSIBLE_SSH_CONTROL_PATH_DIR
+
+ # Windows operates differently from a POSIX connection/shell plugin,
+ # so we need to set various properties to ensure SSH on Windows
+ # continues to work
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.has_native_async = True
+ self.always_pipeline_modules = True
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+ self.allow_executable = False
+
+ # The connection is created by running ssh/scp/sftp from the exec_command,
+ # put_file, and fetch_file methods, so we don't need to do any connection
+ # management here.
+
+ def _connect(self):
+ return self
+
+ @staticmethod
+ def _create_control_path(host, port, user, connection=None, pid=None):
+ '''Make a hash for the controlpath based on con attributes'''
+ pstring = '%s-%s-%s' % (host, port, user)
+ if connection:
+ pstring += '-%s' % connection
+ if pid:
+ pstring += '-%s' % to_text(pid)
+ m = hashlib.sha1()
+ m.update(to_bytes(pstring))
+ digest = m.hexdigest()
+ cpath = '%(directory)s/' + digest[:10]
+ return cpath
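+ # Illustrative doctest-style call (values are hypothetical); the result
+ # is '%(directory)s/' plus the first 10 hex chars of the sha1 digest:
+ #   >>> p = Connection._create_control_path('example.com', 22, 'root')
+ #   >>> p.startswith('%(directory)s/') and len(p) == len('%(directory)s/') + 10
+ #   True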
+
+ @staticmethod
+ def _sshpass_available():
+ global SSHPASS_AVAILABLE
+
+ # We test once if sshpass is available, and remember the result. It
+ # would be nice to use distutils.spawn.find_executable for this, but
+ # distutils isn't always available; shutil.which() is Python3-only.
+
+ if SSHPASS_AVAILABLE is None:
+ try:
+ p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+ SSHPASS_AVAILABLE = True
+ except OSError:
+ SSHPASS_AVAILABLE = False
+
+ return SSHPASS_AVAILABLE
+
+ @staticmethod
+ def _persistence_controls(b_command):
+ '''
+ Takes a command array and scans it for ControlPersist and ControlPath
+ settings and returns two booleans indicating whether either was found.
+ This could be smarter, e.g. returning false if ControlPersist is 'no',
+ but for now we do it the simple way.
+ '''
+
+ controlpersist = False
+ controlpath = False
+
+ for b_arg in (a.lower() for a in b_command):
+ if b'controlpersist' in b_arg:
+ controlpersist = True
+ elif b'controlpath' in b_arg:
+ controlpath = True
+
+ return controlpersist, controlpath
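+ # e.g. Connection._persistence_controls(
+ #     [b'ssh', b'-o', b'ControlMaster=auto', b'-o', b'ControlPersist=60s'])
+ # returns (True, False): ControlPersist was found, but no ControlPath yet.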
+
+ def _add_args(self, b_command, b_args, explanation):
+ """
+ Adds arguments to the ssh command and displays a caller-supplied explanation of why.
+
+ :arg b_command: A list containing the command to add the new arguments to.
+ This list will be modified by this method.
+ :arg b_args: An iterable of new arguments to add. This iterable is used
+ more than once so it must be persistent (i.e. a list is okay but a
+ StringIO would not be)
+ :arg explanation: A text string explaining why the arguments were
+ added. It will be displayed when the verbosity is high enough.
+ .. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
+ """
+ display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self._play_context.remote_addr)
+ b_command += b_args
+
+ def _build_command(self, binary, *other_args):
+ '''
+ Takes a binary (ssh, scp, sftp) and optional extra arguments and returns
+ a command line as an array that can be passed to subprocess.Popen.
+ '''
+
+ b_command = []
+ conn_password = self.get_option('password') or self._play_context.password
+
+ #
+ # First, the command to invoke
+ #
+
+ # If we want to use password authentication, we have to set up a pipe to
+ # write the password to sshpass.
+
+ if conn_password:
+ if not self._sshpass_available():
+ raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
+
+ self.sshpass_pipe = os.pipe()
+ b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
+
+ password_prompt = self.get_option('sshpass_prompt')
+ if password_prompt:
+ b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
+
+ if binary == 'ssh':
+ b_command += [to_bytes(self._play_context.ssh_executable, errors='surrogate_or_strict')]
+ else:
+ b_command += [to_bytes(binary, errors='surrogate_or_strict')]
+
+ #
+ # Next, additional arguments based on the configuration.
+ #
+
+ # sftp batch mode allows us to correctly catch failed transfers, but can
+ # be disabled if the client side doesn't support the option. However,
+ # sftp batch mode does not prompt for passwords so it must be disabled
+ # if not using controlpersist and using sshpass
+ if binary == 'sftp' and C.DEFAULT_SFTP_BATCH_MODE:
+ if conn_password:
+ b_args = [b'-o', b'BatchMode=no']
+ self._add_args(b_command, b_args, u'disable batch mode for sshpass')
+ b_command += [b'-b', b'-']
+
+ if self._play_context.verbosity > 3:
+ b_command.append(b'-vvv')
+
+ #
+ # Next, we add [ssh_connection]ssh_args from ansible.cfg.
+ #
+
+ ssh_args = self.get_option('ssh_args')
+ if ssh_args:
+ b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
+ self._split_ssh_args(ssh_args)]
+ self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
+
+ # Now we add various arguments controlled by configuration file settings
+ # (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
+ # a combination thereof.
+
+ if not C.HOST_KEY_CHECKING:
+ b_args = (b"-o", b"StrictHostKeyChecking=no")
+ self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
+
+ if self._play_context.port is not None:
+ b_args = (b"-o", b"Port=" + to_bytes(self._play_context.port, nonstring='simplerepr', errors='surrogate_or_strict'))
+ self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
+
+ key = self._play_context.private_key_file
+ if key:
+ b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
+ self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
+
+ if not conn_password:
+ self._add_args(
+ b_command, (
+ b"-o", b"KbdInteractiveAuthentication=no",
+ b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
+ b"-o", b"PasswordAuthentication=no"
+ ),
+ u"ansible_password/ansible_ssh_password not set"
+ )
+
+ user = self._play_context.remote_user
+ if user:
+ self._add_args(
+ b_command,
+ (b"-o", b'User="%s"' % to_bytes(self._play_context.remote_user, errors='surrogate_or_strict')),
+ u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
+ )
+
+ self._add_args(
+ b_command,
+ (b"-o", b"ConnectTimeout=" + to_bytes(self._play_context.timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
+ u"ANSIBLE_TIMEOUT/timeout set"
+ )
+
+ # Add in any common or binary-specific arguments from the PlayContext
+ # (i.e. inventory or task settings or overrides on the command line).
+
+ for opt in (u'ssh_common_args', u'{0}_extra_args'.format(binary)):
+ attr = getattr(self._play_context, opt, None)
+ if attr is not None:
+ b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
+ self._add_args(b_command, b_args, u"PlayContext set %s" % opt)
+
+ # Check if ControlPersist is enabled and add a ControlPath if one hasn't
+ # already been set.
+
+ controlpersist, controlpath = self._persistence_controls(b_command)
+
+ if controlpersist:
+ self._persistent = True
+
+ if not controlpath:
+ cpdir = unfrackpath(self.control_path_dir)
+ b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
+
+ # The directory must exist and be writable.
+ makedirs_safe(b_cpdir, 0o700)
+ if not os.access(b_cpdir, os.W_OK):
+ raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
+
+ if not self.control_path:
+ self.control_path = self._create_control_path(
+ self.host,
+ self.port,
+ self.user
+ )
+ b_args = (b"-o", b"ControlPath=" + to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
+ self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
+
+ # Finally, we add any caller-supplied extras.
+ if other_args:
+ b_command += [to_bytes(a) for a in other_args]
+
+ return b_command
+
+ def _send_initial_data(self, fh, in_data, ssh_process):
+ '''
+ Writes initial data to the stdin filehandle of the subprocess and closes
+ it. (The handle must be closed; otherwise, for example, "sftp -b -" will
+ just hang forever waiting for more commands.)
+ '''
+
+ display.debug(u'Sending initial data')
+
+ try:
+ fh.write(to_bytes(in_data))
+ fh.close()
+ except (OSError, IOError) as e:
+ # The ssh connection may have already terminated at this point, with a more useful error
+ # Only raise AnsibleConnectionFailure if the ssh process is still alive
+ time.sleep(0.001)
+ ssh_process.poll()
+ if getattr(ssh_process, 'returncode', None) is None:
+ raise AnsibleConnectionFailure(
+ 'Data could not be sent to remote host "%s". Make sure this host can be reached '
+ 'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
+ )
+
+ display.debug(u'Sent initial data (%d bytes)' % len(in_data))
+
+ # Used by _run() to kill processes on failures
+ @staticmethod
+ def _terminate_process(p):
+ """ Terminate a process, ignoring errors """
+ try:
+ p.terminate()
+ except (OSError, IOError):
+ pass
+
+ # This is separate from _run() because we need to do the same thing for stdout
+ # and stderr.
+ def _examine_output(self, source, state, b_chunk, sudoable):
+ '''
+ Takes a string, extracts complete lines from it, tests to see if they
+ are a prompt, error message, etc., and sets appropriate flags in self.
+ Prompt and success lines are removed.
+
+ Returns the processed (i.e. possibly-edited) output and the unprocessed
+ remainder (to be processed with the next chunk) as strings.
+ '''
+
+ output = []
+ for b_line in b_chunk.splitlines(True):
+ display_line = to_text(b_line).rstrip('\r\n')
+ suppress_output = False
+
+ # display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
+ if self.become.expect_prompt() and self.become.check_password_prompt(b_line):
+ display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_prompt'] = True
+ suppress_output = True
+ elif self.become.success and self.become.check_success(b_line):
+ display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_success'] = True
+ suppress_output = True
+ elif sudoable and self.become.check_incorrect_password(b_line):
+ display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_error'] = True
+ elif sudoable and self.become.check_missing_password(b_line):
+ display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
+ self._flags['become_nopasswd_error'] = True
+
+ if not suppress_output:
+ output.append(b_line)
+
+ # The chunk we read was most likely a series of complete lines, but just
+ # in case the last line was incomplete (and not a prompt, which we would
+ # have removed from the output), we retain it to be processed with the
+ # next chunk.
+
+ remainder = b''
+ if output and not output[-1].endswith(b'\n'):
+ remainder = output[-1]
+ output = output[:-1]
+
+ return b''.join(output), remainder
+
+ def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
+ '''
+ Starts the command and communicates with it until it ends.
+ '''
+
+ # We don't use _shell.quote as this is run on the controller and is independent of the shell plugin chosen
+ display_cmd = u' '.join(shlex_quote(to_text(c)) for c in cmd)
+ display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
+
+ # Start the given command. If we don't need to pipeline data, we can try
+ # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
+ # pipelining data, or can't create a pty, we fall back to using plain
+ # old pipes.
+
+ p = None
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd = to_bytes(cmd)
+ else:
+ cmd = list(map(to_bytes, cmd))
+
+ conn_password = self.get_option('password') or self._play_context.password
+
+ if not in_data:
+ try:
+ # Make sure stdin is a proper pty to avoid tcgetattr errors
+ master, slave = pty.openpty()
+ if PY3 and conn_password:
+ # pylint: disable=unexpected-keyword-arg
+ p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
+ else:
+ p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = os.fdopen(master, 'wb', 0)
+ os.close(slave)
+ except (OSError, IOError):
+ p = None
+
+ if not p:
+ try:
+ if PY3 and conn_password:
+ # pylint: disable=unexpected-keyword-arg
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
+ else:
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdin = p.stdin
+ except (OSError, IOError) as e:
+ raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
+
+ # If we are using SSH password authentication, write the password into
+ # the pipe we opened in _build_command.
+
+ if conn_password:
+ os.close(self.sshpass_pipe[0])
+ try:
+ os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
+ except OSError as e:
+ # Ignore broken pipe errors if the sshpass process has exited.
+ if e.errno != errno.EPIPE or p.poll() is None:
+ raise
+ os.close(self.sshpass_pipe[1])
+
+ #
+ # SSH state machine
+ #
+
+ # Now we read and accumulate output from the running process until it
+ # exits. Depending on the circumstances, we may also need to write an
+ # escalation password and/or pipelined input to the process.
+
+ states = [
+ 'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
+ ]
+
+ # Are we requesting privilege escalation? Right now, we may be invoked
+ # to execute sftp/scp with sudoable=True, but we can request escalation
+ # only when using ssh. Otherwise we can send initial data straightaway.
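+ #
+ # A rough sketch of the progression for password-based escalation:
+ #
+ # awaiting_prompt -- become prompt seen, password written -->
+ # awaiting_escalation -- become success marker seen -->
+ # ready_to_send -- pipelined in_data written, if any -->
+ # awaiting_exit -- EOF on stdout/stderr, process exits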
+
+ state = states.index('ready_to_send')
+ if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
+ prompt = getattr(self.become, 'prompt', None)
+ if prompt:
+ # We're requesting escalation with a password, so we have to
+ # wait for a password prompt.
+ state = states.index('awaiting_prompt')
+ display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
+ elif self.become and self.become.success:
+ # We're requesting escalation without a password, so we have to
+ # detect success/failure before sending any initial data.
+ state = states.index('awaiting_escalation')
+ display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
+
+ # We store accumulated stdout and stderr output from the process here,
+ # but strip any privilege escalation prompt/confirmation lines first.
+ # Output is accumulated into tmp_*, complete lines are extracted into
+ # an array, then checked and removed or copied to stdout or stderr. We
+ # set flags in self._flags based on examining the output.
+
+ b_stdout = b_stderr = b''
+ b_tmp_stdout = b_tmp_stderr = b''
+
+ self._flags = dict(
+ become_prompt=False, become_success=False,
+ become_error=False, become_nopasswd_error=False
+ )
+
+ # select timeout should be longer than the connect timeout, otherwise
+ # they will race each other when we can't connect, and the connect
+ # timeout usually fails
+ timeout = 2 + self._play_context.timeout
+ for fd in (p.stdout, p.stderr):
+ fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # TODO: bcoca would like to use SelectSelector() when the number of open
+ # filehandles is low, then switch to more efficient ones when it is higher.
+ # select is faster when the number of filehandles is low.
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ # If we can send initial data without waiting for anything, we do so
+ # before we start polling
+ if states[state] == 'ready_to_send' and in_data:
+ self._send_initial_data(stdin, in_data, p)
+ state += 1
+
+ try:
+ while True:
+ poll = p.poll()
+ events = selector.select(timeout)
+
+ # We pay attention to timeouts only while negotiating a prompt.
+
+ if not events:
+ # We timed out
+ if state <= states.index('awaiting_escalation'):
+ # If the process has already exited, then it's not really a
+ # timeout; we'll let the normal error handling deal with it.
+ if poll is not None:
+ break
+ self._terminate_process(p)
+ raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
+
+ # Read whatever output is available on stdout and stderr, and stop
+ # listening to the pipe if it's been closed.
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ b_chunk = p.stdout.read()
+ if b_chunk == b'':
+ # stdout has been closed, stop watching it
+ selector.unregister(p.stdout)
+ # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
+ # first connection goes into the background and we never see EOF
+ # on stderr. If we see EOF on stdout, lower the select timeout
+ # to reduce the time wasted selecting on stderr if we observe
+ # that the process has not yet exited after this EOF. Otherwise
+ # we may spend a long timeout period waiting for an EOF that is
+ # not going to arrive until the persisted connection closes.
+ timeout = 1
+ b_tmp_stdout += b_chunk
+ display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
+ elif key.fileobj == p.stderr:
+ b_chunk = p.stderr.read()
+ if b_chunk == b'':
+ # stderr has been closed, stop watching it
+ selector.unregister(p.stderr)
+ b_tmp_stderr += b_chunk
+ display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
+
+ # We examine the output line-by-line until we have negotiated any
+ # privilege escalation prompt and subsequent success/error message.
+ # Afterwards, we can accumulate output without looking at it.
+
+ if state < states.index('ready_to_send'):
+ if b_tmp_stdout:
+ b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
+ b_stdout += b_output
+ b_tmp_stdout = b_unprocessed
+
+ if b_tmp_stderr:
+ b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
+ b_stderr += b_output
+ b_tmp_stderr = b_unprocessed
+ else:
+ b_stdout += b_tmp_stdout
+ b_stderr += b_tmp_stderr
+ b_tmp_stdout = b_tmp_stderr = b''
+
+ # If we see a privilege escalation prompt, we send the password.
+ # (If we're expecting a prompt but the escalation succeeds, we
+ # didn't need the password and can carry on regardless.)
+
+ if states[state] == 'awaiting_prompt':
+ if self._flags['become_prompt']:
+ display.debug(u'Sending become_password in response to prompt')
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ # On python3 stdin is a BufferedWriter, and we don't have a guarantee
+ # that the write will happen without a flush
+ stdin.flush()
+ self._flags['become_prompt'] = False
+ state += 1
+ elif self._flags['become_success']:
+ state += 1
+
+ # We've requested escalation (with or without a password), now we
+ # wait for an error message or a successful escalation.
+
+ if states[state] == 'awaiting_escalation':
+ if self._flags['become_success']:
+ display.vvv(u'Escalation succeeded')
+ self._flags['become_success'] = False
+ state += 1
+ elif self._flags['become_error']:
+ display.vvv(u'Escalation failed')
+ self._terminate_process(p)
+ self._flags['become_error'] = False
+ raise AnsibleError('Incorrect %s password' % self.become.name)
+ elif self._flags['become_nopasswd_error']:
+ display.vvv(u'Escalation requires password')
+ self._terminate_process(p)
+ self._flags['become_nopasswd_error'] = False
+ raise AnsibleError('Missing %s password' % self.become.name)
+ elif self._flags['become_prompt']:
+ # This shouldn't happen, because we should see the "Sorry,
+ # try again" message first.
+ display.vvv(u'Escalation prompt repeated')
+ self._terminate_process(p)
+ self._flags['become_prompt'] = False
+ raise AnsibleError('Incorrect %s password' % self.become.name)
+
+ # Once we're sure that the privilege escalation prompt, if any, has
+ # been dealt with, we can send any initial data and start waiting
+ # for output.
+
+ if states[state] == 'ready_to_send':
+ if in_data:
+ self._send_initial_data(stdin, in_data, p)
+ state += 1
+
+ # Now we're awaiting_exit: has the child process exited? If it has,
+ # and we've read all available output from it, we're done.
+
+ if poll is not None:
+ if not selector.get_map() or not events:
+ break
+ # We should not see further writes to the stdout/stderr file
+ # descriptors after the process has closed; drop the select
+ # timeout to zero to gather any last writes we may have missed.
+ timeout = 0
+ continue
+
+ # If the process has not yet exited, but we've already read EOF from
+ # its stdout and stderr (and thus no longer watching any file
+ # descriptors), we can just wait for it to exit.
+
+ elif not selector.get_map():
+ p.wait()
+ break
+
+ # Otherwise there may still be outstanding data to read.
+ finally:
+ selector.close()
+ # close stdin, stdout, and stderr after process is terminated and
+ # stdout/stderr are read completely (see also issues #848, #64768).
+ stdin.close()
+ p.stdout.close()
+ p.stderr.close()
+
+ if C.HOST_KEY_CHECKING:
+ if cmd[0] == b"sshpass" and p.returncode == 6:
+ raise AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
+ 'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+
+ controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
+ if p.returncode != 0 and controlpersisterror:
+ raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
+ '(or ssh_args in [ssh_connection] section of the config file) before running again')
+
+ # If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
+ # we raise a special exception so that we can retry a connection.
+ controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
+ if p.returncode == 255:
+
+ additional = to_native(b_stderr)
+ if controlpersist_broken_pipe:
+ raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
+
+ elif in_data and checkrc:
+ raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
+ % (self.host, additional))
+
+ return (p.returncode, b_stdout, b_stderr)
+
+ @_ssh_retry
+ def _run(self, cmd, in_data, sudoable=True, checkrc=True):
+ """Wrapper around _bare_run that retries the connection
+ """
+ return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
+
+ @_ssh_retry
+ def _file_transport_command(self, in_path, out_path, sftp_action):
+ # scp and sftp require square brackets for IPv6 addresses, but
+ # accept them for hostnames and IPv4 addresses too.
+ host = '[%s]' % self.host
+
+ smart_methods = ['sftp', 'scp', 'piped']
+
+ # Windows does not support dd so we cannot use the piped method
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ smart_methods.remove('piped')
+
+ # Transfer methods to try
+ methods = []
+
+ # Use the transfer_method option if set, otherwise use scp_if_ssh
+ ssh_transfer_method = self._play_context.ssh_transfer_method
+ if ssh_transfer_method is not None:
+ if ssh_transfer_method not in ('smart', 'sftp', 'scp', 'piped'):
+ raise AnsibleOptionsError('transfer_method needs to be one of [smart|sftp|scp|piped]')
+ if ssh_transfer_method == 'smart':
+ methods = smart_methods
+ else:
+ methods = [ssh_transfer_method]
+ else:
+ # since this can be a non-bool now, we need to handle it correctly
+ scp_if_ssh = C.DEFAULT_SCP_IF_SSH
+ if not isinstance(scp_if_ssh, bool):
+ scp_if_ssh = scp_if_ssh.lower()
+ if scp_if_ssh in BOOLEANS:
+ scp_if_ssh = boolean(scp_if_ssh, strict=False)
+ elif scp_if_ssh != 'smart':
+ raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
+ if scp_if_ssh == 'smart':
+ methods = smart_methods
+ elif scp_if_ssh is True:
+ methods = ['scp']
+ else:
+ methods = ['sftp']
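+
+ # For example, with transfer_method=smart (or scp_if_ssh left at 'smart')
+ # on a non-Windows target, methods is ['sftp', 'scp', 'piped'] and each
+ # is tried in turn until one returns rc 0.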
+
+ for method in methods:
+ returncode = stdout = stderr = None
+ if method == 'sftp':
+ cmd = self._build_command(self.get_option('sftp_executable'), to_bytes(host))
+ in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
+ in_data = to_bytes(in_data, nonstring='passthru')
+ (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
+ elif method == 'scp':
+ scp = self.get_option('scp_executable')
+
+ if sftp_action == 'get':
+ cmd = self._build_command(scp, u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
+ else:
+ cmd = self._build_command(scp, in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
+ in_data = None
+ (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
+ elif method == 'piped':
+ if sftp_action == 'get':
+ # we pass sudoable=False to disable pty allocation, which
+ # would end up mixing stdout/stderr and screwing with newlines
+ (returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ out_file.write(stdout)
+ else:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
+ in_data = to_bytes(f.read(), nonstring='passthru')
+ if not in_data:
+ count = ' count=0'
+ else:
+ count = ''
+ (returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
+
+ # Check the return code and roll over to the next method if it failed
+ if returncode == 0:
+ return (returncode, stdout, stderr)
+ else:
+ # If not in smart mode, the data will be printed by the raise below
+ if len(methods) > 1:
+ display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
+ display.debug(u'%s' % to_text(stdout))
+ display.debug(u'%s' % to_text(stderr))
+
+ if returncode == 255:
+ raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
+ else:
+ raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def _escape_win_path(self, path):
+ """ converts a Windows path to one that's supported by SFTP and SCP """
+ # If using a root path then we need to start with /
+ prefix = ""
+ if re.match(r'^\w{1}:', path):
+ prefix = "/"
+
+ # Convert all '\' to '/'
+ return "%s%s" % (prefix, path.replace("\\", "/"))
+
+ #
+ # Main public methods
+ #
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the remote host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ # Become method 'runas' is done in the wrapper that is executed;
+ # we need to disable sudoable so _bare_run is not waiting for a
+ # prompt that will never occur
+ sudoable = False
+
+ # Make sure our first command is to set the console encoding to
+ # utf-8; this must be done via chcp to get utf-8 (65001)
+ cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND]
+ cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False))
+ cmd = ' '.join(cmd_parts)
+
+ # we can only use tty when we are not pipelining the modules. piping
+ # data into /usr/bin/python inside a tty automatically invokes the
+ # python interactive-mode but the modules are not compatible with the
+ # interactive-mode ("unexpected indent" mainly because of empty lines)
+
+ ssh_executable = self._play_context.ssh_executable
+
+ # -tt can cause various issues in some environments so allow the user
+ # to disable it as a troubleshooting method.
+ use_tty = self.get_option('use_tty')
+
+ if not in_data and sudoable and use_tty:
+ args = (ssh_executable, '-tt', self.host, cmd)
+ else:
+ args = (ssh_executable, self.host, cmd)
+
+ cmd = self._build_command(*args)
+ (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
+
+ # When running on Windows, stderr may contain CLIXML encoded output
+ if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
+ stderr = _parse_clixml(stderr)
+
+ return (returncode, stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ out_path = self._escape_win_path(out_path)
+
+ return self._file_transport_command(in_path, out_path, 'put')
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
+
+ # need to add / if path is rooted
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ in_path = self._escape_win_path(in_path)
+
+ return self._file_transport_command(in_path, out_path, 'get')
+
+ def reset(self):
+ # If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
+ cmd = self._build_command(self._play_context.ssh_executable, '-O', 'stop', self.host)
+ controlpersist, controlpath = self._persistence_controls(cmd)
+ cp_arg = [a for a in cmd if a.startswith(b"ControlPath=")]
+
+ # only run the reset if the ControlPath already exists or if it isn't
+ # configured and ControlPersist is set
+ run_reset = False
+ if controlpersist and len(cp_arg) > 0:
+ cp_path = cp_arg[0].split(b"=", 1)[-1]
+ if os.path.exists(cp_path):
+ run_reset = True
+ elif controlpersist:
+ run_reset = True
+
+ if run_reset:
+ display.vvv(u'sending stop: %s' % to_text(cmd))
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ status_code = p.wait()
+ if status_code != 0:
+ display.warning(u"Failed to reset connection:%s" % to_text(stderr))
+
+ self.close()
+
+ def close(self):
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py
new file mode 100644
index 00000000..3a1bc3d4
--- /dev/null
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -0,0 +1,712 @@
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author: Ansible Core Team
+ connection: winrm
+ short_description: Run tasks over Microsoft's WinRM
+ description:
+ - Run commands or put/fetch on a target via WinRM
+ - This plugin allows extra arguments to be passed that are supported by the protocol but not explicitly defined here.
+ They should take the form of variables declared with the following pattern C(ansible_winrm_<option>).
+ version_added: "2.0"
+ requirements:
+ - pywinrm (python library)
+ options:
+ # figure out more elegant 'delegation'
+ remote_addr:
+ description:
+ - Address of the Windows machine
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_winrm_host
+ type: str
+ remote_user:
+ description:
+ - The user to log in as on the Windows machine
+ vars:
+ - name: ansible_user
+ - name: ansible_winrm_user
+ type: str
+ remote_password:
+ description: Authentication password for the C(remote_user). Can be supplied as a CLI option.
+ vars:
+ - name: ansible_password
+ - name: ansible_winrm_pass
+ - name: ansible_winrm_password
+ type: str
+ aliases:
+ - password # Needed for --ask-pass to come through on delegation
+ port:
+ description:
+ - Port for WinRM to connect on the remote target
+ - The default is the HTTPS port (5986); if using HTTP, it should be 5985
+ vars:
+ - name: ansible_port
+ - name: ansible_winrm_port
+ default: 5986
+ type: integer
+ scheme:
+ description:
+ - URI scheme to use
+ - If not set, defaults to C(https), or to C(http) if I(port) is
+ C(5985).
+ choices: [http, https]
+ vars:
+ - name: ansible_winrm_scheme
+ type: str
+ path:
+ description: URI path to connect to
+ default: '/wsman'
+ vars:
+ - name: ansible_winrm_path
+ type: str
+ transport:
+ description:
+ - List of winrm transports to attempt to use (ssl, plaintext, kerberos, etc.)
+ - If None (the default), the plugin will try to automatically guess the correct list
+ - The choices available depend on your version of pywinrm
+ type: list
+ vars:
+ - name: ansible_winrm_transport
+ kerberos_command:
+ description: Kerberos command to use to request an authentication ticket
+ default: kinit
+ vars:
+ - name: ansible_winrm_kinit_cmd
+ type: str
+ kerberos_mode:
+ description:
+ - Kerberos usage mode.
+ - The managed option means Ansible will obtain a Kerberos ticket itself,
+ while the manual one means a ticket must already have been obtained by the user.
+ - If having issues with Ansible freezing when trying to obtain the
+ Kerberos ticket, you can either set this to C(manual) and obtain
+ it outside Ansible or install C(pexpect) through pip and try
+ again.
+ choices: [managed, manual]
+ vars:
+ - name: ansible_winrm_kinit_mode
+ type: str
+ connection_timeout:
+ description:
+ - Sets the operation and read timeout settings for the WinRM
+ connection.
+ - Corresponds to the C(operation_timeout_sec) and
+ C(read_timeout_sec) args in pywinrm, so avoid setting those
+ args together with this one.
+ - The default value is whatever is set in the installed version of
+ pywinrm.
+ vars:
+ - name: ansible_winrm_connection_timeout
+ type: int
+"""
+
+import base64
+import logging
+import os
+import re
+import traceback
+import json
+import tempfile
+import subprocess
+
+HAVE_KERBEROS = False
+try:
+ import kerberos
+ HAVE_KERBEROS = True
+except ImportError:
+ pass
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils.json_utils import _filter_non_json_lines
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six.moves.urllib.parse import urlunsplit
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import binary_type, PY3
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.shell.powershell import _parse_clixml
+from ansible.utils.hashing import secure_hash
+from ansible.utils.display import Display
+
+# getargspec is deprecated in favour of getfullargspec in Python 3 but
+# getfullargspec is not available in Python 2
+if PY3:
+ from inspect import getfullargspec as getargspec
+else:
+ from inspect import getargspec
+
+try:
+ import winrm
+ from winrm import Response
+ from winrm.protocol import Protocol
+ import requests.exceptions
+ HAS_WINRM = True
+except ImportError as e:
+ HAS_WINRM = False
+ WINRM_IMPORT_ERR = e
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError as e:
+ HAS_XMLTODICT = False
+ XMLTODICT_IMPORT_ERR = e
+
+HAS_PEXPECT = False
+try:
+ import pexpect
+ # echo was added in pexpect 3.3+ which is newer than the RHEL package
+ # we can only use pexpect for kerb auth if echo is a valid kwarg
+ # https://github.com/ansible/ansible/issues/43462
+ if hasattr(pexpect, 'spawn'):
+ argspec = getargspec(pexpect.spawn.__init__)
+ if 'echo' in argspec.args:
+ HAS_PEXPECT = True
+except ImportError as e:
+ pass
+
+# used to try to parse the hostname and detect whether IPv6 is being used
+try:
+ import ipaddress
+ HAS_IPADDRESS = True
+except ImportError:
+ HAS_IPADDRESS = False
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ '''WinRM connections over HTTP/HTTPS.'''
+
+ transport = 'winrm'
+ module_implementation_preferences = ('.ps1', '.exe', '')
+ allow_executable = False
+ has_pipelining = True
+ allow_extras = True
+
+ def __init__(self, *args, **kwargs):
+
+ self.always_pipeline_modules = True
+ self.has_native_async = True
+
+ self.protocol = None
+ self.shell_id = None
+ self.delegate = None
+ self._shell_type = 'powershell'
+
+ super(Connection, self).__init__(*args, **kwargs)
+
+ if not C.DEFAULT_DEBUG:
+ logging.getLogger('requests_credssp').setLevel(logging.INFO)
+ logging.getLogger('requests_kerberos').setLevel(logging.INFO)
+ logging.getLogger('urllib3').setLevel(logging.INFO)
+
+ def _build_winrm_kwargs(self):
+ # this used to be in set_options; as win_reboot needs to be able to
+ # override the conn timeout, we need to be able to build the args
+ # after setting individual options. This is called by _connect before
+ # starting the WinRM connection
+ self._winrm_host = self.get_option('remote_addr')
+ self._winrm_user = self.get_option('remote_user')
+ self._winrm_pass = self.get_option('remote_password')
+
+ self._winrm_port = self.get_option('port')
+
+ self._winrm_scheme = self.get_option('scheme')
+ # old behaviour: scheme should default to http if not set and the
+ # port is 5985, otherwise https
+ if self._winrm_scheme is None:
+ self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'
+
+ self._winrm_path = self.get_option('path')
+ self._kinit_cmd = self.get_option('kerberos_command')
+ self._winrm_transport = self.get_option('transport')
+ self._winrm_connection_timeout = self.get_option('connection_timeout')
+
+ if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
+ self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
+ else:
+ # for legacy versions of pywinrm, use the values we know are supported
+ self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
+
+ # calculate transport if needed
+ if self._winrm_transport is None or self._winrm_transport[0] is None:
+ # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
+ transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext']
+
+ if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
+ self._winrm_transport = ['kerberos'] + transport_selector
+ else:
+ self._winrm_transport = transport_selector
+
+ unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
+
+ if unsupported_transports:
+ raise AnsibleError('The installed version of WinRM does not support transport(s) %s' %
+ to_native(list(unsupported_transports), nonstring='simplerepr'))
+
+ # if kerberos is among our transports and there's a password specified, we're managing the tickets
+ kinit_mode = self.get_option('kerberos_mode')
+ if kinit_mode is None:
+ # HACK: ideally, remove multi-transport stuff
+ self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "")
+ elif kinit_mode == "managed":
+ self._kerb_managed = True
+ elif kinit_mode == "manual":
+ self._kerb_managed = False
+
+ # arg names we're going to pass directly
+ internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
+
+ self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
+ argspec = getargspec(Protocol.__init__)
+ supported_winrm_args = set(argspec.args)
+ supported_winrm_args.update(internal_kwarg_mask)
+ passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in self.get_option('_extras')])
+ unsupported_args = passed_winrm_args.difference(supported_winrm_args)
+
+ # warn for kwargs unsupported by the installed version of pywinrm
+ for arg in unsupported_args:
+ display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
+
+ # pass through matching extras, excluding the list we want to treat specially
+ for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
+ self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg]
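+
+ # For illustration (hypothetical host var): ansible_winrm_server_cert_validation=ignore
+ # in inventory arrives via _extras and, provided the installed pywinrm's
+ # Protocol.__init__ accepts that keyword, ends up as
+ # self._winrm_kwargs['server_cert_validation'] = 'ignore'.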
+
+ # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, the simplest way is to let each
+ # connection auth itself with a private CCACHE.
+ def _kerb_auth(self, principal, password):
+ if password is None:
+ password = ""
+
+ self._kerb_ccache = tempfile.NamedTemporaryFile()
+ display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
+ krb5ccname = "FILE:%s" % self._kerb_ccache.name
+ os.environ["KRB5CCNAME"] = krb5ccname
+ krb5env = dict(KRB5CCNAME=krb5ccname)
+
+ # stores various flags to call with kinit; we currently only use this
+ # to set -f so we can get a forwardable ticket (cred delegation)
+ kinit_flags = []
+ if boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
+ kinit_flags.append('-f')
+
+ kinit_cmdline = [self._kinit_cmd]
+ kinit_cmdline.extend(kinit_flags)
+ kinit_cmdline.append(principal)
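+
+ # e.g. (hypothetical principal) with ansible_winrm_kerberos_delegation
+ # enabled, kinit_cmdline is now ['kinit', '-f', 'user@EXAMPLE.COM'].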
+
+ # pexpect runs the process in its own pty so it can correctly send
+ # the password as input even on macOS, which blocks subprocess from
+ # doing so. Unfortunately it is not part of the Python standard
+ # library, so we can only use it if someone has installed it
+ if HAS_PEXPECT:
+ proc_mechanism = "pexpect"
+ command = kinit_cmdline.pop(0)
+ password = to_text(password, encoding='utf-8',
+ errors='surrogate_or_strict')
+
+ display.vvvv("calling kinit with pexpect for principal %s"
+ % principal)
+ try:
+ child = pexpect.spawn(command, kinit_cmdline, timeout=60,
+ env=krb5env, echo=False)
+ except pexpect.ExceptionPexpect as err:
+ err_msg = "Kerberos auth failure when calling kinit cmd " \
+ "'%s': %s" % (command, to_native(err))
+ raise AnsibleConnectionFailure(err_msg)
+
+ try:
+ child.expect(".*:")
+ child.sendline(password)
+ except OSError as err:
+ # child exited before the pass was sent, Ansible will raise
+ # error based on the rc below, just display the error here
+ display.vvvv("kinit with pexpect raised OSError: %s"
+ % to_native(err))
+
+ # technically this is the stdout + stderr but to match the
+ # subprocess error checking behaviour, we will call it stderr
+ stderr = child.read()
+ child.wait()
+ rc = child.exitstatus
+ else:
+ proc_mechanism = "subprocess"
+ password = to_bytes(password, encoding='utf-8',
+ errors='surrogate_or_strict')
+
+ display.vvvv("calling kinit with subprocess for principal %s"
+ % principal)
+ try:
+ p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=krb5env)
+
+ except OSError as err:
+ err_msg = "Kerberos auth failure when calling kinit cmd " \
+ "'%s': %s" % (self._kinit_cmd, to_native(err))
+ raise AnsibleConnectionFailure(err_msg)
+
+ stdout, stderr = p.communicate(password + b'\n')
+ rc = p.returncode
+
+ if rc != 0:
+ # one last attempt at making sure the password does not exist
+ # in the output
+ exp_msg = to_native(stderr.strip())
+ exp_msg = exp_msg.replace(to_native(password), "<redacted>")
+
+ err_msg = "Kerberos auth failure for principal %s with %s: %s" \
+ % (principal, proc_mechanism, exp_msg)
+ raise AnsibleConnectionFailure(err_msg)
+
+ display.vvvvv("kinit succeeded for principal %s" % principal)
+
+ def _winrm_connect(self):
+ '''
+ Establish a WinRM connection over HTTP/HTTPS.
+ '''
+ display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
+ (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
+
+ winrm_host = self._winrm_host
+ if HAS_IPADDRESS:
+ display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host)
+ try:
+ ipaddress.IPv6Address(winrm_host)
+ except ipaddress.AddressValueError:
+ pass
+ else:
+ winrm_host = "[%s]" % winrm_host
+
+ netloc = '%s:%d' % (winrm_host, self._winrm_port)
+ endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
+ errors = []
+ for transport in self._winrm_transport:
+ if transport == 'kerberos':
+ if not HAVE_KERBEROS:
+ errors.append('kerberos: the python kerberos library is not installed')
+ continue
+ if self._kerb_managed:
+ self._kerb_auth(self._winrm_user, self._winrm_pass)
+ display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
+ try:
+ winrm_kwargs = self._winrm_kwargs.copy()
+ if self._winrm_connection_timeout:
+ winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
+ winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 1
+ protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
+
+ # open the shell from connect so we know we're able to talk to the server
+ if not self.shell_id:
+ self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
+ display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
+
+ return protocol
+ except Exception as e:
+ err_msg = to_text(e).strip()
+ if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
+ raise AnsibleError('the connection attempt timed out')
+ m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
+ if m:
+ code = int(m.groups()[0])
+ if code == 401:
+ err_msg = 'the specified credentials were rejected by the server'
+ elif code == 411:
+ return protocol
+ errors.append(u'%s: %s' % (transport, err_msg))
+ display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
+ if errors:
+ raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
+ else:
+ raise AnsibleError('No transport found for WinRM connection')
+
+ def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
+ rq = {'env:Envelope': protocol._get_soap_header(
+ resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
+ action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
+ shell_id=shell_id)}
+ stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
+ .setdefault('rsp:Stream', {})
+ stream['@Name'] = 'stdin'
+ stream['@CommandId'] = command_id
+ stream['#text'] = base64.b64encode(to_bytes(stdin))
+ if eof:
+ stream['@End'] = 'true'
+ protocol.send_message(xmltodict.unparse(rq))
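+
+ # Roughly, the resulting Send envelope carries the payload as
+ # <rsp:Send><rsp:Stream Name="stdin" CommandId="...">aGVsbG8=</rsp:Stream></rsp:Send>
+ # with End="true" on the final chunk (a sketch; the actual namespaces and
+ # headers come from pywinrm's SOAP header).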
+
+ def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
+ if not self.protocol:
+ self.protocol = self._winrm_connect()
+ self._connected = True
+ if from_exec:
+ display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
+ else:
+ display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
+ command_id = None
+ try:
+ stdin_push_failed = False
+ command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
+
+ try:
+ if stdin_iterator:
+ for (data, is_last) in stdin_iterator:
+ self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
+
+ except Exception as ex:
+ display.warning("ERROR DURING WINRM SEND INPUT - attempting to recover: %s %s"
+ % (type(ex).__name__, to_text(ex)))
+ display.debug(traceback.format_exc())
+ stdin_push_failed = True
+
+ # NB: this can hang if the receiver is still running (e.g. the network failed a Send request but the server is still happy).
+ # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
+ resptuple = self.protocol.get_command_output(self.shell_id, command_id)
+ # ensure stdout/stderr are text for py3
+ # FUTURE: this should probably be done internally by pywinrm
+ response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
+
+ # TODO: check result from response and set stdin_push_failed if we have nonzero
+ if from_exec:
+ display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
+ else:
+ display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
+
+ display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
+ display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
+
+ if stdin_push_failed:
+ # There are cases where the stdin input failed but the WinRM service still processed it. We attempt to
+ # see if stdout contains a valid json return value so we can ignore this error
+ try:
+ filtered_output, dummy = _filter_non_json_lines(response.std_out)
+ json.loads(filtered_output)
+ except ValueError:
+ # stdout does not contain a return response, stdin input was a fatal error
+ stderr = to_bytes(response.std_err, encoding='utf-8')
+ if stderr.startswith(b"#< CLIXML"):
+ stderr = _parse_clixml(stderr)
+
+ raise AnsibleError('winrm send_input failed;\nstdout: %s\nstderr: %s'
+ % (to_native(response.std_out), to_native(stderr)))
+
+ return response
+ except requests.exceptions.Timeout as exc:
+ raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc))
+ finally:
+ if command_id:
+ self.protocol.cleanup_command(self.shell_id, command_id)
+
+ def _connect(self):
+
+ if not HAS_WINRM:
+ raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
+ elif not HAS_XMLTODICT:
+ raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))
+
+ super(Connection, self)._connect()
+ if not self.protocol:
+ self._build_winrm_kwargs() # build the kwargs from the options set
+ self.protocol = self._winrm_connect()
+ self._connected = True
+ return self
+
+ def reset(self):
+ self.protocol = None
+ self.shell_id = None
+ self._connect()
+
+ def _wrapper_payload_stream(self, payload, buffer_size=200000):
+ payload_bytes = to_bytes(payload)
+ byte_count = len(payload_bytes)
+ for i in range(0, byte_count, buffer_size):
+ yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
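+
+ # Illustrative only: with buffer_size=4, payload b'abcdefghij' yields
+ # (b'abcd', False), (b'efgh', False), (b'ij', True); the flag marks the
+ # final chunk so _winrm_send_input can set the End attribute.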
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+ cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
+
+ # TODO: display something meaningful here
+ display.vvv("EXEC (via pipeline wrapper)")
+
+ stdin_iterator = None
+
+ if in_data:
+ stdin_iterator = self._wrapper_payload_stream(in_data)
+
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
+
+ result.std_out = to_bytes(result.std_out)
+ result.std_err = to_bytes(result.std_err)
+
+ # parse just stderr from CLIXML output
+ if result.std_err.startswith(b"#< CLIXML"):
+ try:
+ result.std_err = _parse_clixml(result.std_err)
+ except Exception:
+ # unsure if we're guaranteed a valid xml doc; use raw output in case of error
+ pass
+
+ return (result.status_code, result.std_out, result.std_err)
+
+ # FUTURE: determine buffer size at runtime via remote winrm config?
+ def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
+ in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
+ offset = 0
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ for out_data in iter((lambda: in_file.read(buffer_size)), b''):
+ offset += len(out_data)
+ self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
+ # yes, we're double-encoding over the wire in this case: we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
+ b64_data = base64.b64encode(out_data) + b'\r\n'
+ # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
+ yield b64_data, (in_file.tell() == in_size)
+
+ if offset == 0: # empty file, return an empty buffer + eof to close it
+ yield "", True
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+ out_path = self._shell._unquote(out_path)
+ display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
+
+ script_template = u'''
+ begin {{
+ $path = '{0}'
+
+ $DebugPreference = "Continue"
+ $ErrorActionPreference = "Stop"
+ Set-StrictMode -Version 2
+
+ $fd = [System.IO.File]::Create($path)
+
+ $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
+
+ $bytes = @() #initialize for empty file case
+ }}
+ process {{
+ $bytes = [System.Convert]::FromBase64String($input)
+ $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
+ $fd.Write($bytes, 0, $bytes.Length)
+ }}
+ end {{
+ $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
+
+ $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
+
+ $fd.Close()
+
+ Write-Output "{{""sha1"":""$hash""}}"
+ }}
+ '''
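+
+ # The script above streams the base64 stdin chunks to disk while folding
+ # them into a running SHA-1, then prints {"sha1":"..."} so the result can
+ # be checked against the local hash computed below.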
+
+ script = script_template.format(self._shell._escape(out_path))
+ cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
+
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
+ # TODO: improve error handling
+ if result.status_code != 0:
+ raise AnsibleError(to_native(result.std_err))
+
+ try:
+ put_output = json.loads(result.std_out)
+ except ValueError:
+ # stdout does not contain a valid response
+ stderr = to_bytes(result.std_err, encoding='utf-8')
+ if stderr.startswith(b"#< CLIXML"):
+ stderr = _parse_clixml(stderr)
+ raise AnsibleError('winrm put_file failed;\nstdout: %s\nstderr: %s' % (to_native(result.std_out), to_native(stderr)))
+
+ remote_sha1 = put_output.get("sha1")
+ if not remote_sha1:
+ raise AnsibleError("Remote sha1 was not returned")
+
+ local_sha1 = secure_hash(in_path)
+
+ if remote_sha1 != local_sha1:
+ raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+ in_path = self._shell._unquote(in_path)
+ out_path = out_path.replace('\\', '/')
+ # consistent with other connection plugins, we assume the caller has created the target dir
+ display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
+ buffer_size = 2**19 # 0.5MB chunks
+ out_file = None
+ try:
+ offset = 0
+ while True:
+ try:
+ script = '''
+ $path = '%(path)s'
+ If (Test-Path -Path $path -PathType Leaf)
+ {
+ $buffer_size = %(buffer_size)d
+ $offset = %(offset)d
+
+ $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
+ $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
+ $buffer = New-Object -TypeName byte[] $buffer_size
+ $bytes_read = $stream.Read($buffer, 0, $buffer_size)
+ if ($bytes_read -gt 0) {
+ $bytes = $buffer[0..($bytes_read - 1)]
+ [System.Convert]::ToBase64String($bytes)
+ }
+ $stream.Close() > $null
+ }
+ ElseIf (Test-Path -Path $path -PathType Container)
+ {
+ Write-Host "[DIR]";
+ }
+ Else
+ {
+ Write-Error "$path does not exist";
+ Exit 1;
+ }
+ ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
+ display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
+ cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
+ if result.status_code != 0:
+ raise IOError(to_native(result.std_err))
+ if result.std_out.strip() == '[DIR]':
+ data = None
+ else:
+ data = base64.b64decode(result.std_out.strip())
+ if data is None:
+ break
+ else:
+ if not out_file:
+ # If out_path is a directory and we're expecting a file, bail out now.
+ if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
+ break
+ out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
+ out_file.write(data)
+ if len(data) < buffer_size:
+ break
+ offset += len(data)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
+ finally:
+ if out_file:
+ out_file.close()
+
+ def close(self):
+ if self.protocol and self.shell_id:
+ display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
+ self.protocol.close_shell(self.shell_id)
+ self.shell_id = None
+ self.protocol = None
+ self._connected = False
diff --git a/lib/ansible/plugins/doc_fragments/__init__.py b/lib/ansible/plugins/doc_fragments/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/__init__.py
diff --git a/lib/ansible/plugins/doc_fragments/backup.py b/lib/ansible/plugins/doc_fragments/backup.py
new file mode 100644
index 00000000..d2e76dc1
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/backup.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+'''
diff --git a/lib/ansible/plugins/doc_fragments/constructed.py b/lib/ansible/plugins/doc_fragments/constructed.py
new file mode 100644
index 00000000..f2788da0
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/constructed.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ strict:
+ description:
+ - If C(yes), make invalid entries a fatal error; otherwise skip and continue.
+ - Since it is possible to use facts in the expressions, they might not always be available,
+ and we ignore those errors by default.
+ type: bool
+ default: no
+ compose:
+ description: Create vars from jinja2 expressions.
+ type: dict
+ default: {}
+ groups:
+ description: Add hosts to group based on Jinja2 conditionals.
+ type: dict
+ default: {}
+ keyed_groups:
+ description: Add hosts to group based on the values of a variable.
+ type: list
+ default: []
+'''
diff --git a/lib/ansible/plugins/doc_fragments/decrypt.py b/lib/ansible/plugins/doc_fragments/decrypt.py
new file mode 100644
index 00000000..ea7cf59b
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/decrypt.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Brian Coca <bcoca@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ decrypt:
+ description:
+ - This option controls the autodecryption of source files using vault.
+ type: bool
+ default: yes
+ version_added: '2.4'
+'''
diff --git a/lib/ansible/plugins/doc_fragments/default_callback.py b/lib/ansible/plugins/doc_fragments/default_callback.py
new file mode 100644
index 00000000..df3966b9
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/default_callback.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+ options:
+ display_skipped_hosts:
+ name: Show skipped hosts
+ description: "Toggle to control displaying skipped task/host results in a task"
+ type: bool
+ default: yes
+ env:
+ - name: DISPLAY_SKIPPED_HOSTS
+ deprecated:
+ why: environment variables without "ANSIBLE_" prefix are deprecated
+ version: "2.12"
+ alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
+ - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+ ini:
+ - key: display_skipped_hosts
+ section: defaults
+ display_ok_hosts:
+ name: Show 'ok' hosts
+ description: "Toggle to control displaying 'ok' task/host results in a task"
+ type: bool
+ default: yes
+ env:
+ - name: ANSIBLE_DISPLAY_OK_HOSTS
+ ini:
+ - key: display_ok_hosts
+ section: defaults
+ version_added: '2.7'
+ display_failed_stderr:
+ name: Use STDERR for failed and unreachable tasks
+ description: "Toggle to control whether failed and unreachable tasks are displayed to STDERR (vs. STDOUT)"
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_DISPLAY_FAILED_STDERR
+ ini:
+ - key: display_failed_stderr
+ section: defaults
+ version_added: '2.7'
+ show_custom_stats:
+ name: Show custom stats
+ description: 'This adds the custom stats set via the set_stats plugin to the play recap'
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_SHOW_CUSTOM_STATS
+ ini:
+ - key: show_custom_stats
+ section: defaults
+ show_per_host_start:
+ name: Show per host task start
+ description: 'This adds output that shows when a task is started to execute for each host'
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_SHOW_PER_HOST_START
+ ini:
+ - key: show_per_host_start
+ section: defaults
+ version_added: '2.9'
+ check_mode_markers:
+ name: Show markers when running in check mode
+ description:
+ - Toggle to control displaying markers when running in check mode.
+ - "The markers are C(DRY RUN) at the beggining and ending of playbook execution (when calling C(ansible-playbook --check))
+ and C(CHECK MODE) as a suffix at every play and task that is run in check mode."
+ type: bool
+ default: no
+ version_added: '2.9'
+ env:
+ - name: ANSIBLE_CHECK_MODE_MARKERS
+ ini:
+ - key: check_mode_markers
+ section: defaults
+'''
diff --git a/lib/ansible/plugins/doc_fragments/files.py b/lib/ansible/plugins/doc_fragments/files.py
new file mode 100644
index 00000000..5d6092a6
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/files.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+
+ # Note: mode is overridden by the copy and template modules so if you change the description
+ # here, you should also change it there.
+ DOCUMENTATION = r'''
+options:
+ mode:
+ description:
+ - The permissions the resulting file or directory should have.
+ - For those used to I(/usr/bin/chmod) remember that modes are actually octal numbers.
+ You must either add a leading zero so that Ansible's YAML parser knows it is an octal number
+ (like C(0644) or C(01777)) or quote it (like C('644') or C('1777')) so Ansible receives
+ a string and can do its own conversion from string into number.
+ - Giving Ansible a number without following one of these rules will end up with a decimal
+ number which will have unexpected results.
+ - As of Ansible 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or
+ C(u=rw,g=r,o=r)).
+ type: raw
+ owner:
+ description:
+ - Name of the user that should own the file/directory, as would be fed to I(chown).
+ type: str
+ group:
+ description:
+ - Name of the group that should own the file/directory, as would be fed to I(chown).
+ type: str
+ seuser:
+ description:
+ - The user part of the SELinux file context.
+ - By default it uses the C(system) policy, where applicable.
+ - When set to C(_default), it will use the C(user) portion of the policy if available.
+ type: str
+ serole:
+ description:
+ - The role part of the SELinux file context.
+ - When set to C(_default), it will use the C(role) portion of the policy if available.
+ type: str
+ setype:
+ description:
+ - The type part of the SELinux file context.
+ - When set to C(_default), it will use the C(type) portion of the policy if available.
+ type: str
+ selevel:
+ description:
+ - The level part of the SELinux file context.
+ - This is the MLS/MCS attribute, sometimes known as the C(range).
+ - When set to C(_default), it will use the C(level) portion of the policy if available.
+ type: str
+ unsafe_writes:
+ description:
+ - Influence when to use atomic operation to prevent data corruption or inconsistent reads from the target file.
+ - By default this module uses atomic operations to prevent data corruption or inconsistent reads from the target files,
+ but sometimes systems are configured or just broken in ways that prevent this. One example is docker mounted files,
+ which cannot be updated atomically from inside the container and can only be written in an unsafe manner.
+ - This option allows Ansible to fall back to unsafe methods of updating files when atomic operations fail
+ (however, it doesn't force Ansible to perform unsafe writes).
+ - IMPORTANT! Unsafe writes are subject to race conditions and can lead to data corruption.
+ type: bool
+ default: no
+ version_added: '2.2'
+ attributes:
+ description:
+ - The attributes the resulting file or directory should have.
+ - To get supported flags look at the man page for I(chattr) on the target system.
+ - This string should contain the attributes in the same order as the one displayed by I(lsattr).
+ - The C(=) operator is assumed as default, otherwise C(+) or C(-) operators need to be included in the string.
+ type: str
+ aliases: [ attr ]
+ version_added: '2.3'
+'''
diff --git a/lib/ansible/plugins/doc_fragments/inventory_cache.py b/lib/ansible/plugins/doc_fragments/inventory_cache.py
new file mode 100644
index 00000000..342be334
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/inventory_cache.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # inventory cache
+ DOCUMENTATION = r'''
+options:
+ cache:
+ description:
+ - Toggle to enable/disable the caching of the inventory's source data, requires a cache plugin setup to work.
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_INVENTORY_CACHE
+ ini:
+ - section: inventory
+ key: cache
+ cache_plugin:
+ description:
+ - Cache plugin to use for the inventory's source data.
+ type: str
+ default: memory
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN
+ - name: ANSIBLE_INVENTORY_CACHE_PLUGIN
+ ini:
+ - section: defaults
+ key: fact_caching
+ - section: inventory
+ key: cache_plugin
+ cache_timeout:
+ description:
+ - Cache duration in seconds
+ default: 3600
+ type: int
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ - name: ANSIBLE_INVENTORY_CACHE_TIMEOUT
+ ini:
+ - section: defaults
+ key: fact_caching_timeout
+ - section: inventory
+ key: cache_timeout
+ cache_connection:
+ description:
+ - Cache connection data or path. Read the cache plugin documentation for specifics.
+ type: str
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ - name: ANSIBLE_INVENTORY_CACHE_CONNECTION
+ ini:
+ - section: defaults
+ key: fact_caching_connection
+ - section: inventory
+ key: cache_connection
+ cache_prefix:
+ description:
+ - Prefix to use for cache plugin files/tables.
+ default: ansible_inventory_
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ - name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX
+ ini:
+ - section: defaults
+ key: fact_caching_prefix
+ - section: inventory
+ key: cache_prefix
+'''
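+
+# Illustrative ansible.cfg snippet consuming the options above; the jsonfile
+# plugin and path are assumptions for the example, not part of this fragment:
+#   [inventory]
+#   cache = yes
+#   cache_plugin = jsonfile
+#   cache_connection = /tmp/ansible_inventory_cache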
diff --git a/lib/ansible/plugins/doc_fragments/return_common.py b/lib/ansible/plugins/doc_fragments/return_common.py
new file mode 100644
index 00000000..6f542880
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/return_common.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard documentation fragment
+ RETURN = r'''
+changed:
+ description: Whether the module caused changes on the target.
+ returned: always
+ type: bool
+ sample: false
+failed:
+ description: Whether the module failed to execute.
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Human-readable message.
+ returned: as needed
+ type: str
+ sample: all ok
+skipped:
+ description: Whether the module was skipped.
+ returned: always
+ type: bool
+ sample: false
+results:
+ description: List of module results.
+ returned: when using a loop.
+ type: list
+ sample: [{changed: True, msg: 'first item changed'}, {changed: False, msg: 'second item ok'}]
+exception:
+ description: Optional information from a handled error.
+ returned: on some errors
+ type: str
+ sample: Unknown error
+'''
diff --git a/lib/ansible/plugins/doc_fragments/shell_common.py b/lib/ansible/plugins/doc_fragments/shell_common.py
new file mode 100644
index 00000000..7ba4049c
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/shell_common.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # common shell documentation fragment
+ DOCUMENTATION = """
+options:
+ remote_tmp:
+ description:
+ - Temporary directory to use on targets when executing tasks.
+ default: '~/.ansible/tmp'
+ env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
+ ini:
+ - section: defaults
+ key: remote_tmp
+ vars:
+ - name: ansible_remote_tmp
+ system_tmpdirs:
+ description:
+ - "List of valid system temporary directories for Ansible to choose when it cannot use
+ ``remote_tmp``, normally due to permission issues. These must be world readable, writable,
+ and executable."
+ default: [ /var/tmp, /tmp ]
+ type: list
+ env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
+ ini:
+ - section: defaults
+ key: system_tmpdirs
+ vars:
+ - name: ansible_system_tmpdirs
+ async_dir:
+ description:
+ - Directory in which Ansible will keep async job information.
+ default: '~/.ansible_async'
+ env: [{name: ANSIBLE_ASYNC_DIR}]
+ ini:
+ - section: defaults
+ key: async_dir
+ vars:
+ - name: ansible_async_dir
+ environment:
+ type: dict
+ default: {}
+ description:
+ - Dictionary of environment variables and their values to use when executing commands.
+ admin_users:
+ type: list
+ default: ['root', 'toor']
+ description:
+ - List of users expected to have admin privileges. This is used by the controller to
+ determine how to share temporary files between the remote user and the become user.
+ env:
+ - name: ANSIBLE_ADMIN_USERS
+ ini:
+ - section: defaults
+ key: admin_users
+ vars:
+ - name: ansible_admin_users
+ world_readable_temp:
+ version_added: '2.10'
+ default: False
+ description:
+ - This makes the temporary files created on the machine world-readable and will issue a warning instead of failing the task.
+ - It is useful when becoming an unprivileged user.
+ env:
+ - name: ANSIBLE_SHELL_ALLOW_WORLD_READABLE_TEMP
+ vars:
+ - name: ansible_shell_allow_world_readable_temp
+ ini:
+ - {key: allow_world_readable_tmpfiles, section: defaults}
+ type: boolean
+"""
diff --git a/lib/ansible/plugins/doc_fragments/shell_windows.py b/lib/ansible/plugins/doc_fragments/shell_windows.py
new file mode 100644
index 00000000..d6d4d7c5
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/shell_windows.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Windows shell documentation fragment
+ # FIXME: set_module_language doesn't belong here but must be set so that it doesn't fail when
+ # someone calls get_option('set_module_language') on this plugin
+ DOCUMENTATION = """
+options:
+ async_dir:
+ description:
+ - Directory in which Ansible will keep async job information.
+ - Before Ansible 2.8, this was set to C(remote_tmp + "\\.ansible_async").
+ default: '%USERPROFILE%\\.ansible_async'
+ ini:
+ - section: powershell
+ key: async_dir
+ vars:
+ - name: ansible_async_dir
+ version_added: '2.8'
+ remote_tmp:
+ description:
+ - Temporary directory to use on targets when copying files to the host.
+ default: '%TEMP%'
+ ini:
+ - section: powershell
+ key: remote_tmp
+ vars:
+ - name: ansible_remote_tmp
+ set_module_language:
+ description:
+ - Controls if we set the locale for modules when executing on the
+ target.
+ - Windows only supports C(no) as an option.
+ type: bool
+ default: 'no'
+ choices:
+ - 'no'
+ environment:
+ description:
+ - Dictionary of environment variables and their values to use when
+ executing commands.
+ type: dict
+ default: {}
+"""
diff --git a/lib/ansible/plugins/doc_fragments/template_common.py b/lib/ansible/plugins/doc_fragments/template_common.py
new file mode 100644
index 00000000..47b61e74
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/template_common.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard template documentation fragment, used by template and win_template.
+ DOCUMENTATION = r'''
+description:
+- Templates are processed by the L(Jinja2 templating language,http://jinja.pocoo.org/docs/).
+- Documentation on the template formatting can be found in the
+ L(Template Designer Documentation,http://jinja.pocoo.org/docs/templates/).
+- Additional variables listed below can be used in templates.
+- C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to
+ describe the template name, host, modification time of the template file and the owner uid.
+- C(template_host) contains the node name of the template's machine.
+- C(template_uid) is the numeric user id of the owner.
+- C(template_path) is the path of the template.
+- C(template_fullpath) is the absolute path of the template.
+- C(template_destpath) is the path of the template on the remote system (added in 2.8).
+- C(template_run_date) is the date that the template was rendered.
+options:
+ src:
+ description:
+ - Path of a Jinja2 formatted template on the Ansible controller.
+ - This can be a relative or an absolute path.
+ - The file must be encoded with C(utf-8) but I(output_encoding) can be used to control the encoding of the output
+ template.
+ type: path
+ required: yes
+ dest:
+ description:
+ - Location to render the template to on the remote machine.
+ type: path
+ required: yes
+ newline_sequence:
+ description:
+ - Specify the newline sequence to use for templating files.
+ type: str
+ choices: [ '\n', '\r', '\r\n' ]
+ default: '\n'
+ version_added: '2.4'
+ block_start_string:
+ description:
+ - The string marking the beginning of a block.
+ type: str
+ default: '{%'
+ version_added: '2.4'
+ block_end_string:
+ description:
+ - The string marking the end of a block.
+ type: str
+ default: '%}'
+ version_added: '2.4'
+ variable_start_string:
+ description:
+ - The string marking the beginning of a print statement.
+ type: str
+ default: '{{'
+ version_added: '2.4'
+ variable_end_string:
+ description:
+ - The string marking the end of a print statement.
+ type: str
+ default: '}}'
+ version_added: '2.4'
+ trim_blocks:
+ description:
+ - Determine when newlines should be removed from blocks.
+ - When set to C(yes) the first newline after a block is removed (block, not variable tag!).
+ type: bool
+ default: yes
+ version_added: '2.4'
+ lstrip_blocks:
+ description:
+ - Determine when leading spaces and tabs should be stripped.
+ - When set to C(yes) leading spaces and tabs are stripped from the start of a line to a block.
+ - This functionality requires Jinja 2.7 or newer.
+ type: bool
+ default: no
+ version_added: '2.6'
+ force:
+ description:
+ - Determine whether the file is transferred if the destination already exists.
+ - When set to C(yes), replace the remote file when contents are different than the source.
+ - When set to C(no), the file will only be transferred if the destination does not exist.
+ type: bool
+ default: yes
+ output_encoding:
+ description:
+ - Overrides the encoding used to write the template file defined by C(dest).
+ - It defaults to C(utf-8), but any encoding supported by python can be used.
+ - The source template file must always be encoded using C(utf-8), for homogeneity.
+ type: str
+ default: utf-8
+ version_added: '2.7'
+notes:
+- Including a string that uses a date in the template will result in the template being marked 'changed' each time.
+- Since Ansible 0.9, templates are loaded with C(trim_blocks=True).
+- >
+ Also, you can override jinja2 settings by adding a special header to the template file.
+ i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
+ which changes the variable interpolation markers to C([% var %]) instead of C({{ var }}).
+ This is the best way to prevent evaluation of things that look like, but should not be, Jinja2.
+- Using raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively
+ evaluated.
+- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>)
+ on Linux.
+'''
diff --git a/lib/ansible/plugins/doc_fragments/url.py b/lib/ansible/plugins/doc_fragments/url.py
new file mode 100644
index 00000000..ddb8e4d1
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/url.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, John Barker <gundalow@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ url:
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ type: str
+ force:
+ description:
+ - If C(yes) do not get a cached copy.
+ - Alias C(thirsty) has been deprecated and will be removed in 2.13.
+ type: bool
+ default: no
+ aliases: [ thirsty ]
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ type: str
+ default: ansible-httpget
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ type: bool
+ default: yes
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without I(url_password) for sites that allow empty passwords.
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the I(url_username) parameter is not specified, the I(url_password) parameter will not be used.
+ type: str
+ force_basic_auth:
+ description:
+ - Credentials specified with I(url_username) and I(url_password) should be passed in HTTP Header.
+ type: bool
+ default: no
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key, and if the key is included, C(client_key) is not required.
+ type: path
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If C(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+'''
diff --git a/lib/ansible/plugins/doc_fragments/url_windows.py b/lib/ansible/plugins/doc_fragments/url_windows.py
new file mode 100644
index 00000000..286f4b4a
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/url_windows.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment:
+
+ # Common options for Ansible.ModuleUtils.WebRequest
+ DOCUMENTATION = r'''
+options:
+ method:
+ description:
+ - The HTTP Method of the request.
+ type: str
+ follow_redirects:
+ description:
+ - Whether or not the module should follow redirects.
+ - C(all) will follow all redirects.
+ - C(none) will not follow any redirect.
+ - C(safe) will follow only "safe" redirects, where "safe" means that the
+ client is only doing a C(GET) or C(HEAD) on the URI to which it is being
+ redirected.
+ - When following a redirected URL, the C(Authorization) header and any
+ credentials set will be dropped and not sent to the redirected host.
+ choices:
+ - all
+ - none
+ - safe
+ default: safe
+ type: str
+ headers:
+ description:
+ - Extra headers to set on the request.
+ - This should be a dictionary where the key is the header name and the
+ value is the value for that header.
+ type: dict
+ http_agent:
+ description:
+ - Header to identify as, generally appears in web server logs.
+ - This is set to the C(User-Agent) header on an HTTP request.
+ default: ansible-httpget
+ type: str
+ maximum_redirection:
+ description:
+ - Specify how many times the module will redirect a connection to an
+ alternative URI before the connection fails.
+ - If set to C(0), if I(follow_redirects) is set to C(none), or if it is set to C(safe) when
+ not doing a C(GET) or C(HEAD), all redirection is prevented.
+ default: 50
+ type: int
+ timeout:
+ description:
+ - Specifies how long the request can be pending before it times out (in
+ seconds).
+ - Set to C(0) to specify an infinite timeout.
+ default: 30
+ type: int
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed
+ certificates.
+ default: yes
+ type: bool
+ client_cert:
+ description:
+ - The path to the client certificate (.pfx) that is used for X509
+ authentication. This path can either be the path to the C(pfx) on the
+ filesystem or the PowerShell certificate path
+ C(Cert:\CurrentUser\My\<thumbprint>).
+ - If the certificate file is not password protected, the WinRM connection
+ must be authenticated with C(CredSSP) or C(become) must be used on the task.
+ - Other authentication types can set I(client_cert_password) when the cert
+ is password protected.
+ type: str
+ client_cert_password:
+ description:
+ - The password for I(client_cert) if the cert is password protected.
+ type: str
+ force_basic_auth:
+ description:
+ - By default the authentication header is only sent when a webservice
+ responds to an initial request with a 401 status. Since some basic auth
+ services do not properly send a 401, logins will fail.
+ - This option forces the sending of the Basic authentication header upon
+ the original request.
+ default: no
+ type: bool
+ url_username:
+ description:
+ - The username to use for authentication.
+ type: str
+ url_password:
+ description:
+ - The password for I(url_username).
+ type: str
+ use_default_credential:
+ description:
+ - Uses the current user's credentials when authenticating with a server
+ protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication.
+ - Sites that use C(Basic) auth will still require explicit credentials
+ through the I(url_username) and I(url_password) options.
+ - The module will only have access to the user's credentials if using
+ C(become) with a password, you are connecting with SSH using a password,
+ or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation).
+ - If not using C(become) or a different auth method to the ones stated
+ above, there will be no default credentials available and no
+ authentication will occur.
+ default: no
+ type: bool
+ use_proxy:
+ description:
+ - If C(no), it will not use the proxy defined in IE for the current user.
+ default: yes
+ type: bool
+ proxy_url:
+ description:
+ - An explicit proxy to use for the request.
+ - By default, the request will use the IE defined proxy unless I(use_proxy)
+ is set to C(no).
+ type: str
+ proxy_username:
+ description:
+ - The username to use for proxy authentication.
+ type: str
+ proxy_password:
+ description:
+ - The password for I(proxy_username).
+ type: str
+ proxy_use_default_credential:
+ description:
+ - Uses the current user's credentials when authenticating with a proxy host
+ protected with C(NTLM), C(Kerberos), or C(Negotiate) authentication.
+ - Proxies that use C(Basic) auth will still require explicit credentials
+ through the I(proxy_username) and I(proxy_password) options.
+ - The module will only have access to the user's credentials if using
+ C(become) with a password, you are connecting with SSH using a password,
+ or connecting with WinRM using C(CredSSP) or C(Kerberos with delegation).
+ - If not using C(become) or a different auth method to the ones stated
+ above, there will be no default credentials available and no proxy
+ authentication will occur.
+ default: no
+ type: bool
+seealso:
+- module: community.windows.win_inet_proxy
+'''
diff --git a/lib/ansible/plugins/doc_fragments/validate.py b/lib/ansible/plugins/doc_fragments/validate.py
new file mode 100644
index 00000000..99bbe000
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/validate.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ validate:
+ description:
+ - The validation command to run before copying into place.
+ - The path to the file to validate is passed in via '%s' which must be present as in the examples below.
+ - The command is passed securely so shell features like expansion and pipes will not work.
+ type: str
+'''
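+
+# Illustrative task usage of the option above; visudo is just one example of a
+# validator command, not implied by this fragment:
+#   validate: /usr/sbin/visudo -cf %s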
diff --git a/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py b/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py
new file mode 100644
index 00000000..b2da29c4
--- /dev/null
+++ b/lib/ansible/plugins/doc_fragments/vars_plugin_staging.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ stage:
+ description:
+ - Control when this vars plugin may be executed.
+ - Setting this option to C(all) will run the vars plugin after importing inventory and whenever it is demanded by a task.
+ - Setting this option to C(task) will only run the vars plugin whenever it is demanded by a task.
+ - Setting this option to C(inventory) will only run the vars plugin after parsing inventory.
+ - If this option is omitted, the global I(RUN_VARS_PLUGINS) configuration is used to determine when to execute the vars plugin.
+ choices: ['all', 'task', 'inventory']
+ version_added: "2.10"
+ type: str
+'''
diff --git a/lib/ansible/plugins/filter/__init__.py b/lib/ansible/plugins/filter/__init__.py
new file mode 100644
index 00000000..980f84a2
--- /dev/null
+++ b/lib/ansible/plugins/filter/__init__.py
@@ -0,0 +1,3 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
new file mode 100644
index 00000000..99e9e0e3
--- /dev/null
+++ b/lib/ansible/plugins/filter/core.py
@@ -0,0 +1,663 @@
+# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import crypt
+import glob
+import hashlib
+import itertools
+import json
+import ntpath
+import os.path
+import re
+import string
+import sys
+import time
+import uuid
+import yaml
+
+import datetime
+from functools import partial
+from random import Random, SystemRandom, shuffle
+
+from jinja2.filters import environmentfilter, do_groupby as _do_groupby
+
+from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleFilterTypeError
+from ansible.module_utils.six import iteritems, string_types, integer_types, reraise
+from ansible.module_utils.six.moves import reduce, shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.template import recursive_check_defined
+from ansible.utils.display import Display
+from ansible.utils.encrypt import passlib_or_crypt
+from ansible.utils.hashing import md5s, checksum_s
+from ansible.utils.unicode import unicode_wrap
+from ansible.utils.vars import merge_hash
+
+display = Display()
+
+UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
+
+
+def to_yaml(a, *args, **kw):
+ '''Make verbose, human readable yaml'''
+ default_flow_style = kw.pop('default_flow_style', None)
+ transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, default_flow_style=default_flow_style, **kw)
+ return to_text(transformed)
+
+
+def to_nice_yaml(a, indent=4, *args, **kw):
+ '''Make verbose, human readable yaml'''
+ transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+ return to_text(transformed)
+
+
+def to_json(a, *args, **kw):
+ ''' Convert the value to JSON '''
+ return json.dumps(a, cls=AnsibleJSONEncoder, *args, **kw)
+
+
+def to_nice_json(a, indent=4, sort_keys=True, *args, **kw):
+ '''Make verbose, human readable JSON'''
+ return to_json(a, indent=indent, sort_keys=sort_keys, separators=(',', ': '), *args, **kw)
+
+
+def to_bool(a):
+ ''' return a bool for the arg '''
+ if a is None or isinstance(a, bool):
+ return a
+ if isinstance(a, string_types):
+ a = a.lower()
+ if a in ('yes', 'on', '1', 'true', 1):
+ return True
+ return False
+
+
+def to_datetime(string, format="%Y-%m-%d %H:%M:%S"):
+ return datetime.datetime.strptime(string, format)
+
+
+def strftime(string_format, second=None):
+ ''' return a date string formatted with string_format. See https://docs.python.org/2/library/time.html#time.strftime for the format syntax '''
+ if second is not None:
+ try:
+ second = float(second)
+ except Exception:
+ raise AnsibleFilterError('Invalid value for epoch value (%s)' % second)
+ return time.strftime(string_format, time.localtime(second))
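+
+# Illustrative usage of the strftime filter (registered below); output depends
+# on the controller's clock and local timezone:
+#   {{ '%Y-%m-%d' | strftime }}       -> current date on the controller
+#   {{ '%H:%M:%S' | strftime(0) }}    -> the epoch, rendered in local time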
+
+
+def quote(a):
+ ''' return its argument quoted for shell usage '''
+ return shlex_quote(to_text(a))
+
+
+def fileglob(pathname):
+ ''' return list of matched regular files for glob '''
+ return [g for g in glob.glob(pathname) if os.path.isfile(g)]
+
+
+def regex_replace(value='', pattern='', replacement='', ignorecase=False, multiline=False):
+ ''' Perform a `re.sub` returning a string '''
+
+ value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
+
+ flags = 0
+ if ignorecase:
+ flags |= re.I
+ if multiline:
+ flags |= re.M
+ _re = re.compile(pattern, flags=flags)
+ return _re.sub(replacement, value)
+
+
+def regex_findall(value, regex, multiline=False, ignorecase=False):
+ ''' Perform re.findall and return the list of matches '''
+
+ value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
+
+ flags = 0
+ if ignorecase:
+ flags |= re.I
+ if multiline:
+ flags |= re.M
+ return re.findall(regex, value, flags)
+
+
+def regex_search(value, regex, *args, **kwargs):
+ ''' Perform re.search and return the list of matches or a backref '''
+
+ value = to_text(value, errors='surrogate_or_strict', nonstring='simplerepr')
+
+ groups = list()
+ for arg in args:
+ if arg.startswith('\\g'):
+ match = re.match(r'\\g<(\S+)>', arg).group(1)
+ groups.append(match)
+ elif arg.startswith('\\'):
+ match = int(re.match(r'\\(\d+)', arg).group(1))
+ groups.append(match)
+ else:
+ raise AnsibleFilterError('Unknown argument')
+
+ flags = 0
+ if kwargs.get('ignorecase'):
+ flags |= re.I
+ if kwargs.get('multiline'):
+ flags |= re.M
+
+ match = re.search(regex, value, flags)
+ if match:
+ if not groups:
+ return match.group()
+ else:
+ items = list()
+ for item in groups:
+ items.append(match.group(item))
+ return items
+
+
+def ternary(value, true_val, false_val, none_val=None):
+ ''' value ? true_val : false_val '''
+ if value is None and none_val is not None:
+ return none_val
+ elif bool(value):
+ return true_val
+ else:
+ return false_val
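+
+# Illustrative usage of the ternary filter (registered below):
+#   {{ (2 > 1) | ternary('yes', 'no') }}        -> 'yes'
+#   {{ None | ternary('yes', 'no', 'maybe') }}  -> 'maybe'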
+
+
+def regex_escape(string, re_type='python'):
+ '''Escape all regular expressions special characters from STRING.'''
+ string = to_text(string, errors='surrogate_or_strict', nonstring='simplerepr')
+ if re_type == 'python':
+ return re.escape(string)
+ elif re_type == 'posix_basic':
+ # list of BRE special chars:
+ # https://en.wikibooks.org/wiki/Regular_Expressions/POSIX_Basic_Regular_Expressions
+ return regex_replace(string, r'([].[^$*\\])', r'\\\1')
+ # TODO: implement posix_extended
+ # It's similar to, but different from python regex, which is similar to,
+ # but different from PCRE. It's possible that re.escape would work here.
+ # https://remram44.github.io/regex-cheatsheet/regex.html#programs
+ elif re_type == 'posix_extended':
+ raise AnsibleFilterError('Regex type (%s) not yet implemented' % re_type)
+ else:
+ raise AnsibleFilterError('Invalid regex type (%s)' % re_type)
+
+
+def from_yaml(data):
+ if isinstance(data, string_types):
+ return yaml.safe_load(data)
+ return data
+
+
+def from_yaml_all(data):
+ if isinstance(data, string_types):
+ return yaml.safe_load_all(data)
+ return data
+
+
+@environmentfilter
+def rand(environment, end, start=None, step=None, seed=None):
+ if seed is None:
+ r = SystemRandom()
+ else:
+ r = Random(seed)
+ if isinstance(end, integer_types):
+ if not start:
+ start = 0
+ if not step:
+ step = 1
+ return r.randrange(start, end, step)
+ elif hasattr(end, '__iter__'):
+ if start or step:
+ raise AnsibleFilterError('start and step can only be used with integer values')
+ return r.choice(end)
+ else:
+ raise AnsibleFilterError('random can only be used on sequences and integers')
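+
+# Illustrative usage of the random filter (registered below); results vary
+# between runs unless a seed is given:
+#   {{ 10 | random }}                           -> an integer in 0..9
+#   {{ ['a', 'b', 'c'] | random }}              -> one element of the list
+#   {{ 60 | random(seed=inventory_hostname) }}  -> stable per-host value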
+
+
+def randomize_list(mylist, seed=None):
+ try:
+ mylist = list(mylist)
+ if seed:
+ r = Random(seed)
+ r.shuffle(mylist)
+ else:
+ shuffle(mylist)
+ except Exception:
+ pass
+ return mylist
+
+
+def get_hash(data, hashtype='sha1'):
+
+ try: # see if hash is supported
+ h = hashlib.new(hashtype)
+ except Exception:
+ return None
+
+ h.update(to_bytes(data, errors='surrogate_or_strict'))
+ return h.hexdigest()
+
+
+def get_encrypted_password(password, hashtype='sha512', salt=None, salt_size=None, rounds=None):
+ passlib_mapping = {
+ 'md5': 'md5_crypt',
+ 'blowfish': 'bcrypt',
+ 'sha256': 'sha256_crypt',
+ 'sha512': 'sha512_crypt',
+ }
+
+ hashtype = passlib_mapping.get(hashtype, hashtype)
+ try:
+ return passlib_or_crypt(password, hashtype, salt=salt, salt_size=salt_size, rounds=rounds)
+ except AnsibleError as e:
+ reraise(AnsibleFilterError, AnsibleFilterError(to_native(e), orig_exc=e), sys.exc_info()[2])
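+
+# Illustrative usage of the password_hash filter (registered below):
+#   {{ 'secret' | password_hash('sha512') }}  -> a sha512_crypt string ($6$...)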
+
+
+def to_uuid(string, namespace=UUID_NAMESPACE_ANSIBLE):
+ uuid_namespace = namespace
+ if not isinstance(uuid_namespace, uuid.UUID):
+ try:
+ uuid_namespace = uuid.UUID(namespace)
+ except (AttributeError, ValueError) as e:
+ raise AnsibleFilterError("Invalid value '%s' for 'namespace': %s" % (to_native(namespace), to_native(e)))
+ # uuid.uuid5() requires bytes on Python 2 and bytes or text on Python 3
+ return to_text(uuid.uuid5(uuid_namespace, to_native(string, errors='surrogate_or_strict')))
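+
+# Illustrative usage of the to_uuid filter (registered below); the result is a
+# stable UUIDv5 derived from the Ansible namespace above:
+#   {{ 'www.example.com' | to_uuid }}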
+
+
+def mandatory(a, msg=None):
+ ''' Make a variable mandatory '''
+ from jinja2.runtime import Undefined
+
+ if isinstance(a, Undefined):
+ if a._undefined_name is not None:
+ name = "'%s' " % to_text(a._undefined_name)
+ else:
+ name = ''
+
+ if msg is not None:
+ raise AnsibleFilterError(to_native(msg))
+ else:
+ raise AnsibleFilterError("Mandatory variable %s not defined." % name)
+
+ return a
+
+
+def combine(*terms, **kwargs):
+ recursive = kwargs.pop('recursive', False)
+ list_merge = kwargs.pop('list_merge', 'replace')
+ if kwargs:
+ raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments")
+
+ # allow the user to do `[dict1, dict2, ...] | combine`
+ dictionaries = flatten(terms, levels=1)
+
+ # recursively check that every element is defined (for jinja2)
+ recursive_check_defined(dictionaries)
+
+ if not dictionaries:
+ return {}
+
+ if len(dictionaries) == 1:
+ return dictionaries[0]
+
+ # merge all the dicts so that the dict at the end of the array has precedence
+ # over the dict at the beginning.
+ # we merge the dicts from the highest to the lowest priority because there is
+ # a huge probability that the lowest priority dict will be the biggest in size
+ # (as the low prio dict will hold the "default" values and the others will be "patches")
+ # and merge_hash creates a copy of its first argument.
+ # so high/right -> low/left is more efficient than low/left -> high/right
+ high_to_low_prio_dict_iterator = reversed(dictionaries)
+ result = next(high_to_low_prio_dict_iterator)
+ for dictionary in high_to_low_prio_dict_iterator:
+ result = merge_hash(dictionary, result, recursive, list_merge)
+
+ return result
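+
+# Illustrative usage of the combine filter (registered below); later dicts win:
+#   {{ {'a': 1, 'b': 2} | combine({'b': 10, 'c': 3}) }} -> {'a': 1, 'b': 10, 'c': 3}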
+
+
+def comment(text, style='plain', **kw):
+ # Predefined comment types
+ comment_styles = {
+ 'plain': {
+ 'decoration': '# '
+ },
+ 'erlang': {
+ 'decoration': '% '
+ },
+ 'c': {
+ 'decoration': '// '
+ },
+ 'cblock': {
+ 'beginning': '/*',
+ 'decoration': ' * ',
+ 'end': ' */'
+ },
+ 'xml': {
+ 'beginning': '<!--',
+ 'decoration': ' - ',
+ 'end': '-->'
+ }
+ }
+
+ # Pointer to the right comment type
+ style_params = comment_styles[style]
+
+ if 'decoration' in kw:
+ prepostfix = kw['decoration']
+ else:
+ prepostfix = style_params['decoration']
+
+ # Default params
+ p = {
+ 'newline': '\n',
+ 'beginning': '',
+ 'prefix': (prepostfix).rstrip(),
+ 'prefix_count': 1,
+ 'decoration': '',
+ 'postfix': (prepostfix).rstrip(),
+ 'postfix_count': 1,
+ 'end': ''
+ }
+
+ # Update default params
+ p.update(style_params)
+ p.update(kw)
+
+ # Compose substrings for the final string
+ str_beginning = ''
+ if p['beginning']:
+ str_beginning = "%s%s" % (p['beginning'], p['newline'])
+ str_prefix = ''
+ if p['prefix']:
+ if p['prefix'] != p['newline']:
+ str_prefix = str(
+ "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
+ else:
+ str_prefix = str(
+ "%s" % (p['newline'])) * int(p['prefix_count'])
+ str_text = ("%s%s" % (
+ p['decoration'],
+ # Prepend each line of the text with the decorator
+ text.replace(
+ p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
+ # Remove trailing spaces when only decorator is on the line
+ "%s%s" % (p['decoration'], p['newline']),
+ "%s%s" % (p['decoration'].rstrip(), p['newline']))
+ str_postfix = p['newline'].join(
+ [''] + [p['postfix'] for x in range(p['postfix_count'])])
+ str_end = ''
+ if p['end']:
+ str_end = "%s%s" % (p['newline'], p['end'])
+
+ # Return the final string
+ return "%s%s%s%s%s" % (
+ str_beginning,
+ str_prefix,
+ str_text,
+ str_postfix,
+ str_end)
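+
+# Illustrative usage of the comment filter (registered below), shown with the
+# default prefix/postfix lines:
+#   {{ 'hello' | comment }}       -> "#\n# hello\n#"
+#   {{ 'hello' | comment('c') }}  -> "//\n// hello\n//"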
+
+
+@environmentfilter
+def extract(environment, item, container, morekeys=None):
+ if morekeys is None:
+ keys = [item]
+ elif isinstance(morekeys, list):
+ keys = [item] + morekeys
+ else:
+ keys = [item, morekeys]
+
+ value = container
+ for key in keys:
+ value = environment.getitem(value, key)
+
+ return value
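+
+# Illustrative usage of the extract filter (registered below):
+#   {{ 'b' | extract({'a': 1, 'b': 2}) }}      -> 2
+#   {{ 0 | extract(['x', 'y']) }}              -> 'x'
+#   {{ 'a' | extract({'a': {'b': 3}}, 'b') }}  -> 3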
+
+
+@environmentfilter
+def do_groupby(environment, value, attribute):
+ """Overridden groupby filter for jinja2, to address an issue with
+ jinja2>=2.9.0,<2.9.5 where a namedtuple was returned which
+ has a repr that prevents ansible.template.safe_eval.safe_eval from being
+ able to parse and eval the data.
+
+ jinja2<2.9.0,>=2.9.5 is not affected, as <2.9.0 uses a tuple, and
+ >=2.9.5 uses a standard tuple repr on the namedtuple.
+
+ The adaptation here, is to run the jinja2 `do_groupby` function, and
+ cast all of the namedtuples to a regular tuple.
+
+ See https://github.com/ansible/ansible/issues/20098
+
+ We may be able to remove this in the future.
+ """
+ return [tuple(t) for t in _do_groupby(environment, value, attribute)]
+
+
+def b64encode(string, encoding='utf-8'):
+ return to_text(base64.b64encode(to_bytes(string, encoding=encoding, errors='surrogate_or_strict')))
+
+
+def b64decode(string, encoding='utf-8'):
+ return to_text(base64.b64decode(to_bytes(string, errors='surrogate_or_strict')), encoding=encoding)
+
+
+def flatten(mylist, levels=None):
+
+ ret = []
+ for element in mylist:
+ if element in (None, 'None', 'null'):
+ # skip undefined items rather than aborting the rest of the list
+ continue
+ elif is_sequence(element):
+ if levels is None:
+ ret.extend(flatten(element))
+ elif levels >= 1:
+ # decrement as we go down the stack
+ ret.extend(flatten(element, levels=(int(levels) - 1)))
+ else:
+ ret.append(element)
+ else:
+ ret.append(element)
+
+ return ret
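+
+# Illustrative usage of the flatten filter (registered below):
+#   {{ [1, [2, [3, 4]]] | flatten }}            -> [1, 2, 3, 4]
+#   {{ [1, [2, [3, 4]]] | flatten(levels=1) }}  -> [1, 2, [3, 4]]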
+
+
+def subelements(obj, subelements, skip_missing=False):
+ '''Accepts a dict or list of dicts, and a dotted accessor and produces a product
+ of the element and the results of the dotted accessor
+
+ >>> obj = [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}]
+ >>> subelements(obj, 'groups')
+ [({'name': 'alice', 'groups': ['wheel'], 'authorized': ['/tmp/alice/onekey.pub']}, 'wheel')]
+
+ '''
+ if isinstance(obj, dict):
+ element_list = list(obj.values())
+ elif isinstance(obj, list):
+ element_list = obj[:]
+ else:
+ raise AnsibleFilterError('obj must be a list of dicts or a nested dict')
+
+ if isinstance(subelements, list):
+ subelement_list = subelements[:]
+ elif isinstance(subelements, string_types):
+ subelement_list = subelements.split('.')
+ else:
+ raise AnsibleFilterTypeError('subelements must be a list or a string')
+
+ results = []
+
+ for element in element_list:
+ values = element
+ for subelement in subelement_list:
+ try:
+ values = values[subelement]
+ except KeyError:
+ if skip_missing:
+ values = []
+ break
+ raise AnsibleFilterError("could not find %r key in iterated item %r" % (subelement, values))
+ except TypeError:
+ raise AnsibleFilterTypeError("the key %s should point to a dictionary, got '%s'" % (subelement, values))
+ if not isinstance(values, list):
+ raise AnsibleFilterTypeError("the key %r should point to a list, got %r" % (subelement, values))
+
+ for value in values:
+ results.append((element, value))
+
+ return results
+
+
+def dict_to_list_of_dict_key_value_elements(mydict, key_name='key', value_name='value'):
+ ''' takes a dictionary and transforms it into a list of dictionaries,
+ each having 'key' and 'value' keys that correspond to the keys and values of the original '''
+
+ if not isinstance(mydict, Mapping):
+ raise AnsibleFilterTypeError("dict2items requires a dictionary, got %s instead." % type(mydict))
+
+ ret = []
+ for key in mydict:
+ ret.append({key_name: key, value_name: mydict[key]})
+ return ret
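+
+# Illustrative usage of the dict2items filter (registered below):
+#   {{ {'a': 1} | dict2items }} -> [{'key': 'a', 'value': 1}]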
+
+
+def list_of_dict_key_value_elements_to_dict(mylist, key_name='key', value_name='value'):
+ ''' takes a list of dicts, each having 'key' and 'value' keys, and transforms the list into a dictionary,
+ effectively as the reverse of dict2items '''
+
+ if not is_sequence(mylist):
+ raise AnsibleFilterTypeError("items2dict requires a list, got %s instead." % type(mylist))
+
+ return dict((item[key_name], item[value_name]) for item in mylist)
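+
+# Illustrative usage of the items2dict filter (registered below), the inverse
+# of dict2items:
+#   {{ [{'key': 'a', 'value': 1}] | items2dict }} -> {'a': 1}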
+
+
+def path_join(paths):
+ ''' takes a sequence or a string, and returns a concatenation
+ of the different members '''
+ if isinstance(paths, string_types):
+ return os.path.join(paths)
+ elif is_sequence(paths):
+ return os.path.join(*paths)
+ else:
+ raise AnsibleFilterTypeError("|path_join expects string or sequence, got %s instead." % type(paths))
+
+
+class FilterModule(object):
+ ''' Ansible core jinja2 filters '''
+
+ def filters(self):
+ return {
+ # jinja2 overrides
+ 'groupby': do_groupby,
+
+ # base 64
+ 'b64decode': b64decode,
+ 'b64encode': b64encode,
+
+ # uuid
+ 'to_uuid': to_uuid,
+
+ # json
+ 'to_json': to_json,
+ 'to_nice_json': to_nice_json,
+ 'from_json': json.loads,
+
+ # yaml
+ 'to_yaml': to_yaml,
+ 'to_nice_yaml': to_nice_yaml,
+ 'from_yaml': from_yaml,
+ 'from_yaml_all': from_yaml_all,
+
+ # path
+ 'basename': partial(unicode_wrap, os.path.basename),
+ 'dirname': partial(unicode_wrap, os.path.dirname),
+ 'expanduser': partial(unicode_wrap, os.path.expanduser),
+ 'expandvars': partial(unicode_wrap, os.path.expandvars),
+ 'path_join': path_join,
+ 'realpath': partial(unicode_wrap, os.path.realpath),
+ 'relpath': partial(unicode_wrap, os.path.relpath),
+ 'splitext': partial(unicode_wrap, os.path.splitext),
+ 'win_basename': partial(unicode_wrap, ntpath.basename),
+ 'win_dirname': partial(unicode_wrap, ntpath.dirname),
+ 'win_splitdrive': partial(unicode_wrap, ntpath.splitdrive),
+
+ # file glob
+ 'fileglob': fileglob,
+
+ # types
+ 'bool': to_bool,
+ 'to_datetime': to_datetime,
+
+ # date formatting
+ 'strftime': strftime,
+
+ # quote string for shell usage
+ 'quote': quote,
+
+ # hash filters
+ # md5 hex digest of string
+ 'md5': md5s,
+ # sha1 hex digest of string
+ 'sha1': checksum_s,
+ # checksum of string as used by ansible for checksumming files
+ 'checksum': checksum_s,
+ # generic hashing
+ 'password_hash': get_encrypted_password,
+ 'hash': get_hash,
+
+ # regex
+ 'regex_replace': regex_replace,
+ 'regex_escape': regex_escape,
+ 'regex_search': regex_search,
+ 'regex_findall': regex_findall,
+
+ # ? : ;
+ 'ternary': ternary,
+
+ # random stuff
+ 'random': rand,
+ 'shuffle': randomize_list,
+
+ # undefined
+ 'mandatory': mandatory,
+
+ # comment-style decoration
+ 'comment': comment,
+
+ # debug
+ 'type_debug': lambda o: o.__class__.__name__,
+
+ # Data structures
+ 'combine': combine,
+ 'extract': extract,
+ 'flatten': flatten,
+ 'dict2items': dict_to_list_of_dict_key_value_elements,
+ 'items2dict': list_of_dict_key_value_elements_to_dict,
+ 'subelements': subelements,
+ }
diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py
new file mode 100644
index 00000000..64d0ba8b
--- /dev/null
+++ b/lib/ansible/plugins/filter/mathstuff.py
@@ -0,0 +1,267 @@
+# Copyright 2014, Brian Coca <bcoca@ansible.com>
+# Copyright 2017, Ken Celenza <ken@networktocode.com>
+# Copyright 2017, Jason Edelman <jason@networktocode.com>
+# Copyright 2017, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import itertools
+import math
+
+from jinja2.filters import environmentfilter
+
+from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+from ansible.module_utils.common.text import formatters
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.six.moves import zip, zip_longest
+from ansible.module_utils.common._collections_compat import Hashable, Mapping, Iterable
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+try:
+ from jinja2.filters import do_unique
+ HAS_UNIQUE = True
+except ImportError:
+ HAS_UNIQUE = False
+
+display = Display()
+
+
+@environmentfilter
+def unique(environment, a, case_sensitive=False, attribute=None):
+
+ def _do_fail(e):
+ if case_sensitive or attribute:
+ raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
+ "as it does not support the parameters supplied", orig_exc=e)
+
+ error = e = None
+ try:
+ if HAS_UNIQUE:
+ c = do_unique(environment, a, case_sensitive=case_sensitive, attribute=attribute)
+ if isinstance(a, Hashable):
+ c = set(c)
+ else:
+ c = list(c)
+ except TypeError as e:
+ error = e
+ _do_fail(e)
+ except Exception as e:
+ error = e
+ _do_fail(e)
+ display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
+
+ if not HAS_UNIQUE or error:
+
+ # handle Jinja2 specific attributes when using Ansible's version
+ if case_sensitive or attribute:
+ raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive nor attribute parameters, "
+ "you need a newer version of Jinja2 that provides their version of the filter.")
+
+ if isinstance(a, Hashable):
+ c = set(a)
+ else:
+ c = []
+ for x in a:
+ if x not in c:
+ c.append(x)
+ return c
+
+
+@environmentfilter
+def intersect(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) & set(b)
+ else:
+ c = unique(environment, [x for x in a if x in b])
+ return c
+
+
+@environmentfilter
+def difference(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) - set(b)
+ else:
+ c = unique(environment, [x for x in a if x not in b])
+ return c
+
+
+@environmentfilter
+def symmetric_difference(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) ^ set(b)
+ else:
+ isect = intersect(environment, a, b)
+ c = [x for x in union(environment, a, b) if x not in isect]
+ return c
+
+
+@environmentfilter
+def union(environment, a, b):
+ if isinstance(a, Hashable) and isinstance(b, Hashable):
+ c = set(a) | set(b)
+ else:
+ c = unique(environment, a + b)
+ return c
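+
+# Illustrative usage of the set-theory filters above; list inputs preserve the
+# order of first appearance:
+#   {{ [1, 2, 3] | union([3, 4]) }}         -> [1, 2, 3, 4]
+#   {{ [1, 2, 3] | intersect([2, 3, 4]) }}  -> [2, 3]
+#   {{ [1, 2, 3] | difference([2]) }}       -> [1, 3]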
+
+
+def min(a):
+ _min = __builtins__.get('min')
+ return _min(a)
+
+
+def max(a):
+ _max = __builtins__.get('max')
+ return _max(a)
+
+
+def logarithm(x, base=math.e):
+ try:
+ if base == 10:
+ return math.log10(x)
+ else:
+ return math.log(x, base)
+ except TypeError as e:
+ raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
+
+
+def power(x, y):
+ try:
+ return math.pow(x, y)
+ except TypeError as e:
+ raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
+
+
+def inversepower(x, base=2):
+ try:
+ if base == 2:
+ return math.sqrt(x)
+ else:
+ return math.pow(x, 1.0 / float(base))
+ except (ValueError, TypeError) as e:
+ raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
+
+
+def human_readable(size, isbits=False, unit=None):
+ ''' Return a human readable string '''
+ try:
+ return formatters.bytes_to_human(size, isbits, unit)
+ except TypeError as e:
+ raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
+ except Exception:
+ raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
+
+
+def human_to_bytes(size, default_unit=None, isbits=False):
+ ''' Return bytes count from a human readable string '''
+ try:
+ return formatters.human_to_bytes(size, default_unit, isbits)
+ except TypeError as e:
+ raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
+ except Exception:
+ raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
+
+
+def rekey_on_member(data, key, duplicates='error'):
+ """
+ Rekey a dict of dicts on another member
+
+ May also create a dict from a list of dicts.
+
+ duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
+ value would be duplicated or to overwrite previous entries if that's the case.
+ """
+ if duplicates not in ('error', 'overwrite'):
+ raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
+
+ new_obj = {}
+
+ if isinstance(data, Mapping):
+ iterate_over = data.values()
+ elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
+ iterate_over = data
+ else:
+ raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
+
+ for item in iterate_over:
+ if not isinstance(item, Mapping):
+ raise AnsibleFilterTypeError("List item is not a valid dict")
+
+ try:
+ key_elem = item[key]
+ except KeyError:
+ raise AnsibleFilterError("Key {0} was not found".format(key))
+ except TypeError as e:
+ raise AnsibleFilterTypeError(to_native(e))
+ except Exception as e:
+ raise AnsibleFilterError(to_native(e))
+
+ # Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
+ # minimum contain {key: key_elem})
+ if new_obj.get(key_elem, None):
+ if duplicates == 'error':
+ raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
+ elif duplicates == 'overwrite':
+ new_obj[key_elem] = item
+ else:
+ new_obj[key_elem] = item
+
+ return new_obj
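+
+# Illustrative usage of rekey_on_member (registered below):
+#   {{ [{'name': 'web1', 'port': 80}] | rekey_on_member('name') }}
+#     -> {'web1': {'name': 'web1', 'port': 80}}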
+
+
+class FilterModule(object):
+ ''' Ansible math jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ # general math
+ 'min': min,
+ 'max': max,
+
+ # exponents and logarithms
+ 'log': logarithm,
+ 'pow': power,
+ 'root': inversepower,
+
+ # set theory
+ 'unique': unique,
+ 'intersect': intersect,
+ 'difference': difference,
+ 'symmetric_difference': symmetric_difference,
+ 'union': union,
+
+ # combinatorial
+ 'product': itertools.product,
+ 'permutations': itertools.permutations,
+ 'combinations': itertools.combinations,
+
+ # computer theory
+ 'human_readable': human_readable,
+ 'human_to_bytes': human_to_bytes,
+ 'rekey_on_member': rekey_on_member,
+
+ # zip
+ 'zip': zip,
+ 'zip_longest': zip_longest,
+
+ }
+
+ return filters
diff --git a/lib/ansible/plugins/filter/urls.py b/lib/ansible/plugins/filter/urls.py
new file mode 100644
index 00000000..cdd0e42d
--- /dev/null
+++ b/lib/ansible/plugins/filter/urls.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import PY3, iteritems, string_types
+from ansible.module_utils.six.moves.urllib.parse import quote, quote_plus, unquote_plus
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from jinja2.filters import do_urlencode
+ HAS_URLENCODE = True
+except ImportError:
+ HAS_URLENCODE = False
+
+
+def unicode_urldecode(string):
+ if PY3:
+ return unquote_plus(string)
+ return to_text(unquote_plus(to_bytes(string)))
+
+
+def do_urldecode(string):
+ return unicode_urldecode(string)
+
+
+# NOTE: We implement urlencode when Jinja2 is older than v2.7
+def unicode_urlencode(string, for_qs=False):
+ safe = b'' if for_qs else b'/'
+ if for_qs:
+ quote_func = quote_plus
+ else:
+ quote_func = quote
+ if PY3:
+ return quote_func(string, safe)
+ return to_text(quote_func(to_bytes(string), safe))
+
+
+def do_urlencode(value):
+ itemiter = None
+ if isinstance(value, dict):
+ itemiter = iteritems(value)
+ elif not isinstance(value, string_types):
+ try:
+ itemiter = iter(value)
+ except TypeError:
+ pass
+ if itemiter is None:
+ return unicode_urlencode(value)
+ return u'&'.join(unicode_urlencode(k) + '=' +
+ unicode_urlencode(v, for_qs=True)
+ for k, v in itemiter)
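+
+# Illustrative usage (urldecode is always registered below; urlencode only
+# when Jinja2 does not already provide it):
+#   {{ 'a%20b' | urldecode }}       -> 'a b'
+#   {{ {'q': 'a b'} | urlencode }}  -> 'q=a+b'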
+
+
+class FilterModule(object):
+ ''' Ansible core jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ 'urldecode': do_urldecode,
+ }
+
+ if not HAS_URLENCODE:
+ filters['urlencode'] = do_urlencode
+
+ return filters
diff --git a/lib/ansible/plugins/filter/urlsplit.py b/lib/ansible/plugins/filter/urlsplit.py
new file mode 100644
index 00000000..84e460aa
--- /dev/null
+++ b/lib/ansible/plugins/filter/urlsplit.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.utils import helpers
+
+
+def split_url(value, query='', alias='urlsplit'):
+
+ results = helpers.object_to_dict(urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
+
+ # If a query is supplied, make sure it's valid then return the results.
+ # If no option is supplied, return the entire dictionary.
+ if query:
+ if query not in results:
+ raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
+ return results[query]
+ else:
+ return results
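+
+# Illustrative usage of the urlsplit filter (registered below):
+#   {{ 'http://user:pass@www.example.com:8080/dir/file?x=1' | urlsplit('hostname') }}
+#     -> 'www.example.com'
+#   {{ 'http://www.example.com/file' | urlsplit('path') }} -> '/file'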
+
+
+# ---- Ansible filters ----
+class FilterModule(object):
+ ''' URI filter '''
+
+ def filters(self):
+ return {
+ 'urlsplit': split_url
+ }
diff --git a/lib/ansible/plugins/httpapi/__init__.py b/lib/ansible/plugins/httpapi/__init__.py
new file mode 100644
index 00000000..0773921f
--- /dev/null
+++ b/lib/ansible/plugins/httpapi/__init__.py
@@ -0,0 +1,87 @@
+# (c) 2018 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+
+from ansible.plugins import AnsiblePlugin
+
+
+class HttpApiBase(AnsiblePlugin):
+ def __init__(self, connection):
+ super(HttpApiBase, self).__init__()
+
+ self.connection = connection
+ self._become = False
+ self._become_pass = ''
+
+ def set_become(self, become_context):
+ self._become = become_context.become
+ self._become_pass = getattr(become_context, 'become_pass') or ''
+
+ def login(self, username, password):
+ """Call a defined login endpoint to receive an authentication token.
+
+ This should only be implemented if the API has a single endpoint which
+ can turn HTTP basic auth into a token which can be reused for the rest
+ of the calls for the session.
+ """
+ pass
+
+ def logout(self):
+ """ Call to implement session logout.
+
+ Method to clear session gracefully e.g. tokens granted in login
+ need to be revoked.
+ """
+ pass
+
+ def update_auth(self, response, response_text):
+ """Return per-request auth token.
+
+ The response should be a dictionary that can be plugged into the
+ headers of a request. The default implementation uses cookie data.
+ If no authentication data is found, return None
+ """
+ cookie = response.info().get('Set-Cookie')
+ if cookie:
+ return {'Cookie': cookie}
+
+ return None
+
+ def handle_httperror(self, exc):
+ """Overridable method for dealing with HTTP codes.
+
+ This method will attempt to handle known cases of HTTP status codes.
+ If your API uses status codes to convey information in a regular way,
+ you can override this method to handle it appropriately.
+
+ :returns:
+ * True if the code has been handled in a way that the request
+ may be resent without changes.
+ * False if the error cannot be handled or recovered from by the
+ plugin. This will result in the HTTPError being raised as an
+ exception for the caller to deal with as appropriate (most likely
+ by failing).
+ * Any other value returned is taken as a valid response from the
+ server without making another request. In many cases, this can just
+ be the original exception.
+ """
+ if exc.code == 401:
+ if self.connection._auth:
+ # Stored auth appears to be invalid, clear and retry
+ self.connection._auth = None
+ self.login(self.connection.get_option('remote_user'), self.connection.get_option('password'))
+ return True
+ else:
+ # Unauthorized and there's no token. Return an error
+ return False
+
+ return exc
+
+ @abstractmethod
+ def send_request(self, data, **message_kwargs):
+ """Prepares and sends request(s) to device."""
+ pass
diff --git a/lib/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
new file mode 100644
index 00000000..d34c1e0a
--- /dev/null
+++ b/lib/ansible/plugins/inventory/__init__.py
@@ -0,0 +1,449 @@
+# (c) 2017, Red Hat, inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import hashlib
+import os
+import string
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.inventory.group import to_safe_group_name as original_safe
+from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins import AnsiblePlugin
+from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import string_types
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
+
+# Helper methods
+def to_safe_group_name(name):
+ # placeholder for backwards compat
+ return original_safe(name, force=True, silent=True)
+
+
+def detect_range(line=None):
+ '''
+ A helper function that checks a given host line to see if it contains
+ a range pattern such as db[1:6]-node.
+
+ Returns True if the given line contains a pattern, else False.
+ '''
+ return '[' in line
+
+
+def expand_hostname_range(line=None):
+ '''
+ A helper function that expands a given line that contains a pattern
+ specified in top docstring, and returns a list that consists of the
+ expanded version.
+
+ The '[' and ']' characters are used to maintain the pseudo-code
+ appearance. They are replaced in this function with '|' to ease
+ string splitting.
+
+ References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
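+
+    For example, 'db[1:3]-node' expands to ['db1-node', 'db2-node', 'db3-node'],
+    and 'db[01:10:3]' expands to ['db01', 'db04', 'db07', 'db10'].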
+ '''
+ all_hosts = []
+ if line:
+        # A hostname such as db[1:6]-node is considered to consist of
+        # three parts:
+ # head: 'db'
+ # nrange: [1:6]; range() is a built-in. Can't use the name
+ # tail: '-node'
+
+        # Support multiple ranges in a host, e.g.:
+        #    db[01:10:3]node-[01:10]
+        # - to do this we split off at the first [...] set, expand it into the
+        #   list of hosts, and then repeat until no ranges are left.
+ # - also add an optional third parameter which contains the step. (Default: 1)
+ # so range can be [01:10:2] -> 01 03 05 07 09
+
+ (head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
+ bounds = nrange.split(":")
+ if len(bounds) != 2 and len(bounds) != 3:
+ raise AnsibleError("host range must be begin:end or begin:end:step")
+ beg = bounds[0]
+ end = bounds[1]
+ if len(bounds) == 2:
+ step = 1
+ else:
+ step = bounds[2]
+ if not beg:
+ beg = "0"
+ if not end:
+ raise AnsibleError("host range must specify end value")
+ if beg[0] == '0' and len(beg) > 1:
+ rlen = len(beg) # range length formatting hint
+ if rlen != len(end):
+ raise AnsibleError("host range must specify equal-length begin and end formats")
+
+ def fill(x):
+ return str(x).zfill(rlen) # range sequence
+
+ else:
+ fill = str
+
+ try:
+ i_beg = string.ascii_letters.index(beg)
+ i_end = string.ascii_letters.index(end)
+ if i_beg > i_end:
+ raise AnsibleError("host range must have begin <= end")
+ seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
+ except ValueError: # not an alpha range
+ seq = range(int(beg), int(end) + 1, int(step))
+
+ for rseq in seq:
+ hname = ''.join((head, fill(rseq), tail))
+
+ if detect_range(hname):
+ all_hosts.extend(expand_hostname_range(hname))
+ else:
+ all_hosts.append(hname)
+
+ return all_hosts
+
+
+def get_cache_plugin(plugin_name, **kwargs):
+ try:
+ cache = CacheObject(plugin_name, **kwargs)
+ except AnsibleError as e:
+ if 'fact_caching_connection' in to_native(e):
+ raise AnsibleError("error, '%s' inventory cache plugin requires the one of the following to be set "
+ "to a writeable directory path:\nansible.cfg:\n[default]: fact_caching_connection,\n"
+ "[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
+ "ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
+ else:
+ raise e
+
+ if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
+ raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
+ 'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
+ 'from ansible.constants.'.format(plugin_name))
+ return cache
+
+
+class BaseInventoryPlugin(AnsiblePlugin):
+ """ Parses an Inventory Source"""
+
+ TYPE = 'generator'
+ _sanitize_group_name = staticmethod(to_safe_group_name)
+
+ def __init__(self):
+
+ super(BaseInventoryPlugin, self).__init__()
+
+ self._options = {}
+ self.inventory = None
+ self.display = display
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' Populates inventory from the given data. Raises an error on any parse failure
+ :arg inventory: a copy of the previously accumulated inventory data,
+ to be updated with any new data this plugin provides.
+ The inventory can be empty if no other source/plugin ran successfully.
+ :arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
+ it also has Vault support to automatically decrypt files.
+ :arg path: the string that represents the 'inventory source',
+ normally a path to a configuration file for this inventory,
+ but it can also be a raw string for this plugin to consume
+        :arg cache: a boolean that indicates if the plugin should use the cache or not;
+            you can ignore this if the plugin does not implement caching.
+ '''
+
+ self.loader = loader
+ self.inventory = inventory
+ self.templar = Templar(loader=loader)
+
+ def verify_file(self, path):
+ ''' Verify if file is usable by this plugin, base does minimal accessibility check
+        :arg path: a string that was passed as an inventory source;
+            it normally is a path to a config file, but this is not a requirement,
+            it can also be a raw string to be parsed as the inventory data itself.
+            So only call this base class method if you expect the source to be a file.
+ '''
+
+ valid = False
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
+ return valid
+
+ def _populate_host_vars(self, hosts, variables, group=None, port=None):
+ if not isinstance(variables, Mapping):
+ raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
+
+ for host in hosts:
+ self.inventory.add_host(host, group=group, port=port)
+ for k in variables:
+ self.inventory.set_variable(host, k, variables[k])
+
+ def _read_config_data(self, path):
+ ''' validate config and set options as appropriate
+ :arg path: path to common yaml format config file for this plugin
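+
+        An illustrative minimal config (hypothetical plugin name 'myplugin'),
+        enabling the real 'jsonfile' cache plugin:
+
+            plugin: myplugin
+            cache: true
+            cache_plugin: jsonfile
+            cache_connection: /tmp/ansible_inventory_cache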
+ '''
+
+ config = {}
+ try:
+ # avoid loader cache so meta: refresh_inventory can pick up config changes
+ # if we read more than once, fs cache should be good enough
+ config = self.loader.load_from_file(path, cache=False)
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+        # a plugin can be loaded via many different names with redirection; if so, we want to accept any of those names
+ valid_names = getattr(self, '_redirected_names') or [self.NAME]
+
+ if not config:
+ # no data
+ raise AnsibleParserError("%s is empty" % (to_native(path)))
+ elif config.get('plugin') not in valid_names:
+ # this is not my config file
+ raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
+ elif not isinstance(config, Mapping):
+ # configs are dictionaries
+ raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
+
+ self.set_options(direct=config)
+ if 'cache' in self._options and self.get_option('cache'):
+ cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
+ cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]))
+ self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
+
+ return config
+
+ def _consume_options(self, data):
+ ''' update existing options from alternate configuration sources not normally used by Ansible.
+        Many API libraries already have their own configuration sources; this allows the plugin author to leverage them.
+ :arg data: key/value pairs that correspond to configuration options for this plugin
+ '''
+
+ for k in self._options:
+ if k in data:
+ self._options[k] = data.pop(k)
+
+ def _expand_hostpattern(self, hostpattern):
+ '''
+ Takes a single host pattern and returns a list of hostnames and an
+ optional port number that applies to all of them.
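+
+        For example (illustrative), 'db[1:3].example.com:5555' yields
+        (['db1.example.com', 'db2.example.com', 'db3.example.com'], 5555).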
+ '''
+ # Can the given hostpattern be parsed as a host with an optional port
+ # specification?
+
+ try:
+ (pattern, port) = parse_address(hostpattern, allow_ranges=True)
+ except Exception:
+ # not a recognizable host pattern
+ pattern = hostpattern
+ port = None
+
+ # Once we have separated the pattern, we expand it into list of one or
+ # more hostnames, depending on whether it contains any [x:y] ranges.
+
+ if detect_range(pattern):
+ hostnames = expand_hostname_range(pattern)
+ else:
+ hostnames = [pattern]
+
+ return (hostnames, port)
+
+
+class BaseFileInventoryPlugin(BaseInventoryPlugin):
+ """ Parses a File based Inventory Source"""
+
+ TYPE = 'storage'
+
+ def __init__(self):
+
+ super(BaseFileInventoryPlugin, self).__init__()
+
+
+class DeprecatedCache(object):
+ def __init__(self, real_cacheable):
+ self.real_cacheable = real_cacheable
+
+ def get(self, key):
+ display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
+ 'When expecting a KeyError, use self._cache[key] instead of using self.cache.get(key). '
+ 'self._cache is a dictionary and will return a default value instead of raising a KeyError '
+ 'when the key does not exist', version='2.12', collection_name='ansible.builtin')
+ return self.real_cacheable._cache[key]
+
+ def set(self, key, value):
+ display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
+ 'To set the self._cache dictionary, use self._cache[key] = value instead of self.cache.set(key, value). '
+ 'To force update the underlying cache plugin with the contents of self._cache before parse() is complete, '
+ 'call self.set_cache_plugin and it will use the self._cache dictionary to update the cache plugin',
+ version='2.12', collection_name='ansible.builtin')
+ self.real_cacheable._cache[key] = value
+ self.real_cacheable.set_cache_plugin()
+
+ def __getattr__(self, name):
+ display.deprecated('InventoryModule should utilize self._cache instead of self.cache',
+ version='2.12', collection_name='ansible.builtin')
+ return self.real_cacheable._cache.__getattribute__(name)
+
+
+class Cacheable(object):
+
+ _cache = CacheObject()
+
+ @property
+ def cache(self):
+ return DeprecatedCache(self)
+
+ def load_cache_plugin(self):
+ plugin_name = self.get_option('cache_plugin')
+ cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
+ cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]))
+ self._cache = get_cache_plugin(plugin_name, **cache_options)
+
+ def get_cache_key(self, path):
+ return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
+
+ def _get_cache_prefix(self, path):
+ ''' create predictable unique prefix for plugin/inventory '''
+
+ m = hashlib.sha1()
+ m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
+ d1 = m.hexdigest()
+
+ n = hashlib.sha1()
+ n.update(to_bytes(path, errors='surrogate_or_strict'))
+ d2 = n.hexdigest()
+
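+        # Note: joining with 's_' yields '<d1[:5]>s_<d2[:5]>', so keys look like
+        # 'abcdes_fghij'; the 's_' separator is part of the resulting prefix.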
+ return 's_'.join([d1[:5], d2[:5]])
+
+ def clear_cache(self):
+ self._cache.flush()
+
+ def update_cache_if_changed(self):
+ self._cache.update_cache_if_changed()
+
+ def set_cache_plugin(self):
+ self._cache.set_cache()
+
+
+class Constructable(object):
+
+ def _compose(self, template, variables):
+        ''' helper method for plugins to compose variables for Ansible based on a jinja2 expression and inventory vars'''
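+        # e.g. (illustrative): _compose('var1 ~ "_" ~ var2', {'var1': 'a', 'var2': 'b'})
+        # wraps the expression in '{{ ... }}' and returns 'a_b'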
+ t = self.templar
+ t.available_variables = variables
+ return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)
+
+ def _set_composite_vars(self, compose, variables, host, strict=False):
+ ''' loops over compose entries to create vars for hosts '''
+ if compose and isinstance(compose, dict):
+ for varname in compose:
+ try:
+ composite = self._compose(compose[varname], variables)
+ except Exception as e:
+ if strict:
+ raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
+ continue
+ self.inventory.set_variable(host, varname, composite)
+
+ def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
+        ''' helper to create complex groups for plugins based on jinja2 conditionals; hosts that meet the conditional are added to the group'''
+ # process each 'group entry'
+ if groups and isinstance(groups, dict):
+ variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
+ self.templar.available_variables = variables
+ for group_name in groups:
+ conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
+ group_name = original_safe(group_name, force=True)
+ try:
+ result = boolean(self.templar.template(conditional))
+ except Exception as e:
+ if strict:
+ raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
+ continue
+
+ if result:
+ # ensure group exists, use sanitized name
+ group_name = self.inventory.add_group(group_name)
+ # add host to group
+ self.inventory.add_child(group_name, host)
+
+ def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
+        ''' helper to create groups for plugins based on variable values and add the corresponding hosts to them'''
+ if keys and isinstance(keys, list):
+ for keyed in keys:
+ if keyed and isinstance(keyed, dict):
+
+ variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
+ try:
+ key = self._compose(keyed.get('key'), variables)
+ except Exception as e:
+ if strict:
+ raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
+ continue
+
+ if key:
+ prefix = keyed.get('prefix', '')
+ sep = keyed.get('separator', '_')
+ raw_parent_name = keyed.get('parent_group', None)
+ if raw_parent_name:
+ try:
+ raw_parent_name = self.templar.template(raw_parent_name)
+ except AnsibleError as e:
+ if strict:
+ raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
+ continue
+
+ new_raw_group_names = []
+ if isinstance(key, string_types):
+ new_raw_group_names.append(key)
+ elif isinstance(key, list):
+ for name in key:
+ new_raw_group_names.append(name)
+ elif isinstance(key, Mapping):
+ for (gname, gval) in key.items():
+ name = '%s%s%s' % (gname, sep, gval)
+ new_raw_group_names.append(name)
+ else:
+ raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
+
+ for bare_name in new_raw_group_names:
+ gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
+ result_gname = self.inventory.add_group(gname)
+ self.inventory.add_host(host, result_gname)
+
+ if raw_parent_name:
+ parent_name = self._sanitize_group_name(raw_parent_name)
+ self.inventory.add_group(parent_name)
+ self.inventory.add_child(parent_name, result_gname)
+
+ else:
+                        # exclude the case of an empty list or dictionary; these are valid constructions
+                        # that simply mean no groups need to be constructed, but they are still falsy
+ if strict and key not in ([], {}):
+ raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
+ else:
+ raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
diff --git a/lib/ansible/plugins/inventory/advanced_host_list.py b/lib/ansible/plugins/inventory/advanced_host_list.py
new file mode 100644
index 00000000..dae02427
--- /dev/null
+++ b/lib/ansible/plugins/inventory/advanced_host_list.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ inventory: advanced_host_list
+ version_added: "2.4"
+ short_description: Parses a 'host list' with ranges
+ description:
+        - Parses a host list string as comma-separated values of hosts and supports host ranges.
+ - This plugin only applies to inventory sources that are not paths and contain at least one comma.
+'''
+
+EXAMPLES = '''
+ # simple range
+ # ansible -i 'host[1:10],' -m ping
+
+    # plain host lists without ranges are still supported
+ # ansible-playbook -i 'localhost,' play.yml
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'advanced_host_list'
+
+ def verify_file(self, host_list):
+
+ valid = False
+ b_path = to_bytes(host_list, errors='surrogate_or_strict')
+ if not os.path.exists(b_path) and ',' in host_list:
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, host_list, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, host_list)
+
+ try:
+ for h in host_list.split(','):
+ h = h.strip()
+ if h:
+ try:
+ (hostnames, port) = self._expand_hostpattern(h)
+ except AnsibleError as e:
+ self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
+                        hostnames = [h]
+ port = None
+
+ for host in hostnames:
+ if host not in self.inventory.hosts:
+ self.inventory.add_host(host, group='ungrouped', port=port)
+ except Exception as e:
+ raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
diff --git a/lib/ansible/plugins/inventory/auto.py b/lib/ansible/plugins/inventory/auto.py
new file mode 100644
index 00000000..bbb52bae
--- /dev/null
+++ b/lib/ansible/plugins/inventory/auto.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: auto
+ plugin_type: inventory
+ author:
+ - Matt Davis (@nitzmahone)
+ version_added: "2.5"
+ short_description: Loads and executes an inventory plugin specified in a YAML config
+ description:
+        - By whitelisting the C(auto) inventory plugin, any YAML inventory config file with a
+ C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
+ config. This effectively provides automatic whitelisting of all installed/accessible inventory plugins.
+ - To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
+'''
+
+EXAMPLES = '''
+# This plugin is not intended for direct use; it is a fallback mechanism for automatic whitelisting of
+# all installed inventory plugins.
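+
+# For example (illustrative), a YAML file containing the following would cause
+# 'auto' to load and run the 'constructed' plugin with this config:
+#
+#   plugin: constructed
+#   strict: False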
+'''
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.plugins.loader import inventory_loader
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'auto'
+
+ def verify_file(self, path):
+ if not path.endswith('.yml') and not path.endswith('.yaml'):
+ return False
+ return super(InventoryModule, self).verify_file(path)
+
+ def parse(self, inventory, loader, path, cache=True):
+ config_data = loader.load_from_file(path, cache=False)
+
+ try:
+ plugin_name = config_data.get('plugin', None)
+ except AttributeError:
+ plugin_name = None
+
+ if not plugin_name:
+ raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))
+
+ plugin = inventory_loader.get(plugin_name)
+
+ if not plugin:
+ raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))
+
+ if not plugin.verify_file(path):
+ raise AnsibleParserError("inventory config '{0}' could not be verified by plugin '{1}'".format(path, plugin_name))
+
+ plugin.parse(inventory, loader, path, cache=cache)
+ try:
+ plugin.update_cache_if_changed()
+ except AttributeError:
+ pass
diff --git a/lib/ansible/plugins/inventory/constructed.py b/lib/ansible/plugins/inventory/constructed.py
new file mode 100644
index 00000000..1e9c375b
--- /dev/null
+++ b/lib/ansible/plugins/inventory/constructed.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: constructed
+ plugin_type: inventory
+ version_added: "2.4"
+ short_description: Uses Jinja2 to construct vars and groups based on existing inventory.
+ description:
+        - Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals.
+        - The Jinja2 conditionals determine whether a host qualifies for group membership.
+        - The Jinja2 expressions are evaluated and assigned to the variables.
+ - Only variables already available from previous inventories or the fact cache can be used for templating.
+ - When I(strict) is False, failed expressions will be ignored (assumes vars were missing).
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'constructed' plugin.
+ required: True
+ choices: ['constructed']
+ extends_documentation_fragment:
+ - constructed
+'''
+
+EXAMPLES = r'''
+ # inventory.config file in YAML format
+ plugin: constructed
+ strict: False
+ compose:
+ var_sum: var1 + var2
+
+        # this variable will only be set if a persistent fact cache is enabled (and the facts have not expired)
+ # `strict: False` will skip this instead of producing an error if it is missing facts.
+ server_type: "ansible_hostname | regex_replace ('(.{6})(.{2}).*', '\\2')"
+ groups:
+ # simple name matching
+ webservers: inventory_hostname.startswith('web')
+
+ # using ec2 'tags' (assumes aws inventory)
+ development: "'devel' in (ec2_tags|list)"
+
+ # using other host properties populated in inventory
+ private_only: not (public_dns_name is defined or ip_address is defined)
+
+ # complex group membership
+ multi_group: (group_names | intersect(['alpha', 'beta', 'omega'])) | length >= 2
+
+ keyed_groups:
+ # this creates a group per distro (distro_CentOS, distro_Debian) and assigns the hosts that have matching values to it,
+ # using the default separator "_"
+ - prefix: distro
+ key: ansible_distribution
+
+ # the following examples assume the first inventory is from the `aws_ec2` plugin
+ # this creates a group per ec2 architecture and assign hosts to the matching ones (arch_x86_64, arch_sparc, etc)
+ - prefix: arch
+ key: architecture
+
+ # this creates a group per ec2 region like "us_west_1"
+ - prefix: ""
+ separator: ""
+ key: placement.region
+
+ # this creates a common parent group for all ec2 availability zones
+ - key: placement.availability_zone
+ parent_group: all_ec2_zones
+'''
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.inventory.helpers import get_group_vars
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.module_utils._text import to_native
+from ansible.utils.vars import combine_vars
+from ansible.vars.fact_cache import FactCache
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ """ constructs groups and vars using Jinja2 template expressions """
+
+ NAME = 'constructed'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self._cache = FactCache()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=False):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ self._read_config_data(path)
+
+ strict = self.get_option('strict')
+ fact_cache = FactCache()
+ try:
+ # Go over hosts (less var copies)
+ for host in inventory.hosts:
+
+ # get available variables to templar
+ hostvars = combine_vars(get_group_vars(inventory.hosts[host].get_groups()), inventory.hosts[host].get_vars())
+ if host in fact_cache: # adds facts if cache is active
+ hostvars = combine_vars(hostvars, fact_cache[host])
+
+ # create composite vars
+ self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)
+
+ # refetch host vars in case new ones have been created above
+ hostvars = combine_vars(get_group_vars(inventory.hosts[host].get_groups()), inventory.hosts[host].get_vars())
+ if host in self._cache: # adds facts if cache is active
+ hostvars = combine_vars(hostvars, self._cache[host])
+
+ # constructed groups based on conditionals
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict)
+
+                # constructed groups based on variable values
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict)
+
+ except Exception as e:
+ raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
diff --git a/lib/ansible/plugins/inventory/generator.py b/lib/ansible/plugins/inventory/generator.py
new file mode 100644
index 00000000..4a7d3b7a
--- /dev/null
+++ b/lib/ansible/plugins/inventory/generator.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: generator
+ plugin_type: inventory
+ version_added: "2.6"
+ short_description: Uses Jinja2 to construct hosts and groups from patterns
+ description:
+        - Uses a YAML configuration file with a valid YAML or C(.config) extension to define the host and group name templates
+ - Create a template pattern that describes each host, and then use independent configuration layers
+ - Every element of every layer is combined to create a host for every layer combination
+ - Parent groups can be defined with reference to hosts and other groups using the same template variables
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'generator' plugin.
+ required: True
+ choices: ['generator']
+ hosts:
+ description:
+ - The C(name) key is a template used to generate
+ hostnames based on the C(layers) option. Each variable in the name is expanded to create a
+ cartesian product of all possible layer combinations.
+ - The C(parents) are a list of parent groups that the host belongs to. Each C(parent) item
+ contains a C(name) key, again expanded from the template, and an optional C(parents) key
+ that lists its parents.
+          - Parents can also contain C(vars), a dictionary of vars that is then
+            always set for hosts in that group. This can provide easy access to the group name,
+            e.g. setting an C(application) variable to the value of the C(application) layer name.
+ layers:
+ description:
+ - A dictionary of layers, with the key being the layer name, used as a variable name in the C(host)
+ C(name) and C(parents) keys. Each layer value is a list of possible values for that layer.
+'''
+
+EXAMPLES = '''
+ # inventory.config file in YAML format
+ # remember to enable this inventory plugin in the ansible.cfg before using
+ # View the output using `ansible-inventory -i inventory.config --list`
+ plugin: generator
+ hosts:
+ name: "{{ operation }}_{{ application }}_{{ environment }}_runner"
+ parents:
+ - name: "{{ operation }}_{{ application }}_{{ environment }}"
+ parents:
+ - name: "{{ operation }}_{{ application }}"
+ parents:
+ - name: "{{ operation }}"
+ - name: "{{ application }}"
+ - name: "{{ application }}_{{ environment }}"
+ parents:
+ - name: "{{ application }}"
+ vars:
+ application: "{{ application }}"
+ - name: "{{ environment }}"
+ vars:
+ environment: "{{ environment }}"
+ - name: runner
+ layers:
+ operation:
+ - build
+ - launch
+ environment:
+ - dev
+ - test
+ - prod
+ application:
+ - web
+ - api
+'''
+
+import os
+
+from itertools import product
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+ """ constructs groups and vars using Jinja2 template expressions """
+
+ NAME = 'generator'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def template(self, pattern, variables):
+ self.templar.available_variables = variables
+ return self.templar.do_template(pattern)
+
+ def add_parents(self, inventory, child, parents, template_vars):
+ for parent in parents:
+ try:
+ groupname = self.template(parent['name'], template_vars)
+ except (AttributeError, ValueError):
+ raise AnsibleParserError("Element %s has a parent with no name element" % child['name'])
+ if groupname not in inventory.groups:
+ inventory.add_group(groupname)
+ group = inventory.groups[groupname]
+ for (k, v) in parent.get('vars', {}).items():
+ group.set_variable(k, self.template(v, template_vars))
+ inventory.add_child(groupname, child)
+ self.add_parents(inventory, groupname, parent.get('parents', []), template_vars)
+
+ def parse(self, inventory, loader, path, cache=False):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ config = self._read_config_data(path)
+
+ template_inputs = product(*config['layers'].values())
+ for item in template_inputs:
+ template_vars = dict()
+ for i, key in enumerate(config['layers'].keys()):
+ template_vars[key] = item[i]
+ host = self.template(config['hosts']['name'], template_vars)
+ inventory.add_host(host)
+ self.add_parents(inventory, host, config['hosts'].get('parents', []), template_vars)
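+
+# Illustrative note: with the EXAMPLES config above, product() yields
+# 2 * 3 * 2 = 12 layer combinations, producing hosts such as
+# 'build_web_dev_runner'.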
diff --git a/lib/ansible/plugins/inventory/host_list.py b/lib/ansible/plugins/inventory/host_list.py
new file mode 100644
index 00000000..4a7a98d8
--- /dev/null
+++ b/lib/ansible/plugins/inventory/host_list.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ inventory: host_list
+ version_added: "2.4"
+ short_description: Parses a 'host list' string
+ description:
+        - Parses a host list string as comma-separated values of hosts
+ - This plugin only applies to inventory strings that are not paths and contain a comma.
+'''
+
+EXAMPLES = r'''
+ # define 2 hosts in command line
+ # ansible -i '10.10.2.6, 10.10.2.4' -m ping all
+
+ # DNS resolvable names
+ # ansible -i 'host1.example.com, host2' -m user -a 'name=me state=absent' all
+
+ # just use localhost
+ # ansible-playbook -i 'localhost,' play.yml -c local
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.utils.addresses import parse_address
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'host_list'
+
+ def verify_file(self, host_list):
+
+ valid = False
+ b_path = to_bytes(host_list, errors='surrogate_or_strict')
+ if not os.path.exists(b_path) and ',' in host_list:
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, host_list, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, host_list)
+
+ try:
+ for h in host_list.split(','):
+ h = h.strip()
+ if h:
+ try:
+ (host, port) = parse_address(h, allow_ranges=False)
+ except AnsibleError as e:
+ self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
+ host = h
+ port = None
+
+ if host not in self.inventory.hosts:
+ self.inventory.add_host(host, group='ungrouped', port=port)
+ except Exception as e:
+ raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
diff --git a/lib/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
new file mode 100644
index 00000000..2175f421
--- /dev/null
+++ b/lib/ansible/plugins/inventory/ini.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ inventory: ini
+ version_added: "2.4"
+ short_description: Uses an Ansible INI file as inventory source.
+ description:
+        - INI file based inventory; sections are groups or group-related settings denoted with special C(:modifiers).
+ - Entries in sections C([group_1]) are hosts, members of the group.
+ - Hosts can have variables defined inline as key/value pairs separated by C(=).
+ - The C(children) modifier indicates that the section contains groups.
+ - The C(vars) modifier indicates that the section contains variables assigned to members of the group.
+ - Anything found outside a section is considered an 'ungrouped' host.
+ - Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared within your inventory.
+ - When declared inline with the host, INI values are processed by Python's ast.literal_eval function
+ (U(https://docs.python.org/2/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
+ (strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple C(key=value) parameters per line.
+ Therefore they need a way to indicate that a space is part of a value rather than a separator.
+ - When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
+ Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
+ - Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
+ - See the Examples for proper quoting to prevent changes to variable type.
+ notes:
+ - Whitelisted in configuration by default.
+ - Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
+ The YAML inventory plugin processes variable values consistently and correctly.
+'''
+
+EXAMPLES = '''
+ example1: |
+ # example cfg file
+ [web]
+ host1
+ host2 ansible_port=222 # defined inline, interpreted as an integer
+
+ [web:vars]
+ http_port=8080 # all members of 'web' will inherit these
+ myvar=23 # defined in a :vars section, interpreted as a string
+
+ [web:children] # child groups will automatically add their hosts to parent group
+ apache
+ nginx
+
+ [apache]
+ tomcat1
+ tomcat2 myvar=34 # host specific vars override group vars
+ tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
+
+ [nginx]
+ jenkins1
+
+ [nginx:vars]
+ has_java = True # vars in child groups override same in parent
+
+ [all:vars]
+ has_java = False # 'all' is 'top' parent
+
+ example2: |
+ # other example config
+ host1 # this is 'ungrouped'
+
+ # both hosts have same IP but diff ports, also 'ungrouped'
+ host2 ansible_host=127.0.0.1 ansible_port=44
+ host3 ansible_host=127.0.0.1 ansible_port=45
+
+ [g1]
+ host4
+
+ [g2]
+ host4 # same host as above, but member of 2 groups, will inherit vars from both
+ # inventory hostnames are unique
+'''
+
+import ast
+import re
+
+from ansible.inventory.group import to_safe_group_name
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.utils.shlex import shlex_split
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+ """
+ Takes an INI-format inventory file and builds a list of groups and subgroups
+ with their associated hosts and variable settings.
+ """
+ NAME = 'ini'
+ _COMMENT_MARKERS = frozenset((u';', u'#'))
+ b_COMMENT_MARKERS = frozenset((b';', b'#'))
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self.patterns = {}
+ self._filename = None
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._filename = path
+
+ try:
+ # Read in the hosts, groups, and variables defined in the inventory file.
+ if self.loader:
+ (b_data, private) = self.loader._get_file_contents(path)
+ else:
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ with open(b_path, 'rb') as fh:
+ b_data = fh.read()
+
+ try:
+ # Faster to do to_text once on a long string than many
+ # times on smaller strings
+ data = to_text(b_data, errors='surrogate_or_strict').splitlines()
+ except UnicodeError:
+ # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
+ data = []
+ for line in b_data.splitlines():
+ if line and line[0] in self.b_COMMENT_MARKERS:
+ # Replace is okay for comment lines
+ # data.append(to_text(line, errors='surrogate_then_replace'))
+ # Currently we only need these lines for accurate lineno in errors
+ data.append(u'')
+ else:
+                        # Non-comment lines still have to be valid utf-8
+ data.append(to_text(line, errors='surrogate_or_strict'))
+
+ self._parse(path, data)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ def _raise_error(self, message):
+ raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)
+
+ def _parse(self, path, lines):
+ '''
+ Populates self.groups from the given array of lines. Raises an error on
+ any parse failure.
+ '''
+
+ self._compile_patterns()
+
+ # We behave as though the first line of the inventory is '[ungrouped]',
+ # and begin to look for host definitions. We make a single pass through
+ # each line of the inventory, building up self.groups and adding hosts,
+ # subgroups, and setting variables as we go.
+
+ pending_declarations = {}
+ groupname = 'ungrouped'
+ state = 'hosts'
+ self.lineno = 0
+ for line in lines:
+ self.lineno += 1
+
+ line = line.strip()
+ # Skip empty lines and comments
+ if not line or line[0] in self._COMMENT_MARKERS:
+ continue
+
+ # Is this a [section] header? That tells us what group we're parsing
+ # definitions for, and what kind of definitions to expect.
+
+ m = self.patterns['section'].match(line)
+ if m:
+ (groupname, state) = m.groups()
+
+ groupname = to_safe_group_name(groupname)
+
+ state = state or 'hosts'
+ if state not in ['hosts', 'children', 'vars']:
+ title = ":".join(m.groups())
+ self._raise_error("Section [%s] has unknown type: %s" % (title, state))
+
+ # If we haven't seen this group before, we add a new Group.
+ if groupname not in self.inventory.groups:
+ # Either [groupname] or [groupname:children] is sufficient to declare a group,
+                # but [groupname:vars] is allowed only if the group is declared elsewhere.
+ # We add the group anyway, but make a note in pending_declarations to check at the end.
+ #
+ # It's possible that a group is previously pending due to being defined as a child
+ # group, in that case we simply pass so that the logic below to process pending
+ # declarations will take the appropriate action for a pending child group instead of
+ # incorrectly handling it as a var state pending declaration
+ if state == 'vars' and groupname not in pending_declarations:
+ pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
+
+ self.inventory.add_group(groupname)
+
+ # When we see a declaration that we've been waiting for, we process and delete.
+ if groupname in pending_declarations and state != 'vars':
+ if pending_declarations[groupname]['state'] == 'children':
+ self._add_pending_children(groupname, pending_declarations)
+ elif pending_declarations[groupname]['state'] == 'vars':
+ del pending_declarations[groupname]
+
+ continue
+ elif line.startswith('[') and line.endswith(']'):
+ self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
+ "in the section entry, and that there are no other invalid characters")
+
+ # It's not a section, so the current state tells us what kind of
+ # definition it must be. The individual parsers will raise an
+ # error if we feed them something they can't digest.
+
+ # [groupname] contains host definitions that must be added to
+ # the current group.
+ if state == 'hosts':
+ hosts, port, variables = self._parse_host_definition(line)
+ self._populate_host_vars(hosts, variables, groupname, port)
+
+ # [groupname:vars] contains variable definitions that must be
+ # applied to the current group.
+ elif state == 'vars':
+ (k, v) = self._parse_variable_definition(line)
+ self.inventory.set_variable(groupname, k, v)
+
+ # [groupname:children] contains subgroup names that must be
+ # added as children of the current group. The subgroup names
+ # must themselves be declared as groups, but as before, they
+ # may only be declared later.
+ elif state == 'children':
+ child = self._parse_group_name(line)
+ if child not in self.inventory.groups:
+ if child not in pending_declarations:
+ pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
+ else:
+ pending_declarations[child]['parents'].append(groupname)
+ else:
+ self.inventory.add_child(groupname, child)
+ else:
+ # This can happen only if the state checker accepts a state that isn't handled above.
+ self._raise_error("Entered unhandled state: %s" % (state))
+
+ # Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
+ # We report only the first such error here.
+ for g in pending_declarations:
+ decl = pending_declarations[g]
+ if decl['state'] == 'vars':
+ raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
+ elif decl['state'] == 'children':
+ raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
+
+ def _add_pending_children(self, group, pending):
+ for parent in pending[group]['parents']:
+ self.inventory.add_child(parent, group)
+ if parent in pending and pending[parent]['state'] == 'children':
+ self._add_pending_children(parent, pending)
+ del pending[group]
+
+ def _parse_group_name(self, line):
+ '''
+ Takes a single line and tries to parse it as a group name. Returns the
+ group name if successful, or raises an error.
+ '''
+
+ m = self.patterns['groupname'].match(line)
+ if m:
+ return m.group(1)
+
+ self._raise_error("Expected group name, got: %s" % (line))
+
+ def _parse_variable_definition(self, line):
+ '''
+ Takes a string and tries to parse it as a variable definition. Returns
+ the key and value if successful, or raises an error.
+ '''
+
+ # TODO: We parse variable assignments as a key (anything to the left of
+ # an '='"), an '=', and a value (anything left) and leave the value to
+ # _parse_value to sort out. We should be more systematic here about
+ # defining what is acceptable, how quotes work, and so on.
+
+ if '=' in line:
+ (k, v) = [e.strip() for e in line.split("=", 1)]
+ return (k, self._parse_value(v))
+
+ self._raise_error("Expected key=value, got: %s" % (line))
+
+ def _parse_host_definition(self, line):
+ '''
+ Takes a single line and tries to parse it as a host definition. Returns
+ a list of Hosts if successful, or raises an error.
+ '''
+
+ # A host definition comprises (1) a non-whitespace hostname or range,
+ # optionally followed by (2) a series of key="some value" assignments.
+ # We ignore any trailing whitespace and/or comments. For example, here
+ # are a series of host definitions in a group:
+ #
+ # [groupname]
+ # alpha
+ # beta:2345 user=admin # we'll tell shlex
+ # gamma sudo=True user=root # to ignore comments
+
+ try:
+ tokens = shlex_split(line, comments=True)
+ except ValueError as e:
+ self._raise_error("Error parsing host definition '%s': %s" % (line, e))
+
+ (hostnames, port) = self._expand_hostpattern(tokens[0])
+
+ # Try to process anything remaining as a series of key=value pairs.
+ variables = {}
+ for t in tokens[1:]:
+ if '=' not in t:
+ self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
+ (k, v) = t.split('=', 1)
+ variables[k] = self._parse_value(v)
+
+ return hostnames, port, variables
+
+ def _expand_hostpattern(self, hostpattern):
+ '''
+ do some extra checks over normal processing
+ '''
+
+ hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)
+
+ if hostpattern.strip().endswith(':') and port is None:
+ raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
+ hostpattern)
+ for pattern in hostnames:
+ # some YAML parsing prevention checks
+ if pattern.strip() == '---':
+ raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)
+
+ return (hostnames, port)
+
+ @staticmethod
+ def _parse_value(v):
+ '''
+ Attempt to transform the string value from an ini file into a basic python object
+ (int, dict, list, unicode string, etc).
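+
+        For example (illustrative): '42' -> 42, 'True' -> True,
+        '[1, 2]' -> [1, 2], and a bare string like 'web server' stays text.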
+ '''
+ try:
+ v = ast.literal_eval(v)
+ # Using explicit exceptions.
+        # Likely a string that literal_eval does not like. We will then just set it.
+ except ValueError:
+ # For some reason this was thought to be malformed.
+ pass
+ except SyntaxError:
+ # Is this a hash with an equals at the end?
+ pass
+ return to_text(v, nonstring='passthru', errors='surrogate_or_strict')
+
+ def _compile_patterns(self):
+ '''
+ Compiles the regular expressions required to parse the inventory and
+ stores them in self.patterns.
+ '''
+
+ # Section names are square-bracketed expressions at the beginning of a
+ # line, comprising (1) a group name optionally followed by (2) a tag
+ # that specifies the contents of the section. We ignore any trailing
+ # whitespace and/or comments. For example:
+ #
+ # [groupname]
+ # [somegroup:vars]
+ # [naughty:children] # only get coal in their stockings
+
+ self.patterns['section'] = re.compile(
+ to_text(r'''^\[
+ ([^:\]\s]+) # group name (see groupname below)
+ (?::(\w+))? # optional : and tag name
+ \]
+ \s* # ignore trailing whitespace
+ (?:\#.*)? # and/or a comment till the
+ $ # end of the line
+ ''', errors='surrogate_or_strict'), re.X
+ )
+
+ # FIXME: What are the real restrictions on group names, or rather, what
+ # should they be? At the moment, they must be non-empty sequences of non
+ # whitespace characters excluding ':' and ']', but we should define more
+ # precise rules in order to support better diagnostics.
+
+ self.patterns['groupname'] = re.compile(
+ to_text(r'''^
+ ([^:\]\s]+)
+ \s* # ignore trailing whitespace
+ (?:\#.*)? # and/or a comment till the
+ $ # end of the line
+ ''', errors='surrogate_or_strict'), re.X
+ )
diff --git a/lib/ansible/plugins/inventory/script.py b/lib/ansible/plugins/inventory/script.py
new file mode 100644
index 00000000..b4094a56
--- /dev/null
+++ b/lib/ansible/plugins/inventory/script.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ inventory: script
+ version_added: "2.4"
+ short_description: Executes an inventory script that returns JSON
+ options:
+ cache:
+ deprecated:
+ why: This option has never been in use. External scripts must implement their own caching.
+ version: "2.12"
+ description:
+ - This option has no effect. The plugin will not cache results because external inventory scripts
+ are responsible for their own caching. This option will be removed in 2.12.
+ ini:
+ - section: inventory_plugin_script
+ key: cache
+ env:
+ - name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_CACHE
+ always_show_stderr:
+ description: Toggle display of stderr even when script was successful
+ version_added: "2.5.1"
+ default: True
+ type: boolean
+ ini:
+ - section: inventory_plugin_script
+ key: always_show_stderr
+ env:
+ - name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_STDERR
+ description:
+ - The source provided must be an executable that returns Ansible inventory JSON
+ - The source must accept C(--list) and C(--host <hostname>) as arguments.
+ C(--host) will only be used if no C(_meta) key is present.
+ This is a performance optimization as the script would be called per host otherwise.
+ notes:
+ - Whitelisted in configuration by default.
+ - The plugin does not cache results because external inventory scripts are responsible for their own caching.
+'''
+
+import os
+import subprocess
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.basic import json_dict_bytes_to_unicode
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+ ''' Host inventory parser for ansible using external inventory scripts. '''
+
+ NAME = 'script'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self._hosts = set()
+
+ def verify_file(self, path):
+ ''' Verify if file is usable by this plugin, base does minimal accessibility check '''
+
+ valid = super(InventoryModule, self).verify_file(path)
+
+ if valid:
+            # not only accessible, the file must also be executable and/or have a shebang
+ shebang_present = False
+ try:
+ with open(path, 'rb') as inv_file:
+ initial_chars = inv_file.read(2)
+ if initial_chars.startswith(b'#!'):
+ shebang_present = True
+ except Exception:
+ pass
+
+ if not os.access(path, os.X_OK) and not shebang_present:
+ valid = False
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=None):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ if self.get_option('cache') is not None:
+ display.deprecated(
+ msg="The 'cache' option is deprecated for the script inventory plugin. "
+ "External scripts implement their own caching and this option has never been used",
+ version="2.12", collection_name='ansible.builtin'
+ )
+
+ # Support inventory scripts that are not prefixed with some
+ # path information but happen to be in the current working
+ # directory when '.' is not in PATH.
+ cmd = [path, "--list"]
+
+ try:
+ try:
+ sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
+ (stdout, stderr) = sp.communicate()
+
+ path = to_native(path)
+ err = to_native(stderr or "")
+
+ if err and not err.endswith('\n'):
+ err += '\n'
+
+ if sp.returncode != 0:
+ raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))
+
+ # make sure script output is unicode so that json loader will output unicode strings itself
+ try:
+ data = to_text(stdout, errors="strict")
+ except Exception as e:
+ raise AnsibleError("Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(path, to_native(e)))
+
+ try:
+ processed = self.loader.load(data, json_only=True)
+ except Exception as e:
+ raise AnsibleError("failed to parse executable inventory script results from {0}: {1}\n{2}".format(path, to_native(e), err))
+
+ # if no other errors happened and you want to force displaying stderr, do so now
+ if stderr and self.get_option('always_show_stderr'):
+ self.display.error(msg=to_text(err))
+
+ if not isinstance(processed, Mapping):
+ raise AnsibleError("failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(path, err))
+
+ group = None
+ data_from_meta = None
+
+ # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
+ # if this "hostvars" exists at all then do not call --host for each # host.
+ # This is for efficiency and scripts should still return data
+ # if called with --host for backwards compat with 1.2 and earlier.
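+            # Illustrative script output using _meta (so no --host calls are needed):
+            #     {"web": {"hosts": ["host1"]},
+            #      "_meta": {"hostvars": {"host1": {"myvar": 1}}}}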
+ for (group, gdata) in processed.items():
+ if group == '_meta':
+ if 'hostvars' in gdata:
+ data_from_meta = gdata['hostvars']
+ else:
+ self._parse_group(group, gdata)
+
+ for host in self._hosts:
+ got = {}
+ if data_from_meta is None:
+ got = self.get_host_variables(path, host)
+ else:
+ try:
+ got = data_from_meta.get(host, {})
+ except AttributeError as e:
+ raise AnsibleError("Improperly formatted host information for %s: %s" % (host, to_native(e)), orig_exc=e)
+
+ self._populate_host_vars([host], got)
+
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+ def _parse_group(self, group, data):
+
+ group = self.inventory.add_group(group)
+
+ if not isinstance(data, dict):
+ data = {'hosts': data}
+        # if none of those subkeys are present, this is the simplified syntax: a host with vars
+ elif not any(k in data for k in ('hosts', 'vars', 'children')):
+ data = {'hosts': [group], 'vars': data}
+
+ if 'hosts' in data:
+ if not isinstance(data['hosts'], list):
+ raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
+
+ for hostname in data['hosts']:
+ self._hosts.add(hostname)
+ self.inventory.add_host(hostname, group)
+
+ if 'vars' in data:
+ if not isinstance(data['vars'], dict):
+ raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
+
+ for k, v in iteritems(data['vars']):
+ self.inventory.set_variable(group, k, v)
+
+ if group != '_meta' and isinstance(data, dict) and 'children' in data:
+ for child_name in data['children']:
+ child_name = self.inventory.add_group(child_name)
+ self.inventory.add_child(group, child_name)
+
+ def get_host_variables(self, path, host):
+ """ Runs <script> --host <hostname>, to determine additional host variables """
+
+ cmd = [path, "--host", host]
+ try:
+ sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError as e:
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ (out, err) = sp.communicate()
+        if not out.strip():  # handles both empty bytes and empty text output
+ return {}
+ try:
+ return json_dict_bytes_to_unicode(self.loader.load(out, file_name=path))
+ except ValueError:
+ raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
diff --git a/lib/ansible/plugins/inventory/toml.py b/lib/ansible/plugins/inventory/toml.py
new file mode 100644
index 00000000..26ad600a
--- /dev/null
+++ b/lib/ansible/plugins/inventory/toml.py
@@ -0,0 +1,262 @@
+# Copyright (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ inventory: toml
+ version_added: "2.8"
+ short_description: Uses a specific TOML file as an inventory source.
+ description:
+ - TOML based inventory format
+ - File MUST have a valid '.toml' file extension
+ notes:
+ - Requires the 'toml' python library
+'''
+
+EXAMPLES = r'''
+# Following are examples of 3 different inventories in TOML format
+example1: |
+ [all.vars]
+ has_java = false
+
+ [web]
+ children = [
+ "apache",
+ "nginx"
+ ]
+ vars = { http_port = 8080, myvar = 23 }
+
+ [web.hosts]
+ host1 = {}
+ host2 = { ansible_port = 222 }
+
+ [apache.hosts]
+ tomcat1 = {}
+ tomcat2 = { myvar = 34 }
+ tomcat3 = { mysecret = "03#pa33w0rd" }
+
+ [nginx.hosts]
+ jenkins1 = {}
+
+ [nginx.vars]
+ has_java = true
+
+example2: |
+ [all.vars]
+ has_java = false
+
+ [web]
+ children = [
+ "apache",
+ "nginx"
+ ]
+
+ [web.vars]
+ http_port = 8080
+ myvar = 23
+
+ [web.hosts.host1]
+ [web.hosts.host2]
+ ansible_port = 222
+
+ [apache.hosts.tomcat1]
+
+ [apache.hosts.tomcat2]
+ myvar = 34
+
+ [apache.hosts.tomcat3]
+ mysecret = "03#pa33w0rd"
+
+ [nginx.hosts.jenkins1]
+
+ [nginx.vars]
+ has_java = true
+
+example3: |
+ [ungrouped.hosts]
+ host1 = {}
+ host2 = { ansible_host = "127.0.0.1", ansible_port = 44 }
+ host3 = { ansible_host = "127.0.0.1", ansible_port = 45 }
+
+ [g1.hosts]
+ host4 = {}
+
+ [g2.hosts]
+ host4 = {}
+'''
+
+import os
+
+from functools import partial
+
+from ansible.errors import AnsibleFileNotFound, AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.module_utils.six import string_types, text_type
+from ansible.parsing.yaml.objects import AnsibleSequence, AnsibleUnicode
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText
+
+try:
+ import toml
+ HAS_TOML = True
+except ImportError:
+ HAS_TOML = False
+
+display = Display()
+
+WARNING_MSG = (
+ 'The TOML inventory format is marked as preview, which means that it is not guaranteed to have a backwards '
+ 'compatible interface.'
+)
+
+
+if HAS_TOML and hasattr(toml, 'TomlEncoder'):
+ class AnsibleTomlEncoder(toml.TomlEncoder):
+ def __init__(self, *args, **kwargs):
+ super(AnsibleTomlEncoder, self).__init__(*args, **kwargs)
+ # Map our custom YAML object types to dump_funcs from ``toml``
+ self.dump_funcs.update({
+ AnsibleSequence: self.dump_funcs.get(list),
+ AnsibleUnicode: self.dump_funcs.get(str),
+ AnsibleUnsafeBytes: self.dump_funcs.get(str),
+ AnsibleUnsafeText: self.dump_funcs.get(str),
+ })
+ toml_dumps = partial(toml.dumps, encoder=AnsibleTomlEncoder())
+else:
+ def toml_dumps(data):
+ return toml.dumps(convert_yaml_objects_to_native(data))
+
+
+def convert_yaml_objects_to_native(obj):
+ """Older versions of the ``toml`` python library, don't have a pluggable
+ way to tell the encoder about custom types, so we need to ensure objects
+ that we pass are native types.
+
+ Only used on ``toml<0.10.0`` where ``toml.TomlEncoder`` is missing.
+
+ This function walks an object recursively and casts any of the types from
+ ``ansible.parsing.yaml.objects`` to their native equivalents, effectively cleansing
+ the data before it is handed over to ``toml``.
+
+ This function doesn't directly check for the types from ``ansible.parsing.yaml.objects``
+ but instead checks for the types those objects inherit from, to offer more flexibility.
+ """
+ if isinstance(obj, dict):
+ return dict((k, convert_yaml_objects_to_native(v)) for k, v in obj.items())
+ elif isinstance(obj, list):
+ return [convert_yaml_objects_to_native(v) for v in obj]
+ elif isinstance(obj, text_type):
+ return text_type(obj)
+ else:
+ return obj
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+ NAME = 'toml'
+
+ def _parse_group(self, group, group_data):
+ if group_data is not None and not isinstance(group_data, MutableMapping):
+ self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
+ return
+
+ group = self.inventory.add_group(group)
+ if group_data is None:
+ return
+
+ for key, data in group_data.items():
+ if key == 'vars':
+ if not isinstance(data, MutableMapping):
+ raise AnsibleParserError(
+ 'Invalid "vars" entry for "%s" group, requires a dict, found "%s" instead.' %
+ (group, type(data))
+ )
+ for var, value in data.items():
+ self.inventory.set_variable(group, var, value)
+
+ elif key == 'children':
+ if not isinstance(data, MutableSequence):
+ raise AnsibleParserError(
+ 'Invalid "children" entry for "%s" group, requires a list, found "%s" instead.' %
+ (group, type(data))
+ )
+ for subgroup in data:
+ self._parse_group(subgroup, {})
+ self.inventory.add_child(group, subgroup)
+
+ elif key == 'hosts':
+ if not isinstance(data, MutableMapping):
+ raise AnsibleParserError(
+ 'Invalid "hosts" entry for "%s" group, requires a dict, found "%s" instead.' %
+ (group, type(data))
+ )
+ for host_pattern, value in data.items():
+ hosts, port = self._expand_hostpattern(host_pattern)
+ self._populate_host_vars(hosts, value, group, port)
+ else:
+ self.display.warning(
+ 'Skipping unexpected key "%s" in group "%s", only "vars", "children" and "hosts" are valid' %
+ (key, group)
+ )
+
+ def _load_file(self, file_name):
+ if not file_name or not isinstance(file_name, string_types):
+ raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
+
+ b_file_name = to_bytes(self.loader.path_dwim(file_name))
+ if not self.loader.path_exists(b_file_name):
+ raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
+
+ try:
+ (b_data, private) = self.loader._get_file_contents(file_name)
+ return toml.loads(to_text(b_data, errors='surrogate_or_strict'))
+ except toml.TomlDecodeError as e:
+ raise AnsibleParserError(
+ 'TOML file (%s) is invalid: %s' % (file_name, to_native(e)),
+ orig_exc=e
+ )
+ except (IOError, OSError) as e:
+ raise AnsibleParserError(
+ "An error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)),
+ orig_exc=e
+ )
+ except Exception as e:
+ raise AnsibleParserError(
+ "An unexpected error occurred while parsing the file '%s': %s" % (file_name, to_native(e)),
+ orig_exc=e
+ )
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' parses the inventory file '''
+ if not HAS_TOML:
+ raise AnsibleParserError(
+ 'The TOML inventory plugin requires the python "toml" library'
+ )
+
+ display.warning(WARNING_MSG)
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ try:
+ data = self._load_file(path)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ if not data:
+ raise AnsibleParserError('Parsed empty TOML file')
+ elif data.get('plugin'):
+ raise AnsibleParserError('Plugin configuration TOML file, not TOML inventory')
+
+ for group_name in data:
+ self._parse_group(group_name, data[group_name])
+
+ def verify_file(self, path):
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+ if ext == '.toml':
+ return True
+ return False
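
A self-contained sketch, not part of the patch, of why the toml<0.10.0 fallback above is needed: older encoders key their dump functions on exact types, so str/list subclasses coming out of the YAML layer must be collapsed to native types first. FakeAnsibleUnicode is a made-up stand-in for AnsibleUnicode; requires the toml library.

import toml

class FakeAnsibleUnicode(str):
    pass  # stand-in for ansible.parsing.yaml.objects.AnsibleUnicode

def to_native_types(obj):
    # same shape as convert_yaml_objects_to_native() above
    if isinstance(obj, dict):
        return dict((k, to_native_types(v)) for k, v in obj.items())
    if isinstance(obj, list):
        return [to_native_types(v) for v in obj]
    if isinstance(obj, str):
        return str(obj)
    return obj

data = {'web': {'vars': {'http_port': FakeAnsibleUnicode('8080')}}}
print(toml.dumps(to_native_types(data)))
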
diff --git a/lib/ansible/plugins/inventory/yaml.py b/lib/ansible/plugins/inventory/yaml.py
new file mode 100644
index 00000000..dc882c6d
--- /dev/null
+++ b/lib/ansible/plugins/inventory/yaml.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ inventory: yaml
+ version_added: "2.4"
+ short_description: Uses a specific YAML file as an inventory source.
+ description:
+ - "YAML-based inventory, should start with the C(all) group and contain hosts/vars/children entries."
+ - Host entries can have sub-entries defined, which will be treated as variables.
+ - Vars entries are normal group vars.
+ - "Children are 'child groups', which can also have their own vars/hosts/children and so on."
+ - File MUST have a valid extension, defined in configuration.
+ notes:
+ - If you want to set vars for the C(all) group inside the inventory file, the C(all) group must be the first entry in the file.
+ - Whitelisted in configuration by default.
+ options:
+ yaml_extensions:
+ description: list of 'valid' extensions for files containing YAML
+ type: list
+ default: ['.yaml', '.yml', '.json']
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ - name: ANSIBLE_INVENTORY_PLUGIN_EXTS
+ ini:
+ - key: yaml_valid_extensions
+ section: defaults
+ - section: inventory_plugin_yaml
+ key: yaml_valid_extensions
+
+'''
+EXAMPLES = '''
+all: # keys must be unique, i.e. only one 'hosts' per group
+ hosts:
+ test1:
+ test2:
+ host_var: value
+ vars:
+ group_all_var: value
+ children: # key order does not matter, indentation does
+ other_group:
+ children:
+ group_x:
+ hosts:
+ test5 # Note that one machine will work without a colon
+ #group_x:
+ # hosts:
+ # test5 # But this won't
+ # test7 #
+ group_y:
+ hosts:
+ test6: # So always use a colon
+ vars:
+ g2_var2: value3
+ hosts:
+ test4:
+ ansible_host: 127.0.0.1
+ last_group:
+ hosts:
+ test1 # same host as above, additional group membership
+ vars:
+ group_last_var: value
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+
+NoneType = type(None)
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+
+ NAME = 'yaml'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+ if not ext or ext in self.get_option('yaml_extensions'):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ try:
+ data = self.loader.load_from_file(path, cache=False)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ if not data:
+ raise AnsibleParserError('Parsed empty YAML file')
+ elif not isinstance(data, MutableMapping):
+ raise AnsibleParserError('YAML inventory has invalid structure, it should be a dictionary, got: %s' % type(data))
+ elif data.get('plugin'):
+ raise AnsibleParserError('Plugin configuration YAML file, not YAML inventory')
+
+ # We expect top level keys to correspond to groups; iterate over them
+ # to get hosts, vars and subgroups (which we iterate over recursively)
+ if isinstance(data, MutableMapping):
+ for group_name in data:
+ self._parse_group(group_name, data[group_name])
+ else:
+ raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(data))
+
+ def _parse_group(self, group, group_data):
+
+ if isinstance(group_data, (MutableMapping, NoneType)):
+
+ try:
+ group = self.inventory.add_group(group)
+ except AnsibleError as e:
+ raise AnsibleParserError("Unable to add group %s: %s" % (group, to_text(e)))
+
+ if group_data is not None:
+ # make sure they are dicts
+ for section in ['vars', 'children', 'hosts']:
+ if section in group_data:
+ # convert strings to dicts as these are allowed
+ if isinstance(group_data[section], string_types):
+ group_data[section] = {group_data[section]: None}
+
+ if not isinstance(group_data[section], (MutableMapping, NoneType)):
+ raise AnsibleParserError('Invalid "%s" entry for "%s" group, requires a dictionary, found "%s" instead.' %
+ (section, group, type(group_data[section])))
+
+ for key in group_data:
+
+ if not isinstance(group_data[key], (MutableMapping, NoneType)):
+ self.display.warning('Skipping key (%s) in group (%s) as it is not a mapping, it is a %s' % (key, group, type(group_data[key])))
+ continue
+
+ if isinstance(group_data[key], NoneType):
+ self.display.vvv('Skipping empty key (%s) in group (%s)' % (key, group))
+ elif key == 'vars':
+ for var in group_data[key]:
+ self.inventory.set_variable(group, var, group_data[key][var])
+ elif key == 'children':
+ for subgroup in group_data[key]:
+ subgroup = self._parse_group(subgroup, group_data[key][subgroup])
+ self.inventory.add_child(group, subgroup)
+
+ elif key == 'hosts':
+ for host_pattern in group_data[key]:
+ hosts, port = self._parse_host(host_pattern)
+ self._populate_host_vars(hosts, group_data[key][host_pattern] or {}, group, port)
+ else:
+ self.display.warning('Skipping unexpected key (%s) in group (%s), only "vars", "children" and "hosts" are valid' % (key, group))
+
+ else:
+ self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
+
+ return group
+
+ def _parse_host(self, host_pattern):
+ '''
+ Each host key can be a pattern, try to process it and add variables as needed
+ '''
+ (hostnames, port) = self._expand_hostpattern(host_pattern)
+
+ return hostnames, port
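
The group recursion in _parse_group() above is easiest to see on a plain dictionary; a minimal standalone sketch using only PyYAML, with made-up group and host names (hosts and children shown; vars are handled analogously):

import yaml

INVENTORY = """
all:
  children:
    web:
      hosts:
        web1:
        web2:
          ansible_port: 222
      vars:
        http_port: 8080
"""

def walk(group, data, depth=0):
    # mirrors _parse_group(): 'hosts', 'vars' and 'children' are the meaningful keys
    print('  ' * depth + group)
    data = data or {}
    for host, hostvars in (data.get('hosts') or {}).items():
        print('  ' * (depth + 1) + '%s %s' % (host, hostvars or {}))
    for child, child_data in (data.get('children') or {}).items():
        walk(child, child_data, depth + 1)

for group, group_data in yaml.safe_load(INVENTORY).items():
    walk(group, group_data)
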
diff --git a/lib/ansible/plugins/loader.py b/lib/ansible/plugins/loader.py
new file mode 100644
index 00000000..957fa725
--- /dev/null
+++ b/lib/ansible/plugins/loader.py
@@ -0,0 +1,1275 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import os
+import os.path
+import sys
+import warnings
+
+from collections import defaultdict, namedtuple
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsiblePluginCircularRedirect, AnsiblePluginRemovedError, AnsibleCollectionUnsupportedVersionError
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.module_utils.compat.importlib import import_module
+from ansible.module_utils.six import string_types
+from ansible.parsing.utils.yaml import from_yaml
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.plugins import get_plugin_class, MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder, _get_collection_metadata
+from ansible.utils.display import Display
+from ansible.utils.plugin_docs import add_fragments
+from ansible import __version__ as ansible_version
+
+# TODO: take the packaging dep, or vendor SpecifierSet?
+
+try:
+ from packaging.specifiers import SpecifierSet
+ from packaging.version import Version
+except ImportError:
+ SpecifierSet = None
+ Version = None
+
+try:
+ # use C version if possible for speedup
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+try:
+ import importlib.util
+ imp = None
+except ImportError:
+ import imp
+
+display = Display()
+
+get_with_context_result = namedtuple('get_with_context_result', ['object', 'plugin_load_context'])
+
+
+def get_all_plugin_loaders():
+ return [(name, obj) for (name, obj) in globals().items() if isinstance(obj, PluginLoader)]
+
+
+def add_all_plugin_dirs(path):
+ ''' add any existing plugin dirs in the path provided '''
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ if os.path.isdir(b_path):
+ for name, obj in get_all_plugin_loaders():
+ if obj.subdir:
+ plugin_path = os.path.join(b_path, to_bytes(obj.subdir))
+ if os.path.isdir(plugin_path):
+ obj.add_directory(to_text(plugin_path))
+ else:
+ display.warning("Ignoring invalid path provided to plugin path: '%s' is not a directory" % to_text(path))
+
+
+def get_shell_plugin(shell_type=None, executable=None):
+
+ if not shell_type:
+ # default to sh
+ shell_type = 'sh'
+
+ # mostly for backwards compat
+ if executable:
+ if isinstance(executable, string_types):
+ shell_filename = os.path.basename(executable)
+ try:
+ shell = shell_loader.get(shell_filename)
+ except Exception:
+ shell = None
+
+ if shell is None:
+ for shell in shell_loader.all():
+ if shell_filename in shell.COMPATIBLE_SHELLS:
+ shell_type = shell.SHELL_FAMILY
+ break
+ else:
+ raise AnsibleError("Either a shell type or a shell executable must be provided ")
+
+ shell = shell_loader.get(shell_type)
+ if not shell:
+ raise AnsibleError("Could not find the shell plugin required (%s)." % shell_type)
+
+ if executable:
+ setattr(shell, 'executable', executable)
+
+ return shell
+
+
+def add_dirs_to_loader(which_loader, paths):
+
+ loader = getattr(sys.modules[__name__], '%s_loader' % which_loader)
+ for path in paths:
+ loader.add_directory(path, with_subdir=True)
+
+
+class PluginPathContext(object):
+ def __init__(self, path, internal):
+ self.path = path
+ self.internal = internal
+
+
+class PluginLoadContext(object):
+ def __init__(self):
+ self.original_name = None
+ self.redirect_list = []
+ self.error_list = []
+ self.import_error_list = []
+ self.load_attempts = []
+ self.pending_redirect = None
+ self.exit_reason = None
+ self.plugin_resolved_path = None
+ self.plugin_resolved_name = None
+ self.plugin_resolved_collection = None # empty string for resolved plugins from user-supplied paths
+ self.deprecated = False
+ self.removal_date = None
+ self.removal_version = None
+ self.deprecation_warnings = []
+ self.resolved = False
+
+ def record_deprecation(self, name, deprecation, collection_name):
+ if not deprecation:
+ return self
+
+ warning_text = deprecation.get('warning_text', None)
+ removal_date = deprecation.get('removal_date', None)
+ removal_version = deprecation.get('removal_version', None)
+ # If both removal_date and removal_version are specified, use removal_date
+ if removal_date is not None:
+ removal_version = None
+ if not warning_text:
+ warning_text = '{0} has been deprecated'.format(name)
+
+ display.deprecated(warning_text, date=removal_date, version=removal_version, collection_name=collection_name)
+
+ self.deprecated = True
+ if removal_date:
+ self.removal_date = removal_date
+ if removal_version:
+ self.removal_version = removal_version
+ self.deprecation_warnings.append(warning_text)
+ return self
+
+ def resolve(self, resolved_name, resolved_path, resolved_collection, exit_reason):
+ self.pending_redirect = None
+ self.plugin_resolved_name = resolved_name
+ self.plugin_resolved_path = resolved_path
+ self.plugin_resolved_collection = resolved_collection
+ self.exit_reason = exit_reason
+ self.resolved = True
+ return self
+
+ def redirect(self, redirect_name):
+ self.pending_redirect = redirect_name
+ self.exit_reason = 'pending redirect resolution from {0} to {1}'.format(self.original_name, redirect_name)
+ self.resolved = False
+ return self
+
+ def nope(self, exit_reason):
+ self.pending_redirect = None
+ self.exit_reason = exit_reason
+ self.resolved = False
+ return self
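
The three terminal transitions above (resolve, redirect, nope) form a small state machine driven by find_plugin_with_context(); a usage sketch, assuming the class above and made-up plugin names:

ctx = PluginLoadContext()
ctx.original_name = 'ns.coll.old_name'
ctx.redirect('ns.coll.new_name')       # resolved=False, pending_redirect set
assert ctx.pending_redirect == 'ns.coll.new_name'
ctx.resolve('new_name', '/tmp/new_name.py', 'ns.coll', 'found exact match')
assert ctx.resolved and ctx.pending_redirect is None
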
+
+
+class PluginLoader:
+ '''
+ PluginLoader loads plugins from the configured plugin directories.
+
+ It searches for plugins by iterating through the combined list of play basedirs, configured
+ paths, and the python path. The first match is used.
+ '''
+
+ def __init__(self, class_name, package, config, subdir, aliases=None, required_base_class=None):
+ aliases = {} if aliases is None else aliases
+
+ self.class_name = class_name
+ self.base_class = required_base_class
+ self.package = package
+ self.subdir = subdir
+
+ # FIXME: remove alias dict in favor of alias by symlink?
+ self.aliases = aliases
+
+ if config and not isinstance(config, list):
+ config = [config]
+ elif not config:
+ config = []
+
+ self.config = config
+
+ if class_name not in MODULE_CACHE:
+ MODULE_CACHE[class_name] = {}
+ if class_name not in PATH_CACHE:
+ PATH_CACHE[class_name] = None
+ if class_name not in PLUGIN_PATH_CACHE:
+ PLUGIN_PATH_CACHE[class_name] = defaultdict(dict)
+
+ # hold dirs added at runtime outside of config
+ self._extra_dirs = []
+
+ # caches
+ self._module_cache = MODULE_CACHE[class_name]
+ self._paths = PATH_CACHE[class_name]
+ self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
+
+ self._searched_paths = set()
+
+ def __repr__(self):
+ return 'PluginLoader(type={0})'.format(AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir))
+
+ def _clear_caches(self):
+
+ if C.OLD_PLUGIN_CACHE_CLEARING:
+ self._paths = None
+ else:
+ # reset global caches
+ MODULE_CACHE[self.class_name] = {}
+ PATH_CACHE[self.class_name] = None
+ PLUGIN_PATH_CACHE[self.class_name] = defaultdict(dict)
+
+ # reset internal caches
+ self._module_cache = MODULE_CACHE[self.class_name]
+ self._paths = PATH_CACHE[self.class_name]
+ self._plugin_path_cache = PLUGIN_PATH_CACHE[self.class_name]
+ self._searched_paths = set()
+
+ def __setstate__(self, data):
+ '''
+ Deserializer.
+ '''
+
+ class_name = data.get('class_name')
+ package = data.get('package')
+ config = data.get('config')
+ subdir = data.get('subdir')
+ aliases = data.get('aliases')
+ base_class = data.get('base_class')
+
+ PATH_CACHE[class_name] = data.get('PATH_CACHE')
+ PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
+
+ self.__init__(class_name, package, config, subdir, aliases, base_class)
+ self._extra_dirs = data.get('_extra_dirs', [])
+ self._searched_paths = data.get('_searched_paths', set())
+
+ def __getstate__(self):
+ '''
+ Serializer.
+ '''
+
+ return dict(
+ class_name=self.class_name,
+ base_class=self.base_class,
+ package=self.package,
+ config=self.config,
+ subdir=self.subdir,
+ aliases=self.aliases,
+ _extra_dirs=self._extra_dirs,
+ _searched_paths=self._searched_paths,
+ PATH_CACHE=PATH_CACHE[self.class_name],
+ PLUGIN_PATH_CACHE=PLUGIN_PATH_CACHE[self.class_name],
+ )
+
+ def format_paths(self, paths):
+ ''' Returns a string suitable for printing of the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in paths:
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+ def print_paths(self):
+ return self.format_paths(self._get_paths(subdirs=False))
+
+ def _all_directories(self, dir):
+ results = []
+ results.append(dir)
+ for root, subdirs, files in os.walk(dir, followlinks=True):
+ if '__init__.py' in files:
+ for x in subdirs:
+ results.append(os.path.join(root, x))
+ return results
+
+ def _get_package_paths(self, subdirs=True):
+ ''' Gets the path of a Python package '''
+
+ if not self.package:
+ return []
+ if not hasattr(self, 'package_path'):
+ m = __import__(self.package)
+ parts = self.package.split('.')[1:]
+ for parent_mod in parts:
+ m = getattr(m, parent_mod)
+ self.package_path = os.path.dirname(m.__file__)
+ if subdirs:
+ return self._all_directories(self.package_path)
+ return [self.package_path]
+
+ def _get_paths_with_context(self, subdirs=True):
+ ''' Return a list of PluginPathContext objects to search for plugins in '''
+
+ # FIXME: This is potentially buggy if subdirs is sometimes True and sometimes False.
+ # In current usage, everything calls this with subdirs=True except for module_utils_loader and ansible-doc
+ # which always call it with subdirs=False. So there currently isn't a problem with this caching.
+ if self._paths is not None:
+ return self._paths
+
+ ret = [PluginPathContext(p, False) for p in self._extra_dirs]
+
+ # look in any configured plugin paths, allow one level deep for subcategories
+ if self.config is not None:
+ for path in self.config:
+ path = os.path.realpath(os.path.expanduser(path))
+ if subdirs:
+ contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
+ for c in contents:
+ if os.path.isdir(c) and c not in ret:
+ ret.append(PluginPathContext(c, False))
+ if path not in ret:
+ ret.append(PluginPathContext(path, False))
+
+ # look for any plugins installed in the package subtree
+ # Note package path always gets added last so that every other type of
+ # path is searched before it.
+ ret.extend([PluginPathContext(p, True) for p in self._get_package_paths(subdirs=subdirs)])
+
+ # HACK: because powershell modules are in the same directory
+ # hierarchy as other modules we have to process them last. This is
+ # because powershell only works on windows but the other modules work
+ # anywhere (possibly including windows if the correct language
+ # interpreter is installed). The non-powershell modules can have any
+ # file extension, and thus powershell modules would otherwise be picked up with them.
+ # The non-hack way to fix this is to have powershell modules be
+ # a different PluginLoader/ModuleLoader. But that requires changing
+ # other things too (known things to change would be PATHS_CACHE,
+ # PLUGIN_PATHS_CACHE, and MODULE_CACHE). Since those three dicts key
+ # on the class_name, and neither regular modules nor powershell modules
+ # would have class_names, they would not work as written.
+ #
+ # The expected sort order is paths in the order in 'ret' with paths ending in '/windows' at the end,
+ # also in the original order they were found in 'ret'.
+ # The .sort() method is guaranteed to be stable, so original order is preserved.
+ ret.sort(key=lambda p: p.path.endswith('/windows'))
+
+ # cache and return the result
+ self._paths = ret
+ return ret
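
The '/windows' ordering trick above works because False sorts before True and list.sort() is stable; in isolation (paths made up):

paths = ['/x/modules', '/x/modules/windows', '/y/modules', '/y/modules/windows']
paths.sort(key=lambda p: p.endswith('/windows'))
# ['/x/modules', '/y/modules', '/x/modules/windows', '/y/modules/windows']
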
+
+ def _get_paths(self, subdirs=True):
+ ''' Return a list of paths to search for plugins in '''
+
+ paths_with_context = self._get_paths_with_context(subdirs=subdirs)
+ return [path_with_context.path for path_with_context in paths_with_context]
+
+ def _load_config_defs(self, name, module, path):
+ ''' Reads plugin docs to find configuration setting definitions and pushes them to the config manager for later use '''
+
+ # plugins w/o class name don't support config
+ if self.class_name:
+ type_name = get_plugin_class(self.class_name)
+
+ # if type name != 'module_doc_fragment':
+ if type_name in C.CONFIGURABLE_PLUGINS:
+ dstring = AnsibleLoader(getattr(module, 'DOCUMENTATION', ''), file_name=path).get_single_data()
+ if dstring:
+ add_fragments(dstring, path, fragment_loader=fragment_loader, is_module=(type_name == 'module'))
+
+ if dstring and 'options' in dstring and isinstance(dstring['options'], dict):
+ C.config.initialize_plugin_configuration_definitions(type_name, name, dstring['options'])
+ display.debug('Loaded config def from plugin (%s/%s)' % (type_name, name))
+
+ def add_directory(self, directory, with_subdir=False):
+ ''' Adds an additional directory to the search path '''
+
+ directory = os.path.realpath(directory)
+
+ if directory is not None:
+ if with_subdir:
+ directory = os.path.join(directory, self.subdir)
+ if directory not in self._extra_dirs:
+ # append the directory and invalidate the path cache
+ self._extra_dirs.append(directory)
+ self._clear_caches()
+ display.debug('Added %s to loader search path' % (directory))
+
+ def _query_collection_routing_meta(self, acr, plugin_type, extension=None):
+ collection_pkg = import_module(acr.n_python_collection_package_name)
+ if not collection_pkg:
+ return None
+
+ # FIXME: shouldn't need this...
+ try:
+ # force any type-specific metadata postprocessing to occur
+ import_module(acr.n_python_collection_package_name + '.plugins.{0}'.format(plugin_type))
+ except ImportError:
+ pass
+
+ # this will be created by the collection PEP302 loader
+ collection_meta = getattr(collection_pkg, '_collection_meta', None)
+
+ if not collection_meta:
+ return None
+
+ # TODO: add subdirs support
+ # check for extension-specific entry first (eg 'setup.ps1')
+ # TODO: str/bytes on extension/name munging
+ if acr.subdirs:
+ subdir_qualified_resource = '.'.join([acr.subdirs, acr.resource])
+ else:
+ subdir_qualified_resource = acr.resource
+ entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource + extension, None)
+ if not entry:
+ # try for extension-agnostic entry
+ entry = collection_meta.get('plugin_routing', {}).get(plugin_type, {}).get(subdir_qualified_resource, None)
+ return entry
+
+ def _find_fq_plugin(self, fq_name, extension, plugin_load_context):
+ """Search builtin paths to find a plugin. No external paths are searched,
+ meaning plugins inside roles inside collections will be ignored.
+ """
+
+ plugin_load_context.resolved = False
+
+ plugin_type = AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(self.subdir)
+
+ acr = AnsibleCollectionRef.from_fqcr(fq_name, plugin_type)
+
+ # check collection metadata to see if any special handling is required for this plugin
+ routing_metadata = self._query_collection_routing_meta(acr, plugin_type, extension=extension)
+
+ # TODO: factor this into a wrapper method
+ if routing_metadata:
+ deprecation = routing_metadata.get('deprecation', None)
+
+ # this will no-op if there's no deprecation metadata for this plugin
+ plugin_load_context.record_deprecation(fq_name, deprecation, acr.collection)
+
+ tombstone = routing_metadata.get('tombstone', None)
+
+ # FIXME: clean up text gen
+ if tombstone:
+ removal_date = tombstone.get('removal_date')
+ removal_version = tombstone.get('removal_version')
+ warning_text = tombstone.get('warning_text') or '{0} has been removed.'.format(fq_name)
+ removed_msg = display.get_deprecation_message(msg=warning_text, version=removal_version,
+ date=removal_date, removed=True,
+ collection_name=acr.collection)
+ plugin_load_context.removal_date = removal_date
+ plugin_load_context.removal_version = removal_version
+ plugin_load_context.resolved = True
+ plugin_load_context.exit_reason = removed_msg
+ raise AnsiblePluginRemovedError(removed_msg, plugin_load_context=plugin_load_context)
+
+ redirect = routing_metadata.get('redirect', None)
+
+ if redirect:
+ # FIXME: remove once this is covered in debug or whatever
+ display.vv("redirecting (type: {0}) {1} to {2}".format(plugin_type, fq_name, redirect))
+ return plugin_load_context.redirect(redirect)
+ # TODO: non-FQCN case, do we support `.` prefix for current collection, assume it with no dots, require it for subdirs in current, or ?
+
+ n_resource = to_native(acr.resource, errors='strict')
+ # we want this before the extension is added
+ full_name = '{0}.{1}'.format(acr.n_python_package_name, n_resource)
+
+ if extension:
+ n_resource += extension
+
+ pkg = sys.modules.get(acr.n_python_package_name)
+ if not pkg:
+ # FIXME: there must be cheaper/safer way to do this
+ try:
+ pkg = import_module(acr.n_python_package_name)
+ except ImportError:
+ return plugin_load_context.nope('Python package {0} not found'.format(acr.n_python_package_name))
+
+ pkg_path = os.path.dirname(pkg.__file__)
+
+ n_resource_path = os.path.join(pkg_path, n_resource)
+
+ # FIXME: and is file or file link or ...
+ if os.path.exists(n_resource_path):
+ return plugin_load_context.resolve(
+ full_name, to_text(n_resource_path), acr.collection, 'found exact match for {0} in {1}'.format(full_name, acr.collection))
+
+ if extension:
+ # the request was extension-specific, don't try for an extensionless match
+ return plugin_load_context.nope('no match for {0} in {1}'.format(to_text(n_resource), acr.collection))
+
+ # look for any matching extension in the package location (sans filter)
+ found_files = [f
+ for f in glob.iglob(os.path.join(pkg_path, n_resource) + '.*')
+ if os.path.isfile(f) and not f.endswith(C.MODULE_IGNORE_EXTS)]
+
+ if not found_files:
+ return plugin_load_context.nope('failed fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
+
+ if len(found_files) > 1:
+ # TODO: warn?
+ pass
+
+ return plugin_load_context.resolve(
+ full_name, to_text(found_files[0]), acr.collection, 'found fuzzy extension match for {0} in {1}'.format(full_name, acr.collection))
+
+ def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
+ ''' Find a plugin named name '''
+ result = self.find_plugin_with_context(name, mod_type, ignore_deprecated, check_aliases, collection_list)
+ if result.resolved and result.plugin_resolved_path:
+ return result.plugin_resolved_path
+
+ return None
+
+ def find_plugin_with_context(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
+ ''' Find a plugin named name, returning contextual info about the load, recursively resolving redirection '''
+ plugin_load_context = PluginLoadContext()
+ plugin_load_context.original_name = name
+ while True:
+ result = self._resolve_plugin_step(name, mod_type, ignore_deprecated, check_aliases, collection_list, plugin_load_context=plugin_load_context)
+ if result.pending_redirect:
+ if result.pending_redirect in result.redirect_list:
+ raise AnsiblePluginCircularRedirect('plugin redirect loop resolving {0} (path: {1})'.format(result.original_name, result.redirect_list))
+ name = result.pending_redirect
+ result.pending_redirect = None
+ plugin_load_context = result
+ else:
+ break
+
+ # TODO: smuggle these to the controller when we're in a worker, reduce noise from normal things like missing plugin packages during collection search
+ if plugin_load_context.error_list:
+ display.warning("errors were encountered during the plugin load for {0}:\n{1}".format(name, plugin_load_context.error_list))
+
+ # TODO: display/return import_error_list? Only useful for forensics...
+
+ # FIXME: store structured deprecation data in PluginLoadContext and use display.deprecate
+ # if plugin_load_context.deprecated and C.config.get_config_value('DEPRECATION_WARNINGS'):
+ # for dw in plugin_load_context.deprecation_warnings:
+ # # TODO: need to smuggle these to the controller if we're in a worker context
+ # display.warning('[DEPRECATION WARNING] ' + dw)
+
+ return plugin_load_context
+
+ # FIXME: name bikeshed
+ def _resolve_plugin_step(self, name, mod_type='', ignore_deprecated=False,
+ check_aliases=False, collection_list=None, plugin_load_context=PluginLoadContext()):
+ if not plugin_load_context:
+ raise ValueError('A PluginLoadContext is required')
+
+ plugin_load_context.redirect_list.append(name)
+ plugin_load_context.resolved = False
+
+ global _PLUGIN_FILTERS
+ if name in _PLUGIN_FILTERS[self.package]:
+ plugin_load_context.exit_reason = '{0} matched a defined plugin filter'.format(name)
+ return plugin_load_context
+
+ if mod_type:
+ suffix = mod_type
+ elif self.class_name:
+ # Ansible plugins that run in the controller process (most plugins)
+ suffix = '.py'
+ else:
+ # Only Ansible Modules. Ansible modules can be any executable so
+ # they can have any suffix
+ suffix = ''
+
+ # FIXME: need this right now so we can still load shipped PS module_utils; come up with a more robust solution
+ if (AnsibleCollectionRef.is_valid_fqcr(name) or collection_list) and not name.startswith('Ansible'):
+ if '.' in name or not collection_list:
+ candidates = [name]
+ else:
+ candidates = ['{0}.{1}'.format(c, name) for c in collection_list]
+
+ for candidate_name in candidates:
+ try:
+ plugin_load_context.load_attempts.append(candidate_name)
+ # HACK: refactor this properly
+ if candidate_name.startswith('ansible.legacy'):
+ # 'ansible.legacy' refers to the plugin finding behavior used before collections existed.
+ # They need to search 'library' and the various '*_plugins' directories in order to find the file.
+ plugin_load_context = self._find_plugin_legacy(name.replace('ansible.legacy.', '', 1),
+ plugin_load_context, ignore_deprecated, check_aliases, suffix)
+ else:
+ # 'ansible.builtin' should be handled here. This means only internal, or builtin, paths are searched.
+ plugin_load_context = self._find_fq_plugin(candidate_name, suffix, plugin_load_context=plugin_load_context)
+
+ if candidate_name != plugin_load_context.original_name and candidate_name not in plugin_load_context.redirect_list:
+ plugin_load_context.redirect_list.append(candidate_name)
+
+ if plugin_load_context.resolved or plugin_load_context.pending_redirect: # if we got an answer or need to chase down a redirect, return
+ return plugin_load_context
+ except (AnsiblePluginRemovedError, AnsiblePluginCircularRedirect, AnsibleCollectionUnsupportedVersionError):
+ # these are generally fatal, let them fly
+ raise
+ except ImportError as ie:
+ plugin_load_context.import_error_list.append(ie)
+ except Exception as ex:
+ # FIXME: keep actual errors, not just assembled messages
+ plugin_load_context.error_list.append(to_native(ex))
+
+ if plugin_load_context.error_list:
+ display.debug(msg='plugin lookup for {0} failed; errors: {1}'.format(name, '; '.join(plugin_load_context.error_list)))
+
+ plugin_load_context.exit_reason = 'no matches found for {0}'.format(name)
+
+ return plugin_load_context
+
+ # if we got here, there's no collection list and it's not an FQ name, so do legacy lookup
+
+ return self._find_plugin_legacy(name, plugin_load_context, ignore_deprecated, check_aliases, suffix)
+
+ def _find_plugin_legacy(self, name, plugin_load_context, ignore_deprecated=False, check_aliases=False, suffix=None):
+ """Search library and various *_plugins paths in order to find the file.
+ This was behavior prior to the existence of collections.
+ """
+ plugin_load_context.resolved = False
+
+ if check_aliases:
+ name = self.aliases.get(name, name)
+
+ # The particular cache to look for modules within. This matches the
+ # requested mod_type
+ pull_cache = self._plugin_path_cache[suffix]
+ try:
+ path_with_context = pull_cache[name]
+ plugin_load_context.plugin_resolved_path = path_with_context.path
+ plugin_load_context.plugin_resolved_name = name
+ plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
+ plugin_load_context.resolved = True
+ return plugin_load_context
+ except KeyError:
+ # Cache miss. Now let's find the plugin
+ pass
+
+ # TODO: Instead of using the self._paths cache (PATH_CACHE) and
+ # self._searched_paths we could use an iterator. Before enabling that
+ # we need to make sure we don't want to add additional directories
+ # (add_directory()) once we start using the iterator.
+ # We can use _get_paths_with_context() since add_directory() forces a cache refresh.
+ for path_context in (p for p in self._get_paths_with_context() if p.path not in self._searched_paths and os.path.isdir(p.path)):
+ path = path_context.path
+ display.debug('trying %s' % path)
+ plugin_load_context.load_attempts.append(path)
+ try:
+ full_paths = (os.path.join(path, f) for f in os.listdir(path))
+ except OSError as e:
+ display.warning("Error accessing plugin paths: %s" % to_text(e))
+
+ for full_path in (f for f in full_paths if os.path.isfile(f) and not f.endswith('__init__.py')):
+ full_name = os.path.basename(full_path)
+
+ # HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
+ # FIXME: I believe this is only correct for modules and module_utils.
+ # For all other plugins, .pyc and .pyo should be valid
+ if any(full_path.endswith(x) for x in C.MODULE_IGNORE_EXTS):
+ continue
+
+ splitname = os.path.splitext(full_name)
+ base_name = splitname[0]
+ internal = path_context.internal
+ try:
+ extension = splitname[1]
+ except IndexError:
+ extension = ''
+
+ # Module found, now enter it into the caches that match this file
+ if base_name not in self._plugin_path_cache['']:
+ self._plugin_path_cache[''][base_name] = PluginPathContext(full_path, internal)
+
+ if full_name not in self._plugin_path_cache['']:
+ self._plugin_path_cache[''][full_name] = PluginPathContext(full_path, internal)
+
+ if base_name not in self._plugin_path_cache[extension]:
+ self._plugin_path_cache[extension][base_name] = PluginPathContext(full_path, internal)
+
+ if full_name not in self._plugin_path_cache[extension]:
+ self._plugin_path_cache[extension][full_name] = PluginPathContext(full_path, internal)
+
+ self._searched_paths.add(path)
+ try:
+ path_with_context = pull_cache[name]
+ plugin_load_context.plugin_resolved_path = path_with_context.path
+ plugin_load_context.plugin_resolved_name = name
+ plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
+ plugin_load_context.resolved = True
+ return plugin_load_context
+ except KeyError:
+ # Didn't find the plugin in this directory. Load modules from the next one
+ pass
+
+ # if nothing is found, try finding alias/deprecated
+ if not name.startswith('_'):
+ alias_name = '_' + name
+ # We've already cached all the paths at this point
+ if alias_name in pull_cache:
+ path_with_context = pull_cache[alias_name]
+ if not ignore_deprecated and not os.path.islink(path_with_context.path):
+ # FIXME: this is not always the case, some are just aliases
+ display.deprecated('%s is kept for backwards compatibility but usage is discouraged. ' # pylint: disable=ansible-deprecated-no-version
+ 'The module documentation details page may explain more about this rationale.' % name.lstrip('_'))
+ plugin_load_context.plugin_resolved_path = path_with_context.path
+ plugin_load_context.plugin_resolved_name = alias_name
+ plugin_load_context.plugin_resolved_collection = 'ansible.builtin' if path_with_context.internal else ''
+ plugin_load_context.resolved = True
+ return plugin_load_context
+
+ # last ditch, if it's something that can be redirected, look for a builtin redirect before giving up
+ candidate_fqcr = 'ansible.builtin.{0}'.format(name)
+ if '.' not in name and AnsibleCollectionRef.is_valid_fqcr(candidate_fqcr):
+ return self._find_fq_plugin(fq_name=candidate_fqcr, extension=suffix, plugin_load_context=plugin_load_context)
+
+ return plugin_load_context.nope('{0} is not eligible for last-chance resolution'.format(name))
+
+ def has_plugin(self, name, collection_list=None):
+ ''' Checks if a plugin named name exists '''
+
+ try:
+ return self.find_plugin(name, collection_list=collection_list) is not None
+ except Exception as ex:
+ if isinstance(ex, AnsibleError):
+ raise
+ # log and continue, likely an innocuous type/package loading failure in collections import
+ display.debug('has_plugin error: {0}'.format(to_text(ex)))
+
+ __contains__ = has_plugin
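
Since __contains__ is aliased to has_plugin, callers can use the in operator directly; for example, with the module_loader instance defined at the bottom of this file (illustrative, requires an ansible install):

from ansible.plugins.loader import module_loader

if 'ping' in module_loader:  # same as module_loader.has_plugin('ping')
    print(module_loader.find_plugin('ping'))
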
+
+ def _load_module_source(self, name, path):
+
+ # avoid collisions across plugins
+ if name.startswith('ansible_collections.'):
+ full_name = name
+ else:
+ full_name = '.'.join([self.package, name])
+
+ if full_name in sys.modules:
+ # Avoids double loading, See https://github.com/ansible/ansible/issues/13110
+ return sys.modules[full_name]
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", RuntimeWarning)
+ if imp is None:
+ spec = importlib.util.spec_from_file_location(to_native(full_name), to_native(path))
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ sys.modules[full_name] = module
+ else:
+ with open(to_bytes(path), 'rb') as module_file:
+ # to_native is used here because imp.load_source's path is for tracebacks and python's traceback formatting uses native strings
+ module = imp.load_source(to_native(full_name), to_native(path), module_file)
+ return module
+
+ def _update_object(self, obj, name, path, redirected_names=None):
+
+ # set extra info on the module, in case we want it later
+ setattr(obj, '_original_path', path)
+ setattr(obj, '_load_name', name)
+ setattr(obj, '_redirected_names', redirected_names or [])
+
+ def get(self, name, *args, **kwargs):
+ return self.get_with_context(name, *args, **kwargs).object
+
+ def get_with_context(self, name, *args, **kwargs):
+ ''' instantiates a plugin of the given name using arguments '''
+
+ found_in_cache = True
+ class_only = kwargs.pop('class_only', False)
+ collection_list = kwargs.pop('collection_list', None)
+ if name in self.aliases:
+ name = self.aliases[name]
+ plugin_load_context = self.find_plugin_with_context(name, collection_list=collection_list)
+ if not plugin_load_context.resolved or not plugin_load_context.plugin_resolved_path:
+ # FIXME: this is probably an error (eg removed plugin)
+ return get_with_context_result(None, plugin_load_context)
+
+ name = plugin_load_context.plugin_resolved_name
+ path = plugin_load_context.plugin_resolved_path
+ redirected_names = plugin_load_context.redirect_list or []
+
+ if path not in self._module_cache:
+ self._module_cache[path] = self._load_module_source(name, path)
+ self._load_config_defs(name, self._module_cache[path], path)
+ found_in_cache = False
+
+ obj = getattr(self._module_cache[path], self.class_name)
+ if self.base_class:
+ # The import path is hardcoded and should be the right place,
+ # so we are not expecting an ImportError.
+ module = __import__(self.package, fromlist=[self.base_class])
+ # Check whether this obj has the required base class.
+ try:
+ plugin_class = getattr(module, self.base_class)
+ except AttributeError:
+ return get_with_context_result(None, plugin_load_context)
+ if not issubclass(obj, plugin_class):
+ return get_with_context_result(None, plugin_load_context)
+
+ # FIXME: update this to use the load context
+ self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
+
+ if not class_only:
+ try:
+ # A plugin may need to use its _load_name in __init__ (for example, to set
+ # or get options from config), so update the object before using the constructor
+ instance = object.__new__(obj)
+ self._update_object(instance, name, path, redirected_names)
+ obj.__init__(instance, *args, **kwargs)
+ obj = instance
+ except TypeError as e:
+ if "abstract" in e.args[0]:
+ # Abstract Base Class. The found plugin file does not
+ # fully implement the defined interface.
+ return get_with_context_result(None, plugin_load_context)
+ raise
+
+ self._update_object(obj, name, path, redirected_names)
+ return get_with_context_result(obj, plugin_load_context)
+
+ def _display_plugin_load(self, class_name, name, searched_paths, path, found_in_cache=None, class_only=None):
+ ''' formats data to display debug info for plugin loading; also avoids processing unless really needed '''
+ if C.DEFAULT_DEBUG:
+ msg = 'Loading %s \'%s\' from %s' % (class_name, os.path.basename(name), path)
+
+ if len(searched_paths) > 1:
+ msg = '%s (searched paths: %s)' % (msg, self.format_paths(searched_paths))
+
+ if found_in_cache or class_only:
+ msg = '%s (found_in_cache=%s, class_only=%s)' % (msg, found_in_cache, class_only)
+
+ display.debug(msg)
+
+ def all(self, *args, **kwargs):
+ '''
+ Iterate through all plugins of this type
+
+ A plugin loader is initialized with a specific type. This function is an iterator returning
+ all of the plugins of that type to the caller.
+
+ :kwarg path_only: If this is set to True, then we return the paths to where the plugins reside
+ instead of an instance of the plugin. This conflicts with class_only and both should
+ not be set.
+ :kwarg class_only: If this is set to True then we return the python class which implements
+ a plugin rather than an instance of the plugin. This conflicts with path_only and both
+ should not be set.
+ :kwarg _dedupe: By default, we only return one plugin per plugin name. Deduplication happens
+ in the same way as the :meth:`get` and :meth:`find_plugin` methods resolve which plugin
+ should take precedence. If this is set to False, then we return all of the plugins
+ found, including those with duplicate names. In the case of duplicates, the order in
+ which they are returned is the one that would take precedence first, followed by the
+ others in decreasing precedence order. This should only be used by subclasses which
+ want to manage their own deduplication of the plugins.
+ :*args: Any extra arguments are passed to each plugin when it is instantiated.
+ :**kwargs: Any extra keyword arguments are passed to each plugin when it is instantiated.
+ '''
+ # TODO: Change the signature of this method to:
+ # def all(return_type='instance', args=None, kwargs=None):
+ # if args is None: args = []
+ # if kwargs is None: kwargs = {}
+ # return_type can be instance, class, or path.
+ # These changes will mean that plugin parameters won't conflict with our params and
+ # will also make it impossible to request both a path and a class at the same time.
+ #
+ # Move _dedupe to be a class attribute, CUSTOM_DEDUPE, with subclasses for filters and
+ # tests setting it to True
+
+ global _PLUGIN_FILTERS
+
+ dedupe = kwargs.pop('_dedupe', True)
+ path_only = kwargs.pop('path_only', False)
+ class_only = kwargs.pop('class_only', False)
+ # Having both path_only and class_only is a coding bug
+ if path_only and class_only:
+ raise AnsibleError('Do not set both path_only and class_only when calling PluginLoader.all()')
+
+ all_matches = []
+ found_in_cache = True
+
+ for i in self._get_paths():
+ all_matches.extend(glob.glob(os.path.join(i, "*.py")))
+
+ loaded_modules = set()
+ for path in sorted(all_matches, key=os.path.basename):
+ name = os.path.splitext(path)[0]
+ basename = os.path.basename(name)
+
+ if basename == '__init__' or basename in _PLUGIN_FILTERS[self.package]:
+ continue
+
+ if dedupe and basename in loaded_modules:
+ continue
+ loaded_modules.add(basename)
+
+ if path_only:
+ yield path
+ continue
+
+ if path not in self._module_cache:
+ try:
+ if self.subdir in ('filter_plugins', 'test_plugins'):
+ # filter and test plugin files can contain multiple plugins
+ # they must have a unique python module name to prevent them from shadowing each other
+ full_name = '{0}_{1}'.format(abs(hash(path)), basename)
+ else:
+ full_name = basename
+ module = self._load_module_source(full_name, path)
+ self._load_config_defs(basename, module, path)
+ except Exception as e:
+ display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
+ continue
+ self._module_cache[path] = module
+ found_in_cache = False
+
+ try:
+ obj = getattr(self._module_cache[path], self.class_name)
+ except AttributeError as e:
+ display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
+ continue
+
+ if self.base_class:
+ # The import path is hardcoded and should be the right place,
+ # so we are not expecting an ImportError.
+ module = __import__(self.package, fromlist=[self.base_class])
+ # Check whether this obj has the required base class.
+ try:
+ plugin_class = getattr(module, self.base_class)
+ except AttributeError:
+ continue
+ if not issubclass(obj, plugin_class):
+ continue
+
+ self._display_plugin_load(self.class_name, basename, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
+
+ if not class_only:
+ try:
+ obj = obj(*args, **kwargs)
+ except TypeError as e:
+ display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
+
+ self._update_object(obj, basename, path)
+ yield obj
+
+
+class Jinja2Loader(PluginLoader):
+ """
+ PluginLoader optimized for Jinja2 plugins
+
+ The filter and test plugins are Jinja2 plugins encapsulated inside of our plugin format.
+ The way the calling code is setup, we need to do a few things differently in the all() method
+ """
+ def find_plugin(self, name, collection_list=None):
+ # Nothing using Jinja2Loader uses this method. We can't use the base class version because
+ # we deduplicate differently than the base class
+ if '.' in name:
+ return super(Jinja2Loader, self).find_plugin(name, collection_list=collection_list)
+
+ raise AnsibleError('No code should call find_plugin for Jinja2Loaders (Not implemented)')
+
+ def get(self, name, *args, **kwargs):
+ # Nothing using Jinja2Loader uses this method. We can't use the base class version because
+ # we deduplicate differently than the base class
+ if '.' in name:
+ return super(Jinja2Loader, self).get(name, *args, **kwargs)
+
+ raise AnsibleError('No code should call get for Jinja2Loaders (Not implemented)')
+
+ def all(self, *args, **kwargs):
+ """
+ Differences with :meth:`PluginLoader.all`:
+
+ * We do not deduplicate ansible plugin names. This is because we don't care about our
+ plugin names, here. We care about the names of the actual jinja2 plugins which are inside
+ of our plugins.
+ * We reverse the order of the list of plugins compared to other PluginLoaders. This is
+ because of how calling code chooses to sync the plugins from the list. It adds all the
+ Jinja2 plugins from one of our Ansible plugins into a dict. Then it adds the Jinja2
+ plugins from the next Ansible plugin, overwriting any Jinja2 plugins that had the same
+ name. This is an encapsulation violation (the PluginLoader should not know about what
+ calling code does with the data) but we're pushing the common code here. We'll fix
+ this in the future by moving more of the common code into this PluginLoader.
+ * We return a list. We could iterate the list instead but that's extra work for no gain because
+ the API receiving this doesn't care. It just needs an iterable.
+ """
+ # We don't deduplicate ansible plugin names. Instead, calling code deduplicates jinja2
+ # plugin names.
+ kwargs['_dedupe'] = False
+
+ # We have to instantiate a list of all plugins so that we can reverse it. We reverse it so
+ # that calling code will deduplicate this correctly.
+ plugins = [p for p in super(Jinja2Loader, self).all(*args, **kwargs)]
+ plugins.reverse()
+
+ return plugins
+
+
+def _load_plugin_filter():
+ filters = defaultdict(frozenset)
+ user_set = False
+ if C.PLUGIN_FILTERS_CFG is None:
+ filter_cfg = '/etc/ansible/plugin_filters.yml'
+ else:
+ filter_cfg = C.PLUGIN_FILTERS_CFG
+ user_set = True
+
+ if os.path.exists(filter_cfg):
+ with open(filter_cfg, 'rb') as f:
+ try:
+ filter_data = from_yaml(f.read())
+ except Exception as e:
+ display.warning(u'The plugin filter file, {0}, was not parsable.'
+ u' Skipping: {1}'.format(filter_cfg, to_text(e)))
+ return filters
+
+ try:
+ version = filter_data['filter_version']
+ except KeyError:
+ display.warning(u'The plugin filter file, {0}, was invalid.'
+ u' Skipping.'.format(filter_cfg))
+ return filters
+
+ # Try to convert for people specifying version as a float instead of string
+ version = to_text(version)
+ version = version.strip()
+
+ if version == u'1.0':
+ # Modules and action plugins share the same blacklist since the difference between the
+ # two isn't visible to the users
+ try:
+ filters['ansible.modules'] = frozenset(filter_data['module_blacklist'])
+ except TypeError:
+ display.warning(u'Unable to parse the plugin filter file {0} as'
+ u' module_blacklist is not a list.'
+ u' Skipping.'.format(filter_cfg))
+ return filters
+ filters['ansible.plugins.action'] = filters['ansible.modules']
+ else:
+ display.warning(u'The plugin filter file, {0}, has a version not recognized by this'
+ u' version of Ansible. Skipping.'.format(filter_cfg))
+ else:
+ if user_set:
+ display.warning(u'The plugin filter file, {0}, does not exist.'
+ u' Skipping.'.format(filter_cfg))
+
+ # Specialcase the stat module as Ansible can run very few things if stat is blacklisted.
+ if 'stat' in filters['ansible.modules']:
+ raise AnsibleError('The stat module was specified in the module blacklist file, {0}, but'
+ ' Ansible will not function without the stat module. Please remove stat'
+ ' from the blacklist.'.format(to_native(filter_cfg)))
+ return filters
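
From the parsing above, the expected filter file (default location /etc/ansible/plugin_filters.yml) can be reconstructed; a sketch with an illustrative module name (note 'stat' must never be listed, per the special case above):

import yaml

FILTER_CFG = """
filter_version: '1.0'
module_blacklist:
  - ec2_facts
"""

data = yaml.safe_load(FILTER_CFG)
assert data['filter_version'] == '1.0'
assert 'ec2_facts' in data['module_blacklist']
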
+
+
+# since we don't want the actual collection loader understanding metadata, we'll do it in an event handler
+def _on_collection_load_handler(collection_name, collection_path):
+ display.vvvv(to_text('Loading collection {0} from {1}'.format(collection_name, collection_path)))
+
+ collection_meta = _get_collection_metadata(collection_name)
+
+ try:
+ if not _does_collection_support_ansible_version(collection_meta.get('requires_ansible', ''), ansible_version):
+ mismatch_behavior = C.config.get_config_value('COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH')
+ message = 'Collection {0} does not support Ansible version {1}'.format(collection_name, ansible_version)
+ if mismatch_behavior == 'warning':
+ display.warning(message)
+ elif mismatch_behavior == 'error':
+ raise AnsibleCollectionUnsupportedVersionError(message)
+ except AnsibleError:
+ raise
+ except Exception as ex:
+ display.warning('Error parsing collection metadata requires_ansible value from collection {0}: {1}'.format(collection_name, ex))
+
+
+def _does_collection_support_ansible_version(requirement_string, ansible_version):
+ if not requirement_string:
+ return True
+
+ if not SpecifierSet:
+ display.warning('packaging Python module unavailable; unable to validate collection Ansible version requirements')
+ return True
+
+ ss = SpecifierSet(requirement_string)
+
+ # ignore prerelease/postrelease/beta/dev flags for simplicity
+ base_ansible_version = Version(ansible_version).base_version
+
+ return ss.contains(base_ansible_version)
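
What _does_collection_support_ansible_version() computes, in isolation (requires the packaging library; versions are examples):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

ss = SpecifierSet('>=2.9.10,<2.11')
base = Version('2.10.4.dev0').base_version  # dev/pre/post flags stripped -> '2.10.4'
print(ss.contains(base))                    # True
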
+
+
+def _configure_collection_loader():
+ if AnsibleCollectionConfig.collection_finder:
+ display.warning('AnsibleCollectionFinder has already been configured')
+ return
+
+ finder = _AnsibleCollectionFinder(C.config.get_config_value('COLLECTIONS_PATHS'), C.config.get_config_value('COLLECTIONS_SCAN_SYS_PATH'))
+ finder._install()
+
+ # this should succeed now
+ AnsibleCollectionConfig.on_collection_load += _on_collection_load_handler
+
+
+# TODO: All of the following is initialization code. It should be moved inside of an initialization
+# function which is called at some point early in the ansible and ansible-playbook CLI startup.
+
+_PLUGIN_FILTERS = _load_plugin_filter()
+
+_configure_collection_loader()
+
+# doc fragments first
+fragment_loader = PluginLoader(
+ 'ModuleDocFragment',
+ 'ansible.plugins.doc_fragments',
+ C.DOC_FRAGMENT_PLUGIN_PATH,
+ 'doc_fragments',
+)
+
+action_loader = PluginLoader(
+ 'ActionModule',
+ 'ansible.plugins.action',
+ C.DEFAULT_ACTION_PLUGIN_PATH,
+ 'action_plugins',
+ required_base_class='ActionBase',
+)
+
+cache_loader = PluginLoader(
+ 'CacheModule',
+ 'ansible.plugins.cache',
+ C.DEFAULT_CACHE_PLUGIN_PATH,
+ 'cache_plugins',
+)
+
+callback_loader = PluginLoader(
+ 'CallbackModule',
+ 'ansible.plugins.callback',
+ C.DEFAULT_CALLBACK_PLUGIN_PATH,
+ 'callback_plugins',
+)
+
+connection_loader = PluginLoader(
+ 'Connection',
+ 'ansible.plugins.connection',
+ C.DEFAULT_CONNECTION_PLUGIN_PATH,
+ 'connection_plugins',
+ aliases={'paramiko': 'paramiko_ssh'},
+ required_base_class='ConnectionBase',
+)
+
+shell_loader = PluginLoader(
+ 'ShellModule',
+ 'ansible.plugins.shell',
+ 'shell_plugins',
+ 'shell_plugins',
+)
+
+module_loader = PluginLoader(
+ '',
+ 'ansible.modules',
+ C.DEFAULT_MODULE_PATH,
+ 'library',
+)
+
+module_utils_loader = PluginLoader(
+ '',
+ 'ansible.module_utils',
+ C.DEFAULT_MODULE_UTILS_PATH,
+ 'module_utils',
+)
+
+# NB: dedicated loader is currently necessary because PS module_utils expects "with subdir" lookup where
+# regular module_utils doesn't. This can be revisited once we have more granular loaders.
+ps_module_utils_loader = PluginLoader(
+ '',
+ 'ansible.module_utils',
+ C.DEFAULT_MODULE_UTILS_PATH,
+ 'module_utils',
+)
+
+lookup_loader = PluginLoader(
+ 'LookupModule',
+ 'ansible.plugins.lookup',
+ C.DEFAULT_LOOKUP_PLUGIN_PATH,
+ 'lookup_plugins',
+ required_base_class='LookupBase',
+)
+
+filter_loader = Jinja2Loader(
+ 'FilterModule',
+ 'ansible.plugins.filter',
+ C.DEFAULT_FILTER_PLUGIN_PATH,
+ 'filter_plugins',
+)
+
+test_loader = Jinja2Loader(
+ 'TestModule',
+ 'ansible.plugins.test',
+ C.DEFAULT_TEST_PLUGIN_PATH,
+ 'test_plugins'
+)
+
+strategy_loader = PluginLoader(
+ 'StrategyModule',
+ 'ansible.plugins.strategy',
+ C.DEFAULT_STRATEGY_PLUGIN_PATH,
+ 'strategy_plugins',
+ required_base_class='StrategyBase',
+)
+
+terminal_loader = PluginLoader(
+ 'TerminalModule',
+ 'ansible.plugins.terminal',
+ C.DEFAULT_TERMINAL_PLUGIN_PATH,
+ 'terminal_plugins',
+ required_base_class='TerminalBase'
+)
+
+vars_loader = PluginLoader(
+ 'VarsModule',
+ 'ansible.plugins.vars',
+ C.DEFAULT_VARS_PLUGIN_PATH,
+ 'vars_plugins',
+)
+
+cliconf_loader = PluginLoader(
+ 'Cliconf',
+ 'ansible.plugins.cliconf',
+ C.DEFAULT_CLICONF_PLUGIN_PATH,
+ 'cliconf_plugins',
+ required_base_class='CliconfBase'
+)
+
+netconf_loader = PluginLoader(
+ 'Netconf',
+ 'ansible.plugins.netconf',
+ C.DEFAULT_NETCONF_PLUGIN_PATH,
+ 'netconf_plugins',
+ required_base_class='NetconfBase'
+)
+
+inventory_loader = PluginLoader(
+ 'InventoryModule',
+ 'ansible.plugins.inventory',
+ C.DEFAULT_INVENTORY_PLUGIN_PATH,
+ 'inventory_plugins'
+)
+
+httpapi_loader = PluginLoader(
+ 'HttpApi',
+ 'ansible.plugins.httpapi',
+ C.DEFAULT_HTTPAPI_PLUGIN_PATH,
+ 'httpapi_plugins',
+ required_base_class='HttpApiBase',
+)
+
+become_loader = PluginLoader(
+ 'BecomeModule',
+ 'ansible.plugins.become',
+ C.BECOME_PLUGIN_PATH,
+ 'become_plugins'
+)
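+
+# Illustrative sketch, not part of upstream: resolving a plugin through one of
+# the loaders above. PluginLoader.get() looks the plugin up by name and
+# instantiates it; extra arguments are passed to the plugin's constructor.
+#
+#     cache = cache_loader.get('memory')
+#     lookup = lookup_loader.get('file', loader=None, templar=None)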
diff --git a/lib/ansible/plugins/lookup/__init__.py b/lib/ansible/plugins/lookup/__init__.py
new file mode 100644
index 00000000..42f0d1cc
--- /dev/null
+++ b/lib/ansible/plugins/lookup/__init__.py
@@ -0,0 +1,125 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+
+from ansible.errors import AnsibleFileNotFound
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.display import Display
+
+display = Display()
+
+__all__ = ['LookupBase']
+
+
+class LookupBase(AnsiblePlugin):
+
+ def __init__(self, loader=None, templar=None, **kwargs):
+
+ super(LookupBase, self).__init__()
+
+ self._loader = loader
+ self._templar = templar
+
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+
+ def get_basedir(self, variables):
+ if 'role_path' in variables:
+ return variables['role_path']
+ else:
+ return self._loader.get_basedir()
+
+ @staticmethod
+ def _flatten(terms):
+ ret = []
+ for term in terms:
+ if isinstance(term, (list, tuple)):
+ ret.extend(term)
+ else:
+ ret.append(term)
+ return ret
+
+ @staticmethod
+ def _combine(a, b):
+ results = []
+ for x in a:
+ for y in b:
+ results.append(LookupBase._flatten([x, y]))
+ return results
+
+ @staticmethod
+ def _flatten_hash_to_list(terms):
+ ret = []
+ for key in terms:
+ ret.append({'key': key, 'value': terms[key]})
+ return ret
+
+ @abstractmethod
+ def run(self, terms, variables=None, **kwargs):
+ """
+ When the playbook specifies a lookup, this method is run. The
+ arguments to the lookup become the arguments to this method. One
+ additional keyword argument named ``variables`` is added to the method
+ call. It contains the variables available to ansible at the time the
+ lookup is templated. For instance::
+
+ "{{ lookup('url', 'https://toshio.fedorapeople.org/one.txt', validate_certs=True) }}"
+
+ would end up calling the lookup plugin named url's run method like this::
+ run(['https://toshio.fedorapeople.org/one.txt'], variables=available_variables, validate_certs=True)
+
+ Lookup plugins can be used within playbooks for looping. When this
+ happens, the first argument is a list containing the terms. Lookup
+ plugins can also be called from within playbooks to return their
+ values into a variable or parameter. If the user passes a string in
+ this case, it is converted into a list.
+
+ Errors encountered during execution should be returned by raising
+ AnsibleError() with a message describing the error.
+
+ Any strings returned by this method that could ever contain non-ascii
+ must be converted into python's unicode type as the strings will be run
+ through jinja2 which has this requirement. You can use::
+
+ from ansible.module_utils._text import to_text
+ result_string = to_text(result_string)
+ """
+ pass
+
+ def find_file_in_search_path(self, myvars, subdir, needle, ignore_missing=False):
+ '''
+ Return a file (needle) in the task's expected search path.
+ '''
+
+ if 'ansible_search_path' in myvars:
+ paths = myvars['ansible_search_path']
+ else:
+ paths = [self.get_basedir(myvars)]
+
+ result = None
+ try:
+ result = self._loader.path_dwim_relative_stack(paths, subdir, needle)
+ except AnsibleFileNotFound:
+ if not ignore_missing:
+ self._display.warning("Unable to find '%s' in expected paths (use -vvvvv to see paths)" % needle)
+
+ return result
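+
+
+# Illustrative sketch, not part of upstream: a minimal lookup plugin built on
+# LookupBase. Saved as e.g. 'lookup_plugins/upper.py' (the file name is
+# hypothetical), it could be invoked as "{{ lookup('upper', 'hello') }}".
+#
+#     from ansible.plugins.lookup import LookupBase
+#
+#     class LookupModule(LookupBase):
+#         def run(self, terms, variables=None, **kwargs):
+#             # return one upper-cased string per term
+#             return [str(term).upper() for term in terms]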
diff --git a/lib/ansible/plugins/lookup/config.py b/lib/ansible/plugins/lookup/config.py
new file mode 100644
index 00000000..ceea80c3
--- /dev/null
+++ b/lib/ansible/plugins/lookup/config.py
@@ -0,0 +1,87 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: config
+ author: Ansible Core Team
+ version_added: "2.5"
+ short_description: Lookup current Ansible configuration values
+ description:
+ - Retrieves the value of an Ansible configuration setting.
+ - You can use C(ansible-config list) to see all available settings.
+ options:
+ _terms:
+ description: The key(s) to look up
+ required: True
+ on_missing:
+ description:
+ - action to take if term is missing from config
+ - Error will raise a fatal error
+ - Skip will just ignore the term
+ - Warn will skip over it but issue a warning
+ default: error
+ type: string
+ choices: ['error', 'skip', 'warn']
+"""
+
+EXAMPLES = """
+ - name: Show configured default become user
+ debug: msg="{{ lookup('config', 'DEFAULT_BECOME_USER')}}"
+
+ - name: print out role paths
+ debug:
+ msg: "These are the configured role paths: {{lookup('config', 'DEFAULT_ROLES_PATH')}}"
+
+ - name: find retry files, skip if missing that key
+ find:
+ paths: "{{lookup('config', 'RETRY_FILES_SAVE_PATH')|default(playbook_dir, True)}}"
+ patterns: "*.retry"
+
+ - name: see the colors
+ debug: msg="{{item}}"
+ loop: "{{lookup('config', 'COLOR_OK', 'COLOR_CHANGED', 'COLOR_SKIP', wantlist=True)}}"
+
+ - name: skip if bad value in var
+ debug: msg="{{ lookup('config', config_in_var, on_missing='skip')}}"
+ var:
+ config_in_var: UNKNOWN
+"""
+
+RETURN = """
+_raw:
+ description:
+ - value(s) of the key(s) in the config
+ type: raw
+"""
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ missing = kwargs.get('on_missing', 'error').lower()
+ if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']:
+ raise AnsibleError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing)
+
+ ret = []
+ for term in terms:
+ if not isinstance(term, string_types):
+ raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
+ try:
+ result = getattr(C, term)
+ if callable(result):
+ raise AnsibleError('Invalid setting "%s" attempted' % term)
+ ret.append(result)
+ except AttributeError:
+ if missing == 'error':
+ raise AnsibleError('Unable to find setting %s' % term)
+ elif missing == 'warn':
+ self._display.warning('Skipping, did not find setting %s' % term)
+ return ret
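+
+
+# Illustrative sketch, not part of upstream: settings are resolved by plain
+# attribute access on ansible.constants, so each term maps to a constant, e.g.
+#
+#     from ansible import constants as C
+#     getattr(C, 'DEFAULT_BECOME_USER')  # -> the configured value
+#     getattr(C, 'NO_SUCH_SETTING')      # -> raises AttributeError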
diff --git a/lib/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
new file mode 100644
index 00000000..af76ed1b
--- /dev/null
+++ b/lib/ansible/plugins/lookup/csvfile.py
@@ -0,0 +1,166 @@
+# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: csvfile
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ version_added: "1.5"
+ short_description: read data from a TSV or CSV file
+ description:
+ - The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
+ The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
+ options:
+ col:
+ description: column to return (0-indexed).
+ default: "1"
+ default:
+ description: what to return if the value is not found in the file.
+ default: ''
+ delimiter:
+ description: field separator in the file, for a tab you can specify "TAB" or "\t".
+ default: TAB
+ file:
+ description: name of the CSV/TSV file to open.
+ default: ansible.csv
+ encoding:
+ description: Encoding (character set) of the used CSV file.
+ default: utf-8
+ version_added: "2.1"
+ notes:
+ - The default is for TSV files (tab delimited), not CSV (comma delimited); yes, the name is misleading.
+"""
+
+EXAMPLES = """
+- name: Match 'Li' on the first column, return the second column (0 based index)
+ debug: msg="The atomic number of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=,') }}"
+
+- name: msg="Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)"
+ debug: msg="The atomic mass of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
+
+- name: Define Values From CSV File
+ set_fact:
+ loop_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=1') }}"
+ int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=2') }}"
+ int_mask: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=3') }}"
+ int_name: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=4') }}"
+ local_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=5') }}"
+ neighbor_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=6') }}"
+ neigh_int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=7') }}"
+ delegate_to: localhost
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - value(s) stored in file column
+ type: list
+ elements: str
+"""
+
+import codecs
+import csv
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.six import PY2
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableSequence
+
+
+class CSVRecoder:
+ """
+ Iterator that reads an encoded stream and reencodes the input to UTF-8
+ """
+ def __init__(self, f, encoding='utf-8'):
+ self.reader = codecs.getreader(encoding)(f)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self.reader).encode("utf-8")
+
+ next = __next__ # For Python 2
+
+
+class CSVReader:
+ """
+ A CSV reader which will iterate over lines in the CSV file "f",
+ which is encoded in the given encoding.
+ """
+
+ def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
+ if PY2:
+ f = CSVRecoder(f, encoding)
+ else:
+ f = codecs.getreader(encoding)(f)
+
+ self.reader = csv.reader(f, dialect=dialect, **kwds)
+
+ def __next__(self):
+ row = next(self.reader)
+ return [to_text(s) for s in row]
+
+ next = __next__ # For Python 2
+
+ def __iter__(self):
+ return self
+
+
+class LookupModule(LookupBase):
+
+ def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
+
+ try:
+ f = open(filename, 'rb')
+ creader = CSVReader(f, delimiter=to_native(delimiter), encoding=encoding)
+
+ for row in creader:
+ if len(row) and row[0] == key:
+ return row[int(col)]
+ except Exception as e:
+ raise AnsibleError("csvfile: %s" % to_native(e))
+
+ return dflt
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+
+ for term in terms:
+ params = term.split()
+ key = params[0]
+
+ paramvals = {
+ 'col': "1", # column to return
+ 'default': None,
+ 'delimiter': "TAB",
+ 'file': 'ansible.csv',
+ 'encoding': 'utf-8',
+ }
+
+ # parameters specified?
+ try:
+ for param in params[1:]:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+
+ if paramvals['delimiter'] == 'TAB':
+ paramvals['delimiter'] = "\t"
+
+ lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+ var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
+ if var is not None:
+ if isinstance(var, MutableSequence):
+ for v in var:
+ ret.append(v)
+ else:
+ ret.append(var)
+ return ret
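+
+
+# Illustrative sketch, not part of upstream: using CSVReader directly, assuming
+# a file 'elements.csv' containing a line like "Li,3,6.94":
+#
+#     with open('elements.csv', 'rb') as f:
+#         for row in CSVReader(f, delimiter=','):
+#             if row and row[0] == 'Li':
+#                 print(row[1])  # -> '3'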
diff --git a/lib/ansible/plugins/lookup/dict.py b/lib/ansible/plugins/lookup/dict.py
new file mode 100644
index 00000000..95480a33
--- /dev/null
+++ b/lib/ansible/plugins/lookup/dict.py
@@ -0,0 +1,76 @@
+# (c) 2014, Kent R. Spillner <kspillner@acm.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: dict
+ version_added: "1.5"
+ short_description: returns key/value pair items from dictionaries
+ description:
+ - Takes dictionaries as input and returns a list with each item in the list being a dictionary with 'key' and 'value' as
+ keys to the previous dictionary's structure.
+ options:
+ _terms:
+ description:
+ - A list of dictionaries
+ required: True
+"""
+
+EXAMPLES = """
+vars:
+ users:
+ alice:
+ name: Alice Appleworth
+ telephone: 123-456-7890
+ bob:
+ name: Bob Bananarama
+ telephone: 987-654-3210
+tasks:
+ # with predefined vars
+ - name: Print phone records
+ debug:
+ msg: "User {{ item.key }} is {{ item.value.name }} ({{ item.value.telephone }})"
+ loop: "{{ lookup('dict', users) }}"
+ # with inline dictionary
+ - name: show dictionary
+ debug:
+ msg: "{{item.key}}: {{item.value}}"
+ with_dict: {a: 1, b: 2, c: 3}
+ # Items from loop can be used in when: statements
+ - name: set_fact when alice in key
+ set_fact:
+ alice_exists: true
+ loop: "{{ lookup('dict', users) }}"
+ when: "'alice' in item.key"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of composed dictionaries with key and value
+ type: list
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ # FIXME: can remove once with_ special case is removed
+ if not isinstance(terms, list):
+ terms = [terms]
+
+ results = []
+ for term in terms:
+ # Expect any type of Mapping, notably hostvars
+ if not isinstance(term, Mapping):
+ raise AnsibleError("with_dict expects a dict")
+
+ results.extend(self._flatten_hash_to_list(term))
+ return results
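+
+
+# Illustrative sketch, not part of upstream: the transformation performed by
+# _flatten_hash_to_list above, e.g.
+#
+#     {'a': 1, 'b': 2}  ->  [{'key': 'a', 'value': 1}, {'key': 'b', 'value': 2}]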
diff --git a/lib/ansible/plugins/lookup/env.py b/lib/ansible/plugins/lookup/env.py
new file mode 100644
index 00000000..bb0fae5f
--- /dev/null
+++ b/lib/ansible/plugins/lookup/env.py
@@ -0,0 +1,60 @@
+# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: env
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ version_added: "0.9"
+ short_description: Read the value of environment variables
+ description:
+ - Allows you to query the environment variables available on the
+ controller when you invoked Ansible.
+ options:
+ _terms:
+ description:
+ - Environment variable or list of them to look up the values for.
+ required: True
+ notes:
+ - The module returns an empty string if the environment variable is not
+ defined. This makes it impossible to differentiate between the case where the
+ variable is not defined and the case where the variable is defined but
+ contains an empty string.
+ - The C(default) filter requires its second parameter to be set to C(True)
+ in order to set a default value when the variable is not
+ defined (see examples).
+"""
+
+EXAMPLES = """
+- name: Basic usage
+ debug:
+ msg: "'{{ lookup('env', 'HOME') }}' is the HOME environment variable."
+
+- name: Example how to set default value if the variable is not defined
+ debug:
+ msg: "'{{ lookup('env', 'USR') | default('nobody', True) }}' is the user."
+"""
+
+RETURN = """
+ _list:
+ description:
+ - Values from the environment variables.
+ type: list
+"""
+
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils import py3compat
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ ret = []
+
+ for term in terms:
+ var = term.split()[0]
+ ret.append(py3compat.environ.get(var, ''))
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py
new file mode 100644
index 00000000..7b426a6c
--- /dev/null
+++ b/lib/ansible/plugins/lookup/file.py
@@ -0,0 +1,86 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: file
+ author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read file contents
+ description:
+ - This lookup returns the contents from a file on the Ansible controller's file system.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ rstrip:
+ description: whether or not to remove whitespace from the ending of the looked-up file
+ type: bool
+ required: False
+ default: True
+ lstrip:
+ description: whether or not to remove whitespace from the beginning of the looked-up file
+ type: bool
+ required: False
+ default: False
+ notes:
+ - if read in variable context, the file can be interpreted as YAML if the content is valid to the parser.
+ - this lookup does not understand 'globbing', use the fileglob lookup instead.
+"""
+
+EXAMPLES = """
+- debug: msg="the value of foo.txt is {{lookup('file', '/etc/foo.txt') }}"
+
+- name: display multiple file contents
+ debug: var=item
+ with_file:
+ - "/path/to/foo.txt"
+ - "bar.txt" # will be looked in files/ dir relative to play or in role
+ - "/path/to/biz.txt"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - content of file(s)
+ type: list
+ elements: str
+"""
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+
+ for term in terms:
+ display.debug("File lookup term: %s" % term)
+
+ # Find the file in the expected search path
+ lookupfile = self.find_file_in_search_path(variables, 'files', term)
+ display.vvvv(u"File lookup using %s as file" % lookupfile)
+ try:
+ if lookupfile:
+ b_contents, show_data = self._loader._get_file_contents(lookupfile)
+ contents = to_text(b_contents, errors='surrogate_or_strict')
+ if kwargs.get('lstrip', False):
+ contents = contents.lstrip()
+ if kwargs.get('rstrip', True):
+ contents = contents.rstrip()
+ ret.append(contents)
+ else:
+ raise AnsibleParserError()
+ except AnsibleParserError:
+ raise AnsibleError("could not locate file in lookup: %s" % term)
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py
new file mode 100644
index 00000000..aa5d7d34
--- /dev/null
+++ b/lib/ansible/plugins/lookup/fileglob.py
@@ -0,0 +1,82 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: fileglob
+ author: Michael DeHaan
+ version_added: "1.4"
+ short_description: list files matching a pattern
+ description:
+ - Matches all files in a single directory, non-recursively, that match a pattern.
+ It calls Python's "glob" library.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ notes:
+ - Patterns are only supported on files, not directories or paths.
+ - Matching is against local system files on the Ansible controller.
+ To iterate a list of files on a remote node, use the M(ansible.builtin.find) module.
+ - Returns a string list of paths joined by commas, or an empty list if no files match. For a 'true list' pass C(wantlist=True) to the lookup.
+"""
+
+EXAMPLES = """
+- name: Display paths of all .txt files in dir
+ debug: msg={{ lookup('fileglob', '/my/path/*.txt') }}
+
+- name: Copy each file over that matches the given pattern
+ copy:
+ src: "{{ item }}"
+ dest: "/etc/fooapp/"
+ owner: "root"
+ mode: 0600
+ with_fileglob:
+ - "/playbooks/files/fooapp/*"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of files
+ type: list
+ elements: path
+"""
+
+import os
+import glob
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleFileNotFound
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ ret = []
+ for term in terms:
+ term_file = os.path.basename(term)
+ found_paths = []
+ if term_file != term:
+ found_paths.append(self.find_file_in_search_path(variables, 'files', os.path.dirname(term)))
+ else:
+ # no dir, just file, so use paths and 'files' paths instead
+ if 'ansible_search_path' in variables:
+ paths = variables['ansible_search_path']
+ else:
+ paths = [self.get_basedir(variables)]
+ for p in paths:
+ found_paths.append(os.path.join(p, 'files'))
+ found_paths.append(p)
+
+ for dwimmed_path in found_paths:
+ if dwimmed_path:
+ globbed = glob.glob(to_bytes(os.path.join(dwimmed_path, term_file), errors='surrogate_or_strict'))
+ ret.extend(to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g))
+ if ret:
+ break
+ return ret
diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
new file mode 100644
index 00000000..54bc6849
--- /dev/null
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -0,0 +1,176 @@
+# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: first_found
+ author: Seth Vidal (!UNKNOWN) <skvidal@fedoraproject.org>
+ version_added: historical
+ short_description: return first file found from list
+ description:
+ - this lookup checks a list of files and paths and returns the full path to the first combination found.
+ - As with all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
+ to the containing role/play/include/etc's location.
+ - The list of files has precedence over the paths searched.
+ For example, if a task in a role has a 'file1' in the play's relative path, that one will be used and a 'file2' in the role's relative path will not.
+ - Either a list of files C(_terms) or a key C(files) with a list of files is required for this plugin to operate.
+ notes:
+ - This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has C(files) and C(paths).
+ options:
+ _terms:
+ description: list of file names
+ files:
+ description: list of file names
+ paths:
+ description: list of paths in which to look for the files
+ skip:
+ type: boolean
+ default: False
+ description: Return an empty list if no file is found, instead of an error.
+"""
+
+EXAMPLES = """
+- name: show first existing file or ignore if none do
+ debug: msg={{lookup('first_found', findme, errors='ignore')}}
+ vars:
+ findme:
+ - "/path/to/foo.txt"
+ - "bar.txt" # will be looked in files/ dir relative to role and/or play
+ - "/path/to/biz.txt"
+
+- name: |
+ include tasks only if files exist. Note the use of query() to return
+ a blank list for the loop if no files are found.
+ import_tasks: '{{ item }}'
+ vars:
+ params:
+ files:
+ - path/tasks.yaml
+ - path/other_tasks.yaml
+ loop: "{{ query('first_found', params, errors='ignore') }}"
+
+- name: |
+ copy first existing file found to /some/file,
+ looking in relative directories from where the task is defined and
+ including any play objects that contain it
+ copy: src={{lookup('first_found', findme)}} dest=/some/file
+ vars:
+ findme:
+ - foo
+ - "{{inventory_hostname}}"
+ - bar
+
+- name: same copy but specific paths
+ copy: src={{lookup('first_found', params)}} dest=/some/file
+ vars:
+ params:
+ files:
+ - foo
+ - "{{inventory_hostname}}"
+ - bar
+ paths:
+ - /tmp/production
+ - /tmp/staging
+
+- name: INTERFACES | Create Ansible header for /etc/network/interfaces
+ template:
+ src: "{{ lookup('first_found', findme)}}"
+ dest: "/etc/foo.conf"
+ vars:
+ findme:
+ - "{{ ansible_virtualization_type }}_foo.conf"
+ - "default_foo.conf"
+
+- name: read vars from first file found, use 'vars/' relative subdir
+ include_vars: "{{lookup('first_found', params)}}"
+ vars:
+ params:
+ files:
+ - '{{ansible_distribution}}.yml'
+ - '{{ansible_os_family}}.yml'
+ - default.yml
+ paths:
+ - 'vars'
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - path to file found
+ type: list
+ elements: path
+"""
+import os
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleFileNotFound, AnsibleLookupError, AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ anydict = False
+ skip = False
+
+ for term in terms:
+ if isinstance(term, dict):
+ anydict = True
+
+ total_search = []
+ if anydict:
+ for term in terms:
+ if isinstance(term, dict):
+
+ files = term.get('files', [])
+ paths = term.get('paths', [])
+ skip = boolean(term.get('skip', False), strict=False)
+
+ filelist = files
+ if isinstance(files, string_types):
+ files = files.replace(',', ' ')
+ files = files.replace(';', ' ')
+ filelist = files.split(' ')
+
+ pathlist = paths
+ if paths:
+ if isinstance(paths, string_types):
+ paths = paths.replace(',', ' ')
+ paths = paths.replace(':', ' ')
+ paths = paths.replace(';', ' ')
+ pathlist = paths.split(' ')
+
+ if not pathlist:
+ total_search = filelist
+ else:
+ for path in pathlist:
+ for fn in filelist:
+ f = os.path.join(path, fn)
+ total_search.append(f)
+ else:
+ total_search.append(term)
+ else:
+ total_search = self._flatten(terms)
+
+ for fn in total_search:
+ try:
+ fn = self._templar.template(fn)
+ except (AnsibleUndefinedVariable, UndefinedError):
+ continue
+
+ # get subdir if set by task executor, default to files otherwise
+ subdir = getattr(self, '_subdir', 'files')
+ path = None
+ path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
+ if path is not None:
+ return [path]
+ if skip:
+ return []
+ raise AnsibleLookupError("No file was found when using first_found. Use errors='ignore' to allow this task to be skipped if no "
+ "files are found")
diff --git a/lib/ansible/plugins/lookup/indexed_items.py b/lib/ansible/plugins/lookup/indexed_items.py
new file mode 100644
index 00000000..967e3999
--- /dev/null
+++ b/lib/ansible/plugins/lookup/indexed_items.py
@@ -0,0 +1,52 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: indexed_items
+ author: Michael DeHaan
+ version_added: "1.3"
+ short_description: rewrites lists to return 'indexed items'
+ description:
+ - use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go
+ - any list given will be transformed, with each resulting element having its previous position in item.0 and its value in item.1
+ options:
+ _terms:
+ description: list of items
+ required: True
+"""
+
+EXAMPLES = """
+- name: indexed loop demo
+ debug:
+ msg: "at array position {{ item.0 }} there is a value {{ item.1 }}"
+ with_indexed_items:
+ - "{{ some_list }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - list with each item.0 giving you the position and item.1 the value
+ type: list
+ elements: list
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def __init__(self, basedir=None, **kwargs):
+ self.basedir = basedir
+
+ def run(self, terms, variables, **kwargs):
+
+ if not isinstance(terms, list):
+ raise AnsibleError("with_indexed_items expects a list")
+
+ items = self._flatten(terms)
+ return list(zip(range(len(items)), items))
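+
+
+# Illustrative sketch, not part of upstream: the rewrite performed above, e.g.
+#
+#     ['a', 'b', 'c']  ->  [(0, 'a'), (1, 'b'), (2, 'c')]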
diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
new file mode 100644
index 00000000..b53468b3
--- /dev/null
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -0,0 +1,165 @@
+# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: ini
+ author: Yannig Perre (!UNKNOWN) <yannig.perre(at)gmail.com>
+ version_added: "2.0"
+ short_description: read data from a ini file
+ description:
+ - "The ini lookup reads the contents of a file in INI format C(key1=value1).
+ This plugin retrieves the value on the right side after the equal sign C('=') of a given section C([section])."
+ - "You can also read a property file which - in this case - does not contain section."
+ options:
+ _terms:
+ description: The key(s) to look up
+ required: True
+ type:
+ description: Type of the file. 'properties' refers to the Java properties files.
+ default: 'ini'
+ choices: ['ini', 'properties']
+ file:
+ description: Name of the file to load.
+ default: ansible.ini
+ section:
+ default: global
+ description: Section where to lookup the key.
+ re:
+ default: False
+ type: boolean
+ description: Flag to indicate if the key supplied is a regexp.
+ encoding:
+ default: utf-8
+ description: Text encoding to use.
+ default:
+ description: Return value if the key is not in the ini file.
+ default: ''
+"""
+
+EXAMPLES = """
+- debug: msg="User in integration is {{ lookup('ini', 'user section=integration file=users.ini') }}"
+
+- debug: msg="User in production is {{ lookup('ini', 'user section=production file=users.ini') }}"
+
+- debug: msg="user.name is {{ lookup('ini', 'user.name type=properties file=user.properties') }}"
+
+- debug:
+ msg: "{{ item }}"
+ with_ini:
+ - '.* section=section1 file=test.ini re=True'
+"""
+
+RETURN = """
+_raw:
+ description:
+ - value(s) of the key(s) in the ini file
+ type: list
+ elements: str
+"""
+import os
+import re
+from io import StringIO
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common._collections_compat import MutableSequence
+from ansible.plugins.lookup import LookupBase
+
+
+def _parse_params(term):
+ '''Safely split parameter term to preserve spaces'''
+
+ keys = ['key', 'type', 'section', 'file', 're', 'default', 'encoding']
+ params = {}
+ for k in keys:
+ params[k] = ''
+
+ thiskey = 'key'
+ for idp, phrase in enumerate(term.split()):
+ for k in keys:
+ if ('%s=' % k) in phrase:
+ thiskey = k
+ if idp == 0 or not params[thiskey]:
+ params[thiskey] = phrase
+ else:
+ params[thiskey] += ' ' + phrase
+
+ rparams = [params[x] for x in keys if params[x]]
+ return rparams
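+
+# Illustrative sketch, not part of upstream: _parse_params keeps spaces inside
+# values, unlike a naive term.split(), e.g.
+#
+#     _parse_params('user section=My Section file=users.ini')
+#     # -> ['user', 'section=My Section', 'file=users.ini']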
+
+
+class LookupModule(LookupBase):
+
+ def get_value(self, key, section, dflt, is_regexp):
+ # Retrieve all values from a section using a regexp
+ if is_regexp:
+ return [v for k, v in self.cp.items(section) if re.match(key, k)]
+ value = None
+ # Retrieve a single value
+ try:
+ value = self.cp.get(section, key)
+ except configparser.NoOptionError:
+ return dflt
+ return value
+
+ def run(self, terms, variables=None, **kwargs):
+
+ self.cp = configparser.ConfigParser()
+
+ ret = []
+ for term in terms:
+ params = _parse_params(term)
+ key = params[0]
+
+ paramvals = {
+ 'file': 'ansible.ini',
+ 're': False,
+ 'default': None,
+ 'section': "global",
+ 'type': "ini",
+ 'encoding': 'utf-8',
+ }
+
+ # parameters specified?
+ try:
+ for param in params[1:]:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' %
+ name)
+ paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+
+ # Retrieve file path
+ path = self.find_file_in_search_path(variables, 'files',
+ paramvals['file'])
+
+ # Create StringIO later used to parse ini
+ config = StringIO()
+ # Special case for java properties
+ if paramvals['type'] == "properties":
+ config.write(u'[java_properties]\n')
+ paramvals['section'] = 'java_properties'
+
+ # Open file using encoding
+ contents, show_data = self._loader._get_file_contents(path)
+ contents = to_text(contents, errors='surrogate_or_strict',
+ encoding=paramvals['encoding'])
+ config.write(contents)
+ config.seek(0, os.SEEK_SET)
+
+ self.cp.readfp(config)
+ var = self.get_value(key, paramvals['section'],
+ paramvals['default'], paramvals['re'])
+ if var is not None:
+ if isinstance(var, MutableSequence):
+ for v in var:
+ ret.append(v)
+ else:
+ ret.append(var)
+ return ret
diff --git a/lib/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
new file mode 100644
index 00000000..a9f521cc
--- /dev/null
+++ b/lib/ansible/plugins/lookup/inventory_hostnames.py
@@ -0,0 +1,74 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2013, Steven Dossett <sdossett@panath.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: inventory_hostnames
+ author:
+ - Michael DeHaan
+ - Steven Dossett (!UNKNOWN) <sdossett@panath.com>
+ version_added: "1.3"
+ short_description: list of inventory hosts matching a host pattern
+ description:
+ - "This lookup understands 'host patterns' as used by the `hosts:` keyword in plays
+ and can return a list of matching hosts from inventory"
+ notes:
+ - this is only worthwhile for 'hostname patterns'; otherwise it is easier to loop over the group/group_names variables.
+"""
+
+EXAMPLES = """
+- name: show all the hosts matching the pattern, i.e. all but the group www
+ debug:
+ msg: "{{ item }}"
+ with_inventory_hostnames:
+ - all:!www
+"""
+
+RETURN = """
+ _hostnames:
+ description: list of hostnames that matched the host pattern in inventory
+ type: list
+"""
+
+from ansible.inventory.manager import split_host_pattern, order_patterns
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.helpers import deduplicate_list
+
+
+class LookupModule(LookupBase):
+
+ def get_hosts(self, variables, pattern):
+ hosts = []
+ if pattern[0] in ('!', '&'):
+ obj = pattern[1:]
+ else:
+ obj = pattern
+
+ if obj in variables['groups']:
+ hosts = variables['groups'][obj]
+ elif obj in variables['groups']['all']:
+ hosts = [obj]
+ return hosts
+
+ def run(self, terms, variables=None, **kwargs):
+
+ host_list = []
+
+ for term in terms:
+ patterns = order_patterns(split_host_pattern(term))
+
+ for p in patterns:
+ that = self.get_hosts(variables, p)
+ if p.startswith("!"):
+ host_list = [h for h in host_list if h not in that]
+ elif p.startswith("&"):
+ host_list = [h for h in host_list if h in that]
+ else:
+ host_list.extend(that)
+
+ # return unique list
+ return deduplicate_list(host_list)
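+
+
+# Illustrative sketch, not part of upstream: with groups
+# {'all': ['web1', 'db1'], 'www': ['web1']}, the pattern 'all:!www'
+# first collects ['web1', 'db1'] and then removes 'web1', yielding ['db1'].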
diff --git a/lib/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py
new file mode 100644
index 00000000..3410e746
--- /dev/null
+++ b/lib/ansible/plugins/lookup/items.py
@@ -0,0 +1,73 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: items
+ author: Michael DeHaan
+ version_added: historical
+ short_description: list of items
+ description:
+ - this lookup returns a list of items given to it; if any of the top level items is also a list it will flatten it, but it will not recurse
+ notes:
+ - this is the standard lookup used for loops in most examples
+ - check out the 'flattened' lookup for recursive flattening
+ - if you do not want flattening nor any other transformation look at the 'list' lookup.
+ options:
+ _terms:
+ description: list of items
+ required: True
+"""
+
+EXAMPLES = """
+- name: "loop through list"
+ debug:
+ msg: "An item: {{ item }}"
+ with_items:
+ - 1
+ - 2
+ - 3
+
+- name: add several users
+ user:
+ name: "{{ item }}"
+ groups: "wheel"
+ state: present
+ with_items:
+ - testuser1
+ - testuser2
+
+- name: "loop through list from a variable"
+ debug:
+ msg: "An item: {{ item }}"
+ with_items: "{{ somelist }}"
+
+- name: more complex items to add several users
+ user:
+ name: "{{ item.name }}"
+ uid: "{{ item.uid }}"
+ groups: "{{ item.groups }}"
+ state: present
+ with_items:
+ - { name: testuser1, uid: 1002, groups: "wheel, staff" }
+ - { name: testuser2, uid: 1003, groups: staff }
+
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - the input list, flattened one level
+ type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+
+ return self._flatten(terms)
diff --git a/lib/ansible/plugins/lookup/lines.py b/lib/ansible/plugins/lookup/lines.py
new file mode 100644
index 00000000..b7fb875b
--- /dev/null
+++ b/lib/ansible/plugins/lookup/lines.py
@@ -0,0 +1,62 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: lines
+ author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read lines from command
+ description:
+ - Run one or more commands and split the output into lines, returning them as a list
+ options:
+ _terms:
+ description: command(s) to run
+ required: True
+ notes:
+ - Like all lookups, this runs on the Ansible controller and is unaffected by other keywords such as 'become'.
+ If you need to use different permissions, you must change the command or run Ansible as another user.
+ - Alternatively, you can use a shell/command task that runs against localhost and registers the result.
+"""
+
+EXAMPLES = """
+- name: We could read the file directly, but this shows output from command
+ debug: msg="{{ item }} is an output line from running cat on /etc/motd"
+ with_lines: cat /etc/motd
+
+- name: More useful example of looping over a command result
+ shell: "/usr/bin/frobnicate {{ item }}"
+ with_lines:
+ - "/usr/bin/frobnications_per_host --param {{ inventory_hostname }}"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - lines of stdout from command
+ type: list
+ elements: str
+"""
+
+import subprocess
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ ret = []
+ for term in terms:
+ p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+ if p.returncode == 0:
+ ret.extend([to_text(l) for l in stdout.splitlines()])
+ else:
+ raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
+ return ret
diff --git a/lib/ansible/plugins/lookup/list.py b/lib/ansible/plugins/lookup/list.py
new file mode 100644
index 00000000..e57cdd64
--- /dev/null
+++ b/lib/ansible/plugins/lookup/list.py
@@ -0,0 +1,44 @@
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: list
+ author: Ansible Core Team
+ version_added: "2.0"
+ short_description: simply returns what it is given.
+ description:
+ - this is mostly a noop, to be used as a with_list loop when you don't want the content transformed in any way.
+"""
+
+EXAMPLES = """
+- name: unlike with_items you will get 3 items from this loop, the 2nd one being a list
+ debug: var=item
+ with_list:
+ - 1
+ - [2,3]
+ - 4
+"""
+
+RETURN = """
+ _list:
+ description: basically the same as you fed in
+ type: list
+ elements: raw
+"""
+
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+ if not isinstance(terms, Sequence):
+ raise AnsibleError("with_list expects a list")
+ return terms
diff --git a/lib/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
new file mode 100644
index 00000000..45304b7b
--- /dev/null
+++ b/lib/ansible/plugins/lookup/nested.py
@@ -0,0 +1,85 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: nested
+ version_added: "1.1"
+ short_description: composes a list with nested elements of other lists
+ description:
+ - Takes the input lists and returns a list with elements that are lists composed of the elements of the input lists
+ options:
+ _raw:
+ description:
+ - a set of lists
+ required: True
+"""
+
+EXAMPLES = """
+- name: give users access to multiple databases
+ mysql_user:
+ name: "{{ item[0] }}"
+ priv: "{{ item[1] }}.*:ALL"
+ append_privs: yes
+ password: "foo"
+ with_nested:
+ - [ 'alice', 'bob' ]
+ - [ 'clientdb', 'employeedb', 'providerdb' ]
+# As with the case of 'with_items' above, you can use previously defined variables.:
+
+- name: here, 'users' contains the above list of employees
+ mysql_user:
+ name: "{{ item[0] }}"
+ priv: "{{ item[1] }}.*:ALL"
+ append_privs: yes
+ password: "foo"
+ with_nested:
+ - "{{ users }}"
+ - [ 'clientdb', 'employeedb', 'providerdb' ]
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list composed of lists pairing the elements of the input lists
+ type: list
+"""
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+
+ def _lookup_variables(self, terms, variables):
+ results = []
+ for x in terms:
+ try:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader, fail_on_undefined=True)
+ except UndefinedError as e:
+ raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self._lookup_variables(terms, variables)
+
+ my_list = terms[:]
+ my_list.reverse()
+ result = []
+ if len(my_list) == 0:
+ raise AnsibleError("with_nested requires at least one element in the nested list")
+ result = my_list.pop()
+ while len(my_list) > 0:
+ result2 = self._combine(result, my_list.pop())
+ result = result2
+ new_result = []
+ for x in result:
+ new_result.append(self._flatten(x))
+ return new_result
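+
+
+# Illustrative sketch, not part of upstream: the pairing performed above, e.g.
+#
+#     terms [['a', 'b'], [1, 2]]  ->  [['a', 1], ['a', 2], ['b', 1], ['b', 2]]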
diff --git a/lib/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py
new file mode 100644
index 00000000..81b5d500
--- /dev/null
+++ b/lib/ansible/plugins/lookup/password.py
@@ -0,0 +1,343 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2013, Javier Candeira <javier@candeira.com>
+# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: password
+ version_added: "1.1"
+ author:
+ - Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ - Javier Candeira (!UNKNOWN) <javier@candeira.com>
+ - Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
+ short_description: retrieve or generate a random password, stored in a file
+ description:
+ - Generates a random plaintext password and stores it in a file at a given filepath.
+ - If the file exists previously, it will retrieve its contents, behaving just like with_file.
+ - 'Usage of variables like C("{{ inventory_hostname }}") in the filepath can be used to set up random passwords per host,
+ which simplifies password management in C("host_vars") variables.'
+ - A special case is using /dev/null as a path. The password lookup will generate a new random password each time,
+ but will not write it to /dev/null. This can be used when you need a password without storing it on the controller.
+ options:
+ _terms:
+ description:
+ - path to the file that stores/will store the passwords
+ required: True
+ encrypt:
+ description:
+ - Which hash scheme to use to encrypt the returned password; should be one of the hash schemes from C(passlib.hash): md5_crypt, bcrypt, sha256_crypt, sha512_crypt.
+ - If not provided, the password will be returned in plain text.
+ - Note that the password is always stored as plain text; only the returned password is encrypted.
+ - Encrypt also forces saving the salt value for idempotence.
+ - Note that before 2.6 this option was incorrectly labeled as a boolean for a long time.
+ chars:
+ version_added: "1.4"
+ description:
+ - Define comma separated list of names that compose a custom character set in the generated passwords.
+ - 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9 and punctuation (". , : - _").'
+ - "They can be either parts of Python's string module attributes (ascii_letters,digits, etc) or are used literally ( :, -)."
+ - "To enter comma use two commas ',,' somewhere - preferably at the end. Quotes and double quotes are not supported."
+ type: string
+ length:
+ description: The length of the generated password.
+ default: 20
+ type: integer
+ notes:
+ - A great alternative to the password lookup plugin,
+ if you don't need to generate random passwords on a per-host basis,
+ would be to use Vault in playbooks.
+ Read the documentation there and consider using it first;
+ it will be more desirable for most applications.
+ - If the file already exists, no data will be written to it.
+ If the file has contents, those contents will be read in as the password.
+ Empty files cause the password to return as an empty string.
+ - 'As with all lookups, this runs on the Ansible controller as the user running the playbook, and "become" does not apply;
+ the target file must be readable by the playbook user, or, if it does not exist,
+ the playbook user must have sufficient privileges to create it.
+ (So, for example, attempts to write into areas such as /etc will fail unless the entire playbook is being run as root).'
+"""
+
+EXAMPLES = """
+- name: create a mysql user with a random password
+ mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('password', 'credentials/' + client + '/' + tier + '/' + role + '/mysqlpassword length=15') }}"
+ priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+
+- name: create a mysql user with a random password using only ascii letters
+ mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters') }}"
+ priv: '{{ client }}_{{ tier }}_{{ role }}.*:ALL'
+
+- name: create a mysql user with an 8 character random password using only digits
+ mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('password', '/tmp/passwordfile length=8 chars=digits') }}"
+ priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+
+- name: create a mysql user with a random password using many different char sets
+ mysql_user:
+ name: "{{ client }}"
+ password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}"
+ priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - a password
+ type: list
+ elements: str
+"""
+
+import os
+import string
+import time
+import shutil
+import hashlib
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.parsing.splitter import parse_kv
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.encrypt import do_encrypt, random_password, random_salt
+from ansible.utils.path import makedirs_safe
+
+
+DEFAULT_LENGTH = 20
+VALID_PARAMS = frozenset(('length', 'encrypt', 'chars'))
+
+
+def _parse_parameters(term):
+ """Hacky parsing of params
+
+ See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
+ and the first_found lookup for how we want to fix this later.
+ """
+ first_split = term.split(' ', 1)
+ if len(first_split) <= 1:
+ # Only a single argument given, therefore it's a path
+ relpath = term
+ params = dict()
+ else:
+ relpath = first_split[0]
+ params = parse_kv(first_split[1])
+ if '_raw_params' in params:
+ # Spaces in the path?
+ relpath = u' '.join((relpath, params['_raw_params']))
+ del params['_raw_params']
+
+ # Check that we parsed the params correctly
+ if not term.startswith(relpath):
+ # Likely, the user had a non-parameter string following a parameter.
+ # Reject this as a user typo
+ raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
+ # No _raw_params means we already found the complete path when
+ # we split it initially
+
+ # Check for invalid parameters. Probably a user typo
+ invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
+ if invalid_params:
+ raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
+
+ # Set defaults
+ params['length'] = int(params.get('length', DEFAULT_LENGTH))
+ params['encrypt'] = params.get('encrypt', None)
+
+ params['chars'] = params.get('chars', None)
+ if params['chars']:
+ tmp_chars = []
+ if u',,' in params['chars']:
+ tmp_chars.append(u',')
+ tmp_chars.extend(c for c in params['chars'].replace(u',,', u',').split(u',') if c)
+ params['chars'] = tmp_chars
+ else:
+ # Default chars for password
+ params['chars'] = [u'ascii_letters', u'digits', u".,:-_"]
+
+ return relpath, params
+
+
+def _read_password_file(b_path):
+ """Read the contents of a password file and return it
+ :arg b_path: A byte string containing the path to the password file
+ :returns: a text string containing the contents of the password file or
+ None if no password file was present.
+ """
+ content = None
+
+ if os.path.exists(b_path):
+ with open(b_path, 'rb') as f:
+ b_content = f.read().rstrip()
+ content = to_text(b_content, errors='surrogate_or_strict')
+
+ return content
+
+
+def _gen_candidate_chars(characters):
+ '''Generate a string containing all valid chars as defined by ``characters``
+
+ :arg characters: A list of character specs. The character specs are
+ shorthand names for sets of characters like 'digits', 'ascii_letters',
+ or 'punctuation' or a string to be included verbatim.
+
+ The values of each char spec can be:
+
+ * a name of an attribute in the 'strings' module ('digits' for example).
+ The value of the attribute will be added to the candidate chars.
+ * a string of characters. If the string isn't an attribute in 'string'
+ module, the string will be directly added to the candidate chars.
+
+ For example::
+
+ characters=['digits', '?|']``
+
+ will match ``string.digits`` and add all ascii digits. ``'?|'`` will add
+ the question mark and pipe characters directly. Return will be the string::
+
+ u'0123456789?|'
+ '''
+ chars = []
+ for chars_spec in characters:
+ # getattr from string expands things like "ascii_letters" and "digits"
+ # into a set of characters.
+ chars.append(to_text(getattr(string, to_native(chars_spec), chars_spec),
+ errors='strict'))
+ chars = u''.join(chars).replace(u'"', u'').replace(u"'", u'')
+ return chars
+
+
+def _parse_content(content):
+ '''parse our password data format into password and salt
+
+ :arg content: The data read from the file
+ :returns: password and salt
+ '''
+ password = content
+ salt = None
+
+ salt_slug = u' salt='
+ try:
+ sep = content.rindex(salt_slug)
+ except ValueError:
+ # No salt
+ pass
+ else:
+ salt = password[sep + len(salt_slug):]
+ password = content[:sep]
+
+ return password, salt
+
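+# Illustrative sketch, not part of upstream: the stored format parsed above,
+# e.g.
+#
+#     _parse_content(u'secret salt=s0m3s4lt')  # -> (u'secret', u's0m3s4lt')
+#     _parse_content(u'secret')                # -> (u'secret', None)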
+
+def _format_content(password, salt, encrypt=None):
+ """Format the password and salt for saving
+ :arg password: the plaintext password to save
+ :arg salt: the salt to use when encrypting a password
+ :arg encrypt: Which method the user requests that this password is encrypted.
+ Note that the password is saved in clear. Encrypt just tells us if we
+ must save the salt value for idempotence. Defaults to None.
+ :returns: a text string containing the formatted information
+
+ .. warning:: Passwords are saved in clear. This is because the playbooks
+ expect to get cleartext passwords from this lookup.
+ """
+ if not encrypt and not salt:
+ return password
+
+ # At this point, the calling code should have assured us that there is a salt value.
+ if not salt:
+ raise AnsibleAssertionError('_format_content was called with encryption requested but no salt value')
+
+ return u'%s salt=%s' % (password, salt)
+
+
+def _write_password_file(b_path, content):
+ b_pathdir = os.path.dirname(b_path)
+ makedirs_safe(b_pathdir, mode=0o700)
+
+ with open(b_path, 'wb') as f:
+ os.chmod(b_path, 0o600)
+ b_content = to_bytes(content, errors='surrogate_or_strict') + b'\n'
+ f.write(b_content)
+
+
+def _get_lock(b_path):
+ """Get the lock for writing password file."""
+ first_process = False
+ b_pathdir = os.path.dirname(b_path)
+ lockfile_name = to_bytes("%s.ansible_lockfile" % hashlib.sha1(b_path).hexdigest())
+ lockfile = os.path.join(b_pathdir, lockfile_name)
+ if not os.path.exists(lockfile) and b_path != to_bytes('/dev/null'):
+ try:
+ makedirs_safe(b_pathdir, mode=0o700)
+ fd = os.open(lockfile, os.O_CREAT | os.O_EXCL)
+ os.close(fd)
+ first_process = True
+ except OSError as e:
+ if e.strerror != 'File exists':
+ raise
+
+ counter = 0
+ # if the lock is held by another process, wait until it is released
+ while os.path.exists(lockfile) and not first_process:
+ time.sleep(2 ** counter)
+ if counter >= 2:
+ raise AnsibleError("Password lookup cannot get the lock in 7 seconds, abort..."
+ "This may caused by un-removed lockfile"
+ "you can manually remove it from controller machine at %s and try again" % lockfile)
+ counter += 1
+ return first_process, lockfile
+
+
+def _release_lock(lockfile):
+ """Release the lock so other processes can read the password file."""
+ if os.path.exists(lockfile):
+ os.remove(lockfile)
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ ret = []
+
+ for term in terms:
+ relpath, params = _parse_parameters(term)
+ path = self._loader.path_dwim(relpath)
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ chars = _gen_candidate_chars(params['chars'])
+
+ changed = None
+ # make sure only one process finishes all the job first
+ first_process, lockfile = _get_lock(b_path)
+
+ content = _read_password_file(b_path)
+
+ if content is None or b_path == to_bytes('/dev/null'):
+ plaintext_password = random_password(params['length'], chars)
+ salt = None
+ changed = True
+ else:
+ plaintext_password, salt = _parse_content(content)
+
+ if params['encrypt'] and not salt:
+ changed = True
+ salt = random_salt()
+
+ if changed and b_path != to_bytes('/dev/null'):
+ content = _format_content(plaintext_password, salt, encrypt=params['encrypt'])
+ _write_password_file(b_path, content)
+
+ if first_process:
+ # let other processes continue
+ _release_lock(lockfile)
+
+ if params['encrypt']:
+ password = do_encrypt(plaintext_password, params['encrypt'], salt=salt)
+ ret.append(password)
+ else:
+ ret.append(plaintext_password)
+
+ return ret
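+
+# Hedged usage sketch (playbook side), assuming the documented lookup syntax:
+#
+#   - name: create (or reuse) a random password stored under credentials/db
+#     debug:
+#       msg: "{{ lookup('password', 'credentials/db length=16 chars=ascii_letters,digits') }}"
+#
+# The first run generates and writes the password; later runs read the same
+# file back, which is what makes the lookup idempotent across runs.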
diff --git a/lib/ansible/plugins/lookup/pipe.py b/lib/ansible/plugins/lookup/pipe.py
new file mode 100644
index 00000000..a640a0cf
--- /dev/null
+++ b/lib/ansible/plugins/lookup/pipe.py
@@ -0,0 +1,76 @@
+# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ lookup: pipe
+ author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
+ version_added: "0.9"
+ short_description: read output from a command
+ description:
+ - Run a command and return the output.
+ options:
+ _terms:
+ description: command(s) to run.
+ required: True
+ notes:
+ - Like all lookups this runs on the Ansible controller and is unaffected by other keywords, such as become,
+ so if you need different permissions you must change the command or run Ansible as another user.
+ - Alternatively you can use a shell/command task that runs against localhost and registers the result.
+ - Pipe lookup internally invokes Popen with shell=True (this is required and intentional).
+ This type of invocation is considered a security issue if appropriate care is not taken to sanitize any user-provided or variable input.
+ It is strongly recommended to pass user or variable input through the quote filter before using it with the pipe lookup.
+ See the examples section for this.
+ Read more about this in the L(Bandit B602 docs,https://bandit.readthedocs.io/en/latest/plugins/b602_subprocess_popen_with_shell_equals_true.html).
+"""
+
+EXAMPLES = r"""
+- name: raw result of running date command"
+ debug:
+ msg: "{{ lookup('pipe', 'date') }}"
+
+- name: Always use quote filter to make sure your variables are safe to use with shell
+ debug:
+ msg: "{{ lookup('pipe', 'getent passwd ' + myuser | quote ) }}"
+"""
+
+RETURN = r"""
+ _string:
+ description:
+ - stdout from command
+ type: list
+ elements: str
+"""
+
+import subprocess
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ ret = []
+ for term in terms:
+ '''
+ http://docs.python.org/2/library/subprocess.html#popen-constructor
+
+ The shell argument (which defaults to False) specifies whether to use the
+ shell as the program to execute. If shell is True, it is recommended to pass
+ args as a string rather than as a sequence
+
+ https://github.com/ansible/ansible/issues/6550
+ '''
+ term = str(term)
+
+ p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+ if p.returncode == 0:
+ ret.append(stdout.decode("utf-8").rstrip())
+ else:
+ raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
+ return ret
diff --git a/lib/ansible/plugins/lookup/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py
new file mode 100644
index 00000000..348a41d6
--- /dev/null
+++ b/lib/ansible/plugins/lookup/random_choice.py
@@ -0,0 +1,53 @@
+# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: random_choice
+ author: Michael DeHaan
+ version_added: "1.1"
+ short_description: return random element from list
+ description:
+ - The 'random_choice' feature can be used to pick something at random. While it is not a load balancer (there are modules for those),
+ it can somewhat be used as a poor man's load balancer in a MacGyver-like situation.
+ - At a more basic level, it can be used to add chaos and excitement to otherwise predictable automation environments.
+"""
+
+EXAMPLES = """
+- name: Magic 8 ball for MUDs
+ debug:
+ msg: "{{ item }}"
+ with_random_choice:
+ - "go through the door"
+ - "drink from the goblet"
+ - "press the red button"
+ - "do nothing"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - random item
+ type: raw
+"""
+import random
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, inject=None, **kwargs):
+
+ ret = terms
+ if terms:
+ try:
+ ret = [random.choice(terms)]
+ except Exception as e:
+ raise AnsibleError("Unable to choose random term: %s" % to_native(e))
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py
new file mode 100644
index 00000000..ddf65676
--- /dev/null
+++ b/lib/ansible/plugins/lookup/sequence.py
@@ -0,0 +1,268 @@
+# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: sequence
+ author: Jayson Vantuyl (!UNKNOWN) <jayson@aggressive.ly>
+ version_added: "1.0"
+ short_description: generate a list based on a number sequence
+ description:
+ - generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
+ to increment the sequence, and an optional printf-style format string.
+ - 'Arguments can be specified as key=value pair strings, or in a shortcut form of the arguments string: [start-]end[/stride][:format].'
+ - 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0o600).'
+ - Starting at version 1.9.2, negative strides are allowed.
+ - Generated items are strings. Use Jinja2 filters to convert items to the preferred type, e.g. ``{{ 1 + item|int }}``.
+ - See also the Jinja2 ``range`` function as an alternative.
+ options:
+ start:
+ description: number at which to start the sequence
+ default: 0
+ type: integer
+ end:
+ description: number at which to end the sequence, don't use this with count
+ type: integer
+ default: 0
+ count:
+ description: number of elements in the sequence, this is not to be used with end
+ type: integer
+ default: 0
+ stride:
+ description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
+ type: integer
+ format:
+ description: printf-style format string in which each generated number is returned
+"""
+
+EXAMPLES = """
+- name: create some test users
+ user:
+ name: "{{ item }}"
+ state: present
+ groups: "evens"
+ with_sequence: start=0 end=32 format=testuser%02x
+
+- name: create a series of directories with even numbers for some reason
+ file:
+ dest: "/var/stuff/{{ item }}"
+ state: directory
+ with_sequence: start=4 end=16 stride=2
+
+- name: a simpler way to use the sequence plugin, create 4 groups
+ group:
+ name: "group{{ item }}"
+ state: present
+ with_sequence: count=4
+
+- name: the final countdown
+ debug: msg={{item}} seconds to detonation
+ with_sequence: end=0 start=10
+
+- name: Use of variable
+ debug:
+ msg: "{{ item }}"
+ with_sequence: start=1 end="{{ end_at }}"
+ vars:
+ - end_at: 10
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list containing generated sequence of items
+ type: list
+ elements: str
+"""
+
+from re import compile as re_compile, IGNORECASE
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import xrange
+from ansible.parsing.splitter import parse_kv
+from ansible.plugins.lookup import LookupBase
+
+
+# shortcut format
+NUM = "(0?x?[0-9a-f]+)"
+SHORTCUT = re_compile(
+ "^(" + # Group 0
+ NUM + # Group 1: Start
+ "-)?" +
+ NUM + # Group 2: End
+ "(/" + # Group 3
+ NUM + # Group 4: Stride
+ ")?" +
+ "(:(.+))?$", # Group 5, Group 6: Format String
+ IGNORECASE
+)
+
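+# How the shortcut regex decomposes terms (sketch; groups() has 7 slots and
+# the code below unpacks start, end, stride and format from slots 2, 3, 5 and 7):
+#
+#   SHORTCUT.match("5").groups()          -> (None, None, '5', None, None, None, None)
+#   SHORTCUT.match("2-10/2").groups()     -> ('2-', '2', '10', '/2', '2', None, None)
+#   SHORTCUT.match("4:host%02d").groups() -> (None, None, '4', None, None, ':host%02d', 'host%02d')
+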
+
+class LookupModule(LookupBase):
+ """
+ sequence lookup module
+
+ Used to generate some sequence of items. Takes arguments in two forms.
+
+ The simple / shortcut form is:
+
+ [start-]end[/stride][:format]
+
+ As indicated by the brackets: start, stride, and format string are all
+ optional. The format string is in the style of printf. This can be used
+ to pad with zeros, format in hexadecimal, etc. All of the numerical values
+ can be specified in octal (e.g. 0o664) or hexadecimal (e.g. 0x3f8).
+ Negative numbers are not supported in the shortcut form.
+
+ Some examples:
+
+ 5 -> ["1","2","3","4","5"]
+ 5-8 -> ["5", "6", "7", "8"]
+ 2-10/2 -> ["2", "4", "6", "8", "10"]
+ 4:host%02d -> ["host01","host02","host03","host04"]
+
+ The standard Ansible key-value form is accepted as well. For example:
+
+ start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
+
+ This format takes an alternate form of "end" called "count", which counts
+ a given number of entries from the starting value. For example:
+
+ count=5 -> ["1", "2", "3", "4", "5"]
+ start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
+ start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
+ start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
+
+ The count option is mostly useful for avoiding off-by-one errors and errors
+ calculating the number of entries in a sequence when a stride is specified.
+ """
+
+ def reset(self):
+ """set sensible defaults"""
+ self.start = 1
+ self.count = None
+ self.end = None
+ self.stride = 1
+ self.format = "%d"
+
+ def parse_kv_args(self, args):
+ """parse key-value style arguments"""
+ for arg in ["start", "end", "count", "stride"]:
+ try:
+ arg_raw = args.pop(arg, None)
+ if arg_raw is None:
+ continue
+ arg_cooked = int(arg_raw, 0)
+ setattr(self, arg, arg_cooked)
+ except ValueError:
+ raise AnsibleError(
+ "can't parse arg %s=%r as integer"
+ % (arg, arg_raw)
+ )
+ if 'format' in args:
+ self.format = args.pop("format")
+ if args:
+ raise AnsibleError(
+ "unrecognized arguments to with_sequence: %r"
+ % args.keys()
+ )
+
+ def parse_simple_args(self, term):
+ """parse the shortcut forms, return True/False"""
+ match = SHORTCUT.match(term)
+ if not match:
+ return False
+
+ _, start, end, _, stride, _, format = match.groups()
+
+ if start is not None:
+ try:
+ start = int(start, 0)
+ except ValueError:
+ raise AnsibleError("can't parse start=%s as integer" % start)
+ if end is not None:
+ try:
+ end = int(end, 0)
+ except ValueError:
+ raise AnsibleError("can't parse end=%s as integer" % end)
+ if stride is not None:
+ try:
+ stride = int(stride, 0)
+ except ValueError:
+ raise AnsibleError("can't parse stride=%s as integer" % stride)
+
+ if start is not None:
+ self.start = start
+ if end is not None:
+ self.end = end
+ if stride is not None:
+ self.stride = stride
+ if format is not None:
+ self.format = format
+
+ return True
+
+ def sanity_check(self):
+ if self.count is None and self.end is None:
+ raise AnsibleError("must specify count or end in with_sequence")
+ elif self.count is not None and self.end is not None:
+ raise AnsibleError("can't specify both count and end in with_sequence")
+ elif self.count is not None:
+ # convert count to end
+ if self.count != 0:
+ self.end = self.start + self.count * self.stride - 1
+ else:
+ self.start = 0
+ self.end = 0
+ self.stride = 0
+ del self.count
+ if self.stride > 0 and self.end < self.start:
+ raise AnsibleError("to count backwards make stride negative")
+ if self.stride < 0 and self.end > self.start:
+ raise AnsibleError("to count forward don't make stride negative")
+ if self.format.count('%') != 1:
+ raise AnsibleError("bad formatting string: %s" % self.format)
+
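+ # Worked example of the count -> end conversion in sanity_check() above:
+ # start=0 count=5 stride=2 gives end = 0 + 5*2 - 1 = 9, so
+ # generate_sequence() iterates xrange(0, 10, 2) -> 0, 2, 4, 6, 8.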
+ def generate_sequence(self):
+ if self.stride >= 0:
+ adjust = 1
+ else:
+ adjust = -1
+ numbers = xrange(self.start, self.end + adjust, self.stride)
+
+ for i in numbers:
+ try:
+ formatted = self.format % i
+ yield formatted
+ except (ValueError, TypeError):
+ raise AnsibleError(
+ "problem formatting %r with %r" % (i, self.format)
+ )
+
+ def run(self, terms, variables, **kwargs):
+ results = []
+
+ for term in terms:
+ try:
+ self.reset() # clear out things for this iteration
+ try:
+ if not self.parse_simple_args(term):
+ self.parse_kv_args(parse_kv(term))
+ except AnsibleError:
+ raise
+ except Exception as e:
+ raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
+
+ self.sanity_check()
+ if self.stride != 0:
+ results.extend(self.generate_sequence())
+ except AnsibleError:
+ raise
+ except Exception as e:
+ raise AnsibleError(
+ "unknown error generating sequence: %s" % e
+ )
+
+ return results
diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py
new file mode 100644
index 00000000..b05856bf
--- /dev/null
+++ b/lib/ansible/plugins/lookup/subelements.py
@@ -0,0 +1,169 @@
+# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: subelements
+ author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
+ version_added: "1.4"
+ short_description: traverse nested key from a list of dictionaries
+ description:
+ - Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those records.
+ options:
+ _terms:
+ description: tuple of list of dictionaries and dictionary key to extract
+ required: True
+ skip_missing:
+ default: False
+ description:
+ - Lookup accepts this flag from a dictionary as optional. See Example section for more information.
+ - If set to C(True), the lookup plugin will skip the list items that do not contain the given subkey.
+ - If set to C(False), the plugin will yield an error and complain about the missing subkey.
+"""
+
+EXAMPLES = """
+- name: show var structure as it is needed for example to make sense
+ hosts: all
+ vars:
+ users:
+ - name: alice
+ authorized:
+ - /tmp/alice/onekey.pub
+ - /tmp/alice/twokey.pub
+ mysql:
+ password: mysql-password
+ hosts:
+ - "%"
+ - "127.0.0.1"
+ - "::1"
+ - "localhost"
+ privs:
+ - "*.*:SELECT"
+ - "DB1.*:ALL"
+ groups:
+ - wheel
+ - name: bob
+ authorized:
+ - /tmp/bob/id_rsa.pub
+ mysql:
+ password: other-mysql-password
+ hosts:
+ - "db1"
+ privs:
+ - "*.*:SELECT"
+ - "DB2.*:ALL"
+ tasks:
+ - name: Set authorized ssh key, extracting just that data from 'users'
+ authorized_key:
+ user: "{{ item.0.name }}"
+ key: "{{ lookup('file', item.1) }}"
+ with_subelements:
+ - "{{ users }}"
+ - authorized
+
+ - name: Setup MySQL users, given the mysql hosts and privs subkey lists
+ mysql_user:
+ name: "{{ item.0.name }}"
+ password: "{{ item.0.mysql.password }}"
+ host: "{{ item.1 }}"
+ priv: "{{ item.0.mysql.privs | join('/') }}"
+ with_subelements:
+ - "{{ users }}"
+ - mysql.hosts
+
+ - name: list groups for users that have them, don't error if groups key is missing
+ debug: var=item
+ loop: "{{ q('subelements', users, 'groups', {'skip_missing': True}) }}"
+"""
+
+RETURN = """
+_list:
+ description: list of subelements extracted
+"""
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+FLAGS = ('skip_missing',)
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ def _raise_terms_error(msg=""):
+ raise AnsibleError(
+ "subelements lookup expects a list of two or three items, " + msg)
+
+ terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
+
+ # check lookup terms - check number of terms
+ if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
+ _raise_terms_error()
+
+ # first term should be a list (or dict), second a string holding the subkey
+ if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
+ _raise_terms_error("first a dict or a list, second a string pointing to the subkey")
+ subelements = terms[1].split(".")
+
+ if isinstance(terms[0], dict): # convert to list:
+ if terms[0].get('skipped', False) is not False:
+ # the registered result was completely skipped
+ return []
+ elementlist = []
+ for key in terms[0]:
+ elementlist.append(terms[0][key])
+ else:
+ elementlist = terms[0]
+
+ # check for optional flags in third term
+ flags = {}
+ if len(terms) == 3:
+ flags = terms[2]
+ if not isinstance(flags, dict) or not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
+ _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
+
+ # build_items
+ ret = []
+ for item0 in elementlist:
+ if not isinstance(item0, dict):
+ raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
+ if item0.get('skipped', False) is not False:
+ # this particular item is to be skipped
+ continue
+
+ skip_missing = boolean(flags.get('skip_missing', False), strict=False)
+ subvalue = item0
+ lastsubkey = False
+ sublist = []
+ for subkey in subelements:
+ if subkey == subelements[-1]:
+ lastsubkey = True
+ if subkey not in subvalue:
+ if skip_missing:
+ continue
+ else:
+ raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
+ if not lastsubkey:
+ if not isinstance(subvalue[subkey], dict):
+ if skip_missing:
+ continue
+ else:
+ raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
+ else:
+ subvalue = subvalue[subkey]
+ else: # lastsubkey
+ if not isinstance(subvalue[subkey], list):
+ raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
+ else:
+ sublist = subvalue.pop(subkey, [])
+ for item1 in sublist:
+ ret.append((item0, item1))
+
+ return ret
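+
+# Hedged trace of the nested-key walk above, using the 'mysql.hosts' term from
+# the EXAMPLES: subelements becomes ['mysql', 'hosts']; for each user the walk
+# descends item0['mysql'] (a dict, since it is not the last subkey) and then
+# pops the 'hosts' list, yielding pairs such as:
+#
+#   (alice_dict, '%'), (alice_dict, '127.0.0.1'), ..., (bob_dict, 'db1')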
diff --git a/lib/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
new file mode 100644
index 00000000..dd4a2749
--- /dev/null
+++ b/lib/ansible/plugins/lookup/template.py
@@ -0,0 +1,114 @@
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2012-17, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: template
+ author: Michael DeHaan
+ version_added: "0.9"
+ short_description: retrieve contents of file after templating with Jinja2
+ description:
+ - Returns a list of strings; for each template in the list of templates you pass in, returns a string containing the results of processing that template.
+ options:
+ _terms:
+ description: list of files to template
+ convert_data:
+ type: bool
+ description: whether to convert YAML into data. If False, strings that are YAML will be left untouched.
+ variable_start_string:
+ description: The string marking the beginning of a print statement.
+ default: '{{'
+ version_added: '2.8'
+ type: str
+ variable_end_string:
+ description: The string marking the end of a print statement.
+ default: '}}'
+ version_added: '2.8'
+ type: str
+"""
+
+EXAMPLES = """
+- name: show templating results
+ debug:
+ msg: "{{ lookup('template', './some_template.j2') }}"
+
+- name: show templating results with different variable start and end string
+ debug:
+ msg: "{{ lookup('template', './some_template.j2', variable_start_string='[%', variable_end_string='%]') }}"
+"""
+
+RETURN = """
+_raw:
+ description: file(s) content after templating
+ type: list
+ elements: raw
+"""
+
+from copy import deepcopy
+import os
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.template import generate_ansible_template_vars
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ convert_data_p = kwargs.get('convert_data', True)
+ lookup_template_vars = kwargs.get('template_vars', {})
+ ret = []
+
+ variable_start_string = kwargs.get('variable_start_string', None)
+ variable_end_string = kwargs.get('variable_end_string', None)
+
+ for term in terms:
+ display.debug("File lookup term: %s" % term)
+
+ lookupfile = self.find_file_in_search_path(variables, 'templates', term)
+ display.vvvv("File lookup using %s as file" % lookupfile)
+ if lookupfile:
+ b_template_data, show_data = self._loader._get_file_contents(lookupfile)
+ template_data = to_text(b_template_data, errors='surrogate_or_strict')
+
+ # set jinja2 internal search path for includes
+ searchpath = variables.get('ansible_search_path', [])
+ if searchpath:
+ # our search paths aren't actually the proper ones for jinja includes.
+ # We want to search into the 'templates' subdir of each search path in
+ # addition to our original search paths.
+ newsearchpath = []
+ for p in searchpath:
+ newsearchpath.append(os.path.join(p, 'templates'))
+ newsearchpath.append(p)
+ searchpath = newsearchpath
+ searchpath.insert(0, os.path.dirname(lookupfile))
+
+ # The template will have access to all existing variables,
+ # plus some added by ansible (e.g., template_{path,mtime}),
+ # plus anything passed to the lookup with the template_vars=
+ # argument.
+ vars = deepcopy(variables)
+ vars.update(generate_ansible_template_vars(lookupfile))
+ vars.update(lookup_template_vars)
+
+ # do the templating
+ with self._templar.set_temporary_context(variable_start_string=variable_start_string,
+ variable_end_string=variable_end_string,
+ available_variables=vars, searchpath=searchpath):
+ res = self._templar.template(template_data, preserve_trailing_newlines=True,
+ convert_data=convert_data_p, escape_backslashes=False)
+
+ ret.append(res)
+ else:
+ raise AnsibleError("the template file %s could not be found for the lookup" % term)
+
+ return ret
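+
+# Sketch of the search path rewrite above, assuming
+# ansible_search_path = ['/play', '/role']:
+#
+#   ['<dirname of lookupfile>',
+#    '/play/templates', '/play', '/role/templates', '/role']
+#
+# so Jinja2 include/import statements resolve against templates/ dirs first.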
diff --git a/lib/ansible/plugins/lookup/together.py b/lib/ansible/plugins/lookup/together.py
new file mode 100644
index 00000000..a20e205a
--- /dev/null
+++ b/lib/ansible/plugins/lookup/together.py
@@ -0,0 +1,67 @@
+# (c) 2013, Bradley Young <young.bradley@gmail.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: together
+ author: Bradley Young (!UNKNOWN) <young.bradley@gmail.com>
+ version_added: '1.3'
+ short_description: merges lists into synchronized list
+ description:
+ - Creates a list with the iterated elements of the supplied lists
+ - "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
+ - This is basically the same as the 'zip_longest' filter and Python function
+ - Any 'unbalanced' elements will be substituted with 'None'
+ options:
+ _terms:
+ description: list of lists to merge
+ required: True
+"""
+
+EXAMPLES = """
+- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
+ debug:
+ msg: "{{ item.0 }} and {{ item.1 }}"
+ with_together:
+ - ['a', 'b', 'c', 'd']
+ - [1, 2, 3, 4]
+"""
+
+RETURN = """
+ _list:
+ description: synchronized list
+ type: list
+ elements: list
+"""
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import zip_longest
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+ """
+ Transpose a list of arrays:
+ [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
+ Replace any empty spots in 2nd array with None:
+ [1, 2], [3] -> [1, 3], [2, None]
+ """
+
+ def _lookup_variables(self, terms):
+ results = []
+ for x in terms:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self._lookup_variables(terms)
+
+ my_list = terms[:]
+ if len(my_list) == 0:
+ raise AnsibleError("with_together requires at least one element in each list")
+
+ return [self._flatten(x) for x in zip_longest(*my_list, fillvalue=None)]
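+
+# Example of the transpose, assuming two uneven input lists:
+#
+#   terms [['a', 'b', 'c'], [1, 2]] -> [['a', 1], ['b', 2], ['c', None]]
+#
+# zip_longest pads the shorter list with the fillvalue (None) and
+# self._flatten turns each resulting tuple into a flat list.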
diff --git a/lib/ansible/plugins/lookup/unvault.py b/lib/ansible/plugins/lookup/unvault.py
new file mode 100644
index 00000000..234a52a7
--- /dev/null
+++ b/lib/ansible/plugins/lookup/unvault.py
@@ -0,0 +1,63 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: unvault
+ author: Ansible Core Team
+ version_added: "2.10"
+ short_description: read vaulted file(s) contents
+ description:
+ - This lookup returns the contents from vaulted (or not) file(s) on the Ansible controller's file system.
+ options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+ notes:
+ - This lookup does not understand 'globbing' nor shell environment variables.
+"""
+
+EXAMPLES = """
+- debug: msg="the value of foo.txt is {{lookup('unvault', '/etc/foo.txt')|to_string }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - content of file(s) as bytes
+ type: list
+ elements: raw
+"""
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ self.set_options(direct=kwargs)
+
+ ret = []
+
+ for term in terms:
+ display.debug("Unvault lookup term: %s" % term)
+
+ # Find the file in the expected search path
+ lookupfile = self.find_file_in_search_path(variables, 'files', term)
+ display.vvvv(u"Unvault lookup found %s" % lookupfile)
+ if lookupfile:
+ actual_file = self._loader.get_real_file(lookupfile, decrypt=True)
+ with open(actual_file, 'rb') as f:
+ b_contents = f.read()
+ ret.append(b_contents)
+ else:
+ raise AnsibleParserError('Unable to find file matching "%s" ' % term)
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py
new file mode 100644
index 00000000..1c24ebf8
--- /dev/null
+++ b/lib/ansible/plugins/lookup/url.py
@@ -0,0 +1,221 @@
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+lookup: url
+author: Brian Coca (@bcoca)
+version_added: "1.9"
+short_description: return contents from URL
+description:
+ - Returns the content of the URL requested to be used as data in play.
+options:
+ _terms:
+ description: urls to query
+ validate_certs:
+ description: Flag to control SSL certificate validation
+ type: boolean
+ default: True
+ split_lines:
+ description: Flag to control if content is returned as a list of lines or as a single text blob
+ type: boolean
+ default: True
+ use_proxy:
+ description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
+ type: boolean
+ default: True
+ username:
+ description: Username to use for HTTP authentication.
+ type: string
+ version_added: "2.8"
+ password:
+ description: Password to use for HTTP authentication.
+ type: string
+ version_added: "2.8"
+ headers:
+ description: HTTP request headers
+ type: dictionary
+ default: {}
+ version_added: "2.9"
+ force:
+ description: Whether or not to set "cache-control" header with value "no-cache"
+ type: boolean
+ version_added: "2.10"
+ default: False
+ vars:
+ - name: ansible_lookup_url_force
+ env:
+ - name: ANSIBLE_LOOKUP_URL_FORCE
+ ini:
+ - section: url_lookup
+ key: force
+ timeout:
+ description: How long to wait for the server to send data before giving up
+ type: float
+ version_added: "2.10"
+ default: 10
+ vars:
+ - name: ansible_lookup_url_timeout
+ env:
+ - name: ANSIBLE_LOOKUP_URL_TIMEOUT
+ ini:
+ - section: url_lookup
+ key: timeout
+ http_agent:
+ description: User-Agent to use in the request
+ type: string
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_agent
+ env:
+ - name: ANSIBLE_LOOKUP_URL_AGENT
+ ini:
+ - section: url_lookup
+ key: agent
+ force_basic_auth:
+ description: Force basic authentication
+ type: boolean
+ version_added: "2.10"
+ default: False
+ vars:
+ - name: ansible_lookup_url_force_basic_auth
+ env:
+ - name: ANSIBLE_LOOKUP_URL_FORCE_BASIC_AUTH
+ ini:
+ - section: url_lookup
+ key: force_basic_auth
+ follow_redirects:
+ description: One of urllib2, all/yes, safe, or none, determining how redirects are followed; see RedirectHandlerFactory for more information
+ type: string
+ version_added: "2.10"
+ default: 'urllib2'
+ vars:
+ - name: ansible_lookup_url_follow_redirects
+ env:
+ - name: ANSIBLE_LOOKUP_URL_FOLLOW_REDIRECTS
+ ini:
+ - section: url_lookup
+ key: follow_redirects
+ use_gssapi:
+ description: Use GSSAPI handler of requests
+ type: boolean
+ version_added: "2.10"
+ default: False
+ vars:
+ - name: ansible_lookup_url_use_gssapi
+ env:
+ - name: ANSIBLE_LOOKUP_URL_USE_GSSAPI
+ ini:
+ - section: url_lookup
+ key: use_gssapi
+ unix_socket:
+ description: String of file system path to unix socket file to use when establishing connection to the provided url
+ type: string
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_unix_socket
+ env:
+ - name: ANSIBLE_LOOKUP_URL_UNIX_SOCKET
+ ini:
+ - section: url_lookup
+ key: unix_socket
+ ca_path:
+ description: String of file system path to CA cert bundle to use
+ type: string
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_ca_path
+ env:
+ - name: ANSIBLE_LOOKUP_URL_CA_PATH
+ ini:
+ - section: url_lookup
+ key: ca_path
+ unredirected_headers:
+ description: A list of headers to not attach on a redirected request
+ type: list
+ version_added: "2.10"
+ vars:
+ - name: ansible_lookup_url_unredir_headers
+ env:
+ - name: ANSIBLE_LOOKUP_URL_UNREDIR_HEADERS
+ ini:
+ - section: url_lookup
+ key: unredirected_headers
+"""
+
+EXAMPLES = """
+- name: url lookup splits lines by default
+ debug: msg="{{item}}"
+ loop: "{{ lookup('url', 'https://github.com/gremlin.keys', wantlist=True) }}"
+
+- name: display ip ranges
+ debug: msg="{{ lookup('url', 'https://ip-ranges.amazonaws.com/ip-ranges.json', split_lines=False) }}"
+
+- name: url lookup using authentication
+ debug: msg="{{ lookup('url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2') }}"
+
+- name: url lookup using basic authentication
+ debug: msg="{{ lookup('url', 'https://some.private.site.com/file.txt', username='bob', password='hunter2', force_basic_auth='True') }}"
+
+- name: url lookup using headers
+ debug: msg="{{ lookup('url', 'https://some.private.site.com/api/service', headers={'header1':'value1', 'header2':'value2'} ) }}"
+"""
+
+RETURN = """
+ _list:
+ description: list of list of lines or content of url(s)
+ type: list
+ elements: str
+"""
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ ret = []
+ for term in terms:
+ display.vvvv("url lookup connecting to %s" % term)
+ try:
+ response = open_url(term, validate_certs=self.get_option('validate_certs'),
+ use_proxy=self.get_option('use_proxy'),
+ url_username=self.get_option('username'),
+ url_password=self.get_option('password'),
+ headers=self.get_option('headers'),
+ force=self.get_option('force'),
+ timeout=self.get_option('timeout'),
+ http_agent=self.get_option('http_agent'),
+ force_basic_auth=self.get_option('force_basic_auth'),
+ follow_redirects=self.get_option('follow_redirects'),
+ use_gssapi=self.get_option('use_gssapi'),
+ unix_socket=self.get_option('unix_socket'),
+ ca_path=self.get_option('ca_path'),
+ unredirected_headers=self.get_option('unredirected_headers'))
+ except HTTPError as e:
+ raise AnsibleError("Received HTTP error for %s : %s" % (term, to_native(e)))
+ except URLError as e:
+ raise AnsibleError("Failed lookup url for %s : %s" % (term, to_native(e)))
+ except SSLValidationError as e:
+ raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, to_native(e)))
+ except ConnectionError as e:
+ raise AnsibleError("Error connecting to %s: %s" % (term, to_native(e)))
+
+ if self.get_option('split_lines'):
+ for line in response.read().splitlines():
+ ret.append(to_text(line))
+ else:
+ ret.append(to_text(response.read()))
+ return ret
diff --git a/lib/ansible/plugins/lookup/varnames.py b/lib/ansible/plugins/lookup/varnames.py
new file mode 100644
index 00000000..32862951
--- /dev/null
+++ b/lib/ansible/plugins/lookup/varnames.py
@@ -0,0 +1,80 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: varnames
+ author: Ansible Core Team
+ version_added: "2.8"
+ short_description: Lookup matching variable names
+ description:
+ - Retrieves a list of matching Ansible variable names.
+ options:
+ _terms:
+ description: List of Python regex patterns to search for in variable names.
+ required: True
+"""
+
+EXAMPLES = """
+- name: List variables that start with qz_
+ debug: msg="{{ lookup('varnames', '^qz_.+')}}"
+ vars:
+ qz_1: hello
+ qz_2: world
+ qa_1: "I won't show"
+ qz_: "I won't show either"
+
+- name: Show all variables
+ debug: msg="{{ lookup('varnames', '.+')}}"
+
+- name: Show variables with 'hosts' in their names
+ debug: msg="{{ lookup('varnames', 'hosts')}}"
+
+- name: Find several related variables that end specific way
+ debug: msg="{{ lookup('varnames', '.+_zone$', '.+_location$') }}"
+
+"""
+
+RETURN = """
+_value:
+ description:
+ - List of the variable names requested.
+ type: list
+"""
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if variables is None:
+ raise AnsibleError('No variables available to search')
+
+ # no options, yet
+ # self.set_options(direct=kwargs)
+
+ ret = []
+ variable_names = list(variables.keys())
+ for term in terms:
+
+ if not isinstance(term, string_types):
+ raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
+
+ try:
+ name = re.compile(term)
+ except Exception as e:
+ raise AnsibleError('Unable to use "%s" as a search parameter: %s' % (term, to_native(e)))
+
+ for varname in variable_names:
+ if name.search(varname):
+ ret.append(varname)
+
+ return ret
diff --git a/lib/ansible/plugins/lookup/vars.py b/lib/ansible/plugins/lookup/vars.py
new file mode 100644
index 00000000..da3848ba
--- /dev/null
+++ b/lib/ansible/plugins/lookup/vars.py
@@ -0,0 +1,106 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: vars
+ author: Ansible Core Team
+ version_added: "2.5"
+ short_description: Lookup templated value of variables
+ description:
+ - 'Retrieves the value of an Ansible variable. Note: Only returns top level variable names.'
+ options:
+ _terms:
+ description: The variable names to look up.
+ required: True
+ default:
+ description:
+ - What to return if a variable is undefined.
+ - If no default is set, it will result in an error if any of the variables is undefined.
+"""
+
+EXAMPLES = """
+- name: Show value of 'variablename'
+ debug: msg="{{ lookup('vars', 'variabl' + myvar) }}"
+ vars:
+ variablename: hello
+ myvar: ename
+
+- name: Show default empty since I don't have 'variablnotename'
+ debug: msg="{{ lookup('vars', 'variabl' + myvar, default='')}}"
+ vars:
+ variablename: hello
+ myvar: notename
+
+- name: Produce an error since I don't have 'variablnotename'
+ debug: msg="{{ lookup('vars', 'variabl' + myvar)}}"
+ ignore_errors: True
+ vars:
+ variablename: hello
+ myvar: notename
+
+- name: find several related variables
+ debug: msg="{{ lookup('vars', 'ansible_play_hosts', 'ansible_play_batch', 'ansible_play_hosts_all') }}"
+
+- name: Access nested variables
+ debug: msg="{{ lookup('vars', 'variabl' + myvar).sub_var }}"
+ ignore_errors: True
+ vars:
+ variablename:
+ sub_var: 12
+ myvar: ename
+
+- name: alternate way to find some 'prefixed vars' in loop
+ debug: msg="{{ lookup('vars', 'ansible_play_' + item) }}"
+ loop:
+ - hosts
+ - batch
+ - hosts_all
+"""
+
+RETURN = """
+_value:
+ description:
+ - value of the variables requested.
+ type: list
+ elements: raw
+"""
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ if variables is not None:
+ self._templar.available_variables = variables
+ myvars = getattr(self._templar, '_available_variables', {})
+
+ self.set_options(direct=kwargs)
+ default = self.get_option('default')
+
+ ret = []
+ for term in terms:
+ if not isinstance(term, string_types):
+ raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))
+
+ try:
+ try:
+ value = myvars[term]
+ except KeyError:
+ try:
+ value = myvars['hostvars'][myvars['inventory_hostname']][term]
+ except KeyError:
+ raise AnsibleUndefinedVariable('No variable found with this name: %s' % term)
+
+ ret.append(self._templar.template(value, fail_on_undefined=True))
+ except AnsibleUndefinedVariable:
+ if default is not None:
+ ret.append(default)
+ else:
+ raise
+
+ return ret
diff --git a/lib/ansible/plugins/netconf/__init__.py b/lib/ansible/plugins/netconf/__init__.py
new file mode 100644
index 00000000..d25f4d35
--- /dev/null
+++ b/lib/ansible/plugins/netconf/__init__.py
@@ -0,0 +1,373 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import abstractmethod
+from functools import wraps
+
+from ansible.errors import AnsibleError
+from ansible.plugins import AnsiblePlugin
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ from ncclient.operations import RPCError
+ from ncclient.xml_ import to_xml, to_ele, NCElement
+ HAS_NCCLIENT = True
+ NCCLIENT_IMP_ERR = None
+except (ImportError, AttributeError) as err: # paramiko and gssapi are incompatible and raise AttributeError not ImportError
+ HAS_NCCLIENT = False
+ NCCLIENT_IMP_ERR = err
+
+try:
+ from lxml.etree import Element, SubElement, tostring, fromstring
+except ImportError:
+ from xml.etree.ElementTree import Element, SubElement, tostring, fromstring
+
+
+def ensure_ncclient(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ if not HAS_NCCLIENT:
+ raise AnsibleError("%s: %s" % (missing_required_lib('ncclient'), to_native(NCCLIENT_IMP_ERR)))
+ return func(self, *args, **kwargs)
+ return wrapped
+
+
+class NetconfBase(AnsiblePlugin):
+ """
+ A base class for implementing Netconf connections
+
+ .. note:: Unlike most of Ansible, nearly all strings in
+ :class:`TerminalBase` plugins are byte strings. This is because of
+ how close to the underlying platform these plugins operate. Remember
+ to mark literal strings as byte string (``b"string"``) and to use
+ :func:`~ansible.module_utils._text.to_bytes` and
+ :func:`~ansible.module_utils._text.to_text` to avoid unexpected
+ problems.
+
+ List of supported rpc's:
+ :get: Retrieves running configuration and device state information
+ :get_config: Retrieves the specified configuration from the device
+ :edit_config: Loads the specified commands into the remote device
+ :commit: Load configuration from candidate to running
+ :discard_changes: Discard changes to candidate datastore
+ :validate: Validate the contents of the specified configuration.
+ :lock: Allows the client to lock the configuration system of a device.
+ :unlock: Release a configuration lock, previously obtained with the lock operation.
+ :copy_config: create or replace an entire configuration datastore with the contents of another complete
+ configuration datastore.
+ :get-schema: Retrieves the required schema from the device
+ :get_capabilities: Retrieves device information and supported rpc methods
+
+ For JUNOS:
+ :execute_rpc: RPC to be executed on the remote device
+ :load_configuration: Loads the given configuration on the device
+
+ Note: rpc support depends on the capabilities of the remote device.
+
+ :returns: Returns output received from remote device as byte string
+ Note: the 'result' or 'error' from the response should be converted to an
+ ElementTree object using 'fromstring' to parse the output as an xml doc
+
+ 'get_capabilities()' returns 'result' as a json string.
+
+ Usage:
+ from ansible.module_utils.connection import Connection
+
+ conn = Connection()
+ data = conn.execute_rpc(rpc)
+ reply = fromstring(data)
+
+ data = conn.get_capabilities()
+ json.loads(data)
+
+ conn.load_configuration(config=['set system ntp server 1.1.1.1'], action='set', format='text')
+ """
+
+ __rpc__ = ['rpc', 'get_config', 'get', 'edit_config', 'validate', 'copy_config', 'dispatch', 'lock', 'unlock',
+ 'discard_changes', 'commit', 'get_schema', 'delete_config', 'get_device_operations']
+
+ def __init__(self, connection):
+ super(NetconfBase, self).__init__()
+ self._connection = connection
+
+ @property
+ def m(self):
+ return self._connection.manager
+
+ def rpc(self, name):
+ """
+ RPC to be executed on the remote device
+ :param name: Name of rpc in string format
+ :return: Received rpc response from remote host
+ """
+ try:
+ obj = to_ele(name)
+ resp = self.m.rpc(obj)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+ except RPCError as exc:
+ msg = exc.xml
+ raise Exception(to_xml(msg))
+
+ def get_config(self, source=None, filter=None):
+ """
+ Retrieve all or part of a specified configuration
+ (by default entire configuration is retrieved).
+ :param source: Name of the configuration datastore being queried, defaults to running datastore
+ :param filter: This argument specifies the portion of the configuration data to retrieve
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if isinstance(filter, list):
+ filter = tuple(filter)
+
+ if not source:
+ source = 'running'
+ resp = self.m.get_config(source=source, filter=filter)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def get(self, filter=None, with_defaults=None):
+ """
+ Retrieve device configuration and state information.
+ :param filter: This argument specifies the portion of the state data to retrieve
+ (by default entire state data is retrieved)
+ :param with_defaults: defines an explicit method of retrieving default values
+ from the configuration
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if isinstance(filter, list):
+ filter = tuple(filter)
+ resp = self.m.get(filter=filter, with_defaults=with_defaults)
+ response = resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+ return response
+
+ def edit_config(self, config=None, format='xml', target='candidate', default_operation=None, test_option=None, error_option=None):
+ """
+ Loads all or part of the specified *config* to the *target* configuration datastore.
+ :param config: Is the configuration, which must be rooted in the `config` element.
+ It can be specified either as a string or an :class:`~xml.etree.ElementTree.Element`.
+ :param format: The format of configuration eg. xml, text
+ :param target: Is the name of the configuration datastore being edited
+ :param default_operation: If specified must be one of { `"merge"`, `"replace"`, or `"none"` }
+ :param test_option: If specified must be one of { `"test_then_set"`, `"set"` }
+ :param error_option: If specified must be one of { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }
+ The `"rollback-on-error"` *error_option* depends on the `:rollback-on-error` capability.
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if config is None:
+ raise ValueError('config value must be provided')
+ resp = self.m.edit_config(config, format=format, target=target, default_operation=default_operation, test_option=test_option,
+ error_option=error_option)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def validate(self, source='candidate'):
+ """
+ Validate the contents of the specified configuration.
+ :param source: Is the name of the configuration datastore being validated or `config` element
+ containing the configuration subtree to be validated
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.validate(source=source)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def copy_config(self, source, target):
+ """
+ Create or replace an entire configuration datastore with the contents of another complete configuration datastore.
+ :param source: Is the name of the configuration datastore to use as the source of the copy operation or `config`
+ element containing the configuration subtree to copy
+ :param target: Is the name of the configuration datastore to use as the destination of the copy operation
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.copy_config(source, target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def dispatch(self, rpc_command=None, source=None, filter=None):
+ """
+ Execute rpc on the remote device eg. dispatch('clear-arp-table')
+ :param rpc_command: specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)
+ :param source: name of the configuration datastore being queried
+ :param filter: specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ if rpc_command is None:
+ raise ValueError('rpc_command value must be provided')
+
+ resp = self.m.dispatch(fromstring(rpc_command), source=source, filter=filter)
+
+ if isinstance(resp, NCElement):
+ # In case xml reply is transformed or namespace is removed in
+ # ncclient device specific handler return modified xml response
+ result = resp.data_xml
+ elif hasattr(resp, 'data_ele') and resp.data_ele:
+ # if data node is present in xml response return the xml string
+ # with data node as root
+ result = resp.data_xml
+ else:
+ # return raw xml string received from host with rpc-reply as the root node
+ result = resp.xml
+
+ return result
+
+ def lock(self, target="candidate"):
+ """
+ Allows the client to lock the configuration system of a device.
+ :param target: is the name of the configuration datastore to lock,
+ defaults to candidate datastore
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.lock(target=target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def unlock(self, target="candidate"):
+ """
+ Release a configuration lock, previously obtained with the lock operation.
+ :param target: is the name of the configuration datastore to unlock,
+ defaults to candidate datastore
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.unlock(target=target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def discard_changes(self):
+ """
+ Revert the candidate configuration to the currently running configuration.
+ Any uncommitted changes are discarded.
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.discard_changes()
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def commit(self, confirmed=False, timeout=None, persist=None):
+ """
+ Commit the candidate configuration as the device's new current configuration.
+ Depends on the `:candidate` capability.
+ A confirmed commit (i.e. if *confirmed* is `True`) is reverted if there is no
+ followup commit within the *timeout* interval. If no timeout is specified the
+ confirm timeout defaults to 600 seconds (10 minutes).
+ A confirming commit may have the *confirmed* parameter but this is not required.
+ Depends on the `:confirmed-commit` capability.
+ :param confirmed: whether this is a confirmed commit
+ :param timeout: specifies the confirm timeout in seconds
+ :param persist: make the confirmed commit survive a session termination,
+ and set a token on the ongoing confirmed commit
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ timeout = to_text(timeout, errors='surrogate_or_strict')
+ resp = self.m.commit(confirmed=confirmed, timeout=timeout, persist=persist)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def get_schema(self, identifier=None, version=None, format=None):
+ """
+ Retrieve a named schema, with optional revision and type.
+ :param identifier: name of the schema to be retrieved
+ :param version: version of schema to get
+ :param format: format of the schema to be retrieved, yang is the default
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.get_schema(identifier, version=version, format=format)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def delete_config(self, target):
+ """
+ delete a configuration datastore
+ :param target: specifies the name or URL of configuration datastore to delete
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ resp = self.m.delete_config(target)
+ return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
+
+ def locked(self, target):
+ return self.m.locked(target)
+
+ @abstractmethod
+ def get_capabilities(self):
+ """
+ Retrieves device information and supported
+ rpc methods by device platform and return result
+ as a string
+ :return: Netconf session capability
+ """
+ pass
+
+ @staticmethod
+ def guess_network_os(obj):
+ """
+ Identifies the operating system of the network device.
+ :param obj: ncclient manager connection instance
+ :return: The name of network operating system.
+ """
+ pass
+
+ def get_base_rpc(self):
+ """
+ Returns list of base rpc method supported by remote device
+ :return: List of RPC supported
+ """
+ return self.__rpc__
+
+ def put_file(self, source, destination):
+ """
+ Copies file to remote host
+ :param source: Source location of file
+ :param destination: Destination file path
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ pass
+
+ def fetch_file(self, source, destination):
+ """
+ Fetch file from remote host
+ :param source: Source location of file
+ :param destination: Destination file path
+ :return: Returns xml string containing the RPC response received from remote host
+ """
+ pass
+
+ def get_device_operations(self, server_capabilities):
+ """
+ Retrieve remote host capability from Netconf server hello message.
+ :param server_capabilities: Server capabilities received during Netconf session initialization
+ :return: Remote host capabilities in dictionary format
+ """
+ operations = {}
+ capabilities = '\n'.join(server_capabilities)
+ operations['supports_commit'] = ':candidate' in capabilities
+ operations['supports_defaults'] = ':with-defaults' in capabilities
+ operations['supports_confirm_commit'] = ':confirmed-commit' in capabilities
+ operations['supports_startup'] = ':startup' in capabilities
+ operations['supports_xpath'] = ':xpath' in capabilities
+ operations['supports_writable_running'] = ':writable-running' in capabilities
+ operations['supports_validate'] = ':validate' in capabilities
+
+ operations['lock_datastore'] = []
+ if operations['supports_writable_running']:
+ operations['lock_datastore'].append('running')
+
+ if operations['supports_commit']:
+ operations['lock_datastore'].append('candidate')
+
+ if operations['supports_startup']:
+ operations['lock_datastore'].append('startup')
+
+ operations['supports_lock'] = bool(operations['lock_datastore'])
+
+ return operations
+
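+# Sketch of a typical get_device_operations() result, assuming a device whose
+# hello message advertised :candidate, :validate and :writable-running:
+#
+#   {'supports_commit': True, 'supports_validate': True,
+#    'supports_writable_running': True, 'supports_defaults': False, ...,
+#    'lock_datastore': ['running', 'candidate'], 'supports_lock': True}
+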
+# TODO Restore .xml, when ncclient supports it for all platforms
diff --git a/lib/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py
new file mode 100644
index 00000000..41c24441
--- /dev/null
+++ b/lib/ansible/plugins/shell/__init__.py
@@ -0,0 +1,227 @@
+# (c) 2016 RedHat
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import random
+import re
+import time
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import text_type
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+from ansible.plugins import AnsiblePlugin
+
+_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
+
+
+class ShellBase(AnsiblePlugin):
+ def __init__(self):
+
+ super(ShellBase, self).__init__()
+
+ self.env = {}
+ self.tmpdir = None
+ self.executable = None
+
+ def _normalize_system_tmpdirs(self):
+ # Normalize the tmp directory strings. We don't use expanduser/expandvars because those
+ # can vary between the remote user and the become user. Therefore the safest practice is
+ # for these to always be specified as full paths.
+ normalized_paths = [d.rstrip('/') for d in self.get_option('system_tmpdirs')]
+
+ # Make sure all system_tmpdirs are absolute, otherwise they'd be relative to the login dir,
+ # which is almost certainly going to fail in some corner case.
+ if not all(os.path.isabs(d) for d in normalized_paths):
+ raise AnsibleError('The configured system_tmpdirs contains a relative path: {0}. All'
+ ' system_tmpdirs must be absolute'.format(to_native(normalized_paths)))
+
+ self.set_option('system_tmpdirs', normalized_paths)
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(ShellBase, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # set env if needed; the 'environment' option has a dual nature and may be a list of dicts or a single dict
+ env = self.get_option('environment')
+ if isinstance(env, list):
+ for env_dict in env:
+ self.env.update(env_dict)
+ else:
+ self.env.update(env)
+
+ # We can remove the try: except in the future when we make ShellBase a proper subset of
+ # *all* shells. Right now powershell and third party shells which do not use the
+ # shell_common documentation fragment (and so do not have system_tmpdirs) will fail
+ try:
+ self._normalize_system_tmpdirs()
+ except KeyError:
+ pass
+
+ @staticmethod
+ def _generate_temp_dir_name():
+ return 'ansible-tmp-%s-%s-%s' % (time.time(), os.getpid(), random.randint(0, 2**48))
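+ # e.g. 'ansible-tmp-1608400000.25-1234-123456789012' (illustrative values:
+ # timestamp, pid, random int), unique enough to avoid collisions between tasks.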
+
+ def env_prefix(self, **kwargs):
+ return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in kwargs.items()])
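+ # For example, env_prefix(LC_ALL='C', FOO='a b') yields "LC_ALL=C FOO='a b'"
+ # (modulo dict ordering), ready to be prepended to a remote command line.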
+
+ def join_path(self, *args):
+ return os.path.join(*args)
+
+ # some shells (eg, powershell) are snooty about filenames/extensions; this lets the shell plugin have a say
+ def get_remote_filename(self, pathname):
+ base_name = os.path.basename(pathname.strip())
+ return base_name.strip()
+
+ def path_has_trailing_slash(self, path):
+ return path.endswith('/')
+
+ def chmod(self, paths, mode):
+ cmd = ['chmod', mode]
+ cmd.extend(paths)
+ cmd = [shlex_quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
+ def chown(self, paths, user):
+ cmd = ['chown', user]
+ cmd.extend(paths)
+ cmd = [shlex_quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
+ def set_user_facl(self, paths, user, mode):
+ """Only sets acls for users as that's really all we need"""
+ cmd = ['setfacl', '-m', 'u:%s:%s' % (user, mode)]
+ cmd.extend(paths)
+ cmd = [shlex_quote(c) for c in cmd]
+
+ return ' '.join(cmd)
+
+ def remove(self, path, recurse=False):
+ path = shlex_quote(path)
+ cmd = 'rm -f '
+ if recurse:
+ cmd += '-r '
+ return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)
+
+ def exists(self, path):
+ cmd = ['test', '-e', shlex_quote(path)]
+ return ' '.join(cmd)
+
+ def mkdtemp(self, basefile=None, system=False, mode=0o700, tmpdir=None):
+ if not basefile:
+ basefile = self.__class__._generate_temp_dir_name()
+
+ # When system is specified we have to create this in a directory where
+ # other users can read and access the tmp directory.
+ # This is because we use system to create tmp dirs for unprivileged users who are
+ # sudo'ing to a second unprivileged user.
+ # The 'system_tmpdirs' setting defines directories we can use for this purpose;
+ # the defaults are /tmp and /var/tmp.
+ # So we only allow one of those locations if system=True, using the
+ # passed in tmpdir if it is valid or the first one from the setting if not.
+
+ if system:
+ if tmpdir:
+ tmpdir = tmpdir.rstrip('/')
+
+ if tmpdir in self.get_option('system_tmpdirs'):
+ basetmpdir = tmpdir
+ else:
+ basetmpdir = self.get_option('system_tmpdirs')[0]
+ else:
+ if tmpdir is None:
+ basetmpdir = self.get_option('remote_tmp')
+ else:
+ basetmpdir = tmpdir
+
+ basetmp = self.join_path(basetmpdir, basefile)
+
+ # use mkdir -p to ensure parents exist, but mkdir fullpath to ensure last one is created by us
+ cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmpdir, self._SHELL_SUB_RIGHT)
+ cmd += '%s mkdir %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
+ cmd += ' %s echo %s=%s echo %s %s' % (self._SHELL_AND, basefile, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
+
+ # change the umask in a subshell to achieve the desired mode
+ # also for directories created with `mkdir -p`
+ if mode:
+ tmp_umask = 0o777 & ~mode
+ cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
+
+ return cmd
+
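+ # A rough sketch, assuming the sh plugin's markers (_SHELL_SUB_LEFT/_SHELL_SUB_RIGHT
+ # of '"`'/'`"', _SHELL_AND of '&&', _SHELL_GROUP_LEFT/_SHELL_GROUP_RIGHT of '('/')');
+ # with mode=0o700 the generated command looks like:
+ #   ( umask 77 && mkdir -p "` echo /tmp `"&& mkdir "` echo /tmp/ansible-tmp-... `" && echo ansible-tmp-...="` echo /tmp/ansible-tmp-... `" )
+ # so the caller can parse the final 'basefile=fullpath' line to find the tmpdir.
+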
+ def expand_user(self, user_home_path, username=''):
+ ''' Return a command to expand tildes in a path
+
+ It can be either "~" or "~username". We just ignore $HOME
+ We use the POSIX definition of a username:
+ http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
+ http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
+
+ Falls back to 'current working directory' as we assume 'home is where the remote user ends up'
+ '''
+
+ # Check that the user_path to expand is safe
+ if user_home_path != '~':
+ if not _USER_HOME_PATH_RE.match(user_home_path):
+ # shlex_quote will make the shell return the string verbatim
+ user_home_path = shlex_quote(user_home_path)
+ elif username:
+ # if present the user name is appended to resolve "that user's home"
+ user_home_path += username
+
+ return 'echo %s' % user_home_path
+
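+ # For example, expand_user('~') gives "echo ~" (the remote shell expands it),
+ # expand_user('~', 'foo') gives "echo ~foo", while an unsafe value such as
+ # '~foo/../../etc' fails the regex and is quoted so the shell echoes it verbatim.
+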
+ def pwd(self):
+ """Return the working directory after connecting"""
+ return 'echo %spwd%s' % (self._SHELL_SUB_LEFT, self._SHELL_SUB_RIGHT)
+
+ def build_module_command(self, env_string, shebang, cmd, arg_path=None):
+ # don't quote the cmd if it's an empty string, because this will break pipelining mode
+ if cmd.strip() != '':
+ cmd = shlex_quote(cmd)
+
+ cmd_parts = []
+ if shebang:
+ shebang = shebang.replace("#!", "").strip()
+ else:
+ shebang = ""
+ cmd_parts.extend([env_string.strip(), shebang, cmd])
+ if arg_path is not None:
+ cmd_parts.append(arg_path)
+ new_cmd = " ".join(cmd_parts)
+ return new_cmd
+
+ def append_command(self, cmd, cmd_to_append):
+ """Append an additional command if supported by the shell"""
+
+ if self._SHELL_AND:
+ cmd += ' %s %s' % (self._SHELL_AND, cmd_to_append)
+
+ return cmd
+
+ def wrap_for_exec(self, cmd):
+ """wrap script execution with any necessary decoration (eg '&' for quoted powershell script paths)"""
+ return cmd
+
+ def quote(self, cmd):
+ """Returns a shell-escaped string that can be safely used as one token in a shell command line"""
+ return shlex_quote(cmd)
diff --git a/lib/ansible/plugins/shell/cmd.py b/lib/ansible/plugins/shell/cmd.py
new file mode 100644
index 00000000..d83aa115
--- /dev/null
+++ b/lib/ansible/plugins/shell/cmd.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: cmd
+plugin_type: shell
+version_added: '2.8'
+short_description: Windows Command Prompt
+description:
+- Used with the 'ssh' connection plugin when no C(DefaultShell) has been set on the Windows host.
+extends_documentation_fragment:
+- shell_windows
+'''
+
+import re
+
+from ansible.plugins.shell.powershell import ShellModule as PSShellModule
+
+# these are the metachars that have a special meaning in cmd that we want to escape when quoting
+_find_unsafe = re.compile(r'[\s\(\)\%\!^\"\<\>\&\|]').search
+
+
+class ShellModule(PSShellModule):
+
+ # Common shell filenames that this plugin handles
+ COMPATIBLE_SHELLS = frozenset()
+ # Family of shells this has. Must match the filename without extension
+ SHELL_FAMILY = 'cmd'
+
+ _SHELL_REDIRECT_ALLNULL = '>nul 2>&1'
+ _SHELL_AND = '&&'
+
+ # Used by various parts of Ansible to do Windows specific changes
+ _IS_WINDOWS = True
+
+ def quote(self, s):
+ # cmd does not support the single quotes that shlex_quote uses. We need to override the
+ # quoting behaviour to better match cmd.exe.
+ # https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
+
+ # Return an empty argument
+ if not s:
+ return '""'
+
+ if _find_unsafe(s) is None:
+ return s
+
+ # Escape the metachars as we are quoting the string to stop cmd from interpreting that metachar. For example
+ # 'file &whoami.exe' would otherwise run whoami.exe (similar to $(whoami.exe) in sh) instead of being the literal string
+ # https://stackoverflow.com/questions/3411771/multiple-character-replace-with-python
+ for c in '^()%!"<>&|': # '^' must be the first char that we scan and replace
+ if c in s:
+ # I can't find any docs that explicitly say this but to escape ", it needs to be prefixed with \^.
+ s = s.replace(c, ("\\^" if c == '"' else "^") + c)
+
+ return '^"' + s + '^"'
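+
+# A quick sketch of the behaviour: quote('') returns '""', quote('simple') comes back
+# unchanged, and quote('file &whoami.exe') becomes '^"file ^&whoami.exe^"', so cmd
+# treats the & as a literal character rather than a command separator.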
diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
new file mode 100644
index 00000000..64ea5824
--- /dev/null
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: powershell
+plugin_type: shell
+version_added: historical
+short_description: Windows PowerShell
+description:
+- The only option when using 'winrm' or 'psrp' as a connection plugin.
+- Can also be used with the 'ssh' connection plugin when C(DefaultShell) has been configured as PowerShell.
+extends_documentation_fragment:
+- shell_windows
+'''
+
+import base64
+import os
+import re
+import shlex
+import pkgutil
+import xml.etree.ElementTree as ET
+import ntpath
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.shell import ShellBase
+
+
+_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
+
+
+def _parse_clixml(data, stream="Error"):
+ """
+ Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
+ message encoded in the XML data. CLIXML is used by PowerShell to encode
+ multiple objects in stderr.
+ """
+ lines = []
+
+ # There are some scenarios where the stderr contains a nested CLIXML element like
+ # '#< CLIXML\r\n#< CLIXML\r\n<Objs>...</Objs><Objs>...</Objs>'.
+ # Parse each individual <Objs> element and add the error strings to our stderr list.
+ # https://github.com/ansible/ansible/issues/69550
+ while data:
+ end_idx = data.find(b"</Objs>") + 7
+ current_element = data[data.find(b"<Objs "):end_idx]
+ data = data[end_idx:]
+
+ clixml = ET.fromstring(current_element)
+ namespace_match = re.match(r'{(.*)}', clixml.tag)
+ namespace = "{%s}" % namespace_match.group(1) if namespace_match else ""
+
+ strings = clixml.findall("./%sS" % namespace)
+ lines.extend([e.text.replace('_x000D__x000A_', '') for e in strings if e.attrib.get('S') == stream])
+
+ return to_bytes('\r\n'.join(lines))
+
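+# As a rough illustration (sample values, not from a real host):
+#   _parse_clixml(b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.'
+#                 b'microsoft.com/powershell/2004/04"><S S="Error">Oops_x000D__x000A_</S></Objs>')
+# returns b'Oops'; multiple matching <S> strings would be joined with \r\n.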
+
+class ShellModule(ShellBase):
+
+ # Common shell filenames that this plugin handles
+ # Powershell is handled differently. It's selected when winrm is the
+ # connection
+ COMPATIBLE_SHELLS = frozenset()
+ # Family of shells this has. Must match the filename without extension
+ SHELL_FAMILY = 'powershell'
+
+ _SHELL_REDIRECT_ALLNULL = '> $null'
+ _SHELL_AND = ';'
+
+ # Used by various parts of Ansible to do Windows specific changes
+ _IS_WINDOWS = True
+
+ # TODO: add binary module support
+
+ def env_prefix(self, **kwargs):
+ # powershell/winrm env handling is handled in the exec wrapper
+ return ""
+
+ def join_path(self, *args):
+ # use normpath() to remove doubled slashes and convert forward slashes to backslashes
+ parts = [ntpath.normpath(self._unquote(arg)) for arg in args]
+
+ # Because ntpath.join treats any component that begins with a backslash as an absolute path,
+ # we have to strip slashes from at least the beginning, otherwise join will ignore all previous
+ # path components except for the drive.
+ return ntpath.join(parts[0], *[part.strip('\\') for part in parts[1:]])
+
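+ # For example, join_path('C:\\temp\\', '\\subdir', 'file.ps1') normalizes each part
+ # and yields 'C:\\temp\\subdir\\file.ps1'; without the strip('\\'), ntpath.join
+ # would treat '\\subdir' as absolute and reset the result back to the drive root.
+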
+ def get_remote_filename(self, pathname):
+ # powershell requires that script files end with .ps1
+ base_name = os.path.basename(pathname.strip())
+ name, ext = os.path.splitext(base_name.strip())
+ if ext.lower() not in ['.ps1', '.exe']:
+ return name + '.ps1'
+
+ return base_name.strip()
+
+ def path_has_trailing_slash(self, path):
+ # Allow Windows paths to be specified using either slash.
+ path = self._unquote(path)
+ return path.endswith('/') or path.endswith('\\')
+
+ def chmod(self, paths, mode):
+ raise NotImplementedError('chmod is not implemented for Powershell')
+
+ def chown(self, paths, user):
+ raise NotImplementedError('chown is not implemented for Powershell')
+
+ def set_user_facl(self, paths, user, mode):
+ raise NotImplementedError('set_user_facl is not implemented for Powershell')
+
+ def remove(self, path, recurse=False):
+ path = self._escape(self._unquote(path))
+ if recurse:
+ return self._encode_script('''Remove-Item '%s' -Force -Recurse;''' % path)
+ else:
+ return self._encode_script('''Remove-Item '%s' -Force;''' % path)
+
+ def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
+ # Windows does not have an equivalent for the system temp files, so
+ # the param is ignored
+ if not basefile:
+ basefile = self.__class__._generate_temp_dir_name()
+ basefile = self._escape(self._unquote(basefile))
+ basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
+
+ script = '''
+ $tmp_path = [System.Environment]::ExpandEnvironmentVariables('%s')
+ $tmp = New-Item -Type Directory -Path $tmp_path -Name '%s'
+ Write-Output -InputObject $tmp.FullName
+ ''' % (basetmpdir, basefile)
+ return self._encode_script(script.strip())
+
+ def expand_user(self, user_home_path, username=''):
+ # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
+ # not seem to work remotely, though by default we are always starting
+ # in the user's home directory.
+ user_home_path = self._unquote(user_home_path)
+ if user_home_path == '~':
+ script = 'Write-Output (Get-Location).Path'
+ elif user_home_path.startswith('~\\'):
+ script = "Write-Output ((Get-Location).Path + '%s')" % self._escape(user_home_path[1:])
+ else:
+ script = "Write-Output '%s'" % self._escape(user_home_path)
+ return self._encode_script(script)
+
+ def exists(self, path):
+ path = self._escape(self._unquote(path))
+ script = '''
+ If (Test-Path '%s')
+ {
+ $res = 0;
+ }
+ Else
+ {
+ $res = 1;
+ }
+ Write-Output '$res';
+ Exit $res;
+ ''' % path
+ return self._encode_script(script)
+
+ def checksum(self, path, *args, **kwargs):
+ path = self._escape(self._unquote(path))
+ script = '''
+ If (Test-Path -PathType Leaf '%(path)s')
+ {
+ $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
+ $fp = [System.IO.File]::Open('%(path)s', [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
+ [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
+ $fp.Dispose();
+ }
+ ElseIf (Test-Path -PathType Container '%(path)s')
+ {
+ Write-Output "3";
+ }
+ Else
+ {
+ Write-Output "1";
+ }
+ ''' % dict(path=path)
+ return self._encode_script(script)
+
+ def build_module_command(self, env_string, shebang, cmd, arg_path=None):
+ bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
+
+ # pipelining bypass
+ if cmd == '':
+ return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
+
+ # non-pipelining
+
+ cmd_parts = shlex.split(cmd, posix=False)
+ cmd_parts = list(map(to_text, cmd_parts))
+ if shebang and shebang.lower() == '#!powershell':
+ if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
+ # we're running a module via the bootstrap wrapper
+ cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
+ wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
+ return wrapper_cmd
+ elif shebang and shebang.startswith('#!'):
+ cmd_parts.insert(0, shebang[2:])
+ elif not shebang:
+ # The module is assumed to be a binary
+ cmd_parts[0] = self._unquote(cmd_parts[0])
+ cmd_parts.append(arg_path)
+ script = '''
+ Try
+ {
+ %s
+ %s
+ }
+ Catch
+ {
+ $_obj = @{ failed = $true }
+ If ($_.Exception.GetType)
+ {
+ $_obj.Add('msg', $_.Exception.Message)
+ }
+ Else
+ {
+ $_obj.Add('msg', $_.ToString())
+ }
+ If ($_.InvocationInfo.PositionMessage)
+ {
+ $_obj.Add('exception', $_.InvocationInfo.PositionMessage)
+ }
+ ElseIf ($_.ScriptStackTrace)
+ {
+ $_obj.Add('exception', $_.ScriptStackTrace)
+ }
+ Try
+ {
+ $_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
+ }
+ Catch
+ {
+ }
+ Echo $_obj | ConvertTo-Json -Compress -Depth 99
+ Exit 1
+ }
+ ''' % (env_string, ' '.join(cmd_parts))
+ return self._encode_script(script, preserve_rc=False)
+
+ def wrap_for_exec(self, cmd):
+ return '& %s; exit $LASTEXITCODE' % cmd
+
+ def _unquote(self, value):
+ '''Remove any matching quotes that wrap the given value.'''
+ value = to_text(value or '')
+ m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
+ if m:
+ return m.group(1)
+ m = re.match(r'^\s*?"(.*?)"\s*?$', value)
+ if m:
+ return m.group(1)
+ return value
+
+ def _escape(self, value):
+ '''Return value escaped for use in PowerShell single quotes.'''
+ # There are 5 chars that need to be escaped in a single quote.
+ # https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
+ return re.compile(u"(['\u2018\u2019\u201a\u201b])").sub(u'\\1\\1', value)
+
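+ # For example, _escape("it's") returns "it''s", which can be embedded safely in a
+ # single-quoted PowerShell string; the unicode smart-quote variants double the same way.
+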
+ def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
+ '''Convert a PowerShell script to a single base64-encoded command.'''
+ script = to_text(script)
+
+ if script == u'-':
+ cmd_parts = _common_args + ['-Command', '-']
+
+ else:
+ if strict_mode:
+ script = u'Set-StrictMode -Version Latest\r\n%s' % script
+ # try to propagate the exit code if present - won't work with begin/process/end-style scripts (a la put_file)
+ # NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
+ if preserve_rc:
+ script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
+ % script
+ script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
+ encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
+ cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
+
+ if as_list:
+ return cmd_parts
+ return ' '.join(cmd_parts)
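+
+ # The resulting command line therefore looks roughly like:
+ #   PowerShell -NoProfile -NonInteractive -ExecutionPolicy Unrestricted -EncodedCommand <base64 of UTF-16-LE script>
+ # which sidesteps quoting problems when the script is shipped over winrm or ssh.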
diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
new file mode 100644
index 00000000..76a386f7
--- /dev/null
+++ b/lib/ansible/plugins/shell/sh.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2014, Chris Church <chris@ninemoreminutes.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: sh
+plugin_type: shell
+short_description: "POSIX shell (/bin/sh)"
+version_added: historical
+description:
+  - This shell plugin is the one you want to use on most Unix systems; it is the most compatible and widely installed shell.
+extends_documentation_fragment:
+ - shell_common
+'''
+
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.plugins.shell import ShellBase
+
+
+class ShellModule(ShellBase):
+
+ # Common shell filenames that this plugin handles.
+ # Note: sh is the default shell plugin so this plugin may also be selected
+ # if the filename is not listed in any Shell plugin.
+ # This code needs to be SH-compliant. BASH-isms will not work if /bin/sh points to a non-BASH shell.
+ COMPATIBLE_SHELLS = frozenset(('sh', 'zsh', 'bash', 'dash', 'ksh'))
+ # Family of shells this has. Must match the filename without extension
+ SHELL_FAMILY = 'sh'
+
+ # commonly used
+ ECHO = 'echo'
+ COMMAND_SEP = ';'
+
+ # How to end lines in a python script one-liner
+ _SHELL_EMBEDDED_PY_EOL = '\n'
+ _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
+ _SHELL_AND = '&&'
+ _SHELL_OR = '||'
+ _SHELL_SUB_LEFT = '"`'
+ _SHELL_SUB_RIGHT = '`"'
+ _SHELL_GROUP_LEFT = '('
+ _SHELL_GROUP_RIGHT = ')'
+
+ def checksum(self, path, python_interp):
+ # In the following test, each condition is a check and logical
+ # comparison (|| or &&) that sets the rc value. Every check is run so
+ # the last check in the series to fail will be the rc that is returned.
+ #
+ # If a check fails we error before invoking the hash functions because
+ # hash functions may successfully take the hash of a directory on BSDs
+ # (UFS filesystem?) which is not what the rest of the ansible code expects
+ #
+ # If all of the available hashing methods fail we fail with an rc of 0.
+ # This logic is added to the end of the cmd at the bottom of this function.
+
+ # Return codes:
+ # checksum: success!
+ # 0: Unknown error
+ # 1: Remote file does not exist
+ # 2: No read permissions on the file
+ # 3: File is a directory
+ # 4: No python interpreter
+
+ # Quoting gets complex here. We're writing a python string that's
+ # used by a variety of shells on the remote host to invoke a python
+ # "one-liner".
+ shell_escaped_path = shlex_quote(path)
+ test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
+ csums = [
+ u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
+ u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
+ ]
+
+ cmd = (" %s " % self._SHELL_OR).join(csums)
+ cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
+ return cmd
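+
+ # On success the only output is the sha1 hex digest; on any failed precheck the
+ # test above echoes '<code> <path>' (e.g. '1 /missing/file') and exits before
+ # hashing, and if every hashing method fails the trailing echo emits '0 <path>'.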
diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py
new file mode 100644
index 00000000..2e6d49e7
--- /dev/null
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -0,0 +1,1384 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import cmd
+import functools
+import os
+import pprint
+import sys
+import threading
+import time
+
+from collections import deque
+from multiprocessing import Lock
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleParserError, AnsibleUndefinedVariable
+from ansible.executor import action_write_locks
+from ansible.executor.process.worker import WorkerProcess
+from ansible.executor.task_result import TaskResult
+from ansible.module_utils.six.moves import queue as Queue
+from ansible.module_utils.six import iteritems, itervalues, string_types
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.handler import Handler
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.included_file import IncludedFile
+from ansible.playbook.task_include import TaskInclude
+from ansible.plugins import loader as plugin_loader
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
+
+display = Display()
+
+__all__ = ['StrategyBase']
+
+# Entries in this list are matched either exactly or as a start-of-string prefix;
+# regexes are not accepted.
+ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
+ 'discovered_interpreter_',
+))
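+# e.g. a fact named 'discovered_interpreter_python' matches the prefix above and is
+# therefore always stored against the delegated host rather than the task host.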
+
+
+class StrategySentinel:
+ pass
+
+
+def SharedPluginLoaderObj():
+ '''This only exists for backwards compat, do not use.
+ '''
+ display.deprecated('SharedPluginLoaderObj is deprecated, please directly use ansible.plugins.loader',
+ version='2.11', collection_name='ansible.builtin')
+ return plugin_loader
+
+
+_sentinel = StrategySentinel()
+
+
+def post_process_whens(result, task, templar):
+
+ cond = None
+ if task.changed_when:
+ cond = Conditional(loader=templar._loader)
+ cond.when = task.changed_when
+ result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
+
+ if task.failed_when:
+ if cond is None:
+ cond = Conditional(loader=templar._loader)
+ cond.when = task.failed_when
+ failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
+ result['failed_when_result'] = result['failed'] = failed_when_result
+
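+# For example, a task with 'changed_when: false' always reports changed=False here,
+# and a 'failed_when' expression overrides both 'failed' and 'failed_when_result'.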
+
+def results_thread_main(strategy):
+ while True:
+ try:
+ result = strategy._final_q.get()
+ if isinstance(result, StrategySentinel):
+ break
+ elif isinstance(result, TaskResult):
+ with strategy._results_lock:
+ # only handlers have the listen attr, so this must be a handler
+ # we split up the results into two queues here to make sure
+ # handler and regular result processing don't cross wires
+ if 'listen' in result._task_fields:
+ strategy._handler_results.append(result)
+ else:
+ strategy._results.append(result)
+ else:
+ display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
+ except (IOError, EOFError):
+ break
+ except Queue.Empty:
+ pass
+
+
+def debug_closure(func):
+ """Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
+ @functools.wraps(func)
+ def inner(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
+ status_to_stats_map = (
+ ('is_failed', 'failures'),
+ ('is_unreachable', 'dark'),
+ ('is_changed', 'changed'),
+ ('is_skipped', 'skipped'),
+ )
+
+ # We don't know the host yet; copy the previous states for lookup after we process the new results
+ prev_host_states = iterator._host_states.copy()
+
+ results = func(self, iterator, one_pass=one_pass, max_passes=max_passes, do_handlers=do_handlers)
+ _processed_results = []
+
+ for result in results:
+ task = result._task
+ host = result._host
+ _queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
+ task_vars = _queued_task_args['task_vars']
+ play_context = _queued_task_args['play_context']
+ # Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
+ try:
+ prev_host_state = prev_host_states[host.name]
+ except KeyError:
+ prev_host_state = iterator.get_host_state(host)
+
+ while result.needs_debugger(globally_enabled=self.debugger_active):
+ next_action = NextAction()
+ dbg = Debugger(task, host, task_vars, play_context, result, next_action)
+ dbg.cmdloop()
+
+ if next_action.result == NextAction.REDO:
+ # rollback host state
+ self._tqm.clear_failed_hosts()
+ iterator._host_states[host.name] = prev_host_state
+ for method, what in status_to_stats_map:
+ if getattr(result, method)():
+ self._tqm._stats.decrement(what, host.name)
+ self._tqm._stats.decrement('ok', host.name)
+
+ # redo
+ self._queue_task(host, task, task_vars, play_context)
+
+ _processed_results.extend(debug_closure(func)(self, iterator, one_pass))
+ break
+ elif next_action.result == NextAction.CONTINUE:
+ _processed_results.append(result)
+ break
+ elif next_action.result == NextAction.EXIT:
+ # Matches KeyboardInterrupt from bin/ansible
+ sys.exit(99)
+ else:
+ _processed_results.append(result)
+
+ return _processed_results
+ return inner
+
+
+class StrategyBase:
+
+ '''
+ This is the base class for strategy plugins, which contains some common
+ code useful to all strategies like running handlers, cleanup actions, etc.
+ '''
+
+ # by default, strategies should support throttling but we allow individual
+ # strategies to disable this and either forego supporting it or manage
+ # the throttling internally (as `free` does)
+ ALLOW_BASE_THROTTLING = True
+
+ def __init__(self, tqm):
+ self._tqm = tqm
+ self._inventory = tqm.get_inventory()
+ self._workers = tqm._workers
+ self._variable_manager = tqm.get_variable_manager()
+ self._loader = tqm.get_loader()
+ self._final_q = tqm._final_q
+ self._step = context.CLIARGS.get('step', False)
+ self._diff = context.CLIARGS.get('diff', False)
+
+ # the task cache is a dictionary of tuples of (host.name, task._uuid)
+ # used to find the original task object of in-flight tasks and to store
+ # the task args/vars and play context info used to queue the task.
+ self._queued_task_cache = {}
+
+ # Backwards compat: self._display isn't really needed, just import the global display and use that.
+ self._display = display
+
+ # internal counters
+ self._pending_results = 0
+ self._pending_handler_results = 0
+ self._cur_worker = 0
+
+ # this dictionary is used to keep track of hosts that have
+ # outstanding tasks still in queue
+ self._blocked_hosts = dict()
+
+ # this dictionary is used to keep track of hosts that have
+ # flushed handlers
+ self._flushed_hosts = dict()
+
+ self._results = deque()
+ self._handler_results = deque()
+ self._results_lock = threading.Condition(threading.Lock())
+
+ # create the result processing thread for reading results in the background
+ self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
+ self._results_thread.daemon = True
+ self._results_thread.start()
+
+ # holds the list of active (persistent) connections to be shutdown at
+ # play completion
+ self._active_connections = dict()
+
+ # Caches for get_host calls, to avoid calling excessively
+ # These values should be set at the top of the ``run`` method of each
+ # strategy plugin. Use ``_set_hosts_cache`` to set these values
+ self._hosts_cache = []
+ self._hosts_cache_all = []
+
+ self.debugger_active = C.ENABLE_TASK_DEBUGGER
+
+ def _set_hosts_cache(self, play, refresh=True):
+ """Responsible for setting _hosts_cache and _hosts_cache_all
+
+ See comment in ``__init__`` for the purpose of these caches
+ """
+ if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
+ return
+
+ if Templar(None).is_template(play.hosts):
+ _pattern = 'all'
+ else:
+ _pattern = play.hosts or 'all'
+ self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
+ self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
+
+ def cleanup(self):
+ # close active persistent connections
+ for sock in itervalues(self._active_connections):
+ try:
+ conn = Connection(sock)
+ conn.reset()
+ except ConnectionError as e:
+ # most likely socket is already closed
+ display.debug("got an error while closing persistent connection: %s" % e)
+ self._final_q.put(_sentinel)
+ self._results_thread.join()
+
+ def run(self, iterator, play_context, result=0):
+ # execute one more pass through the iterator without peeking, to
+ # make sure that all of the hosts are advanced to their final task.
+ # This should be safe, as everything should be ITERATING_COMPLETE by
+ # this point, though the strategy may not advance the hosts itself.
+
+ for host in self._hosts_cache:
+ if host not in self._tqm._unreachable_hosts:
+ try:
+ iterator.get_next_task_for_host(self._inventory.hosts[host])
+ except KeyError:
+ iterator.get_next_task_for_host(self._inventory.get_host(host))
+
+ # save the failed/unreachable hosts, as the run_handlers()
+ # method will clear that information during its execution
+ failed_hosts = iterator.get_failed_hosts()
+ unreachable_hosts = self._tqm._unreachable_hosts.keys()
+
+ display.debug("running handlers")
+ handler_result = self.run_handlers(iterator, play_context)
+ if isinstance(handler_result, bool) and not handler_result:
+ result |= self._tqm.RUN_ERROR
+ elif not handler_result:
+ result |= handler_result
+
+ # now update with the hosts (if any) that failed or were
+ # unreachable during the handler execution phase
+ failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
+ unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
+
+ # return the appropriate code, depending on the status of the hosts after the run
+ if not isinstance(result, bool) and result != self._tqm.RUN_OK:
+ return result
+ elif len(unreachable_hosts) > 0:
+ return self._tqm.RUN_UNREACHABLE_HOSTS
+ elif len(failed_hosts) > 0:
+ return self._tqm.RUN_FAILED_HOSTS
+ else:
+ return self._tqm.RUN_OK
+
+ def get_hosts_remaining(self, play):
+ self._set_hosts_cache(play, refresh=False)
+ ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
+ return [host for host in self._hosts_cache if host not in ignore]
+
+ def get_failed_hosts(self, play):
+ self._set_hosts_cache(play, refresh=False)
+ return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
+
+ def add_tqm_variables(self, vars, play):
+ '''
+ Base class method to add extra variables/information to the list of task
+ vars sent through the executor engine regarding the task queue manager state.
+ '''
+ vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
+ vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
+
+ def _queue_task(self, host, task, task_vars, play_context):
+ ''' handles queueing the task up to be sent to a worker '''
+
+ display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
+
+ # Add a write lock for tasks.
+ # Maybe this should be added somewhere further up the call stack but
+ # this is the earliest in the code where we have task (1) extracted
+ # into its own variable and (2) there's only a single code path
+ # leading to the module being run. This is called by three
+ # functions: __init__.py::_do_handler_run(), linear.py::run(), and
+ # free.py::run() so we'd have to add to all three to do it there.
+ # The next common higher level is __init__.py::run() and that has
+ # tasks inside of play_iterator so we'd have to extract them to do it
+ # there.
+
+ if task.action not in action_write_locks.action_write_locks:
+ display.debug('Creating lock for %s' % task.action)
+ action_write_locks.action_write_locks[task.action] = Lock()
+
+ # create a templar and template things we need later for the queuing process
+ templar = Templar(loader=self._loader, variables=task_vars)
+
+ try:
+ throttle = int(templar.template(task.throttle))
+ except Exception as e:
+ raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
+
+ # and then queue the new task
+ try:
+ # Determine the "rewind point" of the worker list, that is, the index at which
+ # we wrap back around to the first worker.
+ # Normally, that is simply the length of the workers list (as determined
+ # by the forks or serial setting), however a task/block/play may "throttle"
+ # that limit down.
+ rewind_point = len(self._workers)
+ if throttle > 0 and self.ALLOW_BASE_THROTTLING:
+ if task.run_once:
+ display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
+ else:
+ if throttle <= rewind_point:
+ display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
+ rewind_point = throttle
+
+ queued = False
+ starting_worker = self._cur_worker
+ while True:
+ if self._cur_worker >= rewind_point:
+ self._cur_worker = 0
+
+ worker_prc = self._workers[self._cur_worker]
+ if worker_prc is None or not worker_prc.is_alive():
+ self._queued_task_cache[(host.name, task._uuid)] = {
+ 'host': host,
+ 'task': task,
+ 'task_vars': task_vars,
+ 'play_context': play_context
+ }
+
+ worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
+ self._workers[self._cur_worker] = worker_prc
+ self._tqm.send_callback('v2_runner_on_start', host, task)
+ worker_prc.start()
+ display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
+ queued = True
+
+ self._cur_worker += 1
+
+ if self._cur_worker >= rewind_point:
+ self._cur_worker = 0
+
+ if queued:
+ break
+ elif self._cur_worker == starting_worker:
+ time.sleep(0.0001)
+
+ if isinstance(task, Handler):
+ self._pending_handler_results += 1
+ else:
+ self._pending_results += 1
+ except (EOFError, IOError, AssertionError) as e:
+ # most likely an abort
+ display.debug("got an error while queuing: %s" % e)
+ return
+ display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
+
+ def get_task_hosts(self, iterator, task_host, task):
+ if task.run_once:
+ host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
+ else:
+ host_list = [task_host.name]
+ return host_list
+
+ def get_delegated_hosts(self, result, task):
+ host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
+ return [host_name or task.delegate_to]
+
+ def _set_always_delegated_facts(self, result, task):
+ """Sets host facts for ``delegate_to`` hosts for facts that should
+ always be delegated
+
+ This operation mutates ``result`` to remove the always delegated facts
+
+ See ``ALWAYS_DELEGATE_FACT_PREFIXES``
+ """
+ if task.delegate_to is None:
+ return
+
+ facts = result['ansible_facts']
+ always_keys = set()
+ _add = always_keys.add
+ for fact_key in facts:
+ for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
+ if fact_key.startswith(always_key):
+ _add(fact_key)
+ if always_keys:
+ _pop = facts.pop
+ always_facts = {
+ 'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
+ }
+ host_list = self.get_delegated_hosts(result, task)
+ _set_host_facts = self._variable_manager.set_host_facts
+ for target_host in host_list:
+ _set_host_facts(target_host, always_facts)
+
+ @debug_closure
+ def _process_pending_results(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
+ '''
+ Reads results off the final queue and takes appropriate action
+ based on the result (executing callbacks, updating state, etc.).
+ '''
+
+ ret_results = []
+ handler_templar = Templar(self._loader)
+
+ def get_original_host(host_name):
+ # FIXME: this should not need x2 _inventory
+ host_name = to_text(host_name)
+ if host_name in self._inventory.hosts:
+ return self._inventory.hosts[host_name]
+ else:
+ return self._inventory.get_host(host_name)
+
+ def search_handler_blocks_by_name(handler_name, handler_blocks):
+ # iterate in reversed order since last handler loaded with the same name wins
+ for handler_block in reversed(handler_blocks):
+ for handler_task in handler_block.block:
+ if handler_task.name:
+ if not handler_task.cached_name:
+ if handler_templar.is_template(handler_task.name):
+ handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
+ task=handler_task,
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all)
+ handler_task.name = handler_templar.template(handler_task.name)
+ handler_task.cached_name = True
+
+ try:
+ # first we check with the full result of get_name(), which may
+ # include the role name (if the handler is from a role). If that
+ # is not found, we resort to the simple name field, which doesn't
+ # have anything extra added to it.
+ candidates = (
+ handler_task.name,
+ handler_task.get_name(include_role_fqcn=False),
+ handler_task.get_name(include_role_fqcn=True),
+ )
+
+ if handler_name in candidates:
+ return handler_task
+ except (UndefinedError, AnsibleUndefinedVariable):
+ # We skip this handler because it may be using
+ # a variable in the name that was conditionally included via
+ # set_fact or some other method, and we don't want to error
+ # out unnecessarily
+ continue
+ return None
+
+ cur_pass = 0
+ while True:
+ try:
+ self._results_lock.acquire()
+ if do_handlers:
+ task_result = self._handler_results.popleft()
+ else:
+ task_result = self._results.popleft()
+ except IndexError:
+ break
+ finally:
+ self._results_lock.release()
+
+ # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
+ original_host = get_original_host(task_result._host)
+ queue_cache_entry = (original_host.name, task_result._task)
+ found_task = self._queued_task_cache.get(queue_cache_entry)['task']
+ original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
+ original_task._parent = found_task._parent
+ original_task.from_attrs(task_result._task_fields)
+
+ task_result._host = original_host
+ task_result._task = original_task
+
+ # send callbacks for 'non final' results
+ if '_ansible_retry' in task_result._result:
+ self._tqm.send_callback('v2_runner_retry', task_result)
+ continue
+ elif '_ansible_item_result' in task_result._result:
+ if task_result.is_failed() or task_result.is_unreachable():
+ self._tqm.send_callback('v2_runner_item_on_failed', task_result)
+ elif task_result.is_skipped():
+ self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
+ else:
+ if 'diff' in task_result._result:
+ if self._diff or getattr(original_task, 'diff', False):
+ self._tqm.send_callback('v2_on_file_diff', task_result)
+ self._tqm.send_callback('v2_runner_item_on_ok', task_result)
+ continue
+
+ # all host status messages contain 2 entries: (msg, task_result)
+ role_ran = False
+ if task_result.is_failed():
+ role_ran = True
+ ignore_errors = original_task.ignore_errors
+ if not ignore_errors:
+ display.debug("marking %s as failed" % original_host.name)
+ if original_task.run_once:
+ # if we're using run_once, we have to fail every host here
+ for h in self._inventory.get_hosts(iterator._play.hosts):
+ if h.name not in self._tqm._unreachable_hosts:
+ state, _ = iterator.get_next_task_for_host(h, peek=True)
+ iterator.mark_host_failed(h)
+ state, new_task = iterator.get_next_task_for_host(h, peek=True)
+ else:
+ iterator.mark_host_failed(original_host)
+
+ # grab the current state and if we're iterating on the rescue portion
+ # of a block then we save the failed task in a special var for use
+ # within the rescue/always
+ state, _ = iterator.get_next_task_for_host(original_host, peek=True)
+
+ if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
+ self._tqm._failed_hosts[original_host.name] = True
+
+ # Use of get_active_state() here helps detect proper state if, say, we are in a rescue
+ # block from an included file (include_tasks). In a non-included rescue case, a rescue
+ # that starts with a new 'block' will have an active state of ITERATING_TASKS, so we also
+ # check the current state block tree to see if any blocks are rescuing.
+ if state and (iterator.get_active_state(state).run_state == iterator.ITERATING_RESCUE or
+ iterator.is_any_block_rescuing(state)):
+ self._tqm._stats.increment('rescued', original_host.name)
+ self._variable_manager.set_nonpersistent_facts(
+ original_host.name,
+ dict(
+ ansible_failed_task=original_task.serialize(),
+ ansible_failed_result=task_result._result,
+ ),
+ )
+ else:
+ self._tqm._stats.increment('failures', original_host.name)
+ else:
+ self._tqm._stats.increment('ok', original_host.name)
+ self._tqm._stats.increment('ignored', original_host.name)
+ if 'changed' in task_result._result and task_result._result['changed']:
+ self._tqm._stats.increment('changed', original_host.name)
+ self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
+ elif task_result.is_unreachable():
+ ignore_unreachable = original_task.ignore_unreachable
+ if not ignore_unreachable:
+ self._tqm._unreachable_hosts[original_host.name] = True
+ iterator._play._removed_hosts.append(original_host.name)
+ else:
+ self._tqm._stats.increment('skipped', original_host.name)
+ task_result._result['skip_reason'] = 'Host %s is unreachable' % original_host.name
+ self._tqm._stats.increment('dark', original_host.name)
+ self._tqm.send_callback('v2_runner_on_unreachable', task_result)
+ elif task_result.is_skipped():
+ self._tqm._stats.increment('skipped', original_host.name)
+ self._tqm.send_callback('v2_runner_on_skipped', task_result)
+ else:
+ role_ran = True
+
+ if original_task.loop:
+ # this task had a loop, and has more than one result, so
+ # loop over all of them instead of a single result
+ result_items = task_result._result.get('results', [])
+ else:
+ result_items = [task_result._result]
+
+ for result_item in result_items:
+ if '_ansible_notify' in result_item:
+ if task_result.is_changed():
+ # The shared dictionary for notified handlers is a proxy, which
+ # does not detect when sub-objects within the proxy are modified.
+ # So, per the docs, we reassign the list so the proxy picks up and
+ # notifies all other threads
+ for handler_name in result_item['_ansible_notify']:
+ found = False
+ # Find the handler using the above helper. First we look up the
+ # dependency chain of the current task (if it's from a role), otherwise
+ # we just look through the list of handlers in the current play/all
+ # roles and use the first one that matches the notify name
+ target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
+ if target_handler is not None:
+ found = True
+ if target_handler.notify_host(original_host):
+ self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
+
+ for listening_handler_block in iterator._play.handlers:
+ for listening_handler in listening_handler_block.block:
+ listeners = getattr(listening_handler, 'listen', []) or []
+ if not listeners:
+ continue
+
+ listeners = listening_handler.get_validated_value(
+ 'listen', listening_handler._valid_attrs['listen'], listeners, handler_templar
+ )
+ if handler_name not in listeners:
+ continue
+ else:
+ found = True
+
+ if listening_handler.notify_host(original_host):
+ self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
+
+ # and if none were found, then we raise an error
+ if not found:
+ msg = ("The requested handler '%s' was not found in either the main handlers list or in the listening "
+ "handlers list" % handler_name)
+ if C.ERROR_ON_MISSING_HANDLER:
+ raise AnsibleError(msg)
+ else:
+ display.warning(msg)
+
+ if 'add_host' in result_item:
+ # this task added a new host (add_host module)
+ new_host_info = result_item.get('add_host', dict())
+ self._add_host(new_host_info, result_item)
+ post_process_whens(result_item, original_task, handler_templar)
+
+ elif 'add_group' in result_item:
+ # this task added a new group (group_by module)
+ self._add_group(original_host, result_item)
+ post_process_whens(result_item, original_task, handler_templar)
+
+ if 'ansible_facts' in result_item:
+ # if delegated fact and we are delegating facts, we need to change target host for them
+ if original_task.delegate_to is not None and original_task.delegate_facts:
+ host_list = self.get_delegated_hosts(result_item, original_task)
+ else:
+ # Set facts that should always be on the delegated hosts
+ self._set_always_delegated_facts(result_item, original_task)
+
+ host_list = self.get_task_hosts(iterator, original_host, original_task)
+
+ if original_task.action in C._ACTION_INCLUDE_VARS:
+ for (var_name, var_value) in iteritems(result_item['ansible_facts']):
+ # find the host we're actually referring to here, which may
+ # be a host that is not really in inventory at all
+ for target_host in host_list:
+ self._variable_manager.set_host_variable(target_host, var_name, var_value)
+ else:
+ cacheable = result_item.pop('_ansible_facts_cacheable', False)
+ for target_host in host_list:
+ # so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
+ # to avoid issues with precedence and confusion with set_fact normal operation,
+ # we set BOTH fact and nonpersistent_facts (aka hostvar)
+ # when fact is retrieved from cache in subsequent operations it will have the lower precedence,
+ # but for playbook setting it the 'higher' precedence is kept
+ is_set_fact = original_task.action in C._ACTION_SET_FACT
+ if not is_set_fact or cacheable:
+ self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
+ if is_set_fact:
+ self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
+
+ if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
+
+ if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
+ host_list = self.get_task_hosts(iterator, original_host, original_task)
+ else:
+ host_list = [None]
+
+ data = result_item['ansible_stats']['data']
+ aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
+ for myhost in host_list:
+ for k in data.keys():
+ if aggregate:
+ self._tqm._stats.update_custom_stats(k, data[k], myhost)
+ else:
+ self._tqm._stats.set_custom_stats(k, data[k], myhost)
+
+ if 'diff' in task_result._result:
+ if self._diff or getattr(original_task, 'diff', False):
+ self._tqm.send_callback('v2_on_file_diff', task_result)
+
+ if not isinstance(original_task, TaskInclude):
+ self._tqm._stats.increment('ok', original_host.name)
+ if 'changed' in task_result._result and task_result._result['changed']:
+ self._tqm._stats.increment('changed', original_host.name)
+
+ # finally, send the ok for this task
+ self._tqm.send_callback('v2_runner_on_ok', task_result)
+
+ # register final results
+ if original_task.register:
+ host_list = self.get_task_hosts(iterator, original_host, original_task)
+
+ clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
+ if 'invocation' in clean_copy:
+ del clean_copy['invocation']
+
+ for target_host in host_list:
+ self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
+
+ if do_handlers:
+ self._pending_handler_results -= 1
+ else:
+ self._pending_results -= 1
+ if original_host.name in self._blocked_hosts:
+ del self._blocked_hosts[original_host.name]
+
+ # If this is a role task, mark the parent role as being run (if
+ # the task was ok or failed, but not skipped or unreachable)
+ if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
+ # lookup the role in the ROLE_CACHE to make sure we're dealing
+ # with the correct object and mark it as executed
+ for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role.get_name()]):
+ if role_obj._uuid == original_task._role._uuid:
+ role_obj._had_task_run[original_host.name] = True
+
+ ret_results.append(task_result)
+
+ if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
+ break
+
+ cur_pass += 1
+
+ return ret_results
+
+ def _wait_on_handler_results(self, iterator, handler, notified_hosts):
+ '''
+ Wait for the handler tasks to complete, using a short sleep
+ between checks to ensure we don't spin lock
+ '''
+
+ ret_results = []
+ handler_results = 0
+
+ display.debug("waiting for handler results...")
+ while (self._pending_handler_results > 0 and
+ handler_results < len(notified_hosts) and
+ not self._tqm._terminated):
+
+ if self._tqm.has_dead_workers():
+ raise AnsibleError("A worker was found in a dead state")
+
+ results = self._process_pending_results(iterator, do_handlers=True)
+ ret_results.extend(results)
+ handler_results += len([
+ r._host for r in results if r._host in notified_hosts and
+ r.task_name == handler.name])
+ if self._pending_handler_results > 0:
+ time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
+
+ display.debug("no more pending handlers, returning what we have")
+
+ return ret_results
+
+ def _wait_on_pending_results(self, iterator):
+ '''
+ Wait for the shared counter to drop to zero, using a short sleep
+ between checks to ensure we don't spin lock
+ '''
+
+ ret_results = []
+
+ display.debug("waiting for pending results...")
+ while self._pending_results > 0 and not self._tqm._terminated:
+
+ if self._tqm.has_dead_workers():
+ raise AnsibleError("A worker was found in a dead state")
+
+ results = self._process_pending_results(iterator)
+ ret_results.extend(results)
+ if self._pending_results > 0:
+ time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
+
+ display.debug("no more pending results, returning what we have")
+
+ return ret_results
+
+ def _add_host(self, host_info, result_item):
+ '''
+ Helper function to add a new host to inventory based on a task result.
+ '''
+
+ changed = False
+
+ if host_info:
+ host_name = host_info.get('host_name')
+
+ # Check if host in inventory, add if not
+ if host_name not in self._inventory.hosts:
+ self._inventory.add_host(host_name, 'all')
+ self._hosts_cache_all.append(host_name)
+ changed = True
+ new_host = self._inventory.hosts.get(host_name)
+
+ # Set/update the vars for this host
+ new_host_vars = new_host.get_vars()
+ new_host_combined_vars = combine_vars(new_host_vars, host_info.get('host_vars', dict()))
+ if new_host_vars != new_host_combined_vars:
+ new_host.vars = new_host_combined_vars
+ changed = True
+
+ new_groups = host_info.get('groups', [])
+ for group_name in new_groups:
+ if group_name not in self._inventory.groups:
+ group_name = self._inventory.add_group(group_name)
+ changed = True
+ new_group = self._inventory.groups[group_name]
+ if new_group.add_host(self._inventory.hosts[host_name]):
+ changed = True
+
+ # reconcile inventory, ensures inventory rules are followed
+ if changed:
+ self._inventory.reconcile_inventory()
+
+ result_item['changed'] = changed
+
+ def _add_group(self, host, result_item):
+ '''
+ Helper function to add a group (if it does not exist), and to assign the
+ specified host to that group.
+ '''
+
+ changed = False
+
+ # the host here is from the executor side, which means it was a
+ # serialized/cloned copy and we'll need to look up the proper
+ # host object from the master inventory
+ real_host = self._inventory.hosts.get(host.name)
+ if real_host is None:
+ if host.name == self._inventory.localhost.name:
+ real_host = self._inventory.localhost
+ else:
+ raise AnsibleError('%s cannot be matched in inventory' % host.name)
+ group_name = result_item.get('add_group')
+ parent_group_names = result_item.get('parent_groups', [])
+
+ if group_name not in self._inventory.groups:
+ group_name = self._inventory.add_group(group_name)
+
+ for name in parent_group_names:
+ if name not in self._inventory.groups:
+ # create the new group and add it to inventory
+ self._inventory.add_group(name)
+ changed = True
+
+ group = self._inventory.groups[group_name]
+ for parent_group_name in parent_group_names:
+ parent_group = self._inventory.groups[parent_group_name]
+ new = parent_group.add_child_group(group)
+ if new and not changed:
+ changed = True
+
+ if real_host not in group.get_hosts():
+ changed = group.add_host(real_host)
+
+ if group not in real_host.get_groups():
+ changed = real_host.add_group(group)
+
+ if changed:
+ self._inventory.reconcile_inventory()
+
+ result_item['changed'] = changed
+
+ def _copy_included_file(self, included_file):
+ '''
+ A proven safe and performant way to create a copy of an included file
+ '''
+ ti_copy = included_file._task.copy(exclude_parent=True)
+ ti_copy._parent = included_file._task._parent
+
+ temp_vars = ti_copy.vars.copy()
+ temp_vars.update(included_file._vars)
+
+ ti_copy.vars = temp_vars
+
+ return ti_copy
+
+ def _load_included_file(self, included_file, iterator, is_handler=False):
+ '''
+ Loads an included YAML file of tasks, applying the optional set of variables.
+ '''
+
+ display.debug("loading included file: %s" % included_file._filename)
+ try:
+ data = self._loader.load_from_file(included_file._filename)
+ if data is None:
+ return []
+ elif not isinstance(data, list):
+ raise AnsibleError("included task files must contain a list of tasks")
+
+ ti_copy = self._copy_included_file(included_file)
+ # pop tags out of the include args, if they were specified there, and assign
+ # them to the include. If the include already had tags specified, we raise an
+ # error so that users know not to specify them both ways
+ tags = included_file._task.vars.pop('tags', [])
+ if isinstance(tags, string_types):
+ tags = tags.split(',')
+ if len(tags) > 0:
+ if len(included_file._task.tags) > 0:
+ raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
+ "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
+ obj=included_file._task._ds)
+ display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option",
+ version='2.12', collection_name='ansible.builtin')
+ included_file._task.tags = tags
+
+ block_list = load_list_of_blocks(
+ data,
+ play=iterator._play,
+ parent_block=ti_copy.build_parent_block(),
+ role=included_file._task._role,
+ use_handlers=is_handler,
+ loader=self._loader,
+ variable_manager=self._variable_manager,
+ )
+
+ # since we skip incrementing the stats when the task result is
+ # first processed, we do so now for each host in the list
+ for host in included_file._hosts:
+ self._tqm._stats.increment('ok', host.name)
+
+ except AnsibleError as e:
+ if isinstance(e, AnsibleFileNotFound):
+ reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
+ else:
+ reason = to_text(e)
+
+ # mark all of the hosts including this file as failed, send callbacks,
+ # and increment the stats for this host
+ for host in included_file._hosts:
+ tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
+ iterator.mark_host_failed(host)
+ self._tqm._failed_hosts[host.name] = True
+ self._tqm._stats.increment('failures', host.name)
+ self._tqm.send_callback('v2_runner_on_failed', tr)
+ return []
+
+ # finally, send the callback and return the list of blocks loaded
+ self._tqm.send_callback('v2_playbook_on_include', included_file)
+ display.debug("done processing included file")
+ return block_list
+
+ def run_handlers(self, iterator, play_context):
+ '''
+ Runs handlers on those hosts which have been notified.
+ '''
+
+ result = self._tqm.RUN_OK
+
+ for handler_block in iterator._play.handlers:
+ # FIXME: handlers need to support the rescue/always portions of blocks too,
+ # but this may take some work in the iterator and gets tricky when
+ # we consider the ability of meta tasks to flush handlers
+ for handler in handler_block.block:
+ if handler.notified_hosts:
+ result = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context)
+ if not result:
+ break
+ return result
+
+ def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
+
+ # FIXME: need to use iterator.get_failed_hosts() instead?
+ # if not len(self.get_hosts_remaining(iterator._play)):
+ # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ # result = False
+ # break
+ if notified_hosts is None:
+ notified_hosts = handler.notified_hosts[:]
+
+ # strategy plugins that filter hosts need access to the iterator to identify failed hosts
+ failed_hosts = self._filter_notified_failed_hosts(iterator, notified_hosts)
+ notified_hosts = self._filter_notified_hosts(notified_hosts)
+ notified_hosts += failed_hosts
+
+ if len(notified_hosts) > 0:
+ saved_name = handler.name
+ handler.name = handler_name
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
+ handler.name = saved_name
+
+ bypass_host_loop = False
+ try:
+ action = plugin_loader.action_loader.get(handler.action, class_only=True, collection_list=handler.collections)
+ if getattr(action, 'BYPASS_HOST_LOOP', False):
+ bypass_host_loop = True
+ except KeyError:
+ # we don't care here, because the action may simply not have a
+ # corresponding action plugin
+ pass
+
+ host_results = []
+ for host in notified_hosts:
+ if not iterator.is_failed(host) or iterator._play.force_handlers:
+ task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ self.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=self._loader, variables=task_vars)
+ if not handler.cached_name:
+ handler.name = templar.template(handler.name)
+ handler.cached_name = True
+
+ self._queue_task(host, handler, task_vars, play_context)
+
+ if templar.template(handler.run_once) or bypass_host_loop:
+ break
+
+ # collect the results from the handler run
+ host_results = self._wait_on_handler_results(iterator, handler, notified_hosts)
+
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+
+ result = True
+ if len(included_files) > 0:
+ for included_file in included_files:
+ try:
+ new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
+ # for every task in each block brought in by the include, add the list
+ # of hosts which included the file to the notified_handlers dict
+ for block in new_blocks:
+ iterator._play.handlers.append(block)
+ for task in block.block:
+ task_name = task.get_name()
+ display.debug("adding task '%s' included in handler '%s'" % (task_name, handler_name))
+ task.notified_hosts = included_file._hosts[:]
+ result = self._do_handler_run(
+ handler=task,
+ handler_name=task_name,
+ iterator=iterator,
+ play_context=play_context,
+ notified_hosts=included_file._hosts[:],
+ )
+ if not result:
+ break
+ except AnsibleError as e:
+ for host in included_file._hosts:
+ iterator.mark_host_failed(host)
+ self._tqm._failed_hosts[host.name] = True
+ display.warning(to_text(e))
+ continue
+
+ # remove hosts from notification list
+ handler.notified_hosts = [
+ h for h in handler.notified_hosts
+ if h not in notified_hosts]
+ display.debug("done running handlers, result is: %s" % result)
+ return result
+
+ def _filter_notified_failed_hosts(self, iterator, notified_hosts):
+ return []
+
+ def _filter_notified_hosts(self, notified_hosts):
+ '''
+ Filter notified hosts according to the strategy
+ '''
+
+ # The default (linear) strategy does not filter hosts;
+ # we return a copy to avoid race conditions
+ return notified_hosts[:]
+
+ def _take_step(self, task, host=None):
+
+ ret = False
+ msg = u'Perform task: %s ' % task
+ if host:
+ msg += u'on %s ' % host
+ msg += u'(N)o/(y)es/(c)ontinue: '
+ resp = display.prompt(msg)
+
+ if resp.lower() in ['y', 'yes']:
+ display.debug("User ran task")
+ ret = True
+ elif resp.lower() in ['c', 'continue']:
+ display.debug("User ran task and canceled step mode")
+ self._step = False
+ ret = True
+ else:
+ display.debug("User skipped task")
+
+ display.banner(msg)
+
+ return ret
+
+ def _cond_not_supported_warn(self, task_name):
+ display.warning("%s task does not support when conditional" % task_name)
+
+ def _execute_meta(self, task, play_context, iterator, target_host):
+
+ # meta tasks store their args in the _raw_params field of args,
+ # since they do not use k=v pairs, so get that
+ meta_action = task.args.get('_raw_params')
+
+ def _evaluate_conditional(h):
+ all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ templar = Templar(loader=self._loader, variables=all_vars)
+ return task.evaluate_conditional(templar, all_vars)
+
+ skipped = False
+ msg = ''
+ # The top-level conditions should only compare meta_action
+ if meta_action == 'noop':
+ # FIXME: issue a callback for the noop here?
+ if task.when:
+ self._cond_not_supported_warn(meta_action)
+ msg = "noop"
+ elif meta_action == 'flush_handlers':
+ if task.when:
+ self._cond_not_supported_warn(meta_action)
+ self._flushed_hosts[target_host] = True
+ self.run_handlers(iterator, play_context)
+ self._flushed_hosts[target_host] = False
+ msg = "ran handlers"
+ elif meta_action == 'refresh_inventory':
+ if task.when:
+ self._cond_not_supported_warn(meta_action)
+ self._inventory.refresh_inventory()
+ self._set_hosts_cache(iterator._play)
+ msg = "inventory successfully refreshed"
+ elif meta_action == 'clear_facts':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ hostname = host.get_name()
+ self._variable_manager.clear_facts(hostname)
+ msg = "facts cleared"
+ else:
+ skipped = True
+ elif meta_action == 'clear_host_errors':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ self._tqm._failed_hosts.pop(host.name, False)
+ self._tqm._unreachable_hosts.pop(host.name, False)
+ iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
+ msg = "cleared host errors"
+ else:
+ skipped = True
+ elif meta_action == 'end_play':
+ if _evaluate_conditional(target_host):
+ for host in self._inventory.get_hosts(iterator._play.hosts):
+ if host.name not in self._tqm._unreachable_hosts:
+ iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
+ msg = "ending play"
+ elif meta_action == 'end_host':
+ if _evaluate_conditional(target_host):
+ iterator._host_states[target_host.name].run_state = iterator.ITERATING_COMPLETE
+ iterator._play._removed_hosts.append(target_host.name)
+ msg = "ending play for %s" % target_host.name
+ else:
+ skipped = True
+ msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
+ elif meta_action == 'reset_connection':
+ all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ templar = Templar(loader=self._loader, variables=all_vars)
+
+ # apply the given task's information to the connection info,
+ # which may override some fields already set by the play or
+ # the options specified on the command line
+ play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
+
+ # fields set from the play/task may be based on variables, so we have to
+ # do the same kind of post validation step on it here before we use it.
+ play_context.post_validate(templar=templar)
+
+ # now that the play context is finalized, if the remote_addr is not set
+ # default to using the host's address field as the remote address
+ if not play_context.remote_addr:
+ play_context.remote_addr = target_host.address
+
+ # We also add "magic" variables back into the variables dict to make sure
+ # a certain subset of variables exist.
+ play_context.update_vars(all_vars)
+
+ if task.when:
+ self._cond_not_supported_warn(meta_action)
+
+ if target_host in self._active_connections:
+ connection = Connection(self._active_connections[target_host])
+ del self._active_connections[target_host]
+ else:
+ connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
+ play_context.set_attributes_from_plugin(connection)
+
+ if connection:
+ try:
+ connection.reset()
+ msg = 'reset connection'
+ except ConnectionError as e:
+ # most likely socket is already closed
+ display.debug("got an error while closing persistent connection: %s" % e)
+ else:
+ msg = 'no connection, nothing to reset'
+ else:
+ raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
+
+ result = {'msg': msg}
+ if skipped:
+ result['skipped'] = True
+ else:
+ result['changed'] = False
+
+ display.vv("META: %s" % msg)
+
+ return [TaskResult(target_host, task, result)]
+
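
As the comment at the top of _execute_meta notes, meta tasks carry their action name in the _raw_params arg rather than k=v pairs. A hedged sketch of that string-dispatch idea in isolation (not the Ansible API):

    def execute_meta(task_args):
        """Dispatch on the meta action string, as _execute_meta does above."""
        meta_action = task_args.get('_raw_params')
        handlers = {
            'noop': lambda: 'noop',
            'flush_handlers': lambda: 'ran handlers',
            'refresh_inventory': lambda: 'inventory successfully refreshed',
        }
        try:
            return handlers[meta_action]()
        except KeyError:
            raise ValueError('invalid meta action requested: %s' % meta_action)

    print(execute_meta({'_raw_params': 'noop'}))  # -> noop
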
+ def get_hosts_left(self, iterator):
+ ''' returns list of available hosts for this iterator by filtering out unreachables '''
+
+ hosts_left = []
+ for host in self._hosts_cache:
+ if host not in self._tqm._unreachable_hosts:
+ try:
+ hosts_left.append(self._inventory.hosts[host])
+ except KeyError:
+ hosts_left.append(self._inventory.get_host(host))
+ return hosts_left
+
+ def update_active_connections(self, results):
+ ''' updates the current active persistent connections '''
+ for r in results:
+ if 'args' in r._task_fields:
+ socket_path = r._task_fields['args'].get('_ansible_socket')
+ if socket_path:
+ if r._host not in self._active_connections:
+ self._active_connections[r._host] = socket_path
+
+
+class NextAction(object):
+ """ The next action after an interpreter's exit. """
+ REDO = 1
+ CONTINUE = 2
+ EXIT = 3
+
+ def __init__(self, result=EXIT):
+ self.result = result
+
+
+class Debugger(cmd.Cmd):
+ prompt_continuous = '> ' # multiple lines
+
+ def __init__(self, task, host, task_vars, play_context, result, next_action):
+ # cmd.Cmd is an old-style class
+ cmd.Cmd.__init__(self)
+
+ self.prompt = '[%s] %s (debug)> ' % (host, task)
+ self.intro = None
+ self.scope = {}
+ self.scope['task'] = task
+ self.scope['task_vars'] = task_vars
+ self.scope['host'] = host
+ self.scope['play_context'] = play_context
+ self.scope['result'] = result
+ self.next_action = next_action
+
+ def cmdloop(self):
+ try:
+ cmd.Cmd.cmdloop(self)
+ except KeyboardInterrupt:
+ pass
+
+ do_h = cmd.Cmd.do_help
+
+ def do_EOF(self, args):
+ """Quit"""
+ return self.do_quit(args)
+
+ def do_quit(self, args):
+ """Quit"""
+ display.display('User interrupted execution')
+ self.next_action.result = NextAction.EXIT
+ return True
+
+ do_q = do_quit
+
+ def do_continue(self, args):
+ """Continue to next result"""
+ self.next_action.result = NextAction.CONTINUE
+ return True
+
+ do_c = do_continue
+
+ def do_redo(self, args):
+ """Schedule task for re-execution. The re-execution may not be the next result"""
+ self.next_action.result = NextAction.REDO
+ return True
+
+ do_r = do_redo
+
+ def do_update_task(self, args):
+ """Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
+ templar = Templar(None, shared_loader_obj=None, variables=self.scope['task_vars'])
+ task = self.scope['task']
+ task = task.load_data(task._ds)
+ task.post_validate(templar)
+ self.scope['task'] = task
+
+ do_u = do_update_task
+
+ def evaluate(self, args):
+ try:
+ return eval(args, globals(), self.scope)
+ except Exception:
+ t, v = sys.exc_info()[:2]
+ if isinstance(t, str):
+ exc_type_name = t
+ else:
+ exc_type_name = t.__name__
+ display.display('***%s:%s' % (exc_type_name, repr(v)))
+ raise
+
+ def do_pprint(self, args):
+ """Pretty Print"""
+ try:
+ result = self.evaluate(args)
+ display.display(pprint.pformat(result))
+ except Exception:
+ pass
+
+ do_p = do_pprint
+
+ def execute(self, args):
+ try:
+ code = compile(args + '\n', '<stdin>', 'single')
+ exec(code, globals(), self.scope)
+ except Exception:
+ t, v = sys.exc_info()[:2]
+ if isinstance(t, str):
+ exc_type_name = t
+ else:
+ exc_type_name = t.__name__
+ display.display('***%s:%s' % (exc_type_name, repr(v)))
+ raise
+
+ def default(self, line):
+ try:
+ self.execute(line)
+ except Exception:
+ pass
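
The Debugger reports its outcome through the shared NextAction object rather than a return value; after cmdloop() returns, the caller inspects next_action.result. A minimal sketch of that handshake, using a local stand-in for the classes above:

    class NextAction(object):
        REDO, CONTINUE, EXIT = 1, 2, 3

        def __init__(self, result=EXIT):
            self.result = result

    def handle_debug_outcome(next_action):
        """Sketch of how a strategy might react after Debugger.cmdloop() returns."""
        if next_action.result == NextAction.REDO:
            return 'requeue the task'
        if next_action.result == NextAction.CONTINUE:
            return 'move on to the next result'
        return 'stop execution'

    print(handle_debug_outcome(NextAction(NextAction.CONTINUE)))
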
diff --git a/lib/ansible/plugins/strategy/debug.py b/lib/ansible/plugins/strategy/debug.py
new file mode 100644
index 00000000..1b23c7df
--- /dev/null
+++ b/lib/ansible/plugins/strategy/debug.py
@@ -0,0 +1,37 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ strategy: debug
+ short_description: Executes tasks in an interactive debug session.
+ description:
+ - Task execution is 'linear' but controlled by an interactive debug session.
+ version_added: "2.1"
+ author: Kishin Yagami (!UNKNOWN)
+'''
+
+import cmd
+import pprint
+import sys
+
+from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule
+
+
+class StrategyModule(LinearStrategyModule):
+ def __init__(self, tqm):
+ super(StrategyModule, self).__init__(tqm)
+ self.debugger_active = True
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py
new file mode 100644
index 00000000..77988087
--- /dev/null
+++ b/lib/ansible/plugins/strategy/free.py
@@ -0,0 +1,284 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ strategy: free
+ short_description: Executes tasks without waiting for all hosts
+ description:
+ - Task execution is as fast as possible per batch as defined by C(serial) (default all).
+ Ansible will not wait for other hosts to finish the current task before queuing more tasks for other hosts.
+ All hosts are still attempted for the current task, but it prevents blocking new tasks for hosts that have already finished.
+ - With the free strategy, unlike the default linear strategy, a host that is slow or stuck on a specific task
+ won't hold up the rest of the hosts and tasks.
+ version_added: "2.0"
+ author: Ansible Core Team
+'''
+
+import time
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.playbook.included_file import IncludedFile
+from ansible.plugins.loader import action_loader
+from ansible.plugins.strategy import StrategyBase
+from ansible.template import Templar
+from ansible.module_utils._text import to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class StrategyModule(StrategyBase):
+
+ # This strategy manages throttling on its own, so we don't want it done in queue_task
+ ALLOW_BASE_THROTTLING = False
+
+ def _filter_notified_failed_hosts(self, iterator, notified_hosts):
+
+ # If --force-handlers is used we may act on hosts that have failed
+ return [host for host in notified_hosts if iterator.is_failed(host)]
+
+ def _filter_notified_hosts(self, notified_hosts):
+ '''
+ Filter notified hosts according to the strategy
+ '''
+
+ # We act only on hosts that are ready to flush handlers
+ return [host for host in notified_hosts
+ if host in self._flushed_hosts and self._flushed_hosts[host]]
+
+ def __init__(self, tqm):
+ super(StrategyModule, self).__init__(tqm)
+ self._host_pinned = False
+
+ def run(self, iterator, play_context):
+ '''
+ The "free" strategy is a bit more complex, in that it allows tasks to
+ be sent to hosts as quickly as they can be processed. This means that
+ some hosts may finish very quickly if their tasks result in little or no
+ work compared to other systems.
+
+ The algorithm used here also tries to be more "fair" when iterating
+ through hosts by remembering the last host in the list to be given a task
+ and starting the search from there as opposed to the top of the hosts
+ list again, which would end up favoring hosts near the beginning of the
+ list.
+ '''
+
+ # the last host to be given a task
+ last_host = 0
+
+ result = self._tqm.RUN_OK
+
+ # start with all workers being counted as being free
+ workers_free = len(self._workers)
+
+ self._set_hosts_cache(iterator._play)
+
+ work_to_do = True
+ while work_to_do and not self._tqm._terminated:
+
+ hosts_left = self.get_hosts_left(iterator)
+
+ if len(hosts_left) == 0:
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result = False
+ break
+
+ work_to_do = False # assume we have no more work to do
+ starting_host = last_host # save current position so we know when we've looped back around and need to break
+
+ # try and find an unblocked host with a task to run
+ host_results = []
+ while True:
+ host = hosts_left[last_host]
+ display.debug("next free host: %s" % host)
+ host_name = host.get_name()
+
+ # peek at the next task for the host, to see if there's
+ # anything to do for this host
+ (state, task) = iterator.get_next_task_for_host(host, peek=True)
+ display.debug("free host state: %s" % state, host=host_name)
+ display.debug("free host task: %s" % task, host=host_name)
+ if host_name not in self._tqm._unreachable_hosts and task:
+
+ # set the flag so the outer loop knows we've still found
+ # some work which needs to be done
+ work_to_do = True
+
+ display.debug("this host has work to do", host=host_name)
+
+ # check to see if this host is blocked (still executing a previous task)
+ if (host_name not in self._blocked_hosts or not self._blocked_hosts[host_name]):
+
+ display.debug("getting variables", host=host_name)
+ task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all)
+ self.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=self._loader, variables=task_vars)
+ display.debug("done getting variables", host=host_name)
+
+ try:
+ throttle = int(templar.template(task.throttle))
+ except Exception as e:
+ raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
+
+ if throttle > 0:
+ same_tasks = 0
+ for worker in self._workers:
+ if worker and worker.is_alive() and worker._task._uuid == task._uuid:
+ same_tasks += 1
+
+ display.debug("task: %s, same_tasks: %d" % (task.get_name(), same_tasks))
+ if same_tasks >= throttle:
+ break
+
+ # pop the task, mark the host blocked, and queue it
+ self._blocked_hosts[host_name] = True
+ (state, task) = iterator.get_next_task_for_host(host)
+
+ try:
+ action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
+ except KeyError:
+ # we don't care here, because the action may simply not have a
+ # corresponding action plugin
+ action = None
+
+ try:
+ task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
+ display.debug("done templating", host=host_name)
+ except Exception:
+ # just ignore any errors during task name templating,
+ # we don't care if it just shows the raw name
+ display.debug("templating failed for some reason", host=host_name)
+
+ run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
+ if run_once:
+ if action and getattr(action, 'BYPASS_HOST_LOOP', False):
+ raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
+ "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
+ else:
+ display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
+ "executed for every host in the inventory list.")
+
+ # check to see if this task should be skipped, due to it being a member of a
+ # role which has already run (and whether that role allows duplicate execution)
+ if task._role and task._role.has_run(host):
+ # If there is no metadata, the default behavior is to not allow duplicates,
+ # if there is metadata, check to see if the allow_duplicates flag was set to true
+ if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+ display.debug("'%s' skipped because role has already run" % task, host=host_name)
+ del self._blocked_hosts[host_name]
+ continue
+
+ if task.action in C._ACTION_META:
+ self._execute_meta(task, play_context, iterator, target_host=host)
+ self._blocked_hosts[host_name] = False
+ else:
+ # handle step if needed, skip meta actions as they are used internally
+ if not self._step or self._take_step(task, host_name):
+ if task.any_errors_fatal:
+ display.warning("Using any_errors_fatal with the free strategy is not supported, "
+ "as tasks are executed independently on each host")
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ self._queue_task(host, task, task_vars, play_context)
+ # each task is counted as a worker being busy
+ workers_free -= 1
+ del task_vars
+ else:
+ display.debug("%s is blocked, skipping for now" % host_name)
+
+ # all workers have tasks to do (and the current host isn't done with the play).
+ # loop back to starting host and break out
+ if self._host_pinned and workers_free == 0 and work_to_do:
+ last_host = starting_host
+ break
+
+ # move on to the next host and make sure we
+ # haven't gone past the end of our hosts list
+ last_host += 1
+ if last_host > len(hosts_left) - 1:
+ last_host = 0
+
+ # if we've looped around back to the start, break out
+ if last_host == starting_host:
+ break
+
+ results = self._process_pending_results(iterator)
+ host_results.extend(results)
+
+ # each result is counted as a worker being free again
+ workers_free += len(results)
+
+ self.update_active_connections(results)
+
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+
+ if len(included_files) > 0:
+ all_blocks = dict((host, []) for host in hosts_left)
+ for included_file in included_files:
+ display.debug("collecting new blocks for %s" % included_file)
+ try:
+ if included_file._is_role:
+ new_ir = self._copy_included_file(included_file)
+
+ new_blocks, handler_blocks = new_ir.get_block_list(
+ play=iterator._play,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+ else:
+ new_blocks = self._load_included_file(included_file, iterator=iterator)
+ except AnsibleError as e:
+ for host in included_file._hosts:
+ iterator.mark_host_failed(host)
+ display.warning(to_text(e))
+ continue
+
+ for new_block in new_blocks:
+ task_vars = self._variable_manager.get_vars(play=iterator._play, task=new_block._parent,
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all)
+ final_block = new_block.filter_tagged_tasks(task_vars)
+ for host in hosts_left:
+ if host in included_file._hosts:
+ all_blocks[host].append(final_block)
+ display.debug("done collecting new blocks for %s" % included_file)
+
+ display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
+ for host in hosts_left:
+ iterator.add_tasks(host, all_blocks[host])
+ display.debug("done adding collected blocks to iterator")
+
+ # pause briefly so we don't spin lock
+ time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
+
+ # collect all the final results
+ results = self._wait_on_pending_results(iterator)
+
+ # run the base class run() method, which executes the cleanup function
+ # and runs any outstanding handlers which have been triggered
+ return super(StrategyModule, self).run(iterator, play_context, result)
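
The fairness logic in run() above boils down to a round-robin scan that resumes from wherever the previous pass stopped, so hosts at the top of the list are not always served first. Stripped of the Ansible specifics (a sketch, not the plugin API):

    def round_robin_pass(hosts, last_host, has_work):
        """Visit hosts starting at last_host, wrapping once around the list."""
        starting_host = last_host
        while True:
            host = hosts[last_host]
            if has_work(host):
                yield host
            last_host = (last_host + 1) % len(hosts)
            if last_host == starting_host:
                break

    print(list(round_robin_pass(['a', 'b', 'c'], 1, lambda h: True)))  # ['b', 'c', 'a']
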
diff --git a/lib/ansible/plugins/strategy/host_pinned.py b/lib/ansible/plugins/strategy/host_pinned.py
new file mode 100644
index 00000000..ba293d36
--- /dev/null
+++ b/lib/ansible/plugins/strategy/host_pinned.py
@@ -0,0 +1,45 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ strategy: host_pinned
+ short_description: Executes tasks on each host without interruption
+ description:
+ Task execution is as fast as possible per host, in batches as defined by C(serial) (default all).
+ Ansible will not start a play for a host unless the play can be finished without interruption by tasks for another host,
+ i.e. the number of hosts with an active play does not exceed the number of forks.
+ Ansible will not wait for other hosts to finish the current task before queuing the next task for a host that has finished.
+ Once a host is done with the play, it opens its slot to a new host that was waiting to start.
+ Other than that, it behaves just like the "free" strategy.
+ version_added: "2.7"
+ author: Ansible Core Team
+'''
+
+from ansible.plugins.strategy.free import StrategyModule as FreeStrategyModule
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class StrategyModule(FreeStrategyModule):
+
+ def __init__(self, tqm):
+ super(StrategyModule, self).__init__(tqm)
+ self._host_pinned = True
diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py
new file mode 100644
index 00000000..1810757d
--- /dev/null
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -0,0 +1,461 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ strategy: linear
+ short_description: Executes tasks in a linear fashion
+ description:
+ - Task execution is in lockstep per host batch as defined by C(serial) (default all).
+ Up to the fork limit of hosts will execute each task at the same time, and then
+ the next series of hosts runs, until the batch is done, before going on to the next task.
+ version_added: "2.0"
+ notes:
+ - This was the default Ansible behaviour before 'strategy plugins' were introduced in 2.0.
+ author: Ansible Core Team
+'''
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.executor.play_iterator import PlayIterator
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_text
+from ansible.playbook.block import Block
+from ansible.playbook.included_file import IncludedFile
+from ansible.playbook.task import Task
+from ansible.plugins.loader import action_loader
+from ansible.plugins.strategy import StrategyBase
+from ansible.template import Templar
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class StrategyModule(StrategyBase):
+
+ noop_task = None
+
+ def _replace_with_noop(self, target):
+ if self.noop_task is None:
+ raise AnsibleAssertionError('strategy.linear.StrategyModule.noop_task is None, need Task()')
+
+ result = []
+ for el in target:
+ if isinstance(el, Task):
+ result.append(self.noop_task)
+ elif isinstance(el, Block):
+ result.append(self._create_noop_block_from(el, el._parent))
+ return result
+
+ def _create_noop_block_from(self, original_block, parent):
+ noop_block = Block(parent_block=parent)
+ noop_block.block = self._replace_with_noop(original_block.block)
+ noop_block.always = self._replace_with_noop(original_block.always)
+ noop_block.rescue = self._replace_with_noop(original_block.rescue)
+
+ return noop_block
+
+ def _prepare_and_create_noop_block_from(self, original_block, parent, iterator):
+ self.noop_task = Task()
+ self.noop_task.action = 'meta'
+ self.noop_task.args['_raw_params'] = 'noop'
+ self.noop_task.set_loader(iterator._play._loader)
+
+ return self._create_noop_block_from(original_block, parent)
+
+ def _get_next_task_lockstep(self, hosts, iterator):
+ '''
+ Returns a list of (host, task) tuples, where the task may
+ be a noop task to keep the iterator in lock step across
+ all hosts.
+ '''
+
+ noop_task = Task()
+ noop_task.action = 'meta'
+ noop_task.args['_raw_params'] = 'noop'
+ noop_task.set_loader(iterator._play._loader)
+
+ host_tasks = {}
+ display.debug("building list of next tasks for hosts")
+ for host in hosts:
+ host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
+ display.debug("done building task lists")
+
+ num_setups = 0
+ num_tasks = 0
+ num_rescue = 0
+ num_always = 0
+
+ display.debug("counting tasks in each state of execution")
+ host_tasks_to_run = [(host, state_task)
+ for host, state_task in iteritems(host_tasks)
+ if state_task and state_task[1]]
+
+ if host_tasks_to_run:
+ try:
+ lowest_cur_block = min(
+ (iterator.get_active_state(s).cur_block for h, (s, t) in host_tasks_to_run
+ if s.run_state != PlayIterator.ITERATING_COMPLETE))
+ except ValueError:
+ lowest_cur_block = None
+ else:
+ # empty host_tasks_to_run will just run till the end of the function
+ # without ever touching lowest_cur_block
+ lowest_cur_block = None
+
+ for (k, v) in host_tasks_to_run:
+ (s, t) = v
+
+ s = iterator.get_active_state(s)
+ if s.cur_block > lowest_cur_block:
+ # Not the current block, ignore it
+ continue
+
+ if s.run_state == PlayIterator.ITERATING_SETUP:
+ num_setups += 1
+ elif s.run_state == PlayIterator.ITERATING_TASKS:
+ num_tasks += 1
+ elif s.run_state == PlayIterator.ITERATING_RESCUE:
+ num_rescue += 1
+ elif s.run_state == PlayIterator.ITERATING_ALWAYS:
+ num_always += 1
+ display.debug("done counting tasks in each state of execution:\n\tnum_setups: %s\n\tnum_tasks: %s\n\tnum_rescue: %s\n\tnum_always: %s" % (num_setups,
+ num_tasks,
+ num_rescue,
+ num_always))
+
+ def _advance_selected_hosts(hosts, cur_block, cur_state):
+ '''
+ This helper returns the task for all hosts in the requested
+ state, otherwise they get a noop dummy task. This also advances
+ the state of the host, since the given states are determined
+ while using peek=True.
+ '''
+ # we return the values in the order they were originally
+ # specified in the given hosts array
+ rvals = []
+ display.debug("starting to advance hosts")
+ for host in hosts:
+ host_state_task = host_tasks.get(host.name)
+ if host_state_task is None:
+ continue
+ (s, t) = host_state_task
+ s = iterator.get_active_state(s)
+ if t is None:
+ continue
+ if s.run_state == cur_state and s.cur_block == cur_block:
+ new_t = iterator.get_next_task_for_host(host)
+ rvals.append((host, t))
+ else:
+ rvals.append((host, noop_task))
+ display.debug("done advancing hosts to next task")
+ return rvals
+
+ # if any hosts are in ITERATING_SETUP, return the setup task
+ # while all other hosts get a noop
+ if num_setups:
+ display.debug("advancing hosts in ITERATING_SETUP")
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
+
+ # if any hosts are in ITERATING_TASKS, return the next normal
+ # task for these hosts, while all other hosts get a noop
+ if num_tasks:
+ display.debug("advancing hosts in ITERATING_TASKS")
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
+
+ # if any hosts are in ITERATING_RESCUE, return the next rescue
+ # task for these hosts, while all other hosts get a noop
+ if num_rescue:
+ display.debug("advancing hosts in ITERATING_RESCUE")
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
+
+ # if any hosts are in ITERATING_ALWAYS, return the next always
+ # task for these hosts, while all other hosts get a noop
+ if num_always:
+ display.debug("advancing hosts in ITERATING_ALWAYS")
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
+
+ # at this point, everything must be ITERATING_COMPLETE, so we
+ # return None for all hosts in the list
+ display.debug("all hosts are done, so returning None's for all hosts")
+ return [(host, None) for host in hosts]
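
_advance_selected_hosts keeps every host in lockstep by handing a real task only to hosts in the chosen state and a noop to everyone else. The padding idea in isolation, with illustrative names:

    NOOP = 'noop'

    def lockstep_pad(host_states, wanted_state, next_task):
        """Give hosts in wanted_state their task; pad the rest with noops."""
        return [
            (host, next_task(host) if state == wanted_state else NOOP)
            for host, state in host_states
        ]

    pairs = lockstep_pad([('h1', 'tasks'), ('h2', 'rescue')], 'tasks',
                         lambda h: 'task-for-%s' % h)
    print(pairs)  # [('h1', 'task-for-h1'), ('h2', 'noop')]
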
+
+ def run(self, iterator, play_context):
+ '''
+ The linear strategy is simple - get the next task and queue
+ it for all hosts, then wait for the queue to drain before
+ moving on to the next task
+ '''
+
+ # iterate over each task, while there is one left to run
+ result = self._tqm.RUN_OK
+ work_to_do = True
+
+ self._set_hosts_cache(iterator._play)
+
+ while work_to_do and not self._tqm._terminated:
+
+ try:
+ display.debug("getting the remaining hosts for this loop")
+ hosts_left = self.get_hosts_left(iterator)
+ display.debug("done getting the remaining hosts for this loop")
+
+ # queue up this task for each host in the inventory
+ callback_sent = False
+ work_to_do = False
+
+ host_results = []
+ host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
+
+ # skip control
+ skip_rest = False
+ choose_step = True
+
+ # flag set if task is set to any_errors_fatal
+ any_errors_fatal = False
+
+ results = []
+ for (host, task) in host_tasks:
+ if not task:
+ continue
+
+ if self._tqm._terminated:
+ break
+
+ run_once = False
+ work_to_do = True
+
+ # check to see if this task should be skipped, due to it being a member of a
+ # role which has already run (and whether that role allows duplicate execution)
+ if task._role and task._role.has_run(host):
+ # If there is no metadata, the default behavior is to not allow duplicates,
+ # if there is metadata, check to see if the allow_duplicates flag was set to true
+ if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+ display.debug("'%s' skipped because role has already run" % task)
+ continue
+
+ display.debug("getting variables")
+ task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
+ _hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
+ self.add_tqm_variables(task_vars, play=iterator._play)
+ templar = Templar(loader=self._loader, variables=task_vars)
+ display.debug("done getting variables")
+
+ # test to see if the task across all hosts points to an action plugin which
+ # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
+ # will only send this task to the first host in the list.
+
+ task.action = templar.template(task.action)
+
+ try:
+ action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
+ except KeyError:
+ # we don't care here, because the action may simply not have a
+ # corresponding action plugin
+ action = None
+
+ if task.action in C._ACTION_META:
+ # for the linear strategy, we run meta tasks just once and for
+ # all hosts currently being iterated over rather than one host
+ results.extend(self._execute_meta(task, play_context, iterator, host))
+ if task.args.get('_raw_params', None) not in ('noop', 'reset_connection', 'end_host'):
+ run_once = True
+ if (task.any_errors_fatal or run_once) and not task.ignore_errors:
+ any_errors_fatal = True
+ else:
+ # handle step if needed, skip meta actions as they are used internally
+ if self._step and choose_step:
+ if self._take_step(task):
+ choose_step = False
+ else:
+ skip_rest = True
+ break
+
+ run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
+
+ if (task.any_errors_fatal or run_once) and not task.ignore_errors:
+ any_errors_fatal = True
+
+ if not callback_sent:
+ display.debug("sending task start callback, copying the task so we can template it temporarily")
+ saved_name = task.name
+ display.debug("done copying, going to template now")
+ try:
+ task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
+ display.debug("done templating")
+ except Exception:
+ # just ignore any errors during task name templating,
+ # we don't care if it just shows the raw name
+ display.debug("templating failed for some reason")
+ display.debug("here goes the callback...")
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ task.name = saved_name
+ callback_sent = True
+ display.debug("sending task start callback")
+
+ self._blocked_hosts[host.get_name()] = True
+ self._queue_task(host, task, task_vars, play_context)
+ del task_vars
+
+ # if we're bypassing the host loop, break out now
+ if run_once:
+ break
+
+ results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))
+
+ # go to next host/task group
+ if skip_rest:
+ continue
+
+ display.debug("done queuing things up, now waiting for results queue to drain")
+ if self._pending_results > 0:
+ results += self._wait_on_pending_results(iterator)
+
+ host_results.extend(results)
+
+ self.update_active_connections(results)
+
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ iterator=iterator,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
+
+ include_failure = False
+ if len(included_files) > 0:
+ display.debug("we have included files to process")
+
+ display.debug("generating all_blocks data")
+ all_blocks = dict((host, []) for host in hosts_left)
+ display.debug("done generating all_blocks data")
+ for included_file in included_files:
+ display.debug("processing included file: %s" % included_file._filename)
+ # included hosts get the task list while those excluded get an equal-length
+ # list of noop tasks, to make sure that they continue running in lock-step
+ try:
+ if included_file._is_role:
+ new_ir = self._copy_included_file(included_file)
+
+ new_blocks, handler_blocks = new_ir.get_block_list(
+ play=iterator._play,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+ else:
+ new_blocks = self._load_included_file(included_file, iterator=iterator)
+
+ display.debug("iterating over new_blocks loaded from include file")
+ for new_block in new_blocks:
+ task_vars = self._variable_manager.get_vars(
+ play=iterator._play,
+ task=new_block._parent,
+ _hosts=self._hosts_cache,
+ _hosts_all=self._hosts_cache_all,
+ )
+ display.debug("filtering new block on tags")
+ final_block = new_block.filter_tagged_tasks(task_vars)
+ display.debug("done filtering new block on tags")
+
+ noop_block = self._prepare_and_create_noop_block_from(final_block, task._parent, iterator)
+
+ for host in hosts_left:
+ if host in included_file._hosts:
+ all_blocks[host].append(final_block)
+ else:
+ all_blocks[host].append(noop_block)
+ display.debug("done iterating over new_blocks loaded from include file")
+
+ except AnsibleError as e:
+ for host in included_file._hosts:
+ self._tqm._failed_hosts[host.name] = True
+ iterator.mark_host_failed(host)
+ display.error(to_text(e), wrap_text=False)
+ include_failure = True
+ continue
+
+ # finally go through all of the hosts and append the
+ # accumulated blocks to their list of tasks
+ display.debug("extending task lists for all hosts with included blocks")
+
+ for host in hosts_left:
+ iterator.add_tasks(host, all_blocks[host])
+
+ display.debug("done extending task lists")
+ display.debug("done processing included files")
+
+ display.debug("results queue empty")
+
+ display.debug("checking for any_errors_fatal")
+ failed_hosts = []
+ unreachable_hosts = []
+ for res in results:
+ # execute_meta() does not set 'failed' in the TaskResult
+ # so we skip checking it with the meta tasks and look just at the iterator
+ if (res.is_failed() or res._task.action in C._ACTION_META) and iterator.is_failed(res._host):
+ failed_hosts.append(res._host.name)
+ elif res.is_unreachable():
+ unreachable_hosts.append(res._host.name)
+
+ # if any_errors_fatal and we had an error, mark all hosts as failed
+ if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
+ dont_fail_states = frozenset([iterator.ITERATING_RESCUE, iterator.ITERATING_ALWAYS])
+ for host in hosts_left:
+ (s, _) = iterator.get_next_task_for_host(host, peek=True)
+ # the state may actually be in a child state, use the get_active_state()
+ # method in the iterator to figure out the true active state
+ s = iterator.get_active_state(s)
+ if s.run_state not in dont_fail_states or \
+ s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0:
+ self._tqm._failed_hosts[host.name] = True
+ result |= self._tqm.RUN_FAILED_BREAK_PLAY
+ display.debug("done checking for any_errors_fatal")
+
+ display.debug("checking for max_fail_percentage")
+ if iterator._play.max_fail_percentage is not None and len(results) > 0:
+ percentage = iterator._play.max_fail_percentage / 100.0
+
+ if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
+ for host in hosts_left:
+ # don't double-mark hosts, or the iterator will potentially
+ # fail them out of the rescue/always states
+ if host.name not in failed_hosts:
+ self._tqm._failed_hosts[host.name] = True
+ iterator.mark_host_failed(host)
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result |= self._tqm.RUN_FAILED_BREAK_PLAY
+ display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
+ display.debug("done checking for max_fail_percentage")
+
+ display.debug("checking to see if all hosts have failed and the running result is not ok")
+ if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
+ display.debug("^ not ok, so returning result now")
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ return result
+ display.debug("done checking to see if all hosts have failed")
+
+ except (IOError, EOFError) as e:
+ display.debug("got IOError/EOFError in task loop: %s" % e)
+ # most likely an abort, return failed
+ return self._tqm.RUN_UNKNOWN_ERROR
+
+ # run the base class run() method, which executes the cleanup function
+ # and runs any outstanding handlers which have been triggered
+
+ return super(StrategyModule, self).run(iterator, play_context, result)
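
The max_fail_percentage check near the end of run() is simple arithmetic: the play breaks once failed_hosts / batch_size is strictly greater than the configured fraction. Worked in isolation:

    def exceeds_max_fail(failed_hosts, batch_size, max_fail_percentage):
        """True once the failure ratio is strictly above the threshold."""
        return (failed_hosts / batch_size) > (max_fail_percentage / 100.0)

    print(exceeds_max_fail(3, 10, 30))  # False: 0.3 is not strictly > 0.3
    print(exceeds_max_fail(4, 10, 30))  # True: 0.4 > 0.3
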
diff --git a/lib/ansible/plugins/terminal/__init__.py b/lib/ansible/plugins/terminal/__init__.py
new file mode 100644
index 00000000..bb3ad181
--- /dev/null
+++ b/lib/ansible/plugins/terminal/__init__.py
@@ -0,0 +1,134 @@
+#
+# (c) 2016 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from abc import ABCMeta, abstractmethod
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils.six import with_metaclass
+
+
+class TerminalBase(with_metaclass(ABCMeta, object)):
+ '''
+ A base class for implementing cli connections
+
+ .. note:: Unlike most of Ansible, nearly all strings in
+ :class:`TerminalBase` plugins are byte strings. This is because of
+ how close to the underlying platform these plugins operate. Remember
+ to mark literal strings as byte string (``b"string"``) and to use
+ :func:`~ansible.module_utils._text.to_bytes` and
+ :func:`~ansible.module_utils._text.to_text` to avoid unexpected
+ problems.
+ '''
+
+ #: compiled bytes regular expressions to match stdout
+ terminal_stdout_re = []
+
+ #: compiled bytes regular expressions to match stderr
+ terminal_stderr_re = []
+
+ #: compiled bytes regular expressions to remove ANSI codes
+ ansi_re = [
+ re.compile(br'\x1b\[\?1h\x1b='), # CSI ? 1 h ESC =
+ re.compile(br'\x08.'), # [Backspace] .
+ re.compile(br"\x1b\[m"), # ANSI reset code
+ ]
+
+ #: terminal initial prompt
+ terminal_initial_prompt = None
+
+ #: terminal initial answer
+ terminal_initial_answer = None
+
+ #: Send newline after prompt match
+ terminal_inital_prompt_newline = True
+
+ def __init__(self, connection):
+ self._connection = connection
+
+ def _exec_cli_command(self, cmd, check_rc=True):
+ '''
+ Executes the CLI command on the remote device and returns the output
+
+ :arg cmd: Byte string command to be executed
+ '''
+ return self._connection.exec_command(cmd)
+
+ def _get_prompt(self):
+ """
+ Returns the current prompt from the device
+
+ :returns: A byte string of the prompt
+ """
+ return self._connection.get_prompt()
+
+ def on_open_shell(self):
+ """Called after the SSH session is established
+
+ This method is called right after the invoke_shell() is called from
+ the Paramiko SSHClient instance. It provides an opportunity to setup
+ terminal parameters such as disbling paging for instance.
+ """
+ pass
+
+ def on_close_shell(self):
+ """Called before the connection is closed
+
+ This method gets called once the connection close has been requested
+ but before the connection is actually closed. It provides an
+ opportunity to clean up any terminal resources before the shell is
+ actually closed
+ """
+ pass
+
+ def on_become(self, passwd=None):
+ """Called when privilege escalation is requested
+
+ :kwarg passwd: String containing the password
+
+ This method is called when privilege escalation is requested in the
+ play context by setting become to True. It is the responsibility
+ of the terminal plugin to actually perform the privilege escalation,
+ such as entering `enable` mode.
+ """
+ pass
+
+ def on_unbecome(self):
+ """Called when privilege deescalation is requested
+
+ This method is called when the privilege changes from escalated
+ (become=True) to non-escalated (become=False). It is the responsibility
+ of this method to actually perform the deauthorization procedure.
+ """
+ pass
+
+ def on_authorize(self, passwd=None):
+ """Deprecated method for privilege escalation
+
+ :kwarg passwd: String containing the password
+ """
+ return self.on_become(passwd)
+
+ def on_deauthorize(self):
+ """Deprecated method for privilege deescalation
+ """
+ return self.on_unbecome()
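
A concrete terminal plugin subclasses TerminalBase, supplies byte-string prompt patterns, and typically disables paging in on_open_shell(). A hedged sketch of such a subclass; the prompt patterns and the paging command are invented for illustration and vary per network OS:

    import re

    from ansible.plugins.terminal import TerminalBase


    class TerminalModule(TerminalBase):
        # Byte-string patterns, as the class docstring above requires.
        terminal_stdout_re = [re.compile(br'[\w.\-]+[>#]\s*$')]
        terminal_stderr_re = [re.compile(br'% ?Error')]

        def on_open_shell(self):
            # Hypothetical device command to disable output paging.
            self._exec_cli_command(b'terminal length 0')
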
diff --git a/lib/ansible/plugins/test/__init__.py b/lib/ansible/plugins/test/__init__.py
new file mode 100644
index 00000000..980f84a2
--- /dev/null
+++ b/lib/ansible/plugins/test/__init__.py
@@ -0,0 +1,3 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py
new file mode 100644
index 00000000..40733a14
--- /dev/null
+++ b/lib/ansible/plugins/test/core.py
@@ -0,0 +1,250 @@
+# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import operator as py_operator
+from distutils.version import LooseVersion, StrictVersion
+
+from ansible import errors
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def failed(result):
+ ''' Test if task result yields failed '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'failed' test expects a dictionary")
+ return result.get('failed', False)
+
+
+def success(result):
+ ''' Test if task result yields success '''
+ return not failed(result)
+
+
+def unreachable(result):
+ ''' Test if task result yields unreachable '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'unreachable' test expects a dictionary")
+ return result.get('unreachable', False)
+
+
+def reachable(result):
+ ''' Test if task result yields reachable '''
+ return not unreachable(result)
+
+
+def changed(result):
+ ''' Test if task result yields changed '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'changed' test expects a dictionary")
+ if 'changed' not in result:
+ changed = False
+ if (
+ 'results' in result and # some modules return a 'results' key
+ isinstance(result['results'], MutableSequence) and
+ isinstance(result['results'][0], MutableMapping)
+ ):
+ for res in result['results']:
+ if res.get('changed', False):
+ changed = True
+ break
+ else:
+ changed = result.get('changed', False)
+ return changed
+
+
+def skipped(result):
+ ''' Test if task result yields skipped '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'skipped' test expects a dictionary")
+ return result.get('skipped', False)
+
+
+def started(result):
+ ''' Test if async task has started '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'started' test expects a dictionary")
+ if 'started' in result:
+ # For async tasks, return status
+ # NOTE: The value of started is 0 or 1, not False or True :-/
+ return result.get('started', 0) == 1
+ else:
+ # For non-async tasks, warn user, but return as if started
+ display.warning("The 'started' test expects an async task, but a non-async task was tested")
+ return True
+
+
+def finished(result):
+ ''' Test if async task has finished '''
+ if not isinstance(result, MutableMapping):
+ raise errors.AnsibleFilterError("The 'finished' test expects a dictionary")
+ if 'finished' in result:
+ # For async tasks, return status
+ # NOTE: The value of finished is 0 or 1, not False or True :-/
+ return result.get('finished', 0) == 1
+ else:
+ # For non-async tasks, warn user, but return as if finished
+ display.warning("The 'finished' test expects an async task, but a non-async task was tested")
+ return True
+
+
+def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'):
+ ''' Expose `re` as a boolean filter using the `search` method by default.
+ This is likely only useful for `search` and `match` which already
+ have their own filters.
+ '''
+ # In addition to ensuring the correct type, to_text here will ensure
+ # _fail_with_undefined_error happens if the value is Undefined
+ value = to_text(value, errors='surrogate_or_strict')
+ flags = 0
+ if ignorecase:
+ flags |= re.I
+ if multiline:
+ flags |= re.M
+ _re = re.compile(pattern, flags=flags)
+ return bool(getattr(_re, match_type, _re.search)(value))
+
+
+def vault_encrypted(value):
+ """Evaulate whether a variable is a single vault encrypted value
+
+ .. versionadded:: 2.10
+ """
+ return getattr(value, '__ENCRYPTED__', False) and value.is_encrypted()
+
+
+def match(value, pattern='', ignorecase=False, multiline=False):
+ ''' Perform a `re.match` returning a boolean '''
+ return regex(value, pattern, ignorecase, multiline, 'match')
+
+
+def search(value, pattern='', ignorecase=False, multiline=False):
+ ''' Perform a `re.search` returning a boolean '''
+ return regex(value, pattern, ignorecase, multiline, 'search')
+
+
+def version_compare(value, version, operator='eq', strict=False):
+ ''' Perform a version comparison on a value '''
+ op_map = {
+ '==': 'eq', '=': 'eq', 'eq': 'eq',
+ '<': 'lt', 'lt': 'lt',
+ '<=': 'le', 'le': 'le',
+ '>': 'gt', 'gt': 'gt',
+ '>=': 'ge', 'ge': 'ge',
+ '!=': 'ne', '<>': 'ne', 'ne': 'ne'
+ }
+
+ if strict:
+ Version = StrictVersion
+ else:
+ Version = LooseVersion
+
+ if operator in op_map:
+ operator = op_map[operator]
+ else:
+ raise errors.AnsibleFilterError('Invalid operator type')
+
+ try:
+ method = getattr(py_operator, operator)
+ return method(Version(str(value)), Version(str(version)))
+ except Exception as e:
+ raise errors.AnsibleFilterError('Version comparison: %s' % e)
+
+
+def truthy(value, convert_bool=False):
+ """Evaluate as value for truthiness using python ``bool``
+
+ Optionally, attempt a conversion to bool for boolean-like values
+ such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
+
+ .. versionadded:: 2.10
+ """
+ if convert_bool:
+ try:
+ value = boolean(value)
+ except TypeError:
+ pass
+
+ return bool(value)
+
+
+def falsy(value, convert_bool=False):
+ """Evaluate as value for falsiness using python ``bool``
+
+ Optionally, attempt a conversion to bool for boolean-like values
+ such as ``"false"``, ``"true"``, ``"yes"``, ``"no"``, ``"on"``, ``"off"``, etc.
+
+ .. versionadded:: 2.10
+ """
+ return not truthy(value, convert_bool=convert_bool)
+
+
+class TestModule(object):
+ ''' Ansible core jinja2 tests '''
+
+ def tests(self):
+ return {
+ # failure testing
+ 'failed': failed,
+ 'failure': failed,
+ 'succeeded': success,
+ 'success': success,
+ 'successful': success,
+ 'reachable': reachable,
+ 'unreachable': unreachable,
+
+ # changed testing
+ 'changed': changed,
+ 'change': changed,
+
+ # skip testing
+ 'skipped': skipped,
+ 'skip': skipped,
+
+ # async testing
+ 'finished': finished,
+ 'started': started,
+
+ # regex
+ 'match': match,
+ 'search': search,
+ 'regex': regex,
+
+ # version comparison
+ 'version_compare': version_compare,
+ 'version': version_compare,
+
+ # lists
+ 'any': any,
+ 'all': all,
+
+ # truthiness
+ 'truthy': truthy,
+ 'falsy': falsy,
+
+ # vault
+ 'vault_encrypted': vault_encrypted,
+ }
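These tests are normally reached through Jinja2, so a quick way to exercise the map above outside of a full play is to register it on a plain Jinja2 Environment. A minimal sketch, assuming this hunk is lib/ansible/plugins/test/core.py as the surrounding diff implies:

    from jinja2 import Environment
    from ansible.plugins.test.core import TestModule

    env = Environment()
    env.tests.update(TestModule().tests())  # 'failed', 'changed', 'version', ...

    result = {'changed': True, 'failed': False}
    print(env.from_string('{{ r is changed and r is success }}').render(r=result))  # True
    print(env.from_string("{{ '2.10.4' is version('2.10', '>=') }}").render())      # True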
diff --git a/lib/ansible/plugins/test/files.py b/lib/ansible/plugins/test/files.py
new file mode 100644
index 00000000..bb0dfd01
--- /dev/null
+++ b/lib/ansible/plugins/test/files.py
@@ -0,0 +1,48 @@
+# (c) 2015, Ansible, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from os.path import isdir, isfile, isabs, exists, lexists, islink, samefile, ismount
+from ansible import errors
+
+
+class TestModule(object):
+ ''' Ansible file jinja2 tests '''
+
+ def tests(self):
+ return {
+ # file testing
+ 'is_dir': isdir,
+ 'directory': isdir,
+ 'is_file': isfile,
+ 'file': isfile,
+ 'is_link': islink,
+ 'link': islink,
+ 'exists': exists,
+ 'link_exists': lexists,
+
+ # path testing
+ 'is_abs': isabs,
+ 'abs': isabs,
+ 'is_same_file': samefile,
+ 'same_file': samefile,
+ 'is_mount': ismount,
+ 'mount': ismount,
+ }
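These are thin aliases over os.path predicates, so the same Environment trick applies; a sketch (the results naturally depend on the machine):

    from jinja2 import Environment
    from ansible.plugins.test.files import TestModule

    env = Environment()
    env.tests.update(TestModule().tests())
    print(env.from_string("{{ '/tmp' is directory }}").render())   # True on most Unix systems
    print(env.from_string("{{ '/etc/hosts' is file }}").render())  # True on most Unix systems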
diff --git a/lib/ansible/plugins/test/mathstuff.py b/lib/ansible/plugins/test/mathstuff.py
new file mode 100644
index 00000000..952562cc
--- /dev/null
+++ b/lib/ansible/plugins/test/mathstuff.py
@@ -0,0 +1,62 @@
+# (c) 2016, Ansible, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import math
+
+
+def issubset(a, b):
+ return set(a) <= set(b)
+
+
+def issuperset(a, b):
+ return set(a) >= set(b)
+
+
+def isnotanumber(x):
+ try:
+ return math.isnan(x)
+ except TypeError:
+ return False
+
+
+def contains(seq, value):
+ '''The ``in`` test with its arguments reversed, allowing use as a test in filters like ``selectattr``
+
+ .. versionadded:: 2.8
+ '''
+ return value in seq
+
+
+class TestModule:
+ ''' Ansible math jinja2 tests '''
+
+ def tests(self):
+ return {
+ # set theory
+ 'issubset': issubset,
+ 'subset': issubset,
+ 'issuperset': issuperset,
+ 'superset': issuperset,
+ 'contains': contains,
+
+ # numbers
+ 'isnan': isnotanumber,
+ 'nan': isnotanumber,
+ }
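The set-theory helpers coerce their arguments through set(), and contains() exists purely so a membership check can be used where Jinja2 requires a test (e.g. inside selectattr). A quick sketch of the plain functions:

    from ansible.plugins.test.mathstuff import contains, isnotanumber, issubset

    print(issubset([1, 2], [3, 2, 1]))  # True: {1, 2} <= {1, 2, 3}
    print(contains([1, 2, 3], 2))       # True: arguments reversed relative to ``in``
    print(isnotanumber(float('nan')))   # True; non-numeric input returns False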
diff --git a/lib/ansible/plugins/vars/__init__.py b/lib/ansible/plugins/vars/__init__.py
new file mode 100644
index 00000000..2a7bafd9
--- /dev/null
+++ b/lib/ansible/plugins/vars/__init__.py
@@ -0,0 +1,41 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins import AnsiblePlugin
+from ansible.utils.path import basedir
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class BaseVarsPlugin(AnsiblePlugin):
+
+ """
+ Loads variables for groups and/or hosts
+ """
+
+ def __init__(self):
+ """ constructor """
+ super(BaseVarsPlugin, self).__init__()
+ self._display = display
+
+ def get_vars(self, loader, path, entities):
+ """ Gets variables. """
+ self._basedir = basedir(path)
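A concrete subclass only needs to call super() (which resolves self._basedir from the given path) and return a dict. A hypothetical minimal plugin, for illustration only:

    from ansible.plugins.vars import BaseVarsPlugin

    class VarsModule(BaseVarsPlugin):
        """Hypothetical example: expose the computed basedir to every entity."""

        def get_vars(self, loader, path, entities):
            super(VarsModule, self).get_vars(loader, path, entities)  # sets self._basedir
            return {'example_basedir': self._basedir}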
diff --git a/lib/ansible/plugins/vars/host_group_vars.py b/lib/ansible/plugins/vars/host_group_vars.py
new file mode 100644
index 00000000..377a77de
--- /dev/null
+++ b/lib/ansible/plugins/vars/host_group_vars.py
@@ -0,0 +1,115 @@
+# Copyright 2017 RedHat, inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: host_group_vars
+ version_added: "2.4"
+ short_description: In charge of loading group_vars and host_vars
+ requirements:
+ - whitelist in configuration
+ description:
+ - Loads YAML vars into corresponding groups/hosts in group_vars/ and host_vars/ directories.
+ - Files are restricted by extension to one of .yaml, .json, .yml or no extension.
+ - Hidden (starting with '.') and backup (ending with '~') files and directories are ignored.
+ - Only applies to inventory sources that are existing paths.
+ - Starting in 2.10, this plugin requires whitelisting and is whitelisted by default.
+ options:
+ stage:
+ ini:
+ - key: stage
+ section: vars_host_group_vars
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+ _valid_extensions:
+ default: [".yml", ".yaml", ".json"]
+ description:
+ - "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
+ - 'This affects vars_files, include_vars, inventory and vars plugins among others.'
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ ini:
+ - section: yaml_valid_extensions
+ key: defaults
+ type: list
+ extends_documentation_fragment:
+ - vars_plugin_staging
+'''
+
+import os
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.vars import BaseVarsPlugin
+from ansible.inventory.host import Host
+from ansible.inventory.group import Group
+from ansible.utils.vars import combine_vars
+
+FOUND = {}
+
+
+class VarsModule(BaseVarsPlugin):
+
+ REQUIRES_WHITELIST = True
+
+ def get_vars(self, loader, path, entities, cache=True):
+ ''' loads group_vars/host_vars files for the given entities '''
+
+ if not isinstance(entities, list):
+ entities = [entities]
+
+ super(VarsModule, self).get_vars(loader, path, entities)
+
+ data = {}
+ for entity in entities:
+ if isinstance(entity, Host):
+ subdir = 'host_vars'
+ elif isinstance(entity, Group):
+ subdir = 'group_vars'
+ else:
+ raise AnsibleParserError("Supplied entity must be Host or Group, got %s instead" % (type(entity)))
+
+ # avoid 'chroot' type inventory hostnames /path/to/chroot
+ if not entity.name.startswith(os.path.sep):
+ try:
+ found_files = []
+ # load vars
+ b_opath = os.path.realpath(to_bytes(os.path.join(self._basedir, subdir)))
+ opath = to_text(b_opath)
+ key = '%s.%s' % (entity.name, opath)
+ if cache and key in FOUND:
+ found_files = FOUND[key]
+ else:
+ # no need to do much if path does not exist for basedir
+ if os.path.exists(b_opath):
+ if os.path.isdir(b_opath):
+ self._display.debug("\tprocessing dir %s" % opath)
+ found_files = loader.find_vars_files(opath, entity.name)
+ FOUND[key] = found_files
+ else:
+ self._display.warning("Found %s that is not a directory, skipping: %s" % (subdir, opath))
+
+ for found in found_files:
+ new_data = loader.load_from_file(found, cache=True, unsafe=True)
+ if new_data: # ignore empty files
+ data = combine_vars(data, new_data)
+
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+ return data
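For orientation, the directories this plugin resolves sit next to the inventory source (which must be an existing path), and files are matched per host or group name with one of the allowed extensions (.yml, .yaml, .json, or none). A typical layout it would pick up:

    inventory.ini          # inventory source (an existing path)
    group_vars/
        all.yml            # merged into every host
        webservers.yml     # merged into members of the 'webservers' group
    host_vars/
        web01.yml          # merged into host 'web01' only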
diff --git a/lib/ansible/release.py b/lib/ansible/release.py
new file mode 100644
index 00000000..e15e306e
--- /dev/null
+++ b/lib/ansible/release.py
@@ -0,0 +1,24 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__version__ = '2.10.4'
+__author__ = 'Ansible, Inc.'
+__codename__ = 'When the Levee Breaks'
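These constants are importable, which is occasionally handy for gating behaviour on the running version; for example:

    from distutils.version import LooseVersion
    from ansible.release import __codename__, __version__

    print(__version__, '-', __codename__)                     # 2.10.4 - When the Levee Breaks
    print(LooseVersion(__version__) >= LooseVersion('2.10'))  # True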
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
new file mode 100644
index 00000000..e1884c94
--- /dev/null
+++ b/lib/ansible/template/__init__.py
@@ -0,0 +1,1096 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import datetime
+import os
+import pkgutil
+import pwd
+import re
+import time
+
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+from numbers import Number
+from traceback import format_exc
+
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import sha as sha1
+
+from jinja2.exceptions import TemplateSyntaxError, UndefinedError
+from jinja2.loaders import FileSystemLoader
+from jinja2.runtime import Context, StrictUndefined
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleFilterError, AnsiblePluginRemovedError, AnsibleUndefinedVariable, AnsibleAssertionError
+from ansible.module_utils.six import iteritems, string_types, text_type
+from ansible.module_utils.six.moves import range
+from ansible.module_utils._text import to_native, to_text, to_bytes
+from ansible.module_utils.common._collections_compat import Iterator, Sequence, Mapping, MappingView, MutableMapping
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.compat.importlib import import_module
+from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
+from ansible.template.safe_eval import safe_eval
+from ansible.template.template import AnsibleJ2Template
+from ansible.template.vars import AnsibleJ2Vars
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.collection_loader._collection_finder import _get_collection_metadata
+from ansible.utils.unsafe_proxy import wrap_var
+
+display = Display()
+
+
+__all__ = ['Templar', 'generate_ansible_template_vars']
+
+# A regex for checking to see if a variable we're trying to
+# expand is just a single variable name.
+
+# Primitive Types which we don't want Jinja to convert to strings.
+NON_TEMPLATED_TYPES = (bool, Number)
+
+JINJA2_OVERRIDE = '#jinja2:'
+
+from jinja2 import __version__ as j2_version
+
+USE_JINJA2_NATIVE = False
+if C.DEFAULT_JINJA2_NATIVE:
+ try:
+ from jinja2.nativetypes import NativeEnvironment as Environment
+ from ansible.template.native_helpers import ansible_native_concat as j2_concat
+ from ansible.template.native_helpers import NativeJinjaText
+ USE_JINJA2_NATIVE = True
+ except ImportError:
+ from jinja2 import Environment
+ from jinja2.utils import concat as j2_concat
+ display.warning(
+ 'jinja2_native requires Jinja 2.10 and above. '
+ 'Version detected: %s. Falling back to default.' % j2_version
+ )
+else:
+ from jinja2 import Environment
+ from jinja2.utils import concat as j2_concat
+
+
+JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
+JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_end'))
+
+
+RANGE_TYPE = type(range(0))
+
+
+def generate_ansible_template_vars(path, dest_path=None):
+ b_path = to_bytes(path)
+ try:
+ template_uid = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
+ except (KeyError, TypeError):
+ template_uid = os.stat(b_path).st_uid
+
+ temp_vars = {
+ 'template_host': to_text(os.uname()[1]),
+ 'template_path': path,
+ 'template_mtime': datetime.datetime.fromtimestamp(os.path.getmtime(b_path)),
+ 'template_uid': to_text(template_uid),
+ 'template_fullpath': os.path.abspath(path),
+ 'template_run_date': datetime.datetime.now(),
+ 'template_destpath': to_native(dest_path) if dest_path else None,
+ }
+
+ managed_default = C.DEFAULT_MANAGED_STR
+ managed_str = managed_default.format(
+ host=temp_vars['template_host'],
+ uid=temp_vars['template_uid'],
+ file=temp_vars['template_path'],
+ )
+ temp_vars['ansible_managed'] = to_text(time.strftime(to_native(managed_str), time.localtime(os.path.getmtime(b_path))))
+
+ return temp_vars
+
+
+def _escape_backslashes(data, jinja_env):
+ """Double backslashes within jinja2 expressions
+
+ A user may enter something like this in a playbook::
+
+ debug:
+ msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
+
+ The string inside of the {{ }} gets interpreted multiple times: first by
+ yaml, then by python, and finally by jinja2 as part of its variable
+ handling. Because it is processed by both python and jinja2, backslash
+ escaped characters get unescaped twice. This means that we'd normally
+ have to use four backslashes to escape them. This is painful for playbook
+ authors, as they have to remember different rules inside vs outside of a
+ jinja2 expression (backslashes outside of the "{{ }}" only get processed
+ by yaml and python, so they only need to be escaped once). The following
+ code fixes this by automatically performing the extra quoting of
+ backslashes inside of a jinja2 expression.
+
+ """
+ if '\\' in data and '{{' in data:
+ new_data = []
+ d2 = jinja_env.preprocess(data)
+ in_var = False
+
+ for token in jinja_env.lex(d2):
+ if token[1] == 'variable_begin':
+ in_var = True
+ new_data.append(token[2])
+ elif token[1] == 'variable_end':
+ in_var = False
+ new_data.append(token[2])
+ elif in_var and token[1] == 'string':
+ # Double backslashes only if we're inside of a jinja2 variable
+ new_data.append(token[2].replace('\\', '\\\\'))
+ else:
+ new_data.append(token[2])
+
+ data = ''.join(new_data)
+
+ return data
+
+
+def is_template(data, jinja_env):
+ """This function attempts to quickly detect whether a value is a jinja2
+ template. To do so, we look for the first matching pair of jinja2
+ start and end delimiter tokens.
+ """
+ found = None
+ start = True
+ comment = False
+ d2 = jinja_env.preprocess(data)
+
+ # This wraps a lot of code, but this is due to lex returning a generator
+ # so we may get an exception at any part of the loop
+ try:
+ for token in jinja_env.lex(d2):
+ if token[1] in JINJA2_BEGIN_TOKENS:
+ if start and token[1] == 'comment_begin':
+ # Comments can wrap other token types
+ comment = True
+ start = False
+ # Example: variable_begin -> variable
+ found = token[1].split('_')[0]
+ elif token[1] in JINJA2_END_TOKENS:
+ if token[1].split('_')[0] == found:
+ return True
+ elif comment:
+ continue
+ return False
+ except TemplateSyntaxError:
+ return False
+
+ return False
+
+
+def _count_newlines_from_end(in_str):
+ '''
+ Counts the number of newlines at the end of a string. This is used during
+ the jinja2 templating to ensure the count matches the input, since some newlines
+ may be thrown away during the templating.
+ '''
+
+ try:
+ i = len(in_str)
+ j = i - 1
+ while in_str[j] == '\n':
+ j -= 1
+ return i - 1 - j
+ except IndexError:
+ # Uncommon cases: zero length string and string containing only newlines
+ return i
+
+
+def recursive_check_defined(item):
+ from jinja2.runtime import Undefined
+
+ if isinstance(item, MutableMapping):
+ for key in item:
+ recursive_check_defined(item[key])
+ elif isinstance(item, list):
+ for i in item:
+ recursive_check_defined(i)
+ else:
+ if isinstance(item, Undefined):
+ raise AnsibleFilterError("{0} is undefined".format(item))
+
+
+def _is_rolled(value):
+ """Helper method to determine if something is an unrolled generator,
+ iterator, or similar object
+ """
+ return (
+ isinstance(value, Iterator) or
+ isinstance(value, MappingView) or
+ isinstance(value, RANGE_TYPE)
+ )
+
+
+def _unroll_iterator(func):
+ """Wrapper function, that intercepts the result of a filter
+ and auto unrolls a generator, so that users are not required to
+ explicitly use ``|list`` to unroll.
+ """
+ def wrapper(*args, **kwargs):
+ ret = func(*args, **kwargs)
+ if _is_rolled(ret):
+ return list(ret)
+ return ret
+
+ wrapper.__UNROLLED__ = True
+ return _update_wrapper(wrapper, func)
+
+
+def _update_wrapper(wrapper, func):
+ # This code is duplicated from ``functools.update_wrapper`` from Py3.7.
+ # ``functools.update_wrapper`` was failing when the func was ``functools.partial``
+ for attr in ('__module__', '__name__', '__qualname__', '__doc__', '__annotations__'):
+ try:
+ value = getattr(func, attr)
+ except AttributeError:
+ pass
+ else:
+ setattr(wrapper, attr, value)
+ for attr in ('__dict__',):
+ getattr(wrapper, attr).update(getattr(func, attr, {}))
+ wrapper.__wrapped__ = func
+ return wrapper
+
+
+def _wrap_native_text(func):
+ """Wrapper function, that intercepts the result of a filter
+ and wraps it into NativeJinjaText which is then used
+ in ``ansible_native_concat`` to indicate that it is a text
+ which should not be passed into ``literal_eval``.
+ """
+ def wrapper(*args, **kwargs):
+ ret = func(*args, **kwargs)
+ return NativeJinjaText(ret)
+
+ return _update_wrapper(wrapper, func)
+
+
+class AnsibleUndefined(StrictUndefined):
+ '''
+ A custom Undefined class, which returns further Undefined objects on access,
+ rather than throwing an exception.
+ '''
+ def __getattr__(self, name):
+ if name == '__UNSAFE__':
+ # AnsibleUndefined should never be assumed to be unsafe
+ # This prevents ``hasattr(val, '__UNSAFE__')`` from evaluating to ``True``
+ raise AttributeError(name)
+ # Return original Undefined object to preserve the first failure context
+ return self
+
+ def __getitem__(self, key):
+ # Return original Undefined object to preserve the first failure context
+ return self
+
+ def __repr__(self):
+ return 'AnsibleUndefined'
+
+ def __contains__(self, item):
+ # Return original Undefined object to preserve the first failure context
+ return self
+
+
+class AnsibleContext(Context):
+ '''
+ A custom context, which intercepts resolve() calls and sets a flag
+ internally if any variable lookup returns an AnsibleUnsafe value. This
+ flag is checked post-templating, and (when set) will result in the
+ final templated result being wrapped in AnsibleUnsafe.
+ '''
+ def __init__(self, *args, **kwargs):
+ super(AnsibleContext, self).__init__(*args, **kwargs)
+ self.unsafe = False
+
+ def _is_unsafe(self, val):
+ '''
+ Our helper function, which will also recursively check dict and
+ list entries, because they may be repr'd and contain a key or
+ value with jinja2 syntax that would otherwise lose the
+ AnsibleUnsafe value.
+ '''
+ if isinstance(val, dict):
+ for key in val.keys():
+ if self._is_unsafe(val[key]):
+ return True
+ elif isinstance(val, list):
+ for item in val:
+ if self._is_unsafe(item):
+ return True
+ elif getattr(val, '__UNSAFE__', False) is True:
+ return True
+ return False
+
+ def _update_unsafe(self, val):
+ if val is not None and not self.unsafe and self._is_unsafe(val):
+ self.unsafe = True
+
+ def resolve(self, key):
+ '''
+ The intercepted resolve(), which uses the helper above to set the
+ internal flag whenever an unsafe variable value is returned.
+ '''
+ val = super(AnsibleContext, self).resolve(key)
+ self._update_unsafe(val)
+ return val
+
+ def resolve_or_missing(self, key):
+ val = super(AnsibleContext, self).resolve_or_missing(key)
+ self._update_unsafe(val)
+ return val
+
+ def get_all(self):
+ """Return the complete context as a dict including the exported
+ variables. For optimization reasons this might not return an
+ actual copy so be careful with using it.
+
+ This is to prevent running ``AnsibleJ2Vars`` through dict():
+
+ ``dict(self.parent, **self.vars)``
+
+ In Ansible this means that ALL variables would be templated in the
+ process of re-creating the parent because ``AnsibleJ2Vars`` templates
+ each variable in its ``__getitem__`` method. Instead we re-create the
+ parent via ``AnsibleJ2Vars.add_locals`` that creates a new
+ ``AnsibleJ2Vars`` copy without templating each variable.
+
+ This will prevent unnecessarily templating unused variables in cases
+ like setting a local variable and passing it to {% include %}
+ in a template.
+
+ Also see ``AnsibleJ2Template`` and
+ https://github.com/pallets/jinja/commit/d67f0fd4cc2a4af08f51f4466150d49da7798729
+ """
+ if LooseVersion(j2_version) >= LooseVersion('2.9'):
+ if not self.vars:
+ return self.parent
+ if not self.parent:
+ return self.vars
+
+ if isinstance(self.parent, AnsibleJ2Vars):
+ return self.parent.add_locals(self.vars)
+ else:
+ # can this happen in Ansible?
+ return dict(self.parent, **self.vars)
+
+
+class JinjaPluginIntercept(MutableMapping):
+ def __init__(self, delegatee, pluginloader, *args, **kwargs):
+ super(JinjaPluginIntercept, self).__init__(*args, **kwargs)
+ self._delegatee = delegatee
+ self._pluginloader = pluginloader
+
+ if self._pluginloader.class_name == 'FilterModule':
+ self._method_map_name = 'filters'
+ self._dirname = 'filter'
+ elif self._pluginloader.class_name == 'TestModule':
+ self._method_map_name = 'tests'
+ self._dirname = 'test'
+
+ self._collection_jinja_func_cache = {}
+
+ # FUTURE: we can cache FQ filter/test calls for the entire duration of a run, since a given collection's impls
+ # aren't supposed to change during a run
+ def __getitem__(self, key):
+ try:
+ if not isinstance(key, string_types):
+ raise ValueError('key must be a string')
+
+ key = to_native(key)
+
+ if '.' not in key: # might be a built-in or legacy, check the delegatee dict first, then try for a last-chance base redirect
+ func = self._delegatee.get(key)
+
+ if func:
+ return func
+
+ # didn't find it in the pre-built Jinja env, assume it's a former builtin and follow the normal routing path
+ leaf_key = key
+ key = 'ansible.builtin.' + key
+ else:
+ leaf_key = key.split('.')[-1]
+
+ acr = AnsibleCollectionRef.try_parse_fqcr(key, self._dirname)
+
+ if not acr:
+ raise KeyError('invalid plugin name: {0}'.format(key))
+
+ ts = _get_collection_metadata(acr.collection)
+
+ # TODO: implement support for collection-backed redirect (currently only builtin)
+ # TODO: implement cycle detection (unified across collection redir as well)
+
+ routing_entry = ts.get('plugin_routing', {}).get(self._dirname, {}).get(leaf_key, {})
+
+ deprecation_entry = routing_entry.get('deprecation')
+ if deprecation_entry:
+ warning_text = deprecation_entry.get('warning_text')
+ removal_date = deprecation_entry.get('removal_date')
+ removal_version = deprecation_entry.get('removal_version')
+
+ if not warning_text:
+ warning_text = '{0} "{1}" is deprecated'.format(self._dirname, key)
+
+ display.deprecated(warning_text, version=removal_version, date=removal_date, collection_name=acr.collection)
+
+ tombstone_entry = routing_entry.get('tombstone')
+
+ if tombstone_entry:
+ warning_text = tombstone_entry.get('warning_text')
+ removal_date = tombstone_entry.get('removal_date')
+ removal_version = tombstone_entry.get('removal_version')
+
+ if not warning_text:
+ warning_text = '{0} "{1}" has been removed'.format(self._dirname, key)
+
+ exc_msg = display.get_deprecation_message(warning_text, version=removal_version, date=removal_date,
+ collection_name=acr.collection, removed=True)
+
+ raise AnsiblePluginRemovedError(exc_msg)
+
+ redirect_fqcr = routing_entry.get('redirect', None)
+ if redirect_fqcr:
+ acr = AnsibleCollectionRef.from_fqcr(ref=redirect_fqcr, ref_type=self._dirname)
+ display.vvv('redirecting {0} {1} to {2}.{3}'.format(self._dirname, key, acr.collection, acr.resource))
+ key = redirect_fqcr
+ # TODO: handle recursive forwarding (not necessary for builtin, but definitely for further collection redirs)
+
+ func = self._collection_jinja_func_cache.get(key)
+
+ if func:
+ return func
+
+ try:
+ pkg = import_module(acr.n_python_package_name)
+ except ImportError:
+ raise KeyError()
+
+ parent_prefix = acr.collection
+
+ if acr.subdirs:
+ parent_prefix = '{0}.{1}'.format(parent_prefix, acr.subdirs)
+
+ # TODO: implement collection-level redirect
+
+ for dummy, module_name, ispkg in pkgutil.iter_modules(pkg.__path__, prefix=parent_prefix + '.'):
+ if ispkg:
+ continue
+
+ try:
+ plugin_impl = self._pluginloader.get(module_name)
+ except Exception as e:
+ raise TemplateSyntaxError(to_native(e), 0)
+
+ method_map = getattr(plugin_impl, self._method_map_name)
+
+ for func_name, func in iteritems(method_map()):
+ fq_name = '.'.join((parent_prefix, func_name))
+ # FIXME: detect/warn on intra-collection function name collisions
+ if USE_JINJA2_NATIVE and func_name in C.STRING_TYPE_FILTERS:
+ self._collection_jinja_func_cache[fq_name] = _wrap_native_text(func)
+ else:
+ self._collection_jinja_func_cache[fq_name] = _unroll_iterator(func)
+
+ function_impl = self._collection_jinja_func_cache[key]
+ return function_impl
+ except AnsiblePluginRemovedError as apre:
+ raise TemplateSyntaxError(to_native(apre), 0)
+ except KeyError:
+ raise
+ except Exception as ex:
+ display.warning('an unexpected error occurred during Jinja2 environment setup: {0}'.format(to_native(ex)))
+ display.vvv('exception during Jinja2 environment setup: {0}'.format(format_exc()))
+ raise TemplateSyntaxError(to_native(ex), 0)
+
+ def __setitem__(self, key, value):
+ return self._delegatee.__setitem__(key, value)
+
+ def __delitem__(self, key):
+ raise NotImplementedError()
+
+ def __iter__(self):
+ # not strictly accurate since we're not counting dynamically-loaded values
+ return iter(self._delegatee)
+
+ def __len__(self):
+ # not strictly accurate since we're not counting dynamically-loaded values
+ return len(self._delegatee)
+
+
+class AnsibleEnvironment(Environment):
+ '''
+ Our custom environment, which simply allows us to override the class-level
+ values for the Template and Context classes used by jinja2 internally.
+ '''
+ context_class = AnsibleContext
+ template_class = AnsibleJ2Template
+
+ def __init__(self, *args, **kwargs):
+ super(AnsibleEnvironment, self).__init__(*args, **kwargs)
+
+ self.filters = JinjaPluginIntercept(self.filters, filter_loader)
+ self.tests = JinjaPluginIntercept(self.tests, test_loader)
+
+
+class Templar:
+ '''
+ The main class for templating, with the main entry-point of template().
+ '''
+
+ def __init__(self, loader, shared_loader_obj=None, variables=None):
+ variables = {} if variables is None else variables
+
+ self._loader = loader
+ self._filters = None
+ self._tests = None
+ self._available_variables = variables
+ self._cached_result = {}
+
+ if loader:
+ self._basedir = loader.get_basedir()
+ else:
+ self._basedir = './'
+
+ if shared_loader_obj:
+ self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
+ self._test_loader = getattr(shared_loader_obj, 'test_loader')
+ self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
+ else:
+ self._filter_loader = filter_loader
+ self._test_loader = test_loader
+ self._lookup_loader = lookup_loader
+
+ # flags to determine whether certain failures during templating
+ # should result in fatal errors being raised
+ self._fail_on_lookup_errors = True
+ self._fail_on_filter_errors = True
+ self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
+
+ self.environment = AnsibleEnvironment(
+ trim_blocks=True,
+ undefined=AnsibleUndefined,
+ extensions=self._get_extensions(),
+ finalize=self._finalize,
+ loader=FileSystemLoader(self._basedir),
+ )
+
+ # jinja2 globals are inconsistent across versions; this normalizes them
+ self.environment.globals['dict'] = dict
+
+ # Custom globals
+ self.environment.globals['lookup'] = self._lookup
+ self.environment.globals['query'] = self.environment.globals['q'] = self._query_lookup
+ self.environment.globals['now'] = self._now_datetime
+ self.environment.globals['finalize'] = self._finalize
+
+ # the current rendering context under which the templar class is working
+ self.cur_context = None
+
+ self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
+
+ self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (
+ self.environment.variable_start_string,
+ self.environment.block_start_string,
+ self.environment.block_end_string,
+ self.environment.variable_end_string
+ ))
+ self._no_type_regex = re.compile(r'.*?\|\s*(?:%s)(?:\([^\|]*\))?\s*\)?\s*(?:%s)' %
+ ('|'.join(C.STRING_TYPE_FILTERS), self.environment.variable_end_string))
+
+ def _get_filters(self):
+ '''
+ Returns filter plugins, after loading and caching them if need be
+ '''
+
+ if self._filters is not None:
+ return self._filters.copy()
+
+ self._filters = dict()
+
+ for fp in self._filter_loader.all():
+ self._filters.update(fp.filters())
+
+ if USE_JINJA2_NATIVE:
+ for string_filter in C.STRING_TYPE_FILTERS:
+ try:
+ orig_filter = self._filters[string_filter]
+ except KeyError:
+ try:
+ orig_filter = self.environment.filters[string_filter]
+ except KeyError:
+ continue
+ self._filters[string_filter] = _wrap_native_text(orig_filter)
+
+ return self._filters.copy()
+
+ def _get_tests(self):
+ '''
+ Returns tests plugins, after loading and caching them if need be
+ '''
+
+ if self._tests is not None:
+ return self._tests.copy()
+
+ self._tests = dict()
+ for fp in self._test_loader.all():
+ self._tests.update(fp.tests())
+
+ return self._tests.copy()
+
+ def _get_extensions(self):
+ '''
+ Return jinja2 extensions to load.
+
+ If some extensions are set via jinja_extensions in ansible.cfg, we try
+ to load them with the jinja environment.
+ '''
+
+ jinja_exts = []
+ if C.DEFAULT_JINJA2_EXTENSIONS:
+ # make sure the configuration directive doesn't contain spaces
+ # and split extensions in an array
+ jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
+
+ return jinja_exts
+
+ @property
+ def available_variables(self):
+ return self._available_variables
+
+ @available_variables.setter
+ def available_variables(self, variables):
+ '''
+ Sets the mapping of template variables this Templar instance will use
+ to template things, so we don't have to pass them around between
+ internal methods. We also clear the template cache here, as the variables
+ are being changed.
+ '''
+
+ if not isinstance(variables, Mapping):
+ raise AnsibleAssertionError("the type of 'variables' should be a Mapping but was a %s" % (type(variables)))
+ self._available_variables = variables
+ self._cached_result = {}
+
+ def set_available_variables(self, variables):
+ display.deprecated(
+ 'set_available_variables is being deprecated. Use "@available_variables.setter" instead.',
+ version='2.13', collection_name='ansible.builtin'
+ )
+ self.available_variables = variables
+
+ @contextmanager
+ def set_temporary_context(self, **kwargs):
+ """Context manager used to set temporary templating context, without having to worry about resetting
+ original values afterward
+
+ Use a keyword that maps to the attr you are setting. Applies to ``self.environment`` by default;
+ to set context on another object, it must be in ``mapping``.
+ """
+ mapping = {
+ 'available_variables': self,
+ 'searchpath': self.environment.loader,
+ }
+ original = {}
+
+ for key, value in kwargs.items():
+ obj = mapping.get(key, self.environment)
+ try:
+ original[key] = getattr(obj, key)
+ if value is not None:
+ setattr(obj, key, value)
+ except AttributeError:
+ # Ignore invalid attrs, lstrip_blocks was added in jinja2==2.7
+ pass
+
+ yield
+
+ for key in original:
+ obj = mapping.get(key, self.environment)
+ setattr(obj, key, original[key])
+
+ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
+ convert_data=True, static_vars=None, cache=True, disable_lookups=False):
+ '''
+ Templates (possibly recursively) any given data as input. If convert_bare is
+ set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
+ before being sent through the template engine.
+ '''
+ static_vars = [''] if static_vars is None else static_vars
+
+ # Don't template unsafe variables, just return them.
+ if hasattr(variable, '__UNSAFE__'):
+ return variable
+
+ if fail_on_undefined is None:
+ fail_on_undefined = self._fail_on_undefined_errors
+
+ try:
+ if convert_bare:
+ variable = self._convert_bare_variable(variable)
+
+ if isinstance(variable, string_types):
+ result = variable
+
+ if self.is_possibly_template(variable):
+ # Check to see if the string we are trying to render is just referencing a single
+ # var. In this case we don't want to accidentally change the type of the variable
+ # to a string by using the jinja template renderer. We just want to pass it.
+ only_one = self.SINGLE_VAR.match(variable)
+ if only_one:
+ var_name = only_one.group(1)
+ if var_name in self._available_variables:
+ resolved_val = self._available_variables[var_name]
+ if isinstance(resolved_val, NON_TEMPLATED_TYPES):
+ return resolved_val
+ elif resolved_val is None:
+ return C.DEFAULT_NULL_REPRESENTATION
+
+ # Using a cache in order to prevent template calls with already templated variables
+ sha1_hash = None
+ if cache:
+ variable_hash = sha1(text_type(variable).encode('utf-8'))
+ options_hash = sha1(
+ (
+ text_type(preserve_trailing_newlines) +
+ text_type(escape_backslashes) +
+ text_type(fail_on_undefined) +
+ text_type(overrides)
+ ).encode('utf-8')
+ )
+ sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
+ if cache and sha1_hash in self._cached_result:
+ result = self._cached_result[sha1_hash]
+ else:
+ result = self.do_template(
+ variable,
+ preserve_trailing_newlines=preserve_trailing_newlines,
+ escape_backslashes=escape_backslashes,
+ fail_on_undefined=fail_on_undefined,
+ overrides=overrides,
+ disable_lookups=disable_lookups,
+ )
+
+ if not USE_JINJA2_NATIVE:
+ unsafe = hasattr(result, '__UNSAFE__')
+ if convert_data and not self._no_type_regex.match(variable):
+ # if this looks like a dictionary or list, convert it to such using the safe_eval method
+ if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
+ result.startswith("[") or result in ("True", "False"):
+ eval_results = safe_eval(result, include_exceptions=True)
+ if eval_results[1] is None:
+ result = eval_results[0]
+ if unsafe:
+ result = wrap_var(result)
+ else:
+ # FIXME: if the safe_eval raised an error, should we do something with it?
+ pass
+
+ # we only cache in the case where we have a single variable
+ # name, to make sure we're not putting things which may otherwise
+ # be dynamic in the cache (filters, lookups, etc.)
+ if cache and only_one:
+ self._cached_result[sha1_hash] = result
+
+ return result
+
+ elif is_sequence(variable):
+ return [self.template(
+ v,
+ preserve_trailing_newlines=preserve_trailing_newlines,
+ fail_on_undefined=fail_on_undefined,
+ overrides=overrides,
+ disable_lookups=disable_lookups,
+ ) for v in variable]
+ elif isinstance(variable, Mapping):
+ d = {}
+ # we don't use iteritems() here to avoid problems if the underlying dict
+ # changes sizes due to the templating, which can happen with hostvars
+ for k in variable.keys():
+ if k not in static_vars:
+ d[k] = self.template(
+ variable[k],
+ preserve_trailing_newlines=preserve_trailing_newlines,
+ fail_on_undefined=fail_on_undefined,
+ overrides=overrides,
+ disable_lookups=disable_lookups,
+ )
+ else:
+ d[k] = variable[k]
+ return d
+ else:
+ return variable
+
+ except AnsibleFilterError:
+ if self._fail_on_filter_errors:
+ raise
+ else:
+ return variable
+
+ def is_template(self, data):
+ '''lets us know if data has a template'''
+ if isinstance(data, string_types):
+ return is_template(data, self.environment)
+ elif isinstance(data, (list, tuple)):
+ for v in data:
+ if self.is_template(v):
+ return True
+ elif isinstance(data, dict):
+ for k in data:
+ if self.is_template(k) or self.is_template(data[k]):
+ return True
+ return False
+
+ templatable = is_template
+
+ def is_possibly_template(self, data):
+ '''Determines if a string looks like a template, by seeing if it
+ contains a jinja2 start delimiter. Does not guarantee that the string
+ is actually a template.
+
+ This is different than ``is_template`` which is more strict.
+ This method may return ``True`` on a string that is not templatable.
+
+ Useful when guarding passing a string for templating, but when
+ you want to allow the templating engine to make the final
+ assessment which may result in ``TemplateSyntaxError``.
+ '''
+ env = self.environment
+ if isinstance(data, string_types):
+ for marker in (env.block_start_string, env.variable_start_string, env.comment_start_string):
+ if marker in data:
+ return True
+ return False
+
+ def _convert_bare_variable(self, variable):
+ '''
+ Wraps a bare string, which may have an attribute portion (i.e. foo.bar)
+ in jinja2 variable braces so that it is evaluated properly.
+ '''
+
+ if isinstance(variable, string_types):
+ contains_filters = "|" in variable
+ first_part = variable.split("|")[0].split(".")[0].split("[")[0]
+ if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
+ return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
+
+ # the variable didn't meet the conditions to be converted,
+ # so just return it as-is
+ return variable
+
+ def _finalize(self, thing):
+ '''
+ A custom finalize method for jinja2, which prevents None from being returned. This
+ avoids rendering ``None`` as the string ``"None"``, since ``None`` has no importance in YAML.
+
+ If using ANSIBLE_JINJA2_NATIVE we bypass this and always return the actual value
+ '''
+ if _is_rolled(thing):
+ # Auto unroll a generator, so that users are not required to
+ # explicitly use ``|list`` to unroll
+ # This only affects the scenario where the final result of templating
+ # is a generator, and not where a filter creates a generator in the middle
+ # of a template. See ``_unroll_iterator`` for the other case. This is probably
+ # unnecessary.
+ return list(thing)
+
+ if USE_JINJA2_NATIVE:
+ return thing
+
+ return thing if thing is not None else ''
+
+ def _fail_lookup(self, name, *args, **kwargs):
+ raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)
+
+ def _now_datetime(self, utc=False, fmt=None):
+ '''jinja2 global function to return current datetime, potentially formatted via strftime'''
+ if utc:
+ now = datetime.datetime.utcnow()
+ else:
+ now = datetime.datetime.now()
+
+ if fmt:
+ return now.strftime(fmt)
+
+ return now
+
+ def _query_lookup(self, name, *args, **kwargs):
+ ''' wrapper for lookup, force wantlist true'''
+ kwargs['wantlist'] = True
+ return self._lookup(name, *args, **kwargs)
+
+ def _lookup(self, name, *args, **kwargs):
+ instance = self._lookup_loader.get(name, loader=self._loader, templar=self)
+
+ if instance is not None:
+ wantlist = kwargs.pop('wantlist', False)
+ allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
+ errors = kwargs.pop('errors', 'strict')
+
+ from ansible.utils.listify import listify_lookup_plugin_terms
+ loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
+ # safely catch run failures per #5059
+ try:
+ ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ raise AnsibleUndefinedVariable(e)
+ except Exception as e:
+ if self._fail_on_lookup_errors:
+ msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
+ (name, type(e), to_text(e))
+ if errors == 'warn':
+ display.warning(msg)
+ elif errors == 'ignore':
+ display.display(msg, log_only=True)
+ else:
+ raise AnsibleError(to_native(msg))
+ ran = [] if wantlist else None
+
+ if ran and not allow_unsafe:
+ if wantlist:
+ ran = wrap_var(ran)
+ else:
+ try:
+ ran = wrap_var(",".join(ran))
+ except TypeError:
+ # Lookup Plugins should always return lists. Throw an error if that's not
+ # the case:
+ if not isinstance(ran, Sequence):
+ raise AnsibleError("The lookup plugin '%s' did not return a list."
+ % name)
+
+ # The TypeError we can recover from is when the value *inside* of the list
+ # is not a string
+ if len(ran) == 1:
+ ran = wrap_var(ran[0])
+ else:
+ ran = wrap_var(ran)
+
+ if self.cur_context:
+ self.cur_context.unsafe = True
+ return ran
+ else:
+ raise AnsibleError("lookup plugin (%s) not found" % name)
+
+ def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
+ if USE_JINJA2_NATIVE and not isinstance(data, string_types):
+ return data
+
+ # For preserving the number of input newlines in the output (used
+ # later in this method)
+ data_newlines = _count_newlines_from_end(data)
+
+ if fail_on_undefined is None:
+ fail_on_undefined = self._fail_on_undefined_errors
+
+ try:
+ # allows template header overrides to change jinja2 options.
+ if overrides is None:
+ myenv = self.environment.overlay()
+ else:
+ myenv = self.environment.overlay(overrides)
+
+ # Get jinja env overrides from template
+ if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
+ eol = data.find('\n')
+ line = data[len(JINJA2_OVERRIDE):eol]
+ data = data[eol + 1:]
+ for pair in line.split(','):
+ (key, val) = pair.split(':')
+ key = key.strip()
+ setattr(myenv, key, ast.literal_eval(val.strip()))
+
+ # Adds Ansible custom filters and tests
+ myenv.filters.update(self._get_filters())
+ for k in myenv.filters:
+ if not getattr(myenv.filters[k], '__UNROLLED__', False):
+ myenv.filters[k] = _unroll_iterator(myenv.filters[k])
+ myenv.tests.update(self._get_tests())
+
+ if escape_backslashes:
+ # Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
+ data = _escape_backslashes(data, myenv)
+
+ try:
+ t = myenv.from_string(data)
+ except TemplateSyntaxError as e:
+ raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
+ except Exception as e:
+ if 'recursion' in to_native(e):
+ raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
+ else:
+ return data
+
+ if disable_lookups:
+ t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
+
+ jvars = AnsibleJ2Vars(self, t.globals)
+
+ self.cur_context = new_context = t.new_context(jvars, shared=True)
+ rf = t.root_render_func(new_context)
+
+ try:
+ res = j2_concat(rf)
+ if getattr(new_context, 'unsafe', False):
+ res = wrap_var(res)
+ except TypeError as te:
+ if 'AnsibleUndefined' in to_native(te):
+ errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
+ errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
+ raise AnsibleUndefinedVariable(errmsg)
+ else:
+ display.debug("failing because of a type error, template data is: %s" % to_text(data))
+ raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))
+
+ if USE_JINJA2_NATIVE and not isinstance(res, string_types):
+ return res
+
+ if preserve_trailing_newlines:
+ # The low level calls above do not preserve the newline
+ # characters at the end of the input data, so we calculate
+ # the difference in newlines and append them
+ # to the resulting output for parity
+ #
+ # jinja2 added a keep_trailing_newline option in 2.7 when
+ # creating an Environment. That would let us make this code
+ # better (remove a single newline if
+ # preserve_trailing_newlines is False). Once we can depend on
+ # that version being present, modify our code to set that when
+ # initializing self.environment and remove a single trailing
+ # newline here if preserve_trailing_newlines is False.
+ res_newlines = _count_newlines_from_end(res)
+ if data_newlines > res_newlines:
+ res += self.environment.newline_sequence * (data_newlines - res_newlines)
+ return res
+ except (UndefinedError, AnsibleUndefinedVariable) as e:
+ if fail_on_undefined:
+ raise AnsibleUndefinedVariable(e)
+ else:
+ display.debug("Ignoring undefined failure: %s" % to_text(e))
+ return data
+
+ # for backwards compatibility in case anyone is using old private method directly
+ _do_template = do_template
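End to end, the public surface here is small: construct a Templar with a loader and variables, then call template(), which recurses into containers and passes non-template strings through unchanged. A minimal sketch:

    from ansible.parsing.dataloader import DataLoader
    from ansible.template import Templar

    templar = Templar(loader=DataLoader(), variables={'greeting': 'hello'})
    print(templar.template('{{ greeting | upper }}'))   # HELLO
    print(templar.is_template('{{ greeting }}'))        # True
    print(templar.template(['{{ greeting }}', 'raw']))  # ['hello', 'raw'] - recurses into lists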
diff --git a/lib/ansible/template/native_helpers.py b/lib/ansible/template/native_helpers.py
new file mode 100644
index 00000000..81bef436
--- /dev/null
+++ b/lib/ansible/template/native_helpers.py
@@ -0,0 +1,91 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ast import literal_eval
+from itertools import islice, chain
+import types
+
+from jinja2.runtime import StrictUndefined
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.collections import is_sequence, Mapping
+from ansible.module_utils.common.text.converters import container_to_text
+from ansible.module_utils.six import PY2, text_type
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+
+
+class NativeJinjaText(text_type):
+ pass
+
+
+def _fail_on_undefined(data):
+ """Recursively find an undefined value in a nested data structure
+ and properly raise the undefined exception.
+ """
+ if isinstance(data, Mapping):
+ for value in data.values():
+ _fail_on_undefined(value)
+ elif is_sequence(data):
+ for item in data:
+ _fail_on_undefined(item)
+ else:
+ if isinstance(data, StrictUndefined):
+ # To actually raise the undefined exception we need to
+ # access the undefined object otherwise the exception would
+ # be raised on the next access which might not be properly
+ # handled.
+ # See https://github.com/ansible/ansible/issues/52158
+ # and StrictUndefined implementation in upstream Jinja2.
+ str(data)
+
+ return data
+
+
+def ansible_native_concat(nodes):
+ """Return a native Python type from the list of compiled nodes. If the
+ result is a single node, its value is returned. Otherwise, the nodes are
+ concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+ string is returned.
+
+ https://github.com/pallets/jinja/blob/master/src/jinja2/nativetypes.py
+ """
+ head = list(islice(nodes, 2))
+
+ if not head:
+ return None
+
+ if len(head) == 1:
+ out = _fail_on_undefined(head[0])
+
+ # TODO send unvaulted data to literal_eval?
+ if isinstance(out, AnsibleVaultEncryptedUnicode):
+ return out.data
+
+ if isinstance(out, NativeJinjaText):
+ # Sometimes (e.g. ``| string``) we need to mark variables
+ # in a special way so that they remain strings and are not
+ # passed into literal_eval.
+ # See:
+ # https://github.com/ansible/ansible/issues/70831
+ # https://github.com/pallets/jinja/issues/1200
+ # https://github.com/ansible/ansible/issues/70831#issuecomment-664190894
+ return out
+ else:
+ if isinstance(nodes, types.GeneratorType):
+ nodes = chain(head, nodes)
+ out = u''.join([to_text(_fail_on_undefined(v)) for v in nodes])
+
+ try:
+ out = literal_eval(out)
+ if PY2:
+ # ensure bytes are not returned back into Ansible from templating
+ out = container_to_text(out)
+ return out
+ except (ValueError, SyntaxError, MemoryError):
+ return out
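The net effect is that rendered output which parses as a Python literal comes back as that literal, while everything else stays text. Note the two-item lookahead is only chained back on for real generators, so feed it one rather than a plain iterator. A quick sketch:

    from ansible.template.native_helpers import ansible_native_concat

    print(ansible_native_concat(s for s in ['[1, ', '2]']))  # [1, 2] - a real list
    print(ansible_native_concat(s for s in ['plain text']))  # 'plain text' stays a string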
diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
new file mode 100644
index 00000000..43ce250c
--- /dev/null
+++ b/lib/ansible/template/safe_eval.py
@@ -0,0 +1,166 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import sys
+
+from ansible import constants as C
+from ansible.module_utils.common.text.converters import container_to_text, to_native
+from ansible.module_utils.six import string_types, PY2
+from ansible.module_utils.six.moves import builtins
+from ansible.plugins.loader import filter_loader, test_loader
+
+
+def safe_eval(expr, locals=None, include_exceptions=False):
+ '''
+ This is intended for allowing things like:
+ with_items: a_list_variable
+
+ Where Jinja2 would return a string but we do not want to allow it to
+ call functions (outside of Jinja2, where the env is constrained).
+
+ Based on:
+ http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
+ '''
+ locals = {} if locals is None else locals
+
+ # define certain JSON types
+ # eg. JSON booleans are unknown to python eval()
+ OUR_GLOBALS = {
+ '__builtins__': {}, # avoid global builtins as per eval docs
+ 'false': False,
+ 'null': None,
+ 'true': True,
+ # also add back some builtins we do need
+ 'True': True,
+ 'False': False,
+ 'None': None
+ }
+
+ # this is the whitelist of AST nodes we are going to
+ # allow in the evaluation. Any node type other than
+ # those listed here will raise an exception in our custom
+ # visitor class defined below.
+ SAFE_NODES = set(
+ (
+ ast.Add,
+ ast.BinOp,
+ # ast.Call,
+ ast.Compare,
+ ast.Dict,
+ ast.Div,
+ ast.Expression,
+ ast.List,
+ ast.Load,
+ ast.Mult,
+ ast.Num,
+ ast.Name,
+ ast.Str,
+ ast.Sub,
+ ast.USub,
+ ast.Tuple,
+ ast.UnaryOp,
+ )
+ )
+
+ # AST node types were expanded after 2.6
+ if sys.version_info[:2] >= (2, 7):
+ SAFE_NODES.update(
+ set(
+ (ast.Set,)
+ )
+ )
+
+ # And in Python 3.4 too
+ if sys.version_info[:2] >= (3, 4):
+ SAFE_NODES.update(
+ set(
+ (ast.NameConstant,)
+ )
+ )
+
+ # And in Python 3.6 too, although not encountered until Python 3.8, see https://bugs.python.org/issue32892
+ if sys.version_info[:2] >= (3, 6):
+ SAFE_NODES.update(
+ set(
+ (ast.Constant,)
+ )
+ )
+
+ filter_list = []
+ for filter_ in filter_loader.all():
+ filter_list.extend(filter_.filters().keys())
+
+ test_list = []
+ for test in test_loader.all():
+ test_list.extend(test.tests().keys())
+
+ CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
+
+ class CleansingNodeVisitor(ast.NodeVisitor):
+ def generic_visit(self, node, inside_call=False):
+ if type(node) not in SAFE_NODES:
+ raise Exception("invalid expression (%s)" % expr)
+ elif isinstance(node, ast.Call):
+ inside_call = True
+ elif isinstance(node, ast.Name) and inside_call:
+ # Disallow calls to builtin functions that we have not vetted
+ # as safe. Other functions are excluded by setting locals in
+ # the call to eval() later on
+ if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
+ raise Exception("invalid function: %s" % node.id)
+ # iterate over all child nodes
+ for child_node in ast.iter_child_nodes(node):
+ self.generic_visit(child_node, inside_call)
+
+ if not isinstance(expr, string_types):
+ # already templated to a datastructure, perhaps?
+ if include_exceptions:
+ return (expr, None)
+ return expr
+
+ cnv = CleansingNodeVisitor()
+ try:
+ parsed_tree = ast.parse(expr, mode='eval')
+ cnv.visit(parsed_tree)
+ compiled = compile(parsed_tree, to_native(expr), 'eval')
+ # Note: passing our own globals and locals here constrains what
+ # callables (and other identifiers) are recognized. this is in
+ # addition to the filtering of builtins done in CleansingNodeVisitor
+ result = eval(compiled, OUR_GLOBALS, dict(locals))
+ if PY2:
+ # On Python 2 u"{'key': 'value'}" is evaluated to {'key': 'value'},
+ # ensure it is converted to {u'key': u'value'}.
+ result = container_to_text(result)
+
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except SyntaxError as e:
+ # special handling for syntax errors, we just return
+ # the expression string back as-is to support late evaluation
+ if include_exceptions:
+ return (expr, None)
+ return expr
+ except Exception as e:
+ if include_exceptions:
+ return (expr, e)
+ return expr
diff --git a/lib/ansible/template/template.py b/lib/ansible/template/template.py
new file mode 100644
index 00000000..5a555883
--- /dev/null
+++ b/lib/ansible/template/template.py
@@ -0,0 +1,43 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import jinja2
+
+__all__ = ['AnsibleJ2Template']
+
+
+class AnsibleJ2Template(jinja2.environment.Template):
+ '''
+ A helper class, which prevents Jinja2 from running AnsibleJ2Vars through dict().
+    Without this, {% include %} and similar would create new contexts, unlike the special
+    one created in Templar.template. This ensures they are all alike, except for
+ potential locals.
+ '''
+
+ def new_context(self, vars=None, shared=False, locals=None):
+ if vars is not None:
+ if isinstance(vars, dict):
+ vars = vars.copy()
+ if locals is not None:
+ vars.update(locals)
+ else:
+ vars = vars.add_locals(locals)
+ return self.environment.context_class(self.environment, vars, self.name, self.blocks)
diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py
new file mode 100644
index 00000000..464deecf
--- /dev/null
+++ b/lib/ansible/template/vars.py
@@ -0,0 +1,130 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from jinja2.utils import missing
+
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+__all__ = ['AnsibleJ2Vars']
+
+
+class AnsibleJ2Vars(Mapping):
+ '''
+ Helper class to template all variable content before jinja2 sees it. This is
+ done by hijacking the variable storage that jinja2 uses, and overriding __contains__
+ and __getitem__ to look like a dict. Added bonus is avoiding duplicating the large
+ hashes that inject tends to be.
+
+ To facilitate using builtin jinja2 things like range, globals are also handled here.
+ '''
+
+ def __init__(self, templar, globals, locals=None, *extras):
+ '''
+ Initializes this object with a valid Templar() object, as
+ well as several dictionaries of variables representing
+ different scopes (in jinja2 terminology).
+ '''
+
+ self._templar = templar
+ self._globals = globals
+ self._extras = extras
+ self._locals = dict()
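+        # Illustrative: a jinja2 locals dict such as {'l_item': 42, 'context': ctx}
+        # results in self._locals == {'item': 42}; jinja2 internals are filtered out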
+ if isinstance(locals, dict):
+ for key, val in iteritems(locals):
+ if val is not missing:
+ if key[:2] == 'l_':
+ self._locals[key[2:]] = val
+ elif key not in ('context', 'environment', 'template'):
+ self._locals[key] = val
+
+ def __contains__(self, k):
+ if k in self._templar.available_variables:
+ return True
+ if k in self._locals:
+ return True
+ for i in self._extras:
+ if k in i:
+ return True
+ if k in self._globals:
+ return True
+ return False
+
+ def __iter__(self):
+ keys = set()
+ keys.update(self._templar.available_variables, self._locals, self._globals, *self._extras)
+ return iter(keys)
+
+ def __len__(self):
+ keys = set()
+ keys.update(self._templar.available_variables, self._locals, self._globals, *self._extras)
+ return len(keys)
+
+ def __getitem__(self, varname):
+ if varname not in self._templar.available_variables:
+ if varname in self._locals:
+ return self._locals[varname]
+ for i in self._extras:
+ if varname in i:
+ return i[varname]
+ if varname in self._globals:
+ return self._globals[varname]
+ else:
+ raise KeyError("undefined variable: %s" % varname)
+
+ variable = self._templar.available_variables[varname]
+
+ # HostVars is special, return it as-is, as is the special variable
+ # 'vars', which contains the vars structure
+ from ansible.vars.hostvars import HostVars
+        if (isinstance(variable, dict) and varname == "vars") or isinstance(variable, HostVars) or hasattr(variable, '__UNSAFE__'):
+ return variable
+ else:
+ value = None
+ try:
+ value = self._templar.template(variable)
+ except AnsibleUndefinedVariable as e:
+ raise AnsibleUndefinedVariable("%s: %s" % (to_native(variable), e.message))
+ except Exception as e:
+ msg = getattr(e, 'message', None) or to_native(e)
+ raise AnsibleError("An unhandled exception occurred while templating '%s'. "
+ "Error was a %s, original message: %s" % (to_native(variable), type(e), msg))
+
+ return value
+
+ def add_locals(self, locals):
+ '''
+ If locals are provided, create a copy of self containing those
+ locals in addition to what is already in this variable proxy.
+ '''
+ if locals is None:
+ return self
+
+ # FIXME run this only on jinja2>=2.9?
+ # prior to version 2.9, locals contained all of the vars and not just the current
+ # local vars so this was not necessary for locals to propagate down to nested includes
+ new_locals = self._locals.copy()
+ new_locals.update(locals)
+
+ return AnsibleJ2Vars(self._templar, self._globals, locals=new_locals, *self._extras)
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
new file mode 100644
index 00000000..ae8ccff5
--- /dev/null
+++ b/lib/ansible/utils/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/utils/cmd_functions.py b/lib/ansible/utils/cmd_functions.py
new file mode 100644
index 00000000..7a0fb23e
--- /dev/null
+++ b/lib/ansible/utils/cmd_functions.py
@@ -0,0 +1,82 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import select
+import shlex
+import subprocess
+import sys
+
+from ansible.module_utils.six import PY2, PY3
+from ansible.module_utils._text import to_bytes
+
+
+def run_cmd(cmd, live=False, readsize=10):
+
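+    # Illustrative usage (assumes a POSIX 'echo' on PATH):
+    #     rc, stdout, stderr = run_cmd('echo hello', live=True)
+    #     -> (0, b'hello\n', b''), echoing output to the console as it is read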
+
+ # On python2, shlex needs byte strings
+ if PY2:
+ cmd = to_bytes(cmd, errors='surrogate_or_strict')
+ cmdargs = shlex.split(cmd)
+
+    # subprocess should be passed byte strings (on python2.6 it must be
+    # passed byte strings)
+ cmdargs = [to_bytes(a, errors='surrogate_or_strict') for a in cmdargs]
+
+ p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout = b''
+ stderr = b''
+ rpipes = [p.stdout, p.stderr]
+ while True:
+ rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
+
+ if p.stdout in rfd:
+ dat = os.read(p.stdout.fileno(), readsize)
+ if live:
+ # On python3, stdout has a codec to go from text type to bytes
+ if PY3:
+ sys.stdout.buffer.write(dat)
+ else:
+ sys.stdout.write(dat)
+ stdout += dat
+ if dat == b'':
+ rpipes.remove(p.stdout)
+ if p.stderr in rfd:
+ dat = os.read(p.stderr.fileno(), readsize)
+ stderr += dat
+ if live:
+                # stderr is deliberately echoed to stdout when live; on python3,
+                # stdout has a codec to go from text type to bytes
+ if PY3:
+ sys.stdout.buffer.write(dat)
+ else:
+ sys.stdout.write(dat)
+ if dat == b'':
+ rpipes.remove(p.stderr)
+ # only break out if we've emptied the pipes, or there is nothing to
+ # read from and the process has finished.
+ if (not rpipes or not rfd) and p.poll() is not None:
+ break
+ # Calling wait while there are still pipes to read can cause a lock
+ elif not rpipes and p.poll() is None:
+ p.wait()
+
+ return p.returncode, stdout, stderr
diff --git a/lib/ansible/utils/collection_loader/__init__.py b/lib/ansible/utils/collection_loader/__init__.py
new file mode 100644
index 00000000..a81f5039
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/__init__.py
@@ -0,0 +1,23 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# FIXME: decide what of this we want to actually be public/toplevel, put other stuff on a utility class?
+from ._collection_config import AnsibleCollectionConfig
+from ._collection_finder import AnsibleCollectionRef
+from ansible.module_utils.common.text.converters import to_text
+
+
+def resource_from_fqcr(ref):
+ """
+ Return resource from a fully-qualified collection reference,
+ or from a simple resource name.
+ For fully-qualified collection references, this is equivalent to
+ ``AnsibleCollectionRef.from_fqcr(ref).resource``.
+ :param ref: collection reference to parse
+ :return: the resource as a unicode string
+ """
+ ref = to_text(ref, errors='strict')
+ return ref.split(u'.')[-1]
diff --git a/lib/ansible/utils/collection_loader/_collection_config.py b/lib/ansible/utils/collection_loader/_collection_config.py
new file mode 100644
index 00000000..e717cde9
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/_collection_config.py
@@ -0,0 +1,101 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six import with_metaclass
+
+
+class _EventSource:
+ def __init__(self):
+ self._handlers = set()
+
+ def __iadd__(self, handler):
+ if not callable(handler):
+ raise ValueError('handler must be callable')
+ self._handlers.add(handler)
+ return self
+
+ def __isub__(self, handler):
+ try:
+ self._handlers.remove(handler)
+ except KeyError:
+ pass
+
+ return self
+
+ def _on_exception(self, handler, exc, *args, **kwargs):
+ # if we return True, we want the caller to re-raise
+ return True
+
+ def fire(self, *args, **kwargs):
+ for h in self._handlers:
+ try:
+ h(*args, **kwargs)
+ except Exception as ex:
+ if self._on_exception(h, ex, *args, **kwargs):
+ raise
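+
+# Illustrative usage (mirrors how AnsibleCollectionConfig.on_collection_load is consumed):
+#     def handler(collection_name, collection_path):
+#         ...
+#     source = _EventSource()
+#     source += handler                   # subscribe
+#     source.fire(collection_name='ns.coll', collection_path='/x')   # calls handler(...)
+#     source -= handler                   # unsubscribe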
+
+
+class _AnsibleCollectionConfig(type):
+ def __init__(cls, meta, name, bases):
+ cls._collection_finder = None
+ cls._default_collection = None
+ cls._on_collection_load = _EventSource()
+
+ @property
+ def collection_finder(cls):
+ return cls._collection_finder
+
+ @collection_finder.setter
+ def collection_finder(cls, value):
+ if cls._collection_finder:
+ raise ValueError('an AnsibleCollectionFinder has already been configured')
+
+ cls._collection_finder = value
+
+ @property
+ def collection_paths(cls):
+ cls._require_finder()
+ return [to_text(p) for p in cls._collection_finder._n_collection_paths]
+
+ @property
+ def default_collection(cls):
+ return cls._default_collection
+
+ @default_collection.setter
+ def default_collection(cls, value):
+ if cls._default_collection:
+ raise ValueError('default collection {0} has already been configured'.format(value))
+
+ cls._default_collection = value
+
+ @property
+ def on_collection_load(cls):
+ return cls._on_collection_load
+
+ @on_collection_load.setter
+ def on_collection_load(cls, value):
+ if value is not cls._on_collection_load:
+ raise ValueError('on_collection_load is not directly settable (use +=)')
+
+ @property
+ def playbook_paths(cls):
+ cls._require_finder()
+ return [to_text(p) for p in cls._collection_finder._n_playbook_paths]
+
+ @playbook_paths.setter
+ def playbook_paths(cls, value):
+ cls._require_finder()
+ cls._collection_finder.set_playbook_paths(value)
+
+ def _require_finder(cls):
+ if not cls._collection_finder:
+ raise NotImplementedError('an AnsibleCollectionFinder has not been installed in this process')
+
+
+# concrete class of our metaclass type that defines the class properties we want
+class AnsibleCollectionConfig(with_metaclass(_AnsibleCollectionConfig)):
+ pass
diff --git a/lib/ansible/utils/collection_loader/_collection_finder.py b/lib/ansible/utils/collection_loader/_collection_finder.py
new file mode 100644
index 00000000..99d5ddc9
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/_collection_finder.py
@@ -0,0 +1,979 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import pkgutil
+import re
+import sys
+
+
+# DO NOT add new non-stdlib import deps here, this loader is used by external tools (eg ansible-test import sanity)
+# that only allow stdlib and module_utils
+from ansible.module_utils.common.text.converters import to_native, to_text, to_bytes
+from ansible.module_utils.six import string_types, PY3
+from ._collection_config import AnsibleCollectionConfig
+
+from contextlib import contextmanager
+from types import ModuleType
+
+try:
+ from importlib import import_module
+except ImportError:
+ def import_module(name):
+ __import__(name)
+ return sys.modules[name]
+
+try:
+ from importlib import reload as reload_module
+except ImportError:
+ # 2.7 has a global reload function instead...
+ reload_module = reload # pylint:disable=undefined-variable
+
+# NB: this supports import sanity test providing a different impl
+try:
+ from ._collection_meta import _meta_yml_to_dict
+except ImportError:
+ _meta_yml_to_dict = None
+
+
+class _AnsibleCollectionFinder:
+ def __init__(self, paths=None, scan_sys_paths=True):
+ # TODO: accept metadata loader override
+ self._ansible_pkg_path = to_native(os.path.dirname(to_bytes(sys.modules['ansible'].__file__)))
+
+ if isinstance(paths, string_types):
+ paths = [paths]
+ elif paths is None:
+ paths = []
+
+ # expand any placeholders in configured paths
+ paths = [os.path.expanduser(to_native(p, errors='surrogate_or_strict')) for p in paths]
+
+ if scan_sys_paths:
+ # append all sys.path entries with an ansible_collections package
+ for path in sys.path:
+ if (
+ path not in paths and
+ os.path.isdir(to_bytes(
+ os.path.join(path, 'ansible_collections'),
+ errors='surrogate_or_strict',
+ ))
+ ):
+ paths.append(path)
+
+ self._n_configured_paths = paths
+ self._n_cached_collection_paths = None
+ self._n_cached_collection_qualified_paths = None
+
+ self._n_playbook_paths = []
+
+ @classmethod
+ def _remove(cls):
+        # iterate over a copy, since we remove entries from the live list
+        for mps in list(sys.meta_path):
+            if isinstance(mps, _AnsibleCollectionFinder):
+                sys.meta_path.remove(mps)
+
+ # remove any path hooks that look like ours
+        for ph in list(sys.path_hooks):
+            if hasattr(ph, '__self__') and isinstance(ph.__self__, _AnsibleCollectionFinder):
+                sys.path_hooks.remove(ph)
+
+ # zap any cached path importer cache entries that might refer to us
+ sys.path_importer_cache.clear()
+
+ AnsibleCollectionConfig._collection_finder = None
+
+ # validate via the public property that we really killed it
+ if AnsibleCollectionConfig.collection_finder is not None:
+ raise AssertionError('_AnsibleCollectionFinder remove did not reset AnsibleCollectionConfig.collection_finder')
+
+ def _install(self):
+ self._remove()
+ sys.meta_path.insert(0, self)
+
+ sys.path_hooks.insert(0, self._ansible_collection_path_hook)
+
+ AnsibleCollectionConfig.collection_finder = self
+
+ def _ansible_collection_path_hook(self, path):
+ path = to_native(path)
+ interesting_paths = self._n_cached_collection_qualified_paths
+ if not interesting_paths:
+ interesting_paths = [os.path.join(p, 'ansible_collections') for p in
+ self._n_collection_paths]
+ interesting_paths.insert(0, self._ansible_pkg_path)
+ self._n_cached_collection_qualified_paths = interesting_paths
+
+ if any(path.startswith(p) for p in interesting_paths):
+ return _AnsiblePathHookFinder(self, path)
+
+ raise ImportError('not interested')
+
+ @property
+ def _n_collection_paths(self):
+ paths = self._n_cached_collection_paths
+ if not paths:
+ self._n_cached_collection_paths = paths = self._n_playbook_paths + self._n_configured_paths
+ return paths
+
+ def set_playbook_paths(self, playbook_paths):
+ if isinstance(playbook_paths, string_types):
+ playbook_paths = [playbook_paths]
+
+ # track visited paths; we have to preserve the dir order as-passed in case there are duplicate collections (first one wins)
+ added_paths = set()
+
+ # de-dupe
+ self._n_playbook_paths = [os.path.join(to_native(p), 'collections') for p in playbook_paths if not (p in added_paths or added_paths.add(p))]
+ self._n_cached_collection_paths = None
+ # HACK: playbook CLI sets this relatively late, so we've already loaded some packages whose paths might depend on this. Fix those up.
+ # NB: this should NOT be used for late additions; ideally we'd fix the playbook dir setup earlier in Ansible init
+ # to prevent this from occurring
+ for pkg in ['ansible_collections', 'ansible_collections.ansible']:
+ self._reload_hack(pkg)
+
+ def _reload_hack(self, fullname):
+ m = sys.modules.get(fullname)
+ if not m:
+ return
+ reload_module(m)
+
+ def find_module(self, fullname, path=None):
+ # Figure out what's being asked for, and delegate to a special-purpose loader
+
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+ module_to_find = split_name[-1]
+ part_count = len(split_name)
+
+ if toplevel_pkg not in ['ansible', 'ansible_collections']:
+ # not interested in anything other than ansible_collections (and limited cases under ansible)
+ return None
+
+ # sanity check what we're getting from import, canonicalize path values
+ if part_count == 1:
+ if path:
+ raise ValueError('path should not be specified for top-level packages (trying to find {0})'.format(fullname))
+ else:
+ # seed the path to the configured collection roots
+ path = self._n_collection_paths
+
+ if part_count > 1 and path is None:
+ raise ValueError('path must be specified for subpackages (trying to find {0})'.format(fullname))
+
+ # NB: actual "find"ing is delegated to the constructors on the various loaders; they'll ImportError if not found
+ try:
+ if toplevel_pkg == 'ansible':
+ # something under the ansible package, delegate to our internal loader in case of redirections
+ return _AnsibleInternalRedirectLoader(fullname=fullname, path_list=path)
+ if part_count == 1:
+ return _AnsibleCollectionRootPkgLoader(fullname=fullname, path_list=path)
+ if part_count == 2: # ns pkg eg, ansible_collections, ansible_collections.somens
+ return _AnsibleCollectionNSPkgLoader(fullname=fullname, path_list=path)
+ elif part_count == 3: # collection pkg eg, ansible_collections.somens.somecoll
+ return _AnsibleCollectionPkgLoader(fullname=fullname, path_list=path)
+ # anything below the collection
+ return _AnsibleCollectionLoader(fullname=fullname, path_list=path)
+ except ImportError:
+ # TODO: log attempt to load context
+ return None
+
+
+# Implements a path_hook finder for iter_modules (since it's only path based). This finder does not need to actually
+# function as a finder in most cases, since our meta_path finder is consulted first for *almost* everything, except
+# pkgutil.iter_modules, and under py2, pkgutil.get_data if the parent package passed has not been loaded yet.
+class _AnsiblePathHookFinder:
+ def __init__(self, collection_finder, pathctx):
+ # when called from a path_hook, find_module doesn't usually get the path arg, so this provides our context
+ self._pathctx = to_native(pathctx)
+ self._collection_finder = collection_finder
+ if PY3:
+ # cache the native FileFinder (take advantage of its filesystem cache for future find/load requests)
+ self._file_finder = None
+
+ # class init is fun- this method has a self arg that won't get used
+ def _get_filefinder_path_hook(self=None):
+ _file_finder_hook = None
+ if PY3:
+ # try to find the FileFinder hook to call for fallback path-based imports in Py3
+ _file_finder_hook = [ph for ph in sys.path_hooks if 'FileFinder' in repr(ph)]
+ if len(_file_finder_hook) != 1:
+ raise Exception('need exactly one FileFinder import hook (found {0})'.format(len(_file_finder_hook)))
+ _file_finder_hook = _file_finder_hook[0]
+
+ return _file_finder_hook
+
+ _filefinder_path_hook = _get_filefinder_path_hook()
+
+ def find_module(self, fullname, path=None):
+ # we ignore the passed in path here- use what we got from the path hook init
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+
+ if toplevel_pkg == 'ansible_collections':
+ # collections content? delegate to the collection finder
+ return self._collection_finder.find_module(fullname, path=[self._pathctx])
+ else:
+ # Something else; we'd normally restrict this to `ansible` descendent modules so that any weird loader
+ # behavior that arbitrary Python modules have can be serviced by those loaders. In some dev/test
+ # scenarios (eg a venv under a collection) our path_hook signs us up to load non-Ansible things, and
+ # it's too late by the time we've reached this point, but also too expensive for the path_hook to figure
+ # out what we *shouldn't* be loading with the limited info it has. So we'll just delegate to the
+ # normal path-based loader as best we can to service it. This also allows us to take advantage of Python's
+ # built-in FS caching and byte-compilation for most things.
+ if PY3:
+ # create or consult our cached file finder for this path
+ if not self._file_finder:
+ try:
+ self._file_finder = _AnsiblePathHookFinder._filefinder_path_hook(self._pathctx)
+ except ImportError:
+ # FUTURE: log at a high logging level? This is normal for things like python36.zip on the path, but
+ # might not be in some other situation...
+ return None
+
+ spec = self._file_finder.find_spec(fullname)
+ if not spec:
+ return None
+ return spec.loader
+ else:
+ # call py2's internal loader
+ return pkgutil.ImpImporter(self._pathctx).find_module(fullname)
+
+ def iter_modules(self, prefix):
+ # NB: this currently represents only what's on disk, and does not handle package redirection
+ return _iter_modules_impl([self._pathctx], prefix)
+
+ def __repr__(self):
+ return "{0}(path='{1}')".format(self.__class__.__name__, self._pathctx)
+
+
+class _AnsibleCollectionPkgLoaderBase:
+ _allows_package_code = False
+
+ def __init__(self, fullname, path_list=None):
+ self._fullname = fullname
+ self._redirect_module = None
+ self._split_name = fullname.split('.')
+ self._rpart_name = fullname.rpartition('.')
+ self._parent_package_name = self._rpart_name[0] # eg ansible_collections for ansible_collections.somens, '' for toplevel
+ self._package_to_load = self._rpart_name[2] # eg somens for ansible_collections.somens
+
+ self._source_code_path = None
+ self._decoded_source = None
+ self._compiled_code = None
+
+ self._validate_args()
+
+ self._candidate_paths = self._get_candidate_paths([to_native(p) for p in path_list])
+ self._subpackage_search_paths = self._get_subpackage_search_paths(self._candidate_paths)
+
+ self._validate_final()
+
+ # allow subclasses to validate args and sniff split values before we start digging around
+ def _validate_args(self):
+ if self._split_name[0] != 'ansible_collections':
+ raise ImportError('this loader can only load packages from the ansible_collections package, not {0}'.format(self._fullname))
+
+ # allow subclasses to customize candidate path filtering
+ def _get_candidate_paths(self, path_list):
+ return [os.path.join(p, self._package_to_load) for p in path_list]
+
+ # allow subclasses to customize finding paths
+ def _get_subpackage_search_paths(self, candidate_paths):
+ # filter candidate paths for existence (NB: silently ignoring package init code and same-named modules)
+ return [p for p in candidate_paths if os.path.isdir(to_bytes(p))]
+
+ # allow subclasses to customize state validation/manipulation before we return the loader instance
+ def _validate_final(self):
+ return
+
+ @staticmethod
+ @contextmanager
+ def _new_or_existing_module(name, **kwargs):
+ # handle all-or-nothing sys.modules creation/use-existing/delete-on-exception-if-created behavior
+ created_module = False
+ module = sys.modules.get(name)
+ try:
+ if not module:
+ module = ModuleType(name)
+ created_module = True
+ sys.modules[name] = module
+ # always override the values passed, except name (allow reference aliasing)
+ for attr, value in kwargs.items():
+ setattr(module, attr, value)
+ yield module
+ except Exception:
+ if created_module:
+ if sys.modules.get(name):
+ sys.modules.pop(name)
+ raise
+
+ # basic module/package location support
+ # NB: this does not support distributed packages!
+ @staticmethod
+ def _module_file_from_path(leaf_name, path):
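+        # Illustrative outcomes:
+        #   package dir with init:  ('<path>/foo/__init__.py', True, '<path>/foo')
+        #   package dir, no init:   ('<path>/foo/__synthetic__', False, '<path>/foo')
+        #   plain module:           ('<path>/foo.py', True, None)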
+ has_code = True
+ package_path = os.path.join(to_native(path), to_native(leaf_name))
+ module_path = None
+
+ # if the submodule is a package, assemble valid submodule paths, but stop looking for a module
+ if os.path.isdir(to_bytes(package_path)):
+ # is there a package init?
+ module_path = os.path.join(package_path, '__init__.py')
+ if not os.path.isfile(to_bytes(module_path)):
+ module_path = os.path.join(package_path, '__synthetic__')
+ has_code = False
+ else:
+ module_path = package_path + '.py'
+ package_path = None
+ if not os.path.isfile(to_bytes(module_path)):
+ raise ImportError('{0} not found at {1}'.format(leaf_name, path))
+
+ return module_path, has_code, package_path
+
+ def load_module(self, fullname):
+ # short-circuit redirect; we've already imported the redirected module, so just alias it and return it
+ if self._redirect_module:
+ sys.modules[self._fullname] = self._redirect_module
+ return self._redirect_module
+
+ # we're actually loading a module/package
+ module_attrs = dict(
+ __loader__=self,
+ __file__=self.get_filename(fullname),
+ __package__=self._parent_package_name # sane default for non-packages
+ )
+
+ # eg, I am a package
+ if self._subpackage_search_paths is not None: # empty is legal
+ module_attrs['__path__'] = self._subpackage_search_paths
+ module_attrs['__package__'] = fullname # per PEP366
+
+ with self._new_or_existing_module(fullname, **module_attrs) as module:
+ # execute the module's code in its namespace
+ code_obj = self.get_code(fullname)
+ if code_obj is not None: # things like NS packages that can't have code on disk will return None
+ exec(code_obj, module.__dict__)
+
+ return module
+
+ def is_package(self, fullname):
+ if fullname != self._fullname:
+ raise ValueError('this loader cannot answer is_package for {0}, only {1}'.format(fullname, self._fullname))
+ return self._subpackage_search_paths is not None
+
+ def get_source(self, fullname):
+ if self._decoded_source:
+ return self._decoded_source
+ if fullname != self._fullname:
+ raise ValueError('this loader cannot load source for {0}, only {1}'.format(fullname, self._fullname))
+ if not self._source_code_path:
+ return None
+ # FIXME: what do we want encoding/newline requirements to be?
+ self._decoded_source = self.get_data(self._source_code_path)
+ return self._decoded_source
+
+ def get_data(self, path):
+ if not path:
+ raise ValueError('a path must be specified')
+
+ # TODO: ensure we're being asked for a path below something we own
+ # TODO: try to handle redirects internally?
+
+        if path[0] != '/':
+ # relative to current package, search package paths if possible (this may not be necessary)
+ # candidate_paths = [os.path.join(ssp, path) for ssp in self._subpackage_search_paths]
+ raise ValueError('relative resource paths not supported')
+ else:
+ candidate_paths = [path]
+
+ for p in candidate_paths:
+ b_path = to_bytes(p)
+ if os.path.isfile(b_path):
+ with open(b_path, 'rb') as fd:
+ return fd.read()
+            # HACK: if the caller asks for __init__.py and the parent dir exists, return an empty string (this keeps
+            # "collection subpackages don't require __init__.py" working everywhere with get_data)
+ elif b_path.endswith(b'__init__.py') and os.path.isdir(os.path.dirname(b_path)):
+ return ''
+
+ return None
+
+ def _synthetic_filename(self, fullname):
+ return '<ansible_synthetic_collection_package>'
+
+ def get_filename(self, fullname):
+ if fullname != self._fullname:
+ raise ValueError('this loader cannot find files for {0}, only {1}'.format(fullname, self._fullname))
+
+ filename = self._source_code_path
+
+ if not filename and self.is_package(fullname):
+ if len(self._subpackage_search_paths) == 1:
+ filename = os.path.join(self._subpackage_search_paths[0], '__synthetic__')
+ else:
+ filename = self._synthetic_filename(fullname)
+
+ return filename
+
+ def get_code(self, fullname):
+ if self._compiled_code:
+ return self._compiled_code
+
+ # this may or may not be an actual filename, but it's the value we'll use for __file__
+ filename = self.get_filename(fullname)
+ if not filename:
+ filename = '<string>'
+
+ source_code = self.get_source(fullname)
+
+ # for things like synthetic modules that really have no source on disk, don't return a code object at all
+ # vs things like an empty package init (which has an empty string source on disk)
+ if source_code is None:
+ return None
+
+ self._compiled_code = compile(source=source_code, filename=filename, mode='exec', flags=0, dont_inherit=True)
+
+ return self._compiled_code
+
+ def iter_modules(self, prefix):
+ return _iter_modules_impl(self._subpackage_search_paths, prefix)
+
+ def __repr__(self):
+ return '{0}(path={1})'.format(self.__class__.__name__, self._subpackage_search_paths or self._source_code_path)
+
+
+class _AnsibleCollectionRootPkgLoader(_AnsibleCollectionPkgLoaderBase):
+ def _validate_args(self):
+ super(_AnsibleCollectionRootPkgLoader, self)._validate_args()
+ if len(self._split_name) != 1:
+ raise ImportError('this loader can only load the ansible_collections toplevel package, not {0}'.format(self._fullname))
+
+
+# Implements Ansible's custom namespace package support.
+# The ansible_collections package and one level down (collections namespaces) are Python namespace packages
+# that search across all configured collection roots. The collection package (two levels down) is the first one found
+# on the configured collection root path, and Python namespace package aggregation is not allowed at or below
+# the collection. Implements implicit package (package dir) support for both Py2/3. Package init code is ignored
+# by this loader.
+class _AnsibleCollectionNSPkgLoader(_AnsibleCollectionPkgLoaderBase):
+ def _validate_args(self):
+ super(_AnsibleCollectionNSPkgLoader, self)._validate_args()
+ if len(self._split_name) != 2:
+ raise ImportError('this loader can only load collections namespace packages, not {0}'.format(self._fullname))
+
+ def _validate_final(self):
+ # special-case the `ansible` namespace, since `ansible.builtin` is magical
+ if not self._subpackage_search_paths and self._package_to_load != 'ansible':
+ raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))
+
+
+# handles locating the actual collection package and associated metadata
+class _AnsibleCollectionPkgLoader(_AnsibleCollectionPkgLoaderBase):
+ def _validate_args(self):
+ super(_AnsibleCollectionPkgLoader, self)._validate_args()
+ if len(self._split_name) != 3:
+ raise ImportError('this loader can only load collection packages, not {0}'.format(self._fullname))
+
+ def _validate_final(self):
+ if self._split_name[1:3] == ['ansible', 'builtin']:
+ # we don't want to allow this one to have on-disk search capability
+ self._subpackage_search_paths = []
+ elif not self._subpackage_search_paths:
+ raise ImportError('no {0} found in {1}'.format(self._package_to_load, self._candidate_paths))
+ else:
+ # only search within the first collection we found
+ self._subpackage_search_paths = [self._subpackage_search_paths[0]]
+
+ def load_module(self, fullname):
+ if not _meta_yml_to_dict:
+ raise ValueError('ansible.utils.collection_loader._meta_yml_to_dict is not set')
+
+ module = super(_AnsibleCollectionPkgLoader, self).load_module(fullname)
+
+ module._collection_meta = {}
+ # TODO: load collection metadata, cache in __loader__ state
+
+ collection_name = '.'.join(self._split_name[1:3])
+
+ if collection_name == 'ansible.builtin':
+ # ansible.builtin is a synthetic collection, get its routing config from the Ansible distro
+ ansible_pkg_path = os.path.dirname(import_module('ansible').__file__)
+ metadata_path = os.path.join(ansible_pkg_path, 'config/ansible_builtin_runtime.yml')
+ with open(to_bytes(metadata_path), 'rb') as fd:
+ raw_routing = fd.read()
+ else:
+ b_routing_meta_path = to_bytes(os.path.join(module.__path__[0], 'meta/runtime.yml'))
+ if os.path.isfile(b_routing_meta_path):
+ with open(b_routing_meta_path, 'rb') as fd:
+ raw_routing = fd.read()
+ else:
+ raw_routing = ''
+ try:
+ if raw_routing:
+ routing_dict = _meta_yml_to_dict(raw_routing, (collection_name, 'runtime.yml'))
+ module._collection_meta = self._canonicalize_meta(routing_dict)
+ except Exception as ex:
+ raise ValueError('error parsing collection metadata: {0}'.format(to_native(ex)))
+
+ AnsibleCollectionConfig.on_collection_load.fire(collection_name=collection_name, collection_path=os.path.dirname(module.__file__))
+
+ return module
+
+ def _canonicalize_meta(self, meta_dict):
+ # TODO: rewrite import keys and all redirect targets that start with .. (current namespace) and . (current collection)
+ # OR we could do it all on the fly?
+ # if not meta_dict:
+ # return {}
+ #
+ # ns_name = '.'.join(self._split_name[0:2])
+ # collection_name = '.'.join(self._split_name[0:3])
+ #
+ # #
+ # for routing_type, routing_type_dict in iteritems(meta_dict.get('plugin_routing', {})):
+ # for plugin_key, plugin_dict in iteritems(routing_type_dict):
+ # redirect = plugin_dict.get('redirect', '')
+ # if redirect.startswith('..'):
+ # redirect = redirect[2:]
+
+ action_groups = meta_dict.pop('action_groups', {})
+ meta_dict['action_groups'] = {}
+ for group_name in action_groups:
+ for action_name in action_groups[group_name]:
+ if action_name in meta_dict['action_groups']:
+ meta_dict['action_groups'][action_name].append(group_name)
+ else:
+ meta_dict['action_groups'][action_name] = [group_name]
+
+ return meta_dict
+
+
+# loads everything under a collection, including handling redirections defined by the collection
+class _AnsibleCollectionLoader(_AnsibleCollectionPkgLoaderBase):
+ # HACK: stash this in a better place
+ _redirected_package_map = {}
+ _allows_package_code = True
+
+ def _validate_args(self):
+ super(_AnsibleCollectionLoader, self)._validate_args()
+ if len(self._split_name) < 4:
+ raise ValueError('this loader is only for sub-collection modules/packages, not {0}'.format(self._fullname))
+
+ def _get_candidate_paths(self, path_list):
+ if len(path_list) != 1 and self._split_name[1:3] != ['ansible', 'builtin']:
+ raise ValueError('this loader requires exactly one path to search')
+
+ return path_list
+
+ def _get_subpackage_search_paths(self, candidate_paths):
+ collection_name = '.'.join(self._split_name[1:3])
+ collection_meta = _get_collection_metadata(collection_name)
+
+ # check for explicit redirection, as well as ancestor package-level redirection (only load the actual code once!)
+ redirect = None
+ explicit_redirect = False
+
+ routing_entry = _nested_dict_get(collection_meta, ['import_redirection', self._fullname])
+ if routing_entry:
+ redirect = routing_entry.get('redirect')
+
+ if redirect:
+ explicit_redirect = True
+ else:
+ redirect = _get_ancestor_redirect(self._redirected_package_map, self._fullname)
+
+ # NB: package level redirection requires hooking all future imports beneath the redirected source package
+ # in order to ensure sanity on future relative imports. We always import everything under its "real" name,
+ # then add a sys.modules entry with the redirected name using the same module instance. If we naively imported
+ # the source for each redirection, most submodules would import OK, but we'd have N runtime copies of the module
+ # (one for each name), and relative imports that ascend above the redirected package would break (since they'd
+ # see the redirected ancestor package contents instead of the package where they actually live).
+ if redirect:
+ # FIXME: wrap this so we can be explicit about a failed redirection
+ self._redirect_module = import_module(redirect)
+ if explicit_redirect and hasattr(self._redirect_module, '__path__') and self._redirect_module.__path__:
+ # if the import target looks like a package, store its name so we can rewrite future descendent loads
+ self._redirected_package_map[self._fullname] = redirect
+
+ # if we redirected, don't do any further custom package logic
+ return None
+
+ # we're not doing a redirect- try to find what we need to actually load a module/package
+
+ # this will raise ImportError if we can't find the requested module/package at all
+ if not candidate_paths:
+            # no place to look, just ImportError
+ raise ImportError('package has no paths')
+
+ found_path, has_code, package_path = self._module_file_from_path(self._package_to_load, candidate_paths[0])
+
+ # still here? we found something to load...
+ if has_code:
+ self._source_code_path = found_path
+
+ if package_path:
+ return [package_path] # always needs to be a list
+
+ return None
+
+
+# This loader only answers for intercepted Ansible Python modules. Normal imports will fail here and be picked up later
+# by our path_hook importer (which proxies the built-in import mechanisms, allowing normal caching etc to occur)
+class _AnsibleInternalRedirectLoader:
+ def __init__(self, fullname, path_list):
+ self._redirect = None
+
+ split_name = fullname.split('.')
+ toplevel_pkg = split_name[0]
+ module_to_load = split_name[-1]
+
+ if toplevel_pkg != 'ansible':
+ raise ImportError('not interested')
+
+ builtin_meta = _get_collection_metadata('ansible.builtin')
+
+ routing_entry = _nested_dict_get(builtin_meta, ['import_redirection', fullname])
+ if routing_entry:
+ self._redirect = routing_entry.get('redirect')
+
+ if not self._redirect:
+ raise ImportError('not redirected, go ask path_hook')
+
+ def load_module(self, fullname):
+ # since we're delegating to other loaders, this should only be called for internal redirects where we answered
+ # find_module with this loader, in which case we'll just directly import the redirection target, insert it into
+ # sys.modules under the name it was requested by, and return the original module.
+
+ # should never see this
+ if not self._redirect:
+ raise ValueError('no redirect found for {0}'.format(fullname))
+
+ # FIXME: smuggle redirection context, provide warning/error that we tried and failed to redirect
+ mod = import_module(self._redirect)
+ sys.modules[fullname] = mod
+ return mod
+
+
+class AnsibleCollectionRef:
+ # FUTURE: introspect plugin loaders to get these dynamically?
+ VALID_REF_TYPES = frozenset(to_text(r) for r in ['action', 'become', 'cache', 'callback', 'cliconf', 'connection',
+ 'doc_fragments', 'filter', 'httpapi', 'inventory', 'lookup',
+ 'module_utils', 'modules', 'netconf', 'role', 'shell', 'strategy',
+ 'terminal', 'test', 'vars'])
+
+ # FIXME: tighten this up to match Python identifier reqs, etc
+ VALID_COLLECTION_NAME_RE = re.compile(to_text(r'^(\w+)\.(\w+)$'))
+ VALID_SUBDIRS_RE = re.compile(to_text(r'^\w+(\.\w+)*$'))
+ VALID_FQCR_RE = re.compile(to_text(r'^\w+\.\w+\.\w+(\.\w+)*$')) # can have 0-N included subdirs as well
+
+ def __init__(self, collection_name, subdirs, resource, ref_type):
+ """
+ Create an AnsibleCollectionRef from components
+ :param collection_name: a collection name of the form 'namespace.collectionname'
+ :param subdirs: optional subdir segments to be appended below the plugin type (eg, 'subdir1.subdir2')
+        :param resource: the name of the resource being referenced (eg, 'mymodule', 'someaction', 'a_role')
+ :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
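+
+        Illustrative example:
+            AnsibleCollectionRef('ns.coll', None, 'mymodule', 'modules')
+            # .fqcr == 'ns.coll.mymodule'
+            # .n_python_package_name == 'ansible_collections.ns.coll.plugins.modules'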
+ """
+ collection_name = to_text(collection_name, errors='strict')
+ if subdirs is not None:
+ subdirs = to_text(subdirs, errors='strict')
+ resource = to_text(resource, errors='strict')
+ ref_type = to_text(ref_type, errors='strict')
+
+ if not self.is_valid_collection_name(collection_name):
+ raise ValueError('invalid collection name (must be of the form namespace.collection): {0}'.format(to_native(collection_name)))
+
+ if ref_type not in self.VALID_REF_TYPES:
+ raise ValueError('invalid collection ref_type: {0}'.format(ref_type))
+
+ self.collection = collection_name
+ if subdirs:
+ if not re.match(self.VALID_SUBDIRS_RE, subdirs):
+ raise ValueError('invalid subdirs entry: {0} (must be empty/None or of the form subdir1.subdir2)'.format(to_native(subdirs)))
+ self.subdirs = subdirs
+ else:
+ self.subdirs = u''
+
+ self.resource = resource
+ self.ref_type = ref_type
+
+ package_components = [u'ansible_collections', self.collection]
+ fqcr_components = [self.collection]
+
+ self.n_python_collection_package_name = to_native('.'.join(package_components))
+
+ if self.ref_type == u'role':
+ package_components.append(u'roles')
+ else:
+ # we assume it's a plugin
+ package_components += [u'plugins', self.ref_type]
+
+ if self.subdirs:
+ package_components.append(self.subdirs)
+ fqcr_components.append(self.subdirs)
+
+ if self.ref_type == u'role':
+ # roles are their own resource
+ package_components.append(self.resource)
+
+ fqcr_components.append(self.resource)
+
+ self.n_python_package_name = to_native('.'.join(package_components))
+ self._fqcr = u'.'.join(fqcr_components)
+
+ def __repr__(self):
+ return 'AnsibleCollectionRef(collection={0!r}, subdirs={1!r}, resource={2!r})'.format(self.collection, self.subdirs, self.resource)
+
+ @property
+ def fqcr(self):
+ return self._fqcr
+
+ @staticmethod
+ def from_fqcr(ref, ref_type):
+ """
+ Parse a string as a fully-qualified collection reference, raises ValueError if invalid
+ :param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
+ :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
+ :return: a populated AnsibleCollectionRef object
+ """
+ # assuming the fq_name is of the form (ns).(coll).(optional_subdir_N).(resource_name),
+ # we split the resource name off the right, split ns and coll off the left, and we're left with any optional
+ # subdirs that need to be added back below the plugin-specific subdir we'll add. So:
+ # ns.coll.resource -> ansible_collections.ns.coll.plugins.(plugintype).resource
+ # ns.coll.subdir1.resource -> ansible_collections.ns.coll.plugins.subdir1.(plugintype).resource
+ # ns.coll.rolename -> ansible_collections.ns.coll.roles.rolename
+ if not AnsibleCollectionRef.is_valid_fqcr(ref):
+ raise ValueError('{0} is not a valid collection reference'.format(to_native(ref)))
+
+ ref = to_text(ref, errors='strict')
+ ref_type = to_text(ref_type, errors='strict')
+
+ resource_splitname = ref.rsplit(u'.', 1)
+ package_remnant = resource_splitname[0]
+ resource = resource_splitname[1]
+
+ # split the left two components of the collection package name off, anything remaining is plugin-type
+ # specific subdirs to be added back on below the plugin type
+ package_splitname = package_remnant.split(u'.', 2)
+ if len(package_splitname) == 3:
+ subdirs = package_splitname[2]
+ else:
+ subdirs = u''
+
+ collection_name = u'.'.join(package_splitname[0:2])
+
+ return AnsibleCollectionRef(collection_name, subdirs, resource, ref_type)
+
+ @staticmethod
+ def try_parse_fqcr(ref, ref_type):
+ """
+ Attempt to parse a string as a fully-qualified collection reference, returning None on failure (instead of raising an error)
+ :param ref: collection reference to parse (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
+ :param ref_type: the type of the reference, eg 'module', 'role', 'doc_fragment'
+ :return: a populated AnsibleCollectionRef object on successful parsing, else None
+ """
+ try:
+ return AnsibleCollectionRef.from_fqcr(ref, ref_type)
+ except ValueError:
+ pass
+
+ @staticmethod
+ def legacy_plugin_dir_to_plugin_type(legacy_plugin_dir_name):
+ """
+ Utility method to convert from a PluginLoader dir name to a plugin ref_type
+ :param legacy_plugin_dir_name: PluginLoader dir name (eg, 'action_plugins', 'library')
+ :return: the corresponding plugin ref_type (eg, 'action', 'role')
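+
+        Illustrative: 'action_plugins' -> 'action', 'library' -> 'modules'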
+ """
+ legacy_plugin_dir_name = to_text(legacy_plugin_dir_name)
+
+ plugin_type = legacy_plugin_dir_name.replace(u'_plugins', u'')
+
+ if plugin_type == u'library':
+ plugin_type = u'modules'
+
+ if plugin_type not in AnsibleCollectionRef.VALID_REF_TYPES:
+ raise ValueError('{0} cannot be mapped to a valid collection ref type'.format(to_native(legacy_plugin_dir_name)))
+
+ return plugin_type
+
+ @staticmethod
+ def is_valid_fqcr(ref, ref_type=None):
+ """
+        Validates whether a string is a well-formed fully-qualified collection reference (does not look up the collection itself)
+ :param ref: candidate collection reference to validate (a valid ref is of the form 'ns.coll.resource' or 'ns.coll.subdir1.subdir2.resource')
+ :param ref_type: optional reference type to enable deeper validation, eg 'module', 'role', 'doc_fragment'
+ :return: True if the collection ref passed is well-formed, False otherwise
+ """
+
+ ref = to_text(ref)
+
+ if not ref_type:
+ return bool(re.match(AnsibleCollectionRef.VALID_FQCR_RE, ref))
+
+ return bool(AnsibleCollectionRef.try_parse_fqcr(ref, ref_type))
+
+ @staticmethod
+ def is_valid_collection_name(collection_name):
+ """
+ Validates if the given string is a well-formed collection name (does not look up the collection itself)
+ :param collection_name: candidate collection name to validate (a valid name is of the form 'ns.collname')
+ :return: True if the collection name passed is well-formed, False otherwise
+ """
+
+ collection_name = to_text(collection_name)
+
+ return bool(re.match(AnsibleCollectionRef.VALID_COLLECTION_NAME_RE, collection_name))
+
+
+def _get_collection_role_path(role_name, collection_list=None):
+ acr = AnsibleCollectionRef.try_parse_fqcr(role_name, 'role')
+
+ if acr:
+ # looks like a valid qualified collection ref; skip the collection_list
+ collection_list = [acr.collection]
+ subdirs = acr.subdirs
+ resource = acr.resource
+ elif not collection_list:
+ return None # not a FQ role and no collection search list spec'd, nothing to do
+ else:
+ resource = role_name # treat as unqualified, loop through the collection search list to try and resolve
+ subdirs = ''
+
+ for collection_name in collection_list:
+ try:
+ acr = AnsibleCollectionRef(collection_name=collection_name, subdirs=subdirs, resource=resource, ref_type='role')
+ # FIXME: error handling/logging; need to catch any import failures and move along
+ pkg = import_module(acr.n_python_package_name)
+
+ if pkg is not None:
+ # the package is now loaded, get the collection's package and ask where it lives
+ path = os.path.dirname(to_bytes(sys.modules[acr.n_python_package_name].__file__, errors='surrogate_or_strict'))
+ return resource, to_text(path, errors='surrogate_or_strict'), collection_name
+
+ except IOError:
+ continue
+ except Exception as ex:
+ # FIXME: pick out typical import errors first, then error logging
+ continue
+
+ return None
+
+
+def _get_collection_name_from_path(path):
+ """
+ Return the containing collection name for a given path, or None if the path is not below a configured collection, or
+ the collection cannot be loaded (eg, the collection is masked by another of the same name higher in the configured
+ collection roots).
+ :param path: path to evaluate for collection containment
+ :return: collection name or None
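+
+    Illustrative (assuming '/coll_root' is a configured collection root):
+        '/coll_root/ansible_collections/ns/coll/plugins/modules/m.py' -> 'ns.coll'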
+ """
+
+ # FIXME: mess with realpath canonicalization or not?
+ path = to_native(path)
+
+ path_parts = path.split('/')
+ if path_parts.count('ansible_collections') != 1:
+ return None
+
+ ac_pos = path_parts.index('ansible_collections')
+
+ # make sure it's followed by at least a namespace and collection name
+ if len(path_parts) < ac_pos + 3:
+ return None
+
+ candidate_collection_name = '.'.join(path_parts[ac_pos + 1:ac_pos + 3])
+
+ try:
+ # we've got a name for it, now see if the path prefix matches what the loader sees
+ imported_pkg_path = to_native(os.path.dirname(to_bytes(import_module('ansible_collections.' + candidate_collection_name).__file__)))
+ except ImportError:
+ return None
+
+    # reassemble the original path prefix up to the collection name; it should match what we just imported. If not,
+    # this is probably a collection root that's not configured.
+
+ original_path_prefix = os.path.join('/', *path_parts[0:ac_pos + 3])
+
+ if original_path_prefix != imported_pkg_path:
+ return None
+
+ return candidate_collection_name
+
+
+def _get_import_redirect(collection_meta_dict, fullname):
+ if not collection_meta_dict:
+ return None
+
+ return _nested_dict_get(collection_meta_dict, ['import_redirection', fullname, 'redirect'])
+
+
+def _get_ancestor_redirect(redirected_package_map, fullname):
+ # walk the requested module's ancestor packages to see if any have been previously redirected
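+    # Illustrative: with {'a.b': 'x.y'} in the map, fullname 'a.b.c' redirects to 'x.y.c'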
+ cur_pkg = fullname
+ while cur_pkg:
+ cur_pkg = cur_pkg.rpartition('.')[0]
+ ancestor_redirect = redirected_package_map.get(cur_pkg)
+ if ancestor_redirect:
+ # rewrite the prefix on fullname so we import the target first, then alias it
+ redirect = ancestor_redirect + fullname[len(cur_pkg):]
+ return redirect
+ return None
+
+
+def _nested_dict_get(root_dict, key_list):
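+    # Illustrative: _nested_dict_get({'a': {'b': 1}}, ['a', 'b']) -> 1
+    #               _nested_dict_get({'a': {}}, ['a', 'b'])       -> None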
+ cur_value = root_dict
+ for key in key_list:
+ cur_value = cur_value.get(key)
+ if not cur_value:
+ return None
+
+ return cur_value
+
+
+def _iter_modules_impl(paths, prefix=''):
+ # NB: this currently only iterates what's on disk- redirected modules are not considered
+ if not prefix:
+ prefix = ''
+ else:
+ prefix = to_native(prefix)
+    # yield (name, ispkg) for each module/pkg under path
+ # TODO: implement ignore/silent catch for unreadable?
+ for b_path in map(to_bytes, paths):
+ if not os.path.isdir(b_path):
+ continue
+ for b_basename in sorted(os.listdir(b_path)):
+ b_candidate_module_path = os.path.join(b_path, b_basename)
+ if os.path.isdir(b_candidate_module_path):
+ # exclude things that obviously aren't Python package dirs
+ # FIXME: this dir is adjustable in py3.8+, check for it
+ if b'.' in b_basename or b_basename == b'__pycache__':
+ continue
+
+ # TODO: proper string handling?
+ yield prefix + to_native(b_basename), True
+ else:
+ # FIXME: match builtin ordering for package/dir/file, support compiled?
+ if b_basename.endswith(b'.py') and b_basename != b'__init__.py':
+ yield prefix + to_native(os.path.splitext(b_basename)[0]), False
+
+
+def _get_collection_metadata(collection_name):
+ collection_name = to_native(collection_name)
+ if not collection_name or not isinstance(collection_name, string_types) or len(collection_name.split('.')) != 2:
+ raise ValueError('collection_name must be a non-empty string of the form namespace.collection')
+
+ try:
+ collection_pkg = import_module('ansible_collections.' + collection_name)
+ except ImportError:
+ raise ValueError('unable to locate collection {0}'.format(collection_name))
+
+ _collection_meta = getattr(collection_pkg, '_collection_meta', None)
+
+ if _collection_meta is None:
+ raise ValueError('collection metadata was not loaded for collection {0}'.format(collection_name))
+
+ return _collection_meta
diff --git a/lib/ansible/utils/collection_loader/_collection_meta.py b/lib/ansible/utils/collection_loader/_collection_meta.py
new file mode 100644
index 00000000..b306b810
--- /dev/null
+++ b/lib/ansible/utils/collection_loader/_collection_meta.py
@@ -0,0 +1,33 @@
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from yaml import load
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+try:
+ from collections.abc import Mapping # pylint: disable=ansible-bad-import-from
+except ImportError:
+ from collections import Mapping # pylint: disable=ansible-bad-import-from
+
+
+def _meta_yml_to_dict(yaml_string_data, content_id):
+ """
+    Converts a YAML string containing a dictionary to a Python dictionary. This function may be monkeypatched with another implementation
+ by some tools (eg the import sanity test).
+ :param yaml_string_data: a bytes-ish YAML dictionary
+ :param content_id: a unique ID representing the content to allow other implementations to cache the output
+ :return: a Python dictionary representing the YAML dictionary content
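+
+    Illustrative: _meta_yml_to_dict(b'plugin_routing: {}', ('ns.coll', 'runtime.yml'))
+    returns {'plugin_routing': {}}.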
+ """
+ # NB: content_id is passed in, but not used by this implementation
+ routing_dict = load(yaml_string_data, Loader=SafeLoader)
+ if not routing_dict:
+ routing_dict = {}
+ if not isinstance(routing_dict, Mapping):
+ raise ValueError('collection metadata must be an instance of Python Mapping')
+ return routing_dict
diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py
new file mode 100644
index 00000000..8762b44f
--- /dev/null
+++ b/lib/ansible/utils/color.py
@@ -0,0 +1,127 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+from ansible import constants as C
+
+ANSIBLE_COLOR = True
+if C.ANSIBLE_NOCOLOR:
+ ANSIBLE_COLOR = False
+elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
+ ANSIBLE_COLOR = False
+else:
+ try:
+ import curses
+ curses.setupterm()
+ if curses.tigetnum('colors') < 0:
+ ANSIBLE_COLOR = False
+ except ImportError:
+ # curses library was not found
+ pass
+ except curses.error:
+ # curses returns an error (e.g. could not find terminal)
+ ANSIBLE_COLOR = False
+
+if C.ANSIBLE_FORCE_COLOR:
+ ANSIBLE_COLOR = True
+
+# --- begin "pretty"
+#
+# pretty - A miniature library that provides a Python print and stdout
+# wrapper that makes colored terminal text easier to use (e.g. without
+# having to mess around with ANSI escape sequences). This code is public
+# domain - there is no license except that you must leave this header.
+#
+# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
+#
+# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
+
+codeCodes = {
+ 'black': u'0;30', 'bright gray': u'0;37',
+ 'blue': u'0;34', 'white': u'1;37',
+ 'green': u'0;32', 'bright blue': u'1;34',
+ 'cyan': u'0;36', 'bright green': u'1;32',
+ 'red': u'0;31', 'bright cyan': u'1;36',
+ 'purple': u'0;35', 'bright red': u'1;31',
+ 'yellow': u'0;33', 'bright purple': u'1;35',
+ 'dark gray': u'1;30', 'bright yellow': u'1;33',
+ 'magenta': u'0;35', 'bright magenta': u'1;35',
+ 'normal': u'0',
+}
+
+
+def parsecolor(color):
+ """SGR parameter string for the specified color name."""
+ matches = re.match(r"color(?P<color>[0-9]+)"
+ r"|(?P<rgb>rgb(?P<red>[0-5])(?P<green>[0-5])(?P<blue>[0-5]))"
+ r"|gray(?P<gray>[0-9]+)", color)
+ if not matches:
+ return codeCodes[color]
+ if matches.group('color'):
+ return u'38;5;%d' % int(matches.group('color'))
+ if matches.group('rgb'):
+ return u'38;5;%d' % (16 + 36 * int(matches.group('red')) +
+ 6 * int(matches.group('green')) +
+ int(matches.group('blue')))
+ if matches.group('gray'):
+ return u'38;5;%d' % (232 + int(matches.group('gray')))
+
+
+def stringc(text, color, wrap_nonvisible_chars=False):
+ """String in color."""
+
+ if ANSIBLE_COLOR:
+ color_code = parsecolor(color)
+ fmt = u"\033[%sm%s\033[0m"
+ if wrap_nonvisible_chars:
+ # This option is provided for use in cases when the
+ # formatting of a command line prompt is needed, such as
+ # `ansible-console`. As said in `readline` sources:
+ # readline/display.c:321
+ # /* Current implementation:
+ # \001 (^A) start non-visible characters
+ # \002 (^B) end non-visible characters
+ # all characters except \001 and \002 (following a \001) are copied to
+ # the returned string; all characters except those between \001 and
+ # \002 are assumed to be `visible'. */
+ fmt = u"\001\033[%sm\002%s\001\033[0m\002"
+ return u"\n".join([fmt % (color_code, t) for t in text.split(u'\n')])
+ else:
+ return text
+
+
+def colorize(lead, num, color):
+ """ Print 'lead' = 'num' in 'color' """
+ s = u"%s=%-4s" % (lead, str(num))
+ if num != 0 and ANSIBLE_COLOR and color is not None:
+ s = stringc(s, color)
+ return s
+
+
+def hostcolor(host, stats, color=True):
+ if ANSIBLE_COLOR and color:
+ if stats['failures'] != 0 or stats['unreachable'] != 0:
+ return u"%-37s" % stringc(host, C.COLOR_ERROR)
+ elif stats['changed'] != 0:
+ return u"%-37s" % stringc(host, C.COLOR_CHANGED)
+ else:
+ return u"%-37s" % stringc(host, C.COLOR_OK)
+ return u"%-26s" % host
diff --git a/lib/ansible/utils/context_objects.py b/lib/ansible/utils/context_objects.py
new file mode 100644
index 00000000..71241749
--- /dev/null
+++ b/lib/ansible/utils/context_objects.py
@@ -0,0 +1,92 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Hold command line arguments for use in other modules
+"""
+
+from abc import ABCMeta
+
+from ansible.module_utils.common._collections_compat import (Container, Mapping, Sequence, Set)
+from ansible.module_utils.common.collections import ImmutableDict
+from ansible.module_utils.six import add_metaclass, binary_type, text_type
+from ansible.utils.singleton import Singleton
+
+
+def _make_immutable(obj):
+ """Recursively convert a container and objects inside of it into immutable data types"""
+ if isinstance(obj, (text_type, binary_type)):
+ # Strings first because they are also sequences
+ return obj
+ elif isinstance(obj, Mapping):
+ temp_dict = {}
+ for key, value in obj.items():
+ if isinstance(value, Container):
+ temp_dict[key] = _make_immutable(value)
+ else:
+ temp_dict[key] = value
+ return ImmutableDict(temp_dict)
+ elif isinstance(obj, Set):
+ temp_set = set()
+ for value in obj:
+ if isinstance(value, Container):
+ temp_set.add(_make_immutable(value))
+ else:
+ temp_set.add(value)
+ return frozenset(temp_set)
+ elif isinstance(obj, Sequence):
+ temp_sequence = []
+ for value in obj:
+ if isinstance(value, Container):
+ temp_sequence.append(_make_immutable(value))
+ else:
+ temp_sequence.append(value)
+ return tuple(temp_sequence)
+
+ return obj
+
+
+class _ABCSingleton(Singleton, ABCMeta):
+ """
+ Combine ABCMeta based classes with Singleton based classes
+
+ Combine Singleton and ABCMeta so we have a metaclass that unambiguously knows which can override
+ the other. Useful for making new types of containers which are also Singletons.
+ """
+ pass
+
+
+class CLIArgs(ImmutableDict):
+ """
+ Hold a parsed copy of cli arguments
+
+ We have both this non-Singleton version and the Singleton, GlobalCLIArgs, version to leave us
+ room to implement a Context object in the future. Whereas there should only be one set of args
+ in a global context, individual Context objects might want to pretend that they have different
+ command line switches to trigger different behaviour when they run. So if we support Contexts
+ in the future, they would use CLIArgs instead of GlobalCLIArgs to store their version of command
+ line flags.
+ """
+ def __init__(self, mapping):
+ toplevel = {}
+ for key, value in mapping.items():
+ toplevel[key] = _make_immutable(value)
+ super(CLIArgs, self).__init__(toplevel)
+
+ @classmethod
+ def from_options(cls, options):
+ return cls(vars(options))
+
+
+@add_metaclass(_ABCSingleton)
+class GlobalCLIArgs(CLIArgs):
+ """
+ Globally hold a parsed copy of cli arguments.
+
+    Only one of these exists per program, as it holds the global context
+ """
+ pass
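
A minimal sketch of the difference between CLIArgs and GlobalCLIArgs (the
Namespace fields are arbitrary examples):

    from argparse import Namespace
    from ansible.utils.context_objects import CLIArgs, GlobalCLIArgs

    args = CLIArgs.from_options(Namespace(check=True, tags=['web', 'db']))
    print(args['tags'])  # -> ('web', 'db'); _make_immutable turns lists into tuples

    # GlobalCLIArgs is a Singleton: the second call returns the first object
    first = GlobalCLIArgs.from_options(Namespace(check=False))
    second = GlobalCLIArgs.from_options(Namespace(check=True))
    print(first is second)  # -> True
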
diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py
new file mode 100644
index 00000000..ca3dc3c2
--- /dev/null
+++ b/lib/ansible/utils/display.py
@@ -0,0 +1,438 @@
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import fcntl
+import getpass
+import locale
+import logging
+import os
+import random
+import subprocess
+import sys
+import textwrap
+import time
+
+from struct import unpack, pack
+from termios import TIOCGWINSZ
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.module_utils.six import with_metaclass, string_types
+from ansible.utils.color import stringc
+from ansible.utils.singleton import Singleton
+from ansible.utils.unsafe_proxy import wrap_var
+
+try:
+ # Python 2
+ input = raw_input
+except NameError:
+ # Python 3, we already have raw_input
+ pass
+
+
+class FilterBlackList(logging.Filter):
+ def __init__(self, blacklist):
+ self.blacklist = [logging.Filter(name) for name in blacklist]
+
+ def filter(self, record):
+ return not any(f.filter(record) for f in self.blacklist)
+
+
+class FilterUserInjector(logging.Filter):
+ """
+ This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
+ to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
+ """
+
+ try:
+ username = getpass.getuser()
+ except KeyError:
+ # people like to make containers w/o actual valid passwd/shadow and use host uids
+ username = 'uid=%s' % os.getuid()
+
+ def filter(self, record):
+ record.user = FilterUserInjector.username
+ return True
+
+
+logger = None
+# TODO: make this a callback event instead
+if getattr(C, 'DEFAULT_LOG_PATH'):
+ path = C.DEFAULT_LOG_PATH
+ if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
+ # NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
+ logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
+ format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
+
+ logger = logging.getLogger('ansible')
+ for handler in logging.root.handlers:
+ handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
+ handler.addFilter(FilterUserInjector())
+ else:
+ print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
+
+# map color to log levels
+color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
+ C.COLOR_WARN: logging.WARNING,
+ C.COLOR_OK: logging.INFO,
+ C.COLOR_SKIP: logging.WARNING,
+ C.COLOR_UNREACHABLE: logging.ERROR,
+ C.COLOR_DEBUG: logging.DEBUG,
+ C.COLOR_CHANGED: logging.INFO,
+ C.COLOR_DEPRECATE: logging.WARNING,
+ C.COLOR_VERBOSE: logging.INFO}
+
+b_COW_PATHS = (
+ b"/usr/bin/cowsay",
+ b"/usr/games/cowsay",
+ b"/usr/local/bin/cowsay", # BSD path for cowsay
+ b"/opt/local/bin/cowsay", # MacPorts path for cowsay
+)
+
+
+class Display(with_metaclass(Singleton, object)):
+
+ def __init__(self, verbosity=0):
+
+ self.columns = None
+ self.verbosity = verbosity
+
+ # list of all deprecation messages to prevent duplicate display
+ self._deprecations = {}
+ self._warns = {}
+ self._errors = {}
+
+ self.b_cowsay = None
+ self.noncow = C.ANSIBLE_COW_SELECTION
+
+ self.set_cowsay_info()
+
+ if self.b_cowsay:
+ try:
+ cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ self.cows_available = set([to_text(c) for c in out.split()])
+ if C.ANSIBLE_COW_WHITELIST and any(C.ANSIBLE_COW_WHITELIST):
+ self.cows_available = set(C.ANSIBLE_COW_WHITELIST).intersection(self.cows_available)
+ except Exception:
+ # could not execute cowsay for some reason
+ self.b_cowsay = False
+
+ self._set_column_width()
+
+ def set_cowsay_info(self):
+ if C.ANSIBLE_NOCOWS:
+ return
+
+ if C.ANSIBLE_COW_PATH:
+ self.b_cowsay = C.ANSIBLE_COW_PATH
+ else:
+ for b_cow_path in b_COW_PATHS:
+ if os.path.exists(b_cow_path):
+ self.b_cowsay = b_cow_path
+
+ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
+ """ Display a message to the user
+
+ Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
+ """
+
+ nocolor = msg
+
+ if not log_only:
+
+ has_newline = msg.endswith(u'\n')
+ if has_newline:
+ msg2 = msg[:-1]
+ else:
+ msg2 = msg
+
+ if color:
+ msg2 = stringc(msg2, color)
+
+ if has_newline or newline:
+ msg2 = msg2 + u'\n'
+
+ msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
+ if sys.version_info >= (3,):
+ # Convert back to text string on python3
+ # We first convert to a byte string so that we get rid of
+ # characters that are invalid in the user's locale
+ msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
+
+ # Note: After Display() class is refactored need to update the log capture
+ # code in 'bin/ansible-connection' (and other relevant places).
+ if not stderr:
+ fileobj = sys.stdout
+ else:
+ fileobj = sys.stderr
+
+ fileobj.write(msg2)
+
+ try:
+ fileobj.flush()
+ except IOError as e:
+ # Ignore EPIPE in case fileobj has been prematurely closed, eg.
+ # when piping to "head -n1"
+ if e.errno != errno.EPIPE:
+ raise
+
+ if logger and not screen_only:
+ # We first convert to a byte string so that we get rid of
+ # color and characters that are invalid in the user's locale
+ msg2 = to_bytes(nocolor.lstrip(u'\n'))
+
+ if sys.version_info >= (3,):
+ # Convert back to text string on python3
+ msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
+
+ lvl = logging.INFO
+ if color:
+ # set logger level based on color (not great)
+ try:
+ lvl = color_to_log_level[color]
+ except KeyError:
+ # this should not happen, but JIC
+ raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
+ # actually log
+ logger.log(lvl, msg2)
+
+ def v(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=0)
+
+ def vv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=1)
+
+ def vvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=2)
+
+ def vvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=3)
+
+ def vvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=4)
+
+ def vvvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=5)
+
+ def debug(self, msg, host=None):
+ if C.DEFAULT_DEBUG:
+ if host is None:
+ self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
+ else:
+ self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
+
+ def verbose(self, msg, host=None, caplevel=2):
+
+ to_stderr = C.VERBOSE_TO_STDERR
+ if self.verbosity > caplevel:
+ if host is None:
+ self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
+ else:
+ self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
+
+ def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
+        ''' Build and return a deprecation message; callers decide how to display it.'''
+ msg = msg.strip()
+ if msg and msg[-1] not in ['!', '?', '.']:
+ msg += '.'
+
+ if collection_name == 'ansible.builtin':
+ collection_name = 'ansible-base'
+
+ if removed:
+ header = '[DEPRECATED]: {0}'.format(msg)
+ removal_fragment = 'This feature was removed'
+ help_text = 'Please update your playbooks.'
+ else:
+ header = '[DEPRECATION WARNING]: {0}'.format(msg)
+ removal_fragment = 'This feature will be removed'
+ # FUTURE: make this a standalone warning so it only shows up once?
+ help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
+
+ if collection_name:
+ from_fragment = 'from {0}'.format(collection_name)
+ else:
+ from_fragment = ''
+
+ if date:
+ when = 'in a release after {0}.'.format(date)
+ elif version:
+ when = 'in version {0}.'.format(version)
+ else:
+ when = 'in a future release.'
+
+ message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
+
+ return message_text
+
+ def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
+ if not removed and not C.DEPRECATION_WARNINGS:
+ return
+
+ message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
+
+ if removed:
+ raise AnsibleError(message_text)
+
+ wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
+ message_text = "\n".join(wrapped) + "\n"
+
+ if message_text not in self._deprecations:
+ self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
+ self._deprecations[message_text] = 1
+
+ def warning(self, msg, formatted=False):
+
+ if not formatted:
+ new_msg = "[WARNING]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, self.columns)
+ new_msg = "\n".join(wrapped) + "\n"
+ else:
+ new_msg = "\n[WARNING]: \n%s" % msg
+
+ if new_msg not in self._warns:
+ self.display(new_msg, color=C.COLOR_WARN, stderr=True)
+ self._warns[new_msg] = 1
+
+ def system_warning(self, msg):
+ if C.SYSTEM_WARNINGS:
+ self.warning(msg)
+
+ def banner(self, msg, color=None, cows=True):
+ '''
+        Prints a header-looking line, using cowsay if available, otherwise padded with stars to the terminal width (3 stars minimum)
+ '''
+ if self.b_cowsay and cows:
+ try:
+ self.banner_cowsay(msg)
+ return
+ except OSError:
+ self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
+
+ msg = msg.strip()
+ star_len = self.columns - len(msg)
+ if star_len <= 3:
+ star_len = 3
+ stars = u"*" * star_len
+ self.display(u"\n%s %s" % (msg, stars), color=color)
+
+ def banner_cowsay(self, msg, color=None):
+ if u": [" in msg:
+ msg = msg.replace(u"[", u"")
+ if msg.endswith(u"]"):
+ msg = msg[:-1]
+ runcmd = [self.b_cowsay, b"-W", b"60"]
+ if self.noncow:
+ thecow = self.noncow
+ if thecow == 'random':
+ thecow = random.choice(list(self.cows_available))
+ runcmd.append(b'-f')
+ runcmd.append(to_bytes(thecow))
+ runcmd.append(to_bytes(msg))
+ cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ self.display(u"%s\n" % to_text(out), color=color)
+
+ def error(self, msg, wrap_text=True):
+ if wrap_text:
+ new_msg = u"\n[ERROR]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, self.columns)
+ new_msg = u"\n".join(wrapped) + u"\n"
+ else:
+ new_msg = u"ERROR! %s" % msg
+ if new_msg not in self._errors:
+ self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
+ self._errors[new_msg] = 1
+
+ @staticmethod
+ def prompt(msg, private=False):
+ prompt_string = to_bytes(msg, encoding=Display._output_encoding())
+ if sys.version_info >= (3,):
+ # Convert back into text on python3. We do this double conversion
+ # to get rid of characters that are illegal in the user's locale
+ prompt_string = to_text(prompt_string)
+
+ if private:
+ return getpass.getpass(prompt_string)
+ else:
+ return input(prompt_string)
+
+ def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
+
+ result = None
+ if sys.__stdin__.isatty():
+
+ do_prompt = self.prompt
+
+ if prompt and default is not None:
+ msg = "%s [%s]: " % (prompt, default)
+ elif prompt:
+ msg = "%s: " % prompt
+ else:
+ msg = 'input for %s: ' % varname
+
+ if confirm:
+ while True:
+ result = do_prompt(msg, private)
+ second = do_prompt("confirm " + msg, private)
+ if result == second:
+ break
+ self.display("***** VALUES ENTERED DO NOT MATCH ****")
+ else:
+ result = do_prompt(msg, private)
+ else:
+ result = None
+ self.warning("Not prompting as we are not in interactive mode")
+
+ # if result is false and default is not None
+ if not result and default is not None:
+ result = default
+
+ if encrypt:
+ # Circular import because encrypt needs a display class
+ from ansible.utils.encrypt import do_encrypt
+ result = do_encrypt(result, encrypt, salt_size, salt)
+
+ # handle utf-8 chars
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if unsafe:
+ result = wrap_var(result)
+ return result
+
+ @staticmethod
+ def _output_encoding(stderr=False):
+ encoding = locale.getpreferredencoding()
+ # https://bugs.python.org/issue6202
+ # Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
+ # instead.
+ if encoding in ('mac-roman',):
+ encoding = 'utf-8'
+ return encoding
+
+ def _set_column_width(self):
+ if os.isatty(1):
+ tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
+ else:
+ tty_size = 0
+ self.columns = max(79, tty_size - 1)
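
A minimal sketch of the verbosity gating above (Display is a Singleton, so
the verbosity chosen here sticks for the rest of the process):

    from ansible.utils.display import Display

    display = Display(verbosity=1)
    display.display(u'always shown')
    display.warning(u'shown once; repeats are de-duplicated via self._warns')
    display.v(u'shown: verbosity 1 > caplevel 0')
    display.vv(u'suppressed: verbosity 1 is not > caplevel 1')
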
diff --git a/lib/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
new file mode 100644
index 00000000..4a35d8cf
--- /dev/null
+++ b/lib/ansible/utils/encrypt.py
@@ -0,0 +1,197 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import crypt
+import multiprocessing
+import random
+import string
+import sys
+
+from collections import namedtuple
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import text_type
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.utils.display import Display
+
+PASSLIB_AVAILABLE = False
+try:
+ import passlib
+ import passlib.hash
+ from passlib.utils.handlers import HasRawSalt
+
+ PASSLIB_AVAILABLE = True
+except Exception:
+ pass
+
+display = Display()
+
+__all__ = ['do_encrypt']
+
+_LOCK = multiprocessing.Lock()
+
+DEFAULT_PASSWORD_LENGTH = 20
+
+
+def random_password(length=DEFAULT_PASSWORD_LENGTH, chars=C.DEFAULT_PASSWORD_CHARS):
+    '''Return a random password string of the given length containing only the given chars
+
+ :kwarg length: The number of characters in the new password. Defaults to 20.
+ :kwarg chars: The characters to choose from. The default is all ascii
+ letters, ascii digits, and these symbols ``.,:-_``
+ '''
+ if not isinstance(chars, text_type):
+ raise AnsibleAssertionError('%s (%s) is not a text_type' % (chars, type(chars)))
+
+ random_generator = random.SystemRandom()
+ return u''.join(random_generator.choice(chars) for dummy in range(length))
+
+
+def random_salt(length=8):
+ """Return a text string suitable for use as a salt for the hash functions we use to encrypt passwords.
+ """
+ # Note passlib salt values must be pure ascii so we can't let the user
+ # configure this
+ salt_chars = string.ascii_letters + string.digits + u'./'
+ return random_password(length=length, chars=salt_chars)
+
+
+class BaseHash(object):
+ algo = namedtuple('algo', ['crypt_id', 'salt_size', 'implicit_rounds'])
+ algorithms = {
+ 'md5_crypt': algo(crypt_id='1', salt_size=8, implicit_rounds=None),
+ 'bcrypt': algo(crypt_id='2a', salt_size=22, implicit_rounds=None),
+ 'sha256_crypt': algo(crypt_id='5', salt_size=16, implicit_rounds=5000),
+ 'sha512_crypt': algo(crypt_id='6', salt_size=16, implicit_rounds=5000),
+ }
+
+ def __init__(self, algorithm):
+ self.algorithm = algorithm
+
+
+class CryptHash(BaseHash):
+ def __init__(self, algorithm):
+ super(CryptHash, self).__init__(algorithm)
+
+ if sys.platform.startswith('darwin'):
+ raise AnsibleError("crypt.crypt not supported on Mac OS X/Darwin, install passlib python module")
+
+ if algorithm not in self.algorithms:
+ raise AnsibleError("crypt.crypt does not support '%s' algorithm" % self.algorithm)
+ self.algo_data = self.algorithms[algorithm]
+
+ def hash(self, secret, salt=None, salt_size=None, rounds=None):
+ salt = self._salt(salt, salt_size)
+ rounds = self._rounds(rounds)
+ return self._hash(secret, salt, rounds)
+
+ def _salt(self, salt, salt_size):
+ salt_size = salt_size or self.algo_data.salt_size
+ return salt or random_salt(salt_size)
+
+ def _rounds(self, rounds):
+ if rounds == self.algo_data.implicit_rounds:
+ # Passlib does not include the rounds if it is the same as implicit_rounds.
+ # Make crypt lib behave the same, by not explicitly specifying the rounds in that case.
+ return None
+ else:
+ return rounds
+
+ def _hash(self, secret, salt, rounds):
+ if rounds is None:
+ saltstring = "$%s$%s" % (self.algo_data.crypt_id, salt)
+ else:
+ saltstring = "$%s$rounds=%d$%s" % (self.algo_data.crypt_id, rounds, salt)
+ result = crypt.crypt(secret, saltstring)
+
+ # crypt.crypt returns None if it cannot parse saltstring
+        # None as a result would be interpreted by some modules (e.g. the user module)
+ # as no password at all.
+ if not result:
+ raise AnsibleError("crypt.crypt does not support '%s' algorithm" % self.algorithm)
+
+ return result
+
+
+class PasslibHash(BaseHash):
+ def __init__(self, algorithm):
+ super(PasslibHash, self).__init__(algorithm)
+
+ if not PASSLIB_AVAILABLE:
+ raise AnsibleError("passlib must be installed to hash with '%s'" % algorithm)
+
+ try:
+ self.crypt_algo = getattr(passlib.hash, algorithm)
+ except Exception:
+ raise AnsibleError("passlib does not support '%s' algorithm" % algorithm)
+
+ def hash(self, secret, salt=None, salt_size=None, rounds=None):
+ salt = self._clean_salt(salt)
+ rounds = self._clean_rounds(rounds)
+ return self._hash(secret, salt=salt, salt_size=salt_size, rounds=rounds)
+
+ def _clean_salt(self, salt):
+ if not salt:
+ return None
+ elif issubclass(self.crypt_algo, HasRawSalt):
+ return to_bytes(salt, encoding='ascii', errors='strict')
+ else:
+ return to_text(salt, encoding='ascii', errors='strict')
+
+ def _clean_rounds(self, rounds):
+ algo_data = self.algorithms.get(self.algorithm)
+ if rounds:
+ return rounds
+ elif algo_data and algo_data.implicit_rounds:
+ # The default rounds used by passlib depend on the passlib version.
+ # For consistency ensure that passlib behaves the same as crypt in case no rounds were specified.
+ # Thus use the crypt defaults.
+ return algo_data.implicit_rounds
+ else:
+ return None
+
+ def _hash(self, secret, salt, salt_size, rounds):
+ # Not every hash algorithm supports every parameter.
+ # Thus create the settings dict only with set parameters.
+ settings = {}
+ if salt:
+ settings['salt'] = salt
+ if salt_size:
+ settings['salt_size'] = salt_size
+ if rounds:
+ settings['rounds'] = rounds
+
+ # starting with passlib 1.7 'using' and 'hash' should be used instead of 'encrypt'
+ if hasattr(self.crypt_algo, 'hash'):
+ result = self.crypt_algo.using(**settings).hash(secret)
+ elif hasattr(self.crypt_algo, 'encrypt'):
+ result = self.crypt_algo.encrypt(secret, **settings)
+ else:
+ raise AnsibleError("installed passlib version %s not supported" % passlib.__version__)
+
+ # passlib.hash should always return something or raise an exception.
+ # Still ensure that there is always a result.
+ # Otherwise an empty password might be assumed by some modules, like the user module.
+ if not result:
+ raise AnsibleError("failed to hash with algorithm '%s'" % self.algorithm)
+
+ # Hashes from passlib.hash should be represented as ascii strings of hex
+ # digits so this should not traceback. If it's not representable as such
+ # we need to traceback and then blacklist such algorithms because it may
+ # impact calling code.
+ return to_text(result, errors='strict')
+
+
+def passlib_or_crypt(secret, algorithm, salt=None, salt_size=None, rounds=None):
+ if PASSLIB_AVAILABLE:
+ return PasslibHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds)
+ else:
+ return CryptHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds)
+
+
+def do_encrypt(result, encrypt, salt_size=None, salt=None):
+ return passlib_or_crypt(result, encrypt, salt_size=salt_size, salt=salt)
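
A minimal sketch of do_encrypt (assumes either passlib is installed or the
platform crypt(3) supports the requested scheme; the salt is an arbitrary
example that makes the output reproducible):

    from ansible.utils.encrypt import do_encrypt, random_password

    print(do_encrypt(u'pa$$word', 'sha512_crypt', salt='0123456789abcdef'))
    # -> $6$0123456789abcdef$...
    print(random_password(length=12))  # 12 chars from C.DEFAULT_PASSWORD_CHARS
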
diff --git a/lib/ansible/utils/fqcn.py b/lib/ansible/utils/fqcn.py
new file mode 100644
index 00000000..a492be1f
--- /dev/null
+++ b/lib/ansible/utils/fqcn.py
@@ -0,0 +1,33 @@
+# (c) 2020, Felix Fontein <felix@fontein.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def add_internal_fqcns(names):
+ '''
+ Given a sequence of action/module names, returns a list of these names
+ with the same names with the prefixes `ansible.builtin.` and
+ `ansible.legacy.` added for all names that are not already FQCNs.
+ '''
+ result = []
+ for name in names:
+ result.append(name)
+ if '.' not in name:
+ result.append('ansible.builtin.%s' % name)
+ result.append('ansible.legacy.%s' % name)
+ return result
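
A minimal sketch of add_internal_fqcns (the module names are examples):

    from ansible.utils.fqcn import add_internal_fqcns

    print(add_internal_fqcns(['command', 'community.general.ufw']))
    # -> ['command', 'ansible.builtin.command', 'ansible.legacy.command',
    #     'community.general.ufw']  (names containing '.' pass through as-is)
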
diff --git a/lib/ansible/utils/galaxy.py b/lib/ansible/utils/galaxy.py
new file mode 100644
index 00000000..cb1f125b
--- /dev/null
+++ b/lib/ansible/utils/galaxy.py
@@ -0,0 +1,94 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+from subprocess import Popen, PIPE
+import tarfile
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_text, to_native
+
+
+display = Display()
+
+
+def scm_archive_collection(src, name=None, version='HEAD'):
+ return scm_archive_resource(src, scm='git', name=name, version=version, keep_scm_meta=False)
+
+
+def scm_archive_resource(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
+
+ def run_scm_cmd(cmd, tempdir):
+ try:
+ stdout = ''
+ stderr = ''
+ popen = Popen(cmd, cwd=tempdir, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = popen.communicate()
+ except Exception as e:
+ ran = " ".join(cmd)
+ display.debug("ran %s:" % ran)
+ raise AnsibleError("when executing %s: %s" % (ran, to_native(e)))
+ if popen.returncode != 0:
+ raise AnsibleError("- command %s failed in directory %s (rc=%s) - %s" % (' '.join(cmd), tempdir, popen.returncode, to_native(stderr)))
+
+ if scm not in ['hg', 'git']:
+ raise AnsibleError("- scm %s is not currently supported" % scm)
+
+ try:
+ scm_path = get_bin_path(scm)
+ except (ValueError, OSError, IOError):
+ raise AnsibleError("could not find/use %s, it is required to continue with installing %s" % (scm, src))
+
+ tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
+ clone_cmd = [scm_path, 'clone', src, name]
+ run_scm_cmd(clone_cmd, tempdir)
+
+ if scm == 'git' and version:
+ checkout_cmd = [scm_path, 'checkout', to_text(version)]
+ run_scm_cmd(checkout_cmd, os.path.join(tempdir, name))
+
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar', dir=C.DEFAULT_LOCAL_TMP)
+ archive_cmd = None
+ if keep_scm_meta:
+ display.vvv('tarring %s from %s to %s' % (name, tempdir, temp_file.name))
+ with tarfile.open(temp_file.name, "w") as tar:
+ tar.add(os.path.join(tempdir, name), arcname=name)
+ elif scm == 'hg':
+ archive_cmd = [scm_path, 'archive', '--prefix', "%s/" % name]
+ if version:
+ archive_cmd.extend(['-r', version])
+ archive_cmd.append(temp_file.name)
+ elif scm == 'git':
+ archive_cmd = [scm_path, 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
+ if version:
+ archive_cmd.append(version)
+ else:
+ archive_cmd.append('HEAD')
+
+ if archive_cmd is not None:
+ display.vvv('archiving %s' % archive_cmd)
+ run_scm_cmd(archive_cmd, os.path.join(tempdir, name))
+
+ return temp_file.name
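
A minimal sketch of scm_archive_collection (the repository URL is
hypothetical; git must be on PATH and the URL reachable):

    from ansible.utils.galaxy import scm_archive_collection

    # clones into a temp dir under DEFAULT_LOCAL_TMP, checks out the version,
    # archives it, and returns the path to the resulting .tar file
    tarball = scm_archive_collection('https://github.com/example/repo.git',
                                     name='repo', version='HEAD')
    print(tarball)
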
diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py
new file mode 100644
index 00000000..5f36522e
--- /dev/null
+++ b/lib/ansible/utils/hashing.py
@@ -0,0 +1,98 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+# Note, sha1 is the only hash algorithm compatible with python2.4 and with
+# FIPS-140 mode (as of 11-2014)
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import sha as sha1
+
+# Backwards compat only
+try:
+ from hashlib import md5 as _md5
+except ImportError:
+ try:
+ from md5 import md5 as _md5
+ except ImportError:
+ # Assume we're running in FIPS mode here
+ _md5 = None
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes
+
+
+def secure_hash_s(data, hash_func=sha1):
+ ''' Return a secure hash hex digest of data. '''
+
+ digest = hash_func()
+ data = to_bytes(data, errors='surrogate_or_strict')
+ digest.update(data)
+ return digest.hexdigest()
+
+
+def secure_hash(filename, hash_func=sha1):
+    ''' Return a secure hash hex digest of a local file, or None if the file is not present or is a directory. '''
+
+ if not os.path.exists(to_bytes(filename, errors='surrogate_or_strict')) or os.path.isdir(to_bytes(filename, errors='strict')):
+ return None
+ digest = hash_func()
+ blocksize = 64 * 1024
+ try:
+ infile = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')
+ block = infile.read(blocksize)
+ while block:
+ digest.update(block)
+ block = infile.read(blocksize)
+ infile.close()
+ except IOError as e:
+ raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ return digest.hexdigest()
+
+
+# The checksum algorithm must match the one in the ShellModule.checksum() method
+checksum = secure_hash
+checksum_s = secure_hash_s
+
+
+#
+# Backwards compat functions. Some modules include md5s in their return values.
+# Continue to support that for now. As of ansible-1.8, all of those modules
+# should also return "checksum" (sha1 for now)
+# Do not use md5 unless it is needed for:
+# 1) Optional backwards compatibility
+# 2) Compliance with a third party protocol
+#
+# MD5 will not work on systems which are FIPS-140-2 compliant.
+#
+
+def md5s(data):
+ if not _md5:
+ raise ValueError('MD5 not available. Possibly running in FIPS mode')
+ return secure_hash_s(data, _md5)
+
+
+def md5(filename):
+ if not _md5:
+ raise ValueError('MD5 not available. Possibly running in FIPS mode')
+ return secure_hash(filename, _md5)
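
A minimal sketch of the hashing helpers (md5s/md5 raise ValueError on
FIPS-140-2 hosts, where _md5 is None):

    from ansible.utils.hashing import checksum, checksum_s, secure_hash_s

    print(secure_hash_s(u'some data'))  # sha1 hex digest of the bytes
    print(checksum_s(u'some data'))     # alias of secure_hash_s
    print(checksum('/etc/hostname'))    # None if missing or a directory
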
diff --git a/lib/ansible/utils/helpers.py b/lib/ansible/utils/helpers.py
new file mode 100644
index 00000000..658ad99c
--- /dev/null
+++ b/lib/ansible/utils/helpers.py
@@ -0,0 +1,51 @@
+# (c) 2016, Ansible by Red Hat <info@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+
+
+def pct_to_int(value, num_items, min_value=1):
+ '''
+ Converts a given value to a percentage if specified as "x%",
+ otherwise converts the given value to an integer.
+ '''
+ if isinstance(value, string_types) and value.endswith('%'):
+ value_pct = int(value.replace("%", ""))
+ return int((value_pct / 100.0) * num_items) or min_value
+ else:
+ return int(value)
+
+
+def object_to_dict(obj, exclude=None):
+ """
+    Converts an object into a dict, making its properties into keys; allows certain keys to be excluded
+ """
+ if exclude is None or not isinstance(exclude, list):
+ exclude = []
+ return dict((key, getattr(obj, key)) for key in dir(obj) if not (key.startswith('_') or key in exclude))
+
+
+def deduplicate_list(original_list):
+ """
+    Creates a deduplicated list, preserving the order in which each item is first found.
+ """
+ seen = set()
+ return [x for x in original_list if x not in seen and not seen.add(x)]
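
A minimal sketch of pct_to_int and deduplicate_list:

    from ansible.utils.helpers import deduplicate_list, pct_to_int

    print(pct_to_int('30%', 10))  # -> 3
    print(pct_to_int('1%', 10))   # -> 1: int(0.1 * 10) is 0, so min_value applies
    print(pct_to_int(5, 10))      # -> 5: non-percentage values pass through int()
    print(deduplicate_list(['a', 'b', 'a', 'c', 'b']))  # -> ['a', 'b', 'c']
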
diff --git a/lib/ansible/utils/jsonrpc.py b/lib/ansible/utils/jsonrpc.py
new file mode 100644
index 00000000..e48c979d
--- /dev/null
+++ b/lib/ansible/utils/jsonrpc.py
@@ -0,0 +1,113 @@
+# (c) 2017, Peter Sprygada <psprygad@redhat.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import traceback
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import ConnectionError
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.six.moves import cPickle
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class JsonRpcServer(object):
+
+ _objects = set()
+
+ def handle_request(self, request):
+ request = json.loads(to_text(request, errors='surrogate_then_replace'))
+
+ method = request.get('method')
+
+ if method.startswith('rpc.') or method.startswith('_'):
+ error = self.invalid_request()
+ return json.dumps(error)
+
+ args, kwargs = request.get('params')
+ setattr(self, '_identifier', request.get('id'))
+
+ rpc_method = None
+ for obj in self._objects:
+ rpc_method = getattr(obj, method, None)
+ if rpc_method:
+ break
+
+ if not rpc_method:
+ error = self.method_not_found()
+ response = json.dumps(error)
+ else:
+ try:
+ result = rpc_method(*args, **kwargs)
+ except ConnectionError as exc:
+ display.vvv(traceback.format_exc())
+ try:
+ error = self.error(code=exc.code, message=to_text(exc))
+ except AttributeError:
+ error = self.internal_error(data=to_text(exc))
+ response = json.dumps(error)
+ except Exception as exc:
+ display.vvv(traceback.format_exc())
+ error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
+ response = json.dumps(error)
+ else:
+ if isinstance(result, dict) and 'jsonrpc' in result:
+ response = result
+ else:
+ response = self.response(result)
+
+ try:
+ response = json.dumps(response)
+ except Exception as exc:
+ display.vvv(traceback.format_exc())
+ error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
+ response = json.dumps(error)
+
+ delattr(self, '_identifier')
+
+ return response
+
+ def register(self, obj):
+ self._objects.add(obj)
+
+ def header(self):
+ return {'jsonrpc': '2.0', 'id': self._identifier}
+
+ def response(self, result=None):
+ response = self.header()
+ if isinstance(result, binary_type):
+ result = to_text(result)
+ if not isinstance(result, text_type):
+ response["result_type"] = "pickle"
+ result = to_text(cPickle.dumps(result, protocol=0))
+ response['result'] = result
+ return response
+
+ def error(self, code, message, data=None):
+ response = self.header()
+ error = {'code': code, 'message': message}
+ if data:
+ error['data'] = data
+ response['error'] = error
+ return response
+
+ # json-rpc standard errors (-32768 .. -32000)
+ def parse_error(self, data=None):
+ return self.error(-32700, 'Parse error', data)
+
+ def method_not_found(self, data=None):
+ return self.error(-32601, 'Method not found', data)
+
+ def invalid_request(self, data=None):
+ return self.error(-32600, 'Invalid request', data)
+
+ def invalid_params(self, data=None):
+ return self.error(-32602, 'Invalid params', data)
+
+ def internal_error(self, data=None):
+ return self.error(-32603, 'Internal error', data)
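
A minimal sketch of JsonRpcServer (the Echo class is a made-up example; note
that 'params' must be a two-item [args, kwargs] pair):

    import json
    from ansible.utils.jsonrpc import JsonRpcServer

    class Echo(object):
        def echo(self, text):
            return text

    server = JsonRpcServer()
    server.register(Echo())
    request = json.dumps({'jsonrpc': '2.0', 'method': 'echo',
                          'params': [['hello'], {}], 'id': 1})
    print(server.handle_request(request))
    # -> {"jsonrpc": "2.0", "id": 1, "result": "hello"} (key order may differ)
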
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
new file mode 100644
index 00000000..709eae5f
--- /dev/null
+++ b/lib/ansible/utils/listify.py
@@ -0,0 +1,40 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common._collections_compat import Iterable
+from ansible.template.safe_eval import safe_eval
+
+
+__all__ = ['listify_lookup_plugin_terms']
+
+
+def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=True, convert_bare=False):
+
+ if isinstance(terms, string_types):
+ terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
+ else:
+ terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
+
+ if isinstance(terms, string_types) or not isinstance(terms, Iterable):
+ terms = [terms]
+
+ return terms
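
A minimal sketch of listify_lookup_plugin_terms (assumes ansible-base is
importable; the variable name is an arbitrary example):

    from ansible.parsing.dataloader import DataLoader
    from ansible.template import Templar
    from ansible.utils.listify import listify_lookup_plugin_terms

    loader = DataLoader()
    templar = Templar(loader=loader, variables={'pkgs': ['git', 'vim']})
    print(listify_lookup_plugin_terms('{{ pkgs }}', templar, loader))
    # -> ['git', 'vim']; a scalar result would be wrapped in a one-item list
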
diff --git a/lib/ansible/utils/multiprocessing.py b/lib/ansible/utils/multiprocessing.py
new file mode 100644
index 00000000..dcc18659
--- /dev/null
+++ b/lib/ansible/utils/multiprocessing.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2019 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing
+
+# Explicit multiprocessing context using the fork start method
+# This exists as a compat layer now that Python 3.8 has changed the default
+# start method for macOS to ``spawn``, which is currently incompatible with
+# our code base
+#
+# This exists in utils to allow it to be easily imported into various places
+# without causing circular import or dependency problems
+try:
+ context = multiprocessing.get_context('fork')
+except AttributeError:
+ # Py2 has no context functionality, and only supports fork
+ context = multiprocessing
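
A minimal sketch of the fork context (it behaves like the stdlib module, but
pinned to the fork start method even on Python 3.8+ macOS):

    from ansible.utils.multiprocessing import context

    def work(n):
        print(n * 2)

    if __name__ == '__main__':
        proc = context.Process(target=work, args=(21,))
        proc.start()
        proc.join()  # prints 42
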
diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py
new file mode 100644
index 00000000..df2769fb
--- /dev/null
+++ b/lib/ansible/utils/path.py
@@ -0,0 +1,157 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import shutil
+
+from errno import EEXIST
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+__all__ = ['unfrackpath', 'makedirs_safe']
+
+
+def unfrackpath(path, follow=True, basedir=None):
+ '''
+ Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
+
+ :arg path: A byte or text string representing a path to be canonicalized
+    :arg follow: A boolean to indicate whether symlinks should be resolved or not
+ :raises UnicodeDecodeError: If the canonicalized version of the path
+ contains non-utf8 byte sequences.
+    :rtype: A text string (unicode on python2, str on python3).
+ :returns: An absolute path with symlinks, environment variables, and tilde
+ expanded. Note that this does not check whether a path exists.
+
+ example::
+ '$HOME/../../var/mail' becomes '/var/spool/mail'
+ '''
+
+ b_basedir = to_bytes(basedir, errors='surrogate_or_strict', nonstring='passthru')
+
+ if b_basedir is None:
+ b_basedir = to_bytes(os.getcwd(), errors='surrogate_or_strict')
+ elif os.path.isfile(b_basedir):
+ b_basedir = os.path.dirname(b_basedir)
+
+ b_final_path = os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))
+
+ if not os.path.isabs(b_final_path):
+ b_final_path = os.path.join(b_basedir, b_final_path)
+
+ if follow:
+ b_final_path = os.path.realpath(b_final_path)
+
+ return to_text(os.path.normpath(b_final_path), errors='surrogate_or_strict')
+
+
+def makedirs_safe(path, mode=None):
+ '''
+ A *potentially insecure* way to ensure the existence of a directory chain. The "safe" in this function's name
+ refers only to its ability to ignore `EEXIST` in the case of multiple callers operating on the same part of
+ the directory chain. This function is not safe to use under world-writable locations when the first level of the
+ path to be created contains a predictable component. Always create a randomly-named element first if there is any
+ chance the parent directory might be world-writable (eg, /tmp) to prevent symlink hijacking and potential
+ disclosure or modification of sensitive file contents.
+
+ :arg path: A byte or text string representing a directory chain to be created
+ :kwarg mode: If given, the mode to set the directory to
+ :raises AnsibleError: If the directory cannot be created and does not already exist.
+ :raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
+ '''
+
+ rpath = unfrackpath(path)
+ b_rpath = to_bytes(rpath)
+ if not os.path.exists(b_rpath):
+ try:
+ if mode:
+ os.makedirs(b_rpath, mode)
+ else:
+ os.makedirs(b_rpath)
+ except OSError as e:
+ if e.errno != EEXIST:
+ raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
+
+
+def basedir(source):
+ """ returns directory for inventory or playbook """
+ source = to_bytes(source, errors='surrogate_or_strict')
+ dname = None
+ if os.path.isdir(source):
+ dname = source
+ elif source in [None, '', '.']:
+ dname = os.getcwd()
+ elif os.path.isfile(source):
+ dname = os.path.dirname(source)
+
+ if dname:
+ # don't follow symlinks for basedir, enables source re-use
+ dname = os.path.abspath(dname)
+
+ return to_text(dname, errors='surrogate_or_strict')
+
+
+def cleanup_tmp_file(path, warn=False):
+ """
+ Removes temporary file or directory. Optionally display a warning if unable
+ to remove the file or directory.
+
+ :arg path: Path to file or directory to be removed
+ :kwarg warn: Whether or not to display a warning when the file or directory
+ cannot be removed
+ """
+ try:
+ if os.path.exists(path):
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ elif os.path.isfile(path):
+ os.unlink(path)
+ except Exception as e:
+ if warn:
+ # Importing here to avoid circular import
+ from ansible.utils.display import Display
+ display = Display()
+ display.display(u'Unable to remove temporary file {0}'.format(to_text(e)))
+ except Exception:
+ pass
+
+
+def is_subpath(child, parent):
+ """
+ Compares paths to check if one is contained in the other
+    :arg child: Path to test
+    :arg parent: Path to test against
+ """
+ test = False
+
+ abs_child = unfrackpath(child, follow=False)
+ abs_parent = unfrackpath(parent, follow=False)
+
+ c = abs_child.split(os.path.sep)
+ p = abs_parent.split(os.path.sep)
+
+ try:
+ test = c[:len(p)] == p
+ except IndexError:
+ # child is shorter than parent so cannot be subpath
+ pass
+
+ return test
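
A minimal sketch of unfrackpath and is_subpath (the paths are examples):

    from ansible.utils.path import is_subpath, unfrackpath

    print(unfrackpath('~/../tmp', follow=False))  # expanded and normalized
    print(is_subpath('/etc/ssh/sshd_config', '/etc'))  # -> True
    print(is_subpath('/etc/../tmp', '/etc'))  # -> False once normalized to /tmp
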
diff --git a/lib/ansible/utils/plugin_docs.py b/lib/ansible/utils/plugin_docs.py
new file mode 100644
index 00000000..6522f76e
--- /dev/null
+++ b/lib/ansible/utils/plugin_docs.py
@@ -0,0 +1,258 @@
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.release import __version__ as ansible_version
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSet, MutableSequence
+from ansible.parsing.plugin_docs import read_docstring
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# modules that are allowed to not have documentation strings
+BLACKLIST = {
+ 'MODULE': frozenset(('async_wrapper',)),
+ 'CACHE': frozenset(('base',)),
+}
+
+
+def merge_fragment(target, source):
+
+ for key, value in source.items():
+ if key in target:
+ # assumes both structures have same type
+ if isinstance(target[key], MutableMapping):
+ value.update(target[key])
+ elif isinstance(target[key], MutableSet):
+ value.add(target[key])
+ elif isinstance(target[key], MutableSequence):
+ value = sorted(frozenset(value + target[key]))
+ else:
+ raise Exception("Attempt to extend a documentation fragement, invalid type for %s" % key)
+ target[key] = value
+
+
+def _process_versions_and_dates(fragment, is_module, return_docs, callback):
+ def process_deprecation(deprecation, top_level=False):
+ collection_name = 'removed_from_collection' if top_level else 'collection_name'
+ if not isinstance(deprecation, MutableMapping):
+ return
+ if (is_module or top_level) and 'removed_in' in deprecation: # used in module deprecations
+ callback(deprecation, 'removed_in', collection_name)
+ if 'removed_at_date' in deprecation:
+ callback(deprecation, 'removed_at_date', collection_name)
+ if not (is_module or top_level) and 'version' in deprecation: # used in plugin option deprecations
+ callback(deprecation, 'version', collection_name)
+
+ def process_option_specifiers(specifiers):
+ for specifier in specifiers:
+ if not isinstance(specifier, MutableMapping):
+ continue
+ if 'version_added' in specifier:
+ callback(specifier, 'version_added', 'version_added_collection')
+ if isinstance(specifier.get('deprecated'), MutableMapping):
+ process_deprecation(specifier['deprecated'])
+
+ def process_options(options):
+ for option in options.values():
+ if not isinstance(option, MutableMapping):
+ continue
+ if 'version_added' in option:
+ callback(option, 'version_added', 'version_added_collection')
+ if not is_module:
+ if isinstance(option.get('env'), list):
+ process_option_specifiers(option['env'])
+ if isinstance(option.get('ini'), list):
+ process_option_specifiers(option['ini'])
+ if isinstance(option.get('vars'), list):
+ process_option_specifiers(option['vars'])
+ if isinstance(option.get('deprecated'), MutableMapping):
+ process_deprecation(option['deprecated'])
+ if isinstance(option.get('suboptions'), MutableMapping):
+ process_options(option['suboptions'])
+
+ def process_return_values(return_values):
+ for return_value in return_values.values():
+ if not isinstance(return_value, MutableMapping):
+ continue
+ if 'version_added' in return_value:
+ callback(return_value, 'version_added', 'version_added_collection')
+ if isinstance(return_value.get('contains'), MutableMapping):
+ process_return_values(return_value['contains'])
+
+ if not fragment:
+ return
+
+ if return_docs:
+ process_return_values(fragment)
+ return
+
+ if 'version_added' in fragment:
+ callback(fragment, 'version_added', 'version_added_collection')
+ if isinstance(fragment.get('deprecated'), MutableMapping):
+ process_deprecation(fragment['deprecated'], top_level=True)
+ if isinstance(fragment.get('options'), MutableMapping):
+ process_options(fragment['options'])
+
+
+def add_collection_to_versions_and_dates(fragment, collection_name, is_module, return_docs=False):
+ def add(options, option, collection_name_field):
+ if collection_name_field not in options:
+ options[collection_name_field] = collection_name
+
+ _process_versions_and_dates(fragment, is_module, return_docs, add)
+
+
+def remove_current_collection_from_versions_and_dates(fragment, collection_name, is_module, return_docs=False):
+ def remove(options, option, collection_name_field):
+ if options.get(collection_name_field) == collection_name:
+ del options[collection_name_field]
+
+ _process_versions_and_dates(fragment, is_module, return_docs, remove)
+
+
+def add_fragments(doc, filename, fragment_loader, is_module=False):
+
+ fragments = doc.pop('extends_documentation_fragment', [])
+
+ if isinstance(fragments, string_types):
+ fragments = [fragments]
+
+ unknown_fragments = []
+
+ # doc_fragments are allowed to specify a fragment var other than DOCUMENTATION
+ # with a . separator; this is complicated by collections-hosted doc_fragments that
+ # use the same separator. Assume it's collection-hosted normally first, try to load
+ # as-specified. If failure, assume the right-most component is a var, split it off,
+ # and retry the load.
+ for fragment_slug in fragments:
+ fragment_name = fragment_slug
+ fragment_var = 'DOCUMENTATION'
+
+ fragment_class = fragment_loader.get(fragment_name)
+ if fragment_class is None and '.' in fragment_slug:
+ splitname = fragment_slug.rsplit('.', 1)
+ fragment_name = splitname[0]
+ fragment_var = splitname[1].upper()
+ fragment_class = fragment_loader.get(fragment_name)
+
+ if fragment_class is None:
+ unknown_fragments.append(fragment_slug)
+ continue
+
+ fragment_yaml = getattr(fragment_class, fragment_var, None)
+ if fragment_yaml is None:
+ if fragment_var != 'DOCUMENTATION':
+ # if it's asking for something specific that's missing, that's an error
+ unknown_fragments.append(fragment_slug)
+ continue
+ else:
+ fragment_yaml = '{}' # TODO: this is still an error later since we require 'options' below...
+
+ fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()
+
+ real_collection_name = 'ansible.builtin'
+ real_fragment_name = getattr(fragment_class, '_load_name')
+ if real_fragment_name.startswith('ansible_collections.'):
+ real_collection_name = '.'.join(real_fragment_name.split('.')[1:3])
+ add_collection_to_versions_and_dates(fragment, real_collection_name, is_module=is_module)
+
+ if 'notes' in fragment:
+ notes = fragment.pop('notes')
+ if notes:
+ if 'notes' not in doc:
+ doc['notes'] = []
+ doc['notes'].extend(notes)
+
+ if 'seealso' in fragment:
+ seealso = fragment.pop('seealso')
+ if seealso:
+ if 'seealso' not in doc:
+ doc['seealso'] = []
+ doc['seealso'].extend(seealso)
+
+ if 'options' not in fragment:
+ raise Exception("missing options in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename))
+
+ # ensure options themselves are directly merged
+ if 'options' in doc:
+ try:
+ merge_fragment(doc['options'], fragment.pop('options'))
+ except Exception as e:
+ raise AnsibleError("%s options (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
+ else:
+ doc['options'] = fragment.pop('options')
+
+ # merge rest of the sections
+ try:
+ merge_fragment(doc, fragment)
+ except Exception as e:
+ raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
+
+ if unknown_fragments:
+ raise AnsibleError('unknown doc_fragment(s) in file {0}: {1}'.format(filename, to_native(', '.join(unknown_fragments))))
+
+
+def get_docstring(filename, fragment_loader, verbose=False, ignore_errors=False, collection_name=None, is_module=False):
+ """
+ DOCUMENTATION can be extended using documentation fragments loaded by the PluginLoader from the doc_fragments plugins.
+ """
+
+ data = read_docstring(filename, verbose=verbose, ignore_errors=ignore_errors)
+
+ if data.get('doc', False):
+ # add collection name to versions and dates
+ if collection_name is not None:
+ add_collection_to_versions_and_dates(data['doc'], collection_name, is_module=is_module)
+
+ # add fragments to documentation
+ add_fragments(data['doc'], filename, fragment_loader=fragment_loader, is_module=is_module)
+
+ if data.get('returndocs', False):
+ # add collection name to versions and dates
+ if collection_name is not None:
+ add_collection_to_versions_and_dates(data['returndocs'], collection_name, is_module=is_module, return_docs=True)
+
+ return data['doc'], data['plainexamples'], data['returndocs'], data['metadata']
+
+
+def get_versioned_doclink(path):
+ """
+ returns a versioned documentation link for the current Ansible major.minor version; used to generate
+ in-product warning/error links to the configured DOCSITE_ROOT_URL
+ (e.g., https://docs.ansible.com/ansible/2.8/somepath/doc.html)
+
+ :param path: relative path to a document under docs/docsite/rst;
+ :return: absolute URL to the specified doc for the current version of Ansible
+ """
+ path = to_native(path)
+ try:
+ base_url = C.config.get_config_value('DOCSITE_ROOT_URL')
+ if not base_url.endswith('/'):
+ base_url += '/'
+ if path.startswith('/'):
+ path = path[1:]
+ split_ver = ansible_version.split('.')
+ if len(split_ver) < 3:
+ raise RuntimeError('invalid version ({0})'.format(ansible_version))
+
+ doc_version = '{0}.{1}'.format(split_ver[0], split_ver[1])
+
+ # check to see if it's an X.Y.0 non-rc prerelease or dev release; if so, assume devel (since the X.Y doctree
+ # isn't published until beta-ish)
+ if split_ver[2].startswith('0'):
+ # exclude rc; we should have the X.Y doctree live by rc1
+ if any((pre in split_ver[2]) for pre in ['a', 'b']) or len(split_ver) > 3 and 'dev' in split_ver[3]:
+ doc_version = 'devel'
+
+ return '{0}{1}/{2}'.format(base_url, doc_version, path)
+ except Exception as ex:
+ return '(unable to create versioned doc link for path {0}: {1})'.format(path, to_native(ex))
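+
+
+# Editor's illustrative sketch of the mapping implemented above, assuming the
+# default DOCSITE_ROOT_URL of https://docs.ansible.com/ansible/:
+#
+#     ansible_version == '2.10.4'   -> https://docs.ansible.com/ansible/2.10/<path>
+#     ansible_version == '2.11.0'   -> https://docs.ansible.com/ansible/2.11/<path>
+#     ansible_version == '2.11.0b1' -> https://docs.ansible.com/ansible/devel/<path>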
diff --git a/lib/ansible/utils/py3compat.py b/lib/ansible/utils/py3compat.py
new file mode 100644
index 00000000..6b46b5e8
--- /dev/null
+++ b/lib/ansible/utils/py3compat.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Toshio Kuratomi <a.badger@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Note that the original author of this, Toshio Kuratomi, is trying to submit this to six. If
+# successful, the code in six will be available under six's more liberal license:
+# https://mail.python.org/pipermail/python-porting/2018-July/000539.html
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+__all__ = ('environ',)
+
+
+class _TextEnviron(MutableMapping):
+ """
+ Utility class to return text strings from the environment instead of byte strings
+
+ Mimics the behaviour of os.environ on Python3
+ """
+ def __init__(self, env=None, encoding=None):
+ if env is None:
+ env = os.environ
+ self._raw_environ = env
+ self._value_cache = {}
+ if encoding is None:
+ # Since we're trying to mimic Python3's os.environ, use sys.getfilesystemencoding()
+ # instead of utf-8
+ self.encoding = sys.getfilesystemencoding()
+ else:
+ self.encoding = encoding
+
+ def __delitem__(self, key):
+ del self._raw_environ[key]
+
+ def __getitem__(self, key):
+ value = self._raw_environ[key]
+ if PY3:
+ return value
+ # Cache keys off of the undecoded values to handle any environment variables which change
+ # during a run
+ if value not in self._value_cache:
+ self._value_cache[value] = to_text(value, encoding=self.encoding,
+ nonstring='passthru', errors='surrogate_or_strict')
+ return self._value_cache[value]
+
+ def __setitem__(self, key, value):
+ self._raw_environ[key] = to_bytes(value, encoding=self.encoding, nonstring='strict',
+ errors='surrogate_or_strict')
+
+ def __iter__(self):
+ return self._raw_environ.__iter__()
+
+ def __len__(self):
+ return len(self._raw_environ)
+
+
+environ = _TextEnviron(encoding='utf-8')
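+
+
+# A minimal usage sketch (editor's illustration): reads behave like Python 3's
+# os.environ on both Python versions, returning text rather than bytes:
+#
+#     from ansible.utils.py3compat import environ
+#
+#     home = environ.get('HOME', u'')   # text on Python 2 and Python 3 alike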
diff --git a/lib/ansible/utils/sentinel.py b/lib/ansible/utils/sentinel.py
new file mode 100644
index 00000000..ca4f8276
--- /dev/null
+++ b/lib/ansible/utils/sentinel.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class Sentinel:
+ """
+ Object which can be used to mark an entry as being special
+
+ A sentinel value demarcates a value or marks an entry as having a special meaning. In C, the
+ Null byte is used as a sentinel for the end of a string. In Python, None is often used as
+ a Sentinel in optional parameters to mean that the parameter was not set by the user.
+
+ You should use None as a Sentinel value in any Python code where None is not a valid entry. If
+ None is a valid entry, though, then you need to create a different value, which is the purpose
+ of this class.
+
+ Example of using Sentinel as a default parameter value::
+
+ def confirm_big_red_button(tristate=Sentinel):
+ if tristate is Sentinel:
+ print('You must explicitly press the big red button to blow up the base')
+ elif tristate is True:
+ print('Countdown to destruction activated')
+ elif tristate is False:
+ print('Countdown stopped')
+ elif tristate is None:
+ print('Waiting for more input')
+
+ Example of using Sentinel to tell whether a dict which has a default value has been changed::
+
+ values = {'one': Sentinel, 'two': Sentinel}
+ defaults = {'one': 1, 'two': 2}
+
+ # [.. Other code which does things including setting a new value for 'one' ..]
+ values['one'] = None
+ # [..]
+
+ print('You made changes to:')
+ for key, value in values.items():
+ if value is Sentinel:
+ continue
+ print('%s: %s' % (key, value))
+ """
+
+ def __new__(cls):
+ """
+ Return the cls itself. This makes both equality and identity True for comparing the class
+ to an instance of the class, preventing common usage errors.
+
+ Preferred usage::
+
+ a = Sentinel
+ if a is Sentinel:
+ print('Sentinel value')
+
+ However, these are True as well, eliminating common usage errors::
+
+ if Sentinel is Sentinel():
+ print('Sentinel value')
+
+ if Sentinel == Sentinel():
+ print('Sentinel value')
+ """
+ return cls
diff --git a/lib/ansible/utils/shlex.py b/lib/ansible/utils/shlex.py
new file mode 100644
index 00000000..5e82021b
--- /dev/null
+++ b/lib/ansible/utils/shlex.py
@@ -0,0 +1,34 @@
+# (c) 2015, Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import shlex
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes, to_text
+
+
+if PY3:
+ # shlex.split() wants Unicode (i.e. ``str``) input on Python 3
+ shlex_split = shlex.split
+else:
+ # shlex.split() wants bytes (i.e. ``str``) input on Python 2
+ def shlex_split(s, comments=False, posix=True):
+ return map(to_text, shlex.split(to_bytes(s), comments, posix))
+ shlex_split.__doc__ = shlex.split.__doc__
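+
+
+# Editor's illustrative sketch of the unified behaviour on both Python versions:
+#
+#     shlex_split(u'ssh -o "User ansible"') == [u'ssh', u'-o', u'User ansible']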
diff --git a/lib/ansible/utils/singleton.py b/lib/ansible/utils/singleton.py
new file mode 100644
index 00000000..4299403e
--- /dev/null
+++ b/lib/ansible/utils/singleton.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from threading import RLock
+
+
+class Singleton(type):
+ """Metaclass for classes that wish to implement Singleton
+ functionality. If an instance of the class exists, it's returned,
+ otherwise a single instance is instantiated and returned.
+ """
+ def __init__(cls, name, bases, dct):
+ super(Singleton, cls).__init__(name, bases, dct)
+ cls.__instance = None
+ cls.__rlock = RLock()
+
+ def __call__(cls, *args, **kw):
+ if cls.__instance is not None:
+ return cls.__instance
+
+ with cls.__rlock:
+ if cls.__instance is None:
+ cls.__instance = super(Singleton, cls).__call__(*args, **kw)
+
+ return cls.__instance
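+
+
+# A minimal usage sketch (editor's illustration; ``Config`` is hypothetical):
+#
+#     from ansible.module_utils.six import add_metaclass
+#
+#     @add_metaclass(Singleton)      # or ``class Config(metaclass=Singleton)`` on Python 3
+#     class Config(object):
+#         pass
+#
+#     assert Config() is Config()    # the second call returns the cached instance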
diff --git a/lib/ansible/utils/ssh_functions.py b/lib/ansible/utils/ssh_functions.py
new file mode 100644
index 00000000..11ab7e13
--- /dev/null
+++ b/lib/ansible/utils/ssh_functions.py
@@ -0,0 +1,65 @@
+# (c) 2016, James Tanner
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import subprocess
+
+from ansible import constants as C
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.compat.paramiko import paramiko
+
+
+_HAS_CONTROLPERSIST = {}
+
+
+def check_for_controlpersist(ssh_executable):
+ try:
+ # If we've already checked this executable, return the cached result
+ return _HAS_CONTROLPERSIST[ssh_executable]
+ except KeyError:
+ pass
+
+ b_ssh_exec = to_bytes(ssh_executable, errors='surrogate_or_strict')
+ has_cp = True
+ try:
+ cmd = subprocess.Popen([b_ssh_exec, '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ if b"Bad configuration option" in err or b"Usage:" in err:
+ has_cp = False
+ except OSError:
+ has_cp = False
+
+ _HAS_CONTROLPERSIST[ssh_executable] = has_cp
+ return has_cp
+
+
+def set_default_transport():
+
+ # deal with 'smart' connection .. one time ..
+ if C.DEFAULT_TRANSPORT == 'smart':
+ # TODO: check if we can deprecate this as ssh w/o control persist should
+ # not be as common anymore.
+
+ # see if SSH can support ControlPersist if not use paramiko
+ if not check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE) and paramiko is not None:
+ C.DEFAULT_TRANSPORT = "paramiko"
+ else:
+ C.DEFAULT_TRANSPORT = "ssh"
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
new file mode 100644
index 00000000..29a52237
--- /dev/null
+++ b/lib/ansible/utils/unicode.py
@@ -0,0 +1,33 @@
+# (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_text
+
+
+__all__ = ('unicode_wrap',)
+
+
+def unicode_wrap(func, *args, **kwargs):
+ """If a function returns a string, force it to be a text string.
+
+ Use with partial to ensure that filter plugins will return text values.
+ """
+ return to_text(func(*args, **kwargs), nonstring='passthru')
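+
+
+# A minimal usage sketch (editor's illustration; ``my_filter`` is hypothetical)
+# of the ``partial`` pattern described in the docstring:
+#
+#     from functools import partial
+#
+#     def my_filter(value):
+#         return value.upper()          # may return a native (byte) str on Python 2
+#
+#     filters = {'my_filter': partial(unicode_wrap, my_filter)}
+#     # filters['my_filter']('abc') always returns a text string: u'ABC'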
diff --git a/lib/ansible/utils/unsafe_proxy.py b/lib/ansible/utils/unsafe_proxy.py
new file mode 100644
index 00000000..8c5d2261
--- /dev/null
+++ b/lib/ansible/utils/unsafe_proxy.py
@@ -0,0 +1,139 @@
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
+# retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+#
+# Original Python Recipe for Proxy:
+# http://code.activestate.com/recipes/496741-object-proxying/
+# Author: Tomer Filiba
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.common._collections_compat import Mapping, Set
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.six import string_types, binary_type, text_type
+
+
+__all__ = ['AnsibleUnsafe', 'wrap_var']
+
+
+class AnsibleUnsafe(object):
+ __UNSAFE__ = True
+
+
+class AnsibleUnsafeBytes(binary_type, AnsibleUnsafe):
+ def decode(self, *args, **kwargs):
+ """Wrapper method to ensure type conversions maintain unsafe context"""
+ return AnsibleUnsafeText(super(AnsibleUnsafeBytes, self).decode(*args, **kwargs))
+
+
+class AnsibleUnsafeText(text_type, AnsibleUnsafe):
+ def encode(self, *args, **kwargs):
+ """Wrapper method to ensure type conversions maintain unsafe context"""
+ return AnsibleUnsafeBytes(super(AnsibleUnsafeText, self).encode(*args, **kwargs))
+
+
+class UnsafeProxy(object):
+ def __new__(cls, obj, *args, **kwargs):
+ from ansible.utils.display import Display
+ Display().deprecated(
+ 'UnsafeProxy is being deprecated. Use wrap_var or AnsibleUnsafeBytes/AnsibleUnsafeText directly instead',
+ version='2.13', collection_name='ansible.builtin'
+ )
+ # In our usage we should only receive unicode strings.
+ # This conditional and conversion exists to sanity check the values
+ # we're given but we may want to take it out for testing and sanitize
+ # our input instead.
+ if isinstance(obj, AnsibleUnsafe):
+ return obj
+
+ if isinstance(obj, string_types):
+ obj = AnsibleUnsafeText(to_text(obj, errors='surrogate_or_strict'))
+ return obj
+
+
+def _wrap_dict(v):
+ return dict((wrap_var(k), wrap_var(item)) for k, item in v.items())
+
+
+def _wrap_sequence(v):
+ """Wraps a sequence with unsafe, not meant for strings, primarily
+ ``tuple`` and ``list``
+ """
+ v_type = type(v)
+ return v_type(wrap_var(item) for item in v)
+
+
+def _wrap_set(v):
+ return set(wrap_var(item) for item in v)
+
+
+def wrap_var(v):
+ if v is None or isinstance(v, AnsibleUnsafe):
+ return v
+
+ if isinstance(v, Mapping):
+ v = _wrap_dict(v)
+ elif isinstance(v, Set):
+ v = _wrap_set(v)
+ elif is_sequence(v):
+ v = _wrap_sequence(v)
+ elif isinstance(v, binary_type):
+ v = AnsibleUnsafeBytes(v)
+ elif isinstance(v, text_type):
+ v = AnsibleUnsafeText(v)
+
+ return v
+
+
+def to_unsafe_bytes(*args, **kwargs):
+ return wrap_var(to_bytes(*args, **kwargs))
+
+
+def to_unsafe_text(*args, **kwargs):
+ return wrap_var(to_text(*args, **kwargs))
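+
+
+# Editor's illustrative sketch: wrap_var taints values recursively so the templar
+# will not re-template them, while container types themselves stay plain:
+#
+#     tainted = wrap_var({'cmd': 'echo {{ secret }}', 'args': ['{{ a }}']})
+#     # tainted is a plain dict; tainted['cmd'] is AnsibleUnsafeText, and the
+#     # nested list element tainted['args'][0] is AnsibleUnsafeText as well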
diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py
new file mode 100644
index 00000000..17ef2df9
--- /dev/null
+++ b/lib/ansible/utils/vars.py
@@ -0,0 +1,295 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import keyword
+import random
+import uuid
+
+from json import dumps
+
+
+from ansible import constants as C
+from ansible import context
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils.six import iteritems, string_types, PY3
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.parsing.splitter import parse_kv
+
+
+ADDITIONAL_PY2_KEYWORDS = frozenset(("True", "False", "None"))
+
+_MAXSIZE = 2 ** 32
+cur_id = 0
+node_mac = ("%012x" % uuid.getnode())[:12]
+random_int = ("%08x" % random.randint(0, _MAXSIZE))[:8]
+
+
+def get_unique_id():
+ global cur_id
+ cur_id += 1
+ return "-".join([
+ node_mac[0:8],
+ node_mac[8:12],
+ random_int[0:4],
+ random_int[4:8],
+ ("%012x" % cur_id)[:12],
+ ])
+
+
+def _validate_mutable_mappings(a, b):
+ """
+ Internal convenience function to ensure arguments are MutableMappings
+
+ This checks that all arguments are MutableMappings or raises an error
+
+ :raises AnsibleError: if one of the arguments is not a MutableMapping
+ """
+
+ # If this becomes generally needed, change the signature to operate on
+ # a variable number of arguments instead.
+
+ if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
+ myvars = []
+ for x in [a, b]:
+ try:
+ myvars.append(dumps(x))
+ except Exception:
+ myvars.append(to_native(x))
+ raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(
+ a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1])
+ )
+
+
+def combine_vars(a, b):
+ """
+ Return a copy of dictionaries of variables based on configured hash behavior
+ """
+
+ if C.DEFAULT_HASH_BEHAVIOUR == "merge":
+ return merge_hash(a, b)
+ else:
+ # HASH_BEHAVIOUR == 'replace'
+ _validate_mutable_mappings(a, b)
+ result = a.copy()
+ result.update(b)
+ return result
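+
+
+# Editor's illustrative sketch of the two configured behaviours:
+#
+#     a = {'x': {'one': 1}, 'y': 1}
+#     b = {'x': {'two': 2}}
+#
+#     # DEFAULT_HASH_BEHAVIOUR == 'replace' (default): top-level keys from b win outright
+#     #     combine_vars(a, b) -> {'x': {'two': 2}, 'y': 1}
+#     # DEFAULT_HASH_BEHAVIOUR == 'merge': nested mappings are merged recursively
+#     #     combine_vars(a, b) -> {'x': {'one': 1, 'two': 2}, 'y': 1}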
+
+
+def merge_hash(x, y, recursive=True, list_merge='replace'):
+ """
+ Return a new dictionary resulting from merging y into x,
+ so that keys from y take precedence over keys from x.
+ (x and y aren't modified)
+ """
+ if list_merge not in ('replace', 'keep', 'append', 'prepend', 'append_rp', 'prepend_rp'):
+ raise AnsibleError("merge_hash: 'list_merge' argument can only be equal to 'replace', 'keep', 'append', 'prepend', 'append_rp' or 'prepend_rp'")
+
+ # verify x & y are dicts
+ _validate_mutable_mappings(x, y)
+
+ # to speed things up: if x is empty or equal to y, return y
+ # (this `if` can be removed without impact on the function
+ # except performance)
+ if x == {} or x == y:
+ return y.copy()
+
+ # in the following we will copy elements from y to x, but
+ # we don't want to modify x, so we create a copy of it
+ x = x.copy()
+
+ # to speed things up: use dict.update if possible
+ # (this `if` can be removed without impact on the function
+ # except performance)
+ if not recursive and list_merge == 'replace':
+ x.update(y)
+ return x
+
+ # insert each element of y in x, overriding the one in x
+ # (as y has higher priority)
+ # we copy elements from y to x instead of x to y because
+ # there is a high probability x will be the "default" dict the user
+ # want to "patch" with y
+ # therefore x will have much more elements than y
+ for key, y_value in iteritems(y):
+ # if `key` isn't in x
+ # update x and move on to the next element of y
+ if key not in x:
+ x[key] = y_value
+ continue
+ # from this point we know `key` is in x
+
+ x_value = x[key]
+
+ # if both x's element and y's element are dicts
+ # recursively "combine" them or override x's with y's element
+ # depending on the `recursive` argument
+ # and move on to the next element of y
+ if isinstance(x_value, MutableMapping) and isinstance(y_value, MutableMapping):
+ if recursive:
+ x[key] = merge_hash(x_value, y_value, recursive, list_merge)
+ else:
+ x[key] = y_value
+ continue
+
+ # if both x's element and y's element are lists
+ # "merge" them depending on the `list_merge` argument
+ # and move on to the next element of y
+ if isinstance(x_value, MutableSequence) and isinstance(y_value, MutableSequence):
+ if list_merge == 'replace':
+ # replace x value by y's one as it has higher priority
+ x[key] = y_value
+ elif list_merge == 'append':
+ x[key] = x_value + y_value
+ elif list_merge == 'prepend':
+ x[key] = y_value + x_value
+ elif list_merge == 'append_rp':
+ # append all elements from y_value (high prio) to x_value (low prio)
+ # and remove x_value elements that are also in y_value
+ # we don't remove elements from x_value or y_value that were already duplicated
+ # (we assume that there is a reason if such duplicates were present)
+ # _rp stands for "remove present"
+ x[key] = [z for z in x_value if z not in y_value] + y_value
+ elif list_merge == 'prepend_rp':
+ # same as 'append_rp' but y_value elements are prepended
+ x[key] = y_value + [z for z in x_value if z not in y_value]
+ # else 'keep'
+ # keep x's value even if y's has higher priority
+ # it's done by not changing x[key]
+ continue
+
+ # else just override x's element with y's one
+ x[key] = y_value
+
+ return x
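+
+
+# Editor's illustrative sketch of the ``list_merge`` strategies:
+#
+#     x = {'pkgs': ['a', 'b']}
+#     y = {'pkgs': ['b', 'c']}
+#
+#     merge_hash(x, y)                          # {'pkgs': ['b', 'c']}  ('replace')
+#     merge_hash(x, y, list_merge='keep')       # {'pkgs': ['a', 'b']}
+#     merge_hash(x, y, list_merge='append')     # {'pkgs': ['a', 'b', 'b', 'c']}
+#     merge_hash(x, y, list_merge='append_rp')  # {'pkgs': ['a', 'b', 'c']}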
+
+
+def load_extra_vars(loader):
+ extra_vars = {}
+ for extra_vars_opt in context.CLIARGS.get('extra_vars', tuple()):
+ data = None
+ extra_vars_opt = to_text(extra_vars_opt, errors='surrogate_or_strict')
+ if not extra_vars_opt:
+ continue
+
+ if extra_vars_opt.startswith(u"@"):
+ # Argument is a YAML file (JSON is a subset of YAML)
+ data = loader.load_from_file(extra_vars_opt[1:])
+ elif extra_vars_opt[0] in [u'/', u'.']:
+ raise AnsibleOptionsError("Please prepend extra_vars filename '%s' with '@'" % extra_vars_opt)
+ elif extra_vars_opt[0] in [u'[', u'{']:
+ # Arguments as YAML
+ data = loader.load(extra_vars_opt)
+ else:
+ # Arguments as Key-value
+ data = parse_kv(extra_vars_opt)
+
+ if isinstance(data, MutableMapping):
+ extra_vars = combine_vars(extra_vars, data)
+ else:
+ raise AnsibleOptionsError("Invalid extra vars data supplied. '%s' could not be made into a dictionary" % extra_vars_opt)
+
+ return extra_vars
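+
+
+# Editor's note on the accepted ``--extra-vars`` forms (mirroring the branches
+# above; the file name is hypothetical):
+#
+#     -e "deploy_env=staging"           # key=value pairs, parsed with parse_kv()
+#     -e '{"deploy_env": "staging"}'    # inline YAML/JSON mapping
+#     -e "@vars/staging.yml"            # @-prefixed file, loaded via the loader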
+
+
+def load_options_vars(version):
+
+ if version is None:
+ version = 'Unknown'
+ options_vars = {'ansible_version': version}
+ attrs = {'check': 'check_mode',
+ 'diff': 'diff_mode',
+ 'forks': 'forks',
+ 'inventory': 'inventory_sources',
+ 'skip_tags': 'skip_tags',
+ 'subset': 'limit',
+ 'tags': 'run_tags',
+ 'verbosity': 'verbosity'}
+
+ for attr, alias in attrs.items():
+ opt = context.CLIARGS.get(attr)
+ if opt is not None:
+ options_vars['ansible_%s' % alias] = opt
+
+ return options_vars
+
+
+def _isidentifier_PY3(ident):
+ if not isinstance(ident, string_types):
+ return False
+
+ # NOTE Python 3.7 offers str.isascii() so switch over to using it once
+ # we stop supporting 3.5 and 3.6 on the controller
+ try:
+ # Python 2 does not allow non-ascii characters in identifiers so unify
+ # the behavior for Python 3
+ ident.encode('ascii')
+ except UnicodeEncodeError:
+ return False
+
+ if not ident.isidentifier():
+ return False
+
+ if keyword.iskeyword(ident):
+ return False
+
+ return True
+
+
+def _isidentifier_PY2(ident):
+ if not isinstance(ident, string_types):
+ return False
+
+ if not ident:
+ return False
+
+ if C.INVALID_VARIABLE_NAMES.search(ident):
+ return False
+
+ if keyword.iskeyword(ident) or ident in ADDITIONAL_PY2_KEYWORDS:
+ return False
+
+ return True
+
+
+if PY3:
+ isidentifier = _isidentifier_PY3
+else:
+ isidentifier = _isidentifier_PY2
+
+
+isidentifier.__doc__ = """Determine if string is valid identifier.
+
+The purpose of this function is to validate that variables created in a play
+are valid Python identifiers and do not conflict with Python keywords, to
+prevent unexpected behavior. Since Python 2 and Python 3 differ in what
+a valid identifier is, this function unifies the validation so playbooks are
+portable between the two. The following changes were made:
+
+ * disallow non-ascii characters (Python 3 allows for them as opposed to Python 2)
+ * True, False and None are reserved keywords (these are reserved keywords
+ on Python 3 as opposed to Python 2)
+
+:arg ident: A text string containing the identifier to check. Note: It is the
+ caller's responsibility to convert ident to text if it is not already.
+
+Originally posted at http://stackoverflow.com/a/29586366
+"""
diff --git a/lib/ansible/utils/version.py b/lib/ansible/utils/version.py
new file mode 100644
index 00000000..d69723b4
--- /dev/null
+++ b/lib/ansible/utils/version.py
@@ -0,0 +1,272 @@
+# Copyright (c) 2020 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from distutils.version import LooseVersion, Version
+
+from ansible.module_utils.six import text_type
+
+
+# Regular expression taken from
+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
+SEMVER_RE = re.compile(
+ r'''
+ ^
+ (?P<major>0|[1-9]\d*)
+ \.
+ (?P<minor>0|[1-9]\d*)
+ \.
+ (?P<patch>0|[1-9]\d*)
+ (?:
+ -
+ (?P<prerelease>
+ (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)
+ (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*
+ )
+ )?
+ (?:
+ \+
+ (?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*)
+ )?
+ $
+ ''',
+ flags=re.X
+)
+
+
+class _Alpha:
+ """Class to easily allow comparing strings
+
+ Largely this exists to make comparing an integer and a string on py3
+ work like it does on py2.
+ """
+ def __init__(self, specifier):
+ self.specifier = specifier
+
+ def __repr__(self):
+ return repr(self.specifier)
+
+ def __eq__(self, other):
+ if isinstance(other, _Alpha):
+ return self.specifier == other.specifier
+ elif isinstance(other, str):
+ return self.specifier == other
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ if isinstance(other, _Alpha):
+ return self.specifier < other.specifier
+ elif isinstance(other, str):
+ return self.specifier < other
+ elif isinstance(other, _Numeric):
+ return False
+
+ raise ValueError
+
+ def __le__(self, other):
+ return self.__lt__(other) or self.__eq__(other)
+
+ def __gt__(self, other):
+ return not self.__le__(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+
+class _Numeric:
+ """Class to easily allow comparing numbers
+
+ Largely this exists to make comparing an integer and a string on py3
+ work like it does on py2.
+ """
+ def __init__(self, specifier):
+ self.specifier = int(specifier)
+
+ def __repr__(self):
+ return repr(self.specifier)
+
+ def __eq__(self, other):
+ if isinstance(other, _Numeric):
+ return self.specifier == other.specifier
+ elif isinstance(other, int):
+ return self.specifier == other
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ if isinstance(other, _Numeric):
+ return self.specifier < other.specifier
+ elif isinstance(other, int):
+ return self.specifier < other
+ elif isinstance(other, _Alpha):
+ return True
+
+ raise ValueError
+
+ def __le__(self, other):
+ return self.__lt__(other) or self.__eq__(other)
+
+ def __gt__(self, other):
+ return not self.__le__(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+
+class SemanticVersion(Version):
+ """Version comparison class that implements Semantic Versioning 2.0.0
+
+ Based off of ``distutils.version.Version``
+ """
+
+ version_re = SEMVER_RE
+
+ def __init__(self, vstring=None):
+ self.vstring = vstring
+ self.major = None
+ self.minor = None
+ self.patch = None
+ self.prerelease = ()
+ self.buildmetadata = ()
+
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__(self):
+ return 'SemanticVersion(%r)' % self.vstring
+
+ @staticmethod
+ def from_loose_version(loose_version):
+ """This method is designed to take a ``LooseVersion``
+ and attempt to construct a ``SemanticVersion`` from it
+
+ This is useful where you want to do simple version math
+ without requiring users to provide a compliant semver.
+ """
+ if not isinstance(loose_version, LooseVersion):
+ raise ValueError("%r is not a LooseVersion" % loose_version)
+
+ try:
+ version = loose_version.version[:]
+ except AttributeError:
+ raise ValueError("%r is not a LooseVersion" % loose_version)
+
+ extra_idx = 3
+ for marker in ('-', '+'):
+ try:
+ idx = version.index(marker)
+ except ValueError:
+ continue
+ else:
+ if idx < extra_idx:
+ extra_idx = idx
+ version = version[:extra_idx]
+
+ if version and set(type(v) for v in version) != set((int,)):
+ raise ValueError("Non integer values in %r" % loose_version)
+
+ # Extra is everything to the right of the core version
+ extra = re.search('[+-].+$', loose_version.vstring)
+
+ version = version + [0] * (3 - len(version))
+ return SemanticVersion(
+ '%s%s' % (
+ '.'.join(str(v) for v in version),
+ extra.group(0) if extra else ''
+ )
+ )
+
+ def parse(self, vstring):
+ match = SEMVER_RE.match(vstring)
+ if not match:
+ raise ValueError("invalid semantic version '%s'" % vstring)
+
+ (major, minor, patch, prerelease, buildmetadata) = match.group(1, 2, 3, 4, 5)
+ self.major = int(major)
+ self.minor = int(minor)
+ self.patch = int(patch)
+
+ if prerelease:
+ self.prerelease = tuple(_Numeric(x) if x.isdigit() else _Alpha(x) for x in prerelease.split('.'))
+ if buildmetadata:
+ self.buildmetadata = tuple(_Numeric(x) if x.isdigit() else _Alpha(x) for x in buildmetadata.split('.'))
+
+ @property
+ def core(self):
+ return self.major, self.minor, self.patch
+
+ @property
+ def is_prerelease(self):
+ return bool(self.prerelease)
+
+ @property
+ def is_stable(self):
+ # Major version zero (0.y.z) is for initial development. Anything MAY change at any time.
+ # The public API SHOULD NOT be considered stable.
+ # https://semver.org/#spec-item-4
+ return not (self.major == 0 or self.is_prerelease)
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = SemanticVersion(other)
+
+ if self.core != other.core:
+ # if the core version doesn't match
+ # prerelease and buildmetadata don't matter
+ if self.core < other.core:
+ return -1
+ else:
+ return 1
+
+ if not any((self.prerelease, other.prerelease)):
+ return 0
+
+ if self.prerelease and not other.prerelease:
+ return -1
+ elif not self.prerelease and other.prerelease:
+ return 1
+ else:
+ if self.prerelease < other.prerelease:
+ return -1
+ elif self.prerelease > other.prerelease:
+ return 1
+
+ # Build metadata MUST be ignored when determining version precedence
+ # https://semver.org/#spec-item-10
+ # With the above in mind it is ignored here
+
+ # If we have made it here, things should be equal
+ return 0
+
+ # The Py2 and Py3 implementations of distutils.version.Version
+ # are quite different, this makes the Py2 and Py3 implementations
+ # the same
+ def __eq__(self, other):
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ return self._cmp(other) <= 0
+
+ def __gt__(self, other):
+ return self._cmp(other) > 0
+
+ def __ge__(self, other):
+ return self._cmp(other) >= 0
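+
+
+# Editor's illustrative sketch of SemVer 2.0.0 precedence as implemented above:
+#
+#     SemanticVersion('1.0.0-alpha') < SemanticVersion('1.0.0')      # True: prerelease sorts first
+#     SemanticVersion('1.0.0+build.5') == SemanticVersion('1.0.0')   # True: build metadata ignored
+#     SemanticVersion.from_loose_version(LooseVersion('2.10'))       # SemanticVersion('2.10.0')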
diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/lib/ansible/vars/__init__.py
diff --git a/lib/ansible/vars/clean.py b/lib/ansible/vars/clean.py
new file mode 100644
index 00000000..4b89b7b4
--- /dev/null
+++ b/lib/ansible/vars/clean.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils import six
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.plugins.loader import connection_loader
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def module_response_deepcopy(v):
+ """Function to create a deep copy of module response data
+
+ Designed to be used within the Ansible "engine" to improve performance
+ issues where ``copy.deepcopy`` was used previously, largely with CPU
+ and memory contention.
+
+ This only supports the following data types, and was designed to only
+ handle specific workloads:
+
+ * ``dict``
+ * ``list``
+
+ The data we pass here will come from a serialization such
+ as JSON, so we shouldn't have need for other data types such as
+ ``set`` or ``tuple``.
+
+ Take note that this function should not be used extensively as a
+ replacement for ``deepcopy`` due to the naive way in which this
+ handles other data types.
+
+ Do not expect uses outside of those listed below to maintain
+ backwards compatibility, in case we need to extend this function
+ to handle our specific needs:
+
+ * ``ansible.executor.task_result.TaskResult.clean_copy``
+ * ``ansible.vars.clean.clean_facts``
+ * ``ansible.vars.namespace_facts``
+ """
+ if isinstance(v, dict):
+ ret = v.copy()
+ items = six.iteritems(ret)
+ elif isinstance(v, list):
+ ret = v[:]
+ items = enumerate(ret)
+ else:
+ return v
+
+ for key, value in items:
+ if isinstance(value, (dict, list)):
+ ret[key] = module_response_deepcopy(value)
+ else:
+ ret[key] = value
+
+ return ret
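+
+
+# Editor's illustrative sketch (dict/list payloads only, per the docstring):
+#
+#     src = {'stdout_lines': ['a', 'b'], 'rc': 0}
+#     dst = module_response_deepcopy(src)
+#     dst['stdout_lines'].append('c')
+#     # src['stdout_lines'] is still ['a', 'b']: containers are copied at every
+#     # level, while scalar values are shared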
+
+
+def strip_internal_keys(dirty, exceptions=None):
+ # All keys starting with _ansible_ are internal, so change the 'dirty' mapping and remove them.
+
+ if exceptions is None:
+ exceptions = tuple()
+
+ if isinstance(dirty, MutableSequence):
+
+ for element in dirty:
+ if isinstance(element, (MutableMapping, MutableSequence)):
+ strip_internal_keys(element, exceptions=exceptions)
+
+ elif isinstance(dirty, MutableMapping):
+
+ # listify to avoid updating dict while iterating over it
+ for k in list(dirty.keys()):
+ if isinstance(k, six.string_types):
+ if k.startswith('_ansible_') and k not in exceptions:
+ del dirty[k]
+ continue
+
+ if isinstance(dirty[k], (MutableMapping, MutableSequence)):
+ strip_internal_keys(dirty[k], exceptions=exceptions)
+ else:
+ raise AnsibleError("Cannot strip invalid keys from %s" % type(dirty))
+
+ return dirty
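+
+
+# Editor's illustrative sketch:
+#
+#     strip_internal_keys({'_ansible_no_log': False, 'changed': True,
+#                          'results': [{'_ansible_item_label': 'x', 'rc': 0}]})
+#     # -> {'changed': True, 'results': [{'rc': 0}]}  (modified in place and returned)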
+
+
+def remove_internal_keys(data):
+ '''
+ More nuanced version of strip_internal_keys
+ '''
+ for key in list(data.keys()):
+ if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
+ display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
+ del data[key]
+
+ # remove bad/empty internal keys
+ for key in ['warnings', 'deprecations']:
+ if key in data and not data[key]:
+ del data[key]
+
+ # cleanse fact values that are allowed from actions but not modules
+ for key in list(data.get('ansible_facts', {}).keys()):
+ if key.startswith('discovered_interpreter_') or key.startswith('ansible_discovered_interpreter_'):
+ del data['ansible_facts'][key]
+
+
+def clean_facts(facts):
+ ''' remove facts that can override internal keys or are otherwise deemed unsafe '''
+ data = module_response_deepcopy(facts)
+
+ remove_keys = set()
+ fact_keys = set(data.keys())
+ # first we add all of our magic variable names to the set of
+ # keys we want to remove from facts
+ # NOTE: these will eventually disappear in favor of others below
+ for magic_var in C.MAGIC_VARIABLE_MAPPING:
+ remove_keys.update(fact_keys.intersection(C.MAGIC_VARIABLE_MAPPING[magic_var]))
+
+ # remove common connection vars
+ remove_keys.update(fact_keys.intersection(C.COMMON_CONNECTION_VARS))
+
+ # next we remove any connection plugin specific vars
+ for conn_path in connection_loader.all(path_only=True):
+ conn_name = os.path.splitext(os.path.basename(conn_path))[0]
+ re_key = re.compile('^ansible_%s_' % conn_name)
+ for fact_key in fact_keys:
+ # most lightweight VM or container tech creates devices with this pattern; this avoids filtering them out
+ if (re_key.match(fact_key) and not fact_key.endswith(('_bridge', '_gwbridge'))) or fact_key.startswith('ansible_become_'):
+ remove_keys.add(fact_key)
+
+ # remove some KNOWN keys
+ for hard in C.RESTRICTED_RESULT_KEYS + C.INTERNAL_RESULT_KEYS:
+ if hard in fact_keys:
+ remove_keys.add(hard)
+
+ # finally, we search for interpreter keys to remove
+ re_interp = re.compile('^ansible_.*_interpreter$')
+ for fact_key in fact_keys:
+ if re_interp.match(fact_key):
+ remove_keys.add(fact_key)
+ # then we remove them (except for ssh host keys)
+ for r_key in remove_keys:
+ if not r_key.startswith('ansible_ssh_host_key_'):
+ try:
+ r_val = to_text(data[r_key])
+ if len(r_val) > 24:
+ r_val = '%s ... %s' % (r_val[:13], r_val[-6:])
+ except Exception:
+ r_val = ' <failed to convert value to a string> '
+ display.warning("Removed restricted key from module data: %s = %s" % (r_key, r_val))
+ del data[r_key]
+
+ return strip_internal_keys(data)
+
+
+def namespace_facts(facts):
+ ''' return all facts inside 'ansible_facts' w/o an ansible_ prefix '''
+ deprefixed = {}
+ for k in facts:
+ if k.startswith('ansible_') and k not in ('ansible_local',):
+ deprefixed[k[8:]] = module_response_deepcopy(facts[k])
+ else:
+ deprefixed[k] = module_response_deepcopy(facts[k])
+
+ return {'ansible_facts': deprefixed}
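+
+
+# Editor's illustrative sketch:
+#
+#     namespace_facts({'ansible_os_family': 'Debian', 'ansible_local': {}, 'custom': 1})
+#     # -> {'ansible_facts': {'os_family': 'Debian', 'ansible_local': {}, 'custom': 1}}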
diff --git a/lib/ansible/vars/fact_cache.py b/lib/ansible/vars/fact_cache.py
new file mode 100644
index 00000000..a8e63bc5
--- /dev/null
+++ b/lib/ansible/vars/fact_cache.py
@@ -0,0 +1,111 @@
+# Copyright: (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.loader import cache_loader
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class FactCache(MutableMapping):
+
+ def __init__(self, *args, **kwargs):
+
+ self._plugin = cache_loader.get(C.CACHE_PLUGIN)
+ if not self._plugin:
+ raise AnsibleError('Unable to load the facts cache plugin (%s).' % (C.CACHE_PLUGIN))
+
+ super(FactCache, self).__init__(*args, **kwargs)
+
+ def __getitem__(self, key):
+ if not self._plugin.contains(key):
+ raise KeyError
+ return self._plugin.get(key)
+
+ def __setitem__(self, key, value):
+ self._plugin.set(key, value)
+
+ def __delitem__(self, key):
+ self._plugin.delete(key)
+
+ def __contains__(self, key):
+ return self._plugin.contains(key)
+
+ def __iter__(self):
+ return iter(self._plugin.keys())
+
+ def __len__(self):
+ return len(self._plugin.keys())
+
+ def copy(self):
+ """ Return a primitive copy of the keys and values from the cache. """
+ return dict(self)
+
+ def keys(self):
+ return self._plugin.keys()
+
+ def flush(self):
+ """ Flush the fact cache of all keys. """
+ self._plugin.flush()
+
+ def first_order_merge(self, key, value):
+ host_facts = {key: value}
+
+ try:
+ host_cache = self._plugin.get(key)
+ if host_cache:
+ host_cache.update(value)
+ host_facts[key] = host_cache
+ except KeyError:
+ pass
+
+ super(FactCache, self).update(host_facts)
+
+ def update(self, *args):
+ """
+ Backwards compat shim
+
+ We thought we needed this to ensure we always called the plugin's set() method but
+ MutableMapping.update() will call our __setitem__() just fine. It's the calls to update
+ that we need to be careful of. This contains a bug::
+
+ fact_cache[host.name].update(facts)
+
+ It retrieves a *copy* of the facts for host.name and then updates the copy. So the changes
+ aren't persisted.
+
+ Instead we need to do::
+
+ fact_cache.update({host.name: facts})
+
+ Which will use FactCache's update() method.
+
+ We currently need this shim for backwards compat because the update() method that we had
+ implemented took key and value as arguments instead of taking a dict. We can remove the
+ shim in 2.12 as MutableMapping.update() should do everything that we need.
+ """
+ if len(args) == 2:
+ # Deprecated. Call the new function with this name
+ display.deprecated('Calling FactCache().update(key, value) is deprecated. Use'
+ ' FactCache().first_order_merge(key, value) if you want the old'
+ ' behaviour or use FactCache().update({key: value}) if you want'
+ ' dict-like behaviour.', version='2.12', collection_name='ansible.builtin')
+ return self.first_order_merge(*args)
+
+ elif len(args) == 1:
+ host_facts = args[0]
+
+ else:
+ raise TypeError('update expected at most 1 argument, got {0}'.format(len(args)))
+
+ super(FactCache, self).update(host_facts)
diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
new file mode 100644
index 00000000..b47ec197
--- /dev/null
+++ b/lib/ansible/vars/hostvars.py
@@ -0,0 +1,154 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.template import Templar, AnsibleUndefined
+
+STATIC_VARS = [
+ 'ansible_version',
+ 'ansible_play_hosts',
+ 'ansible_dependent_role_names',
+ 'ansible_play_role_names',
+ 'ansible_role_names',
+ 'inventory_hostname',
+ 'inventory_hostname_short',
+ 'inventory_file',
+ 'inventory_dir',
+ 'groups',
+ 'group_names',
+ 'omit',
+ 'playbook_dir',
+ 'play_hosts',
+ 'role_names',
+ 'ungrouped',
+]
+
+__all__ = ['HostVars', 'HostVarsVars']
+
+
+# Note -- this is a Mapping, not a MutableMapping
+class HostVars(Mapping):
+ ''' A special view of vars_cache that adds values from the inventory when needed. '''
+
+ def __init__(self, inventory, variable_manager, loader):
+ self._inventory = inventory
+ self._loader = loader
+ self._variable_manager = variable_manager
+ variable_manager._hostvars = self
+
+ def set_variable_manager(self, variable_manager):
+ self._variable_manager = variable_manager
+ variable_manager._hostvars = self
+
+ def set_inventory(self, inventory):
+ self._inventory = inventory
+
+ def _find_host(self, host_name):
+ # does not use inventory.hosts so it can create localhost on demand
+ return self._inventory.get_host(host_name)
+
+ def raw_get(self, host_name):
+ '''
+ Similar to __getitem__, however the returned data is not run through
+ the templating engine to expand variables in the hostvars.
+ '''
+ host = self._find_host(host_name)
+ if host is None:
+ return AnsibleUndefined(name="hostvars['%s']" % host_name)
+
+ return self._variable_manager.get_vars(host=host, include_hostvars=False)
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+
+ # Methods __getstate__ and __setstate__ of VariableManager do not
+ # preserve _loader and _hostvars attributes to improve pickle
+ # performance and memory utilization. Since HostVars holds values
+ # of those attributes already, assign them if needed.
+ if self._variable_manager._loader is None:
+ self._variable_manager._loader = self._loader
+
+ if self._variable_manager._hostvars is None:
+ self._variable_manager._hostvars = self
+
+ def __getitem__(self, host_name):
+ data = self.raw_get(host_name)
+ if isinstance(data, AnsibleUndefined):
+ return data
+ return HostVarsVars(data, loader=self._loader)
+
+ def set_host_variable(self, host, varname, value):
+ self._variable_manager.set_host_variable(host, varname, value)
+
+ def set_nonpersistent_facts(self, host, facts):
+ self._variable_manager.set_nonpersistent_facts(host, facts)
+
+ def set_host_facts(self, host, facts):
+ self._variable_manager.set_host_facts(host, facts)
+
+ def __contains__(self, host_name):
+ # does not use inventory.hosts so it can create localhost on demand
+ return self._find_host(host_name) is not None
+
+ def __iter__(self):
+ for host in self._inventory.hosts:
+ yield host
+
+ def __len__(self):
+ return len(self._inventory.hosts)
+
+ def __repr__(self):
+ out = {}
+ for host in self._inventory.hosts:
+ out[host] = self.get(host)
+ return repr(out)
+
+ def __deepcopy__(self, memo):
+ # We do not need to deepcopy because HostVars is immutable,
+ # however we have to implement the method so we can deepcopy
+ # variables' dicts that contain HostVars.
+ return self
+
+
+class HostVarsVars(Mapping):
+
+ def __init__(self, variables, loader):
+ self._vars = variables
+ self._loader = loader
+
+ def __getitem__(self, var):
+ templar = Templar(variables=self._vars, loader=self._loader)
+ return templar.template(self._vars[var], fail_on_undefined=False, static_vars=STATIC_VARS)
+
+ def __contains__(self, var):
+ return (var in self._vars)
+
+ def __iter__(self):
+ for var in self._vars.keys():
+ yield var
+
+ def __len__(self):
+ return len(self._vars.keys())
+
+ def __repr__(self):
+ templar = Templar(variables=self._vars, loader=self._loader)
+ return repr(templar.template(self._vars, fail_on_undefined=False, static_vars=STATIC_VARS))
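+
+
+# Editor's note: these classes back the ``hostvars`` magic variable, so plays
+# typically consume them through Jinja2, e.g. (hypothetical host/fact names):
+#
+#     {{ hostvars['db1']['ansible_default_ipv4']['address'] }}
+#
+# Each lookup goes through HostVars.__getitem__ above and is templated lazily,
+# leaving the variables listed in STATIC_VARS untemplated.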
diff --git a/lib/ansible/vars/manager.py b/lib/ansible/vars/manager.py
new file mode 100644
index 00000000..b690260c
--- /dev/null
+++ b/lib/ansible/vars/manager.py
@@ -0,0 +1,719 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from collections import defaultdict
+
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import sha as sha1
+
+from jinja2.exceptions import UndefinedError
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError, AnsibleTemplateError
+from ansible.inventory.host import Host
+from ansible.inventory.helpers import sort_groups, get_group_vars
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import Mapping, MutableMapping, Sequence
+from ansible.module_utils.six import iteritems, text_type, string_types
+from ansible.plugins.loader import lookup_loader
+from ansible.vars.fact_cache import FactCache
+from ansible.template import Templar
+from ansible.utils.display import Display
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.vars import combine_vars, load_extra_vars, load_options_vars
+from ansible.utils.unsafe_proxy import wrap_var
+from ansible.vars.clean import namespace_facts, clean_facts
+from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
+
+display = Display()
+
+
+def preprocess_vars(a):
+ '''
+ Ensures that vars contained in the parameter passed in are
+ returned as a list of dictionaries, to ensure for instance
+ that vars loaded from a file conform to an expected state.
+ '''
+
+ if a is None:
+ return None
+ elif not isinstance(a, list):
+ data = [a]
+ else:
+ data = a
+
+ for item in data:
+ if not isinstance(item, MutableMapping):
+ raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
+
+ return data
+
+
+class VariableManager:
+
+ _ALLOWED = frozenset(['plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
+ 'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
+
+ def __init__(self, loader=None, inventory=None, version_info=None):
+ self._nonpersistent_fact_cache = defaultdict(dict)
+ self._vars_cache = defaultdict(dict)
+ self._extra_vars = defaultdict(dict)
+ self._host_vars_files = defaultdict(dict)
+ self._group_vars_files = defaultdict(dict)
+ self._inventory = inventory
+ self._loader = loader
+ self._hostvars = None
+ self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
+
+ self._options_vars = load_options_vars(version_info)
+
+ # If the basedir is specified as the empty string then it results in cwd being used.
+ # This is not a safe location to load vars from.
+ basedir = self._options_vars.get('basedir', False)
+ self.safe_basedir = bool(basedir is False or basedir)
+
+ # load extra vars
+ self._extra_vars = load_extra_vars(loader=self._loader)
+
+ # load fact cache
+ try:
+ self._fact_cache = FactCache()
+ except AnsibleError as e:
+ # bad cache plugin is not fatal error
+ # fallback to a dict as in memory cache
+ display.warning(to_text(e))
+ self._fact_cache = {}
+
+ def __getstate__(self):
+ data = dict(
+ fact_cache=self._fact_cache,
+ np_fact_cache=self._nonpersistent_fact_cache,
+ vars_cache=self._vars_cache,
+ extra_vars=self._extra_vars,
+ host_vars_files=self._host_vars_files,
+ group_vars_files=self._group_vars_files,
+ omit_token=self._omit_token,
+ options_vars=self._options_vars,
+ inventory=self._inventory,
+ safe_basedir=self.safe_basedir,
+ )
+ return data
+
+ def __setstate__(self, data):
+ self._fact_cache = data.get('fact_cache', defaultdict(dict))
+ self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
+ self._vars_cache = data.get('vars_cache', defaultdict(dict))
+ self._extra_vars = data.get('extra_vars', dict())
+ self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
+ self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
+ self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
+ self._inventory = data.get('inventory', None)
+ self._options_vars = data.get('options_vars', dict())
+ self.safe_basedir = data.get('safe_basedir', False)
+ self._loader = None
+ self._hostvars = None
+
+ @property
+ def extra_vars(self):
+ return self._extra_vars
+
+ def set_inventory(self, inventory):
+ self._inventory = inventory
+
+ def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True,
+ _hosts=None, _hosts_all=None, stage='task'):
+ '''
+ Returns the variables, with optional "context" given via the parameters
+ for the play, host, and task (which could possibly result in different
+ sets of variables being returned due to the additional context).
+
+ The order of precedence is:
+ - play->roles->get_default_vars (if there is a play context)
+ - group_vars_files[host] (if there is a host context)
+ - host_vars_files[host] (if there is a host context)
+ - host->get_vars (if there is a host context)
+ - fact_cache[host] (if there is a host context)
+ - play vars (if there is a play context)
+ - play vars_files (if there's no host context, ignore
+ file names that cannot be templated)
+ - task->get_vars (if there is a task context)
+ - vars_cache[host] (if there is a host context)
+ - extra vars
+
+ ``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
+ on the functionality they provide. These arguments may be removed at a later date without a deprecation
+ period and without warning.
+ '''
+
+ display.debug("in VariableManager get_vars()")
+
+ all_vars = dict()
+ magic_variables = self._get_magic_variables(
+ play=play,
+ host=host,
+ task=task,
+ include_hostvars=include_hostvars,
+ include_delegate_to=include_delegate_to,
+ _hosts=_hosts,
+ _hosts_all=_hosts_all,
+ )
+
+ _vars_sources = {}
+
+ def _combine_and_track(data, new_data, source):
+ '''
+ Wrapper function to update var sources dict and call combine_vars()
+
+ See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
+ '''
+ if C.DEFAULT_DEBUG:
+ # Populate var sources dict
+ for key in new_data:
+ _vars_sources[key] = source
+ return combine_vars(data, new_data)
+
+ # default for all cases
+ basedirs = []
+ if self.safe_basedir: # avoid adhoc/console loading cwd
+ basedirs = [self._loader.get_basedir()]
+
+ if play:
+ # first we compile any vars specified in defaults/main.yml
+ # for all roles within the specified play
+ for role in play.get_roles():
+ all_vars = _combine_and_track(all_vars, role.get_default_vars(), "role '%s' defaults" % role.name)
+
+ if task:
+ # set basedirs
+ if C.PLAYBOOK_VARS_ROOT == 'all': # should be default
+ basedirs = task.get_search_path()
+ elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'): # only option in 2.4.0
+ basedirs = [task.get_search_path()[0]]
+ elif C.PLAYBOOK_VARS_ROOT != 'top':
+ # 'top' preserves the default basedirs (the only option pre 2.3); anything else is an error
+ raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)
+
+ # if we have a task in this context, and that task has a role, make
+ # sure it sees its defaults above any other roles, as we previously
+ # (v1) made sure each task had a copy of its role's default vars
+ if task._role is not None and (play or task.action in C._ACTION_INCLUDE_ROLE):
+ all_vars = _combine_and_track(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()),
+ "role '%s' defaults" % task._role.name)
+
+ if host:
+ # The 'all' group and the rest of the groups for a host, used below
+ all_group = self._inventory.groups.get('all')
+ host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])
+
+ def _get_plugin_vars(plugin, path, entities):
+ data = {}
+ try:
+ data = plugin.get_vars(self._loader, path, entities)
+ except AttributeError:
+ try:
+ for entity in entities:
+ if isinstance(entity, Host):
+ data.update(plugin.get_host_vars(entity.name))
+ else:
+ data.update(plugin.get_group_vars(entity.name))
+ except AttributeError:
+ if hasattr(plugin, 'run'):
+ raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ else:
+ raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ return data
+
+ # internal functions that actually do the work
+ def _plugins_inventory(entities):
+ ''' merges all entities by inventory source '''
+ return get_vars_from_inventory_sources(self._loader, self._inventory._sources, entities, stage)
+
+ def _plugins_play(entities):
+ ''' merges all entities adjacent to play '''
+ data = {}
+ for path in basedirs:
+ data = _combine_and_track(data, get_vars_from_path(self._loader, path, entities, stage), "path '%s'" % path)
+ return data
+
+ # configurable functions that are sortable via config; remember to add to _ALLOWED if expanding this list
+ def all_inventory():
+ return all_group.get_vars()
+
+ def all_plugins_inventory():
+ return _plugins_inventory([all_group])
+
+ def all_plugins_play():
+ return _plugins_play([all_group])
+
+ def groups_inventory():
+ ''' gets group vars from inventory '''
+ return get_group_vars(host_groups)
+
+ def groups_plugins_inventory():
+ ''' gets plugin sources from inventory for groups '''
+ return _plugins_inventory(host_groups)
+
+ def groups_plugins_play():
+ ''' gets plugin sources from play for groups '''
+ return _plugins_play(host_groups)
+
+ def plugins_by_groups():
+ '''
+ merges all plugin sources by group;
+ this should be used instead of, NOT in combination with, the other groups_plugins* functions
+ '''
+ data = {}
+ for group in host_groups:
+ data[group] = _combine_and_track(data.get(group, {}), _plugins_inventory([group]), "inventory group_vars for '%s'" % group)
+ data[group] = _combine_and_track(data[group], _plugins_play([group]), "playbook group_vars for '%s'" % group)
+ return data
+
+ # Merge groups as per the precedence config;
+ # only allow calling the functions we want exposed
+ for entry in C.VARIABLE_PRECEDENCE:
+ if entry in self._ALLOWED:
+ display.debug('Calling %s to load vars for %s' % (entry, host.name))
+ all_vars = _combine_and_track(all_vars, locals()[entry](), "group vars, precedence entry '%s'" % entry)
+ else:
+ display.warning('Ignoring unknown variable precedence entry: %s' % (entry))
+
+ # host vars, from inventory, inventory adjacent and play adjacent via plugins
+ all_vars = _combine_and_track(all_vars, host.get_vars(), "host vars for '%s'" % host)
+ all_vars = _combine_and_track(all_vars, _plugins_inventory([host]), "inventory host_vars for '%s'" % host)
+ all_vars = _combine_and_track(all_vars, _plugins_play([host]), "playbook host_vars for '%s'" % host)
+
+ # finally, the facts cache for this host, if it exists
+ # TODO: cleaning of facts should eventually become part of taskresults instead of vars
+ try:
+ facts = wrap_var(self._fact_cache.get(host.name, {}))
+ all_vars.update(namespace_facts(facts))
+
+ # push facts to main namespace
+ if C.INJECT_FACTS_AS_VARS:
+ all_vars = _combine_and_track(all_vars, wrap_var(clean_facts(facts)), "facts")
+ else:
+ # always 'promote' ansible_local
+ all_vars = _combine_and_track(all_vars, wrap_var({'ansible_local': facts.get('ansible_local', {})}), "facts")
+ except KeyError:
+ pass
+
+ if play:
+ all_vars = _combine_and_track(all_vars, play.get_vars(), "play vars")
+
+ vars_files = play.get_vars_files()
+ try:
+ for vars_file_item in vars_files:
+ # create a set of temporary vars here, which incorporate the extra
+ # and magic vars so we can properly template the vars_files entries
+ temp_vars = combine_vars(all_vars, self._extra_vars)
+ temp_vars = combine_vars(temp_vars, magic_variables)
+ templar = Templar(loader=self._loader, variables=temp_vars)
+
+ # we assume each item in the list is itself a list, as we
+ # support "conditional includes" for vars_files, which mimics
+ # the with_first_found mechanism.
+ vars_file_list = vars_file_item
+ if not isinstance(vars_file_list, list):
+ vars_file_list = [vars_file_list]
+
+ # now we iterate through the (potential) files, and break out
+ # as soon as we read one from the list. If none are found, we
+ # raise an error, unless we are working on a delegated host (see below)
+ try:
+ for vars_file in vars_file_list:
+ vars_file = templar.template(vars_file)
+ if not isinstance(vars_file, Sequence):
+ raise AnsibleError(
+ "Invalid vars_files entry found: %r\n"
+ "vars_files entries should be either a string type or "
+ "a list of string types after template expansion" % vars_file
+ )
+ try:
+ data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
+ if data is not None:
+ for item in data:
+ all_vars = _combine_and_track(all_vars, item, "play vars_files from '%s'" % vars_file)
+ break
+ except AnsibleFileNotFound:
+ # we continue on loader failures
+ continue
+ except AnsibleParserError:
+ raise
+ else:
+ # if include_delegate_to is set to False, we ignore the missing
+ # vars file here because we're working on a delegated host
+ if include_delegate_to:
+ raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
+ except (UndefinedError, AnsibleUndefinedVariable):
+ if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
+ raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
+ % vars_file_item, obj=vars_file_item)
+ else:
+ # we do not have a full context here, and the missing variable could be because of that
+ # so just show a warning and continue
+ display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
+ continue
+
+ display.vvv("Read vars_file '%s'" % vars_file_item)
+ except TypeError:
+ raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
+ "Got '%s' of type %s" % (vars_files, type(vars_files)))
+
+ # By default, we now merge in all vars from all roles in the play,
+ # unless the user has disabled this via a config option
+ if not C.DEFAULT_PRIVATE_ROLE_VARS:
+ for role in play.get_roles():
+ all_vars = _combine_and_track(all_vars, role.get_vars(include_params=False), "role '%s' vars" % role.name)
+
+ # next, we merge in the vars from the role, which will specifically
+ # follow the role dependency chain, and then we merge in the tasks
+ # vars (which will look at parent blocks/task includes)
+ if task:
+ if task._role:
+ all_vars = _combine_and_track(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False),
+ "role '%s' vars" % task._role.name)
+ all_vars = _combine_and_track(all_vars, task.get_vars(), "task vars")
+
+ # next, we merge in the vars cache (include vars) and nonpersistent
+ # facts cache (set_fact/register), in that order
+ if host:
+ # include_vars non-persistent cache
+ all_vars = _combine_and_track(all_vars, self._vars_cache.get(host.get_name(), dict()), "include_vars")
+ # fact non-persistent cache
+ all_vars = _combine_and_track(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()), "set_fact")
+
+ # next, we merge in role params and task include params
+ if task:
+ if task._role:
+ all_vars = _combine_and_track(all_vars, task._role.get_role_params(task.get_dep_chain()), "role '%s' params" % task._role.name)
+
+ # special case for include tasks, where the include params
+ # may be specified in the vars field for the task, which should
+ # have higher precedence than the vars/np facts above
+ all_vars = _combine_and_track(all_vars, task.get_include_params(), "include params")
+
+ # extra vars
+ all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")
+
+ # magic variables
+ all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")
+
+ # special case for the 'environment' magic variable, as someone
+ # may have set it as a variable and we don't want to stomp on it
+ if task:
+ all_vars['environment'] = task.environment
+
+ # if we have a task and we're delegating to another host, figure out the
+ # variables for that host now so we don't have to rely on hostvars later
+ if task and task.delegate_to is not None and include_delegate_to:
+ all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)
+
+ # 'vars' magic var
+ if task or play:
+ # has to be a copy, otherwise we get a recursive ref
+ all_vars['vars'] = all_vars.copy()
+
+ display.debug("done with get_vars()")
+ if C.DEFAULT_DEBUG:
+ # Use VarsWithSources wrapper class to display var sources
+ return VarsWithSources.new_vars_with_sources(all_vars, _vars_sources)
+ else:
+ return all_vars
+
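+ # Illustrative sketch (an assumption, not taken from this changeset): a
+ # typical trusted caller builds the flat variable namespace for one task
+ # like so, with play/host/task coming from the strategy layer:
+ #
+ # task_vars = variable_manager.get_vars(play=play, host=host, task=task)
+ # templar = Templar(loader=loader, variables=task_vars)
+ # module_args = templar.template(task.args)
+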
+ def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to,
+ _hosts=None, _hosts_all=None):
+ '''
+ Returns a dictionary of so-called "magic" variables in Ansible,
+ which are special variables we set internally for use.
+ '''
+
+ variables = {}
+ variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
+ variables['ansible_playbook_python'] = sys.executable
+ variables['ansible_config_file'] = C.CONFIG_FILE
+
+ if play:
+ # This is a list of all role names of all dependencies for all roles for this play
+ dependency_role_names = list(set([d.get_name() for r in play.roles for d in r.get_all_dependencies()]))
+ # This is a list of all role names of all roles for this play
+ play_role_names = [r.get_name() for r in play.roles]
+
+ # ansible_role_names includes all role names, dependent or directly referenced by the play
+ variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
+ # ansible_play_role_names includes the names of all roles directly referenced by this play
+ # roles that are implicitly referenced via dependencies are not listed.
+ variables['ansible_play_role_names'] = play_role_names
+ # ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
+ # dependencies that are also explicitly named as roles are included in this list
+ variables['ansible_dependent_role_names'] = dependency_role_names
+
+ # DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
+ variables['role_names'] = variables['ansible_play_role_names']
+
+ variables['ansible_play_name'] = play.get_name()
+
+ if task:
+ if task._role:
+ variables['role_name'] = task._role.get_name(include_role_fqcn=False)
+ variables['role_path'] = task._role._role_path
+ variables['role_uuid'] = text_type(task._role._uuid)
+ variables['ansible_collection_name'] = task._role._role_collection
+ variables['ansible_role_name'] = task._role.get_name()
+
+ if self._inventory is not None:
+ variables['groups'] = self._inventory.get_groups_dict()
+ if play:
+ templar = Templar(loader=self._loader)
+ if templar.is_template(play.hosts):
+ pattern = 'all'
+ else:
+ pattern = play.hosts or 'all'
+ # add the list of hosts in the play, as adjusted for limit/filters
+ if not _hosts_all:
+ _hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
+ if not _hosts:
+ _hosts = [h.name for h in self._inventory.get_hosts()]
+
+ variables['ansible_play_hosts_all'] = _hosts_all[:]
+ variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
+ variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]
+
+ # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
+ # however this would take work in the templating engine, so for now we'll add both
+ variables['play_hosts'] = variables['ansible_play_batch']
+
+ # the 'omit' value allows params to be left out if the variable they are based on is undefined
+ variables['omit'] = self._omit_token
+ # Set options vars
+ for option, option_value in iteritems(self._options_vars):
+ variables[option] = option_value
+
+ if self._hostvars is not None and include_hostvars:
+ variables['hostvars'] = self._hostvars
+
+ return variables
+
+ def _get_delegated_vars(self, play, task, existing_variables):
+ if not hasattr(task, 'loop'):
+ # This "task" is not a Task, so we need to skip it
+ return {}, None
+
+ # we unfortunately need to template the delegate_to field here,
+ # as we're fetching vars before post_validate has been called on
+ # the task that has been passed in
+ vars_copy = existing_variables.copy()
+ templar = Templar(loader=self._loader, variables=vars_copy)
+
+ items = []
+ has_loop = True
+ if task.loop_with is not None:
+ if task.loop_with in lookup_loader:
+ try:
+ loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=templar,
+ loader=self._loader, fail_on_undefined=True, convert_bare=False)
+ items = wrap_var(lookup_loader.get(task.loop_with, loader=self._loader, templar=templar).run(terms=loop_terms, variables=vars_copy))
+ except AnsibleTemplateError:
+ # This task will be skipped later due to this, so we just set up
+ # a dummy list for the later code so it doesn't fail
+ items = [None]
+ else:
+ raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
+ elif task.loop is not None:
+ try:
+ items = templar.template(task.loop)
+ except AnsibleTemplateError:
+ # This task will be skipped later due to this, so we just set up
+ # a dummy list for the later code so it doesn't fail
+ items = [None]
+ else:
+ has_loop = False
+ items = [None]
+
+ # since the host can change per loop iteration, we keep a dict per resolved host name
+ delegated_host_vars = dict()
+ item_var = getattr(task.loop_control, 'loop_var', 'item')
+ cache_items = False
+ for item in items:
+ # update the variables with the item value for templating, in case we need it
+ if item is not None:
+ vars_copy[item_var] = item
+
+ templar.available_variables = vars_copy
+ delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
+ if delegated_host_name != task.delegate_to:
+ cache_items = True
+ if delegated_host_name is None:
+ raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
+ if not isinstance(delegated_host_name, string_types):
+ raise AnsibleError(message="the field 'delegate_to' has an invalid type (%s), and could not be"
+ " converted to a string type." % type(delegated_host_name), obj=task._ds)
+
+ if delegated_host_name in delegated_host_vars:
+ # no need to repeat ourselves, as the delegate_to value
+ # does not appear to be tied to the loop item variable
+ continue
+
+ # now try to find the delegated-to host in inventory, or failing that,
+ # create a new host on the fly so we can fetch variables for it
+ delegated_host = None
+ if self._inventory is not None:
+ delegated_host = self._inventory.get_host(delegated_host_name)
+ # try looking it up based on the address field, and finally
+ # fall back to creating a host on the fly to use for the var lookup
+ if delegated_host is None:
+ for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
+ # check if the address matches the delegated host name
+ if h.address == delegated_host_name:
+ delegated_host = h
+ break
+ else:
+ delegated_host = Host(name=delegated_host_name)
+ else:
+ delegated_host = Host(name=delegated_host_name)
+
+ # now we go fetch the vars for the delegated-to host and save them in our
+ # master dictionary of variables to be used later in the TaskExecutor/PlayContext
+ delegated_host_vars[delegated_host_name] = self.get_vars(
+ play=play,
+ host=delegated_host,
+ task=task,
+ include_delegate_to=False,
+ include_hostvars=True,
+ )
+ delegated_host_vars[delegated_host_name]['inventory_hostname'] = vars_copy.get('inventory_hostname')
+
+ _ansible_loop_cache = None
+ if has_loop and cache_items:
+ # delegate_to templating produced a change, so we will cache the templated items
+ # in a special private hostvar
+ # this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
+ # which may reprocess the loop
+ _ansible_loop_cache = items
+
+ return delegated_host_vars, _ansible_loop_cache
+
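+ # Illustrative sketch of the case handled above (an assumption, not taken
+ # from this changeset): a task whose delegate_to is templated from the
+ # loop item, e.g.
+ #
+ # - command: /usr/bin/uptime
+ # delegate_to: "{{ item }}"
+ # loop: "{{ groups['monitoring'] }}"
+ #
+ # each resolved host name gets its own entry in delegated_host_vars, and
+ # since templating changed delegate_to per item, the templated items are
+ # cached in _ansible_loop_cache for the TaskExecutor.
+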
+ def clear_facts(self, hostname):
+ '''
+ Clears the facts for a host
+ '''
+ self._fact_cache.pop(hostname, None)
+
+ def set_host_facts(self, host, facts):
+ '''
+ Sets or updates the given facts for a host in the fact cache.
+ '''
+
+ if not isinstance(facts, Mapping):
+ raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts))
+
+ try:
+ host_cache = self._fact_cache[host]
+ except KeyError:
+ # We get to set this as new
+ host_cache = facts
+ else:
+ if not isinstance(host_cache, MutableMapping):
+ raise TypeError('The object retrieved for {0} must be a MutableMapping but was'
+ ' a {1}'.format(host, type(host_cache)))
+ # Update the existing facts
+ host_cache.update(facts)
+
+ # Save the facts back to the backing store
+ self._fact_cache[host] = host_cache
+
+ def set_nonpersistent_facts(self, host, facts):
+ '''
+ Sets or updates the given facts for a host in the non-persistent fact cache.
+ '''
+
+ if not isinstance(facts, Mapping):
+ raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts))
+
+ try:
+ self._nonpersistent_fact_cache[host].update(facts)
+ except KeyError:
+ self._nonpersistent_fact_cache[host] = facts
+
+ def set_host_variable(self, host, varname, value):
+ '''
+ Sets a value in the vars_cache for a host.
+ '''
+ if host not in self._vars_cache:
+ self._vars_cache[host] = dict()
+ if varname in self._vars_cache[host] and isinstance(self._vars_cache[host][varname], MutableMapping) and isinstance(value, MutableMapping):
+ self._vars_cache[host] = combine_vars(self._vars_cache[host], {varname: value})
+ else:
+ self._vars_cache[host][varname] = value
+
+
+class VarsWithSources(MutableMapping):
+ '''
+ Dict-like class for vars that also provides source information for each var
+
+ This class can only store the source for top-level vars. It does no tracking
+ on its own; it just shows a debug message, using the source information it
+ was given, when a particular var is accessed.
+ '''
+ def __init__(self, *args, **kwargs):
+ ''' Dict-compatible constructor '''
+ self.data = dict(*args, **kwargs)
+ self.sources = {}
+
+ @classmethod
+ def new_vars_with_sources(cls, data, sources):
+ ''' Alternate constructor method to instantiate class with sources '''
+ v = cls(data)
+ v.sources = sources
+ return v
+
+ def get_source(self, key):
+ return self.sources.get(key, None)
+
+ def __getitem__(self, key):
+ val = self.data[key]
+ # See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
+ display.debug("variable '%s' from source: %s" % (key, self.sources.get(key, "unknown")))
+ return val
+
+ def __setitem__(self, key, value):
+ self.data[key] = value
+
+ def __delitem__(self, key):
+ del self.data[key]
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ # Prevent duplicate debug messages by defining our own __contains__ pointing at the underlying dict
+ def __contains__(self, key):
+ return self.data.__contains__(key)
+
+ def copy(self):
+ return VarsWithSources.new_vars_with_sources(self.data.copy(), self.sources.copy())
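+
+
+# Illustrative usage (an assumption, not part of this module): with
+# DEFAULT_DEBUG enabled, get_vars() returns a VarsWithSources wrapper and
+# accessing a key reports where the variable came from:
+#
+# myvars = VarsWithSources.new_vars_with_sources({'foo': 1}, {'foo': 'extra vars'})
+# myvars.get_source('foo') # -> 'extra vars'
+# myvars['foo'] # logs a debug message naming the source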
diff --git a/lib/ansible/vars/plugins.py b/lib/ansible/vars/plugins.py
new file mode 100644
index 00000000..1411129d
--- /dev/null
+++ b/lib/ansible/vars/plugins.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.inventory.host import Host
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.loader import vars_loader
+from ansible.utils.collection_loader import AnsibleCollectionRef
+from ansible.utils.display import Display
+from ansible.utils.vars import combine_vars
+
+display = Display()
+
+
+def get_plugin_vars(loader, plugin, path, entities):
+
+ data = {}
+ try:
+ data = plugin.get_vars(loader, path, entities)
+ except AttributeError:
+ try:
+ for entity in entities:
+ if isinstance(entity, Host):
+ data.update(plugin.get_host_vars(entity.name))
+ else:
+ data.update(plugin.get_group_vars(entity.name))
+ except AttributeError:
+ if hasattr(plugin, 'run'):
+ raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ else:
+ raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
+ return data
+
+
+def get_vars_from_path(loader, path, entities, stage):
+
+ data = {}
+
+ vars_plugin_list = list(vars_loader.all())
+ for plugin_name in C.VARIABLE_PLUGINS_ENABLED:
+ if AnsibleCollectionRef.is_valid_fqcr(plugin_name):
+ vars_plugin = vars_loader.get(plugin_name)
+ if vars_plugin is None:
+ # Error if there's no play directory or the name is wrong?
+ continue
+ if vars_plugin not in vars_plugin_list:
+ vars_plugin_list.append(vars_plugin)
+
+ for plugin in vars_plugin_list:
+ if plugin._load_name not in C.VARIABLE_PLUGINS_ENABLED and getattr(plugin, 'REQUIRES_WHITELIST', False):
+ # 2.x plugins shipped with ansible should require whitelisting; older or non-shipped plugins should load automatically
+ continue
+
+ has_stage = hasattr(plugin, 'get_option') and plugin.has_option('stage')
+
+ # if a plugin-specific setting has not been provided, use the global setting;
+ # older/non-shipped plugins that don't support the plugin-specific setting should also use the global setting
+ use_global = (has_stage and plugin.get_option('stage') is None) or not has_stage
+
+ if use_global:
+ if C.RUN_VARS_PLUGINS == 'demand' and stage == 'inventory':
+ continue
+ elif C.RUN_VARS_PLUGINS == 'start' and stage == 'task':
+ continue
+ elif has_stage and plugin.get_option('stage') not in ('all', stage):
+ continue
+
+ data = combine_vars(data, get_plugin_vars(loader, plugin, path, entities))
+
+ return data
+
+
+def get_vars_from_inventory_sources(loader, sources, entities, stage):
+
+ data = {}
+ for path in sources:
+
+ if path is None:
+ continue
+ if ',' in path and not os.path.exists(path): # skip host lists
+ continue
+ elif not os.path.isdir(to_bytes(path)):
+ # always pass the directory of the inventory source file
+ path = os.path.dirname(path)
+
+ data = combine_vars(data, get_vars_from_path(loader, path, entities, stage))
+
+ return data
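+
+
+# Illustrative configuration sketch (an assumption, not part of this module):
+# the global stage behaviour above is driven from ansible.cfg, e.g.
+#
+# [defaults]
+# run_vars_plugins = demand
+#
+# a plugin-specific 'stage' option, when set, takes precedence over this
+# global setting, as get_vars_from_path() shows.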
diff --git a/lib/ansible/vars/reserved.py b/lib/ansible/vars/reserved.py
new file mode 100644
index 00000000..d7daea07
--- /dev/null
+++ b/lib/ansible/vars/reserved.py
@@ -0,0 +1,81 @@
+# (c) 2017 Ansible By Red Hat
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook import Play
+from ansible.playbook.block import Block
+from ansible.playbook.role import Role
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def get_reserved_names(include_private=True):
+ ''' this function returns the list of reserved names associated with play objects'''
+
+ public = set()
+ private = set()
+ result = set()
+
+ # FIXME: find a way to 'not hardcode', possibly need role deps/includes
+ class_list = [Play, Role, Block, Task]
+
+ for aclass in class_list:
+ aobj = aclass()
+
+ # collect the attribute names, split into public and private sets
+ for attribute in aobj.__dict__['_attributes']:
+ if 'private' in attribute:
+ private.add(attribute)
+ else:
+ public.add(attribute)
+
+ # local_action is implicit with action
+ if 'action' in public:
+ public.add('local_action')
+
+ # loop implies with_
+ # FIXME: remove after with_ is not only deprecated but removed
+ if 'loop' in private or 'loop' in public:
+ public.add('with_')
+
+ if include_private:
+ result = public.union(private)
+ else:
+ result = public
+
+ return result
+
+
+def warn_if_reserved(myvars):
+ ''' this function warns if any variable passed conflicts with internally reserved names '''
+
+ varnames = set(myvars)
+ varnames.discard('vars') # we add this one internally, so safe to ignore
+ for varname in varnames.intersection(_RESERVED_NAMES):
+ display.warning('Found variable using reserved name: %s' % varname)
+
+
+def is_reserved_name(name):
+ return name in _RESERVED_NAMES
+
+
+_RESERVED_NAMES = frozenset(get_reserved_names())
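+
+
+# Illustrative usage (an assumption, not part of this module):
+#
+# warn_if_reserved({'tasks': [], 'my_var': 1}) # warns about 'tasks'
+# is_reserved_name('environment') # True, a playbook keyword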
diff --git a/licenses/Apache-License.txt b/licenses/Apache-License.txt
new file mode 100644
index 00000000..e06d2081
--- /dev/null
+++ b/licenses/Apache-License.txt
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/licenses/MIT-license.txt b/licenses/MIT-license.txt
new file mode 100644
index 00000000..071f4dfc
--- /dev/null
+++ b/licenses/MIT-license.txt
@@ -0,0 +1,14 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
+OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/licenses/PSF-license.txt b/licenses/PSF-license.txt
new file mode 100644
index 00000000..b6bddd8f
--- /dev/null
+++ b/licenses/PSF-license.txt
@@ -0,0 +1,48 @@
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
+Reserved" are retained in Python alone or in any derivative version prepared by
+Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/licenses/simplified_bsd.txt b/licenses/simplified_bsd.txt
new file mode 100644
index 00000000..6810e04e
--- /dev/null
+++ b/licenses/simplified_bsd.txt
@@ -0,0 +1,8 @@
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/packaging/arch/README.md b/packaging/arch/README.md
new file mode 100644
index 00000000..994daca7
--- /dev/null
+++ b/packaging/arch/README.md
@@ -0,0 +1,8 @@
+Arch Packaging Files
+--------------------
+
+You can find the source files for [ansible-git][1] in the [Arch User Repository][2]; the stable version [ansible][3] is available in \[community\].
+
+ [1]: https://aur.archlinux.org/packages/ansible-git/
+ [2]: https://wiki.archlinux.org/index.php/Arch_User_Repository
+ [3]: https://www.archlinux.org/packages/community/any/ansible/
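+
+To install the stable package from \[community\] (a usage note, not part of
+the original packaging files):
+
+ pacman -S ansible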
diff --git a/packaging/debian/Dockerfile b/packaging/debian/Dockerfile
new file mode 100644
index 00000000..de831d09
--- /dev/null
+++ b/packaging/debian/Dockerfile
@@ -0,0 +1,20 @@
+FROM ubuntu:xenial
+
+RUN apt-get update && apt-get install -y \
+ python-docutils \
+ cdbs \
+ debootstrap \
+ devscripts \
+ make \
+ pbuilder \
+ python-jinja2 \
+ python-setuptools \
+ python-yaml \
+ && \
+ apt-get clean
+
+VOLUME /ansible
+WORKDIR /ansible
+
+ENTRYPOINT ["/bin/bash", "-c"]
+CMD ["make deb"]
diff --git a/packaging/debian/README.md b/packaging/debian/README.md
new file mode 100644
index 00000000..7eaa4721
--- /dev/null
+++ b/packaging/debian/README.md
@@ -0,0 +1,39 @@
+Ansible Debian Package
+======================
+
+To create an Ansible DEB package:
+
+__Note__: You must run this target as root or set `PBUILDER_BIN='sudo pbuilder'`
+
+```
+apt-get install python-docutils cdbs debootstrap devscripts make pbuilder python-setuptools
+git clone https://github.com/ansible/ansible.git
+cd ansible
+DEB_DIST='xenial trusty precise' make deb
+```
+
+Building in Docker:
+
+```
+git clone https://github.com/ansible/ansible.git
+cd ansible
+docker build -t ansible-deb-builder -f packaging/debian/Dockerfile .
+docker run --privileged -e DEB_DIST='trusty' -v $(pwd):/ansible ansible-deb-builder
+```
+
+The Debian package file will be placed in the `deb-build` directory. This can then be added to an APT repository or installed with `dpkg -i <package-file>`.
+
+Note that `dpkg -i` does not resolve dependencies.
+
+To install the Ansible DEB package and resolve dependencies:
+
+```
+dpkg -i <package-file>
+apt-get -fy install
+```
+
+Or, if you are running Debian Stretch (or later) or Ubuntu Xenial (or later):
+
+```
+apt install /path/to/<package-file>
+```
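+
+As a quick sanity check after installing (a usage note, assuming the package
+provides the standard entry points):
+
+```
+ansible --version
+```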
diff --git a/packaging/debian/ansible-base.dirs b/packaging/debian/ansible-base.dirs
new file mode 100644
index 00000000..fba15a43
--- /dev/null
+++ b/packaging/debian/ansible-base.dirs
@@ -0,0 +1,4 @@
+etc/ansible
+etc/ansible/roles
+usr/lib/python3/dist-packages/ansible
+usr/share/ansible
diff --git a/packaging/debian/ansible-base.install b/packaging/debian/ansible-base.install
new file mode 100644
index 00000000..d2dac7c1
--- /dev/null
+++ b/packaging/debian/ansible-base.install
@@ -0,0 +1,15 @@
+examples/hosts etc/ansible
+docs/man/man1/*.1 usr/share/man/man1
+debian/tmp/usr/bin/ansible-galaxy usr/bin
+debian/tmp/usr/bin/ansible-vault usr/bin
+debian/tmp/usr/bin/ansible-doc usr/bin
+debian/tmp/usr/bin/ansible-console usr/bin
+debian/tmp/usr/bin/ansible-connection usr/bin
+debian/tmp/usr/bin/ansible-inventory usr/bin
+debian/tmp/usr/bin/ansible-config usr/bin
+debian/tmp/usr/bin/ansible-pull usr/bin
+debian/tmp/usr/bin/ansible-playbook usr/bin
+debian/tmp/usr/bin/ansible usr/bin
+examples/ansible.cfg etc/ansible
+debian/tmp/usr/lib/python3/dist-packages/ansible usr/lib/python3/dist-packages
+debian/tmp/usr/lib/python3/dist-packages/ansible_*.egg-info
diff --git a/packaging/debian/ansible-test.install b/packaging/debian/ansible-test.install
new file mode 100644
index 00000000..92528905
--- /dev/null
+++ b/packaging/debian/ansible-test.install
@@ -0,0 +1,2 @@
+debian/tmp/usr/bin/ansible-test usr/bin
+debian/tmp/usr/lib/python3/dist-packages/ansible_test usr/lib/python3/dist-packages
diff --git a/packaging/debian/changelog b/packaging/debian/changelog
new file mode 100644
index 00000000..0eb840ce
--- /dev/null
+++ b/packaging/debian/changelog
@@ -0,0 +1,5 @@
+ansible-base (%VERSION%-%RELEASE%~%DIST%) %DIST%; urgency=low
+
+ * %VERSION% release
+
+ -- Ansible, Inc. <info@ansible.com> %DATE%
diff --git a/packaging/debian/compat b/packaging/debian/compat
new file mode 100644
index 00000000..7ed6ff82
--- /dev/null
+++ b/packaging/debian/compat
@@ -0,0 +1 @@
+5
diff --git a/packaging/debian/control b/packaging/debian/control
new file mode 100644
index 00000000..71380057
--- /dev/null
+++ b/packaging/debian/control
@@ -0,0 +1,29 @@
+Source: ansible-base
+Section: admin
+Priority: optional
+Standards-Version: 3.9.3
+Maintainer: Ansible, Inc. <info@ansible.com>
+Build-Depends: cdbs, debhelper (>= 5.0.0), python3-docutils, python3, dh-python | python-support, python3-setuptools, lsb-release, python3-straight.plugin, python3-packaging, python3-jinja2
+Homepage: https://github.com/ansible/ansible/
+
+Package: ansible-base
+Architecture: all
+Depends: python3-jinja2, python3-yaml, python3-paramiko, python3-cryptography, sshpass, ${misc:Depends}, ${python:Depends}
+Description: Ansible IT Automation
+ Ansible is a radically simple model-driven configuration management,
+ multi-node deployment, and remote task execution system. Ansible works
+ over SSH and does not require any software or daemons to be installed
+ on remote nodes. Extension modules can be written in any language and
+ are transferred to managed machines automatically.
+
+Package: ansible-test
+Architecture: all
+Depends: ansible-base (= ${binary:Version}), python3-venv, ${misc:Depends}
+Description: Ansible IT Automation
+ Ansible is a radically simple model-driven configuration management,
+ multi-node deployment, and remote task execution system. Ansible works
+ over SSH and does not require any software or daemons to be installed
+ on remote nodes. Extension modules can be written in any language and
+ are transferred to managed machines automatically.
+ This package installs the ansible-test command for testing modules and
+ plugins developed for ansible.
diff --git a/packaging/debian/copyright b/packaging/debian/copyright
new file mode 100644
index 00000000..4a17425f
--- /dev/null
+++ b/packaging/debian/copyright
@@ -0,0 +1,26 @@
+This package was debianized by Henry Graham (hzgraham) <Henry.Graham@mail.wvu.edu> on
+Tue, 17 Apr 2012 12:19:47 -0400.
+
+It was downloaded from https://github.com/ansible/ansible.git
+
+Copyright: Henry Graham (hzgraham) <Henry.Graham@mail.wvu.edu>
+
+License:
+
+ This package is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 dated June, 1991.
+
+ This package is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this package; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
+ USA.
+
+On Debian systems, the complete text of the GNU General
+Public License can be found in `/usr/share/common-licenses/GPL'.
+
diff --git a/packaging/debian/docs b/packaging/debian/docs
new file mode 100644
index 00000000..a1320b1b
--- /dev/null
+++ b/packaging/debian/docs
@@ -0,0 +1 @@
+README.rst
diff --git a/packaging/debian/pycompat b/packaging/debian/pycompat
new file mode 100644
index 00000000..0cfbf088
--- /dev/null
+++ b/packaging/debian/pycompat
@@ -0,0 +1 @@
+2
diff --git a/packaging/debian/rules b/packaging/debian/rules
new file mode 100755
index 00000000..cd1e9534
--- /dev/null
+++ b/packaging/debian/rules
@@ -0,0 +1,17 @@
+#!/usr/bin/make -f
+# -- makefile --
+
+DEB_PYTHON3_MODULE_PACKAGES=ansible-base ansible_test
+#DEB_PYTHON_INSTALL_ARGS_ALL="--install-purelib=/usr/lib/python2.7/site-packages/"
+DEB_PYTHON_DISTUTILS_INSTALLDIR_SKEL = /usr/lib/python3/dist-packages/
+
+include /usr/share/cdbs/1/rules/debhelper.mk
+include /usr/share/cdbs/1/class/python-distutils.mk
+
+# dist-packages for modern distros, site-packages for older ones (e.g. Ubuntu 14.04)
+ifeq ($(shell lsb_release -cs), precise)
+ export ANSIBLE_CRYPTO_BACKEND = pycrypto
+endif
+ifeq ($(shell lsb_release -cs), trusty)
+ export ANSIBLE_CRYPTO_BACKEND = pycrypto
+endif
diff --git a/packaging/gentoo/README.md b/packaging/gentoo/README.md
new file mode 100644
index 00000000..991692c9
--- /dev/null
+++ b/packaging/gentoo/README.md
@@ -0,0 +1,3 @@
+Gentoo ebuilds are available in the main tree:
+
+emerge ansible
diff --git a/packaging/macports/.gitignore b/packaging/macports/.gitignore
new file mode 100644
index 00000000..2af97a6f
--- /dev/null
+++ b/packaging/macports/.gitignore
@@ -0,0 +1,2 @@
+PortIndex
+PortIndex.quick
diff --git a/packaging/macports/README.md b/packaging/macports/README.md
new file mode 100644
index 00000000..7984a96c
--- /dev/null
+++ b/packaging/macports/README.md
@@ -0,0 +1,39 @@
+This portfile installs ansible from the git repository; it will install the
+latest and greatest version of ansible. This portfile does not install the
+required dependencies to run in accelerated mode.
+
+## Installing the stable version of ansible via macports
+
+If you wish to run a stable version of ansible, please do the following.
+
+First, update your macports repo to the latest versions:
+
+ $ sudo port sync
+
+Then install ansible:
+
+ $ sudo port install ansible
+
+## Installing the devel version of ansible via macports
+
+To use this Portfile to install the development version of ansible, one should
+follow the instructions at
+<http://guide.macports.org/#development.local-repositories>
+
+The basic idea is to add the _ansible/packaging/macports_ directory to your
+_/opt/local/etc/macports/sources.conf_ file. You should have something similar
+to this at the end of the file:
+
+ file:///Users/jtang/develop/ansible/packaging/macports
+ rsync://rsync.macports.org/release/tarballs/ports.tar [default]
+
+In the _ansible/packaging/macports_ directory, do this:
+
+ $ portindex
+
+Once the index is created the _Portfile_ will override the one in the upstream
+macports repository.
+
+Installing newer development versions should involve an uninstall, clean,
+install process or else the Portfile will need its version number/epoch
+bumped.
diff --git a/packaging/macports/sysutils/ansible/Portfile b/packaging/macports/sysutils/ansible/Portfile
new file mode 100644
index 00000000..9a386d77
--- /dev/null
+++ b/packaging/macports/sysutils/ansible/Portfile
@@ -0,0 +1,67 @@
+# -*- coding: utf-8; mode: tcl; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:ft=tcl:et:sw=4:ts=4:sts=4
+# $Id: Portfile 102428 2013-02-02 18:34:49Z blair@macports.org $
+
+PortSystem 1.0
+PortGroup python 1.0
+
+name ansible
+version devel
+categories sysutils
+supported_archs noarch
+
+maintainers nomaintainer
+
+homepage https://ansible.com/
+description Ansible IT Automation
+long_description \
+ Ansible is a radically simple model-driven configuration \
+ management, multi-node deployment, and orchestration \
+ engine. Ansible works over SSH and does not require any software \
+ or daemons to be installed on remote nodes. Extension modules can \
+ be written in any language and are transferred to managed machines \
+ automatically.
+
+license GPL-3+
+
+platforms darwin
+
+fetch.type git
+git.url https://github.com/ansible/ansible.git
+git.branch ${version}
+
+python.default_version 27
+depends_lib-append port:py${python.version}-jinja2 \
+ port:py${python.version}-paramiko \
+ port:py${python.version}-yaml
+
+patch {
+ fs-traverse f ${worksrcpath} {
+ if {[file isfile ${f}]} {
+ reinplace -locale C "s#/etc/ansible#${prefix}/etc/ansible#g" ${f}
+ reinplace -locale C "s#/usr/share/ansible#${prefix}/share/ansible#g" ${f}
+ }
+ }
+}
+
+post-destroot {
+ # documentation and examples
+ xinstall -m 644 -W ${worksrcpath} README.rst CHANGELOG.md CONTRIBUTING.md COPYING \
+ ${destroot}${prefix}/share/doc/${name}
+
+ xinstall -m 755 -d ${destroot}${prefix}/share/doc/examples
+ xinstall -m 755 ${worksrcpath}/examples/ansible.cfg ${destroot}${prefix}/share/doc/${name}/examples
+ xinstall -m 755 ${worksrcpath}/examples/hosts ${destroot}${prefix}/share/doc/${name}/examples
+
+ file copy ${worksrcpath}/examples/playbooks ${destroot}${prefix}/share/doc/${name}/examples/
+
+ # man pages
+ xinstall -m 755 -d ${destroot}${prefix}/share/man/man1
+ eval xinstall -m 755 [glob ${worksrcpath}/docs/man/man1/*.1] ${destroot}${prefix}/share/man/man1
+
+ # install sample config and hosts file
+ xinstall -m 755 -d ${destroot}${prefix}/etc/ansible
+ xinstall -m 755 ${worksrcpath}/examples/ansible.cfg ${destroot}${prefix}/etc/ansible/ansible.cfg
+ xinstall -m 755 ${worksrcpath}/examples/hosts ${destroot}${prefix}/etc/ansible/hosts
+}
+
+python.link_binaries_suffix
diff --git a/packaging/release/Makefile b/packaging/release/Makefile
new file mode 100644
index 00000000..d1ff8f88
--- /dev/null
+++ b/packaging/release/Makefile
@@ -0,0 +1,61 @@
+version ?= $(shell python versionhelper/version_helper.py --raw)
+
+.PHONY: all
+all:
+ @echo "USAGE:"
+ @echo
+ @echo "make release version={version} # current version is '${version}'"
+ @echo "make publish"
+ @echo
+ @echo "NOTE: Make sure to source hacking/env-setup before running these targets."
+
+.PHONY: release
+release: version summary changelog commit-release
+ git show -p
+ git status
+ @echo
+ @echo 'Run `git push` if you are satisfied with the changes.'
+
+.PHONY: version
+version:
+ sed -i.bak "s/^__version__ = .*$$/__version__ = '${version}'/" ../../lib/ansible/release.py
+ rm ../../lib/ansible/release.py.bak
+
+.PHONY: summary
+summary:
+ @printf '%s\n%s\n%s\n' \
+ 'release_summary: |' \
+ ' | Release Date: $(shell date '+%Y-%m-%d')' \
+ ' | `Porting Guide <https://docs.ansible.com/ansible/devel/porting_guides.html>`__' > \
+ ../../changelogs/fragments/v${version}_summary.yaml
+
+.PHONY: changelog
+changelog:
+ antsibull-changelog release -vv --use-ansible-doc && antsibull-changelog generate -vv --use-ansible-doc
+ ansible-test sanity changelogs/
+
+.PHONY: commit-release
+commit-release:
+ git add ../../changelogs/ ../../lib/ansible/release.py
+ git commit -m "New release v${version}"
+
+.PHONY: publish
+publish: tag postversion commit-postversion
+ git show -p
+ git status
+ @echo
+ @echo 'Run `git push --follow-tags` if you are satisfied with the changes.'
+
+.PHONY: tag
+tag:
+ git tag -a v${version} -m "New release v${version}"
+
+.PHONY: postversion
+postversion:
+ sed -i.bak "s/^__version__ = .*$$/__version__ = '${version}.post0'/" ../../lib/ansible/release.py
+ rm ../../lib/ansible/release.py.bak
+
+.PHONY: commit-postversion
+commit-postversion:
+ git add ../../lib/ansible/release.py
+ git commit -m "Update Ansible release version to v${version}."
diff --git a/packaging/release/tests/__init__.py b/packaging/release/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/packaging/release/tests/__init__.py
diff --git a/packaging/release/tests/version_helper_test.py b/packaging/release/tests/version_helper_test.py
new file mode 100644
index 00000000..ff14bd4d
--- /dev/null
+++ b/packaging/release/tests/version_helper_test.py
@@ -0,0 +1,47 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from packaging.version import InvalidVersion
+from versionhelper.version_helper import AnsibleVersionMunger
+
+
+@pytest.mark.parametrize('version,revision,codename,output_propname,expected', [
+ ('2.5.0.dev1', None, None, 'raw', '2.5.0.dev1'),
+ ('2.5.0a0.post0', None, None, 'raw', '2.5.0a0.post0'),
+ ('2.5.0', None, None, 'raw', '2.5.0'),
+ ('2.5.0.dev1', None, None, 'major_version', '2.5'),
+ ('2.5.0', None, None, 'major_version', '2.5'),
+ ('2.5.0.dev1', None, None, 'base_version', '2.5.0'),
+ ('2.5.0', None, None, 'base_version', '2.5.0'),
+ ('2.5.0.dev1', None, None, 'deb_version', '2.5.0~dev1'),
+ ('2.5.0b1', None, None, 'deb_version', '2.5.0~b1'),
+ ('2.5.0b1.dev1', None, None, 'deb_version', '2.5.0~b1~dev1'),
+ ('2.5.0b1.post0', None, None, 'deb_version', '2.5.0~b1~post0'),
+ ('2.5.0', None, None, 'deb_version', '2.5.0'),
+ ('2.5.0.dev1', None, None, 'deb_release', '1'),
+ ('2.5.0b1', 2, None, 'deb_release', '2'),
+ ('2.5.0.dev1', None, None, 'rpm_release', '0.1.dev1'),
+ ('2.5.0a1', None, None, 'rpm_release', '0.101.a1'),
+ ('2.5.0a1.post0', None, None, 'rpm_release', '0.101.a1.post0'),
+ ('2.5.0b1', None, None, 'rpm_release', '0.201.b1'),
+ ('2.5.0rc1', None, None, 'rpm_release', '0.1001.rc1'),
+ ('2.5.0rc1', '0.99', None, 'rpm_release', '0.99.rc1'),
+ ('2.5.0.rc.1', None, None, 'rpm_release', '0.1001.rc.1'),
+ ('2.5.0.rc1.dev1', None, None, 'rpm_release', '0.1001.rc1.dev1'),
+ ('2.5.0', None, None, 'rpm_release', '1'),
+ ('2.5.0', 2, None, 'rpm_release', '2'),
+ ('2.5.0', None, None, 'codename', 'UNKNOWN'),
+ ('2.5.0', None, 'LedZeppelinSongHere', 'codename', 'LedZeppelinSongHere'),
+ ('2.5.0x1', None, None, None, InvalidVersion)
+])
+def test_output_values(version, revision, codename, output_propname, expected):
+ try:
+ v = AnsibleVersionMunger(version, revision, codename)
+ assert getattr(v, output_propname) == expected
+ except Exception as ex:
+ if isinstance(expected, type):
+ assert isinstance(ex, expected)
+ else:
+ raise
diff --git a/packaging/release/versionhelper/__init__.py b/packaging/release/versionhelper/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/packaging/release/versionhelper/__init__.py
diff --git a/packaging/release/versionhelper/version_helper.py b/packaging/release/versionhelper/version_helper.py
new file mode 100644
index 00000000..163494b6
--- /dev/null
+++ b/packaging/release/versionhelper/version_helper.py
@@ -0,0 +1,195 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import argparse
+import os
+import re
+import sys
+
+from packaging.version import Version, VERSION_PATTERN
+
+
+class AnsibleVersionMunger(object):
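+    # Offsets added to the prerelease tag number when composing the RPM
+    # release (e.g. rc1 -> 0.1001): they preserve dev < alpha < beta < rc
+    # ordering while keeping every prerelease below the final release's '1'.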
+ tag_offsets = dict(
+ dev=0,
+ a=100,
+ b=200,
+ rc=1000
+ )
+
+ # TODO: allow overrides here for packaging bump etc
+ def __init__(self, raw_version, revision=None, codename=None):
+ self._raw_version = raw_version
+ self._revision = revision
+ self._parsed_version = Version(raw_version)
+ self._codename = codename
+ self._parsed_regex_match = re.match(VERSION_PATTERN, raw_version, re.VERBOSE | re.IGNORECASE)
+
+ @property
+ def deb_version(self):
+ v = self._parsed_version
+
+ match = self._parsed_regex_match
+
+ # treat dev/post as prerelease for now; treat dev/post as equivalent and disallow together
+ if v.is_prerelease or match.group('dev') or match.group('post'):
+ if match.group('dev') and match.group('post'):
+ raise Exception("dev and post may not currently be used together")
+ if match.group('pre'):
+ tag_value = match.group('pre')
+ tag_type = match.group('pre_l')
+ if match.group('dev'):
+ tag_value += ('~%s' % match.group('dev').strip('.'))
+ if match.group('post'):
+ tag_value += ('~%s' % match.group('post').strip('.'))
+ elif match.group('dev'):
+ tag_type = "dev"
+ tag_value = match.group('dev').strip('.')
+ elif match.group('post'):
+ tag_type = "dev"
+ tag_value = match.group('post').strip('.')
+ else:
+ raise Exception("unknown prerelease type for version {0}".format(self._raw_version))
+ else:
+ tag_type = None
+ tag_value = ''
+
+ # not a pre/post/dev release, just return base version
+ if not tag_type:
+ return '{base_version}'.format(base_version=self.base_version)
+
+        # it is a pre/post/dev release, include the tag value with a ~
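+        # ('~' sorts before the empty string in Debian version comparisons,
+        # so 2.5.0~b1 is ordered before 2.5.0)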
+ return '{base_version}~{tag_value}'.format(base_version=self.base_version, tag_value=tag_value)
+
+ @property
+ def deb_release(self):
+ return '1' if self._revision is None else str(self._revision)
+
+ @property
+ def rpm_release(self):
+ v = self._parsed_version
+ match = self._parsed_regex_match
+
+ # treat presence of dev/post as prerelease for now; treat dev/post the same and disallow together
+ if v.is_prerelease or match.group('dev') or match.group('post'):
+ if match.group('dev') and match.group('post'):
+ raise Exception("dev and post may not currently be used together")
+ if match.group('pre'):
+ tag_value = match.group('pre')
+ tag_type = match.group('pre_l')
+ tag_ver = match.group('pre_n')
+ if match.group('dev'):
+ tag_value += match.group('dev')
+ if match.group('post'):
+ tag_value += match.group('post')
+ elif match.group('dev'):
+ tag_type = "dev"
+ tag_value = match.group('dev')
+ tag_ver = match.group('dev_n')
+ elif match.group('post'):
+ tag_type = "dev"
+ tag_value = match.group('post')
+ tag_ver = match.group('post_n')
+ else:
+ raise Exception("unknown prerelease type for version {0}".format(self._raw_version))
+ else:
+ tag_type = None
+ tag_value = ''
+ tag_ver = 0
+
+ # not a pre/post/dev release, just append revision (default 1)
+ if not tag_type:
+ if self._revision is None:
+ self._revision = 1
+ return '{revision}'.format(revision=self._revision)
+
+ # cleanse tag value in case it starts with .
+ tag_value = tag_value.strip('.')
+
+        # coerce to int, treating None/empty as 0
+ tag_ver = int(tag_ver if tag_ver else 0)
+
+ if self._revision is None:
+ tag_offset = self.tag_offsets.get(tag_type)
+ if tag_offset is None:
+ raise Exception('no tag offset defined for tag {0}'.format(tag_type))
+ pkgrel = '0.{0}'.format(tag_offset + tag_ver)
+ else:
+ pkgrel = self._revision
+
+ return '{pkgrel}.{tag_value}'.format(pkgrel=pkgrel, tag_value=tag_value)
+
+ @property
+ def raw(self):
+ return self._raw_version
+
+ # return the x.y.z version without any other modifiers present
+ @property
+ def base_version(self):
+ return self._parsed_version.base_version
+
+ # return the x.y version without any other modifiers present
+ @property
+ def major_version(self):
+        return re.match(r'^(\d+\.\d+)', self._raw_version).group(1)
+
+ @property
+ def codename(self):
+ return self._codename if self._codename else "UNKNOWN"
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Extract/transform Ansible versions to various packaging formats')
+
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--raw', action='store_true')
+ group.add_argument('--majorversion', action='store_true')
+ group.add_argument('--baseversion', action='store_true')
+ group.add_argument('--debversion', action='store_true')
+ group.add_argument('--debrelease', action='store_true')
+ group.add_argument('--rpmrelease', action='store_true')
+ group.add_argument('--codename', action='store_true')
+ group.add_argument('--all', action='store_true')
+
+ parser.add_argument('--revision', action='store', default='auto')
+
+ args = parser.parse_args()
+
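+    # Make the source tree importable so the version and codename can be read
+    # directly from lib/ansible/release.py.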
+ mydir = os.path.dirname(__file__)
+ release_loc = os.path.normpath(mydir + '/../../../lib')
+
+ sys.path.insert(0, release_loc)
+
+ from ansible import release
+
+ rev = None
+ if args.revision != 'auto':
+ rev = args.revision
+
+ v_raw = release.__version__
+ codename = release.__codename__
+ v = AnsibleVersionMunger(v_raw, revision=rev, codename=codename)
+
+ if args.raw:
+ print(v.raw)
+ elif args.baseversion:
+ print(v.base_version)
+ elif args.majorversion:
+ print(v.major_version)
+ elif args.debversion:
+ print(v.deb_version)
+ elif args.debrelease:
+ print(v.deb_release)
+ elif args.rpmrelease:
+ print(v.rpm_release)
+ elif args.codename:
+ print(v.codename)
+ elif args.all:
+ props = [name for (name, impl) in vars(AnsibleVersionMunger).items() if isinstance(impl, property)]
+
+ for propname in props:
+ print('{0}: {1}'.format(propname, getattr(v, propname)))
+
+
+if __name__ == '__main__':
+ main()
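For quick orientation, here is a minimal usage sketch of the munger (assuming the `packaging` dependency is installed and the snippet runs from packaging/release); the deb output matches the parametrized tests above, and the rpm output follows the same offset scheme:

    from versionhelper.version_helper import AnsibleVersionMunger

    v = AnsibleVersionMunger('2.5.0b1.dev1')
    print(v.base_version)  # 2.5.0
    print(v.deb_version)   # 2.5.0~b1~dev1
    print(v.rpm_release)   # 0.201.b1.dev1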
diff --git a/packaging/sdist/check-link-behavior.py b/packaging/sdist/check-link-behavior.py
new file mode 100755
index 00000000..34e05023
--- /dev/null
+++ b/packaging/sdist/check-link-behavior.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+"""Checks for link behavior required for sdist to retain symlinks."""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import os
+import platform
+import shutil
+import sys
+import tempfile
+
+
+def main():
+ """Main program entry point."""
+ temp_dir = tempfile.mkdtemp()
+
+ target_path = os.path.join(temp_dir, 'file.txt')
+ symlink_path = os.path.join(temp_dir, 'symlink.txt')
+ hardlink_path = os.path.join(temp_dir, 'hardlink.txt')
+
+ try:
+ with open(target_path, 'w'):
+ pass
+
+ os.symlink(target_path, symlink_path)
+ os.link(symlink_path, hardlink_path)
+
+ if not os.path.islink(symlink_path):
+ abort('Symbolic link not created.')
+
+ if not os.path.islink(hardlink_path):
+ # known issue on MacOS (Darwin)
+ abort('Hard link of symbolic link created as a regular file.')
+ finally:
+ shutil.rmtree(temp_dir)
+
+
+def abort(reason):
+ """
+ :type reason: str
+ """
+ sys.exit('ERROR: %s\n'
+ 'This will prevent symbolic links from being preserved in the resulting tarball.\n'
+ 'Aborting creation of sdist on platform: %s'
+ % (reason, platform.system()))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..44689bb5
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,9 @@
+# Note: this requirements.txt file is used to specify what dependencies are
+# needed to make the package run rather than for deployment of a tested set of
+# packages. Thus, this should be the loosest set possible (only required
+# packages, not optional ones, and with the widest range of versions that could
+# be suitable)
+jinja2
+PyYAML
+cryptography
+packaging
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..b15d4c96
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,428 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import os.path
+import re
+import sys
+import warnings
+
+from collections import defaultdict
+
+try:
+ from setuptools import setup, find_packages
+ from setuptools.command.build_py import build_py as BuildPy
+ from setuptools.command.install_lib import install_lib as InstallLib
+ from setuptools.command.install_scripts import install_scripts as InstallScripts
+except ImportError:
+ print("Ansible now needs setuptools in order to build. Install it using"
+ " your package manager (usually python-setuptools) or via pip (pip"
+ " install setuptools).", file=sys.stderr)
+ sys.exit(1)
+
+# `distutils` must be imported after `setuptools` or it will cause explosions
+# with `setuptools >=48.0.0, <49.1`.
+# Refs:
+# * https://github.com/ansible/ansible/issues/70456
+# * https://github.com/pypa/setuptools/issues/2230
+# * https://github.com/pypa/setuptools/commit/bd110264
+from distutils.command.build_scripts import build_scripts as BuildScripts
+from distutils.command.sdist import sdist as SDist
+
+
+def find_package_info(*file_paths):
+ try:
+ with open(os.path.join(*file_paths), 'r') as f:
+ info_file = f.read()
+ except Exception:
+ raise RuntimeError("Unable to find package info.")
+
+ # The version line must have the form
+ # __version__ = 'ver'
+ version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
+ info_file, re.M)
+ author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
+ info_file, re.M)
+
+ if version_match and author_match:
+ return version_match.group(1), author_match.group(1)
+ raise RuntimeError("Unable to find package info.")
+
+
+def _validate_install_ansible_base():
+ """Validate that we can install ansible-base. Currently this only
+    cares about upgrading to ansible-base from ansible<2.10.
+ """
+ # Skip common commands we can ignore
+ # Do NOT add bdist_wheel here, we don't ship wheels
+ # and bdist_wheel is the only place we can prevent pip
+ # from installing, as pip creates a wheel, and installs the wheel
+ # and we have no influence over installation within a wheel
+ if set(('sdist', 'egg_info')).intersection(sys.argv):
+ return
+
+ if os.getenv('ANSIBLE_SKIP_CONFLICT_CHECK', '') not in ('', '0'):
+ return
+
+ # Save these for later restoring things to pre invocation
+ sys_modules = sys.modules.copy()
+ sys_modules_keys = set(sys_modules)
+
+    # Make sure `lib` isn't in `sys.path`, which could confuse this check
+ sys_path = sys.path[:]
+ abspath = os.path.abspath
+ sys.path[:] = [p for p in sys.path if abspath(p) != abspath('lib')]
+
+ try:
+ from ansible.release import __version__
+ except ImportError:
+ pass
+ else:
+ version_tuple = tuple(int(v) for v in __version__.split('.')[:2])
+ if version_tuple < (2, 10):
+ stars = '*' * 76
+ raise RuntimeError(
+ '''
+
+ %s
+
+ Cannot install ansible-base with a pre-existing ansible==%s
+ installation.
+
+ Installing ansible-base with ansible-2.9 or older currently installed with
+ pip is known to cause problems. Please uninstall ansible and install the new
+ version:
+
+ pip uninstall ansible
+ pip install ansible-base
+
+ If you want to skip the conflict checks and manually resolve any issues
+ afterwards, set the ANSIBLE_SKIP_CONFLICT_CHECK environment variable:
+
+ ANSIBLE_SKIP_CONFLICT_CHECK=1 pip install ansible-base
+
+ %s
+ ''' % (stars, __version__, stars)
+ )
+ finally:
+ sys.path[:] = sys_path
+ for key in sys_modules_keys.symmetric_difference(sys.modules):
+ sys.modules.pop(key, None)
+ sys.modules.update(sys_modules)
+
+
+_validate_install_ansible_base()
+
+
+SYMLINK_CACHE = 'SYMLINK_CACHE.json'
+
+
+def _find_symlinks(topdir, extension=''):
+ """Find symlinks that should be maintained
+
+ Maintained symlinks exist in the bin dir or are modules which have
+    aliases. Our heuristic is that they are links in a certain path which
+ point to a file in the same directory.
+
+    .. warning::
+
+ We want the symlinks in :file:`bin/` that link into :file:`lib/ansible/*` (currently,
+ :command:`ansible`, :command:`ansible-test`, and :command:`ansible-connection`) to become
+ real files on install. Updates to the heuristic here *must not* add them to the symlink
+ cache.
+ """
+ symlinks = defaultdict(list)
+ for base_path, dirs, files in os.walk(topdir):
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ if os.path.islink(filepath) and filename.endswith(extension):
+ target = os.readlink(filepath)
+ if target.startswith('/'):
+ # We do not support absolute symlinks at all
+ continue
+
+ if os.path.dirname(target) == '':
+ link = filepath[len(topdir):]
+ if link.startswith('/'):
+ link = link[1:]
+ symlinks[os.path.basename(target)].append(link)
+ else:
+ # Count how many directory levels from the topdir we are
+ levels_deep = os.path.dirname(filepath).count('/')
+
+ # Count the number of directory levels higher we walk up the tree in target
+ target_depth = 0
+ for path_component in target.split('/'):
+ if path_component == '..':
+ target_depth += 1
+ # If we walk past the topdir, then don't store
+ if target_depth >= levels_deep:
+ break
+ else:
+ target_depth -= 1
+ else:
+ # If we managed to stay within the tree, store the symlink
+ link = filepath[len(topdir):]
+ if link.startswith('/'):
+ link = link[1:]
+ symlinks[target].append(link)
+
+ return symlinks
+
+
+def _cache_symlinks(symlink_data):
+ with open(SYMLINK_CACHE, 'w') as f:
+ json.dump(symlink_data, f)
+
+
+def _maintain_symlinks(symlink_type, base_path):
+ """Switch a real file into a symlink"""
+ try:
+ # Try the cache first because going from git checkout to sdist is the
+ # only time we know that we're going to cache correctly
+ with open(SYMLINK_CACHE, 'r') as f:
+ symlink_data = json.load(f)
+ except (IOError, OSError) as e:
+ # IOError on py2, OSError on py3. Both have errno
+ if e.errno == 2:
+            # SYMLINK_CACHE doesn't exist. Fall back to trying to create the
+ # cache now. Will work if we're running directly from a git
+ # checkout or from an sdist created earlier.
+ library_symlinks = _find_symlinks('lib', '.py')
+ library_symlinks.update(_find_symlinks('test/lib'))
+
+ symlink_data = {'script': _find_symlinks('bin'),
+ 'library': library_symlinks,
+ }
+
+ # Sanity check that something we know should be a symlink was
+ # found. We'll take that to mean that the current directory
+ # structure properly reflects symlinks in the git repo
+ if 'ansible-playbook' in symlink_data['script']['ansible']:
+ _cache_symlinks(symlink_data)
+ else:
+ raise RuntimeError(
+ "Pregenerated symlink list was not present and expected "
+ "symlinks in ./bin were missing or broken. "
+ "Perhaps this isn't a git checkout?"
+ )
+ else:
+ raise
+ symlinks = symlink_data[symlink_type]
+
+ for source in symlinks:
+ for dest in symlinks[source]:
+ dest_path = os.path.join(base_path, dest)
+ if not os.path.islink(dest_path):
+ try:
+ os.unlink(dest_path)
+ except OSError as e:
+                    if e.errno == 2:
+                        # File does not exist, which is all we wanted
+                        pass
+                    else:
+                        raise
+ os.symlink(source, dest_path)
+
+
+class BuildPyCommand(BuildPy):
+ def run(self):
+ BuildPy.run(self)
+ _maintain_symlinks('library', self.build_lib)
+
+
+class BuildScriptsCommand(BuildScripts):
+ def run(self):
+ BuildScripts.run(self)
+ _maintain_symlinks('script', self.build_dir)
+
+
+class InstallLibCommand(InstallLib):
+ def run(self):
+ InstallLib.run(self)
+ _maintain_symlinks('library', self.install_dir)
+
+
+class InstallScriptsCommand(InstallScripts):
+ def run(self):
+ InstallScripts.run(self)
+ _maintain_symlinks('script', self.install_dir)
+
+
+class SDistCommand(SDist):
+ def run(self):
+ # have to generate the cache of symlinks for release as sdist is the
+ # only command that has access to symlinks from the git repo
+ library_symlinks = _find_symlinks('lib', '.py')
+ library_symlinks.update(_find_symlinks('test/lib'))
+
+ symlinks = {'script': _find_symlinks('bin'),
+ 'library': library_symlinks,
+ }
+ _cache_symlinks(symlinks)
+
+ SDist.run(self)
+
+ # Print warnings at the end because no one will see warnings before all the normal status
+ # output
+ if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1':
+ warnings.warn('When setup.py sdist is run from outside of the Makefile,'
+ ' the generated tarball may be incomplete. Use `make snapshot`'
+ ' to create a tarball from an arbitrary checkout or use'
+ ' `cd packaging/release && make release version=[..]` for official builds.',
+ RuntimeWarning)
+
+
+def read_file(file_name):
+ """Read file and return its contents."""
+ with open(file_name, 'r') as f:
+ return f.read()
+
+
+def read_requirements(file_name):
+ """Read requirements file as a list."""
+ reqs = read_file(file_name).splitlines()
+ if not reqs:
+ raise RuntimeError(
+ "Unable to read requirements from the %s file"
+ "That indicates this copy of the source code is incomplete."
+ % file_name
+ )
+ return reqs
+
+
+PYCRYPTO_DIST = 'pycrypto'
+
+
+def get_crypto_req():
+ """Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var.
+
+ pycrypto or cryptography. We choose a default but allow the user to
+ override it. This translates into pip install of the sdist deciding what
+ package to install and also the runtime dependencies that pkg_resources
+ knows about.
+ """
+ crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip()
+
+ if crypto_backend == PYCRYPTO_DIST:
+ # Attempt to set version requirements
+ return '%s >= 2.6' % PYCRYPTO_DIST
+
+ return crypto_backend or None
+
+
+def substitute_crypto_to_req(req):
+ """Replace crypto requirements if customized."""
+ crypto_backend = get_crypto_req()
+
+ if crypto_backend is None:
+ return req
+
+ def is_not_crypto(r):
+ CRYPTO_LIBS = PYCRYPTO_DIST, 'cryptography'
+ return not any(r.lower().startswith(c) for c in CRYPTO_LIBS)
+
+ return [r for r in req if is_not_crypto(r)] + [crypto_backend]
+
+
+def get_dynamic_setup_params():
+ """Add dynamically calculated setup params to static ones."""
+ return {
+ # Retrieve the long description from the README
+ 'long_description': read_file('README.rst'),
+ 'install_requires': substitute_crypto_to_req(
+ read_requirements('requirements.txt'),
+ ),
+ }
+
+
+here = os.path.abspath(os.path.dirname(__file__))
+__version__, __author__ = find_package_info(here, 'lib', 'ansible', 'release.py')
+static_setup_params = dict(
+ # Use the distutils SDist so that symlinks are not expanded
+ # Use a custom Build for the same reason
+ cmdclass={
+ 'build_py': BuildPyCommand,
+ 'build_scripts': BuildScriptsCommand,
+ 'install_lib': InstallLibCommand,
+ 'install_scripts': InstallScriptsCommand,
+ 'sdist': SDistCommand,
+ },
+ name='ansible-base',
+ version=__version__,
+ description='Radically simple IT automation',
+ author=__author__,
+ author_email='info@ansible.com',
+ url='https://ansible.com/',
+ project_urls={
+ 'Bug Tracker': 'https://github.com/ansible/ansible/issues',
+ 'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible',
+ 'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html',
+ 'Documentation': 'https://docs.ansible.com/ansible/',
+ 'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information',
+ 'Source Code': 'https://github.com/ansible/ansible',
+ },
+ license='GPLv3+',
+ # Ansible will also make use of a system copy of python-six and
+    # python-selectors2 if installed but use a bundled copy if it's not.
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
+ package_dir={'': 'lib',
+ 'ansible_test': 'test/lib/ansible_test'},
+ packages=find_packages('lib') + find_packages('test/lib'),
+ include_package_data=True,
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Information Technology',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
+ 'Natural Language :: English',
+ 'Operating System :: POSIX',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Topic :: System :: Installation/Setup',
+ 'Topic :: System :: Systems Administration',
+ 'Topic :: Utilities',
+ ],
+ scripts=[
+ 'bin/ansible',
+ 'bin/ansible-playbook',
+ 'bin/ansible-pull',
+ 'bin/ansible-doc',
+ 'bin/ansible-galaxy',
+ 'bin/ansible-console',
+ 'bin/ansible-connection',
+ 'bin/ansible-vault',
+ 'bin/ansible-config',
+ 'bin/ansible-inventory',
+ 'bin/ansible-test',
+ ],
+ data_files=[],
+ # Installing as zip files would break due to references to __file__
+ zip_safe=False
+)
+
+
+def main():
+ """Invoke installation process using setuptools."""
+ setup_params = dict(static_setup_params, **get_dynamic_setup_params())
+ ignore_warning_regex = (
+ r"Unknown distribution option: '(project_urls|python_requires)'"
+ )
+ warnings.filterwarnings(
+ 'ignore',
+ message=ignore_warning_regex,
+ category=UserWarning,
+ module='distutils.dist',
+ )
+ setup(**setup_params)
+ warnings.resetwarnings()
+
+
+if __name__ == '__main__':
+ main()
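For reference, SYMLINK_CACHE.json (written by SDistCommand above) is a two-section mapping from a link target to the relative links pointing at it. A hypothetical sketch of its shape: the 'script' entry mirrors the sanity check in _maintain_symlinks, while the 'library' entry is invented for illustration (link paths are relative to the walked topdir):

    symlink_cache = {
        'script': {
            # bin/ansible-playbook is a relative symlink to 'ansible' next to it
            'ansible': ['ansible-playbook'],
        },
        'library': {
            # invented example: a module alias pointing at its real implementation
            'real_module.py': ['ansible/modules/alias_module.py'],
        },
    }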
diff --git a/test/ansible_test/Makefile b/test/ansible_test/Makefile
new file mode 100644
index 00000000..7fb2a007
--- /dev/null
+++ b/test/ansible_test/Makefile
@@ -0,0 +1,13 @@
+all: sanity unit validate-modules-unit
+
+.PHONY: sanity
+sanity:
+ $(abspath ${CURDIR}/../../bin/ansible-test) sanity test/lib/ ${FLAGS}
+
+.PHONY: unit
+unit:
+ PYTHONPATH=$(abspath ${CURDIR}/../lib) pytest unit ${FLAGS}
+
+.PHONY: validate-modules-unit
+validate-modules-unit:
+ PYTHONPATH=$(abspath ${CURDIR}/../lib/ansible_test/_data/sanity/validate-modules):$(abspath ${CURDIR}/../../lib) pytest validate-modules-unit ${FLAGS}
diff --git a/test/ansible_test/unit/test_diff.py b/test/ansible_test/unit/test_diff.py
new file mode 100644
index 00000000..1f2559d2
--- /dev/null
+++ b/test/ansible_test/unit/test_diff.py
@@ -0,0 +1,105 @@
+"""Tests for diff module."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import subprocess
+import pytest
+
+from ansible_test._internal.util import (
+ to_text,
+ to_bytes,
+)
+
+from ansible_test._internal.diff import (
+ parse_diff,
+ FileDiff,
+)
+
+
+def get_diff(base, head=None):
+ """Return a git diff between the base and head revision.
+ :type base: str
+ :type head: str | None
+ :rtype: list[str]
+ """
+ if not head or head == 'HEAD':
+ head = to_text(subprocess.check_output(['git', 'rev-parse', 'HEAD'])).strip()
+
+ cache = '/tmp/git-diff-cache-%s-%s.log' % (base, head)
+
+ if os.path.exists(cache):
+ with open(cache, 'rb') as cache_fd:
+ lines = to_text(cache_fd.read()).splitlines()
+ else:
+ lines = to_text(subprocess.check_output(['git', 'diff', base, head]), errors='replace').splitlines()
+
+ with open(cache, 'wb') as cache_fd:
+ cache_fd.write(to_bytes('\n'.join(lines)))
+
+ assert lines
+
+ return lines
+
+
+def get_parsed_diff(base, head=None):
+ """Return a parsed git diff between the base and head revision.
+ :type base: str
+ :type head: str | None
+ :rtype: list[FileDiff]
+ """
+ lines = get_diff(base, head)
+ items = parse_diff(lines)
+
+ assert items
+
+ for item in items:
+ assert item.headers
+ assert item.is_complete
+
+ item.old.format_lines()
+ item.new.format_lines()
+
+ for line_range in item.old.ranges:
+ assert line_range[1] >= line_range[0] > 0
+
+ for line_range in item.new.ranges:
+ assert line_range[1] >= line_range[0] > 0
+
+ return items
+
+
+RANGES_TO_TEST = (
+ ('f31421576b00f0b167cdbe61217c31c21a41ac02', 'HEAD'),
+ ('b8125ac1a61f2c7d1de821c78c884560071895f1', '32146acf4e43e6f95f54d9179bf01f0df9814217')
+)
+
+
+@pytest.mark.parametrize("base, head", RANGES_TO_TEST)
+def test_parse_diff(base, head):
+ """Integration test to verify parsing of ansible/ansible history."""
+ get_parsed_diff(base, head)
+
+
+def test_parse_delete():
+ """Integration test to verify parsing of a deleted file."""
+ commit = 'ee17b914554861470b382e9e80a8e934063e0860'
+ items = get_parsed_diff(commit + '~', commit)
+ deletes = [item for item in items if not item.new.exists]
+
+ assert len(deletes) == 1
+ assert deletes[0].old.path == 'lib/ansible/plugins/connection/nspawn.py'
+ assert deletes[0].new.path == 'lib/ansible/plugins/connection/nspawn.py'
+
+
+def test_parse_rename():
+ """Integration test to verify parsing of renamed files."""
+ commit = '16a39639f568f4dd5cb233df2d0631bdab3a05e9'
+ items = get_parsed_diff(commit + '~', commit)
+ renames = [item for item in items if item.old.path != item.new.path and item.old.exists and item.new.exists]
+
+ assert len(renames) == 2
+ assert renames[0].old.path == 'test/integration/targets/eos_eapi/tests/cli/badtransport.yaml'
+ assert renames[0].new.path == 'test/integration/targets/eos_eapi/tests/cli/badtransport.1'
+ assert renames[1].old.path == 'test/integration/targets/eos_eapi/tests/cli/zzz_reset.yaml'
+ assert renames[1].new.path == 'test/integration/targets/eos_eapi/tests/cli/zzz_reset.1'
diff --git a/test/ansible_test/validate-modules-unit/test_validate_modules_regex.py b/test/ansible_test/validate-modules-unit/test_validate_modules_regex.py
new file mode 100644
index 00000000..8c0b45ca
--- /dev/null
+++ b/test/ansible_test/validate-modules-unit/test_validate_modules_regex.py
@@ -0,0 +1,43 @@
+"""Tests for validate-modules regexes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from validate_modules.main import TYPE_REGEX
+
+
+@pytest.mark.parametrize('cstring,cexpected', [
+ ['if type(foo) is Bar', True],
+ ['if Bar is type(foo)', True],
+ ['if type(foo) is not Bar', True],
+ ['if Bar is not type(foo)', True],
+ ['if type(foo) == Bar', True],
+ ['if Bar == type(foo)', True],
+ ['if type(foo)==Bar', True],
+ ['if Bar==type(foo)', True],
+ ['if type(foo) != Bar', True],
+ ['if Bar != type(foo)', True],
+ ['if type(foo)!=Bar', True],
+ ['if Bar!=type(foo)', True],
+ ['if foo or type(bar) != Bar', True],
+ ['x = type(foo)', False],
+ ["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False],
+ # cloud/amazon/ec2_group.py
+ ["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False],
+ # files/patch.py
+ ["p = type('Params', (), module.params)", False], # files/patch.py
+ # system/osx_defaults.py
+ ["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True],
+ # system/osx_defaults.py
+ ['raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)', False],
+ # network/nxos/nxos_interface.py
+ ["if get_interface_type(interface) == 'svi':", False],
+])
+def test_type_regex(cstring, cexpected): # type: (str, bool) -> None
+ """Check TYPE_REGEX against various examples to verify it correctly matches or does not match."""
+ match = TYPE_REGEX.match(cstring)
+ if cexpected and not match:
+ assert False, "%s should have matched" % cstring
+ elif not cexpected and match:
+ assert False, "%s should not have matched" % cstring
diff --git a/test/integration/network-integration.cfg b/test/integration/network-integration.cfg
new file mode 100644
index 00000000..00764bcd
--- /dev/null
+++ b/test/integration/network-integration.cfg
@@ -0,0 +1,14 @@
+# NOTE: This file is used by ansible-test to override specific Ansible constants
+# This file is used by `ansible-test network-integration`
+
+[defaults]
+host_key_checking = False
+timeout = 90
+
+[ssh_connection]
+ssh_args = '-o UserKnownHostsFile=/dev/null'
+
+[persistent_connection]
+command_timeout = 100
+connect_timeout = 100
+connect_retry_timeout = 100
diff --git a/test/integration/network-integration.requirements.txt b/test/integration/network-integration.requirements.txt
new file mode 100644
index 00000000..9c4d78d6
--- /dev/null
+++ b/test/integration/network-integration.requirements.txt
@@ -0,0 +1 @@
+scp # needed by incidental_ios_file
diff --git a/test/integration/targets/add_host/aliases b/test/integration/targets/add_host/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/add_host/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/add_host/tasks/main.yml b/test/integration/targets/add_host/tasks/main.yml
new file mode 100644
index 00000000..399b0b6b
--- /dev/null
+++ b/test/integration/targets/add_host/tasks/main.yml
@@ -0,0 +1,159 @@
+# test code for the add_host action
+# (c) 2015, Matt Davis <mdavis@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# See https://github.com/ansible/ansible/issues/36045
+- set_fact:
+ inventory_data:
+ ansible_ssh_common_args: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
+ # ansible_ssh_host: "127.0.0.3"
+ ansible_host: "127.0.0.3"
+ ansible_ssh_pass: "foobar"
+ # ansible_ssh_port: "2222"
+ ansible_port: "2222"
+ ansible_ssh_private_key_file: "/tmp/inventory-cloudj9cGz5/identity"
+ ansible_ssh_user: "root"
+ hostname: "newdynamichost2"
+
+- name: Show inventory_data for 36045
+ debug:
+ msg: "{{ inventory_data }}"
+
+- name: Add host from dict 36045
+ add_host: "{{ inventory_data }}"
+
+- name: show newly added host
+ debug:
+ msg: "{{hostvars['newdynamichost2'].group_names}}"
+
+- name: ensure that dynamically-added newdynamichost2 is visible via hostvars, groups 36045
+ assert:
+ that:
+ - hostvars['newdynamichost2'] is defined
+ - hostvars['newdynamichost2'].group_names is defined
+
+# end of https://github.com/ansible/ansible/issues/36045 related tests
+
+- name: add a host to the runtime inventory
+ add_host:
+ name: newdynamichost
+ groups: newdynamicgroup
+ a_var: from add_host
+
+- debug: msg={{hostvars['newdynamichost'].group_names}}
+
+- name: ensure that dynamically-added host is visible via hostvars, groups, etc (there are several caches that could break this)
+ assert:
+ that:
+      - hostvars['bogushost'] is not defined # there was a bug where an undefined host was a "type" instead of an instance; ensure this works before we rely on it
+ - hostvars['newdynamichost'] is defined
+ - hostvars['newdynamichost'].group_names is defined
+ - "'newdynamicgroup' in hostvars['newdynamichost'].group_names"
+ - hostvars['newdynamichost']['bogusvar'] is not defined
+ - hostvars['newdynamichost']['a_var'] is defined
+ - hostvars['newdynamichost']['a_var'] == 'from add_host'
+ - groups['bogusgroup'] is not defined # same check as above to ensure that bogus groups are undefined...
+ - groups['newdynamicgroup'] is defined
+ - "'newdynamichost' in groups['newdynamicgroup']"
+
+# Tests for idempotency
+- name: Add testhost01 dynamic host
+ add_host:
+ name: testhost01
+ register: add_testhost01
+
+- name: Try adding testhost01 again, with no changes
+ add_host:
+ name: testhost01
+ register: add_testhost01_idem
+
+- name: Add a host variable to testhost01
+ add_host:
+ name: testhost01
+ foo: bar
+ register: hostvar_testhost01
+
+- name: Add the same host variable to testhost01, with no changes
+ add_host:
+ name: testhost01
+ foo: bar
+ register: hostvar_testhost01_idem
+
+- name: Add another host, testhost02
+ add_host:
+ name: testhost02
+ register: add_testhost02
+
+- name: Add it again for good measure
+ add_host:
+ name: testhost02
+ register: add_testhost02_idem
+
+- name: Add testhost02 to a group
+ add_host:
+ name: testhost02
+ groups:
+ - testhostgroup
+ register: add_group_testhost02
+
+- name: Add testhost01 to the same group
+ add_host:
+ name: testhost01
+ groups:
+ - testhostgroup
+ register: add_group_testhost01
+
+- name: Add testhost02 to the group again
+ add_host:
+ name: testhost02
+ groups:
+ - testhostgroup
+ register: add_group_testhost02_idem
+
+- name: Add testhost01 to the group again
+ add_host:
+ name: testhost01
+ groups:
+ - testhostgroup
+ register: add_group_testhost01_idem
+
+- assert:
+ that:
+ - add_testhost01 is changed
+ - add_testhost01_idem is not changed
+ - hostvar_testhost01 is changed
+ - hostvar_testhost01_idem is not changed
+ - add_testhost02 is changed
+ - add_testhost02_idem is not changed
+ - add_group_testhost02 is changed
+ - add_group_testhost01 is changed
+ - add_group_testhost02_idem is not changed
+ - add_group_testhost01_idem is not changed
+ - groups['testhostgroup']|length == 2
+ - "'testhost01' in groups['testhostgroup']"
+ - "'testhost02' in groups['testhostgroup']"
+ - hostvars['testhost01']['foo'] == 'bar'
+
+- name: Give invalid input
+ add_host: namenewdynamichost groupsnewdynamicgroup a_varfromadd_host
+ ignore_errors: true
+ register: badinput
+
+- name: verify we detected bad input
+ assert:
+ that:
+ - badinput is failed
diff --git a/test/integration/targets/ansiballz_python/aliases b/test/integration/targets/ansiballz_python/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/ansiballz_python/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py b/test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py
new file mode 100644
index 00000000..a01ee997
--- /dev/null
+++ b/test/integration/targets/ansiballz_python/library/check_rlimit_and_maxfd.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+#
+# Copyright 2018 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import resource
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+
+ rlimit_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ try:
+ maxfd = subprocess.MAXFD
+ except AttributeError:
+ maxfd = -1
+
+ module.exit_json(rlimit_nofile=rlimit_nofile, maxfd=maxfd, infinity=resource.RLIM_INFINITY)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansiballz_python/library/custom_module.py b/test/integration/targets/ansiballz_python/library/custom_module.py
new file mode 100644
index 00000000..625823ea
--- /dev/null
+++ b/test/integration/targets/ansiballz_python/library/custom_module.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ..module_utils.basic import AnsibleModule # pylint: disable=relative-beyond-top-level
+from ..module_utils.custom_util import forty_two # pylint: disable=relative-beyond-top-level
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+
+ module.exit_json(answer=forty_two())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansiballz_python/library/sys_check.py b/test/integration/targets/ansiballz_python/library/sys_check.py
new file mode 100644
index 00000000..aa22fe68
--- /dev/null
+++ b/test/integration/targets/ansiballz_python/library/sys_check.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+# https://github.com/ansible/ansible/issues/64664
+# https://github.com/ansible/ansible/issues/64479
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule({})
+
+ this_module = sys.modules[__name__]
+ module.exit_json(
+ failed=not getattr(this_module, 'AnsibleModule', False)
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansiballz_python/module_utils/custom_util.py b/test/integration/targets/ansiballz_python/module_utils/custom_util.py
new file mode 100644
index 00000000..0393db47
--- /dev/null
+++ b/test/integration/targets/ansiballz_python/module_utils/custom_util.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+def forty_two():
+ return 42
diff --git a/test/integration/targets/ansiballz_python/tasks/main.yml b/test/integration/targets/ansiballz_python/tasks/main.yml
new file mode 100644
index 00000000..0aaa6451
--- /dev/null
+++ b/test/integration/targets/ansiballz_python/tasks/main.yml
@@ -0,0 +1,68 @@
+- name: get the ansible-test imposed file descriptor limit
+ check_rlimit_and_maxfd:
+ register: rlimit_limited_return
+
+- name: get existing file descriptor limit
+ check_rlimit_and_maxfd:
+ register: rlimit_original_return
+ vars:
+ ansible_python_module_rlimit_nofile: 0 # ignore limit set by ansible-test
+
+- name: attempt to set a value lower than existing soft limit
+ check_rlimit_and_maxfd:
+ vars:
+ ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[0] - 1 }}'
+ register: rlimit_below_soft_return
+
+- name: attempt to set a value higher than existing soft limit
+ check_rlimit_and_maxfd:
+ vars:
+ ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[0] + 1 }}'
+ register: rlimit_above_soft_return
+
+- name: attempt to set a value lower than existing hard limit
+ check_rlimit_and_maxfd:
+ vars:
+ ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[1] - 1 }}'
+ register: rlimit_below_hard_return
+
+- name: attempt to set a value higher than existing hard limit
+ check_rlimit_and_maxfd:
+ vars:
+ ansible_python_module_rlimit_nofile: '{{ rlimit_original_return.rlimit_nofile[1] + 1 }}'
+ register: rlimit_above_hard_return
+
+- name: run a role module which uses a role module_util using relative imports
+ custom_module:
+ register: custom_module_return
+
+- assert:
+ that:
+ # make sure ansible-test was able to set the limit unless it exceeds the hard limit or the value is lower on macOS
+ - rlimit_limited_return.rlimit_nofile[0] == 1024 or rlimit_original_return.rlimit_nofile[1] < 1024 or (rlimit_limited_return.rlimit_nofile[0] < 1024 and ansible_distribution == 'MacOSX')
+ # make sure that maxfd matches the soft limit on Python 2.x (-1 on Python 3.x)
+ - rlimit_limited_return.maxfd == rlimit_limited_return.rlimit_nofile[0] or rlimit_limited_return.maxfd == -1
+
+ # we should always be able to set the limit lower than the existing soft limit
+ - rlimit_below_soft_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[0] - 1
+ # the hard limit should not have changed
+ - rlimit_below_soft_return.rlimit_nofile[1] == rlimit_original_return.rlimit_nofile[1]
+ # lowering the limit should also lower the max file descriptors reported by Python 2.x (-1 on Python 3.x)
+ - rlimit_below_soft_return.maxfd == rlimit_original_return.rlimit_nofile[0] - 1 or rlimit_below_soft_return.maxfd == -1
+
+ # we should be able to set the limit higher than the existing soft limit if it does not exceed the hard limit (except on macOS)
+ - rlimit_above_soft_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[0] + 1 or rlimit_original_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[1] or ansible_distribution == 'MacOSX'
+
+ # we should be able to set the limit lower than the existing hard limit (except on macOS)
+ - rlimit_below_hard_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[1] - 1 or ansible_distribution == 'MacOSX'
+
+ # setting the limit higher than the existing hard limit should use the hard limit (except on macOS)
+ - rlimit_above_hard_return.rlimit_nofile[0] == rlimit_original_return.rlimit_nofile[1] or ansible_distribution == 'MacOSX'
+
+ # custom module returned the correct answer
+ - custom_module_return.answer == 42
+
+# https://github.com/ansible/ansible/issues/64664
+# https://github.com/ansible/ansible/issues/64479
+- name: Run module that tries to access itself via sys.modules
+ sys_check:
diff --git a/test/integration/targets/ansible-doc/aliases b/test/integration/targets/ansible-doc/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/ansible-doc/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json
new file mode 100644
index 00000000..243a5e43
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/MANIFEST.json
@@ -0,0 +1,30 @@
+{
+ "collection_info": {
+ "description": null,
+ "repository": "",
+ "tags": [],
+ "dependencies": {},
+ "authors": [
+ "Ansible (https://ansible.com)"
+ ],
+ "issues": "",
+ "name": "testcol",
+ "license": [
+ "GPL-3.0-or-later"
+ ],
+ "documentation": "",
+ "namespace": "testns",
+ "version": "0.1.1231",
+ "readme": "README.md",
+ "license_file": "COPYING",
+ "homepage": "",
+ },
+ "file_manifest_file": {
+ "format": 1,
+ "ftype": "file",
+ "chksum_sha256": "4c15a867ceba8ba1eaf2f4a58844bb5dbb82fec00645fc7eb74a3d31964900f6",
+ "name": "FILES.json",
+ "chksum_type": "sha256"
+ },
+ "format": 1
+}
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py
new file mode 100644
index 00000000..ee56f6ee
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/cache/notjsonfile.py
@@ -0,0 +1,49 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: notjsonfile
+ short_description: JSON formatted files.
+ description:
+ - This cache uses JSON formatted, per host, files saved to the filesystem.
+ author: Ansible Core (@ansible-core)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by json files.
+ """
+ pass
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py
new file mode 100644
index 00000000..cbb8f0fb
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/inventory/statichost.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ inventory: statichost
+ short_description: Add a single host
+ description: Add a single host
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+ description: plugin name (must be statichost)
+ required: true
+ hostname:
+            description: The hostname to add to the inventory
+ required: True
+'''
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+
+ NAME = 'testns.content_adj.statichost'
+
+ def verify_file(self, path):
+ pass
+
+ def parse(self, inventory, loader, path, cache=None):
+
+ pass
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py
new file mode 100644
index 00000000..daecac5d
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/lookup/noop.py
@@ -0,0 +1,37 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: noop
+ author: Ansible core team
+ short_description: returns input
+ description:
+ - this is a noop
+"""
+
+EXAMPLES = """
+- name: do nothing
+ debug: msg="{{ lookup('testns.testcol.noop', [1,2,3,4] }}"
+"""
+
+RETURN = """
+ _list:
+ description: input given
+"""
+
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+ if not isinstance(terms, Sequence):
+ raise AnsibleError("testns.testcol.noop expects a list")
+ return terms
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py
new file mode 100644
index 00000000..decdbef4
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py
@@ -0,0 +1,26 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+ module: fakemodule
+ short_desciptoin: fake module
+ description:
+ - this is a fake module
+ options:
+ _notreal:
+ description: really not a real option
+ author:
+ - me
+"""
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='testns.testcol.fakemodule')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py
new file mode 100644
index 00000000..4479f23f
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/modules/notrealmodule.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='testns.testcol.notrealmodule')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py
new file mode 100644
index 00000000..ccb33b04
--- /dev/null
+++ b/test/integration/targets/ansible-doc/collections/ansible_collections/testns/testcol/plugins/vars/noop_vars_plugin.py
@@ -0,0 +1,27 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: noop_vars_plugin
+ short_description: Do NOT load host and group vars
+ description: don't test loading host and group vars from a collection
+ options:
+ stage:
+ default: all
+ choices: ['all', 'inventory', 'task']
+ type: str
+ ini:
+ - key: stage
+ section: testns.testcol.noop_vars_plugin
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+'''
+
+from ansible.plugins.vars import BaseVarsPlugin
+
+
+class VarsModule(BaseVarsPlugin):
+
+ def get_vars(self, loader, path, entities, cache=True):
+ super(VarsModule, self).get_vars(loader, path, entities)
+ return {'collection': 'yes', 'notreal': 'value'}
diff --git a/test/integration/targets/ansible-doc/fakemodule.output b/test/integration/targets/ansible-doc/fakemodule.output
new file mode 100644
index 00000000..adc27e08
--- /dev/null
+++ b/test/integration/targets/ansible-doc/fakemodule.output
@@ -0,0 +1,15 @@
+> TESTNS.TESTCOL.FAKEMODULE (./collections/ansible_collections/testns/testcol/plugins/modules/fakemodule.py)
+
+ this is a fake module
+
+OPTIONS (= is mandatory):
+
+- _notreal
+ really not a real option
+ [Default: (null)]
+
+
+AUTHOR: me
+
+SHORT_DESCIPTOIN: fake module
+
diff --git a/test/integration/targets/ansible-doc/inventory b/test/integration/targets/ansible-doc/inventory
new file mode 100644
index 00000000..ab9b62c8
--- /dev/null
+++ b/test/integration/targets/ansible-doc/inventory
@@ -0,0 +1 @@
+not_empty # avoid empty hosts list warning without defining explicit localhost
diff --git a/test/integration/targets/ansible-doc/library/test_docs.py b/test/integration/targets/ansible-doc/library/test_docs.py
new file mode 100644
index 00000000..39ae3728
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: test_docs
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_missing_description.py b/test/integration/targets/ansible-doc/library/test_docs_missing_description.py
new file mode 100644
index 00000000..6ed41836
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_missing_description.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: test_docs_missing_description
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+options:
+ test:
+ type: str
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ test=dict(type='str'),
+ ),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_no_metadata.py b/test/integration/targets/ansible-doc/library/test_docs_no_metadata.py
new file mode 100644
index 00000000..4ea86f02
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_no_metadata.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: test_docs_no_metadata
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_no_status.py b/test/integration/targets/ansible-doc/library/test_docs_no_status.py
new file mode 100644
index 00000000..1b0db4e9
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_no_status.py
@@ -0,0 +1,38 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: test_docs_no_status
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py b/test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py
new file mode 100644
index 00000000..63d080f6
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_non_iterable_status.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': 1,
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: test_docs_non_iterable_status
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py b/test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py
new file mode 100644
index 00000000..3de1c690
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_removed_precedence.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: test_docs_removed_precedence
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+deprecated:
+ alternative: new_module
+ why: Updated module released with more functionality
+ removed_at_date: '2022-06-01'
+ removed_in: '2.14'
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_removed_status.py b/test/integration/targets/ansible-doc/library/test_docs_removed_status.py
new file mode 100644
index 00000000..cb48c169
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_removed_status.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['removed'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: test_docs_removed_status
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_returns.py b/test/integration/targets/ansible-doc/library/test_docs_returns.py
new file mode 100644
index 00000000..77c13764
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_returns.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: test_docs_returns
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+z_last:
+ description: A last result.
+ type: str
+ returned: success
+
+m_middle:
+ description:
+ - This should be in the middle.
+ - Has some more data
+ type: dict
+ returned: success and 1st of month
+ contains:
+ suboption:
+ description: A suboption.
+ type: str
+ choices: [ARF, BARN, c_without_capital_first_letter]
+
+a_first:
+ description: A first result.
+ type: str
+ returned: success
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
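
Note that RETURN above declares z_last, m_middle, a_first in that order, while the recorded test_docs_returns.output fixture later in this patch lists them alphabetically: ansible-doc sorts the keys when rendering. A sketch of that reordering, again assuming only PyYAML:

import yaml

RETURN = '''
z_last: {description: A last result., type: str}
m_middle: {description: This should be in the middle., type: dict}
a_first: {description: A first result., type: str}
'''

# sorting the keys reproduces the a_first, m_middle, z_last order
# seen in the recorded .output fixture
print(sorted(yaml.safe_load(RETURN)))
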
diff --git a/test/integration/targets/ansible-doc/library/test_docs_returns_broken.py b/test/integration/targets/ansible-doc/library/test_docs_returns_broken.py
new file mode 100644
index 00000000..d6d62643
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_returns_broken.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: test_docs_returns_broken
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+test:
+ description: A test return value.
+ type: str
+
+broken_key: [
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_docs_suboptions.py b/test/integration/targets/ansible-doc/library/test_docs_suboptions.py
new file mode 100644
index 00000000..c922d1d6
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_docs_suboptions.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: test_docs_suboptions
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+options:
+ with_suboptions:
+ description:
+ - An option with suboptions.
+ - Use with care.
+ type: dict
+ suboptions:
+ z_last:
+ description: The last suboption.
+ type: str
+ m_middle:
+ description:
+ - The suboption in the middle.
+ - Has its own suboptions.
+ suboptions:
+ a_suboption:
+ description: A sub-suboption.
+ type: str
+ a_first:
+ description: The first suboption.
+ type: str
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ test_docs_suboptions=dict(
+ type='dict',
+ options=dict(
+ a_first=dict(type='str'),
+ m_middle=dict(
+ type='dict',
+ options=dict(
+ a_suboption=dict(type='str')
+ ),
+ ),
+ z_last=dict(type='str'),
+ ),
+ ),
+ ),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_empty.py b/test/integration/targets/ansible-doc/library/test_empty.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_empty.py
diff --git a/test/integration/targets/ansible-doc/library/test_no_docs.py b/test/integration/targets/ansible-doc/library/test_no_docs.py
new file mode 100644
index 00000000..5503aedb
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_no_docs.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py b/test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py
new file mode 100644
index 00000000..48872684
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_no_docs_no_metadata.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_no_docs_no_status.py b/test/integration/targets/ansible-doc/library/test_no_docs_no_status.py
new file mode 100644
index 00000000..f90c5c71
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_no_docs_no_status.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'supported_by': 'core'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py b/test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py
new file mode 100644
index 00000000..44fbedee
--- /dev/null
+++ b/test/integration/targets/ansible-doc/library/test_no_docs_non_iterable_status.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': 1,
+ 'supported_by': 'core'}
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-doc/runme.sh b/test/integration/targets/ansible-doc/runme.sh
new file mode 100755
index 00000000..b5929f60
--- /dev/null
+++ b/test/integration/targets/ansible-doc/runme.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -eux
+ansible-playbook test.yml -i inventory "$@"
+
+(
+unset ANSIBLE_PLAYBOOK_DIR
+cd "$(dirname "$0")"
+
+# test module docs from collection
+current_out="$(ansible-doc --playbook-dir ./ testns.testcol.fakemodule)"
+expected_out="$(cat fakemodule.output)"
+test "$current_out" == "$expected_out"
+
+# ensure we do work with valid collection name for list
+ansible-doc --list testns.testcol --playbook-dir ./ 2>&1 | grep -v "Invalid collection pattern"
+
+# ensure we don't break on invalid collection name for list
+ansible-doc --list testns.testcol.fakemodule --playbook-dir ./ 2>&1 | grep "Invalid collection pattern"
+
+
+# test listing diff plugin types from collection
+for ptype in cache inventory lookup vars
+do
+ # each plugin type adds 1 from collection
+ # FIXME pre=$(ansible-doc -l -t ${ptype}|wc -l)
+ # FIXME post=$(ansible-doc -l -t ${ptype} --playbook-dir ./|wc -l)
+ # FIXME test "$pre" -eq $((post - 1))
+
+ # ensure we ONLY list from the collection
+ justcol=$(ansible-doc -l -t ${ptype} --playbook-dir ./ testns.testcol|wc -l)
+ test "$justcol" -eq 1
+
+ # ensure we get 0 plugins when restricting to the collection without supplying --playbook-dir
+ justcol=$(ansible-doc -l -t ${ptype} testns.testcol|wc -l)
+ test "$justcol" -eq 0
+
+ # ensure we get 1 plugin when restricting to the namespace
+ justcol=$(ansible-doc -l -t ${ptype} --playbook-dir ./ testns|wc -l)
+ test "$justcol" -eq 1
+done
+)
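
The loop above leans on ansible-doc printing one listing line per plugin. The same counts can be reproduced from Python, assuming ansible-doc is on PATH and the working directory is this test target (a sketch mirroring the shell checks, not part of the test):

import subprocess

for ptype in ('cache', 'inventory', 'lookup', 'vars'):
    out = subprocess.run(
        ['ansible-doc', '-l', '-t', ptype, '--playbook-dir', '.', 'testns.testcol'],
        capture_output=True, text=True, check=True,
    ).stdout
    # the collection ships exactly one plugin of each of these types
    assert len(out.splitlines()) == 1
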
diff --git a/test/integration/targets/ansible-doc/test.yml b/test/integration/targets/ansible-doc/test.yml
new file mode 100644
index 00000000..a077a994
--- /dev/null
+++ b/test/integration/targets/ansible-doc/test.yml
@@ -0,0 +1,138 @@
+- hosts: localhost
+ gather_facts: no
+ environment:
+ ANSIBLE_LIBRARY: "{{ playbook_dir }}/library"
+ tasks:
+ - name: module with missing description return docs
+ command: ansible-doc test_docs_missing_description
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - |
+ "ERROR! Unable to retrieve documentation from 'test_docs_missing_description' due to: All (sub-)options and return values must have a 'description' field"
+ in result.stderr
+
+ - name: module with suboptions
+ command: ansible-doc test_docs_suboptions
+ register: result
+ ignore_errors: true
+
+ - set_fact:
+ actual_output: >-
+ {{ result.stdout | regex_replace('^(> [A-Z_]+ +\().+library/([a-z_]+.py)\)$', '\1library/\2)', multiline=true) }}
+ expected_output: "{{ lookup('file', 'test_docs_suboptions.output') }}"
+
+ - assert:
+ that:
+ - result is succeeded
+ - actual_output == expected_output
+
+ - name: module with return docs
+ command: ansible-doc test_docs_returns
+ register: result
+ ignore_errors: true
+
+ - set_fact:
+ actual_output: >-
+ {{ result.stdout | regex_replace('^(> [A-Z_]+ +\().+library/([a-z_]+.py)\)$', '\1library/\2)', multiline=true) }}
+ expected_output: "{{ lookup('file', 'test_docs_returns.output') }}"
+
+ - assert:
+ that:
+ - result is succeeded
+ - actual_output == expected_output
+
+ - name: module with broken return docs
+ command: ansible-doc test_docs_returns_broken
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - '"ERROR! module test_docs_returns_broken missing documentation (or could not parse documentation)" in result.stderr'
+
+ - name: non-existent module
+ command: ansible-doc test_does_not_exist
+ register: result
+ - assert:
+ that:
+ - '"[WARNING]: module test_does_not_exist not found in:" in result.stderr'
+
+ - name: documented module
+ command: ansible-doc test_docs
+ register: result
+ - assert:
+ that:
+ - '"WARNING" not in result.stderr'
+ - '"TEST_DOCS " in result.stdout'
+ - '"AUTHOR: Ansible Core Team" in result.stdout'
+
+ - name: documented module without metadata
+ command: ansible-doc test_docs_no_metadata
+ register: result
+ - assert:
+ that:
+ - '"WARNING" not in result.stderr'
+ - '"TEST_DOCS_NO_METADATA " in result.stdout'
+ - '"AUTHOR: Ansible Core Team" in result.stdout'
+
+ - name: documented module with no status in metadata
+ command: ansible-doc test_docs_no_status
+ register: result
+ - assert:
+ that:
+ - '"WARNING" not in result.stderr'
+ - '"TEST_DOCS_NO_STATUS " in result.stdout'
+ - '"AUTHOR: Ansible Core Team" in result.stdout'
+
+ - name: documented module with non-iterable status in metadata
+ command: ansible-doc test_docs_non_iterable_status
+ register: result
+ - assert:
+ that:
+ - '"WARNING" not in result.stderr'
+ - '"TEST_DOCS_NON_ITERABLE_STATUS " in result.stdout'
+ - '"AUTHOR: Ansible Core Team" in result.stdout'
+
+ - name: documented module with removed status
+ command: ansible-doc test_docs_removed_status
+ register: result
+
+ - assert:
+ that:
+ - '"WARNING" not in result.stderr'
+ - '"TEST_DOCS_REMOVED_STATUS " in result.stdout'
+ - '"AUTHOR: Ansible Core Team" in result.stdout'
+
+ - name: empty module
+ command: ansible-doc test_empty
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+
+ - name: module with no documentation
+ command: ansible-doc test_no_docs
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+
+ - name: deprecated module with both removed date and version (date should get precedence)
+ command: ansible-doc test_docs_removed_precedence
+ register: result
+
+ - assert:
+ that:
+ - '"DEPRECATED" in result.stdout'
+ - '"Reason: Updated module released with more functionality" in result.stdout'
+ - '"Will be removed in a release after 2022-06-01" in result.stdout'
+ - '"Alternatives: new_module" in result.stdout'
diff --git a/test/integration/targets/ansible-doc/test_docs_returns.output b/test/integration/targets/ansible-doc/test_docs_returns.output
new file mode 100644
index 00000000..9fbbc8c7
--- /dev/null
+++ b/test/integration/targets/ansible-doc/test_docs_returns.output
@@ -0,0 +1,37 @@
+> TEST_DOCS_RETURNS (library/test_docs_returns.py)
+
+ Test module
+
+AUTHOR: Ansible Core Team
+
+EXAMPLES:
+
+
+
+
+RETURN VALUES:
+- a_first
+ A first result.
+
+ returned: success
+ type: str
+
+- m_middle
+ This should be in the middle.
+ Has some more data
+
+ returned: success and 1st of month
+ type: dict
+
+ CONTAINS:
+
+ - suboption
+ A suboption.
+ (Choices: ARF, BARN, c_without_capital_first_letter)
+ type: str
+
+- z_last
+ A last result.
+
+ returned: success
+ type: str
diff --git a/test/integration/targets/ansible-doc/test_docs_suboptions.output b/test/integration/targets/ansible-doc/test_docs_suboptions.output
new file mode 100644
index 00000000..52b51d9d
--- /dev/null
+++ b/test/integration/targets/ansible-doc/test_docs_suboptions.output
@@ -0,0 +1,43 @@
+> TEST_DOCS_SUBOPTIONS (library/test_docs_suboptions.py)
+
+ Test module
+
+OPTIONS (= is mandatory):
+
+- with_suboptions
+ An option with suboptions.
+ Use with care.
+ [Default: (null)]
+ type: dict
+
+ SUBOPTIONS:
+
+ - a_first
+ The first suboption.
+ [Default: (null)]
+ type: str
+
+ - m_middle
+ The suboption in the middle.
+ Has its own suboptions.
+ [Default: (null)]
+
+ SUBOPTIONS:
+
+ - a_suboption
+ A sub-suboption.
+ [Default: (null)]
+ type: str
+
+ - z_last
+ The last suboption.
+ [Default: (null)]
+ type: str
+
+
+AUTHOR: Ansible Core Team
+
+EXAMPLES:
+
+
+
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/aliases b/test/integration/targets/ansible-galaxy-collection-scm/aliases
new file mode 100644
index 00000000..9c34b360
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group4
+skip/aix
+skip/python2.6 # ansible-galaxy uses tarfile with features not available until 2.7
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml b/test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml
new file mode 100644
index 00000000..e3dd5fb1
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml
new file mode 100644
index 00000000..672b849c
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/download.yml
@@ -0,0 +1,47 @@
+- name: create test download dir
+ file:
+ path: '{{ galaxy_dir }}/download'
+ state: directory
+
+- name: download a git repository
+ command: >
+ ansible-galaxy collection download
+ git+https://github.com/ansible-collections/amazon.aws.git,37875c5b4ba5bf3cc43e07edf29f3432fd76def5
+ git+https://github.com/AlanCoding/awx.git#awx_collection,750c22a150d04eef1cb625fd4f83cce57949416c
+ args:
+ chdir: '{{ galaxy_dir }}/download'
+ register: download_collection
+
+- name: check that the amazon.aws collection was downloaded
+ stat:
+ path: '{{ galaxy_dir }}/download/collections/amazon-aws-1.0.0.tar.gz'
+ register: download_collection_amazon_actual
+
+- name: check that the awx.awx collection was downloaded
+ stat:
+ path: '{{ galaxy_dir }}/download/collections/awx-awx-0.0.1-devel.tar.gz'
+ register: download_collection_awx_actual
+
+- assert:
+ that:
+ - '"Downloading collection ''amazon.aws'' to" in download_collection.stdout'
+ - '"Downloading collection ''awx.awx'' to" in download_collection.stdout'
+ - download_collection_amazon_actual.stat.exists
+ - download_collection_awx_actual.stat.exists
+
+- name: test the downloaded repository can be installed
+ command: 'ansible-galaxy collection install -r requirements.yml'
+ args:
+ chdir: '{{ galaxy_dir }}/download/collections/'
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'amazon.aws' in installed_collections.stdout"
+ - "'awx.awx' in installed_collections.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml
new file mode 100644
index 00000000..f21a6f6b
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/empty_installed_collections.yml
@@ -0,0 +1,7 @@
+- name: delete installed collections
+ file:
+ state: "{{ item }}"
+ path: "{{ galaxy_dir }}/ansible_collections"
+ loop:
+ - absent
+ - directory
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml
new file mode 100644
index 00000000..1b761f60
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/individual_collection_repo.yml
@@ -0,0 +1,20 @@
+- name: Clone a git repository
+ git:
+ repo: https://github.com/ansible-collections/amazon.aws.git
+ dest: '{{ galaxy_dir }}/development/amazon.aws/'
+
+- name: install
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/amazon.aws/.git'
+ args:
+ chdir: '{{ galaxy_dir }}/development'
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'amazon.aws' in installed_collections.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml
new file mode 100644
index 00000000..7db7e1d6
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: set the temp test directory
+ set_fact:
+ galaxy_dir: "{{ remote_tmp_dir }}/galaxy"
+
+- name: Test installing collections from git repositories
+ environment:
+ ANSIBLE_COLLECTIONS_PATHS: '{{ galaxy_dir }}'
+ vars:
+ cleanup: True
+ galaxy_dir: "{{ galaxy_dir }}"
+ block:
+
+ - include_tasks: ./setup.yml
+ - include_tasks: ./individual_collection_repo.yml
+ - include_tasks: ./setup_multi_collection_repo.yml
+ - include_tasks: ./multi_collection_repo_all.yml
+ - include_tasks: ./scm_dependency.yml
+ vars:
+ cleanup: False
+ - include_tasks: ./reinstalling.yml
+ - include_tasks: ./multi_collection_repo_individual.yml
+ - include_tasks: ./setup_recursive_scm_dependency.yml
+ - include_tasks: ./scm_dependency_deduplication.yml
+ - include_tasks: ./download.yml
+
+ always:
+
+ - name: Remove the directories for installing collections and git repositories
+ file:
+ path: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ galaxy_dir }}/ansible_collections'
+ - '{{ galaxy_dir }}/development'
+
+ - name: remove git
+ package:
+ name: git
+ state: absent
+ when: git_install is changed
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml
new file mode 100644
index 00000000..2992062a
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_all.yml
@@ -0,0 +1,14 @@
+- name: Install all collections by default
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git'
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'ansible_test.collection_1' in installed_collections.stdout"
+ - "'ansible_test.collection_2' in installed_collections.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml
new file mode 100644
index 00000000..48f6407a
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/multi_collection_repo_individual.yml
@@ -0,0 +1,15 @@
+- name: test installing one collection
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#collection_2'
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'amazon.aws' not in installed_collections.stdout"
+ - "'ansible_test.collection_1' not in installed_collections.stdout"
+ - "'ansible_test.collection_2' in installed_collections.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml
new file mode 100644
index 00000000..c0f6c910
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/reinstalling.yml
@@ -0,0 +1,31 @@
+- name: Rerun installing a collection with a dep
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/'
+ register: installed
+
+- assert:
+ that:
+ - "'Skipping' in installed.stdout"
+ - "'Created' not in installed.stdout"
+
+- name: Only reinstall the collection
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/ --force'
+ register: installed
+
+- assert:
+ that:
+ - "'Created collection for ansible_test.collection_1' in installed.stdout"
+ - "'Created collection for ansible_test.collection_2' not in installed.stdout"
+ - "'Skipping' in installed.stdout"
+
+- name: Reinstall the collection and dependency
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/ --force-with-deps'
+ register: installed
+
+- assert:
+ that:
+ - "'Created collection for ansible_test.collection_1' in installed.stdout"
+ - "'Created collection for ansible_test.collection_2' in installed.stdout"
+ - "'Skipping' not in installed.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml
new file mode 100644
index 00000000..5a23663e
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency.yml
@@ -0,0 +1,14 @@
+- name: test installing one collection that has a SCM dep
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/ansible_test/.git#/collection_1/'
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'ansible_test.collection_1' in installed_collections.stdout"
+ - "'ansible_test.collection_2' in installed_collections.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml
new file mode 100644
index 00000000..bc10f24c
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/scm_dependency_deduplication.yml
@@ -0,0 +1,54 @@
+- name: Install all collections in a repo, one of which has a recursive dependency
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/namespace_1/.git'
+ register: command
+
+- assert:
+ that:
+ - command.stdout_lines | length == 9
+ - command.stdout_lines[0] == "Starting galaxy collection install process"
+ - command.stdout_lines[1] == "Process install dependency map"
+ - command.stdout_lines[2] == "Starting collection install process"
+ - "'namespace_1.collection_1' in command.stdout_lines[3]"
+ - "'namespace_1.collection_1' in command.stdout_lines[4]"
+ - "'namespace_1.collection_1' in command.stdout_lines[5]"
+ - "'namespace_2.collection_2' in command.stdout_lines[6]"
+ - "'namespace_2.collection_2' in command.stdout_lines[7]"
+ - "'namespace_2.collection_2' in command.stdout_lines[8]"
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'namespace_1.collection_1' in installed_collections.stdout"
+ - "'namespace_2.collection_2' in installed_collections.stdout"
+
+- name: Install a specific collection in a repo with a recursive dependency
+ command: 'ansible-galaxy collection install git+file://{{ galaxy_dir }}/development/namespace_1/.git#/collection_1/ --force-with-deps'
+ register: command
+
+- assert:
+ that:
+ - command.stdout_lines | length == 9
+ - command.stdout_lines[0] == "Starting galaxy collection install process"
+ - command.stdout_lines[1] == "Process install dependency map"
+ - command.stdout_lines[2] == "Starting collection install process"
+ - "'namespace_1.collection_1' in command.stdout_lines[3]"
+ - "'namespace_1.collection_1' in command.stdout_lines[4]"
+ - "'namespace_1.collection_1' in command.stdout_lines[5]"
+ - "'namespace_2.collection_2' in command.stdout_lines[6]"
+ - "'namespace_2.collection_2' in command.stdout_lines[7]"
+ - "'namespace_2.collection_2' in command.stdout_lines[8]"
+
+- name: list installed collections
+ command: 'ansible-galaxy collection list'
+ register: installed_collections
+
+- assert:
+ that:
+ - "'namespace_1.collection_1' in installed_collections.stdout"
+ - "'namespace_2.collection_2' in installed_collections.stdout"
+
+- include_tasks: ./empty_installed_collections.yml
+ when: cleanup
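
Both installs above are pinned to exactly nine lines of output: three headers, then three lines each for namespace_1.collection_1 and namespace_2.collection_2, even though the two collections (created in setup_recursive_scm_dependency.yml below) depend on each other. Conceptually the deduplication is a visited set over the dependency graph; an illustrative sketch, not ansible-galaxy internals:

deps = {
    'namespace_1.collection_1': ['namespace_2.collection_2'],
    'namespace_2.collection_2': ['namespace_1.collection_1'],
}

def resolve(root):
    seen, order = set(), []
    stack = [root]
    while stack:
        name = stack.pop()
        if name in seen:
            continue  # already scheduled: this is the deduplication step
        seen.add(name)
        order.append(name)
        stack.extend(deps.get(name, []))
    return order

# each collection is installed once despite the circular dependency
assert resolve('namespace_1.collection_1') == [
    'namespace_1.collection_1', 'namespace_2.collection_2']
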
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml
new file mode 100644
index 00000000..f4beb9d6
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup.yml
@@ -0,0 +1,19 @@
+- name: ensure git is installed
+ package:
+ name: git
+ when: ansible_distribution != "MacOSX"
+ register: git_install
+
+- name: set git global user.email if not already set
+ shell: git config --global user.email || git config --global user.email "noreply@example.com"
+
+- name: set git global user.name if not already set
+ shell: git config --global user.name || git config --global user.name "Ansible Test Runner"
+
+- name: Create a directory for installing collections and creating git repositories
+ file:
+ path: '{{ item }}'
+ state: directory
+ loop:
+ - '{{ galaxy_dir }}/ansible_collections'
+ - '{{ galaxy_dir }}/development/ansible_test'
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml
new file mode 100644
index 00000000..4a662ca6
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_multi_collection_repo.yml
@@ -0,0 +1,27 @@
+- name: Initialize a git repo
+ command: 'git init {{ galaxy_dir }}/development/ansible_test'
+
+- stat:
+ path: "{{ galaxy_dir }}/development/ansible_test"
+
+- name: Add a couple collections to the repository
+ command: 'ansible-galaxy collection init {{ item }}'
+ args:
+ chdir: '{{ galaxy_dir }}/development'
+ loop:
+ - 'ansible_test.collection_1'
+ - 'ansible_test.collection_2'
+
+- name: Add collection_2 as a dependency of collection_1
+ lineinfile:
+ path: '{{ galaxy_dir }}/development/ansible_test/collection_1/galaxy.yml'
+ regexp: '^dependencies'
+ line: "dependencies: {'git+file://{{ galaxy_dir }}/development/ansible_test/.git#collection_2/': '*'}"
+
+- name: Commit the changes
+ command: '{{ item }}'
+ args:
+ chdir: '{{ galaxy_dir }}/development/ansible_test'
+ loop:
+ - git add ./
+ - git commit -m 'add collections'
diff --git a/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml
new file mode 100644
index 00000000..df0af917
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection-scm/tasks/setup_recursive_scm_dependency.yml
@@ -0,0 +1,33 @@
+- name: Initialize git repositories
+ command: 'git init {{ galaxy_dir }}/development/{{ item }}'
+ loop:
+ - namespace_1
+ - namespace_2
+
+- name: Add a couple collections to the repository
+ command: 'ansible-galaxy collection init {{ item }}'
+ args:
+ chdir: '{{ galaxy_dir }}/development'
+ loop:
+ - 'namespace_1.collection_1'
+ - 'namespace_2.collection_2'
+
+- name: Add collection_2 as a dependency of collection_1
+ lineinfile:
+ path: '{{ galaxy_dir }}/development/namespace_1/collection_1/galaxy.yml'
+ regexp: '^dependencies'
+ line: "dependencies: {'git+file://{{ galaxy_dir }}/development/namespace_2/.git#collection_2/': '*'}"
+
+- name: Add collection_1 as a dependency of collection_2
+ lineinfile:
+ path: '{{ galaxy_dir }}/development/namespace_2/collection_2/galaxy.yml'
+ regexp: '^dependencies'
+ line: "dependencies: {'git+file://{{ galaxy_dir }}/development/namespace_1/.git#collection_1/': 'master'}"
+
+- name: Commit the changes
+ shell: git add ./; git commit -m 'add collection'
+ args:
+ chdir: '{{ galaxy_dir }}/development/{{ item }}'
+ loop:
+ - namespace_1
+ - namespace_2
diff --git a/test/integration/targets/ansible-galaxy-collection/aliases b/test/integration/targets/ansible-galaxy-collection/aliases
new file mode 100644
index 00000000..4b3ebea3
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/aliases
@@ -0,0 +1,3 @@
+shippable/fallaxy/group1
+shippable/fallaxy/smoketest
+cloud/fallaxy
diff --git a/test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py b/test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py
new file mode 100644
index 00000000..6182e865
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/files/build_bad_tar.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import hashlib
+import io
+import json
+import os
+import sys
+import tarfile
+
+manifest = {
+ 'collection_info': {
+ 'namespace': 'suspicious',
+ 'name': 'test',
+ 'version': '1.0.0',
+ 'dependencies': {},
+ },
+ 'file_manifest_file': {
+ 'name': 'FILES.json',
+ 'ftype': 'file',
+ 'chksum_type': 'sha256',
+ 'chksum_sha256': None,
+ 'format': 1
+ },
+ 'format': 1,
+}
+
+files = {
+ 'files': [
+ {
+ 'name': '.',
+ 'ftype': 'dir',
+ 'chksum_type': None,
+ 'chksum_sha256': None,
+ 'format': 1,
+ },
+ ],
+ 'format': 1,
+}
+
+
+def add_file(tar_file, filename, b_content, update_files=True):
+ tar_info = tarfile.TarInfo(filename)
+ tar_info.size = len(b_content)
+ tar_info.mode = 0o0755
+ tar_file.addfile(tarinfo=tar_info, fileobj=io.BytesIO(b_content))
+
+ if update_files:
+ sha256 = hashlib.sha256()
+ sha256.update(b_content)
+
+ files['files'].append({
+ 'name': filename,
+ 'ftype': 'file',
+ 'chksum_type': 'sha256',
+ 'chksum_sha256': sha256.hexdigest(),
+ 'format': 1
+ })
+
+
+collection_tar = os.path.join(sys.argv[1], 'suspicious-test-1.0.0.tar.gz')
+with tarfile.open(collection_tar, mode='w:gz') as tar_file:
+ add_file(tar_file, '../../outside.sh', b"#!/usr/bin/env bash\necho \"you got pwned\"")
+
+ b_files = json.dumps(files).encode('utf-8')
+ b_files_hash = hashlib.sha256()
+ b_files_hash.update(b_files)
+ manifest['file_manifest_file']['chksum_sha256'] = b_files_hash.hexdigest()
+ add_file(tar_file, 'FILES.json', b_files)
+ add_file(tar_file, 'MANIFEST.json', json.dumps(manifest).encode('utf-8'))
+
+ b_manifest = json.dumps(manifest).encode('utf-8')
+
+ for name, b in [('MANIFEST.json', b_manifest), ('FILES.json', b_files)]:
+ b_io = io.BytesIO(b)
+ tar_info = tarfile.TarInfo(name)
+ tar_info.size = len(b)
+ tar_info.mode = 0o0644
+ tar_file.addfile(tarinfo=tar_info, fileobj=b_io)
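
The script plants ../../outside.sh as a path-traversal member while keeping the FILES.json and MANIFEST.json checksums internally consistent, so only path validation can reject the artifact. The install tests below expect the "Cannot extract tar entry ... outside the collection directory" error; a generic sketch of that defence (not the ansible-galaxy implementation):

import os
import tarfile

def safe_extract(tar_path, dest):
    # refuse any member whose resolved target escapes the destination dir
    dest = os.path.realpath(dest)
    with tarfile.open(tar_path) as tar:
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(dest, member.name))
            if not target.startswith(dest + os.sep):
                raise ValueError(
                    "Cannot extract tar entry %r as it will be placed "
                    "outside the collection directory" % member.name)
        tar.extractall(dest)
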
diff --git a/test/integration/targets/ansible-galaxy-collection/library/setup_collections.py b/test/integration/targets/ansible-galaxy-collection/library/setup_collections.py
new file mode 100644
index 00000000..b876a65f
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/library/setup_collections.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: setup_collections
+short_description: Set up test collections based on the input
+description:
+- Builds and publishes a whole bunch of collections used for testing in bulk.
+options:
+ server:
+ description:
+ - The Galaxy server to upload the collections to.
+ required: yes
+ type: str
+ token:
+ description:
+ - The token used to authenticate with the Galaxy server.
+ required: yes
+ type: str
+ collections:
+ description:
+ - A list of collection details to use for the build.
+ required: yes
+ type: list
+ elements: dict
+ options:
+ namespace:
+ description:
+ - The namespace of the collection.
+ required: yes
+ type: str
+ name:
+ description:
+ - The name of the collection.
+ required: yes
+ type: str
+ version:
+ description:
+ - The version of the collection.
+ type: str
+ default: '1.0.0'
+ dependencies:
+ description:
+ - The dependencies of the collection.
+ type: dict
+ default: {}
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = '''
+- name: Build test collections
+ setup_collections:
+ server: '{{ fallaxy_galaxy_server }}'
+ token: '{{ fallaxy_token }}'
+ collections:
+ - namespace: namespace1
+ name: name1
+ version: 0.0.1
+ - namespace: namespace1
+ name: name1
+ version: 0.0.2
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+import tempfile
+import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def run_module():
+ module_args = dict(
+ server=dict(type='str', required=True),
+ token=dict(type='str', required=True),
+ collections=dict(
+ type='list',
+ elements='dict',
+ required=True,
+ options=dict(
+ namespace=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ version=dict(type='str', default='1.0.0'),
+ dependencies=dict(type='dict', default={}),
+ use_symlink=dict(type='bool', default=False),
+ ),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False
+ )
+
+ result = dict(changed=True)
+
+ for idx, collection in enumerate(module.params['collections']):
+ collection_dir = os.path.join(module.tmpdir, "%s-%s-%s" % (collection['namespace'], collection['name'],
+ collection['version']))
+ b_collection_dir = to_bytes(collection_dir, errors='surrogate_or_strict')
+ os.mkdir(b_collection_dir)
+
+ with open(os.path.join(b_collection_dir, b'README.md'), mode='wb') as fd:
+ fd.write(b"Collection readme")
+
+ galaxy_meta = {
+ 'namespace': collection['namespace'],
+ 'name': collection['name'],
+ 'version': collection['version'],
+ 'readme': 'README.md',
+ 'authors': ['Collection author <name@email.com>'],
+ 'dependencies': collection['dependencies'],
+ }
+ with open(os.path.join(b_collection_dir, b'galaxy.yml'), mode='wb') as fd:
+ fd.write(to_bytes(yaml.safe_dump(galaxy_meta), errors='surrogate_or_strict'))
+
+ with tempfile.NamedTemporaryFile(mode='wb') as temp_fd:
+ temp_fd.write(b"data")
+
+ if collection['use_symlink']:
+ os.mkdir(os.path.join(b_collection_dir, b'docs'))
+ os.mkdir(os.path.join(b_collection_dir, b'plugins'))
+ b_target_file = b'RE\xc3\x85DM\xc3\x88.md'
+ with open(os.path.join(b_collection_dir, b_target_file), mode='wb') as fd:
+ fd.write(b'data')
+
+ os.symlink(b_target_file, os.path.join(b_collection_dir, b_target_file + b'-link'))
+ os.symlink(temp_fd.name, os.path.join(b_collection_dir, b_target_file + b'-outside-link'))
+ os.symlink(os.path.join(b'..', b_target_file), os.path.join(b_collection_dir, b'docs', b_target_file))
+ os.symlink(os.path.join(b_collection_dir, b_target_file),
+ os.path.join(b_collection_dir, b'plugins', b_target_file))
+ os.symlink(b'docs', os.path.join(b_collection_dir, b'docs-link'))
+
+ release_filename = '%s-%s-%s.tar.gz' % (collection['namespace'], collection['name'], collection['version'])
+ collection_path = os.path.join(collection_dir, release_filename)
+ module.run_command(['ansible-galaxy', 'collection', 'build'], cwd=collection_dir)
+
+ # To save on time, skip the import wait until the last collection is being uploaded.
+ publish_args = ['ansible-galaxy', 'collection', 'publish', collection_path, '--server',
+ module.params['server'], '--token', module.params['token']]
+ if idx != (len(module.params['collections']) - 1):
+ publish_args.append('--no-wait')
+ module.run_command(publish_args)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
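
Per collection, the module builds in a temp dir and then publishes, passing --no-wait for every upload except the last so only the final Galaxy import is waited on. A standalone sketch of that loop; the server URL and token here are placeholders (the real test wires in the fallaxy fixture values):

import subprocess

collection_dirs = ['namespace1-name1-1.0.0', 'namespace1-name1-1.0.9']
for idx, path in enumerate(collection_dirs):
    subprocess.run(['ansible-galaxy', 'collection', 'build'], cwd=path, check=True)
    publish = ['ansible-galaxy', 'collection', 'publish',
               '%s/%s.tar.gz' % (path, path),
               '--server', 'https://galaxy.example.com/api/',  # placeholder
               '--token', 'secret']                            # placeholder
    if idx != len(collection_dirs) - 1:
        publish.append('--no-wait')  # skip the import wait except for the last
    subprocess.run(publish, check=True)
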
diff --git a/test/integration/targets/ansible-galaxy-collection/meta/main.yml b/test/integration/targets/ansible-galaxy-collection/meta/main.yml
new file mode 100644
index 00000000..e3dd5fb1
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/build.yml b/test/integration/targets/ansible-galaxy-collection/tasks/build.yml
new file mode 100644
index 00000000..a5ba1d47
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/tasks/build.yml
@@ -0,0 +1,53 @@
+---
+- name: build basic collection based on current directory
+ command: ansible-galaxy collection build {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}/scratch/ansible_test/my_collection'
+ register: build_current_dir
+
+- name: get result of build basic collection on current directory
+ stat:
+ path: '{{ galaxy_dir }}/scratch/ansible_test/my_collection/ansible_test-my_collection-1.0.0.tar.gz'
+ register: build_current_dir_actual
+
+- name: assert build basic collection based on current directory
+ assert:
+ that:
+ - '"Created collection for ansible_test.my_collection" in build_current_dir.stdout'
+ - build_current_dir_actual.stat.exists
+
+- name: build basic collection based on relative dir
+ command: ansible-galaxy collection build scratch/ansible_test/my_collection {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ register: build_relative_dir
+
+- name: get result of build basic collection based on relative dir
+ stat:
+ path: '{{ galaxy_dir }}/ansible_test-my_collection-1.0.0.tar.gz'
+ register: build_relative_dir_actual
+
+- name: assert build basic collection based on relative dir
+ assert:
+ that:
+ - '"Created collection for ansible_test.my_collection" in build_relative_dir.stdout'
+ - build_relative_dir_actual.stat.exists
+
+- name: fail to build existing collection without force
+ command: ansible-galaxy collection build scratch/ansible_test/my_collection {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ ignore_errors: yes
+ register: build_existing_no_force
+
+- name: build existing collection with force
+ command: ansible-galaxy collection build scratch/ansible_test/my_collection --force {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ register: build_existing_force
+
+- name: assert build existing collection
+ assert:
+ that:
+ - '"use --force to re-create the collection artifact" in build_existing_no_force.stderr'
+ - '"Created collection for ansible_test.my_collection" in build_existing_force.stdout'
diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/download.yml b/test/integration/targets/ansible-galaxy-collection/tasks/download.yml
new file mode 100644
index 00000000..bdd743b2
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/tasks/download.yml
@@ -0,0 +1,142 @@
+---
+- name: create test download dir
+ file:
+ path: '{{ galaxy_dir }}/download'
+ state: directory
+
+- name: download collection with multiple dependencies
+ command: ansible-galaxy collection download parent_dep.parent_collection -s {{ fallaxy_galaxy_server }} {{ galaxy_verbosity }}
+ register: download_collection
+ args:
+ chdir: '{{ galaxy_dir }}/download'
+
+- name: get result of download collection with multiple dependencies
+ find:
+ path: '{{ galaxy_dir }}/download/collections'
+ file_type: file
+ register: download_collection_actual
+
+- name: assert download collection with multiple dependencies
+ assert:
+ that:
+ - '"Downloading collection ''parent_dep.parent_collection'' to" in download_collection.stdout'
+ - '"Downloading collection ''child_dep.child_collection'' to" in download_collection.stdout'
+ - '"Downloading collection ''child_dep.child_dep2'' to" in download_collection.stdout'
+ - download_collection_actual.examined == 4
+ - download_collection_actual.matched == 4
+ - (download_collection_actual.files[0].path | basename) in ['requirements.yml', 'child_dep-child_dep2-1.2.2.tar.gz', 'child_dep-child_collection-0.9.9.tar.gz', 'parent_dep-parent_collection-1.0.0.tar.gz']
+ - (download_collection_actual.files[1].path | basename) in ['requirements.yml', 'child_dep-child_dep2-1.2.2.tar.gz', 'child_dep-child_collection-0.9.9.tar.gz', 'parent_dep-parent_collection-1.0.0.tar.gz']
+ - (download_collection_actual.files[2].path | basename) in ['requirements.yml', 'child_dep-child_dep2-1.2.2.tar.gz', 'child_dep-child_collection-0.9.9.tar.gz', 'parent_dep-parent_collection-1.0.0.tar.gz']
+ - (download_collection_actual.files[3].path | basename) in ['requirements.yml', 'child_dep-child_dep2-1.2.2.tar.gz', 'child_dep-child_collection-0.9.9.tar.gz', 'parent_dep-parent_collection-1.0.0.tar.gz']
+
+- name: test install of download requirements file
+ command: ansible-galaxy collection install -r requirements.yml -p '{{ galaxy_dir }}/download' {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}/download/collections'
+ register: install_download
+
+- name: get result of test install of download requirements file
+ slurp:
+ path: '{{ galaxy_dir }}/download/ansible_collections/{{ collection.namespace }}/{{ collection.name }}/MANIFEST.json'
+ register: install_download_actual
+ loop_control:
+ loop_var: collection
+ loop:
+ - namespace: parent_dep
+ name: parent_collection
+ - namespace: child_dep
+ name: child_collection
+ - namespace: child_dep
+ name: child_dep2
+
+- name: assert test install of download requirements file
+ assert:
+ that:
+ - '"Installing ''parent_dep.parent_collection:1.0.0'' to" in install_download.stdout'
+ - '"Installing ''child_dep.child_collection:0.9.9'' to" in install_download.stdout'
+ - '"Installing ''child_dep.child_dep2:1.2.2'' to" in install_download.stdout'
+ - (install_download_actual.results[0].content | b64decode | from_json).collection_info.version == '1.0.0'
+ - (install_download_actual.results[1].content | b64decode | from_json).collection_info.version == '0.9.9'
+ - (install_download_actual.results[2].content | b64decode | from_json).collection_info.version == '1.2.2'
+
+- name: create test requirements file for download
+ copy:
+ content: |
+ collections:
+ - name: namespace1.name1
+ version: 1.1.0-beta.1
+
+ dest: '{{ galaxy_dir }}/download/download.yml'
+
+- name: download collection with req to custom dir
+ command: ansible-galaxy collection download -r '{{ galaxy_dir }}/download/download.yml' -s {{ fallaxy_ah_server }} -p '{{ galaxy_dir }}/download/collections-custom' {{ galaxy_verbosity }}
+ register: download_req_custom_path
+
+- name: get result of download collection with req to custom dir
+ find:
+ path: '{{ galaxy_dir }}/download/collections-custom'
+ file_type: file
+ register: download_req_custom_path_actual
+
+- name: assert download collection with multiple dependencies
+ assert:
+ that:
+ - '"Downloading collection ''namespace1.name1'' to" in download_req_custom_path.stdout'
+ - download_req_custom_path_actual.examined == 2
+ - download_req_custom_path_actual.matched == 2
+ - (download_req_custom_path_actual.files[0].path | basename) in ['requirements.yml', 'namespace1-name1-1.1.0-beta.1.tar.gz']
+ - (download_req_custom_path_actual.files[1].path | basename) in ['requirements.yml', 'namespace1-name1-1.1.0-beta.1.tar.gz']
+
+# https://github.com/ansible/ansible/issues/68186
+- name: create test requirements file without roles and collections
+ copy:
+ content: |
+ collections:
+ roles:
+
+ dest: '{{ galaxy_dir }}/download/no_roles_no_collections.yml'
+
+- name: install collection with requirements
+ command: ansible-galaxy collection install -r '{{ galaxy_dir }}/download/no_roles_no_collections.yml' {{ galaxy_verbosity }}
+ register: install_no_requirements
+
+- name: assert install collection with no roles and no collections in requirements
+ assert:
+ that:
+ - '"Skipping install, no requirements found" in install_no_requirements.stdout'
+
+- name: Test downloading a tar.gz collection artifact
+ block:
+
+ - name: get result of build basic collection on current directory
+ stat:
+ path: '{{ galaxy_dir }}/scratch/ansible_test/my_collection/ansible_test-my_collection-1.0.0.tar.gz'
+ register: result
+
+ - name: create default skeleton
+ command: ansible-galaxy collection init ansible_test.my_collection {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}/scratch'
+ when: not result.stat.exists
+
+ - name: build the tar.gz
+ command: ansible-galaxy collection build {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}/scratch/ansible_test/my_collection'
+ when: not result.stat.exists
+
+ - name: download a tar.gz file
+ command: ansible-galaxy collection download '{{ galaxy_dir }}/scratch/ansible_test/my_collection/ansible_test-my_collection-1.0.0.tar.gz'
+ args:
+ chdir: '{{ galaxy_dir }}/download'
+ register: download_collection
+
+ - name: get result of downloaded tar.gz
+ stat:
+ path: '{{ galaxy_dir }}/download/collections/ansible_test-my_collection-1.0.0.tar.gz'
+ register: download_collection_actual
+
+ - assert:
+ that:
+ - '"Downloading collection ''ansible_test.my_collection'' to" in download_collection.stdout'
+ - download_collection_actual.stat.exists
diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/init.yml b/test/integration/targets/ansible-galaxy-collection/tasks/init.yml
new file mode 100644
index 00000000..15ec5eab
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/tasks/init.yml
@@ -0,0 +1,44 @@
+---
+- name: create default skeleton
+ command: ansible-galaxy collection init ansible_test.my_collection {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}/scratch'
+ register: init_relative
+
+- name: get result of create default skeleton
+ find:
+ path: '{{ galaxy_dir }}/scratch/ansible_test/my_collection'
+ recurse: yes
+ file_type: directory
+ register: init_relative_actual
+
+- debug:
+ var: init_relative_actual.files | map(attribute='path') | list
+
+- name: assert create default skeleton
+ assert:
+ that:
+ - '"Collection ansible_test.my_collection was created successfully" in init_relative.stdout'
+ - init_relative_actual.files | length == 3
+ - (init_relative_actual.files | map(attribute='path') | list)[0] | basename in ['docs', 'plugins', 'roles']
+ - (init_relative_actual.files | map(attribute='path') | list)[1] | basename in ['docs', 'plugins', 'roles']
+ - (init_relative_actual.files | map(attribute='path') | list)[2] | basename in ['docs', 'plugins', 'roles']
+
+- name: create collection with custom init path
+ command: ansible-galaxy collection init ansible_test2.my_collection --init-path "{{ galaxy_dir }}/scratch/custom-init-dir" {{ galaxy_verbosity }}
+ register: init_custom_path
+
+- name: get result of create default skeleton
+ find:
+ path: '{{ galaxy_dir }}/scratch/custom-init-dir/ansible_test2/my_collection'
+ file_type: directory
+ register: init_custom_path_actual
+
+- name: assert create collection with custom init path
+ assert:
+ that:
+ - '"Collection ansible_test2.my_collection was created successfully" in init_custom_path.stdout'
+ - init_custom_path_actual.files | length == 3
+ - (init_custom_path_actual.files | map(attribute='path') | list)[0] | basename in ['docs', 'plugins', 'roles']
+ - (init_custom_path_actual.files | map(attribute='path') | list)[1] | basename in ['docs', 'plugins', 'roles']
+ - (init_custom_path_actual.files | map(attribute='path') | list)[2] | basename in ['docs', 'plugins', 'roles']
diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/install.yml b/test/integration/targets/ansible-galaxy-collection/tasks/install.yml
new file mode 100644
index 00000000..11ce1c01
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/tasks/install.yml
@@ -0,0 +1,330 @@
+---
+- name: create test collection install directory - {{ test_name }}
+ file:
+ path: '{{ galaxy_dir }}/ansible_collections'
+ state: directory
+
+- name: install simple collection with implicit path - {{ test_name }}
+ command: ansible-galaxy collection install namespace1.name1 -s '{{ test_server }}' {{ galaxy_verbosity }}
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+ register: install_normal
+
+- name: get installed files of install simple collection with implicit path - {{ test_name }}
+ find:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace1/name1'
+ file_type: file
+ register: install_normal_files
+
+- name: get the manifest of install simple collection with implicit path - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace1/name1/MANIFEST.json'
+ register: install_normal_manifest
+
+- name: assert install simple collection with implicit path - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace1.name1:1.0.9'' to" in install_normal.stdout'
+ - install_normal_files.files | length == 3
+ - install_normal_files.files[0].path | basename in ['MANIFEST.json', 'FILES.json', 'README.md']
+ - install_normal_files.files[1].path | basename in ['MANIFEST.json', 'FILES.json', 'README.md']
+ - install_normal_files.files[2].path | basename in ['MANIFEST.json', 'FILES.json', 'README.md']
+ - (install_normal_manifest.content | b64decode | from_json).collection_info.version == '1.0.9'
+
+- name: install existing without --force - {{ test_name }}
+ command: ansible-galaxy collection install namespace1.name1 -s '{{ test_server }}' {{ galaxy_verbosity }}
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+ register: install_existing_no_force
+
+- name: assert install existing without --force - {{ test_name }}
+ assert:
+ that:
+ - '"Skipping ''namespace1.name1'' as it is already installed" in install_existing_no_force.stdout'
+
+- name: install existing with --force - {{ test_name }}
+ command: ansible-galaxy collection install namespace1.name1 -s '{{ test_server }}' --force {{ galaxy_verbosity }}
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+ register: install_existing_force
+
+- name: assert install existing with --force - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace1.name1:1.0.9'' to" in install_existing_force.stdout'
+
+- name: remove test installed collection - {{ test_name }}
+ file:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace1'
+ state: absent
+
+- name: install pre-release as explicit version to custom dir - {{ test_name }}
+ command: ansible-galaxy collection install 'namespace1.name1:1.1.0-beta.1' -s '{{ test_server }}' -p '{{ galaxy_dir }}/ansible_collections' {{ galaxy_verbosity }}
+ register: install_prerelease
+
+- name: get result of install pre-release as explicit version to custom dir - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace1/name1/MANIFEST.json'
+ register: install_prerelease_actual
+
+- name: assert install pre-release as explicit version to custom dir - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace1.name1:1.1.0-beta.1'' to" in install_prerelease.stdout'
+ - (install_prerelease_actual.content | b64decode | from_json).collection_info.version == '1.1.0-beta.1'
+
+- name: Remove beta
+ file:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace1/name1'
+ state: absent
+
+- name: install pre-release version with --pre to custom dir - {{ test_name }}
+ command: ansible-galaxy collection install --pre 'namespace1.name1' -s '{{ test_server }}' -p '{{ galaxy_dir }}/ansible_collections' {{ galaxy_verbosity }}
+ register: install_prerelease
+
+- name: get result of install pre-release version with --pre to custom dir - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace1/name1/MANIFEST.json'
+ register: install_prerelease_actual
+
+- name: assert install pre-release version with --pre to custom dir - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace1.name1:1.1.0-beta.1'' to" in install_prerelease.stdout'
+ - (install_prerelease_actual.content | b64decode | from_json).collection_info.version == '1.1.0-beta.1'
+
+- name: install multiple collections with dependencies - {{ test_name }}
+ command: ansible-galaxy collection install parent_dep.parent_collection namespace2.name -s {{ test_name }} {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}/ansible_collections'
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+ ANSIBLE_CONFIG: '{{ galaxy_dir }}/ansible.cfg'
+ register: install_multiple_with_dep
+
+- name: get result of install multiple collections with dependencies - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/{{ collection.namespace }}/{{ collection.name }}/MANIFEST.json'
+ register: install_multiple_with_dep_actual
+ loop_control:
+ loop_var: collection
+ loop:
+ - namespace: namespace2
+ name: name
+ - namespace: parent_dep
+ name: parent_collection
+ - namespace: child_dep
+ name: child_collection
+ - namespace: child_dep
+ name: child_dep2
+
+- name: assert install multiple collections with dependencies - {{ test_name }}
+ assert:
+ that:
+ - (install_multiple_with_dep_actual.results[0].content | b64decode | from_json).collection_info.version == '1.0.0'
+ - (install_multiple_with_dep_actual.results[1].content | b64decode | from_json).collection_info.version == '1.0.0'
+ - (install_multiple_with_dep_actual.results[2].content | b64decode | from_json).collection_info.version == '0.9.9'
+ - (install_multiple_with_dep_actual.results[3].content | b64decode | from_json).collection_info.version == '1.2.2'
+
+- name: expect failure when dependency resolution fails - {{ test_name }}
+ command: ansible-galaxy collection install fail_namespace.fail_collection -s {{ test_server }} {{ galaxy_verbosity }}
+ register: fail_dep_mismatch
+ failed_when: '"Cannot meet dependency requirement ''fail_dep2.name:<0.0.5'' for collection fail_namespace.fail_collection" not in fail_dep_mismatch.stderr'
+
+- name: download a collection for an offline install - {{ test_name }}
+ get_url:
+ url: '{{ test_server }}custom/collections/namespace3-name-1.0.0.tar.gz'
+ dest: '{{ galaxy_dir }}/namespace3.tar.gz'
+
+- name: install a collection from a tarball - {{ test_name }}
+ command: ansible-galaxy collection install '{{ galaxy_dir }}/namespace3.tar.gz' {{ galaxy_verbosity }}
+ register: install_tarball
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+
+- name: get result of install collection from a tarball - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace3/name/MANIFEST.json'
+ register: install_tarball_actual
+
+- name: assert install a collection from a tarball - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace3.name:1.0.0'' to" in install_tarball.stdout'
+ - (install_tarball_actual.content | b64decode | from_json).collection_info.version == '1.0.0'
+
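+
+# build_bad_tar.py presumably builds suspicious-test-1.0.0.tar.gz containing a
+# tar entry ('../../outside.sh') that would be extracted outside the collection
+# directory; the failed_when below verifies the install is rejected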
+- name: setup bad tarball - {{ test_name }}
+ script: build_bad_tar.py {{ galaxy_dir | quote }}
+
+- name: fail to install a collection from a bad tarball - {{ test_name }}
+ command: ansible-galaxy collection install '{{ galaxy_dir }}/suspicious-test-1.0.0.tar.gz' {{ galaxy_verbosity }}
+ register: fail_bad_tar
+ failed_when: fail_bad_tar.rc != 1 or "Cannot extract tar entry '../../outside.sh' as it will be placed outside the collection directory" not in fail_bad_tar.stderr
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+
+- name: get result of failed collection install - {{ test_name }}
+ stat:
+ path: '{{ galaxy_dir }}/ansible_collections/suspicious'
+ register: fail_bad_tar_actual
+
+- name: assert result of failed collection install - {{ test_name }}
+ assert:
+ that:
+ - not fail_bad_tar_actual.stat.exists
+
+- name: install a collection from a URI - {{ test_name }}
+ command: ansible-galaxy collection install '{{ test_server }}custom/collections/namespace4-name-1.0.0.tar.gz' {{ galaxy_verbosity }}
+ register: install_uri
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+
+- name: get result of install collection from a URI - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace4/name/MANIFEST.json'
+ register: install_uri_actual
+
+- name: assert install a collection from a URI - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace4.name:1.0.0'' to" in install_uri.stdout'
+ - (install_uri_actual.content | b64decode | from_json).collection_info.version == '1.0.0'
+
+- name: fail to install a collection with an undefined URL - {{ test_name }}
+ command: ansible-galaxy collection install namespace5.name {{ galaxy_verbosity }}
+ register: fail_undefined_server
+ failed_when: '"No setting was provided for required configuration plugin_type: galaxy_server plugin: undefined" not in fail_undefined_server.stderr'
+ environment:
+ ANSIBLE_GALAXY_SERVER_LIST: undefined
+
+- name: install a collection with an empty server list - {{ test_name }}
+ command: ansible-galaxy collection install namespace5.name -s '{{ test_server }}' {{ galaxy_verbosity }}
+ register: install_empty_server_list
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+ ANSIBLE_GALAXY_SERVER_LIST: ''
+
+- name: get result of a collection with an empty server list - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/namespace5/name/MANIFEST.json'
+ register: install_empty_server_list_actual
+
+- name: assert install a collection with an empty server list - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace5.name:1.0.0'' to" in install_empty_server_list.stdout'
+ - (install_empty_server_list_actual.content | b64decode | from_json).collection_info.version == '1.0.0'
+
+- name: create test requirements file with both roles and collections - {{ test_name }}
+ copy:
+ content: |
+ collections:
+ - namespace6.name
+ - name: namespace7.name
+ roles:
+ - skip.me
+ dest: '{{ galaxy_dir }}/ansible_collections/requirements-with-role.yml'
+
+# Need to run with -vvv to see the message that roles in the file are skipped
+- name: install collections only with requirements-with-role.yml - {{ test_name }}
+ command: ansible-galaxy collection install -r '{{ galaxy_dir }}/ansible_collections/requirements-with-role.yml' -s '{{ test_server }}' -vvv
+ register: install_req_collection
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+
+- name: get result of install collections only with requirements-with-role.yml - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/{{ collection }}/name/MANIFEST.json'
+ register: install_req_collection_actual
+ loop_control:
+ loop_var: collection
+ loop:
+ - namespace6
+ - namespace7
+
+- name: assert install collections only with requirements-with-role.yml - {{ test_name }}
+ assert:
+ that:
+ - '"contains roles which will be ignored" in install_req_collection.stdout'
+ - '"Installing ''namespace6.name:1.0.0'' to" in install_req_collection.stdout'
+ - '"Installing ''namespace7.name:1.0.0'' to" in install_req_collection.stdout'
+ - (install_req_collection_actual.results[0].content | b64decode | from_json).collection_info.version == '1.0.0'
+ - (install_req_collection_actual.results[1].content | b64decode | from_json).collection_info.version == '1.0.0'
+
+- name: create test requirements file with just collections - {{ test_name }}
+ copy:
+ content: |
+ collections:
+ - namespace8.name
+ - name: namespace9.name
+ dest: '{{ galaxy_dir }}/ansible_collections/requirements.yaml'
+
+- name: install collections with ansible-galaxy install - {{ test_name }}
+ command: ansible-galaxy install -r '{{ galaxy_dir }}/ansible_collections/requirements.yaml' -s '{{ test_server }}'
+ register: install_req
+ environment:
+ ANSIBLE_COLLECTIONS_PATH: '{{ galaxy_dir }}/ansible_collections'
+
+- name: get result of install collections with ansible-galaxy install - {{ test_name }}
+ slurp:
+ path: '{{ galaxy_dir }}/ansible_collections/{{ collection }}/name/MANIFEST.json'
+ register: install_req_actual
+ loop_control:
+ loop_var: collection
+ loop:
+ - namespace8
+ - namespace9
+
+- name: assert install collections with ansible-galaxy install - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''namespace8.name:1.0.0'' to" in install_req.stdout'
+ - '"Installing ''namespace9.name:1.0.0'' to" in install_req.stdout'
+ - (install_req_actual.results[0].content | b64decode | from_json).collection_info.version == '1.0.0'
+ - (install_req_actual.results[1].content | b64decode | from_json).collection_info.version == '1.0.0'
+
+- name: remove test collection install directory - {{ test_name }}
+ file:
+ path: '{{ galaxy_dir }}/ansible_collections'
+ state: absent
+
+- name: install collection with symlink - {{ test_name }}
+ command: ansible-galaxy collection install symlink.symlink -s '{{ test_server }}' {{ galaxy_verbosity }}
+ environment:
+ ANSIBLE_COLLECTIONS_PATHS: '{{ galaxy_dir }}/ansible_collections'
+ register: install_symlink
+
+- name: find installed files of collection with symlink - {{ test_name }}
+ find:
+ paths: '{{ galaxy_dir }}/ansible_collections/symlink/symlink'
+ recurse: yes
+ file_type: any
+
+- name: get result of install collection with symlink - {{ test_name }}
+ stat:
+ path: '{{ galaxy_dir }}/ansible_collections/symlink/symlink/{{ path }}'
+ register: install_symlink_actual
+ loop_control:
+ loop_var: path
+ loop:
+ - REÅDMÈ.md-link
+ - docs/REÅDMÈ.md
+ - plugins/REÅDMÈ.md
+ - REÅDMÈ.md-outside-link
+ - docs-link
+ - docs-link/REÅDMÈ.md
+
+- name: assert install collection with symlink - {{ test_name }}
+ assert:
+ that:
+ - '"Installing ''symlink.symlink:1.0.0'' to" in install_symlink.stdout'
+ - install_symlink_actual.results[0].stat.islnk
+ - install_symlink_actual.results[0].stat.lnk_target == 'REÅDMÈ.md'
+ - install_symlink_actual.results[1].stat.islnk
+ - install_symlink_actual.results[1].stat.lnk_target == '../REÅDMÈ.md'
+ - install_symlink_actual.results[2].stat.islnk
+ - install_symlink_actual.results[2].stat.lnk_target == '../REÅDMÈ.md'
+ - install_symlink_actual.results[3].stat.isreg
+ - install_symlink_actual.results[4].stat.islnk
+ - install_symlink_actual.results[4].stat.lnk_target == 'docs'
+ - install_symlink_actual.results[5].stat.islnk
+ - install_symlink_actual.results[5].stat.lnk_target == '../REÅDMÈ.md'
diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/main.yml b/test/integration/targets/ansible-galaxy-collection/tasks/main.yml
new file mode 100644
index 00000000..c4cc9edb
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/tasks/main.yml
@@ -0,0 +1,175 @@
+---
+- name: set some facts for tests
+ set_fact:
+ galaxy_dir: "{{ remote_tmp_dir }}/galaxy"
+
+- name: create scratch dir used for testing
+ file:
+ path: '{{ galaxy_dir }}/scratch'
+ state: directory
+
+- name: run ansible-galaxy collection init tests
+ import_tasks: init.yml
+
+- name: run ansible-galaxy collection build tests
+ import_tasks: build.yml
+
+- name: create test ansible.cfg that contains the Galaxy server list
+ template:
+ src: ansible.cfg.j2
+ dest: '{{ galaxy_dir }}/ansible.cfg'
+
+- name: run ansible-galaxy collection publish tests for {{ test_name }}
+ include_tasks: publish.yml
+ vars:
+ test_name: '{{ item.name }}'
+ test_server: '{{ item.server }}'
+ with_items:
+ - name: galaxy
+ server: '{{ fallaxy_galaxy_server }}'
+ - name: automation_hub
+ server: '{{ fallaxy_ah_server }}'
+
+# We use a module for this so we can speed up the test time.
+- name: setup test collections for install and download test
+ setup_collections:
+ server: '{{ fallaxy_galaxy_server }}'
+ token: '{{ fallaxy_token }}'
+ collections:
+ # Scenario to test out pre-release being ignored unless explicitly set and version pagination.
+ - namespace: namespace1
+ name: name1
+ version: 0.0.1
+ - namespace: namespace1
+ name: name1
+ version: 0.0.2
+ - namespace: namespace1
+ name: name1
+ version: 0.0.3
+ - namespace: namespace1
+ name: name1
+ version: 0.0.4
+ - namespace: namespace1
+ name: name1
+ version: 0.0.5
+ - namespace: namespace1
+ name: name1
+ version: 0.0.6
+ - namespace: namespace1
+ name: name1
+ version: 0.0.7
+ - namespace: namespace1
+ name: name1
+ version: 0.0.8
+ - namespace: namespace1
+ name: name1
+ version: 0.0.9
+ - namespace: namespace1
+ name: name1
+ version: 0.0.10
+ - namespace: namespace1
+ name: name1
+ version: 0.1.0
+ - namespace: namespace1
+ name: name1
+ version: 1.0.0
+ - namespace: namespace1
+ name: name1
+ version: 1.0.9
+ - namespace: namespace1
+ name: name1
+ version: 1.1.0-beta.1
+
+ # Pad out number of namespaces for pagination testing
+ - namespace: namespace2
+ name: name
+ - namespace: namespace3
+ name: name
+ - namespace: namespace4
+ name: name
+ - namespace: namespace5
+ name: name
+ - namespace: namespace6
+ name: name
+ - namespace: namespace7
+ name: name
+ - namespace: namespace8
+ name: name
+ - namespace: namespace9
+ name: name
+
+ # Complex dependency resolution
+ - namespace: parent_dep
+ name: parent_collection
+ dependencies:
+ child_dep.child_collection: '>=0.5.0,<1.0.0'
+ - namespace: child_dep
+ name: child_collection
+ version: 0.4.0
+ - namespace: child_dep
+ name: child_collection
+ version: 0.5.0
+ - namespace: child_dep
+ name: child_collection
+ version: 0.9.9
+ dependencies:
+ child_dep.child_dep2: '!=1.2.3'
+ - namespace: child_dep
+ name: child_collection
+ - namespace: child_dep
+ name: child_dep2
+ version: 1.2.2
+ - namespace: child_dep
+ name: child_dep2
+ version: 1.2.3
+
+ # Dep resolution failure
+ - namespace: fail_namespace
+ name: fail_collection
+ version: 2.1.2
+ dependencies:
+ fail_dep.name: '0.0.5'
+ fail_dep2.name: '<0.0.5'
+ - namespace: fail_dep
+ name: name
+ version: '0.0.5'
+ dependencies:
+ fail_dep2.name: '>0.0.5'
+ - namespace: fail_dep2
+ name: name
+
+ # Symlink tests
+ - namespace: symlink
+ name: symlink
+ use_symlink: yes
+
+- name: run ansible-galaxy collection install tests for {{ test_name }}
+ include_tasks: install.yml
+ vars:
+ test_name: '{{ item.name }}'
+ test_server: '{{ item.server }}'
+ with_items:
+ - name: galaxy
+ server: '{{ fallaxy_galaxy_server }}'
+ - name: automation_hub
+ server: '{{ fallaxy_ah_server }}'
+
+# fake.fake does not exist but we check the output to ensure it checked both
+# servers defined in the config. We hardcode -vvv as that's the verbosity
+# level at which the message is shown
+- name: test install fallback on server list
+ command: ansible-galaxy collection install fake.fake -vvv
+ ignore_errors: yes
+ environment:
+ ANSIBLE_CONFIG: '{{ galaxy_dir }}/ansible.cfg'
+ register: missing_fallback
+
+- name: assert test install fallback on server list
+ assert:
+ that:
+ - missing_fallback.rc == 1
+ - '"Collection ''fake.fake'' is not available from server galaxy" in missing_fallback.stdout'
+ - '"Collection ''fake.fake'' is not available from server automation_hub" in missing_fallback.stdout'
+
+- name: run ansible-galaxy collection download tests
+ include_tasks: download.yml
diff --git a/test/integration/targets/ansible-galaxy-collection/tasks/publish.yml b/test/integration/targets/ansible-galaxy-collection/tasks/publish.yml
new file mode 100644
index 00000000..aa137304
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/tasks/publish.yml
@@ -0,0 +1,46 @@
+---
+- name: fail to publish with no token - {{ test_name }}
+ command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ register: fail_no_token
+ failed_when: '"HTTP Code: 401" not in fail_no_token.stderr'
+
+- name: fail to publish with invalid token - {{ test_name }}
+ command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} --token fail {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ register: fail_invalid_token
+ failed_when: '"HTTP Code: 401" not in fail_invalid_token.stderr'
+
+- name: publish collection - {{ test_name }}
+ command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} --token {{ fallaxy_token }} {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ register: publish_collection
+
+- name: get result of publish collection - {{ test_name }}
+ uri:
+ url: '{{ test_server }}v2/collections/ansible_test/my_collection/versions/1.0.0/'
+ return_content: yes
+ register: publish_collection_actual
+
+- name: assert publish collection - {{ test_name }}
+ assert:
+ that:
+ - '"Collection has been successfully published and imported to the Galaxy server" in publish_collection.stdout'
+ - publish_collection_actual.json.metadata.name == 'my_collection'
+ - publish_collection_actual.json.metadata.namespace == 'ansible_test'
+ - publish_collection_actual.json.metadata.version == '1.0.0'
+
+- name: fail to publish existing collection version - {{ test_name }}
+ command: ansible-galaxy collection publish ansible_test-my_collection-1.0.0.tar.gz -s {{ test_server }} --token {{ fallaxy_token }} {{ galaxy_verbosity }}
+ args:
+ chdir: '{{ galaxy_dir }}'
+ register: fail_publish_existing
+ failed_when: '"Artifact already exists" not in fail_publish_existing.stderr'
+
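+
+# The fallaxy test server presumably exposes this custom endpoint to wipe
+# published collections so each test run starts clean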
+- name: reset published collections - {{ test_name }}
+ uri:
+ url: '{{ test_server }}custom/reset/'
+ method: POST
diff --git a/test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2 b/test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2
new file mode 100644
index 00000000..74d36aac
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/templates/ansible.cfg.j2
@@ -0,0 +1,10 @@
+[galaxy]
+server_list=galaxy,automation_hub
+
+[galaxy_server.galaxy]
+url={{ fallaxy_galaxy_server }}
+token={{ fallaxy_token }}
+
+[galaxy_server.automation_hub]
+url={{ fallaxy_ah_server }}
+token={{ fallaxy_token }}
diff --git a/test/integration/targets/ansible-galaxy-collection/vars/main.yml b/test/integration/targets/ansible-galaxy-collection/vars/main.yml
new file mode 100644
index 00000000..bc006ca5
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy-collection/vars/main.yml
@@ -0,0 +1 @@
+galaxy_verbosity: "{{ '' if not ansible_verbosity else '-' ~ ('v' * ansible_verbosity) }}"
diff --git a/test/integration/targets/ansible-galaxy/aliases b/test/integration/targets/ansible-galaxy/aliases
new file mode 100644
index 00000000..48ed7d60
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group4
+skip/python2.6 # build uses tarfile with features not available until 2.7
+skip/aix
diff --git a/test/integration/targets/ansible-galaxy/cleanup-default.yml b/test/integration/targets/ansible-galaxy/cleanup-default.yml
new file mode 100644
index 00000000..f2265c09
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy/cleanup-default.yml
@@ -0,0 +1,5 @@
+- name: remove unwanted packages
+ package:
+ name: git
+ state: absent
+ when: git_install.changed
diff --git a/test/integration/targets/ansible-galaxy/cleanup-freebsd.yml b/test/integration/targets/ansible-galaxy/cleanup-freebsd.yml
new file mode 100644
index 00000000..fa224d83
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy/cleanup-freebsd.yml
@@ -0,0 +1,6 @@
+- name: remove auto-installed packages from FreeBSD
+ pkgng:
+ name: git
+ state: absent
+ autoremove: yes
+ when: git_install.changed
diff --git a/test/integration/targets/ansible-galaxy/cleanup.yml b/test/integration/targets/ansible-galaxy/cleanup.yml
new file mode 100644
index 00000000..57442631
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy/cleanup.yml
@@ -0,0 +1,19 @@
+- hosts: localhost
+ vars:
+ git_install: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/git_install.json") | from_json }}'
+ tasks:
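+ # with_first_found picks the distribution-specific cleanup file when one
+ # exists, falling back to cleanup-default.yml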
+ - name: cleanup
+ include_tasks: "{{ cleanup_filename }}"
+ with_first_found:
+ - "cleanup-{{ ansible_distribution | lower }}.yml"
+ - "cleanup-default.yml"
+ loop_control:
+ loop_var: cleanup_filename
+
+ - name: Remove default collection directories
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "~/.ansible/collections/ansible_collections"
+ - /usr/share/ansible/collections/ansible_collections
diff --git a/test/integration/targets/ansible-galaxy/runme.sh b/test/integration/targets/ansible-galaxy/runme.sh
new file mode 100755
index 00000000..2cd59825
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy/runme.sh
@@ -0,0 +1,425 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+ansible-playbook setup.yml "$@"
+
+trap 'ansible-playbook ${ANSIBLE_PLAYBOOK_DIR}/cleanup.yml' EXIT
+
+# Very simple version test
+ansible-galaxy --version
+
+# Need a relative custom roles path for testing various scenarios of -p
+galaxy_relative_rolespath="my/custom/roles/path"
+
+# Status message function (f_ to designate that it's a function)
+f_ansible_galaxy_status()
+{
+ printf "\n\n\n### Testing ansible-galaxy: %s\n" "${@}"
+}
+
+# Used to initialize a role repository. The matching post function must be called afterwards.
+f_ansible_galaxy_create_role_repo_pre()
+{
+ repo_name=$1
+ repo_dir=$2
+
+ pushd "${repo_dir}"
+ ansible-galaxy init "${repo_name}"
+ pushd "${repo_name}"
+ git init .
+
+ # Configure a git identity, because committing inside a docker container fails without one
+ git config user.email "tester@ansible.com"
+ git config user.name "Ansible Tester"
+
+ # f_ansible_galaxy_create_role_repo_post
+}
+
+# Call after f_ansible_galaxy_create_role_repo_pre.
+f_ansible_galaxy_create_role_repo_post()
+{
+ repo_name=$1
+ repo_tar=$2
+
+ # f_ansible_galaxy_create_role_repo_pre
+
+ git add .
+ git commit -m "local testing ansible galaxy role"
+
+ git archive \
+ --format=tar \
+ --prefix="${repo_name}/" \
+ master > "${repo_tar}"
+ popd # "${repo_name}"
+ popd # "${repo_dir}"
+}
+
+# Prep the local git repos with roles and make tar archives so we can test
+# different scenarios
+galaxy_local_test_role="test-role"
+galaxy_local_test_role_dir=$(mktemp -d)
+galaxy_local_test_role_git_repo="${galaxy_local_test_role_dir}/${galaxy_local_test_role}"
+galaxy_local_test_role_tar="${galaxy_local_test_role_dir}/${galaxy_local_test_role}.tar"
+
+f_ansible_galaxy_create_role_repo_pre "${galaxy_local_test_role}" "${galaxy_local_test_role_dir}"
+f_ansible_galaxy_create_role_repo_post "${galaxy_local_test_role}" "${galaxy_local_test_role_tar}"
+
+galaxy_local_parent_role="parent-role"
+galaxy_local_parent_role_dir=$(mktemp -d)
+galaxy_local_parent_role_git_repo="${galaxy_local_parent_role_dir}/${galaxy_local_parent_role}"
+galaxy_local_parent_role_tar="${galaxy_local_parent_role_dir}/${galaxy_local_parent_role}.tar"
+
+# Create parent-role repository
+f_ansible_galaxy_create_role_repo_pre "${galaxy_local_parent_role}" "${galaxy_local_parent_role_dir}"
+
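+
+# Declare the test role as a dependency of the parent role via meta/requirements.yml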
+ cat <<EOF > meta/requirements.yml
+- src: git+file:///${galaxy_local_test_role_git_repo}
+EOF
+f_ansible_galaxy_create_role_repo_post "${galaxy_local_parent_role}" "${galaxy_local_parent_role_tar}"
+
+# Galaxy install test case
+#
+# Install local git repo
+f_ansible_galaxy_status "install of local git repo"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+
+ ansible-galaxy install git+file:///"${galaxy_local_test_role_git_repo}" "$@"
+
+ # Test that the role was installed to the expected directory
+ [[ -d "${HOME}/.ansible/roles/${galaxy_local_test_role}" ]]
+popd # ${galaxy_testdir}
+rm -fr "${galaxy_testdir}"
+rm -fr "${HOME}/.ansible/roles/${galaxy_local_test_role}"
+
+# Galaxy install test case
+#
+# Install local git repo and ensure that if a role_path is passed, it is in fact used
+f_ansible_galaxy_status "install of local git repo with -p \$role_path"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+ mkdir -p "${galaxy_relative_rolespath}"
+
+ ansible-galaxy install git+file:///"${galaxy_local_test_role_git_repo}" -p "${galaxy_relative_rolespath}" "$@"
+
+ # Test that the role was installed to the expected directory
+ [[ -d "${galaxy_relative_rolespath}/${galaxy_local_test_role}" ]]
+popd # ${galaxy_testdir}
+rm -fr "${galaxy_testdir}"
+
+# Galaxy install test case
+#
+# Install local git repo with a meta/requirements.yml
+f_ansible_galaxy_status "install of local git repo with meta/requirements.yml"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+
+ ansible-galaxy install git+file:///"${galaxy_local_parent_role_git_repo}" "$@"
+
+ # Test that the role was installed to the expected directory
+ [[ -d "${HOME}/.ansible/roles/${galaxy_local_parent_role}" ]]
+
+ # Test that the dependency was also installed
+ [[ -d "${HOME}/.ansible/roles/${galaxy_local_test_role}" ]]
+
+popd # ${galaxy_testdir}
+rm -fr "${galaxy_testdir}"
+rm -fr "${HOME}/.ansible/roles/${galaxy_local_parent_role}"
+rm -fr "${HOME}/.ansible/roles/${galaxy_local_test_role}"
+
+# Galaxy install test case
+#
+# Install local git repo with a meta/requirements.yml + --no-deps argument
+f_ansible_galaxy_status "install of local git repo with meta/requirements.yml + --no-deps argument"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+
+ ansible-galaxy install git+file:///"${galaxy_local_parent_role_git_repo}" --no-deps "$@"
+
+ # Test that the role was installed to the expected directory
+ [[ -d "${HOME}/.ansible/roles/${galaxy_local_parent_role}" ]]
+
+ # Test that the dependency was not installed
+ [[ ! -d "${HOME}/.ansible/roles/${galaxy_local_test_role}" ]]
+
+popd # ${galaxy_testdir}
+rm -fr "${galaxy_testdir}"
+rm -fr "${HOME}/.ansible/roles/${galaxy_local_test_role}"
+
+# Galaxy install test case
+#
+# Ensure that if both a role_file and role_path are provided, they are both
+# honored
+#
+# Protect against regression (GitHub Issue #35217)
+# https://github.com/ansible/ansible/issues/35217
+
+f_ansible_galaxy_status \
+ "install of local git repo and local tarball with -p \$role_path and -r \$role_file" \
+ "Protect against regression (Issue #35217)"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+
+ git clone "${galaxy_local_test_role_git_repo}" "${galaxy_local_test_role}"
+ ansible-galaxy init roles-path-bug "$@"
+ pushd roles-path-bug
+ cat <<EOF > ansible.cfg
+[defaults]
+roles_path = ../:../../:../roles:roles/
+EOF
+ cat <<EOF > requirements.yml
+---
+- src: ${galaxy_local_test_role_tar}
+ name: ${galaxy_local_test_role}
+EOF
+
+ ansible-galaxy install -r requirements.yml -p roles/ "$@"
+ popd # roles-path-bug
+
+ # Test that the role was installed to the expected directory
+ [[ -d "${galaxy_testdir}/roles-path-bug/roles/${galaxy_local_test_role}" ]]
+
+popd # ${galaxy_testdir}
+rm -fr "${galaxy_testdir}"
+
+
+# Galaxy role list tests
+#
+# Basic tests to ensure listing roles works
+
+f_ansible_galaxy_status "role list"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+ ansible-galaxy install git+file:///"${galaxy_local_test_role_git_repo}" "$@"
+
+ ansible-galaxy role list | tee out.txt
+ ansible-galaxy role list test-role | tee -a out.txt
+
+ [[ $(grep -c '^- test-role' out.txt ) -eq 2 ]]
+popd # ${galaxy_testdir}
+
+# Galaxy role test case
+#
+# Test listing a specific role that is not in the first path in ANSIBLE_ROLES_PATH.
+# https://github.com/ansible/ansible/issues/60167#issuecomment-585460706
+
+f_ansible_galaxy_status \
+ "list specific role not in the first path in ANSIBLE_ROLES_PATH"
+
+role_testdir=$(mktemp -d)
+pushd "${role_testdir}"
+
+ mkdir testroles
+ ansible-galaxy role init --init-path ./local-roles quark
+ ANSIBLE_ROLES_PATH=./local-roles:${HOME}/.ansible/roles ansible-galaxy role list quark | tee out.txt
+
+ [[ $(grep -c 'not found' out.txt) -eq 0 ]]
+
+ ANSIBLE_ROLES_PATH=${HOME}/.ansible/roles:./local-roles ansible-galaxy role list quark | tee out.txt
+
+ [[ $(grep -c 'not found' out.txt) -eq 0 ]]
+
+popd # ${role_testdir}
+rm -fr "${role_testdir}"
+
+
+# Galaxy role info tests
+
+# Get info about role that is not installed
+
+f_ansible_galaxy_status "role info"
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+ ansible-galaxy role info samdoran.fish | tee out.txt
+
+ [[ $(grep -c 'not found' out.txt ) -eq 0 ]]
+ [[ $(grep -c 'Role:.*samdoran\.fish' out.txt ) -eq 1 ]]
+
+popd # ${galaxy_testdir}
+
+f_ansible_galaxy_status \
+ "role info non-existant role"
+
+role_testdir=$(mktemp -d)
+pushd "${role_testdir}"
+
+ ansible-galaxy role info notaroll | tee out.txt
+
+ grep -- '- the role notaroll was not found' out.txt
+
+f_ansible_galaxy_status \
+ "role info description offline"
+
+ mkdir testroles
+ ansible-galaxy role init testdesc --init-path ./testroles
+
+ # Only galaxy_info['description'] exists in file
+ sed -i -e 's#[[:space:]]\{1,\}description:.*$# description: Description in galaxy_info#' ./testroles/testdesc/meta/main.yml
+ ansible-galaxy role info -p ./testroles --offline testdesc | tee out.txt
+ grep 'description: Description in galaxy_info' out.txt
+
+ # Both top level 'description' and galaxy_info['description'] exist in file
+ # Use shell-fu instead of sed to prepend a line to a file because BSD
+ # and macOS sed don't work the same as GNU sed.
+ echo 'description: Top level' | \
+ cat - ./testroles/testdesc/meta/main.yml > tmp.yml && \
+ mv tmp.yml ./testroles/testdesc/meta/main.yml
+ ansible-galaxy role info -p ./testroles --offline testdesc | tee out.txt
+ grep 'description: Top level' out.txt
+
+ # Only top level 'description' exists in file
+ sed -i.bak '/^[[:space:]]\{1,\}description: Description in galaxy_info/d' ./testroles/testdesc/meta/main.yml
+ ansible-galaxy role info -p ./testroles --offline testdesc | tee out.txt
+ grep 'description: Top level' out.txt
+
+popd # ${role_testdir}
+rm -fr "${role_testdir}"
+
+# Properly list roles when the role name is a subset of the path, or the role
+# name is the same as the parent directory of the role. Issue #67365
+#
+# ./parrot/parrot
+# ./parrot/arr
+# ./testing-roles/test
+
+f_ansible_galaxy_status \
+ "list roles where the role name is the same or a subset of the role path (#67365)"
+
+role_testdir=$(mktemp -d)
+pushd "${role_testdir}"
+
+ mkdir parrot
+ ansible-galaxy role init --init-path ./parrot parrot
+ ansible-galaxy role init --init-path ./parrot parrot-ship
+ ansible-galaxy role init --init-path ./parrot arr
+
+ ansible-galaxy role list -p ./parrot | tee out.txt
+
+ [[ $(grep -Ec '\- (parrot|arr)' out.txt) -eq 3 ]]
+ ansible-galaxy role list test-role | tee -a out.txt
+
+popd # ${role_testdir}
+rm -rf "${role_testdir}"
+
+f_ansible_galaxy_status \
+ "Test role with non-ascii characters"
+
+role_testdir=$(mktemp -d)
+pushd "${role_testdir}"
+
+ mkdir nonascii
+ ansible-galaxy role init --init-path ./nonascii nonascii
+ touch nonascii/ÅÑŚÌβÅÈ.txt
+ tar czvf nonascii.tar.gz nonascii
+ ansible-galaxy role install -p ./roles nonascii.tar.gz
+
+popd # ${role_testdir}
+rm -rf "${role_testdir}"
+
+#################################
+# ansible-galaxy collection tests
+#################################
+# TODO: Move these to ansible-galaxy-collection
+
+galaxy_testdir=$(mktemp -d)
+pushd "${galaxy_testdir}"
+
+## ansible-galaxy collection list tests
+
+# Create more collections and put them in various places
+f_ansible_galaxy_status \
+ "setting up for collection list tests"
+
+rm -rf ansible_test/* install/*
+
+NAMES=(zoo museum airport)
+for n in "${NAMES[@]}"; do
+ ansible-galaxy collection init "ansible_test.$n"
+ ansible-galaxy collection build "ansible_test/$n"
+done
+
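+
+# Install each collection to a different location: the default path, ./install
+# and ./local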
+ansible-galaxy collection install ansible_test-zoo-1.0.0.tar.gz
+ansible-galaxy collection install ansible_test-museum-1.0.0.tar.gz -p ./install
+ansible-galaxy collection install ansible_test-airport-1.0.0.tar.gz -p ./local
+
+# Change the collection version and install to another location
+sed -i -e 's#^version:.*#version: 2.5.0#' ansible_test/zoo/galaxy.yml
+ansible-galaxy collection build ansible_test/zoo
+ansible-galaxy collection install ansible_test-zoo-2.5.0.tar.gz -p ./local
+
+# Test listing a collection that contains a galaxy.yml
+ansible-galaxy collection init "ansible_test.development"
+mv ./ansible_test/development "${galaxy_testdir}/local/ansible_collections/ansible_test/"
+
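+
+# Make both the default install location and the scratch ./local tree visible
+# to the list commands below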
+export ANSIBLE_COLLECTIONS_PATH=~/.ansible/collections:${galaxy_testdir}/local
+
+f_ansible_galaxy_status \
+ "collection list all collections"
+
+ ansible-galaxy collection list -p ./install | tee out.txt
+
+ [[ $(grep -c ansible_test out.txt) -eq 5 ]]
+
+f_ansible_galaxy_status \
+ "collection list specific collection"
+
+ ansible-galaxy collection list -p ./install ansible_test.airport | tee out.txt
+
+ [[ $(grep -c 'ansible_test\.airport' out.txt) -eq 1 ]]
+
+f_ansible_galaxy_status \
+ "collection list specific collection which contains galaxy.yml"
+
+ ansible-galaxy collection list -p ./install ansible_test.development 2>&1 | tee out.txt
+
+ [[ $(grep -c 'ansible_test\.development' out.txt) -eq 1 ]]
+ [[ $(grep -c 'WARNING' out.txt) -eq 0 ]]
+
+f_ansible_galaxy_status \
+ "collection list specific collection found in multiple places"
+
+ ansible-galaxy collection list -p ./install ansible_test.zoo | tee out.txt
+
+ [[ $(grep -c 'ansible_test\.zoo' out.txt) -eq 2 ]]
+
+f_ansible_galaxy_status \
+ "collection list all with duplicate paths"
+
+ ansible-galaxy collection list -p ~/.ansible/collections | tee out.txt
+
+ [[ $(grep -c '# /root/.ansible/collections/ansible_collections' out.txt) -eq 1 ]]
+
+f_ansible_galaxy_status \
+ "collection list invalid collection name"
+
+ ansible-galaxy collection list -p ./install dirty.wraughten.name "$@" 2>&1 | tee out.txt || echo "expected failure"
+
+ grep 'ERROR! Invalid collection name' out.txt
+
+f_ansible_galaxy_status \
+ "collection list path not found"
+
+ ansible-galaxy collection list -p ./nope "$@" 2>&1 | tee out.txt || echo "expected failure"
+
+ grep '\[WARNING\]: - the configured path' out.txt
+
+f_ansible_galaxy_status \
+ "collection list missing ansible_collections dir inside path"
+
+ mkdir emptydir
+
+ ansible-galaxy collection list -p ./emptydir "$@"
+
+ rmdir emptydir
+
+unset ANSIBLE_COLLECTIONS_PATH
+
+## end ansible-galaxy collection list
+
+
+popd # ${galaxy_testdir}
+
+rm -fr "${galaxy_testdir}"
+
+rm -fr "${galaxy_local_test_role_dir}"
diff --git a/test/integration/targets/ansible-galaxy/setup.yml b/test/integration/targets/ansible-galaxy/setup.yml
new file mode 100644
index 00000000..a82d02ae
--- /dev/null
+++ b/test/integration/targets/ansible-galaxy/setup.yml
@@ -0,0 +1,11 @@
+- hosts: localhost
+ tasks:
+ - name: install git
+ package:
+ name: git
+ when: ansible_distribution != "MacOSX"
+ register: git_install
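+ # cleanup.yml later reads the saved result to decide whether to remove git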
+ - name: save install result
+ copy:
+ content: '{{ git_install }}'
+ dest: '{{ lookup("env", "OUTPUT_DIR") }}/git_install.json'
diff --git a/test/integration/targets/ansible-runner/aliases b/test/integration/targets/ansible-runner/aliases
new file mode 100644
index 00000000..ec9eb3af
--- /dev/null
+++ b/test/integration/targets/ansible-runner/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group3
+skip/python3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/test/integration/targets/ansible-runner/files/adhoc_example1.py b/test/integration/targets/ansible-runner/files/adhoc_example1.py
new file mode 100644
index 00000000..3e0d8414
--- /dev/null
+++ b/test/integration/targets/ansible-runner/files/adhoc_example1.py
@@ -0,0 +1,26 @@
+import json
+import os
+import sys
+import ansible_runner
+
+# the first positional arg should be where the artifacts live
+output_dir = sys.argv[1]
+
+# this calls a single module directly, aka "adhoc" mode
+r = ansible_runner.run(
+ private_data_dir=output_dir,
+ host_pattern='localhost',
+ module='shell',
+ module_args='whoami'
+)
+
+data = {
+ 'rc': r.rc,
+ 'status': r.status,
+ 'events': [x['event'] for x in r.events],
+ 'stats': r.stats
+}
+
+# insert this header for the flask controller
+print('#STARTJSON')
+json.dump(data, sys.stdout)
diff --git a/test/integration/targets/ansible-runner/files/constraints.txt b/test/integration/targets/ansible-runner/files/constraints.txt
new file mode 100644
index 00000000..c3e39402
--- /dev/null
+++ b/test/integration/targets/ansible-runner/files/constraints.txt
@@ -0,0 +1,5 @@
+psutil < 5.7.0 # versions 5.7.0 and later break installation on older pip
+pexpect >= 4.5, <= 4.8.0
+python-daemon <= 2.2.4
+pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
+six <= 1.14.0
diff --git a/test/integration/targets/ansible-runner/files/playbook_example1.py b/test/integration/targets/ansible-runner/files/playbook_example1.py
new file mode 100644
index 00000000..83cb19ff
--- /dev/null
+++ b/test/integration/targets/ansible-runner/files/playbook_example1.py
@@ -0,0 +1,38 @@
+import json
+import os
+import sys
+import ansible_runner
+
+
+PLAYBOOK = '''
+- hosts: localhost
+ gather_facts: False
+ tasks:
+ - set_fact:
+ foo: bar
+'''
+
+# the first positional arg should be where the artifacts live
+output_dir = sys.argv[1]
+
+invdir = os.path.join(output_dir, 'inventory')
+if not os.path.isdir(invdir):
+ os.makedirs(invdir)
+with open(os.path.join(invdir, 'hosts'), 'w') as f:
+ f.write('localhost\n')
+pbfile = os.path.join(output_dir, 'test.yml')
+with open(pbfile, 'w') as f:
+ f.write(PLAYBOOK)
+
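+
+# run the playbook through ansible-runner using the inventory and playbook
+# written into the private data dir above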
+r = ansible_runner.run(private_data_dir=output_dir, playbook='test.yml')
+
+data = {
+ 'rc': r.rc,
+ 'status': r.status,
+ 'events': [x['event'] for x in r.events],
+ 'stats': r.stats
+}
+
+# insert this header for the flask controller
+print('#STARTJSON')
+json.dump(data, sys.stdout)
diff --git a/test/integration/targets/ansible-runner/filter_plugins/parse.py b/test/integration/targets/ansible-runner/filter_plugins/parse.py
new file mode 100644
index 00000000..7842f6c6
--- /dev/null
+++ b/test/integration/targets/ansible-runner/filter_plugins/parse.py
@@ -0,0 +1,17 @@
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import re
+import json
+
+
+def parse_json(value):
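+ # everything before the '#STARTJSON' marker is ansible warning noise; strip
+ # it, then pretty-print the remaining JSON payload for deterministic output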
+ return json.dumps(json.loads(re.sub('^.*\n#STARTJSON\n', '', value, flags=re.DOTALL)), indent=4, sort_keys=True)
+
+
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'parse_json': parse_json,
+ }
diff --git a/test/integration/targets/ansible-runner/inventory b/test/integration/targets/ansible-runner/inventory
new file mode 100644
index 00000000..009f6c33
--- /dev/null
+++ b/test/integration/targets/ansible-runner/inventory
@@ -0,0 +1 @@
+# no hosts required, test only requires implicit localhost
diff --git a/test/integration/targets/ansible-runner/runme.sh b/test/integration/targets/ansible-runner/runme.sh
new file mode 100755
index 00000000..384de80f
--- /dev/null
+++ b/test/integration/targets/ansible-runner/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook test.yml -i inventory "$@"
diff --git a/test/integration/targets/ansible-runner/tasks/adhoc_example1.yml b/test/integration/targets/ansible-runner/tasks/adhoc_example1.yml
new file mode 100644
index 00000000..c6fdf03f
--- /dev/null
+++ b/test/integration/targets/ansible-runner/tasks/adhoc_example1.yml
@@ -0,0 +1,16 @@
+- name: execute the script
+ command: "'{{ ansible_python_interpreter }}' '{{ role_path }}/files/adhoc_example1.py' '{{ lookup('env', 'OUTPUT_DIR') }}'"
+ environment:
+ AWX_LIB_DIRECTORY: "{{ callback_path }}"
+ register: script
+
+- name: parse script output
+ # workaround for ansible-runner showing ansible warnings on stdout
+ set_fact:
+ adexec1_json: "{{ script.stdout | parse_json }}"
+
+- assert:
+ that:
+ - "adexec1_json.rc == 0"
+ - "adexec1_json.events|length == 4"
+ - "'localhost' in adexec1_json.stats.ok"
diff --git a/test/integration/targets/ansible-runner/tasks/main.yml b/test/integration/targets/ansible-runner/tasks/main.yml
new file mode 100644
index 00000000..5608786b
--- /dev/null
+++ b/test/integration/targets/ansible-runner/tasks/main.yml
@@ -0,0 +1,5 @@
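+# Only runs on RHEL/CentOS 7, presumably the only platform the pinned
+# ansible-runner 1.2.0 stack is expected to support in CI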
+- block:
+ - include_tasks: setup.yml
+ - include_tasks: adhoc_example1.yml
+ - include_tasks: playbook_example1.yml
+ when: ansible_distribution in ('RedHat', 'CentOS') and ansible_distribution_major_version == '7'
diff --git a/test/integration/targets/ansible-runner/tasks/playbook_example1.yml b/test/integration/targets/ansible-runner/tasks/playbook_example1.yml
new file mode 100644
index 00000000..ec1f7cda
--- /dev/null
+++ b/test/integration/targets/ansible-runner/tasks/playbook_example1.yml
@@ -0,0 +1,16 @@
+- name: execute the script
+ command: "'{{ ansible_python_interpreter }}' '{{ role_path }}/files/playbook_example1.py' '{{ lookup('env', 'OUTPUT_DIR') }}'"
+ environment:
+ AWX_LIB_DIRECTORY: "{{ callback_path }}"
+ register: script
+
+- name: parse script output
+ # workaround for ansible-runner showing ansible warnings on stdout
+ set_fact:
+ pbexec_json: "{{ script.stdout | parse_json }}"
+
+- assert:
+ that:
+ - "pbexec_json.rc == 0"
+ - "pbexec_json.events|length == 7"
+ - "'localhost' in pbexec_json.stats.ok"
diff --git a/test/integration/targets/ansible-runner/tasks/setup.yml b/test/integration/targets/ansible-runner/tasks/setup.yml
new file mode 100644
index 00000000..ea24ced5
--- /dev/null
+++ b/test/integration/targets/ansible-runner/tasks/setup.yml
@@ -0,0 +1,19 @@
+- name: Install docutils
+ pip:
+ name: docutils
+
+- name: Install ansible-runner
+ pip:
+ name: ansible-runner
+ version: 1.2.0
+ extra_args:
+ -c {{ role_path }}/files/constraints.txt
+
+- name: Find location of ansible-runner installation
+ command: "'{{ ansible_python_interpreter }}' -c 'import os, ansible_runner; print(os.path.dirname(ansible_runner.__file__))'"
+ register: ansible_runner_path
+
+# workaround for https://github.com/ansible/ansible-runner/issues/132
+- name: Set callback path to work around ansible-runner bug
+ set_fact:
+ callback_path: ":{{ ansible_runner_path.stdout }}/callbacks"
diff --git a/test/integration/targets/ansible-runner/test.yml b/test/integration/targets/ansible-runner/test.yml
new file mode 100644
index 00000000..113f8e7c
--- /dev/null
+++ b/test/integration/targets/ansible-runner/test.yml
@@ -0,0 +1,3 @@
+- hosts: localhost
+ roles:
+ - ansible-runner
diff --git a/test/integration/targets/ansible-test-docker/aliases b/test/integration/targets/ansible-test-docker/aliases
new file mode 100644
index 00000000..d1284cf7
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/aliases
@@ -0,0 +1 @@
+shippable/generic/group1 # Runs in the default test container so we have access to tools like pwsh
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml
new file mode 100644
index 00000000..08a32e80
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/galaxy.yml
@@ -0,0 +1,6 @@
+namespace: ns
+name: col
+version: 1.0.0
+readme: README.rst
+authors:
+ - Ansible
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py
new file mode 100644
index 00000000..e69844b3
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/doc_fragments/ps_util.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment:
+
+ DOCUMENTATION = r'''
+options:
+ option1:
+ description:
+ - Test description
+ required: yes
+ aliases:
+ - alias1
+ type: str
+'''
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1 b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1
new file mode 100644
index 00000000..d37e681a
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/PSUtil.psm1
@@ -0,0 +1,16 @@
+# Copyright (c) 2020 Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+Function Get-PSUtilSpec {
+ <#
+ .SYNOPSIS
+ Shared util spec test
+ #>
+ @{
+ options = @{
+ option1 = @{ type = 'str'; required = $true; aliases = 'alias1' }
+ }
+ }
+}
+
+Export-ModuleMember -Function Get-PSUtilSpec
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py
new file mode 100644
index 00000000..b9c531cf
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/module_utils/my_util.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+def hello(name):
+ return 'Hello %s' % name
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py
new file mode 100644
index 00000000..c8a0cf75
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: hello
+short_description: Hello test module
+description: Hello test module.
+options:
+ name:
+ description: Name to say hello to.
+ type: str
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+- minimal:
+'''
+
+RETURN = ''''''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.my_util import hello
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ ),
+ )
+
+ module.exit_json(**say_hello(module.params['name']))
+
+
+def say_hello(name):
+ return dict(
+ message=hello(name),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1 b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1
new file mode 100644
index 00000000..9dab99da
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.ps1
@@ -0,0 +1,16 @@
+#!powershell
+
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -PowerShell ..module_utils.PSUtil
+
+$spec = @{
+ options = @{
+ my_opt = @{ type = "str"; required = $true }
+ }
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-PSUtilSpec))
+$module.ExitJson()
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py
new file mode 100644
index 00000000..ed49f4ea
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/win_util_args.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_util_args
+short_description: Short description
+description:
+- Some test description for the module
+options:
+ my_opt:
+ description:
+ - Test description
+ required: yes
+ type: str
+extends_documentation_fragment:
+- ns.col.ps_util
+
+author:
+- Ansible Test (@ansible)
+'''
+
+EXAMPLES = r'''
+- win_util_args:
+ option1: test
+ my_opt: test
+'''
+
+RETURN = r'''
+#
+'''
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml
new file mode 100644
index 00000000..c45c199c
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/integration/targets/minimal/tasks/main.yml
@@ -0,0 +1,7 @@
+- hello:
+ name: Ansibull
+ register: hello
+
+- assert:
+ that:
+ - hello.message == 'Hello Ansibull'
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py
new file mode 100644
index 00000000..7df87103
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from .....plugins.module_utils.my_util import hello
+
+
+def test_hello():
+ assert hello('Ansibull') == 'Hello Ansibull'
diff --git a/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py
new file mode 100644
index 00000000..95ee0574
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from .....plugins.modules.hello import say_hello
+
+
+def test_say_hello():
+ assert say_hello('Ansibull') == dict(message='Hello Ansibull')
diff --git a/test/integration/targets/ansible-test-docker/collection-tests/docker.sh b/test/integration/targets/ansible-test-docker/collection-tests/docker.sh
new file mode 100755
index 00000000..e0e34290
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/collection-tests/docker.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}"
+cd "${WORK_DIR}/ansible_collections/ns/col"
+
+# common args for all tests
+# because we are running in shippable/generic/, we are already in the default docker container
+common=(--python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --truncate 0 "${@}")
+
+# prime the venv to work around an issue with PyYAML detection in ansible-test
+ansible-test sanity "${common[@]}" --test ignores
+
+# tests
+ansible-test sanity "${common[@]}"
+ansible-test units "${common[@]}"
+ansible-test integration "${common[@]}"
diff --git a/test/integration/targets/ansible-test-docker/runme.sh b/test/integration/targets/ansible-test-docker/runme.sh
new file mode 100755
index 00000000..7c956b4f
--- /dev/null
+++ b/test/integration/targets/ansible-test-docker/runme.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -eu -o pipefail
+
+# tests must be executed outside of the ansible source tree
+# otherwise ansible-test will test the ansible source instead of the test collection
+# the temporary directory provided by ansible-test resides within the ansible source tree
+tmp_dir=$(mktemp -d)
+
+trap 'rm -rf "${tmp_dir}"' EXIT
+
+export TEST_DIR
+export WORK_DIR
+
+TEST_DIR="$PWD"
+
+for test in collection-tests/*.sh; do
+ WORK_DIR="${tmp_dir}/$(basename "${test}" ".sh")"
+ mkdir "${WORK_DIR}"
+ echo "**********************************************************************"
+ echo "TEST: ${test}: STARTING"
+ "${test}" "${@}" || (echo "TEST: ${test}: FAILED" && exit 1)
+ echo "TEST: ${test}: PASSED"
+done
diff --git a/test/integration/targets/ansible-test/aliases b/test/integration/targets/ansible-test/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/ansible-test/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst b/test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst
new file mode 100644
index 00000000..d8138d3b
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/README.rst
@@ -0,0 +1,3 @@
+README
+------
+This is a simple collection used to verify that ``ansible-test`` works on a collection.
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml
new file mode 100644
index 00000000..08a32e80
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/galaxy.yml
@@ -0,0 +1,6 @@
+namespace: ns
+name: col
+version: 1.0.0
+readme: README.rst
+authors:
+ - Ansible
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml
new file mode 100644
index 00000000..1ac15484
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/meta/runtime.yml
@@ -0,0 +1,4 @@
+plugin_routing:
+ modules:
+ hi:
+ redirect: hello
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/__init__.py
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py
new file mode 100644
index 00000000..b9c531cf
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/module_utils/my_util.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+def hello(name):
+ return 'Hello %s' % name
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py
new file mode 100644
index 00000000..033b6c90
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: hello
+short_description: Hello test module
+description: Hello test module.
+options:
+ name:
+ description: Name to say hello to.
+ type: str
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+- hello:
+'''
+
+RETURN = ''''''
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.my_util import hello
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ ),
+ )
+
+ module.exit_json(**say_hello(module.params['name']))
+
+
+def say_hello(name):
+ return dict(
+ message=hello(name),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml
new file mode 100644
index 00000000..c45c199c
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/integration/targets/hello/tasks/main.yml
@@ -0,0 +1,7 @@
+- hello:
+ name: Ansibull
+ register: hello
+
+- assert:
+ that:
+ - hello.message == 'Hello Ansibull'
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/sanity/ignore.txt
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py
new file mode 100644
index 00000000..7df87103
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from .....plugins.module_utils.my_util import hello
+
+
+def test_hello():
+ assert hello('Ansibull') == 'Hello Ansibull'
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py
new file mode 100644
index 00000000..95ee0574
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from .....plugins.modules.hello import say_hello
+
+
+def test_say_hello():
+ assert say_hello('Ansibull') == dict(message='Hello Ansibull')
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt
new file mode 100644
index 00000000..01bb5cff
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/constraints.txt
@@ -0,0 +1 @@
+botocore == 1.13.49
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt
new file mode 100644
index 00000000..c5b9e129
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/requirements.txt
@@ -0,0 +1 @@
+botocore
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml
new file mode 100644
index 00000000..c2c1f1a4
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/integration/targets/constraints/tasks/main.yml
@@ -0,0 +1,7 @@
+- name: get botocore version
+ command: python -c "import botocore; print(botocore.__version__)"
+ register: botocore_version
+- name: check botocore version
+ assert:
+ that:
+ - 'botocore_version.stdout == "1.13.49"'
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt
new file mode 100644
index 00000000..d0986894
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/constraints.txt
@@ -0,0 +1 @@
+botocore == 1.13.50
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py
new file mode 100644
index 00000000..857e8e55
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/plugins/modules/test_constraints.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import botocore
+
+
+def test_constraints():
+ assert botocore.__version__ == '1.13.50'
diff --git a/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt
new file mode 100644
index 00000000..c5b9e129
--- /dev/null
+++ b/test/integration/targets/ansible-test/ansible_collections/ns/col_constraints/tests/unit/requirements.txt
@@ -0,0 +1 @@
+botocore
diff --git a/test/integration/targets/ansible-test/collection-tests/constraints.sh b/test/integration/targets/ansible-test/collection-tests/constraints.sh
new file mode 100755
index 00000000..d3bbc6ab
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/constraints.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}"
+cd "${WORK_DIR}/ansible_collections/ns/col_constraints"
+
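+# the unit and integration runs pin different botocore versions (1.13.50 and
+# 1.13.49 respectively) so each can verify that its own constraints file was applied
+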
+# common args for all tests
+# each test will be run in a separate venv to verify that requirements have been properly specified
+common=(--venv --python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --truncate 0 "${@}")
+
+# unit tests
+
+rm -rf "tests/output"
+ansible-test units "${common[@]}"
+
+# integration tests
+
+rm -rf "tests/output"
+ansible-test integration "${common[@]}"
diff --git a/test/integration/targets/ansible-test/collection-tests/coverage.sh b/test/integration/targets/ansible-test/collection-tests/coverage.sh
new file mode 100755
index 00000000..3d01dd4b
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/coverage.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}"
+cd "${WORK_DIR}/ansible_collections/ns/col"
+
+# rename the sanity ignore file to match the current ansible version and update import ignores with the python version
+ansible_version="$(python -c 'import ansible.release; print(".".join(ansible.release.__version__.split(".")[:2]))')"
+sed "s/ import$/ import-${ANSIBLE_TEST_PYTHON_VERSION}/;" < "tests/sanity/ignore.txt" > "tests/sanity/ignore-${ansible_version}.txt"
+
+# common args for all tests
+common=(--venv --color --truncate 0 "${@}")
+test_common=("${common[@]}" --python "${ANSIBLE_TEST_PYTHON_VERSION}")
+
+# run a lightweight test that generates code coverage output
+ansible-test sanity --test import "${test_common[@]}" --coverage
+
+# report on code coverage in all supported formats
+ansible-test coverage report "${common[@]}"
+ansible-test coverage html "${common[@]}"
+ansible-test coverage xml "${common[@]}"
diff --git a/test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh b/test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh
new file mode 100755
index 00000000..31ebfbbf
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/git-at-collection-base.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+export GIT_TOP_LEVEL SUBMODULE_DST
+
+GIT_TOP_LEVEL="${WORK_DIR}/super/ansible_collections/ns/col"
+SUBMODULE_DST="sub"
+
+source collection-tests/git-common.bash
diff --git a/test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh b/test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh
new file mode 100755
index 00000000..8af4387a
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/git-at-collection-root.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+export GIT_TOP_LEVEL SUBMODULE_DST
+
+GIT_TOP_LEVEL="${WORK_DIR}/super"
+SUBMODULE_DST="ansible_collections/ns/col/sub"
+
+source collection-tests/git-common.bash
diff --git a/test/integration/targets/ansible-test/collection-tests/git-common.bash b/test/integration/targets/ansible-test/collection-tests/git-common.bash
new file mode 100755
index 00000000..069b157c
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/git-common.bash
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+# make sure git is installed
+git --version || ansible-playbook collection-tests/install-git.yml -i ../../inventory "$@"
+
+# init sub project
+mkdir "${WORK_DIR}/sub"
+cd "${WORK_DIR}/sub"
+touch "README.md"
+git init
+git config user.name 'Ansible Test'
+git config user.email 'ansible-test@ansible.com'
+git add "README.md"
+git commit -m "Initial commit."
+
+# init super project
+rm -rf "${WORK_DIR}/super" # needed when re-creating in place
+mkdir "${WORK_DIR}/super"
+cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}/super"
+cd "${GIT_TOP_LEVEL}"
+git init
+
+# add submodule
+git submodule add "${WORK_DIR}/sub" "${SUBMODULE_DST}"
+
+# prepare for tests
+expected="${WORK_DIR}/expected.txt"
+actual="${WORK_DIR}/actual.txt"
+cd "${WORK_DIR}/super/ansible_collections/ns/col"
+mkdir tests/.git
+touch tests/.git/keep.txt # make sure ansible-test correctly ignores version control within collection subdirectories
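+# snapshot the on-disk file list (minus git metadata) as the expected output
+# of 'ansible-test env --list-files'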
+find . -type f ! -path '*/.git/*' ! -name .git | sed 's|^\./||' | sort >"${expected}"
+set -x
+
+# test at the collection base
+ansible-test env --list-files | sort >"${actual}"
+diff --unified "${expected}" "${actual}"
+
+# test at the submodule base
+(cd sub && ansible-test env --list-files | sort >"${actual}")
+diff --unified "${expected}" "${actual}"
diff --git a/test/integration/targets/ansible-test/collection-tests/install-git.yml b/test/integration/targets/ansible-test/collection-tests/install-git.yml
new file mode 100644
index 00000000..29adead7
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/install-git.yml
@@ -0,0 +1,5 @@
+- hosts: localhost
+ tasks:
+ - name: Make sure git is installed
+ package:
+ name: git
diff --git a/test/integration/targets/ansible-test/collection-tests/venv.sh b/test/integration/targets/ansible-test/collection-tests/venv.sh
new file mode 100755
index 00000000..6ff496b6
--- /dev/null
+++ b/test/integration/targets/ansible-test/collection-tests/venv.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+cp -a "${TEST_DIR}/ansible_collections" "${WORK_DIR}"
+cd "${WORK_DIR}/ansible_collections/ns/col"
+
+# common args for all tests
+# each test will be run in a separate venv to verify that requirements have been properly specified
+common=(--venv --python "${ANSIBLE_TEST_PYTHON_VERSION}" --color --truncate 0 "${@}")
+
+# sanity tests
+
+tests=()
+
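+# disable command tracing while the test list is captured line by line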
+set +x
+
+while IFS='' read -r line; do
+ tests+=("$line");
+done < <(
+ ansible-test sanity --list-tests
+)
+
+set -x
+
+for test in "${tests[@]}"; do
+ rm -rf "tests/output"
+ ansible-test sanity "${common[@]}" --test "${test}"
+done
+
+# unit tests
+
+rm -rf "tests/output"
+ansible-test units "${common[@]}"
+
+# integration tests
+
+rm -rf "tests/output"
+ansible-test integration "${common[@]}"
diff --git a/test/integration/targets/ansible-test/runme.sh b/test/integration/targets/ansible-test/runme.sh
new file mode 100755
index 00000000..7c956b4f
--- /dev/null
+++ b/test/integration/targets/ansible-test/runme.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -eu -o pipefail
+
+# tests must be executed outside of the ansible source tree
+# otherwise ansible-test will test the ansible source instead of the test collection
+# the temporary directory provided by ansible-test resides within the ansible source tree
+tmp_dir=$(mktemp -d)
+
+trap 'rm -rf "${tmp_dir}"' EXIT
+
+export TEST_DIR
+export WORK_DIR
+
+TEST_DIR="$PWD"
+
+for test in collection-tests/*.sh; do
+ WORK_DIR="${tmp_dir}/$(basename "${test}" ".sh")"
+ mkdir "${WORK_DIR}"
+ echo "**********************************************************************"
+ echo "TEST: ${test}: STARTING"
+ "${test}" "${@}" || (echo "TEST: ${test}: FAILED" && exit 1)
+ echo "TEST: ${test}: PASSED"
+done
diff --git a/test/integration/targets/ansible/adhoc-callback.stdout b/test/integration/targets/ansible/adhoc-callback.stdout
new file mode 100644
index 00000000..05a93dd6
--- /dev/null
+++ b/test/integration/targets/ansible/adhoc-callback.stdout
@@ -0,0 +1,12 @@
+v2_playbook_on_start
+v2_on_any
+v2_playbook_on_play_start
+v2_on_any
+v2_playbook_on_task_start
+v2_on_any
+v2_runner_on_start
+v2_on_any
+v2_runner_on_ok
+v2_on_any
+v2_playbook_on_stats
+v2_on_any
diff --git a/test/integration/targets/ansible/aliases b/test/integration/targets/ansible/aliases
new file mode 100644
index 00000000..f71c8117
--- /dev/null
+++ b/test/integration/targets/ansible/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group4
+skip/aix
diff --git a/test/integration/targets/ansible/ansible-testé.cfg b/test/integration/targets/ansible/ansible-testé.cfg
new file mode 100644
index 00000000..61a99f48
--- /dev/null
+++ b/test/integration/targets/ansible/ansible-testé.cfg
@@ -0,0 +1,2 @@
+[defaults]
+remote_user = admin
diff --git a/test/integration/targets/ansible/callback_plugins/callback_debug.py b/test/integration/targets/ansible/callback_plugins/callback_debug.py
new file mode 100644
index 00000000..cac122c1
--- /dev/null
+++ b/test/integration/targets/ansible/callback_plugins/callback_debug.py
@@ -0,0 +1,24 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'callback_debug'
+
+ def __init__(self, *args, **kwargs):
+ super(CallbackModule, self).__init__(*args, **kwargs)
+ self._display.display('__init__')
+
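+ # drop the inherited v2_* no-op handlers from the base class so that every
+ # callback lookup falls through to __getattr__ below and prints the event name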
+ for cb in [x for x in dir(CallbackBase) if x.startswith('v2_')]:
+ delattr(CallbackBase, cb)
+
+ def __getattr__(self, name):
+ if name.startswith('v2_'):
+ return lambda *args, **kwargs: self._display.display(name)
diff --git a/test/integration/targets/ansible/no-extension b/test/integration/targets/ansible/no-extension
new file mode 100644
index 00000000..61a99f48
--- /dev/null
+++ b/test/integration/targets/ansible/no-extension
@@ -0,0 +1,2 @@
+[defaults]
+remote_user = admin
diff --git a/test/integration/targets/ansible/playbook.yml b/test/integration/targets/ansible/playbook.yml
new file mode 100644
index 00000000..c38b9060
--- /dev/null
+++ b/test/integration/targets/ansible/playbook.yml
@@ -0,0 +1,5 @@
+- hosts: all
+ gather_facts: false
+ tasks:
+ - debug:
+ msg: "{{ username }}"
diff --git a/test/integration/targets/ansible/playbookdir_cfg.ini b/test/integration/targets/ansible/playbookdir_cfg.ini
new file mode 100644
index 00000000..f4bf8af8
--- /dev/null
+++ b/test/integration/targets/ansible/playbookdir_cfg.ini
@@ -0,0 +1,2 @@
+[defaults]
+playbook_dir = /tmp
diff --git a/test/integration/targets/ansible/runme.sh b/test/integration/targets/ansible/runme.sh
new file mode 100755
index 00000000..23ae1863
--- /dev/null
+++ b/test/integration/targets/ansible/runme.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+ansible --version
+ansible --help
+
+ansible testhost -i ../../inventory -m ping "$@"
+ansible testhost -i ../../inventory -m setup "$@"
+
+ansible-config view -c ./ansible-testé.cfg | grep 'remote_user = admin'
+ansible-config dump -c ./ansible-testé.cfg | grep 'DEFAULT_REMOTE_USER([^)]*) = admin\>'
+ANSIBLE_REMOTE_USER=administrator ansible-config dump| grep 'DEFAULT_REMOTE_USER([^)]*) = administrator\>'
+ansible-config list | grep 'DEFAULT_REMOTE_USER'
+
+# 'view' command must fail when config file is missing or has an invalid file extension
+ansible-config view -c ./ansible-non-existent.cfg 2> err1.txt || grep -Eq 'ERROR! The provided configuration file is missing or not accessible:' err1.txt || (cat err*.txt; rm -f err1.txt; exit 1)
+ansible-config view -c ./no-extension 2> err2.txt || grep -q 'Unsupported configuration file extension' err2.txt || (cat err2.txt; rm -f err*.txt; exit 1)
+rm -f err*.txt
+
+# test setting playbook_dir via envvar
+ANSIBLE_PLAYBOOK_DIR=/tmp ansible localhost -m debug -a var=playbook_dir | grep '"playbook_dir": "/tmp"'
+
+# test setting playbook_dir via cmdline
+ansible localhost -m debug -a var=playbook_dir --playbook-dir=/tmp | grep '"playbook_dir": "/tmp"'
+
+# test setting playbook dir via ansible.cfg
+env -u ANSIBLE_PLAYBOOK_DIR ANSIBLE_CONFIG=./playbookdir_cfg.ini ansible localhost -m debug -a var=playbook_dir | grep '"playbook_dir": "/tmp"'
+
+# test adhoc callback triggers
+ANSIBLE_STDOUT_CALLBACK=callback_debug ANSIBLE_LOAD_CALLBACK_PLUGINS=1 ansible --playbook-dir . testhost -i ../../inventory -m ping | grep -E '^v2_' | diff -u adhoc-callback.stdout -
+
+# Test that no tmp dirs are left behind when running ansible-config
+TMP_DIR=~/.ansible/tmptest
+if [[ -d "$TMP_DIR" ]]; then
+ rm -rf "$TMP_DIR"
+fi
+ANSIBLE_LOCAL_TEMP="$TMP_DIR" ansible-config list > /dev/null
+ANSIBLE_LOCAL_TEMP="$TMP_DIR" ansible-config dump > /dev/null
+ANSIBLE_LOCAL_TEMP="$TMP_DIR" ansible-config view > /dev/null
+
+# wc on macOS is dumb and returns leading spaces
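+# find also lists "$TMP_DIR" itself, so a count of exactly 1 means nothing was left behind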
+file_count=$(find "$TMP_DIR" -type d -maxdepth 1 | wc -l | sed 's/^ *//')
+if [[ $file_count -ne 1 ]]; then
+ echo "$file_count temporary files were left behind by ansible-config"
+ if [[ -d "$TMP_DIR" ]]; then
+ rm -rf "$TMP_DIR"
+ fi
+ exit 1
+fi
+
+# Ensure an extra-vars filename must be prefixed with '@': a non-existent file without it fails
+if ansible-playbook -i ../../inventory --extra-vars /tmp/non-existing-file playbook.yml; then
+ echo "extra_vars filename without '@' sign should cause failure"
+ exit 1
+fi
+
+# Ensure an extra-vars filename must be prefixed with '@' even when the file exists
+if ansible-playbook -i ../../inventory --extra-vars ./vars.yml playbook.yml; then
+ echo "extra_vars filename without '@' sign should cause failure"
+ exit 1
+fi
+
+ansible-playbook -i ../../inventory --extra-vars @./vars.yml playbook.yml
diff --git a/test/integration/targets/ansible/vars.yml b/test/integration/targets/ansible/vars.yml
new file mode 100644
index 00000000..a19e454f
--- /dev/null
+++ b/test/integration/targets/ansible/vars.yml
@@ -0,0 +1 @@
+username: ansiboy
diff --git a/test/integration/targets/any_errors_fatal/18602.yml b/test/integration/targets/any_errors_fatal/18602.yml
new file mode 100644
index 00000000..66bcb88b
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/18602.yml
@@ -0,0 +1,21 @@
+---
+ - hosts: localhost
+ any_errors_fatal: true
+ tasks:
+ - block:
+ - debug: msg='i execute normally'
+ - name: EXPECTED FAILURE primary block command
+ command: /bin/false
+ - debug: msg='i never execute, cause ERROR!'
+ rescue:
+ - name: rescue block debug
+ debug: msg='I caught an error'
+ - name: EXPECTED FAILURE rescue block command
+ command: /bin/false
+ - debug: msg='I also never execute :-('
+ always:
+ - name: A debug task in the always block
+ debug: msg="this always executes"
+
+ - set_fact:
+ always_ran: true
diff --git a/test/integration/targets/any_errors_fatal/aliases b/test/integration/targets/any_errors_fatal/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/any_errors_fatal/always_block.yml b/test/integration/targets/any_errors_fatal/always_block.yml
new file mode 100644
index 00000000..8c6fbffa
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/always_block.yml
@@ -0,0 +1,27 @@
+---
+- hosts: testhost
+ gather_facts: false
+ any_errors_fatal: true
+ tasks:
+ - block:
+ - name: initial block debug
+ debug: msg='any_errors_fatal_block, i execute normally'
+
+ - name: EXPECTED FAILURE any_errors_fatal, initial block, bin/false to simulate failure
+ command: /bin/false
+
+ - name: after a task that fails I should never execute
+ debug:
+ msg: 'any_errors_fatal_block_post_fail ... i never execute, cause ERROR!'
+ rescue:
+ - name: any_errors_fatal_rescue_block debug
+ debug: msg='any_errors_fatal_rescue_block_start ... I caught an error'
+
+ - name: EXPECTED FAILURE any_errors_fatal in rescue block, using bin/false to simulate error
+ command: /bin/false
+
+ - name: any_errors_fatal post debug
+ debug: msg='any_errors_fatal_rescue_block_post_fail ... I also never execute :-('
+ always:
+ - name: any errors fatal always block debug
+ debug: msg='any_errors_fatal_always_block_start'
diff --git a/test/integration/targets/any_errors_fatal/inventory b/test/integration/targets/any_errors_fatal/inventory
new file mode 100644
index 00000000..3ae8d9c3
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/inventory
@@ -0,0 +1,6 @@
+[local]
+testhost ansible_connection=local host_var_role_name=role3
+testhost2 ansible_connection=local host_var_role_name=role2
+
+[local:vars]
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/any_errors_fatal/on_includes.yml b/test/integration/targets/any_errors_fatal/on_includes.yml
new file mode 100644
index 00000000..981d9f46
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/on_includes.yml
@@ -0,0 +1,7 @@
+---
+# based on https://github.com/ansible/ansible/issues/22924
+- name: Test any errors fatal
+ hosts: testhost,testhost2
+ any_errors_fatal: True
+ tasks:
+ - include: test_fatal.yml
diff --git a/test/integration/targets/any_errors_fatal/play_level.yml b/test/integration/targets/any_errors_fatal/play_level.yml
new file mode 100644
index 00000000..d5a89206
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/play_level.yml
@@ -0,0 +1,15 @@
+- hosts: testhost
+ gather_facts: no
+ any_errors_fatal: true
+ tasks:
+ - name: EXPECTED FAILURE shell exe of /bin/false for testhost
+ shell: '{{ "/bin/false" if inventory_hostname == "testhost" else "/bin/true" }}'
+
+ - debug:
+ msg: "any_errors_fatal_play_level_post_fail"
+
+- hosts: testhost
+ any_errors_fatal: true
+ tasks:
+ - debug:
+ msg: "and in another play"
diff --git a/test/integration/targets/any_errors_fatal/runme.sh b/test/integration/targets/any_errors_fatal/runme.sh
new file mode 100755
index 00000000..02cd499f
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/runme.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -ux
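+
+# the first two greps search for post-failure text that must never appear,
+# so those checks pass only when grep fails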
+ansible-playbook -i inventory "$@" play_level.yml| tee out.txt | grep 'any_errors_fatal_play_level_post_fail'
+res=$?
+cat out.txt
+if [ "${res}" -eq 0 ] ; then
+ exit 1
+fi
+
+ansible-playbook -i inventory "$@" on_includes.yml | tee out.txt | grep 'any_errors_fatal_this_should_never_be_reached'
+res=$?
+cat out.txt
+if [ "${res}" -eq 0 ] ; then
+ exit 1
+fi
+
+set -ux
+
+ansible-playbook -i inventory "$@" always_block.yml | tee out.txt | grep 'any_errors_fatal_always_block_start'
+res=$?
+cat out.txt
+exit $res
diff --git a/test/integration/targets/any_errors_fatal/test_fatal.yml b/test/integration/targets/any_errors_fatal/test_fatal.yml
new file mode 100644
index 00000000..a12d741e
--- /dev/null
+++ b/test/integration/targets/any_errors_fatal/test_fatal.yml
@@ -0,0 +1,12 @@
+---
+- name: Setting the fact for 'test' to 'test value'
+ set_fact:
+ test: "test value"
+ when: inventory_hostname == 'testhost2'
+
+- name: EXPECTED FAILURE jinja eval of a var that should not exist
+ debug: msg="{{ test }}"
+
+- name: testhost should never reach here as testhost2 failure above should end play
+ debug:
+ msg: "any_errors_fatal_this_should_never_be_reached"
diff --git a/test/integration/targets/apt/aliases b/test/integration/targets/apt/aliases
new file mode 100644
index 00000000..941bce38
--- /dev/null
+++ b/test/integration/targets/apt/aliases
@@ -0,0 +1,7 @@
+shippable/posix/group5
+destructive
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+skip/aix
diff --git a/test/integration/targets/apt/defaults/main.yml b/test/integration/targets/apt/defaults/main.yml
new file mode 100644
index 00000000..05a5780f
--- /dev/null
+++ b/test/integration/targets/apt/defaults/main.yml
@@ -0,0 +1 @@
+apt_foreign_arch: i386
diff --git a/test/integration/targets/apt/meta/main.yml b/test/integration/targets/apt/meta/main.yml
new file mode 100644
index 00000000..162d7fab
--- /dev/null
+++ b/test/integration/targets/apt/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_deb_repo
diff --git a/test/integration/targets/apt/tasks/apt-builddep.yml b/test/integration/targets/apt/tasks/apt-builddep.yml
new file mode 100644
index 00000000..d35c90b2
--- /dev/null
+++ b/test/integration/targets/apt/tasks/apt-builddep.yml
@@ -0,0 +1,55 @@
+# test installing build-deps using netcat and quilt as test victims.
+#
+# Deps can be discovered like so (taken from ubuntu 12.04)
+# ====
+# root@localhost:~ # apt-rdepends --build-depends --follow=DEPENDS netcat
+# Reading package lists... Done
+# Building dependency tree
+# Reading state information... Done
+# netcat
+# Build-Depends: debhelper (>= 8.0.0)
+# Build-Depends: quilt
+# root@localhost:~ #
+# ====
+# Since many things depend on debhelper, let's just uninstall quilt, then
+# install build-dep for netcat to get it back. build-dep doesn't have an
+# uninstall, so we don't need to test for reverse actions (e.g., uninstall
+# build-dep and ensure things are clean)
+
+# uninstall quilt
+- name: check quilt with dpkg
+ shell: dpkg -s quilt
+ register: dpkg_result
+ ignore_errors: true
+ tags: ['test_apt_builddep']
+
+- name: uninstall quilt with apt
+ apt: pkg=quilt state=absent purge=yes
+ register: apt_result
+ when: dpkg_result is successful
+ tags: ['test_apt_builddep']
+
+# install build-dep for netcat
+- name: install netcat build-dep with apt
+ apt: pkg=netcat state=build-dep
+ register: apt_result
+ tags: ['test_apt_builddep']
+
+- name: verify build_dep of netcat
+ assert:
+ that:
+ - "'changed' in apt_result"
+ tags: ['test_apt_builddep']
+
+# ensure debhelper and quilt are installed
+- name: check build_deps with dpkg
+ shell: dpkg --get-selections | egrep '(debhelper|quilt)'
+ failed_when: False
+ register: dpkg_result
+ tags: ['test_apt_builddep']
+
+- name: verify build_deps are really there
+ assert:
+ that:
+ - "dpkg_result.rc == 0"
+ tags: ['test_apt_builddep']
diff --git a/test/integration/targets/apt/tasks/apt-multiarch.yml b/test/integration/targets/apt/tasks/apt-multiarch.yml
new file mode 100644
index 00000000..6241664d
--- /dev/null
+++ b/test/integration/targets/apt/tasks/apt-multiarch.yml
@@ -0,0 +1,34 @@
+# verify that apt is handling multi-arch systems properly
+- name: add architecture {{ apt_foreign_arch }}
+ command: dpkg --add-architecture {{ apt_foreign_arch }}
+
+- name: install hello:{{ apt_foreign_arch }} with apt
+ apt: pkg=hello:{{ apt_foreign_arch }} state=present update_cache=yes
+ register: apt_result
+ until: apt_result is success
+
+- name: uninstall hello:{{ apt_foreign_arch }} with apt
+ apt: pkg=hello:{{ apt_foreign_arch }} state=absent purge=yes
+
+- name: install deb file
+ apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ apt_foreign_arch }}.deb"
+ register: apt_multi_initial
+
+- name: install deb file again
+ apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ apt_foreign_arch }}.deb"
+ register: apt_multi_secondary
+
+- name: verify installation of hello:{{ apt_foreign_arch }}
+ assert:
+ that:
+ - "apt_multi_initial.changed"
+ - "not apt_multi_secondary.changed"
+
+- name: remove all {{ apt_foreign_arch }} packages
+ apt:
+ name: "*:{{ apt_foreign_arch }}"
+ state: absent
+ purge: yes
+
+- name: remove {{ apt_foreign_arch }} architecture
+ command: dpkg --remove-architecture {{ apt_foreign_arch }}
diff --git a/test/integration/targets/apt/tasks/apt.yml b/test/integration/targets/apt/tasks/apt.yml
new file mode 100644
index 00000000..5bb6e7b9
--- /dev/null
+++ b/test/integration/targets/apt/tasks/apt.yml
@@ -0,0 +1,413 @@
+- name: use python-apt
+ set_fact:
+ python_apt: python-apt
+ when: ansible_python_version is version('3', '<')
+
+- name: use python3-apt
+ set_fact:
+ python_apt: python3-apt
+ when: ansible_python_version is version('3', '>=')
+
+- name: use Debian mirror
+ set_fact:
+ distro_mirror: http://ftp.debian.org/debian
+ when: ansible_distribution == 'Debian'
+
+- name: use Ubuntu mirror
+ set_fact:
+ distro_mirror: http://archive.ubuntu.com/ubuntu
+ when: ansible_distribution == 'Ubuntu'
+
+# UNINSTALL 'python-apt'
+# The `apt` module has the smarts to auto-install `python-apt`. To test, we
+# will first uninstall `python-apt`.
+- name: check {{ python_apt }} with dpkg
+ shell: dpkg -s {{ python_apt }}
+ register: dpkg_result
+ ignore_errors: true
+
+- name: uninstall {{ python_apt }} with apt
+ apt: pkg={{ python_apt }} state=absent purge=yes
+ register: apt_result
+ when: dpkg_result is successful
+
+# In check mode, auto-install of `python-apt` must fail
+- name: test fail uninstall hello without required apt deps in check mode
+ apt:
+ pkg: hello
+ state: absent
+ purge: yes
+ register: apt_result
+ check_mode: yes
+ ignore_errors: yes
+
+- name: verify fail uninstall hello without required apt deps in check mode
+ assert:
+ that:
+ - apt_result is failed
+ - '"If run normally this module can auto-install it." in apt_result.msg'
+
+- name: check {{ python_apt }} with dpkg
+ shell: dpkg -s {{ python_apt }}
+ register: dpkg_result
+ ignore_errors: true
+
+# UNINSTALL 'hello'
+# With 'python-apt' uninstalled, the first call to 'apt' should install
+# python-apt without updating the cache.
+- name: uninstall hello with apt and prevent updating the cache
+ apt:
+ pkg: hello
+ state: absent
+ purge: yes
+ update_cache: no
+ register: apt_result
+
+- name: check hello with dpkg
+ shell: dpkg-query -l hello
+ failed_when: False
+ register: dpkg_result
+
+- name: verify uninstall hello with apt and prevent updating the cache
+ assert:
+ that:
+ - "'changed' in apt_result"
+ - apt_result is not changed
+ - "dpkg_result.rc == 1"
+ - "'Auto-installing missing dependency without updating cache: {{ python_apt }}' in apt_result.warnings"
+
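+# shell-style wildcards in package names ('hel?o', 'he?lo') should fnmatch the 'hello' package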
+- name: Test installing fnmatch package
+ apt:
+ name:
+ - hel?o
+ - he?lo
+ register: apt_install_fnmatch
+
+- name: Test uninstalling fnmatch package
+ apt:
+ name:
+ - hel?o
+ - he?lo
+ state: absent
+ register: apt_uninstall_fnmatch
+
+- name: verify fnmatch
+ assert:
+ that:
+ - apt_install_fnmatch is changed
+ - apt_uninstall_fnmatch is changed
+
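+# with cache_valid_time: 10, the second update within ten seconds sees a
+# still-valid cache and reports no change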
+- name: Test update_cache 1
+ apt:
+ update_cache: true
+ cache_valid_time: 10
+ register: apt_update_cache_1
+
+- name: Test update_cache 2
+ apt:
+ update_cache: true
+ cache_valid_time: 10
+ register: apt_update_cache_2
+
+- name: verify update_cache
+ assert:
+ that:
+ - apt_update_cache_1 is changed
+ - apt_update_cache_2 is not changed
+
+- name: uninstall {{ python_apt }} with apt again
+ apt:
+ pkg: "{{ python_apt }}"
+ state: absent
+ purge: yes
+
+# UNINSTALL 'hello'
+# With 'python-apt' uninstalled, the first call to 'apt' should install
+# python-apt.
+- name: uninstall hello with apt
+ apt: pkg=hello state=absent purge=yes
+ register: apt_result
+ until: apt_result is success
+
+- name: check hello with dpkg
+ shell: dpkg-query -l hello
+ failed_when: False
+ register: dpkg_result
+
+- name: verify uninstallation of hello
+ assert:
+ that:
+ - "'changed' in apt_result"
+ - apt_result is not changed
+ - "dpkg_result.rc == 1"
+ - "'Updating cache and auto-installing missing dependency: {{ python_apt }}' in apt_result.warnings"
+
+# UNINSTALL AGAIN
+- name: uninstall hello with apt
+ apt: pkg=hello state=absent purge=yes
+ register: apt_result
+
+- name: verify no change on re-uninstall
+ assert:
+ that:
+ - "not apt_result.changed"
+
+# INSTALL
+- name: install hello with apt
+ apt: name=hello state=present
+ register: apt_result
+
+- name: check hello with dpkg
+ shell: dpkg-query -l hello
+ failed_when: False
+ register: dpkg_result
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "apt_result.changed"
+ - "dpkg_result.rc == 0"
+
+- name: verify apt module outputs
+ assert:
+ that:
+ - "'changed' in apt_result"
+ - "'stderr' in apt_result"
+ - "'stdout' in apt_result"
+ - "'stdout_lines' in apt_result"
+
+# INSTALL AGAIN
+- name: install hello with apt
+ apt: name=hello state=present
+ register: apt_result
+
+- name: verify no change on re-install
+ assert:
+ that:
+ - "not apt_result.changed"
+
+# UNINSTALL AGAIN
+- name: uninstall hello with apt
+ apt: pkg=hello state=absent purge=yes
+ register: apt_result
+
+# INSTALL WITH VERSION WILDCARD
+- name: install hello with apt
+ apt: name=hello=2.* state=present
+ register: apt_result
+
+- name: check hello with wildcard with dpkg
+ shell: dpkg-query -l hello
+ failed_when: False
+ register: dpkg_result
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "apt_result.changed"
+ - "dpkg_result.rc == 0"
+
+- name: check hello version
+ shell: dpkg -s hello | grep Version | awk '{print $2}'
+ register: hello_version
+
+- name: check hello architecture
+ shell: dpkg -s hello | grep Architecture | awk '{print $2}'
+ register: hello_architecture
+
+- name: uninstall hello with apt
+ apt: pkg=hello state=absent purge=yes
+
+- name: install deb file
+ apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb"
+ register: apt_initial
+
+- name: install deb file again
+ apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb"
+ register: apt_secondary
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "apt_initial.changed"
+ - "not apt_secondary.changed"
+
+- name: uninstall hello with apt
+ apt: pkg=hello state=absent purge=yes
+
+- name: install deb file from URL
+ apt: deb="{{ distro_mirror }}/pool/main/h/hello/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb"
+ register: apt_url
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "apt_url.changed"
+
+- name: uninstall hello with apt
+ apt: pkg=hello state=absent purge=yes
+
+- name: force install of deb
+ apt: deb="/var/cache/apt/archives/hello_{{ hello_version.stdout }}_{{ hello_architecture.stdout }}.deb" force=true
+ register: dpkg_force
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "dpkg_force.changed"
+
+# NEGATIVE: upgrade all packages while providing additional packages to install
+- name: provide additional packages to install while upgrading all installed packages
+ apt: pkg=*,test state=latest
+ ignore_errors: True
+ register: apt_result
+
+- name: verify failure of upgrade packages and install
+ assert:
+ that:
+ - "not apt_result.changed"
+ - "apt_result.failed"
+
+- name: autoclean during install
+ apt: pkg=hello state=present autoclean=yes
+
+# https://github.com/ansible/ansible/issues/23155
+- name: create a repo file
+ copy:
+ dest: /etc/apt/sources.list.d/non-existing.list
+ content: deb http://ppa.launchpad.net/non-existing trusty main
+
+- name: test for sane error message
+ apt:
+ update_cache: yes
+ register: apt_result
+ ignore_errors: yes
+
+- name: verify sane error message
+ assert:
+ that:
+ - "'Failed to fetch' in apt_result['msg']"
+ - "'403' in apt_result['msg']"
+
+- name: Clean up
+ file:
+ name: /etc/apt/sources.list.d/non-existing.list
+ state: absent
+
+# https://github.com/ansible/ansible/issues/28907
+- name: Install parent package
+ apt:
+ name: libcaca-dev
+
+- name: Install child package
+ apt:
+ name: libslang2-dev
+
+- shell: apt-mark showmanual | grep libcaca-dev
+ ignore_errors: yes
+ register: parent_output
+
+- name: Check that parent package is marked as installed manually
+ assert:
+ that:
+ - "'libcaca-dev' in parent_output.stdout"
+
+- shell: apt-mark showmanual | grep libslang2-dev
+ ignore_errors: yes
+ register: child_output
+
+- name: Check that child package is marked as installed manually
+ assert:
+ that:
+ - "'libslang2-dev' in child_output.stdout"
+
+- name: Clean up
+ apt:
+ name: "{{ pkgs }}"
+ state: absent
+ vars:
+ pkgs:
+ - libcaca-dev
+ - libslang2-dev
+
+# https://github.com/ansible/ansible/issues/38995
+- name: build-dep for a package
+ apt:
+ name: tree
+ state: build-dep
+ register: apt_result
+
+- name: Check the result
+ assert:
+ that:
+ - apt_result is changed
+
+- name: build-dep for a package (idempotency)
+ apt:
+ name: tree
+ state: build-dep
+ register: apt_result
+
+- name: Check the result
+ assert:
+ that:
+ - apt_result is not changed
+
+# check policy_rc_d parameter
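+# a policy-rc.d exit status of 101 means 'action forbidden', so a package
+# installed with policy_rc_d: 101 must not have its service started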
+
+- name: Install unscd but forbid service start
+ apt:
+ name: unscd
+ policy_rc_d: 101
+
+- name: Stop unscd service
+ service:
+ name: unscd
+ state: stopped
+ register: service_unscd_stop
+
+- name: unscd service shouldn't have been stopped by previous task
+ assert:
+ that: service_unscd_stop is not changed
+
+- name: Uninstall unscd
+ apt:
+ name: unscd
+ state: absent
+ policy_rc_d: 101
+
+- name: Create incorrect /usr/sbin/policy-rc.d
+ copy:
+ dest: /usr/sbin/policy-rc.d
+ content: apt integration test
+ mode: 0755
+
+- name: Install unscd but forbid service start
+ apt:
+ name: unscd
+ policy_rc_d: 101
+
+- name: Stop unscd service
+ service:
+ name: unscd
+ state: stopped
+ register: service_unscd_stop
+
+- name: unscd service shouldn't have been stopped by previous task
+ assert:
+ that: service_unscd_stop is not changed
+
+- name: Create incorrect /usr/sbin/policy-rc.d
+ copy:
+ dest: /usr/sbin/policy-rc.d
+ content: apt integration test
+ mode: 0755
+ register: policy_rc_d
+
+- name: Check if /usr/sbin/policy-rc.d was correctly backed-up during unscd install
+ assert:
+ that: policy_rc_d is not changed
+
+- name: Delete /usr/sbin/policy-rc.d
+ file:
+ path: /usr/sbin/policy-rc.d
+ state: absent
diff --git a/test/integration/targets/apt/tasks/main.yml b/test/integration/targets/apt/tasks/main.yml
new file mode 100644
index 00000000..1ecd8a63
--- /dev/null
+++ b/test/integration/targets/apt/tasks/main.yml
@@ -0,0 +1,40 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- block:
+ - include: 'apt.yml'
+
+ - include: 'url-with-deps.yml'
+
+ - include: 'apt-multiarch.yml'
+ when:
+ - ansible_userspace_architecture != apt_foreign_arch
+
+ - include: 'apt-builddep.yml'
+
+ - block:
+ - include: 'repo.yml'
+ always:
+ - file:
+ path: /etc/apt/sources.list.d/file_tmp_repo.list
+ state: absent
+ - file:
+ name: "{{ repodir }}"
+ state: absent
+
+ when:
+ - ansible_distribution in ('Ubuntu', 'Debian')
diff --git a/test/integration/targets/apt/tasks/repo.yml b/test/integration/targets/apt/tasks/repo.yml
new file mode 100644
index 00000000..f568be9f
--- /dev/null
+++ b/test/integration/targets/apt/tasks/repo.yml
@@ -0,0 +1,253 @@
+- block:
+ - name: Install foo package version 1.0.0
+ apt:
+ name: foo=1.0.0
+ allow_unauthenticated: yes
+ register: apt_result
+
+ - name: Check install with dpkg
+ shell: dpkg-query -l foo
+ register: dpkg_result
+
+ - name: Check if install was successful
+ assert:
+ that:
+ - "apt_result is success"
+ - "dpkg_result is success"
+ - "'1.0.0' in dpkg_result.stdout"
+
+ - name: Update to foo version 1.0.1
+ apt:
+ name: foo
+ state: latest
+ allow_unauthenticated: yes
+ register: apt_result
+
+ - name: Check install with dpkg
+ shell: dpkg-query -l foo
+ register: dpkg_result
+
+ - name: Check if install was successful
+ assert:
+ that:
+ - "apt_result is success"
+ - "dpkg_result is success"
+ - "'1.0.1' in dpkg_result.stdout"
+ always:
+ - name: Clean up
+ apt:
+ name: foo
+ state: absent
+ allow_unauthenticated: yes
+
+
+# https://github.com/ansible/ansible/issues/30638
+- block:
+ - name: Fail to install foo=1.0.1 since foo is not installed and only_upgrade is set
+ apt:
+ name: foo=1.0.1
+ state: present
+ only_upgrade: yes
+ allow_unauthenticated: yes
+ ignore_errors: yes
+ register: apt_result
+
+ - name: Check that foo was not upgraded
+ assert:
+ that:
+ - "apt_result is not changed"
+
+ - apt:
+ name: foo=1.0.0
+ allow_unauthenticated: yes
+
+ - name: Upgrade foo to 1.0.1
+ apt:
+ name: foo=1.0.1
+ state: present
+ only_upgrade: yes
+ allow_unauthenticated: yes
+ register: apt_result
+
+ - name: Check install with dpkg
+ shell: dpkg-query -l foo
+ register: dpkg_result
+
+ - name: Check if install was successful
+ assert:
+ that:
+ - "apt_result is success"
+ - "dpkg_result is success"
+ - "'1.0.1' in dpkg_result.stdout"
+ always:
+ - name: Clean up
+ apt:
+ name: foo
+ state: absent
+ allow_unauthenticated: yes
+
+
+# https://github.com/ansible/ansible/issues/35900
+- block:
+ - name: Disable ubuntu repos so system packages are not upgraded and do not change testing env
+ command: mv /etc/apt/sources.list /etc/apt/sources.list.backup
+
+ - name: Install foobar, installs foo as a dependency
+ apt:
+ name: foobar=1.0.0
+ allow_unauthenticated: yes
+
+ - name: Upgrade foobar to a version which does not depend on foo, autoremove should remove foo
+ apt:
+ upgrade: dist
+ autoremove: yes
+ allow_unauthenticated: yes
+
+ - name: Check foo with dpkg
+ shell: dpkg-query -l foo
+ register: dpkg_result
+ ignore_errors: yes
+
+ - name: Check that foo was removed by autoremove
+ assert:
+ that:
+ - "dpkg_result is failed"
+
+ always:
+ - name: Clean up
+ apt:
+ pkg: foo,foobar
+ state: absent
+ autoclean: yes
+
+ - name: Restore ubuntu repos
+ command: mv /etc/apt/sources.list.backup /etc/apt/sources.list
+
+
+# https://github.com/ansible/ansible/issues/26298
+- block:
+ - name: Disable ubuntu repos so system packages are not upgraded and do not change testing env
+ command: mv /etc/apt/sources.list /etc/apt/sources.list.backup
+
+ - name: Install foobar, installs foo as a dependency
+ apt:
+ name: foobar=1.0.0
+ allow_unauthenticated: yes
+
+ - name: Upgrade foobar to a version which does not depend on foo
+ apt:
+ upgrade: dist
+ force: yes # workaround for --allow-unauthenticated used along with upgrade
+
+ - name: autoremove should remove foo
+ apt:
+ autoremove: yes
+ register: autoremove_result
+
+ - name: Check that autoremove correctly reports changed=True
+ assert:
+ that:
+ - "autoremove_result is changed"
+
+ - name: Check foo with dpkg
+ shell: dpkg-query -l foo
+ register: dpkg_result
+ ignore_errors: yes
+
+ - name: Check that foo was removed by autoremove
+ assert:
+ that:
+ - "dpkg_result is failed"
+
+ - name: Nothing to autoremove
+ apt:
+ autoremove: yes
+ register: autoremove_result
+
+ - name: Check that autoremove correctly reports changed=False
+ assert:
+ that:
+ - "autoremove_result is not changed"
+
+ - name: Create a fake .deb file for autoclean to remove
+ file:
+ name: /var/cache/apt/archives/python3-q_2.4-1_all.deb
+ state: touch
+
+ - name: autoclean fake .deb file
+ apt:
+ autoclean: yes
+ register: autoclean_result
+
+ - name: Check if the .deb file exists
+ stat:
+ path: /var/cache/apt/archives/python3-q_2.4-1_all.deb
+ register: stat_result
+
+ - name: Check that autoclean correctly reports changed=True and file was removed
+ assert:
+ that:
+ - "autoclean_result is changed"
+ - "not stat_result.stat.exists"
+
+ - name: Nothing to autoclean
+ apt:
+ autoclean: yes
+ register: autoclean_result
+
+ - name: Check that autoclean correctly reports changed=False
+ assert:
+ that:
+ - "autoclean_result is not changed"
+
+ always:
+ - name: Clean up
+ apt:
+ pkg: foo,foobar
+ state: absent
+ autoclean: yes
+
+ - name: Restore ubuntu repos
+ command: mv /etc/apt/sources.list.backup /etc/apt/sources.list
+
+
+- name: Upgrades
+ block:
+ - include: "upgrade.yml aptitude_present={{ True | bool }} upgrade_type=dist force_apt_get={{ False | bool }}"
+
+ - name: Check if aptitude is installed
+ command: dpkg-query --show --showformat='${db:Status-Abbrev}' aptitude
+ register: aptitude_status
+
+ - name: Remove aptitude, if installed, to test fall-back to apt-get
+ apt:
+ pkg: aptitude
+ state: absent
+ when:
+ - aptitude_status.stdout.find('ii') != -1
+
+ - include: "upgrade.yml aptitude_present={{ False | bool }} upgrade_type={{ item.upgrade_type }} force_apt_get={{ item.force_apt_get }}"
+ with_items:
+ - { upgrade_type: safe, force_apt_get: False }
+ - { upgrade_type: full, force_apt_get: False }
+ - { upgrade_type: safe, force_apt_get: True }
+ - { upgrade_type: full, force_apt_get: True }
+
+ - name: (Re-)Install aptitude, run same tests again
+ apt:
+ pkg: aptitude
+ state: present
+
+ - include: "upgrade.yml aptitude_present={{ True | bool }} upgrade_type={{ item.upgrade_type }} force_apt_get={{ item.force_apt_get }}"
+ with_items:
+ - { upgrade_type: safe, force_apt_get: False }
+ - { upgrade_type: full, force_apt_get: False }
+ - { upgrade_type: safe, force_apt_get: True }
+ - { upgrade_type: full, force_apt_get: True }
+
+ - name: Remove aptitude if not originally present
+ apt:
+ pkg: aptitude
+ state: absent
+ when:
+ - aptitude_status.stdout.find('ii') == -1
diff --git a/test/integration/targets/apt/tasks/upgrade.yml b/test/integration/targets/apt/tasks/upgrade.yml
new file mode 100644
index 00000000..cf747c81
--- /dev/null
+++ b/test/integration/targets/apt/tasks/upgrade.yml
@@ -0,0 +1,64 @@
+- block:
+ - name: Disable ubuntu repos so system packages are not upgraded and do not change testing env
+ command: mv /etc/apt/sources.list /etc/apt/sources.list.backup
+
+ - name: install foo-1.0.0
+ apt:
+ name: foo=1.0.0
+ state: present
+ allow_unauthenticated: yes
+
+ - name: check foo version
+ shell: dpkg -s foo | grep Version | awk '{print $2}'
+ register: foo_version
+
+ - name: ensure the correct version of foo has been installed
+ assert:
+ that:
+ - "'1.0.0' in foo_version.stdout"
+
+ - name: "(upgrade type: {{upgrade_type}}) upgrade packages to latest version, force_apt_get: {{force_apt_get}}"
+ apt:
+ upgrade: "{{ upgrade_type }}"
+ force_apt_get: "{{ force_apt_get }}"
+ force: yes
+ register: upgrade_result
+
+ - name: check foo version
+ shell: dpkg -s foo | grep Version | awk '{print $2}'
+ register: foo_version
+
+ - name: check that warning is not given when force_apt_get set
+ assert:
+ that:
+ - "'warnings' not in upgrade_result"
+ when:
+ - force_apt_get
+
+ - name: check that old version upgraded correctly
+ assert:
+ that:
+ - "'1.0.0' not in foo_version.stdout"
+ - "{{ foo_version.changed }}"
+
+ - name: "(upgrade type: {{upgrade_type}}) upgrade packages to latest version (Idempotant)"
+ apt:
+ upgrade: "{{ upgrade_type }}"
+ force_apt_get: "{{ force_apt_get }}"
+ force: yes
+ register: second_upgrade_result
+
+ - name: check that nothing has changed (Idempotent)
+ assert:
+ that:
+ - "second_upgrade_result.changed == false"
+
+ always:
+ - name: Clean up
+ apt:
+ pkg: foo,foobar
+ state: absent
+ autoclean: yes
+
+ - name: Restore ubuntu repos
+ command: mv /etc/apt/sources.list.backup /etc/apt/sources.list
diff --git a/test/integration/targets/apt/tasks/url-with-deps.yml b/test/integration/targets/apt/tasks/url-with-deps.yml
new file mode 100644
index 00000000..ed2f7073
--- /dev/null
+++ b/test/integration/targets/apt/tasks/url-with-deps.yml
@@ -0,0 +1,56 @@
+- block:
+ - name: Install https transport for apt
+ apt:
+ name: apt-transport-https
+
+ - name: Ensure echo-hello is not installed
+ apt:
+ name: echo-hello
+ state: absent
+ purge: yes
+
+ # Note that this .deb is just a stupidly tiny one that has a dependency
+ # on vim-tiny. Really any .deb will work here so long as it has
+ # dependencies that exist in a repo and get brought in.
+ # The source and files for building this .deb can be found here:
+ # https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/apt/echo-hello-source.tar.gz
+ - name: Install deb file with dependencies from URL (check_mode)
+ apt:
+ deb: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/apt/echo-hello_1.0_all.deb
+ check_mode: true
+ register: apt_url_deps_check_mode
+
+ - name: check to make sure we didn't install the package due to check_mode
+ shell: dpkg-query -l echo-hello
+ failed_when: false
+ register: dpkg_result_check_mode
+
+ - name: verify check_mode installation of echo-hello
+ assert:
+ that:
+ - apt_url_deps_check_mode is changed
+ - dpkg_result_check_mode.rc != 0
+
+ - name: Install deb file with dependencies from URL (for real this time)
+ apt:
+ deb: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/apt/echo-hello_1.0_all.deb
+ register: apt_url_deps
+
+ - name: check to make sure we installed the package
+ shell: dpkg-query -l echo-hello
+ failed_when: False
+ register: dpkg_result
+
+ - name: verify real installation of echo-hello
+ assert:
+ that:
+ - apt_url_deps is changed
+ - dpkg_result is successful
+ - dpkg_result.rc == 0
+
+ always:
+ - name: uninstall echo-hello with apt
+ apt:
+ pkg: echo-hello
+ state: absent
+ purge: yes
diff --git a/test/integration/targets/apt_key/aliases b/test/integration/targets/apt_key/aliases
new file mode 100644
index 00000000..f46fd701
--- /dev/null
+++ b/test/integration/targets/apt_key/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group1
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+skip/aix
diff --git a/test/integration/targets/apt_key/meta/main.yml b/test/integration/targets/apt_key/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/apt_key/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/apt_key/tasks/apt_key.yml b/test/integration/targets/apt_key/tasks/apt_key.yml
new file mode 100644
index 00000000..a5969b6f
--- /dev/null
+++ b/test/integration/targets/apt_key/tasks/apt_key.yml
@@ -0,0 +1,19 @@
+- name: run first docs example
+ apt_key:
+ keyserver: keyserver.ubuntu.com
+ id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ register: apt_key_test0
+- debug: var=apt_key_test0
+
+- name: re-run first docs example
+ apt_key:
+ keyserver: keyserver.ubuntu.com
+ id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ register: apt_key_test1
+
+- name: validate results
+ assert:
+ that:
+ - 'apt_key_test0.changed is defined'
+ - 'apt_key_test0.changed'
+ - 'not apt_key_test1.changed'
diff --git a/test/integration/targets/apt_key/tasks/main.yml b/test/integration/targets/apt_key/tasks/main.yml
new file mode 100644
index 00000000..a268b2b9
--- /dev/null
+++ b/test/integration/targets/apt_key/tasks/main.yml
@@ -0,0 +1,28 @@
+# Test code for the apt_key module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact: output_dir_test={{output_dir}}/apt_key
+
+- name: make sure our testing sub-directory does not exist
+ file: path="{{ output_dir_test }}" state=absent
+
+- name: create our testing sub-directory
+ file: path="{{ output_dir_test }}" state=directory
+
+- include: 'apt_key.yml'
+ when: ansible_distribution in ('Ubuntu', 'Debian')
diff --git a/test/integration/targets/apt_repository/aliases b/test/integration/targets/apt_repository/aliases
new file mode 100644
index 00000000..7e462190
--- /dev/null
+++ b/test/integration/targets/apt_repository/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+skip/aix
diff --git a/test/integration/targets/apt_repository/meta/main.yml b/test/integration/targets/apt_repository/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/apt_repository/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/apt_repository/tasks/apt.yml b/test/integration/targets/apt_repository/tasks/apt.yml
new file mode 100644
index 00000000..66790bb0
--- /dev/null
+++ b/test/integration/targets/apt_repository/tasks/apt.yml
@@ -0,0 +1,243 @@
+---
+
+- set_fact:
+ test_ppa_name: 'ppa:git-core/ppa'
+ test_ppa_filename: 'git-core'
+ test_ppa_spec: 'deb http://ppa.launchpad.net/git-core/ppa/ubuntu {{ansible_distribution_release}} main'
+ test_ppa_key: 'E1DF1F24' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index
+
+- name: show python version
+ debug: var=ansible_python_version
+
+- name: use python-apt
+ set_fact:
+ python_apt: python-apt
+ when: ansible_python_version is version('3', '<')
+
+- name: use python3-apt
+ set_fact:
+ python_apt: python3-apt
+ when: ansible_python_version is version('3', '>=')
+
+# UNINSTALL 'python-apt'
+# The `apt_repository` module has the smarts to auto-install `python-apt`. To
+# test, we will first uninstall `python-apt`.
+- name: check {{ python_apt }} with dpkg
+ shell: dpkg -s {{ python_apt }}
+ register: dpkg_result
+ ignore_errors: true
+
+- name: uninstall {{ python_apt }} with apt
+ apt: pkg={{ python_apt }} state=absent purge=yes
+ register: apt_result
+ when: dpkg_result is successful
+
+#
+# TEST: apt_repository: repo=<name>
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_before
+
+- name: 'name=<name> (expect: pass)'
+ apt_repository: repo='{{test_ppa_name}}' state=present
+ register: result
+
+- name: 'assert the repo addition reported changed'
+ assert:
+ that:
+ - 'result.changed'
+ - 'result.state == "present"'
+ - 'result.repo == "{{test_ppa_name}}"'
+
+- name: 'examine apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_after
+
+- name: 'assert the apt cache did change'
+ assert:
+ that:
+ - 'cache_before.stat.mtime != cache_after.stat.mtime'
+
+- name: 'ensure ppa key is installed (expect: pass)'
+ apt_key: id='{{test_ppa_key}}' state=present
+
+#
+# TEST: apt_repository: repo=<name> update_cache=no
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_before
+
+- name: 'name=<name> update_cache=no (expect: pass)'
+ apt_repository: repo='{{test_ppa_name}}' state=present update_cache=no
+ register: result
+
+- assert:
+ that:
+ - 'result.changed'
+ - 'result.state == "present"'
+ - 'result.repo == "{{test_ppa_name}}"'
+
+- name: 'examine apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_after
+
+- name: 'assert the apt cache did *NOT* change'
+ assert:
+ that:
+ - 'cache_before.stat.mtime == cache_after.stat.mtime'
+
+- name: 'ensure ppa key is installed (expect: pass)'
+ apt_key: id='{{test_ppa_key}}' state=present
+
+#
+# TEST: apt_repository: repo=<name> update_cache=yes
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_before
+
+- name: 'name=<name> update_cache=yes (expect: pass)'
+ apt_repository: repo='{{test_ppa_name}}' state=present update_cache=yes
+ register: result
+
+- assert:
+ that:
+ - 'result.changed'
+ - 'result.state == "present"'
+ - 'result.repo == "{{test_ppa_name}}"'
+
+- name: 'examine apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_after
+
+- name: 'assert the apt cache did change'
+ assert:
+ that:
+ - 'cache_before.stat.mtime != cache_after.stat.mtime'
+
+- name: 'ensure ppa key is installed (expect: pass)'
+ apt_key: id='{{test_ppa_key}}' state=present
+
+#
+# TEST: apt_repository: repo=<spec>
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_before
+
+- name: ensure ppa key is present before adding repo that requires authentication
+ apt_key: keyserver=keyserver.ubuntu.com id='{{test_ppa_key}}' state=present
+
+- name: 'name=<spec> (expect: pass)'
+ apt_repository: repo='{{test_ppa_spec}}' state=present
+ register: result
+
+- name: update the cache
+ apt:
+ update_cache: true
+ register: result_cache
+
+- assert:
+ that:
+ - 'result.changed'
+ - 'result.state == "present"'
+ - 'result.repo == "{{test_ppa_spec}}"'
+ - result_cache is not changed
+
+- name: 'examine apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_after
+
+- name: 'assert the apt cache did change'
+ assert:
+ that:
+ - 'cache_before.stat.mtime != cache_after.stat.mtime'
+
+- name: remove repo by spec
+ apt_repository: repo='{{test_ppa_spec}}' state=absent
+ register: result
+
+# When installing a repo with the spec, the key is *NOT* added
+- name: 'ensure ppa key is absent (expect: pass)'
+ apt_key: id='{{test_ppa_key}}' state=absent
+
+#
+# TEST: apt_repository: repo=<spec> filename=<filename>
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_before
+
+- name: ensure ppa key is present before adding repo that requires authentication
+ apt_key: keyserver=keyserver.ubuntu.com id='{{test_ppa_key}}' state=present
+
+- name: 'name=<spec> filename=<filename> (expect: pass)'
+ apt_repository: repo='{{test_ppa_spec}}' filename='{{test_ppa_filename}}' state=present
+ register: result
+
+- assert:
+ that:
+ - 'result.changed'
+ - 'result.state == "present"'
+ - 'result.repo == "{{test_ppa_spec}}"'
+
+- name: 'examine source file'
+ stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list'
+ register: source_file
+
+- name: 'assert source file exists'
+ assert:
+ that:
+ - 'source_file.stat.exists == True'
+
+- name: 'examine apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_after
+
+- name: 'assert the apt cache did change'
+ assert:
+ that:
+ - 'cache_before.stat.mtime != cache_after.stat.mtime'
+
+# When installing a repo with the spec, the key is *NOT* added
+- name: 'ensure ppa key is absent (expect: pass)'
+ apt_key: id='{{test_ppa_key}}' state=absent
+
+- name: Test apt_repository with a null value for repo
+ apt_repository:
+ repo:
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Please set argument \'repo\' to a non-empty value'
+
+- name: Test apt_repository with an empty value for repo
+ apt_repository:
+ repo: ""
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Please set argument \'repo\' to a non-empty value'
+
+#
+# TEARDOWN
+#
+- include: 'cleanup.yml'
diff --git a/test/integration/targets/apt_repository/tasks/cleanup.yml b/test/integration/targets/apt_repository/tasks/cleanup.yml
new file mode 100644
index 00000000..92280ced
--- /dev/null
+++ b/test/integration/targets/apt_repository/tasks/cleanup.yml
@@ -0,0 +1,17 @@
+---
+# tasks to clean up a repo and assert it is gone
+
+- name: remove existing ppa
+ apt_repository: repo={{test_ppa_name}} state=absent
+ ignore_errors: true
+
+- name: test that ppa does not exist (expect pass)
+ shell: cat /etc/apt/sources.list /etc/apt/sources.list.d/* | grep "{{test_ppa_spec}}"
+ register: command
+ failed_when: command.rc == 0
+ changed_when: false
+
+# Should this use apt-key instead?
+- name: remove ppa key
+ apt_key: id={{test_ppa_key}} state=absent
+ ignore_errors: true
diff --git a/test/integration/targets/apt_repository/tasks/main.yml b/test/integration/targets/apt_repository/tasks/main.yml
new file mode 100644
index 00000000..41010112
--- /dev/null
+++ b/test/integration/targets/apt_repository/tasks/main.yml
@@ -0,0 +1,25 @@
+# test code for the apt_repository module
+# (c) 2014, James Laska <jlaska@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include: 'apt.yml'
+ when: ansible_distribution in ('Ubuntu')
+
+- include: mode.yaml
+ when: ansible_distribution in ('Ubuntu')
+ tags:
+ - test_apt_repository_mode
\ No newline at end of file
diff --git a/test/integration/targets/apt_repository/tasks/mode.yaml b/test/integration/targets/apt_repository/tasks/mode.yaml
new file mode 100644
index 00000000..d9895368
--- /dev/null
+++ b/test/integration/targets/apt_repository/tasks/mode.yaml
@@ -0,0 +1,130 @@
+---
+
+# These tests are likely slower than they should be, since each
+# invocation of apt_repository appears to query many (possibly all)
+# of the configured repos.
+
+- set_fact:
+ test_repo_spec: "deb http://apt.postgresql.org/pub/repos/apt/ {{ ansible_distribution_release }}-pgdg main"
+ test_repo_path: /etc/apt/sources.list.d/apt_postgresql_org_pub_repos_apt.list
+
+- include: mode_cleanup.yaml
+
+- name: Add GPG key to verify signatures
+ apt_key:
+ id: 7FCC7D46ACCC4CF8
+ keyserver: keyserver.ubuntu.com
+
+- name: Mode specified as yaml literal 0600
+ apt_repository:
+ repo: "{{ test_repo_spec }}"
+ state: present
+ mode: 0600
+ register: mode_given_results
+
+- name: Gather mode_given_as_literal_yaml stat
+ stat:
+ path: "{{ test_repo_path }}"
+ register: mode_given_yaml_literal_0600
+
+- name: Show mode_given_yaml_literal_0600
+ debug:
+ var: mode_given_yaml_literal_0600
+
+- include: mode_cleanup.yaml
+
+- name: Assert mode_given_yaml_literal_0600 is correct
+ assert:
+ that: "mode_given_yaml_literal_0600.stat.mode == '0600'"
+
+- name: No mode specified
+ apt_repository:
+ repo: "{{ test_repo_spec }}"
+ state: present
+ register: no_mode_results
+
+- name: Gather no mode stat
+ stat:
+ path: "{{ test_repo_path }}"
+ register: no_mode_stat
+
+- name: Show no mode stat
+ debug:
+ var: no_mode_stat
+
+- include: mode_cleanup.yaml
+
+- name: Assert no_mode_stat is correct
+ assert:
+ that: "no_mode_stat.stat.mode == '0644'"
+
+- name: Mode specified as string 0600
+ apt_repository:
+ repo: "{{ test_repo_spec }}"
+ state: present
+ mode: "0600"
+ register: mode_given_string_results
+
+- name: Gather mode_given_string stat
+ stat:
+ path: "{{ test_repo_path }}"
+ register: mode_given_string_stat
+
+- name: Show mode_given_string_stat
+ debug:
+ var: mode_given_string_stat
+
+- include: mode_cleanup.yaml
+
+- name: Mode specified as string 600
+ apt_repository:
+ repo: "{{ test_repo_spec }}"
+ state: present
+ mode: "600"
+ register: mode_given_string_600_results
+
+- name: Gather mode_given_600_string stat
+ stat:
+ path: "{{ test_repo_path }}"
+ register: mode_given_string_600_stat
+
+- name: Show mode_given_string_600_stat
+ debug:
+ var: mode_given_string_600_stat
+
+- include: mode_cleanup.yaml
+
+- name: Assert mode is correct
+ assert:
+ that: "mode_given_string_600_stat.stat.mode == '0600'"
+
+- name: Mode specified as yaml literal 600
+ apt_repository:
+ repo: "{{ test_repo_spec }}"
+ state: present
+ mode: 600
+ register: mode_given_short_results
+
+- name: Gather mode_given_yaml_literal_600 stat
+ stat:
+ path: "{{ test_repo_path }}"
+ register: mode_given_yaml_literal_600
+
+- name: Show mode_given_yaml_literal_600
+ debug:
+ var: mode_given_yaml_literal_600
+
+- include: mode_cleanup.yaml
+
+# A literal 600 as the mode will currently fail, in the sense that the module
+# does not guess that 600 and 0600 mean the same thing; instead it interprets
+# a literal 600 as decimal 600 (which is octal 1130). A literal 0600 is
+# interpreted as octal correctly. Note that octal 0644 is decimal 420, so a
+# misinterpretation of the default perm 0644 previously resulted in a default
+# file mode of 0420. 'mode: 600' is likely not what a user meant, but there
+# isn't enough information to determine that. Note that a string argument of
+# '600' will be interpreted as 0600.
+# See https://github.com/ansible/ansible/issues/16370
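+# For reference, the conversions above in plain Python:
+#   oct(600)       -> '0o1130'  (a YAML literal 600 is decimal 600)
+#   int('600', 8)  -> 384       (the string '600' is parsed as octal, i.e. 0600)
+#   int('0644', 8) -> 420       (and oct(420) -> '0o644', the default perm)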
+- name: Assert mode_given_yaml_literal_600 is correct
+ assert:
+ that: "mode_given_yaml_literal_600.stat.mode == '1130'" \ No newline at end of file
diff --git a/test/integration/targets/apt_repository/tasks/mode_cleanup.yaml b/test/integration/targets/apt_repository/tasks/mode_cleanup.yaml
new file mode 100644
index 00000000..726de111
--- /dev/null
+++ b/test/integration/targets/apt_repository/tasks/mode_cleanup.yaml
@@ -0,0 +1,7 @@
+---
+# tasks to clean up after creating a repo file, specifically for testing the 'mode' arg
+
+- name: Delete existing repo
+ file:
+ path: "{{ test_repo_path }}"
+ state: absent
\ No newline at end of file
diff --git a/test/integration/targets/args/aliases b/test/integration/targets/args/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/args/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/args/runme.sh b/test/integration/targets/args/runme.sh
new file mode 100755
index 00000000..af1c31d7
--- /dev/null
+++ b/test/integration/targets/args/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eu
+
+echo "arg[#]: $#"
+echo "arg[0]: $0"
+
+i=0
+for arg in "$@"; do
+ i=$((i+1))
+ echo "arg[$i]: ${arg}"
+done
diff --git a/test/integration/targets/argspec/aliases b/test/integration/targets/argspec/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/argspec/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/argspec/library/argspec.py b/test/integration/targets/argspec/library/argspec.py
new file mode 100644
index 00000000..08dad1a0
--- /dev/null
+++ b/test/integration/targets/argspec/library/argspec.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ {
+ 'required': {
+ 'required': True,
+ },
+ 'required_one_of_one': {},
+ 'required_one_of_two': {},
+ 'required_by_one': {},
+ 'required_by_two': {},
+ 'required_by_three': {},
+ 'state': {
+ 'type': 'str',
+ 'choices': ['absent', 'present'],
+ },
+ 'path': {},
+ 'content': {},
+ 'mapping': {
+ 'type': 'dict',
+ },
+ 'required_one_of': {
+ 'required_one_of': [['thing', 'other']],
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'thing': {},
+ 'other': {},
+ },
+ },
+ 'required_by': {
+ 'required_by': {'thing': 'other'},
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'thing': {},
+ 'other': {},
+ },
+ },
+ 'required_together': {
+ 'required_together': [['thing', 'other']],
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'thing': {},
+ 'other': {},
+ 'another': {},
+ },
+ },
+ 'required_if': {
+ 'required_if': (
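+ # 4-tuple form: if 'thing' == 'foo', then at least one of the
+ # options in ('other',) is required (the trailing True means
+ # "any of", not "all of")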
+ ('thing', 'foo', ('other',), True),
+ ),
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'thing': {},
+ 'other': {},
+ 'another': {},
+ },
+ },
+ 'json': {
+ 'type': 'json',
+ },
+ 'fail_on_missing_params': {
+ 'type': 'list',
+ 'default': [],
+ },
+ 'needed_param': {},
+ 'required_together_one': {},
+ 'required_together_two': {},
+ 'suboptions_list_no_elements': {
+ 'type': 'list',
+ 'options': {
+ 'thing': {},
+ },
+ },
+ 'choices_with_strings_like_bools': {
+ 'type': 'str',
+ 'choices': [
+ 'on',
+ 'off',
+ ],
+ },
+ 'choices': {
+ 'type': 'str',
+ 'choices': [
+ 'foo',
+ 'bar',
+ ],
+ },
+ 'list_choices': {
+ 'type': 'list',
+ 'choices': [
+ 'foo',
+ 'bar',
+ 'baz',
+ ],
+ },
+ 'primary': {
+ 'type': 'str',
+ 'aliases': [
+ 'alias',
+ ],
+ },
+ 'password': {
+ 'type': 'str',
+ 'no_log': True,
+ },
+ 'not_a_password': {
+ 'type': 'str',
+ 'no_log': False,
+ },
+ 'maybe_password': {
+ 'type': 'str',
+ },
+ 'int': {
+ 'type': 'int',
+ },
+ },
+ required_if=(
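+ # when state == 'present', at least one of path/content is required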
+ ('state', 'present', ('path', 'content'), True),
+ ),
+ mutually_exclusive=(
+ ('path', 'content'),
+ ),
+ required_one_of=(
+ ('required_one_of_one', 'required_one_of_two'),
+ ),
+ required_by={
+ 'required_by_one': ('required_by_two', 'required_by_three'),
+ },
+ required_together=(
+ ('required_together_one', 'required_together_two'),
+ ),
+ )
+
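+ # fail the module if any parameter named in fail_on_missing_params
+ # was left unset by the caller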
+ module.fail_on_missing_params(module.params['fail_on_missing_params'])
+
+ module.exit_json(**module.params)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/argspec/tasks/main.yml b/test/integration/targets/argspec/tasks/main.yml
new file mode 100644
index 00000000..d90bdf02
--- /dev/null
+++ b/test/integration/targets/argspec/tasks/main.yml
@@ -0,0 +1,419 @@
+- argspec:
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_one_of_one: value
+ register: argspec_required_fail
+ ignore_errors: true
+
+- argspec:
+ required: value
+ required_one_of_two: value
+
+- argspec:
+ required: value
+ register: argspec_required_one_of_fail
+ ignore_errors: true
+
+- argspec:
+ required: value
+ required_one_of_two: value
+ required_by_one: value
+ required_by_two: value
+ required_by_three: value
+
+- argspec:
+ required: value
+ required_one_of_two: value
+ required_by_one: value
+ required_by_two: value
+ register: argspec_required_by_fail
+ ignore_errors: true
+
+- argspec:
+ state: absent
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ state: present
+ required: value
+ required_one_of_one: value
+ register: argspec_required_if_fail
+ ignore_errors: true
+
+- argspec:
+ state: present
+ path: foo
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ state: present
+ content: foo
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ state: present
+ content: foo
+ path: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_mutually_exclusive_fail
+ ignore_errors: true
+
+- argspec:
+ mapping:
+ foo: bar
+ required: value
+ required_one_of_one: value
+ register: argspec_good_mapping
+
+- argspec:
+ mapping: foo=bar
+ required: value
+ required_one_of_one: value
+ register: argspec_good_mapping_kv
+
+- argspec:
+ mapping: !!str '{"foo": "bar"}'
+ required: value
+ required_one_of_one: value
+ register: argspec_good_mapping_json
+
+- argspec:
+ mapping: !!str '{"foo": False}'
+ required: value
+ required_one_of_one: value
+ register: argspec_good_mapping_dict_repr
+
+- argspec:
+ mapping: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_bad_mapping_string
+ ignore_errors: true
+
+- argspec:
+ mapping: 1
+ required: value
+ required_one_of_one: value
+ register: argspec_bad_mapping_int
+ ignore_errors: true
+
+- argspec:
+ mapping:
+ - foo
+ - bar
+ required: value
+ required_one_of_one: value
+ register: argspec_bad_mapping_list
+ ignore_errors: true
+
+- argspec:
+ required_together:
+ - thing: foo
+ other: bar
+ another: baz
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_together:
+ - another: baz
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_together:
+ - thing: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_required_together_fail
+ ignore_errors: true
+
+- argspec:
+ required_together:
+ - thing: foo
+ other: bar
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_if:
+ - thing: bar
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_if:
+ - thing: foo
+ other: bar
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_if:
+ - thing: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_required_if_fail_2
+ ignore_errors: true
+
+- argspec:
+ required_one_of:
+ - thing: foo
+ other: bar
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_one_of:
+ - {}
+ required: value
+ required_one_of_one: value
+ register: argspec_required_one_of_fail_2
+ ignore_errors: true
+
+- argspec:
+ required_by:
+ - thing: foo
+ other: bar
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_by:
+ - thing: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_required_by_fail_2
+ ignore_errors: true
+
+- argspec:
+ json: !!str '{"foo": "bar"}'
+ required: value
+ required_one_of_one: value
+ register: argspec_good_json_string
+
+- argspec:
+ json:
+ foo: bar
+ required: value
+ required_one_of_one: value
+ register: argspec_good_json_dict
+
+- argspec:
+ json: 1
+ required: value
+ required_one_of_one: value
+ register: argspec_bad_json
+ ignore_errors: true
+
+- argspec:
+ fail_on_missing_params:
+ - needed_param
+ needed_param: whatever
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ fail_on_missing_params:
+ - needed_param
+ required: value
+ required_one_of_one: value
+ register: argspec_fail_on_missing_params_bad
+ ignore_errors: true
+
+- argspec:
+ required_together_one: foo
+ required_together_two: bar
+ required: value
+ required_one_of_one: value
+
+- argspec:
+ required_together_one: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_fail_required_together_2
+ ignore_errors: true
+
+- argspec:
+ suboptions_list_no_elements:
+ - thing: foo
+ required: value
+ required_one_of_one: value
+ register: argspec_suboptions_list_no_elements
+
+- argspec:
+ choices_with_strings_like_bools: on
+ required: value
+ required_one_of_one: value
+ register: argspec_choices_with_strings_like_bools_true
+
+- argspec:
+ choices_with_strings_like_bools: 'on'
+ required: value
+ required_one_of_one: value
+ register: argspec_choices_with_strings_like_bools_true_bool
+
+- argspec:
+ choices_with_strings_like_bools: off
+ required: value
+ required_one_of_one: value
+ register: argspec_choices_with_strings_like_bools_false
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ choices: foo
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ choices: baz
+ register: argspec_choices_bad_choice
+ ignore_errors: true
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ list_choices:
+ - bar
+ - baz
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ list_choices:
+ - bar
+ - baz
+ - qux
+ register: argspec_list_choices_bad_choice
+ ignore_errors: true
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ primary: foo
+ register: argspec_aliases_primary
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ alias: foo
+ register: argspec_aliases_alias
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ primary: foo
+ alias: foo
+ register: argspec_aliases_both
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ primary: foo
+ alias: bar
+ register: argspec_aliases_both_different
+
+- command: >-
+ ansible localhost -m argspec
+ -a 'required=value required_one_of_one=value primary=foo alias=bar'
+ environment:
+ ANSIBLE_LIBRARY: '{{ role_path }}/library'
+ register: argspec_aliases_both_warning
+
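+# likewise via the CLI so the no_log warnings land on stderr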
+- command: ansible localhost -m import_role -a 'role=argspec tasks_from=password_no_log.yml'
+ register: argspec_password_no_log
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ int: 1
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ int: foo
+ register: argspec_int_invalid
+ ignore_errors: true
+
+- assert:
+ that:
+ - argspec_required_fail is failed
+
+ - argspec_required_one_of_fail is failed
+
+ - argspec_required_by_fail is failed
+
+ - argspec_required_if_fail is failed
+
+ - argspec_mutually_exclusive_fail is failed
+
+ - argspec_good_mapping is successful
+ - >-
+ argspec_good_mapping.mapping == {'foo': 'bar'}
+ - argspec_good_mapping_json is successful
+ - >-
+ argspec_good_mapping_json.mapping == {'foo': 'bar'}
+ - argspec_good_mapping_dict_repr is successful
+ - >-
+ argspec_good_mapping_dict_repr.mapping == {'foo': False}
+ - argspec_good_mapping_kv is successful
+ - >-
+ argspec_good_mapping_kv.mapping == {'foo': 'bar'}
+ - argspec_bad_mapping_string is failed
+ - argspec_bad_mapping_int is failed
+ - argspec_bad_mapping_list is failed
+
+ - argspec_required_together_fail is failed
+
+ - argspec_required_if_fail_2 is failed
+
+ - argspec_required_one_of_fail_2 is failed
+
+ - argspec_required_by_fail_2 is failed
+
+ - argspec_good_json_string is successful
+ - >-
+ argspec_good_json_string.json == '{"foo": "bar"}'
+ - argspec_good_json_dict is successful
+ - >-
+ argspec_good_json_dict.json == '{"foo": "bar"}'
+ - argspec_bad_json is failed
+
+ - argspec_fail_on_missing_params_bad is failed
+
+ - argspec_fail_required_together_2 is failed
+
+ - >-
+ argspec_suboptions_list_no_elements.suboptions_list_no_elements.0 == {'thing': 'foo'}
+
+ - argspec_choices_with_strings_like_bools_true.choices_with_strings_like_bools == 'on'
+ - argspec_choices_with_strings_like_bools_true_bool.choices_with_strings_like_bools == 'on'
+ - argspec_choices_with_strings_like_bools_false.choices_with_strings_like_bools == 'off'
+
+ - argspec_choices_bad_choice is failed
+
+ - argspec_list_choices_bad_choice is failed
+
+ - argspec_aliases_primary.primary == 'foo'
+ - argspec_aliases_primary.alias is undefined
+ - argspec_aliases_alias.primary == 'foo'
+ - argspec_aliases_alias.alias == 'foo'
+ - argspec_aliases_both.primary == 'foo'
+ - argspec_aliases_both.alias == 'foo'
+ - argspec_aliases_both_different.primary == 'bar'
+ - argspec_aliases_both_different.alias == 'bar'
+ - '"[WARNING]: Both option primary and its alias alias are set." in argspec_aliases_both_warning.stderr'
+
+ - '"Module did not set no_log for maybe_password" in argspec_password_no_log.stderr'
+ - '"Module did not set no_log for password" not in argspec_password_no_log.stderr'
+ - '"Module did not set no_log for not_a_password" not in argspec_password_no_log.stderr'
+ - argspec_password_no_log.stdout|regex_findall('VALUE_SPECIFIED_IN_NO_LOG_PARAMETER')|length == 1
+
+ - argspec_int_invalid is failed
diff --git a/test/integration/targets/argspec/tasks/password_no_log.yml b/test/integration/targets/argspec/tasks/password_no_log.yml
new file mode 100644
index 00000000..99c3307b
--- /dev/null
+++ b/test/integration/targets/argspec/tasks/password_no_log.yml
@@ -0,0 +1,14 @@
+- argspec:
+ required: value
+ required_one_of_one: value
+ password: foo
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ not_a_password: foo
+
+- argspec:
+ required: value
+ required_one_of_one: value
+ maybe_password: foo
diff --git a/test/integration/targets/assemble/aliases b/test/integration/targets/assemble/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/assemble/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/assemble/files/fragment1 b/test/integration/targets/assemble/files/fragment1
new file mode 100644
index 00000000..a00d3ea0
--- /dev/null
+++ b/test/integration/targets/assemble/files/fragment1
@@ -0,0 +1 @@
+this is fragment 1
diff --git a/test/integration/targets/assemble/files/fragment2 b/test/integration/targets/assemble/files/fragment2
new file mode 100644
index 00000000..860f7603
--- /dev/null
+++ b/test/integration/targets/assemble/files/fragment2
@@ -0,0 +1 @@
+this is fragment 2
diff --git a/test/integration/targets/assemble/files/fragment3 b/test/integration/targets/assemble/files/fragment3
new file mode 100644
index 00000000..df95b24b
--- /dev/null
+++ b/test/integration/targets/assemble/files/fragment3
@@ -0,0 +1 @@
+this is fragment 3
diff --git a/test/integration/targets/assemble/files/fragment4 b/test/integration/targets/assemble/files/fragment4
new file mode 100644
index 00000000..c83252bb
--- /dev/null
+++ b/test/integration/targets/assemble/files/fragment4
@@ -0,0 +1 @@
+this is fragment 4
diff --git a/test/integration/targets/assemble/files/fragment5 b/test/integration/targets/assemble/files/fragment5
new file mode 100644
index 00000000..8a527d15
--- /dev/null
+++ b/test/integration/targets/assemble/files/fragment5
@@ -0,0 +1 @@
+this is fragment 5
diff --git a/test/integration/targets/assemble/meta/main.yml b/test/integration/targets/assemble/meta/main.yml
new file mode 100644
index 00000000..a9d0b468
--- /dev/null
+++ b/test/integration/targets/assemble/meta/main.yml
@@ -0,0 +1,20 @@
+# test code for the assemble module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/assemble/tasks/main.yml b/test/integration/targets/assemble/tasks/main.yml
new file mode 100644
index 00000000..5e779cfb
--- /dev/null
+++ b/test/integration/targets/assemble/tasks/main.yml
@@ -0,0 +1,163 @@
+# test code for the assemble module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: create a new directory for file source
+ file: dest="{{output_dir}}/src" state=directory
+ register: result
+
+- name: assert the directory was created
+ assert:
+ that:
+ - "result.state == 'directory'"
+
+- name: copy the files to a new directory
+ copy: src="./" dest="{{output_dir}}/src"
+ register: result
+
+- name: create unicode file for test
+ shell: echo "π" > {{ output_dir }}/src/ßΩ.txt
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+
+- name: test assemble with all fragments
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1"
+ register: result
+
+- name: assert the fragments were assembled
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.changed == True"
+ - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'"
+
+- name: test assemble with all fragments
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled1"
+ register: result
+
+- name: assert that the same assemble made no changes
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.changed == False"
+ - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'"
+
+- name: test assemble with all fragments and decrypt=True
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" decrypt=yes
+ register: result
+
+- name: assert the fragments were assembled with decrypt=True
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.changed == True"
+ - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'"
+
+- name: test assemble with all fragments and decrypt=True
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled2" decrypt=yes
+ register: result
+
+- name: assert that the same assemble made no changes with decrypt=True
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.changed == False"
+ - "result.checksum == '74152e9224f774191bc0bedf460d35de86ad90e6'"
+
+- name: test assemble with fragments matching a regex
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled3" regexp="^fragment[1-3]$"
+ register: result
+
+- name: assert the fragments were assembled with a regex
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'"
+
+- name: test assemble with fragments matching a regex and decrypt=True
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled4" regexp="^fragment[1-3]$" decrypt=yes
+ register: result
+
+- name: assert the fragments were assembled with a regex and decrypt=True
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == 'edfe2d7487ef8f5ebc0f1c4dc57ba7b70a7b8e2b'"
+
+- name: test assemble with a delimiter
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled5" delimiter="#--- delimiter ---#"
+ register: result
+
+- name: assert the fragments were assembled with a delimiter
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == 'd986cefb82e34e4cf14d33a3cda132ff45aa2980'"
+
+- name: test assemble with a delimiter and decrypt=True
+ assemble: src="{{output_dir}}/src" dest="{{output_dir}}/assembled6" delimiter="#--- delimiter ---#" decrypt=yes
+ register: result
+
+- name: assert the fragments were assembled with a delimiter and decrypt=True
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == 'd986cefb82e34e4cf14d33a3cda132ff45aa2980'"
+
+- name: test assemble with remote_src=False
+ assemble: src="./" dest="{{output_dir}}/assembled7" remote_src=no
+ register: result
+
+- name: assert the fragments were assembled without remote
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'"
+
+- name: test assemble with remote_src=False and decrypt=True
+ assemble: src="./" dest="{{output_dir}}/assembled8" remote_src=no decrypt=yes
+ register: result
+
+- name: assert the fragments were assembled without remote and decrypt=True
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == '048a1bd1951aa5ccc427eeb4ca19aee45e9c68b3'"
+
+- name: test assemble with remote_src=False and a delimiter
+ assemble: src="./" dest="{{output_dir}}/assembled9" remote_src=no delimiter="#--- delimiter ---#"
+ register: result
+
+- name: assert the fragments were assembled without remote and a delimiter
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'"
+
+- name: test assemble with remote_src=False and a delimiter and decrypt=True
+ assemble: src="./" dest="{{output_dir}}/assembled10" remote_src=no delimiter="#--- delimiter ---#" decrypt=yes
+ register: result
+
+- name: assert the fragments were assembled without remote and a delimiter and decrypt=True
+ assert:
+ that:
+ - "result.state == 'file'"
+ - "result.checksum == '505359f48c65b3904127cf62b912991d4da7ed6d'"
diff --git a/test/integration/targets/assert/aliases b/test/integration/targets/assert/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/test/integration/targets/assert/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/test/integration/targets/assert/assert_quiet.out.quiet.stderr b/test/integration/targets/assert/assert_quiet.out.quiet.stderr
new file mode 100644
index 00000000..bd973b04
--- /dev/null
+++ b/test/integration/targets/assert/assert_quiet.out.quiet.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i localhost, -c local quiet.yml
+++ set +x
diff --git a/test/integration/targets/assert/assert_quiet.out.quiet.stdout b/test/integration/targets/assert/assert_quiet.out.quiet.stdout
new file mode 100644
index 00000000..b62aac6c
--- /dev/null
+++ b/test/integration/targets/assert/assert_quiet.out.quiet.stdout
@@ -0,0 +1,17 @@
+
+PLAY [localhost] ***************************************************************
+
+TASK [assert] ******************************************************************
+ok: [localhost] => (item=item_A)
+
+TASK [assert] ******************************************************************
+ok: [localhost] => (item=item_A) => {
+ "ansible_loop_var": "item",
+ "changed": false,
+ "item": "item_A",
+ "msg": "All assertions passed"
+}
+
+PLAY RECAP *********************************************************************
+localhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+
diff --git a/test/integration/targets/assert/inventory b/test/integration/targets/assert/inventory
new file mode 100644
index 00000000..16182004
--- /dev/null
+++ b/test/integration/targets/assert/inventory
@@ -0,0 +1,3 @@
+[all]
+localhost
+
diff --git a/test/integration/targets/assert/quiet.yml b/test/integration/targets/assert/quiet.yml
new file mode 100644
index 00000000..6834712c
--- /dev/null
+++ b/test/integration/targets/assert/quiet.yml
@@ -0,0 +1,16 @@
+---
+- hosts: localhost
+ gather_facts: False
+ vars:
+ item_A: yes
+ tasks:
+ - assert:
+ that: "{{ item }} is defined"
+ quiet: True
+ with_items:
+ - item_A
+ - assert:
+ that: "{{ item }} is defined"
+ quiet: False
+ with_items:
+ - item_A
diff --git a/test/integration/targets/assert/runme.sh b/test/integration/targets/assert/runme.sh
new file mode 100755
index 00000000..ca0a8587
--- /dev/null
+++ b/test/integration/targets/assert/runme.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+# This test compares "known good" output with various settings against output
+# with the current code. It's brittle by nature, but this is probably the
+# "best" approach possible.
+#
+# Notes:
+# * options passed to this script (such as -v) are ignored, as they would change
+# the output and break the test
+# * the number of asterisks after a "banner" varies with the terminal, so it
+# is forced to 79 by redirecting stdin from /dev/null
+
+set -eux
+
+run_test() {
+ # testname is playbook name
+ local testname=$1
+
+ # The shenanigans with redirection and 'tee' are to capture STDOUT and
+ # STDERR separately while still displaying both to the console
+ { ansible-playbook -i 'localhost,' -c local "${testname}.yml" \
+ > >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
+ 2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2) 0</dev/null
+
+ sed -i -e 's/ *$//' "${OUTFILE}.${testname}.stdout"
+ sed -i -e 's/ *$//' "${OUTFILE}.${testname}.stderr"
+
+ # Scrub deprecation warning that shows up in Python 2.6 on CentOS 6
+ sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
+
+ diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
+ diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
+}
+
+diff_failure() {
+ if [[ $INIT = 0 ]]; then
+ echo "FAILURE...diff mismatch!"
+ exit 1
+ fi
+}
+
+cleanup() {
+ if [[ $INIT = 0 ]]; then
+ rm -f "${OUTFILE}."*
+ fi
+}
+
+BASEFILE=assert_quiet.out
+
+ORIGFILE="${BASEFILE}"
+OUTFILE="${BASEFILE}.new"
+
+trap 'cleanup' EXIT
+
+# The --init flag will (re)generate the "good" output files used by the tests
+INIT=0
+if [[ ${1:-} == "--init" ]]; then
+ shift
+ OUTFILE=$ORIGFILE
+ INIT=1
+fi
+
+# Force the 'default' callback plugin
+export ANSIBLE_STDOUT_CALLBACK=default
+# Disable color in output for consistency
+export ANSIBLE_FORCE_COLOR=0
+export ANSIBLE_NOCOLOR=1
+# Disable retry files
+export ANSIBLE_RETRY_FILES_ENABLED=0
+
+run_test quiet
diff --git a/test/integration/targets/async/aliases b/test/integration/targets/async/aliases
new file mode 100644
index 00000000..4d56e5c7
--- /dev/null
+++ b/test/integration/targets/async/aliases
@@ -0,0 +1,4 @@
+async_status
+async_wrapper
+shippable/posix/group2
+skip/aix
diff --git a/test/integration/targets/async/library/async_test.py b/test/integration/targets/async/library/async_test.py
new file mode 100644
index 00000000..f89bd10e
--- /dev/null
+++ b/test/integration/targets/async/library/async_test.py
@@ -0,0 +1,49 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ if "--interactive" in sys.argv:
+ import ansible.module_utils.basic
+ ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps(dict(
+ ANSIBLE_MODULE_ARGS=dict(
+ fail_mode="graceful"
+ )
+ ))
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ fail_mode=dict(type='list', default=['success'])
+ )
+ )
+
+ result = dict(changed=True)
+
+ fail_mode = module.params['fail_mode']
+
+ try:
+ if 'leading_junk' in fail_mode:
+ print("leading junk before module output")
+
+ if 'graceful' in fail_mode:
+ module.fail_json(msg="failed gracefully")
+
+ if 'exception' in fail_mode:
+ raise Exception('failing via exception')
+
+ if 'stderr' in fail_mode:
+ print('printed to stderr', file=sys.stderr)
+
+ module.exit_json(**result)
+
+ finally:
+ if 'trailing_junk' in fail_mode:
+ print("trailing junk after module output")
+
+
+main()
diff --git a/test/integration/targets/async/meta/main.yml b/test/integration/targets/async/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/async/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/async/tasks/main.yml b/test/integration/targets/async/tasks/main.yml
new file mode 100644
index 00000000..b1925d25
--- /dev/null
+++ b/test/integration/targets/async/tasks/main.yml
@@ -0,0 +1,300 @@
+# test code for the async keyword
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: run a 2 second loop
+ shell: for i in $(seq 1 2); do echo $i ; sleep 1; done;
+ async: 10
+ poll: 1
+ register: async_result
+
+
+- debug: var=async_result
+
+- name: validate async returns
+ assert:
+ that:
+ - "'ansible_job_id' in async_result"
+ - "'changed' in async_result"
+ - "'cmd' in async_result"
+ - "'delta' in async_result"
+ - "'end' in async_result"
+ - "'rc' in async_result"
+ - "'start' in async_result"
+ - "'stderr' in async_result"
+ - "'stdout' in async_result"
+ - "'stdout_lines' in async_result"
+ - async_result.rc == 0
+ - async_result.finished == 1
+ - async_result is finished
+
+- name: test async without polling
+ command: sleep 5
+ async: 30
+ poll: 0
+ register: async_result
+
+- debug: var=async_result
+
+- name: validate async without polling returns
+ assert:
+ that:
+ - "'ansible_job_id' in async_result"
+ - "'started' in async_result"
+ - async_result.finished == 0
+ - async_result is not finished
+
+- name: test skipped task handling
+ command: /bin/true
+ async: 15
+ poll: 0
+ when: False
+
+# test async "fire and forget, but check later"
+
+- name: 'start a task with "fire-and-forget"'
+ command: sleep 3
+ async: 30
+ poll: 0
+ register: fnf_task
+
+- name: assert task was successfully started
+ assert:
+ that:
+ - fnf_task.started == 1
+ - fnf_task is started
+ - "'ansible_job_id' in fnf_task"
+
+- name: 'check on task started as a "fire-and-forget"'
+ async_status: jid={{ fnf_task.ansible_job_id }}
+ register: fnf_result
+ until: fnf_result is finished
+ retries: 10
+ delay: 1
+
+- name: assert task was successfully checked
+ assert:
+ that:
+ - fnf_result.finished
+ - fnf_result is finished
+
+- name: test graceful module failure
+ async_test:
+ fail_mode: graceful
+ async: 30
+ poll: 1
+ register: async_result
+ ignore_errors: true
+
+- name: assert task failed correctly
+ assert:
+ that:
+ - async_result.ansible_job_id is match('\d+\.\d+')
+ - async_result.finished == 1
+ - async_result is finished
+ - async_result is not changed
+ - async_result is failed
+ - async_result.msg == 'failed gracefully'
+
+- name: test exception module failure
+ async_test:
+ fail_mode: exception
+ async: 5
+ poll: 1
+ register: async_result
+ ignore_errors: true
+
+- name: validate response
+ assert:
+ that:
+ - async_result.ansible_job_id is match('\d+\.\d+')
+ - async_result.finished == 1
+ - async_result is finished
+ - async_result.changed == false
+ - async_result is not changed
+ - async_result.failed == true
+ - async_result is failed
+ - async_result.stderr is search('failing via exception', multiline=True)
+
+- name: test leading junk before JSON
+ async_test:
+ fail_mode: leading_junk
+ async: 5
+ poll: 1
+ register: async_result
+
+- name: validate response
+ assert:
+ that:
+ - async_result.ansible_job_id is match('\d+\.\d+')
+ - async_result.finished == 1
+ - async_result is finished
+ - async_result.changed == true
+ - async_result is changed
+ - async_result is successful
+
+- name: test trailing junk after JSON
+ async_test:
+ fail_mode: trailing_junk
+ async: 5
+ poll: 1
+ register: async_result
+
+- name: validate response
+ assert:
+ that:
+ - async_result.ansible_job_id is match('\d+\.\d+')
+ - async_result.finished == 1
+ - async_result is finished
+ - async_result.changed == true
+ - async_result is changed
+ - async_result is successful
+ - async_result.warnings[0] is search('trailing junk after module output')
+
+- name: test stderr handling
+ async_test:
+ fail_mode: stderr
+ async: 30
+ poll: 1
+ register: async_result
+ ignore_errors: true
+
+- assert:
+ that:
+ - async_result.stderr == "printed to stderr\n"
+
+# NOTE: This should report a warning that cannot be tested
+- name: test async properties on non-async task
+ command: sleep 1
+ register: non_async_result
+
+- name: validate response
+ assert:
+ that:
+ - non_async_result is successful
+ - non_async_result is changed
+ - non_async_result is finished
+ - "'ansible_job_id' not in non_async_result"
+
+- name: set fact of custom tmp dir
+ set_fact:
+ custom_async_tmp: ~/.ansible_async_test
+
+- name: ensure custom async tmp dir is absent
+ file:
+ path: '{{ custom_async_tmp }}'
+ state: absent
+
+- block:
+ - name: run async task with custom dir
+ command: sleep 1
+ register: async_custom_dir
+ async: 5
+ poll: 1
+ vars:
+ ansible_async_dir: '{{ custom_async_tmp }}'
+
+ - name: check if the async temp dir is created
+ stat:
+ path: '{{ custom_async_tmp }}'
+ register: async_custom_dir_result
+
+ - name: assert run async task with custom dir
+ assert:
+ that:
+ - async_custom_dir is successful
+ - async_custom_dir is finished
+ - async_custom_dir_result.stat.exists
+
+ - name: remove custom async dir again
+ file:
+ path: '{{ custom_async_tmp }}'
+ state: absent
+
+ - name: run async task with custom dir - deprecated format
+ command: sleep 1
+ register: async_custom_dir_dep
+ async: 5
+ poll: 1
+ environment:
+ ANSIBLE_ASYNC_DIR: '{{ custom_async_tmp }}'
+
+ - name: check if the async temp dir is created - deprecated format
+ stat:
+ path: '{{ custom_async_tmp }}'
+ register: async_custom_dir_dep_result
+
+ - name: assert run async task with custom dir - deprecated format
+ assert:
+ that:
+ - async_custom_dir_dep is successful
+ - async_custom_dir_dep is finished
+ - async_custom_dir_dep_result.stat.exists
+
+ - name: remove custom async dir after deprecation test
+ file:
+ path: '{{ custom_async_tmp }}'
+ state: absent
+
+ - name: run fire and forget async task with custom dir
+ command: echo moo
+ register: async_fandf_custom_dir
+ async: 5
+ poll: 0
+ vars:
+ ansible_async_dir: '{{ custom_async_tmp }}'
+
+ - name: fail to get async status with custom dir with defaults
+ async_status:
+ jid: '{{ async_fandf_custom_dir.ansible_job_id }}'
+ register: async_fandf_custom_dir_fail
+ ignore_errors: yes
+
+ - name: get async status with custom dir using newer format
+ async_status:
+ jid: '{{ async_fandf_custom_dir.ansible_job_id }}'
+ register: async_fandf_custom_dir_result
+ vars:
+ ansible_async_dir: '{{ custom_async_tmp }}'
+
+ - name: get async status with custom dir - deprecated format
+ async_status:
+ jid: '{{ async_fandf_custom_dir.ansible_job_id }}'
+ register: async_fandf_custom_dir_dep_result
+ environment:
+ ANSIBLE_ASYNC_DIR: '{{ custom_async_tmp }}'
+
+ - name: assert run fire and forget async task with custom dir
+ assert:
+ that:
+ - async_fandf_custom_dir is successful
+ - async_fandf_custom_dir_fail is failed
+ - async_fandf_custom_dir_fail.msg == "could not find job"
+ - async_fandf_custom_dir_result is successful
+ - async_fandf_custom_dir_dep_result is successful
+
+ always:
+ - name: remove custom tmp dir after test
+ file:
+ path: '{{ custom_async_tmp }}'
+ state: absent
+
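+# os.dup(0) raises OSError if fd 0 is closed, so the task below passes only
+# when the async wrapper gives the task a usable stdin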
+- name: Test that async has stdin
+ command: >
+ {{ ansible_python_interpreter|default('/usr/bin/python') }} -c 'import os; os.fdopen(os.dup(0), "r")'
+ async: 1
+ poll: 1
diff --git a/test/integration/targets/async_extra_data/aliases b/test/integration/targets/async_extra_data/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/async_extra_data/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/async_extra_data/library/junkping.py b/test/integration/targets/async_extra_data/library/junkping.py
new file mode 100644
index 00000000..b61d965d
--- /dev/null
+++ b/test/integration/targets/async_extra_data/library/junkping.py
@@ -0,0 +1,15 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print("junk_before_module_output")
+ print(json.dumps(dict(changed=False, source='user')))
+ print("junk_after_module_output")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/async_extra_data/runme.sh b/test/integration/targets/async_extra_data/runme.sh
new file mode 100755
index 00000000..46132731
--- /dev/null
+++ b/test/integration/targets/async_extra_data/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Verify that junk before the module's JSON output during an async call is ignored, and that the warning about junk after the output exists.
+ANSIBLE_DEBUG=0 ansible-playbook -i ../../inventory test_async.yml -v "$@" \
+ | grep 'junk after the JSON data: junk_after_module_output'
diff --git a/test/integration/targets/async_extra_data/test_async.yml b/test/integration/targets/async_extra_data/test_async.yml
new file mode 100644
index 00000000..480a2a65
--- /dev/null
+++ b/test/integration/targets/async_extra_data/test_async.yml
@@ -0,0 +1,10 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ # make sure non-JSON data before module output is ignored
+ - name: async ping wrapped in extra junk
+ junkping:
+ async: 10
+ poll: 1
+ register: result
+ - debug: var=result
diff --git a/test/integration/targets/async_fail/action_plugins/normal.py b/test/integration/targets/async_fail/action_plugins/normal.py
new file mode 100644
index 00000000..297cbd9b
--- /dev/null
+++ b/test/integration/targets/async_fail/action_plugins/normal.py
@@ -0,0 +1,62 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+
+ # individual modules might disagree, but as the generic action plugin we pass at this point.
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ if result.get('invocation', {}).get('module_args'):
+ # avoid passing to modules in case of no_log
+ # should not be set anymore but here for backwards compatibility
+ del result['invocation']['module_args']
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # do work!
+ result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
+
+ # hack to keep --verbose from showing all the setup module result
+ # moved from setup module as now we filter out all _ansible_ from result
+ if self._task.action == 'setup':
+ result['_ansible_verbose_override'] = True
+
+ # Simulate a transient network failure
+ if self._task.action == 'async_status' and 'finished' in result and result['finished'] != 1:
+ raise AnsibleError('Pretend to fail somewhere in executing async_status')
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/test/integration/targets/async_fail/aliases b/test/integration/targets/async_fail/aliases
new file mode 100644
index 00000000..c989cd70
--- /dev/null
+++ b/test/integration/targets/async_fail/aliases
@@ -0,0 +1,3 @@
+async_status
+async_wrapper
+shippable/posix/group2
diff --git a/test/integration/targets/async_fail/library/async_test.py b/test/integration/targets/async_fail/library/async_test.py
new file mode 100644
index 00000000..838f2f07
--- /dev/null
+++ b/test/integration/targets/async_fail/library/async_test.py
@@ -0,0 +1,50 @@
+import json
+import sys
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ if "--interactive" in sys.argv:
+ import ansible.module_utils.basic
+ ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps(dict(
+ ANSIBLE_MODULE_ARGS=dict(
+ fail_mode="graceful"
+ )
+ ))
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ fail_mode=dict(type='list', default=['success'])
+ )
+ )
+
+ result = dict(changed=True)
+
+ fail_mode = module.params['fail_mode']
+
+ try:
+ if 'leading_junk' in fail_mode:
+ print("leading junk before module output")
+
+ if 'graceful' in fail_mode:
+ module.fail_json(msg="failed gracefully")
+
+ if 'exception' in fail_mode:
+ raise Exception('failing via exception')
+
+ if 'recovered_fail' in fail_mode:
+ result = {"msg": "succeeded", "failed": False, "changed": True}
+ # Wait in the middle to set up a race where the controller reads incomplete
+ # data from our special async_status on the first poll
+ time.sleep(5)
+
+ module.exit_json(**result)
+
+ finally:
+ if 'trailing_junk' in fail_mode:
+ print("trailing junk after module output")
+
+
+main()
diff --git a/test/integration/targets/async_fail/meta/main.yml b/test/integration/targets/async_fail/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/async_fail/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/async_fail/tasks/main.yml b/test/integration/targets/async_fail/tasks/main.yml
new file mode 100644
index 00000000..40f72e10
--- /dev/null
+++ b/test/integration/targets/async_fail/tasks/main.yml
@@ -0,0 +1,36 @@
+# test code for the async keyword failing in the middle of output
+# (c) 2018, Ansible Project
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This uses a special copy of the normal action plugin which simulates
+# a transient failure in the module
+- name: test that we can recover from initial failures to read
+ async_test:
+ fail_mode: recovered_fail
+ async: 10
+ poll: 1
+ register: async_result
+
+- name: validate that by the end of the retry interval, we succeeded
+ assert:
+ that:
+ - async_result.ansible_job_id is match('\d+\.\d+')
+ - async_result.finished == 1
+ - async_result is finished
+ - async_result is changed
+ - async_result is successful
+ - async_result.msg is search('succeeded')
diff --git a/test/integration/targets/become/aliases b/test/integration/targets/become/aliases
new file mode 100644
index 00000000..3a07aab3
--- /dev/null
+++ b/test/integration/targets/become/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/become/files/baz.txt b/test/integration/targets/become/files/baz.txt
new file mode 100644
index 00000000..b8d834da
--- /dev/null
+++ b/test/integration/targets/become/files/baz.txt
@@ -0,0 +1 @@
+testing tilde expansion with become
diff --git a/test/integration/targets/become/tasks/default.yml b/test/integration/targets/become/tasks/default.yml
new file mode 100644
index 00000000..4ba10170
--- /dev/null
+++ b/test/integration/targets/become/tasks/default.yml
@@ -0,0 +1,82 @@
+- name: Create test user (become_method=default)
+ become: True
+ become_user: root
+ user:
+ name: "{{ become_test_user }}"
+ group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}'
+
+- name: test becoming user (become_method=default)
+ shell: whoami
+ become: True
+ become_user: "{{ become_test_user }}"
+ register: results
+
+- assert:
+ that:
+ - "results.stdout == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in file (become_method=default)
+ become: True
+ become_user: "{{ become_test_user }}"
+ file:
+ path: "~/foo.txt"
+ state: touch
+
+- name: check that the path in the user's home dir was created (become_method=default)
+ become: True
+ become_user: "{{ become_test_user }}"
+ stat:
+ path: "~{{ become_test_user }}/foo.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in template (become_method=default)
+ become: True
+ become_user: "{{ become_test_user }}"
+ template:
+ src: "bar.j2"
+ dest: "~/bar.txt"
+
+- name: check that the path in the user's home dir was created (become_method=default)
+ become: True
+ become_user: "{{ become_test_user }}"
+ stat:
+ path: "~{{ become_test_user }}/bar.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in copy (become_method=default)
+ become: True
+ become_user: "{{ become_test_user }}"
+ copy:
+ src: baz.txt
+ dest: "~/baz.txt"
+
+- name: check that the path in the user's home dir was created (become_method=default)
+ become: True
+ become_user: "{{ become_test_user }}"
+ stat:
+ path: "~{{ become_test_user }}/baz.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: Remove test user and their home dir (become_method=default)
+ become: True
+ become_user: root
+ user:
+ name: "{{ become_test_user }}"
+ state: "absent"
+ remove: "yes"
+ force: "yes"
diff --git a/test/integration/targets/become/tasks/main.yml b/test/integration/targets/become/tasks/main.yml
new file mode 100644
index 00000000..3feb5cc7
--- /dev/null
+++ b/test/integration/targets/become/tasks/main.yml
@@ -0,0 +1,5 @@
+- include_vars: default.yml
+
+- include: default.yml
+- include: sudo.yml
+- include: su.yml
diff --git a/test/integration/targets/become/tasks/su.yml b/test/integration/targets/become/tasks/su.yml
new file mode 100644
index 00000000..d314b0a8
--- /dev/null
+++ b/test/integration/targets/become/tasks/su.yml
@@ -0,0 +1,91 @@
+- name: Create test user (become_method=su)
+ become: True
+ become_user: root
+ become_method: su
+ user:
+ name: "{{ become_test_user }}"
+ group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}'
+
+- name: test becoming user (become_method=su)
+ shell: whoami
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ register: results
+
+- assert:
+ that:
+ - "results.stdout == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in file (become_method=su)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ file:
+ path: "~/foo.txt"
+ state: touch
+
+- name: check that the path in the user's home dir was created (become_method=su)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ stat:
+ path: "~{{ become_test_user }}/foo.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in template (become_method=su)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ template:
+ src: "bar.j2"
+ dest: "~/bar.txt"
+
+- name: check that the path in the user's home dir was created (become_method=su)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ stat:
+ path: "~{{ become_test_user }}/bar.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in copy (become_method=su)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ copy:
+ src: baz.txt
+ dest: "~/baz.txt"
+
+- name: check that the path in the user's home dir was created (become_method=su)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: su
+ stat:
+ path: "~{{ become_test_user }}/baz.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: Remove test user and their home dir (become_method=su)
+ become: True
+ become_user: root
+ become_method: su
+ user:
+ name: "{{ become_test_user }}"
+ state: "absent"
+ remove: "yes"
+ force: "yes"
diff --git a/test/integration/targets/become/tasks/sudo.yml b/test/integration/targets/become/tasks/sudo.yml
new file mode 100644
index 00000000..636ec378
--- /dev/null
+++ b/test/integration/targets/become/tasks/sudo.yml
@@ -0,0 +1,91 @@
+- name: Create test user (become_method=sudo)
+ become: True
+ become_user: root
+ become_method: sudo
+ user:
+ name: "{{ become_test_user }}"
+ group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}'
+
+- name: test becoming user (become_method=sudo)
+ shell: whoami
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ register: results
+
+- assert:
+ that:
+ - "results.stdout == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in file (become_method=sudo)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ file:
+ path: "~/foo.txt"
+ state: touch
+
+- name: check that the path in the user's home dir was created (become_method=sudo)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ stat:
+ path: "~{{ become_test_user }}/foo.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in template (become_method=sudo)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ template:
+ src: "bar.j2"
+ dest: "~/bar.txt"
+
+- name: check that the path in the user's home dir was created (become_method=sudo)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ stat:
+ path: "~{{ become_test_user }}/bar.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in copy (become_method=sudo)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ copy:
+ src: baz.txt
+ dest: "~/baz.txt"
+
+- name: check that the path in the user's home dir was created (become_method=sudo)
+ become: True
+ become_user: "{{ become_test_user }}"
+ become_method: sudo
+ stat:
+ path: "~{{ become_test_user }}/baz.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: Remove test user and their home dir (become_method=sudo)
+ become: True
+ become_user: root
+ become_method: sudo
+ user:
+ name: "{{ become_test_user }}"
+ state: "absent"
+ remove: "yes"
+ force: "yes"
diff --git a/test/integration/targets/become/templates/bar.j2 b/test/integration/targets/become/templates/bar.j2
new file mode 100644
index 00000000..7c5fe0ab
--- /dev/null
+++ b/test/integration/targets/become/templates/bar.j2
@@ -0,0 +1 @@
+{{ become_test_user }}
diff --git a/test/integration/targets/become/vars/default.yml b/test/integration/targets/become/vars/default.yml
new file mode 100644
index 00000000..223d44ed
--- /dev/null
+++ b/test/integration/targets/become/vars/default.yml
@@ -0,0 +1 @@
+become_test_user: ansibletest1
diff --git a/test/integration/targets/binary/aliases b/test/integration/targets/binary/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/binary/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/binary/files/b64_latin1 b/test/integration/targets/binary/files/b64_latin1
new file mode 100644
index 00000000..c7fbdeb6
--- /dev/null
+++ b/test/integration/targets/binary/files/b64_latin1
@@ -0,0 +1 @@
+Café Eñe
diff --git a/test/integration/targets/binary/files/b64_utf8 b/test/integration/targets/binary/files/b64_utf8
new file mode 100644
index 00000000..c7fbdeb6
--- /dev/null
+++ b/test/integration/targets/binary/files/b64_utf8
@@ -0,0 +1 @@
+Café Eñe
diff --git a/test/integration/targets/binary/files/from_playbook b/test/integration/targets/binary/files/from_playbook
new file mode 100644
index 00000000..c7fbdeb6
--- /dev/null
+++ b/test/integration/targets/binary/files/from_playbook
@@ -0,0 +1 @@
+Café Eñe
diff --git a/test/integration/targets/binary/meta/main.yml b/test/integration/targets/binary/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/binary/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/binary/tasks/main.yml b/test/integration/targets/binary/tasks/main.yml
new file mode 100644
index 00000000..486ee6d6
--- /dev/null
+++ b/test/integration/targets/binary/tasks/main.yml
@@ -0,0 +1,131 @@
+---
+# Various ways users want to use binary data.
+# These could be integrated into individual modules, but currently they don't all work.
+# It is easier to see them all in one place and know what we're testing.
+# When we can start testing v2 we should verify that all of these work.
+
+# In v1: the following line will traceback if it's the first task in the role,
+# but not if it's the second, third, etc. task.
+- debug: msg="{{ utf8_simple_accents|b64decode}}"
+
+# Expected values of the written files
+- name: get checksums that we expect later files to have
+ copy:
+ src: from_playbook
+ dest: "{{ output_dir }}"
+
+- copy:
+ src: b64_utf8
+ dest: "{{ output_dir }}"
+
+- copy:
+ src: b64_latin1
+ dest: "{{ output_dir }}"
+
+- stat:
+ path: "{{ output_dir }}/from_playbook"
+ register: from_playbook
+
+- stat:
+ path: "{{ output_dir }}/b64_utf8"
+ register: b64_utf8
+
+- stat:
+ path: "{{ output_dir }}/b64_latin1"
+ register: b64_latin1
+
+# Tests themselves
+- name: copy with utf-8 content in a playbook
+ copy:
+ content: "{{ simple_accents }}\n"
+ dest: "{{ output_dir }}/from_playbook.txt"
+
+- name: Check that copying utf-8 content matches
+ stat:
+ path: "{{ output_dir }}/from_playbook.txt"
+ register: results
+
+- assert:
+ that:
+ - 'results.stat.checksum == from_playbook.stat.checksum'
+
+- name: copy with utf8 in a base64 encoded string
+ copy:
+ content: "{{ utf8_simple_accents|b64decode }}\n"
+ dest: "{{ output_dir }}/b64_utf8.txt"
+
+- name: Check that utf8 in a base64 string matches
+ stat:
+ path: "{{ output_dir }}/b64_utf8.txt"
+ register: results
+
+- assert:
+ that:
+ - 'results.stat.checksum == b64_utf8.stat.checksum'
+
+- name: copy with latin1 in a base64 encoded string
+ copy:
+ content: "{{ latin1_simple_accents|b64decode }}\n"
+ dest: "{{ output_dir }}/b64_latin1.txt"
+
+- name: Check that latin1 in a base64 string matches
+ stat:
+ path: "{{ output_dir }}/b64_latin1.txt"
+ register: results
+
+- assert:
+ that:
+ - 'results.stat.checksum == b64_latin1.stat.checksum'
+ # This one depends on being able to pass binary data through
+ # Might be a while before we find a solution for this
+ ignore_errors: True
+
+- name: Template with a unicode string from the playbook
+ template:
+ src: "from_playbook_template.j2"
+ dest: "{{ output_dir }}/from_playbook_template.txt"
+
+- name: Check that writing a template from a playbook var matches
+ stat:
+ path: "{{ output_dir }}/from_playbook_template.txt"
+ register: results
+
+- assert:
+ that:
+ - 'results.stat.checksum == from_playbook.stat.checksum'
+
+- name: Template with utf8 in a base64 encoded string
+ template:
+ src: "b64_utf8_template.j2"
+ dest: "{{ output_dir }}/b64_utf8_template.txt"
+
+- name: Check that writing a template from a base64 encoded utf8 string matches
+ stat:
+ path: "{{ output_dir }}/b64_utf8_template.txt"
+ register: results
+
+- assert:
+ that:
+ - 'results.stat.checksum == b64_utf8.stat.checksum'
+
+- name: Template with latin1 in a base64 encoded string
+ template:
+ src: "b64_latin1_template.j2"
+ dest: "{{ output_dir }}/b64_latin1_template.txt"
+
+- name: Check that writing a template from a base64 encoded latin1 string matches
+ stat:
+ path: "{{ output_dir }}/b64_latin1_template.txt"
+ register: results
+
+- assert:
+ that:
+ - 'results.stat.checksum == b64_latin1.stat.checksum'
+ # This one depends on being able to pass binary data through
+ # Might be a while before we find a solution for this
+ ignore_errors: True
+
+# These might give garbled output but none of them should traceback
+- debug: var=simple_accents
+- debug: msg="{{ utf8_simple_accents|b64decode}}"
+- debug: msg="{{ latin1_simple_accents|b64decode}}"
diff --git a/test/integration/targets/binary/templates/b64_latin1_template.j2 b/test/integration/targets/binary/templates/b64_latin1_template.j2
new file mode 100644
index 00000000..ee2fc1b1
--- /dev/null
+++ b/test/integration/targets/binary/templates/b64_latin1_template.j2
@@ -0,0 +1 @@
+{{ latin1_simple_accents|b64decode }}
diff --git a/test/integration/targets/binary/templates/b64_utf8_template.j2 b/test/integration/targets/binary/templates/b64_utf8_template.j2
new file mode 100644
index 00000000..9fd3ed48
--- /dev/null
+++ b/test/integration/targets/binary/templates/b64_utf8_template.j2
@@ -0,0 +1 @@
+{{ utf8_simple_accents|b64decode }}
diff --git a/test/integration/targets/binary/templates/from_playbook_template.j2 b/test/integration/targets/binary/templates/from_playbook_template.j2
new file mode 100644
index 00000000..3be6dd4f
--- /dev/null
+++ b/test/integration/targets/binary/templates/from_playbook_template.j2
@@ -0,0 +1 @@
+{{ simple_accents }}
diff --git a/test/integration/targets/binary/vars/main.yml b/test/integration/targets/binary/vars/main.yml
new file mode 100644
index 00000000..f6d40232
--- /dev/null
+++ b/test/integration/targets/binary/vars/main.yml
@@ -0,0 +1,3 @@
+simple_accents: 'Café Eñe'
+utf8_simple_accents: 'Q2Fmw6kgRcOxZQ=='
+latin1_simple_accents: 'Q2Fm6SBF8WU='
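
Both constants decode to the same accented string: utf8_simple_accents is the
UTF-8 byte sequence of 'Café Eñe' in base64, latin1_simple_accents the latin-1
encoding of it. The UTF-8 variant round-trips cleanly:

    - debug:
        msg: "{{ 'Q2Fmw6kgRcOxZQ==' | b64decode }}"   # -> Café Eñe

The latin-1 bytes are not valid UTF-8, which is why the latin-1 checks in
tasks/main.yml above are marked ignore_errors.
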
diff --git a/test/integration/targets/binary_modules/Makefile b/test/integration/targets/binary_modules/Makefile
new file mode 100644
index 00000000..c3092e47
--- /dev/null
+++ b/test/integration/targets/binary_modules/Makefile
@@ -0,0 +1,16 @@
+.PHONY: all clean
+
+all:
+ # Compiled versions of these binary modules are available at the URL below.
+ # This avoids a dependency on go and keeps the binaries out of our git repository.
+ # https://ansible-ci-files.s3.amazonaws.com/test/integration/roles/test_binary_modules/
+ cd library; \
+ GOOS=linux GOARCH=amd64 go build -o helloworld_linux_x86_64 helloworld.go; \
+ GOOS=linux GOARCH=ppc64le go build -o helloworld_linux_ppc64le helloworld.go; \
+ GOOS=aix GOARCH=ppc64 go build -o helloworld_aix_chrp helloworld.go; \
+ GOOS=windows GOARCH=amd64 go build -o helloworld_win32nt_64-bit.exe helloworld.go; \
+ GOOS=darwin GOARCH=amd64 go build -o helloworld_darwin_x86_64 helloworld.go; \
+ GOOS=freebsd GOARCH=amd64 go build -o helloworld_freebsd_amd64 helloworld.go
+
+clean:
+ rm -f library/helloworld_*
diff --git a/test/integration/targets/binary_modules/aliases b/test/integration/targets/binary_modules/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/binary_modules/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/binary_modules/download_binary_modules.yml b/test/integration/targets/binary_modules/download_binary_modules.yml
new file mode 100644
index 00000000..e8f51b1a
--- /dev/null
+++ b/test/integration/targets/binary_modules/download_binary_modules.yml
@@ -0,0 +1,9 @@
+- hosts: testhost
+ tasks:
+ - name: download binary module
+ tags: test_binary_modules
+ get_url:
+ url: "https://ansible-ci-files.s3.amazonaws.com/test/integration/roles/test_binary_modules/{{ filename }}"
+ dest: "{{ playbook_dir }}/library/{{ filename }}"
+ mode: 0755
+ delegate_to: localhost
diff --git a/test/integration/targets/binary_modules/group_vars/all b/test/integration/targets/binary_modules/group_vars/all
new file mode 100644
index 00000000..1d3ff5e4
--- /dev/null
+++ b/test/integration/targets/binary_modules/group_vars/all
@@ -0,0 +1,3 @@
+system: "{{ ansible_system|lower }}"
+suffix: "{{ '.exe' if system == 'win32nt' else '' }}"
+filename: "helloworld_{{ system }}_{{ ansible_architecture }}{{ suffix }}"
diff --git a/test/integration/targets/binary_modules/library/.gitignore b/test/integration/targets/binary_modules/library/.gitignore
new file mode 100644
index 00000000..d034a06a
--- /dev/null
+++ b/test/integration/targets/binary_modules/library/.gitignore
@@ -0,0 +1 @@
+helloworld_*
diff --git a/test/integration/targets/binary_modules/library/helloworld.go b/test/integration/targets/binary_modules/library/helloworld.go
new file mode 100644
index 00000000..a4c16b20
--- /dev/null
+++ b/test/integration/targets/binary_modules/library/helloworld.go
@@ -0,0 +1,89 @@
+// This file is part of Ansible
+//
+// Ansible is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Ansible is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+type ModuleArgs struct {
+ Name string
+}
+
+type Response struct {
+ Msg string `json:"msg"`
+ Changed bool `json:"changed"`
+ Failed bool `json:"failed"`
+}
+
+func ExitJson(responseBody Response) {
+ returnResponse(responseBody)
+}
+
+func FailJson(responseBody Response) {
+ responseBody.Failed = true
+ returnResponse(responseBody)
+}
+
+func returnResponse(responseBody Response) {
+ var response []byte
+ var err error
+ response, err = json.Marshal(responseBody)
+ if err != nil {
+ response, _ = json.Marshal(Response{Msg: "Invalid response object"})
+ }
+ fmt.Println(string(response))
+ if responseBody.Failed {
+ os.Exit(1)
+ } else {
+ os.Exit(0)
+ }
+}
+
+func main() {
+ var response Response
+
+ if len(os.Args) != 2 {
+ response.Msg = "No argument file provided"
+ FailJson(response)
+ }
+
+ argsFile := os.Args[1]
+
+ text, err := ioutil.ReadFile(argsFile)
+ if err != nil {
+ response.Msg = "Could not read configuration file: " + argsFile
+ FailJson(response)
+ }
+
+ var moduleArgs ModuleArgs
+ err = json.Unmarshal(text, &moduleArgs)
+ if err != nil {
+ response.Msg = "Configuration file not valid JSON: " + argsFile
+ FailJson(response)
+ }
+
+ var name string = "World"
+ if moduleArgs.Name != "" {
+ name = moduleArgs.Name
+ }
+
+ response.Msg = "Hello, " + name + "!"
+ ExitJson(response)
+}
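
The Go source above implements the binary-module protocol: Ansible writes the
task arguments to a temporary JSON file, passes that file's path as the single
command-line argument, and parses one JSON object from stdout as the task
result. Sketched as data (the args path is illustrative):

    # args file written by Ansible for `args: {name: Ansible}`
    {"name": "Ansible"}
    # stdout printed by the module, parsed as the result
    {"msg": "Hello, Ansible!", "changed": false, "failed": false}
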
diff --git a/test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml b/test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml
new file mode 100644
index 00000000..35a58dcb
--- /dev/null
+++ b/test/integration/targets/binary_modules/roles/test_binary_modules/tasks/main.yml
@@ -0,0 +1,53 @@
+- debug: var=ansible_system
+
+- name: ping
+ ping:
+ when: ansible_system != 'Win32NT'
+
+- name: win_ping
+ action: win_ping
+ when: ansible_system == 'Win32NT'
+
+- name: Hello, World!
+ action: "{{ filename }}"
+ register: hello_world
+
+- assert:
+ that:
+ - 'hello_world.msg == "Hello, World!"'
+
+- name: Hello, Ansible!
+ action: "{{ filename }}"
+ args:
+ name: Ansible
+ register: hello_ansible
+
+- assert:
+ that:
+ - 'hello_ansible.msg == "Hello, Ansible!"'
+
+- name: Async Hello, World!
+ action: "{{ filename }}"
+ async: 10
+ poll: 1
+ when: ansible_system != 'Win32NT'
+ register: async_hello_world
+
+- assert:
+ that:
+ - 'async_hello_world.msg == "Hello, World!"'
+ when: async_hello_world is not skipped
+
+- name: Async Hello, Ansible!
+ action: "{{ filename }}"
+ args:
+ name: Ansible
+ async: 10
+ poll: 1
+ when: ansible_system != 'Win32NT'
+ register: async_hello_ansible
+
+- assert:
+ that:
+ - 'async_hello_ansible.msg == "Hello, Ansible!"'
+ when: async_hello_ansible is not skipped
diff --git a/test/integration/targets/binary_modules/test.sh b/test/integration/targets/binary_modules/test.sh
new file mode 100755
index 00000000..7f046670
--- /dev/null
+++ b/test/integration/targets/binary_modules/test.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook download_binary_modules.yml -i "${INVENTORY}" -v "$@"
+ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_binary_modules.yml -i "${INVENTORY}" -v "$@"
diff --git a/test/integration/targets/binary_modules/test_binary_modules.yml b/test/integration/targets/binary_modules/test_binary_modules.yml
new file mode 100644
index 00000000..bdf2a061
--- /dev/null
+++ b/test/integration/targets/binary_modules/test_binary_modules.yml
@@ -0,0 +1,5 @@
+- hosts: testhost
+ roles:
+ - role: test_binary_modules
+ tags:
+ - test_binary_modules
diff --git a/test/integration/targets/binary_modules_posix/aliases b/test/integration/targets/binary_modules_posix/aliases
new file mode 100644
index 00000000..2c6e4a07
--- /dev/null
+++ b/test/integration/targets/binary_modules_posix/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+needs/target/binary_modules
diff --git a/test/integration/targets/binary_modules_posix/runme.sh b/test/integration/targets/binary_modules_posix/runme.sh
new file mode 100755
index 00000000..670477d1
--- /dev/null
+++ b/test/integration/targets/binary_modules_posix/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+cd ../binary_modules
+INVENTORY=../../inventory ./test.sh "$@"
diff --git a/test/integration/targets/binary_modules_winrm/aliases b/test/integration/targets/binary_modules_winrm/aliases
new file mode 100644
index 00000000..ba3d2000
--- /dev/null
+++ b/test/integration/targets/binary_modules_winrm/aliases
@@ -0,0 +1,4 @@
+shippable/windows/group1
+shippable/windows/smoketest
+windows
+needs/target/binary_modules
diff --git a/test/integration/targets/binary_modules_winrm/runme.sh b/test/integration/targets/binary_modules_winrm/runme.sh
new file mode 100755
index 00000000..f182c2d6
--- /dev/null
+++ b/test/integration/targets/binary_modules_winrm/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+cd ../binary_modules
+INVENTORY=../../inventory.winrm ./test.sh "$@"
diff --git a/test/integration/targets/blockinfile/aliases b/test/integration/targets/blockinfile/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/blockinfile/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/blockinfile/files/sshd_config b/test/integration/targets/blockinfile/files/sshd_config
new file mode 100644
index 00000000..41fea190
--- /dev/null
+++ b/test/integration/targets/blockinfile/files/sshd_config
@@ -0,0 +1,135 @@
+# $OpenBSD: sshd_config,v 1.100 2016/08/15 12:32:04 naddy Exp $
+
+# This is the sshd server system-wide configuration file. See
+# sshd_config(5) for more information.
+
+# This sshd was compiled with PATH=/usr/local/bin:/usr/bin
+
+# The strategy used for options in the default sshd_config shipped with
+# OpenSSH is to specify options with their default value where
+# possible, but leave them commented. Uncommented options override the
+# default value.
+
+# If you want to change the port on a SELinux system, you have to tell
+# SELinux about this change.
+# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
+#
+#Port 22
+#AddressFamily any
+#ListenAddress 0.0.0.0
+#ListenAddress ::
+
+HostKey /etc/ssh/ssh_host_rsa_key
+#HostKey /etc/ssh/ssh_host_dsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+HostKey /etc/ssh/ssh_host_ed25519_key
+
+# Ciphers and keying
+#RekeyLimit default none
+
+# Logging
+#SyslogFacility AUTH
+SyslogFacility AUTHPRIV
+#LogLevel INFO
+
+# Authentication:
+
+#LoginGraceTime 2m
+PermitRootLogin yes
+#StrictModes yes
+#MaxAuthTries 6
+#MaxSessions 10
+
+#PubkeyAuthentication yes
+
+# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
+# but this is overridden so installations will only check .ssh/authorized_keys
+AuthorizedKeysFile .ssh/authorized_keys
+
+#AuthorizedPrincipalsFile none
+
+#AuthorizedKeysCommand none
+#AuthorizedKeysCommandUser nobody
+
+# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
+#HostbasedAuthentication no
+# Change to yes if you don't trust ~/.ssh/known_hosts for
+# HostbasedAuthentication
+#IgnoreUserKnownHosts no
+# Don't read the user's ~/.rhosts and ~/.shosts files
+#IgnoreRhosts yes
+
+# To disable tunneled clear text passwords, change to no here!
+#PermitEmptyPasswords no
+
+# Change to no to disable s/key passwords
+#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
+
+# Kerberos options
+#KerberosAuthentication no
+#KerberosOrLocalPasswd yes
+#KerberosTicketCleanup yes
+#KerberosGetAFSToken no
+#KerberosUseKuserok yes
+
+# GSSAPI options
+GSSAPIAuthentication yes
+GSSAPICleanupCredentials no
+#GSSAPIStrictAcceptorCheck yes
+#GSSAPIKeyExchange no
+#GSSAPIEnablek5users no
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through ChallengeResponseAuthentication, and
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run,
+# set this to 'yes' and ChallengeResponseAuthentication to 'no'.
+# WARNING: 'UsePAM no' is not supported in Fedora and may cause several
+# problems.
+UsePAM yes
+
+#AllowAgentForwarding yes
+#AllowTcpForwarding yes
+#GatewayPorts no
+X11Forwarding yes
+#X11DisplayOffset 10
+#X11UseLocalhost yes
+#PermitTTY yes
+#PrintMotd yes
+#PrintLastLog yes
+#TCPKeepAlive yes
+#UseLogin no
+#UsePrivilegeSeparation sandbox
+#PermitUserEnvironment no
+#Compression delayed
+#ClientAliveInterval 0
+#ClientAliveCountMax 3
+#ShowPatchLevel no
+#UseDNS no
+#PidFile /var/run/sshd.pid
+#MaxStartups 10:30:100
+#PermitTunnel no
+#ChrootDirectory none
+#VersionAddendum none
+
+# no default banner path
+#Banner none
+
+# Accept locale-related environment variables
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+
+# override default of no subsystems
+Subsystem sftp /usr/libexec/openssh/sftp-server
+
+# Example of overriding settings on a per-user basis
+#Match User anoncvs
+# X11Forwarding no
+# AllowTcpForwarding no
+# PermitTTY no
+# ForceCommand cvs server
diff --git a/test/integration/targets/blockinfile/meta/main.yml b/test/integration/targets/blockinfile/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/blockinfile/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml b/test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml
new file mode 100644
index 00000000..dbb93ecc
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/add_block_to_existing_file.yml
@@ -0,0 +1,47 @@
+- name: copy the sshd_config to the test dir
+ copy:
+ src: sshd_config
+ dest: "{{ output_dir_test }}"
+
+- name: insert/update "Match User" configuration block in sshd_config
+ blockinfile:
+ path: "{{ output_dir_test }}/sshd_config"
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+ backup: yes
+ register: blockinfile_test0
+
+- name: check content
+ shell: 'grep -c -e "Match User ansible-agent" -e "PasswordAuthentication no" {{ output_dir_test }}/sshd_config'
+ register: blockinfile_test0_grep
+
+- debug:
+ var: blockinfile_test0
+ verbosity: 1
+
+- debug:
+ var: blockinfile_test0_grep
+ verbosity: 1
+
+- name: validate first example results
+ assert:
+ that:
+ - 'blockinfile_test0.changed is defined'
+ - 'blockinfile_test0.msg is defined'
+ - 'blockinfile_test0.changed'
+ - 'blockinfile_test0.msg == "Block inserted"'
+ - 'blockinfile_test0_grep.stdout == "2"'
+
+- name: check idempotence
+ blockinfile:
+ path: "{{ output_dir_test }}/sshd_config"
+ block: |
+ Match User ansible-agent
+ PasswordAuthentication no
+ register: blockinfile_test1
+
+- name: validate idempotence results
+ assert:
+ that:
+ - 'not blockinfile_test1.changed'
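
What blockinfile appends, with its default markers, looks like the following,
which is why the grep above counts exactly one line per pattern ("2") and why
the second run finds the block already present and unchanged:

    # BEGIN ANSIBLE MANAGED BLOCK
    Match User ansible-agent
    PasswordAuthentication no
    # END ANSIBLE MANAGED BLOCK
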
diff --git a/test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml b/test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml
new file mode 100644
index 00000000..57dac60e
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/block_without_trailing_newline.yml
@@ -0,0 +1,30 @@
+- name: Add block without trailing line separator
+ blockinfile:
+ path: "{{ output_dir_test }}/chomped_block_test.txt"
+ create: yes
+ content: |-
+ one
+ two
+ three
+ register: chomptest1
+
+- name: Add block without trailing line separator again
+ blockinfile:
+ path: "{{ output_dir_test }}/chomped_block_test.txt"
+ content: |-
+ one
+ two
+ three
+ register: chomptest2
+
+- name: Check output file
+ stat:
+ path: "{{ output_dir_test }}/chomped_block_test.txt"
+ register: chomptest_file
+
+- name: Ensure chomptest results are correct
+ assert:
+ that:
+ - chomptest1 is changed
+ - chomptest2 is not changed
+ - chomptest_file.stat.checksum == '50d49f528a5f7147c7029ed6220c326b1ee2c4ae'
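
The content above uses YAML's strip operator (|-), so the block reaches
blockinfile without a trailing newline; a plain literal block (|) would keep
it. A minimal illustration of the difference:

    kept: |
      three
    stripped: |-
      three
    # kept == "three\n", stripped == "three"
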
diff --git a/test/integration/targets/blockinfile/tasks/create_file.yml b/test/integration/targets/blockinfile/tasks/create_file.yml
new file mode 100644
index 00000000..94e47203
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/create_file.yml
@@ -0,0 +1,32 @@
+- name: Create a file with blockinfile
+ blockinfile:
+ path: "{{ output_dir_test }}/empty.txt"
+ block: |
+ Hey
+ there
+ state: present
+ create: yes
+ register: empty_test_1
+
+- name: Run a task that results in an empty file
+ blockinfile:
+ path: "{{ output_dir_test }}/empty.txt"
+ block: |
+ Hey
+ there
+ state: absent
+ create: yes
+ register: empty_test_2
+
+- stat:
+ path: "{{ output_dir_test }}/empty.txt"
+ register: empty_test_stat
+
+- name: Ensure empty file was created
+ assert:
+ that:
+ - empty_test_1 is changed
+ - "'File created' in empty_test_1.msg"
+ - empty_test_2 is changed
+ - "'Block removed' in empty_test_2.msg"
+ - empty_test_stat.stat.size == 0
diff --git a/test/integration/targets/blockinfile/tasks/diff.yml b/test/integration/targets/blockinfile/tasks/diff.yml
new file mode 100644
index 00000000..4a2f9454
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/diff.yml
@@ -0,0 +1,18 @@
+- name: Create a test file
+ copy:
+ content: diff test
+ dest: "{{ output_dir_test }}/diff.txt"
+
+- name: Add block to file with diff
+ blockinfile:
+ path: "{{ output_dir_test }}/diff.txt"
+ block: |
+ line 1
+ line 2
+ register: difftest
+ diff: yes
+
+- name: Ensure diff was shown
+ assert:
+ that:
+ - difftest.diff | length > 0
diff --git a/test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml b/test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml
new file mode 100644
index 00000000..fe4e2abc
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/file_without_trailing_newline.yml
@@ -0,0 +1,36 @@
+- name: Create file without trailing newline
+ copy:
+ content: '# File with no newline'
+ dest: "{{ output_dir_test }}/no_newline_at_end.txt"
+ register: no_newline
+
+
+- name: Add block to file that does not have a newline at the end
+ blockinfile:
+ path: "{{ output_dir_test }}/no_newline_at_end.txt"
+ content: |
+ one
+ two
+ three
+ register: no_newline_test1
+
+- name: Add block to file that does not have a newline at the end again
+ blockinfile:
+ path: "{{ output_dir_test }}/no_newline_at_end.txt"
+ content: |
+ one
+ two
+ three
+ register: no_newline_test2
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir_test }}/no_newline_at_end.txt"
+ register: no_newline_file
+
+- name: Ensure block was correctly written to file with no newline at end
+ assert:
+ that:
+ - no_newline_test1 is changed
+ - no_newline_test2 is not changed
+ - no_newline_file.stat.checksum == 'dab16f864025e59125e74d1498ffb2bb048224e6'
diff --git a/test/integration/targets/blockinfile/tasks/insertafter.yml b/test/integration/targets/blockinfile/tasks/insertafter.yml
new file mode 100644
index 00000000..daf7bcf1
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/insertafter.yml
@@ -0,0 +1,37 @@
+- name: Create insertafter test file
+ copy:
+ dest: "{{ output_dir }}/after.txt"
+ content: |
+ line1
+ line2
+ line3
+
+- name: Add block using insertafter
+ blockinfile:
+ path: "{{ output_dir }}/after.txt"
+ insertafter: line2
+ block: |
+ block1
+ block2
+ register: after1
+
+- name: Add block using insertafter again
+ blockinfile:
+ path: "{{ output_dir }}/after.txt"
+ insertafter: line2
+ block: |
+ block1
+ block2
+ register: after2
+
+- name: Stat the after.txt file
+ stat:
+ path: "{{ output_dir }}/after.txt"
+ register: after_file
+
+- name: Ensure insertafter worked correctly
+ assert:
+ that:
+ - after1 is changed
+ - after2 is not changed
+ - after_file.stat.checksum == 'a8adeb971358230a28ce554f3b8fdd1ef65fdf1c'
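
After the first run the block lands directly below the line matching the
insertafter regexp, so after.txt should read (default markers shown):

    line1
    line2
    # BEGIN ANSIBLE MANAGED BLOCK
    block1
    block2
    # END ANSIBLE MANAGED BLOCK
    line3
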
diff --git a/test/integration/targets/blockinfile/tasks/insertbefore.yml b/test/integration/targets/blockinfile/tasks/insertbefore.yml
new file mode 100644
index 00000000..6089af15
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/insertbefore.yml
@@ -0,0 +1,39 @@
+- name: Create insertbefore test file
+ copy:
+ dest: "{{ output_dir }}/before.txt"
+ content: |
+ line1
+ line2
+ line3
+
+- name: Add block using insertbefore
+ blockinfile:
+ path: "{{ output_dir }}/before.txt"
+ insertbefore: line2
+ block: |
+ block1
+ block2
+ register: after1
+
+- name: Add block using insertbefore again
+ blockinfile:
+ path: "{{ output_dir }}/before.txt"
+ insertbefore: line2
+ block: |
+ block1
+ block2
+ register: after2
+
+- name: Stat the before.txt file
+ stat:
+ path: "{{ output_dir }}/before.txt"
+ register: after_file
+
+- command: cat {{ output_dir }}/before.txt
+
+- name: Ensure insertbefore worked correctly
+ assert:
+ that:
+ - after1 is changed
+ - after2 is not changed
+ - after_file.stat.checksum == '16681d1d7f29d173243bb951d6afb9c0824d7bf4'
diff --git a/test/integration/targets/blockinfile/tasks/main.yml b/test/integration/targets/blockinfile/tasks/main.yml
new file mode 100644
index 00000000..4bc0b8d1
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/main.yml
@@ -0,0 +1,40 @@
+# Test code for the blockinfile module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact:
+ output_dir_test: "{{ output_dir }}/test_blockinfile"
+
+- name: make sure our testing sub-directory does not exist
+ file:
+ path: "{{ output_dir_test }}"
+ state: absent
+
+- name: create our testing sub-directory
+ file:
+ path: "{{ output_dir_test }}"
+ state: directory
+
+- import_tasks: add_block_to_existing_file.yml
+- import_tasks: create_file.yml
+- import_tasks: preserve_line_endings.yml
+- import_tasks: block_without_trailing_newline.yml
+- import_tasks: file_without_trailing_newline.yml
+- import_tasks: diff.yml
+- import_tasks: validate.yml
+- import_tasks: insertafter.yml
+- import_tasks: insertbefore.yml
diff --git a/test/integration/targets/blockinfile/tasks/preserve_line_endings.yml b/test/integration/targets/blockinfile/tasks/preserve_line_endings.yml
new file mode 100644
index 00000000..bb2dee29
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/preserve_line_endings.yml
@@ -0,0 +1,24 @@
+- name: create line_endings_test.txt in the test dir
+ copy:
+ dest: "{{ output_dir_test }}/line_endings_test.txt"
+ # generating the content like this instead of copying a fixture file
+ # prevents sanity checks from warning about mixed line endings
+ content: "unix\nunix\nunix\n\ndos\r\ndos\r\ndos\r\n\nunix\nunix\n# BEGIN ANSIBLE MANAGED BLOCK\ndos\r\n# END ANSIBLE MANAGED BLOCK\nunix\nunix\nunix\nunix\n"
+
+- name: insert/update "dos" configuration block in line_endings_test.txt
+ blockinfile:
+ path: "{{ output_dir_test }}/line_endings_test.txt"
+ block: "dos\r\ndos\r\ndos\r\n"
+ register: blockinfile_test2
+
+- name: check content
+ # using the more precise `grep -Pc "^dos\\r$" ...` fails on BSD/macOS
+ shell: 'grep -c "^dos.$" {{ output_dir_test }}/line_endings_test.txt'
+ register: blockinfile_test2_grep
+
+- name: validate line_endings_test.txt results
+ assert:
+ that:
+ - 'blockinfile_test2 is changed'
+ - 'blockinfile_test2.msg == "Block inserted"'
+ - 'blockinfile_test2_grep.stdout == "6"'
diff --git a/test/integration/targets/blockinfile/tasks/validate.yml b/test/integration/targets/blockinfile/tasks/validate.yml
new file mode 100644
index 00000000..105bca53
--- /dev/null
+++ b/test/integration/targets/blockinfile/tasks/validate.yml
@@ -0,0 +1,28 @@
+- name: EXPECTED FAILURE test improper validate
+ blockinfile:
+ path: "{{ output_dir }}/validate.txt"
+ block: |
+ line1
+ line2
+ create: yes
+ validate: grep
+ ignore_errors: yes
+
+- name: EXPECTED FAILURE test failure to validate
+ blockinfile:
+ path: "{{ output_dir }}/validate.txt"
+ block: |
+ line1
+ line2
+ create: yes
+ validate: grep line47 %s
+ ignore_errors: yes
+
+- name: Test proper validate
+ blockinfile:
+ path: "{{ output_dir }}/validate.txt"
+ block: |
+ line1
+ line2
+ create: yes
+ validate: grep line1 %s
diff --git a/test/integration/targets/blocks/aliases b/test/integration/targets/blocks/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/blocks/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/blocks/always_failure_no_rescue_rc.yml b/test/integration/targets/blocks/always_failure_no_rescue_rc.yml
new file mode 100644
index 00000000..924643ce
--- /dev/null
+++ b/test/integration/targets/blocks/always_failure_no_rescue_rc.yml
@@ -0,0 +1,13 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: EXPECTED FAILURE
+ fail:
+ msg: Failure in block
+ always:
+ - name: EXPECTED FAILURE
+ fail:
+ msg: Failure in always
+ - debug:
+ msg: DID NOT RUN
diff --git a/test/integration/targets/blocks/always_failure_with_rescue_rc.yml b/test/integration/targets/blocks/always_failure_with_rescue_rc.yml
new file mode 100644
index 00000000..f3029cbc
--- /dev/null
+++ b/test/integration/targets/blocks/always_failure_with_rescue_rc.yml
@@ -0,0 +1,16 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: EXPECTED FAILURE
+ fail:
+ msg: Failure in block
+ rescue:
+ - debug:
+ msg: Rescue
+ always:
+ - name: EXPECTED FAILURE
+ fail:
+ msg: Failure in always
+ - debug:
+ msg: DID NOT RUN
diff --git a/test/integration/targets/blocks/always_no_rescue_rc.yml b/test/integration/targets/blocks/always_no_rescue_rc.yml
new file mode 100644
index 00000000..a4e86416
--- /dev/null
+++ b/test/integration/targets/blocks/always_no_rescue_rc.yml
@@ -0,0 +1,12 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: EXPECTED FAILURE
+ fail:
+ msg: Failure in block
+ always:
+ - debug:
+ msg: Always
+ - debug:
+ msg: DID NOT RUN
diff --git a/test/integration/targets/blocks/block_fail.yml b/test/integration/targets/blocks/block_fail.yml
new file mode 100644
index 00000000..6b84d056
--- /dev/null
+++ b/test/integration/targets/blocks/block_fail.yml
@@ -0,0 +1,5 @@
+---
+- name: Include tasks that have a failure in a block
+ hosts: localhost
+ tasks:
+ - include_tasks: block_fail_tasks.yml
diff --git a/test/integration/targets/blocks/block_fail_tasks.yml b/test/integration/targets/blocks/block_fail_tasks.yml
new file mode 100644
index 00000000..6e70dc23
--- /dev/null
+++ b/test/integration/targets/blocks/block_fail_tasks.yml
@@ -0,0 +1,9 @@
+- block:
+ - name: EXPECTED FAILURE
+ fail:
+ msg: failure
+
+ always:
+ - name: run always task
+ debug:
+ msg: TEST COMPLETE
diff --git a/test/integration/targets/blocks/block_in_rescue.yml b/test/integration/targets/blocks/block_in_rescue.yml
new file mode 100644
index 00000000..15360304
--- /dev/null
+++ b/test/integration/targets/blocks/block_in_rescue.yml
@@ -0,0 +1,33 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: "EXPECTED FAILURE"
+ fail:
+ msg: "fail to test single level block in rescue"
+ rescue:
+ - block:
+ - debug:
+ msg: Rescued!
+
+ - block:
+ - name: "EXPECTED FAILURE"
+ fail:
+ msg: "fail to test multi-level block in rescue"
+ rescue:
+ - block:
+ - block:
+ - debug:
+ msg: Rescued!
+
+ - name: "Outer block"
+ block:
+ - name: "Inner block"
+ block:
+ - name: "EXPECTED FAILURE"
+ fail:
+ msg: "fail to test multi-level block"
+ rescue:
+ - name: "Rescue block"
+ block:
+ - debug: msg="Inner block rescue"
diff --git a/test/integration/targets/blocks/block_rescue_vars.yml b/test/integration/targets/blocks/block_rescue_vars.yml
new file mode 100644
index 00000000..404f7a37
--- /dev/null
+++ b/test/integration/targets/blocks/block_rescue_vars.yml
@@ -0,0 +1,16 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: EXPECTED FAILURE
+ fail:
+ rescue:
+ - name: Assert that ansible_failed_task is defined
+ assert:
+ that:
+ - ansible_failed_task is defined
+
+ - name: Assert that ansible_failed_result is defined
+ assert:
+ that:
+ - ansible_failed_result is defined
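
Within a rescue section these two magic variables hold the failing task object
and its result, so an error handler can report exactly what broke. A minimal
sketch of using them:

    rescue:
      - debug:
          msg: "'{{ ansible_failed_task.name }}' failed: {{ ansible_failed_result.msg | default('no msg') }}"
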
diff --git a/test/integration/targets/blocks/fail.yml b/test/integration/targets/blocks/fail.yml
new file mode 100644
index 00000000..ae946551
--- /dev/null
+++ b/test/integration/targets/blocks/fail.yml
@@ -0,0 +1,2 @@
+- name: EXPECTED FAILURE
+ fail: msg="{{msg}}"
diff --git a/test/integration/targets/blocks/issue29047.yml b/test/integration/targets/blocks/issue29047.yml
new file mode 100644
index 00000000..9743773c
--- /dev/null
+++ b/test/integration/targets/blocks/issue29047.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_tasks: issue29047_tasks.yml
diff --git a/test/integration/targets/blocks/issue29047_tasks.yml b/test/integration/targets/blocks/issue29047_tasks.yml
new file mode 100644
index 00000000..3470d867
--- /dev/null
+++ b/test/integration/targets/blocks/issue29047_tasks.yml
@@ -0,0 +1,13 @@
+---
+- name: "EXPECTED FAILURE"
+ block:
+ - fail:
+ msg: "EXPECTED FAILURE"
+ rescue:
+ - name: Assert that ansible_failed_task is defined
+ assert:
+ that: ansible_failed_task is defined
+
+ - name: Assert that ansible_failed_result is defined
+ assert:
+ that: ansible_failed_result is defined
diff --git a/test/integration/targets/blocks/issue71306.yml b/test/integration/targets/blocks/issue71306.yml
new file mode 100644
index 00000000..9762f6ee
--- /dev/null
+++ b/test/integration/targets/blocks/issue71306.yml
@@ -0,0 +1,16 @@
+- hosts: all
+ gather_facts: no
+ tasks:
+ - block:
+ - block:
+ - block:
+ - name: EXPECTED FAILURE
+ fail:
+ when: ansible_host == "host1"
+
+ - debug:
+ msg: "I am successful!"
+ run_once: true
+ rescue:
+ - debug:
+ msg: "Attemp 1 failed!"
diff --git a/test/integration/targets/blocks/main.yml b/test/integration/targets/blocks/main.yml
new file mode 100644
index 00000000..012d5ab2
--- /dev/null
+++ b/test/integration/targets/blocks/main.yml
@@ -0,0 +1,128 @@
+- name: simple block test
+ hosts: testhost
+ gather_facts: yes
+ strategy: "{{test_strategy|default('linear')}}"
+ vars:
+ block_tasks_run: false
+ block_rescue_run: false
+ block_always_run: false
+ nested_block_always_run: false
+ tasks_run_after_failure: false
+ rescue_run_after_failure: false
+ always_run_after_failure: false
+ nested_block_fail_always: false
+ tasks:
+ - block:
+ - name: set block tasks run flag
+ set_fact:
+ block_tasks_run: true
+ - name: EXPECTED FAILURE fail in tasks
+ fail:
+ - name: tasks flag should not be set after failure
+ set_fact:
+ tasks_run_after_failure: true
+ rescue:
+ - name: set block rescue run flag
+ set_fact:
+ block_rescue_run: true
+ - name: EXPECTED FAILURE fail in rescue
+ fail:
+ - name: tasks flag should not be set after failure in rescue
+ set_fact:
+ rescue_run_after_failure: true
+ always:
+ - name: set block always run flag
+ set_fact:
+ block_always_run: true
+ #- block:
+ # - meta: noop
+ # always:
+ # - name: set nested block always run flag
+ # set_fact:
+ # nested_block_always_run: true
+ # - name: fail in always
+ # fail:
+ # - name: tasks flag should not be set after failure in always
+ # set_fact:
+ # always_run_after_failure: true
+ - meta: clear_host_errors
+
+ # https://github.com/ansible/ansible/issues/35148
+ - block:
+ - block:
+ - name: EXPECTED FAILURE test triggering always by failing in nested block with run_once set
+ fail:
+ run_once: true
+ always:
+ - name: set block fail always run flag
+ set_fact:
+ nested_block_fail_always: true
+ - meta: clear_host_errors
+
+ - block:
+ - block:
+ - name: EXPECTED FAILURE test triggering always by failing in nested block with any_errors_fatal set
+ fail:
+ any_errors_fatal: true
+ always:
+ - name: set block fail always run flag
+ set_fact:
+ nested_block_fail_always: true
+ - meta: clear_host_errors
+
+ post_tasks:
+ - assert:
+ that:
+ - block_tasks_run
+ - block_rescue_run
+ - block_always_run
+ #- nested_block_always_run
+ - not tasks_run_after_failure
+ - not rescue_run_after_failure
+ - not always_run_after_failure
+ - nested_block_fail_always
+ - debug: msg="TEST COMPLETE"
+
+- name: block with includes
+ hosts: testhost
+ gather_facts: yes
+ strategy: "{{test_strategy|default('linear')}}"
+ vars:
+ rescue_run_after_include_fail: false
+ always_run_after_include_fail_in_rescue: false
+ tasks_run_after_failure: false
+ rescue_run_after_failure: false
+ always_run_after_failure: false
+ tasks:
+ - block:
+ - name: include fail.yml in tasks
+ include: fail.yml
+ args:
+ msg: "failed from tasks"
+ - name: tasks flag should not be set after failure
+ set_fact:
+ tasks_run_after_failure: true
+ rescue:
+ - set_fact:
+ rescue_run_after_include_fail: true
+ - name: include fail.yml in rescue
+ include: fail.yml
+ args:
+ msg: "failed from rescue"
+ - name: flag should not be set after failure in rescue
+ set_fact:
+ rescue_run_after_failure: true
+ always:
+ - set_fact:
+ always_run_after_include_fail_in_rescue: true
+ - meta: clear_host_errors
+
+ post_tasks:
+ - assert:
+ that:
+ - rescue_run_after_include_fail
+ - always_run_after_include_fail_in_rescue
+ - not tasks_run_after_failure
+ - not rescue_run_after_failure
+ - not always_run_after_failure
+ - debug: msg="TEST COMPLETE"
diff --git a/test/integration/targets/blocks/nested_fail.yml b/test/integration/targets/blocks/nested_fail.yml
new file mode 100644
index 00000000..31ae870e
--- /dev/null
+++ b/test/integration/targets/blocks/nested_fail.yml
@@ -0,0 +1,3 @@
+- include: fail.yml
+ args:
+ msg: "nested {{msg}}"
diff --git a/test/integration/targets/blocks/nested_nested_fail.yml b/test/integration/targets/blocks/nested_nested_fail.yml
new file mode 100644
index 00000000..e9a050fb
--- /dev/null
+++ b/test/integration/targets/blocks/nested_nested_fail.yml
@@ -0,0 +1,3 @@
+- include: nested_fail.yml
+ args:
+ msg: "nested {{msg}}"
diff --git a/test/integration/targets/blocks/runme.sh b/test/integration/targets/blocks/runme.sh
new file mode 100755
index 00000000..4f3db1db
--- /dev/null
+++ b/test/integration/targets/blocks/runme.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# This test does not use "$@" to avoid further increasing the verbosity beyond what is required for the test.
+# Increasing verbosity from -vv to -vvv can increase the line count from ~400 to ~9K on our centos6 test container.
+
+# remove old output log
+rm -f block_test.out
+# run the test and check to make sure the right number of completions was logged
+ansible-playbook -vv main.yml -i ../../inventory | tee block_test.out
+env python -c \
+ 'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+ <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+# cleanup the output log again, to make sure the test is clean
+rm -f block_test.out block_test_wo_colors.out
+# run test with free strategy and again count the completions
+ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=free | tee block_test.out
+env python -c \
+ 'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+ <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+# cleanup the output log again, to make sure the test is clean
+rm -f block_test.out block_test_wo_colors.out
+# run test with host_pinned strategy and again count the completions
+ansible-playbook -vv main.yml -i ../../inventory -e test_strategy=host_pinned | tee block_test.out
+env python -c \
+ 'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+ <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+
+# run test that includes tasks that fail inside a block with always
+rm -f block_test.out block_test_wo_colors.out
+ansible-playbook -vv block_fail.yml -i ../../inventory | tee block_test.out
+env python -c \
+ 'import sys, re; sys.stdout.write(re.sub("\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]", "", sys.stdin.read()))' \
+ <block_test.out >block_test_wo_colors.out
+[ "$(grep -c 'TEST COMPLETE' block_test.out)" = "$(grep -E '^[0-9]+ plays in' block_test_wo_colors.out | cut -f1 -d' ')" ]
+
+ansible-playbook -vv block_rescue_vars.yml
+
+# https://github.com/ansible/ansible/issues/70000
+set +e
+exit_code=0
+ansible-playbook -vv always_failure_with_rescue_rc.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 2 ]
+[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Rescue' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
+rm -f rc_test.out
+
+set +e
+exit_code=0
+ansible-playbook -vv always_no_rescue_rc.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 2 ]
+[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Always' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
+rm -f rc_test.out
+
+set +e
+exit_code=0
+ansible-playbook -vv always_failure_no_rescue_rc.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 2 ]
+[ "$(grep -c 'Failure in block' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'Failure in always' rc_test.out )" -eq 1 ]
+[ "$(grep -c 'DID NOT RUN' rc_test.out )" -eq 0 ]
+rm -f rc_test.out
+
+# https://github.com/ansible/ansible/issues/71306
+set +e
+exit_code=0
+ansible-playbook -i host1,host2 -vv issue71306.yml > rc_test.out || exit_code=$?
+set -e
+cat rc_test.out
+[ $exit_code -eq 0 ]
+rm -f rc_test.out
+
+# https://github.com/ansible/ansible/issues/29047
+ansible-playbook -vv issue29047.yml -i ../../inventory
+
+# https://github.com/ansible/ansible/issues/61253
+ansible-playbook -vv block_in_rescue.yml -i ../../inventory > rc_test.out
+cat rc_test.out
+[ "$(grep -c 'rescued=3' rc_test.out)" -eq 1 ]
+[ "$(grep -c 'failed=0' rc_test.out)" -eq 1 ]
+rm -f rc_test.out
diff --git a/test/integration/targets/builtin_vars_prompt/aliases b/test/integration/targets/builtin_vars_prompt/aliases
new file mode 100644
index 00000000..4317d112
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/aliases
@@ -0,0 +1,3 @@
+setup/always/setup_passlib
+setup/always/setup_pexpect
+shippable/posix/group4
diff --git a/test/integration/targets/builtin_vars_prompt/runme.sh b/test/integration/targets/builtin_vars_prompt/runme.sh
new file mode 100755
index 00000000..af555794
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Interactively test vars_prompt
+python test-vars_prompt.py -i ../../inventory "$@"
diff --git a/test/integration/targets/builtin_vars_prompt/test-vars_prompt.py b/test/integration/targets/builtin_vars_prompt/test-vars_prompt.py
new file mode 100644
index 00000000..6c805fdd
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/test-vars_prompt.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+import os
+import pexpect
+import sys
+
+from ansible.module_utils.six import PY2
+
+if PY2:
+ log_buffer = sys.stdout
+else:
+ log_buffer = sys.stdout.buffer
+
+env_vars = {
+ 'ANSIBLE_ROLES_PATH': './roles',
+ 'ANSIBLE_NOCOLOR': 'True',
+ 'ANSIBLE_RETRY_FILES_ENABLED': 'False',
+}
+
+
+def run_test(playbook, test_spec, args=None, timeout=10, env=None):
+
+ if not env:
+ env = os.environ.copy()
+ env.update(env_vars)
+
+ if not args:
+ args = sys.argv[1:]
+
+ vars_prompt_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=timeout,
+ env=env,
+ )
+
+ vars_prompt_test.logfile = log_buffer
+ for item in test_spec[0]:
+ vars_prompt_test.expect(item[0])
+ if item[1]:
+ vars_prompt_test.send(item[1])
+ vars_prompt_test.expect(test_spec[1])
+ vars_prompt_test.expect(pexpect.EOF)
+ vars_prompt_test.close()
+
+
+# These are the tests to run. Each test is a playbook and a test_spec.
+#
+# The test_spec is a list with two elements.
+#
+# The first element is a list of two-element tuples. In each tuple, the first
+# item is the regexp to look for in the output and the second is the line to send.
+#
+# The second element is the final string of text to look for in the output.
+#
+tests = [
+ # Basic vars_prompt
+ {'playbook': 'vars_prompt-1.yml',
+ 'test_spec': [
+ [('input:', 'some input\r')],
+ '"input": "some input"']},
+
+ # Custom prompt
+ {'playbook': 'vars_prompt-2.yml',
+ 'test_spec': [
+ [('Enter some input:', 'some more input\r')],
+ '"input": "some more input"']},
+
+ # Test confirm, both correct and incorrect
+ {'playbook': 'vars_prompt-3.yml',
+ 'test_spec': [
+ [('input:', 'confirm me\r'),
+ ('confirm input:', 'confirm me\r')],
+ '"input": "confirm me"']},
+
+ {'playbook': 'vars_prompt-3.yml',
+ 'test_spec': [
+ [('input:', 'confirm me\r'),
+ ('confirm input:', 'incorrect\r'),
+ (r'\*\*\*\*\* VALUES ENTERED DO NOT MATCH \*\*\*\*', ''),
+ ('input:', 'confirm me\r'),
+ ('confirm input:', 'confirm me\r')],
+ '"input": "confirm me"']},
+
+ # Test private
+ {'playbook': 'vars_prompt-4.yml',
+ 'test_spec': [
+ [('not_secret', 'this is displayed\r'),
+ ('this is displayed', '')],
+ '"not_secret": "this is displayed"']},
+
+ # Test hashing
+ {'playbook': 'vars_prompt-5.yml',
+ 'test_spec': [
+ [('password', 'Scenic-Improving-Payphone\r'),
+ ('confirm password', 'Scenic-Improving-Payphone\r')],
+ r'"password": "\$6\$']},
+
+ # Test variables in prompt field
+ # https://github.com/ansible/ansible/issues/32723
+ {'playbook': 'vars_prompt-6.yml',
+ 'test_spec': [
+ [('prompt from variable:', 'input\r')],
+ '']},
+
+ # Test play vars coming from vars_prompt
+ # https://github.com/ansible/ansible/issues/37984
+ {'playbook': 'vars_prompt-7.yml',
+ 'test_spec': [
+ [('prompting for host:', 'testhost\r')],
+ r'testhost.*ok=1']},
+
+ # Test play unsafe toggle
+ {'playbook': 'unsafe.yml',
+ 'test_spec': [
+ [('prompting for variable:', '{{whole}}\r')],
+ r'testhost.*ok=2']},
+
+ # Test unsupported keys
+ {'playbook': 'unsupported.yml',
+ 'test_spec': [
+ [],
+ "Invalid vars_prompt data structure, found unsupported key 'when'"]},
+]
+
+for t in tests:
+ run_test(playbook=t['playbook'], test_spec=t['test_spec'])
diff --git a/test/integration/targets/builtin_vars_prompt/unsafe.yml b/test/integration/targets/builtin_vars_prompt/unsafe.yml
new file mode 100644
index 00000000..348ce152
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/unsafe.yml
@@ -0,0 +1,20 @@
+- name: Test vars_prompt unsafe
+ hosts: testhost
+ become: no
+ gather_facts: no
+ vars:
+ whole: INVALID
+ vars_prompt:
+ - name: input
+ prompt: prompting for variable
+ unsafe: true
+
+ tasks:
+ - name:
+ assert:
+ that:
+ - input != whole
+ - input != 'INVALID'
+
+ - debug:
+ var: input
diff --git a/test/integration/targets/builtin_vars_prompt/unsupported.yml b/test/integration/targets/builtin_vars_prompt/unsupported.yml
new file mode 100644
index 00000000..eab02fd6
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/unsupported.yml
@@ -0,0 +1,18 @@
+- name: Test vars_prompt unsupported key
+ hosts: testhost
+ become: no
+ gather_facts: no
+ vars_prompt:
+ - name: input
+ prompt: prompting for variable
+ # Unsupported key for vars_prompt
+ when: foo is defined
+
+ tasks:
+ - name:
+ assert:
+ that:
+ - input is not defined
+
+ - debug:
+ var: input
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml
new file mode 100644
index 00000000..727c60e7
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-1.yml
@@ -0,0 +1,15 @@
+- name: Basic vars_prompt test
+ hosts: testhost
+ become: no
+ gather_facts: no
+
+ vars_prompt:
+ - name: input
+
+ tasks:
+ - assert:
+ that:
+ - input == 'some input'
+
+ - debug:
+ var: input
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml
new file mode 100644
index 00000000..d8f20db8
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-2.yml
@@ -0,0 +1,16 @@
+- name: Test vars_prompt custom prompt
+ hosts: testhost
+ become: no
+ gather_facts: no
+
+ vars_prompt:
+ - name: input
+ prompt: "Enter some input"
+
+ tasks:
+ - assert:
+ that:
+ - input == 'some more input'
+
+ - debug:
+ var: input
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml
new file mode 100644
index 00000000..f8148182
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-3.yml
@@ -0,0 +1,17 @@
+- name: Test vars_prompt confirm
+ hosts: testhost
+ become: no
+ gather_facts: no
+
+ vars_prompt:
+ - name: input
+ confirm: yes
+
+ tasks:
+ - name:
+ assert:
+ that:
+ - input == 'confirm me'
+
+ - debug:
+ var: input
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml
new file mode 100644
index 00000000..d33cc902
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-4.yml
@@ -0,0 +1,16 @@
+- name: Test vars_prompt not private
+ hosts: testhost
+ become: no
+ gather_facts: no
+
+ vars_prompt:
+ - name: not_secret
+ private: no
+
+ tasks:
+ - assert:
+ that:
+ - not_secret == 'this is displayed'
+
+ - debug:
+ var: not_secret
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml
new file mode 100644
index 00000000..62c8ad8e
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-5.yml
@@ -0,0 +1,14 @@
+- name: Test vars_prompt hashing
+ hosts: testhost
+ become: no
+ gather_facts: no
+
+ vars_prompt:
+ - name: password
+ confirm: yes
+ encrypt: sha512_crypt
+ salt: 'jESIyad4F08hP3Ta'
+
+ tasks:
+ - debug:
+ var: password
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml
new file mode 100644
index 00000000..ea3fe620
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-6.yml
@@ -0,0 +1,20 @@
+- name: Test vars_prompt custom variables in prompt
+ hosts: testhost
+ become: no
+ gather_facts: no
+
+ vars:
+ prompt_var: prompt from variable
+
+ vars_prompt:
+ - name: input
+ prompt: "{{ prompt_var }}"
+
+ tasks:
+ - name: assert the prompted value was captured
+ assert:
+ that:
+ - input == 'input'
+
+ - debug:
+ var: input
diff --git a/test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml b/test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml
new file mode 100644
index 00000000..a6b086d0
--- /dev/null
+++ b/test/integration/targets/builtin_vars_prompt/vars_prompt-7.yml
@@ -0,0 +1,12 @@
+- name: Test vars_prompt play vars
+ hosts: "{{ target_hosts }}"
+ become: no
+ gather_facts: no
+
+ vars_prompt:
+ - name: target_hosts
+ prompt: prompting for host
+ private: no
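+    # the prompted value feeds the play-level 'hosts' keyword above, so prompts are resolved before host selection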
+
+ tasks:
+ - ping:
diff --git a/test/integration/targets/callback_default/aliases b/test/integration/targets/callback_default/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/callback_default/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr
new file mode 100644
index 00000000..431a0200
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory --check test_dryrun.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout
new file mode 100644
index 00000000..8a349097
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_markers_dry.stdout
@@ -0,0 +1,78 @@
+
+DRY RUN ************************************************************************
+
+PLAY [A common play] [CHECK MODE] **********************************************
+
+TASK [debug] [CHECK MODE] ******************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: True"
+}
+
+TASK [Command] [CHECK MODE] ****************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: true (runs always in check_mode)] [CHECK MODE] *****
+
+TASK [debug] [CHECK MODE] ******************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: True"
+}
+
+TASK [Command] [CHECK MODE] ****************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: false (runs always in wet mode)] *******************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: True"
+}
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: true] [CHECK MODE] ********************
+
+TASK [Command] [CHECK MODE] ****************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: false] [CHECK MODE] *******************
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=10 changed=7 unreachable=0 failed=0 skipped=8 rescued=0 ignored=0
+
+
+DRY RUN ************************************************************************
diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr
new file mode 100644
index 00000000..e4309428
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory test_dryrun.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout
new file mode 100644
index 00000000..f5f45105
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_markers_wet.stdout
@@ -0,0 +1,74 @@
+
+PLAY [A common play] ***********************************************************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: False"
+}
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: true (runs always in check_mode)] [CHECK MODE] *****
+
+TASK [debug] [CHECK MODE] ******************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: False"
+}
+
+TASK [Command] [CHECK MODE] ****************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: false (runs always in wet mode)] *******************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: False"
+}
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: true] *********************************
+
+TASK [Command] [CHECK MODE] ****************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: false] ********************************
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] [CHECK MODE] ******************************
+skipping: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=11 changed=8 unreachable=0 failed=0 skipped=7 rescued=0 ignored=0
+
diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr
new file mode 100644
index 00000000..431a0200
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory --check test_dryrun.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout
new file mode 100644
index 00000000..e984d499
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_dry.stdout
@@ -0,0 +1,74 @@
+
+PLAY [A common play] ***********************************************************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: True"
+}
+
+TASK [Command] *****************************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: true (runs always in check_mode)] ******************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: True"
+}
+
+TASK [Command] *****************************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: false (runs always in wet mode)] *******************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: True"
+}
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: true] *********************************
+
+TASK [Command] *****************************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: false] ********************************
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=10 changed=7 unreachable=0 failed=0 skipped=8 rescued=0 ignored=0
+
diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr
new file mode 100644
index 00000000..e4309428
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory test_dryrun.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout
new file mode 100644
index 00000000..2b331bb8
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.check_nomarkers_wet.stdout
@@ -0,0 +1,74 @@
+
+PLAY [A common play] ***********************************************************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: False"
+}
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: true (runs always in check_mode)] ******************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: False"
+}
+
+TASK [Command] *****************************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with check_mode: false (runs always in wet mode)] *******************
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "msg": "ansible_check_mode: False"
+}
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: true] *********************************
+
+TASK [Command] *****************************************************************
+skipping: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY [Play with a block with check_mode: false] ********************************
+
+TASK [Command] *****************************************************************
+changed: [testhost]
+
+TASK [Command with check_mode: false] ******************************************
+changed: [testhost]
+
+TASK [Command with check_mode: true] *******************************************
+skipping: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=11 changed=8 unreachable=0 failed=0 skipped=7 rescued=0 ignored=0
+
diff --git a/test/integration/targets/callback_default/callback_default.out.default.stderr b/test/integration/targets/callback_default/callback_default.out.default.stderr
new file mode 100644
index 00000000..d3e07d47
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.default.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory test.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.default.stdout b/test/integration/targets/callback_default/callback_default.out.default.stdout
new file mode 100644
index 00000000..05f90beb
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.default.stdout
@@ -0,0 +1,72 @@
+
+PLAY [testhost] ****************************************************************
+
+TASK [Changed task] ************************************************************
+changed: [testhost]
+
+TASK [Ok task] *****************************************************************
+ok: [testhost]
+
+TASK [Failed task] *************************************************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"}
+...ignoring
+
+TASK [Skipped task] ************************************************************
+skipping: [testhost]
+
+TASK [Task with var in name (foo bar)] *****************************************
+changed: [testhost]
+
+TASK [Loop task] ***************************************************************
+changed: [testhost] => (item=foo-1)
+changed: [testhost] => (item=foo-2)
+changed: [testhost] => (item=foo-3)
+
+TASK [debug loop] **************************************************************
+changed: [testhost] => (item=debug-1) => {
+ "msg": "debug-1"
+}
+failed: [testhost] (item=debug-2) => {
+ "msg": "debug-2"
+}
+ok: [testhost] => (item=debug-3) => {
+ "msg": "debug-3"
+}
+skipping: [testhost] => (item=debug-4)
+fatal: [testhost]: FAILED! => {"msg": "All items completed"}
+...ignoring
+
+TASK [EXPECTED FAILURE Failed task to be rescued] ******************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
+
+TASK [Rescue task] *************************************************************
+changed: [testhost]
+
+TASK [include_tasks] ***********************************************************
+included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1)
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "item": 1
+}
+
+RUNNING HANDLER [Test handler 1] ***********************************************
+changed: [testhost]
+
+RUNNING HANDLER [Test handler 2] ***********************************************
+ok: [testhost]
+
+RUNNING HANDLER [Test handler 3] ***********************************************
+changed: [testhost]
+
+PLAY [testhost] ****************************************************************
+
+TASK [First free task] *********************************************************
+changed: [testhost]
+
+TASK [Second free task] ********************************************************
+changed: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2
+
diff --git a/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr
new file mode 100644
index 00000000..932a2e4f
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stderr
@@ -0,0 +1,5 @@
++ ansible-playbook -i inventory test.yml
+++ set +x
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"}
+fatal: [testhost]: FAILED! => {"msg": "All items completed"}
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
diff --git a/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout
new file mode 100644
index 00000000..fe990d42
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.failed_to_stderr.stdout
@@ -0,0 +1,69 @@
+
+PLAY [testhost] ****************************************************************
+
+TASK [Changed task] ************************************************************
+changed: [testhost]
+
+TASK [Ok task] *****************************************************************
+ok: [testhost]
+
+TASK [Failed task] *************************************************************
+...ignoring
+
+TASK [Skipped task] ************************************************************
+skipping: [testhost]
+
+TASK [Task with var in name (foo bar)] *****************************************
+changed: [testhost]
+
+TASK [Loop task] ***************************************************************
+changed: [testhost] => (item=foo-1)
+changed: [testhost] => (item=foo-2)
+changed: [testhost] => (item=foo-3)
+
+TASK [debug loop] **************************************************************
+changed: [testhost] => (item=debug-1) => {
+ "msg": "debug-1"
+}
+failed: [testhost] (item=debug-2) => {
+ "msg": "debug-2"
+}
+ok: [testhost] => (item=debug-3) => {
+ "msg": "debug-3"
+}
+skipping: [testhost] => (item=debug-4)
+...ignoring
+
+TASK [EXPECTED FAILURE Failed task to be rescued] ******************************
+
+TASK [Rescue task] *************************************************************
+changed: [testhost]
+
+TASK [include_tasks] ***********************************************************
+included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1)
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "item": 1
+}
+
+RUNNING HANDLER [Test handler 1] ***********************************************
+changed: [testhost]
+
+RUNNING HANDLER [Test handler 2] ***********************************************
+ok: [testhost]
+
+RUNNING HANDLER [Test handler 3] ***********************************************
+changed: [testhost]
+
+PLAY [testhost] ****************************************************************
+
+TASK [First free task] *********************************************************
+changed: [testhost]
+
+TASK [Second free task] ********************************************************
+changed: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2
+
diff --git a/test/integration/targets/callback_default/callback_default.out.hide_ok.stderr b/test/integration/targets/callback_default/callback_default.out.hide_ok.stderr
new file mode 100644
index 00000000..d3e07d47
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.hide_ok.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory test.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.hide_ok.stdout b/test/integration/targets/callback_default/callback_default.out.hide_ok.stdout
new file mode 100644
index 00000000..c1e1846b
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.hide_ok.stdout
@@ -0,0 +1,56 @@
+
+PLAY [testhost] ****************************************************************
+
+TASK [Changed task] ************************************************************
+changed: [testhost]
+
+TASK [Failed task] *************************************************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"}
+...ignoring
+
+TASK [Skipped task] ************************************************************
+skipping: [testhost]
+
+TASK [Task with var in name (foo bar)] *****************************************
+changed: [testhost]
+
+TASK [Loop task] ***************************************************************
+changed: [testhost] => (item=foo-1)
+changed: [testhost] => (item=foo-2)
+changed: [testhost] => (item=foo-3)
+
+TASK [debug loop] **************************************************************
+changed: [testhost] => (item=debug-1) => {
+ "msg": "debug-1"
+}
+failed: [testhost] (item=debug-2) => {
+ "msg": "debug-2"
+}
+skipping: [testhost] => (item=debug-4)
+fatal: [testhost]: FAILED! => {"msg": "All items completed"}
+...ignoring
+
+TASK [EXPECTED FAILURE Failed task to be rescued] ******************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
+
+TASK [Rescue task] *************************************************************
+changed: [testhost]
+included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1)
+
+RUNNING HANDLER [Test handler 1] ***********************************************
+changed: [testhost]
+
+RUNNING HANDLER [Test handler 3] ***********************************************
+changed: [testhost]
+
+PLAY [testhost] ****************************************************************
+
+TASK [First free task] *********************************************************
+changed: [testhost]
+
+TASK [Second free task] ********************************************************
+changed: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2
+
diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr
new file mode 100644
index 00000000..d3e07d47
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory test.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout
new file mode 100644
index 00000000..660c7285
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped.stdout
@@ -0,0 +1,66 @@
+
+PLAY [testhost] ****************************************************************
+
+TASK [Changed task] ************************************************************
+changed: [testhost]
+
+TASK [Ok task] *****************************************************************
+ok: [testhost]
+
+TASK [Failed task] *************************************************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"}
+...ignoring
+
+TASK [Task with var in name (foo bar)] *****************************************
+changed: [testhost]
+
+TASK [Loop task] ***************************************************************
+changed: [testhost] => (item=foo-1)
+changed: [testhost] => (item=foo-2)
+changed: [testhost] => (item=foo-3)
+
+TASK [debug loop] **************************************************************
+changed: [testhost] => (item=debug-1) => {
+ "msg": "debug-1"
+}
+failed: [testhost] (item=debug-2) => {
+ "msg": "debug-2"
+}
+ok: [testhost] => (item=debug-3) => {
+ "msg": "debug-3"
+}
+fatal: [testhost]: FAILED! => {"msg": "All items completed"}
+...ignoring
+
+TASK [EXPECTED FAILURE Failed task to be rescued] ******************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
+
+TASK [Rescue task] *************************************************************
+changed: [testhost]
+included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1)
+
+TASK [debug] *******************************************************************
+ok: [testhost] => {
+ "item": 1
+}
+
+RUNNING HANDLER [Test handler 1] ***********************************************
+changed: [testhost]
+
+RUNNING HANDLER [Test handler 2] ***********************************************
+ok: [testhost]
+
+RUNNING HANDLER [Test handler 3] ***********************************************
+changed: [testhost]
+
+PLAY [testhost] ****************************************************************
+
+TASK [First free task] *********************************************************
+changed: [testhost]
+
+TASK [Second free task] ********************************************************
+changed: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2
+
diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr
new file mode 100644
index 00000000..d3e07d47
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stderr
@@ -0,0 +1,2 @@
++ ansible-playbook -i inventory test.yml
+++ set +x
diff --git a/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout
new file mode 100644
index 00000000..13948b9f
--- /dev/null
+++ b/test/integration/targets/callback_default/callback_default.out.hide_skipped_ok.stdout
@@ -0,0 +1,52 @@
+
+PLAY [testhost] ****************************************************************
+
+TASK [Changed task] ************************************************************
+changed: [testhost]
+
+TASK [Failed task] *************************************************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "no reason"}
+...ignoring
+
+TASK [Task with var in name (foo bar)] *****************************************
+changed: [testhost]
+
+TASK [Loop task] ***************************************************************
+changed: [testhost] => (item=foo-1)
+changed: [testhost] => (item=foo-2)
+changed: [testhost] => (item=foo-3)
+
+TASK [debug loop] **************************************************************
+changed: [testhost] => (item=debug-1) => {
+ "msg": "debug-1"
+}
+failed: [testhost] (item=debug-2) => {
+ "msg": "debug-2"
+}
+fatal: [testhost]: FAILED! => {"msg": "All items completed"}
+...ignoring
+
+TASK [EXPECTED FAILURE Failed task to be rescued] ******************************
+fatal: [testhost]: FAILED! => {"changed": false, "msg": "Failed as requested from task"}
+
+TASK [Rescue task] *************************************************************
+changed: [testhost]
+included: .../test/integration/targets/callback_default/include_me.yml for testhost => (item=1)
+
+RUNNING HANDLER [Test handler 1] ***********************************************
+changed: [testhost]
+
+RUNNING HANDLER [Test handler 3] ***********************************************
+changed: [testhost]
+
+PLAY [testhost] ****************************************************************
+
+TASK [First free task] *********************************************************
+changed: [testhost]
+
+TASK [Second free task] ********************************************************
+changed: [testhost]
+
+PLAY RECAP *********************************************************************
+testhost : ok=14 changed=9 unreachable=0 failed=0 skipped=1 rescued=1 ignored=2
+
diff --git a/test/integration/targets/callback_default/include_me.yml b/test/integration/targets/callback_default/include_me.yml
new file mode 100644
index 00000000..51470f3c
--- /dev/null
+++ b/test/integration/targets/callback_default/include_me.yml
@@ -0,0 +1,2 @@
+- debug:
+ var: item
diff --git a/test/integration/targets/callback_default/inventory b/test/integration/targets/callback_default/inventory
new file mode 100644
index 00000000..e75c585d
--- /dev/null
+++ b/test/integration/targets/callback_default/inventory
@@ -0,0 +1,5 @@
+[local]
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+
+[nonexistent]
+testhost5 ansible_host=169.254.199.200 # no connection is ever established with this host
diff --git a/test/integration/targets/callback_default/runme.sh b/test/integration/targets/callback_default/runme.sh
new file mode 100755
index 00000000..6e6983b1
--- /dev/null
+++ b/test/integration/targets/callback_default/runme.sh
@@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+
+# This test compares "known good" output with various settings against output
+# with the current code. It's brittle by nature, but this is probably the
+# "best" approach possible.
+#
+# Notes:
+# * options passed to this script (such as -v) are ignored, as they would change
+# the output and break the test
+# * the number of asterisks after a "banner" differs depending on the number of
+# columns on the TTY, so we must adjust the columns for the current session
+# for consistency
+
+set -eux
+
+run_test() {
+ local testname=$1
+
+ # the output was recorded without cowsay, so ensure we reproduce the same
+ export ANSIBLE_NOCOWS=1
+
+ # The shenanigans with redirection and 'tee' are to capture STDOUT and
+ # STDERR separately while still displaying both to the console
+ { ansible-playbook -i inventory test.yml \
+ > >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
+ 2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2)
+ # Scrub the deprecation warning that shows up in Python 2.6 on CentOS 6
+ sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
+ sed -i -e 's/included: .*\/test\/integration/included: ...\/test\/integration/g' "${OUTFILE}.${testname}.stdout"
+
+ diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
+ diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
+}
+
+run_test_dryrun() {
+ local testname=$1
+ # optional, pass --check to run a dry run
+ local chk=${2:-}
+
+ # the output was recorded without cowsay, so ensure we reproduce the same
+ export ANSIBLE_NOCOWS=1
+
+ # This is needed to satisfy shellcheck, which cannot handle an unquoted variable
+ cmd="ansible-playbook -i inventory ${chk} test_dryrun.yml"
+
+ # The shenanigans with redirection and 'tee' are to capture STDOUT and
+ # STDERR separately while still displaying both to the console
+ { $cmd \
+ > >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
+ 2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2)
+ # Scrub the deprecation warning that shows up in Python 2.6 on CentOS 6
+ sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
+
+ diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
+ diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
+}
+
+diff_failure() {
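+    # a diff is only fatal on normal runs; with --init we are regenerating the baseline files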
+ if [[ $INIT = 0 ]]; then
+ echo "FAILURE...diff mismatch!"
+ exit 1
+ fi
+}
+
+cleanup() {
+ if [[ $INIT = 0 ]]; then
+ rm -rf "${OUTFILE}.*"
+ fi
+
+ if [[ -f "${BASEFILE}.unreachable.stdout" ]]; then
+ rm -rf "${BASEFILE}.unreachable.stdout"
+ fi
+
+ if [[ -f "${BASEFILE}.unreachable.stderr" ]]; then
+ rm -rf "${BASEFILE}.unreachable.stderr"
+ fi
+
+ # Restore TTY cols
+ if [[ -n ${TTY_COLS:-} ]]; then
+ stty cols "${TTY_COLS}"
+ fi
+}
+
+adjust_tty_cols() {
+ if [[ -t 1 ]]; then
+ # Preserve existing TTY cols
+ TTY_COLS=$( stty -a | grep -Eo '; columns [0-9]+;' | cut -d';' -f2 | cut -d' ' -f3 )
+ # Override TTY cols to make comparing ansible-playbook output easier
+ # This value matches the default in the code when there is no TTY
+ stty cols 79
+ fi
+}
+
+BASEFILE=callback_default.out
+
+ORIGFILE="${BASEFILE}"
+OUTFILE="${BASEFILE}.new"
+
+trap 'cleanup' EXIT
+
+# The --init flag will (re)generate the "good" output files used by the tests
+INIT=0
+if [[ ${1:-} == "--init" ]]; then
+ shift
+ OUTFILE=$ORIGFILE
+ INIT=1
+fi
+
+adjust_tty_cols
+
+# Force the 'default' callback plugin, since that's what we're testing
+export ANSIBLE_STDOUT_CALLBACK=default
+# Disable color in output for consistency
+export ANSIBLE_FORCE_COLOR=0
+export ANSIBLE_NOCOLOR=1
+
+# Default settings
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=0
+export ANSIBLE_CHECK_MODE_MARKERS=0
+
+run_test default
+
+# Hide skipped
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=0
+
+run_test hide_skipped
+
+# Hide skipped/ok
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=0
+export ANSIBLE_DISPLAY_OK_HOSTS=0
+
+run_test hide_skipped_ok
+
+# Hide ok
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=0
+
+run_test hide_ok
+
+# Failed to stderr
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=1
+
+run_test failed_to_stderr
+
+# Default settings with unreachable tasks
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=1
+export ANSIBLE_TIMEOUT=1
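+# The short connection timeout makes the nonexistent host fail fast as UNREACHABLE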
+
+# Check that UNREACHABLE appears in stderr
+set +e
+ansible-playbook -i inventory test_2.yml > >(set +x; tee "${BASEFILE}.unreachable.stdout";) 2> >(set +x; tee "${BASEFILE}.unreachable.stderr" >&2) || true
+set -e
+if test "$(grep -c 'UNREACHABLE' "${BASEFILE}.unreachable.stderr")" -ne 1; then
+ echo "Test failed"
+ exit 1
+fi
+
+## DRY RUN tests
+#
+# Default settings with dry run tasks
+export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
+export ANSIBLE_DISPLAY_OK_HOSTS=1
+export ANSIBLE_DISPLAY_FAILED_STDERR=1
+# Enable Check mode markers
+export ANSIBLE_CHECK_MODE_MARKERS=1
+
+# Test the wet run with check markers
+run_test_dryrun check_markers_wet
+
+# Test the dry run with check markers
+run_test_dryrun check_markers_dry --check
+
+# Disable Check mode markers
+export ANSIBLE_CHECK_MODE_MARKERS=0
+
+# Test the wet run without check markers
+run_test_dryrun check_nomarkers_wet
+
+# Test the dry run without check markers
+run_test_dryrun check_nomarkers_dry --check
diff --git a/test/integration/targets/callback_default/test.yml b/test/integration/targets/callback_default/test.yml
new file mode 100644
index 00000000..b31787bf
--- /dev/null
+++ b/test/integration/targets/callback_default/test.yml
@@ -0,0 +1,88 @@
+---
+- hosts: testhost
+ gather_facts: no
+ vars:
+ foo: foo bar
+ tasks:
+ - name: Changed task
+ command: echo foo
+ changed_when: true
+ notify: test handlers
+
+ - name: Ok task
+ command: echo foo
+ changed_when: false
+
+ - name: Failed task
+ fail:
+ msg: no reason
+ ignore_errors: yes
+
+ - name: Skipped task
+ command: echo foo
+ when: false
+
+ - name: Task with var in name ({{ foo }})
+ command: echo foo
+
+ - name: Loop task
+ command: echo foo
+ loop:
+ - 1
+ - 2
+ - 3
+ loop_control:
+ label: foo-{{ item }}
+
+ # detect "changed" debug tasks being hidden with display_ok_hosts=false
+ - name: debug loop
+ debug:
+ msg: debug-{{ item }}
+ changed_when: item == 1
+ failed_when: item == 2
+ when: item != 4
+ ignore_errors: yes
+ loop:
+ - 1
+ - 2
+ - 3
+ - 4
+ loop_control:
+ label: debug-{{ item }}
+
+ - block:
+ - name: EXPECTED FAILURE Failed task to be rescued
+ fail:
+ rescue:
+ - name: Rescue task
+ command: echo rescued
+
+ - include_tasks: include_me.yml
+ loop:
+ - 1
+
+ handlers:
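+    # all three handlers subscribe to the 'test handlers' topic via 'listen', so a single notify triggers them all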
+ - name: Test handler 1
+ command: echo foo
+ listen: test handlers
+
+ - name: Test handler 2
+ command: echo foo
+ changed_when: false
+ listen: test handlers
+
+ - name: Test handler 3
+ command: echo foo
+ listen: test handlers
+
+# An issue was found previously for tasks in a play using strategy 'free' after
+# a non-'free' play in the same playbook, so we protect against a regression.
+- hosts: testhost
+ gather_facts: no
+ strategy: free
+ tasks:
+ - name: First free task
+ command: echo foo
+
+ - name: Second free task
+ command: echo foo
diff --git a/test/integration/targets/callback_default/test_2.yml b/test/integration/targets/callback_default/test_2.yml
new file mode 100644
index 00000000..2daded71
--- /dev/null
+++ b/test/integration/targets/callback_default/test_2.yml
@@ -0,0 +1,6 @@
+- hosts: nonexistent
+ gather_facts: no
+ tasks:
+ - name: Test task for unreachable host
+ command: echo foo
+ ignore_errors: True
diff --git a/test/integration/targets/callback_default/test_dryrun.yml b/test/integration/targets/callback_default/test_dryrun.yml
new file mode 100644
index 00000000..26cf0831
--- /dev/null
+++ b/test/integration/targets/callback_default/test_dryrun.yml
@@ -0,0 +1,93 @@
+---
+- name: A common play
+ hosts: testhost
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: 'ansible_check_mode: {{ansible_check_mode}}'
+
+ - name: Command
+ command: ls -l
+
+ - name: "Command with check_mode: false"
+ command: ls -l
+ check_mode: false
+
+ - name: "Command with check_mode: true"
+ command: ls -l
+ check_mode: true
+
+
+- name: "Play with check_mode: true (runs always in check_mode)"
+ hosts: testhost
+ gather_facts: no
+ check_mode: true
+ tasks:
+ - debug:
+ msg: 'ansible_check_mode: {{ansible_check_mode}}'
+
+ - name: Command
+ command: ls -l
+
+ - name: "Command with check_mode: false"
+ command: ls -l
+ check_mode: false
+
+ - name: "Command with check_mode: true"
+ command: ls -l
+ check_mode: true
+
+
+- name: "Play with check_mode: false (runs always in wet mode)"
+ hosts: testhost
+ gather_facts: no
+ check_mode: false
+ tasks:
+ - debug:
+ msg: 'ansible_check_mode: {{ansible_check_mode}}'
+
+ - name: Command
+ command: ls -l
+
+ - name: "Command with check_mode: false"
+ command: ls -l
+ check_mode: false
+
+ - name: "Command with check_mode: true"
+ command: ls -l
+ check_mode: true
+
+
+- name: "Play with a block with check_mode: true"
+ hosts: testhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: Command
+ command: ls -l
+
+ - name: "Command with check_mode: false"
+ command: ls -l
+ check_mode: false
+
+ - name: "Command with check_mode: true"
+ command: ls -l
+ check_mode: true
+ check_mode: true
+
+- name: "Play with a block with check_mode: false"
+ hosts: testhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: Command
+ command: ls -l
+
+ - name: "Command with check_mode: false"
+ command: ls -l
+ check_mode: false
+
+ - name: "Command with check_mode: true"
+ command: ls -l
+ check_mode: true
+ check_mode: false
diff --git a/test/integration/targets/changed_when/aliases b/test/integration/targets/changed_when/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/changed_when/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/changed_when/meta/main.yml b/test/integration/targets/changed_when/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/changed_when/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/changed_when/tasks/main.yml b/test/integration/targets/changed_when/tasks/main.yml
new file mode 100644
index 00000000..7b997189
--- /dev/null
+++ b/test/integration/targets/changed_when/tasks/main.yml
@@ -0,0 +1,61 @@
+# test code for the changed_when parameter
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: ensure shell is always changed
+ shell: ls -al /tmp
+ register: shell_result
+
+- debug: var=shell_result
+
+- name: changed should always be true for shell
+ assert:
+ that:
+ - "shell_result.changed"
+
+- name: test changed_when override for shell
+ shell: ls -al /tmp
+ changed_when: False
+ register: shell_result
+
+- debug: var=shell_result
+
+- name: changed should be false
+ assert:
+ that:
+ - "not shell_result.changed"
+
+- name: Add hosts to test group and ensure it appears as changed
+ group_by:
+ key: "cw_test1_{{ inventory_hostname }}"
+ register: groupby
+
+- name: verify it is changed
+ assert:
+ that:
+ - groupby is changed
+
+- name: Add hosts to test group and ensure it does NOT appear as changed
+ group_by:
+ key: "cw_test2_{{ inventory_hostname }}"
+ changed_when: False
+ register: groupby
+
+- name: verify it is not changed
+ assert:
+ that:
+ - groupby is not changed
diff --git a/test/integration/targets/check_mode/aliases b/test/integration/targets/check_mode/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/check_mode/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/check_mode/check_mode-not-on-cli.yml b/test/integration/targets/check_mode/check_mode-not-on-cli.yml
new file mode 100644
index 00000000..1b0c734b
--- /dev/null
+++ b/test/integration/targets/check_mode/check_mode-not-on-cli.yml
@@ -0,0 +1,37 @@
+---
+# Run without --check
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - command: 'echo ran'
+ register: command_out
+
+ - debug: var=command_out
+ - name: check that this did not run in check mode
+ assert:
+ that:
+ - '"ran" in command_out["stdout"]'
+
+- hosts: testhost
+ gather_facts: False
+ check_mode: True
+ tasks:
+ - command: 'echo ran'
+ register: command_out
+
+ - name: check that play level check_mode overrode the cli
+ assert:
+ that:
+ - '"check mode" in command_out["msg"]'
+
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - command: 'echo ran'
+ register: command_out
+ check_mode: True
+
+ - name: check that task level check_mode overrode the cli
+ assert:
+ that:
+ - '"check mode" in command_out["msg"]'
diff --git a/test/integration/targets/check_mode/check_mode-on-cli.yml b/test/integration/targets/check_mode/check_mode-on-cli.yml
new file mode 100644
index 00000000..0af34b83
--- /dev/null
+++ b/test/integration/targets/check_mode/check_mode-on-cli.yml
@@ -0,0 +1,36 @@
+---
+# Run with --check
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - command: 'echo ran'
+ register: command_out
+
+ - name: check that this did not run in check mode
+ assert:
+ that:
+ - '"check mode" in command_out["msg"]'
+
+- hosts: testhost
+ gather_facts: False
+ check_mode: False
+ tasks:
+ - command: 'echo ran'
+ register: command_out
+
+ - name: check that play level check_mode overrode the cli
+ assert:
+ that:
+ - '"ran" in command_out["stdout"]'
+
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - command: 'echo ran'
+ register: command_out
+ check_mode: False
+
+ - name: check that task level check_mode overrode the cli
+ assert:
+ that:
+ - '"ran" in command_out["stdout"]'
diff --git a/test/integration/targets/check_mode/check_mode.yml b/test/integration/targets/check_mode/check_mode.yml
new file mode 100644
index 00000000..a5777506
--- /dev/null
+++ b/test/integration/targets/check_mode/check_mode.yml
@@ -0,0 +1,7 @@
+- name: Test that check works with check_mode specified in roles
+ hosts: testhost
+ vars:
+ - output_dir: .
+ roles:
+ - { role: test_always_run, tags: test_always_run }
+ - { role: test_check_mode, tags: test_check_mode }
diff --git a/test/integration/targets/check_mode/roles/test_always_run/meta/main.yml b/test/integration/targets/check_mode/roles/test_always_run/meta/main.yml
new file mode 100644
index 00000000..d06fd48c
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_always_run/meta/main.yml
@@ -0,0 +1,17 @@
+# test code for the check_mode: no option
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
diff --git a/test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml b/test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml
new file mode 100644
index 00000000..59bfb1d6
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_always_run/tasks/main.yml
@@ -0,0 +1,29 @@
+# test code for the check_mode: no option
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: run a command while in check mode
+ shell: echo "running"
+ check_mode: no
+ register: result
+
+- name: assert that the command was run
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.stdout == 'running'"
+ - "result.rc == 0"
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt b/test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt
new file mode 100644
index 00000000..3e96db9b
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/files/foo.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml b/test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml
new file mode 100644
index 00000000..f926d144
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/tasks/main.yml
@@ -0,0 +1,50 @@
+# test code for the template module
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
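+# These tasks assume the playbook is invoked with --check (see runme.sh); individual tasks opt out via 'check_mode: no'
+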
+- name: fill in a basic template in check mode
+ template: src=foo.j2 dest={{output_dir}}/checkmode_foo.templated mode=0644
+ register: template_result
+
+- name: check whether file exists
+ stat: path={{output_dir}}/checkmode_foo.templated
+ register: foo
+
+- name: verify that the file was marked as changed in check mode
+ assert:
+ that:
+ - "template_result is changed"
+ - "not foo.stat.exists"
+
+- name: Actually create the file, disable check mode
+ template: src=foo.j2 dest={{output_dir}}/checkmode_foo.templated2 mode=0644
+ check_mode: no
+ register: checkmode_disabled
+
+- name: fill in template with new content
+ template: src=foo.j2 dest={{output_dir}}/checkmode_foo.templated2 mode=0644
+ register: template_result2
+
+- name: remove templated file
+ file: path={{output_dir}}/checkmode_foo.templated2 state=absent
+ check_mode: no
+
+- name: verify that the file was not changed
+ assert:
+ that:
+ - "checkmode_disabled is changed"
+ - "template_result2 is not changed"
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2 b/test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2
new file mode 100644
index 00000000..55aab8f1
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/templates/foo.j2
@@ -0,0 +1 @@
+{{ templated_var }}
diff --git a/test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml b/test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml
new file mode 100644
index 00000000..1e8f64cc
--- /dev/null
+++ b/test/integration/targets/check_mode/roles/test_check_mode/vars/main.yml
@@ -0,0 +1 @@
+templated_var: templated_var_loaded
diff --git a/test/integration/targets/check_mode/runme.sh b/test/integration/targets/check_mode/runme.sh
new file mode 100755
index 00000000..954ac6ff
--- /dev/null
+++ b/test/integration/targets/check_mode/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook check_mode.yml -i ../../inventory -v --check "$@"
+ansible-playbook check_mode-on-cli.yml -i ../../inventory -v --check "$@"
+ansible-playbook check_mode-not-on-cli.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/cli/aliases b/test/integration/targets/cli/aliases
new file mode 100644
index 00000000..a8816e11
--- /dev/null
+++ b/test/integration/targets/cli/aliases
@@ -0,0 +1,5 @@
+destructive
+needs/root
+needs/ssh
+needs/target/setup_pexpect
+shippable/posix/group3
diff --git a/test/integration/targets/cli/runme.sh b/test/integration/targets/cli/runme.sh
new file mode 100755
index 00000000..d9e84625
--- /dev/null
+++ b/test/integration/targets/cli/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook setup.yml
+
+python test-cli.py
diff --git a/test/integration/targets/cli/setup.yml b/test/integration/targets/cli/setup.yml
new file mode 100644
index 00000000..901cfd14
--- /dev/null
+++ b/test/integration/targets/cli/setup.yml
@@ -0,0 +1,42 @@
+- hosts: localhost
+ gather_facts: yes
+ roles:
+ - setup_pexpect
+
+ tasks:
+ - name: Test ansible-playbook and ansible with -K
+ block:
+ - name: Create user to connect as
+ user:
+ name: cliuser1
+ shell: /bin/bash
+ groups: wheel
+ append: yes
+ password: "{{ 'secretpassword' | password_hash('sha512', 'mysecretsalt') }}"
+ - name: Create user to become
+ user:
+ name: cliuser2
+ shell: /bin/bash
+ password: "{{ 'secretpassword' | password_hash('sha512', 'mysecretsalt') }}"
+ # Sometimes this file doesn't get removed, and we need it gone to ssh
+ - name: Remove /run/nologin
+ file:
+ path: /run/nologin
+ state: absent
+ # Make Ansible run Python to run Ansible
+ - name: Run the test
+ shell: python test_k_and_K.py {{ ansible_python_interpreter }}
+ always:
+ - name: Remove users
+ user:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - cliuser1
+ - cliuser2
+ # For now, we don't test this everywhere, because `user` works differently
+ # on some platforms, as does sudo/sudoers. On Fedora, we can just add
+ # the user to 'wheel' and things magically work.
+ # TODO: In theory, we should test this with all the different 'become'
+ # plugins in base.
+ when: ansible_distribution == 'Fedora'
diff --git a/test/integration/targets/cli/test-cli.py b/test/integration/targets/cli/test-cli.py
new file mode 100644
index 00000000..9893d665
--- /dev/null
+++ b/test/integration/targets/cli/test-cli.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# Copyright (c) 2019 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import pexpect
+
+os.environ['ANSIBLE_NOCOLOR'] = '1'
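+# The password entered at the -k prompt must be treated as a literal string;
+# the assert below verifies the template was never evaluated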
+out = pexpect.run(
+ 'ansible localhost -m debug -a msg="{{ ansible_password }}" -k',
+ events={
+ 'SSH password:': '{{ 1 + 2 }}\n'
+ }
+)
+
+assert b'{{ 1 + 2 }}' in out
diff --git a/test/integration/targets/cli/test_k_and_K.py b/test/integration/targets/cli/test_k_and_K.py
new file mode 100644
index 00000000..f7077fba
--- /dev/null
+++ b/test/integration/targets/cli/test_k_and_K.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+import pexpect
+
+os.environ['ANSIBLE_NOCOLOR'] = '1'
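+# -k prompts for the SSH connection password and -K for the become password;
+# pexpect answers both prompts below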
+
+out = pexpect.run(
+ 'ansible -c ssh -i localhost, -u cliuser1 -e ansible_python_interpreter={0} '
+ '-m command -a whoami -Kkb --become-user cliuser2 localhost'.format(sys.argv[1]),
+ events={
+ 'SSH password:': 'secretpassword\n',
+ 'BECOME password': 'secretpassword\n',
+ },
+ timeout=10
+)
+
+print(out)
+
+assert b'cliuser2' in out
diff --git a/test/integration/targets/collections/a.statichost.yml b/test/integration/targets/collections/a.statichost.yml
new file mode 100644
index 00000000..683878aa
--- /dev/null
+++ b/test/integration/targets/collections/a.statichost.yml
@@ -0,0 +1,3 @@
+# use a plugin defined in a content-adjacent collection to ensure we added it properly
+plugin: testns.content_adj.statichost
+hostname: dynamic_host_a
diff --git a/test/integration/targets/collections/aliases b/test/integration/targets/collections/aliases
new file mode 100644
index 00000000..1a9cc499
--- /dev/null
+++ b/test/integration/targets/collections/aliases
@@ -0,0 +1,4 @@
+posix
+shippable/posix/group4
+shippable/windows/group1
+windows
diff --git a/test/integration/targets/collections/cache.statichost.yml b/test/integration/targets/collections/cache.statichost.yml
new file mode 100644
index 00000000..b2adcfa6
--- /dev/null
+++ b/test/integration/targets/collections/cache.statichost.yml
@@ -0,0 +1,7 @@
+# use inventory and cache plugins defined in a content-adjacent collection
+plugin: testns.content_adj.statichost
+hostname: cache_host_a
+cache_plugin: testns.content_adj.custom_jsonfile
+cache: yes
+cache_connection: inventory_cache
+cache_prefix: 'prefix_'
diff --git a/test/integration/targets/collections/check_populated_inventory.yml b/test/integration/targets/collections/check_populated_inventory.yml
new file mode 100644
index 00000000..ab33081a
--- /dev/null
+++ b/test/integration/targets/collections/check_populated_inventory.yml
@@ -0,0 +1,11 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - assert:
+ that:
+ - "groups.all | length == 2"
+ - "groups.ungrouped == groups.all"
+ - "'cache_host_a' in groups.all"
+ - "'dynamic_host_a' in groups.all"
diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py
new file mode 100644
index 00000000..cba38120
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/coll_in_sys/plugins/modules/systestmodule.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='sys')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py
new file mode 100644
index 00000000..e3db81be
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/maskedmodule.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, failed=True, msg='this collection should be masked by testcoll in the user content root')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py
new file mode 100644
index 00000000..cba38120
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/plugins/modules/testmodule.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='sys')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml
new file mode 100644
index 00000000..21fe324a
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_sys/ansible_collections/testns/testcoll/roles/maskedrole/tasks/main.yml
@@ -0,0 +1,2 @@
+- fail:
+ msg: this role should never be visible or runnable
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py
new file mode 100644
index 00000000..07476709
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/builtin/plugins/modules/ping.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='overridden ansible.builtin (should not be possible)')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py
new file mode 100644
index 00000000..5ea354e7
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/ansible/bullcoll/plugins/modules/bullmodule.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='user_ansible_bullcoll')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py
new file mode 100644
index 00000000..aa5c3eed
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/__init__.py
@@ -0,0 +1 @@
+thing = "hello from testns.othercoll.formerly_testcoll_pkg.thing"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py
new file mode 100644
index 00000000..eb49a163
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/othercoll/plugins/module_utils/formerly_testcoll_pkg/submod.py
@@ -0,0 +1 @@
+thing = "hello from formerly_testcoll_pkg.submod.thing"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py
new file mode 100644
index 00000000..51fe8524
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testbroken/plugins/filter/broken_filter.py
@@ -0,0 +1,13 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'broken': lambda x: 'broken',
+ }
+
+
+raise Exception('This is a broken filter plugin.')
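
This filter file raises at import time on purpose: loading it must disable only that one plugin file, not abort the play. A standalone sketch of the tolerant-load pattern such a fixture exercises (the skip-and-print behavior here is illustrative, not Ansible's actual loader code):

    import importlib.util


    def try_load(name, path):
        """Import a plugin file, returning None instead of raising on failure."""
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec)
        try:
            spec.loader.exec_module(module)
        except Exception as exc:
            print('skipping broken plugin {0}: {1}'.format(name, exc))
            return None
        return module
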
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml
new file mode 100644
index 00000000..f5b617d9
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/meta/runtime.yml
@@ -0,0 +1,52 @@
+plugin_routing:
+ action:
+ uses_redirected_action:
+ redirect: testns.testcoll.subclassed_normal
+ callback:
+ removedcallback:
+ tombstone:
+ removal_date: '2020-01-01'
+ connection:
+ redirected_local:
+ redirect: ansible.builtin.local
+ modules:
+ multilevel1:
+ redirect: testns.testcoll.multilevel2
+ multilevel2:
+ redirect: testns.testcoll.multilevel3
+ multilevel3:
+ redirect: testns.testcoll.ping
+ uses_redirected_action:
+ redirect: ansible.builtin.ping
+ setup.ps1: ansible.windows.setup
+ looped_ping:
+ redirect: testns.testcoll.looped_ping2
+ looped_ping2:
+ redirect: testns.testcoll.looped_ping
+ bogus_redirect:
+ redirect: bogus.collection.shouldbomb
+ deprecated_ping:
+ deprecation:
+ removal_date: 2020-12-31
+ warning_text: old_ping will be removed in a future release of this collection. Use new_ping instead.
+ foobar_facts:
+ redirect: foobar_info
+ aliased_ping:
+ redirect: ansible.builtin.ping
+ dead_ping:
+ tombstone:
+ removal_date: 2019-12-31
+ warning_text: dead_ping has been removed
+ module_utils:
+ moved_out_root:
+ redirect: testns.content_adj.sub1.foomodule
+ formerly_testcoll_pkg:
+ redirect: ansible_collections.testns.othercoll.plugins.module_utils.formerly_testcoll_pkg
+ formerly_testcoll_pkg.submod:
+ redirect: ansible_collections.testns.othercoll.plugins.module_utils.formerly_testcoll_pkg.submod
+ missing_redirect_target_collection:
+ redirect: bogusns.boguscoll.bogusmu
+ missing_redirect_target_module:
+ redirect: testns.othercoll.bogusmu
+
+requires_ansible: '>=2.11'
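
The routing table above covers every interesting shape: multi-hop redirects (`multilevel1` -> `multilevel2` -> `multilevel3` -> `ping`), an intentional cycle (`looped_ping` <-> `looped_ping2`), dangling targets, plus `deprecation` and `tombstone` metadata. Any resolver walking it has to follow `redirect` chains while detecting loops; a minimal sketch of that walk in plain Python (not the actual loader):

    routing = {
        'multilevel1': 'multilevel2',
        'multilevel2': 'multilevel3',
        'multilevel3': 'ping',
        'looped_ping': 'looped_ping2',
        'looped_ping2': 'looped_ping',
    }


    def resolve(name):
        seen = set()
        while name in routing:
            if name in seen:
                raise RuntimeError('redirect loop detected at {0}'.format(name))
            seen.add(name)
            name = routing[name]
        return name


    assert resolve('multilevel1') == 'ping'
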
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml
new file mode 100644
index 00000000..1d1aee7d
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml
@@ -0,0 +1,49 @@
+# verify default collection action/module lookup works
+# since we're running this playbook inside a collection, it will set that collection as the default search for all playbooks
+# and non-collection roles to allow for easy migration of old integration tests to collections
+- hosts: testhost
+ tasks:
+ - testmodule:
+
+- hosts: testhost
+ vars:
+ test_role_input: task static default collection
+ tasks:
+ - import_role:
+ name: testrole # unqualified role lookup should work; inheriting from the containing collection
+ - assert:
+ that:
+ - test_role_output.msg == test_role_input
+ - vars:
+ test_role_input: task static legacy embedded default collection
+ block:
+ - import_role:
+ name: non_coll_role
+ - assert:
+ that:
+ - test_role_output.msg == test_role_input
+
+- hosts: testhost
+ vars:
+ test_role_input: keyword static default collection
+ roles:
+ - testrole
+ tasks:
+ - debug: var=test_role_input
+ - debug: var=test_role_output
+ - assert:
+ that:
+ - test_role_output.msg == test_role_input
+
+- hosts: testhost
+ vars:
+ test_role_input: task dynamic default collection
+ tasks:
+ - include_role:
+ name: testrole # unqualified role lookup should work; inheriting from the containing collection
+ - include_role:
+ name: non_coll_role
+ - assert:
+ that:
+ - testmodule_out_from_non_coll_role is success
+ - embedded_module_out_from_non_coll_role is success
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py
new file mode 100644
index 00000000..54402d12
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/library/embedded_module.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='collection_embedded_non_collection_role')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml
new file mode 100644
index 00000000..d41ae90e
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml
@@ -0,0 +1,29 @@
+- testmodule:
+ register: testmodule_out_from_non_coll_role
+
+- embedded_module:
+ register: embedded_module_out_from_non_coll_role
+
+- name: check collections list from role meta
+ plugin_lookup:
+ register: pluginlookup_out
+
+- debug: var=pluginlookup_out
+
+- debug:
+ msg: '{{ test_role_input | default("(undefined)") }}'
+ register: test_role_output
+
+- assert:
+ that:
+ - test_role_input is not defined or test_role_input == test_role_output.msg
+
+- vars:
+ test_role_input: include another non-coll role
+ block:
+ - include_role:
+ name: non_coll_role_to_call
+
+ - assert:
+ that:
+ - test_role_output.msg == test_role_input
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml
new file mode 100644
index 00000000..98445ce3
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml
@@ -0,0 +1,7 @@
+- debug:
+ msg: '{{ test_role_input | default("(undefined)") }}'
+ register: test_role_output
+
+- assert:
+ that:
+ - test_role_input is not defined or test_role_input == test_role_output.msg
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py
new file mode 100644
index 00000000..5af73342
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/action_subdir/subdir_ping_action.py
@@ -0,0 +1,19 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset()
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(None, task_vars)
+
+ result = dict(changed=False)
+
+ return result
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py
new file mode 100644
index 00000000..b15493d9
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/bypass_host_loop.py
@@ -0,0 +1,17 @@
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ BYPASS_HOST_LOOP = True
+
+ def run(self, tmp=None, task_vars=None):
+ result = super(ActionModule, self).run(tmp, task_vars)
+ result['bypass_inventory_hostname'] = task_vars['inventory_hostname']
+ return result
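
`BYPASS_HOST_LOOP` tells the strategy to run the action once per task rather than once per host, fanning the single result out to every host; the plugin records which `inventory_hostname` it actually saw so the test can prove only one invocation happened. A toy model of that contract (not the strategy's real code):

    class OneShotAction:
        BYPASS_HOST_LOOP = True

        def run(self, task_vars):
            return {'bypass_inventory_hostname': task_vars['inventory_hostname']}


    hosts = ['alpha', 'beta']
    action = OneShotAction()
    if getattr(action, 'BYPASS_HOST_LOOP', False):
        shared = action.run({'inventory_hostname': hosts[0]})  # single invocation
        results = {h: shared for h in hosts}  # every host gets the same result back
    else:
        results = {h: action.run({'inventory_hostname': h}) for h in hosts}

    assert results['beta']['bypass_inventory_hostname'] == 'alpha'
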
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py
new file mode 100644
index 00000000..1b882810
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/plugin_lookup.py
@@ -0,0 +1,33 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.plugins import loader
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset(('type', 'name'))
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(None, task_vars)
+
+ type = self._task.args.get('type')
+ name = self._task.args.get('name')
+
+ result = dict(changed=False, collection_list=self._task.collections)
+
+ if all([type, name]):
+ attr_name = '{0}_loader'.format(type)
+
+ typed_loader = getattr(loader, attr_name, None)
+
+ if not typed_loader:
+ return (dict(failed=True, msg='invalid plugin type {0}'.format(type)))
+
+ result['plugin_path'] = typed_loader.find_plugin(name, collection_list=self._task.collections)
+
+ return result
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py
new file mode 100644
index 00000000..f0eff30b
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/subclassed_normal.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action.normal import ActionModule as NormalAction
+
+
+class ActionModule(NormalAction):
+ def run(self, *args, **kwargs):
+ result = super(ActionModule, self).run(*args, **kwargs)
+ result['hacked'] = 'I got run under a subclassed normal, yay'
+ return result
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py
new file mode 100644
index 00000000..701d7b46
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/action/uses_redirected_import.py
@@ -0,0 +1,20 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.formerly_core import thingtocall
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset()
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(None, task_vars)
+
+ result = dict(changed=False, ttc_res=thingtocall())
+
+ return result
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py
new file mode 100644
index 00000000..c5b0f66a
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/callback/usercallback.py
@@ -0,0 +1,27 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+
+DOCUMENTATION = '''
+ callback: usercallback
+ callback_type: notification
+ short_description: does stuff
+ description:
+ - does some stuff
+'''
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'usercallback'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+ self._display.display("loaded usercallback from collection, yay")
+
+ def v2_runner_on_ok(self, result):
+ self._display.display("usercallback says ok")
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py
new file mode 100644
index 00000000..fc19a99d
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/connection/localconn.py
@@ -0,0 +1,41 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.plugins.connection import ConnectionBase
+
+DOCUMENTATION = """
+ connection: localconn
+ short_description: do stuff local
+ description:
+ - does stuff
+ options:
+ connectionvar:
+ description:
+ - something we set
+ default: the_default
+ vars:
+ - name: ansible_localconn_connectionvar
+"""
+
+
+class Connection(ConnectionBase):
+ transport = 'local'
+ has_pipelining = True
+
+ def _connect(self):
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ stdout = 'localconn ran {0}'.format(to_native(cmd))
+ stderr = 'connectionvar is {0}'.format(to_native(self.get_option('connectionvar')))
+ return (0, stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ raise NotImplementedError('just a test')
+
+ def fetch_file(self, in_path, out_path):
+ raise NotImplementedError('just a test')
+
+ def close(self):
+ self._connected = False
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py
new file mode 100644
index 00000000..4549f2d6
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/doc_fragments/frag.py
@@ -0,0 +1,18 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+options:
+ normal_doc_frag:
+ description:
+ - an option
+'''
+
+ OTHER_DOCUMENTATION = r'''
+options:
+ other_doc_frag:
+ description:
+ - another option
+'''
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py
new file mode 100644
index 00000000..a5498a43
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py
@@ -0,0 +1,14 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def test_subdir_filter(data):
+ return "{0}_via_testfilter_from_subdir".format(data)
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'test_subdir_filter': test_subdir_filter
+ }
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py
new file mode 100644
index 00000000..0ce239e2
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters.py
@@ -0,0 +1,14 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def testfilter(data):
+ return "{0}_via_testfilter_from_userdir".format(data)
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'testfilter': testfilter
+ }
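
A `FilterModule.filters()` mapping like the one above is merged into the Jinja2 environment Ansible templates with, which is why the integration tests can expect `'x' | testns.testcoll.testfilter` to yield the suffixed string. The same wiring reproduced directly against stock `jinja2` (illustrative only; Ansible's environment adds more on top):

    import jinja2


    def testfilter(data):
        return "{0}_via_testfilter_from_userdir".format(data)


    env = jinja2.Environment()
    env.filters['testfilter'] = testfilter  # what Ansible does with FilterModule().filters()
    rendered = env.from_string("{{ 'x' | testfilter }}").render()
    assert rendered == 'x_via_testfilter_from_userdir'
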
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py
new file mode 100644
index 00000000..07239222
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/myfilters2.py
@@ -0,0 +1,14 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def testfilter2(data):
+ return "{0}_via_testfilter2_from_userdir".format(data)
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'testfilter2': testfilter2
+ }
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py
new file mode 100644
index 00000000..dd9818c9
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ return ['subdir_lookup_from_user_dir']
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py
new file mode 100644
index 00000000..1cf3d28f
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ return ['mylookup_from_user_dir']
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py
new file mode 100644
index 00000000..bda671f5
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/mylookup2.py
@@ -0,0 +1,12 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ return ['mylookup2_from_user_dir']
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs
new file mode 100644
index 00000000..68d2bc7a
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/AnotherCSMU.cs
@@ -0,0 +1,12 @@
+using System;
+
+namespace ansible_collections.testns.testcoll.plugins.module_utils.AnotherCSMU
+{
+ public class AnotherThing
+ {
+ public static string CallMe()
+ {
+ return "Hello from nested user-collection-hosted AnotherCSMU";
+ }
+ }
+}
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs
new file mode 100644
index 00000000..2b7843d7
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyCSMU.cs
@@ -0,0 +1,19 @@
+using System;
+
+using ansible_collections.testns.testcoll.plugins.module_utils.AnotherCSMU;
+using ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs;
+
+//TypeAccelerator -Name MyCSMU -TypeName CustomThing
+
+namespace ansible_collections.testns.testcoll.plugins.module_utils.MyCSMU
+{
+ public class CustomThing
+ {
+ public static string HelloWorld()
+ {
+ string res1 = AnotherThing.CallMe();
+ string res2 = NestedUtil.HelloWorld();
+ return String.Format("Hello from user_mu collection-hosted MyCSMU, also {0} and {1}", res1, res2);
+ }
+ }
+}
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1
new file mode 100644
index 00000000..09da66d5
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/MyPSMU.psm1
@@ -0,0 +1,9 @@
+Function Invoke-FromUserPSMU {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "from user_mu"
+}
+
+Export-ModuleMember -Function Invoke-FromUserPSMU
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py
new file mode 100644
index 00000000..0654d182
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/base.py
@@ -0,0 +1,12 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.testns.testcoll.plugins.module_utils import secondary
+import ansible_collections.testns.testcoll.plugins.module_utils.secondary
+
+
+def thingtocall():
+ if secondary != ansible_collections.testns.testcoll.plugins.module_utils.secondary:
+ raise Exception()
+
+ return "thingtocall in base called " + ansible_collections.testns.testcoll.plugins.module_utils.secondary.thingtocall()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py
new file mode 100644
index 00000000..ad847105
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/leaf.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def thingtocall():
+ return "thingtocall in leaf"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/__init__.py
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/__init__.py
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py
new file mode 100644
index 00000000..77407564
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/nested_same/nested_same/nested_same.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def nested_same():
+ return 'hello from nested_same'
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py
new file mode 100644
index 00000000..9a315686
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/secondary.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def thingtocall():
+ return "thingtocall in secondary"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/__init__.py
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs
new file mode 100644
index 00000000..ebeb8ce5
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subcs.cs
@@ -0,0 +1,13 @@
+using System;
+
+namespace ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs
+{
+ public class NestedUtil
+ {
+ public static string HelloWorld()
+ {
+ string res = "Hello from subpkg.subcs";
+ return res;
+ }
+ }
+}
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py
new file mode 100644
index 00000000..3c24bc44
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/submod.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def thingtocall():
+ return "thingtocall in subpkg.submod"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1
new file mode 100644
index 00000000..1db0ab97
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg/subps.psm1
@@ -0,0 +1,9 @@
+Function Invoke-SubUserPSMU {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "from subpkg.subps.psm1"
+}
+
+Export-ModuleMember -Function Invoke-SubUserPSMU
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py
new file mode 100644
index 00000000..b48a717c
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init.py
@@ -0,0 +1,11 @@
+# NB: this module should never be loaded, since we'll see the subpkg_with_init package dir first
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def thingtocall():
+ raise Exception('this should never be called (loaded discrete module instead of package module)')
+
+
+def anotherthingtocall():
+ raise Exception('this should never be called (loaded discrete module instead of package module)')
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py
new file mode 100644
index 00000000..d424796f
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/__init__.py
@@ -0,0 +1,10 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# exercise relative imports in package init; they behave differently
+from .mod_in_subpkg_with_init import thingtocall as submod_thingtocall
+from ..subpkg.submod import thingtocall as cousin_submod_thingtocall # pylint: disable=relative-beyond-top-level
+
+
+def thingtocall():
+ return "thingtocall in subpkg_with_init"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py
new file mode 100644
index 00000000..27747dae
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/module_utils/subpkg_with_init/mod_in_subpkg_with_init.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def thingtocall():
+ return "thingtocall in mod_in_subpkg_with_init"
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py
new file mode 100644
index 00000000..9698ba6f
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/deprecated_ping.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='user', is_deprecated=True)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py
new file mode 100644
index 00000000..5a70174d
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/module_subdir/subdir_ping_module.py
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='user')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py
new file mode 100644
index 00000000..2ca079c6
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/ping.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='user')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py
new file mode 100644
index 00000000..e2efadae
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+DOCUMENTATION = r'''
+module: testmodule
+description: for testing
+extends_documentation_fragment:
+ - testns.testcoll.frag
+ - testns.testcoll.frag.other_documentation
+'''
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='user')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py
new file mode 100644
index 00000000..46ccb76c
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/testmodule_bad_docfrags.py
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+DOCUMENTATION = r'''
+module: testmodule
+description: for testing
+extends_documentation_fragment:
+ - noncollbogusfrag
+ - noncollbogusfrag.bogusvar
+ - bogusns.testcoll.frag
+ - testns.boguscoll.frag
+ - testns.testcoll.bogusfrag
+ - testns.testcoll.frag.bogusvar
+'''
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='user')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py
new file mode 100644
index 00000000..4054e36f
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_base_mu_granular_nested_import.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible_collections.testns.testcoll.plugins.module_utils.base import thingtocall
+
+
+def main():
+ mu_result = thingtocall()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py
new file mode 100644
index 00000000..b169fdea
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_collection_redirected_mu.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible_collections.testns.testcoll.plugins.module_utils.moved_out_root import importme
+from ..module_utils.formerly_testcoll_pkg import thing as movedthing # pylint: disable=relative-beyond-top-level
+from ..module_utils.formerly_testcoll_pkg.submod import thing as submodmovedthing # pylint: disable=relative-beyond-top-level
+
+
+def main():
+ mu_result = importme()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result, mu_result2=movedthing, mu_result3=submodmovedthing)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py
new file mode 100644
index 00000000..28a07729
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_core_redirected_mu.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils.formerly_core import thingtocall
+
+
+def main():
+ mu_result = thingtocall()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak
new file mode 100644
index 00000000..703f4548
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.bak
@@ -0,0 +1,3 @@
+# Intentionally blank, and intentionally attempting to shadow
+# uses_leaf_mu_flat_import.py. MODULE_IGNORE_EXTS should prevent this file
+# from ever being loaded.
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py
new file mode 100644
index 00000000..295d4329
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+import ansible_collections.testns.testcoll.plugins.module_utils.leaf
+
+
+def main():
+ mu_result = ansible_collections.testns.testcoll.plugins.module_utils.leaf.thingtocall()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml
new file mode 100644
index 00000000..703f4548
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_flat_import.yml
@@ -0,0 +1,3 @@
+# Intentionally blank, and intentionally attempting to shadow
+# uses_leaf_mu_flat_import.py. MODULE_IGNORE_EXTS should prevent this file
+# from ever being loaded.
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py
new file mode 100644
index 00000000..3794f496
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_granular_import.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible_collections.testns.testcoll.plugins.module_utils.leaf import thingtocall as aliasedthing
+
+
+def main():
+ mu_result = aliasedthing()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py
new file mode 100644
index 00000000..559e3e56
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_leaf_mu_module_import_from.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible_collections.testns.testcoll.plugins.module_utils import leaf, secondary
+# FIXME: this one needs pkginit synthesis to work
+# from ansible_collections.testns.testcoll.plugins.module_utils.subpkg import submod
+from ansible_collections.testns.testcoll.plugins.module_utils.subpkg_with_init import (thingtocall as spwi_thingtocall,
+ submod_thingtocall as spwi_submod_thingtocall,
+ cousin_submod_thingtocall as spwi_cousin_submod_thingtocall)
+
+
+def main():
+ mu_result = leaf.thingtocall()
+ mu2_result = secondary.thingtocall()
+ mu3_result = "thingtocall in subpkg.submod" # FIXME: this one needs pkginit synthesis to work
+ # mu3_result = submod.thingtocall()
+ mu4_result = spwi_thingtocall()
+ mu5_result = spwi_submod_thingtocall()
+ mu6_result = spwi_cousin_submod_thingtocall()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result, mu2_result=mu2_result,
+ mu3_result=mu3_result, mu4_result=mu4_result, mu5_result=mu5_result, mu6_result=mu6_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py
new file mode 100644
index 00000000..b945eb68
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ..module_utils import bogusmu # pylint: disable=relative-beyond-top-level
+
+
+def main():
+ raise Exception('should never get here')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py
new file mode 100644
index 00000000..59cb3c5e
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_collection.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ..module_utils import missing_redirect_target_collection # pylint: disable=relative-beyond-top-level
+
+
+def main():
+ raise Exception('should never get here')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py
new file mode 100644
index 00000000..31ffd17c
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_mu_missing_redirect_module.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ..module_utils import missing_redirect_target_module # pylint: disable=relative-beyond-top-level
+
+
+def main():
+ raise Exception('should never get here')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py
new file mode 100644
index 00000000..26fa53c0
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_func.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible_collections.testns.testcoll.plugins.module_utils.nested_same.nested_same.nested_same import nested_same
+
+
+def main():
+ mu_result = nested_same()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py
new file mode 100644
index 00000000..e017c14f
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/uses_nested_same_as_module.py
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible_collections.testns.testcoll.plugins.module_utils.nested_same.nested_same import nested_same
+
+
+def main():
+ mu_result = nested_same.nested_same()
+ print(json.dumps(dict(changed=False, source='user', mu_result=mu_result)))
+
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1
new file mode 100644
index 00000000..df175831
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_csbasic_only.ps1
@@ -0,0 +1,22 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$spec = @{
+ options = @{
+ data = @{ type = "str"; default = "pong" }
+ }
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+ throw "boom"
+}
+
+$module.Result.ping = $data
+$module.Result.source = "user"
+$module.ExitJson()
\ No newline at end of file
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1
new file mode 100644
index 00000000..661bc0f6
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.ps1
@@ -0,0 +1,9 @@
+#!powershell
+
+$res = @{
+ changed = $false
+ source = "user"
+ msg = "hi from selfcontained.ps1"
+}
+
+ConvertTo-Json $res
\ No newline at end of file
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py
new file mode 100644
index 00000000..ce99bfa5
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py
@@ -0,0 +1 @@
+# docs for Windows module would go here; just ensure we don't accidentally load this instead of the .ps1
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1
new file mode 100644
index 00000000..af00627b
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_csmu.ps1
@@ -0,0 +1,28 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -CSharpUtil ansible_collections.testns.testcoll.plugins.module_utils.MyCSMU
+#AnsibleRequires -CSharpUtil ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs
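+# C# module_utils hosted in a collection are requested by their fully-qualified Python-style
+# name; the loaded types are also reachable via the short type accelerator (see [MyCSMU] below)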
+
+$spec = @{
+ options = @{
+ data = @{ type = "str"; default = "called from $([ansible_collections.testns.testcoll.plugins.module_utils.MyCSMU.CustomThing]::HelloWorld())" }
+ }
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+ throw "boom"
+}
+
+$module.Result.ping = $data
+$module.Result.source = "user"
+$module.Result.subpkg = [ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subcs.NestedUtil]::HelloWorld()
+$module.Result.type_accelerator = "called from $([MyCSMU]::HelloWorld())"
+$module.ExitJson()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1 b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1
new file mode 100644
index 00000000..cbca7b70
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_uses_coll_psmu.ps1
@@ -0,0 +1,27 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -Powershell ansible_collections.testns.testcoll.plugins.module_utils.MyPSMU
+#AnsibleRequires -PowerShell ansible_collections.testns.testcoll.plugins.module_utils.subpkg.subps
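+# unlike C# utils (referenced by type), collection-hosted PowerShell module_utils are consumed
+# through the functions they export, e.g. Invoke-FromUserPSMU and Invoke-SubUserPSMU below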
+
+$spec = @{
+ options = @{
+ data = @{ type = "str"; default = "called from $(Invoke-FromUserPSMU)" }
+ }
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+ throw "boom"
+}
+
+$module.Result.ping = $data
+$module.Result.source = "user"
+$module.Result.subpkg = Invoke-SubUserPSMU
+$module.ExitJson()
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py
new file mode 100644
index 00000000..ba610fb2
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests.py
@@ -0,0 +1,13 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def testtest(data):
+ return data == 'from_user'
+
+
+class TestModule(object):
+ def tests(self):
+ return {
+ 'testtest': testtest
+ }
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py
new file mode 100644
index 00000000..183944ff
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/mytests2.py
@@ -0,0 +1,13 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def testtest(data):
+ return data == 'from_user2'
+
+
+class TestModule(object):
+ def tests(self):
+ return {
+ 'testtest2': testtest
+ }
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py
new file mode 100644
index 00000000..98a8f893
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/test/test_subdir/my_subdir_tests.py
@@ -0,0 +1,13 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def subdir_test(data):
+ return data == 'subdir_from_user'
+
+
+class TestModule(object):
+ def tests(self):
+ return {
+ 'subdir_test': subdir_test
+ }
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py
new file mode 100644
index 00000000..c603d72e
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/vars/custom_vars.py
@@ -0,0 +1,44 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: custom_vars
+ version_added: "2.10"
+ short_description: load host and group vars
+ description: test loading host and group vars from a collection
+ options:
+ stage:
+ choices: ['all', 'inventory', 'task']
+ type: str
+ ini:
+ - key: stage
+ section: custom_vars
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+'''
+
+from ansible.plugins.vars import BaseVarsPlugin
+
+
+class VarsModule(BaseVarsPlugin):
+
+ def get_vars(self, loader, path, entities, cache=True):
+ super(VarsModule, self).get_vars(loader, path, entities)
+ return {'collection': 'collection_root_user'}
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml
new file mode 100644
index 00000000..f5dcc0fc
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/call_standalone/tasks/main.yml
@@ -0,0 +1,6 @@
+- include_role:
+ name: standalone
+
+- assert:
+ that:
+ - standalone_role_var is defined
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml
new file mode 100644
index 00000000..b3a88198
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - testrole # since testrole lives in this collection, we'll check there first
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml
new file mode 100644
index 00000000..99297f70
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/calls_intra_collection_dep_role_unqualified/tasks/main.yml
@@ -0,0 +1,7 @@
+- debug:
+ msg: '{{ outer_role_input | default("(undefined)") }}'
+ register: outer_role_output
+
+- assert:
+ that:
+ - outer_role_input is not defined or outer_role_input == outer_role_output.msg
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml
new file mode 100644
index 00000000..d9f73231
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/common_handlers/handlers/main.yml
@@ -0,0 +1,6 @@
+# This handler should only be called one time; if it's called more than once,
+# this task should fail on subsequent executions
+- name: test_fqcn_handler
+ set_fact:
+ handler_counter: '{{ handler_counter|int + 1 }}'
+ failed_when: handler_counter|int > 1
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml
new file mode 100644
index 00000000..64f5242b
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/role_subdir/subdir_testrole/tasks/main.yml
@@ -0,0 +1,10 @@
+- debug:
+ msg: '{{ test_role_input | default("(undefined)") }}'
+ register: test_role_output
+
+- set_fact:
+ testrole_source: collection
+
+- assert:
+ that:
+ - test_role_input is not defined or test_role_input == test_role_output.msg
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml
new file mode 100644
index 00000000..9218f3d7
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - testns.testcoll.common_handlers
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml
new file mode 100644
index 00000000..db8767d2
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/test_fqcn_handlers/tasks/main.yml
@@ -0,0 +1,7 @@
+- debug:
+ msg: Fire fqcn handler
+ changed_when: true
+ notify:
+ - 'testns.testcoll.common_handlers : test_fqcn_handler'
+ - 'common_handlers : test_fqcn_handler'
+ - 'test_fqcn_handler'
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml
new file mode 100644
index 00000000..8c22c1c6
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/meta/main.yml
@@ -0,0 +1,4 @@
+collections:
+- ansible.builtin
+- testns.coll_in_sys
+- bogus.fromrolemeta
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml
new file mode 100644
index 00000000..7c05abb1
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole/tasks/main.yml
@@ -0,0 +1,39 @@
+# test using builtin module of multiple types in a role in a collection
+# https://github.com/ansible/ansible/issues/65298
+- name: Run setup module because there is both setup.ps1 and setup.py
+ setup:
+ gather_subset: min
+
+- name: check collections list from role meta
+ plugin_lookup:
+ register: pluginlookup_out
+
+- name: call role-local ping module
+ ping:
+ register: ping_out
+
+- name: call unqualified module in another collection listed in role meta (testns.coll_in_sys)
+ systestmodule:
+ register: systestmodule_out
+
+# verify that pluginloader caching doesn't prevent us from explicitly calling a builtin plugin with the same name
+- name: call builtin ping module explicitly
+ ansible.builtin.ping:
+ register: builtinping_out
+
+- debug:
+ msg: '{{ test_role_input | default("(undefined)") }}'
+ register: test_role_output
+
+- set_fact:
+ testrole_source: collection
+
+# FIXME: add tests to ensure that block/task level stuff in a collection-hosted role properly inherit role default/meta values
+
+- assert:
+ that:
+ - pluginlookup_out.collection_list == ['testns.testcoll', 'ansible.builtin', 'testns.coll_in_sys', 'bogus.fromrolemeta']
+ - ping_out.source is defined and ping_out.source == 'user'
+ - systestmodule_out.source is defined and systestmodule_out.source == 'sys'
+ - builtinping_out.ping is defined and builtinping_out.ping == 'pong'
+ - test_role_input is not defined or test_role_input == test_role_output.msg
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml
new file mode 100644
index 00000000..8c22c1c6
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/meta/main.yml
@@ -0,0 +1,4 @@
+collections:
+- ansible.builtin
+- testns.coll_in_sys
+- bogus.fromrolemeta
diff --git a/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml
new file mode 100644
index 00000000..31e3af5e
--- /dev/null
+++ b/test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/roles/testrole_main_yaml/tasks/main.yml
@@ -0,0 +1,33 @@
+- name: check collections list from role meta
+ plugin_lookup:
+ register: pluginlookup_out
+
+- name: call role-local ping module
+ ping:
+ register: ping_out
+
+- name: call unqualified module in another collection listed in role meta (testns.coll_in_sys)
+ systestmodule:
+ register: systestmodule_out
+
+# verify that pluginloader caching doesn't prevent us from explicitly calling a builtin plugin with the same name
+- name: call builtin ping module explicitly
+ ansible.builtin.ping:
+ register: builtinping_out
+
+- debug:
+ msg: '{{ test_role_input | default("(undefined)") }}'
+ register: test_role_output
+
+- set_fact:
+ testrole_source: collection
+
+# FIXME: add tests to ensure that block/task level stuff in a collection-hosted role properly inherit role default/meta values
+
+- assert:
+ that:
+ - pluginlookup_out.collection_list == ['testns.testcoll', 'ansible.builtin', 'testns.coll_in_sys', 'bogus.fromrolemeta']
+ - ping_out.source is defined and ping_out.source == 'user'
+ - systestmodule_out.source is defined and systestmodule_out.source == 'sys'
+ - builtinping_out.ping is defined and builtinping_out.ping == 'pong'
+ - test_role_input is not defined or test_role_input == test_role_output.msg
diff --git a/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py
new file mode 100644
index 00000000..e9f97311
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/action/action1.py
@@ -0,0 +1,30 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ ''' proxy action that runs a module from another collection '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped'):
+ return result
+
+ module_args = self._task.args.copy()
+
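+ # delegate execution to a module hosted in a different collection, forwarding this task's args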
+ result.update(
+ self._execute_module(
+ module_name='me.mycoll2.module1',
+ module_args=module_args,
+ task_vars=task_vars,
+ )
+ )
+
+ return result
diff --git a/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py
new file mode 100644
index 00000000..66bb5a41
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/me/mycoll1/plugins/modules/action1.py
@@ -0,0 +1,24 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: action1
+short_description: Action Test module
+description:
+ - Action Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
diff --git a/test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py b/test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py
new file mode 100644
index 00000000..00bb993b
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/me/mycoll2/plugins/modules/module1.py
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: module1
+short_description: module1 Test module
+description:
+ - module1 Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ desc=dict(type='str'),
+ ),
+ )
+
+ results = dict(msg="you just ran me.mycoll2.module1", desc=module.params.get('desc'))
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py
new file mode 100644
index 00000000..7605dc41
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/cache/custom_jsonfile.py
@@ -0,0 +1,63 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: jsonfile
+ short_description: JSON formatted files.
+ description:
+ - This cache uses JSON-formatted, per-host files saved to the filesystem.
+ version_added: "1.9"
+ author: Ansible Core (@ansible-core)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import codecs
+import json
+
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by json files.
+ """
+
+ def _load(self, filepath):
+ # Valid JSON interchange text is required to be UTF-8 encoded (RFC 8259).
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return json.load(f, cls=AnsibleJSONDecoder)
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py
new file mode 100644
index 00000000..ae6941f3
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ inventory: statichost
+ short_description: Add a single host
+ description: Add a single host
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+ description: plugin name (must be statichost)
+ required: true
+ hostname:
+ description: Hostname of the single host this plugin adds to inventory
+ required: True
+'''
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+
+ NAME = 'testns.content_adj.statichost'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ self._hosts = set()
+
+ def verify_file(self, path):
+ ''' Verify the file is usable by this plugin; the base method does a minimal accessibility check '''
+
+ if not path.endswith('.statichost.yml') and not path.endswith('.statichost.yaml'):
+ return False
+ return super(InventoryModule, self).verify_file(path)
+
+ def parse(self, inventory, loader, path, cache=None):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # Initialize and validate options
+ self._read_config_data(path)
+
+ # Exercise cache
+ cache_key = self.get_cache_key(path)
+ attempt_to_read_cache = self.get_option('cache') and cache
+ cache_needs_update = self.get_option('cache') and not cache
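+ # the 'cache' arg is True when cached data may be used and False when the user forced a
+ # refresh, so only read from self._cache in the former case and repopulate it afterwards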
+ if attempt_to_read_cache:
+ try:
+ host_to_add = self._cache[cache_key]
+ except KeyError:
+ cache_needs_update = True
+ if not attempt_to_read_cache or cache_needs_update:
+ host_to_add = self.get_option('hostname')
+
+ # this is where the magic happens
+ self.inventory.add_host(host_to_add, 'all')
+ self._cache[cache_key] = host_to_add
+
+ # self.inventory.add_group()...
+ # self.inventory.add_child()...
+ # self.inventory.set_variable()..
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/__init__.py
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/__init__.py
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py
new file mode 100644
index 00000000..eeffe01e
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/module_utils/sub1/foomodule.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def importme():
+ return "hello from {0}".format(__name__)
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py
new file mode 100644
index 00000000..0fa98eb0
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/modules/contentadjmodule.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='content_adj')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py
new file mode 100644
index 00000000..0cd9a1d5
--- /dev/null
+++ b/test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/vars/custom_adj_vars.py
@@ -0,0 +1,45 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: custom_adj_vars
+ version_added: "2.10"
+ short_description: load host and group vars
+ description: test loading host and group vars from a collection
+ options:
+ stage:
+ default: all
+ choices: ['all', 'inventory', 'task']
+ type: str
+ ini:
+ - key: stage
+ section: custom_adj_vars
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+'''
+
+from ansible.plugins.vars import BaseVarsPlugin
+
+
+class VarsModule(BaseVarsPlugin):
+
+ def get_vars(self, loader, path, entities, cache=True):
+ super(VarsModule, self).get_vars(loader, path, entities)
+ return {'collection': 'adjacent', 'adj_var': 'value'}
diff --git a/test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py b/test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py
new file mode 100644
index 00000000..b5792d88
--- /dev/null
+++ b/test/integration/targets/collections/custom_vars_plugins/v1_vars_plugin.py
@@ -0,0 +1,37 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: v1_vars_plugin
+ version_added: "2.10"
+ short_description: load host and group vars
+ description:
+ - 3rd party vars plugin to test loading host and group vars without requiring whitelisting and without a plugin-specific stage option
+ options:
+'''
+
+from ansible.plugins.vars import BaseVarsPlugin
+
+
+class VarsModule(BaseVarsPlugin):
+
+ def get_vars(self, loader, path, entities, cache=True):
+ super(VarsModule, self).get_vars(loader, path, entities)
+ return {'collection': False, 'name': 'v1_vars_plugin', 'v1_vars_plugin': True}
diff --git a/test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py b/test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py
new file mode 100644
index 00000000..fc140162
--- /dev/null
+++ b/test/integration/targets/collections/custom_vars_plugins/v2_vars_plugin.py
@@ -0,0 +1,45 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: v2_vars_plugin
+ version_added: "2.10"
+ short_description: load host and group vars
+ description:
+ - 3rd party vars plugin to test loading host and group vars without requiring whitelisting and with a plugin-specific stage option
+ options:
+ stage:
+ choices: ['all', 'inventory', 'task']
+ type: str
+ ini:
+ - key: stage
+ section: other_vars_plugin
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+'''
+
+from ansible.plugins.vars import BaseVarsPlugin
+
+
+class VarsModule(BaseVarsPlugin):
+
+ def get_vars(self, loader, path, entities, cache=True):
+ super(VarsModule, self).get_vars(loader, path, entities)
+ return {'collection': False, 'name': 'v2_vars_plugin', 'v2_vars_plugin': True}
diff --git a/test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py b/test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py
new file mode 100644
index 00000000..0ab95273
--- /dev/null
+++ b/test/integration/targets/collections/custom_vars_plugins/vars_req_whitelist.py
@@ -0,0 +1,46 @@
+# Copyright 2019 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ vars: vars_req_whitelist
+ version_added: "2.10"
+ short_description: load host and group vars
+ description: test loading host and group vars from a collection
+ options:
+ stage:
+ choices: ['all', 'inventory', 'task']
+ type: str
+ ini:
+ - key: stage
+ section: vars_req_whitelist
+ env:
+ - name: ANSIBLE_VARS_PLUGIN_STAGE
+'''
+
+from ansible.plugins.vars import BaseVarsPlugin
+
+
+class VarsModule(BaseVarsPlugin):
+
+ REQUIRES_WHITELIST = True
+
+ def get_vars(self, loader, path, entities, cache=True):
+ super(VarsModule, self).get_vars(loader, path, entities)
+ return {'whitelisted': True, 'collection': False}
diff --git a/test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py b/test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py
new file mode 100644
index 00000000..600b1fd8
--- /dev/null
+++ b/test/integration/targets/collections/filter_plugins/override_formerly_core_masked_filter.py
@@ -0,0 +1,13 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def override_formerly_core_masked_filter(*args, **kwargs):
+ return 'hello from overridden formerly_core_masked_filter'
+
+
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'formerly_core_masked_filter': override_formerly_core_masked_filter
+ }
diff --git a/test/integration/targets/collections/includeme.yml b/test/integration/targets/collections/includeme.yml
new file mode 100644
index 00000000..219ee58f
--- /dev/null
+++ b/test/integration/targets/collections/includeme.yml
@@ -0,0 +1,7 @@
+- testns.testcoll.plugin_lookup:
+ register: included_plugin_lookup_out
+
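+# tasks included from a play inherit that play's collections value (bogus.bogus here), plus the implicit ansible.legacy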
+- assert:
+ that:
+ - included_plugin_lookup_out.collection_list == ['bogus.bogus', 'ansible.legacy']
diff --git a/test/integration/targets/collections/inventory_test.yml b/test/integration/targets/collections/inventory_test.yml
new file mode 100644
index 00000000..b5089278
--- /dev/null
+++ b/test/integration/targets/collections/inventory_test.yml
@@ -0,0 +1,26 @@
+- name: test a collection-hosted connection plugin against hosts from collection-hosted inventory plugins
+ hosts: dynamic_host_a, dynamic_host_redirected
+ gather_facts: no
+ vars:
+ ansible_connection: testns.testcoll.localconn
+ ansible_localconn_connectionvar: from_play
+ tasks:
+ - raw: echo 'hello world'
+ register: connection_out
+
+ - assert:
+ that:
+ - connection_out.stdout == "localconn ran echo 'hello world'"
+ # ensure that the connection var we overrode above made it into the running config
+ - connection_out.stderr == "connectionvar is from_play"
+
+
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - assert:
+ that:
+ - hostvars['dynamic_host_a'] is defined
+ - hostvars['dynamic_host_a'].connection_out.stdout == "localconn ran echo 'hello world'"
+ - hostvars['dynamic_host_redirected'] is defined
+ - hostvars['dynamic_host_redirected'].connection_out.stdout == "localconn ran echo 'hello world'"
diff --git a/test/integration/targets/collections/invocation_tests.yml b/test/integration/targets/collections/invocation_tests.yml
new file mode 100644
index 00000000..c80e1edc
--- /dev/null
+++ b/test/integration/targets/collections/invocation_tests.yml
@@ -0,0 +1,5 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: run action that invokes module from another collection
+ me.mycoll1.action1: desc="this should run me.mycoll2.module1"
diff --git a/test/integration/targets/collections/library/ping.py b/test/integration/targets/collections/library/ping.py
new file mode 100644
index 00000000..7a416a64
--- /dev/null
+++ b/test/integration/targets/collections/library/ping.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='legacy_library_dir')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections/noop.yml b/test/integration/targets/collections/noop.yml
new file mode 100644
index 00000000..81c6e473
--- /dev/null
+++ b/test/integration/targets/collections/noop.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - debug:
diff --git a/test/integration/targets/collections/posix.yml b/test/integration/targets/collections/posix.yml
new file mode 100644
index 00000000..903fb4ff
--- /dev/null
+++ b/test/integration/targets/collections/posix.yml
@@ -0,0 +1,443 @@
+- hosts: testhost
+ tasks:
+ # basic test of FQ module lookup and that we got the right one (user-dir hosted)
+ - name: exec FQ module in a user-dir testns collection
+ testns.testcoll.testmodule:
+ register: testmodule_out
+
+ # verifies that distributed collection subpackages are visible under a multi-location namespace (testns exists in user and sys locations)
+ - name: exec FQ module in a sys-dir testns collection
+ testns.coll_in_sys.systestmodule:
+ register: systestmodule_out
+
+ # verifies that content-adjacent collections were automatically added to the installed content roots
+ - name: exec FQ module from content-adjacent collection
+ testns.content_adj.contentadjmodule:
+ register: contentadjmodule_out
+
+ # content should only be loaded from the first visible instance of a collection
+ - name: attempt to look up FQ module in a masked collection
+ testns.testcoll.plugin_lookup:
+ type: module
+ name: testns.testcoll.maskedmodule
+ register: maskedmodule_out
+
+ # ensure the ansible ns can have real collections added to it
+ - name: call an external module in the ansible namespace
+ ansible.bullcoll.bullmodule:
+ register: bullmodule_out
+
+ # ensure the ansible ns cannot override ansible.builtin externally
+ - name: call an external module in the ansible.builtin collection (should use the built in module)
+ ansible.builtin.ping:
+ register: builtin_ping_out
+
+ # action in a collection subdir
+ - name: test subdir action FQ
+ testns.testcoll.action_subdir.subdir_ping_action:
+ register: subdir_ping_action_out
+
+ # module in a collection subdir
+ - name: test subdir module FQ
+ testns.testcoll.module_subdir.subdir_ping_module:
+ register: subdir_ping_module_out
+
+ # module with a granular module_utils import (from (this collection).module_utils.leaf import thingtocall)
+ - name: exec module with granular module utils import from this collection
+ testns.testcoll.uses_leaf_mu_granular_import:
+ register: granular_out
+
+ # module with a granular nested module_utils import (from (this collection).module_utils.base import thingtocall,
+ # where base imports secondary from the same collection's module_utils)
+ - name: exec module with nested module utils from this collection
+ testns.testcoll.uses_base_mu_granular_nested_import:
+ register: granular_nested_out
+
+ # module with a flat module_utils import (import (this collection).module_utils.leaf)
+ - name: exec module with flat module_utils import from this collection
+ testns.testcoll.uses_leaf_mu_flat_import:
+ register: flat_out
+
+ # module with a full-module module_utils import using 'from' (from (this collection).module_utils import leaf)
+ - name: exec module with full-module module_utils import using 'from' from this collection
+ testns.testcoll.uses_leaf_mu_module_import_from:
+ register: from_out
+
+ # module with multiple levels of the same nested package name and imported as a function
+ - name: exec module with multiple levels of the same nested package name imported as a function
+ testns.testcoll.uses_nested_same_as_func:
+ register: from_nested_func
+
+ # module with multiple levels of the same nested package name and imported as a module
+ - name: exec module with multiple levels of the same nested package name imported as a module
+ testns.testcoll.uses_nested_same_as_module:
+ register: from_nested_module
+
+ # module using a bunch of collection-level redirected module_utils
+ - name: exec module using a bunch of collection-level redirected module_utils
+ testns.testcoll.uses_collection_redirected_mu:
+ register: from_redirected_mu
+
+ # module with bogus MU
+ - name: exec module with bogus MU
+ testns.testcoll.uses_mu_missing:
+ ignore_errors: true
+ register: from_missing_mu
+
+ # module with redirected MU, redirect collection not found
+ - name: exec module with a missing redirect target collection
+ testns.testcoll.uses_mu_missing_redirect_collection:
+ ignore_errors: true
+ register: from_missing_redir_collection
+
+ # module with redirected MU, redirect module not found
+ - name: exec module with a missing redirect target module
+ testns.testcoll.uses_mu_missing_redirect_module:
+ ignore_errors: true
+ register: from_missing_redir_module
+
+ - assert:
+ that:
+ - testmodule_out.source == 'user'
+ - systestmodule_out.source == 'sys'
+ - contentadjmodule_out.source == 'content_adj'
+ - not maskedmodule_out.plugin_path
+ - bullmodule_out.source == 'user_ansible_bullcoll'
+ - builtin_ping_out.source is not defined
+ - builtin_ping_out.ping == 'pong'
+ - subdir_ping_action_out is not changed
+ - subdir_ping_module_out is not changed
+ - granular_out.mu_result == 'thingtocall in leaf'
+ - granular_nested_out.mu_result == 'thingtocall in base called thingtocall in secondary'
+ - flat_out.mu_result == 'thingtocall in leaf'
+ - from_out.mu_result == 'thingtocall in leaf'
+ - from_out.mu2_result == 'thingtocall in secondary'
+ - from_out.mu3_result == 'thingtocall in subpkg.submod'
+ - from_out.mu4_result == 'thingtocall in subpkg_with_init'
+ - from_out.mu5_result == 'thingtocall in mod_in_subpkg_with_init'
+ - from_out.mu6_result == 'thingtocall in subpkg.submod'
+ - from_nested_func.mu_result == 'hello from nested_same'
+ - from_nested_module.mu_result == 'hello from nested_same'
+ - from_redirected_mu.mu_result == 'hello from ansible_collections.testns.content_adj.plugins.module_utils.sub1.foomodule'
+ - from_redirected_mu.mu_result2 == 'hello from testns.othercoll.formerly_testcoll_pkg.thing'
+ - from_redirected_mu.mu_result3 == 'hello from formerly_testcoll_pkg.submod.thing'
+ - from_missing_mu is failed
+ - "'Could not find imported module support' in from_missing_mu.msg"
+ - from_missing_redir_collection is failed
+ - "'unable to locate collection bogusns.boguscoll' in from_missing_redir_collection.msg"
+ - from_missing_redir_module is failed
+ - "'Could not find imported module support code for ansible_collections.testns.testcoll.plugins.modules.uses_mu_missing_redirect_module' in from_missing_redir_module.msg"
+
+
+- hosts: testhost
+ tasks:
+ - name: exercise filters/tests/lookups
+ assert:
+ that:
+ - "'data' | testns.testcoll.testfilter == 'data_via_testfilter_from_userdir'"
+ - "'data' | testns.testcoll.testfilter2 == 'data_via_testfilter2_from_userdir'"
+ - "'data' | testns.testcoll.filter_subdir.test_subdir_filter == 'data_via_testfilter_from_subdir'"
+ - "'from_user' is testns.testcoll.testtest"
+ - "'from_user2' is testns.testcoll.testtest2"
+ - "'subdir_from_user' is testns.testcoll.test_subdir.subdir_test"
+ - lookup('testns.testcoll.mylookup') == 'mylookup_from_user_dir'
+ - lookup('testns.testcoll.mylookup2') == 'mylookup2_from_user_dir'
+ - lookup('testns.testcoll.lookup_subdir.my_subdir_lookup') == 'subdir_lookup_from_user_dir'
+
+ - debug:
+ msg: "{{ 'foo'|testns.testbroken.broken }}"
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - |
+ 'This is a broken filter plugin.' in result.msg
+
+ - debug:
+ msg: "{{ 'foo'|missing.collection.filter }}"
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+
+# ensure that the synthetic ansible.builtin collection limits to builtin plugins, that ansible.legacy loads overrides
+# from legacy plugin dirs, and that a same-named plugin loaded from a real collection is not masked by the others
+- hosts: testhost
+ tasks:
+ - name: test unqualified ping from library dir
+ ping:
+ register: unqualified_ping_out
+
+ - name: test legacy-qualified ping from library dir
+ ansible.legacy.ping:
+ register: legacy_ping_out
+
+ - name: test builtin ping
+ ansible.builtin.ping:
+ register: builtin_ping_out
+
+ - name: test collection-based ping
+ testns.testcoll.ping:
+ register: collection_ping_out
+
+ - assert:
+ that:
+ - unqualified_ping_out.source == 'legacy_library_dir'
+ - legacy_ping_out.source == 'legacy_library_dir'
+ - builtin_ping_out.ping == 'pong'
+ - collection_ping_out.source == 'user'
+
+# verify the default value for the collections list is empty
+- hosts: testhost
+ tasks:
+ - name: sample default collections value
+ testns.testcoll.plugin_lookup:
+ register: coll_default_out
+
+ - assert:
+ that:
+ # in original release, collections defaults to empty, which is mostly equivalent to ansible.legacy
+ - not coll_default_out.collection_list
+
+
+# ensure that inheritance/masking works as expected, that the proper default values are injected when missing,
+# and that the order is preserved if one of the magic values is explicitly specified
+- name: verify collections keyword play/block/task inheritance and magic values
+ hosts: testhost
+ collections:
+ - bogus.fromplay
+ tasks:
+ - name: sample play collections value
+ testns.testcoll.plugin_lookup:
+ register: coll_play_out
+
+ - name: collections override block-level
+ collections:
+ - bogus.fromblock
+ block:
+ - name: sample block collections value
+ testns.testcoll.plugin_lookup:
+ register: coll_block_out
+
+ - name: sample task collections value
+ collections:
+ - bogus.fromtask
+ testns.testcoll.plugin_lookup:
+ register: coll_task_out
+
+ - name: sample task with explicit core
+ collections:
+ - ansible.builtin
+ - bogus.fromtaskexplicitcore
+ testns.testcoll.plugin_lookup:
+ register: coll_task_core
+
+ - name: sample task with explicit legacy
+ collections:
+ - ansible.legacy
+ - bogus.fromtaskexplicitlegacy
+ testns.testcoll.plugin_lookup:
+ register: coll_task_legacy
+
+ - assert:
+ that:
+ # ensure that parent value inheritance is masked properly by explicit setting
+ - coll_play_out.collection_list == ['bogus.fromplay', 'ansible.legacy']
+ - coll_block_out.collection_list == ['bogus.fromblock', 'ansible.legacy']
+ - coll_task_out.collection_list == ['bogus.fromtask', 'ansible.legacy']
+ - coll_task_core.collection_list == ['ansible.builtin', 'bogus.fromtaskexplicitcore']
+ - coll_task_legacy.collection_list == ['ansible.legacy', 'bogus.fromtaskexplicitlegacy']
+
+- name: verify unqualified plugin resolution behavior
+ hosts: testhost
+ collections:
+ - testns.testcoll
+ - testns.coll_in_sys
+ - testns.content_adj
+ tasks:
+ # basic test of unqualified module lookup and that we got the right one (user-dir hosted; there's another copy of
+ # this one in the same-named collection in the sys dir that should be masked)
+ - name: exec unqualified module in a user-dir testns collection
+ testmodule:
+ register: testmodule_out
+
+ # use another collection to verify that we're looking in all collections listed on the play
+ - name: exec unqualified module in a sys-dir testns collection
+ systestmodule:
+ register: systestmodule_out
+
+ - assert:
+ that:
+ - testmodule_out.source == 'user'
+ - systestmodule_out.source == 'sys'
+
+# test keyword-static execution of a FQ collection-backed role with "tasks/main.yaml"
+- name: verify collection-backed role execution (keyword static)
+ hosts: testhost
+ collections:
+ # set to ansible.builtin only to ensure that roles function properly without inheriting the play's collections config
+ - ansible.builtin
+ vars:
+ test_role_input: keyword static
+ roles:
+ - role: testns.testcoll.testrole_main_yaml
+ tasks:
+ - name: ensure role executed
+ assert:
+ that:
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+
+# test dynamic execution of a FQ collection-backed role
+- name: verify collection-backed role execution (dynamic)
+ hosts: testhost
+ collections:
+ # set to ansible.builtin only to ensure that roles function properly without inheriting the play's collections config
+ - ansible.builtin
+ vars:
+ test_role_input: dynamic
+ tasks:
+ - include_role:
+ name: testns.testcoll.testrole
+ - name: ensure role executed
+ assert:
+ that:
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+# test task-static execution of a FQ collection-backed role
+- name: verify collection-backed role execution (task static)
+ hosts: testhost
+ collections:
+ - ansible.builtin
+ vars:
+ test_role_input: task static
+ tasks:
+ - import_role:
+ name: testns.testcoll.testrole
+ - name: ensure role executed
+ assert:
+ that:
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+
+# test a legacy playbook-adjacent role, ensure that play collections config is not inherited
+- name: verify legacy playbook-adjacent role behavior
+ hosts: testhost
+ collections:
+ - bogus.bogus
+ vars:
+ test_role_input: legacy playbook-adjacent
+ roles:
+ - testrole
+# FIXME: this should technically work to look up a playbook-adjacent role
+# - ansible.legacy.testrole
+ tasks:
+ - name: ensure role executed
+ assert:
+ that:
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'legacy roles dir'
+
+
+# test dynamic execution of a FQ collection-backed role hosted in a role subdir
+- name: verify collection-backed role execution in subdir (include)
+ hosts: testhost
+ vars:
+ test_role_input: dynamic (subdir)
+ tasks:
+ - include_role:
+ name: testns.testcoll.role_subdir.subdir_testrole
+ - name: ensure role executed
+ assert:
+ that:
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+
+# test collection-relative role deps (keyword static)
+- name: verify collection-relative role deps
+ hosts: testhost
+ vars:
+ outer_role_input: keyword static outer
+ test_role_input: keyword static inner
+ roles:
+ - testns.testcoll.calls_intra_collection_dep_role_unqualified
+ tasks:
+ - assert:
+ that:
+ - outer_role_output.msg == outer_role_input
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+# test collection-relative role deps (task static)
+- name: verify collection-relative role deps
+ hosts: testhost
+ vars:
+ outer_role_input: task static outer
+ test_role_input: task static inner
+ tasks:
+ - import_role:
+ name: testns.testcoll.calls_intra_collection_dep_role_unqualified
+ - assert:
+ that:
+ - outer_role_output.msg == outer_role_input
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+# test collection-relative role deps (task dynamic)
+- name: verify collection-relative role deps
+ hosts: testhost
+ vars:
+ outer_role_input: task dynamic outer
+ test_role_input: task dynamic inner
+ tasks:
+ - include_role:
+ name: testns.testcoll.calls_intra_collection_dep_role_unqualified
+ - assert:
+ that:
+ - outer_role_output.msg == outer_role_input
+ - test_role_output.msg == test_role_input
+ - testrole_source == 'collection'
+
+
+- name: validate static task include behavior
+ hosts: testhost
+ collections:
+ - bogus.bogus
+ tasks:
+ - import_tasks: includeme.yml
+
+
+- name: validate dynamic task include behavior
+ hosts: testhost
+ collections:
+ - bogus.bogus
+ tasks:
+ - include_tasks: includeme.yml
+
+
+- import_playbook: test_collection_meta.yml
+- name: Test FQCN handlers
+ hosts: testhost
+ vars:
+ handler_counter: 0
+ roles:
+ - testns.testcoll.test_fqcn_handlers
+
+- name: Ensure a collection role can call a standalone role
+ hosts: testhost
+ roles:
+ - testns.testcoll.call_standalone
+
+# Issue https://github.com/ansible/ansible/issues/69054
+- name: Test collection as string
+ hosts: testhost
+ collections: foo
+ tasks:
+ - debug: msg="Test"
diff --git a/test/integration/targets/collections/redirected.statichost.yml b/test/integration/targets/collections/redirected.statichost.yml
new file mode 100644
index 00000000..9fd2c2d8
--- /dev/null
+++ b/test/integration/targets/collections/redirected.statichost.yml
@@ -0,0 +1,3 @@
+# use a plugin redirected by core to a collection to ensure inventory redirection and redirected config names are working
+plugin: formerly_core_inventory # this is defined in the ansible-base runtime.yml routing to point at testns.content_adj.statichost
+hostname: dynamic_host_redirected
diff --git a/test/integration/targets/collections/roles/standalone/tasks/main.yml b/test/integration/targets/collections/roles/standalone/tasks/main.yml
new file mode 100644
index 00000000..b4dd23db
--- /dev/null
+++ b/test/integration/targets/collections/roles/standalone/tasks/main.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ standalone_role_var: True
diff --git a/test/integration/targets/collections/roles/testrole/tasks/main.yml b/test/integration/targets/collections/roles/testrole/tasks/main.yml
new file mode 100644
index 00000000..cbf6b8e7
--- /dev/null
+++ b/test/integration/targets/collections/roles/testrole/tasks/main.yml
@@ -0,0 +1,28 @@
+- debug:
+ msg: executing testrole from legacy playbook-adjacent roles dir
+
+- name: exec a FQ module from a legacy role
+ testns.testcoll.testmodule:
+ register: coll_module_out
+
+- name: exec a legacy playbook-adjacent module from a legacy role
+ ping:
+ register: ping_out
+
+- name: sample collections list inside a legacy role (should be empty)
+ testns.testcoll.plugin_lookup:
+ register: plugin_lookup_out
+
+- debug:
+ msg: '{{ test_role_input | default("(undefined)") }}'
+ register: test_role_output
+
+- set_fact:
+ testrole_source: legacy roles dir
+
+- assert:
+ that:
+ - coll_module_out.source == 'user'
+ # ensure we used the library/ ping override, not the builtin or one from another collection
+ - ping_out.source == 'legacy_library_dir'
+ - not plugin_lookup_out.collection_list
diff --git a/test/integration/targets/collections/runme.sh b/test/integration/targets/collections/runme.sh
new file mode 100755
index 00000000..dc01a6c0
--- /dev/null
+++ b/test/integration/targets/collections/runme.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_COLLECTIONS_PATH=$PWD/collection_root_user:$PWD/collection_root_sys
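+# two colon-separated collection roots, searched in order: user-dir content masks same-named collections in the sys dir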
+export ANSIBLE_GATHERING=explicit
+export ANSIBLE_GATHER_SUBSET=minimal
+export ANSIBLE_HOST_PATTERN_MISMATCH=error
+
+# FUTURE: just use INVENTORY_PATH as-is once ansible-test sets the right dir
+ipath=../../$(basename "${INVENTORY_PATH:-../../inventory}")
+export INVENTORY_PATH="$ipath"
+
+echo "--- validating callbacks"
+# validate FQ callbacks in ansible-playbook
+ANSIBLE_CALLBACK_WHITELIST=testns.testcoll.usercallback ansible-playbook noop.yml | grep "usercallback says ok"
+# use adhoc for the rest of these tests, must force it to load other callbacks
+export ANSIBLE_LOAD_CALLBACK_PLUGINS=1
+# validate redirected callback
+ANSIBLE_CALLBACK_WHITELIST=formerly_core_callback ansible localhost -m debug 2>&1 | grep -- "usercallback says ok"
+## validate missing redirected callback
+ANSIBLE_CALLBACK_WHITELIST=formerly_core_missing_callback ansible localhost -m debug 2>&1 | grep -- "Skipping callback plugin 'formerly_core_missing_callback'"
+## validate redirected + removed callback (fatal)
+ANSIBLE_CALLBACK_WHITELIST=formerly_core_removed_callback ansible localhost -m debug 2>&1 | grep -- "testns.testcoll.removedcallback has been removed"
+# validate avoiding duplicate loading of callback, even if using diff names
+[ "$(ANSIBLE_CALLBACK_WHITELIST=testns.testcoll.usercallback,formerly_core_callback ansible localhost -m debug 2>&1 | grep -c 'usercallback says ok')" = "1" ]
+# ensure non existing callback does not crash ansible
+ANSIBLE_CALLBACK_WHITELIST=charlie.gomez.notme ansible localhost -m debug 2>&1 | grep -- "Skipping callback plugin 'charlie.gomez.notme'"
+unset ANSIBLE_LOAD_CALLBACK_PLUGINS
+# adhoc normally shouldn't load non-default plugins; let's be sure
+output=$(ANSIBLE_CALLBACK_WHITELIST=testns.testcoll.usercallback ansible localhost -m debug)
+if [[ "${output}" =~ "usercallback says ok" ]]; then echo fail; exit 1; fi
+
+echo "--- validating docs"
+# test documentation
+ansible-doc testns.testcoll.testmodule -vvv | grep -- "- normal_doc_frag"
+# same with symlink
+ln -s "${PWD}/testcoll2" ./collection_root_sys/ansible_collections/testns/testcoll2
+ansible-doc testns.testcoll2.testmodule2 -vvv | grep "Test module"
+# now test we can list with symlink
+ansible-doc -l -vvv | grep "testns.testcoll2.testmodule2"
+
+echo "testing bad doc_fragments (expected ERROR message follows)"
+# test documentation failure
+ansible-doc testns.testcoll.testmodule_bad_docfrags -vvv 2>&1 | grep -- "unknown doc_fragment"
+
+echo "--- validating default collection"
+# test adhoc default collection resolution (use unqualified collection module with playbook dir under its collection)
+
+echo "testing adhoc default collection support with explicit playbook dir"
+ANSIBLE_PLAYBOOK_DIR=./collection_root_user/ansible_collections/testns/testcoll ansible localhost -m testmodule
+
+# we need multiple plays, and conditional import_playbook is noisy and causes problems, so choose here which one to use...
+if [[ ${INVENTORY_PATH} == *.winrm ]]; then
+ export TEST_PLAYBOOK=windows.yml
+else
+ export TEST_PLAYBOOK=posix.yml
+
+ echo "testing default collection support"
+ ansible-playbook -i "${INVENTORY_PATH}" collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml "$@"
+fi
+
+echo "--- validating collections support in playbooks/roles"
+# run test playbooks
+ansible-playbook -i "${INVENTORY_PATH}" -v "${TEST_PLAYBOOK}" "$@"
+
+if [[ ${INVENTORY_PATH} != *.winrm ]]; then
+ ansible-playbook -i "${INVENTORY_PATH}" -v invocation_tests.yml "$@"
+fi
+
+echo "--- validating bypass_host_loop with collection search"
+ansible-playbook -i host1,host2, -v test_bypass_host_loop.yml "$@"
+
+echo "--- validating inventory"
+# test collection inventories
+ansible-playbook inventory_test.yml -i a.statichost.yml -i redirected.statichost.yml "$@"
+
+# test adjacent with --playbook-dir
+export ANSIBLE_COLLECTIONS_PATH=''
+ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=1 ansible-inventory --list --export --playbook-dir=. -v "$@"
+
+# use an inventory source with caching enabled
+ansible-playbook -i a.statichost.yml -i ./cache.statichost.yml -v check_populated_inventory.yml
+
+# Check that the inventory source with caching enabled was stored
+if [[ "$(find ./inventory_cache -type f ! -path "./inventory_cache/.keep" | wc -l)" -ne "1" ]]; then
+ echo "Failed to find the expected single cache"
+ exit 1
+fi
+
+CACHEFILE="$(find ./inventory_cache -type f ! -path './inventory_cache/.keep')"
+
+if [[ $CACHEFILE != ./inventory_cache/prefix_* ]]; then
+ echo "Unexpected cache file"
+ exit 1
+fi
+
+# Check the cache for the expected hosts
+
+if [[ "$(grep -wc "cache_host_a" "$CACHEFILE")" -ne "1" ]]; then
+ echo "Failed to cache host as expected"
+ exit 1
+fi
+
+if [[ "$(grep -wc "dynamic_host_a" "$CACHEFILE")" -ne "0" ]]; then
+ echo "Cached an incorrect source"
+ exit 1
+fi
+
+./vars_plugin_tests.sh
+
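
The callback greps above depend on the collection's usercallback plugin emitting the literal string "usercallback says ok"; that plugin is not shown in this hunk. A minimal sketch of a callback that would satisfy those checks (the hook choice and class body are assumptions):

    from ansible.plugins.callback import CallbackBase


    class CallbackModule(CallbackBase):
        CALLBACK_VERSION = 2.0
        CALLBACK_TYPE = 'aggregate'
        CALLBACK_NAME = 'testns.testcoll.usercallback'
        # Require explicit whitelisting, matching the ANSIBLE_CALLBACK_WHITELIST
        # usage exercised by runme.sh.
        CALLBACK_NEEDS_WHITELIST = True

        def v2_playbook_on_play_start(self, play):
            self._display.display('usercallback says ok')
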
diff --git a/test/integration/targets/collections/test_bypass_host_loop.yml b/test/integration/targets/collections/test_bypass_host_loop.yml
new file mode 100644
index 00000000..e95262b8
--- /dev/null
+++ b/test/integration/targets/collections/test_bypass_host_loop.yml
@@ -0,0 +1,22 @@
+- name: Test collection lookup bypass host list
+ hosts: all
+ connection: local
+ gather_facts: false
+ collections:
+ - testns.testcoll
+ tasks:
+ - meta: end_host
+ when: lookup('pipe', ansible_playbook_python ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '<')
+
+ - bypass_host_loop:
+ register: bypass
+
+ - run_once: true
+ vars:
+ bypass_hosts: '{{ hostvars|dictsort|map(attribute="1.bypass.bypass_inventory_hostname")|select("defined")|unique }}'
+ block:
+ - debug:
+ var: bypass_hosts
+
+ - assert:
+ that: bypass_hosts|length == 1
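
This play only works if the bypass_host_loop action runs once for the whole host list while recording the single host it executed for. Action plugins signal this with the BYPASS_HOST_LOOP class attribute; a rough sketch of the plugin under test (the result key is taken from the assert above, the rest is an assumption):

    from ansible.plugins.action import ActionBase


    class ActionModule(ActionBase):
        # Tells the strategy to execute this task once instead of per host.
        BYPASS_HOST_LOOP = True

        def run(self, tmp=None, task_vars=None):
            result = super(ActionModule, self).run(tmp, task_vars)
            # Record which host the task actually ran for; the play asserts
            # that exactly one host ends up with this key defined.
            result['bypass_inventory_hostname'] = task_vars['inventory_hostname']
            return result
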
diff --git a/test/integration/targets/collections/test_collection_meta.yml b/test/integration/targets/collections/test_collection_meta.yml
new file mode 100644
index 00000000..22a00b21
--- /dev/null
+++ b/test/integration/targets/collections/test_collection_meta.yml
@@ -0,0 +1,46 @@
+- hosts: localhost
+ gather_facts: no
+ collections:
+ - testns.testcoll
+ vars:
+ # redirect connection
+ ansible_connection: testns.testcoll.redirected_local
+ tasks:
+ - assert:
+ that: ('data' | testns.testcoll.testfilter) == 'data_via_testfilter_from_userdir'
+
+ # redirect module (multiple levels)
+ - multilevel1:
+ # redirect action
+ - uses_redirected_action:
+ # redirect import (consumed via action)
+ - uses_redirected_import:
+ # redirect lookup
+ - assert:
+ that: lookup('formerly_core_lookup') == 'mylookup_from_user_dir'
+ # redirect filter
+ - assert:
+ that: ('yes' | formerly_core_filter) == True
+ # legacy filter should mask redirected
+ - assert:
+ that: ('' | formerly_core_masked_filter) == 'hello from overridden formerly_core_masked_filter'
+ # redirect test
+ - assert:
+ that:
+ - "'stuff' is formerly_core_test('tuf')"
+ - "'hello override' is formerly_core_masked_test"
+ # redirect module (formerly internal)
+ - formerly_core_ping:
+ # redirect module from collection (with subdir)
+ - testns.testcoll.module_subdir.subdir_ping_module:
+ # redirect module_utils plugin (consumed via module)
+ - uses_core_redirected_mu:
+ # deprecated module (issues warning)
+ - deprecated_ping:
+ # redirect module (internal alias)
+ - aliased_ping:
+ # redirect module (cycle detection, fatal)
+# - looped_ping:
+
+ # removed module (fatal)
+# - dead_ping:
diff --git a/test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py b/test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py
new file mode 100644
index 00000000..11c7f7a7
--- /dev/null
+++ b/test/integration/targets/collections/test_plugins/override_formerly_core_masked_test.py
@@ -0,0 +1,16 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def override_formerly_core_masked_test(value, *args, **kwargs):
+ if value != 'hello override':
+ raise Exception('expected "hello override" only...')
+
+ return True
+
+
+class TestModule(object):
+ def tests(self):
+ return {
+ 'formerly_core_masked_test': override_formerly_core_masked_test
+ }
diff --git a/test/integration/targets/collections/testcoll2/MANIFEST.json b/test/integration/targets/collections/testcoll2/MANIFEST.json
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/collections/testcoll2/MANIFEST.json
diff --git a/test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py b/test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py
new file mode 100644
index 00000000..7f6eb024
--- /dev/null
+++ b/test/integration/targets/collections/testcoll2/plugins/modules/testmodule2.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: testmodule2
+short_description: Test module
+description:
+ - Test module
+author:
+ - Ansible Core Team
+'''
+
+EXAMPLES = '''
+'''
+
+RETURN = '''
+'''
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='sys')))
+
+
+if __name__ == '__main__':
+ main()
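
testmodule2.py deliberately skips AnsibleModule and just prints a JSON result, which is the minimum contract for an Ansible module. For contrast, a sketch of the same behavior written against the standard API:

    #!/usr/bin/python
    from __future__ import (absolute_import, division, print_function)
    __metaclass__ = type

    from ansible.module_utils.basic import AnsibleModule


    def main():
        module = AnsibleModule(argument_spec={}, supports_check_mode=True)
        # Equivalent to the raw print(json.dumps(...)) form above.
        module.exit_json(changed=False, source='sys')


    if __name__ == '__main__':
        main()
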
diff --git a/test/integration/targets/collections/vars_plugin_tests.sh b/test/integration/targets/collections/vars_plugin_tests.sh
new file mode 100755
index 00000000..2118af6d
--- /dev/null
+++ b/test/integration/targets/collections/vars_plugin_tests.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Collections vars plugins must be whitelisted with FQCN because PluginLoader.all() does not search collections
+
+# Let vars plugins run for inventory by using the global setting
+export ANSIBLE_RUN_VARS_PLUGINS=start
+
+# Test vars plugin in a playbook-adjacent collection
+export ANSIBLE_VARS_ENABLED=testns.content_adj.custom_adj_vars
+
+ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep '"collection": "adjacent"' out.txt
+grep '"adj_var": "value"' out.txt
+
+# Test vars plugin in a collection path
+export ANSIBLE_VARS_ENABLED=testns.testcoll.custom_vars
+export ANSIBLE_COLLECTIONS_PATH=$PWD/collection_root_user:$PWD/collection_root_sys
+
+ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep '"collection": "collection_root_user"' out.txt
+grep -v '"adj_var": "value"' out.txt
+
+# Test enabled vars plugins order reflects the order in which variables are merged
+export ANSIBLE_VARS_ENABLED=testns.content_adj.custom_adj_vars,testns.testcoll.custom_vars
+
+ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep '"collection": "collection_root_user"' out.txt
+grep '"adj_var": "value"' out.txt
+grep -v '"collection": "adjacent"' out.txt
+
+# Test that 3rd-party plugins in the plugin path do not require whitelisting by default
+# Plugins shipped with Ansible and in the custom plugin dir should be used first
+export ANSIBLE_VARS_PLUGINS=./custom_vars_plugins
+
+ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep '"name": "v2_vars_plugin"' out.txt
+grep '"collection": "collection_root_user"' out.txt
+grep '"adj_var": "value"' out.txt
+grep -v '"whitelisted": true' out.txt
+
+# Test plugins in plugin paths that opt in to requiring whitelisting
+unset ANSIBLE_VARS_ENABLED
+unset ANSIBLE_COLLECTIONS_PATH
+
+ANSIBLE_VARS_ENABLED=vars_req_whitelist ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep '"whitelisted": true' out.txt
+
+# Test that vars plugins supporting the stage setting don't run for inventory when the stage is set to 'task',
+# and that vars plugins without stage support don't run for inventory when the global setting is 'demand'
+ANSIBLE_VARS_PLUGIN_STAGE=task ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep -v '"v1_vars_plugin": true' out.txt
+grep -v '"v2_vars_plugin": true' out.txt
+grep -v '"vars_req_whitelist": true' out.txt
+grep -v '"collection": "adjacent"' out.txt
+grep -v '"collection": "collection_root_user"' out.txt
+grep -v '"adj_var": "value"' out.txt
+
+# Test that the global setting allows v1 and v2 plugins to run after importing inventory
+ANSIBLE_RUN_VARS_PLUGINS=start ansible-inventory -i a.statichost.yml --list --playbook-dir=./ | tee out.txt
+
+grep -v '"vars_req_whitelist": true' out.txt
+grep '"v1_vars_plugin": true' out.txt
+grep '"v2_vars_plugin": true' out.txt
+grep '"name": "v2_vars_plugin"' out.txt
+
+# Test that vars plugins in collections and in the vars plugin path are available for tasks
+cat << EOF > "test_task_vars.yml"
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - debug: msg="{{ name }}"
+ - debug: msg="{{ collection }}"
+ - debug: msg="{{ adj_var }}"
+EOF
+
+export ANSIBLE_VARS_ENABLED=testns.content_adj.custom_adj_vars
+
+ANSIBLE_VARS_PLUGIN_STAGE=task ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3"
+ANSIBLE_RUN_VARS_PLUGINS=start ANSIBLE_VARS_PLUGIN_STAGE=inventory ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3"
+ANSIBLE_RUN_VARS_PLUGINS=demand ANSIBLE_VARS_PLUGIN_STAGE=inventory ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3"
+ANSIBLE_VARS_PLUGINS=./custom_vars_plugins ansible-playbook test_task_vars.yml | grep "ok=3"
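
These checks revolve around two vars-plugin knobs: the REQUIRES_WHITELIST class attribute (opting in to ANSIBLE_VARS_ENABLED) and the stage setting. The plugins under test live under custom_vars_plugins/ and are not shown in this hunk; a minimal sketch of a whitelist-requiring vars plugin (the returned key mirrors the greps above, everything else is an assumption):

    from ansible.plugins.vars import BaseVarsPlugin


    class VarsModule(BaseVarsPlugin):
        # Opt in to requiring whitelisting via ANSIBLE_VARS_ENABLED.
        REQUIRES_WHITELIST = True

        def get_vars(self, loader, path, entities):
            super(VarsModule, self).get_vars(loader, path, entities)
            return {'whitelisted': True}
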
diff --git a/test/integration/targets/collections/windows.yml b/test/integration/targets/collections/windows.yml
new file mode 100644
index 00000000..4bdfb0ed
--- /dev/null
+++ b/test/integration/targets/collections/windows.yml
@@ -0,0 +1,28 @@
+- hosts: windows
+ tasks:
+ - testns.testcoll.win_selfcontained:
+ register: selfcontained_out
+
+ - testns.testcoll.win_csbasic_only:
+ register: csbasic_only_out
+
+ - testns.testcoll.win_uses_coll_psmu:
+ register: uses_coll_psmu
+
+ - testns.testcoll.win_uses_coll_csmu:
+ register: uses_coll_csmu
+
+ - assert:
+ that:
+ - selfcontained_out.source == 'user'
+ - csbasic_only_out.source == 'user'
+ # win_uses_coll_psmu
+ - uses_coll_psmu.source == 'user'
+ - "'user_mu' in uses_coll_psmu.ping"
+ - uses_coll_psmu.subpkg == 'from subpkg.subps.psm1'
+ # win_uses_coll_csmu
+ - uses_coll_csmu.source == 'user'
+ - "'user_mu' in uses_coll_csmu.ping"
+ - "'Hello from subpkg.subcs' in uses_coll_csmu.ping"
+ - uses_coll_csmu.subpkg == 'Hello from subpkg.subcs'
+ - uses_coll_csmu.type_accelerator == uses_coll_csmu.ping
diff --git a/test/integration/targets/collections_plugin_namespace/aliases b/test/integration/targets/collections_plugin_namespace/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py
new file mode 100644
index 00000000..dca094be
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py
@@ -0,0 +1,15 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def filter_name(a):
+ return __name__
+
+
+class FilterModule(object):
+ def filters(self):
+ filters = {
+ 'filter_name': filter_name,
+ }
+
+ return filters
diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py
new file mode 100644
index 00000000..d0af703b
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_name.py
@@ -0,0 +1,9 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ return [__name__]
diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py
new file mode 100644
index 00000000..79e80f62
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py
@@ -0,0 +1,10 @@
+# do not add future boilerplate to this plugin
+# specifically, do not add absolute_import, as the purpose of this plugin is to test implicit relative imports on Python 2.x
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ return [__name__]
diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py
new file mode 100644
index 00000000..1739072f
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py
@@ -0,0 +1,13 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def test_name_ok(value):
+ return __name__ == 'ansible_collections.my_ns.my_col.plugins.test.test_test'
+
+
+class TestModule:
+ def tests(self):
+ return {
+ 'test_name_ok': test_name_ok,
+ }
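
What these plugins assert is that the collection loader imports them under the synthetic ansible_collections package, so __name__ carries the FQCN path. A rough illustration using the private, version-specific loader internals of ansible-base 2.10 (treat every name here as an assumption; this is not a supported API):

    # Illustration only: _AnsibleCollectionFinder is private API in ansible-base 2.10.
    from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder

    # Assumes the working directory contains the collection_root tree from this target.
    _AnsibleCollectionFinder(paths=['collection_root'])._install()

    import ansible_collections.my_ns.my_col.plugins.test.test_test as t
    # The loader assigns the synthetic package path, which is what test_name_ok checks.
    assert t.__name__ == 'ansible_collections.my_ns.my_col.plugins.test.test_test'
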
diff --git a/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml
new file mode 100644
index 00000000..d80f5470
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml
@@ -0,0 +1,12 @@
+- set_fact:
+ filter_name: "{{ 1 | my_ns.my_col.filter_name }}"
+ lookup_name: "{{ lookup('my_ns.my_col.lookup_name') }}"
+ lookup_no_future_boilerplate: "{{ lookup('my_ns.my_col.lookup_no_future_boilerplate') }}"
+ test_name_ok: "{{ 1 is my_ns.my_col.test_name_ok }}"
+
+- assert:
+ that:
+ - filter_name == 'ansible_collections.my_ns.my_col.plugins.filter.test_filter'
+ - lookup_name == 'ansible_collections.my_ns.my_col.plugins.lookup.lookup_name'
+ - lookup_no_future_boilerplate == 'ansible_collections.my_ns.my_col.plugins.lookup.lookup_no_future_boilerplate'
+ - test_name_ok
diff --git a/test/integration/targets/collections_plugin_namespace/runme.sh b/test/integration/targets/collections_plugin_namespace/runme.sh
new file mode 100755
index 00000000..96e83d36
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_COLLECTIONS_PATH="${PWD}/collection_root" ansible-playbook test.yml -i ../../inventory "$@"
diff --git a/test/integration/targets/collections_plugin_namespace/test.yml b/test/integration/targets/collections_plugin_namespace/test.yml
new file mode 100644
index 00000000..d1c3f1b7
--- /dev/null
+++ b/test/integration/targets/collections_plugin_namespace/test.yml
@@ -0,0 +1,3 @@
+- hosts: testhost
+ roles:
+ - my_ns.my_col.test
diff --git a/test/integration/targets/collections_relative_imports/aliases b/test/integration/targets/collections_relative_imports/aliases
new file mode 100644
index 00000000..996481b4
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/aliases
@@ -0,0 +1,4 @@
+posix
+shippable/posix/group1
+shippable/windows/group1
+windows
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1
new file mode 100644
index 00000000..bf812643
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/PSRel1.psm1
@@ -0,0 +1,11 @@
+#AnsibleRequires -PowerShell .sub_pkg.PSRel2
+
+Function Invoke-FromPSRel1 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "$(Invoke-FromPSRel2) -> Invoke-FromPSRel1"
+}
+
+Export-ModuleMember -Function Invoke-FromPSRel1
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py
new file mode 100644
index 00000000..196b4abf
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util1.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+def one():
+ return 1
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py
new file mode 100644
index 00000000..0d985bf3
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from .my_util1 import one
+
+
+def two():
+ return one() * 2
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py
new file mode 100644
index 00000000..1529d7b2
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from . import my_util2
+
+
+def three():
+ return my_util2.two() + 1
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1
new file mode 100644
index 00000000..d0aa3686
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/sub_pkg/PSRel2.psm1
@@ -0,0 +1,11 @@
+#AnsibleRequires -PowerShell ansible_collections.my_ns.my_col2.plugins.module_utils.PSRel3
+
+Function Invoke-FromPSRel2 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "$(Invoke-FromPSRel3) -> Invoke-FromPSRel2"
+}
+
+Export-ModuleMember -Function Invoke-FromPSRel2
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py
new file mode 100644
index 00000000..0cdf5008
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py
@@ -0,0 +1,24 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+from ..module_utils.my_util2 import two
+from ..module_utils import my_util3
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ two=two(),
+ three=my_util3.three(),
+ )
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
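
The two relative imports in my_module.py resolve inside the synthetic collection namespace, so they are interchangeable with the fully qualified forms. A fragment showing the equivalence (only meaningful when the collection is on a configured collections path):

    # Relative form used by my_module.py:
    #   from ..module_utils.my_util2 import two
    # Fully qualified equivalent:
    from ansible_collections.my_ns.my_col.plugins.module_utils.my_util2 import two
    from ansible_collections.my_ns.my_col.plugins.module_utils import my_util3

    assert two() == 2 and my_util3.three() == 3  # one() * 2, then + 1
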
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1
new file mode 100644
index 00000000..383df0a3
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/win_relative.ps1
@@ -0,0 +1,10 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -PowerShell ..module_utils.PSRel1
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+$module.Result.data = Invoke-FromPSRel1
+
+$module.ExitJson()
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml
new file mode 100644
index 00000000..9ba0f7ed
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/roles/test/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: fully qualified module usage with relative imports
+ my_ns.my_col.my_module:
+- name: collection relative module usage with relative imports
+ my_module:
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1 b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1
new file mode 100644
index 00000000..46edd5a9
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/PSRel3.psm1
@@ -0,0 +1,11 @@
+#AnsibleRequires -CSharpUtil .sub_pkg.CSRel4
+
+Function Invoke-FromPSRel3 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "$([CSRel4]::Invoke()) -> Invoke-FromPSRel3"
+}
+
+Export-ModuleMember -Function Invoke-FromPSRel3
diff --git a/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs
new file mode 100644
index 00000000..c50024b6
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col2/plugins/module_utils/sub_pkg/CSRel4.cs
@@ -0,0 +1,14 @@
+using System;
+
+//TypeAccelerator -Name CSRel4 -TypeName TestClass
+
+namespace ansible_collections.my_ns.my_col.plugins.module_utils.sub_pkg.CSRel4
+{
+ public class TestClass
+ {
+ public static string Invoke()
+ {
+ return "CSRel4.Invoke()";
+ }
+ }
+}
diff --git a/test/integration/targets/collections_relative_imports/runme.sh b/test/integration/targets/collections_relative_imports/runme.sh
new file mode 100755
index 00000000..754efafe
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# we need multiple plays, and conditional import_playbook is noisy and causes problems, so choose here which one to use...
+if [[ ${INVENTORY_PATH} == *.winrm ]]; then
+ export TEST_PLAYBOOK=windows.yml
+else
+ export TEST_PLAYBOOK=test.yml
+
+fi
+
+ANSIBLE_COLLECTIONS_PATH="${PWD}/collection_root" ansible-playbook "${TEST_PLAYBOOK}" -i "${INVENTORY_PATH}" "$@"
diff --git a/test/integration/targets/collections_relative_imports/test.yml b/test/integration/targets/collections_relative_imports/test.yml
new file mode 100644
index 00000000..d1c3f1b7
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/test.yml
@@ -0,0 +1,3 @@
+- hosts: testhost
+ roles:
+ - my_ns.my_col.test
diff --git a/test/integration/targets/collections_relative_imports/windows.yml b/test/integration/targets/collections_relative_imports/windows.yml
new file mode 100644
index 00000000..aa6badfa
--- /dev/null
+++ b/test/integration/targets/collections_relative_imports/windows.yml
@@ -0,0 +1,11 @@
+- hosts: windows
+ gather_facts: no
+ tasks:
+ - name: test out relative imports on Windows modules
+ my_ns.my_col.win_relative:
+ register: win_relative
+
+ - name: assert relative imports on Windows modules
+ assert:
+ that:
+ - win_relative.data == 'CSRel4.Invoke() -> Invoke-FromPSRel3 -> Invoke-FromPSRel2 -> Invoke-FromPSRel1'
diff --git a/test/integration/targets/collections_runtime_pythonpath/aliases b/test/integration/targets/collections_runtime_pythonpath/aliases
new file mode 100644
index 00000000..0a772ad7
--- /dev/null
+++ b/test/integration/targets/collections_runtime_pythonpath/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group4
+skip/python2.6
+skip/aix
diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py
new file mode 100644
index 00000000..a2313b12
--- /dev/null
+++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/ansible_collections/python/dist/plugins/modules/boo.py
@@ -0,0 +1,28 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Say hello."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': 'world'},
+ },
+ )
+ name = module.params['name']
+
+ module.exit_json(
+ msg='Greeting {name} completed.'.
+ format(name=name.title()),
+ greeting='Hello, {name}!'.format(name=name),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml
new file mode 100644
index 00000000..feec734a
--- /dev/null
+++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools >= 44",
+ "wheel",
+]
+build-backend = "setuptools.build_meta"
diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg
new file mode 100644
index 00000000..d25ebb0f
--- /dev/null
+++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-boo/setup.cfg
@@ -0,0 +1,15 @@
+[metadata]
+name = ansible-collections.python.dist
+version = 1.0.0rc2.post3.dev4
+
+[options]
+package_dir =
+ = .
+packages =
+ ansible_collections
+ ansible_collections.python
+ ansible_collections.python.dist
+ ansible_collections.python.dist.plugins
+ ansible_collections.python.dist.plugins.modules
+zip_safe = True
+include_package_data = True
diff --git a/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py
new file mode 100644
index 00000000..1ef03330
--- /dev/null
+++ b/test/integration/targets/collections_runtime_pythonpath/ansible-collection-python-dist-foo/ansible_collections/python/dist/plugins/modules/boo.py
@@ -0,0 +1,28 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Say hello in Ukrainian."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': 'світ'},
+ },
+ )
+ name = module.params['name']
+
+ module.exit_json(
+ msg='Greeting {name} completed.'.
+ format(name=name.title()),
+ greeting='Привіт, {name}!'.format(name=name),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/collections_runtime_pythonpath/runme.sh b/test/integration/targets/collections_runtime_pythonpath/runme.sh
new file mode 100755
index 00000000..654104a1
--- /dev/null
+++ b/test/integration/targets/collections_runtime_pythonpath/runme.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+set -eux -o pipefail
+
+
+export PIP_DISABLE_PIP_VERSION_CHECK=1
+
+
+source virtualenv.sh
+
+
+>&2 echo \
+ === Test that the module \
+ gets picked up if discoverable \
+ via PYTHONPATH env var ===
+PYTHONPATH="${PWD}/ansible-collection-python-dist-boo:$PYTHONPATH" \
+ansible \
+ -m python.dist.boo \
+ -a 'name=Bob' \
+ -c local localhost \
+ "$@" | grep -E '"greeting": "Hello, Bob!",'
+
+
+>&2 echo \
+ === Test that the module \
+ gets picked up if installed \
+ into site-packages ===
+python -m pip.__main__ install pep517
+( # Build a binary Python dist (a wheel) using PEP517:
+ cp -r ansible-collection-python-dist-boo "${OUTPUT_DIR}/"
+ cd "${OUTPUT_DIR}/ansible-collection-python-dist-boo"
+ python -m pep517.build --binary --out-dir dist .
+)
+# Install a pre-built dist with pip:
+python -m pip.__main__ install \
+ --no-index \
+ -f "${OUTPUT_DIR}/ansible-collection-python-dist-boo/dist/" \
+ --only-binary=ansible-collections.python.dist \
+ ansible-collections.python.dist
+python -m pip.__main__ show ansible-collections.python.dist
+ansible \
+ -m python.dist.boo \
+ -a 'name=Frodo' \
+ -c local localhost \
+ "$@" | grep -E '"greeting": "Hello, Frodo!",'
+
+
+>&2 echo \
+ === Test that ansible_collections \
+ root takes precedence over \
+ PYTHONPATH/site-packages ===
+# This is done by injecting a module with the same FQCN
+# into another collection root.
+ANSIBLE_COLLECTIONS_PATH="${PWD}/ansible-collection-python-dist-foo" \
+PYTHONPATH="${PWD}/ansible-collection-python-dist-boo:$PYTHONPATH" \
+ansible \
+ -m python.dist.boo \
+ -a 'name=Степан' \
+ -c local localhost \
+ "$@" | grep -E '"greeting": "Привіт, Степан!",'
diff --git a/test/integration/targets/command_shell/aliases b/test/integration/targets/command_shell/aliases
new file mode 100644
index 00000000..8dd7b884
--- /dev/null
+++ b/test/integration/targets/command_shell/aliases
@@ -0,0 +1,4 @@
+command
+shippable/posix/group2
+shell
+skip/aix
diff --git a/test/integration/targets/command_shell/files/create_afile.sh b/test/integration/targets/command_shell/files/create_afile.sh
new file mode 100755
index 00000000..e6fae448
--- /dev/null
+++ b/test/integration/targets/command_shell/files/create_afile.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+echo "win" > "$1" \ No newline at end of file
diff --git a/test/integration/targets/command_shell/files/remove_afile.sh b/test/integration/targets/command_shell/files/remove_afile.sh
new file mode 100755
index 00000000..4a7fea66
--- /dev/null
+++ b/test/integration/targets/command_shell/files/remove_afile.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+rm "$1" \ No newline at end of file
diff --git a/test/integration/targets/command_shell/files/test.sh b/test/integration/targets/command_shell/files/test.sh
new file mode 100755
index 00000000..ade17e9b
--- /dev/null
+++ b/test/integration/targets/command_shell/files/test.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+echo -n "win" \ No newline at end of file
diff --git a/test/integration/targets/command_shell/meta/main.yml b/test/integration/targets/command_shell/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/command_shell/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/command_shell/tasks/main.yml b/test/integration/targets/command_shell/tasks/main.yml
new file mode 100644
index 00000000..1d614e49
--- /dev/null
+++ b/test/integration/targets/command_shell/tasks/main.yml
@@ -0,0 +1,446 @@
+# Test code for the command and shell modules.
+
+# Copyright: (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: use command to execute sudo
+ command: sudo -h
+ register: become
+
+- name: assert become warning was reported
+ assert:
+ that:
+ - "become.warnings | length() == 1"
+ - "'Consider using' in become.warnings[0]"
+
+- name: use command to execute sudo without warnings
+ command: sudo -h warn=no
+ register: become
+
+- name: assert become warning was not reported
+ assert:
+ that:
+ - "'warnings' not in become"
+
+- name: use command to execute tar
+ command: tar --help
+ register: tar
+
+- name: assert tar warning was reported
+ assert:
+ that:
+ - tar.warnings | length() == 1
+ - '"Consider using the unarchive module rather than running ''tar''" in tar.warnings[0]'
+
+- name: use command to execute chown
+ command: chown -h
+ register: chown
+ ignore_errors: true
+
+- name: assert chown warning was reported
+ assert:
+ that:
+ - chown.warnings | length() == 1
+ - '"Consider using the file module with owner rather than running ''chown''" in chown.warnings[0]'
+
+- name: use command with unsupported executable arg
+ command: ls /dev/null
+ args:
+ executable: /bogus
+ register: executable
+
+- name: assert executable warning was reported
+ assert:
+ that:
+ - executable.stdout == '/dev/null'
+ - executable.warnings | length() == 1
+ - "'no longer supported' in executable.warnings[0]"
+
+# The warning isn't on the task since it comes from the action plugin. Not sure
+# how to test for that.
+#
+# - name: Use command with reboot
+# command: sleep 2 && /not/shutdown -r now
+# ignore_errors: yes
+# register: reboot
+
+# - name: Assert that reboot warning was issued
+# assert:
+# that:
+# - '"Consider using the reboot module" in reboot.warnings[0]'
+
+- name: use command with no command
+ command:
+ args:
+ chdir: /
+ register: no_command
+ ignore_errors: true
+
+- name: assert executable fails with no command
+ assert:
+ that:
+ - no_command is failed
+ - no_command.msg == 'no command given'
+ - no_command.rc == 256
+
+- name: use argv
+ command:
+ argv:
+ - echo
+ - testing
+ register: argv_command
+ ignore_errors: true
+
+- name: assert executable works with argv
+ assert:
+ that:
+ - "argv_command.stdout == 'testing'"
+
+- name: use argv and command string
+ command: echo testing
+ args:
+ argv:
+ - echo
+ - testing
+ register: argv_and_string_command
+ ignore_errors: true
+
+- name: assert executable fails with both argv and command string
+ assert:
+ that:
+ - argv_and_string_command is failed
+ - argv_and_string_command.msg == 'only command or argv can be given, not both'
+ - argv_and_string_command.rc == 256
+
+- set_fact:
+ output_dir_test: "{{ output_dir }}/test_command_shell"
+
+- name: make sure our testing sub-directory does not exist
+ file:
+ path: "{{ output_dir_test }}"
+ state: absent
+
+- name: create our testing sub-directory
+ file:
+ path: "{{ output_dir_test }}"
+ state: directory
+
+- name: prep our test script
+ copy:
+ src: test.sh
+ dest: "{{ output_dir_test }}"
+ mode: '0755'
+
+- name: prep our test script
+ copy:
+ src: create_afile.sh
+ dest: "{{ output_dir_test }}"
+ mode: '0755'
+
+- name: prep our test script
+ copy:
+ src: remove_afile.sh
+ dest: "{{ output_dir_test }}"
+ mode: '0755'
+
+- name: locate bash
+ shell: which bash
+ register: bash
+
+##
+## command
+##
+
+- name: execute the test.sh script via command
+ command: "{{ output_dir_test }}/test.sh"
+ register: command_result0
+
+- name: assert that the script executed correctly
+ assert:
+ that:
+ - command_result0.rc == 0
+ - command_result0.stderr == ''
+ - command_result0.stdout == 'win'
+
+# executable
+
+# FIXME doesn't have the expected stdout.
+
+#- name: execute the test.sh script with executable via command
+# command: "{{output_dir_test }}/test.sh executable={{ bash.stdout }}"
+# register: command_result1
+#
+#- name: assert that the script executed correctly with command
+# assert:
+# that:
+# - "command_result1.rc == 0"
+# - "command_result1.stderr == ''"
+# - "command_result1.stdout == 'win'"
+
+# chdir
+
+- name: execute the test.sh script with chdir via command
+ command: ./test.sh
+ args:
+ chdir: "{{ output_dir_test }}"
+ register: command_result2
+
+- name: assert that the script executed correctly with chdir
+ assert:
+ that:
+ - command_result2.rc == 0
+ - command_result2.stderr == ''
+ - command_result2.stdout == 'win'
+
+# creates
+
+- name: verify that afile.txt is absent
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: absent
+
+- name: create afile.txt with create_afile.sh via command
+ command: "{{ output_dir_test }}/create_afile.sh {{output_dir_test }}/afile.txt"
+ args:
+ creates: "{{ output_dir_test }}/afile.txt"
+
+- name: verify that afile.txt is present
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: file
+
+- name: re-run previous command using creates with globbing
+ command: "{{ output_dir_test }}/create_afile.sh {{ output_dir_test }}/afile.txt"
+ args:
+ creates: "{{ output_dir_test }}/afile.*"
+ register: command_result3
+
+- name: assert that creates with globbing is working
+ assert:
+ that:
+ - command_result3 is not changed
+
+# removes
+
+- name: remove afile.txt with remove_afile.sh via command
+ command: "{{ output_dir_test }}/remove_afile.sh {{ output_dir_test }}/afile.txt"
+ args:
+ removes: "{{ output_dir_test }}/afile.txt"
+
+- name: verify that afile.txt is absent
+ file: path={{output_dir_test}}/afile.txt state=absent
+
+- name: re-run previous command using removes with globbing
+ command: "{{ output_dir_test }}/remove_afile.sh {{ output_dir_test }}/afile.txt"
+ args:
+ removes: "{{ output_dir_test }}/afile.*"
+ register: command_result4
+
+- name: assert that removes with globbing is working
+ assert:
+ that:
+ - command_result4.changed != True
+
+- name: pass stdin to cat via command
+ command: cat
+ args:
+ stdin: 'foobar'
+ register: command_result5
+
+- name: assert that stdin is passed
+ assert:
+ that:
+ - command_result5.stdout == 'foobar'
+
+- name: send to stdin literal multiline block
+ command: "{{ ansible_python.executable }} -c 'import hashlib, sys; print(hashlib.sha1((sys.stdin.buffer if hasattr(sys.stdin, \"buffer\") else sys.stdin).read()).hexdigest())'"
+ args:
+ stdin: |-
+ this is the first line
+ this is the second line
+
+ this line is after an empty line
+ this line is the last line
+ register: command_result6
+
+- name: assert the multiline input was passed correctly
+ assert:
+ that:
+ - "command_result6.stdout == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'"
+
+##
+## shell
+##
+
+- name: Execute the test.sh script
+ shell: "{{ output_dir_test }}/test.sh"
+ register: shell_result0
+
+- name: Assert that the script executed correctly
+ assert:
+ that:
+ - shell_result0 is changed
+ - shell_result0.cmd == '{{ output_dir_test }}/test.sh'
+ - shell_result0.rc == 0
+ - shell_result0.stderr == ''
+ - shell_result0.stdout == 'win'
+
+# executable
+
+# FIXME doesn't pass the expected stdout
+
+#- name: execute the test.sh script
+# shell: "{{output_dir_test }}/test.sh executable={{ bash.stdout }}"
+# register: shell_result1
+#
+#- name: assert that the shell executed correctly
+# assert:
+# that:
+# - "shell_result1.rc == 0"
+# - "shell_result1.stderr == ''"
+# - "shell_result1.stdout == 'win'"
+
+# chdir
+
+- name: Execute the test.sh script with chdir
+ shell: ./test.sh
+ args:
+ chdir: "{{ output_dir_test }}"
+ register: shell_result2
+
+- name: Assert that the shell executed correctly with chdir
+ assert:
+ that:
+ - shell_result2 is changed
+ - shell_result2.cmd == './test.sh'
+ - shell_result2.rc == 0
+ - shell_result2.stderr == ''
+ - shell_result2.stdout == 'win'
+
+# creates
+
+- name: Verify that afile.txt is absent
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: absent
+
+- name: Execute the test.sh script with chdir
+ shell: "{{ output_dir_test }}/test.sh > {{ output_dir_test }}/afile.txt"
+ args:
+ chdir: "{{ output_dir_test }}"
+ creates: "{{ output_dir_test }}/afile.txt"
+
+- name: Verify that afile.txt is present
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: file
+
+# multiline
+
+- name: Remove test file previously created
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: absent
+
+- name: Execute a shell command using a literal multiline block
+ args:
+ executable: "{{ bash.stdout }}"
+ shell: |
+ echo this is a \
+ "multiline echo" \
+ "with a new line
+ in quotes" \
+ | {{ ansible_python.executable }} -c 'import hashlib, sys; print(hashlib.sha1((sys.stdin.buffer if hasattr(sys.stdin, "buffer") else sys.stdin).read()).hexdigest())'
+ echo "this is a second line"
+ register: shell_result5
+
+- name: Assert the multiline shell command ran as expected
+ assert:
+ that:
+ - shell_result5 is changed
+ - shell_result5.rc == 0
+ - shell_result5.cmd == 'echo this is a "multiline echo" "with a new line\nin quotes" | ' + ansible_python.executable + ' -c \'import hashlib, sys; print(hashlib.sha1((sys.stdin.buffer if hasattr(sys.stdin, "buffer") else sys.stdin).read()).hexdigest())\'\necho "this is a second line"\n'
+ - shell_result5.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'
+
+- name: Execute a shell command using a literal multiline block with arguments in it
+ shell: |
+ executable="{{ bash.stdout }}"
+ creates={{ output_dir_test }}/afile.txt
+ echo "test"
+ register: shell_result6
+
+- name: Assert the multiline shell command with arguments in it ran as expected
+ assert:
+ that:
+ - shell_result6 is changed
+ - shell_result6.rc == 0
+ - shell_result6.cmd == 'echo "test"\n'
+ - shell_result6.stdout == 'test'
+
+- name: Execute a shell command using a multiline block where whitespaces matter
+ shell: |
+ cat <<EOF
+ One
+ Two
+ Three
+ EOF
+ register: shell_result7
+
+- name: Assert the multiline shell command output preserves whitespace
+ assert:
+ that:
+ - shell_result7 is changed
+ - shell_result7.rc == 0
+ - shell_result7.cmd == 'cat <<EOF\nOne\n Two\n Three\nEOF\n'
+ - shell_result7.stdout == 'One\n Two\n Three'
+
+- name: execute a shell command with no trailing newline to stdin
+ shell: cat > {{output_dir_test }}/afile.txt
+ args:
+ stdin: test
+ stdin_add_newline: no
+
+- name: make sure content matches expected
+ copy:
+ dest: "{{output_dir_test }}/afile.txt"
+ content: test
+ register: shell_result7
+ failed_when:
+ - shell_result7 is failed or
+ shell_result7 is changed
+
+- name: execute a shell command with trailing newline to stdin
+ shell: cat > {{output_dir_test }}/afile.txt
+ args:
+ stdin: test
+ stdin_add_newline: yes
+
+- name: make sure content matches expected
+ copy:
+ dest: "{{output_dir_test }}/afile.txt"
+ content: |
+ test
+ register: shell_result8
+ failed_when:
+ - shell_result8 is failed or
+ shell_result8 is changed
+
+- name: execute a shell command with trailing newline to stdin, default
+ shell: cat > {{output_dir_test }}/afile.txt
+ args:
+ stdin: test
+
+- name: make sure content matches expected
+ copy:
+ dest: "{{output_dir_test }}/afile.txt"
+ content: |
+ test
+ register: shell_result9
+ failed_when:
+ - shell_result9 is failed or
+ shell_result9 is changed
+
+- name: remove the previously created file
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: absent
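
The three stdin tasks above pin down stdin_add_newline: by default the module appends exactly one trailing newline to the supplied stdin, and stdin_add_newline: no suppresses it. A rough model of that behavior (a sketch, not the module's actual source):

    import subprocess


    def run_with_stdin(cmd, stdin, stdin_add_newline=True):
        # The command/shell modules feed stdin to the child process, appending
        # a single '\n' unless stdin_add_newline is disabled.
        data = stdin + '\n' if stdin_add_newline else stdin
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        out, _ = proc.communicate(data.encode())
        return out


    # cat echoes its stdin, so the trailing newline is directly observable:
    assert run_with_stdin(['cat'], 'test') == b'test\n'
    assert run_with_stdin(['cat'], 'test', stdin_add_newline=False) == b'test'
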
diff --git a/test/integration/targets/common_network/aliases b/test/integration/targets/common_network/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/common_network/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/common_network/tasks/main.yml b/test/integration/targets/common_network/tasks/main.yml
new file mode 100644
index 00000000..97b3dd0d
--- /dev/null
+++ b/test/integration/targets/common_network/tasks/main.yml
@@ -0,0 +1,4 @@
+- assert:
+ that:
+ - '"00:00:00:a1:2b:cc" is is_mac'
+ - '"foo" is not is_mac'
diff --git a/test/integration/targets/common_network/test_plugins/is_mac.py b/test/integration/targets/common_network/test_plugins/is_mac.py
new file mode 100644
index 00000000..6a4d4092
--- /dev/null
+++ b/test/integration/targets/common_network/test_plugins/is_mac.py
@@ -0,0 +1,14 @@
+# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.common.network import is_mac
+
+
+class TestModule(object):
+ def tests(self):
+ return {
+ 'is_mac': is_mac,
+ }
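
The real check lives in ansible.module_utils.common.network.is_mac; this target only wraps it in a Jinja test. For reference, a simplified re-implementation of the accepted format (a sketch, not the actual source):

    import re

    # Six case-insensitive hex pairs separated by colons, e.g. 00:00:00:a1:2b:cc.
    MAC_RE = re.compile(r'^[0-9a-f]{2}(:[0-9a-f]{2}){5}$', re.IGNORECASE)


    def is_mac_sketch(value):
        return bool(MAC_RE.match(value))


    assert is_mac_sketch('00:00:00:a1:2b:cc')
    assert not is_mac_sketch('foo')
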
diff --git a/test/integration/targets/conditionals/aliases b/test/integration/targets/conditionals/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/conditionals/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/conditionals/play.yml b/test/integration/targets/conditionals/play.yml
new file mode 100644
index 00000000..c6bb3815
--- /dev/null
+++ b/test/integration/targets/conditionals/play.yml
@@ -0,0 +1,551 @@
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+# (c) 2019, Ansible Project
+
+- hosts: testhost
+ gather_facts: false
+ vars_files:
+ - vars/main.yml
+ tasks:
+ - name: set conditional bare vars status
+ set_fact:
+ bare: "{{lookup('config', 'CONDITIONAL_BARE_VARS')|bool}}"
+
+ - name: test conditional '=='
+ shell: echo 'testing'
+ when: 1 == 1
+ register: result
+
+ - name: assert conditional '==' ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional '=='
+ shell: echo 'testing'
+ when: 0 == 1
+ register: result
+
+ - name: assert bad conditional '==' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test conditional '!='
+ shell: echo 'testing'
+ when: 0 != 1
+ register: result
+
+ - name: assert conditional '!=' ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional '!='
+ shell: echo 'testing'
+ when: 1 != 1
+ register: result
+
+ - name: assert bad conditional '!=' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test conditional 'in'
+ shell: echo 'testing'
+ when: 1 in [1,2,3]
+ register: result
+
+ - name: assert conditional 'in' ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional 'in'
+ shell: echo 'testing'
+ when: 1 in [7,8,9]
+ register: result
+
+ - name: assert bad conditional 'in' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test conditional 'not in'
+ shell: echo 'testing'
+ when: 0 not in [1,2,3]
+ register: result
+
+ - name: assert conditional 'not in' ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional 'not in'
+ shell: echo 'testing'
+ when: 1 not in [1,2,3]
+ register: result
+
+ - name: assert bad conditional 'not in' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test conditional 'is defined'
+ shell: echo 'testing'
+ when: test_bare is defined
+ register: result
+
+ - name: assert conditional 'is defined' ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional 'is defined'
+ shell: echo 'testing'
+ when: foo_asdf_xyz is defined
+ register: result
+
+ - name: assert bad conditional 'is defined' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test conditional 'is not defined'
+ shell: echo 'testing'
+ when: foo_asdf_xyz is not defined
+ register: result
+
+ - name: assert conditional 'is not defined' ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional 'is not defined'
+ shell: echo 'testing'
+ when: test_bare is not defined
+ register: result
+
+ - name: assert bad conditional 'is not defined' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test bad conditional 'is undefined'
+ shell: echo 'testing'
+ when: test_bare is undefined
+ register: result
+
+ - name: assert bad conditional 'is undefined' did NOT run
+ assert:
+ that:
+ - result is skipped
+
+ - name: test bare conditional
+ shell: echo 'testing'
+ when: test_bare
+ register: result
+
+ - name: assert bare conditional ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test conditional using a variable
+ shell: echo 'testing'
+ when: test_bare_var == 123
+ register: result
+
+ - name: assert conditional using a variable ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test good conditional based on nested variables
+ shell: echo 'testing'
+ when: test_bare_nested_good
+ register: result
+
+ - name: assert good conditional based on nested var ran
+ assert:
+ that:
+ - result is changed
+ - "result.stdout == 'testing'"
+ - "result.rc == 0"
+
+ - name: test bad conditional based on nested variables
+ shell: echo 'testing'
+ when: test_bare_nested_bad
+ register: result
+
+ - debug: var={{item}}
+ loop:
+ - bare
+ - result
+ - test_bare_nested_bad
+
+ - name: assert that the bad nested conditional is skipped, since with bare vars the 'string' template resolves to 'false'
+ assert:
+ that:
+ - result is skipped
+ when: bare|bool
+
+ - name: assert that the bad nested conditional did run, since the non-bare 'string' is left untemplated but is truthy
+ assert:
+ that:
+ - result is changed
+ when: not bare|bool
+
+ - name: test bad conditional based on nested variables with bool filter
+ shell: echo 'testing'
+ when: test_bare_nested_bad|bool
+ register: result
+
+ - name: assert that the bad nested conditional did NOT run as bool forces evaluation
+ assert:
+ that:
+ - result is skipped
+
+ #-----------------------------------------------------------------------
+ # proper booleanification tests (issue #8629)
+
+ - name: set fact to string 'false'
+ set_fact: bool_test1=false
+
+ - name: set fact to string 'False'
+ set_fact: bool_test2=False
+
+ - name: set fact to a proper boolean using complex args
+ set_fact:
+ bool_test3: false
+
+ - name: "test boolean value 'false' string using 'when: var'"
+ command: echo 'hi'
+ when: bool_test1
+ register: result
+
+ - name: assert that the task did not run for 'false'
+ assert:
+ that:
+ - result is skipped
+
+ - name: "test boolean value 'false' string using 'when: not var'"
+ command: echo 'hi'
+ when: not bool_test1
+ register: result
+
+ - name: assert that the task DID run for not 'false'
+ assert:
+ that:
+ - result is changed
+
+ - name: "test boolean value of 'False' string using 'when: var'"
+ command: echo 'hi'
+ when: bool_test2
+ register: result
+
+ - name: assert that the task did not run for 'False'
+ assert:
+ that:
+ - result is skipped
+
+ - name: "test boolean value 'False' string using 'when: not var'"
+ command: echo 'hi'
+ when: not bool_test2
+ register: result
+
+ - name: assert that the task DID run for not 'False'
+ assert:
+ that:
+ - result is changed
+
+ - name: "test proper boolean value of complex arg using 'when: var'"
+ command: echo 'hi'
+ when: bool_test3
+ register: result
+
+ - name: assert that the task did not run for proper boolean false
+ assert:
+ that:
+ - result is skipped
+
+ - name: "test proper boolean value of complex arg using 'when: not var'"
+ command: echo 'hi'
+ when: not bool_test3
+ register: result
+
+ - name: assert that the task DID run for not false
+ assert:
+ that:
+ - result is changed
+
+ - set_fact: skipped_bad_attribute=True
+ - block:
+ - name: test a with_items loop using a variable with a missing attribute
+ debug: var=item
+ with_items: "{{cond_bad_attribute.results | default('')}}"
+ register: result
+ - set_fact: skipped_bad_attribute=False
+ - name: assert the task was skipped
+ assert:
+ that:
+ - skipped_bad_attribute
+ when: cond_bad_attribute is defined and 'results' in cond_bad_attribute
+
+ - name: test a with_items loop skipping a single item
+ debug: var=item
+ with_items: "{{cond_list_of_items.results}}"
+ when: item != 'b'
+ register: result
+
+ - debug: var=result
+
+ - name: assert only a single item was skipped
+ assert:
+ that:
+ - result.results|length == 3
+ - result.results[1].skipped
+
+ - name: test complex templated condition
+ debug: msg="it works"
+ when: vars_file_var in things1|union([vars_file_var])
+
+ - name: test dict with invalid key is undefined
+ vars:
+ mydict:
+ a: foo
+ b: bar
+ debug: var=mydict['c']
+ register: result
+ when: mydict['c'] is undefined
+
+ - name: assert the task did not fail
+ assert:
+ that:
+ - result is success
+
+ - name: test dict with invalid key does not run with conditional is defined
+ vars:
+ mydict:
+ a: foo
+ b: bar
+ debug: var=mydict['c']
+ when: mydict['c'] is defined
+ register: result
+
+ - name: assert the task was skipped
+ assert:
+ that:
+ - result is skipped
+
+ - name: test list with invalid element does not run with conditional is defined
+ vars:
+ mylist: []
+ debug: var=mylist[0]
+ when: mylist[0] is defined
+ register: result
+
+ - name: assert the task was skipped
+ assert:
+ that:
+ - result is skipped
+
+ - name: test list with invalid element is undefined
+ vars:
+ mylist: []
+ debug: var=mylist[0]
+ when: mylist[0] is undefined
+ register: result
+
+ - name: assert the task did not fail
+ assert:
+ that:
+ - result is success
+
+
+ - name: Deal with multivar equality
+ tags: ['leveldiff']
+ when: not bare|bool
+ vars:
+ toplevel_hash:
+ hash_var_one: justastring
+ hash_var_two: something.with.dots
+ hash_var_three: something:with:colons
+ hash_var_four: something/with/slashes
+ hash_var_five: something with spaces
+ hash_var_six: yes
+ hash_var_seven: no
+ toplevel_var_one: justastring
+ toplevel_var_two: something.with.dots
+ toplevel_var_three: something:with:colons
+ toplevel_var_four: something/with/slashes
+ toplevel_var_five: something with spaces
+ toplevel_var_six: yes
+ toplevel_var_seven: no
+ block:
+
+ - name: var subkey simple string
+ debug:
+ var: toplevel_hash.hash_var_one
+ register: sub
+ when: toplevel_hash.hash_var_one
+
+ - name: toplevel simple string
+ debug:
+ var: toplevel_var_one
+ when: toplevel_var_one
+ register: top
+ ignore_errors: yes
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is not skipped
+ - sub is not skipped
+ - top is not failed
+ - sub is not failed
+
+ - name: var subkey string with dots
+ debug:
+ var: toplevel_hash.hash_var_two
+ register: sub
+ when: toplevel_hash.hash_var_two
+
+ - debug:
+ var: toplevel_var_two
+ when: toplevel_var_two
+ register: top
+ ignore_errors: yes
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is not skipped
+ - sub is not skipped
+ - top is not failed
+ - sub is not failed
+
+ - name: var subkey string with colons
+ debug:
+ var: toplevel_hash.hash_var_three
+ register: sub
+ when: toplevel_hash.hash_var_three
+
+ - debug:
+ var: toplevel_var_three
+ when: toplevel_var_three
+ register: top
+ ignore_errors: yes
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is not skipped
+ - sub is not skipped
+ - top is not failed
+ - sub is not failed
+
+ - name: var subkey string with slashes
+ debug:
+ var: toplevel_hash.hash_var_four
+ register: sub
+ when: toplevel_hash.hash_var_four
+
+ - debug:
+ var: toplevel_var_four
+ when: toplevel_var_four
+ register: top
+ ignore_errors: yes
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is not skipped
+ - sub is not skipped
+ - top is not failed
+ - sub is not failed
+
+ - name: var subkey string with spaces
+ debug:
+ var: toplevel_hash.hash_var_five
+ register: sub
+ when: toplevel_hash.hash_var_five
+
+ - debug:
+ var: toplevel_var_five
+ when: toplevel_var_five
+ register: top
+ ignore_errors: yes
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is not skipped
+ - sub is not skipped
+ - top is not failed
+ - sub is not failed
+
+ - name: var subkey with 'yes' value
+ debug:
+ var: toplevel_hash.hash_var_six
+ register: sub
+ when: toplevel_hash.hash_var_six
+
+ - debug:
+ var: toplevel_var_six
+ register: top
+ when: toplevel_var_six
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is not skipped
+ - sub is not skipped
+
+ - name: var subkey with 'no' value
+ debug:
+ var: toplevel_hash.hash_var_seven
+ register: sub
+ when: toplevel_hash.hash_var_seven
+
+ - debug:
+ var: toplevel_var_seven
+ register: top
+ when: toplevel_var_seven
+
+ - name: ensure top and multi work same
+ assert:
+ that:
+ - top is skipped
+ - sub is skipped
+
+ - name: test that 'comparison expression' item works with_items
+ assert:
+ that:
+ - item
+ with_items:
+ - 1 == 1
+
+ - name: test that 'comparison expression' item works in loop
+ assert:
+ that:
+ - item
+ loop:
+ - 1 == 1
diff --git a/test/integration/targets/conditionals/runme.sh b/test/integration/targets/conditionals/runme.sh
new file mode 100755
index 00000000..934443a5
--- /dev/null
+++ b/test/integration/targets/conditionals/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_CONDITIONAL_BARE_VARS=1 ansible-playbook -i ../../inventory play.yml "$@"
+ANSIBLE_CONDITIONAL_BARE_VARS=0 ansible-playbook -i ../../inventory play.yml "$@"
+
+export ANSIBLE_CONDITIONAL_BARE_VARS=1
+export ANSIBLE_DEPRECATION_WARNINGS=True
+
+# No warnings for conditionals that are already type bool
+test "$(ansible-playbook -i ../../inventory test_no_warnings.yml "$@" 2>&1 | grep -c '\[DEPRECATION WARNING\]')" = 0
+
+# Warn for bare vars of other types since they may be interpreted differently when CONDITIONAL_BARE_VARS defaults to False
+test "$(ansible-playbook -i ../../inventory test_warnings.yml "$@" 2>&1 | grep -c '\[DEPRECATION WARNING\]')" = 2
diff --git a/test/integration/targets/conditionals/test_no_warnings.yml b/test/integration/targets/conditionals/test_no_warnings.yml
new file mode 100644
index 00000000..93280447
--- /dev/null
+++ b/test/integration/targets/conditionals/test_no_warnings.yml
@@ -0,0 +1,18 @@
+- hosts: testhost
+ gather_facts: false
+ vars:
+ boolean_var: false
+ nested:
+ bool_var: false
+ tasks:
+ - name: Run tasks that previously warned (suggesting the bool filter) on vars that are already boolean
+ block:
+ - debug:
+ when: boolean_var
+ - debug:
+ when: nested.bool_var
+ - debug:
+ when: double_interpolated
+ vars:
+ double_interpolated: "{{ other }}"
+ other: false
diff --git a/test/integration/targets/conditionals/test_warnings.yml b/test/integration/targets/conditionals/test_warnings.yml
new file mode 100644
index 00000000..4186cd01
--- /dev/null
+++ b/test/integration/targets/conditionals/test_warnings.yml
@@ -0,0 +1,14 @@
+- hosts: testhost
+ gather_facts: false
+ vars:
+ str_boolean_var: 'false'
+ tasks:
+ - name: Run tasks with warnings for conditionals that will change in behavior depending on CONDITIONAL_BARE_VARS
+ block:
+ - debug:
+ when: str_boolean_var
+ - debug:
+ when: double_interpolated
+ vars:
+ double_interpolated: other
+ other: false
diff --git a/test/integration/targets/conditionals/vars/main.yml b/test/integration/targets/conditionals/vars/main.yml
new file mode 100644
index 00000000..d6221478
--- /dev/null
+++ b/test/integration/targets/conditionals/vars/main.yml
@@ -0,0 +1,22 @@
+---
+# cond_bad_attribute is a dictionary used to check that
+# a conditional passes a with_items loop on a variable
+# with a missing attribute (i.e. cond_bad_attribute.results)
+cond_bad_attribute:
+ bar: a
+
+cond_list_of_items:
+ results:
+ - a
+ - b
+ - c
+
+things1:
+ - 1
+ - 2
+vars_file_var: 321
+
+test_bare: true
+test_bare_var: 123
+test_bare_nested_good: "test_bare_var == 123"
+test_bare_nested_bad: "{{test_bare_var}} == 321"
diff --git a/test/integration/targets/config/aliases b/test/integration/targets/config/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/config/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/config/runme.sh b/test/integration/targets/config/runme.sh
new file mode 100755
index 00000000..73c3778b
--- /dev/null
+++ b/test/integration/targets/config/runme.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ignore empty env var and use default
+# shellcheck disable=SC1007
+ANSIBLE_TIMEOUT= ansible -m ping testhost -i ../../inventory "$@"
+
+# env var is wrong type, this should be a fatal error pointing at the setting
+ANSIBLE_TIMEOUT='lola' ansible -m ping testhost -i ../../inventory "$@" 2>&1|grep 'Invalid type for configuration option setting: DEFAULT_TIMEOUT'
+
+# https://github.com/ansible/ansible/issues/69577
+ANSIBLE_REMOTE_TMP="$HOME/.ansible/directory_with_no_space" ansible -m ping testhost -i ../../inventory "$@"
+
+ANSIBLE_REMOTE_TMP="$HOME/.ansible/directory with space" ansible -m ping testhost -i ../../inventory "$@"
+
+ANSIBLE_CONFIG=nonexistent.cfg ansible-config dump --only-changed -v | grep 'No config file found; using defaults'
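The shellcheck SC1007 suppression above covers the `VAR= cmd` form, which sets ANSIBLE_TIMEOUT to an empty string for that single invocation; the config loader treats the empty value as unset and falls back to the built-in default (10 seconds at the time of writing). A quick way to confirm from a shell, output abridged:

    ANSIBLE_TIMEOUT= ansible-config dump | grep DEFAULT_TIMEOUT
    # DEFAULT_TIMEOUT(default) = 10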
diff --git a/test/integration/targets/connection/aliases b/test/integration/targets/connection/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/connection/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/connection/test.sh b/test/integration/targets/connection/test.sh
new file mode 100755
index 00000000..18fb2b77
--- /dev/null
+++ b/test/integration/targets/connection/test.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+# Run connection tests with both the default and C locale.
+
+ ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+
+# Check that connection vars do not appear in the output
+# https://github.com/ansible/ansible/pull/70853
+trap "rm out.txt" EXIT
+
+ansible all -i "${INVENTORY}" -m set_fact -a "testing=value" -v | tee out.txt
+if grep 'ansible_host' out.txt
+then
+ echo "FAILURE: Connection vars in output"
+ exit 1
+else
+ echo "SUCCESS: Connection vars not found"
+fi
diff --git a/test/integration/targets/connection/test_connection.yml b/test/integration/targets/connection/test_connection.yml
new file mode 100644
index 00000000..21699422
--- /dev/null
+++ b/test/integration/targets/connection/test_connection.yml
@@ -0,0 +1,43 @@
+- hosts: "{{ target_hosts }}"
+ gather_facts: no
+ serial: 1
+ tasks:
+
+ ### raw with unicode arg and output
+
+ - name: raw with unicode arg and output
+ raw: echo 汉语
+ register: command
+ - name: check output of raw with unicode arg and output
+ assert:
+ that:
+ - "'汉语' in command.stdout"
+ - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
+
+ ### copy local file with unicode filename and content
+
+ - name: create local file with unicode filename and content
+ local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+ - name: remove remote file with unicode filename and content
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+ - name: create remote directory with unicode name
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+ - name: copy local file with unicode filename and content
+ action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+
+ ### fetch remote file with unicode filename and content
+
+ - name: remove local file with unicode filename and content
+ local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+ - name: fetch remote file with unicode filename and content
+ fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+
+ ### remove local and remote temp files
+
+ - name: remove local temp file
+ local_action: file path={{ local_tmp }}-汉语 state=absent
+ - name: remove remote temp file
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+
+ ### test wait_for_connection plugin
+ - wait_for_connection:
diff --git a/test/integration/targets/connection_delegation/action_plugins/delegation_action.py b/test/integration/targets/connection_delegation/action_plugins/delegation_action.py
new file mode 100644
index 00000000..9d419e75
--- /dev/null
+++ b/test/integration/targets/connection_delegation/action_plugins/delegation_action.py
@@ -0,0 +1,12 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ return {
+ 'remote_password': self._connection.get_option('remote_password'),
+ }
diff --git a/test/integration/targets/connection_delegation/aliases b/test/integration/targets/connection_delegation/aliases
new file mode 100644
index 00000000..87caabdf
--- /dev/null
+++ b/test/integration/targets/connection_delegation/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group1
+skip/freebsd # No sshpass
+skip/osx # No sshpass
+skip/macos # No sshpass
+skip/rhel # No sshpass
diff --git a/test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py b/test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py
new file mode 100644
index 00000000..f61846cf
--- /dev/null
+++ b/test/integration/targets/connection_delegation/connection_plugins/delegation_connection.py
@@ -0,0 +1,45 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+author: Ansible Core Team
+connection: delegation_connection
+short_description: Test connection for delegated host check
+description:
+- Some further description that you don't care about.
+options:
+ remote_password:
+ description: The remote password
+ type: str
+ vars:
+ - name: ansible_password
+ # Tests that an aliased key gets the -k option which hardcodes the value to password
+ aliases:
+ - password
+"""
+
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+
+ transport = 'delegation_connection'
+ has_pipelining = True
+
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+
+ def _connect(self):
+ super(Connection, self)._connect()
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data, sudoable)
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ def close(self):
+ super(Connection, self).close()
diff --git a/test/integration/targets/connection_delegation/inventory.ini b/test/integration/targets/connection_delegation/inventory.ini
new file mode 100644
index 00000000..e7f846d3
--- /dev/null
+++ b/test/integration/targets/connection_delegation/inventory.ini
@@ -0,0 +1 @@
+my_host ansible_host=127.0.0.1 ansible_connection=delegation_connection
diff --git a/test/integration/targets/connection_delegation/runme.sh b/test/integration/targets/connection_delegation/runme.sh
new file mode 100755
index 00000000..eb26f7c5
--- /dev/null
+++ b/test/integration/targets/connection_delegation/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -ux
+
+echo "Checking if sshpass is present"
+which sshpass 2>&1 || exit 0
+echo "sshpass is present, continuing with test"
+
+sshpass -p my_password ansible-playbook -i inventory.ini test.yml -k "$@"
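The `which sshpass 2>&1 || exit 0` gate above turns a missing dependency into a soft skip (exit 0) rather than a failure. An equivalent POSIX-friendly gate, sketched with command -v instead of which:

    # command -v is the portable way to test for a binary on PATH
    if ! command -v sshpass > /dev/null 2>&1; then
        echo "sshpass not found, skipping test"
        exit 0
    fi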
diff --git a/test/integration/targets/connection_delegation/test.yml b/test/integration/targets/connection_delegation/test.yml
new file mode 100644
index 00000000..678bef51
--- /dev/null
+++ b/test/integration/targets/connection_delegation/test.yml
@@ -0,0 +1,23 @@
+---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: test connection receives -k from play_context when delegating
+ delegation_action:
+ delegate_to: my_host
+ register: result
+
+ - assert:
+ that:
+ - result.remote_password == 'my_password'
+
+ - name: ensure vars set for that host take precedence over -k
+ delegation_action:
+ delegate_to: my_host
+ vars:
+ ansible_password: other_password
+ register: result
+
+ - assert:
+ that:
+ - result.remote_password == 'other_password'
diff --git a/test/integration/targets/connection_local/aliases b/test/integration/targets/connection_local/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/connection_local/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/connection_local/runme.sh b/test/integration/targets/connection_local/runme.sh
new file mode 120000
index 00000000..70aa5dbd
--- /dev/null
+++ b/test/integration/targets/connection_local/runme.sh
@@ -0,0 +1 @@
+../connection_posix/test.sh \ No newline at end of file
diff --git a/test/integration/targets/connection_local/test_connection.inventory b/test/integration/targets/connection_local/test_connection.inventory
new file mode 100644
index 00000000..64a27227
--- /dev/null
+++ b/test/integration/targets/connection_local/test_connection.inventory
@@ -0,0 +1,7 @@
+[local]
+local-pipelining ansible_ssh_pipelining=true
+local-no-pipelining ansible_ssh_pipelining=false
+[local:vars]
+ansible_host=localhost
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/connection_paramiko_ssh/aliases b/test/integration/targets/connection_paramiko_ssh/aliases
new file mode 100644
index 00000000..ad44392e
--- /dev/null
+++ b/test/integration/targets/connection_paramiko_ssh/aliases
@@ -0,0 +1,5 @@
+needs/ssh
+shippable/posix/group3
+needs/target/setup_paramiko
+destructive # potentially installs/uninstalls OS packages via setup_paramiko
+skip/aix
diff --git a/test/integration/targets/connection_paramiko_ssh/runme.sh b/test/integration/targets/connection_paramiko_ssh/runme.sh
new file mode 100755
index 00000000..123f6e23
--- /dev/null
+++ b/test/integration/targets/connection_paramiko_ssh/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source ../setup_paramiko/setup.sh
+
+./test.sh
diff --git a/test/integration/targets/connection_paramiko_ssh/test.sh b/test/integration/targets/connection_paramiko_ssh/test.sh
new file mode 120000
index 00000000..70aa5dbd
--- /dev/null
+++ b/test/integration/targets/connection_paramiko_ssh/test.sh
@@ -0,0 +1 @@
+../connection_posix/test.sh \ No newline at end of file
diff --git a/test/integration/targets/connection_paramiko_ssh/test_connection.inventory b/test/integration/targets/connection_paramiko_ssh/test_connection.inventory
new file mode 100644
index 00000000..a3f34ab7
--- /dev/null
+++ b/test/integration/targets/connection_paramiko_ssh/test_connection.inventory
@@ -0,0 +1,7 @@
+[paramiko_ssh]
+paramiko_ssh-pipelining ansible_ssh_pipelining=true
+paramiko_ssh-no-pipelining ansible_ssh_pipelining=false
+[paramiko_ssh:vars]
+ansible_host=localhost
+ansible_connection=paramiko_ssh
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/connection_posix/aliases b/test/integration/targets/connection_posix/aliases
new file mode 100644
index 00000000..f5e09799
--- /dev/null
+++ b/test/integration/targets/connection_posix/aliases
@@ -0,0 +1,2 @@
+needs/target/connection
+hidden
diff --git a/test/integration/targets/connection_posix/test.sh b/test/integration/targets/connection_posix/test.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/test/integration/targets/connection_posix/test.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/test/integration/targets/connection_psrp/aliases b/test/integration/targets/connection_psrp/aliases
new file mode 100644
index 00000000..b3e9b8bc
--- /dev/null
+++ b/test/integration/targets/connection_psrp/aliases
@@ -0,0 +1,4 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
+needs/target/connection
diff --git a/test/integration/targets/connection_psrp/files/empty.txt b/test/integration/targets/connection_psrp/files/empty.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/connection_psrp/files/empty.txt
diff --git a/test/integration/targets/connection_psrp/runme.sh b/test/integration/targets/connection_psrp/runme.sh
new file mode 100755
index 00000000..35984bba
--- /dev/null
+++ b/test/integration/targets/connection_psrp/runme.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# make sure hosts are using psrp connections
+ansible -i ../../inventory.winrm localhost \
+ -m template \
+ -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \
+ "$@"
+
+python.py -m pip install pypsrp
+cd ../connection
+
+INVENTORY="${OUTPUT_DIR}/test_connection.inventory" ./test.sh \
+ -e target_hosts=windows \
+ -e action_prefix=win_ \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=c:/windows/temp/ansible-remote \
+ "$@"
+
+cd ../connection_psrp
+
+ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests.yml \
+ "$@"
diff --git a/test/integration/targets/connection_psrp/test_connection.inventory.j2 b/test/integration/targets/connection_psrp/test_connection.inventory.j2
new file mode 100644
index 00000000..d2d3a492
--- /dev/null
+++ b/test/integration/targets/connection_psrp/test_connection.inventory.j2
@@ -0,0 +1,9 @@
+[windows]
+{% for host in vars.groups.windows %}
+{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_port={{ hostvars[host]['ansible_port'] }} ansible_user={{ hostvars[host]['ansible_user'] }} ansible_password={{ hostvars[host]['ansible_password'] }}
+{% endfor %}
+
+[windows:vars]
+ansible_connection=psrp
+ansible_psrp_auth=negotiate
+ansible_psrp_cert_validation=ignore
diff --git a/test/integration/targets/connection_psrp/tests.yml b/test/integration/targets/connection_psrp/tests.yml
new file mode 100644
index 00000000..dabbf407
--- /dev/null
+++ b/test/integration/targets/connection_psrp/tests.yml
@@ -0,0 +1,133 @@
+---
+# these are extra tests for psrp that aren't covered under test/integration/targets/connection/*
+- name: test out psrp specific tests
+ hosts: windows
+ serial: 1
+ gather_facts: no
+
+ tasks:
+ - name: test complex objects in raw output
+ # until PyYAML is upgraded to 4.x we need to use the \U escape for a unicode codepoint
+ # and enclose it in quotes so the \U gets translated
+ raw: "
+ [PSCustomObject]@{string = 'string'};
+ [PSCustomObject]@{unicode = 'poo - \U0001F4A9'};
+ [PSCustomObject]@{integer = 1};
+ [PSCustomObject]@{list = @(1, 2)};
+ Get-Service -Name winrm;
+ Write-Output -InputObject 'string - \U0001F4A9';"
+ register: raw_out
+
+ - name: assert complex objects in raw output
+ assert:
+ that:
+ - raw_out.stdout_lines|count == 6
+ - "raw_out.stdout_lines[0] == 'string: string'"
+ - "raw_out.stdout_lines[1] == 'unicode: poo - \U0001F4A9'"
+ - "raw_out.stdout_lines[2] == 'integer: 1'"
+ - "raw_out.stdout_lines[3] == \"list: [1, 2]\""
+ - raw_out.stdout_lines[4] == "winrm"
+ - raw_out.stdout_lines[5] == "string - \U0001F4A9"
+
+ # Become only works on Server 2008 when running with basic auth, skip this host for now as it is too complicated to
+ # override the auth protocol in the tests.
+ - name: check if we are running on a host newer than Server 2008
+ win_shell: '[System.Environment]::OSVersion.Version -ge [Version]"6.1"'
+ register: os_version
+
+ - name: test out become with psrp
+ win_whoami:
+ when: os_version|bool
+ register: whoami_out
+ become: yes
+ become_method: runas
+ become_user: SYSTEM
+
+ - name: assert test out become with psrp
+ assert:
+ that:
+ - whoami_out.account.sid == "S-1-5-18"
+ when: os_version|bool
+
+ - name: test out async with psrp
+ win_shell: Start-Sleep -Seconds 2; Write-Output abc
+ async: 10
+ poll: 1
+ register: async_out
+
+ - name: assert test out async with psrp
+ assert:
+ that:
+ - async_out.stdout_lines == ["abc"]
+
+ - name: Output unicode characters from Powershell using PSRP
+ win_command: "powershell.exe -ExecutionPolicy ByPass -Command \"Write-Host '\U0001F4A9'\""
+ register: command_unicode_output
+
+ - name: Assert unicode output
+ assert:
+ that:
+ - command_unicode_output is changed
+ - command_unicode_output.rc == 0
+ - "command_unicode_output.stdout == '\U0001F4A9\n'"
+ - command_unicode_output.stderr == ''
+
+ - name: Output unicode characters from Powershell using PSRP
+ win_shell: "Write-Host '\U0001F4A9'"
+ register: shell_unicode_output
+
+ - name: Assert unicode output
+ assert:
+ that:
+ - shell_unicode_output is changed
+ - shell_unicode_output.rc == 0
+ - "shell_unicode_output.stdout == '\U0001F4A9\n'"
+ - shell_unicode_output.stderr == ''
+
+ - name: copy empty file
+ win_copy:
+ src: empty.txt
+ dest: C:\Windows\TEMP\empty.txt
+ register: copy_empty
+
+ - name: get result of copy empty file
+ win_stat:
+ path: C:\Windows\TEMP\empty.txt
+ get_checksum: yes
+ register: copy_empty_actual
+
+ - name: assert copy empty file
+ assert:
+ that:
+ - copy_empty.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+ - copy_empty_actual.stat.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+ - copy_empty_actual.stat.size == 0
+
+ - block:
+ - name: fetch empty file
+ fetch:
+ src: C:\Windows\TEMP\empty.txt
+ dest: /tmp/empty.txt
+ flat: yes
+ register: fetch_empty
+
+ - name: get result of fetch empty file
+ stat:
+ path: /tmp/empty.txt
+ get_checksum: yes
+ register: fetch_empty_actual
+ delegate_to: localhost
+
+ - name: assert fetch empty file
+ assert:
+ that:
+ - fetch_empty.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+ - fetch_empty_actual.stat.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+ - fetch_empty_actual.stat.size == 0
+
+ always:
+ - name: remove tmp file
+ file:
+ path: /tmp/empty.txt
+ state: absent
+ delegate_to: localhost
diff --git a/test/integration/targets/connection_ssh/aliases b/test/integration/targets/connection_ssh/aliases
new file mode 100644
index 00000000..1d822b45
--- /dev/null
+++ b/test/integration/targets/connection_ssh/aliases
@@ -0,0 +1,3 @@
+needs/ssh
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/connection_ssh/posix.sh b/test/integration/targets/connection_ssh/posix.sh
new file mode 120000
index 00000000..70aa5dbd
--- /dev/null
+++ b/test/integration/targets/connection_ssh/posix.sh
@@ -0,0 +1 @@
+../connection_posix/test.sh \ No newline at end of file
diff --git a/test/integration/targets/connection_ssh/runme.sh b/test/integration/targets/connection_ssh/runme.sh
new file mode 100755
index 00000000..e7b2b21f
--- /dev/null
+++ b/test/integration/targets/connection_ssh/runme.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+
+set -ux
+
+# We skip this whole section if the test node doesn't have sshpass on it.
+if command -v sshpass > /dev/null; then
+ # Check if our sshpass supports -P
+ sshpass -P foo > /dev/null
+ sshpass_supports_prompt=$?
+ if [[ $sshpass_supports_prompt -eq 0 ]]; then
+ # If the prompt is wrong, we'll end up hanging (due to sshpass hanging).
+ # We should probably do something better here, like timing out in Ansible,
+ # but this has been the behavior for a long time, before we supported custom
+ # password prompts.
+ #
+ # So we pass a custom password prompt that is clearly wrong and run
+ # ansible under timeout. If we time out, sshpass was waiting for our
+ # wrong prompt, which proves the flag actually reached sshpass. It's a
+ # weird way of doing things, but it works.
+ timeout 5 ansible -m ping \
+ -e ansible_connection=ssh \
+ -e ansible_sshpass_prompt=notThis: \
+ -e ansible_password=foo \
+ -e ansible_user=definitelynotroot \
+ -i test_connection.inventory \
+ ssh-pipelining
+ ret=$?
+ if [[ $ret -ne 124 ]]; then
+ echo "Expected to time out and we did not. Exiting with failure."
+ exit 1
+ fi
+ else
+ ansible -m ping \
+ -e ansible_connection=ssh \
+ -e ansible_sshpass_prompt=notThis: \
+ -e ansible_password=foo \
+ -e ansible_user=definitelynotroot \
+ -i test_connection.inventory \
+ ssh-pipelining | grep 'customized password prompts'
+ ret=$?
+ [[ $ret -eq 0 ]] || exit $ret
+ fi
+fi
+
+set -e
+
+# temporary work-around for issues due to new scp filename checking
+# https://github.com/ansible/ansible/issues/52640
+if [[ "$(scp -T 2>&1)" == "usage: scp "* ]]; then
+ # scp supports the -T option
+ # work-around required
+ scp_args=("-e" "ansible_scp_extra_args=-T")
+else
+ # scp does not support the -T option
+ # no work-around required
+ # however we need to put something in the array to keep older versions of bash happy
+ scp_args=("-e" "")
+fi
+
+# sftp
+./posix.sh "$@"
+# scp
+ANSIBLE_SCP_IF_SSH=true ./posix.sh "$@" "${scp_args[@]}"
+# piped
+ANSIBLE_SSH_TRANSFER_METHOD=piped ./posix.sh "$@"
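The scp probe above leans on how OpenSSH reports the -T flag: a client new enough to accept -T goes straight to its usage text when given no files, while an older client complains about the unknown option first, so only the newer output begins with 'usage: scp '. Checking by hand (exact wording varies by version):

    scp -T 2>&1 | head -n1
    # newer OpenSSH: usage: scp [-346BCpqrTv] ...
    # older OpenSSH: unknown option -- T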
diff --git a/test/integration/targets/connection_ssh/test_connection.inventory b/test/integration/targets/connection_ssh/test_connection.inventory
new file mode 100644
index 00000000..a1a4ff1e
--- /dev/null
+++ b/test/integration/targets/connection_ssh/test_connection.inventory
@@ -0,0 +1,7 @@
+[ssh]
+ssh-pipelining ansible_ssh_pipelining=true
+ssh-no-pipelining ansible_ssh_pipelining=false
+[ssh:vars]
+ansible_host=localhost
+ansible_connection=ssh
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/connection_windows_ssh/aliases b/test/integration/targets/connection_windows_ssh/aliases
new file mode 100644
index 00000000..45a48f09
--- /dev/null
+++ b/test/integration/targets/connection_windows_ssh/aliases
@@ -0,0 +1,6 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
+skip/windows/2008 # Windows Server 2008 does not support Win32-OpenSSH
+needs/target/connection
+needs/target/setup_remote_tmp_dir
diff --git a/test/integration/targets/connection_windows_ssh/runme.sh b/test/integration/targets/connection_windows_ssh/runme.sh
new file mode 100755
index 00000000..488bb7c5
--- /dev/null
+++ b/test/integration/targets/connection_windows_ssh/runme.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# We need to run these tests with both the powershell and cmd shell type
+
+### cmd tests - no DefaultShell set ###
+ansible -i ../../inventory.winrm localhost \
+ -m template \
+ -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \
+ -e "test_shell_type=cmd" \
+ "$@"
+
+# https://github.com/PowerShell/Win32-OpenSSH/wiki/DefaultShell
+ansible -i ../../inventory.winrm windows \
+ -m win_regedit \
+ -a "path=HKLM:\\\\SOFTWARE\\\\OpenSSH name=DefaultShell state=absent" \
+ "$@"
+
+# Need to flush the connection to ensure we get a new shell for the next tests
+ansible -i "${OUTPUT_DIR}/test_connection.inventory" windows \
+ -m meta -a "reset_connection" \
+ "$@"
+
+# sftp
+./windows.sh "$@"
+# scp
+ANSIBLE_SCP_IF_SSH=true ./windows.sh "$@"
+# other tests not part of the generic connection test framework
+ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests.yml \
+ "$@"
+
+### powershell tests - explicit DefaultShell set ###
+# we do this last as the default shell on our CI instances is set to PowerShell
+ansible -i ../../inventory.winrm localhost \
+ -m template \
+ -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \
+ -e "test_shell_type=powershell" \
+ "$@"
+
+# ensure the default shell is set to PowerShell
+ansible -i ../../inventory.winrm windows \
+ -m win_regedit \
+ -a "path=HKLM:\\\\SOFTWARE\\\\OpenSSH name=DefaultShell data=C:\\\\Windows\\\\System32\\\\WindowsPowerShell\\\\v1.0\\\\powershell.exe" \
+ "$@"
+
+ansible -i "${OUTPUT_DIR}/test_connection.inventory" windows \
+ -m meta -a "reset_connection" \
+ "$@"
+
+./windows.sh "$@"
+ANSIBLE_SCP_IF_SSH=true ./windows.sh "$@"
+ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests.yml \
+ "$@"
diff --git a/test/integration/targets/connection_windows_ssh/test_connection.inventory.j2 b/test/integration/targets/connection_windows_ssh/test_connection.inventory.j2
new file mode 100644
index 00000000..5893eafe
--- /dev/null
+++ b/test/integration/targets/connection_windows_ssh/test_connection.inventory.j2
@@ -0,0 +1,12 @@
+[windows]
+{% for host in vars.groups.windows %}
+{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user={{ hostvars[host]['ansible_user'] }}{{ ' ansible_ssh_private_key_file=' ~ hostvars[host]['ansible_ssh_private_key_file'] if (hostvars[host]['ansible_ssh_private_key_file']|default()) else '' }}
+{% endfor %}
+
+[windows:vars]
+ansible_shell_type={{ test_shell_type }}
+ansible_connection=ssh
+ansible_port=22
+# used to preserve the existing environment and not touch existing files
+ansible_ssh_extra_args="-o UserKnownHostsFile=/dev/null"
+ansible_ssh_host_key_checking=False
diff --git a/test/integration/targets/connection_windows_ssh/tests.yml b/test/integration/targets/connection_windows_ssh/tests.yml
new file mode 100644
index 00000000..e9b538b4
--- /dev/null
+++ b/test/integration/targets/connection_windows_ssh/tests.yml
@@ -0,0 +1,32 @@
+---
+- name: test out Windows SSH specific tests
+ hosts: windows
+ serial: 1
+ gather_facts: no
+
+ tasks:
+ - name: test out become with Windows SSH
+ win_whoami:
+ register: win_ssh_become
+ become: yes
+ become_method: runas
+ become_user: SYSTEM
+
+ - name: assert test out become with Windows SSH
+ assert:
+ that:
+ - win_ssh_become.account.sid == "S-1-5-18"
+
+ - name: test out async with Windows SSH
+ win_shell: Write-Host café
+ async: 20
+ poll: 3
+ register: win_ssh_async
+
+ - name: assert test out async with Windows SSH
+ assert:
+ that:
+ - win_ssh_async is changed
+ - win_ssh_async.rc == 0
+ - win_ssh_async.stdout == "café\n"
+ - win_ssh_async.stderr == ""
diff --git a/test/integration/targets/connection_windows_ssh/tests_fetch.yml b/test/integration/targets/connection_windows_ssh/tests_fetch.yml
new file mode 100644
index 00000000..0b4fe949
--- /dev/null
+++ b/test/integration/targets/connection_windows_ssh/tests_fetch.yml
@@ -0,0 +1,41 @@
+# This must be a play as we need to invoke it with the ANSIBLE_SCP_IF_SSH env
+# var to control the mechanism used. Unfortunately, while ansible_scp_if_ssh is
+# documented, it isn't actually honored, hence the separate invocation.
+---
+- name: further fetch tests with metachar characters in filename
+ hosts: windows
+ force_handlers: yes
+ serial: 1
+ gather_facts: no
+
+ tasks:
+ - name: setup remote tmp dir
+ import_role:
+ name: ../../setup_remote_tmp_dir
+
+ - name: create remote file with metachar in name
+ win_copy:
+ content: some content
+ dest: '{{ remote_tmp_dir }}\file ^with &whoami'
+
+ - name: test fetch against a file with cmd metacharacters
+ block:
+ - name: fetch file with metachar in name
+ fetch:
+ src: '{{ remote_tmp_dir }}\file ^with &whoami'
+ dest: ansible-test.txt
+ flat: yes
+ register: fetch_res
+
+ - name: assert fetch file with metachar in name
+ assert:
+ that:
+ - fetch_res is changed
+ - fetch_res.checksum == '94e66df8cd09d410c62d9e0dc59d3a884e458e05'
+
+ always:
+ - name: remove local copy of file
+ file:
+ path: ansible-test.txt
+ state: absent
+ delegate_to: localhost
diff --git a/test/integration/targets/connection_windows_ssh/windows.sh b/test/integration/targets/connection_windows_ssh/windows.sh
new file mode 100755
index 00000000..d2db50f8
--- /dev/null
+++ b/test/integration/targets/connection_windows_ssh/windows.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+set -eux
+
+cd ../connection
+
+# A recent patch to OpenSSH causes a validation error when running through Ansible: if the path is quoted,
+# scp fails with 'protocol error: filename does not match request'. We currently work around this by setting
+# 'ansible_scp_extra_args=-T' to disable that check, but the workaround should be removed once the bug is
+# fixed and our test container has been updated.
+# https://unix.stackexchange.com/questions/499958/why-does-scps-strict-filename-checking-reject-quoted-last-component-but-not-oth
+# https://github.com/openssh/openssh-portable/commit/391ffc4b9d31fa1f4ad566499fef9176ff8a07dc
+INVENTORY="${OUTPUT_DIR}/test_connection.inventory" ./test.sh \
+ -e target_hosts=windows \
+ -e action_prefix=win_ \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=c:/windows/temp/ansible-remote \
+ -e ansible_scp_extra_args=-T \
+ "$@"
+
+cd ../connection_windows_ssh
+
+ansible-playbook -i "${OUTPUT_DIR}/test_connection.inventory" tests_fetch.yml \
+ -e ansible_scp_extra_args=-T \
+ "$@"
diff --git a/test/integration/targets/connection_winrm/aliases b/test/integration/targets/connection_winrm/aliases
new file mode 100644
index 00000000..b3e9b8bc
--- /dev/null
+++ b/test/integration/targets/connection_winrm/aliases
@@ -0,0 +1,4 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
+needs/target/connection
diff --git a/test/integration/targets/connection_winrm/runme.sh b/test/integration/targets/connection_winrm/runme.sh
new file mode 100755
index 00000000..e6772415
--- /dev/null
+++ b/test/integration/targets/connection_winrm/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# make sure hosts are using winrm connections
+ansible -i ../../inventory.winrm localhost \
+ -m template \
+ -a "src=test_connection.inventory.j2 dest=${OUTPUT_DIR}/test_connection.inventory" \
+ "$@"
+
+cd ../connection
+
+INVENTORY="${OUTPUT_DIR}/test_connection.inventory" ./test.sh \
+ -e target_hosts=windows \
+ -e action_prefix=win_ \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=c:/windows/temp/ansible-remote \
+ "$@"
diff --git a/test/integration/targets/connection_winrm/test_connection.inventory.j2 b/test/integration/targets/connection_winrm/test_connection.inventory.j2
new file mode 100644
index 00000000..7c4f3dc9
--- /dev/null
+++ b/test/integration/targets/connection_winrm/test_connection.inventory.j2
@@ -0,0 +1,10 @@
+[windows]
+{% for host in vars.groups.windows %}
+{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_port={{ hostvars[host]['ansible_port'] }} ansible_user={{ hostvars[host]['ansible_user'] }} ansible_password={{ hostvars[host]['ansible_password'] }}
+{% endfor %}
+
+[windows:vars]
+ansible_connection=winrm
+# we don't know if we're using an encrypted connection or not, so we'll use message encryption
+ansible_winrm_transport=ntlm
+ansible_winrm_server_cert_validation=ignore
diff --git a/test/integration/targets/copy/aliases b/test/integration/targets/copy/aliases
new file mode 100644
index 00000000..db9bbd8c
--- /dev/null
+++ b/test/integration/targets/copy/aliases
@@ -0,0 +1,4 @@
+needs/root
+shippable/posix/group2
+destructive
+skip/aix
diff --git a/test/integration/targets/copy/defaults/main.yml b/test/integration/targets/copy/defaults/main.yml
new file mode 100644
index 00000000..8e9a5836
--- /dev/null
+++ b/test/integration/targets/copy/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+remote_unprivileged_user: tmp_ansible_test_user
diff --git a/test/integration/targets/copy/files/foo.txt b/test/integration/targets/copy/files/foo.txt
new file mode 100644
index 00000000..7c6ded14
--- /dev/null
+++ b/test/integration/targets/copy/files/foo.txt
@@ -0,0 +1 @@
+foo.txt
diff --git a/test/integration/targets/copy/files/subdir/bar.txt b/test/integration/targets/copy/files/subdir/bar.txt
new file mode 100644
index 00000000..76018072
--- /dev/null
+++ b/test/integration/targets/copy/files/subdir/bar.txt
@@ -0,0 +1 @@
+baz
diff --git a/test/integration/targets/copy/files/subdir/subdir1/bar.txt b/test/integration/targets/copy/files/subdir/subdir1/bar.txt
new file mode 120000
index 00000000..315e865d
--- /dev/null
+++ b/test/integration/targets/copy/files/subdir/subdir1/bar.txt
@@ -0,0 +1 @@
+../bar.txt \ No newline at end of file
diff --git a/test/integration/targets/copy/files/subdir/subdir2/baz.txt b/test/integration/targets/copy/files/subdir/subdir2/baz.txt
new file mode 100644
index 00000000..76018072
--- /dev/null
+++ b/test/integration/targets/copy/files/subdir/subdir2/baz.txt
@@ -0,0 +1 @@
+baz
diff --git a/test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt b/test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt
new file mode 100644
index 00000000..78df5b06
--- /dev/null
+++ b/test/integration/targets/copy/files/subdir/subdir2/subdir3/subdir4/qux.txt
@@ -0,0 +1 @@
+qux \ No newline at end of file
diff --git a/test/integration/targets/copy/meta/main.yml b/test/integration/targets/copy/meta/main.yml
new file mode 100644
index 00000000..06d4fd29
--- /dev/null
+++ b/test/integration/targets/copy/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_nobody
diff --git a/test/integration/targets/copy/tasks/acls.yml b/test/integration/targets/copy/tasks/acls.yml
new file mode 100644
index 00000000..9a3be9b0
--- /dev/null
+++ b/test/integration/targets/copy/tasks/acls.yml
@@ -0,0 +1,33 @@
+- block:
+ - block:
+ - name: Testing ACLs
+ copy:
+ content: "TEST"
+ mode: 0644
+ dest: "~/test.txt"
+
+ - shell: getfacl ~/test.txt
+ register: acls
+
+ become: yes
+ become_user: "{{ remote_unprivileged_user }}"
+
+ - name: Check that there are no ACLs leftovers
+ assert:
+ that:
+ - "'user:{{ remote_unprivileged_user }}:r-x\t#effective:r--' not in acls.stdout_lines"
+
+ - name: Check that permissions match with what was set in the mode param
+ assert:
+ that:
+ - "'user::rw-' in acls.stdout_lines"
+ - "'group::r--' in acls.stdout_lines"
+ - "'other::r--' in acls.stdout_lines"
+
+ always:
+ - name: Clean up
+ file:
+ path: "~/test.txt"
+ state: absent
+ become: yes
+ become_user: "{{ remote_unprivileged_user }}"
diff --git a/test/integration/targets/copy/tasks/check_mode.yml b/test/integration/targets/copy/tasks/check_mode.yml
new file mode 100644
index 00000000..5b405cc4
--- /dev/null
+++ b/test/integration/targets/copy/tasks/check_mode.yml
@@ -0,0 +1,126 @@
+- block:
+
+ - name: check_mode - Create another clean copy of 'subdir' not messed with by previous tests (check_mode)
+ copy:
+ src: subdir
+ dest: 'checkmode_subdir/'
+ directory_mode: 0700
+ local_follow: False
+ check_mode: true
+ register: check_mode_subdir_first
+
+ - name: check_mode - Stat the new dir to make sure it really doesn't exist
+ stat:
+ path: 'checkmode_subdir/'
+ register: check_mode_subdir_first_stat
+
+ - name: check_mode - Actually do it
+ copy:
+ src: subdir
+ dest: 'checkmode_subdir/'
+ directory_mode: 0700
+ local_follow: False
+ register: check_mode_subdir_real
+
+ - name: check_mode - Stat the new dir to make sure it really exists
+ stat:
+ path: 'checkmode_subdir/'
+ register: check_mode_subdir_real_stat
+
+ # Quick sanity before we move on
+ - assert:
+ that:
+ - check_mode_subdir_first is changed
+ - not check_mode_subdir_first_stat.stat.exists
+ - check_mode_subdir_real is changed
+ - check_mode_subdir_real_stat.stat.exists
+
+ # Do some finagling here. First, use check_mode to ensure it never gets
+ # created. Then actually create it, and use check_mode to ensure that doing
+ # the same copy gets marked as no change.
+ #
+ # This same pattern repeats for several other src/dest combinations.
+ - name: check_mode - Ensure dest with trailing / never gets created but would be without check_mode
+ copy:
+ remote_src: true
+ src: 'checkmode_subdir/'
+ dest: 'destdir_should_never_exist_because_of_check_mode/'
+ follow: true
+ check_mode: true
+ register: check_mode_trailing_slash_first
+
+ - name: check_mode - Stat the new dir to make sure it really doesn't exist
+ stat:
+ path: 'destdir_should_never_exist_because_of_check_mode/'
+ register: check_mode_trailing_slash_first_stat
+
+ - name: check_mode - Create the above copy for real now (without check_mode)
+ copy:
+ remote_src: true
+ src: 'checkmode_subdir/'
+ dest: 'destdir_should_never_exist_because_of_check_mode/'
+ register: check_mode_trailing_slash_real
+
+ - name: check_mode - Stat the new dir to make sure it really exists
+ stat:
+ path: 'destdir_should_never_exist_because_of_check_mode/'
+ register: check_mode_trailing_slash_real_stat
+
+ - name: check_mode - Do the same copy yet again (with check_mode this time) to ensure it's marked unchanged
+ copy:
+ remote_src: true
+ src: 'checkmode_subdir/'
+ dest: 'destdir_should_never_exist_because_of_check_mode/'
+ check_mode: true
+ register: check_mode_trailing_slash_second
+
+ # Repeat the same basic pattern here.
+
+ - name: check_mode - Do another basic copy (with check_mode)
+ copy:
+ src: foo.txt
+ dest: "{{ remote_dir }}/foo-check_mode.txt"
+ mode: 0444
+ check_mode: true
+ register: check_mode_foo_first
+
+ - name: check_mode - Stat the new file to make sure it really doesn't exist
+ stat:
+ path: "{{ remote_dir }}/foo-check_mode.txt"
+ register: check_mode_foo_first_stat
+
+ - name: check_mode - Do the same basic copy (without check_mode)
+ copy:
+ src: foo.txt
+ dest: "{{ remote_dir }}/foo-check_mode.txt"
+ mode: 0444
+ register: check_mode_foo_real
+
+ - name: check_mode - Stat the new file to make sure it really exists
+ stat:
+ path: "{{ remote_dir }}/foo-check_mode.txt"
+ register: check_mode_foo_real_stat
+
+ - name: check_mode - And again (with check_mode)
+ copy:
+ src: foo.txt
+ dest: "{{ remote_dir }}/foo-check_mode.txt"
+ mode: 0444
+ register: check_mode_foo_second
+
+ - assert:
+ that:
+ - check_mode_subdir_first is changed
+
+ - check_mode_trailing_slash_first is changed
+ # TODO: This is a legitimate bug
+ #- not check_mode_trailing_slash_first_stat.stat.exists
+ - check_mode_trailing_slash_real is changed
+ - check_mode_trailing_slash_real_stat.stat.exists
+ - check_mode_trailing_slash_second is not changed
+
+ - check_mode_foo_first is changed
+ - not check_mode_foo_first_stat.stat.exists
+ - check_mode_foo_real is changed
+ - check_mode_foo_real_stat.stat.exists
+ - check_mode_foo_second is not changed
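The check/stat/real/check cycle above is the standard way to prove that check mode both predicts a change and prevents it. The same cycle can be reproduced ad hoc from a shell; a sketch, with /tmp/cm.txt as a throwaway path:

    ansible localhost -m copy -a 'content=x dest=/tmp/cm.txt' --check   # reports changed
    test ! -f /tmp/cm.txt              # but nothing was actually written
    ansible localhost -m copy -a 'content=x dest=/tmp/cm.txt'           # really writes it
    ansible localhost -m copy -a 'content=x dest=/tmp/cm.txt' --check   # now reports no change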
diff --git a/test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml b/test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml
new file mode 100644
index 00000000..c86caa1e
--- /dev/null
+++ b/test/integration/targets/copy/tasks/dest_in_non_existent_directories.yml
@@ -0,0 +1,29 @@
+# src is a file, dest is a non-existent directory (2 levels of directories):
+# checks that dest is created
+- name: Ensure that dest top directory doesn't exist
+ file:
+ path: '{{ remote_dir }}/{{ item.dest.split("/")[0] }}'
+ state: absent
+
+- name: Copy file, dest is a nonexistent target directory
+ copy:
+ src: '{{ item.src }}'
+ dest: '{{ remote_dir }}/{{ item.dest }}'
+ register: copy_result
+
+- name: assert copy worked
+ assert:
+ that:
+ - 'copy_result is successful'
+ - 'copy_result is changed'
+
+- name: stat copied file
+ stat:
+ path: '{{ remote_dir }}/{{ item.check }}'
+ register: stat_file_in_dir_result
+
+- name: assert that file exists
+ assert:
+ that:
+ - stat_file_in_dir_result.stat.exists
+ - stat_file_in_dir_result.stat.isreg
diff --git a/test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml b/test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml
new file mode 100644
index 00000000..fad53e71
--- /dev/null
+++ b/test/integration/targets/copy/tasks/dest_in_non_existent_directories_remote_src.yml
@@ -0,0 +1,43 @@
+# src is a file, dest is a non-existent directory (2 levels of directories):
+# checks that dest is created
+- name: Ensure that dest top directory doesn't exist
+ file:
+ path: '{{ remote_dir }}/{{ item.dest.split("/")[0] }}'
+ state: absent
+
+- name: create subdir
+ file:
+ path: subdir
+ state: directory
+
+- name: create src file
+ file:
+ path: "{{ item }}"
+ state: touch
+ loop:
+ - foo.txt
+ - subdir/bar.txt
+
+- name: Copy file, dest is a nonexistent target directory
+ copy:
+ src: '{{ item.src }}'
+ dest: '{{ remote_dir }}/{{ item.dest }}'
+ remote_src: true
+ register: copy_result
+
+- name: assert copy worked
+ assert:
+ that:
+ - 'copy_result is successful'
+ - 'copy_result is changed'
+
+- name: stat copied file
+ stat:
+ path: '{{ remote_dir }}/{{ item.check }}'
+ register: stat_file_in_dir_result
+
+- name: assert that file exists
+ assert:
+ that:
+ - stat_file_in_dir_result.stat.exists
+ - stat_file_in_dir_result.stat.isreg
diff --git a/test/integration/targets/copy/tasks/main.yml b/test/integration/targets/copy/tasks/main.yml
new file mode 100644
index 00000000..33a92bf9
--- /dev/null
+++ b/test/integration/targets/copy/tasks/main.yml
@@ -0,0 +1,117 @@
+- block:
+
+ - name: Create a local temporary directory
+ shell: mktemp -d /tmp/ansible_test.XXXXXXXXX
+ register: tempfile_result
+ delegate_to: localhost
+
+ - set_fact:
+ local_temp_dir: '{{ tempfile_result.stdout }}'
+ remote_dir: '{{ output_dir }}'
+ symlinks:
+ ansible-test-abs-link: /tmp/ansible-test-abs-link
+ ansible-test-abs-link-dir: /tmp/ansible-test-abs-link-dir
+ circles: ../
+ invalid: invalid
+ invalid2: ../invalid
+ out_of_tree_circle: /tmp/ansible-test-link-dir/out_of_tree_circle
+ subdir3: ../subdir2/subdir3
+
+ - file: path={{local_temp_dir}} state=directory
+ name: ensure temp dir exists
+
+ # the file module cannot create these symlinks properly, use command instead
+ - name: Create symbolic link
+ command: "ln -s '{{ item.value }}' '{{ item.key }}'"
+ args:
+ chdir: '{{role_path}}/files/subdir/subdir1'
+ warn: no
+ with_dict: "{{ symlinks }}"
+
+ - name: Create unprivileged remote user
+ user:
+ name: '{{ remote_unprivileged_user }}'
+ register: user
+
+ - name: Check sudoers dir
+ stat:
+ path: /etc/sudoers.d
+ register: etc_sudoers
+
+ - name: Set sudoers.d path fact
+ set_fact:
+ sudoers_d_file: "{{ '/etc/sudoers.d' if etc_sudoers.stat.exists else '/usr/local/etc/sudoers.d' }}/{{ remote_unprivileged_user }}"
+
+ - name: Create sudoers file
+ copy:
+ dest: "{{ sudoers_d_file }}"
+ content: "{{ remote_unprivileged_user }} ALL=(ALL) NOPASSWD: ALL"
+
+ - file:
+ path: "{{ user.home }}/.ssh"
+ owner: '{{ remote_unprivileged_user }}'
+ state: directory
+ mode: 0700
+
+ - name: Duplicate authorized_keys
+ copy:
+ src: $HOME/.ssh/authorized_keys
+ dest: '{{ user.home }}/.ssh/authorized_keys'
+ owner: '{{ remote_unprivileged_user }}'
+ mode: 0600
+ remote_src: yes
+
+ - file:
+ path: "{{ remote_dir }}"
+ state: directory
+ remote_user: '{{ remote_unprivileged_user }}'
+
+ # execute test tasks as an unprivileged user; this is useful to avoid
+ # local/remote ambiguity when controller and managed hosts are identical.
+ - import_tasks: tests.yml
+ remote_user: '{{ remote_unprivileged_user }}'
+
+ - import_tasks: acls.yml
+ when: ansible_system == 'Linux'
+
+ - import_tasks: no_log.yml
+
+ - import_tasks: check_mode.yml
+
+ # https://github.com/ansible/ansible/issues/57618
+ - name: Test diff contents
+ copy:
+ content: 'Ansible managed\n'
+ dest: "{{ local_temp_dir }}/file.txt"
+ diff: yes
+ register: diff_output
+
+ - assert:
+ that:
+ - 'diff_output.diff[0].before == ""'
+ - '"Ansible managed" in diff_output.diff[0].after'
+
+ always:
+ - name: Cleaning
+ file:
+ path: '{{ local_temp_dir }}'
+ state: absent
+ delegate_to: localhost
+
+ - name: Remove symbolic link
+ file:
+ path: '{{ role_path }}/files/subdir/subdir1/{{ item.key }}'
+ state: absent
+ delegate_to: localhost
+ with_dict: "{{ symlinks }}"
+
+ - name: Remove unprivileged remote user
+ user:
+ name: '{{ remote_unprivileged_user }}'
+ state: absent
+ remove: yes
+
+ - name: Remove sudoers.d file
+ file:
+ path: "{{ sudoers_d_file }}"
+ state: absent
diff --git a/test/integration/targets/copy/tasks/no_log.yml b/test/integration/targets/copy/tasks/no_log.yml
new file mode 100644
index 00000000..980c3177
--- /dev/null
+++ b/test/integration/targets/copy/tasks/no_log.yml
@@ -0,0 +1,82 @@
+- block:
+
+ - set_fact:
+ dest: "{{ local_temp_dir }}/test_no_log"
+
+ - name: ensure playbook and dest files don't exist yet
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ local_temp_dir }}/test_no_log.yml"
+ - "{{ dest }}"
+
+ - name: create a playbook to run with command
+ copy:
+ dest: "{{local_temp_dir}}/test_no_log.yml"
+ content: !unsafe |
+ ---
+ - hosts: localhost
+ gather_facts: no
+ tasks:
+ - copy:
+ dest: "{{ dest }}"
+ content: "{{ secret }}"
+
+ - name: copy the secret while using -vvv and check mode
+ command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}} --check"
+ register: result
+
+ - assert:
+ that:
+ - "'SECRET' not in result.stdout"
+
+ - name: copy the secret while using -vvv
+ command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}}"
+ register: result
+
+ - assert:
+ that:
+ - "'SECRET' not in result.stdout"
+
+ - name: copy the secret while using -vvv and check mode again
+ command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}} --check"
+ register: result
+
+ - assert:
+ that:
+ - "'SECRET' not in result.stdout"
+
+ - name: copy the secret while using -vvv again
+ command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=SECRET -e dest={{dest}}"
+ register: result
+
+ - assert:
+ that:
+ - "'SECRET' not in result.stdout"
+
+ - name: copy a new secret while using -vvv and check mode
+ command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=NEWSECRET -e dest={{dest}} --check"
+ register: result
+
+ - assert:
+ that:
+ - "'NEWSECRET' not in result.stdout"
+
+ - name: copy a new secret while using -vvv
+ command: "ansible-playbook {{local_temp_dir}}/test_no_log.yml -vvv -e secret=NEWSECRET -e dest={{dest}}"
+ register: result
+
+ - assert:
+ that:
+ - "'NEWSECRET' not in result.stdout"
+
+ always:
+
+ - name: remove temp test files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ local_temp_dir }}/test_no_log.yml"
+ - "{{ dest }}"
diff --git a/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml
new file mode 100644
index 00000000..f4ab9998
--- /dev/null
+++ b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir.yml
@@ -0,0 +1,26 @@
+- name: Ensure that dest top directory doesn't exist
+ file:
+ path: '{{ remote_dir }}/{{ dest.split("/")[0] }}'
+ state: absent
+
+- name: Copy file, dest is a file in non-existing target directory
+ copy:
+ src: foo.txt
+ dest: '{{ remote_dir }}/{{ dest }}'
+ register: copy_result
+ ignore_errors: True
+
+- name: Assert copy failed
+ assert:
+ that:
+ - 'copy_result is failed'
+
+- name: Stat dest path
+ stat:
+ path: '{{ remote_dir }}/{{ dest.split("/")[0] }}'
+ register: stat_file_in_dir_result
+
+- name: assert that dest doesn't exist
+ assert:
+ that:
+ - 'not stat_file_in_dir_result.stat.exists'
diff --git a/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml
new file mode 100644
index 00000000..61d87969
--- /dev/null
+++ b/test/integration/targets/copy/tasks/src_file_dest_file_in_non_existent_dir_remote_src.yml
@@ -0,0 +1,32 @@
+- name: Ensure that dest top directory doesn't exist
+ file:
+ path: '{{ remote_dir }}/{{ dest.split("/")[0] }}'
+ state: absent
+
+- name: create src file
+ file:
+ path: foo.txt
+ state: touch
+
+- name: Copy file, dest is a file in non-existing target directory
+ copy:
+ src: foo.txt
+ dest: '{{ remote_dir }}/{{ dest }}'
+ remote_src: true
+ register: copy_result
+ ignore_errors: True
+
+- name: Assert copy failed
+ assert:
+ that:
+ - 'copy_result is failed'
+
+- name: Stat dest path
+ stat:
+ path: '{{ remote_dir }}/{{ dest.split("/")[0] }}'
+ register: stat_file_in_dir_result
+
+- name: assert that dest doesn't exist
+ assert:
+ that:
+ - 'not stat_file_in_dir_result.stat.exists'
diff --git a/test/integration/targets/copy/tasks/tests.yml b/test/integration/targets/copy/tasks/tests.yml
new file mode 100644
index 00000000..be955317
--- /dev/null
+++ b/test/integration/targets/copy/tasks/tests.yml
@@ -0,0 +1,2261 @@
+# test code for the copy module and action plugin
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017, Ansible Project
+#
+# GNU General Public License v3 or later (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt )
+#
+
+- name: Record the output directory
+ set_fact:
+ remote_file: "{{ remote_dir }}/foo.txt"
+
+- name: Initiate a basic copy, and also test the mode
+ copy:
+ src: foo.txt
+ dest: "{{ remote_file }}"
+ mode: 0444
+ register: copy_result
+
+- name: Record the sha of the test file for later tests
+ set_fact:
+ remote_file_hash: "{{ copy_result['checksum'] }}"
+
+- name: Check the mode of the output file
+ file:
+ name: "{{ remote_file }}"
+ state: file
+ register: file_result_check
+
+- name: Assert the mode is correct
+ assert:
+ that:
+ - "file_result_check.mode == '0444'"
+
+# same as expanduser & expandvars
+- command: 'echo {{ remote_dir }}'
+ register: echo
+
+- set_fact:
+ remote_dir_expanded: '{{ echo.stdout }}'
+ remote_file_expanded: '{{ echo.stdout }}/foo.txt'
+
+- debug:
+ var: copy_result
+ verbosity: 1
+
+- name: Assert basic copy worked
+ assert:
+ that:
+ - "'changed' in copy_result"
+ - copy_result.dest == remote_file_expanded
+ - "'group' in copy_result"
+ - "'gid' in copy_result"
+ - "'checksum' in copy_result"
+ - "'owner' in copy_result"
+ - "'size' in copy_result"
+ - "'src' in copy_result"
+ - "'state' in copy_result"
+ - "'uid' in copy_result"
+
+- name: Verify that the file was marked as changed
+ assert:
+ that:
+ - "copy_result.changed == true"
+
+- name: Verify that the file checksums are correct
+ assert:
+ that:
+ - "copy_result.checksum == ('foo.txt\n'|hash('sha1'))"
+
+- name: Verify that the legacy md5sum is correct
+ assert:
+ that:
+ - "copy_result.md5sum == ('foo.txt\n'|hash('md5'))"
+ when: ansible_fips|bool != True
+
+- name: Check the stat results of the file
+ stat:
+ path: "{{ remote_file }}"
+ register: stat_results
+
+- debug:
+ var: stat_results
+ verbosity: 1
+
+- name: Assert the stat results are correct
+ assert:
+ that:
+ - "stat_results.stat.exists == true"
+ - "stat_results.stat.isblk == false"
+ - "stat_results.stat.isfifo == false"
+ - "stat_results.stat.isreg == true"
+ - "stat_results.stat.issock == false"
+ - "stat_results.stat.checksum == ('foo.txt\n'|hash('sha1'))"
+
+- name: Overwrite the file via same means
+ copy:
+ src: foo.txt
+ dest: "{{ remote_file }}"
+ decrypt: no
+ register: copy_result2
+
+- name: Assert that the file was not changed
+ assert:
+ that:
+ - "copy_result2 is not changed"
+
+- name: Assert basic copy worked
+ assert:
+ that:
+ - "'changed' in copy_result2"
+ - copy_result2.dest == remote_file_expanded
+ - "'group' in copy_result2"
+ - "'gid' in copy_result2"
+ - "'checksum' in copy_result2"
+ - "'owner' in copy_result2"
+ - "'size' in copy_result2"
+ - "'state' in copy_result2"
+ - "'uid' in copy_result2"
+
+- name: Overwrite the file using the content system
+ copy:
+ content: "modified"
+ dest: "{{ remote_file }}"
+ decrypt: no
+ register: copy_result3
+
+- name: Check the stat results of the file
+ stat:
+ path: "{{ remote_file }}"
+ register: stat_results
+
+- debug:
+ var: stat_results
+ verbosity: 1
+
+- name: Assert that the file has changed
+ assert:
+ that:
+ - "copy_result3 is changed"
+ - "'content' not in copy_result3"
+ - "stat_results.stat.checksum == ('modified'|hash('sha1'))"
+ - "stat_results.stat.mode != '0700'"
+
+- name: Overwrite the file again using the content system, also passing along file params
+ copy:
+ content: "modified"
+ dest: "{{ remote_file }}"
+ mode: 0700
+ decrypt: no
+ register: copy_result4
+
+- name: Check the stat results of the file
+ stat:
+ path: "{{ remote_file }}"
+ register: stat_results
+
+- debug:
+ var: stat_results
+ verbosity: 1
+
+- name: Assert that the file changed again and the new mode was applied
+ assert:
+ that:
+ - "copy_result4 is changed"
+ - "'content' not in copy_result4"
+ - "stat_results.stat.checksum == ('modified'|hash('sha1'))"
+ - "stat_results.stat.mode == '0700'"
+
+- name: Create a hardlink to the file
+ file:
+ src: '{{ remote_file }}'
+ dest: '{{ remote_dir }}/hard.lnk'
+ state: hard
+
+- name: copy the same contents into place
+ copy:
+ content: 'modified'
+ dest: '{{ remote_file }}'
+ mode: 0700
+ decrypt: no
+ register: copy_results
+
+- name: Check the stat results of the file
+ stat:
+ path: "{{ remote_file }}"
+ register: stat_results
+
+- name: Check the stat results of the hard link
+ stat:
+ path: "{{ remote_dir }}/hard.lnk"
+ register: hlink_results
+
+- name: Check that the file did not change
+ assert:
+ that:
+ - 'stat_results.stat.inode == hlink_results.stat.inode'
+ - 'copy_results.changed == False'
+ - "stat_results.stat.checksum == ('modified'|hash('sha1'))"
+
+- name: copy the same contents into place but change mode
+ copy:
+ content: 'modified'
+ dest: '{{ remote_file }}'
+ mode: 0404
+ decrypt: no
+ register: copy_results
+
+- name: Check the stat results of the file
+ stat:
+ path: "{{ remote_file }}"
+ register: stat_results
+
+- name: Check the stat results of the hard link
+ stat:
+ path: "{{ remote_dir }}/hard.lnk"
+ register: hlink_results
+
+- name: Check that the file changed permissions but is still the same
+ assert:
+ that:
+ - 'stat_results.stat.inode == hlink_results.stat.inode'
+ - 'copy_results.changed == True'
+ - 'stat_results.stat.mode == hlink_results.stat.mode'
+ - 'stat_results.stat.mode == "0404"'
+ - "stat_results.stat.checksum == ('modified'|hash('sha1'))"
+
+- name: copy different contents into place
+ copy:
+ content: 'adjusted'
+ dest: '{{ remote_file }}'
+ mode: 0404
+ register: copy_results
+
+- name: Check the stat results of the file
+ stat:
+ path: "{{ remote_file }}"
+ register: stat_results
+
+- name: Check the stat results of the hard link
+ stat:
+ path: "{{ remote_dir }}/hard.lnk"
+ register: hlink_results
+
+- name: Check that the file changed and hardlink was broken
+ assert:
+ that:
+ - 'stat_results.stat.inode != hlink_results.stat.inode'
+ - 'copy_results.changed == True'
+ - "stat_results.stat.checksum == ('adjusted'|hash('sha1'))"
+ - "hlink_results.stat.checksum == ('modified'|hash('sha1'))"
+
+- name: Try an invalid copy source location to ensure it fails
+ copy:
+ src: invalid_file_location_does_not_exist
+ dest: "{{ remote_dir }}/file.txt"
+ ignore_errors: True
+ register: failed_copy
+
+- name: Assert that invalid source failed
+ assert:
+ that:
+ - "failed_copy.failed"
+ - "'invalid_file_location_does_not_exist' in failed_copy.msg"
+
+- name: Try empty source to ensure it fails
+ copy:
+ src: ''
+ dest: "{{ remote_dir }}"
+ ignore_errors: True
+ register: failed_copy
+
+- debug:
+ var: failed_copy
+ verbosity: 1
+
+- name: Assert that empty source failed
+ assert:
+ that:
+ - failed_copy is failed
+ - "'src (or content) is required' in failed_copy.msg"
+
+- name: Try without destination to ensure it fails
+ copy:
+ src: foo.txt
+ ignore_errors: True
+ register: failed_copy
+
+- debug:
+ var: failed_copy
+ verbosity: 1
+
+- name: Assert that missing destination failed
+ assert:
+ that:
+ - failed_copy is failed
+ - "'dest is required' in failed_copy.msg"
+
+- name: Try without source to ensure it fails
+ copy:
+ dest: "{{ remote_file }}"
+ ignore_errors: True
+ register: failed_copy
+
+- debug:
+ var: failed_copy
+ verbosity: 1
+
+- name: Assert that missing source failed
+ assert:
+ that:
+ - failed_copy is failed
+ - "'src (or content) is required' in failed_copy.msg"
+
+- name: Try with both src and content to ensure it fails
+ copy:
+ src: foo.txt
+ content: testing
+ dest: "{{ remote_file }}"
+ ignore_errors: True
+ register: failed_copy
+
+- name: Assert that mutually exclusive parameters failed
+ assert:
+ that:
+ - failed_copy is failed
+ - "'mutually exclusive' in failed_copy.msg"
+
+- name: Try with content and directory as destination to ensure it fails
+ copy:
+ content: testing
+ dest: "{{ remote_dir }}"
+ ignore_errors: True
+ register: failed_copy
+
+- debug:
+ var: failed_copy
+ verbosity: 1
+
+- name: Assert that content and directory as destination failed
+ assert:
+ that:
+ - failed_copy is failed
+ - "'can not use content with a dir as dest' in failed_copy.msg"
+
+- name: Clean up
+ file:
+ path: "{{ remote_file }}"
+ state: absent
+
+- name: Copy source file to destination directory with mode
+ copy:
+ src: foo.txt
+ dest: "{{ remote_dir }}"
+ mode: 0500
+ register: copy_results
+
+- name: Check the stat results of the file
+ stat:
+ path: '{{ remote_file }}'
+ register: stat_results
+
+- debug:
+ var: stat_results
+ verbosity: 1
+
+- name: Assert that the file has changed
+ assert:
+ that:
+ - "copy_results is changed"
+ - "stat_results.stat.checksum == ('foo.txt\n'|hash('sha1'))"
+ - "stat_results.stat.mode == '0500'"
+
+# Test copy with mode=preserve
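+# mode=preserve should give the destination the same permission bits as the
+# source file, here the deliberately odd 0547.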
+- name: Create file and set perms to an odd value
+ copy:
+ content: "foo.txt\n"
+ dest: '{{ local_temp_dir }}/foo.txt'
+ mode: 0547
+ delegate_to: localhost
+
+- name: Copy with mode=preserve
+ copy:
+ src: '{{ local_temp_dir }}/foo.txt'
+ dest: '{{ remote_dir }}/copy-foo.txt'
+ mode: preserve
+ register: copy_results
+
+- name: Check the stat results of the file
+ stat:
+ path: '{{ remote_dir }}/copy-foo.txt'
+ register: stat_results
+
+- name: Assert that the file has changed and has correct mode
+ assert:
+ that:
+ - "copy_results is changed"
+ - "copy_results.mode == '0547'"
+ - "stat_results.stat.checksum == ('foo.txt\n'|hash('sha1'))"
+ - "stat_results.stat.mode == '0547'"
+
+- name: Test copy with mode=preserve and remote_src=True
+ copy:
+ src: '{{ remote_dir }}/copy-foo.txt'
+ dest: '{{ remote_dir }}/copy-foo2.txt'
+ mode: 'preserve'
+ remote_src: True
+ register: copy_results2
+
+- name: Check the stat results of the file
+ stat:
+ path: '{{ remote_dir }}/copy-foo2.txt'
+ register: stat_results2
+
+- name: Assert that the file has changed and has correct mode
+ assert:
+ that:
+ - "copy_results2 is changed"
+ - "copy_results2.mode == '0547'"
+ - "stat_results2.stat.checksum == ('foo.txt\n'|hash('sha1'))"
+ - "stat_results2.stat.mode == '0547'"
+
+#
+# test recursive copy local_follow=False, no trailing slash
+#
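+# A src without a trailing slash copies the directory itself into dest; with
+# a trailing slash only the directory's contents are copied (rsync-like).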
+
+- name: Create empty directory in the role we're copying from (git can't store empty dirs)
+ file:
+ path: '{{ role_path }}/files/subdir/subdira'
+ state: directory
+ delegate_to: localhost
+
+- name: Set the output subdirectory
+ set_fact:
+ remote_subdir: "{{ remote_dir }}/sub"
+
+- name: Make an output subdirectory
+ file:
+ name: "{{ remote_subdir }}"
+ state: directory
+
+- name: Setup link target for absolute link
+ copy:
+ dest: /tmp/ansible-test-abs-link
+ content: target
+ delegate_to: localhost
+
+- name: Setup link target dir for absolute link
+ file:
+ dest: /tmp/ansible-test-abs-link-dir
+ state: directory
+ delegate_to: localhost
+
+- name: Test recursive copy to directory no trailing slash, local_follow=False
+ copy:
+ src: subdir
+ dest: "{{ remote_subdir }}"
+ directory_mode: 0700
+ local_follow: False
+ register: recursive_copy_result
+
+- debug:
+ var: recursive_copy_result
+ verbosity: 1
+
+- name: Assert that the recursive copy did something
+ assert:
+ that:
+ - "recursive_copy_result is changed"
+
+- name: Check that a file in a directory was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/bar.txt"
+ register: stat_bar
+
+- name: Check that a file in a deeper directory was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/subdir2/baz.txt"
+ register: stat_bar2
+
+- name: Check that a file in a directory whose parent contains a directory alone was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/subdir2/subdir3/subdir4/qux.txt"
+ register: stat_bar3
+
+- name: Assert recursive copy files
+ assert:
+ that:
+ - "stat_bar.stat.exists"
+ - "stat_bar2.stat.exists"
+ - "stat_bar3.stat.exists"
+
+- name: Check symlink to absolute path
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link'
+ register: stat_abs_link
+
+- name: Check symlink to relative path
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/bar.txt'
+ register: stat_relative_link
+
+- name: Check symlink to self
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/invalid'
+ register: stat_self_link
+
+- name: Check symlink to nonexistent file
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/invalid2'
+ register: stat_invalid_link
+
+- name: Check symlink to directory in copy
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/subdir3'
+ register: stat_dir_in_copy_link
+
+- name: Check symlink to directory outside of copy
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link-dir'
+ register: stat_dir_outside_copy_link
+
+- name: Assert recursive copy symlinks local_follow=False
+ assert:
+ that:
+ - "stat_abs_link.stat.exists"
+ - "stat_abs_link.stat.islnk"
+ - "'/tmp/ansible-test-abs-link' == stat_abs_link.stat.lnk_target"
+ - "stat_relative_link.stat.exists"
+ - "stat_relative_link.stat.islnk"
+ - "'../bar.txt' == stat_relative_link.stat.lnk_target"
+ - "stat_self_link.stat.exists"
+ - "stat_self_link.stat.islnk"
+ - "'invalid' in stat_self_link.stat.lnk_target"
+ - "stat_invalid_link.stat.exists"
+ - "stat_invalid_link.stat.islnk"
+ - "'../invalid' in stat_invalid_link.stat.lnk_target"
+ - "stat_dir_in_copy_link.stat.exists"
+ - "stat_dir_in_copy_link.stat.islnk"
+ - "'../subdir2/subdir3' in stat_dir_in_copy_link.stat.lnk_target"
+ - "stat_dir_outside_copy_link.stat.exists"
+ - "stat_dir_outside_copy_link.stat.islnk"
+ - "'/tmp/ansible-test-abs-link-dir' == stat_dir_outside_copy_link.stat.lnk_target"
+
+- name: Stat the recursively copied directories
+ stat:
+ path: "{{ remote_dir }}/sub/{{ item }}"
+ register: dir_stats
+ with_items:
+ - "subdir"
+ - "subdir/subdira"
+ - "subdir/subdir1"
+ - "subdir/subdir2"
+ - "subdir/subdir2/subdir3"
+ - "subdir/subdir2/subdir3/subdir4"
+
+- debug:
+ var: dir_stats
+ verbosity: 1
+
+- name: Assert recursively copied directories mode (1)
+ assert:
+ that:
+ - "item.stat.exists"
+ - "item.stat.mode == '0700'"
+ with_items: "{{dir_stats.results}}"
+
+- name: Test recursive copy to directory no trailing slash, local_follow=False second time
+ copy:
+ src: subdir
+ dest: "{{ remote_subdir }}"
+ directory_mode: 0700
+ local_follow: False
+ register: recursive_copy_result
+
+- name: Assert that the second copy did not change anything
+ assert:
+ that:
+ - "recursive_copy_result is not changed"
+
+- name: Cleanup the recursive copy subdir
+ file:
+ name: "{{ remote_subdir }}"
+ state: absent
+
+#
+# Recursive copy with local_follow=False, trailing slash
+#
+
+- name: Set the output subdirectory
+ set_fact:
+ remote_subdir: "{{ remote_dir }}/sub"
+
+- name: Make an output subdirectory
+ file:
+ name: "{{ remote_subdir }}"
+ state: directory
+
+- name: Setup link target for absolute link
+ copy:
+ dest: /tmp/ansible-test-abs-link
+ content: target
+ delegate_to: localhost
+
+- name: Setup link target dir for absolute link
+ file:
+ dest: /tmp/ansible-test-abs-link-dir
+ state: directory
+ delegate_to: localhost
+
+- name: Test recursive copy to directory trailing slash, local_follow=False
+ copy:
+ src: subdir/
+ dest: "{{ remote_subdir }}"
+ directory_mode: 0700
+ local_follow: False
+ register: recursive_copy_result
+
+- debug:
+ var: recursive_copy_result
+ verbosity: 1
+
+- name: Assert that the recursive copy did something
+ assert:
+ that:
+ - "recursive_copy_result is changed"
+
+- name: Check that a file in a directory was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/bar.txt"
+ register: stat_bar
+
+- name: Check that a file in a deeper directory was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir2/baz.txt"
+ register: stat_bar2
+
+- name: Check that a file in a directory whose parent contains a directory alone was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir2/subdir3/subdir4/qux.txt"
+ register: stat_bar3
+
+- name: Assert recursive copy files
+ assert:
+ that:
+ - "stat_bar.stat.exists"
+ - "stat_bar2.stat.exists"
+ - "stat_bar3.stat.exists"
+
+- name: Check symlink to absolute path
+ stat:
+ path: '{{ remote_dir }}/sub/subdir1/ansible-test-abs-link'
+ register: stat_abs_link
+
+- name: Check symlink to relative path
+ stat:
+ path: '{{ remote_dir }}/sub/subdir1/bar.txt'
+ register: stat_relative_link
+
+- name: Check symlink to self
+ stat:
+ path: '{{ remote_dir }}/sub/subdir1/invalid'
+ register: stat_self_link
+
+- name: Check symlink to nonexistent file
+ stat:
+ path: '{{ remote_dir }}/sub/subdir1/invalid2'
+ register: stat_invalid_link
+
+- name: Check symlink to directory in copy
+ stat:
+ path: '{{ remote_dir }}/sub/subdir1/subdir3'
+ register: stat_dir_in_copy_link
+
+- name: Check symlink to directory outside of copy
+ stat:
+ path: '{{ remote_dir }}/sub/subdir1/ansible-test-abs-link-dir'
+ register: stat_dir_outside_copy_link
+
+- name: Assert recursive copy symlinks local_follow=False trailing slash
+ assert:
+ that:
+ - "stat_abs_link.stat.exists"
+ - "stat_abs_link.stat.islnk"
+ - "'/tmp/ansible-test-abs-link' == stat_abs_link.stat.lnk_target"
+ - "stat_relative_link.stat.exists"
+ - "stat_relative_link.stat.islnk"
+ - "'../bar.txt' == stat_relative_link.stat.lnk_target"
+ - "stat_self_link.stat.exists"
+ - "stat_self_link.stat.islnk"
+ - "'invalid' in stat_self_link.stat.lnk_target"
+ - "stat_invalid_link.stat.exists"
+ - "stat_invalid_link.stat.islnk"
+ - "'../invalid' in stat_invalid_link.stat.lnk_target"
+ - "stat_dir_in_copy_link.stat.exists"
+ - "stat_dir_in_copy_link.stat.islnk"
+ - "'../subdir2/subdir3' in stat_dir_in_copy_link.stat.lnk_target"
+ - "stat_dir_outside_copy_link.stat.exists"
+ - "stat_dir_outside_copy_link.stat.islnk"
+ - "'/tmp/ansible-test-abs-link-dir' == stat_dir_outside_copy_link.stat.lnk_target"
+
+- name: Stat the recursively copied directories
+ stat:
+ path: "{{ remote_dir }}/sub/{{ item }}"
+ register: dir_stats
+ with_items:
+ - "subdira"
+ - "subdir1"
+ - "subdir2"
+ - "subdir2/subdir3"
+ - "subdir2/subdir3/subdir4"
+
+- debug:
+ var: dir_stats
+ verbosity: 1
+
+- name: Assert recursively copied directories mode (2)
+ assert:
+ that:
+ - "item.stat.mode == '0700'"
+ with_items: "{{dir_stats.results}}"
+
+- name: Test recursive copy to directory trailing slash, local_follow=False second time
+ copy:
+ src: subdir/
+ dest: "{{ remote_subdir }}"
+ directory_mode: 0700
+ local_follow: False
+ register: recursive_copy_result
+
+- name: Assert that the second copy did not change anything
+ assert:
+ that:
+ - "recursive_copy_result is not changed"
+
+- name: Cleanup the recursive copy subdir
+ file:
+ name: "{{ remote_subdir }}"
+ state: absent
+
+#
+# test recursive copy local_follow=True, no trailing slash
+#
+
+- name: Set the output subdirectory
+ set_fact:
+ remote_subdir: "{{ remote_dir }}/sub"
+
+- name: Make an output subdirectory
+ file:
+ name: "{{ remote_subdir }}"
+ state: directory
+
+- name: Setup link target for absolute link
+ copy:
+ dest: /tmp/ansible-test-abs-link
+ content: target
+ delegate_to: localhost
+
+- name: Setup link target dir for absolute link
+ file:
+ dest: /tmp/ansible-test-abs-link-dir
+ state: directory
+ delegate_to: localhost
+
+- name: Test recursive copy to directory no trailing slash, local_follow=True
+ copy:
+ src: subdir
+ dest: "{{ remote_subdir }}"
+ directory_mode: 0700
+ local_follow: True
+ register: recursive_copy_result
+
+- debug:
+ var: recursive_copy_result
+ verbosity: 1
+
+- name: Assert that the recursive copy did something
+ assert:
+ that:
+ - "recursive_copy_result is changed"
+
+- name: Check that a file in a directory was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/bar.txt"
+ register: stat_bar
+
+- name: Check that a file in a deeper directory was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/subdir2/baz.txt"
+ register: stat_bar2
+
+- name: Check that a file in a directory whose parent contains a directory alone was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/subdir2/subdir3/subdir4/qux.txt"
+ register: stat_bar3
+
+- name: Check that a file in a directory whose parent is a symlink was transferred
+ stat:
+ path: "{{ remote_dir }}/sub/subdir/subdir1/subdir3/subdir4/qux.txt"
+ register: stat_bar4
+
+- name: Assert recursive copy files
+ assert:
+ that:
+ - "stat_bar.stat.exists"
+ - "stat_bar2.stat.exists"
+ - "stat_bar3.stat.exists"
+ - "stat_bar4.stat.exists"
+
+- name: Check symlink to absolute path
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link'
+ register: stat_abs_link
+
+- name: Check symlink to relative path
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/bar.txt'
+ register: stat_relative_link
+
+- name: Check symlink to self
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/invalid'
+ register: stat_self_link
+
+- name: Check symlink to nonexistent file
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/invalid2'
+ register: stat_invalid_link
+
+- name: Check symlink to directory in copy
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/subdir3'
+ register: stat_dir_in_copy_link
+
+- name: Check symlink to directory outside of copy
+ stat:
+ path: '{{ remote_dir }}/sub/subdir/subdir1/ansible-test-abs-link-dir'
+ register: stat_dir_outside_copy_link
+
+- name: Assert recursive copy symlinks local_follow=True
+ assert:
+ that:
+ - "stat_abs_link.stat.exists"
+ - "not stat_abs_link.stat.islnk"
+ - "stat_abs_link.stat.checksum == ('target'|hash('sha1'))"
+ - "stat_relative_link.stat.exists"
+ - "not stat_relative_link.stat.islnk"
+ - "stat_relative_link.stat.checksum == ('baz\n'|hash('sha1'))"
+ - "stat_self_link.stat.exists"
+ - "stat_self_link.stat.islnk"
+ - "'invalid' in stat_self_link.stat.lnk_target"
+ - "stat_invalid_link.stat.exists"
+ - "stat_invalid_link.stat.islnk"
+ - "'../invalid' in stat_invalid_link.stat.lnk_target"
+ - "stat_dir_in_copy_link.stat.exists"
+ - "not stat_dir_in_copy_link.stat.islnk"
+ - "stat_dir_in_copy_link.stat.isdir"
+ - "stat_dir_outside_copy_link.stat.exists"
+ - "not stat_dir_outside_copy_link.stat.islnk"
+ - "stat_dir_outside_copy_link.stat.isdir"
+
+- name: Stat the recursively copied directories
+ stat:
+ path: "{{ remote_dir }}/sub/{{ item }}"
+ register: dir_stats
+ with_items:
+ - "subdir"
+ - "subdir/subdira"
+ - "subdir/subdir1"
+ - "subdir/subdir1/subdir3"
+ - "subdir/subdir1/subdir3/subdir4"
+ - "subdir/subdir2"
+ - "subdir/subdir2/subdir3"
+ - "subdir/subdir2/subdir3/subdir4"
+
+- debug:
+ var: dir_stats
+ verbosity: 1
+
+- name: Assert recursively copied directories mode (3)
+ assert:
+ that:
+ - "item.stat.mode == '0700'"
+ with_items: "{{dir_stats.results}}"
+
+- name: Test recursive copy to directory no trailing slash, local_follow=True second time
+ copy:
+ src: subdir
+ dest: "{{ remote_subdir }}"
+ directory_mode: 0700
+ local_follow: True
+ register: recursive_copy_result
+
+- name: Assert that the second copy did not change anything
+ assert:
+ that:
+ - "recursive_copy_result is not changed"
+
+- name: Cleanup the recursive copy subdir
+ file:
+ name: "{{ remote_subdir }}"
+ state: absent
+
+#
+# Recursive copy of tricky symlinks
+#
+- block:
+ - name: Create a directory to copy from
+ file:
+ path: '{{ local_temp_dir }}/source1'
+ state: directory
+
+ - name: Create a directory outside of the tree
+ file:
+ path: '{{ local_temp_dir }}/source2'
+ state: directory
+
+ - name: Create a symlink to a directory outside of the tree
+ file:
+ path: '{{ local_temp_dir }}/source1/link'
+ src: '{{ local_temp_dir }}/source2'
+ state: link
+
+ - name: Create a circular link back to the tree
+ file:
+ path: '{{ local_temp_dir }}/source2/circle'
+ src: '../source1'
+ state: link
+
+ - name: Create output directory
+ file:
+ path: '{{ local_temp_dir }}/dest1'
+ state: directory
+ delegate_to: localhost
+
+- name: Recursive copy the source
+ copy:
+ src: '{{ local_temp_dir }}/source1'
+ dest: '{{ remote_dir }}/dest1'
+ local_follow: True
+ register: copy_result
+
+- name: Check that the tree link is now a directory
+ stat:
+ path: '{{ remote_dir }}/dest1/source1/link'
+ register: link_result
+
+- name: Check that the out of tree link is still a link
+ stat:
+ path: '{{ remote_dir }}/dest1/source1/link/circle'
+ register: circle_result
+
+- name: Verify that the recursive copy worked
+ assert:
+ that:
+ - 'copy_result.changed'
+ - 'link_result.stat.isdir'
+ - 'not link_result.stat.islnk'
+ - 'circle_result.stat.islnk'
+ - '"../source1" == circle_result.stat.lnk_target'
+
+- name: Recursive copy the source a second time
+ copy:
+ src: '{{ local_temp_dir }}/source1'
+ dest: '{{ remote_dir }}/dest1'
+ local_follow: True
+ register: copy_result
+
+- name: Verify that the recursive copy made no changes
+ assert:
+ that:
+ - 'not copy_result.changed'
+
+#
+# Recursive copy with absolute paths (#27439)
+#
+- name: Test that remote_dir is appropriate for this test (absolute path)
+ assert:
+ that:
+ - "remote_dir_expanded[0] == '/'"
+
+- block:
+ - name: Create a directory to copy
+ file:
+ path: '{{ local_temp_dir }}/source_recursive'
+ state: directory
+
+ - name: Create a file inside of the directory
+ copy:
+ content: "testing"
+ dest: '{{ local_temp_dir }}/source_recursive/file'
+
+ - name: Create a directory to place the test output in
+ file:
+ path: '{{ local_temp_dir }}/destination'
+ state: directory
+ delegate_to: localhost
+
+- name: Copy the directory and files within (no trailing slash)
+ copy:
+ src: '{{ local_temp_dir }}/source_recursive'
+ dest: '{{ remote_dir }}/destination'
+
+- name: Stat the recursively copied directory
+ stat:
+ path: "{{ remote_dir }}/destination/{{ item }}"
+ register: copied_stat
+ with_items:
+ - "source_recursive"
+ - "source_recursive/file"
+ - "file"
+
+- debug:
+ var: copied_stat
+ verbosity: 1
+
+- name: Assert with no trailing slash, the directory and file are copied
+ assert:
+ that:
+ - "copied_stat.results[0].stat.exists"
+ - "copied_stat.results[1].stat.exists"
+ - "not copied_stat.results[2].stat.exists"
+
+- name: Cleanup
+ file:
+ path: '{{ remote_dir }}/destination'
+ state: absent
+
+# Try again, this time with a trailing slash
+
+- name: Create a directory to place the test output in
+ file:
+ path: '{{ remote_dir }}/destination'
+ state: directory
+
+- name: Copy just the files inside of the directory
+ copy:
+ src: '{{ local_temp_dir }}/source_recursive/'
+ dest: '{{ remote_dir }}/destination'
+
+- name: Stat the recursively copied directory
+ stat:
+ path: "{{ remote_dir }}/destination/{{ item }}"
+ register: copied_stat
+ with_items:
+ - "source_recursive"
+ - "source_recursive/file"
+ - "file"
+
+- debug:
+ var: copied_stat
+ verbosity: 1
+
+- name: Assert with trailing slash, only the file is copied
+ assert:
+ that:
+ - "not copied_stat.results[0].stat.exists"
+ - "not copied_stat.results[1].stat.exists"
+ - "copied_stat.results[2].stat.exists"
+
+#
+# Recursive copy with relative paths (#34893)
+#
+
+- name: Create a directory to copy
+ file:
+ path: 'source_recursive'
+ state: directory
+ delegate_to: localhost
+
+- name: Create a file inside of the directory
+ copy:
+ content: "testing"
+ dest: 'source_recursive/file'
+ delegate_to: localhost
+
+- name: Create a directory to place the test output in
+ file:
+ path: 'destination'
+ state: directory
+ delegate_to: localhost
+
+- name: Copy the directory and files within (no trailing slash)
+ copy:
+ src: 'source_recursive'
+ dest: 'destination'
+
+- name: Stat the recursively copied directory
+ stat:
+ path: "destination/{{ item }}"
+ register: copied_stat
+ with_items:
+ - "source_recursive"
+ - "source_recursive/file"
+ - "file"
+
+- debug:
+ var: copied_stat
+ verbosity: 1
+
+- name: Assert with no trailing slash, the directory and file are copied
+ assert:
+ that:
+ - "copied_stat.results[0].stat.exists"
+ - "copied_stat.results[1].stat.exists"
+ - "not copied_stat.results[2].stat.exists"
+
+- name: Cleanup
+ file:
+ path: 'destination'
+ state: absent
+
+# Try again, this time with a trailing slash
+
+- name: Create a directory to place the test output in
+ file:
+ path: 'destination'
+ state: directory
+
+- name: Copy just the files inside of the directory
+ copy:
+ src: 'source_recursive/'
+ dest: 'destination'
+
+- name: Stat the recursively copied directory
+ stat:
+ path: "destination/{{ item }}"
+ register: copied_stat
+ with_items:
+ - "source_recursive"
+ - "source_recursive/file"
+ - "file"
+
+- debug:
+ var: copied_stat
+ verbosity: 1
+
+- name: Assert with trailing slash, only the file is copied
+ assert:
+ that:
+ - "not copied_stat.results[0].stat.exists"
+ - "not copied_stat.results[1].stat.exists"
+ - "copied_stat.results[2].stat.exists"
+
+- name: Cleanup
+ file:
+ path: 'destination'
+ state: absent
+
+- name: Cleanup
+ file:
+ path: 'source_recursive'
+ state: absent
+
+#
+# issue 8394
+#
+
+- name: Create a file with content and a literal multiline block
+ copy:
+ content: |
+ this is the first line
+ this is the second line
+
+ this line is after an empty line
+ this line is the last line
+ dest: "{{ remote_dir }}/multiline.txt"
+ register: copy_result6
+
+- debug:
+ var: copy_result6
+ verbosity: 1
+
+- name: Assert the multiline file was created correctly
+ assert:
+ that:
+ - "copy_result6.changed"
+ - "copy_result6.dest == '{{remote_dir_expanded}}/multiline.txt'"
+ - "copy_result6.checksum == '9cd0697c6a9ff6689f0afb9136fa62e0b3fee903'"
+
+# test overwriting a file as an unprivileged user (pull request #8624)
+# this can't be relative to {{remote_dir}} as ~root usually has mode 700
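+# The tasks run as remote_user root with become_user nobody, so the target
+# must live somewhere world-writable such as /tmp.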
+- block:
+ - name: Create world writable directory
+ file:
+ dest: /tmp/worldwritable
+ state: directory
+ mode: 0777
+
+ - name: Create world writable file
+ copy:
+ dest: /tmp/worldwritable/file.txt
+ content: "bar"
+ mode: 0666
+
+ - name: Overwrite the file as user nobody
+ copy:
+ dest: /tmp/worldwritable/file.txt
+ content: "baz"
+ become: yes
+ become_user: nobody
+ register: copy_result7
+
+ - name: Assert the file was overwritten
+ assert:
+ that:
+ - "copy_result7.changed"
+ - "copy_result7.dest == '/tmp/worldwritable/file.txt'"
+ - "copy_result7.checksum == ('baz'|hash('sha1'))"
+
+ - name: Clean up
+ file:
+ dest: /tmp/worldwritable
+ state: absent
+
+ remote_user: root
+
+#
+# Follow=True tests
+#
+
+# test overwriting a link using "follow=yes" so that the link
+# is preserved and the link target is updated
+
+- name: Create a test file to symlink to
+ copy:
+ dest: "{{ remote_dir }}/follow_test"
+ content: "this is the follow test file\n"
+
+- name: Create a symlink to the test file
+ file:
+ path: "{{ remote_dir }}/follow_link"
+ src: './follow_test'
+ state: link
+
+- name: Update the test file using follow=True to preserve the link
+ copy:
+ dest: "{{ remote_dir }}/follow_link"
+ src: foo.txt
+ follow: yes
+ register: replace_follow_result
+
+- name: Stat the link path
+ stat:
+ path: "{{ remote_dir }}/follow_link"
+ register: stat_link_result
+
+- name: Assert that the link is still a link and contents were changed
+ assert:
+ that:
+ - stat_link_result['stat']['islnk']
+ - stat_link_result['stat']['lnk_target'] == './follow_test'
+ - replace_follow_result['changed']
+ - "replace_follow_result['checksum'] == remote_file_hash"
+
+# Symlink handling when the dest is already there
+# https://github.com/ansible/ansible-modules-core/issues/1568
+
+- name: test idempotency by trying to copy to the symlink with the same contents
+ copy:
+ dest: "{{ remote_dir }}/follow_link"
+ src: foo.txt
+ follow: yes
+ register: replace_follow_result
+
+- name: Stat the link path
+ stat:
+ path: "{{ remote_dir }}/follow_link"
+ register: stat_link_result
+
+- name: Assert that the link is still a link and contents were not changed
+ assert:
+ that:
+ - stat_link_result['stat']['islnk']
+ - stat_link_result['stat']['lnk_target'] == './follow_test'
+ - not replace_follow_result['changed']
+ - replace_follow_result['checksum'] == remote_file_hash
+
+
+- name: Update the test file using follow=False to overwrite the link
+ copy:
+ dest: '{{ remote_dir }}/follow_link'
+ content: 'modified'
+ follow: False
+ register: copy_results
+
+- name: Check the stat results of the file
+ stat:
+ path: '{{remote_dir}}/follow_link'
+ register: stat_results
+
+- debug:
+ var: stat_results
+ verbosity: 1
+
+- name: Assert that the file has changed and is not a link
+ assert:
+ that:
+ - "copy_results is changed"
+ - "'content' not in copy_results"
+ - "stat_results.stat.checksum == ('modified'|hash('sha1'))"
+ - "not stat_results.stat.islnk"
+
+# FIXME: also test overwriting a link with "follow=yes" when the thing being
+# copied is itself a link; that case is not covered below yet.
+
+#
+# File mode tests
+#
+
+- name: setup directory for test
+ file:
+ state: directory
+ dest: '{{ remote_dir }}/directory'
+ mode: 0755
+
+- name: set file mode 0705 when the destination is a directory
+ copy:
+ src: foo.txt
+ dest: '{{ remote_dir }}/directory/'
+ mode: 0705
+
+- name: set file mode 0604 when the destination is a directory
+ copy:
+ src: foo.txt
+ dest: '{{ remote_dir }}/directory/'
+ mode: 0604
+ register: file_result
+
+- name: check that the file has the correct attributes
+ stat:
+ path: '{{ remote_dir }}/directory/foo.txt'
+ register: file_attrs
+
+- assert:
+ that:
+ - "file_attrs.stat.mode == '0604'"
+ # The below assertions make an invalid assumption, these were not explicitly set
+ # - "file_attrs.stat.uid == 0"
+ # - "file_attrs.stat.pw_name == 'root'"
+
+- name: check that the containing directory did not change attributes
+ stat:
+ path: '{{ remote_dir }}/directory/'
+ register: dir_attrs
+
+- assert:
+ that:
+ - "dir_attrs.stat.mode == '0755'"
+
+# Test that recursive copy of a directory containing a symlink to another
+# directory, with mode=preserve and local_follow=no works.
+# See: https://github.com/ansible/ansible/issues/68471
+
+- name: Test recursive copy of dir with symlinks, mode=preserve, local_follow=False
+ copy:
+ src: '{{ role_path }}/files/subdir/'
+ dest: '{{ local_temp_dir }}/preserve_symlink/'
+ mode: preserve
+ local_follow: no
+
+- name: check that we actually used and still have a symlink
+ stat:
+ path: '{{ local_temp_dir }}/preserve_symlink/subdir1/bar.txt'
+ register: symlink_path
+
+- assert:
+ that:
+ - symlink_path.stat.exists
+ - symlink_path.stat.islnk
+
+#
+# NOTE: the section below is likely already covered by the recursive copying
+# section above. It is kept as an original test case; delete it once that
+# coverage is confirmed to pass.
+
+#
+# Recursive copying with symlinks tests
+#
+- delegate_to: localhost
+ block:
+ - name: Create a test dir to copy
+ file:
+ path: '{{ local_temp_dir }}/top_dir'
+ state: directory
+
+ - name: Create a test dir to symlink to
+ file:
+ path: '{{ local_temp_dir }}/linked_dir'
+ state: directory
+
+ - name: Create a file in the test dir
+ copy:
+ dest: '{{ local_temp_dir }}/linked_dir/file1'
+ content: 'hello world'
+
+ - name: Create a link to the test dir
+ file:
+ path: '{{ local_temp_dir }}/top_dir/follow_link_dir'
+ src: '{{ local_temp_dir }}/linked_dir'
+ state: link
+
+ - name: Create a subdir to hold the circular link
+ file:
+ path: '{{ local_temp_dir }}/top_dir/subdir'
+ state: directory
+
+ ### FIXME: Also add a test for a relative symlink
+ - name: Create a circular symlink
+ file:
+ path: '{{ local_temp_dir }}/top_dir/subdir/circle'
+ src: '{{ local_temp_dir }}/top_dir/'
+ state: link
+
+- name: Copy the directory containing the symlinks
+ copy:
+ src: '{{ local_temp_dir }}/top_dir'
+ dest: '{{ remote_dir }}/new_dir'
+ local_follow: True
+
+- name: Stat the copied path
+ stat:
+ path: '{{ remote_dir }}/new_dir/top_dir/follow_link_dir'
+ register: stat_dir_result
+
+- name: Stat the copied file
+ stat:
+ path: '{{ remote_dir }}/new_dir/top_dir/follow_link_dir/file1'
+ register: stat_file_in_dir_result
+
+- name: Stat the circular symlink
+ stat:
+ path: '{{ remote_dir }}/new_dir/top_dir/subdir/circle'
+ register: stat_circular_symlink_result
+
+- name: Assert that the copied directory, file, and circular symlink exist
+ assert:
+ that:
+ - stat_dir_result.stat.exists
+ - stat_dir_result.stat.isdir
+ - stat_file_in_dir_result.stat.exists
+ - stat_file_in_dir_result.stat.isreg
+ - stat_circular_symlink_result.stat.exists
+ - stat_circular_symlink_result.stat.islnk
+
+# Relative paths in dest:
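+# A relative dest is resolved against the remote working directory, which is
+# typically the connecting user's home directory.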
+- name: Smoketest that copying content to an implicit relative path works
+ copy:
+ content: 'testing'
+ dest: 'ansible-testing.txt'
+ register: relative_results
+
+- name: Assert that copying to an implicit relative path reported changed
+ assert:
+ that:
+ - 'relative_results["changed"]'
+ - 'relative_results["checksum"] == "dc724af18fbdd4e59189f5fe768a5f8311527050"'
+
+- name: Test that copying the same content with an implicit relative path reports no change
+ copy:
+ content: 'testing'
+ dest: 'ansible-testing.txt'
+ register: relative_results
+
+- name: Assert that copying the same content with an implicit relative path reports no change
+ assert:
+ that:
+ - 'not relative_results["changed"]'
+ - 'relative_results["checksum"] == "dc724af18fbdd4e59189f5fe768a5f8311527050"'
+
+- name: Test that copying different content with an implicit relative path reports change
+ copy:
+ content: 'testing2'
+ dest: 'ansible-testing.txt'
+ register: relative_results
+
+- name: Assert that copying different content with an implicit relative path reports changed
+ assert:
+ that:
+ - 'relative_results["changed"]'
+ - 'relative_results["checksum"] == "596b29ec9afea9e461a20610d150939b9c399d93"'
+
+- name: Smoketest that explicit relative path works
+ copy:
+ content: 'testing'
+ dest: './ansible-testing.txt'
+ register: relative_results
+
+- name: Assert that explicit relative paths reports change
+ assert:
+ that:
+ - 'relative_results["changed"]'
+ - 'relative_results["checksum"] == "dc724af18fbdd4e59189f5fe768a5f8311527050"'
+
+- name: Cleanup relative path tests
+ file:
+ path: 'ansible-testing.txt'
+ state: absent
+
+# src is a file, dest is a non-existent directory (2 levels of directories):
+# using remote_src
+# checks that dest is created
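+# (Each included task file consumes the loop variables defined on its
+# include, e.g. item.src/item.dest or the 'dest' loop_var.)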
+- include: dest_in_non_existent_directories_remote_src.yml
+ with_items:
+ - { src: 'foo.txt', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/foo.txt' }
+
+# src is a file, dest is file in a non-existent directory: checks that a failure occurs
+# using remote_src
+- include: src_file_dest_file_in_non_existent_dir_remote_src.yml
+ with_items:
+ - 'new_sub_dir1/sub_dir2/foo.txt'
+ - 'new_sub_dir1/foo.txt'
+ loop_control:
+ loop_var: 'dest'
+
+# src is a file, dest is a non-existent directory (2 levels of directories):
+# checks that dest is created
+- include: dest_in_non_existent_directories.yml
+ with_items:
+ - { src: 'foo.txt', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/foo.txt' }
+ - { src: 'subdir', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/subdir/bar.txt' }
+ - { src: 'subdir/', dest: 'new_sub_dir1/sub_dir2/', check: 'new_sub_dir1/sub_dir2/bar.txt' }
+ - { src: 'subdir', dest: 'new_sub_dir1/sub_dir2', check: 'new_sub_dir1/sub_dir2/subdir/bar.txt' }
+ - { src: 'subdir/', dest: 'new_sub_dir1/sub_dir2', check: 'new_sub_dir1/sub_dir2/bar.txt' }
+
+# src is a file, dest is file in a non-existent directory: checks that a failure occurs
+- include: src_file_dest_file_in_non_existent_dir.yml
+ with_items:
+ - 'new_sub_dir1/sub_dir2/foo.txt'
+ - 'new_sub_dir1/foo.txt'
+ loop_control:
+ loop_var: 'dest'
+#
+# Recursive copying on remote host
+#
+## prepare for test
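+# The source tree built below:
+# remote_dir_src/
+# file1 ('hello world 1')
+# subdir/file12 ('hello world 12')
+# link_file12 -> subdir/file12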
+- block:
+
+ - name: execute - Create a test src dir
+ file:
+ path: '{{ remote_dir }}/remote_dir_src'
+ state: directory
+
+ - name: gather - Stat the remote_dir_src
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src'
+ register: stat_remote_dir_src_before
+
+ - name: execute - Create a subdir
+ file:
+ path: '{{ remote_dir }}/remote_dir_src/subdir'
+ state: directory
+
+ - name: gather - Stat the remote_dir_src/subdir
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/subdir'
+ register: stat_remote_dir_src_subdir_before
+
+ - name: execute - Create a file in the top of src
+ copy:
+ dest: '{{ remote_dir }}/remote_dir_src/file1'
+ content: 'hello world 1'
+
+ - name: gather - Stat the remote_dir_src/file1
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/file1'
+ register: stat_remote_dir_src_file1_before
+
+ - name: execute - Create a file in the subdir
+ copy:
+ dest: '{{ remote_dir }}/remote_dir_src/subdir/file12'
+ content: 'hello world 12'
+
+ - name: gather - Stat the remote_dir_src/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/subdir/file12'
+ register: stat_remote_dir_src_subdir_file12_before
+
+ - name: execute - Create a link to the file12
+ file:
+ path: '{{ remote_dir }}/remote_dir_src/link_file12'
+ src: '{{ remote_dir }}/remote_dir_src/subdir/file12'
+ state: link
+
+ - name: gather - Stat the remote_dir_src/link_file12
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/link_file12'
+ register: stat_remote_dir_src_link_file12_before
+
+### test when src ends with os.sep and dest is an existing dir
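+# Expected: the contents of remote_dir_src land directly inside dest; with
+# local_follow True the symlink is dereferenced into a regular file, with
+# local_follow False it is recreated as a symlink.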
+- block:
+
+### local_follow: True
+ - name: execute - Create a test dest dir
+ file:
+ path: '{{ remote_dir }}/testcase1_local_follow_true'
+ state: directory
+
+ - name: execute - Copy the directory on remote with local_follow True
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src/'
+ dest: '{{ remote_dir }}/testcase1_local_follow_true'
+ local_follow: True
+ register: testcase1
+
+ - name: gather - Stat the testcase1_local_follow_true
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_true'
+ register: stat_testcase1_local_follow_true
+ - name: gather - Stat the testcase1_local_follow_true/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_true/subdir'
+ register: stat_testcase1_local_follow_true_subdir
+ - name: gather - Stat the testcase1_local_follow_true/file1
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_true/file1'
+ register: stat_testcase1_local_follow_true_file1
+ - name: gather - Stat the testcase1_local_follow_true/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_true/subdir/file12'
+ register: stat_testcase1_local_follow_true_subdir_file12
+ - name: gather - Stat the testcase1_local_follow_true/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_true/link_file12'
+ register: stat_testcase1_local_follow_true_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow True
+ assert:
+ that:
+ - testcase1 is changed
+ - "stat_testcase1_local_follow_true.stat.isdir"
+ - "stat_testcase1_local_follow_true_subdir.stat.isdir"
+ - "stat_testcase1_local_follow_true_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase1_local_follow_true_file1.stat.checksum"
+ - "stat_testcase1_local_follow_true_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase1_local_follow_true_subdir_file12.stat.checksum"
+ - "stat_testcase1_local_follow_true_link_file12.stat.exists"
+ - "not stat_testcase1_local_follow_true_link_file12.stat.islnk"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase1_local_follow_true_link_file12.stat.checksum"
+
+### local_follow: False
+ - name: execute - Create a test dest dir
+ file:
+ path: '{{ remote_dir }}/testcase1_local_follow_false'
+ state: directory
+
+ - name: execute - Copy the directory on remote with local_follow False
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src/'
+ dest: '{{ remote_dir }}/testcase1_local_follow_false'
+ local_follow: False
+ register: testcase1
+
+ - name: gather - Stat the testcase1_local_follow_false
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_false'
+ register: stat_testcase1_local_follow_false
+ - name: gather - Stat the testcase1_local_follow_false/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_false/subdir'
+ register: stat_testcase1_local_follow_false_subdir
+ - name: gather - Stat the testcase1_local_follow_false/file1
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_false/file1'
+ register: stat_testcase1_local_follow_false_file1
+ - name: gather - Stat the testcase1_local_follow_false/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_false/subdir/file12'
+ register: stat_testcase1_local_follow_false_subdir_file12
+ - name: gather - Stat the testcase1_local_follow_false/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase1_local_follow_false/link_file12'
+ register: stat_testcase1_local_follow_false_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow False
+ assert:
+ that:
+ - testcase1 is changed
+ - "stat_testcase1_local_follow_false.stat.isdir"
+ - "stat_testcase1_local_follow_false_subdir.stat.isdir"
+ - "stat_testcase1_local_follow_false_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase1_local_follow_false_file1.stat.checksum"
+ - "stat_testcase1_local_follow_false_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase1_local_follow_false_subdir_file12.stat.checksum"
+ - "stat_testcase1_local_follow_false_link_file12.stat.exists"
+ - "stat_testcase1_local_follow_false_link_file12.stat.islnk"
+
+## test when src ends with os.sep and dest does not exist
+
+- block:
+ - name: execute - Copy the directory on remote with local_follow True
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src/'
+ dest: '{{ remote_dir }}/testcase2_local_follow_true'
+ local_follow: True
+ register: testcase2
+
+ - name: gather - Stat the testcase2_local_follow_true
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_true'
+ register: stat_testcase2_local_follow_true
+ - name: gather - Stat the testcase2_local_follow_true/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_true/subdir'
+ register: stat_testcase2_local_follow_true_subdir
+ - name: gather - Stat the testcase2_local_follow_true/file1
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_true/file1'
+ register: stat_testcase2_local_follow_true_file1
+ - name: gather - Stat the testcase2_local_follow_true/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_true/subdir/file12'
+ register: stat_testcase2_local_follow_true_subdir_file12
+ - name: gather - Stat the testcase2_local_follow_true/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_true/link_file12'
+ register: stat_testcase2_local_follow_true_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow True
+ assert:
+ that:
+ - testcase2 is changed
+ - "stat_testcase2_local_follow_true.stat.isdir"
+ - "stat_testcase2_local_follow_true_subdir.stat.isdir"
+ - "stat_testcase2_local_follow_true_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase2_local_follow_true_file1.stat.checksum"
+ - "stat_testcase2_local_follow_true_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase2_local_follow_true_subdir_file12.stat.checksum"
+ - "stat_testcase2_local_follow_true_link_file12.stat.exists"
+ - "not stat_testcase2_local_follow_true_link_file12.stat.islnk"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase2_local_follow_true_link_file12.stat.checksum"
+
+### local_follow: False
+ - name: execute - Copy the directory on remote with local_follow False
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src/'
+ dest: '{{ remote_dir }}/testcase2_local_follow_false'
+ local_follow: False
+ register: testcase2
+
+ - name: gather - Stat the testcase2_local_follow_false
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_false'
+ register: stat_testcase2_local_follow_false
+ - name: gather - Stat the testcase2_local_follow_false/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_false/subdir'
+ register: stat_testcase2_local_follow_false_subdir
+ - name: gather - Stat the testcase2_local_follow_false/file1
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_false/file1'
+ register: stat_testcase2_local_follow_false_file1
+ - name: gather - Stat the testcase2_local_follow_false/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_false/subdir/file12'
+ register: stat_testcase2_local_follow_false_subdir_file12
+ - name: gather - Stat the testcase2_local_follow_false/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase2_local_follow_false/link_file12'
+ register: stat_testcase2_local_follow_false_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow False
+ assert:
+ that:
+ - testcase2 is changed
+ - "stat_testcase2_local_follow_false.stat.isdir"
+ - "stat_testcase2_local_follow_false_subdir.stat.isdir"
+ - "stat_testcase2_local_follow_false_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase2_local_follow_false_file1.stat.checksum"
+ - "stat_testcase2_local_follow_false_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase2_local_follow_false_subdir_file12.stat.checksum"
+ - "stat_testcase2_local_follow_false_link_file12.stat.exists"
+ - "stat_testcase2_local_follow_false_link_file12.stat.islnk"
+
+## test when src does not end with os.sep and dest is an existing dir
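+# Expected: without the trailing separator the remote_dir_src directory
+# itself is created inside dest.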
+- block:
+
+### local_follow: True
+ - name: execute - Create a test dest dir
+ file:
+ path: '{{ remote_dir }}/testcase3_local_follow_true'
+ state: directory
+
+ - name: execute - Copy the directory on remote with local_follow True
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src'
+ dest: '{{ remote_dir }}/testcase3_local_follow_true'
+ local_follow: True
+ register: testcase3
+
+ - name: gather - Stat the testcase3_local_follow_true
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src'
+ register: stat_testcase3_local_follow_true_remote_dir_src
+ - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/subdir'
+ register: stat_testcase3_local_follow_true_remote_dir_src_subdir
+ - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/file1
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/file1'
+ register: stat_testcase3_local_follow_true_remote_dir_src_file1
+ - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/subdir/file12'
+ register: stat_testcase3_local_follow_true_remote_dir_src_subdir_file12
+ - name: gather - Stat the testcase3_local_follow_true/remote_dir_src/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_true/remote_dir_src/link_file12'
+ register: stat_testcase3_local_follow_true_remote_dir_src_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow True
+ assert:
+ that:
+ - testcase3 is changed
+ - "stat_testcase3_local_follow_true_remote_dir_src.stat.isdir"
+ - "stat_testcase3_local_follow_true_remote_dir_src_subdir.stat.isdir"
+ - "stat_testcase3_local_follow_true_remote_dir_src_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase3_local_follow_true_remote_dir_src_file1.stat.checksum"
+ - "stat_testcase3_local_follow_true_remote_dir_src_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase3_local_follow_true_remote_dir_src_subdir_file12.stat.checksum"
+ - "stat_testcase3_local_follow_true_remote_dir_src_link_file12.stat.exists"
+ - "not stat_testcase3_local_follow_true_remote_dir_src_link_file12.stat.islnk"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase3_local_follow_true_remote_dir_src_link_file12.stat.checksum"
+
+### local_follow: False
+ - name: execute - Create a test dest dir
+ file:
+ path: '{{ remote_dir }}/testcase3_local_follow_false'
+ state: directory
+
+ - name: execute - Copy the directory on remote with local_follow False
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src'
+ dest: '{{ remote_dir }}/testcase3_local_follow_false'
+ local_follow: False
+ register: testcase3
+
+ - name: gather - Stat the testcase3_local_follow_false
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src'
+ register: stat_testcase3_local_follow_false_remote_dir_src
+ - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/subdir'
+ register: stat_testcase3_local_follow_false_remote_dir_src_subdir
+ - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/file1
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/file1'
+ register: stat_testcase3_local_follow_false_remote_dir_src_file1
+ - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/subdir/file12'
+ register: stat_testcase3_local_follow_false_remote_dir_src_subdir_file12
+ - name: gather - Stat the testcase3_local_follow_false/remote_dir_src/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase3_local_follow_false/remote_dir_src/link_file12'
+ register: stat_testcase3_local_follow_false_remote_dir_src_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow False
+ assert:
+ that:
+ - testcase3 is changed
+ - "stat_testcase3_local_follow_false_remote_dir_src.stat.isdir"
+ - "stat_testcase3_local_follow_false_remote_dir_src_subdir.stat.isdir"
+ - "stat_testcase3_local_follow_false_remote_dir_src_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase3_local_follow_false_remote_dir_src_file1.stat.checksum"
+ - "stat_testcase3_local_follow_false_remote_dir_src_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase3_local_follow_false_remote_dir_src_subdir_file12.stat.checksum"
+ - "stat_testcase3_local_follow_false_remote_dir_src_link_file12.stat.exists"
+ - "stat_testcase3_local_follow_false_remote_dir_src_link_file12.stat.islnk"
+
+## test when src does not end with os.sep and dest does not exist
+- block:
+### local_follow: True
+ - name: execute - Copy the directory on remote with local_follow True
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src'
+ dest: '{{ remote_dir }}/testcase4_local_follow_true'
+ local_follow: True
+ register: testcase4
+
+ - name: gather - Stat the testcase4_local_follow_true
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src'
+ register: stat_testcase4_local_follow_true_remote_dir_src
+ - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/subdir'
+ register: stat_testcase4_local_follow_true_remote_dir_src_subdir
+ - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/file1
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/file1'
+ register: stat_testcase4_local_follow_true_remote_dir_src_file1
+ - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/subdir/file12'
+ register: stat_testcase4_local_follow_true_remote_dir_src_subdir_file12
+ - name: gather - Stat the testcase4_local_follow_true/remote_dir_src/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_true/remote_dir_src/link_file12'
+ register: stat_testcase4_local_follow_true_remote_dir_src_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow True
+ assert:
+ that:
+ - testcase4 is changed
+ - "stat_testcase4_local_follow_true_remote_dir_src.stat.isdir"
+ - "stat_testcase4_local_follow_true_remote_dir_src_subdir.stat.isdir"
+ - "stat_testcase4_local_follow_true_remote_dir_src_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase4_local_follow_true_remote_dir_src_file1.stat.checksum"
+ - "stat_testcase4_local_follow_true_remote_dir_src_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase4_local_follow_true_remote_dir_src_subdir_file12.stat.checksum"
+ - "stat_testcase4_local_follow_true_remote_dir_src_link_file12.stat.exists"
+ - "not stat_testcase4_local_follow_true_remote_dir_src_link_file12.stat.islnk"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase4_local_follow_true_remote_dir_src_link_file12.stat.checksum"
+
+### local_follow: False
+ - name: execute - Copy the directory on remote with local_follow False
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src'
+ dest: '{{ remote_dir }}/testcase4_local_follow_false'
+ local_follow: False
+ register: testcase4
+
+ - name: gather - Stat the testcase4_local_follow_false
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src'
+ register: stat_testcase4_local_follow_false_remote_dir_src
+ - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/subdir
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/subdir'
+ register: stat_testcase4_local_follow_false_remote_dir_src_subdir
+ - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/file1
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/file1'
+ register: stat_testcase4_local_follow_false_remote_dir_src_file1
+ - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/subdir/file12'
+ register: stat_testcase4_local_follow_false_remote_dir_src_subdir_file12
+ - name: gather - Stat the testcase4_local_follow_false/remote_dir_src/link_file12
+ stat:
+ path: '{{ remote_dir }}/testcase4_local_follow_false/remote_dir_src/link_file12'
+ register: stat_testcase4_local_follow_false_remote_dir_src_link_file12
+
+ - name: assert - remote_dir_src has been copied with local_follow False
+ assert:
+ that:
+ - testcase4 is changed
+ - "stat_testcase4_local_follow_false_remote_dir_src.stat.isdir"
+ - "stat_testcase4_local_follow_false_remote_dir_src_subdir.stat.isdir"
+ - "stat_testcase4_local_follow_false_remote_dir_src_file1.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_testcase4_local_follow_false_remote_dir_src_file1.stat.checksum"
+ - "stat_testcase4_local_follow_false_remote_dir_src_subdir_file12.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_testcase4_local_follow_false_remote_dir_src_subdir_file12.stat.checksum"
+ - "stat_testcase4_local_follow_false_remote_dir_src_link_file12.stat.exists"
+ - "stat_testcase4_local_follow_false_remote_dir_src_link_file12.stat.islnk"
+
+- block:
+ - name: execute - Clone the source directory on remote
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/remote_dir_src/'
+ dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_src'
+ - name: Create a 2nd level subdirectory
+ file:
+ path: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/'
+ state: directory
+ - name: execute - Copy the directory on remote
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/'
+ dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest'
+ local_follow: True
+ - name: execute - Create a new file in the subdir
+ copy:
+ dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13'
+ content: 'very new file'
+ - name: gather - Stat the testcase5_remote_src_subdirs_src/subdir/subdir2/file13
+ stat:
+ path: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13'
+ - name: execute - Copy the directory on remote
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/'
+ dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest/'
+ register: testcase5_new
+ - name: execute - Edit a file in the subdir
+ copy:
+ dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13'
+ content: 'NOT hello world 12'
+ - name: gather - Stat the testcase5_remote_src_subdirs_src/subdir/subdir2/file13
+ stat:
+ path: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/subdir/subdir2/file13'
+ register: stat_testcase5_remote_src_subdirs_file13_before
+ - name: execute - Copy the directory on remote
+ copy:
+ remote_src: True
+ src: '{{ remote_dir }}/testcase5_remote_src_subdirs_src/'
+ dest: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest/'
+ register: testcase5_edited
+ - name: gather - Stat the testcase5_remote_src_subdirs_dest/subdir/subdir2/file13
+ stat:
+ path: '{{ remote_dir }}/testcase5_remote_src_subdirs_dest/subdir/subdir2/file13'
+ register: stat_testcase5_remote_src_subdirs_file13
+ - name: assert - new and edited files in the 2nd level subdir have been copied
+ assert:
+ that:
+ - testcase5_new is changed
+ - testcase5_edited is changed
+ - "stat_testcase5_remote_src_subdirs_file13.stat.exists"
+ - "stat_testcase5_remote_src_subdirs_file13_before.stat.checksum == stat_testcase5_remote_src_subdirs_file13.stat.checksum"
+
+
+## test copying the directory on remote with chown
+
+
+- block:
+
+ - set_fact:
+ ansible_copy_test_user_name: 'ansible_copy_test_{{ 100000 | random }}'
+
+ - name: execute - create a user for test
+ user:
+ name: '{{ ansible_copy_test_user_name }}'
+ state: present
+ become: true
+ register: ansible_copy_test_user
+
+ - name: execute - create a group for test
+ group:
+ name: '{{ ansible_copy_test_user_name }}'
+ state: present
+ become: true
+ register: ansible_copy_test_group
+
+ - name: execute - Copy the directory on remote with chown
+ copy:
+ remote_src: True
+ src: '{{ remote_dir_expanded }}/remote_dir_src/'
+ dest: '{{ remote_dir_expanded }}/new_dir_with_chown'
+ owner: '{{ ansible_copy_test_user_name }}'
+ group: '{{ ansible_copy_test_user_name }}'
+ follow: true
+ register: testcase5
+ become: true
+
+ - name: gather - Stat the new_dir_with_chown
+ stat:
+ path: '{{ remote_dir }}/new_dir_with_chown'
+ register: stat_new_dir_with_chown
+
+ - name: gather - Stat the new_dir_with_chown/file1
+ stat:
+ path: '{{ remote_dir }}/new_dir_with_chown/file1'
+ register: stat_new_dir_with_chown_file1
+
+ - name: gather - Stat the new_dir_with_chown/subdir
+ stat:
+ path: '{{ remote_dir }}/new_dir_with_chown/subdir'
+ register: stat_new_dir_with_chown_subdir
+
+ - name: gather - Stat the new_dir_with_chown/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/new_dir_with_chown/subdir/file12'
+ register: stat_new_dir_with_chown_subdir_file12
+
+ - name: gather - Stat the new_dir_with_chown/link_file12
+ stat:
+ path: '{{ remote_dir }}/new_dir_with_chown/link_file12'
+ register: stat_new_dir_with_chown_link_file12
+
+ - name: assert - owner and group have changed
+ assert:
+ that:
+ - testcase5 is changed
+ - "stat_new_dir_with_chown.stat.uid == {{ ansible_copy_test_user.uid }}"
+ - "stat_new_dir_with_chown.stat.gid == {{ ansible_copy_test_group.gid }}"
+ - "stat_new_dir_with_chown.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_file1.stat.uid == {{ ansible_copy_test_user.uid }}"
+ - "stat_new_dir_with_chown_file1.stat.gid == {{ ansible_copy_test_group.gid }}"
+ - "stat_new_dir_with_chown_file1.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_file1.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_subdir.stat.uid == {{ ansible_copy_test_user.uid }}"
+ - "stat_new_dir_with_chown_subdir.stat.gid == {{ ansible_copy_test_group.gid }}"
+ - "stat_new_dir_with_chown_subdir.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_subdir.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_subdir_file12.stat.uid == {{ ansible_copy_test_user.uid }}"
+ - "stat_new_dir_with_chown_subdir_file12.stat.gid == {{ ansible_copy_test_group.gid }}"
+ - "stat_new_dir_with_chown_subdir_file12.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_subdir_file12.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_link_file12.stat.uid == {{ ansible_copy_test_user.uid }}"
+ - "stat_new_dir_with_chown_link_file12.stat.gid == {{ ansible_copy_test_group.gid }}"
+ - "stat_new_dir_with_chown_link_file12.stat.pw_name == '{{ ansible_copy_test_user_name }}'"
+ - "stat_new_dir_with_chown_link_file12.stat.gr_name == '{{ ansible_copy_test_user_name }}'"
+
+ always:
+ - name: execute - remove the user for test
+ user:
+ name: '{{ ansible_copy_test_user_name }}'
+ state: absent
+ remove: yes
+ become: true
+
+ - name: execute - remove the group for test
+ group:
+ name: '{{ ansible_copy_test_user_name }}'
+ state: absent
+ become: true
+
+## testcase last - make sure remote_dir_src has not changed
+- block:
+ - name: Stat the remote_dir_src
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src'
+ register: stat_remote_dir_src_after
+
+ - name: Stat the remote_dir_src/subdir
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/subdir'
+ register: stat_remote_dir_src_subdir_after
+
+ - name: Stat the remote_dir_src/file1
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/file1'
+ register: stat_remote_dir_src_file1_after
+
+ - name: Stat the remote_dir_src/subdir/file12
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/subdir/file12'
+ register: stat_remote_dir_src_subdir_file12_after
+
+ - name: Stat the remote_dir_src/link_file12
+ stat:
+ path: '{{ remote_dir }}/remote_dir_src/link_file12'
+ register: stat_remote_dir_src_link_file12_after
+
+ - name: Assert that remote_dir_src has not changed.
+ assert:
+ that:
+ - "stat_remote_dir_src_after.stat.exists"
+ - "stat_remote_dir_src_after.stat.isdir"
+ - "stat_remote_dir_src_before.stat.uid == stat_remote_dir_src_after.stat.uid"
+ - "stat_remote_dir_src_before.stat.gid == stat_remote_dir_src_after.stat.gid"
+ - "stat_remote_dir_src_before.stat.pw_name == stat_remote_dir_src_after.stat.pw_name"
+ - "stat_remote_dir_src_before.stat.gr_name == stat_remote_dir_src_after.stat.gr_name"
+ - "stat_remote_dir_src_before.stat.path == stat_remote_dir_src_after.stat.path"
+ - "stat_remote_dir_src_before.stat.mode == stat_remote_dir_src_after.stat.mode"
+
+ - "stat_remote_dir_src_subdir_after.stat.exists"
+ - "stat_remote_dir_src_subdir_after.stat.isdir"
+ - "stat_remote_dir_src_subdir_before.stat.uid == stat_remote_dir_src_subdir_after.stat.uid"
+ - "stat_remote_dir_src_subdir_before.stat.gid == stat_remote_dir_src_subdir_after.stat.gid"
+ - "stat_remote_dir_src_subdir_before.stat.pw_name == stat_remote_dir_src_subdir_after.stat.pw_name"
+ - "stat_remote_dir_src_subdir_before.stat.gr_name == stat_remote_dir_src_subdir_after.stat.gr_name"
+ - "stat_remote_dir_src_subdir_before.stat.path == stat_remote_dir_src_subdir_after.stat.path"
+ - "stat_remote_dir_src_subdir_before.stat.mode == stat_remote_dir_src_subdir_after.stat.mode"
+
+ - "stat_remote_dir_src_file1_after.stat.exists"
+ - "stat_remote_dir_src_file1_before.stat.uid == stat_remote_dir_src_file1_after.stat.uid"
+ - "stat_remote_dir_src_file1_before.stat.gid == stat_remote_dir_src_file1_after.stat.gid"
+ - "stat_remote_dir_src_file1_before.stat.pw_name == stat_remote_dir_src_file1_after.stat.pw_name"
+ - "stat_remote_dir_src_file1_before.stat.gr_name == stat_remote_dir_src_file1_after.stat.gr_name"
+ - "stat_remote_dir_src_file1_before.stat.path == stat_remote_dir_src_file1_after.stat.path"
+ - "stat_remote_dir_src_file1_before.stat.mode == stat_remote_dir_src_file1_after.stat.mode"
+ - "stat_remote_dir_src_file1_before.stat.checksum == stat_remote_dir_src_file1_after.stat.checksum"
+
+ - "stat_remote_dir_src_subdir_file12_after.stat.exists"
+ - "stat_remote_dir_src_subdir_file12_before.stat.uid == stat_remote_dir_src_subdir_file12_after.stat.uid"
+ - "stat_remote_dir_src_subdir_file12_before.stat.gid == stat_remote_dir_src_subdir_file12_after.stat.gid"
+ - "stat_remote_dir_src_subdir_file12_before.stat.pw_name == stat_remote_dir_src_subdir_file12_after.stat.pw_name"
+ - "stat_remote_dir_src_subdir_file12_before.stat.gr_name == stat_remote_dir_src_subdir_file12_after.stat.gr_name"
+ - "stat_remote_dir_src_subdir_file12_before.stat.path == stat_remote_dir_src_subdir_file12_after.stat.path"
+ - "stat_remote_dir_src_subdir_file12_before.stat.mode == stat_remote_dir_src_subdir_file12_after.stat.mode"
+ - "stat_remote_dir_src_subdir_file12_before.stat.checksum == stat_remote_dir_src_subdir_file12_after.stat.checksum"
+
+ - "stat_remote_dir_src_link_file12_after.stat.exists"
+ - "stat_remote_dir_src_link_file12_after.stat.islnk"
+ - "stat_remote_dir_src_link_file12_before.stat.uid == stat_remote_dir_src_link_file12_after.stat.uid"
+ - "stat_remote_dir_src_link_file12_before.stat.gid == stat_remote_dir_src_link_file12_after.stat.gid"
+ - "stat_remote_dir_src_link_file12_before.stat.pw_name == stat_remote_dir_src_link_file12_after.stat.pw_name"
+ - "stat_remote_dir_src_link_file12_before.stat.gr_name == stat_remote_dir_src_link_file12_after.stat.gr_name"
+ - "stat_remote_dir_src_link_file12_before.stat.path == stat_remote_dir_src_link_file12_after.stat.path"
+ - "stat_remote_dir_src_link_file12_before.stat.mode == stat_remote_dir_src_link_file12_after.stat.mode"
+
+# Test for issue 69783: copy with remote_src=yes and src='dir/' preserves all permissions
+- block:
+ - name: Create directory structure
+ file:
+ path: "{{ local_temp_dir }}/test69783/{{ item }}"
+ state: directory
+ loop:
+ - "src/dir"
+ - "dest"
+
+ - name: Create source file structure
+ file:
+ path: "{{ local_temp_dir }}/test69783/src/{{ item.name }}"
+ state: touch
+ mode: "{{ item.mode }}"
+ loop:
+ - { name: 'readwrite', mode: '0644' }
+ - { name: 'executable', mode: '0755' }
+ - { name: 'readonly', mode: '0444' }
+ - { name: 'dir/readwrite', mode: '0644' }
+ - { name: 'dir/executable', mode: '0755' }
+ - { name: 'dir/readonly', mode: '0444' }
+
+ - name: Recursive remote copy with preserve
+ copy:
+ src: "{{ local_temp_dir }}/test69783/src/"
+ dest: "{{ local_temp_dir }}/test69783/dest/"
+ remote_src: yes
+ mode: preserve
+
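+ # 'mode: preserve' sets each copied file's permissions to those of its
+ # source file; issue 69783 was about recursive remote_src copies losing
+ # these modes, so the stats below check both top-level and nested files.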
+ - name: Stat dest 'readwrite' file
+ stat:
+ path: "{{ local_temp_dir}}/test69783/dest/readwrite"
+ register: dest_readwrite_stat
+
+ - name: Stat dest 'executable' file
+ stat:
+ path: "{{ local_temp_dir}}/test69783/dest/executable"
+ register: dest_executable_stat
+
+ - name: Stat dest 'readonly' file
+ stat:
+ path: "{{ local_temp_dir}}/test69783/dest/readonly"
+ register: dest_readonly_stat
+
+ - name: Stat dest 'dir/readwrite' file
+ stat:
+ path: "{{ local_temp_dir}}/test69783/dest/dir/readwrite"
+ register: dest_dir_readwrite_stat
+
+ - name: Stat dest 'dir/executable' file
+ stat:
+ path: "{{ local_temp_dir}}/test69783/dest/dir/executable"
+ register: dest_dir_executable_stat
+
+ - name: Stat dest 'dir/readonly' file
+ stat:
+ path: "{{ local_temp_dir}}/test69783/dest/dir/readonly"
+ register: dest_dir_readonly_stat
+
+ - name: Assert modes are preserved
+ assert:
+ that:
+ - "dest_readwrite_stat.stat.mode == '0644'"
+ - "dest_executable_stat.stat.mode == '0755'"
+ - "dest_readonly_stat.stat.mode == '0444'"
+ - "dest_dir_readwrite_stat.stat.mode == '0644'"
+ - "dest_dir_executable_stat.stat.mode == '0755'"
+ - "dest_dir_readonly_stat.stat.mode == '0444'"
diff --git a/test/integration/targets/cron/aliases b/test/integration/targets/cron/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/test/integration/targets/cron/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/test/integration/targets/cron/defaults/main.yml b/test/integration/targets/cron/defaults/main.yml
new file mode 100644
index 00000000..37e6fc37
--- /dev/null
+++ b/test/integration/targets/cron/defaults/main.yml
@@ -0,0 +1 @@
+faketime_pkg: libfaketime
diff --git a/test/integration/targets/cron/meta/main.yml b/test/integration/targets/cron/meta/main.yml
new file mode 100644
index 00000000..2d2436a1
--- /dev/null
+++ b/test/integration/targets/cron/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_cron
diff --git a/test/integration/targets/cron/tasks/main.yml b/test/integration/targets/cron/tasks/main.yml
new file mode 100644
index 00000000..3537b48d
--- /dev/null
+++ b/test/integration/targets/cron/tasks/main.yml
@@ -0,0 +1,213 @@
+- name: add cron task (check mode enabled, cron task not already created)
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ check_mode: yes
+ register: check_mode_enabled_state_present
+
+- assert:
+ that: check_mode_enabled_state_present is changed
+
+- name: add cron task (check mode disabled, task hasn't already been created)
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ register: add_cron_task
+
+- assert:
+ that: add_cron_task is changed
+
+- name: add cron task (check mode enabled, cron task already exists)
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ check_mode: yes
+ register: check_mode_enabled_state_present_cron_task_already_exists
+
+- assert:
+ that: check_mode_enabled_state_present_cron_task_already_exists is not changed
+
+- name: add cron task (check mode disabled, cron task already created)
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ register: cron_task_already_created
+
+- assert:
+ that: cron_task_already_created is not changed
+
+- block:
+ - name: wait for canary creation
+ wait_for:
+ path: '{{ remote_dir }}/cron_canary1'
+ timeout: '{{ 20 if faketime_pkg else 70 }}'
+ register: wait_canary
+ always:
+ - name: display some logs in case of failure
+ command: 'journalctl -u {{ cron_service }}'
+ when: wait_canary is failed and ansible_service_mgr == 'systemd'
+
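+# Timing note (an inference from the defaults, not an upstream comment): with
+# libfaketime installed the clock can be accelerated so the per-minute cron
+# job fires almost immediately, hence the 20 second timeout; without it the
+# job must wait out a real minute boundary, hence 70 seconds.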
+- debug:
+ msg: 'elapsed time waiting for canary: {{ wait_canary.elapsed }}'
+
+- name: Remove a cron task (check mode enabled)
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ state: absent
+ check_mode: yes
+ register: check_check_mode
+
+- assert:
+ that: check_check_mode is changed
+
+- name: Remove a cron task
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ state: absent
+ register: remove_task
+
+- assert:
+ that: remove_task is changed
+
+- name: 'cron task missing: check idempotence (check mode enabled, state=absent)'
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ state: absent
+ check_mode: yes
+ register: check_mode_enabled_remove_task_idempotence
+
+- assert:
+ that: check_mode_enabled_remove_task_idempotence is not changed
+
+- name: 'cron task missing: check idempotence (check mode disabled, state=absent)'
+ cron:
+ name: test cron task
+ job: 'date > {{ remote_dir }}/cron_canary1'
+ state: absent
+ register: remove_task_idempotence
+
+- assert:
+ that: remove_task_idempotence is not changed
+
+- name: Check that removing a cron task with cron_file and without specifying a user is allowed (#58493)
+ cron:
+ cron_file: unexistent_cron_file
+ state: absent
+ register: remove_cron_file
+
+- assert:
+ that: remove_cron_file is not changed
+
+- name: Non-regression test - cron file should not be empty after adding a var (#71207)
+ when: ansible_distribution != 'Alpine'
+ block:
+ - name: Cron file creation
+ cron:
+ cron_file: cron_filename
+ name: "simple cron job"
+ job: 'echo "_o/"'
+ user: root
+
+ - name: Add var to the cron file
+ cron:
+ cron_file: cron_filename
+ env: yes
+ name: FOO
+ value: bar
+ user: root
+
+ - name: "Ensure cron_file still contains job string"
+ replace:
+ path: /etc/cron.d/cron_filename
+ regexp: "_o/"
+ replace: "OK"
+ register: find_chars
+ failed_when: (find_chars is not changed) or (find_chars is failed)
+
+# BusyBox does not have /etc/cron.d
+- name: Removing a cron file when the name is specified is allowed (#57471)
+ block:
+ - name: Cron file creation
+ cron:
+ cron_file: cron_filename
+ name: "integration test cron"
+ job: 'ls'
+ user: root
+
+ - name: Cron file deletion
+ cron:
+ cron_file: cron_filename
+ name: "integration test cron"
+ state: absent
+
+ - name: Check successful file deletion
+ stat:
+ path: /etc/cron.d/cron_filename
+ register: cron_file_stats
+
+ - assert:
+ that: not cron_file_stats.stat.exists
+
+- name: Allow non-ascii chars in job (#69492)
+ block:
+ - name: Cron file creation
+ cron:
+ cron_file: cron_filename
+ name: "cron job that contain non-ascii chars in job (ã“ã‚Œã¯æ—¥æœ¬èªžã§ã™; This is Japanese)"
+ job: 'echo "ã†ã©ã‚“ã¯å¥½ãã ãŒãŠåŒ–ã‘👻ã¯è‹¦æ‰‹ã§ã‚る。"'
+ user: root
+
+ - name: "Ensure cron_file contains job string"
+ replace:
+ path: /etc/cron.d/cron_filename
+ regexp: "ã†ã©ã‚“ã¯å¥½ãã ãŒãŠåŒ–ã‘👻ã¯è‹¦æ‰‹ã§ã‚る。"
+ replace: "ãã‚Œã¯æ©Ÿå¯†æƒ…報🔓ã§ã™ã€‚"
+ register: find_chars
+ failed_when: (find_chars is not changed) or (find_chars is failed)
+
+ - name: Cron file deletion
+ cron:
+ cron_file: cron_filename
+ name: "cron job that contain non-ascii chars in job (ã“ã‚Œã¯æ—¥æœ¬èªžã§ã™; This is Japanese)"
+ state: absent
+
+ - name: Check successful file deletion
+ stat:
+ path: /etc/cron.d/cron_filename
+ register: cron_file_stats
+
+ - assert:
+ that: not cron_file_stats.stat.exists
+
+- name: Allow non-ascii chars in cron_file (#69492)
+ block:
+ - name: Cron file creation with non-ascii filename (これは日本語です; This is Japanese)
+ cron:
+ cron_file: 'なせば大抵なんとかなる👊'
+ name: "integration test cron"
+ job: 'echo "Hello, ansible!"'
+ user: root
+
+ - name: Check file exists
+ stat:
+ path: "/etc/cron.d/ãªã›ã°å¤§æŠµãªã‚“ã¨ã‹ãªã‚‹ðŸ‘Š"
+ register: cron_file_stats
+
+ - assert:
+ that: cron_file_stats.stat.exists
+
+ - name: Cron file deletion
+ cron:
+ cron_file: 'なせば大抵なんとかなる👊'
+ name: "integration test cron"
+ state: absent
+
+ - name: Check successful file deletion
+ stat:
+ path: "/etc/cron.d/ãªã›ã°å¤§æŠµãªã‚“ã¨ã‹ãªã‚‹ðŸ‘Š"
+ register: cron_file_stats
+
+ - assert:
+ that: not cron_file_stats.stat.exists
diff --git a/test/integration/targets/dataloader/aliases b/test/integration/targets/dataloader/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/dataloader/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/dataloader/attempt_to_load_invalid_json.yml b/test/integration/targets/dataloader/attempt_to_load_invalid_json.yml
new file mode 100644
index 00000000..536e6daa
--- /dev/null
+++ b/test/integration/targets/dataloader/attempt_to_load_invalid_json.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: false
+ vars_files:
+ - vars/invalid.json
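+# The vars file above is deliberately malformed ('{ }}'), so loading it must
+# fail with a JSON parse error; runme.sh greps the playbook output for
+# 'JSON:' to confirm the error message identifies the format.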
diff --git a/test/integration/targets/dataloader/runme.sh b/test/integration/targets/dataloader/runme.sh
new file mode 100755
index 00000000..6a1bc9a0
--- /dev/null
+++ b/test/integration/targets/dataloader/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# check if we get proper json error
+ansible-playbook -i ../../inventory attempt_to_load_invalid_json.yml "$@" 2>&1|grep 'JSON:'
diff --git a/test/integration/targets/dataloader/vars/invalid.json b/test/integration/targets/dataloader/vars/invalid.json
new file mode 100644
index 00000000..8d4e4304
--- /dev/null
+++ b/test/integration/targets/dataloader/vars/invalid.json
@@ -0,0 +1 @@
+{ }}
diff --git a/test/integration/targets/debconf/aliases b/test/integration/targets/debconf/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/debconf/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/debconf/meta/main.yml b/test/integration/targets/debconf/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/debconf/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/debconf/tasks/main.yml b/test/integration/targets/debconf/tasks/main.yml
new file mode 100644
index 00000000..d3d63cdf
--- /dev/null
+++ b/test/integration/targets/debconf/tasks/main.yml
@@ -0,0 +1,36 @@
+# Test code for the debconf module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+##
+## debconf query
+##
+
+- block:
+ - name: query the tzdata package
+ debconf:
+ name: tzdata
+ register: debconf_test0
+
+ - name: validate results for test 0
+ assert:
+ that:
+ - 'debconf_test0.changed is defined'
+ - 'debconf_test0.current is defined'
+ - '"tzdata/Zones/Etc" in debconf_test0.current'
+ - 'debconf_test0.current["tzdata/Zones/Etc"] == "UTC"'
+ when: ansible_distribution in ('Ubuntu', 'Debian')
diff --git a/test/integration/targets/debug/aliases b/test/integration/targets/debug/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/debug/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/debug/main.yml b/test/integration/targets/debug/main.yml
new file mode 100644
index 00000000..9e49b827
--- /dev/null
+++ b/test/integration/targets/debug/main.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: test item being present in the output
+ debug: var=item
+ loop: [1, 2, 3]
diff --git a/test/integration/targets/debug/main_fqcn.yml b/test/integration/targets/debug/main_fqcn.yml
new file mode 100644
index 00000000..d6a00fc8
--- /dev/null
+++ b/test/integration/targets/debug/main_fqcn.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: test item being present in the output
+ ansible.builtin.debug: var=item
+ loop: [1, 2, 3]
diff --git a/test/integration/targets/debug/runme.sh b/test/integration/targets/debug/runme.sh
new file mode 100755
index 00000000..5ccb1bfd
--- /dev/null
+++ b/test/integration/targets/debug/runme.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -eux
+
+trap 'rm -f out' EXIT
+
+ansible-playbook main.yml -i ../../inventory | tee out
+for i in 1 2 3; do
+ grep "ok: \[localhost\] => (item=$i)" out
+ grep "\"item\": $i" out
+done
+
+ansible-playbook main_fqcn.yml -i ../../inventory | tee out
+for i in 1 2 3; do
+ grep "ok: \[localhost\] => (item=$i)" out
+ grep "\"item\": $i" out
+done
diff --git a/test/integration/targets/delegate_to/aliases b/test/integration/targets/delegate_to/aliases
new file mode 100644
index 00000000..b8e973da
--- /dev/null
+++ b/test/integration/targets/delegate_to/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group3
+needs/ssh
+needs/root # only on macOS and FreeBSD to configure network interfaces
+skip/aix
diff --git a/test/integration/targets/delegate_to/connection_plugins/fakelocal.py b/test/integration/targets/delegate_to/connection_plugins/fakelocal.py
new file mode 100644
index 00000000..59ddcf05
--- /dev/null
+++ b/test/integration/targets/delegate_to/connection_plugins/fakelocal.py
@@ -0,0 +1,76 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ connection: fakelocal
+ short_description: don't execute anything
+ description:
+ - This connection plugin just verifies parameters passed in
+ author: ansible (@core)
+ version_added: histerical
+ options:
+ password:
+ description: Authentication password for the C(remote_user). Can be supplied as CLI option.
+ vars:
+ - name: ansible_password
+ remote_user:
+ description:
+ - User name with which to login to the remote server, normally set by the remote_user keyword.
+ ini:
+ - section: defaults
+ key: remote_user
+ vars:
+ - name: ansible_user
+'''
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+
+display = Display()
+
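+# How the tests in this patch use the plugin: the delegate_to inventory maps
+# testhost5 to ansible_connection=fakelocal, and has_hostvars.yml supplies
+# remote_user=invaliduser with password=badpassword to trigger the
+# AnsibleConnectionFailure below, proving which host's vars resolved the
+# connection.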
+
+class Connection(ConnectionBase):
+ ''' Local based connections '''
+
+ transport = 'fakelocal'
+ has_pipelining = True
+
+ def __init__(self, *args, **kwargs):
+
+ super(Connection, self).__init__(*args, **kwargs)
+ self.cwd = None
+
+ def _connect(self):
+ ''' verify '''
+
+ if self.get_option('remote_user') == 'invaliduser' and self.get_option('password') == 'badpassword':
+ raise AnsibleConnectionFailure('Got invaliduser and badpassword')
+
+ if not self._connected:
+ display.vvv(u"ESTABLISH FAKELOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ ''' run a command on the local host '''
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ return 0, '{"msg": "ALL IS GOOD"}', ''
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to local '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from local to local -- for compatibility '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
diff --git a/test/integration/targets/delegate_to/delegate_and_nolog.yml b/test/integration/targets/delegate_to/delegate_and_nolog.yml
new file mode 100644
index 00000000..d8ed64fe
--- /dev/null
+++ b/test/integration/targets/delegate_to/delegate_and_nolog.yml
@@ -0,0 +1,8 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: no_log filtering caused delegation to fail (https://github.com/ansible/ansible/issues/43026)
+ become: False
+ no_log: true
+ debug:
+ delegate_to: localhost
diff --git a/test/integration/targets/delegate_to/delegate_facts_block.yml b/test/integration/targets/delegate_to/delegate_facts_block.yml
new file mode 100644
index 00000000..2edfeb42
--- /dev/null
+++ b/test/integration/targets/delegate_to/delegate_facts_block.yml
@@ -0,0 +1,25 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: set var to delegated host directly
+ set_fact: qq1=333
+ delegate_facts: true
+ delegate_to: localhost
+
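+ # delegate_facts: true routes the set_fact result to the delegated host
+ # (localhost) instead of the inventory host, which the next assert checks.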
+ - name: ensure qq1 exists in localhost but not in testhost
+ assert:
+ that:
+ - qq1 is undefined
+ - "'qq1' in hostvars['localhost']"
+
+ - name: set var to delegated host via inheritance
+ block:
+ - set_fact: qq2=333
+ delegate_facts: true
+ delegate_to: localhost
+
+ - name: ensure qq2 exists in localhost but not in testhost
+ assert:
+ that:
+ - qq2 is undefined
+ - "'qq2' in hostvars['localhost']"
diff --git a/test/integration/targets/delegate_to/delegate_local_from_root.yml b/test/integration/targets/delegate_to/delegate_local_from_root.yml
new file mode 100644
index 00000000..c9be4ff2
--- /dev/null
+++ b/test/integration/targets/delegate_to/delegate_local_from_root.yml
@@ -0,0 +1,10 @@
+- name: handle case from issue 72541
+ hosts: testhost
+ gather_facts: false
+ remote_user: root
+ tasks:
+ - name: ensure we copy w/o errors due to remote user not being overridden
+ copy:
+ src: testfile
+ dest: "{{ playbook_dir }}"
+ delegate_to: localhost
diff --git a/test/integration/targets/delegate_to/delegate_vars_hanldling.yml b/test/integration/targets/delegate_to/delegate_vars_hanldling.yml
new file mode 100644
index 00000000..6ac64e9c
--- /dev/null
+++ b/test/integration/targets/delegate_to/delegate_vars_hanldling.yml
@@ -0,0 +1,58 @@
+- name: set up delegated host
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - add_host:
+ name: delegatetome
+ ansible_host: 127.0.0.4
+
+- name: ensure we don't use orig host vars if the delegated one does not define them
+ hosts: testhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: force current host to use winrm
+ set_fact:
+ ansible_connection: winrm
+
+ - name: this should fail (missing winrm or unreachable)
+ ping:
+ ignore_errors: true
+ ignore_unreachable: true
+ register: orig
+
+ - name: ensure prev failed
+ assert:
+ that:
+ - orig is failed or orig is unreachable
+
+ - name: this will only fail if we take orig host ansible_connection instead of defaults
+ ping:
+ delegate_to: delegatetome
+
+
+- name: ensure plugin specific vars are properly used
+ hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: set unusable ssh args
+ set_fact:
+ ansible_host: 127.0.0.1
+ ansible_connection: ssh
+ ansible_ssh_common_args: 'MEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE'
+ ansible_connection_timeout: 5
+
+ - name: fail to ping with bad args
+ ping:
+ register: bad_args_ping
+ ignore_unreachable: true
+
+ - debug: var=bad_args_ping
+ - name: ensure prev failed
+ assert:
+ that:
+ - bad_args_ping is failed or bad_args_ping is unreachable
+
+ - name: this should work by ignoring the bad args for the orig host
+ ping:
+ delegate_to: delegatetome
diff --git a/test/integration/targets/delegate_to/discovery_applied.yml b/test/integration/targets/delegate_to/discovery_applied.yml
new file mode 100644
index 00000000..fafe664c
--- /dev/null
+++ b/test/integration/targets/delegate_to/discovery_applied.yml
@@ -0,0 +1,8 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - command: ls
+ delegate_to: "{{ item }}"
+ with_items:
+ - localhost
+ - "{{ inventory_hostname }}"
diff --git a/test/integration/targets/delegate_to/files/testfile b/test/integration/targets/delegate_to/files/testfile
new file mode 100644
index 00000000..492bafce
--- /dev/null
+++ b/test/integration/targets/delegate_to/files/testfile
@@ -0,0 +1 @@
+nothing special
diff --git a/test/integration/targets/delegate_to/has_hostvars.yml b/test/integration/targets/delegate_to/has_hostvars.yml
new file mode 100644
index 00000000..9e8926bd
--- /dev/null
+++ b/test/integration/targets/delegate_to/has_hostvars.yml
@@ -0,0 +1,64 @@
+- name: ensure delegated host has hostvars available for resolving connection
+ hosts: testhost
+ gather_facts: false
+ tasks:
+
+ - name: ensure delegated host uses current host as inventory_hostname
+ assert:
+ that:
+ - inventory_hostname == ansible_delegated_vars['testhost5']['inventory_hostname']
+ delegate_to: testhost5
+
+ - name: Set info on inventory_hostname
+ set_fact:
+ login: invaliduser
+ mypass: badpassword
+
+ - name: test fakelocal
+ command: ls
+ ignore_unreachable: True
+ ignore_errors: True
+ remote_user: "{{ login }}"
+ vars:
+ ansible_password: "{{ mypass }}"
+ ansible_connection: fakelocal
+ register: badlogin
+
+ - name: ensure the task was unreachable and not a templating error
+ assert:
+ that:
+ - badlogin is unreachable
+
+ - name: delegate but try to use inventory_hostname data directly
+ command: ls
+ delegate_to: testhost5
+ ignore_unreachable: True
+ ignore_errors: True
+ remote_user: "{{ login }}"
+ vars:
+ ansible_password: "{{ mypass }}"
+ register: badlogin
+
+ - name: ensure the task failed with a templating error and was not unreachable
+ assert:
+ that:
+ - badlogin is not unreachable
+ - badlogin is failed
+ - "'undefined' in badlogin['msg']"
+
+ - name: delegate ls to testhost5 as it uses ssh while testhost is local, but use vars from testhost
+ command: ls
+ remote_user: "{{ hostvars[inventory_hostname]['login'] }}"
+ delegate_to: testhost5
+ ignore_unreachable: True
+ ignore_errors: True
+ vars:
+ ansible_password: "{{ hostvars[inventory_hostname]['mypass'] }}"
+ register: badlogin
+
+ - name: ensure the task was unreachable and not a templating error
+ assert:
+ that:
+ - badlogin is unreachable
+ - badlogin is not failed
+ - "'undefined' not in badlogin['msg']"
diff --git a/test/integration/targets/delegate_to/inventory b/test/integration/targets/delegate_to/inventory
new file mode 100644
index 00000000..f7ad0a33
--- /dev/null
+++ b/test/integration/targets/delegate_to/inventory
@@ -0,0 +1,9 @@
+[local]
+testhost ansible_connection=local
+testhost2 ansible_connection=local
+testhost3 ansible_ssh_host=127.0.0.3
+testhost4 ansible_ssh_host=127.0.0.4
+testhost5 ansible_connection=fakelocal
+
+[all:vars]
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/delegate_to/inventory_interpreters b/test/integration/targets/delegate_to/inventory_interpreters
new file mode 100644
index 00000000..4c202ca5
--- /dev/null
+++ b/test/integration/targets/delegate_to/inventory_interpreters
@@ -0,0 +1,5 @@
+testhost ansible_python_interpreter=firstpython
+testhost2 ansible_python_interpreter=secondpython
+
+[all:vars]
+ansible_connection=local
diff --git a/test/integration/targets/delegate_to/library/detect_interpreter.py b/test/integration/targets/delegate_to/library/detect_interpreter.py
new file mode 100644
index 00000000..1f401677
--- /dev/null
+++ b/test/integration/targets/delegate_to/library/detect_interpreter.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(argument_spec={})
+ module.exit_json(**dict(found=sys.executable))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/delegate_to/roles/test_template/templates/foo.j2 b/test/integration/targets/delegate_to/roles/test_template/templates/foo.j2
new file mode 100644
index 00000000..22187f91
--- /dev/null
+++ b/test/integration/targets/delegate_to/roles/test_template/templates/foo.j2
@@ -0,0 +1,3 @@
+{{ templated_var }}
+
+{{ templated_dict | to_nice_json }}
diff --git a/test/integration/targets/delegate_to/runme.sh b/test/integration/targets/delegate_to/runme.sh
new file mode 100755
index 00000000..697fc393
--- /dev/null
+++ b/test/integration/targets/delegate_to/runme.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+set -eux
+
+platform="$(uname)"
+
+function setup() {
+ if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then
+ ifconfig lo0
+
+ existing=$(ifconfig lo0 | grep '^[[:blank:]]inet 127\.0\.0\. ' || true)
+
+ echo "${existing}"
+
+ for i in 3 4 254; do
+ ip="127.0.0.${i}"
+
+ if [[ "${existing}" != *"${ip}"* ]]; then
+ ifconfig lo0 alias "${ip}" up
+ fi
+ done
+
+ ifconfig lo0
+ fi
+}
+
+function teardown() {
+ if [[ "${platform}" == "FreeBSD" ]] || [[ "${platform}" == "Darwin" ]]; then
+ for i in 3 4 254; do
+ ip="127.0.0.${i}"
+
+ if [[ "${existing}" != *"${ip}"* ]]; then
+ ifconfig lo0 -alias "${ip}"
+ fi
+ done
+
+ ifconfig lo0
+ fi
+}
+
+setup
+
+trap teardown EXIT
+
+ANSIBLE_SSH_ARGS='-C -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null' \
+ ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_delegate_to.yml -i inventory -v "$@"
+
+# this test is not doing what it says it does, also relies on var that should not be available
+#ansible-playbook test_loop_control.yml -v "$@"
+
+ansible-playbook test_delegate_to_loop_randomness.yml -v "$@"
+
+ansible-playbook delegate_and_nolog.yml -i inventory -v "$@"
+
+ansible-playbook delegate_facts_block.yml -i inventory -v "$@"
+
+ansible-playbook test_delegate_to_loop_caching.yml -i inventory -v "$@"
+
+# ensure we are using correct settings when delegating
+ANSIBLE_TIMEOUT=3 ansible-playbook delegate_vars_hanldling.yml -i inventory -v "$@"
+
+ansible-playbook has_hostvars.yml -i inventory -v "$@"
+
+# test ansible_x_interpreter
+# python
+source virtualenv.sh
+(
+cd "${OUTPUT_DIR}"/venv/bin
+ln -s python firstpython
+ln -s python secondpython
+)
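+# inventory_interpreters assigns ansible_python_interpreter=firstpython and
+# secondpython to testhost and testhost2; the symlinks above make both names
+# resolve to the venv python, so verify_interpreter.yml can confirm each task
+# (including delegated ones) runs under its own host's interpreter.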
+ansible-playbook verify_interpreter.yml -i inventory_interpreters -v "$@"
+ansible-playbook discovery_applied.yml -i inventory -v "$@"
+ansible-playbook delegate_local_from_root.yml -i inventory -v "$@" -e 'ansible_user=root'
diff --git a/test/integration/targets/delegate_to/test_delegate_to.yml b/test/integration/targets/delegate_to/test_delegate_to.yml
new file mode 100644
index 00000000..05b0536e
--- /dev/null
+++ b/test/integration/targets/delegate_to/test_delegate_to.yml
@@ -0,0 +1,58 @@
+- hosts: testhost3
+ vars:
+ - template_role: ./roles/test_template
+ - output_dir: "{{ playbook_dir }}"
+ - templated_var: foo
+ - templated_dict: { 'hello': 'world' }
+ tasks:
+ - name: Test no delegate_to
+ setup:
+ register: setup_results
+
+ - assert:
+ that:
+ - '"127.0.0.3" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]'
+
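+ # SSH_CONNECTION is set by sshd to "<client ip> <client port> <server ip>
+ # <server port>", so asserting the delegated address appears in it proves
+ # which loopback alias the task actually connected to.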
+ - name: Test delegate_to with host in inventory
+ setup:
+ register: setup_results
+ delegate_to: testhost4
+
+ - debug: var=setup_results
+
+ - assert:
+ that:
+ - '"127.0.0.4" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]'
+
+ - name: Test delegate_to with host not in inventory
+ setup:
+ register: setup_results
+ delegate_to: 127.0.0.254
+
+ - assert:
+ that:
+ - '"127.0.0.254" in setup_results.ansible_facts.ansible_env["SSH_CONNECTION"]'
+#
+# Smoketest some other modules do not error as a canary
+#
+ - name: Test file works with delegate_to and a host in inventory
+ file: path={{ output_dir }}/foo.txt mode=0644 state=touch
+ delegate_to: testhost4
+
+ - name: Test file works with delegate_to and a host not in inventory
+ file: path={{ output_dir }}/tmp.txt mode=0644 state=touch
+ delegate_to: 127.0.0.254
+
+ - name: Test template works with delegate_to and a host in inventory
+ template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt
+ delegate_to: testhost4
+
+ - name: Test template works with delegate_to and a host not in inventory
+ template: src={{ template_role }}/templates/foo.j2 dest={{ output_dir }}/foo.txt
+ delegate_to: 127.0.0.254
+
+ - name: remove test file
+ file: path={{ output_dir }}/foo.txt state=absent
+
+ - name: remove test file
+ file: path={{ output_dir }}/tmp.txt state=absent
diff --git a/test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml b/test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml
new file mode 100644
index 00000000..6ea08f72
--- /dev/null
+++ b/test/integration/targets/delegate_to/test_delegate_to_loop_caching.yml
@@ -0,0 +1,45 @@
+- hosts: testhost,testhost2
+ gather_facts: false
+ vars:
+ delegate_to_host: "localhost"
+ tasks:
+ - set_fact:
+ gandalf:
+ shout: 'You shall not pass!'
+ when: inventory_hostname == 'testhost'
+
+ - set_fact:
+ gandalf:
+ speak: 'Run you fools!'
+ when: inventory_hostname == 'testhost2'
+
+ - name: works correctly
+ debug: var=item
+ delegate_to: localhost
+ with_dict: "{{ gandalf }}"
+ register: result1
+
+ - name: shows same item for all hosts
+ debug: var=item
+ delegate_to: "{{ delegate_to_host }}"
+ with_dict: "{{ gandalf }}"
+ register: result2
+
+ - debug:
+ var: result2.results[0].item.value
+
+ - assert:
+ that:
+ - result1.results[0].item.value == 'You shall not pass!'
+ - result2.results[0].item.value == 'You shall not pass!'
+ when: inventory_hostname == 'testhost'
+
+ - assert:
+ that:
+ - result1.results[0].item.value == 'Run you fools!'
+ - result2.results[0].item.value == 'Run you fools!'
+ when: inventory_hostname == 'testhost2'
+
+ - assert:
+ that:
+ - _ansible_loop_cache is undefined
diff --git a/test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml b/test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml
new file mode 100644
index 00000000..81033a16
--- /dev/null
+++ b/test/integration/targets/delegate_to/test_delegate_to_loop_randomness.yml
@@ -0,0 +1,73 @@
+---
+- name: Integration tests for #28231
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Add some test hosts
+ add_host:
+ name: "foo{{item}}"
+ groups: foo
+ ansible_connection: local
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ loop: "{{ range(10)|list }}"
+
+ # We expect all of the next 3 runs to succeed
+ # this is done multiple times to increase randomness
+ - assert:
+ that:
+ - item in ansible_delegated_vars
+ delegate_to: "{{ item }}"
+ loop:
+ - "{{ groups.foo|random }}"
+ ignore_errors: true
+ register: result1
+
+ - assert:
+ that:
+ - item in ansible_delegated_vars
+ delegate_to: "{{ item }}"
+ loop:
+ - "{{ groups.foo|random }}"
+ ignore_errors: true
+ register: result2
+
+ - assert:
+ that:
+ - item in ansible_delegated_vars
+ delegate_to: "{{ item }}"
+ loop:
+ - "{{ groups.foo|random }}"
+ ignore_errors: true
+ register: result3
+
+ - debug:
+ var: result1
+
+ - debug:
+ var: result2
+
+ - debug:
+ var: result3
+
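+ # 'results is all' passes only if every element of the list is truthy,
+ # i.e. all three randomly-delegated loop runs were successful.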
+ - name: Ensure all of the 3 asserts were successful
+ assert:
+ that:
+ - results is all
+ vars:
+ results:
+ - "{{ (result1.results|first) is successful }}"
+ - "{{ (result2.results|first) is successful }}"
+ - "{{ (result3.results|first) is successful }}"
+
+ - name: Set delegate
+ set_fact:
+ _delegate: '{{ groups.foo[0] }}'
+
+ - command: "true"
+ delegate_to: "{{ _delegate }}"
+ register: result
+
+ - assert:
+ that:
+ - result.stdout is defined
+ - result.results is undefined
diff --git a/test/integration/targets/delegate_to/test_loop_control.yml b/test/integration/targets/delegate_to/test_loop_control.yml
new file mode 100644
index 00000000..61e9304d
--- /dev/null
+++ b/test/integration/targets/delegate_to/test_loop_control.yml
@@ -0,0 +1,16 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Test delegate_to with loop_control
+ ping:
+ delegate_to: "{{ item }}"
+ with_items:
+ - localhost
+ loop_control:
+ label: "{{ item }}"
+ register: out
+
+ - name: Check if delegated_host was templated properly
+ assert:
+ that:
+ - out.results[0]['_ansible_delegated_vars']['ansible_delegated_host'] == 'localhost'
diff --git a/test/integration/targets/delegate_to/verify_interpreter.yml b/test/integration/targets/delegate_to/verify_interpreter.yml
new file mode 100644
index 00000000..63c60a41
--- /dev/null
+++ b/test/integration/targets/delegate_to/verify_interpreter.yml
@@ -0,0 +1,47 @@
+- name: ensure they are different
+ hosts: localhost
+ tasks:
+ - name: don't game me
+ assert:
+ msg: 'expected different values but got {{hostvars["testhost"]["ansible_python_interpreter"]}} and {{hostvars["testhost2"]["ansible_python_interpreter"]}}'
+ that:
+ - hostvars["testhost"]["ansible_python_interpreter"] != hostvars["testhost2"]["ansible_python_interpreter"]
+
+- name: no delegation
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: detect interpreter used by each host
+ detect_interpreter:
+ register: baseline
+
+ - name: verify it
+ assert:
+ msg: 'expected {{ansible_python_interpreter}} but got {{baseline.found|basename}}'
+ that:
+ - baseline.found|basename == ansible_python_interpreter
+
+- name: actual test
+ hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: original host
+ detect_interpreter:
+ register: found
+
+ - name: verify it orig host
+ assert:
+ msg: 'expected {{ansible_python_interpreter}} but got {{found.found|basename}}'
+ that:
+ - found.found|basename == ansible_python_interpreter
+
+ - name: delegated host
+ detect_interpreter:
+ register: found2
+ delegate_to: testhost2
+
+ - name: verify it delegated
+ assert:
+ msg: 'expected {{hostvars["testhost2"]["ansible_python_interpreter"]}} but got {{found2.found|basename}}'
+ that:
+ - found2.found|basename == hostvars["testhost2"]["ansible_python_interpreter"]
diff --git a/test/integration/targets/dict_transformations/aliases b/test/integration/targets/dict_transformations/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/dict_transformations/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/dict_transformations/library/convert_camelCase.py b/test/integration/targets/dict_transformations/library/convert_camelCase.py
new file mode 100644
index 00000000..50ca34c3
--- /dev/null
+++ b/test/integration/targets/dict_transformations/library/convert_camelCase.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: convert_camelCase
+short_description: test converting data to camelCase
+description: test converting data to camelCase
+options:
+ data:
+ description: Data to modify
+ type: dict
+ required: True
+ capitalize_first:
+ description: Whether to capitalize the first character
+ default: False
+ type: bool
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
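+# Expected behaviour, taken from the task file in this patch rather than an
+# exhaustive spec: snake_dict_to_camel_dict({'top_level_key': 1}) returns
+# {'topLevelKey': 1}; with capitalize_first=True the first character is also
+# upper-cased ({'TopLevelKey': 1}). Values are never converted, only keys.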
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(type='dict', required=True),
+ capitalize_first=dict(type='bool', default=False),
+ ),
+ )
+
+ result = snake_dict_to_camel_dict(
+ module.params['data'],
+ module.params['capitalize_first']
+ )
+
+ module.exit_json(data=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/dict_transformations/library/convert_snake_case.py b/test/integration/targets/dict_transformations/library/convert_snake_case.py
new file mode 100644
index 00000000..4c13fbcb
--- /dev/null
+++ b/test/integration/targets/dict_transformations/library/convert_snake_case.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: convert_snake_case
+short_description: test converting data to snake_case
+description: test converting data to snake_case
+options:
+ data:
+ description: Data to modify
+ type: dict
+ required: True
+ reversible:
+ description:
+ - Make the snake_case conversion in a way that can be converted back to the original value
+ - For example, convert IAMUser to i_a_m_user instead of iam_user
+ default: False
+ ignore_list:
+ description: list of top level keys that should not have their contents converted
+ type: list
+ default: []
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
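+# Expected behaviour, taken from the docs above and the task file in this
+# patch: camel_dict_to_snake_dict({'TOPLevelKey': 1}) returns
+# {'top_level_key': 1}; with reversible=True it returns {'t_o_p_level_key': 1}
+# so the original casing can be reconstructed, and keys named in ignore_list
+# keep their contents unconverted.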
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(type='dict', required=True),
+ reversible=dict(type='bool', default=False),
+ ignore_list=dict(type='list', default=[]),
+ ),
+ )
+
+ result = camel_dict_to_snake_dict(
+ module.params['data'],
+ module.params['reversible'],
+ module.params['ignore_list']
+ )
+
+ module.exit_json(data=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/dict_transformations/tasks/main.yml b/test/integration/targets/dict_transformations/tasks/main.yml
new file mode 100644
index 00000000..03aa6e19
--- /dev/null
+++ b/test/integration/targets/dict_transformations/tasks/main.yml
@@ -0,0 +1,3 @@
+- include_tasks: test_convert_snake_case.yml
+
+- include_tasks: test_convert_camelCase.yml
diff --git a/test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml b/test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml
new file mode 100644
index 00000000..666e8d3a
--- /dev/null
+++ b/test/integration/targets/dict_transformations/tasks/test_convert_camelCase.yml
@@ -0,0 +1,33 @@
+- convert_camelCase:
+ data: {'top_level_key': {'nested_key': 'do_not_convert'}}
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'topLevelKey': {'nestedKey': 'do_not_convert'}}"
+
+- convert_camelCase:
+ data: {'t_o_p_level_key': {'n_e_s_t_e_d_key': 'do_not_convert'}}
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'tOPLevelKey': {'nESTEDKey': 'do_not_convert'}}"
+
+- convert_camelCase:
+ data: {'t_o_p_level_key': {'n_e_s_t_e_d_key': 'do_not_convert'}}
+ capitalize_first: True
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'TOPLevelKey': {'NESTEDKey': 'do_not_convert'}}"
+
+- convert_camelCase:
+ data: {'results': [{'i_a_m_user': 'user_name', 'tags': {'do_convert': 'do_not_convert'}}]}
+ capitalize_first: True
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'Results': [{'IAMUser': 'user_name', 'Tags': {'DoConvert': 'do_not_convert'}}]}"
diff --git a/test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml b/test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml
new file mode 100644
index 00000000..ba80aa7a
--- /dev/null
+++ b/test/integration/targets/dict_transformations/tasks/test_convert_snake_case.yml
@@ -0,0 +1,26 @@
+- convert_snake_case:
+ data: {'TOPLevelKey': {'NESTEDKey': 'DoNotConvert'}}
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'top_level_key': {'nested_key': 'DoNotConvert'}}"
+
+- convert_snake_case:
+ data: {'TOPLevelKey': {'NESTEDKey': 'DoNotConvert'}}
+ reversible: True
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'t_o_p_level_key': {'n_e_s_t_e_d_key': 'DoNotConvert'}}"
+
+- convert_snake_case:
+ data: {'Results': [{'IAMUser': 'UserName', 'Tags': {'DoConvert': 'DoNotConvert'}}], 'Tags': {'DoNotConvert': 'DoNotConvert'}}
+ reversible: True
+ ignore_list: ['Tags'] # Ignore top level 'Tags' key if found
+ register: result
+
+- assert:
+ that:
+ - "result.data == {'results': [{'i_a_m_user': 'UserName', 'tags': {'do_convert': 'DoNotConvert'}}], 'tags': {'DoNotConvert': 'DoNotConvert'}}"
diff --git a/test/integration/targets/dnf/aliases b/test/integration/targets/dnf/aliases
new file mode 100644
index 00000000..4d1afd64
--- /dev/null
+++ b/test/integration/targets/dnf/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/power/centos
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/test/integration/targets/dnf/meta/main.yml b/test/integration/targets/dnf/meta/main.yml
new file mode 100644
index 00000000..34d81261
--- /dev/null
+++ b/test/integration/targets/dnf/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_rpm_repo
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/dnf/tasks/dnf.yml b/test/integration/targets/dnf/tasks/dnf.yml
new file mode 100644
index 00000000..19008188
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/dnf.yml
@@ -0,0 +1,774 @@
+# UNINSTALL 'python2-dnf'
+# The `dnf` module has the smarts to auto-install the relevant python
+# bindings. To test, we will first uninstall python2-dnf (so that the tests
+# on python2 will require python2-dnf)
+- name: check python2-dnf with rpm
+ shell: rpm -q python2-dnf
+ register: rpm_result
+ ignore_errors: true
+ args:
+ warn: no
+
+# Don't uninstall python2-dnf with the `dnf` module in case it needs to load
+# some dnf python files after the package is uninstalled.
+- name: uninstall python2-dnf with shell
+ shell: dnf -y remove python2-dnf
+ when: rpm_result is successful
+
+# UNINSTALL
+# With 'python2-dnf' uninstalled, the first call to 'dnf' should install
+# python2-dnf.
+- name: uninstall sos
+ dnf:
+ name: sos
+ state: removed
+ register: dnf_result
+
+- name: check sos with rpm
+ shell: rpm -q sos
+ failed_when: False
+ register: rpm_result
+
+- name: verify uninstallation of sos
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "rpm_result.rc == 1"
+
+# UNINSTALL AGAIN
+- name: uninstall sos
+ dnf:
+ name: sos
+ state: removed
+ register: dnf_result
+
+- name: verify no change on re-uninstall
+ assert:
+ that:
+ - "not dnf_result.changed"
+
+# INSTALL
+- name: install sos (check_mode)
+ dnf:
+ name: sos
+ state: present
+ update_cache: True
+ check_mode: True
+ register: dnf_result
+
+- assert:
+ that:
+ - dnf_result is success
+ - dnf_result.results|length > 0
+ - "dnf_result.results[0].startswith('Installed: ')"
+
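+# In check mode the dnf module reports the prospective transaction as
+# 'Installed: <pkg>' / 'Removed: <pkg>' / 'Downloaded: <pkg>' strings in
+# 'results'; the check_mode asserts in this file match on those prefixes.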
+- name: install sos
+ dnf:
+ name: sos
+ state: present
+ update_cache: True
+ register: dnf_result
+
+- name: check sos with rpm
+ shell: rpm -q sos
+ failed_when: False
+ register: rpm_result
+
+- name: verify installation of sos
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: verify dnf module outputs
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'results' in dnf_result"
+
+# INSTALL AGAIN
+- name: install sos again (check_mode)
+ dnf:
+ name: sos
+ state: present
+ check_mode: True
+ register: dnf_result
+
+- assert:
+ that:
+ - dnf_result is not changed
+ - dnf_result.results|length == 0
+
+- name: install sos again
+ dnf:
+ name: sos
+ state: present
+ register: dnf_result
+
+- name: verify no change on second install
+ assert:
+ that:
+ - "not dnf_result.changed"
+
+# Multiple packages
+- name: uninstall sos and pciutils
+ dnf: name=sos,pciutils state=removed
+ register: dnf_result
+
+- name: check sos with rpm
+ shell: rpm -q sos
+ failed_when: False
+ register: rpm_sos_result
+
+- name: check pciutils with rpm
+ shell: rpm -q pciutils
+ failed_when: False
+ register: rpm_pciutils_result
+
+- name: verify packages removed
+ assert:
+ that:
+ - "rpm_sos_result.rc != 0"
+ - "rpm_pciutils_result.rc != 0"
+
+- name: install sos and pciutils as comma separated
+ dnf: name=sos,pciutils state=present
+ register: dnf_result
+
+- name: check sos with rpm
+ shell: rpm -q sos
+ failed_when: False
+ register: rpm_sos_result
+
+- name: check pciutils with rpm
+ shell: rpm -q pciutils
+ failed_when: False
+ register: rpm_pciutils_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_sos_result.rc == 0"
+ - "rpm_pciutils_result.rc == 0"
+
+- name: uninstall sos and pciutils
+ dnf: name=sos,pciutils state=removed
+ register: dnf_result
+
+- name: install sos and pciutils as list
+ dnf:
+ name:
+ - sos
+ - pciutils
+ state: present
+ register: dnf_result
+
+- name: check sos with rpm
+ shell: rpm -q sos
+ failed_when: False
+ register: rpm_sos_result
+
+- name: check pciutils with rpm
+ shell: rpm -q pciutils
+ failed_when: False
+ register: rpm_pciutils_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_sos_result.rc == 0"
+ - "rpm_pciutils_result.rc == 0"
+
+- name: uninstall sos and pciutils
+ dnf:
+ name: "sos,pciutils"
+ state: removed
+ register: dnf_result
+
+- name: install sos and pciutils as comma separated with spaces
+ dnf:
+ name: "sos, pciutils"
+ state: present
+ register: dnf_result
+
+- name: check sos with rpm
+ shell: rpm -q sos
+ failed_when: False
+ register: rpm_sos_result
+
+- name: check pciutils with rpm
+ shell: rpm -q pciutils
+ failed_when: False
+ register: rpm_pciutils_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_sos_result.rc == 0"
+ - "rpm_pciutils_result.rc == 0"
+
+- name: uninstall sos and pciutils (check_mode)
+ dnf:
+ name:
+ - sos
+ - pciutils
+ state: removed
+ check_mode: True
+ register: dnf_result
+
+- assert:
+ that:
+ - dnf_result is success
+ - dnf_result.results|length == 2
+ - "dnf_result.results[0].startswith('Removed: ')"
+ - "dnf_result.results[1].startswith('Removed: ')"
+
+- name: uninstall sos and pciutils
+ dnf:
+ name:
+ - sos
+ - pciutils
+ state: removed
+ register: dnf_result
+
+- assert:
+ that:
+ - dnf_result is changed
+
+- name: install non-existent rpm
+ dnf:
+ name: does-not-exist
+ register: non_existent_rpm
+ ignore_errors: True
+
+- name: check non-existent rpm install failed
+ assert:
+ that:
+ - non_existent_rpm is failed
+
+# Install in installroot='/'. This should be identical to default
+- name: install sos in /
+ dnf: name=sos state=present installroot='/'
+ register: dnf_result
+
+- name: check sos with rpm in /
+ shell: rpm -q sos --root=/
+ failed_when: False
+ register: rpm_result
+
+- name: verify installation of sos in /
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: verify dnf module outputs in /
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'results' in dnf_result"
+
+- name: uninstall sos in /
+  dnf: name=sos state=removed installroot='/'
+ register: dnf_result
+
+- name: uninstall sos for downloadonly test
+ dnf:
+ name: sos
+ state: absent
+
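+# download_only resolves and downloads the requested packages without
+# installing them, so the follow-up removal below must report no change.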
+- name: Test download_only (check_mode)
+ dnf:
+ name: sos
+ state: latest
+ download_only: true
+ check_mode: true
+ register: dnf_result
+
+- assert:
+ that:
+ - dnf_result is success
+ - "dnf_result.results[0].startswith('Downloaded: ')"
+
+- name: Test download_only
+ dnf:
+ name: sos
+ state: latest
+ download_only: true
+ register: dnf_result
+
+- name: verify download of sos (part 1 -- dnf "install" succeeded)
+ assert:
+ that:
+ - "dnf_result is success"
+ - "dnf_result is changed"
+
+- name: uninstall sos (noop)
+ dnf:
+ name: sos
+ state: absent
+ register: dnf_result
+
+- name: verify download of sos (part 2 -- nothing removed during uninstall)
+ assert:
+ that:
+ - "dnf_result is success"
+ - "not dnf_result is changed"
+
+- name: uninstall sos for downloadonly/downloaddir test
+ dnf:
+ name: sos
+ state: absent
+
+- name: Test download_only/download_dir
+ dnf:
+ name: sos
+ state: latest
+ download_only: true
+ download_dir: "/var/tmp/packages"
+ register: dnf_result
+
+- name: verify dnf output
+ assert:
+ that:
+ - "dnf_result is success"
+ - "dnf_result is changed"
+
+- command: "ls /var/tmp/packages"
+ register: ls_out
+
+- name: Verify specified download_dir was used
+ assert:
+ that:
+ - "'sos' in ls_out.stdout"
+
+# GROUP INSTALL
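+# A leading "@" in the name tells the dnf module to operate on a package
+# group (or environment group) rather than a single package.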
+- name: install Custom Group group
+ dnf:
+ name: "@Custom Group"
+ state: present
+ register: dnf_result
+
+- name: check dinginessentail with rpm
+ command: rpm -q dinginessentail
+ failed_when: False
+ register: dinginessentail_result
+
+- name: verify installation of the group
+ assert:
+ that:
+ - not dnf_result is failed
+ - dnf_result is changed
+ - "'results' in dnf_result"
+ - dinginessentail_result.rc == 0
+
+- name: install the group again
+ dnf:
+ name: "@Custom Group"
+ state: present
+ register: dnf_result
+
+- name: verify nothing changed
+ assert:
+ that:
+ - not dnf_result is changed
+ - "'msg' in dnf_result"
+
+- name: ensure landsidescalping is not installed
+ dnf:
+ name: landsidescalping
+ state: absent
+
+- name: install the group again but also with a package that is not yet installed
+ dnf:
+ name:
+ - "@Custom Group"
+ - landsidescalping
+ state: present
+ register: dnf_result
+
+- name: check landsidescalping with rpm
+ command: rpm -q landsidescalping
+ failed_when: False
+ register: landsidescalping_result
+
+- name: verify landsidescalping is installed
+ assert:
+ that:
+ - dnf_result is changed
+ - "'results' in dnf_result"
+ - landsidescalping_result.rc == 0
+
+- name: try to install the group again, with --check to check 'changed'
+ dnf:
+ name: "@Custom Group"
+ state: present
+ check_mode: yes
+ register: dnf_result
+
+- name: verify nothing changed
+ assert:
+ that:
+ - not dnf_result is changed
+ - "'msg' in dnf_result"
+
+- name: remove landsidescalping after test
+ dnf:
+ name: landsidescalping
+ state: absent
+
+# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved
+- shell: 'dnf -y group install "Custom Group" && dnf -y group remove "Custom Group"'
+ register: shell_dnf_result
+
+# GROUP UPGRADE - this will go to the same method as group install
+# but through group_update - it is its invocation we're testing here
+# see commit 119c9e5d6eb572c4a4800fbe8136095f9063c37b
+- name: install latest Custom Group
+ dnf:
+ name: "@Custom Group"
+ state: latest
+ register: dnf_result
+
+- name: verify installation of the group
+ assert:
+ that:
+ - not dnf_result is failed
+ - dnf_result is changed
+ - "'results' in dnf_result"
+
+# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved
+- shell: dnf -y group install "Custom Group" && dnf -y group remove "Custom Group"
+
+- name: try to install non existing group
+ dnf:
+ name: "@non-existing-group"
+ state: present
+ register: dnf_result
+ ignore_errors: True
+
+- name: verify installation of the non existing group failed
+ assert:
+ that:
+ - "not dnf_result.changed"
+ - "dnf_result is failed"
+
+- name: verify dnf module outputs
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'msg' in dnf_result"
+
+- name: try to install non existing file
+ dnf:
+ name: /tmp/non-existing-1.0.0.fc26.noarch.rpm
+ state: present
+ register: dnf_result
+ ignore_errors: yes
+
+- name: verify installation failed
+ assert:
+ that:
+ - "dnf_result is failed"
+ - "not dnf_result.changed"
+
+- name: verify dnf module outputs
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'msg' in dnf_result"
+
+- name: try to install from non existing url
+ dnf:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/non-existing-1.0.0.fc26.noarch.rpm
+ state: present
+ register: dnf_result
+ ignore_errors: yes
+
+- name: verify installation failed
+ assert:
+ that:
+ - "dnf_result is failed"
+ - "not dnf_result.changed"
+
+- name: verify dnf module outputs
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'msg' in dnf_result"
+
+# ENVIRONMENT UPGRADE
+# see commit de299ef77c03a64a8f515033a79ac6b7db1bc710
+- name: install Custom Environment Group
+ dnf:
+ name: "@Custom Environment Group"
+ state: latest
+ register: dnf_result
+
+- name: check landsidescalping with rpm
+ command: rpm -q landsidescalping
+ register: landsidescalping_result
+
+- name: verify installation of the environment
+ assert:
+ that:
+ - not dnf_result is failed
+ - dnf_result is changed
+ - "'results' in dnf_result"
+ - landsidescalping_result.rc == 0
+
+# Fedora 28 (DNF 2) does not support this, just remove the package itself
+- name: remove landsidescalping package on Fedora 28
+ dnf:
+ name: landsidescalping
+ state: absent
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int <= 28
+
+# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved
+- name: remove Custom Environment Group
+ shell: dnf -y group install "Custom Environment Group" && dnf -y group remove "Custom Environment Group"
+ when: not (ansible_distribution == 'Fedora' and ansible_distribution_major_version|int <= 28)
+
+# https://github.com/ansible/ansible/issues/39704
+- name: install non-existent rpm, state=latest
+ dnf:
+ name: non-existent-rpm
+ state: latest
+ ignore_errors: yes
+ register: dnf_result
+
+- name: verify the result
+ assert:
+ that:
+ - "dnf_result is failed"
+ - "'non-existent-rpm' in dnf_result['failures'][0]"
+ - "'No package non-existent-rpm available' in dnf_result['failures'][0]"
+ - "'Failed to install some of the specified packages' in dnf_result['msg']"
+
+- name: use latest to install httpd
+ dnf:
+ name: httpd
+ state: latest
+ register: dnf_result
+
+- name: verify httpd was installed
+ assert:
+ that:
+ - "'changed' in dnf_result"
+
+- name: uninstall httpd
+ dnf:
+ name: httpd
+ state: removed
+
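+# With update_only, dnf upgrades a package only if it is already installed
+# and never installs it fresh; httpd was just removed, so this must report
+# no change.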
+- name: update httpd only if it exists
+ dnf:
+ name: httpd
+ state: latest
+ update_only: yes
+ register: dnf_result
+
+- name: verify httpd not installed
+ assert:
+ that:
+ - "not dnf_result is changed"
+
+- name: try to install not compatible arch rpm, should fail
+ dnf:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/banner-1.3.4-3.el7.ppc64le.rpm
+ state: present
+ register: dnf_result
+ ignore_errors: True
+
+- name: verify that dnf failed
+ assert:
+ that:
+ - "not dnf_result is changed"
+ - "dnf_result is failed"
+
+# setup for testing installing an RPM from url
+
+- set_fact:
+ pkg_name: fpaste
+
+- name: cleanup
+ dnf:
+ name: "{{ pkg_name }}"
+ state: absent
+
+- set_fact:
+ pkg_url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/fpaste-0.3.9.1-1.fc27.noarch.rpm
+# setup end
+
+- name: download an rpm
+ get_url:
+ url: "{{ pkg_url }}"
+ dest: "/tmp/{{ pkg_name }}.rpm"
+
+- name: install the downloaded rpm
+ dnf:
+ name: "/tmp/{{ pkg_name }}.rpm"
+ state: present
+ disable_gpg_check: true
+ register: dnf_result
+
+- name: verify installation
+ assert:
+ that:
+ - "dnf_result is success"
+ - "dnf_result is changed"
+
+- name: install the downloaded rpm again
+ dnf:
+ name: "/tmp/{{ pkg_name }}.rpm"
+ state: present
+ register: dnf_result
+
+- name: verify installation
+ assert:
+ that:
+ - "dnf_result is success"
+ - "not dnf_result is changed"
+
+- name: clean up
+ dnf:
+ name: "{{ pkg_name }}"
+ state: absent
+
+- name: install from url
+ dnf:
+ name: "{{ pkg_url }}"
+ state: present
+ disable_gpg_check: true
+ register: dnf_result
+
+- name: verify installation
+ assert:
+ that:
+ - "dnf_result is success"
+ - "dnf_result is changed"
+ - "dnf_result is not failed"
+
+- name: verify dnf module outputs
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'results' in dnf_result"
+
+- name: Create a temp RPM file which does not contain nevra information
+ file:
+ name: "/tmp/non_existent_pkg.rpm"
+ state: touch
+
+- name: Try installing RPM file which does not contain nevra information
+ dnf:
+ name: "/tmp/non_existent_pkg.rpm"
+ state: present
+ register: no_nevra_info_result
+ ignore_errors: yes
+
+- name: Verify RPM failed to install
+ assert:
+ that:
+ - "'changed' in no_nevra_info_result"
+ - "'msg' in no_nevra_info_result"
+
+- name: Delete a temp RPM file
+ file:
+ name: "/tmp/non_existent_pkg.rpm"
+ state: absent
+
+- name: uninstall lsof
+ dnf:
+ name: lsof
+ state: removed
+
+- name: check lsof with rpm
+ shell: rpm -q lsof
+ ignore_errors: True
+ register: rpm_lsof_result
+
+- name: verify lsof is uninstalled
+ assert:
+ that:
+ - "rpm_lsof_result is failed"
+
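+# dnf honors "exclude=" patterns from its configuration file; the module's
+# disable_excludes option ("all" below) maps to dnf's --disableexcludes
+# switch and lets the install proceed anyway.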
+- name: create conf file that excludes lsof
+ copy:
+ content: |
+ [main]
+ exclude=lsof*
+ dest: '{{ output_dir }}/test-dnf.conf'
+ register: test_dnf_copy
+
+- block:
+ # begin test case where disable_excludes is supported
+ - name: Try install lsof without disable_excludes
+ dnf: name=lsof state=latest conf_file={{ test_dnf_copy.dest }}
+ register: dnf_lsof_result
+ ignore_errors: True
+
+ - name: verify lsof did not install because it is in exclude list
+ assert:
+ that:
+ - "dnf_lsof_result is failed"
+
+ - name: install lsof with disable_excludes
+ dnf: name=lsof state=latest disable_excludes=all conf_file={{ test_dnf_copy.dest }}
+ register: dnf_lsof_result_using_excludes
+
+ - name: verify lsof did install using disable_excludes=all
+ assert:
+ that:
+ - "dnf_lsof_result_using_excludes is success"
+ - "dnf_lsof_result_using_excludes is changed"
+ - "dnf_lsof_result_using_excludes is not failed"
+ always:
+ - name: remove exclude lsof conf file
+ file:
+ path: '{{ output_dir }}/test-dnf.conf'
+ state: absent
+
+# end test case where disable_excludes is supported
+
+- name: Test "dnf install /usr/bin/vi"
+ block:
+ - name: Clean vim-minimal
+ dnf:
+ name: vim-minimal
+ state: absent
+
+ - name: Install vim-minimal by specifying "/usr/bin/vi"
+ dnf:
+ name: /usr/bin/vi
+ state: present
+
+ - name: Get rpm output
+ command: rpm -q vim-minimal
+ register: rpm_output
+
+ - name: Check installation was successful
+ assert:
+ that:
+ - "'vim-minimal' in rpm_output.stdout"
+ when:
+ - ansible_distribution == 'Fedora'
+
+- name: Remove wildcard package that isn't installed
+ dnf:
+ name: firefox*
+ state: absent
+ register: wildcard_absent
+
+- assert:
+ that:
+ - wildcard_absent is successful
+ - wildcard_absent is not changed
diff --git a/test/integration/targets/dnf/tasks/dnfinstallroot.yml b/test/integration/targets/dnf/tasks/dnfinstallroot.yml
new file mode 100644
index 00000000..b5e09011
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/dnfinstallroot.yml
@@ -0,0 +1,47 @@
+# make an installroot
+- name: Create installroot
+ command: mktemp -d "{{ remote_tmp_dir }}/ansible.test.XXXXXX"
+ register: dnfroot
+
+- name: Make a necessary directory
+ file:
+ path: "/{{ dnfroot.stdout }}/etc/dnf/vars/"
+ state: directory
+ mode: 0755
+
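+# dnf cannot detect a release version inside an empty installroot, so we
+# seed the releasever variable that it substitutes into repository URLs.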
+- name: Populate directory
+ copy:
+ # We need '8' for CentOS, but '8.x' for RHEL.
+ content: "{{ ansible_distribution_version|int if ansible_distribution != 'RedHat' else ansible_distribution_version }}\n"
+ dest: "/{{ dnfroot.stdout }}/etc/dnf/vars/releasever"
+
+# This will drag in > 200 MB.
+- name: attempt installroot
+ dnf: name=sos installroot="/{{ dnfroot.stdout }}/" disable_gpg_check=yes
+ register: dnf_result
+
+- name: check sos with rpm in installroot
+ shell: rpm -q sos --root="/{{ dnfroot.stdout }}/"
+ failed_when: False
+ register: rpm_result
+
+- debug: var=dnf_result
+- debug: var=rpm_result
+
+- name: verify installation of sos in installroot
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: verify dnf module outputs in installroot
+ assert:
+ that:
+ - "'changed' in dnf_result"
+ - "'results' in dnf_result"
+
+- name: cleanup installroot
+ file:
+ path: "/{{ dnfroot.stdout }}/"
+ state: absent
diff --git a/test/integration/targets/dnf/tasks/dnfreleasever.yml b/test/integration/targets/dnf/tasks/dnfreleasever.yml
new file mode 100644
index 00000000..351a26b1
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/dnfreleasever.yml
@@ -0,0 +1,47 @@
+# make an installroot
+- name: Create installroot
+ command: mktemp -d "{{ remote_tmp_dir }}/ansible.test.XXXXXX"
+ register: dnfroot
+
+- name: Make a necessary directory
+ file:
+ path: "/{{dnfroot.stdout}}/etc/dnf/vars"
+ state: directory
+ mode: 0755
+
+- name: Populate directory
+ copy:
+ content: "{{ansible_distribution_version}}\n"
+ dest: "/{{dnfroot.stdout}}/etc/dnf/vars/releasever"
+
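+# The releasever option overrides $releasever for this one transaction,
+# so we can pull the previous release's filesystem package into the
+# installroot and check its dist tag below.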
+- name: install filesystem into the installroot with the previous releasever
+ dnf:
+ name: filesystem
+ installroot: '/{{dnfroot.stdout}}'
+ releasever: '{{ansible_distribution_version|int - 1}}'
+ register: dnf_result
+
+- name: check filesystem version
+ shell: rpm -q filesystem --root="/{{dnfroot.stdout}}/"
+ failed_when: False
+ register: rpm_result
+
+- debug: var=dnf_result
+- debug: var=rpm_result
+
+- name: verify installation was done
+ assert:
+ that:
+ - "not dnf_result.failed | default(False)"
+ - "dnf_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: verify the version
+ assert:
+ that:
+ - "rpm_result.stdout.find('fc' ~ (ansible_distribution_version|int - 1)) != -1"
+
+- name: cleanup installroot
+ file:
+ path: "/{{dnfroot.stdout}}/"
+ state: absent
diff --git a/test/integration/targets/dnf/tasks/filters.yml b/test/integration/targets/dnf/tasks/filters.yml
new file mode 100644
index 00000000..d5e9ee90
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/filters.yml
@@ -0,0 +1,134 @@
+# We have a test repo set up with a valid updateinfo.xml which is referenced
+# from its repomd.xml.
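+#
+# For orientation, an updateinfo.xml entry looks roughly like this (a
+# sketch, not the exact fixture contents):
+#
+#   <update type="security" status="stable">
+#     <id>EXAMPLE-2020-0001</id>
+#     <pkglist><collection>
+#       <package name="toaster" version="1.2.3.5" release="1.el8" arch="noarch">
+#         <filename>toaster-1.2.3.5-1.el8.noarch.rpm</filename>
+#       </package>
+#     </collection></pkglist>
+#   </update>
+#
+# dnf maps type="security" to the security filter and type="bugfix" to
+# the bugfix filter, which is what the tasks below exercise.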
+- block:
+ - set_fact:
+ updateinfo_repo: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rpm_repo/repo-with-updateinfo
+
+ - name: Install the test repo
+ yum_repository:
+ name: test-repo-with-updateinfo
+ description: test-repo-with-updateinfo
+ baseurl: "{{ updateinfo_repo }}"
+ gpgcheck: no
+
+ - name: Install old versions of toaster and oven
+ dnf:
+ name:
+ - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm"
+ - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm"
+ disable_gpg_check: true
+
+ - name: Ask for pending updates
+ dnf:
+ name: '*'
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ disablerepo: '*'
+ enablerepo: test-repo-with-updateinfo
+ register: update_no_filter
+
+ - assert:
+ that:
+ - update_no_filter is changed
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_no_filter.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_no_filter.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_no_filter.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_no_filter.results'
+
+ - name: Install old versions of toaster and oven
+ dnf:
+ name:
+ - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm"
+ - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm"
+ allow_downgrade: true
+ disable_gpg_check: true
+
+ - name: Ask for pending updates with security=true
+ dnf:
+ name: '*'
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ security: true
+ disablerepo: '*'
+ enablerepo: test-repo-with-updateinfo
+ register: update_security
+
+ - assert:
+ that:
+ - update_security is changed
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_security.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_security.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" not in update_security.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" not in update_security.results'
+
+ - name: Install old versions of toaster and oven
+ dnf:
+ name:
+ - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm"
+ - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm"
+ allow_downgrade: true
+ disable_gpg_check: true
+
+ - name: Ask for pending updates with bugfix=true
+ dnf:
+ name: '*'
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ bugfix: true
+ disablerepo: '*'
+ enablerepo: test-repo-with-updateinfo
+ register: update_bugfix
+
+ - assert:
+ that:
+ - update_bugfix is changed
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" not in update_bugfix.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" not in update_bugfix.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results'
+
+ - name: Install old versions of toaster and oven
+ dnf:
+ name:
+ - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm"
+ - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm"
+ allow_downgrade: true
+ disable_gpg_check: true
+
+ - name: Ask for pending updates with bugfix=true and security=true
+ dnf:
+ name: '*'
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ bugfix: true
+ security: true
+ disablerepo: '*'
+ enablerepo: test-repo-with-updateinfo
+ register: update_bugfix
+
+ - assert:
+ that:
+ - update_bugfix is changed
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_bugfix.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_bugfix.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results'
+
+ always:
+ - name: Remove installed packages
+ dnf:
+ name:
+ - toaster
+ - oven
+ state: absent
+
+ - name: Remove the repo
+ yum_repository:
+ name: test-repo-with-updateinfo
+ state: absent
+ tags:
+ - filters
diff --git a/test/integration/targets/dnf/tasks/filters_check_mode.yml b/test/integration/targets/dnf/tasks/filters_check_mode.yml
new file mode 100644
index 00000000..024ac066
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/filters_check_mode.yml
@@ -0,0 +1,118 @@
+# We have a test repo set up with a valid updateinfo.xml which is referenced
+# from its repomd.xml.
+- block:
+ - set_fact:
+ updateinfo_repo: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rpm_repo/repo-with-updateinfo
+
+ - name: Install the test repo
+ yum_repository:
+ name: test-repo-with-updateinfo
+ description: test-repo-with-updateinfo
+ baseurl: "{{ updateinfo_repo }}"
+ gpgcheck: no
+
+ - name: Install old versions of toaster and oven
+ dnf:
+ name:
+ - "{{ updateinfo_repo }}/toaster-1.2.3.4-1.el8.noarch.rpm"
+ - "{{ updateinfo_repo }}/oven-1.2.3.4-1.el8.noarch.rpm"
+ disable_gpg_check: true
+
+ - name: Ask for pending updates (check_mode)
+ dnf:
+ name:
+ - toaster
+ - oven
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ check_mode: true
+ register: update_no_filter
+
+ - assert:
+ that:
+ - update_no_filter is changed
+ - '"would have if not in check mode" in update_no_filter.msg'
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_no_filter.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_no_filter.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_no_filter.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_no_filter.results'
+
+ - name: Ask for pending updates with security=true (check_mode)
+ dnf:
+ name:
+ - toaster
+ - oven
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ security: true
+ check_mode: true
+ register: update_security
+
+ - assert:
+ that:
+ - update_security is changed
+ - '"would have if not in check mode" in update_security.msg'
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_security.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_security.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" not in update_security.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" not in update_security.results'
+
+ - name: Ask for pending updates with bugfix=true (check_mode)
+ dnf:
+ name:
+ - toaster
+ - oven
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ bugfix: true
+ check_mode: true
+ register: update_bugfix
+
+ - assert:
+ that:
+ - update_bugfix is changed
+ - '"would have if not in check mode" in update_bugfix.msg'
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" not in update_bugfix.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" not in update_bugfix.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results'
+
+ - name: Ask for pending updates with bugfix=true and security=true (check_mode)
+ dnf:
+ name:
+ - toaster
+ - oven
+ state: latest
+ update_only: true
+ disable_gpg_check: true
+ bugfix: true
+ security: true
+ check_mode: true
+ register: update_bugfix
+
+ - assert:
+ that:
+ - update_bugfix is changed
+ - '"would have if not in check mode" in update_bugfix.msg'
+ - '"Installed: toaster-1.2.3.5-1.el8.noarch" in update_bugfix.results'
+ - '"Removed: toaster-1.2.3.4-1.el8.noarch" in update_bugfix.results'
+ - '"Installed: oven-1.2.3.5-1.el8.noarch" in update_bugfix.results'
+ - '"Removed: oven-1.2.3.4-1.el8.noarch" in update_bugfix.results'
+
+ always:
+ - name: Remove installed packages
+ dnf:
+ name:
+ - toaster
+ - oven
+ state: absent
+
+ - name: Remove the repo
+ yum_repository:
+ name: test-repo-with-updateinfo
+ state: absent
+ tags:
+ - filters
diff --git a/test/integration/targets/dnf/tasks/gpg.yml b/test/integration/targets/dnf/tasks/gpg.yml
new file mode 100644
index 00000000..2b6f4079
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/gpg.yml
@@ -0,0 +1,72 @@
+# Set up a repo of unsigned rpms
+- block:
+ - name: Ensure our test package isn't already installed
+ dnf:
+ name:
+ - fpaste
+ state: absent
+
+ - name: Install rpm-sign
+ dnf:
+ name:
+ - rpm-sign
+ state: present
+
+ - name: Create directory to use as local repo
+ file:
+ path: "{{ remote_tmp_dir }}/unsigned"
+ state: directory
+
+ - name: Download an RPM
+ get_url:
+ url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dnf/fpaste-0.3.9.1-1.fc27.noarch.rpm
+ dest: "{{ remote_tmp_dir }}/unsigned/fpaste-0.3.9.1-1.fc27.noarch.rpm"
+ mode: 0644
+
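+    # rpmsign --delsign strips any existing signature from the package,
+    # so it cannot pass the gpgcheck=true verification configured below.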
+ - name: Unsign the RPM
+ command: rpmsign --delsign "{{ remote_tmp_dir }}/unsigned/fpaste-0.3.9.1-1.fc27.noarch.rpm"
+
+ - name: createrepo
+ command: createrepo .
+ args:
+ chdir: "{{ remote_tmp_dir }}/unsigned"
+
+ - name: Add the repo
+ yum_repository:
+ name: unsigned
+ description: unsigned rpms
+ baseurl: "file://{{ remote_tmp_dir }}/unsigned/"
+ # we want to ensure that signing is verified
+ gpgcheck: true
+
+ - name: Install fpaste from above
+ dnf:
+ name:
+ - fpaste
+ disablerepo: '*'
+ enablerepo: unsigned
+ register: res
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - res is failed
+ - "'Failed to validate GPG signature' in res.msg"
+
+ always:
+ - name: Remove rpm-sign (and fpaste if it got installed)
+ dnf:
+ name:
+ - rpm-sign
+ - fpaste
+ state: absent
+
+ - name: Remove test repo
+ yum_repository:
+ name: unsigned
+ state: absent
+
+ - name: Remove repo dir
+ file:
+ path: "{{ remote_tmp_dir }}/unsigned"
+ state: absent
diff --git a/test/integration/targets/dnf/tasks/logging.yml b/test/integration/targets/dnf/tasks/logging.yml
new file mode 100644
index 00000000..4cbeaa61
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/logging.yml
@@ -0,0 +1,47 @@
+# Verify that the logging function is enabled in the dnf module.
+# The following tasks are supported by dnf-4.2.17-6 or later.
+# Note: https://bugzilla.redhat.com/show_bug.cgi?id=1788212
+- name: Install latest version python3-dnf
+ dnf:
+ name:
+ - python3-dnf
+ - python3-libdnf # https://bugzilla.redhat.com/show_bug.cgi?id=1887502
+ state: latest
+ register: dnf_result
+
+- name: Verify python3-dnf installed
+ assert:
+ that:
+ - "dnf_result.rc == 0"
+
+- name: Get python3-dnf version
+ shell: "dnf info python3-dnf | awk '/^Version/ { print $3 }'"
+ register: py3_dnf_version
+
+- name: Check logging enabled
+ block:
+ - name: remove logfiles if exist
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop: "{{ dnf_log_files }}"
+
+ - name: Install sos package
+ dnf:
+ name: sos
+ state: present
+ register: dnf_result
+
+ - name: Get status of logfiles
+ stat:
+ path: "{{ item }}"
+ loop: "{{ dnf_log_files }}"
+ register: stats
+
+ - name: Verify logfile exists
+ assert:
+ that:
+ - "item.stat.exists"
+ loop: "{{ stats.results }}"
+ when:
+ - 'py3_dnf_version.stdout is version("4.2.17", ">=")'
diff --git a/test/integration/targets/dnf/tasks/main.yml b/test/integration/targets/dnf/tasks/main.yml
new file mode 100644
index 00000000..1b6e0941
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/main.yml
@@ -0,0 +1,62 @@
+# test code for the dnf module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Note: These tasks run only where dnf is the system package manager;
+# see the distribution/version conditionals on each include below.
+
+- include_tasks: dnf.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+
+- include_tasks: filters_check_mode.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+ tags:
+ - filters
+
+- include_tasks: filters.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+ tags:
+ - filters
+
+- include_tasks: gpg.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+
+- include_tasks: repo.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+
+- include_tasks: dnfinstallroot.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+
+# Attempting to install a different RHEL release in a tmpdir doesn't work (rhel8 beta)
+- include_tasks: dnfreleasever.yml
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_distribution_major_version is version('23', '>=')
+
+- include_tasks: modularity.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('29', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+
+- include_tasks: logging.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('31', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
diff --git a/test/integration/targets/dnf/tasks/modularity.yml b/test/integration/targets/dnf/tasks/modularity.yml
new file mode 100644
index 00000000..48a0111a
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/modularity.yml
@@ -0,0 +1,99 @@
+# FUTURE - look at including AppStream support in our local repo
+- name: Include distribution specific variables
+ include_vars: "{{ ansible_facts.distribution }}.yml"
+
+- name: install "{{ astream_name }}" module
+ dnf:
+ name: "{{ astream_name }}"
+ state: present
+ register: dnf_result
+
+- name: verify installation of "{{ astream_name }}" module
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "dnf_result.changed"
+
+- name: install "{{ astream_name }}" module again
+ dnf:
+ name: "{{ astream_name }}"
+ state: present
+ register: dnf_result
+
+- name: verify installation of "{{ astream_name }}" module again
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "not dnf_result.changed"
+
+- name: uninstall "{{ astream_name }}" module
+ dnf:
+ name: "{{ astream_name }}"
+ state: absent
+ register: dnf_result
+
+- name: verify uninstallation of "{{ astream_name }}" module
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "dnf_result.changed"
+
+- name: uninstall "{{ astream_name }}" module again
+ dnf:
+ name: "{{ astream_name }}"
+ state: absent
+ register: dnf_result
+
+- name: verify uninstallation of "{{ astream_name }}" module again
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "not dnf_result.changed"
+
+- name: install "{{ astream_name_no_stream }}" module without providing stream
+ dnf:
+ name: "{{ astream_name_no_stream }}"
+ state: present
+ register: dnf_result
+
+- name: verify installation of "{{ astream_name_no_stream }}" module without providing stream
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "dnf_result.changed"
+
+- name: install "{{ astream_name_no_stream }}" module again without providing stream
+ dnf:
+ name: "{{ astream_name_no_stream }}"
+ state: present
+ register: dnf_result
+
+- name: verify installation of "{{ astream_name_no_stream }}" module again without providing stream
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "not dnf_result.changed"
+
+- name: uninstall "{{ astream_name_no_stream }}" module without providing stream
+ dnf:
+ name: "{{ astream_name_no_stream }}"
+ state: absent
+ register: dnf_result
+
+- name: verify uninstallation of "{{ astream_name_no_stream }}" module without providing stream
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "dnf_result.changed"
+
+- name: uninstall "{{ astream_name_no_stream }}" module again without providing stream
+ dnf:
+ name: "{{ astream_name_no_stream }}"
+ state: absent
+ register: dnf_result
+
+- name: verify uninstallation of "{{ astream_name_no_stream }}" module again without providing stream
+ assert:
+ that:
+ - "not dnf_result.failed"
+ - "not dnf_result.changed"
diff --git a/test/integration/targets/dnf/tasks/repo.yml b/test/integration/targets/dnf/tasks/repo.yml
new file mode 100644
index 00000000..4f82899c
--- /dev/null
+++ b/test/integration/targets/dnf/tasks/repo.yml
@@ -0,0 +1,309 @@
+- block:
+ - name: Install dinginessentail-1.0-1
+ dnf:
+ name: dinginessentail-1.0-1
+ state: present
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'results' in dnf_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 again
+ dnf:
+ name: dinginessentail-1.0-1
+ state: present
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'msg' in dnf_result"
+ # ============================================================================
+ - name: Install dinginessentail again (noop, module is idempotent)
+ dnf:
+ name: dinginessentail
+ state: present
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+          # No upgrade happened to 1.1-1
+ - "not dnf_result.changed"
+ # Old version still installed
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+ # ============================================================================
+ - name: Install dinginessentail-1:1.0-2
+ dnf:
+ name: "dinginessentail-1:1.0-2.{{ ansible_architecture }}"
+ state: present
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'results' in dnf_result"
+ # ============================================================================
+ - name: Update to the latest dinginessentail
+ dnf:
+ name: dinginessentail
+ state: latest
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.1-1')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'results' in dnf_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 from a file (downgrade)
+ dnf:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ allow_downgrade: True
+ disable_gpg_check: True
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'results' in dnf_result"
+
+ - name: Remove dinginessentail
+ dnf:
+ name: dinginessentail
+ state: absent
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 from a file
+ dnf:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: True
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'results' in dnf_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 from a file again
+ dnf:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: True
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-2 from a file
+ dnf:
+ name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: True
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify dnf module outputs
+ assert:
+ that:
+ - "'results' in dnf_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-2 from a file again
+ dnf:
+ name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: True
+ register: dnf_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not dnf_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+ # ============================================================================
+ - name: Remove dinginessentail
+ dnf:
+ name: dinginessentail
+ state: absent
+
+ - name: Try to install incompatible arch
+ dnf:
+ name: "{{ repodir_ppc64 }}/dinginessentail-1.0-1.ppc64.rpm"
+ state: present
+ register: dnf_result
+ ignore_errors: yes
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+ ignore_errors: yes
+
+ - name: Verify installation
+ assert:
+ that:
+ - "rpm_result.rc == 1"
+ - "not dnf_result.changed"
+ - "dnf_result is failed"
+ # ============================================================================
+
+ # Should install dinginessentail-with-weak-dep and dinginessentail-weak-dep
+ - name: Install package with defaults
+ dnf:
+ name: dinginessentail-with-weak-dep
+ state: present
+
+ - name: Check if dinginessentail-with-weak-dep is installed
+ shell: rpm -q dinginessentail-with-weak-dep
+ register: rpm_main_result
+
+ - name: Check if dinginessentail-weak-dep is installed
+ shell: rpm -q dinginessentail-weak-dep
+ register: rpm_weak_result
+
+ - name: Verify install with weak deps
+ assert:
+ that:
+ - rpm_main_result.rc == 0
+ - rpm_weak_result.rc == 0
+
+ - name: Uninstall dinginessentail weak dep packages
+ dnf:
+ name:
+ - dinginessentail-with-weak-dep
+ - dinginessentail-weak-dep
+ state: absent
+
+ - name: Install package with weak deps but skip weak deps
+ dnf:
+ name: dinginessentail-with-weak-dep
+ install_weak_deps: False
+ state: present
+
+ - name: Check if dinginessentail-with-weak-dep is installed
+ shell: rpm -q dinginessentail-with-weak-dep
+ register: rpm_main_result
+
+ - name: Check if dinginessentail-weak-dep is installed
+ shell: rpm -q dinginessentail-weak-dep
+ register: rpm_weak_result
+ ignore_errors: yes
+
+ - name: Verify install without weak deps
+ assert:
+ that:
+ - rpm_main_result.rc == 0
+ - rpm_weak_result.rc == 1 # the weak dependency shouldn't be installed
+
+ # https://github.com/ansible/ansible/issues/55938
+ - name: Install dinginessentail-*
+ dnf:
+ name: dinginessentail-*
+ state: present
+
+ - name: Uninstall dinginessentail-*
+ dnf:
+ name: dinginessentail-*
+ state: absent
+
+ - name: Check if all dinginessentail packages are removed
+ shell: rpm -qa dinginessentail-* | wc -l
+ register: rpm_result
+
+ - name: Verify rpm result
+ assert:
+ that:
+ - rpm_result.stdout == '0'
+ always:
+ - name: Clean up
+ dnf:
+ name:
+ - dinginessentail
+ - dinginessentail-with-weak-dep
+ - dinginessentail-weak-dep
+ state: absent
diff --git a/test/integration/targets/dnf/vars/CentOS.yml b/test/integration/targets/dnf/vars/CentOS.yml
new file mode 100644
index 00000000..c70d8538
--- /dev/null
+++ b/test/integration/targets/dnf/vars/CentOS.yml
@@ -0,0 +1,2 @@
+astream_name: '@php:7.2/minimal'
+astream_name_no_stream: '@php/minimal'
diff --git a/test/integration/targets/dnf/vars/Fedora.yml b/test/integration/targets/dnf/vars/Fedora.yml
new file mode 100644
index 00000000..6e0a798c
--- /dev/null
+++ b/test/integration/targets/dnf/vars/Fedora.yml
@@ -0,0 +1,6 @@
+astream_name: '@hub:pre-release/default'
+
+# For this to work, it needs to be a module that shows up only once in
+# `dnf module list`. Such modules, present on all the versions we test
+# against, are hard to come by.
+# TODO: This would be solved by using our own repo with modularity/streams.
+astream_name_no_stream: '@hub/default'
diff --git a/test/integration/targets/dnf/vars/RedHat.yml b/test/integration/targets/dnf/vars/RedHat.yml
new file mode 100644
index 00000000..c70d8538
--- /dev/null
+++ b/test/integration/targets/dnf/vars/RedHat.yml
@@ -0,0 +1,2 @@
+astream_name: '@php:7.2/minimal'
+astream_name_no_stream: '@php/minimal'
diff --git a/test/integration/targets/dnf/vars/main.yml b/test/integration/targets/dnf/vars/main.yml
new file mode 100644
index 00000000..86588de3
--- /dev/null
+++ b/test/integration/targets/dnf/vars/main.yml
@@ -0,0 +1,4 @@
+dnf_log_files:
+ - /var/log/dnf.log
+ - /var/log/dnf.rpm.log
+ - /var/log/dnf.librepo.log
diff --git a/test/integration/targets/dpkg_selections/aliases b/test/integration/targets/dpkg_selections/aliases
new file mode 100644
index 00000000..55da8c88
--- /dev/null
+++ b/test/integration/targets/dpkg_selections/aliases
@@ -0,0 +1,7 @@
+shippable/posix/group1
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
diff --git a/test/integration/targets/dpkg_selections/defaults/main.yaml b/test/integration/targets/dpkg_selections/defaults/main.yaml
new file mode 100644
index 00000000..94bd9bcc
--- /dev/null
+++ b/test/integration/targets/dpkg_selections/defaults/main.yaml
@@ -0,0 +1 @@
+hello_old_version: 2.6-1
diff --git a/test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml b/test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml
new file mode 100644
index 00000000..5a46fcd9
--- /dev/null
+++ b/test/integration/targets/dpkg_selections/tasks/dpkg_selections.yaml
@@ -0,0 +1,89 @@
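+# dpkg tracks one of four selection states per package: "install" (the
+# default), "hold" (pin the current version), "deinstall" (remove but
+# keep config files), and "purge" (remove including config files). The
+# tasks below walk the hello package through each of them.
+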
+- name: download and install old version of hello
+ apt: "deb=https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/dpkg_selections/hello_{{ hello_old_version }}_amd64.deb"
+
+- name: freeze version for hello
+ dpkg_selections:
+ name: hello
+ selection: hold
+
+- name: get dpkg selections
+ shell: "dpkg --get-selections | grep hold"
+ register: result
+
+- debug: var=result
+
+- name: check that hello is marked as hold
+ assert:
+ that:
+ - "'hello' in result.stdout"
+
+- name: attempt to upgrade hello
+ apt:
+ name: hello
+ state: latest
+ ignore_errors: yes
+
+- name: check hello version
+ shell: dpkg -s hello | grep Version | awk '{print $2}'
+ register: hello_version
+
+- name: ensure hello was not upgraded
+ assert:
+ that:
+ - hello_version.stdout == hello_old_version
+
+- name: remove version freeze
+ dpkg_selections:
+ name: hello
+ selection: install
+
+- name: upgrade hello
+ apt:
+ name: hello
+ state: latest
+
+- name: check hello version
+ shell: dpkg -s hello | grep Version | awk '{print $2}'
+ register: hello_version
+
+- name: check that old version upgraded correctly
+ assert:
+ that:
+ - hello_version.stdout != hello_old_version
+
+- name: set hello to deinstall
+ dpkg_selections:
+ name: hello
+ selection: deinstall
+
+- name: get dpkg selections
+ shell: "dpkg --get-selections | grep deinstall"
+ register: result
+
+- debug: var=result
+
+- name: check that hello is marked as deinstall
+ assert:
+ that:
+ - "'hello' in result.stdout"
+
+- name: set hello to purge
+ dpkg_selections:
+ name: hello
+ selection: purge
+
+- name: get dpkg selections
+ shell: "dpkg --get-selections | grep purge"
+ register: result
+
+- debug: var=result
+
+- name: check that hello is marked as purge
+ assert:
+ that:
+ - "'hello' in result.stdout"
+
+- name: remove hello
+ apt:
+ name: hello
+ state: absent
diff --git a/test/integration/targets/dpkg_selections/tasks/main.yaml b/test/integration/targets/dpkg_selections/tasks/main.yaml
new file mode 100644
index 00000000..6abd1dec
--- /dev/null
+++ b/test/integration/targets/dpkg_selections/tasks/main.yaml
@@ -0,0 +1,3 @@
+---
+ - include: 'dpkg_selections.yaml'
+ when: ansible_distribution in ('Ubuntu', 'Debian')
diff --git a/test/integration/targets/egg-info/aliases b/test/integration/targets/egg-info/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/egg-info/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py b/test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py
new file mode 100644
index 00000000..c0c5ccd5
--- /dev/null
+++ b/test/integration/targets/egg-info/lookup_plugins/import_pkg_resources.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pkg_resources
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ return ['ok']
diff --git a/test/integration/targets/egg-info/tasks/main.yml b/test/integration/targets/egg-info/tasks/main.yml
new file mode 100644
index 00000000..d7b886c0
--- /dev/null
+++ b/test/integration/targets/egg-info/tasks/main.yml
@@ -0,0 +1,3 @@
+- name: Make sure pkg_resources can be imported by plugins
+ debug:
+ msg: "{{ lookup('import_pkg_resources') }}"
diff --git a/test/integration/targets/embedded_module/aliases b/test/integration/targets/embedded_module/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/embedded_module/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/embedded_module/library/test_integration_module b/test/integration/targets/embedded_module/library/test_integration_module
new file mode 100644
index 00000000..04755b8e
--- /dev/null
+++ b/test/integration/targets/embedded_module/library/test_integration_module
@@ -0,0 +1,3 @@
+#!/usr/bin/python
+
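+# A minimal 'old style' Ansible module: the controller executes the file
+# and reads a single JSON object from stdout as the task result.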
+print('{"changed":false, "msg":"this is the embedded module"}')
diff --git a/test/integration/targets/embedded_module/tasks/main.yml b/test/integration/targets/embedded_module/tasks/main.yml
new file mode 100644
index 00000000..6a6d6485
--- /dev/null
+++ b/test/integration/targets/embedded_module/tasks/main.yml
@@ -0,0 +1,9 @@
+- name: run the embedded dummy module
+ test_integration_module:
+ register: result
+
+- name: assert the embedded module ran
+ assert:
+ that:
+ - "'msg' in result"
+ - result.msg == "this is the embedded module"
diff --git a/test/integration/targets/environment/aliases b/test/integration/targets/environment/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/environment/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/environment/runme.sh b/test/integration/targets/environment/runme.sh
new file mode 100755
index 00000000..c556a17c
--- /dev/null
+++ b/test/integration/targets/environment/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_environment.yml -i ../../inventory "$@"
diff --git a/test/integration/targets/environment/test_environment.yml b/test/integration/targets/environment/test_environment.yml
new file mode 100644
index 00000000..43f9c74e
--- /dev/null
+++ b/test/integration/targets/environment/test_environment.yml
@@ -0,0 +1,173 @@
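+# The environment keyword can be set at play, block, and task level; the
+# innermost definition wins. The plays below check that precedence, and
+# that the ansible_env fact itself is left untouched.
+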
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: get PATH from target
+ command: echo $PATH
+ register: target_path
+
+- hosts: testhost
+ vars:
+ - test1:
+ key1: val1
+ environment:
+ PATH: '{{ansible_env.PATH + ":/lola"}}'
+ lola: 'ido'
+ tasks:
+ - name: ensure special case with ansible_env is skipped but others still work
+ assert:
+ that:
+ - target_path.stdout == ansible_env.PATH
+ - "'/lola' not in ansible_env.PATH"
+ - ansible_env.lola == 'ido'
+
+ - name: check that envvar does not exist
+ shell: echo $key1
+ register: test_env
+
+ - name: assert no val in stdout
+ assert:
+ that:
+ - '"val1" not in test_env.stdout_lines'
+
+ - name: check that envvar does exist
+ shell: echo $key1
+ environment: "{{test1}}"
+ register: test_env2
+
+ - name: assert val1 in stdout
+ assert:
+ that:
+ - '"val1" in test_env2.stdout_lines'
+
+- hosts: testhost
+ vars:
+ - test1:
+ key1: val1
+ - test2:
+ key1: not1
+ other1: val2
+ environment: "{{test1}}"
+ tasks:
+ - name: check that play envvar does exist
+ shell: echo $key1
+ register: test_env3
+
+ - name: assert val1 in stdout
+ assert:
+ that:
+ - '"val1" in test_env3.stdout_lines'
+
+ - name: check that task envvar does exist
+ shell: echo $key1; echo $other1
+ register: test_env4
+ environment: "{{test2}}"
+
+ - name: assert all vars appear as expected
+ assert:
+ that:
+ - '"val1" not in test_env4.stdout_lines'
+ - '"not1" in test_env4.stdout_lines'
+ - '"val2" in test_env4.stdout_lines'
+
+ - block:
+ - name: check that task envvar does exist in block
+ shell: echo $key1; echo $other1
+ register: test_env5
+
+ - name: assert all vars appear as expected in block
+ assert:
+ that:
+ - '"val1" not in test_env5.stdout_lines'
+ - '"not1" in test_env5.stdout_lines'
+ - '"val2" in test_env5.stdout_lines'
+ environment: "{{test2}}"
+
+- name: test setting environment while using loops
+ hosts: testhost
+ environment:
+ foo: outer
+ tasks:
+ - name: verify foo==outer
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==outer
+ assert:
+ that:
+          - "test_foo.results[0].stdout == 'outer'"
+
+ - name: set environment on a task
+ environment:
+ foo: in_task
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==in_task
+ assert:
+ that:
+ - "test_foo.results[0].stdout == 'in_task'"
+
+ - name: test that the outer env var is set appropriately still
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==outer
+ assert:
+ that:
+          - "test_foo.results[0].stdout == 'outer'"
+
+ - name: set environment on a block
+ environment:
+ foo: in_block
+ block:
+ - name: test the environment is set in the block
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==in_block
+ assert:
+ that:
+ - "test_foo.results[0].stdout == 'in_block'"
+
+ - name: test setting environment in a task inside a block
+ environment:
+ foo: in_block_in_task
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==in_block_in_task
+ assert:
+ that:
+ - "test_foo.results[0].stdout == 'in_block_in_task'"
+
+ - name: test the environment var is set to the parent value
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==in_block
+ assert:
+ that:
+ - "test_foo.results[0].stdout == 'in_block'"
+
+ - name: test the env var foo has the initial value
+ command: /bin/echo $foo
+ loop:
+ - 1
+ register: test_foo
+
+ - name: assert foo==outer
+ assert:
+ that:
+          - "test_foo.results[0].stdout == 'outer'"
diff --git a/test/integration/targets/error_from_connection/aliases b/test/integration/targets/error_from_connection/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/error_from_connection/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/error_from_connection/connection_plugins/dummy.py b/test/integration/targets/error_from_connection/connection_plugins/dummy.py
new file mode 100644
index 00000000..2a2c8795
--- /dev/null
+++ b/test/integration/targets/error_from_connection/connection_plugins/dummy.py
@@ -0,0 +1,45 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author:
+ - John Doe
+ connection: dummy
+ short_description: defective connection plugin
+ description:
+ - defective connection plugin
+ version_added: "2.0"
+ options: {}
+"""
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+
+ transport = 'dummy'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
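+        # The braces in the message are deliberate: play.yml asserts that
+        # error text raised by a connection plugin is never re-templated.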
+ raise AnsibleError('an error with {{ some Jinja }}')
+
+ def transport(self):
+ pass
+
+ def _connect(self):
+ pass
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ pass
+
+ def put_file(self, in_path, out_path):
+ pass
+
+ def fetch_file(self, in_path, out_path):
+ pass
+
+ def close(self):
+ pass
diff --git a/test/integration/targets/error_from_connection/inventory b/test/integration/targets/error_from_connection/inventory
new file mode 100644
index 00000000..324f0d3a
--- /dev/null
+++ b/test/integration/targets/error_from_connection/inventory
@@ -0,0 +1,2 @@
+[local]
+testhost
diff --git a/test/integration/targets/error_from_connection/play.yml b/test/integration/targets/error_from_connection/play.yml
new file mode 100644
index 00000000..04320d88
--- /dev/null
+++ b/test/integration/targets/error_from_connection/play.yml
@@ -0,0 +1,20 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: "use a connection plugin raising an exception, exception message contains Jinja template."
+ connection: dummy
+ command: /bin/true # command won't be executed
+ register: result
+ ignore_errors: True
+
+ - name: "check that Jinja template embedded in exception message isn't rendered"
+ debug:
+ msg: 'ok'
+ when: result is failed
+ register: debug_task
+
+ - assert:
+ that:
+ - result is failed
+ - "'an error with' in result.msg" # makes sure plugin was found
+ - debug_task is success
diff --git a/test/integration/targets/error_from_connection/runme.sh b/test/integration/targets/error_from_connection/runme.sh
new file mode 100755
index 00000000..92679fd1
--- /dev/null
+++ b/test/integration/targets/error_from_connection/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -o nounset -o errexit -o xtrace
+
+ansible-playbook -i inventory "play.yml" -v "$@"
diff --git a/test/integration/targets/expect/aliases b/test/integration/targets/expect/aliases
new file mode 100644
index 00000000..ca7c9128
--- /dev/null
+++ b/test/integration/targets/expect/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+destructive
diff --git a/test/integration/targets/expect/files/foo.txt b/test/integration/targets/expect/files/foo.txt
new file mode 100644
index 00000000..7c6ded14
--- /dev/null
+++ b/test/integration/targets/expect/files/foo.txt
@@ -0,0 +1 @@
+foo.txt
diff --git a/test/integration/targets/expect/files/test_command.py b/test/integration/targets/expect/files/test_command.py
new file mode 100644
index 00000000..685c50c2
--- /dev/null
+++ b/test/integration/targets/expect/files/test_command.py
@@ -0,0 +1,12 @@
+import sys
+
+try:
+ input_function = raw_input
+except NameError:
+ input_function = input
+
+prompts = sys.argv[1:] or ['foo']
+
+for prompt in prompts:
+ user_input = input_function(prompt)
+ print(user_input)
diff --git a/test/integration/targets/expect/tasks/main.yml b/test/integration/targets/expect/tasks/main.yml
new file mode 100644
index 00000000..7feaec4d
--- /dev/null
+++ b/test/integration/targets/expect/tasks/main.yml
@@ -0,0 +1,205 @@
+# test code for the expect module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
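+
+# The expect module drives an interactive command through pexpect: keys
+# of `responses` are prompt patterns, values are the answers to send. A
+# list value supplies successive answers for a prompt that repeats (see
+# "test response list" below).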
+- name: Install test requirements
+ pip:
+ name: pexpect
+ state: present
+
+- name: record the test_command file
+ set_fact: test_command_file={{output_dir | expanduser}}/test_command.py
+
+- name: copy script into output directory
+ copy: src=test_command.py dest={{test_command_file}} mode=0444
+
+- name: record the output file
+ set_fact: output_file={{output_dir}}/foo.txt
+
+- copy:
+ content: "foo"
+ dest: "{{output_file}}"
+
+- name: test expect
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}}"
+ responses:
+ foo: bar
+ register: expect_result
+
+- name: assert expect worked
+ assert:
+ that:
+ - "expect_result.changed == true"
+ - "expect_result.stdout == 'foobar'"
+
+- name: test creates option
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}}"
+ responses:
+ foo: bar
+ creates: "{{output_file}}"
+ register: creates_result
+
+- name: assert that the command is not run when the creates file exists
+ assert:
+ that:
+ - "creates_result.changed == false"
+ - "'skipped' in creates_result.stdout"
+
+- name: test creates option (missing)
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}}"
+ responses:
+ foo: bar
+ creates: "{{output_file}}.does.not.exist"
+ register: creates_result
+
+- name: assert that the command is run when the creates file is missing
+ assert:
+ that:
+ - "creates_result.changed == true"
+ - "creates_result.stdout == 'foobar'"
+
+- name: test removes option
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}}"
+ responses:
+ foo: bar
+ removes: "{{output_file}}"
+ register: removes_result
+
+- name: assert that the command is run when the removes file exists
+ assert:
+ that:
+ - "removes_result.changed == true"
+ - "removes_result.stdout == 'foobar'"
+
+- name: test removes option (missing)
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}}"
+ responses:
+ foo: bar
+ removes: "{{output_file}}.does.not.exist"
+ register: removes_result
+
+- name: assert that the command is not run when the removes file is missing
+ assert:
+ that:
+ - "removes_result.changed == false"
+ - "'skipped' in removes_result.stdout"
+
+- name: test chdir
+ expect:
+ command: "/bin/sh -c 'pwd && sleep 1'"
+ chdir: "{{output_dir}}"
+ responses:
+ foo: bar
+ register: chdir_result
+
+- name: assert chdir works
+ assert:
+ that:
+ - "'{{chdir_result.stdout |expanduser | realpath }}' == '{{output_dir | expanduser | realpath}}'"
+
+- name: test timeout option
+ expect:
+ command: "sleep 10"
+ responses:
+ foo: bar
+ timeout: 1
+ ignore_errors: true
+ register: timeout_result
+
+- name: assert failure message when timeout
+ assert:
+ that:
+ - "timeout_result.msg == 'command exceeded timeout'"
+
+- name: test echo option
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}}"
+ responses:
+ foo: bar
+ echo: true
+ register: echo_result
+
+- name: assert echo works
+ assert:
+ that:
+ - "echo_result.stdout_lines|length == 2"
+ - "echo_result.stdout_lines[0] == 'foobar'"
+ - "echo_result.stdout_lines[1] == 'bar'"
+
+- name: test response list
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}} foo foo"
+ responses:
+ foo:
+ - bar
+ - baz
+ register: list_result
+
+- name: assert list response works
+ assert:
+ that:
+ - "list_result.stdout_lines|length == 2"
+ - "list_result.stdout_lines[0] == 'foobar'"
+ - "list_result.stdout_lines[1] == 'foobaz'"
+
+- name: test no remaining responses
+ expect:
+ command: "{{ansible_python_interpreter}} {{test_command_file}} foo foo"
+ responses:
+ foo:
+ - bar
+ register: list_result
+ ignore_errors: yes
+
+- name: assert no remaining responses
+ assert:
+ that:
+ - "list_result.failed"
+ - "'No remaining responses' in list_result.msg"
+
+- name: test no command
+ expect:
+ command: ""
+ responses:
+ foo: bar
+ register: no_command_result
+ ignore_errors: yes
+
+- name: assert no command
+ assert:
+ that:
+ - "no_command_result.failed"
+ - "no_command_result.msg == 'no command given'"
+ - "no_command_result.rc == 256"
+
+- name: test non-zero return code
+ expect:
+ command: "ls /does-not-exist"
+ responses:
+ foo: bar
+ register: non_zero_result
+ ignore_errors: yes
+
+- name: assert non-zero return code
+ assert:
+ that:
+ - "non_zero_result.failed"
+ - "non_zero_result.msg == 'non-zero return code'"
diff --git a/test/integration/targets/facts_d/aliases b/test/integration/targets/facts_d/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/facts_d/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/facts_d/meta/main.yml b/test/integration/targets/facts_d/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/facts_d/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/facts_d/tasks/main.yml b/test/integration/targets/facts_d/tasks/main.yml
new file mode 100644
index 00000000..ca23544f
--- /dev/null
+++ b/test/integration/targets/facts_d/tasks/main.yml
@@ -0,0 +1,41 @@
+# Test code for facts.d and setup filters
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact: fact_dir={{output_dir}}/facts.d
+
+- file: path={{ fact_dir }} state=directory
+- shell: echo "[general]" > {{ fact_dir }}/preferences.fact
+- shell: echo "bar=loaded" >> {{ fact_dir }}/preferences.fact
+
+- setup:
+ fact_path: "{{ fact_dir | expanduser }}"
+ filter: "*local*"
+ register: setup_result
+
+- debug: var=setup_result
+
+- assert:
+ that:
+ - "'ansible_facts' in setup_result"
+ - "'ansible_local' in setup_result.ansible_facts"
+ - "'ansible_env' not in setup_result.ansible_facts"
+ - "'ansible_user_id' not in setup_result.ansible_facts"
+ - "'preferences' in setup_result.ansible_facts['ansible_local']"
+ - "'general' in setup_result.ansible_facts['ansible_local']['preferences']"
+ - "'bar' in setup_result.ansible_facts['ansible_local']['preferences']['general']"
+ - "setup_result.ansible_facts['ansible_local']['preferences']['general']['bar'] == 'loaded'"
diff --git a/test/integration/targets/facts_linux_network/aliases b/test/integration/targets/facts_linux_network/aliases
new file mode 100644
index 00000000..21a4e907
--- /dev/null
+++ b/test/integration/targets/facts_linux_network/aliases
@@ -0,0 +1,5 @@
+needs/privileged
+shippable/posix/group2
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/test/integration/targets/facts_linux_network/meta/main.yml b/test/integration/targets/facts_linux_network/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/facts_linux_network/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/facts_linux_network/tasks/main.yml b/test/integration/targets/facts_linux_network/tasks/main.yml
new file mode 100644
index 00000000..af4dde96
--- /dev/null
+++ b/test/integration/targets/facts_linux_network/tasks/main.yml
@@ -0,0 +1,18 @@
+- block:
+ - name: Add IP to interface
+ command: ip address add 100.42.42.1/32 dev {{ ansible_facts.default_ipv4.interface }}
+ ignore_errors: yes
+
+ - name: Gather network facts
+ setup:
+ gather_subset: network
+
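+ # A /32 address has no distinct broadcast address, so the generated
+ # ipv4_secondaries entry is expected to report an empty 'broadcast' value.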
+ - name: Ensure broadcast is reported as empty
+ assert:
+ that:
+ - ansible_facts[ansible_facts['default_ipv4']['interface']]['ipv4_secondaries'][0]['broadcast'] == ''
+
+ always:
+ - name: Remove IP from interface
+ command: ip address delete 100.42.42.1/32 dev {{ ansible_facts.default_ipv4.interface }}
+ ignore_errors: yes
diff --git a/test/integration/targets/failed_when/aliases b/test/integration/targets/failed_when/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/failed_when/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/failed_when/tasks/main.yml b/test/integration/targets/failed_when/tasks/main.yml
new file mode 100644
index 00000000..3f8ae545
--- /dev/null
+++ b/test/integration/targets/failed_when/tasks/main.yml
@@ -0,0 +1,68 @@
+# Test code for failed_when.
+# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: command rc 0 failed_when_result undef
+ shell: exit 0
+ ignore_errors: True
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and not result.failed"
+
+- name: command rc 0 failed_when_result False
+ shell: exit 0
+ failed_when: false
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and not result.failed"
+ - "'failed_when_result' in result and not result.failed_when_result"
+
+- name: command rc 1 failed_when_result True
+ shell: exit 1
+ failed_when: true
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and result.failed"
+ - "'failed_when_result' in result and result.failed_when_result"
+
+- name: command rc 1 failed_when_result undef
+ shell: exit 1
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and result.failed"
+
+- name: command rc 1 failed_when_result False
+ shell: exit 1
+ failed_when: false
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'failed' in result and not result.failed"
+ - "'failed_when_result' in result and not result.failed_when_result"
diff --git a/test/integration/targets/fetch/aliases b/test/integration/targets/fetch/aliases
new file mode 100644
index 00000000..fb5d6faa
--- /dev/null
+++ b/test/integration/targets/fetch/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+needs/target/setup_remote_tmp_dir
diff --git a/test/integration/targets/fetch/injection/avoid_slurp_return.yml b/test/integration/targets/fetch/injection/avoid_slurp_return.yml
new file mode 100644
index 00000000..af62dcf4
--- /dev/null
+++ b/test/integration/targets/fetch/injection/avoid_slurp_return.yml
@@ -0,0 +1,26 @@
+- name: ensure that 'fake slurp' does not poison fetch source
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: fetch with relative source path
+ fetch: src=../injection/here.txt dest={{output_dir}}
+ become: true
+ register: islurp
+
+ - name: fetch with normal source path
+ fetch: src=here.txt dest={{output_dir}}
+ become: true
+ register: islurp2
+
+ - name: ensure all is good in hollywood
+ assert:
+ that:
+ - "'..' not in islurp['dest']"
+ - "'..' not in islurp2['dest']"
+ - "'foo' not in islurp['dest']"
+ - "'foo' not in islurp2['dest']"
+
+ - name: try to trip dest anyways
+ fetch: src=../injection/here.txt dest={{output_dir}}
+ become: true
+ register: islurp2
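+
+# The bundled library/slurp.py shadows the real slurp module and hands back
+# path-traversal values in 'source'; the asserts above prove fetch never lets
+# those leak into the computed dest.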
diff --git a/test/integration/targets/fetch/injection/here.txt b/test/integration/targets/fetch/injection/here.txt
new file mode 100644
index 00000000..493021b1
--- /dev/null
+++ b/test/integration/targets/fetch/injection/here.txt
@@ -0,0 +1 @@
+this is a test file
diff --git a/test/integration/targets/fetch/injection/library/slurp.py b/test/integration/targets/fetch/injection/library/slurp.py
new file mode 100644
index 00000000..7b78ba18
--- /dev/null
+++ b/test/integration/targets/fetch/injection/library/slurp.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+ module: fakeslurp
+ short_description: fake slurp module
+ description:
+ - this is a fake slurp module
+ options:
+ _notreal:
+ description: really not a real slurp
+ author:
+ - me
+"""
+
+import json
+import random
+
+bad_responses = ['../foo', '../../foo', '../../../foo', '/../../../foo', '/../foo', '//..//foo', '..//..//foo']
+
+
+def main():
+ print(json.dumps(dict(changed=False, content='', encoding='base64', source=random.choice(bad_responses))))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/fetch/roles/fetch_tests/meta/main.yml b/test/integration/targets/fetch/roles/fetch_tests/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/fetch/roles/fetch_tests/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml b/test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml
new file mode 100644
index 00000000..267ae0f0
--- /dev/null
+++ b/test/integration/targets/fetch/roles/fetch_tests/tasks/main.yml
@@ -0,0 +1,141 @@
+# Test code for the fetch module.
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: create a file that we can use to fetch
+ copy: content="test" dest={{ remote_tmp_dir }}/orig
+
+- name: fetch the test file
+ fetch: src={{ remote_tmp_dir }}/orig dest={{ output_dir }}/fetched
+ register: fetched
+
+- debug: var=fetched
+
+- name: Assert that we fetched correctly
+ assert:
+ that:
+ - 'fetched["changed"] == True'
+ - 'fetched["checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"'
+ - 'fetched["remote_checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"'
+ - 'lookup("file", output_dir + "/fetched/" + inventory_hostname + remote_tmp_dir + "/orig") == "test"'
+
+# TODO: check the become and non-become forms of fetch because in one form we'll do
+# the get method of the connection plugin and in the become case we'll use the
+# fetch module.
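+#
+# A hypothetical sketch of the become form (not currently exercised here):
+#
+# - name: fetch the test file with become
+#   fetch: src={{ remote_tmp_dir }}/orig dest={{ output_dir }}/fetched_become
+#   become: yes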
+
+- name: fetch a second time to show idempotence
+ fetch: src={{ remote_tmp_dir }}/orig dest={{ output_dir }}/fetched
+ register: fetched
+
+- name: Assert that the file was not fetched the second time
+ assert:
+ that:
+ - 'fetched["changed"] == False'
+ - 'fetched["checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"'
+
+- name: attempt to fetch a non-existent file - do not fail on missing
+ fetch: src={{ remote_tmp_dir }}/doesnotexist dest={{ output_dir }}/fetched fail_on_missing=False
+ register: fetch_missing_nofail
+
+- name: check fetch missing no fail result
+ assert:
+ that:
+ - "fetch_missing_nofail.msg"
+ - "fetch_missing_nofail is not changed"
+
+- name: attempt to fetch a non-existent file - fail on missing
+ fetch: src={{ remote_tmp_dir }}/doesnotexist dest={{ output_dir }}/fetched fail_on_missing=yes
+ register: fetch_missing
+ ignore_errors: true
+
+- name: check fetch missing with failure
+ assert:
+ that:
+ - "fetch_missing is failed"
+ - "fetch_missing.msg"
+ - "fetch_missing is not changed"
+
+- name: attempt to fetch a non-existent file - fail on missing implicit
+ fetch: src={{ remote_tmp_dir }}/doesnotexist dest={{ output_dir }}/fetched
+ register: fetch_missing_implicit
+ ignore_errors: true
+
+- name: check fetch missing with failure with implicit fail
+ assert:
+ that:
+ - "fetch_missing_implicit is failed"
+ - "fetch_missing_implicit.msg"
+ - "fetch_missing_implicit is not changed"
+
+- name: attempt to fetch a directory - should not fail but return a message
+ fetch: src={{ remote_tmp_dir }} dest={{ output_dir }}/somedir fail_on_missing=False
+ register: fetch_dir
+
+- name: check fetch directory result
+ assert:
+ that:
+ - "fetch_dir is not changed"
+ - "fetch_dir.msg"
+
+- name: attempt to fetch a directory - should fail
+ fetch: src={{ remote_tmp_dir }} dest={{ output_dir }}/somedir fail_on_missing=True
+ register: failed_fetch_dir
+ ignore_errors: true
+
+- name: check fetch directory failure result
+ assert:
+ that:
+ - "failed_fetch_dir is failed"
+ - "fetch_dir.msg"
+
+- name: create symlink to a file that we can fetch
+ file:
+ path: "{{ remote_tmp_dir }}/link"
+ src: "{{ remote_tmp_dir }}/orig"
+ state: "link"
+
+- name: fetch the file via a symlink
+ fetch: src={{ remote_tmp_dir }}/link dest={{ output_dir }}/fetched-link
+ register: fetched
+
+- debug: var=fetched
+
+- name: Assert that we fetched correctly
+ assert:
+ that:
+ - 'fetched["changed"] == True'
+ - 'fetched["checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"'
+ - 'fetched["remote_checksum"] == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"'
+ - 'lookup("file", output_dir + "/fetched-link/" + inventory_hostname + remote_tmp_dir + "/link") == "test"'
+
+- name: dest is an existing directory name without trailing slash and flat=yes, should fail
+ fetch:
+ src: "{{ remote_tmp_dir }}/orig"
+ dest: "{{ output_dir }}"
+ flat: yes
+ register: failed_fetch_dest_dir
+ ignore_errors: true
+
+- name: check that it indeed failed
+ assert:
+ that:
+ - "failed_fetch_dest_dir is failed"
+ - "failed_fetch_dest_dir.msg"
diff --git a/test/integration/targets/fetch/run_fetch_tests.yml b/test/integration/targets/fetch/run_fetch_tests.yml
new file mode 100644
index 00000000..f2ff1df3
--- /dev/null
+++ b/test/integration/targets/fetch/run_fetch_tests.yml
@@ -0,0 +1,5 @@
+- name: call fetch_tests role
+ hosts: testhost
+ gather_facts: false
+ roles:
+ - fetch_tests
diff --git a/test/integration/targets/fetch/runme.sh b/test/integration/targets/fetch/runme.sh
new file mode 100755
index 00000000..7e909dde
--- /dev/null
+++ b/test/integration/targets/fetch/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# setup required roles
+ln -s ../../setup_remote_tmp_dir roles/setup_remote_tmp_dir
+
+# run old type role tests
+ansible-playbook -i ../../inventory run_fetch_tests.yml -e "output_dir=${OUTPUT_DIR}" -v "$@"
+
+# run tests to avoid path injection from slurp when fetch uses become
+ansible-playbook -i ../../inventory injection/avoid_slurp_return.yml -e "output_dir=${OUTPUT_DIR}" -v "$@"
diff --git a/test/integration/targets/file/aliases b/test/integration/targets/file/aliases
new file mode 100644
index 00000000..4a2ce27c
--- /dev/null
+++ b/test/integration/targets/file/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+needs/root
+skip/aix
diff --git a/test/integration/targets/file/defaults/main.yml b/test/integration/targets/file/defaults/main.yml
new file mode 100644
index 00000000..8e9a5836
--- /dev/null
+++ b/test/integration/targets/file/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+remote_unprivileged_user: tmp_ansible_test_user
diff --git a/test/integration/targets/file/files/foo.txt b/test/integration/targets/file/files/foo.txt
new file mode 100644
index 00000000..7c6ded14
--- /dev/null
+++ b/test/integration/targets/file/files/foo.txt
@@ -0,0 +1 @@
+foo.txt
diff --git a/test/integration/targets/file/files/foobar/directory/fileC b/test/integration/targets/file/files/foobar/directory/fileC
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/file/files/foobar/directory/fileC
diff --git a/test/integration/targets/file/files/foobar/directory/fileD b/test/integration/targets/file/files/foobar/directory/fileD
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/file/files/foobar/directory/fileD
diff --git a/test/integration/targets/file/files/foobar/fileA b/test/integration/targets/file/files/foobar/fileA
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/file/files/foobar/fileA
diff --git a/test/integration/targets/file/files/foobar/fileB b/test/integration/targets/file/files/foobar/fileB
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/file/files/foobar/fileB
diff --git a/test/integration/targets/file/meta/main.yml b/test/integration/targets/file/meta/main.yml
new file mode 100644
index 00000000..06d4fd29
--- /dev/null
+++ b/test/integration/targets/file/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_nobody
diff --git a/test/integration/targets/file/tasks/diff_peek.yml b/test/integration/targets/file/tasks/diff_peek.yml
new file mode 100644
index 00000000..802a99aa
--- /dev/null
+++ b/test/integration/targets/file/tasks/diff_peek.yml
@@ -0,0 +1,10 @@
+- name: Run task with _diff_peek
+ file:
+ path: "{{ output_file }}"
+ _diff_peek: yes
+ register: diff_peek_result
+
+- name: Ensure warning was not issued when using _diff_peek parameter
+ assert:
+ that:
+ - diff_peek_result['warnings'] is not defined
diff --git a/test/integration/targets/file/tasks/directory_as_dest.yml b/test/integration/targets/file/tasks/directory_as_dest.yml
new file mode 100644
index 00000000..9b6ddb5d
--- /dev/null
+++ b/test/integration/targets/file/tasks/directory_as_dest.yml
@@ -0,0 +1,345 @@
+# File module tests for overwriting directories
+- name: Initialize the test output dir
+ include: initialize.yml
+
+# We need to make this more consistent:
+# https://github.com/ansible/proposals/issues/111
+#
+# This series of tests documents the current inconsistencies. We should not
+# break these by accident but if we approve a proposal we can break these on
+# purpose.
+
+#
+# Setup
+#
+
+- name: create a test sub-directory
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: directory
+
+- name: create a file for linking to
+ copy:
+ dest: '{{output_dir}}/file_to_link'
+ content: 'Hello World'
+
+#
+# Error condition: specify a directory with state={link,file}, force=False
+#
+
+# file raises an error
+- name: Try to create a file with directory as dest
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: file
+ force: False
+ ignore_errors: True
+ register: file1_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file1_dir_stat
+
+- name: verify that the directory was not overwritten
+ assert:
+ that:
+ - 'file1_result is failed'
+ - 'file1_dir_stat["stat"].isdir'
+
+# link raises an error
+- name: Try to create a symlink with directory as dest
+ file:
+ src: '{{ output_dir }}/file_to_link'
+ dest: '{{output_dir}}/sub1'
+ state: link
+ force: False
+ ignore_errors: True
+ register: file2_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file2_dir_stat
+
+- name: verify that the directory was not overwritten
+ assert:
+ that:
+ - 'file2_result is failed'
+ - 'file2_dir_stat["stat"].isdir'
+
+#
+# Error condition: file and link with non-empty directory
+#
+
+- copy:
+ content: 'test'
+ dest: '{{ output_dir }}/sub1/passwd'
+
+# file raises an error
+- name: Try to create a file with directory as dest
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: file
+ force: True
+ ignore_errors: True
+ register: file3_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file3_dir_stat
+
+- name: verify that the directory was not overwritten
+ assert:
+ that:
+ - 'file3_result is failed'
+ - 'file3_dir_stat["stat"].isdir'
+
+# link raises an error
+- name: Try to create a symlink with directory as dest
+ file:
+ src: '{{ output_dir }}/file_to_link'
+ dest: '{{output_dir}}/sub1'
+ state: link
+ force: True
+ ignore_errors: True
+ register: file4_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file4_dir_stat
+
+- name: verify that the directory was not overwritten
+ assert:
+ that:
+ - 'file4_result is failed'
+ - 'file4_dir_stat["stat"].isdir'
+
+# Cleanup the file that made it non-empty
+- name: Cleanup the file that made the directory nonempty
+ file:
+ state: 'absent'
+ dest: '{{ output_dir }}/sub1/passwd'
+
+#
+# Error condition: file cannot even overwrite an empty directory with force=True
+#
+
+# file raises an error
+- name: Try to create a file with directory as dest
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: file
+ force: True
+ ignore_errors: True
+ register: file5_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file5_dir_stat
+
+- name: verify that the directory was not overwritten
+ assert:
+ that:
+ - 'file5_result is failed'
+ - 'file5_dir_stat["stat"].isdir'
+
+#
+# Directory overwriting - link with force=True will overwrite an empty directory
+#
+
+# link can overwrite an empty directory with force=True
+- name: Try to create a symlink with directory as dest
+ file:
+ src: '{{ output_dir }}/file_to_link'
+ dest: '{{output_dir}}/sub1'
+ state: link
+ force: True
+ register: file6_result
+
+- name: Get stat info to show the directory has been overwritten
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file6_dir_stat
+
+- name: verify that the directory was overwritten
+ assert:
+ that:
+ - 'file6_result is changed'
+ - 'not file6_dir_stat["stat"].isdir'
+ - 'file6_dir_stat["stat"].islnk'
+
+#
+# Cleanup from last set of tests
+#
+
+- name: Cleanup the test subdirectory
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: 'absent'
+
+- name: Re-create the test sub-directory
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: 'directory'
+
+#
+# Hard links have the proposed 111 behaviour already: Place the new file inside the directory
+#
+
+- name: Try to create a hardlink with directory as dest
+ file:
+ src: '{{ output_dir }}/file_to_link'
+ dest: '{{ output_dir }}/sub1'
+ state: hard
+ force: False
+ ignore_errors: True
+ register: file7_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file7_dir_stat
+
+- name: Get stat info to show the link has been created
+ stat:
+ path: '{{ output_dir }}/sub1/file_to_link'
+ follow: False
+ register: file7_link_stat
+
+- debug:
+ var: file7_link_stat
+
+- name: verify that the directory was not overwritten
+ assert:
+ that:
+ - 'file7_result is changed'
+ - 'file7_dir_stat["stat"].isdir'
+ - 'file7_link_stat["stat"].isfile'
+ ignore_errors: True
+
+#
+# Touch is a bit different than everything else.
+# If we need to set timestamps we should probably add atime, mtime, and ctime parameters
+# But I think touch was written because state=file didn't create a file if it
+# didn't already exist. We should look at changing that behaviour.
+#
+
+- name: Get initial stat info to compare with later
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file8_initial_dir_stat
+
+- name: Pause to ensure stat times are not the exact same
+ pause:
+ seconds: 1
+
+- name: Use touch with directory as dest
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: touch
+ force: False
+ register: file8_result
+
+- name: Get stat info to show the directory has not been changed to a file
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file8_dir_stat
+
+- name: verify that the directory has been updated
+ assert:
+ that:
+ - 'file8_result is changed'
+ - 'file8_dir_stat["stat"].isdir'
+ - 'file8_dir_stat["stat"]["mtime"] != file8_initial_dir_stat["stat"]["mtime"]'
+
+- name: Get initial stat info to compare with later
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file11_initial_dir_stat
+
+- name: Use touch with directory as dest and keep mtime and atime
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: touch
+ force: False
+ modification_time: preserve
+ access_time: preserve
+ register: file11_result
+
+- name: Get stat info to show the directory has not been changed
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file11_dir_stat
+
+- name: verify that the directory has not been updated
+ assert:
+ that:
+ - 'file11_result is not changed'
+ - 'file11_dir_stat["stat"].isdir'
+ - 'file11_dir_stat["stat"]["mtime"] == file11_initial_dir_stat["stat"]["mtime"]'
+ - 'file11_dir_stat["stat"]["atime"] == file11_initial_dir_stat["stat"]["atime"]'
+
+#
+# State=directory realizes that the directory already exists and does nothing
+#
+- name: Get initial stat info to compare with later
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file9_initial_dir_stat
+
+- name: Use directory with directory as dest
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: directory
+ force: False
+ register: file9_result
+
+- name: Get stat info to show the directory has not been changed
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file9_dir_stat
+
+- name: verify that the directory has been updated
+ assert:
+ that:
+ - 'file9_result is not changed'
+ - 'file9_dir_stat["stat"].isdir'
+ - 'file9_dir_stat["stat"]["mtime"] == file9_initial_dir_stat["stat"]["mtime"]'
+
+- name: Use directory with directory as dest and force=True
+ file:
+ dest: '{{output_dir}}/sub1'
+ state: directory
+ force: True
+ register: file10_result
+
+- name: Get stat info to show the directory has not been changed
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file10_dir_stat
+
+- name: verify that the directory has been updated
+ assert:
+ that:
+ - 'file10_result is not changed'
+ - 'file10_dir_stat["stat"].isdir'
+ - 'file10_dir_stat["stat"]["mtime"] == file9_initial_dir_stat["stat"]["mtime"]'
diff --git a/test/integration/targets/file/tasks/initialize.yml b/test/integration/targets/file/tasks/initialize.yml
new file mode 100644
index 00000000..dd7d1274
--- /dev/null
+++ b/test/integration/targets/file/tasks/initialize.yml
@@ -0,0 +1,15 @@
+#
+# Cleanup the output dir and recreate it for the tests to operate on
+#
+- name: Cleanup the output directory
+ file:
+ dest: '{{ output_dir }}'
+ state: 'absent'
+
+- name: Recreate the toplevel output dir
+ file:
+ dest: '{{ output_dir }}'
+ state: 'directory'
+
+- name: prep with a basic file to operate on
+ copy: src=foo.txt dest={{output_file}}
diff --git a/test/integration/targets/file/tasks/main.yml b/test/integration/targets/file/tasks/main.yml
new file mode 100644
index 00000000..34ae4ba4
--- /dev/null
+++ b/test/integration/targets/file/tasks/main.yml
@@ -0,0 +1,752 @@
+# Test code for the file module.
+# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact: output_file={{output_dir}}/foo.txt
+
+# same as expanduser & expandvars called on managed host
+- command: 'echo {{ output_file }}'
+ register: echo
+
+- set_fact:
+ remote_file_expanded: '{{ echo.stdout }}'
+
+# Import the test tasks
+- name: Run tests for state=link
+ import_tasks: state_link.yml
+
+- name: Run tests for directory as dest
+ import_tasks: directory_as_dest.yml
+
+- name: Run tests for unicode
+ import_tasks: unicode_path.yml
+ environment:
+ LC_ALL: C
+ LANG: C
+
+- name: decide to include or not include selinux tests
+ include_tasks: selinux_tests.yml
+ when: selinux_installed is defined and selinux_installed.stdout != "" and selinux_enabled.stdout != "Disabled"
+
+- name: Initialize the test output dir
+ import_tasks: initialize.yml
+
+- name: Test _diff_peek
+ import_tasks: diff_peek.yml
+
+
+# These tests need to be organized by state parameter into separate files later
+
+- name: verify that we are checking a file and it is present
+ file: path={{output_file}} state=file
+ register: file_result
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "file_result.changed == false"
+ - "file_result.state == 'file'"
+
+- name: Make sure file does not exist
+ file:
+ path: /tmp/ghost
+ state: absent
+
+- name: Target a file that does not exist
+ file:
+ path: /tmp/ghost
+ ignore_errors: yes
+ register: ghost_file_result
+
+- name: Validate ghost file results
+ assert:
+ that:
+ - ghost_file_result is failed
+ - ghost_file_result is not changed
+ - ghost_file_result.state == 'absent'
+ - "'cannot continue' in ghost_file_result.msg"
+
+- name: verify that we are checking an absent file
+ file: path={{output_dir}}/bar.txt state=absent
+ register: file2_result
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "file2_result.changed == false"
+ - "file2_result.state == 'absent'"
+
+- name: verify we can touch a file
+ file: path={{output_dir}}/baz.txt state=touch
+ register: file3_result
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file3_result.changed == true"
+ - "file3_result.state == 'file'"
+ - "file3_result.mode == '0644'"
+
+- name: change file mode
+ file: path={{output_dir}}/baz.txt mode=0600
+ register: file4_result
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file4_result.changed == true"
+ - "file4_result.mode == '0600'"
+
+- name: define file to verify chattr/lsattr with
+ set_fact:
+ attributes_file: "{{ output_dir }}/attributes.txt"
+ attributes_supported: no
+
+- name: create file to verify chattr/lsattr with
+ command: touch "{{ attributes_file }}"
+
+- name: add "A" attribute to file
+ command: chattr +A "{{ attributes_file }}"
+ ignore_errors: yes
+
+- name: get attributes from file
+ # Use of `-v` is important, as that is what the module does (through `set_attributes_if_different` and then `get_file_attributes` in basic.py).
+ # On some systems, such as in containers, attributes work, but file versions may not.
+ # It should be possible to update `set_attributes_if_different` in the future to not use `-v` since the file version is unrelated to the attributes.
+ command: lsattr -vd "{{ attributes_file }}"
+ register: attribute_A_set
+ ignore_errors: yes
+
+- name: remove "A" attribute from file
+ command: chattr -A "{{ attributes_file }}"
+ ignore_errors: yes
+
+- name: get attributes from file
+ # See the note above on use of the `-v` option.
+ command: lsattr -vd "{{ attributes_file }}"
+ register: attribute_A_unset
+ ignore_errors: yes
+
+- name: determine if chattr/lsattr is supported
+ set_fact:
+ attributes_supported: yes
+ when:
+ - attribute_A_set is success
+ - "'A' in attribute_A_set.stdout_lines[0].split()[1]"
+ - attribute_A_unset is success
+ - "'A' not in attribute_A_unset.stdout_lines[0].split()[1]"
+
+- name: explicitly set file attribute "A"
+ file: path={{output_dir}}/baz.txt attributes=A
+ register: file_attributes_result
+ ignore_errors: True
+ when: attributes_supported
+
+- name: add file attribute "A"
+ file: path={{output_dir}}/baz.txt attributes=+A
+ register: file_attributes_result_2
+ when: file_attributes_result is changed
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "file_attributes_result_2 is not changed"
+ when: file_attributes_result is changed
+
+- name: remove file attribute "A"
+ file: path={{output_dir}}/baz.txt attributes=-A
+ register: file_attributes_result_3
+ ignore_errors: True
+
+- name: explicitly remove file attributes
+ file: path={{output_dir}}/baz.txt attributes=""
+ register: file_attributes_result_4
+ when: file_attributes_result_3 is changed
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "file_attributes_result_4 is not changed"
+ when: file_attributes_result_3 is changed
+
+- name: change ownership and group
+ file: path={{output_dir}}/baz.txt owner=1234 group=1234
+
+- name: Get stat info to check atime later
+ stat: path={{output_dir}}/baz.txt
+ register: file_attributes_result_5_before
+
+- name: updates access time
+ file: path={{output_dir}}/baz.txt access_time=now
+ register: file_attributes_result_5
+
+- name: Get stat info to check atime later
+ stat: path={{output_dir}}/baz.txt
+ register: file_attributes_result_5_after
+
+- name: verify that the file was marked as changed and atime changed
+ assert:
+ that:
+ - "file_attributes_result_5 is changed"
+ - "file_attributes_result_5_after['stat']['atime'] != file_attributes_result_5_before['stat']['atime']"
+
+- name: setup a tmp-like directory for ownership test
+ file: path=/tmp/worldwritable mode=1777 state=directory
+
+- name: Ask to create a file without enough perms to change ownership
+ file: path=/tmp/worldwritable/baz.txt state=touch owner=root
+ become: yes
+ become_user: nobody
+ register: chown_result
+ ignore_errors: True
+
+- name: Ask whether the new file exists
+ stat: path=/tmp/worldwritable/baz.txt
+ register: file_exists_result
+
+- name: Verify that the file doesn't exist on failure
+ assert:
+ that:
+ - "chown_result.failed == True"
+ - "file_exists_result.stat.exists == False"
+
+- name: clean up
+ file: path=/tmp/worldwritable state=absent
+
+- name: create hard link to file
+ file: src={{output_file}} dest={{output_dir}}/hard.txt state=hard
+ register: file6_result
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file6_result.changed == true"
+
+- name: touch a hard link
+ file:
+ dest: '{{ output_dir }}/hard.txt'
+ state: 'touch'
+ register: file6_touch_result
+
+- name: verify that the hard link was touched
+ assert:
+ that:
+ - "file6_touch_result.changed == true"
+
+- name: stat1
+ stat: path={{output_file}}
+ register: hlstat1
+
+- name: stat2
+ stat: path={{output_dir}}/hard.txt
+ register: hlstat2
+
+- name: verify that hard link is still the same after timestamp updated
+ assert:
+ that:
+ - "hlstat1.stat.inode == hlstat2.stat.inode"
+
+- name: create hard link to file 2
+ file: src={{output_file}} dest={{output_dir}}/hard.txt state=hard
+ register: hlink_result
+
+- name: verify that hard link creation is idempotent
+ assert:
+ that:
+ - "hlink_result.changed == False"
+
+- name: Change mode on a hard link
+ file: src={{output_file}} dest={{output_dir}}/hard.txt mode=0701
+ register: file6_mode_change
+
+- name: verify that the hard link mode was changed
+ assert:
+ that:
+ - "file6_mode_change.changed == true"
+
+- name: stat1
+ stat: path={{output_file}}
+ register: hlstat1
+
+- name: stat2
+ stat: path={{output_dir}}/hard.txt
+ register: hlstat2
+
+- name: verify that hard link is still the same after mode updated
+ assert:
+ that:
+ - "hlstat1.stat.inode == hlstat2.stat.inode"
+ - "hlstat1.stat.mode == '0701'"
+
+- name: create a directory
+ file: path={{output_dir}}/foobar state=directory
+ register: file7_result
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file7_result.changed == true"
+ - "file7_result.state == 'directory'"
+
+- name: determine if selinux is installed
+ shell: which getenforce || exit 0
+ register: selinux_installed
+
+- name: determine if selinux is enabled
+ shell: getenforce
+ register: selinux_enabled
+ when: selinux_installed.stdout != ""
+ ignore_errors: true
+
+- name: remove directory foobar
+ file: path={{output_dir}}/foobar state=absent
+
+- name: remove file foo.txt
+ file: path={{output_dir}}/foo.txt state=absent
+
+- name: remove file bar.txt
+ file: path={{output_dir}}/bar.txt state=absent
+
+- name: remove file baz.txt
+ file: path={{output_dir}}/baz.txt state=absent
+
+- name: copy directory structure over
+ copy: src=foobar dest={{output_dir}}
+
+- name: check what would be removed if folder state was absent and diff is enabled
+ file:
+ path: "{{ item }}"
+ state: absent
+ check_mode: yes
+ diff: yes
+ with_items:
+ - "{{ output_dir }}"
+ - "{{ output_dir }}/foobar/fileA"
+ register: folder_absent_result
+
+- name: 'assert that the "absent" state lists expected files and folders for only directories'
+ assert:
+ that:
+ - folder_absent_result.results[0].diff.before.path_content is defined
+ - folder_absent_result.results[1].diff.before.path_content is not defined
+ - test_folder in folder_absent_result.results[0].diff.before.path_content.directories
+ - test_file in folder_absent_result.results[0].diff.before.path_content.files
+ vars:
+ test_folder: "{{ folder_absent_result.results[0].path }}/foobar"
+ test_file: "{{ folder_absent_result.results[0].path }}/foobar/fileA"
+
+- name: Change ownership of a directory with recurse=no (default)
+ file: path={{output_dir}}/foobar owner=1234
+
+- name: verify that the permission of the directory was set
+ file: path={{output_dir}}/foobar state=directory
+ register: file8_result
+
+- name: assert that the directory has changed to have owner 1234
+ assert:
+ that:
+ - "file8_result.uid == 1234"
+
+- name: verify that the permission of a file under the directory was not set
+ file: path={{output_dir}}/foobar/fileA state=file
+ register: file9_result
+
+- name: assert the file owner has not changed to 1234
+ assert:
+ that:
+ - "file9_result.uid != 1234"
+
+- name: change the ownership of a directory with recurse=yes
+ file: path={{output_dir}}/foobar owner=1235 recurse=yes
+
+- name: verify that the permission of the directory was set
+ file: path={{output_dir}}/foobar state=directory
+ register: file10_result
+
+- name: assert that the directory has changed to have owner 1235
+ assert:
+ that:
+ - "file10_result.uid == 1235"
+
+- name: verify that the permission of a file under the directory was set
+ file: path={{output_dir}}/foobar/fileA state=file
+ register: file11_result
+
+- name: assert that the file has changed to have owner 1235
+ assert:
+ that:
+ - "file11_result.uid == 1235"
+
+- name: remove directory foobar
+ file: path={{output_dir}}/foobar state=absent
+ register: file14_result
+
+- name: verify that the directory was removed
+ assert:
+ that:
+ - 'file14_result.changed == true'
+ - 'file14_result.state == "absent"'
+
+- name: create a test sub-directory
+ file: dest={{output_dir}}/sub1 state=directory
+ register: file15_result
+
+- name: verify that the new directory was created
+ assert:
+ that:
+ - 'file15_result.changed == true'
+ - 'file15_result.state == "directory"'
+
+- name: create test files in the sub-directory
+ file: dest={{output_dir}}/sub1/{{item}} state=touch
+ with_items:
+ - file1
+ - file2
+ - file3
+ register: file16_result
+
+- name: verify the files were created
+ assert:
+ that:
+ - 'item.changed == true'
+ - 'item.state == "file"'
+ with_items: "{{file16_result.results}}"
+
+- name: test file creation with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=u=rwx,g=rwx,o=rwx
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0777'
+
+- name: modify symbolic mode for all
+ file: dest={{output_dir}}/test_symbolic state=touch mode=a=r
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0444'
+
+- name: modify symbolic mode for owner
+ file: dest={{output_dir}}/test_symbolic state=touch mode=u+w
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0644'
+
+- name: modify symbolic mode for group
+ file: dest={{output_dir}}/test_symbolic state=touch mode=g+w
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0664'
+
+- name: modify symbolic mode for world
+ file: dest={{output_dir}}/test_symbolic state=touch mode=o+w
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0666'
+
+- name: modify symbolic mode for owner
+ file: dest={{output_dir}}/test_symbolic state=touch mode=u+x
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0766'
+
+- name: modify symbolic mode for group
+ file: dest={{output_dir}}/test_symbolic state=touch mode=g+x
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0776'
+
+- name: modify symbolic mode for world
+ file: dest={{output_dir}}/test_symbolic state=touch mode=o+x
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0777'
+
+- name: remove symbolic mode for world
+ file: dest={{output_dir}}/test_symbolic state=touch mode=o-wx
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0774'
+
+- name: remove symbolic mode for group
+ file: dest={{output_dir}}/test_symbolic state=touch mode=g-wx
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0744'
+
+- name: remove symbolic mode for owner
+ file: dest={{output_dir}}/test_symbolic state=touch mode=u-wx
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0444'
+
+- name: set sticky bit with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=o+t
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '01444'
+
+- name: remove sticky bit with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=o-t
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0444'
+
+- name: add setgid with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=g+s
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '02444'
+
+- name: remove setgid with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=g-s
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0444'
+
+- name: add setuid with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=u+s
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '04444'
+
+- name: remove setuid with symbolic mode
+ file: dest={{output_dir}}/test_symbolic state=touch mode=u-s
+ register: result
+
+- name: assert file mode
+ assert:
+ that:
+ - result.mode == '0444'
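+
+# Recap of the symbolic modes exercised above: u/g/o/a select the owner,
+# group, other, and all classes; r=4, w=2, x=1 within each triad; and o+t,
+# g+s, u+s toggle the sticky (01000), setgid (02000), and setuid (04000) bits.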
+
+# https://github.com/ansible/ansible/issues/50943
+# Need to use /tmp as nobody can't access output_dir at all
+- name: create file as root with all write permissions
+ file: dest=/tmp/write_utime state=touch mode=0666 owner={{ansible_user_id}}
+
+- name: Pause to ensure stat times are not the exact same
+ pause:
+ seconds: 1
+
+- block:
+ - name: get previous time
+ stat: path=/tmp/write_utime
+ register: previous_time
+
+ - name: pause for 1 second to ensure the next touch is newer
+ pause: seconds=1
+
+ - name: touch file as nobody
+ file: dest=/tmp/write_utime state=touch
+ become: True
+ become_user: nobody
+ register: result
+
+ - name: get new time
+ stat: path=/tmp/write_utime
+ register: current_time
+
+ always:
+ - name: remove test utime file
+ file: path=/tmp/write_utime state=absent
+
+- name: assert touch file as nobody
+ assert:
+ that:
+ - result is changed
+ - current_time.stat.atime > previous_time.stat.atime
+ - current_time.stat.mtime > previous_time.stat.mtime
+
+# Follow + recursive tests
+- name: create a toplevel directory
+ file: path={{output_dir}}/test_follow_rec state=directory mode=0755
+
+- name: create a file outside of the toplevel
+ file: path={{output_dir}}/test_follow_rec_target_file state=touch mode=0700
+
+- name: create a directory outside of the toplevel
+ file: path={{output_dir}}/test_follow_rec_target_dir state=directory mode=0700
+
+- name: create a file inside of the link target directory
+ file: path={{output_dir}}/test_follow_rec_target_dir/foo state=touch mode=0700
+
+- name: create a symlink to the file
+ file: path={{output_dir}}/test_follow_rec/test_link state=link src="../test_follow_rec_target_file"
+
+- name: create a symlink to the directory
+ file: path={{output_dir}}/test_follow_rec/test_link_dir state=link src="../test_follow_rec_target_dir"
+
+- name: create a symlink to a nonexistent file
+ file: path={{output_dir}}/test_follow_rec/nonexistent state=link src=does_not_exist force=True
+
+- name: try to change permissions without following symlinks
+ file: path={{output_dir}}/test_follow_rec follow=False mode="a-x" recurse=True
+
+- name: stat the link file target
+ stat: path={{output_dir}}/test_follow_rec_target_file
+ register: file_result
+
+- name: stat the link dir target
+ stat: path={{output_dir}}/test_follow_rec_target_dir
+ register: dir_result
+
+- name: stat the file inside the link dir target
+ stat: path={{output_dir}}/test_follow_rec_target_dir/foo
+ register: file_in_dir_result
+
+- name: assert that the link targets were unmodified
+ assert:
+ that:
+ - file_result.stat.mode == '0700'
+ - dir_result.stat.mode == '0700'
+ - file_in_dir_result.stat.mode == '0700'
+
+- name: try to change permissions with following symlinks
+ file: path={{output_dir}}/test_follow_rec follow=True mode="a-x" recurse=True
+
+- name: stat the link file target
+ stat: path={{output_dir}}/test_follow_rec_target_file
+ register: file_result
+
+- name: stat the link dir target
+ stat: path={{output_dir}}/test_follow_rec_target_dir
+ register: dir_result
+
+- name: stat the file inside the link dir target
+ stat: path={{output_dir}}/test_follow_rec_target_dir/foo
+ register: file_in_dir_result
+
+- name: assert that the link targets were modified
+ assert:
+ that:
+ - file_result.stat.mode == '0600'
+ - dir_result.stat.mode == '0600'
+ - file_in_dir_result.stat.mode == '0600'
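+
+# "a-x" strips the execute bit from every class, so once follow=True lets the
+# recursion cross the symlinks, the 0700 targets above drop to 0600.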
+
+# https://github.com/ansible/ansible/issues/55971
+- name: Test missing src and path
+ file:
+ state: hard
+ register: file_error1
+ ignore_errors: yes
+
+- assert:
+ that:
+ - "file_error1 is failed"
+ - "file_error1.msg == 'missing required arguments: path'"
+
+- name: Test missing src
+ file:
+ dest: "{{ output_dir }}/hard.txt"
+ state: hard
+ register: file_error2
+ ignore_errors: yes
+
+- assert:
+ that:
+ - "file_error2 is failed"
+ - "file_error2.msg == 'src is required for creating new hardlinks'"
+
+- name: Test non-existing src
+ file:
+ src: non-existing-file-that-does-not-exist.txt
+ dest: "{{ output_dir }}/hard.txt"
+ state: hard
+ register: file_error3
+ ignore_errors: yes
+
+- assert:
+ that:
+ - "file_error3 is failed"
+ - "file_error3.msg == 'src does not exist'"
+ - "file_error3.dest == '{{ output_dir }}/hard.txt' | expanduser"
+ - "file_error3.src == 'non-existing-file-that-does-not-exist.txt'"
+
+- block:
+ - name: Create a testing file
+ file:
+ dest: original_file.txt
+ state: touch
+
+ - name: Test relative path with state=hard
+ file:
+ src: original_file.txt
+ dest: hard_link_file.txt
+ state: hard
+ register: hard_link_relpath
+
+ - name: Just check if it was successful, we don't care about the actual hard link in this test
+ assert:
+ that:
+ - "hard_link_relpath is success"
+
+ always:
+ - name: Clean up
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - original_file.txt
+ - hard_link_file.txt
+
+# END #55971
diff --git a/test/integration/targets/file/tasks/selinux_tests.yml b/test/integration/targets/file/tasks/selinux_tests.yml
new file mode 100644
index 00000000..6a95c442
--- /dev/null
+++ b/test/integration/targets/file/tasks/selinux_tests.yml
@@ -0,0 +1,33 @@
+# Test code for the file module - selinux subtasks.
+# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Initialize the test output dir
+ include: initialize.yml
+
+- name: touch a file for testing
+ file: path={{output_dir}}/foo-se.txt state=touch
+ register: file_se_result
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file_se_result.changed == true"
+ - "file_se_result.secontext == 'unconfined_u:object_r:admin_home_t:s0'"
+
+- name: remove the file used for testing
+ file: path={{output_dir}}/foo-se.txt state=absent
diff --git a/test/integration/targets/file/tasks/state_link.yml b/test/integration/targets/file/tasks/state_link.yml
new file mode 100644
index 00000000..89150adc
--- /dev/null
+++ b/test/integration/targets/file/tasks/state_link.yml
@@ -0,0 +1,487 @@
+# file module tests for dealing with symlinks (state=link)
+
+- name: Initialize the test output dir
+ include: initialize.yml
+
+#
+# Basic absolute symlink to a file
+#
+- name: create soft link to file
+ file: src={{output_file}} dest={{output_dir}}/soft.txt state=link
+ register: file1_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/soft.txt'
+ follow: False
+ register: file1_link_stat
+
+- name: verify that the symlink was created correctly
+ assert:
+ that:
+ - 'file1_result is changed'
+ - 'file1_link_stat["stat"].islnk'
+ - 'file1_link_stat["stat"].lnk_target | expanduser == output_file | expanduser'
+
+#
+# Change an absolute soft link into a relative soft link
+#
+- name: change soft link to relative
+ file: src={{output_file|basename}} dest={{output_dir}}/soft.txt state=link
+ register: file2_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/soft.txt'
+ follow: False
+ register: file2_link_stat
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "file2_result is changed"
+ - "file2_result.diff.before.src == remote_file_expanded"
+ - "file2_result.diff.after.src == remote_file_expanded|basename"
+ - "file2_link_stat['stat'].islnk"
+ - "file2_link_stat['stat'].lnk_target == remote_file_expanded | basename"
+
+#
+# Check that creating the soft link a second time was idempotent
+#
+- name: soft link idempotency check
+ file: src={{output_file|basename}} dest={{output_dir}}/soft.txt state=link
+ register: file3_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/soft.txt'
+ follow: False
+ register: file3_link_stat
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "not file3_result is changed"
+ - "file3_link_stat['stat'].islnk"
+ - "file3_link_stat['stat'].lnk_target == remote_file_expanded | basename"
+
+#
+# Test symlink to nonexistent files
+#
+- name: fail to create soft link to non existent file
+ file:
+ src: '/nonexistent'
+ dest: '{{output_dir}}/soft2.txt'
+ state: 'link'
+ force: False
+ register: file4_result
+ ignore_errors: true
+
+- name: verify that link was not created
+ assert:
+ that:
+ - "file4_result is failed"
+
+- name: force creation soft link to non existent
+ file:
+ src: '/nonexistent'
+ dest: '{{ output_dir}}/soft2.txt'
+ state: 'link'
+ force: True
+ register: file5_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/soft2.txt'
+ follow: False
+ register: file5_link_stat
+
+- name: verify that link was created
+ assert:
+ that:
+ - "file5_result is changed"
+ - "file5_link_stat['stat'].islnk"
+ - "file5_link_stat['stat'].lnk_target == '/nonexistent'"
+
+- name: Prove idempotence of force creation soft link to non existent
+ file:
+ src: '/nonexistent'
+ dest: '{{ output_dir }}/soft2.txt'
+ state: 'link'
+ force: True
+ register: file6a_result
+
+- name: verify that the link to nonexistent is idempotent
+ assert:
+ that:
+ - "file6a_result.changed == false"
+
+# In order for a symlink in a sticky, world-writable directory to be
+# followed, it must either be owned by the follower, or the directory and
+# symlink must have the same owner.
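+# (This mirrors the Linux fs.protected_symlinks sysctl, which otherwise
+# refuses to follow such links.)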
+- name: symlink in sticky directory
+ block:
+ - name: Create remote unprivileged user
+ user:
+ name: '{{ remote_unprivileged_user }}'
+ register: user
+
+ - name: Create a local temporary directory
+ tempfile:
+ state: directory
+ register: tempdir
+
+ - name: Set sticky bit
+ file:
+ path: '{{ tempdir.path }}'
+ mode: o=rwXt
+
+ - name: 'Check mode: force creation soft link in sticky directory owned by another user (mode is used)'
+ file:
+ src: '{{ user.home }}/nonexistent'
+ dest: '{{ tempdir.path }}/soft3.txt'
+ mode: 0640
+ state: 'link'
+ owner: '{{ remote_unprivileged_user }}'
+ force: true
+ follow: false
+ check_mode: true
+ register: missing_dst_no_follow_enable_force_use_mode1
+
+ - name: force creation soft link in sticky directory owned by another user (mode is used)
+ file:
+ src: '{{ user.home }}/nonexistent'
+ dest: '{{ tempdir.path }}/soft3.txt'
+ mode: 0640
+ state: 'link'
+ owner: '{{ remote_unprivileged_user }}'
+ force: true
+ follow: false
+ register: missing_dst_no_follow_enable_force_use_mode2
+
+ - name: Get stat info for the link
+ stat:
+ path: '{{ tempdir.path }}/soft3.txt'
+ follow: false
+ register: soft3_result
+
+ - name: 'Idempotence: force creation soft link in sticky directory owned by another user (mode is used)'
+ file:
+ src: '{{ user.home }}/nonexistent'
+ dest: '{{ tempdir.path }}/soft3.txt'
+ mode: 0640
+ state: 'link'
+ owner: '{{ remote_unprivileged_user }}'
+ force: yes
+ follow: false
+ register: missing_dst_no_follow_enable_force_use_mode3
+ always:
+ - name: Delete the unprivileged remote user
+ user:
+ name: '{{ remote_unprivileged_user }}'
+ state: absent
+
+ - name: Delete unprivileged user home and tempdir
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - '{{ tempdir.path }}'
+ - '{{ user.home }}'
+
+- name: verify that link was created
+ assert:
+ that:
+ - "missing_dst_no_follow_enable_force_use_mode1 is changed"
+ - "missing_dst_no_follow_enable_force_use_mode2 is changed"
+ - "missing_dst_no_follow_enable_force_use_mode3 is not changed"
+ - "soft3_result['stat'].islnk"
+ - "soft3_result['stat'].lnk_target == '{{ user.home }}/nonexistent'"
+
+#
+# Test creating a link to a directory https://github.com/ansible/ansible/issues/1369
+#
+- name: create soft link to directory using absolute path
+ file:
+ src: '/'
+ dest: '{{ output_dir }}/root'
+ state: 'link'
+ register: file6_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/root'
+ follow: False
+ register: file6_link_stat
+
+- name: Get stat info for the pointed to file
+ stat:
+ path: '{{ output_dir }}/root'
+ follow: True
+ register: file6_links_dest_stat
+
+- name: Get stat info for the file we intend to point to
+ stat:
+ path: '/'
+ follow: False
+ register: file6_dest_stat
+
+- name: verify that the link was created correctly
+ assert:
+ that:
+ # file command reports it created something
+ - "file6_result.changed == true"
+ # file command created a link
+ - 'file6_link_stat["stat"]["islnk"]'
+ # Link points to the right path
+ - 'file6_link_stat["stat"]["lnk_target"] == "/"'
+ # The link target and the file we intended to link to have the same inode
+ - 'file6_links_dest_stat["stat"]["inode"] == file6_dest_stat["stat"]["inode"]'
+
+#
+# Test creating a relative link
+#
+
+# Relative link to file
+- name: create a test sub-directory to link to
+ file:
+ dest: '{{ output_dir }}/sub1'
+ state: 'directory'
+
+- name: create a file to link to in the test sub-directory
+ file:
+ dest: '{{ output_dir }}/sub1/file1'
+ state: 'touch'
+
+- name: create another test sub-directory to place links within
+ file:
+ dest: '{{output_dir}}/sub2'
+ state: 'directory'
+
+- name: create soft link to relative file
+ file:
+ src: '../sub1/file1'
+ dest: '{{ output_dir }}/sub2/link1'
+ state: 'link'
+ register: file7_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/sub2/link1'
+ follow: False
+ register: file7_link_stat
+
+- name: Get stat info for the pointed to file
+ stat:
+ path: '{{ output_dir }}/sub2/link1'
+ follow: True
+ register: file7_links_dest_stat
+
+- name: Get stat info for the file we intend to point to
+ stat:
+ path: '{{ output_dir }}/sub1/file1'
+ follow: False
+ register: file7_dest_stat
+
+- name: verify that the link was created correctly
+ assert:
+ that:
+ # file command reports it created something
+ - "file7_result.changed == true"
+ # file command created a link
+ - 'file7_link_stat["stat"]["islnk"]'
+ # Link points to the right path
+ - 'file7_link_stat["stat"]["lnk_target"] == "../sub1/file1"'
+ # The link target and the file we intended to link to have the same inode
+ - 'file7_links_dest_stat["stat"]["inode"] == file7_dest_stat["stat"]["inode"]'
+
+# Relative link to directory
+- name: create soft link to relative directory
+ file:
+ src: sub1
+ dest: '{{ output_dir }}/sub1-link'
+ state: 'link'
+ register: file8_result
+
+- name: Get stat info for the link
+ stat:
+ path: '{{ output_dir }}/sub1-link'
+ follow: False
+ register: file8_link_stat
+
+- name: Get stat info for the pointed to file
+ stat:
+ path: '{{ output_dir }}/sub1-link'
+ follow: True
+ register: file8_links_dest_stat
+
+- name: Get stat info for the file we intend to point to
+ stat:
+ path: '{{ output_dir }}/sub1'
+ follow: False
+ register: file8_dest_stat
+
+- name: verify that the link was created correctly
+ assert:
+ that:
+ # file command reports it created something
+ - "file8_result.changed == true"
+ # file command created a link
+ - 'file8_link_stat["stat"]["islnk"]'
+ # Link points to the right path
+ - 'file8_link_stat["stat"]["lnk_target"] == "sub1"'
+ # The link target and the file we intended to link to have the same inode
+ - 'file8_links_dest_stat["stat"]["inode"] == file8_dest_stat["stat"]["inode"]'
+
+# test the file module using follow=yes, so that the target of a
+# symlink is modified, rather than the link itself
+
+- name: create a test file
+ copy:
+ dest: '{{output_dir}}/test_follow'
+ content: 'this is a test file\n'
+ mode: 0666
+
+- name: create a symlink to the test file
+ file:
+ path: '{{output_dir}}/test_follow_link'
+ src: './test_follow'
+ state: 'link'
+
+- name: modify the permissions on the link using follow=yes
+ file:
+ path: '{{output_dir}}/test_follow_link'
+ mode: 0644
+ follow: yes
+ register: file9_result
+
+- name: stat the link target
+ stat:
+ path: '{{output_dir}}/test_follow'
+ register: file9_stat
+
+- name: assert that the chmod worked
+ assert:
+ that:
+ - 'file9_result is changed'
+ - 'file9_stat["stat"]["mode"] == "0644"'
+
+#
+# Test modifying the permissions of a link itself
+#
+- name: attempt to modify the permissions of the link itself
+ file:
+ path: '{{output_dir}}/test_follow_link'
+ src: './test_follow'
+ state: 'link'
+ mode: 0600
+ follow: False
+ register: file10_result
+
+# Whether the link itself changed is platform dependent! (BSD vs Linux?)
+# Just check that the underlying file was not changed
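+# (Context: Linux has no lchmod(), so the mode of the link itself normally
+# cannot be changed there, while macOS and some BSDs do support it.)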
+- name: stat the link target
+ stat:
+ path: '{{output_dir}}/test_follow'
+ register: file10_target_stat
+
+- name: assert that the link target was unmodified
+ assert:
+ that:
+ - 'file10_result is changed'
+ - 'file10_target_stat["stat"]["mode"] == "0644"'
+
+
+# https://github.com/ansible/ansible/issues/56928
+- block:
+
+ - name: Create a testing file
+ file:
+ path: "{{ output_dir }}/test_follow1"
+ state: touch
+
+ - name: Create a symlink and change mode of the original file, since follow == yes by default
+ file:
+ src: "{{ output_dir }}/test_follow1"
+ dest: "{{ output_dir }}/test_follow1_link"
+ state: link
+ mode: 0700
+
+ - name: stat the original file
+ stat:
+ path: "{{ output_dir }}/test_follow1"
+ register: stat_out
+
+ - name: Check if the mode of the original file was set
+ assert:
+ that:
+ - 'stat_out.stat.mode == "0700"'
+
+ always:
+ - name: Clean up
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ output_dir }}/test_follow1"
+ - "{{ output_dir }}/test_follow1_link"
+
+# END #56928
+
+
+# Test failure with src and no state parameter
+- name: Specify src without state
+ file:
+ src: "{{ output_file }}"
+ dest: "{{ output_dir }}/link.txt"
+ ignore_errors: yes
+ register: src_state
+
+- name: Ensure src without state failed
+ assert:
+ that:
+ - src_state is failed
+ - "'src option requires state to be' in src_state.msg"
+
+# Test creating a symlink when the destination exists and is a file
+- name: create a test file
+ copy:
+ dest: '{{ output_dir }}/file.txt'
+ content: 'this is a test file\n'
+ mode: 0666
+
+- name: Create a symlink with dest already a file
+ file:
+ src: '{{ output_file }}'
+ dest: '{{ output_dir }}/file.txt'
+ state: link
+ ignore_errors: true
+ register: dest_is_existing_file_fail
+
+- name: Stat to make sure the symlink was not created
+ stat:
+ path: '{{ output_dir }}/file.txt'
+ follow: false
+ register: dest_is_existing_file_fail_stat
+
+- name: Forcefully create a symlink with dest already a file
+ file:
+ src: '{{ output_file }}'
+ dest: '{{ output_dir }}/file.txt'
+ state: link
+ force: true
+ register: dest_is_existing_file_force
+
+- name: Stat to make sure the symlink was created
+ stat:
+ path: '{{ output_dir }}/file.txt'
+ follow: false
+ register: dest_is_existing_file_force_stat
+
+- assert:
+ that:
+ - dest_is_existing_file_fail is failed
+ - not dest_is_existing_file_fail_stat.stat.islnk
+ - dest_is_existing_file_force is changed
+ - dest_is_existing_file_force_stat.stat.exists
+ - dest_is_existing_file_force_stat.stat.islnk
diff --git a/test/integration/targets/file/tasks/unicode_path.yml b/test/integration/targets/file/tasks/unicode_path.yml
new file mode 100644
index 00000000..d78af765
--- /dev/null
+++ b/test/integration/targets/file/tasks/unicode_path.yml
@@ -0,0 +1,10 @@
+- name: create local file with unicode filename and content
+ lineinfile:
+ dest: "{{ output_dir }}/语/汉语.txt"
+ create: true
+ line: 汉语
+
+- name: remove local file with unicode filename and content
+ file:
+ path: "{{ output_dir }}/语/汉语.txt"
+ state: absent
diff --git a/test/integration/targets/filter_core/aliases b/test/integration/targets/filter_core/aliases
new file mode 100644
index 00000000..1603f435
--- /dev/null
+++ b/test/integration/targets/filter_core/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/test/integration/targets/filter_core/files/9851.txt b/test/integration/targets/filter_core/files/9851.txt
new file mode 100644
index 00000000..70b12793
--- /dev/null
+++ b/test/integration/targets/filter_core/files/9851.txt
@@ -0,0 +1,3 @@
+ [{
+ "k": "Quotes \"'\n"
+}]
diff --git a/test/integration/targets/filter_core/files/fileglob/one.txt b/test/integration/targets/filter_core/files/fileglob/one.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/filter_core/files/fileglob/one.txt
diff --git a/test/integration/targets/filter_core/files/fileglob/two.txt b/test/integration/targets/filter_core/files/fileglob/two.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/filter_core/files/fileglob/two.txt
diff --git a/test/integration/targets/filter_core/files/foo.txt b/test/integration/targets/filter_core/files/foo.txt
new file mode 100644
index 00000000..9bd9b636
--- /dev/null
+++ b/test/integration/targets/filter_core/files/foo.txt
@@ -0,0 +1,69 @@
+This is a test of various filter plugins found in Ansible (ex: core.py), and
+not so much a test of the core filters in Jinja2.
+
+Dumping the same structure to YAML
+
+- this is a list element
+- this: is a hash element in a list
+ warp: 9
+ where: endor
+
+
+Dumping the same structure to JSON, but without pretty printing
+
+["this is a list element", {"this": "is a hash element in a list", "warp": 9, "where": "endor"}]
+
+Dumping the same structure to YAML, but without pretty printing
+
+- this is a list element
+- {this: is a hash element in a list, warp: 9, where: endor}
+
+
+From a recorded task, the changed, failed, success, and skipped
+tests are shortcuts to ask if those tasks produced changes, failed,
+succeeded, or skipped (as one might guess).
+
+Changed = True
+Failed = False
+Success = True
+Skipped = False
+
+The mandatory filter fails if a variable is not defined; otherwise it returns the value.
+To avoid breaking this test, this variable is already defined.
+
+a = 1
+
+There are various casts available
+
+int = 1
+bool = True
+
+String quoting
+
+quoted = quoted
+
+The fileglob filter returns the list of things matching a pattern.
+
+fileglob = one.txt, two.txt
+
+There are also various string operations that work on paths. These do not require
+files to exist and are pass-throughs to the Python os.path functions
+
+/etc/motd with basename = motd
+/etc/motd with dirname = /etc
+
+path_join_simple = /etc/subdir/test
+path_join_with_slash = /test
+path_join_relative = etc/subdir/test
+
+TODO: realpath follows symlinks. There isn't a test for this just now.
+
+TODO: add tests for set theory operations like union
+
+regex_replace = bar
+# Check regex_replace with multiline
+#bar
+#bart
+regex_search = 0001
+regex_findall = ["car", "tar", "bar"]
+regex_escape = \^f\.\*o\(\.\*\)\$
diff --git a/test/integration/targets/filter_core/handle_undefined_type_errors.yml b/test/integration/targets/filter_core/handle_undefined_type_errors.yml
new file mode 100644
index 00000000..70628809
--- /dev/null
+++ b/test/integration/targets/filter_core/handle_undefined_type_errors.yml
@@ -0,0 +1,29 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - debug: msg={{item}}
+ with_dict: '{{myundef}}'
+ when:
+ - myundef is defined
+ register: shouldskip
+
+ - name: check if skipped
+ assert:
+ that:
+ - shouldskip is skipped
+
+ - debug: msg={{item}}
+ loop: '{{myundef|dict2items}}'
+ when:
+ - myundef is defined
+
+ - debug: msg={{item}}
+ with_dict: '{{myundef}}'
+ register: notskipped
+ ignore_errors: true
+
+ - name: check it failed
+ assert:
+ that:
+ - notskipped is not skipped
+ - notskipped is failed
diff --git a/test/integration/targets/filter_core/host_vars/localhost b/test/integration/targets/filter_core/host_vars/localhost
new file mode 100644
index 00000000..a8926a52
--- /dev/null
+++ b/test/integration/targets/filter_core/host_vars/localhost
@@ -0,0 +1 @@
+a: 1
diff --git a/test/integration/targets/filter_core/meta/main.yml b/test/integration/targets/filter_core/meta/main.yml
new file mode 100644
index 00000000..e430ea6f
--- /dev/null
+++ b/test/integration/targets/filter_core/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - role: setup_passlib
+ when: ansible_facts.distribution == 'MacOSX'
diff --git a/test/integration/targets/filter_core/runme.sh b/test/integration/targets/filter_core/runme.sh
new file mode 100755
index 00000000..c055603b
--- /dev/null
+++ b/test/integration/targets/filter_core/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@"
+ANSIBLE_ROLES_PATH=../ ansible-playbook handle_undefined_type_errors.yml "$@"
diff --git a/test/integration/targets/filter_core/runme.yml b/test/integration/targets/filter_core/runme.yml
new file mode 100644
index 00000000..4af4b23c
--- /dev/null
+++ b/test/integration/targets/filter_core/runme.yml
@@ -0,0 +1,3 @@
+- hosts: localhost
+ roles:
+ - { role: filter_core }
diff --git a/test/integration/targets/filter_core/tasks/main.yml b/test/integration/targets/filter_core/tasks/main.yml
new file mode 100644
index 00000000..2197febd
--- /dev/null
+++ b/test/integration/targets/filter_core/tasks/main.yml
@@ -0,0 +1,576 @@
+# test code for filters
+# Copyright: (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Note: |groupby is already tested by the `groupby_filter` target.
+
+- set_fact:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- name: a dummy task to test the changed and success filters
+ shell: echo hi
+ register: some_registered_var
+
+- debug:
+ var: some_registered_var
+
+- name: Verify that we work around a py26 json bug
+ template:
+ src: py26json.j2
+ dest: "{{ output_dir }}/py26json.templated"
+ mode: 0644
+
+- name: 9851 - Verify that we don't trigger https://github.com/ansible/ansible/issues/9851
+ copy:
+ content: " [{{ item | to_nice_json }}]"
+ dest: "{{ output_dir }}/9851.out"
+ with_items:
+ - {"k": "Quotes \"'\n"}
+
+- name: 9851 - copy known good output into place
+ copy:
+ src: 9851.txt
+ dest: "{{ output_dir }}/9851.txt"
+
+- name: 9851 - Compare generated json to known good
+ shell: diff -w {{ output_dir }}/9851.out {{ output_dir }}/9851.txt
+ register: diff_result_9851
+
+- name: 9851 - verify generated file matches known good
+ assert:
+ that:
+ - 'diff_result_9851.stdout == ""'
+
+- name: fill in a basic template
+ template:
+ src: foo.j2
+ dest: "{{ output_dir }}/foo.templated"
+ mode: 0644
+ register: template_result
+
+- name: copy known good into place
+ copy:
+ src: foo.txt
+ dest: "{{ output_dir }}/foo.txt"
+
+- name: compare templated file to known good
+ shell: diff -w {{ output_dir }}/foo.templated {{ output_dir }}/foo.txt
+ register: diff_result
+
+- name: verify templated file matches known good
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+
+- name: Test extract
+ assert:
+ that:
+ - '"c" == 2 | extract(["a", "b", "c"])'
+ - '"b" == 1 | extract(["a", "b", "c"])'
+ - '"a" == 0 | extract(["a", "b", "c"])'
+
+- name: Container lookups with extract
+ assert:
+ that:
+ - "'x' == [0]|map('extract',['x','y'])|list|first"
+ - "'y' == [1]|map('extract',['x','y'])|list|first"
+ - "42 == ['x']|map('extract',{'x':42,'y':31})|list|first"
+ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last"
+ - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first"
+ - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first"
+ # map was added to jinja2 in version 2.7
+ when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=')
+
+- name: Test extract filter with defaults
+ vars:
+ container:
+ key:
+ subkey: value
+ assert:
+ that:
+ - "'key' | extract(badcontainer) | default('a') == 'a'"
+ - "'key' | extract(badcontainer, 'subkey') | default('a') == 'a'"
+ - "('key' | extract(badcontainer)).subkey | default('a') == 'a'"
+ - "'badkey' | extract(container) | default('a') == 'a'"
+ - "'badkey' | extract(container, 'subkey') | default('a') == 'a'"
+ - "('badkey' | extract(container)).subsubkey | default('a') == 'a'"
+ - "'key' | extract(container, 'badsubkey') | default('a') == 'a'"
+ - "'key' | extract(container, ['badsubkey', 'subsubkey']) | default('a') == 'a'"
+ - "('key' | extract(container, 'badsubkey')).subsubkey | default('a') == 'a'"
+ - "'badkey' | extract(hostvars) | default('a') == 'a'"
+ - "'badkey' | extract(hostvars, 'subkey') | default('a') == 'a'"
+ - "('badkey' | extract(hostvars)).subsubkey | default('a') == 'a'"
+ - "'localhost' | extract(hostvars, 'badsubkey') | default('a') == 'a'"
+ - "'localhost' | extract(hostvars, ['badsubkey', 'subsubkey']) | default('a') == 'a'"
+ - "('localhost' | extract(hostvars, 'badsubkey')).subsubkey | default('a') == 'a'"
+
+- name: Test hash filter
+ assert:
+ that:
+ - '"{{ "hash" | hash("sha1") }}" == "2346ad27d7568ba9896f1b7da6b5991251debdf2"'
+ - '"{{ "café" | hash("sha1") }}" == "f424452a9673918c6f09b0cdd35b20be8e6ae7d7"'
+ - '"corned beef"|hash("haha, get it?") == None'
+
+- name: Flatten tests
+ block:
+ - name: use flatten
+ set_fact:
+ flat_full: '{{orig_list|flatten}}'
+ flat_one: '{{orig_list|flatten(levels=1)}}'
+ flat_two: '{{orig_list|flatten(levels=2)}}'
+ flat_tuples: '{{ [1,3] | zip([2,4]) | list | flatten }}'
+
+ - name: Verify flatten filter works as expected
+ assert:
+ that:
+ - flat_full == [1, 2, 3, 4, 5, 6, 7]
+ - flat_one == [1, 2, 3, [4, [5]], 6, 7]
+ - flat_two == [1, 2, 3, 4, [5], 6, 7]
+ - flat_tuples == [1, 2, 3, 4]
+ vars:
+ orig_list: [1, 2, [3, [4, [5]], 6], 7]
+
+- name: Test base64 filter
+ assert:
+ that:
+ - "'Ansible - ãらã¨ã¿\n' | b64encode == 'QW5zaWJsZSAtIOOBj+OCieOBqOOBvwo='"
+ - "'QW5zaWJsZSAtIOOBj+OCieOBqOOBvwo=' | b64decode == 'Ansible - ãらã¨ã¿\n'"
+ - "'Ansible - ãらã¨ã¿\n' | b64encode(encoding='utf-16-le') == 'QQBuAHMAaQBiAGwAZQAgAC0AIABPMIkwaDB/MAoA'"
+ - "'QQBuAHMAaQBiAGwAZQAgAC0AIABPMIkwaDB/MAoA' | b64decode(encoding='utf-16-le') == 'Ansible - ãらã¨ã¿\n'"
+
+- set_fact:
+ x:
+ x: x
+ key: x
+ y:
+ y: y
+ key: y
+ z:
+ z: z
+ key: z
+
+ # The most complicated combine example from the documentation
+ default:
+ a:
+ a':
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ b:
+ - 1
+ - 1
+ - 2
+ - 3
+ patch:
+ a:
+ a':
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ b:
+ - 3
+ - 4
+ - 4
+ - key: value
+ result:
+ a:
+ a':
+ x: default_value
+ y: patch_value
+ z: patch_value
+ list:
+ - default_value
+ - patch_value
+ b:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 4
+ - 4
+ - key: value
+
+- name: Verify combine fails with extra kwargs
+ set_fact:
+ foo: "{{[1] | combine(foo='bar')}}"
+ ignore_errors: yes
+ register: combine_fail
+
+- name: Verify combine filter
+ assert:
+ that:
+ - "([x] | combine) == x"
+ - "(x | combine(y)) == {'x': 'x', 'y': 'y', 'key': 'y'}"
+ - "(x | combine(y, z)) == {'x': 'x', 'y': 'y', 'z': 'z', 'key': 'z'}"
+ - "([x, y, z] | combine) == {'x': 'x', 'y': 'y', 'z': 'z', 'key': 'z'}"
+ - "([x, y] | combine(z)) == {'x': 'x', 'y': 'y', 'z': 'z', 'key': 'z'}"
+ - "None|combine == {}"
+ # More advanced dict-combination tests are done in the "merge_hash" function unit tests;
+ # even though it is redundant, we still run the documentation's most complicated example here.
+ - "(default | combine(patch, recursive=True, list_merge='append_rp')) == result"
+ - combine_fail is failed
+ - "combine_fail.msg == \"'recursive' and 'list_merge' are the only valid keyword arguments\""
+
+- set_fact:
+ combine: "{{[x, [y]] | combine(z)}}"
+ ignore_errors: yes
+ register: result
+
+- name: Ensure combining objects which aren't dictionaries throws an error
+ assert:
+ that:
+ - "result.msg.startswith(\"failed to combine variables, expected dicts but got\")"
+
+- name: Ensure combining two dictionaries containing undefined variables provides a helpful error
+ block:
+ - set_fact:
+ foo:
+ key1: value1
+
+ - set_fact:
+ combined: "{{ foo | combine({'key2': undef_variable}) }}"
+ ignore_errors: yes
+ register: result
+
+ - assert:
+ that:
+ - "result.msg.startswith('The task includes an option with an undefined variable')"
+
+ - set_fact:
+ combined: "{{ foo | combine({'key2': {'nested': [undef_variable]}})}}"
+ ignore_errors: yes
+ register: result
+
+ - assert:
+ that:
+ - "result.msg.startswith('The task includes an option with an undefined variable')"
+
+- name: regex_search
+ set_fact:
+ match_case: "{{ 'hello' | regex_search('HELLO', ignorecase=false) }}"
+ ignore_case: "{{ 'hello' | regex_search('HELLO', ignorecase=true) }}"
+ single_line: "{{ 'hello\nworld' | regex_search('^world', multiline=false) }}"
+ multi_line: "{{ 'hello\nworld' | regex_search('^world', multiline=true) }}"
+ named_groups: "{{ 'goodbye' | regex_search('(?P<first>good)(?P<second>bye)', '\\g<second>', '\\g<first>') }}"
+ numbered_groups: "{{ 'goodbye' | regex_search('(good)(bye)', '\\2', '\\1') }}"
+
+- name: regex_search unknown argument (failure expected)
+ set_fact:
+ unknown_arg: "{{ 'hello' | regex_search('hello', 'unknown') }}"
+ ignore_errors: yes
+ register: failure
+
+- name: regex_search check
+ assert:
+ that:
+ - match_case == ''
+ - ignore_case == 'hello'
+ - single_line == ''
+ - multi_line == 'world'
+ - named_groups == ['bye', 'good']
+ - numbered_groups == ['bye', 'good']
+ - failure is failed
+
+- name: Verify to_bool
+ assert:
+ that:
+ - 'None|bool == None'
+ - 'False|bool == False'
+ - '"TrUe"|bool == True'
+ - '"FalSe"|bool == False'
+ - '7|bool == False'
+
+- name: Verify to_datetime
+ assert:
+ that:
+ - '"1993-03-26 01:23:45"|to_datetime < "1994-03-26 01:23:45"|to_datetime'
+
+- name: strftime invalid argument (failure expected)
+ set_fact:
+ foo: "{{ '%Y' | strftime('foo') }}"
+ ignore_errors: yes
+ register: strftime_fail
+
+- name: Verify strftime
+ assert:
+ that:
+ - '"%Y-%m-%d"|strftime(1585247522) == "2020-03-26"'
+ - '"%Y-%m-%d"|strftime("1585247522.0") == "2020-03-26"'
+ - '("%Y"|strftime(None)).startswith("20")' # Current date, can't check much there.
+ - strftime_fail is failed
+ - '"Invalid value for epoch value" in strftime_fail.msg'
+
+- name: Verify case-insensitive regex_replace
+ assert:
+ that:
+ - '"hElLo there"|regex_replace("hello", "hi", ignorecase=True) == "hi there"'
+
+- name: Verify case-insensitive regex_findall
+ assert:
+ that:
+ - '"hEllo there heLlo haha HELLO there"|regex_findall("h.... ", ignorecase=True)|length == 3'
+
+- name: Verify ternary
+ assert:
+ that:
+ - 'True|ternary("seven", "eight") == "seven"'
+ - 'None|ternary("seven", "eight") == "eight"'
+ - 'None|ternary("seven", "eight", "nine") == "nine"'
+ - 'False|ternary("seven", "eight") == "eight"'
+ - '123|ternary("seven", "eight") == "seven"'
+ - '"haha"|ternary("seven", "eight") == "seven"'
+
+- name: Verify regex_escape raises on posix_extended (failure expected)
+ set_fact:
+ foo: '{{"]]^"|regex_escape(re_type="posix_extended")}}'
+ ignore_errors: yes
+ register: regex_escape_fail_1
+
+- name: Verify regex_escape raises on other re_type (failure expected)
+ set_fact:
+ foo: '{{"]]^"|regex_escape(re_type="haha")}}'
+ ignore_errors: yes
+ register: regex_escape_fail_2
+
+- name: Verify regex_escape with re_type other than 'python'
+ assert:
+ that:
+ - '"]]^"|regex_escape(re_type="posix_basic") == "\\]\\]\\^"'
+ - regex_escape_fail_1 is failed
+ - 'regex_escape_fail_1.msg == "Regex type (posix_extended) not yet implemented"'
+ - regex_escape_fail_2 is failed
+ - 'regex_escape_fail_2.msg == "Invalid regex type (haha)"'
+
+- name: Verify from_yaml and from_yaml_all
+ assert:
+ that:
+ - "'---\nbananas: yellow\napples: red'|from_yaml == {'bananas': 'yellow', 'apples': 'red'}"
+ - "2|from_yaml == 2"
+ - "'---\nbananas: yellow\n---\napples: red'|from_yaml_all|list == [{'bananas': 'yellow'}, {'apples': 'red'}]"
+ - "2|from_yaml_all == 2"
+
+- name: Verify random raises on non-iterable input (failure expected)
+ set_fact:
+ foo: '{{None|random}}'
+ ignore_errors: yes
+ register: random_fail_1
+
+- name: Verify random raises on iterable input with start (failure expected)
+ set_fact:
+ foo: '{{[1,2,3]|random(start=2)}}'
+ ignore_errors: yes
+ register: random_fail_2
+
+- name: Verify random raises on iterable input with step (failure expected)
+ set_fact:
+ foo: '{{[1,2,3]|random(step=2)}}'
+ ignore_errors: yes
+ register: random_fail_3
+
+- name: Verify random
+ assert:
+ that:
+ - '2|random in [0,1]'
+ - '2|random(seed=1337) in [0,1]'
+ - '["a", "b"]|random in ["a", "b"]'
+ - '20|random(start=10) in range(10, 20)'
+ - '20|random(start=10, step=2) % 2 == 0'
+ - random_fail_1 is failure
+ - '"random can only be used on" in random_fail_1.msg'
+ - random_fail_2 is failure
+ - '"start and step can only be used" in random_fail_2.msg'
+ - random_fail_3 is failure
+ - '"start and step can only be used" in random_fail_3.msg'
+
+# It's hard to actually verify much here since the result is, well, random.
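+# (With a fixed seed the order is reproducible in principle, but it can differ
+# between Python versions, so only the length is asserted here.)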
+- name: Verify randomize_list
+ assert:
+ that:
+ - '[1,3,5,7,9]|shuffle|length == 5'
+ - '[1,3,5,7,9]|shuffle(seed=1337)|length == 5'
+ - '22|shuffle == 22'
+
+- name: Verify password_hash throws on weird salt_size type
+ set_fact:
+ foo: '{{"hey"|password_hash(salt_size=[999])}}'
+ ignore_errors: yes
+ register: password_hash_1
+
+- name: Verify password_hash throws on weird hashtype
+ set_fact:
+ foo: '{{"hey"|password_hash(hashtype="supersecurehashtype")}}'
+ ignore_errors: yes
+ register: password_hash_2
+
+- name: Verify password_hash
+ assert:
+ that:
+ - "'what in the WORLD is up?'|password_hash|length == 106"
+ # This throws a vastly different error on py2 vs py3, so we just check
+ # that it's a failure, not a substring of the exception.
+ - password_hash_1 is failed
+ - password_hash_2 is failed
+ - "'not support' in password_hash_2.msg"
+
+- name: Verify to_uuid throws on weird namespace
+ set_fact:
+ foo: '{{"hey"|to_uuid(namespace=22)}}'
+ ignore_errors: yes
+ register: to_uuid_1
+
+- name: Verify to_uuid
+ assert:
+ that:
+ - '"monkeys"|to_uuid == "0d03a178-da0f-5b51-934e-cda9c76578c3"'
+ - to_uuid_1 is failed
+ - '"Invalid value" in to_uuid_1.msg'
+
+- name: Verify mandatory throws on undefined variable
+ set_fact:
+ foo: '{{hey|mandatory}}'
+ ignore_errors: yes
+ register: mandatory_1
+
+- name: Verify mandatory throws on undefined variable with custom message
+ set_fact:
+ foo: '{{hey|mandatory("You did not give me a variable. I am a sad wolf.")}}'
+ ignore_errors: yes
+ register: mandatory_2
+
+- name: Set a variable
+ set_fact:
+ mandatory_demo: 123
+
+- name: Verify mandatory
+ assert:
+ that:
+ - '{{mandatory_demo|mandatory}} == 123'
+ - mandatory_1 is failed
+ - "mandatory_1.msg == \"Mandatory variable 'hey' not defined.\""
+ - mandatory_2 is failed
+ - "mandatory_2.msg == 'You did not give me a variable. I am a sad wolf.'"
+
+- name: Verify comment
+ assert:
+ that:
+ - '"boo!"|comment == "#\n# boo!\n#"'
+ - '"boo!"|comment(decoration="-- ") == "--\n-- boo!\n--"'
+ - '"boo!"|comment(style="cblock") == "/*\n *\n * boo!\n *\n */"'
+ - '"boo!"|comment(decoration="") == "boo!\n"'
+ - '"boo!"|comment(prefix="\n", prefix_count=20) == "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# boo!\n#"'
+
+- name: Verify subelements throws on invalid obj
+ set_fact:
+ foo: '{{True|subelements("foo")}}'
+ ignore_errors: yes
+ register: subelements_1
+
+- name: Verify subelements throws on invalid subelements arg
+ set_fact:
+ foo: '{{{}|subelements(17)}}'
+ ignore_errors: yes
+ register: subelements_2
+
+- name: Set demo data for subelements
+ set_fact:
+ subelements_demo: '{{ [{"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}] }}'
+
+- name: Verify subelements throws on bad key
+ set_fact:
+ foo: '{{subelements_demo | subelements("does not compute")}}'
+ ignore_errors: yes
+ register: subelements_3
+
+- name: Verify subelements throws on key pointing to bad value
+ set_fact:
+ foo: '{{subelements_demo | subelements("name")}}'
+ ignore_errors: yes
+ register: subelements_4
+
+- name: Verify subelements throws on list of keys ultimately pointing to bad value
+ set_fact:
+ foo: '{{subelements_demo | subelements(["groups", "authorized"])}}'
+ ignore_errors: yes
+ register: subelements_5
+
+- name: Verify subelements
+ assert:
+ that:
+ - subelements_1 is failed
+ - 'subelements_1.msg == "obj must be a list of dicts or a nested dict"'
+ - subelements_2 is failed
+ - '"subelements must be a list or a string" in subelements_2.msg'
+ - 'subelements_demo|subelements("does not compute", skip_missing=True) == []'
+ - subelements_3 is failed
+ - '"could not find" in subelements_3.msg'
+ - subelements_4 is failed
+ - '"should point to a list" in subelements_4.msg'
+ - subelements_5 is failed
+ - '"should point to a dictionary" in subelements_5.msg'
+ - 'subelements_demo|subelements("groups") == [({"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}, "wheel")]'
+ - 'subelements_demo|subelements(["groups"]) == [({"name": "alice", "groups": ["wheel"], "authorized": ["/tmp/alice/onekey.pub"]}, "wheel")]'
+
+
+- name: Verify dict2items throws on non-Mapping
+ set_fact:
+ foo: '{{True|dict2items}}'
+ ignore_errors: yes
+ register: dict2items_fail
+
+- name: Verify dict2items
+ assert:
+ that:
+ - '{"foo": "bar", "banana": "fruit"}|dict2items == [{"key": "foo", "value": "bar"}, {"key": "banana", "value": "fruit"}]'
+ - dict2items_fail is failed
+ - '"dict2items requires a dictionary" in dict2items_fail.msg'
+
+- name: Verify items2dict throws on non-Mapping
+ set_fact:
+ foo: '{{True|items2dict}}'
+ ignore_errors: yes
+ register: items2dict_fail
+
+- name: Verify items2dict
+ assert:
+ that:
+ - '[{"key": "foo", "value": "bar"}, {"key": "banana", "value": "fruit"}]|items2dict == {"foo": "bar", "banana": "fruit"}'
+ - items2dict_fail is failed
+ - '"items2dict requires a list" in items2dict_fail.msg'
+
+- name: Verify path_join throws on non-string and non-sequence
+ set_fact:
+ foo: '{{True|path_join}}'
+ ignore_errors: yes
+ register: path_join_fail
+
+- name: Verify path_join
+ assert:
+ that:
+ - '"foo"|path_join == "foo"'
+ - '["foo", "bar"]|path_join in ["foo/bar", "foo\bar"]'
+ - path_join_fail is failed
+ - '"expects string or sequence" in path_join_fail.msg'
+
+- name: Verify type_debug
+ assert:
+ that:
+ - '"foo"|type_debug == "str"'
+
+- name: Assert that a jinja2 filter that produces a map is auto unrolled
+ assert:
+ that:
+ - thing|map(attribute="bar")|first == 123
+ - thing_result|first == 123
+ - thing_items|first|last == 123
+ - thing_range == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ vars:
+ thing:
+ - bar: 123
+ thing_result: '{{ thing|map(attribute="bar") }}'
+ thing_dict:
+ bar: 123
+ thing_items: '{{ thing_dict.items() }}'
+ thing_range: '{{ range(10) }}'
diff --git a/test/integration/targets/filter_core/templates/foo.j2 b/test/integration/targets/filter_core/templates/foo.j2
new file mode 100644
index 00000000..a69ba5ef
--- /dev/null
+++ b/test/integration/targets/filter_core/templates/foo.j2
@@ -0,0 +1,62 @@
+This is a test of various filter plugins found in Ansible (ex: core.py), and
+not so much a test of the core filters in Jinja2.
+
+Dumping the same structure to YAML
+
+{{ some_structure | to_nice_yaml }}
+
+Dumping the same structure to JSON, but without pretty printing
+
+{{ some_structure | to_json(sort_keys=true) }}
+
+Dumping the same structure to YAML, but without pretty printing
+
+{{ some_structure | to_yaml }}
+
+From a recorded task, the changed, failed, success, and skipped
+tests are shortcuts to ask if those tasks produced changes, failed,
+succeeded, or skipped (as one might guess).
+
+Changed = {{ some_registered_var is changed }}
+Failed = {{ some_registered_var is failed }}
+Success = {{ some_registered_var is successful }}
+Skipped = {{ some_registered_var is skipped }}
+
+The mandatory filter fails if a variable is not defined; otherwise it returns the value.
+To avoid breaking this test, this variable is already defined.
+
+a = {{ a | mandatory }}
+
+There are various casts available
+
+int = {{ a | int }}
+bool = {{ 1 | bool }}
+
+String quoting
+
+quoted = {{ 'quoted' | quote }}
+
+The fileglob filter returns the list of things matching a pattern.
+
+fileglob = {{ (playbook_dir + '/files/fileglob/*') | fileglob | map('basename') | sort | join(', ') }}
+
+There are also various string operations that work on paths. These do not require
+files to exist and are pass-throughs to the Python os.path functions
+
+/etc/motd with basename = {{ '/etc/motd' | basename }}
+/etc/motd with dirname = {{ '/etc/motd' | dirname }}
+
+path_join_simple = {{ ('/etc', 'subdir', 'test') | path_join }}
+path_join_with_slash = {{ ('/etc', 'subdir', '/test') | path_join }}
+path_join_relative = {{ ('etc', 'subdir', 'test') | path_join }}
+
+TODO: realpath follows symlinks. There isn't a test for this just now.
+
+TODO: add tests for set theory operations like union
+
+regex_replace = {{ 'foo' | regex_replace('^foo', 'bar') }}
+# Check regex_replace with multiline
+{{ '#foo\n#foot' | regex_replace('^#foo', '#bar', multiline=True) }}
+regex_search = {{ 'test_value_0001' | regex_search('([0-9]+)$')}}
+regex_findall = {{ 'car\ntar\nfoo\nbar\n' | regex_findall('^.ar$', multiline=True)|to_json }}
+regex_escape = {{ '^f.*o(.*)$' | regex_escape() }}
diff --git a/test/integration/targets/filter_core/templates/py26json.j2 b/test/integration/targets/filter_core/templates/py26json.j2
new file mode 100644
index 00000000..dba62ad1
--- /dev/null
+++ b/test/integration/targets/filter_core/templates/py26json.j2
@@ -0,0 +1,2 @@
+Provoke a python2.6 json bug
+{{ hostvars[inventory_hostname] | to_nice_json }}
diff --git a/test/integration/targets/filter_core/vars/main.yml b/test/integration/targets/filter_core/vars/main.yml
new file mode 100644
index 00000000..aedecd8f
--- /dev/null
+++ b/test/integration/targets/filter_core/vars/main.yml
@@ -0,0 +1,106 @@
+some_structure:
+ - "this is a list element"
+ -
+ this: "is a hash element in a list"
+ warp: 9
+ where: endor
+
+other_data:
+ level1:
+ foo: bar
+ blip: baz
+ nested:
+ abc: def
+ ghi: xyz
+ alist:
+ - alpha
+ - beta
+ - charlie
+ - delta
+ level2:
+ asd: df
+ xc: dsdfsfsd
+ nested:
+ abc: foo
+ alist:
+ - zebra
+ - yellow
+ - xray
+
+# from https://github.com/ansible/ansible/issues/20379#issuecomment-280492883
+example_20379: {
+ "ApplicationVersions": [
+ {
+ "ApplicationName": "gitlab_ci_elasticbeanstalk",
+ "Status": "UNPROCESSED",
+ "VersionLabel": "test-npm-check-626-1313",
+ "Description": "bla",
+ "DateCreated": "2017-01-22T02:02:31.798Z",
+ "DateUpdated": "2017-01-22T02:02:31.798Z",
+ "SourceBundle": {
+ "S3Bucket": "bla",
+ "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-626-1313.war"
+ }
+ },
+ {
+ "ApplicationName": "gitlab_ci_elasticbeanstalk",
+ "Status": "UNPROCESSED",
+ "VersionLabel": "terminate-611-1289",
+ "Description": "bla",
+ "DateCreated": "2017-01-20T00:34:29.864Z",
+ "DateUpdated": "2017-01-20T00:34:29.864Z",
+ "SourceBundle": {
+ "S3Bucket": "bla",
+ "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-611-1289.war"
+ }
+ },
+ {
+ "ApplicationName": "gitlab_ci_elasticbeanstalk",
+ "Status": "UNPROCESSED",
+ "VersionLabel": "terminate-610-1286",
+ "Description": "bla",
+ "DateCreated": "2017-01-20T00:22:02.229Z",
+ "DateUpdated": "2017-01-20T00:22:02.229Z",
+ "SourceBundle": {
+ "S3Bucket": "bla",
+ "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-610-1286.war"
+ }
+ },
+ {
+ "ApplicationName": "gitlab_ci_elasticbeanstalk",
+ "Status": "UNPROCESSED",
+ "VersionLabel": "master-609-1284",
+ "Description": "bla",
+ "DateCreated": "2017-01-19T23:54:32.902Z",
+ "DateUpdated": "2017-01-19T23:54:32.902Z",
+ "SourceBundle": {
+ "S3Bucket": "bla",
+ "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-609-1284.war"
+ }
+ },
+ {
+ "ApplicationName": "gitlab_ci_elasticbeanstalk",
+ "Status": "UNPROCESSED",
+ "VersionLabel": "master-608-1282",
+ "Description": "bla",
+ "DateCreated": "2017-01-19T23:02:44.902Z",
+ "DateUpdated": "2017-01-19T23:02:44.902Z",
+ "SourceBundle": {
+ "S3Bucket": "bla",
+ "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-608-1282.war"
+ }
+ },
+ {
+ "ApplicationName": "gitlab_ci_elasticbeanstalk",
+ "Status": "UNPROCESSED",
+ "VersionLabel": "master-606-1278",
+ "Description": "bla'",
+ "DateCreated": "2017-01-19T22:47:57.741Z",
+ "DateUpdated": "2017-01-19T22:47:57.741Z",
+ "SourceBundle": {
+ "S3Bucket": "bla",
+ "S3Key": "ci/beanstalk/gitlab_ci_elasticbeanstalk/gitlab_ci_elasticbeanstalk-606-1278.war"
+ }
+ }
+ ]
+}
diff --git a/test/integration/targets/filter_mathstuff/aliases b/test/integration/targets/filter_mathstuff/aliases
new file mode 100644
index 00000000..1603f435
--- /dev/null
+++ b/test/integration/targets/filter_mathstuff/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/test/integration/targets/filter_mathstuff/tasks/main.yml b/test/integration/targets/filter_mathstuff/tasks/main.yml
new file mode 100644
index 00000000..2a708be1
--- /dev/null
+++ b/test/integration/targets/filter_mathstuff/tasks/main.yml
@@ -0,0 +1,288 @@
+- name: Verify that unique's fallback raises an exception for case_sensitive=True
+ set_fact:
+ unique_fallback_exc1: '{{ [{"foo": "bar", "moo": "cow"}]|unique(case_sensitive=True) }}'
+ ignore_errors: true
+ tags: unique
+ register: unique_fallback_exc1_res
+
+- name: Verify that unique's fallback raises an exception for a hashable value that triggers TypeError
+ set_fact:
+ unique_fallback_exc2: '{{ True|unique }}'
+ ignore_errors: true
+ tags: unique
+ register: unique_fallback_exc2_res
+
+- name: Verify unique
+ tags: unique
+ assert:
+ that:
+ - '[1,2,3,4,4,3,2,1]|unique == [1,2,3,4]'
+ - '["a", "b", "a", "b"]|unique == ["a", "b"]'
+ - '[{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "cow"}, {"haha": "bar", "moo": "mar"}]|unique == [{"foo": "bar", "moo": "cow"}, {"haha": "bar", "moo": "mar"}]'
+ - '[{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "mar"}]|unique == [{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "mar"}]'
+ - '{"foo": "bar", "moo": "cow"}|unique == ["foo", "moo"]'
+ - '"foo"|unique|sort|join == "fo"'
+ - '[1,2,3,4,5]|unique == [1,2,3,4,5]'
+ - unique_fallback_exc1_res is failed
+ - unique_fallback_exc2_res is failed
+ - "\"'bool' object is not iterable\" in unique_fallback_exc2_res.msg"
+
+# `unique` will fall back to a custom implementation if the Jinja2 version is
+# too old to support `jinja2.filters.do_unique`. However, the built-in fallback
+# is quite different by default. Namely, it ignores the case-sensitivity
+# setting. This means running:
+# ['a', 'b', 'A', 'B']|unique
+# ... will give a different result for someone running Jinja 2.9 vs 2.10 when
+# do_unique was added. So here, we do a test to see if we have `do_unique`. If
+# we do, then we do another test to make sure attribute and case_sensitive
+# work on it.
+- name: Test for do_unique
+ shell: "{{ansible_python_interpreter}} -c 'from jinja2 import filters; print(\"do_unique\" in dir(filters))'"
+ tags: unique
+ register: do_unique_res
+
+- name: Verify unique some more
+ tags: unique
+ assert:
+ that:
+ - '["a", "b", "A", "B"]|unique(case_sensitive=True) == ["a", "b", "A", "B"]'
+ - '[{"foo": "bar", "moo": "cow"}, {"foo": "bar", "moo": "mar"}]|unique(attribute="foo") == [{"foo": "bar", "moo": "cow"}]'
+ - '["a", "b", "A", "B"]|unique == ["a", "b"]' # defaults to case_sensitive=False
+ - "'cannot fall back' in unique_fallback_exc1_res.msg"
+ when: do_unique_res.stdout == 'True'
+
+- name: Verify unique some more
+ tags: unique
+ assert:
+ that:
+ - "'does not support case_sensitive' in unique_fallback_exc1_res.msg"
+ when: do_unique_res.stdout == 'False'
+
+- name: Verify intersect
+ tags: intersect
+ assert:
+ that:
+ - '[1,2,3]|intersect([4,5,6]) == []'
+ - '[1,2,3]|intersect([3,4,5,6]) == [3]'
+ - '[1,2,3]|intersect([3,2,1]) == [1,2,3]'
+ - '(1,2,3)|intersect((4,5,6))|list == []'
+ - '(1,2,3)|intersect((3,4,5,6))|list == [3]'
+
+- name: Verify difference
+ tags: difference
+ assert:
+ that:
+ - '[1,2,3]|difference([4,5,6]) == [1,2,3]'
+ - '[1,2,3]|difference([3,4,5,6]) == [1,2]'
+ - '[1,2,3]|difference([3,2,1]) == []'
+ - '(1,2,3)|difference((4,5,6))|list == [1,2,3]'
+ - '(1,2,3)|difference((3,4,5,6))|list == [1,2]'
+
+- name: Verify symmetric_difference
+ tags: symmetric_difference
+ assert:
+ that:
+ - '[1,2,3]|symmetric_difference([4,5,6]) == [1,2,3,4,5,6]'
+ - '[1,2,3]|symmetric_difference([3,4,5,6]) == [1,2,4,5,6]'
+ - '[1,2,3]|symmetric_difference([3,2,1]) == []'
+ - '(1,2,3)|symmetric_difference((4,5,6))|list == [1,2,3,4,5,6]'
+ - '(1,2,3)|symmetric_difference((3,4,5,6))|list == [1,2,4,5,6]'
+
+- name: Verify union
+ tags: union
+ assert:
+ that:
+ - '[1,2,3]|union([4,5,6]) == [1,2,3,4,5,6]'
+ - '[1,2,3]|union([3,4,5,6]) == [1,2,3,4,5,6]'
+ - '[1,2,3]|union([3,2,1]) == [1,2,3]'
+ - '(1,2,3)|union((4,5,6))|list == [1,2,3,4,5,6]'
+ - '(1,2,3)|union((3,4,5,6))|list == [1,2,3,4,5,6]'
+
+- name: Verify min
+ tags: min
+ assert:
+ that:
+ - '[1000,-99]|min == -99'
+ - '[0,4]|min == 0'
+
+- name: Verify max
+ tags: max
+ assert:
+ that:
+ - '[1000,-99]|max == 1000'
+ - '[0,4]|max == 4'
+
+- name: Verify logarithm on a value of invalid type
+ set_fact:
+ logarithm_exc1: '{{ "yo"|log }}'
+ ignore_errors: true
+ tags: logarithm
+ register: logarithm_exc1_res
+
+- name: Verify logarithm (which is passed to Jinja as "log" because consistency is boring)
+ tags: logarithm
+ assert:
+ that:
+ - '1|log == 0.0'
+ - '100|log(10) == 2.0'
+ - '100|log(10) == 2.0'
+ - '21|log(21) == 1.0'
+ - '(2.3|log(42)|string).startswith("0.222841")'
+ - '(21|log(42)|string).startswith("0.814550")'
+ - logarithm_exc1_res is failed
+ - '"can only be used on numbers" in logarithm_exc1_res.msg'
+
+- name: Verify power on a value of invalid type
+ set_fact:
+ power_exc1: '{{ "yo"|pow(4) }}'
+ ignore_errors: true
+ tags: power
+ register: power_exc1_res
+
+- name: Verify power (which is passed to Jinja as "pow" because consistency is boring)
+ tags: power
+ assert:
+ that:
+ - '2|pow(4) == 16.0'
+ - power_exc1_res is failed
+ - '"can only be used on numbers" in power_exc1_res.msg'
+
+- name: Verify inversepower on a value of invalid type
+ set_fact:
+ inversepower_exc1: '{{ "yo"|root }}'
+ ignore_errors: true
+ tags: inversepower
+ register: inversepower_exc1_res
+
+- name: Verify inversepower (which is passed to Jinja as "root" because consistency is boring)
+ tags: inversepower
+ assert:
+ that:
+ - '4|root == 2.0'
+ - '4|root(2) == 2.0'
+ - '9|root(1) == 9.0'
+ - '(9|root(6)|string).startswith("1.4422495")'
+ - inversepower_exc1_res is failed
+ - '"can only be used on numbers" in inversepower_exc1_res.msg'
+
+- name: Verify human_readable on invalid input
+ set_fact:
+ human_readable_exc1: '{{ "monkeys"|human_readable }}'
+ ignore_errors: true
+ tags: human_readable
+ register: human_readable_exc1_res
+
+- name: Verify human_readable
+ tags: human_readable
+ assert:
+ that:
+ - '"1.00 Bytes" == 1|human_readable'
+ - '"1.00 bits" == 1|human_readable(isbits=True)'
+ - '"10.00 KB" == 10240|human_readable'
+ - '"97.66 MB" == 102400000|human_readable'
+ - '"0.10 GB" == 102400000|human_readable(unit="G")'
+ - '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")'
+ - human_readable_exc1_res is failed
+ - '"failed on bad input" in human_readable_exc1_res.msg'
+
+- name: Verify human_to_bytes
+ tags: human_to_bytes
+ assert:
+ that:
+ - "{{'0'|human_to_bytes}} == 0"
+ - "{{'0.1'|human_to_bytes}} == 0"
+ - "{{'0.9'|human_to_bytes}} == 1"
+ - "{{'1'|human_to_bytes}} == 1"
+ - "{{'10.00 KB'|human_to_bytes}} == 10240"
+ - "{{ '11 MB'|human_to_bytes}} == 11534336"
+ - "{{ '1.1 GB'|human_to_bytes}} == 1181116006"
+ - "{{'10.00 Kb'|human_to_bytes(isbits=True)}} == 10240"
+
+- name: Verify human_to_bytes (bad string)
+ set_fact:
+ bad_string: "{{ '10.00 foo' | human_to_bytes }}"
+ ignore_errors: yes
+ tags: human_to_bytes
+ register: _human_bytes_test
+
+- name: Verify human_to_bytes (bad string)
+ tags: human_to_bytes
+ assert:
+ that: "{{_human_bytes_test.failed}}"
+
+- name: Verify that union can be chained
+ tags: union
+ vars:
+ unions: '{{ [1,2,3]|union([4,5])|union([6,7]) }}'
+ assert:
+ that:
+ - "unions|type_debug == 'list'"
+ - "unions|length == 7"
+
+- name: Test union with unhashable item
+ tags: union
+ vars:
+ unions: '{{ [1,2,3]|union([{}]) }}'
+ assert:
+ that:
+ - "unions|type_debug == 'list'"
+ - "unions|length == 4"
+
+- name: Verify rekey_on_member with invalid "duplicates" kwarg
+ set_fact:
+ rekey_on_member_exc1: '{{ []|rekey_on_member("asdf", duplicates="boo") }}'
+ ignore_errors: true
+ tags: rekey_on_member
+ register: rekey_on_member_exc1_res
+
+- name: Verify rekey_on_member with invalid data
+ set_fact:
+ rekey_on_member_exc2: '{{ "minkeys"|rekey_on_member("asdf") }}'
+ ignore_errors: true
+ tags: rekey_on_member
+ register: rekey_on_member_exc2_res
+
+- name: Verify rekey_on_member with partially invalid data (list item is not dict)
+ set_fact:
+ rekey_on_member_exc3: '{{ [True]|rekey_on_member("asdf") }}'
+ ignore_errors: true
+ tags: rekey_on_member
+ register: rekey_on_member_exc3_res
+
+- name: Verify rekey_on_member with partially invalid data (key not in all dicts)
+ set_fact:
+ rekey_on_member_exc4: '{{ [{"foo": "bar", "baz": "buzz"}, {"hello": 8, "different": "haha"}]|rekey_on_member("foo") }}'
+ ignore_errors: true
+ tags: rekey_on_member
+ register: rekey_on_member_exc4_res
+
+- name: Verify rekey_on_member with duplicates and duplicates=error
+ set_fact:
+ rekey_on_member_exc5: '{{ [{"proto": "eigrp", "state": "enabled"}, {"proto": "eigrp", "state": "enabled"}]|rekey_on_member("proto", duplicates="error") }}'
+ ignore_errors: true
+ tags: rekey_on_member
+ register: rekey_on_member_exc5_res
+
+- name: Verify rekey_on_member
+ tags: rekey_on_member
+ assert:
+ that:
+ - rekey_on_member_exc1_res is failed
+ - '"duplicates parameter to rekey_on_member has unknown value" in rekey_on_member_exc1_res.msg'
+ - '[{"proto": "eigrp", "state": "enabled"}, {"proto": "ospf", "state": "enabled"}]|rekey_on_member("proto") == {"eigrp": {"proto": "eigrp", "state": "enabled"}, "ospf": {"proto": "ospf", "state": "enabled"}}'
+ - '{"a": {"proto": "eigrp", "state": "enabled"}, "b": {"proto": "ospf", "state": "enabled"}}|rekey_on_member("proto") == {"eigrp": {"proto": "eigrp", "state": "enabled"}, "ospf": {"proto": "ospf", "state": "enabled"}}'
+ - '[{"proto": "eigrp", "state": "enabled"}, {"proto": "eigrp", "state": "enabled"}]|rekey_on_member("proto", duplicates="overwrite") == {"eigrp": {"proto": "eigrp", "state": "enabled"}}'
+ - rekey_on_member_exc2_res is failed
+ - '"Type is not a valid list, set, or dict" in rekey_on_member_exc2_res.msg'
+ - rekey_on_member_exc3_res is failed
+ - '"List item is not a valid dict" in rekey_on_member_exc3_res.msg'
+ - rekey_on_member_exc4_res is failed
+ - '"was not found" in rekey_on_member_exc4_res.msg'
+ - rekey_on_member_exc5_res is failed
+ - '"is not unique, cannot correctly turn into dict" in rekey_on_member_exc5_res.msg'
+
+# TODO: For some reason, the coverage tool isn't accounting for the last test
+# so add another "last test" to fake it...
+- assert:
+ that:
+ - true
diff --git a/test/integration/targets/filter_urls/aliases b/test/integration/targets/filter_urls/aliases
new file mode 100644
index 00000000..1603f435
--- /dev/null
+++ b/test/integration/targets/filter_urls/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/test/integration/targets/filter_urls/runme.sh b/test/integration/targets/filter_urls/runme.sh
new file mode 100755
index 00000000..f6460acb
--- /dev/null
+++ b/test/integration/targets/filter_urls/runme.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook runme.yml "$@"
+
+source virtualenv.sh
+
+# This is necessary for installing Jinja 2.6. We need this because Jinja 2.6
+# won't install with newer setuptools, and because setuptools 45+ won't work
+# with Python 2.
+pip install 'setuptools<45'
+
+# Install Jinja 2.6 since we want to test the fallback to Ansible's custom
+# urlencode functions. Jinja 2.6 does not have urlencode so we will trigger the
+# fallback.
+pip install 'jinja2 >= 2.6, < 2.7'
+
+# Run the playbook again in the venv with Jinja 2.6
+ansible-playbook runme.yml "$@"
diff --git a/test/integration/targets/filter_urls/runme.yml b/test/integration/targets/filter_urls/runme.yml
new file mode 100644
index 00000000..527a03e3
--- /dev/null
+++ b/test/integration/targets/filter_urls/runme.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: false
+ roles:
+ - { role: filter_urls }
diff --git a/test/integration/targets/filter_urls/tasks/main.yml b/test/integration/targets/filter_urls/tasks/main.yml
new file mode 100644
index 00000000..935ed479
--- /dev/null
+++ b/test/integration/targets/filter_urls/tasks/main.yml
@@ -0,0 +1,31 @@
+- name: Get Jinja2 version
+ shell: "{{ ansible_python_interpreter }} -c 'import jinja2; print(jinja2.__version__)'"
+ register: jinja2_version
+
+- name: Print Jinja2 version
+ debug: var=jinja2_version.stdout
+
+- name: Test urldecode filter
+ set_fact:
+ urldecoded_string: key="@{}é&%£ foo bar '(;\<>""°)
+
+- name: Test urlencode filter
+ set_fact:
+ urlencoded_string: 'key%3D%22%40%7B%7D%C3%A9%26%25%C2%A3%20foo%20bar%20%27%28%3B%5C%3C%3E%22%22%C2%B0%29'
+
+- name: Verify urlencode / urldecode isomorphism
+ assert:
+ that:
+ - urldecoded_string == urlencoded_string|urldecode
+ - urlencoded_string == urldecoded_string|urlencode
+
+- name: Verify urlencode handles dicts properly
+ assert:
+ that:
+ - "{'foo': 'bar'}|urlencode == 'foo=bar'"
+ - "{'foo': 'bar', 'baz': 'buz'}|urlencode == 'foo=bar&baz=buz'"
+ - "()|urlencode == ''"
+
+# Needed (temporarily) due to coverage reports not including the last task.
+- assert:
+ that: true
diff --git a/test/integration/targets/filter_urlsplit/aliases b/test/integration/targets/filter_urlsplit/aliases
new file mode 100644
index 00000000..1603f435
--- /dev/null
+++ b/test/integration/targets/filter_urlsplit/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/test/integration/targets/filter_urlsplit/tasks/main.yml b/test/integration/targets/filter_urlsplit/tasks/main.yml
new file mode 100644
index 00000000..c3ff3ec9
--- /dev/null
+++ b/test/integration/targets/filter_urlsplit/tasks/main.yml
@@ -0,0 +1,30 @@
+- debug:
+ var: "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit"
+ verbosity: 1
+ tags: debug
+
+- name: Test urlsplit filter
+ assert:
+ that:
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('fragment') == 'fragment'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('hostname') == 'www.acme.com'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('netloc') == 'mary:MySecret@www.acme.com:9000'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('path') == '/dir/index.html'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('port') == 9000"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('query') == 'query=term'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('scheme') == 'http'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('username') == 'mary'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit('password') == 'MySecret'"
+ - "'http://mary:MySecret@www.acme.com:9000/dir/index.html?query=term#fragment' | urlsplit == { 'fragment': 'fragment', 'hostname': 'www.acme.com', 'netloc': 'mary:MySecret@www.acme.com:9000', 'password': 'MySecret', 'path': '/dir/index.html', 'port': 9000, 'query': 'query=term', 'scheme': 'http', 'username': 'mary' }"
+
+- name: Test urlsplit filter bad argument
+ debug:
+ var: "'http://www.acme.com:9000/dir/index.html' | urlsplit('bad_filter')"
+ register: _bad_urlsplit_filter
+ ignore_errors: yes
+
+- name: Verify urlsplit filter showed an error message
+ assert:
+ that:
+ - _bad_urlsplit_filter is failed
+ - "'unknown URL component' in _bad_urlsplit_filter.msg"
diff --git a/test/integration/targets/find/aliases b/test/integration/targets/find/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/find/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/find/meta/main.yml b/test/integration/targets/find/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/find/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/find/tasks/main.yml b/test/integration/targets/find/tasks/main.yml
new file mode 100644
index 00000000..7fd61dd2
--- /dev/null
+++ b/test/integration/targets/find/tasks/main.yml
@@ -0,0 +1,97 @@
+# Test code for the find module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact: output_dir_test={{output_dir}}/test_find
+
+- name: make sure our testing sub-directory does not exist
+ file: path="{{ output_dir_test }}" state=absent
+
+- name: create our testing sub-directory
+ file: path="{{ output_dir_test }}" state=directory
+
+##
+## find
+##
+
+- name: make some directories
+ file:
+ path: "{{ output_dir_test }}/{{ item }}"
+ state: directory
+ with_items:
+ - a/b/c/d
+ - e/f/g/h
+
+- name: make some files
+ copy:
+ dest: "{{ output_dir_test }}/{{ item }}"
+ content: 'data'
+ with_items:
+ - a/1.txt
+ - a/b/2.jpg
+ - a/b/c/3
+ - a/b/c/d/4.xml
+ - e/5.json
+ - e/f/6.swp
+ - e/f/g/7.img
+ - e/f/g/h/8.ogg
+
+- name: find the directories
+ find:
+ paths: "{{ output_dir_test }}"
+ file_type: directory
+ recurse: yes
+ register: find_test0
+- debug: var=find_test0
+- name: validate directory results
+ assert:
+ that:
+ - 'find_test0.changed is defined'
+ - 'find_test0.examined is defined'
+ - 'find_test0.files is defined'
+ - 'find_test0.matched is defined'
+ - 'find_test0.msg is defined'
+ - 'find_test0.matched == 8'
+ - 'find_test0.files | length == 8'
+
+- name: find the xml and img files
+ find:
+ paths: "{{ output_dir_test }}"
+ file_type: file
+ patterns: "*.xml,*.img"
+ recurse: yes
+ register: find_test1
+- debug: var=find_test1
+- name: validate file results
+ assert:
+ that:
+ - 'find_test1.matched == 2'
+ - 'find_test1.files | length == 2'
+
+- name: find the xml file
+ find:
+ paths: "{{ output_dir_test }}"
+ patterns: "*.xml"
+ recurse: yes
+ register: find_test2
+- debug: var=find_test2
+- name: validate gr_name and pw_name are defined
+ assert:
+ that:
+ - 'find_test2.matched == 1'
+ - 'find_test2.files[0].pw_name is defined'
+ - 'find_test2.files[0].gr_name is defined'
diff --git a/test/integration/targets/gathering/aliases b/test/integration/targets/gathering/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/gathering/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/gathering/explicit.yml b/test/integration/targets/gathering/explicit.yml
new file mode 100644
index 00000000..453dfb6a
--- /dev/null
+++ b/test/integration/targets/gathering/explicit.yml
@@ -0,0 +1,14 @@
+- hosts: testhost
+ tasks:
+ - name: ensure facts have not been collected
+ assert:
+ that:
+ - ansible_facts is undefined or not 'fqdn' in ansible_facts
+
+- hosts: testhost
+ gather_facts: True
+ tasks:
+ - name: ensure facts have been collected
+ assert:
+ that:
+ - ansible_facts is defined and 'fqdn' in ansible_facts
diff --git a/test/integration/targets/gathering/implicit.yml b/test/integration/targets/gathering/implicit.yml
new file mode 100644
index 00000000..f1ea965d
--- /dev/null
+++ b/test/integration/targets/gathering/implicit.yml
@@ -0,0 +1,23 @@
+- hosts: testhost
+ tasks:
+ - name: check that facts were gathered but no local facts exist
+ assert:
+ that:
+ - ansible_facts is defined and 'fqdn' in ansible_facts
+ - not 'uuid' in ansible_local
+ - name: create 'local facts' for next gathering
+ copy:
+ src: uuid.fact
+ dest: /etc/ansible/facts.d/
+ mode: 0755
+
+- hosts: testhost
+ tasks:
+ - name: ensure facts are gathered and include the new 'local facts' created above
+ assert:
+ that:
+ - ansible_facts is defined and 'fqdn' in ansible_facts
+ - "'uuid' in ansible_local"
+
+ - name: cleanup 'local facts' from target
+ file: path=/etc/ansible/facts.d/uuid.fact state=absent
diff --git a/test/integration/targets/gathering/runme.sh b/test/integration/targets/gathering/runme.sh
new file mode 100755
index 00000000..1c0832c5
--- /dev/null
+++ b/test/integration/targets/gathering/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_GATHERING=smart ansible-playbook smart.yml --flush-cache -i ../../inventory -v "$@"
+ANSIBLE_GATHERING=implicit ansible-playbook implicit.yml --flush-cache -i ../../inventory -v "$@"
+ANSIBLE_GATHERING=explicit ansible-playbook explicit.yml --flush-cache -i ../../inventory -v "$@"
diff --git a/test/integration/targets/gathering/smart.yml b/test/integration/targets/gathering/smart.yml
new file mode 100644
index 00000000..735cb461
--- /dev/null
+++ b/test/integration/targets/gathering/smart.yml
@@ -0,0 +1,23 @@
+- hosts: testhost
+ tasks:
+ - name: ensure facts are gathered but no local facts exist
+ assert:
+ that:
+ - ansible_facts is defined and 'fqdn' in ansible_facts
+ - not 'uuid' in ansible_local
+ - name: create local facts for a later test
+ copy:
+ src: uuid.fact
+ dest: /etc/ansible/facts.d/
+ mode: 0755
+
+- hosts: testhost
+ tasks:
+ - name: ensure we still have facts, but didn't pick up new local ones
+ assert:
+ that:
+ - ansible_facts is defined and 'fqdn' in ansible_facts
+ - not 'uuid' in ansible_local
+
+ - name: remove local facts file
+ file: path=/etc/ansible/facts.d/uuid.fact state=absent
diff --git a/test/integration/targets/gathering/uuid.fact b/test/integration/targets/gathering/uuid.fact
new file mode 100644
index 00000000..79e3f626
--- /dev/null
+++ b/test/integration/targets/gathering/uuid.fact
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+import json
+import uuid
+
+
+# return a random string
+print(json.dumps(str(uuid.uuid4())))
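
A *.fact executable dropped under /etc/ansible/facts.d must print JSON on stdout; setup then exposes the parsed output in ansible_local, keyed by the file name minus its .fact suffix. A sketch of that contract as the plays above rely on it (the path and the key derivation are assumptions, not code from this patch):

    # Run the local-facts script and key its JSON output the way setup would.
    import json
    import subprocess

    out = subprocess.check_output(['/etc/ansible/facts.d/uuid.fact'])
    ansible_local = {'uuid': json.loads(out)}
    print(ansible_local['uuid'])  # the random string smart.yml compares across hosts
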
diff --git a/test/integration/targets/gathering_facts/aliases b/test/integration/targets/gathering_facts/aliases
new file mode 100644
index 00000000..0ee704e1
--- /dev/null
+++ b/test/integration/targets/gathering_facts/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+needs/root
diff --git a/test/integration/targets/gathering_facts/cache_plugins/none.py b/test/integration/targets/gathering_facts/cache_plugins/none.py
new file mode 100644
index 00000000..5681dee0
--- /dev/null
+++ b/test/integration/targets/gathering_facts/cache_plugins/none.py
@@ -0,0 +1,50 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.cache import BaseCacheModule
+
+DOCUMENTATION = '''
+ cache: none
+ short_description: write-only cache (no cache)
+ description:
+ - No caching at all
+ version_added: historical
+ author: core team (@ansible-core)
+'''
+
+
+class CacheModule(BaseCacheModule):
+ def __init__(self, *args, **kwargs):
+ self.empty = {}
+
+ def get(self, key):
+ return self.empty.get(key)
+
+ def set(self, key, value):
+ return value
+
+ def keys(self):
+ return self.empty.keys()
+
+ def contains(self, key):
+ return key in self.empty
+
+ def delete(self, key):
+ del self.empty[key]
+
+ def flush(self):
+ self.empty = {}
+
+ def copy(self):
+ return self.empty.copy()
+
+ def __getstate__(self):
+ return self.copy()
+
+ def __setstate__(self, data):
+ self.empty = data
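
The plugin is deliberately write-only: set() discards the value and get() always misses, so fact caching is effectively disabled while the cache API stays intact. A short behavior sketch, assuming the file above is importable as a module:

    # Nothing stored means nothing found.
    from cache_plugins.none import CacheModule  # import path assumed

    cache = CacheModule()
    cache.set('facthost0', {'ansible_fqdn': 'example.invalid'})
    assert cache.get('facthost0') is None   # set() never persisted anything
    assert not cache.contains('facthost0')
    assert list(cache.keys()) == []
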
diff --git a/test/integration/targets/gathering_facts/inventory b/test/integration/targets/gathering_facts/inventory
new file mode 100644
index 00000000..e15ae780
--- /dev/null
+++ b/test/integration/targets/gathering_facts/inventory
@@ -0,0 +1,2 @@
+[local]
+facthost[0:25] ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/gathering_facts/library/bogus_facts b/test/integration/targets/gathering_facts/library/bogus_facts
new file mode 100644
index 00000000..a6aeede5
--- /dev/null
+++ b/test/integration/targets/gathering_facts/library/bogus_facts
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+echo '{
+ "changed": false,
+ "ansible_facts": {
+ "ansible_facts": {
+ "discovered_interpreter_python": "(touch /tmp/pwned-$(date -Iseconds)-$(whoami) ) 2>/dev/null >/dev/null && /usr/bin/python",
+ "bogus_overwrite": "yes"
+ },
+ "dansible_iscovered_interpreter_python": "(touch /tmp/pwned-$(date -Iseconds)-$(whoami) ) 2>/dev/null >/dev/null && /usr/bin/python"
+ }
+}'
diff --git a/test/integration/targets/gathering_facts/library/facts_one b/test/integration/targets/gathering_facts/library/facts_one
new file mode 100644
index 00000000..c74ab9a7
--- /dev/null
+++ b/test/integration/targets/gathering_facts/library/facts_one
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+echo '{
+ "changed": false,
+ "ansible_facts": {
+ "factsone": "from facts_one module",
+ "common_fact": "also from facts_one module",
+ "common_dict_fact": {
+ "key_one": "from facts_one",
+ "key_two": "from facts_one"
+ },
+ "common_list_fact": [
+ "one",
+ "three",
+ "five"
+ ],
+ "common_list_fact2": [
+ "one",
+ "two",
+ "three",
+ "five",
+ "five"
+ ]
+ }
+}'
diff --git a/test/integration/targets/gathering_facts/library/facts_two b/test/integration/targets/gathering_facts/library/facts_two
new file mode 100644
index 00000000..4e7c6684
--- /dev/null
+++ b/test/integration/targets/gathering_facts/library/facts_two
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+echo '{
+ "changed": false,
+ "ansible_facts": {
+ "factstwo": "from facts_two module",
+ "common_fact": "also from facts_two module",
+ "common_dict_fact": {
+ "key_two": "from facts_two",
+ "key_four": "from facts_two"
+ },
+ "common_list_fact": [
+ "one",
+ "two",
+ "four"
+ ],
+ "common_list_fact2": [
+ "one",
+ "two",
+ "four",
+ "four"
+ ]
+ }
+}'
diff --git a/test/integration/targets/gathering_facts/library/file_utils.py b/test/integration/targets/gathering_facts/library/file_utils.py
new file mode 100644
index 00000000..58538029
--- /dev/null
+++ b/test/integration/targets/gathering_facts/library/file_utils.py
@@ -0,0 +1,54 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.utils import (
+ get_file_content,
+ get_file_lines,
+ get_mount_size,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ test=dict(type='str', default='strip'),
+ touch_file=dict(type='str', default='/dev/null'),
+ line_sep_file=dict(type='str', default='/dev/null'),
+ line_sep_sep=dict(type='str', default='\n'),
+ )
+ )
+
+ test = module.params['test']
+ facts = {}
+
+ if test == 'strip':
+ etc_passwd = get_file_content('/etc/passwd')
+ etc_passwd_unstripped = get_file_content('/etc/passwd', strip=False)
+ facts['etc_passwd_newlines'] = etc_passwd.count('\n')
+ facts['etc_passwd_newlines_unstripped'] = etc_passwd_unstripped.count('\n')
+
+ elif test == 'default':
+ path = module.params['touch_file']
+ facts['touch_default'] = get_file_content(path, default='i am a default')
+
+ elif test == 'line_sep':
+ path = module.params['line_sep_file']
+ sep = module.params['line_sep_sep']
+ facts['line_sep'] = get_file_lines(path, line_sep=sep)
+
+ elif test == 'invalid_mountpoint':
+ facts['invalid_mountpoint'] = get_mount_size('/doesnotexist')
+
+ result = {
+ 'changed': False,
+ 'ansible_facts': facts,
+ }
+
+ module.exit_json(**result)
+
+
+main()
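
The 'strip' branch depends on get_file_content stripping trailing whitespace by default, so the stripped read of /etc/passwd carries exactly one newline fewer than the raw read. A plain-Python restatement of that invariant (the sample content is made up):

    # A file ending in '\n' loses exactly that newline when stripped.
    raw = 'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:/sbin/nologin\n'
    stripped = raw.strip()  # assumed default get_file_content behavior
    assert stripped.count('\n') + 1 == raw.count('\n')
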
diff --git a/test/integration/targets/gathering_facts/one_two.json b/test/integration/targets/gathering_facts/one_two.json
new file mode 100644
index 00000000..ecc698c3
--- /dev/null
+++ b/test/integration/targets/gathering_facts/one_two.json
@@ -0,0 +1,27 @@
+{
+ "_ansible_facts_gathered": true,
+ "common_dict_fact": {
+ "key_four": "from facts_two",
+ "key_one": "from facts_one",
+ "key_two": "from facts_two"
+ },
+ "common_fact": "also from facts_two module",
+ "common_list_fact": [
+ "three",
+ "five",
+ "one",
+ "two",
+ "four"
+ ],
+ "common_list_fact2": [
+ "three",
+ "five",
+ "five",
+ "one",
+ "two",
+ "four",
+ "four"
+ ],
+ "factsone": "from facts_one module",
+ "factstwo": "from facts_two module"
+} \ No newline at end of file
diff --git a/test/integration/targets/gathering_facts/prevent_clobbering.yml b/test/integration/targets/gathering_facts/prevent_clobbering.yml
new file mode 100644
index 00000000..94bb4512
--- /dev/null
+++ b/test/integration/targets/gathering_facts/prevent_clobbering.yml
@@ -0,0 +1,8 @@
+- name: Verify existing facts don't go undefined on unrelated new facts in loop
+ hosts: localhost
+ gather_facts: True
+ tasks:
+ - name: Ensure that 'virtualization_type' is not undefined after first loop iteration
+ bogus_facts:
+ loop: [1, 2, 3]
+ when: ansible_facts['virtualization_type'] != 'NotDocker'
diff --git a/test/integration/targets/gathering_facts/runme.sh b/test/integration/targets/gathering_facts/runme.sh
new file mode 100755
index 00000000..46355627
--- /dev/null
+++ b/test/integration/targets/gathering_facts/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+#ANSIBLE_CACHE_PLUGINS=cache_plugins/ ANSIBLE_CACHE_PLUGIN=none ansible-playbook test_gathering_facts.yml -i inventory -v "$@"
+ansible-playbook test_gathering_facts.yml -i inventory -e output_dir="$OUTPUT_DIR" -v "$@"
+#ANSIBLE_CACHE_PLUGIN=base ansible-playbook test_gathering_facts.yml -i inventory -v "$@"
+
+ANSIBLE_GATHERING=smart ansible-playbook test_run_once.yml -i inventory -v "$@"
+
+# ensure clean_facts is working properly
+ansible-playbook test_prevent_injection.yml -i inventory -v "$@"
+
+# ensure fact merging is working properly
+ansible-playbook verify_merge_facts.yml -v "$@" -e 'ansible_facts_parallel: False'
+
+# ensure we don't clobber facts in loop
+ansible-playbook prevent_clobbering.yml -v "$@"
diff --git a/test/integration/targets/gathering_facts/test_gathering_facts.yml b/test/integration/targets/gathering_facts/test_gathering_facts.yml
new file mode 100644
index 00000000..d4364d29
--- /dev/null
+++ b/test/integration/targets/gathering_facts/test_gathering_facts.yml
@@ -0,0 +1,474 @@
+---
+- hosts: facthost7
+ tags: [ 'fact_negation' ]
+ connection: local
+ gather_subset: "!hardware"
+ gather_facts: no
+ tasks:
+ - name: setup with not hardware
+ setup:
+ gather_subset:
+ - "!hardware"
+ register: not_hardware_facts
+
+- name: min and network test for platform added
+ hosts: facthost21
+ tags: [ 'fact_network' ]
+ connection: local
+ gather_subset: ["!all", "network"]
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving network facts works and gets prereqs from platform and distribution
+ assert:
+ that:
+ - 'ansible_default_ipv4|default("UNDEF") != "UNDEF"'
+ - 'ansible_interfaces|default("UNDEF") != "UNDEF"'
+ # these are true for linux, but maybe not for other os
+ - 'ansible_system|default("UNDEF") != "UNDEF"'
+ - 'ansible_distribution|default("UNDEF") != "UNDEF"'
+ # we don't really require these but they are in the min set
+ # - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"'
+ # - 'ansible_user_id|default("UNDEF") == "UNDEF"'
+ # - 'ansible_env|default("UNDEF") == "UNDEF"'
+ # - 'ansible_selinux|default("UNDEF") == "UNDEF"'
+ # - 'ansible_pkg_mgr|default("UNDEF") == "UNDEF"'
+
+- name: min and hardware test for platform added
+ hosts: facthost22
+ tags: [ 'fact_hardware' ]
+ connection: local
+ gather_subset: "hardware"
+ gather_facts: yes
+ tasks:
+ - name: debug stuff
+ debug:
+ var: hostvars['facthost22']
+ # we should also collect platform, but not distribution
+ - name: Test that retrieving hardware facts works and gets prereqs from platform and distribution
+ when: ansible_system|default("UNDEF") == "Linux"
+ assert:
+ # LinuxHardwareCollector requires 'platform' facts
+ that:
+ - 'ansible_memory_mb|default("UNDEF") != "UNDEF"'
+ - 'ansible_default_ipv4|default("UNDEF") == "UNDEF"'
+ - 'ansible_interfaces|default("UNDEF") == "UNDEF"'
+ # these are true for linux, but maybe not for other os
+ # hardware requires 'platform'
+ - 'ansible_system|default("UNDEF") != "UNDEF"'
+ - 'ansible_machine|default("UNDEF") != "UNDEF"'
+ # hardware does not require 'distribution' but it is min set
+ # - 'ansible_distribution|default("UNDEF") == "UNDEF"'
+ # we don't really require these but they are in the min set
+ # - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"'
+ # - 'ansible_user_id|default("UNDEF") == "UNDEF"'
+ # - 'ansible_env|default("UNDEF") == "UNDEF"'
+ # - 'ansible_selinux|default("UNDEF") == "UNDEF"'
+ # - 'ansible_pkg_mgr|default("UNDEF") == "UNDEF"'
+
+- name: min and service_mgr test for platform added
+ hosts: facthost23
+ tags: [ 'fact_service_mgr' ]
+ connection: local
+ gather_subset: ["!all", "service_mgr"]
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving service_mgr facts works and gets prereqs from platform and distribution
+ assert:
+ that:
+ - 'ansible_service_mgr|default("UNDEF") != "UNDEF"'
+ - 'ansible_default_ipv4|default("UNDEF") == "UNDEF"'
+ - 'ansible_interfaces|default("UNDEF") == "UNDEF"'
+ # these are true for linux, but maybe not for other os
+ - 'ansible_system|default("UNDEF") != "UNDEF"'
+ - 'ansible_distribution|default("UNDEF") != "UNDEF"'
+ # we don't really require these but they are in the min set
+ # - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"'
+ # - 'ansible_user_id|default("UNDEF") == "UNDEF"'
+ # - 'ansible_env|default("UNDEF") == "UNDEF"'
+ # - 'ansible_selinux|default("UNDEF") == "UNDEF"'
+ # - 'ansible_pkg_mgr|default("UNDEF") == "UNDEF"'
+
+- hosts: facthost0
+ tags: [ 'fact_min' ]
+ connection: local
+ gather_subset: "all"
+ gather_facts: yes
+ tasks:
+ #- setup:
+ # register: facts
+ - name: Test that retrieving all facts works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"'
+
+
+- hosts: facthost19
+ tags: [ 'fact_min' ]
+ connection: local
+ gather_facts: no
+ tasks:
+ - setup:
+ filter: "*env*"
+ # register: fact_results
+
+ - name: Test that retrieving all facts filtered to env works
+ assert:
+ that:
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+ - 'ansible_env|default("UNDEF_ENV") != "UNDEF_ENV"'
+
+- hosts: facthost13
+ tags: [ 'fact_min' ]
+ connection: local
+ gather_facts: no
+ tasks:
+ - setup:
+ filter: "ansible_user_id"
+ # register: fact_results
+
+ - name: Test that retrieving all facts filtered to specific fact ansible_user_id works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_USER") != "UNDEF_USER"'
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+ - 'ansible_env|default("UNDEF_ENV") == "UNDEF_ENV"'
+ - 'ansible_pkg_mgr|default("UNDEF_PKG_MGR") == "UNDEF_PKG_MGR"'
+
+- hosts: facthost11
+ tags: [ 'fact_min' ]
+ connection: local
+ gather_facts: no
+ tasks:
+ - setup:
+ filter: "*"
+ # register: fact_results
+
+ - name: Test that retrieving all facts filtered to splat works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"'
+
+- hosts: facthost12
+ tags: [ 'fact_min' ]
+ connection: local
+ gather_facts: no
+ tasks:
+ - setup:
+ filter: ""
+ # register: fact_results
+
+ - name: Test that retrieving all facts filtered to empty filter_spec works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"'
+
+- hosts: facthost1
+ tags: [ 'fact_min' ]
+ connection: local
+ gather_subset: "!all"
+ gather_facts: yes
+ tasks:
+ - name: Test that only retrieving minimal facts works
+ assert:
+ that:
+ # from the min set, which should still collect
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_env|default("UNDEF_ENV") != "UNDEF_ENV"'
+ # non min facts that are not collected
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+
+- hosts: facthost2
+ tags: [ 'fact_network' ]
+ connection: local
+ gather_subset: ["!all", "!min", "network"]
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving network facts works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF") == "UNDEF"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF") == "UNDEF"'
+ - 'ansible_virtualization_role|default("UNDEF") == "UNDEF"'
+
+- hosts: facthost3
+ tags: [ 'fact_hardware' ]
+ connection: local
+ gather_subset: "hardware"
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving hardware facts works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") != "UNDEF_MOUNT" or ansible_distribution == "MacOSX"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+
+- hosts: facthost4
+ tags: [ 'fact_virtual' ]
+ connection: local
+ gather_subset: "virtual"
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving virtualization facts works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"'
+
+- hosts: facthost5
+ tags: [ 'fact_comma_string' ]
+ connection: local
+ gather_subset: ["virtual", "network"]
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving virtualization and network as a string works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"'
+
+- hosts: facthost6
+ tags: [ 'fact_yaml_list' ]
+ connection: local
+ gather_subset:
+ - virtual
+ - network
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving virtualization and network as a YAML list works
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") != "UNDEF_VIRT"'
+
+
+- hosts: facthost7
+ tags: [ 'fact_negation' ]
+ connection: local
+ gather_subset: "!hardware"
+ gather_facts: yes
+ tasks:
+ - name: Test that negation of fact subsets works
+ assert:
+ that:
+ # network, not collected since it is not in min
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ # not collecting virt, should be undef
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+ # mounts/devices are collected by hardware, so should be not collected and undef
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_devices|default("UNDEF_DEVICES") == "UNDEF_DEVICES"'
+ # from the min set, which should still collect
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_env|default("UNDEF_ENV") != "UNDEF_ENV"'
+
+- hosts: facthost8
+ tags: [ 'fact_mixed_negation_addition' ]
+ connection: local
+ gather_subset: ["!hardware", "network"]
+ gather_facts: yes
+ tasks:
+ - name: Test that negation and additional subsets work together
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+
+- hosts: facthost14
+ tags: [ 'fact_mixed_negation_addition_min' ]
+ connection: local
+ gather_subset: ["!all", "!min", "network"]
+ gather_facts: yes
+ tasks:
+ - name: Test that negation and additional subsets work together for min subset
+ assert:
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") == "UNDEF_MIN"'
+ - 'ansible_interfaces|default("UNDEF_NET") != "UNDEF_NET"'
+ - 'ansible_default_ipv4|default("UNDEF_DEFAULT_IPV4") != "UNDEF_DEFAULT_IPV4"'
+ - 'ansible_all_ipv4_addresses|default("UNDEF_ALL_IPV4") != "UNDEF_ALL_IPV4"'
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+ - 'ansible_env|default("UNDEF_ENV") == "UNDEF_ENV"'
+
+- hosts: facthost15
+ tags: [ 'fact_negate_all_min_add_pkg_mgr' ]
+ connection: local
+ gather_subset: ["!all", "!min", "pkg_mgr"]
+ gather_facts: yes
+ tasks:
+ - name: Test that negation and additional subsets work together for min subset
+ assert:
+ that:
+ # network, not collected since it is not in min
+ - 'ansible_interfaces|default("UNDEF_NET") == "UNDEF_NET"'
+ # not collecting virt, should be undef
+ - 'ansible_virtualization_role|default("UNDEF_VIRT") == "UNDEF_VIRT"'
+ # mounts/devices are collected by hardware, so should be not collected and undef
+ - 'ansible_mounts|default("UNDEF_MOUNT") == "UNDEF_MOUNT"'
+ - 'ansible_devices|default("UNDEF_DEVICES") == "UNDEF_DEVICES"'
+ # from the min set, which should not collect
+ - 'ansible_user_id|default("UNDEF_MIN") == "UNDEF_MIN"'
+ - 'ansible_env|default("UNDEF_ENV") == "UNDEF_ENV"'
+ # the pkg_mgr fact we requested explicitly
+ - 'ansible_pkg_mgr|default("UNDEF_PKG_MGR") != "UNDEF_PKG_MGR"'
+
+
+- hosts: facthost9
+ tags: [ 'fact_local']
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Create fact directories
+ become: true
+ with_items:
+ - /etc/ansible/facts.d
+ - /tmp/custom_facts.d
+ file:
+ state: directory
+ path: "{{ item }}"
+ mode: '0777'
+ - name: Deploy local facts
+ with_items:
+ - path: /etc/ansible/facts.d/testfact.fact
+ content: '{ "fact_dir": "default" }'
+ - path: /tmp/custom_facts.d/testfact.fact
+ content: '{ "fact_dir": "custom" }'
+ copy:
+ dest: "{{ item.path }}"
+ content: "{{ item.content }}"
+
+- hosts: facthost9
+ tags: [ 'fact_local']
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: Test reading facts from default fact_path
+ assert:
+ that:
+ - '"{{ ansible_local.testfact.fact_dir }}" == "default"'
+
+- hosts: facthost9
+ tags: [ 'fact_local']
+ connection: local
+ gather_facts: yes
+ fact_path: /tmp/custom_facts.d
+ tasks:
+ - name: Test reading facts from custom fact_path
+ assert:
+ that:
+ - '"{{ ansible_local.testfact.fact_dir }}" == "custom"'
+
+- hosts: facthost20
+ tags: [ 'fact_facter_ohai' ]
+ connection: local
+ gather_subset:
+ - facter
+ - ohai
+ gather_facts: yes
+ tasks:
+ - name: Test that retrieving facter and ohai doesn't fail
+ assert:
+ # not much to assert here, aside from not crashing, since test images don't have
+ # facter/ohai
+ that:
+ - 'ansible_user_id|default("UNDEF_MIN") != "UNDEF_MIN"'
+
+- hosts: facthost9
+ tags: [ 'fact_file_utils' ]
+ connection: local
+ gather_facts: false
+ tasks:
+ - block:
+ - name: Ensure get_file_content works when strip=False
+ file_utils:
+ test: strip
+
+ - assert:
+ that:
+ - ansible_facts.get('etc_passwd_newlines', 0) + 1 == ansible_facts.get('etc_passwd_newlines_unstripped', 0)
+
+ - name: Make an empty file
+ file:
+ path: "{{ output_dir }}/empty_file"
+ state: touch
+
+ - name: Ensure get_file_content gives default when file is empty
+ file_utils:
+ test: default
+ touch_file: "{{ output_dir }}/empty_file"
+
+ - assert:
+ that:
+ - ansible_facts.get('touch_default') == 'i am a default'
+
+ - copy:
+ dest: "{{ output_dir }}/1charsep"
+ content: "foo:bar:baz:buzz:"
+
+ - copy:
+ dest: "{{ output_dir }}/2charsep"
+ content: "foo::bar::baz::buzz::"
+
+ - name: Ensure get_file_lines works as expected with specified 1-char line_sep
+ file_utils:
+ test: line_sep
+ line_sep_file: "{{ output_dir }}/1charsep"
+ line_sep_sep: ":"
+
+ - assert:
+ that:
+ - ansible_facts.get('line_sep') == ['foo', 'bar', 'baz', 'buzz']
+
+ - name: Ensure get_file_lines works as expected with specified 2-char line_sep
+ file_utils:
+ test: line_sep
+ line_sep_file: "{{ output_dir }}/2charsep"
+ line_sep_sep: "::"
+
+ - assert:
+ that:
+ - ansible_facts.get('line_sep') == ['foo', 'bar', 'baz', 'buzz', '']
+
+ - name: Ensure get_mount_size fails gracefully
+ file_utils:
+ test: invalid_mountpoint
+
+ - assert:
+ that:
+ - ansible_facts['invalid_mountpoint']|length == 0
+
+ always:
+ - name: Remove test files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ output_dir }}/empty_file"
+ - "{{ output_dir }}/1charsep"
+ - "{{ output_dir }}/2charsep"
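
Taken together, these plays pin down a subset algebra: the minimal set is always the baseline, positive names add collectors, '!all' shrinks the selection to the minimal set, '!min' removes it, and other negations drop individual collectors. A set-based sketch of that model, inferred from the assertions rather than lifted from the collector code (subset names abbreviated and assumed):

    # Inferred model of gather_subset resolution; illustration only.
    MIN = {'platform', 'distribution', 'env', 'user'}      # names assumed
    EXTRA = {'network', 'hardware', 'virtual', 'pkg_mgr'}  # names assumed

    def resolve(spec):
        chosen = set(MIN)
        for item in spec:
            if item == 'all':
                chosen |= MIN | EXTRA
            elif item == '!all':
                chosen &= MIN
            elif item == '!min':
                chosen -= MIN
            elif item.startswith('!'):
                chosen.discard(item[1:])
            else:
                chosen.add(item)
        return chosen

    assert resolve(['!hardware']) == MIN                        # facthost7
    assert resolve(['!all', '!min', 'network']) == {'network'}  # facthost14
    assert resolve(['hardware']) == MIN | {'hardware'}          # facthost3
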
diff --git a/test/integration/targets/gathering_facts/test_prevent_injection.yml b/test/integration/targets/gathering_facts/test_prevent_injection.yml
new file mode 100644
index 00000000..f304fe88
--- /dev/null
+++ b/test/integration/targets/gathering_facts/test_prevent_injection.yml
@@ -0,0 +1,14 @@
+- name: Ensure clean_facts is working properly
+ hosts: facthost1
+ gather_facts: false
+ tasks:
+ - name: gather 'bad' facts
+ action: bogus_facts
+
+ - name: ensure that the 'bad' facts didn't pollute what they are not supposed to
+ assert:
+ that:
+ - "'touch' not in discovered_interpreter_python|default('')"
+ - "'touch' not in ansible_facts.get('discovered_interpreter_python', '')"
+ - "'touch' not in ansible_facts.get('ansible_facts', {}).get('discovered_interpreter_python', '')"
+ - bogus_overwrite is undefined
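
The bogus_facts module earlier in this patch tries to smuggle a discovered_interpreter_python override directly, nested under a second ansible_facts key, and under a scrambled name. The assertions imply clean_facts drops such reserved keys before module-returned facts reach hostvars; a sketch of that kind of filter, with an assumed reserved-name set (this is not the actual clean_facts source):

    # Reserved keys are dropped wholesale; the names here are assumptions.
    RESERVED = {'ansible_facts', 'discovered_interpreter_python'}

    def clean_facts_sketch(facts):
        return {k: v for k, v in facts.items() if k not in RESERVED}

    payload = {  # what bogus_facts echoes, abridged
        'ansible_facts': {'discovered_interpreter_python': 'touch ...',
                          'bogus_overwrite': 'yes'},
        'dansible_iscovered_interpreter_python': 'touch ...',
    }
    cleaned = clean_facts_sketch(payload)
    assert 'ansible_facts' not in cleaned  # takes bogus_overwrite with it
    assert 'discovered_interpreter_python' not in cleaned
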
diff --git a/test/integration/targets/gathering_facts/test_run_once.yml b/test/integration/targets/gathering_facts/test_run_once.yml
new file mode 100644
index 00000000..37023b24
--- /dev/null
+++ b/test/integration/targets/gathering_facts/test_run_once.yml
@@ -0,0 +1,32 @@
+---
+- hosts: facthost1
+ gather_facts: no
+ tasks:
+ - name: check that smart gathering is enabled
+ fail:
+ msg: 'smart gathering must be enabled'
+ when: 'lookup("env", "ANSIBLE_GATHERING") != "smart"'
+ - name: install test local facts
+ copy:
+ src: uuid.fact
+ dest: /etc/ansible/facts.d/
+ mode: 0755
+
+- hosts: facthost1,facthost2
+ gather_facts: yes
+ run_once: yes
+ tasks:
+ - block:
+ - name: 'Check the same host is used'
+ assert:
+ that: 'hostvars.facthost1.ansible_fqdn == hostvars.facthost2.ansible_fqdn'
+ msg: 'This test requires 2 inventory hosts referring to the same host.'
+ - name: "Check that run_once doesn't prevent fact gathering (#39453)"
+ assert:
+ that: 'hostvars.facthost1.ansible_local.uuid != hostvars.facthost2.ansible_local.uuid'
+ msg: "{{ 'Same value for ansible_local.uuid on both hosts: ' ~ hostvars.facthost1.ansible_local.uuid }}"
+ always:
+ - name: remove test local facts
+ file:
+ path: /etc/ansible/facts.d/uuid.fact
+ state: absent
diff --git a/test/integration/targets/gathering_facts/two_one.json b/test/integration/targets/gathering_facts/two_one.json
new file mode 100644
index 00000000..4b34a2d5
--- /dev/null
+++ b/test/integration/targets/gathering_facts/two_one.json
@@ -0,0 +1,27 @@
+{
+ "_ansible_facts_gathered": true,
+ "common_dict_fact": {
+ "key_four": "from facts_two",
+ "key_one": "from facts_one",
+ "key_two": "from facts_one"
+ },
+ "common_fact": "also from facts_one module",
+ "common_list_fact": [
+ "two",
+ "four",
+ "one",
+ "three",
+ "five"
+ ],
+ "common_list_fact2": [
+ "four",
+ "four",
+ "one",
+ "two",
+ "three",
+ "five",
+ "five"
+ ],
+ "factsone": "from facts_one module",
+ "factstwo": "from facts_two module"
+} \ No newline at end of file
diff --git a/test/integration/targets/gathering_facts/uuid.fact b/test/integration/targets/gathering_facts/uuid.fact
new file mode 100644
index 00000000..79e3f626
--- /dev/null
+++ b/test/integration/targets/gathering_facts/uuid.fact
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+import json
+import uuid
+
+
+# return a random string
+print(json.dumps(str(uuid.uuid4())))
diff --git a/test/integration/targets/gathering_facts/verify_merge_facts.yml b/test/integration/targets/gathering_facts/verify_merge_facts.yml
new file mode 100644
index 00000000..d2144024
--- /dev/null
+++ b/test/integration/targets/gathering_facts/verify_merge_facts.yml
@@ -0,0 +1,41 @@
+- name: run one and two, verify merge is as expected
+ hosts: localhost
+ vars:
+ ansible_facts_modules:
+ - facts_one
+ - facts_two
+ tasks:
+
+ - name: populate original
+ include_vars:
+ name: original
+ file: one_two.json
+
+ - name: fail if merge result does not match the reference file
+ assert:
+ msg: '{{ansible_facts}} vs {{original}}'
+ that:
+ - ansible_facts|to_json(indent=4, sort_keys=True) == original|to_json(indent=4, sort_keys=True)
+
+ - name: clear existing facts for next play
+ meta: clear_facts
+
+
+- name: run two and one, verify merge is as expected
+ hosts: localhost
+ vars:
+ ansible_facts_modules:
+ - facts_two
+ - facts_one
+ tasks:
+
+ - name: populate original
+ include_vars:
+ name: original
+ file: two_one.json
+
+ - name: fail if merge result does not match the reference file
+ assert:
+ msg: '{{ansible_facts}} vs {{original}}'
+ that:
+ - ansible_facts|to_json(indent=4, sort_keys=True) == original|to_json(indent=4, sort_keys=True)
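
The one_two.json and two_one.json fixtures encode the merge rule these plays verify: dictionary facts merge key by key with the later module winning, while list facts keep the earlier module's unique items followed by the later list verbatim. A sketch of that list rule, inferred from the fixtures rather than taken from the merge implementation:

    # Items only in the earlier list come first; the later list is appended as-is.
    def merge_lists(earlier, later):
        return [x for x in earlier if x not in later] + later

    one = ['one', 'three', 'five']  # common_list_fact from facts_one
    two = ['one', 'two', 'four']    # common_list_fact from facts_two
    assert merge_lists(one, two) == ['three', 'five', 'one', 'two', 'four']  # one_two.json
    assert merge_lists(two, one) == ['two', 'four', 'one', 'three', 'five']  # two_one.json
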
diff --git a/test/integration/targets/get_url/aliases b/test/integration/targets/get_url/aliases
new file mode 100644
index 00000000..f82a267b
--- /dev/null
+++ b/test/integration/targets/get_url/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group1
+needs/httptester
+skip/aix
diff --git a/test/integration/targets/get_url/files/testserver.py b/test/integration/targets/get_url/files/testserver.py
new file mode 100644
index 00000000..81043b66
--- /dev/null
+++ b/test/integration/targets/get_url/files/testserver.py
@@ -0,0 +1,20 @@
+import sys
+
+if __name__ == '__main__':
+ if sys.version_info[0] >= 3:
+ import http.server
+ import socketserver
+ PORT = int(sys.argv[1])
+
+ class Handler(http.server.SimpleHTTPRequestHandler):
+ pass
+
+ Handler.extensions_map['.json'] = 'application/json'
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ httpd.serve_forever()
+ else:
+ import mimetypes
+ mimetypes.init()
+ mimetypes.add_type('application/json', '.json')
+ import SimpleHTTPServer
+ SimpleHTTPServer.test()
diff --git a/test/integration/targets/get_url/meta/main.yml b/test/integration/targets/get_url/meta/main.yml
new file mode 100644
index 00000000..2c2155ab
--- /dev/null
+++ b/test/integration/targets/get_url/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - prepare_http_tests
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/get_url/tasks/main.yml b/test/integration/targets/get_url/tasks/main.yml
new file mode 100644
index 00000000..052bde22
--- /dev/null
+++ b/test/integration/targets/get_url/tasks/main.yml
@@ -0,0 +1,463 @@
+# Test code for the get_url module
+# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+- name: Determine if python looks like it will support modern ssl features like SNI
+ command: "{{ ansible_python.executable }} -c 'from ssl import SSLContext'"
+ ignore_errors: True
+ register: python_test
+
+- name: Set python_has_sslcontext if we have it
+ set_fact:
+ python_has_ssl_context: True
+ when: python_test.rc == 0
+
+- name: Set python_has_sslcontext False if we don't have it
+ set_fact:
+ python_has_ssl_context: False
+ when: python_test.rc != 0
+
+- name: Define test files for file schema
+ set_fact:
+ geturl_srcfile: "{{ remote_tmp_dir }}/aurlfile.txt"
+ geturl_dstfile: "{{ remote_tmp_dir }}/aurlfile_copy.txt"
+
+- name: Create source file
+ copy:
+ dest: "{{ geturl_srcfile }}"
+ content: "foobar"
+ register: source_file_copied
+
+- name: test file fetch
+ get_url:
+ url: "file://{{ source_file_copied.dest }}"
+ dest: "{{ geturl_dstfile }}"
+ register: result
+
+- name: assert success and change
+ assert:
+ that:
+ - result is changed
+ - '"OK" in result.msg'
+
+- name: test nonexistent file fetch
+ get_url:
+ url: "file://{{ source_file_copied.dest }}NOFILE"
+ dest: "{{ geturl_dstfile }}NOFILE"
+ register: result
+ ignore_errors: True
+
+- name: assert that the fetch failed
+ assert:
+ that:
+ - result is failed
+
+- name: test HTTP HEAD request for file in check mode
+ get_url:
+ url: "https://{{ httpbin_host }}/get"
+ dest: "{{ remote_tmp_dir }}/get_url_check.txt"
+ force: yes
+ check_mode: True
+ register: result
+
+- name: assert that the HEAD request was successful in check mode
+ assert:
+ that:
+ - result is changed
+ - '"OK" in result.msg'
+
+- name: test HTTP HEAD for nonexistent URL in check mode
+ get_url:
+ url: "https://{{ httpbin_host }}/DOESNOTEXIST"
+ dest: "{{ remote_tmp_dir }}/shouldnotexist.html"
+ force: yes
+ check_mode: True
+ register: result
+ ignore_errors: True
+
+- name: assert that HEAD request for nonexistent URL failed
+ assert:
+ that:
+ - result is failed
+
+- name: test https fetch
+ get_url: url="https://{{ httpbin_host }}/get" dest={{remote_tmp_dir}}/get_url.txt force=yes
+ register: result
+
+- name: assert the get_url call was successful
+ assert:
+ that:
+ - result is changed
+ - '"OK" in result.msg'
+
+- name: test https fetch to a site with mismatched hostname and certificate
+ get_url:
+ url: "https://{{ badssl_host }}/"
+ dest: "{{ remote_tmp_dir }}/shouldnotexist.html"
+ ignore_errors: True
+ register: result
+
+- stat:
+ path: "{{ remote_tmp_dir }}/shouldnotexist.html"
+ register: stat_result
+
+- name: Assert that the file was not downloaded
+ assert:
+ that:
+ - "result is failed"
+ - "'Failed to validate the SSL certificate' in result.msg or 'Hostname mismatch' in result.msg or ( result.msg is match('hostname .* doesn.t match .*'))"
+ - "stat_result.stat.exists == false"
+
+- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no
+ get_url:
+ url: "https://{{ badssl_host }}/"
+ dest: "{{ remote_tmp_dir }}/get_url_no_validate.html"
+ validate_certs: no
+ register: result
+
+- stat:
+ path: "{{ remote_tmp_dir }}/get_url_no_validate.html"
+ register: stat_result
+
+- name: Assert that the file was downloaded
+ assert:
+ that:
+ - result is changed
+ - "stat_result.stat.exists == true"
+
+# SNI Tests
+# SNI is only built into the stdlib from python-2.7.9 onwards
+- name: Test that SNI works
+ get_url:
+ url: 'https://{{ sni_host }}/'
+ dest: "{{ remote_tmp_dir }}/sni.html"
+ register: get_url_result
+ ignore_errors: True
+
+- command: "grep '{{ sni_host }}' {{ remote_tmp_dir}}/sni.html"
+ register: data_result
+ when: python_has_ssl_context
+
+- debug:
+ var: get_url_result
+
+- name: Assert that SNI works with this python version
+ assert:
+ that:
+ - 'data_result.rc == 0'
+ when: python_has_ssl_context
+
+# If the client doesn't support SNI then get_url should have failed with a certificate mismatch
+- name: Assert that hostname verification failed because SNI is not supported on this version of python
+ assert:
+ that:
+ - 'get_url_result is failed'
+ when: not python_has_ssl_context
+
+# These tests are just side effects of how the site is hosted. It's not
+# specifically a test site. So the tests may break due to the hosting changing
+- name: Test that SNI works
+ get_url:
+ url: 'https://{{ sni_host }}/'
+ dest: "{{ remote_tmp_dir }}/sni.html"
+ register: get_url_result
+ ignore_errors: True
+
+- command: "grep '{{ sni_host }}' {{ remote_tmp_dir}}/sni.html"
+ register: data_result
+ when: python_has_ssl_context
+
+- debug:
+ var: get_url_result
+
+- name: Assert that SNI works with this python version
+ assert:
+ that:
+ - 'data_result.rc == 0'
+ - 'get_url_result is not failed'
+ when: python_has_ssl_context
+
+# If the client doesn't support SNI then get_url should have failed with a certificate mismatch
+- name: Assert that hostname verification failed because SNI is not supported on this version of python
+ assert:
+ that:
+ - 'get_url_result is failed'
+ when: not python_has_ssl_context
+# End hacky SNI test section
+
+- name: Test get_url with redirect
+ get_url:
+ url: 'https://{{ httpbin_host }}/redirect/6'
+ dest: "{{ remote_tmp_dir }}/redirect.json"
+
+- name: Test that setting file modes work
+ get_url:
+ url: 'https://{{ httpbin_host }}/'
+ dest: '{{ remote_tmp_dir }}/test'
+ mode: '0707'
+ register: result
+
+- stat:
+ path: "{{ remote_tmp_dir }}/test"
+ register: stat_result
+
+- name: Assert that the file has the right permissions
+ assert:
+ that:
+ - result is changed
+ - "stat_result.stat.mode == '0707'"
+
+- name: Test that setting file modes on an already downloaded file work
+ get_url:
+ url: 'https://{{ httpbin_host }}/'
+ dest: '{{ remote_tmp_dir }}/test'
+ mode: '0070'
+ register: result
+
+- stat:
+ path: "{{ remote_tmp_dir }}/test"
+ register: stat_result
+
+- name: Assert that the file has the right permissions
+ assert:
+ that:
+ - result is changed
+ - "stat_result.stat.mode == '0070'"
+
+# https://github.com/ansible/ansible/pull/65307/
+- name: Test that on http status 304, we get a status_code field.
+ get_url:
+ url: 'https://{{ httpbin_host }}/status/304'
+ dest: '{{ remote_tmp_dir }}/test'
+ register: result
+
+- name: Assert that we get the appropriate status_code
+ assert:
+ that:
+ - "'status_code' in result"
+ - "result.status_code == 304"
+
+# https://github.com/ansible/ansible/issues/29614
+- name: Change mode on an already downloaded file and specify checksum
+ get_url:
+ url: 'https://{{ httpbin_host }}/get'
+ dest: '{{ remote_tmp_dir }}/test'
+ checksum: 'sha256:7036ede810fad2b5d2e7547ec703cae8da61edbba43c23f9d7203a0239b765c4.'
+ mode: '0775'
+ register: result
+
+- stat:
+ path: "{{ remote_tmp_dir }}/test"
+ register: stat_result
+
+- name: Assert that file permissions on already downloaded file were changed
+ assert:
+ that:
+ - result is changed
+ - "stat_result.stat.mode == '0775'"
+
+- name: test checksum match in check mode
+ get_url:
+ url: 'https://{{ httpbin_host }}/get'
+ dest: '{{ remote_tmp_dir }}/test'
+ checksum: 'sha256:7036ede810fad2b5d2e7547ec703cae8da61edbba43c23f9d7203a0239b765c4.'
+ check_mode: True
+ register: result
+
+- name: Assert that check mode was green
+ assert:
+ that:
+ - result is not changed
+
+- name: Get a file that already exists with a checksum
+ get_url:
+ url: 'https://{{ httpbin_host }}/cache'
+ dest: '{{ remote_tmp_dir }}/test'
+ checksum: 'sha1:{{ stat_result.stat.checksum }}'
+ register: result
+
+- name: Assert that the file was not downloaded
+ assert:
+ that:
+ - result.msg == 'file already exists'
+
+- name: Get a file that already exists
+ get_url:
+ url: 'https://{{ httpbin_host }}/cache'
+ dest: '{{ remote_tmp_dir }}/test'
+ register: result
+
+- name: Assert that we didn't re-download unnecessarily
+ assert:
+ that:
+ - result is not changed
+ - "'304' in result.msg"
+
+- name: get a file that doesn't respond to If-Modified-Since without checksum
+ get_url:
+ url: 'https://{{ httpbin_host }}/get'
+ dest: '{{ remote_tmp_dir }}/test'
+ register: result
+
+- name: Assert that we downloaded the file
+ assert:
+ that:
+ - result is changed
+
+# https://github.com/ansible/ansible/issues/27617
+
+- name: set role facts
+ set_fact:
+ http_port: 27617
+ files_dir: '{{ remote_tmp_dir }}/files'
+
+- name: create files_dir
+ file:
+ dest: "{{ files_dir }}"
+ state: directory
+
+- name: create src file
+ copy:
+ dest: '{{ files_dir }}/27617.txt'
+ content: "ptux"
+
+- name: create sha1 checksum file of src
+ copy:
+ dest: '{{ files_dir }}/sha1sum.txt'
+ content: |
+ a97e6837f60cec6da4491bab387296bbcd72bdba 27617.txt
+ 3911340502960ca33aece01129234460bfeb2791 not_target1.txt
+ 1b4b6adf30992cedb0f6edefd6478ff0a593b2e4 not_target2.txt
+
+- name: create sha256 checksum file of src
+ copy:
+ dest: '{{ files_dir }}/sha256sum.txt'
+ content: |
+ b1b6ce5073c8fac263a8fc5edfffdbd5dec1980c784e09c5bc69f8fb6056f006. 27617.txt
+ 30949cc401e30ac494d695ab8764a9f76aae17c5d73c67f65e9b558f47eff892 not_target1.txt
+ d0dbfc1945bc83bf6606b770e442035f2c4e15c886ee0c22fb3901ba19900b5b not_target2.txt
+
+- name: create sha256 checksum file of src with a dot leading path
+ copy:
+ dest: '{{ files_dir }}/sha256sum_with_dot.txt'
+ content: |
+ b1b6ce5073c8fac263a8fc5edfffdbd5dec1980c784e09c5bc69f8fb6056f006. ./27617.txt
+ 30949cc401e30ac494d695ab8764a9f76aae17c5d73c67f65e9b558f47eff892 ./not_target1.txt
+ d0dbfc1945bc83bf6606b770e442035f2c4e15c886ee0c22fb3901ba19900b5b ./not_target2.txt
+
+- copy:
+ src: "testserver.py"
+ dest: "{{ remote_tmp_dir }}/testserver.py"
+
+- name: start SimpleHTTPServer for issues 27617
+ shell: cd {{ files_dir }} && {{ ansible_python.executable }} {{ remote_tmp_dir}}/testserver.py {{ http_port }}
+ async: 90
+ poll: 0
+
+- name: Wait for SimpleHTTPServer to come up online
+ wait_for:
+ host: 'localhost'
+ port: '{{ http_port }}'
+ state: started
+
+- name: download src with sha1 checksum url
+ get_url:
+ url: 'http://localhost:{{ http_port }}/27617.txt'
+ dest: '{{ remote_tmp_dir }}'
+ checksum: 'sha1:http://localhost:{{ http_port }}/sha1sum.txt'
+ register: result_sha1
+
+- stat:
+ path: "{{ remote_tmp_dir }}/27617.txt"
+ register: stat_result_sha1
+
+- name: download src with sha256 checksum url
+ get_url:
+ url: 'http://localhost:{{ http_port }}/27617.txt'
+ dest: '{{ remote_tmp_dir }}/27617sha256.txt'
+ checksum: 'sha256:http://localhost:{{ http_port }}/sha256sum.txt'
+ register: result_sha256
+
+- stat:
+ path: "{{ remote_tmp_dir }}/27617sha256.txt"
+ register: stat_result_sha256
+
+- name: download src with sha256 checksum url with dot leading paths
+ get_url:
+ url: 'http://localhost:{{ http_port }}/27617.txt'
+ dest: '{{ remote_tmp_dir }}/27617sha256_with_dot.txt'
+ checksum: 'sha256:http://localhost:{{ http_port }}/sha256sum_with_dot.txt'
+ register: result_sha256_with_dot
+
+- stat:
+ path: "{{ remote_tmp_dir }}/27617sha256_with_dot.txt"
+ register: stat_result_sha256_with_dot
+
+- name: Assert that the file was downloaded
+ assert:
+ that:
+ - result_sha1 is changed
+ - result_sha256 is changed
+ - result_sha256_with_dot is changed
+ - "stat_result_sha1.stat.exists == true"
+ - "stat_result_sha256.stat.exists == true"
+ - "stat_result_sha256_with_dot.stat.exists == true"
+
+#https://github.com/ansible/ansible/issues/16191
+- name: Test url split with no filename
+ get_url:
+ url: https://{{ httpbin_host }}
+ dest: "{{ remote_tmp_dir }}"
+
+- name: Test headers dict
+ get_url:
+ url: https://{{ httpbin_host }}/headers
+ headers:
+ Foo: bar
+ Baz: qux
+ dest: "{{ remote_tmp_dir }}/headers_dict.json"
+
+- name: Get downloaded file
+ slurp:
+ src: "{{ remote_tmp_dir }}/headers_dict.json"
+ register: result
+
+- name: Test headers dict
+ assert:
+ that:
+ - (result.content | b64decode | from_json).headers.get('Foo') == 'bar'
+ - (result.content | b64decode | from_json).headers.get('Baz') == 'qux'
+
+- name: Test client cert auth, with certs
+ get_url:
+ url: "https://ansible.http.tests/ssl_client_verify"
+ client_cert: "{{ remote_tmp_dir }}/client.pem"
+ client_key: "{{ remote_tmp_dir }}/client.key"
+ dest: "{{ remote_tmp_dir }}/ssl_client_verify"
+ when: has_httptester
+
+- name: Get downloaded file
+ slurp:
+ src: "{{ remote_tmp_dir }}/ssl_client_verify"
+ register: result
+ when: has_httptester
+
+- name: Assert that the ssl_client_verify file contains the correct content
+ assert:
+ that:
+ - '(result.content | b64decode) == "ansible.http.tests:SUCCESS"'
+ when: has_httptester
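
In the issue-27617 tasks above, the checksum option is itself a URL ('sha256:http://.../sha256sum.txt'): the module fetches that file and picks the digest whose filename entry matches the file being downloaded, and the *_with_dot fixture shows the match must tolerate './'-prefixed paths. A sketch of such a lookup (an illustration, not the module's source):

    # Matching on the base name makes './27617.txt' and '27617.txt' equivalent.
    import posixpath

    def find_digest(checksum_file_text, filename):
        for line in checksum_file_text.splitlines():
            fields = line.split(None, 1)
            if len(fields) == 2 and posixpath.basename(fields[1].strip()) == filename:
                return fields[0]
        return None

    text = 'deadbeef  ./27617.txt\ncafef00d  not_target1.txt\n'
    assert find_digest(text, '27617.txt') == 'deadbeef'
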
diff --git a/test/integration/targets/getent/aliases b/test/integration/targets/getent/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/getent/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/getent/meta/main.yml b/test/integration/targets/getent/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/getent/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/getent/tasks/main.yml b/test/integration/targets/getent/tasks/main.yml
new file mode 100644
index 00000000..825ad5ea
--- /dev/null
+++ b/test/integration/targets/getent/tasks/main.yml
@@ -0,0 +1,46 @@
+# Test code for the getent module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+- name: check for getent command
+ shell: which getent
+ failed_when: False
+ register: getent_check
+##
+## getent
+##
+- block:
+ - name: run getent with specified service
+ getent:
+ database: passwd
+ key: root
+ service: files
+ register: getent_test0
+ when: ansible_system != 'FreeBSD'
+ - name: run getent w/o specified service (FreeBSD)
+ getent:
+ database: passwd
+ key: root
+ register: getent_test0
+ when: ansible_system == 'FreeBSD'
+ - debug: var=getent_test0
+ - name: validate results
+ assert:
+ that:
+ - 'getent_passwd is defined'
+ - 'getent_passwd.root is defined'
+ - 'getent_passwd.root|length == 6'
+ when: getent_check.rc == 0
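
The length == 6 assertion follows from the shape of a passwd entry: seven colon-separated fields, with the first consumed as the dictionary key. Restated in plain Python (the sample entry is a typical root line, not taken from a test host):

    # Seven colon-separated fields; keying by the first leaves six values.
    entry = 'root:x:0:0:root:/root:/bin/bash'
    key, *values = entry.split(':')
    getent_passwd = {key: values}
    assert len(getent_passwd['root']) == 6
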
diff --git a/test/integration/targets/git/aliases b/test/integration/targets/git/aliases
new file mode 100644
index 00000000..f71c8117
--- /dev/null
+++ b/test/integration/targets/git/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group4
+skip/aix
diff --git a/test/integration/targets/git/handlers/cleanup-default.yml b/test/integration/targets/git/handlers/cleanup-default.yml
new file mode 100644
index 00000000..02a79882
--- /dev/null
+++ b/test/integration/targets/git/handlers/cleanup-default.yml
@@ -0,0 +1,6 @@
+# TODO remove everything we'd installed (see git_required_packages), not just git
+# problem is that we should not remove what we hadn't installed
+- name: remove git
+ package:
+ name: git
+ state: absent
diff --git a/test/integration/targets/git/handlers/cleanup-freebsd.yml b/test/integration/targets/git/handlers/cleanup-freebsd.yml
new file mode 100644
index 00000000..1ee35013
--- /dev/null
+++ b/test/integration/targets/git/handlers/cleanup-freebsd.yml
@@ -0,0 +1,5 @@
+- name: remove git from FreeBSD
+ pkgng:
+ name: git
+ state: absent
+ autoremove: yes
diff --git a/test/integration/targets/git/handlers/main.yml b/test/integration/targets/git/handlers/main.yml
new file mode 100644
index 00000000..875f513a
--- /dev/null
+++ b/test/integration/targets/git/handlers/main.yml
@@ -0,0 +1,7 @@
+- name: cleanup
+ include_tasks: "{{ cleanup_filename }}"
+ with_first_found:
+ - "cleanup-{{ ansible_distribution | lower }}.yml"
+ - "cleanup-default.yml"
+ loop_control:
+ loop_var: cleanup_filename
diff --git a/test/integration/targets/git/meta/main.yml b/test/integration/targets/git/meta/main.yml
new file mode 100644
index 00000000..34a77cb7
--- /dev/null
+++ b/test/integration/targets/git/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_gnutar
diff --git a/test/integration/targets/git/tasks/ambiguous-ref.yml b/test/integration/targets/git/tasks/ambiguous-ref.yml
new file mode 100644
index 00000000..f06112e5
--- /dev/null
+++ b/test/integration/targets/git/tasks/ambiguous-ref.yml
@@ -0,0 +1,37 @@
+# test for https://github.com/ansible/ansible-modules-core/pull/3386
+
+- name: AMBIGUOUS-REF | clone repo
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+
+- name: AMBIGUOUS-REF | rename remote to be ambiguous
+ command: git remote rename origin v0.1
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: AMBIGUOUS-REF | switch to HEAD
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ remote: v0.1
+
+- name: AMBIGUOUS-REF | rev-parse remote HEAD
+ command: git rev-parse v0.1/HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_remote_head
+
+- name: AMBIGUOUS-REF | rev-parse local HEAD
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_local_head
+
+- assert:
+ that: git_remote_head.stdout == git_local_head.stdout
+
+- name: AMBIGUOUS-REF | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
diff --git a/test/integration/targets/git/tasks/archive.yml b/test/integration/targets/git/tasks/archive.yml
new file mode 100644
index 00000000..574559ef
--- /dev/null
+++ b/test/integration/targets/git/tasks/archive.yml
@@ -0,0 +1,135 @@
+- name: ARCHIVE | Clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: ARCHIVE | Archive repo using various archival format
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ archive: '{{ checkout_dir }}/test_role.{{ item }}'
+ register: git_archive
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+# The map filter was added in Jinja2 2.7, which is newer than the version on RHEL/CentOS 6,
+# so we skip this validation on those hosts
+- name: ARCHIVE | Assert that archives were downloaded
+ assert:
+ that: (git_archive.results | map(attribute='changed') | unique | list)[0]
+ when:
+ - "ansible_os_family == 'RedHat'"
+ - ansible_distribution_major_version is version('7', '>=')
+
+- name: ARCHIVE | Check if archive file is created or not
+ stat:
+ path: '{{ checkout_dir }}/test_role.{{ item }}'
+ register: archive_check
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+- name: ARCHIVE | Assert that archive files exist
+ assert:
+ that: (archive_check.results | map(attribute='stat.exists') | unique | list)[0]
+ when:
+ - "ansible_os_family == 'RedHat'"
+ - ansible_distribution_major_version is version('7', '>=')
+
+- name: ARCHIVE | Clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: ARCHIVE | Clone clean repo
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+
+# Check git archive functionality without update
+- name: ARCHIVE | Archive repo using various archival format and without update
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ update: no
+ archive: '{{ checkout_dir }}/test_role.{{ item }}'
+ register: git_archive
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+# The map filter was added in Jinja2 2.7, which is newer than the version on RHEL/CentOS 6,
+# so we skip this validation on those hosts
+- name: ARCHIVE | Assert that archives were downloaded
+ assert:
+ that: (git_archive.results | map(attribute='changed') | unique | list)[0]
+ when:
+ - "ansible_os_family == 'RedHat'"
+ - ansible_distribution_major_version is version('7', '>=')
+
+- name: ARCHIVE | Check if archive file is created or not
+ stat:
+ path: '{{ checkout_dir }}/test_role.{{ item }}'
+ register: archive_check
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+- name: ARCHIVE | Assert that archive files exist
+ assert:
+ that: (archive_check.results | map(attribute='stat.exists') | unique | list)[0]
+ when:
+ - "ansible_os_family == 'RedHat'"
+ - ansible_distribution_major_version is version('7', '>=')
+
+- name: ARCHIVE | Inspect archive file
+ command:
+ cmd: "{{ git_list_commands[item] }} {{ checkout_dir }}/test_role.{{ item }}"
+ warn: no
+ register: archive_content
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+# Does not work on RedHat6 (jinja2 too old?)
+- name: ARCHIVE | Ensure archive content is correct
+ assert:
+ that:
+ - item.stdout_lines | sort | first == 'defaults/'
+ with_items: "{{ archive_content.results }}"
+ when:
+ - ansible_os_family ~ ansible_distribution_major_version != 'RedHat6'
+
+- name: ARCHIVE | Clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: ARCHIVE | Generate an archive prefix
+ set_fact:
+ git_archive_prefix: '{{ range(2 ** 31, 2 ** 32) | random }}' # Generate some random archive prefix
+
+- name: ARCHIVE | Archive repo using various archival format and with an archive prefix
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ archive: '{{ checkout_dir }}/test_role.{{ item }}'
+ archive_prefix: '{{ git_archive_prefix }}/'
+ register: git_archive
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+- name: ARCHIVE | Prepare the target for archive(s) extraction
+ file:
+ state: directory
+ path: '{{ checkout_dir }}/{{ git_archive_prefix }}.{{ item }}'
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+- name: ARCHIVE | Extract the archive(s) into that target
+ unarchive:
+ src: '{{ checkout_dir }}/test_role.{{ item }}'
+ dest: '{{ checkout_dir }}/{{ git_archive_prefix }}.{{ item }}'
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+- name: ARCHIVE | Check if prefix directory exists in what's extracted
+ find:
+ path: '{{ checkout_dir }}/{{ git_archive_prefix }}.{{ item }}'
+ patterns: '{{ git_archive_prefix }}'
+ file_type: directory
+ register: archive_check
+ with_items: "{{ git_archive_extensions[ansible_os_family ~ ansible_distribution_major_version | default('default') ] | default(git_archive_extensions.default) }}"
+
+- name: ARCHIVE | Assert that prefix directory is found
+ assert:
+ that: item.matched == 1
+ with_items: "{{ archive_check.results }}"
diff --git a/test/integration/targets/git/tasks/change-repo-url.yml b/test/integration/targets/git/tasks/change-repo-url.yml
new file mode 100644
index 00000000..b12fca1f
--- /dev/null
+++ b/test/integration/targets/git/tasks/change-repo-url.yml
@@ -0,0 +1,132 @@
+# test change of repo url
+# see https://github.com/ansible/ansible-modules-core/pull/721
+
+- name: CHANGE-REPO-URL | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: CHANGE-REPO-URL | Clone example git repo
+ git:
+ repo: "{{ repo_update_url_1 }}"
+ dest: "{{ checkout_dir }}"
+
+- name: CHANGE-REPO-URL | Clone repo with changed url to the same place
+ git:
+ repo: "{{ repo_update_url_2 }}"
+ dest: "{{ checkout_dir }}"
+ register: clone2
+
+- assert:
+ that: "clone2 is successful"
+
+- name: CHANGE-REPO-URL | check url updated
+ shell: git remote show origin | grep Fetch
+ register: remote_url
+ args:
+ chdir: "{{ checkout_dir }}"
+ environment:
+ LC_ALL: C
+
+- assert:
+ that:
+ - "'git-test-new' in remote_url.stdout"
+ - "'git-test-old' not in remote_url.stdout"
+
+- name: CHANGE-REPO-URL | check for new content in git-test-new
+ stat: path={{ checkout_dir }}/newfilename
+ register: repo_content
+
+- name: CHANGE-REPO-URL | assert presence of new file in repo (i.e. working copy updated)
+ assert:
+ that: "repo_content.stat.exists"
+
+# Make sure 'changed' result is accurate in check mode.
+# See https://github.com/ansible/ansible-modules-core/pull/4243
+
+- name: CHANGE-REPO-URL | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: CHANGE-REPO-URL | clone repo
+ git:
+ repo: "{{ repo_update_url_1 }}"
+ dest: "{{ checkout_dir }}"
+
+- name: CHANGE-REPO-URL | clone repo with same url to same destination
+ git:
+ repo: "{{ repo_update_url_1 }}"
+ dest: "{{ checkout_dir }}"
+ register: checkout_same_url
+
+- name: CHANGE-REPO-URL | check repo not changed
+ assert:
+ that:
+ - checkout_same_url is not changed
+
+
+- name: CHANGE-REPO-URL | clone repo with new url to same destination
+ git:
+ repo: "{{ repo_update_url_2 }}"
+ dest: "{{ checkout_dir }}"
+ register: checkout_new_url
+
+- name: CHANGE-REPO-URL | check repo changed
+ assert:
+ that:
+ - checkout_new_url is changed
+
+
+- name: CHANGE-REPO-URL | clone repo with new url in check mode
+ git:
+ repo: "{{ repo_update_url_1 }}"
+ dest: "{{ checkout_dir }}"
+ register: checkout_new_url_check_mode
+ check_mode: True
+
+- name: CHANGE-REPO-URL | check repo reported changed in check mode
+ assert:
+ that:
+ - checkout_new_url_check_mode is changed
+ when: git_version.stdout is version(git_version_supporting_ls_remote, '>=')
+
+- name: CHANGE-REPO-URL | clone repo with new url after check mode
+ git:
+ repo: "{{ repo_update_url_1 }}"
+ dest: "{{ checkout_dir }}"
+ register: checkout_new_url_after_check_mode
+
+- name: CHANGE-REPO-URL | check repo still changed after check mode
+ assert:
+ that:
+ - checkout_new_url_after_check_mode is changed
+
+
+# Test that checkout by branch works when the branch is not in our current repo but the sha is
+
+- name: CHANGE-REPO-URL | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: CHANGE-REPO-URL | "Clone example git repo that we're going to modify"
+ git:
+ repo: "{{ repo_update_url_1 }}"
+ dest: "{{ checkout_dir }}/repo"
+
+- name: CHANGE-REPO-URL | Clone the repo again - this is what we test
+ git:
+ repo: "{{ checkout_dir }}/repo"
+ dest: "{{ checkout_dir }}/checkout"
+
+- name: CHANGE-REPO-URL | Add a branch to the repo
+ command: git branch new-branch
+ args:
+ chdir: "{{ checkout_dir }}/repo"
+
+- name: CHANGE-REPO-URL | Checkout the new branch in the checkout
+ git:
+ repo: "{{ checkout_dir}}/repo"
+ version: 'new-branch'
+ dest: "{{ checkout_dir }}/checkout"
diff --git a/test/integration/targets/git/tasks/checkout-new-tag.yml b/test/integration/targets/git/tasks/checkout-new-tag.yml
new file mode 100644
index 00000000..eac73f67
--- /dev/null
+++ b/test/integration/targets/git/tasks/checkout-new-tag.yml
@@ -0,0 +1,54 @@
+# test for https://github.com/ansible/ansible-modules-core/issues/527
+# clone a repo, add a tag to the same commit and try to checkout the new commit
+
+
+- name: clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: checkout example repo
+ git:
+ repo: "{{ repo_dir }}/format1"
+ dest: "{{ checkout_dir }}"
+
+- name: get tags of head
+ command: git tag --contains
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: listoftags
+
+- name: make sure the tag does not yet exist
+ assert:
+ that:
+ - "'newtag' not in listoftags.stdout_lines"
+
+- name: add tag in orig repo
+ command: git tag newtag
+ args:
+ chdir: "{{ repo_dir }}/format1"
+
+- name: update copy with new tag
+ git:
+ repo: "{{ repo_dir }}/format1"
+ dest: "{{checkout_dir}}"
+ version: newtag
+ register: update_new_tag
+
+- name: get tags of new head
+ command: git tag --contains
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: listoftags
+
+- name: check new head
+ assert:
+ that:
+ - update_new_tag is not changed
+ - "'newtag' in listoftags.stdout_lines"
+
+
+- name: clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
diff --git a/test/integration/targets/git/tasks/depth.yml b/test/integration/targets/git/tasks/depth.yml
new file mode 100644
index 00000000..547f84f7
--- /dev/null
+++ b/test/integration/targets/git/tasks/depth.yml
@@ -0,0 +1,229 @@
+# Test the depth option and fetching revisions that the initial shallow clone left out
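+# A depth=1 clone should contain only the newest commit; older revisions
+# must stay unreachable until a version that needs them is requested, at
+# which point the module fetches what is missing.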
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: DEPTH | Clone example git repo with depth 1
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+
+- name: DEPTH | try to access earlier commit
+ command: "git checkout {{git_shallow_head_1.stdout}}"
+ register: checkout_early
+ failed_when: False
+ args:
+ chdir: '{{ checkout_dir }}'
+
+- name: DEPTH | make sure the old commit was not fetched
+ assert:
+ that: 'checkout_early.rc != 0'
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+# tests https://github.com/ansible/ansible/issues/14954
+- name: DEPTH | fetch repo again with depth=1
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ register: checkout2
+
+- assert:
+ that: "checkout2 is not changed"
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+- name: DEPTH | again try to access earlier commit
+ shell: "git checkout {{git_shallow_head_1.stdout}}"
+ register: checkout_early
+ failed_when: False
+ args:
+ chdir: '{{ checkout_dir }}'
+
+- name: DEPTH | again make sure the old commit was not fetched
+ assert:
+ that: 'checkout_early.rc != 0'
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+# make sure we are still able to fetch other versions
+- name: DEPTH | Clone same repo with older version
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: earlytag
+ register: cloneold
+
+- assert:
+ that: cloneold is successful
+
+- name: DEPTH | try to access earlier commit
+ shell: "git checkout {{git_shallow_head_1.stdout}}"
+ args:
+ chdir: '{{ checkout_dir }}'
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+# Test for https://github.com/ansible/ansible/issues/21316
+- name: DEPTH | Shallow clone with tag
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: earlytag
+ register: cloneold
+
+- assert:
+ that: cloneold is successful
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+
+# Test for https://github.com/ansible/ansible-modules-core/issues/3456
+# clone a repo with depth and version specified
+
+- name: DEPTH | clone repo with both version and depth specified
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: master
+
+- name: DEPTH | run a second time (now fetch, not clone)
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: master
+ register: git_fetch
+
+- name: DEPTH | ensure the fetch succeeded
+ assert:
+ that: git_fetch is successful
+
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: DEPTH | clone repo with both version and depth specified
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: master
+
+- name: DEPTH | switch to older branch with depth=1 (uses fetch)
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: earlybranch
+ register: git_fetch
+
+- name: DEPTH | ensure the fetch succeeded
+ assert:
+ that: git_fetch is successful
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+# test for https://github.com/ansible/ansible-modules-core/issues/3782
+# make sure shallow fetch works when no version is specified
+
+- name: DEPTH | checkout old repo
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+
+- name: DEPTH | "update repo"
+ shell: echo "3" > a; git commit -a -m "3"
+ args:
+ chdir: "{{ repo_dir }}/shallow"
+
+- name: DEPTH | fetch updated repo
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ register: git_fetch
+ ignore_errors: yes
+
+- name: DEPTH | get "a" file
+ slurp:
+ src: '{{ checkout_dir }}/a'
+ register: a_file
+
+- name: DEPTH | check update arrived
+ assert:
+ that:
+ - "{{ a_file.content | b64decode | trim }} == 3"
+ - git_fetch is changed
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+#
+# Make sure shallow fetch works when switching to (fetching) a new branch
+#
+
+- name: DEPTH | clone from branch with depth specified
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow_branches'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: test_branch
+
+- name: DEPTH | check if clone is shallow
+ stat: path={{ checkout_dir }}/.git/shallow
+ register: is_shallow
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+- name: DEPTH | assert that clone is shallow
+ assert:
+ that:
+ - is_shallow.stat.exists
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+- name: DEPTH | switch to new branch (fetch) with the shallow clone
+ git:
+ repo: 'file://{{ repo_dir|expanduser }}/shallow_branches'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ version: new_branch
+ register: git_fetch
+
+- name: DEPTH | assert if switching a shallow clone to a new branch worked
+ assert:
+ that:
+ - git_fetch is changed
+
+- name: DEPTH | check if clone is still shallow
+ stat: path={{ checkout_dir }}/.git/shallow
+ register: is_shallow
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+- name: DEPTH | assert that clone still is shallow
+ assert:
+ that:
+ - is_shallow.stat.exists
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+- name: DEPTH | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
diff --git a/test/integration/targets/git/tasks/forcefully-fetch-tag.yml b/test/integration/targets/git/tasks/forcefully-fetch-tag.yml
new file mode 100644
index 00000000..47c37478
--- /dev/null
+++ b/test/integration/targets/git/tasks/forcefully-fetch-tag.yml
@@ -0,0 +1,38 @@
+# Tests against https://github.com/ansible/ansible/issues/67972
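+# Scenario: a tag is force-moved upstream after it has already been fetched;
+# a plain fetch refuses to update such a tag, so with force=yes the module
+# must fetch the tag forcefully for the update to succeed.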
+
+# Do our first clone manually; there are no commits yet and Ansible doesn't like
+# that.
+- name: FORCEFULLY-FETCH-TAG | Clone the bare repo in a non-bare clone
+ shell: git clone {{ repo_dir }}/tag_force_push {{ repo_dir }}/tag_force_push_clone1
+
+- name: FORCEFULLY-FETCH-TAG | Prepare repo with a tag
+ shell: |
+ echo 1337 > leet;
+ git add leet;
+ git commit -m uh-oh;
+ git tag -f herewego;
+ git push --tags origin master
+ args:
+ chdir: "{{ repo_dir }}/tag_force_push_clone1"
+
+- name: FORCEFULLY-FETCH-TAG | clone the repo for the second time
+ git:
+ repo: "{{ repo_dir }}/tag_force_push"
+ dest: "{{ repo_dir }}/tag_force_push_clone2"
+
+- name: FORCEFULLY-FETCH-TAG | Forcefully overwrite the tag in clone1
+ shell: |
+ echo 1338 > leet;
+ git add leet;
+ git commit -m uh-oh;
+ git tag -f herewego;
+ git push -f --tags origin master
+ args:
+ chdir: "{{ repo_dir }}/tag_force_push_clone1"
+
+- name: FORCEFULLY-FETCH-TAG | Try to update the second clone
+ git:
+ repo: "{{ repo_dir }}/tag_force_push"
+ dest: "{{ repo_dir }}/tag_force_push_clone2"
+ force: yes
+ register: git_res
diff --git a/test/integration/targets/git/tasks/formats.yml b/test/integration/targets/git/tasks/formats.yml
new file mode 100644
index 00000000..e5fcda72
--- /dev/null
+++ b/test/integration/targets/git/tasks/formats.yml
@@ -0,0 +1,40 @@
+- name: FORMATS | initial checkout
+ git:
+ repo: "{{ repo_format1 }}"
+ dest: "{{ repo_dir }}/format1"
+ register: git_result
+
+- name: FORMATS | verify information about the initial clone
+ assert:
+ that:
+ - "'before' in git_result"
+ - "'after' in git_result"
+ - "not git_result.before"
+ - "git_result.changed"
+
+- name: FORMATS | repeated checkout
+ git:
+ repo: "{{ repo_format1 }}"
+ dest: "{{ repo_dir }}/format1"
+ register: git_result2
+
+- name: FORMATS | check for tags
+ stat:
+ path: "{{ repo_dir }}/format1/.git/refs/tags"
+ register: tags
+
+- name: FORMATS | check for HEAD
+ stat:
+ path: "{{ repo_dir }}/format1/.git/HEAD"
+ register: head
+
+- name: FORMATS | assert presence of the tags directory and HEAD file
+ assert:
+ that:
+ - "tags.stat.isdir"
+ - "head.stat.isreg"
+
+- name: FORMATS | verify on a reclone things are marked unchanged
+ assert:
+ that:
+ - "not git_result2.changed"
diff --git a/test/integration/targets/git/tasks/gpg-verification.yml b/test/integration/targets/git/tasks/gpg-verification.yml
new file mode 100644
index 00000000..143b7e55
--- /dev/null
+++ b/test/integration/targets/git/tasks/gpg-verification.yml
@@ -0,0 +1,192 @@
+# Test for verification of GnuPG signatures
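+# Builds a throwaway GNUPGHOME plus a local repo containing signed and
+# unsigned commits, tags and branches, then checks that verify_commit
+# accepts only the signed ones.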
+
+- name: GPG-VERIFICATION | Create GnuPG verification workdir
+ tempfile:
+ state: directory
+ register: git_gpg_workdir
+
+- name: GPG-VERIFICATION | Define variables based on workdir
+ set_fact:
+ git_gpg_keyfile: "{{ git_gpg_workdir.path }}/testkey.asc"
+ git_gpg_source: "{{ git_gpg_workdir.path }}/source"
+ git_gpg_dest: "{{ git_gpg_workdir.path }}/dest"
+ git_gpg_gpghome: "{{ git_gpg_workdir.path }}/gpg"
+
+- name: GPG-VERIFICATION | Temporarily store GnuPG test key
+ copy:
+ content: "{{ git_gpg_testkey }}"
+ dest: "{{ git_gpg_keyfile }}"
+
+- name: GPG-VERIFICATION | Create temporary GNUPGHOME directory
+ file:
+ path: "{{ git_gpg_gpghome }}"
+ state: directory
+ mode: 0700
+
+- name: GPG-VERIFICATION | Import GnuPG test key
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ command: gpg --import {{ git_gpg_keyfile }}
+
+- name: GPG-VERIFICATION | Create local GnuPG signed repository directory
+ file:
+ path: "{{ git_gpg_source }}"
+ state: directory
+
+- name: GPG-VERIFICATION | Generate local GnuPG signed repository
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ shell: |
+ set -e
+ git init
+ touch an_empty_file
+ git add an_empty_file
+ git commit --no-gpg-sign --message "Commit, and don't sign"
+ git tag lightweight_tag/unsigned_commit HEAD
+ git commit --allow-empty --gpg-sign --message "Commit, and sign"
+ git tag lightweight_tag/signed_commit HEAD
+ git tag --annotate --message "This is not a signed tag" unsigned_annotated_tag HEAD
+ git commit --allow-empty --gpg-sign --message "Commit, and sign"
+ git tag --sign --message "This is a signed tag" signed_annotated_tag HEAD
+ git checkout -b some_branch/signed_tip master
+ git commit --allow-empty --gpg-sign --message "Commit, and sign"
+ git checkout -b another_branch/unsigned_tip master
+ git commit --allow-empty --no-gpg-sign --message "Commit, and don't sign"
+ git checkout master
+ args:
+ chdir: "{{ git_gpg_source }}"
+
+- name: GPG-VERIFICATION | Get hash of an unsigned commit
+ command: git show-ref --hash --verify refs/tags/lightweight_tag/unsigned_commit
+ args:
+ chdir: "{{ git_gpg_source }}"
+ register: git_gpg_unsigned_commit
+
+- name: GPG-VERIFICATION | Get hash of a signed commit
+ command: git show-ref --hash --verify refs/tags/lightweight_tag/signed_commit
+ args:
+ chdir: "{{ git_gpg_source }}"
+ register: git_gpg_signed_commit
+
+- name: GPG-VERIFICATION | Clone repo and verify signed HEAD
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ verify_commit: yes
+
+- name: GPG-VERIFICATION | Clone repo and verify a signed lightweight tag
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: lightweight_tag/signed_commit
+ verify_commit: yes
+
+- name: GPG-VERIFICATION | Clone repo and verify an unsigned lightweight tag (should fail)
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: lightweight_tag/unsigned_commit
+ verify_commit: yes
+ register: git_verify
+ ignore_errors: yes
+
+- name: GPG-VERIFICATION | Check that unsigned lightweight tag verification failed
+ assert:
+ that:
+ - git_verify is failed
+ - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+")
+
+- name: GPG-VERIFICATION | Clone repo and verify a signed commit
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: "{{ git_gpg_signed_commit.stdout }}"
+ verify_commit: yes
+
+- name: GPG-VERIFICATION | Clone repo and verify an unsigned commit
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: "{{ git_gpg_unsigned_commit.stdout }}"
+ verify_commit: yes
+ register: git_verify
+ ignore_errors: yes
+
+- name: GPG-VERIFICATION | Check that unsigned commit verification failed
+ assert:
+ that:
+ - git_verify is failed
+ - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+")
+
+- name: GPG-VERIFICATION | Clone repo and verify a signed annotated tag
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: signed_annotated_tag
+ verify_commit: yes
+
+- name: GPG-VERIFICATION | Clone repo and verify an unsigned annotated tag (should fail)
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: unsigned_annotated_tag
+ verify_commit: yes
+ register: git_verify
+ ignore_errors: yes
+
+- name: GPG-VERIFICATION | Check that unsigned annotated tag verification failed
+ assert:
+ that:
+ - git_verify is failed
+ - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+")
+
+- name: GPG-VERIFICATION | Clone repo and verify a signed branch
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: some_branch/signed_tip
+ verify_commit: yes
+
+- name: GPG-VERIFICATION | Clone repo and verify an unsigned branch (should fail)
+ environment:
+ - GNUPGHOME: "{{ git_gpg_gpghome }}"
+ git:
+ repo: "{{ git_gpg_source }}"
+ dest: "{{ git_gpg_dest }}"
+ version: another_branch/unsigned_tip
+ verify_commit: yes
+ register: git_verify
+ ignore_errors: yes
+
+- name: GPG-VERIFICATION | Check that unsigned branch verification failed
+ assert:
+ that:
+ - git_verify is failed
+ - git_verify.msg is match("Failed to verify GPG signature of commit/tag.+")
+
+- name: GPG-VERIFICATION | Stop gpg-agent so we can remove any locks on the GnuPG dir
+ command: gpgconf --kill gpg-agent
+ when: ansible_os_family != 'Suse' or ansible_distribution_version != '42.3' # OpenSUSE 42.3 ships with an older version of gpg-agent that doesn't support this
+ environment:
+ GNUPGHOME: "{{ git_gpg_gpghome }}"
+
+- name: GPG-VERIFICATION | Remove GnuPG verification workdir
+ file:
+ path: "{{ git_gpg_workdir.path }}"
+ state: absent
diff --git a/test/integration/targets/git/tasks/localmods.yml b/test/integration/targets/git/tasks/localmods.yml
new file mode 100644
index 00000000..09a1326d
--- /dev/null
+++ b/test/integration/targets/git/tasks/localmods.yml
@@ -0,0 +1,112 @@
+# test for https://github.com/ansible/ansible-modules-core/pull/5505
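+# A fetch over local modifications must fail unless force=yes is given, in
+# which case the local changes are discarded and the update applied.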
+- name: LOCALMODS | prepare old git repo
+ shell: rm -rf localmods; mkdir localmods; cd localmods; git init; echo "1" > a; git add a; git commit -m "1"
+ args:
+ chdir: "{{repo_dir}}"
+
+- name: LOCALMODS | checkout old repo
+ git:
+ repo: '{{ repo_dir }}/localmods'
+ dest: '{{ checkout_dir }}'
+
+- name: LOCALMODS | "update repo"
+ shell: echo "2" > a; git commit -a -m "2"
+ args:
+ chdir: "{{repo_dir}}/localmods"
+
+- name: LOCALMODS | "add local mods"
+ shell: echo "3" > a
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: LOCALMODS | fetch with local mods without force (should fail)
+ git:
+ repo: '{{ repo_dir }}/localmods'
+ dest: '{{ checkout_dir }}'
+ register: git_fetch
+ ignore_errors: yes
+
+- name: LOCALMODS | check fetch with localmods failed
+ assert:
+ that:
+ - git_fetch is failed
+
+- name: LOCALMODS | fetch with local mods with force
+ git:
+ repo: '{{ repo_dir }}/localmods'
+ dest: '{{ checkout_dir }}'
+ force: True
+ register: git_fetch_force
+ ignore_errors: yes
+
+- name: LOCALMODS | get "a" file
+ slurp:
+ src: '{{ checkout_dir }}/a'
+ register: a_file
+
+- name: LOCALMODS | check update arrived
+ assert:
+ that:
+ - "{{ a_file.content | b64decode | trim }} == 2"
+ - git_fetch_force is changed
+
+- name: LOCALMODS | clear checkout_dir
+ file: state=absent path={{ checkout_dir }}
+
+# localmods and shallow clone
+- name: LOCALMODS | prepare old git repo
+ shell: rm -rf localmods; mkdir localmods; cd localmods; git init; echo "1" > a; git add a; git commit -m "1"
+ args:
+ chdir: "{{repo_dir}}"
+
+- name: LOCALMODS | checkout old repo
+ git:
+ repo: '{{ repo_dir }}/localmods'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+
+- name: LOCALMODS | "update repo"
+ shell: echo "2" > a; git commit -a -m "2"
+ args:
+ chdir: "{{repo_dir}}/localmods"
+
+- name: LOCALMODS | "add local mods"
+ shell: echo "3" > a
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: LOCALMODS | fetch with local mods without force (should fail)
+ git:
+ repo: '{{ repo_dir }}/localmods'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ register: git_fetch
+ ignore_errors: yes
+
+- name: LOCALMODS | check fetch with localmods failed
+ assert:
+ that:
+ - git_fetch is failed
+
+- name: LOCALMODS | fetch with local mods with force
+ git:
+ repo: '{{ repo_dir }}/localmods'
+ dest: '{{ checkout_dir }}'
+ depth: 1
+ force: True
+ register: git_fetch_force
+ ignore_errors: yes
+
+- name: LOCALMODS | get "a" file
+ slurp:
+ src: '{{ checkout_dir }}/a'
+ register: a_file
+
+- name: LOCALMODS | check update arrived
+ assert:
+ that:
+ - "{{ a_file.content | b64decode | trim }} == 2"
+ - git_fetch_force is changed
+
+- name: LOCALMODS | clear checkout_dir
+ file: state=absent path={{ checkout_dir }}
diff --git a/test/integration/targets/git/tasks/main.yml b/test/integration/targets/git/tasks/main.yml
new file mode 100644
index 00000000..9d750c5c
--- /dev/null
+++ b/test/integration/targets/git/tasks/main.yml
@@ -0,0 +1,40 @@
+# test code for the git module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include_tasks: setup.yml
+- include_tasks: setup-local-repos.yml
+
+- include_tasks: formats.yml
+- include_tasks: missing_hostkey.yml
+- include_tasks: no-destination.yml
+- include_tasks: specific-revision.yml
+- include_tasks: submodules.yml
+- include_tasks: change-repo-url.yml
+- include_tasks: depth.yml
+- include_tasks: checkout-new-tag.yml
+- include_tasks: gpg-verification.yml
+ when:
+ - not gpg_version.stderr
+ - gpg_version.stdout
+ - git_version.stdout is version("2.1.0", '>=')
+- include_tasks: localmods.yml
+- include_tasks: reset-origin.yml
+- include_tasks: ambiguous-ref.yml
+- include_tasks: archive.yml
+- include_tasks: separate-git-dir.yml
+- include_tasks: forcefully-fetch-tag.yml
diff --git a/test/integration/targets/git/tasks/missing_hostkey.yml b/test/integration/targets/git/tasks/missing_hostkey.yml
new file mode 100644
index 00000000..02d5be35
--- /dev/null
+++ b/test/integration/targets/git/tasks/missing_hostkey.yml
@@ -0,0 +1,48 @@
+- name: MISSING-HOSTKEY | checkout git@github.com repo without accept_hostkey (expected fail)
+ git:
+ repo: '{{ repo_format2 }}'
+ dest: '{{ checkout_dir }}'
+ ssh_opts: '-o UserKnownHostsFile={{ output_dir }}/known_hosts'
+ register: git_result
+ ignore_errors: true
+
+- assert:
+ that:
+ - git_result is failed
+
+- name: MISSING-HOSTKEY | checkout git@github.com repo with accept_hostkey (expected pass)
+ git:
+ repo: '{{ repo_format2 }}'
+ dest: '{{ checkout_dir }}'
+ accept_hostkey: true
+ key_file: '{{ github_ssh_private_key }}'
+ ssh_opts: '-o UserKnownHostsFile={{ output_dir }}/known_hosts'
+ register: git_result
+ when: github_ssh_private_key is defined
+
+- assert:
+ that:
+ - git_result is changed
+ when: github_ssh_private_key is defined
+
+- name: MISSING-HOSTKEY | clear checkout_dir
+ file:
+ state: absent
+ path: '{{ checkout_dir }}'
+ when: github_ssh_private_key is defined
+
+- name: MISSING-HOSTKEY | checkout ssh://git@github.com repo with accept_hostkey (expected pass)
+ git:
+ repo: '{{ repo_format3 }}'
+ dest: '{{ checkout_dir }}'
+ version: 'master'
+ accept_hostkey: false # should already have been accepted
+ key_file: '{{ github_ssh_private_key }}'
+ ssh_opts: '-o UserKnownHostsFile={{ output_dir }}/known_hosts'
+ register: git_result
+ when: github_ssh_private_key is defined
+
+- assert:
+ that:
+ - git_result is changed
+ when: github_ssh_private_key is defined
diff --git a/test/integration/targets/git/tasks/no-destination.yml b/test/integration/targets/git/tasks/no-destination.yml
new file mode 100644
index 00000000..1ef7f2fd
--- /dev/null
+++ b/test/integration/targets/git/tasks/no-destination.yml
@@ -0,0 +1,13 @@
+# Test a non-updating repo query with no destination specified
+
+- name: NO-DESTINATION | get info on a repo without updating and with no destination specified
+ git:
+ repo: '{{ repo_dir }}/minimal'
+ update: no
+ clone: no
+ accept_hostkey: yes
+ register: git_result
+
+- assert:
+ that:
+ - git_result is changed
diff --git a/test/integration/targets/git/tasks/reset-origin.yml b/test/integration/targets/git/tasks/reset-origin.yml
new file mode 100644
index 00000000..8fddd4b1
--- /dev/null
+++ b/test/integration/targets/git/tasks/reset-origin.yml
@@ -0,0 +1,25 @@
+- name: RESET-ORIGIN | Clean up the directories
+ file:
+ state: absent
+ path: "{{ item }}"
+ with_items:
+ - "{{ repo_dir }}/origin"
+ - "{{ checkout_dir }}"
+
+- name: RESET-ORIGIN | Create a directory
+ file:
+ name: "{{ repo_dir }}/origin"
+ state: directory
+
+- name: RESET-ORIGIN | Initialise the repo with a file named origin, see github.com/ansible/ansible/pull/22502
+ shell: git init; echo "PR 22502" > origin; git add origin; git commit -m "PR 22502"
+ args:
+ chdir: "{{ repo_dir }}/origin"
+
+- name: RESET-ORIGIN | Clone a git repo with file named origin
+ git:
+ repo: "{{ repo_dir }}/origin"
+ dest: "{{ checkout_dir }}"
+ remote: origin
+ update: no
+ register: status
diff --git a/test/integration/targets/git/tasks/separate-git-dir.yml b/test/integration/targets/git/tasks/separate-git-dir.yml
new file mode 100644
index 00000000..5b874043
--- /dev/null
+++ b/test/integration/targets/git/tasks/separate-git-dir.yml
@@ -0,0 +1,132 @@
+# test code for repositories with separate git dir updating
+# see https://github.com/ansible/ansible/pull/38016
+# see https://github.com/ansible/ansible/issues/30034
+
+- name: SEPARATE-GIT-DIR | clear checkout_dir
+ file:
+ state: absent
+ path: '{{ checkout_dir }}'
+
+- name: SEPARATE-GIT-DIR | create the separate git dir ahead of time
+ file:
+ state: directory
+ path: '{{ separate_git_dir }}'
+
+- name: SEPARATE-GIT-DIR | clone with a separate git dir
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ separate_git_dir: '{{ separate_git_dir }}'
+ ignore_errors: yes
+ register: result
+
+- name: SEPARATE-GIT-DIR | the clone should fail because the dir already exists
+ assert:
+ that: 'result is failed'
+
+- name: SEPARATE-GIT-DIR | delete the pre-existing dir
+ file:
+ state: absent
+ path: '{{ separate_git_dir }}'
+
+- name: SEPARATE-GIT-DIR | clone again with a separate git dir
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ separate_git_dir: '{{ separate_git_dir }}'
+
+- name: SEPARATE-GIT-DIR | check the stat of git dir
+ stat:
+ path: '{{ separate_git_dir }}'
+ register: stat_result
+
+- name: SEPARATE-GIT-DIR | the git dir should exist
+ assert:
+ that: 'stat_result.stat.exists == True'
+
+- name: SEPARATE-GIT-DIR | update repo the usual way
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ separate_git_dir: '{{ separate_git_dir }}'
+ register: result
+
+- name: SEPARATE-GIT-DIR | update should not fail
+ assert:
+ that:
+ - result is not failed
+
+- name: SEPARATE-GIT-DIR | move the git dir to a new place
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ separate_git_dir: '{{ separate_git_dir }}_new'
+ register: result
+
+- name: SEPARATE-GIT-DIR | the move should not fail
+ assert:
+ that: 'result is not failed'
+
+- name: SEPARATE-GIT-DIR | check the stat of new git dir
+ stat:
+ path: '{{ separate_git_dir }}_new'
+ register: stat_result
+
+- name: SEPARATE-GIT-DIR | the new git dir should exist
+ assert:
+ that: 'stat_result.stat.exists == True'
+
+- name: SEPARATE-GIT-DIR | test the update
+ git:
+ repo: '{{ repo_format1 }}'
+ dest: '{{ checkout_dir }}'
+ register: result
+
+- name: SEPARATE-GIT-DIR | the update should not fail
+ assert:
+ that:
+ - result is not failed
+
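+# With a separate git dir, the working tree's ".git" is a plain file holding
+# a single "gitdir: <path>" line; the next tasks corrupt that pointer to
+# check that the module fails cleanly rather than crashing.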
+- name: SEPARATE-GIT-DIR | set git dir to non-existent dir
+ shell: "echo gitdir: /dev/null/non-existent-dir > .git"
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: SEPARATE-GIT-DIR | update repo the usual way
+ git:
+ repo: "{{ repo_format1 }}"
+ dest: "{{ checkout_dir }}"
+ ignore_errors: yes
+ register: result
+
+- name: SEPARATE-GIT-DIR | check update has failed
+ assert:
+ that:
+ - result is failed
+
+- name: SEPARATE-GIT-DIR | set .git file to bad format
+ shell: "echo some text gitdir: {{ checkout_dir }} > .git"
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: SEPARATE-GIT-DIR | update repo the usual way
+ git:
+ repo: "{{ repo_format1 }}"
+ dest: "{{ checkout_dir }}"
+ ignore_errors: yes
+ register: result
+
+- name: SEPARATE-GIT-DIR | check update has failed
+ assert:
+ that:
+ - result is failed
+
+- name: SEPARATE-GIT-DIR | clear separate git dir
+ file:
+ state: absent
+ path: "{{ separate_git_dir }}_new"
+
+- name: SEPARATE-GIT-DIR | clear checkout_dir
+ file:
+ state: absent
+ path: '{{ checkout_dir }}'
diff --git a/test/integration/targets/git/tasks/setup-local-repos.yml b/test/integration/targets/git/tasks/setup-local-repos.yml
new file mode 100644
index 00000000..584a1693
--- /dev/null
+++ b/test/integration/targets/git/tasks/setup-local-repos.yml
@@ -0,0 +1,45 @@
+- name: SETUP-LOCAL-REPOS | create dirs
+ file:
+ name: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ repo_dir }}/minimal"
+ - "{{ repo_dir }}/shallow"
+ - "{{ repo_dir }}/shallow_branches"
+ - "{{ repo_dir }}/tag_force_push"
+
+- name: SETUP-LOCAL-REPOS | prepare minimal git repo
+ shell: git init; echo "1" > a; git add a; git commit -m "1"
+ args:
+ chdir: "{{ repo_dir }}/minimal"
+
+- name: SETUP-LOCAL-REPOS | prepare git repo for shallow clone
+ shell: |
+ git init;
+ echo "1" > a; git add a; git commit -m "1"; git tag earlytag; git branch earlybranch;
+ echo "2" > a; git add a; git commit -m "2";
+ args:
+ chdir: "{{ repo_dir }}/shallow"
+
+- name: SETUP-LOCAL-REPOS | set old hash var for shallow test
+ command: 'git rev-parse HEAD~1'
+ register: git_shallow_head_1
+ args:
+ chdir: "{{ repo_dir }}/shallow"
+
+- name: SETUP-LOCAL-REPOS | prepare tmp git repo with two branches
+ shell: |
+ git init
+ echo "1" > a; git add a; git commit -m "1"
+ git checkout -b test_branch; echo "2" > a; git commit -m "2 on branch" a
+ git checkout -b new_branch; echo "3" > a; git commit -m "3 on new branch" a
+ args:
+ chdir: "{{ repo_dir }}/shallow_branches"
+
+# Make this a bare one, we need to be able to push to it from clones
+# We make the repo here for consistency with the other repos,
+# but we finish setting it up in forcefully-fetch-tag.yml.
+- name: SETUP-LOCAL-REPOS | prepare tag_force_push git repo
+ shell: git init --bare
+ args:
+ chdir: "{{ repo_dir }}/tag_force_push"
diff --git a/test/integration/targets/git/tasks/setup.yml b/test/integration/targets/git/tasks/setup.yml
new file mode 100644
index 00000000..16c56904
--- /dev/null
+++ b/test/integration/targets/git/tasks/setup.yml
@@ -0,0 +1,43 @@
+- name: SETUP | clean out the output_dir
+ file:
+ path: "{{ output_dir }}"
+ state: absent
+
+- name: SETUP | create clean output_dir
+ file:
+ path: "{{ output_dir }}"
+ state: directory
+
+- name: SETUP | install git
+ package:
+ name: '{{ item }}'
+ when: ansible_distribution != "MacOSX"
+ notify:
+ - cleanup
+ with_items: "{{ git_required_packages[ansible_os_family | default('default') ] | default(git_required_packages.default) }}"
+
+- name: SETUP | verify that git is installed so this test can continue
+ shell: which git
+
+- name: SETUP | get git version; only versions newer than {{ git_version_supporting_depth }} have fixed depth handling
+ shell: git --version | grep 'git version' | sed 's/git version //'
+ register: git_version
+
+- name: SETUP | get gpg version
+ shell: gpg --version 2>&1 | head -1 | sed -e 's/gpg (GnuPG) //'
+ register: gpg_version
+
+- name: SETUP | set git global user.email if not already set
+ shell: git config --global user.email || git config --global user.email "noreply@example.com"
+
+- name: SETUP | set git global user.name if not already set
+ shell: git config --global user.name || git config --global user.name "Ansible Test Runner"
+
+- name: SETUP | create repo_dir
+ file:
+ path: "{{ repo_dir }}"
+ state: directory
+
+- name: SETUP | show git version
+ debug:
+ msg: "Running test with git {{ git_version.stdout }}"
diff --git a/test/integration/targets/git/tasks/specific-revision.yml b/test/integration/targets/git/tasks/specific-revision.yml
new file mode 100644
index 00000000..26fa7cf3
--- /dev/null
+++ b/test/integration/targets/git/tasks/specific-revision.yml
@@ -0,0 +1,238 @@
+# Test that a specific revision can be checked out
+
+- name: SPECIFIC-REVISION | clear checkout_dir
+ file:
+ state: absent
+ path: '{{ checkout_dir }}'
+
+- name: SPECIFIC-REVISION | clone to specific revision
+ git:
+ repo: "{{ repo_dir }}/format1"
+ dest: "{{ checkout_dir }}"
+ version: df4612ba925fbc1b3c51cbb006f51a0443bd2ce9
+
+- name: SPECIFIC-REVISION | check HEAD after clone to revision
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_result
+
+- assert:
+ that:
+ - 'git_result.stdout == "df4612ba925fbc1b3c51cbb006f51a0443bd2ce9"'
+
+- name: SPECIFIC-REVISION | update to specific revision
+ git:
+ repo: "{{ repo_dir }}/format1"
+ dest: "{{ checkout_dir }}"
+ version: 4e739a34719654db7b04896966e2354e1256ea5d
+ register: git_result
+
+- assert:
+ that:
+ - git_result is changed
+
+- name: SPECIFIC-REVISION | check HEAD after update to revision
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_result
+
+- assert:
+ that:
+ - 'git_result.stdout == "4e739a34719654db7b04896966e2354e1256ea5d"'
+
+- name: SPECIFIC-REVISION | update to HEAD from detached HEAD state
+ git:
+ repo: "{{ repo_dir }}/format1"
+ dest: "{{ checkout_dir }}"
+ version: HEAD
+ register: git_result
+
+- assert:
+ that:
+ - git_result is changed
+
+# Test a revision not available under refs/heads/ or refs/tags/
+
+- name: SPECIFIC-REVISION | attempt to get unavailable revision
+ git:
+ repo: "{{ repo_dir }}/format1"
+ dest: "{{ checkout_dir }}"
+ version: 5473e343e33255f2da0b160f53135c56921d875c
+ ignore_errors: true
+ register: git_result
+
+- assert:
+ that:
+ - git_result is failed
+
+# Same as the previous test, but this time we specify which ref
+# contains the SHA1
+- name: SPECIFIC-REVISION | update to revision by specifying the refspec
+ git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: '{{ checkout_dir }}'
+ version: 5473e343e33255f2da0b160f53135c56921d875c
+ refspec: refs/pull/7/merge
+
+- name: SPECIFIC-REVISION | check HEAD after update with refspec
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_result
+
+- assert:
+ that:
+ - 'git_result.stdout == "5473e343e33255f2da0b160f53135c56921d875c"'
+
+# try out combination of refspec and depth
+- name: SPECIFIC-REVISION | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | update to revision by specifying the refspec with depth=1
+ git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: '{{ checkout_dir }}'
+ version: 5473e343e33255f2da0b160f53135c56921d875c
+ refspec: refs/pull/7/merge
+ depth: 1
+
+- name: SPECIFIC-REVISION | check HEAD after update with refspec
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_result
+
+- assert:
+ that:
+ - 'git_result.stdout == "5473e343e33255f2da0b160f53135c56921d875c"'
+
+- name: SPECIFIC-REVISION | try to access other commit
+ shell: git checkout 0ce1096
+ register: checkout_shallow
+ failed_when: False
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | "make sure the old commit was not fetched, task is 'forced success'"
+ assert:
+ that:
+ - checkout_shallow.rc != 0
+ - checkout_shallow is successful
+ when: git_version.stdout is version(git_version_supporting_depth, '>=')
+
+- name: SPECIFIC-REVISION | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | clone to revision by specifying the refspec
+ git:
+ repo: https://github.com/ansible/ansible-examples.git
+ dest: "{{ checkout_dir }}"
+ version: 5473e343e33255f2da0b160f53135c56921d875c
+ refspec: refs/pull/7/merge
+
+- name: SPECIFIC-REVISION | check HEAD after update with refspec
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: git_result
+
+- assert:
+ that:
+ - 'git_result.stdout == "5473e343e33255f2da0b160f53135c56921d875c"'
+
+# Test that a forced shallow checkout referencing only a branch always fetches the latest head
+
+- name: SPECIFIC-REVISION | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ item }}"
+ with_items:
+ - "{{ checkout_dir }}"
+ - "{{ checkout_dir }}.copy"
+
+- name: SPECIFIC-REVISION | create original repo dir
+ file:
+ state: directory
+ path: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | prepare original repo
+ shell: git init; echo "1" > a; git add a; git commit -m "1"
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | clone example repo locally
+ git:
+ repo: "{{ checkout_dir }}"
+ dest: "{{ checkout_dir }}.copy"
+
+- name: SPECIFIC-REVISION | create branch in original
+ command: git checkout -b test/branch
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | get commit for HEAD on new branch
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}.copy"
+ register: originaltip0
+
+- name: SPECIFIC-REVISION | shallow force checkout new branch in copy
+ git:
+ repo: "{{ checkout_dir }}"
+ dest: "{{ checkout_dir }}.copy"
+ version: test/branch
+ depth: 1
+ force: yes
+
+- name: SPECIFIC-REVISION | create new commit in original
+ shell: git init; echo "2" > b; git add b; git commit -m "2"
+ args:
+ chdir: "{{ checkout_dir }}"
+
+- name: SPECIFIC-REVISION | get commit for new HEAD on original branch
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}"
+ register: originaltip1
+
+- name: SPECIFIC-REVISION | get commit for HEAD on new branch
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}.copy"
+ register: newtip
+
+- name: SPECIFIC-REVISION | assert that copy is still pointing at previous tip
+ assert:
+ that:
+ - newtip.stdout == originaltip0.stdout
+
+- name: SPECIFIC-REVISION | create a local modification in the copy
+ shell: echo "3" > c
+ args:
+ chdir: "{{ checkout_dir }}.copy"
+
+- name: SPECIFIC-REVISION | shallow force checkout new branch in copy (again)
+ git:
+ repo: "{{ checkout_dir }}"
+ dest: "{{ checkout_dir }}.copy"
+ version: test/branch
+ depth: 1
+ force: yes
+
+- name: SPECIFIC-REVISION | get commit for HEAD on new branch
+ command: git rev-parse HEAD
+ args:
+ chdir: "{{ checkout_dir }}.copy"
+ register: newtip
+
+- name: SPECIFIC-REVISION | make sure copy tip is not pointing at previous sha and that new tips match
+ assert:
+ that:
+ - newtip.stdout != originaltip0.stdout
+ - newtip.stdout == originaltip1.stdout
diff --git a/test/integration/targets/git/tasks/submodules.yml b/test/integration/targets/git/tasks/submodules.yml
new file mode 100644
index 00000000..647d1e23
--- /dev/null
+++ b/test/integration/targets/git/tasks/submodules.yml
@@ -0,0 +1,124 @@
+#
+# Submodule tests
+#
+
+# Repository A with submodules defined (repo_submodules)
+# .gitmodules file points to Repository I
+# Repository B forked from A that has newer commits (repo_submodules_newer)
+# .gitmodules file points to Repository II instead of I
+# .gitmodules file also points to Repository III
+# Repository I for submodule1 (repo_submodule1)
+# Has 1 file checked in
+# Repository II forked from I that has newer commits (repo_submodule1_newer)
+# Has 2 files checked in
+# Repository III for a second submodule (repo_submodule2)
+# Has 1 file checked in
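+# The tests below pin specific commits of repo_submodules and count the
+# files inside each submodule directory to tell whether it was fetched or
+# updated.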
+
+- name: SUBMODULES | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: SUBMODULES | Test that clone without recursive does not retrieve submodules
+ git:
+ repo: "{{ repo_submodules }}"
+ version: 45c6c07ef10fd9e453d90207e63da1ce5bd3ae1e
+ dest: "{{ checkout_dir }}"
+ recursive: no
+
+- name: SUBMODULES | List submodule1
+ command: 'ls -1a {{ checkout_dir }}/submodule1'
+ register: submodule1
+
+- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
+ assert:
+ that: submodule1.stdout_lines | length == 2
+
+- name: SUBMODULES | clear checkout_dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+
+- name: SUBMODULES | Test that clone with recursive retrieves submodules
+ git:
+ repo: "{{ repo_submodules }}"
+ dest: "{{ checkout_dir }}"
+ version: 45c6c07ef10fd9e453d90207e63da1ce5bd3ae1e
+ recursive: yes
+
+- name: SUBMODULES | List submodule1
+ command: 'ls -1a {{ checkout_dir }}/submodule1'
+ register: submodule1
+
+- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
+ assert:
+ that: submodule1.stdout_lines | length == 4
+
+- name: SUBMODULES | Copy the checkout so we can run several different tests on it
+ command: 'cp -pr {{ checkout_dir }} {{ checkout_dir }}.bak'
+
+
+- name: SUBMODULES | Test that update without recursive does not change submodules
+ git:
+ repo: "{{ repo_submodules }}"
+ version: d2974e4bbccdb59368f1d5eff2205f0fa863297e
+ dest: "{{ checkout_dir }}"
+ recursive: no
+ update: yes
+ track_submodules: yes
+
+- name: SUBMODULES | List submodule1
+ command: 'ls -1a {{ checkout_dir }}/submodule1'
+ register: submodule1
+
+- name: SUBMODULES | Stat submodule2
+ stat:
+ path: "{{ checkout_dir }}/submodule2"
+ register: submodule2
+
+- name: SUBMODULES | List submodule2
+ command: ls -1a {{ checkout_dir }}/submodule2
+ register: submodule2
+
+- name: SUBMODULES | Ensure both submodules are at the appropriate commit
+ assert:
+ that:
+ - submodule1.stdout_lines | length == 4
+ - submodule2.stdout_lines | length == 2
+
+
+- name: SUBMODULES | Remove checkout dir
+ file:
+ state: absent
+ path: "{{ checkout_dir }}"
+
+- name: SUBMODULES | Restore checkout to prior state
+ command: 'cp -pr {{ checkout_dir }}.bak {{ checkout_dir }}'
+
+
+- name: SUBMODULES | Test that update with recursive updates existing submodules
+ git:
+ repo: "{{ repo_submodules }}"
+ version: d2974e4bbccdb59368f1d5eff2205f0fa863297e
+ dest: "{{ checkout_dir }}"
+ update: yes
+ recursive: yes
+ track_submodules: yes
+
+- name: SUBMODULES | List submodule 1
+ command: 'ls -1a {{ checkout_dir }}/submodule1'
+ register: submodule1
+
+- name: SUBMODULES | Ensure submodule1 is at the appropriate commit
+ assert:
+ that: submodule1.stdout_lines | length == 5
+
+
+- name: SUBMODULES | Test that update with recursive finds new submodules
+ command: 'ls -1a {{ checkout_dir }}/submodule2'
+ register: submodule2
+
+- name: SUBMODULES | Ensure submodule2 is at the appropriate commit
+ assert:
+ that: submodule2.stdout_lines | length == 4
diff --git a/test/integration/targets/git/vars/main.yml b/test/integration/targets/git/vars/main.yml
new file mode 100644
index 00000000..a5bae5ba
--- /dev/null
+++ b/test/integration/targets/git/vars/main.yml
@@ -0,0 +1,97 @@
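+# git_archive_extensions is keyed by ansible_os_family concatenated with
+# ansible_distribution_major_version (e.g. 'RedHat6'); hosts without a
+# matching entry fall back to the 'default' list.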
+git_archive_extensions:
+ default:
+ - tar.gz
+ - tar
+ - tgz
+ - zip
+ RedHat6:
+ - tar
+ - zip
+
+git_required_packages:
+ default:
+ - git
+ - gzip
+ - tar
+ - unzip
+ - zip
+ FreeBSD:
+ - git
+ - gzip
+ - unzip
+ - zip
+
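+# Commands archive.yml uses to list an archive's contents, keyed by file
+# extension.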
+git_list_commands:
+ tar.gz: tar -tf
+ tar: tar -tf
+ tgz: tar -tf
+ zip: unzip -Z1
+
+checkout_dir: '{{ output_dir }}/git'
+repo_dir: '{{ output_dir }}/local_repos'
+separate_git_dir: '{{ output_dir }}/sep_git_dir'
+repo_format1: 'https://github.com/jimi-c/test_role'
+repo_format2: 'git@github.com:jimi-c/test_role.git'
+repo_format3: 'ssh://git@github.com/jimi-c/test_role.git'
+repo_submodules: 'https://github.com/abadger/test_submodules_newer.git'
+repo_submodule1: 'https://github.com/abadger/test_submodules_subm1.git'
+repo_submodule2: 'https://github.com/abadger/test_submodules_subm2.git'
+repo_update_url_1: 'https://github.com/ansible-test-robinro/git-test-old'
+repo_update_url_2: 'https://github.com/ansible-test-robinro/git-test-new'
+known_host_files:
+ - "{{ lookup('env','HOME') }}/.ssh/known_hosts"
+ - '/etc/ssh/ssh_known_hosts'
+git_version_supporting_depth: 1.9.1
+git_version_supporting_ls_remote: 1.7.5
+# path to a SSH private key for use with github.com (tests skipped if undefined)
+# github_ssh_private_key: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
+git_gpg_testkey: |
+ -----BEGIN PGP PRIVATE KEY BLOCK-----
+
+ lQOYBFlkmX0BCACtE81Xj/351nnvwnAWMf8ZUP9B1YOPe9ohqNsCQY1DxODVJc9y
+ ljCoh9fTdoHXuaUMUFistozxCMP81RuZxfbfsGePnl8OAOgWT5Sln6yEG45oClJ0
+ RmJJZdDT1lF3VaVwK9NQ5E1oqmk1IOjISi7iFa9TmMn1h7ISP/p+/xtMxQhzUXt8
+ APAEhRdc9FfwxaxCHKZBiM7ND+pAm6vpom07ZUgxSppsrXZAxDncTwAeCumDpeOL
+ LAcSBsw02swOIHFfqHNrkELLr4KJqws+zeAk6R2nq0k16AVdNX+Rb7T3OKmuLawx
+ HXe8rKpaw0RC+JCogZK4tz0KDNuZPLW2Y5JJABEBAAEAB/4zkKpFk79p35YNskLd
+ wgCMRN7/+MKNDavUCnBRsEELt0z7BBxVudx+YZaSSITvxj4fuJJqxqqgJ2no2n8y
+ JdJjG7YHCnqse+WpvAUAAV4PL/ySD704Kj4fOwfoDTrRUIGNNWlseNB9RgQ5UXg5
+ MCzeq/JD+En3bnnFySzzCENUcAQfu2FVYgKEiKaKL5Djs6p5w/jTm+Let3EsIczb
+ ykJ8D4/G/tSrNdp/g10DDy+VclWMhMFqmFesedvytE8jzCVxPKOoRkFTGrX76gIK
+ eMVxHIYxdCfSTHLjBykMGO9gxfk9lf18roNYs0VV2suyi4fVFxEozSAxwWlwKrXn
+ 0arvBADPsm5NjlZ5uR06YKbpUUwPTYcwLbasic0qHuUWgNsTVv8dd2il/jbha77m
+ StU7qRJ1jwbFEFxx7HnTmeGfPbdyKe2qyLJUyD/rpQSC5YirisUchtG8nZsHlnzn
+ k10SIeB480tkgkdMQx1Eif40aiuQb09/TxaaXAEFKttZhEO4RwQA1VQ8a0IrMBI2
+ i4WqaIDNDl3x61JvvFD74v43I0AHKmZUPwcgAd6q2IvCDaKH0hIuBKu6BGq6DPvx
+ Oc/4r3iRn/xccconxRop2A9ffa00B/eQXrBq+uLBQfyiFL9UfkU8eTAAgbDKRxjY
+ ScaevoBbbYxkpgJUCL6VnoSdXlbNOO8EAL2ypsVkDmXNgR8ZT8cKSUft47di5T+9
+ mhT1qmD62B+D86892y2QAohmUDadYRK9m9WD91Y7gOMeNhYj9qbxyPprPYUL0aPt
+ L8KS1H73C5WQMOsl2RyIw81asss30LWghsFIJ1gz8gVEjXhV+YC6W9XQ42iabmRR
+ A67f5sqK1scuO0q0KUFuc2libGUgVGVzdCBSdW5uZXIgPG5vcmVwbHlAZXhhbXBs
+ ZS5jb20+iQE3BBMBCAAhBQJZZJl9AhsDBQsJCAcCBhUICQoLAgQWAgMBAh4BAheA
+ AAoJEK0vcLBcXpbYi/kH/R0xk42MFpGd4pndTAsVIjRk/VhmhFc1v6sBeR40GXlt
+ hyEeOQQnIeHKLhsVT6YnfFZa8b4JwgTD6NeIiibOAlLgaKOWNwZu8toixMPVAzfQ
+ cRei+/gFXNil0FmBwWreVBDppuIn6XiSEPik0C7eCcw4lD+A+BbL3WGkp+OSQPho
+ hodIU02hgkrgs/6YJPats8Rgzw9hICsa2j0MjnG6P2z9atMz6tw2SiE5iBl7mZ2Z
+ zG/HiplleMhf/G8OZOskrWkKiLbpSPfQSKdOFkw1C6yqOlQ+HmuCZ56oyxtpItET
+ R11uAKt+ABdi4DX3FQQ+A+bGJ1+aKrcorZ8Z8s0XhPo=
+ =tV71
+ -----END PGP PRIVATE KEY BLOCK-----
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+ mQENBFlkmX0BCACtE81Xj/351nnvwnAWMf8ZUP9B1YOPe9ohqNsCQY1DxODVJc9y
+ ljCoh9fTdoHXuaUMUFistozxCMP81RuZxfbfsGePnl8OAOgWT5Sln6yEG45oClJ0
+ RmJJZdDT1lF3VaVwK9NQ5E1oqmk1IOjISi7iFa9TmMn1h7ISP/p+/xtMxQhzUXt8
+ APAEhRdc9FfwxaxCHKZBiM7ND+pAm6vpom07ZUgxSppsrXZAxDncTwAeCumDpeOL
+ LAcSBsw02swOIHFfqHNrkELLr4KJqws+zeAk6R2nq0k16AVdNX+Rb7T3OKmuLawx
+ HXe8rKpaw0RC+JCogZK4tz0KDNuZPLW2Y5JJABEBAAG0KUFuc2libGUgVGVzdCBS
+ dW5uZXIgPG5vcmVwbHlAZXhhbXBsZS5jb20+iQE3BBMBCAAhBQJZZJl9AhsDBQsJ
+ CAcCBhUICQoLAgQWAgMBAh4BAheAAAoJEK0vcLBcXpbYi/kH/R0xk42MFpGd4pnd
+ TAsVIjRk/VhmhFc1v6sBeR40GXlthyEeOQQnIeHKLhsVT6YnfFZa8b4JwgTD6NeI
+ iibOAlLgaKOWNwZu8toixMPVAzfQcRei+/gFXNil0FmBwWreVBDppuIn6XiSEPik
+ 0C7eCcw4lD+A+BbL3WGkp+OSQPhohodIU02hgkrgs/6YJPats8Rgzw9hICsa2j0M
+ jnG6P2z9atMz6tw2SiE5iBl7mZ2ZzG/HiplleMhf/G8OZOskrWkKiLbpSPfQSKdO
+ Fkw1C6yqOlQ+HmuCZ56oyxtpItETR11uAKt+ABdi4DX3FQQ+A+bGJ1+aKrcorZ8Z
+ 8s0XhPo=
+ =mUYY
+ -----END PGP PUBLIC KEY BLOCK-----
diff --git a/test/integration/targets/group/aliases b/test/integration/targets/group/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/group/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/group/files/gidget.py b/test/integration/targets/group/files/gidget.py
new file mode 100644
index 00000000..128985e7
--- /dev/null
+++ b/test/integration/targets/group/files/gidget.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
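+# Print the lowest GID not currently present in the local group database.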
+import grp
+
+gids = [g.gr_gid for g in grp.getgrall()]
+
+i = 0
+while True:
+ if i not in gids:
+ print(i)
+ break
+ i += 1
diff --git a/test/integration/targets/group/files/grouplist.sh b/test/integration/targets/group/files/grouplist.sh
new file mode 100644
index 00000000..d3129dfe
--- /dev/null
+++ b/test/integration/targets/group/files/grouplist.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+#- name: make a list of groups
+# shell: |
+# cat /etc/group | cut -d: -f1
+# register: group_names
+# when: 'ansible_distribution != "MacOSX"'
+
+#- name: make a list of groups [mac]
+# shell: dscl localhost -list /Local/Default/Groups
+# register: group_names
+# when: 'ansible_distribution == "MacOSX"'
+
+DISTRO="$*"
+
+if [[ "$DISTRO" == "MacOSX" ]]; then
+ dscl localhost -list /Local/Default/Groups
+else
+ grep -E -v ^\# /etc/group | cut -d: -f1
+fi
diff --git a/test/integration/targets/group/meta/main.yml b/test/integration/targets/group/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/group/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/group/tasks/main.yml b/test/integration/targets/group/tasks/main.yml
new file mode 100644
index 00000000..eb8126dd
--- /dev/null
+++ b/test/integration/targets/group/tasks/main.yml
@@ -0,0 +1,40 @@
+# Test code for the group module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: ensure test groups are deleted before the test
+ group:
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - ansibullgroup
+ - ansibullgroup2
+ - ansibullgroup3
+
+- block:
+ - name: run tests
+ include_tasks: tests.yml
+
+ always:
+ - name: remove test groups after test
+ group:
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - ansibullgroup
+ - ansibullgroup2
+ - ansibullgroup3 \ No newline at end of file
diff --git a/test/integration/targets/group/tasks/tests.yml b/test/integration/targets/group/tasks/tests.yml
new file mode 100644
index 00000000..e35b23c9
--- /dev/null
+++ b/test/integration/targets/group/tasks/tests.yml
@@ -0,0 +1,329 @@
+---
+##
+## group add
+##
+
+- name: create group (check mode)
+ group:
+ name: ansibullgroup
+ state: present
+ register: create_group_check
+ check_mode: True
+
+- name: get result of create group (check mode)
+ script: 'grouplist.sh "{{ ansible_distribution }}"'
+ register: create_group_actual_check
+
+- name: assert create group (check mode)
+ assert:
+ that:
+ - create_group_check is changed
+ - '"ansibullgroup" not in create_group_actual_check.stdout_lines'
+
+- name: create group
+ group:
+ name: ansibullgroup
+ state: present
+ register: create_group
+
+- name: get result of create group
+ script: 'grouplist.sh "{{ ansible_distribution }}"'
+ register: create_group_actual
+
+- name: assert create group
+ assert:
+ that:
+ - create_group is changed
+ - create_group.gid is defined
+ - '"ansibullgroup" in create_group_actual.stdout_lines'
+
+- name: create group (idempotent)
+ group:
+ name: ansibullgroup
+ state: present
+ register: create_group_again
+
+- name: assert create group (idempotent)
+ assert:
+ that:
+ - not create_group_again is changed
+
+##
+## group check
+##
+
+- name: run existing group check tests
+ group:
+ name: "{{ create_group_actual.stdout_lines|random }}"
+ state: present
+ with_sequence: start=1 end=5
+ register: group_test1
+
+- name: validate results for testcase 1
+ assert:
+ that:
+ - group_test1.results is defined
+ - group_test1.results|length == 5
+
+- name: validate change results for testcase 1
+ assert:
+ that:
+ - not group_test1 is changed
+
+##
+## group add with gid
+##
+
+- name: get the next available gid
+ script: gidget.py
+ args:
+ executable: '{{ ansible_python_interpreter }}'
+ register: gid
+
+- name: create a group with a gid (check mode)
+ group:
+ name: ansibullgroup2
+ gid: '{{ gid.stdout_lines[0] }}'
+ state: present
+ register: create_group_gid_check
+ check_mode: True
+
+- name: get result of create a group with a gid (check mode)
+ script: 'grouplist.sh "{{ ansible_distribution }}"'
+ register: create_group_gid_actual_check
+
+- name: assert create group with a gid (check mode)
+ assert:
+ that:
+ - create_group_gid_check is changed
+ - '"ansibullgroup2" not in create_group_gid_actual_check.stdout_lines'
+
+- name: create a group with a gid
+ group:
+ name: ansibullgroup2
+ gid: '{{ gid.stdout_lines[0] }}'
+ state: present
+ register: create_group_gid
+
+- name: get gid of created group
+ command: "{{ ansible_python_interpreter | quote }} -c \"import grp; print(grp.getgrnam('ansibullgroup2').gr_gid)\""
+ register: create_group_gid_actual
+
+- name: assert create group with a gid
+ assert:
+ that:
+ - create_group_gid is changed
+ - create_group_gid.gid | int == gid.stdout_lines[0] | int
+ - create_group_gid_actual.stdout | trim | int == gid.stdout_lines[0] | int
+
+- name: create a group with a gid (idempotent)
+ group:
+ name: ansibullgroup2
+ gid: '{{ gid.stdout_lines[0] }}'
+ state: present
+ register: create_group_gid_again
+
+- name: assert create group with a gid (idempotent)
+ assert:
+ that:
+ - not create_group_gid_again is changed
+ - create_group_gid_again.gid | int == gid.stdout_lines[0] | int
+
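+# non_unique allows reusing a GID that is already taken; the block is guarded
+# because duplicate GIDs are not supported on macOS or Alpine.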
+- block:
+ - name: create a group with a non-unique gid
+ group:
+ name: ansibullgroup3
+ gid: '{{ gid.stdout_lines[0] }}'
+ non_unique: true
+ state: present
+ register: create_group_gid_non_unique
+
+ - name: assert create group with a non-unique gid
+ assert:
+ that:
+ - create_group_gid_non_unique is changed
+ - create_group_gid_non_unique.gid | int == gid.stdout_lines[0] | int
+ when: ansible_facts.distribution not in ['MacOSX', 'Alpine']
+
+##
+## group remove
+##
+
+- name: delete group (check mode)
+ group:
+ name: ansibullgroup
+ state: absent
+ register: delete_group_check
+ check_mode: True
+
+- name: get result of delete group (check mode)
+ script: grouplist.sh "{{ ansible_distribution }}"
+ register: delete_group_actual_check
+
+- name: assert delete group (check mode)
+ assert:
+ that:
+ - delete_group_check is changed
+ - '"ansibullgroup" in delete_group_actual_check.stdout_lines'
+
+- name: delete group
+ group:
+ name: ansibullgroup
+ state: absent
+ register: delete_group
+
+- name: get result of delete group
+ script: grouplist.sh "{{ ansible_distribution }}"
+ register: delete_group_actual
+
+- name: assert delete group
+ assert:
+ that:
+ - delete_group is changed
+ - '"ansibullgroup" not in delete_group_actual.stdout_lines'
+
+- name: delete group (idempotent)
+ group:
+ name: ansibullgroup
+ state: absent
+ register: delete_group_again
+
+- name: assert delete group (idempotent)
+ assert:
+ that:
+ - not delete_group_again is changed
+
+- name: Ensure lgroupadd is present
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: libuser
+ state: present
+ when: ansible_facts.system in ['Linux']
+ tags:
+ - user_test_local_mode
+
+# https://github.com/ansible/ansible/issues/56481
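+# With local groups, reusing a GID must fail on the second create with a clear
+# error rather than silently succeeding.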
+- block:
+ - name: Test duplicate GID with local=yes
+ group:
+ name: "{{ item }}"
+ gid: 1337
+ local: yes
+ loop:
+ - group1_local_test
+ - group2_local_test
+ ignore_errors: yes
+ register: local_duplicate_gid_result
+
+ - assert:
+ that:
+ - local_duplicate_gid_result['results'][0] is success
+ - local_duplicate_gid_result['results'][1]['msg'] == "GID '1337' already exists with group 'group1_local_test'"
+ always:
+ - name: Cleanup
+ group:
+ name: group1_local_test
+ state: absent
+ # only applicable to Linux, limit further to CentOS where 'lgroupadd' is installed
+ when: ansible_distribution == 'CentOS'
+
+# https://github.com/ansible/ansible/pull/59769
+- block:
+ - name: create a local group with a gid
+ group:
+ name: group1_local_test
+ gid: 1337
+ local: yes
+ state: present
+ register: create_local_group_gid
+
+ - name: get gid of created local group
+ command: "{{ ansible_python_interpreter | quote }} -c \"import grp; print(grp.getgrnam('group1_local_test').gr_gid)\""
+ register: create_local_group_gid_actual
+
+ - name: assert create local group with a gid
+ assert:
+ that:
+ - create_local_group_gid is changed
+ - create_local_group_gid.gid | int == 1337 | int
+ - create_local_group_gid_actual.stdout | trim | int == 1337 | int
+
+ - name: create a local group with a gid (idempotent)
+ group:
+ name: group1_local_test
+ gid: 1337
+ state: present
+ register: create_local_group_gid_again
+
+ - name: assert create local group with a gid (idempotent)
+ assert:
+ that:
+ - not create_local_group_gid_again is changed
+ - create_local_group_gid_again.gid | int == 1337 | int
+ always:
+ - name: Cleanup create local group with a gid
+ group:
+ name: group1_local_test
+ state: absent
+ # only applicable to Linux, limit further to CentOS where 'lgroupadd' is installed
+ when: ansible_distribution == 'CentOS'
+
+# https://github.com/ansible/ansible/pull/59772
+- block:
+ - name: create group with a gid
+ group:
+ name: group1_test
+ gid: 1337
+ local: no
+ state: present
+ register: create_group_gid
+
+ - name: get gid of created group
+ command: "{{ ansible_python_interpreter | quote }} -c \"import grp; print(grp.getgrnam('group1_test').gr_gid)\""
+ register: create_group_gid_actual
+
+ - name: assert create group with a gid
+ assert:
+ that:
+ - create_group_gid is changed
+ - create_group_gid.gid | int == 1337 | int
+ - create_group_gid_actual.stdout | trim | int == 1337 | int
+
+ - name: create local group with the same gid
+ group:
+ name: group1_test
+ gid: 1337
+ local: yes
+ state: present
+ register: create_local_group_gid
+
+ - name: assert create local group with a gid
+ assert:
+ that:
+ - create_local_group_gid.gid | int == 1337 | int
+ always:
+ - name: Cleanup create group with a gid
+ group:
+ name: group1_test
+ local: no
+ state: absent
+ - name: Cleanup create local group with the same gid
+ group:
+ name: group1_test
+ local: yes
+ state: absent
+ # only applicable to Linux, limit further to CentOS where 'lgroupadd' is installed
+ when: ansible_distribution == 'CentOS'
+
+# create system group
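+# 'system: yes' asks the platform tooling (groupadd -r on Linux) to allocate
+# the GID from the reserved system range.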
+
+- name: remove group
+ group:
+ name: ansibullgroup
+ state: absent
+
+- name: create system group
+ group:
+ name: ansibullgroup
+ state: present
+ system: yes
diff --git a/test/integration/targets/group_by/aliases b/test/integration/targets/group_by/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/group_by/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/group_by/create_groups.yml b/test/integration/targets/group_by/create_groups.yml
new file mode 100644
index 00000000..3494a20f
--- /dev/null
+++ b/test/integration/targets/group_by/create_groups.yml
@@ -0,0 +1,39 @@
+# test code for the group_by module
+# (c) 2014, Chris Church <cchurch@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
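+# group_by creates inventory groups at runtime from the templated key; the
+# plays in test_group_by.yml then target those genus-derived groups.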
+- debug: var=genus
+
+- name: group by genus
+ group_by: key={{ genus }}
+ register: grouped_by_genus
+
+- debug: var=grouped_by_genus
+
+- name: ensure we reflect 'changed' on change
+ assert:
+ that:
+ - grouped_by_genus is changed
+
+- name: group by first three letters of genus with key in quotes
+ group_by: key="{{ genus[:3] }}"
+
+- name: group by first two letters of genus with key not in quotes
+ group_by: key={{ genus[:2] }}
+
+- name: group by genus in uppercase using complex args
+ group_by: { key: "{{ genus | upper() }}" }
diff --git a/test/integration/targets/group_by/group_vars/all b/test/integration/targets/group_by/group_vars/all
new file mode 100644
index 00000000..0b674e00
--- /dev/null
+++ b/test/integration/targets/group_by/group_vars/all
@@ -0,0 +1,3 @@
+uno: 1
+dos: 2
+tres: 3
diff --git a/test/integration/targets/group_by/group_vars/camelus b/test/integration/targets/group_by/group_vars/camelus
new file mode 100644
index 00000000..b214ad69
--- /dev/null
+++ b/test/integration/targets/group_by/group_vars/camelus
@@ -0,0 +1 @@
+dos: 'two'
diff --git a/test/integration/targets/group_by/group_vars/vicugna b/test/integration/targets/group_by/group_vars/vicugna
new file mode 100644
index 00000000..8feb93fc
--- /dev/null
+++ b/test/integration/targets/group_by/group_vars/vicugna
@@ -0,0 +1 @@
+tres: 'three'
diff --git a/test/integration/targets/group_by/inventory.group_by b/test/integration/targets/group_by/inventory.group_by
new file mode 100644
index 00000000..9c7fe7ee
--- /dev/null
+++ b/test/integration/targets/group_by/inventory.group_by
@@ -0,0 +1,9 @@
+# ungrouped
+camel genus=camelus ansible_connection=local
+
+[lamini]
+alpaca genus=vicugna
+llama genus=lama
+
+[lamini:vars]
+ansible_connection=local
diff --git a/test/integration/targets/group_by/runme.sh b/test/integration/targets/group_by/runme.sh
new file mode 100755
index 00000000..d1192681
--- /dev/null
+++ b/test/integration/targets/group_by/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_group_by.yml -i inventory.group_by -v "$@"
+ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook test_group_by_skipped.yml -i inventory.group_by -v "$@"
diff --git a/test/integration/targets/group_by/test_group_by.yml b/test/integration/targets/group_by/test_group_by.yml
new file mode 100644
index 00000000..07368dfe
--- /dev/null
+++ b/test/integration/targets/group_by/test_group_by.yml
@@ -0,0 +1,187 @@
+# test code for the group_by module
+# (c) 2014, Chris Church <cchurch@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Create overall groups
+ hosts: all
+ gather_facts: false
+ tasks:
+ - include_tasks: create_groups.yml
+
+- name: Vicugna group validation
+ hosts: vicugna
+ gather_facts: false
+ tasks:
+ - name: verify that only the alpaca is in this group
+ assert: { that: "inventory_hostname == 'alpaca'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_vicugna=true
+
+- name: Lama group validation
+ hosts: lama
+ gather_facts: false
+ tasks:
+ - name: verify that only the llama is in this group
+ assert: { that: "inventory_hostname == 'llama'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_lama=true
+
+- name: Camelus group validation
+ hosts: camelus
+ gather_facts: false
+ tasks:
+ - name: verify that only the camel is in this group
+ assert: { that: "inventory_hostname == 'camel'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_camelus=true
+
+- name: Vic group validation
+ hosts: vic
+ gather_facts: false
+ tasks:
+ - name: verify that only the alpaca is in this group
+ assert: { that: "inventory_hostname == 'alpaca'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_vic=true
+
+- name: Lam group validation
+ hosts: lam
+ gather_facts: false
+ tasks:
+ - name: verify that only the llama is in this group
+ assert: { that: "inventory_hostname == 'llama'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_lam=true
+
+- name: Cam group validation
+ hosts: cam
+ gather_facts: false
+ tasks:
+ - name: verify that only the camel is in this group
+ assert: { that: "inventory_hostname == 'camel'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_cam=true
+
+- name: Vi group validation
+ hosts: vi
+ gather_facts: false
+ tasks:
+ - name: verify that only the alpaca is in this group
+ assert: { that: "inventory_hostname == 'alpaca'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_vi=true
+
+- name: La group validation
+ hosts: la
+ gather_facts: false
+ tasks:
+ - name: verify that only the llama is in this group
+ assert: { that: "inventory_hostname == 'llama'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_la=true
+
+- name: Ca group validation
+ hosts: ca
+ gather_facts: false
+ tasks:
+ - name: verify that only the camel is in this group
+ assert: { that: "inventory_hostname == 'camel'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_ca=true
+
+- name: VICUGNA group validation
+ hosts: VICUGNA
+ gather_facts: false
+ tasks:
+ - name: verify that only the alpaca is in this group
+ assert: { that: "inventory_hostname == 'alpaca'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_VICUGNA=true
+
+- name: LAMA group validation
+ hosts: LAMA
+ gather_facts: false
+ tasks:
+ - name: verify that only the llama is in this group
+ assert: { that: "inventory_hostname == 'llama'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_LAMA=true
+
+- name: CAMELUS group validation
+ hosts: CAMELUS
+ gather_facts: false
+ tasks:
+ - name: verify that only the camel is in this group
+ assert: { that: "inventory_hostname == 'camel'" }
+ - name: set a fact to check that we ran this play
+ set_fact: genus_CAMELUS=true
+
+- name: alpaca validation of groups
+ hosts: alpaca
+ gather_facts: false
+ tasks:
+ - name: check that alpaca matched all four groups
+ assert: { that: ["genus_vicugna", "genus_vic", "genus_vi", "genus_VICUGNA"] }
+
+- name: llama validation of groups
+ hosts: llama
+ gather_facts: false
+ tasks:
+ - name: check that llama matched all four groups
+ assert: { that: ["genus_lama", "genus_lam", "genus_la", "genus_LAMA"] }
+
+- hosts: camel
+ gather_facts: false
+ tasks:
+ - name: check that camel matched all four groups
+ assert: { that: ["genus_camelus", "genus_cam", "genus_ca", "genus_CAMELUS"] }
+
+- hosts: vicugna
+ gather_facts: false
+ tasks:
+ - name: check group_vars variable overrides for vicugna
+ assert: { that: ["uno == 1", "dos == 2", "tres == 'three'"] }
+
+- hosts: lama
+ gather_facts: false
+ tasks:
+ - name: check group_vars variable overrides for lama
+ assert: { that: ["uno == 1", "dos == 2", "tres == 3"] }
+
+- hosts: camelus
+ gather_facts: false
+ tasks:
+ - name: check group_vars variable overrides for camelus
+ assert: { that: ["uno == 1", "dos == 'two'", "tres == 3"] }
+
+- name: Nested group validation
+ hosts: lama
+ gather_facts: false
+ tasks:
+ - name: group by genus with parent
+ group_by: key=vicugna-{{ genus }} parents=vicugna
+ - name: check group_vars variable overrides for vicugna-lama
+ assert: { that: ["uno == 1", "dos == 2", "tres == 'three'"] }
+
+ - name: group by genus with nonexistent parent
+ group_by:
+ key: "{{ genus }}"
+ parents:
+ - oxydactylus
+ - stenomylus
+ - name: check parent groups
+ assert: { that: ["'oxydactylus' in group_names", "'stenomylus' in group_names"] }
diff --git a/test/integration/targets/group_by/test_group_by_skipped.yml b/test/integration/targets/group_by/test_group_by_skipped.yml
new file mode 100644
index 00000000..6c18b4e8
--- /dev/null
+++ b/test/integration/targets/group_by/test_group_by_skipped.yml
@@ -0,0 +1,30 @@
+# test code for the group_by module
+# (c) 2014, Chris Church <cchurch@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Create overall groups
+ hosts: all
+ gather_facts: false
+ tasks:
+ - include_tasks: create_groups.yml
+
+- name: genus group validation (expect skipped)
+ hosts: 'genus'
+ gather_facts: false
+ tasks:
+ - name: no hosts should match this group
+ fail: msg="should never get here"
diff --git a/test/integration/targets/groupby_filter/aliases b/test/integration/targets/groupby_filter/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/groupby_filter/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/groupby_filter/runme.sh b/test/integration/targets/groupby_filter/runme.sh
new file mode 100755
index 00000000..e5099aa1
--- /dev/null
+++ b/test/integration/targets/groupby_filter/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
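+# Run the same playbook against releases on both sides of the Jinja2 2.9
+# boundary to guard against groupby regressions across that change.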
+pip install -U jinja2==2.9.4
+
+ansible-playbook -i ../../inventory test_jinja2_groupby.yml -v "$@"
+
+pip install -U "jinja2<2.9.0"
+
+ansible-playbook -i ../../inventory test_jinja2_groupby.yml -v "$@"
diff --git a/test/integration/targets/groupby_filter/test_jinja2_groupby.yml b/test/integration/targets/groupby_filter/test_jinja2_groupby.yml
new file mode 100644
index 00000000..3cd02959
--- /dev/null
+++ b/test/integration/targets/groupby_filter/test_jinja2_groupby.yml
@@ -0,0 +1,29 @@
+---
+- name: Test jinja2 groupby
+ hosts: localhost
+ gather_facts: True
+ connection: local
+ vars:
+ fruits:
+ - name: apple
+ enjoy: yes
+ - name: orange
+ enjoy: no
+ - name: strawberry
+ enjoy: yes
+ expected: [[false, [{"enjoy": false, "name": "orange"}]], [true, [{"enjoy": true, "name": "apple"}, {"enjoy": true, "name": "strawberry"}]]]
+ tasks:
+ - name: show python interpreter
+ debug:
+ msg: "{{ ansible_python['executable'] }}"
+
+ - name: show jinja2 version
+ debug:
+ msg: "{{ lookup('pipe', '{{ ansible_python[\"executable\"] }} -c \"import jinja2; print(jinja2.__version__)\"') }}"
+
+ - set_fact:
+ result: "{{ fruits | groupby('enjoy') }}"
+
+ - assert:
+ that:
+ - result == expected
diff --git a/test/integration/targets/handler_race/aliases b/test/integration/targets/handler_race/aliases
new file mode 100644
index 00000000..68d6d978
--- /dev/null
+++ b/test/integration/targets/handler_race/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group5
+handler_race
+skip/aix
diff --git a/test/integration/targets/handler_race/inventory b/test/integration/targets/handler_race/inventory
new file mode 100644
index 00000000..87879294
--- /dev/null
+++ b/test/integration/targets/handler_race/inventory
@@ -0,0 +1,30 @@
+host001 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host002 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host003 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host004 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host005 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host006 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host007 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host008 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host009 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host010 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host011 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host012 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host013 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host014 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host015 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host016 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host017 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host018 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host019 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host020 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host021 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host022 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host023 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host024 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host025 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host026 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host027 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host028 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host029 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host030 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml b/test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml
new file mode 100644
index 00000000..4c43df8c
--- /dev/null
+++ b/test/integration/targets/handler_race/roles/do_handlers/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+# handlers file for do_handlers
+- name: My Handler
+ shell: sleep 5
diff --git a/test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml b/test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml
new file mode 100644
index 00000000..028e9a55
--- /dev/null
+++ b/test/integration/targets/handler_race/roles/do_handlers/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+# tasks file for do_handlers
+- name: Invoke handler
+ shell: sleep 1
+ notify:
+ - My Handler
+
+- name: Flush handlers
+ meta: flush_handlers
diff --git a/test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml b/test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml
new file mode 100644
index 00000000..aefbce26
--- /dev/null
+++ b/test/integration/targets/handler_race/roles/more_sleep/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# tasks file for more_sleep
+- name: Random more sleep
+ set_fact:
+ more_sleep_time: "{{ 5 | random }}"
+
+- name: Moar sleep
+ shell: sleep "{{ more_sleep_time }}"
diff --git a/test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml b/test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml
new file mode 100644
index 00000000..607318bb
--- /dev/null
+++ b/test/integration/targets/handler_race/roles/random_sleep/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# tasks file for random_sleep
+- name: Generate sleep time
+ set_fact:
+ sleep_time: "{{ 60 | random }}"
+
+- name: Do random sleep
+ shell: sleep "{{ sleep_time }}"
diff --git a/test/integration/targets/handler_race/runme.sh b/test/integration/targets/handler_race/runme.sh
new file mode 100755
index 00000000..ba0f9873
--- /dev/null
+++ b/test/integration/targets/handler_race/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_handler_race.yml -i inventory -v "$@"
+
diff --git a/test/integration/targets/handler_race/test_handler_race.yml b/test/integration/targets/handler_race/test_handler_race.yml
new file mode 100644
index 00000000..ef713829
--- /dev/null
+++ b/test/integration/targets/handler_race/test_handler_race.yml
@@ -0,0 +1,10 @@
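+# 30 local hosts sleep for random periods under the free strategy, so hosts
+# reach flush_handlers at different times; the play should still finish with
+# handlers flushed correctly for every host.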
+- hosts: all
+ gather_facts: no
+ strategy: free
+ tasks:
+ - include_role:
+ name: random_sleep
+ - include_role:
+ name: do_handlers
+ - include_role:
+ name: more_sleep
diff --git a/test/integration/targets/handlers/aliases b/test/integration/targets/handlers/aliases
new file mode 100644
index 00000000..30bb677a
--- /dev/null
+++ b/test/integration/targets/handlers/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group5
+handlers
+skip/aix
diff --git a/test/integration/targets/handlers/from_handlers.yml b/test/integration/targets/handlers/from_handlers.yml
new file mode 100644
index 00000000..7b2dea2d
--- /dev/null
+++ b/test/integration/targets/handlers/from_handlers.yml
@@ -0,0 +1,39 @@
+- name: verify handlers_from on include_role
+ hosts: A
+ gather_facts: False
+ tags: ['scenario1']
+ tasks:
+ - name: test include_role
+ include_role: name=test_handlers_meta handlers_from=alternate.yml
+
+ - name: force handler run
+ meta: flush_handlers
+
+ - name: verify handlers ran
+ assert:
+ that:
+ - "'handler1_alt_called' in hostvars[inventory_hostname]"
+ - "'handler2_alt_called' in hostvars[inventory_hostname]"
+ tags: ['scenario1']
+
+
+- name: verify handlers_from on import_role
+ hosts: A
+ gather_facts: False
+ tasks:
+ - name: set facts to false
+ set_fact:
+ handler1_alt_called: False
+ handler2_alt_called: False
+
+ - import_role: name=test_handlers_meta handlers_from=alternate.yml
+
+ - name: force handler run
+ meta: flush_handlers
+
+ - name: verify handlers ran
+ assert:
+ that:
+ - handler1_alt_called|bool
+ - handler2_alt_called|bool
+ tags: ['scenario1']
diff --git a/test/integration/targets/handlers/handlers.yml b/test/integration/targets/handlers/handlers.yml
new file mode 100644
index 00000000..aed75bd2
--- /dev/null
+++ b/test/integration/targets/handlers/handlers.yml
@@ -0,0 +1,2 @@
+- name: test handler
+ debug: msg="handler called"
diff --git a/test/integration/targets/handlers/inventory.handlers b/test/integration/targets/handlers/inventory.handlers
new file mode 100644
index 00000000..268cf657
--- /dev/null
+++ b/test/integration/targets/handlers/inventory.handlers
@@ -0,0 +1,10 @@
+[testgroup]
+A
+B
+C
+D
+E
+
+[testgroup:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml b/test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml
new file mode 100644
index 00000000..962d7561
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_force_handlers/handlers/main.yml
@@ -0,0 +1,2 @@
+- name: echoing handler
+ command: echo CALLED_HANDLER_{{ inventory_hostname }}
diff --git a/test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml b/test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml
new file mode 100644
index 00000000..f5d78c73
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_force_handlers/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+
+# We notify the handler for hosts A and B, and hosts A and C fail.
+# When forcing, we expect A and B to run their handlers.
+# When not forcing, we expect only B to run its handler.
+
+- name: notify the handler for host A and B
+ shell: echo
+ notify:
+ - echoing handler
+ when: inventory_hostname == 'A' or inventory_hostname == 'B'
+
+- name: EXPECTED FAILURE fail task for all
+ fail: msg="Fail All"
+ when: fail_all is defined and fail_all
+
+- name: EXPECTED FAILURE fail task for A
+ fail: msg="Fail A"
+ when: inventory_hostname == 'A'
+
+- name: EXPECTED FAILURE fail task for C
+ fail: msg="Fail C"
+ when: inventory_hostname == 'C'
+
+- name: echo after A and C have failed
+ command: echo CALLED_TASK_{{ inventory_hostname }}
diff --git a/test/integration/targets/handlers/roles/test_handlers/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers/handlers/main.yml
new file mode 100644
index 00000000..0261f935
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: set handler fact
+ set_fact:
+ handler_called: True
+- name: test handler
+ debug: msg="handler called"
diff --git a/test/integration/targets/handlers/roles/test_handlers/meta/main.yml b/test/integration/targets/handlers/roles/test_handlers/meta/main.yml
new file mode 100644
index 00000000..32cf5dda
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/test/integration/targets/handlers/roles/test_handlers/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers/tasks/main.yml
new file mode 100644
index 00000000..a857dacf
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers/tasks/main.yml
@@ -0,0 +1,52 @@
+# test code for handlers
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+- name: reset handler_called variable to false for all hosts
+ set_fact:
+ handler_called: False
+ tags: scenario1
+
+- name: notify the handler for host A only
+ shell: echo
+ notify:
+ - set handler fact
+ when: inventory_hostname == 'A'
+ tags: scenario1
+
+- name: force handler execution now
+ meta: "flush_handlers"
+ tags: scenario1
+
+- debug: var=handler_called
+ tags: scenario1
+
+- name: validate the handler only ran on one host
+ assert:
+ that:
+ - "inventory_hostname == 'A' and handler_called == True or handler_called == False"
+ tags: scenario1
+
+- name: 'test notify with loop'
+ debug: msg='a task'
+ changed_when: item == 1
+ notify: test handler
+ with_items:
+ - 1
+ - 2
+ tags: scenario2
diff --git a/test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml
new file mode 100644
index 00000000..abe01be4
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_include/handlers/main.yml
@@ -0,0 +1 @@
+- include: handlers.yml
diff --git a/test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml
new file mode 100644
index 00000000..84f0a583
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_include/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: 'main task'
+ debug: msg='main task'
+ changed_when: True
+ notify: test handler
diff --git a/test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml
new file mode 100644
index 00000000..0261f935
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_include_role/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: set handler fact
+ set_fact:
+ handler_called: True
+- name: test handler
+ debug: msg="handler called"
diff --git a/test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml b/test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml
new file mode 100644
index 00000000..32cf5dda
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_include_role/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml
new file mode 100644
index 00000000..fbc3d1c5
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_include_role/tasks/main.yml
@@ -0,0 +1,47 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+- name: reset handler_called variable to false for all hosts
+ set_fact:
+ handler_called: False
+ tags: scenario1
+
+- name: notify the handler for host A only
+ shell: echo
+ notify:
+ - set handler fact
+ when: inventory_hostname == 'A'
+ tags: scenario1
+
+- name: force handler execution now
+ meta: "flush_handlers"
+ tags: scenario1
+
+- debug: var=handler_called
+ tags: scenario1
+
+- name: validate the handler only ran on one host
+ assert:
+ that:
+ - "inventory_hostname == 'A' and handler_called == True or handler_called == False"
+ tags: scenario1
+
+# 'item' below is supplied by the loop in the playbook that includes this role
+- name: 'test notify with loop'
+ debug: msg='a task'
+ changed_when: item == 1
+ notify: test handler
+ tags: scenario2
diff --git a/test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml
new file mode 100644
index 00000000..3bfd82a2
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_listen/handlers/main.yml
@@ -0,0 +1,10 @@
+---
+- name: notify_listen_ran_4_3
+ set_fact:
+ notify_listen_ran_4_3: True
+ listen: notify_listen
+
+- name: notify_listen_in_role_4
+ set_fact:
+ notify_listen_in_role_4: True
+ listen: notify_listen_in_role
diff --git a/test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml
new file mode 100644
index 00000000..bac9b71e
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_listen/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: notify some handlers from a role
+ command: uptime
+ notify:
+ - notify_listen_from_role
+ - notify_listen_in_role
diff --git a/test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml
new file mode 100644
index 00000000..9268ce51
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/alternate.yml
@@ -0,0 +1,12 @@
+- name: set_handler_fact_1
+ set_fact:
+ handler1_called: True
+ handler1_alt_called: True
+
+- name: set_handler_fact_2
+ set_fact:
+ handler2_called: True
+ handler2_alt_called: True
+
+- name: count_handler
+ shell: echo . >> {{ handler_countpath }}
diff --git a/test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml
new file mode 100644
index 00000000..0dd408b7
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_meta/handlers/main.yml
@@ -0,0 +1,10 @@
+- name: set_handler_fact_1
+ set_fact:
+ handler1_called: True
+
+- name: set_handler_fact_2
+ set_fact:
+ handler2_called: True
+
+- name: count_handler
+ shell: echo . >> {{ handler_countpath }}
diff --git a/test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml b/test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml
new file mode 100644
index 00000000..d9f5c574
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_handlers_meta/tasks/main.yml
@@ -0,0 +1,75 @@
+# test code for handlers
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: notify the first handler
+ shell: echo
+ notify:
+ - set_handler_fact_1
+
+- name: force handler execution now
+ meta: "flush_handlers"
+
+- name: assert handler1 ran and not handler2
+ assert:
+ that:
+ - "handler1_called is defined"
+ - "handler2_called is not defined"
+
+- name: make a tempfile for counting
+ shell: mktemp
+ register: mktemp_out
+
+- name: register tempfile path
+ set_fact:
+ handler_countpath: "{{ mktemp_out.stdout }}"
+
+- name: notify the counting handler
+ shell: echo
+ notify:
+ - count_handler
+
+- name: notify the counting handler again
+ shell: echo
+ notify:
+ - count_handler
+
+- name: force handler execution now
+ meta: flush_handlers
+
+- name: get handler execution count
+ shell: cat {{ handler_countpath }} | grep -o . | wc -l
+ register: exec_count_out
+
+- debug: var=exec_count_out.stdout
+
+- name: ensure single execution
+ assert:
+ that:
+ - exec_count_out.stdout | int == 1
+
+- name: cleanup tempfile
+ file: path={{ handler_countpath }} state=absent
+
+- name: reset handler1_called
+ set_fact:
+ handler1_called: False
+
+- name: notify the second handler
+ shell: echo
+ notify:
+ - set_handler_fact_2
diff --git a/test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml b/test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml
new file mode 100644
index 00000000..7dbf3347
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_templating_in_handlers/handlers/main.yml
@@ -0,0 +1,21 @@
+---
+- name: name1
+ set_fact:
+ role_non_templated_name: True
+- name: "{{ handler2 }}"
+ set_fact:
+ role_templated_name: True
+- name: testlistener1
+ set_fact:
+ role_non_templated_listener: True
+ listen: name3
+- name: testlistener2
+ set_fact:
+ role_templated_listener: True
+ listen: "{{ handler4 }}"
+- name: name5
+ set_fact:
+ role_handler5: True
+- set_fact:
+ role_handler6: True
+ listen: name6
diff --git a/test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml b/test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml
new file mode 100644
index 00000000..54174172
--- /dev/null
+++ b/test/integration/targets/handlers/roles/test_templating_in_handlers/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- command: echo Hello World
+ notify:
+ - "{{ handler1 }}"
+ - "{{ handler2 }}"
+ - "{{ handler3 }}"
+ - "{{ handler4 }}"
+
+- meta: flush_handlers
+
+- assert:
+ that:
+ - role_non_templated_name is defined
+ - role_templated_name is defined
+ - role_non_templated_listener is defined
+ - role_templated_listener is undefined
+
+- command: echo
+ notify: "{{ handler_list }}"
+
+- meta: flush_handlers
+
+- assert:
+ that:
+ - role_handler5 is defined
+ - role_handler6 is defined
diff --git a/test/integration/targets/handlers/runme.sh b/test/integration/targets/handlers/runme.sh
new file mode 100755
index 00000000..59c81bce
--- /dev/null
+++ b/test/integration/targets/handlers/runme.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+
+set -eux
+
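+# Export once; the plain assignments below then update the value that each
+# ansible-playbook invocation sees in its environment.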
+export ANSIBLE_FORCE_HANDLERS
+
+ANSIBLE_FORCE_HANDLERS=false
+
+# simple handler test
+ansible-playbook test_handlers.yml -i inventory.handlers -v "$@" --tags scenario1
+
+# simple from_handlers test
+ansible-playbook from_handlers.yml -i inventory.handlers -v "$@" --tags scenario1
+
+ansible-playbook test_listening_handlers.yml -i inventory.handlers -v "$@"
+
+[ "$(ansible-playbook test_handlers.yml -i inventory.handlers -v "$@" --tags scenario2 -l A \
+| grep -E -o 'RUNNING HANDLER \[test_handlers : .*?]')" = "RUNNING HANDLER [test_handlers : test handler]" ]
+
+# Test forcing handlers using the linear and free strategy
+for strategy in linear free; do
+
+ export ANSIBLE_STRATEGY=$strategy
+
+ # Not forcing, should only run on successful host
+ [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal \
+ | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]
+
+ # Forcing from command line
+ [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal --force-handlers \
+ | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+
+ # Forcing from command line, should only run later tasks on unfailed hosts
+ [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal --force-handlers \
+ | grep -E -o CALLED_TASK_. | sort | uniq | xargs)" = "CALLED_TASK_B CALLED_TASK_D CALLED_TASK_E" ]
+
+ # Forcing from command line, should call handlers even if all hosts fail
+ [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal --force-handlers -e fail_all=yes \
+ | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+
+ # Forcing from ansible.cfg
+ [ "$(ANSIBLE_FORCE_HANDLERS=true ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags normal \
+ | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+
+ # Forcing true in play
+ [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags force_true_in_play \
+ | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+
+ # Forcing false in play, which overrides command line
+ [ "$(ansible-playbook test_force_handlers.yml -i inventory.handlers -v "$@" --tags force_false_in_play --force-handlers \
+ | grep -E -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]
+
+ unset ANSIBLE_STRATEGY
+
+done
+
+[ "$(ansible-playbook test_handlers_include.yml -i ../../inventory -v "$@" --tags playbook_include_handlers \
+| grep -E -o 'RUNNING HANDLER \[.*?]')" = "RUNNING HANDLER [test handler]" ]
+
+[ "$(ansible-playbook test_handlers_include.yml -i ../../inventory -v "$@" --tags role_include_handlers \
+| grep -E -o 'RUNNING HANDLER \[test_handlers_include : .*?]')" = "RUNNING HANDLER [test_handlers_include : test handler]" ]
+
+[ "$(ansible-playbook test_handlers_include_role.yml -i ../../inventory -v "$@" \
+| grep -E -o 'RUNNING HANDLER \[test_handlers_include_role : .*?]')" = "RUNNING HANDLER [test_handlers_include_role : test handler]" ]
+
+# Notify handler listen
+ansible-playbook test_handlers_listen.yml -i inventory.handlers -v "$@"
+
+# Notifying a nonexistent handler results in an error
+set +e
+result="$(ansible-playbook test_handlers_inexistent_notify.yml -i inventory.handlers "$@" 2>&1)"
+set -e
+grep -q "ERROR! The requested handler 'notify_inexistent_handler' was not found in either the main handlers list nor in the listening handlers list" <<< "$result"
+
+# Notifying a nonexistent handler does not error when ANSIBLE_ERROR_ON_MISSING_HANDLER=false
+ANSIBLE_ERROR_ON_MISSING_HANDLER=false ansible-playbook test_handlers_inexistent_notify.yml -i inventory.handlers -v "$@"
+
+ANSIBLE_ERROR_ON_MISSING_HANDLER=false ansible-playbook test_templating_in_handlers.yml -v "$@"
+
+# https://github.com/ansible/ansible/issues/36649
+output_dir=/tmp
+set +e
+result="$(ansible-playbook test_handlers_any_errors_fatal.yml -e output_dir=$output_dir -i inventory.handlers -v "$@" 2>&1)"
+set -e
+[ ! -f $output_dir/should_not_exist_B ] || (rm -f $output_dir/should_not_exist_B && exit 1)
+
+# https://github.com/ansible/ansible/issues/47287
+[ "$(ansible-playbook test_handlers_including_task.yml -i ../../inventory -v "$@" | grep -E -o 'failed=[0-9]+')" = "failed=0" ]
+
+# https://github.com/ansible/ansible/issues/27237
+set +e
+result="$(ansible-playbook test_handlers_template_run_once.yml -i inventory.handlers "$@" 2>&1)"
+set -e
+grep -q "handler A" <<< "$result"
+grep -q "handler B" <<< "$result"
diff --git a/test/integration/targets/handlers/test_force_handlers.yml b/test/integration/targets/handlers/test_force_handlers.yml
new file mode 100644
index 00000000..9cff7729
--- /dev/null
+++ b/test/integration/targets/handlers/test_force_handlers.yml
@@ -0,0 +1,27 @@
+---
+
+- name: test force handlers (default)
+ tags: normal
+ hosts: testgroup
+ gather_facts: False
+ roles:
+ - { role: test_force_handlers }
+ tasks:
+ - debug: msg="you should see this with --tags=normal"
+
+- name: test force handlers (set to true)
+ tags: force_true_in_play
+ hosts: testgroup
+ gather_facts: False
+ force_handlers: True
+ roles:
+ - { role: test_force_handlers, tags: force_true_in_play }
+
+
+- name: test force handlers (set to false)
+ tags: force_false_in_play
+ hosts: testgroup
+ gather_facts: False
+ force_handlers: False
+ roles:
+ - { role: test_force_handlers, tags: force_false_in_play }
diff --git a/test/integration/targets/handlers/test_handlers.yml b/test/integration/targets/handlers/test_handlers.yml
new file mode 100644
index 00000000..ae9847ba
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers.yml
@@ -0,0 +1,47 @@
+---
+- name: run handlers
+ hosts: A
+ gather_facts: False
+ roles:
+ - { role: test_handlers_meta, tags: ['scenario1'] }
+
+- name: verify final handler was run
+ hosts: A
+ gather_facts: False
+ tasks:
+ - name: verify handler2 ran
+ assert:
+ that:
+ - "not hostvars[inventory_hostname]['handler1_called']"
+ - "'handler2_called' in hostvars[inventory_hostname]"
+ tags: ['scenario1']
+
+- name: verify listening handlers
+ hosts: A
+ gather_facts: False
+ tasks:
+ - name: notify some handlers
+ command: echo foo
+ notify:
+ - notify_listen
+ post_tasks:
+ - name: assert all defined handlers ran without error
+ assert:
+ that:
+ - "notify_listen_ran_1 is defined"
+ - "notify_listen_ran_2 is defined"
+ handlers:
+ - name: first listening handler has a name
+ set_fact:
+ notify_listen_ran_1: True
+ listen: notify_listen
+ # second listening handler does not
+ - set_fact:
+ notify_listen_ran_2: True
+ listen: notify_listen
+
+- name: test handlers
+ hosts: testgroup
+ gather_facts: False
+ roles:
+ - { role: test_handlers }
diff --git a/test/integration/targets/handlers/test_handlers_any_errors_fatal.yml b/test/integration/targets/handlers/test_handlers_any_errors_fatal.yml
new file mode 100644
index 00000000..6b791a3b
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_any_errors_fatal.yml
@@ -0,0 +1,24 @@
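+# Host A's handler fails and any_errors_fatal is set, so the touch task below
+# must not run on either host; runme.sh checks that should_not_exist_B is absent.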
+- hosts:
+ - A
+ - B
+ gather_facts: no
+ any_errors_fatal: yes
+ vars:
+ output_dir: /tmp
+ tasks:
+ - name: Task one
+ debug:
+ msg: 'task 1'
+ changed_when: yes
+ notify: EXPECTED FAILURE failed_handler
+
+ - meta: flush_handlers
+
+ - name: This task should never happen
+ file:
+ path: "{{ output_dir }}/should_not_exist_{{ inventory_hostname }}"
+ state: touch
+ handlers:
+ - name: EXPECTED FAILURE failed_handler
+ fail:
+ when: 'inventory_hostname == "A"'
diff --git a/test/integration/targets/handlers/test_handlers_include.yml b/test/integration/targets/handlers/test_handlers_include.yml
new file mode 100644
index 00000000..5514fc10
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_include.yml
@@ -0,0 +1,14 @@
+- name: verify that play can include handler
+ hosts: testhost
+ tasks:
+ - debug: msg="main task"
+ changed_when: True
+ notify: test handler
+ tags: ['playbook_include_handlers']
+ handlers:
+ - include: handlers.yml
+
+- name: verify that role can include handler
+ hosts: testhost
+ roles:
+ - { role: test_handlers_include, tags: ['role_include_handlers'] }
diff --git a/test/integration/targets/handlers/test_handlers_include_role.yml b/test/integration/targets/handlers/test_handlers_include_role.yml
new file mode 100644
index 00000000..77e6b53a
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_include_role.yml
@@ -0,0 +1,8 @@
+- name: verify that play can include handler
+ hosts: testhost
+ tasks:
+ - include_role:
+ name: test_handlers_include_role
+ with_items:
+ - 1
+ - 2
diff --git a/test/integration/targets/handlers/test_handlers_including_task.yml b/test/integration/targets/handlers/test_handlers_including_task.yml
new file mode 100644
index 00000000..8f7933ab
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_including_task.yml
@@ -0,0 +1,16 @@
+---
+- name: Verify handler can include other tasks (#47287)
+ hosts: testhost
+ tasks:
+ - name: include a task from the tasks section
+ include_tasks: handlers.yml
+
+ - name: notify a handler
+ debug:
+ msg: notifying handler
+ changed_when: yes
+ notify: include a task from the handlers section
+
+ handlers:
+ - name: include a task from the handlers section
+ include_tasks: handlers.yml
diff --git a/test/integration/targets/handlers/test_handlers_inexistent_notify.yml b/test/integration/targets/handlers/test_handlers_inexistent_notify.yml
new file mode 100644
index 00000000..15de38aa
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_inexistent_notify.yml
@@ -0,0 +1,10 @@
+---
+- name: notify inexistent handler
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: test notify an inexistent handler
+ command: uptime
+ notify:
+ - notify_inexistent_handler
+ register: result
diff --git a/test/integration/targets/handlers/test_handlers_listen.yml b/test/integration/targets/handlers/test_handlers_listen.yml
new file mode 100644
index 00000000..dd2cd87d
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_listen.yml
@@ -0,0 +1,128 @@
+---
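+# 'listen' lets several handlers subscribe to one notification topic; the plays
+# below cover named handlers, unnamed handlers, mixed name/listen, and roles.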
+- name: test listen with named handlers
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: test notify handlers listen
+ command: uptime
+ notify:
+ - notify_listen
+ - meta: flush_handlers
+ - name: verify test notify handlers listen
+ assert:
+ that:
+ - "notify_listen_ran_1_1 is defined"
+ - "notify_listen_ran_1_2 is defined"
+ - "notify_listen_ran_1_3 is undefined"
+ handlers:
+ - name: notify_handler_ran_1_1
+ set_fact:
+ notify_listen_ran_1_1: True
+ listen: notify_listen
+ - name: notify_handler_ran_1_2
+ set_fact:
+ notify_listen_ran_1_2: True
+ listen: notify_listen
+ - name: notify_handler_ran_1_3
+ set_fact:
+ notify_handler_ran_1_3: True
+ listen: notify_listen2
+
+- name: test listen unnamed handlers
+ hosts: localhost
+ gather_facts: false
+ pre_tasks:
+ - name: notify some handlers
+ command: echo foo
+ notify:
+ - notify_listen
+ tasks:
+ - meta: flush_handlers
+ - name: assert all defined handlers ran without error
+ assert:
+ that:
+ - "notify_listen_ran_1 is defined"
+ - "notify_listen_ran_2 is defined"
+ - "notify_listen_ran_3 is undefined"
+ handlers:
+ - set_fact:
+ notify_listen_ran_1: True
+ listen: notify_listen
+ - set_fact:
+ notify_listen_ran_2: True
+ listen: notify_listen
+ - set_fact:
+ notify_handler_ran_3: True
+ listen: notify_listen2
+
+- name: test with mixed notify by name and listen
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: test notify handlers names and identical listen
+ command: uptime
+ notify:
+ - notify_listen
+ - meta: flush_handlers
+ - name: verify test notify handlers names and identical listen
+ assert:
+ that:
+ - "notify_handler_name_ran_3 is defined"
+ - "notify_handler_name_ran_3_1 is not defined"
+ - "notify_listen_ran_3_2 is defined"
+ - "notify_listen_ran_3_3 is defined"
+ - "not_notify_listen_3_4 is not defined"
+ handlers:
+ - name: notify_listen
+ set_fact:
+ notify_handler_name_ran_3: True
+ # this will not run, as a handler with an identical name was notified first
+ - name: notify_listen
+ set_fact:
+ notify_handler_name_ran_3_1: True
+ - name: notify_handler_ran_3_2
+ set_fact:
+ notify_listen_ran_3_2: True
+ listen: notify_listen
+ - name: notify_handler_ran_3_3
+ set_fact:
+ notify_listen_ran_3_3: True
+ listen: notify_listen
+ # this one is not notified
+ - name: not_notify_listen_3_4
+ set_fact:
+ not_notify_listen_3_4: True
+ listen: not_notified
+
+- name: test listen in roles
+ hosts: localhost
+ gather_facts: false
+ roles:
+ - role: test_handlers_listen
+ tasks:
+ - name: test notify handlers listen in roles
+ command: uptime
+ notify:
+ - notify_listen
+ - meta: flush_handlers
+ - name: verify test notify handlers listen in roles
+ assert:
+ that:
+ - "notify_listen_ran_4_1 is defined"
+ - "notify_listen_ran_4_2 is defined"
+ - "notify_listen_ran_4_3 is defined"
+ - "notify_listen_in_role_4 is defined"
+ - "notify_listen_from_role_4 is defined"
+ handlers:
+ - name: notify_listen_ran_4_1
+ set_fact:
+ notify_listen_ran_4_1: True
+ listen: notify_listen
+ - name: notify_listen_ran_4_2
+ set_fact:
+ notify_listen_ran_4_2: True
+ listen: notify_listen
+ - name: notify_listen_from_role_4
+ set_fact:
+ notify_listen_from_role_4: True
+ listen: notify_listen_from_role
diff --git a/test/integration/targets/handlers/test_handlers_template_run_once.yml b/test/integration/targets/handlers/test_handlers_template_run_once.yml
new file mode 100644
index 00000000..6edc32e2
--- /dev/null
+++ b/test/integration/targets/handlers/test_handlers_template_run_once.yml
@@ -0,0 +1,12 @@
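+# testvar is left undefined, so run_once templates to false and the handler
+# must run on both A and B (runme.sh greps for both outputs).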
+- hosts: A,B
+ gather_facts: no
+ tasks:
+ - debug:
+ changed_when: true
+ notify:
+ - handler
+ handlers:
+ - name: handler
+ debug:
+ msg: "handler {{ inventory_hostname }}"
+ run_once: "{{ testvar | default(False) }}"
diff --git a/test/integration/targets/handlers/test_listening_handlers.yml b/test/integration/targets/handlers/test_listening_handlers.yml
new file mode 100644
index 00000000..67bdad9a
--- /dev/null
+++ b/test/integration/targets/handlers/test_listening_handlers.yml
@@ -0,0 +1,24 @@
+---
+- name: verify listening handlers
+ hosts: A
+ gather_facts: False
+ tasks:
+ - name: notify some handlers
+ command: echo foo
+ notify:
+ - notify_listen
+ post_tasks:
+ - name: assert all defined handlers ran without error
+ assert:
+ that:
+ - "notify_listen_ran_1 is defined"
+ - "notify_listen_ran_2 is defined"
+ handlers:
+ - name: first listening handler has a name
+ set_fact:
+ notify_listen_ran_1: True
+ listen: notify_listen
+ # second listening handler does not
+ - set_fact:
+ notify_listen_ran_2: True
+ listen: notify_listen
diff --git a/test/integration/targets/handlers/test_templating_in_handlers.yml b/test/integration/targets/handlers/test_templating_in_handlers.yml
new file mode 100644
index 00000000..662b8c1e
--- /dev/null
+++ b/test/integration/targets/handlers/test_templating_in_handlers.yml
@@ -0,0 +1,62 @@
+- name: test templated values in handlers
+ hosts: localhost
+ gather_facts: no
+ vars:
+ handler1: name1
+ handler2: name2
+ handler3: name3
+ handler4: name4
+ handler_list:
+ - name5
+ - name6
+
+ handlers:
+ - name: name1
+ set_fact:
+ non_templated_name: True
+ - name: "{{ handler2 }}"
+ set_fact:
+ templated_name: True
+ - name: testlistener1
+ set_fact:
+ non_templated_listener: True
+ listen: name3
+ - name: testlistener2
+ set_fact:
+ templated_listener: True
+ listen: "{{ handler4 }}"
+ - name: name5
+ set_fact:
+ handler5: True
+ - set_fact:
+ handler6: True
+ listen: name6
+
+ tasks:
+ - command: echo Hello World
+ notify:
+ - "{{ handler1 }}"
+ - "{{ handler2 }}"
+ - "{{ handler3 }}"
+ - "{{ handler4 }}"
+
+ - meta: flush_handlers
+
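+ # Handler names are templated when matching notifications, but 'listen'
+ # topics are not templated here, so the handler listening on the value of
+ # handler4 never fires.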
+ - assert:
+ that:
+ - non_templated_name is defined
+ - templated_name is defined
+ - non_templated_listener is defined
+ - templated_listener is undefined
+
+ - command: echo
+ notify: "{{ handler_list }}"
+
+ - meta: flush_handlers
+
+ - assert:
+ that:
+ - handler5 is defined
+ - handler6 is defined
+
+ - include_role: name=test_templating_in_handlers
diff --git a/test/integration/targets/hash/aliases b/test/integration/targets/hash/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/hash/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/hash/group_vars/all b/test/integration/targets/hash/group_vars/all
new file mode 100644
index 00000000..805ac26a
--- /dev/null
+++ b/test/integration/targets/hash/group_vars/all
@@ -0,0 +1,3 @@
+# variables used for hash merging behavior testing
+test_hash:
+ group_vars_all: "this is in group_vars/all"
diff --git a/test/integration/targets/hash/host_vars/testhost b/test/integration/targets/hash/host_vars/testhost
new file mode 100644
index 00000000..3a75ee66
--- /dev/null
+++ b/test/integration/targets/hash/host_vars/testhost
@@ -0,0 +1,2 @@
+test_hash:
+ host_vars_testhost: "this is in host_vars/testhost"
diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml
new file mode 100644
index 00000000..10cc09f3
--- /dev/null
+++ b/test/integration/targets/hash/roles/test_hash_behaviour/defaults/main.yml
@@ -0,0 +1,21 @@
+# test code for the hash variable behavior
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+---
+test_hash:
+ default_vars: "this is in role defaults/main.yml"
diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml
new file mode 100644
index 00000000..59adf997
--- /dev/null
+++ b/test/integration/targets/hash/roles/test_hash_behaviour/meta/main.yml
@@ -0,0 +1,17 @@
+# test code for the hash variable behavior
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml
new file mode 100644
index 00000000..bc635498
--- /dev/null
+++ b/test/integration/targets/hash/roles/test_hash_behaviour/tasks/main.yml
@@ -0,0 +1,37 @@
+# test code for the hash variable behaviour
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: debug hash behaviour result
+ debug:
+ var: "{{ lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') }}"
+ verbosity: 2
+
+- name: assert hash behaviour is merge or replace
+ assert:
+ that:
+ - lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') in ('merge', 'replace')
+
+- name: debug test_hash var
+ debug:
+ var: test_hash
+ verbosity: 2
+
+- name: assert the dictionary values match
+ assert:
+ that:
+ - "lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') == 'merge' and test_hash == merged_hash or lookup('env', 'ANSIBLE_HASH_BEHAVIOUR') == 'replace' and test_hash == replaced_hash"
diff --git a/test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml b/test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml
new file mode 100644
index 00000000..2068e9fb
--- /dev/null
+++ b/test/integration/targets/hash/roles/test_hash_behaviour/vars/main.yml
@@ -0,0 +1,21 @@
+# test code for the hash variable behavior
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+---
+test_hash:
+ role_vars: "this is in role vars/main.yml"
diff --git a/test/integration/targets/hash/runme.sh b/test/integration/targets/hash/runme.sh
new file mode 100755
index 00000000..9448e4e0
--- /dev/null
+++ b/test/integration/targets/hash/runme.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eux
+
+JSON_ARG='{"test_hash":{"extra_args":"this is an extra arg"}}'
+
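+# Run the same playbook under both hash behaviours: with 'replace' only the
+# extra-vars dict should survive in test_hash, with 'merge' every source combines.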
+ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i ../../inventory -v "$@" -e "${JSON_ARG}"
+ANSIBLE_HASH_BEHAVIOUR=merge ansible-playbook test_hash.yml -i ../../inventory -v "$@" -e "${JSON_ARG}"
diff --git a/test/integration/targets/hash/test_hash.yml b/test/integration/targets/hash/test_hash.yml
new file mode 100644
index 00000000..37b56e65
--- /dev/null
+++ b/test/integration/targets/hash/test_hash.yml
@@ -0,0 +1,21 @@
+- hosts: testhost
+ vars_files:
+ - vars/test_hash_vars.yml
+ vars:
+ test_hash:
+ playbook_vars: "this is a playbook variable"
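+ # replaced_hash and merged_hash are the expected results under
+ # ANSIBLE_HASH_BEHAVIOUR=replace and ANSIBLE_HASH_BEHAVIOUR=merge respectively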
+ replaced_hash:
+ extra_args: "this is an extra arg"
+ merged_hash:
+ default_vars: "this is in role defaults/main.yml"
+ extra_args: "this is an extra arg"
+ group_vars_all: "this is in group_vars/all"
+ host_vars_testhost: "this is in host_vars/testhost"
+ playbook_vars: "this is a playbook variable"
+ role_argument: "this is a role argument variable"
+ role_vars: "this is in role vars/main.yml"
+ vars_file: "this is in a vars_file"
+ roles:
+ - role: test_hash_behaviour
+ test_hash:
+ role_argument: 'this is a role argument variable'
diff --git a/test/integration/targets/hash/vars/test_hash_vars.yml b/test/integration/targets/hash/vars/test_hash_vars.yml
new file mode 100644
index 00000000..e25f8576
--- /dev/null
+++ b/test/integration/targets/hash/vars/test_hash_vars.yml
@@ -0,0 +1,3 @@
+---
+test_hash:
+ vars_file: "this is in a vars_file"
diff --git a/test/integration/targets/hosts_field/aliases b/test/integration/targets/hosts_field/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/hosts_field/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/hosts_field/inventory.hosts_field b/test/integration/targets/hosts_field/inventory.hosts_field
new file mode 100644
index 00000000..46644046
--- /dev/null
+++ b/test/integration/targets/hosts_field/inventory.hosts_field
@@ -0,0 +1 @@
+42 ansible_host=127.0.0.42 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/hosts_field/runme.sh b/test/integration/targets/hosts_field/runme.sh
new file mode 100755
index 00000000..1291933c
--- /dev/null
+++ b/test/integration/targets/hosts_field/runme.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# The hosts field in the playbook is a list of strings consisting solely of digits
+ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \
+ -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \
+ -t string_digit_host_in_list -v "$@" | tee test_hosts_field.out
+grep 'Running on 42' test_hosts_field.out 2>&1
+test "$(grep -c 'ok=1' test_hosts_field.out)" = 1
+
+# Hosts taken from kv extra_var on the CLI
+ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \
+ -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \
+ -t hosts_from_kv_string -v "$@" | tee test_hosts_field.out
+grep 'Running on 42' test_hosts_field.out 2>&1
+test "$(grep -c 'ok=1' test_hosts_field.out)" = 1
+
+# hosts is taken from an all-digit JSON extra_vars string on the CLI
+ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \
+ -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \
+ -t hosts_from_cli_json_string -v "$@" | tee test_hosts_field.out
+grep 'Running on 42' test_hosts_field.out 2>&1
+test "$(grep -c 'ok=1' test_hosts_field.out)" = 1
+
+# hosts is taken from a json list in extra_vars on the CLI
+ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \
+ -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \
+ -t hosts_from_cli_json_list -v "$@" | tee test_hosts_field.out
+grep 'Running on 42' test_hosts_field.out 2>&1
+grep 'Running on localhost' test_hosts_field.out 2>&1
+test "$(grep -c 'ok=1' test_hosts_field.out)" = 2
+
+# hosts is taken from a json string in an extra_vars file
+ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \
+ -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \
+ -t hosts_from_json_file_string -v "$@" | tee test_hosts_field.out
+grep 'Running on 42' test_hosts_field.out 2>&1
+test "$(grep -c 'ok=1' test_hosts_field.out)" = 1
+
+# hosts is taken from a json list in an extra_vars file
+ansible-playbook test_hosts_field.yml -i inventory.hosts_field -e 'target_kv=42' \
+ -e '{ "target_json_cli": "42", "target_json_cli_list": ["42", "localhost"] }' -e "@test_hosts_field.json" \
+ -t hosts_from_json_file_list -v "$@" | tee test_hosts_field.out
+grep 'Running on 42' test_hosts_field.out 2>&1
+grep 'Running on localhost' test_hosts_field.out 2>&1
+test "$(grep -c 'ok=1' test_hosts_field.out)" = 2
+
+rm test_hosts_field.out
diff --git a/test/integration/targets/hosts_field/test_hosts_field.json b/test/integration/targets/hosts_field/test_hosts_field.json
new file mode 100644
index 00000000..26875560
--- /dev/null
+++ b/test/integration/targets/hosts_field/test_hosts_field.json
@@ -0,0 +1 @@
+{ "target_json_file": "42", "target_json_file_list": ["42", "localhost"] }
diff --git a/test/integration/targets/hosts_field/test_hosts_field.yml b/test/integration/targets/hosts_field/test_hosts_field.yml
new file mode 100644
index 00000000..568d7025
--- /dev/null
+++ b/test/integration/targets/hosts_field/test_hosts_field.yml
@@ -0,0 +1,62 @@
+---
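+# The commented-out plays below cover bare integer and digit-string hosts values;
+# runme.sh exercises only the list-based and extra-vars variants that follow.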
+#- name: Host in playbook is an integer
+# hosts: 42
+# tags: numeric_host
+# tasks:
+# - command: echo 'Running on {{ inventory_hostname }}'
+
+#- name: Host in playbook is a string of digits
+# hosts: "42"
+# tags: string_digit_host
+# tasks:
+# - command: echo 'Running on {{ inventory_hostname }}'
+
+#- name: Host in playbook is a list of integers
+# hosts:
+# - 42
+# tags: numeric_host_in_list
+# tasks:
+# - command: echo 'Running on {{ inventory_hostname }}'
+
+- name: Host in playbook is a list of strings of digits
+ hosts:
+ - "42"
+ gather_facts: False
+ tags: string_digit_host_in_list
+ tasks:
+ - command: echo 'Running on {{ inventory_hostname }}'
+
+- name: Hosts taken from kv extra_var on the CLI
+ hosts: "{{ target_kv }}"
+ gather_facts: False
+ tags: hosts_from_kv_string
+ tasks:
+ - command: echo 'Running on {{ inventory_hostname }}'
+
+- name: Hosts taken from a json string on the CLI
+ hosts: "{{ target_json_cli }}"
+ gather_facts: False
+ tags: hosts_from_cli_json_string
+ tasks:
+ - command: echo 'Running on {{ inventory_hostname }}'
+
+- name: Hosts taken from a json list on the CLI
+ hosts: "{{ target_json_cli_list }}"
+ gather_facts: False
+ tags: hosts_from_cli_json_list
+ tasks:
+ - command: echo 'Running on {{ inventory_hostname }}'
+
+- name: Hosts is taken from a json string in an extra_vars file
+ hosts: "{{ target_json_file }}"
+ gather_facts: False
+ tags: hosts_from_json_file_string
+ tasks:
+ - command: echo 'Running on {{ inventory_hostname }}'
+
+- name: Hosts is taken from a json list in an extra_vars file
+ hosts: "{{ target_json_file_list }}"
+ gather_facts: False
+ tags: hosts_from_json_file_list
+ tasks:
+ - command: echo 'Running on {{ inventory_hostname }}'
diff --git a/test/integration/targets/ignore_errors/aliases b/test/integration/targets/ignore_errors/aliases
new file mode 100644
index 00000000..3005e4b2
--- /dev/null
+++ b/test/integration/targets/ignore_errors/aliases
@@ -0,0 +1 @@
+shippable/posix/group4
diff --git a/test/integration/targets/ignore_errors/meta/main.yml b/test/integration/targets/ignore_errors/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/ignore_errors/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/ignore_errors/tasks/main.yml b/test/integration/targets/ignore_errors/tasks/main.yml
new file mode 100644
index 00000000..a6964e04
--- /dev/null
+++ b/test/integration/targets/ignore_errors/tasks/main.yml
@@ -0,0 +1,22 @@
+# test code
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: this will not stop the playbook
+ shell: /bin/false
+ register: failed
+ ignore_errors: True
diff --git a/test/integration/targets/ignore_unreachable/aliases b/test/integration/targets/ignore_unreachable/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py b/test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py
new file mode 100644
index 00000000..b5e9ca88
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py
@@ -0,0 +1,11 @@
+import ansible.plugins.connection.local as ansible_local
+from ansible.errors import AnsibleConnectionFailure
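+# Fake connection plugin: behaves like the local connection but always fails
+# when executing a command, to exercise ignore_unreachable at the exec stage.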
+
+from ansible.utils.display import Display
+display = Display()
+
+
+class Connection(ansible_local.Connection):
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ display.debug('Intercepted call to exec remote command')
+ raise AnsibleConnectionFailure('BADLOCAL Error: this is supposed to fail')
diff --git a/test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py b/test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py
new file mode 100644
index 00000000..98927997
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py
@@ -0,0 +1,11 @@
+import ansible.plugins.connection.local as ansible_local
+from ansible.errors import AnsibleConnectionFailure
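+# Fake connection plugin: behaves like the local connection but always fails
+# when transferring a file, to exercise ignore_unreachable at the put_file stage.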
+
+from ansible.utils.display import Display
+display = Display()
+
+
+class Connection(ansible_local.Connection):
+ def put_file(self, in_path, out_path):
+ display.debug('Intercepted call to send data')
+ raise AnsibleConnectionFailure('BADLOCAL Error: this is supposed to fail')
diff --git a/test/integration/targets/ignore_unreachable/inventory b/test/integration/targets/ignore_unreachable/inventory
new file mode 100644
index 00000000..495a68cf
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/inventory
@@ -0,0 +1,3 @@
+nonexistent ansible_host=169.254.199.200
+bad_put_file ansible_host=localhost ansible_connection=bad_put_file
+bad_exec ansible_host=localhost ansible_connection=bad_exec
diff --git a/test/integration/targets/ignore_unreachable/meta/main.yml b/test/integration/targets/ignore_unreachable/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/ignore_unreachable/runme.sh b/test/integration/targets/ignore_unreachable/runme.sh
new file mode 100755
index 00000000..5b0ef190
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/runme.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -eux
+
+export ANSIBLE_CONNECTION_PLUGINS=./fake_connectors
+# use fake connectors that raise errors at different stages
+ansible-playbook test_with_bad_plugins.yml -i inventory -v "$@"
+unset ANSIBLE_CONNECTION_PLUGINS
+
+ansible-playbook test_cannot_connect.yml -i inventory -v "$@"
+
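+# Without ignore_unreachable, a play that targets an unreachable host must fail.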
+if ansible-playbook test_base_cannot_connect.yml -i inventory -v "$@"; then
+ echo "Playbook intended to fail succeeded. Connection succeeded to nonexistent host"
+ exit 99
+else
+ echo "Connection to nonexistent hosts failed without using ignore_unreachable. Success!"
+fi
diff --git a/test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml b/test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml
new file mode 100644
index 00000000..931c82bf
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/test_base_cannot_connect.yml
@@ -0,0 +1,5 @@
+- hosts: [localhost, nonexistent]
+ gather_facts: false
+ tasks:
+ - name: Hi
+ ping:
diff --git a/test/integration/targets/ignore_unreachable/test_cannot_connect.yml b/test/integration/targets/ignore_unreachable/test_cannot_connect.yml
new file mode 100644
index 00000000..64e2bfea
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/test_cannot_connect.yml
@@ -0,0 +1,29 @@
+---
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Hi
+ ping:
+- hosts: [localhost, nonexistent]
+ ignore_unreachable: true
+ gather_facts: false
+ tasks:
+ - name: Hi
+ ping:
+- hosts: nonexistent
+ ignore_unreachable: true
+ gather_facts: false
+ tasks:
+ - name: Hi
+ ping:
+ - name: This should print anyway
+ debug:
+ msg: This should print even though the host was unreachable
+ - name: Hi
+ ping:
+ register: should_fail
+ - assert:
+ that:
+ - 'should_fail is unreachable'
+ - 'not (should_fail is skipped)'
+ - 'not (should_fail is failed)'
diff --git a/test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml b/test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml
new file mode 100644
index 00000000..5d62f199
--- /dev/null
+++ b/test/integration/targets/ignore_unreachable/test_with_bad_plugins.yml
@@ -0,0 +1,24 @@
+- hosts: bad_put_file
+ gather_facts: false
+ ignore_unreachable: true
+ tasks:
+ - name: Hi
+ ping:
+- hosts: bad_put_file
+ gather_facts: true
+ ignore_unreachable: true
+ tasks:
+ - name: Hi
+ ping:
+- hosts: bad_exec
+ gather_facts: false
+ ignore_unreachable: true
+ tasks:
+ - name: Hi
+ ping:
+- hosts: bad_exec
+ gather_facts: true
+ ignore_unreachable: true
+ tasks:
+ - name: Hi
+ ping:
diff --git a/test/integration/targets/incidental_azure_rm_mariadbserver/aliases b/test/integration/targets/incidental_azure_rm_mariadbserver/aliases
new file mode 100644
index 00000000..9901373a
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_mariadbserver/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+destructive
+shippable/azure/incidental
diff --git a/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml b/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml
new file mode 100644
index 00000000..5b33ffb9
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml
@@ -0,0 +1,640 @@
+- name: Prepare random number
+ set_fact:
+ rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ run_once: yes
+
+- name: Create instance of MariaDB Server -- check mode
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ check_mode: yes
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+ - output.state == 'Ready'
+
+- name: Create again instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+ - output.state == 'Ready'
+
+- name: Update instance of MariaDB Server, change storage size
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 128000
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+ - output.state == 'Ready'
+- debug:
+ var: output
+
+- name: Gather facts MariaDB Server
+ azure_rm_mariadbserver_facts:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that storage size is correct
+ assert:
+ that:
+ - output.servers[0]['storage_mb'] == 128000
+
+- name: Create second instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ tags:
+ aaa: bbb
+
+- name: Update tags on second instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ tags:
+ ccc: ddd
+
+- name: Gather facts MariaDB Server
+ azure_rm_mariadbserver_facts:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ register: output
+
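+# Both tag sets should be present: the second call appends tags rather than replacing the old ones.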
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.servers[0]['id'] != None
+ - output.servers[0]['name'] != None
+ - output.servers[0]['location'] != None
+ - output.servers[0]['sku']['name'] != None
+ - output.servers[0]['sku']['tier'] != None
+ - output.servers[0]['sku']['capacity'] != None
+ - output.servers[0]['version'] != None
+ - output.servers[0]['user_visible_state'] != None
+ - output.servers[0]['fully_qualified_domain_name'] != None
+ - output.servers[0]['tags']['aaa'] == 'bbb'
+ - output.servers[0]['tags']['ccc'] == 'ddd'
+
+- name: Gather facts MariaDB Server
+ azure_rm_mariadbserver_facts:
+ resource_group: "{{ resource_group }}"
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.servers[0]['id'] != None
+ - output.servers[0]['name'] != None
+ - output.servers[0]['location'] != None
+ - output.servers[0]['sku']['name'] != None
+ - output.servers[0]['sku']['tier'] != None
+ - output.servers[0]['sku']['capacity'] != None
+ - output.servers[0]['version'] != None
+ - output.servers[0]['user_visible_state'] != None
+ - output.servers[0]['fully_qualified_domain_name'] != None
+ - output.servers[1]['id'] != None
+ - output.servers[1]['name'] != None
+ - output.servers[1]['location'] != None
+ - output.servers[1]['sku']['name'] != None
+ - output.servers[1]['sku']['tier'] != None
+ - output.servers[1]['sku']['capacity'] != None
+ - output.servers[1]['version'] != None
+ - output.servers[1]['user_visible_state'] != None
+ - output.servers[1]['fully_qualified_domain_name'] != None
+
+#
+# azure_rm_mariadbdatabase tests below
+#
+- name: Create instance of MariaDB Database -- check mode
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ check_mode: yes
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_swedish_ci
+ charset: latin1
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+ - output.name == 'testdatabase'
+
+- name: Create again instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_swedish_ci
+ charset: latin1
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+ - output.name == 'testdatabase'
+
+- name: Try to update database without force_update
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_czech_ci
+ charset: latin1
+ ignore_errors: yes
+ register: output
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - output.changed == False
+
+- name: Update instance of database using force_update
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_czech_ci
+ charset: latin1
+ force_update: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+ - output.name == 'testdatabase'
+
+- name: Create second instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase2
+
+- name: Gather facts MariaDB Database
+ azure_rm_mariadbdatabase_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ register: output
+
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.databases[0]['server_name'] != None
+ - output.databases[0]['name'] != None
+ - output.databases[0]['charset'] != None
+ - output.databases[0]['collation'] != None
+
+- name: Gather facts MariaDB Database
+ azure_rm_mariadbdatabase_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.databases[0]['server_name'] != None
+ - output.databases[0]['name'] != None
+ - output.databases[0]['charset'] != None
+ - output.databases[0]['collation'] != None
+ - output.databases[1]['server_name'] != None
+ - output.databases[1]['name'] != None
+ - output.databases[1]['charset'] != None
+ - output.databases[1]['collation'] != None
+
+- name: Delete instance of MariaDB Database -- check mode
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ state: absent
+ check_mode: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete nonexistent instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ state: absent
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+#
+# azure_rm_firewallrule tests below
+#
+- name: Create instance of Firewall Rule -- check mode
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ check_mode: yes
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+
+- name: Create again instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+- name: Delete instance of Firewall Rule -- check mode
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ state: absent
+ check_mode: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of Firewall Rule -- second
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}second
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Gather facts MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.rules[0].id != None
+ - output.rules[0].server_name != None
+ - output.rules[0].name != None
+ - output.rules[0].start_ip_address != None
+ - output.rules[0].end_ip_address != None
+ - "output.rules | length == 1"
+
+- name: Gather facts MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.rules[0].id != None
+ - output.rules[0].server_name != None
+ - output.rules[0].name != None
+ - output.rules[0].start_ip_address != None
+ - output.rules[0].end_ip_address != None
+ - output.rules[1].id != None
+ - output.rules[1].name != None
+ - output.rules[1].start_ip_address != None
+ - output.rules[1].end_ip_address != None
+ - "output.rules | length == 2"
+
+- name: Delete instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete nonexistent instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+- name: Delete instance of Firewall Rule - second
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}second
+ state: absent
+
+- name: Gather facts MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ register: output
+- name: Assert that empty list was returned
+ assert:
+ that:
+ - output.changed == False
+ - "output.rules | length == 0"
+
+#
+# configuration
+#
+- name: Create instance of Configuration -- check mode
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ value: "ON"
+ check_mode: yes
+ register: output
+- name: Assert that change was registered
+ assert:
+ that:
+ - output.changed
+
+- name: Gather facts of default configuration
+ azure_rm_mariadbconfiguration_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ register: output
+- name: Get facts of event_scheduler
+ debug:
+ var: output
+
+- name: Try to delete default configuration
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ state: absent
+ register: output
+- name: Assert that no change was registered
+ assert:
+ that:
+ - not output.changed
+
+- name: Try to change default configuration
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ value: "ON"
+ register: output
+- name: Assert that change was registered
+ assert:
+ that:
+ - output.changed
+
+- name: Try to change default configuration -- idempotent
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ value: "ON"
+ register: output
+- name: Assert that no change was registered
+ assert:
+ that:
+ - not output.changed
+
+- name: Try to reset configuration
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ state: absent
+ register: output
+- name: Assert that change was registered
+ assert:
+ that:
+ - output.changed
+
+- name: Try to reset configuration -- idempotent
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ state: absent
+ register: output
+- name: Assert that no change was registered
+ assert:
+ that:
+ - not output.changed
+
+- name: Gather facts MariaDB Configuration
+ azure_rm_mariadbconfiguration_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.settings[0].id != None
+ - output.settings[0].name != None
+ - output.settings[0].value != None
+ - output.settings[0].description != None
+ - output.settings[0].source != None
+ - output.settings | length == 1
+
+- name: Gather facts MariaDB Configuration
+ azure_rm_mariadbconfiguration_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.settings[0].id != None
+ - output.settings[0].name != None
+ - output.settings[0].value != None
+ - output.settings[0].description != None
+ - output.settings[0].source != None
+ - output.settings | length > 1
+
+#
+# clean up azure_rm_mariadbserver test
+#
+
+- name: Delete instance of MariaDB Server -- check mode
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ state: absent
+ check_mode: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete nonexistent instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+- name: Delete second instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ state: absent
+ async: 400
+ poll: 0
diff --git a/test/integration/targets/incidental_azure_rm_resource/aliases b/test/integration/targets/incidental_azure_rm_resource/aliases
new file mode 100644
index 00000000..9901373a
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_resource/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+destructive
+shippable/azure/incidental
diff --git a/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml b/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml
new file mode 100644
index 00000000..7c3024a5
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml
@@ -0,0 +1,158 @@
+- name: Prepare random number
+ set_fact:
+ nsgname: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ storageaccountname: "stacc{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ dbname: "mdb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ run_once: yes
+
+- name: Call REST API
+ azure_rm_resource:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ body:
+ location: eastus
+ idempotency: yes
+ register: output
+
+- name: Assert that something has changed
+ assert:
+ that: output.changed
+
+- name: Call REST API
+ azure_rm_resource:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ body:
+ location: eastus
+ idempotency: yes
+ register: output
+
+- name: Assert that nothing has changed
+ assert:
+ that: not output.changed
+
+- name: Call REST API
+ azure_rm_resource:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ body:
+ location: eastus
+ tags:
+ a: "abc"
+ b: "cde"
+ idempotency: yes
+ register: output
+
+- name: Assert that something has changed
+ assert:
+ that: output.changed
+
+- name: Try to get information about the created resource
+ azure_rm_resource_facts:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ register: output
+
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response[0]['name'] != None
+ - output.response | length == 1
+
+- name: Try to query a list
+ azure_rm_resource_facts:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ register: output
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response[0]['name'] != None
+ - output.response | length >= 1
+
+- name: Try to query a list - same without API version
+ azure_rm_resource_facts:
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ register: output
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response[0]['name'] != None
+ - output.response | length >= 1
+
+- name: Query all the resources in the resource group
+ azure_rm_resource_facts:
+ resource_group: "{{ resource_group }}"
+ resource_type: resources
+ register: output
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response | length >= 1
+
+- name: Create storage account that requires LRO polling
+ azure_rm_resource:
+ polling_timeout: 600
+ polling_interval: 60
+ api_version: '2018-07-01'
+ resource_group: "{{ resource_group }}"
+ provider: Storage
+ resource_type: storageAccounts
+ resource_name: "{{ storageaccountname }}"
+ body:
+ sku:
+ name: Standard_GRS
+ kind: Storage
+ location: eastus
+ register: output
+
+- name: Assert that storage was successfully created
+ assert:
+ that: "output['response']['name'] == '{{ storageaccountname }}'"
+
+
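+# Keys are retrieved with a POST to the listkeys subresource, which has a type but no resource name.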
+- name: Try to list storage keys -- special case when subresource part has no name
+ azure_rm_resource:
+ resource_group: "{{ resource_group }}"
+ provider: storage
+ resource_type: storageAccounts
+ resource_name: "{{ storageaccountname }}"
+ subresource:
+ - type: listkeys
+ api_version: '2018-03-01-preview'
+ method: POST
+ register: keys
+
+- name: Assert that key was returned
+ assert:
+ that: keys['response']['keys'][0]['value'] | length > 0
+
+- name: Delete storage - without API version
+ azure_rm_resource:
+ polling_timeout: 600
+ polling_interval: 60
+ method: DELETE
+ resource_group: "{{ resource_group }}"
+ provider: Storage
+ resource_type: storageAccounts
+ resource_name: "{{ storageaccountname }}"
diff --git a/test/integration/targets/incidental_cloud_init_data_facts/aliases b/test/integration/targets/incidental_cloud_init_data_facts/aliases
new file mode 100644
index 00000000..85f7fe0f
--- /dev/null
+++ b/test/integration/targets/incidental_cloud_init_data_facts/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml b/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml
new file mode 100644
index 00000000..eca905c6
--- /dev/null
+++ b/test/integration/targets/incidental_cloud_init_data_facts/tasks/main.yml
@@ -0,0 +1,50 @@
+---
+- name: test cloud-init
+ # TODO: check for a workaround
+ # install 'cloud-init' failed: dpkg-divert: error: `diversion of /etc/init/ureadahead.conf
+ # to /etc/init/ureadahead.conf.disabled by cloud-init' clashes with `local diversion of
+ # /etc/init/ureadahead.conf to /etc/init/ureadahead.conf.distrib
+ # https://bugs.launchpad.net/ubuntu/+source/ureadahead/+bug/997838
+ # Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions
+ # (!= 42 and >= 15) as cloud-init will install the Python 3 package, breaking our build on py2.
+ when:
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
+ - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
+ block:
+ - name: setup install cloud-init
+ package:
+ name:
+ - cloud-init
+ - udev
+
+ - name: setup run cloud-init
+ service:
+ name: cloud-init-local
+ state: restarted
+
+ - name: test gather cloud-init facts in check mode
+ cloud_init_data_facts:
+ check_mode: yes
+ register: result
+ - name: verify test gather cloud-init facts in check mode
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
+
+ - name: test gather cloud-init facts
+ cloud_init_data_facts:
+ register: result
+ - name: verify test gather cloud-init facts
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
diff --git a/test/integration/targets/incidental_cloudformation/aliases b/test/integration/targets/incidental_cloudformation/aliases
new file mode 100644
index 00000000..29f60feb
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_cloudformation/defaults/main.yml b/test/integration/targets/incidental_cloudformation/defaults/main.yml
new file mode 100644
index 00000000..aaf0ca7e
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/defaults/main.yml
@@ -0,0 +1,8 @@
+stack_name: "{{ resource_prefix }}"
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
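+# seeding random() with the resource prefix keeps the generated CIDRs stable between runs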
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
diff --git a/test/integration/targets/incidental_cloudformation/files/cf_template.json b/test/integration/targets/incidental_cloudformation/files/cf_template.json
new file mode 100644
index 00000000..ff4c5693
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/files/cf_template.json
@@ -0,0 +1,37 @@
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+
+ "Description" : "Create an Amazon EC2 instance.",
+
+ "Parameters" : {
+ "InstanceType" : {
+ "Description" : "EC2 instance type",
+ "Type" : "String",
+ "Default" : "t3.nano",
+ "AllowedValues" : [ "t3.micro", "t3.nano"]
+ },
+ "ImageId" : {
+ "Type" : "String"
+ },
+ "SubnetId" : {
+ "Type" : "String"
+ }
+ },
+
+ "Resources" : {
+ "EC2Instance" : {
+ "Type" : "AWS::EC2::Instance",
+ "Properties" : {
+ "InstanceType" : { "Ref" : "InstanceType" },
+ "ImageId" : { "Ref" : "ImageId" },
+ "SubnetId": { "Ref" : "SubnetId" }
+ }
+ }
+ },
+
+ "Outputs" : {
+ "InstanceId" : {
+ "Value" : { "Ref" : "EC2Instance" }
+ }
+ }
+}
diff --git a/test/integration/targets/incidental_cloudformation/tasks/main.yml b/test/integration/targets/incidental_cloudformation/tasks/main.yml
new file mode 100644
index 00000000..10924bcd
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/tasks/main.yml
@@ -0,0 +1,476 @@
+---
+- name: set up aws connection info
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key | default(omit) }}"
+ aws_secret_key: "{{ aws_secret_key | default(omit) }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region | default(omit) }}"
+ no_log: yes
+
+- module_defaults:
+ cloudformation:
+ <<: *aws_connection_info
+ cloudformation_info:
+ <<: *aws_connection_info
+
+ block:
+
+ # ==== Env setup ==========================================================
+ - name: list available AZs
+ aws_az_info:
+ <<: *aws_connection_info
+ register: region_azs
+
+ - name: pick an AZ for testing
+ set_fact:
+ availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
+
+ - name: Create a test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Cloudformation testing
+ <<: *aws_connection_info
+ register: testing_vpc
+
+ - name: Create a test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ <<: *aws_connection_info
+ register: testing_subnet
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ <<: *aws_connection_info
+ register: ec2_amis
+
+ - name: Set fact with latest AMI
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
+
+ # ==== Cloudformation tests ===============================================
+
+ # 1. Basic stack creation (check mode, actual run and idempotency)
+ # 2. Tags
+ # 3. cloudformation_info tests (basic + all_facts)
+ # 4. termination_protection
+ # 5. create_changeset + changeset_name
+
+ # There is still scope to add tests for -
+ # 1. capabilities
+ # 2. stack_policy
+ # 3. on_create_failure (covered in unit tests)
+ # 4. Passing in a role
+ # 5. nested stacks?
+
+
+ - name: create a cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
+
+ - name: create a cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'events' in cf_stack"
+ - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: create a cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+
+ - name: create a cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (all_facts)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details (all_facts) (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ # ==== Cloudformation tests (create changeset) ============================
+
+ # try to create a changeset by changing instance type
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ changeset_name: "test-changeset"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.micro"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "create_changeset_result.changed"
+ - "'change_set_id' in create_changeset_result"
+ - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
+
+ - name: get stack details with changesets
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details with changesets (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+ check_mode: yes
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ # try to create an empty changeset by passing in unchanged template
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "not create_changeset_result.changed"
+ - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
+
+ # ==== Cloudformation tests (termination_protection) ======================
+
+ - name: set termination protection to true
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: set termination protection to false
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: no
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ # ==== Cloudformation tests (delete stack tests) ==========================
+
+ - name: delete cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
+
+ - name: delete cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
+
+ - name: delete cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'msg' in cf_stack"
+ - >-
+ "Stack doesn't exist" in cf_stack.msg
+
+ - name: delete cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ # ==== Cleanup ============================================================
+
+ always:
+
+ - name: delete stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+ - name: Delete test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ <<: *aws_connection_info
+ ignore_errors: yes
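
The changeset flow exercised above condenses to a short standalone play. A minimal sketch, assuming AWS credentials come from the environment; demo-stack, demo-changeset, and my-template.json are placeholder names, while the module parameters (stack_name, create_changeset, changeset_name, template_body) and the change_set_id return value are the ones the assertions rely on:

- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: create the stack if it does not exist yet
      cloudformation:
        stack_name: demo-stack                       # placeholder name
        state: present
        template_body: "{{ lookup('file', 'my-template.json') }}"

    - name: propose changes as a changeset instead of applying them
      cloudformation:
        stack_name: demo-stack
        create_changeset: yes
        changeset_name: demo-changeset               # placeholder name
        template_body: "{{ lookup('file', 'my-template.json') }}"
      register: changeset

    - name: show the changeset id for review
      debug:
        var: changeset.change_set_id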
diff --git a/test/integration/targets/incidental_cs_common/aliases b/test/integration/targets/incidental_cs_common/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_cs_common/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_cs_common/defaults/main.yml b/test/integration/targets/incidental_cs_common/defaults/main.yml
new file mode 100644
index 00000000..942316bd
--- /dev/null
+++ b/test/integration/targets/incidental_cs_common/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+cs_resource_prefix: "cs-{{ (ansible_date_time.iso8601_micro | to_uuid).split('-')[0] }}"
+cs_common_template: CentOS 5.6 (64-bit) no GUI (Simulator)
+cs_common_service_offering: Small Instance
+cs_common_zone_adv: Sandbox-simulator-advanced
+cs_common_zone_basic: Sandbox-simulator-basic
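
The prefix expression is worth unpacking: to_uuid deterministically hashes its input string into a UUID, and split('-')[0] keeps the first dash-separated group, yielding a short token that changes every run because ansible_date_time.iso8601_micro does. A sketch under that reading:

# a fixed timestamp stands in for ansible_date_time.iso8601_micro
- debug:
    msg: "cs-{{ ('2020-12-19T18:12:24.000000Z' | to_uuid).split('-')[0] }}"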
diff --git a/test/integration/targets/incidental_deploy_helper/aliases b/test/integration/targets/incidental_deploy_helper/aliases
new file mode 100644
index 00000000..31c6a8b4
--- /dev/null
+++ b/test/integration/targets/incidental_deploy_helper/aliases
@@ -0,0 +1 @@
+shippable/posix/incidental
diff --git a/test/integration/targets/incidental_deploy_helper/tasks/main.yml b/test/integration/targets/incidental_deploy_helper/tasks/main.yml
new file mode 100644
index 00000000..962c894a
--- /dev/null
+++ b/test/integration/targets/incidental_deploy_helper/tasks/main.yml
@@ -0,0 +1,149 @@
+---
+- name: record the output directory
+  set_fact: deploy_helper_test_root={{ output_dir }}/deploy_helper_test_root
+
+- name: State=query with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=query
+- name: Assert State=query with default parameters
+ assert:
+ that:
+ - "'project_path' in deploy_helper"
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/current'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/releases'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/shared'"
+ - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'"
+ - "'previous_release' in deploy_helper"
+ - "'previous_release_path' in deploy_helper"
+ - "'new_release' in deploy_helper"
+ - "'new_release_path' in deploy_helper"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release }}'"
+
+- name: State=query with relative overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query
+- name: Assert State=query with relative overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with absolute overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query
+- name: Assert State=query with absolute overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with overridden unfinished_filename
+ deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query
+- name: Assert State=query with overridden unfinished_filename
+ assert:
+ that:
+ - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename"
+
+# Remove the root folder just in case it exists
+- file: path={{ deploy_helper_test_root }} state=absent
+
+- name: State=present with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=present
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with default parameters
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- name: State=finalize with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=finalize
+- stat: path={{ deploy_helper.current_path }}
+ register: current_path
+- stat: path={{ deploy_helper.current_path }}/DEPLOY_UNFINISHED
+ register: current_path_unfinished_filename
+- name: Assert State=finalize with default parameters
+ assert:
+ that:
+ - "current_path.stat.islnk"
+ - "deploy_helper.new_release_path in current_path.stat.lnk_source"
+ - "not current_path_unfinished_filename.stat.exists"
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=finalize with default parameters (clean=true checks)
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "releases_count.stdout|trim == '6'"
+- deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query
+- name: Assert State=finalize with default parameters (previous_release checks)
+ assert:
+ that:
+ - "deploy_helper.new_release == deploy_helper.previous_release"
+
+- name: State=absent with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=absent
+- stat: path={{ deploy_helper_test_root }}
+ register: project_path
+- name: Assert State=absent with default parameters
+ assert:
+ that:
+ - "not project_path.stat.exists"
+
+- debug: msg="Clearing all release data and facts ---------"
+
+- name: State=present with shared_path set to an empty string
+ deploy_helper: path={{ deploy_helper_test_root }} state=present shared_path=''
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with shared_path set to an empty string
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "not shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- shell: "ls {{ deploy_helper_test_root }}/releases | wc -l"
+ register: before_releases_count
+- name: State=clean with keep_releases=3
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=clean keep_releases=3
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=clean with keep_releases=3
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "before_releases_count.stdout|trim == '6'"
+ - "releases_count.stdout|trim == '3'"
+
+# Remove the root folder
+- file: path={{ deploy_helper_test_root }} state=absent
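
Stripped of the assertions, these tasks walk the module's intended deploy flow: state=present creates releases/ and shared/ and publishes the deploy_helper facts, the new code lands under deploy_helper.new_release_path, and state=finalize flips the current symlink and prunes old releases. A minimal sketch of a real deploy, with /var/www/app as a hypothetical project path:

- deploy_helper: path=/var/www/app state=present
- copy: content='released' dest={{ deploy_helper.new_release_path }}/index.html
- deploy_helper: path=/var/www/app release={{ deploy_helper.new_release }} state=finalize keep_releases=5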
diff --git a/test/integration/targets/incidental_flatpak_remote/aliases b/test/integration/targets/incidental_flatpak_remote/aliases
new file mode 100644
index 00000000..32b7f55a
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/aliases
@@ -0,0 +1,8 @@
+shippable/posix/incidental
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/test/integration/targets/incidental_flatpak_remote/meta/main.yml b/test/integration/targets/incidental_flatpak_remote/meta/main.yml
new file mode 100644
index 00000000..a1c58bf1
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_flatpak_remote
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml b/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml
new file mode 100644
index 00000000..7ce89a8c
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/check_mode.yml
@@ -0,0 +1,101 @@
+# - Tests with absent flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "Adding an absent flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: double_addition_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of addition of absent flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - "double_addition_result.changed == true"
+ msg: |
+ Adding an absent flatpak remote a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent
+
+- name: Test removal of absent flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == false"
+ msg: "Removing an absent flatpak remote shall mark module execution as not changed"
+
+
+# - Tests with present flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == false"
+ msg: "Adding a present flatpak remote shall mark module execution as not changed"
+
+# state=absent
+
+- name: Test removal of present flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "Removing a present flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of removal of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: double_removal_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of removal of present flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - "double_removal_result.changed == true"
+ msg: |
+ Removing a present flatpak remote a second time shall still mark module execution
+ as changed in check mode
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/main.yml b/test/integration/targets/incidental_flatpak_remote/tasks/main.yml
new file mode 100644
index 00000000..9c3ec6d7
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/main.yml
@@ -0,0 +1,57 @@
+# (c) 2018, Alexander Bethke <oolongbrothers@gmx.net>
+# (c) 2018, Ansible Project
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- block:
+
+ - import_tasks: setup.yml
+ become: true
+
+ # executable override
+
+ - name: Test executable override
+ flatpak_remote:
+ name: irrelevant
+ remote: irrelevant
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
+
+ - name: Verify executable override test result
+ assert:
+ that:
+ - "executable_override_result.failed == true"
+ - "executable_override_result.changed == false"
+ msg: "Specifying non-existing executable shall fail module execution"
+
+ - import_tasks: check_mode.yml
+ become: false
+
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
+
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
+
+ when: |
+ ansible_distribution == 'Fedora' or
+ ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml b/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml
new file mode 100644
index 00000000..b2fd2766
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/setup.yml
@@ -0,0 +1,27 @@
+- name: Install flatpak on Fedora
+ dnf:
+ name: flatpak
+ state: present
+
+ when: ansible_distribution == 'Fedora'
+
+- block:
+ - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic
+ apt_repository:
+ repo: "ppa:alexlarsson/flatpak"
+ state: present
+ mode: 0644
+ when: ansible_lsb.major_release | int < 18
+
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
+
+ when: ansible_distribution == 'Ubuntu'
+
+- name: Install flatpak remote for testing check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
diff --git a/test/integration/targets/incidental_flatpak_remote/tasks/test.yml b/test/integration/targets/incidental_flatpak_remote/tasks/test.yml
new file mode 100644
index 00000000..97a13f0c
--- /dev/null
+++ b/test/integration/targets/incidental_flatpak_remote/tasks/test.yml
@@ -0,0 +1,72 @@
+# state=present
+
+- name: Test addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "state=preset shall add flatpak when absent"
+
+- name: Test idempotency of addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: double_addition_result
+
+- name: Verify idempotency of addition test result - {{ method }}
+ assert:
+ that:
+ - "double_addition_result.changed == false"
+ msg: "state=present shall not do anything when flatpak is already present"
+
+- name: Test updating remote url does not do anything - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: https://a.different/repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: url_update_result
+
+- name: Verify updating remote url does not do anything - {{ method }}
+ assert:
+ that:
+ - "url_update_result.changed == false"
+ msg: "Trying to update the URL of an existing flatpak remote shall not do anything"
+
+
+# state=absent
+
+- name: Test removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal test result - {{ method }}
+ assert:
+ that:
+ - "double_removal_result.changed == false"
+ msg: "state=absent shall not do anything when flatpak is not present"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/aliases b/test/integration/targets/incidental_inventory_aws_ec2/aliases
new file mode 100644
index 00000000..29f60feb
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
new file mode 100644
index 00000000..8680c38d
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ vars:
+ template_name: "../templates/{{ template | default('inventory.yml') }}"
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: "{{ lookup('template', template_name) }}"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
new file mode 100644
index 00000000..f67fff1a
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: ""
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
new file mode 100644
index 00000000..07b0eec4
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
@@ -0,0 +1,64 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, add it to inventory and then terminate it without updating the cache
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
new file mode 100644
index 00000000..8a9b8893
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
@@ -0,0 +1,62 @@
+- name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+- name: get image ID to create an instance
+ ec2_ami_info:
+ filters:
+ architecture: x86_64
+ owner-id: '125523088429'
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: 'Fedora-Atomic-27*'
+ <<: *aws_connection_info
+ register: fedora_images
+
+- set_fact:
+ image_id: '{{ fedora_images.images.0.image_id }}'
+
+- name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: 10.10.0.0/24
+ state: present
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ register: setup_vpc
+
+- set_fact:
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+
+- name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}a'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: 10.10.0.0/24
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ register: setup_subnet
+
+- set_fact:
+ subnet_id: '{{ setup_subnet.subnet.id }}'
+
+- name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ register: setup_sg
+
+- set_fact:
+ sg_id: '{{ setup_sg.group_id }}'
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
new file mode 100644
index 00000000..4c8240e4
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
@@ -0,0 +1,39 @@
+- name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+- name: remove setup security group
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ vpc_id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+- name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}a'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ vpc_id }}'
+ cidr: 10.10.0.0/24
+ state: absent
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+- name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: 10.10.0.0/24
+ state: absent
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
new file mode 100644
index 00000000..cc1b9a5a
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert inventory was not populated by aws_ec2 inventory plugin
+ assert:
+ that:
+ - "'aws_ec2' not in groups"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
new file mode 100644
index 00000000..d83cb0bf
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert cache was used to populate inventory
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+
+ - meta: refresh_inventory
+
+ - name: assert refresh_inventory updated the cache
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
new file mode 100644
index 00000000..73a67db0
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
@@ -0,0 +1,91 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, refresh inventory, remove host, refresh inventory
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
new file mode 100644
index 00000000..fdeeeeff
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
@@ -0,0 +1,79 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ tag1: value1
+ tag2: value2
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: register the keyed sg group name
+ set_fact:
+ sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
+
+ - name: register one of the keyed tag groups name
+ set_fact:
+ tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
+
+ - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
+ assert:
+ that:
+          # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed groups (one per tag), arch keyed group, constructed group
+ - "groups | length == 9"
+ - "groups[tag_group_name] | length == 1"
+ - "groups[sg_group_name] | length == 1"
+ - "groups.arch_x86_64 | length == 1"
+ - "groups.tag_with_name_key | length == 1"
+ - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
new file mode 100644
index 00000000..6b46599b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
@@ -0,0 +1,74 @@
+- name: test updating inventory
+ block:
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: "{{ images[aws_region] }}"
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ <<: *aws_connection_info
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/runme.sh b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh
new file mode 100755
index 00000000..916f7e8f
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ensure test config is empty
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY_ENABLED=aws_ec2
+
+# test with default inventory file
+ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY=test.aws_ec2.yml
+
+# test empty inventory config
+ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+# generate inventory config and test using it
+ansible-playbook playbooks/create_inventory_config.yml "$@"
+ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# remove inventory cache
+rm -r aws_ec2_cache_dir/
+
+# generate inventory config with constructed features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+
+# cleanup inventory config
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
new file mode 100644
index 00000000..942edb30
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
@@ -0,0 +1,12 @@
+plugin: aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+aws_security_token: '{{ security_token }}'
+regions:
+ - '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+hostnames:
+ - tag:Name
+ - dns-name
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
new file mode 100644
index 00000000..e35bf901
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
@@ -0,0 +1,12 @@
+plugin: aws_ec2
+cache: True
+cache_plugin: jsonfile
+cache_connection: aws_ec2_cache_dir
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+aws_security_token: '{{ security_token }}'
+regions:
+ - '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
new file mode 100644
index 00000000..6befb4e3
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
@@ -0,0 +1,20 @@
+plugin: aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+aws_security_token: '{{ security_token }}'
+regions:
+ - '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+keyed_groups:
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ - key: 'tags'
+ prefix: 'tag'
+ - prefix: 'arch'
+ key: "architecture"
+compose:
+ test_compose_var_sum: tags.tag1 + tags.tag2
+groups:
+ tag_with_name_key: "'Name' in (tags | list)"
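
Worked through for the instance the constructed test creates (one security group, tags Name, tag1=value1, tag2=value2, an x86_64 AMI), this template yields groups along these lines; the concrete id is a stand-in, and the dash-to-underscore sanitization matches what test_populating_inventory_with_constructed.yml computes:

# groups produced (ids are stand-ins):
#   security_groups_sg_0123456789abcdef0            <- security_groups keyed group
#   tag_Name_{{ resource_prefix | replace('-', '_') }}   <- tags keyed group, one per tag
#   tag_tag1_value1
#   tag_tag2_value2
#   arch_x86_64                                     <- architecture keyed group
#   tag_with_name_key                               <- conditional groups: entry

The compose entry additionally sets test_compose_var_sum to 'value1value2' in each matching host's hostvars, which is exactly what the test asserts.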
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml b/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/aliases b/test/integration/targets/incidental_inventory_docker_swarm/aliases
new file mode 100644
index 00000000..c3a38c06
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/aliases
@@ -0,0 +1,13 @@
+shippable/posix/incidental
+skip/aix
+skip/power/centos
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system. On VMs, we restart docker daemon
+ # after finishing the tests to minimize potential effects
+ # on other tests.
+needs/root
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml
new file mode 100644
index 00000000..e8e6d55e
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/inventory_1.docker_swarm.yml
@@ -0,0 +1,3 @@
+---
+plugin: docker_swarm
+docker_host: unix://var/run/docker.sock
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml
new file mode 100644
index 00000000..e36bd00f
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/inventory_2.docker_swarm.yml
@@ -0,0 +1,5 @@
+---
+plugin: docker_swarm
+docker_host: unix://var/run/docker.sock
+verbose_output: no
+include_host_uri: yes
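
The two configs are complementary: the first takes the plugin defaults and so exposes docker_swarm_node_attributes per node, while this one suppresses those facts with verbose_output: no and publishes ansible_host_uri via include_host_uri, matching what test_inventory_1.yml and test_inventory_2.yml assert. A sketch of a config for a manager reachable over TCP, where the address is an assumption:

---
plugin: docker_swarm
docker_host: tcp://192.168.1.10:2375  # assumed manager address
verbose_output: yes
include_host_uri: yes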
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml
new file mode 100644
index 00000000..569a453c
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_setup_docker
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml
new file mode 100644
index 00000000..fc4455ec
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_cleanup.yml
@@ -0,0 +1,19 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: yes
+
+    - name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ state: absent
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml
new file mode 100644
index 00000000..d9f77732
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/swarm_setup.yml
@@ -0,0 +1,15 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: yes
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: incidental_setup_docker
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+      advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..600a89b1
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_1.yml
@@ -0,0 +1,58 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which is in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Check for groups
+ assert:
+ that:
+ - "groups.manager | length > 0"
+ - "groups.worker | length >= 0"
+ - "groups.leader | length == 1"
+ run_once: yes
+
+ - name: List manager group
+ debug:
+ var: groups.manager
+ run_once: yes
+
+ - name: List worker group
+ debug:
+ var: groups.worker
+ run_once: yes
+
+ - name: List leader group
+ debug:
+ var: groups.leader
+ run_once: yes
+
+ - name: Print ansible_host per host
+ debug:
+ var: ansible_host
+
+ - name: Make sure docker_swarm_node_attributes is available
+ assert:
+ that:
+ - docker_swarm_node_attributes is not undefined
+ - name: Print docker_swarm_node_attributes per host
+ debug:
+ var: docker_swarm_node_attributes
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml
new file mode 100644
index 00000000..b2a794d3
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/playbooks/test_inventory_2.yml
@@ -0,0 +1,35 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which is in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Make sure docker_swarm_node_attributes is not available
+ assert:
+ that:
+ - docker_swarm_node_attributes is undefined
+ - name: Make sure ansible_host_uri is available
+ assert:
+ that:
+ - ansible_host_uri is defined
+ - name: Print ansible_host_uri
+ debug:
+ var: ansible_host_uri
diff --git a/test/integration/targets/incidental_inventory_docker_swarm/runme.sh b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh
new file mode 100755
index 00000000..e2ba6869
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_docker_swarm/runme.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ ansible-playbook playbooks/swarm_cleanup.yml
+ echo "Done"
+ exit 0
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/swarm_setup.yml
+
+echo "Test docker_swarm inventory 1"
+ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_swarm inventory 2"
+ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml
diff --git a/test/integration/targets/incidental_inventory_foreman/aliases b/test/integration/targets/incidental_inventory_foreman/aliases
new file mode 100644
index 00000000..c28a056e
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/incidental
+cloud/foreman
+destructive
diff --git a/test/integration/targets/incidental_inventory_foreman/ansible.cfg b/test/integration/targets/incidental_inventory_foreman/ansible.cfg
new file mode 100644
index 00000000..63e24c4b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/ansible.cfg
@@ -0,0 +1,5 @@
+[defaults]
+inventory = test-config.foreman.yaml
+
+[inventory]
+enable_plugins = foreman
diff --git a/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml b/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml
new file mode 100644
index 00000000..c91f4c38
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml
@@ -0,0 +1,31 @@
+---
+- hosts: localhost
+ vars:
+ foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}"
+ foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}"
+ foreman_stub_api_path: /api/v2
+ cached_hosts_key: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}{{ foreman_stub_api_path }}/hosts"
+ tasks:
+ - name: verify a cache file was created
+ find:
+ path:
+ - ./foreman_cache
+ register: matching_files
+
+ - assert:
+ that:
+ - matching_files.matched == 1
+ - name: read the cached inventory
+ set_fact:
+ contents: "{{ lookup('file', matching_files.files.0.path) }}"
+
+ - name: extract all the host names
+ set_fact:
+ cached_hosts: "{{ contents[cached_hosts_key] | json_query('[*].name') }}"
+
+ - assert:
+ that:
+ "'{{ item }}' in cached_hosts"
+ loop:
+ - "v6.example-780.com"
+ - "c4.j1.y5.example-487.com"
diff --git a/test/integration/targets/incidental_inventory_foreman/runme.sh b/test/integration/targets/incidental_inventory_foreman/runme.sh
new file mode 100755
index 00000000..ba94a936
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/runme.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+export ANSIBLE_INVENTORY
+export ANSIBLE_PYTHON_INTERPRETER
+
+unset ANSIBLE_INVENTORY
+unset ANSIBLE_PYTHON_INTERPRETER
+
+export ANSIBLE_CONFIG=ansible.cfg
+export FOREMAN_HOST="${FOREMAN_HOST:-localhost}"
+export FOREMAN_PORT="${FOREMAN_PORT:-8080}"
+FOREMAN_CONFIG=test-config.foreman.yaml
+
+# Set inventory caching environment variables to populate a jsonfile cache
+export ANSIBLE_INVENTORY_CACHE=True
+export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile
+export ANSIBLE_INVENTORY_CACHE_CONNECTION=./foreman_cache
+
+# flag for checking whether cleanup has already fired
+_is_clean=
+
+function _cleanup() {
+ [[ -n "$_is_clean" ]] && return # don't double-clean
+    echo "Cleanup: removing $FOREMAN_CONFIG..."
+ rm -vf "$FOREMAN_CONFIG"
+ unset ANSIBLE_CONFIG
+ unset FOREMAN_HOST
+ unset FOREMAN_PORT
+ unset FOREMAN_CONFIG
+ _is_clean=1
+}
+trap _cleanup INT TERM EXIT
+
+cat > "$FOREMAN_CONFIG" <<FOREMAN_YAML
+plugin: foreman
+url: http://${FOREMAN_HOST}:${FOREMAN_PORT}
+user: ansible-tester
+password: secure
+validate_certs: False
+FOREMAN_YAML
+
+ansible-playbook test_foreman_inventory.yml --connection=local "$@"
+ansible-playbook inspect_cache.yml --connection=local "$@"
+
+# remove inventory cache
+rm -r ./foreman_cache
diff --git a/test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml b/test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml
new file mode 100644
index 00000000..d5eeed4f
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml
@@ -0,0 +1,59 @@
+---
+- hosts: localhost
+ vars:
+ foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}"
+ foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}"
+ foreman_stub_api_path: /api/v2
+ foreman_stub_host_uri: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}"
+ foreman_stub_api_uri: "{{ foreman_stub_host_uri }}{{ foreman_stub_api_path }}"
+ foreman_stub_heartbeat_uri: "{{ foreman_stub_host_uri }}/ping"
+ tasks:
+ - debug:
+ msg: >-
+ Foreman host: {{ foreman_stub_host }} |
+ Foreman port: {{ foreman_stub_port }} |
+ API path: {{ foreman_stub_api_path }} |
+ Foreman API URL: {{ foreman_stub_api_uri }}
+
+ - name: Wait for Foreman API stub to come up online
+ wait_for:
+ host: "{{ foreman_stub_host }}"
+ port: "{{ foreman_stub_port }}"
+ state: started
+
+ # smoke test that flask app is serving
+ - name: Smoke test HTTP response from Foreman stub
+ uri:
+ url: "{{ foreman_stub_heartbeat_uri }}"
+ return_content: yes
+ register: heartbeat_resp
+ failed_when: >
+ heartbeat_resp.json.status != 'ok' or heartbeat_resp.json.response != 'pong'
+
+ #### Testing start
+ - name: >
+ Check that there are 'foreman_pgagne_sats' and 'foreman_base'
+ groups present in inventory
+ assert:
+ that: >
+ '{{ item }}' in groups
+ with_items:
+ - foreman_pgagne_sats
+ - foreman_base
+
+ - name: Check that host are in appropriate groups
+ assert:
+ that: >
+ '{{ item.key }}' in groups['{{ item.value }}']
+ with_dict:
+ v6.example-780.com: foreman_base
+ c4.j1.y5.example-487.com: ungrouped
+
+ - name: Check host UUIDs
+ assert:
+ that: >
+ hostvars['{{ item.key }}']['foreman_subscription_facet_attributes']['uuid'] == '{{ item.value }}'
+ with_dict:
+ v6.example-780.com: 2c72fa49-995a-4bbf-bda0-684c7048ad9f
+ c4.j1.y5.example-487.com: 0a494b6e-7e90-4ed2-8edc-43a41436a242
+ #### Testing end
diff --git a/test/integration/targets/incidental_ios_file/aliases b/test/integration/targets/incidental_ios_file/aliases
new file mode 100644
index 00000000..cbcfec65
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/aliases
@@ -0,0 +1,2 @@
+shippable/ios/incidental
+network/ios
diff --git a/test/integration/targets/incidental_ios_file/defaults/main.yaml b/test/integration/targets/incidental_ios_file/defaults/main.yaml
new file mode 100644
index 00000000..5f709c5a
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+testcase: "*"
diff --git a/test/integration/targets/incidental_ios_file/ios1.cfg b/test/integration/targets/incidental_ios_file/ios1.cfg
new file mode 100644
index 00000000..120dd4ca
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/ios1.cfg
@@ -0,0 +1,3 @@
+vlan 3
+ name ank_vlan3
+!
diff --git a/test/integration/targets/incidental_ios_file/nonascii.bin b/test/integration/targets/incidental_ios_file/nonascii.bin
new file mode 100644
index 00000000..14c6ddb1
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/nonascii.bin
Binary files differ
diff --git a/test/integration/targets/incidental_ios_file/tasks/cli.yaml b/test/integration/targets/incidental_ios_file/tasks/cli.yaml
new file mode 100644
index 00000000..d4f663b3
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/tasks/cli.yaml
@@ -0,0 +1,17 @@
+---
+- name: collect all cli test cases
+ find:
+ paths: "{{ role_path }}/tests/cli"
+ patterns: "{{ testcase }}.yaml"
+ register: test_cases
+ delegate_to: localhost
+
+- name: set test_items
+ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
+
+- name: run test cases (connection=ansible.netcommon.network_cli)
+ include: "{{ test_case_to_run }}"
+ with_items: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
+ tags: connection_network_cli
diff --git a/test/integration/targets/incidental_ios_file/tasks/main.yaml b/test/integration/targets/incidental_ios_file/tasks/main.yaml
new file mode 100644
index 00000000..415c99d8
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/tasks/main.yaml
@@ -0,0 +1,2 @@
+---
+- { include: cli.yaml, tags: ['cli'] }
diff --git a/test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml b/test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml
new file mode 100644
index 00000000..5a7ebf07
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/tests/cli/net_get.yaml
@@ -0,0 +1,52 @@
+---
+- debug: msg="START ios cli/net_get.yaml on connection={{ ansible_connection }}"
+
+# Add minimal testcase to check args are passed correctly to
+# implementation module and module run is successful.
+
+- name: setup
+ cisco.ios.ios_config:
+ lines:
+ - ip ssh version 2
+ - ip scp server enable
+ - username {{ ansible_ssh_user }} privilege 15
+ match: none
+
+- name: setup (copy file to be fetched from device)
+ ansible.netcommon.net_put:
+ src: ios1.cfg
+ register: result
+
+- name: setup (remove file from localhost if present)
+ file:
+ path: ios_{{ inventory_hostname }}.cfg
+ state: absent
+ delegate_to: localhost
+
+- name: get the file from device with relative destination
+ ansible.netcommon.net_get:
+ src: ios1.cfg
+ dest: 'ios_{{ inventory_hostname }}.cfg'
+ register: result
+
+- assert:
+ that:
+ - result.changed == true
+
+- name: Idempotency check
+ ansible.netcommon.net_get:
+ src: ios1.cfg
+ dest: 'ios_{{ inventory_hostname }}.cfg'
+ register: result
+
+- assert:
+ that:
+ - result.changed == false
+
+- name: setup (remove file from localhost if present)
+ file:
+ path: ios_{{ inventory_hostname }}.cfg
+ state: absent
+ delegate_to: localhost
+
+- debug: msg="END ios cli/net_get.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml b/test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml
new file mode 100644
index 00000000..215b524d
--- /dev/null
+++ b/test/integration/targets/incidental_ios_file/tests/cli/net_put.yaml
@@ -0,0 +1,73 @@
+---
+- debug:
+ msg: "START ios cli/net_put.yaml on connection={{ ansible_connection }}"
+
+# Add a minimal test case to check that args are passed correctly to the
+# implementation module and that the module run succeeds.
+
+- name: setup
+ cisco.ios.ios_config:
+ lines:
+ - ip ssh version 2
+ - ip scp server enable
+ - username {{ ansible_ssh_user }} privilege 15
+ match: none
+
+- name: Delete existing files if present on remote host
+ cisco.ios.ios_command:
+ commands: "{{ item }}"
+ loop:
+ - delete /force ios1.cfg
+ - delete /force ios.cfg
+ - delete /force nonascii.bin
+ ignore_errors: true
+
+- name: copy file from controller to ios using scp (default)
+ ansible.netcommon.net_put:
+ src: ios1.cfg
+ register: result
+
+- assert:
+ that:
+ - result.changed == true
+
+- name: Idempotency Check
+ ansible.netcommon.net_put:
+ src: ios1.cfg
+ register: result
+
+- assert:
+ that:
+ - result.changed == false
+
+- name: copy file from controller to ios with dest specified
+ ansible.netcommon.net_put:
+ src: ios1.cfg
+ dest: ios.cfg
+ register: result
+
+- assert:
+ that:
+ - result.changed == true
+
+- name: copy file with non-ascii characters to ios in text mode (fail case)
+ ansible.netcommon.net_put:
+ src: nonascii.bin
+ mode: 'text'
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == true
+
+- name: copy file with non-ascii characters to ios in default mode (binary)
+ ansible.netcommon.net_put:
+ src: nonascii.bin
+ register: result
+
+- assert:
+ that:
+ - result.changed == true
+
+- debug: msg="END ios cli/net_put.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/aliases b/test/integration/targets/incidental_lookup_rabbitmq/aliases
new file mode 100644
index 00000000..f89752b8
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/freebsd
+skip/rhel
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml b/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml
new file mode 100644
index 00000000..33fa97dc
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_rabbitmq
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml b/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml
new file mode 100644
index 00000000..740f8998
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/tasks/main.yml
@@ -0,0 +1,5 @@
+# Rabbitmq lookup
+- include: ubuntu.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_release != 'trusty'
diff --git a/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml b/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml
new file mode 100644
index 00000000..3b007ede
--- /dev/null
+++ b/test/integration/targets/incidental_lookup_rabbitmq/tasks/ubuntu.yml
@@ -0,0 +1,138 @@
+- name: Test failure without pika installed
+ set_fact:
+ rabbit_missing_pika: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.250.1:5672/%2F', queue='hello', count=3) }}"
+ ignore_errors: yes
+ register: rabbitmq_missing_pika_error
+
+- assert:
+ that:
+ - "'pika python package is required' in rabbitmq_missing_pika_error.msg"
+
+- name: Install pika and requests
+  pip:
+    name:
+      - pika<1.0.0
+      - requests
+    state: latest
+
+- name: Test that giving an incorrect amqp protocol in URL will error
+ set_fact:
+ rabbitmq_test_protocol: "{{ lookup('rabbitmq', url='zzzamqp://guest:guest@192.168.250.1:5672/%2F', queue='hello', count=3) }}"
+ ignore_errors: yes
+ register: rabbitmq_protocol_error
+
+- assert:
+ that:
+ - "rabbitmq_protocol_error is failed"
+ - "'URL malformed' in rabbitmq_protocol_error.msg"
+
+- name: Test that giving an incorrect IP address in URL will error
+ set_fact:
+ rabbitmq_test_protocol: "{{ lookup('rabbitmq', url='amqp://guest:guest@xxxxx192.112312368.250.1:5672/%2F', queue='hello', count=3) }}"
+ ignore_errors: yes
+ register: rabbitmq_ip_error
+
+- assert:
+ that:
+ - "rabbitmq_ip_error is failed"
+ - "'Connection issue' in rabbitmq_ip_error.msg"
+
+- name: Test missing parameters will error
+ set_fact:
+ rabbitmq_test_protocol: "{{ lookup('rabbitmq') }}"
+ ignore_errors: yes
+ register: rabbitmq_params_error
+
+- assert:
+ that:
+ - "rabbitmq_params_error is failed"
+ - "'URL is required for rabbitmq lookup.' in rabbitmq_params_error.msg"
+
+- name: Test missing queue will error
+ set_fact:
+ rabbitmq_queue_protocol: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.250.1:5672/%2F') }}"
+ ignore_errors: yes
+ register: rabbitmq_queue_error
+
+- assert:
+ that:
+ - "rabbitmq_queue_error is failed"
+ - "'Queue is required for rabbitmq lookup' in rabbitmq_queue_error.msg"
+
+- name: Enable the rabbitmq_management plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: enabled
+
+- name: Setup test queue
+ rabbitmq_queue:
+ name: hello
+
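+# %2f in the URLs below is the URL-encoded name of the default vhost, '/'.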
+- name: Post test message to the exchange (string)
+ uri:
+ url: http://localhost:15672/api/exchanges/%2f/amq.default/publish
+ method: POST
+ body: '{"properties":{},"routing_key":"hello","payload":"ansible-test","payload_encoding":"string"}'
+ user: guest
+ password: guest
+ force_basic_auth: yes
+ return_content: yes
+ headers:
+ Content-Type: "application/json"
+ register: post_data
+
+
+- name: Post test message to the exchange (json)
+ uri:
+ url: http://localhost:15672/api/exchanges/%2f/amq.default/publish
+ method: POST
+ body: '{"properties":{"content_type": "application/json"},"routing_key":"hello","payload":"{\"key\": \"value\" }","payload_encoding":"string"}'
+ user: guest
+ password: guest
+ force_basic_auth: yes
+ return_content: yes
+ headers:
+ Content-Type: "application/json"
+ register: post_data_json
+
+- name: Test retrieve messages
+ set_fact:
+ rabbitmq_msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/%2f/hello', queue='hello') }}"
+ ignore_errors: yes
+ register: rabbitmq_msg_error
+
+- name: Ensure two messages were received
+ assert:
+ that:
+ - "rabbitmq_msg_error is not failed"
+ - rabbitmq_msg | length == 2
+
+- name: Ensure first message is a string
+ assert:
+ that:
+ - rabbitmq_msg[0].msg == "ansible-test"
+
+- name: Ensure second message is json
+ assert:
+ that:
+ - rabbitmq_msg[1].json.key == "value"
+
+- name: Test missing vhost
+ set_fact:
+ rabbitmq_msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@localhost:5672/missing/', queue='hello') }}"
+ ignore_errors: yes
+ register: rabbitmq_vhost_error
+
+- assert:
+ that:
+ - "rabbitmq_vhost_error is failed"
+ - "'NOT_ALLOWED' in rabbitmq_vhost_error.msg"
+
+# Tidy up
+- name: Uninstall pika and requests
+  pip:
+    name:
+      - pika
+      - requests
+    state: absent
+
+- name: Disable the rabbitmq_management plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: disabled
diff --git a/test/integration/targets/incidental_lvg/aliases b/test/integration/targets/incidental_lvg/aliases
new file mode 100644
index 00000000..d5baa06d
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/aliases
@@ -0,0 +1,6 @@
+destructive
+needs/privileged
+shippable/posix/incidental
+skip/aix
+skip/freebsd
+skip/osx
diff --git a/test/integration/targets/incidental_lvg/meta/main.yml b/test/integration/targets/incidental_lvg/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_lvg/tasks/main.yml b/test/integration/targets/incidental_lvg/tasks/main.yml
new file mode 100644
index 00000000..a57f591b
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/main.yml
@@ -0,0 +1,15 @@
+- name: Install required packages (Linux)
+ package:
+ name: lvm2
+ state: present
+ when: ansible_system == 'Linux'
+
+- name: Test lvg module
+ block:
+ - import_tasks: setup.yml
+
+ - import_tasks: test_indempotency.yml
+
+ - import_tasks: test_grow_reduce.yml
+ always:
+ - import_tasks: teardown.yml
diff --git a/test/integration/targets/incidental_lvg/tasks/setup.yml b/test/integration/targets/incidental_lvg/tasks/setup.yml
new file mode 100644
index 00000000..e63c2d64
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/setup.yml
@@ -0,0 +1,13 @@
+- name: "Create files to use as a disk devices"
+ command: "dd if=/dev/zero of={{ remote_tmp_dir }}/img{{ item }} bs=1M count=10"
+ with_sequence: 'count=2'
+
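+# 'losetup --show -f' attaches the file to the first free loop device and
+# prints its path (e.g. /dev/loop0); later tasks read it back from
+# loop_devices.results[n].stdout.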
+- name: "Create loop device for file"
+ command: "losetup --show -f {{ remote_tmp_dir }}/img{{ item }}"
+ with_sequence: 'count=2'
+ register: loop_devices
+
+- name: "Affect name on disk to work on"
+ set_fact:
+ loop_device1: "{{ loop_devices.results[0] }}"
+ loop_device2: "{{ loop_devices.results[1] }}"
diff --git a/test/integration/targets/incidental_lvg/tasks/teardown.yml b/test/integration/targets/incidental_lvg/tasks/teardown.yml
new file mode 100644
index 00000000..ed662f1e
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/teardown.yml
@@ -0,0 +1,17 @@
+- name: Remove test volume group
+ lvg:
+ vg: testvg
+ state: absent
+
+- name: Detach loop device
+ command: "losetup -d {{ item.stdout }}"
+ loop: "{{ loop_devices.results|default([]) }}"
+ when:
+ - item.stdout is defined
+ - item.stdout is match("/dev/.*")
+
+- name: Remove device files
+ file:
+ path: "{{ remote_tmp_dir }}/img{{ item }}"
+ state: absent
+ with_sequence: 'count={{ loop_devices.results|length }}'
diff --git a/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml b/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml
new file mode 100644
index 00000000..1e988045
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/test_grow_reduce.yml
@@ -0,0 +1,33 @@
+- name: "Create volume group on first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert the testvg span only on first disk"
+ assert:
+ that:
+ - ansible_lvm.pvs[loop_device1.stdout].vg == "testvg"
+ - 'loop_device2.stdout not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device2.stdout].vg == ""'
+
+- name: "Extend to second disk AND reduce from the first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device2.stdout }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert the testvg span only on first disk"
+ assert:
+ that:
+ - 'loop_device1.stdout not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device1.stdout].vg == ""'
+ - ansible_lvm.pvs[loop_device2.stdout].vg == "testvg"
diff --git a/test/integration/targets/incidental_lvg/tasks/test_indempotency.yml b/test/integration/targets/incidental_lvg/tasks/test_indempotency.yml
new file mode 100644
index 00000000..5007e56a
--- /dev/null
+++ b/test/integration/targets/incidental_lvg/tasks/test_indempotency.yml
@@ -0,0 +1,15 @@
+- name: Create volume group on disk device
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: Create the volume group again to verify idempotence
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+ register: repeat_vg_create
+
+- name: Do all assertions to verify expected results
+ assert:
+ that:
+ - repeat_vg_create is not changed
diff --git a/test/integration/targets/incidental_mongodb_parameter/aliases b/test/integration/targets/incidental_mongodb_parameter/aliases
new file mode 100644
index 00000000..dc285483
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/aliases
@@ -0,0 +1,8 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
+needs/root
diff --git a/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml b/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml
new file mode 100644
index 00000000..aac55526
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+# defaults file for test_mongodb_user
+mongodb_admin_user: test_root
+mongodb_admin_password: saE_Rr9!gE6gh#e~R#nZ
+mongod_auth: false
+kill_signal: SIGTERM
+# Should be one of
+# --storageEngine wiredTiger --wiredTigerEngineConfigString="cache_size=200M"
+# --storageEngine mmapv1 --nojournal
+mongod_storage_engine_opts: "--storageEngine wiredTiger --wiredTigerEngineConfigString='cache_size=200M'"
+mongodb_user: mongodb
+mongodb_user_list:
+ - { "name": "user1", "password": "password1", "roles": "read", "database": "test" }
+ - { "name": "user2", "password": "password2", "roles": "readWrite", "database": "test" }
+ - { "name": "user3", "password": "password3", "roles": "dbAdmin", "database": "test" }
+ - { "name": "user4", "password": "password4", "roles": "userAdmin", "database": "test" }
+ - { "name": "user5", "password": "password5", "roles": "clusterAdmin", "database": "admin" }
+ - { "name": "user6", "password": "password6", "roles": "readAnyDatabase", "database": "admin" }
+ - { "name": "user7", "password": "password7", "roles": "readWriteAnyDatabase", "database": "admin" }
+ - { "name": "user8", "password": "password8", "roles": "userAdminAnyDatabase", "database": "admin" }
+ - { "name": "user9", "password": "password9", "roles": "dbAdminAnyDatabase", "database": "admin" }
diff --git a/test/integration/targets/incidental_mongodb_parameter/meta/main.yml b/test/integration/targets/incidental_mongodb_parameter/meta/main.yml
new file mode 100644
index 00000000..10fc3936
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - incidental_setup_mongodb
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml
new file mode 100644
index 00000000..a0fda1dc
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/tasks/main.yml
@@ -0,0 +1,143 @@
+# test code for the mongodb_parameter module
+# (c) 2019, Rhys Campbell <rhys.james.campbell@googlemail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+- name: Ensure tests home exists
+ file:
+ path: "{{ remote_tmp_dir }}/tests"
+ state: directory
+
+- include_tasks: mongod_teardown.yml
+
+- include_tasks: mongod_singlenode.yml
+
+- name: Set syncdelay to 99
+ mongodb_parameter:
+ login_port: 3001
+ param: syncdelay
+ value: 99
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 60
+ - sd_change.after | int == 99
+ - sd_change.changed == True
+
+- name: Set syncdelay to 99 (again)
+ mongodb_parameter:
+ login_port: 3001
+ param: syncdelay
+ value: 99
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 99
+ - sd_change.after | int == 99
+ - sd_change.changed == False
+
+- name: Create admin user with module
+ mongodb_user:
+ login_port: 3001
+ database: admin
+ name: "{{ mongodb_admin_user }}"
+ password: "{{ mongodb_admin_password }}"
+ roles: root
+ state: present
+ register: mongodb_admin_user_created
+
+- assert:
+ that:
+ - mongodb_admin_user_created.changed == True
+
+- name: Kill all mongod processes
+ command: pkill -{{ kill_signal }} mongod
+ ignore_errors: true
+
+- name: Getting pids for mongod
+ pids:
+ name: mongod
+ register: pids_of_mongod
+
+- name: Wait for all mongod processes to exit
+ wait_for:
+ path: "/proc/{{ item }}/status"
+ state: absent
+ delay: 3
+  with_items: "{{ pids_of_mongod.pids }}"
+
+- set_fact:
+ mongod_auth: true
+
+# Tests with auth enabled
+- include_tasks: mongod_singlenode.yml
+
+- name: Set syncdelay to 59 with auth
+ mongodb_parameter:
+ login_port: 3001
+ login_user: "{{ mongodb_admin_user }}"
+ login_password: "{{ mongodb_admin_password }}"
+ param: syncdelay
+ value: 59
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 60
+ - sd_change.after | int == 59
+ - sd_change.changed == True
+
+- name: Set syncdelay to 59 (again) with auth
+ mongodb_parameter:
+ login_port: 3001
+ login_user: "{{ mongodb_admin_user }}"
+ login_password: "{{ mongodb_admin_password }}"
+ param: syncdelay
+ value: 59
+ param_type: int
+ register: sd_change
+
+- assert:
+ that:
+ - sd_change.before | int == 59
+ - sd_change.after | int == 59
+ - sd_change.changed == False
+
+- name: Set authenticationMechanisms to MONGODB-X509 with auth (will fail)
+ mongodb_parameter:
+ login_port: 3001
+ login_user: "{{ mongodb_admin_user }}"
+ login_password: "{{ mongodb_admin_password }}"
+ param: authenticationMechanisms
+ value: "MONGODB-X509"
+ param_type: str
+ register: diag_change
+ ignore_errors: yes
+
+- assert:
+ that:
+ - '"unable to change parameter" in diag_change.msg'
+ - diag_change.failed == True
+
+# Clean up
+- include_tasks: mongod_teardown.yml
diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml
new file mode 100644
index 00000000..291cb1c9
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_singlenode.yml
@@ -0,0 +1,55 @@
+- name: Set mongodb_user for RedHat
+ set_fact:
+ mongodb_user: "mongod"
+ when: ansible_os_family == "RedHat"
+
+- set_fact:
+ mongodb_nodes:
+ - 3001
+
+- name: Create directories for mongod processes
+ file:
+ path: "{{ remote_tmp_dir }}/mongod{{ item }}"
+ state: directory
+ owner: "{{ mongodb_user }}"
+ group: "{{ mongodb_user }}"
+ mode: 0755
+ recurse: yes
+ with_items: "{{ mongodb_nodes }}"
+
+- name: Ensure {{ remote_tmp_dir }}/config dir exists
+ file:
+ path: "{{ remote_tmp_dir }}/config"
+ state: directory
+ owner: "{{ mongodb_user }}"
+ group: "{{ mongodb_user }}"
+ mode: 0755
+
+- name: Create keyfile
+ copy:
+ dest: "{{ remote_tmp_dir }}/my.key"
+ content: |
+ fd2CUrbXBJpB4rt74A6F
+ owner: "{{ mongodb_user }}"
+ group: "{{ mongodb_user }}"
+ mode: 0600
+ when: mongod_auth == True
+
+- name: Spawn mongod process without auth
+ command: mongod --shardsvr --smallfiles {{ mongod_storage_engine_opts }} --dbpath mongod{{ item }} --port {{ item }} --logpath mongod{{ item }}/log.log --fork
+ args:
+ chdir: "{{ remote_tmp_dir }}"
+ with_items: "{{ mongodb_nodes | sort }}"
+ when: mongod_auth == False
+
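+# --auth enforces client authentication; --keyFile points at the shared
+# secret created above, which mongod also uses for intra-cluster auth.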
+- name: Spawn mongod process with auth
+ command: mongod --shardsvr --smallfiles {{ mongod_storage_engine_opts }} --dbpath mongod{{ item }} --port {{ item }} --logpath mongod{{ item }}/log.log --fork --auth --keyFile my.key
+ args:
+ chdir: "{{ remote_tmp_dir }}"
+ with_items: "{{ mongodb_nodes | sort }}"
+ when: mongod_auth == True
+
+- name: Wait for mongod to start responding
+ wait_for:
+ port: "{{ item }}"
+ with_items: "{{ mongodb_nodes }}"
diff --git a/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml
new file mode 100644
index 00000000..a904a718
--- /dev/null
+++ b/test/integration/targets/incidental_mongodb_parameter/tasks/mongod_teardown.yml
@@ -0,0 +1,25 @@
+- name: Kill all mongod processes
+ command: pkill -{{ kill_signal }} mongod
+ ignore_errors: true
+
+- name: Getting pids for mongod
+ pids:
+ name: mongod
+ register: pids_of_mongod
+
+- name: Wait for all mongod processes to exit
+ wait_for:
+ path: "/proc/{{ item }}/status"
+ state: absent
+ delay: 1
+  with_items: "{{ pids_of_mongod.pids }}"
+
+- name: Remove all mongod folders
+ file:
+ path: "{{ remote_tmp_dir }}/{{ item }}"
+ state: absent
+ with_items:
+ - mongod3001
+
+- name: Remove all mongod sock files
+ shell: rm -Rf /tmp/mongodb*.sock
diff --git a/test/integration/targets/incidental_postgresql_user/aliases b/test/integration/targets/incidental_postgresql_user/aliases
new file mode 100644
index 00000000..78b47900
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
diff --git a/test/integration/targets/incidental_postgresql_user/defaults/main.yml b/test/integration/targets/incidental_postgresql_user/defaults/main.yml
new file mode 100644
index 00000000..bc9ef19b
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/defaults/main.yml
@@ -0,0 +1,3 @@
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
diff --git a/test/integration/targets/incidental_postgresql_user/meta/main.yml b/test/integration/targets/incidental_postgresql_user/meta/main.yml
new file mode 100644
index 00000000..c2a0d561
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_postgresql_db
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/main.yml b/test/integration/targets/incidental_postgresql_user/tasks/main.yml
new file mode 100644
index 00000000..d59ae635
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/main.yml
@@ -0,0 +1,7 @@
+# Initial CI tests of postgresql_user module
+- import_tasks: postgresql_user_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# General tests:
+- import_tasks: postgresql_user_general.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml
new file mode 100644
index 00000000..963f58ac
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_general.yml
@@ -0,0 +1,741 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Integration tests for postgresql_user module.
+
+- vars:
+ test_user: hello.user.with.dots
+ test_user2: hello
+ test_group1: group1
+ test_group2: group2
+ test_table: test
+ test_comment1: 'comment1'
+ test_comment2: 'comment2'
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
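+  # task_parameters and pg_parameters are YAML anchors; the tasks below merge
+  # them in with '<<: *task_parameters' / '<<: *pg_parameters' to avoid
+  # repeating the become/login boilerplate.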
+ block:
+ #
+ # Common tests
+ #
+ - name: Create role in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Add a comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment1 }}'
+
+ - name: Try to add the same comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Try to add another comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment2 }}'
+
+ - name: Try to create role again in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to create role again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+    - name: check that the user still exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Try to drop role in check mode again
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Try to drop role in actual mode again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ #
+ # password, no_password_changes, encrypted, expires parameters
+ #
+
+ - name: Create role with password, passed as hashed md5
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: md59543f1d82624df2b31672ec0f7050460
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+    - name: Check that the user exists with the expected password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Test no_password_changes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: u123
+ no_password_changes: yes
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+    # Storing unencrypted passwords is no longer supported as of PostgreSQL 10
+ - name: Change password, passed as unencrypted
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: no
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ when: postgres_version_resp.stdout is version('10', '<')
+
+    - name: Check that the user exists with the unencrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'myunencryptedpass'"
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Change password, explicit encrypted=yes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+    - name: Check that the user exists with an encrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword != 'myunencryptedpass'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change rolvaliduntil attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to set the same rolvaliduntil value again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #
+ # role_attr_flags
+ #
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set the same role attributes again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+    - name: Check the prev step
+      <<: *task_parameters
+      postgresql_query:
+        <<: *pg_parameters
+        query: >
+          SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+          AND rolcreaterole = 't' and rolcreatedb = 't'
+
+    - assert:
+        that:
+          - result.rowcount == 1
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+    - name: Check the prev step
+      <<: *task_parameters
+      postgresql_query:
+        <<: *pg_parameters
+        query: >
+          SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+          AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+    - assert:
+        that:
+          - result.rowcount == 1
+
+ #
+ # priv
+ #
+ - name: Create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ columns:
+ - id int
+
+ - name: Insert data to test table
+ <<: *task_parameters
+ postgresql_query:
+ query: "INSERT INTO {{ test_table }} (id) VALUES ('1')"
+ <<: *pg_parameters
+
+ - name: Check that test_user is not allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'permission denied' in result.msg"
+
+ - name: Grant privileges
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Check that test_user is allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant the same privileges again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Remove test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ #
+ # fail_on_user
+ #
+ - name: Create role for test
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+
+    - name: Create test table, set owner to test_user2
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ owner: '{{ test_user2 }}'
+
+ - name: Test fail_on_user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+ state: absent
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Unable to remove user'
+
+    - name: Test fail_on_user=no
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ fail_on_user: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ #
+ # Test groups parameter
+ #
+ - name: Create test group
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+
+ - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role test_group1 and grant test_group2 to test_group1
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant test_group2 to test_group1 again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_group1 }}'
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant groups to existent role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ groups:
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ test_group2 }}" TO "{{ test_user }}"']
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ always:
+ #
+ # Clean up
+ #
+ - name: Drop test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ - name: Drop test user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_user }}'
+ - '{{ test_user2 }}'
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
new file mode 100644
index 00000000..ccd42847
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/postgresql_user_initial.yml
@@ -0,0 +1,153 @@
+#
+# Create and destroy user, test 'password' and 'encrypted' parameters
+#
+# Unencrypted password values are not supported on newer versions;
+# do not run the 'encrypted: no' tests on PostgreSQL 10+.
+- set_fact:
+ encryption_values:
+ - 'yes'
+
+- set_fact:
+ encryption_values: '{{ encryption_values + ["no"]}}'
+ when: postgres_version_resp.stdout is version('10', '<=')
+
+- include_tasks: test_password.yml
+ vars:
+ encrypted: '{{ loop_item }}'
+ db_password1: 'secretù' # use UTF-8
+ loop: '{{ encryption_values }}'
+ loop_control:
+ loop_var: loop_item
+
+# BYPASSRLS role attribute was introduced in PostgreSQL 9.5, so
+# we want to test attribute management differently depending
+# on the version.
+- set_fact:
+ bypassrls_supported: "{{ postgres_version_resp.stdout is version('9.5.0', '>=') }}"
+
+# test 'no_password_changes' and 'role_attr_flags' parameters
+- include_tasks: test_no_password_change.yml
+ vars:
+ no_password_changes: '{{ loop_item }}'
+ loop:
+ - 'yes'
+ - 'no'
+ loop_control:
+ loop_var: loop_item
+
+### TODO: fail_on_user
+
+#
+# Test login_user functionality
+#
+- name: Create a user to test login module parameters
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create db
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the database was created
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Create a user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that it was created
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Grant database privileges
+ postgresql_privs:
+ type: "database"
+ state: "present"
+ roles: "{{ db_user2 }}"
+ privs: "CREATE,connect"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login: "{{ db_user1 }}"
+ password: "password"
+ host: "localhost"
+
+- name: Check that the user has the requested permissions (database)
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "db_user2 ~ '=Cc' in result_database.stdout"
+
+- name: Remove user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: 'absent'
+ priv: "ALL"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the user was removed
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the database was destroyed
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml b/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
new file mode 100644
index 00000000..c296c0ea
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/test_no_password_change.yml
@@ -0,0 +1,167 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+ postgresql_parameters: &parameters
+ db: postgres
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+ block:
+
+ - name: Create a user with all role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
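+        # the ternary filter appends ',BYPASSRLS' only when the server
+        # supports that attribute (PostgreSQL 9.5+)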
+ role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,LOGIN{{ bypassrls_supported | ternary(',BYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}' # no_password_changes is ignored when user doesn't already exist
+
+ - name: Check that the user has the requested role attributes
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin {{ bypassrls_supported | ternary(\", 'bypassrls:'||rolbypassrls\", '') }} from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:t' in result.stdout_lines[-2]"
+ - "'createrole:t' in result.stdout_lines[-2]"
+ - "'create:t' in result.stdout_lines[-2]"
+ - "'inherit:t' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:t' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Modify a user to have no role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: "Check that the user doesn't have any attribute"
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:f' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Try to add an invalid attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }},INVALID"
+ no_password_changes: '{{ no_password_changes }}'
+ ignore_errors: yes
+
+ - name: Check that ansible reports failure
+ assert:
+ that:
+ - result is not changed
+ - result is failed
+ - "result.msg == 'Invalid role_attr_flags specified: INVALID'"
+
+ - name: Modify a single role attribute on a user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: Check the role attributes
+ <<: *task_parameters
+ shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check the role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: echo "select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "( postgres_version_resp.stdout is version('9.5.0', '<')) or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+    - name: Check that setting the same attribute a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
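+        # (in a read-only transaction any ALTER USER statement fails, so an
+        # unexpected modification would surface as an error)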
+
+ - name: Check there isn't any update reported
+ assert:
+ that:
+ - result is not changed
+
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
+ no_password_changes: '{{ no_password_changes }}' # user deletion: no_password_changes is ignored
+
+ - name: Check that user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ always:
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
diff --git a/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml b/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
new file mode 100644
index 00000000..be033a55
--- /dev/null
+++ b/test/integration/targets/incidental_postgresql_user/tasks/test_password.yml
@@ -0,0 +1,336 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+ postgresql_parameters: &parameters
+ db: postgres
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+ block:
+ - name: 'Check that PGOPTIONS environment variable is effective (1/2)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ ignore_errors: true
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: 'Check that PGOPTIONS environment variable is effective (2/2)'
+ assert:
+ that:
+          - result is failed
+
+ - name: 'Create a user (password encrypted: {{ encrypted }})'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - block: &changed # block is only used here in order to be able to define YAML anchor
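+        # the tasks in this block are re-applied verbatim wherever
+        # '- <<: *changed' appears later in this file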
+ - name: Check that ansible reports it was created
+ assert:
+ that:
+              - result is changed
+
+ - name: Check that it was created
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+    - name: Check that creating the user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - block: &not_changed # block is only used here in order to be able to define YAML anchor
+ - name: Check that ansible reports no change
+ assert:
+ that:
+              - result is not changed
+
+ - name: 'Define an expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Redefine the same expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ expires: '2025-01-01'
+ <<: *parameters
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - block:
+
+ - name: 'Using MD5-hashed password: check that password not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
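+        # PostgreSQL's md5 scheme is the literal prefix 'md5' followed by
+        # md5(password + username); the hash filter reproduces that format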
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'yes'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'no'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ encrypted: 'yes'
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ expires: '2025-01-01'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: 'prefix{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using another md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix1' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'yes'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix2' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'no'
+ register: change_pass_unencrypted
+ failed_when:
+ - change_pass_unencrypted is failed
+        # newer versions of psycopg2 no longer support unencrypted passwords; ignore the error
+ - '"UNENCRYPTED PASSWORD is no longer supported" not in change_pass_unencrypted.msg'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password changed when using a cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'yes'
+
+ - block:
+
+ - name: 'Using cleartext password: check that password not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (not encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "changed{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'no'
+
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+
+ - <<: *changed
+
+    - name: Check that the user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+    - name: Check that removing the user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ always:
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
diff --git a/test/integration/targets/incidental_setup_docker/aliases b/test/integration/targets/incidental_setup_docker/aliases
new file mode 100644
index 00000000..d466c39c
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/aliases
@@ -0,0 +1,2 @@
+needs/target/setup_epel
+hidden
diff --git a/test/integration/targets/incidental_setup_docker/defaults/main.yml b/test/integration/targets/incidental_setup_docker/defaults/main.yml
new file mode 100644
index 00000000..48959cc3
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/defaults/main.yml
@@ -0,0 +1,18 @@
+docker_cli_version: '0.0'
+docker_api_version: '0.0'
+docker_py_version: '0.0'
+docker_skip_cleanup: no
+docker_prereq_packages: []
+docker_packages:
+ - docker-ce
+
+docker_pip_extra_packages: []
+docker_pip_never_remove: []
+docker_pip_packages:
+ - docker
+
+docker_cleanup_packages:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
diff --git a/test/integration/targets/incidental_setup_docker/handlers/main.yml b/test/integration/targets/incidental_setup_docker/handlers/main.yml
new file mode 100644
index 00000000..9e3f928f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/handlers/main.yml
@@ -0,0 +1,14 @@
+- name: remove pip packages
+ pip:
+ state: absent
+ name: "{{ docker_pip_packages | union(docker_pip_extra_packages) | difference(docker_pip_never_remove) }}"
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
+
+- name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ docker_cleanup_packages }}"
+ state: absent
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
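+
+# Both handlers listen on the "cleanup docker" topic, so a single
+# `notify: cleanup docker` from any task in this role triggers them all.
+# A minimal notifying task (sketch, mirroring tasks/main.yml in this role):
+#
+# - name: Install Python requirements
+#   pip:
+#     name: docker
+#   notify: cleanup docker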
diff --git a/test/integration/targets/incidental_setup_docker/meta/main.yml b/test/integration/targets/incidental_setup_docker/meta/main.yml
new file mode 100644
index 00000000..91a63627
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_constraints
diff --git a/test/integration/targets/incidental_setup_docker/tasks/Debian.yml b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml
new file mode 100644
index 00000000..0ea2cb4b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/Debian.yml
@@ -0,0 +1,43 @@
+- name: Get OS version
+ shell: uname -r
+ register: os_version
+
+- name: Install pre-reqs
+ apt:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ update_cache: yes
+ notify: cleanup docker
+
+- name: Add gpg key
+ shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key
+
+- name: Add Docker repo
+ shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+
+- block:
+ - name: Prevent service restart
+ copy:
+ content: exit 101
+ dest: /usr/sbin/policy-rc.d
+ backup: yes
+ mode: 0755
+ register: policy_rc_d
+
+ - name: Install Docker CE
+ apt:
+ name: "{{ docker_packages }}"
+ state: present
+ update_cache: yes
+ always:
+ - name: Restore /usr/sbin/policy-rc.d (if needed)
+ command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d
+ when:
+ - "'backup_file' in policy_rc_d"
+
+ - name: Remove /usr/sbin/policy-rc.d (if needed)
+ file:
+ path: /usr/sbin/policy-rc.d
+ state: absent
+ when:
+ - "'backup_file' not in policy_rc_d"
diff --git a/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml
new file mode 100644
index 00000000..9f52e8f1
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/Fedora.yml
@@ -0,0 +1,21 @@
+- name: Add repository
+ yum_repository:
+ file: docker-ce
+ name: docker-ce-stable
+ description: Docker CE Stable - $basearch
+ baseurl: https://download.docker.com/linux/fedora/$releasever/$basearch/stable
+ enabled: yes
+ gpgcheck: yes
+ gpgkey: https://download.docker.com/linux/fedora/gpg
+
+- name: Update cache
+ command: dnf makecache
+ args:
+ warn: no
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages }}"
+ state: present
+    enablerepo: docker-ce-stable
+ notify: cleanup docker
diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml
new file mode 100644
index 00000000..bd708315
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/RedHat-7.yml
@@ -0,0 +1,44 @@
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ yum:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Install the EPEL repo, which is missing on RHEL 7 and provides pigz (needed by docker-ce 18)
+ include_role:
+ name: setup_epel
+
+- name: Enable extras repository for RHEL on AWS
+  # RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ uses rhel-7-server-rhui-extras-rpms
+ command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms
+ args:
+ warn: no
+
+# Docker upstream broke their .repo file, so we set the repository up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Update cache
+ command: yum -y makecache fast
+ args:
+ warn: no
+
+- name: Install docker
+ yum:
+ name: "{{ docker_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type == 'docker' }}"
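+
+# ignore_errors is itself templated: inside an ansible-test docker container
+# there is typically no usable init system, so the service start is allowed
+# to fail there (as the task name notes).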
diff --git a/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml
new file mode 100644
index 00000000..6d2fb3e7
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/RedHat-8.yml
@@ -0,0 +1,33 @@
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ dnf:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+ register: result
+ until: result is success
+ retries: 10
+ delay: 2
+
+# Docker upstream broke their .repo file, so we set the repository up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type == 'docker' }}"
diff --git a/test/integration/targets/incidental_setup_docker/tasks/Suse.yml b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml
new file mode 100644
index 00000000..93f4d34e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/Suse.yml
@@ -0,0 +1,7 @@
+- name: Install docker
+ zypper:
+ name: "{{ docker_packages }}"
+ force: yes
+ disable_gpg_check: yes
+ update_cache: yes
+ notify: cleanup docker
diff --git a/test/integration/targets/incidental_setup_docker/tasks/main.yml b/test/integration/targets/incidental_setup_docker/tasks/main.yml
new file mode 100644
index 00000000..359a6d44
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/tasks/main.yml
@@ -0,0 +1,113 @@
+- name: Setup Docker
+ when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ block:
+ - name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ paths:
+ - "{{ role_path }}/tasks"
+
+ - name: Install Python requirements
+ pip:
+ state: present
+ name: "{{ docker_pip_packages | union(docker_pip_extra_packages) }}"
+ extra_args: "-c {{ remote_constraints }}"
+ notify: cleanup docker
+
+ # Detect docker CLI, API and docker-py versions
+ - name: Check Docker CLI version
+ command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}"
+ register: docker_cli_version_stdout
+ ignore_errors: yes
+
+ - name: Check Docker API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'"
+ register: docker_api_version_stdout
+ ignore_errors: yes
+
+ - name: Check docker-py API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'"
+ register: docker_py_version_stdout
+ ignore_errors: yes
+
+ - set_fact:
+ docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}"
+ docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}"
+ docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}"
+
+ - debug:
+ msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}"
+
+ - block:
+ # Cleanup docker daemon
+ - name: "Remove all ansible-test-* docker containers"
+ shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f'
+ register: docker_containers
+ retries: 3
+ delay: 3
+ until: docker_containers is success
+
+ - name: "Remove all ansible-test-* docker volumes"
+ shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f'
+ register: docker_volumes
+
+ - name: "Remove all ansible-test-* docker networks"
+ shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm'
+ register: docker_networks
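+
+    # The pipelines above tolerate empty matches: the exit status of a
+    # shell pipeline is that of its last command, and `xargs -r` skips
+    # running docker entirely when grep produces no output.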
+
+ - name: Cleaned docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines }}"
+ volumes: "{{ docker_volumes.stdout_lines }}"
+ networks: "{{ docker_networks.stdout_lines }}"
+
+ # List all existing docker resources
+ - name: List all docker containers
+ command: docker ps --no-trunc -a
+ register: docker_containers
+
+ - name: List all docker volumes
+ command: docker volume ls
+ register: docker_volumes
+
+ - name: List all docker networks
+ command: docker network ls --no-trunc
+ register: docker_networks
+
+ - name: List all docker images
+ command: docker images --no-trunc -a
+ register: docker_images
+
+ - name: Still existing docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines }}"
+ volumes: "{{ docker_volumes.stdout_lines }}"
+ networks: "{{ docker_networks.stdout_lines }}"
+ images: "{{ docker_images.stdout_lines }}"
+
+ when: docker_cli_version is version('0.0', '>')
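+
+  # docker_cli_version defaults to '0.0' (see defaults/main.yml), so the
+  # cleanup and listing block above only runs when `docker version`
+  # succeeded and set_fact captured a real CLI version.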
diff --git a/test/integration/targets/incidental_setup_docker/vars/Debian.yml b/test/integration/targets/incidental_setup_docker/vars/Debian.yml
new file mode 100644
index 00000000..477bd124
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Debian.yml
@@ -0,0 +1,9 @@
+docker_packages:
+ - docker-ce=5:19.03.0*
+ - docker-ce-cli=5:19.03.0*
+
+docker_prereq_packages:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - software-properties-common
diff --git a/test/integration/targets/incidental_setup_docker/vars/Fedora.yml b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml
new file mode 100644
index 00000000..9dd84344
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Fedora.yml
@@ -0,0 +1,5 @@
+docker_prereq_packages: []
+
+docker_packages:
+ - docker-ce-19.03.1
+ - docker-ce-cli-19.03.1
diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
new file mode 100644
index 00000000..84ba0920
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/RedHat-7.yml
@@ -0,0 +1,18 @@
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+docker_packages:
+ - docker-ce-19.03.1
+ - docker-ce-cli-19.03.1
+
+docker_pip_extra_packages:
+ - requests==2.6.0
+
+# We need to pin the above so pip finds the right system-installed package,
+# but we never want to try to remove it, so we subtract this from the set of
+# packages we remove on cleanup
+docker_pip_never_remove:
+ - requests==2.6.0
diff --git a/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
new file mode 100644
index 00000000..ff6dcf7b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/RedHat-8.yml
@@ -0,0 +1,9 @@
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+docker_packages:
+ - docker-ce-19.03.13
+ - docker-ce-cli-19.03.13
diff --git a/test/integration/targets/incidental_setup_docker/vars/Suse.yml b/test/integration/targets/incidental_setup_docker/vars/Suse.yml
new file mode 100644
index 00000000..b740861f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Suse.yml
@@ -0,0 +1,2 @@
+docker_packages:
+ - docker=19.03.1_ce
diff --git a/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..36ab54b9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/Ubuntu-14.yml
@@ -0,0 +1,5 @@
+docker_pip_extra_packages:
+  # Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version
+  # to ensure our get_url tests work fine. This is only an issue if pyOpenSSL is also installed.
+  # It is not clear why RHEL 7 needs this specific version
+ - requests==2.6.0
diff --git a/test/integration/targets/incidental_setup_docker/vars/default.yml b/test/integration/targets/incidental_setup_docker/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_docker/vars/default.yml
diff --git a/test/integration/targets/incidental_setup_ec2/aliases b/test/integration/targets/incidental_setup_ec2/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_ec2/defaults/main.yml b/test/integration/targets/incidental_setup_ec2/defaults/main.yml
new file mode 100644
index 00000000..fb1f88b1
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+resource_prefix: 'ansible-testing-'
diff --git a/test/integration/targets/incidental_setup_ec2/tasks/common.yml b/test/integration/targets/incidental_setup_ec2/tasks/common.yml
new file mode 100644
index 00000000..bf23f539
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/tasks/common.yml
@@ -0,0 +1,119 @@
+---
+
+# ============================================================
+- name: test with no parameters
+ action: "{{module_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+# ============================================================
+- name: test with only name
+ action: "{{module_name}} name={{ec2_key_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with only 'name'
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Either region or ec2_url must be specified"'
+
+# ============================================================
+- name: test invalid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='asdf querty 1234'"
+ register: result
+ ignore_errors: true
+
+- name: assert invalid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("value of region must be one of:")'
+
+# ============================================================
+- name: test valid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='{{ec2_region}}'"
+ register: result
+ ignore_errors: true
+
+- name: assert valid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test environment variable EC2_REGION
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+- name: assert environment variable EC2_REGION
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test invalid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: bogus.example.com
+ register: result
+ ignore_errors: true
+
+- name: assert invalid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test valid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: '{{ec2_url}}'
+ register: result
+ ignore_errors: true
+
+- name: assert valid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test credentials from environment
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+- name: assert ec2_key with valid ec2_url
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
+
+# ============================================================
+- name: test credential parameters
+ action: "{{module_name}} name='{{ec2_key_name}}' ec2_region='{{ec2_region}}' ec2_access_key=bogus_access_key ec2_secret_key=bogus_secret_key"
+ register: result
+ ignore_errors: true
+
+- name: assert credential parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
diff --git a/test/integration/targets/incidental_setup_ec2/vars/main.yml b/test/integration/targets/incidental_setup_ec2/vars/main.yml
new file mode 100644
index 00000000..3d7209ef
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/vars/main.yml
@@ -0,0 +1,3 @@
+---
+ec2_url: ec2.amazonaws.com
+ec2_region: us-east-1
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/README.md b/test/integration/targets/incidental_setup_flatpak_remote/README.md
new file mode 100644
index 00000000..d7916c14
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/README.md
@@ -0,0 +1,138 @@
+# Create a dummy flatpak repository remote
+
+This document describes how to create a local dummy flatpak repo, just like the one contained in the `files/repo.tar.xz` archive.
+
+
+## Create a hello world app
+
+Prerequisites:
+
+ - the `flathub` remote (provides the runtime and SDK installed below)
+
+Prepare the environment:
+
+```
+flatpak install --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6
+```
+
+Create a hello world executable:
+
+```
+echo $'#!/bin/sh\necho hello world' > hello.sh
+```
+
+To create dummy flatpaks, run this (defining a unique NUM for every flatpak to add):
+
+```
+export NUM=1
+flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6;
+flatpak build appdir$NUM mkdir /app/bin;
+flatpak build appdir$NUM install --mode=750 hello.sh /app/bin;
+flatpak build-finish --command=hello.sh appdir$NUM
+```
+
+## Create a repo and/or add the app to it
+
+Create a repo and add the app to it in one command:
+
+```
+flatpak build-export repo appdir$NUM stable
+```
+
+## Create flatpak*-files
+
+Put a flatpakref file under the repo folder (`repo/com.dummy.App1.flatpakref`):
+
+```
+[Flatpak Ref]
+Title=Dummy App$NUM
+Name=com.dummy.App$NUM
+Branch=stable
+Url=file:///tmp/flatpak/repo
+GPGKey={{ base64-encoded public KEY }}
+IsRuntime=false
+RuntimeRepo=https://flathub.org/repo/flathub.flatpakrepo
+```
+
+Add a `.flatpakrepo` file to the `repo` folder (`repo/dummy-repo.flatpakrepo`):
+
+```
+[Flatpak Repo]
+Title=Dummy Repo
+Url=file:///tmp/flatpak/repo
+Comment=Dummy repo for ansible module integration testing
+Description=Dummy repo for ansible module integration testing
+GPGKey={{ base64-encoded public KEY }}
+```
+
+## Sign the repo
+
+Create a new key in a new gpg home folder (on RedHat systems, the executable needs to be addressed as `gpg2`):
+
+```
+mkdir gpg
+gpg --homedir gpg --quick-gen-key test@dummy.com
+```
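+
+To find the `KEY_ID` used in the commands below, list the generated key (illustrative; the exact output format depends on the gpg version):
+
+```
+gpg --homedir gpg --list-keys --keyid-format long
+```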
+
+Sign the repo and summary file; you need to redo this whenever you update the repository:
+
+```
+flatpak build-sign repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+flatpak build-update-repo repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+```
+
+Export the public key as a file:
+
+```
+gpg --homedir=gpg --export KEY_ID > dummy-repo.gpg
+```
+
+Create a base64-encoded string from the gpg file for the `GPGKey=` property in the flatpak*-files:
+
+```
+base64 dummy-repo.gpg | tr -d '\n'
+```
+
+## How to use the repo
+
+Now you can add the `repo` folder as a local repo:
+
+```
+flatpak --system remote-add --gpg-import=/tmp/flatpak/repo/dummy-repo.gpg dummy-repo /tmp/flatpak/repo
+```
+
+Or, via `.flatpakrepo` file:
+
+```
+flatpak --system remote-add dummy-repo /tmp/flatpak/repo/dummy-repo.flatpakrepo
+```
+
+And install the hello world flatpaks like this:
+
+```
+flatpak --system install dummy-repo com.dummy.App$NUM
+```
+
+Or from flatpakref:
+
+```
+flatpak --system install --from /tmp/flatpak/repo/com.dummy.App$NUM.flatpakref
+```
+
+Run the app:
+
+```
+flatpak run com.dummy.App$NUM
+```
+
+To install an app without any runtime dependencies (the app will be broken, but it is enough to test flatpak installation):
+
+```
+flatpak --system install --no-deps dummy-repo com.dummy.App$NUM
+```
+
+## Sources
+
+* https://blogs.gnome.org/alexl/2017/02/10/maintaining-a-flatpak-repository/
+
+* http://docs.flatpak.org/en/latest/first-build.html
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/aliases b/test/integration/targets/incidental_setup_flatpak_remote/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz b/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz
new file mode 100644
index 00000000..41a89c46
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/files/repo.tar.xz
Binary files differ
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml
new file mode 100644
index 00000000..9380dee9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/handlers/main.yaml
@@ -0,0 +1,4 @@
+- name: remove temporary flatpak link
+ file:
+ state: absent
+ path: /tmp/flatpak
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml
new file mode 100644
index 00000000..75ee4583
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml b/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml
new file mode 100644
index 00000000..c199d216
--- /dev/null
+++ b/test/integration/targets/incidental_setup_flatpak_remote/tasks/main.yaml
@@ -0,0 +1,22 @@
+- name: Set up dummy flatpak repository remote
+ block:
+
+ - name: Copy repo into place
+ unarchive:
+ src: repo.tar.xz
+ dest: "{{ remote_tmp_dir }}"
+ owner: root
+ group: root
+ mode: 0644
+
+ - name: Create deterministic link to temp directory
+ file:
+ state: link
+ src: "{{ remote_tmp_dir }}/"
+ path: "/tmp/flatpak"
+ owner: root
+ group: root
+ mode: 0644
+ notify: remove temporary flatpak link
+
+ become: true
diff --git a/test/integration/targets/incidental_setup_mongodb/aliases b/test/integration/targets/incidental_setup_mongodb/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_mongodb/defaults/main.yml b/test/integration/targets/incidental_setup_mongodb/defaults/main.yml
new file mode 100644
index 00000000..b205013c
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/defaults/main.yml
@@ -0,0 +1,46 @@
+mongodb_version: "4.0"
+
+apt:
+ keyserver: "keyserver.ubuntu.com"
+ keyserver_id: "9DA31620334BD75D9DCB49F368818C72E52529D4"
+ repo: "deb [ arch=amd64 ] http://repo.mongodb.org/apt/ubuntu {{ansible_distribution_release}}/mongodb-org/{{mongodb_version}} multiverse"
+
+mongodb_packages:
+ mongod: mongodb-org-server
+ mongos: mongodb-org-mongos
+ mongo: mongodb-org-shell
+
+yum:
+ name: mongodb-org
+ description: "Official MongoDB {{mongodb_version}} yum repo"
+ baseurl: https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/{{mongodb_version}}/x86_64/
+ gpgcheck: 1
+ gpgkey: https://www.mongodb.org/static/pgp/server-{{mongodb_version}}.asc
+ redhat8url: https://repo.mongodb.org/yum/redhat/7/mongodb-org/{{mongodb_version}}/x86_64/
+ fedoraurl: https://repo.mongodb.org/yum/amazon/2013.03/mongodb-org/{{mongodb_version}}/x86_64/
+
+debian_packages_py2:
+ - python-dev
+ - python-setuptools
+ - python-pip
+
+debian_packages_py36:
+ - python3.6-dev
+ - python3-setuptools
+ - python3-pip
+
+redhat_packages_py2:
+ - python-devel
+ - python-setuptools
+ - python-pip
+
+redhat_packages_py3:
+ - python3-devel
+ - python3-setuptools
+ - python3-pip
+
+# Do not install requests[security] via pip. It will cause test failures.
+# See https://github.com/ansible/ansible/pull/66319
+pip_packages:
+ - psutil
+ - pymongo
diff --git a/test/integration/targets/incidental_setup_mongodb/handlers/main.yml b/test/integration/targets/incidental_setup_mongodb/handlers/main.yml
new file mode 100644
index 00000000..1b73525e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/handlers/main.yml
@@ -0,0 +1,24 @@
+- name: Remove debian_packages_py2
+ apt:
+ name: "{{ debian_packages_py2 }}"
+ state: absent
+
+- name: Remove debian_packages_py36
+ apt:
+ name: "{{ debian_packages_py36 }}"
+ state: absent
+
+- name: Remove redhat_packages_py2
+  yum:
+    name: "{{ redhat_packages_py2 }}"
+    state: absent
+
+- name: Remove redhat_packages_py3
+  yum:
+    name: "{{ redhat_packages_py3 }}"
+    state: absent
+
+- name: remove mongodb pip packages
+ pip:
+ name: "{{ pip_packages }}"
+ state: absent
diff --git a/test/integration/targets/incidental_setup_mongodb/tasks/main.yml b/test/integration/targets/incidental_setup_mongodb/tasks/main.yml
new file mode 100644
index 00000000..16382ce5
--- /dev/null
+++ b/test/integration/targets/incidental_setup_mongodb/tasks/main.yml
@@ -0,0 +1,166 @@
+# (c) 2019, Rhys Campbell <rhys.james.campbell@googlemail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+# https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/
+# Support for Ubuntu 14.04 has been removed from MongoDB 4.0.10+, 3.6.13+, and 3.4.21+.
+# CentOS6 has python version issues
+- meta: end_play
+ when: (ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04')
+ or (ansible_os_family == "RedHat" and ansible_distribution_major_version == '6')
+ or ansible_os_family == "Suse"
+ or ansible_distribution == 'Fedora'
+ or (ansible_facts['distribution'] == "CentOS")
+
+# Ubuntu
+- name: Import MongoDB public GPG Key
+ apt_key:
+ keyserver: "{{ apt.keyserver }}"
+ id: "{{ apt.keyserver_id }}"
+ when:
+ - ansible_distribution_version in ["16.04", "18.04"]
+ - ansible_distribution == 'Ubuntu'
+
+- name: Add MongoDB repository into sources list
+ apt_repository:
+ repo: "{{ apt.repo }}"
+ state: present
+ update_cache: yes
+ when:
+ - ansible_distribution_version in ["16.04", "18.04"]
+ - ansible_distribution == 'Ubuntu'
+
+# Need to handle various platforms here. Package name will not always be the same
+- name: Ensure mongod package is installed
+ apt:
+ name: "{{ mongodb_packages.mongod }}"
+ state: present
+ force: yes
+ when:
+ - ansible_distribution == 'Ubuntu'
+
+- name: Ensure mongos package is installed
+ apt:
+ name: "{{ mongodb_packages.mongos }}"
+ state: present
+ force: yes
+ when:
+ - ansible_distribution == 'Ubuntu'
+
+- name: Ensure mongo client is installed
+ apt:
+ name: "{{ mongodb_packages.mongo }}"
+ state: present
+ force: yes
+ when:
+ - ansible_distribution == 'Ubuntu'
+# EOF Ubuntu
+
+# Redhat
+- name: Add MongoDB repo
+ yum_repository:
+ name: "{{ yum.name }}"
+ description: "{{ yum.description }}"
+ baseurl: "{{ yum.baseurl }}"
+ gpgcheck: "{{ yum.gpgcheck }}"
+ gpgkey: "{{ yum.gpgkey }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version.split('.')[0]|int <= 7
+ - not ansible_distribution == "Fedora"
+
+
+- name: RedHat 8 repo is not yet available, so use the RedHat 7 url
+ yum_repository:
+ name: "{{ yum.name }}"
+ description: "{{ yum.description }}"
+ baseurl: "{{ yum.redhat8url }}"
+ gpgcheck: "{{ yum.gpgcheck }}"
+ gpgkey: "{{ yum.gpgkey }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version.split('.')[0]|int == 8
+ - not ansible_distribution == "Fedora"
+
+- name: Use a different url for Fedora-based systems
+ yum_repository:
+ name: "{{ yum.name }}"
+ description: "{{ yum.description }}"
+ baseurl: "{{ yum.fedoraurl }}"
+ gpgcheck: "{{ yum.gpgcheck }}"
+ gpgkey: "{{ yum.gpgkey }}"
+ when:
+ - ansible_distribution == "Fedora"
+
+- name: Ensure mongod package is installed
+ yum:
+ name: "{{ mongodb_packages.mongod }}"
+ state: present
+ when: ansible_os_family == "RedHat"
+
+- name: Ensure mongos package is installed
+ yum:
+ name: "{{ mongodb_packages.mongos }}"
+ state: present
+ when: ansible_os_family == "RedHat"
+
+- name: Ensure mongo client is installed
+ yum:
+ name: "{{ mongodb_packages.mongo }}"
+ state: present
+ when: ansible_os_family == "RedHat"
+# EOF Redhat
+
+- name: Install debian_packages
+ apt:
+ name: "{{ debian_packages_py2 }}"
+ when:
+ - ansible_os_family == "Debian"
+ - ansible_distribution_version == "16.04"
+ notify: Remove debian_packages_py2
+
+- name: Install debian_packages
+ apt:
+ name: "{{ debian_packages_py36 }}"
+ when:
+ - ansible_os_family == "Debian"
+ - ansible_distribution_version == "18.04"
+ notify: Remove debian_packages_py36
+
+- name: Install redhat_packages_py2
+ yum:
+ name: "{{ redhat_packages_py2 }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version|float < 8
+ - not (ansible_os_family == "RedHat" and ansible_distribution_version|float < 8)
+ notify: Remove redhat_packages_py2
+
+- name: Install redhat_packages_py3
+ yum:
+ name: "{{ redhat_packages_py3 }}"
+ when:
+ - ansible_os_family == "RedHat"
+ - ansible_distribution_version|float >= 8
+ notify: Remove redhat_packages_py3
+
+- name: Install pip packages
+ pip:
+ name: "{{ pip_packages }}"
+ state: present
+ notify: remove mongodb pip packages
diff --git a/test/integration/targets/incidental_setup_postgresql_db/aliases b/test/integration/targets/incidental_setup_postgresql_db/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml b/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml
new file mode 100644
index 00000000..aea02442
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/defaults/main.yml
@@ -0,0 +1,17 @@
+postgresql_service: postgresql
+
+postgresql_packages:
+ - postgresql-server
+ - python-psycopg2
+
+pg_user: postgres
+pg_group: root
+
+locale_latin_suffix:
+locale_utf8_suffix:
+
+# defaults for test SSL
+ssl_db: 'ssl_db'
+ssl_user: 'ssl_user'
+ssl_pass: 'ssl_pass'
+ssl_rootcert: '~{{ pg_user }}/root.crt'
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql
new file mode 100644
index 00000000..53c79666
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--1.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text';
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql
new file mode 100644
index 00000000..227ba1b4
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--2.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text';
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql
new file mode 100644
index 00000000..7d6a60e5
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy--3.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text';
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control b/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control
new file mode 100644
index 00000000..4f8553c2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/dummy.control
@@ -0,0 +1,3 @@
+comment = 'dummy extension used to test postgresql_ext Ansible module'
+default_version = '3.0'
+relocatable = true
diff --git a/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf b/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf
new file mode 100644
index 00000000..58de3607
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/files/pg_hba.conf
@@ -0,0 +1,10 @@
+# !!! This file managed by Ansible. Any local changes may be overwritten. !!!
+
+# Database administrative login by UNIX sockets
+# note: you may wish to restrict this further later
+local all {{ pg_user }} trust
+
+# TYPE DATABASE USER CIDR-ADDRESS METHOD
+local all all md5
+host all all 127.0.0.1/32 md5
+host all all ::1/128 md5
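+
+# "trust" lets the administrative UNIX-socket login above in without a
+# password; every other local or loopback connection must authenticate with
+# an md5 password hash.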
diff --git a/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml b/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml
new file mode 100644
index 00000000..2e969c31
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/tasks/main.yml
@@ -0,0 +1,222 @@
+- name: python 2
+ set_fact:
+ python_suffix: ""
+ when: ansible_python_version is version('3', '<')
+
+- name: python 3
+ set_fact:
+ python_suffix: "-py3"
+ when: ansible_python_version is version('3', '>=')
+
+- name: Include distribution and Python version specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - 'default{{ python_suffix }}.yml'
+ paths:
+ - "{{ role_path }}/vars"
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora'
+
+# Make sure we start fresh
+- name: stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ ignore_errors: True
+
+- name: remove old db (RedHat or Suse)
+ file:
+ path: "{{ pg_dir }}"
+ state: absent
+ ignore_errors: True
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Suse"
+
+- name: remove old db (FreeBSD)
+ file:
+ path: "{{ pg_dir }}"
+ state: absent
+ ignore_errors: True
+ when: ansible_os_family == "FreeBSD"
+
+# Theoretically, pg_dropcluster should work, but it doesn't, so remove the files instead
+- name: remove old db config and files (debian)
+ file:
+ path: '{{ loop_item }}'
+ state: absent
+ ignore_errors: True
+ when: ansible_os_family == "Debian"
+ loop:
+ - /etc/postgresql
+ - /var/lib/postgresql
+ loop_control:
+ loop_var: loop_item
+
+- name: install dependencies for postgresql test
+ package:
+ name: "{{ postgresql_package_item }}"
+ state: present
+ with_items: "{{ postgresql_packages }}"
+ loop_control:
+ loop_var: postgresql_package_item
+
+- name: initialize postgres (FreeBSD)
+ command: /usr/local/etc/rc.d/postgresql oneinitdb
+ when: ansible_os_family == "FreeBSD"
+
+- name: Initialize postgres (RedHat systemd)
+ command: postgresql-setup initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd"
+
+- name: Initialize postgres (RedHat sysv)
+ command: /sbin/service postgresql initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd"
+
+- name: Initialize postgres (Debian)
+ shell: '. /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main'
+ args:
+ creates: "/etc/postgresql/{{ pg_ver }}/"
+ when: ansible_os_family == 'Debian'
+
+- name: Initialize postgres (Suse)
+ service: name=postgresql state=restarted
+ when: ansible_os_family == 'Suse'
+
+- name: Copy pg_hba into place
+ template:
+ src: files/pg_hba.conf
+ dest: "{{ pg_hba_location }}"
+ owner: "{{ pg_user }}"
+ group: "{{ pg_group }}"
+ mode: "0644"
+
+- name: Generate locales (Debian)
+ locale_gen:
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - pt_BR
+ - es_ES
+ when: ansible_os_family == 'Debian'
+
+# Suse: locales are installed by default (glibc-locale package).
+# Fedora 23: locales are installed by default (glibc-common package)
+# CentOS: all locales are installed by default (glibc-common package) but some
+# RPM macros could prevent their installation (for example when using anaconda
+# instLangs parameter).
+
+- block:
+ - name: Install langpacks (RHEL8)
+ yum:
+ name:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ - glibc-all-langpacks
+ state: present
+ when: ansible_distribution_major_version is version('8', '>=')
+
+ - name: Check if locales need to be generated (RedHat)
+ shell: "localedef --list-archive | grep -a -q '^{{ locale }}$'"
+ register: locale_present
+ ignore_errors: True
+ with_items:
+ - es_ES
+ - pt_BR
+ loop_control:
+ loop_var: locale
+
+ - name: Reinstall internationalization files
+ shell: 'yum -y reinstall glibc-common || yum -y install glibc-common'
+ args:
+ warn: no
+ when: locale_present is failed
+
+ - name: Generate locale (RedHat)
+ command: 'localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}'
+ when: item is failed
+ with_items: '{{ locale_present.results }}'
+ when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
+
+- name: Install glibc langpacks (Fedora >= 24)
+ package:
+ name: '{{ item }}'
+ state: 'latest'
+ with_items:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=')
+
+- name: enable postgresql service (FreeBSD)
+ lineinfile:
+ path: /etc/rc.conf
+ line: 'postgresql_enable="YES"'
+ when: ansible_os_family == "FreeBSD"
+
+- name: stop postgresql service
+ service:
+ name: "{{ postgresql_service }}"
+ state: stopped
+
+- name: pause between stop and start of postgresql service
+ pause:
+ seconds: 3
+
+- name: start postgresql service
+ service:
+ name: "{{ postgresql_service }}"
+ state: started
+
+########################
+# Setup dummy extension:
+- name: copy control file for dummy ext
+ copy:
+ src: dummy.control
+ dest: "/usr/share/postgresql/{{ pg_ver }}/extension/dummy.control"
+ mode: 0444
+ when: ansible_os_family == 'Debian'
+
+- name: copy version files for dummy ext
+ copy:
+ src: "{{ item }}"
+ dest: "/usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}"
+ mode: 0444
+ with_items:
+ - dummy--1.0.sql
+ - dummy--2.0.sql
+ - dummy--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: add update paths
+ file:
+ path: "/usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}"
+ mode: 0444
+ state: touch
+ with_items:
+ - dummy--1.0--2.0.sql
+ - dummy--2.0--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: Get PostgreSQL version
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: "echo 'SHOW SERVER_VERSION' | psql --tuples-only --no-align --dbname postgres"
+ register: postgres_version_resp
+
+- name: Print PostgreSQL server version
+ debug:
+ msg: "{{ postgres_version_resp.stdout }}"
+
+# SSL configuration.
+# Restricted to the Debian family because other distributions produce errors
+# that are not related to PostgreSQL or psycopg2 SSL support.
+# The key point of the tests is to be sure that SSL options work in general
+- import_tasks: ssl.yml
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml b/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml
new file mode 100644
index 00000000..bc45ec6f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/tasks/ssl.yml
@@ -0,0 +1,81 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# The aim of this test is to be sure that SSL options work in general
+# and to prepare the environment for testing these options in
+# the following PostgreSQL modules (ssl_db, ssl_user, certs).
+# Configured by https://www.postgresql.org/docs/current/ssl-tcp.html
+
+####################
+# Prepare for tests:
+
+- name: postgresql SSL - create database
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ ssl_db }}"
+
+- name: postgresql SSL - create role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ ssl_user }}"
+ role_attr_flags: SUPERUSER
+ password: "{{ ssl_pass }}"
+
+- name: postgresql SSL - install openssl
+ become: yes
+ package: name=openssl state=present
+
+- name: postgresql SSL - create certs 1
+ become_user: root
+ become: yes
+ shell: 'openssl req -new -nodes -text -out ~{{ pg_user }}/root.csr \
+ -keyout ~{{ pg_user }}/root.key -subj "/CN=localhost.local"'
+
+- name: postgresql SSL - create certs 2
+ become_user: root
+ become: yes
+ shell: 'openssl x509 -req -in ~{{ pg_user }}/root.csr -text -days 3650 \
+ -extensions v3_ca -signkey ~{{ pg_user }}/root.key -out ~{{ pg_user }}/root.crt'
+
+- name: postgresql SSL - create certs 3
+ become_user: root
+ become: yes
+ shell: 'openssl req -new -nodes -text -out ~{{ pg_user }}/server.csr \
+ -keyout ~{{ pg_user }}/server.key -subj "/CN=localhost.local"'
+
+- name: postgresql SSL - create certs 4
+ become_user: root
+ become: yes
+ shell: 'openssl x509 -req -in ~{{ pg_user }}/server.csr -text -days 365 \
+ -CA ~{{ pg_user }}/root.crt -CAkey ~{{ pg_user }}/root.key -CAcreateserial -out server.crt'
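+
+# A quick manual check of the chain built above (illustrative; assumes
+# pg_user is "postgres" and server.crt was written to the current directory):
+#   openssl verify -CAfile ~postgres/root.crt server.crt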
+
+- name: postgresql SSL - set right permissions to files
+ become_user: root
+ become: yes
+ file:
+ path: '{{ item }}'
+ mode: 0600
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ with_items:
+ - '~{{ pg_user }}/root.key'
+ - '~{{ pg_user }}/server.key'
+ - '~{{ pg_user }}/root.crt'
+ - '~{{ pg_user }}/server.csr'
+
+- name: postgresql SSL - enable SSL
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_set:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: ssl
+ value: on
+
+- name: postgresql SSL - reload PostgreSQL to enable ssl on
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: reloaded
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml
new file mode 100644
index 00000000..c5c6795e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Debian-8.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.4/main"
+pg_ver: 9.4
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml
new file mode 100644
index 00000000..2f6b0d98
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py36-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml
new file mode 100644
index 00000000..efb0603b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-11.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
new file mode 100644
index 00000000..2f6b0d98
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py36-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml
new file mode 100644
index 00000000..1fe66782
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.0.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql96-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.6
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
new file mode 100644
index 00000000..cd7c83a4
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql11-server
+ - py36-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml
new file mode 100644
index 00000000..0b1ab5b2
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/FreeBSD-12.1.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql11-server
+ - py27-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml
new file mode 100644
index 00000000..ee083722
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+ - "bzip2"
+ - "xz"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml
new file mode 100644
index 00000000..20c4b1f5
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/RedHat.yml
@@ -0,0 +1,7 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+ - "bzip2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml
new file mode 100644
index 00000000..4b6e744b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-12.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.1/main"
+pg_ver: 9.1
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..ffcc8dd4
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-14.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.3/main"
+pg_ver: 9.3
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml
new file mode 100644
index 00000000..b088c310
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml
new file mode 100644
index 00000000..897efd2c
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-16.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml
new file mode 100644
index 00000000..10453bdf
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/Ubuntu-18-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/10/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/10/main"
+pg_ver: 10
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml
new file mode 100644
index 00000000..19152a64
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/default-py3.yml
@@ -0,0 +1,6 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml b/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml
new file mode 100644
index 00000000..ab36dd9f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_postgresql_db/vars/default.yml
@@ -0,0 +1,6 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/test/integration/targets/incidental_setup_rabbitmq/aliases b/test/integration/targets/incidental_setup_rabbitmq/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf b/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf
new file mode 100644
index 00000000..1e602175
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/files/rabbitmq.conf
@@ -0,0 +1,8 @@
+listeners.ssl.default = 5671
+
+ssl_options.cacertfile = /tls/ca_certificate.pem
+ssl_options.certfile = /tls/server_certificate.pem
+ssl_options.keyfile = /tls/server_key.pem
+ssl_options.password = bunnies
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = false
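+# Note: verify_peer together with fail_if_no_peer_cert = false means client
+# certificates are validated against the CA when presented, but clients
+# without a certificate may still connect.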
diff --git a/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml b/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml
new file mode 100644
index 00000000..7a6c3e01
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_setup_tls
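+# incidental_setup_tls runs first and copies the test certificates into /tls,
+# which is where files/rabbitmq.conf expects to find them.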
diff --git a/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml b/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml
new file mode 100644
index 00000000..4f35f16f
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: ubuntu.yml
+ when: ansible_distribution == 'Ubuntu'
diff --git a/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml b/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml
new file mode 100644
index 00000000..6d7a3ef0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_rabbitmq/tasks/ubuntu.yml
@@ -0,0 +1,63 @@
+---
+# https://www.rabbitmq.com/install-debian.html#apt-pinning
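+# A Pin-Priority of 1000 or higher makes apt install the pinned version even
+# if that would be a downgrade, keeping erlang on a version this RabbitMQ
+# build is known to work with.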
+- name: Pin erlang version that rabbitmq supports
+ copy:
+ dest: /etc/apt/preferences.d/erlang
+ content: |
+ Package: erlang*
+ Pin: version 1:20.3.8.18-1
+ Pin-Priority: 1000
+
+ Package: esl-erlang
+ Pin: version 1:20.3.6
+ Pin-Priority: 1000
+
+- name: Install https transport for apt
+ apt:
+ name: apt-transport-https
+ state: latest
+ force: yes
+
+- name: Add RabbitMQ release signing key
+ apt_key:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_rabbitmq/rabbitmq-release-signing-key.asc
+ state: present
+
+- name: Add RabbitMQ Erlang repository
+ apt_repository:
+ repo: "deb https://dl.bintray.com/rabbitmq-erlang/debian {{ ansible_distribution_release }} erlang-20.x"
+ filename: 'rabbitmq-erlang'
+ state: present
+ update_cache: yes
+
+# Required by the rabbitmq modules that use the management API
+- name: Install requests
+ pip:
+ name: requests
+
+- name: Install RabbitMQ Server
+ apt:
+ deb: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_rabbitmq/rabbitmq-server_3.7.14-1_all.deb
+
+- name: Install RabbitMQ TLS dependencies
+ apt:
+ name: "{{ item }}"
+ state: latest
+ loop:
+ - erlang-asn1
+ - erlang-crypto
+ - erlang-public-key
+ - erlang-ssl
+
+- name: Ensure TLS config
+ copy:
+ src: rabbitmq.conf
+ dest: /etc/rabbitmq/rabbitmq.conf
+
+- name: Start RabbitMQ service
+ service:
+ name: rabbitmq-server
+ state: started
+
+- name: Enable management
+ command: rabbitmq-plugins enable --online rabbitmq_management
diff --git a/test/integration/targets/incidental_setup_tls/aliases b/test/integration/targets/incidental_setup_tls/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem b/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem
new file mode 100644
index 00000000..a438d926
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/ca_certificate.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDAjCCAeqgAwIBAgIJANguFROhaWocMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV
+BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE5
+MDExMTA4MzMxNVoXDTI5MDEwODA4MzMxNVowMTEgMB4GA1UEAwwXVExTR2VuU2Vs
+ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDqVt84czSxWnWW4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp
+7PrBbYF05FOgSdJLvL6grlRSQK2VPsXdLfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4l
+JVpSDsBV2orR4pOIf1s1+iSwvcRQkX46SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy
+0K2MbRs7oG2rdKks8zisfT0ymKnrFTdVeUjIrg0sStaMnf9VVkcEeYkfNY0vWqdn
+CV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET+q+gOvjsEqzn7DvlPkmk86hIIWXKi3aM
+A9swknL3rnagJL6GioWRpYUwKdRKmZxdyr4I2JTTAgMBAAGjHTAbMAwGA1UdEwQF
+MAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQACTpPBf5WSwZ7r
+hrbPUN3qVh70HI0ZNK2jlK6b5fpSdw3JI/GQl0Kw3eGICLzwTByWvhD62U7IigL5
+0UWxWuEod310Y/qo/7OxRVPp5PH/0oNGoKHhEzas2ii0heQYGsHQUKGzYNNyVfjy
+nqBFz5AcKf067LcXivYqod6JDQHqFq/5/hWlIsHHrZIeijqqtthPq39GlGAYO+AB
+U66nzlH7YQgmfYfy6l7O4LsjXf/bz9rWvueO3NqCsmXV+FacDkOkwWA5Kf6rcgNL
+3G+2HAVTRIXDnO4ShnK6aYMW+UklpYRlVYBBUOdwoNIp5gI+BlSc1IuF6PdLVt3q
+VdjN1MjY
+-----END CERTIFICATE-----
diff --git a/test/integration/targets/incidental_setup_tls/files/ca_key.pem b/test/integration/targets/incidental_setup_tls/files/ca_key.pem
new file mode 100644
index 00000000..0a950eda
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/ca_key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqVt84czSxWnWW
+4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp7PrBbYF05FOgSdJLvL6grlRSQK2VPsXd
+LfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4lJVpSDsBV2orR4pOIf1s1+iSwvcRQkX46
+SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy0K2MbRs7oG2rdKks8zisfT0ymKnrFTdV
+eUjIrg0sStaMnf9VVkcEeYkfNY0vWqdnCV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET
++q+gOvjsEqzn7DvlPkmk86hIIWXKi3aMA9swknL3rnagJL6GioWRpYUwKdRKmZxd
+yr4I2JTTAgMBAAECggEBALpg9ZDUMCiOpc+mbNO/ZkP90M7u38Q0M+7HY8XHOPkt
+l+XUkWueSMRLhSeLDzMlnwf1HyN8RZLaJkzP6XAL1VXEwuXAiIskaZ4Cg07Arp/W
+8cHhf4CcMuUVuCtOZcC+ajD4Do5zn9vkm9yH0ap0o0LdoWa/a8WfU+luy0EHBsSW
+6qqI+nqNFmISluVbfWt7t3zp273+8sir6YeHQu9G91/jzggv8rHmu4EHhi3cnU0K
+vY6OPCGBL7nrg9Rv1LSFpH95TvlIM6/Cm0AjgW7m6XwWUTaI9p+GvKzrYUSLd9L/
+QxlmAwiu/sBTXLrsWyr8XEtj+lVGxQ6eFbf6E+lUm8ECgYEA+8Wgmhf3VsC3gvJz
+w2jApEoOioD5iGOWGClGVURkfaBhFELr4XCTVMdBuCtxT7LYTMHTAlBqIbdWDjB4
+m/E417hLGogSDy7j0R0Mx75OOGEitxYUhe0VGDNoytgCNd2UnTMt42lp+9vAHZag
+INhVDOnxRNdtNTf1yYkWUMEbh1sCgYEA7kZNJXPVYJtR78+km/Gcv64Umci7KUV+
+hYc7chR5xv3cXvXg5eojKa4G7CyMQTX7VnRa6CiQKdN73AbIAhS4Oy5UlCOKtmb8
+xnBiOAYwSpOfIeZhjq0RvEeZX0t6u7XsErBZ03rEPKXF2nNDo1x8byrlKPtlUzwJ
+gb5yjmK/mekCgYEA1TWQAs5m4+2Bun+tbv7nnHkmhT4hktGays0xRYYMf6Jwc6MU
+dC5MZg/zZI5Nf8uZhq7hDWWh6vmCA7QifxSxKWVlHIu8l2UDAhRSvVg4j2Aa8Obe
+7GdQZNUsWhLBFHKXpuQvaRTc7q8yqxvicM4igDQg4EZ6sgW4vDm+TxapRF8CgYAz
+n6mhPqpxRtWGxo8cdkmGwfmWpAXg2DykQ3teqQ8FTQUM0erLBWJe6mR3kONGUaLF
+xWnYuMkbNsW0EwgMY17S+6O5gMXR5RhJChpNlxGpZrhoiNiEJ/0atMyG9/x8ZNrj
+5a9ggU248hWe0bBK2YPgNgP2UBlQ4kYRBSkerkhi2QKBgF+tlpyqcU+0iY82qRS2
+wMf7oI2pWR8nX9LPAY/nnvwWvqwcAFJPMlSMTu8Ext6h7l9yu+7JGL6JWwsO57Lb
+Gm/RxbuZ/kG/13+lSNmZiyHrhj6hZhkAMeFM34fpT4+DBXqSxZuvdrmwBc5B2jYg
+F9Bv8gcmZlGhqONL23evr9Gu
+-----END PRIVATE KEY-----
diff --git a/test/integration/targets/incidental_setup_tls/files/client_certificate.pem b/test/integration/targets/incidental_setup_tls/files/client_certificate.pem
new file mode 100644
index 00000000..501d8389
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/client_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MThaFw0yOTAxMDgwODMzMThaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCoM+OQ3HCnCUAAz9KGGTwWB9hQbUfAZXm/stlb2/uOAp3rNwxAlCs/giymBHE6
+Iu6mrK006Vn+Z9ibqIrD2LuCOxcu25y8goqG62TgdP5sa9wR+597s0XssnwnaY8y
+bJ3p2zWAJvMgqQ0iNW/ZynpWbO85K5SryUykF7FAeNU9ogGGlIwCPjHhPvnwjkqd
+yDqaA1VaJKDUWIF9joI7sV4VLgGhQvzXRrHULsTeIF2m0+ebL0PTNEWHQ0dtgLYX
+kW7YO4Y6+n3cjHNH4qTof8V30EK8pk8kTdJ/x6ubwf+klFCAyroOxNOaxUy299Oo
+yD6qIPJPnGkPhrKtWnWIhNzJAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAK214+VVXnGnsUlvd9Q6A2Ea6UGrr6b7xkmlnIaNd+6xoUsDsHob
+srHYm7UC0uLi1KwSunI7AU5ZELVEUfAmJzh3O4d6C5sQyqKYPqd5harWOQ3BOD0I
+plHpp7qMtsPDuJBtmE/bmvF85eto0H7pPz+cTTXRlOaVVeiHjMggFcXdy1MzGo9C
+X/4wLQmsFeypTfe+ZGqvDh99VV+ffNMIsMh+opWEloaKiHmDKB6S9aC/MsVVM4RR
+nHm/UKTOukaGE9QIPkSSaygv3sBkVnQ2SHMvvtnjPHVHlizNoq6+YTnuOvKpo4o5
+V7Bij+W7rkBQLsEfwv2IC+gzmRz2yxr2tXk=
+-----END CERTIFICATE-----
diff --git a/test/integration/targets/incidental_setup_tls/files/client_key.pem b/test/integration/targets/incidental_setup_tls/files/client_key.pem
new file mode 100644
index 00000000..850260a8
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/client_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAqDPjkNxwpwlAAM/Shhk8FgfYUG1HwGV5v7LZW9v7jgKd6zcM
+QJQrP4IspgRxOiLupqytNOlZ/mfYm6iKw9i7gjsXLtucvIKKhutk4HT+bGvcEfuf
+e7NF7LJ8J2mPMmyd6ds1gCbzIKkNIjVv2cp6VmzvOSuUq8lMpBexQHjVPaIBhpSM
+Aj4x4T758I5Kncg6mgNVWiSg1FiBfY6CO7FeFS4BoUL810ax1C7E3iBdptPnmy9D
+0zRFh0NHbYC2F5Fu2DuGOvp93IxzR+Kk6H/Fd9BCvKZPJE3Sf8erm8H/pJRQgMq6
+DsTTmsVMtvfTqMg+qiDyT5xpD4ayrVp1iITcyQIDAQABAoIBAHPszzpXs4xr46Cr
+mvyxB6hnX76OkpUXWwGz0fptcsI9K3mhRuB7PhNXNE53YVIgITreZ8G/0jZ0e+VM
+E9dG2HS5JRE2ap/BmJfERJIuD+vJqrL6KMCondi0arz/E6I9GdjDK+xW69nmqRaa
+nawM0KQgD//m+WAsLJYrfg5hORZwI2SHaahawnCp0QaMmz3bdDWKRacM3q0UFX46
+Ze6CaZkUn+e1rHsTMcZBvxQWIVzysFNXh150idIB/PxL5YfCQqTSAj1c/nxaxz6a
+BvHFlpaYR3tvXXlexxfjglCwsGyckbvTyP1cBZqpv5oES+VKt2PrOve9Zyax+CYT
+0uQf6cECgYEA09+46QHXLfWh6jiJYu9skC9UrLU5czfCNB6PrUtFcjPFMYjZDcw9
+inJmcuTPXmfplxc47YDfpwotU+szTJDF+R8kknnfw9zVr/sIwZ5wsFfUQl/56Svn
+AIOVvHHvcvMX95XKGiuTsoCIJZNjJN3l3ztu/bRciuiVLyizglwIVrMCgYEAyzvK
+PFlWilbp3GPJlnW7x1bUxe1ziLE/Um+ujZx96+fy34hJLFdNdNzpNUjoOf3IDTGq
+6xl+vXcf12gimWMFcD3qNIGKHBDM9cIB2RDbb6YcqI8lOqopsmOyGmVLPkRpCoUK
+72kacQwvw6M9xjmpiG3dN8lE881jDmZi+hyCnJMCgYEAoIQnQAhP8Jbeo2dP1q+T
+bS0elnX532uH6xqYOW8EXwAPznZiEw0ANspzCWqGHHzXQMusKmtvhcq1CpXvWHt6
+MUHB4GMK/wVosxmZya5yq3bu7ZZu7JOBQCdwosMi6NB5AO7vnaIUFLFB9E3UWBLw
+243YicdCMU8B7yeD0ChPfPcCgYA1dYHKBBn+g8Q6Y8lIGaoOUmnfsok8gJtOfPAm
+ce6xmi7J29iboE9QmTeC+62Sa44u4ky6UNeE0QwAJnVLcb+hebfcneKNZWH0l1bT
+GVsPcFuDfzvkxZP4R782sERtmaMj0EFDHpuE9xatWIhMVyigKX4SSZAorXML+6S3
+c75rnwKBgBR+WU934wS+DbwTLlUB2mJWqJMEbOH/CUwPC7+VN4h1h3/i455iAeiU
+BizLS0SlD+MoSbC7URcZuquqGkmMlnJXoxF+NdxoWZK78tYNftryWoR87TloiVc/
+LhkxZxje4tgW/mTLqH3zKDoyyzDzG6Q6tAUN2ZTjJFEws7qF30Qe
+-----END RSA PRIVATE KEY-----
diff --git a/test/integration/targets/incidental_setup_tls/files/server_certificate.pem b/test/integration/targets/incidental_setup_tls/files/server_certificate.pem
new file mode 100644
index 00000000..4a0ebc6e
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/server_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MTZaFw0yOTAxMDgwODMzMTZaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQDIwErHwAesRBfd9HiZkmB3VYh28c1QkE9I8nYyHJKX2ZBUhAzK+h80BkcTJJ94
+265qWyACH/wl54Xe/ofFUFrGa4vz0qz4UkL/KI0OGw28Y4qnKdorb9DumbiIPB+9
+I9TJT9vhtXTxBNlBTpv3ONHL8EzdV6ZmuvELU11H27oQ4xoUYhfXPXLMLK0sOnXZ
+lt0BOMMd5fVpJVa8fvXiw3626a0aXCr4e/MWUsBFRnzrXfgoW+AjYoTjKKS2hLYo
+8//MM05h7ROIXrNe990sf9C1G+fOThmOMszK9sjMhu2xHranRcz5aA0UTfyOjTs8
+9WexUYhC5VorYyRWtVZu2mDjAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAFoPBeB6tQhFS1198sia5NDHDDrghDOIlE0QbaoA+MSKzsaIy8Mu
+mNcM2ewYpT600XXTBxcqF6/vuKL9OEbvivtRYQu1YfkifN1jzREoWTieUkR5ytzt
+8ATfFkgTWJmiRiOIb/fNgewvhd+aKxep0OGwDiSKKl1ab6F17Cp4iK8sDBWmnUb6
+0Wf7pfver1Gl0Gp8vRXGUuc8a7udA9a8mV70HJlLkMdMvR9U8Bqih0+iRaqNWXRZ
+7Lc6v5LbzrW/ntilmgU6F0lwxPydg49MY4UrSXcjYLZs9T4iYHwTfLxFjFMIgGwn
+peYMKRj18akP9i2mjj5O2mRu4K+ecuUSOGI=
+-----END CERTIFICATE-----
diff --git a/test/integration/targets/incidental_setup_tls/files/server_key.pem b/test/integration/targets/incidental_setup_tls/files/server_key.pem
new file mode 100644
index 00000000..c79ab648
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/files/server_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAyMBKx8AHrEQX3fR4mZJgd1WIdvHNUJBPSPJ2MhySl9mQVIQM
+yvofNAZHEySfeNuualsgAh/8JeeF3v6HxVBaxmuL89Ks+FJC/yiNDhsNvGOKpyna
+K2/Q7pm4iDwfvSPUyU/b4bV08QTZQU6b9zjRy/BM3VemZrrxC1NdR9u6EOMaFGIX
+1z1yzCytLDp12ZbdATjDHeX1aSVWvH714sN+tumtGlwq+HvzFlLARUZ86134KFvg
+I2KE4yiktoS2KPP/zDNOYe0TiF6zXvfdLH/QtRvnzk4ZjjLMyvbIzIbtsR62p0XM
++WgNFE38jo07PPVnsVGIQuVaK2MkVrVWbtpg4wIDAQABAoIBAHw3wA3pnNXTLJGC
+fD1KfbZZjp9K76gyI10X6lsHow2i6dPiAah3LGecms4VkzfNdxcIW7303Kj3obZh
++ND277RnR6oPakgdXqdUCDP6OX2gemMFWqIWBkodhDmIOntmeHw4le4LwdiBD42B
+frBy0B5JCsbLPYPDmPNRGh8krvVS+Eir4hb4tK95TPMSL0vEjvHYFbCxv7//Ri1p
+3CROGp2CGX0WZ+Zs0crRNoIhRRM6kLAhROcqejtnEy6o7l5CWpCAL2vxlE9y8/kL
+iRawSZRFZnz/zGnqpx0vswgvijkuPfcNGMSzdwaiDgQz8D0GkJ7s9VgzZJazNy+1
+ET/4YIECgYEA612rwP9Ar9qdYbmmMPaJzITnaIrNGfO2JvaQqZt+DG8sVgdxL7V5
+D6emcw406drKRZvFAxnW6ZW2bVpmit02osl0re2A/nOTXLNuo338Qkap/hG8YZrF
+bw7w75pFa/rwlDtedjBnGHO2KbRXeU5Hn5wLoKjYgJoF6Ht+PPdL0IsCgYEA2lnC
+pQEhM51iRMDqNdmVJyvsTNU1ikoO8HaXHq+LwOQETaKMnDwp4Bn14E815CTulAc/
+tsDTKSDk6umZ+IufG1a2v7CqgKVwkB4HkgxKFQs2gQdTFfoMi5eeHR+njuNtklp1
+9fWfKHsP/ddrg+iTVTRZBLWexgKK89IMHYalpAkCgYEAy0Q3a9NF81mTJ+3kOE8C
+zO1OyLtuzGXsvxOb9c6C+owctyNwPeq05a89EgqH6hr5K0qOx9HOCCcyyJgVDQJl
+CAuByB/gkmAQOTQBbhMFA9vxPanljknTDsnRjKwoHkw2712ig+Hjd3ufK79C+FGB
+i7eBVzva1p2uUowshsxv3mcCgYAOFiRciMofjlO8o8V4W+Undcn02vxtQ4HbOYte
+S2z0sMEmUQpJOghpkMMwCWwsn8VUf3M40w/MY3bhQNjSFA/br6hyjW8yhXnRkl5i
+qbBN0z9c66AMlukgSFPHBTfGHB4Bhxx9Fa+C6Q2LDs6839BBevMTPrRTie509GQb
+s4gUIQKBgAvE8wLcmozno0GLDnBdKRZP/C7tmVnAINuraITPUBTASwI+Qo8ILigQ
+LRLaDqF84BEpjb8vdzkYFQqRQSZ8BI8NydfuKEFSBfL27sBvSGMYQJVm6bryUmPq
+T3ayaeZ4Wb3FFDijgtM9dRKyf7p4hQPOqM44QrntAtb43b2Q5L7M
+-----END RSA PRIVATE KEY-----
diff --git a/test/integration/targets/incidental_setup_tls/tasks/main.yml b/test/integration/targets/incidental_setup_tls/tasks/main.yml
new file mode 100644
index 00000000..c5b7a23a
--- /dev/null
+++ b/test/integration/targets/incidental_setup_tls/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# Certificates generated with: https://github.com/michaelklishin/tls-gen
+# ~/tls-gen/basic# make PASSWORD=bunnies CN=ansible.tls.tests
+# verify with: make info
+
+- name: ensure target directory is present
+ file:
+ path: /tls
+ state: directory
+
+- name: ensure TLS files are present
+ copy:
+ src: "{{ item }}"
+ dest: "/tls/{{ item }}"
+ loop:
+ - ca_certificate.pem
+ - ca_key.pem
+ - client_certificate.pem
+ - client_key.pem
+ - server_certificate.pem
+ - server_key.pem
diff --git a/test/integration/targets/incidental_synchronize/aliases b/test/integration/targets/incidental_synchronize/aliases
new file mode 100644
index 00000000..31c6a8b4
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/aliases
@@ -0,0 +1 @@
+shippable/posix/incidental
diff --git a/test/integration/targets/incidental_synchronize/files/bar.txt b/test/integration/targets/incidental_synchronize/files/bar.txt
new file mode 100644
index 00000000..3e96db9b
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/files/bar.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/incidental_synchronize/files/foo.txt b/test/integration/targets/incidental_synchronize/files/foo.txt
new file mode 100644
index 00000000..3e96db9b
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/files/foo.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/incidental_synchronize/tasks/main.yml b/test/integration/targets/incidental_synchronize/tasks/main.yml
new file mode 100644
index 00000000..80e052a6
--- /dev/null
+++ b/test/integration/targets/incidental_synchronize/tasks/main.yml
@@ -0,0 +1,273 @@
+# test code for the synchronize module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: install rsync
+ package:
+ name: rsync
+ when: ansible_distribution != "MacOSX"
+
+- name: cleanup old files
+ shell: rm -rf {{output_dir}}/*
+
+- name: create new test files
+ copy: dest={{output_dir}}/{{item}} mode=0644 content="hello world"
+ with_items:
+ - foo.txt
+ - bar.txt
+
+- name: synchronize file to new filename
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result
+ register: sync_result
+
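+# rsync's --itemize-changes output: '>f' marks a file transferred to the
+# destination and the trailing '+' fields mark a newly created item, hence
+# the '>f+' prefix asserted below.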
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.result
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: Synchronize using the mode=push param
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: push
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: push
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: Synchronize using the mode=pull param
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: pull
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/foo.result"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+
+- name: test that the file is not copied a second time
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ mode: pull
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.result
+ - bar.result
+
+- name: synchronize files using with_items (issue#5965)
+ synchronize: src={{output_dir}}/{{item}} dest={{output_dir}}/{{item}}.result
+ with_items:
+ - foo.txt
+ - bar.txt
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed"
+ - "sync_result.msg == 'All items completed'"
+ - "'results' in sync_result"
+ - "sync_result.results|length == 2"
+ - "sync_result.results[0].msg.endswith('+ foo.txt\n')"
+ - "sync_result.results[1].msg.endswith('+ bar.txt\n')"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}.result"
+ with_items:
+ - foo.txt
+ - bar.txt
+
+- name: synchronize files using rsync_path (issue#7182)
+ synchronize: src={{output_dir}}/foo.txt dest={{output_dir}}/foo.rsync_path rsync_path="sudo rsync"
+ register: sync_result
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'rsync_path' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('>f+')"
+ - "sync_result.msg.endswith('+ foo.txt\n')"
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - foo.rsync_path
+
+- name: add subdirectories for link-dest test
+ file:
+ path: "{{output_dir}}/{{item}}/"
+ state: directory
+ mode: 0755
+ with_items:
+ - directory_a
+ - directory_b
+
+- name: copy foo.txt into the first directory
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/{{item}}/foo.txt"
+ with_items:
+ - directory_a
+
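+# link_dest gives rsync a directory of reference files; an unchanged file is
+# hard-linked from there instead of copied, which is why the inode comparison
+# below is expected to match.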
+- name: synchronize files using link_dest
+ synchronize:
+ src: "{{output_dir}}/directory_a/foo.txt"
+ dest: "{{output_dir}}/directory_b/foo.txt"
+ link_dest:
+ - "{{output_dir}}/directory_a"
+ register: sync_result
+
+- name: get stat information for directory_a
+ stat:
+ path: "{{ output_dir }}/directory_a/foo.txt"
+ register: stat_result_a
+
+- name: get stat information for directory_b
+ stat:
+ path: "{{ output_dir }}/directory_b/foo.txt"
+ register: stat_result_b
+
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "stat_result_a.stat.inode == stat_result_b.stat.inode"
+
+- name: synchronize files using link_dest that would be recursive
+ synchronize:
+ src: "{{output_dir}}/foo.txt"
+ dest: "{{output_dir}}/foo.result"
+ link_dest:
+ - "{{output_dir}}"
+ register: sync_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - sync_result is not changed
+ - sync_result is failed
+
+- name: Cleanup
+ file:
+ state: absent
+ path: "{{output_dir}}/{{item}}"
+ with_items:
+ - "directory_b/foo.txt"
+ - "directory_a/foo.txt"
+ - "directory_a"
+ - "directory_b"
diff --git a/test/integration/targets/incidental_timezone/aliases b/test/integration/targets/incidental_timezone/aliases
new file mode 100644
index 00000000..834cafc9
--- /dev/null
+++ b/test/integration/targets/incidental_timezone/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/osx
+skip/macos
diff --git a/test/integration/targets/incidental_timezone/tasks/main.yml b/test/integration/targets/incidental_timezone/tasks/main.yml
new file mode 100644
index 00000000..247ad6cf
--- /dev/null
+++ b/test/integration/targets/incidental_timezone/tasks/main.yml
@@ -0,0 +1,57 @@
+# Because hwclock usually isn't available inside Docker containers in Shippable,
+# these tasks detect whether hwclock works and only run the hwclock tests if it
+# is supported. That is why it is recommended to run these tests locally with
+# `--docker-privileged` on centos6, centos7 and ubuntu1404 images. Example
+# command to run on centos6:
+#
+# ansible-test integration --docker centos6 --docker-privileged -v timezone
+
+##
+## set path to timezone config files
+##
+
+- name: set config file path on Debian
+ set_fact:
+ timezone_config_file: '/etc/timezone'
+ when: ansible_os_family == 'Debian'
+
+- name: set config file path on RedHat
+ set_fact:
+ timezone_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+##
+## set path to hwclock config files
+##
+
+- name: set hwclock config file path on Debian
+ set_fact:
+ hwclock_config_file: '/etc/default/rcS'
+ when: ansible_os_family == 'Debian'
+
+- name: set hwclock config file path on RedHat
+ set_fact:
+ hwclock_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+- name: Run tests
+ # Skip tests on Fedora because dbus fails to start unless the container is run in privileged mode.
+ # Even then, it starts unreliably. This may be due to the move to cgroup v2 in Fedora 31.
+ # https://www.redhat.com/sysadmin/fedora-31-control-group-v2
+ # Just skip Fedora rather than version-limiting because F30 goes EOL within a month of this writing
+ # and that is the oldest version we currently test in CI. F31+ are affected by the issue
+ # and making the tests work on them is something to deal with in community.general, not here.
+ when: ansible_distribution != 'Fedora'
+ block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: original_timezone
+
+ - block:
+ - include_tasks: test.yml
+ always:
+ - name: Restore original system timezone - {{ original_timezone.diff.before.name }}
+ timezone:
+ name: "{{ original_timezone.diff.before.name }}"
+ when: original_timezone is changed
diff --git a/test/integration/targets/incidental_timezone/tasks/test.yml b/test/integration/targets/incidental_timezone/tasks/test.yml
new file mode 100644
index 00000000..ec0d854d
--- /dev/null
+++ b/test/integration/targets/incidental_timezone/tasks/test.yml
@@ -0,0 +1,607 @@
+##
+## test setting timezone, idempotency and checkmode
+##
+
+- name: set timezone to Australia/Brisbane (checkmode)
+ timezone:
+ name: Australia/Brisbane
+ check_mode: yes
+ register: timezone_set_checkmode
+
+- name: ensure timezone reported as changed in checkmode
+ assert:
+ that:
+ - timezone_set_checkmode.changed
+ - timezone_set_checkmode.diff.after.name == 'Australia/Brisbane'
+ - timezone_set_checkmode.diff.before.name == 'Etc/UTC'
+
+- name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ register: result
+ failed_when: result is not failed
+ changed_when: no
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_set
+
+- name: ensure timezone changed
+ assert:
+ that:
+ - timezone_set.changed
+ - timezone_set.diff.after.name == 'Australia/Brisbane'
+ - timezone_set.diff.before.name == 'Etc/UTC'
+
+- name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ changed_when: no
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Australia/Brisbane"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^Australia/Brisbane' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane again
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_again
+
+- name: ensure timezone idempotency
+ assert:
+ that:
+ - not timezone_again.changed
+
+- name: set timezone to Australia/Brisbane again in checkmode
+ timezone:
+ name: Australia/Brisbane
+ check_mode: yes
+ register: timezone_again_checkmode
+
+- name: set timezone idempotency (checkmode)
+ assert:
+ that:
+ - not timezone_again_checkmode.changed
+
+##
+## tests for same timezones with different names
+##
+
+- name: check dpkg-reconfigure
+ shell: type dpkg-reconfigure
+ register: check_dpkg_reconfigure
+ ignore_errors: yes
+ changed_when: no
+
+- name: check timedatectl
+ shell: type timedatectl && timedatectl
+ register: check_timedatectl
+ ignore_errors: yes
+ changed_when: no
+
+- block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+
+ - name: change timezone from Etc/UTC to UTC
+ timezone:
+ name: UTC
+ register: timezone_etcutc_to_utc
+
+ - name: check timezone changed from Etc/UTC to UTC
+ assert:
+ that:
+ - timezone_etcutc_to_utc.changed
+ - timezone_etcutc_to_utc.diff.before.name == 'Etc/UTC'
+ - timezone_etcutc_to_utc.diff.after.name == 'UTC'
+
+ - name: change timezone from UTC to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: timezone_utc_to_etcutc
+
+ - name: check timezone changed from UTC to Etc/UTC
+ assert:
+ that:
+ - timezone_utc_to_etcutc.changed
+ - timezone_utc_to_etcutc.diff.before.name == 'UTC'
+ - timezone_utc_to_etcutc.diff.after.name == 'Etc/UTC'
+
+ when:
+ # FIXME: Due to a bug in dpkg-reconfigure, these tests fail on non-systemd Debian
+ - check_dpkg_reconfigure.rc != 0 or check_timedatectl.rc == 0
+
+##
+## no systemd tests for timezone
+##
+
+- block:
+ ##
+ ## test with empty config file
+ ##
+
+ - name: empty config file
+ command: cp /dev/null {{ timezone_config_file }}
+
+ - name: set timezone to Europe/Belgrade (empty config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_empty_conf
+
+ - name: check if timezone set (empty config file)
+ assert:
+ that:
+ - timezone_empty_conf.changed
+ - timezone_empty_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_empty_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (empty config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with deleted config file
+ ##
+
+ - name: remove config file
+ file:
+ path: '{{ timezone_config_file }}'
+ state: absent
+
+ - name: set timezone to Europe/Belgrade (no config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_missing_conf
+
+ - name: check if timezone set (no config file)
+ assert:
+ that:
+ - timezone_missing_conf.changed
+ - timezone_missing_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_missing_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (no config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime as symbolic link to a zoneinfo file
+ ##
+
+ - name: create symlink /etc/localtime -> /usr/share/zoneinfo/Etc/UTC
+ file:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ state: link
+ force: yes
+
+ - name: set timezone to Europe/Belgrade (over symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symllink
+
+ - name: check if timezone set (over symlink)
+ assert:
+ that:
+ - timezone_symllink.changed
+ - timezone_symllink.diff.after.name == 'Europe/Belgrade'
+ - timezone_symllink.diff.before.name == 'Etc/UTC'
+
+ - name: check if the timezone is actually set (over symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime as broken symbolic link
+ ##
+
+ - name: create a broken symlink /etc/localtime -> /tmp/foo
+ file:
+ src: /tmp/foo
+ dest: /etc/localtime
+ state: link
+ force: yes
+
+ - name: set timezone to Europe/Belgrade (over broken symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symllink_broken
+
+ - name: check if timezone set (over broken symlink)
+ assert:
+ that:
+ - timezone_symllink_broken.changed
+ - timezone_symllink_broken.diff.after.name == 'Europe/Belgrade'
+ - timezone_symllink_broken.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over broken symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime set manually using copy
+ ##
+
+ - name: set timezone manually by copying the zone info file to /etc/localtime
+ copy:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ remote_src: yes
+
+ - name: set timezone to Europe/Belgrade (over copied file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_copied
+
+ - name: check if timezone set (over copied file)
+ assert:
+ that:
+ - timezone_copied.changed
+ - timezone_copied.diff.after.name == 'Europe/Belgrade'
+ - timezone_copied.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over copied file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+ when:
+ - ansible_service_mgr != 'systemd'
+ - timezone_config_file is defined
+
+
+####
+#### hwclock tests
+####
+
+- name: check if hwclock is supported in the environment
+ command: hwclock --test
+ register: hwclock_test
+ ignore_errors: yes
+
+- name: check if timedatectl works in the environment
+ command: timedatectl
+ register: timedatectl_test
+ ignore_errors: yes
+
+- name: determine whether hwclock is supported
+ set_fact:
+ hwclock_supported: '{{ hwclock_test is successful or timedatectl_test is successful }}'
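+# Either probe succeeding is treated as hwclock support; the hwclock test
+# blocks below are skipped otherwise.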
+##
+## test set hwclock, idempotency and checkmode
+##
+
+- block:
+ - name: set hwclock to local
+ timezone:
+ hwclock: local
+
+ - name: set hwclock to UTC (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: yes
+ register: hwclock_set_checkmode
+
+ - name: ensure hwclock reported as changed (checkmode)
+ assert:
+ that:
+ - hwclock_set_checkmode.changed
+ - hwclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - hwclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+ - name: ensure that checkmode didn't update hwclock the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC
+ timezone:
+ hwclock: UTC
+ register: hwclock_set
+
+ - name: ensure hwclock changed
+ assert:
+ that:
+ - hwclock_set.changed
+ - hwclock_set.diff.after.hwclock == 'UTC'
+ - hwclock_set.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC again
+ timezone:
+ hwclock: UTC
+ register: hwclock_again
+
+ - name: set hwclock idempotency
+ assert:
+ that:
+ - not hwclock_again.changed
+
+ - name: set hwclock to UTC again (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: yes
+ register: hwclock_again_checkmode
+
+ - name: set hwclock idempotency (checkmode)
+ assert:
+ that:
+ - not hwclock_again_checkmode.changed
+
+
+ ##
+ ## no systemd tests for hwclock
+ ##
+
+ - block:
+ ##
+ ## test set hwclock with both /etc/adjtime and conf file deleted
+ ##
+
+ - name: remove /etc/adjtime and conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted /etc/adjtime and conf file
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_and_conf
+
+ - name: ensure hwclock changed with deleted /etc/adjtime and conf
+ assert:
+ that:
+ - hwclock_set_utc_deleted_adjtime_and_conf.changed
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime deleted
+ ##
+
+ - name: remove /etc/adjtime
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+
+ - name: set hwclock to UTC with deleted /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_utc
+
+ - name: ensure hwclock not changed with deleted /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_deleted_adjtime_utc.changed
+ - hwclock_set_utc_deleted_adjtime_utc.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_utc.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with deleted /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_deleted_adjtime_local
+
+ - name: ensure hwclock changed to LOCAL with deleted /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_deleted_adjtime_local.changed
+ - hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local'
+ - hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC'
+
+
+ ##
+ ## test set hwclock with conf file deleted
+ ##
+
+ - name: remove conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted conf
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_conf
+
+ - name: ensure hwclock changed with deleted conf file
+ assert:
+ that:
+ - hwclock_set_utc_deleted_conf.changed
+ - hwclock_set_utc_deleted_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime missing UTC/LOCAL strings
+ ##
+
+ - name: create /etc/adjtime without UTC/LOCAL
+ copy:
+ content: '0.0 0 0\n0'
+ dest: /etc/adjtime
+
+ - name: set hwclock to UTC with broken /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_broken_adjtime
+
+ - name: ensure hwclock doesn't report changed with broken /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_broken_adjtime.changed
+ - hwclock_set_utc_broken_adjtime.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_broken_adjtime.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with broken /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_broken_adjtime
+
+ - name: ensure hwclock changed to LOCAL with broken /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_broken_adjtime.changed
+ - hwclock_set_local_broken_adjtime.diff.after.hwclock == 'local'
+ - hwclock_set_local_broken_adjtime.diff.before.hwclock == 'UTC'
+ when:
+ - ansible_service_mgr != 'systemd'
+ - hwclock_config_file is defined
+
+ ####
+ #### timezone + hwclock tests
+ ####
+
+ ##
+ ## test set timezone and hwclock, idempotency and checkmode
+ ##
+
+ - name: set timezone to Etc/UTC and hwclock to local
+ timezone:
+ name: Etc/UTC
+ hwclock: local
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ check_mode: yes
+ register: tzclock_set_checkmode
+
+ - name: ensure timezone and hwclock reported as changed in checkmode
+ assert:
+ that:
+ - tzclock_set_checkmode.changed
+ - tzclock_set_checkmode.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set_checkmode.diff.before.name == 'Etc/UTC'
+ - tzclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - tzclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ register: result
+ failed_when: result is not failed
+ changed_when: no
+
+ - block:
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+ - name: ensure that checkmode didn't update hwclock the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set
+
+ - name: ensure timezone and hwclock changed
+ assert:
+ that:
+ - tzclock_set.changed
+ - tzclock_set.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set.diff.before.name == 'Etc/UTC'
+ - tzclock_set.diff.after.hwclock == 'UTC'
+ - tzclock_set.diff.before.hwclock == 'local'
+
+ - name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+ - block:
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Europe/Belgrade"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^Europe/Belgrade' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set_again
+
+ - name: set timezone and hwclock idempotency
+ assert:
+ that:
+ - not tzclock_set_again.changed
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ check_mode: yes
+ register: tzclock_set_again_checkmode
+
+ - name: set timezone and hwclock idempotency in checkmode
+ assert:
+ that:
+ - not tzclock_set_again_checkmode.changed
+
+ when:
+ - ansible_system == 'Linux'
+ - hwclock_supported
diff --git a/test/integration/targets/incidental_vyos_config/aliases b/test/integration/targets/incidental_vyos_config/aliases
new file mode 100644
index 00000000..fae06ba0
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/aliases
@@ -0,0 +1,2 @@
+shippable/vyos/incidental
+network/vyos
diff --git a/test/integration/targets/incidental_vyos_config/defaults/main.yaml b/test/integration/targets/incidental_vyos_config/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/test/integration/targets/incidental_vyos_config/tasks/cli.yaml b/test/integration/targets/incidental_vyos_config/tasks/cli.yaml
new file mode 100644
index 00000000..22a71d96
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tasks/cli.yaml
@@ -0,0 +1,22 @@
+---
+- name: collect all cli test cases
+ find:
+ paths: "{{ role_path }}/tests/cli"
+ patterns: "{{ testcase }}.yaml"
+ register: test_cases
+ delegate_to: localhost
+
+- name: set test_items
+ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
+
+- name: run test case (connection=ansible.netcommon.network_cli)
+ include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
+ with_items: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
+
+- name: run test case (connection=local)
+ include: "{{ test_case_to_run }} ansible_connection=local"
+ with_first_found: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
diff --git a/test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml b/test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml
new file mode 100644
index 00000000..8ed28748
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tasks/cli_config.yaml
@@ -0,0 +1,16 @@
+---
+- name: collect all cli_config test cases
+ find:
+ paths: "{{ role_path }}/tests/cli_config"
+ patterns: "{{ testcase }}.yaml"
+ register: test_cases
+ delegate_to: localhost
+
+- name: set test_items
+ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
+
+- name: run test case (connection=ansible.netcommon.network_cli)
+ include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
+ with_items: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
diff --git a/test/integration/targets/incidental_vyos_config/tasks/main.yaml b/test/integration/targets/incidental_vyos_config/tasks/main.yaml
new file mode 100644
index 00000000..13977a44
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tasks/main.yaml
@@ -0,0 +1,3 @@
+---
+- {include: cli.yaml, tags: ['cli']}
+- {include: cli_config.yaml, tags: ['cli_config']}
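+# The tags make it possible to limit a run to one include,
+# e.g. --tags cli or --tags cli_config.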
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml
new file mode 100644
index 00000000..af6a772f
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli/backup.yaml
@@ -0,0 +1,113 @@
+---
+- debug: msg="START vyos/backup.yaml on connection={{ ansible_connection }}"
+
+- name: collect any backup files
+ find:
+ paths: "{{ role_path }}/backup"
+ pattern: "{{ inventory_hostname_short }}_config*"
+ register: backup_files
+ connection: local
+
+- name: delete backup files
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items: "{{backup_files.files|default([])}}"
+
+- name: take configuration backup
+ vyos.vyos.vyos_config:
+ backup: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: collect any backup files
+ find:
+ paths: "{{ role_path }}/backup"
+ pattern: "{{ inventory_hostname_short }}_config*"
+ register: backup_files
+ connection: local
+
+- assert:
+ that:
+ - "backup_files.files is defined"
+
+- name: delete configurable backup file path
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ role_path }}/backup_test_dir/"
+ - "{{ role_path }}/backup/backup.cfg"
+
+- name: take configuration backup in custom filename and directory path
+ vyos.vyos.vyos_config:
+ backup: true
+ backup_options:
+ filename: backup.cfg
+ dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check if the backup file-1 exists
+ find:
+ paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}/backup.cfg"
+ register: backup_file
+ connection: local
+
+- assert:
+ that:
+ - "backup_file.files is defined"
+
+- name: take configuration backup in custom filename
+ vyos.vyos.vyos_config:
+ backup: true
+ backup_options:
+ filename: backup.cfg
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check if the backup file-2 exists
+ find:
+ paths: "{{ role_path }}/backup/backup.cfg"
+ register: backup_file
+ connection: local
+
+- assert:
+ that:
+ - "backup_file.files is defined"
+
+- name: take configuration backup in custom path and default filename
+ vyos.vyos.vyos_config:
+ backup: true
+ backup_options:
+ dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check if the backup file-3 exists
+ find:
+ paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
+ pattern: "{{ inventory_hostname_short }}_config*"
+ register: backup_file
+ connection: local
+
+- assert:
+ that:
+ - "backup_file.files is defined"
+
+- debug: msg="END vyos/backup.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml
new file mode 100644
index 00000000..f1ddc71b
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli/check_config.yaml
@@ -0,0 +1,63 @@
+---
+- debug: msg="START cli/config_check.yaml on connection={{ ansible_connection }}"
+
+- name: setup - ensure interface is not present
+ vyos.vyos.vyos_config:
+ lines: delete interfaces loopback lo
+
+- name: setup - create interface
+ vyos.vyos.vyos_config:
+ lines:
+ - interfaces
+ - interfaces loopback lo
+ - interfaces loopback lo description test
+ register: result
+
+# Note: collapsing the duplicate lines does not work if they are ordered as:
+# lines:
+# - interfaces loopback lo description test
+# - interfaces loopback lo
+# - interfaces
+
+- name: Check that multiple duplicate lines collapse into a single command
+ assert:
+ that:
+ - "{{ result.commands|length }} == 1"
+
+- name: Check that set is correctly prepended
+ assert:
+ that:
+ - "result.commands[0] == 'set interfaces loopback lo description test'"
+
+- name: configure config_check config command
+ vyos.vyos.vyos_config:
+ lines: delete interfaces loopback lo
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check config_check config command idempotent
+ vyos.vyos.vyos_config:
+ lines: delete interfaces loopback lo
+ register: result
+
+- assert:
+ that:
+ - "result.changed == false"
+
+- name: check multiple line config filter is working
+ vyos.vyos.vyos_config:
+ lines:
+ - set system login user esa level admin
+ - set system login user esa authentication encrypted-password '!abc!'
+ - set system login user vyos level admin
+ - set system login user vyos authentication encrypted-password 'abc'
+ register: result
+
+- assert:
+ that:
+ - "{{ result.filtered|length }} == 2"
+
+- debug: msg="END cli/config_check.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml
new file mode 100644
index 00000000..2cd13509
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli/comment.yaml
@@ -0,0 +1,34 @@
+---
+- debug: msg="START cli/comment.yaml on connection={{ ansible_connection }}"
+
+- name: setup
+ vyos.vyos.vyos_config:
+ lines: set system host-name {{ inventory_hostname_short }}
+ match: none
+
+- name: configure using comment
+ vyos.vyos.vyos_config:
+ lines: set system host-name foo
+ comment: this is a test
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+ - "'set system host-name foo' in result.commands"
+
+- name: collect system commits
+ vyos.vyos.vyos_command:
+ commands: show system commit
+ register: result
+
+- assert:
+ that:
+ - "'this is a test' in result.stdout_lines[0][1]"
+
+- name: teardown
+ vyos.vyos.vyos_config:
+ lines: set system host-name {{ inventory_hostname_short }}
+ match: none
+
+- debug: msg="END cli/comment.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/config.cfg b/test/integration/targets/incidental_vyos_config/tests/cli/config.cfg
new file mode 100644
index 00000000..36c98f19
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli/config.cfg
@@ -0,0 +1,3 @@
+ set service lldp
+ set protocols static
+
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/save.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/save.yaml
new file mode 100644
index 00000000..d8e45e25
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli/save.yaml
@@ -0,0 +1,54 @@
+---
+- debug: msg="START cli/save.yaml on connection={{ ansible_connection }}"
+
+- name: setup
+ vyos.vyos.vyos_config:
+ lines: set system host-name {{ inventory_hostname_short }}
+ match: none
+
+- name: configure hostname and save
+ vyos.vyos.vyos_config:
+ lines: set system host-name foo
+ save: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+ - "'set system host-name foo' in result.commands"
+
+- name: configure hostname and don't save
+ vyos.vyos.vyos_config:
+ lines: set system host-name bar
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+ - "'set system host-name bar' in result.commands"
+
+- name: save config
+ vyos.vyos.vyos_config:
+ save: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: save config again
+ vyos.vyos.vyos_config:
+ save: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == false"
+
+- name: teardown
+ vyos.vyos.vyos_config:
+ lines: set system host-name {{ inventory_hostname_short }}
+ match: none
+ save: true
+
+- debug: msg="END cli/simple.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml b/test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml
new file mode 100644
index 00000000..c0826737
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli/simple.yaml
@@ -0,0 +1,53 @@
+---
+- debug: msg="START cli/simple.yaml on connection={{ ansible_connection }}"
+
+- name: setup
+ vyos.vyos.vyos_config:
+ lines: set system host-name {{ inventory_hostname_short }}
+ match: none
+
+- name: configure simple config command
+ vyos.vyos.vyos_config:
+ lines: set system host-name foo
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+ - "'set system host-name foo' in result.commands"
+
+- name: check simple config command idempotent
+ vyos.vyos.vyos_config:
+ lines: set system host-name foo
+ register: result
+
+- assert:
+ that:
+ - "result.changed == false"
+
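+# The &del anchor below is reused via the *del alias after the whitespace
+# test, so both "Delete services" tasks run with identical arguments.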
+- name: Delete services
+ vyos.vyos.vyos_config: &del
+ lines:
+ - delete service lldp
+ - delete protocols static
+
+- name: Configuring when commands start with whitespace
+ vyos.vyos.vyos_config:
+ src: "{{ role_path }}/tests/cli/config.cfg"
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+ - '"set service lldp" in result.commands'
+ - '"set protocols static" in result.commands'
+
+- name: Delete services
+ vyos.vyos.vyos_config: *del
+
+- name: teardown
+ vyos.vyos.vyos_config:
+ lines: set system host-name {{ inventory_hostname_short }}
+ match: none
+
+- debug: msg="END cli/simple.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml
new file mode 100644
index 00000000..744bb7ea
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_backup.yaml
@@ -0,0 +1,114 @@
+---
+- debug: msg="END cli_config/backup.yaml on connection={{ ansible_connection }}"
+
+- name: delete configurable backup file path
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ role_path }}/backup_test_dir/"
+ - "{{ role_path }}/backup/backup.cfg"
+
+- name: collect any backup files
+ find:
+ paths: "{{ role_path }}/backup"
+ pattern: "{{ inventory_hostname_short }}_config*"
+ register: backup_files
+ connection: local
+
+- name: delete backup files
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items: "{{backup_files.files|default([])}}"
+
+- name: take config backup
+ ansible.netcommon.cli_config:
+ backup: true
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: collect any backup files
+ find:
+ paths: "{{ role_path }}/backup"
+ pattern: "{{ inventory_hostname_short }}_config*"
+ register: backup_files
+ connection: local
+
+- assert:
+ that:
+ - "backup_files.files is defined"
+
+- name: take configuration backup in custom filename and directory path
+ ansible.netcommon.cli_config:
+ backup: true
+ backup_options:
+ filename: backup.cfg
+ dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check if the backup file-1 exists
+ find:
+ paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}/backup.cfg"
+ register: backup_file
+ connection: local
+
+- assert:
+ that:
+ - "backup_file.files is defined"
+
+- name: take configuration backup in custom filename
+ ansible.netcommon.cli_config:
+ backup: true
+ backup_options:
+ filename: backup.cfg
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check if the backup file-2 exists
+ find:
+ paths: "{{ role_path }}/backup/backup.cfg"
+ register: backup_file
+ connection: local
+
+- assert:
+ that:
+ - "backup_file.files is defined"
+
+- name: take configuration backup in custom path and default filename
+ ansible.netcommon.cli_config:
+ backup: true
+ backup_options:
+ dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
+ become: true
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: check if the backup file-3 exists
+ find:
+ paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
+ pattern: "{{ inventory_hostname_short }}_config*"
+ register: backup_file
+ connection: local
+
+- assert:
+ that:
+ - "backup_file.files is defined"
+
+- debug: msg="END cli_config/backup.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml
new file mode 100644
index 00000000..c6c4f594
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_basic.yaml
@@ -0,0 +1,28 @@
+---
+- debug: msg="START cli_config/cli_basic.yaml on connection={{ ansible_connection }}"
+
+- name: setup - remove interface description
+ ansible.netcommon.cli_config: &rm
+ config: delete interfaces loopback lo description
+
+- name: configure device with config
+ ansible.netcommon.cli_config: &conf
+ config: set interfaces loopback lo description 'this is a test'
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: Idempotence
+ ansible.netcommon.cli_config: *conf
+ register: result
+
+- assert:
+ that:
+ - "result.changed == false"
+
+- name: teardown
+ ansible.netcommon.cli_config: *rm
+
+- debug: msg="END cli_config/cli_basic.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml
new file mode 100644
index 00000000..90ee1c86
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_config/tests/cli_config/cli_comment.yaml
@@ -0,0 +1,30 @@
+---
+- debug: msg="START cli_config/cli_comment.yaml on connection={{ ansible_connection }}"
+
+- name: setup
+ ansible.netcommon.cli_config: &rm
+ config: set system host-name {{ inventory_hostname_short }}
+
+- name: configure using comment
+ ansible.netcommon.cli_config:
+ config: set system host-name foo
+ commit_comment: this is a test
+ register: result
+
+- assert:
+ that:
+ - "result.changed == true"
+
+- name: collect system commits
+ vyos.vyos.vyos_command:
+ commands: show system commit
+ register: result
+
+- assert:
+ that:
+ - "'this is a test' in result.stdout_lines[0][1]"
+
+- name: teardown
+ ansible.netcommon.cli_config: *rm
+
+- debug: msg="END cli_config/cli_comment.yaml on connection={{ ansible_connection }}"
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/aliases b/test/integration/targets/incidental_vyos_lldp_interfaces/aliases
new file mode 100644
index 00000000..fae06ba0
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/aliases
@@ -0,0 +1,2 @@
+shippable/vyos/incidental
+network/vyos
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml
new file mode 100644
index 00000000..164afead
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "[^_].*"
+test_items: []
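With `use_regex: true` in the find task that consumes it, the default `testcase` pattern `[^_].*` matches every test file whose name does not start with an underscore, so helper fragments such as `_populate.yaml` are only run when included explicitly; presumably `testcase` can be overridden (e.g. `-e testcase=merged`) to narrow a run to a single case.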
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml
new file mode 100644
index 00000000..ee1fa013
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/meta/main.yaml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_vyos_prepare_tests
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml
new file mode 100644
index 00000000..83496e0e
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/cli.yaml
@@ -0,0 +1,19 @@
+---
+- name: Collect all cli test cases
+ find:
+ paths: "{{ role_path }}/tests/cli"
+ patterns: "{{ testcase }}.yaml"
+ use_regex: true
+ register: test_cases
+ delegate_to: localhost
+
+- name: Set test_items
+ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
+
+- name: Run test case (connection=ansible.netcommon.network_cli)
+ include: "{{ test_case_to_run }}"
+ vars:
+ ansible_connection: ansible.netcommon.network_cli
+ with_items: "{{ test_items }}"
+ loop_control:
+ loop_var: test_case_to_run
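The bare `include` action used here was already deprecated in favour of the dynamic `include_tasks` when this was written; an equivalent loop with the newer syntax would look roughly like this (a sketch, not part of the patch):

  - name: Run test case (connection=ansible.netcommon.network_cli)
    include_tasks: "{{ test_case_to_run }}"
    vars:
      ansible_connection: ansible.netcommon.network_cli
    with_items: "{{ test_items }}"
    loop_control:
      loop_var: test_case_to_run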
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml
new file mode 100644
index 00000000..d4cf26fc
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tasks/main.yaml
@@ -0,0 +1,2 @@
+---
+- {include: cli.yaml, tags: ['cli']}
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml
new file mode 100644
index 00000000..3acded63
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate.yaml
@@ -0,0 +1,14 @@
+---
+- name: Setup
+ ansible.netcommon.cli_config:
+ config: "{{ lines }}"
+ vars:
+ lines: |
+ set service lldp interface eth1
+ set service lldp interface eth1 location civic-based country-code US
+ set service lldp interface eth1 location civic-based ca-type 0 ca-value ENGLISH
+ set service lldp interface eth2
+ set service lldp interface eth2 location coordinate-based latitude 33.524449N
+ set service lldp interface eth2 location coordinate-based altitude 2200
+ set service lldp interface eth2 location coordinate-based datum WGS84
+ set service lldp interface eth2 location coordinate-based longitude 222.267255W
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml
new file mode 100644
index 00000000..c7ab1ae7
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_populate_intf.yaml
@@ -0,0 +1,10 @@
+---
+- name: Setup
+ ansible.netcommon.cli_config:
+ config: "{{ lines }}"
+ vars:
+ lines: |
+ set service lldp interface eth2
+ set service lldp interface eth2 location civic-based country-code US
+ set service lldp interface eth2 location civic-based ca-type 0 ca-value ENGLISH
+ set service lldp interface eth2 disable
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml
new file mode 100644
index 00000000..1b1a3b33
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/_remove_config.yaml
@@ -0,0 +1,8 @@
+---
+- name: Remove Config
+ ansible.netcommon.cli_config:
+ config: "{{ lines }}"
+ vars:
+ lines: |
+ delete service lldp interface
+ delete service lldp
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml
new file mode 100644
index 00000000..7b2d53a3
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/deleted.yaml
@@ -0,0 +1,46 @@
+---
+- debug:
+ msg: "Start vyos_lldp_interfaces deleted integration tests ansible_connection={{ ansible_connection }}"
+
+- include_tasks: _populate.yaml
+
+- block:
+ - name: Delete attributes of given LLDP interfaces.
+ vyos.vyos.vyos_lldp_interfaces: &deleted
+ config:
+ - name: 'eth1'
+ - name: 'eth2'
+ state: deleted
+ register: result
+
+ - name: Assert that the before dicts were correctly generated
+ assert:
+ that:
+ - "{{ populate | symmetric_difference(result['before']) |length == 0 }}"
+
+ - name: Assert that the correct set of commands were generated
+ assert:
+ that:
+ - "{{ deleted['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
+
+ - name: Assert that the after dicts were correctly generated
+ assert:
+ that:
+ - "{{ deleted['after'] | symmetric_difference(result['after']) |length == 0 }}"
+
+ - name: Delete attributes of given interfaces (IDEMPOTENT)
+ vyos.vyos.vyos_lldp_interfaces: *deleted
+ register: result
+
+ - name: Assert that the previous task was idempotent
+ assert:
+ that:
+ - "result.changed == false"
+ - "result.commands|length == 0"
+
+ - name: Assert that the before dicts were correctly generated
+ assert:
+ that:
+ - "{{ deleted['after'] | symmetric_difference(result['before']) |length == 0 }}"
+ always:
+ - include_tasks: _remove_config.yaml
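These assertions lean on Ansible's set-theory filters: `a | symmetric_difference(b)` yields the items present in exactly one of the two lists, so asserting the result has length 0 checks that both lists contain the same elements irrespective of order (and of duplicates, since the comparison is set-based). A minimal, self-contained illustration with hypothetical values:

  - assert:
      that:
        - "{{ ['a', 'b'] | symmetric_difference(['b', 'a']) | length == 0 }}"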
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml
new file mode 100644
index 00000000..44c0b894
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/empty_config.yaml
@@ -0,0 +1,36 @@
+---
+- debug:
+ msg: "START vyos_lldp_interfaces empty_config integration tests on connection={{ ansible_connection }}"
+
+- name: Merged with empty config should give appropriate error message
+ vyos.vyos.vyos_lldp_interfaces:
+ config:
+ state: merged
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.msg == 'value of config parameter must not be empty for state merged'
+
+- name: Replaced with empty config should give appropriate error message
+ vyos.vyos.vyos_lldp_interfaces:
+ config:
+ state: replaced
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.msg == 'value of config parameter must not be empty for state replaced'
+
+- name: Overridden with empty config should give appropriate error message
+ vyos.vyos.vyos_lldp_interfaces:
+ config:
+ state: overridden
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.msg == 'value of config parameter must not be empty for state overridden'
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml
new file mode 100644
index 00000000..bf968b21
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/merged.yaml
@@ -0,0 +1,58 @@
+---
+- debug:
+ msg: "START vyos_lldp_interfaces merged integration tests on connection={{ ansible_connection }}"
+
+- include_tasks: _remove_config.yaml
+
+- block:
+  - name: Merge the provided configuration with the existing running configuration
+ vyos.vyos.vyos_lldp_interfaces: &merged
+ config:
+ - name: 'eth1'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth2'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+ state: merged
+ register: result
+
+ - name: Assert that before dicts were correctly generated
+ assert:
+ that: "{{ merged['before'] | symmetric_difference(result['before']) |length == 0 }}"
+
+ - name: Assert that correct set of commands were generated
+ assert:
+ that:
+ - "{{ merged['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
+
+  - name: Assert that after dicts were correctly generated
+ assert:
+ that:
+ - "{{ merged['after'] | symmetric_difference(result['after']) |length == 0 }}"
+
+ - name: Merge the provided configuration with the existing running configuration (IDEMPOTENT)
+ vyos.vyos.vyos_lldp_interfaces: *merged
+ register: result
+
+ - name: Assert that the previous task was idempotent
+ assert:
+ that:
+ - "result['changed'] == false"
+
+ - name: Assert that before dicts were correctly generated
+ assert:
+ that:
+ - "{{ merged['after'] | symmetric_difference(result['before']) |length == 0 }}"
+
+ always:
+ - include_tasks: _remove_config.yaml
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml
new file mode 100644
index 00000000..8cf038c9
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/overridden.yaml
@@ -0,0 +1,49 @@
+---
+- debug:
+ msg: "START vyos_lldp_interfaces overridden integration tests on connection={{ ansible_connection }}"
+
+- include_tasks: _remove_config.yaml
+
+- include_tasks: _populate_intf.yaml
+
+- block:
+ - name: Overrides all device configuration with provided configuration
+ vyos.vyos.vyos_lldp_interfaces: &overridden
+ config:
+ - name: 'eth2'
+ location:
+ elin: '0000000911'
+ state: overridden
+ register: result
+
+ - name: Assert that before dicts were correctly generated
+ assert:
+ that:
+ - "{{ populate_intf | symmetric_difference(result['before']) |length == 0 }}"
+
+ - name: Assert that correct commands were generated
+ assert:
+ that:
+ - "{{ overridden['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
+
+ - name: Assert that after dicts were correctly generated
+ assert:
+ that:
+ - "{{ overridden['after'] | symmetric_difference(result['after']) |length == 0 }}"
+
+ - name: Overrides all device configuration with provided configurations (IDEMPOTENT)
+ vyos.vyos.vyos_lldp_interfaces: *overridden
+ register: result
+
+ - name: Assert that the previous task was idempotent
+ assert:
+ that:
+ - "result['changed'] == false"
+
+ - name: Assert that before dicts were correctly generated
+ assert:
+ that:
+ - "{{ overridden['after'] | symmetric_difference(result['before']) |length == 0 }}"
+
+ always:
+ - include_tasks: _remove_config.yaml
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml
new file mode 100644
index 00000000..17acf065
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/replaced.yaml
@@ -0,0 +1,63 @@
+---
+- debug:
+ msg: "START vyos_lldp_interfaces replaced integration tests on connection={{ ansible_connection }}"
+
+- include_tasks: _remove_config.yaml
+
+- include_tasks: _populate.yaml
+
+- block:
+ - name: Replace device configurations of listed LLDP interfaces with provided configurations
+ vyos.vyos.vyos_lldp_interfaces: &replaced
+ config:
+ - name: 'eth2'
+ enable: false
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth1'
+ enable: false
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+ state: replaced
+ register: result
+
+ - name: Assert that correct set of commands were generated
+ assert:
+ that:
+ - "{{ replaced['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
+
+ - name: Assert that before dicts are correctly generated
+ assert:
+ that:
+ - "{{ populate | symmetric_difference(result['before']) |length == 0 }}"
+
+ - name: Assert that after dict is correctly generated
+ assert:
+ that:
+ - "{{ replaced['after'] | symmetric_difference(result['after']) |length == 0 }}"
+
+  - name: Replace device configurations of listed LLDP interfaces with provided configurations (IDEMPOTENT)
+ vyos.vyos.vyos_lldp_interfaces: *replaced
+ register: result
+
+ - name: Assert that task was idempotent
+ assert:
+ that:
+ - "result['changed'] == false"
+
+ - name: Assert that before dict is correctly generated
+ assert:
+ that:
+ - "{{ replaced['after'] | symmetric_difference(result['before']) |length == 0 }}"
+
+ always:
+ - include_tasks: _remove_config.yaml
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml
new file mode 100644
index 00000000..4d4cf82c
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/tests/cli/rtt.yaml
@@ -0,0 +1,57 @@
+---
+- debug:
+ msg: "START vyos_lldp_interfaces round trip integration tests on connection={{ ansible_connection }}"
+
+- include_tasks: _remove_config.yaml
+
+- block:
+ - name: Apply the provided configuration (base config)
+ vyos.vyos.vyos_lldp_interfaces:
+ config:
+ - name: 'eth1'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ state: merged
+ register: base_config
+
+ - name: Gather lldp_interfaces facts
+ vyos.vyos.vyos_facts:
+ gather_subset:
+ - default
+ gather_network_resources:
+ - lldp_interfaces
+
+ - name: Apply the provided configuration (config to be reverted)
+ vyos.vyos.vyos_lldp_interfaces:
+ config:
+ - name: 'eth2'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+ state: merged
+ register: result
+
+ - name: Assert that changes were applied
+ assert:
+ that: "{{ round_trip['after'] | symmetric_difference(result['after']) |length == 0 }}"
+
+ - name: Revert back to base config using facts round trip
+ vyos.vyos.vyos_lldp_interfaces:
+ config: "{{ ansible_facts['network_resources']['lldp_interfaces'] }}"
+ state: overridden
+ register: revert
+
+ - name: Assert that config was reverted
+ assert:
+ that: "{{ base_config['after'] | symmetric_difference(revert['after']) |length == 0 }}"
+
+ always:
+ - include_tasks: _remove_config.yaml
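The round-trip pattern above is worth spelling out: apply a base configuration, snapshot it with `vyos.vyos.vyos_facts` via `gather_network_resources`, apply a second change, then feed the gathered `lldp_interfaces` facts back with `state: overridden` to restore the snapshot; the final assert verifies the restored state matches the base config's `after`.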
diff --git a/test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml b/test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml
new file mode 100644
index 00000000..169b0d5d
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_lldp_interfaces/vars/main.yaml
@@ -0,0 +1,130 @@
+---
+merged:
+ before: []
+
+
+ commands:
+ - "set service lldp interface eth1 location civic-based country-code 'US'"
+ - "set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'"
+ - "set service lldp interface eth1"
+ - "set service lldp interface eth2 location coordinate-based latitude '33.524449N'"
+ - "set service lldp interface eth2 location coordinate-based altitude '2200'"
+ - "set service lldp interface eth2 location coordinate-based datum 'WGS84'"
+ - "set service lldp interface eth2 location coordinate-based longitude '222.267255W'"
+ - "set service lldp interface eth2 location coordinate-based latitude '33.524449N'"
+ - "set service lldp interface eth2 location coordinate-based altitude '2200'"
+ - "set service lldp interface eth2 location coordinate-based datum 'WGS84'"
+ - "set service lldp interface eth2 location coordinate-based longitude '222.267255W'"
+ - "set service lldp interface eth2"
+
+ after:
+ - name: 'eth1'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth2'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+
+populate:
+ - name: 'eth1'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth2'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+
+replaced:
+ commands:
+ - "delete service lldp interface eth2 location"
+ - "set service lldp interface eth2 'disable'"
+ - "set service lldp interface eth2 location civic-based country-code 'US'"
+ - "set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'"
+ - "delete service lldp interface eth1 location"
+ - "set service lldp interface eth1 'disable'"
+ - "set service lldp interface eth1 location coordinate-based latitude '33.524449N'"
+ - "set service lldp interface eth1 location coordinate-based altitude '2200'"
+ - "set service lldp interface eth1 location coordinate-based datum 'WGS84'"
+ - "set service lldp interface eth1 location coordinate-based longitude '222.267255W'"
+
+ after:
+ - name: 'eth2'
+ enable: false
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth1'
+ enable: false
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+
+populate_intf:
+ - name: 'eth2'
+ enable: false
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+overridden:
+ commands:
+ - "delete service lldp interface eth2 location"
+ - "delete service lldp interface eth2 'disable'"
+ - "set service lldp interface eth2 location elin '0000000911'"
+
+ after:
+ - name: 'eth2'
+ location:
+      elin: '0000000911'
+
+deleted:
+ commands:
+ - "delete service lldp interface eth1"
+ - "delete service lldp interface eth2"
+
+ after: []
+
+round_trip:
+ after:
+ - name: 'eth1'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth2'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
diff --git a/test/integration/targets/incidental_vyos_prepare_tests/aliases b/test/integration/targets/incidental_vyos_prepare_tests/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_prepare_tests/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml b/test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml
new file mode 100644
index 00000000..ac0b4922
--- /dev/null
+++ b/test/integration/targets/incidental_vyos_prepare_tests/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: Ensure required interfaces are present in running-config
+ ansible.netcommon.cli_config:
+ config: "{{ lines }}"
+ vars:
+ lines: |
+ set interfaces ethernet eth0 address dhcp
+ set interfaces ethernet eth0 speed auto
+ set interfaces ethernet eth0 duplex auto
+ set interfaces ethernet eth1
+ set interfaces ethernet eth2
+ delete interfaces loopback lo
+ ignore_errors: true
diff --git a/test/integration/targets/incidental_win_copy/aliases b/test/integration/targets/incidental_win_copy/aliases
new file mode 100644
index 00000000..a5fc90dc
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/aliases
@@ -0,0 +1,2 @@
+shippable/windows/incidental
+windows
diff --git a/test/integration/targets/incidental_win_copy/defaults/main.yml b/test/integration/targets/incidental_win_copy/defaults/main.yml
new file mode 100644
index 00000000..5d8a1d23
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/defaults/main.yml
@@ -0,0 +1 @@
+test_win_copy_path: C:\ansible\win_copy .ÅÑŚÌβÅÈ [$!@^&test(;)]
diff --git a/test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file b/test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file
new file mode 100644
index 00000000..d8d15498
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files-different/vault/folder/nested-vault-file
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+65653164323866373138353632323531393664393563633665373635623763353561386431373366
+3232353263363034313136663062623336663463373966320a333763323032646463386432626161
+36386330356637666362396661653935653064623038333031653335626164376465353235303636
+3335616231663838620a303632343938326538656233393562303162343261383465623261646664
+33613932343461626339333832363930303962633364303736376634396364643861
diff --git a/test/integration/targets/incidental_win_copy/files-different/vault/readme.txt b/test/integration/targets/incidental_win_copy/files-different/vault/readme.txt
new file mode 100644
index 00000000..dae883b5
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files-different/vault/readme.txt
@@ -0,0 +1,5 @@
+This directory contains some files that have been encrypted with ansible-vault.
+
+This is to test out the decrypt parameter in win_copy.
+
+The password is: password
diff --git a/test/integration/targets/incidental_win_copy/files-different/vault/vault-file b/test/integration/targets/incidental_win_copy/files-different/vault/vault-file
new file mode 100644
index 00000000..2fff7619
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files-different/vault/vault-file
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+30353665333635633433356261616636356130386330363962386533303566313463383734373532
+3933643234323638623939613462346361313431363939370a303532656338353035346661353965
+34656231633238396361393131623834316262306533663838336362366137306562646561383766
+6363373965633337640a373666336461613337346131353564383134326139616561393664663563
+3431
diff --git a/test/integration/targets/incidental_win_copy/files/empty.txt b/test/integration/targets/incidental_win_copy/files/empty.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files/empty.txt
diff --git a/test/integration/targets/incidental_win_copy/files/foo.txt b/test/integration/targets/incidental_win_copy/files/foo.txt
new file mode 100644
index 00000000..7c6ded14
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files/foo.txt
@@ -0,0 +1 @@
+foo.txt
diff --git a/test/integration/targets/incidental_win_copy/files/subdir/bar.txt b/test/integration/targets/incidental_win_copy/files/subdir/bar.txt
new file mode 100644
index 00000000..76018072
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files/subdir/bar.txt
@@ -0,0 +1 @@
+baz
diff --git a/test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt
new file mode 100644
index 00000000..76018072
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/baz.txt
@@ -0,0 +1 @@
+baz
diff --git a/test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt
new file mode 100644
index 00000000..78df5b06
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/files/subdir/subdir2/subdir3/subdir4/qux.txt
@@ -0,0 +1 @@
+qux \ No newline at end of file
diff --git a/test/integration/targets/incidental_win_copy/tasks/main.yml b/test/integration/targets/incidental_win_copy/tasks/main.yml
new file mode 100644
index 00000000..b2ee103f
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+- name: create empty folder
+ file:
+ path: '{{role_path}}/files/subdir/empty'
+ state: directory
+ delegate_to: localhost
+
+# removes the cached zip module from the previous task so we can replicate
+# the below issue where win_copy would delete DEFAULT_LOCAL_TMP if it
+# had permission to
+# https://github.com/ansible/ansible/issues/35613
+- name: clear the local ansiballz cache
+ file:
+ path: "{{lookup('config', 'DEFAULT_LOCAL_TMP')}}/ansiballz_cache"
+ state: absent
+ delegate_to: localhost
+
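`lookup('config', 'DEFAULT_LOCAL_TMP')` resolves an Ansible configuration setting at runtime, so the task above removes the ansiballz cache under whatever local temp directory is currently in effect. A minimal sketch of the same lookup (assuming default settings):

  - debug:
      msg: "local tmp is {{ lookup('config', 'DEFAULT_LOCAL_TMP') }}"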
+- name: create test folder
+ win_file:
+ path: '{{test_win_copy_path}}'
+ state: directory
+
+- block:
+ - name: run tests for local to remote
+ include_tasks: tests.yml
+
+ - name: run tests for remote to remote
+ include_tasks: remote_tests.yml
+
+ always:
+ - name: remove test folder
+ win_file:
+ path: '{{test_win_copy_path}}'
+ state: absent
diff --git a/test/integration/targets/incidental_win_copy/tasks/remote_tests.yml b/test/integration/targets/incidental_win_copy/tasks/remote_tests.yml
new file mode 100644
index 00000000..5abb5020
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/tasks/remote_tests.yml
@@ -0,0 +1,471 @@
+---
+- name: fail when source does not exist remote
+ win_copy:
+ src: fakesource
+ dest: fakedest
+ remote_src: yes
+ register: fail_remote_invalid_source
+ failed_when: "'it does not exist' not in fail_remote_invalid_source.msg"
+
+- name: setup source folder for remote tests
+ win_copy:
+ src: files/
+ dest: '{{test_win_copy_path}}\source\'
+
+- name: setup remote failure tests
+ win_file:
+ path: '{{item.path}}'
+ state: '{{item.state}}'
+ with_items:
+ - { 'path': '{{test_win_copy_path}}\target\folder', 'state': 'directory' }
+ - { 'path': '{{test_win_copy_path}}\target\file', 'state': 'touch' }
+ - { 'path': '{{test_win_copy_path}}\target\subdir', 'state': 'touch' }
+
+- name: fail source is a file but dest is a folder
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\folder'
+ remote_src: yes
+ register: fail_remote_file_to_folder
+ failed_when: "'dest is already a folder' not in fail_remote_file_to_folder.msg"
+
+- name: fail source is a folder but dest is a file
+ win_copy:
+ src: '{{test_win_copy_path}}\source\'
+ dest: '{{test_win_copy_path}}\target\'
+ remote_src: yes
+ register: fail_remote_folder_to_file
+ failed_when: "'dest is already a file' not in fail_remote_folder_to_file.msg"
+
+- name: fail source is a file dest parent dir is also a file
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\file\foo.txt'
+ remote_src: yes
+ register: fail_remote_file_parent_dir_file
+ failed_when: "'is currently a file' not in fail_remote_file_parent_dir_file.msg"
+
+- name: fail source is a folder dest parent dir is also a file
+ win_copy:
+ src: '{{test_win_copy_path}}\source\subdir'
+ dest: '{{test_win_copy_path}}\target\file'
+ remote_src: yes
+ register: fail_remote_folder_parent_dir_file
+ failed_when: "'object at dest parent dir is not a folder' not in fail_remote_folder_parent_dir_file.msg"
+
+- name: fail to copy a remote file with parent dir that doesn't exist and filename is set
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\missing-dir\foo.txt'
+ remote_src: yes
+ register: fail_remote_missing_parent_dir
+ failed_when: "'does not exist' not in fail_remote_missing_parent_dir.msg"
+
+- name: remove target after remote failure tests
+ win_file:
+ path: '{{test_win_copy_path}}\target'
+ state: absent
+
+- name: create remote target after cleaning
+ win_file:
+ path: '{{test_win_copy_path}}\target'
+ state: directory
+
+- name: copy single file remote (check mode)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\foo-target.txt'
+ remote_src: yes
+ register: remote_copy_file_check
+ check_mode: yes
+
+- name: get result of copy single file remote (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\target\foo-target.txt'
+ register: remote_copy_file_actual_check
+
+- name: assert copy single file remote (check mode)
+ assert:
+ that:
+ - remote_copy_file_check is changed
+ - remote_copy_file_actual_check.stat.exists == False
+
+- name: copy single file remote
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\foo-target.txt'
+ remote_src: yes
+ register: remote_copy_file
+
+- name: get result of copy single file remote
+ win_stat:
+ path: '{{test_win_copy_path}}\target\foo-target.txt'
+ register: remote_copy_file_actual
+
+- name: assert copy single file remote
+ assert:
+ that:
+ - remote_copy_file is changed
+ - remote_copy_file.operation == 'file_copy'
+ - remote_copy_file.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - remote_copy_file.size == 8
+ - remote_copy_file.original_basename == 'foo.txt'
+ - remote_copy_file_actual.stat.exists == True
+ - remote_copy_file_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+
+- name: copy single file remote (idempotent)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\foo-target.txt'
+ remote_src: yes
+ register: remote_copy_file_again
+
+- name: assert copy single file remote (idempotent)
+ assert:
+ that:
+ - remote_copy_file_again is not changed
+
+- name: copy single file into folder remote (check mode)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\'
+ remote_src: yes
+ register: remote_copy_file_to_folder_check
+ check_mode: yes
+
+- name: get result of copy single file into folder remote (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\target\foo.txt'
+ register: remote_copy_file_to_folder_actual_check
+
+- name: assert copy single file into folder remote (check mode)
+ assert:
+ that:
+ - remote_copy_file_to_folder_check is changed
+ - remote_copy_file_to_folder_actual_check.stat.exists == False
+
+- name: copy single file into folder remote
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\'
+ remote_src: yes
+ register: remote_copy_file_to_folder
+
+- name: get result of copy single file into folder remote
+ win_stat:
+ path: '{{test_win_copy_path}}\target\foo.txt'
+ register: remote_copy_file_to_folder_actual
+
+- name: assert copy single file into folder remote
+ assert:
+ that:
+ - remote_copy_file_to_folder is changed
+ - remote_copy_file_to_folder.operation == 'file_copy'
+ - remote_copy_file_to_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - remote_copy_file_to_folder.size == 8
+ - remote_copy_file_to_folder.original_basename == 'foo.txt'
+ - remote_copy_file_to_folder_actual.stat.exists == True
+ - remote_copy_file_to_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+
+- name: copy single file into folder remote (idempotent)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\'
+ remote_src: yes
+ register: remote_copy_file_to_folder_again
+
+- name: assert copy single file into folder remote
+ assert:
+ that:
+ - remote_copy_file_to_folder_again is not changed
+
+- name: copy single file to missing folder (check mode)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\missing\'
+ remote_src: yes
+ register: remote_copy_file_to_missing_folder_check
+ check_mode: yes
+
+- name: get result of copy single file to missing folder remote (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\target\missing\foo.txt'
+ register: remote_copy_file_to_missing_folder_actual_check
+
+- name: assert copy single file to missing folder remote (check mode)
+ assert:
+ that:
+ - remote_copy_file_to_missing_folder_check is changed
+ - remote_copy_file_to_missing_folder_check.operation == 'file_copy'
+ - remote_copy_file_to_missing_folder_actual_check.stat.exists == False
+
+- name: copy single file to missing folder remote
+ win_copy:
+ src: '{{test_win_copy_path}}\source\foo.txt'
+ dest: '{{test_win_copy_path}}\target\missing\'
+ remote_src: yes
+ register: remote_copy_file_to_missing_folder
+
+- name: get result of copy single file to missing folder remote
+ win_stat:
+ path: '{{test_win_copy_path}}\target\missing\foo.txt'
+ register: remote_copy_file_to_missing_folder_actual
+
+- name: assert copy single file to missing folder remote
+ assert:
+ that:
+ - remote_copy_file_to_missing_folder is changed
+ - remote_copy_file_to_missing_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - remote_copy_file_to_missing_folder.operation == 'file_copy'
+ - remote_copy_file_to_missing_folder.size == 8
+ - remote_copy_file_to_missing_folder_actual.stat.exists == True
+ - remote_copy_file_to_missing_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+
+- name: clear target for folder to folder test
+ win_file:
+ path: '{{test_win_copy_path}}\target'
+ state: absent
+
+- name: copy folder to folder remote (check mode)
+ win_copy:
+ src: '{{test_win_copy_path}}\source'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_to_folder_check
+ check_mode: yes
+
+- name: get result of copy folder to folder remote (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\target'
+ register: remote_copy_folder_to_folder_actual_check
+
+- name: assert copy folder to folder remote (check mode)
+ assert:
+ that:
+ - remote_copy_folder_to_folder_check is changed
+ - remote_copy_folder_to_folder_check.operation == 'folder_copy'
+ - remote_copy_folder_to_folder_actual_check.stat.exists == False
+
+- name: copy folder to folder remote
+ win_copy:
+ src: '{{test_win_copy_path}}\source'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_to_folder
+
+- name: get result of copy folder to folder remote
+ win_find:
+ paths: '{{test_win_copy_path}}\target'
+ recurse: yes
+ file_type: directory
+ register: remote_copy_folder_to_folder_actual
+
+- name: assert copy folder to folder remote
+ assert:
+ that:
+ - remote_copy_folder_to_folder is changed
+ - remote_copy_folder_to_folder.operation == 'folder_copy'
+ - remote_copy_folder_to_folder_actual.examined == 11
+ - remote_copy_folder_to_folder_actual.matched == 6
+ - remote_copy_folder_to_folder_actual.files[0].filename == 'source'
+ - remote_copy_folder_to_folder_actual.files[1].filename == 'subdir'
+ - remote_copy_folder_to_folder_actual.files[2].filename == 'empty'
+ - remote_copy_folder_to_folder_actual.files[3].filename == 'subdir2'
+ - remote_copy_folder_to_folder_actual.files[4].filename == 'subdir3'
+ - remote_copy_folder_to_folder_actual.files[5].filename == 'subdir4'
+
+- name: copy folder to folder remote (idempotent)
+ win_copy:
+ src: '{{test_win_copy_path}}\source'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_to_folder_again
+
+- name: assert copy folder to folder remote (idempotent)
+ assert:
+ that:
+ - remote_copy_folder_to_folder_again is not changed
+
+- name: change remote file after folder to folder test
+ win_copy:
+ content: bar.txt
+ dest: '{{test_win_copy_path}}\target\source\foo.txt'
+
+- name: remove remote folder after folder to folder test
+ win_file:
+ path: '{{test_win_copy_path}}\target\source\subdir\subdir2\subdir3\subdir4'
+ state: absent
+
+- name: copy folder to folder remote after change
+ win_copy:
+ src: '{{test_win_copy_path}}\source'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_to_folder_after_change
+
+- name: get result of copy folder to folder remote after change
+ win_find:
+ paths: '{{test_win_copy_path}}\target\source'
+ recurse: yes
+ patterns: ['foo.txt', 'qux.txt']
+ register: remote_copy_folder_to_folder_after_change_actual
+
+- name: assert copy folder after changes
+ assert:
+ that:
+ - remote_copy_folder_to_folder_after_change is changed
+ - remote_copy_folder_to_folder_after_change_actual.matched == 2
+ - remote_copy_folder_to_folder_after_change_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - remote_copy_folder_to_folder_after_change_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43'
+
+- name: clear target folder before folder contents to remote test
+ win_file:
+ path: '{{test_win_copy_path}}\target'
+ state: absent
+
+- name: copy folder contents to folder remote with backslash (check mode)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_content_backslash_check
+ check_mode: yes
+
+- name: get result of copy folder contents to folder remote with backslash (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\target'
+ register: remote_copy_folder_content_backslash_actual_check
+
+- name: assert copy folder content to folder remote with backslash (check mode)
+ assert:
+ that:
+ - remote_copy_folder_content_backslash_check is changed
+ - remote_copy_folder_content_backslash_actual_check.stat.exists == False
+
+- name: copy folder contents to folder remote with backslash
+ win_copy:
+ src: '{{test_win_copy_path}}\source\'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_content_backslash
+
+- name: get result of copy folder contents to folder remote with backslash
+ win_find:
+ paths: '{{test_win_copy_path}}\target'
+ recurse: yes
+ file_type: directory
+ register: remote_copy_folder_content_backslash_actual
+
+- name: assert copy folder content to folder remote with backslash
+ assert:
+ that:
+ - remote_copy_folder_content_backslash is changed
+ - remote_copy_folder_content_backslash.operation == 'folder_copy'
+ - remote_copy_folder_content_backslash_actual.examined == 10
+ - remote_copy_folder_content_backslash_actual.matched == 5
+ - remote_copy_folder_content_backslash_actual.files[0].filename == 'subdir'
+ - remote_copy_folder_content_backslash_actual.files[1].filename == 'empty'
+ - remote_copy_folder_content_backslash_actual.files[2].filename == 'subdir2'
+ - remote_copy_folder_content_backslash_actual.files[3].filename == 'subdir3'
+ - remote_copy_folder_content_backslash_actual.files[4].filename == 'subdir4'
+
+- name: copy folder contents to folder remote with backslash (idempotent)
+ win_copy:
+ src: '{{test_win_copy_path}}\source\'
+ dest: '{{test_win_copy_path}}\target'
+ remote_src: yes
+ register: remote_copy_folder_content_backslash_again
+
+- name: assert copy folder content to folder remote with backslash (idempotent)
+ assert:
+ that:
+ - remote_copy_folder_content_backslash_again is not changed
+
+- name: change remote file after folder content to folder test
+ win_copy:
+ content: bar.txt
+ dest: '{{test_win_copy_path}}\target\foo.txt'
+
+- name: remove remote folder after folder content to folder test
+ win_file:
+ path: '{{test_win_copy_path}}\target\subdir\subdir2\subdir3\subdir4'
+ state: absent
+
+- name: copy folder content to folder remote after change
+ win_copy:
+ src: '{{test_win_copy_path}}/source/'
+ dest: '{{test_win_copy_path}}/target/'
+ remote_src: yes
+ register: remote_copy_folder_content_to_folder_after_change
+
+- name: get result of copy folder content to folder remote after change
+ win_find:
+ paths: '{{test_win_copy_path}}\target'
+ recurse: yes
+ patterns: ['foo.txt', 'qux.txt']
+ register: remote_copy_folder_content_to_folder_after_change_actual
+
+- name: assert copy folder content to folder after changes
+ assert:
+ that:
+ - remote_copy_folder_content_to_folder_after_change is changed
+ - remote_copy_folder_content_to_folder_after_change_actual.matched == 2
+ - remote_copy_folder_content_to_folder_after_change_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - remote_copy_folder_content_to_folder_after_change_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43'
+
+# https://github.com/ansible/ansible/issues/50077
+- name: create empty nested directory
+ win_file:
+ path: '{{ test_win_copy_path }}\source\empty-nested\nested-dir'
+ state: directory
+
+- name: copy empty nested directory (check mode)
+ win_copy:
+ src: '{{ test_win_copy_path }}\source\empty-nested'
+ dest: '{{ test_win_copy_path }}\target'
+ remote_src: True
+ check_mode: True
+ register: copy_empty_dir_check
+
+- name: get result of copy empty nested directory (check mode)
+ win_stat:
+ path: '{{ test_win_copy_path }}\target\empty-nested'
+ register: copy_empty_dir_actual_check
+
+- name: assert copy empty nested directory (check mode)
+ assert:
+ that:
+ - copy_empty_dir_check is changed
+ - copy_empty_dir_check.operation == "folder_copy"
+ - not copy_empty_dir_actual_check.stat.exists
+
+- name: copy empty nested directory
+ win_copy:
+ src: '{{ test_win_copy_path }}\source\empty-nested'
+ dest: '{{ test_win_copy_path }}\target'
+ remote_src: True
+ register: copy_empty_dir
+
+- name: get result of copy empty nested directory
+ win_stat:
+ path: '{{ test_win_copy_path }}\target\empty-nested\nested-dir'
+ register: copy_empty_dir_actual
+
+- name: assert copy empty nested directory
+ assert:
+ that:
+ - copy_empty_dir is changed
+ - copy_empty_dir.operation == "folder_copy"
+ - copy_empty_dir_actual.stat.exists
+
+- name: copy empty nested directory (idempotent)
+ win_copy:
+ src: '{{ test_win_copy_path }}\source\empty-nested'
+ dest: '{{ test_win_copy_path }}\target'
+ remote_src: True
+ register: copy_empty_dir_again
+
+- name: assert copy empty nested directory (idempotent)
+ assert:
+ that:
+ - not copy_empty_dir_again is changed
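A pattern used throughout these tasks: each operation is first run with `check_mode: yes` and the result verified with `win_stat`/`win_find` to confirm nothing was actually written to disk, then run for real, then run once more to assert idempotence.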
diff --git a/test/integration/targets/incidental_win_copy/tasks/tests.yml b/test/integration/targets/incidental_win_copy/tasks/tests.yml
new file mode 100644
index 00000000..d15e71f6
--- /dev/null
+++ b/test/integration/targets/incidental_win_copy/tasks/tests.yml
@@ -0,0 +1,535 @@
+---
+- name: fail no source or content
+ win_copy:
+ dest: dest
+ register: fail_no_source_content
+ failed_when: fail_no_source_content.msg != 'src (or content) and dest are required'
+
+- name: fail content but dest isn't a file, unix ending
+ win_copy:
+ content: a
+ dest: a/
+ register: fail_dest_not_file_unix
+ failed_when: fail_dest_not_file_unix.msg != 'dest must be a file if content is defined'
+
+- name: fail content but dest isn't a file, windows ending
+ win_copy:
+ content: a
+ dest: a\
+ register: fail_dest_not_file_windows
+ failed_when: fail_dest_not_file_windows.msg != 'dest must be a file if content is defined'
+
+- name: fail to copy a file with parent dir that doesn't exist and filename is set
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\missing-dir\foo.txt'
+ register: fail_missing_parent_dir
+ failed_when: "'does not exist' not in fail_missing_parent_dir.msg"
+
+- name: fail to copy an encrypted file without the password set
+ win_copy:
+ src: '{{role_path}}/files-different/vault/vault-file'
+ dest: '{{test_win_copy_path}}\file'
+ register: fail_copy_encrypted_file
+ ignore_errors: yes # weird failed_when doesn't work in this case
+
+- name: assert failure message when copying an encrypted file without the password set
+ assert:
+ that:
+ - fail_copy_encrypted_file is failed
+ - fail_copy_encrypted_file.msg == 'A vault password or secret must be specified to decrypt {{role_path}}/files-different/vault/vault-file'
+
+- name: fail to copy a directory with an encrypted file without the password
+ win_copy:
+ src: '{{role_path}}/files-different/vault'
+ dest: '{{test_win_copy_path}}'
+ register: fail_copy_directory_with_enc_file
+ ignore_errors: yes
+
+- name: assert failure message when copying a directory that contains an encrypted file without the password set
+ assert:
+ that:
+ - fail_copy_directory_with_enc_file is failed
+ - fail_copy_directory_with_enc_file.msg == 'A vault password or secret must be specified to decrypt {{role_path}}/files-different/vault/vault-file'
+
+- name: copy with content (check mode)
+ win_copy:
+ content: a
+ dest: '{{test_win_copy_path}}\file'
+ register: copy_content_check
+ check_mode: yes
+
+- name: get result of copy with content (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\file'
+ register: copy_content_actual_check
+
+- name: assert copy with content (check mode)
+ assert:
+ that:
+ - copy_content_check is changed
+ - copy_content_check.checksum == '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8'
+ - copy_content_check.operation == 'file_copy'
+ - copy_content_check.size == 1
+ - copy_content_actual_check.stat.exists == False
+
+- name: copy with content
+ win_copy:
+ content: a
+ dest: '{{test_win_copy_path}}\file'
+ register: copy_content
+
+- name: get result of copy with content
+ win_stat:
+ path: '{{test_win_copy_path}}\file'
+ register: copy_content_actual
+
+- name: assert copy with content
+ assert:
+ that:
+ - copy_content is changed
+ - copy_content.checksum == '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8'
+ - copy_content.operation == 'file_copy'
+ - copy_content.size == 1
+ - copy_content_actual.stat.exists == True
+ - copy_content_actual.stat.checksum == '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8'
+
+- name: copy with content (idempotent)
+ win_copy:
+ content: a
+ dest: '{{test_win_copy_path}}\file'
+ register: copy_content_again
+
+- name: assert copy with content (idempotent)
+ assert:
+ that:
+ - copy_content_again is not changed
+
+- name: copy with content change when missing
+ win_copy:
+ content: b
+ dest: '{{test_win_copy_path}}\file'
+ force: no
+ register: copy_content_when_missing
+
+- name: assert copy with content change when missing
+ assert:
+ that:
+ - copy_content_when_missing is not changed
+
+- name: copy single file (check mode)
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\foo-target.txt'
+ register: copy_file_check
+ check_mode: yes
+
+- name: get result of copy single file (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\foo-target.txt'
+ register: copy_file_actual_check
+
+- name: assert copy single file (check mode)
+ assert:
+ that:
+ - copy_file_check is changed
+ - copy_file_check.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_file_check.dest == test_win_copy_path + '\\foo-target.txt'
+ - copy_file_check.operation == 'file_copy'
+ - copy_file_check.size == 8
+ - copy_file_actual_check.stat.exists == False
+
+- name: copy single file
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\foo-target.txt'
+ register: copy_file
+
+- name: get result of copy single file
+ win_stat:
+ path: '{{test_win_copy_path}}\foo-target.txt'
+ register: copy_file_actual
+
+- name: assert copy single file
+ assert:
+ that:
+ - copy_file is changed
+ - copy_file.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_file.dest == test_win_copy_path + '\\foo-target.txt'
+ - copy_file.operation == 'file_copy'
+ - copy_file.size == 8
+ - copy_file_actual.stat.exists == True
+ - copy_file_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+
+- name: copy single file (idempotent)
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\foo-target.txt'
+ register: copy_file_again
+
+- name: assert copy single file (idempotent)
+ assert:
+ that:
+ - copy_file_again is not changed
+
+- name: copy single file (backup)
+ win_copy:
+ content: "{{ lookup('file', 'foo.txt') }}\nfoo bar"
+ dest: '{{test_win_copy_path}}\foo-target.txt'
+ backup: yes
+ register: copy_file_backup
+
+- name: check backup_file
+ win_stat:
+ path: '{{ copy_file_backup.backup_file }}'
+ register: backup_file
+
+- name: assert copy single file (backup)
+ assert:
+ that:
+ - copy_file_backup is changed
+ - backup_file.stat.exists == true
+
+- name: copy single file to folder (check mode)
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\'
+ register: copy_file_to_folder_check
+ check_mode: yes
+
+- name: get result of copy single file to folder (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\foo.txt'
+ register: copy_file_to_folder_actual_check
+
+- name: assert copy single file to folder (check mode)
+ assert:
+ that:
+ - copy_file_to_folder_check is changed
+ - copy_file_to_folder_check.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_file_to_folder_check.dest == test_win_copy_path + '\\foo.txt'
+ - copy_file_to_folder_check.operation == 'file_copy'
+ - copy_file_to_folder_check.size == 8
+ - copy_file_to_folder_actual_check.stat.exists == False
+
+- name: copy single file to folder
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\'
+ register: copy_file_to_folder
+
+- name: get result of copy single file to folder
+ win_stat:
+ path: '{{test_win_copy_path}}\foo.txt'
+ register: copy_file_to_folder_actual
+
+- name: assert copy single file to folder
+ assert:
+ that:
+ - copy_file_to_folder is changed
+ - copy_file_to_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_file_to_folder.dest == test_win_copy_path + '\\foo.txt'
+ - copy_file_to_folder.operation == 'file_copy'
+ - copy_file_to_folder.size == 8
+ - copy_file_to_folder_actual.stat.exists == True
+ - copy_file_to_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+
+- name: copy single file to folder (idempotent)
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\'
+ register: copy_file_to_folder_again
+
+- name: assert copy single file to folder (idempotent)
+ assert:
+ that:
+ - copy_file_to_folder_again is not changed
+
+- name: copy single file to missing folder (check mode)
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\missing\'
+ register: copy_file_to_missing_folder_check
+ check_mode: yes
+
+- name: get result of copy single file to missing folder (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\missing\foo.txt'
+ register: copy_file_to_missing_folder_actual_check
+
+- name: assert copy single file to missing folder (check mode)
+ assert:
+ that:
+ - copy_file_to_missing_folder_check is changed
+ - copy_file_to_missing_folder_check.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_file_to_missing_folder_check.operation == 'file_copy'
+ - copy_file_to_missing_folder_check.size == 8
+ - copy_file_to_missing_folder_actual_check.stat.exists == False
+
+- name: copy single file to missing folder
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\missing\'
+ register: copy_file_to_missing_folder
+
+- name: get result of copy single file to missing folder
+ win_stat:
+ path: '{{test_win_copy_path}}\missing\foo.txt'
+ register: copy_file_to_missing_folder_actual
+
+- name: assert copy single file to missing folder
+ assert:
+ that:
+ - copy_file_to_missing_folder is changed
+ - copy_file_to_missing_folder.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_file_to_missing_folder.operation == 'file_copy'
+ - copy_file_to_missing_folder.size == 8
+ - copy_file_to_missing_folder_actual.stat.exists == True
+ - copy_file_to_missing_folder_actual.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+
+- name: copy folder (check mode)
+ win_copy:
+ src: files
+ dest: '{{test_win_copy_path}}\recursive\folder'
+ register: copy_folder_check
+ check_mode: yes
+
+- name: get result of copy folder (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\recursive\folder'
+ register: copy_folder_actual_check
+
+- name: assert copy folder (check mode)
+ assert:
+ that:
+ - copy_folder_check is changed
+ - copy_folder_check.operation == 'folder_copy'
+ - copy_folder_actual_check.stat.exists == False
+
+- name: copy folder
+ win_copy:
+ src: files
+ dest: '{{test_win_copy_path}}\recursive\folder'
+ register: copy_folder
+
+- name: get result of copy folder
+ win_find:
+ paths: '{{test_win_copy_path}}\recursive\folder'
+ recurse: yes
+ file_type: directory
+ register: copy_folder_actual
+
+- name: assert copy folder
+ assert:
+ that:
+ - copy_folder is changed
+ - copy_folder.operation == 'folder_copy'
+ - copy_folder_actual.examined == 11 # includes files and folders, the below is the nested order
+ - copy_folder_actual.matched == 6
+ - copy_folder_actual.files[0].filename == 'files'
+ - copy_folder_actual.files[1].filename == 'subdir'
+ - copy_folder_actual.files[2].filename == 'empty'
+ - copy_folder_actual.files[3].filename == 'subdir2'
+ - copy_folder_actual.files[4].filename == 'subdir3'
+ - copy_folder_actual.files[5].filename == 'subdir4'
+
+- name: copy folder (idempotent)
+ win_copy:
+ src: files
+ dest: '{{test_win_copy_path}}\recursive\folder'
+ register: copy_folder_again
+
+- name: assert copy folder (idempotent)
+ assert:
+ that:
+ - copy_folder_again is not changed
+
+- name: change the text of a file in the remote source
+ win_copy:
+ content: bar.txt
+ dest: '{{test_win_copy_path}}\recursive\folder\files\foo.txt'
+
+- name: remove folder for test of recursive copy
+ win_file:
+ path: '{{test_win_copy_path}}\recursive\folder\files\subdir\subdir2\subdir3\subdir4'
+ state: absent
+
+- name: copy folder after changes
+ win_copy:
+ src: files
+ dest: '{{test_win_copy_path}}\recursive\folder'
+ register: copy_folder_after_change
+
+- name: get result of copy folder after changes
+ win_find:
+ paths: '{{test_win_copy_path}}\recursive\folder\files'
+ recurse: yes
+ patterns: ['foo.txt', 'qux.txt']
+ register: copy_folder_after_changes_actual
+
+- name: assert copy folder after changes
+ assert:
+ that:
+ - copy_folder_after_change is changed
+ - copy_folder_after_changes_actual.matched == 2
+ - copy_folder_after_changes_actual.files[0].checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'
+ - copy_folder_after_changes_actual.files[1].checksum == 'b54ba7f5621240d403f06815f7246006ef8c7d43'
+
+- name: copy folder's contents (check mode)
+ win_copy:
+ src: files/
+ dest: '{{test_win_copy_path}}\recursive-contents\'
+ register: copy_folder_contents_check
+ check_mode: yes
+
+- name: get result of copy folder's contents (check mode)
+ win_stat:
+ path: '{{test_win_copy_path}}\recursive-contents'
+ register: copy_folder_contents_actual_check
+
+- name: assert copy folder's contents (check mode)
+ assert:
+ that:
+ - copy_folder_contents_check is changed
+ - copy_folder_contents_check.operation == 'folder_copy'
+ - copy_folder_contents_actual_check.stat.exists == False
+
+- name: copy folder's contents
+ win_copy:
+ src: files/
+ dest: '{{test_win_copy_path}}\recursive-contents\'
+ register: copy_folder_contents
+
+- name: get result of copy folder
+ win_find:
+ paths: '{{test_win_copy_path}}\recursive-contents'
+ recurse: yes
+ file_type: directory
+ register: copy_folder_contents_actual
+
+- name: assert copy folder
+ assert:
+ that:
+ - copy_folder_contents is changed
+ - copy_folder_contents.operation == 'folder_copy'
+ - copy_folder_contents_actual.examined == 10 # includes files and folders, the below is the nested order
+ - copy_folder_contents_actual.matched == 5
+ - copy_folder_contents_actual.files[0].filename == 'subdir'
+ - copy_folder_contents_actual.files[1].filename == 'empty'
+ - copy_folder_contents_actual.files[2].filename == 'subdir2'
+ - copy_folder_contents_actual.files[3].filename == 'subdir3'
+ - copy_folder_contents_actual.files[4].filename == 'subdir4'
+
+- name: fail to copy file to a folder
+ win_copy:
+ src: foo.txt
+ dest: '{{test_win_copy_path}}\recursive-contents'
+ register: fail_file_to_folder
+ failed_when: "'object at path is already a directory' not in fail_file_to_folder.msg"
+
+- name: fail to copy folder to a file
+ win_copy:
+ src: subdir/
+ dest: '{{test_win_copy_path}}\recursive-contents\foo.txt'
+ register: fail_folder_to_file
+ failed_when: "'object at parent directory path is already a file' not in fail_folder_to_file.msg"
+
+# https://github.com/ansible/ansible/issues/31336
+- name: create file with colon in the name
+ copy:
+ dest: '{{role_path}}/files-different/colon:file'
+ content: test
+ delegate_to: localhost
+
+- name: copy a file with colon as a source
+ win_copy:
+ src: '{{role_path}}/files-different/colon:file'
+ dest: '{{test_win_copy_path}}\colon.file'
+ register: copy_file_with_colon
+
+- name: get result of file with colon as a source
+ win_stat:
+ path: '{{test_win_copy_path}}\colon.file'
+ register: copy_file_with_colon_result
+
+- name: assert results of copy a file with colon as a source
+ assert:
+ that:
+ - copy_file_with_colon is changed
+ - copy_file_with_colon_result.stat.exists == True
+ - copy_file_with_colon_result.stat.checksum == "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"
+
+- name: remove file with colon in the name
+ file:
+ path: '{{role_path}}/files-different/colon:file'
+ state: absent
+ delegate_to: localhost
+
+- name: copy an encrypted file without decrypting
+ win_copy:
+ src: '{{role_path}}/files-different/vault/vault-file'
+ dest: '{{test_win_copy_path}}\vault-file'
+ decrypt: no
+ register: copy_encrypted_file
+
+- name: get stat of copied encrypted file without decrypting
+ win_stat:
+ path: '{{test_win_copy_path}}\vault-file'
+ register: copy_encrypted_file_result
+
+- name: assert result of copy an encrypted file without decrypting
+ assert:
+ that:
+ - copy_encrypted_file is changed
+ - copy_encrypted_file_result.stat.checksum == "74a89620002d253f38834ee5b06cddd28956a43d"
+
+- name: copy an encrypted file without decrypting (idempotent)
+ win_copy:
+ src: '{{role_path}}/files-different/vault/vault-file'
+ dest: '{{test_win_copy_path}}\vault-file'
+ decrypt: no
+ register: copy_encrypted_file_again
+
+- name: assert result of copy an encrypted file without decrypting (idempotent)
+ assert:
+ that:
+ - copy_encrypted_file_again is not changed
+
+- name: copy folder with encrypted files without decrypting
+ win_copy:
+ src: '{{role_path}}/files-different/vault/'
+ dest: '{{test_win_copy_path}}\encrypted-test'
+ decrypt: no
+ register: copy_encrypted_file
+
+- name: get result of copy folder with encrypted files without decrypting
+ win_find:
+ paths: '{{test_win_copy_path}}\encrypted-test'
+ recurse: yes
+ patterns: '*vault*'
+ register: copy_encrypted_file_result
+
+- name: assert result of copy folder with encrypted files without decrypting
+ assert:
+ that:
+ - copy_encrypted_file is changed
+ - copy_encrypted_file_result.files|count == 2
+ - copy_encrypted_file_result.files[0].checksum == "834563c94127730ecfa42dfc1e1821bbda2e51da"
+ - copy_encrypted_file_result.files[1].checksum == "74a89620002d253f38834ee5b06cddd28956a43d"
+
+- name: copy folder with encrypted files without decrypting (idempotent)
+ win_copy:
+ src: '{{role_path}}/files-different/vault/'
+ dest: '{{test_win_copy_path}}\encrypted-test'
+ decrypt: no
+ register: copy_encrypted_file_again
+
+- name: assert result of copy folder with encrypted files without decrypting (idempotent)
+ assert:
+ that:
+ - copy_encrypted_file_again is not changed
+
+- name: remove test folder after local to remote tests
+ win_file:
+ path: '{{test_win_copy_path}}'
+ state: absent
diff --git a/test/integration/targets/incidental_win_data_deduplication/aliases b/test/integration/targets/incidental_win_data_deduplication/aliases
new file mode 100644
index 00000000..c7657537
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/aliases
@@ -0,0 +1,5 @@
+shippable/windows/incidental
+windows
+skip/windows/2008
+skip/windows/2008-R2
+skip/windows/2012
diff --git a/test/integration/targets/incidental_win_data_deduplication/meta/main.yml b/test/integration/targets/incidental_win_data_deduplication/meta/main.yml
new file mode 100644
index 00000000..9f37e96c
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_win_data_deduplication/tasks/main.yml b/test/integration/targets/incidental_win_data_deduplication/tasks/main.yml
new file mode 100644
index 00000000..ae6be90e
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include: pre_test.yml
diff --git a/test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml b/test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml
new file mode 100644
index 00000000..f72955e4
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/tasks/pre_test.yml
@@ -0,0 +1,40 @@
+---
+- set_fact:
+ AnsibleVhdx: '{{ remote_tmp_dir }}\AnsiblePart.vhdx'
+
+- name: Install FS-Data-Deduplication
+ win_feature:
+ name: FS-Data-Deduplication
+ include_sub_features: true
+ state: present
+ register: data_dedup_feat_reg
+
+- name: Reboot windows after the feature has been installed
+ win_reboot:
+ reboot_timeout: 3600
+ when:
+ - data_dedup_feat_reg.success
+ - data_dedup_feat_reg.reboot_required
+
+- name: Copy VHDX scripts
+ win_template:
+ src: "{{ item.src }}"
+ dest: '{{ remote_tmp_dir }}\{{ item.dest }}'
+ loop:
+ - { src: partition_creation_script.j2, dest: partition_creation_script.txt }
+ - { src: partition_deletion_script.j2, dest: partition_deletion_script.txt }
+
+- name: Create partition
+ win_command: diskpart.exe /s {{ remote_tmp_dir }}\partition_creation_script.txt
+
+- name: Format T with NTFS
+ win_format:
+ drive_letter: T
+ file_system: ntfs
+
+- name: Run tests
+ block:
+ - include: tests.yml
+ always:
+ - name: Detach disk
+ win_command: diskpart.exe /s {{ remote_tmp_dir }}\partition_deletion_script.txt
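+
+# The block/always pairing above guarantees the VHDX is detached even when
+# tests.yml fails, so a failed run does not leave a stale virtual disk attached.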
diff --git a/test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml b/test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml
new file mode 100644
index 00000000..64a42927
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/tasks/tests.yml
@@ -0,0 +1,47 @@
+---
+
+- name: Enable Data Deduplication on the T drive - check mode
+ win_data_deduplication:
+ drive_letter: "T"
+ state: present
+ settings:
+ no_compress: true
+ minimum_file_age_days: 2
+ minimum_file_size: 0
+ check_mode: yes
+ register: win_data_deduplication_enable_check_mode
+
+- name: Check that it was successful with a change - check mode
+ assert:
+ that:
+ - win_data_deduplication_enable_check_mode is changed
+
+- name: Enable Data Deduplication on the T drive
+ win_data_deduplication:
+ drive_letter: "T"
+ state: present
+ settings:
+ no_compress: true
+ minimum_file_age_days: 2
+ minimum_file_size: 0
+ register: win_data_deduplication_enable
+
+- name: Check that it was successful with a change
+ assert:
+ that:
+ - win_data_deduplication_enable is changed
+
+- name: Enable Data Deduplication on the T drive (idempotent)
+ win_data_deduplication:
+ drive_letter: "T"
+ state: present
+ settings:
+ no_compress: true
+ minimum_file_age_days: 2
+ minimum_file_size: 0
+ register: win_data_deduplication_enable_again
+
+- name: Check that it was successful without a change
+ assert:
+ that:
+ - win_data_deduplication_enable_again is not changed
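+
+# The three runs above follow the standard check-mode / apply / re-apply
+# pattern: check mode and the first real run must each report a change, while
+# the second identical run must be idempotent and report no change.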
diff --git a/test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2 b/test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2
new file mode 100644
index 00000000..8e47fda9
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/templates/partition_creation_script.j2
@@ -0,0 +1,11 @@
+create vdisk file="{{ AnsibleVhdx }}" maximum=2000 type=fixed
+
+select vdisk file="{{ AnsibleVhdx }}"
+
+attach vdisk
+
+convert mbr
+
+create partition primary
+
+assign letter="T"
diff --git a/test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2 b/test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2
new file mode 100644
index 00000000..c2be9cd1
--- /dev/null
+++ b/test/integration/targets/incidental_win_data_deduplication/templates/partition_deletion_script.j2
@@ -0,0 +1,3 @@
+select vdisk file="{{ AnsibleVhdx }}"
+
+detach vdisk
diff --git a/test/integration/targets/incidental_win_dsc/aliases b/test/integration/targets/incidental_win_dsc/aliases
new file mode 100644
index 00000000..9114c742
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/aliases
@@ -0,0 +1,6 @@
+shippable/windows/incidental
+windows
+skip/windows/2008
+skip/windows/2008-R2
+skip/windows/2012
+skip/windows/2012-R2
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1
new file mode 100644
index 00000000..dbf1ecf3
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1
@@ -0,0 +1,41 @@
+#Requires -Version 5.0 -Modules CimCmdlets
+
+Function Get-TargetResource
+{
+ [CmdletBinding()]
+ [OutputType([Hashtable])]
+ param(
+ [Parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]
+ [String]$KeyParam
+ )
+ return @{Value = [bool]$global:DSCMachineStatus}
+}
+
+Function Set-TargetResource
+{
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]
+ [String]$KeyParam,
+ [Bool]$Value = $true
+ )
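+ # Setting $global:DSCMachineStatus to 1 is the documented way for a DSC
+ # resource to signal that a reboot is required after Set runs.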
+ $global:DSCMachineStatus = [int]$Value
+}
+
+Function Test-TargetResource
+{
+ [CmdletBinding()]
+ [OutputType([Boolean])]
+ param (
+ [Parameter(Mandatory=$true)]
+ [ValidateNotNullOrEmpty()]
+ [String]$KeyParam,
+ [Bool]$Value = $true
+ )
+ $false
+}
+
+Export-ModuleMember -Function *-TargetResource
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof
new file mode 100644
index 00000000..288b8877
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.schema.mof
@@ -0,0 +1,7 @@
+[ClassVersion("1.0.0"), FriendlyName("xSetReboot")]
+class ANSIBLE_xSetReboot : OMI_BaseResource
+{
+ [Key] String KeyParam;
+ [Write] Boolean Value;
+};
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1
new file mode 100644
index 00000000..79f64969
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1
@@ -0,0 +1,214 @@
+#Requires -Version 5.0 -Modules CimCmdlets
+
+Function ConvertFrom-CimInstance {
+ param(
+ [Parameter(Mandatory=$true)][CimInstance]$Instance
+ )
+ $hashtable = @{
+ _cim_instance = $Instance.CimSystemProperties.ClassName
+ }
+ foreach ($prop in $Instance.CimInstanceProperties) {
+ $hashtable."$($prop.Name)" = ConvertTo-OutputValue -Value $prop.Value
+ }
+ return $hashtable
+}
+
+Function ConvertTo-OutputValue {
+ param($Value)
+
+ if ($Value -is [DateTime[]]) {
+ $Value = $Value | ForEach-Object { $_.ToString("o") }
+ } elseif ($Value -is [DateTime]) {
+ $Value = $Value.ToString("o")
+ } elseif ($Value -is [Double]) {
+ $Value = $Value.ToString() # To avoid Python 2 double parsing issues on test validation
+ } elseif ($Value -is [Double[]]) {
+ $Value = $Value | ForEach-Object { $_.ToString() }
+ } elseif ($Value -is [PSCredential]) {
+ $password = $null
+ $password_ptr = [System.Runtime.InteropServices.Marshal]::SecureStringToGlobalAllocUnicode($Value.Password)
+ try {
+ $password = [System.Runtime.InteropServices.Marshal]::PtrToStringUni($password_ptr)
+ } finally {
+ [System.Runtime.InteropServices.Marshal]::ZeroFreeGlobalAllocUnicode($password_ptr)
+ }
+ $Value = @{
+ username = $Value.Username
+ password = $password
+ }
+ } elseif ($Value -is [CimInstance[]]) {
+ $value_list = [System.Collections.Generic.List`1[Hashtable]]@()
+ foreach ($cim_instance in $Value) {
+ $value_list.Add((ConvertFrom-CimInstance -Instance $cim_instance))
+ }
+ $Value = $value_list.ToArray()
+ } elseif ($Value -is [CimInstance]) {
+ $Value = ConvertFrom-CimInstance -Instance $Value
+ }
+
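+ # The leading comma wraps the result in a one-element array so PowerShell
+ # does not unroll array values into separate pipeline objects on return.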
+ return ,$Value
+}
+
+Function Get-TargetResource
+{
+ [CmdletBinding()]
+ [OutputType([Hashtable])]
+ param(
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("Present", "Absent")]
+ [String] $Ensure = "Present",
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [String] $Path
+ )
+ return @{
+ Ensure = $Ensure
+ Path = $Path
+ }
+}
+
+Function Set-TargetResource
+{
+ [CmdletBinding()]
+ param
+ (
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("Present", "Absent")]
+ [String] $Ensure = "Present",
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [String] $Path,
+
+ [String] $DefaultParam = "Default",
+ [String] $StringParam,
+ [String[]] $StringArrayParam,
+ [SByte] $Int8Param,
+ [SByte[]] $Int8ArrayParam,
+ [Byte] $UInt8Param,
+ [Byte[]] $UInt8ArrayParam,
+ [Int16] $Int16Param,
+ [Int16[]] $Int16ArrayParam,
+ [UInt16] $UInt16Param,
+ [UInt16[]] $UInt16ArrayParam,
+ [Int32] $Int32Param,
+ [Int32[]] $Int32ArrayParam,
+ [UInt32] $UInt32Param,
+ [UInt32[]] $UInt32ArrayParam,
+ [Int64] $Int64Param,
+ [Int64[]] $Int64ArrayParam,
+ [UInt64] $UInt64Param,
+ [UInt64[]] $UInt64ArrayParam,
+ [Bool] $BooleanParam,
+ [Bool[]] $BooleanArrayParam,
+ [Char] $CharParam,
+ [Char[]] $CharArrayParam,
+ [Single] $SingleParam,
+ [Single[]] $SingleArrayParam,
+ [Double] $DoubleParam,
+ [Double[]] $DoubleArrayParam,
+ [DateTime] $DateTimeParam,
+ [DateTime[]] $DateTimeArrayParam,
+ [PSCredential] $PSCredentialParam,
+ [CimInstance[]] $HashtableParam,
+ [CimInstance] $CimInstanceParam,
+ [CimInstance[]] $CimInstanceArrayParam,
+ [CimInstance] $NestedCimInstanceParam,
+ [CimInstance[]] $NestedCimInstanceArrayParam
+ )
+
+ $info = @{
+ Version = "1.0.0"
+ Ensure = @{
+ Type = $Ensure.GetType().FullName
+ Value = $Ensure
+ }
+ Path = @{
+ Type = $Path.GetType().FullName
+ Value = $Path
+ }
+ DefaultParam = @{
+ Type = $DefaultParam.GetType().FullName
+ Value = $DefaultParam
+ }
+ }
+
+ foreach ($kvp in $PSCmdlet.MyInvocation.BoundParameters.GetEnumerator()) {
+ $info."$($kvp.Key)" = @{
+ Type = $kvp.Value.GetType().FullName
+ Value = (ConvertTo-OutputValue -Value $kvp.Value)
+ }
+ }
+
+ if (Test-Path -Path $Path) {
+ Remove-Item -Path $Path -Force > $null
+ }
+ New-Item -Path $Path -ItemType File > $null
+ Set-Content -Path $Path -Value (ConvertTo-Json -InputObject $info -Depth 10) > $null
+ Write-Verbose -Message "set verbose"
+ Write-Warning -Message "set warning"
+}
+
+Function Test-TargetResource
+{
+ [CmdletBinding()]
+ [OutputType([Boolean])]
+ param
+ (
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("Present", "Absent")]
+ [String] $Ensure = "Present",
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [String] $Path,
+
+ [String] $DefaultParam = "Default",
+ [String] $StringParam,
+ [String[]] $StringArrayParam,
+ [SByte] $Int8Param,
+ [SByte[]] $Int8ArrayParam,
+ [Byte] $UInt8Param,
+ [Byte[]] $UInt8ArrayParam,
+ [Int16] $Int16Param,
+ [Int16[]] $Int16ArrayParam,
+ [UInt16] $UInt16Param,
+ [UInt16[]] $UInt16ArrayParam,
+ [Int32] $Int32Param,
+ [Int32[]] $Int32ArrayParam,
+ [UInt32] $UInt32Param,
+ [UInt32[]] $UInt32ArrayParam,
+ [Int64] $Int64Param,
+ [Int64[]] $Int64ArrayParam,
+ [UInt64] $UInt64Param,
+ [UInt64[]] $UInt64ArrayParam,
+ [Bool] $BooleanParam,
+ [Bool[]] $BooleanArrayParam,
+ [Char] $CharParam,
+ [Char[]] $CharArrayParam,
+ [Single] $SingleParam,
+ [Single[]] $SingleArrayParam,
+ [Double] $DoubleParam,
+ [Double[]] $DoubleArrayParam,
+ [DateTime] $DateTimeParam,
+ [DateTime[]] $DateTimeArrayParam,
+ [PSCredential] $PSCredentialParam,
+ [CimInstance[]] $HashtableParam,
+ [CimInstance] $CimInstanceParam,
+ [CimInstance[]] $CimInstanceArrayParam,
+ [CimInstance] $NestedCimInstanceParam,
+ [CimInstance[]] $NestedCimInstanceArrayParam
+ )
+ Write-Verbose -Message "test verbose"
+ Write-Warning -Message "test warning"
+ $exists = Test-Path -LiteralPath $Path -PathType Leaf
+ if ($Ensure -eq "Present") {
+ $exists
+ } else {
+ -not $exists
+ }
+}
+
+Export-ModuleMember -Function *-TargetResource
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof
new file mode 100644
index 00000000..c61b2b1e
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof
@@ -0,0 +1,60 @@
+[ClassVersion("1.0.0")]
+class ANSIBLE_xTestClass
+{
+ [Key] String Key;
+ [Write] String StringValue;
+ [Write] SInt32 IntValue;
+ [Write] String StringArrayValue[];
+};
+
+[ClassVersion("1.0.0")]
+class ANSIBLE_xNestedClass
+{
+ [Key] String KeyValue;
+ [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimValue;
+ [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashValue[];
+ [Write] SInt16 IntValue;
+};
+
+[ClassVersion("1.0.0"), FriendlyName("xTestResource")]
+class ANSIBLE_xTestResource : OMI_BaseResource
+{
+ [Key] String Path;
+ [Required, ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] String Ensure;
+ [Read] String ReadParam;
+ [Write] String DefaultParam;
+ [Write] String StringParam;
+ [Write] String StringArrayParam[];
+ [Write] SInt8 Int8Param;
+ [Write] SInt8 Int8ArrayParam[];
+ [Write] UInt8 UInt8Param;
+ [Write] UInt8 UInt8ArrayParam[];
+ [Write] SInt16 Int16Param;
+ [Write] SInt16 Int16ArrayParam[];
+ [Write] UInt16 UInt16Param;
+ [Write] UInt16 UInt16ArrayParam[];
+ [Write] SInt32 Int32Param;
+ [Write] SInt32 Int32ArrayParam[];
+ [Write] UInt32 UInt32Param;
+ [Write] UInt32 UInt32ArrayParam[];
+ [Write] SInt64 Int64Param;
+ [Write] SInt64 Int64ArrayParam[];
+ [Write] UInt64 UInt64Param;
+ [Write] UInt64 UInt64ArrayParam[];
+ [Write] Boolean BooleanParam;
+ [Write] Boolean BooleanArrayParam[];
+ [Write] Char16 CharParam;
+ [Write] Char16 CharArrayParam[];
+ [Write] Real32 SingleParam;
+ [Write] Real32 SingleArrayParam[];
+ [Write] Real64 DoubleParam;
+ [Write] Real64 DoubleArrayParam[];
+ [Write] DateTime DateTimeParam;
+ [Write] DateTime DateTimeArrayParam[];
+ [Write, EmbeddedInstance("MSFT_Credential")] String PSCredentialParam;
+ [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashtableParam[];
+ [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimInstanceArrayParam[];
+ [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceParam;
+ [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceArrayParam[];
+};
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1
new file mode 100644
index 00000000..3d61611d
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1
@@ -0,0 +1,13 @@
+@{
+ ModuleVersion = '1.0.0'
+ GUID = '80c895c4-de3f-4d6d-8fa4-c504c96b6f22'
+ Author = 'Ansible'
+ CompanyName = 'Ansible'
+ Copyright = '(c) 2019'
+ Description = 'Test DSC Resource for Ansible integration tests'
+ PowerShellVersion = '5.0'
+ CLRVersion = '4.0'
+ FunctionsToExport = '*'
+ CmdletsToExport = '*'
+}
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1
new file mode 100644
index 00000000..d75256e1
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1
@@ -0,0 +1,214 @@
+#Requires -Version 5.0 -Modules CimCmdlets
+
+Function ConvertFrom-CimInstance {
+ param(
+ [Parameter(Mandatory=$true)][CimInstance]$Instance
+ )
+ $hashtable = @{
+ _cim_instance = $Instance.CimSystemProperties.ClassName
+ }
+ foreach ($prop in $Instance.CimInstanceProperties) {
+ $hashtable."$($prop.Name)" = ConvertTo-OutputValue -Value $prop.Value
+ }
+ return $hashtable
+}
+
+Function ConvertTo-OutputValue {
+ param($Value)
+
+ if ($Value -is [DateTime[]]) {
+ $Value = $Value | ForEach-Object { $_.ToString("o") }
+ } elseif ($Value -is [DateTime]) {
+ $Value = $Value.ToString("o")
+ } elseif ($Value -is [Double]) {
+ $Value = $Value.ToString() # To avoid Python 2 double parsing issues on test validation
+ } elseif ($Value -is [Double[]]) {
+ $Value = $Value | ForEach-Object { $_.ToString() }
+ } elseif ($Value -is [PSCredential]) {
+ $password = $null
+ $password_ptr = [System.Runtime.InteropServices.Marshal]::SecureStringToGlobalAllocUnicode($Value.Password)
+ try {
+ $password = [System.Runtime.InteropServices.Marshal]::PtrToStringUni($password_ptr)
+ } finally {
+ [System.Runtime.InteropServices.Marshal]::ZeroFreeGlobalAllocUnicode($password_ptr)
+ }
+ $Value = @{
+ username = $Value.Username
+ password = $password
+ }
+ } elseif ($Value -is [CimInstance[]]) {
+ $value_list = [System.Collections.Generic.List`1[Hashtable]]@()
+ foreach ($cim_instance in $Value) {
+ $value_list.Add((ConvertFrom-CimInstance -Instance $cim_instance))
+ }
+ $Value = $value_list.ToArray()
+ } elseif ($Value -is [CimInstance]) {
+ $Value = ConvertFrom-CimInstance -Instance $Value
+ }
+
+ return ,$Value
+}
+
+Function Get-TargetResource
+{
+ [CmdletBinding()]
+ [OutputType([Hashtable])]
+ param(
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("Present", "Absent")]
+ [String] $Ensure = "Present",
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [String] $Path
+ )
+ return @{
+ Ensure = $Ensure
+ Path = $Path
+ }
+}
+
+Function Set-TargetResource
+{
+ [CmdletBinding()]
+ param
+ (
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("Present", "Absent")]
+ [String] $Ensure = "Present",
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [String] $Path,
+
+ [String] $DefaultParam = "Default",
+ [String] $StringParam,
+ [String[]] $StringArrayParam,
+ [SByte] $Int8Param,
+ [SByte[]] $Int8ArrayParam,
+ [Byte] $UInt8Param,
+ [Byte[]] $UInt8ArrayParam,
+ [Int16] $Int16Param,
+ [Int16[]] $Int16ArrayParam,
+ [UInt16] $UInt16Param,
+ [UInt16[]] $UInt16ArrayParam,
+ [Int32] $Int32Param,
+ [Int32[]] $Int32ArrayParam,
+ [UInt32] $UInt32Param,
+ [UInt32[]] $UInt32ArrayParam,
+ [Int64] $Int64Param,
+ [Int64[]] $Int64ArrayParam,
+ [UInt64] $UInt64Param,
+ [UInt64[]] $UInt64ArrayParam,
+ [Bool] $BooleanParam,
+ [Bool[]] $BooleanArrayParam,
+ [Char] $CharParam,
+ [Char[]] $CharArrayParam,
+ [Single] $SingleParam,
+ [Single[]] $SingleArrayParam,
+ [Double] $DoubleParam,
+ [Double[]] $DoubleArrayParam,
+ [DateTime] $DateTimeParam,
+ [DateTime[]] $DateTimeArrayParam,
+ [PSCredential] $PSCredentialParam,
+ [CimInstance[]] $HashtableParam,
+ [CimInstance] $CimInstanceParam,
+ [CimInstance[]] $CimInstanceArrayParam,
+ [CimInstance] $NestedCimInstanceParam,
+ [CimInstance[]] $NestedCimInstanceArrayParam
+ )
+
+ $info = @{
+ Version = "1.0.1"
+ Ensure = @{
+ Type = $Ensure.GetType().FullName
+ Value = $Ensure
+ }
+ Path = @{
+ Type = $Path.GetType().FullName
+ Value = $Path
+ }
+ DefaultParam = @{
+ Type = $DefaultParam.GetType().FullName
+ Value = $DefaultParam
+ }
+ }
+
+ foreach ($kvp in $PSCmdlet.MyInvocation.BoundParameters.GetEnumerator()) {
+ $info."$($kvp.Key)" = @{
+ Type = $kvp.Value.GetType().FullName
+ Value = (ConvertTo-OutputValue -Value $kvp.Value)
+ }
+ }
+
+ if (Test-Path -Path $Path) {
+ Remove-Item -Path $Path -Force > $null
+ }
+ New-Item -Path $Path -ItemType File > $null
+ Set-Content -Path $Path -Value (ConvertTo-Json -InputObject $info -Depth 10) > $null
+ Write-Verbose -Message "set verbose"
+ Write-Warning -Message "set warning"
+}
+
+Function Test-TargetResource
+{
+ [CmdletBinding()]
+ [OutputType([Boolean])]
+ param
+ (
+ [Parameter(Mandatory = $true)]
+ [ValidateSet("Present", "Absent")]
+ [String] $Ensure = "Present",
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [String] $Path,
+
+ [String] $DefaultParam = "Default",
+ [String] $StringParam,
+ [String[]] $StringArrayParam,
+ [SByte] $Int8Param,
+ [SByte[]] $Int8ArrayParam,
+ [Byte] $UInt8Param,
+ [Byte[]] $UInt8ArrayParam,
+ [Int16] $Int16Param,
+ [Int16[]] $Int16ArrayParam,
+ [UInt16] $UInt16Param,
+ [UInt16[]] $UInt16ArrayParam,
+ [Int32] $Int32Param,
+ [Int32[]] $Int32ArrayParam,
+ [UInt32] $UInt32Param,
+ [UInt32[]] $UInt32ArrayParam,
+ [Int64] $Int64Param,
+ [Int64[]] $Int64ArrayParam,
+ [UInt64] $UInt64Param,
+ [UInt64[]] $UInt64ArrayParam,
+ [Bool] $BooleanParam,
+ [Bool[]] $BooleanArrayParam,
+ [Char] $CharParam,
+ [Char[]] $CharArrayParam,
+ [Single] $SingleParam,
+ [Single[]] $SingleArrayParam,
+ [Double] $DoubleParam,
+ [Double[]] $DoubleArrayParam,
+ [DateTime] $DateTimeParam,
+ [DateTime[]] $DateTimeArrayParam,
+ [PSCredential] $PSCredentialParam,
+ [CimInstance[]] $HashtableParam,
+ [CimInstance] $CimInstanceParam,
+ [CimInstance[]] $CimInstanceArrayParam,
+ [CimInstance] $NestedCimInstanceParam,
+ [CimInstance[]] $NestedCimInstanceArrayParam
+ )
+ Write-Verbose -Message "test verbose"
+ Write-Warning -Message "test warning"
+ $exists = Test-Path -LiteralPath $Path -PathType Leaf
+ if ($Ensure -eq "Present") {
+ $exists
+ } else {
+ -not $exists
+ }
+}
+
+Export-ModuleMember -Function *-TargetResource
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof
new file mode 100644
index 00000000..9301664b
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.schema.mof
@@ -0,0 +1,63 @@
+[ClassVersion("1.0.1")]
+class ANSIBLE_xTestClass
+{
+ [Key] String KeyValue;
+ [Write, ValueMap{"Choice1", "Choice2"}, Values{"Choice1", "Choice2"}] String Choice;
+ [Write] String StringValue;
+ [Write] SInt32 IntValue;
+ [Write] String StringArrayValue[];
+};
+
+[ClassVersion("1.0.1")]
+class ANSIBLE_xNestedClass
+{
+ [Key] String KeyValue;
+ [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimValue;
+ [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimArrayValue[];
+ [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashValue[];
+ [Write] SInt16 IntValue;
+};
+
+[ClassVersion("1.0.1"), FriendlyName("xTestResource")]
+class ANSIBLE_xTestResource : OMI_BaseResource
+{
+ [Key] String Path;
+ [Required, ValueMap{"Present", "Absent"}, Values{"Present", "Absent"}] String Ensure;
+ [Read] String ReadParam;
+ [Write] String DefaultParam;
+ [Write] String StringParam;
+ [Write] String StringArrayParam[];
+ [Write] SInt8 Int8Param;
+ [Write] SInt8 Int8ArrayParam[];
+ [Write] UInt8 UInt8Param;
+ [Write] UInt8 UInt8ArrayParam[];
+ [Write] SInt16 Int16Param;
+ [Write] SInt16 Int16ArrayParam[];
+ [Write] UInt16 UInt16Param;
+ [Write] UInt16 UInt16ArrayParam[];
+ [Write] SInt32 Int32Param;
+ [Write] SInt32 Int32ArrayParam[];
+ [Write] UInt32 UInt32Param;
+ [Write] UInt32 UInt32ArrayParam[];
+ [Write] SInt64 Int64Param;
+ [Write] SInt64 Int64ArrayParam[];
+ [Write] UInt64 UInt64Param;
+ [Write] UInt64 UInt64ArrayParam[];
+ [Write] Boolean BooleanParam;
+ [Write] Boolean BooleanArrayParam[];
+ [Write] Char16 CharParam;
+ [Write] Char16 CharArrayParam[];
+ [Write] Real32 SingleParam;
+ [Write] Real32 SingleArrayParam[];
+ [Write] Real64 DoubleParam;
+ [Write] Real64 DoubleArrayParam[];
+ [Write] DateTime DateTimeParam;
+ [Write] DateTime DateTimeArrayParam[];
+ [Write, EmbeddedInstance("MSFT_Credential")] String PSCredentialParam;
+ [Write, EmbeddedInstance("MSFT_KeyValuePair")] String HashtableParam[];
+ [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimInstanceParam;
+ [Write, EmbeddedInstance("ANSIBLE_xTestClass")] String CimInstanceArrayParam[];
+ [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceParam;
+ [Write, EmbeddedInstance("ANSIBLE_xNestedClass")] String NestedCimInstanceArrayParam[];
+};
+
diff --git a/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1 b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1
new file mode 100644
index 00000000..0c43b852
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1
@@ -0,0 +1,13 @@
+@{
+ ModuleVersion = '1.0.1'
+ GUID = '80c895c4-de3f-4d6d-8fa4-c504c96b6f22'
+ Author = 'Ansible'
+ CompanyName = 'Ansible'
+ Copyright = '(c) 2019'
+ Description = 'Test DSC Resource for Ansible integration tests'
+ PowerShellVersion = '5.0'
+ CLRVersion = '4.0'
+ FunctionsToExport = '*'
+ CmdletsToExport = '*'
+}
+
diff --git a/test/integration/targets/incidental_win_dsc/meta/main.yml b/test/integration/targets/incidental_win_dsc/meta/main.yml
new file mode 100644
index 00000000..9f37e96c
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_win_dsc/tasks/main.yml b/test/integration/targets/incidental_win_dsc/tasks/main.yml
new file mode 100644
index 00000000..f37295ab
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+- name: get powershell version
+ win_shell: $PSVersionTable.PSVersion.Major
+ register: powershell_version
+
+- name: expect failure when running on old PS hosts
+ win_dsc:
+ resource_name: File
+ register: fail_dsc_old
+ failed_when: '"This module cannot run as it requires a minimum PowerShell version of 5.0" not in fail_dsc_old.msg'
+ when: powershell_version.stdout_lines[0]|int < 5
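+
+# win_dsc requires PowerShell 5.0 or later, so the remaining tests are gated
+# on the detected major version; older hosts only verify the error above.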
+
+- name: run tests when PSv5+
+ when: powershell_version.stdout_lines[0]|int >= 5
+ block:
+ - name: add remote temp dir to PSModulePath
+ win_path:
+ name: PSModulePath
+ state: present
+ scope: machine
+ elements:
+ - '{{ remote_tmp_dir }}'
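+ # The machine-scoped PSModulePath entry lets the DSC engine discover the
+ # xTestDsc module copied in the next task; the always block removes the
+ # entry once the tests finish.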
+
+ - name: copy custom DSC resources to remote temp dir
+ win_copy:
+ src: xTestDsc
+ dest: '{{ remote_tmp_dir }}'
+
+ - name: run tests
+ include_tasks: tests.yml
+
+ always:
+ - name: remove remote tmp dir from PSModulePath
+ win_path:
+ name: PSModulePath
+ state: absent
+ scope: machine
+ elements:
+ - '{{ remote_tmp_dir }}'
diff --git a/test/integration/targets/incidental_win_dsc/tasks/tests.yml b/test/integration/targets/incidental_win_dsc/tasks/tests.yml
new file mode 100644
index 00000000..d2a6802f
--- /dev/null
+++ b/test/integration/targets/incidental_win_dsc/tasks/tests.yml
@@ -0,0 +1,544 @@
+---
+- name: fail with incorrect DSC resource name
+ win_dsc:
+ resource_name: FakeResource
+ register: fail_invalid_resource
+ failed_when: fail_invalid_resource.msg != "Resource 'FakeResource' not found."
+
+- name: fail with invalid DSC version
+ win_dsc:
+ resource_name: xTestResource
+ module_version: 0.0.1
+ register: fail_invalid_version
+ failed_when: 'fail_invalid_version.msg != "Resource ''xTestResource'' with version ''0.0.1'' not found. Versions installed: ''1.0.0'', ''1.0.1''."'
+
+- name: fail with mandatory option not set
+ win_dsc:
+ resource_name: xSetReboot
+ Value: yes
+ register: fail_man_key
+ failed_when: 'fail_man_key.msg != "missing required arguments: KeyParam"'
+
+- name: fail with mandatory option not set in sub dict
+ win_dsc:
+ resource_name: xTestResource
+ Path: C:\path
+ Ensure: Present
+ CimInstanceParam: # Missing KeyValue in dict
+ Choice: Choice1
+ register: fail_man_key_sub_dict
+ failed_when: 'fail_man_key_sub_dict.msg != "missing required arguments: KeyValue found in CimInstanceParam"'
+
+- name: fail invalid option
+ win_dsc:
+ resource_name: xSetReboot
+ KeyParam: key
+ OtherParam: invalid
+ register: fail_invalid_option
+ failed_when: 'fail_invalid_option.msg != "Unsupported parameters for (win_dsc) module: OtherParam. Supported parameters include: KeyParam, PsDscRunAsCredential_username, module_version, Value, PsDscRunAsCredential_password, resource_name, DependsOn"'
+
+- name: fail invalid option in sub dict
+ win_dsc:
+ resource_name: xTestResource
+ Path: C:\path
+ Ensure: Present
+ NestedCimInstanceParam:
+ KeyValue: key
+ CimValue:
+ KeyValue: other key
+ InvalidKey: invalid
+ register: fail_invalid_option_sub_dict
+ failed_when: 'fail_invalid_option_sub_dict.msg != "Unsupported parameters for (win_dsc) module: InvalidKey found in NestedCimInstanceParam -> CimValue. Supported parameters include: IntValue, KeyValue, StringArrayValue, Choice, StringValue"'
+
+- name: fail invalid read only option
+ win_dsc:
+ resource_name: xTestResource
+ Path: C:\path
+ Ensure: Present
+ ReadParam: abc
+ register: fail_invalid_option_read_only
+ failed_when: '"Unsupported parameters for (win_dsc) module: ReadParam" not in fail_invalid_option_read_only.msg'
+
+- name: fail invalid choice
+ win_dsc:
+ resource_name: xTestResource
+ Path: C:\path
+ Ensure: invalid
+ register: fail_invalid_choice
+ failed_when: 'fail_invalid_choice.msg != "value of Ensure must be one of: Present, Absent. Got no match for: invalid"'
+
+- name: fail invalid choice in sub dict
+ win_dsc:
+ resource_name: xTestResource
+ Path: C:\path
+ Ensure: Present
+ CimInstanceArrayParam:
+ - KeyValue: key
+ - KeyValue: key2
+ Choice: Choice3
+ register: fail_invalid_choice_sub_dict
+ failed_when: 'fail_invalid_choice_sub_dict.msg != "value of Choice must be one of: Choice1, Choice2. Got no match for: Choice3 found in CimInstanceArrayParam"'
+
+- name: fail old version missing new option
+ win_dsc:
+ resource_name: xTestResource
+ module_version: 1.0.0
+ Path: C:\path
+ Ensure: Present
+ CimInstanceParam: # CimInstanceParam does not exist in the 1.0.0 version
+ Key: key
+ register: fail_invalid_option_old
+ failed_when: '"Unsupported parameters for (win_dsc) module: CimInstanceParam" not in fail_invalid_option_old.msg'
+
+- name: fail old version missing new option sub dict
+ win_dsc:
+ resource_name: xTestResource
+ module_version: 1.0.0
+ Path: C:\path
+ Ensure: Present
+ CimInstanceArrayParam:
+ - Key: key
+ Choice: Choice1
+ register: fail_invalid_option_old_sub_dict
+ failed_when: 'fail_invalid_option_old_sub_dict.msg != "Unsupported parameters for (win_dsc) module: Choice found in CimInstanceArrayParam. Supported parameters include: Key, IntValue, StringArrayValue, StringValue"'
+
+- name: create test file (check mode)
+ win_dsc:
+ resource_name: File
+ DestinationPath: '{{ remote_tmp_dir }}\dsc-file'
+ Contents: file contents
+ Attributes:
+ - Hidden
+ - ReadOnly
+ Ensure: Present
+ Type: File
+ register: create_file_check
+ check_mode: yes
+
+- name: get result of create test file (check mode)
+ win_stat:
+ path: '{{ remote_tmp_dir }}\dsc-file'
+ register: create_file_actual_check
+
+- name: assert create test file (check mode)
+ assert:
+ that:
+ - create_file_check is changed
+ - create_file_check.module_version == None # Some built-in modules don't have a version set
+ - not create_file_check.reboot_required
+ - not create_file_actual_check.stat.exists
+
+- name: assert create test file verbosity (check mode)
+ assert:
+ that:
+ - create_file_check.verbose_test is defined
+ - not create_file_check.verbose_set is defined
+ when: ansible_verbosity >= 3
+
+- name: create test file
+ win_dsc:
+ resource_name: File
+ DestinationPath: '{{ remote_tmp_dir }}\dsc-file'
+ Contents: file contents
+ Attributes:
+ - Hidden
+ - ReadOnly
+ Ensure: Present
+ Type: File
+ register: create_file
+
+- name: get result of create test file
+ win_stat:
+ path: '{{ remote_tmp_dir }}\dsc-file'
+ register: create_file_actual
+
+- name: assert create test file verbosity
+ assert:
+ that:
+ - create_file.verbose_test is defined
+ - create_file.verbose_set is defined
+ when: ansible_verbosity >= 3
+
+- name: assert create test file
+ assert:
+ that:
+ - create_file is changed
+ - create_file.module_version == None
+ - not create_file.reboot_required
+ - create_file_actual.stat.exists
+ - create_file_actual.stat.attributes == "ReadOnly, Hidden, Archive"
+ - create_file_actual.stat.checksum == 'd48daab51112b49ecabd917adc345b8ba257055e'
+
+- name: create test file (idempotent)
+ win_dsc:
+ resource_name: File
+ DestinationPath: '{{ remote_tmp_dir }}\dsc-file'
+ Contents: file contents
+ Attributes:
+ - Hidden
+ - ReadOnly
+ Ensure: Present
+ Type: File
+ register: create_file_again
+
+- name: assert create test file (idempotent)
+ assert:
+ that:
+ - not create_file_again is changed
+ - create_file.module_version == None
+ - not create_file.reboot_required
+
+- name: get SID of the current Ansible user
+ win_shell: |
+ Add-Type -AssemblyName System.DirectoryServices.AccountManagement
+ [System.DirectoryServices.AccountManagement.UserPrincipal]::Current.Sid.Value
+ register: actual_sid
+
+- name: run DSC process as another user
+ win_dsc:
+ resource_name: Script
+ GetScript: '@{ Result= "" }'
+ SetScript: |
+ Add-Type -AssemblyName System.DirectoryServices.AccountManagement
+ $sid = [System.DirectoryServices.AccountManagement.UserPrincipal]::Current.Sid.Value
+ Set-Content -Path "{{ remote_tmp_dir }}\runas.txt" -Value $sid
+ TestScript: $false
+ PsDscRunAsCredential_username: '{{ ansible_user }}'
+ PsDscRunAsCredential_password: '{{ ansible_password }}'
+ register: runas_user
+
+- name: get result of run DSC process as another user
+ slurp:
+ path: '{{ remote_tmp_dir }}\runas.txt'
+ register: runas_user_result
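+
+# slurp returns file content base64-encoded, hence the b64decode filter in
+# the assertion below when comparing against the expected SID.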
+
+- name: assert run DSC process as another user
+ assert:
+ that:
+ - runas_user is changed
+ - runas_user.module_version != None # Can't reliably predict the version, but we can test that it is set
+ - not runas_user.reboot_required
+ - runas_user_result.content|b64decode == actual_sid.stdout
+
+- name: run DSC that sets reboot_required with defaults
+ win_dsc:
+ resource_name: xSetReboot
+ KeyParam: value # Just to satisfy the resource's key validation
+ register: set_reboot_defaults
+
+- name: assert run DSC that sets reboot_required with defaults
+ assert:
+ that:
+ - set_reboot_defaults.reboot_required
+
+- name: run DSC that sets reboot_required with False
+ win_dsc:
+ resource_name: xSetReboot
+ KeyParam: value
+ Value: no
+ register: set_reboot_false
+
+- name: assert run DSC that sets reboot_required with False
+ assert:
+ that:
+ - not set_reboot_false.reboot_required
+
+- name: run DSC that sets reboot_required with True
+ win_dsc:
+ resource_name: xSetReboot
+ KeyParam: value
+ Value: yes
+ register: set_reboot_true
+
+- name: assert run DSC that sets reboot_required with True
+ assert:
+ that:
+ - set_reboot_true.reboot_required
+
+- name: test DSC with all types
+ win_dsc:
+ resource_name: xTestResource
+ Path: '{{ remote_tmp_dir }}\test-types.json'
+ Ensure: Present
+ StringParam: string param
+ StringArrayParam:
+ - string 1
+ - string 2
+ Int8Param: 127 # [SByte]::MaxValue
+ Int8ArrayParam:
+ - 127
+ - '127'
+ UInt8Param: 255 # [Byte]::MaxValue
+ UInt8ArrayParam:
+ - 255
+ - '255'
+ Int16Param: 32767 # [Int16]::MaxValue
+ Int16ArrayParam: 32767, 32767
+ UInt16Param: '65535' # [UInt16]::MaxValue
+ UInt16ArrayParam: 65535
+ Int32Param: 2147483647 # [Int32]::MaxValue
+ Int32ArrayParam: '2147483647'
+ UInt32Param: '4294967295' # [UInt32]::MaxValue
+ UInt32ArrayParam:
+ - '4294967295'
+ - 4294967295
+ Int64Param: 9223372036854775807 # [Int64]::MaxValue
+ Int64ArrayParam:
+ - -9223372036854775808 # [Int64]::MinValue
+ - 9223372036854775807
+ UInt64Param: 18446744073709551615 # [UInt64]::MaxValue
+ UInt64ArrayParam:
+ - 0 # [UInt64]::MinValue
+ - 18446744073709551615
+ BooleanParam: True
+ BooleanArrayParam:
+ - True
+ - 'True'
+ - 'true'
+ - 'y'
+ - 'yes'
+ - 1
+ - False
+ - 'False'
+ - 'false'
+ - 'n'
+ - 'no'
+ - 0
+ CharParam: c
+ CharArrayParam:
+ - c
+ - h
+ - a
+ - r
+ SingleParam: 3.402823E+38
+ SingleArrayParam:
+ - '3.402823E+38'
+ - 1.2393494
+ DoubleParam: 1.79769313486232E+300
+ DoubleArrayParam:
+ - '1.79769313486232E+300'
+ - 3.56821831681516
+ DateTimeParam: '2019-02-22T13:57:31.2311892-04:00'
+ DateTimeArrayParam:
+ - '2019-02-22T13:57:31.2311892+00:00'
+ - '2019-02-22T13:57:31.2311892+04:00'
+ PSCredentialParam_username: username1
+ PSCredentialParam_password: password1
+ HashtableParam:
+ key1: string 1
+ key2: ''
+ key3: 1
+ CimInstanceParam:
+ KeyValue: a
+ CimInstanceArrayParam:
+ - KeyValue: b
+ Choice: Choice1
+ StringValue: string 1
+ IntValue: 1
+ StringArrayValue:
+ - abc
+ - def
+ - KeyValue: c
+ Choice: Choice2
+ StringValue: string 2
+ IntValue: '2'
+ StringArrayValue:
+ - ghi
+ - jkl
+ NestedCimInstanceParam:
+ KeyValue: key value
+ CimValue:
+ KeyValue: d
+ CimArrayValue:
+ - KeyValue: e
+ Choice: Choice2
+ HashValue:
+ a: a
+ IntValue: '300'
+ register: dsc_types
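+
+# win_dsc casts each option to the CIM type declared in the resource MOF, so
+# quoted numbers and the assorted boolean spellings above should all arrive
+# correctly typed; the assertions below check the resulting .NET type and
+# value for every parameter.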
+
+- name: get result of test DSC with all types
+ slurp:
+ path: '{{ remote_tmp_dir }}\test-types.json'
+ register: dsc_types_raw
+
+- name: convert result of test DSC with all types to dict
+ set_fact:
+ dsc_types_actual: '{{ dsc_types_raw.content | b64decode | from_json }}'
+
+- name: assert test DSC with all types
+ assert:
+ that:
+ - dsc_types is changed
+ - dsc_types.module_version == '1.0.1'
+ - not dsc_types.reboot_required
+ - dsc_types_actual.Version == '1.0.1'
+ - dsc_types_actual.Verbose.Value.IsPresent
+ - dsc_types_actual.DefaultParam.Value == 'Default' # ensures that the default is set in the engine if we don't set it ourselves
+ - dsc_types_actual.Ensure.Value == 'Present'
+ - dsc_types_actual.Path.Value == remote_tmp_dir + "\\test-types.json"
+ - dsc_types_actual.StringParam.Type == 'System.String'
+ - dsc_types_actual.StringParam.Value == 'string param'
+ - dsc_types_actual.StringArrayParam.Type == 'System.String[]'
+ - dsc_types_actual.StringArrayParam.Value == ['string 1', 'string 2']
+ - dsc_types_actual.Int8Param.Type == 'System.SByte'
+ - dsc_types_actual.Int8Param.Value == 127
+ - dsc_types_actual.Int8ArrayParam.Type == 'System.SByte[]'
+ - dsc_types_actual.Int8ArrayParam.Value == [127, 127]
+ - dsc_types_actual.UInt8Param.Type == 'System.Byte'
+ - dsc_types_actual.UInt8Param.Value == 255
+ - dsc_types_actual.UInt8ArrayParam.Type == 'System.Byte[]'
+ - dsc_types_actual.UInt8ArrayParam.Value == [255, 255]
+ - dsc_types_actual.Int16Param.Type == 'System.Int16'
+ - dsc_types_actual.Int16Param.Value == 32767
+ - dsc_types_actual.Int16ArrayParam.Type == 'System.Int16[]'
+ - dsc_types_actual.Int16ArrayParam.Value == [32767, 32767]
+ - dsc_types_actual.UInt16Param.Type == 'System.UInt16'
+ - dsc_types_actual.UInt16Param.Value == 65535
+ - dsc_types_actual.UInt16ArrayParam.Type == 'System.UInt16[]'
+ - dsc_types_actual.UInt16ArrayParam.Value == [65535]
+ - dsc_types_actual.Int32Param.Type == 'System.Int32'
+ - dsc_types_actual.Int32Param.Value == 2147483647
+ - dsc_types_actual.Int32ArrayParam.Type == 'System.Int32[]'
+ - dsc_types_actual.Int32ArrayParam.Value == [2147483647]
+ - dsc_types_actual.UInt32Param.Type == 'System.UInt32'
+ - dsc_types_actual.UInt32Param.Value == 4294967295
+ - dsc_types_actual.UInt32ArrayParam.Type == 'System.UInt32[]'
+ - dsc_types_actual.UInt32ArrayParam.Value == [4294967295, 4294967295]
+ - dsc_types_actual.Int64Param.Type == 'System.Int64'
+ - dsc_types_actual.Int64Param.Value == 9223372036854775807
+ - dsc_types_actual.Int64ArrayParam.Type == 'System.Int64[]'
+ - dsc_types_actual.Int64ArrayParam.Value == [-9223372036854775808, 9223372036854775807]
+ - dsc_types_actual.UInt64Param.Type == 'System.UInt64'
+ - dsc_types_actual.UInt64Param.Value == 18446744073709551615
+ - dsc_types_actual.UInt64ArrayParam.Type == 'System.UInt64[]'
+ - dsc_types_actual.UInt64ArrayParam.Value == [0, 18446744073709551615]
+ - dsc_types_actual.BooleanParam.Type == 'System.Boolean'
+ - dsc_types_actual.BooleanParam.Value == True
+ - dsc_types_actual.BooleanArrayParam.Type == 'System.Boolean[]'
+ - dsc_types_actual.BooleanArrayParam.Value == [True, True, True, True, True, True, False, False, False, False, False, False]
+ - dsc_types_actual.CharParam.Type == 'System.Char'
+ - dsc_types_actual.CharParam.Value == 'c'
+ - dsc_types_actual.CharArrayParam.Type == 'System.Char[]'
+ - dsc_types_actual.CharArrayParam.Value == ['c', 'h', 'a', 'r']
+ - dsc_types_actual.SingleParam.Type == 'System.Single'
+ - dsc_types_actual.SingleParam.Value|string == '3.402823e+38'
+ - dsc_types_actual.SingleArrayParam.Type == 'System.Single[]'
+ - dsc_types_actual.SingleArrayParam.Value|length == 2
+ - dsc_types_actual.SingleArrayParam.Value[0]|string == '3.402823e+38'
+ - dsc_types_actual.SingleArrayParam.Value[1]|string == '1.23934937'
+ - dsc_types_actual.DoubleParam.Type == 'System.Double'
+ - dsc_types_actual.DoubleParam.Value == '1.79769313486232E+300'
+ - dsc_types_actual.DoubleArrayParam.Type == 'System.Double[]'
+ - dsc_types_actual.DoubleArrayParam.Value|length == 2
+ - dsc_types_actual.DoubleArrayParam.Value[0] == '1.79769313486232E+300'
+ - dsc_types_actual.DoubleArrayParam.Value[1] == '3.56821831681516'
+ - dsc_types_actual.DateTimeParam.Type == 'System.DateTime'
+ - dsc_types_actual.DateTimeParam.Value == '2019-02-22T17:57:31.2311890+00:00'
+ - dsc_types_actual.DateTimeArrayParam.Type == 'System.DateTime[]'
+ - dsc_types_actual.DateTimeArrayParam.Value == ['2019-02-22T13:57:31.2311890+00:00', '2019-02-22T09:57:31.2311890+00:00']
+ - dsc_types_actual.PSCredentialParam.Type == 'System.Management.Automation.PSCredential'
+ - dsc_types_actual.PSCredentialParam.Value.username == 'username1'
+ - dsc_types_actual.PSCredentialParam.Value.password == 'password1'
+ # Hashtable is actually a CimInstance[] of MSFT_KeyValuePairs
+ - dsc_types_actual.HashtableParam.Type == 'Microsoft.Management.Infrastructure.CimInstance[]'
+ - dsc_types_actual.HashtableParam.Value|length == 3
+ # Can't guarantee the order of the keys so just check they are the values they could be
+ - dsc_types_actual.HashtableParam.Value[0].Key in ["key1", "key2", "key3"]
+ - dsc_types_actual.HashtableParam.Value[0].Value in ["string 1", "1", ""]
+ - dsc_types_actual.HashtableParam.Value[0]._cim_instance == 'MSFT_KeyValuePair'
+ - dsc_types_actual.HashtableParam.Value[1].Key in ["key1", "key2", "key3"]
+ - dsc_types_actual.HashtableParam.Value[1].Value in ["string 1", "1", ""]
+ - dsc_types_actual.HashtableParam.Value[1]._cim_instance == 'MSFT_KeyValuePair'
+ - dsc_types_actual.HashtableParam.Value[2].Key in ["key1", "key2", "key3"]
+ - dsc_types_actual.HashtableParam.Value[2].Value in ["string 1", "1", ""]
+ - dsc_types_actual.HashtableParam.Value[2]._cim_instance == 'MSFT_KeyValuePair'
+ - dsc_types_actual.CimInstanceParam.Type == 'Microsoft.Management.Infrastructure.CimInstance'
+ - dsc_types_actual.CimInstanceParam.Value.Choice == None
+ - dsc_types_actual.CimInstanceParam.Value.IntValue == None
+ - dsc_types_actual.CimInstanceParam.Value.KeyValue == 'a'
+ - dsc_types_actual.CimInstanceParam.Value.StringArrayValue == None
+ - dsc_types_actual.CimInstanceParam.Value.StringValue == None
+ - dsc_types_actual.CimInstanceParam.Value._cim_instance == "ANSIBLE_xTestClass"
+ - dsc_types_actual.CimInstanceArrayParam.Type == 'Microsoft.Management.Infrastructure.CimInstance[]'
+ - dsc_types_actual.CimInstanceArrayParam.Value|length == 2
+ - dsc_types_actual.CimInstanceArrayParam.Value[0].Choice == 'Choice1'
+ - dsc_types_actual.CimInstanceArrayParam.Value[0].IntValue == 1
+ - dsc_types_actual.CimInstanceArrayParam.Value[0].KeyValue == 'b'
+ - dsc_types_actual.CimInstanceArrayParam.Value[0].StringArrayValue == ['abc', 'def']
+ - dsc_types_actual.CimInstanceArrayParam.Value[0].StringValue == 'string 1'
+ - dsc_types_actual.CimInstanceArrayParam.Value[0]._cim_instance == 'ANSIBLE_xTestClass'
+ - dsc_types_actual.CimInstanceArrayParam.Value[1].Choice == 'Choice2'
+ - dsc_types_actual.CimInstanceArrayParam.Value[1].IntValue == 2
+ - dsc_types_actual.CimInstanceArrayParam.Value[1].KeyValue == 'c'
+ - dsc_types_actual.CimInstanceArrayParam.Value[1].StringArrayValue == ['ghi', 'jkl']
+ - dsc_types_actual.CimInstanceArrayParam.Value[1].StringValue == 'string 2'
+ - dsc_types_actual.CimInstanceArrayParam.Value[1]._cim_instance == 'ANSIBLE_xTestClass'
+ - dsc_types_actual.NestedCimInstanceParam.Type == 'Microsoft.Management.Infrastructure.CimInstance'
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue|length == 1
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].Choice == 'Choice2'
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].IntValue == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].KeyValue == 'e'
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].StringArrayValue == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0].StringValue == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimArrayValue[0]._cim_instance == 'ANSIBLE_xTestClass'
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.Choice == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.IntValue == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.KeyValue == 'd'
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.StringArrayValue == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimValue.StringValue == None
+ - dsc_types_actual.NestedCimInstanceParam.Value.CimValue._cim_instance == 'ANSIBLE_xTestClass'
+ - dsc_types_actual.NestedCimInstanceParam.Value.HashValue|length == 1
+ - dsc_types_actual.NestedCimInstanceParam.Value.HashValue[0].Key == 'a'
+ - dsc_types_actual.NestedCimInstanceParam.Value.HashValue[0].Value == 'a'
+ - dsc_types_actual.NestedCimInstanceParam.Value.HashValue[0]._cim_instance == 'MSFT_KeyValuePair'
+ - dsc_types_actual.NestedCimInstanceParam.Value.IntValue == 300
+ - dsc_types_actual.NestedCimInstanceParam.Value.KeyValue == 'key value'
+ - dsc_types_actual.NestedCimInstanceParam.Value._cim_instance == 'ANSIBLE_xNestedClass'
+
+- name: test DSC with all types older version
+ win_dsc:
+ resource_name: xTestResource
+ module_version: 1.0.0
+ Path: '{{ remote_tmp_dir }}\test-types.json'
+ Ensure: Absent
+ StringParam: string param old
+ CimInstanceArrayParam:
+ - Key: old key
+ StringValue: string old 1
+ IntValue: 0
+ StringArrayValue:
+ - zyx
+ - wvu
+ register: dsc_types_old
+
+- name: get result of test DSC with all types older version
+ slurp:
+ path: '{{ remote_tmp_dir }}\test-types.json'
+ register: dsc_types_old_raw
+
+- name: convert result of test DSC with all types older version to dict
+ set_fact:
+ dsc_types_old_actual: '{{ dsc_types_old_raw.content | b64decode | from_json }}'
+
+- name: assert test DSC with all types older version
+ assert:
+ that:
+ - dsc_types_old is changed
+ - dsc_types_old.module_version == '1.0.0'
+ - not dsc_types_old.reboot_required
+ - dsc_types_old_actual.Version == '1.0.0'
+ - dsc_types_old_actual.Verbose.Value.IsPresent
+ - dsc_types_old_actual.DefaultParam.Value == 'Default'
+ - dsc_types_old_actual.Ensure.Value == 'Absent'
+ - dsc_types_old_actual.Path.Value == remote_tmp_dir + "\\test-types.json"
+ - dsc_types_old_actual.StringParam.Type == 'System.String'
+ - dsc_types_old_actual.StringParam.Value == 'string param old'
+ - dsc_types_old_actual.CimInstanceArrayParam.Type == 'Microsoft.Management.Infrastructure.CimInstance[]'
+ - dsc_types_old_actual.CimInstanceArrayParam.Value|length == 1
+ - not dsc_types_old_actual.CimInstanceArrayParam.Value[0].Choice is defined # 1.0.0 does not have a Choice option
+ - dsc_types_old_actual.CimInstanceArrayParam.Value[0].IntValue == 0
+ - dsc_types_old_actual.CimInstanceArrayParam.Value[0].Key == 'old key'
+ - dsc_types_old_actual.CimInstanceArrayParam.Value[0].StringArrayValue == ['zyx', 'wvu']
+ - dsc_types_old_actual.CimInstanceArrayParam.Value[0].StringValue == 'string old 1'
+ - dsc_types_old_actual.CimInstanceArrayParam.Value[0]._cim_instance == 'ANSIBLE_xTestClass'
diff --git a/test/integration/targets/incidental_win_lineinfile/aliases b/test/integration/targets/incidental_win_lineinfile/aliases
new file mode 100644
index 00000000..194cbc3f
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/aliases
@@ -0,0 +1,3 @@
+shippable/windows/incidental
+windows
+skip/windows/2016 # Host takes a while to run and the module isn't OS-dependent
diff --git a/test/integration/targets/incidental_win_lineinfile/files/test.txt b/test/integration/targets/incidental_win_lineinfile/files/test.txt
new file mode 100644
index 00000000..8187db9f
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/files/test.txt
@@ -0,0 +1,5 @@
+This is line 1
+This is line 2
+REF this is a line for backrefs REF
+This is line 4
+This is line 5
diff --git a/test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt b/test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/files/test_linebreak.txt
diff --git a/test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt b/test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/files/test_quoting.txt
diff --git a/test/integration/targets/incidental_win_lineinfile/files/testempty.txt b/test/integration/targets/incidental_win_lineinfile/files/testempty.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/files/testempty.txt
diff --git a/test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt b/test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt
new file mode 100644
index 00000000..152780b9
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/files/testnoeof.txt
@@ -0,0 +1,2 @@
+This is line 1
+This is line 2 \ No newline at end of file
diff --git a/test/integration/targets/incidental_win_lineinfile/meta/main.yml b/test/integration/targets/incidental_win_lineinfile/meta/main.yml
new file mode 100644
index 00000000..e0ff46db
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_win_prepare_tests
diff --git a/test/integration/targets/incidental_win_lineinfile/tasks/main.yml b/test/integration/targets/incidental_win_lineinfile/tasks/main.yml
new file mode 100644
index 00000000..e5f047be
--- /dev/null
+++ b/test/integration/targets/incidental_win_lineinfile/tasks/main.yml
@@ -0,0 +1,708 @@
+# Test code for the win_lineinfile module, adapted from the standard lineinfile module tests
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+- name: deploy the test file for lineinfile
+ win_copy: src=test.txt dest={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert that the test file was deployed
+ assert:
+ that:
+ - "result.changed == true"
+
+- name: stat the test file
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: check win_stat file result
+ assert:
+ that:
+ - "result.stat.exists"
+ - "not result.stat.isdir"
+ - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'"
+ - "result is not failed"
+ - "result is not changed"
+
+
+- name: insert a line at the beginning of the file, and back it up
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line at the beginning" insertbefore="BOF" backup=yes
+ register: result
+
+- name: check backup_file
+ win_stat:
+ path: '{{ result.backup_file }}'
+ register: backup_file
+
+- name: assert that the line was inserted at the head of the file
+ assert:
+ that:
+ - result.changed == true
+ - result.msg == 'line added'
+ - backup_file.stat.exists == true
+
+- name: stat the backup file
+ win_stat: path={{result.backup}}
+ register: result
+
+- name: assert the backup file matches the previous hash
+ assert:
+ that:
+ - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'"
+
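+# Note: with backup=yes the module reports the path of the pre-change
+# copy; the tasks above read it as both result.backup_file and
+# result.backup, so both keys appear to be returned. A minimal sketch,
+# assuming the same output dir:
+#
+#   - win_lineinfile:
+#       dest: '{{win_output_dir}}/test.txt'
+#       line: New line at the beginning
+#       insertbefore: BOF
+#       backup: yes
+#     register: result
+#   # result.backup_file then points at the untouched copy
+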
+- name: stat the test after the insert at the head
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test hash is what we expect for the file with the insert at the head
+ assert:
+ that:
+ - "result.stat.checksum == 'b526e2e044defc64dfb0fad2f56e105178f317d8'"
+
+- name: insert a line at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line at the end" insertafter="EOF"
+ register: result
+
+- name: assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the test after the insert at the end
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'dd5e207e28ce694ab18e41c2b16deb74fde93b14'"
+
+- name: insert a line after the first line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line after line 1" insertafter="^This is line 1$"
+ register: result
+
+- name: assert that the line was inserted after the first line
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the test after insert after the first line
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the insert after the first line
+ assert:
+ that:
+ - "result.stat.checksum == '604b17405f2088e6868af9680b7834087acdc8f4'"
+
+- name: insert a line before the last line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New line before line 5" insertbefore="^This is line 5$"
+ register: result
+
+- name: assert that the line was inserted before the last line
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the test after the insert before the last line
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the insert before the last line
+ assert:
+ that:
+ - "result.stat.checksum == '8f5b30e8f01578043d782e5a68d4c327e75a6e34'"
+
+- name: replace a line with backrefs
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="This is line 3" backrefs=yes regexp="^(REF).*$"
+ register: result
+
+- name: assert that the line with backrefs was changed
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line replaced'"
+
+- name: stat the test after the backref line was replaced
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after backref line was replaced
+ assert:
+ that:
+ - "result.stat.checksum == 'ef6b02645908511a2cfd2df29d50dd008897c580'"
+
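+# Note: with backrefs=yes the regexp must match an existing line for a
+# replacement to happen; captured groups are then usable from line= via
+# $1-style references (see the "New $1 created with the backref" case
+# further down). A minimal sketch against the REF marker line deployed
+# above:
+#
+#   - win_lineinfile:
+#       dest: '{{win_output_dir}}/test.txt'
+#       regexp: ^(REF).*$
+#       line: This is line 3
+#       backrefs: yes
+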
+- name: remove the middle line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 3$"
+ register: result
+
+- name: assert that the line was removed
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == '1 line(s) removed'"
+
+- name: stat the test after the middle line was removed
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the middle line was removed
+ assert:
+ that:
+ - "result.stat.checksum == '11695efa472be5c31c736bc43e055f8ac90eabdf'"
+
+- name: run a validation script that succeeds
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 5$" validate="sort.exe %s"
+ register: result
+
+- name: assert that the file validated after removing a line
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == '1 line(s) removed'"
+
+- name: stat the test after the validation succeeded
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after the validation succeeded
+ assert:
+ that:
+ - "result.stat.checksum == '39c38a30aa6ac6af9ec41f54c7ed7683f1249347'"
+
+- name: run a validation script that fails
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=absent regexp="^This is line 1$" validate="sort.exe %s.foo"
+ register: result
+ ignore_errors: yes
+
+- name: assert that the validate failed
+ assert:
+ that:
+ - "result.failed == true"
+
+- name: stat the test after the validation failed
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches the previous after the validation failed
+ assert:
+ that:
+ - "result.stat.checksum == '39c38a30aa6ac6af9ec41f54c7ed7683f1249347'"
+
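+# Note: validate= appears to run the given command against a temporary
+# copy of the file (%s is replaced with that path) before dest is swapped
+# in; a non-zero exit fails the task, and the matching checksum above
+# shows the destination stays intact when validation fails. A minimal
+# sketch:
+#
+#   - win_lineinfile:
+#       dest: '{{win_output_dir}}/test.txt'
+#       regexp: ^This is line 5$
+#       state: absent
+#       validate: sort.exe %s
+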
+- name: use create=yes
+ win_lineinfile: dest={{win_output_dir}}/new_test.txt create=yes insertbefore=BOF state=present line="This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/new_test.txt
+ register: result
+ ignore_errors: yes
+
+- name: assert the newly created test checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'"
+
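+# Note: without create=yes the module is expected to fail when dest does
+# not exist; create=yes writes the file instead, as exercised above. A
+# minimal sketch:
+#
+#   - win_lineinfile:
+#       dest: '{{win_output_dir}}/new_test.txt'
+#       line: This is a new file
+#       insertbefore: BOF
+#       create: yes
+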
+# Test EOF in cases where file has no newline at EOF
+- name: testnoeof deploy the file for lineinfile
+ win_copy: src=testnoeof.txt dest={{win_output_dir}}/testnoeof.txt
+ register: result
+
+- name: testnoeof insert a line at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/testnoeof.txt state=present line="New line at the end" insertafter="EOF"
+ register: result
+
+- name: testnoeof assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: testnoeof stat the no newline EOF test after the insert at the end
+ win_stat: path={{win_output_dir}}/testnoeof.txt
+ register: result
+
+- name: testnoeof assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == '229852b09f7e9921fbcbb0ee0166ba78f7f7f261'"
+
+- name: add multiple lines at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="This is a line\r\nwith newline character" insertafter="EOF"
+ register: result
+
+- name: assert that the multiple lines were inserted
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat file after adding multiple lines
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after inserting multiple lines
+ assert:
+ that:
+ - "result.stat.checksum == '1401413cd4eac732be66cd6aceddd334c4240f86'"
+
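+# Note: the quoted \r\n above is unescaped before the module sees it, so
+# a single task inserts two physical lines. A minimal sketch in the same
+# k=v style:
+#
+#   - win_lineinfile: dest={{win_output_dir}}/test.txt insertafter=EOF line="first half\r\nsecond half"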
+
+
+# Test EOF with empty file to make sure no unnecessary newline is added
+- name: testempty deploy the testempty file for lineinfile
+ win_copy: src=testempty.txt dest={{win_output_dir}}/testempty.txt
+ register: result
+
+- name: testempty insert a line at the end of the file
+ win_lineinfile: dest={{win_output_dir}}/testempty.txt state=present line="New line at the end" insertafter="EOF"
+ register: result
+
+- name: testempty assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: testempty stat the test after the insert at the end
+ win_stat: path={{win_output_dir}}/testempty.txt
+ register: result
+
+- name: testempty assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'd3d34f11edda51be7ca5dcb0757cf3e1257c0bfe'"
+
+
+
+- name: replace a line with backrefs included in the line
+ win_lineinfile: dest={{win_output_dir}}/test.txt state=present line="New $1 created with the backref" backrefs=yes regexp="^This is (line 4)$"
+ register: result
+
+- name: assert that the line with backrefs was changed
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line replaced'"
+
+- name: stat the test after the backref line was replaced
+ win_stat: path={{win_output_dir}}/test.txt
+ register: result
+
+- name: assert test checksum matches after backref line was replaced
+ assert:
+ that:
+ - "result.stat.checksum == 'e6ff42e926dac2274c93dff0b8a323e07ae09149'"
+
+###################################################################
+# issue 8535
+
+- name: create a new file for testing quoting issues
+ win_copy: src=test_quoting.txt dest={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert the new file was created
+ assert:
+ that:
+ - result.changed
+
+- name: use with_items to add code-like strings to the quoting txt file
+ win_lineinfile: >
+ dest={{win_output_dir}}/test_quoting.txt
+ line="{{ item }}"
+ insertbefore="BOF"
+ with_items:
+ - "'foo'"
+ - "dotenv.load();"
+ - "var dotenv = require('dotenv');"
+ register: result
+
+- name: assert the quote test file was modified correctly
+ assert:
+ that:
+ - result.results|length == 3
+ - result.results[0].changed
+ - result.results[0].item == "'foo'"
+ - result.results[1].changed
+ - result.results[1].item == "dotenv.load();"
+ - result.results[2].changed
+ - result.results[2].item == "var dotenv = require('dotenv');"
+
+- name: stat the quote test file
+ win_stat: path={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert test checksum matches for quote test file
+ assert:
+ that:
+ - "result.stat.checksum == 'f3bccdbdfa1d7176c497ef87d04957af40ab48d2'"
+
+- name: append a line into the quoted file with a single quote
+ win_lineinfile: dest={{win_output_dir}}/test_quoting.txt line="import g'"
+ register: result
+
+- name: assert that the quoted file was changed
+ assert:
+ that:
+ - result.changed
+
+- name: stat the quote test file
+ win_stat: path={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert test checksum matches adding line with single quote
+ assert:
+ that:
+ - "result.stat.checksum == 'dabf4cbe471e1797d8dcfc773b6b638c524d5237'"
+
+- name: insert a line into the quoted file with many double quotation strings
+ win_lineinfile: dest={{win_output_dir}}/test_quoting.txt line='"quote" and "unquote"'
+ register: result
+
+- name: assert that the quoted file was changed
+ assert:
+ that:
+ - result.changed
+
+- name: stat the quote test file
+ win_stat: path={{win_output_dir}}/test_quoting.txt
+ register: result
+
+- name: assert test checksum matches quoted line added
+ assert:
+ that:
+ - "result.stat.checksum == '9dc1fc1ff19942e2936564102ad37134fa83b91d'"
+
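+# Note: the folded block form (win_lineinfile: >) used above keeps each
+# k=v argument on its own source line while still passing one string to
+# the module, which appears to be how these issue-8535 cases avoid
+# nesting quotes inside quotes. A minimal sketch:
+#
+#   - win_lineinfile: >
+#       dest={{win_output_dir}}/test_quoting.txt
+#       line="{{ item }}"
+#     with_items: ["'single'", '"double"']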
+
+# Windows vs. Unix line separator test cases
+
+- name: Create windows test file with initial line
+ win_lineinfile: dest={{win_output_dir}}/test_windows_sep.txt create=yes insertbefore=BOF state=present line="This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_windows_sep.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'"
+
+- name: Test appending to the file using the default (windows) line separator
+ win_lineinfile: dest={{win_output_dir}}/test_windows_sep.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_windows_sep.txt
+ register: result
+
+- name: assert the file checksum matches expected checksum
+ assert:
+ that:
+ - "result.stat.checksum == '71a17ddd1d57ed7c7912e4fd11ecb2ead0b27033'"
+
+
+- name: Create unix test file with initial line
+ win_lineinfile: dest={{win_output_dir}}/test_unix_sep.txt create=yes insertbefore=BOF state=present line="This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_unix_sep.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '84faac1183841c57434693752fc3debc91b9195d'"
+
+- name: Test appending to the file using unix line separator
+ win_lineinfile: dest={{win_output_dir}}/test_unix_sep.txt insertbefore=EOF state=present line="This is the last line" newline="unix"
+ register: result
+
+- name: assert that the new line was added
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_unix_sep.txt
+ register: result
+
+- name: assert the file checksum matches expected checksum
+ assert:
+ that:
+ - "result.stat.checksum == 'f1f634a37ab1c73efb77a71a5ad2cc87b61b17ae'"
+
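+# Note: the default line separator is the Windows \r\n; newline=unix
+# writes \n instead, which is why the two files above hold the same text
+# but different checksums. A minimal sketch:
+#
+#   - win_lineinfile:
+#       dest: '{{win_output_dir}}/test_unix_sep.txt'
+#       line: This is the last line
+#       insertbefore: EOF
+#       newline: unix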
+
+# Encoding management test cases
+
+# Default (auto) encoding should use utf-8 with no BOM
+- name: Test create file without explicit encoding results in utf-8 without BOM
+ win_lineinfile: dest={{win_output_dir}}/test_auto_utf8.txt create=yes insertbefore=BOF state=present line="This is a new utf-8 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_auto_utf8.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == 'b69fcbacca8291a4668f57fba91d7c022f1c3dc7'"
+
+- name: Test appending to the utf-8 without BOM file - should autodetect UTF-8 no BOM
+ win_lineinfile: dest={{win_output_dir}}/test_auto_utf8.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_auto_utf8.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '64d747f1ebf8c9d793dbfd27126e4152d39a3848'"
+
+
+# UTF-8 explicit (with BOM)
+- name: Test create file with explicit utf-8 encoding results in utf-8 with a BOM
+ win_lineinfile: dest={{win_output_dir}}/test_utf8.txt create=yes encoding="utf-8" insertbefore=BOF state=present line="This is a new utf-8 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_utf8.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == 'd45344b2b3bf1cf90eae851b40612f5f37a88bbb'"
+
+- name: Test appending to the utf-8 with BOM file - should autodetect utf-8 with BOM encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf8.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-8'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_utf8.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '9b84254489f40f258871a4c6573cacc65895ee1a'"
+
+
+# UTF-16 explicit
+- name: Test create file with explicit utf-16 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf16.txt create=yes encoding="utf-16" insertbefore=BOF state=present line="This is a new utf-16 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-16'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_utf16.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '785b0693cec13b60e2c232782adeda2f8a967434'"
+
+- name: Test appending to the utf-16 file - should autodetect utf-16 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf16.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-16'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_utf16.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '70e4eb3ba795e1ba94d262db47e4fd17c64b2e73'"
+
+# UTF-32 explicit
+- name: Test create file with explicit utf-32 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf32.txt create=yes encoding="utf-32" insertbefore=BOF state=present line="This is a new utf-32 file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-32'"
+
+- name: validate that the newly created file exists
+ win_stat: path={{win_output_dir}}/test_utf32.txt
+ register: result
+
+- name: assert the newly created file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '7a6e3f3604c0def431aaa813173a4ddaa10fd1fb'"
+
+- name: Test appending to the utf-32 file - should autodetect utf-32 encoding
+ win_lineinfile: dest={{win_output_dir}}/test_utf32.txt insertbefore=EOF state=present line="This is the last line"
+ register: result
+
+- name: assert that the new line was added and encoding did not change
+ assert:
+ that:
+ - "result.changed == true"
+ - "result.msg == 'line added'"
+ - "result.encoding == 'utf-32'"
+
+- name: stat the file
+ win_stat: path={{win_output_dir}}/test_utf32.txt
+ register: result
+
+- name: assert the file checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '66a72e71f42c4775f4326da95cfe82c8830e5022'"
+
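+# Note: encoding defaults to auto: new files come out as utf-8 with no
+# BOM, while appends sniff the existing BOM so the original encoding is
+# preserved; an explicit encoding= (utf-8/utf-16/utf-32) writes the
+# matching BOM, which is what the distinct checksums below pin down. A
+# minimal sketch:
+#
+#   - win_lineinfile:
+#       dest: '{{win_output_dir}}/test_utf16.txt'
+#       line: This is a new utf-16 file
+#       insertbefore: BOF
+#       create: yes
+#       encoding: utf-16
+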
+#########################################################################
+# issue #33858
+# \r\n causes a line break instead of printing literally, which breaks paths.
+
+- name: create testing file
+ win_copy:
+ src: test_linebreak.txt
+ dest: "{{win_output_dir}}/test_linebreak.txt"
+
+- name: stat the test file
+ win_stat:
+ path: "{{win_output_dir}}/test_linebreak.txt"
+ register: result
+
+# (Get-FileHash -path C:\ansible\test\integration\targets\win_lineinfile\files\test_linebreak.txt -Algorithm sha1).hash.tolower()
+- name: check win_stat file result
+ assert:
+ that:
+ - result.stat.exists
+ - not result.stat.isdir
+ - result.stat.checksum == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+ - result is not failed
+ - result is not changed
+
+- name: insert path c:\return\new to test file
+ win_lineinfile:
+ dest: "{{win_output_dir}}/test_linebreak.txt"
+ line: c:\return\new
+ register: result_literal
+
+- name: insert path "c:\return\new" to test file, will cause line breaks
+ win_lineinfile:
+ dest: "{{win_output_dir}}/test_linebreak.txt"
+ line: "c:\return\new"
+ register: result_expand
+
+- name: assert that the lines were inserted
+ assert:
+ that:
+ - result_literal.changed == true
+ - result_literal.msg == 'line added'
+ - result_expand.changed == true
+ - result_expand.msg == 'line added'
+
+- name: stat the test file
+ win_stat:
+ path: "{{win_output_dir}}/test_linebreak.txt"
+ register: result
+
+- debug:
+ var: result
+ verbosity: 1
+
+# expect that the file looks like this:
+# c:\return\new
+# c:
+# eturn
+# ew    (or c:eturnew on windows)
+- name: assert that one line is literal and the other has breaks
+ assert:
+ that:
+ - result.stat.checksum == 'd2dfd11bc70526ff13a91153c76a7ae5595a845b'
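+
+# Note: the two inserts above differ only in YAML quoting: the plain
+# scalar keeps c:\return\new literal, while in the double-quoted form
+# YAML expands \r and \n into control characters and mangles the path.
+# A double-quoted Windows path needs its backslashes doubled:
+#
+#   - win_lineinfile:
+#       dest: "{{win_output_dir}}/test_linebreak.txt"
+#       line: "c:\\return\\new"   # arrives at the module as c:\return\new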
diff --git a/test/integration/targets/incidental_win_ping/aliases b/test/integration/targets/incidental_win_ping/aliases
new file mode 100644
index 00000000..a5fc90dc
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/aliases
@@ -0,0 +1,2 @@
+shippable/windows/incidental
+windows
diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1
new file mode 100644
index 00000000..f1704964
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/library/win_ping_set_attr.ps1
@@ -0,0 +1,31 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = "pong"
+};
+
+# Test that Set-Attr will replace an existing attribute.
+Set-Attr $result "ping" $data
+
+Exit-Json $result;
diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1
new file mode 100644
index 00000000..508174af
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/library/win_ping_strict_mode_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$params.thisPropertyDoesNotExist
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1
new file mode 100644
index 00000000..d4c9f07a
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$blah = 'I can't quote my strings correctly.'
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1
new file mode 100644
index 00000000..7306f4d2
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/library/win_ping_throw.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+throw
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1 b/test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1
new file mode 100644
index 00000000..09e3b7cb
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/library/win_ping_throw_string.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+throw "no ping for you"
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/incidental_win_ping/tasks/main.yml b/test/integration/targets/incidental_win_ping/tasks/main.yml
new file mode 100644
index 00000000..a7e6ba7f
--- /dev/null
+++ b/test/integration/targets/incidental_win_ping/tasks/main.yml
@@ -0,0 +1,67 @@
+# test code for the win_ping module
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: test win_ping
+ action: win_ping
+ register: win_ping_result
+
+- name: check win_ping result
+ assert:
+ that:
+ - win_ping_result is not failed
+ - win_ping_result is not changed
+ - win_ping_result.ping == 'pong'
+
+- name: test win_ping with data
+ win_ping:
+ data: ☠
+ register: win_ping_with_data_result
+
+- name: check win_ping result with data
+ assert:
+ that:
+ - win_ping_with_data_result is not failed
+ - win_ping_with_data_result is not changed
+ - win_ping_with_data_result.ping == '☠'
+
+- name: test win_ping.ps1 with data as complex args
+  # win_ping.ps1 TODO: do we want to actually support this? no other tests that I can see...
+ win_ping:
+ data: bleep
+ register: win_ping_ps1_result
+
+- name: check win_ping.ps1 result with data
+ assert:
+ that:
+ - win_ping_ps1_result is not failed
+ - win_ping_ps1_result is not changed
+ - win_ping_ps1_result.ping == 'bleep'
+
+- name: test win_ping using data=crash so that it throws an exception
+ win_ping:
+ data: crash
+ register: win_ping_crash_result
+ ignore_errors: yes
+
+- name: check win_ping_crash result
+ assert:
+ that:
+ - win_ping_crash_result is failed
+ - win_ping_crash_result is not changed
+ - 'win_ping_crash_result.msg == "Unhandled exception while executing module: boom"'
+ - '"throw \"boom\"" in win_ping_crash_result.exception'
diff --git a/test/integration/targets/incidental_win_prepare_tests/aliases b/test/integration/targets/incidental_win_prepare_tests/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/test/integration/targets/incidental_win_prepare_tests/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_win_prepare_tests/meta/main.yml b/test/integration/targets/incidental_win_prepare_tests/meta/main.yml
new file mode 100644
index 00000000..cf5427b6
--- /dev/null
+++ b/test/integration/targets/incidental_win_prepare_tests/meta/main.yml
@@ -0,0 +1,3 @@
+---
+
+allow_duplicates: yes
diff --git a/test/integration/targets/incidental_win_prepare_tests/tasks/main.yml b/test/integration/targets/incidental_win_prepare_tests/tasks/main.yml
new file mode 100644
index 00000000..e87b614b
--- /dev/null
+++ b/test/integration/targets/incidental_win_prepare_tests/tasks/main.yml
@@ -0,0 +1,29 @@
+# test code for the windows versions of the copy, file and template modules
+# originally
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+- name: clean out the test directory
+ win_file: name={{win_output_dir|mandatory}} state=absent
+ tags:
+ - prepare
+
+- name: create the test directory
+ win_file: name={{win_output_dir}} state=directory
+ tags:
+ - prepare
diff --git a/test/integration/targets/incidental_win_psexec/aliases b/test/integration/targets/incidental_win_psexec/aliases
new file mode 100644
index 00000000..a5fc90dc
--- /dev/null
+++ b/test/integration/targets/incidental_win_psexec/aliases
@@ -0,0 +1,2 @@
+shippable/windows/incidental
+windows
diff --git a/test/integration/targets/incidental_win_psexec/meta/main.yml b/test/integration/targets/incidental_win_psexec/meta/main.yml
new file mode 100644
index 00000000..9f37e96c
--- /dev/null
+++ b/test/integration/targets/incidental_win_psexec/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_win_psexec/tasks/main.yml b/test/integration/targets/incidental_win_psexec/tasks/main.yml
new file mode 100644
index 00000000..27783f9e
--- /dev/null
+++ b/test/integration/targets/incidental_win_psexec/tasks/main.yml
@@ -0,0 +1,80 @@
+# Would use [] but this has trouble with PATH when trying to find the executable, so just resort to keeping a space
+- name: record special path for tests
+ set_fact:
+ testing_dir: '{{ remote_tmp_dir }}\ansible win_psexec'
+
+- name: create special path testing dir
+ win_file:
+ path: '{{ testing_dir }}'
+ state: directory
+
+- name: Download PsExec
+ win_get_url:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/win_psexec/PsExec.exe
+ dest: '{{ testing_dir }}\PsExec.exe'
+
+- name: Get the existing PATH env var
+ win_shell: '$env:PATH'
+ register: system_path
+ changed_when: False
+
+- name: Run whoami
+ win_psexec:
+ command: whoami.exe
+ nobanner: true
+ register: whoami
+ environment:
+ PATH: '{{ testing_dir }};{{ system_path.stdout | trim }}'
+
+- name: Test whoami
+ assert:
+ that:
+ - whoami.rc == 0
+ - whoami.stdout == ''
+ # FIXME: Standard output does not work or is truncated
+ #- whoami.stdout == '{{ ansible_hostname|lower }}'
+
+- name: Run whoami as SYSTEM
+ win_psexec:
+ command: whoami.exe
+ system: yes
+ nobanner: true
+ executable: '{{ testing_dir }}\PsExec.exe'
+ register: whoami_as_system
+  # Seems to be a bug with PsExec where the stdout can be empty; just retry the task to make this test a bit more stable
+ until: whoami_as_system.rc == 0 and whoami_as_system.stdout == 'nt authority\system'
+ retries: 3
+ delay: 2
+
+# FIXME: Behaviour is not consistent on all Windows systems
+#- name: Run whoami as ELEVATED
+# win_psexec:
+# command: whoami.exe
+# elevated: yes
+# register: whoami_as_elevated
+#
+## Ensure we have basic facts
+#- setup:
+#
+#- debug:
+# msg: '{{ whoami_as_elevated.stdout|lower }} == {{ ansible_hostname|lower }}\{{ ansible_user_id|lower }}'
+#
+#- name: Test whoami
+# assert:
+# that:
+# - whoami_as_elevated.rc == 0
+# - whoami_as_elevated.stdout|lower == '{{ ansible_hostname|lower }}\{{ ansible_user_id|lower }}'
+
+- name: Run command with multiple arguments
+ win_psexec:
+ command: powershell.exe -NonInteractive "exit 1"
+ ignore_errors: yes
+ register: whoami_multiple_args
+ environment:
+ PATH: '{{ testing_dir }};{{ system_path.stdout | trim }}'
+
+- name: Test command with multiple arguments
+ assert:
+ that:
+ - whoami_multiple_args.rc == 1
+ - whoami_multiple_args.psexec_command == "psexec.exe -accepteula powershell.exe -NonInteractive \"exit 1\""
diff --git a/test/integration/targets/incidental_win_reboot/aliases b/test/integration/targets/incidental_win_reboot/aliases
new file mode 100644
index 00000000..a5fc90dc
--- /dev/null
+++ b/test/integration/targets/incidental_win_reboot/aliases
@@ -0,0 +1,2 @@
+shippable/windows/incidental
+windows
diff --git a/test/integration/targets/incidental_win_reboot/tasks/main.yml b/test/integration/targets/incidental_win_reboot/tasks/main.yml
new file mode 100644
index 00000000..7757e08f
--- /dev/null
+++ b/test/integration/targets/incidental_win_reboot/tasks/main.yml
@@ -0,0 +1,70 @@
+---
+- name: make sure win output dir exists
+ win_file:
+ path: "{{win_output_dir}}"
+ state: directory
+
+- name: reboot with defaults
+ win_reboot:
+
+- name: test with negative values for delays
+ win_reboot:
+ post_reboot_delay: -0.5
+ pre_reboot_delay: -61
+
+- name: schedule a reboot for sometime in the future
+ win_command: shutdown.exe /r /t 599
+
+- name: reboot with a shutdown already scheduled
+ win_reboot:
+
+# test a reboot that reboots again during the test_command phase
+- name: create test file
+ win_file:
+ path: '{{win_output_dir}}\win_reboot_test'
+ state: touch
+
+- name: reboot with secondary reboot stage
+ win_reboot:
+ test_command: '{{ lookup("template", "post_reboot.ps1") }}'
+
+- name: reboot with test command that fails
+ win_reboot:
+ test_command: 'FAIL'
+ reboot_timeout: 120
+ register: reboot_fail_test
+ failed_when: "reboot_fail_test.msg != 'Timed out waiting for post-reboot test command (timeout=120)'"
+
+- name: remove SeRemoteShutdownPrivilege
+ win_user_right:
+ name: SeRemoteShutdownPrivilege
+ users: []
+ action: set
+ register: removed_shutdown_privilege
+
+- block:
+ - name: try and reboot without required privilege
+ win_reboot:
+ register: fail_privilege
+ failed_when:
+ - "'Reboot command failed, error was:' not in fail_privilege.msg"
+ - "'Access is denied.(5)' not in fail_privilege.msg"
+
+ always:
+ - name: reset the SeRemoteShutdownPrivilege
+ win_user_right:
+ name: SeRemoteShutdownPrivilege
+ users: '{{ removed_shutdown_privilege.removed }}'
+ action: add
+
+- name: Use invalid parameter
+ reboot:
+ foo: bar
+ ignore_errors: true
+ register: invalid_parameter
+
+- name: Ensure task fails with error
+ assert:
+ that:
+ - invalid_parameter is failed
+ - "invalid_parameter.msg == 'Invalid options for reboot: foo'"
diff --git a/test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 b/test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1
new file mode 100644
index 00000000..e4a99a72
--- /dev/null
+++ b/test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1
@@ -0,0 +1,8 @@
+if (Test-Path -Path '{{win_output_dir}}\win_reboot_test') {
+ New-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager' `
+ -Name PendingFileRenameOperations `
+ -Value @("\??\{{win_output_dir}}\win_reboot_test`0") `
+ -PropertyType MultiString
+ Restart-Computer -Force
+ exit 1
+}
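+
+# Note: on the first boot after win_reboot runs this test_command, the
+# test file still exists, so the block above queues the file's deletion
+# through PendingFileRenameOperations (the trailing `0 null with no
+# destination appears to mean "delete at next boot"), forces a second
+# restart, and exits non-zero so win_reboot keeps polling; once the file
+# is gone the script exits 0 and the host counts as up.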
diff --git a/test/integration/targets/incidental_win_security_policy/aliases b/test/integration/targets/incidental_win_security_policy/aliases
new file mode 100644
index 00000000..a5fc90dc
--- /dev/null
+++ b/test/integration/targets/incidental_win_security_policy/aliases
@@ -0,0 +1,2 @@
+shippable/windows/incidental
+windows
diff --git a/test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1 b/test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1
new file mode 100644
index 00000000..5c83c1b5
--- /dev/null
+++ b/test/integration/targets/incidental_win_security_policy/library/test_win_security_policy.ps1
@@ -0,0 +1,53 @@
+#!powershell
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+# basic script to get the list of users in a particular right;
+# this is quite complex to do as a plain inline script, so it is
+# kept as its own simple test module
+
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args $args -supports_check_mode $false
+$section = Get-AnsibleParam -obj $params -name "section" -type "str" -failifempty $true
+$key = Get-AnsibleParam -obj $params -name "key" -type "str" -failifempty $true
+
+$result = @{
+ changed = $false
+}
+
+Function ConvertFrom-Ini($file_path) {
+ $ini = @{}
+ switch -Regex -File $file_path {
+ "^\[(.+)\]" {
+ $section = $matches[1]
+ $ini.$section = @{}
+ }
+ "(.+?)\s*=(.*)" {
+ $name = $matches[1].Trim()
+ $value = $matches[2].Trim()
+ if ($value -match "^\d+$") {
+ $value = [int]$value
+ } elseif ($value.StartsWith('"') -and $value.EndsWith('"')) {
+ $value = $value.Substring(1, $value.Length - 2)
+ }
+
+ $ini.$section.$name = $value
+ }
+ }
+
+ $ini
+}
+
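+# ConvertFrom-Ini above walks the exported policy file with switch -Regex:
+# a [section] header starts a new nested hashtable, key = value pairs land
+# in the current section, and integer-looking or double-quoted values are
+# normalised. A minimal usage sketch against the export below:
+#
+#   $ini = ConvertFrom-Ini -file_path $secedit_ini_path
+#   $ini.'System Access'.NewGuestName    # e.g. "Guest"
+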
+$secedit_ini_path = [IO.Path]::GetTempFileName()
+&SecEdit.exe /export /cfg $secedit_ini_path /quiet
+$secedit_ini = ConvertFrom-Ini -file_path $secedit_ini_path
+
+if ($secedit_ini.ContainsKey($section)) {
+ $result.value = $secedit_ini.$section.$key
+} else {
+ $result.value = $null
+}
+
+Exit-Json $result
diff --git a/test/integration/targets/incidental_win_security_policy/tasks/main.yml b/test/integration/targets/incidental_win_security_policy/tasks/main.yml
new file mode 100644
index 00000000..28fdb5ea
--- /dev/null
+++ b/test/integration/targets/incidental_win_security_policy/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: get current entry for audit
+ test_win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ register: before_value_audit
+
+- name: get current entry for guest
+ test_win_security_policy:
+ section: System Access
+ key: NewGuestName
+ register: before_value_guest
+
+- block:
+ - name: set AuditSystemEvents entry before tests
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: 0
+
+ - name: set NewGuestName entry before tests
+ win_security_policy:
+ section: System Access
+ key: NewGuestName
+ value: Guest
+
+ - name: run tests
+ include_tasks: tests.yml
+
+ always:
+ - name: reset entries for AuditSystemEvents
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: "{{before_value_audit.value}}"
+
+ - name: reset entries for NewGuestName
+ win_security_policy:
+ section: System Access
+ key: NewGuestName
+ value: "{{before_value_guest.value}}"
diff --git a/test/integration/targets/incidental_win_security_policy/tasks/tests.yml b/test/integration/targets/incidental_win_security_policy/tasks/tests.yml
new file mode 100644
index 00000000..724b6010
--- /dev/null
+++ b/test/integration/targets/incidental_win_security_policy/tasks/tests.yml
@@ -0,0 +1,186 @@
+---
+- name: fail with invalid section name
+ win_security_policy:
+ section: This is not a valid section
+ key: KeyName
+ value: 0
+ register: fail_invalid_section
+ failed_when: fail_invalid_section.msg != "The section 'This is not a valid section' does not exist in SecEdit.exe output ini"
+
+- name: fail with invalid key name
+ win_security_policy:
+ section: System Access
+ key: InvalidKey
+ value: 0
+ register: fail_invalid_key
+ failed_when: fail_invalid_key.msg != "The key 'InvalidKey' in section 'System Access' is not a valid key, cannot set this value"
+
+- name: change existing key check
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: 1
+ register: change_existing_check
+ check_mode: yes
+
+- name: get actual change existing key check
+ test_win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ register: change_existing_actual_check
+
+- name: assert change existing key check
+ assert:
+ that:
+ - change_existing_check is changed
+ - change_existing_actual_check.value == 0
+
+- name: change existing key
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: 1
+ register: change_existing
+
+- name: get actual change existing key
+ test_win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ register: change_existing_actual
+
+- name: assert change existing key
+ assert:
+ that:
+ - change_existing is changed
+ - change_existing_actual.value == 1
+
+- name: change existing key again
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: 1
+ register: change_existing_again
+
+- name: assert change existing key again
+ assert:
+ that:
+ - change_existing_again is not changed
+ - change_existing_again.value == 1
+
+- name: change existing key with string type
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: "1"
+ register: change_existing_key_with_type
+
+- name: assert change existing key with string type
+ assert:
+ that:
+ - change_existing_key_with_type is not changed
+ - change_existing_key_with_type.value == "1"
+
+- name: change existing string key check
+ win_security_policy:
+ section: System Access
+ key: NewGuestName
+ value: New Guest
+ register: change_existing_string_check
+ check_mode: yes
+
+- name: get actual change existing string key check
+ test_win_security_policy:
+ section: System Access
+ key: NewGuestName
+ register: change_existing_string_actual_check
+
+- name: assert change existing string key check
+ assert:
+ that:
+ - change_existing_string_check is changed
+ - change_existing_string_actual_check.value == "Guest"
+
+- name: change existing string key
+ win_security_policy:
+ section: System Access
+ key: NewGuestName
+ value: New Guest
+ register: change_existing_string
+
+- name: get actual change existing string key
+ test_win_security_policy:
+ section: System Access
+ key: NewGuestName
+ register: change_existing_string_actual
+
+- name: assert change existing string key
+ assert:
+ that:
+ - change_existing_string is changed
+ - change_existing_string_actual.value == "New Guest"
+
+- name: change existing string key again
+ win_security_policy:
+ section: System Access
+ key: NewGuestName
+ value: New Guest
+ register: change_existing_string_again
+
+- name: assert change existing string key again
+ assert:
+ that:
+ - change_existing_string_again is not changed
+ - change_existing_string_again.value == "New Guest"
+
+- name: add policy setting
+ win_security_policy:
+ section: Privilege Rights
+ # following key is empty by default
+ key: SeCreateTokenPrivilege
+ # add Guests
+ value: '*S-1-5-32-546'
+
+- name: get actual policy setting
+ test_win_security_policy:
+ section: Privilege Rights
+ key: SeCreateTokenPrivilege
+ register: add_policy_setting_actual
+
+- name: assert add policy setting
+ assert:
+ that:
+ - add_policy_setting_actual.value == '*S-1-5-32-546'
+
+- name: remove policy setting
+ win_security_policy:
+ section: Privilege Rights
+ key: SeCreateTokenPrivilege
+ value: ''
+ diff: yes
+ register: remove_policy_setting
+
+- name: get actual policy setting
+ test_win_security_policy:
+ section: Privilege Rights
+ key: SeCreateTokenPrivilege
+ register: remove_policy_setting_actual
+
+- name: assert remove policy setting
+ assert:
+ that:
+ - remove_policy_setting is changed
+ - remove_policy_setting.diff.prepared == "[Privilege Rights]\n-SeCreateTokenPrivilege = *S-1-5-32-546\n+SeCreateTokenPrivilege = "
+ - remove_policy_setting_actual.value is none
+
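+# Note: diff: yes on the task above makes the module hand back a prepared
+# diff body (an ini-style section header followed by -old/+new lines),
+# which is what --diff mode would render; the assert pins its exact shape.
+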
+- name: remove policy setting again
+ win_security_policy:
+ section: Privilege Rights
+ key: SeCreateTokenPrivilege
+ value: ''
+ register: remove_policy_setting_again
+
+- name: assert remove policy setting again
+ assert:
+ that:
+ - remove_policy_setting_again is not changed
+ - remove_policy_setting_again.value == ''
diff --git a/test/integration/targets/incidental_xml/aliases b/test/integration/targets/incidental_xml/aliases
new file mode 100644
index 00000000..fc0963c1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/incidental
+skip/aix
+skip/power/centos
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
new file mode 100644
index 00000000..d0e3e39a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers-unicode.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+  <name>Толстый бар</name>
+ <beers>
+    <beer>Окское</beer>
+    <beer>Невское</beer>
+ </beers>
+  <rating subjective="да">десять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tolstyybar.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
new file mode 100644
index 00000000..5afc7974
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml b/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
new file mode 100644
index 00000000..61747d4b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/fixtures/ansible-xml-namespaced-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml b/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
new file mode 100644
index 00000000..525330c2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-elements-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+    <beer>Окское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-elements.xml b/test/integration/targets/incidental_xml/results/test-add-children-elements.xml
new file mode 100644
index 00000000..f9ff2517
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml b/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
new file mode 100644
index 00000000..565ba402
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-from-groupvars.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Natty Lite</beer><beer>Miller Lite</beer><beer>Coors Lite</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml b/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
new file mode 100644
index 00000000..8da96336
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-insertafter.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml b/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
new file mode 100644
index 00000000..c409e54b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-insertbefore.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
new file mode 100644
index 00000000..37465224
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+    <beer name="Окское" type="экстра"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
new file mode 100644
index 00000000..5a3907f6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-children-with-attributes.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Ansible Brew" type="light"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml b/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
new file mode 100644
index 00000000..fa1ddfca
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-element-implicitly.yml
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer color="red">George Killian's Irish Red</beer>
+ <beer origin="CZ" color="blonde">Pilsner Urquell</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ <validxhtml validateon=""/>
+ </website>
+ <phonenumber>555-555-1234</phonenumber>
+ <owner dob="1976-04-12">
+ <name>
+ <last>Smith</last>
+ <first>John</first>
+ <middle>Q</middle>
+ </name>
+ </owner>
+ <website_bis>
+ <validxhtml validateon=""/>
+ </website_bis>
+ <testnormalelement>xml tag with no special characters</testnormalelement>
+ <test-with-dash>xml tag with dashes</test-with-dash>
+ <test-with-dash.and.dot>xml tag with dashes and dots</test-with-dash.and.dot>
+ <test-with.dash_and.dot_and-underscores>xml tag with dashes, dots and underscores</test-with.dash_and.dot_and-underscores>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml b/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
new file mode 100644
index 00000000..3d27e8aa
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-add-namespaced-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml b/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
new file mode 100644
index 00000000..f47909ac
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-pretty-print-only.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-pretty-print.xml b/test/integration/targets/incidental_xml/results/test-pretty-print.xml
new file mode 100644
index 00000000..b5c38262
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-pretty-print.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/test/integration/targets/incidental_xml/results/test-remove-attribute.xml b/test/integration/targets/incidental_xml/results/test-remove-attribute.xml
new file mode 100644
index 00000000..8a621cf1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating>10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-element.xml b/test/integration/targets/incidental_xml/results/test-remove-element.xml
new file mode 100644
index 00000000..454d905c
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml b/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
new file mode 100644
index 00000000..732a0ed2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-namespaced-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml b/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
new file mode 100644
index 00000000..16df98e2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-remove-namespaced-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
new file mode 100644
index 00000000..de3bc3f6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-attribute-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="нет">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml b/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
new file mode 100644
index 00000000..143fe7bf
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="false">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
new file mode 100644
index 00000000..0ef2b7e6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements-level.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer alcohol="0.5" name="90 Minute IPA"><Water liter="0.2" quantity="200g"/><Starch quantity="10g"/><Hops quantity="50g"/><Yeast quantity="20g"/></beer><beer alcohol="0.3" name="Harvest Pumpkin Ale"><Water liter="0.2" quantity="200g"/><Hops quantity="25g"/><Yeast quantity="20g"/></beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
new file mode 100644
index 00000000..f19d5356
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements-unicode.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Окское</beer><beer>Невское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-children-elements.xml b/test/integration/targets/incidental_xml/results/test-set-children-elements.xml
new file mode 100644
index 00000000..be313a5a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-children-elements.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>90 Minute IPA</beer><beer>Harvest Pumpkin Ale</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml b/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
new file mode 100644
index 00000000..785beb64
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value-empty.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address></address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml b/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
new file mode 100644
index 00000000..734fe6db
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">пÑÑ‚ÑŒ</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>пять</rating></business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-element-value.xml b/test/integration/targets/incidental_xml/results/test-set-element-value.xml
new file mode 100644
index 00000000..fc97ec3b
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">5</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>5</rating></business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml b/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
new file mode 100644
index 00000000..44abda43
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-namespaced-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="false">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml b/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
new file mode 100644
index 00000000..0cc8a79e
--- /dev/null
+++ b/test/integration/targets/incidental_xml/results/test-set-namespaced-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">11</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/test/integration/targets/incidental_xml/tasks/main.yml b/test/integration/targets/incidental_xml/tasks/main.yml
new file mode 100644
index 00000000..9b8f2c36
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/main.yml
@@ -0,0 +1,68 @@
+- name: Gather facts
+ setup:
+
+- name: Install lxml (FreeBSD)
+ package:
+ name: '{{ "py27-lxml" if ansible_python.version.major == 2 else "py36-lxml" }}'
+ state: present
+ when: ansible_os_family == "FreeBSD"
+
+# Needed for MacOSX (installed everywhere, since the conditional below is commented out)
+- name: Install lxml
+ pip:
+ name: lxml
+ state: present
+# when: ansible_os_family == "Darwin"
+
+- name: Get lxml version
+ command: "{{ ansible_python_interpreter }} -c 'from lxml import etree; print(\".\".join(str(v) for v in etree.LXML_VERSION))'"
+ register: lxml_version
+
+- name: Set lxml capabilities as variables
+ set_fact:
+ # NOTE: Some tests require predictable element attribute order,
+ # which is only guaranteed starting from lxml v3.0alpha1
+ lxml_predictable_attribute_order: '{{ lxml_version.stdout is version("3", ">=") }}'
+
+ # NOTE: The xml module requires at least lxml v2.3.0
+ lxml_xpath_attribute_result_attrname: '{{ lxml_version.stdout is version("2.3.0", ">=") }}'
+
+- name: Only run the tests when lxml is v2.3.0 or newer
+ when: lxml_xpath_attribute_result_attrname
+ block:
+
+ - include_tasks: test-add-children-elements.yml
+ - include_tasks: test-add-children-from-groupvars.yml
+ - include_tasks: test-add-children-insertafter.yml
+ - include_tasks: test-add-children-insertbefore.yml
+ - include_tasks: test-add-children-with-attributes.yml
+ - include_tasks: test-add-element-implicitly.yml
+ - include_tasks: test-count.yml
+ - include_tasks: test-mutually-exclusive-attributes.yml
+ - include_tasks: test-remove-attribute.yml
+ - include_tasks: test-remove-element.yml
+ - include_tasks: test-set-attribute-value.yml
+ - include_tasks: test-set-children-elements.yml
+ - include_tasks: test-set-children-elements-level.yml
+ - include_tasks: test-set-element-value.yml
+ - include_tasks: test-set-element-value-empty.yml
+ - include_tasks: test-pretty-print.yml
+ - include_tasks: test-pretty-print-only.yml
+ - include_tasks: test-add-namespaced-children-elements.yml
+ - include_tasks: test-remove-namespaced-attribute.yml
+ - include_tasks: test-remove-namespaced-element.yml
+ - include_tasks: test-set-namespaced-attribute-value.yml
+ - include_tasks: test-set-namespaced-element-value.yml
+ - include_tasks: test-set-namespaced-children-elements.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-xmlstring.yml
+ - include_tasks: test-children-elements-xml.yml
+
+ # Unicode tests
+ - include_tasks: test-add-children-elements-unicode.yml
+ - include_tasks: test-add-children-with-attributes-unicode.yml
+ - include_tasks: test-set-attribute-value-unicode.yml
+ - include_tasks: test-count-unicode.yml
+ - include_tasks: test-get-element-content-unicode.yml
+ - include_tasks: test-set-children-elements-unicode.yml
+ - include_tasks: test-set-element-value-unicode.yml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml
new file mode 100644
index 00000000..8ad91501
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-elements-unicode.yml
@@ -0,0 +1,31 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Окское
+ register: add_children_elements_unicode
+
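+ # NOTE: A check-mode copy with diff reports 'changed' only when the files differ,
+ # so 'comparison.changed == false' verifies the module produced the expected XML.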
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml
new file mode 100644
index 00000000..8d9b0686
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-elements.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml
new file mode 100644
index 00000000..e062de8d
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-from-groupvars.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children: '{{ bad_beers }}'
+ register: add_children_from_groupvars
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-from-groupvars.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_from_groupvars.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml
new file mode 100644
index 00000000..2d42e2d5
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-insertafter.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertafter: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: yes
+ register: add_children_insertafter
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertafter.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertafter.changed == true
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml
new file mode 100644
index 00000000..8550f12c
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-insertbefore.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: yes
+ register: add_children_insertbefore
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertbefore.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertbefore.changed == true
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml
new file mode 100644
index 00000000..d4a2329f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes-unicode.yml
@@ -0,0 +1,31 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Окское
+ type: экстра
+ register: add_children_with_attributes_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml
new file mode 100644
index 00000000..91e92637
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-children-with-attributes.yml
@@ -0,0 +1,35 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Ansible Brew
+ type: light
+ register: add_children_with_attributes
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # NOTE: This test may fail if lxml does not support predictable element attribute order
+ # So we filter the failure out for these platforms (e.g. CentOS 6)
+ # The module still works fine, we simply are not comparing as smart as we should.
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes.changed == true
+ - comparison.changed == false # identical
+ when: lxml_predictable_attribute_order
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml b/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml
new file mode 100644
index 00000000..db674ba4
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-element-implicitly.yml
@@ -0,0 +1,239 @@
+---
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+
+
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/last
+ value: Smith
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/first
+ value: John
+
+- name: Add a validxhtml element to the website element. Note that state defaults to present, and value defaults to null for elements, so neither needs to be specified here.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml/@validateon
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website_bis/validxhtml/@validateon
+
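+# NOTE: The two tasks below set values directly in the XPath expression,
+# using the @attribute='value' and text()="value" forms.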
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/@dob='1976-04-12'
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer/text()="George Killian's Irish Red" # note the apostrophe inside the XPath string
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: yes
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: yes
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: yes
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: yes
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="George Killian's Irish Red"]/@color='red'
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="Pilsner Urquell" and @origin='CZ']/@color='blonde'
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name[first/text()='John']/middle
+ value: Q
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: yes
+
+- name: Compare to expected result
+ copy:
+ src: results/test-add-element-implicitly.yml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+- name: Test expected result
+ assert:
+ that:
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-element-implicitly.yml /tmp/ansible-xml-beers-implicit.xml
+
+
+# Now we repeat the same, just to ensure proper use of namespaces
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:phonenumber
+ value: 555-555-1234
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:last
+ value: Smith
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:first
+ value: John
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add a validxhtml element to the website element. Note that state defaults to present, and value defaults to null for elements, so neither needs to be specified here.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website_bis/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/@a:dob='1976-04-12'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer/text()="George Killian's Irish Red" # note the apostrophe inside the XPath string
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="George Killian's Irish Red"]/@a:color='red'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="Pilsner Urquell" and @a:origin='CZ']/@a:color='blonde'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name[a:first/text()='John']/a:middle
+ value: Q
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: yes
diff --git a/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml
new file mode 100644
index 00000000..25eca47f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-add-namespaced-children-elements.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Add namespaced child element
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_namespaced_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-namespaced-children-elements.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_namespaced_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml b/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml
new file mode 100644
index 00000000..e63100c4
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-children-elements-xml.yml
@@ -0,0 +1,30 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element with xml format
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ input_type: xml
+ add_children:
+ - '<beer>Old Rasputin</beer>'
+ register: children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml
new file mode 100644
index 00000000..47a806bf
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-count-unicode.yml
@@ -0,0 +1,19 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Count child element
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers.changed == false
+ - beers.count == 2
diff --git a/test/integration/targets/incidental_xml/tasks/test-count.yml b/test/integration/targets/incidental_xml/tasks/test-count.yml
new file mode 100644
index 00000000..cbc97e32
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-count.yml
@@ -0,0 +1,19 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Count child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers.changed == false
+ - beers.count == 3
diff --git a/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml
new file mode 100644
index 00000000..73ae9667
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-get-element-content-unicode.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute.changed == false
+ - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text.changed == false
+ - get_element_text.matches[0]['rating'] == 'десять'
diff --git a/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml b/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml
new file mode 100644
index 00000000..58ca7767
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-get-element-content.yml
@@ -0,0 +1,54 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
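+ # NOTE: content=attribute returns each match as a dict of its attributes,
+ # while content=text returns each match's text value.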
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute.changed == false
+ - get_element_attribute.matches[0]['rating'] is defined
+ - get_element_attribute.matches[0]['rating']['subjective'] == 'true'
+
+ # TODO: Remove this in Ansible v2.12 when this incorrect use of attribute is deprecated
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ attribute: subjective
+ register: get_element_attribute_wrong
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute_wrong.changed == false
+ - get_element_attribute_wrong.matches[0]['rating'] is defined
+ - get_element_attribute_wrong.matches[0]['rating']['subjective'] == 'true'
+ - get_element_attribute_wrong.deprecations is defined
+ - get_element_attribute_wrong.deprecations[0].msg == "Parameter 'attribute=subjective' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry."
+ - get_element_attribute_wrong.deprecations[0].version == '2.12'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text.changed == false
+ - get_element_text.matches[0]['rating'] == '10'
diff --git a/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml b/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml
new file mode 100644
index 00000000..3f24b0ac
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-mutually-exclusive-attributes.yml
@@ -0,0 +1,23 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
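+ # NOTE: 'add_children' and 'value' are mutually exclusive, so this call must fail.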
+ - name: Specify both children to add and a value
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ add_children:
+ - child01
+ - child02
+ value: conflict!
+ register: module_output
+ ignore_errors: yes
+
+ - name: Test expected result
+ assert:
+ that:
+ - module_output.changed == false
+ - module_output.failed == true
diff --git a/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml b/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml
new file mode 100644
index 00000000..7c0f7d5f
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-pretty-print-only.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml.orig
+
+ - name: Remove spaces from test fixture
+ shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml
+
+ - name: Pretty print without modification
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ pretty_print: yes
+ register: pretty_print_only
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print-only.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print_only.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml b/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml
new file mode 100644
index 00000000..88b618b2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-pretty-print.yml
@@ -0,0 +1,30 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Pretty print
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ pretty_print: yes
+ add_children:
+ - beer: Old Rasputin
+ register: pretty_print
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml b/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml
new file mode 100644
index 00000000..9aa395e6
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-attribute.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_attribute.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-element.yml b/test/integration/targets/incidental_xml/tasks/test-remove-element.yml
new file mode 100644
index 00000000..f2e20ea2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-element.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_element.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
new file mode 100644
index 00000000..36682b22
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-attribute.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_attribute.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
new file mode 100644
index 00000000..be78af68
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-remove-namespaced-element.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_element.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
new file mode 100644
index 00000000..dabf72a1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value-unicode.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'нет'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: нет
+ register: set_attribute_value_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
new file mode 100644
index 00000000..2aa39fe2
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-attribute-value.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: 'false'
+ register: set_attribute_value
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
new file mode 100644
index 00000000..3e2c0adb
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-level.yml
@@ -0,0 +1,76 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
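+ # NOTE: In set_children, '_' nests grandchild elements under a child, and the
+ # YAML anchor (&children) is reused below to verify the module is idempotent.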
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer:
+ alcohol: "0.5"
+ name: 90 Minute IPA
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Starch:
+ quantity: 10g
+ - Hops:
+ quantity: 50g
+ - Yeast:
+ quantity: 20g
+ - beer:
+ alcohol: "0.3"
+ name: Harvest Pumpkin Ale
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Hops:
+ quantity: 25g
+ - Yeast:
+ quantity: 20g
+ register: set_children_elements_level
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_level.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
new file mode 100644
index 00000000..240b894a
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements-unicode.yml
@@ -0,0 +1,53 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: Окское
+ - beer: Невское
+ register: set_children_elements_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
new file mode 100644
index 00000000..7b0f3247
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-children-elements.yml
@@ -0,0 +1,53 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml
new file mode 100644
index 00000000..5814803c
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value-empty.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/website/address' to empty string.
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/website/address
+ value: ''
+ register: set_element_value_empty
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-empty.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_value_empty.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml
new file mode 100644
index 00000000..c3a40b7d
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value-unicode.yml
@@ -0,0 +1,43 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: пять
+
+ - name: Set '/business/rating' to 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to 'пять'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml
new file mode 100644
index 00000000..dbd070f1
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-element-value.yml
@@ -0,0 +1,43 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: '5'
+
+ - name: Set '/business/rating' to '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to '5'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml
new file mode 100644
index 00000000..e0086efe
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-attribute-value.yml
@@ -0,0 +1,34 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ attribute: attr:subjective
+ value: 'false'
+ register: set_namespaced_attribute_value
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-attribute-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_namespaced_attribute_value.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml
new file mode 100644
index 00000000..8e66e70e
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-children-elements.yml
@@ -0,0 +1,57 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-xml.xml
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+
+ - name: Copy state after first set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-xml.xml
+ dest: /tmp/ansible-xml-namespaced-beers-1.xml
+ remote_src: yes
+
+ - name: Set child elements again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_again
+
+ - name: Copy state after second set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-xml.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: yes
+
+ - name: Compare to expected result
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-1.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: yes
+ check_mode: yes
+ diff: yes
+ register: comparison
+ #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false # idempotency
+ - comparison.changed == false # identical
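+ # set_children replaces the element's entire child list, so repeating
+ # the same list is expected to be idempotent (changed=false).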
diff --git a/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml
new file mode 100644
index 00000000..f77d7537
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-set-namespaced-element-value.yml
@@ -0,0 +1,46 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_first_run
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11' again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-element-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
diff --git a/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml b/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml
new file mode 100644
index 00000000..4620d984
--- /dev/null
+++ b/test/integration/targets/incidental_xml/tasks/test-xmlstring.yml
@@ -0,0 +1,81 @@
+---
+ - name: Copy expected results to remote
+ copy:
+ src: "results/{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - test-pretty-print.xml
+ - test-pretty-print-only.xml
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (not using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: .
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: "{{ xmlresponse.xmlstring }}\n"
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
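+ # lookup('file', ...) strips the trailing newline, so one is appended
+ # above to match the expected file on disk.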
+
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ pretty_print: yes
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse.xmlstring }}'
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: /business/beers
+ pretty_print: yes
+ add_children:
+ - beer: Old Rasputin
+ register: xmlresponse_modification
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse_modification.xmlstring }}'
+ dest: '/tmp/test-pretty-print.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse_modification.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/test/integration/targets/incidental_xml/vars/main.yml b/test/integration/targets/incidental_xml/vars/main.yml
new file mode 100644
index 00000000..7c5675bd
--- /dev/null
+++ b/test/integration/targets/incidental_xml/vars/main.yml
@@ -0,0 +1,6 @@
+# -*- mode: yaml -*-
+---
+bad_beers:
+- beer: "Natty Lite"
+- beer: "Miller Lite"
+- beer: "Coors Lite"
diff --git a/test/integration/targets/include_import/aliases b/test/integration/targets/include_import/aliases
new file mode 100644
index 00000000..fff62d9f
--- /dev/null
+++ b/test/integration/targets/include_import/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group5
+skip/aix
diff --git a/test/integration/targets/include_import/apply/import_apply.yml b/test/integration/targets/include_import/apply/import_apply.yml
new file mode 100644
index 00000000..27a40861
--- /dev/null
+++ b/test/integration/targets/include_import/apply/import_apply.yml
@@ -0,0 +1,31 @@
+---
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - import_tasks:
+ file: import_tasks.yml
+ apply:
+ tags:
+ - foo
+ tags:
+ - always
+
+ - assert:
+ that:
+ - include_tasks_result is defined
+ tags:
+ - always
+
+ - import_role:
+ name: import_role
+ apply:
+ tags:
+ - foo
+ tags:
+ - always
+
+ - assert:
+ that:
+ - include_role_result is defined
+ tags:
+ - always
diff --git a/test/integration/targets/include_import/apply/include_apply.yml b/test/integration/targets/include_import/apply/include_apply.yml
new file mode 100644
index 00000000..32c6e5e9
--- /dev/null
+++ b/test/integration/targets/include_import/apply/include_apply.yml
@@ -0,0 +1,50 @@
+---
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - include_tasks:
+ file: include_tasks.yml
+ apply:
+ tags:
+ - foo
+ tags:
+ - always
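+ # 'apply' pushes the listed keywords (tags: foo) onto the tasks inside
+ # include_tasks.yml at runtime; the bare 'tags: always' only controls
+ # whether this include task itself is selected.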
+
+ - assert:
+ that:
+ - include_tasks_result is defined
+ tags:
+ - always
+
+ - include_role:
+ name: include_role
+ apply:
+ tags:
+ - foo
+ tags:
+ - always
+
+ - assert:
+ that:
+ - include_role_result is defined
+ tags:
+ - always
+
+ - include_role:
+ name: include_role2
+ apply:
+ tags:
+ - foo
+ tags:
+ - not_specified_on_purpose
+
+ - assert:
+ that:
+ - include_role2_result is undefined
+ tags:
+ - always
+
+ - include_role:
+ name: include_role
+ apply:
+ delegate_to: testhost2
diff --git a/test/integration/targets/include_import/apply/include_tasks.yml b/test/integration/targets/include_import/apply/include_tasks.yml
new file mode 100644
index 00000000..be511d1e
--- /dev/null
+++ b/test/integration/targets/include_import/apply/include_tasks.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ include_tasks_result: true
diff --git a/test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml b/test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml
new file mode 100644
index 00000000..7f86b264
--- /dev/null
+++ b/test/integration/targets/include_import/apply/roles/include_role/tasks/main.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ include_role_result: true
diff --git a/test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml b/test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml
new file mode 100644
index 00000000..028c30d5
--- /dev/null
+++ b/test/integration/targets/include_import/apply/roles/include_role2/tasks/main.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ include_role2_result: true
diff --git a/test/integration/targets/include_import/empty_group_warning/playbook.yml b/test/integration/targets/include_import/empty_group_warning/playbook.yml
new file mode 100644
index 00000000..6da5b7c3
--- /dev/null
+++ b/test/integration/targets/include_import/empty_group_warning/playbook.yml
@@ -0,0 +1,13 @@
+---
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Group
+ group_by:
+ key: test_{{ inventory_hostname }}
+
+- hosts: test_localhost
+ gather_facts: false
+ tasks:
+ - name: Print
+ import_tasks: tasks.yml
diff --git a/test/integration/targets/include_import/empty_group_warning/tasks.yml b/test/integration/targets/include_import/empty_group_warning/tasks.yml
new file mode 100644
index 00000000..2fbad773
--- /dev/null
+++ b/test/integration/targets/include_import/empty_group_warning/tasks.yml
@@ -0,0 +1,3 @@
+- name: test
+ debug:
+ msg: hello
diff --git a/test/integration/targets/include_import/grandchild/block_include_tasks.yml b/test/integration/targets/include_import/grandchild/block_include_tasks.yml
new file mode 100644
index 00000000..f8addcf4
--- /dev/null
+++ b/test/integration/targets/include_import/grandchild/block_include_tasks.yml
@@ -0,0 +1,2 @@
+- command: "true"
+ register: block_include_result
diff --git a/test/integration/targets/include_import/grandchild/import.yml b/test/integration/targets/include_import/grandchild/import.yml
new file mode 100644
index 00000000..ef6990e2
--- /dev/null
+++ b/test/integration/targets/include_import/grandchild/import.yml
@@ -0,0 +1 @@
+- include_tasks: include_level_1.yml
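+# Entry point for the grandchild test: import.yml includes
+# include_level_1.yml, which in turn includes import_include_include_tasks.yml.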
diff --git a/test/integration/targets/include_import/grandchild/import_include_include_tasks.yml b/test/integration/targets/include_import/grandchild/import_include_include_tasks.yml
new file mode 100644
index 00000000..dae3a245
--- /dev/null
+++ b/test/integration/targets/include_import/grandchild/import_include_include_tasks.yml
@@ -0,0 +1,2 @@
+- command: "true"
+ register: import_include_include_result
diff --git a/test/integration/targets/include_import/grandchild/include_level_1.yml b/test/integration/targets/include_import/grandchild/include_level_1.yml
new file mode 100644
index 00000000..e323511f
--- /dev/null
+++ b/test/integration/targets/include_import/grandchild/include_level_1.yml
@@ -0,0 +1 @@
+- include_tasks: import_include_include_tasks.yml
diff --git a/test/integration/targets/include_import/handler_addressing/playbook.yml b/test/integration/targets/include_import/handler_addressing/playbook.yml
new file mode 100644
index 00000000..7515dc99
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/playbook.yml
@@ -0,0 +1,11 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: include_handler_test
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: import_handler_test
diff --git a/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml
new file mode 100644
index 00000000..95524ed4
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/handlers/main.yml
@@ -0,0 +1,2 @@
+- name: do_import
+ import_tasks: tasks/handlers.yml
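+# Handlers may statically import a tasks file; notifying 'do_import'
+# runs the imported tasks as part of the handler.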
diff --git a/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml
new file mode 100644
index 00000000..eeb49ff6
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/handlers.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: import handler task
diff --git a/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml
new file mode 100644
index 00000000..b0312cc2
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/roles/import_handler_test/tasks/main.yml
@@ -0,0 +1,3 @@
+- command: "true"
+ notify:
+ - do_import
diff --git a/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml
new file mode 100644
index 00000000..7f24b9d5
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/handlers/main.yml
@@ -0,0 +1,2 @@
+- name: do_include
+ include_tasks: tasks/handlers.yml
diff --git a/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml
new file mode 100644
index 00000000..2bf07f23
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/handlers.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: include handler task
diff --git a/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml
new file mode 100644
index 00000000..c29a787c
--- /dev/null
+++ b/test/integration/targets/include_import/handler_addressing/roles/include_handler_test/tasks/main.yml
@@ -0,0 +1,3 @@
+- command: "true"
+ notify:
+ - do_include
diff --git a/test/integration/targets/include_import/inventory b/test/integration/targets/include_import/inventory
new file mode 100644
index 00000000..3ae8d9c3
--- /dev/null
+++ b/test/integration/targets/include_import/inventory
@@ -0,0 +1,6 @@
+[local]
+testhost ansible_connection=local host_var_role_name=role3
+testhost2 ansible_connection=local host_var_role_name=role2
+
+[local:vars]
+ansible_python_interpreter="{{ ansible_playbook_python }}"
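+# Both hosts run over the local connection; host_var_role_name feeds the
+# include_role/import_role tests that template a role name per host.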
diff --git a/test/integration/targets/include_import/nestedtasks/nested/nested.yml b/test/integration/targets/include_import/nestedtasks/nested/nested.yml
new file mode 100644
index 00000000..95fe2660
--- /dev/null
+++ b/test/integration/targets/include_import/nestedtasks/nested/nested.yml
@@ -0,0 +1,2 @@
+---
+- include_role: {name: nested_include_task}
diff --git a/test/integration/targets/include_import/parent_templating/playbook.yml b/test/integration/targets/include_import/parent_templating/playbook.yml
new file mode 100644
index 00000000..b7330206
--- /dev/null
+++ b/test/integration/targets/include_import/parent_templating/playbook.yml
@@ -0,0 +1,11 @@
+# https://github.com/ansible/ansible/issues/49969
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - include_role:
+ name: test
+ public: true
+
+ - assert:
+ that:
+ - included_other is defined
diff --git a/test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml b/test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml
new file mode 100644
index 00000000..e5b281e7
--- /dev/null
+++ b/test/integration/targets/include_import/parent_templating/roles/test/tasks/localhost.yml
@@ -0,0 +1 @@
+- include_tasks: other.yml
diff --git a/test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml b/test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml
new file mode 100644
index 00000000..16fba69a
--- /dev/null
+++ b/test/integration/targets/include_import/parent_templating/roles/test/tasks/main.yml
@@ -0,0 +1 @@
+- include_tasks: "{{ lookup('first_found', inventory_hostname ~ '.yml') }}"
diff --git a/test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml b/test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml
new file mode 100644
index 00000000..c3bae1a5
--- /dev/null
+++ b/test/integration/targets/include_import/parent_templating/roles/test/tasks/other.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ included_other: true
diff --git a/test/integration/targets/include_import/playbook/group_vars/all.yml b/test/integration/targets/include_import/playbook/group_vars/all.yml
new file mode 100644
index 00000000..9acd8c64
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/group_vars/all.yml
@@ -0,0 +1 @@
+group_var1: set in group_vars/all.yml
diff --git a/test/integration/targets/include_import/playbook/playbook1.yml b/test/integration/targets/include_import/playbook/playbook1.yml
new file mode 100644
index 00000000..55c66d80
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/playbook1.yml
@@ -0,0 +1,9 @@
+- name: Playbook 1
+ hosts: testhost2
+
+ tasks:
+ - name: Set fact in playbook 1
+ set_fact:
+ canary_var1: playbook1 imported
+ tags:
+ - canary1
diff --git a/test/integration/targets/include_import/playbook/playbook2.yml b/test/integration/targets/include_import/playbook/playbook2.yml
new file mode 100644
index 00000000..c986165e
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/playbook2.yml
@@ -0,0 +1,9 @@
+- name: Playbook 2
+ hosts: testhost2
+
+ tasks:
+ - name: Set fact in playbook 2
+ set_fact:
+ canary_var2: playbook2 imported
+ tags:
+ - canary2
diff --git a/test/integration/targets/include_import/playbook/playbook3.yml b/test/integration/targets/include_import/playbook/playbook3.yml
new file mode 100644
index 00000000..b62b96c3
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/playbook3.yml
@@ -0,0 +1,10 @@
+- name: Playbook 3
+ hosts: testhost2
+
+ tasks:
+ - name: Set fact in playbook 3
+ set_fact:
+ canary_var3: playbook3 imported
+ include_next_playbook: yes
+ tags:
+ - canary3
diff --git a/test/integration/targets/include_import/playbook/playbook4.yml b/test/integration/targets/include_import/playbook/playbook4.yml
new file mode 100644
index 00000000..330612a9
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/playbook4.yml
@@ -0,0 +1,9 @@
+- name: Playbook 4
+ hosts: testhost2
+
+ tasks:
+ - name: Set fact in playbook 4
+ set_fact:
+ canary_var4: playbook4 imported
+ tags:
+ - canary4
diff --git a/test/integration/targets/include_import/playbook/playbook_needing_vars.yml b/test/integration/targets/include_import/playbook/playbook_needing_vars.yml
new file mode 100644
index 00000000..6454502b
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/playbook_needing_vars.yml
@@ -0,0 +1,6 @@
+---
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - import_role:
+ name: "{{ import_playbook_role_name }}"
diff --git a/test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml b/test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml
new file mode 100644
index 00000000..77554399
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/roles/import_playbook_role/tasks/main.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: in import_playbook_role
diff --git a/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py b/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py
new file mode 100644
index 00000000..0ebe690d
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/sub_playbook/library/helloworld.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(argument_spec={})
+
+ module.exit_json(msg='Hello, World!')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml b/test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml
new file mode 100644
index 00000000..4399d935
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/sub_playbook/sub_playbook.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - helloworld:
diff --git a/test/integration/targets/include_import/playbook/test_import_playbook.yml b/test/integration/targets/include_import/playbook/test_import_playbook.yml
new file mode 100644
index 00000000..a5894608
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/test_import_playbook.yml
@@ -0,0 +1,26 @@
+# Test that additional parameters after import_playbook raise a warning and that whitespace is stripped
+- import_playbook: playbook1.yml tags=test_import
+
+# Test and validate playbook import
+- import_playbook: playbook1.yml
+- import_playbook: validate1.yml
+
+
+# Test and validate conditional import
+- import_playbook: playbook2.yml
+ when: no
+
+- import_playbook: validate2.yml
+
+- import_playbook: playbook3.yml
+- import_playbook: playbook4.yml
+ when: include_next_playbook
+
+- import_playbook: validate34.yml
+
+- import_playbook: playbook_needing_vars.yml
+ vars:
+ import_playbook_role_name: import_playbook_role
+
+# https://github.com/ansible/ansible/issues/59548
+- import_playbook: sub_playbook/sub_playbook.yml
diff --git a/test/integration/targets/include_import/playbook/test_import_playbook_tags.yml b/test/integration/targets/include_import/playbook/test_import_playbook_tags.yml
new file mode 100644
index 00000000..46136f6f
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/test_import_playbook_tags.yml
@@ -0,0 +1,10 @@
+- import_playbook: playbook1.yml # Test tag in tasks in included play
+- import_playbook: playbook2.yml # Test tag added to import_playbook
+ tags:
+ - canary22
+
+- import_playbook: playbook3.yml # Test skipping tags added to import_playbook
+ tags:
+ - skipme
+
+- import_playbook: validate_tags.yml # Validate
diff --git a/test/integration/targets/include_import/playbook/validate1.yml b/test/integration/targets/include_import/playbook/validate1.yml
new file mode 100644
index 00000000..0018344d
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/validate1.yml
@@ -0,0 +1,10 @@
+- hosts: testhost2
+
+ tasks:
+ - name: Assert that variable was set in playbook1.yml
+ assert:
+ that:
+ - canary_var1 == 'playbook1 imported'
+ tags:
+ - validate
+ - validate1
diff --git a/test/integration/targets/include_import/playbook/validate2.yml b/test/integration/targets/include_import/playbook/validate2.yml
new file mode 100644
index 00000000..f22bcb6e
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/validate2.yml
@@ -0,0 +1,10 @@
+- hosts: testhost2
+
+ tasks:
+ - name: Assert that playbook2.yml was skipped
+ assert:
+ that:
+ - canary_var2 is not defined
+ tags:
+ - validate
+ - validate2
diff --git a/test/integration/targets/include_import/playbook/validate34.yml b/test/integration/targets/include_import/playbook/validate34.yml
new file mode 100644
index 00000000..fd53a305
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/validate34.yml
@@ -0,0 +1,11 @@
+- hosts: testhost2
+
+ tasks:
+ - name: Assert that playbook3.yml and playbook4.yml were imported
+ assert:
+ that:
+ - canary_var3 == 'playbook3 imported'
+ - canary_var4 == 'playbook4 imported'
+ tags:
+ - validate
+ - validate34
diff --git a/test/integration/targets/include_import/playbook/validate_tags.yml b/test/integration/targets/include_import/playbook/validate_tags.yml
new file mode 100644
index 00000000..acdcb1f2
--- /dev/null
+++ b/test/integration/targets/include_import/playbook/validate_tags.yml
@@ -0,0 +1,11 @@
+- hosts: testhost2
+
+ tasks:
+ - name: Assert that only tasks with tags were run
+ assert:
+ that:
+ - canary_var1 == 'playbook1 imported'
+ - canary_var2 == 'playbook2 imported'
+ - canary_var3 is not defined
+ tags:
+ - validate
diff --git a/test/integration/targets/include_import/public_exposure/no_bleeding.yml b/test/integration/targets/include_import/public_exposure/no_bleeding.yml
new file mode 100644
index 00000000..b9db7132
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/no_bleeding.yml
@@ -0,0 +1,25 @@
+---
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Static imports should expose vars at parse time, not at execution time
+ assert:
+ that:
+ - static_defaults_var == 'static_defaults'
+ - static_vars_var == 'static_vars'
+ - import_role:
+ name: static
+ - assert:
+ that:
+ - static_tasks_var == 'static_tasks'
+ - static_defaults_var == 'static_defaults'
+ - static_vars_var == 'static_vars'
+
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Ensure vars from import_roles do not bleed between plays
+ assert:
+ that:
+ - static_defaults_var is undefined
+ - static_vars_var is undefined
diff --git a/test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml b/test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml
new file mode 100644
index 00000000..6a1d9bff
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/no_overwrite_roles.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: false
+ roles:
+ - call_import
diff --git a/test/integration/targets/include_import/public_exposure/playbook.yml b/test/integration/targets/include_import/public_exposure/playbook.yml
new file mode 100644
index 00000000..11735e77
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/playbook.yml
@@ -0,0 +1,56 @@
+---
+- hosts: testhost
+ gather_facts: false
+ roles:
+ - regular
+ tasks:
+ - debug:
+ msg: start tasks
+
+ - name: Static imports should expose vars at parse time, not at execution time
+ assert:
+ that:
+ - static_defaults_var == 'static_defaults'
+ - static_vars_var == 'static_vars'
+ - import_role:
+ name: static
+ - assert:
+ that:
+ - static_tasks_var == 'static_tasks'
+ - static_defaults_var == 'static_defaults'
+ - static_vars_var == 'static_vars'
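+ # import_role is resolved at parse time, so its defaults/vars are visible
+ # to the whole play; include_role exposes them only with 'public: true',
+ # and then only from execution time onward.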
+
+ - include_role:
+ name: dynamic_private
+ - assert:
+ that:
+ - private_tasks_var == 'private_tasks'
+ - private_defaults_var is undefined
+ - private_vars_var is undefined
+
+ - name: Dynamic include should not expose vars until execution time
+ assert:
+ that:
+ - dynamic_tasks_var is undefined
+ - dynamic_defaults_var is undefined
+ - dynamic_vars_var is undefined
+ - include_role:
+ name: dynamic
+ public: true
+ - assert:
+ that:
+ - dynamic_tasks_var == 'dynamic_tasks'
+ - dynamic_defaults_var == 'dynamic_defaults'
+ - dynamic_vars_var == 'dynamic_vars'
+
+ - include_role:
+ name: from
+ public: true
+ tasks_from: from.yml
+ vars_from: from.yml
+ defaults_from: from.yml
+ - assert:
+ that:
+ - from_tasks_var == 'from_tasks'
+ - from_defaults_var == 'from_defaults'
+ - from_vars_var == 'from_vars'
diff --git a/test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml
new file mode 100644
index 00000000..d6b28f09
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/call_import/tasks/main.yml
@@ -0,0 +1,6 @@
+- import_role:
+ name: regular
+
+- assert:
+ that:
+ - regular_defaults_var is defined
diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml
new file mode 100644
index 00000000..099ac29b
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/dynamic/defaults/main.yml
@@ -0,0 +1 @@
+dynamic_defaults_var: dynamic_defaults
diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml
new file mode 100644
index 00000000..e9b9ad3d
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/dynamic/tasks/main.yml
@@ -0,0 +1,5 @@
+- debug:
+ msg: dynamic
+
+- set_fact:
+ dynamic_tasks_var: dynamic_tasks
diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml
new file mode 100644
index 00000000..b33c12df
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/dynamic/vars/main.yml
@@ -0,0 +1 @@
+dynamic_vars_var: dynamic_vars
diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml
new file mode 100644
index 00000000..b19ef72c
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/defaults/main.yml
@@ -0,0 +1 @@
+private_defaults_var: private_defaults
diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml
new file mode 100644
index 00000000..1c7f653d
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/tasks/main.yml
@@ -0,0 +1,5 @@
+- debug:
+ msg: private
+
+- set_fact:
+ private_tasks_var: private_tasks
diff --git a/test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml
new file mode 100644
index 00000000..60f7ca81
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/dynamic_private/vars/main.yml
@@ -0,0 +1 @@
+private_vars_var: private_vars
diff --git a/test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml b/test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml
new file mode 100644
index 00000000..6729c4b4
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/from/defaults/from.yml
@@ -0,0 +1 @@
+from_defaults_var: from_defaults
diff --git a/test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml b/test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml
new file mode 100644
index 00000000..932efc9f
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/from/tasks/from.yml
@@ -0,0 +1,5 @@
+- debug:
+ msg: from
+
+- set_fact:
+ from_tasks_var: from_tasks
diff --git a/test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml b/test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml
new file mode 100644
index 00000000..98b2ad47
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/from/vars/from.yml
@@ -0,0 +1 @@
+from_vars_var: from_vars
diff --git a/test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml
new file mode 100644
index 00000000..21a6967c
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/regular/defaults/main.yml
@@ -0,0 +1 @@
+regular_defaults_var: regular_defaults
diff --git a/test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml
new file mode 100644
index 00000000..eafa141a
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/regular/tasks/main.yml
@@ -0,0 +1,5 @@
+- debug:
+ msg: regular
+
+- set_fact:
+ regular_tasks_var: regular_tasks
diff --git a/test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml
new file mode 100644
index 00000000..3d06546f
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/regular/vars/main.yml
@@ -0,0 +1 @@
+regular_vars_var: regular_vars
diff --git a/test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml b/test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml
new file mode 100644
index 00000000..d88f5559
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/static/defaults/main.yml
@@ -0,0 +1 @@
+static_defaults_var: static_defaults
diff --git a/test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml b/test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml
new file mode 100644
index 00000000..5a6488c1
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/static/tasks/main.yml
@@ -0,0 +1,5 @@
+- debug:
+ msg: static
+
+- set_fact:
+ static_tasks_var: static_tasks
diff --git a/test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml b/test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml
new file mode 100644
index 00000000..982e34d0
--- /dev/null
+++ b/test/integration/targets/include_import/public_exposure/roles/static/vars/main.yml
@@ -0,0 +1 @@
+static_vars_var: static_vars
diff --git a/test/integration/targets/include_import/role/test_import_role.yml b/test/integration/targets/include_import/role/test_import_role.yml
new file mode 100644
index 00000000..d45ff79b
--- /dev/null
+++ b/test/integration/targets/include_import/role/test_import_role.yml
@@ -0,0 +1,139 @@
+- name: Test import_role
+ hosts: testhost
+
+ vars:
+ run_role: yes
+ do_not_run_role: no
+ role_name: role1
+ test_var: templating test in playbook
+ role_vars:
+ where_am_i_defined: in the playbook
+ entire_task:
+ import_role:
+ name: role1
+
+ tasks:
+ - name: Test basic role import
+ import_role:
+ name: role1
+
+ - name: Assert that basic import works
+ assert:
+ that:
+ - _role1_result.msg == 'In role1'
+
+ - name: Test conditional role import
+ import_role:
+ name: role1
+ tasks_from: canary1.yml
+ when: run_role
+
+ - name: Assert that role ran
+ assert:
+ that:
+ - role1_canary1 == 'r1c1'
+
+ - name: Test conditional role import that should be skipped
+ import_role:
+ name: role1
+ tasks_from: canary2.yml
+ when: do_not_run_role
+
+ - name: Assert that role did not run
+ assert:
+ that:
+ - role1_canary2 is not defined
+
+ # FIXME We expect this to fail, but I'm not sure how best to test for
+ # syntax level failures.
+ #
+ # - name: Test role import with a loop
+ # import_role:
+ # name: "{{ item }}"
+ # register: loop_test
+ # with_items:
+ # - role1
+ # - role3
+ # - role2
+
+ - name: Test importing a task file from a role
+ import_role:
+ name: role1
+ tasks_from: tasks.yml
+
+ - name: Test importing vars file and tasks file from a role
+ import_role:
+ name: role3
+ tasks_from: vartest.yml
+ vars_from: role3vars.yml
+
+ - name: Assert that variables defined in previous task are available to play
+ assert:
+ that:
+ - role3_default == 'defined in role3/defaults/main.yml'
+ - role3_main == 'defined in role3/vars/main.yml'
+ - role3_var == 'defined in role3/vars/role3vars.yml'
+ ignore_errors: yes
+
+ - name: Test using a play variable for role name
+ import_role:
+ name: "{{ role_name }}"
+
+ # FIXME Trying to use a host_var here causes play execution to fail because
+ # the variable is undefined.
+ #
+ # - name: Test using a host variable for role name
+ # import_role:
+ # name: "{{ host_var_role_name }}"
+
+ - name: Pass variable to role
+ import_role:
+ name: role1
+ tasks_from: vartest.yml
+ vars:
+ where_am_i_defined: in the task
+
+ ## FIXME Currently failing
+ ## ERROR! Vars in a IncludeRole must be specified as a dictionary, or a list of dictionaries
+ # - name: Pass all variables in a variable to role
+ # import_role:
+ # name: role1
+ # tasks_from: vartest.yml
+ # vars: "{{ role_vars }}"
+
+ - name: Pass templated variable to a role
+ import_role:
+ name: role1
+ tasks_from: vartest.yml
+ vars:
+ where_am_i_defined: "{{ test_var }}"
+
+ # FIXME This fails with the following error:
+ # The module {u'import_role': {u'name': u'role1'}} was not found in configured module paths.
+ #
+ - name: Include an entire task
+ action:
+ module: "{{ entire_task }}"
+ tags:
+ - never
+
+ - block:
+ - name: Include a role that will fail
+ import_role:
+ name: role1
+ tasks_from: fail.yml
+
+ rescue:
+ - name: Include a role inside rescue
+ import_role:
+ name: role2
+
+ always:
+ - name: Include role inside always
+ import_role:
+ name: role3
+
+ - name: Test delegate_to handler is delegated
+ import_role:
+ name: delegated_handler
+ delegate_to: localhost
diff --git a/test/integration/targets/include_import/role/test_include_role.yml b/test/integration/targets/include_import/role/test_include_role.yml
new file mode 100644
index 00000000..e120bd8c
--- /dev/null
+++ b/test/integration/targets/include_import/role/test_include_role.yml
@@ -0,0 +1,166 @@
+- name: Test include_role
+ hosts: testhost
+
+ vars:
+ run_role: yes
+ do_not_run_role: no
+ role_name: role1
+ test_var: templating test in playbook
+ role_vars:
+ where_am_i_defined: in the playbook
+ entire_task:
+ include_role:
+ name: role1
+
+ tasks:
+ - name: Test basic role include
+ include_role:
+ name: role1
+
+ - name: Assert that basic include works
+ assert:
+ that:
+ - _role1_result.msg == 'In role1'
+
+ - name: Test conditional role include
+ include_role:
+ name: role1
+ tasks_from: canary1.yml
+ when: run_role
+
+ - name: Assert that role ran
+ assert:
+ that:
+ - role1_canary1 == 'r1c1'
+
+ - name: Test conditional role include that should be skipped
+ include_role:
+ name: role1
+ tasks_from: canary2.yml
+ when: do_not_run_role
+
+ - name: Assert that role did not run
+ assert:
+ that:
+ - role1_canary2 is not defined
+
+ - name: Test role include with a loop
+ include_role:
+ name: "{{ item }}"
+ with_items:
+ - role1
+ - role3
+ - role2
+
+ - name: Assert that roles run with_items
+ assert:
+ that:
+ - _role1_result.msg == 'In role1'
+ - _role2_result.msg == 'In role2'
+ - _role3_result.msg == 'In role3'
+
+ - name: Test including a task file from a role
+ include_role:
+ name: role1
+ tasks_from: tasks.yml
+
+ - name: Test including vars file and tasks file from a role
+ include_role:
+ name: role3
+ tasks_from: vartest.yml
+ vars_from: role3vars.yml
+
+ - name: Assert that variables defined in previous task are available to play
+ assert:
+ that:
+ - role3_default == 'defined in role3/defaults/main.yml'
+ - role3_main == 'defined in role3/vars/main.yml'
+ - role3_var == 'defined in role3/vars/role3vars.yml'
+ ignore_errors: yes
+
+ - name: Test using a play variable for role name
+ include_role:
+ name: "{{ role_name }}"
+
+ - name: Test using a host variable for role name
+ include_role:
+ name: "{{ host_var_role_name }}"
+
+ - name: Pass variable to role
+ include_role:
+ name: role1
+ tasks_from: vartest.yml
+ vars:
+ where_am_i_defined: in the task
+
+ ## FIXME Currently failing with
+ ## ERROR! Vars in a IncludeRole must be specified as a dictionary, or a list of dictionaries
+ # - name: Pass all variables in a variable to role
+ # include_role:
+ # name: role1
+ # tasks_from: vartest.yml
+ # vars: "{{ role_vars }}"
+
+ - name: Pass templated variable to a role
+ include_role:
+ name: role1
+ tasks_from: vartest.yml
+ vars:
+ where_am_i_defined: "{{ test_var }}"
+
+ - name: Use a variable in tasks_from field
+ include_role:
+ name: role1
+ tasks_from: "{{ tasks_file_name }}.yml"
+ vars:
+ tasks_file_name: canary3
+
+ - name: Assert that tasks file was included
+ assert:
+ that:
+ - role1_canary3 == 'r1c3'
+
+ ## FIXME This fails with the following error:
+ ## The module {u'include_role': {u'name': u'role1'}} was not found in configured module paths.
+ # - name: Include an entire task
+ # action:
+ # module: "{{ entire_task }}"
+
+ - block:
+ - name: Include a role that will fail
+ include_role:
+ name: role1
+ tasks_from: fail.yml
+
+ rescue:
+ - name: Include a role inside rescue
+ include_role:
+ name: role2
+
+ always:
+ - name: Include role inside always
+ include_role:
+ name: role3
+
+- hosts: testhost,testhost2
+ tasks:
+ - name: wipe role results
+ set_fact:
+ _role2_result: ~
+ _role3_result: ~
+
+ - name: Test using a host variable for role name
+ include_role:
+ name: "{{ host_var_role_name }}"
+
+ - name: Assert that host variable for role name calls two different roles (testhost2)
+ assert:
+ that:
+ - _role2_result is not none
+ when: inventory_hostname == 'testhost2'
+
+ - name: Assert that host variable for role name calls two different roles (testhost)
+ assert:
+ that:
+ - _role3_result is not none
+ when: inventory_hostname == 'testhost'
diff --git a/test/integration/targets/include_import/role/test_include_role_vars_from.yml b/test/integration/targets/include_import/role/test_include_role_vars_from.yml
new file mode 100644
index 00000000..f7bb4d76
--- /dev/null
+++ b/test/integration/targets/include_import/role/test_include_role_vars_from.yml
@@ -0,0 +1,10 @@
+- name: Test include_role vars_from
+ hosts: testhost
+ vars:
+ role_name: role1
+ tasks:
+ - name: Test vars_from
+ include_role:
+ name: role1
+ vars_from:
+ - vars_1.yml
diff --git a/test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml b/test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml
new file mode 100644
index 00000000..550ddc21
--- /dev/null
+++ b/test/integration/targets/include_import/roles/delegated_handler/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: delegated assert handler
+ assert:
+ that:
+ - ansible_delegated_vars is defined
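+# ansible_delegated_vars only exists while a task runs delegated, so this
+# assert proves delegate_to was inherited by the notified handler.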
diff --git a/test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml b/test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml
new file mode 100644
index 00000000..9d2ef61c
--- /dev/null
+++ b/test/integration/targets/include_import/roles/delegated_handler/tasks/main.yml
@@ -0,0 +1,3 @@
+- command: "true"
+ notify:
+ - delegated assert handler
diff --git a/test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml b/test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml
new file mode 100644
index 00000000..61d3ffe4
--- /dev/null
+++ b/test/integration/targets/include_import/roles/dup_allowed_role/meta/main.yml
@@ -0,0 +1,2 @@
+---
+allow_duplicates: true
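+# Without allow_duplicates a role runs at most once per play; this role
+# must be includable multiple times for the duplicate-include tests.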
diff --git a/test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml b/test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml
new file mode 100644
index 00000000..cad935e3
--- /dev/null
+++ b/test/integration/targets/include_import/roles/dup_allowed_role/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- debug:
+ msg: "Tasks file inside role"
diff --git a/test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml b/test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml
new file mode 100644
index 00000000..9bb3db51
--- /dev/null
+++ b/test/integration/targets/include_import/roles/loop_name_assert/tasks/main.yml
@@ -0,0 +1,4 @@
+- assert:
+ that:
+ - name == 'name_from_loop_var'
+ - name != 'loop_name_assert'
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml
new file mode 100644
index 00000000..aba24bbe
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+testnesteddep2_defvar1: foobar
+testnesteddep2_varvar1: foobar
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml
new file mode 100644
index 00000000..31afcaa9
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: nested/nested/nested_dep_role2a
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml
new file mode 100644
index 00000000..1f2ee7f0
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: ./rund.yml
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml
new file mode 100644
index 00000000..523e579d
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/tasks/rund.yml
@@ -0,0 +1,2 @@
+---
+- shell: echo from deprole2
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml
new file mode 100644
index 00000000..c89b6973
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2/vars/main.yml
@@ -0,0 +1,2 @@
+---
+testnesteddep2_varvar1: muche
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml
new file mode 100644
index 00000000..aba24bbe
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+testnesteddep2_defvar1: foobar
+testnesteddep2_varvar1: foobar
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml
new file mode 100644
index 00000000..6fc8ab0c
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: nested/nested/nested_dep_role2b
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml
new file mode 100644
index 00000000..729582c4
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: ./rune.yml
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml
new file mode 100644
index 00000000..e77882b2
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/tasks/rune.yml
@@ -0,0 +1,2 @@
+---
+- shell: echo from deprole2a
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml
new file mode 100644
index 00000000..c89b6973
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2a/vars/main.yml
@@ -0,0 +1,2 @@
+---
+testnesteddep2_varvar1: muche
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml
new file mode 100644
index 00000000..aba24bbe
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+testnesteddep2_defvar1: foobar
+testnesteddep2_varvar1: foobar
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml
new file mode 100644
index 00000000..32cf5dda
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/meta/main.yml
@@ -0,0 +1 @@
+dependencies: []
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml
new file mode 100644
index 00000000..5fbb04fe
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: ./runf.yml
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml
new file mode 100644
index 00000000..694005fd
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/tasks/runf.yml
@@ -0,0 +1,2 @@
+---
+- shell: echo from deprole2b
diff --git a/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml
new file mode 100644
index 00000000..c89b6973
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested/nested_dep_role2b/vars/main.yml
@@ -0,0 +1,2 @@
+---
+testnesteddep2_varvar1: muche
diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml
new file mode 100644
index 00000000..536745ee
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+testnesteddep_defvar1: foobar
+testnesteddep_varvar1: foobar
diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml
new file mode 100644
index 00000000..23d65c7e
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/meta/main.yml
@@ -0,0 +1,2 @@
+---
+dependencies: []
diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml
new file mode 100644
index 00000000..d86604b4
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: ./runc.yml
diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml
new file mode 100644
index 00000000..76682f54
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/tasks/runc.yml
@@ -0,0 +1,4 @@
+---
+- debug:
+ msg: from test_nested_dep_role
+- include_role: {name: nested/nested/nested_dep_role2}
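+# nested_dep_role2's meta dependencies chain on through nested_dep_role2a
+# and nested_dep_role2b (see their meta/main.yml files).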
diff --git a/test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml b/test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml
new file mode 100644
index 00000000..b80b5de3
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested/nested_dep_role/vars/main.yml
@@ -0,0 +1,2 @@
+---
+testnesteddep_varvar1: muche
diff --git a/test/integration/targets/include_import/roles/nested_include_task/meta/main.yml b/test/integration/targets/include_import/roles/nested_include_task/meta/main.yml
new file mode 100644
index 00000000..9410b7d2
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested_include_task/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- role: nested/nested_dep_role
diff --git a/test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml b/test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml
new file mode 100644
index 00000000..15a8e9fa
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested_include_task/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: ./runa.yml
diff --git a/test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml b/test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml
new file mode 100644
index 00000000..643fdd2a
--- /dev/null
+++ b/test/integration/targets/include_import/roles/nested_include_task/tasks/runa.yml
@@ -0,0 +1,3 @@
+---
+- debug:
+ msg: from nested_include_task
diff --git a/test/integration/targets/include_import/roles/role1/tasks/canary1.yml b/test/integration/targets/include_import/roles/role1/tasks/canary1.yml
new file mode 100644
index 00000000..9f202ba3
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/canary1.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ role1_canary1: r1c1
diff --git a/test/integration/targets/include_import/roles/role1/tasks/canary2.yml b/test/integration/targets/include_import/roles/role1/tasks/canary2.yml
new file mode 100644
index 00000000..80e18b8d
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/canary2.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ role1_canary2: r1c2
diff --git a/test/integration/targets/include_import/roles/role1/tasks/canary3.yml b/test/integration/targets/include_import/roles/role1/tasks/canary3.yml
new file mode 100644
index 00000000..40014e32
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/canary3.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ role1_canary3: r1c3
diff --git a/test/integration/targets/include_import/roles/role1/tasks/fail.yml b/test/integration/targets/include_import/roles/role1/tasks/fail.yml
new file mode 100644
index 00000000..b1b5f155
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/fail.yml
@@ -0,0 +1,3 @@
+- name: EXPECTED FAILURE
+ fail:
+ msg: This command should always fail
diff --git a/test/integration/targets/include_import/roles/role1/tasks/main.yml b/test/integration/targets/include_import/roles/role1/tasks/main.yml
new file mode 100644
index 00000000..a8b641ee
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/main.yml
@@ -0,0 +1,3 @@
+- debug:
+ msg: In role1
+ register: _role1_result
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t01.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t01.yml
new file mode 100644
index 00000000..e4a1e63e
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t01.yml
@@ -0,0 +1 @@
+- import_tasks: r1t02.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t02.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t02.yml
new file mode 100644
index 00000000..d3d37507
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t02.yml
@@ -0,0 +1 @@
+- import_tasks: r1t03.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t03.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t03.yml
new file mode 100644
index 00000000..1d3330ae
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t03.yml
@@ -0,0 +1 @@
+- import_tasks: r1t04.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t04.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t04.yml
new file mode 100644
index 00000000..f3eece23
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t04.yml
@@ -0,0 +1 @@
+- import_tasks: r1t05.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t05.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t05.yml
new file mode 100644
index 00000000..4c7371ee
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t05.yml
@@ -0,0 +1 @@
+- import_tasks: r1t06.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t06.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t06.yml
new file mode 100644
index 00000000..96d56609
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t06.yml
@@ -0,0 +1 @@
+- import_tasks: r1t07.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t07.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t07.yml
new file mode 100644
index 00000000..ee8d3252
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t07.yml
@@ -0,0 +1 @@
+- import_tasks: r1t08.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t08.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t08.yml
new file mode 100644
index 00000000..33b81096
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t08.yml
@@ -0,0 +1 @@
+- import_tasks: r1t09.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t09.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t09.yml
new file mode 100644
index 00000000..8973c291
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t09.yml
@@ -0,0 +1 @@
+- import_tasks: r1t10.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t10.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t10.yml
new file mode 100644
index 00000000..eafdca25
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t10.yml
@@ -0,0 +1 @@
+- import_tasks: r1t11.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t11.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t11.yml
new file mode 100644
index 00000000..9ab828f3
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t11.yml
@@ -0,0 +1 @@
+- import_tasks: r1t12.yml
diff --git a/test/integration/targets/include_import/roles/role1/tasks/r1t12.yml b/test/integration/targets/include_import/roles/role1/tasks/r1t12.yml
new file mode 100644
index 00000000..88284861
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/r1t12.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: r1t12
diff --git a/test/integration/targets/include_import/roles/role1/tasks/tasks.yml b/test/integration/targets/include_import/roles/role1/tasks/tasks.yml
new file mode 100644
index 00000000..45430bc4
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/tasks.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: Tasks file inside role1
diff --git a/test/integration/targets/include_import/roles/role1/tasks/vartest.yml b/test/integration/targets/include_import/roles/role1/tasks/vartest.yml
new file mode 100644
index 00000000..5a49d8dd
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/tasks/vartest.yml
@@ -0,0 +1,2 @@
+- debug:
+ var: where_am_i_defined
diff --git a/test/integration/targets/include_import/roles/role1/vars/main.yml b/test/integration/targets/include_import/roles/role1/vars/main.yml
new file mode 100644
index 00000000..57d31cf7
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/vars/main.yml
@@ -0,0 +1 @@
+where_am_i_defined: role1 vars/main.yml
diff --git a/test/integration/targets/include_import/roles/role1/vars/role1vars.yml b/test/integration/targets/include_import/roles/role1/vars/role1vars.yml
new file mode 100644
index 00000000..57d31cf7
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role1/vars/role1vars.yml
@@ -0,0 +1 @@
+where_am_i_defined: role1 vars/main.yml
diff --git a/test/integration/targets/include_import/roles/role2/tasks/main.yml b/test/integration/targets/include_import/roles/role2/tasks/main.yml
new file mode 100644
index 00000000..82934f67
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role2/tasks/main.yml
@@ -0,0 +1,3 @@
+- debug:
+ msg: In role2
+ register: _role2_result
diff --git a/test/integration/targets/include_import/roles/role3/defaults/main.yml b/test/integration/targets/include_import/roles/role3/defaults/main.yml
new file mode 100644
index 00000000..c3464c4d
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/defaults/main.yml
@@ -0,0 +1,2 @@
+where_am_i_defined: defaults in role3
+role3_default: defined in role3/defaults/main.yml
diff --git a/test/integration/targets/include_import/roles/role3/handlers/main.yml b/test/integration/targets/include_import/roles/role3/handlers/main.yml
new file mode 100644
index 00000000..c8baa270
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/handlers/main.yml
@@ -0,0 +1,3 @@
+- name: runme
+ debug:
+ msg: role3 handler
diff --git a/test/integration/targets/include_import/roles/role3/tasks/main.yml b/test/integration/targets/include_import/roles/role3/tasks/main.yml
new file mode 100644
index 00000000..bb70dad3
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/tasks/main.yml
@@ -0,0 +1,3 @@
+- debug:
+ msg: In role3
+ register: _role3_result
diff --git a/test/integration/targets/include_import/roles/role3/tasks/tasks.yml b/test/integration/targets/include_import/roles/role3/tasks/tasks.yml
new file mode 100644
index 00000000..0e822695
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/tasks/tasks.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: Tasks file inside role3
diff --git a/test/integration/targets/include_import/roles/role3/tasks/vartest.yml b/test/integration/targets/include_import/roles/role3/tasks/vartest.yml
new file mode 100644
index 00000000..cb21c53f
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/tasks/vartest.yml
@@ -0,0 +1,2 @@
+- debug:
+ var: role3_var
diff --git a/test/integration/targets/include_import/roles/role3/vars/main.yml b/test/integration/targets/include_import/roles/role3/vars/main.yml
new file mode 100644
index 00000000..9adac6b8
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/vars/main.yml
@@ -0,0 +1 @@
+role3_main: defined in role3/vars/main.yml
diff --git a/test/integration/targets/include_import/roles/role3/vars/role3vars.yml b/test/integration/targets/include_import/roles/role3/vars/role3vars.yml
new file mode 100644
index 00000000..f324d56a
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role3/vars/role3vars.yml
@@ -0,0 +1,2 @@
+where_am_i_defined: role3vars.yml
+role3_var: defined in role3/vars/role3vars.yml
diff --git a/test/integration/targets/include_import/roles/role_with_deps/meta/main.yml b/test/integration/targets/include_import/roles/role_with_deps/meta/main.yml
new file mode 100644
index 00000000..a2446bba
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role_with_deps/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - role1
+ - role2
diff --git a/test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml b/test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml
new file mode 100644
index 00000000..060fe42f
--- /dev/null
+++ b/test/integration/targets/include_import/roles/role_with_deps/tasks/main.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: In role_with_deps
diff --git a/test/integration/targets/include_import/run_once/include_me.yml b/test/integration/targets/include_import/run_once/include_me.yml
new file mode 100644
index 00000000..e92128a9
--- /dev/null
+++ b/test/integration/targets/include_import/run_once/include_me.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ lola: wiseman
diff --git a/test/integration/targets/include_import/run_once/playbook.yml b/test/integration/targets/include_import/run_once/playbook.yml
new file mode 100644
index 00000000..cc1e265f
--- /dev/null
+++ b/test/integration/targets/include_import/run_once/playbook.yml
@@ -0,0 +1,63 @@
+# This playbook exists to document the behavior of run_once when
+# applied to a dynamic include.
+#
+# As with other uses of keywords on dynamic includes, it only affects the include itself.
+# In this case it causes the include to be processed only for ansible_play_hosts[0],
+# which has the side effect of running the tasks only on ansible_play_hosts[0]
+# and delegating facts only for the include itself, not the tasks contained within.
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - add_host:
+ name: "{{ item }}"
+ ansible_connection: local
+ groups:
+ - all
+ loop:
+ - localhost0
+ - localhost1
+
+ - add_host:
+ name: "{{ item }}"
+ groups:
+ - testing
+ ansible_connection: local
+ loop:
+ - localhost2
+ - localhost3
+
+- hosts: all:!testing
+ gather_facts: false
+ vars:
+ lola: untouched
+ tasks:
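+ # with apply, the included set_fact is itself run_once, so the fact is delegated to every play host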
+ - include_tasks:
+ file: include_me.yml
+ apply:
+ run_once: true
+ run_once: true
+
+ - assert:
+ that:
+ - lola == 'wiseman'
+
+- hosts: testing
+ gather_facts: false
+ vars:
+ lola: untouched
+ tasks:
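+ # without apply, run_once only affects the include itself, so the fact is set only on ansible_play_hosts[0]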
+ - include_tasks: include_me.yml
+ run_once: true
+
+ - assert:
+ that:
+ - lola == 'wiseman'
+ when: inventory_hostname == ansible_play_hosts[0]
+
+ - assert:
+ that:
+ - lola == 'untouched'
+ when: inventory_hostname != ansible_play_hosts[0]
diff --git a/test/integration/targets/include_import/runme.sh b/test/integration/targets/include_import/runme.sh
new file mode 100755
index 00000000..28115a5b
--- /dev/null
+++ b/test/integration/targets/include_import/runme.sh
@@ -0,0 +1,126 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=./roles
+
+function gen_task_files() {
+ for i in $(seq -f '%03g' 1 39); do
+ echo -e "- name: Hello Message\n debug:\n msg: Task file ${i}" > "tasks/hello/tasks-file-${i}.yml"
+ done
+}
+
+## Adhoc
+
+ansible -m include_role -a name=role1 localhost
+
+## Import (static)
+
+# Playbook
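+# exactly one warning is expected when additional parameters are passed to import_playbook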
+test "$(ansible-playbook -i ../../inventory playbook/test_import_playbook.yml "$@" 2>&1 | grep -c '\[WARNING\]: Additional parameters in import_playbook')" = 1
+
+ANSIBLE_STRATEGY='linear' ansible-playbook playbook/test_import_playbook_tags.yml -i inventory "$@" --tags canary1,canary22,validate --skip-tags skipme
+
+# Tasks
+ANSIBLE_STRATEGY='linear' ansible-playbook tasks/test_import_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_import_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_import_tasks_tags.yml -i inventory "$@" --tags tasks1,canary1,validate
+
+# Role
+ANSIBLE_STRATEGY='linear' ansible-playbook role/test_import_role.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook role/test_import_role.yml -i inventory "$@"
+
+
+## Include (dynamic)
+
+# Tasks
+ANSIBLE_STRATEGY='linear' ansible-playbook tasks/test_include_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_include_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_include_tasks_tags.yml -i inventory "$@" --tags tasks1,canary1,validate
+
+# Role
+ANSIBLE_STRATEGY='linear' ansible-playbook role/test_include_role.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook role/test_include_role.yml -i inventory "$@"
+
+# https://github.com/ansible/ansible/issues/68515
+ansible-playbook -v role/test_include_role_vars_from.yml 2>&1 | tee test_include_role_vars_from.out
+test "$(grep -E -c 'Expected a string for vars_from but got' test_include_role_vars_from.out)" = 1
+
+## Max Recursion Depth
+# https://github.com/ansible/ansible/issues/23609
+ANSIBLE_STRATEGY='linear' ansible-playbook test_role_recursion.yml -i inventory "$@"
+ANSIBLE_STRATEGY='linear' ansible-playbook test_role_recursion_fqcn.yml -i inventory "$@"
+
+## Nested tasks
+# https://github.com/ansible/ansible/issues/34782
+ANSIBLE_STRATEGY='linear' ansible-playbook test_nested_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='linear' ansible-playbook test_nested_tasks_fqcn.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook test_nested_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook test_nested_tasks_fqcn.yml -i inventory "$@"
+
+## Tons of top level include_tasks
+# https://github.com/ansible/ansible/issues/36053
+# Fixed by https://github.com/ansible/ansible/pull/36075
+gen_task_files
+ANSIBLE_STRATEGY='linear' ansible-playbook test_copious_include_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='linear' ansible-playbook test_copious_include_tasks_fqcn.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook test_copious_include_tasks.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook test_copious_include_tasks_fqcn.yml -i inventory "$@"
+rm -f tasks/hello/*.yml
+
+# Included tasks should inherit attrs from non-dynamic blocks in the parent chain
+# https://github.com/ansible/ansible/pull/38827
+ANSIBLE_STRATEGY='linear' ansible-playbook test_grandparent_inheritance.yml -i inventory "$@"
+ANSIBLE_STRATEGY='linear' ansible-playbook test_grandparent_inheritance_fqcn.yml -i inventory "$@"
+
+# undefined_var
+ANSIBLE_STRATEGY='linear' ansible-playbook undefined_var/playbook.yml -i inventory "$@"
+ANSIBLE_STRATEGY='free' ansible-playbook undefined_var/playbook.yml -i inventory "$@"
+
+# include_ + apply (explicit inheritance)
+ANSIBLE_STRATEGY='linear' ansible-playbook apply/include_apply.yml -i inventory "$@" --tags foo
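+# apply is only valid on dynamic includes; the static import_tasks must reject it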
+set +e
+OUT=$(ANSIBLE_STRATEGY='linear' ansible-playbook apply/import_apply.yml -i inventory "$@" --tags foo 2>&1 | grep 'ERROR! Invalid options for import_tasks: apply')
+set -e
+if [[ -z "$OUT" ]]; then
+ echo "apply on import_tasks did not cause error"
+ exit 1
+fi
+
+# Test that duplicate items in loop are not deduped
+ANSIBLE_STRATEGY='linear' ansible-playbook tasks/test_include_dupe_loop.yml -i inventory "$@" | tee test_include_dupe_loop.out
+test "$(grep -c '"item=foo"' test_include_dupe_loop.out)" = 3
+ANSIBLE_STRATEGY='free' ansible-playbook tasks/test_include_dupe_loop.yml -i inventory "$@" | tee test_include_dupe_loop.out
+test "$(grep -c '"item=foo"' test_include_dupe_loop.out)" = 3
+
+ansible-playbook public_exposure/playbook.yml -i inventory "$@"
+ansible-playbook public_exposure/no_bleeding.yml -i inventory "$@"
+ansible-playbook public_exposure/no_overwrite_roles.yml -i inventory "$@"
+
+# https://github.com/ansible/ansible/pull/48068
+ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook run_once/playbook.yml "$@"
+
+# https://github.com/ansible/ansible/issues/48936
+ansible-playbook -v handler_addressing/playbook.yml 2>&1 | tee test_handler_addressing.out
+test "$(grep -E -c 'include handler task|ERROR! The requested handler '"'"'do_import'"'"' was not found' test_handler_addressing.out)" = 2
+
+# https://github.com/ansible/ansible/issues/49969
+ansible-playbook -v parent_templating/playbook.yml 2>&1 | tee test_parent_templating.out
+test "$(grep -E -c 'Templating the path of the parent include_tasks failed.' test_parent_templating.out)" = 0
+
+# https://github.com/ansible/ansible/issues/54618
+ansible-playbook test_loop_var_bleed.yaml "$@"
+
+# https://github.com/ansible/ansible/issues/56580
+ansible-playbook valid_include_keywords/playbook.yml "$@"
+
+# https://github.com/ansible/ansible/issues/64902
+ansible-playbook tasks/test_allow_single_role_dup.yml 2>&1 | tee test_allow_single_role_dup.out
+test "$(grep -c 'ok=3' test_allow_single_role_dup.out)" = 1
+
+# https://github.com/ansible/ansible/issues/66764
+ANSIBLE_HOST_PATTERN_MISMATCH=error ansible-playbook empty_group_warning/playbook.yml
+
+ansible-playbook test_include_loop_fqcn.yml "$@"
diff --git a/test/integration/targets/include_import/tasks/debug_item.yml b/test/integration/targets/include_import/tasks/debug_item.yml
new file mode 100644
index 00000000..025e132d
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/debug_item.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: "item={{ item }}"
diff --git a/test/integration/targets/include_import/tasks/hello/.gitignore b/test/integration/targets/include_import/tasks/hello/.gitignore
new file mode 100644
index 00000000..b4602e78
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/hello/.gitignore
@@ -0,0 +1 @@
+tasks-file-*
diff --git a/test/integration/targets/include_import/tasks/hello/keep b/test/integration/targets/include_import/tasks/hello/keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/hello/keep
diff --git a/test/integration/targets/include_import/tasks/nested/nested.yml b/test/integration/targets/include_import/tasks/nested/nested.yml
new file mode 100644
index 00000000..0bfcdeef
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/nested/nested.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: ../../nestedtasks/nested/nested.yml
diff --git a/test/integration/targets/include_import/tasks/tasks1.yml b/test/integration/targets/include_import/tasks/tasks1.yml
new file mode 100644
index 00000000..e1d83d92
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/tasks1.yml
@@ -0,0 +1,5 @@
+- name: Set variable inside tasks1.yml
+ set_fact:
+ set_in_tasks1: yes
+ tags:
+ - tasks1
diff --git a/test/integration/targets/include_import/tasks/tasks2.yml b/test/integration/targets/include_import/tasks/tasks2.yml
new file mode 100644
index 00000000..1b4c86fc
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/tasks2.yml
@@ -0,0 +1,5 @@
+- name: Set variable inside tasks2.yml
+ set_fact:
+ set_in_tasks2: yes
+ tags:
+ - tasks2
diff --git a/test/integration/targets/include_import/tasks/tasks3.yml b/test/integration/targets/include_import/tasks/tasks3.yml
new file mode 100644
index 00000000..6da37191
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/tasks3.yml
@@ -0,0 +1,5 @@
+- name: Set variable inside tasks3.yml
+ set_fact:
+ set_in_tasks3: yes
+ tags:
+ - tasks3
diff --git a/test/integration/targets/include_import/tasks/tasks4.yml b/test/integration/targets/include_import/tasks/tasks4.yml
new file mode 100644
index 00000000..fc2eb6cb
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/tasks4.yml
@@ -0,0 +1,5 @@
+- name: Set variable inside tasks4.yml
+ set_fact:
+ set_in_tasks4: yes
+ tags:
+ - tasks4
diff --git a/test/integration/targets/include_import/tasks/tasks5.yml b/test/integration/targets/include_import/tasks/tasks5.yml
new file mode 100644
index 00000000..f2ee6b9e
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/tasks5.yml
@@ -0,0 +1,6 @@
+- name: Set variable inside tasks5.yml
+ set_fact:
+ set_in_tasks5: yes
+ tags:
+ - tasks5
+ - canary1
diff --git a/test/integration/targets/include_import/tasks/tasks6.yml b/test/integration/targets/include_import/tasks/tasks6.yml
new file mode 100644
index 00000000..fa03079d
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/tasks6.yml
@@ -0,0 +1,5 @@
+- name: Set variable inside tasks6.yml
+ set_fact:
+ set_in_tasks6: yes
+ tags:
+ - tasks6
diff --git a/test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml b/test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml
new file mode 100644
index 00000000..3a6992fa
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_allow_single_role_dup.yml
@@ -0,0 +1,8 @@
+---
+- name: test for allow_duplicates with single role
+ hosts: localhost
+ gather_facts: false
+ roles:
+ - dup_allowed_role
+ - dup_allowed_role
+ - dup_allowed_role
diff --git a/test/integration/targets/include_import/tasks/test_import_tasks.yml b/test/integration/targets/include_import/tasks/test_import_tasks.yml
new file mode 100644
index 00000000..8f07bb90
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_import_tasks.yml
@@ -0,0 +1,41 @@
+- name: Test import_tasks
+ hosts: testhost
+
+ tasks:
+ - name: Test basic task import
+ import_tasks: tasks1.yml
+
+ - name: Assert that fact was set in import
+ assert:
+ that:
+ - set_in_tasks1
+
+ - name: Test conditional task import
+ import_tasks: tasks2.yml
+ when: no
+
+ - name: Assert that tasks were skipped
+ assert:
+ that:
+ - set_in_tasks2 is not defined
+
+ - block:
+ - name: Import tasks inside a block
+ import_tasks: tasks3.yml
+
+ - name: Assert that task3 was included
+ assert:
+ that:
+ - set_in_tasks3
+
+ always:
+ - name: Import task inside always
+ import_tasks: tasks4.yml
+
+ - name: Validate that variables set in previously imported tasks are passed down.
+ import_tasks: validate3.yml
+
+ - name: Assert that tasks4 was included
+ assert:
+ that:
+ - set_in_tasks4
diff --git a/test/integration/targets/include_import/tasks/test_import_tasks_tags.yml b/test/integration/targets/include_import/tasks/test_import_tasks_tags.yml
new file mode 100644
index 00000000..3b1d68fc
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_import_tasks_tags.yml
@@ -0,0 +1,23 @@
+- name: Test import_tasks using tags
+ hosts: testhost
+
+ tasks:
+ - name: Import tasks1.yml
+ import_tasks: tasks1.yml
+
+ - name: Import tasks4.yml using tag on import task
+ import_tasks: tasks4.yml
+ tags:
+ - canary1
+
+ - name: Import tasks2.yml
+ import_tasks: tasks2.yml
+
+ - name: Assert that appropriate tasks were run
+ assert:
+ that:
+ - set_in_tasks1
+ - set_in_tasks4
+ - set_in_tasks2 is not defined
+ tags:
+ - validate
diff --git a/test/integration/targets/include_import/tasks/test_include_dupe_loop.yml b/test/integration/targets/include_import/tasks/test_include_dupe_loop.yml
new file mode 100644
index 00000000..b7b9301d
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_include_dupe_loop.yml
@@ -0,0 +1,8 @@
+- name: Test Include Duplicate Loop Items
+ hosts: testhost
+ tasks:
+ - include_tasks: debug_item.yml
+ loop:
+ - foo
+ - foo
+ - foo
diff --git a/test/integration/targets/include_import/tasks/test_include_tasks.yml b/test/integration/targets/include_import/tasks/test_include_tasks.yml
new file mode 100644
index 00000000..ebe2273e
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_include_tasks.yml
@@ -0,0 +1,44 @@
+- name: Test include_tasks
+ hosts: testhost
+
+ tasks:
+ - name: Test basic task include
+ include_tasks: tasks1.yml
+
+ - name: Assert that fact was set in include
+ assert:
+ that:
+ - set_in_tasks1
+
+ - name: Test conditional task include
+ include_tasks: tasks2.yml
+ when: no
+
+ - name: Assert that tasks were skipped
+ assert:
+ that:
+ - set_in_tasks2 is not defined
+
+ - block:
+ - name: Include tasks inside a block
+ include_tasks: tasks3.yml
+
+ - name: Assert that task3 was included
+ assert:
+ that:
+ - set_in_tasks3
+
+ always:
+ - name: Include task inside always
+ include_tasks: tasks4.yml
+
+ - name: Validate that variables set in previously imported tasks are passed down
+ include_tasks: validate3.yml
+
+ - name: Assert that tasks4 was included
+ assert:
+ that:
+ - set_in_tasks4
+
+ - name: include_tasks + action
+ action: include_tasks tasks1.yml
diff --git a/test/integration/targets/include_import/tasks/test_include_tasks_tags.yml b/test/integration/targets/include_import/tasks/test_include_tasks_tags.yml
new file mode 100644
index 00000000..3fe43809
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_include_tasks_tags.yml
@@ -0,0 +1,25 @@
+- name: Test include_tasks using tags
+ hosts: testhost
+
+ tasks:
+ # This should not be included
+ - name: Include tasks1.yml
+ include_tasks: tasks1.yml
+
+ # This should be included but tasks inside should not run because they do not have
+ # the canary1 tag and tasks2 is not in the list of tags for the ansible-playbook command
+ - name: Include tasks2.yml
+ include_tasks: tasks2.yml
+ tags:
+ - canary1
+
+ # This should be included and tasks inside should be run
+ - name: Include tasks5.yml using tag on include task
+ include_tasks: tasks5.yml
+ tags:
+ - canary1
+
+ - name: Include validate_tags.yml
+ include_tasks: validate_tags.yml
+ tags:
+ - validate
diff --git a/test/integration/targets/include_import/tasks/test_recursion.yml b/test/integration/targets/include_import/tasks/test_recursion.yml
new file mode 100644
index 00000000..96754ec8
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/test_recursion.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+
+ tasks:
+ - include_role:
+ name: role
+ tasks_from: r1t1.yml
diff --git a/test/integration/targets/include_import/tasks/validate3.yml b/test/integration/targets/include_import/tasks/validate3.yml
new file mode 100644
index 00000000..e3166aa3
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/validate3.yml
@@ -0,0 +1,4 @@
+- name: Assert that the variable set in a previously included task is defined
+ assert:
+ that:
+ - set_in_tasks3
diff --git a/test/integration/targets/include_import/tasks/validate_tags.yml b/test/integration/targets/include_import/tasks/validate_tags.yml
new file mode 100644
index 00000000..e2f3377b
--- /dev/null
+++ b/test/integration/targets/include_import/tasks/validate_tags.yml
@@ -0,0 +1,8 @@
+- name: Assert that appropriate tasks were run
+ assert:
+ that:
+ - set_in_tasks1 is undefined
+ - set_in_tasks2 is undefined
+ - set_in_tasks5
+ tags:
+ - validate
diff --git a/test/integration/targets/include_import/test_copious_include_tasks.yml b/test/integration/targets/include_import/test_copious_include_tasks.yml
new file mode 100644
index 00000000..4564c76e
--- /dev/null
+++ b/test/integration/targets/include_import/test_copious_include_tasks.yml
@@ -0,0 +1,44 @@
+- name: Test many include_tasks
+ hosts: testhost
+ gather_facts: no
+
+ tasks:
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-001.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-002.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-003.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-004.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-005.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-006.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-007.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-008.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-009.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-010.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-011.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-012.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-013.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-014.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-015.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-016.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-017.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-018.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-019.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-020.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-021.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-022.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-023.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-024.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-025.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-026.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-027.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-028.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-029.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-030.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-031.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-032.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-033.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-034.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-035.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-036.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-037.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-038.yml"
+ - include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-039.yml"
diff --git a/test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml b/test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml
new file mode 100644
index 00000000..32fa9abc
--- /dev/null
+++ b/test/integration/targets/include_import/test_copious_include_tasks_fqcn.yml
@@ -0,0 +1,44 @@
+- name: Test many ansible.builtin.include_tasks
+ hosts: testhost
+ gather_facts: no
+
+ tasks:
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-001.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-002.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-003.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-004.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-005.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-006.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-007.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-008.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-009.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-010.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-011.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-012.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-013.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-014.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-015.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-016.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-017.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-018.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-019.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-020.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-021.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-022.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-023.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-024.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-025.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-026.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-027.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-028.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-029.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-030.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-031.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-032.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-033.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-034.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-035.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-036.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-037.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-038.yml"
+ - ansible.builtin.include_tasks: "{{ playbook_dir }}/tasks/hello/tasks-file-039.yml"
diff --git a/test/integration/targets/include_import/test_grandparent_inheritance.yml b/test/integration/targets/include_import/test_grandparent_inheritance.yml
new file mode 100644
index 00000000..45a3d836
--- /dev/null
+++ b/test/integration/targets/include_import/test_grandparent_inheritance.yml
@@ -0,0 +1,29 @@
+---
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - debug:
+ var: inventory_hostname
+
+ - name: Test included tasks inherit from block
+ check_mode: true
+ block:
+ - include_tasks: grandchild/block_include_tasks.yml
+
+ - debug:
+ var: block_include_result
+
+ - assert:
+ that:
+ - block_include_result is skipped
+
+ - name: Test included tasks inherit deeply from import
+ import_tasks: grandchild/import.yml
+ check_mode: true
+
+ - debug:
+ var: import_include_include_result
+
+ - assert:
+ that:
+ - import_include_include_result is skipped
diff --git a/test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml b/test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml
new file mode 100644
index 00000000..37a0ad0d
--- /dev/null
+++ b/test/integration/targets/include_import/test_grandparent_inheritance_fqcn.yml
@@ -0,0 +1,29 @@
+---
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - debug:
+ var: inventory_hostname
+
+ - name: Test included tasks inherit from block
+ check_mode: true
+ block:
+ - ansible.builtin.include_tasks: grandchild/block_include_tasks.yml
+
+ - debug:
+ var: block_include_result
+
+ - assert:
+ that:
+ - block_include_result is skipped
+
+ - name: Test included tasks inherit deeply from import
+ ansible.builtin.import_tasks: grandchild/import.yml
+ check_mode: true
+
+ - debug:
+ var: import_include_include_result
+
+ - assert:
+ that:
+ - import_include_include_result is skipped
diff --git a/test/integration/targets/include_import/test_include_loop_fqcn.yml b/test/integration/targets/include_import/test_include_loop_fqcn.yml
new file mode 100644
index 00000000..62d91f22
--- /dev/null
+++ b/test/integration/targets/include_import/test_include_loop_fqcn.yml
@@ -0,0 +1,18 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
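+ # the loop expression is undefined, but the when condition skips the task before the loop is evaluated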
+ - name: skipped include undefined loop
+ ansible.builtin.include_tasks: doesnt_matter.yml
+ loop: '{{ lkjsdflkjsdlfkjsdlfkjsdf }}'
+ when: false
+ register: skipped_include
+
+ - debug:
+ var: skipped_include
+
+ - assert:
+ that:
+ - skipped_include.results is undefined
+ - skipped_include.skip_reason is defined
+ - skipped_include is skipped
diff --git a/test/integration/targets/include_import/test_loop_var_bleed.yaml b/test/integration/targets/include_import/test_loop_var_bleed.yaml
new file mode 100644
index 00000000..a5146f30
--- /dev/null
+++ b/test/integration/targets/include_import/test_loop_var_bleed.yaml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - include_role:
+ name: loop_name_assert
+ loop:
+ - name_from_loop_var
+ loop_control:
+ loop_var: name
diff --git a/test/integration/targets/include_import/test_nested_tasks.yml b/test/integration/targets/include_import/test_nested_tasks.yml
new file mode 100644
index 00000000..7451ec4e
--- /dev/null
+++ b/test/integration/targets/include_import/test_nested_tasks.yml
@@ -0,0 +1,6 @@
+- name: >-
+ verify that multiple levels of nested statements and
+ include+meta don't break included file mechanisms
+ hosts: testhost
+ tasks:
+ - include_tasks: ./tasks/nested/nested.yml
diff --git a/test/integration/targets/include_import/test_nested_tasks_fqcn.yml b/test/integration/targets/include_import/test_nested_tasks_fqcn.yml
new file mode 100644
index 00000000..14e72eed
--- /dev/null
+++ b/test/integration/targets/include_import/test_nested_tasks_fqcn.yml
@@ -0,0 +1,6 @@
+- name: >-
+ verify that multiple levels of nested statements and
+ include+meta don't break included file mechanisms
+ hosts: testhost
+ tasks:
+ - ansible.builtin.include_tasks: ./tasks/nested/nested.yml
diff --git a/test/integration/targets/include_import/test_role_recursion.yml b/test/integration/targets/include_import/test_role_recursion.yml
new file mode 100644
index 00000000..ad2489a0
--- /dev/null
+++ b/test/integration/targets/include_import/test_role_recursion.yml
@@ -0,0 +1,7 @@
+- name: Test max recursion depth
+ hosts: testhost
+
+ tasks:
+ - import_role:
+ name: role1
+ tasks_from: r1t01.yml
diff --git a/test/integration/targets/include_import/test_role_recursion_fqcn.yml b/test/integration/targets/include_import/test_role_recursion_fqcn.yml
new file mode 100644
index 00000000..13d8d2cb
--- /dev/null
+++ b/test/integration/targets/include_import/test_role_recursion_fqcn.yml
@@ -0,0 +1,7 @@
+- name: Test max recursion depth
+ hosts: testhost
+
+ tasks:
+ - ansible.builtin.import_role:
+ name: role1
+ tasks_from: r1t01.yml
diff --git a/test/integration/targets/include_import/undefined_var/include_tasks.yml b/test/integration/targets/include_import/undefined_var/include_tasks.yml
new file mode 100644
index 00000000..56f06c97
--- /dev/null
+++ b/test/integration/targets/include_import/undefined_var/include_tasks.yml
@@ -0,0 +1,5 @@
+---
+
+- debug:
+ msg: "This message comes from an 'include_tasks'-task! :-)"
+ register: "_include_tasks_task_result"
diff --git a/test/integration/targets/include_import/undefined_var/include_that_defines_var.yml b/test/integration/targets/include_import/undefined_var/include_that_defines_var.yml
new file mode 100644
index 00000000..7f24a435
--- /dev/null
+++ b/test/integration/targets/include_import/undefined_var/include_that_defines_var.yml
@@ -0,0 +1,5 @@
+- vars:
+ _undefined: 'yes'
+ block:
+ - set_fact:
+ _include_defined_result: 'good'
diff --git a/test/integration/targets/include_import/undefined_var/playbook.yml b/test/integration/targets/include_import/undefined_var/playbook.yml
new file mode 100644
index 00000000..0584fa8a
--- /dev/null
+++ b/test/integration/targets/include_import/undefined_var/playbook.yml
@@ -0,0 +1,37 @@
+---
+- hosts: testhost
+ gather_facts: false
+ tasks:
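+ # the dynamic includes below fail because their when conditions reference an undefined variable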
+ - include_tasks: "include_tasks.yml"
+ ignore_errors: True
+ register: "_include_tasks_result"
+ when:
+ - "_undefined == 'yes'"
+
+ - assert:
+ that:
+ - "_include_tasks_result is failed"
+ - "_include_tasks_task_result is not defined"
+ msg: "'include_tasks' did not evaluate it's attached condition and failed"
+
+ - include_role:
+ name: "no_log"
+ ignore_errors: True
+ register: "_include_role_result"
+ when:
+ - "_undefined == 'yes'"
+
+ - assert:
+ that:
+ - "_include_role_result is failed"
+ msg: "'include_role' did not evaluate it's attached condition and failed"
+
+ - include: include_that_defines_var.yml
+ static: yes
+ when:
+ - "_undefined == 'yes'"
+
+ - assert:
+ that:
+ - _include_defined_result == 'good'
diff --git a/test/integration/targets/include_import/valid_include_keywords/include_me.yml b/test/integration/targets/include_import/valid_include_keywords/include_me.yml
new file mode 100644
index 00000000..ab5c6a9c
--- /dev/null
+++ b/test/integration/targets/include_import/valid_include_keywords/include_me.yml
@@ -0,0 +1,6 @@
+- debug:
+ msg: include_me
+- assert:
+ that:
+ - loopy == 1
+ - baz == 'qux'
diff --git a/test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml b/test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml
new file mode 100644
index 00000000..47b424ad
--- /dev/null
+++ b/test/integration/targets/include_import/valid_include_keywords/include_me_listen.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: listen
diff --git a/test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml b/test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml
new file mode 100644
index 00000000..4501e380
--- /dev/null
+++ b/test/integration/targets/include_import/valid_include_keywords/include_me_notify.yml
@@ -0,0 +1,2 @@
+- debug:
+ msg: notify
diff --git a/test/integration/targets/include_import/valid_include_keywords/playbook.yml b/test/integration/targets/include_import/valid_include_keywords/playbook.yml
new file mode 100644
index 00000000..c70ec81f
--- /dev/null
+++ b/test/integration/targets/include_import/valid_include_keywords/playbook.yml
@@ -0,0 +1,40 @@
+- hosts: localhost
+ gather_facts: false
+ handlers:
+ - include_tasks:
+ file: include_me_listen.yml
+ listen:
+ - include_me_listen
+
+ - name: Include Me Notify
+ include_tasks: include_me_notify.yml
+
+ tasks:
+ - name: Include me
+ include_tasks: include_me.yml
+ args:
+ apply:
+ tags:
+ - bar
+ debugger: ~
+ ignore_errors: false
+ loop:
+ - 1
+ loop_control:
+ loop_var: loopy
+ no_log: false
+ register: this_isnt_useful
+ run_once: true
+ tags:
+ - foo
+ vars:
+ baz: qux
+ when: true
+
+ - command: "true"
+ notify:
+ - include_me_listen
+
+ - command: "true"
+ notify:
+ - Include Me Notify
diff --git a/test/integration/targets/include_parent_role_vars/aliases b/test/integration/targets/include_parent_role_vars/aliases
new file mode 100644
index 00000000..23abb8d3
--- /dev/null
+++ b/test/integration/targets/include_parent_role_vars/aliases
@@ -0,0 +1,2 @@
+# Continuation of special_vars integration tests to test special variables set on role inclusion.
+hidden \ No newline at end of file
diff --git a/test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml b/test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml
new file mode 100644
index 00000000..79b7b1cb
--- /dev/null
+++ b/test/integration/targets/include_parent_role_vars/tasks/included_by_other_role.yml
@@ -0,0 +1,37 @@
+# Copyright 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: ensure our parent role tree contains only our direct parent
+ assert:
+ that:
+ - "ansible_parent_role_names == ['special_vars']"
+
+- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names
+ assert:
+ that:
+ - "ansible_parent_role_names|length == ansible_parent_role_paths|length"
+
+- name: attempt to import ourselves
+ import_role:
+ name: "include_parent_role_vars"
+ tasks_from: "included_by_ourselves.yml"
+
+- name: ensure our parent role tree still contains only our direct parent after importing
+ assert:
+ that:
+ - "ansible_parent_role_names == ['special_vars']"
+
+- name: attempt to include ourselves
+ include_role:
+ name: "include_parent_role_vars"
+ tasks_from: "included_by_ourselves.yml"
+
+- name: ensure our parent role tree still contains only our direct parent after including
+ assert:
+ that:
+ - "ansible_parent_role_names == ['special_vars']"
+
+- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names
+ assert:
+ that:
+ - "ansible_parent_role_names|length == ansible_parent_role_paths|length"
diff --git a/test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml b/test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml
new file mode 100644
index 00000000..3ea93004
--- /dev/null
+++ b/test/integration/targets/include_parent_role_vars/tasks/included_by_ourselves.yml
@@ -0,0 +1,14 @@
+# Copyright 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: check that the inclusion tree shows ourselves as well as our initial parent
+ assert:
+ that:
+ - "ansible_parent_role_names|length == 2"
+ - "ansible_parent_role_names[0] == 'include_parent_role_vars'" # Since we included ourselves, we're the top level
+ - "ansible_parent_role_names[1] == 'special_vars'"
+
+- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names
+ assert:
+ that:
+ - "ansible_parent_role_names|length == ansible_parent_role_paths|length"
diff --git a/test/integration/targets/include_parent_role_vars/tasks/main.yml b/test/integration/targets/include_parent_role_vars/tasks/main.yml
new file mode 100644
index 00000000..56a485bc
--- /dev/null
+++ b/test/integration/targets/include_parent_role_vars/tasks/main.yml
@@ -0,0 +1,21 @@
+# Copyright 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+- name: ensure our parent role tree contains only our direct parent
+ assert:
+ that:
+ - "ansible_parent_role_names == ['special_vars']"
+
+- name: ensure that ansible_parent_role_paths has the same length as ansible_parent_role_names
+ assert:
+ that:
+ - "ansible_parent_role_names|length == ansible_parent_role_paths|length"
+
+# task importing should not affect ansible_parent_role_names
+- name: test task-importing after we've been included by another role
+ import_tasks: "included_by_other_role.yml"
+
+# task inclusion should not affect ansible_parent_role_names
+- name: test task-inclusion after we've been included by another role
+ include_tasks: "included_by_other_role.yml"
diff --git a/test/integration/targets/include_vars-ad-hoc/aliases b/test/integration/targets/include_vars-ad-hoc/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/include_vars-ad-hoc/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/include_vars-ad-hoc/dir/inc.yml b/test/integration/targets/include_vars-ad-hoc/dir/inc.yml
new file mode 100644
index 00000000..c1d24c84
--- /dev/null
+++ b/test/integration/targets/include_vars-ad-hoc/dir/inc.yml
@@ -0,0 +1 @@
+porter: cable
diff --git a/test/integration/targets/include_vars-ad-hoc/runme.sh b/test/integration/targets/include_vars-ad-hoc/runme.sh
new file mode 100755
index 00000000..51b68d21
--- /dev/null
+++ b/test/integration/targets/include_vars-ad-hoc/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible testhost -i ../../inventory -m include_vars -a 'dir/inc.yml' "$@"
+ansible testhost -i ../../inventory -m include_vars -a 'dir=dir' "$@"
diff --git a/test/integration/targets/include_vars/aliases b/test/integration/targets/include_vars/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/include_vars/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/include_vars/defaults/main.yml b/test/integration/targets/include_vars/defaults/main.yml
new file mode 100644
index 00000000..901fb220
--- /dev/null
+++ b/test/integration/targets/include_vars/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+testing: 1
+base_dir: defaults
diff --git a/test/integration/targets/include_vars/tasks/main.yml b/test/integration/targets/include_vars/tasks/main.yml
new file mode 100644
index 00000000..799d7b26
--- /dev/null
+++ b/test/integration/targets/include_vars/tasks/main.yml
@@ -0,0 +1,165 @@
+---
+- name: verify that the default value is indeed 1
+ assert:
+ that:
+ - "testing == 1"
+ - "base_dir == 'defaults'"
+
+- name: include the vars/environments/development/all.yml
+ include_vars:
+ file: environments/development/all.yml
+ register: included_one_file
+
+- name: verify that the correct file has been loaded and the value is indeed 789
+ assert:
+ that:
+ - "testing == 789"
+ - "base_dir == 'environments/development'"
+ - "{{ included_one_file.ansible_included_var_files | length }} == 1"
+ - "'vars/environments/development/all.yml' in included_one_file.ansible_included_var_files[0]"
+
+- name: include the vars/environments/development/all.yml and save results in all
+ include_vars:
+ file: environments/development/all.yml
+ name: all
+
+- name: verify that the values are stored in the all variable
+ assert:
+ that:
+ - "all['testing'] == 789"
+ - "all['base_dir'] == 'environments/development'"
+
+- name: include the all directory in vars
+ include_vars:
+ dir: all
+ depth: 1
+
+- name: verify that the included value is indeed 123
+ assert:
+ that:
+ - "testing == 123"
+ - "base_dir == 'all'"
+
+- name: include var files with extension only
+ include_vars:
+ dir: webapp
+ ignore_unknown_extensions: True
+ extensions: ['', 'yaml', 'yml', 'json']
+ register: include_without_file_extension
+
+- name: verify that only files with valid extensions are loaded
+ assert:
+ that:
+ - webapp_version is defined
+ - "'file_without_extension' in '{{ include_without_file_extension.ansible_included_var_files | join(' ') }}'"
+
+- name: include every directory in vars
+ include_vars:
+ dir: vars
+ extensions: ['', 'yaml', 'yml', 'json']
+ register: include_every_dir
+
+- name: verify that the correct files have been loaded and that later files overwrite earlier ones in alphabetical order
+ assert:
+ that:
+ - "testing == 456"
+ - "base_dir == 'services'"
+ - "webapp_containers == 10"
+ - "{{ include_every_dir.ansible_included_var_files | length }} == 7"
+ - "'vars/all/all.yml' in include_every_dir.ansible_included_var_files[0]"
+ - "'vars/environments/development/all.yml' in include_every_dir.ansible_included_var_files[1]"
+ - "'vars/environments/development/services/webapp.yml' in include_every_dir.ansible_included_var_files[2]"
+ - "'vars/services/webapp.yml' in include_every_dir.ansible_included_var_files[5]"
+ - "'vars/webapp/file_without_extension' in include_every_dir.ansible_included_var_files[6]"
+
+- name: include every directory in vars except files matching webapp.yml
+ include_vars:
+ dir: vars
+ ignore_files:
+ - webapp.yml
+ - file_without_extension
+ register: include_without_webapp
+
+- name: verify that the webapp.yml file was not included
+ assert:
+ that:
+ - "testing == 789"
+ - "base_dir == 'environments/development'"
+ - "{{ include_without_webapp.ansible_included_var_files | length }} == 4"
+ - "'webapp.yml' not in '{{ include_without_webapp.ansible_included_var_files | join(' ') }}'"
+ - "'file_without_extension' not in '{{ include_without_webapp.ansible_included_var_files | join(' ') }}'"
+
+- name: include only files matching webapp.yml
+ include_vars:
+ dir: environments
+ files_matching: webapp.yml
+ register: include_match_webapp
+
+- name: verify that only files matching webapp.yml and in the environments directory get loaded.
+ assert:
+ that:
+ - "testing == 101112"
+ - "base_dir == 'development/services'"
+ - "webapp_containers == 20"
+ - "{{ include_match_webapp.ansible_included_var_files | length }} == 1"
+ - "'vars/environments/development/services/webapp.yml' in include_match_webapp.ansible_included_var_files[0]"
+ - "'all.yml' not in '{{ include_match_webapp.ansible_included_var_files | join(' ') }}'"
+
+- name: include only files matching webapp.yml and store results in webapp
+ include_vars:
+ dir: environments
+ files_matching: webapp.yml
+ name: webapp
+
+- name: verify that only files matching webapp.yml and in the environments directory get loaded into stored variable webapp.
+ assert:
+ that:
+ - "webapp['testing'] == 101112"
+ - "webapp['base_dir'] == 'development/services'"
+ - "webapp['webapp_containers'] == 20"
+
+- name: include var files without extension
+ include_vars:
+ dir: webapp
+ ignore_unknown_extensions: False
+ register: include_with_unknown_file_extension
+ ignore_errors: True
+
+- name: verify that including files without valid extensions fails with an error
+ assert:
+ that:
+ - "'a valid extension' in include_with_unknown_file_extension.message"
+
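+# the folded scalar appends a trailing newline, which include_vars must strip from the raw params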
+- name: include var with raw params
+ include_vars: >
+ services/service_vars.yml
+
+- name: Verify that a file passed via raw params is included without a trailing newline character
+ assert:
+ that:
+ - "service_name == 'my_custom_service'"
+
+- name: Check NoneType for raw params and file
+ include_vars:
+ file: "{{ lookup('first_found', possible_files, errors='ignore') }}"
+ vars:
+ possible_files:
+ - "does_not_exist.yml"
+ ignore_errors: True
+ register: include_with_non_existent_file
+
+- name: Verify that the user gets the correct error message when the file is not found
+ assert:
+ that:
+ - "'Could not find file' in include_with_non_existent_file.message"
+
+- name: include var (FQCN) with raw params
+ ansible.builtin.include_vars: >
+ services/service_vars_fqcn.yml
+
+- name: Verify that FQCN of include_vars works
+ assert:
+ that:
+ - "'my_custom_service' == service_name_fqcn"
+ - "'my_custom_service' == service_name_tmpl_fqcn"
diff --git a/test/integration/targets/include_vars/vars/all/all.yml b/test/integration/targets/include_vars/vars/all/all.yml
new file mode 100644
index 00000000..14c3e92b
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/all/all.yml
@@ -0,0 +1,3 @@
+---
+testing: 123
+base_dir: all
diff --git a/test/integration/targets/include_vars/vars/environments/development/all.yml b/test/integration/targets/include_vars/vars/environments/development/all.yml
new file mode 100644
index 00000000..9f370de5
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/environments/development/all.yml
@@ -0,0 +1,3 @@
+---
+testing: 789
+base_dir: 'environments/development'
diff --git a/test/integration/targets/include_vars/vars/environments/development/services/webapp.yml b/test/integration/targets/include_vars/vars/environments/development/services/webapp.yml
new file mode 100644
index 00000000..a0a809c9
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/environments/development/services/webapp.yml
@@ -0,0 +1,4 @@
+---
+testing: 101112
+base_dir: 'development/services'
+webapp_containers: 20
diff --git a/test/integration/targets/include_vars/vars/services/service_vars.yml b/test/integration/targets/include_vars/vars/services/service_vars.yml
new file mode 100644
index 00000000..96b05d6c
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/services/service_vars.yml
@@ -0,0 +1,2 @@
+---
+service_name: 'my_custom_service' \ No newline at end of file
diff --git a/test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml b/test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml
new file mode 100644
index 00000000..2c04fee5
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/services/service_vars_fqcn.yml
@@ -0,0 +1,3 @@
+---
+service_name_fqcn: 'my_custom_service'
+service_name_tmpl_fqcn: '{{ service_name_fqcn }}' \ No newline at end of file
diff --git a/test/integration/targets/include_vars/vars/services/webapp.yml b/test/integration/targets/include_vars/vars/services/webapp.yml
new file mode 100644
index 00000000..f0dcc8b5
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/services/webapp.yml
@@ -0,0 +1,4 @@
+---
+testing: 456
+base_dir: services
+webapp_containers: 10
diff --git a/test/integration/targets/include_vars/vars/webapp/file_without_extension b/test/integration/targets/include_vars/vars/webapp/file_without_extension
new file mode 100644
index 00000000..9cfb60fb
--- /dev/null
+++ b/test/integration/targets/include_vars/vars/webapp/file_without_extension
@@ -0,0 +1,2 @@
+---
+webapp_version: "1"
diff --git a/test/integration/targets/include_when_parent_is_dynamic/aliases b/test/integration/targets/include_when_parent_is_dynamic/aliases
new file mode 100644
index 00000000..41c99f51
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_dynamic/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/python2.6 # include is controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/include_when_parent_is_dynamic/playbook.yml b/test/integration/targets/include_when_parent_is_dynamic/playbook.yml
new file mode 100644
index 00000000..afdbc54c
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_dynamic/playbook.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_tasks: tasks.yml
diff --git a/test/integration/targets/include_when_parent_is_dynamic/runme.sh b/test/integration/targets/include_when_parent_is_dynamic/runme.sh
new file mode 100755
index 00000000..b136965f
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_dynamic/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eu
+
+ansible-playbook playbook.yml "$@" > output.log 2>&1 || true
+
+if grep "task should always execute" output.log >/dev/null; then
+ echo "Test passed (playbook failed with expected output, output not shown)."
+ exit 0
+fi
+
+cat output.log
+exit 1
diff --git a/test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml b/test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml
new file mode 100644
index 00000000..101a18ab
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_dynamic/syntax_error.yml
@@ -0,0 +1 @@
+intentional syntax error which should NOT be encountered
diff --git a/test/integration/targets/include_when_parent_is_dynamic/tasks.yml b/test/integration/targets/include_when_parent_is_dynamic/tasks.yml
new file mode 100644
index 00000000..6831245c
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_dynamic/tasks.yml
@@ -0,0 +1,12 @@
+# intentionally stop execution of the play before reaching the include below
+# if the include is dynamic as expected it will not trigger a syntax error
+# however, if the include is static a syntax error will occur
+- name: EXPECTED FAILURE
+ fail:
+ msg:
+ This task should always execute.
+ The playbook would have failed due to a syntax error in 'syntax_error.yml' when attempting a static include of that file.
+
+# perform an include task which should be static if all of the task's parents are static, otherwise it should be dynamic
+# this file was loaded using include_tasks, which is dynamic, so this include should also be dynamic
+- include: syntax_error.yml
diff --git a/test/integration/targets/include_when_parent_is_static/aliases b/test/integration/targets/include_when_parent_is_static/aliases
new file mode 100644
index 00000000..41c99f51
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_static/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/python2.6 # include is controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/include_when_parent_is_static/playbook.yml b/test/integration/targets/include_when_parent_is_static/playbook.yml
new file mode 100644
index 00000000..6189873e
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_static/playbook.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - import_tasks: tasks.yml
diff --git a/test/integration/targets/include_when_parent_is_static/runme.sh b/test/integration/targets/include_when_parent_is_static/runme.sh
new file mode 100755
index 00000000..33728bdf
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_static/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eu
+
+ansible-playbook playbook.yml "$@" > output.log 2>&1 || true
+
+if grep "intentional syntax error" output.log >/dev/null; then
+ echo "Test passed (playbook failed with expected output, output not shown)."
+ exit 0
+fi
+
+cat output.log
+exit 1
diff --git a/test/integration/targets/include_when_parent_is_static/syntax_error.yml b/test/integration/targets/include_when_parent_is_static/syntax_error.yml
new file mode 100644
index 00000000..e1a629ce
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_static/syntax_error.yml
@@ -0,0 +1 @@
+intentional syntax error which SHOULD be encountered
diff --git a/test/integration/targets/include_when_parent_is_static/tasks.yml b/test/integration/targets/include_when_parent_is_static/tasks.yml
new file mode 100644
index 00000000..a234a3dd
--- /dev/null
+++ b/test/integration/targets/include_when_parent_is_static/tasks.yml
@@ -0,0 +1,12 @@
+# intentionally stop execution of the play before reaching the include below
+# if the include is static as expected it will trigger a syntax error
+# however, if the include is dynamic a syntax error will not occur
+- name: EXPECTED SUCCESS
+ fail:
+ msg:
+ This task should never execute.
+ The playbook should have failed due to a syntax error in 'syntax_error.yml' when attempting a static include of that file.
+
+# perform an include task which should be static if all of the task's parents are static, otherwise it should be dynamic
+# this file was loaded using import_tasks, which is static, so this include should also be static
+- include: syntax_error.yml
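The rule the two targets above exercise can be sketched as follows (a minimal illustration with hypothetical file names, not part of the fixtures): a bare `include:` is static only when every parent in its chain is static; otherwise it is resolved dynamically at runtime.

    # site.yml
    - hosts: localhost
      gather_facts: no
      tasks:
        - include_tasks: parent.yml   # dynamic entry point

    # parent.yml
    - include: child.yml   # a dynamic parent makes this include dynamic, so
                           # child.yml is parsed only when this task runs

Replacing include_tasks with import_tasks above would make the whole chain static, so child.yml would be parsed, and any syntax error raised, at playbook load time.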
diff --git a/test/integration/targets/includes/aliases b/test/integration/targets/includes/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/includes/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/includes/roles/test_includes/handlers/main.yml b/test/integration/targets/includes/roles/test_includes/handlers/main.yml
new file mode 100644
index 00000000..7d3e625f
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/handlers/main.yml
@@ -0,0 +1 @@
+- include: more_handlers.yml
diff --git a/test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml b/test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml
new file mode 100644
index 00000000..c85d53cc
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/handlers/more_handlers.yml
@@ -0,0 +1,12 @@
+- name: included_handler
+ set_fact:
+ ca: 4001
+ cb: 4002
+ cc: 4003
+
+- name: verify_handler
+ assert:
+ that:
+ - "ca == 4001"
+ - "cb == 4002"
+ - "cc == 4003"
diff --git a/test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml b/test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml
new file mode 100644
index 00000000..62416705
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/tasks/branch_toplevel.yml
@@ -0,0 +1,9 @@
+# 'canary2' is used instead of 'canary'; otherwise a "recursive loop detected
+# in template string" error occurs when both includes use static=yes
+- include: 'leaf_sublevel.yml canary2={{ canary }}'
+ static: yes
+ when: 'nested_include_static|bool' # the value for 'static' cannot be a variable, hence the use of 'when'
+
+- include: 'leaf_sublevel.yml canary2={{ canary }}'
+ static: no
+ when: 'not nested_include_static|bool'
diff --git a/test/integration/targets/includes/roles/test_includes/tasks/empty.yml b/test/integration/targets/includes/roles/test_includes/tasks/empty.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/tasks/empty.yml
diff --git a/test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml b/test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml
new file mode 100644
index 00000000..6f4c0480
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/tasks/included_task1.yml
@@ -0,0 +1,9 @@
+- set_fact:
+ ca: "{{ a }}"
+- debug: var=ca
+- set_fact:
+ cb: "{{b}}"
+- debug: var=cb
+- set_fact:
+ cc: "{{ c }}"
+- debug: var=cc
diff --git a/test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml b/test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml
new file mode 100644
index 00000000..06632017
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/tasks/leaf_sublevel.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ canary_fact: '{{ canary2 }}'
diff --git a/test/integration/targets/includes/roles/test_includes/tasks/main.yml b/test/integration/targets/includes/roles/test_includes/tasks/main.yml
new file mode 100644
index 00000000..6fcac9eb
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/tasks/main.yml
@@ -0,0 +1,106 @@
+# test code for task includes
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+- include: included_task1.yml a=1 b=2 c=3
+
+- name: verify non-variable include params
+ assert:
+ that:
+ - "ca == '1'"
+ - "cb == '2'"
+ - "cc == '3'"
+
+- set_fact:
+ a: 101
+ b: 102
+ c: 103
+
+- include: included_task1.yml a={{a}} b={{b}} c=103
+
+- name: verify variable include params
+ assert:
+ that:
+ - "ca == 101"
+ - "cb == 102"
+ - "cc == 103"
+
+# Test that strings are not turned into numbers
+- set_fact:
+ a: "101"
+ b: "102"
+ c: "103"
+
+- include: included_task1.yml a={{a}} b={{b}} c=103
+
+- name: verify variable include params (strings are preserved)
+ assert:
+ that:
+ - "ca == '101'"
+ - "cb == '102'"
+ - "cc == '103'"
+
+# now try long form includes
+
+- include: included_task1.yml
+ vars:
+ a: 201
+ b: 202
+ c: 203
+
+- debug: var=a
+- debug: var=b
+- debug: var=c
+
+- name: verify long-form include params
+ assert:
+ that:
+ - "ca == 201"
+ - "cb == 202"
+ - "cc == 203"
+
+- name: test handlers with includes
+ shell: echo 1
+ notify:
+ # both of these come via a handler include
+ - included_handler
+ - verify_handler
+
+- include: branch_toplevel.yml canary=value1 nested_include_static=no
+ static: no
+- assert:
+ that:
+ - 'canary_fact == "value1"'
+
+- include: branch_toplevel.yml canary=value2 nested_include_static=yes
+ static: no
+- assert:
+ that:
+ - 'canary_fact == "value2"'
+
+- include: branch_toplevel.yml canary=value3 nested_include_static=no
+ static: yes
+- assert:
+ that:
+ - 'canary_fact == "value3"'
+
+- include: branch_toplevel.yml canary=value4 nested_include_static=yes
+ static: yes
+- assert:
+ that:
+ - 'canary_fact == "value4"'
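A compact reminder of the parameter-passing semantics the assertions above verify (illustrative only, not part of the fixture): inline `key=value` include parameters always arrive as strings, while parameters passed under `vars:` keep their YAML types.

    - include: included_task1.yml a=1   # inside the include, a is the string '1'

    - include: included_task1.yml
      vars:
        a: 1                            # inside the include, a is the integer 1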
diff --git a/test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml b/test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml
new file mode 100644
index 00000000..862b051c
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes/tasks/not_a_role_task.yml
@@ -0,0 +1,4 @@
+- set_fact:
+ ca: 33000
+ cb: 33001
+ cc: 33002
diff --git a/test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml b/test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml
new file mode 100644
index 00000000..d9c32f4f
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes_free/tasks/inner.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ inner: "reached"
diff --git a/test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml b/test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml
new file mode 100644
index 00000000..5b4ce040
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes_free/tasks/inner_fqcn.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ inner_fqcn: "reached"
diff --git a/test/integration/targets/includes/roles/test_includes_free/tasks/main.yml b/test/integration/targets/includes/roles/test_includes_free/tasks/main.yml
new file mode 100644
index 00000000..5ae7882f
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes_free/tasks/main.yml
@@ -0,0 +1,9 @@
+- name: this needs to be here
+ debug:
+ msg: "hello"
+- include: inner.yml
+ with_items:
+ - '1'
+- ansible.builtin.include: inner_fqcn.yml
+ with_items:
+ - '1'
diff --git a/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml
new file mode 100644
index 00000000..fa4ec93e
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/inner.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ inner_host_pinned: "reached"
diff --git a/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml
new file mode 100644
index 00000000..7bc19faa
--- /dev/null
+++ b/test/integration/targets/includes/roles/test_includes_host_pinned/tasks/main.yml
@@ -0,0 +1,6 @@
+- name: this needs to be here
+ debug:
+ msg: "hello"
+- include: inner.yml
+ with_items:
+ - '1'
diff --git a/test/integration/targets/includes/runme.sh b/test/integration/targets/includes/runme.sh
new file mode 100755
index 00000000..dff40029
--- /dev/null
+++ b/test/integration/targets/includes/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_includes.yml -i ../../inventory "$@"
diff --git a/test/integration/targets/includes/test_include_free.yml b/test/integration/targets/includes/test_include_free.yml
new file mode 100644
index 00000000..dedad734
--- /dev/null
+++ b/test/integration/targets/includes/test_include_free.yml
@@ -0,0 +1,10 @@
+- hosts: testhost
+ gather_facts: no
+ strategy: free
+ roles:
+ - test_includes_free
+ tasks:
+ - assert:
+ that:
+ - "inner == 'reached'"
+ - "inner_fqcn == 'reached'"
diff --git a/test/integration/targets/includes/test_include_host_pinned.yml b/test/integration/targets/includes/test_include_host_pinned.yml
new file mode 100644
index 00000000..6ff92c66
--- /dev/null
+++ b/test/integration/targets/includes/test_include_host_pinned.yml
@@ -0,0 +1,9 @@
+- hosts: testhost
+ gather_facts: no
+ strategy: host_pinned
+ roles:
+ - test_includes_host_pinned
+ tasks:
+ - assert:
+ that:
+ - "inner_host_pinned == 'reached'"
diff --git a/test/integration/targets/includes/test_includes.yml b/test/integration/targets/includes/test_includes.yml
new file mode 100644
index 00000000..0bcebd4f
--- /dev/null
+++ b/test/integration/targets/includes/test_includes.yml
@@ -0,0 +1,7 @@
+- include: test_includes2.yml parameter1=asdf parameter2=jkl
+
+- include: test_includes3.yml
+
+- include: test_include_free.yml
+
+- include: test_include_host_pinned.yml
diff --git a/test/integration/targets/includes/test_includes2.yml b/test/integration/targets/includes/test_includes2.yml
new file mode 100644
index 00000000..a32e8513
--- /dev/null
+++ b/test/integration/targets/includes/test_includes2.yml
@@ -0,0 +1,22 @@
+- name: verify playbook includes can take parameters
+ hosts: testhost
+ tasks:
+ - assert:
+ that:
+ - "parameter1 == 'asdf'"
+ - "parameter2 == 'jkl'"
+
+- name: verify task include logic
+ hosts: testhost
+ gather_facts: True
+ roles:
+ - role: test_includes
+ tags: test_includes
+ tasks:
+ - include: roles/test_includes/tasks/not_a_role_task.yml
+ - include: roles/test_includes/tasks/empty.yml
+ - assert:
+ that:
+ - "ca == 33000"
+ - "cb == 33001"
+ - "cc == 33002"
diff --git a/test/integration/targets/includes/test_includes3.yml b/test/integration/targets/includes/test_includes3.yml
new file mode 100644
index 00000000..0b4c6312
--- /dev/null
+++ b/test/integration/targets/includes/test_includes3.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+ tasks:
+ - include: test_includes4.yml
+ with_items: ["a"]
+ loop_control:
+ loop_var: r
diff --git a/test/integration/targets/includes/test_includes4.yml b/test/integration/targets/includes/test_includes4.yml
new file mode 100644
index 00000000..bee906bd
--- /dev/null
+++ b/test/integration/targets/includes/test_includes4.yml
@@ -0,0 +1,2 @@
+- set_fact:
+ p: 1
diff --git a/test/integration/targets/includes_race/aliases b/test/integration/targets/includes_race/aliases
new file mode 100644
index 00000000..fff62d9f
--- /dev/null
+++ b/test/integration/targets/includes_race/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group5
+skip/aix
diff --git a/test/integration/targets/includes_race/inventory b/test/integration/targets/includes_race/inventory
new file mode 100644
index 00000000..87879294
--- /dev/null
+++ b/test/integration/targets/includes_race/inventory
@@ -0,0 +1,30 @@
+host001 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host002 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host003 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host004 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host005 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host006 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host007 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host008 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host009 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host010 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host011 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host012 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host013 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host014 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host015 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host016 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host017 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host018 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host019 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host020 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host021 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host022 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host023 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host024 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host025 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host026 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host027 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host028 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host029 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+host030 ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml b/test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml
new file mode 100644
index 00000000..cee459a2
--- /dev/null
+++ b/test/integration/targets/includes_race/roles/random_sleep/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# tasks file for random_sleep
+- name: Generate sleep time
+ set_fact:
+ sleep_time: "{{ 3 | random }}"
+
+- name: Do random sleep
+ shell: sleep "{{ sleep_time }}"
diff --git a/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml
new file mode 100644
index 00000000..36b08dcb
--- /dev/null
+++ b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact1.yml
@@ -0,0 +1,4 @@
+---
+- name: Set fact1
+ set_fact:
+ fact1: yay
diff --git a/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml
new file mode 100644
index 00000000..865f130d
--- /dev/null
+++ b/test/integration/targets/includes_race/roles/set_a_fact/tasks/fact2.yml
@@ -0,0 +1,4 @@
+---
+- name: Set fact2
+ set_fact:
+ fact2: yay
diff --git a/test/integration/targets/includes_race/runme.sh b/test/integration/targets/includes_race/runme.sh
new file mode 100755
index 00000000..2261d271
--- /dev/null
+++ b/test/integration/targets/includes_race/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_includes_race.yml -i inventory -v "$@"
diff --git a/test/integration/targets/includes_race/test_includes_race.yml b/test/integration/targets/includes_race/test_includes_race.yml
new file mode 100644
index 00000000..20f7dddd
--- /dev/null
+++ b/test/integration/targets/includes_race/test_includes_race.yml
@@ -0,0 +1,19 @@
+- hosts: all
+ strategy: free
+ gather_facts: false
+ tasks:
+ - include_role:
+ name: random_sleep
+ - block:
+ - name: set a fact (1)
+ include_role:
+ name: set_a_fact
+ tasks_from: fact1.yml
+ - name: set a fact (2)
+ include_role:
+ name: set_a_fact
+ tasks_from: fact2.yml
+ - name: include didn't run
+ fail:
+ msg: "set_a_fact didn't run fact1 {{ fact1 | default('not defined')}} fact2: {{ fact2 | default('not defined') }}"
+ when: (fact1 is not defined or fact2 is not defined)
diff --git a/test/integration/targets/infra/aliases b/test/integration/targets/infra/aliases
new file mode 100644
index 00000000..887d7029
--- /dev/null
+++ b/test/integration/targets/infra/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group3
+needs/file/hacking/test-module.py
+needs/file/lib/ansible/modules/ping.py
diff --git a/test/integration/targets/infra/inventory.local b/test/integration/targets/infra/inventory.local
new file mode 100644
index 00000000..2baa1f88
--- /dev/null
+++ b/test/integration/targets/infra/inventory.local
@@ -0,0 +1,2 @@
+testhost ansible_connection=local
+
diff --git a/test/integration/targets/infra/library/test.py b/test/integration/targets/infra/library/test.py
new file mode 100644
index 00000000..93860575
--- /dev/null
+++ b/test/integration/targets/infra/library/test.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(),
+ )
+ result = {
+ 'selinux_special_fs': module._selinux_special_fs,
+ 'tmpdir': module._tmpdir,
+ 'keep_remote_files': module._keep_remote_files,
+ 'version': module.ansible_version,
+ }
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/infra/runme.sh b/test/integration/targets/infra/runme.sh
new file mode 100755
index 00000000..c4d84572
--- /dev/null
+++ b/test/integration/targets/infra/runme.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+set -ux
+
+# ensure fail/assert work locally and can stop execution with non-zero exit code
+PB_OUT=$(ansible-playbook -i inventory.local test_test_infra.yml)
+APB_RC=$?
+echo "$PB_OUT"
+echo "rc was $APB_RC (must be non-zero)"
+[ ${APB_RC} -ne 0 ]
+echo "ensure playbook output shows assert/fail works (True)"
+echo "$PB_OUT" | grep -F "fail works (True)" || exit 1
+echo "$PB_OUT" | grep -F "assert works (True)" || exit 1
+
+# ensure we work using all specified test args, overridden inventory, etc
+PB_OUT=$(ansible-playbook -i ../../inventory test_test_infra.yml "$@")
+APB_RC=$?
+echo "$PB_OUT"
+echo "rc was $APB_RC (must be non-zero)"
+[ ${APB_RC} -ne 0 ]
+echo "ensure playbook output shows assert/fail works (True)"
+echo "$PB_OUT" | grep -F "fail works (True)" || exit 1
+echo "$PB_OUT" | grep -F "assert works (True)" || exit 1
+
+set -e
+
+PING_MODULE_PATH="../../../../lib/ansible/modules/ping.py"
+
+# ensure test-module.py script works without passing Python interpreter path
+../../../../hacking/test-module.py -m "$PING_MODULE_PATH"
+
+# ensure test-module.py script works when an explicit Python interpreter path is passed
+../../../../hacking/test-module.py -m "$PING_MODULE_PATH" -I ansible_python_interpreter="$(which python)"
+
+# ensure module.ansible_version is defined when using test-module.py
+../../../../hacking/test-module.py -m library/test.py -I ansible_python_interpreter="$(which python)" <<< '{"ANSIBLE_MODULE_ARGS": {}}'
+
+# ensure exercising module code locally works
+python -m ansible.modules.file <<< '{"ANSIBLE_MODULE_ARGS": {"path": "/path/to/file", "state": "absent"}}'
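For reference, the stdin convention used for ansible.modules.file above also works for the small module added in library/test.py, assuming the ansible package is importable (a sketch, not part of the test):

    python library/test.py <<< '{"ANSIBLE_MODULE_ARGS": {}}'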
diff --git a/test/integration/targets/infra/test_test_infra.yml b/test/integration/targets/infra/test_test_infra.yml
new file mode 100644
index 00000000..706f9b8f
--- /dev/null
+++ b/test/integration/targets/infra/test_test_infra.yml
@@ -0,0 +1,25 @@
+- hosts: testhost
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - name: ensure fail action produces a failing result
+ fail:
+ ignore_errors: yes
+ register: fail_out
+
+ - debug:
+ msg: fail works ({{ fail_out.failed }})
+
+ - name: ensure assert produces a failing result
+ assert:
+ that: false
+ ignore_errors: yes
+ register: assert_out
+
+ - debug:
+ msg: assert works ({{ assert_out.failed }})
+
+ - name: EXPECTED FAILURE ensure fail action stops execution
+ fail:
+ msg: fail actually failed (this is expected)
diff --git a/test/integration/targets/interpreter_discovery_python/aliases b/test/integration/targets/interpreter_discovery_python/aliases
new file mode 100644
index 00000000..740ed1a5
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+non_local # workaround to allow override of ansible_python_interpreter; disables coverage on this integration target
diff --git a/test/integration/targets/interpreter_discovery_python/library/test_echo_module.py b/test/integration/targets/interpreter_discovery_python/library/test_echo_module.py
new file mode 100644
index 00000000..73179211
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python/library/test_echo_module.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import sys
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ result = dict(changed=False)
+
+ module = AnsibleModule(argument_spec=dict(
+ facts=dict(type=dict, default={})
+ ))
+
+ result['ansible_facts'] = module.params['facts']
+ result['running_python_interpreter'] = sys.executable
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/interpreter_discovery_python/tasks/main.yml b/test/integration/targets/interpreter_discovery_python/tasks/main.yml
new file mode 100644
index 00000000..be15186f
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python/tasks/main.yml
@@ -0,0 +1,177 @@
+- name: ensure we can override ansible_python_interpreter
+ vars:
+ ansible_python_interpreter: overriddenpython
+ assert:
+ that:
+ - ansible_python_interpreter == 'overriddenpython'
+ fail_msg: "'ansible_python_interpreter' appears to be set at a high precedence to {{ ansible_python_interpreter }},
+ which breaks this test."
+
+- name: snag some facts to validate for later
+ set_fact:
+ distro: '{{ ansible_distribution | default("unknown") | lower }}'
+ distro_version: '{{ ansible_distribution_version | default("unknown") }}'
+ os_family: '{{ ansible_os_family | default("unknown") }}'
+
+- name: test that python discovery is working and that fact persistence makes it only run once
+ block:
+ - name: clear facts to force interpreter discovery to run
+ meta: clear_facts
+
+ - name: trigger discovery with auto
+ vars:
+ ansible_python_interpreter: auto
+ ping:
+ register: auto_out
+
+ - name: get the interpreter being used on the target to execute modules
+ vars:
+ # keep this set so we can verify we didn't repeat discovery
+ ansible_python_interpreter: auto
+ test_echo_module:
+ register: echoout
+
+ - name: clear facts to force interpreter discovery to run again
+ meta: clear_facts
+
+ - name: get the interpreter being used on the target to execute modules with ansible_facts
+ vars:
+ # keep this set so we can verify we didn't repeat discovery
+ ansible_python_interpreter: auto
+ test_echo_module:
+ facts:
+ sandwich: ham
+ register: echoout_with_facts
+
+ - when: distro == 'macosx'
+ block:
+ - name: Get the sys.executable for the macOS discovered interpreter, as it may differ from the discovered path
+ raw: '{{ auto_out.ansible_facts.discovered_interpreter_python }} -c "import sys; print(sys.executable)"'
+ register: discovered_sys_executable
+
+ - set_fact:
+ normalized_discovered_interpreter: '{{ discovered_sys_executable.stdout_lines[0] }}'
+
+ - set_fact:
+ normalized_discovered_interpreter: '{{ auto_out.ansible_facts.discovered_interpreter_python }}'
+ when: distro != 'macosx'
+
+ - assert:
+ that:
+ - auto_out.ansible_facts.discovered_interpreter_python is defined
+ - echoout.running_python_interpreter == normalized_discovered_interpreter
+ # verify that discovery didn't run again (if it did, we'd have the fact in the result)
+ - echoout.ansible_facts is not defined or echoout.ansible_facts.discovered_interpreter_python is not defined
+ - echoout_with_facts.ansible_facts is defined
+ - echoout_with_facts.running_python_interpreter == normalized_discovered_interpreter
+
+- name: test that auto_legacy gives a dep warning when /usr/bin/python present but != auto result
+ block:
+ - name: clear facts to force interpreter discovery to run
+ meta: clear_facts
+
+ - name: trigger discovery with auto_legacy
+ vars:
+ ansible_python_interpreter: auto_legacy
+ ping:
+ register: legacy
+
+ - name: check for dep warning (only on platforms where auto result is not /usr/bin/python and legacy is)
+ assert:
+ that:
+ - legacy.deprecations | default([]) | length > 0
+ # only check for a dep warning if legacy returned /usr/bin/python and auto didn't
+ when: legacy.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and
+ auto_out.ansible_facts.discovered_interpreter_python != '/usr/bin/python'
+
+
+- name: test that auto_silent never warns and gets the same answer as auto
+ block:
+ - name: clear facts to force interpreter discovery to run
+ meta: clear_facts
+
+ - name: initial task to trigger discovery
+ vars:
+ ansible_python_interpreter: auto_silent
+ ping:
+ register: auto_silent_out
+
+ - assert:
+ that:
+ - auto_silent_out.warnings is not defined
+ - auto_silent_out.ansible_facts.discovered_interpreter_python == auto_out.ansible_facts.discovered_interpreter_python
+
+
+- name: test that auto_legacy_silent never warns and gets the same answer as auto_legacy
+ block:
+ - name: clear facts to force interpreter discovery to run
+ meta: clear_facts
+
+ - name: trigger discovery with auto_legacy_silent
+ vars:
+ ansible_python_interpreter: auto_legacy_silent
+ ping:
+ register: legacy_silent
+
+ - assert:
+ that:
+ - legacy_silent.warnings is not defined
+ - legacy_silent.ansible_facts.discovered_interpreter_python == legacy.ansible_facts.discovered_interpreter_python
+
+- name: ensure modules can't set discovered_interpreter_X or ansible_X_interpreter
+ block:
+ - test_echo_module:
+ facts:
+ ansible_discovered_interpreter_bogus: from module
+ discovered_interpreter_bogus: from_module
+ ansible_bogus_interpreter: from_module
+ test_fact: from_module
+ register: echoout
+
+ - assert:
+ that:
+ - test_fact == 'from_module'
+ - discovered_interpreter_bogus | default('nope') == 'nope'
+ - ansible_bogus_interpreter | default('nope') == 'nope'
+ # this one will exist in facts, but with its prefix removed
+ - ansible_facts['ansible_bogus_interpreter'] | default('nope') == 'nope'
+ - ansible_facts['discovered_interpreter_bogus'] | default('nope') == 'nope'
+
+ - name: debian assertions
+ assert:
+ that:
+ - auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3'
+ when: distro == 'debian' and distro_version is version('10', '>=')
+
+ - name: fedora assertions
+ assert:
+ that:
+ - auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3'
+ when: distro == 'fedora' and distro_version is version('23', '>=')
+
+ - name: rhel assertions
+ assert:
+ that:
+ # rhel 6/7
+ - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and distro_version is version('8','<')) or distro_version is version('8','>=')
+ # rhel 8+
+ - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/libexec/platform-python' and distro_version is version('8','>=')) or distro_version is version('8','<')
+ when: distro == 'redhat'
+
+ - name: ubuntu assertions
+ assert:
+ that:
+ # ubuntu < 16
+ - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python' and distro_version is version('16.04','<')) or distro_version is version('16.04','>=')
+ # ubuntu >= 16
+ - (auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python3' and distro_version is version('16.04','>=')) or distro_version is version('16.04','<')
+ when: distro == 'ubuntu'
+
+ - name: mac assertions
+ assert:
+ that:
+ - auto_out.ansible_facts.discovered_interpreter_python == '/usr/bin/python'
+ when: os_family == 'darwin'
+
+ always:
+ - meta: clear_facts
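A minimal inventory sketch (hypothetical host names, not part of the fixture) showing how the discovery modes exercised above are selected per host via ansible_python_interpreter:

    [discovery]
    host_auto           ansible_python_interpreter=auto
    host_auto_silent    ansible_python_interpreter=auto_silent
    host_auto_legacy    ansible_python_interpreter=auto_legacy
    host_pinned         ansible_python_interpreter=/usr/bin/python3   # discovery skipped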
diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/aliases b/test/integration/targets/interpreter_discovery_python_delegate_facts/aliases
new file mode 100644
index 00000000..dc9ac468
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+non_local # this test requires interpreter discovery, which means code coverage must be disabled
diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml b/test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml
new file mode 100644
index 00000000..535269d1
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/delegate_facts.yml
@@ -0,0 +1,10 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Test python interpreter discovery with delegate_to without delegate_facts
+ ping:
+ delegate_to: testhost
+ - name: Test python interpreter discovery with delegate_to with delegate_facts
+ ping:
+ delegate_to: testhost
+ delegate_facts: yes
diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/inventory b/test/integration/targets/interpreter_discovery_python_delegate_facts/inventory
new file mode 100644
index 00000000..350f3e89
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/inventory
@@ -0,0 +1,2 @@
+[local]
+testhost ansible_connection=local ansible_python_interpreter=auto # interpreter discovery required
diff --git a/test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh b/test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh
new file mode 100755
index 00000000..ca2caa1c
--- /dev/null
+++ b/test/integration/targets/interpreter_discovery_python_delegate_facts/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook delegate_facts.yml -i inventory "$@"
diff --git a/test/integration/targets/inventory/aliases b/test/integration/targets/inventory/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/inventory/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/inventory/playbook.yml b/test/integration/targets/inventory/playbook.yml
new file mode 100644
index 00000000..5e073614
--- /dev/null
+++ b/test/integration/targets/inventory/playbook.yml
@@ -0,0 +1,4 @@
+- hosts: all
+ gather_facts: false
+ tasks:
+ - ping:
diff --git a/test/integration/targets/inventory/runme.sh b/test/integration/targets/inventory/runme.sh
new file mode 100755
index 00000000..3cd533cd
--- /dev/null
+++ b/test/integration/targets/inventory/runme.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+set -x
+
+empty_limit_file="/tmp/limit_file"
+touch "${empty_limit_file}"
+
+cleanup() {
+ if [[ -f "${empty_limit_file}" ]]; then
+ rm -rf "${empty_limit_file}"
+ fi
+}
+
+trap 'cleanup' EXIT
+
+# https://github.com/ansible/ansible/issues/52152
+# Ensure that non-matching limit causes failure with rc 1
+ansible-playbook -i ../../inventory --limit foo playbook.yml
+if [ "$?" != "1" ]; then
+ echo "Non-matching limit should cause failure"
+ exit 1
+fi
+
+# Ensure that non-existing limit file causes failure with rc 1
+ansible-playbook -i ../../inventory --limit @foo playbook.yml
+if [ "$?" != "1" ]; then
+ echo "Non-existing limit file should cause failure"
+ exit 1
+fi
+
+# Ensure that an empty limit file does not cause a failure
+ansible-playbook -i ../../inventory --limit @"${empty_limit_file}" playbook.yml
+
+ansible-playbook -i ../../inventory "$@" strategy.yml
+ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=always ansible-playbook -i ../../inventory "$@" strategy.yml
+ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never ansible-playbook -i ../../inventory "$@" strategy.yml
diff --git a/test/integration/targets/inventory/strategy.yml b/test/integration/targets/inventory/strategy.yml
new file mode 100644
index 00000000..5c1cbd2b
--- /dev/null
+++ b/test/integration/targets/inventory/strategy.yml
@@ -0,0 +1,12 @@
+- name: Check that an 'invalid' group name works; problem exposed in #58980
+ hosts: localhost
+ tasks:
+ - name: add a host to a group whose name contains a dash, to trigger substitution
+ add_host:
+ name: localhost
+ groups: Not-Working
+
+ - name: group hosts by distribution, with a dash in the key to trigger substitution
+ group_by:
+ key: "{{ ansible_distribution }}-{{ ansible_distribution_version }}"
+ changed_when: false
diff --git a/test/integration/targets/inventory_ini/aliases b/test/integration/targets/inventory_ini/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/inventory_ini/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/inventory_ini/inventory.ini b/test/integration/targets/inventory_ini/inventory.ini
new file mode 100644
index 00000000..a0c99ade
--- /dev/null
+++ b/test/integration/targets/inventory_ini/inventory.ini
@@ -0,0 +1,5 @@
+[local]
+testhost ansible_connection=local ansible_become=no ansible_become_user=ansibletest1
+
+[all:vars]
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/inventory_ini/runme.sh b/test/integration/targets/inventory_ini/runme.sh
new file mode 100755
index 00000000..81bf1475
--- /dev/null
+++ b/test/integration/targets/inventory_ini/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook -v -i inventory.ini test_ansible_become.yml
diff --git a/test/integration/targets/inventory_ini/test_ansible_become.yml b/test/integration/targets/inventory_ini/test_ansible_become.yml
new file mode 100644
index 00000000..55bbe7da
--- /dev/null
+++ b/test/integration/targets/inventory_ini/test_ansible_become.yml
@@ -0,0 +1,11 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Test proper bool evaluation of ansible_become (issue #70476)
+ shell: whoami
+ register: output
+
+ - name: Assert we are NOT the become user specified
+ assert:
+ that:
+ - "output.stdout != 'ansibletest1'"
diff --git a/test/integration/targets/inventory_script/aliases b/test/integration/targets/inventory_script/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/inventory_script/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/inventory_script/inventory.json b/test/integration/targets/inventory_script/inventory.json
new file mode 100644
index 00000000..5046a9a8
--- /dev/null
+++ b/test/integration/targets/inventory_script/inventory.json
@@ -0,0 +1,1045 @@
+{
+ "None": {
+ "hosts": [
+ "DC0_C0_RP0_VM0_cd0681bf-2f18-5c00-9b9b-8197c0095348",
+ "DC0_C0_RP0_VM1_f7c371d6-2003-5a48-9859-3bc9a8b08908",
+ "DC0_H0_VM0_265104de-1472-547c-b873-6dc7883fb6cb",
+ "DC0_H0_VM1_39365506-5a0a-5fd0-be10-9586ad53aaad"
+ ]
+ },
+ "_meta": {
+ "hostvars": {
+ "DC0_C0_RP0_VM0_cd0681bf-2f18-5c00-9b9b-8197c0095348": {
+ "alarmactionsenabled": null,
+ "ansible_host": "None",
+ "ansible_ssh_host": "None",
+ "ansible_uuid": "239fb366-6d93-430e-939a-0b6ab272d98f",
+ "availablefield": [],
+ "capability": {
+ "bootoptionssupported": false,
+ "bootretryoptionssupported": false,
+ "changetrackingsupported": false,
+ "consolepreferencessupported": false,
+ "cpufeaturemasksupported": false,
+ "disablesnapshotssupported": false,
+ "diskonlysnapshotonsuspendedvmsupported": null,
+ "disksharessupported": false,
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "featurerequirementsupported": false,
+ "guestautolocksupported": false,
+ "hostbasedreplicationsupported": false,
+ "locksnapshotssupported": false,
+ "memoryreservationlocksupported": false,
+ "memorysnapshotssupported": false,
+ "multiplecorespersocketsupported": false,
+ "multiplesnapshotssupported": false,
+ "nestedhvsupported": false,
+ "npivwwnonnonrdmvmsupported": false,
+ "pervmevcsupported": null,
+ "poweredoffsnapshotssupported": false,
+ "poweredonmonitortypechangesupported": false,
+ "quiescedsnapshotssupported": false,
+ "recordreplaysupported": false,
+ "reverttosnapshotsupported": false,
+ "s1acpimanagementsupported": false,
+ "securebootsupported": null,
+ "sesparsedisksupported": false,
+ "settingdisplaytopologysupported": false,
+ "settingscreenresolutionsupported": false,
+ "settingvideoramsizesupported": false,
+ "snapshotconfigsupported": false,
+ "snapshotoperationssupported": false,
+ "swapplacementsupported": false,
+ "toolsautoupdatesupported": false,
+ "toolssynctimesupported": false,
+ "virtualexecusageignored": null,
+ "virtualmmuusageignored": null,
+ "virtualmmuusagesupported": false,
+ "vmnpivwwndisablesupported": false,
+ "vmnpivwwnsupported": false,
+ "vmnpivwwnupdatesupported": false,
+ "vpmcsupported": false
+ },
+ "config": {
+ "alternateguestname": "",
+ "annotation": null,
+ "bootoptions": null,
+ "changetrackingenabled": null,
+ "changeversion": "",
+ "consolepreferences": null,
+ "contentlibiteminfo": null,
+ "cpuaffinity": null,
+ "cpuallocation": {},
+ "cpufeaturemask": [],
+ "cpuhotaddenabled": null,
+ "cpuhotremoveenabled": null,
+ "createdate": null,
+ "datastoreurl": [],
+ "defaultpowerops": {},
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "extraconfig": [],
+ "files": {},
+ "firmware": null,
+ "flags": {},
+ "forkconfiginfo": null,
+ "ftinfo": null,
+ "guestautolockenabled": null,
+ "guestfullname": "otherGuest",
+ "guestid": "otherGuest",
+ "guestintegrityinfo": null,
+ "guestmonitoringmodeinfo": null,
+ "hardware": {},
+ "hotplugmemoryincrementsize": null,
+ "hotplugmemorylimit": null,
+ "initialoverhead": null,
+ "instanceuuid": "bfff331f-7f07-572d-951e-edd3701dc061",
+ "keyid": null,
+ "latencysensitivity": null,
+ "locationid": null,
+ "managedby": null,
+ "maxmksconnections": null,
+ "memoryaffinity": null,
+ "memoryallocation": {},
+ "memoryhotaddenabled": null,
+ "memoryreservationlockedtomax": null,
+ "messagebustunnelenabled": null,
+ "migrateencryption": null,
+ "modified": {},
+ "name": "DC0_C0_RP0_VM0",
+ "nestedhvenabled": null,
+ "networkshaper": null,
+ "npivdesirednodewwns": null,
+ "npivdesiredportwwns": null,
+ "npivnodeworldwidename": [],
+ "npivonnonrdmdisks": null,
+ "npivportworldwidename": [],
+ "npivtemporarydisabled": null,
+ "npivworldwidenametype": null,
+ "repconfig": null,
+ "scheduledhardwareupgradeinfo": null,
+ "sgxinfo": null,
+ "swapplacement": null,
+ "swapstorageobjectid": null,
+ "template": false,
+ "tools": {},
+ "uuid": "cd0681bf-2f18-5c00-9b9b-8197c0095348",
+ "vappconfig": null,
+ "vassertsenabled": null,
+ "vcpuconfig": [],
+ "version": "vmx-13",
+ "vflashcachereservation": null,
+ "vmstorageobjectid": null,
+ "vmxconfigchecksum": null,
+ "vpmcenabled": null
+ },
+ "configissue": [],
+ "configstatus": "green",
+ "customvalue": [],
+ "datastore": [
+ {
+ "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5",
+ "name": "LocalDS_0"
+ }
+ ],
+ "effectiverole": [
+ -1
+ ],
+ "guest": {
+ "appheartbeatstatus": null,
+ "appstate": null,
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "generationinfo": [],
+ "guestfamily": null,
+ "guestfullname": null,
+ "guestid": null,
+ "guestkernelcrashed": null,
+ "guestoperationsready": null,
+ "gueststate": "",
+ "gueststatechangesupported": null,
+ "hostname": null,
+ "hwversion": null,
+ "interactiveguestoperationsready": null,
+ "ipaddress": null,
+ "ipstack": [],
+ "net": [],
+ "screen": null,
+ "toolsinstalltype": null,
+ "toolsrunningstatus": "guestToolsNotRunning",
+ "toolsstatus": "toolsNotInstalled",
+ "toolsversion": "0",
+ "toolsversionstatus": null,
+ "toolsversionstatus2": null
+ },
+ "guestheartbeatstatus": null,
+ "layout": {
+ "configfile": [],
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "logfile": [],
+ "snapshot": [],
+ "swapfile": null
+ },
+ "layoutex": {
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "file": [],
+ "snapshot": [],
+ "timestamp": {}
+ },
+ "name": "DC0_C0_RP0_VM0",
+ "network": [],
+ "overallstatus": "green",
+ "parentvapp": null,
+ "permission": [],
+ "recenttask": [],
+ "resourcepool": {
+ "_moId": "resgroup-26",
+ "name": "Resources"
+ },
+ "rootsnapshot": [],
+ "runtime": {
+ "boottime": null,
+ "cleanpoweroff": null,
+ "connectionstate": "connected",
+ "consolidationneeded": false,
+ "cryptostate": null,
+ "dasvmprotection": null,
+ "device": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "faulttolerancestate": null,
+ "featuremask": [],
+ "featurerequirement": [],
+ "host": {
+ "_moId": "host-47",
+ "name": "DC0_C0_H2"
+ },
+ "instantclonefrozen": null,
+ "maxcpuusage": null,
+ "maxmemoryusage": null,
+ "memoryoverhead": null,
+ "minrequiredevcmodekey": null,
+ "needsecondaryreason": null,
+ "nummksconnections": 0,
+ "offlinefeaturerequirement": [],
+ "onlinestandby": false,
+ "paused": null,
+ "powerstate": "poweredOn",
+ "question": null,
+ "quiescedforkparent": null,
+ "recordreplaystate": null,
+ "snapshotinbackground": null,
+ "suspendinterval": null,
+ "suspendtime": null,
+ "toolsinstallermounted": false,
+ "vflashcacheallocation": null
+ },
+ "snapshot": null,
+ "storage": {
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "perdatastoreusage": [],
+ "timestamp": {}
+ },
+ "summary": {
+ "config": {},
+ "customvalue": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "guest": {},
+ "overallstatus": "green",
+ "quickstats": {},
+ "runtime": {},
+ "storage": {},
+ "vm": {}
+ },
+ "tag": [],
+ "triggeredalarmstate": [],
+ "value": []
+ },
+ "DC0_C0_RP0_VM1_f7c371d6-2003-5a48-9859-3bc9a8b08908": {
+ "alarmactionsenabled": null,
+ "ansible_host": "None",
+ "ansible_ssh_host": "None",
+ "ansible_uuid": "64b6ca93-f35f-4749-abeb-fc1fabae6c79",
+ "availablefield": [],
+ "capability": {
+ "bootoptionssupported": false,
+ "bootretryoptionssupported": false,
+ "changetrackingsupported": false,
+ "consolepreferencessupported": false,
+ "cpufeaturemasksupported": false,
+ "disablesnapshotssupported": false,
+ "diskonlysnapshotonsuspendedvmsupported": null,
+ "disksharessupported": false,
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "featurerequirementsupported": false,
+ "guestautolocksupported": false,
+ "hostbasedreplicationsupported": false,
+ "locksnapshotssupported": false,
+ "memoryreservationlocksupported": false,
+ "memorysnapshotssupported": false,
+ "multiplecorespersocketsupported": false,
+ "multiplesnapshotssupported": false,
+ "nestedhvsupported": false,
+ "npivwwnonnonrdmvmsupported": false,
+ "pervmevcsupported": null,
+ "poweredoffsnapshotssupported": false,
+ "poweredonmonitortypechangesupported": false,
+ "quiescedsnapshotssupported": false,
+ "recordreplaysupported": false,
+ "reverttosnapshotsupported": false,
+ "s1acpimanagementsupported": false,
+ "securebootsupported": null,
+ "sesparsedisksupported": false,
+ "settingdisplaytopologysupported": false,
+ "settingscreenresolutionsupported": false,
+ "settingvideoramsizesupported": false,
+ "snapshotconfigsupported": false,
+ "snapshotoperationssupported": false,
+ "swapplacementsupported": false,
+ "toolsautoupdatesupported": false,
+ "toolssynctimesupported": false,
+ "virtualexecusageignored": null,
+ "virtualmmuusageignored": null,
+ "virtualmmuusagesupported": false,
+ "vmnpivwwndisablesupported": false,
+ "vmnpivwwnsupported": false,
+ "vmnpivwwnupdatesupported": false,
+ "vpmcsupported": false
+ },
+ "config": {
+ "alternateguestname": "",
+ "annotation": null,
+ "bootoptions": null,
+ "changetrackingenabled": null,
+ "changeversion": "",
+ "consolepreferences": null,
+ "contentlibiteminfo": null,
+ "cpuaffinity": null,
+ "cpuallocation": {},
+ "cpufeaturemask": [],
+ "cpuhotaddenabled": null,
+ "cpuhotremoveenabled": null,
+ "createdate": null,
+ "datastoreurl": [],
+ "defaultpowerops": {},
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "extraconfig": [],
+ "files": {},
+ "firmware": null,
+ "flags": {},
+ "forkconfiginfo": null,
+ "ftinfo": null,
+ "guestautolockenabled": null,
+ "guestfullname": "otherGuest",
+ "guestid": "otherGuest",
+ "guestintegrityinfo": null,
+ "guestmonitoringmodeinfo": null,
+ "hardware": {},
+ "hotplugmemoryincrementsize": null,
+ "hotplugmemorylimit": null,
+ "initialoverhead": null,
+ "instanceuuid": "6132d223-1566-5921-bc3b-df91ece09a4d",
+ "keyid": null,
+ "latencysensitivity": null,
+ "locationid": null,
+ "managedby": null,
+ "maxmksconnections": null,
+ "memoryaffinity": null,
+ "memoryallocation": {},
+ "memoryhotaddenabled": null,
+ "memoryreservationlockedtomax": null,
+ "messagebustunnelenabled": null,
+ "migrateencryption": null,
+ "modified": {},
+ "name": "DC0_C0_RP0_VM1",
+ "nestedhvenabled": null,
+ "networkshaper": null,
+ "npivdesirednodewwns": null,
+ "npivdesiredportwwns": null,
+ "npivnodeworldwidename": [],
+ "npivonnonrdmdisks": null,
+ "npivportworldwidename": [],
+ "npivtemporarydisabled": null,
+ "npivworldwidenametype": null,
+ "repconfig": null,
+ "scheduledhardwareupgradeinfo": null,
+ "sgxinfo": null,
+ "swapplacement": null,
+ "swapstorageobjectid": null,
+ "template": false,
+ "tools": {},
+ "uuid": "f7c371d6-2003-5a48-9859-3bc9a8b08908",
+ "vappconfig": null,
+ "vassertsenabled": null,
+ "vcpuconfig": [],
+ "version": "vmx-13",
+ "vflashcachereservation": null,
+ "vmstorageobjectid": null,
+ "vmxconfigchecksum": null,
+ "vpmcenabled": null
+ },
+ "configissue": [],
+ "configstatus": "green",
+ "customvalue": [],
+ "datastore": [
+ {
+ "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5",
+ "name": "LocalDS_0"
+ }
+ ],
+ "effectiverole": [
+ -1
+ ],
+ "guest": {
+ "appheartbeatstatus": null,
+ "appstate": null,
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "generationinfo": [],
+ "guestfamily": null,
+ "guestfullname": null,
+ "guestid": null,
+ "guestkernelcrashed": null,
+ "guestoperationsready": null,
+ "gueststate": "",
+ "gueststatechangesupported": null,
+ "hostname": null,
+ "hwversion": null,
+ "interactiveguestoperationsready": null,
+ "ipaddress": null,
+ "ipstack": [],
+ "net": [],
+ "screen": null,
+ "toolsinstalltype": null,
+ "toolsrunningstatus": "guestToolsNotRunning",
+ "toolsstatus": "toolsNotInstalled",
+ "toolsversion": "0",
+ "toolsversionstatus": null,
+ "toolsversionstatus2": null
+ },
+ "guestheartbeatstatus": null,
+ "layout": {
+ "configfile": [],
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "logfile": [],
+ "snapshot": [],
+ "swapfile": null
+ },
+ "layoutex": {
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "file": [],
+ "snapshot": [],
+ "timestamp": {}
+ },
+ "name": "DC0_C0_RP0_VM1",
+ "network": [],
+ "overallstatus": "green",
+ "parentvapp": null,
+ "permission": [],
+ "recenttask": [],
+ "resourcepool": {
+ "_moId": "resgroup-26",
+ "name": "Resources"
+ },
+ "rootsnapshot": [],
+ "runtime": {
+ "boottime": null,
+ "cleanpoweroff": null,
+ "connectionstate": "connected",
+ "consolidationneeded": false,
+ "cryptostate": null,
+ "dasvmprotection": null,
+ "device": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "faulttolerancestate": null,
+ "featuremask": [],
+ "featurerequirement": [],
+ "host": {
+ "_moId": "host-33",
+ "name": "DC0_C0_H0"
+ },
+ "instantclonefrozen": null,
+ "maxcpuusage": null,
+ "maxmemoryusage": null,
+ "memoryoverhead": null,
+ "minrequiredevcmodekey": null,
+ "needsecondaryreason": null,
+ "nummksconnections": 0,
+ "offlinefeaturerequirement": [],
+ "onlinestandby": false,
+ "paused": null,
+ "powerstate": "poweredOn",
+ "question": null,
+ "quiescedforkparent": null,
+ "recordreplaystate": null,
+ "snapshotinbackground": null,
+ "suspendinterval": null,
+ "suspendtime": null,
+ "toolsinstallermounted": false,
+ "vflashcacheallocation": null
+ },
+ "snapshot": null,
+ "storage": {
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "perdatastoreusage": [],
+ "timestamp": {}
+ },
+ "summary": {
+ "config": {},
+ "customvalue": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "guest": {},
+ "overallstatus": "green",
+ "quickstats": {},
+ "runtime": {},
+ "storage": {},
+ "vm": {}
+ },
+ "tag": [],
+ "triggeredalarmstate": [],
+ "value": []
+ },
+ "DC0_H0_VM0_265104de-1472-547c-b873-6dc7883fb6cb": {
+ "alarmactionsenabled": null,
+ "ansible_host": "None",
+ "ansible_ssh_host": "None",
+ "ansible_uuid": "6616671b-16b0-494c-8201-737ca506790b",
+ "availablefield": [],
+ "capability": {
+ "bootoptionssupported": false,
+ "bootretryoptionssupported": false,
+ "changetrackingsupported": false,
+ "consolepreferencessupported": false,
+ "cpufeaturemasksupported": false,
+ "disablesnapshotssupported": false,
+ "diskonlysnapshotonsuspendedvmsupported": null,
+ "disksharessupported": false,
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "featurerequirementsupported": false,
+ "guestautolocksupported": false,
+ "hostbasedreplicationsupported": false,
+ "locksnapshotssupported": false,
+ "memoryreservationlocksupported": false,
+ "memorysnapshotssupported": false,
+ "multiplecorespersocketsupported": false,
+ "multiplesnapshotssupported": false,
+ "nestedhvsupported": false,
+ "npivwwnonnonrdmvmsupported": false,
+ "pervmevcsupported": null,
+ "poweredoffsnapshotssupported": false,
+ "poweredonmonitortypechangesupported": false,
+ "quiescedsnapshotssupported": false,
+ "recordreplaysupported": false,
+ "reverttosnapshotsupported": false,
+ "s1acpimanagementsupported": false,
+ "securebootsupported": null,
+ "sesparsedisksupported": false,
+ "settingdisplaytopologysupported": false,
+ "settingscreenresolutionsupported": false,
+ "settingvideoramsizesupported": false,
+ "snapshotconfigsupported": false,
+ "snapshotoperationssupported": false,
+ "swapplacementsupported": false,
+ "toolsautoupdatesupported": false,
+ "toolssynctimesupported": false,
+ "virtualexecusageignored": null,
+ "virtualmmuusageignored": null,
+ "virtualmmuusagesupported": false,
+ "vmnpivwwndisablesupported": false,
+ "vmnpivwwnsupported": false,
+ "vmnpivwwnupdatesupported": false,
+ "vpmcsupported": false
+ },
+ "config": {
+ "alternateguestname": "",
+ "annotation": null,
+ "bootoptions": null,
+ "changetrackingenabled": null,
+ "changeversion": "",
+ "consolepreferences": null,
+ "contentlibiteminfo": null,
+ "cpuaffinity": null,
+ "cpuallocation": {},
+ "cpufeaturemask": [],
+ "cpuhotaddenabled": null,
+ "cpuhotremoveenabled": null,
+ "createdate": null,
+ "datastoreurl": [],
+ "defaultpowerops": {},
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "extraconfig": [],
+ "files": {},
+ "firmware": null,
+ "flags": {},
+ "forkconfiginfo": null,
+ "ftinfo": null,
+ "guestautolockenabled": null,
+ "guestfullname": "otherGuest",
+ "guestid": "otherGuest",
+ "guestintegrityinfo": null,
+ "guestmonitoringmodeinfo": null,
+ "hardware": {},
+ "hotplugmemoryincrementsize": null,
+ "hotplugmemorylimit": null,
+ "initialoverhead": null,
+ "instanceuuid": "b4689bed-97f0-5bcd-8a4c-07477cc8f06f",
+ "keyid": null,
+ "latencysensitivity": null,
+ "locationid": null,
+ "managedby": null,
+ "maxmksconnections": null,
+ "memoryaffinity": null,
+ "memoryallocation": {},
+ "memoryhotaddenabled": null,
+ "memoryreservationlockedtomax": null,
+ "messagebustunnelenabled": null,
+ "migrateencryption": null,
+ "modified": {},
+ "name": "DC0_H0_VM0",
+ "nestedhvenabled": null,
+ "networkshaper": null,
+ "npivdesirednodewwns": null,
+ "npivdesiredportwwns": null,
+ "npivnodeworldwidename": [],
+ "npivonnonrdmdisks": null,
+ "npivportworldwidename": [],
+ "npivtemporarydisabled": null,
+ "npivworldwidenametype": null,
+ "repconfig": null,
+ "scheduledhardwareupgradeinfo": null,
+ "sgxinfo": null,
+ "swapplacement": null,
+ "swapstorageobjectid": null,
+ "template": false,
+ "tools": {},
+ "uuid": "265104de-1472-547c-b873-6dc7883fb6cb",
+ "vappconfig": null,
+ "vassertsenabled": null,
+ "vcpuconfig": [],
+ "version": "vmx-13",
+ "vflashcachereservation": null,
+ "vmstorageobjectid": null,
+ "vmxconfigchecksum": null,
+ "vpmcenabled": null
+ },
+ "configissue": [],
+ "configstatus": "green",
+ "customvalue": [],
+ "datastore": [
+ {
+ "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5",
+ "name": "LocalDS_0"
+ }
+ ],
+ "effectiverole": [
+ -1
+ ],
+ "guest": {
+ "appheartbeatstatus": null,
+ "appstate": null,
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "generationinfo": [],
+ "guestfamily": null,
+ "guestfullname": null,
+ "guestid": null,
+ "guestkernelcrashed": null,
+ "guestoperationsready": null,
+ "gueststate": "",
+ "gueststatechangesupported": null,
+ "hostname": null,
+ "hwversion": null,
+ "interactiveguestoperationsready": null,
+ "ipaddress": null,
+ "ipstack": [],
+ "net": [],
+ "screen": null,
+ "toolsinstalltype": null,
+ "toolsrunningstatus": "guestToolsNotRunning",
+ "toolsstatus": "toolsNotInstalled",
+ "toolsversion": "0",
+ "toolsversionstatus": null,
+ "toolsversionstatus2": null
+ },
+ "guestheartbeatstatus": null,
+ "layout": {
+ "configfile": [],
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "logfile": [],
+ "snapshot": [],
+ "swapfile": null
+ },
+ "layoutex": {
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "file": [],
+ "snapshot": [],
+ "timestamp": {}
+ },
+ "name": "DC0_H0_VM0",
+ "network": [],
+ "overallstatus": "green",
+ "parentvapp": null,
+ "permission": [],
+ "recenttask": [],
+ "resourcepool": {
+ "_moId": "resgroup-22",
+ "name": "Resources"
+ },
+ "rootsnapshot": [],
+ "runtime": {
+ "boottime": null,
+ "cleanpoweroff": null,
+ "connectionstate": "connected",
+ "consolidationneeded": false,
+ "cryptostate": null,
+ "dasvmprotection": null,
+ "device": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "faulttolerancestate": null,
+ "featuremask": [],
+ "featurerequirement": [],
+ "host": {
+ "_moId": "host-21",
+ "name": "DC0_H0"
+ },
+ "instantclonefrozen": null,
+ "maxcpuusage": null,
+ "maxmemoryusage": null,
+ "memoryoverhead": null,
+ "minrequiredevcmodekey": null,
+ "needsecondaryreason": null,
+ "nummksconnections": 0,
+ "offlinefeaturerequirement": [],
+ "onlinestandby": false,
+ "paused": null,
+ "powerstate": "poweredOn",
+ "question": null,
+ "quiescedforkparent": null,
+ "recordreplaystate": null,
+ "snapshotinbackground": null,
+ "suspendinterval": null,
+ "suspendtime": null,
+ "toolsinstallermounted": false,
+ "vflashcacheallocation": null
+ },
+ "snapshot": null,
+ "storage": {
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "perdatastoreusage": [],
+ "timestamp": {}
+ },
+ "summary": {
+ "config": {},
+ "customvalue": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "guest": {},
+ "overallstatus": "green",
+ "quickstats": {},
+ "runtime": {},
+ "storage": {},
+ "vm": {}
+ },
+ "tag": [],
+ "triggeredalarmstate": [],
+ "value": []
+ },
+ "DC0_H0_VM1_39365506-5a0a-5fd0-be10-9586ad53aaad": {
+ "alarmactionsenabled": null,
+ "ansible_host": "None",
+ "ansible_ssh_host": "None",
+ "ansible_uuid": "50401ff9-720a-4166-b9e6-d7cd0d9a4dc9",
+ "availablefield": [],
+ "capability": {
+ "bootoptionssupported": false,
+ "bootretryoptionssupported": false,
+ "changetrackingsupported": false,
+ "consolepreferencessupported": false,
+ "cpufeaturemasksupported": false,
+ "disablesnapshotssupported": false,
+ "diskonlysnapshotonsuspendedvmsupported": null,
+ "disksharessupported": false,
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "featurerequirementsupported": false,
+ "guestautolocksupported": false,
+ "hostbasedreplicationsupported": false,
+ "locksnapshotssupported": false,
+ "memoryreservationlocksupported": false,
+ "memorysnapshotssupported": false,
+ "multiplecorespersocketsupported": false,
+ "multiplesnapshotssupported": false,
+ "nestedhvsupported": false,
+ "npivwwnonnonrdmvmsupported": false,
+ "pervmevcsupported": null,
+ "poweredoffsnapshotssupported": false,
+ "poweredonmonitortypechangesupported": false,
+ "quiescedsnapshotssupported": false,
+ "recordreplaysupported": false,
+ "reverttosnapshotsupported": false,
+ "s1acpimanagementsupported": false,
+ "securebootsupported": null,
+ "sesparsedisksupported": false,
+ "settingdisplaytopologysupported": false,
+ "settingscreenresolutionsupported": false,
+ "settingvideoramsizesupported": false,
+ "snapshotconfigsupported": false,
+ "snapshotoperationssupported": false,
+ "swapplacementsupported": false,
+ "toolsautoupdatesupported": false,
+ "toolssynctimesupported": false,
+ "virtualexecusageignored": null,
+ "virtualmmuusageignored": null,
+ "virtualmmuusagesupported": false,
+ "vmnpivwwndisablesupported": false,
+ "vmnpivwwnsupported": false,
+ "vmnpivwwnupdatesupported": false,
+ "vpmcsupported": false
+ },
+ "config": {
+ "alternateguestname": "",
+ "annotation": null,
+ "bootoptions": null,
+ "changetrackingenabled": null,
+ "changeversion": "",
+ "consolepreferences": null,
+ "contentlibiteminfo": null,
+ "cpuaffinity": null,
+ "cpuallocation": {},
+ "cpufeaturemask": [],
+ "cpuhotaddenabled": null,
+ "cpuhotremoveenabled": null,
+ "createdate": null,
+ "datastoreurl": [],
+ "defaultpowerops": {},
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "extraconfig": [],
+ "files": {},
+ "firmware": null,
+ "flags": {},
+ "forkconfiginfo": null,
+ "ftinfo": null,
+ "guestautolockenabled": null,
+ "guestfullname": "otherGuest",
+ "guestid": "otherGuest",
+ "guestintegrityinfo": null,
+ "guestmonitoringmodeinfo": null,
+ "hardware": {},
+ "hotplugmemoryincrementsize": null,
+ "hotplugmemorylimit": null,
+ "initialoverhead": null,
+ "instanceuuid": "12f8928d-f144-5c57-89db-dd2d0902c9fa",
+ "keyid": null,
+ "latencysensitivity": null,
+ "locationid": null,
+ "managedby": null,
+ "maxmksconnections": null,
+ "memoryaffinity": null,
+ "memoryallocation": {},
+ "memoryhotaddenabled": null,
+ "memoryreservationlockedtomax": null,
+ "messagebustunnelenabled": null,
+ "migrateencryption": null,
+ "modified": {},
+ "name": "DC0_H0_VM1",
+ "nestedhvenabled": null,
+ "networkshaper": null,
+ "npivdesirednodewwns": null,
+ "npivdesiredportwwns": null,
+ "npivnodeworldwidename": [],
+ "npivonnonrdmdisks": null,
+ "npivportworldwidename": [],
+ "npivtemporarydisabled": null,
+ "npivworldwidenametype": null,
+ "repconfig": null,
+ "scheduledhardwareupgradeinfo": null,
+ "sgxinfo": null,
+ "swapplacement": null,
+ "swapstorageobjectid": null,
+ "template": false,
+ "tools": {},
+ "uuid": "39365506-5a0a-5fd0-be10-9586ad53aaad",
+ "vappconfig": null,
+ "vassertsenabled": null,
+ "vcpuconfig": [],
+ "version": "vmx-13",
+ "vflashcachereservation": null,
+ "vmstorageobjectid": null,
+ "vmxconfigchecksum": null,
+ "vpmcenabled": null
+ },
+ "configissue": [],
+ "configstatus": "green",
+ "customvalue": [],
+ "datastore": [
+ {
+ "_moId": "/tmp/govcsim-DC0-LocalDS_0-949174843@folder-5",
+ "name": "LocalDS_0"
+ }
+ ],
+ "effectiverole": [
+ -1
+ ],
+ "guest": {
+ "appheartbeatstatus": null,
+ "appstate": null,
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "generationinfo": [],
+ "guestfamily": null,
+ "guestfullname": null,
+ "guestid": null,
+ "guestkernelcrashed": null,
+ "guestoperationsready": null,
+ "gueststate": "",
+ "gueststatechangesupported": null,
+ "hostname": null,
+ "hwversion": null,
+ "interactiveguestoperationsready": null,
+ "ipaddress": null,
+ "ipstack": [],
+ "net": [],
+ "screen": null,
+ "toolsinstalltype": null,
+ "toolsrunningstatus": "guestToolsNotRunning",
+ "toolsstatus": "toolsNotInstalled",
+ "toolsversion": "0",
+ "toolsversionstatus": null,
+ "toolsversionstatus2": null
+ },
+ "guestheartbeatstatus": null,
+ "layout": {
+ "configfile": [],
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "logfile": [],
+ "snapshot": [],
+ "swapfile": null
+ },
+ "layoutex": {
+ "disk": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "file": [],
+ "snapshot": [],
+ "timestamp": {}
+ },
+ "name": "DC0_H0_VM1",
+ "network": [],
+ "overallstatus": "green",
+ "parentvapp": null,
+ "permission": [],
+ "recenttask": [],
+ "resourcepool": {
+ "_moId": "resgroup-22",
+ "name": "Resources"
+ },
+ "rootsnapshot": [],
+ "runtime": {
+ "boottime": null,
+ "cleanpoweroff": null,
+ "connectionstate": "connected",
+ "consolidationneeded": false,
+ "cryptostate": null,
+ "dasvmprotection": null,
+ "device": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "faulttolerancestate": null,
+ "featuremask": [],
+ "featurerequirement": [],
+ "host": {
+ "_moId": "host-21",
+ "name": "DC0_H0"
+ },
+ "instantclonefrozen": null,
+ "maxcpuusage": null,
+ "maxmemoryusage": null,
+ "memoryoverhead": null,
+ "minrequiredevcmodekey": null,
+ "needsecondaryreason": null,
+ "nummksconnections": 0,
+ "offlinefeaturerequirement": [],
+ "onlinestandby": false,
+ "paused": null,
+ "powerstate": "poweredOn",
+ "question": null,
+ "quiescedforkparent": null,
+ "recordreplaystate": null,
+ "snapshotinbackground": null,
+ "suspendinterval": null,
+ "suspendtime": null,
+ "toolsinstallermounted": false,
+ "vflashcacheallocation": null
+ },
+ "snapshot": null,
+ "storage": {
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "perdatastoreusage": [],
+ "timestamp": {}
+ },
+ "summary": {
+ "config": {},
+ "customvalue": [],
+ "dynamicproperty": [],
+ "dynamictype": null,
+ "guest": {},
+ "overallstatus": "green",
+ "quickstats": {},
+ "runtime": {},
+ "storage": {},
+ "vm": {}
+ },
+ "tag": [],
+ "triggeredalarmstate": [],
+ "value": []
+ }
+ }
+ },
+ "all": {
+ "children": [
+ "None",
+ "guests",
+ "ungrouped"
+ ]
+ },
+ "guests": {
+ "hosts": [
+ "DC0_C0_RP0_VM0_cd0681bf-2f18-5c00-9b9b-8197c0095348",
+ "DC0_C0_RP0_VM1_f7c371d6-2003-5a48-9859-3bc9a8b08908",
+ "DC0_H0_VM0_265104de-1472-547c-b873-6dc7883fb6cb",
+ "DC0_H0_VM1_39365506-5a0a-5fd0-be10-9586ad53aaad"
+ ]
+ }
+}
diff --git a/test/integration/targets/inventory_script/inventory.sh b/test/integration/targets/inventory_script/inventory.sh
new file mode 100755
index 00000000..b3f1d035
--- /dev/null
+++ b/test/integration/targets/inventory_script/inventory.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+# This script mimics the output that the contrib/inventory/vmware_inventory.py
+# dynamic inventory script used to produce.
+# This ensures we still cover the same code paths that the original tests covered,
+# and consequently that ansible-inventory produces output consistent with
+# that of a dynamic inventory script.
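+# (ansible-inventory treats any executable inventory source as a script plugin:
+# it runs the file with --list and parses the JSON the script prints to stdout)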
+cat inventory.json
diff --git a/test/integration/targets/inventory_script/runme.sh b/test/integration/targets/inventory_script/runme.sh
new file mode 100755
index 00000000..bb4fcea9
--- /dev/null
+++ b/test/integration/targets/inventory_script/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
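+# --export renders the --list output in a dump-friendly form so that it matches
+# the static inventory.json fixture; -w makes diff ignore whitespace differences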
+diff -uw <(ansible-inventory -i inventory.sh --list --export) inventory.json
diff --git a/test/integration/targets/inventory_yaml/aliases b/test/integration/targets/inventory_yaml/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/inventory_yaml/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/inventory_yaml/empty.json b/test/integration/targets/inventory_yaml/empty.json
new file mode 100644
index 00000000..e1ae0684
--- /dev/null
+++ b/test/integration/targets/inventory_yaml/empty.json
@@ -0,0 +1,10 @@
+{
+ "_meta": {
+ "hostvars": {}
+ },
+ "all": {
+ "children": [
+ "ungrouped"
+ ]
+ }
+}
diff --git a/test/integration/targets/inventory_yaml/runme.sh b/test/integration/targets/inventory_yaml/runme.sh
new file mode 100755
index 00000000..b82f499d
--- /dev/null
+++ b/test/integration/targets/inventory_yaml/runme.sh
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+# handle empty/commented-out group keys correctly, https://github.com/ansible/ansible/issues/47254
+ANSIBLE_VERBOSITY=0 diff -w <(ansible-inventory -i ./test.yml --list) success.json
diff --git a/test/integration/targets/inventory_yaml/success.json b/test/integration/targets/inventory_yaml/success.json
new file mode 100644
index 00000000..a8b15f96
--- /dev/null
+++ b/test/integration/targets/inventory_yaml/success.json
@@ -0,0 +1,61 @@
+{
+ "_meta": {
+ "hostvars": {
+ "alice": {
+ "status": "single"
+ },
+ "bobby": {
+ "in_trouble": true,
+ "popular": false
+ },
+ "cindy": {
+ "in_trouble": true,
+ "popular": true
+ },
+ "greg": {
+ "in_trouble": true,
+ "popular": true
+ },
+ "jan": {
+ "in_trouble": true,
+ "popular": false
+ },
+ "marcia": {
+ "in_trouble": true,
+ "popular": true
+ },
+ "peter": {
+ "in_trouble": true,
+ "popular": false
+ }
+ }
+ },
+ "all": {
+ "children": [
+ "cousins",
+ "kids",
+ "the-maid",
+ "ungrouped"
+ ]
+ },
+ "cousins": {
+ "children": [
+ "redheads"
+ ]
+ },
+ "kids": {
+ "hosts": [
+ "bobby",
+ "cindy",
+ "greg",
+ "jan",
+ "marcia",
+ "peter"
+ ]
+ },
+ "the-maid": {
+ "hosts": [
+ "alice"
+ ]
+ }
+}
diff --git a/test/integration/targets/inventory_yaml/test.yml b/test/integration/targets/inventory_yaml/test.yml
new file mode 100644
index 00000000..9755396a
--- /dev/null
+++ b/test/integration/targets/inventory_yaml/test.yml
@@ -0,0 +1,27 @@
+all:
+ children:
+ kids:
+ hosts:
+ marcia:
+ popular: True
+ jan:
+ popular: False
+ cindy:
+ popular: True
+ greg:
+ popular: True
+ peter:
+ popular: False
+ bobby:
+ popular: False
+ vars:
+ in_trouble: True
+ cousins:
+ children:
+ redheads:
+ hosts:
+ #oliver: # this used to cause an error and deliver incomplete inventory
+ the-maid:
+ hosts:
+ alice:
+ status: single
diff --git a/test/integration/targets/jinja2_native_types/aliases b/test/integration/targets/jinja2_native_types/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/jinja2_native_types/nested_undefined.yml b/test/integration/targets/jinja2_native_types/nested_undefined.yml
new file mode 100644
index 00000000..c808ffb7
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/nested_undefined.yml
@@ -0,0 +1,24 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - block:
+ - name: Test nested undefined var fails, single node
+ debug:
+ msg: "{{ [{ 'key': nested_and_undefined }] }}"
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - "\"'nested_and_undefined' is undefined\" in result.msg"
+
+ - name: Test nested undefined var fails, multiple nodes
+ debug:
+ msg: "{{ [{ 'key': nested_and_undefined}] }} second_node"
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - "\"'nested_and_undefined' is undefined\" in result.msg"
+ when: lookup('pipe', ansible_python_interpreter ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.10', '>=')
diff --git a/test/integration/targets/jinja2_native_types/runme.sh b/test/integration/targets/jinja2_native_types/runme.sh
new file mode 100755
index 00000000..f648f875
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/runme.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux
+
+export ANSIBLE_JINJA2_NATIVE=1
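+# with ANSIBLE_JINJA2_NATIVE=1, templating preserves Python types (int, bool,
+# list, None) instead of coercing every {{ ... }} result to a string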
+ansible-playbook runtests.yml -v "$@"
+ansible-playbook --vault-password-file test_vault_pass test_vault.yml -v "$@"
+ansible-playbook test_hostvars.yml -v "$@"
+ansible-playbook nested_undefined.yml -v "$@"
+unset ANSIBLE_JINJA2_NATIVE
diff --git a/test/integration/targets/jinja2_native_types/runtests.yml b/test/integration/targets/jinja2_native_types/runtests.yml
new file mode 100644
index 00000000..efcdb7a5
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/runtests.yml
@@ -0,0 +1,50 @@
+- name: Test jinja2 native types
+ hosts: localhost
+ gather_facts: no
+ vars:
+ i_one: 1
+ i_two: 2
+ i_three: 3
+ s_one: "1"
+ s_two: "2"
+ s_three: "3"
+ dict_one:
+ foo: bar
+ baz: bang
+ dict_two:
+ bar: foo
+ foobar: barfoo
+ list_one:
+ - one
+ - two
+ list_two:
+ - three
+ - four
+ list_ints:
+ - 4
+ - 2
+ list_one_int:
+ - 1
+ b_true: True
+ b_false: False
+ s_true: "True"
+ s_false: "False"
+ yaml_none: ~
+ tasks:
+ - name: check jinja version
+ command: "{{ ansible_python_interpreter }} -c 'import jinja2; print(jinja2.__version__)'"
+ register: jinja2_version
+
+ - name: make sure jinja is the right version
+ set_fact:
+ is_native: "{{ jinja2_version.stdout is version('2.10', '>=') }}"
+
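+  # the behaviour under test requires Jinja2 >= 2.10, which introduced
+  # jinja2.nativetypes.NativeEnvironment, hence the is_native guard on this block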
+ - block:
+ - import_tasks: test_casting.yml
+ - import_tasks: test_concatentation.yml
+ - import_tasks: test_bool.yml
+ - import_tasks: test_dunder.yml
+ - import_tasks: test_types.yml
+ - import_tasks: test_none.yml
+ - import_tasks: test_template.yml
+ when: is_native
diff --git a/test/integration/targets/jinja2_native_types/test_bool.yml b/test/integration/targets/jinja2_native_types/test_bool.yml
new file mode 100644
index 00000000..f3b5e8c0
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_bool.yml
@@ -0,0 +1,53 @@
+- name: test bool True
+ set_fact:
+ bool_var_true: "{{ b_true }}"
+
+- assert:
+ that:
+ - 'bool_var_true is sameas true'
+ - 'bool_var_true|type_debug == "bool"'
+
+- name: test bool False
+ set_fact:
+ bool_var_false: "{{ b_false }}"
+
+- assert:
+ that:
+ - 'bool_var_false is sameas false'
+ - 'bool_var_false|type_debug == "bool"'
+
+- name: test bool expr True
+ set_fact:
+ bool_var_expr_true: "{{ 1 == 1 }}"
+
+- assert:
+ that:
+ - 'bool_var_expr_true is sameas true'
+ - 'bool_var_expr_true|type_debug == "bool"'
+
+- name: test bool expr False
+ set_fact:
+ bool_var_expr_false: "{{ 2 + 2 == 5 }}"
+
+- assert:
+ that:
+ - 'bool_var_expr_false is sameas false'
+ - 'bool_var_expr_false|type_debug == "bool"'
+
+- name: test bool expr with None, True
+ set_fact:
+ bool_var_none_expr_true: "{{ None == None }}"
+
+- assert:
+ that:
+ - 'bool_var_none_expr_true is sameas true'
+ - 'bool_var_none_expr_true|type_debug == "bool"'
+
+- name: test bool expr with None, False
+ set_fact:
+ bool_var_none_expr_false: "{{ '' == None }}"
+
+- assert:
+ that:
+ - 'bool_var_none_expr_false is sameas false'
+ - 'bool_var_none_expr_false|type_debug == "bool"'
diff --git a/test/integration/targets/jinja2_native_types/test_casting.yml b/test/integration/targets/jinja2_native_types/test_casting.yml
new file mode 100644
index 00000000..8627a056
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_casting.yml
@@ -0,0 +1,31 @@
+- name: cast things to other things
+ set_fact:
+ int_to_str: "'{{ i_two }}'"
+ int_to_str2: "{{ i_two | string }}"
+ str_to_int: "{{ s_two|int }}"
+ dict_to_str: "'{{ dict_one }}'"
+ list_to_str: "'{{ list_one }}'"
+ int_to_bool: "{{ i_one|bool }}"
+ str_true_to_bool: "{{ s_true|bool }}"
+ str_false_to_bool: "{{ s_false|bool }}"
+ list_to_json_str: "{{ list_one | to_json }}"
+ list_to_yaml_str: "{{ list_one | to_yaml }}"
+
+- assert:
+ that:
+ - 'int_to_str == "2"'
+ - 'int_to_str|type_debug in ["str", "unicode"]'
+ - 'int_to_str2 == "2"'
+ - 'int_to_str2|type_debug in ["NativeJinjaText"]'
+ - 'str_to_int == 2'
+ - 'str_to_int|type_debug == "int"'
+ - 'dict_to_str|type_debug in ["str", "unicode"]'
+ - 'list_to_str|type_debug in ["str", "unicode"]'
+ - 'int_to_bool is sameas true'
+ - 'int_to_bool|type_debug == "bool"'
+ - 'str_true_to_bool is sameas true'
+ - 'str_true_to_bool|type_debug == "bool"'
+ - 'str_false_to_bool is sameas false'
+ - 'str_false_to_bool|type_debug == "bool"'
+ - 'list_to_json_str|type_debug in ["NativeJinjaText"]'
+ - 'list_to_yaml_str|type_debug in ["NativeJinjaText"]'
diff --git a/test/integration/targets/jinja2_native_types/test_concatentation.yml b/test/integration/targets/jinja2_native_types/test_concatentation.yml
new file mode 100644
index 00000000..8a8077b6
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_concatentation.yml
@@ -0,0 +1,88 @@
+- name: add two ints
+ set_fact:
+ integer_sum: "{{ i_one + i_two }}"
+
+- assert:
+ that:
+ - 'integer_sum == 3'
+ - 'integer_sum|type_debug == "int"'
+
+- name: add casted string and int
+ set_fact:
+ integer_sum2: "{{ s_one|int + i_two }}"
+
+- assert:
+ that:
+ - 'integer_sum2 == 3'
+ - 'integer_sum2|type_debug == "int"'
+
+- name: concatenate int and string
+ set_fact:
+ string_sum: "'{{ [i_one, s_two]|join('') }}'"
+
+- assert:
+ that:
+ - 'string_sum == "12"'
+ - 'string_sum|type_debug in ["str", "unicode"]'
+
+- name: add two lists
+ set_fact:
+ list_sum: "{{ list_one + list_two }}"
+
+- assert:
+ that:
+ - 'list_sum == ["one", "two", "three", "four"]'
+ - 'list_sum|type_debug == "list"'
+
+- name: add two lists, multi expression
+ set_fact:
+ list_sum_multi: "{{ list_one }} + {{ list_two }}"
+
+- assert:
+ that:
+ - 'list_sum_multi|type_debug in ["str", "unicode"]'
+
+- name: add two dicts
+ set_fact:
+ dict_sum: "{{ dict_one + dict_two }}"
+ ignore_errors: yes
+
+- assert:
+ that:
+ - 'dict_sum is undefined'
+
+- name: loop through list with strings
+ set_fact:
+ list_for_strings: "{% for x in list_one %}{{ x }}{% endfor %}"
+
+- assert:
+ that:
+ - 'list_for_strings == "onetwo"'
+ - 'list_for_strings|type_debug in ["str", "unicode"]'
+
+- name: loop through list with int
+ set_fact:
+ list_for_int: "{% for x in list_one_int %}{{ x }}{% endfor %}"
+
+- assert:
+ that:
+ - 'list_for_int == 1'
+ - 'list_for_int|type_debug == "int"'
+
+- name: loop through list with ints
+ set_fact:
+ list_for_ints: "{% for x in list_ints %}{{ x }}{% endfor %}"
+
+- assert:
+ that:
+ - 'list_for_ints == 42'
+ - 'list_for_ints|type_debug == "int"'
+
+- name: loop through list to create a new list
+ set_fact:
+ list_from_list: "[{% for x in list_ints %}{{ x }},{% endfor %}]"
+
+- assert:
+ that:
+ - 'list_from_list == [4, 2]'
+ - 'list_from_list|type_debug == "list"'
diff --git a/test/integration/targets/jinja2_native_types/test_dunder.yml b/test/integration/targets/jinja2_native_types/test_dunder.yml
new file mode 100644
index 00000000..df5ea927
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_dunder.yml
@@ -0,0 +1,23 @@
+- name: test variable dunder
+ set_fact:
+ var_dunder: "{{ b_true.__class__ }}"
+
+- assert:
+ that:
+ - 'var_dunder|type_debug == "type"'
+
+- name: test constant dunder
+ set_fact:
+ const_dunder: "{{ true.__class__ }}"
+
+- assert:
+ that:
+ - 'const_dunder|type_debug == "type"'
+
+- name: test constant dunder to string
+ set_fact:
+ const_dunder: "{{ true.__class__|string }}"
+
+- assert:
+ that:
+ - 'const_dunder|type_debug in ["str", "unicode", "NativeJinjaText"]'
diff --git a/test/integration/targets/jinja2_native_types/test_hostvars.yml b/test/integration/targets/jinja2_native_types/test_hostvars.yml
new file mode 100644
index 00000000..ef0047b8
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_hostvars.yml
@@ -0,0 +1,10 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Print vars
+ debug:
+ var: vars
+
+ - name: Print hostvars
+ debug:
+ var: hostvars
diff --git a/test/integration/targets/jinja2_native_types/test_none.yml b/test/integration/targets/jinja2_native_types/test_none.yml
new file mode 100644
index 00000000..1d26154c
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_none.yml
@@ -0,0 +1,11 @@
+- name: test none
+ set_fact:
+ none_var: "{{ yaml_none }}"
+ none_var_direct: "{{ None }}"
+
+- assert:
+ that:
+ - 'none_var is sameas none'
+ - 'none_var|type_debug == "NoneType"'
+ - 'none_var_direct is sameas none'
+ - 'none_var_direct|type_debug == "NoneType"'
diff --git a/test/integration/targets/jinja2_native_types/test_template.yml b/test/integration/targets/jinja2_native_types/test_template.yml
new file mode 100644
index 00000000..0896ac14
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_template.yml
@@ -0,0 +1,27 @@
+- block:
+ - name: Template file with newlines
+ template:
+ src: test_template_newlines.j2
+ dest: test_template_newlines.res
+
+ - name: Dump template file
+ stat:
+ path: test_template_newlines.j2
+ get_checksum: yes
+ register: template_stat
+
+ - name: Dump result file
+ stat:
+ path: test_template_newlines.res
+ get_checksum: yes
+ register: result_stat
+
+  - name: Check that the number of newlines from the original template is preserved
+ assert:
+ that:
+ - template_stat.stat.checksum == result_stat.stat.checksum
+ always:
+ - name: Clean up
+ file:
+ path: test_template_newlines.res
+ state: absent
diff --git a/test/integration/targets/jinja2_native_types/test_template_newlines.j2 b/test/integration/targets/jinja2_native_types/test_template_newlines.j2
new file mode 100644
index 00000000..ca887efa
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_template_newlines.j2
@@ -0,0 +1,4 @@
+First line.
+
+
+
diff --git a/test/integration/targets/jinja2_native_types/test_types.yml b/test/integration/targets/jinja2_native_types/test_types.yml
new file mode 100644
index 00000000..f5659d4e
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_types.yml
@@ -0,0 +1,20 @@
+- assert:
+ that:
+ - 'i_one|type_debug == "int"'
+ - 's_one|type_debug == "AnsibleUnicode"'
+ - 'dict_one|type_debug == "dict"'
+ - 'dict_one is mapping'
+ - 'list_one|type_debug == "list"'
+ - 'b_true|type_debug == "bool"'
+ - 's_true|type_debug == "AnsibleUnicode"'
+
+- set_fact:
+ a_list: "{{[i_one, s_two]}}"
+
+- assert:
+ that:
+ - 'a_list|type_debug == "list"'
+ - 'a_list[0] == 1'
+ - 'a_list[0]|type_debug == "int"'
+ - 'a_list[1] == "2"'
+ - 'a_list[1]|type_debug == "AnsibleUnicode"'
diff --git a/test/integration/targets/jinja2_native_types/test_vault.yml b/test/integration/targets/jinja2_native_types/test_vault.yml
new file mode 100644
index 00000000..2daa3c5b
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_vault.yml
@@ -0,0 +1,16 @@
+- hosts: localhost
+ gather_facts: no
+ vars:
+ # ansible-vault encrypt_string root
+ # vault_password_file = test_vault_pass
+ vaulted_root_string: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 39333565666430306232343266346635373235626564396332323838613063646132653436303239
+ 3133363232306334393863343563366131373565616338380a666339383162333838653631663131
+ 36633637303862353435643930393664386365323164643831363332666435303436373365393162
+ 6535383134323539380a613663366631626534313837313565666665336164353362373431666366
+ 3464
+ tasks:
+ - name: make sure group root exists
+ group:
+ name: "{{ vaulted_root_string }}"
diff --git a/test/integration/targets/jinja2_native_types/test_vault_pass b/test/integration/targets/jinja2_native_types/test_vault_pass
new file mode 100644
index 00000000..9daeafb9
--- /dev/null
+++ b/test/integration/targets/jinja2_native_types/test_vault_pass
@@ -0,0 +1 @@
+test
diff --git a/test/integration/targets/known_hosts/aliases b/test/integration/targets/known_hosts/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/known_hosts/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/known_hosts/defaults/main.yml b/test/integration/targets/known_hosts/defaults/main.yml
new file mode 100644
index 00000000..eb0a4ba3
--- /dev/null
+++ b/test/integration/targets/known_hosts/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+example_org_rsa_key: >
+ example.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAglyZmHHWskQ9wkh8LYbIqzvg99/oloneH7BaZ02ripJUy/2Zynv4tgUfm9fdXvAb1XXCEuTRnts9FBer87+voU0FPRgx3CfY9Sgr0FspUjnm4lqs53FIab1psddAaS7/F7lrnjl6VqBtPwMRQZG7qlml5uogGJwYJHxX0PGtsdoTJsM=
diff --git a/test/integration/targets/known_hosts/files/existing_known_hosts b/test/integration/targets/known_hosts/files/existing_known_hosts
new file mode 100644
index 00000000..2564f409
--- /dev/null
+++ b/test/integration/targets/known_hosts/files/existing_known_hosts
@@ -0,0 +1,5 @@
+example.com ssh-dss AAAAB3NzaC1kc3MAAACBALT8YHxZ59d8yX4oQNPbpdK9AMPRQGKFY9X13S2fp4UMPijiB3ETxU1bAyVTjTbsoag065naFt13aIVl+u0MDPfMuYgVJFEorAZkDlBixvT25zpKyQhI4CtHhZ9Y9YWug4xLqSaFUYEPO31Bie7k8xRfDwsHtzTRPp/0zRURwARHAAAAFQDLx2DZMm3cR8cZtbq4zdSvkXLh0wAAAIAalkQYziu2b5dDRQMiFpDLpPdbymyVhDMmRKnXwAB1+dhGyJLGvfe0xO+ibqGXMp1aZ1iC3a/vHTpYKDVqKIIpFg5r0fxAcAZkJR0aRC8RDxW/IclbIliETD71osIT8I47OFc7vAVCWP8JbV3ZYzR+i98WUkmZ4/ZUzsDl2gi7WAAAAIAsdTGwAo4Fs784TdP2tIHCqxAIz2k4tWmZyeRmXkH5K/P1o9XSh3RNxvFKK7BY6dQK+h9jLunMBs0SCzhMoTcXaJq331kmLJltjq5peo0PnLGnQz5pas0PD7p7gb+soklmHoVp7J2oMC/U4N1Rxr6g9sv8Rpsf1PTPDT3sEbze6A== root@freezer
+|1|d71/U7CbOH3Su+d2zxlbmiNfXtI=|g2YSPAVoK7bmg16FCOOPKTZe2BM= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+|1|L0TqxOhAVh6mLZ2lbHdTv3owun0=|vn0La5pbHNxin3XzQQdvaOulvVU= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCNLCAA/SjVF3jkmlAlkgh+GtZdgxtusHaK66fcA7XSgCpQOdri1dGmND6pQDGwsxiKMy4Ou1GB2DR4N0G9T5E8=
+|1|WPo7yAOdlQKLSuRatNJCmDoga0k=|D/QybGglKokWuEQUe9Okpy5uSh0= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCNLCAA/SjVF3jkmlAlkgh+GtZdgxtusHaK66fcA7XSgCpQOdri1dGmND6pQDGwsxiKMy4Ou1GB2DR4N0G9T5E8=
+# example.net ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM6OSqweGdPdQ/metQaf738AdN3P+itYp1AypOTgXkyj root@localhost
diff --git a/test/integration/targets/known_hosts/meta/main.yml b/test/integration/targets/known_hosts/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/known_hosts/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/known_hosts/tasks/main.yml b/test/integration/targets/known_hosts/tasks/main.yml
new file mode 100644
index 00000000..4ea91c35
--- /dev/null
+++ b/test/integration/targets/known_hosts/tasks/main.yml
@@ -0,0 +1,377 @@
+# test code for the known_hosts module
+# (c) 2017, Marius Gedminas <marius@gedmin.as>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: copy an existing file in place
+ copy:
+ src: existing_known_hosts
+ dest: "{{ output_dir }}/known_hosts"
+
+# test addition
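+# (check_mode performs a dry run; the module still reports the would-be diff,
+# which the assertions below inspect through the registered result)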
+
+- name: add a new host in check mode
+ check_mode: yes
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ register: diff
+
+- name: assert that the diff looks as expected (the key was added at the end)
+ assert:
+ that:
+ - 'diff is changed'
+ - 'diff.diff.before_header == diff.diff.after_header == output_dir|expanduser + "/known_hosts"'
+ - 'diff.diff.after.splitlines()[:-1] == diff.diff.before.splitlines()'
+ - 'diff.diff.after.splitlines()[-1] == example_org_rsa_key.strip()'
+
+- name: add a new host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts
+
+- name: assert that the key was added and ordering preserved
+ assert:
+ that:
+ - 'result is changed'
+ - 'known_hosts.stdout_lines[0].startswith("example.com")'
+ - 'known_hosts.stdout_lines[4].startswith("# example.net")'
+ - 'known_hosts.stdout_lines[-1].strip() == example_org_rsa_key.strip()'
+
+# test idempotence of addition
+
+- name: add the same host in check mode
+ check_mode: yes
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ register: check
+
+- name: assert that no changes were expected
+ assert:
+ that:
+ - 'check is not changed'
+ - 'check.diff.before == check.diff.after'
+
+- name: add the same host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v2
+
+- name: assert that no changes happened
+ assert:
+ that:
+ - 'result is not changed'
+ - 'result.diff.before == result.diff.after'
+ - 'known_hosts.stdout == known_hosts_v2.stdout'
+
+# test removal
+
+- name: remove the host in check mode
+ check_mode: yes
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: absent
+ path: "{{output_dir}}/known_hosts"
+ register: diff
+
+- name: assert that the diff looks as expected (the key was removed)
+ assert:
+ that:
+ - 'diff.diff.before_header == diff.diff.after_header == output_dir|expanduser + "/known_hosts"'
+ - 'diff.diff.before.splitlines()[-1] == example_org_rsa_key.strip()'
+ - 'diff.diff.after.splitlines() == diff.diff.before.splitlines()[:-1]'
+
+- name: remove the host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: absent
+ path: "{{output_dir}}/known_hosts"
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v3
+
+- name: assert that the key was removed and ordering preserved
+ assert:
+ that:
+ - 'result is changed'
+ - '"example.org" not in known_hosts_v3.stdout'
+ - 'known_hosts_v3.stdout_lines[0].startswith("example.com")'
+ - 'known_hosts_v3.stdout_lines[-1].startswith("# example.net")'
+
+# test idempotence of removal
+
+- name: remove the same host in check mode
+ check_mode: yes
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: absent
+ path: "{{output_dir}}/known_hosts"
+ register: check
+
+- name: assert that no changes were expected
+ assert:
+ that:
+ - 'check is not changed'
+ - 'check.diff.before == check.diff.after'
+
+- name: remove the same host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: absent
+ path: "{{output_dir}}/known_hosts"
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v4
+
+- name: assert that no changes happened
+ assert:
+ that:
+ - 'result is not changed'
+ - 'result.diff.before == result.diff.after'
+ - 'known_hosts_v3.stdout == known_hosts_v4.stdout'
+
+# test addition as a hashed host
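+# (a hashed entry stores the host as "|1|<salt>|<hmac>", the HashKnownHosts
+# format of ssh-keygen -H, so the plain hostname cannot be read from the file)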
+
+- name: add a new hashed host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ hash_host: yes
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v5
+
+- name: assert that the key was added and ordering preserved
+ assert:
+ that:
+ - 'result is changed'
+ - 'known_hosts_v5.stdout_lines[0].startswith("example.com")'
+ - 'known_hosts_v5.stdout_lines[4].startswith("# example.net")'
+ - 'known_hosts_v5.stdout_lines[-1].strip().startswith("|1|")'
+ - 'known_hosts_v5.stdout_lines[-1].strip().endswith(example_org_rsa_key.strip().split()[-1])'
+
+# test idempotence of hashed addition
+
+- name: add the same host hashed
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ hash_host: yes
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v6
+
+- name: assert that no changes happened
+ assert:
+ that:
+ - 'result is not changed'
+ - 'result.diff.before == result.diff.after'
+ - 'known_hosts_v5.stdout == known_hosts_v6.stdout'
+
+# test hashed removal
+
+- name: remove the hashed host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: absent
+ path: "{{output_dir}}/known_hosts"
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v7
+
+- name: assert that the key was removed and ordering preserved
+ assert:
+ that:
+ - 'result is changed'
+ - 'example_org_rsa_key.strip().split()[-1] not in known_hosts_v7.stdout'
+ - 'known_hosts_v7.stdout_lines[0].startswith("example.com")'
+ - 'known_hosts_v7.stdout_lines[-1].startswith("# example.net")'
+
+# test idempotence of hashed removal
+
+- name: remove the same hashed host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: absent
+ path: "{{output_dir}}/known_hosts"
+ register: result
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v8
+
+- name: assert that no changes happened
+ assert:
+ that:
+ - 'result is not changed'
+ - 'result.diff.before == result.diff.after'
+ - 'known_hosts_v7.stdout == known_hosts_v8.stdout'
+
+# test roundtrip plaintext => hashed => plaintext
+# The assertions are rather relaxed, because most of this has been tested previously
+
+- name: add a new host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v8
+
+- name: assert the plaintext host is there
+ assert:
+ that:
+ - 'known_hosts_v8.stdout_lines[-1].strip() == example_org_rsa_key.strip()'
+
+- name: update the host to hashed mode
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ hash_host: true
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v9
+
+- name: assert the hashed host is there
+ assert:
+ that:
+ - 'known_hosts_v9.stdout_lines[-1].strip().startswith("|1|")'
+ - 'known_hosts_v9.stdout_lines[-1].strip().endswith(example_org_rsa_key.strip().split()[-1])'
+
+- name: downgrade the host to plaintext mode
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v10
+
+- name: assert the plaintext host is there
+ assert:
+ that:
+ - 'known_hosts_v10.stdout_lines[5].strip() == example_org_rsa_key.strip()'
+
+# ... and restore the pristine file (removing the host) for the next test
+
+- name: copy an existing file in place
+ copy:
+ src: existing_known_hosts
+ dest: "{{ output_dir }}/known_hosts"
+
+# Test key changes
+
+- name: add a hashed host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ hash_host: true
+
+- name: change the key of a hashed host
+ known_hosts:
+ name: example.org
+ key: "{{ example_org_rsa_key.strip()[:-7] + 'RANDOM=' }}"
+ state: present
+ path: "{{output_dir}}/known_hosts"
+ hash_host: true
+
+- name: get the file content
+ command: "cat {{output_dir}}/known_hosts"
+ register: known_hosts_v11
+
+- name: assert the change took place and the key got modified
+ assert:
+ that:
+ - 'known_hosts_v11.stdout_lines[-1].strip().endswith("RANDOM=")'
+
+# test errors
+
+- name: Try using a comma separated list of hosts
+ known_hosts:
+ name: example.org,acme.com
+ key: "{{ example_org_rsa_key }}"
+ path: "{{output_dir}}/known_hosts"
+ ignore_errors: yes
+ register: result
+
+- name: Assert that error message was displayed
+ assert:
+ that:
+ - result is failed
+ - result.msg == 'Comma separated list of names is not supported. Please pass a single name to lookup in the known_hosts file.'
+
+- name: Try using a name that does not match the key
+ known_hosts:
+ name: example.com
+ key: "{{ example_org_rsa_key }}"
+ path: "{{output_dir}}/known_hosts"
+ ignore_errors: yes
+ register: result
+
+- name: Assert that name checking failed with error message
+ assert:
+ that:
+ - result is failed
+ - result.msg == 'Host parameter does not match hashed host field in supplied key'
diff --git a/test/integration/targets/limit_inventory/aliases b/test/integration/targets/limit_inventory/aliases
new file mode 100644
index 00000000..3005e4b2
--- /dev/null
+++ b/test/integration/targets/limit_inventory/aliases
@@ -0,0 +1 @@
+shippable/posix/group4
diff --git a/test/integration/targets/limit_inventory/hosts.yml b/test/integration/targets/limit_inventory/hosts.yml
new file mode 100644
index 00000000..2e1b1927
--- /dev/null
+++ b/test/integration/targets/limit_inventory/hosts.yml
@@ -0,0 +1,5 @@
+all:
+ hosts:
+ host1:
+ host2:
+ host3:
diff --git a/test/integration/targets/limit_inventory/runme.sh b/test/integration/targets/limit_inventory/runme.sh
new file mode 100755
index 00000000..6a142b3b
--- /dev/null
+++ b/test/integration/targets/limit_inventory/runme.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+set -eux
+
+trap 'echo "Host pattern limit test failed"' ERR
+
+# https://github.com/ansible/ansible/issues/61964
+
+# These tests should return all hosts
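+# (empty elements in a comma-separated host pattern are ignored, so a pattern
+# consisting only of commas imposes no limit and matches everything)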
+ansible -i hosts.yml all --limit ,, --list-hosts | tee out ; grep -q 'hosts (3)' out
+ansible -i hosts.yml ,, --list-hosts | tee out ; grep -q 'hosts (3)' out
+ansible -i hosts.yml , --list-hosts | tee out ; grep -q 'hosts (3)' out
+ansible -i hosts.yml all --limit , --list-hosts | tee out ; grep -q 'hosts (3)' out
+ansible -i hosts.yml all --limit '' --list-hosts | tee out ; grep -q 'hosts (3)' out
+
+
+# Only one host
+ansible -i hosts.yml all --limit ,,host1 --list-hosts | tee out ; grep -q 'hosts (1)' out
+ansible -i hosts.yml ,,host1 --list-hosts | tee out ; grep -q 'hosts (1)' out
+
+ansible -i hosts.yml all --limit host1,, --list-hosts | tee out ; grep -q 'hosts (1)' out
+ansible -i hosts.yml host1,, --list-hosts | tee out ; grep -q 'hosts (1)' out
+
+
+# Only two hosts
+ansible -i hosts.yml all --limit host1,,host3 --list-hosts | tee out ; grep -q 'hosts (2)' out
+ansible -i hosts.yml host1,,host3 --list-hosts | tee out ; grep -q 'hosts (2)' out
+
+ansible -i hosts.yml all --limit 'host1, , ,host3' --list-hosts | tee out ; grep -q 'hosts (2)' out
+ansible -i hosts.yml 'host1, , ,host3' --list-hosts | tee out ; grep -q 'hosts (2)' out
+
diff --git a/test/integration/targets/lineinfile/aliases b/test/integration/targets/lineinfile/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/lineinfile/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/lineinfile/files/firstmatch.txt b/test/integration/targets/lineinfile/files/firstmatch.txt
new file mode 100644
index 00000000..347132c6
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/firstmatch.txt
@@ -0,0 +1,5 @@
+line1
+line1
+line1
+line2
+line3
diff --git a/test/integration/targets/lineinfile/files/test.conf b/test/integration/targets/lineinfile/files/test.conf
new file mode 100644
index 00000000..15404cd6
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/test.conf
@@ -0,0 +1,5 @@
+[section_one]
+
+[section_two]
+
+[section_three]
diff --git a/test/integration/targets/lineinfile/files/test.txt b/test/integration/targets/lineinfile/files/test.txt
new file mode 100644
index 00000000..8187db9f
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/test.txt
@@ -0,0 +1,5 @@
+This is line 1
+This is line 2
+REF this is a line for backrefs REF
+This is line 4
+This is line 5
diff --git a/test/integration/targets/lineinfile/files/test_58923.txt b/test/integration/targets/lineinfile/files/test_58923.txt
new file mode 100644
index 00000000..34579fde
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/test_58923.txt
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+case "`uname`" in
+ Darwin*) if [ -z "$JAVA_HOME" ] ; then
diff --git a/test/integration/targets/lineinfile/files/testempty.txt b/test/integration/targets/lineinfile/files/testempty.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/testempty.txt
diff --git a/test/integration/targets/lineinfile/files/testmultiple.txt b/test/integration/targets/lineinfile/files/testmultiple.txt
new file mode 100644
index 00000000..fb57082b
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/testmultiple.txt
@@ -0,0 +1,7 @@
+This is line 1
+
+This is line 2
+
+This is line 3
+
+This is line 4
diff --git a/test/integration/targets/lineinfile/files/testnoeof.txt b/test/integration/targets/lineinfile/files/testnoeof.txt
new file mode 100644
index 00000000..152780b9
--- /dev/null
+++ b/test/integration/targets/lineinfile/files/testnoeof.txt
@@ -0,0 +1,2 @@
+This is line 1
+This is line 2
\ No newline at end of file
diff --git a/test/integration/targets/lineinfile/meta/main.yml b/test/integration/targets/lineinfile/meta/main.yml
new file mode 100644
index 00000000..98e60f78
--- /dev/null
+++ b/test/integration/targets/lineinfile/meta/main.yml
@@ -0,0 +1,20 @@
+# test code for the lineinfile module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/lineinfile/tasks/main.yml b/test/integration/targets/lineinfile/tasks/main.yml
new file mode 100644
index 00000000..840051cf
--- /dev/null
+++ b/test/integration/targets/lineinfile/tasks/main.yml
@@ -0,0 +1,1157 @@
+# test code for the lineinfile module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: deploy the test file for lineinfile
+ copy:
+ src: test.txt
+ dest: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert that the test file was deployed
+ assert:
+ that:
+ - result is changed
+ - "result.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'"
+ - "result.state == 'file'"
+
+- name: insert a line at the beginning of the file, and back it up
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "New line at the beginning"
+ insertbefore: "BOF"
+ backup: yes
+ register: result1
+
+- name: insert a line at the beginning of the file again
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "New line at the beginning"
+ insertbefore: "BOF"
+ register: result2
+
+- name: assert that the line was inserted at the head of the file
+ assert:
+ that:
+ - result1 is changed
+ - result2 is not changed
+ - result1.msg == 'line added'
+ - result1.backup != ''
+
+- name: stat the backup file
+ stat:
+ path: "{{ result1.backup }}"
+ register: result
+
+- name: assert the backup file matches the previous hash
+ assert:
+ that:
+ - "result.stat.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'"
+
+- name: stat the test after the insert at the head
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test hash is what we expect for the file with the insert at the head
+ assert:
+ that:
+ - "result.stat.checksum == '7eade4042b23b800958fe807b5bfc29f8541ec09'"
+
+- name: insert a line at the end of the file
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "New line at the end"
+ insertafter: "EOF"
+ register: result
+
+- name: assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: stat the test after the insert at the end
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'fb57af7dc10a1006061b000f1f04c38e4bef50a9'"
+
+- name: insert a line after the first line
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "New line after line 1"
+ insertafter: "^This is line 1$"
+ register: result
+
+- name: assert that the line was inserted after the first line
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: stat the test after insert after the first line
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after the insert after the first line
+ assert:
+ that:
+ - "result.stat.checksum == '5348da605b1bc93dbadf3a16474cdf22ef975bec'"
+
+- name: insert a line before the last line
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "New line before line 5"
+ insertbefore: "^This is line 5$"
+ register: result
+
+- name: assert that the line was inserted before the last line
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: stat the test after the insert before the last line
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after the insert before the last line
+ assert:
+ that:
+ - "result.stat.checksum == '2e9e460ff68929e4453eb765761fd99814f6e286'"
+
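+# with backrefs=yes, lineinfile expands \1-style capture groups from regexp
+# into line, and leaves the file unchanged when regexp does not match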
+- name: Replace a line with backrefs
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "This is line 3"
+ backrefs: yes
+ regexp: "^(REF) .* \\1$"
+ register: backrefs_result1
+
+- name: Replace a line with backrefs again
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "This is line 3"
+ backrefs: yes
+ regexp: "^(REF) .* \\1$"
+ register: backrefs_result2
+- command: cat {{ output_dir }}/test.txt
+
+- name: assert that the line with backrefs was changed
+ assert:
+ that:
+ - backrefs_result1 is changed
+ - backrefs_result2 is not changed
+ - "backrefs_result1.msg == 'line replaced'"
+
+- name: stat the test after the backref line was replaced
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after backref line was replaced
+ assert:
+ that:
+ - "result.stat.checksum == '72f60239a735ae06e769d823f5c2b4232c634d9c'"
+
+- name: remove the middle line
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: absent
+ regexp: "^This is line 3$"
+ register: result
+
+- name: assert that the line was removed
+ assert:
+ that:
+ - result is changed
+ - "result.msg == '1 line(s) removed'"
+
+- name: stat the test after the middle line was removed
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after the middle line was removed
+ assert:
+ that:
+ - "result.stat.checksum == 'd4eeb07bdebab2d1cdb3ec4a3635afa2618ad4ea'"
+
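+# validate runs the given command with %s replaced by a temporary copy of the
+# changed file; the edit is only committed when that command exits 0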
+- name: run a validation script that succeeds
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: absent
+ regexp: "^This is line 5$"
+ validate: "true %s"
+ register: result
+
+- name: assert that the file validated after removing a line
+ assert:
+ that:
+ - result is changed
+ - "result.msg == '1 line(s) removed'"
+
+- name: stat the test after the validation succeeded
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after the validation succeeded
+ assert:
+ that:
+ - "result.stat.checksum == 'ab56c210ea82839a54487464800fed4878cb2608'"
+
+- name: run a validation script that fails
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: absent
+ regexp: "^This is line 1$"
+ validate: "/bin/false %s"
+ register: result
+ ignore_errors: yes
+
+- name: assert that the validate failed
+ assert:
+ that:
+ - "result.failed == true"
+
+- name: stat the test after the validation failed
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches the previous after the validation failed
+ assert:
+ that:
+ - "result.stat.checksum == 'ab56c210ea82839a54487464800fed4878cb2608'"
+
+- name: use create=yes
+ lineinfile:
+ dest: "{{ output_dir }}/new_test.txt"
+ create: yes
+ insertbefore: BOF
+ state: present
+ line: "This is a new file"
+ register: result
+
+- name: assert that the new file was created
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: validate that the newly created file exists
+ stat:
+ path: "{{ output_dir }}/new_test.txt"
+ register: result
+ ignore_errors: yes
+
+- name: assert the newly created test checksum matches
+ assert:
+ that:
+ - "result.stat.checksum == '038f10f9e31202451b093163e81e06fbac0c6f3a'"
+
+- name: Create a file without a path
+ lineinfile:
+ dest: file.txt
+ create: yes
+ line: Test line
+ register: create_no_path_test
+
+- name: Stat the file
+ stat:
+ path: file.txt
+ register: create_no_path_file
+
+- name: Ensure file was created
+ assert:
+ that:
+ - create_no_path_test is changed
+ - create_no_path_file.stat.exists
+
+# Test EOF in cases where file has no newline at EOF
+- name: testnoeof deploy the file for lineinfile
+ copy:
+ src: testnoeof.txt
+ dest: "{{ output_dir }}/testnoeof.txt"
+ register: result
+
+- name: testnoeof insert a line at the end of the file
+ lineinfile:
+ dest: "{{ output_dir }}/testnoeof.txt"
+ state: present
+ line: "New line at the end"
+ insertafter: "EOF"
+ register: result
+
+- name: testnoeof assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: insert multiple lines at the end of the file
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "This is a line\nwith \\n character"
+ insertafter: "EOF"
+ register: result
+
+- name: assert that the multiple lines were inserted
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: testnoeof stat the no newline EOF test after the insert at the end
+ stat:
+ path: "{{ output_dir }}/testnoeof.txt"
+ register: result
+
+- name: testnoeof assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'f9af7008e3cb67575ce653d094c79cabebf6e523'"
+
+# Test EOF with empty file to make sure no unnecessary newline is added
+- name: testempty deploy the testempty file for lineinfile
+ copy:
+ src: testempty.txt
+ dest: "{{ output_dir }}/testempty.txt"
+ register: result
+
+- name: testempty insert a line at the end of the file
+ lineinfile:
+ dest: "{{ output_dir }}/testempty.txt"
+ state: present
+ line: "New line at the end"
+ insertafter: "EOF"
+ register: result
+
+- name: testempty assert that the line was inserted at the end of the file
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line added'"
+
+- name: testempty stat the test after the insert at the end
+ stat:
+ path: "{{ output_dir }}/testempty.txt"
+ register: result
+
+- name: testempty assert test checksum matches after the insert at the end
+ assert:
+ that:
+ - "result.stat.checksum == 'f440dc65ea9cec3fd496c1479ddf937e1b949412'"
+
+- stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after inserting multiple lines
+ assert:
+ that:
+ - "result.stat.checksum == 'fde683229429a4f05d670e6c10afc875e1d5c489'"
+
+- name: replace a line with backrefs included in the line
+ lineinfile:
+ dest: "{{ output_dir }}/test.txt"
+ state: present
+ line: "New \\1 created with the backref"
+ backrefs: yes
+ regexp: "^This is (line 4)$"
+ register: result
+
+- name: assert that the line with backrefs was changed
+ assert:
+ that:
+ - result is changed
+ - "result.msg == 'line replaced'"
+
+- name: stat the test after the backref line was replaced
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: assert test checksum matches after backref line was replaced
+ assert:
+ that:
+ - "result.stat.checksum == '981ad35c4b30b03bc3a1beedce0d1e72c491898e'"
+
+###################################################################
+# issue 8535
+
+- name: create a new file for testing quoting issues
+ file:
+ dest: "{{ output_dir }}/test_quoting.txt"
+ state: touch
+ register: result
+
+- name: assert the new file was created
+ assert:
+ that:
+ - result is changed
+
+- name: use with_items to add code-like strings to the quoting test file
+ lineinfile:
+ dest: "{{ output_dir }}/test_quoting.txt"
+ line: "{{ item }}"
+ insertbefore: BOF
+ with_items:
+ - "'foo'"
+ - "dotenv.load();"
+ - "var dotenv = require('dotenv');"
+ register: result
+
+- name: assert the quote test file was modified correctly
+ assert:
+ that:
+ - result.results|length == 3
+ - result.results[0] is changed
+ - result.results[0].item == "'foo'"
+ - result.results[1] is changed
+ - result.results[1].item == "dotenv.load();"
+ - result.results[2] is changed
+ - result.results[2].item == "var dotenv = require('dotenv');"
+
+- name: stat the quote test file
+ stat:
+ path: "{{ output_dir }}/test_quoting.txt"
+ register: result
+
+- name: assert test checksum matches after the code-like strings were added
+ assert:
+ that:
+ - "result.stat.checksum == '7dc3cb033c3971e73af0eaed6623d4e71e5743f1'"
+
+- name: insert a line into the quoted file with a single quote
+ lineinfile:
+ dest: "{{ output_dir }}/test_quoting.txt"
+ line: "import g'"
+ register: result
+
+- name: assert that the quoted file was changed
+ assert:
+ that:
+ - result is changed
+
+- name: stat the quote test file
+ stat:
+ path: "{{ output_dir }}/test_quoting.txt"
+ register: result
+
+- name: assert test checksum matches after the single-quote line was inserted
+ assert:
+ that:
+ - "result.stat.checksum == '73b271c2cc1cef5663713bc0f00444b4bf9f4543'"
+
+- name: insert a line with multiple double-quoted strings into the quoting test file
+ lineinfile:
+ dest: "{{ output_dir }}/test_quoting.txt"
+ line: "\"quote\" and \"unquote\""
+ register: result
+
+- name: assert that the quoted file was changed
+ assert:
+ that:
+ - result is changed
+
+- name: stat the quote test file
+ stat:
+ path: "{{ output_dir }}/test_quoting.txt"
+ register: result
+
+- name: assert test checksum matches after the double-quoted line was inserted
+ assert:
+ that:
+ - "result.stat.checksum == 'b10ab2a3c3b6492680c8d0b1d6f35aa6b8f9e731'"
+
+###################################################################
+# Issue 28721
+
+- name: Deploy the testmultiple file
+ copy:
+ src: testmultiple.txt
+ dest: "{{ output_dir }}/testmultiple.txt"
+ register: result
+
+- name: Assert that the testmultiple file was deployed
+ assert:
+ that:
+ - result is changed
+ - result.checksum == '3e0090a34fb641f3c01e9011546ff586260ea0ea'
+ - result.state == 'file'
+
+# Test insertafter
+- name: Write the same line to a file inserted after different lines
+ lineinfile:
+ path: "{{ output_dir }}/testmultiple.txt"
+ insertafter: "{{ item.regex }}"
+ line: "{{ item.replace }}"
+ register: _multitest_1
+ with_items: "{{ test_regexp }}"
+
+- name: Assert that the line is added once only
+ assert:
+ that:
+ - _multitest_1.results.0 is changed
+ - _multitest_1.results.1 is not changed
+ - _multitest_1.results.2 is not changed
+ - _multitest_1.results.3 is not changed
+
+- name: Do the same thing again to check for changes
+ lineinfile:
+ path: "{{ output_dir }}/testmultiple.txt"
+ insertafter: "{{ item.regex }}"
+ line: "{{ item.replace }}"
+ register: _multitest_2
+ with_items: "{{ test_regexp }}"
+
+- name: Assert that the line is not added again
+ assert:
+ that:
+ - _multitest_2.results.0 is not changed
+ - _multitest_2.results.1 is not changed
+ - _multitest_2.results.2 is not changed
+ - _multitest_2.results.3 is not changed
+
+- name: Stat the insertafter file
+ stat:
+ path: "{{ output_dir }}/testmultiple.txt"
+ register: result
+
+- name: Assert that the insertafter file matches expected checksum
+ assert:
+ that:
+ - result.stat.checksum == 'c6733b6c53ddd0e11e6ba39daa556ef8f4840761'
+
+# Test insertbefore
+
+- name: Deploy the testmultiple file
+ copy:
+ src: testmultiple.txt
+ dest: "{{ output_dir }}/testmultiple.txt"
+ register: result
+
+- name: Assert that the testmultiple file was deployed
+ assert:
+ that:
+ - result is changed
+ - result.checksum == '3e0090a34fb641f3c01e9011546ff586260ea0ea'
+ - result.state == 'file'
+
+- name: Write the same line to a file inserted before different lines
+ lineinfile:
+ path: "{{ output_dir }}/testmultiple.txt"
+ insertbefore: "{{ item.regex }}"
+ line: "{{ item.replace }}"
+ register: _multitest_3
+ with_items: "{{ test_regexp }}"
+
+- name: Assert that the line is added once only
+ assert:
+ that:
+ - _multitest_3.results.0 is changed
+ - _multitest_3.results.1 is not changed
+ - _multitest_3.results.2 is not changed
+ - _multitest_3.results.3 is not changed
+
+- name: Do the same thing again to check for changes
+ lineinfile:
+ path: "{{ output_dir }}/testmultiple.txt"
+ insertbefore: "{{ item.regex }}"
+ line: "{{ item.replace }}"
+ register: _multitest_4
+ with_items: "{{ test_regexp }}"
+
+- name: Assert that the line is not added again
+ assert:
+ that:
+ - _multitest_4.results.0 is not changed
+ - _multitest_4.results.1 is not changed
+ - _multitest_4.results.2 is not changed
+ - _multitest_4.results.3 is not changed
+
+- name: Stat the insertbefore file
+ stat:
+ path: "{{ output_dir }}/testmultiple.txt"
+ register: result
+
+- name: Assert that the insertbefore file matches expected checksum
+ assert:
+ that:
+ - result.stat.checksum == '5d298651fbc377b45257da10308a9dc2fe1f8be5'
+
+###################################################################
+# Issue 36156
+# Test insertbefore and insertafter with regexp
+
+- name: Deploy the test.conf file
+ copy:
+ src: test.conf
+ dest: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the test.conf file was deployed
+ assert:
+ that:
+ - result is changed
+ - result.checksum == '6037f13e419b132eb3fd20a89e60c6c87a6add38'
+ - result.state == 'file'
+
+# Test insertafter
+- name: Insert lines after with regexp
+ lineinfile:
+ path: "{{ output_dir }}/test.conf"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ insertafter: "{{ item.after }}"
+ with_items: "{{ test_befaf_regexp }}"
+ register: _multitest_5
+
+- name: Do the same thing again and check for changes
+ lineinfile:
+ path: "{{ output_dir }}/test.conf"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ insertafter: "{{ item.after }}"
+ with_items: "{{ test_befaf_regexp }}"
+ register: _multitest_6
+
+- name: Assert that the file was changed the first time but not the second time
+ assert:
+ that:
+ - item.0 is changed
+ - item.1 is not changed
+ with_together:
+ - "{{ _multitest_5.results }}"
+ - "{{ _multitest_6.results }}"
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the file contents match what is expected
+ assert:
+ that:
+ - result.stat.checksum == '06e2c456e5028dd7bcd0b117b5927a1139458c82'
+
+- name: Do the same thing a third time without regexp and check for changes
+ lineinfile:
+ path: "{{ output_dir }}/test.conf"
+ line: "{{ item.line }}"
+ insertafter: "{{ item.after }}"
+ with_items: "{{ test_befaf_regexp }}"
+ register: _multitest_7
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the file was not changed when no regexp was provided
+ assert:
+ that:
+ - item is not changed
+ with_items: "{{ _multitest_7.results }}"
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the file contents match what is expected
+ assert:
+ that:
+ - result.stat.checksum == '06e2c456e5028dd7bcd0b117b5927a1139458c82'
+
+# Test insertbefore
+- name: Deploy the test.conf file
+ copy:
+ src: test.conf
+ dest: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the test.conf file was deployed
+ assert:
+ that:
+ - result is changed
+ - result.checksum == '6037f13e419b132eb3fd20a89e60c6c87a6add38'
+ - result.state == 'file'
+
+- name: Insert lines before with regexp
+ lineinfile:
+ path: "{{ output_dir }}/test.conf"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ insertbefore: "{{ item.before }}"
+ with_items: "{{ test_befaf_regexp }}"
+ register: _multitest_8
+
+- name: Do the same thing again and check for changes
+ lineinfile:
+ path: "{{ output_dir }}/test.conf"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ insertbefore: "{{ item.before }}"
+ with_items: "{{ test_befaf_regexp }}"
+ register: _multitest_9
+
+- name: Assert that the file was changed the first time but not the second time
+ assert:
+ that:
+ - item.0 is changed
+ - item.1 is not changed
+ with_together:
+ - "{{ _multitest_8.results }}"
+ - "{{ _multitest_9.results }}"
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the file contents match what is expected
+ assert:
+ that:
+ - result.stat.checksum == 'c3be9438a07c44d4c256cebfcdbca15a15b1db91'
+
+- name: Do the same thing a third time without regexp and check for changes
+ lineinfile:
+ path: "{{ output_dir }}/test.conf"
+ line: "{{ item.line }}"
+ insertbefore: "{{ item.before }}"
+ with_items: "{{ test_befaf_regexp }}"
+ register: _multitest_10
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the file was not changed when no regexp was provided
+ assert:
+ that:
+ - item is not changed
+ with_items: "{{ _multitest_10.results }}"
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.conf"
+ register: result
+
+- name: Assert that the file contents match what is expected
+ assert:
+ that:
+ - result.stat.checksum == 'c3be9438a07c44d4c256cebfcdbca15a15b1db91'
+
+- name: Copy empty file to test with insertbefore
+ copy:
+ src: testempty.txt
+ dest: "{{ output_dir }}/testempty.txt"
+
+- name: Add a line to an empty file with insertbefore
+ lineinfile:
+ path: "{{ output_dir }}/testempty.txt"
+ line: top
+ insertbefore: '^not in the file$'
+ register: oneline_insbefore_test1
+
+- name: Add a line to a file with only one line using insertbefore
+ lineinfile:
+ path: "{{ output_dir }}/testempty.txt"
+ line: top
+ insertbefore: '^not in the file$'
+ register: oneline_insbefore_test2
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/testempty.txt"
+ register: oneline_insbefore_file
+
+- name: Assert that insertbefore worked properly with a one-line file
+ assert:
+ that:
+ - oneline_insbefore_test1 is changed
+ - oneline_insbefore_test2 is not changed
+ - oneline_insbefore_file.stat.checksum == '4dca56d05a21f0d018cd311f43e134e4501cf6d9'
+
+###################################################################
+# Issue 29443
+# When an empty regexp is used, it matches every line in the file, so the
+# module replaces the last line rather than appending, and issues a warning.
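+#
+# For illustration only (a hedged sketch, not executed by this test run):
+# given a file containing the two lines "alpha" and "beta", a task like the
+# commented one below would replace "beta", the last line, and emit the
+# warning, because the empty regexp matches every line:
+#
+# - lineinfile:
+#     path: /tmp/example.txt   # hypothetical path
+#     regexp: ''
+#     line: gamma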
+
+- name: Deploy the test file for lineinfile
+ copy:
+ src: test.txt
+ dest: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: Assert that the test file was deployed
+ assert:
+ that:
+ - result is changed
+ - result.checksum == '5feac65e442c91f557fc90069ce6efc4d346ab51'
+ - result.state == 'file'
+
+- name: Insert a line in the file using an empty string as a regular expression
+ lineinfile:
+ path: "{{ output_dir }}/test.txt"
+ regexp: ''
+ line: This is line 6
+ register: insert_empty_regexp
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test.txt"
+ register: result
+
+- name: Assert that the file contents match what is expected and a warning was displayed
+ assert:
+ that:
+ - insert_empty_regexp is changed
+ - warning_message in insert_empty_regexp.warnings
+ - result.stat.checksum == '23555a98ceaa88756b4c7c7bba49d9f86eed868f'
+ vars:
+ warning_message: >-
+ The regular expression is an empty string, which will match every line in the file.
+ This may have unintended consequences, such as replacing the last line in the file rather than appending.
+ If this is desired, use '^' to match every line in the file and avoid this warning.
+
+###################################################################
+# Issue 58923
+# Using firstmatch with insertafter to ensure multiple lines are not inserted
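+#
+# For illustration only (a hedged sketch, not executed by this test run):
+# without firstmatch, insertafter is anchored to the last line matching the
+# pattern, so repeated runs can keep inserting below each new copy; setting
+# firstmatch pins the insertion point to the first match and keeps the task
+# idempotent, as in the commented sketch below:
+#
+# - lineinfile:
+#     path: /tmp/example.txt   # hypothetical path
+#     line: INSERT
+#     insertafter: line1
+#     firstmatch: yes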
+
+- name: Deploy the firstmatch test file
+ copy:
+ src: firstmatch.txt
+ dest: "{{ output_dir }}/firstmatch.txt"
+ register: result
+
+- name: Assert that the test file was deployed
+ assert:
+ that:
+ - result is changed
+ - result.checksum == '1d644e5e2e51c67f1bd12d7bbe2686017f39923d'
+ - result.state == 'file'
+
+- name: Insert a line after an existing line using firstmatch
+ lineinfile:
+ path: "{{ output_dir }}/firstmatch.txt"
+ line: INSERT
+ insertafter: line1
+ firstmatch: yes
+ register: insertafter1
+
+- name: Insert a line after an existing line using firstmatch again
+ lineinfile:
+ path: "{{ output_dir }}/firstmatch.txt"
+ line: INSERT
+ insertafter: line1
+ firstmatch: yes
+ register: insertafter2
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/firstmatch.txt"
+ register: result
+
+- name: Assert that the file was modified appropriately
+ assert:
+ that:
+ - insertafter1 is changed
+ - insertafter2 is not changed
+ - result.stat.checksum == '114aae024073a3ee8ec8db0ada03c5483326dd86'
+
+########################################################################################
+# Tests of fixing the same issue as above (#58923) by @Andersson007 <aaklychkov@mail.ru>
+# and @samdoran <sdoran@redhat.com>:
+
+# Test insertafter with regexp
+- name: Deploy the test file
+ copy:
+ src: test_58923.txt
+ dest: "{{ output_dir }}/test_58923.txt"
+ register: initial_file
+
+- name: Assert that the test file was deployed
+ assert:
+ that:
+ - initial_file is changed
+ - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e'
+ - initial_file.state == 'file'
+
+# Regarding the documentation:
+# If regular expressions are passed to both regexp and insertafter,
+# insertafter is only honored if no match for regexp is found.
+# In other words, when both regexp and insertafter are given:
+# 1. regexp matches -> insertafter is ignored and the matched line is replaced
+# 2. regexp does not match -> the line is inserted after the insertafter match
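+#
+# For illustration only (a hedged sketch, not executed by this test run):
+# if the file already contained a line such as "export FISHEYE_OPTS=old",
+# case 1 would apply and the commented task below would replace that line in
+# place, ignoring insertafter; since no line in test_58923.txt matches the
+# regexp, case 2 applies and the line is inserted after the '^#!/bin/sh' match:
+#
+# - lineinfile:
+#     path: /tmp/example.txt   # hypothetical path
+#     regexp: '^export FISHEYE_OPTS'
+#     insertafter: '^#!/bin/sh'
+#     line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"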
+
+# Regexp is not present in the file, so the line must be inserted after ^#!/bin/sh
+- name: Add the line using firstmatch, regexp, and insertafter
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertafter: '^#!/bin/sh'
+ regexp: ^export FISHEYE_OPTS
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertafter_test1
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertafter_test1_file
+
+- name: Add the line using firstmatch, regexp, and insertafter again
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertafter: '^#!/bin/sh'
+ regexp: ^export FISHEYE_OPTS
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertafter_test2
+
+# Check the previous step:
+# we tried to add the same line with the same task again,
+# so nothing should have been added:
+- name: Stat the file again
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertafter_test2_file
+
+- name: Assert insertafter tests gave the expected results
+ assert:
+ that:
+ - insertafter_test1 is changed
+ - insertafter_test1_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08'
+ - insertafter_test2 is not changed
+ - insertafter_test2_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08'
+
+# Test insertafter without regexp
+- name: Deploy the test file
+ copy:
+ src: test_58923.txt
+ dest: "{{ output_dir }}/test_58923.txt"
+ register: initial_file
+
+- name: Assert that the test file was deployed
+ assert:
+ that:
+ - initial_file is changed
+ - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e'
+ - initial_file.state == 'file'
+
+- name: Insert the line using firstmatch and insertafter without regexp
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertafter: '^#!/bin/sh'
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertafter_test3
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertafter_test3_file
+
+- name: Insert the line using firstmatch and insertafter without regexp again
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertafter: '^#!/bin/sh'
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertafter_test4
+
+- name: Stat the file again
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertafter_test4_file
+
+- name: Assert insertafter without regexp tests gave the expected results
+ assert:
+ that:
+ - insertafter_test3 is changed
+ - insertafter_test3_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08'
+ - insertafter_test4 is not changed
+ - insertafter_test4_file.stat.checksum == '9232aed6fe88714964d9e29d13e42cd782070b08'
+
+
+# Test insertbefore with regexp
+- name: Deploy the test file
+ copy:
+ src: test_58923.txt
+ dest: "{{ output_dir }}/test_58923.txt"
+ register: initial_file
+
+- name: Assert that the test file was deployed
+ assert:
+ that:
+ - initial_file is changed
+ - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e'
+ - initial_file.state == 'file'
+
+- name: Add the line using regexp, firstmatch, and insertbefore
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertbefore: '^#!/bin/sh'
+ regexp: ^export FISHEYE_OPTS
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertbefore_test1
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertbefore_test1_file
+
+- name: Add the line using regexp, firstmatch, and insertbefore again
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertbefore: '^#!/bin/sh'
+ regexp: ^export FISHEYE_OPTS
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertbefore_test2
+
+- name: Stat the file again
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertbefore_test2_file
+
+- name: Assert insertbefore with regexp tests gave the expected results
+ assert:
+ that:
+ - insertbefore_test1 is changed
+ - insertbefore_test1_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7'
+ - insertbefore_test2 is not changed
+ - insertbefore_test2_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7'
+
+
+# Test insertbefore without regexp
+- name: Deploy the test file
+ copy:
+ src: test_58923.txt
+ dest: "{{ output_dir }}/test_58923.txt"
+ register: initial_file
+
+- name: Assert that the test file was deployed
+ assert:
+ that:
+ - initial_file is changed
+ - initial_file.checksum == 'b6379ba43261c451a62102acb2c7f438a177c66e'
+ - initial_file.state == 'file'
+
+- name: Add the line using insertbefore and firstmatch
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertbefore: '^#!/bin/sh'
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertbefore_test3
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertbefore_test3_file
+
+- name: Add the line using insertbefore and firstmatch again
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertbefore: '^#!/bin/sh'
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertbefore_test4
+
+- name: Stat the file again
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertbefore_test4_file
+
+# Test when the line is already present in the file but
+# not at the insertbefore spot:
+- name: >
+ Add the line using insertbefore and firstmatch when the line
+ is already present but not near the insertbefore spot
+ lineinfile:
+ path: "{{ output_dir }}/test_58923.txt"
+ insertbefore: ' Darwin\*\) if \[ -z \"\$JAVA_HOME\" \] ; then'
+ firstmatch: true
+ line: export FISHEYE_OPTS="-Xmx4096m -Xms2048m"
+ register: insertbefore_test5
+
+- name: Stat the file again
+ stat:
+ path: "{{ output_dir }}/test_58923.txt"
+ register: insertbefore_test5_file
+
+- name: Assert insertbefore without regexp tests gave the expected results
+ assert:
+ that:
+ - insertbefore_test3 is changed
+ - insertbefore_test3_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7'
+ - insertbefore_test4 is not changed
+ - insertbefore_test4_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7'
+ - insertbefore_test5 is not changed
+ - insertbefore_test5_file.stat.checksum == '3c6630b9d44f561ea9ad999be56a7504cadc12f7'
+
+
+# Test inserting a line at the end of the file using regexp with insertafter
+# https://github.com/ansible/ansible/issues/63684
+- name: Create a file by inserting a line
+ lineinfile:
+ path: "{{ output_dir }}/testend.txt"
+ create: yes
+ line: testline
+ register: testend1
+
+- name: Insert a line at the end of the file
+ lineinfile:
+ path: "{{ output_dir }}/testend.txt"
+ insertafter: testline
+ regexp: line at the end
+ line: line at the end
+ register: testend2
+
+- name: Stat the file
+ stat:
+ path: "{{ output_dir }}/testend.txt"
+ register: testend_file
+
+- name: Assert inserting at the end gave the expected results
+ assert:
+ that:
+ - testend1 is changed
+ - testend2 is changed
+ - testend_file.stat.checksum == 'ef36116966836ce04f6b249fd1837706acae4e19'
diff --git a/test/integration/targets/lineinfile/vars/main.yml b/test/integration/targets/lineinfile/vars/main.yml
new file mode 100644
index 00000000..6e99d4f1
--- /dev/null
+++ b/test/integration/targets/lineinfile/vars/main.yml
@@ -0,0 +1,29 @@
+test_regexp:
+ - regex: '1'
+ replace: 'bar'
+
+ - regex: '2'
+ replace: 'bar'
+
+ - regex: '3'
+ replace: 'bar'
+
+ - regex: '4'
+ replace: 'bar'
+
+
+test_befaf_regexp:
+ - before: section_three
+ after: section_one
+ regexp: option_one=
+ line: option_one=1
+
+ - before: section_three
+ after: section_one
+ regexp: option_two=
+ line: option_two=2
+
+ - before: section_three
+ after: section_one
+ regexp: option_three=
+ line: option_three=3
diff --git a/test/integration/targets/lookup_config/aliases b/test/integration/targets/lookup_config/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_config/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_config/tasks/main.yml b/test/integration/targets/lookup_config/tasks/main.yml
new file mode 100644
index 00000000..be185197
--- /dev/null
+++ b/test/integration/targets/lookup_config/tasks/main.yml
@@ -0,0 +1,59 @@
+- name: Verify lookup_config errors with no on_missing (failure expected)
+ set_fact:
+ foo: '{{lookup("config", "THIS_DOES_NOT_EXIST")}}'
+ ignore_errors: yes
+ register: lookup_config_1
+
+- name: Verify lookup_config errors with on_missing=error (failure expected)
+ set_fact:
+ foo: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="error")}}'
+ ignore_errors: yes
+ register: lookup_config_2
+
+- name: Verify lookup_config does not error with on_missing=skip
+ set_fact:
+ lookup3: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="skip")}}'
+ register: lookup_config_3
+
+# TODO: Is there a decent way to check that the warning is actually triggered?
+- name: Verify lookup_config does not error with on_missing=warn (warning expected)
+ set_fact:
+ lookup4: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="warn")}}'
+ register: lookup_config_4
+
+- name: Verify lookup_config errors with invalid on_missing (failure expected)
+ set_fact:
+ foo: '{{lookup("config", "THIS_DOES_NOT_EXIST", on_missing="boo")}}'
+ ignore_errors: yes
+ register: lookup_config_5
+
+- name: Verify lookup_config errors with invalid param type (failure expected)
+ set_fact:
+ foo: '{{lookup("config", 1337)}}'
+ ignore_errors: yes
+ register: lookup_config_6
+
+- name: Verify lookup_config errors with callable arg (failure expected)
+ set_fact:
+ foo: '{{lookup("config", "ConfigManager")}}'
+ ignore_errors: yes
+ register: lookup_config_7
+
+- name: Verify lookup_config
+ assert:
+ that:
+ - '"meow" in lookup("config", "ANSIBLE_COW_WHITELIST")'
+ - lookup_config_1 is failed
+ - '"Unable to find setting" in lookup_config_1.msg'
+ - lookup_config_2 is failed
+ - '"Unable to find setting" in lookup_config_2.msg'
+ - lookup_config_3 is success
+ - 'lookup3|length == 0'
+ - lookup_config_4 is success
+ - 'lookup4|length == 0'
+ - lookup_config_5 is failed
+ - '"must be a string and one of" in lookup_config_5.msg'
+ - lookup_config_6 is failed
+ - '"Invalid setting identifier" in lookup_config_6.msg'
+ - lookup_config_7 is failed
+ - '"Invalid setting" in lookup_config_7.msg'
diff --git a/test/integration/targets/lookup_dict/aliases b/test/integration/targets/lookup_dict/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_dict/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_dict/tasks/main.yml b/test/integration/targets/lookup_dict/tasks/main.yml
new file mode 100644
index 00000000..6f778548
--- /dev/null
+++ b/test/integration/targets/lookup_dict/tasks/main.yml
@@ -0,0 +1,54 @@
+- name: Define users dict
+ set_fact:
+ users:
+ alice:
+ name: Alice
+ age: 21
+ bob:
+ name: Bob
+ age: 22
+
+- name: Convert users dict to list
+ set_fact:
+ user_list: "{{ lookup('dict', users) | sort(attribute='key') }}"
+
+- name: Verify results
+ assert:
+ that:
+ - user_list | length == 2
+ - user_list[0].key == 'alice'
+ - user_list[0].value | length == 2
+ - user_list[0].value.name == 'Alice'
+ - user_list[0].value.age == 21
+ - user_list[1].key == 'bob'
+ - user_list[1].value | length == 2
+ - user_list[1].value.name == 'Bob'
+ - user_list[1].value.age == 22
+
+- name: Convert a non-dict (failure expected)
+ set_fact:
+ bad_fact: "{{ lookup('dict', 1) }}"
+ register: result
+ ignore_errors: yes
+
+- name: Verify conversion failed
+ assert:
+ that:
+ - result is failed
+
+- name: Define simple dict
+ set_fact:
+ simple:
+ hello: World
+
+- name: Convert using with_dict so that terms is not a list
+ set_fact:
+ hello: "{{ item }}"
+ with_dict: "{{ simple }}"
+
+- name: Verify conversion
+ assert:
+ that:
+ - hello | length == 2
+ - hello.key == 'hello'
+ - hello.value == 'World'
diff --git a/test/integration/targets/lookup_env/aliases b/test/integration/targets/lookup_env/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_env/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_env/tasks/main.yml b/test/integration/targets/lookup_env/tasks/main.yml
new file mode 100644
index 00000000..daaeb35f
--- /dev/null
+++ b/test/integration/targets/lookup_env/tasks/main.yml
@@ -0,0 +1,15 @@
+- name: get HOME environment var value
+ shell: "echo $HOME"
+ register: home_var_value
+
+- name: use env lookup to get HOME var
+ set_fact:
+ test_val: "{{ lookup('env', 'HOME') }}"
+
+- debug: var=home_var_value.stdout
+- debug: var=test_val
+
+- name: compare values
+ assert:
+ that:
+ - "test_val == home_var_value.stdout"
diff --git a/test/integration/targets/lookup_file/aliases b/test/integration/targets/lookup_file/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_file/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_file/tasks/main.yml b/test/integration/targets/lookup_file/tasks/main.yml
new file mode 100644
index 00000000..a6d636db
--- /dev/null
+++ b/test/integration/targets/lookup_file/tasks/main.yml
@@ -0,0 +1,13 @@
+- name: make a new file to read
+ copy: dest={{output_dir}}/foo.txt mode=0644 content="bar"
+
+- name: load the file as a fact
+ set_fact:
+ foo: "{{ lookup('file', output_dir + '/foo.txt' ) }}"
+
+- debug: var=foo
+
+- name: verify file lookup
+ assert:
+ that:
+ - "foo == 'bar'"
diff --git a/test/integration/targets/lookup_fileglob/aliases b/test/integration/targets/lookup_fileglob/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt b/test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt
new file mode 100644
index 00000000..50255880
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/files/play_adj_subdir.txt
@@ -0,0 +1 @@
+in files subdir adjacent to play
diff --git a/test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt b/test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt
new file mode 100644
index 00000000..96c7a549
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/files/somepath/play_adj_subsubdir.txt
@@ -0,0 +1 @@
+in play adjacent subdir of files/
diff --git a/test/integration/targets/lookup_fileglob/find_levels/play.yml b/test/integration/targets/lookup_fileglob/find_levels/play.yml
new file mode 100644
index 00000000..4bdee05d
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/play.yml
@@ -0,0 +1,13 @@
+- hosts: localhost
+ gather_facts: false
+ vars:
+ expected:
+ play_adj: adjacent to play
+ play_adj_subdir: in files subdir adjacent to play
+ somepath/play_adj_subsubdir: in play adjacent subdir of files/
+ in_role: file in role
+ otherpath/in_role_subdir: file in role subdir
+ tasks:
+ - name: Import role lookup
+ import_role:
+ name: get_file
diff --git a/test/integration/targets/lookup_fileglob/find_levels/play_adj.txt b/test/integration/targets/lookup_fileglob/find_levels/play_adj.txt
new file mode 100644
index 00000000..14f0cf50
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/play_adj.txt
@@ -0,0 +1 @@
+adjacent to play
diff --git a/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt
new file mode 100644
index 00000000..fdfc9476
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/in_role.txt
@@ -0,0 +1 @@
+file in role
diff --git a/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt
new file mode 100644
index 00000000..40e75a40
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/files/otherpath/in_role_subdir.txt
@@ -0,0 +1 @@
+file in role subdir
diff --git a/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml
new file mode 100644
index 00000000..2fc21df7
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/find_levels/roles/get_file/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: show file contents
+ debug:
+ msg: '{{ q("fileglob", seed + ".*") }}'
+ register: found
+
+- name: did we get the right one?
+ assert:
+ that:
+ - found['msg'][0].endswith(seed + '.txt')
+ - q('file', found['msg'][0])[0] == expected[seed]
diff --git a/test/integration/targets/lookup_fileglob/non_existent/play.yml b/test/integration/targets/lookup_fileglob/non_existent/play.yml
new file mode 100644
index 00000000..e92dff5a
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/non_existent/play.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: fileglob should be empty
+ assert:
+ that: q("fileglob", seed) | length == 0
diff --git a/test/integration/targets/lookup_fileglob/runme.sh b/test/integration/targets/lookup_fileglob/runme.sh
new file mode 100755
index 00000000..1e0297c7
--- /dev/null
+++ b/test/integration/targets/lookup_fileglob/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# fun multilevel finds
+for seed in play_adj play_adj_subdir somepath/play_adj_subsubdir in_role otherpath/in_role_subdir
+do
+ ansible-playbook find_levels/play.yml -e "seed='${seed}'" "$@"
+done
+
+# non-existent paths
+for seed in foo foo/bar foo/bar/baz
+do
+ ansible-playbook non_existent/play.yml -e "seed='${seed}'" "$@"
+done
diff --git a/test/integration/targets/lookup_first_found/aliases b/test/integration/targets/lookup_first_found/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_first_found/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_first_found/files/bar1 b/test/integration/targets/lookup_first_found/files/bar1
new file mode 100644
index 00000000..5716ca59
--- /dev/null
+++ b/test/integration/targets/lookup_first_found/files/bar1
@@ -0,0 +1 @@
+bar
diff --git a/test/integration/targets/lookup_first_found/files/foo1 b/test/integration/targets/lookup_first_found/files/foo1
new file mode 100644
index 00000000..257cc564
--- /dev/null
+++ b/test/integration/targets/lookup_first_found/files/foo1
@@ -0,0 +1 @@
+foo
diff --git a/test/integration/targets/lookup_first_found/tasks/main.yml b/test/integration/targets/lookup_first_found/tasks/main.yml
new file mode 100644
index 00000000..87f2a404
--- /dev/null
+++ b/test/integration/targets/lookup_first_found/tasks/main.yml
@@ -0,0 +1,73 @@
+- name: test with_first_found
+ set_fact: "first_found={{ item }}"
+ with_first_found:
+ - "{{ role_path + '/files/does_not_exist' }}"
+ - "{{ role_path + '/files/foo1' }}"
+ - "{{ role_path + '/files/bar1' }}"
+
+- name: set expected
+ set_fact: first_expected="{{ role_path + '/files/foo1' }}"
+
+- name: set unexpected
+ set_fact: first_unexpected="{{ role_path + '/files/bar1' }}"
+
+- name: verify with_first_found results
+ assert:
+ that:
+ - "first_found == first_expected"
+ - "first_found != first_unexpected"
+
+- name: test q(first_found) with no files produces empty list
+ set_fact:
+ first_found_var: "{{ q('first_found', params, errors='ignore') }}"
+ vars:
+ params:
+ files: "not_a_file.yaml"
+
+- name: verify q(first_found) result
+ assert:
+ that:
+ - "first_found_var == []"
+
+- name: test lookup(first_found) with no files produces empty string
+ set_fact:
+ first_found_var: "{{ lookup('first_found', params, errors='ignore') }}"
+ vars:
+ params:
+ files: "not_a_file.yaml"
+
+- name: verify lookup(first_found) result
+ assert:
+ that:
+ - "first_found_var == ''"
+
+# NOTE: skip: True was deprecated in e17a2b502d6601be53c60d7ba1c627df419460c9; remove in 2.12
+- name: test first_found with no matches and skip=True does nothing
+ set_fact: "this_not_set={{ item }}"
+ vars:
+ params:
+ files:
+ - not/a/file.yaml
+ - another/non/file.yaml
+ skip: True
+ loop: "{{ q('first_found', params) }}"
+
+- name: verify skip
+ assert:
+ that:
+ - "this_not_set is not defined"
+
+- name: test first_found with no matches and errors='ignore' skips in a loop
+ set_fact: "this_not_set={{ item }}"
+ vars:
+ params:
+ files:
+ - not/a/file.yaml
+ - another/non/file.yaml
+ loop: "{{ query('first_found', params, errors='ignore') }}"
+
+- name: verify errors=ignore
+ assert:
+ that:
+ - "this_not_set is not defined"
diff --git a/test/integration/targets/lookup_indexed_items/aliases b/test/integration/targets/lookup_indexed_items/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_indexed_items/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_indexed_items/tasks/main.yml b/test/integration/targets/lookup_indexed_items/tasks/main.yml
new file mode 100644
index 00000000..84f5fbce
--- /dev/null
+++ b/test/integration/targets/lookup_indexed_items/tasks/main.yml
@@ -0,0 +1,16 @@
+- name: create unindexed list
+ shell: for i in $(seq 1 5); do echo "x" ; done;
+ register: list_data
+
+- name: create indexed list
+ set_fact: "{{ item[1] + item[0]|string }}=set"
+ with_indexed_items: "{{list_data.stdout_lines}}"
+
+- name: verify with_indexed_items result
+ assert:
+ that:
+ - "x0 == 'set'"
+ - "x1 == 'set'"
+ - "x2 == 'set'"
+ - "x3 == 'set'"
+ - "x4 == 'set'"
diff --git a/test/integration/targets/lookup_ini/aliases b/test/integration/targets/lookup_ini/aliases
new file mode 100644
index 00000000..f9f29ef3
--- /dev/null
+++ b/test/integration/targets/lookup_ini/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_ini/lookup-8859-15.ini b/test/integration/targets/lookup_ini/lookup-8859-15.ini
new file mode 100644
index 00000000..33f9c29d
--- /dev/null
+++ b/test/integration/targets/lookup_ini/lookup-8859-15.ini
@@ -0,0 +1,7 @@
+[global]
+# A comment
+value1=Text associated with value1 and global section
+value2=Same for value2 and global section
+value.dot=Properties with dot
+field.with.space = another space
+field_with_unicode=été indien où à château français ïîôû
diff --git a/test/integration/targets/lookup_ini/lookup.ini b/test/integration/targets/lookup_ini/lookup.ini
new file mode 100644
index 00000000..5b7cc34b
--- /dev/null
+++ b/test/integration/targets/lookup_ini/lookup.ini
@@ -0,0 +1,25 @@
+[global]
+# A comment
+value1=Text associated with value1 and global section
+value2=Same for value2 and global section
+value.dot=Properties with dot
+field.with.space = another space
+unicode=été indien où à château français ïîôû
+
+[section1]
+value1=section1/value1
+value2=section1/value2
+
+[value_section]
+value1=1
+value2=2
+value3=3
+other1=4
+other2=5
+
+[other_section]
+value1=1
+value2=2
+value3=3
+other1=4
+other2=5
diff --git a/test/integration/targets/lookup_ini/lookup.properties b/test/integration/targets/lookup_ini/lookup.properties
new file mode 100644
index 00000000..d71ce121
--- /dev/null
+++ b/test/integration/targets/lookup_ini/lookup.properties
@@ -0,0 +1,6 @@
+# A comment
+value1=Text associated with value1
+value2=Same for value2
+value.dot=Properties with dot
+field.with.space = another space
+field.with.unicode = été indien où à château français ïîôû
diff --git a/test/integration/targets/lookup_ini/runme.sh b/test/integration/targets/lookup_ini/runme.sh
new file mode 100755
index 00000000..71a507de
--- /dev/null
+++ b/test/integration/targets/lookup_ini/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_lookup_properties.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/lookup_ini/test_lookup_properties.yml b/test/integration/targets/lookup_ini/test_lookup_properties.yml
new file mode 100644
index 00000000..a8cad9de
--- /dev/null
+++ b/test/integration/targets/lookup_ini/test_lookup_properties.yml
@@ -0,0 +1,71 @@
+---
+- name: "Lookup test"
+ hosts: "localhost"
+ tasks:
+ - name: "read properties value"
+ set_fact:
+ test1: "{{lookup('ini', 'value1 type=properties file=lookup.properties')}}"
+ test2: "{{lookup('ini', 'value2 type=properties file=lookup.properties')}}"
+ test_dot: "{{lookup('ini', 'value.dot type=properties file=lookup.properties')}}"
+ field_with_space: "{{lookup('ini', 'field.with.space type=properties file=lookup.properties')}}"
+ - assert:
+ that: "{{item}} is defined"
+ with_items: [ 'test1', 'test2', 'test_dot', 'field_with_space' ]
+ - name: "read ini value"
+ set_fact:
+ value1_global: "{{lookup('ini', 'value1 section=global file=lookup.ini')}}"
+ value2_global: "{{lookup('ini', 'value2 section=global file=lookup.ini')}}"
+ value1_section1: "{{lookup('ini', 'value1 section=section1 file=lookup.ini')}}"
+ field_with_unicode: "{{lookup('ini', 'unicode section=global file=lookup.ini')}}"
+ - debug: var={{item}}
+ with_items: [ 'value1_global', 'value2_global', 'value1_section1', 'field_with_unicode' ]
+ - assert:
+ that:
+ - "field_with_unicode == 'été indien où à château français ïîôû'"
+ - name: "read ini value from iso8859-15 file"
+ set_fact:
+ field_with_unicode: "{{lookup('ini', 'field_with_unicode section=global encoding=iso8859-1 file=lookup-8859-15.ini')}}"
+ - assert:
+ that:
+ - "field_with_unicode == 'été indien où à château français ïîôû'"
+ - name: "read ini value with section and regexp"
+ set_fact:
+ value_section: "{{lookup('ini', 'value[1-2] section=value_section file=lookup.ini re=true')}}"
+ other_section: "{{lookup('ini', 'other[1-2] section=other_section file=lookup.ini re=true')}}"
+ - debug: var={{item}}
+ with_items: [ 'value_section', 'other_section' ]
+ - assert:
+ that:
+ - "value_section == '1,2'"
+ - "other_section == '4,5'"
+ - name: "Reading unknown value"
+ set_fact:
+ unknown: "{{lookup('ini', 'unknown default=unknown section=section1 file=lookup.ini')}}"
+ - debug: var=unknown
+ - assert:
+ that:
+ - 'unknown == "unknown"'
+ - name: "Looping over section section1"
+ debug: msg="{{item}}"
+ with_ini: value[1-2] section=section1 file=lookup.ini re=true
+ register: _
+ - assert:
+ that:
+ - '_.results.0.item == "section1/value1"'
+ - '_.results.1.item == "section1/value2"'
+ - name: "Looping over section value_section"
+ debug: msg="{{item}}"
+ with_ini: value[1-2] section=value_section file=lookup.ini re=true
+ register: _
+ - assert:
+ that:
+ - '_.results.0.item == "1"'
+ - '_.results.1.item == "2"'
+ - debug: msg="{{item}}"
+ with_ini: value[1-2] section=section1 file=lookup.ini re=true
+ register: _
+ - assert:
+ that:
+ - '_.results.0.item == "section1/value1"'
+ - '_.results.1.item == "section1/value2"'
diff --git a/test/integration/targets/lookup_inventory_hostnames/aliases b/test/integration/targets/lookup_inventory_hostnames/aliases
new file mode 100644
index 00000000..45489be8
--- /dev/null
+++ b/test/integration/targets/lookup_inventory_hostnames/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_inventory_hostnames/inventory b/test/integration/targets/lookup_inventory_hostnames/inventory
new file mode 100644
index 00000000..1b968af2
--- /dev/null
+++ b/test/integration/targets/lookup_inventory_hostnames/inventory
@@ -0,0 +1,6 @@
+[group01]
+test01
+test05
+test03
+test02
+test04
diff --git a/test/integration/targets/lookup_inventory_hostnames/main.yml b/test/integration/targets/lookup_inventory_hostnames/main.yml
new file mode 100644
index 00000000..afc09ea8
--- /dev/null
+++ b/test/integration/targets/lookup_inventory_hostnames/main.yml
@@ -0,0 +1,13 @@
+---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - set_fact:
+ hosts_a: "{{ lookup('inventory_hostnames', 'group01', wantlist=true) }}"
+
+ - set_fact:
+ hosts_b: "{{ groups['group01'] }}"
+
+ - assert:
+ that:
+ - hosts_a == hosts_b
diff --git a/test/integration/targets/lookup_inventory_hostnames/runme.sh b/test/integration/targets/lookup_inventory_hostnames/runme.sh
new file mode 100755
index 00000000..449c66bb
--- /dev/null
+++ b/test/integration/targets/lookup_inventory_hostnames/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/test/integration/targets/lookup_items/aliases b/test/integration/targets/lookup_items/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_items/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_items/tasks/main.yml b/test/integration/targets/lookup_items/tasks/main.yml
new file mode 100644
index 00000000..12df8d0b
--- /dev/null
+++ b/test/integration/targets/lookup_items/tasks/main.yml
@@ -0,0 +1,14 @@
+- name: test with_items
+ set_fact: "{{ item }}=moo"
+ with_items:
+ - 'foo'
+ - 'bar'
+
+- debug: var=foo
+- debug: var=bar
+
+- name: verify with_items results
+ assert:
+ that:
+ - "foo == 'moo'"
+ - "bar == 'moo'"
diff --git a/test/integration/targets/lookup_lines/aliases b/test/integration/targets/lookup_lines/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_lines/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_lines/tasks/main.yml b/test/integration/targets/lookup_lines/tasks/main.yml
new file mode 100644
index 00000000..f864d720
--- /dev/null
+++ b/test/integration/targets/lookup_lines/tasks/main.yml
@@ -0,0 +1,13 @@
+- name: test with_lines
+ set_fact: "{{ item }}=set"
+ with_lines: for i in $(seq 1 5); do echo "l$i" ; done;
+
+- name: verify with_lines results
+ assert:
+ that:
+ - "l1 == 'set'"
+ - "l2 == 'set'"
+ - "l3 == 'set'"
+ - "l4 == 'set'"
+ - "l5 == 'set'"
diff --git a/test/integration/targets/lookup_list/aliases b/test/integration/targets/lookup_list/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_list/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_list/tasks/main.yml b/test/integration/targets/lookup_list/tasks/main.yml
new file mode 100644
index 00000000..3c03014e
--- /dev/null
+++ b/test/integration/targets/lookup_list/tasks/main.yml
@@ -0,0 +1,19 @@
+- name: Set variables to verify lookup_list
+ set_fact: "{{ item if item is string else item[0] }}={{ item }}"
+ with_list:
+ - a
+ - [b, c]
+ - d
+
+- name: Verify lookup_list
+ assert:
+ that:
+ - a is defined
+ - b is defined
+ - c is not defined
+ - d is defined
+ - b is iterable and b is not string
+ - b|length == 2
+ - a == a
+ - b == ['b', 'c']
+ - d == d
diff --git a/test/integration/targets/lookup_nested/aliases b/test/integration/targets/lookup_nested/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_nested/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_nested/tasks/main.yml b/test/integration/targets/lookup_nested/tasks/main.yml
new file mode 100644
index 00000000..fec081a3
--- /dev/null
+++ b/test/integration/targets/lookup_nested/tasks/main.yml
@@ -0,0 +1,18 @@
+- name: test with_nested
+ set_fact: "{{ item.0 + item.1 }}=x"
+ with_nested:
+ - [ 'a', 'b' ]
+ - [ 'c', 'd' ]
+
+- debug: var=ac
+- debug: var=ad
+- debug: var=bc
+- debug: var=bd
+
+- name: verify with_nested results
+ assert:
+ that:
+ - "ac == 'x'"
+ - "ad == 'x'"
+ - "bc == 'x'"
+ - "bd == 'x'"
diff --git a/test/integration/targets/lookup_password/aliases b/test/integration/targets/lookup_password/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_password/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_password/runme.sh b/test/integration/targets/lookup_password/runme.sh
new file mode 100755
index 00000000..a3637a7e
--- /dev/null
+++ b/test/integration/targets/lookup_password/runme.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
+# Requirements have to be installed prior to running ansible-playbook
+# because plugins and requirements are loaded before the task runs
+pip install passlib
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml -e "output_dir=${OUTPUT_DIR}" "$@"
diff --git a/test/integration/targets/lookup_password/runme.yml b/test/integration/targets/lookup_password/runme.yml
new file mode 100644
index 00000000..4f55c1da
--- /dev/null
+++ b/test/integration/targets/lookup_password/runme.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ roles:
+ - { role: lookup_password }
diff --git a/test/integration/targets/lookup_password/tasks/main.yml b/test/integration/targets/lookup_password/tasks/main.yml
new file mode 100644
index 00000000..4eeef151
--- /dev/null
+++ b/test/integration/targets/lookup_password/tasks/main.yml
@@ -0,0 +1,104 @@
+- name: create a password file
+ set_fact:
+ newpass: "{{ lookup('password', output_dir + '/lookup/password length=8') }}"
+
+- name: stat the password file directory
+ stat: path="{{output_dir}}/lookup"
+ register: result
+
+- name: assert the directory's permissions
+ assert:
+ that:
+ - result.stat.mode == '0700'
+
+- name: stat the password file
+ stat: path="{{output_dir}}/lookup/password"
+ register: result
+
+- name: assert the password file's permissions
+ assert:
+ that:
+ - result.stat.mode == '0600'
+
+- name: get password length
+ shell: wc -c {{output_dir}}/lookup/password | awk '{print $1}'
+ register: wc_result
+
+- debug: var=wc_result.stdout
+
+- name: read password
+ shell: cat {{output_dir}}/lookup/password
+ register: cat_result
+
+- debug: var=cat_result.stdout
+
+- name: verify password
+ assert:
+ that:
+ - "wc_result.stdout == '9'"
+ - "cat_result.stdout == newpass"
+ - "' salt=' not in cat_result.stdout"
+
+- name: fetch password from an existing file
+ set_fact:
+ pass2: "{{ lookup('password', output_dir + '/lookup/password length=8') }}"
+
+- name: read password (again)
+ shell: cat {{output_dir}}/lookup/password
+ register: cat_result2
+
+- debug: var=cat_result2.stdout
+
+- name: verify password (again)
+ assert:
+ that:
+ - "cat_result2.stdout == newpass"
+ - "' salt=' not in cat_result2.stdout"
+
+
+
+- name: create a password (with salt) file
+ debug: msg={{ lookup('password', output_dir + '/lookup/password_with_salt encrypt=sha256_crypt') }}
+
+- name: read password and salt
+ shell: cat {{output_dir}}/lookup/password_with_salt
+ register: cat_pass_salt
+
+- debug: var=cat_pass_salt.stdout
+
+- name: fetch unencrypted password
+ set_fact:
+ newpass: "{{ lookup('password', output_dir + '/lookup/password_with_salt') }}"
+
+- debug: var=newpass
+
+- name: verify password and salt
+ assert:
+ that:
+ - "cat_pass_salt.stdout != newpass"
+ - "cat_pass_salt.stdout.startswith(newpass)"
+ - "' salt=' in cat_pass_salt.stdout"
+ - "' salt=' not in newpass"
+
+
+- name: fetch unencrypted password (using empty encrypt parameter)
+ set_fact:
+ newpass2: "{{ lookup('password', output_dir + '/lookup/password_with_salt encrypt=') }}"
+
+- name: verify lookup password behavior
+ assert:
+ that:
+ - "newpass == newpass2"
+
+- name: verify that we can generate a 1st password without writing it
+ set_fact:
+ newpass: "{{ lookup('password', '/dev/null') }}"
+
+- name: verify that we can generate a 2nd password without writing it
+ set_fact:
+ newpass2: "{{ lookup('password', '/dev/null') }}"
+
+- name: verify lookup password behavior with /dev/null
+ assert:
+ that:
+ - "newpass != newpass2"
diff --git a/test/integration/targets/lookup_pipe/aliases b/test/integration/targets/lookup_pipe/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_pipe/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_pipe/tasks/main.yml b/test/integration/targets/lookup_pipe/tasks/main.yml
new file mode 100644
index 00000000..8aa1bc64
--- /dev/null
+++ b/test/integration/targets/lookup_pipe/tasks/main.yml
@@ -0,0 +1,9 @@
+# https://github.com/ansible/ansible/issues/6550
+- name: confirm pipe lookup works with a single positional arg
+ set_fact:
+ result: "{{ lookup('pipe', 'echo $OUTPUT_DIR') }}"
+
+- name: verify the expected output was received
+ assert:
+ that:
+ - "result == output_dir"
diff --git a/test/integration/targets/lookup_random_choice/aliases b/test/integration/targets/lookup_random_choice/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_random_choice/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_random_choice/tasks/main.yml b/test/integration/targets/lookup_random_choice/tasks/main.yml
new file mode 100644
index 00000000..e18126ae
--- /dev/null
+++ b/test/integration/targets/lookup_random_choice/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: test with_random_choice
+ set_fact: "random={{ item }}"
+ with_random_choice:
+ - "foo"
+ - "bar"
+
+- name: verify with_random_choice
+ assert:
+ that:
+ - "random in ['foo', 'bar']"
diff --git a/test/integration/targets/lookup_sequence/aliases b/test/integration/targets/lookup_sequence/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_sequence/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_sequence/tasks/main.yml b/test/integration/targets/lookup_sequence/tasks/main.yml
new file mode 100644
index 00000000..72181a42
--- /dev/null
+++ b/test/integration/targets/lookup_sequence/tasks/main.yml
@@ -0,0 +1,63 @@
+- name: test with_sequence
+ set_fact: "{{ 'x' + item }}={{ item }}"
+ with_sequence: start=0 end=3
+
+- name: test with_sequence backwards
+ set_fact: "{{ 'y' + item }}={{ item }}"
+ with_sequence: start=3 end=0 stride=-1
+
+- name: verify with_sequence
+ assert:
+ that:
+ - "x0 == '0'"
+ - "x1 == '1'"
+ - "x2 == '2'"
+ - "x3 == '3'"
+ - "y3 == '3'"
+ - "y2 == '2'"
+ - "y1 == '1'"
+ - "y0 == '0'"
+
+- name: test with_sequence not failing on count == 0
+ debug: msg='previously failed with backward counting error'
+ with_sequence: count=0
+ register: count_of_zero
+
+- name: test with_sequence runs once when start == end
+ debug: msg='should run once'
+ with_sequence: start=1 end=1
+ register: start_equal_end
+
+- name: test with_sequence count 1
+ set_fact: "{{ 'x' + item }}={{ item }}"
+ with_sequence: count=1
+ register: count_of_one
+
+- assert:
+ that:
+ - start_equal_end is not skipped
+ - count_of_zero is skipped
+ - count_of_one is not skipped
+
+- name: test with_sequence shortcut syntax (end)
+ set_fact: "{{ 'ws_z_' + item }}={{ item }}"
+ with_sequence: '4'
+
+- name: test with_sequence shortcut syntax (start-end/stride)
+ set_fact: "{{ 'ws_z_' + item }}=stride_{{ item }}"
+ with_sequence: '2-6/2'
+
+- name: test with_sequence shortcut syntax (start-end:format)
+ set_fact: "{{ 'ws_z_' + item }}={{ item }}"
+ with_sequence: '7-8:host%02d'
+
+- name: verify with_sequence shortcut syntax
+ assert:
+ that:
+ - "ws_z_1 == '1'"
+ - "ws_z_2 == 'stride_2'"
+ - "ws_z_3 == '3'"
+ - "ws_z_4 == 'stride_4'"
+ - "ws_z_6 == 'stride_6'"
+ - "ws_z_host07 == 'host07'"
+ - "ws_z_host08 == 'host08'"
diff --git a/test/integration/targets/lookup_subelements/aliases b/test/integration/targets/lookup_subelements/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_subelements/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_subelements/tasks/main.yml b/test/integration/targets/lookup_subelements/tasks/main.yml
new file mode 100644
index 00000000..5c706b27
--- /dev/null
+++ b/test/integration/targets/lookup_subelements/tasks/main.yml
@@ -0,0 +1,45 @@
+- name: test with_subelements
+ set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}"
+ with_subelements:
+ - "{{element_data}}"
+ - the_list
+
+- name: verify with_subelements results
+ assert:
+ that:
+ - "_xf == 'f'"
+ - "_xd == 'd'"
+ - "_ye == 'e'"
+ - "_yf == 'f'"
+
+- name: test with_subelements in subkeys
+ set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}"
+ with_subelements:
+ - "{{element_data}}"
+ - the.sub.key.list
+
+- name: verify with_subelements in subkeys results
+ assert:
+ that:
+ - "_xq == 'q'"
+ - "_xr == 'r'"
+ - "_yi == 'i'"
+ - "_yo == 'o'"
+
+- name: test with_subelements with missing key or subkey
+ set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}"
+ with_subelements:
+ - "{{element_data_missing}}"
+ - the.sub.key.list
+ - skip_missing: yes
+ register: _subelements_missing_subkeys
+
+- debug: var=_subelements_missing_subkeys
+- debug: var=_subelements_missing_subkeys.results|length
+- name: verify with_subelements skip_missing results
+ assert:
+ that:
+ - _subelements_missing_subkeys.skipped is not defined
+ - _subelements_missing_subkeys.results|length == 2
+ - "_xk == 'k'"
+ - "_xl == 'l'"
diff --git a/test/integration/targets/lookup_subelements/vars/main.yml b/test/integration/targets/lookup_subelements/vars/main.yml
new file mode 100644
index 00000000..f7ef50f5
--- /dev/null
+++ b/test/integration/targets/lookup_subelements/vars/main.yml
@@ -0,0 +1,43 @@
+element_data:
+ - id: x
+ the_list:
+ - "f"
+ - "d"
+ the:
+ sub:
+ key:
+ list:
+ - "q"
+ - "r"
+ - id: y
+ the_list:
+ - "e"
+ - "f"
+ the:
+ sub:
+ key:
+ list:
+ - "i"
+ - "o"
+element_data_missing:
+ - id: x
+ the_list:
+ - "f"
+ - "d"
+ the:
+ sub:
+ key:
+ list:
+ - "k"
+ - "l"
+ - id: y
+ the_list:
+ - "f"
+ - "d"
+ - id: z
+ the_list:
+ - "e"
+ - "f"
+ the:
+ sub:
+ key:
diff --git a/test/integration/targets/lookup_template/aliases b/test/integration/targets/lookup_template/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_template/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_template/tasks/main.yml b/test/integration/targets/lookup_template/tasks/main.yml
new file mode 100644
index 00000000..df115766
--- /dev/null
+++ b/test/integration/targets/lookup_template/tasks/main.yml
@@ -0,0 +1,19 @@
+# ref #18526
+- name: Test that we have a proper jinja search path in template lookup
+ set_fact:
+ hello_world: "{{ lookup('template', 'hello.txt') }}"
+
+- assert:
+ that:
+ - "hello_world|trim == 'Hello world!'"
+
+
+- name: Test that we have a proper jinja search path in template lookup with different variable start and end strings
+ vars:
+ my_var: world
+ set_fact:
+ hello_world_string: "{{ lookup('template', 'hello_string.txt', variable_start_string='[%', variable_end_string='%]') }}"
+
+- assert:
+ that:
+ - "hello_world_string|trim == 'Hello world!'"
diff --git a/test/integration/targets/lookup_template/templates/hello.txt b/test/integration/targets/lookup_template/templates/hello.txt
new file mode 100644
index 00000000..be15a4f3
--- /dev/null
+++ b/test/integration/targets/lookup_template/templates/hello.txt
@@ -0,0 +1 @@
+Hello {% include 'world.txt' %}!
diff --git a/test/integration/targets/lookup_template/templates/hello_string.txt b/test/integration/targets/lookup_template/templates/hello_string.txt
new file mode 100644
index 00000000..75199afd
--- /dev/null
+++ b/test/integration/targets/lookup_template/templates/hello_string.txt
@@ -0,0 +1 @@
+Hello [% my_var %]!
diff --git a/test/integration/targets/lookup_template/templates/world.txt b/test/integration/targets/lookup_template/templates/world.txt
new file mode 100644
index 00000000..cc628ccd
--- /dev/null
+++ b/test/integration/targets/lookup_template/templates/world.txt
@@ -0,0 +1 @@
+world
diff --git a/test/integration/targets/lookup_together/aliases b/test/integration/targets/lookup_together/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/test/integration/targets/lookup_together/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_together/tasks/main.yml b/test/integration/targets/lookup_together/tasks/main.yml
new file mode 100644
index 00000000..ee59a2ae
--- /dev/null
+++ b/test/integration/targets/lookup_together/tasks/main.yml
@@ -0,0 +1,14 @@
+- name: test with_together
+ #shell: echo {{ item }}
+ set_fact: "{{ item.0 }}={{ item.1 }}"
+ with_together:
+ - [ 'a', 'b', 'c', 'd' ]
+ - [ '1', '2', '3', '4' ]
+
+- name: verify with_together results
+ assert:
+ that:
+ - "a == '1'"
+ - "b == '2'"
+ - "c == '3'"
+ - "d == '4'"
diff --git a/test/integration/targets/lookup_unvault/aliases b/test/integration/targets/lookup_unvault/aliases
new file mode 100644
index 00000000..4a2ce27c
--- /dev/null
+++ b/test/integration/targets/lookup_unvault/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+needs/root
+skip/aix
diff --git a/test/integration/targets/lookup_unvault/files/foot.txt b/test/integration/targets/lookup_unvault/files/foot.txt
new file mode 100644
index 00000000..5716ca59
--- /dev/null
+++ b/test/integration/targets/lookup_unvault/files/foot.txt
@@ -0,0 +1 @@
+bar
diff --git a/test/integration/targets/lookup_unvault/files/foot.txt.vault b/test/integration/targets/lookup_unvault/files/foot.txt.vault
new file mode 100644
index 00000000..98ee41bc
--- /dev/null
+++ b/test/integration/targets/lookup_unvault/files/foot.txt.vault
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+35363932323438383333343462373431376162373631636238353061616565323630656464393939
+3937313630326662336264636662313163343832643239630a646436313833633135353834343364
+63363039663765363365626531643533616232333533383239323234393934356639373136323635
+3632356163343031300a373766636130626237346630653537633764663063313439666135623032
+6139
diff --git a/test/integration/targets/lookup_unvault/runme.sh b/test/integration/targets/lookup_unvault/runme.sh
new file mode 100755
index 00000000..a7a0be5a
--- /dev/null
+++ b/test/integration/targets/lookup_unvault/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# run tests
+ansible-playbook unvault.yml --vault-password-file='secret' -v "$@"
diff --git a/test/integration/targets/lookup_unvault/secret b/test/integration/targets/lookup_unvault/secret
new file mode 100644
index 00000000..f925edd3
--- /dev/null
+++ b/test/integration/targets/lookup_unvault/secret
@@ -0,0 +1 @@
+ssssshhhhhh
diff --git a/test/integration/targets/lookup_unvault/unvault.yml b/test/integration/targets/lookup_unvault/unvault.yml
new file mode 100644
index 00000000..f1f3b98a
--- /dev/null
+++ b/test/integration/targets/lookup_unvault/unvault.yml
@@ -0,0 +1,10 @@
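+# the unvault lookup returns file contents, decrypting vaulted files and passing plain files through unchanged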
+- name: test unvault lookup plugin
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - debug: msg={{lookup('unvault', 'foot.txt.vault')}}
+  - name: verify unvault lookup works with both vaulted and unvaulted files
+ assert:
+ that:
+ - lookup('unvault', 'foot.txt.vault') == lookup('unvault', 'foot.txt')
diff --git a/test/integration/targets/lookup_url/aliases b/test/integration/targets/lookup_url/aliases
new file mode 100644
index 00000000..28990148
--- /dev/null
+++ b/test/integration/targets/lookup_url/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group1
+needs/httptester
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_url/meta/main.yml b/test/integration/targets/lookup_url/meta/main.yml
new file mode 100644
index 00000000..374b5fdf
--- /dev/null
+++ b/test/integration/targets/lookup_url/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_http_tests
diff --git a/test/integration/targets/lookup_url/tasks/main.yml b/test/integration/targets/lookup_url/tasks/main.yml
new file mode 100644
index 00000000..4eaa32e0
--- /dev/null
+++ b/test/integration/targets/lookup_url/tasks/main.yml
@@ -0,0 +1,29 @@
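+# badssl_host and badssl_host_substring come from the prepare_http_tests dependency (see meta/main.yml)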
+- name: Test that retrieving a url works
+ set_fact:
+ web_data: "{{ lookup('url', 'https://gist.githubusercontent.com/abadger/9858c22712f62a8effff/raw/43dd47ea691c90a5fa7827892c70241913351963/test') }}"
+
+- name: Assert that the url was retrieved
+ assert:
+ that:
+ - "'one' in web_data"
+
+- name: Test that retrieving a url with invalid cert fails
+ set_fact:
+ web_data: "{{ lookup('url', 'https://{{ badssl_host }}/') }}"
+ ignore_errors: True
+ register: url_invalid_cert
+
+- assert:
+ that:
+ - "url_invalid_cert.failed"
+ - "'Error validating the server' in url_invalid_cert.msg or 'Hostname mismatch' in url_invalid_cert.msg or ( url_invalid_cert.msg is search('hostname .* doesn.t match .*'))"
+
+- name: Test that retrieving a url with invalid cert with validate_certs=False works
+ set_fact:
+ web_data: "{{ lookup('url', 'https://{{ badssl_host }}/', validate_certs=False) }}"
+ register: url_no_validate_cert
+
+- assert:
+ that:
+ - "'{{ badssl_host_substring }}' in web_data"
diff --git a/test/integration/targets/lookup_vars/aliases b/test/integration/targets/lookup_vars/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/test/integration/targets/lookup_vars/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/lookup_vars/tasks/main.yml b/test/integration/targets/lookup_vars/tasks/main.yml
new file mode 100644
index 00000000..f24d8657
--- /dev/null
+++ b/test/integration/targets/lookup_vars/tasks/main.yml
@@ -0,0 +1,16 @@
+- name: Test that we can give it a single value and receive a single value
+ set_fact:
+ var_host: '{{ lookup("vars", "ansible_host") }}'
+
+- assert:
+ that:
+ - 'var_host == ansible_host'
+
+- name: Test that we can give a list of values to var and receive a list of values back
+ set_fact:
+ var_host_info: '{{ query("vars", "ansible_host", "ansible_connection") }}'
+
+- assert:
+ that:
+ - 'var_host_info[0] == ansible_host'
+ - 'var_host_info[1] == ansible_connection'
diff --git a/test/integration/targets/loop_control/aliases b/test/integration/targets/loop_control/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/loop_control/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/loop_control/extended.yml b/test/integration/targets/loop_control/extended.yml
new file mode 100644
index 00000000..6ad9933a
--- /dev/null
+++ b/test/integration/targets/loop_control/extended.yml
@@ -0,0 +1,12 @@
+- name: loop_control/extended/include https://github.com/ansible/ansible/issues/61218
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: loop on an include
+ include_tasks: inner.yml
+ loop:
+ - first
+ - second
+ - third
+ loop_control:
+ extended: yes
diff --git a/test/integration/targets/loop_control/inner.yml b/test/integration/targets/loop_control/inner.yml
new file mode 100644
index 00000000..1c286fa4
--- /dev/null
+++ b/test/integration/targets/loop_control/inner.yml
@@ -0,0 +1,10 @@
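+# ansible_loop is only defined here because the including loop in extended.yml sets loop_control.extended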
+- name: assert ansible_loop variables in include_tasks
+ assert:
+ that:
+ - ansible_loop.index == ansible_loop.index0 + 1
+ - ansible_loop.revindex == ansible_loop.revindex0 + 1
+ - ansible_loop.first == {{ ansible_loop.index == 1 }}
+ - ansible_loop.last == {{ ansible_loop.index == ansible_loop.length }}
+ - ansible_loop.length == 3
+ - ansible_loop.allitems|join(',') == 'first,second,third'
diff --git a/test/integration/targets/loop_control/label.yml b/test/integration/targets/loop_control/label.yml
new file mode 100644
index 00000000..5ac85fdf
--- /dev/null
+++ b/test/integration/targets/loop_control/label.yml
@@ -0,0 +1,23 @@
+- name: loop_control/label https://github.com/ansible/ansible/pull/36430
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - set_fact:
+ loopthis:
+ - name: foo
+ label: foo_label
+ - name: bar
+ label: bar_label
+
+ - name: check that item label is updated each iteration
+ debug:
+ msg: "{{ looped_var.name }}"
+ with_items: "{{ loopthis }}"
+ loop_control:
+ loop_var: looped_var
+ label: "looped_var {{ looped_var.label }}"
+#
+# - assert:
+# that:
+# - "output.results[0]['_ansible_item_label'] == 'looped_var foo_label'"
+# - "output.results[1]['_ansible_item_label'] == 'looped_var bar_label'"
diff --git a/test/integration/targets/loop_control/runme.sh b/test/integration/targets/loop_control/runme.sh
new file mode 100755
index 00000000..af065ea0
--- /dev/null
+++ b/test/integration/targets/loop_control/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# user output has:
+#ok: [localhost] => (item=looped_var foo_label) => {
+#ok: [localhost] => (item=looped_var bar_label) => {
+MATCH='foo_label
+bar_label'
+[ "$(ansible-playbook label.yml "$@" |grep 'item='|sed -e 's/^.*(item=looped_var \(.*\)).*$/\1/')" == "${MATCH}" ]
+
+ansible-playbook extended.yml "$@"
diff --git a/test/integration/targets/loops/aliases b/test/integration/targets/loops/aliases
new file mode 100644
index 00000000..ed821c27
--- /dev/null
+++ b/test/integration/targets/loops/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+skip/aix
diff --git a/test/integration/targets/loops/files/data1.txt b/test/integration/targets/loops/files/data1.txt
new file mode 100644
index 00000000..b044a82a
--- /dev/null
+++ b/test/integration/targets/loops/files/data1.txt
@@ -0,0 +1 @@
+ Hello World
diff --git a/test/integration/targets/loops/files/data2.txt b/test/integration/targets/loops/files/data2.txt
new file mode 100644
index 00000000..e9359ad1
--- /dev/null
+++ b/test/integration/targets/loops/files/data2.txt
@@ -0,0 +1 @@
+ Olá Mundo
diff --git a/test/integration/targets/loops/tasks/index_var_tasks.yml b/test/integration/targets/loops/tasks/index_var_tasks.yml
new file mode 100644
index 00000000..fa9a5bdf
--- /dev/null
+++ b/test/integration/targets/loops/tasks/index_var_tasks.yml
@@ -0,0 +1,3 @@
+- name: check that index var exists inside included tasks file
+ assert:
+ that: my_idx == item|int
diff --git a/test/integration/targets/loops/tasks/main.yml b/test/integration/targets/loops/tasks/main.yml
new file mode 100644
index 00000000..5575dd36
--- /dev/null
+++ b/test/integration/targets/loops/tasks/main.yml
@@ -0,0 +1,391 @@
+#
+# loop_control/pause
+#
+
+- name: Measure time before
+ shell: date +%s
+ register: before
+
+- debug:
+ var: i
+ with_sequence: count=3
+ loop_control:
+ loop_var: i
+ pause: 2
+
+- name: Measure time after
+ shell: date +%s
+ register: after
+
+# since there are 3 rounds with 2 seconds between each, the loop should take at least 4 seconds
+# we do not test the upper bound, since CI can lag significantly
+- assert:
+ that:
+ - '(after.stdout |int) - (before.stdout|int) >= 4'
+
+- name: test subsecond pause
+ block:
+  - name: Record a timestamp on each iteration of a loop with a 0.6s pause
+ set_fact:
+ times: "{{times|default([]) + [ lookup('pipe','date +%s.%3N') ]}}"
+ with_sequence: count=3
+ loop_control:
+ pause: 0.6
+
+  - name: ensure lag, since there are 3 rounds with 0.6 seconds between each, it should last at least 1.2 seconds, allowing leeway due to CI lag
+ assert:
+ that:
+ - tdiff|float >= 1.2
+ - tdiff|int < 3
+ vars:
+ tdiff: '{{ times[2]|float - times[0]|float }}'
+ when:
+ - ansible_facts['distribution'] not in ("MacOSX", "FreeBSD")
+
+#
+# Tests of loop syntax with args
+#
+
+- name: Test that with_list works with a list
+ ping:
+ data: '{{ item }}'
+ with_list:
+ - 'Hello World'
+ - 'Olá Mundo'
+ register: results
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results["results"][0]["ping"] == "Hello World"'
+ - 'results["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that with_list works with a list inside a variable
+ ping:
+ data: '{{ item }}'
+ with_list: '{{ phrases }}'
+ register: results2
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results2["results"][0]["ping"] == "Hello World"'
+ - 'results2["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a manual list
+ ping:
+ data: '{{ item }}'
+ loop:
+ - 'Hello World'
+ - 'Olá Mundo'
+ register: results3
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results3["results"][0]["ping"] == "Hello World"'
+ - 'results3["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list in a variable
+ ping:
+ data: '{{ item }}'
+ loop: '{{ phrases }}'
+ register: results4
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results4["results"][0]["ping"] == "Hello World"'
+ - 'results4["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list via the list lookup
+ ping:
+ data: '{{ item }}'
+ loop: '{{ lookup("list", "Hello World", "Olá Mundo", wantlist=True) }}'
+ register: results5
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results5["results"][0]["ping"] == "Hello World"'
+ - 'results5["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list in a variable via the list lookup
+ ping:
+ data: '{{ item }}'
+ loop: '{{ lookup("list", wantlist=True, *phrases) }}'
+ register: results6
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results6["results"][0]["ping"] == "Hello World"'
+ - 'results6["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list via the query lookup
+ ping:
+ data: '{{ item }}'
+ loop: '{{ query("list", "Hello World", "Olá Mundo") }}'
+ register: results7
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results7["results"][0]["ping"] == "Hello World"'
+ - 'results7["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list in a variable via the query lookup
+ ping:
+ data: '{{ item }}'
+ loop: '{{ q("list", *phrases) }}'
+ register: results8
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results8["results"][0]["ping"] == "Hello World"'
+ - 'results8["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list and keyword args
+ ping:
+ data: '{{ item }}'
+ loop: '{{ q("file", "data1.txt", "data2.txt", lstrip=True) }}'
+ register: results9
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results9["results"][0]["ping"] == "Hello World"'
+ - 'results9["results"][1]["ping"] == "Olá Mundo"'
+
+- name: Test that loop works with a list in variable and keyword args
+ ping:
+ data: '{{ item }}'
+ loop: '{{ q("file", lstrip=True, *filenames) }}'
+ register: results10
+
+- name: Assert that we ran the module twice with the correct strings
+ assert:
+ that:
+ - 'results10["results"][0]["ping"] == "Hello World"'
+ - 'results10["results"][1]["ping"] == "Olá Mundo"'
+
+#
+# loop_control/index_var
+#
+
+- name: check that the index var is created and increments as expected
+ assert:
+ that: my_idx == item|int
+ with_sequence: start=0 count=3
+ loop_control:
+ index_var: my_idx
+
+- name: check that value of index var matches position of current item in source list
+ assert:
+ that: 'test_var.index(item) == my_idx'
+ vars:
+ test_var: ['a', 'b', 'c']
+ with_items: "{{ test_var }}"
+ loop_control:
+ index_var: my_idx
+
+- name: check index var with included tasks file
+ include_tasks: index_var_tasks.yml
+ with_sequence: start=0 count=3
+ loop_control:
+ index_var: my_idx
+
+
+# The following test cases are to ensure that we don't have a regression on
+# GitHub Issue https://github.com/ansible/ansible/issues/35481
+#
+# This should execute and not cause a RuntimeError
+- debug:
+ msg: "with_dict passed a list: {{item}}"
+ with_dict: "{{ a_list }}"
+ register: with_dict_passed_a_list
+ ignore_errors: True
+- assert:
+ that:
+ - with_dict_passed_a_list is failed
+- debug:
+ msg: "with_list passed a dict: {{item}}"
+ with_list: "{{ a_dict }}"
+ register: with_list_passed_a_dict
+ ignore_errors: True
+- assert:
+ that:
+ - with_list_passed_a_dict is failed
+
+- debug:
+ var: "item"
+ loop:
+ - "{{ ansible_search_path }}"
+ register: loop_search_path
+
+- assert:
+ that:
+ - ansible_search_path == loop_search_path.results.0.item
+
+# https://github.com/ansible/ansible/issues/45189
+- name: with_X conditional delegate_to shortcircuit on templating error
+ debug:
+ msg: "loop"
+ when: false
+ delegate_to: localhost
+ with_list: "{{ fake_var }}"
+ register: result
+ failed_when: result is not skipped
+
+- name: loop conditional delegate_to shortcircuit on templating error
+ debug:
+ msg: "loop"
+ when: false
+ delegate_to: localhost
+ loop: "{{ fake_var }}"
+ register: result
+ failed_when: result is not skipped
+
+- name: Loop on literal empty list
+ debug:
+ loop: []
+ register: literal_empty_list
+ failed_when: literal_empty_list is not skipped
+
+# https://github.com/ansible/ansible/issues/47372
+- name: Loop unsafe list
+ debug:
+ var: item
+ with_items: "{{ things|list|unique }}"
+ vars:
+ things:
+ - !unsafe foo
+ - !unsafe bar
+
+- name: extended loop info
+ assert:
+ that:
+ - ansible_loop.nextitem == 'orange'
+ - ansible_loop.index == 1
+ - ansible_loop.index0 == 0
+ - ansible_loop.first
+ - not ansible_loop.last
+ - ansible_loop.previtem is undefined
+ - ansible_loop.allitems == ['apple', 'orange', 'banana']
+ - ansible_loop.revindex == 3
+ - ansible_loop.revindex0 == 2
+ - ansible_loop.length == 3
+ loop:
+ - apple
+ - orange
+ - banana
+ loop_control:
+ extended: true
+ when: item == 'apple'
+
+- name: extended loop info 2
+ assert:
+ that:
+ - ansible_loop.nextitem == 'banana'
+ - ansible_loop.index == 2
+ - ansible_loop.index0 == 1
+ - not ansible_loop.first
+ - not ansible_loop.last
+ - ansible_loop.previtem == 'apple'
+ - ansible_loop.allitems == ['apple', 'orange', 'banana']
+ - ansible_loop.revindex == 2
+ - ansible_loop.revindex0 == 1
+ - ansible_loop.length == 3
+ loop:
+ - apple
+ - orange
+ - banana
+ loop_control:
+ extended: true
+ when: item == 'orange'
+
+- name: extended loop info 3
+ assert:
+ that:
+ - ansible_loop.nextitem is undefined
+ - ansible_loop.index == 3
+ - ansible_loop.index0 == 2
+ - not ansible_loop.first
+ - ansible_loop.last
+ - ansible_loop.previtem == 'orange'
+ - ansible_loop.allitems == ['apple', 'orange', 'banana']
+ - ansible_loop.revindex == 1
+ - ansible_loop.revindex0 == 0
+ - ansible_loop.length == 3
+ loop:
+ - apple
+ - orange
+ - banana
+ loop_control:
+ extended: true
+ when: item == 'banana'
+
+- name: Validate the loop_var name
+ assert:
+ that:
+ - ansible_loop_var == 'alvin'
+ loop:
+ - 1
+ loop_control:
+ loop_var: alvin
+
+# https://github.com/ansible/ansible/issues/58820
+- name: Test using templated loop_var inside include_tasks
+ include_tasks: templated_loop_var_tasks.yml
+ loop:
+ - value
+ loop_control:
+ loop_var: "{{ loop_var_name }}"
+ vars:
+ loop_var_name: templated_loop_var_name
+
+# https://github.com/ansible/ansible/issues/59414
+- name: Test preserving original connection related vars
+ debug:
+ var: ansible_remote_tmp
+ vars:
+ ansible_remote_tmp: /tmp/test1
+ with_items:
+ - 1
+ - 2
+ register: loop_out
+
+- assert:
+ that:
+ - loop_out['results'][1]['ansible_remote_tmp'] == '/tmp/test1'
+
+# https://github.com/ansible/ansible/issues/64169
+- include_vars: 64169.yml
+
+- set_fact: "{{ item.key }}={{ hostvars[inventory_hostname][item.value] }}"
+ with_dict:
+ foo: __foo
+
+- debug:
+ var: foo
+
+- assert:
+ that:
+ - foo[0] != 'foo1.0'
+ - foo[0] == unsafe_value
+ vars:
+ unsafe_value: !unsafe 'foo{{ version_64169 }}'
+
+- set_fact: "{{ item.key }}={{ hostvars[inventory_hostname][item.value] }}"
+ loop: "{{ dicty_dict|dict2items }}"
+ vars:
+ dicty_dict:
+ foo: __foo
+
+- debug:
+ var: foo
+
+- assert:
+ that:
+ - foo[0] == 'foo1.0'
diff --git a/test/integration/targets/loops/tasks/templated_loop_var_tasks.yml b/test/integration/targets/loops/tasks/templated_loop_var_tasks.yml
new file mode 100644
index 00000000..1f8f9697
--- /dev/null
+++ b/test/integration/targets/loops/tasks/templated_loop_var_tasks.yml
@@ -0,0 +1,4 @@
+- name: Validate that the correct value was used
+ assert:
+ that:
+ - templated_loop_var_name == 'value'
diff --git a/test/integration/targets/loops/vars/64169.yml b/test/integration/targets/loops/vars/64169.yml
new file mode 100644
index 00000000..f48d616a
--- /dev/null
+++ b/test/integration/targets/loops/vars/64169.yml
@@ -0,0 +1,2 @@
+__foo:
+ - "foo{{ version_64169 }}"
diff --git a/test/integration/targets/loops/vars/main.yml b/test/integration/targets/loops/vars/main.yml
new file mode 100644
index 00000000..5d85370d
--- /dev/null
+++ b/test/integration/targets/loops/vars/main.yml
@@ -0,0 +1,8 @@
+---
+phrases:
+ - 'Hello World'
+ - 'Olá Mundo'
+filenames:
+ - 'data1.txt'
+ - 'data2.txt'
+version_64169: '1.0'
diff --git a/test/integration/targets/meta_tasks/aliases b/test/integration/targets/meta_tasks/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/meta_tasks/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/meta_tasks/inventory.yml b/test/integration/targets/meta_tasks/inventory.yml
new file mode 100644
index 00000000..5fb39e5f
--- /dev/null
+++ b/test/integration/targets/meta_tasks/inventory.yml
@@ -0,0 +1,9 @@
+local:
+ hosts:
+ testhost:
+ host_var_role_name: role3
+ testhost2:
+ host_var_role_name: role2
+ vars:
+ ansible_connection: local
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/meta_tasks/runme.sh b/test/integration/targets/meta_tasks/runme.sh
new file mode 100755
index 00000000..3f456def
--- /dev/null
+++ b/test/integration/targets/meta_tasks/runme.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# test end_host meta task, with when conditional
+for test_strategy in linear free; do
+ out="$(ansible-playbook test_end_host.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
+
+ grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out"
+ grep -q "META: ending play for testhost2" <<< "$out"
+ grep -q "play not ended for testhost" <<< "$out"
+ grep -qv "play not ended for testhost2" <<< "$out"
+
+ out="$(ansible-playbook test_end_host_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
+
+ grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out"
+ grep -q "META: ending play for testhost2" <<< "$out"
+ grep -q "play not ended for testhost" <<< "$out"
+ grep -qv "play not ended for testhost2" <<< "$out"
+done
+
+# test end_host meta task, on all hosts
+for test_strategy in linear free; do
+ out="$(ansible-playbook test_end_host_all.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
+
+ grep -q "META: ending play for testhost" <<< "$out"
+ grep -q "META: ending play for testhost2" <<< "$out"
+ grep -qv "play not ended for testhost" <<< "$out"
+ grep -qv "play not ended for testhost2" <<< "$out"
+
+ out="$(ansible-playbook test_end_host_all_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
+
+ grep -q "META: ending play for testhost" <<< "$out"
+ grep -q "META: ending play for testhost2" <<< "$out"
+ grep -qv "play not ended for testhost" <<< "$out"
+ grep -qv "play not ended for testhost2" <<< "$out"
+done
+
+# test end_play meta task
+for test_strategy in linear free; do
+ out="$(ansible-playbook test_end_play.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
+
+ grep -q "META: ending play" <<< "$out"
+ grep -qv 'Failed to end using end_play' <<< "$out"
+
+ out="$(ansible-playbook test_end_play_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
+
+ grep -q "META: ending play" <<< "$out"
+ grep -qv 'Failed to end using end_play' <<< "$out"
+done
diff --git a/test/integration/targets/meta_tasks/test_end_host.yml b/test/integration/targets/meta_tasks/test_end_host.yml
new file mode 100644
index 00000000..a8bb0562
--- /dev/null
+++ b/test/integration/targets/meta_tasks/test_end_host.yml
@@ -0,0 +1,14 @@
+- name: "Testing end_host with strategy={{ test_strategy | default('linear') }}"
+ hosts:
+ - testhost
+ - testhost2
+ gather_facts: no
+ strategy: "{{ test_strategy | default('linear') }}"
+ tasks:
+ - debug:
+
+ - meta: end_host
+ when: "host_var_role_name == 'role2'" # end play for testhost2, see inventory
+
+ - debug:
+ msg: "play not ended for {{ inventory_hostname }}"
diff --git a/test/integration/targets/meta_tasks/test_end_host_all.yml b/test/integration/targets/meta_tasks/test_end_host_all.yml
new file mode 100644
index 00000000..dab5e881
--- /dev/null
+++ b/test/integration/targets/meta_tasks/test_end_host_all.yml
@@ -0,0 +1,13 @@
+- name: "Testing end_host all hosts with strategy={{ test_strategy | default('linear') }}"
+ hosts:
+ - testhost
+ - testhost2
+ gather_facts: no
+ strategy: "{{ test_strategy | default('linear') }}"
+ tasks:
+ - debug:
+
+ - meta: end_host
+
+ - debug:
+ msg: "play not ended {{ inventory_hostname }}"
diff --git a/test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml b/test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml
new file mode 100644
index 00000000..78b5a2e9
--- /dev/null
+++ b/test/integration/targets/meta_tasks/test_end_host_all_fqcn.yml
@@ -0,0 +1,13 @@
+- name: "Testing end_host all hosts with strategy={{ test_strategy | default('linear') }}"
+ hosts:
+ - testhost
+ - testhost2
+ gather_facts: no
+ strategy: "{{ test_strategy | default('linear') }}"
+ tasks:
+ - debug:
+
+ - ansible.builtin.meta: end_host
+
+ - debug:
+ msg: "play not ended {{ inventory_hostname }}"
diff --git a/test/integration/targets/meta_tasks/test_end_host_fqcn.yml b/test/integration/targets/meta_tasks/test_end_host_fqcn.yml
new file mode 100644
index 00000000..bdb38b53
--- /dev/null
+++ b/test/integration/targets/meta_tasks/test_end_host_fqcn.yml
@@ -0,0 +1,14 @@
+- name: "Testing end_host with strategy={{ test_strategy | default('linear') }}"
+ hosts:
+ - testhost
+ - testhost2
+ gather_facts: no
+ strategy: "{{ test_strategy | default('linear') }}"
+ tasks:
+ - debug:
+
+ - ansible.builtin.meta: end_host
+ when: "host_var_role_name == 'role2'" # end play for testhost2, see inventory
+
+ - debug:
+ msg: "play not ended for {{ inventory_hostname }}"
diff --git a/test/integration/targets/meta_tasks/test_end_play.yml b/test/integration/targets/meta_tasks/test_end_play.yml
new file mode 100644
index 00000000..29489dc4
--- /dev/null
+++ b/test/integration/targets/meta_tasks/test_end_play.yml
@@ -0,0 +1,12 @@
+- name: Testing end_play with strategy {{ test_strategy | default('linear') }}
+ hosts: testhost:testhost2
+ gather_facts: no
+ strategy: "{{ test_strategy | default('linear') }}"
+ tasks:
+ - debug:
+ msg: "Testing end_play on host {{ inventory_hostname }}"
+
+ - meta: end_play
+
+ - fail:
+ msg: 'Failed to end using end_play'
diff --git a/test/integration/targets/meta_tasks/test_end_play_fqcn.yml b/test/integration/targets/meta_tasks/test_end_play_fqcn.yml
new file mode 100644
index 00000000..2ae67fbe
--- /dev/null
+++ b/test/integration/targets/meta_tasks/test_end_play_fqcn.yml
@@ -0,0 +1,12 @@
+- name: Testing end_play with strategy {{ test_strategy | default('linear') }}
+ hosts: testhost:testhost2
+ gather_facts: no
+ strategy: "{{ test_strategy | default('linear') }}"
+ tasks:
+ - debug:
+ msg: "Testing end_play on host {{ inventory_hostname }}"
+
+ - ansible.builtin.meta: end_play
+
+ - fail:
+ msg: 'Failed to end using end_play'
diff --git a/test/integration/targets/missing_required_lib/aliases b/test/integration/targets/missing_required_lib/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/missing_required_lib/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/missing_required_lib/library/missing_required_lib.py b/test/integration/targets/missing_required_lib/library/missing_required_lib.py
new file mode 100644
index 00000000..480ea001
--- /dev/null
+++ b/test/integration/targets/missing_required_lib/library/missing_required_lib.py
@@ -0,0 +1,38 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+try:
+ import ansible_missing_lib
+ HAS_LIB = True
+except ImportError as e:
+ HAS_LIB = False
+
+
+def main():
+ module = AnsibleModule({
+ 'url': {'type': 'bool'},
+ 'reason': {'type': 'bool'},
+ })
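+    # build optional kwargs so the test can exercise the url/reason variants of the message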
+ kwargs = {}
+ if module.params['url']:
+ kwargs['url'] = 'https://github.com/ansible/ansible'
+ if module.params['reason']:
+ kwargs['reason'] = 'for fun'
+ if not HAS_LIB:
+ module.fail_json(
+ msg=missing_required_lib(
+ 'ansible_missing_lib',
+ **kwargs
+ ),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/missing_required_lib/runme.sh b/test/integration/targets/missing_required_lib/runme.sh
new file mode 100755
index 00000000..2e1ea8d8
--- /dev/null
+++ b/test/integration/targets/missing_required_lib/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+export ANSIBLE_ROLES_PATH=../
+ansible-playbook -i ../../inventory runme.yml -e "output_dir=${OUTPUT_DIR}" -v "$@"
diff --git a/test/integration/targets/missing_required_lib/runme.yml b/test/integration/targets/missing_required_lib/runme.yml
new file mode 100644
index 00000000..e1df7959
--- /dev/null
+++ b/test/integration/targets/missing_required_lib/runme.yml
@@ -0,0 +1,57 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - command: ansible localhost -m import_role -a role=missing_required_lib -e url=true -e reason=true
+ register: missing_required_lib_all
+ failed_when: missing_required_lib_all.rc == 0
+
+ - command: ansible localhost -m import_role -a role=missing_required_lib
+ register: missing_required_lib_none
+ failed_when: missing_required_lib_none.rc == 0
+
+ - command: ansible localhost -m import_role -a role=missing_required_lib -e url=true
+ register: missing_required_lib_url
+ failed_when: missing_required_lib_url.rc == 0
+
+ - command: ansible localhost -m import_role -a role=missing_required_lib -e reason=true
+ register: missing_required_lib_reason
+ failed_when: missing_required_lib_reason.rc == 0
+
+ - assert:
+ that:
+ - missing_required_lib_all.stdout is search(expected_all)
+ - missing_required_lib_none.stdout is search(expected_none)
+ - missing_required_lib_url.stdout is search(expected_url)
+ - missing_required_lib_reason.stdout is search(expected_reason)
+ vars:
+ expected_all: >-
+ Failed to import the required Python library \(ansible_missing_lib\) on
+ \S+'s Python \S+\.
+ This is required for fun\. See https://github.com/ansible/ansible for
+        more info\. Please read the module documentation and install it in the
+ appropriate location\. If the required library is installed, but Ansible
+ is using the wrong Python interpreter, please consult the documentation
+ on ansible_python_interpreter
+ expected_none: >-
+ Failed to import the required Python library \(ansible_missing_lib\) on
+ \S+'s Python \S+\.
+ Please read the module documentation and install it in the
+ appropriate location\. If the required library is installed, but Ansible
+ is using the wrong Python interpreter, please consult the documentation
+ on ansible_python_interpreter
+ expected_url: >-
+ Failed to import the required Python library \(ansible_missing_lib\) on
+ \S+'s Python \S+\.
+ See https://github.com/ansible/ansible for
+ more info\. Please read the module documentation and install it in the
+ appropriate location\. If the required library is installed, but Ansible
+ is using the wrong Python interpreter, please consult the documentation
+ on ansible_python_interpreter
+ expected_reason: >-
+ Failed to import the required Python library \(ansible_missing_lib\) on
+ \S+'s Python \S+\.
+ This is required for fun\.
+ Please read the module documentation and install it in the
+ appropriate location\. If the required library is installed, but Ansible
+ is using the wrong Python interpreter, please consult the documentation
+ on ansible_python_interpreter
diff --git a/test/integration/targets/missing_required_lib/tasks/main.yml b/test/integration/targets/missing_required_lib/tasks/main.yml
new file mode 100644
index 00000000..a50f5ac4
--- /dev/null
+++ b/test/integration/targets/missing_required_lib/tasks/main.yml
@@ -0,0 +1,3 @@
+- missing_required_lib:
+ url: '{{ url|default(omit) }}'
+ reason: '{{ reason|default(omit) }}'
diff --git a/test/integration/targets/module_defaults/aliases b/test/integration/targets/module_defaults/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/module_defaults/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py
new file mode 100644
index 00000000..f7777b8a
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/action/other_echoaction.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.testns.testcoll.plugins.action.echoaction import ActionModule as BaseAM
+
+
+class ActionModule(BaseAM):
+ pass
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py
new file mode 100644
index 00000000..771395f2
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/othercoll/plugins/modules/other_echo1.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo
+
+
+def main():
+ do_echo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml
new file mode 100644
index 00000000..62695fbc
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/meta/runtime.yml
@@ -0,0 +1,9 @@
+action_groups:
+ testgroup:
+ - testns.testcoll.echo1
+ - testns.testcoll.echo2
+# note we can define defaults for an action
+ - testns.testcoll.echoaction
+# note we can define defaults in this group for actions/modules in another collection
+ - testns.othercoll.other_echoaction
+ - testns.othercoll.other_echo1
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py
new file mode 100644
index 00000000..2fa097b2
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/action/echoaction.py
@@ -0,0 +1,20 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset()
+
+ def run(self, tmp=None, task_vars=None):
+ if task_vars is None:
+ task_vars = dict()
+
+        # run the base action for its side effects; the result is rebuilt below
+        super(ActionModule, self).run(None, task_vars)
+
+ result = dict(changed=False, args_in=self._task.args)
+
+ return result
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py
new file mode 100644
index 00000000..f5c5d737
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/module_utils/echo_impl.py
@@ -0,0 +1,17 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+from ansible.module_utils import basic
+from ansible.module_utils.basic import _load_params, AnsibleModule
+
+
+def do_echo():
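+    # grab the raw task args before AnsibleModule validation, then blank out
+    # the cached module args so AnsibleModule accepts an empty argument_spec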
+ p = _load_params()
+ d = json.loads(basic._ANSIBLE_ARGS)
+ d['ANSIBLE_MODULE_ARGS'] = {}
+ basic._ANSIBLE_ARGS = json.dumps(d).encode('utf-8')
+ module = AnsibleModule(argument_spec={})
+ module.exit_json(args_in=p)
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py
new file mode 100644
index 00000000..771395f2
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo1.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo
+
+
+def main():
+ do_echo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py
new file mode 100644
index 00000000..771395f2
--- /dev/null
+++ b/test/integration/targets/module_defaults/collections/ansible_collections/testns/testcoll/plugins/modules/echo2.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.testns.testcoll.plugins.module_utils.echo_impl import do_echo
+
+
+def main():
+ do_echo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_defaults/library/test_module_defaults.py b/test/integration/targets/module_defaults/library/test_module_defaults.py
new file mode 100644
index 00000000..ede8c995
--- /dev/null
+++ b/test/integration/targets/module_defaults/library/test_module_defaults.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arg1=dict(type='str', default='default1'),
+ arg2=dict(type='str', default='default2'),
+ arg3=dict(type='str', default='default3'),
+ ),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ test_module_defaults=dict(
+ arg1=module.params['arg1'],
+ arg2=module.params['arg2'],
+ arg3=module.params['arg3'],
+ ),
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_defaults/runme.sh b/test/integration/targets/module_defaults/runme.sh
new file mode 100755
index 00000000..c19e607b
--- /dev/null
+++ b/test/integration/targets/module_defaults/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_defaults.yml "$@"
diff --git a/test/integration/targets/module_defaults/tasks/main.yml b/test/integration/targets/module_defaults/tasks/main.yml
new file mode 100644
index 00000000..3ed960d3
--- /dev/null
+++ b/test/integration/targets/module_defaults/tasks/main.yml
@@ -0,0 +1,90 @@
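+# module_defaults set on the outer block apply to every task inside it, including nested blocks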
+- name: main block
+ vars:
+ test_file: /tmp/ansible-test.module_defaults.foo
+ module_defaults:
+ debug:
+ msg: test default
+ file:
+ path: '{{ test_file }}'
+ block:
+ - debug:
+ register: foo
+
+ - name: test that 'debug' task used default 'msg' param
+ assert:
+ that: foo.msg == "test default"
+
+ - name: remove test file
+ file:
+ state: absent
+
+ - name: touch test file
+ file:
+ state: touch
+
+ - name: stat test file
+ stat:
+ path: '{{ test_file }}'
+ register: foo
+
+ - name: check that test file exists
+ assert:
+ that: foo.stat.exists
+
+ - name: remove test file
+ file:
+ state: absent
+
+ - name: test that module defaults from parent are inherited and merged
+ module_defaults:
+ # Meaningless values to make sure that 'module_defaults' gets
+ # evaluated for this block
+ foo:
+ bar: baz
+ block:
+ - debug:
+ register: foo
+
+ - assert:
+ that: foo.msg == "test default"
+
+ - name: test that we can override module defaults inherited from parent
+ module_defaults:
+ debug:
+ msg: "different test message"
+ block:
+ - debug:
+ register: foo
+
+ - assert:
+ that: foo.msg == "different test message"
+
+ - name: test that module defaults inherited from parent can be removed
+ module_defaults:
+ debug: {}
+ block:
+ - debug:
+ register: foo
+
+ - assert:
+ that:
+ foo.msg == "Hello world!"
+
+ - name: test that module defaults can be overridden by module params
+ block:
+ - debug:
+ msg: another test message
+ register: foo
+
+ - assert:
+ that:
+ foo.msg == "another test message"
+
+ - debug:
+ msg: '{{ omit }}'
+ register: foo
+
+ - assert:
+ that:
+ foo.msg == "Hello world!"
diff --git a/test/integration/targets/module_defaults/test_defaults.yml b/test/integration/targets/module_defaults/test_defaults.yml
new file mode 100644
index 00000000..15b66362
--- /dev/null
+++ b/test/integration/targets/module_defaults/test_defaults.yml
@@ -0,0 +1,61 @@
+- hosts: localhost
+ gather_facts: no
+ collections:
+ - testns.testcoll
+ - testns.othercoll
+ module_defaults:
+ testns.testcoll.echoaction:
+ explicit_module_default: from playbook
+ testns.testcoll.echo1:
+ explicit_module_default: from playbook
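+    # group/testgroup applies these defaults to every action in the testgroup action_group (see testcoll's meta/runtime.yml)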
+ group/testgroup:
+ group_module_default: from playbook
+ tasks:
+ - testns.testcoll.echoaction:
+ task_arg: from task
+ register: echoaction_fq
+ - echoaction:
+ task_arg: from task
+ register: echoaction_unq
+ - testns.testcoll.echo1:
+ task_arg: from task
+ register: echo1_fq
+ - echo1:
+ task_arg: from task
+ register: echo1_unq
+ - testns.testcoll.echo2:
+ task_arg: from task
+ register: echo2_fq
+ - echo2:
+ task_arg: from task
+ register: echo2_unq
+ - testns.othercoll.other_echoaction:
+ task_arg: from task
+ register: other_echoaction_fq
+ - other_echoaction:
+ task_arg: from task
+ register: other_echoaction_unq
+ - testns.othercoll.other_echo1:
+ task_arg: from task
+ register: other_echo1_fq
+ - other_echo1:
+ task_arg: from task
+ register: other_echo1_unq
+
+ - debug: var=echo1_fq
+
+ - assert:
+ that:
+ - "echoaction_fq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }"
+ - "echoaction_unq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }"
+ - "echo1_fq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }"
+ - "echo1_unq.args_in == {'task_arg': 'from task', 'explicit_module_default': 'from playbook', 'group_module_default': 'from playbook' }"
+ - "echo2_fq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }"
+ - "echo2_unq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }"
+ - "other_echoaction_fq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }"
+ - "other_echoaction_unq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }"
+ - "other_echo1_fq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }"
+ - "other_echo1_unq.args_in == {'task_arg': 'from task', 'group_module_default': 'from playbook' }"
+
+ - include_tasks: tasks/main.yml
diff --git a/test/integration/targets/module_no_log/aliases b/test/integration/targets/module_no_log/aliases
new file mode 100644
index 00000000..cbbb8804
--- /dev/null
+++ b/test/integration/targets/module_no_log/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group1
+skip/aix # not configured to log user.info to /var/log/syslog
+skip/freebsd # not configured to log user.info to /var/log/syslog
+skip/osx # not configured to log user.info to /var/log/syslog
+skip/macos # not configured to log user.info to /var/log/syslog
diff --git a/test/integration/targets/module_no_log/library/module_that_logs.py b/test/integration/targets/module_no_log/library/module_that_logs.py
new file mode 100644
index 00000000..44b36eeb
--- /dev/null
+++ b/test/integration/targets/module_no_log/library/module_that_logs.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ number=dict(type='int'),
+ ))
+
+ module.log('My number is: (%d)' % module.params['number'])
+ module.exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_no_log/tasks/main.yml b/test/integration/targets/module_no_log/tasks/main.yml
new file mode 100644
index 00000000..cf9e5802
--- /dev/null
+++ b/test/integration/targets/module_no_log/tasks/main.yml
@@ -0,0 +1,61 @@
+- name: Detect syslog
+ stat:
+ path: /var/log/syslog
+ register: syslog
+
+- name: Detect journalctl
+ shell: command -V journalctl
+ ignore_errors: yes
+ changed_when: no
+ register: journalctl
+
+- block:
+ - name: Skip tests if logs were not found.
+ debug:
+ msg: Did not find /var/log/syslog or journalctl. Tests will be skipped.
+ - meta: end_play
+ when: journalctl is failed and not syslog.stat.exists
+
+- name: Generate random numbers for unique log entries
+ set_fact:
+ good_number: "{{ 999999999999 | random }}"
+ bad_number: "{{ 999999999999 | random }}"
+
+- name: Generate expected log entry messages
+ set_fact:
+ good_message: 'My number is: ({{ good_number }})'
+ bad_message: 'My number is: ({{ bad_number }})'
+
+- name: Generate log message search patterns
+ set_fact:
+ # these search patterns are designed to avoid matching themselves
+ good_search: '{{ good_message.replace(":", "[:]") }}'
+ bad_search: '{{ bad_message.replace(":", "[:]") }}'
+
+- name: Generate grep command
+ set_fact:
+ grep_command: "grep -e '{{ good_search }}' -e '{{ bad_search }}'"
+
+- name: Run a module that logs without no_log
+ module_that_logs:
+ number: "{{ good_number }}"
+
+- name: Run a module that logs with no_log
+ module_that_logs:
+ number: "{{ bad_number }}"
+ no_log: yes
+
+- name: Search for expected log messages
+ # if this fails the tests are probably running on a system which stores logs elsewhere
+ shell: "({{ grep_command }} /var/log/syslog) || (journalctl | {{ grep_command }})"
+ changed_when: no
+ register: grep
+
+- name: Verify the correct log messages were found
+ assert:
+ that:
+ # if the good message is not found then the cause is likely one of:
+ # 1) the remote system does not write user.info messages to the logs
+ # 2) the AnsibleModule.log method is not working
+ - good_message in grep.stdout
+ - bad_message not in grep.stdout
diff --git a/test/integration/targets/module_precedence/aliases b/test/integration/targets/module_precedence/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/module_precedence/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/module_precedence/lib_no_extension/ping b/test/integration/targets/module_precedence/lib_no_extension/ping
new file mode 100644
index 00000000..e30706e8
--- /dev/null
+++ b/test/integration/targets/module_precedence/lib_no_extension/ping
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description:
+ - A trivial test module, this module always returns C(pong) on successful
+ contact. It does not make sense in playbooks, but it is useful from
+    C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
+ - This is NOT ICMP ping, this is just a trivial test module.
+options: {}
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Test we can log on to 'webservers' and execute python with json lib.
+ansible webservers -m ping
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ result = dict(ping='pong')
+ if module.params['data']:
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+ result['ping'] = module.params['data']
+ result['location'] = 'library'
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/lib_with_extension/a.ini b/test/integration/targets/module_precedence/lib_with_extension/a.ini
new file mode 100644
index 00000000..80278c9e
--- /dev/null
+++ b/test/integration/targets/module_precedence/lib_with_extension/a.ini
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, location='a.ini')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/lib_with_extension/a.py b/test/integration/targets/module_precedence/lib_with_extension/a.py
new file mode 100644
index 00000000..8eda1419
--- /dev/null
+++ b/test/integration/targets/module_precedence/lib_with_extension/a.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, location='a.py')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/lib_with_extension/ping.ini b/test/integration/targets/module_precedence/lib_with_extension/ping.ini
new file mode 100644
index 00000000..6f4b6a1a
--- /dev/null
+++ b/test/integration/targets/module_precedence/lib_with_extension/ping.ini
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, location='ping.ini')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/lib_with_extension/ping.py b/test/integration/targets/module_precedence/lib_with_extension/ping.py
new file mode 100644
index 00000000..e30706e8
--- /dev/null
+++ b/test/integration/targets/module_precedence/lib_with_extension/ping.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description:
+ - A trivial test module, this module always returns C(pong) on successful
+ contact. It does not make sense in playbooks, but it is useful from
+    C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
+ - This is NOT ICMP ping, this is just a trivial test module.
+options: {}
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Test that we can log on to 'webservers' and execute python with the json lib.
+ansible webservers -m ping
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ result = dict(ping='pong')
+ if module.params['data']:
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+ result['ping'] = module.params['data']
+ result['location'] = 'library'
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/modules_test.yml b/test/integration/targets/module_precedence/modules_test.yml
new file mode 100644
index 00000000..cf3e8886
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test.yml
@@ -0,0 +1,10 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Use standard ping module
+ ping:
+ register: result
+
+ - assert:
+ that:
+ - '"location" not in result'
diff --git a/test/integration/targets/module_precedence/modules_test_envvar.yml b/test/integration/targets/module_precedence/modules_test_envvar.yml
new file mode 100644
index 00000000..f52e2f91
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test_envvar.yml
@@ -0,0 +1,11 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Use ping from library path
+ ping:
+ register: result
+
+ - assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "library"'
diff --git a/test/integration/targets/module_precedence/modules_test_envvar_ext.yml b/test/integration/targets/module_precedence/modules_test_envvar_ext.yml
new file mode 100644
index 00000000..48f27c4f
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test_envvar_ext.yml
@@ -0,0 +1,18 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Use ping from library path
+ ping:
+ register: result
+
+ - name: Use a from library path
+ a:
+ register: a_res
+
+ - assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "library"'
+ - 'a_res["location"] == "a.py"'
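+# 'a.py' is expected to win over 'a.ini' here: rejecting the '.ini'
+# extension is exactly what MODULE_IGNORE_EXTS should do (see runme.sh for caveats).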
diff --git a/test/integration/targets/module_precedence/modules_test_multiple_roles.yml b/test/integration/targets/module_precedence/modules_test_multiple_roles.yml
new file mode 100644
index 00000000..f4bd2649
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test_multiple_roles.yml
@@ -0,0 +1,17 @@
+- hosts: testhost
+ gather_facts: no
+ vars:
+ expected_location: "role: foo"
+ roles:
+ - foo
+ - bar
+
+ tasks:
+ - name: Use ping from role
+ ping:
+ register: result
+
+ - assert:
+ that:
+ - '"location" in result'
+      - 'result["location"] == "{{ expected_location }}"'
diff --git a/test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml b/test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml
new file mode 100644
index 00000000..5403ae23
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test_multiple_roles_reverse_order.yml
@@ -0,0 +1,16 @@
+- hosts: testhost
+ gather_facts: no
+ vars:
+ expected_location: "role: bar"
+ roles:
+ - bar
+ - foo
+ tasks:
+ - name: Use ping from role
+ ping:
+ register: result
+
+ - assert:
+ that:
+ - '"location" in result'
+      - 'result["location"] == "{{ expected_location }}"'
diff --git a/test/integration/targets/module_precedence/modules_test_role.yml b/test/integration/targets/module_precedence/modules_test_role.yml
new file mode 100644
index 00000000..ccbe31d8
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test_role.yml
@@ -0,0 +1,13 @@
+- hosts: testhost
+ gather_facts: no
+ roles:
+ - foo
+ tasks:
+ - name: Use ping from role
+ ping:
+ register: result
+
+ - assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "role: foo"'
diff --git a/test/integration/targets/module_precedence/modules_test_role_ext.yml b/test/integration/targets/module_precedence/modules_test_role_ext.yml
new file mode 100644
index 00000000..f8816f93
--- /dev/null
+++ b/test/integration/targets/module_precedence/modules_test_role_ext.yml
@@ -0,0 +1,18 @@
+- hosts: testhost
+ gather_facts: no
+ roles:
+ - foo
+ tasks:
+ - name: Use ping from role
+ ping:
+ register: result
+
+    - name: Use a from role
+ a:
+ register: a_res
+
+ - assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "role: foo"'
+ - 'a_res["location"] == "role: foo, a.py"'
diff --git a/test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py b/test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py
new file mode 100644
index 00000000..e7776001
--- /dev/null
+++ b/test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description:
+  - A trivial test module; it always returns C(pong) on successful
+    contact. It does not make sense in playbooks, but it is useful from
+    C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
+  - This is NOT ICMP ping; it is just a trivial test module.
+options: {}
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Test that we can log on to 'webservers' and execute python with the json lib.
+ansible webservers -m ping
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ result = dict(ping='pong')
+ if module.params['data']:
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+ result['ping'] = module.params['data']
+ result['location'] = 'role: bar'
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml b/test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml
new file mode 100644
index 00000000..52c34020
--- /dev/null
+++ b/test/integration/targets/module_precedence/multiple_roles/bar/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Use ping from inside this role
+ ping:
+ register: result
+
+- name: Make sure that we used the ping module from the expected role
+ assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "{{ expected_location }}"'
diff --git a/test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py b/test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py
new file mode 100644
index 00000000..a6d153ba
--- /dev/null
+++ b/test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description:
+  - A trivial test module; it always returns C(pong) on successful
+    contact. It does not make sense in playbooks, but it is useful from
+    C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
+  - This is NOT ICMP ping; it is just a trivial test module.
+options: {}
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Test that we can log on to 'webservers' and execute python with the json lib.
+ansible webservers -m ping
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ result = dict(ping='pong')
+ if module.params['data']:
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+ result['ping'] = module.params['data']
+ result['location'] = 'role: foo'
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml b/test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml
new file mode 100644
index 00000000..52c34020
--- /dev/null
+++ b/test/integration/targets/module_precedence/multiple_roles/foo/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Use ping from inside this role
+ ping:
+ register: result
+
+- name: Make sure that we used the ping module from the expected role
+ assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "{{ expected_location }}"'
diff --git a/test/integration/targets/module_precedence/roles_no_extension/foo/library/ping b/test/integration/targets/module_precedence/roles_no_extension/foo/library/ping
new file mode 100644
index 00000000..a6d153ba
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_no_extension/foo/library/ping
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description:
+  - A trivial test module; it always returns C(pong) on successful
+    contact. It does not make sense in playbooks, but it is useful from
+    C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
+  - This is NOT ICMP ping; it is just a trivial test module.
+options: {}
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Test that we can log on to 'webservers' and execute python with the json lib.
+ansible webservers -m ping
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ result = dict(ping='pong')
+ if module.params['data']:
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+ result['ping'] = module.params['data']
+ result['location'] = 'role: foo'
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml b/test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml
new file mode 100644
index 00000000..985fc341
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_no_extension/foo/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Use ping from inside foo role
+ ping:
+ register: result
+
+- name: Make sure that we used the ping module from the foo role
+ assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "role: foo"'
diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini
new file mode 100644
index 00000000..8b170291
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, location='role: foo, a.ini')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py
new file mode 100644
index 00000000..4bc5906d
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/a.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, location='role: foo, a.py')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini
new file mode 100644
index 00000000..f9c04f5c
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, location='role: foo, ping.ini')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py
new file mode 100644
index 00000000..a6d153ba
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ping
+version_added: historical
+short_description: Try to connect to host, verify a usable python and return C(pong) on success.
+description:
+  - A trivial test module; it always returns C(pong) on successful
+    contact. It does not make sense in playbooks, but it is useful from
+    C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
+  - This is NOT ICMP ping; it is just a trivial test module.
+options: {}
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+'''
+
+EXAMPLES = '''
+# Test that we can log on to 'webservers' and execute python with the json lib.
+ansible webservers -m ping
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ result = dict(ping='pong')
+ if module.params['data']:
+ if module.params['data'] == 'crash':
+ raise Exception("boom")
+ result['ping'] = module.params['data']
+ result['location'] = 'role: foo'
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml b/test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml
new file mode 100644
index 00000000..985fc341
--- /dev/null
+++ b/test/integration/targets/module_precedence/roles_with_extension/foo/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Use ping from inside foo role
+ ping:
+ register: result
+
+- name: Make sure that we used the ping module from the foo role
+ assert:
+ that:
+ - '"location" in result'
+ - 'result["location"] == "role: foo"'
diff --git a/test/integration/targets/module_precedence/runme.sh b/test/integration/targets/module_precedence/runme.sh
new file mode 100755
index 00000000..0f6a98fe
--- /dev/null
+++ b/test/integration/targets/module_precedence/runme.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Standard ping module
+ansible-playbook modules_test.yml -i ../../inventory -v "$@"
+
+# Library path ping module
+ANSIBLE_LIBRARY=lib_with_extension ansible-playbook modules_test_envvar.yml -i ../../inventory -v "$@"
+ANSIBLE_LIBRARY=lib_no_extension ansible-playbook modules_test_envvar.yml -i ../../inventory -v "$@"
+
+# ping module from role
+ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@"
+ANSIBLE_ROLES_PATH=roles_no_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@"
+
+# ping module from role when there's a library path module too
+ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@"
+ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@"
+ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=roles_no_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@"
+ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=roles_no_extension ansible-playbook modules_test_role.yml -i ../../inventory -v "$@"
+
+# ping module in multiple roles: note that this will use the first module
+# found, which is how things currently work, though it may not be the best way
+ANSIBLE_LIBRARY=lib_no_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles.yml -i ../../inventory -v "$@"
+ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles.yml -i ../../inventory -v "$@"
+
+# And prove that with multiple roles, it's the order the roles are listed in the play that matters
+ANSIBLE_LIBRARY=lib_with_extension ANSIBLE_ROLES_PATH=multiple_roles ansible-playbook modules_test_multiple_roles_reverse_order.yml -i ../../inventory -v "$@"
+
+# Tests for MODULE_IGNORE_EXTS.
+#
+# Very similar to the two tests above, but adds a check to test extension
+# precedence. Kept separate from the above playbooks because we *only* care
+# about extensions here, and 'a' will not exist when the above playbooks run
+# with non-extension library/role paths. There is also no way to guarantee
+# that these tests will be useful, because of how the pluginloader works: it
+# uses os.listdir, which returns entries in an arbitrary order (likely
+# dependent on the filesystem). If it happens to return 'a.py' on the test
+# node before 'a.ini', then this test is pointless anyway, because 'a.ini'
+# would never have run regardless of what MODULE_IGNORE_EXTS is set to. The
+# hope is that we test across enough systems that at least one would fail
+# this test if MODULE_IGNORE_EXTS broke, but there is no guarantee. Because
+# of this, the check would perhaps be better as a mocked unit test, but that
+# would require a fair bit of work, as none of that loader logic is
+# unit-tested at all right now.
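+#
+# As an illustration only (a hypothetical interpreter session, not part of
+# this test), the ordering problem looks like:
+#
+#     >>> import os
+#     >>> os.listdir('lib_with_extension')
+#     ['a.py', 'ping.ini', 'a.ini', 'ping.py']   # order is filesystem-dependent
+#
+# If 'a.py' happens to come back before 'a.ini', the test passes no matter
+# what MODULE_IGNORE_EXTS is set to, so a pass here is only weak evidence.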
+ANSIBLE_LIBRARY=lib_with_extension ansible-playbook modules_test_envvar_ext.yml -i ../../inventory -v "$@"
+ANSIBLE_ROLES_PATH=roles_with_extension ansible-playbook modules_test_role_ext.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/module_tracebacks/aliases b/test/integration/targets/module_tracebacks/aliases
new file mode 100644
index 00000000..804f0460
--- /dev/null
+++ b/test/integration/targets/module_tracebacks/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group4
+needs/ssh
+skip/aix
diff --git a/test/integration/targets/module_tracebacks/inventory b/test/integration/targets/module_tracebacks/inventory
new file mode 100644
index 00000000..91565267
--- /dev/null
+++ b/test/integration/targets/module_tracebacks/inventory
@@ -0,0 +1,7 @@
+testhost_local ansible_connection=local
+testhost_ssh ansible_connection=ssh ansible_host=localhost
+
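+# Use the controller's own Python for modules so that the local and
+# ssh-to-localhost plays both exercise the same interpreter.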
+[all:vars]
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/module_tracebacks/runme.sh b/test/integration/targets/module_tracebacks/runme.sh
new file mode 100755
index 00000000..b8ac8068
--- /dev/null
+++ b/test/integration/targets/module_tracebacks/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook traceback.yml -i inventory "$@"
diff --git a/test/integration/targets/module_tracebacks/traceback.yml b/test/integration/targets/module_tracebacks/traceback.yml
new file mode 100644
index 00000000..b1f0b516
--- /dev/null
+++ b/test/integration/targets/module_tracebacks/traceback.yml
@@ -0,0 +1,24 @@
+- hosts: all
+ gather_facts: no
+ tasks:
+ - name: intentionally fail module execution
+ ping:
+ data: crash
+ ignore_errors: yes
+ register: ping
+
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: verify exceptions were properly captured
+ assert:
+ that:
+ - hostvars.testhost_local.ping is failed
+ - "'boom' in hostvars.testhost_local.ping.exception"
+ - "'boom' in hostvars.testhost_local.ping.module_stderr"
+ - hostvars.testhost_ssh.ping is failed
+ - "'boom' in hostvars.testhost_ssh.ping.exception"
+ - "'boom' in hostvars.testhost_ssh.ping.module_stdout"
diff --git a/test/integration/targets/module_utils/aliases b/test/integration/targets/module_utils/aliases
new file mode 100644
index 00000000..2f5770ff
--- /dev/null
+++ b/test/integration/targets/module_utils/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group3
+needs/root
+needs/target/setup_nobody
diff --git a/test/integration/targets/module_utils/library/test.py b/test/integration/targets/module_utils/library/test.py
new file mode 100644
index 00000000..fbb7e6e2
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+# Most of these names are only available via PluginLoader so pylint doesn't
+# know they exist
+# pylint: disable=no-name-in-module
+results = {}
+
+# Test import with no from
+import ansible.module_utils.foo0
+results['foo0'] = ansible.module_utils.foo0.data
+
+# Test depthful import with no from
+import ansible.module_utils.bar0.foo
+results['bar0'] = ansible.module_utils.bar0.foo.data
+
+# Test import of module_utils/foo1.py
+from ansible.module_utils import foo1
+results['foo1'] = foo1.data
+
+# Test import of an identifier inside of module_utils/foo2.py
+from ansible.module_utils.foo2 import data
+results['foo2'] = data
+
+# Test import of module_utils/bar1/__init__.py
+from ansible.module_utils import bar1
+results['bar1'] = bar1.data
+
+# Test import of an identifier inside of module_utils/bar2/__init__.py
+from ansible.module_utils.bar2 import data
+results['bar2'] = data
+
+# Test import of module_utils/baz1/one.py
+from ansible.module_utils.baz1 import one
+results['baz1'] = one.data
+
+# Test import of an identifier inside of module_utils/baz2/one.py
+from ansible.module_utils.baz2.one import data
+results['baz2'] = data
+
+# Test import of module_utils/spam1/ham/eggs/__init__.py
+from ansible.module_utils.spam1.ham import eggs
+results['spam1'] = eggs.data
+
+# Test import of an identifier inside module_utils/spam2/ham/eggs/__init__.py
+from ansible.module_utils.spam2.ham.eggs import data
+results['spam2'] = data
+
+# Test import of module_utils/spam3/ham/bacon.py
+from ansible.module_utils.spam3.ham import bacon
+results['spam3'] = bacon.data
+
+# Test import of an identifier inside of module_utils/spam4/ham/bacon.py
+from ansible.module_utils.spam4.ham.bacon import data
+results['spam4'] = data
+
+# Test import of bacon and eggs (both modules) from module_utils.spam5.ham
+from ansible.module_utils.spam5.ham import bacon, eggs
+results['spam5'] = (bacon.data, eggs.data)
+
+# Test import of bacon and eggs (both identifiers) from module_utils.spam6.ham
+from ansible.module_utils.spam6.ham import bacon, eggs
+results['spam6'] = (bacon, eggs)
+
+# Test import of bacon and eggs (a module and an identifier) from module_utils.spam7.ham
+from ansible.module_utils.spam7.ham import bacon, eggs
+results['spam7'] = (bacon.data, eggs)
+
+# Test import of module_utils/spam8/ham/bacon.py and module_utils/spam8/ham/eggs.py separately
+from ansible.module_utils.spam8.ham import bacon
+from ansible.module_utils.spam8.ham import eggs
+results['spam8'] = (bacon.data, eggs)
+
+# Test that import of module_utils/qux1/quux.py using as works
+from ansible.module_utils.qux1 import quux as one
+results['qux1'] = one.data
+
+# Test that importing qux2/quux.py and qux2/quuz.py using as works
+from ansible.module_utils.qux2 import quux as one, quuz as two
+results['qux2'] = (one.data, two.data)
+
+# Test depth
+from ansible.module_utils.a.b.c.d.e.f.g.h import data
+results['abcdefgh'] = data
+
+from ansible.module_utils.basic import AnsibleModule
+AnsibleModule(argument_spec=dict()).exit_json(**results)
diff --git a/test/integration/targets/module_utils/library/test_alias_deprecation.py b/test/integration/targets/module_utils/library/test_alias_deprecation.py
new file mode 100644
index 00000000..96410fc4
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_alias_deprecation.py
@@ -0,0 +1,17 @@
+#!/usr/bin/python
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts import data
+
+results = {"data": data}
+
+arg_spec = dict(
+ foo=dict(type='str', aliases=['baz'], deprecated_aliases=[dict(name='baz', version='9.99')])
+)
+
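+# Invoking this module with the deprecated alias (baz=...) should surface an
+# entry in the result's 'deprecations' list; module_utils_test.yml asserts on it.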
+AnsibleModule(argument_spec=arg_spec).exit_json(**results)
diff --git a/test/integration/targets/module_utils/library/test_cwd_missing.py b/test/integration/targets/module_utils/library/test_cwd_missing.py
new file mode 100644
index 00000000..cd1f9c77
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_cwd_missing.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ # This module verifies that AnsibleModule works when cwd does not exist.
+ # This situation can occur as a race condition when the following conditions are met:
+ #
+ # 1) Execute a module which has high startup overhead prior to instantiating AnsibleModule (0.5s is enough in many cases).
+ # 2) Run the module async as the last task in a playbook using connection=local (a fire-and-forget task).
+ # 3) Remove the directory containing the playbook immediately after playbook execution ends (playbook in a temp dir).
+ #
+ # To ease testing of this race condition the deletion of cwd is handled in this module.
+ # This avoids race conditions in the test, including timing cwd deletion between AnsiballZ wrapper execution and AnsibleModule instantiation.
+ # The timing issue with AnsiballZ is due to cwd checking in the wrapper when code coverage is enabled.
+
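+    # Create a scratch directory, make it the cwd, then delete it out from
+    # under ourselves, so AnsibleModule starts with a cwd that no longer exists.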
+ temp = os.path.abspath('temp')
+
+ os.mkdir(temp)
+ os.chdir(temp)
+ os.rmdir(temp)
+
+ module = AnsibleModule(argument_spec=dict())
+ module.exit_json(before=temp, after=os.getcwd())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_utils/library/test_cwd_unreadable.py b/test/integration/targets/module_utils/library/test_cwd_unreadable.py
new file mode 100644
index 00000000..d65f31ac
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_cwd_unreadable.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ # This module verifies that AnsibleModule works when cwd exists but is unreadable.
+ # This situation can occur when running tasks as an unprivileged user.
+
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ # Compensate for macOS being unable to access cwd as an unprivileged user.
+ # This test is a no-op in this case.
+ # Testing for os.getcwd() failures is handled by the test_cwd_missing module.
+ cwd = '/'
+ os.chdir(cwd)
+
+ module = AnsibleModule(argument_spec=dict())
+ module.exit_json(before=cwd, after=os.getcwd())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_utils/library/test_env_override.py b/test/integration/targets/module_utils/library/test_env_override.py
new file mode 100644
index 00000000..94e3051b
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_env_override.py
@@ -0,0 +1,11 @@
+#!/usr/bin/python
+# Most of these names are only available via PluginLoader so pylint doesn't
+# know they exist
+# pylint: disable=no-name-in-module
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.json_utils import data
+from ansible.module_utils.mork import data as mork_data
+
+results = {"json_utils": data, "mork": mork_data}
+
+AnsibleModule(argument_spec=dict()).exit_json(**results)
diff --git a/test/integration/targets/module_utils/library/test_failure.py b/test/integration/targets/module_utils/library/test_failure.py
new file mode 100644
index 00000000..e1a87c2e
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_failure.py
@@ -0,0 +1,14 @@
+#!/usr/bin/python
+
+results = {}
+# Test that module_utils imports are rooted correctly; only
+# module_utils/yak/zebra/foo.py exists, so the import below should fail
+# during AnsiballZ construction, before this code ever runs.
+try:
+ from ansible.module_utils.zebra import foo
+ results['zebra'] = foo.data
+except ImportError:
+    results['zebra'] = 'Failed with ImportError in the module, but this should have failed earlier, during AnsiballZ construction'
+
+from ansible.module_utils.basic import AnsibleModule
+AnsibleModule(argument_spec=dict()).exit_json(**results)
diff --git a/test/integration/targets/module_utils/library/test_override.py b/test/integration/targets/module_utils/library/test_override.py
new file mode 100644
index 00000000..9ff54bf9
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_override.py
@@ -0,0 +1,7 @@
+#!/usr/bin/python
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts import data
+
+results = {"data": data}
+
+AnsibleModule(argument_spec=dict()).exit_json(**results)
diff --git a/test/integration/targets/module_utils/library/test_recursive_diff.py b/test/integration/targets/module_utils/library/test_recursive_diff.py
new file mode 100644
index 00000000..0cf39d9c
--- /dev/null
+++ b/test/integration/targets/module_utils/library/test_recursive_diff.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+
+def main():
+ module = AnsibleModule(
+ {
+ 'a': {'type': 'dict'},
+ 'b': {'type': 'dict'},
+ }
+ )
+
+ module.exit_json(
+ the_diff=recursive_diff(
+ module.params['a'],
+ module.params['b'],
+ ),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/module_utils/module_utils/__init__.py b/test/integration/targets/module_utils/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/__init__.py b/test/integration/targets/module_utils/module_utils/a/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/c/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/c/d/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py
new file mode 100644
index 00000000..722f4b77
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/a/b/c/d/e/f/g/h/__init__.py
@@ -0,0 +1 @@
+data = 'abcdefgh'
diff --git a/test/integration/targets/module_utils/module_utils/bar0/__init__.py b/test/integration/targets/module_utils/module_utils/bar0/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/bar0/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/bar0/foo.py b/test/integration/targets/module_utils/module_utils/bar0/foo.py
new file mode 100644
index 00000000..1072dcc2
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/bar0/foo.py
@@ -0,0 +1 @@
+data = 'bar0'
diff --git a/test/integration/targets/module_utils/module_utils/bar1/__init__.py b/test/integration/targets/module_utils/module_utils/bar1/__init__.py
new file mode 100644
index 00000000..68e43509
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/bar1/__init__.py
@@ -0,0 +1 @@
+data = 'bar1'
diff --git a/test/integration/targets/module_utils/module_utils/bar2/__init__.py b/test/integration/targets/module_utils/module_utils/bar2/__init__.py
new file mode 100644
index 00000000..59e86afd
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/bar2/__init__.py
@@ -0,0 +1 @@
+data = 'bar2'
diff --git a/test/integration/targets/module_utils/module_utils/baz1/__init__.py b/test/integration/targets/module_utils/module_utils/baz1/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/baz1/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/baz1/one.py b/test/integration/targets/module_utils/module_utils/baz1/one.py
new file mode 100644
index 00000000..e5d7894a
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/baz1/one.py
@@ -0,0 +1 @@
+data = 'baz1'
diff --git a/test/integration/targets/module_utils/module_utils/baz2/__init__.py b/test/integration/targets/module_utils/module_utils/baz2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/baz2/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/baz2/one.py b/test/integration/targets/module_utils/module_utils/baz2/one.py
new file mode 100644
index 00000000..1efe196c
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/baz2/one.py
@@ -0,0 +1 @@
+data = 'baz2'
diff --git a/test/integration/targets/module_utils/module_utils/facts.py b/test/integration/targets/module_utils/module_utils/facts.py
new file mode 100644
index 00000000..ba7cbb7b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/facts.py
@@ -0,0 +1 @@
+data = 'overridden facts.py'
diff --git a/test/integration/targets/module_utils/module_utils/foo.py b/test/integration/targets/module_utils/module_utils/foo.py
new file mode 100644
index 00000000..20698f1f
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/foo.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+
+foo = "FOO FROM foo.py"
diff --git a/test/integration/targets/module_utils/module_utils/foo0.py b/test/integration/targets/module_utils/module_utils/foo0.py
new file mode 100644
index 00000000..4b528b6d
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/foo0.py
@@ -0,0 +1 @@
+data = 'foo0'
diff --git a/test/integration/targets/module_utils/module_utils/foo1.py b/test/integration/targets/module_utils/module_utils/foo1.py
new file mode 100644
index 00000000..18e0cef1
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/foo1.py
@@ -0,0 +1 @@
+data = 'foo1'
diff --git a/test/integration/targets/module_utils/module_utils/foo2.py b/test/integration/targets/module_utils/module_utils/foo2.py
new file mode 100644
index 00000000..feb142df
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/foo2.py
@@ -0,0 +1 @@
+data = 'foo2'
diff --git a/test/integration/targets/module_utils/module_utils/qux1/__init__.py b/test/integration/targets/module_utils/module_utils/qux1/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/qux1/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/qux1/quux.py b/test/integration/targets/module_utils/module_utils/qux1/quux.py
new file mode 100644
index 00000000..3d288c96
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/qux1/quux.py
@@ -0,0 +1 @@
+data = 'qux1'
diff --git a/test/integration/targets/module_utils/module_utils/qux2/__init__.py b/test/integration/targets/module_utils/module_utils/qux2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/qux2/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/qux2/quux.py b/test/integration/targets/module_utils/module_utils/qux2/quux.py
new file mode 100644
index 00000000..496d446a
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/qux2/quux.py
@@ -0,0 +1 @@
+data = 'qux2:quux'
diff --git a/test/integration/targets/module_utils/module_utils/qux2/quuz.py b/test/integration/targets/module_utils/module_utils/qux2/quuz.py
new file mode 100644
index 00000000..cdc0fad7
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/qux2/quuz.py
@@ -0,0 +1 @@
+data = 'qux2:quuz'
diff --git a/test/integration/targets/module_utils/module_utils/service.py b/test/integration/targets/module_utils/module_utils/service.py
new file mode 100644
index 00000000..1492f468
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/service.py
@@ -0,0 +1 @@
+sysv_is_enabled = 'sysv_is_enabled'
diff --git a/test/integration/targets/module_utils/module_utils/spam1/__init__.py b/test/integration/targets/module_utils/module_utils/spam1/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam1/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam1/ham/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py b/test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py
new file mode 100644
index 00000000..f290e156
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam1/ham/eggs/__init__.py
@@ -0,0 +1 @@
+data = 'spam1'
diff --git a/test/integration/targets/module_utils/module_utils/spam2/__init__.py b/test/integration/targets/module_utils/module_utils/spam2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam2/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam2/ham/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py b/test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py
new file mode 100644
index 00000000..5e053d88
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam2/ham/eggs/__init__.py
@@ -0,0 +1 @@
+data = 'spam2'
diff --git a/test/integration/targets/module_utils/module_utils/spam3/__init__.py b/test/integration/targets/module_utils/module_utils/spam3/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam3/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam3/ham/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py
new file mode 100644
index 00000000..91075089
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam3/ham/bacon.py
@@ -0,0 +1 @@
+data = 'spam3'
diff --git a/test/integration/targets/module_utils/module_utils/spam4/__init__.py b/test/integration/targets/module_utils/module_utils/spam4/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam4/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam4/ham/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py
new file mode 100644
index 00000000..7d552882
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam4/ham/bacon.py
@@ -0,0 +1 @@
+data = 'spam4'
diff --git a/test/integration/targets/module_utils/module_utils/spam5/__init__.py b/test/integration/targets/module_utils/module_utils/spam5/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam5/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam5/ham/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py
new file mode 100644
index 00000000..cc947b83
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam5/ham/bacon.py
@@ -0,0 +1 @@
+data = 'spam5:bacon'
diff --git a/test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py b/test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py
new file mode 100644
index 00000000..f0394c87
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam5/ham/eggs.py
@@ -0,0 +1 @@
+data = 'spam5:eggs'
diff --git a/test/integration/targets/module_utils/module_utils/spam6/__init__.py b/test/integration/targets/module_utils/module_utils/spam6/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam6/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py
new file mode 100644
index 00000000..8c1a70ea
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam6/ham/__init__.py
@@ -0,0 +1,2 @@
+bacon = 'spam6:bacon'
+eggs = 'spam6:eggs'
diff --git a/test/integration/targets/module_utils/module_utils/spam7/__init__.py b/test/integration/targets/module_utils/module_utils/spam7/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam7/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py
new file mode 100644
index 00000000..cd9a05d0
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam7/ham/__init__.py
@@ -0,0 +1 @@
+eggs = 'spam7:eggs'
diff --git a/test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py
new file mode 100644
index 00000000..490121f8
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam7/ham/bacon.py
@@ -0,0 +1 @@
+data = 'spam7:bacon'
diff --git a/test/integration/targets/module_utils/module_utils/spam8/__init__.py b/test/integration/targets/module_utils/module_utils/spam8/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam8/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py b/test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py
new file mode 100644
index 00000000..c02bf5fd
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam8/ham/__init__.py
@@ -0,0 +1 @@
+eggs = 'spam8:eggs'
diff --git a/test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py b/test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py
new file mode 100644
index 00000000..28ea2857
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py
@@ -0,0 +1 @@
+data = 'spam8:bacon'
diff --git a/test/integration/targets/module_utils/module_utils/sub/__init__.py b/test/integration/targets/module_utils/module_utils/sub/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/sub/bam.py b/test/integration/targets/module_utils/module_utils/sub/bam.py
new file mode 100644
index 00000000..566f8b7c
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/bam.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+
+bam = "BAM FROM sub/bam.py"
diff --git a/test/integration/targets/module_utils/module_utils/sub/bam/__init__.py b/test/integration/targets/module_utils/module_utils/sub/bam/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/bam/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/sub/bam/bam.py b/test/integration/targets/module_utils/module_utils/sub/bam/bam.py
new file mode 100644
index 00000000..b7ed7072
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/bam/bam.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+
+bam = "BAM FROM sub/bam/bam.py"
diff --git a/test/integration/targets/module_utils/module_utils/sub/bar/__init__.py b/test/integration/targets/module_utils/module_utils/sub/bar/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/bar/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/sub/bar/bam.py b/test/integration/targets/module_utils/module_utils/sub/bar/bam.py
new file mode 100644
index 00000000..02fafd40
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/bar/bam.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+
+bam = "BAM FROM sub/bar/bam.py"
diff --git a/test/integration/targets/module_utils/module_utils/sub/bar/bar.py b/test/integration/targets/module_utils/module_utils/sub/bar/bar.py
new file mode 100644
index 00000000..8566901f
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/sub/bar/bar.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+
+bar = "BAR FROM sub/bar/bar.py"
diff --git a/test/integration/targets/module_utils/module_utils/yak/__init__.py b/test/integration/targets/module_utils/module_utils/yak/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/yak/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py b/test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/yak/zebra/__init__.py
diff --git a/test/integration/targets/module_utils/module_utils/yak/zebra/foo.py b/test/integration/targets/module_utils/module_utils/yak/zebra/foo.py
new file mode 100644
index 00000000..89b2bfe8
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils/yak/zebra/foo.py
@@ -0,0 +1 @@
+data = 'yak'
diff --git a/test/integration/targets/module_utils/module_utils_basic_setcwd.yml b/test/integration/targets/module_utils/module_utils_basic_setcwd.yml
new file mode 100644
index 00000000..97dbf873
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils_basic_setcwd.yml
@@ -0,0 +1,22 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: make sure the nobody user is available
+ include_role:
+ name: setup_nobody
+
+ - name: verify AnsibleModule works when cwd is missing
+ test_cwd_missing:
+ register: missing
+
+ - name: verify AnsibleModule works when cwd is unreadable
+ test_cwd_unreadable:
+ register: unreadable
+ become: yes
+ become_user: nobody # root can read cwd regardless of permissions, so a non-root user is required here
+
+ - name: verify AnsibleModule was able to adjust cwd as expected
+ assert:
+ that:
+ - missing.before != missing.after
+ - unreadable.before != unreadable.after or unreadable.before == '/' # allow / fallback on macOS when using an unprivileged user
diff --git a/test/integration/targets/module_utils/module_utils_common_dict_transformation.yml b/test/integration/targets/module_utils/module_utils_common_dict_transformation.yml
new file mode 100644
index 00000000..7d961c4c
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils_common_dict_transformation.yml
@@ -0,0 +1,34 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - test_recursive_diff:
+ a:
+ foo:
+ bar:
+ - baz:
+ qux: ham_sandwich
+ b:
+ foo:
+ bar:
+ - baz:
+ qux: turkey_sandwich
+ register: recursive_diff_diff
+
+ - test_recursive_diff:
+ a:
+ foo:
+ bar:
+ - baz:
+ qux: ham_sandwich
+ b:
+ foo:
+ bar:
+ - baz:
+ qux: ham_sandwich
+ register: recursive_diff_same
+
+ - assert:
+ that:
+ - recursive_diff_diff.the_diff is not none
+ - recursive_diff_diff.the_diff|length == 2
+ - recursive_diff_same.the_diff is none
diff --git a/test/integration/targets/module_utils/module_utils_envvar.yml b/test/integration/targets/module_utils/module_utils_envvar.yml
new file mode 100644
index 00000000..8d97e0eb
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils_envvar.yml
@@ -0,0 +1,51 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Use a specially crafted module to see if things were imported correctly
+ test:
+ register: result
+
+ - name: Check that these are all loaded from playbook dir's module_utils
+ assert:
+ that:
+ - 'result["abcdefgh"] == "abcdefgh"'
+ - 'result["bar0"] == "bar0"'
+ - 'result["bar1"] == "bar1"'
+ - 'result["bar2"] == "bar2"'
+ - 'result["baz1"] == "baz1"'
+ - 'result["baz2"] == "baz2"'
+ - 'result["foo0"] == "foo0"'
+ - 'result["foo1"] == "foo1"'
+ - 'result["foo2"] == "foo2"'
+ - 'result["qux1"] == "qux1"'
+ - 'result["qux2"] == ["qux2:quux", "qux2:quuz"]'
+ - 'result["spam1"] == "spam1"'
+ - 'result["spam2"] == "spam2"'
+ - 'result["spam3"] == "spam3"'
+ - 'result["spam4"] == "spam4"'
+ - 'result["spam5"] == ["spam5:bacon", "spam5:eggs"]'
+ - 'result["spam6"] == ["spam6:bacon", "spam6:eggs"]'
+ - 'result["spam7"] == ["spam7:bacon", "spam7:eggs"]'
+ - 'result["spam8"] == ["spam8:bacon", "spam8:eggs"]'
+
+ # Test that overriding something in module_utils with something in the local library works
+ - name: Test that playbook dir's module_utils overrides facts.py
+ test_override:
+ register: result
+
+    - name: Make sure that we used the local facts.py, not the one shipped with ansible
+ assert:
+ that:
+ - 'result["data"] == "overridden facts.py"'
+
+    - name: Test that importing something from the module_utils set via env var works
+ test_env_override:
+ register: result
+
+ - name: Make sure we used the module_utils from the env_var for these
+ assert:
+ that:
+ # Override of shipped module_utils
+ - 'result["json_utils"] == "overridden json_utils"'
+          # Only in the env vars directory
+ - 'result["mork"] == "mork"'
diff --git a/test/integration/targets/module_utils/module_utils_test.yml b/test/integration/targets/module_utils/module_utils_test.yml
new file mode 100644
index 00000000..943bf4ee
--- /dev/null
+++ b/test/integration/targets/module_utils/module_utils_test.yml
@@ -0,0 +1,62 @@
+- hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Use a specially crafted module to see if things were imported correctly
+ test:
+ register: result
+
+ - name: Check that the module imported the correct version of each module_util
+ assert:
+ that:
+ - 'result["abcdefgh"] == "abcdefgh"'
+ - 'result["bar0"] == "bar0"'
+ - 'result["bar1"] == "bar1"'
+ - 'result["bar2"] == "bar2"'
+ - 'result["baz1"] == "baz1"'
+ - 'result["baz2"] == "baz2"'
+ - 'result["foo0"] == "foo0"'
+ - 'result["foo1"] == "foo1"'
+ - 'result["foo2"] == "foo2"'
+ - 'result["qux1"] == "qux1"'
+ - 'result["qux2"] == ["qux2:quux", "qux2:quuz"]'
+ - 'result["spam1"] == "spam1"'
+ - 'result["spam2"] == "spam2"'
+ - 'result["spam3"] == "spam3"'
+ - 'result["spam4"] == "spam4"'
+ - 'result["spam5"] == ["spam5:bacon", "spam5:eggs"]'
+ - 'result["spam6"] == ["spam6:bacon", "spam6:eggs"]'
+ - 'result["spam7"] == ["spam7:bacon", "spam7:eggs"]'
+ - 'result["spam8"] == ["spam8:bacon", "spam8:eggs"]'
+
+ # Test that overriding something in module_utils with something in the local library works
+ - name: Test that local module_utils overrides facts.py
+ test_override:
+ register: result
+
+ - name: Make sure we used the local facts.py, not the one shipped with ansible
+ assert:
+ that:
+ - result["data"] == "overridden facts.py"
+
+ - name: Test that importing a module that only exists inside of a submodule does not work
+ test_failure:
+ ignore_errors: True
+ register: result
+
+ - debug: var=result
+ - name: Make sure we failed in AnsiballZ
+ assert:
+ that:
+ - result is failed
+ - result['msg'] == "Could not find imported module support code for ansible.modules.test_failure. Looked for (['ansible.module_utils.zebra.foo', 'ansible.module_utils.zebra'])"
+
+ - name: Test that alias deprecation works
+ test_alias_deprecation:
+ baz: 'bar'
+ register: result
+
+ - name: Assert that the deprecation message is given correctly
+ assert:
+ that:
+ - result.deprecations[0].msg == "Alias 'baz' is deprecated. See the module docs for more information"
+ - result.deprecations[0].version == '9.99'
diff --git a/test/integration/targets/module_utils/other_mu_dir/__init__.py b/test/integration/targets/module_utils/other_mu_dir/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/__init__.py
diff --git a/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py
new file mode 100644
index 00000000..796fed38
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/f/g/h/__init__.py
@@ -0,0 +1 @@
+data = 'should not be visible abcdefgh'
diff --git a/test/integration/targets/module_utils/other_mu_dir/facts.py b/test/integration/targets/module_utils/other_mu_dir/facts.py
new file mode 100644
index 00000000..dbfab271
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/facts.py
@@ -0,0 +1 @@
+data = 'should not be visible facts.py'
diff --git a/test/integration/targets/module_utils/other_mu_dir/json_utils.py b/test/integration/targets/module_utils/other_mu_dir/json_utils.py
new file mode 100644
index 00000000..59757e40
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/json_utils.py
@@ -0,0 +1 @@
+data = 'overridden json_utils'
diff --git a/test/integration/targets/module_utils/other_mu_dir/mork.py b/test/integration/targets/module_utils/other_mu_dir/mork.py
new file mode 100644
index 00000000..3b700fca
--- /dev/null
+++ b/test/integration/targets/module_utils/other_mu_dir/mork.py
@@ -0,0 +1 @@
+data = 'mork'
diff --git a/test/integration/targets/module_utils/runme.sh b/test/integration/targets/module_utils/runme.sh
new file mode 100755
index 00000000..7a9e458e
--- /dev/null
+++ b/test/integration/targets/module_utils/runme.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook module_utils_basic_setcwd.yml -i ../../inventory "$@"
+
+ansible-playbook module_utils_test.yml -i ../../inventory -v "$@"
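+# ANSIBLE_MODULE_UTILS points the module_utils search path at other_mu_dir so
+# the envvar playbook can verify overrides supplied via an environment variable.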
+ANSIBLE_MODULE_UTILS=other_mu_dir ansible-playbook module_utils_envvar.yml -i ../../inventory -v "$@"
+
+ansible-playbook module_utils_common_dict_transformation.yml -i ../../inventory "$@"
diff --git a/test/integration/targets/module_utils_Ansible.AccessToken/aliases b/test/integration/targets/module_utils_Ansible.AccessToken/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.AccessToken/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1 b/test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1
new file mode 100644
index 00000000..5e3a0af5
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.AccessToken/library/ansible_access_token_tests.ps1
@@ -0,0 +1,378 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.AccessToken
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$spec = @{
+ options = @{
+ test_username = @{ type = "str"; required = $true }
+ test_password = @{ type = "str"; required = $true; no_log = $true }
+ }
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$test_username = $module.Params.test_username
+$test_password = $module.Params.test_password
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actual_value = $Actual[$i]
+ $expected_value = $Expected[$i]
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+
+ $module.FailJson("AssertionError: actual != expected")
+ }
+}
+
+$current_user = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+
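+# Each entry below is a named scriptblock run in order by the loop at the end
+# of this file; a failed assertion calls $module.FailJson and stops the run.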
+$tests = [Ordered]@{
+ "Open process token" = {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess()
+
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query")
+ try {
+ $h_token.IsClosed | Assert-Equals -Expected $false
+ $h_token.IsInvalid | Assert-Equals -Expected $false
+
+ $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token)
+ $actual_user | Assert-Equals -Expected $current_user
+ } finally {
+ $h_token.Dispose()
+ }
+ $h_token.IsClosed | Assert-Equals -Expected $true
+ }
+
+ "Open process token of another process" = {
+ $proc_info = Start-Process -FilePath "powershell.exe" -ArgumentList "-Command Start-Sleep -Seconds 60" -WindowStyle Hidden -PassThru
+ try {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess($proc_info.Id, "QueryInformation", $false)
+ try {
+ $h_process.IsClosed | Assert-Equals -Expected $false
+ $h_process.IsInvalid | Assert-Equals -Expected $false
+
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query")
+ try {
+ $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token)
+ $actual_user | Assert-Equals -Expected $current_user
+ } finally {
+ $h_token.Dispose()
+ }
+ } finally {
+ $h_process.Dispose()
+ }
+ $h_process.IsClosed | Assert-Equals -Expected $true
+ } finally {
+ $proc_info | Stop-Process
+ }
+ }
+
+ "Failed to open process token" = {
+ $failed = $false
+ try {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess(4, "QueryInformation", $false)
+ $h_process.Dispose() # In case this doesn't fail, make sure we still dispose of it
+ } catch [Ansible.AccessToken.Win32Exception] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "Failed to open process 4 with access QueryInformation (Access is denied, Win32ErrorCode 5 - 0x00000005)"
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Duplicate access token primary" = {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess()
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Duplicate")
+ try {
+ $dup_token = [Ansible.AccessToken.TokenUtil]::DuplicateToken($h_token, "Query", "Anonymous", "Primary")
+ try {
+ $dup_token.IsClosed | Assert-Equals -Expected $false
+ $dup_token.IsInvalid | Assert-Equals -Expected $false
+
+ $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($dup_token)
+
+ $actual_user | Assert-Equals -Expected $current_user
+ $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($dup_token)
+
+ $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Primary)
+ $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]::Anonymous)
+ } finally {
+ $dup_token.Dispose()
+ }
+
+ $dup_token.IsClosed | Assert-Equals -Expected $true
+ } finally {
+ $h_token.Dispose()
+ }
+ }
+
+ "Duplicate access token impersonation" = {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess()
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Duplicate")
+ try {
+ "Anonymous", "Identification", "Impersonation", "Delegation" | ForEach-Object -Process {
+ $dup_token = [Ansible.AccessToken.TokenUtil]::DuplicateToken($h_token, "Query", $_, "Impersonation")
+ try {
+ $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($dup_token)
+
+ $actual_user | Assert-Equals -Expected $current_user
+ $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($dup_token)
+
+ $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Impersonation)
+ $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]"$_")
+ } finally {
+ $dup_token.Dispose()
+ }
+ }
+ } finally {
+ $h_token.Dispose()
+ }
+ }
+
+ "Impersonate SYSTEM token" = {
+ $system_sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @(
+ [System.Security.Principal.WellKnownSidType]::LocalSystemSid,
+ $null
+ )
+ $tested = $false
+ foreach ($h_token in [Ansible.AccessToken.TokenUtil]::EnumerateUserTokens($system_sid, "Duplicate, Impersonate, Query")) {
+ $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token)
+ $actual_user | Assert-Equals -Expected $system_sid
+
+ [Ansible.AccessToken.TokenUtil]::ImpersonateToken($h_token)
+ try {
+ $current_sid = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+ $current_sid | Assert-Equals -Expected $system_sid
+ } finally {
+ [Ansible.AccessToken.TokenUtil]::RevertToSelf()
+ }
+
+ $current_sid = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+ $current_sid | Assert-Equals -Expected $current_user
+
+ # The enumeration keeps yielding every SYSTEM token it can retrieve; we only want to test one
+ $tested = $true
+ break
+ }
+
+ $tested | Assert-Equals -Expected $true
+ }
+
+ "Get token privileges" = {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess()
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query")
+ try {
+ $priv_info = &whoami.exe /priv | Where-Object { $_.StartsWith("Se") }
+ $actual_privs = [Ansible.AccessToken.TokenUtil]::GetTokenPrivileges($h_token)
+ $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($h_token)
+
+ $actual_privs.Count | Assert-Equals -Expected $priv_info.Count
+ $actual_privs.Count | Assert-Equals -Expected $actual_stat.PrivilegeCount
+
+ foreach ($info in $priv_info) {
+ $info_split = $info.Split(" ", [System.StringSplitOptions]::RemoveEmptyEntries)
+ $priv_name = $info_split[0]
+ $priv_enabled = $info_split[-1] -eq "Enabled"
+ $actual_priv = $actual_privs | Where-Object { $_.Name -eq $priv_name }
+
+ $actual_priv -eq $null | Assert-Equals -Expected $false
+ if ($priv_enabled) {
+ $actual_priv.Attributes.HasFlag([Ansible.AccessToken.PrivilegeAttributes]::Enabled) | Assert-Equals -Expected $true
+ } else {
+ $actual_priv.Attributes.HasFlag([Ansible.AccessToken.PrivilegeAttributes]::Disabled) | Assert-Equals -Expected $true
+ }
+ }
+ } finally {
+ $h_token.Dispose()
+ }
+ }
+
+ "Get token statistics" = {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess()
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, "Query")
+ try {
+ $actual_priv = [Ansible.AccessToken.TokenUtil]::GetTokenPrivileges($h_token)
+ $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($h_token)
+
+ $actual_stat.TokenId.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Luid"
+ $actual_stat.AuthenticationId.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Luid"
+ $actual_stat.ExpirationTime.GetType().FullName | Assert-Equals -Expected "System.Int64"
+
+ $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Primary)
+
+ $os_version = [Version](Get-Item -LiteralPath $env:SystemRoot\System32\kernel32.dll).VersionInfo.ProductVersion
+ if ($os_version -lt [Version]"6.1") {
+ # While the token is a primary token, Server 2008 reports the SecurityImpersonationLevel for a primary token as Impersonation
+ $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]::Impersonation)
+ } else {
+ $actual_stat.ImpersonationLevel | Assert-Equals -Expected ([Ansible.AccessToken.SecurityImpersonationLevel]::Anonymous)
+ }
+ $actual_stat.DynamicCharged.GetType().FullName | Assert-Equals -Expected "System.UInt32"
+ $actual_stat.DynamicAvailable.GetType().FullName | Assert-Equals -Expected "System.UInt32"
+ $actual_stat.GroupCount.GetType().FullName | Assert-Equals -Expected "System.UInt32"
+ $actual_stat.PrivilegeCount | Assert-Equals -Expected $actual_priv.Count
+ $actual_stat.ModifiedId.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Luid"
+ } finally {
+ $h_token.Dispose()
+ }
+ }
+
+ "Get token linked token impersonation" = {
+ $h_token = [Ansible.AccessToken.TokenUtil]::LogonUser($test_username, $null, $test_password, "Interactive", "Default")
+ try {
+ $actual_elevation_type = [Ansible.AccessToken.TokenUtil]::GetTokenElevationType($h_token)
+ $actual_elevation_type | Assert-Equals -Expected ([Ansible.AccessToken.TokenElevationType]::Limited)
+
+ $actual_linked = [Ansible.AccessToken.TokenUtil]::GetTokenLinkedToken($h_token)
+ try {
+ $actual_linked.IsClosed | Assert-Equals -Expected $false
+ $actual_linked.IsInvalid | Assert-Equals -Expected $false
+
+ $actual_elevation_type = [Ansible.AccessToken.TokenUtil]::GetTokenElevationType($actual_linked)
+ $actual_elevation_type | Assert-Equals -Expected ([Ansible.AccessToken.TokenElevationType]::Full)
+
+ $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($actual_linked)
+ $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Impersonation)
+ } finally {
+ $actual_linked.Dispose()
+ }
+ $actual_linked.IsClosed | Assert-Equals -Expected $true
+ } finally {
+ $h_token.Dispose()
+ }
+ }
+
+ "Get token linked token primary" = {
+ # We need a token with the SeTcbPrivilege for this to work.
+ $system_sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @(
+ [System.Security.Principal.WellKnownSidType]::LocalSystemSid,
+ $null
+ )
+ $tested = $false
+ foreach ($system_token in [Ansible.AccessToken.TokenUtil]::EnumerateUserTokens($system_sid, "Duplicate, Impersonate, Query")) {
+ $privileges = [Ansible.AccessToken.TokenUtil]::GetTokenPrivileges($system_token)
+ if ($null -eq ($privileges | Where-Object { $_.Name -eq "SeTcbPrivilege" })) {
+ continue
+ }
+
+ $h_token = [Ansible.AccessToken.TokenUtil]::LogonUser($test_username, $null, $test_password, "Interactive", "Default")
+ try {
+ [Ansible.AccessToken.TokenUtil]::ImpersonateToken($system_token)
+ try {
+ $actual_linked = [Ansible.AccessToken.TokenUtil]::GetTokenLinkedToken($h_token)
+ try {
+ $actual_linked.IsClosed | Assert-Equals -Expected $false
+ $actual_linked.IsInvalid | Assert-Equals -Expected $false
+
+ $actual_elevation_type = [Ansible.AccessToken.TokenUtil]::GetTokenElevationType($actual_linked)
+ $actual_elevation_type | Assert-Equals -Expected ([Ansible.AccessToken.TokenElevationType]::Full)
+
+ $actual_stat = [Ansible.AccessToken.TokenUtil]::GetTokenStatistics($actual_linked)
+ $actual_stat.TokenType | Assert-Equals -Expected ([Ansible.AccessToken.TokenType]::Primary)
+ } finally {
+ $actual_linked.Dispose()
+ }
+ $actual_linked.IsClosed | Assert-Equals -Expected $true
+ } finally {
+ [Ansible.AccessToken.TokenUtil]::RevertToSelf()
+ }
+ } finally {
+ $h_token.Dispose()
+ }
+
+ $tested = $true
+ break
+ }
+ $tested | Assert-Equals -Expected $true
+ }
+
+ "Failed to get token information" = {
+ $h_process = [Ansible.AccessToken.TokenUtil]::OpenProcess()
+ $h_token = [Ansible.AccessToken.TokenUtil]::OpenProcessToken($h_process, 'Duplicate') # Without Query the below will fail
+
+ $failed = $false
+ try {
+ [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token)
+ } catch [Ansible.AccessToken.Win32Exception] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "GetTokenInformation(TokenUser) failed to get buffer length (Access is denied, Win32ErrorCode 5 - 0x00000005)"
+ } finally {
+ $h_token.Dispose()
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Logon with valid credentials" = {
+ $expected_user = New-Object -TypeName System.Security.Principal.NTAccount -ArgumentList $test_username
+ $expected_sid = $expected_user.Translate([System.Security.Principal.SecurityIdentifier])
+
+ $h_token = [Ansible.AccessToken.TokenUtil]::LogonUser($test_username, $null, $test_password, "Network", "Default")
+ try {
+ $h_token.IsClosed | Assert-Equals -Expected $false
+ $h_token.IsInvalid | Assert-Equals -Expected $false
+
+ $actual_user = [Ansible.AccessToken.TokenUtil]::GetTokenUser($h_token)
+ $actual_user | Assert-Equals -Expected $expected_sid
+ } finally {
+ $h_token.Dispose()
+ }
+ $h_token.IsClosed | Assert-Equals -Expected $true
+ }
+
+ "Logon with invalid credentials" = {
+ $failed = $false
+ try {
+ [Ansible.AccessToken.TokenUtil]::LogonUser("fake-user", $null, "fake-pass", "Network", "Default")
+ } catch [Ansible.AccessToken.Win32Exception] {
+ $failed = $true
+ $_.Exception.Message.Contains("Failed to logon fake-user") | Assert-Equals -Expected $true
+ $_.Exception.Message.Contains("Win32ErrorCode 1326 - 0x0000052E)") | Assert-Equals -Expected $true
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Logon with invalid credential with domain account" = {
+ $failed = $false
+ try {
+ [Ansible.AccessToken.TokenUtil]::LogonUser("fake-user", "fake-domain", "fake-pass", "Network", "Default")
+ } catch [Ansible.AccessToken.Win32Exception] {
+ $failed = $true
+ $_.Exception.Message.Contains("Failed to logon fake-domain\fake-user") | Assert-Equals -Expected $true
+ $_.Exception.Message.Contains("Win32ErrorCode 1326 - 0x0000052E)") | Assert-Equals -Expected $true
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+}
+
+foreach ($test_impl in $tests.GetEnumerator()) {
+ $test = $test_impl.Key
+ &$test_impl.Value
+}
+
+$module.Result.data = "success"
+$module.ExitJson()
diff --git a/test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml b/test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml
new file mode 100644
index 00000000..dbd64b06
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.AccessToken/tasks/main.yml
@@ -0,0 +1,29 @@
+---
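+# Build throwaway credentials: the static prefix satisfies Windows password
+# complexity rules and the password lookup appends a random suffix per run.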
+- set_fact:
+ test_username: ansible-test
+ test_password: Password123{{ lookup('password', '/dev/null chars=ascii_letters,digits length=8') }}
+
+- name: create test Admin user
+ win_user:
+ name: '{{ test_username }}'
+ password: '{{ test_password }}'
+ state: present
+ groups:
+ - Administrators
+
+- block:
+ - name: test Ansible.AccessToken.cs
+ ansible_access_token_tests:
+ test_username: '{{ test_username }}'
+ test_password: '{{ test_password }}'
+ register: ansible_access_token_test
+
+ - name: assert test Ansible.AccessToken.cs
+ assert:
+ that:
+ - ansible_access_token_test.data == "success"
+ always:
+ - name: remove test Admin user
+ win_user:
+ name: '{{ test_username }}'
+ state: absent
diff --git a/test/integration/targets/module_utils_Ansible.Basic/aliases b/test/integration/targets/module_utils_Ansible.Basic/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Basic/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1 b/test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1
new file mode 100644
index 00000000..9278e386
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Basic/library/ansible_basic_tests.ps1
@@ -0,0 +1,3098 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actual_value = $Actual[$i]
+ $expected_value = $Expected[$i]
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.failed = $true
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+ $module.Result.msg = "AssertionError: actual != expected"
+
+ Exit-Module
+ }
+}
+
+Function Assert-DictionaryEquals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+ $actual_keys = $Actual.Keys
+ $expected_keys = $Expected.Keys
+
+ $actual_keys.Count | Assert-Equals -Expected $expected_keys.Count
+ foreach ($actual_entry in $Actual.GetEnumerator()) {
+ $actual_key = $actual_entry.Key
+ ($actual_key -cin $expected_keys) | Assert-Equals -Expected $true
+ $actual_value = $actual_entry.Value
+ $expected_value = $Expected.$actual_key
+
+ if ($actual_value -is [System.Collections.IDictionary]) {
+ $actual_value | Assert-DictionaryEquals -Expected $expected_value
+ } elseif ($actual_value -is [System.Collections.ArrayList] -or $actual_value -is [Array]) {
+ for ($i = 0; $i -lt $actual_value.Count; $i++) {
+ $actual_entry = $actual_value[$i]
+ $expected_entry = $expected_value[$i]
+ if ($actual_entry -is [System.Collections.IDictionary]) {
+ $actual_entry | Assert-DictionaryEquals -Expected $expected_entry
+ } else {
+ Assert-Equals -Actual $actual_entry -Expected $expected_entry
+ }
+ }
+ } else {
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ }
+ foreach ($expected_key in $expected_keys) {
+ ($expected_key -cin $actual_keys) | Assert-Equals -Expected $true
+ }
+}
+
+Function Exit-Module {
+ # Make sure Exit actually calls exit and not our overridden test behaviour
+ [Ansible.Basic.AnsibleModule]::Exit = { param([Int32]$rc) exit $rc }
+ Write-Output -InputObject (ConvertTo-Json -InputObject $module.Result -Compress -Depth 99)
+ $module.ExitJson()
+}
+
+$tmpdir = $module.Tmpdir
+
+# Override the Exit and WriteLine behaviour to throw an exception instead of exiting the module
+[Ansible.Basic.AnsibleModule]::Exit = {
+ param([Int32]$rc)
+ $exp = New-Object -TypeName System.Exception -ArgumentList "exit: $rc"
+ $exp | Add-Member -Type NoteProperty -Name Output -Value $_test_out
+ throw $exp
+}
+[Ansible.Basic.AnsibleModule]::WriteLine = {
+ param([String]$line)
+ Set-Variable -Name _test_out -Scope Global -Value $line
+}
+
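+# Each test below builds a fresh AnsibleModule from an args file or the global
+# complex_args variable and asserts on the parsed params and module output.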
+$tests = @{
+ "Empty spec and no options - args file" = {
+ $args_file = Join-Path -Path $tmpdir -ChildPath "args-$(Get-Random).json"
+ [System.IO.File]::WriteAllText($args_file, '{ "ANSIBLE_MODULE_ARGS": {} }')
+ $m = [Ansible.Basic.AnsibleModule]::Create(@($args_file), @{})
+
+ $m.CheckMode | Assert-Equals -Expected $false
+ $m.DebugMode | Assert-Equals -Expected $false
+ $m.DiffMode | Assert-Equals -Expected $false
+ $m.KeepRemoteFiles | Assert-Equals -Expected $false
+ $m.ModuleName | Assert-Equals -Expected "undefined win module"
+ $m.NoLog | Assert-Equals -Expected $false
+ $m.Verbosity | Assert-Equals -Expected 0
+ $m.AnsibleVersion | Assert-Equals -Expected $null
+ }
+
+ "Empty spec and no options - complex_args" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{}
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ $m.CheckMode | Assert-Equals -Expected $false
+ $m.DebugMode | Assert-Equals -Expected $false
+ $m.DiffMode | Assert-Equals -Expected $false
+ $m.KeepRemoteFiles | Assert-Equals -Expected $false
+ $m.ModuleName | Assert-Equals -Expected "undefined win module"
+ $m.NoLog | Assert-Equals -Expected $false
+ $m.Verbosity | Assert-Equals -Expected 0
+ $m.AnsibleVersion | Assert-Equals -Expected $null
+ }
+
+ "Internal param changes - args file" = {
+ $m_tmpdir = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)"
+ New-Item -Path $m_tmpdir -ItemType Directory > $null
+ $args_file = Join-Path -Path $tmpdir -ChildPath "args-$(Get-Random).json"
+ [System.IO.File]::WriteAllText($args_file, @"
+{
+ "ANSIBLE_MODULE_ARGS": {
+ "_ansible_check_mode": true,
+ "_ansible_debug": true,
+ "_ansible_diff": true,
+ "_ansible_keep_remote_files": true,
+ "_ansible_module_name": "ansible_basic_tests",
+ "_ansible_no_log": true,
+ "_ansible_remote_tmp": "%TEMP%",
+ "_ansible_selinux_special_fs": "ignored",
+ "_ansible_shell_executable": "ignored",
+ "_ansible_socket": "ignored",
+ "_ansible_syslog_facility": "ignored",
+ "_ansible_tmpdir": "$($m_tmpdir -replace "\\", "\\")",
+ "_ansible_verbosity": 3,
+ "_ansible_version": "2.8.0"
+ }
+}
+"@)
+ $m = [Ansible.Basic.AnsibleModule]::Create(@($args_file), @{supports_check_mode=$true})
+ $m.CheckMode | Assert-Equals -Expected $true
+ $m.DebugMode | Assert-Equals -Expected $true
+ $m.DiffMode | Assert-Equals -Expected $true
+ $m.KeepRemoteFiles | Assert-Equals -Expected $true
+ $m.ModuleName | Assert-Equals -Expected "ansible_basic_tests"
+ $m.NoLog | Assert-Equals -Expected $true
+ $m.Verbosity | Assert-Equals -Expected 3
+ $m.AnsibleVersion | Assert-Equals -Expected "2.8.0"
+ $m.Tmpdir | Assert-Equals -Expected $m_tmpdir
+ }
+
+ "Internal param changes - complex_args" = {
+ $m_tmpdir = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)"
+ New-Item -Path $m_tmpdir -ItemType Directory > $null
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_check_mode = $true
+ _ansible_debug = $true
+ _ansible_diff = $true
+ _ansible_keep_remote_files = $true
+ _ansible_module_name = "ansible_basic_tests"
+ _ansible_no_log = $true
+ _ansible_remote_tmp = "%TEMP%"
+ _ansible_selinux_special_fs = "ignored"
+ _ansible_shell_executable = "ignored"
+ _ansible_socket = "ignored"
+ _ansible_syslog_facility = "ignored"
+ _ansible_tmpdir = $m_tmpdir.ToString()
+ _ansible_verbosity = 3
+ _ansible_version = "2.8.0"
+ }
+ $spec = @{
+ supports_check_mode = $true
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $m.CheckMode | Assert-Equals -Expected $true
+ $m.DebugMode | Assert-Equals -Expected $true
+ $m.DiffMode | Assert-Equals -Expected $true
+ $m.KeepRemoteFiles | Assert-Equals -Expected $true
+ $m.ModuleName | Assert-Equals -Expected "ansible_basic_tests"
+ $m.NoLog | Assert-Equals -Expected $true
+ $m.Verbosity | Assert-Equals -Expected 3
+ $m.AnsibleVersion | Assert-Equals -Expected "2.8.0"
+ $m.Tmpdir | Assert-Equals -Expected $m_tmpdir
+ }
+
+ "Parse complex module options" = {
+ $spec = @{
+ options = @{
+ option_default = @{}
+ missing_option_default = @{}
+ string_option = @{type = "str"}
+ required_option = @{required = $true}
+ missing_choices = @{choices = "a", "b"}
+ choices = @{choices = "a", "b"}
+ one_choice = @{choices = ,"b"}
+ choice_with_default = @{choices = "a", "b"; default = "b"}
+ alias_direct = @{aliases = ,"alias_direct1"}
+ alias_as_alias = @{aliases = "alias_as_alias1", "alias_as_alias2"}
+ bool_type = @{type = "bool"}
+ bool_from_str = @{type = "bool"}
+ dict_type = @{
+ type = "dict"
+ options = @{
+ int_type = @{type = "int"}
+ str_type = @{type = "str"; default = "str_sub_type"}
+ }
+ }
+ dict_type_missing = @{
+ type = "dict"
+ options = @{
+ int_type = @{type = "int"}
+ str_type = @{type = "str"; default = "str_sub_type"}
+ }
+ }
+ dict_type_defaults = @{
+ type = "dict"
+ apply_defaults = $true
+ options = @{
+ int_type = @{type = "int"}
+ str_type = @{type = "str"; default = "str_sub_type"}
+ }
+ }
+ dict_type_json = @{type = "dict"}
+ dict_type_str = @{type = "dict"}
+ float_type = @{type = "float"}
+ int_type = @{type = "int"}
+ json_type = @{type = "json"}
+ json_type_dict = @{type = "json"}
+ list_type = @{type = "list"}
+ list_type_str = @{type = "list"}
+ list_with_int = @{type = "list"; elements = "int"}
+ list_type_single = @{type = "list"}
+ list_with_dict = @{
+ type = "list"
+ elements = "dict"
+ options = @{
+ int_type = @{type = "int"}
+ str_type = @{type = "str"; default = "str_sub_type"}
+ }
+ }
+ path_type = @{type = "path"}
+ path_type_nt = @{type = "path"}
+ path_type_missing = @{type = "path"}
+ raw_type_str = @{type = "raw"}
+ raw_type_int = @{type = "raw"}
+ sid_type = @{type = "sid"}
+ sid_from_name = @{type = "sid"}
+ str_type = @{type = "str"}
+ delegate_type = @{type = [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0]) }}
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_default = 1
+ string_option = 1
+ required_option = "required"
+ choices = "a"
+ one_choice = "b"
+ alias_direct = "a"
+ alias_as_alias2 = "a"
+ bool_type = $true
+ bool_from_str = "false"
+ dict_type = @{
+ int_type = "10"
+ }
+ dict_type_json = '{"a":"a","b":1,"c":["a","b"]}'
+ dict_type_str = 'a=a b="b 2" c=c'
+ float_type = "3.14159"
+ int_type = 0
+ json_type = '{"a":"a","b":1,"c":["a","b"]}'
+ json_type_dict = @{
+ a = "a"
+ b = 1
+ c = @("a", "b")
+ }
+ list_type = @("a", "b", 1, 2)
+ list_type_str = "a, b,1,2 "
+ list_with_int = @("1", 2)
+ list_type_single = "single"
+ list_with_dict = @(
+ @{
+ int_type = 2
+ str_type = "dict entry"
+ },
+ @{ int_type = 1 },
+ @{}
+ )
+ path_type = "%SystemRoot%\System32"
+ path_type_nt = "\\?\%SystemRoot%\System32"
+ path_type_missing = "T:\missing\path"
+ raw_type_str = "str"
+ raw_type_int = 1
+ sid_type = "S-1-5-18"
+ sid_from_name = "SYSTEM"
+ str_type = "str"
+ delegate_type = "1234"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $m.Params.option_default | Assert-Equals -Expected "1"
+ $m.Params.option_default.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.missing_option_default | Assert-Equals -Expected $null
+ $m.Params.string_option | Assert-Equals -Expected "1"
+ $m.Params.string_option.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.required_option | Assert-Equals -Expected "required"
+ $m.Params.required_option.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.missing_choices | Assert-Equals -Expected $null
+ $m.Params.choices | Assert-Equals -Expected "a"
+ $m.Params.choices.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.one_choice | Assert-Equals -Expected "b"
+ $m.Params.one_choice.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.choice_with_default | Assert-Equals -Expected "b"
+ $m.Params.choice_with_default.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.alias_direct | Assert-Equals -Expected "a"
+ $m.Params.alias_direct.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.alias_as_alias | Assert-Equals -Expected "a"
+ $m.Params.alias_as_alias.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.bool_type | Assert-Equals -Expected $true
+ $m.Params.bool_type.GetType().ToString() | Assert-Equals -Expected "System.Boolean"
+ $m.Params.bool_from_str | Assert-Equals -Expected $false
+ $m.Params.bool_from_str.GetType().ToString() | Assert-Equals -Expected "System.Boolean"
+ $m.Params.dict_type | Assert-DictionaryEquals -Expected @{int_type = 10; str_type = "str_sub_type"}
+ $m.Params.dict_type.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]"
+ $m.Params.dict_type.int_type.GetType().ToString() | Assert-Equals -Expected "System.Int32"
+ $m.Params.dict_type.str_type.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.dict_type_missing | Assert-Equals -Expected $null
+ $m.Params.dict_type_defaults | Assert-DictionaryEquals -Expected @{int_type = $null; str_type = "str_sub_type"}
+ $m.Params.dict_type_defaults.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]"
+ $m.Params.dict_type_defaults.str_type.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.dict_type_json | Assert-DictionaryEquals -Expected @{
+ a = "a"
+ b = 1
+ c = @("a", "b")
+ }
+ $m.Params.dict_type_json.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]"
+ $m.Params.dict_type_json.a.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.dict_type_json.b.GetType().ToString() | Assert-Equals -Expected "System.Int32"
+ $m.Params.dict_type_json.c.GetType().ToString() | Assert-Equals -Expected "System.Collections.ArrayList"
+ $m.Params.dict_type_str | Assert-DictionaryEquals -Expected @{a = "a"; b = "b 2"; c = "c"}
+ $m.Params.dict_type_str.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.Dictionary``2[System.String,System.Object]"
+ $m.Params.dict_type_str.a.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.dict_type_str.b.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.dict_type_str.c.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.float_type | Assert-Equals -Expected ([System.Single]3.14159)
+ $m.Params.float_type.GetType().ToString() | Assert-Equals -Expected "System.Single"
+ $m.Params.int_type | Assert-Equals -Expected 0
+ $m.Params.int_type.GetType().ToString() | Assert-Equals -Expected "System.Int32"
+ $m.Params.json_type | Assert-Equals -Expected '{"a":"a","b":1,"c":["a","b"]}'
+ $m.Params.json_type.GetType().ToString() | Assert-Equals -Expected "System.String"
+ [Ansible.Basic.AnsibleModule]::FromJson($m.Params.json_type_dict) | Assert-DictionaryEquals -Expected ([Ansible.Basic.AnsibleModule]::FromJson('{"a":"a","b":1,"c":["a","b"]}'))
+ $m.Params.json_type_dict.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.list_type.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]"
+ $m.Params.list_type.Count | Assert-Equals -Expected 4
+ $m.Params.list_type[0] | Assert-Equals -Expected "a"
+ $m.Params.list_type[0].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_type[1] | Assert-Equals -Expected "b"
+ $m.Params.list_type[1].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_type[2] | Assert-Equals -Expected 1
+ $m.Params.list_type[2].GetType().FullName | Assert-Equals -Expected "System.Int32"
+ $m.Params.list_type[3] | Assert-Equals -Expected 2
+ $m.Params.list_type[3].GetType().FullName | Assert-Equals -Expected "System.Int32"
+ $m.Params.list_type_str.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]"
+ $m.Params.list_type_str.Count | Assert-Equals -Expected 4
+ $m.Params.list_type_str[0] | Assert-Equals -Expected "a"
+ $m.Params.list_type_str[0].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_type_str[1] | Assert-Equals -Expected "b"
+ $m.Params.list_type_str[1].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_type_str[2] | Assert-Equals -Expected "1"
+ $m.Params.list_type_str[2].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_type_str[3] | Assert-Equals -Expected "2"
+ $m.Params.list_type_str[3].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_with_int.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]"
+ $m.Params.list_with_int.Count | Assert-Equals -Expected 2
+ $m.Params.list_with_int[0] | Assert-Equals -Expected 1
+ $m.Params.list_with_int[0].GetType().FullName | Assert-Equals -Expected "System.Int32"
+ $m.Params.list_with_int[1] | Assert-Equals -Expected 2
+ $m.Params.list_with_int[1].GetType().FullName | Assert-Equals -Expected "System.Int32"
+ $m.Params.list_type_single.GetType().ToString() | Assert-Equals -Expected "System.Collections.Generic.List``1[System.Object]"
+ $m.Params.list_type_single.Count | Assert-Equals -Expected 1
+ $m.Params.list_type_single[0] | Assert-Equals -Expected "single"
+ $m.Params.list_type_single[0].GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.list_with_dict.GetType().FullName.StartsWith("System.Collections.Generic.List``1[[System.Object") | Assert-Equals -Expected $true
+ $m.Params.list_with_dict.Count | Assert-Equals -Expected 3
+ $m.Params.list_with_dict[0].GetType().FullName.StartsWith("System.Collections.Generic.Dictionary``2[[System.String") | Assert-Equals -Expected $true
+ $m.Params.list_with_dict[0] | Assert-DictionaryEquals -Expected @{int_type = 2; str_type = "dict entry"}
+ $m.Params.list_with_dict[0].int_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.Int32"
+ $m.Params.list_with_dict[0].str_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.list_with_dict[1].GetType().FullName.StartsWith("System.Collections.Generic.Dictionary``2[[System.String") | Assert-Equals -Expected $true
+ $m.Params.list_with_dict[1] | Assert-DictionaryEquals -Expected @{int_type = 1; str_type = "str_sub_type"}
+ $m.Params.list_with_dict[1].int_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.Int32"
+ $m.Params.list_with_dict[1].str_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.list_with_dict[2].GetType().FullName.StartsWith("System.Collections.Generic.Dictionary``2[[System.String") | Assert-Equals -Expected $true
+ $m.Params.list_with_dict[2] | Assert-DictionaryEquals -Expected @{int_type = $null; str_type = "str_sub_type"}
+ $m.Params.list_with_dict[2].str_type.GetType().FullName.ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.path_type | Assert-Equals -Expected "$($env:SystemRoot)\System32"
+ $m.Params.path_type.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.path_type_nt | Assert-Equals -Expected "\\?\%SystemRoot%\System32"
+ $m.Params.path_type_nt.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.path_type_missing | Assert-Equals -Expected "T:\missing\path"
+ $m.Params.path_type_missing.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.raw_type_str | Assert-Equals -Expected "str"
+ $m.Params.raw_type_str.GetType().FullName | Assert-Equals -Expected "System.String"
+ $m.Params.raw_type_int | Assert-Equals -Expected 1
+ $m.Params.raw_type_int.GetType().FullName | Assert-Equals -Expected "System.Int32"
+ $m.Params.sid_type | Assert-Equals -Expected (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList "S-1-5-18")
+ $m.Params.sid_type.GetType().ToString() | Assert-Equals -Expected "System.Security.Principal.SecurityIdentifier"
+ $m.Params.sid_from_name | Assert-Equals -Expected (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList "S-1-5-18")
+ $m.Params.sid_from_name.GetType().ToString() | Assert-Equals -Expected "System.Security.Principal.SecurityIdentifier"
+ $m.Params.str_type | Assert-Equals -Expected "str"
+ $m.Params.str_type.GetType().ToString() | Assert-Equals -Expected "System.String"
+ $m.Params.delegate_type | Assert-Equals -Expected 1234
+ $m.Params.delegate_type.GetType().ToString() | Assert-Equals -Expected "System.UInt64"
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_module_args = @{
+ option_default = "1"
+ missing_option_default = $null
+ string_option = "1"
+ required_option = "required"
+ missing_choices = $null
+ choices = "a"
+ one_choice = "b"
+ choice_with_default = "b"
+ alias_direct = "a"
+ alias_as_alias = "a"
+ alias_as_alias2 = "a"
+ bool_type = $true
+ bool_from_str = $false
+ dict_type = @{
+ int_type = 10
+ str_type = "str_sub_type"
+ }
+ dict_type_missing = $null
+ dict_type_defaults = @{
+ int_type = $null
+ str_type = "str_sub_type"
+ }
+ dict_type_json = @{
+ a = "a"
+ b = 1
+ c = @("a", "b")
+ }
+ dict_type_str = @{
+ a = "a"
+ b = "b 2"
+ c = "c"
+ }
+ float_type = 3.14159
+ int_type = 0
+ json_type = $m.Params.json_type.ToString()
+ json_type_dict = $m.Params.json_type_dict.ToString()
+ list_type = @("a", "b", 1, 2)
+ list_type_str = @("a", "b", "1", "2")
+ list_with_int = @(1, 2)
+ list_type_single = @("single")
+ list_with_dict = @(
+ @{
+ int_type = 2
+ str_type = "dict entry"
+ },
+ @{
+ int_type = 1
+ str_type = "str_sub_type"
+ },
+ @{
+ int_type = $null
+ str_type = "str_sub_type"
+ }
+ )
+ path_type = "$($env:SystemRoot)\System32"
+ path_type_nt = "\\?\%SystemRoot%\System32"
+ path_type_missing = "T:\missing\path"
+ raw_type_str = "str"
+ raw_type_int = 1
+ sid_type = "S-1-5-18"
+ sid_from_name = "S-1-5-18"
+ str_type = "str"
+ delegate_type = 1234
+ }
+ $actual.Keys.Count | Assert-Equals -Expected 2
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $expected_module_args}
+ }
+
+ "Parse module args with list elements and delegate type" = {
+ $spec = @{
+ options = @{
+ list_delegate_type = @{
+ type = "list"
+ elements = [Func[[Object], [UInt16]]]{ [System.UInt16]::Parse($args[0]) }
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ list_delegate_type = @(
+ "1234",
+ 4321
+ )
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $m.Params.list_delegate_type.GetType().Name | Assert-Equals -Expected 'List`1'
+ $m.Params.list_delegate_type[0].GetType().FullName | Assert-Equals -Expected "System.UInt16"
+ $m.Params.list_delegate_type[1].GetType().FullName | Assert-Equals -Expected "System.UInt16"
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_module_args = @{
+ list_delegate_type = @(
+ 1234,
+ 4321
+ )
+ }
+ $actual.Keys.Count | Assert-Equals -Expected 2
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $expected_module_args}
+ }
+
+ "Parse module args with case insensitive input" = {
+ $spec = @{
+ options = @{
+ option1 = @{ type = "int"; required = $true }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_module_name = "win_test"
+ Option1 = "1"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ # Verifies the casing of the params key follows the module spec, not the actual input
+ $m.Params.Keys | Assert-Equals -Expected @("option1")
+ $m.Params.option1 | Assert-Equals -Expected 1
+
+ # Verifies the type conversion happens even on a case-insensitive match
+ $m.Params.option1.GetType().FullName | Assert-Equals -Expected "System.Int32"
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_warnings = "Parameters for (win_test) was a case insensitive match: Option1. "
+ $expected_warnings += "Module options will become case sensitive in a future Ansible release. "
+ $expected_warnings += "Supported parameters include: option1"
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ option1 = 1
+ }
+ }
+ # We have disabled the warning for now
+ #warnings = @($expected_warnings)
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "No log values" = {
+ $spec = @{
+ options = @{
+ username = @{type = "str"}
+ password = @{type = "str"; no_log = $true}
+ password2 = @{type = "int"; no_log = $true}
+ dict = @{type = "dict"}
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_module_name = "test_no_log"
+ username = "user - pass - name"
+ password = "pass"
+ password2 = 1234
+ dict = @{
+ data = "Oops this is secret: pass"
+ dict = @{
+ pass = "plain"
+ hide = "pass"
+ sub_hide = "password"
+ int_hide = 123456
+ }
+ list = @(
+ "pass",
+ "password",
+ 1234567,
+ "pa ss",
+ @{
+ pass = "plain"
+ hide = "pass"
+ sub_hide = "password"
+ int_hide = 123456
+ }
+ )
+ custom = "pass"
+ }
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $m.Result.data = $complex_args.dict
+
+ # verify params internally aren't masked
+ $m.Params.username | Assert-Equals -Expected "user - pass - name"
+ $m.Params.password | Assert-Equals -Expected "pass"
+ $m.Params.password2 | Assert-Equals -Expected 1234
+ $m.Params.dict.custom | Assert-Equals -Expected "pass"
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ # verify no_log params are masked in invocation
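+ # exact no_log values become VALUE_SPECIFIED_IN_NO_LOG_PARAMETER, while
+ # substrings of them found inside other values are masked with ********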
+ $expected = @{
+ invocation = @{
+ module_args = @{
+ password2 = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ dict = @{
+ dict = @{
+ pass = "plain"
+ hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ sub_hide = "********word"
+ int_hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ }
+ custom = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ list = @(
+ "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "********word",
+ "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
+ "pa ss",
+ @{
+ pass = "plain"
+ hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ sub_hide = "********word"
+ int_hide = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ }
+ )
+ data = "Oops this is secret: ********"
+ }
+ username = "user - ******** - name"
+ password = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ }
+ }
+ changed = $false
+ data = $complex_args.dict
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+
+ $expected_event = @'
+test_no_log - Invoked with:
+ username: user - ******** - name
+ dict: dict: sub_hide: ****word
+ pass: plain
+ int_hide: ********56
+ hide: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ data: Oops this is secret: ********
+ custom: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ list:
+ - VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ - ********word
+ - ********567
+ - pa ss
+ - sub_hide: ********word
+ pass: plain
+ int_hide: ********56
+ hide: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ password2: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+'@
+ $actual_event = (Get-EventLog -LogName Application -Source Ansible -Newest 1).Message
+ $actual_event | Assert-Equals -Expected $expected_event
+ }
+
+ "No log value with an empty string" = {
+ $spec = @{
+ options = @{
+ password1 = @{type = "str"; no_log = $true}
+ password2 = @{type = "str"; no_log = $true}
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_module_name = "test_no_log"
+ password1 = ""
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $m.Result.data = $complex_args.dict
+
+ # verify params internally aren't masked
+ $m.Params.password1 | Assert-Equals -Expected ""
+ $m.Params.password2 | Assert-Equals -Expected $null
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ invocation = @{
+ module_args = @{
+ password1 = ""
+ password2 = $null
+ }
+ }
+ changed = $false
+ data = $complex_args.dict
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Removed in version" = {
+ $spec = @{
+ options = @{
+ removed1 = @{removed_in_version = "2.1"}
+ removed2 = @{removed_in_version = "2.2"}
+ removed3 = @{removed_in_version = "2.3"; removed_from_collection = "ansible.builtin"}
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ removed1 = "value"
+ removed3 = "value"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ removed1 = "value"
+ removed2 = $null
+ removed3 = "value"
+ }
+ }
+ deprecations = @(
+ @{
+ msg = "Param 'removed3' is deprecated. See the module docs for more information"
+ version = "2.3"
+ collection_name = "ansible.builtin"
+ },
+ @{
+ msg = "Param 'removed1' is deprecated. See the module docs for more information"
+ version = "2.1"
+ collection_name = $null
+ }
+ )
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Removed at date" = {
+ $spec = @{
+ options = @{
+ removed1 = @{removed_at_date = [DateTime]"2020-03-10"}
+ removed2 = @{removed_at_date = [DateTime]"2020-03-11"}
+ removed3 = @{removed_at_date = [DateTime]"2020-06-07"; removed_from_collection = "ansible.builtin"}
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ removed1 = "value"
+ removed3 = "value"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ removed1 = "value"
+ removed2 = $null
+ removed3 = "value"
+ }
+ }
+ deprecations = @(
+ @{
+ msg = "Param 'removed3' is deprecated. See the module docs for more information"
+ date = "2020-06-07"
+ collection_name = "ansible.builtin"
+ },
+ @{
+ msg = "Param 'removed1' is deprecated. See the module docs for more information"
+ date = "2020-03-10"
+ collection_name = $null
+ }
+ )
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Deprecated aliases" = {
+ $spec = @{
+ options = @{
+ option1 = @{ type = "str"; aliases = "alias1"; deprecated_aliases = @(@{name = "alias1"; version = "2.10"}) }
+ option2 = @{ type = "str"; aliases = "alias2"; deprecated_aliases = @(@{name = "alias2"; version = "2.11"}) }
+ option3 = @{
+ type = "dict"
+ options = @{
+ option1 = @{ type = "str"; aliases = "alias1"; deprecated_aliases = @(@{name = "alias1"; version = "2.10"}) }
+ option2 = @{ type = "str"; aliases = "alias2"; deprecated_aliases = @(@{name = "alias2"; version = "2.11"}) }
+ option3 = @{ type = "str"; aliases = "alias3"; deprecated_aliases = @(@{name = "alias3"; version = "2.12"; collection_name = "ansible.builtin"}) }
+ option4 = @{ type = "str"; aliases = "alias4"; deprecated_aliases = @(@{name = "alias4"; date = [DateTime]"2020-03-11"}) }
+ option5 = @{ type = "str"; aliases = "alias5"; deprecated_aliases = @(@{name = "alias5"; date = [DateTime]"2020-03-09"}) }
+ option6 = @{ type = "str"; aliases = "alias6"; deprecated_aliases = @(@{name = "alias6"; date = [DateTime]"2020-06-01"; collection_name = "ansible.builtin"}) }
+ }
+ }
+ option4 = @{ type = "str"; aliases = "alias4"; deprecated_aliases = @(@{name = "alias4"; date = [DateTime]"2020-03-10"}) }
+ option5 = @{ type = "str"; aliases = "alias5"; deprecated_aliases = @(@{name = "alias5"; date = [DateTime]"2020-03-12"}) }
+ option6 = @{ type = "str"; aliases = "alias6"; deprecated_aliases = @(@{name = "alias6"; version = "2.12"; collection_name = "ansible.builtin"}) }
+ option7 = @{ type = "str"; aliases = "alias7"; deprecated_aliases = @(@{name = "alias7"; date = [DateTime]"2020-06-07"; collection_name = "ansible.builtin"}) }
+ }
+ }
+
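+ # Only aliases actually supplied in complex_args below should produce a
+ # matching deprecation entry in the module result.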
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ alias1 = "alias1"
+ option2 = "option2"
+ option3 = @{
+ option1 = "option1"
+ alias2 = "alias2"
+ alias3 = "alias3"
+ option4 = "option4"
+ alias5 = "alias5"
+ alias6 = "alias6"
+ }
+ option4 = "option4"
+ alias5 = "alias5"
+ alias6 = "alias6"
+ alias7 = "alias7"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ alias1 = "alias1"
+ option1 = "alias1"
+ option2 = "option2"
+ option3 = @{
+ option1 = "option1"
+ option2 = "alias2"
+ alias2 = "alias2"
+ option3 = "alias3"
+ alias3 = "alias3"
+ option4 = "option4"
+ option5 = "alias5"
+ alias5 = "alias5"
+ option6 = "alias6"
+ alias6 = "alias6"
+ }
+ option4 = "option4"
+ option5 = "alias5"
+ alias5 = "alias5"
+ option6 = "alias6"
+ alias6 = "alias6"
+ option7 = "alias7"
+ alias7 = "alias7"
+ }
+ }
+ deprecations = @(
+ @{
+ msg = "Alias 'alias7' is deprecated. See the module docs for more information"
+ date = "2020-06-07"
+ collection_name = "ansible.builtin"
+ },
+ @{
+ msg = "Alias 'alias1' is deprecated. See the module docs for more information"
+ version = "2.10"
+ collection_name = $null
+ },
+ @{
+ msg = "Alias 'alias5' is deprecated. See the module docs for more information"
+ date = "2020-03-12"
+ collection_name = $null
+ },
+ @{
+ msg = "Alias 'alias6' is deprecated. See the module docs for more information"
+ version = "2.12"
+ collection_name = "ansible.builtin"
+ },
+ @{
+ msg = "Alias 'alias2' is deprecated. See the module docs for more information - found in option3"
+ version = "2.11"
+ collection_name = $null
+ },
+ @{
+ msg = "Alias 'alias5' is deprecated. See the module docs for more information - found in option3"
+ date = "2020-03-09"
+ collection_name = $null
+ },
+ @{
+ msg = "Alias 'alias3' is deprecated. See the module docs for more information - found in option3"
+ version = "2.12"
+ collection_name = "ansible.builtin"
+ },
+ @{
+ msg = "Alias 'alias6' is deprecated. See the module docs for more information - found in option3"
+ date = "2020-06-01"
+ collection_name = "ansible.builtin"
+ }
+ )
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
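+ # The next set of tests exercises required_by: when option1 is set, every option named in
+ # required_by.option1 must also be supplied (an explicit $null still counts as supplied).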
+ "Required by - single value" = {
+ $spec = @{
+ options = @{
+ option1 = @{type = "str"}
+ option2 = @{type = "str"}
+ option3 = @{type = "str"}
+ }
+ required_by = @{
+ option1 = "option2"
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ option2 = "option2"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ option1 = "option1"
+ option2 = "option2"
+ option3 = $null
+ }
+ }
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Required by - multiple values" = {
+ $spec = @{
+ options = @{
+ option1 = @{type = "str"}
+ option2 = @{type = "str"}
+ option3 = @{type = "str"}
+ }
+ required_by = @{
+ option1 = "option2", "option3"
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ option2 = "option2"
+ option3 = "option3"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ option1 = "option1"
+ option2 = "option2"
+ option3 = "option3"
+ }
+ }
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Required by explicit null" = {
+ $spec = @{
+ options = @{
+ option1 = @{type = "str"}
+ option2 = @{type = "str"}
+ option3 = @{type = "str"}
+ }
+ required_by = @{
+ option1 = "option2"
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ option2 = $null
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{
+ option1 = "option1"
+ option2 = $null
+ option3 = $null
+ }
+ }
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Required by failed - single value" = {
+ $spec = @{
+ options = @{
+ option1 = @{type = "str"}
+ option2 = @{type = "str"}
+ option3 = @{type = "str"}
+ }
+ required_by = @{
+ option1 = "option2"
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ failed = $true
+ invocation = @{
+ module_args = @{
+ option1 = "option1"
+ }
+ }
+ msg = "missing parameter(s) required by 'option1': option2"
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Required by failed - multiple values" = {
+ $spec = @{
+ options = @{
+ option1 = @{type = "str"}
+ option2 = @{type = "str"}
+ option3 = @{type = "str"}
+ }
+ required_by = @{
+ option1 = "option2", "option3"
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ failed = $true
+ invocation = @{
+ module_args = @{
+ option1 = "option1"
+ }
+ }
+ msg = "missing parameter(s) required by 'option1': option2, option3"
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
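+ # Debug() only writes a [DEBUG] entry to the Application event log when _ansible_debug is set;
+ # otherwise the newest Ansible event is still the 'Invoked with' log written on module creation.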
+ "Debug without debug set" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_debug = $false
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ $m.Debug("debug message")
+ $actual_event = (Get-EventLog -LogName Application -Source Ansible -Newest 1).Message
+ $actual_event | Assert-Equals -Expected "undefined win module - Invoked with:`r`n "
+ }
+
+ "Debug with debug set" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_debug = $true
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ $m.Debug("debug message")
+ $actual_event = (Get-EventLog -LogName Application -Source Ansible -Newest 1).Message
+ $actual_event | Assert-Equals -Expected "undefined win module - [DEBUG] debug message"
+ }
+
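+ # Deprecate() accepts either a version string or a [DateTime] (tested below), with an optional
+ # collection name; each call also logs a [DEPRECATION WARNING] entry to the event log.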
+ "Deprecate and warn with version" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ $m.Deprecate("message", "2.7")
+ $actual_deprecate_event_1 = Get-EventLog -LogName Application -Source Ansible -Newest 1
+ $m.Deprecate("message w collection", "2.8", "ansible.builtin")
+ $actual_deprecate_event_2 = Get-EventLog -LogName Application -Source Ansible -Newest 1
+ $m.Warn("warning")
+ $actual_warn_event = Get-EventLog -LogName Application -Source Ansible -Newest 1
+
+ $actual_deprecate_event_1.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message 2.7"
+ $actual_deprecate_event_2.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message w collection 2.8"
+ $actual_warn_event.EntryType | Assert-Equals -Expected "Warning"
+ $actual_warn_event.Message | Assert-Equals -Expected "undefined win module - [WARNING] warning"
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ warnings = @("warning")
+ deprecations = @(
+ @{msg = "message"; version = "2.7"; collection_name = $null},
+ @{msg = "message w collection"; version = "2.8"; collection_name = "ansible.builtin"}
+ )
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Deprecate and warn with date" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ $m.Deprecate("message", [DateTime]"2020-01-01")
+ $actual_deprecate_event_1 = Get-EventLog -LogName Application -Source Ansible -Newest 1
+ $m.Deprecate("message w collection", [DateTime]"2020-01-02", "ansible.builtin")
+ $actual_deprecate_event_2 = Get-EventLog -LogName Application -Source Ansible -Newest 1
+ $m.Warn("warning")
+ $actual_warn_event = Get-EventLog -LogName Application -Source Ansible -Newest 1
+
+ $actual_deprecate_event_1.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message 2020-01-01"
+ $actual_deprecate_event_2.Message | Assert-Equals -Expected "undefined win module - [DEPRECATION WARNING] message w collection 2020-01-02"
+ $actual_warn_event.EntryType | Assert-Equals -Expected "Warning"
+ $actual_warn_event.Message | Assert-Equals -Expected "undefined win module - [WARNING] warning"
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ warnings = @("warning")
+ deprecations = @(
+ @{msg = "message"; date = "2020-01-01"; collection_name = $null},
+ @{msg = "message w collection"; date = "2020-01-02"; collection_name = "ansible.builtin"}
+ )
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "FailJson with message" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ $failed = $false
+ try {
+ $m.FailJson("fail message")
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ failed = $true
+ msg = "fail message"
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "FailJson with Exception" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ try {
+ [System.IO.Path]::GetFullPath($null)
+ } catch {
+ $excp = $_.Exception
+ }
+
+ $failed = $false
+ try {
+ $m.FailJson("fail message", $excp)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ failed = $true
+ msg = "fail message"
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "FailJson with ErrorRecord" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ try {
+ Get-Item -LiteralPath $null
+ } catch {
+ $error_record = $_
+ }
+
+ $failed = $false
+ try {
+ $m.FailJson("fail message", $error_record)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ failed = $true
+ msg = "fail message"
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
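+ # With _ansible_verbosity >= 3, FailJson() also returns the exception/error record details in the
+ # 'exception' key of the module output.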
+ "FailJson with Exception and verbosity 3" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_verbosity = 3
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ try {
+ [System.IO.Path]::GetFullPath($null)
+ } catch {
+ $excp = $_.Exception
+ }
+
+ $failed = $false
+ try {
+ $m.FailJson("fail message", $excp)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{}}
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected "fail message"
+ $actual.exception.Contains('System.Management.Automation.MethodInvocationException: Exception calling "GetFullPath" with "1" argument(s)') | Assert-Equals -Expected $true
+ }
+
+ "FailJson with ErrorRecord and verbosity 3" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_verbosity = 3
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ try {
+ Get-Item -LiteralPath $null
+ } catch {
+ $error_record = $_
+ }
+
+ $failed = $false
+ try {
+ $m.FailJson("fail message", $error_record)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{}}
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected "fail message"
+ $actual.exception.Contains("Cannot bind argument to parameter 'LiteralPath' because it is null") | Assert-Equals -Expected $true
+ $actual.exception.Contains("+ Get-Item -LiteralPath `$null") | Assert-Equals -Expected $true
+ $actual.exception.Contains("ScriptStackTrace:") | Assert-Equals -Expected $true
+ }
+
+ "Diff entry without diff set" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ $m.Diff.before = @{a = "a"}
+ $m.Diff.after = @{b = "b"}
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
+ "Diff entry with diff set" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_diff = $true
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ $m.Diff.before = @{a = "a"}
+ $m.Diff.after = @{b = "b"}
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected = @{
+ changed = $false
+ invocation = @{
+ module_args = @{}
+ }
+ diff = @{
+ before = @{a = "a"}
+ after = @{b = "b"}
+ }
+ }
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+
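+ # ParseBool() mirrors Ansible's bool coercion rules; anything outside the y/n, yes/no, on/off,
+ # true/false, t/f, 1/0 style values mapped below is rejected as an invalid boolean.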
+ "ParseBool tests" = {
+ $mapping = New-Object -TypeName 'System.Collections.Generic.Dictionary`2[[Object], [Bool]]'
+ $mapping.Add("y", $true)
+ $mapping.Add("Y", $true)
+ $mapping.Add("yes", $true)
+ $mapping.Add("Yes", $true)
+ $mapping.Add("on", $true)
+ $mapping.Add("On", $true)
+ $mapping.Add("1", $true)
+ $mapping.Add(1, $true)
+ $mapping.Add("true", $true)
+ $mapping.Add("True", $true)
+ $mapping.Add("t", $true)
+ $mapping.Add("T", $true)
+ $mapping.Add("1.0", $true)
+ $mapping.Add(1.0, $true)
+ $mapping.Add($true, $true)
+ $mapping.Add("n", $false)
+ $mapping.Add("N", $false)
+ $mapping.Add("no", $false)
+ $mapping.Add("No", $false)
+ $mapping.Add("off", $false)
+ $mapping.Add("Off", $false)
+ $mapping.Add("0", $false)
+ $mapping.Add(0, $false)
+ $mapping.Add("false", $false)
+ $mapping.Add("False", $false)
+ $mapping.Add("f", $false)
+ $mapping.Add("F", $false)
+ $mapping.Add("0.0", $false)
+ $mapping.Add(0.0, $false)
+ $mapping.Add($false, $false)
+
+ foreach ($map in $mapping.GetEnumerator()) {
+ $expected = $map.Value
+ $actual = [Ansible.Basic.AnsibleModule]::ParseBool($map.Key)
+ $actual | Assert-Equals -Expected $expected
+ $actual.GetType().FullName | Assert-Equals -Expected "System.Boolean"
+ }
+
+ $fail_bools = @(
+ "falsey",
+ "abc",
+ 2,
+ "2",
+ -1
+ )
+ foreach ($fail_bool in $fail_bools) {
+ $failed = $false
+ try {
+ [Ansible.Basic.AnsibleModule]::ParseBool($fail_bool)
+ } catch {
+ $failed = $true
+ $_.Exception.Message.Contains("The value '$fail_bool' is not a valid boolean") | Assert-Equals -Expected $true
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+ }
+
+ "Unknown internal key" = {
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_invalid = "invalid"
+ }
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+
+ $expected = @{
+ invocation = @{
+ module_args = @{
+ _ansible_invalid = "invalid"
+ }
+ }
+ changed = $false
+ failed = $true
+ msg = "Unsupported parameters for (undefined win module) module: _ansible_invalid. Supported parameters include: "
+ }
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ $actual | Assert-DictionaryEquals -Expected $expected
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
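+ # Tmpdir creates an 'ansible-moduletmp-*' directory under _ansible_remote_tmp, creating the
+ # remote_tmp dir itself (with a warning) if it is missing, and removes the tmpdir on exit unless
+ # _ansible_keep_remote_files is set.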
+ "Module tmpdir with present remote tmp" = {
+ $current_user = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+ $dir_security = New-Object -TypeName System.Security.AccessControl.DirectorySecurity
+ $dir_security.SetOwner($current_user)
+ $dir_security.SetAccessRuleProtection($true, $false)
+ $ace = New-Object -TypeName System.Security.AccessControl.FileSystemAccessRule -ArgumentList @(
+ $current_user, [System.Security.AccessControl.FileSystemRights]::FullControl,
+ [System.Security.AccessControl.InheritanceFlags]"ContainerInherit, ObjectInherit",
+ [System.Security.AccessControl.PropagationFlags]::None, [System.Security.AccessControl.AccessControlType]::Allow
+ )
+ $dir_security.AddAccessRule($ace)
+ $expected_sd = $dir_security.GetSecurityDescriptorSddlForm("Access, Owner")
+
+ $remote_tmp = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)"
+ New-Item -Path $remote_tmp -ItemType Directory > $null
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_remote_tmp = $remote_tmp.ToString()
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+
+ $actual_tmpdir = $m.Tmpdir
+ $parent_tmpdir = Split-Path -Path $actual_tmpdir -Parent
+ $tmpdir_name = Split-Path -Path $actual_tmpdir -Leaf
+
+ $parent_tmpdir | Assert-Equals -Expected $remote_tmp
+ $tmpdir_name.StartsWith("ansible-moduletmp-") | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+ $children = [System.IO.Directory]::EnumerateDirectories($remote_tmp)
+ $children.Count | Assert-Equals -Expected 1
+ $actual_tmpdir_sd = (Get-Acl -Path $actual_tmpdir).GetSecurityDescriptorSddlForm("Access, Owner")
+ $actual_tmpdir_sd | Assert-Equals -Expected $expected_sd
+
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $false
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+ $output.warnings.Count | Assert-Equals -Expected 0
+ }
+
+ "Module tmpdir with missing remote_tmp" = {
+ $current_user = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+ $dir_security = New-Object -TypeName System.Security.AccessControl.DirectorySecurity
+ $dir_security.SetOwner($current_user)
+ $dir_security.SetAccessRuleProtection($true, $false)
+ $ace = New-Object -TypeName System.Security.AccessControl.FileSystemAccessRule -ArgumentList @(
+ $current_user, [System.Security.AccessControl.FileSystemRights]::FullControl,
+ [System.Security.AccessControl.InheritanceFlags]"ContainerInherit, ObjectInherit",
+ [System.Security.AccessControl.PropagationFlags]::None, [System.Security.AccessControl.AccessControlType]::Allow
+ )
+ $dir_security.AddAccessRule($ace)
+ $expected_sd = $dir_security.GetSecurityDescriptorSddlForm("Access, Owner")
+
+ $remote_tmp = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)"
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_remote_tmp = $remote_tmp.ToString()
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $false
+
+ $actual_tmpdir = $m.Tmpdir
+ $parent_tmpdir = Split-Path -Path $actual_tmpdir -Parent
+ $tmpdir_name = Split-Path -Path $actual_tmpdir -Leaf
+
+ $parent_tmpdir | Assert-Equals -Expected $remote_tmp
+ $tmpdir_name.StartsWith("ansible-moduletmp-") | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+ $children = [System.IO.Directory]::EnumerateDirectories($remote_tmp)
+ $children.Count | Assert-Equals -Expected 1
+ $actual_remote_sd = (Get-Acl -Path $remote_tmp).GetSecurityDescriptorSddlForm("Access, Owner")
+ $actual_tmpdir_sd = (Get-Acl -Path $actual_tmpdir).GetSecurityDescriptorSddlForm("Access, Owner")
+ $actual_remote_sd | Assert-Equals -Expected $expected_sd
+ $actual_tmpdir_sd | Assert-Equals -Expected $expected_sd
+
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $false
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+ $output.warnings.Count | Assert-Equals -Expected 1
+ $nt_account = $current_user.Translate([System.Security.Principal.NTAccount])
+ $actual_warning = "Module remote_tmp $remote_tmp did not exist and was created with FullControl to $nt_account, "
+ $actual_warning += "this may cause issues when running as another user. To avoid this, "
+ $actual_warning += "create the remote_tmp dir with the correct permissions manually"
+ $actual_warning | Assert-Equals -Expected $output.warnings[0]
+ }
+
+ "Module tmp, keep remote files" = {
+ $remote_tmp = Join-Path -Path $tmpdir -ChildPath "moduletmpdir-$(Get-Random)"
+ New-Item -Path $remote_tmp -ItemType Directory > $null
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_remote_tmp = $remote_tmp.ToString()
+ _ansible_keep_remote_files = $true
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+ $actual_tmpdir = $m.Tmpdir
+ $parent_tmpdir = Split-Path -Path $actual_tmpdir -Parent
+ $tmpdir_name = Split-Path -Path $actual_tmpdir -Leaf
+
+ $parent_tmpdir | Assert-Equals -Expected $remote_tmp
+ $tmpdir_name.StartsWith("ansible-moduletmp-") | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ (Test-Path -LiteralPath $actual_tmpdir -PathType Container) | Assert-Equals -Expected $true
+ (Test-Path -LiteralPath $remote_tmp -PathType Container) | Assert-Equals -Expected $true
+ $output.warnings.Count | Assert-Equals -Expected 0
+ Remove-Item -LiteralPath $actual_tmpdir -Force -Recurse
+ }
+
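+ # Argument spec validation failures are internal errors: the module exits with rc 1 and a msg
+ # prefixed with 'internal error:', plus an 'exception' key for debugging.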
+ "Invalid argument spec key" = {
+ $spec = @{
+ invalid = $true
+ }
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: argument spec entry contains an invalid key 'invalid', valid keys: apply_defaults, "
+ $expected_msg += "aliases, choices, default, deprecated_aliases, elements, mutually_exclusive, no_log, options, "
+ $expected_msg += "removed_in_version, removed_at_date, removed_from_collection, required, required_by, required_if, "
+ $expected_msg += "required_one_of, required_together, supports_check_mode, type"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid argument spec key - nested" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ options = @{
+ sub_option_key = @{
+ invalid = $true
+ }
+ }
+ }
+ }
+ }
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: argument spec entry contains an invalid key 'invalid', valid keys: apply_defaults, "
+ $expected_msg += "aliases, choices, default, deprecated_aliases, elements, mutually_exclusive, no_log, options, "
+ $expected_msg += "removed_in_version, removed_at_date, removed_from_collection, required, required_by, required_if, "
+ $expected_msg += "required_one_of, required_together, supports_check_mode, type - found in option_key -> sub_option_key"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid argument spec value type" = {
+ $spec = @{
+ apply_defaults = "abc"
+ }
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: argument spec for 'apply_defaults' did not match expected "
+ $expected_msg += "type System.Boolean: actual type System.String"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid argument spec option type" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "invalid type"
+ }
+ }
+ }
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: type 'invalid type' is unsupported - found in option_key. "
+ $expected_msg += "Valid types are: bool, dict, float, int, json, list, path, raw, sid, str"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid argument spec option element type" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "list"
+ elements = "invalid type"
+ }
+ }
+ }
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: elements 'invalid type' is unsupported - found in option_key. "
+ $expected_msg += "Valid types are: bool, dict, float, int, json, list, path, raw, sid, str"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid deprecated aliases entry - no version and date" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "str"
+ aliases = ,"alias_name"
+ deprecated_aliases = @(
+ @{name = "alias_name"}
+ )
+ }
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: One of version or date is required in a deprecated_aliases entry"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid deprecated aliases entry - no name (nested)" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "dict"
+ options = @{
+ sub_option_key = @{
+ type = "str"
+ aliases = ,"alias_name"
+ deprecated_aliases = @(
+ @{version = "2.10"}
+ )
+ }
+ }
+ }
+ }
+ }
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = @{
+ sub_option_key = "a"
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.ArgumentException] {
+ $failed = $true
+ $expected_msg = "name is required in a deprecated_aliases entry - found in option_key"
+ $_.Exception.Message | Assert-Equals -Expected $expected_msg
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Invalid deprecated aliases entry - both version and date" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "str"
+ aliases = ,"alias_name"
+ deprecated_aliases = @(
+ @{
+ name = "alias_name"
+ date = [DateTime]"2020-03-10"
+ version = "2.11"
+ }
+ )
+ }
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: Only one of version or date is allowed in a deprecated_aliases entry"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Invalid deprecated aliases entry - wrong date type" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "str"
+ aliases = ,"alias_name"
+ deprecated_aliases = @(
+ @{
+ name = "alias_name"
+ date = "2020-03-10"
+ }
+ )
+ }
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: A deprecated_aliases date must be a DateTime object"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Spec required and default set at the same time" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ required = $true
+ default = "default value"
+ }
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: required and default are mutually exclusive for option_key"
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ ("exception" -cin $actual.Keys) | Assert-Equals -Expected $true
+ }
+
+ "Unsupported options" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "str"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "abc"
+ invalid_key = "def"
+ another_key = "ghi"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "Unsupported parameters for (undefined win module) module: another_key, invalid_key. "
+ $expected_msg += "Supported parameters include: option_key"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
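+ # Without supports_check_mode, a module invoked in check mode exits early with rc 0,
+ # skipped = $true, and a message saying check mode is not supported.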
+ "Check mode and module doesn't support check mode" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "str"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_check_mode = $true
+ option_key = "abc"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "remote module (undefined win module) does not support check mode"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.skipped | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "abc"}}
+ }
+
+ "Check mode with suboption without supports_check_mode" = {
+ $spec = @{
+ options = @{
+ sub_options = @{
+ # This tests the situation where a sub key doesn't set supports_check_mode. The logic in
+ # Ansible.Basic automatically sets that to $false, and we want it to be ignored for a nested check.
+ type = "dict"
+ options = @{
+ sub_option = @{ type = "str"; default = "value" }
+ }
+ }
+ }
+ supports_check_mode = $true
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ _ansible_check_mode = $true
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $m.CheckMode | Assert-Equals -Expected $true
+ }
+
+ "Type conversion error" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "int"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "a"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "argument for option_key is of type System.String and we were unable to convert to int: "
+ $expected_msg += "Input string was not in a correct format."
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Type conversion error - delegate" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "dict"
+ options = @{
+ sub_option_key = @{
+ type = [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0]) }
+ }
+ }
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = @{
+ sub_option_key = "a"
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "argument for sub_option_key is of type System.String and we were unable to convert to delegate: "
+ $expected_msg += "Exception calling `"Parse`" with `"1`" argument(s): `"Input string was not in a correct format.`" "
+ $expected_msg += "found in option_key"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
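+ # Choice validation runs after type casting, so the string "2" satisfies an int choices list of 1, 2, 3.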
+ "Numeric choices" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = 1, 2, 3
+ type = "int"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "2"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $output.Keys.Count | Assert-Equals -Expected 2
+ $output.changed | Assert-Equals -Expected $false
+ $output.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = 2}}
+ }
+
+ "Case insensitive choice" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = "abc", "def"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "ABC"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $expected_warning = "value of option_key was a case insensitive match of one of: abc, def. "
+ $expected_warning += "Checking of choices will be case sensitive in a future Ansible release. "
+ $expected_warning += "Case insensitive matches were: ABC"
+
+ $output.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "ABC"}}
+ # We have disabled the warnings for now
+ #$output.warnings.Count | Assert-Equals -Expected 1
+ #$output.warnings[0] | Assert-Equals -Expected $expected_warning
+ }
+
+ "Case insensitive choice no_log" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = "abc", "def"
+ no_log = $true
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "ABC"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $expected_warning = "value of option_key was a case insensitive match of one of: abc, def. "
+ $expected_warning += "Checking of choices will be case sensitive in a future Ansible release. "
+ $expected_warning += "Case insensitive matches were: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+
+ $output.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"}}
+ # We have disabled the warnings for now
+ #$output.warnings.Count | Assert-Equals -Expected 1
+ #$output.warnings[0] | Assert-Equals -Expected $expected_warning
+ }
+
+ "Case insentitive choice as list" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = "abc", "def", "ghi", "JKL"
+ type = "list"
+ elements = "str"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "AbC", "ghi", "jkl"
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $output = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $expected_warning = "value of option_key was a case insensitive match of one or more of: abc, def, ghi, JKL. "
+ $expected_warning += "Checking of choices will be case sensitive in a future Ansible release. "
+ $expected_warning += "Case insensitive matches were: AbC, jkl"
+
+ $output.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ # We have disabled the warnings for now
+ #$output.warnings.Count | Assert-Equals -Expected 1
+ #$output.warnings[0] | Assert-Equals -Expected $expected_warning
+ }
+
+ "Invalid choice" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = "a", "b"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "c"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "value of option_key must be one of: a, b. Got no match for: c"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Invalid choice with no_log" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = "a", "b"
+ no_log = $true
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "abc"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "value of option_key must be one of: a, b. Got no match for: ********"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{option_key = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"}}
+ }
+
+ "Invalid choice in list" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ choices = "a", "b"
+ type = "list"
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = "a", "c"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "value of option_key must be one or more of: a, b. Got no match for: c"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
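+ # The following tests cover the inter-option constraints: mutually_exclusive, required,
+ # required_together, and required_one_of, both at the top level and inside dict sub-specs.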
+ "Mutually exclusive options" = {
+ $spec = @{
+ options = @{
+ option1 = @{}
+ option2 = @{}
+ }
+ mutually_exclusive = @(,@("option1", "option2"))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "a"
+ option2 = "b"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "parameters are mutually exclusive: option1, option2"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Missing required argument" = {
+ $spec = @{
+ options = @{
+ option1 = @{}
+ option2 = @{required = $true}
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "a"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "missing required arguments: option2"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Missing required argument subspec - no value defined" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "dict"
+ options = @{
+ sub_option_key = @{
+ required = $true
+ }
+ }
+ }
+ }
+ }
+
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.Keys.Count | Assert-Equals -Expected 2
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Missing required argument subspec" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "dict"
+ options = @{
+ sub_option_key = @{
+ required = $true
+ }
+ another_key = @{}
+ }
+ }
+ }
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = @{
+ another_key = "abc"
+ }
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "missing required arguments: sub_option_key found in option_key"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required together not set" = {
+ $spec = @{
+ options = @{
+ option1 = @{}
+ option2 = @{}
+ }
+ required_together = @(,@("option1", "option2"))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "abc"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "parameters are required together: option1, option2"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required together not set - subspec" = {
+ $spec = @{
+ options = @{
+ option_key = @{
+ type = "dict"
+ options = @{
+ option1 = @{}
+ option2 = @{}
+ }
+ required_together = @(,@("option1", "option2"))
+ }
+ another_option = @{}
+ }
+ required_together = @(,@("option_key", "another_option"))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option_key = @{
+ option1 = "abc"
+ }
+ another_option = "def"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "parameters are required together: option1, option2 found in option_key"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required one of not set" = {
+ $spec = @{
+ options = @{
+ option1 = @{}
+ option2 = @{}
+ option3 = @{}
+ }
+ required_one_of = @(@("option1", "option2"), @("option2", "option3"))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "abc"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "one of the following is required: option2, option3"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
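+ # required_if entries must have 3 or 4 elements: option name, trigger value, required option list,
+ # and an optional bool that relaxes the check from 'all of' to 'any of' the listed options.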
+ "Required if invalid entries" = {
+ $spec = @{
+ options = @{
+ state = @{choices = "absent", "present"; default = "present"}
+ path = @{type = "path"}
+ }
+ required_if = @(,@("state", "absent"))
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "internal error: invalid required_if value count of 2, expecting 3 or 4 entries"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required if no missing option" = {
+ $spec = @{
+ options = @{
+ state = @{choices = "absent", "present"; default = "present"}
+ name = @{}
+ path = @{type = "path"}
+ }
+ required_if = @(,@("state", "absent", @("name", "path")))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ name = "abc"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.Keys.Count | Assert-Equals -Expected 2
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required if missing option" = {
+ $spec = @{
+ options = @{
+ state = @{choices = "absent", "present"; default = "present"}
+ name = @{}
+ path = @{type = "path"}
+ }
+ required_if = @(,@("state", "absent", @("name", "path")))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ state = "absent"
+ name = "abc"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "state is absent but all of the following are missing: path"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required if missing option and required one is set" = {
+ $spec = @{
+ options = @{
+ state = @{choices = "absent", "present"; default = "present"}
+ name = @{}
+ path = @{type = "path"}
+ }
+ required_if = @(,@("state", "absent", @("name", "path"), $true))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ state = "absent"
+ }
+
+ $failed = $false
+ try {
+ $null = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $expected_msg = "state is absent but any of the following are missing: name, path"
+
+ $actual.Keys.Count | Assert-Equals -Expected 4
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected $expected_msg
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Required if missing option but one required set" = {
+ $spec = @{
+ options = @{
+ state = @{choices = "absent", "present"; default = "present"}
+ name = @{}
+ path = @{type = "path"}
+ }
+ required_if = @(,@("state", "absent", @("name", "path"), $true))
+ }
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ state = "absent"
+ name = "abc"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.Keys.Count | Assert-Equals -Expected 2
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
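+
+    # Recap of the required_if entry shape exercised above: each entry is
+    # @(option, value, @(requirements)) with an optional trailing $true that
+    # turns the check from "all of the requirements" into "any of them".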
+
+ "PS Object in return result" = {
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), @{})
+
+        # JavaScriptSerializer struggles with PS objects like PSCustomObject due to circular references;
+        # this test makes sure we can handle these types of objects without bombing
+ $m.Result.output = [PSCustomObject]@{a = "a"; b = "b"}
+        $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.Keys.Count | Assert-Equals -Expected 3
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = @{}}
+ $actual.output | Assert-DictionaryEquals -Expected @{a = "a"; b = "b"}
+ }
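+
+    # As asserted above, ExitJson flattens a PSCustomObject into a plain
+    # dictionary when serializing the result, e.g. [PSCustomObject]@{a = "a"}
+    # comes back to the caller as @{a = "a"}.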
+
+ "String json array to object" = {
+ $input_json = '["abc", "def"]'
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($input_json)
+ $actual -is [Array] | Assert-Equals -Expected $true
+ $actual.Length | Assert-Equals -Expected 2
+ $actual[0] | Assert-Equals -Expected "abc"
+ $actual[1] | Assert-Equals -Expected "def"
+ }
+
+ "String json array of dictionaries to object" = {
+ $input_json = '[{"abc":"def"}]'
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($input_json)
+ $actual -is [Array] | Assert-Equals -Expected $true
+ $actual.Length | Assert-Equals -Expected 1
+ $actual[0] | Assert-DictionaryEquals -Expected @{"abc" = "def"}
+ }
+
+ "Spec with fragments" = {
+ $spec = @{
+ options = @{
+ option1 = @{ type = "str" }
+ }
+ }
+ $fragment1 = @{
+ options = @{
+ option2 = @{ type = "str" }
+ }
+ }
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ option2 = "option2"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1))
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{module_args = $complex_args}
+ }
+
+ "Fragment spec that with a deprecated alias" = {
+ $spec = @{
+ options = @{
+ option1 = @{
+ aliases = @("alias1_spec")
+ type = "str"
+ deprecated_aliases = @(
+ @{name = "alias1_spec"; version = "2.0"}
+ )
+ }
+ option2 = @{
+ aliases = @("alias2_spec")
+ deprecated_aliases = @(
+ @{name = "alias2_spec"; version = "2.0"; collection_name = "ansible.builtin"}
+ )
+ }
+ }
+ }
+ $fragment1 = @{
+ options = @{
+ option1 = @{
+ aliases = @("alias1")
+ deprecated_aliases = @() # Makes sure it doesn't overwrite the spec, just adds to it.
+ }
+ option2 = @{
+ aliases = @("alias2")
+ deprecated_aliases = @(
+ @{name = "alias2"; version = "2.0"; collection_name = "foo.bar"}
+ )
+ type = "str"
+ }
+ }
+ }
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ alias1_spec = "option1"
+ alias2 = "option2"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1))
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.deprecations.Count | Assert-Equals -Expected 2
+ $actual.deprecations[0] | Assert-DictionaryEquals -Expected @{
+ msg = "Alias 'alias1_spec' is deprecated. See the module docs for more information"; version = "2.0"; collection_name = $null
+ }
+ $actual.deprecations[1] | Assert-DictionaryEquals -Expected @{
+ msg = "Alias 'alias2' is deprecated. See the module docs for more information"; version = "2.0"; collection_name = "foo.bar"
+ }
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{
+ module_args = @{
+ option1 = "option1"
+ alias1_spec = "option1"
+ option2 = "option2"
+ alias2 = "option2"
+ }
+ }
+ }
+
+ "Fragment spec with mutual args" = {
+ $spec = @{
+ options = @{
+ option1 = @{ type = "str" }
+ option2 = @{ type = "str" }
+ }
+ mutually_exclusive = @(
+ ,@('option1', 'option2')
+ )
+ }
+ $fragment1 = @{
+ options = @{
+ fragment1_1 = @{ type = "str" }
+ fragment1_2 = @{ type = "str" }
+ }
+ mutually_exclusive = @(
+ ,@('fragment1_1', 'fragment1_2')
+ )
+ }
+ $fragment2 = @{
+ options = @{
+ fragment2 = @{ type = "str" }
+ }
+ }
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ fragment1_1 = "fragment1_1"
+ fragment1_2 = "fragment1_2"
+ fragment2 = "fragment2"
+ }
+
+ $failed = $false
+ try {
+ [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1, $fragment2))
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg | Assert-Equals -Expected "parameters are mutually exclusive: fragment1_1, fragment1_2"
+ $actual.invocation | Assert-DictionaryEquals -Expected @{ module_args = $complex_args }
+ }
+
+ "Fragment spec with no_log" = {
+ $spec = @{
+ options = @{
+ option1 = @{
+ aliases = @("alias")
+ }
+ }
+ }
+ $fragment1 = @{
+ options = @{
+ option1 = @{
+ no_log = $true # Makes sure that a value set in the fragment but not in the spec is respected.
+ type = "str"
+ }
+ }
+ }
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ alias = "option1"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment1))
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.invocation | Assert-DictionaryEquals -Expected @{
+ module_args = @{
+ option1 = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ alias = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+ }
+ }
+ }
+
+ "Catch invalid fragment spec format" = {
+ $spec = @{
+ options = @{
+ option1 = @{ type = "str" }
+ }
+ }
+ $fragment = @{
+ options = @{}
+ invalid = "will fail"
+ }
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ option1 = "option1"
+ }
+
+ $failed = $false
+ try {
+ [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @($fragment))
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 1"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.failed | Assert-Equals -Expected $true
+ $actual.msg.StartsWith("internal error: argument spec entry contains an invalid key 'invalid', valid keys: ") | Assert-Equals -Expected $true
+ }
+
+ "Spec with different list types" = {
+ $spec = @{
+ options = @{
+ # Single element of the same list type not in a list
+ option1 = @{
+ aliases = "alias1"
+ deprecated_aliases = @{name="alias1";version="2.0";collection_name="foo.bar"}
+ }
+
+ # Arrays
+ option2 = @{
+ aliases = ,"alias2"
+ deprecated_aliases = ,@{name="alias2";version="2.0";collection_name="foo.bar"}
+ }
+
+ # ArrayList
+ option3 = @{
+ aliases = [System.Collections.ArrayList]@("alias3")
+ deprecated_aliases = [System.Collections.ArrayList]@(@{name="alias3";version="2.0";collection_name="foo.bar"})
+ }
+
+ # Generic.List[Object]
+ option4 = @{
+ aliases = [System.Collections.Generic.List[Object]]@("alias4")
+ deprecated_aliases = [System.Collections.Generic.List[Object]]@(@{name="alias4";version="2.0";collection_name="foo.bar"})
+ }
+
+ # Generic.List[T]
+ option5 = @{
+ aliases = [System.Collections.Generic.List[String]]@("alias5")
+ deprecated_aliases = [System.Collections.Generic.List[Hashtable]]@()
+ }
+ }
+ }
+ $spec.options.option5.deprecated_aliases.Add(@{name="alias5";version="2.0";collection_name="foo.bar"})
+
+ Set-Variable -Name complex_args -Scope Global -Value @{
+ alias1 = "option1"
+ alias2 = "option2"
+ alias3 = "option3"
+ alias4 = "option4"
+ alias5 = "option5"
+ }
+ $m = [Ansible.Basic.AnsibleModule]::Create(@(), $spec)
+
+ $failed = $false
+ try {
+ $m.ExitJson()
+ } catch [System.Management.Automation.RuntimeException] {
+ $failed = $true
+ $_.Exception.Message | Assert-Equals -Expected "exit: 0"
+ $actual = [Ansible.Basic.AnsibleModule]::FromJson($_.Exception.InnerException.Output)
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual.changed | Assert-Equals -Expected $false
+ $actual.deprecations.Count | Assert-Equals -Expected 5
+ foreach ($dep in $actual.deprecations) {
+ $dep.msg -like "Alias 'alias?' is deprecated. See the module docs for more information" | Assert-Equals -Expected $true
+ $dep.version | Assert-Equals -Expected '2.0'
+ $dep.collection_name | Assert-Equals -Expected 'foo.bar'
+ }
+ $actual.invocation | Assert-DictionaryEquals -Expected @{
+ module_args = @{
+ alias1 = "option1"
+ option1 = "option1"
+ alias2 = "option2"
+ option2 = "option2"
+ alias3 = "option3"
+ option3 = "option3"
+ alias4 = "option4"
+ option4 = "option4"
+ alias5 = "option5"
+ option5 = "option5"
+ }
+ }
+ }
+}
+
+try {
+ foreach ($test_impl in $tests.GetEnumerator()) {
+ # Reset the variables before each test
+ Set-Variable -Name complex_args -Value @{} -Scope Global
+
+ $test = $test_impl.Key
+ &$test_impl.Value
+ }
+ $module.Result.data = "success"
+} catch [System.Management.Automation.RuntimeException] {
+ $module.Result.failed = $true
+ $module.Result.test = $test
+ $module.Result.line = $_.InvocationInfo.ScriptLineNumber
+ $module.Result.method = $_.InvocationInfo.Line.Trim()
+
+    if ($_.Exception.Message.StartsWith("exit: ")) {
+        # The exception was caused by an unexpected exit call, log its output on the module result
+ $module.Result.output = (ConvertFrom-Json -InputObject $_.Exception.InnerException.Output)
+ $module.Result.msg = "Uncaught AnsibleModule exit in tests, see output"
+ } else {
+ # Unrelated exception
+ $module.Result.exception = $_.Exception.ToString()
+ $module.Result.msg = "Uncaught exception: $(($_ | Out-String).ToString())"
+ }
+}
+
+Exit-Module
diff --git a/test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml
new file mode 100644
index 00000000..010c2d50
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Basic/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: test Ansible.Basic.cs
+ ansible_basic_tests:
+ register: ansible_basic_test
+
+- name: assert test Ansible.Basic.cs
+ assert:
+ that:
+ - ansible_basic_test.data == "success"
diff --git a/test/integration/targets/module_utils_Ansible.Become/aliases b/test/integration/targets/module_utils_Ansible.Become/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Become/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1 b/test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1
new file mode 100644
index 00000000..4d1f319b
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Become/library/ansible_become_tests.ps1
@@ -0,0 +1,1009 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -CSharpUtil Ansible.Become
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actual_value = $Actual[$i]
+ $expected_value = $Expected[$i]
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+ $module.FailJson("AssertionError: actual != expected")
+ }
+}
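+
+# Note: Assert-Equals compares arrays element-wise (recursing into nested
+# values) and everything else with -ceq, so comparisons are case-sensitive.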
+
+# It would be great to move win_whoami out into its own module util and share
+# the code here; for now just rely on a cut-down version
+$test_whoami = {
+ Add-Type -TypeDefinition @'
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct LSA_UNICODE_STRING
+ {
+ public UInt16 Length;
+ public UInt16 MaximumLength;
+ public IntPtr Buffer;
+
+ public override string ToString()
+ {
+ return Marshal.PtrToStringUni(Buffer, Length / sizeof(char));
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID
+ {
+ public UInt32 LowPart;
+ public Int32 HighPart;
+
+ public static explicit operator UInt64(LUID l)
+ {
+ return (UInt64)((UInt64)l.HighPart << 32) | (UInt64)l.LowPart;
+ }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SECURITY_LOGON_SESSION_DATA
+ {
+ public UInt32 Size;
+ public LUID LogonId;
+ public LSA_UNICODE_STRING UserName;
+ public LSA_UNICODE_STRING LogonDomain;
+ public LSA_UNICODE_STRING AuthenticationPackage;
+ public SECURITY_LOGON_TYPE LogonType;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SID_AND_ATTRIBUTES
+ {
+ public IntPtr Sid;
+ public int Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_MANDATORY_LABEL
+ {
+ public SID_AND_ATTRIBUTES Label;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_SOURCE
+ {
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 8)] public char[] SourceName;
+ public LUID SourceIdentifier;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_STATISTICS
+ {
+ public LUID TokenId;
+ public LUID AuthenticationId;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_USER
+ {
+ public SID_AND_ATTRIBUTES User;
+ }
+
+ public enum SECURITY_LOGON_TYPE
+ {
+            System = 0, // Used only by the System account
+ Interactive = 2,
+ Network,
+ Batch,
+ Service,
+ Proxy,
+ Unlock,
+ NetworkCleartext,
+ NewCredentials,
+ RemoteInteractive,
+ CachedInteractive,
+ CachedRemoteInteractive,
+ CachedUnlock
+ }
+
+ public enum TokenInformationClass
+ {
+ TokenUser = 1,
+ TokenSource = 7,
+ TokenStatistics = 10,
+ TokenIntegrityLevel = 25,
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("kernel32.dll", SetLastError = true)]
+ public static extern bool CloseHandle(
+ IntPtr hObject);
+
+ [DllImport("kernel32.dll")]
+ public static extern SafeNativeHandle GetCurrentProcess();
+
+ [DllImport("userenv.dll", SetLastError = true)]
+ public static extern bool GetProfileType(
+ out UInt32 dwFlags);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool GetTokenInformation(
+ SafeNativeHandle TokenHandle,
+ NativeHelpers.TokenInformationClass TokenInformationClass,
+ SafeMemoryBuffer TokenInformation,
+ UInt32 TokenInformationLength,
+ out UInt32 ReturnLength);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ public static extern bool LookupAccountSid(
+ string lpSystemName,
+ IntPtr Sid,
+ StringBuilder lpName,
+ ref UInt32 cchName,
+ StringBuilder ReferencedDomainName,
+ ref UInt32 cchReferencedDomainName,
+ out UInt32 peUse);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaEnumerateLogonSessions(
+ out UInt32 LogonSessionCount,
+ out SafeLsaMemoryBuffer LogonSessionList);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaFreeReturnBuffer(
+ IntPtr Buffer);
+
+ [DllImport("secur32.dll", SetLastError = true)]
+ public static extern UInt32 LsaGetLogonSessionData(
+ IntPtr LogonId,
+ out SafeLsaMemoryBuffer ppLogonSessionData);
+
+ [DllImport("advapi32.dll")]
+ public static extern UInt32 LsaNtStatusToWinError(
+ UInt32 Status);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ public static extern bool OpenProcessToken(
+ SafeNativeHandle ProcessHandle,
+ TokenAccessLevels DesiredAccess,
+ out SafeNativeHandle TokenHandle);
+ }
+
+ internal class SafeLsaMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeLsaMemoryBuffer() : base(true) { }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ UInt32 res = NativeMethods.LsaFreeReturnBuffer(handle);
+ return res == 0;
+ }
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ internal class SafeNativeHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeNativeHandle() : base(true) { }
+ public SafeNativeHandle(IntPtr handle) : base(true) { this.handle = handle; }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ return NativeMethods.CloseHandle(handle);
+ }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class Logon
+ {
+ public string AuthenticationPackage { get; internal set; }
+ public string LogonType { get; internal set; }
+ public string MandatoryLabelName { get; internal set; }
+ public SecurityIdentifier MandatoryLabelSid { get; internal set; }
+ public bool ProfileLoaded { get; internal set; }
+ public string SourceName { get; internal set; }
+ public string UserName { get; internal set; }
+ public SecurityIdentifier UserSid { get; internal set; }
+
+ public Logon()
+ {
+ using (SafeNativeHandle process = NativeMethods.GetCurrentProcess())
+ {
+ TokenAccessLevels dwAccess = TokenAccessLevels.Query | TokenAccessLevels.QuerySource;
+
+ SafeNativeHandle hToken;
+ NativeMethods.OpenProcessToken(process, dwAccess, out hToken);
+ using (hToken)
+ {
+ SetLogonSessionData(hToken);
+ SetTokenMandatoryLabel(hToken);
+ SetTokenSource(hToken);
+ SetTokenUser(hToken);
+ }
+ }
+ SetProfileLoaded();
+ }
+
+ private void SetLogonSessionData(SafeNativeHandle hToken)
+ {
+ NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenStatistics;
+ UInt32 returnLength;
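+            // The first call is expected to fail; it only reports the required buffer size in returnLength.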
+ NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength);
+
+ UInt64 tokenLuidId;
+ using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength))
+ {
+ if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength))
+ throw new Win32Exception("GetTokenInformation(TokenStatistics) failed");
+
+ NativeHelpers.TOKEN_STATISTICS stats = (NativeHelpers.TOKEN_STATISTICS)Marshal.PtrToStructure(
+ infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_STATISTICS));
+ tokenLuidId = (UInt64)stats.AuthenticationId;
+ }
+
+ UInt32 sessionCount;
+ SafeLsaMemoryBuffer sessionPtr;
+ UInt32 res = NativeMethods.LsaEnumerateLogonSessions(out sessionCount, out sessionPtr);
+ if (res != 0)
+                    throw new Win32Exception((int)NativeMethods.LsaNtStatusToWinError(res), "LsaEnumerateLogonSessions() failed");
+ using (sessionPtr)
+ {
+ IntPtr currentSession = sessionPtr.DangerousGetHandle();
+ for (UInt32 i = 0; i < sessionCount; i++)
+ {
+ SafeLsaMemoryBuffer sessionDataPtr;
+ res = NativeMethods.LsaGetLogonSessionData(currentSession, out sessionDataPtr);
+ if (res != 0)
+ {
+ currentSession = IntPtr.Add(currentSession, Marshal.SizeOf(typeof(NativeHelpers.LUID)));
+ continue;
+ }
+ using (sessionDataPtr)
+ {
+ NativeHelpers.SECURITY_LOGON_SESSION_DATA sessionData = (NativeHelpers.SECURITY_LOGON_SESSION_DATA)Marshal.PtrToStructure(
+ sessionDataPtr.DangerousGetHandle(), typeof(NativeHelpers.SECURITY_LOGON_SESSION_DATA));
+ UInt64 sessionId = (UInt64)sessionData.LogonId;
+ if (sessionId == tokenLuidId)
+ {
+ AuthenticationPackage = sessionData.AuthenticationPackage.ToString();
+ LogonType = sessionData.LogonType.ToString();
+ break;
+ }
+ }
+
+ currentSession = IntPtr.Add(currentSession, Marshal.SizeOf(typeof(NativeHelpers.LUID)));
+ }
+ }
+ }
+
+ private void SetTokenMandatoryLabel(SafeNativeHandle hToken)
+ {
+ NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenIntegrityLevel;
+ UInt32 returnLength;
+ NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength);
+ using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength))
+ {
+ if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength))
+ throw new Win32Exception("GetTokenInformation(TokenIntegrityLevel) failed");
+ NativeHelpers.TOKEN_MANDATORY_LABEL label = (NativeHelpers.TOKEN_MANDATORY_LABEL)Marshal.PtrToStructure(
+ infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_MANDATORY_LABEL));
+ MandatoryLabelName = LookupSidName(label.Label.Sid);
+ MandatoryLabelSid = new SecurityIdentifier(label.Label.Sid);
+ }
+ }
+
+ private void SetTokenSource(SafeNativeHandle hToken)
+ {
+ NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenSource;
+ UInt32 returnLength;
+ NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength);
+ using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength))
+ {
+ if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength))
+ throw new Win32Exception("GetTokenInformation(TokenSource) failed");
+ NativeHelpers.TOKEN_SOURCE source = (NativeHelpers.TOKEN_SOURCE)Marshal.PtrToStructure(
+ infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_SOURCE));
+ SourceName = new string(source.SourceName).Replace('\0', ' ').TrimEnd();
+ }
+ }
+
+ private void SetTokenUser(SafeNativeHandle hToken)
+ {
+ NativeHelpers.TokenInformationClass tokenClass = NativeHelpers.TokenInformationClass.TokenUser;
+ UInt32 returnLength;
+ NativeMethods.GetTokenInformation(hToken, tokenClass, new SafeMemoryBuffer(IntPtr.Zero), 0, out returnLength);
+ using (SafeMemoryBuffer infoPtr = new SafeMemoryBuffer((int)returnLength))
+ {
+ if (!NativeMethods.GetTokenInformation(hToken, tokenClass, infoPtr, returnLength, out returnLength))
+ throw new Win32Exception("GetTokenInformation(TokenSource) failed");
+ NativeHelpers.TOKEN_USER user = (NativeHelpers.TOKEN_USER)Marshal.PtrToStructure(
+ infoPtr.DangerousGetHandle(), typeof(NativeHelpers.TOKEN_USER));
+ UserName = LookupSidName(user.User.Sid);
+ UserSid = new SecurityIdentifier(user.User.Sid);
+ }
+ }
+
+ private void SetProfileLoaded()
+ {
+ UInt32 flags;
+ ProfileLoaded = NativeMethods.GetProfileType(out flags);
+ }
+
+ private static string LookupSidName(IntPtr pSid)
+ {
+ StringBuilder name = new StringBuilder(0);
+ StringBuilder domain = new StringBuilder(0);
+ UInt32 nameLength = 0;
+ UInt32 domainLength = 0;
+ UInt32 peUse;
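+            // The first call with empty builders just reports the required name/domain lengths; retry below.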
+ NativeMethods.LookupAccountSid(null, pSid, name, ref nameLength, domain, ref domainLength, out peUse);
+ name.EnsureCapacity((int)nameLength);
+ domain.EnsureCapacity((int)domainLength);
+
+ if (!NativeMethods.LookupAccountSid(null, pSid, name, ref nameLength, domain, ref domainLength, out peUse))
+ throw new Win32Exception("LookupAccountSid() failed");
+
+ return String.Format("{0}\\{1}", domain.ToString(), name.ToString());
+ }
+ }
+}
+'@
+ $logon = New-Object -TypeName Ansible.Logon
+ ConvertTo-Json -InputObject $logon
+}.ToString()
+
+$current_user_raw = [Ansible.Process.ProcessUtil]::CreateProcess($null, "powershell.exe -NoProfile -", $null, $null, $test_whoami + "`r`n")
+$current_user = ConvertFrom-Json -InputObject $current_user_raw.StandardOut
+
+$adsi = [ADSI]"WinNT://$env:COMPUTERNAME"
+
+$standard_user = "become_standard"
+$admin_user = "become_admin"
+$become_pass = "password123!$([System.IO.Path]::GetRandomFileName())"
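+
+# Well-known mandatory integrity label SIDs asserted against below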
+$medium_integrity_sid = "S-1-16-8192"
+$high_integrity_sid = "S-1-16-12288"
+$system_integrity_sid = "S-1-16-16384"
+
+$tests = @{
+ "Runas standard user" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ }
+
+ "Runas admin user" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ }
+
+ "Runas SYSTEM" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "System"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected "S-1-5-18"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $system_integrity_sid
+
+ $with_domain = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NT AUTHORITY\System", $null, "whoami.exe")
+ $with_domain.StandardOut | Assert-Equals -Expected "nt authority\system`r`n"
+ }
+
+ "Runas LocalService" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("LocalService", $null,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Service"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected "S-1-5-19"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $system_integrity_sid
+
+ $with_domain = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NT AUTHORITY\LocalService", $null, "whoami.exe")
+ $with_domain.StandardOut | Assert-Equals -Expected "nt authority\local service`r`n"
+ }
+
+ "Runas NetworkService" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NetworkService", $null,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Service"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected "S-1-5-20"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $system_integrity_sid
+
+ $with_domain = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("NT AUTHORITY\NetworkService", $null, "whoami.exe")
+ $with_domain.StandardOut | Assert-Equals -Expected "nt authority\network service`r`n"
+ }
+
+ "Runas without working dir set" = {
+ $expected = "$env:SystemRoot\system32`r`n"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, 0, "Interactive", $null,
+ 'powershell.exe $pwd.Path', $null, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Runas with working dir set" = {
+ $expected = "$env:SystemRoot`r`n"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, 0, "Interactive", $null,
+ 'powershell.exe $pwd.Path', $env:SystemRoot, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Runas without environment set" = {
+ $expected = "Windows_NT`r`n"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, 0, "Interactive", $null,
+ 'powershell.exe $env:TEST; $env:OS', $null, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Runas with environment set" = {
+ $env_vars = @{
+ TEST = "tesTing"
+ TEST2 = "Testing 2"
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null,
+ 'cmd.exe /c set', $null, $env_vars, "")
+ ("TEST=tesTing" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true
+ ("TEST2=Testing 2" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true
+ ("OS=Windows_NT" -cnotin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Runas with string stdin" = {
+ $expected = "input value`r`n`r`n"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null,
+ 'powershell.exe [System.Console]::In.ReadToEnd()', $null, $null, "input value")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Runas with string stdin and newline" = {
+ $expected = "input value`r`n`r`n"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null,
+ 'powershell.exe [System.Console]::In.ReadToEnd()', $null, $null, "input value`r`n")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Runas with byte stdin" = {
+ $expected = "input value`r`n"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0, "Interactive", $null,
+ 'powershell.exe [System.Console]::In.ReadToEnd()', $null, $null, [System.Text.Encoding]::UTF8.GetBytes("input value"))
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "Missing executable" = {
+ $failed = $false
+ try {
+ [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, "fake.exe")
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.Process.Win32Exception"
+ $expected = 'Exception calling "CreateProcessAsUser" with "3" argument(s): "CreateProcessWithTokenW() failed '
+ $expected += '(The system cannot find the file specified, Win32ErrorCode 2)"'
+ $_.Exception.Message | Assert-Equals -Expected $expected
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "CreateProcessAsUser with lpApplicationName" = {
+ $expected = "abc`r`n"
+ $full_path = "$($env:SystemRoot)\System32\WindowsPowerShell\v1.0\powershell.exe"
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $full_path,
+ "Write-Output 'abc'", $null, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $full_path,
+ "powershell.exe Write-Output 'abc'", $null, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcessAsUser with stderr" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $null,
+ "powershell.exe [System.Console]::Error.WriteLine('hi')", $null, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected ""
+ $actual.StandardError | Assert-Equals -Expected "hi`r`n"
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcessAsUser with exit code" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("SYSTEM", $null, 0, "Interactive", $null,
+ "powershell.exe exit 10", $null, $null, "")
+ $actual.StandardOut | Assert-Equals -Expected ""
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 10
+ }
+
+ "Local account with computer name" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("$env:COMPUTERNAME\$standard_user", $become_pass,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ }
+
+ "Local account with computer as period" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser(".\$standard_user", $become_pass,
+ "powershell.exe -NoProfile -ExecutionPolicy ByPass -File $tmp_script")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ }
+
+ "Local account with invalid password" = {
+ $failed = $false
+ try {
+ [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, "incorrect", "powershell.exe Write-Output abc")
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Win32Exception"
+ # Server 2008 has a slightly different error msg, just assert we get the error 1326
+ ($_.Exception.Message.Contains("Win32ErrorCode 1326")) | Assert-Equals -Expected $true
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Invalid account" = {
+ $failed = $false
+ try {
+ [Ansible.Become.BecomeUtil]::CreateProcessAsUser("incorrect", "incorrect", "powershell.exe Write-Output abc")
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "System.Security.Principal.IdentityNotMappedException"
+ $expected = 'Exception calling "CreateProcessAsUser" with "3" argument(s): "Some or all '
+ $expected += 'identity references could not be translated."'
+ $_.Exception.Message | Assert-Equals -Expected $expected
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Interactive logon with standard" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile",
+ "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ }
+
+ "Batch logon with standard" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile",
+ "Batch", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Batch"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ }
+
+ "Network logon with standard" = {
+        # Server 2008 will not work with become to the Network or NetworkCleartext logon types
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") {
+ continue
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile",
+ "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Network"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ }
+
+ "Network with cleartext logon with standard" = {
+ # Server 2008 will not work with become to Network or Network Cleartext
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") {
+ continue
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, $become_pass, "WithProfile",
+ "NetworkCleartext", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "NetworkCleartext"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ }
+
+ "Logon without password with standard" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, [NullString]::Value, "WithProfile",
+ "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+        # Too unstable; another process may still be lingering, which causes become to steal its token
+        # instead of using S4U. Just skip the type and source checks and verify we can become without a password
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ # $stdout.LogonType | Assert-Equals -Expected "Batch"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ # $stdout.SourceName | Assert-Equals -Expected "ansible"
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ }
+
+ "Logon without password and network type with standard" = {
+ # Server 2008 will not work with become to Network or Network Cleartext
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") {
+ continue
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($standard_user, [NullString]::Value, "WithProfile",
+ "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+        # Too unstable; another process may still be lingering, which causes become to steal its token
+        # instead of using S4U. Just skip the type and source checks and verify we can become without a password
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ # $stdout.LogonType | Assert-Equals -Expected "Network"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $medium_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ # $stdout.SourceName | Assert-Equals -Expected "ansible"
+ $stdout.UserSid.Value | Assert-Equals -Expected $standard_user_sid
+ }
+
+ "Interactive logon with admin" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile",
+ "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Batch logon with admin" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile",
+ "Batch", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Batch"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Network logon with admin" = {
+        # Server 2008 will not work with become to the Network or NetworkCleartext logon types
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") {
+ continue
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile",
+ "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Network"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Network with cleartext logon with admin" = {
+        # Server 2008 will not work with become to the Network or NetworkCleartext logon types
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") {
+ continue
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, "WithProfile",
+ "NetworkCleartext", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "NetworkCleartext"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Fail to logon with null or empty password" = {
+ $failed = $false
+ try {
+            # Having $null or an empty string means we are trying to become a user with a blank
+            # password, not become without setting a password. This is confusing because PowerShell
+            # converts $null to "" for string parameters; [NullString]::Value must be used instead
+            # for the no-password behaviour. This just tests that an empty string won't go down the
+            # S4U route.
+ [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $null, "WithProfile",
+ "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.AccessToken.Win32Exception"
+ # Server 2008 has a slightly different error msg, just assert we get the error 1326
+ ($_.Exception.Message.Contains("Win32ErrorCode 1326")) | Assert-Equals -Expected $true
+ }
+ $failed | Assert-Equals -Expected $true
+ }
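+
+    # A minimal sketch of the distinction tested above: PowerShell coerces $null
+    # to "" when binding a .NET string parameter, so
+    #   CreateProcessAsUser($user, $null, ...)               -> logon with a blank password
+    #   CreateProcessAsUser($user, [NullString]::Value, ...) -> no password (S4U path)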
+
+ "Logon without password with admin" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, [NullString]::Value, "WithProfile",
+ "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+        # Too unstable; another process may still be lingering, which causes become to steal its token
+        # instead of using S4U. Just skip the type and source checks and verify we can become without a password
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ # $stdout.LogonType | Assert-Equals -Expected "Batch"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ # $stdout.SourceName | Assert-Equals -Expected "ansible"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Logon without password and network type with admin" = {
+ # become network doesn't work on Server 2008
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.1") {
+ continue
+ }
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, [NullString]::Value, "WithProfile",
+ "Network", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+        # Too unstable; another process may still be lingering, which causes become to steal its token
+        # instead of using S4U. Just skip the type and source checks and verify we can become without a password
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ # $stdout.LogonType | Assert-Equals -Expected "Network"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $true
+ # $stdout.SourceName | Assert-Equals -Expected "ansible"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Logon without profile with admin" = {
+        # Server 2008 and 2008 R2 do not support running without the profile being set
+ if ([System.Environment]::OSVersion.Version -lt [Version]"6.2") {
+ continue
+ }
+
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser($admin_user, $become_pass, 0,
+ "Interactive", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "Interactive"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $high_integrity_sid
+ $stdout.ProfileLoaded | Assert-Equals -Expected $false
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $admin_user_sid
+ }
+
+ "Logon with network credentials and no profile" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("fakeuser", "fakepassword", "NetcredentialsOnly",
+ "NewCredentials", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "NewCredentials"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $current_user.MandatoryLabelSid.Value
+
+ # while we didn't set WithProfile, the new process is based on the current process
+ $stdout.ProfileLoaded | Assert-Equals -Expected $current_user.ProfileLoaded
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $current_user.UserSid.Value
+ }
+
+ "Logon with network credentials and with profile" = {
+ $actual = [Ansible.Become.BecomeUtil]::CreateProcessAsUser("fakeuser", "fakepassword", "NetcredentialsOnly, WithProfile",
+ "NewCredentials", $null, "powershell.exe -NoProfile -", $tmp_dir, $null, $test_whoami + "`r`n")
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $stdout = ConvertFrom-Json -InputObject $actual.StandardOut
+ $stdout.LogonType | Assert-Equals -Expected "NewCredentials"
+ $stdout.MandatoryLabelSid.Value | Assert-Equals -Expected $current_user.MandatoryLabelSid.Value
+ $stdout.ProfileLoaded | Assert-Equals -Expected $current_user.ProfileLoaded
+ $stdout.SourceName | Assert-Equals -Expected "Advapi"
+ $stdout.UserSid.Value | Assert-Equals -Expected $current_user.UserSid.Value
+ }
+}
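+
+# For reference, the long CreateProcessAsUser overload used throughout takes:
+# username, password, logon flags, logon type, lpApplicationName, lpCommandLine,
+# working directory, environment and stdin (a string or byte[]).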
+
+try {
+ $tmp_dir = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath ([System.IO.Path]::GetRandomFileName())
+ New-Item -Path $tmp_dir -ItemType Directory > $null
+ $acl = Get-Acl -Path $tmp_dir
+ $ace = New-Object -TypeName System.Security.AccessControl.FileSystemAccessRule -ArgumentList @(
+ New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList ([System.Security.Principal.WellKnownSidType]::WorldSid, $null)
+ [System.Security.AccessControl.FileSystemRights]::FullControl,
+ [System.Security.AccessControl.InheritanceFlags]"ContainerInherit, ObjectInherit",
+ [System.Security.AccessControl.PropagationFlags]::None,
+ [System.Security.AccessControl.AccessControlType]::Allow
+ )
+ $acl.AddAccessRule($ace)
+ Set-Acl -Path $tmp_dir -AclObject $acl
+
+ $tmp_script = Join-Path -Path $tmp_dir -ChildPath "whoami.ps1"
+ Set-Content -LiteralPath $tmp_script -Value $test_whoami
+
+ foreach ($user in $standard_user, $admin_user) {
+ $user_obj = $adsi.Children | Where-Object { $_.SchemaClassName -eq "User" -and $_.Name -eq $user }
+ if ($null -eq $user_obj) {
+ $user_obj = $adsi.Create("User", $user)
+ $user_obj.SetPassword($become_pass)
+ $user_obj.SetInfo()
+ } else {
+ $user_obj.SetPassword($become_pass)
+ }
+ $user_obj.RefreshCache()
+
+ if ($user -eq $standard_user) {
+ $standard_user_sid = (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($user_obj.ObjectSid.Value, 0)).Value
+ $group = [System.Security.Principal.WellKnownSidType]::BuiltinUsersSid
+ } else {
+ $admin_user_sid = (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($user_obj.ObjectSid.Value, 0)).Value
+ $group = [System.Security.Principal.WellKnownSidType]::BuiltinAdministratorsSid
+ }
+ $group = (New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList $group, $null).Value
+ [string[]]$current_groups = $user_obj.Groups() | ForEach-Object {
+ New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($_.GetType().InvokeMember("objectSID", "GetProperty", $null, $_, $null), 0)
+ }
+ if ($current_groups -notcontains $group) {
+ $group_obj = $adsi.Children | Where-Object {
+ if ($_.SchemaClassName -eq "Group") {
+ $group_sid = New-Object -TypeName System.Security.Principal.SecurityIdentifier -ArgumentList @($_.objectSID.Value, 0)
+ $group_sid -eq $group
+ }
+ }
+ $group_obj.Add($user_obj.Path)
+ }
+ }
+ foreach ($test_impl in $tests.GetEnumerator()) {
+ $test = $test_impl.Key
+ &$test_impl.Value
+ }
+} finally {
+ Remove-Item -LiteralPath $tmp_dir -Force -Recurse
+ foreach ($user in $standard_user, $admin_user) {
+ $user_obj = $adsi.Children | Where-Object { $_.SchemaClassName -eq "User" -and $_.Name -eq $user }
+ $adsi.Delete("User", $user_obj.Name.Value)
+ }
+}
+
+
+$module.Result.data = "success"
+$module.ExitJson()
+
diff --git a/test/integration/targets/module_utils_Ansible.Become/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Become/tasks/main.yml
new file mode 100644
index 00000000..deb228b5
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Become/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+# Users don't have this right by default, so temporarily enable it
+- name: ensure the Users group has the SeBatchLogonRight
+ win_user_right:
+ name: SeBatchLogonRight
+ users:
+ - Users
+ action: add
+ register: batch_user_add
+
+- block:
+ - name: test Ansible.Become.cs
+ ansible_become_tests:
+ register: ansible_become_tests
+
+ always:
+ - name: remove SeBatchLogonRight from users if added in test
+ win_user_right:
+ name: SeBatchLogonRight
+ users:
+ - Users
+ action: remove
+ when: batch_user_add is changed
+
+- name: assert test Ansible.Become.cs
+ assert:
+ that:
+ - ansible_become_tests.data == "success"
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1
new file mode 100644
index 00000000..d89f99b7
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/library/add_type_test.ps1
@@ -0,0 +1,299 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.AddType
+
+$ErrorActionPreference = "Stop"
+
+$result = @{
+ changed = $false
+}
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -cne $expected) {
+        $call_stack = (Get-PSCallStack)[1]
+        $error_msg = "AssertionError:`r`nActual: `"$actual`" != Expected: `"$expected`"`r`nLine: $($call_stack.ScriptLineNumber), Method: $($call_stack.Position.Text)"
+        Fail-Json -obj $result -message $error_msg
+    }
+}
+
+$code = @'
+using System;
+
+namespace Namespace1
+{
+    public class Class1
+    {
+        public static string GetString(bool error)
+        {
+            if (error)
+                throw new Exception("error");
+            return "Hello World";
+        }
+    }
+}
+'@
+$res = Add-CSharpType -References $code
+Assert-Equals -actual $res -expected $null
+
+$actual = [Namespace1.Class1]::GetString($false)
+Assert-Equals $actual -expected "Hello World"
+
+try {
+    [Namespace1.Class1]::GetString($true)
+} catch {
+    Assert-Equals ($_.Exception.ToString().Contains("at Namespace1.Class1.GetString(Boolean error)`r`n")) -expected $true
+}
+
+$code_debug = @'
+using System;
+
+namespace Namespace2
+{
+    public class Class2
+    {
+        public static string GetString(bool error)
+        {
+            if (error)
+                throw new Exception("error");
+            return "Hello World";
+        }
+    }
+}
+'@
+$res = Add-CSharpType -References $code_debug -IncludeDebugInfo
+Assert-Equals -actual $res -expected $null
+
+$actual = [Namespace2.Class2]::GetString($false)
+Assert-Equals $actual -expected "Hello World"
+
+try {
+    [Namespace2.Class2]::GetString($true)
+} catch {
+    $tmp_path = [System.IO.Path]::GetFullPath($env:TMP).ToLower()
+    Assert-Equals ($_.Exception.ToString().ToLower().Contains("at namespace2.class2.getstring(boolean error) in $tmp_path")) -expected $true
+    Assert-Equals ($_.Exception.ToString().Contains(".cs:line 10")) -expected $true
+}
+
+$code_tmp = @'
+using System;
+
+namespace Namespace3
+{
+    public class Class3
+    {
+        public static string GetString(bool error)
+        {
+            if (error)
+                throw new Exception("error");
+            return "Hello World";
+        }
+    }
+}
+'@
+$tmp_path = $env:USERPROFILE
+$res = Add-CSharpType -References $code_tmp -IncludeDebugInfo -TempPath $tmp_path -PassThru
+Assert-Equals -actual $res.GetType().Name -expected "RuntimeAssembly"
+Assert-Equals -actual $res.Location -expected ""
+Assert-Equals -actual $res.GetTypes().Length -expected 1
+Assert-Equals -actual $res.GetTypes()[0].Name -expected "Class3"
+
+$actual = [Namespace3.Class3]::GetString($false)
+Assert-Equals $actual -expected "Hello World"
+
+try {
+    [Namespace3.Class3]::GetString($true)
+} catch {
+    Assert-Equals ($_.Exception.ToString().ToLower().Contains("at namespace3.class3.getstring(boolean error) in $($tmp_path.ToLower())")) -expected $true
+    Assert-Equals ($_.Exception.ToString().Contains(".cs:line 10")) -expected $true
+}
+
+$warning_code = @'
+using System;
+
+namespace Namespace4
+{
+    public class Class4
+    {
+        public static string GetString(bool test)
+        {
+            if (test)
+            {
+                string a = "";
+            }
+
+            return "Hello World";
+        }
+    }
+}
+'@
+$failed = $false
+try {
+    Add-CSharpType -References $warning_code
+} catch {
+    $failed = $true
+    Assert-Equals -actual ($_.Exception.Message.Contains("error CS0219: Warning as Error: The variable 'a' is assigned but its value is never used")) -expected $true
+}
+Assert-Equals -actual $failed -expected $true
+
+Add-CSharpType -References $warning_code -IgnoreWarnings
+$actual = [Namespace4.Class4]::GetString($true)
+Assert-Equals -actual $actual -expected "Hello World"
+
+$reference_1 = @'
+using System;
+using System.Web.Script.Serialization;
+
+//AssemblyReference -Name System.Web.Extensions.dll
+
+namespace Namespace5
+{
+    public class Class5
+    {
+        public static string GetString()
+        {
+            return "Hello World";
+        }
+    }
+}
+'@
+
+$reference_2 = @'
+using System;
+using Namespace5;
+using System.Management.Automation;
+using System.Collections;
+using System.Collections.Generic;
+
+namespace Namespace6
+{
+    public class Class6
+    {
+        public static string GetString()
+        {
+            Hashtable hash = new Hashtable();
+            hash["test"] = "abc";
+            return Class5.GetString();
+        }
+    }
+}
+'@
+
+Add-CSharpType -References $reference_1, $reference_2
+$actual = [Namespace6.Class6]::GetString()
+Assert-Equals -actual $actual -expected "Hello World"
+
+$ignored_warning = @'
+using System;
+
+//NoWarn -Name CS0219
+
+namespace Namespace7
+{
+    public class Class7
+    {
+        public static string GetString()
+        {
+            string a = "";
+            return "abc";
+        }
+    }
+}
+'@
+Add-CSharpType -References $ignored_warning
+$actual = [Namespace7.Class7]::GetString()
+Assert-Equals -actual $actual -expected "abc"
+
+$defined_symbol = @'
+using System;
+
+namespace Namespace8
+{
+    public class Class8
+    {
+        public static string GetString()
+        {
+#if SYMBOL1
+            string a = "symbol";
+#else
+            string a = "no symbol";
+#endif
+            return a;
+        }
+    }
+}
+'@
+Add-CSharpType -References $defined_symbol -CompileSymbols "SYMBOL1"
+$actual = [Namespace8.Class8]::GetString()
+Assert-Equals -actual $actual -expected "symbol"
+
+$type_accelerator = @'
+using System;
+
+//TypeAccelerator -Name AnsibleType -TypeName Class9
+
+namespace Namespace9
+{
+    public class Class9
+    {
+        public static string GetString()
+        {
+            return "a";
+        }
+    }
+}
+'@
+Add-CSharpType -Reference $type_accelerator
+$actual = [AnsibleType]::GetString()
+Assert-Equals -actual $actual -expected "a"
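+
+# A short, hedged summary of the magic comments exercised in these tests
+# (inferred from the cases above rather than from separate documentation):
+# Add-CSharpType scans the C# source for special '//' directives, e.g.
+#   //AssemblyReference -Name System.Web.Extensions.dll   adds an assembly reference
+#   //NoWarn -Name CS0219                                  suppresses a compiler warning
+#   //TypeAccelerator -Name AnsibleType -TypeName Class9   registers a PS type accelerator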
+
+$missing_type_class = @'
+using System;
+
+//TypeAccelerator -Name AnsibleTypeMissing -TypeName MissingClass
+
+namespace Namespace10
+{
+    public class Class10
+    {
+        public static string GetString()
+        {
+            return "b";
+        }
+    }
+}
+'@
+$failed = $false
+try {
+    Add-CSharpType -Reference $missing_type_class
+} catch {
+    $failed = $true
+    Assert-Equals -actual $_.Exception.Message -expected "Failed to find compiled class 'MissingClass' for custom TypeAccelerator."
+}
+Assert-Equals -actual $failed -expected $true
+
+$arch_class = @'
+using System;
+
+namespace Namespace11
+{
+    public class Class11
+    {
+        public static int GetIntPtrSize()
+        {
+#if X86
+            return 4;
+#elif AMD64
+            return 8;
+#else
+            return 0;
+#endif
+        }
+    }
+}
+'@
+Add-CSharpType -Reference $arch_class
+Assert-Equals -actual ([Namespace11.Class11]::GetIntPtrSize()) -expected ([System.IntPtr]::Size)
+
+$result.res = "success"
+Exit-Json -obj $result
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml
new file mode 100644
index 00000000..4c4810be
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.AddType/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: call module with AddType tests
+  add_type_test:
+  register: add_type_test
+
+- name: assert call module with AddType tests
+  assert:
+    that:
+    - not add_type_test is failed
+    - add_type_test.res == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1
new file mode 100644
index 00000000..d7bd4bb3
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/library/argv_parser_test.ps1
@@ -0,0 +1,93 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.ArgvParser
+
+$ErrorActionPreference = 'Continue'
+
+$params = Parse-Args $args
+$exe = Get-AnsibleParam -obj $params -name "exe" -type "path" -failifempty $true
+
+# collects diagnostic info when a test below fails; without this the
+# $result.actual assignments further down would error on a $null value
+$result = @{}
+
+Add-Type -TypeDefinition @'
+using System.IO;
+using System.Threading;
+
+namespace Ansible.Command
+{
+    public static class NativeUtil
+    {
+        public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
+        {
+            var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
+            var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
+            string so = null, se = null;
+            ThreadPool.QueueUserWorkItem((s)=>
+            {
+                so = stdoutStream.ReadToEnd();
+                sowait.Set();
+            });
+            ThreadPool.QueueUserWorkItem((s) =>
+            {
+                se = stderrStream.ReadToEnd();
+                sewait.Set();
+            });
+            foreach(var wh in new WaitHandle[] { sowait, sewait })
+                wh.WaitOne();
+            stdout = so;
+            stderr = se;
+        }
+    }
+}
+'@
+
+Function Invoke-Process($executable, $arguments) {
+    $proc = New-Object System.Diagnostics.Process
+    $psi = $proc.StartInfo
+    $psi.FileName = $executable
+    $psi.Arguments = $arguments
+    $psi.RedirectStandardOutput = $true
+    $psi.RedirectStandardError = $true
+    $psi.UseShellExecute = $false
+
+    $proc.Start() > $null # will always return $true for non shell-exec cases
+    $stdout = $stderr = [string] $null
+
+    [Ansible.Command.NativeUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) > $null
+    $proc.WaitForExit() > $null
+    $actual_args = $stdout.Substring(0, $stdout.Length - 2) -split "`r`n"
+
+    return $actual_args
+}
+
+$tests = @(
+    @('abc', 'd', 'e'),
+    @('a\\b', 'de fg', 'h'),
+    @('a\"b', 'c', 'd'),
+    @('a\\b c', 'd', 'e'),
+    @('C:\Program Files\file\', 'arg with " quote'),
+    @('ADDLOCAL="a,b,c"', '/s', 'C:\\Double\\Backslash')
+)
+
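+# Illustrative only (not asserted by this test): Argv-ToString escapes
+# arguments following the standard Windows command line quoting rules, so,
+# for example, an argument containing a space is wrapped in double quotes:
+#   Argv-ToString -arguments @('abc', 'de fg')  # -> 'abc "de fg"'
+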
+foreach ($expected in $tests) {
+    $joined_string = Argv-ToString -arguments $expected
+    # We can't use CommandLineToArgvW to verify this as it seems to mangle
+    # backslashes; it might be something to do with unicode, but we are not sure.
+    $actual = Invoke-Process -executable $exe -arguments $joined_string
+
+    if ($expected.Count -ne $actual.Count) {
+        $result.actual = $actual -join "`n"
+        $result.expected = $expected -join "`n"
+        Fail-Json -obj $result -message "Actual arg count: $($actual.Count) != Expected arg count: $($expected.Count)"
+    }
+    for ($i = 0; $i -lt $expected.Count; $i++) {
+        $expected_arg = $expected[$i]
+        $actual_arg = $actual[$i]
+        if ($expected_arg -cne $actual_arg) {
+            $result.actual = $actual -join "`n"
+            $result.expected = $expected -join "`n"
+            Fail-Json -obj $result -message "Actual arg: '$actual_arg' != Expected arg: '$expected_arg'"
+        }
+    }
+}
+
+Exit-Json @{ data = 'success' }
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml
new file mode 100644
index 00000000..fd0dc543
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- setup_win_printargv
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml
new file mode 100644
index 00000000..b39155e0
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.ArgvParser/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: call module with ArgvParser tests
+  argv_parser_test:
+    exe: '{{ win_printargv_path }}'
+  register: argv_test
+
+- assert:
+    that:
+    - argv_test.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1
new file mode 100644
index 00000000..15527560
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/library/backup_file_test.ps1
@@ -0,0 +1,89 @@
+#!powershell
+
+# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.Backup
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals {
+    param(
+        [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+        [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+    )
+
+    $matched = $false
+    if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+        $Actual.Count | Assert-Equals -Expected $Expected.Count
+        for ($i = 0; $i -lt $Actual.Count; $i++) {
+            $actual_value = $Actual[$i]
+            $expected_value = $Expected[$i]
+            Assert-Equals -Actual $actual_value -Expected $expected_value
+        }
+        $matched = $true
+    } else {
+        $matched = $Actual -ceq $Expected
+    }
+
+    if (-not $matched) {
+        if ($Actual -is [PSObject]) {
+            $Actual = $Actual.ToString()
+        }
+
+        $call_stack = (Get-PSCallStack)[1]
+        $module.Result.test = $test
+        $module.Result.actual = $Actual
+        $module.Result.expected = $Expected
+        $module.Result.line = $call_stack.ScriptLineNumber
+        $module.Result.method = $call_stack.Position.Text
+        $module.FailJson("AssertionError: actual != expected")
+    }
+}
+
+$tmp_dir = $module.Tmpdir
+
+$tests = @{
+    "Test backup file with missing file" = {
+        $actual = Backup-File -path (Join-Path -Path $tmp_dir -ChildPath "missing")
+        $actual | Assert-Equals -Expected $null
+    }
+
+    "Test backup file in check mode" = {
+        $orig_file = Join-Path -Path $tmp_dir -ChildPath "file-check.txt"
+        Set-Content -LiteralPath $orig_file -Value "abc"
+        $actual = Backup-File -path $orig_file -WhatIf
+
+        (Test-Path -LiteralPath $actual) | Assert-Equals -Expected $false
+
+        $parent_dir = Split-Path -LiteralPath $actual
+        $backup_file = Split-Path -Path $actual -Leaf
+        $parent_dir | Assert-Equals -Expected $tmp_dir
+        ($backup_file -match "^file-check\.txt\.$pid\.\d{8}-\d{6}\.bak$") | Assert-Equals -Expected $true
+    }
+
+    "Test backup file" = {
+        $content = "abc"
+        $orig_file = Join-Path -Path $tmp_dir -ChildPath "file.txt"
+        Set-Content -LiteralPath $orig_file -Value $content
+        $actual = Backup-File -path $orig_file
+
+        (Test-Path -LiteralPath $actual) | Assert-Equals -Expected $true
+
+        $parent_dir = Split-Path -LiteralPath $actual
+        $backup_file = Split-Path -Path $actual -Leaf
+        $parent_dir | Assert-Equals -Expected $tmp_dir
+        ($backup_file -match "^file\.txt\.$pid\.\d{8}-\d{6}\.bak$") | Assert-Equals -Expected $true
+        (Get-Content -LiteralPath $actual -Raw) | Assert-Equals -Expected "$content`r`n"
+    }
+}
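+
+# From the regexes asserted above, backup files created by Backup-File follow
+# the pattern <name>.<pid>.<8 digits>-<6 digits>.bak (presumably a timestamp in
+# yyyyMMdd-HHmmss form); a purely hypothetical example:
+#   file.txt.1234.20201219-181224.bak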
+
+foreach ($test_impl in $tests.GetEnumerator()) {
+    $test = $test_impl.Key
+    &$test_impl.Value
+}
+
+$module.Result.res = 'success'
+
+$module.ExitJson()
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml
new file mode 100644
index 00000000..cb979ebc
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Backup/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: call module with BackupFile tests
+  backup_file_test:
+  register: backup_file_test
+
+- name: assert call module with BackupFile tests
+  assert:
+    that:
+    - not backup_file_test is failed
+    - backup_file_test.res == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1
new file mode 100644
index 00000000..d3dc9d7b
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/library/camel_conversion_test.ps1
@@ -0,0 +1,74 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CamelConversion
+
+$ErrorActionPreference = 'Stop'
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -cne $expected) {
+        Fail-Json @{} "actual != expected`nActual: $actual`nExpected: $expected"
+    }
+}
+
+$input_dict = @{
+    alllower = 'alllower'
+    ALLUPPER = 'allupper'
+    camelCase = 'camel_case'
+    mixedCase_withCamel = 'mixed_case_with_camel'
+    TwoWords = 'two_words'
+    AllUpperAtEND = 'all_upper_at_end'
+    AllUpperButPLURALs = 'all_upper_but_plurals'
+    TargetGroupARNs = 'target_group_arns'
+    HTTPEndpoints = 'http_endpoints'
+    PLURALs = 'plurals'
+    listDict = @(
+        @{ entry1 = 'entry1'; entryTwo = 'entry_two' },
+        'stringTwo',
+        0
+    )
+    INNERHashTable = @{
+        ID = 'id'
+        IEnumerable = 'i_enumerable'
+    }
+    emptyList = @()
+    singleList = @("a")
+}
+
+$output_dict = Convert-DictToSnakeCase -dict $input_dict
+foreach ($entry in $output_dict.GetEnumerator()) {
+    $key = $entry.Name
+    $value = $entry.Value
+
+    if ($value -is [Hashtable]) {
+        Assert-Equals -actual $key -expected "inner_hash_table"
+        foreach ($inner_hash in $value.GetEnumerator()) {
+            Assert-Equals -actual $inner_hash.Name -expected $inner_hash.Value
+        }
+    } elseif ($value -is [Array] -or $value -is [System.Collections.ArrayList]) {
+        if ($key -eq "list_dict") {
+            foreach ($inner_list in $value) {
+                if ($inner_list -is [Hashtable]) {
+                    foreach ($inner_list_hash in $inner_list.GetEnumerator()) {
+                        Assert-Equals -actual $inner_list_hash.Name -expected $inner_list_hash.Value
+                    }
+                } elseif ($inner_list -is [String]) {
+                    # string values inside lists are not keys, so they are left unconverted
+                    Assert-Equals -actual $inner_list -expected "stringTwo"
+                } else {
+                    Assert-Equals -actual $inner_list -expected 0
+                }
+            }
+        } elseif ($key -eq "empty_list") {
+            Assert-Equals -actual $value.Count -expected 0
+        } elseif ($key -eq "single_list") {
+            Assert-Equals -actual $value.Count -expected 1
+        } else {
+            Fail-Json -obj @{} -message "invalid key found for list $key"
+        }
+    } else {
+        Assert-Equals -actual $key -expected $value
+    }
+}
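+
+# A minimal sketch of the behaviour verified above (inferred from this test,
+# not from separate documentation): keys are snake_cased recursively while
+# plain string values are left untouched, e.g.
+#   Convert-DictToSnakeCase -dict @{ camelCase = 'x' }  # -> @{ camel_case = 'x' }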
+
+Exit-Json @{ data = 'success' }
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml
new file mode 100644
index 00000000..f28ea30d
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CamelConversion/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: call module with camel conversion tests
+  camel_conversion_test:
+  register: camel_conversion
+
+- assert:
+    that:
+    - camel_conversion.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1
new file mode 100644
index 00000000..6e644fe2
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/library/command_util_test.ps1
@@ -0,0 +1,135 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args $args
+$exe = Get-AnsibleParam -obj $params -name "exe" -type "path" -failifempty $true
+
+$result = @{
+    changed = $false
+}
+
+$exe_directory = Split-Path -Path $exe -Parent
+$exe_filename = Split-Path -Path $exe -Leaf
+$test_name = $null
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -cne $expected) {
+        Fail-Json -obj $result -message "Test $test_name failed`nActual: '$actual' != Expected: '$expected'"
+    }
+}
+
+$test_name = "full exe path"
+$actual = Run-Command -command "`"$exe`" arg1 arg2 `"arg 3`""
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "arg1`r`narg2`r`narg 3`r`n"
+Assert-Equals -actual $actual.stderr -expected ""
+Assert-Equals -actual $actual.executable -expected $exe
+
+$test_name = "exe in special char dir"
+$tmp_dir = Join-Path -Path $env:TEMP -ChildPath "ansible .ÅÑŚÌβÅÈ [$!@^&test(;)]"
+try {
+    New-Item -Path $tmp_dir -ItemType Directory > $null
+    $exe_special = Join-Path $tmp_dir -ChildPath "PrintArgv.exe"
+    Copy-Item -LiteralPath $exe -Destination $exe_special
+    $actual = Run-Command -command "`"$exe_special`" arg1 arg2 `"arg 3`""
+} finally {
+    Remove-Item -LiteralPath $tmp_dir -Force -Recurse
+}
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "arg1`r`narg2`r`narg 3`r`n"
+Assert-Equals -actual $actual.stderr -expected ""
+Assert-Equals -actual $actual.executable -expected $exe_special
+
+$test_name = "invalid exe path"
+try {
+    $actual = Run-Command -command "C:\fakepath\$exe_filename arg1"
+    Fail-Json -obj $result -message "Test $test_name failed`nCommand should have thrown an exception"
+} catch {
+    Assert-Equals -actual $_.Exception.Message -expected "Exception calling `"SearchPath`" with `"1`" argument(s): `"Could not find file 'C:\fakepath\$exe_filename'.`""
+}
+
+$test_name = "exe in current folder"
+$actual = Run-Command -command "$exe_filename arg1" -working_directory $exe_directory
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "arg1`r`n"
+Assert-Equals -actual $actual.stderr -expected ""
+Assert-Equals -actual $actual.executable -expected $exe
+
+$test_name = "no working directory set"
+$actual = Run-Command -command "cmd.exe /c cd"
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "$($pwd.Path)`r`n"
+Assert-Equals -actual $actual.stderr -expected ""
+Assert-Equals -actual $actual.executable.ToUpper() -expected "$env:SystemRoot\System32\cmd.exe".ToUpper()
+
+$test_name = "working directory override"
+$actual = Run-Command -command "cmd.exe /c cd" -working_directory $env:SystemRoot
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "$env:SystemRoot`r`n"
+Assert-Equals -actual $actual.stderr -expected ""
+Assert-Equals -actual $actual.executable.ToUpper() -expected "$env:SystemRoot\System32\cmd.exe".ToUpper()
+
+$test_name = "working directory invalid path"
+try {
+    $actual = Run-Command -command "doesn't matter" -working_directory "invalid path here"
+    Fail-Json -obj $result -message "Test $test_name failed`nCommand should have thrown an exception"
+} catch {
+    Assert-Equals -actual $_.Exception.Message -expected "invalid working directory path 'invalid path here'"
+}
+
+$test_name = "invalid arguments"
+$actual = Run-Command -command "ipconfig.exe /asdf"
+Assert-Equals -actual $actual.rc -expected 1
+
+$test_name = "test stdout and stderr streams"
+$actual = Run-Command -command "cmd.exe /c echo stdout && echo stderr 1>&2"
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "stdout `r`n"
+Assert-Equals -actual $actual.stderr -expected "stderr `r`n"
+
+$test_name = "Test UTF8 output from stdout stream"
+$actual = Run-Command -command "powershell.exe -ExecutionPolicy ByPass -Command `"Write-Host '💩'`""
+Assert-Equals -actual $actual.rc -expected 0
+Assert-Equals -actual $actual.stdout -expected "💩`n"
+Assert-Equals -actual $actual.stderr -expected ""
+
+$test_name = "test default environment variable"
+Set-Item -LiteralPath env:TESTENV -Value "test"
+$actual = Run-Command -command "cmd.exe /c set"
+$env_present = $actual.stdout -split "`r`n" | Where-Object { $_ -eq "TESTENV=test" }
+if ($null -eq $env_present) {
+    Fail-Json -obj $result -message "Test $test_name failed`nenvironment variable TESTENV not found in stdout`n$($actual.stdout)"
+}
+
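+# The assertions below imply that passing -environment replaces the whole
+# child process environment rather than merging with it: TESTENV set above
+# must disappear while TESTENV2 must appear. This is an inference from the
+# test, not a documented contract.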
+$test_name = "test custom environment variable1"
+$actual = Run-Command -command "cmd.exe /c set" -environment @{ TESTENV2 = "testing" }
+$env_not_present = $actual.stdout -split "`r`n" | Where-Object { $_ -eq "TESTENV=test" }
+$env_present = $actual.stdout -split "`r`n" | Where-Object { $_ -eq "TESTENV2=testing" }
+if ($null -ne $env_not_present) {
+ Fail-Json -obj $result -message "Test $test_name failed`nenvironment variabel TESTENV found in stdout when it should be`n$($actual.stdout)"
+}
+if ($null -eq $env_present) {
+ Fail-json -obj $result -message "Test $test_name failed`nenvironment variable TESTENV2 not found in stdout`n$($actual.stdout)"
+}
+
+$test_name = "input test"
+$wrapper = @"
+begin {
+    `$string = ""
+} process {
+    `$current_input = [string]`$input
+    `$string += `$current_input
+} end {
+    Write-Host `$string
+}
+"@
+$encoded_wrapper = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($wrapper))
+$actual = Run-Command -command "powershell.exe -ExecutionPolicy ByPass -EncodedCommand $encoded_wrapper" -stdin "Ansible"
+Assert-Equals -actual $actual.stdout -expected "Ansible`n"
+
+$result.data = "success"
+Exit-Json -obj $result
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml
new file mode 100644
index 00000000..fd0dc543
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- setup_win_printargv
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml
new file mode 100644
index 00000000..3001518b
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.CommandUtil/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: call module with CommandUtil tests
+  command_util_test:
+    exe: '{{ win_printargv_path }}'
+  register: command_util
+
+- assert:
+    that:
+    - command_util.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1
new file mode 100644
index 00000000..ae3e68ec
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/library/file_util_test.ps1
@@ -0,0 +1,108 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.FileUtil
+
+$ErrorActionPreference = "Stop"
+
+$result = @{
+    changed = $false
+}
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -cne $expected) {
+        $call_stack = (Get-PSCallStack)[1]
+        $error_msg = "AssertionError:`r`nActual: `"$actual`" != Expected: `"$expected`"`r`nLine: $($call_stack.ScriptLineNumber), Method: $($call_stack.Position.Text)"
+        Fail-Json -obj $result -message $error_msg
+    }
+}
+
+Function Get-PagefilePath() {
+    $pagefile = $null
+    $cs = Get-CimInstance -ClassName Win32_ComputerSystem
+    if ($cs.AutomaticManagedPagefile) {
+        $pagefile = "$($env:SystemRoot.Substring(0, 1)):\pagefile.sys"
+    } else {
+        $pf = Get-CimInstance -ClassName Win32_PageFileSetting
+        if ($null -ne $pf) {
+            $pagefile = $pf[0].Name
+        }
+    }
+    return $pagefile
+}
+
+$pagefile = Get-PagefilePath
+if ($pagefile) {
+    # Test-AnsiblePath Hidden system file
+    $actual = Test-AnsiblePath -Path $pagefile
+    Assert-Equals -actual $actual -expected $true
+
+    # Get-AnsibleItem file
+    $actual = Get-AnsibleItem -Path $pagefile
+    Assert-Equals -actual $actual.FullName -expected $pagefile
+    Assert-Equals -actual $actual.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -expected $false
+    Assert-Equals -actual $actual.Exists -expected $true
+}
+
+# Test-AnsiblePath File that doesn't exist
+$actual = Test-AnsiblePath -Path C:\fakefile
+Assert-Equals -actual $actual -expected $false
+
+# Test-AnsiblePath Directory that doesn't exist
+$actual = Test-AnsiblePath -Path C:\fakedirectory
+Assert-Equals -actual $actual -expected $false
+
+# Test-AnsiblePath file in non-existent directory
+$actual = Test-AnsiblePath -Path C:\fakedirectory\fakefile.txt
+Assert-Equals -actual $actual -expected $false
+
+# Test-AnsiblePath Normal directory
+$actual = Test-AnsiblePath -Path C:\Windows
+Assert-Equals -actual $actual -expected $true
+
+# Test-AnsiblePath Normal file
+$actual = Test-AnsiblePath -Path C:\Windows\System32\kernel32.dll
+Assert-Equals -actual $actual -expected $true
+
+# Test-AnsiblePath fails with wildcard
+$failed = $false
+try {
+    Test-AnsiblePath -Path C:\Windows\*.exe
+} catch {
+    $failed = $true
+    Assert-Equals -actual $_.Exception.Message -expected "Exception calling `"GetAttributes`" with `"1`" argument(s): `"Illegal characters in path.`""
+}
+Assert-Equals -actual $failed -expected $true
+
+# Test-AnsiblePath on a non-file PS provider object
+$actual = Test-AnsiblePath -Path Cert:\LocalMachine\My
+Assert-Equals -actual $actual -expected $true
+
+# Test-AnsiblePath on environment variable
+$actual = Test-AnsiblePath -Path env:SystemDrive
+Assert-Equals -actual $actual -expected $true
+
+# Test-AnsiblePath on environment variable that does not exist
+$actual = Test-AnsiblePath -Path env:FakeEnvValue
+Assert-Equals -actual $actual -expected $false
+
+# Get-AnsibleItem doesn't exist with -ErrorAction SilentlyContinue param
+$actual = Get-AnsibleItem -Path C:\fakefile -ErrorAction SilentlyContinue
+Assert-Equals -actual $actual -expected $null
+
+# Get-AnsibleItem directory
+$actual = Get-AnsibleItem -Path C:\Windows
+Assert-Equals -actual $actual.FullName -expected C:\Windows
+Assert-Equals -actual $actual.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -expected $true
+Assert-Equals -actual $actual.Exists -expected $true
+
+# ensure Get-AnsibleItem with -ErrorAction SilentlyContinue doesn't fail inside
+# a try/catch; the silent error action stops a trap from firing
+try {
+    $actual = Get-AnsibleItem -Path C:\fakepath -ErrorAction SilentlyContinue
+} catch {
+    Fail-Json -obj $result -message "this should not fire"
+}
+Assert-Equals -actual $actual -expected $null
+
+$result.data = "success"
+Exit-Json -obj $result
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml
new file mode 100644
index 00000000..a636d32e
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.FileUtil/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: call module with FileUtil tests
+  file_util_test:
+  register: file_util_test
+
+- assert:
+    that:
+    - file_util_test.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1
new file mode 100644
index 00000000..06ef17b4
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testlist.ps1
@@ -0,0 +1,12 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$params = Parse-Args $args
+$value = Get-AnsibleParam -Obj $params -Name value -Type list
+
+if ($value -isnot [array]) {
+    Fail-Json -obj @{} -message "value was not a list but was $($value.GetType().FullName)"
+}
+
+Exit-Json @{ count = $value.Count }
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1
new file mode 100644
index 00000000..55cad70f
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/library/testpath.ps1
@@ -0,0 +1,9 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$params = Parse-Args $args
+
+$path = Get-AnsibleParam -Obj $params -Name path -Type path
+
+Exit-Json @{ path=$path }
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml
new file mode 100644
index 00000000..0bd10558
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.Legacy/tasks/main.yml
@@ -0,0 +1,41 @@
+# NB: these tests are just a placeholder until we have pester unit tests.
+# They are being run as part of the Windows smoke tests. Please do not significantly
+# increase the size of these tests, as the smoke tests need to remain fast.
+# Any significant additions should be made to the (as yet nonexistent) PS module_utils unit tests.
+---
+- name: find a nonexistent drive letter
+  raw: foreach($c in [char[]]([char]'D'..[char]'Z')) { If (-not $(Get-PSDrive $c -ErrorAction SilentlyContinue)) { return $c } }
+  register: bogus_driveletter
+
+- assert:
+    that: bogus_driveletter.stdout_lines[0] | length == 1
+
+- name: test path shape validation
+  testpath:
+    path: "{{ item.path }}"
+  failed_when: path_shapes is failed != (item.should_fail | default(false))
+  register: path_shapes
+  with_items:
+  - path: C:\Windows
+  - path: HKLM:\Software
+  - path: '{{ bogus_driveletter.stdout_lines[0] }}:\goodpath'
+  - path: '{{ bogus_driveletter.stdout_lines[0] }}:\badpath*%@:\blar'
+    should_fail: true
+
+- name: test list parameters
+  testlist:
+    value: '{{item.value}}'
+  register: list_tests
+  failed_when: list_tests is failed or list_tests.count != item.count
+  with_items:
+  - value: []
+    count: 0
+  - value:
+    - 1
+    - 2
+    count: 2
+  - value:
+    - 1
+    count: 1
+  - value: "1, 2"
+    count: 2
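+# The last case above relies on the legacy 'list' type coercing a comma
+# separated string into a list; "1, 2" is expected to split into two elements
+# (inferred from the expected count, not from separate documentation).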
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1
new file mode 100644
index 00000000..1decfe4f
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/library/symbolic_link_test.ps1
@@ -0,0 +1,170 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.LinkUtil
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+
+$ErrorActionPreference = 'Stop'
+
+$path = Join-Path -Path ([System.IO.Path]::GetFullPath($env:TEMP)) -ChildPath '.ansible .ÅÑŚÌβÅÈ [$!@^&test(;)]'
+
+$folder_target = "$path\folder"
+$file_target = "$path\file"
+$symlink_file_path = "$path\file-symlink"
+$symlink_folder_path = "$path\folder-symlink"
+$hardlink_path = "$path\hardlink"
+$hardlink_path_2 = "$path\hardlink2"
+$junction_point_path = "$path\junction"
+
+if (Test-Path -LiteralPath $path) {
+    # Remove-Item struggles with broken symlinks, rely on trusty rmdir instead
+    Run-Command -command "cmd.exe /c rmdir /S /Q `"$path`"" > $null
+}
+New-Item -Path $path -ItemType Directory | Out-Null
+New-Item -Path $folder_target -ItemType Directory | Out-Null
+New-Item -Path $file_target -ItemType File | Out-Null
+Set-Content -LiteralPath $file_target -Value "a"
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -ne $expected) {
+        Fail-Json @{} "actual != expected`nActual: $actual`nExpected: $expected"
+    }
+}
+
+Function Assert-True($expression, $message) {
+    if ($expression -ne $true) {
+        Fail-Json @{} $message
+    }
+}
+
+# the LinkUtil helper code is not loaded automatically, so load it manually here
+Load-LinkUtils
+
+# path is not a link
+$no_link_result = Get-Link -link_path $path
+Assert-True -expression ($null -eq $no_link_result) -message "did not return null result for a non link"
+
+# fail to create hard link pointed to a directory
+try {
+    New-Link -link_path "$path\folder-hard" -link_target $folder_target -link_type "hard"
+    Assert-True -expression $false -message "creation of hard link should have failed if target was a directory"
+} catch {
+    Assert-Equals -actual $_.Exception.Message -expected "cannot set the target for a hard link to a directory"
+}
+
+# fail to create a junction point pointed to a file
+try {
+    New-Link -link_path "$path\junction-fail" -link_target $file_target -link_type "junction"
+    Assert-True -expression $false -message "creation of junction point should have failed if target was a file"
+} catch {
+    Assert-Equals -actual $_.Exception.Message -expected "cannot set the target for a junction point to a file"
+}
+
+# fail to create a symbolic link with non-existent target
+try {
+    New-Link -link_path "$path\symlink-fail" -link_target "$path\fake-folder" -link_type "link"
+    Assert-True -expression $false -message "creation of symbolic link should have failed if target did not exist"
+} catch {
+    Assert-Equals -actual $_.Exception.Message -expected "link_target '$path\fake-folder' does not exist, cannot create link"
+}
+
+# create relative symlink
+Run-Command -command "cmd.exe /c mklink /D symlink-rel folder" -working_directory $path | Out-Null
+$rel_link_result = Get-Link -link_path "$path\symlink-rel"
+Assert-Equals -actual $rel_link_result.Type -expected "SymbolicLink"
+Assert-Equals -actual $rel_link_result.SubstituteName -expected "folder"
+Assert-Equals -actual $rel_link_result.PrintName -expected "folder"
+Assert-Equals -actual $rel_link_result.TargetPath -expected "folder"
+Assert-Equals -actual $rel_link_result.AbsolutePath -expected $folder_target
+Assert-Equals -actual $rel_link_result.HardTargets -expected $null
+
+# create a symbolic file test
+New-Link -link_path $symlink_file_path -link_target $file_target -link_type "link"
+$file_link_result = Get-Link -link_path $symlink_file_path
+Assert-Equals -actual $file_link_result.Type -expected "SymbolicLink"
+Assert-Equals -actual $file_link_result.SubstituteName -expected "\??\$file_target"
+Assert-Equals -actual $file_link_result.PrintName -expected $file_target
+Assert-Equals -actual $file_link_result.TargetPath -expected $file_target
+Assert-Equals -actual $file_link_result.AbsolutePath -expected $file_target
+Assert-Equals -actual $file_link_result.HardTargets -expected $null
+
+# create a symbolic link folder test
+New-Link -link_path $symlink_folder_path -link_target $folder_target -link_type "link"
+$folder_link_result = Get-Link -link_path $symlink_folder_path
+Assert-Equals -actual $folder_link_result.Type -expected "SymbolicLink"
+Assert-Equals -actual $folder_link_result.SubstituteName -expected "\??\$folder_target"
+Assert-Equals -actual $folder_link_result.PrintName -expected $folder_target
+Assert-Equals -actual $folder_link_result.TargetPath -expected $folder_target
+Assert-Equals -actual $folder_link_result.AbsolutePath -expected $folder_target
+Assert-Equals -actual $folder_link_result.HardTargets -expected $null
+
+# create a junction point test
+New-Link -link_path $junction_point_path -link_target $folder_target -link_type "junction"
+$junction_point_result = Get-Link -link_path $junction_point_path
+Assert-Equals -actual $junction_point_result.Type -expected "JunctionPoint"
+Assert-Equals -actual $junction_point_result.SubstituteName -expected "\??\$folder_target"
+Assert-Equals -actual $junction_point_result.PrintName -expected $folder_target
+Assert-Equals -actual $junction_point_result.TargetPath -expected $folder_target
+Assert-Equals -actual $junction_point_result.AbsolutePath -expected $folder_target
+Assert-Equals -actual $junction_point_result.HardTargets -expected $null
+
+# create a hard link test
+New-Link -link_path $hardlink_path -link_target $file_target -link_type "hard"
+$hardlink_result = Get-Link -link_path $hardlink_path
+Assert-Equals -actual $hardlink_result.Type -expected "HardLink"
+Assert-Equals -actual $hardlink_result.SubstituteName -expected $null
+Assert-Equals -actual $hardlink_result.PrintName -expected $null
+Assert-Equals -actual $hardlink_result.TargetPath -expected $null
+Assert-Equals -actual $hardlink_result.AbsolutePath -expected $null
+if ($hardlink_result.HardTargets[0] -ne $hardlink_path -and $hardlink_result.HardTargets[1] -ne $hardlink_path) {
+    Assert-True -expression $false -message "file $hardlink_path is not a target of the hard link"
+}
+if ($hardlink_result.HardTargets[0] -ne $file_target -and $hardlink_result.HardTargets[1] -ne $file_target) {
+    Assert-True -expression $false -message "file $file_target is not a target of the hard link"
+}
+Assert-Equals -actual (Get-Content -LiteralPath $hardlink_path -Raw) -expected (Get-Content -LiteralPath $file_target -Raw)
+
+# create a new hard link and verify targets go to 3
+New-Link -link_path $hardlink_path_2 -link_target $file_target -link_type "hard"
+$hardlink_result_2 = Get-Link -link_path $hardlink_path
+Assert-True -expression ($hardlink_result_2.HardTargets.Count -eq 3) -message "did not return 3 targets for the hard link, actual $($hardlink_result_2.HardTargets.Count)"
+
+# check if broken symbolic link still works
+Remove-Item -LiteralPath $folder_target -Force | Out-Null
+$broken_link_result = Get-Link -link_path $symlink_folder_path
+Assert-Equals -actual $broken_link_result.Type -expected "SymbolicLink"
+Assert-Equals -actual $broken_link_result.SubstituteName -expected "\??\$folder_target"
+Assert-Equals -actual $broken_link_result.PrintName -expected $folder_target
+Assert-Equals -actual $broken_link_result.TargetPath -expected $folder_target
+Assert-Equals -actual $broken_link_result.AbsolutePath -expected $folder_target
+Assert-Equals -actual $broken_link_result.HardTargets -expected $null
+
+# check if broken junction point still works
+$broken_junction_result = Get-Link -link_path $junction_point_path
+Assert-Equals -actual $broken_junction_result.Type -expected "JunctionPoint"
+Assert-Equals -actual $broken_junction_result.SubstituteName -expected "\??\$folder_target"
+Assert-Equals -actual $broken_junction_result.PrintName -expected $folder_target
+Assert-Equals -actual $broken_junction_result.TargetPath -expected $folder_target
+Assert-Equals -actual $broken_junction_result.AbsolutePath -expected $folder_target
+Assert-Equals -actual $broken_junction_result.HardTargets -expected $null
+
+# delete file symbolic link
+Remove-Link -link_path $symlink_file_path
+Assert-True -expression (-not (Test-Path -LiteralPath $symlink_file_path)) -message "failed to delete file symbolic link"
+
+# delete folder symbolic link
+Remove-Link -link_path $symlink_folder_path
+Assert-True -expression (-not (Test-Path -LiteralPath $symlink_folder_path)) -message "failed to delete folder symbolic link"
+
+# delete junction point
+Remove-Link -link_path $junction_point_path
+Assert-True -expression (-not (Test-Path -LiteralPath $junction_point_path)) -message "failed to delete junction point"
+
+# delete hard link
+Remove-Link -link_path $hardlink_path
+Assert-True -expression (-not (Test-Path -LiteralPath $hardlink_path)) -message "failed to delete hard link"
+
+# cleanup after tests
+Run-Command -command "cmd.exe /c rmdir /S /Q `"$path`"" > $null
+
+Exit-Json @{ data = "success" }
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml
new file mode 100644
index 00000000..f121ad4a
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.LinkUtil/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: call module with symbolic link tests
+  symbolic_link_test:
+  register: symbolic_link
+
+- assert:
+    that:
+    - symbolic_link.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1
new file mode 100644
index 00000000..e1ca25da
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/library/privilege_util_test.ps1
@@ -0,0 +1,112 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -cne $expected) {
+        $call_stack = (Get-PSCallStack)[1]
+        $module.Result.actual = $actual
+        $module.Result.expected = $expected
+        $module.Result.line = $call_stack.ScriptLineNumber
+        $module.Result.method = $call_stack.Position.Text
+        $module.FailJson("AssertionError: actual != expected")
+    }
+}
+
+# taken from https://docs.microsoft.com/en-us/windows/desktop/SecAuthZ/privilege-constants
+$total_privileges = @(
+    "SeAssignPrimaryTokenPrivilege",
+    "SeAuditPrivilege",
+    "SeBackupPrivilege",
+    "SeChangeNotifyPrivilege",
+    "SeCreateGlobalPrivilege",
+    "SeCreatePagefilePrivilege",
+    "SeCreatePermanentPrivilege",
+    "SeCreateSymbolicLinkPrivilege",
+    "SeCreateTokenPrivilege",
+    "SeDebugPrivilege",
+    "SeEnableDelegationPrivilege",
+    "SeImpersonatePrivilege",
+    "SeIncreaseBasePriorityPrivilege",
+    "SeIncreaseQuotaPrivilege",
+    "SeIncreaseWorkingSetPrivilege",
+    "SeLoadDriverPrivilege",
+    "SeLockMemoryPrivilege",
+    "SeMachineAccountPrivilege",
+    "SeManageVolumePrivilege",
+    "SeProfileSingleProcessPrivilege",
+    "SeRelabelPrivilege",
+    "SeRemoteShutdownPrivilege",
+    "SeRestorePrivilege",
+    "SeSecurityPrivilege",
+    "SeShutdownPrivilege",
+    "SeSyncAgentPrivilege",
+    "SeSystemEnvironmentPrivilege",
+    "SeSystemProfilePrivilege",
+    "SeSystemtimePrivilege",
+    "SeTakeOwnershipPrivilege",
+    "SeTcbPrivilege",
+    "SeTimeZonePrivilege",
+    "SeTrustedCredManAccessPrivilege",
+    "SeUndockPrivilege"
+)
+
+$raw_privilege_output = &whoami /priv | Where-Object { $_.StartsWith("Se") }
+$actual_privileges = @{}
+foreach ($raw_privilege in $raw_privilege_output) {
+    $split = $raw_privilege.TrimEnd() -split " "
+    $actual_privileges."$($split[0])" = ($split[-1] -eq "Enabled")
+}
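+
+# For reference, each parsed line is assumed to look roughly like
+#   SeChangeNotifyPrivilege    Bypass traverse checking    Enabled
+# i.e. privilege name first and state last, which the split above relies on.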
+$process = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess()
+
+### Test PS cmdlets ###
+# test ps Get-AnsiblePrivilege
+foreach ($privilege in $total_privileges) {
+    $expected = $null
+    if ($actual_privileges.ContainsKey($privilege)) {
+        $expected = $actual_privileges.$privilege
+    }
+    $actual = Get-AnsiblePrivilege -Name $privilege
+    Assert-Equals -actual $actual -expected $expected
+}
+
+# test c# GetAllPrivilegeInfo
+$actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+Assert-Equals -actual $actual.GetType().Name -expected 'Dictionary`2'
+Assert-Equals -actual $actual.Count -expected $actual_privileges.Count
+foreach ($privilege in $total_privileges) {
+    if ($actual_privileges.ContainsKey($privilege)) {
+        $actual_value = $actual.$privilege
+        if ($actual_privileges.$privilege) {
+            Assert-Equals -actual $actual_value.HasFlag([Ansible.Privilege.PrivilegeAttributes]::Enabled) -expected $true
+        } else {
+            Assert-Equals -actual $actual_value.HasFlag([Ansible.Privilege.PrivilegeAttributes]::Enabled) -expected $false
+        }
+    }
+}
+
+# test Set-AnsiblePrivilege
+Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $false # ensure we start with a disabled privilege
+
+Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $true -WhatIf
+$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege
+Assert-Equals -actual $actual -expected $false
+
+Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $true
+$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege
+Assert-Equals -actual $actual -expected $true
+
+Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $false -WhatIf
+$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege
+Assert-Equals -actual $actual -expected $true
+
+Set-AnsiblePrivilege -Name SeUndockPrivilege -Value $false
+$actual = Get-AnsiblePrivilege -Name SeUndockPrivilege
+Assert-Equals -actual $actual -expected $false
+
+$module.Result.data = "success"
+$module.ExitJson()
+
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml
new file mode 100644
index 00000000..5f54480e
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.PrivilegeUtil/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: call module with PrivilegeUtil tests
+  privilege_util_test:
+  register: privilege_util_test
+
+- assert:
+    that:
+    - privilege_util_test.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1
new file mode 100644
index 00000000..eb376c81
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/library/sid_utils_test.ps1
@@ -0,0 +1,93 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.SID
+
+$params = Parse-Args $args
+$sid_account = Get-AnsibleParam -obj $params -name "sid_account" -type "str" -failifempty $true
+
+Function Assert-Equals($actual, $expected) {
+    if ($actual -ne $expected) {
+        Fail-Json @{} "actual != expected`nActual: $actual`nExpected: $expected"
+    }
+}
+
+Function Get-ComputerSID() {
+    # find any local user account and trim off the trailing RID to get the machine SID
+    $luser_sid = (Get-CimInstance Win32_UserAccount -Filter "Domain='$env:COMPUTERNAME'")[0].SID
+
+    return $luser_sid -replace '(S-1-5-21-\d+-\d+-\d+)-\d+', '$1'
+}
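+
+# Hypothetical example of the trim above: a local account SID of
+# S-1-5-21-1111111111-2222222222-3333333333-1001 yields the machine SID
+# S-1-5-21-1111111111-2222222222-3333333333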
+
+$local_sid = Get-ComputerSID
+
+# most machines should have a -500 Administrator account, but it may have been renamed. Look it up by SID
+$default_admin = Get-CimInstance Win32_UserAccount -Filter "SID='$local_sid-500'"
+
+# this group is called Administrators by default on English Windows, but it could be named something else. Look it up by SID
+$default_admin_group = Get-CimInstance Win32_Group -Filter "SID='S-1-5-32-544'"
+
+if (@($default_admin).Length -ne 1) {
+    Fail-Json @{} "could not find a local admin account with SID ending in -500"
+}
+
+### Set this to the NETBIOS name of the domain you wish to test; leave as $null for Shippable CI runs ###
+$test_domain = $null
+
+$tests = @(
+    # Local Users
+    @{ sid = "S-1-1-0"; full_name = "Everyone"; names = @("Everyone") },
+    @{ sid = "S-1-5-18"; full_name = "NT AUTHORITY\SYSTEM"; names = @("NT AUTHORITY\SYSTEM", "SYSTEM") },
+    @{ sid = "S-1-5-20"; full_name = "NT AUTHORITY\NETWORK SERVICE"; names = @("NT AUTHORITY\NETWORK SERVICE", "NETWORK SERVICE") },
+    @{ sid = "$($default_admin.SID)"; full_name = "$($default_admin.FullName)"; names = @("$env:COMPUTERNAME\$($default_admin.Name)", "$($default_admin.Name)", ".\$($default_admin.Name)") },
+
+    # Local Groups
+    @{ sid = "$($default_admin_group.SID)"; full_name = "BUILTIN\$($default_admin_group.Name)"; names = @("BUILTIN\$($default_admin_group.Name)", "$($default_admin_group.Name)", ".\$($default_admin_group.Name)") }
+)
+
+# Add domain tests if the domain name has been set
+if ($null -ne $test_domain) {
+    Import-Module ActiveDirectory
+    $domain_info = Get-ADDomain -Identity $test_domain
+    $domain_sid = $domain_info.DomainSID
+    $domain_netbios = $domain_info.NetBIOSName
+    $domain_upn = $domain_info.Forest
+
+    $tests += @{
+        sid = "$domain_sid-512"
+        full_name = "$domain_netbios\Domain Admins"
+        names = @("$domain_netbios\Domain Admins", "Domain Admins@$domain_upn", "Domain Admins")
+    }
+
+    $tests += @{
+        sid = "$domain_sid-500"
+        full_name = "$domain_netbios\Administrator"
+        names = @("$domain_netbios\Administrator", "Administrator@$domain_upn")
+    }
+}
+
+foreach ($test in $tests) {
+    $actual_account_name = Convert-FromSID -sid $test.sid
+    # renamed admins may have an empty FullName; skip comparison in that case
+    if ($test.full_name) {
+        Assert-Equals -actual $actual_account_name -expected $test.full_name
+    }
+
+    foreach ($test_name in $test.names) {
+        $actual_sid = Convert-ToSID -account_name $test_name
+        Assert-Equals -actual $actual_sid -expected $test.sid
+    }
+}
+
+# the account-to-SID test is run outside of the loop above as it depends on the
+# specially created test user passed in via the sid_account parameter
+# Calling Convert-ToSID with a string that looks like a SID should return that SID back
+$actual = Convert-ToSID -account_name $sid_account
+Assert-Equals -actual $actual -expected $sid_account
+
+# Calling Convert-ToSID with a string prefixed with .\ should return the SID
+# of the local user named like that SID, not the SID string that was passed in
+$actual = Convert-ToSID -account_name ".\$sid_account"
+Assert-Equals -actual ($actual -ne $sid_account) -expected $true
+
+Exit-Json @{ data = "success" }
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml
new file mode 100644
index 00000000..acbae50a
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.SID/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- block:
+  - name: create test user with well-known SID as the name
+    win_user:
+      name: S-1-0-0
+      password: AbcDef123!@#
+      state: present
+
+  - name: call module with SID tests
+    sid_utils_test:
+      sid_account: S-1-0-0
+    register: sid_test
+
+  always:
+  - name: remove test SID user
+    win_user:
+      name: S-1-0-0
+      state: absent
+
+- assert:
+    that:
+    - sid_test.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases
new file mode 100644
index 00000000..b5ad7ca9
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/aliases
@@ -0,0 +1,4 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
+needs/httptester
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1 b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1
new file mode 100644
index 00000000..a483698c
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/library/web_request_test.ps1
@@ -0,0 +1,467 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.WebRequest
+
+$spec = @{
+    options = @{
+        httpbin_host = @{ type = 'str'; required = $true }
+    }
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$httpbin_host = $module.Params.httpbin_host
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array] -or $Actual -is [System.Collections.IList]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actualValue = $Actual[$i]
+ $expectedValue = $Expected[$i]
+ Assert-Equals -Actual $actualValue -Expected $expectedValue
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+
+ $module.FailJson("AssertionError: actual != expected")
+ }
+}
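+
+# Illustrative usage of the helper above (not part of the test run): values can
+# be piped in or passed by parameter, e.g.
+#   42 | Assert-Equals -Expected 42
+#   Assert-Equals -Actual @('a', 'b') -Expected @('a', 'b')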
+
+Function Convert-StreamToString {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [System.IO.Stream]
+ $Stream
+ )
+
+ $ms = New-Object -TypeName System.IO.MemoryStream
+ try {
+ $Stream.CopyTo($ms)
+ [System.Text.Encoding]::UTF8.GetString($ms.ToArray())
+ } finally {
+ $ms.Dispose()
+ }
+}
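+
+# Note: this buffers the whole response stream into memory and decodes it as
+# UTF-8, which is fine for the small text payloads used in these tests.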
+
+$tests = [Ordered]@{
+ 'GET request over http' = {
+ $r = Get-AnsibleWebRequest -Uri "http://$httpbin_host/get"
+
+ $r.Method | Assert-Equals -Expected 'GET'
+ $r.Timeout | Assert-Equals -Expected 30000
+ $r.UseDefaultCredentials | Assert-Equals -Expected $false
+ $r.Credentials | Assert-Equals -Expected $null
+ $r.ClientCertificates.Count | Assert-Equals -Expected 0
+ $r.Proxy.Credentials | Assert-Equals -Expected $null
+ $r.UserAgent | Assert-Equals -Expected 'ansible-httpget'
+
+ $actual = Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.StatusCode | Assert-Equals -Expected 200
+ Convert-StreamToString -Stream $Stream
+ } | ConvertFrom-Json
+
+ $actual.headers.'User-Agent' | Assert-Equals -Expected 'ansible-httpget'
+ $actual.headers.'Host' | Assert-Equals -Expected $httpbin_host
+
+ $module.Result.msg | Assert-Equals -Expected 'OK'
+ $module.Result.status_code | Assert-Equals -Expected 200
+ $module.Result.ContainsKey('elapsed') | Assert-Equals -Expected $true
+ }
+
+ 'GET request over https' = {
+ # url is an alias for the -Uri parameter.
+ $r = Get-AnsibleWebRequest -url "https://$httpbin_host/get"
+
+ $r.Method | Assert-Equals -Expected 'GET'
+ $r.Timeout | Assert-Equals -Expected 30000
+ $r.UseDefaultCredentials | Assert-Equals -Expected $false
+ $r.Credentials | Assert-Equals -Expected $null
+ $r.ClientCertificates.Count | Assert-Equals -Expected 0
+ $r.Proxy.Credentials | Assert-Equals -Expected $null
+ $r.UserAgent | Assert-Equals -Expected 'ansible-httpget'
+
+ $actual = Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.StatusCode | Assert-Equals -Expected 200
+ Convert-StreamToString -Stream $Stream
+ } | ConvertFrom-Json
+
+ $actual.headers.'User-Agent' | Assert-Equals -Expected 'ansible-httpget'
+ $actual.headers.'Host' | Assert-Equals -Expected $httpbin_host
+ }
+
+ 'POST request' = {
+ $getParams = @{
+ Headers = @{
+ 'Content-Type' = 'application/json'
+ }
+ Method = 'POST'
+ Uri = "https://$httpbin_host/post"
+ }
+ $r = Get-AnsibleWebRequest @getParams
+
+ $r.Method | Assert-Equals -Expected 'POST'
+ $r.Timeout | Assert-Equals -Expected 30000
+ $r.UseDefaultCredentials | Assert-Equals -Expected $false
+ $r.Credentials | Assert-Equals -Expected $null
+ $r.ClientCertificates.Count | Assert-Equals -Expected 0
+ $r.Proxy.Credentials | Assert-Equals -Expected $null
+ $r.ContentType | Assert-Equals -Expected 'application/json'
+ $r.UserAgent | Assert-Equals -Expected 'ansible-httpget'
+
+ $body = New-Object -TypeName System.IO.MemoryStream -ArgumentList @(,
+ ([System.Text.Encoding]::UTF8.GetBytes('{"foo":"bar"}'))
+ )
+ $actual = Invoke-WithWebRequest -Module $module -Request $r -Body $body -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.StatusCode | Assert-Equals -Expected 200
+ Convert-StreamToString -Stream $Stream
+ } | ConvertFrom-Json
+
+ $actual.headers.'User-Agent' | Assert-Equals -Expected 'ansible-httpget'
+ $actual.headers.'Host' | Assert-Equals -Expected $httpbin_host
+ $actual.data | Assert-Equals -Expected '{"foo":"bar"}'
+ }
+
+ 'Safe redirection of GET' = {
+ $r = Get-AnsibleWebRequest -Uri "http://$httpbin_host/redirect/2"
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get"
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'Safe redirection of HEAD' = {
+ $r = Get-AnsibleWebRequest -Uri "http://$httpbin_host/redirect/2" -Method HEAD
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get"
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'Safe redirection of PUT' = {
+ $params = @{
+ Method = 'PUT'
+ Uri = "http://$httpbin_host/redirect-to?url=https://$httpbin_host/put"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri
+ $Response.StatusCode | Assert-Equals -Expected 302
+ }
+ }
+
+ 'None redirection of GET' = {
+ $params = @{
+ FollowRedirects = 'None'
+ Uri = "http://$httpbin_host/redirect/2"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri
+ $Response.StatusCode | Assert-Equals -Expected 302
+ }
+ }
+
+ 'None redirection of HEAD' = {
+ $params = @{
+ follow_redirects = 'None'
+ method = 'HEAD'
+ Uri = "http://$httpbin_host/redirect/2"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri
+ $Response.StatusCode | Assert-Equals -Expected 302
+ }
+ }
+
+ 'None redirection of PUT' = {
+ $params = @{
+ FollowRedirects = 'None'
+ Method = 'PUT'
+ Uri = "http://$httpbin_host/redirect-to?url=https://$httpbin_host/put"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected $r.RequestUri
+ $Response.StatusCode | Assert-Equals -Expected 302
+ }
+ }
+
+ 'All redirection of GET' = {
+ $params = @{
+ FollowRedirects = 'All'
+ Uri = "http://$httpbin_host/redirect/2"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get"
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'All redirection of HEAD' = {
+ $params = @{
+ follow_redirects = 'All'
+ method = 'HEAD'
+ Uri = "http://$httpbin_host/redirect/2"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "http://$httpbin_host/get"
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'All redirection of PUT' = {
+ $params = @{
+ FollowRedirects = 'All'
+ Method = 'PUT'
+ Uri = "http://$httpbin_host/redirect-to?url=https://$httpbin_host/put"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "https://$httpbin_host/put"
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'Exceeds maximum redirection - ignored' = {
+ $params = @{
+ MaximumRedirection = 4
+ Uri = "https://$httpbin_host/redirect/5"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -IgnoreBadResponse -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "https://$httpbin_host/relative-redirect/1"
+ $Response.StatusCode | Assert-Equals -Expected 302
+ }
+ }
+
+ 'Exceeds maximum redirection - exception' = {
+ $params = @{
+ MaximumRedirection = 1
+ Uri = "https://$httpbin_host/redirect/2"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ $failed = $false
+ try {
+ $null = Invoke-WithWebRequest -Module $module -Request $r -Script {}
+ } catch {
+ $_.Exception.GetType().Name | Assert-Equals -Expected 'WebException'
+ $_.Exception.Message | Assert-Equals -Expected 'Too many automatic redirections were attempted.'
+ $failed = $true
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ 'Basic auth as Credential' = {
+ $params = @{
+ Url = "http://$httpbin_host/basic-auth/username/password"
+ UrlUsername = 'username'
+ UrlPassword = 'password'
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -IgnoreBadResponse -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'Basic auth as Header' = {
+ $params = @{
+ Url = "http://$httpbin_host/basic-auth/username/password"
+ url_username = 'username'
+ url_password = 'password'
+ ForceBasicAuth = $true
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ Invoke-WithWebRequest -Module $module -Request $r -IgnoreBadResponse -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.StatusCode | Assert-Equals -Expected 200
+ }
+ }
+
+ 'Send request with headers' = {
+ $params = @{
+ Headers = @{
+ 'Content-Length' = 0
+ testingheader = 'testing_header'
+ TestHeader = 'test-header'
+ 'User-Agent' = 'test-agent'
+ }
+ Url = "https://$httpbin_host/get"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ $actual = Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.StatusCode | Assert-Equals -Expected 200
+ Convert-StreamToString -Stream $Stream
+ } | ConvertFrom-Json
+
+ $actual.headers.'Testheader' | Assert-Equals -Expected 'test-header'
+ $actual.headers.'testingheader' | Assert-Equals -Expected 'testing_header'
+ $actual.headers.'User-Agent' | Assert-Equals -Expected 'test-agent'
+ }
+
+ 'Request with timeout' = {
+ $params = @{
+ Uri = "https://$httpbin_host/delay/5"
+ Timeout = 1
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ $failed = $false
+ try {
+ $null = Invoke-WithWebRequest -Module $module -Request $r -Script {}
+ } catch {
+ $failed = $true
+ $_.Exception.GetType().Name | Assert-Equals -Expected 'WebException'
+ $_.Exception.Message | Assert-Equals -Expected 'The operation has timed out'
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ 'Request with file URI' = {
+ $filePath = Join-Path $module.Tmpdir -ChildPath 'test.txt'
+ Set-Content -LiteralPath $filePath -Value 'test'
+
+ $r = Get-AnsibleWebRequest -Uri $filePath
+
+ $actual = Invoke-WithWebRequest -Module $module -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ContentLength | Assert-Equals -Expected 6
+ Convert-StreamToString -Stream $Stream
+ }
+ $actual | Assert-Equals -Expected "test`r`n"
+ $module.Result.msg | Assert-Equals -Expected "OK"
+ $module.Result.status_code | Assert-Equals -Expected 200
+ }
+
+ 'Web request based on module options' = {
+ Set-Variable complex_args -Scope Global -Value @{
+ url = "https://$httpbin_host/redirect/2"
+ method = 'GET'
+ follow_redirects = 'safe'
+ headers = @{
+ 'User-Agent' = 'other-agent'
+ }
+ http_agent = 'actual-agent'
+ maximum_redirection = 2
+ timeout = 10
+ validate_certs = $false
+ }
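+ # With an empty args array, AnsibleModule reads its parameters from the
+ # global complex_args variable set above.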
+ $spec = @{
+ options = @{
+ url = @{ type = 'str'; required = $true }
+ test = @{ type = 'str'; choices = 'abc', 'def'}
+ }
+ mutually_exclusive = @(,@('url', 'test'))
+ }
+
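+ # Get-AnsibleWebRequestSpec is passed as a spec fragment so the shared web
+ # request options (url, method, follow_redirects, etc.) are merged into the
+ # module's own spec.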
+ $testModule = [Ansible.Basic.AnsibleModule]::Create(@(), $spec, @(Get-AnsibleWebRequestSpec))
+ $r = Get-AnsibleWebRequest -Url $testModule.Params.url -Module $testModule
+
+ $actual = Invoke-WithWebRequest -Module $testModule -Request $r -Script {
+ Param ([System.Net.WebResponse]$Response, [System.IO.Stream]$Stream)
+
+ $Response.ResponseUri | Assert-Equals -Expected "https://$httpbin_host/get"
+ Convert-StreamToString -Stream $Stream
+ } | ConvertFrom-Json
+ $actual.headers.'User-Agent' | Assert-Equals -Expected 'actual-agent'
+ }
+
+ 'Web request with default proxy' = {
+ $params = @{
+ Uri = "https://$httpbin_host/get"
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ $null -ne $r.Proxy | Assert-Equals -Expected $true
+ }
+
+ 'Web request with no proxy' = {
+ $params = @{
+ Uri = "https://$httpbin_host/get"
+ UseProxy = $false
+ }
+ $r = Get-AnsibleWebRequest @params
+
+ $null -eq $r.Proxy | Assert-Equals -Expected $true
+ }
+}
+
+# Reset the global module args before each test so option values from one test
+# cannot leak into the next; $test is read by Assert-Equals when reporting a
+# failure.
+foreach ($testImpl in $tests.GetEnumerator()) {
+ Set-Variable -Name complex_args -Scope Global -Value @{}
+ $test = $testImpl.Key
+ &$testImpl.Value
+}
+
+$module.Result.data = "success"
+$module.ExitJson()
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml
new file mode 100644
index 00000000..829d0a78
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- prepare_http_tests
diff --git a/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml
new file mode 100644
index 00000000..57d8138a
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.ModuleUtils.WebRequest/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: test Ansible.ModuleUtils.WebRequest
+ web_request_test:
+ httpbin_host: '{{ httpbin_host }}'
+ register: web_request
+
+- name: assert test Ansible.ModuleUtils.WebRequest succeeded
+ assert:
+ that:
+ - web_request.data == 'success'
diff --git a/test/integration/targets/module_utils_Ansible.Privilege/aliases b/test/integration/targets/module_utils_Ansible.Privilege/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Privilege/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1 b/test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1
new file mode 100644
index 00000000..7c76036a
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Privilege/library/ansible_privilege_tests.ps1
@@ -0,0 +1,324 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -CSharpUtil Ansible.Privilege
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actual_value = $Actual[$i]
+ $expected_value = $Expected[$i]
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+ $module.FailJson("AssertionError: actual != expected")
+ }
+}
+
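+# Asserts that two dictionaries are equal: key sets must match exactly and each
+# value is compared with Assert-Equals, recursing into nested dictionaries and
+# lists.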
+Function Assert-DictionaryEquals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+ $actual_keys = $Actual.Keys
+ $expected_keys = $Expected.Keys
+
+ $actual_keys.Count | Assert-Equals -Expected $expected_keys.Count
+ foreach ($actual_entry in $Actual.GetEnumerator()) {
+ $actual_key = $actual_entry.Key
+ ($actual_key -cin $expected_keys) | Assert-Equals -Expected $true
+ $actual_value = $actual_entry.Value
+ $expected_value = $Expected.$actual_key
+
+ if ($actual_value -is [System.Collections.IDictionary]) {
+ $actual_value | Assert-DictionaryEquals -Expected $expected_value
+ } elseif ($actual_value -is [System.Collections.ArrayList]) {
+ for ($i = 0; $i -lt $actual_value.Count; $i++) {
+ $actual_entry = $actual_value[$i]
+ $expected_entry = $expected_value[$i]
+ if ($actual_entry -is [System.Collections.IDictionary]) {
+ $actual_entry | Assert-DictionaryEquals -Expected $expected_entry
+ } else {
+ Assert-Equals -Actual $actual_entry -Expected $expected_entry
+ }
+ }
+ } else {
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ }
+ foreach ($expected_key in $expected_keys) {
+ ($expected_key -cin $actual_keys) | Assert-Equals -Expected $true
+ }
+}
+
+$process = [Ansible.Privilege.PrivilegeUtil]::GetCurrentProcess()
+
+$tests = @{
+ "Check valid privilege name" = {
+ $actual = [Ansible.Privilege.PrivilegeUtil]::CheckPrivilegeName("SeTcbPrivilege")
+ $actual | Assert-Equals -Expected $true
+ }
+
+ "Check invalid privilege name" = {
+ $actual = [Ansible.Privilege.PrivilegeUtil]::CheckPrivilegeName("SeFake")
+ $actual | Assert-Equals -Expected $false
+ }
+
+ "Disable a privilege" = {
+ # Ensure the privilege is enabled at the start
+ [Ansible.Privilege.PrivilegeUtil]::EnablePrivilege($process, "SeTimeZonePrivilege") > $null
+
+ $actual = [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege")
+ $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2'
+ $actual.Count | Assert-Equals -Expected 1
+ $actual.SeTimeZonePrivilege | Assert-Equals -Expected $true
+
+ # Disable again
+ $actual = [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege")
+ $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2'
+ $actual.Count | Assert-Equals -Expected 0
+ }
+
+ "Enable a privilege" = {
+ # Ensure the privilege is disabled at the start
+ [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege") > $null
+
+ $actual = [Ansible.Privilege.PrivilegeUtil]::EnablePrivilege($process, "SeTimeZonePrivilege")
+ $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2'
+ $actual.Count | Assert-Equals -Expected 1
+ $actual.SeTimeZonePrivilege | Assert-Equals -Expected $false
+
+ # Disable again
+ $actual = [Ansible.Privilege.PrivilegeUtil]::EnablePrivilege($process, "SeTimeZonePrivilege")
+ $actual.GetType().Name | Assert-Equals -Expected 'Dictionary`2'
+ $actual.Count | Assert-Equals -Expected 0
+ }
+
+ "Disable and revert privileges" = {
+ $current_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+
+ $previous_state = [Ansible.Privilege.PrivilegeUtil]::DisableAllPrivileges($process)
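+ # DisableAllPrivileges returns the previous state of every privilege it
+ # changed, which can later be passed to SetTokenPrivileges to restore it.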
+ $previous_state.GetType().Name | Assert-Equals -Expected 'Dictionary`2'
+ foreach ($previous_state_entry in $previous_state.GetEnumerator()) {
+ $previous_state_entry.Value | Assert-Equals -Expected $true
+ }
+
+ # Disable again
+ $previous_state2 = [Ansible.Privilege.PrivilegeUtil]::DisableAllPrivileges($process)
+ $previous_state2.Count | Assert-Equals -Expected 0
+
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ foreach ($actual_entry in $actual.GetEnumerator()) {
+ $actual_entry.Value -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ }
+
+ [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process, $previous_state) > $null
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual | Assert-DictionaryEquals -Expected $current_state
+ }
+
+ "Remove a privilege" = {
+ [Ansible.Privilege.PrivilegeUtil]::RemovePrivilege($process, "SeUndockPrivilege") > $null
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.ContainsKey("SeUndockPrivilege") | Assert-Equals -Expected $false
+ }
+
+ "Test Enabler" = {
+ # Disable privilege at the start
+ $new_state = @{
+ SeTimeZonePrivilege = $false
+ SeShutdownPrivilege = $false
+ SeIncreaseWorkingSetPrivilege = $false
+ }
+ [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process, $new_state) > $null
+ $check_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $check_state.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $check_state.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $check_state.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+
+ # Check that strict = false ignores requested privileges the token does not
+ # hold but still enables the ones it does
+ $enabler = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $false, "SeTimeZonePrivilege", "SeShutdownPrivilege", "SeTcbPrivilege"
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ $actual.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $actual.ContainsKey("SeTcbPrivilege") | Assert-Equals -Expected $false
+
+ # Now verify a no-op enabler will not revert back to disabled
+ $enabler2 = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $false, "SeTimeZonePrivilege", "SeShutdownPrivilege", "SeTcbPrivilege"
+ $enabler2.Dispose()
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+
+ # Verify that when disposing the object the privileges are reverted
+ $enabler.Dispose()
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ }
+
+ "Test Enabler strict" = {
+ # Disable privilege at the start
+ $new_state = @{
+ SeTimeZonePrivilege = $false
+ SeShutdownPrivilege = $false
+ SeIncreaseWorkingSetPrivilege = $false
+ }
+ [Ansible.Privilege.PrivilegeUtil]::SetTokenPrivileges($process, $new_state) > $null
+ $check_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $check_state.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $check_state.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $check_state.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+
+ # Check that strict = true enables the requested privileges when the token
+ # holds them all
+ $enabler = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $true, "SeTimeZonePrivilege", "SeShutdownPrivilege"
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ $actual.SeIncreaseWorkingSetPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+
+ # Now verify a no-op enabler will not revert back to disabled
+ $enabler2 = New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $true, "SeTimeZonePrivilege", "SeShutdownPrivilege"
+ $enabler2.Dispose()
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+ $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected ([Ansible.Privilege.PrivilegeAttributes]::Enabled)
+
+ # Verify that when disposing the object the privileges are reverted
+ $enabler.Dispose()
+ $actual = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $actual.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ $actual.SeShutdownPrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+ }
+
+ "Test Enabler invalid privilege" = {
+ $failed = $false
+ try {
+ New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $false, "SeTimeZonePrivilege", "SeFake"
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.Message | Assert-Equals -Expected "Failed to enable privilege(s) SeTimeZonePrivilege, SeFake (A specified privilege does not exist, Win32ErrorCode 1313)"
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Test Enabler strict failure" = {
+ # Start disabled
+ [Ansible.Privilege.PrivilegeUtil]::DisablePrivilege($process, "SeTimeZonePrivilege") > $null
+ $check_state = [Ansible.Privilege.PrivilegeUtil]::GetAllPrivilegeInfo($process)
+ $check_state.SeTimeZonePrivilege -band [Ansible.Privilege.PrivilegeAttributes]::Enabled | Assert-Equals -Expected 0
+
+ $failed = $false
+ try {
+ New-Object -TypeName Ansible.Privilege.PrivilegeEnabler -ArgumentList $true, "SeTimeZonePrivilege", "SeTcbPrivilege"
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.Message | Assert-Equals -Expected "Failed to enable privilege(s) SeTimeZonePrivilege, SeTcbPrivilege (Not all privileges or groups referenced are assigned to the caller, Win32ErrorCode 1300)"
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+}
+
+foreach ($test_impl in $tests.GetEnumerator()) {
+ $test = $test_impl.Key
+ &$test_impl.Value
+}
+
+$module.Result.data = "success"
+$module.ExitJson()
+
diff --git a/test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml
new file mode 100644
index 00000000..888394d4
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Privilege/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: test Ansible.Privilege.cs
+ ansible_privilege_tests:
+ register: ansible_privilege_test
+
+- name: assert test Ansible.Privilege.cs
+ assert:
+ that:
+ - ansible_privilege_test.data == "success"
diff --git a/test/integration/targets/module_utils_Ansible.Process/aliases b/test/integration/targets/module_utils_Ansible.Process/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Process/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1 b/test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1
new file mode 100644
index 00000000..d906dfc5
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Process/library/ansible_process_tests.ps1
@@ -0,0 +1,236 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -CSharpUtil Ansible.Process
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actual_value = $Actual[$i]
+ $expected_value = $Expected[$i]
+ Assert-Equals -Actual $actual_value -Expected $expected_value
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+ $module.FailJson("AssertionError: actual != expected")
+ }
+}
+
+$tests = @{
+ "ParseCommandLine empty string" = {
+ $expected = @((Get-Process -Id $pid).Path)
+ $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine("")
+ Assert-Equals -Actual $actual -Expected $expected
+ }
+
+ "ParseCommandLine single argument" = {
+ $expected = @("powershell.exe")
+ $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine("powershell.exe")
+ Assert-Equals -Actual $actual -Expected $expected
+ }
+
+ "ParseCommandLine multiple arguments" = {
+ $expected = @("powershell.exe", "-File", "C:\temp\script.ps1")
+ $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine("powershell.exe -File C:\temp\script.ps1")
+ Assert-Equals -Actual $actual -Expected $expected
+ }
+
+ "ParseCommandLine comples arguments" = {
+ $expected = @('abc', 'd', 'ef gh', 'i\j', 'k"l', 'm\n op', 'ADDLOCAL=qr, s', 'tuv\', 'w''x', 'yz')
+ $actual = [Ansible.Process.ProcessUtil]::ParseCommandLine('abc d "ef gh" i\j k\"l m\\"n op" ADDLOCAL="qr, s" tuv\ w''x yz')
+ Assert-Equals -Actual $actual -Expected $expected
+ }
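+
+ # The expectations above follow the Windows CommandLineToArgvW quoting rules:
+ # a backslash is literal unless it precedes a double quote, '\"' yields a
+ # literal quote, and quoted segments may contain spaces.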
+
+ "SearchPath normal" = {
+ $expected = "$($env:SystemRoot)\System32\WindowsPowerShell\v1.0\powershell.exe"
+ $actual = [Ansible.Process.ProcessUtil]::SearchPath("powershell.exe")
+ $actual | Assert-Equals -Expected $expected
+ }
+
+ "SearchPath missing" = {
+ $failed = $false
+ try {
+ [Ansible.Process.ProcessUtil]::SearchPath("fake.exe")
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "System.IO.FileNotFoundException"
+ $expected = 'Exception calling "SearchPath" with "1" argument(s): "Could not find file ''fake.exe''."'
+ $_.Exception.Message | Assert-Equals -Expected $expected
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "CreateProcess basic" = {
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess("whoami.exe")
+ $actual.GetType().FullName | Assert-Equals -Expected "Ansible.Process.Result"
+ $actual.StandardOut | Assert-Equals -Expected "$(&whoami.exe)`r`n"
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess stderr" = {
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess("powershell.exe [System.Console]::Error.WriteLine('hi')")
+ $actual.StandardOut | Assert-Equals -Expected ""
+ $actual.StandardError | Assert-Equals -Expected "hi`r`n"
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess exit code" = {
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess("powershell.exe exit 10")
+ $actual.StandardOut | Assert-Equals -Expected ""
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 10
+ }
+
+ "CreateProcess bad executable" = {
+ $failed = $false
+ try {
+ [Ansible.Process.ProcessUtil]::CreateProcess("fake.exe")
+ } catch {
+ $failed = $true
+ $_.Exception.InnerException.GetType().FullName | Assert-Equals -Expected "Ansible.Process.Win32Exception"
+ $expected = 'Exception calling "CreateProcess" with "1" argument(s): "CreateProcessW() failed '
+ $expected += '(The system cannot find the file specified, Win32ErrorCode 2)"'
+ $_.Exception.Message | Assert-Equals -Expected $expected
+ }
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "CreateProcess with unicode" = {
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess("cmd.exe /c echo 💩 café")
+ $actual.StandardOut | Assert-Equals -Expected "💩 café`r`n"
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, "cmd.exe /c echo 💩 café", $null, $null)
+ $actual.StandardOut | Assert-Equals -Expected "💩 café`r`n"
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess without working dir" = {
+ $expected = $pwd.Path + "`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe $pwd.Path', $null, $null)
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with working dir" = {
+ $expected = "C:\Windows`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe $pwd.Path', "C:\Windows", $null)
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess without environment" = {
+ $expected = "$($env:USERNAME)`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe $env:TEST; $env:USERNAME', $null, $null)
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with environment" = {
+ $env_vars = @{
+ TEST = "tesTing"
+ TEST2 = "Testing 2"
+ }
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'cmd.exe /c set', $null, $env_vars)
+ ("TEST=tesTing" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true
+ ("TEST2=Testing 2" -cin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true
+ ("USERNAME=$($env:USERNAME)" -cnotin $actual.StandardOut.Split("`r`n")) | Assert-Equals -Expected $true
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with string stdin" = {
+ $expected = "input value`r`n`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()',
+ $null, $null, "input value")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with string stdin and newline" = {
+ $expected = "input value`r`n`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()',
+ $null, $null, "input value`r`n")
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with byte stdin" = {
+ $expected = "input value`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()',
+ $null, $null, [System.Text.Encoding]::UTF8.GetBytes("input value"))
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with byte stdin and newline" = {
+ $expected = "input value`r`n`r`n"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, 'powershell.exe [System.Console]::In.ReadToEnd()',
+ $null, $null, [System.Text.Encoding]::UTF8.GetBytes("input value`r`n"))
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with lpApplicationName" = {
+ $expected = "abc`r`n"
+ $full_path = "$($env:SystemRoot)\System32\WindowsPowerShell\v1.0\powershell.exe"
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($full_path, "Write-Output 'abc'", $null, $null)
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($full_path, "powershell.exe Write-Output 'abc'", $null, $null)
+ $actual.StandardOut | Assert-Equals -Expected $expected
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+
+ "CreateProcess with unicode and us-ascii encoding" = {
+ $poop = [System.Char]::ConvertFromUtf32(0xE05A) # Code coverage breaks when the script contains literal unicode chars, so build the char from its code point instead
+ $actual = [Ansible.Process.ProcessUtil]::CreateProcess($null, "cmd.exe /c echo $poop café", $null, $null, '', 'us-ascii')
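+ # Bytes the us-ascii decoder cannot map come back as '?'; the expected string
+ # matches the UTF-8 byte lengths (3 for the code point above, 2 for 'é').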
+ $actual.StandardOut | Assert-Equals -Expected "??? caf??`r`n"
+ $actual.StandardError | Assert-Equals -Expected ""
+ $actual.ExitCode | Assert-Equals -Expected 0
+ }
+}
+
+foreach ($test_impl in $tests.GetEnumerator()) {
+ $test = $test_impl.Key
+ &$test_impl.Value
+}
+
+$module.Result.data = "success"
+$module.ExitJson()
diff --git a/test/integration/targets/module_utils_Ansible.Process/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Process/tasks/main.yml
new file mode 100644
index 00000000..13a5c16e
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Process/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: test Ansible.Process.cs
+ ansible_process_tests:
+ register: ansible_process_tests
+
+- name: assert test Ansible.Process.cs
+ assert:
+ that:
+ - ansible_process_tests.data == "success"
diff --git a/test/integration/targets/module_utils_Ansible.Service/aliases b/test/integration/targets/module_utils_Ansible.Service/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Service/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1 b/test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1
new file mode 100644
index 00000000..6c8f729b
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Service/library/ansible_service_tests.ps1
@@ -0,0 +1,937 @@
+#!powershell
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -CSharpUtil Ansible.Service
+#Requires -Module Ansible.ModuleUtils.ArgvParser
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, @{})
+
+$path = "$env:SystemRoot\System32\svchost.exe"
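+# svchost.exe is only used as a valid binary path for the test services; the
+# services themselves are never started.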
+
+Function Assert-Equals {
+ param(
+ [Parameter(Mandatory=$true, ValueFromPipeline=$true)][AllowNull()]$Actual,
+ [Parameter(Mandatory=$true, Position=0)][AllowNull()]$Expected
+ )
+
+ $matched = $false
+ if ($Actual -is [System.Collections.ArrayList] -or $Actual -is [Array] -or $Actual -is [System.Collections.IList]) {
+ $Actual.Count | Assert-Equals -Expected $Expected.Count
+ for ($i = 0; $i -lt $Actual.Count; $i++) {
+ $actualValue = $Actual[$i]
+ $expectedValue = $Expected[$i]
+ Assert-Equals -Actual $actualValue -Expected $expectedValue
+ }
+ $matched = $true
+ } else {
+ $matched = $Actual -ceq $Expected
+ }
+
+ if (-not $matched) {
+ if ($Actual -is [PSObject]) {
+ $Actual = $Actual.ToString()
+ }
+
+ $call_stack = (Get-PSCallStack)[1]
+ $module.Result.test = $test
+ $module.Result.actual = $Actual
+ $module.Result.expected = $Expected
+ $module.Result.line = $call_stack.ScriptLineNumber
+ $module.Result.method = $call_stack.Position.Text
+
+ $module.FailJson("AssertionError: actual != expected")
+ }
+}
+
+Function Invoke-Sc {
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [String]
+ $Action,
+
+ [Parameter(Mandatory=$true)]
+ [String]
+ $Name,
+
+ [Object]
+ $Arguments
+ )
+
+ $commandArgs = [System.Collections.Generic.List[String]]@("sc.exe", $Action, $Name)
+ if ($null -ne $Arguments) {
+ if ($Arguments -is [System.Collections.IDictionary]) {
+ foreach ($arg in $Arguments.GetEnumerator()) {
+ $commandArgs.Add("$($arg.Key)=")
+ $commandArgs.Add($arg.Value)
+ }
+ } else {
+ foreach ($arg in $Arguments) {
+ $commandArgs.Add($arg)
+ }
+ }
+ }
+
+ $command = Argv-ToString -arguments $commandArgs
+
+ $res = Run-Command -command $command
+ if ($res.rc -ne 0) {
+ $module.Result.rc = $res.rc
+ $module.Result.stdout = $res.stdout
+ $module.Result.stderr = $res.stderr
+ $module.FailJson("Failed to invoke sc with: $command")
+ }
+
+ $info = @{ Name = $Name }
+
+ if ($Action -eq 'qtriggerinfo') {
+ # qtriggerinfo output is formatted differently and needs extra parsing beyond the usual key/value handling.
+ $info.Triggers = [System.Collections.Generic.List[PSObject]]@()
+ }
+
+ $currentKey = $null
+ $qtriggerSection = @{}
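+
+ # sc.exe prints 'KEY : value' lines; continuation lines omit the key, so
+ # $currentKey remembers the last key seen and repeated values for the same
+ # key are collected into a list.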
+ $res.stdout -split "`r`n" | Foreach-Object -Process {
+ $line = $_.Trim()
+
+ if ($Action -eq 'qtriggerinfo' -and $line -in @('START SERVICE', 'STOP SERVICE')) {
+ if ($qtriggerSection.Count -gt 0) {
+ $info.Triggers.Add([PSCustomObject]$qtriggerSection)
+ $qtriggerSection = @{}
+ }
+
+ $qtriggerSection = @{
+ Action = $line
+ }
+ }
+
+ if (-not $line -or (-not $line.Contains(':') -and $null -eq $currentKey)) {
+ return
+ }
+
+ $lineSplit = $line.Split(':', 2)
+ if ($lineSplit.Length -eq 2) {
+ $k = $lineSplit[0].Trim()
+ if (-not $k) {
+ $k = $currentKey
+ }
+
+ $v = $lineSplit[1].Trim()
+ } else {
+ $k = $currentKey
+ $v = $line
+ }
+
+ if ($qtriggerSection.Count -gt 0) {
+ if ($k -eq 'DATA') {
+ $qtriggerSection.Data.Add($v)
+ } else {
+ $qtriggerSection.Type = $k
+ $qtriggerSection.SubType = $v
+ $qtriggerSection.Data = [System.Collections.Generic.List[String]]@()
+ }
+ } else {
+ if ($info.ContainsKey($k)) {
+ if ($info[$k] -isnot [System.Collections.Generic.List[String]]) {
+ $info[$k] = [System.Collections.Generic.List[String]]@($info[$k])
+ }
+ $info[$k].Add($v)
+ } else {
+ $currentKey = $k
+ $info[$k] = $v
+ }
+ }
+ }
+
+ if ($qtriggerSection.Count -gt 0) {
+ $info.Triggers.Add([PSCustomObject]$qtriggerSection)
+ }
+
+ [PSCustomObject]$info
+}
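+
+# Illustrative usage (assumption, mirroring the calls in the tests below):
+#   $info = Invoke-Sc -Action qc -Name $serviceName
+#   $info.START_TYPE  # e.g. '3 DEMAND_START'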
+
+$tests = [Ordered]@{
+ "Props on service created by New-Service" = {
+ $actual = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+
+ $actual.ServiceName | Assert-Equals -Expected $serviceName
+ $actual.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess)
+ $actual.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart)
+ $actual.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Normal)
+ $actual.Path | Assert-Equals -Expected ('"{0}"' -f $path)
+ $actual.LoadOrderGroup | Assert-Equals -Expected ""
+ $actual.DependentOn.Count | Assert-Equals -Expected 0
+ $actual.Account | Assert-Equals -Expected (
+ ([System.Security.Principal.SecurityIdentifier]'S-1-5-18').Translate([System.Security.Principal.NTAccount])
+ )
+ $actual.DisplayName | Assert-Equals -Expected $serviceName
+ $actual.Description | Assert-Equals -Expected $null
+ $actual.FailureActions.ResetPeriod | Assert-Equals -Expected 0
+ $actual.FailureActions.RebootMsg | Assert-Equals -Expected $null
+ $actual.FailureActions.Command | Assert-Equals -Expected $null
+ $actual.FailureActions.Actions.Count | Assert-Equals -Expected 0
+ $actual.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $false
+ $actual.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::None)
+ $actual.RequiredPrivileges.Count | Assert-Equals -Expected 0
+ # Cannot test default values as it differs per OS version
+ $null -ne $actual.PreShutdownTimeout | Assert-Equals -Expected $true
+ $actual.Triggers.Count | Assert-Equals -Expected 0
+ $actual.PreferredNode | Assert-Equals -Expected $null
+ if ([Environment]::OSVersion.Version -ge [Version]'6.3') {
+ $actual.LaunchProtection | Assert-Equals -Expected ([Ansible.Service.LaunchProtection]::None)
+ } else {
+ $actual.LaunchProtection | Assert-Equals -Expected $null
+ }
+ $actual.State | Assert-Equals -Expected ([Ansible.Service.ServiceStatus]::Stopped)
+ $actual.Win32ExitCode | Assert-Equals -Expected 1077 # ERROR_SERVICE_NEVER_STARTED
+ $actual.ServiceExitCode | Assert-Equals -Expected 0
+ $actual.Checkpoint | Assert-Equals -Expected 0
+ $actual.WaitHint | Assert-Equals -Expected 0
+ $actual.ProcessId | Assert-Equals -Expected 0
+ $actual.ServiceFlags | Assert-Equals -Expected ([Ansible.Service.ServiceFlags]::None)
+ $actual.DependedBy.Count | Assert-Equals -Expected 0
+ }
+
+ "Service creation through util" = {
+ $testName = "$($serviceName)_2"
+ $actual = [Ansible.Service.Service]::Create($testName, '"{0}"' -f $path)
+
+ try {
+ $cmdletService = Get-Service -Name $testName -ErrorAction SilentlyContinue
+ $null -ne $cmdletService | Assert-Equals -Expected $true
+
+ $actual.ServiceName | Assert-Equals -Expected $testName
+ $actual.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess)
+ $actual.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart)
+ $actual.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Normal)
+ $actual.Path | Assert-Equals -Expected ('"{0}"' -f $path)
+ $actual.LoadOrderGroup | Assert-Equals -Expected ""
+ $actual.DependentOn.Count | Assert-Equals -Expected 0
+ $actual.Account | Assert-Equals -Expected (
+ ([System.Security.Principal.SecurityIdentifier]'S-1-5-18').Translate([System.Security.Principal.NTAccount])
+ )
+ $actual.DisplayName | Assert-Equals -Expected $testName
+ $actual.Description | Assert-Equals -Expected $null
+ $actual.FailureActions.ResetPeriod | Assert-Equals -Expected 0
+ $actual.FailureActions.RebootMsg | Assert-Equals -Expected $null
+ $actual.FailureActions.Command | Assert-Equals -Expected $null
+ $actual.FailureActions.Actions.Count | Assert-Equals -Expected 0
+ $actual.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $false
+ $actual.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::None)
+ $actual.RequiredPrivileges.Count | Assert-Equals -Expected 0
+ $null -ne $actual.PreShutdownTimeout | Assert-Equals -Expected $true
+ $actual.Triggers.Count | Assert-Equals -Expected 0
+ $actual.PreferredNode | Assert-Equals -Expected $null
+ if ([Environment]::OSVersion.Version -ge [Version]'6.3') {
+ $actual.LaunchProtection | Assert-Equals -Expected ([Ansible.Service.LaunchProtection]::None)
+ } else {
+ $actual.LaunchProtection | Assert-Equals -Expected $null
+ }
+ $actual.State | Assert-Equals -Expected ([Ansible.Service.ServiceStatus]::Stopped)
+ $actual.Win32ExitCode | Assert-Equals -Expected 1077 # ERROR_SERVICE_NEVER_STARTED
+ $actual.ServiceExitCode | Assert-Equals -Expected 0
+ $actual.Checkpoint | Assert-Equals -Expected 0
+ $actual.WaitHint | Assert-Equals -Expected 0
+ $actual.ProcessId | Assert-Equals -Expected 0
+ $actual.ServiceFlags | Assert-Equals -Expected ([Ansible.Service.ServiceFlags]::None)
+ $actual.DependedBy.Count | Assert-Equals -Expected 0
+ } finally {
+ $actual.Delete()
+ }
+ }
+
+ "Fail to open non-existing service" = {
+ $failed = $false
+ try {
+ $null = New-Object -TypeName Ansible.Service.Service -ArgumentList 'fake_service'
+ } catch [Ansible.Service.ServiceManagerException] {
+ # 1060 == ERROR_SERVICE_DOES_NOT_EXIST
+ $_.Exception.Message -like '*Win32ErrorCode 1060 - 0x00000424*' | Assert-Equals -Expected $true
+ $failed = $true
+ }
+
+ $failed | Assert-Equals -Expected $true
+ }
+
+ "Open with specific access rights" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList @(
+ $serviceName, [Ansible.Service.ServiceRights]'QueryConfig, QueryStatus'
+ )
+
+ # QueryStatus can get the status
+ $service.State | Assert-Equals -Expected ([Ansible.Service.ServiceStatus]::Stopped)
+
+ # Should fail to get the config because we did not request that right
+ $failed = $false
+ try {
+ $service.Path = 'fail'
+ } catch [Ansible.Service.ServiceManagerException] {
+ # 5 == ERROR_ACCESS_DENIED
+ $_.Exception.Message -like '*Win32ErrorCode 5 - 0x00000005*' | Assert-Equals -Expected $true
+ $failed = $true
+ }
+
+ $failed | Assert-Equals -Expected $true
+
+ }
+
+ "Modfiy ServiceType" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.ServiceType = [Ansible.Service.ServiceType]::Win32ShareProcess
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32ShareProcess)
+ $actual.TYPE | Assert-Equals -Expected "20 WIN32_SHARE_PROCESS"
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{type="own"}
+ $service.Refresh()
+ $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess)
+ }
+
+ "Create desktop interactive service" = {
+ $service = New-Object -Typename Ansible.Service.Service -ArgumentList $serviceName
+ $service.ServiceType = [Ansible.Service.ServiceType]'Win32OwnProcess, InteractiveProcess'
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $actual.TYPE | Assert-Equals -Expected "110 WIN32_OWN_PROCESS (interactive)"
+ $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]'Win32OwnProcess, InteractiveProcess')
+
+ # Change back from interactive process
+ $service.ServiceType = [Ansible.Service.ServiceType]::Win32OwnProcess
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $actual.TYPE | Assert-Equals -Expected "10 WIN32_OWN_PROCESS"
+ $service.ServiceType | Assert-Equals -Expected ([Ansible.Service.ServiceType]::Win32OwnProcess)
+
+ $service.Account = [System.Security.Principal.SecurityIdentifier]'S-1-5-20'
+
+ $failed = $false
+ try {
+ $service.ServiceType = [Ansible.Service.ServiceType]'Win32OwnProcess, InteractiveProcess'
+ } catch [Ansible.Service.ServiceManagerException] {
+ $failed = $true
+ $_.Exception.NativeErrorCode | Assert-Equals -Expected 87 # ERROR_INVALID_PARAMETER
+ }
+ $failed | Assert-Equals -Expected $true
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $actual.TYPE | Assert-Equals -Expected "10 WIN32_OWN_PROCESS"
+ }
+
+ "Modify StartType" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.StartType = [Ansible.Service.ServiceStartType]::Disabled
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::Disabled)
+ $actual.START_TYPE | Assert-Equals -Expected "4 DISABLED"
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{start="demand"}
+ $service.Refresh()
+ $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart)
+ }
+
+ "Modify StartType auto delayed" = {
+ # The delayed start type is a modifier of the AutoStart type that is stored
+ # in a separate config entry. These checks make sure the util converts
+ # between the various start types correctly in both directions.
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.StartType = [Ansible.Service.ServiceStartType]::Disabled # Start from Disabled
+
+ # Disabled -> Auto Start Delayed
+ $service.StartType = [Ansible.Service.ServiceStartType]::AutoStartDelayed
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::AutoStartDelayed)
+ $actual.START_TYPE | Assert-Equals -Expected "2 AUTO_START (DELAYED)"
+
+ # Auto Start Delayed -> Auto Start
+ $service.StartType = [Ansible.Service.ServiceStartType]::AutoStart
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::AutoStart)
+ $actual.START_TYPE | Assert-Equals -Expected "2 AUTO_START"
+
+ # Auto Start -> Auto Start Delayed
+ $service.StartType = [Ansible.Service.ServiceStartType]::AutoStartDelayed
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::AutoStartDelayed)
+ $actual.START_TYPE | Assert-Equals -Expected "2 AUTO_START (DELAYED)"
+
+ # Auto Start Delayed -> Manual
+ $service.StartType = [Ansible.Service.ServiceStartType]::DemandStart
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.StartType | Assert-Equals -Expected ([Ansible.Service.ServiceStartType]::DemandStart)
+ $actual.START_TYPE | Assert-Equals -Expected "3 DEMAND_START"
+ }
+
+ "Modify ErrorControl" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.ErrorControl = [Ansible.Service.ErrorControl]::Severe
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Severe)
+ $actual.ERROR_CONTROL | Assert-Equals -Expected "2 SEVERE"
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{error="ignore"}
+ $service.Refresh()
+ $service.ErrorControl | Assert-Equals -Expected ([Ansible.Service.ErrorControl]::Ignore)
+ }
+
+ "Modify Path" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.Path = "Fake path"
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Path | Assert-Equals -Expected "Fake path"
+ $actual.BINARY_PATH_NAME | Assert-Equals -Expected "Fake path"
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{binpath="other fake path"}
+ $service.Refresh()
+ $service.Path | Assert-Equals -Expected "other fake path"
+ }
+
+ "Modify LoadOrderGroup" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.LoadOrderGroup = "my group"
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.LoadOrderGroup | Assert-Equals -Expected "my group"
+ $actual.LOAD_ORDER_GROUP | Assert-Equals -Expected "my group"
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{group=""}
+ $service.Refresh()
+ $service.LoadOrderGroup | Assert-Equals -Expected ""
+ }
+
+ "Modify DependentOn" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.DependentOn = @("HTTP", "WinRM")
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
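+        # The leading comma wraps the collection in a one-element array so the pipeline
+        # hands Assert-Equals the whole array instead of unrolling it element by element.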
+ @(,$service.DependentOn) | Assert-Equals -Expected @("HTTP", "WinRM")
+ @(,$actual.DEPENDENCIES) | Assert-Equals -Expected @("HTTP", "WinRM")
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{depend=""}
+ $service.Refresh()
+ $service.DependentOn.Count | Assert-Equals -Expected 0
+ }
+
+ "Modify Account - service account" = {
+ $systemSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-18'
+        $systemName = $systemSid.Translate([System.Security.Principal.NTAccount])
+ $localSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-19'
+ $localName = $localSid.Translate([System.Security.Principal.NTAccount])
+ $networkSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-20'
+ $networkName = $networkSid.Translate([System.Security.Principal.NTAccount])
+
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.Account = $networkSid
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Account | Assert-Equals -Expected $networkName
+ $actual.SERVICE_START_NAME | Assert-Equals -Expected $networkName.Value
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{obj=$localName.Value}
+ $service.Refresh()
+ $service.Account | Assert-Equals -Expected $localName
+
+ $service.Account = $systemSid
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Account | Assert-Equals -Expected $systemName
+ $actual.SERVICE_START_NAME | Assert-Equals -Expected "LocalSystem"
+ }
+
+ "Modify Account - user" = {
+ $currentSid = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.Account = $currentSid
+ $service.Password = 'password'
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+
+ # When running tests in CI this seems to become .\Administrator
+ if ($service.Account.Value.StartsWith('.\')) {
+ $username = $service.Account.Value.Substring(2, $service.Account.Value.Length - 2)
+ $actualSid = ([System.Security.Principal.NTAccount]"$env:COMPUTERNAME\$username").Translate(
+ [System.Security.Principal.SecurityIdentifier]
+ )
+ } else {
+ $actualSid = $service.Account.Translate([System.Security.Principal.SecurityIdentifier])
+ }
+ $actualSid.Value | Assert-Equals -Expected $currentSid.Value
+ $actual.SERVICE_START_NAME | Assert-Equals -Expected $service.Account.Value
+
+ # Go back to SYSTEM from account
+ $systemSid = [System.Security.Principal.SecurityIdentifier]'S-1-5-18'
+ $service.Account = $systemSid
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Account | Assert-Equals -Expected $systemSid.Translate([System.Security.Principal.NTAccount])
+ $actual.SERVICE_START_NAME | Assert-Equals -Expected "LocalSystem"
+ }
+
+ "Modify Account - virtual account" = {
+ $account = [System.Security.Principal.NTAccount]"NT SERVICE\$serviceName"
+
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.Account = $account
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Account | Assert-Equals -Expected $account
+ $actual.SERVICE_START_NAME | Assert-Equals -Expected $account.Value
+ }
+
+ "Modify Account - gMSA" = {
+        # This cannot be tested through CI; it is only verified during manual test runs.
+ return
+
+ $gmsaName = [System.Security.Principal.NTAccount]'gMSA$@DOMAIN.LOCAL' # Make sure this is UPN.
+ $gmsaSid = $gmsaName.Translate([System.Security.Principal.SecurityIdentifier])
+ $gmsaNetlogon = $gmsaSid.Translate([System.Security.Principal.NTAccount])
+
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.Account = $gmsaName
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Account | Assert-Equals -Expected $gmsaName
+        $actual.SERVICE_START_NAME | Assert-Equals -Expected $gmsaName.Value
+
+ # Go from gMSA to account and back to verify the Password doesn't matter.
+ $currentUser = [System.Security.Principal.WindowsIdentity]::GetCurrent().User
+ $service.Account = $currentUser
+ $service.Password = 'fake password'
+ $service.Password = 'fake password2'
+
+ # Now test in the Netlogon format.
+ $service.Account = $gmsaSid
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.Account | Assert-Equals -Expected $gmsaNetlogon
+ $actual.SERVICE_START_NAME | Assert-Equals -Expected $gmsaNetlogon.Value
+ }
+
+ "Modify DisplayName" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.DisplayName = "Custom Service Name"
+
+ $actual = Invoke-Sc -Action qc -Name $serviceName
+ $service.DisplayName | Assert-Equals -Expected "Custom Service Name"
+ $actual.DISPLAY_NAME | Assert-Equals -Expected "Custom Service Name"
+
+ $null = Invoke-Sc -Action config -Name $serviceName -Arguments @{displayname="New Service Name"}
+ $service.Refresh()
+ $service.DisplayName | Assert-Equals -Expected "New Service Name"
+ }
+
+ "Modify Description" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.Description = "My custom service description"
+
+ $actual = Invoke-Sc -Action qdescription -Name $serviceName
+ $service.Description | Assert-Equals -Expected "My custom service description"
+ $actual.DESCRIPTION | Assert-Equals -Expected "My custom service description"
+
+ $null = Invoke-Sc -Action description -Name $serviceName -Arguments @(,"new description")
+ $service.Description | Assert-Equals -Expected "new description"
+
+ $service.Description = $null
+
+ $actual = Invoke-Sc -Action qdescription -Name $serviceName
+ $service.Description | Assert-Equals -Expected $null
+ $actual.DESCRIPTION | Assert-Equals -Expected ""
+ }
+
+ "Modify FailureActions" = {
+ $newAction = [Ansible.Service.FailureActions]@{
+ ResetPeriod = 86400
+ RebootMsg = 'Reboot msg'
+ Command = 'Command line'
+ Actions = @(
+ [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 1000},
+ [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 2000},
+ [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::Restart; Delay = 1000},
+ [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::Reboot; Delay = 1000}
+ )
+ }
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.FailureActions = $newAction
+
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 86400
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'Reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'Command line'
+ $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4
+ $actual.FAILURE_ACTIONS[0] | Assert-Equals -Expected "RUN PROCESS -- Delay = 1000 milliseconds."
+ $actual.FAILURE_ACTIONS[1] | Assert-Equals -Expected "RUN PROCESS -- Delay = 2000 milliseconds."
+ $actual.FAILURE_ACTIONS[2] | Assert-Equals -Expected "RESTART -- Delay = 1000 milliseconds."
+ $actual.FAILURE_ACTIONS[3] | Assert-Equals -Expected "REBOOT -- Delay = 1000 milliseconds."
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 4
+
+        # Test that we can change individual settings without changing the others
+ $service.FailureActions = [Ansible.Service.FailureActions]@{ResetPeriod = 172800}
+
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 172800
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'Reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'Command line'
+ $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 4
+
+ $service.FailureActions = [Ansible.Service.FailureActions]@{RebootMsg = "New reboot msg"}
+
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 172800
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'Command line'
+ $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 4
+
+ $service.FailureActions = [Ansible.Service.FailureActions]@{Command = "New command line"}
+
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 172800
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line'
+ $actual.FAILURE_ACTIONS.Count | Assert-Equals -Expected 4
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 4
+
+ # Test setting both ResetPeriod and Actions together
+ $service.FailureActions = [Ansible.Service.FailureActions]@{
+ ResetPeriod = 86400
+ Actions = @(
+ [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 5000},
+ [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::None; Delay = 0}
+ )
+ }
+
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 86400
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line'
+        # sc.exe does not show the None action; it just ends the list, so we verify from get_FailureActions
+ $actual.FAILURE_ACTIONS | Assert-Equals -Expected "RUN PROCESS -- Delay = 5000 milliseconds."
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 2
+ $service.FailureActions.Actions[1].Type | Assert-Equals -Expected ([Ansible.Service.FailureAction]::None)
+
+ # Test setting just Actions without ResetPeriod
+ $service.FailureActions = [Ansible.Service.FailureActions]@{
+ Actions = [Ansible.Service.Action]@{Type = [Ansible.Service.FailureAction]::RunCommand; Delay = 10000}
+ }
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 86400
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line'
+ $actual.FAILURE_ACTIONS | Assert-Equals -Expected "RUN PROCESS -- Delay = 10000 milliseconds."
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 1
+
+ # Test removing all actions
+ $service.FailureActions = [Ansible.Service.FailureActions]@{
+ Actions = @()
+ }
+ $actual = Invoke-Sc -Action qfailure -Name $serviceName
+ $actual.'RESET_PERIOD (in seconds)' | Assert-Equals -Expected 0 # ChangeServiceConfig2W resets this back to 0.
+ $actual.REBOOT_MESSAGE | Assert-Equals -Expected 'New reboot msg'
+ $actual.COMMAND_LINE | Assert-Equals -Expected 'New command line'
+ $actual.PSObject.Properties.Name.Contains('FAILURE_ACTIONS') | Assert-Equals -Expected $false
+ $service.FailureActions.Actions.Count | Assert-Equals -Expected 0
+
+ # Test that we are reading the right values
+ $null = Invoke-Sc -Action failure -Name $serviceName -Arguments @{
+ reset = 172800
+ reboot = "sc reboot msg"
+ command = "sc command line"
+ actions = "run/5000/reboot/800"
+ }
+
+ $actual = $service.FailureActions
+ $actual.ResetPeriod | Assert-Equals -Expected 172800
+ $actual.RebootMsg | Assert-Equals -Expected "sc reboot msg"
+ $actual.Command | Assert-Equals -Expected "sc command line"
+ $actual.Actions.Count | Assert-Equals -Expected 2
+ $actual.Actions[0].Type | Assert-Equals -Expected ([Ansible.Service.FailureAction]::RunCommand)
+ $actual.Actions[0].Delay | Assert-Equals -Expected 5000
+ $actual.Actions[1].Type | Assert-Equals -Expected ([Ansible.Service.FailureAction]::Reboot)
+ $actual.Actions[1].Delay | Assert-Equals -Expected 800
+ }
+
+ "Modify FailureActionsOnNonCrashFailures" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.FailureActionsOnNonCrashFailures = $true
+
+ $actual = Invoke-Sc -Action qfailureflag -Name $serviceName
+ $service.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $true
+ $actual.FAILURE_ACTIONS_ON_NONCRASH_FAILURES | Assert-Equals -Expected "TRUE"
+
+ $null = Invoke-Sc -Action failureflag -Name $serviceName -Arguments @(,0)
+ $service.FailureActionsOnNonCrashFailures | Assert-Equals -Expected $false
+ }
+
+ "Modify ServiceSidInfo" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.ServiceSidInfo = [Ansible.Service.ServiceSidInfo]::None
+
+ $actual = Invoke-Sc -Action qsidtype -Name $serviceName
+ $service.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::None)
+ $actual.SERVICE_SID_TYPE | Assert-Equals -Expected 'NONE'
+
+ $null = Invoke-Sc -Action sidtype -Name $serviceName -Arguments @(,'unrestricted')
+ $service.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::Unrestricted)
+
+ $service.ServiceSidInfo = [Ansible.Service.ServiceSidInfo]::Restricted
+
+ $actual = Invoke-Sc -Action qsidtype -Name $serviceName
+ $service.ServiceSidInfo | Assert-Equals -Expected ([Ansible.Service.ServiceSidInfo]::Restricted)
+ $actual.SERVICE_SID_TYPE | Assert-Equals -Expected 'RESTRICTED'
+ }
+
+ "Modify RequiredPrivileges" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.RequiredPrivileges = @("SeBackupPrivilege", "SeTcbPrivilege")
+
+ $actual = Invoke-Sc -Action qprivs -Name $serviceName
+ ,$service.RequiredPrivileges | Assert-Equals -Expected @("SeBackupPrivilege", "SeTcbPrivilege")
+ ,$actual.PRIVILEGES | Assert-Equals -Expected @("SeBackupPrivilege", "SeTcbPrivilege")
+
+ # Ensure setting to $null is the same as an empty array
+ $service.RequiredPrivileges = $null
+
+ $actual = Invoke-Sc -Action qprivs -Name $serviceName
+ ,$service.RequiredPrivileges | Assert-Equals -Expected @()
+ ,$actual.PRIVILEGES | Assert-Equals -Expected @()
+
+ $service.RequiredPrivileges = @("SeBackupPrivilege", "SeTcbPrivilege")
+ $service.RequiredPrivileges = @()
+
+ $actual = Invoke-Sc -Action qprivs -Name $serviceName
+ ,$service.RequiredPrivileges | Assert-Equals -Expected @()
+ ,$actual.PRIVILEGES | Assert-Equals -Expected @()
+
+ $null = Invoke-Sc -Action privs -Name $serviceName -Arguments @(,"SeCreateTokenPrivilege/SeRestorePrivilege")
+ ,$service.RequiredPrivileges | Assert-Equals -Expected @("SeCreateTokenPrivilege", "SeRestorePrivilege")
+ }
+
+ "Modify PreShutdownTimeout" = {
+ $service = New-Object -TypeName Ansible.Service.Service -ArgumentList $serviceName
+ $service.PreShutdownTimeout = 60000
+
+        # sc.exe doesn't seem to have a query argument for this, so just get it from the registry
+ $actual = (
+ Get-ItemProperty -LiteralPath "HKLM:\SYSTEM\CurrentControlSet\Services\$serviceName" -Name PreshutdownTimeout
+ ).PreshutdownTimeout
+ $actual | Assert-Equals -Expected 60000
+ }
+
+ "Modify Triggers" = {
+ $service = [Ansible.Service.Service]$serviceName
+ $service.Triggers = @(
+ [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::DomainJoin
+ Action = [Ansible.Service.TriggerAction]::ServiceStop
+ SubType = [Guid][Ansible.Service.Trigger]::DOMAIN_JOIN_GUID
+ },
+ [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::NetworkEndpoint
+ Action = [Ansible.Service.TriggerAction]::ServiceStart
+ SubType = [Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID
+ DataItems = [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::String
+ Data = 'my named pipe'
+ }
+ },
+ [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::NetworkEndpoint
+ Action = [Ansible.Service.TriggerAction]::ServiceStart
+ SubType = [Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID
+ DataItems = [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::String
+ Data = 'my named pipe 2'
+ }
+ },
+ [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::Custom
+ Action = [Ansible.Service.TriggerAction]::ServiceStart
+ SubType = [Guid]'9bf04e57-05dc-4914-9ed9-84bf992db88c'
+ DataItems = @(
+ [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::Binary
+ Data = [byte[]]@(1, 2, 3, 4)
+ },
+ [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::Binary
+ Data = [byte[]]@(5, 6, 7, 8, 9)
+ }
+ )
+            },
+ [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::Custom
+ Action = [Ansible.Service.TriggerAction]::ServiceStart
+ SubType = [Guid]'9fbcfc7e-7581-4d46-913b-53bb15c80c51'
+ DataItems = @(
+ [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::String
+ Data = 'entry 1'
+ },
+ [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::String
+ Data = 'entry 2'
+ }
+ )
+ },
+ [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::FirewallPortEvent
+ Action = [Ansible.Service.TriggerAction]::ServiceStop
+ SubType = [Guid][Ansible.Service.Trigger]::FIREWALL_PORT_CLOSE_GUID
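+                # A list of strings is marshalled as one multi-string data item;
+                # sc.exe renders it joined with ';' (see the assertion below).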
+ DataItems = [Ansible.Service.TriggerItem]@{
+ Type = [Ansible.Service.TriggerDataType]::String
+ Data = [System.Collections.Generic.List[String]]@("1234", "tcp", "imagepath", "servicename")
+ }
+ }
+ )
+
+ $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName
+
+ $actual.Triggers.Count | Assert-Equals -Expected 6
+ $actual.Triggers[0].Type | Assert-Equals -Expected 'DOMAIN JOINED STATUS'
+ $actual.Triggers[0].Action | Assert-Equals -Expected 'STOP SERVICE'
+ $actual.Triggers[0].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::DOMAIN_JOIN_GUID) [DOMAIN JOINED]"
+ $actual.Triggers[0].Data.Count | Assert-Equals -Expected 0
+
+ $actual.Triggers[1].Type | Assert-Equals -Expected 'NETWORK EVENT'
+ $actual.Triggers[1].Action | Assert-Equals -Expected 'START SERVICE'
+ $actual.Triggers[1].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID) [NAMED PIPE EVENT]"
+ $actual.Triggers[1].Data.Count | Assert-Equals -Expected 1
+ $actual.Triggers[1].Data[0] | Assert-Equals -Expected 'my named pipe'
+
+ $actual.Triggers[2].Type | Assert-Equals -Expected 'NETWORK EVENT'
+ $actual.Triggers[2].Action | Assert-Equals -Expected 'START SERVICE'
+ $actual.Triggers[2].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID) [NAMED PIPE EVENT]"
+ $actual.Triggers[2].Data.Count | Assert-Equals -Expected 1
+ $actual.Triggers[2].Data[0] | Assert-Equals -Expected 'my named pipe 2'
+
+ $actual.Triggers[3].Type | Assert-Equals -Expected 'CUSTOM'
+ $actual.Triggers[3].Action | Assert-Equals -Expected 'START SERVICE'
+ $actual.Triggers[3].SubType | Assert-Equals -Expected '9bf04e57-05dc-4914-9ed9-84bf992db88c [ETW PROVIDER UUID]'
+ $actual.Triggers[3].Data.Count | Assert-Equals -Expected 2
+ $actual.Triggers[3].Data[0] | Assert-Equals -Expected '01 02 03 04'
+ $actual.Triggers[3].Data[1] | Assert-Equals -Expected '05 06 07 08 09'
+
+ $actual.Triggers[4].Type | Assert-Equals -Expected 'CUSTOM'
+ $actual.Triggers[4].Action | Assert-Equals -Expected 'START SERVICE'
+ $actual.Triggers[4].SubType | Assert-Equals -Expected '9fbcfc7e-7581-4d46-913b-53bb15c80c51 [ETW PROVIDER UUID]'
+ $actual.Triggers[4].Data.Count | Assert-Equals -Expected 2
+ $actual.Triggers[4].Data[0] | Assert-Equals -Expected "entry 1"
+ $actual.Triggers[4].Data[1] | Assert-Equals -Expected "entry 2"
+
+ $actual.Triggers[5].Type | Assert-Equals -Expected 'FIREWALL PORT EVENT'
+ $actual.Triggers[5].Action | Assert-Equals -Expected 'STOP SERVICE'
+ $actual.Triggers[5].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::FIREWALL_PORT_CLOSE_GUID) [PORT CLOSE]"
+ $actual.Triggers[5].Data.Count | Assert-Equals -Expected 1
+ $actual.Triggers[5].Data[0] | Assert-Equals -Expected '1234;tcp;imagepath;servicename'
+
+ # Remove trigger with $null
+ $service.Triggers = $null
+
+ $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName
+ $actual.Triggers.Count | Assert-Equals -Expected 0
+
+ # Add a single trigger
+ $service.Triggers = [Ansible.Service.Trigger]@{
+ Type = [Ansible.Service.TriggerType]::GroupPolicy
+ Action = [Ansible.Service.TriggerAction]::ServiceStart
+ SubType = [Guid][Ansible.Service.Trigger]::MACHINE_POLICY_PRESENT_GUID
+ }
+
+ $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName
+ $actual.Triggers.Count | Assert-Equals -Expected 1
+ $actual.Triggers[0].Type | Assert-Equals -Expected 'GROUP POLICY'
+ $actual.Triggers[0].Action | Assert-Equals -Expected 'START SERVICE'
+ $actual.Triggers[0].SubType | Assert-Equals -Expected "$([Ansible.Service.Trigger]::MACHINE_POLICY_PRESENT_GUID) [MACHINE POLICY PRESENT]"
+ $actual.Triggers[0].Data.Count | Assert-Equals -Expected 0
+
+ # Remove trigger with empty list
+ $service.Triggers = @()
+
+ $actual = Invoke-Sc -Action qtriggerinfo -Name $serviceName
+ $actual.Triggers.Count | Assert-Equals -Expected 0
+
+ # Add triggers through sc and check we get the values correctly
+ $null = Invoke-Sc -Action triggerinfo -Name $serviceName -Arguments @(
+ 'start/namedpipe/abc',
+ 'start/namedpipe/def',
+ 'start/custom/d4497e12-ac36-4823-af61-92db0dbd4a76/11223344/aabbccdd',
+ 'start/strcustom/435a1742-22c5-4234-9db3-e32dafde695c/11223344/aabbccdd',
+ 'stop/portclose/1234;tcp;imagepath;servicename',
+ 'stop/networkoff'
+ )
+
+ $actual = $service.Triggers
+ $actual.Count | Assert-Equals -Expected 6
+
+ $actual[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::NetworkEndpoint)
+ $actual[0].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[0].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID)
+ $actual[0].DataItems.Count | Assert-Equals -Expected 1
+ $actual[0].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+ $actual[0].DataItems[0].Data | Assert-Equals -Expected 'abc'
+
+ $actual[1].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::NetworkEndpoint)
+ $actual[1].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[1].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::NAMED_PIPE_EVENT_GUID)
+ $actual[1].DataItems.Count | Assert-Equals -Expected 1
+ $actual[1].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+ $actual[1].DataItems[0].Data | Assert-Equals -Expected 'def'
+
+ $actual[2].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::Custom)
+ $actual[2].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[2].SubType | Assert-Equals -Expected ([Guid]'d4497e12-ac36-4823-af61-92db0dbd4a76')
+ $actual[2].DataItems.Count | Assert-Equals -Expected 2
+ $actual[2].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::Binary)
+ ,$actual[2].DataItems[0].Data | Assert-Equals -Expected ([byte[]]@(17, 34, 51, 68))
+ $actual[2].DataItems[1].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::Binary)
+ ,$actual[2].DataItems[1].Data | Assert-Equals -Expected ([byte[]]@(170, 187, 204, 221))
+
+ $actual[3].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::Custom)
+ $actual[3].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStart)
+        $actual[3].SubType | Assert-Equals -Expected ([Guid]'435a1742-22c5-4234-9db3-e32dafde695c')
+ $actual[3].DataItems.Count | Assert-Equals -Expected 2
+ $actual[3].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+ $actual[3].DataItems[0].Data | Assert-Equals -Expected '11223344'
+ $actual[3].DataItems[1].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+ $actual[3].DataItems[1].Data | Assert-Equals -Expected 'aabbccdd'
+
+ $actual[4].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::FirewallPortEvent)
+ $actual[4].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStop)
+        $actual[4].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::FIREWALL_PORT_CLOSE_GUID)
+ $actual[4].DataItems.Count | Assert-Equals -Expected 1
+ $actual[4].DataItems[0].Type | Assert-Equals -Expected ([Ansible.Service.TriggerDataType]::String)
+ ,$actual[4].DataItems[0].Data | Assert-Equals -Expected @('1234', 'tcp', 'imagepath', 'servicename')
+
+ $actual[5].Type | Assert-Equals -Expected ([Ansible.Service.TriggerType]::IpAddressAvailability)
+ $actual[5].Action | Assert-Equals -Expected ([Ansible.Service.TriggerAction]::ServiceStop)
+        $actual[5].SubType | Assert-Equals -Expected ([Guid][Ansible.Service.Trigger]::NETWORK_MANAGER_LAST_IP_ADDRESS_REMOVAL_GUID)
+ $actual[5].DataItems.Count | Assert-Equals -Expected 0
+ }
+
+ # Cannot test PreferredNode as we can't guarantee CI is set up with NUMA support.
+ # Cannot test LaunchProtection as once set we cannot remove unless rebooting
+}
+
+# Setup and teardown should favour native tools to create and delete the service, not the util under test.
+foreach ($testImpl in $tests.GetEnumerator()) {
+ $serviceName = "ansible_$([System.IO.Path]::GetRandomFileName())"
+ $null = New-Service -Name $serviceName -BinaryPathName ('"{0}"' -f $path) -StartupType Manual
+
+ try {
+ $test = $testImpl.Key
+ &$testImpl.Value
+ } finally {
+ $null = Invoke-Sc -Action delete -Name $serviceName
+ }
+}
+
+$module.Result.data = "success"
+$module.ExitJson()
diff --git a/test/integration/targets/module_utils_Ansible.Service/tasks/main.yml b/test/integration/targets/module_utils_Ansible.Service/tasks/main.yml
new file mode 100644
index 00000000..78f91e1e
--- /dev/null
+++ b/test/integration/targets/module_utils_Ansible.Service/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: test Ansible.Service.cs
+ ansible_service_tests:
+ register: ansible_service_test
+
+- name: assert test Ansible.Service.cs
+ assert:
+ that:
+ - ansible_service_test.data == "success"
diff --git a/test/integration/targets/network_cli/aliases b/test/integration/targets/network_cli/aliases
new file mode 100644
index 00000000..6a739c96
--- /dev/null
+++ b/test/integration/targets/network_cli/aliases
@@ -0,0 +1,3 @@
+# Keeping incidental for efficiency, to avoid spinning up another VM
+shippable/vyos/incidental
+network/vyos
diff --git a/test/integration/targets/network_cli/passworded_user.yml b/test/integration/targets/network_cli/passworded_user.yml
new file mode 100644
index 00000000..5538684c
--- /dev/null
+++ b/test/integration/targets/network_cli/passworded_user.yml
@@ -0,0 +1,14 @@
+- hosts: vyos
+ gather_facts: false
+
+ tasks:
+ - name: Run whoami
+ vyos.vyos.vyos_command:
+ commands:
+ - whoami
+ register: whoami
+
+ - assert:
+ that:
+ - whoami is successful
+ - whoami.stdout_lines[0][0] == 'atester'
diff --git a/test/integration/targets/network_cli/runme.sh b/test/integration/targets/network_cli/runme.sh
new file mode 100755
index 00000000..156674fe
--- /dev/null
+++ b/test/integration/targets/network_cli/runme.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -eux
+export ANSIBLE_ROLES_PATH=../
+
+function cleanup {
+ ansible-playbook teardown.yml -i "$INVENTORY_PATH" "$@"
+}
+
+trap cleanup EXIT
+
+ansible-playbook setup.yml -i "$INVENTORY_PATH" "$@"
+
+# We need a nonempty file to override the private key with (an empty file
+# gives a lovely "list index out of range" error)
+foo=$(mktemp)
+echo hello > "$foo"
+
+# We want to ensure that passwords make it to the network connection plugins
+# because they follow a different path than the rest of the codebase.
+# In setup.yml, we create a passworded user, and now we connect as that user
+# to make sure the password we pass here successfully makes it to the plugin.
+ansible-playbook \
+ -i "$INVENTORY_PATH" \
+ -e ansible_user=atester \
+ -e ansible_password=testymctest \
+ -e ansible_ssh_private_key_file="$foo" \
+ passworded_user.yml
diff --git a/test/integration/targets/network_cli/setup.yml b/test/integration/targets/network_cli/setup.yml
new file mode 100644
index 00000000..d862406f
--- /dev/null
+++ b/test/integration/targets/network_cli/setup.yml
@@ -0,0 +1,14 @@
+- hosts: vyos
+ connection: ansible.netcommon.network_cli
+ become: true
+ gather_facts: false
+
+ tasks:
+ - name: Create user with password
+ register: result
+ vyos.vyos.vyos_config:
+ lines:
+ - set system login user atester full-name "Ansible Tester"
+ - set system login user atester authentication plaintext-password testymctest
+ - set system login user jsmith level admin
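+      # deleting disable-password-authentication re-enables SSH password logins,
+      # which the passworded_user test relies on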
+ - delete service ssh disable-password-authentication
diff --git a/test/integration/targets/network_cli/teardown.yml b/test/integration/targets/network_cli/teardown.yml
new file mode 100644
index 00000000..c47f3e89
--- /dev/null
+++ b/test/integration/targets/network_cli/teardown.yml
@@ -0,0 +1,14 @@
+- hosts: vyos
+ connection: ansible.netcommon.network_cli
+ become: true
+ gather_facts: false
+
+ tasks:
+ - name: Get rid of user (undo everything from setup.yml)
+ register: result
+ vyos.vyos.vyos_config:
+ lines:
+ - delete system login user atester full-name "Ansible Tester"
+ - delete system login user atester authentication plaintext-password testymctest
+ - delete system login user jsmith level admin
+ - set service ssh disable-password-authentication
diff --git a/test/integration/targets/no_log/aliases b/test/integration/targets/no_log/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/no_log/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/no_log/dynamic.yml b/test/integration/targets/no_log/dynamic.yml
new file mode 100644
index 00000000..4a1123d5
--- /dev/null
+++ b/test/integration/targets/no_log/dynamic.yml
@@ -0,0 +1,27 @@
+- name: test dynamic no log
+ hosts: testhost
+ gather_facts: no
+ ignore_errors: yes
+ tasks:
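+    # no_log is templated from unsafe_show_logs, so whether output is censored is
+    # decided at runtime rather than at parse time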
+ - name: no loop, task fails, dynamic no_log
+ debug:
+ msg: "SHOW {{ var_does_not_exist }}"
+ no_log: "{{ not (unsafe_show_logs|bool) }}"
+
+    - name: loop, task succeeds, dynamic no_log
+ debug:
+ msg: "SHOW {{ item }}"
+ loop:
+ - a
+ - b
+ - c
+ no_log: "{{ not (unsafe_show_logs|bool) }}"
+
+ - name: loop, task fails, dynamic no_log
+ debug:
+ msg: "SHOW {{ var_does_not_exist }}"
+ loop:
+ - a
+ - b
+ - c
+ no_log: "{{ not (unsafe_show_logs|bool) }}"
diff --git a/test/integration/targets/no_log/library/module.py b/test/integration/targets/no_log/library/module.py
new file mode 100644
index 00000000..d4f3c565
--- /dev/null
+++ b/test/integration/targets/no_log/library/module.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+
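+# Exercises no_log handling for a top-level option, dict suboptions, nested dict
+# suboptions and list-of-dict suboptions.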
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {},
+ 'secret': {'no_log': True},
+ 'subopt_dict': {
+ 'type': 'dict',
+ 'options': {
+ 'str_sub_opt1': {'no_log': True},
+ 'str_sub_opt2': {},
+ 'nested_subopt': {
+ 'type': 'dict',
+ 'options': {
+ 'n_subopt1': {'no_log': True},
+ }
+ }
+ }
+ },
+ 'subopt_list': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'subopt1': {'no_log': True},
+ 'subopt2': {},
+ }
+ }
+
+ }
+ )
+ module.exit_json(msg='done')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/no_log/no_log_local.yml b/test/integration/targets/no_log/no_log_local.yml
new file mode 100644
index 00000000..aacf7de2
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_local.yml
@@ -0,0 +1,92 @@
+# TODO: test against real connection plugins to ensure they're not leaking module args
+
+- name: normal play
+ hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: args should be logged in the absence of no_log
+ shell: echo "LOG_ME_TASK_SUCCEEDED"
+
+ - name: failed args should be logged in the absence of no_log
+ shell: echo "LOG_ME_TASK_FAILED"
+ failed_when: true
+ ignore_errors: true
+
+ - name: item args should be logged in the absence of no_log
+ shell: echo {{ item }}
+ with_items: [ "LOG_ME_ITEM", "LOG_ME_SKIPPED", "LOG_ME_ITEM_FAILED" ]
+ when: item != "LOG_ME_SKIPPED"
+ failed_when: item == "LOG_ME_ITEM_FAILED"
+ ignore_errors: true
+
+ - name: args should not be logged when task-level no_log set
+ shell: echo "DO_NOT_LOG_TASK_SUCCEEDED"
+ no_log: true
+
+ - name: failed args should not be logged when task-level no_log set
+ shell: echo "DO_NOT_LOG_TASK_FAILED"
+ no_log: true
+ failed_when: true
+ ignore_errors: true
+
+ - name: skipped task args should be suppressed with no_log
+ shell: echo "DO_NOT_LOG_TASK_SKIPPED"
+ no_log: true
+ when: false
+
+ - name: items args should be suppressed with no_log in every state
+ shell: echo {{ item }}
+ no_log: true
+ with_items: [ "DO_NOT_LOG_ITEM", "DO_NOT_LOG_ITEM_SKIPPED", "DO_NOT_LOG_ITEM_FAILED" ]
+ when: item != "DO_NOT_LOG_ITEM_SKIPPED"
+ failed_when: item == "DO_NOT_LOG_ITEM_FAILED"
+ ignore_errors: yes
+
+    - name: async task args should be suppressed with no_log
+ async: 10
+ poll: 1
+ shell: echo "DO_NOT_LOG_ASYNC_TASK_SUCCEEDED"
+ no_log: true
+
+- name: play-level no_log set
+ hosts: testhost
+ gather_facts: no
+ no_log: true
+ tasks:
+ - name: args should not be logged when play-level no_log set
+ shell: echo "DO_NOT_LOG_PLAY"
+
+ - name: args should not be logged when both play- and task-level no_log set
+ shell: echo "DO_NOT_LOG_TASK_AND_PLAY"
+ no_log: true
+
+ - name: args should be logged when task-level no_log overrides play-level
+ shell: echo "LOG_ME_OVERRIDE"
+ no_log: false
+
+ - name: Add a fake host for next play
+ add_host:
+ hostname: fake
+
+- name: use 'fake' unreachable host to force unreachable error
+ hosts: fake
+ gather_facts: no
+ connection: ssh
+ tasks:
+ - name: 'EXPECTED FAILURE: Fail to run a lineinfile task'
+ vars:
+ logins:
+ - machine: foo
+ login: bar
+ password: DO_NOT_LOG_UNREACHABLE_ITEM
+ - machine: two
+ login: three
+ password: DO_NOT_LOG_UNREACHABLE_ITEM
+ lineinfile:
+ path: /dev/null
+ mode: 0600
+ create: true
+ insertafter: EOF
+ line: "machine {{ item.machine }} login {{ item.login }} password {{ item.password }}"
+ loop: "{{ logins }}"
+ no_log: true
diff --git a/test/integration/targets/no_log/no_log_suboptions.yml b/test/integration/targets/no_log/no_log_suboptions.yml
new file mode 100644
index 00000000..e67ecfe2
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_suboptions.yml
@@ -0,0 +1,24 @@
+- name: test no log with suboptions
+ hosts: testhost
+ gather_facts: no
+
+ tasks:
+ - name: Task with suboptions
+ module:
+ secret: GLAMOROUS
+ subopt_dict:
+ str_sub_opt1: AFTERMATH
+ str_sub_opt2: otherstring
+ nested_subopt:
+ n_subopt1: MANPOWER
+
+ subopt_list:
+ - subopt1: UNTAPPED
+          subopt2: thirdstring
+
+ - subopt1: CONCERNED
+
+ - name: Task with suboptions as string
+ module:
+ secret: MARLIN
+ subopt_dict: str_sub_opt1=FLICK
diff --git a/test/integration/targets/no_log/no_log_suboptions_invalid.yml b/test/integration/targets/no_log/no_log_suboptions_invalid.yml
new file mode 100644
index 00000000..933a8a9b
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_suboptions_invalid.yml
@@ -0,0 +1,45 @@
+- name: test no log with suboptions
+ hosts: testhost
+ gather_facts: no
+ ignore_errors: yes
+
+ tasks:
+ - name: Task with suboptions and invalid parameter
+ module:
+ secret: SUPREME
+ invalid: param
+ subopt_dict:
+ str_sub_opt1: IDIOM
+ str_sub_opt2: otherstring
+ nested_subopt:
+ n_subopt1: MOCKUP
+
+ subopt_list:
+ - subopt1: EDUCATED
+          subopt2: thirdstring
+ - subopt1: FOOTREST
+
+ - name: Task with suboptions as string with invalid parameter
+ module:
+ secret: FOOTREST
+ invalid: param
+ subopt_dict: str_sub_opt1=CRAFTY
+
+ - name: Task with suboptions with dict instead of list
+ module:
+ secret: FELINE
+ subopt_dict:
+ str_sub_opt1: CRYSTAL
+ str_sub_opt2: otherstring
+ nested_subopt:
+ n_subopt1: EXPECTANT
+ subopt_list:
+ foo: bar
+
+ - name: Task with suboptions with incorrect data type
+ module:
+ secret: AGROUND
+ subopt_dict: 9068.21361
+ subopt_list:
+ - subopt1: GOLIATH
+ - subopt1: FREEFALL
diff --git a/test/integration/targets/no_log/runme.sh b/test/integration/targets/no_log/runme.sh
new file mode 100755
index 00000000..bb5c048f
--- /dev/null
+++ b/test/integration/targets/no_log/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# The verbose output is expected to contain 26 LOG_ME lines and 0 DO_NOT_LOG lines.
+# If either count mismatches the test fails; run the ansible-playbook command to debug.
+[ "$(ansible-playbook no_log_local.yml -i ../../inventory -vvvvv "$@" | awk \
+'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "26/0" ]
+
+# deal with corner cases with no log and loops
+# no log enabled, should produce 6 censored messages
+[ "$(ansible-playbook dynamic.yml -i ../../inventory -vvvvv "$@" -e unsafe_show_logs=no|grep -c 'output has been hidden')" = "6" ]
+
+# no log disabled, should produce 0 censored
+[ "$(ansible-playbook dynamic.yml -i ../../inventory -vvvvv "$@" -e unsafe_show_logs=yes|grep -c 'output has been hidden')" = "0" ]
+
+# test no log for sub options
+[ "$(ansible-playbook no_log_suboptions.yml -i ../../inventory -vvvvv "$@" | grep -Ec '(MANPOWER|UNTAPPED|CONCERNED|MARLIN|FLICK)')" = "0" ]
+
+# test invalid data passed to a suboption
+[ "$(ansible-playbook no_log_suboptions_invalid.yml -i ../../inventory -vvvvv "$@" | grep -Ec '(SUPREME|IDIOM|MOCKUP|EDUCATED|FOOTREST|CRAFTY|FELINE|CRYSTAL|EXPECTANT|AGROUND|GOLIATH|FREEFALL)')" = "0" ]
diff --git a/test/integration/targets/old_style_cache_plugins/aliases b/test/integration/targets/old_style_cache_plugins/aliases
new file mode 100644
index 00000000..05f65b71
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group3
+skip/osx
+skip/macos
+disabled
diff --git a/test/integration/targets/old_style_cache_plugins/inventory_config b/test/integration/targets/old_style_cache_plugins/inventory_config
new file mode 100644
index 00000000..d87c2a90
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/inventory_config
@@ -0,0 +1 @@
+# inventory config file for consistent source
diff --git a/test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py b/test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py
new file mode 100644
index 00000000..9879dec9
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/plugins/cache/redis.py
@@ -0,0 +1,141 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: redis
+ short_description: Use Redis DB for cache
+ description:
+ - This cache uses JSON formatted, per host records saved in Redis.
+ version_added: "1.9"
+ requirements:
+ - redis>=2.4.5 (python lib)
+ options:
+ _uri:
+ description:
+ - A colon separated string of connection information for Redis.
+ required: True
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout for the cache plugin data
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import time
+import json
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.cache import BaseCacheModule
+
+try:
+ from redis import StrictRedis, VERSION
+except ImportError:
+ raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+
+
+class CacheModule(BaseCacheModule):
+ """
+ A caching module backed by redis.
+ Keys are maintained in a zset with their score being the timestamp
+ when they are inserted. This allows for the usage of 'zremrangebyscore'
+    to expire keys. This mechanism is used instead of a pattern matched 'scan'
+    for performance.
+ """
+ def __init__(self, *args, **kwargs):
+ if C.CACHE_PLUGIN_CONNECTION:
+ connection = C.CACHE_PLUGIN_CONNECTION.split(':')
+ else:
+ connection = []
+
+ self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
+ self._prefix = C.CACHE_PLUGIN_PREFIX
+ self._cache = {}
+ self._db = StrictRedis(*connection)
+ self._keys_set = 'ansible_cache_keys'
+
+ def _make_key(self, key):
+ return self._prefix + key
+
+ def get(self, key):
+
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the zset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = json.loads(value)
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+
+ value2 = json.dumps(value)
+ if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
+ self._db.setex(self._make_key(key), int(self._timeout), value2)
+ else:
+ self._db.set(self._make_key(key), value2)
+
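+        # redis-py 3.0 changed zadd: 2.x takes score/name positional pairs while
+        # 3.x requires a {name: score} mapping.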
+ if VERSION[0] == 2:
+ self._db.zadd(self._keys_set, time.time(), key)
+ else:
+ self._db.zadd(self._keys_set, {key: time.time()})
+ self._cache[key] = value
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._db.zremrangebyscore(self._keys_set, 0, expiry_age)
+
+ def keys(self):
+ self._expire_keys()
+ return self._db.zrange(self._keys_set, 0, -1)
+
+ def contains(self, key):
+ self._expire_keys()
+ return (self._db.zrank(self._keys_set, key) is not None)
+
+ def delete(self, key):
+ if key in self._cache:
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._db.zrem(self._keys_set, key)
+
+ def flush(self):
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ # TODO: there is probably a better way to do this in redis
+ ret = dict()
+ for key in self.keys():
+ ret[key] = self.get(key)
+ return ret
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
diff --git a/test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py b/test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py
new file mode 100644
index 00000000..7e591957
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/plugins/inventory/test.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: test
+ plugin_type: inventory
+ short_description: test inventory source
+ extends_documentation_fragment:
+ - inventory_cache
+'''
+
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+
+ NAME = 'test'
+
+ def populate(self, hosts):
+ for host in list(hosts.keys()):
+ self.inventory.add_host(host, group='all')
+ for hostvar, hostval in hosts[host].items():
+ self.inventory.set_variable(host, hostvar, hostval)
+
+ def get_hosts(self):
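+        # Static data standing in for a real inventory source.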
+ return {'host1': {'one': 'two'}, 'host2': {'three': 'four'}}
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self.load_cache_plugin()
+
+ cache_key = self.get_cache_key(path)
+
+ # cache may be True or False at this point to indicate if the inventory is being refreshed
+ # get the user's cache option
+ cache_setting = self.get_option('cache')
+
+ attempt_to_read_cache = cache_setting and cache
+ cache_needs_update = cache_setting and not cache
+
+ # attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
+ if attempt_to_read_cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
+ cache_needs_update = True
+
+ if cache_needs_update:
+ results = self.get_hosts()
+
+ # set the cache
+ self._cache[cache_key] = results
+
+ self.populate(results)
diff --git a/test/integration/targets/old_style_cache_plugins/runme.sh b/test/integration/targets/old_style_cache_plugins/runme.sh
new file mode 100755
index 00000000..13911bd5
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/runme.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
+# Run test if dependencies are installed
+failed_dep_1=$(ansible localhost -m pip -a "name=redis>=2.4.5 state=present" "$@" | tee out.txt | grep -c 'FAILED!' || true)
+cat out.txt
+
+installed_redis=$(ansible localhost -m package -a "name=redis-server state=present" --become "$@" | tee out.txt | grep -c '"changed": true' || true)
+failed_dep_2=$(grep out.txt -ce 'FAILED!' || true)
+cat out.txt
+
+started_redis=$(ansible localhost -m service -a "name=redis-server state=started" --become "$@" | tee out.txt | grep -c '"changed": true' || true)
+failed_dep_3=$(grep out.txt -ce 'FAILED!' || true)
+cat out.txt
+
+CLEANUP_REDIS () { if [ "${installed_redis}" -eq 1 ] ; then ansible localhost -m package -a "name=redis-server state=absent" --become ; fi }
+STOP_REDIS () { if [ "${installed_redis}" -ne 1 ] && [ "${started_redis}" -eq 1 ] ; then ansible localhost -m service -a "name=redis-server state=stopped" --become ; fi }
+
+if [ "${failed_dep_1}" -eq 1 ] || [ "${failed_dep_2}" -eq 1 ] || [ "${failed_dep_3}" -eq 1 ] ; then
+ STOP_REDIS
+ CLEANUP_REDIS
+ exit 0
+fi
+
+export ANSIBLE_CACHE_PLUGIN=redis
+export ANSIBLE_CACHE_PLUGIN_CONNECTION=localhost:6379:0
+export ANSIBLE_CACHE_PLUGINS=./plugins/cache
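+# ANSIBLE_CACHE_PLUGINS points at the local plugin dir so the old-style redis.py above is picked up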
+
+# Use old redis for fact caching
+count=$(ansible-playbook test_fact_gathering.yml -vvv 2>&1 "$@" | tee out.txt | grep -c 'Gathering Facts' || true)
+failed_dep_version=$(grep out.txt -ce "'redis' python module (version 2.4.5 or newer) is required" || true)
+cat out.txt
+if [ "${failed_dep_version}" -eq 1 ] ; then
+ STOP_REDIS
+ CLEANUP_REDIS
+ exit 0
+fi
+if [ "${count}" -ne 1 ] ; then
+ STOP_REDIS
+ CLEANUP_REDIS
+ exit 1
+fi
+
+# Attempt to use old redis for inventory caching; should not work
+export ANSIBLE_INVENTORY_CACHE=True
+export ANSIBLE_INVENTORY_CACHE_PLUGIN=redis
+export ANSIBLE_INVENTORY_ENABLED=test
+export ANSIBLE_INVENTORY_PLUGINS=./plugins/inventory
+
+# A failing grep would trip 'set -e' before res could be checked, so capture the status explicitly
+res=0
+ansible-inventory -i inventory_config --graph 2>&1 "$@" | tee out.txt | grep 'Cache options were provided but may not reconcile correctly unless set via set_options' || res=$?
+cat out.txt
+if [ "${res}" -eq 1 ] ; then
+ STOP_REDIS
+ CLEANUP_REDIS
+ exit 1
+fi
+
+# Use new style redis for fact caching
+unset ANSIBLE_CACHE_PLUGINS
+count=$(ansible-playbook test_fact_gathering.yml -vvv "$@" | tee out.txt | grep -c 'Gathering Facts' || true)
+cat out.txt
+if [ "${count}" -ne 1 ] ; then
+ STOP_REDIS
+ CLEANUP_REDIS
+ exit 1
+fi
+
+# Use new redis for inventory caching
+ansible-inventory -i inventory_config --graph "$@" 2>&1 | tee out.txt | grep 'host2'
+res=$?
+cat out.txt
+
+STOP_REDIS
+CLEANUP_REDIS
+
+exit $res
diff --git a/test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml b/test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml
new file mode 100644
index 00000000..5c720b4e
--- /dev/null
+++ b/test/integration/targets/old_style_cache_plugins/test_fact_gathering.yml
@@ -0,0 +1,6 @@
+---
+- hosts: localhost
+ gather_facts: no
+
+- hosts: localhost
+ gather_facts: yes
diff --git a/test/integration/targets/old_style_modules_posix/aliases b/test/integration/targets/old_style_modules_posix/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/old_style_modules_posix/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/old_style_modules_posix/library/helloworld.sh b/test/integration/targets/old_style_modules_posix/library/helloworld.sh
new file mode 100644
index 00000000..c1108a8c
--- /dev/null
+++ b/test/integration/targets/old_style_modules_posix/library/helloworld.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
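+# Old-style (non-WANT_JSON) modules are handed a single argument: the path to a
+# file of key=value pairs. Sourcing it exposes each pair as a shell variable.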
+if [ -f "$1" ]; then
+ . "$1"
+else
+ echo '{"msg": "No argument file provided", "failed": true}'
+ exit 1
+fi
+
+salutation=${salutation:=Hello}
+name=${name:=World}
+
+cat << EOF
+{"msg": "${salutation}, ${name}!"}
+EOF
diff --git a/test/integration/targets/old_style_modules_posix/meta/main.yml b/test/integration/targets/old_style_modules_posix/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/old_style_modules_posix/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/old_style_modules_posix/tasks/main.yml b/test/integration/targets/old_style_modules_posix/tasks/main.yml
new file mode 100644
index 00000000..a7882171
--- /dev/null
+++ b/test/integration/targets/old_style_modules_posix/tasks/main.yml
@@ -0,0 +1,44 @@
+- name: Hello, World!
+ helloworld:
+ register: hello_world
+
+- assert:
+ that:
+ - 'hello_world.msg == "Hello, World!"'
+
+- name: Hello, Ansible!
+ helloworld:
+ args:
+ name: Ansible
+ register: hello_ansible
+
+- assert:
+ that:
+ - 'hello_ansible.msg == "Hello, Ansible!"'
+
+- name: Goodbye, Ansible!
+ helloworld:
+ args:
+ salutation: Goodbye
+ name: Ansible
+ register: goodbye_ansible
+
+- assert:
+ that:
+ - 'goodbye_ansible.msg == "Goodbye, Ansible!"'
+
+- name: Copy module to remote
+ copy:
+ src: "{{ role_path }}/library/helloworld.sh"
+ dest: "{{ remote_tmp_dir }}/helloworld.sh"
+
+- name: Execute module directly
+ command: '/bin/sh {{ remote_tmp_dir }}/helloworld.sh'
+ register: direct
+ ignore_errors: true
+
+- assert:
+ that:
+ - direct is failed
+ - |
+ direct.stdout == '{"msg": "No argument file provided", "failed": true}'
diff --git a/test/integration/targets/omit/48673.yml b/test/integration/targets/omit/48673.yml
new file mode 100644
index 00000000..d25c8cf2
--- /dev/null
+++ b/test/integration/targets/omit/48673.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ serial: "{{ testing_omitted_variable | default(omit) }}"
+ tasks:
+ - debug:
diff --git a/test/integration/targets/omit/aliases b/test/integration/targets/omit/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/omit/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/omit/runme.sh b/test/integration/targets/omit/runme.sh
new file mode 100755
index 00000000..962e1f04
--- /dev/null
+++ b/test/integration/targets/omit/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook 48673.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/order/aliases b/test/integration/targets/order/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/order/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/order/inventory b/test/integration/targets/order/inventory
new file mode 100644
index 00000000..11f322a1
--- /dev/null
+++ b/test/integration/targets/order/inventory
@@ -0,0 +1,9 @@
+[incremental]
+hostB
+hostA
+hostD
+hostC
+
+[incremental:vars]
+ansible_connection=local
+ansible_python_interpreter='{{ansible_playbook_python}}'
diff --git a/test/integration/targets/order/order.yml b/test/integration/targets/order/order.yml
new file mode 100644
index 00000000..62176b1e
--- /dev/null
+++ b/test/integration/targets/order/order.yml
@@ -0,0 +1,39 @@
+- name: just plain order
+ hosts: all
+ gather_facts: false
+ order: '{{ myorder | default("inventory") }}'
+ tasks:
+ - shell: "echo '{{ inventory_hostname }}' >> hostlist.txt"
+
+- name: with serial
+ hosts: all
+ gather_facts: false
+ serial: 1
+ order: '{{ myorder | default("inventory")}}'
+ tasks:
+ - shell: "echo '{{ inventory_hostname }}' >> shostlist.txt"
+
+- name: ensure everything works
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - assert:
+ that:
+ - item.1 == hostlist[item.0]
+ - item.1 == shostlist[item.0]
+ loop: '{{ lookup("indexed_items", inputlist) }}'
+ vars:
+ hostlist: '{{ lookup("file", "hostlist.txt").splitlines() }}'
+ shostlist: '{{ lookup("file", "shostlist.txt").splitlines() }}'
+ when: myorder | default('inventory') != 'shuffle'
+
+ - name: Assert that shuffle worked
+ assert:
+ that:
+ - item.1 != hostlist[item.0] or item.1 in hostlist
+      - item.1 != shostlist[item.0] or item.1 in shostlist
+ loop: '{{ lookup("indexed_items", inputlist) }}'
+ vars:
+ hostlist: '{{ lookup("file", "hostlist.txt").splitlines() }}'
+ shostlist: '{{ lookup("file", "shostlist.txt").splitlines() }}'
+ when: myorder | default('inventory') == 'shuffle'
diff --git a/test/integration/targets/order/runme.sh b/test/integration/targets/order/runme.sh
new file mode 100755
index 00000000..9a01c211
--- /dev/null
+++ b/test/integration/targets/order/runme.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -eux
+
+cleanup () {
+ files="shostlist.txt hostlist.txt"
+ for file in $files; do
+ if [[ -f "$file" ]]; then
+ rm -f "$file"
+ fi
+ done
+}
+
+for EXTRA in '{"inputlist": ["hostB", "hostA", "hostD", "hostC"]}' \
+ '{"myorder": "inventory", "inputlist": ["hostB", "hostA", "hostD", "hostC"]}' \
+ '{"myorder": "sorted", "inputlist": ["hostA", "hostB", "hostC", "hostD"]}' \
+ '{"myorder": "reverse_sorted", "inputlist": ["hostD", "hostC", "hostB", "hostA"]}' \
+ '{"myorder": "reverse_inventory", "inputlist": ["hostC", "hostD", "hostA", "hostB"]}' \
+ '{"myorder": "shuffle", "inputlist": ["hostC", "hostD", "hostA", "hostB"]}'
+do
+ cleanup
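+    # --forks 1 serialises execution so hosts append to the result files in play order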
+ ansible-playbook order.yml --forks 1 -i inventory -e "$EXTRA" "$@"
+done
+cleanup
diff --git a/test/integration/targets/package/aliases b/test/integration/targets/package/aliases
new file mode 100644
index 00000000..0b484bba
--- /dev/null
+++ b/test/integration/targets/package/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+destructive
+skip/aix
diff --git a/test/integration/targets/package/meta/main.yml b/test/integration/targets/package/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/package/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/package/tasks/main.yml b/test/integration/targets/package/tasks/main.yml
new file mode 100644
index 00000000..4fc3a8a6
--- /dev/null
+++ b/test/integration/targets/package/tasks/main.yml
@@ -0,0 +1,114 @@
+# Test code for the package module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact: output_dir_test={{output_dir}}/at
+
+- name: make sure our testing sub-directory does not exist
+ file: path="{{ output_dir_test }}" state=absent
+
+- name: create our testing sub-directory
+ file: path="{{ output_dir_test }}" state=directory
+
+# Verify correct default package manager for Fedora
+# Validates: https://github.com/ansible/ansible/issues/34014
+- block:
+ - name: install apt
+ dnf:
+ name: apt
+ state: present
+ - name: gather facts again
+ setup:
+ - name: validate output
+ assert:
+ that:
+ - 'ansible_pkg_mgr == "dnf"'
+ always:
+ - name: remove apt
+ dnf:
+ name: apt
+ state: absent
+ - name: gather facts again
+ setup:
+ when: ansible_distribution == "Fedora"
+
+# Verify correct default package manager for Debian/Ubuntu when Zypper installed
+- block:
+ # Just make an executable file called "zypper" - installing zypper itself
+ # consistently is hard - and we're not going to use it
+ - name: install fake zypper
+ file:
+ state: touch
+ mode: 0755
+ path: /usr/bin/zypper
+ - name: gather facts again
+ setup:
+ - name: validate output
+ assert:
+ that:
+ - 'ansible_pkg_mgr == "apt"'
+ always:
+ - name: remove fake zypper
+ file:
+ path: /usr/bin/zypper
+ state: absent
+ - name: gather facts again
+ setup:
+ when: ansible_os_family == "Debian"
+
+##
+## package
+##
+
+- name: define distros to attempt installing the at package on
+ set_fact:
+ package_distros:
+ - RedHat
+ - CentOS
+ - ScientificLinux
+ - Fedora
+ - Ubuntu
+ - Debian
+
+- block:
+ - name: remove at package
+ package:
+ name: at
+ state: absent
+ register: at_check0
+
+ - name: verify at command is missing
+ shell: which at
+ register: at_check1
+ failed_when: at_check1.rc == 0
+
+ - name: reinstall at package
+ package:
+ name: at
+ state: present
+ register: at_install0
+ - debug: var=at_install0
+ - name: validate results
+ assert:
+ that:
+ - 'at_install0.changed is defined'
+ - 'at_install0.changed'
+
+ - name: verify at command is installed
+ shell: which at
+
+ when: ansible_distribution in package_distros
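
The two guarded blocks above assert that distribution identity, not whichever package-manager binaries happen to be present, decides `ansible_pkg_mgr`. A rough sketch of the precedence being tested (a hypothetical helper, not the real fact-collection code):

    def default_pkg_mgr(distribution, os_family, available_binaries):
        # Distro identity wins over installed binaries: Fedora stays on dnf
        # even with apt present, Debian stays on apt even with zypper present.
        if distribution == "Fedora":
            return "dnf"
        if os_family == "Debian":
            return "apt"
        # Only probe binaries for families without a pinned default.
        for candidate in ("zypper", "dnf", "yum", "apt"):
            if candidate in available_binaries:
                return candidate
        return "unknown"

    assert default_pkg_mgr("Fedora", "RedHat", {"dnf", "apt"}) == "dnf"
    assert default_pkg_mgr("Debian", "Debian", {"apt", "zypper"}) == "apt"
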
diff --git a/test/integration/targets/package_facts/aliases b/test/integration/targets/package_facts/aliases
new file mode 100644
index 00000000..6c62b9a7
--- /dev/null
+++ b/test/integration/targets/package_facts/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
diff --git a/test/integration/targets/package_facts/tasks/main.yml b/test/integration/targets/package_facts/tasks/main.yml
new file mode 100644
index 00000000..12dfcf03
--- /dev/null
+++ b/test/integration/targets/package_facts/tasks/main.yml
@@ -0,0 +1,115 @@
+# Test playbook for the package_facts module
+# (c) 2017, Adam Miller <admiller@redhat.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Prep package_facts tests - Debian Family
+ block:
+ - name: install python apt bindings - python2
+ package: name="python-apt" state=present
+ when: ansible_python.version.major|int == 2
+
+ - name: install python apt bindings - python3
+ package: name="python3-apt" state=present
+ when: ansible_python.version.major|int == 3
+
+ - name: Gather package facts
+ package_facts:
+ manager: apt
+
+ - name: check that ansible_facts.packages exists
+ assert:
+ that: ansible_facts.packages is defined
+ when: ansible_os_family == "Debian"
+
+- name: Run package_facts tests - Red Hat Family
+ block:
+ - name: Gather package facts
+ package_facts:
+ manager: rpm
+
+ - name: check that ansible_facts.packages exists
+ assert:
+ that: ansible_facts.packages is defined
+ when: (ansible_os_family == "RedHat")
+
+- name: Run package_facts tests - SUSE/OpenSUSE Family
+ block:
+ - name: install python rpm bindings - python2
+ package: name="rpm-python" state=present
+ when: ansible_python.version.major|int == 2
+
+ - name: install python rpm bindings - python3
+ package: name="python3-rpm" state=present
+ when: ansible_python.version.major|int == 3
+
+ - name: Gather package facts
+ package_facts:
+ manager: rpm
+
+ - name: check that ansible_facts.packages exists
+ assert:
+ that: ansible_facts.packages is defined
+ when: (ansible_os_family == "openSUSE Leap") or (ansible_os_family == "Suse")
+
+# Check that auto-detection works as well
+- name: Gather package facts
+ package_facts:
+ manager: auto
+
+- name: check that ansible_facts.packages exists
+ assert:
+ that: ansible_facts.packages is defined
+
+- name: Run package_facts tests - FreeBSD
+ block:
+ - name: Gather package facts
+ package_facts:
+ manager: pkg
+
+ - name: check that ansible_facts.packages exists
+ assert:
+ that: ansible_facts.packages is defined
+
+ - name: check there is at least one package flagged neither vital nor automatic
+ command: pkg query -e "%a = 0 && %V = 0" %n
+ register: not_vital_nor_automatic
+ failed_when: not not_vital_nor_automatic.stdout
+
+ - vars:
+ pkg_name: "{{ not_vital_nor_automatic.stdout_lines[0].strip() }}"
+ block:
+ - name: check the selected package is not vital
+ assert:
+ that:
+ - 'not ansible_facts.packages[pkg_name][0].vital'
+ - 'not ansible_facts.packages[pkg_name][0].automatic'
+
+ - name: flag the selected package as vital and automatic
+ command: 'pkg set --yes -v 1 -A 1 {{ pkg_name }}'
+
+ - name: Gather package facts (again)
+ package_facts:
+
+ - name: check the selected package is flagged vital and automatic
+ assert:
+ that:
+ - 'ansible_facts.packages[pkg_name][0].vital|bool'
+ - 'ansible_facts.packages[pkg_name][0].automatic|bool'
+ always:
+ - name: restore previous flags for the selected package
+ command: 'pkg set --yes -v 0 -A 0 {{ pkg_name }}'
+ when: ansible_os_family == "FreeBSD"
diff --git a/test/integration/targets/parsing/aliases b/test/integration/targets/parsing/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/parsing/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/parsing/bad_parsing.yml b/test/integration/targets/parsing/bad_parsing.yml
new file mode 100644
index 00000000..953ec072
--- /dev/null
+++ b/test/integration/targets/parsing/bad_parsing.yml
@@ -0,0 +1,12 @@
+- hosts: testhost
+
+ # the role below contains quoting scenarios that should fail to parse
+ # or execute; individual scenarios are selected via tags (see runme.sh)
+
+ gather_facts: False
+
+ roles:
+
+ # this one has a lot of things that should fail, see makefile for operation w/ tags
+
+ - { role: test_bad_parsing }
diff --git a/test/integration/targets/parsing/good_parsing.yml b/test/integration/targets/parsing/good_parsing.yml
new file mode 100644
index 00000000..b68d9112
--- /dev/null
+++ b/test/integration/targets/parsing/good_parsing.yml
@@ -0,0 +1,9 @@
+- hosts: testhost
+
+ # the following commands should all parse fine and execute fine
+ # and represent quoting scenarios that should be legit
+
+ gather_facts: False
+
+ roles:
+ - { role: test_good_parsing, tags: test_good_parsing }
diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml
new file mode 100644
index 00000000..f1b2ec6a
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/main.yml
@@ -0,0 +1,60 @@
+# test code for the ping module
+# (c) 2014, Michael DeHaan <michael@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# the following tests all raise errors. We run them with different tags because
+# otherwise ansible would stop at the first failure, and we want to verify the stop condition for each
+
+- set_fact:
+ test_file: "{{ output_dir }}/ansible_test_file" # FIXME, use set tempdir
+ test_input: "owner=test"
+ bad_var: "{{ output_dir }}' owner=test"
+ chdir: "mom chdir=/tmp"
+ tags: common
+
+- file: name={{test_file}} state=touch
+ tags: common
+
+- name: remove touched file
+ file: name={{test_file}} state=absent
+ tags: common
+
+- name: include test that we cannot insert arguments
+ include: scenario1.yml
+ tags: scenario1
+
+- name: include test that we cannot duplicate arguments
+ include: scenario2.yml
+ tags: scenario2
+
+- name: include test that we can't do this for the shell module
+ include: scenario3.yml
+ tags: scenario3
+
+- name: include test that we can't go all Little Bobby Droptables on a quoted var to add more
+ include: scenario4.yml
+ tags: scenario4
+
+- name: test that a missing/malformed jinja2 filter fails
+ debug: msg="{{output_dir|badfiltername}}"
+ tags: scenario5
+ register: filter_fail
+ ignore_errors: yes
+
+- assert:
+ that:
+ - filter_fail is failed
diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml
new file mode 100644
index 00000000..8a82fb95
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario1.yml
@@ -0,0 +1,4 @@
+- name: test that we cannot insert arguments
+ file: path={{ test_file }} {{ test_input }}
+ failed_when: False # ignore the module, just test the parser
+ tags: scenario1
diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml
new file mode 100644
index 00000000..c3b4b13c
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario2.yml
@@ -0,0 +1,4 @@
+- name: test that we cannot duplicate arguments
+ file: path={{ test_file }} owner=test2 {{ test_input }}
+ failed_when: False # ignore the module, just test the parser
+ tags: scenario2
diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml
new file mode 100644
index 00000000..a228f70e
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario3.yml
@@ -0,0 +1,4 @@
+- name: test that we can't do this for the shell module
+ shell: echo hi {{ chdir }}
+ failed_when: False
+ tags: scenario3
diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml
new file mode 100644
index 00000000..2845adca
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_bad_parsing/tasks/scenario4.yml
@@ -0,0 +1,4 @@
+- name: test that we can't go all Little Bobby Droptables on a quoted var to add more
+ file: "name={{ bad_var }}"
+ failed_when: False
+ tags: scenario4
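
Scenarios 1 through 4 all rest on the same property: k=v argument splitting runs on the raw task string before any templating, so a variable cannot expand into new arguments. A short demonstration with Ansible's own splitter (`parse_kv` from `ansible.parsing.splitter`; treat the exact output shape as indicative):

    from ansible.parsing.splitter import parse_kv

    # The raw line is split first; '{{ test_input }}' ("owner=test") has no
    # '=' of its own yet, so it cannot become an owner= argument.
    args = parse_kv("path={{ test_file }} {{ test_input }}")
    print(args)
    # roughly: {'path': '{{ test_file }}', '_raw_params': '{{ test_input }}'}
    # The leftover raw parameter is then rejected by modules like file.
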
diff --git a/test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml b/test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml
new file mode 100644
index 00000000..1aaeac77
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_bad_parsing/vars/main.yml
@@ -0,0 +1,2 @@
+---
+output_dir: .
diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml
new file mode 100644
index 00000000..0fb1337e
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/main.yml
@@ -0,0 +1,204 @@
+# test code for the ping module
+# (c) 2014, Michael DeHaan <michael@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# various tests of things that should not cause parsing problems
+
+- set_fact:
+ test_input: "a=1 a=2 a=3"
+
+- set_fact:
+ multi_line: |
+ echo old
+ echo mcdonald
+ echo had
+ echo a
+ echo farm
+
+- shell: echo "dog"
+ register: result
+
+- assert:
+ that:
+ result.cmd == 'echo "dog"'
+
+- shell: echo 'dog'
+ register: result
+
+- assert:
+ that:
+ result.cmd == 'echo \'dog\''
+
+- name: a quoted argument is not sent to the shell module as anything but a string parameter
+ shell: echo 'dog' 'executable=/usr/bin/python'
+ register: result
+
+- debug: var=result.cmd
+
+- assert:
+ that:
+ result.cmd == "echo 'dog' 'executable=/usr/bin/python'"
+
+- name: it is valid to pass multiple key=value arguments because the shell doesn't check key=value arguments
+ shell: echo quackquack=here quackquack=everywhere
+ register: result
+
+- assert:
+ that:
+ result.cmd == 'echo quackquack=here quackquack=everywhere'
+
+- name: the same is true with quoting
+ shell: echo "quackquack=here quackquack=everywhere"
+ register: result
+
+- assert:
+ that:
+ result.cmd == 'echo "quackquack=here quackquack=everywhere"'
+
+- name: the same is true with quoting (B)
+ shell: echo "quackquack=here" "quackquack=everywhere"
+ register: result
+
+- name: the same is true with quoting (C)
+ shell: echo "quackquack=here" 'quackquack=everywhere'
+ register: result
+
+- name: the same is true with quoting (D)
+ shell: echo "quackquack=here" 'quackquack=everywhere'
+ register: result
+
+- name: the same is true with quoting (E)
+ shell: echo {{ test_input }}
+ register: result
+
+- assert:
+ that:
+ result.cmd == "echo a=1 a=2 a=3"
+
+- name: more shell duplicates
+ shell: echo foo=bar foo=bar
+ register: result
+
+- assert:
+ that:
+ result.cmd == "echo foo=bar foo=bar"
+
+- name: raw duplicates, noop
+ raw: env true foo=bar foo=bar
+
+- name: multi-line inline shell commands (should use script module but hey) are a thing
+ shell: "{{ multi_line }}"
+ register: result
+
+- debug: var=result
+
+- assert:
+ that:
+ result.stdout_lines == [ 'old', 'mcdonald', 'had', 'a', 'farm' ]
+
+- name: passing same arg to shell command is legit
+ shell: echo foo --arg=a --arg=b
+ failed_when: False # only the parse behaviour matters here; ignore the exit code (TODO: register and compare the result)
+ register: result
+
+- assert:
+ that:
+ # command shouldn't end in spaces, amend test once fixed
+ - result.cmd == "echo foo --arg=a --arg=b"
+
+- name: test includes with params
+ include: test_include.yml fact_name=include_params param="{{ test_input }}"
+
+- name: assert the include set the correct fact for the param
+ assert:
+ that:
+ - include_params == test_input
+
+- name: test includes with quoted params
+ include: test_include.yml fact_name=double_quoted_param param="this is a param with double quotes"
+
+- name: assert the include set the correct fact for the double quoted param
+ assert:
+ that:
+ - double_quoted_param == "this is a param with double quotes"
+
+- name: test includes with single quoted params
+ include: test_include.yml fact_name=single_quoted_param param='this is a param with single quotes'
+
+- name: assert the include set the correct fact for the single quoted param
+ assert:
+ that:
+ - single_quoted_param == "this is a param with single quotes"
+
+- name: test includes with quoted params in complex args
+ include: test_include.yml
+ vars:
+ fact_name: complex_param
+ param: "this is a param in a complex arg with double quotes"
+
+- name: assert the include set the correct fact for the params in complex args
+ assert:
+ that:
+ - complex_param == "this is a param in a complex arg with double quotes"
+
+- name: test variable module name
+ action: "{{ variable_module_name }} msg='this should be debugged'"
+ register: result
+
+- name: assert the task with variable module name ran
+ assert:
+ that:
+ - result.msg == "this should be debugged"
+
+- name: test conditional includes
+ include: test_include_conditional.yml
+ when: false
+
+- name: assert the nested include from test_include_conditional was not set
+ assert:
+ that:
+ - nested_include_var is undefined
+
+- name: test omit in complex args
+ set_fact:
+ foo: bar
+ spam: "{{ omit }}"
+ should_not_omit: "prefix{{ omit }}"
+
+- assert:
+ that:
+ - foo == 'bar'
+ - spam is undefined
+ - should_not_omit is defined
+
+- name: test omit in module args
+ set_fact: >
+ yo=whatsup
+ eggs="{{ omit }}"
+ default_omitted="{{ not_exists|default(omit) }}"
+ should_not_omit_1="prefix{{ omit }}"
+ should_not_omit_2="{{ omit }}suffix"
+ should_not_omit_3="__omit_place_holder__afb6b9bc3d20bfeaa00a1b23a5930f89"
+
+- assert:
+ that:
+ - yo == 'whatsup'
+ - eggs is undefined
+ - default_omitted is undefined
+ - should_not_omit_1 is defined
+ - should_not_omit_2 is defined
+ - should_not_omit_3 == "__omit_place_holder__afb6b9bc3d20bfeaa00a1b23a5930f89"
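
These checks encode how `omit` works: it renders to a marker string unique per run, and argument values exactly equal to that marker are dropped before the module executes, while values that merely embed it are kept. A rough sketch of the filtering, reusing the marker literal from the test above:

    omit_token = "__omit_place_holder__afb6b9bc3d20bfeaa00a1b23a5930f89"

    def strip_omitted(args, token):
        # Drop values that ARE the marker; keep values that only contain it.
        return {k: v for k, v in args.items() if v != token}

    args = {
        "yo": "whatsup",
        "eggs": omit_token,                          # dropped
        "should_not_omit_1": "prefix" + omit_token,  # kept
    }
    assert strip_omitted(args, omit_token) == {
        "yo": "whatsup",
        "should_not_omit_1": "prefix" + omit_token,
    }
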
diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml
new file mode 100644
index 00000000..4ba50358
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include.yml
@@ -0,0 +1 @@
+- set_fact: "{{fact_name}}='{{param}}'"
diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml
new file mode 100644
index 00000000..070888da
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_conditional.yml
@@ -0,0 +1 @@
+- include: test_include_nested.yml
diff --git a/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml
new file mode 100644
index 00000000..f1f6fcc4
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_good_parsing/tasks/test_include_nested.yml
@@ -0,0 +1,2 @@
+- name: set the nested include fact
+ set_fact: nested_include_var=1
diff --git a/test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml b/test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml
new file mode 100644
index 00000000..ea7a0b84
--- /dev/null
+++ b/test/integration/targets/parsing/roles/test_good_parsing/vars/main.yml
@@ -0,0 +1,2 @@
+---
+variable_module_name: debug
diff --git a/test/integration/targets/parsing/runme.sh b/test/integration/targets/parsing/runme.sh
new file mode 100755
index 00000000..022ce4cf
--- /dev/null
+++ b/test/integration/targets/parsing/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook bad_parsing.yml -i ../../inventory -vvv "$@" --tags prepare,common,scenario5
+ansible-playbook good_parsing.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/path_lookups/aliases b/test/integration/targets/path_lookups/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/path_lookups/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/path_lookups/play.yml b/test/integration/targets/path_lookups/play.yml
new file mode 100644
index 00000000..7321589b
--- /dev/null
+++ b/test/integration/targets/path_lookups/play.yml
@@ -0,0 +1,49 @@
+- name: setup state
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - file: path={{playbook_dir}}/files state=directory
+ - file: path={{playbook_dir}}/roles/showfile/files state=directory
+ - copy: dest={{playbook_dir}}/roles/showfile/files/testfile content='in role files'
+ - copy: dest={{playbook_dir}}/roles/showfile/testfile content='in role'
+ - copy: dest={{playbook_dir}}/roles/showfile/tasks/testfile content='in role tasks'
+ - copy: dest={{playbook_dir}}/files/testfile content='in files'
+ - copy: dest={{playbook_dir}}/testfile content='in local'
+
+- include: testplay.yml
+ vars:
+ remove: nothing
+ role_out: in role files
+ play_out: in files
+
+- include: testplay.yml
+ vars:
+ remove: roles/showfile/files/testfile
+ role_out: in role
+ play_out: in files
+
+- include: testplay.yml
+ vars:
+ remove: roles/showfile/testfile
+ role_out: in role tasks
+ play_out: in files
+
+- include: testplay.yml
+ vars:
+ remove: roles/showfile/tasks/testfile
+ role_out: in files
+ play_out: in files
+
+- include: testplay.yml
+ vars:
+ remove: files/testfile
+ role_out: in local
+ play_out: in local
+
+- name: cleanup
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - file: path={{playbook_dir}}/testfile state=absent
+ - file: path={{playbook_dir}}/files state=absent
+ - file: path={{playbook_dir}}/roles/showfile/files state=absent
diff --git a/test/integration/targets/path_lookups/roles/showfile/tasks/main.yml b/test/integration/targets/path_lookups/roles/showfile/tasks/main.yml
new file mode 100644
index 00000000..1b380579
--- /dev/null
+++ b/test/integration/targets/path_lookups/roles/showfile/tasks/main.yml
@@ -0,0 +1,2 @@
+- name: relative to role
+ set_fact: role_result="{{lookup('file', 'testfile')}}"
diff --git a/test/integration/targets/path_lookups/runme.sh b/test/integration/targets/path_lookups/runme.sh
new file mode 100755
index 00000000..754150b4
--- /dev/null
+++ b/test/integration/targets/path_lookups/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook play.yml -i ../../inventory -v "$@"
diff --git a/test/integration/targets/path_lookups/testplay.yml b/test/integration/targets/path_lookups/testplay.yml
new file mode 100644
index 00000000..8bf45532
--- /dev/null
+++ b/test/integration/targets/path_lookups/testplay.yml
@@ -0,0 +1,20 @@
+- name: test initial state
+ hosts: localhost
+ gather_facts: false
+ pre_tasks:
+ - name: remove {{ remove }}
+ file: path={{ playbook_dir }}/{{ remove }} state=absent
+ roles:
+ - showfile
+ post_tasks:
+ - name: from play
+ set_fact: play_result="{{lookup('file', 'testfile')}}"
+
+ - name: output stage {{ remove }} removed
+ debug: msg="play> {{play_out}}, role> {{role_out}}"
+
+ - name: verify that results match expectations
+ assert:
+ that:
+ - 'play_result == play_out'
+ - 'role_result == role_out'
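
Taken together, the five runs of this play pin down the file lookup search order by deleting one candidate per run. A sketch of the precedence the expected outputs encode (an illustrative helper, not the lookup plugin itself):

    import os

    def lookup_file(name, role_path=None, play_path="."):
        # Search order exercised by play.yml: role files/, the role dir,
        # role tasks/, then the play's files/ dir, then the playbook dir.
        candidates = []
        if role_path:
            candidates += [
                os.path.join(role_path, "files", name),
                os.path.join(role_path, name),
                os.path.join(role_path, "tasks", name),
            ]
        candidates += [
            os.path.join(play_path, "files", name),
            os.path.join(play_path, name),
        ]
        for path in candidates:
            if os.path.isfile(path):
                return path
        raise FileNotFoundError(name)
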
diff --git a/test/integration/targets/path_with_comma_in_inventory/aliases b/test/integration/targets/path_with_comma_in_inventory/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/path_with_comma_in_inventory/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/path_with_comma_in_inventory/playbook.yml b/test/integration/targets/path_with_comma_in_inventory/playbook.yml
new file mode 100644
index 00000000..64c83689
--- /dev/null
+++ b/test/integration/targets/path_with_comma_in_inventory/playbook.yml
@@ -0,0 +1,9 @@
+---
+- hosts: all
+ gather_facts: false
+ tasks:
+ - name: Ensure we can see group_vars from path with comma
+ assert:
+ that:
+ - inventory_var_from_path_with_commas is defined
+ - inventory_var_from_path_with_commas == 'here'
diff --git a/test/integration/targets/path_with_comma_in_inventory/runme.sh b/test/integration/targets/path_with_comma_in_inventory/runme.sh
new file mode 100755
index 00000000..833e2ac5
--- /dev/null
+++ b/test/integration/targets/path_with_comma_in_inventory/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -ux
+
+ansible-playbook -i this,path,has,commas/hosts playbook.yml -v "$@"
diff --git a/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml
new file mode 100644
index 00000000..df5b84d8
--- /dev/null
+++ b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/group_vars/all.yml
@@ -0,0 +1 @@
+inventory_var_from_path_with_commas: 'here'
diff --git a/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts
new file mode 100644
index 00000000..5219b90c
--- /dev/null
+++ b/test/integration/targets/path_with_comma_in_inventory/this,path,has,commas/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local
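
Context for this target: a comma-separated `-i` argument normally means an inline host list, so a directory path containing commas is ambiguous. The test asserts that an existing path still wins. A hedged sketch of that disambiguation:

    import os

    def classify_inventory_arg(arg):
        # An existing file or directory is an inventory source even if its
        # name contains commas; otherwise commas mean an inline host list.
        if os.path.exists(arg):
            return ("path", arg)
        if "," in arg:
            return ("host_list", [h for h in arg.split(",") if h])
        return ("path", arg)

    print(classify_inventory_arg("this,path,has,commas/hosts"))
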
diff --git a/test/integration/targets/pause/aliases b/test/integration/targets/pause/aliases
new file mode 100644
index 00000000..810f1ab6
--- /dev/null
+++ b/test/integration/targets/pause/aliases
@@ -0,0 +1,3 @@
+needs/target/setup_pexpect
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/pause/pause-1.yml b/test/integration/targets/pause/pause-1.yml
new file mode 100644
index 00000000..44c9960c
--- /dev/null
+++ b/test/integration/targets/pause/pause-1.yml
@@ -0,0 +1,11 @@
+- name: Test pause module in default state
+ hosts: localhost
+ become: no
+ gather_facts: no
+
+ tasks:
+ - name: EXPECTED FAILURE
+ pause:
+
+ - debug:
+ msg: Task after pause
diff --git a/test/integration/targets/pause/pause-2.yml b/test/integration/targets/pause/pause-2.yml
new file mode 100644
index 00000000..81a7fda5
--- /dev/null
+++ b/test/integration/targets/pause/pause-2.yml
@@ -0,0 +1,12 @@
+- name: Test pause module with custom prompt
+ hosts: localhost
+ become: no
+ gather_facts: no
+
+ tasks:
+ - name: EXPECTED FAILURE
+ pause:
+ prompt: Custom prompt
+
+ - debug:
+ msg: Task after pause
diff --git a/test/integration/targets/pause/pause-3.yml b/test/integration/targets/pause/pause-3.yml
new file mode 100644
index 00000000..8f8c72ed
--- /dev/null
+++ b/test/integration/targets/pause/pause-3.yml
@@ -0,0 +1,12 @@
+- name: Test pause module with a pause duration
+ hosts: localhost
+ become: no
+ gather_facts: no
+
+ tasks:
+ - name: EXPECTED FAILURE
+ pause:
+ seconds: 2
+
+ - debug:
+ msg: Task after pause
diff --git a/test/integration/targets/pause/pause-4.yml b/test/integration/targets/pause/pause-4.yml
new file mode 100644
index 00000000..f16c7d67
--- /dev/null
+++ b/test/integration/targets/pause/pause-4.yml
@@ -0,0 +1,13 @@
+- name: Test pause module with a pause duration and custom prompt
+ hosts: localhost
+ become: no
+ gather_facts: no
+
+ tasks:
+ - name: EXPECTED FAILURE
+ pause:
+ seconds: 2
+ prompt: Waiting for two seconds
+
+ - debug:
+ msg: Task after pause
diff --git a/test/integration/targets/pause/pause-5.yml b/test/integration/targets/pause/pause-5.yml
new file mode 100644
index 00000000..22955cd0
--- /dev/null
+++ b/test/integration/targets/pause/pause-5.yml
@@ -0,0 +1,35 @@
+- name: Test pause module echo output
+ hosts: localhost
+ become: no
+ gather_facts: no
+
+ tasks:
+ - pause:
+ echo: yes
+ prompt: Enter some text
+ register: results
+
+ - name: Ensure that input was captured
+ assert:
+ that:
+ - results.user_input == 'hello there'
+
+ - pause:
+ echo: yes
+ prompt: Enter some text to edit
+ register: result
+
+ - name: Ensure edited input was captured
+ assert:
+ that:
+ - result.user_input == 'hello tommy boy'
+
+ - pause:
+ echo: no
+ prompt: Enter some text
+ register: result
+
+ - name: Ensure secret input was captured
+ assert:
+ that:
+ - result.user_input == 'supersecretpancakes'
diff --git a/test/integration/targets/pause/runme.sh b/test/integration/targets/pause/runme.sh
new file mode 100755
index 00000000..932f49ec
--- /dev/null
+++ b/test/integration/targets/pause/runme.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook setup.yml
+
+# Test pause module when no tty and non-interactive. This is to prevent playbooks
+# from hanging in cron and Tower jobs.
+/usr/bin/env bash << EOF
+ansible-playbook test-pause-no-tty.yml 2>&1 | \
+ grep '\[WARNING\]: Not waiting for response to prompt as stdin is not interactive' && {
+ echo 'Successfully skipped pause in no TTY mode' >&2
+ exit 0
+ } || {
+ echo 'Failed to skip pause module' >&2
+ exit 1
+ }
+EOF
+
+# Test redirecting stdout
+# Issue #41717
+ansible-playbook pause-3.yml > /dev/null \
+ && echo "Successfully redirected stdout" \
+ || echo "Failure when attempting to redirect stdout"
+
+# Test pause with seconds and minutes specified
+ansible-playbook test-pause.yml "$@"
+
+# Interactively test pause
+python test-pause.py "$@"
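
The heredoc above runs the playbook under a bash whose stdin is a pipe rather than a terminal, which is what triggers the warning. The same check in Python, for illustration (playbook name and warning text are taken from the script above):

    import subprocess

    proc = subprocess.run(
        ["ansible-playbook", "test-pause-no-tty.yml"],
        stdin=subprocess.DEVNULL,   # guarantee a non-interactive stdin
        capture_output=True,
        text=True,
    )
    warning = "Not waiting for response to prompt as stdin is not interactive"
    if warning in proc.stdout + proc.stderr:
        print("Successfully skipped pause in no TTY mode")
    else:
        raise SystemExit("Failed to skip pause module")
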
diff --git a/test/integration/targets/pause/setup.yml b/test/integration/targets/pause/setup.yml
new file mode 100644
index 00000000..9f6ab117
--- /dev/null
+++ b/test/integration/targets/pause/setup.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ roles:
+ - setup_pexpect
diff --git a/test/integration/targets/pause/test-pause-background.yml b/test/integration/targets/pause/test-pause-background.yml
new file mode 100644
index 00000000..e480a774
--- /dev/null
+++ b/test/integration/targets/pause/test-pause-background.yml
@@ -0,0 +1,10 @@
+- name: Test pause in a background task
+ hosts: localhost
+ gather_facts: no
+ become: no
+
+ tasks:
+ - pause:
+
+ - pause:
+ seconds: 1
diff --git a/test/integration/targets/pause/test-pause-no-tty.yml b/test/integration/targets/pause/test-pause-no-tty.yml
new file mode 100644
index 00000000..6e0e4027
--- /dev/null
+++ b/test/integration/targets/pause/test-pause-no-tty.yml
@@ -0,0 +1,7 @@
+- name: Test pause
+ hosts: localhost
+ gather_facts: no
+ become: no
+
+ tasks:
+ - pause:
diff --git a/test/integration/targets/pause/test-pause.py b/test/integration/targets/pause/test-pause.py
new file mode 100755
index 00000000..7b37c666
--- /dev/null
+++ b/test/integration/targets/pause/test-pause.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+
+import os
+import pexpect
+import sys
+import termios
+
+from ansible.module_utils.six import PY2
+
+args = sys.argv[1:]
+
+env_vars = {
+ 'ANSIBLE_ROLES_PATH': './roles',
+ 'ANSIBLE_NOCOLOR': 'True',
+ 'ANSIBLE_RETRY_FILES_ENABLED': 'False'
+}
+
+try:
+ backspace = termios.tcgetattr(sys.stdin.fileno())[6][termios.VERASE]
+except Exception:
+ backspace = b'\x7f'
+
+if PY2:
+ log_buffer = sys.stdout
+else:
+ log_buffer = sys.stdout.buffer
+
+os.environ.update(env_vars)
+
+# -- Plain pause -- #
+playbook = 'pause-1.yml'
+
+# Case 1 - Continue with enter
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Press enter to continue, Ctrl\+C to interrupt:')
+pause_test.send('\r')
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+
+# Case 2 - Continue with C
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Press enter to continue, Ctrl\+C to interrupt:')
+pause_test.send('\x03')
+pause_test.expect("Press 'C' to continue the play or 'A' to abort")
+pause_test.send('C')
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+
+# Case 3 - Abort with A
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Press enter to continue, Ctrl\+C to interrupt:')
+pause_test.send('\x03')
+pause_test.expect("Press 'C' to continue the play or 'A' to abort")
+pause_test.send('A')
+pause_test.expect('user requested abort!')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+# -- Custom Prompt -- #
+playbook = 'pause-2.yml'
+
+# Case 1 - Continue with enter
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Custom prompt:')
+pause_test.send('\r')
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+
+# Case 2 - Continue with C
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Custom prompt:')
+pause_test.send('\x03')
+pause_test.expect("Press 'C' to continue the play or 'A' to abort")
+pause_test.send('C')
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+
+# Case 3 - Abort with A
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Custom prompt:')
+pause_test.send('\x03')
+pause_test.expect("Press 'C' to continue the play or 'A' to abort")
+pause_test.send('A')
+pause_test.expect('user requested abort!')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+# -- Pause for N seconds -- #
+
+playbook = 'pause-3.yml'
+
+# Case 1 - Wait for task to continue after timeout
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Pausing for \d+ seconds')
+pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)")
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+# Case 2 - Continue with Ctrl + C, C
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Pausing for \d+ seconds')
+pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)")
+pause_test.send('\x03')
+pause_test.send('C')
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+
+# Case 3 - Abort with Ctrl + C, A
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Pausing for \d+ seconds')
+pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)")
+pause_test.send('\x03')
+pause_test.send('A')
+pause_test.expect('user requested abort!')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+# -- Pause for N seconds with custom prompt -- #
+
+playbook = 'pause-4.yml'
+
+# Case 1 - Wait for task to continue after timeout
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Pausing for \d+ seconds')
+pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)")
+pause_test.expect(r"Waiting for two seconds:")
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+# Case 2 - Continue with Ctrl + C, C
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Pausing for \d+ seconds')
+pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)")
+pause_test.expect(r"Waiting for two seconds:")
+pause_test.send('\x03')
+pause_test.send('C')
+pause_test.expect('Task after pause')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+
+# Case 3 - Abort with Ctrl + C, A
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Pausing for \d+ seconds')
+pause_test.expect(r"\(ctrl\+C then 'C' = continue early, ctrl\+C then 'A' = abort\)")
+pause_test.expect(r"Waiting for two seconds:")
+pause_test.send('\x03')
+pause_test.send('A')
+pause_test.expect('user requested abort!')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
+
+# -- Enter input and ensure it's captured, echoed, and can be edited -- #
+
+playbook = 'pause-5.yml'
+
+pause_test = pexpect.spawn(
+ 'ansible-playbook',
+ args=[playbook] + args,
+ timeout=10,
+ env=os.environ
+)
+
+pause_test.logfile = log_buffer
+pause_test.expect(r'Enter some text:')
+pause_test.send('hello there')
+pause_test.send('\r')
+pause_test.expect(r'Enter some text to edit:')
+pause_test.send('hello there')
+pause_test.send(backspace * 4)
+pause_test.send('ommy boy')
+pause_test.send('\r')
+pause_test.expect(r'Enter some text \(output is hidden\):')
+pause_test.send('supersecretpancakes')
+pause_test.send('\r')
+pause_test.expect(pexpect.EOF)
+pause_test.close()
diff --git a/test/integration/targets/pause/test-pause.yml b/test/integration/targets/pause/test-pause.yml
new file mode 100644
index 00000000..6fefbaa1
--- /dev/null
+++ b/test/integration/targets/pause/test-pause.yml
@@ -0,0 +1,51 @@
+- name: Test pause
+ hosts: localhost
+ gather_facts: no
+ become: no
+
+ tasks:
+ - name: non-integer for duration (EXPECTED FAILURE)
+ pause:
+ seconds: hello
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'non-integer' in result.msg"
+
+ - name: non-boolean for echo (EXPECTED FAILURE)
+ pause:
+ echo: hello
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'not a valid boolean' in result.msg"
+
+ - pause:
+ seconds: 0.1
+ register: results
+
+ - assert:
+ that:
+ - results.stdout is search('Paused for \d+\.\d+ seconds')
+
+ - pause:
+ seconds: 1
+ register: results
+
+ - assert:
+ that:
+ - results.stdout is search('Paused for \d+\.\d+ seconds')
+
+ - pause:
+ minutes: 1
+ register: results
+
+ - assert:
+ that:
+ - results.stdout is search('Paused for \d+\.\d+ minutes')
diff --git a/test/integration/targets/ping/aliases b/test/integration/targets/ping/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/ping/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/ping/tasks/main.yml b/test/integration/targets/ping/tasks/main.yml
new file mode 100644
index 00000000..bc93f98a
--- /dev/null
+++ b/test/integration/targets/ping/tasks/main.yml
@@ -0,0 +1,53 @@
+# test code for the ping module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: ping the test
+ ping:
+ register: result
+
+- name: assert the ping worked
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+ - result.ping == 'pong'
+
+- name: ping with data
+ ping:
+ data: testing
+ register: result
+
+- name: assert the ping worked with data
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+ - result.ping == 'testing'
+
+- name: ping with data=crash
+ ping:
+ data: crash
+ register: result
+ ignore_errors: yes
+
+- name: assert the ping failed with data=boom
+ assert:
+ that:
+ - result is failed
+ - result is not changed
+ - "'Exception: boom' in result.module_stdout + result.module_stderr"
diff --git a/test/integration/targets/pip/aliases b/test/integration/targets/pip/aliases
new file mode 100644
index 00000000..8d8cc50e
--- /dev/null
+++ b/test/integration/targets/pip/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group5
+skip/aix
diff --git a/test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py b/test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py
new file mode 100644
index 00000000..c8a79430
--- /dev/null
+++ b/test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py
@@ -0,0 +1,2 @@
+def main():
+ print("success")
diff --git a/test/integration/targets/pip/files/setup.py b/test/integration/targets/pip/files/setup.py
new file mode 100755
index 00000000..094064b7
--- /dev/null
+++ b/test/integration/targets/pip/files/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+from setuptools import setup, find_packages
+
+setup(
+ name="ansible_test_pip_chdir",
+ version="0",
+ packages=find_packages(),
+ entry_points={
+ 'console_scripts': [
+ 'ansible_test_pip_chdir = ansible_test_pip_chdir:main'
+ ]
+ }
+)
diff --git a/test/integration/targets/pip/meta/main.yml b/test/integration/targets/pip/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/pip/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/pip/tasks/default_cleanup.yml b/test/integration/targets/pip/tasks/default_cleanup.yml
new file mode 100644
index 00000000..f2265c09
--- /dev/null
+++ b/test/integration/targets/pip/tasks/default_cleanup.yml
@@ -0,0 +1,5 @@
+- name: remove unwanted packages
+ package:
+ name: git
+ state: absent
+ when: git_install.changed
diff --git a/test/integration/targets/pip/tasks/freebsd_cleanup.yml b/test/integration/targets/pip/tasks/freebsd_cleanup.yml
new file mode 100644
index 00000000..fa224d83
--- /dev/null
+++ b/test/integration/targets/pip/tasks/freebsd_cleanup.yml
@@ -0,0 +1,6 @@
+- name: remove auto-installed packages from FreeBSD
+ pkgng:
+ name: git
+ state: absent
+ autoremove: yes
+ when: git_install.changed
diff --git a/test/integration/targets/pip/tasks/main.yml b/test/integration/targets/pip/tasks/main.yml
new file mode 100644
index 00000000..05879c18
--- /dev/null
+++ b/test/integration/targets/pip/tasks/main.yml
@@ -0,0 +1,43 @@
+# Current pip unconditionally uses md5.
+# We can re-enable if pip switches to a different hash or allows us to not check md5.
+
+- name: find virtualenv command
+ command: "which virtualenv virtualenv-{{ ansible_python.version.major }}.{{ ansible_python.version.minor }}"
+ register: command
+ ignore_errors: true
+
+- name: is virtualenv available to python -m
+ command: '{{ ansible_python_interpreter }} -m virtualenv'
+ register: python_m
+ when: not command.stdout_lines
+ failed_when: python_m.rc != 2
+
+- name: remember selected virtualenv command
+ set_fact:
+ virtualenv: "{{ command.stdout_lines[0] if command is successful else ansible_python_interpreter ~ ' -m virtualenv' }}"
+
+- block:
+ - name: install git, needed for repo installs
+ package:
+ name: git
+ state: present
+ when: ansible_distribution != "MacOSX"
+ register: git_install
+
+ - name: ensure wheel is installed
+ pip:
+ name: wheel
+
+ - include_tasks: pip.yml
+ always:
+ - name: platform specific cleanup
+ include_tasks: "{{ cleanup_filename }}"
+ with_first_found:
+ - "{{ ansible_distribution | lower }}_cleanup.yml"
+ - "default_cleanup.yml"
+ loop_control:
+ loop_var: cleanup_filename
+ when: ansible_fips|bool != True
+ module_defaults:
+ pip:
+ virtualenv_command: "{{ virtualenv }}"
diff --git a/test/integration/targets/pip/tasks/pip.yml b/test/integration/targets/pip/tasks/pip.yml
new file mode 100644
index 00000000..6281bbe8
--- /dev/null
+++ b/test/integration/targets/pip/tasks/pip.yml
@@ -0,0 +1,580 @@
+# test code for the pip module
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# FIXME: replace the python test package
+
+# first some tests installed system-wide
+# verify things were not installed to start with
+
+- name: ensure packages are not installed (precondition setup)
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+
+# verify that a package that is uninstalled being set to absent
+# results in an unchanged state and that the test package is not
+# installed
+
+- name: ensure packages are not installed
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+ register: uninstall_result
+
+- name: removing unremoved packages should return unchanged
+ assert:
+ that:
+ - "not (uninstall_result is changed)"
+
+- command: "{{ ansible_python.executable }} -c 'import {{ item }}'"
+ register: absent_result
+ failed_when: "absent_result.rc == 0"
+ loop: '{{ pip_test_modules }}'
+
+# now we're going to install the test package knowing it is uninstalled
+# and check that installation was ok
+
+- name: ensure packages are installed
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: present
+ register: install_result
+
+- name: verify we recorded a change
+ assert:
+ that:
+ - "install_result is changed"
+
+- command: "{{ ansible_python.executable }} -c 'import {{ item }}'"
+ loop: '{{ pip_test_modules }}'
+
+# now remove it to test uninstallation of a package we are sure is installed
+
+- name: now uninstall so we can see that a change occurred
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+ register: absent2
+
+- name: assert a change occurred on uninstallation
+ assert:
+ that:
+ - "absent2 is changed"
+
+# put the test packages back
+
+- name: now put it back in case someone wanted it (like us!)
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: present
+
+# Test virtualenv installations
+
+- name: "make sure the test env doesn't exist"
+ file:
+ state: absent
+ name: "{{ output_dir }}/pipenv"
+
+- name: install a working version of setuptools in the virtualenv
+ pip:
+ name: setuptools
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+ version: 33.1.1
+
+- name: create a requirements file with a vcs url
+ copy:
+ dest: "{{ output_dir }}/pipreq.txt"
+ content: "-e git+https://github.com/dvarrazzo/pyiso8601#egg=iso8601"
+
+- name: install the requirements file in a virtualenv
+ pip:
+ requirements: "{{ output_dir}}/pipreq.txt"
+ virtualenv: "{{ output_dir }}/pipenv"
+ register: req_installed
+
+- name: check that a change occurred
+ assert:
+ that:
+ - "req_installed is changed"
+
+- name: "repeat installation to check status didn't change"
+ pip:
+ requirements: "{{ output_dir}}/pipreq.txt"
+ virtualenv: "{{ output_dir }}/pipenv"
+ register: req_installed
+
+- name: "check that a change didn't occurr this time (bug ansible#1705)"
+ assert:
+ that:
+ - "not (req_installed is changed)"
+
+- name: install the same module from url
+ pip:
+ name: "git+https://github.com/dvarrazzo/pyiso8601#egg=iso8601"
+ virtualenv: "{{ output_dir }}/pipenv"
+ editable: True
+ register: url_installed
+
+- name: "check that a change didn't occurr (bug ansible-modules-core#1645)"
+ assert:
+ that:
+ - "not (url_installed is changed)"
+
+# Test pip package in check mode doesn't always report changed.
+
+# Special case for pip
+- name: check for pip package
+ pip:
+ name: pip
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+
+- name: check for pip package in check_mode
+ pip:
+ name: pip
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+ check_mode: True
+ register: pip_check_mode
+
+- name: make sure pip in check_mode doesn't report changed
+ assert:
+ that:
+ - "not (pip_check_mode is changed)"
+
+# Special case for setuptools
+- name: check for setuptools package
+ pip:
+ name: setuptools
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+
+- name: check for setuptools package in check_mode
+ pip:
+ name: setuptools
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+ check_mode: True
+ register: setuptools_check_mode
+
+- name: make sure setuptools in check_mode doesn't report changed
+ assert:
+ that:
+ - "not (setuptools_check_mode is changed)"
+
+
+# Normal case
+- name: check for q package
+ pip:
+ name: q
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+
+- name: check for q package in check_mode
+ pip:
+ name: q
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+ check_mode: True
+ register: q_check_mode
+
+- name: make sure q in check_mode doesn't report changed
+ assert:
+ that:
+ - "not (q_check_mode is changed)"
+
+# Case with package name that has a different package name case and an
+# underscore instead of a hyphen
+- name: check for Junit-XML package
+ pip:
+ name: Junit-XML
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+
+- name: check for Junit-XML package in check_mode
+ pip:
+ name: Junit-XML
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+ check_mode: True
+ register: diff_case_check_mode
+
+- name: make sure Junit-XML in check_mode doesn't report changed
+ assert:
+ that:
+ - "diff_case_check_mode is not changed"
+
+# ansible#23204
+- name: ensure a fresh virtualenv
+ file:
+ state: absent
+ name: "{{ output_dir }}/pipenv"
+
+- name: install pip through pip into a fresh virtualenv
+ pip:
+ name: pip
+ virtualenv: "{{ output_dir }}/pipenv"
+ register: pip_install_venv
+
+- name: make sure pip in a fresh virtualenv reports changed
+ assert:
+ that:
+ - "pip_install_venv is changed"
+
+# https://github.com/ansible/ansible/issues/37912
+# support chdir without virtualenv
+- name: create chdir test directories
+ file:
+ state: directory
+ name: "{{ output_dir }}/{{ item }}"
+ loop:
+ - pip_module
+ - pip_root
+ - pip_module/ansible_test_pip_chdir
+
+- name: copy test module
+ copy:
+ src: "{{ item }}"
+ dest: "{{ output_dir }}/pip_module/{{ item }}"
+ loop:
+ - setup.py
+ - ansible_test_pip_chdir/__init__.py
+
+- name: install test module
+ pip:
+ name: .
+ chdir: "{{ output_dir }}/pip_module"
+ extra_args: --user --upgrade --root {{ output_dir }}/pip_root
+
+- name: register python_site_lib
+ command: '{{ ansible_python.executable }} -c "import site; print(site.USER_SITE)"'
+ register: pip_python_site_lib
+
+- name: register python_user_base
+ command: '{{ ansible_python.executable }} -c "import site; print(site.USER_BASE)"'
+ register: pip_python_user_base
+
+- name: run test module
+ shell: "PYTHONPATH=$(echo {{ output_dir }}/pip_root{{ pip_python_site_lib.stdout }}) {{ output_dir }}/pip_root{{ pip_python_user_base.stdout }}/bin/ansible_test_pip_chdir"
+ register: pip_chdir_command
+
+- name: make sure command ran
+ assert:
+ that:
+ - pip_chdir_command.stdout == "success"
+
+# https://github.com/ansible/ansible/issues/25122
+- name: ensure a fresh virtualenv
+ file:
+ state: absent
+ name: "{{ output_dir }}/pipenv"
+
+- name: install a package into a relative virtualenv using chdir
+ pip:
+ name: q
+ chdir: "{{ output_dir }}/"
+ virtualenv: "pipenv"
+ state: present
+ register: venv_chdir
+
+- name: make sure a fresh virtualenv + chdir reports changed
+ assert:
+ that:
+ - "venv_chdir is changed"
+
+# ansible#38785
+- name: allow empty list of packages
+ pip:
+ name: []
+ register: pip_install_empty
+
+- name: ensure empty install is successful
+ assert:
+ that:
+ - "not (pip_install_empty is changed)"
+
+# https://github.com/ansible/ansible/issues/41043
+- name: do not consider an empty string as a version
+ pip:
+ name: q
+ state: present
+ version: ""
+ virtualenv: "{{ output_dir }}/pipenv"
+ register: pip_install_empty_version_string
+
+- name: ensure that task installation did not fail
+ assert:
+ that:
+ - pip_install_empty_version_string is successful
+
+# test version specifiers
+- name: make sure no test packages are installed now
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+
+- name: install package with version specifiers
+ pip:
+ name: "{{ pip_test_package }}"
+ version: "<100,!=1.0,>0.0.0"
+ register: version
+
+- name: assert package installed correctly
+ assert:
+ that: "version.changed"
+
+- name: reinstall package
+ pip:
+ name: "{{ pip_test_package }}"
+ version: "<100,!=1.0,>0.0.0"
+ register: version2
+
+- name: assert no changes occurred
+ assert:
+ that: "not version2.changed"
+
+- name: test check_mode
+ pip:
+ name: "{{ pip_test_package }}"
+ version: "<100,!=1.0,>0.0.0"
+ check_mode: yes
+ register: version3
+
+- name: assert no changes
+ assert:
+ that: "not version3.changed"
+
+- name: test check_mode with an unsatisfied version
+ pip:
+ name: "{{ pip_test_package }}"
+ version: ">100.0.0"
+ check_mode: yes
+ register: version4
+
+- name: assert changed
+ assert:
+ that: "version4.changed"
+
+- name: uninstall test packages for next test
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+
+- name: test invalid combination of arguments
+ pip:
+ name: "{{ pip_test_pkg_ver }}"
+ version: "1.11.1"
+ ignore_errors: yes
+ register: version5
+
+- name: assert the invalid combination should fail
+ assert:
+ that: "version5 is failed"
+
+- name: another invalid combination of arguments
+ pip:
+ name: "{{ pip_test_pkg_ver[0] }}"
+ version: "<100.0.0"
+ ignore_errors: yes
+ register: version6
+
+- name: assert invalid combination should fail
+ assert:
+ that: "version6 is failed"
+
+- name: try to install packages with unsatisfiable versions
+ pip:
+ name: "{{ pip_test_pkg_ver_unsatisfied }}"
+ ignore_errors: yes
+ register: version7
+
+- name: assert install should fail
+ assert:
+ that: "version7 is failed"
+
+- name: test installing multiple packages with version specifiers
+ pip:
+ name: "{{ pip_test_pkg_ver }}"
+ register: version8
+
+- name: assert packages installed correctly
+ assert:
+ that: "version8.changed"
+
+- name: test installing multiple packages with check_mode
+ pip:
+ name: "{{ pip_test_pkg_ver }}"
+ check_mode: yes
+ register: version9
+
+- name: assert no change
+ assert:
+ that: "not version9.changed"
+
+- name: test installing unsatisfiable packages with check_mode
+ pip:
+ name: "{{ pip_test_pkg_ver_unsatisfied }}"
+ check_mode: yes
+ register: version10
+
+- name: assert changes needed
+ assert:
+ that: "version10.changed"
+
+- name: uninstall packages for next test
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+
+- name: test installing multiple packages provided as a single string
+ pip:
+ name: "{{pip_test_pkg_ver[0]}},{{pip_test_pkg_ver[1]}}"
+ register: version11
+
+- name: assert the install ran correctly
+ assert:
+ that: "version11.changed"
+
+- name: test installing multiple packages provided as a single string with check_mode
+ pip:
+ name: "{{pip_test_pkg_ver[0]}},{{pip_test_pkg_ver[1]}}"
+ check_mode: yes
+ register: version12
+
+- name: assert no changes needed
+ assert:
+ that: "not version12.changed"
+
+- name: test the module can parse a mix of packages on one line and a git url
+ pip:
+ name:
+ - git+https://github.com/dvarrazzo/pyiso8601#egg=iso8601
+ - "{{pip_test_pkg_ver[0]}},{{pip_test_pkg_ver[1]}}"
+
+- name: test the invalid package name
+ pip:
+ name: djan=+-~!@#$go>1.11.1,<1.11.3
+ ignore_errors: yes
+ register: version13
+
+- name: the invalid package name should make the module fail
+ assert:
+ that: "version13 is failed"
+
+- name: try installing a package with setuptools extras
+ pip:
+ name:
+ - "{{pip_test_package}}[test]"
+
+- name: clean up
+ pip:
+ name: "{{ pip_test_packages }}"
+ state: absent
+
+# https://github.com/ansible/ansible/issues/47198
+# distribute is a legacy package that will fail on newer Python 3 versions
+- block:
+ - name: make sure the virtualenv does not exist
+ file:
+ state: absent
+ name: "{{ output_dir }}/pipenv"
+
+ - name: install distribute in the virtualenv
+ pip:
+ # using -c for constraints is not supported as long as tests are executed using the centos6 container
+ # since the pip version in the venv is not upgraded and is too old (6.0.8)
+ name:
+ - distribute
+ - setuptools<45 # setuptools 45 and later require python 3.5 or later
+ virtualenv: "{{ output_dir }}/pipenv"
+ state: present
+
+ - name: try to remove distribute
+ pip:
+ state: "absent"
+ name: "distribute"
+ virtualenv: "{{ output_dir }}/pipenv"
+ ignore_errors: yes
+ register: remove_distribute
+
+ - name: inspect the cmd
+ assert:
+ that: "'distribute' in remove_distribute.cmd"
+ when: ansible_python.version.major == 2
+
+### test virtualenv_command begin ###
+
+- name: Test virtualenv command with arguments
+ when: "ansible_system == 'Linux'"
+ block:
+ - name: make sure the virtualenv does not exist
+ file:
+ state: absent
+ name: "{{ output_dir }}/pipenv"
+
+ # ref: https://github.com/ansible/ansible/issues/52275
+ - name: install using virtualenv_command with arguments
+ pip:
+ name: "{{ pip_test_package }}"
+ virtualenv: "{{ output_dir }}/pipenv"
+ virtualenv_command: "virtualenv --verbose"
+ state: present
+ register: version13
+
+ - name: ensure install using virtualenv_command with arguments was successful
+ assert:
+ that:
+ - "version13 is success"
+
+### test virtualenv_command end ###
+
+# https://github.com/ansible/ansible/issues/68592
+# Handle pre-release version numbers in check_mode for already-installed
+# packages.
+# TODO: Limited to py3 test boxes for now so that the 'black' example installs;
+# we should probably find another package with a similar versioning scheme
+# (or make a small one) and enable this test for py2 as well.
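+# Note: 19.10b0 is a PEP 440 pre-release version ('b0' = first beta); the
+# version comparison in check_mode has to parse that suffix rather than fail.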
+- block:
+ - name: Install a beta version of a package
+ pip:
+ name: black
+ version: 19.10b0
+ state: present
+
+ - name: Use check_mode and ensure that the package is shown as installed
+ check_mode: true
+ pip:
+ name: black
+ state: present
+ register: pip_prereleases
+
+ - name: Uninstall the beta package if we need to
+ pip:
+ name: black
+ version: 19.10b0
+ state: absent
+ when: pip_prereleases is changed
+
+ - assert:
+ that:
+ - pip_prereleases is successful
+ - pip_prereleases is not changed
+ - '"black==19.10b0" in pip_prereleases.stdout_lines'
+
+ when: ansible_python.version.major == 3
diff --git a/test/integration/targets/pip/vars/main.yml b/test/integration/targets/pip/vars/main.yml
new file mode 100644
index 00000000..2e87abcc
--- /dev/null
+++ b/test/integration/targets/pip/vars/main.yml
@@ -0,0 +1,13 @@
+pip_test_package: sampleprojectpy2
+pip_test_packages:
+ - sampleprojectpy2
+ - jiphy
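+# the irregular spacing inside the version specifiers below is presumably
+# deliberate, exercising the module's requirement-string parsing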
+pip_test_pkg_ver:
+ - sampleprojectpy2<=100, !=9.0.0,>=0.0.1
+ - jiphy<100 ,!=9,>=0.0.1
+pip_test_pkg_ver_unsatisfied:
+ - sampleprojectpy2>= 999.0.0
+ - jiphy >999.0
+pip_test_modules:
+ - sample
+ - jiphy
diff --git a/test/integration/targets/play_iterator/aliases b/test/integration/targets/play_iterator/aliases
new file mode 100644
index 00000000..3005e4b2
--- /dev/null
+++ b/test/integration/targets/play_iterator/aliases
@@ -0,0 +1 @@
+shippable/posix/group4
diff --git a/test/integration/targets/play_iterator/playbook.yml b/test/integration/targets/play_iterator/playbook.yml
new file mode 100644
index 00000000..76100c60
--- /dev/null
+++ b/test/integration/targets/play_iterator/playbook.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name:
+ debug:
+ msg: foo
+ - name: "task 2"
+ debug:
+ msg: bar
diff --git a/test/integration/targets/play_iterator/runme.sh b/test/integration/targets/play_iterator/runme.sh
new file mode 100755
index 00000000..9f30d9e7
--- /dev/null
+++ b/test/integration/targets/play_iterator/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
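+# playbook.yml deliberately starts with an unnamed task; --start-at-task must
+# still be able to locate 'task 2' past it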
+ansible-playbook playbook.yml --start-at-task 'task 2' "$@"
diff --git a/test/integration/targets/playbook/aliases b/test/integration/targets/playbook/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/playbook/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/playbook/runme.sh b/test/integration/targets/playbook/runme.sh
new file mode 100755
index 00000000..25e2e5a6
--- /dev/null
+++ b/test/integration/targets/playbook/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# run type tests
+ansible-playbook -i ../../inventory types.yml -v "$@"
+
+# test timeout
+ansible-playbook -i ../../inventory timeout.yml -v "$@"
diff --git a/test/integration/targets/playbook/timeout.yml b/test/integration/targets/playbook/timeout.yml
new file mode 100644
index 00000000..442e13ae
--- /dev/null
+++ b/test/integration/targets/playbook/timeout.yml
@@ -0,0 +1,12 @@
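+# 'timeout' below is the per-task keyword (new in ansible-base 2.10) that
+# aborts the action if it runs longer than the given number of seconds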
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - shell: sleep 100
+ timeout: 1
+ ignore_errors: true
+ register: time
+
+ - assert:
+ that:
+ - time is failed
+ - '"The shell action failed to execute in the expected time frame" in time["msg"]'
diff --git a/test/integration/targets/playbook/types.yml b/test/integration/targets/playbook/types.yml
new file mode 100644
index 00000000..dd8997b6
--- /dev/null
+++ b/test/integration/targets/playbook/types.yml
@@ -0,0 +1,21 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+    - name: try to set 'diff', a boolean, to a valid boolean
+ debug: msg="not important"
+ diff: yes
+ ignore_errors: True
+ register: good_diff
+
+    - name: try to set 'diff', a boolean, to a string (the trailing '.' makes it non-boolean)
+ debug: msg="not important"
+ diff: yes.
+ ignore_errors: True
+ register: bad_diff
+
+ - name: Check we did error out
+ assert:
+ that:
+ - good_diff is success
+ - bad_diff is failed
+ - "'is not a valid boolean' in bad_diff['msg']"
diff --git a/test/integration/targets/plugin_config_for_inventory/aliases b/test/integration/targets/plugin_config_for_inventory/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/plugin_config_for_inventory/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml b/test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml
new file mode 100644
index 00000000..8ff39884
--- /dev/null
+++ b/test/integration/targets/plugin_config_for_inventory/config_with_parameter.yml
@@ -0,0 +1,3 @@
+plugin: test_inventory
+departments:
+ - paris
diff --git a/test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml b/test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml
new file mode 100644
index 00000000..787cf967
--- /dev/null
+++ b/test/integration/targets/plugin_config_for_inventory/config_without_parameter.yml
@@ -0,0 +1 @@
+plugin: test_inventory
diff --git a/test/integration/targets/plugin_config_for_inventory/runme.sh b/test/integration/targets/plugin_config_for_inventory/runme.sh
new file mode 100755
index 00000000..119a073a
--- /dev/null
+++ b/test/integration/targets/plugin_config_for_inventory/runme.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+set -o errexit -o nounset -o xtrace
+
+export ANSIBLE_INVENTORY_PLUGINS=./
+export ANSIBLE_INVENTORY_ENABLED=test_inventory
+
+# check default values
+ansible-inventory --list -i ./config_without_parameter.yml --export | \
+ env python -c "import json, sys; inv = json.loads(sys.stdin.read()); \
+ assert set(inv['_meta']['hostvars']['test_host']['departments']) == set(['seine-et-marne', 'haute-garonne'])"
+
+# check configured values
+ansible-inventory --list -i ./config_with_parameter.yml --export | \
+ env python -c "import json, sys; inv = json.loads(sys.stdin.read()); \
+ assert set(inv['_meta']['hostvars']['test_host']['departments']) == set(['paris'])"
diff --git a/test/integration/targets/plugin_config_for_inventory/test_inventory.py b/test/integration/targets/plugin_config_for_inventory/test_inventory.py
new file mode 100644
index 00000000..63ed0cc2
--- /dev/null
+++ b/test/integration/targets/plugin_config_for_inventory/test_inventory.py
@@ -0,0 +1,52 @@
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: test_inventory
+ plugin_type: inventory
+    author:
+ - Pierre-Louis Bonicoli (@pilou-)
+ short_description: test inventory
+ description:
+ - test inventory (fetch parameters using config API)
+ options:
+ departments:
+ description: test parameter
+ type: list
+ default:
+ - seine-et-marne
+ - haute-garonne
+ required: False
+'''
+
+EXAMPLES = '''
+# Example command line: ansible-inventory --list -i test_inventory.yml
+
+plugin: test_inventory
+departments:
+ - paris
+'''
+
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+class InventoryModule(BaseInventoryPlugin):
+ NAME = 'test_inventory'
+
+ def verify_file(self, path):
+ return True
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+ config_data = self._read_config_data(path=path)
+ self._consume_options(config_data)
+
+ departments = self.get_option('departments')
+
+ group = 'test_group'
+ host = 'test_host'
+
+ self.inventory.add_group(group)
+ self.inventory.add_host(group=group, host=host)
+ self.inventory.set_variable(host, 'departments', departments)
diff --git a/test/integration/targets/plugin_filtering/aliases b/test/integration/targets/plugin_filtering/aliases
new file mode 100644
index 00000000..3005e4b2
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/aliases
@@ -0,0 +1 @@
+shippable/posix/group4
diff --git a/test/integration/targets/plugin_filtering/copy.yml b/test/integration/targets/plugin_filtering/copy.yml
new file mode 100644
index 00000000..083386a1
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/copy.yml
@@ -0,0 +1,10 @@
+---
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - copy:
+ content: 'Testing 1... 2... 3...'
+ dest: ./testing.txt
+ - file:
+ state: absent
+ path: ./testing.txt
diff --git a/test/integration/targets/plugin_filtering/filter_lookup.ini b/test/integration/targets/plugin_filtering/filter_lookup.ini
new file mode 100644
index 00000000..17e58e33
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_lookup.ini
@@ -0,0 +1,4 @@
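+# note: this release reads plugin_filters_cfg from the '[default]' section (a
+# historical quirk) as well as '[defaults]', so the section name below works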
+[default]
+retry_files_enabled = False
+plugin_filters_cfg = ./filter_lookup.yml
+
diff --git a/test/integration/targets/plugin_filtering/filter_lookup.yml b/test/integration/targets/plugin_filtering/filter_lookup.yml
new file mode 100644
index 00000000..694ebfcb
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_lookup.yml
@@ -0,0 +1,6 @@
+---
+filter_version: 1.0
+module_blacklist:
+ # Specify the name of a lookup plugin here. This should have no effect as
+ # this is only for filtering modules
+ - list
diff --git a/test/integration/targets/plugin_filtering/filter_modules.ini b/test/integration/targets/plugin_filtering/filter_modules.ini
new file mode 100644
index 00000000..ab39bedd
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_modules.ini
@@ -0,0 +1,4 @@
+[default]
+retry_files_enabled = False
+plugin_filters_cfg = ./filter_modules.yml
+
diff --git a/test/integration/targets/plugin_filtering/filter_modules.yml b/test/integration/targets/plugin_filtering/filter_modules.yml
new file mode 100644
index 00000000..6cffa676
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_modules.yml
@@ -0,0 +1,9 @@
+---
+filter_version: 1.0
+module_blacklist:
+ # A pure action plugin
+ - pause
+ # A hybrid action plugin with module
+ - copy
+ # A pure module
+ - tempfile
diff --git a/test/integration/targets/plugin_filtering/filter_ping.ini b/test/integration/targets/plugin_filtering/filter_ping.ini
new file mode 100644
index 00000000..aabbde45
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_ping.ini
@@ -0,0 +1,4 @@
+[default]
+retry_files_enabled = False
+plugin_filters_cfg = ./filter_ping.yml
+
diff --git a/test/integration/targets/plugin_filtering/filter_ping.yml b/test/integration/targets/plugin_filtering/filter_ping.yml
new file mode 100644
index 00000000..08e56f24
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_ping.yml
@@ -0,0 +1,5 @@
+---
+filter_version: 1.0
+module_blacklist:
+ # Ping is special
+ - ping
diff --git a/test/integration/targets/plugin_filtering/filter_stat.ini b/test/integration/targets/plugin_filtering/filter_stat.ini
new file mode 100644
index 00000000..13a103dd
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_stat.ini
@@ -0,0 +1,4 @@
+[default]
+retry_files_enabled = False
+plugin_filters_cfg = ./filter_stat.yml
+
diff --git a/test/integration/targets/plugin_filtering/filter_stat.yml b/test/integration/targets/plugin_filtering/filter_stat.yml
new file mode 100644
index 00000000..c1ce42ef
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/filter_stat.yml
@@ -0,0 +1,5 @@
+---
+filter_version: 1.0
+module_blacklist:
+ # Stat is special
+ - stat
diff --git a/test/integration/targets/plugin_filtering/lookup.yml b/test/integration/targets/plugin_filtering/lookup.yml
new file mode 100644
index 00000000..de6d1b48
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/lookup.yml
@@ -0,0 +1,14 @@
+---
+- hosts: testhost
+ gather_facts: False
+ vars:
+ data:
+ - one
+ - two
+ tasks:
+ - debug:
+ msg: '{{ lookup("list", data) }}'
+
+ - debug:
+ msg: '{{ item }}'
+ with_list: '{{ data }}'
diff --git a/test/integration/targets/plugin_filtering/no_blacklist_module.ini b/test/integration/targets/plugin_filtering/no_blacklist_module.ini
new file mode 100644
index 00000000..65b51d67
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/no_blacklist_module.ini
@@ -0,0 +1,3 @@
+[defaults]
+retry_files_enabled = False
+plugin_filters_cfg = ./no_blacklist_module.yml
diff --git a/test/integration/targets/plugin_filtering/no_blacklist_module.yml b/test/integration/targets/plugin_filtering/no_blacklist_module.yml
new file mode 100644
index 00000000..52a55dff
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/no_blacklist_module.yml
@@ -0,0 +1,3 @@
+---
+filter_version: 1.0
+module_blacklist:
diff --git a/test/integration/targets/plugin_filtering/no_filters.ini b/test/integration/targets/plugin_filtering/no_filters.ini
new file mode 100644
index 00000000..4b42c8c4
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/no_filters.ini
@@ -0,0 +1,4 @@
+[default]
+retry_files_enabled = False
+plugin_filters_cfg = ./empty.yml
+
diff --git a/test/integration/targets/plugin_filtering/pause.yml b/test/integration/targets/plugin_filtering/pause.yml
new file mode 100644
index 00000000..e2c1ef9c
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/pause.yml
@@ -0,0 +1,6 @@
+---
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - pause:
+ seconds: 1
diff --git a/test/integration/targets/plugin_filtering/ping.yml b/test/integration/targets/plugin_filtering/ping.yml
new file mode 100644
index 00000000..9e2214b0
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/ping.yml
@@ -0,0 +1,6 @@
+---
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - ping:
+ data: 'Testing 1... 2... 3...'
diff --git a/test/integration/targets/plugin_filtering/runme.sh b/test/integration/targets/plugin_filtering/runme.sh
new file mode 100755
index 00000000..aa0e2b0c
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/runme.sh
@@ -0,0 +1,137 @@
+#!/usr/bin/env bash
+
+set -ux
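+# no -e here: the exit status of every ansible-playbook run is checked explicitly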
+
+#
+# Check that with no filters set, all of these modules run as expected
+#
+ANSIBLE_CONFIG=no_filters.ini ansible-playbook copy.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run copy with no filters applied"
+ exit 1
+fi
+ANSIBLE_CONFIG=no_filters.ini ansible-playbook pause.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run pause with no filters applied"
+ exit 1
+fi
+ANSIBLE_CONFIG=no_filters.ini ansible-playbook tempfile.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run tempfile with no filters applied"
+ exit 1
+fi
+
+#
+# Check that if no modules are blacklisted then Ansible does not throw a traceback
+#
+ANSIBLE_CONFIG=no_blacklist_module.ini ansible-playbook tempfile.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run tempfile with no modules blacklisted"
+ exit 1
+fi
+
+#
+# Check that with these modules filtered out, all of these modules fail to be found
+#
+ANSIBLE_CONFIG=filter_modules.ini ansible-playbook copy.yml -i ../../inventory -v "$@"
+if test $? = 0 ; then
+ echo "### Failed to prevent copy from running"
+ exit 1
+else
+ echo "### Copy was prevented from running as expected"
+fi
+ANSIBLE_CONFIG=filter_modules.ini ansible-playbook pause.yml -i ../../inventory -v "$@"
+if test $? = 0 ; then
+ echo "### Failed to prevent pause from running"
+ exit 1
+else
+ echo "### pause was prevented from running as expected"
+fi
+ANSIBLE_CONFIG=filter_modules.ini ansible-playbook tempfile.yml -i ../../inventory -v "$@"
+if test $? = 0 ; then
+ echo "### Failed to prevent tempfile from running"
+ exit 1
+else
+ echo "### tempfile was prevented from running as expected"
+fi
+
+#
+# ping is a special module as we test for its existence. Check it specially
+#
+
+# Check that ping runs with no filter
+ANSIBLE_CONFIG=no_filters.ini ansible-playbook ping.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run ping with no filters applied"
+ exit 1
+fi
+
+# Check that other modules run with ping filtered
+ANSIBLE_CONFIG=filter_ping.ini ansible-playbook copy.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run copy when a filter was applied to ping"
+ exit 1
+fi
+# Check that ping fails to run when it is filtered
+ANSIBLE_CONFIG=filter_ping.ini ansible-playbook ping.yml -i ../../inventory -v "$@"
+if test $? = 0 ; then
+ echo "### Failed to prevent ping from running"
+ exit 1
+else
+ echo "### Ping was prevented from running as expected"
+fi
+
+#
+# Check that specifying a lookup plugin in the filter has no effect
+#
+
+ANSIBLE_CONFIG=filter_lookup.ini ansible-playbook lookup.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to use a lookup plugin when it is incorrectly specified in the *module* blacklist"
+ exit 1
+fi
+
+#
+# stat is a special module as we use it to run nearly every other module. Check it specially
+#
+
+# Check that stat runs with no filter
+ANSIBLE_CONFIG=no_filters.ini ansible-playbook stat.yml -i ../../inventory -vvv "$@"
+if test $? != 0 ; then
+ echo "### Failed to run stat with no filters applied"
+ exit 1
+fi
+
+# Check that running another module when stat is filtered gives us our custom error message
+ANSIBLE_CONFIG=filter_stat.ini
+export ANSIBLE_CONFIG
+CAPTURE=$(ansible-playbook copy.yml -i ../../inventory -vvv "$@" 2>&1)
+if test $? = 0 ; then
+ echo "### Copy ran even though stat is in the module blacklist"
+ exit 1
+else
+ echo "$CAPTURE" | grep 'The stat module was specified in the module blacklist file,.*, but Ansible will not function without the stat module. Please remove stat from the blacklist.'
+ if test $? != 0 ; then
+ echo "### Stat did not give us our custom error message"
+ exit 1
+ fi
+ echo "### Filtering stat failed with our custom error message as expected"
+fi
+unset ANSIBLE_CONFIG
+
+# Check that running stat when stat is filtered gives our custom error message
+ANSIBLE_CONFIG=filter_stat.ini
+export ANSIBLE_CONFIG
+CAPTURE=$(ansible-playbook stat.yml -i ../../inventory -vvv "$@" 2>&1)
+if test $? = 0 ; then
+ echo "### Stat ran even though it is in the module blacklist"
+ exit 1
+else
+ echo "$CAPTURE" | grep 'The stat module was specified in the module blacklist file,.*, but Ansible will not function without the stat module. Please remove stat from the blacklist.'
+ if test $? != 0 ; then
+ echo "### Stat did not give us our custom error message"
+ exit 1
+ fi
+ echo "### Filtering stat failed with our custom error message as expected"
+fi
+unset ANSIBLE_CONFIG
diff --git a/test/integration/targets/plugin_filtering/stat.yml b/test/integration/targets/plugin_filtering/stat.yml
new file mode 100644
index 00000000..4f24baae
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/stat.yml
@@ -0,0 +1,6 @@
+---
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - stat:
+ path: '/'
diff --git a/test/integration/targets/plugin_filtering/tempfile.yml b/test/integration/targets/plugin_filtering/tempfile.yml
new file mode 100644
index 00000000..06463547
--- /dev/null
+++ b/test/integration/targets/plugin_filtering/tempfile.yml
@@ -0,0 +1,9 @@
+---
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - tempfile:
+ register: temp_result
+ - file:
+ state: absent
+ path: '{{ temp_result["path"] }}'
diff --git a/test/integration/targets/plugin_loader/aliases b/test/integration/targets/plugin_loader/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/plugin_loader/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/plugin_loader/normal/filters.yml b/test/integration/targets/plugin_loader/normal/filters.yml
new file mode 100644
index 00000000..f9069be1
--- /dev/null
+++ b/test/integration/targets/plugin_loader/normal/filters.yml
@@ -0,0 +1,13 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: ensure filters work as shipped from core
+ assert:
+ that:
+ - a|flatten == [1, 2, 3, 4, 5]
+ - a|ternary('yes', 'no') == 'yes'
+ vars:
+ a:
+ - 1
+ - 2
+ - [3, 4, 5]
diff --git a/test/integration/targets/plugin_loader/normal/library/_symlink.py b/test/integration/targets/plugin_loader/normal/library/_symlink.py
new file mode 120000
index 00000000..c4142e74
--- /dev/null
+++ b/test/integration/targets/plugin_loader/normal/library/_symlink.py
@@ -0,0 +1 @@
+_underscore.py \ No newline at end of file
diff --git a/test/integration/targets/plugin_loader/normal/library/_underscore.py b/test/integration/targets/plugin_loader/normal/library/_underscore.py
new file mode 100644
index 00000000..7a416a64
--- /dev/null
+++ b/test/integration/targets/plugin_loader/normal/library/_underscore.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+
+def main():
+ print(json.dumps(dict(changed=False, source='legacy_library_dir')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/plugin_loader/normal/underscore.yml b/test/integration/targets/plugin_loader/normal/underscore.yml
new file mode 100644
index 00000000..fb5bbad7
--- /dev/null
+++ b/test/integration/targets/plugin_loader/normal/underscore.yml
@@ -0,0 +1,15 @@
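+# module files named with a leading underscore are deprecated; the loader must
+# still resolve them under their plain name, including through symlinks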
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Load a deprecated module
+ underscore:
+ register: res
+
+ - name: Load a deprecated module that is a symlink
+ symlink:
+ register: sym
+
+ - assert:
+ that:
+ - res.source == 'legacy_library_dir'
+ - sym.source == 'legacy_library_dir'
diff --git a/test/integration/targets/plugin_loader/override/filter_plugins/core.py b/test/integration/targets/plugin_loader/override/filter_plugins/core.py
new file mode 100644
index 00000000..f283dc39
--- /dev/null
+++ b/test/integration/targets/plugin_loader/override/filter_plugins/core.py
@@ -0,0 +1,18 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def do_flag(myval):
+ return 'flagged'
+
+
+class FilterModule(object):
+ ''' Ansible core jinja2 filters '''
+
+ def filters(self):
+ return {
+ # jinja2 overrides
+ 'flag': do_flag,
+ 'flatten': do_flag,
+ }
diff --git a/test/integration/targets/plugin_loader/override/filters.yml b/test/integration/targets/plugin_loader/override/filters.yml
new file mode 100644
index 00000000..e51ab4e9
--- /dev/null
+++ b/test/integration/targets/plugin_loader/override/filters.yml
@@ -0,0 +1,15 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+    - name: ensure local 'flag' filter works, 'flatten' is overridden and 'ternary' is still from core
+ assert:
+ that:
+ - a|flag == 'flagged'
+ - a|flatten != [1, 2, 3, 4, 5]
+ - a|flatten == "flagged"
+ - a|ternary('yes', 'no') == 'yes'
+ vars:
+ a:
+ - 1
+ - 2
+ - [3, 4, 5]
diff --git a/test/integration/targets/plugin_loader/runme.sh b/test/integration/targets/plugin_loader/runme.sh
new file mode 100755
index 00000000..2a1bdeda
--- /dev/null
+++ b/test/integration/targets/plugin_loader/runme.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -ux
+
+
+# check normal execution
+for myplay in normal/*.yml
+do
+ ansible-playbook "${myplay}" -i ../../inventory -vvv "$@"
+ if test $? != 0 ; then
+ echo "### Failed to run ${myplay} normally"
+ exit 1
+ fi
+done
+
+# check overrides
+for myplay in override/*.yml
+do
+ ansible-playbook "${myplay}" -i ../../inventory -vvv "$@"
+ if test $? != 0 ; then
+ echo "### Failed to run ${myplay} override"
+ exit 1
+ fi
+done
diff --git a/test/integration/targets/plugin_namespace/aliases b/test/integration/targets/plugin_namespace/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/plugin_namespace/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/plugin_namespace/filter_plugins/test_filter.py b/test/integration/targets/plugin_namespace/filter_plugins/test_filter.py
new file mode 100644
index 00000000..dca094be
--- /dev/null
+++ b/test/integration/targets/plugin_namespace/filter_plugins/test_filter.py
@@ -0,0 +1,15 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def filter_name(a):
+ return __name__
+
+
+class FilterModule(object):
+ def filters(self):
+ filters = {
+ 'filter_name': filter_name,
+ }
+
+ return filters
diff --git a/test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py b/test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py
new file mode 100644
index 00000000..d0af703b
--- /dev/null
+++ b/test/integration/targets/plugin_namespace/lookup_plugins/lookup_name.py
@@ -0,0 +1,9 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ return [__name__]
diff --git a/test/integration/targets/plugin_namespace/tasks/main.yml b/test/integration/targets/plugin_namespace/tasks/main.yml
new file mode 100644
index 00000000..19bdd3a0
--- /dev/null
+++ b/test/integration/targets/plugin_namespace/tasks/main.yml
@@ -0,0 +1,11 @@
+- set_fact:
+ filter_name: "{{ 1 | filter_name }}"
+ lookup_name: "{{ lookup('lookup_name') }}"
+ test_name_ok: "{{ 1 is test_name_ok }}"
+
+- assert:
+ that:
+ # filter names are prefixed with a unique hash value to prevent shadowing of other plugins
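+      # e.g. ansible.plugins.filter.8787509952_test_filter (numeric prefix illustrative)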
+ - filter_name | regex_search('^ansible\.plugins\.filter\.[0-9]+_test_filter$')
+ - lookup_name == 'ansible.plugins.lookup.lookup_name'
+ - test_name_ok
diff --git a/test/integration/targets/plugin_namespace/test_plugins/test_test.py b/test/integration/targets/plugin_namespace/test_plugins/test_test.py
new file mode 100644
index 00000000..2a9d6ee0
--- /dev/null
+++ b/test/integration/targets/plugin_namespace/test_plugins/test_test.py
@@ -0,0 +1,16 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+def test_name_ok(value):
+ # test names are prefixed with a unique hash value to prevent shadowing of other plugins
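+    # e.g. ansible.plugins.test.8787509952_test_test (numeric prefix illustrative)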
+ return bool(re.match(r'^ansible\.plugins\.test\.[0-9]+_test_test$', __name__))
+
+
+class TestModule:
+ def tests(self):
+ return {
+ 'test_name_ok': test_name_ok,
+ }
diff --git a/test/integration/targets/prepare_http_tests/defaults/main.yml b/test/integration/targets/prepare_http_tests/defaults/main.yml
new file mode 100644
index 00000000..a1e5b8d1
--- /dev/null
+++ b/test/integration/targets/prepare_http_tests/defaults/main.yml
@@ -0,0 +1,4 @@
+badssl_host: wrong.host.badssl.com
+httpbin_host: httpbin.org
+sni_host: ci-files.testing.ansible.com
+badssl_host_substring: wrong.host.badssl.com
diff --git a/test/integration/targets/prepare_http_tests/meta/main.yml b/test/integration/targets/prepare_http_tests/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/prepare_http_tests/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/prepare_http_tests/tasks/default.yml b/test/integration/targets/prepare_http_tests/tasks/default.yml
new file mode 100644
index 00000000..bff90350
--- /dev/null
+++ b/test/integration/targets/prepare_http_tests/tasks/default.yml
@@ -0,0 +1,64 @@
+- name: RedHat - Enable the dynamic CA configuration feature
+ command: update-ca-trust force-enable
+ when: ansible_os_family == 'RedHat'
+
+- name: RedHat - Retrieve test cacert
+ get_url:
+ url: "http://ansible.http.tests/cacert.pem"
+ dest: "/etc/pki/ca-trust/source/anchors/ansible.pem"
+ when: ansible_os_family == 'RedHat'
+
+- name: Get client cert/key
+ get_url:
+ url: "http://ansible.http.tests/{{ item }}"
+ dest: "{{ remote_tmp_dir }}/{{ item }}"
+ with_items:
+ - client.pem
+ - client.key
+
+- name: Suse - Retrieve test cacert
+ get_url:
+ url: "http://ansible.http.tests/cacert.pem"
+ dest: "/etc/pki/trust/anchors/ansible.pem"
+ when: ansible_os_family == 'Suse'
+
+- name: Debian - Retrieve test cacert
+ get_url:
+ url: "http://ansible.http.tests/cacert.pem"
+ dest: "/usr/local/share/ca-certificates/ansible.crt"
+ when: ansible_os_family == 'Debian'
+
+- name: RedHat - Update ca trust
+ command: update-ca-trust extract
+ when: ansible_os_family == 'RedHat'
+
+- name: Debian/Suse - Update ca certificates
+ command: update-ca-certificates
+ when: ansible_os_family == 'Debian' or ansible_os_family == 'Suse'
+
+- name: FreeBSD - Retrieve test cacert
+ get_url:
+ url: "http://ansible.http.tests/cacert.pem"
+ dest: "/tmp/ansible.pem"
+ when: ansible_os_family == 'FreeBSD'
+
+- name: FreeBSD - Add cacert to root certificate store
+ blockinfile:
+ path: "/etc/ssl/cert.pem"
+ block: "{{ lookup('file', '/tmp/ansible.pem') }}"
+ when: ansible_os_family == 'FreeBSD'
+
+- name: MacOS - Retrieve test cacert
+ when: ansible_os_family == 'Darwin'
+ block:
+ - uri:
+ url: "http://ansible.http.tests/cacert.pem"
+ return_content: true
+ register: cacert_pem
+
+ - raw: '{{ ansible_python_interpreter }} -c "import ssl; print(ssl.get_default_verify_paths().cafile)"'
+ register: macos_cafile
+
+ - blockinfile:
+ path: "{{ macos_cafile.stdout_lines|first }}"
+ block: "{{ cacert_pem.content }}"
diff --git a/test/integration/targets/prepare_http_tests/tasks/main.yml b/test/integration/targets/prepare_http_tests/tasks/main.yml
new file mode 100644
index 00000000..86e350c2
--- /dev/null
+++ b/test/integration/targets/prepare_http_tests/tasks/main.yml
@@ -0,0 +1,24 @@
+# The docker --link functionality gives us an ENV var we can key off of to see if we have access to
+# the httptester container
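+# (only the presence of a non-empty HTTPTESTER value matters, not its contents)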
+- set_fact:
+ has_httptester: "{{ lookup('env', 'HTTPTESTER') != '' }}"
+
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+# If we are running with access to a httptester container, grab its cacert and install it
+- block:
+ # Override hostname defaults with httptester linked names
+ - include_vars: httptester.yml
+
+ - include_tasks: "{{ lookup('first_found', files)}}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
+ when:
+ - has_httptester|bool
+ # skip the setup if running on Windows Server 2008 as httptester is not available
+ - ansible_os_family != 'Windows' or (ansible_os_family == 'Windows' and not ansible_distribution_version.startswith("6.0."))
diff --git a/test/integration/targets/prepare_http_tests/tasks/windows.yml b/test/integration/targets/prepare_http_tests/tasks/windows.yml
new file mode 100644
index 00000000..da8b0eb3
--- /dev/null
+++ b/test/integration/targets/prepare_http_tests/tasks/windows.yml
@@ -0,0 +1,33 @@
+# Server 2008 R2 uses a 3rd party program to forward the ports, and it may
+# not be ready straight away, so we give it at least 5 minutes before
+# conceding defeat
+- name: Windows - make sure the port forwarder is active
+ win_wait_for:
+ host: ansible.http.tests
+ port: 80
+ state: started
+ timeout: 300
+
+- name: Windows - Get client cert/key
+ win_get_url:
+ url: http://ansible.http.tests/{{ item }}
+ dest: '{{ remote_tmp_dir }}\{{ item }}'
+ register: win_download
+  # Server 2008 R2 is slightly slower, so we attempt 5 retries
+ retries: 5
+ until: win_download is successful
+ with_items:
+ - client.pem
+ - client.key
+
+- name: Windows - Retrieve test cacert
+ win_get_url:
+ url: http://ansible.http.tests/cacert.pem
+ dest: '{{ remote_tmp_dir }}\cacert.pem'
+
+- name: Windows - Update ca trust
+ win_certificate_store:
+ path: '{{ remote_tmp_dir }}\cacert.pem'
+ state: present
+ store_location: LocalMachine
+ store_name: Root
diff --git a/test/integration/targets/prepare_http_tests/vars/httptester.yml b/test/integration/targets/prepare_http_tests/vars/httptester.yml
new file mode 100644
index 00000000..0e23ae93
--- /dev/null
+++ b/test/integration/targets/prepare_http_tests/vars/httptester.yml
@@ -0,0 +1,5 @@
+# these are fake hostnames provided by docker link for the httptester container
+badssl_host: fail.ansible.http.tests
+httpbin_host: ansible.http.tests
+sni_host: sni1.ansible.http.tests
+badssl_host_substring: HTTP Client Testing Service
diff --git a/test/integration/targets/prepare_tests/tasks/main.yml b/test/integration/targets/prepare_tests/tasks/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/prepare_tests/tasks/main.yml
diff --git a/test/integration/targets/pull/aliases b/test/integration/targets/pull/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/test/integration/targets/pull/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/test/integration/targets/pull/cleanup.yml b/test/integration/targets/pull/cleanup.yml
new file mode 100644
index 00000000..68686964
--- /dev/null
+++ b/test/integration/targets/pull/cleanup.yml
@@ -0,0 +1,16 @@
+- hosts: localhost
+ vars:
+ git_install: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/git_install.json") | from_json }}'
+ tasks:
+ - name: remove unwanted packages
+ package:
+ name: git
+ state: absent
+ when: git_install.changed
+
+ - name: remove auto-installed packages from FreeBSD
+ package:
+ name: git
+ state: absent
+ autoremove: yes
+ when: git_install.changed and ansible_distribution == "FreeBSD"
diff --git a/test/integration/targets/pull/pull-integration-test/ansible.cfg b/test/integration/targets/pull/pull-integration-test/ansible.cfg
new file mode 100644
index 00000000..f8fc6cdb
--- /dev/null
+++ b/test/integration/targets/pull/pull-integration-test/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+inventory = inventory
diff --git a/test/integration/targets/pull/pull-integration-test/inventory b/test/integration/targets/pull/pull-integration-test/inventory
new file mode 100644
index 00000000..72644cef
--- /dev/null
+++ b/test/integration/targets/pull/pull-integration-test/inventory
@@ -0,0 +1,2 @@
+testhost1.example.com
+localhost
diff --git a/test/integration/targets/pull/pull-integration-test/local.yml b/test/integration/targets/pull/pull-integration-test/local.yml
new file mode 100644
index 00000000..d358ee86
--- /dev/null
+++ b/test/integration/targets/pull/pull-integration-test/local.yml
@@ -0,0 +1,20 @@
+- name: test playbook for ansible-pull
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: debug output
+ debug: msg="test task"
+ - name: check for correct inventory
+ debug: msg="testing for inventory content"
+ failed_when: "'testhost1.example.com' not in groups['all']"
+ - name: check for correct limit
+ debug: msg="testing for limit"
+ failed_when: "'testhost1.example.com' == inventory_hostname"
+ - name: final task, has to be reached for the test to succeed
+ debug: msg="MAGICKEYWORD"
+
+    - name: check that extra vars are correctly passed
+ assert:
+ that:
+ - docker_registries_login is defined
+ tags: ['never', 'test_ev']
diff --git a/test/integration/targets/pull/runme.sh b/test/integration/targets/pull/runme.sh
new file mode 100755
index 00000000..dcadc495
--- /dev/null
+++ b/test/integration/targets/pull/runme.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+set -eux
+set -o pipefail
+
+# http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x
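+# (older BSD/macOS mktemp requires an explicit template, hence the -t fallback)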
+temp_dir=$(mktemp -d 2>/dev/null || mktemp -d -t 'ansible-testing-XXXXXXXXXX')
+trap 'rm -rf "${temp_dir}"' EXIT
+
+repo_dir="${temp_dir}/repo"
+pull_dir="${temp_dir}/pull"
+temp_log="${temp_dir}/pull.log"
+
+ansible-playbook setup.yml -i ../../inventory
+
+cleanup="$(pwd)/cleanup.yml"
+
+trap 'ansible-playbook "${cleanup}" -i ../../inventory' EXIT
+
+cp -av "pull-integration-test" "${repo_dir}"
+cd "${repo_dir}"
+(
+ git init
+ git config user.email "ansible@ansible.com"
+ git config user.name "Ansible Test Runner"
+ git add .
+ git commit -m "Initial commit."
+)
+
+function pass_tests {
+ # test for https://github.com/ansible/ansible/issues/13688
+ if ! grep MAGICKEYWORD "${temp_log}"; then
+ cat "${temp_log}"
+ echo "Missing MAGICKEYWORD in output."
+ exit 1
+ fi
+
+ # test for https://github.com/ansible/ansible/issues/13681
+ if grep -E '127\.0\.0\.1.*ok' "${temp_log}"; then
+ cat "${temp_log}"
+ echo "Found host 127.0.0.1 in output. Only localhost should be present."
+ exit 1
+ fi
+ # make sure one host was run
+ if ! grep -E 'localhost.*ok' "${temp_log}"; then
+ cat "${temp_log}"
+ echo "Did not find host localhost in output."
+ exit 1
+ fi
+}
+
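+# make sure these do not leak in from the calling environment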
+export ANSIBLE_INVENTORY
+export ANSIBLE_HOST_PATTERN_MISMATCH
+
+unset ANSIBLE_INVENTORY
+unset ANSIBLE_HOST_PATTERN_MISMATCH
+
+ANSIBLE_CONFIG='' ansible-pull -d "${pull_dir}" -U "${repo_dir}" "$@" | tee "${temp_log}"
+
+pass_tests
+
+# ensure complex extra vars work
+PASSWORD='test'
+USER=${USER:-'broken_docker'}
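+# each '"${VAR}"' sequence below closes the single-quoted JSON string, expands
+# the shell variable inside double quotes, and then reopens the single quotes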
+JSON_EXTRA_ARGS='{"docker_registries_login": [{ "docker_password": "'"${PASSWORD}"'", "docker_username": "'"${USER}"'", "docker_registry_url":"repository-manager.company.com:5001"}], "docker_registries_logout": [{ "docker_password": "'"${PASSWORD}"'", "docker_username": "'"${USER}"'", "docker_registry_url":"repository-manager.company.com:5001"}] }'
+
+ANSIBLE_CONFIG='' ansible-pull -d "${pull_dir}" -U "${repo_dir}" -e "${JSON_EXTRA_ARGS}" "$@" --tags untagged,test_ev | tee "${temp_log}"
+
+pass_tests
diff --git a/test/integration/targets/pull/setup.yml b/test/integration/targets/pull/setup.yml
new file mode 100644
index 00000000..a82d02ae
--- /dev/null
+++ b/test/integration/targets/pull/setup.yml
@@ -0,0 +1,11 @@
+- hosts: localhost
+ tasks:
+ - name: install git
+ package:
+ name: git
+ when: ansible_distribution != "MacOSX"
+ register: git_install
+ - name: save install result
+ copy:
+ content: '{{ git_install }}'
+ dest: '{{ lookup("env", "OUTPUT_DIR") }}/git_install.json'
diff --git a/test/integration/targets/raw/aliases b/test/integration/targets/raw/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/raw/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/raw/meta/main.yml b/test/integration/targets/raw/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/raw/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/raw/runme.sh b/test/integration/targets/raw/runme.sh
new file mode 100755
index 00000000..07955427
--- /dev/null
+++ b/test/integration/targets/raw/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -ux
+export ANSIBLE_BECOME_ALLOW_SAME_USER=1
+export ANSIBLE_ROLES_PATH=../
+ansible-playbook -i ../../inventory runme.yml -e "output_dir=${OUTPUT_DIR}" -v "$@"
diff --git a/test/integration/targets/raw/runme.yml b/test/integration/targets/raw/runme.yml
new file mode 100644
index 00000000..ea865bca
--- /dev/null
+++ b/test/integration/targets/raw/runme.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: no
+ roles:
+ - { role: raw }
diff --git a/test/integration/targets/raw/tasks/main.yml b/test/integration/targets/raw/tasks/main.yml
new file mode 100644
index 00000000..7f99eadf
--- /dev/null
+++ b/test/integration/targets/raw/tasks/main.yml
@@ -0,0 +1,107 @@
+# Test code for the raw module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact: output_dir_test={{output_dir}}/test_command_raw
+
+- name: make sure our testing sub-directory does not exist
+ file: path="{{ output_dir_test }}" state=absent
+
+- name: create our testing sub-directory
+ file: path="{{ output_dir_test }}" state=directory
+
+##
+## raw
+##
+
+- name: touch a file
+ raw: "touch {{output_dir_test | expanduser}}/test.txt"
+ register: raw_result0
+- debug: var=raw_result0
+- stat:
+ path: "{{output_dir_test | expanduser}}/test.txt"
+ register: raw_result0_stat
+- debug: var=raw_result0_stat
+- name: ensure proper results
+ assert:
+ that:
+ - 'raw_result0.changed is defined'
+ - 'raw_result0.rc is defined'
+ - 'raw_result0.stderr is defined'
+ - 'raw_result0.stdout is defined'
+ - 'raw_result0.stdout_lines is defined'
+ - 'raw_result0.rc == 0'
+ - 'raw_result0_stat.stat.size == 0'
+
+- name: run a piped command
+ raw: "echo 'foo,bar,baz' | cut -d\\, -f2 | tr 'b' 'c'"
+ register: raw_result1
+- debug: var=raw_result1
+- name: ensure proper results
+ assert:
+ that:
+ - 'raw_result1.changed is defined'
+ - 'raw_result1.rc is defined'
+ - 'raw_result1.stderr is defined'
+ - 'raw_result1.stdout is defined'
+ - 'raw_result1.stdout_lines is defined'
+ - 'raw_result1.rc == 0'
+ - 'raw_result1.stdout_lines == ["car"]'
+
+- name: get the path to bash
+ shell: which bash
+ register: bash_path
+- name: run example non-posix command with bash
+ raw: "echo 'foobar' > {{output_dir_test | expanduser}}/test.txt ; cat < {{output_dir_test | expanduser}}/test.txt"
+ args:
+ executable: "{{ bash_path.stdout }}"
+ register: raw_result2
+- debug: var=raw_result2
+- name: ensure proper results
+ assert:
+ that:
+ - 'raw_result2.changed is defined'
+ - 'raw_result2.rc is defined'
+ - 'raw_result2.stderr is defined'
+ - 'raw_result2.stdout is defined'
+ - 'raw_result2.stdout_lines is defined'
+ - 'raw_result2.rc == 0'
+ - 'raw_result2.stdout_lines == ["foobar"]'
+# the following five tests were added to test https://github.com/ansible/ansible/pull/68315
+- name: get the path to sh
+ shell: which sh
+ register: sh_path
+- name: use sh
+ raw: echo $0
+ args:
+ executable: "{{ sh_path.stdout }}"
+ become: true
+ become_method: su
+ register: sh_output
+- name: assert sh
+ assert:
+ that: "(sh_output.stdout | trim) == sh_path.stdout"
+- name: use bash
+ raw: echo $0
+ args:
+ executable: "{{ bash_path.stdout }}"
+ become: true
+ become_method: su
+ register: bash_output
+- name: assert bash
+ assert:
+ that: "(bash_output.stdout | trim) == bash_path.stdout"
diff --git a/test/integration/targets/reboot/aliases b/test/integration/targets/reboot/aliases
new file mode 100644
index 00000000..e9bebbf3
--- /dev/null
+++ b/test/integration/targets/reboot/aliases
@@ -0,0 +1,2 @@
+# No current way to split controller and test node
+unsupported
diff --git a/test/integration/targets/reboot/tasks/check_reboot.yml b/test/integration/targets/reboot/tasks/check_reboot.yml
new file mode 100644
index 00000000..1aff1be2
--- /dev/null
+++ b/test/integration/targets/reboot/tasks/check_reboot.yml
@@ -0,0 +1,10 @@
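+# The default boot_id (/proc/sys/kernel/random/boot_id) is a UUID regenerated
+# on every boot, so a changed value proves the reboot actually happened.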
+- name: Get current boot time
+ command: "{{ boot_time_command[ansible_facts['distribution'] | lower] | default('cat /proc/sys/kernel/random/boot_id') }}"
+ register: after_boot_time
+
+- name: Ensure system was actually rebooted
+ assert:
+ that:
+ - reboot_result is changed
+ - reboot_result.elapsed > 10
+ - before_boot_time.stdout != after_boot_time.stdout
diff --git a/test/integration/targets/reboot/tasks/get_boot_time.yml b/test/integration/targets/reboot/tasks/get_boot_time.yml
new file mode 100644
index 00000000..cec22f06
--- /dev/null
+++ b/test/integration/targets/reboot/tasks/get_boot_time.yml
@@ -0,0 +1,3 @@
+- name: Get current boot time
+ command: "{{ boot_time_command[ansible_facts['distribution'] | lower] | default('cat /proc/sys/kernel/random/boot_id') }}"
+ register: before_boot_time
diff --git a/test/integration/targets/reboot/tasks/main.yml b/test/integration/targets/reboot/tasks/main.yml
new file mode 100644
index 00000000..2568b9b2
--- /dev/null
+++ b/test/integration/targets/reboot/tasks/main.yml
@@ -0,0 +1,111 @@
+- block:
+ # This block can be removed once we have a mechanism in ansible-test to separate
+ # the control node from the managed node.
+ - block:
+ - name: Write temp file for sanity checking this is not the controller
+ copy:
+ content: 'I am the control node'
+ dest: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
+ delegate_to: localhost
+ connection: local
+ when: inventory_hostname == ansible_play_hosts[0]
+
+ - name: See if the temp file exists on the managed node
+ stat:
+ path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
+ register: controller_temp_file
+
+ - name: EXPECT FAILURE | Check if the managed node is the control node
+ assert:
+ msg: >
+ This test must be run manually by modifying the inventory file to point
+ "{{ inventory_hostname }}" at a remote host rather than "{{ ansible_host }}".
+ Skipping reboot test.
+ that:
+ - not controller_temp_file.stat.exists
+
+ - import_tasks: get_boot_time.yml
+
+ - name: Reboot with default settings
+ reboot:
+ register: reboot_result
+
+ - import_tasks: check_reboot.yml
+
+ - import_tasks: get_boot_time.yml
+
+ - name: Reboot with all options
+ reboot:
+ connect_timeout: 30
+ search_paths: /usr/local/bin
+ msg: Rebooting
+ post_reboot_delay: 1
+ pre_reboot_delay: 61
+ test_command: uptime
+ reboot_timeout: 500
+ register: reboot_result
+
+ - import_tasks: check_reboot.yml
+
+ - import_tasks: get_boot_time.yml
+
+ - name: Test with negative values for delays
+ reboot:
+ post_reboot_delay: -0.5
+ pre_reboot_delay: -61
+ register: reboot_result
+
+ - import_tasks: check_reboot.yml
+
+ - name: Use invalid parameter
+ reboot:
+ foo: bar
+ ignore_errors: true
+ register: invalid_parameter
+
+ - name: Ensure task fails with error
+ assert:
+ that:
+ - invalid_parameter is failed
+ - "invalid_parameter.msg == 'Invalid options for reboot: foo'"
+
+ - name: Reboot with test command that fails
+ reboot:
+ test_command: 'FAIL'
+ reboot_timeout: "{{ timeout }}"
+ register: reboot_fail_test
+ failed_when: "reboot_fail_test.msg != 'Timed out waiting for post-reboot test command (timeout=' ~ timeout ~ ')'"
+ vars:
+ timeout: "{{ timeout_value[ansible_facts['distribution'] | lower] | default(60) }}"
+
+ - name: Test molly-guard
+ block:
+ - import_tasks: get_boot_time.yml
+
+ - name: Install molly-guard
+ apt:
+ update_cache: yes
+ name: molly-guard
+ state: present
+
+ - name: Reboot when molly-guard is installed
+ reboot:
+ search_paths: /lib/molly-guard
+ register: reboot_result
+
+ - import_tasks: check_reboot.yml
+
+ when: ansible_facts.distribution in ['Debian', 'Ubuntu']
+ tags:
+ - molly-guard
+
+ always:
+ - name: Cleanup temp file
+ file:
+ path: /tmp/Anything-Nutlike-Nuzzle-Plow-Overdue
+ state: absent
+ delegate_to: localhost
+ connection: local
+ when: inventory_hostname == ansible_play_hosts[0]
+
+ when: ansible_virtualization_type | default('') != 'docker'
diff --git a/test/integration/targets/reboot/vars/main.yml b/test/integration/targets/reboot/vars/main.yml
new file mode 100644
index 00000000..24367c80
--- /dev/null
+++ b/test/integration/targets/reboot/vars/main.yml
@@ -0,0 +1,9 @@
+boot_time_command:
+ freebsd: '/sbin/sysctl kern.boottime'
+ openbsd: '/sbin/sysctl kern.boottime'
+ macosx: 'who -b'
+ solaris: 'who -b'
+ sunos: 'who -b'
+
+timeout_value:
+ solaris: 120
diff --git a/test/integration/targets/rel_plugin_loading/aliases b/test/integration/targets/rel_plugin_loading/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/rel_plugin_loading/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/rel_plugin_loading/notyaml.yml b/test/integration/targets/rel_plugin_loading/notyaml.yml
new file mode 100644
index 00000000..23ab0323
--- /dev/null
+++ b/test/integration/targets/rel_plugin_loading/notyaml.yml
@@ -0,0 +1,5 @@
+all:
+ hosts:
+ testhost:
+ ansible_connection: local
+ ansible_python_interpreter: "{{ansible_playbook_python}}"
diff --git a/test/integration/targets/rel_plugin_loading/runme.sh b/test/integration/targets/rel_plugin_loading/runme.sh
new file mode 100755
index 00000000..34e70fdd
--- /dev/null
+++ b/test/integration/targets/rel_plugin_loading/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
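+# the notyaml inventory plugin lives in subdir/inventory_plugins/, next to the
+# playbook; this checks plugins are picked up relative to the playbook's dir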
+ANSIBLE_INVENTORY_ENABLED=notyaml ansible-playbook subdir/play.yml -i notyaml.yml "$@"
diff --git a/test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py b/test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py
new file mode 100644
index 00000000..d013fc48
--- /dev/null
+++ b/test/integration/targets/rel_plugin_loading/subdir/inventory_plugins/notyaml.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
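+# (a copy of the core 'yaml' inventory plugin under a different file name;
+# enabling 'notyaml' proves the plugin really was loaded from this directory)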
+DOCUMENTATION = '''
+ inventory: yaml
+ version_added: "2.4"
+ short_description: Uses a specific YAML file as an inventory source.
+ description:
+ - "YAML-based inventory, should start with the C(all) group and contain hosts/vars/children entries."
+ - Host entries can have sub-entries defined, which will be treated as variables.
+ - Vars entries are normal group vars.
+ - "Children are 'child groups', which can also have their own vars/hosts/children and so on."
+ - File MUST have a valid extension, defined in configuration.
+ notes:
+ - If you want to set vars for the C(all) group inside the inventory file, the C(all) group must be the first entry in the file.
+ - Whitelisted in configuration by default.
+ options:
+ yaml_extensions:
+ description: list of 'valid' extensions for files containing YAML
+ type: list
+ default: ['.yaml', '.yml', '.json']
+ env:
+ - name: ANSIBLE_YAML_FILENAME_EXT
+ - name: ANSIBLE_INVENTORY_PLUGIN_EXTS
+ ini:
+ - key: yaml_valid_extensions
+ section: defaults
+ - section: inventory_plugin_yaml
+ key: yaml_valid_extensions
+
+'''
+EXAMPLES = '''
+all: # keys must be unique, i.e. only one 'hosts' per group
+ hosts:
+ test1:
+ test2:
+ host_var: value
+ vars:
+ group_all_var: value
+ children: # key order does not matter, indentation does
+ other_group:
+ children:
+ group_x:
+ hosts:
+ test5
+ vars:
+ g2_var2: value3
+ hosts:
+ test4:
+ ansible_host: 127.0.0.1
+ last_group:
+ hosts:
+ test1 # same host as above, additional group membership
+ vars:
+ group_last_var: value
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseFileInventoryPlugin
+
+NoneType = type(None)
+
+
+class InventoryModule(BaseFileInventoryPlugin):
+
+ NAME = 'yaml'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+ if not ext or ext in self.get_option('yaml_extensions'):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ ''' parses the inventory file '''
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.set_options()
+
+ try:
+ data = self.loader.load_from_file(path, cache=False)
+ except Exception as e:
+ raise AnsibleParserError(e)
+
+ if not data:
+ raise AnsibleParserError('Parsed empty YAML file')
+ elif not isinstance(data, MutableMapping):
+ raise AnsibleParserError('YAML inventory has invalid structure, it should be a dictionary, got: %s' % type(data))
+ elif data.get('plugin'):
+ raise AnsibleParserError('Plugin configuration YAML file, not YAML inventory')
+
+        # We expect top level keys to correspond to groups; iterate over them
+        # to get hosts, vars and subgroups (which we iterate over recursively)
+ if isinstance(data, MutableMapping):
+ for group_name in data:
+ self._parse_group(group_name, data[group_name])
+ else:
+ raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(data))
+
+ def _parse_group(self, group, group_data):
+
+ if isinstance(group_data, (MutableMapping, NoneType)):
+
+ try:
+ self.inventory.add_group(group)
+ except AnsibleError as e:
+ raise AnsibleParserError("Unable to add group %s: %s" % (group, to_text(e)))
+
+ if group_data is not None:
+ # make sure they are dicts
+ for section in ['vars', 'children', 'hosts']:
+ if section in group_data:
+ # convert strings to dicts as these are allowed
+ if isinstance(group_data[section], string_types):
+ group_data[section] = {group_data[section]: None}
+
+ if not isinstance(group_data[section], (MutableMapping, NoneType)):
+ raise AnsibleParserError('Invalid "%s" entry for "%s" group, requires a dictionary, found "%s" instead.' %
+ (section, group, type(group_data[section])))
+
+ for key in group_data:
+
+ if not isinstance(group_data[key], (MutableMapping, NoneType)):
+ self.display.warning('Skipping key (%s) in group (%s) as it is not a mapping, it is a %s' % (key, group, type(group_data[key])))
+ continue
+
+ if isinstance(group_data[key], NoneType):
+ self.display.vvv('Skipping empty key (%s) in group (%s)' % (key, group))
+ elif key == 'vars':
+ for var in group_data[key]:
+ self.inventory.set_variable(group, var, group_data[key][var])
+ elif key == 'children':
+ for subgroup in group_data[key]:
+ self._parse_group(subgroup, group_data[key][subgroup])
+ self.inventory.add_child(group, subgroup)
+
+ elif key == 'hosts':
+ for host_pattern in group_data[key]:
+ hosts, port = self._parse_host(host_pattern)
+ self._populate_host_vars(hosts, group_data[key][host_pattern] or {}, group, port)
+ else:
+ self.display.warning('Skipping unexpected key (%s) in group (%s), only "vars", "children" and "hosts" are valid' % (key, group))
+
+ else:
+ self.display.warning("Skipping '%s' as this is not a valid group definition" % group)
+
+ def _parse_host(self, host_pattern):
+ '''
+ Each host key can be a pattern, try to process it and add variables as needed
+ '''
+ (hostnames, port) = self._expand_hostpattern(host_pattern)
+
+ return hostnames, port
diff --git a/test/integration/targets/rel_plugin_loading/subdir/play.yml b/test/integration/targets/rel_plugin_loading/subdir/play.yml
new file mode 100644
index 00000000..2326b14a
--- /dev/null
+++ b/test/integration/targets/rel_plugin_loading/subdir/play.yml
@@ -0,0 +1,6 @@
+- hosts: all
+ gather_facts: false
+ tasks:
+ - assert:
+ that:
+ - inventory_hostname == 'testhost'
diff --git a/test/integration/targets/remote_tmp/aliases b/test/integration/targets/remote_tmp/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/test/integration/targets/remote_tmp/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/test/integration/targets/remote_tmp/playbook.yml b/test/integration/targets/remote_tmp/playbook.yml
new file mode 100644
index 00000000..43f99ca5
--- /dev/null
+++ b/test/integration/targets/remote_tmp/playbook.yml
@@ -0,0 +1,57 @@
+- name: Test temp dir on privilege de-escalation
+ hosts: testhost
+ become: yes
+ tasks:
+ - name: create test user
+ user:
+ name: tmptest
+ state: present
+ group: '{{ "staff" if ansible_facts.distribution == "MacOSX" else omit }}'
+
+ - name: execute test case
+ become_user: tmptest
+ block:
+ - name: Test case from issue 41340
+ blockinfile:
+ create: yes
+ block: |
+ export foo=bar
+ marker: "# {mark} Here there be a marker"
+ dest: /tmp/testing.txt
+ mode: 0644
+ always:
+ - name: clean up file
+ file: path=/tmp/testing.txt state=absent
+
+ - name: clean up test user
+ user: name=tmptest state=absent
+ become_user: root
+
+- name: Test tempdir is removed
+ hosts: testhost
+ gather_facts: false
+ tasks:
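+  # the '65393' file names reference the upstream issue this play guards against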
+ - file:
+ state: touch
+ path: "{{ output_dir }}/65393"
+
+ - copy:
+ src: "{{ output_dir }}/65393"
+ dest: "{{ output_dir }}/65393.2"
+ remote_src: true
+
+ - find:
+ path: "~/.ansible/tmp"
+ use_regex: yes
+ patterns: 'AnsiballZ_.+\.py'
+ recurse: true
+ register: result
+
+ - debug:
+ var: result
+
+ - assert:
+ that:
+ # Should only be AnsiballZ_find.py because find is actively running
+ - result.files|length == 1
+ - result.files[0].path.endswith('/AnsiballZ_find.py')
diff --git a/test/integration/targets/remote_tmp/runme.sh b/test/integration/targets/remote_tmp/runme.sh
new file mode 100755
index 00000000..8d1eebd6
--- /dev/null
+++ b/test/integration/targets/remote_tmp/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -ux
+
+ansible-playbook -i ../../inventory playbook.yml -e "output_dir=${OUTPUT_DIR}" -v "$@"
diff --git a/test/integration/targets/replace/aliases b/test/integration/targets/replace/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/replace/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/replace/meta/main.yml b/test/integration/targets/replace/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/replace/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/replace/tasks/main.yml b/test/integration/targets/replace/tasks/main.yml
new file mode 100644
index 00000000..24146ff3
--- /dev/null
+++ b/test/integration/targets/replace/tasks/main.yml
@@ -0,0 +1,265 @@
+# setup
+- set_fact: output_dir_test={{output_dir}}/test_replace
+
+- name: make sure our testing sub-directory does not exist
+ file: path="{{ output_dir_test }}" state=absent
+
+- name: create our testing sub-directory
+ file: path="{{ output_dir_test }}" state=directory
+
+# tests
+- name: create test files
+ copy:
+ content: |-
+ The quick brown fox jumps over the lazy dog.
+ We promptly judged antique ivory buckles for the next prize.
+ Jinxed wizards pluck ivy from the big quilt.
+ Jaded zombies acted quaintly but kept driving their oxen forward.
+ dest: "{{ output_dir_test }}/pangrams.{{ item }}.txt"
+ with_sequence: start=0 end=6 format=%02x # increment as needed
+
+
+## test `before` option
+- name: remove all spaces before "quilt"
+ replace:
+ path: "{{ output_dir_test }}/pangrams.00.txt"
+ before: 'quilt'
+ regexp: ' '
+ register: replace_test0
+
+- command: "cat {{ output_dir_test }}/pangrams.00.txt"
+ register: replace_cat0
+
+- name: validate before assertions
+ assert:
+ that:
+ - replace_test0 is successful
+ - replace_test0 is changed
+ - replace_cat0.stdout_lines[0] == 'Thequickbrownfoxjumpsoverthelazydog.'
+ - replace_cat0.stdout_lines[-1] == 'Jaded zombies acted quaintly but kept driving their oxen forward.'
+
+
+## test `after` option
+- name: remove all spaces after "promptly"
+ replace:
+ path: "{{ output_dir_test }}/pangrams.01.txt"
+ after: 'promptly'
+ regexp: ' '
+ register: replace_test1
+
+- command: "cat {{ output_dir_test }}/pangrams.01.txt"
+ register: replace_cat1
+
+- name: validate after assertions
+ assert:
+ that:
+ - replace_test1 is successful
+ - replace_test1 is changed
+ - replace_cat1.stdout_lines[0] == 'The quick brown fox jumps over the lazy dog.'
+ - replace_cat1.stdout_lines[-1] == 'Jadedzombiesactedquaintlybutkeptdrivingtheiroxenforward.'
+
+
+## test combined `before` and `after` options
+- name: before "promptly" but after "quilt", replace every "e" with a "3"
+ replace:
+ path: "{{ output_dir_test }}/pangrams.02.txt"
+ before: 'promptly'
+ after: 'quilt'
+ regexp: 'e'
+ replace: '3'
+ register: replace_test2
+
+- name: validate after+before assertions
+ assert:
+ that:
+ - replace_test2 is successful
+ - not replace_test2 is changed
+ - replace_test2.msg.startswith("Pattern for before/after params did not match the given file")
+
+- name: before "quilt" but after "promptly", replace every "e" with a "3"
+ replace:
+ path: "{{ output_dir_test }}/pangrams.03.txt"
+ before: 'quilt'
+ after: 'promptly'
+ regexp: 'e'
+ replace: '3'
+ register: replace_test3
+
+- command: "cat {{ output_dir_test }}/pangrams.03.txt"
+ register: replace_cat3
+
+- name: validate before+after assertions
+ assert:
+ that:
+ - replace_test3 is successful
+ - replace_test3 is changed
+ - replace_cat3.stdout_lines[1] == 'We promptly judg3d antiqu3 ivory buckl3s for th3 n3xt priz3.'
+
+
+## test ^$ behavior in MULTILINE, and . behavior in absence of DOTALL
+- name: quote everything between bof and eof
+ replace:
+ path: "{{ output_dir_test }}/pangrams.04.txt"
+ regexp: ^([\S\s]+)$
+ replace: '"\1"'
+ register: replace_test4_0
+
+- command: "cat {{ output_dir_test }}/pangrams.04.txt"
+ register: replace_cat4_0
+
+- name: quote everything between bol and eol
+ replace:
+ path: "{{ output_dir_test }}/pangrams.04.txt"
+ regexp: ^(.+)$
+ replace: '"\1"'
+ register: replace_test4_1
+
+- command: "cat {{ output_dir_test }}/pangrams.04.txt"
+ register: replace_cat4_1
+
+- name: validate multiline assertions
+ assert:
+ that:
+ - replace_test4_0 is successful
+ - replace_test4_0 is changed
+ - replace_test4_1 is successful
+ - replace_test4_1 is changed
+ - replace_cat4_0.stdout_lines[0] == '"The quick brown fox jumps over the lazy dog.'
+ - replace_cat4_0.stdout_lines[-1] == 'Jaded zombies acted quaintly but kept driving their oxen forward."'
+ - replace_cat4_1.stdout_lines[0] == '""The quick brown fox jumps over the lazy dog."'
+ - replace_cat4_1.stdout_lines[-1] == '"Jaded zombies acted quaintly but kept driving their oxen forward.""'
+
+
+## test \b escaping in short and long form
+- name: short form with unescaped word boundaries
+ replace: path="{{ output_dir_test }}/pangrams.05.txt" regexp='\b(.+)\b' replace='"\1"'
+ register: replace_test5_0
+
+- name: short form with escaped word boundaries
+ replace: path="{{ output_dir_test }}/pangrams.05.txt" regexp='\\b(.+)\\b' replace='"\1"'
+ register: replace_test5_1
+
+- command: "cat {{ output_dir_test }}/pangrams.05.txt"
+ register: replace_cat5_1
+
+- name: long form with unescaped word boundaries
+ replace:
+ path: "{{ output_dir_test }}/pangrams.05.txt"
+ regexp: '\b(.+)\b'
+ replace: '"\1"'
+ register: replace_test5_2
+
+- command: "cat {{ output_dir_test }}/pangrams.05.txt"
+ register: replace_cat5_2
+
+- name: long form with escaped word boundaries
+ replace:
+ path: "{{ output_dir_test }}/pangrams.05.txt"
+ regexp: '\\b(.+)\\b'
+ replace: '"\1"'
+ register: replace_test5_3
+
+- name: validate word boundary assertions
+ assert:
+ that:
+ - not replace_test5_0 is changed
+ - replace_test5_1 is changed
+ - replace_test5_2 is changed
+ - not replace_test5_3 is changed
+ - replace_cat5_1.stdout_lines[0] == '"The quick brown fox jumps over the lazy dog".'
+ - replace_cat5_1.stdout_lines[-1] == '"Jaded zombies acted quaintly but kept driving their oxen forward".'
+ - replace_cat5_2.stdout_lines[0] == '""The quick brown fox jumps over the lazy dog"".'
+ - replace_cat5_2.stdout_lines[-1] == '""Jaded zombies acted quaintly but kept driving their oxen forward"".'
+
+
+## test backup behaviors
+- name: replacement with backup
+ replace:
+ path: "{{ output_dir_test }}/pangrams.06.txt"
+ regexp: ^(.+)$
+ replace: '"\1"'
+ backup: true
+ register: replace_test6
+
+- command: "cat {{ output_dir_test }}/pangrams.06.txt"
+ register: replace_cat6_0
+
+- command: "cat {{ replace_test6.backup_file }}"
+ register: replace_cat6_1
+
+- name: validate backup
+ assert:
+ that:
+ - replace_test6 is successful
+ - replace_test6 is changed
+ - replace_test6.backup_file is search('/pangrams.06.txt.')
+ - replace_cat6_0.stdout != replace_cat6_1.stdout
+
+
+## test filesystem failures
+- name: fail on directory
+ replace:
+ path: "{{ output_dir_test }}"
+ regexp: ^(.+)$
+ register: replace_test7_1
+ ignore_errors: true
+
+- name: fail on missing file
+ replace:
+ path: "{{ output_dir_test }}/missing_file.txt"
+ regexp: ^(.+)$
+ register: replace_test7_2
+ ignore_errors: true
+
+- name: validate failure assertions
+ assert:
+ that:
+ - replace_test7_1 is failure
+ - replace_test7_2 is failure
+ - replace_test7_1.msg.endswith(" is a directory !")
+ - replace_test7_2.msg.endswith(" does not exist !")
+
+
+## test subsection replacement when before/after potentially match more than once
+- name: test file for subsection replacement gone awry
+ copy:
+ content: |-
+ # start of group
+ 0.0.0.0
+ 127.0.0.1
+ 127.0.1.1
+ # end of group
+
+ # start of group
+ 0.0.0.0
+ 127.0.0.1
+ 127.0.1.1
+ # end of group
+
+ # start of group
+ 0.0.0.0
+ 127.0.0.1
+ 127.0.1.1
+ # end of group
+ dest: "{{ output_dir_test }}/addresses.txt"
+
+- name: subsection madness
+ replace:
+ path: "{{ output_dir_test }}/addresses.txt"
+ after: '# start of group'
+ before: '# end of group'
+ regexp: '0'
+ replace: '9'
+ register: replace_test8
+
+- command: "cat {{ output_dir_test }}/addresses.txt"
+ register: replace_cat8
+
+- name: validate before+after assertions
+ assert:
+ that:
+ - replace_test8 is successful
+ - replace_test8 is changed
+ - replace_cat8.stdout_lines[1] == "9.9.9.9"
+ - replace_cat8.stdout_lines[7] == "0.0.0.0"
+ - replace_cat8.stdout_lines[13] == "0.0.0.0"
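The \b tests above pin down a quoting subtlety: in the key=value short form the argument parser consumes one level of backslash escaping before the pattern reaches the regex engine, while the quoted long form passes the regex through as written. A hedged side-by-side sketch of the two working spellings (the path is a placeholder, not from the patch):

- name: short form needs doubled backslashes
  replace: path=/tmp/example.txt regexp='\\b(.+)\\b' replace='"\1"'

- name: long form takes the regex literally
  replace:
    path: /tmp/example.txt
    regexp: '\b(.+)\b'
    replace: '"\1"'

The assertions above confirm the pairing: the escaped short form and the unescaped long form report changed, and the other two combinations do not.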
diff --git a/test/integration/targets/retry_task_name_in_callback/aliases b/test/integration/targets/retry_task_name_in_callback/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/retry_task_name_in_callback/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/retry_task_name_in_callback/runme.sh b/test/integration/targets/retry_task_name_in_callback/runme.sh
new file mode 100755
index 00000000..5f636cd8
--- /dev/null
+++ b/test/integration/targets/retry_task_name_in_callback/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# we are looking to verify the callback for v2_runner_retry gets the correct task name, even
+# if the value needs templating based on results of previous tasks
+OUTFILE="callback_retry_task_name.out"
+trap 'rm -rf "${OUTFILE}"' EXIT
+
+EXPECTED_REGEX="^.*TASK.*18236 callback task template fix OUTPUT 2"
+ansible-playbook "$@" -i ../../inventory test.yml | tee "${OUTFILE}"
+echo "Grepping for ${EXPECTED_REGEX} in stdout."
+grep -e "${EXPECTED_REGEX}" "${OUTFILE}"
diff --git a/test/integration/targets/retry_task_name_in_callback/test.yml b/test/integration/targets/retry_task_name_in_callback/test.yml
new file mode 100644
index 00000000..0e450cf9
--- /dev/null
+++ b/test/integration/targets/retry_task_name_in_callback/test.yml
@@ -0,0 +1,28 @@
+---
+- hosts: testhost
+ gather_facts: False
+ vars:
+ foo: blippy
+ tasks:
+ - name: First run {{ foo }}
+ command: echo "18236 callback task template fix OUTPUT 1"
+ register: the_result_var
+
+ - block:
+ - name: "{{ the_result_var.stdout }}"
+ command: echo "18236 callback task template fix OUTPUT 2"
+ register: the_result_var
+ retries: 1
+ delay: 1
+ until: False
+ ignore_errors: true
+
+ # - name: assert task_name was
+
+ - name: "{{ the_result_var.stdout }}"
+ command: echo "18236 callback taskadfadf template fix OUTPUT 3"
+ register: the_result_var
+
+ - name: "{{ the_result_var.stdout }}"
+ debug:
+ msg: "nothing to see here."
diff --git a/test/integration/targets/roles/aliases b/test/integration/targets/roles/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/roles/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/roles/allowed_dupes.yml b/test/integration/targets/roles/allowed_dupes.yml
new file mode 100644
index 00000000..998950b3
--- /dev/null
+++ b/test/integration/targets/roles/allowed_dupes.yml
@@ -0,0 +1,18 @@
+- name: test that import_role adds one (just one) execution of the role
+ hosts: localhost
+ gather_facts: false
+ tags: ['importrole']
+ roles:
+ - name: a
+ tasks:
+ - name: import role ignores dupe rule
+ import_role: name=a
+
+- name: test that include_role adds one (just one) execution of the role
+ hosts: localhost
+ gather_facts: false
+ tags: ['includerole']
+ roles:
+ - name: a
+ tasks:
+ - include_role: name=a
diff --git a/test/integration/targets/roles/no_dupes.yml b/test/integration/targets/roles/no_dupes.yml
new file mode 100644
index 00000000..0ac9ff94
--- /dev/null
+++ b/test/integration/targets/roles/no_dupes.yml
@@ -0,0 +1,19 @@
+- name: play should only show 1 invocation of a, as dependencies in this play are deduped
+ hosts: testhost
+ gather_facts: false
+ tags: [ 'inroles' ]
+ roles:
+ - role: a
+ - role: b
+ - role: c
+
+- name: play should only show 1 invocation of a, as dependencies in this play are deduped even outside of roles
+ hosts: testhost
+ gather_facts: false
+ tags: [ 'acrossroles' ]
+ roles:
+ - role: a
+ - role: b
+ tasks:
+ - name: execute role c which depends on a
+ import_role: name=c
diff --git a/test/integration/targets/roles/roles/a/tasks/main.yml b/test/integration/targets/roles/roles/a/tasks/main.yml
new file mode 100644
index 00000000..7fb1b487
--- /dev/null
+++ b/test/integration/targets/roles/roles/a/tasks/main.yml
@@ -0,0 +1 @@
+- debug: msg=A
diff --git a/test/integration/targets/roles/roles/b/meta/main.yml b/test/integration/targets/roles/roles/b/meta/main.yml
new file mode 100644
index 00000000..f95ffe65
--- /dev/null
+++ b/test/integration/targets/roles/roles/b/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - name: a
diff --git a/test/integration/targets/roles/roles/b/tasks/main.yml b/test/integration/targets/roles/roles/b/tasks/main.yml
new file mode 100644
index 00000000..57c13524
--- /dev/null
+++ b/test/integration/targets/roles/roles/b/tasks/main.yml
@@ -0,0 +1 @@
+- debug: msg=B
diff --git a/test/integration/targets/roles/roles/c/meta/main.yml b/test/integration/targets/roles/roles/c/meta/main.yml
new file mode 100644
index 00000000..04bd23be
--- /dev/null
+++ b/test/integration/targets/roles/roles/c/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - name: a
diff --git a/test/integration/targets/roles/roles/c/tasks/main.yml b/test/integration/targets/roles/roles/c/tasks/main.yml
new file mode 100644
index 00000000..190c429b
--- /dev/null
+++ b/test/integration/targets/roles/roles/c/tasks/main.yml
@@ -0,0 +1 @@
+- debug: msg=C
diff --git a/test/integration/targets/roles/runme.sh b/test/integration/targets/roles/runme.sh
new file mode 100755
index 00000000..fe99ea10
--- /dev/null
+++ b/test/integration/targets/roles/runme.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# test no dupes when dependencies in b and c point to a in roles:
+[ "$(ansible-playbook no_dupes.yml -i ../../inventory --tags inroles "$@" | grep -c '"msg": "A"')" = "1" ]
+[ "$(ansible-playbook no_dupes.yml -i ../../inventory --tags acrossroles "$@" | grep -c '"msg": "A"')" = "1" ]
+
+# but still dupe across plays
+[ "$(ansible-playbook no_dupes.yml -i ../../inventory "$@" | grep -c '"msg": "A"')" = "2" ]
+
+# include/import can execute another instance of role
+[ "$(ansible-playbook allowed_dupes.yml -i ../../inventory --tags importrole "$@" | grep -c '"msg": "A"')" = "2" ]
+[ "$(ansible-playbook allowed_dupes.yml -i ../../inventory --tags includerole "$@" | grep -c '"msg": "A"')" = "2" ]
diff --git a/test/integration/targets/rpm_key/aliases b/test/integration/targets/rpm_key/aliases
new file mode 100644
index 00000000..3a07aab3
--- /dev/null
+++ b/test/integration/targets/rpm_key/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/rpm_key/defaults/main.yaml b/test/integration/targets/rpm_key/defaults/main.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/rpm_key/defaults/main.yaml
diff --git a/test/integration/targets/rpm_key/tasks/main.yaml b/test/integration/targets/rpm_key/tasks/main.yaml
new file mode 100644
index 00000000..9f6fd4ec
--- /dev/null
+++ b/test/integration/targets/rpm_key/tasks/main.yaml
@@ -0,0 +1,2 @@
+ - include: 'rpm_key.yaml'
+ when: ansible_os_family == "RedHat"
diff --git a/test/integration/targets/rpm_key/tasks/rpm_key.yaml b/test/integration/targets/rpm_key/tasks/rpm_key.yaml
new file mode 100644
index 00000000..58020f48
--- /dev/null
+++ b/test/integration/targets/rpm_key/tasks/rpm_key.yaml
@@ -0,0 +1,195 @@
+---
+#
+# Save initial state
+#
+- name: Retrieve a list of gpg keys that are installed for package checking
+ shell: 'rpm -q gpg-pubkey | sort'
+ register: list_of_pubkeys
+
+- name: Retrieve the gpg keys used to verify packages
+ command: 'rpm -q --qf %{description} gpg-pubkey'
+ register: pubkeys
+
+- name: Save gpg keys to a file
+ copy:
+ content: "{{ pubkeys['stdout'] }}\n"
+ dest: '{{ output_dir }}/pubkeys'
+ mode: 0600
+
+#
+# Tests start
+#
+- name: download EPEL GPG key
+ get_url:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7
+ dest: /tmp/RPM-GPG-KEY-EPEL-7
+
+- name: download sl rpm
+ get_url:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/sl-5.02-1.el7.x86_64.rpm
+ dest: /tmp/sl.rpm
+
+- name: download Mono key
+ get_url:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/mono.gpg
+ dest: /tmp/mono.gpg
+
+- name: remove EPEL GPG key from keyring
+ rpm_key:
+ state: absent
+ key: /tmp/RPM-GPG-KEY-EPEL-7
+
+- name: check GPG signature of sl. Should fail
+ shell: "rpm --checksig /tmp/sl.rpm"
+ register: sl_check
+ ignore_errors: yes
+
+- name: confirm that signature check failed
+ assert:
+ that:
+ - "'MISSING KEYS' in sl_check.stdout or 'SIGNATURES NOT OK' in sl_check.stdout"
+ - "sl_check.failed"
+
+- name: remove EPEL GPG key from keyring (idempotent)
+ rpm_key:
+ state: absent
+ key: /tmp/RPM-GPG-KEY-EPEL-7
+ register: idempotent_test
+
+- name: check idempotence
+ assert:
+ that: "not idempotent_test.changed"
+
+- name: add EPEL GPG key to key ring
+ rpm_key:
+ state: present
+ key: /tmp/RPM-GPG-KEY-EPEL-7
+
+- name: add EPEL GPG key to key ring (idempotent)
+ rpm_key:
+ state: present
+ key: /tmp/RPM-GPG-KEY-EPEL-7
+
+- name: add Mono gpg key
+ rpm_key:
+ state: present
+ key: /tmp/mono.gpg
+
+- name: add Mono gpg key (idempotent)
+ rpm_key:
+ state: present
+ key: /tmp/mono.gpg
+ register: mono_idempotence
+
+- name: verify idempotence
+ assert:
+ that: "not mono_indempotence.changed"
+
+- name: check GPG signature of sl. Should return okay
+ shell: "rpm --checksig /tmp/sl.rpm"
+ register: sl_check
+
+- name: confirm that signature check succeeded
+ assert:
+ that: "'rsa sha1 (md5) pgp md5 OK' in sl_check.stdout or 'digests signatures OK' in sl_check.stdout"
+
+- name: remove GPG key from url
+ rpm_key:
+ state: absent
+ key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7
+
+- name: Confirm key is missing
+ shell: "rpm --checksig /tmp/sl.rpm"
+ register: sl_check
+ ignore_errors: yes
+
+- name: confirm that signature check failed
+ assert:
+ that:
+ - "'MISSING KEYS' in sl_check.stdout or 'SIGNATURES NOT OK' in sl_check.stdout"
+ - "sl_check.failed"
+
+- name: add GPG key from url
+ rpm_key:
+ state: present
+ key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7
+
+- name: check GPG signature of sl. Should return okay
+ shell: "rpm --checksig /tmp/sl.rpm"
+ register: sl_check
+
+- name: confirm that signature check succeeded
+ assert:
+ that: "'rsa sha1 (md5) pgp md5 OK' in sl_check.stdout or 'digests signatures OK' in sl_check.stdout"
+
+- name: remove all keys from key ring
+ shell: "rpm -q gpg-pubkey | xargs rpm -e"
+
+- name: add very first key on system
+ rpm_key:
+ state: present
+ key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY-EPEL-7
+
+- name: check GPG signature of sl. Should return okay
+ shell: "rpm --checksig /tmp/sl.rpm"
+ register: sl_check
+
+- name: confirm that signature check succeeded
+ assert:
+ that: "'rsa sha1 (md5) pgp md5 OK' in sl_check.stdout or 'digests signatures OK' in sl_check.stdout"
+
+- name: Issue 20325 - Verify fingerprint of key, invalid fingerprint - EXPECTED FAILURE
+ rpm_key:
+ key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY.dag
+ fingerprint: 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111
+ register: result
+ failed_when: result is success
+
+- name: Issue 20325 - Assert Verify fingerprint of key, invalid fingerprint
+ assert:
+ that:
+ - result is success
+ - result is not changed
+ - "'does not match the key fingerprint' in result.msg"
+
+- name: Issue 20325 - Verify fingerprint of key, valid fingerprint
+ rpm_key:
+ key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY.dag
+ fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
+ register: result
+
+- name: Issue 20325 - Assert Verify fingerprint of key, valid fingerprint
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: Issue 20325 - Verify fingerprint of key, valid fingerprint - Idempotent check
+ rpm_key:
+ key: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/rpm_key/RPM-GPG-KEY.dag
+ fingerprint: EBC6 E12C 62B1 C734 026B 2122 A20E 5214 6B8D 79E6
+ register: result
+
+- name: Issue 20325 - Assert Verify fingerprint of key, valid fingerprint - Idempotent check
+ assert:
+ that:
+ - result is success
+ - result is not changed
+
+#
+# Cleanup
+#
+- name: remove all keys from key ring
+ shell: "rpm -q gpg-pubkey | xargs rpm -e"
+
+- name: Restore the gpg keys normally installed on the system
+ command: 'rpm --import {{ output_dir }}/pubkeys'
+
+- name: Retrieve a list of gpg keys that are installed for package checking
+ shell: 'rpm -q gpg-pubkey | sort'
+ register: new_list_of_pubkeys
+
+- name: Confirm that we've restored all the pubkeys
+ assert:
+ that:
+ - 'list_of_pubkeys["stdout"] == new_list_of_pubkeys["stdout"]'
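The issue-20325 cases above demonstrate the pattern worth copying outside a test suite: when a key is fetched over the network, pin its expected fingerprint so a swapped or tampered key is rejected rather than imported. A minimal sketch, with a placeholder URL and fingerprint rather than real values:

- name: import a vendor signing key, failing if the fingerprint differs
  rpm_key:
    state: present
    key: https://repo.example.com/RPM-GPG-KEY-vendor
    fingerprint: 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000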
diff --git a/test/integration/targets/run_modules/aliases b/test/integration/targets/run_modules/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/run_modules/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/run_modules/args.json b/test/integration/targets/run_modules/args.json
new file mode 100644
index 00000000..c3abc21a
--- /dev/null
+++ b/test/integration/targets/run_modules/args.json
@@ -0,0 +1 @@
+{ "ANSIBLE_MODULE_ARGS": {} }
diff --git a/test/integration/targets/run_modules/library/test.py b/test/integration/targets/run_modules/library/test.py
new file mode 100644
index 00000000..bbe3182c
--- /dev/null
+++ b/test/integration/targets/run_modules/library/test.py
@@ -0,0 +1,7 @@
+#!/usr/bin/python
+
+from ansible.module_utils.basic import AnsibleModule
+
+module = AnsibleModule(argument_spec=dict())
+
+module.exit_json(**{'tempdir': module._remote_tmp})
diff --git a/test/integration/targets/run_modules/runme.sh b/test/integration/targets/run_modules/runme.sh
new file mode 100755
index 00000000..34c245cb
--- /dev/null
+++ b/test/integration/targets/run_modules/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# test running module directly
+python.py library/test.py args.json
diff --git a/test/integration/targets/script/aliases b/test/integration/targets/script/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/script/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/script/files/create_afile.sh b/test/integration/targets/script/files/create_afile.sh
new file mode 100755
index 00000000..e6fae448
--- /dev/null
+++ b/test/integration/targets/script/files/create_afile.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+echo "win" > "$1" \ No newline at end of file
diff --git a/test/integration/targets/script/files/no_shebang.py b/test/integration/targets/script/files/no_shebang.py
new file mode 100644
index 00000000..c6c813af
--- /dev/null
+++ b/test/integration/targets/script/files/no_shebang.py
@@ -0,0 +1,3 @@
+import sys
+
+sys.stdout.write("Script with shebang omitted")
diff --git a/test/integration/targets/script/files/remove_afile.sh b/test/integration/targets/script/files/remove_afile.sh
new file mode 100755
index 00000000..4a7fea66
--- /dev/null
+++ b/test/integration/targets/script/files/remove_afile.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+rm "$1" \ No newline at end of file
diff --git a/test/integration/targets/script/files/space path/test.sh b/test/integration/targets/script/files/space path/test.sh
new file mode 100755
index 00000000..6f6334d7
--- /dev/null
+++ b/test/integration/targets/script/files/space path/test.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+echo -n "Script with space in path" \ No newline at end of file
diff --git a/test/integration/targets/script/files/test.sh b/test/integration/targets/script/files/test.sh
new file mode 100755
index 00000000..ade17e9b
--- /dev/null
+++ b/test/integration/targets/script/files/test.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+echo -n "win" \ No newline at end of file
diff --git a/test/integration/targets/script/files/test_with_args.sh b/test/integration/targets/script/files/test_with_args.sh
new file mode 100755
index 00000000..13dce4f2
--- /dev/null
+++ b/test/integration/targets/script/files/test_with_args.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+for i in "$@"; do
+ echo "$i"
+done
\ No newline at end of file
diff --git a/test/integration/targets/script/meta/main.yml b/test/integration/targets/script/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/script/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/script/tasks/main.yml b/test/integration/targets/script/tasks/main.yml
new file mode 100644
index 00000000..f1746f7c
--- /dev/null
+++ b/test/integration/targets/script/tasks/main.yml
@@ -0,0 +1,240 @@
+# Test code for the script module and action_plugin.
+# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+##
+## prep
+##
+
+- set_fact:
+ output_dir_test: "{{ output_dir }}/test_script"
+
+- name: make sure our testing sub-directory does not exist
+ file:
+ path: "{{ output_dir_test }}"
+ state: absent
+
+- name: create our testing sub-directory
+ file:
+ path: "{{ output_dir_test }}"
+ state: directory
+
+##
+## script
+##
+
+- name: execute the test.sh script via command
+ script: test.sh
+ register: script_result0
+
+- name: assert that the script executed correctly
+ assert:
+ that:
+ - "script_result0.rc == 0"
+ - "script_result0.stdout == 'win'"
+
+- name: Execute a script with a space in the path
+ script: "'space path/test.sh'"
+ register: _space_path_test
+ tags:
+ - spacepath
+
+- name: Assert that script with space in path ran successfully
+ assert:
+ that:
+ - _space_path_test is success
+ - _space_path_test.stdout == 'Script with space in path'
+ tags:
+ - spacepath
+
+- name: Execute a script with arguments including a unicode character
+ script: test_with_args.sh -this -that -Ӧther
+ register: unicode_args
+
+- name: Assert that script with unicode character ran successfully
+ assert:
+ that:
+ - unicode_args is success
+ - unicode_args.stdout_lines[0] == '-this'
+ - unicode_args.stdout_lines[1] == '-that'
+ - unicode_args.stdout_lines[2] == '-Ӧther'
+
+# creates
+- name: verify that afile.txt is absent
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: absent
+
+- name: create afile.txt with create_afile.sh via command
+ script: create_afile.sh {{ output_dir_test | expanduser }}/afile.txt
+ args:
+ creates: "{{ output_dir_test | expanduser }}/afile.txt"
+ register: _create_test1
+
+- name: Check state of created file
+ stat:
+ path: "{{ output_dir_test | expanduser }}/afile.txt"
+ register: _create_stat1
+
+- name: Run create_afile.sh again to ensure it is skipped
+ script: create_afile.sh {{ output_dir_test | expanduser }}/afile.txt
+ args:
+ creates: "{{ output_dir_test | expanduser }}/afile.txt"
+ register: _create_test2
+
+- name: Assert that the script reported a change, the file was created, and the second run was skipped
+ assert:
+ that:
+ - _create_test1 is changed
+ - _create_stat1.stat.exists
+ - _create_test2 is skipped
+
+
+# removes
+- name: verify that afile.txt is present
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: file
+
+- name: remove afile.txt with remove_afile.sh via command
+ script: remove_afile.sh {{ output_dir_test | expanduser }}/afile.txt
+ args:
+ removes: "{{ output_dir_test | expanduser }}/afile.txt"
+ register: _remove_test1
+
+- name: Check state of removed file
+ stat:
+ path: "{{ output_dir_test | expanduser }}/afile.txt"
+ register: _remove_stat1
+
+- name: Run remove_afile.sh again to ensure it is skipped
+ script: remove_afile.sh {{ output_dir_test | expanduser }}/afile.txt
+ args:
+ removes: "{{ output_dir_test | expanduser }}/afile.txt"
+ register: _remove_test2
+
+- name: Assert that the script reported a change, the file was removed, and the second run was skipped
+ assert:
+ that:
+ - _remove_test1 is changed
+ - not _remove_stat1.stat.exists
+ - _remove_test2 is skipped
+
+
+# async
+- name: verify that afile.txt is absent
+ file:
+ path: "{{ output_dir_test }}/afile.txt"
+ state: absent
+
+- name: test task failure with async param
+ script: /some/script.sh
+ async: 2
+ ignore_errors: true
+ register: script_result3
+
+- name: assert task with async param failed
+ assert:
+ that:
+ - script_result3 is failed
+ - script_result3.msg == "async is not supported for this task."
+
+
+# check mode
+- name: Run script to create a file in check mode
+ script: create_afile.sh {{ output_dir_test | expanduser }}/afile2.txt
+ check_mode: yes
+ register: _check_mode_test
+
+- debug:
+ var: _check_mode_test
+ verbosity: 2
+
+- name: Get state of file created by script
+ stat:
+ path: "{{ output_dir_test | expanduser }}/afile2.txt"
+ register: _afile_stat
+
+- debug:
+ var: _afile_stat
+ verbosity: 2
+
+- name: Assert that a change was reported but the script did not make changes
+ assert:
+ that:
+ - _check_mode_test is changed
+ - not _afile_stat.stat.exists
+
+- name: Run script to create a file
+ script: create_afile.sh {{ output_dir_test | expanduser }}/afile2.txt
+
+- name: Run script to create a file in check mode with 'creates' argument
+ script: create_afile.sh {{ output_dir_test | expanduser }}/afile2.txt
+ args:
+ creates: "{{ output_dir_test | expanduser }}/afile2.txt"
+ register: _check_mode_test2
+ check_mode: yes
+
+- debug:
+ var: _check_mode_test2
+ verbosity: 2
+
+- name: Assert that the task was skipped and a message was returned
+ assert:
+ that:
+ - _check_mode_test2 is skipped
+ - '_check_mode_test2.msg == "{{ output_dir_test | expanduser }}/afile2.txt exists, matching creates option"'
+
+- name: Remove afile2.txt
+ file:
+ path: "{{ output_dir_test | expanduser }}/afile2.txt"
+ state: absent
+
+- name: Run script to remove a file in check mode with 'removes' argument
+ script: remove_afile.sh {{ output_dir_test | expanduser }}/afile2.txt
+ args:
+ removes: "{{ output_dir_test | expanduser }}/afile2.txt"
+ register: _check_mode_test3
+ check_mode: yes
+
+- debug:
+ var: _check_mode_test3
+ verbosity: 2
+
+- name: Assert that task was skipped and message was returned
+ assert:
+ that:
+ - _check_mode_test3 is skipped
+ - '_check_mode_test3.msg == "{{ output_dir_test | expanduser }}/afile2.txt does not exist, matching removes option"'
+
+# executable
+
+- name: Run script with shebang omitted
+ script: no_shebang.py
+ args:
+ executable: "{{ ansible_python_interpreter }}"
+ register: _shebang_omitted_test
+ tags:
+ - noshebang
+
+- name: Assert that script with shebang omitted succeeded
+ assert:
+ that:
+ - _shebang_omitted_test is success
+ - _shebang_omitted_test.stdout == 'Script with shebang omitted'
+ tags:
+ - noshebang
diff --git a/test/integration/targets/service/aliases b/test/integration/targets/service/aliases
new file mode 100644
index 00000000..1ef4c361
--- /dev/null
+++ b/test/integration/targets/service/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
diff --git a/test/integration/targets/service/files/ansible-broken.upstart b/test/integration/targets/service/files/ansible-broken.upstart
new file mode 100644
index 00000000..4e9c6694
--- /dev/null
+++ b/test/integration/targets/service/files/ansible-broken.upstart
@@ -0,0 +1,10 @@
+description "ansible test daemon"
+
+start on runlevel [345]
+stop on runlevel [!345]
+
+expect daemon
+
+exec ansible_test_service
+
+manual
diff --git a/test/integration/targets/service/files/ansible.rc b/test/integration/targets/service/files/ansible.rc
new file mode 100644
index 00000000..ec77d521
--- /dev/null
+++ b/test/integration/targets/service/files/ansible.rc
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# PROVIDE: ansible_test_service
+# REQUIRE: FILESYSTEMS devfs
+# BEFORE: LOGIN
+# KEYWORD: nojail shutdown
+
+. /etc/rc.subr
+
+name="ansible_test_service"
+rcvar="ansible_test_service_enable"
+command="/usr/sbin/${name}"
+pidfile="/var/run/${name}.pid"
+extra_commands=reload
+load_rc_config $name
+run_rc_command "$1"
diff --git a/test/integration/targets/service/files/ansible.systemd b/test/integration/targets/service/files/ansible.systemd
new file mode 100644
index 00000000..3466f25a
--- /dev/null
+++ b/test/integration/targets/service/files/ansible.systemd
@@ -0,0 +1,11 @@
+[Unit]
+Description=Ansible Test Service
+
+[Service]
+ExecStart=/usr/sbin/ansible_test_service "Test\nthat newlines in scripts\nwork"
+ExecReload=/bin/true
+Type=forking
+PIDFile=/var/run/ansible_test_service.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/test/integration/targets/service/files/ansible.sysv b/test/integration/targets/service/files/ansible.sysv
new file mode 100755
index 00000000..1df0423d
--- /dev/null
+++ b/test/integration/targets/service/files/ansible.sysv
@@ -0,0 +1,134 @@
+#!/bin/sh
+#
+
+# LSB header
+
+### BEGIN INIT INFO
+# Provides: ansible-test
+# Default-Start: 3 4 5
+# Default-Stop: 0 1 2 6
+# Short-Description: test daemon for ansible
+# Description: This is a test daemon used by ansible for testing only
+### END INIT INFO
+
+# chkconfig header
+
+# chkconfig: 345 99 99
+# description: This is a test daemon used by ansible for testing only
+#
+# processname: /usr/sbin/ansible_test_service
+
+# Sanity checks.
+[ -x /usr/sbin/ansible_test_service ] || exit 0
+
+DEBIAN_VERSION=/etc/debian_version
+SUSE_RELEASE=/etc/SuSE-release
+# Source function library.
+if [ -f $DEBIAN_VERSION ]; then
+ . /lib/lsb/init-functions
+elif [ -f $SUSE_RELEASE -a -r /etc/rc.status ]; then
+ . /etc/rc.status
+else
+ . /etc/rc.d/init.d/functions
+fi
+
+SERVICE=ansible_test_service
+PROCESS=ansible_test_service
+CONFIG_ARGS=" "
+if [ -f $DEBIAN_VERSION ]; then
+ LOCKFILE=/var/lock/$SERVICE
+else
+ LOCKFILE=/var/lock/subsys/$SERVICE
+fi
+
+RETVAL=0
+
+start() {
+ echo -n "Starting ansible test daemon: "
+ if [ -f $SUSE_RELEASE ]; then
+ startproc -p /var/run/${SERVICE}.pid -f /usr/sbin/ansible_test_service
+ rc_status -v
+ elif [ -e $DEBIAN_VERSION ]; then
+ if [ -f $LOCKFILE ]; then
+ echo -n "already started, lock file found"
+ RETVAL=1
+ elif /usr/sbin/ansible_test_service; then
+ echo -n "OK"
+ RETVAL=0
+ fi
+ else
+ daemon --check $SERVICE $PROCESS --daemonize $CONFIG_ARGS
+ fi
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch $LOCKFILE
+ return $RETVAL
+}
+
+stop() {
+ echo -n "Stopping ansible test daemon: "
+ if [ -f $SUSE_RELEASE ]; then
+ killproc -TERM /usr/sbin/ansible_test_service
+ rc_status -v
+ elif [ -f $DEBIAN_VERSION ]; then
+ # Added this since Debian's start-stop-daemon doesn't support spawned processes
+ if ps -ef | grep "/usr/sbin/ansible_test_service" | grep -v grep | awk '{print $2}' | xargs kill &> /dev/null; then
+ echo -n "OK"
+ RETVAL=0
+ else
+ echo -n "Daemon is not started"
+ RETVAL=1
+ fi
+ else
+ killproc -p /var/run/${SERVICE}.pid
+ fi
+ RETVAL=$?
+ echo
+ if [ $RETVAL -eq 0 ]; then
+ rm -f $LOCKFILE
+ rm -f /var/run/$SERVICE.pid
+ fi
+}
+
+restart() {
+ stop
+ start
+}
+
+# See how we were called.
+case "$1" in
+ start|stop|restart)
+ $1
+ ;;
+ status)
+ if [ -f $SUSE_RELEASE ]; then
+ echo -n "Checking for ansible test service "
+ checkproc /usr/sbin/ansible_test_service
+ rc_status -v
+ elif [ -f $DEBIAN_VERSION ]; then
+ if [ -f $LOCKFILE ]; then
+ RETVAL=0
+ echo "ansible test is running."
+ else
+ RETVAL=1
+ echo "ansible test is stopped."
+ fi
+ else
+ status $PROCESS
+ RETVAL=$?
+ fi
+ ;;
+ condrestart)
+ [ -f $LOCKFILE ] && restart || :
+ ;;
+ reload)
+ echo "ok"
+ RETVAL=0
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|restart|condrestart|reload}"
+ exit 1
+ ;;
+esac
+exit $RETVAL
+
diff --git a/test/integration/targets/service/files/ansible.upstart b/test/integration/targets/service/files/ansible.upstart
new file mode 100644
index 00000000..369f61a8
--- /dev/null
+++ b/test/integration/targets/service/files/ansible.upstart
@@ -0,0 +1,9 @@
+description "ansible test daemon"
+
+start on runlevel [345]
+stop on runlevel [!345]
+
+expect daemon
+
+exec ansible_test_service
+
diff --git a/test/integration/targets/service/files/ansible_test_service.py b/test/integration/targets/service/files/ansible_test_service.py
new file mode 100644
index 00000000..c4feed85
--- /dev/null
+++ b/test/integration/targets/service/files/ansible_test_service.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# this is mostly based off of the code found here:
+# http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
+
+import os
+import resource
+import signal
+import sys
+import time
+
+UMASK = 0
+WORKDIR = "/"
+MAXFD = 1024
+
+if (hasattr(os, "devnull")):
+ REDIRECT_TO = os.devnull
+else:
+ REDIRECT_TO = "/dev/null"
+
+
+def createDaemon():
+ try:
+ pid = os.fork()
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+ if (pid == 0):
+ os.setsid()
+
+ try:
+ pid = os.fork()
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+ if (pid == 0):
+ os.chdir(WORKDIR)
+ os.umask(UMASK)
+ else:
+ f = open('/var/run/ansible_test_service.pid', 'w')
+ f.write("%d\n" % pid)
+ f.close()
+ os._exit(0)
+ else:
+ os._exit(0)
+
+ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+ if (maxfd == resource.RLIM_INFINITY):
+ maxfd = MAXFD
+
+ for fd in range(0, maxfd):
+ try:
+ os.close(fd)
+ except OSError: # ERROR, fd wasn't open to begin with (ignored)
+ pass
+
+ os.open(REDIRECT_TO, os.O_RDWR)
+ os.dup2(0, 1)
+ os.dup2(0, 2)
+
+ return (0)
+
+
+if __name__ == "__main__":
+
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+ retCode = createDaemon()
+
+ while True:
+ time.sleep(1000)
diff --git a/test/integration/targets/service/meta/main.yml b/test/integration/targets/service/meta/main.yml
new file mode 100644
index 00000000..399f3fb6
--- /dev/null
+++ b/test/integration/targets/service/meta/main.yml
@@ -0,0 +1,20 @@
+# test code for the service module
+# (c) 2014, James Cammarata <jcammarata@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/service/tasks/main.yml b/test/integration/targets/service/tasks/main.yml
new file mode 100644
index 00000000..69a9ef20
--- /dev/null
+++ b/test/integration/targets/service/tasks/main.yml
@@ -0,0 +1,58 @@
+- name: install the test daemon script
+ copy:
+ src: ansible_test_service.py
+ dest: /usr/sbin/ansible_test_service
+ mode: '755'
+
+- name: rewrite shebang in the test daemon script
+ lineinfile:
+ path: /usr/sbin/ansible_test_service
+ line: "#!{{ ansible_python_interpreter | realpath }}"
+ insertbefore: BOF
+ firstmatch: yes
+
+- block:
+ # determine which init system is in use
+ - name: detect sysv init system
+ set_fact:
+ service_type: sysv
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux']
+ - ansible_distribution_version is version('6', '>=')
+ - ansible_distribution_version is version('7', '<')
+ - name: detect systemd init system
+ set_fact:
+ service_type: systemd
+ when: (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or ansible_distribution == 'Fedora' or (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or ansible_os_family == 'Suse'
+ - name: detect upstart init system
+ set_fact:
+ service_type: upstart
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_version is version('15.04', '<')
+ - name: detect rc init system
+ set_fact:
+ service_type: rc
+ when:
+ - ansible_distribution.lower().endswith('bsd')
+
+
+ - name: display value of ansible_service_mgr
+ debug:
+ msg: 'ansible_service_mgr: {{ ansible_service_mgr }}'
+
+ - name: setup test service script
+ include_tasks: '{{ service_type }}_setup.yml'
+
+ - name: execute tests
+ import_tasks: tests.yml
+
+ always:
+ - name: disable and stop ansible test service
+ service:
+ name: ansible_test
+ state: stopped
+ enabled: false
+
+ # cleaning up changes made by this playbook
+ - include_tasks: '{{ service_type }}_cleanup.yml'
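The detection block above only sets service_type for distributions it recognizes; anywhere else, the include_tasks line fails on an undefined variable. One hedged way to close that gap is to fall back to the ansible_service_mgr fact already printed above (the mapping is approximate, since the fact reports names such as sysvinit rather than sysv):

  - name: fall back to the service manager fact when no rule matched
    set_fact:
      service_type: '{{ ansible_service_mgr }}'
    when: service_type is not defined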
diff --git a/test/integration/targets/service/tasks/rc_cleanup.yml b/test/integration/targets/service/tasks/rc_cleanup.yml
new file mode 100644
index 00000000..47f470c6
--- /dev/null
+++ b/test/integration/targets/service/tasks/rc_cleanup.yml
@@ -0,0 +1,9 @@
+- name: remove the rc init file
+ file: path=/etc/rc.d/ansible_test state=absent
+ register: remove_rc_result
+
+- name: assert that the rc init file was removed
+ assert:
+ that:
+ - "remove_rc_result.path == '/etc/rc.d/ansible_test'"
+ - "remove_rc_result.state == 'absent'"
diff --git a/test/integration/targets/service/tasks/rc_setup.yml b/test/integration/targets/service/tasks/rc_setup.yml
new file mode 100644
index 00000000..45d2c90d
--- /dev/null
+++ b/test/integration/targets/service/tasks/rc_setup.yml
@@ -0,0 +1,21 @@
+- name: install the rc init file
+ copy: src=ansible.rc dest=/etc/rc.d/ansible_test mode=0755
+ register: install_rc_result
+
+- name: assert that the rc init file was installed
+ assert:
+ that:
+ - "install_rc_result.dest == '/etc/rc.d/ansible_test'"
+ - "install_rc_result.state == 'file'"
+ - "install_rc_result.mode == '0755'"
+ - "install_rc_result.checksum == '8526e4571d2ac685fa5a73af723183c194bda35d'"
+
+# FreeBSD (likely others as well) requires the command_interpreter to match the
+# shebang the script was started with as an extra caution against killing the
+# wrong thing. We add the line here.
+- name: add command_interpreter in rc init file
+ lineinfile:
+ path: /etc/rc.d/ansible_test
+ line: "command_interpreter={{ ansible_python_interpreter | realpath }}"
+ insertafter: '^pidfile.*'
+ firstmatch: yes
diff --git a/test/integration/targets/service/tasks/systemd_cleanup.yml b/test/integration/targets/service/tasks/systemd_cleanup.yml
new file mode 100644
index 00000000..e0707265
--- /dev/null
+++ b/test/integration/targets/service/tasks/systemd_cleanup.yml
@@ -0,0 +1,25 @@
+- name: remove the systemd unit file
+ file: path=/usr/lib/systemd/system/ansible_test.service state=absent
+ register: remove_systemd_result
+
+- name: remove the systemd unit file
+ file: path=/usr/lib/systemd/system/ansible_test_broken.service state=absent
+ register: remove_systemd_broken_result
+
+- debug: var=remove_systemd_broken_result
+- name: assert that the systemd unit file was removed
+ assert:
+ that:
+ - "remove_systemd_result.path == '/usr/lib/systemd/system/ansible_test.service'"
+ - "remove_systemd_result.state == 'absent'"
+ - "remove_systemd_broken_result.path == '/usr/lib/systemd/system/ansible_test_broken.service'"
+ - "remove_systemd_broken_result.state == 'absent'"
+
+- name: make sure systemd is reloaded
+ shell: systemctl daemon-reload
+ register: restart_systemd_result
+
+- name: assert that systemd was reloaded
+ assert:
+ that:
+ - "restart_systemd_result.rc == 0"
diff --git a/test/integration/targets/service/tasks/systemd_setup.yml b/test/integration/targets/service/tasks/systemd_setup.yml
new file mode 100644
index 00000000..a9170a38
--- /dev/null
+++ b/test/integration/targets/service/tasks/systemd_setup.yml
@@ -0,0 +1,17 @@
+- name: install the systemd unit file
+ copy: src=ansible.systemd dest=/etc/systemd/system/ansible_test.service mode=0644
+ register: install_systemd_result
+
+- name: install a broken systemd unit file
+ file: src=ansible_test.service path=/etc/systemd/system/ansible_test_broken.service state=link
+ register: install_broken_systemd_result
+
+- name: assert that the systemd unit file was installed
+ assert:
+ that:
+ - "install_systemd_result.dest == '/etc/systemd/system/ansible_test.service'"
+ - "install_systemd_result.state == 'file'"
+ - "install_systemd_result.mode == '0644'"
+ - "install_systemd_result.checksum == '9e6320795a5c79c01230a6de1c343ea32097af52'"
+ - "install_broken_systemd_result.dest == '/etc/systemd/system/ansible_test_broken.service'"
+ - "install_broken_systemd_result.state == 'link'"
diff --git a/test/integration/targets/service/tasks/sysv_cleanup.yml b/test/integration/targets/service/tasks/sysv_cleanup.yml
new file mode 100644
index 00000000..dbdfcf8b
--- /dev/null
+++ b/test/integration/targets/service/tasks/sysv_cleanup.yml
@@ -0,0 +1,9 @@
+- name: remove the sysV init file
+ file: path=/etc/init.d/ansible_test state=absent
+ register: remove_sysv_result
+
+- name: assert that the sysV init file was removed
+ assert:
+ that:
+ - "remove_sysv_result.path == '/etc/init.d/ansible_test'"
+ - "remove_sysv_result.state == 'absent'"
diff --git a/test/integration/targets/service/tasks/sysv_setup.yml b/test/integration/targets/service/tasks/sysv_setup.yml
new file mode 100644
index 00000000..7b648c24
--- /dev/null
+++ b/test/integration/targets/service/tasks/sysv_setup.yml
@@ -0,0 +1,11 @@
+- name: install the sysV init file
+ copy: src=ansible.sysv dest=/etc/init.d/ansible_test mode=0755
+ register: install_sysv_result
+
+- name: assert that the sysV init file was installed
+ assert:
+ that:
+ - "install_sysv_result.dest == '/etc/init.d/ansible_test'"
+ - "install_sysv_result.state == 'file'"
+ - "install_sysv_result.mode == '0755'"
+ - "install_sysv_result.checksum == '362899814c47d9aad6e93b2f64e39edd24e38797'"
diff --git a/test/integration/targets/service/tasks/tests.yml b/test/integration/targets/service/tasks/tests.yml
new file mode 100644
index 00000000..de66bf5c
--- /dev/null
+++ b/test/integration/targets/service/tasks/tests.yml
@@ -0,0 +1,225 @@
+- name: disable the ansible test service
+ service: name=ansible_test enabled=no
+
+- name: (check mode run) enable the ansible test service
+ service: name=ansible_test enabled=yes
+ register: enable_in_check_mode_result
+ check_mode: yes
+
+- name: assert that changes reported for check mode run
+ assert:
+ that:
+ - "enable_in_check_mode_result is changed"
+
+- name: enable the ansible test service
+ service: name=ansible_test enabled=yes
+ register: enable_result
+
+- name: assert that the service was enabled and changes reported
+ assert:
+ that:
+ - "enable_result.enabled == true"
+ - "enable_result is changed"
+
+- name: start the ansible test service
+ service: name=ansible_test state=started
+ register: start_result
+
+- name: assert that the service was started
+ assert:
+ that:
+ - "start_result.state == 'started'"
+ - "start_result is changed"
+
+- name: check that the service was started
+ shell: 'cat /proc/$(cat /var/run/ansible_test_service.pid)/cmdline'
+ register: cmdline
+ failed_when: cmdline is failed or '\0/usr/sbin/ansible_test_service\0' not in cmdline.stdout
+ # No proc on BSD
+ when: not ansible_distribution.lower().endswith('bsd')
+
+- name: check that the service was started (*bsd)
+ shell: 'ps -p $(cat /var/run/ansible_test_service.pid)'
+ register: cmdline
+ failed_when: cmdline is failed or '/usr/sbin/ansible_test_service' not in cmdline.stdout
+ when: ansible_distribution.lower().endswith('bsd')
+
+- name: find the service with a pattern
+ service: name=ansible_test pattern="ansible_test_ser" state=started
+ register: start2_result
+
+- name: assert that the service was started via the pattern
+ assert:
+ that:
+ - "start2_result.name == 'ansible_test'"
+ - "start2_result.state == 'started'"
+ - "start2_result is not changed"
+
+- name: fetch PID for ansible_test service (before restart)
+ command: 'cat /var/run/ansible_test_service.pid'
+ register: pid_before_restart
+
+- name: restart the ansible test service
+ service: name=ansible_test state=restarted
+ register: restart_result
+
+- name: assert that the service was restarted
+ assert:
+ that:
+ - "restart_result.state == 'started'"
+ - "restart_result is changed"
+
+- name: fetch PID for ansible_test service (after restart)
+ command: 'cat /var/run/ansible_test_service.pid'
+ register: pid_after_restart
+
+- name: "check that PIDs aren't the same"
+ fail:
+ when: pid_before_restart.stdout == pid_after_restart.stdout
+
+- name: check that service is started
+ command: 'cat /proc/{{ pid_after_restart.stdout }}/cmdline'
+ register: cmdline
+ failed_when: cmdline is failed or '\0/usr/sbin/ansible_test_service\0' not in cmdline.stdout
+ # No proc on BSD
+ when: not ansible_distribution.lower().endswith('bsd')
+
+- name: check that the service is started (*bsd)
+ shell: 'ps -p {{ pid_after_restart.stdout }}'
+ register: cmdline
+ failed_when: cmdline is failed or '/usr/sbin/ansible_test_service' not in cmdline.stdout
+ when: ansible_distribution.lower().endswith('bsd')
+
+- name: restart the ansible test service with a sleep
+ service: name=ansible_test state=restarted sleep=2
+ register: restart_sleep_result
+
+- name: assert that the service was restarted with a sleep
+ assert:
+ that:
+ - "restart_sleep_result.state == 'started'"
+ - "restart_sleep_result is changed"
+
+- name: reload the ansible test service
+ service: name=ansible_test state=reloaded
+ register: reload_result
+ # don't do this on systems with systemd because it triggers error:
+ # Unable to reload service ansible_test: ansible_test.service is not active, cannot reload.
+ when: service_type != "systemd"
+
+- name: assert that the service was reloaded
+ assert:
+ that:
+ - "reload_result.state == 'started'"
+ - "reload_result is changed"
+ when: service_type != "systemd"
+
+- name: "test for #42786 (sysvinit)"
+ when: service_type == "sysv"
+ block:
+ - name: "sysvinit (#42786): check state, 'enable' parameter isn't set"
+ service: use=sysvinit name=ansible_test state=started
+
+ - name: "sysvinit (#42786): check that service is still enabled"
+ service: use=sysvinit name=ansible_test enabled=yes
+ register: result_enabled
+ failed_when: result_enabled is changed
+
+- name: fetch PID for ansible_test service
+ command: 'cat /var/run/ansible_test_service.pid'
+ register: ansible_test_pid
+
+- name: check that service is started
+ command: 'cat /proc/{{ ansible_test_pid.stdout }}/cmdline'
+ register: cmdline
+ failed_when: cmdline is failed or '\0/usr/sbin/ansible_test_service\0' not in cmdline.stdout
+ # No proc on BSD
+ when: not ansible_distribution.lower().endswith('bsd')
+
+- name: check that the service is started (*bsd)
+ shell: 'ps -p {{ ansible_test_pid.stdout }}'
+ register: cmdline
+ failed_when: cmdline is failed or '/usr/sbin/ansible_test_service' not in cmdline.stdout
+ when: ansible_distribution.lower().endswith('bsd')
+
+- name: stop the ansible test service
+ service: name=ansible_test state=stopped
+ register: stop_result
+
+- name: check that the service is stopped
+ command: 'cat /proc/{{ ansible_test_pid.stdout }}/cmdline'
+ register: cmdline
+ failed_when: cmdline is not failed or '\0/usr/sbin/ansible_test_service\0' in cmdline.stdout
+ # No proc on BSD
+ when: not ansible_distribution.lower().endswith('bsd')
+
+- name: check that the service is stopped (*bsd)
+ shell: 'ps -p {{ ansible_test_pid.stdout }}'
+ register: cmdline
+ failed_when: cmdline is not failed or '/usr/sbin/ansible_test_service' in cmdline.stdout
+ when: ansible_distribution.lower().endswith('bsd')
+
+- name: assert that the service was stopped
+ assert:
+ that:
+ - "stop_result.state == 'stopped'"
+ - "stop_result is changed"
+
+- name: disable the ansible test service
+ service: name=ansible_test enabled=no
+ register: disable_result
+
+- name: assert that the service was disabled
+ assert:
+ that:
+ - "disable_result.enabled == false"
+ - "disable_result is changed"
+
+- name: try to enable a broken service
+ service: name=ansible_broken_test enabled=yes
+ register: broken_enable_result
+ ignore_errors: True
+
+- name: assert that the broken test failed
+ assert:
+ that:
+ - "broken_enable_result is failed"
+
+- name: remove the test daemon script
+ file: path=/usr/sbin/ansible_test_service state=absent
+ register: remove_result
+
+- name: assert that the test daemon script was removed
+ assert:
+ that:
+ - "remove_result.path == '/usr/sbin/ansible_test_service'"
+ - "remove_result.state == 'absent'"
+
+- name: the module must fail when a service is not found
+ service:
+ name: 'nonexisting'
+ state: stopped
+ register: result
+ ignore_errors: yes
+ when: ansible_distribution != 'FreeBSD'
+
+- assert:
+ that:
+ - result is failed
+ - result is search("Could not find the requested service nonexisting")
+ when: ansible_distribution != 'FreeBSD'
+
+- name: the module must fail in check_mode as well when a service is not found
+ service:
+ name: 'nonexisting'
+ state: stopped
+ register: result
+ check_mode: yes
+ ignore_errors: yes
+ when: ansible_distribution != 'FreeBSD'
+
+- assert:
+ that:
+ - result is failed
+ - result is search("Could not find the requested service nonexisting")
+ when: ansible_distribution != 'FreeBSD'
diff --git a/test/integration/targets/service/tasks/upstart_cleanup.yml b/test/integration/targets/service/tasks/upstart_cleanup.yml
new file mode 100644
index 00000000..683fb104
--- /dev/null
+++ b/test/integration/targets/service/tasks/upstart_cleanup.yml
@@ -0,0 +1,17 @@
+- vars:
+ upstart_files:
+ - /etc/init/ansible_test.conf
+ - /etc/init/ansible_test.override
+ - /etc/init/ansible_test_broken.conf
+ block:
+ - name: remove upstart init files
+ file:
+ path: '{{ item }}'
+ state: absent
+ loop: '{{ upstart_files }}'
+
+ - name: assert that upstart init files were removed
+ raw: 'test -e {{ item }}'
+ loop: '{{ upstart_files }}'
+ register: file_exists
+ failed_when: file_exists is not failed
diff --git a/test/integration/targets/service/tasks/upstart_setup.yml b/test/integration/targets/service/tasks/upstart_setup.yml
new file mode 100644
index 00000000..e9607bb0
--- /dev/null
+++ b/test/integration/targets/service/tasks/upstart_setup.yml
@@ -0,0 +1,19 @@
+- name: install the upstart init file
+ copy: src=ansible.upstart dest=/etc/init/ansible_test.conf mode=0644
+ register: install_upstart_result
+
+- name: install an upstart init file that will fail (manual in .conf)
+ copy: src=ansible-broken.upstart dest=/etc/init/ansible_broken_test.conf mode=0644
+ register: install_upstart_broken_result
+
+- name: assert that the upstart init file was installed
+ assert:
+ that:
+ - "install_upstart_result.dest == '/etc/init/ansible_test.conf'"
+ - "install_upstart_result.state == 'file'"
+ - "install_upstart_result.mode == '0644'"
+ - "install_upstart_result.checksum == '5c314837b6c4dd6c68d1809653a2974e9078e02a'"
+ - "install_upstart_broken_result.dest == '/etc/init/ansible_broken_test.conf'"
+ - "install_upstart_broken_result.state == 'file'"
+ - "install_upstart_broken_result.mode == '0644'"
+ - "install_upstart_broken_result.checksum == 'e66497894f2b2bf71e1380a196cc26089cc24a10'"
diff --git a/test/integration/targets/service/templates/main.yml b/test/integration/targets/service/templates/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/service/templates/main.yml
diff --git a/test/integration/targets/service_facts/aliases b/test/integration/targets/service_facts/aliases
new file mode 100644
index 00000000..cc0aa0d9
--- /dev/null
+++ b/test/integration/targets/service_facts/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group3
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/test/integration/targets/service_facts/files/ansible.systemd b/test/integration/targets/service_facts/files/ansible.systemd
new file mode 100644
index 00000000..3466f25a
--- /dev/null
+++ b/test/integration/targets/service_facts/files/ansible.systemd
@@ -0,0 +1,11 @@
+[Unit]
+Description=Ansible Test Service
+
+[Service]
+ExecStart=/usr/sbin/ansible_test_service "Test\nthat newlines in scripts\nwork"
+ExecReload=/bin/true
+Type=forking
+PIDFile=/var/run/ansible_test_service.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/test/integration/targets/service_facts/files/ansible_test_service.py b/test/integration/targets/service_facts/files/ansible_test_service.py
new file mode 100644
index 00000000..19f1e291
--- /dev/null
+++ b/test/integration/targets/service_facts/files/ansible_test_service.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+# this is mostly based off of the code found here:
+# http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
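+# Double-fork summary: the first fork plus setsid() detaches the process
+# from its controlling terminal; the second fork ensures the daemon cannot
+# reacquire one. The intermediate parent writes the grandchild's PID to the
+# pidfile before exiting; afterwards all file descriptors are closed and
+# stdio is redirected to /dev/null.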
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import resource
+import signal
+import time
+
+UMASK = 0
+WORKDIR = "/"
+MAXFD = 1024
+
+if (hasattr(os, "devnull")):
+ REDIRECT_TO = os.devnull
+else:
+ REDIRECT_TO = "/dev/null"
+
+
+def createDaemon():
+ try:
+ pid = os.fork()
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+ if (pid == 0):
+ os.setsid()
+
+ try:
+ pid = os.fork()
+ except OSError as e:
+ raise Exception("%s [%d]" % (e.strerror, e.errno))
+
+ if (pid == 0):
+ os.chdir(WORKDIR)
+ os.umask(UMASK)
+ else:
+ f = open('/var/run/ansible_test_service.pid', 'w')
+ f.write("%d\n" % pid)
+ f.close()
+ os._exit(0)
+ else:
+ os._exit(0)
+
+ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+ if (maxfd == resource.RLIM_INFINITY):
+ maxfd = MAXFD
+
+ for fd in range(0, maxfd):
+ try:
+ os.close(fd)
+ except OSError: # ERROR, fd wasn't open to begin with (ignored)
+ pass
+
+ os.open(REDIRECT_TO, os.O_RDWR)
+ os.dup2(0, 1)
+ os.dup2(0, 2)
+
+ return (0)
+
+
+if __name__ == "__main__":
+
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+ retCode = createDaemon()
+
+ while True:
+ time.sleep(1000)
diff --git a/test/integration/targets/service_facts/tasks/main.yml b/test/integration/targets/service_facts/tasks/main.yml
new file mode 100644
index 00000000..5a08fad3
--- /dev/null
+++ b/test/integration/targets/service_facts/tasks/main.yml
@@ -0,0 +1,30 @@
+# Test playbook for the service_facts module
+# Copyright: (c) 2017, Adam Miller <admiller@redhat.com>
+# Copyright: (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Gather service facts
+ service_facts:
+
+- name: check that ansible_facts.services exists
+ assert:
+ that: ansible_facts.services is defined
+
+- name: Test disabled service facts (https://github.com/ansible/ansible/issues/69144)
+ block:
+ - name: display value of ansible_service_mgr
+ debug:
+ msg: 'ansible_service_mgr: {{ ansible_service_mgr }}'
+
+ - name: setup test service script
+ include_tasks: 'systemd_setup.yml'
+
+ - name: execute tests
+ import_tasks: tests.yml
+
+ when: >-
+   (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or
+   ansible_distribution == 'Fedora' or
+   (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or
+   (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or
+   ansible_os_family == 'Suse'
diff --git a/test/integration/targets/service_facts/tasks/systemd_cleanup.yml b/test/integration/targets/service_facts/tasks/systemd_cleanup.yml
new file mode 100644
index 00000000..b68530b9
--- /dev/null
+++ b/test/integration/targets/service_facts/tasks/systemd_cleanup.yml
@@ -0,0 +1,32 @@
+- name: remove the systemd unit file
+ file:
+ path: /usr/lib/systemd/system/ansible_test.service
+ state: absent
+ register: remove_systemd_result
+
+- name: assert that the systemd unit file was removed
+ assert:
+ that:
+ - "remove_systemd_result.path == '/usr/lib/systemd/system/ansible_test.service'"
+ - "remove_systemd_result.state == 'absent'"
+
+- name: remove python systemd test script file
+ file:
+ path: /usr/sbin/ansible_test_service
+ state: absent
+ register: remove_systemd_binary_result
+
+- name: assert that python systemd test script file was removed
+ assert:
+ that:
+ - "remove_systemd_binary_result.path == '/usr/sbin/ansible_test_service'"
+ - "remove_systemd_binary_result.state == 'absent'"
+
+- name: make sure systemd is reloaded
+ shell: systemctl daemon-reload
+ register: restart_systemd_result
+
+- name: assert that systemd was reloaded
+ assert:
+ that:
+ - "restart_systemd_result.rc == 0"
diff --git a/test/integration/targets/service_facts/tasks/systemd_setup.yml b/test/integration/targets/service_facts/tasks/systemd_setup.yml
new file mode 100644
index 00000000..85eeed0c
--- /dev/null
+++ b/test/integration/targets/service_facts/tasks/systemd_setup.yml
@@ -0,0 +1,26 @@
+- name: install the test daemon script
+ copy:
+ src: ansible_test_service.py
+ dest: /usr/sbin/ansible_test_service
+ mode: '755'
+
+- name: rewrite shebang in the test daemon script
+ lineinfile:
+ path: /usr/sbin/ansible_test_service
+ line: "#!{{ ansible_python_interpreter | realpath }}"
+ insertbefore: BOF
+ firstmatch: yes
+
+- name: install the systemd unit file
+ copy:
+ src: ansible.systemd
+ dest: /etc/systemd/system/ansible_test.service
+ mode: '0644'
+ register: install_systemd_result
+
+- name: assert that the systemd unit file was installed
+ assert:
+ that:
+ - "install_systemd_result.dest == '/etc/systemd/system/ansible_test.service'"
+ - "install_systemd_result.state == 'file'"
+ - "install_systemd_result.mode == '0644'"
diff --git a/test/integration/targets/service_facts/tasks/tests.yml b/test/integration/targets/service_facts/tasks/tests.yml
new file mode 100644
index 00000000..495b71fb
--- /dev/null
+++ b/test/integration/targets/service_facts/tasks/tests.yml
@@ -0,0 +1,38 @@
+- name: start the ansible test service
+ service:
+ name: ansible_test
+ enabled: yes
+ state: started
+ register: enable_result
+
+- name: assert that the service was enabled and changes reported
+ assert:
+ that:
+ - "enable_result.enabled == true"
+ - "enable_result is changed"
+
+- name: disable the ansible test service
+ service:
+ name: ansible_test
+ state: stopped
+ enabled: no
+ register: start_result
+
+- name: assert that the service was stopped
+ assert:
+ that:
+ - "start_result.state == 'stopped'"
+ - "start_result is changed"
+
+- name: Populate service facts
+ service_facts:
+
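+# service_facts keys the services dict by full unit name, including the
+# '.service' suffix, hence 'ansible_test.service' in the lookups below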
+- name: get ansible_test service's state
+ debug:
+ var: services['ansible_test.service'].state
+
+- name: ansible_test service's running state should be "inactive"
+ assert:
+ that: "services['ansible_test.service'].state == 'inactive'"
diff --git a/test/integration/targets/set_fact/aliases b/test/integration/targets/set_fact/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/test/integration/targets/set_fact/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/test/integration/targets/set_fact/incremental.yml b/test/integration/targets/set_fact/incremental.yml
new file mode 100644
index 00000000..3f7aa6c4
--- /dev/null
+++ b/test/integration/targets/set_fact/incremental.yml
@@ -0,0 +1,38 @@
+- name: test set_fact incremental https://github.com/ansible/ansible/issues/38271
+ hosts: testhost
+ gather_facts: no
+ tasks:
+ - name: Generate inline loop for set_fact
+ set_fact:
+ dig_list: "{{ dig_list + [ item ] }}"
+ loop:
+ - two
+ - three
+ - four
+ vars:
+ dig_list:
+ - one
+
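+ # each loop iteration re-templates dig_list, which resolves to the fact
+ # set by the previous iteration, so the list grows one item at a time;
+ # the task-level var only seeds the starting value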
+ - name: verify cumulative set fact worked
+ assert:
+ that:
+ - dig_list == ['one', 'two', 'three', 'four']
+
+ - name: Generate inline loop for set_fact (FQCN)
+ ansible.builtin.set_fact:
+ dig_list_fqcn: "{{ dig_list_fqcn + [ item ] }}"
+ loop:
+ - two
+ - three
+ - four
+ vars:
+ dig_list_fqcn:
+ - one
+
+ - name: verify cumulative set fact worked (FQCN)
+ assert:
+ that:
+ - dig_list_fqcn == ['one', 'two', 'three', 'four']
diff --git a/test/integration/targets/set_fact/inventory b/test/integration/targets/set_fact/inventory
new file mode 100644
index 00000000..b0c00d32
--- /dev/null
+++ b/test/integration/targets/set_fact/inventory
@@ -0,0 +1,3 @@
+[testgroup]
+testhost ansible_connection=local # no connection is actually established with this host
+localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/set_fact/nowarn_clean_facts.yml b/test/integration/targets/set_fact/nowarn_clean_facts.yml
new file mode 100644
index 00000000..74f908d0
--- /dev/null
+++ b/test/integration/targets/set_fact/nowarn_clean_facts.yml
@@ -0,0 +1,10 @@
+- name: Test no warnings ref "http://github.com/ansible/ansible/issues/37535"
+ hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: set ssh jump host args
+ set_fact:
+ ansible_ssh_common_args: "-o ProxyCommand='ssh -W %h:%p -q root@localhost'"
+ - name: set ssh jump host args (FQCN)
+ ansible.builtin.set_fact:
+ ansible_ssh_common_args: "-o ProxyCommand='ssh -W %h:%p -q root@localhost'"
diff --git a/test/integration/targets/set_fact/runme.sh b/test/integration/targets/set_fact/runme.sh
new file mode 100755
index 00000000..364798a1
--- /dev/null
+++ b/test/integration/targets/set_fact/runme.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+set -eux
+
+MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
+trap 'rm -rf "${MYTMPDIR}"' EXIT
+
+# ensure we can incrementally set a fact via loop, with fact injection on or off
+ANSIBLE_INJECT_FACT_VARS=0 ansible-playbook -i inventory incremental.yml
+ANSIBLE_INJECT_FACT_VARS=1 ansible-playbook -i inventory incremental.yml
+
+# ensure we don't have spurious warnings due to clean_facts
+ansible-playbook -i inventory nowarn_clean_facts.yml | grep -F '[WARNING]: Removed restricted key from module data: ansible_ssh_common_args' && exit 1
+
+# test the fact caching feature
+export ANSIBLE_CACHE_PLUGIN=jsonfile ANSIBLE_CACHE_PLUGIN_CONNECTION="${MYTMPDIR}" ANSIBLE_CACHE_PLUGIN_PREFIX=prefix_
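+# (the same setup could live in ansible.cfg under [defaults] via the
+# fact_caching, fact_caching_connection and fact_caching_prefix options)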
+ansible-playbook -i inventory "$@" set_fact_cached_1.yml
+ansible-playbook -i inventory "$@" set_fact_cached_2.yml
+
+# check contents of the fact cache directory before flushing it
+if [[ "$(find "${MYTMPDIR}" -type f)" != $MYTMPDIR/prefix_* ]]; then
+ echo "Unexpected cache file"
+ exit 1
+fi
+
+ansible-playbook -i inventory --flush-cache "$@" set_fact_no_cache.yml
+
+# Test boolean conversions in set_fact
+ansible-playbook -v set_fact_bool_conv.yml
+ANSIBLE_JINJA2_NATIVE=1 ansible-playbook -v set_fact_bool_conv_jinja2_native.yml
diff --git a/test/integration/targets/set_fact/set_fact_bool_conv.yml b/test/integration/targets/set_fact/set_fact_bool_conv.yml
new file mode 100644
index 00000000..8df249be
--- /dev/null
+++ b/test/integration/targets/set_fact/set_fact_bool_conv.yml
@@ -0,0 +1,38 @@
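+# Without native Jinja2, set_fact coerces boolean-looking values, so both
+# the quoted "yes" literal and the templated "no" strings below come out
+# as booleans; the jinja2_native variant of this playbook asserts otherwise.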
+- hosts: localhost
+ gather_facts: false
+ vars:
+ string_var: "no"
+ tasks:
+ - set_fact:
+ this_is_string: "yes"
+ this_is_not_string: yes
+ this_is_also_string: "{{ string_var }}"
+ this_is_another_string: !!str "{% set thing = '' + string_var + '' %}{{ thing }}"
+ this_is_more_strings: '{{ string_var + "" }}'
+
+ - assert:
+ that:
+ - string_var == 'no'
+ - this_is_string == True
+ - this_is_not_string == True
+ - this_is_also_string == False
+ - this_is_another_string == False
+ - this_is_more_strings == False
+
+ - ansible.builtin.set_fact:
+ this_is_string_fqcn: "yes"
+ this_is_not_string_fqcn: yes
+ this_is_also_string_fqcn: "{{ string_var }}"
+ this_is_another_string_fqcn: !!str "{% set thing = '' + string_var + '' %}{{ thing }}"
+ this_is_more_strings_fqcn: '{{ string_var + "" }}'
+
+ - assert:
+ that:
+ - this_is_string_fqcn == True
+ - this_is_not_string_fqcn == True
+ - this_is_also_string_fqcn == False
+ - this_is_another_string_fqcn == False
+ - this_is_more_strings_fqcn == False
diff --git a/test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml b/test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml
new file mode 100644
index 00000000..2642599f
--- /dev/null
+++ b/test/integration/targets/set_fact/set_fact_bool_conv_jinja2_native.yml
@@ -0,0 +1,37 @@
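+# With ANSIBLE_JINJA2_NATIVE=1, quoted and templated strings keep their
+# string type; only the unquoted YAML boolean remains a real boolean.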
+- hosts: localhost
+ gather_facts: false
+ vars:
+ string_var: "no"
+ tasks:
+ - set_fact:
+ this_is_string: "yes"
+ this_is_not_string: yes
+ this_is_also_string: "{{ string_var }}"
+ this_is_another_string: !!str "{% set thing = '' + string_var + '' %}{{ thing }}"
+ this_is_more_strings: '{{ string_var + "" }}'
+
+ - assert:
+ that:
+ - string_var == 'no'
+ - this_is_string == 'yes'
+ - this_is_not_string == True
+ - this_is_also_string == 'no'
+ - this_is_another_string == 'no'
+ - this_is_more_strings == 'no'
+
+ - ansible.builtin.set_fact:
+ this_is_string_fqcn: "yes"
+ this_is_not_string_fqcn: yes
+ this_is_also_string_fqcn: "{{ string_var }}"
+ this_is_another_string_fqcn: !!str "{% set thing = '' + string_var + '' %}{{ thing }}"
+ this_is_more_strings_fqcn: '{{ string_var + "" }}'
+
+ - assert:
+ that:
+ - this_is_string_fqcn == 'yes'
+ - this_is_not_string_fqcn == True
+ - this_is_also_string_fqcn == 'no'
+ - this_is_another_string_fqcn == 'no'
+ - this_is_more_strings_fqcn == 'no'
diff --git a/test/integration/targets/set_fact/set_fact_cached_1.yml b/test/integration/targets/set_fact/set_fact_cached_1.yml
new file mode 100644
index 00000000..01c9f1e0
--- /dev/null
+++ b/test/integration/targets/set_fact/set_fact_cached_1.yml
@@ -0,0 +1,326 @@
+---
+- name: the first play
+ hosts: localhost
+ tasks:
+ - name: show foobar fact before
+ debug:
+ var: ansible_foobar
+
+ - name: set a persistent fact foobar
+ set_fact:
+ ansible_foobar: 'foobar_from_set_fact_cacheable'
+ cacheable: true
+
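+ # cacheable: true stores the fact in the configured fact cache and gives
+ # it fact precedence, which the later plays in this file depend on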
+ - name: show foobar fact after
+ debug:
+ var: ansible_foobar
+
+ - name: assert ansible_foobar is correct value
+ assert:
+ that:
+ - ansible_foobar == 'foobar_from_set_fact_cacheable'
+
+ - name: set a non persistent fact that will not be cached
+ set_fact:
+ ansible_foobar_not_cached: 'this_should_not_be_cached'
+
+ - name: show ansible_foobar_not_cached fact after being set
+ debug:
+ var: ansible_foobar_not_cached
+
+ - name: assert ansible_foobar_not_cached is correct value
+ assert:
+ that:
+ - ansible_foobar_not_cached == 'this_should_not_be_cached'
+
+ - name: set another non persistent fact that will not be cached
+ set_fact: "cacheable=no fact_not_cached='this_should_not_be_cached!'"
+
+ - name: show fact_not_cached fact after being set
+ debug:
+ var: fact_not_cached
+
+ - name: assert fact_not_cached is correct value
+ assert:
+ that:
+ - fact_not_cached == 'this_should_not_be_cached!'
+
+ - name: show foobar fact before (FQCN)
+ debug:
+ var: ansible_foobar_fqcn
+
+ - name: set a persistent fact foobar (FQCN)
+ set_fact:
+ ansible_foobar_fqcn: 'foobar_fqcn_from_set_fact_cacheable'
+ cacheable: true
+
+ - name: show foobar fact after (FQCN)
+ debug:
+ var: ansible_foobar_fqcn
+
+ - name: assert ansible_foobar_fqcn is correct value (FQCN)
+ assert:
+ that:
+ - ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable'
+
+ - name: set a non persistent fact that will not be cached (FQCN)
+ set_fact:
+ ansible_foobar_not_cached_fqcn: 'this_should_not_be_cached'
+
+ - name: show ansible_foobar_not_cached_fqcn fact after being set (FQCN)
+ debug:
+ var: ansible_foobar_not_cached_fqcn
+
+ - name: assert ansible_foobar_not_cached_fqcn is correct value (FQCN)
+ assert:
+ that:
+ - ansible_foobar_not_cached_fqcn == 'this_should_not_be_cached'
+
+ - name: set another non persistent fact that will not be cached (FQCN)
+ set_fact: "cacheable=no fact_not_cached_fqcn='this_should_not_be_cached!'"
+
+ - name: show fact_not_cached_fqcn fact after being set (FQCN)
+ debug:
+ var: fact_not_cached_fqcn
+
+ - name: assert fact_not_cached_fqcn is correct value (FQCN)
+ assert:
+ that:
+ - fact_not_cached_fqcn == 'this_should_not_be_cached!'
+
+- name: the second play
+ hosts: localhost
+ tasks:
+ - name: show foobar fact after second play
+ debug:
+ var: ansible_foobar
+
+ - name: assert ansible_foobar is correct value
+ assert:
+ that:
+ - ansible_foobar == 'foobar_from_set_fact_cacheable'
+
+ - name: show foobar fact after second play (FQCN)
+ debug:
+ var: ansible_foobar_fqcn
+
+ - name: assert ansible_foobar is correct value (FQCN)
+ assert:
+ that:
+ - ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable'
+
+- name: show ansible_nodename and ansible_os_family
+ hosts: localhost
+ tasks:
+ - name: show nodename fact after second play
+ debug:
+ var: ansible_nodename
+ - name: show os_family fact after second play (FQCN)
+ debug:
+ var: ansible_os_family
+
+- name: show ansible_nodename and ansible_os_family overridden with var
+ hosts: localhost
+ vars:
+ ansible_nodename: 'nodename_from_play_vars'
+ ansible_os_family: 'os_family_from_play_vars'
+ tasks:
+ - name: show nodename fact after second play
+ debug:
+ var: ansible_nodename
+ - name: show os_family fact after second play (FQCN)
+ debug:
+ var: ansible_os_family
+
+- name: verify ansible_nodename from vars overrides the fact
+ hosts: localhost
+ vars:
+ ansible_nodename: 'nodename_from_play_vars'
+ ansible_os_family: 'os_family_from_play_vars'
+ tasks:
+ - name: show nodename fact
+ debug:
+ var: ansible_nodename
+
+ - name: assert ansible_nodename is correct value
+ assert:
+ that:
+ - ansible_nodename == 'nodename_from_play_vars'
+
+ - name: show os_family fact (FQCN)
+ debug:
+ var: ansible_os_family
+
+ - name: assert ansible_os_family is correct value (FQCN)
+ assert:
+ that:
+ - ansible_os_family == 'os_family_from_play_vars'
+
+- name: set_fact ansible_nodename and ansible_os_family
+ hosts: localhost
+ tasks:
+ - name: set a persistent fact nodename
+ set_fact:
+ ansible_nodename: 'nodename_from_set_fact_cacheable'
+
+ - name: show nodename fact
+ debug:
+ var: ansible_nodename
+
+ - name: assert ansible_nodename is correct value
+ assert:
+ that:
+ - ansible_nodename == 'nodename_from_set_fact_cacheable'
+
+ - name: set a persistent fact os_family (FQCN)
+ ansible.builtin.set_fact:
+ ansible_os_family: 'os_family_from_set_fact_cacheable'
+
+ - name: show os_family fact (FQCN)
+ debug:
+ var: ansible_os_family
+
+ - name: assert ansible_os_family is correct value (FQCN)
+ assert:
+ that:
+ - ansible_os_family == 'os_family_from_set_fact_cacheable'
+
+- name: verify that set_fact ansible_xxx non_cacheable overrides ansible_xxx in vars
+ hosts: localhost
+ vars:
+ ansible_nodename: 'nodename_from_play_vars'
+ ansible_os_family: 'os_family_from_play_vars'
+ tasks:
+ - name: show nodename fact
+ debug:
+ var: ansible_nodename
+
+ - name: assert ansible_nodename is correct value
+ assert:
+ that:
+ - ansible_nodename == 'nodename_from_set_fact_cacheable'
+
+ - name: show os_family fact (FQCN)
+ debug:
+ var: ansible_os_family
+
+ - name: assert ansible_os_family is correct value (FQCN)
+ assert:
+ that:
+ - ansible_os_family == 'os_family_from_set_fact_cacheable'
+
+- name: verify that set_fact_cacheable in previous play overrides ansible_xxx in vars
+ hosts: localhost
+ vars:
+ ansible_nodename: 'nodename_from_play_vars'
+ ansible_os_family: 'os_family_from_play_vars'
+ tasks:
+ - name: show nodename fact
+ debug:
+ var: ansible_nodename
+
+ - name: assert ansible_nodename is correct value
+ assert:
+ that:
+ - ansible_nodename == 'nodename_from_set_fact_cacheable'
+
+ - name: show os_family fact (FQCN)
+ debug:
+ var: ansible_os_family
+
+ - name: assert ansible_os_family is correct value (FQCN)
+ assert:
+ that:
+ - ansible_os_family == 'os_family_from_set_fact_cacheable'
+
+- name: set_fact ansible_nodename and ansible_os_family cacheable
+ hosts: localhost
+ tasks:
+ - name: set a persistent fact nodename
+ set_fact:
+ ansible_nodename: 'nodename_from_set_fact_cacheable'
+ cacheable: true
+
+ - name: show nodename fact
+ debug:
+ var: ansible_nodename
+
+ - name: assert ansible_nodename is correct value
+ assert:
+ that:
+ - ansible_nodename == 'nodename_from_set_fact_cacheable'
+
+ - name: set a persistent fact os_family (FQCN)
+ ansible.builtin.set_fact:
+ ansible_os_family: 'os_family_from_set_fact_cacheable'
+ cacheable: true
+
+ - name: show os_family fact (FQCN)
+ debug:
+ var: ansible_os_family
+
+ - name: assert ansible_os_family is correct value (FQCN)
+ assert:
+ that:
+ - ansible_os_family == 'os_family_from_set_fact_cacheable'
+
+
+- name: verify that set_fact_cacheable in previous play overrides ansible_xxx in vars
+ hosts: localhost
+ vars:
+ ansible_nodename: 'nodename_from_play_vars'
+ ansible_os_family: 'os_family_from_play_vars'
+ tasks:
+ - name: show nodename fact
+ debug:
+ var: ansible_nodename
+
+ - name: assert ansible_nodename is correct value
+ assert:
+ that:
+ - ansible_nodename == 'nodename_from_set_fact_cacheable'
+
+ - name: show os_family fact (FQCN)
+ debug:
+ var: ansible_os_family
+
+ - name: assert ansible_os_family is correct value (FQCN)
+ assert:
+ that:
+ - ansible_os_family == 'os_family_from_set_fact_cacheable'
+
+- name: the fourth play
+ hosts: localhost
+ vars:
+ ansible_foobar: 'foobar_from_play_vars'
+ ansible_foobar_fqcn: 'foobar_fqcn_from_play_vars'
+ tasks:
+ - name: show example fact
+ debug:
+ var: ansible_example
+
+ - name: set a persistent fact example
+ set_fact:
+ ansible_example: 'foobar_from_set_fact_cacheable'
+ cacheable: true
+
+ - name: assert ansible_example is correct value
+ assert:
+ that:
+ - ansible_example == 'foobar_from_set_fact_cacheable'
+
+ - name: show example fact (FQCN)
+ debug:
+ var: ansible_example_fqcn
+
+ - name: set a persistent fact example (FQCN)
+ set_fact:
+ ansible_example_fqcn: 'foobar_fqcn_from_set_fact_cacheable'
+ cacheable: true
+
+ - name: assert ansible_example_fqcn is correct value (FQCN)
+ assert:
+ that:
+ - ansible_example_fqcn == 'foobar_fqcn_from_set_fact_cacheable'
diff --git a/test/integration/targets/set_fact/set_fact_cached_2.yml b/test/integration/targets/set_fact/set_fact_cached_2.yml
new file mode 100644
index 00000000..7df92244
--- /dev/null
+++ b/test/integration/targets/set_fact/set_fact_cached_2.yml
@@ -0,0 +1,57 @@
+---
+- name: A second playbook run with fact caching enabled
+ hosts: localhost
+ tasks:
+ - name: show ansible_foobar fact
+ debug:
+ var: ansible_foobar
+
+ - name: assert ansible_foobar is correct value when read from cache
+ assert:
+ that:
+ - ansible_foobar == 'foobar_from_set_fact_cacheable'
+
+ - name: show ansible_foobar_not_cached fact
+ debug:
+ var: ansible_foobar_not_cached
+
+ - name: assert ansible_foobar_not_cached is not cached
+ assert:
+ that:
+ - ansible_foobar_not_cached is undefined
+
+ - name: show fact_not_cached fact
+ debug:
+ var: fact_not_cached
+
+ - name: assert fact_not_cached is not cached
+ assert:
+ that:
+ - fact_not_cached is undefined
+
+ - name: show ansible_foobar_fqcn fact (FQCN)
+ debug:
+ var: ansible_foobar_fqcn
+
+ - name: assert ansible_foobar_fqcn is correct value when read from cache (FQCN)
+ assert:
+ that:
+ - ansible_foobar_fqcn == 'foobar_fqcn_from_set_fact_cacheable'
+
+ - name: show ansible_foobar_fqcn_not_cached fact (FQCN)
+ debug:
+ var: ansible_foobar_fqcn_not_cached
+
+ - name: assert ansible_foobar_fqcn_not_cached is not cached (FQCN)
+ assert:
+ that:
+ - ansible_foobar_fqcn_not_cached is undefined
+
+ - name: show fact_not_cached_fqcn fact (FQCN)
+ debug:
+ var: fact_not_cached_fqcn
+
+ - name: assert fact_not_cached_fqcn is not cached (FQCN)
+ assert:
+ that:
+ - fact_not_cached_fqcn is undefined
diff --git a/test/integration/targets/set_fact/set_fact_no_cache.yml b/test/integration/targets/set_fact/set_fact_no_cache.yml
new file mode 100644
index 00000000..f5a99792
--- /dev/null
+++ b/test/integration/targets/set_fact/set_fact_no_cache.yml
@@ -0,0 +1,39 @@
+---
+- name: Running with fact caching enabled but with cache flushed
+ hosts: localhost
+ tasks:
+ - name: show ansible_foobar fact
+ debug:
+ var: ansible_foobar
+
+ - name: assert ansible_foobar is correct value
+ assert:
+ that:
+ - ansible_foobar is undefined
+
+ - name: show ansible_foobar_not_cached fact
+ debug:
+ var: ansible_foobar_not_cached
+
+ - name: assert ansible_foobar_not_cached is not cached
+ assert:
+ that:
+ - ansible_foobar_not_cached is undefined
+
+ - name: show ansible_foobar fact (FQCN)
+ debug:
+ var: ansible_foobar_fqcn
+
+ - name: assert ansible_foobar is correct value (FQCN)
+ assert:
+ that:
+ - ansible_foobar_fqcn is undefined
+
+ - name: show ansible_foobar_not_cached fact (FQCN)
+ debug:
+ var: ansible_foobar_fqcn_not_cached
+
+ - name: assert ansible_foobar_not_cached is not cached (FQCN)
+ assert:
+ that:
+ - ansible_foobar_fqcn_not_cached is undefined
diff --git a/test/integration/targets/setup_cron/defaults/main.yml b/test/integration/targets/setup_cron/defaults/main.yml
new file mode 100644
index 00000000..e4b0123d
--- /dev/null
+++ b/test/integration/targets/setup_cron/defaults/main.yml
@@ -0,0 +1 @@
+remote_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
diff --git a/test/integration/targets/setup_cron/tasks/main.yml b/test/integration/targets/setup_cron/tasks/main.yml
new file mode 100644
index 00000000..93dcefa5
--- /dev/null
+++ b/test/integration/targets/setup_cron/tasks/main.yml
@@ -0,0 +1,72 @@
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', search) }}"
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution | lower }}.yml'
+ - '{{ ansible_os_family | lower }}.yml'
+ - '{{ ansible_system | lower }}.yml'
+ - default.yml
+ paths:
+ - vars
+
+- name: install cron package
+ package:
+ name: '{{ cron_pkg }}'
+ when: cron_pkg | default(false, true)
+ register: cron_package_installed
+ until: cron_package_installed is success
+
+- when: faketime_pkg | default(false, true)
+ block:
+ - name: install cron and faketime packages
+ package:
+ name: '{{ faketime_pkg }}'
+ register: faketime_package_installed
+ until: faketime_package_installed is success
+
+ - name: Find libfaketime path
+ shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1'
+ args:
+ warn: false
+ register: libfaketime_path
+
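+ # FAKETIME="+0y x10" keeps the current date but runs the clock ten times
+ # faster, so scheduled cron jobs fire quickly during the tests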
+ - when: ansible_service_mgr == 'systemd'
+ block:
+ - name: create directory for cron drop-in file
+ file:
+ path: '/etc/systemd/system/{{ cron_service }}.service.d'
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
+
+ - name: Use faketime with cron service
+ copy:
+ content: |-
+ [Service]
+ Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }}
+ Environment="FAKETIME=+0y x10"
+ Environment=RANDOM_DELAY=0
+ dest: '/etc/systemd/system/{{ cron_service }}.service.d/faketime.conf'
+ owner: root
+ group: root
+ mode: 0644
+
+ - when: ansible_system == 'FreeBSD'
+ name: Use faketime with cron service
+ copy:
+ content: |-
+ cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"'
+ dest: '/etc/rc.conf.d/cron'
+ owner: root
+ group: wheel
+ mode: 0644
+
+- name: enable cron service
+ service:
+ daemon-reload: "{{ (ansible_service_mgr == 'systemd') | ternary(true, omit) }}"
+ name: '{{ cron_service }}'
+ state: restarted
diff --git a/test/integration/targets/setup_cron/vars/debian.yml b/test/integration/targets/setup_cron/vars/debian.yml
new file mode 100644
index 00000000..cd04871c
--- /dev/null
+++ b/test/integration/targets/setup_cron/vars/debian.yml
@@ -0,0 +1,3 @@
+cron_pkg: cron
+cron_service: cron
+list_pkg_files: dpkg -L
diff --git a/test/integration/targets/setup_cron/vars/default.yml b/test/integration/targets/setup_cron/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/setup_cron/vars/default.yml
diff --git a/test/integration/targets/setup_cron/vars/fedora.yml b/test/integration/targets/setup_cron/vars/fedora.yml
new file mode 100644
index 00000000..b80a51b5
--- /dev/null
+++ b/test/integration/targets/setup_cron/vars/fedora.yml
@@ -0,0 +1,3 @@
+cron_pkg: cronie
+cron_service: crond
+list_pkg_files: rpm -ql
diff --git a/test/integration/targets/setup_cron/vars/freebsd.yml b/test/integration/targets/setup_cron/vars/freebsd.yml
new file mode 100644
index 00000000..41ed4493
--- /dev/null
+++ b/test/integration/targets/setup_cron/vars/freebsd.yml
@@ -0,0 +1,3 @@
+cron_pkg:
+cron_service: cron
+list_pkg_files: pkg info --list-files
diff --git a/test/integration/targets/setup_cron/vars/redhat.yml b/test/integration/targets/setup_cron/vars/redhat.yml
new file mode 100644
index 00000000..2dff13de
--- /dev/null
+++ b/test/integration/targets/setup_cron/vars/redhat.yml
@@ -0,0 +1,4 @@
+cron_pkg: cronie
+cron_service: crond
+faketime_pkg:
+list_pkg_files: rpm -ql
diff --git a/test/integration/targets/setup_cron/vars/suse.yml b/test/integration/targets/setup_cron/vars/suse.yml
new file mode 100644
index 00000000..cd3677a6
--- /dev/null
+++ b/test/integration/targets/setup_cron/vars/suse.yml
@@ -0,0 +1,3 @@
+cron_pkg: cron
+cron_service: cron
+list_pkg_files: rpm -ql
diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0 b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0
new file mode 100644
index 00000000..4206fbab
--- /dev/null
+++ b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.0
@@ -0,0 +1,10 @@
+Section: misc
+Priority: optional
+Standards-Version: 2.3.3
+
+Package: foo
+Version: 1.0.0
+Section: system
+Maintainer: John Doe <john@doe.com>
+Architecture: all
+Description: Dummy package
diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1 b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1
new file mode 100644
index 00000000..021f4d52
--- /dev/null
+++ b/test/integration/targets/setup_deb_repo/files/package_specs/foo-1.0.1
@@ -0,0 +1,10 @@
+Section: misc
+Priority: optional
+Standards-Version: 2.3.3
+
+Package: foo
+Version: 1.0.1
+Section: system
+Maintainer: John Doe <john@doe.com>
+Architecture: all
+Description: Dummy package
diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0 b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0
new file mode 100644
index 00000000..0da0348f
--- /dev/null
+++ b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.0
@@ -0,0 +1,11 @@
+Section: misc
+Priority: optional
+Standards-Version: 2.3.3
+
+Package: foobar
+Version: 1.0.0
+Section: system
+Depends: foo
+Maintainer: John Doe <john@doe.com>
+Architecture: all
+Description: Dummy package
diff --git a/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1 b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1
new file mode 100644
index 00000000..b9fa8303
--- /dev/null
+++ b/test/integration/targets/setup_deb_repo/files/package_specs/foobar-1.0.1
@@ -0,0 +1,10 @@
+Section: misc
+Priority: optional
+Standards-Version: 2.3.3
+
+Package: foobar
+Version: 1.0.1
+Section: system
+Maintainer: John Doe <john@doe.com>
+Architecture: all
+Description: Dummy package
diff --git a/test/integration/targets/setup_deb_repo/meta/main.yml b/test/integration/targets/setup_deb_repo/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/setup_deb_repo/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/setup_deb_repo/tasks/main.yml b/test/integration/targets/setup_deb_repo/tasks/main.yml
new file mode 100644
index 00000000..49f68a2c
--- /dev/null
+++ b/test/integration/targets/setup_deb_repo/tasks/main.yml
@@ -0,0 +1,58 @@
+- block:
+ - name: Install needed packages
+ apt:
+ name: "{{ item }}"
+ with_items:
+ - dpkg-dev
+ - equivs
+ - libfile-fcntllock-perl # to silence warning by equivs-build
+
+ - set_fact:
+ repodir: /tmp/repo/
+
+ - name: Create repo dir
+ file:
+ path: "{{ repodir }}"
+ state: directory
+ mode: 0755
+
+ - name: Copy package specs to remote
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/{{ item | basename }}"
+ with_fileglob:
+ - "files/package_specs/*"
+
+ - name: Create deb files
+ shell: "equivs-build {{ remote_tmp_dir }}/{{ item | basename }}"
+ args:
+ chdir: "{{ repodir }}"
+ with_fileglob:
+ - "files/package_specs/*"
+
+ - name: Create repo
+ shell: dpkg-scanpackages --multiversion . /dev/null | gzip -9c > Packages.gz
+ args:
+ chdir: "{{ repodir }}"
+
+ # Can't use apt_repository as it doesn't expose a trusted=yes option
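+ # The 'deb [trusted=yes] file:... ./' line defines a flat repository: the
+ # .deb files and the generated Packages.gz live directly in the repo root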
+ - name: Install the repo
+ copy:
+ content: deb [trusted=yes] file:{{ repodir }} ./
+ dest: /etc/apt/sources.list.d/file_tmp_repo.list
+
+ # Need to uncomment the deb-src for the universe component for build-dep state
+ - name: Ensure deb-src for the universe component
+ lineinfile:
+ path: /etc/apt/sources.list
+ backrefs: True
+ regexp: ^#\s*deb-src http://archive\.ubuntu\.com/ubuntu/ (\w*){{ item }} universe$
+ line: deb-src http://archive.ubuntu.com/ubuntu \1{{ item }} universe
+ state: present
+ with_items:
+ - ''
+ - -updates
+
+ when: ansible_distribution in ['Ubuntu', 'Debian']
diff --git a/test/integration/targets/setup_epel/tasks/main.yml b/test/integration/targets/setup_epel/tasks/main.yml
new file mode 100644
index 00000000..c279810e
--- /dev/null
+++ b/test/integration/targets/setup_epel/tasks/main.yml
@@ -0,0 +1,5 @@
+- name: Install EPEL
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+ disable_gpg_check: true
+ when: ansible_facts.distribution in ['RedHat', 'CentOS']
diff --git a/test/integration/targets/setup_gnutar/handlers/main.yml b/test/integration/targets/setup_gnutar/handlers/main.yml
new file mode 100644
index 00000000..d3fa7c27
--- /dev/null
+++ b/test/integration/targets/setup_gnutar/handlers/main.yml
@@ -0,0 +1,6 @@
+- name: uninstall gnu-tar
+ command: brew uninstall gnu-tar
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: True
diff --git a/test/integration/targets/setup_gnutar/tasks/main.yml b/test/integration/targets/setup_gnutar/tasks/main.yml
new file mode 100644
index 00000000..b7d841cd
--- /dev/null
+++ b/test/integration/targets/setup_gnutar/tasks/main.yml
@@ -0,0 +1,18 @@
+- when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+
+ - command: brew install gnu-tar
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: True
+ notify:
+ - uninstall gnu-tar
diff --git a/test/integration/targets/setup_nobody/handlers/main.yml b/test/integration/targets/setup_nobody/handlers/main.yml
new file mode 100644
index 00000000..2d02efbb
--- /dev/null
+++ b/test/integration/targets/setup_nobody/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: remove nobody user
+ user:
+ name: nobody
+ state: absent
diff --git a/test/integration/targets/setup_nobody/tasks/main.yml b/test/integration/targets/setup_nobody/tasks/main.yml
new file mode 100644
index 00000000..cc0e4fe8
--- /dev/null
+++ b/test/integration/targets/setup_nobody/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+- name: create nobody user
+ user:
+ name: nobody
+ create_home: no
+ state: present
+ notify: remove nobody user
diff --git a/test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml b/test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml
new file mode 100644
index 00000000..0c7b9e82
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-CentOS-6-python-2.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 2 on CentOS 6
+ yum:
+ name: python-paramiko
diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml
new file mode 100644
index 00000000..dec5b548
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-2.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 2 on FreeBSD 11
+ pkgng:
+ name: py27-paramiko
diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml
new file mode 100644
index 00000000..b8ca6c9e
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-FreeBSD-11-python-3.yml
@@ -0,0 +1,9 @@
+- name: Downgrade to pip version 18.1 to work around a PEP 517 virtualenv bug
+ # pip 19.0.0 added support for PEP 517
+ # versions as recent as 19.0.3 fail to install paramiko in a virtualenv due to a BackendUnavailable exception
+ # installation without a virtualenv succeeds
+ pip:
+ name: pip==18.1
+- name: Install Paramiko for Python 3 on FreeBSD 11
+ pip: # no py36-paramiko package exists for FreeBSD 11
+ name: paramiko
diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml
new file mode 100644
index 00000000..29e78969
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-2.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 2 on FreeBSD 12
+ pkgng:
+ name: py27-paramiko
diff --git a/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml
new file mode 100644
index 00000000..2aa7b500
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-FreeBSD-12-python-3.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 3 on FreeBSD 12
+ pkgng:
+ name: py36-paramiko
diff --git a/test/integration/targets/setup_paramiko/install-MacOSX-10-python-3.yml b/test/integration/targets/setup_paramiko/install-MacOSX-10-python-3.yml
new file mode 100644
index 00000000..a156f806
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-MacOSX-10-python-3.yml
@@ -0,0 +1,6 @@
+- name: Install Paramiko for Python 3 on MacOS
+ pip: # no homebrew package manager in core, just use pip
+ name: paramiko
+ environment:
+ # Not sure why this fixes the test, but it does.
+ SETUPTOOLS_USE_DISTUTILS: stdlib
diff --git a/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml b/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml
new file mode 100644
index 00000000..dbc0f65c
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 3 on RHEL 8
+ pip: # no python3-paramiko package exists for RHEL 8
+ name: paramiko
diff --git a/test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml b/test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml
new file mode 100644
index 00000000..8f760740
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-Ubuntu-16-python-2.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 2 on Ubuntu 16
+ apt:
+ name: python-paramiko
diff --git a/test/integration/targets/setup_paramiko/install-fail.yml b/test/integration/targets/setup_paramiko/install-fail.yml
new file mode 100644
index 00000000..b4ba4640
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-fail.yml
@@ -0,0 +1,7 @@
+- name: Install Paramiko
+ fail:
+ msg: "Install of Paramiko on distribution '{{ ansible_distribution }}' with major version '{{ ansible_distribution_major_version }}'
+ with package manager '{{ ansible_pkg_mgr }}' on Python {{ ansible_python.version.major }} has not been implemented.
+ Use native OS packages if available, otherwise use pip.
+ Be sure to uninstall automatically installed dependencies when possible.
+ Do not implement a generic fallback to pip, as that would allow distributions not yet configured to go undetected."
diff --git a/test/integration/targets/setup_paramiko/install-python-2.yml b/test/integration/targets/setup_paramiko/install-python-2.yml
new file mode 100644
index 00000000..be337a16
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-python-2.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 2
+ package:
+ name: python2-paramiko
diff --git a/test/integration/targets/setup_paramiko/install-python-3.yml b/test/integration/targets/setup_paramiko/install-python-3.yml
new file mode 100644
index 00000000..ac2a1a2b
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install-python-3.yml
@@ -0,0 +1,3 @@
+- name: Install Paramiko for Python 3
+ package:
+ name: python3-paramiko
diff --git a/test/integration/targets/setup_paramiko/install.yml b/test/integration/targets/setup_paramiko/install.yml
new file mode 100644
index 00000000..194bd51f
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/install.yml
@@ -0,0 +1,17 @@
+- hosts: localhost
+ tasks:
+ - name: Detect Paramiko
+ detect_paramiko:
+ register: detect_paramiko
+ - name: Persist Result
+ copy:
+ content: "{{ detect_paramiko }}"
+ dest: "{{ lookup('env', 'OUTPUT_DIR') }}/detect-paramiko.json"
+ - name: Install Paramiko
+ when: not detect_paramiko.found
+ include_tasks: "{{ item }}"
+ with_first_found:
+ - "install-{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml"
+ - "install-{{ ansible_os_family }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml"
+ - "install-python-{{ ansible_python.version.major }}.yml"
+ - "install-fail.yml"
diff --git a/test/integration/targets/setup_paramiko/inventory b/test/integration/targets/setup_paramiko/inventory
new file mode 100644
index 00000000..8618c725
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/inventory
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/setup_paramiko/library/detect_paramiko.py b/test/integration/targets/setup_paramiko/library/detect_paramiko.py
new file mode 100644
index 00000000..e3a81582
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/library/detect_paramiko.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+"""Ansible module to detect the presence of both the normal and Ansible-specific versions of Paramiko."""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import paramiko
+except ImportError:
+ paramiko = None
+
+try:
+ import ansible_paramiko
+except ImportError:
+ ansible_paramiko = None
+
+
+def main():
+ module = AnsibleModule(argument_spec={})
+ module.exit_json(**dict(
+ found=bool(paramiko or ansible_paramiko),
+ paramiko=bool(paramiko),
+ ansible_paramiko=bool(ansible_paramiko),
+ ))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/setup_paramiko/setup.sh b/test/integration/targets/setup_paramiko/setup.sh
new file mode 100644
index 00000000..64b935cd
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/setup.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Usage: source ../setup_paramiko/setup.sh
+
+set -eux
+
+source virtualenv.sh # for pip installs, if needed, otherwise unused
+ansible-playbook ../setup_paramiko/install.yml -i ../setup_paramiko/inventory "$@"
+trap 'ansible-playbook ../setup_paramiko/uninstall.yml -i ../setup_paramiko/inventory "$@"' EXIT
diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml
new file mode 100644
index 00000000..d27f831c
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-2.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 2 on FreeBSD 11
+ pkgng:
+ name: py27-paramiko
+ state: absent
diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml
new file mode 100644
index 00000000..33f292e8
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-11-python-3.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 3 on FreeBSD 11
+ pip: # no py36-paramiko package exists for FreeBSD 11
+ name: paramiko
+ state: absent
diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml
new file mode 100644
index 00000000..79352487
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-2.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 2 on FreeBSD 12
+ pkgng:
+ name: py27-paramiko
+ state: absent
diff --git a/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml
new file mode 100644
index 00000000..46d26ca3
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-FreeBSD-12-python-3.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 3 on FreeBSD 12
+ pkgng:
+ name: py36-paramiko
+ state: absent
diff --git a/test/integration/targets/setup_paramiko/uninstall-MacOSX-10-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-MacOSX-10-python-3.yml
new file mode 100644
index 00000000..69a68e42
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-MacOSX-10-python-3.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 3 on MacOS
+ pip:
+ name: paramiko
+ state: absent
diff --git a/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml
new file mode 100644
index 00000000..d3a9493e
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 3 on RHEL 8
+ pip: # no python3-paramiko package exists for RHEL 8
+ name: paramiko
+ state: absent
diff --git a/test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml
new file mode 100644
index 00000000..507d94cc
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-apt-python-2.yml
@@ -0,0 +1,5 @@
+- name: Uninstall Paramiko for Python 2 using apt
+ apt:
+ name: python-paramiko
+ state: absent
+ autoremove: yes
diff --git a/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml
new file mode 100644
index 00000000..d51fc92e
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml
@@ -0,0 +1,5 @@
+- name: Uninstall Paramiko for Python 3 using apt
+ apt:
+ name: python3-paramiko
+ state: absent
+ autoremove: yes
diff --git a/test/integration/targets/setup_paramiko/uninstall-dnf.yml b/test/integration/targets/setup_paramiko/uninstall-dnf.yml
new file mode 100644
index 00000000..ff0d39ce
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-dnf.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko using dnf history undo
+ command: dnf history undo last --assumeyes
+ args:
+ warn: no
diff --git a/test/integration/targets/setup_paramiko/uninstall-fail.yml b/test/integration/targets/setup_paramiko/uninstall-fail.yml
new file mode 100644
index 00000000..bc5e12f1
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-fail.yml
@@ -0,0 +1,7 @@
+- name: Uninstall Paramiko
+ fail:
+ msg: "Uninstall of Paramiko on distribution '{{ ansible_distribution }}' with major version '{{ ansible_distribution_major_version }}'
+ with package manager '{{ ansible_pkg_mgr }}' on Python {{ ansible_python.version.major }} has not been implemented.
+ Use native OS packages if available, otherwise use pip.
+ Be sure to uninstall automatically installed dependencies when possible.
+ Do not implement a generic fallback to pip, as that would allow distributions not yet configured to go undetected."
diff --git a/test/integration/targets/setup_paramiko/uninstall-yum.yml b/test/integration/targets/setup_paramiko/uninstall-yum.yml
new file mode 100644
index 00000000..f293d229
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-yum.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko using yum history undo
+ command: yum history undo last --assumeyes
+ args:
+ warn: no
diff --git a/test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml b/test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml
new file mode 100644
index 00000000..6bdb3307
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-zypper-python-2.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 2 using zypper
+ command: zypper --quiet --non-interactive remove --clean-deps python2-paramiko
+ args:
+ warn: no
diff --git a/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml b/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml
new file mode 100644
index 00000000..cb0db941
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml
@@ -0,0 +1,4 @@
+- name: Uninstall Paramiko for Python 3 using zypper
+ command: zypper --quiet --non-interactive remove --clean-deps python3-paramiko
+ args:
+ warn: no
diff --git a/test/integration/targets/setup_paramiko/uninstall.yml b/test/integration/targets/setup_paramiko/uninstall.yml
new file mode 100644
index 00000000..46a16d91
--- /dev/null
+++ b/test/integration/targets/setup_paramiko/uninstall.yml
@@ -0,0 +1,19 @@
+- hosts: localhost
+ vars:
+ detect_paramiko: '{{ lookup("file", lookup("env", "OUTPUT_DIR") + "/detect-paramiko.json") | from_json }}'
+ tasks:
+ - name: Uninstall Paramiko and Verify Results
+ when: not detect_paramiko.found
+ block:
+ - name: Uninstall Paramiko
+ include_tasks: "{{ item }}"
+ with_first_found:
+ - "uninstall-{{ ansible_distribution }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml"
+ - "uninstall-{{ ansible_os_family }}-{{ ansible_distribution_major_version }}-python-{{ ansible_python.version.major }}.yml"
+ - "uninstall-{{ ansible_pkg_mgr }}-python-{{ ansible_python.version.major }}.yml"
+ - "uninstall-{{ ansible_pkg_mgr }}.yml"
+ - "uninstall-fail.yml"
+ - name: Verify Paramiko was uninstalled
+ detect_paramiko:
+ register: detect_paramiko
+ failed_when: detect_paramiko.found
diff --git a/test/integration/targets/setup_passlib/tasks/main.yml b/test/integration/targets/setup_passlib/tasks/main.yml
new file mode 100644
index 00000000..e4cd0d0b
--- /dev/null
+++ b/test/integration/targets/setup_passlib/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: Install passlib
+ pip:
+ name: passlib
+ state: present
diff --git a/test/integration/targets/setup_pexpect/tasks/main.yml b/test/integration/targets/setup_pexpect/tasks/main.yml
new file mode 100644
index 00000000..ef57fe6f
--- /dev/null
+++ b/test/integration/targets/setup_pexpect/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: Install pexpect
+ pip:
+ name: pexpect
+ state: present
diff --git a/test/integration/targets/setup_remote_constraints/aliases b/test/integration/targets/setup_remote_constraints/aliases
new file mode 100644
index 00000000..18cc100a
--- /dev/null
+++ b/test/integration/targets/setup_remote_constraints/aliases
@@ -0,0 +1 @@
+needs/file/test/lib/ansible_test/_data/requirements/constraints.txt
diff --git a/test/integration/targets/setup_remote_constraints/meta/main.yml b/test/integration/targets/setup_remote_constraints/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/setup_remote_constraints/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/setup_remote_constraints/tasks/main.yml b/test/integration/targets/setup_remote_constraints/tasks/main.yml
new file mode 100644
index 00000000..eee09ccf
--- /dev/null
+++ b/test/integration/targets/setup_remote_constraints/tasks/main.yml
@@ -0,0 +1,8 @@
+- name: record constraints.txt path on remote host
+ set_fact:
+ remote_constraints: "{{ remote_tmp_dir }}/constraints.txt"
+
+- name: copy constraints.txt to remote host
+ copy:
+ src: "{{ role_path }}/../../../lib/ansible_test/_data/requirements/constraints.txt"
+ dest: "{{ remote_constraints }}"
diff --git a/test/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/test/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 00000000..229037c8
--- /dev/null
+++ b/test/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 00000000..39872d74
--- /dev/null
+++ b/test/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: yes
diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 00000000..1e0f51b8
--- /dev/null
+++ b/test/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,11 @@
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 00000000..f8df391b
--- /dev/null
+++ b/test/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,10 @@
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files) }}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
new file mode 100644
index 00000000..1936b610
--- /dev/null
+++ b/test/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml
@@ -0,0 +1,4 @@
+- name: delete temporary directory (windows)
+ win_file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
diff --git a/test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
new file mode 100644
index 00000000..afedc4eb
--- /dev/null
+++ b/test/integration/targets/setup_remote_tmp_dir/tasks/windows.yml
@@ -0,0 +1,11 @@
+- name: create temporary directory
+ win_tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory (windows)
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/test/integration/targets/setup_rpm_repo/aliases b/test/integration/targets/setup_rpm_repo/aliases
new file mode 100644
index 00000000..65e83152
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/aliases
@@ -0,0 +1 @@
+needs/target/setup_epel
diff --git a/test/integration/targets/setup_rpm_repo/defaults/main.yml b/test/integration/targets/setup_rpm_repo/defaults/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/defaults/main.yml
diff --git a/test/integration/targets/setup_rpm_repo/files/comps.xml b/test/integration/targets/setup_rpm_repo/files/comps.xml
new file mode 100644
index 00000000..e9391829
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/files/comps.xml
@@ -0,0 +1,36 @@
+<!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">
+<comps>
+ <group>
+ <id>customgroup</id>
+ <name>Custom Group</name>
+ <description></description>
+ <default>false</default>
+ <uservisible>true</uservisible>
+ <display_order>1024</display_order>
+ <packagelist>
+ <packagereq type="mandatory">dinginessentail</packagereq>
+ </packagelist>
+ </group>
+
+ <group>
+ <id>customenvgroup</id>
+ <name>Custom Environment Group</name>
+ <description></description>
+ <default>false</default>
+ <uservisible>false</uservisible>
+ <display_order>1024</display_order>
+ <packagelist>
+ <packagereq type="mandatory">landsidescalping</packagereq>
+ </packagelist>
+ </group>
+
+ <environment>
+ <id>customenvgroup-environment</id>
+ <name>Custom Environment Group</name>
+ <description></description>
+ <display_order>1024</display_order>
+ <grouplist>
+ <groupid>customenvgroup</groupid>
+ </grouplist>
+ </environment>
+</comps>
diff --git a/test/integration/targets/setup_rpm_repo/files/create-repo.py b/test/integration/targets/setup_rpm_repo/files/create-repo.py
new file mode 100644
index 00000000..2033fdf8
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/files/create-repo.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+
+import sys
+from collections import namedtuple
+
+try:
+ from rpmfluff import SimpleRpmBuild
+ from rpmfluff import YumRepoBuild
+except ImportError:
+ from rpmfluff.rpmbuild import SimpleRpmBuild
+ from rpmfluff.yumrepobuild import YumRepoBuild
+
+try:
+ from rpmfluff import can_use_rpm_weak_deps
+except ImportError:
+ try:
+ from rpmfluff.utils import can_use_rpm_weak_deps
+ except ImportError:
+ can_use_rpm_weak_deps = None
+
+RPM = namedtuple('RPM', ['name', 'version', 'release', 'epoch', 'recommends'])
+
+
+SPECS = [
+ RPM('dinginessentail', '1.0', '1', None, None),
+ RPM('dinginessentail', '1.0', '2', '1', None),
+ RPM('dinginessentail', '1.1', '1', '1', None),
+ RPM('dinginessentail-olive', '1.0', '1', None, None),
+ RPM('dinginessentail-olive', '1.1', '1', None, None),
+ RPM('landsidescalping', '1.0', '1', None, None),
+ RPM('landsidescalping', '1.1', '1', None, None),
+ RPM('dinginessentail-with-weak-dep', '1.0', '1', None, ['dinginessentail-weak-dep']),
+ RPM('dinginessentail-weak-dep', '1.0', '1', None, None),
+]
+
+
+def main():
+ try:
+ arch = sys.argv[1]
+ except IndexError:
+ arch = 'x86_64'
+
+ pkgs = []
+ for spec in SPECS:
+ pkg = SimpleRpmBuild(spec.name, spec.version, spec.release, [arch])
+ pkg.epoch = spec.epoch
+
+ if spec.recommends:
+ # Skip packages that require weak deps but an older version of RPM is being used
+ if not can_use_rpm_weak_deps or not can_use_rpm_weak_deps():
+ continue
+
+ for recommend in spec.recommends:
+ pkg.add_recommends(recommend)
+
+ pkgs.append(pkg)
+
+ repo = YumRepoBuild(pkgs)
+ repo.make(arch)
+
+ for pkg in pkgs:
+ pkg.clean()
+
+ print(repo.repoDir)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/integration/targets/setup_rpm_repo/tasks/main.yml b/test/integration/targets/setup_rpm_repo/tasks/main.yml
new file mode 100644
index 00000000..14addf79
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/tasks/main.yml
@@ -0,0 +1,97 @@
+- block:
+ - name: Install epel repo which is missing on rhel-7 and is needed for rpmfluff
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('7', '==')
+
+ - name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+ - name: Install rpmfluff and deps
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ rpm_repo_packages }}"
+
+ - name: Install rpmfluff from pip on RHEL 8 and later
+ pip:
+ name: rpmfluff
+ when:
+ - ansible_facts.distribution in ['RedHat', 'CentOS']
+ - ansible_facts.distribution_major_version is version('8', '>=')
+
+ - name: Copy script for creating a repo
+ copy:
+ src: create-repo.py
+ dest: /tmp/create-repo.py
+ mode: 0755
+
+ - name: Create RPMs and put them into a repo
+ shell: "{{ansible_python_interpreter}} /tmp/create-repo.py {{ ansible_architecture }}"
+ register: repo
+
+ - set_fact:
+ repodir: "{{ repo.stdout_lines[-1] }}"
+
+ - name: Install the repo
+ yum_repository:
+ name: "fake-{{ ansible_architecture }}"
+ description: "fake-{{ ansible_architecture }}"
+ baseurl: "file://{{ repodir }}"
+ gpgcheck: no
+
+ - name: Copy comps.xml file
+ copy:
+ src: comps.xml
+ dest: "{{ repodir }}"
+ register: repodir_comps
+
+ - name: Register comps.xml on repo
+ command: createrepo -g {{ repodir_comps.dest | quote }} {{ repodir | quote }}
+
+ - name: Create RPMs and put them into a repo (i686)
+ shell: "{{ansible_python_interpreter}} /tmp/create-repo.py i686"
+ register: repo_i686
+
+ - set_fact:
+ repodir_i686: "{{ repo_i686.stdout_lines[-1] }}"
+
+ - name: Install the repo (i686)
+ yum_repository:
+ name: "fake-i686"
+ description: "fake-i686"
+ baseurl: "file://{{ repodir_i686 }}"
+ gpgcheck: no
+
+ - name: Create RPMs and put them into a repo (ppc64)
+ shell: "{{ansible_python_interpreter}} /tmp/create-repo.py ppc64"
+ register: repo_ppc64
+
+ - set_fact:
+ repodir_ppc64: "{{ repo_ppc64.stdout_lines[-1] }}"
+
+ - name: Install the repo (ppc64)
+ yum_repository:
+ name: "fake-ppc64"
+ description: "fake-ppc64"
+ baseurl: "file://{{ repodir_ppc64 }}"
+ gpgcheck: no
+
+ - set_fact:
+ repos:
+ - "fake-{{ ansible_architecture }}"
+ - "fake-i686"
+ - "fake-ppc64"
+
+ when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora']
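
Once this role has run, dependent tests can install straight from the fake repos; a minimal sketch using one of the package names baked into create-repo.py:

- name: install a test package from the fake repo (sketch)
  yum:
    name: dinginessentail
    state: present
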
diff --git a/test/integration/targets/setup_rpm_repo/vars/Fedora.yml b/test/integration/targets/setup_rpm_repo/vars/Fedora.yml
new file mode 100644
index 00000000..6e8fdaaa
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/vars/Fedora.yml
@@ -0,0 +1,3 @@
+rpm_repo_packages:
+ - "{{ 'python' ~ rpm_repo_python_major_version ~ '-rpmfluff' }}"
+ - createrepo
diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml
new file mode 100644
index 00000000..69615d2c
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-6.yml
@@ -0,0 +1,4 @@
+rpm_repo_packages:
+ - python-rpmfluff
+ - createrepo_c
+ - createrepo
diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml
new file mode 100644
index 00000000..69615d2c
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-7.yml
@@ -0,0 +1,4 @@
+rpm_repo_packages:
+ - python-rpmfluff
+ - createrepo_c
+ - createrepo
diff --git a/test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml b/test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml
new file mode 100644
index 00000000..84849e23
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/vars/RedHat-8.yml
@@ -0,0 +1,4 @@
+rpm_repo_packages:
+ - rpm-build
+ - createrepo_c
+ - createrepo
diff --git a/test/integration/targets/setup_rpm_repo/vars/main.yml b/test/integration/targets/setup_rpm_repo/vars/main.yml
new file mode 100644
index 00000000..8e924fce
--- /dev/null
+++ b/test/integration/targets/setup_rpm_repo/vars/main.yml
@@ -0,0 +1 @@
+rpm_repo_python_major_version: "{{ ansible_facts.python_version.split('.')[0] }}"
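
On a host whose discovered Python is 3.x, the Fedora package name above therefore renders as python3-rpmfluff; a quick way to inspect the rendered value (sketch):

- debug:
    msg: "{{ 'python' ~ rpm_repo_python_major_version ~ '-rpmfluff' }}"
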
diff --git a/test/integration/targets/setup_win_printargv/files/PrintArgv.cs b/test/integration/targets/setup_win_printargv/files/PrintArgv.cs
new file mode 100644
index 00000000..5ca3a8a0
--- /dev/null
+++ b/test/integration/targets/setup_win_printargv/files/PrintArgv.cs
@@ -0,0 +1,13 @@
+using System;
+// This has been compiled to an exe and uploaded to an S3 bucket for the argv test
+
+namespace PrintArgv
+{
+ class Program
+ {
+ static void Main(string[] args)
+ {
+ Console.WriteLine(string.Join(System.Environment.NewLine, args));
+ }
+ }
+}
diff --git a/test/integration/targets/setup_win_printargv/meta/main.yml b/test/integration/targets/setup_win_printargv/meta/main.yml
new file mode 100644
index 00000000..e3dd5fb1
--- /dev/null
+++ b/test/integration/targets/setup_win_printargv/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/setup_win_printargv/tasks/main.yml b/test/integration/targets/setup_win_printargv/tasks/main.yml
new file mode 100644
index 00000000..5f671924
--- /dev/null
+++ b/test/integration/targets/setup_win_printargv/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+- name: download the PrintArgv.exe binary to temp location
+ win_get_url:
+ url: https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_win_printargv/PrintArgv.exe
+ dest: '{{ remote_tmp_dir }}\PrintArgv.exe'
+
+- name: set fact containing PrintArgv binary path
+ set_fact:
+ win_printargv_path: '{{ remote_tmp_dir }}\PrintArgv.exe'
diff --git a/test/integration/targets/shell/action_plugins/test_shell.py b/test/integration/targets/shell/action_plugins/test_shell.py
new file mode 100644
index 00000000..6e66ed07
--- /dev/null
+++ b/test/integration/targets/shell/action_plugins/test_shell.py
@@ -0,0 +1,19 @@
+# This file is part of Ansible
+
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=None):
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+ result['shell'] = self._connection._shell.SHELL_FAMILY
+ return result
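
The tasks file later in this target exercises this plugin; a condensed sketch of the call pattern, with the assertion values matching the SHELL_FAMILY of the stock sh and powershell shells:

- test_shell:
  register: result

- assert:
    that:
      - result.shell in ['sh', 'powershell']
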
diff --git a/test/integration/targets/shell/aliases b/test/integration/targets/shell/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/shell/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/shell/connection_plugins/test_connection_default.py b/test/integration/targets/shell/connection_plugins/test_connection_default.py
new file mode 100644
index 00000000..52b027d0
--- /dev/null
+++ b/test/integration/targets/shell/connection_plugins/test_connection_default.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+connection: test_connection_default
+short_description: test connection plugin used in tests
+description:
+- This is a test connection plugin used for shell testing
+author: ansible (@core)
+version_added: historical
+options:
+'''
+
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ ''' test connection '''
+
+ transport = 'test_connection_default'
+
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+
+ def transport(self):
+ pass
+
+ def _connect(self):
+ pass
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ pass
+
+ def put_file(self, in_path, out_path):
+ pass
+
+ def fetch_file(self, in_path, out_path):
+ pass
+
+ def close(self):
+ pass
diff --git a/test/integration/targets/shell/connection_plugins/test_connection_override.py b/test/integration/targets/shell/connection_plugins/test_connection_override.py
new file mode 100644
index 00000000..56d531c4
--- /dev/null
+++ b/test/integration/targets/shell/connection_plugins/test_connection_override.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+connection: test_connection_override
+short_description: test connection plugin used in tests
+description:
+- This is a test connection plugin used for shell testing
+author: ansible (@core)
+version_added: historical
+options:
+'''
+
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ ''' test connection '''
+
+ transport = 'test_connection_override'
+
+ def __init__(self, *args, **kwargs):
+ self._shell_type = 'powershell' # Set a shell type that is not sh
+ super(Connection, self).__init__(*args, **kwargs)
+
+ def transport(self):
+ pass
+
+ def _connect(self):
+ pass
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ pass
+
+ def put_file(self, in_path, out_path):
+ pass
+
+ def fetch_file(self, in_path, out_path):
+ pass
+
+ def close(self):
+ pass
diff --git a/test/integration/targets/shell/tasks/main.yml b/test/integration/targets/shell/tasks/main.yml
new file mode 100644
index 00000000..d6f2a2b5
--- /dev/null
+++ b/test/integration/targets/shell/tasks/main.yml
@@ -0,0 +1,36 @@
+---
+- name: get shell when shell_type is not defined
+ test_shell:
+ register: shell_type_default
+ failed_when: shell_type_default.shell != 'sh'
+ vars:
+ ansible_connection: test_connection_default
+
+- name: get shell when shell_type is not defined but is overridden
+ test_shell:
+ register: shell_type_default_override
+ failed_when: shell_type_default_override.shell != item
+ vars:
+ ansible_connection: test_connection_default
+ ansible_shell_type: '{{ item }}'
+ with_items:
+ - powershell
+ - sh
+
+- name: get shell when shell_type is defined
+ test_shell:
+ register: shell_type_defined
+ failed_when: shell_type_defined.shell != 'powershell'
+ vars:
+ ansible_connection: test_connection_override
+
+- name: get shell when shell_type is defined but is overridden
+ test_shell:
+ register: shell_type_defined_override
+ failed_when: shell_type_defined_override.shell != item
+ vars:
+ ansible_connection: test_connection_default
+ ansible_shell_type: '{{ item }}'
+ with_items:
+ - powershell
+ - sh
diff --git a/test/integration/targets/slurp/aliases b/test/integration/targets/slurp/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/slurp/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/slurp/files/bar.bin b/test/integration/targets/slurp/files/bar.bin
new file mode 100644
index 00000000..38d4d8a4
--- /dev/null
+++ b/test/integration/targets/slurp/files/bar.bin
Binary files differ
diff --git a/test/integration/targets/slurp/tasks/main.yml b/test/integration/targets/slurp/tasks/main.yml
new file mode 100644
index 00000000..4f3556fa
--- /dev/null
+++ b/test/integration/targets/slurp/tasks/main.yml
@@ -0,0 +1,98 @@
+# test code for the slurp module. Based on win_slurp test cases
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Create a UTF-8 file to test with
+ copy:
+ content: 'We are at the café'
+ dest: '{{ output_dir }}/foo.txt'
+
+- name: test slurping an existing file
+ slurp:
+ src: '{{ output_dir }}/foo.txt'
+ register: slurp_existing
+
+- name: check slurp existing result
+ assert:
+ that:
+ - 'slurp_existing.content'
+ - 'slurp_existing.encoding == "base64"'
+ - 'slurp_existing is not changed'
+ - 'slurp_existing is not failed'
+ - '"{{ slurp_existing.content | b64decode }}" == "We are at the café"'
+
+- name: Create a binary file to test with
+ copy:
+ src: bar.bin
+ dest: '{{ output_dir }}/bar.bin'
+
+- name: test slurping a binary file
+ slurp:
+ path: '{{ output_dir }}/bar.bin'
+ register: slurp_binary
+ no_log: true
+
+- name: check slurp result of binary
+ assert:
+ that:
+ - "slurp_binary.content"
+ - "slurp_binary.encoding == 'base64'"
+ - "slurp_binary is not changed"
+ - "slurp_binary is not failed"
+
+- name: test slurping a non-existent file
+ slurp:
+ src: '{{ output_dir }}/i_do_not_exist'
+ register: slurp_missing
+ ignore_errors: true
+
+- name: check slurp missing result
+ assert:
+ that:
+ - "slurp_missing is failed"
+ - "slurp_missing.msg"
+ - "slurp_missing is not changed"
+
+- name: Create a directory to test with
+ file:
+ path: '{{ output_dir }}/baz/'
+ state: directory
+
+- name: test slurping a directory
+ slurp:
+ src: '{{ output_dir }}/baz'
+ register: slurp_dir
+ ignore_errors: true
+
+- name: check slurp directory result
+ assert:
+ that:
+ - "slurp_dir is failed"
+ - "slurp_dir.msg"
+ - "slurp_dir is not changed"
+
+- name: test slurp with missing argument
+ action: slurp
+ register: slurp_no_args
+ ignore_errors: true
+
+- name: check slurp with missing argument result
+ assert:
+ that:
+ - "slurp_no_args is failed"
+ - "slurp_no_args.msg"
+ - "slurp_no_args is not changed"
diff --git a/test/integration/targets/special_vars/aliases b/test/integration/targets/special_vars/aliases
new file mode 100644
index 00000000..2d9e6788
--- /dev/null
+++ b/test/integration/targets/special_vars/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+needs/target/include_parent_role_vars
diff --git a/test/integration/targets/special_vars/meta/main.yml b/test/integration/targets/special_vars/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/special_vars/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/special_vars/tasks/main.yml b/test/integration/targets/special_vars/tasks/main.yml
new file mode 100644
index 00000000..0e71f1dc
--- /dev/null
+++ b/test/integration/targets/special_vars/tasks/main.yml
@@ -0,0 +1,100 @@
+# test code for special variables (exercised via the template module)
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: verify ansible_managed
+ template: src=foo.j2 dest={{output_dir}}/special_vars.yaml
+
+- name: read the file into facts
+ include_vars: "{{output_dir}}/special_vars.yaml"
+
+
+- name: verify all test vars are defined
+ assert:
+ that:
+ - 'item in hostvars[inventory_hostname].keys()'
+ with_items:
+ - test_template_host
+ - test_template_path
+ - test_template_mtime
+ - test_template_uid
+ - test_template_fullpath
+ - test_template_run_date
+ - test_ansible_managed
+
+- name: ensure that role_name exists in role_names, ansible_play_role_names, ansible_role_names, and not in ansible_dependent_role_names
+ assert:
+ that:
+ - "role_name in role_names"
+ - "role_name in ansible_play_role_names"
+ - "role_name in ansible_role_names"
+ - "role_name not in ansible_dependent_role_names"
+
+- name: ensure that our dependency (prepare_tests) exists in ansible_role_names and ansible_dependent_role_names, but not in role_names or ansible_play_role_names
+ assert:
+ that:
+ - "'prepare_tests' in ansible_role_names"
+ - "'prepare_tests' in ansible_dependent_role_names"
+ - "'prepare_tests' not in role_names"
+ - "'prepare_tests' not in ansible_play_role_names"
+
+- name: ensure that ansible_role_names is the sum of ansible_play_role_names and ansible_dependent_role_names
+ assert:
+ that:
+ - "(ansible_play_role_names + ansible_dependent_role_names)|unique|sort|list == ansible_role_names|sort|list"
+
+- name: check that ansible_parent_role_names is normally unset when not included/imported (before including other roles)
+ assert:
+ that:
+ - "ansible_parent_role_names is undefined"
+ - "ansible_parent_role_paths is undefined"
+
+- name: ansible_parent_role_names - test functionality by including another role
+ include_role:
+ name: include_parent_role_vars
+ tasks_from: included_by_other_role.yml
+
+- name: check that ansible_parent_role_names is normally unset when not included/imported (after including other role)
+ assert:
+ that:
+ - "ansible_parent_role_names is undefined"
+ - "ansible_parent_role_paths is undefined"
+
+- name: ansible_parent_role_names - test functionality by importing another role
+ import_role:
+ name: include_parent_role_vars
+ tasks_from: included_by_other_role.yml
+
+- name: check that ansible_parent_role_names is normally unset when not included/imported (after importing other role)
+ assert:
+ that:
+ - "ansible_parent_role_names is undefined"
+ - "ansible_parent_role_paths is undefined"
+
+- name: ansible_parent_role_names - test functionality by including another role
+ include_role:
+ name: include_parent_role_vars
+
+- name: check that ansible_parent_role_names is normally unset when not included/imported (after both import and include)
+ assert:
+ that:
+ - "ansible_parent_role_names is undefined"
+ - "ansible_parent_role_paths is undefined"
+
+- name: ansible_parent_role_names - test functionality by importing another role
+ import_role:
+ name: include_parent_role_vars
diff --git a/test/integration/targets/special_vars/templates/foo.j2 b/test/integration/targets/special_vars/templates/foo.j2
new file mode 100644
index 00000000..0f6db2a1
--- /dev/null
+++ b/test/integration/targets/special_vars/templates/foo.j2
@@ -0,0 +1,7 @@
+test_template_host: "{{template_host}}"
+test_template_path: "{{template_path}}"
+test_template_mtime: "{{template_mtime}}"
+test_template_uid: "{{template_uid}}"
+test_template_fullpath: "{{template_fullpath}}"
+test_template_run_date: "{{template_run_date}}"
+test_ansible_managed: "{{ansible_managed}}"
diff --git a/test/integration/targets/special_vars/vars/main.yml b/test/integration/targets/special_vars/vars/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/special_vars/vars/main.yml
diff --git a/test/integration/targets/stat/aliases b/test/integration/targets/stat/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/stat/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/stat/files/foo.txt b/test/integration/targets/stat/files/foo.txt
new file mode 100644
index 00000000..3e96db9b
--- /dev/null
+++ b/test/integration/targets/stat/files/foo.txt
@@ -0,0 +1 @@
+templated_var_loaded
diff --git a/test/integration/targets/stat/meta/main.yml b/test/integration/targets/stat/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/stat/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/stat/tasks/main.yml b/test/integration/targets/stat/tasks/main.yml
new file mode 100644
index 00000000..bd6b1e89
--- /dev/null
+++ b/test/integration/targets/stat/tasks/main.yml
@@ -0,0 +1,157 @@
+# test code for the stat module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: make a new file
+ copy: dest={{output_dir}}/foo.txt mode=0644 content="hello world"
+
+- name: check stat of file
+ stat: path={{output_dir}}/foo.txt
+ register: stat_result
+
+- debug: var=stat_result
+
+- assert:
+ that:
+ - "'changed' in stat_result"
+ - "stat_result.changed == false"
+ - "'stat' in stat_result"
+ - "'atime' in stat_result.stat"
+ - "'ctime' in stat_result.stat"
+ - "'dev' in stat_result.stat"
+ - "'exists' in stat_result.stat"
+ - "'gid' in stat_result.stat"
+ - "'inode' in stat_result.stat"
+ - "'isblk' in stat_result.stat"
+ - "'ischr' in stat_result.stat"
+ - "'isdir' in stat_result.stat"
+ - "'isfifo' in stat_result.stat"
+ - "'isgid' in stat_result.stat"
+ - "'isreg' in stat_result.stat"
+ - "'issock' in stat_result.stat"
+ - "'isuid' in stat_result.stat"
+ - "'checksum' in stat_result.stat"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+ - "'mode' in stat_result.stat"
+ - "'mtime' in stat_result.stat"
+ - "'nlink' in stat_result.stat"
+ - "'pw_name' in stat_result.stat"
+ - "'rgrp' in stat_result.stat"
+ - "'roth' in stat_result.stat"
+ - "'rusr' in stat_result.stat"
+ - "'size' in stat_result.stat"
+ - "'uid' in stat_result.stat"
+ - "'wgrp' in stat_result.stat"
+ - "'woth' in stat_result.stat"
+ - "'wusr' in stat_result.stat"
+ - "'xgrp' in stat_result.stat"
+ - "'xoth' in stat_result.stat"
+ - "'xusr' in stat_result.stat"
+
+- name: make a symlink
+ file:
+ src: "{{ output_dir }}/foo.txt"
+ path: "{{ output_dir }}/foo-link"
+ state: link
+
+- name: check stat of a symlink with follow off
+ stat:
+ path: "{{ output_dir }}/foo-link"
+ register: stat_result
+
+- debug: var=stat_result
+
+- assert:
+ that:
+ - "'changed' in stat_result"
+ - "stat_result.changed == false"
+ - "'stat' in stat_result"
+ - "'atime' in stat_result.stat"
+ - "'ctime' in stat_result.stat"
+ - "'dev' in stat_result.stat"
+ - "'exists' in stat_result.stat"
+ - "'gid' in stat_result.stat"
+ - "'inode' in stat_result.stat"
+ - "'isblk' in stat_result.stat"
+ - "'ischr' in stat_result.stat"
+ - "'isdir' in stat_result.stat"
+ - "'isfifo' in stat_result.stat"
+ - "'isgid' in stat_result.stat"
+ - "'isreg' in stat_result.stat"
+ - "'issock' in stat_result.stat"
+ - "'isuid' in stat_result.stat"
+ - "'islnk' in stat_result.stat"
+ - "'mode' in stat_result.stat"
+ - "'mtime' in stat_result.stat"
+ - "'nlink' in stat_result.stat"
+ - "'pw_name' in stat_result.stat"
+ - "'rgrp' in stat_result.stat"
+ - "'roth' in stat_result.stat"
+ - "'rusr' in stat_result.stat"
+ - "'size' in stat_result.stat"
+ - "'uid' in stat_result.stat"
+ - "'wgrp' in stat_result.stat"
+ - "'woth' in stat_result.stat"
+ - "'wusr' in stat_result.stat"
+ - "'xgrp' in stat_result.stat"
+ - "'xoth' in stat_result.stat"
+ - "'xusr' in stat_result.stat"
+
+- name: check stat of a symlink with follow on
+ stat:
+ path: "{{ output_dir }}/foo-link"
+ follow: True
+ register: stat_result
+
+- debug: var=stat_result
+
+- assert:
+ that:
+ - "'changed' in stat_result"
+ - "stat_result.changed == false"
+ - "'stat' in stat_result"
+ - "'atime' in stat_result.stat"
+ - "'ctime' in stat_result.stat"
+ - "'dev' in stat_result.stat"
+ - "'exists' in stat_result.stat"
+ - "'gid' in stat_result.stat"
+ - "'inode' in stat_result.stat"
+ - "'isblk' in stat_result.stat"
+ - "'ischr' in stat_result.stat"
+ - "'isdir' in stat_result.stat"
+ - "'isfifo' in stat_result.stat"
+ - "'isgid' in stat_result.stat"
+ - "'isreg' in stat_result.stat"
+ - "'issock' in stat_result.stat"
+ - "'isuid' in stat_result.stat"
+ - "'checksum' in stat_result.stat"
+ - "stat_result.stat.checksum == '2aae6c35c94fcfb415dbe95f408b9ce91ee846ed'"
+ - "'mode' in stat_result.stat"
+ - "'mtime' in stat_result.stat"
+ - "'nlink' in stat_result.stat"
+ - "'pw_name' in stat_result.stat"
+ - "'rgrp' in stat_result.stat"
+ - "'roth' in stat_result.stat"
+ - "'rusr' in stat_result.stat"
+ - "'size' in stat_result.stat"
+ - "'uid' in stat_result.stat"
+ - "'wgrp' in stat_result.stat"
+ - "'woth' in stat_result.stat"
+ - "'wusr' in stat_result.stat"
+ - "'xgrp' in stat_result.stat"
+ - "'xoth' in stat_result.stat"
+ - "'xusr' in stat_result.stat"
diff --git a/test/integration/targets/strategy_linear/aliases b/test/integration/targets/strategy_linear/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/strategy_linear/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/strategy_linear/inventory b/test/integration/targets/strategy_linear/inventory
new file mode 100644
index 00000000..4a34c320
--- /dev/null
+++ b/test/integration/targets/strategy_linear/inventory
@@ -0,0 +1,3 @@
+[local]
+testhost ansible_connection=local
+testhost2 ansible_connection=local
diff --git a/test/integration/targets/strategy_linear/roles/role1/tasks/main.yml b/test/integration/targets/strategy_linear/roles/role1/tasks/main.yml
new file mode 100644
index 00000000..51efd43e
--- /dev/null
+++ b/test/integration/targets/strategy_linear/roles/role1/tasks/main.yml
@@ -0,0 +1,6 @@
+- name: Include tasks
+ include_tasks: "tasks.yml"
+
+- name: Mark role as finished
+ set_fact:
+ role1_complete: True
diff --git a/test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml b/test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml
new file mode 100644
index 00000000..b7a46aa0
--- /dev/null
+++ b/test/integration/targets/strategy_linear/roles/role1/tasks/tasks.yml
@@ -0,0 +1,7 @@
+- name: Call role2
+ include_role:
+ name: role2
+
+- name: Call role2 again
+ include_role:
+ name: role2
diff --git a/test/integration/targets/strategy_linear/roles/role2/tasks/main.yml b/test/integration/targets/strategy_linear/roles/role2/tasks/main.yml
new file mode 100644
index 00000000..81e041e1
--- /dev/null
+++ b/test/integration/targets/strategy_linear/roles/role2/tasks/main.yml
@@ -0,0 +1,7 @@
+- block:
+ - block:
+ - name: Nested task 1
+ debug: msg="Nested task 1"
+
+ - name: Nested task 2
+ debug: msg="Nested task 2"
diff --git a/test/integration/targets/strategy_linear/runme.sh b/test/integration/targets/strategy_linear/runme.sh
new file mode 100755
index 00000000..41639f3c
--- /dev/null
+++ b/test/integration/targets/strategy_linear/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_include_file_noop.yml -i inventory "$@"
diff --git a/test/integration/targets/strategy_linear/test_include_file_noop.yml b/test/integration/targets/strategy_linear/test_include_file_noop.yml
new file mode 100644
index 00000000..9dbf83da
--- /dev/null
+++ b/test/integration/targets/strategy_linear/test_include_file_noop.yml
@@ -0,0 +1,16 @@
+- hosts:
+ - testhost
+ - testhost2
+ gather_facts: no
+ vars:
+ secondhost: testhost2
+ tasks:
+ - name: Call the first role only on one host
+ include_role:
+ name: role1
+ when: inventory_hostname is match(secondhost)
+
+ - name: Make sure nothing else runs until role1 finishes
+ assert:
+ that:
+ - "'role1_complete' in hostvars[secondhost]"
diff --git a/test/integration/targets/subversion/aliases b/test/integration/targets/subversion/aliases
new file mode 100644
index 00000000..1dd2724e
--- /dev/null
+++ b/test/integration/targets/subversion/aliases
@@ -0,0 +1,7 @@
+setup/always/setup_passlib
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+destructive
+needs/root
diff --git a/test/integration/targets/subversion/roles/subversion/defaults/main.yml b/test/integration/targets/subversion/roles/subversion/defaults/main.yml
new file mode 100644
index 00000000..af5ea026
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+apache_port: 11386 # cannot use 80 as httptester overrides this
+output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+subversion_test_dir: '{{ output_dir }}/svn-test'
+subversion_server_dir: /tmp/ansible-svn # cannot use a path in the home dir without userdir or granting exec permission to the apache user
+subversion_repo_name: ansible-test-repo
+subversion_repo_url: http://127.0.0.1:{{ apache_port }}/svn/{{ subversion_repo_name }}
+subversion_repo_auth_url: http://127.0.0.1:{{ apache_port }}/svnauth/{{ subversion_repo_name }}
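+# the trailing quotes on the username below are intentional: tests.yml exercises a checkout with quotes in the username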
+subversion_username: subsvn_user'''
+subversion_password: Password123!
diff --git a/test/integration/targets/subversion/roles/subversion/files/create_repo.sh b/test/integration/targets/subversion/roles/subversion/files/create_repo.sh
new file mode 100644
index 00000000..cc7f4074
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/files/create_repo.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+svnadmin create "$1"
+svn mkdir "file://$PWD/$1/trunk" -m "make trunk"
+svn mkdir "file://$PWD/$1/tags" -m "make tags"
+svn mkdir "file://$PWD/$1/branches" -m "make branches"
diff --git a/test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml b/test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml
new file mode 100644
index 00000000..9be43b4c
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/tasks/cleanup.yml
@@ -0,0 +1,8 @@
+---
+- name: stop apache after tests
+ shell: "kill -9 $(cat '{{ subversion_server_dir }}/apache.pid')"
+
+- name: remove tmp subversion server dir
+ file:
+ path: '{{ subversion_server_dir }}'
+ state: absent
diff --git a/test/integration/targets/subversion/roles/subversion/tasks/main.yml b/test/integration/targets/subversion/roles/subversion/tasks/main.yml
new file mode 100644
index 00000000..0d6acb8a
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: setup subversion server
+ import_tasks: setup.yml
+ tags: setup
+
+- name: verify that subversion is installed so this test can continue
+ shell: which svn
+ tags: always
+
+- name: run tests
+ import_tasks: tests.yml
+ tags: tests
+
+- name: run warning
+ import_tasks: warnings.yml
+ tags: warnings
+
+- name: clean up
+ import_tasks: cleanup.yml
+ tags: cleanup
diff --git a/test/integration/targets/subversion/roles/subversion/tasks/setup.yml b/test/integration/targets/subversion/roles/subversion/tasks/setup.yml
new file mode 100644
index 00000000..5c9c5cb5
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/tasks/setup.yml
@@ -0,0 +1,63 @@
+---
+- name: clean out the checkout dir
+ file:
+ path: '{{ subversion_test_dir }}'
+ state: '{{ item }}'
+ loop:
+ - absent
+ - directory
+
+- name: install SVN pre-reqs
+ package:
+ name: '{{ subversion_packages }}'
+ state: present
+
+- name: upgrade SVN pre-reqs
+ package:
+ name: '{{ upgrade_packages }}'
+ state: latest
+ when:
+ - upgrade_packages | default([])
+
+- name: create SVN home folder
+ file:
+ path: '{{ subversion_server_dir }}'
+ state: directory
+
+- name: setup selinux when enabled
+ include_tasks: setup_selinux.yml
+ when: ansible_selinux.status == "enabled"
+
+- name: template out configuration file
+ template:
+ src: subversion.conf.j2
+ dest: '{{ subversion_server_dir }}/subversion.conf'
+
+- name: create a test repository
+ script: create_repo.sh {{ subversion_repo_name }}
+ args:
+ chdir: '{{ subversion_server_dir }}'
+ creates: '{{ subversion_server_dir }}/{{ subversion_repo_name }}'
+
+- name: apply ownership for all SVN directories
+ file:
+ path: '{{ subversion_server_dir }}'
+ owner: '{{ apache_user }}'
+ group: '{{ apache_group }}'
+ recurse: True
+
+- name: add test user to htpasswd for Subversion site
+ htpasswd:
+ path: '{{ subversion_server_dir }}/svn-auth-users'
+ name: '{{ subversion_username }}'
+ password: '{{ subversion_password }}'
+ state: present
+
+- name: start test Apache SVN site - non Red Hat
+ command: apachectl -k start -f {{ subversion_server_dir }}/subversion.conf
+ when: not ansible_os_family == 'RedHat'
+
+# On Red Hat based OSes we can't use apachectl to start up our own instance, so use the raw httpd directly
+- name: start test Apache SVN site - Red Hat
+ command: httpd -k start -f {{ subversion_server_dir }}/subversion.conf
+ when: ansible_os_family == 'RedHat'
diff --git a/test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml b/test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml
new file mode 100644
index 00000000..a9ffa712
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/tasks/setup_selinux.yml
@@ -0,0 +1,11 @@
+- name: set SELinux security context for SVN folder
+ sefcontext:
+ target: '{{ subversion_server_dir }}(/.*)?'
+ setype: '{{ item }}'
+ state: present
+ with_items:
+ - httpd_sys_content_t
+ - httpd_sys_rw_content_t
+
+- name: apply new SELinux context to filesystem
+ command: restorecon -irv {{ subversion_server_dir | quote }}
diff --git a/test/integration/targets/subversion/roles/subversion/tasks/tests.yml b/test/integration/targets/subversion/roles/subversion/tasks/tests.yml
new file mode 100644
index 00000000..8421f9de
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/tasks/tests.yml
@@ -0,0 +1,133 @@
+# test code for the svn module
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# the checkout pulls every branch, so a small repo is used
+
+- name: initial checkout
+ subversion:
+ repo: '{{ subversion_repo_url }}'
+ dest: '{{ subversion_test_dir }}/svn'
+ register: subverted
+
+- name: check if dir was checked out
+ stat:
+ path: '{{ subversion_test_dir }}/svn'
+ register: subverted_result
+
+# FIXME: the before/after logic here should be fixed to make them hashes, see GitHub 6078
+# looks like this: {
+# "after": [
+# "Revision: 9",
+# "URL: https://github.com/jimi-c/test_role"
+# ],
+# "before": null,
+# "changed": true,
+# "item": ""
+# }
+- name: verify information about the initial clone
+ assert:
+ that:
+ - "'after' in subverted"
+ - "subverted.after.1 == 'URL: ' ~ subversion_repo_url"
+ - "not subverted.before"
+ - "subverted.changed"
+ - subverted_result.stat.exists
+
+- name: repeated checkout
+ subversion:
+ repo: '{{ subversion_repo_url }}'
+ dest: '{{ subversion_test_dir }}/svn'
+ register: subverted2
+
+- name: verify on a reclone things are marked unchanged
+ assert:
+ that:
+ - "not subverted2.changed"
+
+- name: check for tags
+ stat: path={{ subversion_test_dir }}/svn/tags
+ register: tags
+
+- name: check for trunk
+ stat: path={{ subversion_test_dir }}/svn/trunk
+ register: trunk
+
+- name: check for branches
+ stat: path={{ subversion_test_dir }}/svn/branches
+ register: branches
+
+- name: assert presence of tags/trunk/branches
+ assert:
+ that:
+ - "tags.stat.isdir"
+ - "trunk.stat.isdir"
+ - "branches.stat.isdir"
+
+- name: remove checked out repo
+ file:
+ path: '{{ subversion_test_dir }}/svn'
+ state: absent
+
+- name: checkout with quotes in username
+ subversion:
+ repo: '{{ subversion_repo_auth_url }}'
+ dest: '{{ subversion_test_dir }}/svn'
+ username: '{{ subversion_username }}'
+ password: '{{ subversion_password }}'
+ register: subverted3
+
+- name: get result of checkout with quotes in username
+ stat:
+ path: '{{ subversion_test_dir }}/svn'
+ register: subverted3_result
+
+- name: assert checkout with quotes in username
+ assert:
+ that:
+ - subverted3 is changed
+ - subverted3_result.stat.exists
+ - subverted3_result.stat.isdir
+
+- name: checkout with export
+ subversion:
+ repo: '{{ subversion_repo_url }}'
+ dest: '{{ subversion_test_dir }}/svn-export'
+ export: True
+ register: subverted4
+
+- name: check for tags
+ stat: path={{ subversion_test_dir }}/svn-export/tags
+ register: export_tags
+
+- name: check for trunk
+ stat: path={{ subversion_test_dir }}/svn-export/trunk
+ register: export_trunk
+
+- name: check for branches
+ stat: path={{ subversion_test_dir }}/svn-export/branches
+ register: export_branches
+
+- name: assert presence of tags/trunk/branches in export
+ assert:
+ that:
+ - "export_tags.stat.isdir"
+ - "export_trunk.stat.isdir"
+ - "export_branches.stat.isdir"
+ - "subverted4.changed"
+
+# TBA: test for additional options or URL variants welcome
diff --git a/test/integration/targets/subversion/roles/subversion/tasks/warnings.yml b/test/integration/targets/subversion/roles/subversion/tasks/warnings.yml
new file mode 100644
index 00000000..50ebd441
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/tasks/warnings.yml
@@ -0,0 +1,7 @@
+---
+- name: checkout using a password to test for a warning with svn versions older than 1.10.0
+ subversion:
+ repo: '{{ subversion_repo_auth_url }}'
+ dest: '{{ subversion_test_dir }}/svn'
+ username: '{{ subversion_username }}'
+ password: '{{ subversion_password }}'
diff --git a/test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2 b/test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2
new file mode 100644
index 00000000..07e7083a
--- /dev/null
+++ b/test/integration/targets/subversion/roles/subversion/templates/subversion.conf.j2
@@ -0,0 +1,67 @@
+{% if ansible_os_family == "Debian" %}
+
+{# On Ubuntu 16.04 we can include the default config; other versions require explicit config #}
+{% if ansible_distribution_version == "16.04" %}
+Include /etc/apache2/apache2.conf
+
+{% else %}
+Timeout 300
+KeepAlive On
+MaxKeepAliveRequests 100
+KeepAliveTimeout 5
+User ${APACHE_RUN_USER}
+Group ${APACHE_RUN_GROUP}
+HostnameLookups Off
+LogLevel warn
+LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+LogFormat "%h %l %u %t \"%r\" %>s %O" common
+LogFormat "%{Referer}i -> %U" referer
+LogFormat "%{User-agent}i" agent
+
+IncludeOptional mods-enabled/*.load
+IncludeOptional mods-enabled/*.conf
+IncludeOptional conf-enabled/*.conf
+IncludeOptional sites-enabled/*conf
+
+<FilesMatch "^\.ht">
+ Require all denied
+</FilesMatch>
+
+{% endif %}
+
+{% elif ansible_os_family == "FreeBSD" %}
+Include /usr/local/etc/apache24/httpd.conf
+LoadModule dav_module libexec/apache24/mod_dav.so
+LoadModule dav_svn_module libexec/apache24/mod_dav_svn.so
+LoadModule authz_svn_module libexec/apache24/mod_authz_svn.so
+{% elif ansible_os_family == "Suse" %}
+Include /etc/apache2/httpd.conf
+LoadModule dav_module /usr/lib64/apache2/mod_dav.so
+LoadModule dav_svn_module /usr/lib64/apache2/mod_dav_svn.so
+{% elif ansible_os_family == "RedHat" %}
+Include /etc/httpd/conf/httpd.conf
+{% endif %}
+
+PidFile {{ subversion_server_dir }}/apache.pid
+Listen 127.0.0.1:{{ apache_port }}
+ErrorLog {{ subversion_server_dir }}/apache2-error.log
+
+<Location /svn>
+ DAV svn
+ SVNParentPath {{ subversion_server_dir }}
+{% if ansible_distribution == "CentOS" and ansible_distribution_version.startswith("6") %}
+ Allow from all
+{% else %}
+ Require all granted
+{% endif %}
+</Location>
+
+<Location /svnauth>
+ DAV svn
+ SVNParentPath {{ subversion_server_dir }}
+ AuthType Basic
+ AuthName "Subversion repositories"
+ AuthUserFile {{ subversion_server_dir }}/svn-auth-users
+ Require valid-user
+</Location>
diff --git a/test/integration/targets/subversion/runme.sh b/test/integration/targets/subversion/runme.sh
new file mode 100755
index 00000000..f505e581
--- /dev/null
+++ b/test/integration/targets/subversion/runme.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+set -eu
+
+cleanup() {
+ echo "Cleanup"
+ ansible-playbook runme.yml -e "output_dir=${OUTPUT_DIR}" "$@" --tags cleanup
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+export ANSIBLE_ROLES_PATH=roles/
+
+# Ensure subversion is set up
+ansible-playbook runme.yml "$@" -v --tags setup
+
+# Test functionality
+ansible-playbook runme.yml "$@" -v --tags tests
+
+# Test a warning is displayed for versions < 1.10.0 when a password is provided
+ansible-playbook runme.yml "$@" --tags warnings 2>&1 | tee out.txt
+
+version="$(svn --version -q)"
+secure=$(python -c "from distutils.version import LooseVersion; print(LooseVersion('$version') >= LooseVersion('1.10.0'))")
+
+if [[ "${secure}" = "False" ]] && [[ "$(grep -c 'To securely pass credentials, upgrade svn to version 1.10.0' out.txt)" -eq 1 ]]; then
+ echo "Found the expected warning"
+elif [[ "${secure}" = "False" ]]; then
+ echo "Expected a warning"
+ exit 1
+fi
diff --git a/test/integration/targets/subversion/runme.yml b/test/integration/targets/subversion/runme.yml
new file mode 100644
index 00000000..c67d7b89
--- /dev/null
+++ b/test/integration/targets/subversion/runme.yml
@@ -0,0 +1,15 @@
+---
+- hosts: localhost
+ tasks:
+ - name: load OS specific vars
+ include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ paths: '../vars'
+ tags: always
+
+ - include_role:
+ name: subversion
+ tags: always
diff --git a/test/integration/targets/subversion/vars/Debian.yml b/test/integration/targets/subversion/vars/Debian.yml
new file mode 100644
index 00000000..bf7c2084
--- /dev/null
+++ b/test/integration/targets/subversion/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+subversion_packages:
+- subversion
+- libapache2-svn
+apache_user: www-data
+apache_group: www-data
diff --git a/test/integration/targets/subversion/vars/FreeBSD.yml b/test/integration/targets/subversion/vars/FreeBSD.yml
new file mode 100644
index 00000000..153f5235
--- /dev/null
+++ b/test/integration/targets/subversion/vars/FreeBSD.yml
@@ -0,0 +1,7 @@
+---
+subversion_packages:
+- apache24
+- mod_dav_svn
+- subversion
+apache_user: www
+apache_group: www
diff --git a/test/integration/targets/subversion/vars/RedHat.yml b/test/integration/targets/subversion/vars/RedHat.yml
new file mode 100644
index 00000000..3e3f9109
--- /dev/null
+++ b/test/integration/targets/subversion/vars/RedHat.yml
@@ -0,0 +1,10 @@
+---
+subversion_packages:
+- mod_dav_svn
+- subversion
+upgrade_packages:
+# prevent sqlite from being out-of-sync with the version subversion was compiled with
+- subversion
+- sqlite
+apache_user: apache
+apache_group: apache
diff --git a/test/integration/targets/subversion/vars/Suse.yml b/test/integration/targets/subversion/vars/Suse.yml
new file mode 100644
index 00000000..eab906ec
--- /dev/null
+++ b/test/integration/targets/subversion/vars/Suse.yml
@@ -0,0 +1,6 @@
+---
+subversion_packages:
+- subversion
+- subversion-server
+apache_user: wwwrun
+apache_group: www
diff --git a/test/integration/targets/subversion/vars/Ubuntu-18.yml b/test/integration/targets/subversion/vars/Ubuntu-18.yml
new file mode 100644
index 00000000..dfe131b0
--- /dev/null
+++ b/test/integration/targets/subversion/vars/Ubuntu-18.yml
@@ -0,0 +1,6 @@
+---
+subversion_packages:
+- subversion
+- libapache2-mod-svn
+apache_user: www-data
+apache_group: www-data
diff --git a/test/integration/targets/systemd/aliases b/test/integration/targets/systemd/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/systemd/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/systemd/defaults/main.yml b/test/integration/targets/systemd/defaults/main.yml
new file mode 100644
index 00000000..33063b86
--- /dev/null
+++ b/test/integration/targets/systemd/defaults/main.yml
@@ -0,0 +1 @@
+fake_service: nonexisting
diff --git a/test/integration/targets/systemd/meta/main.yml b/test/integration/targets/systemd/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/systemd/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/systemd/tasks/main.yml b/test/integration/targets/systemd/tasks/main.yml
new file mode 100644
index 00000000..867a554d
--- /dev/null
+++ b/test/integration/targets/systemd/tasks/main.yml
@@ -0,0 +1,116 @@
+# Test code for the systemd module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+##
+## systemctl
+##
+
+- name: check for systemctl command
+ shell: which systemctl
+ failed_when: False
+ register: systemctl_check
+
+- meta: end_host
+ when: systemctl_check.rc != 0
+
+- set_fact:
+ ssh_service: '{{ "ssh" if ansible_os_family == "Debian" else "sshd" }}'
+
+- block:
+ - name: get a list of running services
+ shell: systemctl | fgrep 'running' | awk '{print $1}' | sed 's/\.service//g' | fgrep -v '.' | egrep ^[a-z]
+ register: running_names
+ - debug: var=running_names
+
+ - name: check running state
+ systemd:
+ name: "{{ running_names.stdout_lines|random }}"
+ state: started
+ register: systemd_test0
+ - debug: var=systemd_test0
+ - name: validate results for test0
+ assert:
+ that:
+ - 'systemd_test0.changed is defined'
+ - 'systemd_test0.name is defined'
+ - 'systemd_test0.state is defined'
+ - 'systemd_test0.status is defined'
+ - 'not systemd_test0.changed'
+ - 'systemd_test0.state == "started"'
+
+ - name: the module must fail when a service is not found
+ systemd:
+ name: '{{ fake_service }}'
+ state: stopped
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - 'result is search("Could not find the requested service " ~ fake_service)'
+
+ - name: the module must fail in check_mode as well when a service is not found
+ systemd:
+ name: '{{ fake_service }}'
+ state: stopped
+ register: result
+ check_mode: yes
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - 'result is search("Could not find the requested service " ~ fake_service)'
+
+ - name: check that the module works even when systemd is offline (eg in chroot)
+ systemd:
+ name: "{{ running_names.stdout_lines|random }}"
+ state: started
+ environment:
+ SYSTEMD_OFFLINE: 1
+
+- name: Disable ssh 1
+ systemd:
+ name: '{{ ssh_service }}'
+ enabled: false
+ register: systemd_disable_ssh_1
+
+- name: Disable ssh 2
+ systemd:
+ name: '{{ ssh_service }}'
+ enabled: false
+ register: systemd_disable_ssh_2
+
+- name: Enable ssh 1
+ systemd:
+ name: '{{ ssh_service }}'
+ enabled: true
+ register: systemd_enable_ssh_1
+
+- name: Enable ssh 2
+ systemd:
+ name: '{{ ssh_service }}'
+ enabled: true
+ register: systemd_enable_ssh_2
+
+- assert:
+ that:
+ - systemd_disable_ssh_2 is not changed
+ - systemd_enable_ssh_1 is changed
+ - systemd_enable_ssh_2 is not changed
diff --git a/test/integration/targets/tags/aliases b/test/integration/targets/tags/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/test/integration/targets/tags/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/test/integration/targets/tags/runme.sh b/test/integration/targets/tags/runme.sh
new file mode 100755
index 00000000..949fbd5f
--- /dev/null
+++ b/test/integration/targets/tags/runme.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+set -eu
+
+# Using set -x for this test causes the Shippable console to stop receiving updates and the job to time out for macOS.
+# Once that issue is resolved the set -x option can be added above.
+
+# Run these using en_US.UTF-8 because list-tasks is a user output function and so it tailors its output to the
+# user's locale. For unicode tags, this means replacing non-ascii chars with "?"
+
+COMMAND=(ansible-playbook -i ../../inventory test_tags.yml -v --list-tasks)
+
+export LC_ALL=en_US.UTF-8
+
+# Run everything by default
+[ "$("${COMMAND[@]}" | grep -F Task_with | xargs)" = \
+"Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_with_unicode_tag TAGS: [ãらã¨ã¿] Task_with_list_of_tags TAGS: [café, press] Task_without_tag TAGS: [] Task_with_csv_tags TAGS: [tag1, tag2] Task_with_templated_tags TAGS: [tag3]" ]
+
+# Run the exact tags, and always
+[ "$("${COMMAND[@]}" --tags tag | grep -F Task_with | xargs)" = \
+"Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always]" ]
+
+# Skip one tag
+[ "$("${COMMAND[@]}" --skip-tags tag | grep -F Task_with | xargs)" = \
+"Task_with_always_tag TAGS: [always] Task_with_unicode_tag TAGS: [ãらã¨ã¿] Task_with_list_of_tags TAGS: [café, press] Task_without_tag TAGS: [] Task_with_csv_tags TAGS: [tag1, tag2] Task_with_templated_tags TAGS: [tag3]" ]
+
+# Skip a unicode tag
+[ "$("${COMMAND[@]}" --skip-tags 'ãらã¨ã¿' | grep -F Task_with | xargs)" = \
+"Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_with_list_of_tags TAGS: [café, press] Task_without_tag TAGS: [] Task_with_csv_tags TAGS: [tag1, tag2] Task_with_templated_tags TAGS: [tag3]" ]
+
+# Run just a unicode tag and always
+[ "$("${COMMAND[@]}" --tags 'ãらã¨ã¿' | grep -F Task_with | xargs)" = \
+"Task_with_always_tag TAGS: [always] Task_with_unicode_tag TAGS: [ãらã¨ã¿]" ]
+
+# Run a tag from a list of tags and always
+[ "$("${COMMAND[@]}" --tags café | grep -F Task_with | xargs)" = \
+"Task_with_always_tag TAGS: [always] Task_with_list_of_tags TAGS: [café, press]" ]
+
+# Run tag with never
+[ "$("${COMMAND[@]}" --tags donever | grep -F Task_with | xargs)" = \
+"Task_with_always_tag TAGS: [always] Task_with_never_tag TAGS: [donever, never]" ]
+
+# Run csv tags
+[ "$("${COMMAND[@]}" --tags tag1 | grep -F Task_with | xargs)" = \
+"Task_with_always_tag TAGS: [always] Task_with_csv_tags TAGS: [tag1, tag2]" ]
+
+# Run templated tags
+[ "$("${COMMAND[@]}" --tags tag3 | grep -F Task_with | xargs)" = \
+"Task_with_always_tag TAGS: [always] Task_with_templated_tags TAGS: [tag3]" ]
diff --git a/test/integration/targets/tags/test_tags.yml b/test/integration/targets/tags/test_tags.yml
new file mode 100644
index 00000000..76ac5ba4
--- /dev/null
+++ b/test/integration/targets/tags/test_tags.yml
@@ -0,0 +1,33 @@
+---
+- name: verify tags work as expected
+ hosts: testhost
+ gather_facts: False
+ vars:
+ the_tags:
+ - tag3
+ tasks:
+ - name: Task_with_tag
+ debug: msg=
+ tags: tag
+ - name: Task_with_always_tag
+ debug: msg=
+ tags: always
+ - name: Task_with_unicode_tag
+ debug: msg=
+ tags: くらとみ
+ - name: Task_with_list_of_tags
+ debug: msg=
+ tags:
+ - café
+ - press
+ - name: Task_without_tag
+ debug: msg=
+ - name: Task_with_never_tag
+ debug: msg=NEVER
+ tags: ['never', 'donever']
+ - name: Task_with_csv_tags
+ debug: msg=csv
+ tags: tag1,tag2
+ - name: Task_with_templated_tags
+ debug: msg=templated
+ tags: "{{ the_tags }}"
diff --git a/test/integration/targets/task_ordering/aliases b/test/integration/targets/task_ordering/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/task_ordering/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/task_ordering/meta/main.yml b/test/integration/targets/task_ordering/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/task_ordering/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/task_ordering/tasks/main.yml b/test/integration/targets/task_ordering/tasks/main.yml
new file mode 100644
index 00000000..4a7828bf
--- /dev/null
+++ b/test/integration/targets/task_ordering/tasks/main.yml
@@ -0,0 +1,15 @@
+- set_fact:
+ temppath: "{{ remote_tmp_dir }}/output.txt"
+
+- include: taskorder-include.yml
+ with_items:
+ - 1
+ - 2
+ - 3
+
+- slurp:
+ src: "{{ temppath }}"
+ register: tempout
+
+- assert:
+ that: tempout.content | b64decode == "one.1.two.1.three.1.four.1.one.2.two.2.three.2.four.2.one.3.two.3.three.3.four.3."
diff --git a/test/integration/targets/task_ordering/tasks/taskorder-include.yml b/test/integration/targets/task_ordering/tasks/taskorder-include.yml
new file mode 100644
index 00000000..228e897e
--- /dev/null
+++ b/test/integration/targets/task_ordering/tasks/taskorder-include.yml
@@ -0,0 +1,10 @@
+# This test ensures that included tasks are run in order.
+# There have been regressions where included tasks and
+# nested blocks ran out of order...
+
+- shell: printf one.{{ item }}. >> {{ temppath }}
+- block:
+ - shell: printf two.{{ item }}. >> {{ temppath }}
+ - block:
+ - shell: printf three.{{ item }}. >> {{ temppath }}
+- shell: printf four.{{ item }}. >> {{ temppath }}
diff --git a/test/integration/targets/tasks/aliases b/test/integration/targets/tasks/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/test/integration/targets/tasks/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/test/integration/targets/tasks/tasks/main.yml b/test/integration/targets/tasks/tasks/main.yml
new file mode 100644
index 00000000..f6ac1114
--- /dev/null
+++ b/test/integration/targets/tasks/tasks/main.yml
@@ -0,0 +1,4 @@
+# make sure tasks with an undefined variable in the name are gracefully handled
+- name: "Task name with undefined variable: {{ not_defined }}"
+ debug:
+ msg: Hello
diff --git a/test/integration/targets/template/aliases b/test/integration/targets/template/aliases
new file mode 100644
index 00000000..f0c24d20
--- /dev/null
+++ b/test/integration/targets/template/aliases
@@ -0,0 +1,3 @@
+needs/root
+shippable/posix/group5
+skip/aix
diff --git a/test/integration/targets/template/ansible_managed.cfg b/test/integration/targets/template/ansible_managed.cfg
new file mode 100644
index 00000000..3626429f
--- /dev/null
+++ b/test/integration/targets/template/ansible_managed.cfg
@@ -0,0 +1,2 @@
+[defaults]
+ansible_managed=ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
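
In this override, `{file}`, `{uid}` and `{host}` are substituted by the template action, and the `%Y-%m-%d %H:%M:%S` part is expanded with strftime against the template's modification time. A hedged sketch of the effect (destination path and rendered values are illustrative only):

    - name: render a file whose first line carries the managed string
      template:
        src: café.j2               # contains only "{{ ansible_managed }}"
        dest: /tmp/managed.txt     # illustrative path
    # /tmp/managed.txt then begins with something like:
    #   ansible_managed = Ansible managed: café.j2 modified on 2020-12-19 18:12:24 by root on localhost
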
diff --git a/test/integration/targets/template/ansible_managed.yml b/test/integration/targets/template/ansible_managed.yml
new file mode 100644
index 00000000..2bd7c2c4
--- /dev/null
+++ b/test/integration/targets/template/ansible_managed.yml
@@ -0,0 +1,14 @@
+---
+- hosts: testhost
+ gather_facts: False
+ tasks:
+ - set_fact:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+ - file:
+ path: '{{ output_dir }}/café.txt'
+ state: 'absent'
+ # Smoketest that ansible_managed with non-ascii chars works:
+ # https://github.com/ansible/ansible/issues/27262
+ - template:
+ src: 'templates/café.j2'
+ dest: '{{ output_dir }}/café.txt'
diff --git a/test/integration/targets/template/corner_cases.yml b/test/integration/targets/template/corner_cases.yml
new file mode 100644
index 00000000..48782f79
--- /dev/null
+++ b/test/integration/targets/template/corner_cases.yml
@@ -0,0 +1,51 @@
+- name: test templating corner cases
+ hosts: localhost
+ gather_facts: false
+ vars:
+ empty_list: []
+ dont: I SHOULD NOT BE TEMPLATED
+ other: I WORK
+ tasks:
+ - name: 'ensure we are not interpolating data from outside of j2 delimiters'
+ assert:
+ that:
+ - '"I SHOULD NOT BE TEMPLATED" not in adjacent'
+ - globals1 == "[[], globals()]"
+ - globals2 == "[[], globals]"
+ vars:
+ adjacent: "{{ empty_list }} + [dont]"
+ globals1: "[{{ empty_list }}, globals()]"
+ globals2: "[{{ empty_list }}, globals]"
+
+ - name: 'ensure we can add lists'
+ assert:
+ that:
+ - (empty_list + [other]) == [other]
+ - (empty_list + [other, other]) == [other, other]
+ - (dont_exist|default([]) + [other]) == [other]
+ - ([other] + [empty_list, other]) == [other, [], other]
+
+ - name: 'ensure comments go away and we still do not interpolate in strings'
+ assert:
+ that:
+ - 'comm1 == " + [dont]"'
+ - 'comm2 == " #} + [dont]"'
+ vars:
+ comm1: '{# {{nothing}} {# #} + [dont]'
+ comm2: "{# {{nothing}} {# #} #} + [dont]"
+
+ - name: test additions with facts, set them up
+ set_fact:
+ inames: []
+ iname: "{{ prefix ~ '-options' }}"
+ iname_1: "{{ prefix ~ '-options-1' }}"
+ vars:
+ prefix: 'bo'
+
+ - name: add the facts
+ set_fact:
+ inames: '{{ inames + [iname, iname_1] }}'
+
+ - assert:
+ that:
+ - inames == ['bo-options', 'bo-options-1']
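
The property pinned down above is that Jinja evaluates only what sits inside its `{{ }}`/`{% %}`/`{# #}` delimiters; adjacent text is passed through verbatim even when it names a variable, and the rendered result is not templated a second time. The same contract in one line:

    - name: sketch of delimiter boundaries
      debug:
        msg: "{{ [1] }} + [dont]"   # renders literally as "[1] + [dont]"; no second pass
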
diff --git a/test/integration/targets/template/custom_tasks/tasks/main.yml b/test/integration/targets/template/custom_tasks/tasks/main.yml
new file mode 100644
index 00000000..182f7cca
--- /dev/null
+++ b/test/integration/targets/template/custom_tasks/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- set_fact:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- template:
+ src: test
+ dest: "{{ output_dir }}/templated_test"
+ register: custom_template_result
+
+- debug:
+ msg: "{{ custom_template_result }}"
+
+- assert:
+ that:
+ - custom_template_result.changed
diff --git a/test/integration/targets/template/custom_tasks/templates/test b/test/integration/targets/template/custom_tasks/templates/test
new file mode 100644
index 00000000..d033f125
--- /dev/null
+++ b/test/integration/targets/template/custom_tasks/templates/test
@@ -0,0 +1 @@
+Sample Text
diff --git a/test/integration/targets/template/custom_template.yml b/test/integration/targets/template/custom_template.yml
new file mode 100644
index 00000000..e5c7aac8
--- /dev/null
+++ b/test/integration/targets/template/custom_template.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: yes
+ roles:
+ - { role: custom_tasks }
diff --git a/test/integration/targets/template/files/encoding_1252_utf-8.expected b/test/integration/targets/template/files/encoding_1252_utf-8.expected
new file mode 100644
index 00000000..0d3cc352
--- /dev/null
+++ b/test/integration/targets/template/files/encoding_1252_utf-8.expected
@@ -0,0 +1 @@
+windows-1252 Special Characters: €‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
diff --git a/test/integration/targets/template/files/encoding_1252_windows-1252.expected b/test/integration/targets/template/files/encoding_1252_windows-1252.expected
new file mode 100644
index 00000000..7fb94a7b
--- /dev/null
+++ b/test/integration/targets/template/files/encoding_1252_windows-1252.expected
@@ -0,0 +1 @@
+windows-1252 Special Characters: €‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
diff --git a/test/integration/targets/template/files/foo-py26.txt b/test/integration/targets/template/files/foo-py26.txt
new file mode 100644
index 00000000..76b0bb56
--- /dev/null
+++ b/test/integration/targets/template/files/foo-py26.txt
@@ -0,0 +1,9 @@
+templated_var_loaded
+
+{
+ "bool": true,
+ "multi_part": "1Foo",
+ "null_type": null,
+ "number": 5,
+ "string_num": "5"
+}
diff --git a/test/integration/targets/template/files/foo.dos.txt b/test/integration/targets/template/files/foo.dos.txt
new file mode 100644
index 00000000..b716eca0
--- /dev/null
+++ b/test/integration/targets/template/files/foo.dos.txt
@@ -0,0 +1,3 @@
+BEGIN
+templated_var_loaded
+END
diff --git a/test/integration/targets/template/files/foo.txt b/test/integration/targets/template/files/foo.txt
new file mode 100644
index 00000000..58af3be8
--- /dev/null
+++ b/test/integration/targets/template/files/foo.txt
@@ -0,0 +1,9 @@
+templated_var_loaded
+
+{
+ "bool": true,
+ "multi_part": "1Foo",
+ "null_type": null,
+ "number": 5,
+ "string_num": "5"
+}
diff --git a/test/integration/targets/template/files/foo.unix.txt b/test/integration/targets/template/files/foo.unix.txt
new file mode 100644
index 00000000..d33849f2
--- /dev/null
+++ b/test/integration/targets/template/files/foo.unix.txt
@@ -0,0 +1,3 @@
+BEGIN
+templated_var_loaded
+END
diff --git a/test/integration/targets/template/files/import_as.expected b/test/integration/targets/template/files/import_as.expected
new file mode 100644
index 00000000..fc6ea021
--- /dev/null
+++ b/test/integration/targets/template/files/import_as.expected
@@ -0,0 +1,3 @@
+hello world import as
+WIBBLE
+Goodbye
diff --git a/test/integration/targets/template/files/import_as_with_context.expected b/test/integration/targets/template/files/import_as_with_context.expected
new file mode 100644
index 00000000..7099a47a
--- /dev/null
+++ b/test/integration/targets/template/files/import_as_with_context.expected
@@ -0,0 +1,2 @@
+hello world as qux with context
+WIBBLE
diff --git a/test/integration/targets/template/files/import_with_context.expected b/test/integration/targets/template/files/import_with_context.expected
new file mode 100644
index 00000000..5323655a
--- /dev/null
+++ b/test/integration/targets/template/files/import_with_context.expected
@@ -0,0 +1,3 @@
+hello world with context
+WIBBLE
+Goodbye
diff --git a/test/integration/targets/template/files/lstrip_blocks_false.expected b/test/integration/targets/template/files/lstrip_blocks_false.expected
new file mode 100644
index 00000000..12600012
--- /dev/null
+++ b/test/integration/targets/template/files/lstrip_blocks_false.expected
@@ -0,0 +1,4 @@
+ hello world
+ hello world
+ hello world
+
diff --git a/test/integration/targets/template/files/lstrip_blocks_true.expected b/test/integration/targets/template/files/lstrip_blocks_true.expected
new file mode 100644
index 00000000..1b11f8b2
--- /dev/null
+++ b/test/integration/targets/template/files/lstrip_blocks_true.expected
@@ -0,0 +1,3 @@
+hello world
+hello world
+hello world
diff --git a/test/integration/targets/template/files/trim_blocks_false.expected b/test/integration/targets/template/files/trim_blocks_false.expected
new file mode 100644
index 00000000..283cefc8
--- /dev/null
+++ b/test/integration/targets/template/files/trim_blocks_false.expected
@@ -0,0 +1,4 @@
+
+Hello world
+
+Goodbye
diff --git a/test/integration/targets/template/files/trim_blocks_true.expected b/test/integration/targets/template/files/trim_blocks_true.expected
new file mode 100644
index 00000000..03acd5d3
--- /dev/null
+++ b/test/integration/targets/template/files/trim_blocks_true.expected
@@ -0,0 +1,2 @@
+Hello world
+Goodbye
diff --git a/test/integration/targets/template/filter_plugins.yml b/test/integration/targets/template/filter_plugins.yml
new file mode 100644
index 00000000..c3e97a54
--- /dev/null
+++ b/test/integration/targets/template/filter_plugins.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: "force templating in delegate_to before we hit the second one with a filter"
+ delegate_to: "{{ 'localhost' }}"
+
+ - include_role:
+ name: role_filter
diff --git a/test/integration/targets/template/meta/main.yml b/test/integration/targets/template/meta/main.yml
new file mode 100644
index 00000000..06d4fd29
--- /dev/null
+++ b/test/integration/targets/template/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_nobody
diff --git a/test/integration/targets/template/role_filter/filter_plugins/myplugin.py b/test/integration/targets/template/role_filter/filter_plugins/myplugin.py
new file mode 100644
index 00000000..44935ab0
--- /dev/null
+++ b/test/integration/targets/template/role_filter/filter_plugins/myplugin.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+
+class FilterModule(object):
+ def filters(self):
+ return {'parse_ip': self.parse_ip}
+
+ def parse_ip(self, ip):
+ return ip
diff --git a/test/integration/targets/template/role_filter/tasks/main.yml b/test/integration/targets/template/role_filter/tasks/main.yml
new file mode 100644
index 00000000..7d962a29
--- /dev/null
+++ b/test/integration/targets/template/role_filter/tasks/main.yml
@@ -0,0 +1,3 @@
+- name: test
+ command: echo hello
+ delegate_to: "{{ '127.0.0.1' | parse_ip }}"
diff --git a/test/integration/targets/template/runme.sh b/test/integration/targets/template/runme.sh
new file mode 100755
index 00000000..8e21352d
--- /dev/null
+++ b/test/integration/targets/template/runme.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook template.yml -i ../../inventory -v "$@"
+
+# Test for #35571
+ansible testhost -i testhost, -m debug -a 'msg={{ hostvars["localhost"] }}' -e "vars1={{ undef }}" -e "vars2={{ vars1 }}"
+
+# Test for https://github.com/ansible/ansible/issues/27262
+ansible-playbook ansible_managed.yml -c ansible_managed.cfg -i ../../inventory -v "$@"
+
+# Test for #42585
+ANSIBLE_ROLES_PATH=../ ansible-playbook custom_template.yml -i ../../inventory -v "$@"
+
+
+# Test for several corner cases #57188
+ansible-playbook corner_cases.yml -v "$@"
+
+# Test for #57351
+ansible-playbook filter_plugins.yml -v "$@"
+
+# https://github.com/ansible/ansible/issues/68699
+ansible-playbook unused_vars_include.yml -v "$@"
+
+# https://github.com/ansible/ansible/issues/55152
+ansible-playbook undefined_var_info.yml -v "$@"
diff --git a/test/integration/targets/template/tasks/backup_test.yml b/test/integration/targets/template/tasks/backup_test.yml
new file mode 100644
index 00000000..eb4eff17
--- /dev/null
+++ b/test/integration/targets/template/tasks/backup_test.yml
@@ -0,0 +1,60 @@
+# https://github.com/ansible/ansible/issues/24408
+
+- set_fact:
+ t_username: templateuser1
+ t_groupname: templateuser1
+
+- name: create the test group
+ group:
+ name: "{{ t_groupname }}"
+
+- name: create the test user
+ user:
+ name: "{{ t_username }}"
+ group: "{{ t_groupname }}"
+ createhome: no
+
+- name: set the dest file
+ set_fact:
+ t_dest: "{{ output_dir + '/tfile_dest.txt' }}"
+
+- name: create the old file
+ file:
+ path: "{{ t_dest }}"
+ state: touch
+ mode: 0777
+ owner: "{{ t_username }}"
+ group: "{{ t_groupname }}"
+
+- name: failsafe attr change in case the underlying system does not support it
+ shell: chattr =j "{{ t_dest }}"
+ ignore_errors: True
+
+- name: run the template
+ template:
+ src: foo.j2
+ dest: "{{ t_dest }}"
+ backup: True
+ register: t_backup_res
+
+- name: check the data for the backup
+ stat:
+ path: "{{ t_backup_res.backup_file }}"
+ register: t_backup_stats
+
+- name: validate result of preserved backup
+ assert:
+ that:
+ - 't_backup_stats.stat.mode == "0777"'
+ - 't_backup_stats.stat.pw_name == t_username'
+ - 't_backup_stats.stat.gr_name == t_groupname'
+
+- name: cleanup the user
+ user:
+ name: "{{ t_username }}"
+ state: absent
+
+- name: cleanup the group
+ group:
+ name: "{{ t_groupname }}"
+ state: absent
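
The behaviour exercised here is that `backup: True` preserves the file being replaced, ownership and mode included, and reports its location as `backup_file` in the result. A minimal usage sketch (destination path is illustrative):

    - name: overwrite a file but keep the previous version
      template:
        src: foo.j2
        dest: /tmp/example.conf    # illustrative path
        backup: True
      register: tpl

    - name: show where the old copy went, if one was made
      debug:
        var: tpl.backup_file
      when: tpl.backup_file is defined
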
diff --git a/test/integration/targets/template/tasks/main.yml b/test/integration/targets/template/tasks/main.yml
new file mode 100644
index 00000000..da803436
--- /dev/null
+++ b/test/integration/targets/template/tasks/main.yml
@@ -0,0 +1,719 @@
+# test code for the template module
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- name: show python interpreter
+ debug:
+ msg: "{{ ansible_python['executable'] }}"
+
+- name: show jinja2 version
+ debug:
+ msg: "{{ lookup('pipe', '{{ ansible_python[\"executable\"] }} -c \"import jinja2; print(jinja2.__version__)\"') }}"
+
+- name: get default group
+ shell: id -gn
+ register: group
+
+- name: fill in a basic template
+ template: src=foo.j2 dest={{output_dir}}/foo.templated mode=0644
+ register: template_result
+
+- assert:
+ that:
+ - "'changed' in template_result"
+ - "'dest' in template_result"
+ - "'group' in template_result"
+ - "'gid' in template_result"
+ - "'md5sum' in template_result"
+ - "'checksum' in template_result"
+ - "'owner' in template_result"
+ - "'size' in template_result"
+ - "'src' in template_result"
+ - "'state' in template_result"
+ - "'uid' in template_result"
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "template_result.changed == true"
+
+# Basic template with non-ascii names
+- name: Check that non-ascii source and dest work
+ template:
+ src: 'café.j2'
+ dest: '{{ output_dir }}/café.txt'
+ register: template_results
+
+- name: Check that the resulting file exists
+ stat:
+ path: '{{ output_dir }}/café.txt'
+ register: stat_results
+
+- name: Check that template created the right file
+ assert:
+ that:
+ - 'template_results is changed'
+ - 'stat_results.stat["exists"]'
+
+# test for import with context on jinja-2.9 See https://github.com/ansible/ansible/issues/20494
+- name: fill in a template using import with context ala issue 20494
+ template: src=import_with_context.j2 dest={{output_dir}}/import_with_context.templated mode=0644
+ register: template_result
+
+- name: copy known good import_with_context.expected into place
+ copy: src=import_with_context.expected dest={{output_dir}}/import_with_context.expected
+
+- name: compare templated file to known good import_with_context
+ shell: diff -uw {{output_dir}}/import_with_context.templated {{output_dir}}/import_with_context.expected
+ register: diff_result
+
+- name: verify templated import_with_context matches known good
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+ - "diff_result.rc == 0"
+
+# test for nested include https://github.com/ansible/ansible/issues/34886
+- name: test if parent variables are defined in nested include
+ template: src=for_loop.j2 dest={{output_dir}}/for_loop.templated mode=0644
+
+- name: save templated output
+ shell: "cat {{output_dir}}/for_loop.templated"
+ register: for_loop_out
+- debug: var=for_loop_out
+- name: verify variables got templated
+ assert:
+ that:
+ - '"foo" in for_loop_out.stdout'
+ - '"bar" in for_loop_out.stdout'
+ - '"bam" in for_loop_out.stdout'
+
+# test for 'import as' on jinja-2.9 See https://github.com/ansible/ansible/issues/20494
+- name: fill in a template using import as ala fails2 case in issue 20494
+ template: src=import_as.j2 dest={{output_dir}}/import_as.templated mode=0644
+ register: import_as_template_result
+
+- name: copy known good import_as.expected into place
+ copy: src=import_as.expected dest={{output_dir}}/import_as.expected
+
+- name: compare templated file to known good import_as
+ shell: diff -uw {{output_dir}}/import_as.templated {{output_dir}}/import_as.expected
+ register: import_as_diff_result
+
+- name: verify templated import_as matches known good
+ assert:
+ that:
+ - 'import_as_diff_result.stdout == ""'
+ - "import_as_diff_result.rc == 0"
+
+# test for 'import as with context' on jinja-2.9 See https://github.com/ansible/ansible/issues/20494
+- name: fill in a template using import as with context ala fails2 case in issue 20494
+ template: src=import_as_with_context.j2 dest={{output_dir}}/import_as_with_context.templated mode=0644
+ register: import_as_with_context_template_result
+
+- name: copy known good import_as_with_context.expected into place
+ copy: src=import_as_with_context.expected dest={{output_dir}}/import_as_with_context.expected
+
+- name: compare templated file to known good import_as_with_context
+ shell: diff -uw {{output_dir}}/import_as_with_context.templated {{output_dir}}/import_as_with_context.expected
+ register: import_as_with_context_diff_result
+
+- name: verify templated import_as_with_context matches known good
+ assert:
+ that:
+ - 'import_as_with_context_diff_result.stdout == ""'
+ - "import_as_with_context_diff_result.rc == 0"
+
+# VERIFY trim_blocks
+
+- name: Render a template with "trim_blocks" set to False
+ template:
+ src: trim_blocks.j2
+ dest: "{{output_dir}}/trim_blocks_false.templated"
+ trim_blocks: False
+ register: trim_blocks_false_result
+
+- name: Get checksum of known good trim_blocks_false.expected
+ stat:
+ path: "{{role_path}}/files/trim_blocks_false.expected"
+ register: trim_blocks_false_good
+
+- name: Verify templated trim_blocks_false matches known good using checksum
+ assert:
+ that:
+ - "trim_blocks_false_result.checksum == trim_blocks_false_good.stat.checksum"
+
+- name: Render a template with "trim_blocks" set to True
+ template:
+ src: trim_blocks.j2
+ dest: "{{output_dir}}/trim_blocks_true.templated"
+ trim_blocks: True
+ register: trim_blocks_true_result
+
+- name: Get checksum of known good trim_blocks_true.expected
+ stat:
+ path: "{{role_path}}/files/trim_blocks_true.expected"
+ register: trim_blocks_true_good
+
+- name: Verify templated trim_blocks_true matches known good using checksum
+ assert:
+ that:
+ - "trim_blocks_true_result.checksum == trim_blocks_true_good.stat.checksum"
+
+# VERIFY lstrip_blocks
+
+- name: Check support for lstrip_blocks in Jinja2
+ shell: "{{ ansible_python.executable }} -c 'import jinja2; jinja2.defaults.LSTRIP_BLOCKS'"
+ register: lstrip_block_support
+ ignore_errors: True
+
+- name: Render a template with "lstrip_blocks" set to False
+ template:
+ src: lstrip_blocks.j2
+ dest: "{{output_dir}}/lstrip_blocks_false.templated"
+ lstrip_blocks: False
+ register: lstrip_blocks_false_result
+
+- name: Get checksum of known good lstrip_blocks_false.expected
+ stat:
+ path: "{{role_path}}/files/lstrip_blocks_false.expected"
+ register: lstrip_blocks_false_good
+
+- name: Verify templated lstrip_blocks_false matches known good using checksum
+ assert:
+ that:
+ - "lstrip_blocks_false_result.checksum == lstrip_blocks_false_good.stat.checksum"
+
+- name: Render a template with "lstrip_blocks" set to True
+ template:
+ src: lstrip_blocks.j2
+ dest: "{{output_dir}}/lstrip_blocks_true.templated"
+ lstrip_blocks: True
+ register: lstrip_blocks_true_result
+ ignore_errors: True
+
+- name: Verify exception is thrown if Jinja2 does not support lstrip_blocks but lstrip_blocks is used
+ assert:
+ that:
+ - "lstrip_blocks_true_result.failed"
+ - 'lstrip_blocks_true_result.msg is search(">=2.7")'
+ when: "lstrip_block_support is failed"
+
+- name: Get checksum of known good lstrip_blocks_true.expected
+ stat:
+ path: "{{role_path}}/files/lstrip_blocks_true.expected"
+ register: lstrip_blocks_true_good
+ when: "lstrip_block_support is successful"
+
+- name: Verify templated lstrip_blocks_true matches known good using checksum
+ assert:
+ that:
+ - "lstrip_blocks_true_result.checksum == lstrip_blocks_true_good.stat.checksum"
+ when: "lstrip_block_support is successful"
+
+# VERIFY CONTENTS
+
+- name: check what python version ansible is running on
+ command: "{{ ansible_python.executable }} -c 'import distutils.sysconfig ; print(distutils.sysconfig.get_python_version())'"
+ register: pyver
+ delegate_to: localhost
+
+- name: copy known good into place
+ copy: src=foo.txt dest={{output_dir}}/foo.txt
+
+- name: compare templated file to known good
+ shell: diff -uw {{output_dir}}/foo.templated {{output_dir}}/foo.txt
+ register: diff_result
+
+- name: verify templated file matches known good
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+ - "diff_result.rc == 0"
+
+# VERIFY MODE
+
+- name: set file mode
+ file: path={{output_dir}}/foo.templated mode=0644
+ register: file_result
+
+- name: ensure file mode did not change
+ assert:
+ that:
+ - "file_result.changed != True"
+
+# VERIFY dest as a directory does not break file attributes
+# Note: expanduser is needed to go down the particular codepath that was broken before
+- name: setup directory for test
+ file: state=directory dest={{output_dir | expanduser}}/template-dir mode=0755 owner=nobody group={{ group.stdout }}
+
+- name: set file mode when the destination is a directory
+ template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group={{ group.stdout }}
+
+- name: set file mode when the destination is a directory
+ template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir/ mode=0600 owner=root group={{ group.stdout }}
+ register: file_result
+
+- name: check that the file has the correct attributes
+ stat: path={{output_dir | expanduser}}/template-dir/foo.j2
+ register: file_attrs
+
+- assert:
+ that:
+ - "file_attrs.stat.uid == 0"
+ - "file_attrs.stat.pw_name == 'root'"
+ - "file_attrs.stat.mode == '0600'"
+
+- name: check that the containing directory did not change attributes
+ stat: path={{output_dir | expanduser}}/template-dir/
+ register: dir_attrs
+
+- assert:
+ that:
+ - "dir_attrs.stat.uid != 0"
+ - "dir_attrs.stat.pw_name == 'nobody'"
+ - "dir_attrs.stat.mode == '0755'"
+
+- name: Check that templating to a directory whose path does not end with a / is allowed
+ template: src=foo.j2 dest={{output_dir | expanduser}}/template-dir mode=0600 owner=root group={{ group.stdout }}
+
+- name: make a symlink to the templated file
+ file:
+ path: '{{ output_dir }}/foo.symlink'
+ src: '{{ output_dir }}/foo.templated'
+ state: link
+
+- name: check that templating the symlink results in the file being templated
+ template:
+ src: foo.j2
+ dest: '{{output_dir}}/foo.symlink'
+ mode: 0600
+ follow: True
+ register: template_result
+
+- assert:
+ that:
+ - "template_result.changed == True"
+
+- name: check that the file has the correct attributes
+ stat: path={{output_dir | expanduser}}/template-dir/foo.j2
+ register: file_attrs
+
+- assert:
+ that:
+ - "file_attrs.stat.mode == '0600'"
+
+- name: check that templating the symlink again makes no changes
+ template:
+ src: foo.j2
+ dest: '{{output_dir}}/foo.symlink'
+ mode: 0600
+ follow: True
+ register: template_result
+
+- assert:
+ that:
+ - "template_result.changed == False"
+
+# Test strange filenames
+
+- name: Create a temp dir for filename tests
+ file:
+ state: directory
+ dest: '{{ output_dir }}/filename-tests'
+
+- name: create a file with an unusual filename
+ template:
+ src: foo.j2
+ dest: "{{ output_dir }}/filename-tests/foo t'e~m\\plated"
+ register: template_result
+
+- assert:
+ that:
+ - "template_result.changed == True"
+
+- name: check that the unusual filename was created
+ command: "ls {{ output_dir }}/filename-tests/"
+ register: unusual_results
+
+- assert:
+ that:
+ - "\"foo t'e~m\\plated\" in unusual_results.stdout_lines"
+ - "{{unusual_results.stdout_lines| length}} == 1"
+
+- name: check that the unusual filename can be checked for changes
+ template:
+ src: foo.j2
+ dest: "{{ output_dir }}/filename-tests/foo t'e~m\\plated"
+ register: template_result
+
+- assert:
+ that:
+ - "template_result.changed == False"
+
+
+# check_mode
+
+- name: fill in a basic template in check mode
+ template: src=short.j2 dest={{output_dir}}/short.templated
+ register: template_result
+ check_mode: True
+
+- name: check file exists
+ stat: path={{output_dir}}/short.templated
+ register: templated
+
+- name: verify that the file was marked as changed in check mode but was not created
+ assert:
+ that:
+ - "not templated.stat.exists"
+ - "template_result is changed"
+
+- name: fill in a basic template
+ template: src=short.j2 dest={{output_dir}}/short.templated
+
+- name: fill in a basic template in check mode
+ template: src=short.j2 dest={{output_dir}}/short.templated
+ register: template_result
+ check_mode: True
+
+- name: verify that the file was marked as not changed in check mode
+ assert:
+ that:
+ - "template_result is not changed"
+ - "'templated_var_loaded' in lookup('file', output_dir + '/short.templated')"
+
+- name: change var for the template
+ set_fact:
+ templated_var: "changed"
+
+- name: fill in a basic template with changed var in check mode
+ template: src=short.j2 dest={{output_dir}}/short.templated
+ register: template_result
+ check_mode: True
+
+- name: verify that the file was marked as changed in check mode but the content was not changed
+ assert:
+ that:
+ - "'templated_var_loaded' in lookup('file', output_dir + '/short.templated')"
+ - "template_result is changed"
+
+# Create a template using a child template, to ensure that variables
+# are passed properly from the parent to subtemplate context (issue #20063)
+
+- name: test that parent variables are passed into the subtemplate context
+ template: src=parent.j2 dest={{output_dir}}/parent_and_subtemplate.templated
+ register: template_result
+
+- stat: path={{output_dir}}/parent_and_subtemplate.templated
+
+- name: verify that the parent and subtemplate creation worked
+ assert:
+ that:
+ - "template_result is changed"
+
+#
+# template module can overwrite a file that's been hard linked
+# https://github.com/ansible/ansible/issues/10834
+#
+
+- name: ensure test dir is absent
+ file:
+ path: '{{ output_dir | expanduser }}/hlink_dir'
+ state: absent
+
+- name: create test dir
+ file:
+ path: '{{ output_dir | expanduser }}/hlink_dir'
+ state: directory
+
+- name: template out test file to system 1
+ template:
+ src: foo.j2
+ dest: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+
+- name: make hard link
+ file:
+ src: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+ dest: '{{ output_dir | expanduser }}/hlink_dir/test_file_hlink'
+ state: hard
+
+- name: template out test file to system 2
+ template:
+ src: foo.j2
+ dest: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+ register: hlink_result
+
+- name: check that the files are still hardlinked
+ stat:
+ path: '{{ output_dir | expanduser }}/hlink_dir/test_file'
+ register: orig_file
+
+- name: check that the files are still hardlinked
+ stat:
+ path: '{{ output_dir | expanduser }}/hlink_dir/test_file_hlink'
+ register: hlink_file
+
+# We've done nothing at this point to update the content of the file so it should still be hardlinked
+- assert:
+ that:
+ - "hlink_result.changed == False"
+ - "orig_file.stat.inode == hlink_file.stat.inode"
+
+- name: change var for the template
+ set_fact:
+ templated_var: "templated_var_loaded"
+
+# UNIX TEMPLATE
+- name: fill in a basic template (Unix)
+ template:
+ src: foo2.j2
+ dest: '{{ output_dir }}/foo.unix.templated'
+ register: template_result
+
+- name: verify that the file was marked as changed (Unix)
+ assert:
+ that:
+ - 'template_result is changed'
+
+- name: fill in a basic template again (Unix)
+ template:
+ src: foo2.j2
+ dest: '{{ output_dir }}/foo.unix.templated'
+ register: template_result2
+
+- name: verify that the template was not changed (Unix)
+ assert:
+ that:
+ - 'template_result2 is not changed'
+
+# VERIFY UNIX CONTENTS
+- name: copy known good into place (Unix)
+ copy:
+ src: foo.unix.txt
+ dest: '{{ output_dir }}/foo.unix.txt'
+
+- name: Dump templated file (Unix)
+ command: hexdump -C {{ output_dir }}/foo.unix.templated
+
+- name: Dump expected file (Unix)
+ command: hexdump -C {{ output_dir }}/foo.unix.txt
+
+- name: compare templated file to known good (Unix)
+ command: diff -u {{ output_dir }}/foo.unix.templated {{ output_dir }}/foo.unix.txt
+ register: diff_result
+
+- name: verify templated file matches known good (Unix)
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+ - "diff_result.rc == 0"
+
+# DOS TEMPLATE
+- name: fill in a basic template (DOS)
+ template:
+ src: foo2.j2
+ dest: '{{ output_dir }}/foo.dos.templated'
+ newline_sequence: '\r\n'
+ register: template_result
+
+- name: verify that the file was marked as changed (DOS)
+ assert:
+ that:
+ - 'template_result is changed'
+
+- name: fill in a basic template again (DOS)
+ template:
+ src: foo2.j2
+ dest: '{{ output_dir }}/foo.dos.templated'
+ newline_sequence: '\r\n'
+ register: template_result2
+
+- name: verify that the template was not changed (DOS)
+ assert:
+ that:
+ - 'template_result2 is not changed'
+
+# VERIFY DOS CONTENTS
+- name: copy known good into place (DOS)
+ copy:
+ src: foo.dos.txt
+ dest: '{{ output_dir }}/foo.dos.txt'
+
+- name: Dump templated file (DOS)
+ command: hexdump -C {{ output_dir }}/foo.dos.templated
+
+- name: Dump expected file (DOS)
+ command: hexdump -C {{ output_dir }}/foo.dos.txt
+
+- name: compare templated file to known good (DOS)
+ command: diff -u {{ output_dir }}/foo.dos.templated {{ output_dir }}/foo.dos.txt
+ register: diff_result
+
+- name: verify templated file matches known good (DOS)
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+ - "diff_result.rc == 0"
+
+# VERIFY UNIX CONTENTS AGAIN (the DOS run must not have altered the Unix file)
+- name: copy known good into place (Unix)
+ copy:
+ src: foo.unix.txt
+ dest: '{{ output_dir }}/foo.unix.txt'
+
+- name: Dump templated file (Unix)
+ command: hexdump -C {{ output_dir }}/foo.unix.templated
+
+- name: Dump expected file (Unix)
+ command: hexdump -C {{ output_dir }}/foo.unix.txt
+
+- name: compare templated file to known good (Unix)
+ command: diff -u {{ output_dir }}/foo.unix.templated {{ output_dir }}/foo.unix.txt
+ register: diff_result
+
+- name: verify templated file matches known good (Unix)
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+ - "diff_result.rc == 0"
+
+# Check that mode=preserve works with template
+- name: Create a template which has strange permissions
+ copy:
+ content: !unsafe '{{ ansible_managed }}\n'
+ dest: '{{ output_dir }}/foo-template.j2'
+ mode: 0547
+ delegate_to: localhost
+
+- name: Use template with mode=preserve
+ template:
+ src: '{{ output_dir }}/foo-template.j2'
+ dest: '{{ output_dir }}/foo-templated.txt'
+ mode: 'preserve'
+ register: template_results
+
+- name: Get permissions from the templated file
+ stat:
+ path: '{{ output_dir }}/foo-templated.txt'
+ register: stat_results
+
+- name: Check that the resulting file has the correct permissions
+ assert:
+ that:
+ - 'template_results is changed'
+ - 'template_results.mode == "0547"'
+ - 'stat_results.stat["mode"] == "0547"'
+
+# Test output_encoding
+- name: Prepare the list of encodings we want to check, including empty string for defaults
+ set_fact:
+ template_encoding_1252_encodings: ['', 'utf-8', 'windows-1252']
+
+- name: Copy known good encoding_1252_*.expected into place
+ copy:
+ src: 'encoding_1252_{{ item | default("utf-8", true) }}.expected'
+ dest: '{{ output_dir }}/encoding_1252_{{ item }}.expected'
+ loop: '{{ template_encoding_1252_encodings }}'
+
+- name: Generate the encoding_1252_* files from templates using various encoding combinations
+ template:
+ src: 'encoding_1252.j2'
+ dest: '{{ output_dir }}/encoding_1252_{{ item }}.txt'
+ output_encoding: '{{ item }}'
+ loop: '{{ template_encoding_1252_encodings }}'
+
+- name: Compare the encoding_1252_* templated files to known good
+ command: diff -u {{ output_dir }}/encoding_1252_{{ item }}.expected {{ output_dir }}/encoding_1252_{{ item }}.txt
+ register: encoding_1252_diff_result
+ loop: '{{ template_encoding_1252_encodings }}'
+
+- name: Check that nested undefined values return Undefined
+ vars:
+ dict_var:
+ bar: {}
+ list_var:
+ - foo: {}
+ assert:
+ that:
+ - dict_var is defined
+ - dict_var.bar is defined
+ - dict_var.bar.baz is not defined
+ - dict_var.bar.baz | default('DEFAULT') == 'DEFAULT'
+ - dict_var.bar.baz.abc is not defined
+ - dict_var.bar.baz.abc | default('DEFAULT') == 'DEFAULT'
+ - dict_var.baz is not defined
+ - dict_var.baz.abc is not defined
+ - dict_var.baz.abc | default('DEFAULT') == 'DEFAULT'
+ - list_var.0 is defined
+ - list_var.1 is not defined
+ - list_var.0.foo is defined
+ - list_var.0.foo.bar is not defined
+ - list_var.0.foo.bar | default('DEFAULT') == 'DEFAULT'
+ - list_var.1.foo is not defined
+ - list_var.1.foo | default('DEFAULT') == 'DEFAULT'
+ - dict_var is defined
+ - dict_var['bar'] is defined
+ - dict_var['bar']['baz'] is not defined
+ - dict_var['bar']['baz'] | default('DEFAULT') == 'DEFAULT'
+ - dict_var['bar']['baz']['abc'] is not defined
+ - dict_var['bar']['baz']['abc'] | default('DEFAULT') == 'DEFAULT'
+ - dict_var['baz'] is not defined
+ - dict_var['baz']['abc'] is not defined
+ - dict_var['baz']['abc'] | default('DEFAULT') == 'DEFAULT'
+ - list_var[0] is defined
+ - list_var[1] is not defined
+ - list_var[0]['foo'] is defined
+ - list_var[0]['foo']['bar'] is not defined
+ - list_var[0]['foo']['bar'] | default('DEFAULT') == 'DEFAULT'
+ - list_var[1]['foo'] is not defined
+ - list_var[1]['foo'] | default('DEFAULT') == 'DEFAULT'
+ - dict_var['bar'].baz is not defined
+ - dict_var['bar'].baz | default('DEFAULT') == 'DEFAULT'
+
+- template:
+ src: template_destpath_test.j2
+ dest: "{{ output_dir }}/template_destpath.templated"
+
+- copy:
+ content: "{{ output_dir}}/template_destpath.templated\n"
+ dest: "{{ output_dir }}/template_destpath.expected"
+
+- name: compare templated file to known good template_destpath
+ shell: diff -uw {{output_dir}}/template_destpath.templated {{output_dir}}/template_destpath.expected
+ register: diff_result
+
+- name: verify templated template_destpath matches known good
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+ - "diff_result.rc == 0"
+
+- debug:
+ msg: "{{ 'x' in y }}"
+ ignore_errors: yes
+ register: error
+
+- name: check that proper error message is emitted when in operator is used
+ assert:
+ that: "\"'y' is undefined\" in error.msg"
+
+# aliases file requires root for template tests so this should be safe
+- include: backup_test.yml
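
The final assertion above relies on the `in` operator surfacing a clean "'y' is undefined" error rather than a traceback. When the container may legitimately be absent, the usual pattern is to default it before the membership test; a sketch:

    - name: sketch, defaulting the container before a membership test
      debug:
        msg: "{{ 'x' in (y | default([])) }}"   # False instead of an error when y is undefined
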
diff --git a/test/integration/targets/template/template.yml b/test/integration/targets/template/template.yml
new file mode 100644
index 00000000..d33293be
--- /dev/null
+++ b/test/integration/targets/template/template.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: yes
+ roles:
+ - { role: template }
diff --git a/test/integration/targets/template/templates/bar b/test/integration/targets/template/templates/bar
new file mode 100644
index 00000000..2b60207f
--- /dev/null
+++ b/test/integration/targets/template/templates/bar
@@ -0,0 +1 @@
+Goodbye
diff --git a/test/integration/targets/template/templates/café.j2 b/test/integration/targets/template/templates/café.j2
new file mode 100644
index 00000000..ef7e08e7
--- /dev/null
+++ b/test/integration/targets/template/templates/café.j2
@@ -0,0 +1 @@
+{{ ansible_managed }}
diff --git a/test/integration/targets/template/templates/encoding_1252.j2 b/test/integration/targets/template/templates/encoding_1252.j2
new file mode 100644
index 00000000..0d3cc352
--- /dev/null
+++ b/test/integration/targets/template/templates/encoding_1252.j2
@@ -0,0 +1 @@
+windows-1252 Special Characters: €‚ƒ„…†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
diff --git a/test/integration/targets/template/templates/foo.j2 b/test/integration/targets/template/templates/foo.j2
new file mode 100644
index 00000000..22187f91
--- /dev/null
+++ b/test/integration/targets/template/templates/foo.j2
@@ -0,0 +1,3 @@
+{{ templated_var }}
+
+{{ templated_dict | to_nice_json }}
diff --git a/test/integration/targets/template/templates/foo2.j2 b/test/integration/targets/template/templates/foo2.j2
new file mode 100644
index 00000000..e6e34852
--- /dev/null
+++ b/test/integration/targets/template/templates/foo2.j2
@@ -0,0 +1,3 @@
+BEGIN
+{{ templated_var }}
+END
diff --git a/test/integration/targets/template/templates/foo3.j2 b/test/integration/targets/template/templates/foo3.j2
new file mode 100644
index 00000000..710d55a7
--- /dev/null
+++ b/test/integration/targets/template/templates/foo3.j2
@@ -0,0 +1,3 @@
+BEGIN
+[% templated_var %]
+END
diff --git a/test/integration/targets/template/templates/for_loop.j2 b/test/integration/targets/template/templates/for_loop.j2
new file mode 100644
index 00000000..49fa412d
--- /dev/null
+++ b/test/integration/targets/template/templates/for_loop.j2
@@ -0,0 +1,4 @@
+{% for par_var in parent_vars %}
+{% include 'for_loop_include.j2' %}
+
+{% endfor %}
diff --git a/test/integration/targets/template/templates/for_loop_include.j2 b/test/integration/targets/template/templates/for_loop_include.j2
new file mode 100644
index 00000000..b1a0ad7d
--- /dev/null
+++ b/test/integration/targets/template/templates/for_loop_include.j2
@@ -0,0 +1,3 @@
+{% if par_var is defined %}
+{% include 'for_loop_include_nested.j2' %}
+{% endif %}
diff --git a/test/integration/targets/template/templates/for_loop_include_nested.j2 b/test/integration/targets/template/templates/for_loop_include_nested.j2
new file mode 100644
index 00000000..368bce4b
--- /dev/null
+++ b/test/integration/targets/template/templates/for_loop_include_nested.j2
@@ -0,0 +1 @@
+{{ par_var }}
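
Taken together: for_loop.j2 iterates `parent_vars`, includes for_loop_include.j2, which in turn includes this file, so the regression guarded against is the loop variable `par_var` losing visibility two include levels down. With `parent_vars: [foo, bar, bam]` from vars/main.yml, the rendered for_loop.templated is simply the three values, each followed by the blank line the outer template emits:

    foo

    bar

    bam
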
diff --git a/test/integration/targets/template/templates/import_as.j2 b/test/integration/targets/template/templates/import_as.j2
new file mode 100644
index 00000000..b06f1be8
--- /dev/null
+++ b/test/integration/targets/template/templates/import_as.j2
@@ -0,0 +1,4 @@
+{% import 'qux' as qux %}
+hello world import as
+{{ qux.wibble }}
+{% include 'bar' %}
diff --git a/test/integration/targets/template/templates/import_as_with_context.j2 b/test/integration/targets/template/templates/import_as_with_context.j2
new file mode 100644
index 00000000..3dd806a3
--- /dev/null
+++ b/test/integration/targets/template/templates/import_as_with_context.j2
@@ -0,0 +1,3 @@
+{% import 'qux' as qux with context %}
+hello world as qux with context
+{{ qux.wibble }}
diff --git a/test/integration/targets/template/templates/import_with_context.j2 b/test/integration/targets/template/templates/import_with_context.j2
new file mode 100644
index 00000000..104e68b3
--- /dev/null
+++ b/test/integration/targets/template/templates/import_with_context.j2
@@ -0,0 +1,4 @@
+{% import 'qux' as qux with context %}
+hello world with context
+{{ qux.wibble }}
+{% include 'bar' %}
diff --git a/test/integration/targets/template/templates/lstrip_blocks.j2 b/test/integration/targets/template/templates/lstrip_blocks.j2
new file mode 100644
index 00000000..d572da67
--- /dev/null
+++ b/test/integration/targets/template/templates/lstrip_blocks.j2
@@ -0,0 +1,8 @@
+{% set hello_world="hello world" %}
+{% for i in [1, 2, 3] %}
+ {% if loop.first %}
+{{hello_world}}
+ {% else %}
+{{hello_world}}
+ {% endif %}
+{% endfor %}
diff --git a/test/integration/targets/template/templates/parent.j2 b/test/integration/targets/template/templates/parent.j2
new file mode 100644
index 00000000..99a8e4cc
--- /dev/null
+++ b/test/integration/targets/template/templates/parent.j2
@@ -0,0 +1,3 @@
+{% for parent_item in parent_vars %}
+{% include "subtemplate.j2" %}
+{% endfor %}
diff --git a/test/integration/targets/template/templates/qux b/test/integration/targets/template/templates/qux
new file mode 100644
index 00000000..d8cd22e4
--- /dev/null
+++ b/test/integration/targets/template/templates/qux
@@ -0,0 +1 @@
+{% set wibble = "WIBBLE" %}
diff --git a/test/integration/targets/template/templates/short.j2 b/test/integration/targets/template/templates/short.j2
new file mode 100644
index 00000000..55aab8f1
--- /dev/null
+++ b/test/integration/targets/template/templates/short.j2
@@ -0,0 +1 @@
+{{ templated_var }}
diff --git a/test/integration/targets/template/templates/subtemplate.j2 b/test/integration/targets/template/templates/subtemplate.j2
new file mode 100644
index 00000000..f359bf20
--- /dev/null
+++ b/test/integration/targets/template/templates/subtemplate.j2
@@ -0,0 +1,2 @@
+{{ parent_item }}
+
diff --git a/test/integration/targets/template/templates/template_destpath_test.j2 b/test/integration/targets/template/templates/template_destpath_test.j2
new file mode 100644
index 00000000..1d21d8cd
--- /dev/null
+++ b/test/integration/targets/template/templates/template_destpath_test.j2
@@ -0,0 +1 @@
+{{ template_destpath }}
diff --git a/test/integration/targets/template/templates/trim_blocks.j2 b/test/integration/targets/template/templates/trim_blocks.j2
new file mode 100644
index 00000000..824a0a03
--- /dev/null
+++ b/test/integration/targets/template/templates/trim_blocks.j2
@@ -0,0 +1,4 @@
+{% if True %}
+Hello world
+{% endif %}
+Goodbye
diff --git a/test/integration/targets/template/templates/unused_vars_include.j2 b/test/integration/targets/template/templates/unused_vars_include.j2
new file mode 100644
index 00000000..457cbbc0
--- /dev/null
+++ b/test/integration/targets/template/templates/unused_vars_include.j2
@@ -0,0 +1 @@
+{{ var_set_in_template }}
diff --git a/test/integration/targets/template/templates/unused_vars_template.j2 b/test/integration/targets/template/templates/unused_vars_template.j2
new file mode 100644
index 00000000..28afc902
--- /dev/null
+++ b/test/integration/targets/template/templates/unused_vars_template.j2
@@ -0,0 +1,2 @@
+{% set var_set_in_template=test_var %}
+{% include "unused_vars_include.j2" %}
diff --git a/test/integration/targets/template/undefined_var_info.yml b/test/integration/targets/template/undefined_var_info.yml
new file mode 100644
index 00000000..b96a58db
--- /dev/null
+++ b/test/integration/targets/template/undefined_var_info.yml
@@ -0,0 +1,15 @@
+- hosts: localhost
+ gather_facts: no
+ vars:
+ foo: []
+ bar: "{{ foo[0] }}"
+ tasks:
+ - debug:
+ msg: "{{ bar }}"
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - '"foo[0]" in result.msg'
+ - '"object has no element 0" in result.msg'
diff --git a/test/integration/targets/template/unused_vars_include.yml b/test/integration/targets/template/unused_vars_include.yml
new file mode 100644
index 00000000..ff31b70d
--- /dev/null
+++ b/test/integration/targets/template/unused_vars_include.yml
@@ -0,0 +1,8 @@
+- hosts: localhost
+ gather_facts: no
+ vars:
+ test_var: foo
+ unused_var: "{{ undefined_var }}"
+ tasks:
+ - debug:
+ msg: "{{ lookup('template', 'unused_vars_template.j2') }}"
diff --git a/test/integration/targets/template/vars/main.yml b/test/integration/targets/template/vars/main.yml
new file mode 100644
index 00000000..9d45cf24
--- /dev/null
+++ b/test/integration/targets/template/vars/main.yml
@@ -0,0 +1,20 @@
+templated_var: templated_var_loaded
+
+number_var: 5
+string_num: "5"
+bool_var: true
+part_1: 1
+part_2: "Foo"
+null_type: !!null
+
+templated_dict:
+ number: "{{ number_var }}"
+ string_num: "{{ string_num }}"
+ null_type: "{{ null_type }}"
+ bool: "{{ bool_var }}"
+ multi_part: "{{ part_1 }}{{ part_2 }}"
+
+parent_vars:
+- foo
+- bar
+- bam
diff --git a/test/integration/targets/template_jinja2_latest/aliases b/test/integration/targets/template_jinja2_latest/aliases
new file mode 100644
index 00000000..8602d059
--- /dev/null
+++ b/test/integration/targets/template_jinja2_latest/aliases
@@ -0,0 +1,4 @@
+needs/root
+shippable/posix/group2
+needs/target/template
+skip/aix
diff --git a/test/integration/targets/template_jinja2_latest/main.yml b/test/integration/targets/template_jinja2_latest/main.yml
new file mode 100644
index 00000000..aa7d6433
--- /dev/null
+++ b/test/integration/targets/template_jinja2_latest/main.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: True
+ roles:
+ - { role: template }
diff --git a/test/integration/targets/template_jinja2_latest/requirements.txt b/test/integration/targets/template_jinja2_latest/requirements.txt
new file mode 100644
index 00000000..49a806fb
--- /dev/null
+++ b/test/integration/targets/template_jinja2_latest/requirements.txt
@@ -0,0 +1,2 @@
+jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
+jinja2 ; python_version >= '2.7'
diff --git a/test/integration/targets/template_jinja2_latest/runme.sh b/test/integration/targets/template_jinja2_latest/runme.sh
new file mode 100755
index 00000000..6a20eb5d
--- /dev/null
+++ b/test/integration/targets/template_jinja2_latest/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
+pip install -U -r requirements.txt
+
+ANSIBLE_ROLES_PATH=../
+export ANSIBLE_ROLES_PATH
+
+ansible-playbook -i ../../inventory main.yml -v "$@"
diff --git a/test/integration/targets/templating_lookups/aliases b/test/integration/targets/templating_lookups/aliases
new file mode 100644
index 00000000..f8e28c7e
--- /dev/null
+++ b/test/integration/targets/templating_lookups/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/templating_lookups/runme.sh b/test/integration/targets/templating_lookups/runme.sh
new file mode 100755
index 00000000..e958bcfb
--- /dev/null
+++ b/test/integration/targets/templating_lookups/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ANSIBLE_ROLES_PATH=./ UNICODE_VAR=café ansible-playbook runme.yml "$@"
+
+ansible-playbook template_lookup_vaulted/playbook.yml --vault-password-file template_lookup_vaulted/test_vault_pass "$@"
+
+ansible-playbook template_deepcopy/playbook.yml -i template_deepcopy/hosts "$@"
+
+# https://github.com/ansible/ansible/issues/66943
+ansible-playbook template_lookup_safe_eval_unicode/playbook.yml "$@"
diff --git a/test/integration/targets/templating_lookups/runme.yml b/test/integration/targets/templating_lookups/runme.yml
new file mode 100644
index 00000000..a27337bb
--- /dev/null
+++ b/test/integration/targets/templating_lookups/runme.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ roles:
+ - { role: template_lookups }
diff --git a/test/integration/targets/templating_lookups/template_deepcopy/hosts b/test/integration/targets/templating_lookups/template_deepcopy/hosts
new file mode 100644
index 00000000..ecd3b966
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_deepcopy/hosts
@@ -0,0 +1 @@
+h1 ansible_connection=local host_var=foo
diff --git a/test/integration/targets/templating_lookups/template_deepcopy/playbook.yml b/test/integration/targets/templating_lookups/template_deepcopy/playbook.yml
new file mode 100644
index 00000000..da55c167
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_deepcopy/playbook.yml
@@ -0,0 +1,10 @@
+- hosts: h1
+ gather_facts: no
+ tasks:
+ - set_fact:
+ templated_foo: "{{ lookup('template', 'template.in') }}"
+
+ - name: Test that the hostvar was templated correctly
+ assert:
+ that:
+ - templated_foo == "foo\n"
diff --git a/test/integration/targets/templating_lookups/template_deepcopy/template.in b/test/integration/targets/templating_lookups/template_deepcopy/template.in
new file mode 100644
index 00000000..77de0adf
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_deepcopy/template.in
@@ -0,0 +1 @@
+{{hostvars['h1'].host_var}}
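
This target guards against the template lookup handing back a reference into the cached hostvars structure rather than a copy of the rendered text; the playbook's assertion reduces to the sketch below (host_var comes from the inventory line above, and the lookup keeps the file's trailing newline):

    - name: sketch of the expected lookup result
      assert:
        that:
          - lookup('template', 'template.in') == "foo\n"
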
diff --git a/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml
new file mode 100644
index 00000000..29e4b615
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/playbook.yml
@@ -0,0 +1,8 @@
+- hosts: localhost
+ gather_facts: no
+ vars:
+ original_dict: "{{ lookup('template', 'template.json.j2') }}"
+ copy_dict: {}
+ tasks:
+ - set_fact:
+ copy_dict: "{{ copy_dict | combine(original_dict) }}"
diff --git a/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2 b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2
new file mode 100644
index 00000000..bc31407c
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookup_safe_eval_unicode/template.json.j2
@@ -0,0 +1,4 @@
+{
+ "key1": "ascii_value",
+ "key2": "unicode_value_křížek",
+}
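
Note the trailing comma: the rendered text is not strict JSON, but it is a valid Python literal, which is the path (safe_eval) the template lookup uses to turn it back into a dict; issue 66943 was this conversion failing on the non-ASCII value under Python 2. The behaviour being pinned, as a sketch consistent with the playbook above:

    - name: sketch, the lookup yields a real mapping despite the trailing comma
      assert:
        that:
          - original_dict is mapping
          - original_dict.key2 == 'unicode_value_křížek'
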
diff --git a/test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml b/test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml
new file mode 100644
index 00000000..23f32e8f
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookup_vaulted/playbook.yml
@@ -0,0 +1,13 @@
+# https://github.com/ansible/ansible/issues/34209
+- hosts: localhost
+ gather_facts: no
+ vars:
+ hello_world: Hello World
+ tasks:
+ - name: Test that template lookup can handle vaulted templates
+ set_fact:
+ vaulted_hello_world: "{{ lookup('template', 'vaulted_hello.j2') }}"
+
+ - assert:
+ that:
+ - "vaulted_hello_world|trim == 'Unvaulted Hello World!'"
diff --git a/test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2 b/test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2
new file mode 100644
index 00000000..a6e98bd8
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookup_vaulted/templates/vaulted_hello.j2
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+33623433323331343363343830343365376233386637366264646634663632343963396664393463
+3734626234626639323061643863613164643365363063310a663336663762356135396430353435
+39303930613231336135623761363130653235666433383965306235653963343166633233323638
+6635303662333734300a623063393761376531636535383164333632613839663237336463616436
+62643437623538633335366435346532636666616139386332323034336530356131
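
The template lookup transparently decrypts vault-encrypted template files before rendering, so this fixture is an ordinary single-payload vault blob. A sketch of how such a fixture is produced and consumed (the encrypt command shown is the standard ansible-vault invocation, using the password file from this target):

    # ansible-vault encrypt --vault-password-file test_vault_pass templates/vaulted_hello.j2
    - name: sketch, decrypt and render in one step
      debug:
        msg: "{{ lookup('template', 'vaulted_hello.j2') }}"
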
diff --git a/test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass b/test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass
new file mode 100644
index 00000000..9daeafb9
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookup_vaulted/test_vault_pass
@@ -0,0 +1 @@
+test
diff --git a/test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml b/test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml
new file mode 100644
index 00000000..da57631a
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookups/tasks/errors.yml
@@ -0,0 +1,31 @@
+- name: Task that fails due to templating error for plugin option
+ debug: msg="{{ 5 / 0 | int }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.failed
+ - result.exception
+
+- name: Loop that fails due to templating error in first entry and ignores errors
+ debug: msg="{{ 5 / item }}"
+ ignore_errors: true
+ register: result
+ loop: [0, 0, 1]
+
+- debug: var=result
+
+- assert:
+ that:
+ - result.results[0].failed
+ - result.results[0].exception
+ - result.results[0].item == 0
+
+ - result.results[1].failed
+ - result.results[1].exception
+ - result.results[1].item == 0
+
+ - not result.results[2].failed
+ - result.results[2].exception is undefined
+ - result.results[2].item == 1
diff --git a/test/integration/targets/templating_lookups/template_lookups/tasks/main.yml b/test/integration/targets/templating_lookups/template_lookups/tasks/main.yml
new file mode 100644
index 00000000..f240a234
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookups/tasks/main.yml
@@ -0,0 +1,90 @@
+# UNICODE
+
+# https://github.com/ansible/ansible/issues/65297
+- name: get UNICODE_VAR environment var value
+ shell: "echo $UNICODE_VAR"
+ register: unicode_var_value
+
+- name: verify the UNICODE_VAR is defined
+ assert:
+ that:
+ - "unicode_var_value.stdout"
+
+- name: use env lookup to get UNICODE_VAR value
+ set_fact:
+ test_unicode_val: "{{ lookup('env', 'UNICODE_VAR') }}"
+
+- debug: var=unicode_var_value
+- debug: var=test_unicode_val
+
+- name: compare unicode values
+ assert:
+ that:
+ - "test_unicode_val == unicode_var_value.stdout"
+
+# LOOKUP TEMPLATING
+
+- name: use bare interpolation
+ debug: msg="got {{item}}"
+ with_items: "{{things1}}"
+ register: bare_var
+
+- name: verify that list was interpolated
+ assert:
+ that:
+ - "bare_var.results[0].item == 1"
+ - "bare_var.results[1].item == 2"
+
+- name: use list with bare strings in it
+ debug: msg={{item}}
+ with_items:
+ - things2
+ - things1
+
+- name: use list with undefined var in it
+ debug: msg={{item}}
+ with_items: "{{things2}}"
+ ignore_errors: True
+
+# BUG #10073 nested template handling
+
+- name: set variable that clashes
+ set_fact:
+ PATH: foobar
+
+- name: get PATH environment var value
+ set_fact:
+ known_var_value: "{{ lookup('pipe', 'echo $PATH') }}"
+
+- name: do the lookup for env PATH
+ set_fact:
+ test_val: "{{ lookup('env', 'PATH') }}"
+
+- debug: var=test_val
+
+- name: compare values
+ assert:
+ that:
+ - "test_val != ''"
+ - "test_val == known_var_value"
+
+- name: set with_dict
+ shell: echo "{{ item.key + '=' + item.value }}"
+ with_dict: "{{ mydict }}"
+
+# BUG #34144 bad template caching
+
+- name: generate two random passwords
+ set_fact:
+ password1: "{{ lookup('password', '/dev/null length=20') }}"
+ password2: "{{ lookup('password', '/dev/null length=20') }}"
+ # If the passwords are generated randomly, the chance that they
+ # coincide is negligible (< 1e-18 assuming 120 bits of randomness
+ # per password).
+
+- name: make sure passwords are not the same
+ assert:
+ that:
+ - password1 != password2
+
+- include_tasks: ./errors.yml
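
The "< 1e-18" bound in the comment above is very conservative: assuming the password lookup's default charset of 67 symbols (letters, digits and `.,:-_`), a 20-character password carries about 20 * log2(67) ~ 121 bits, so two independent draws collide with probability 67**-20 ~ 3e-37. The arithmetic, sketched with the core math filters:

    - name: sketch, entropy of one generated password (assumes the default charset)
      debug:
        msg: "{{ (20 * (67 | log(2))) | round(1) }} bits"   # ~ 121.3
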
diff --git a/test/integration/targets/templating_lookups/template_lookups/vars/main.yml b/test/integration/targets/templating_lookups/template_lookups/vars/main.yml
new file mode 100644
index 00000000..4c44b1cb
--- /dev/null
+++ b/test/integration/targets/templating_lookups/template_lookups/vars/main.yml
@@ -0,0 +1,9 @@
+mydict:
+ mykey1: myval1
+ mykey2: myval2
+things1:
+ - 1
+ - 2
+things2:
+ - "{{ foo }}"
+ - "{{ foob | default('') }}"
diff --git a/test/integration/targets/templating_settings/aliases b/test/integration/targets/templating_settings/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/templating_settings/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/templating_settings/dont_warn_register.yml b/test/integration/targets/templating_settings/dont_warn_register.yml
new file mode 100644
index 00000000..277ce788
--- /dev/null
+++ b/test/integration/targets/templating_settings/dont_warn_register.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: a template in register would warn, but a plain register name should not
+ debug: msg=unimportant
+ register: thisshouldnotwarn
diff --git a/test/integration/targets/templating_settings/runme.sh b/test/integration/targets/templating_settings/runme.sh
new file mode 100755
index 00000000..2fb202c3
--- /dev/null
+++ b/test/integration/targets/templating_settings/runme.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_templating_settings.yml -i ../../inventory -v "$@"
+[ "$(ansible-playbook dont_warn_register.yml -i ../../inventory -v "$@" 2>&1| grep -c 'is not templatable, but we found')" == "0" ]
diff --git a/test/integration/targets/templating_settings/test_templating_settings.yml b/test/integration/targets/templating_settings/test_templating_settings.yml
new file mode 100644
index 00000000..0c024dfd
--- /dev/null
+++ b/test/integration/targets/templating_settings/test_templating_settings.yml
@@ -0,0 +1,14 @@
+---
+- name: 'Test templating in name'
+ hosts: testhost
+ vars:
+ a_list:
+ - 'part'
+ - 'of a'
+ - 'name'
+
+ tasks:
+ # Note: this only tests that we do not traceback. It doesn't test that the
+ # name goes through templating correctly
+ - name: 'Task: {{ a_list | to_json }}'
+ debug: msg='{{ a_list | to_json }}'
diff --git a/test/integration/targets/test_core/aliases b/test/integration/targets/test_core/aliases
new file mode 100644
index 00000000..041b0cc7
--- /dev/null
+++ b/test/integration/targets/test_core/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group5
+skip/python2.6 # tests are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/test_core/inventory b/test/integration/targets/test_core/inventory
new file mode 100644
index 00000000..0fdd8ae3
--- /dev/null
+++ b/test/integration/targets/test_core/inventory
@@ -0,0 +1 @@
+unreachable ansible_connection=ssh ansible_host=127.0.0.1 ansible_port=1011 # IANA Reserved port
diff --git a/test/integration/targets/test_core/runme.sh b/test/integration/targets/test_core/runme.sh
new file mode 100755
index 00000000..c20c1741
--- /dev/null
+++ b/test/integration/targets/test_core/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eu
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook --vault-password-file vault-password runme.yml -i inventory "${@}"
diff --git a/test/integration/targets/test_core/runme.yml b/test/integration/targets/test_core/runme.yml
new file mode 100644
index 00000000..20a94672
--- /dev/null
+++ b/test/integration/targets/test_core/runme.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: no
+ roles:
+ - test_core
diff --git a/test/integration/targets/test_core/tasks/main.yml b/test/integration/targets/test_core/tasks/main.yml
new file mode 100644
index 00000000..50c43581
--- /dev/null
+++ b/test/integration/targets/test_core/tasks/main.yml
@@ -0,0 +1,303 @@
+- name: Failure
+ set_fact:
+ hello: world
+ failed_when: true
+ ignore_errors: yes
+ register: intentional_failure
+
+- name: Success
+ set_fact:
+ hello: world
+ register: intentional_success
+
+- name: Try failure test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is failure }}"
+ ignore_errors: yes
+ register: misuse_of_failure
+
+- name: Assert failure tests work
+ assert:
+ that:
+ - intentional_failure is failed # old name
+ - intentional_failure is failure
+ - intentional_success is not failure
+ - misuse_of_failure is failed
+
+- name: Assert successful tests work
+ assert:
+ that:
+ - intentional_success is succeeded # old name
+ - intentional_success is success # old name
+ - intentional_success is successful
+ - intentional_failure is not successful
+
+- name: Try reachable host
+ command: id
+ register: reachable_host
+
+- name: Try unreachable host
+ command: id
+ delegate_to: unreachable
+ ignore_unreachable: yes
+ ignore_errors: yes
+ register: unreachable_host
+
+- name: Try reachable test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is reachable }}"
+ ignore_errors: yes
+ register: misuse_of_reachable
+
+- name: Assert reachable tests work
+ assert:
+ that:
+ - misuse_of_reachable is failed
+ - reachable_host is reachable
+ - unreachable_host is not reachable
+
+- name: Try unreachable test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is unreachable }}"
+ ignore_errors: yes
+ register: misuse_of_unreachable
+
+- name: Assert unreachable tests work
+ assert:
+ that:
+ - misuse_of_unreachable is failed
+ - reachable_host is not unreachable
+ - unreachable_host is unreachable
+
+- name: Make changes
+ file:
+ path: dir_for_changed
+ state: directory
+ register: directory_created
+
+- name: Make no changes
+ file:
+ path: dir_for_changed
+ state: directory
+ register: directory_unchanged
+
+- name: Try changed test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is changed }}"
+ ignore_errors: yes
+ register: misuse_of_changed
+
+# providing artificial task results since there are no modules in ansible-base that provide a 'results' list instead of 'changed'
+- name: Prepare artificial task results
+ set_fact:
+ results_all_changed:
+ results:
+ - changed: true
+ - changed: true
+ results_some_changed:
+ results:
+ - changed: true
+ - changed: false
+ results_none_changed:
+ results:
+ - changed: false
+ - changed: false
+ results_missing_changed: {}
+
+- name: Assert changed tests work
+ assert:
+ that:
+ - directory_created is changed
+ - directory_unchanged is not changed
+ - misuse_of_changed is failed
+ - results_all_changed is changed
+ - results_some_changed is changed
+ - results_none_changed is not changed
+ - results_missing_changed is not changed
+
+- name: Skip me
+ set_fact:
+ hello: world
+ when: false
+ register: skipped_task
+
+- name: Don't skip me
+ set_fact:
+ hello: world
+ register: executed_task
+
+- name: Try skipped test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is skipped }}"
+ ignore_errors: yes
+ register: misuse_of_skipped
+
+- name: Assert skipped tests work
+ assert:
+ that:
+ - skipped_task is skipped
+ - executed_task is not skipped
+ - misuse_of_skipped is failure
+
+- name: Not an async task
+ set_fact:
+ hello: world
+ register: non_async_task
+
+- name: Complete an async task
+ command: id
+ async: 10
+ poll: 1
+ register: async_completed
+
+- name: Start an async task without waiting for completion
+ shell: sleep 3
+ async: 10
+ poll: 0
+ register: async_incomplete
+
+- name: Try finished test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is finished }}"
+ ignore_errors: yes
+ register: misuse_of_finished
+
+- name: Assert finished tests work (warning expected)
+ assert:
+ that:
+ - non_async_task is finished
+ - misuse_of_finished is failed
+ - async_completed is finished
+ - async_incomplete is not finished
+
+- name: Try started test on non-dictionary
+ set_fact:
+ hello: "{{ 'nope' is started }}"
+ ignore_errors: yes
+ register: misuse_of_started
+
+- name: Assert started tests work (warning expected)
+ assert:
+ that:
+ - non_async_task is started
+ - misuse_of_started is failed
+ - async_completed is started
+ - async_incomplete is started
+
+- name: Assert match tests work
+ assert:
+ that:
+ - "'hello' is match('h.ll.')"
+ - "'hello' is not match('.ll.')"
+
+- name: Assert search tests work
+ assert:
+ that:
+ - "'hello' is search('.l')"
+ - "'hello' is not search('nope')"
+
+- name: Assert regex tests work
+ assert:
+ that:
+ - "'hello' is regex('.l')"
+ - "'hello' is regex('.L', ignorecase=true)"
+ - "'hello\nAnsible' is regex('^Ansible', multiline=true)"
+ - "'hello' is not regex('.L')"
+ - "'hello\nAnsible' is not regex('^Ansible')"
+
+- name: Try version tests with bad operator
+ set_fact:
+ result: "{{ '1.0' is version('1.0', 'equals') }}"
+ ignore_errors: yes
+ register: version_bad_operator
+
+- name: Try version tests with bad value
+ set_fact:
+ result: "{{ '1.0' is version('nope', '==', true) }}"
+ ignore_errors: yes
+ register: version_bad_value
+
+- name: Assert version tests work
+ assert:
+ that:
+ - "'1.0' is version_compare('1.0', '==')" # old name
+ - "'1.0' is version('1.0', '==')"
+ - "'1.0' is version('2.0', '!=')"
+ - "'1.0' is version('2.0', '<')"
+ - "'2.0' is version('1.0', '>')"
+ - "'1.0' is version('1.0', '<=')"
+ - "'1.0' is version('1.0', '>=')"
+ - "'1.0' is version_compare('1.0', '==', true)" # old name
+ - "'1.0' is version('1.0', '==', true)"
+ - "'1.0' is version('2.0', '!=', true)"
+ - "'1.0' is version('2.0', '<', true)"
+ - "'2.0' is version('1.0', '>', true)"
+ - "'1.0' is version('1.0', '<=', true)"
+ - "'1.0' is version('1.0', '>=', true)"
+ - version_bad_operator is failed
+ - version_bad_value is failed
+
+- name: Assert any tests work
+ assert:
+ that:
+ - "[true, false] is any"
+ - "[false] is not any"
+
+- name: Assert all tests work
+ assert:
+ that:
+ - "[true] is all"
+ - "[true, false] is not all"
+
+- name: Assert truthy tests work
+ assert:
+ that:
+ - '"string" is truthy'
+ - '"" is not truthy'
+ - True is truthy
+ - False is not truthy
+ - true is truthy
+ - false is not truthy
+ - 1 is truthy
+ - 0 is not truthy
+ - '[""] is truthy'
+ - '[] is not truthy'
+ - '"on" is truthy(convert_bool=True)'
+ - '"off" is not truthy(convert_bool=True)'
+ - '"fred" is truthy(convert_bool=True)'
+ - '{} is not truthy'
+ - '{"key": "value"} is truthy'
+
+- name: Assert falsy tests work
+ assert:
+ that:
+ - '"string" is not falsy'
+ - '"" is falsy'
+ - True is not falsy
+ - False is falsy
+ - true is not falsy
+ - false is falsy
+ - 1 is not falsy
+ - 0 is falsy
+ - '[""] is not falsy'
+ - '[] is falsy'
+ - '"on" is not falsy(convert_bool=True)'
+ - '"off" is falsy(convert_bool=True)'
+ - '{} is falsy'
+ - '{"key": "value"} is not falsy'
+
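+# The ciphertext below was created with the 'test-vault-password' key that
+# runme.sh supplies via --vault-password-file.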
+- name: Create vaulted variable for vault_encrypted test
+ set_fact:
+ vaulted_value: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 35323961353038346165643738646465376139363061353835303739663538343266303232326635
+ 3365353662646236356665323135633630656238316530640a663362363763633436373439663031
+ 33663433383037396438656464636433653837376361313638366362333037323961316364363363
+ 3835616438623261650a636164376534376661393134326662326362323131373964313961623365
+ 3833
+
+- name: Assert vault_encrypted tests work
+ assert:
+ that:
+ - vaulted_value is vault_encrypted
+ - inventory_hostname is not vault_encrypted
diff --git a/test/integration/targets/test_core/vault-password b/test/integration/targets/test_core/vault-password
new file mode 100644
index 00000000..96973929
--- /dev/null
+++ b/test/integration/targets/test_core/vault-password
@@ -0,0 +1 @@
+test-vault-password
diff --git a/test/integration/targets/test_files/aliases b/test/integration/targets/test_files/aliases
new file mode 100644
index 00000000..041b0cc7
--- /dev/null
+++ b/test/integration/targets/test_files/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group5
+skip/python2.6 # tests are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/test_files/tasks/main.yml b/test/integration/targets/test_files/tasks/main.yml
new file mode 100644
index 00000000..0d51fc95
--- /dev/null
+++ b/test/integration/targets/test_files/tasks/main.yml
@@ -0,0 +1,60 @@
+- name: Create a broken symbolic link
+ file:
+ src: does_not_exist
+ dest: link_to_nonexistent_file
+ state: link
+ force: yes
+ follow: no
+
+- name: Assert directory tests work
+ assert:
+ that:
+ - "'.' is is_dir" # old name
+ - "'.' is directory"
+ - "'does_not_exist' is not directory"
+
+- name: Assert file tests work
+ assert:
+ that:
+ - "(role_path + '/aliases') is is_file" # old name
+ - "(role_path + '/aliases') is file"
+ - "'does_not_exist' is not file"
+
+- name: Assert link tests work
+ assert:
+ that:
+ - "'link_to_nonexistent_file' is link"
+ - "'.' is not link"
+
+- name: Assert exists tests work
+ assert:
+ that:
+ - "(role_path + '/aliases') is exists"
+ - "'link_to_nonexistent_file' is not exists"
+
+- name: Assert link_exists tests work
+ assert:
+ that:
+ - "'link_to_nonexistent_file' is link_exists"
+ - "'does_not_exist' is not link_exists"
+
+- name: Assert abs tests work
+ assert:
+ that:
+ - "'/' is is_abs" # old name
+ - "'/' is abs"
+ - "'../' is not abs"
+
+- name: Assert same_file tests work
+ assert:
+ that:
+ - "'/' is is_same_file('/')" # old name
+ - "'/' is same_file('/')"
+ - "'/' is not same_file(role_path + '/aliases')"
+
+- name: Assert mount tests work
+ assert:
+ that:
+ - "'/' is is_mount" # old name
+ - "'/' is mount"
+ - "'/does_not_exist' is not mount"
diff --git a/test/integration/targets/test_mathstuff/aliases b/test/integration/targets/test_mathstuff/aliases
new file mode 100644
index 00000000..041b0cc7
--- /dev/null
+++ b/test/integration/targets/test_mathstuff/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group5
+skip/python2.6 # tests are controller only, and we no longer support Python 2.6 on the controller
diff --git a/test/integration/targets/test_mathstuff/tasks/main.yml b/test/integration/targets/test_mathstuff/tasks/main.yml
new file mode 100644
index 00000000..dd379ce2
--- /dev/null
+++ b/test/integration/targets/test_mathstuff/tasks/main.yml
@@ -0,0 +1,38 @@
+- name: Get Jinja2 version
+ set_fact:
+ jinja2_version: >-
+ {{ lookup('pipe', '{{ ansible_playbook_python }} -c "import jinja2; print(jinja2.__version__)"') }}
+
+- name: Assert subset tests work
+ assert:
+ that:
+ - "[1] is issubset([1, 2])" # old name
+ - "[1] is subset([1, 2])"
+ - "[1] is not subset([2])"
+
+- name: Assert superset tests work
+ assert:
+ that:
+ - "[1, 2] is issuperset([1])" # old name
+ - "[1, 2] is superset([1])"
+ - "[2] is not superset([1])"
+
+- name: Assert contains tests work
+ assert:
+ that:
+ - "[1] is contains(1)"
+ - "[1] is not contains(2)"
+
+- name: Assert nan tests work
+ assert:
+ that:
+ - "'bad' is not nan"
+ - "1.1 | float is not nan"
+
+# Jinja2 versions prior to 2.10 will traceback when using: 'nan' | float
+- name: Assert nan tests work (Jinja2 2.10+)
+ assert:
+ that:
+ - "'nan' | float is isnan" # old name
+ - "'nan' | float is nan"
+ when: jinja2_version is version('2.10', '>=')
diff --git a/test/integration/targets/throttle/aliases b/test/integration/targets/throttle/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/throttle/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/throttle/group_vars/all.yml b/test/integration/targets/throttle/group_vars/all.yml
new file mode 100644
index 00000000..b04b2aae
--- /dev/null
+++ b/test/integration/targets/throttle/group_vars/all.yml
@@ -0,0 +1,4 @@
+---
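+# Each strategy/test combination gets its own subdirectory so that tests
+# running concurrently under free/host_pinned never share a marker directory.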
+throttledir: '{{ base_throttledir }}/{{ subdir }}'
+base_throttledir: "{{ lookup('env', 'OUTPUT_DIR') }}/throttle.dir"
+subdir: "{{ test_id if lookup('env', 'SELECTED_STRATEGY') in ['free', 'host_pinned'] else '' }}"
diff --git a/test/integration/targets/throttle/inventory b/test/integration/targets/throttle/inventory
new file mode 100644
index 00000000..9f062d94
--- /dev/null
+++ b/test/integration/targets/throttle/inventory
@@ -0,0 +1,6 @@
+[localhosts]
+testhost[00:11]
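+# testhost00..testhost11 expands to 12 hosts, matching '--forks 12' in runme.sh.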
+
+[localhosts:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/throttle/runme.sh b/test/integration/targets/throttle/runme.sh
new file mode 100755
index 00000000..0db5098d
--- /dev/null
+++ b/test/integration/targets/throttle/runme.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# https://github.com/ansible/ansible/pull/42528
+SELECTED_STRATEGY='linear' ansible-playbook test_throttle.yml -vv -i inventory --forks 12 "$@"
+SELECTED_STRATEGY='free' ansible-playbook test_throttle.yml -vv -i inventory --forks 12 "$@"
diff --git a/test/integration/targets/throttle/test_throttle.py b/test/integration/targets/throttle/test_throttle.py
new file mode 100755
index 00000000..3ee8424e
--- /dev/null
+++ b/test/integration/targets/throttle/test_throttle.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import time
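+# Each concurrent run touches a per-host marker file in the shared throttle
+# directory, counts the markers to measure concurrency, and fails if the count
+# ever exceeds the allowed maximum.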
+
+# read the args from sys.argv
+throttledir, inventory_hostname, max_throttle = sys.argv[1:]
+# format/create additional vars
+max_throttle = int(max_throttle)
+throttledir = os.path.expanduser(throttledir)
+throttlefile = os.path.join(throttledir, inventory_hostname)
+try:
+ # create the file
+ with(open(throttlefile, 'a')):
+ os.utime(throttlefile, None)
+ # count the number of files in the dir
+ throttlelist = os.listdir(throttledir)
+ print("tasks: %d/%d" % (len(throttlelist), max_throttle))
+ # if we have too many files, fail
+ if len(throttlelist) > max_throttle:
+ print(throttlelist)
+ raise ValueError("Too many concurrent tasks: %d/%d" % (len(throttlelist), max_throttle))
+ time.sleep(1.5)
+finally:
+ # remove the file, then wait to make sure it's gone
+ os.unlink(throttlefile)
+ while True:
+ if not os.path.exists(throttlefile):
+ break
+ time.sleep(0.1)
diff --git a/test/integration/targets/throttle/test_throttle.yml b/test/integration/targets/throttle/test_throttle.yml
new file mode 100644
index 00000000..8990ea2f
--- /dev/null
+++ b/test/integration/targets/throttle/test_throttle.yml
@@ -0,0 +1,84 @@
+---
+- hosts: localhosts
+ gather_facts: false
+ strategy: linear
+ run_once: yes
+ tasks:
+ - name: Clean base throttledir '{{ base_throttledir }}'
+ file:
+ state: absent
+ path: '{{ base_throttledir }}'
+ ignore_errors: yes
+
+ - name: Create throttledir '{{ throttledir }}'
+ file:
+ state: directory
+ path: '{{ throttledir }}'
+ loop: "{{ range(1, test_count|int)|list }}"
+ loop_control:
+ loop_var: test_id
+ vars:
+ test_count: "{{ 9 if lookup('env', 'SELECTED_STRATEGY') in ['free', 'host_pinned'] else 2 }}"
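+ # With free/host_pinned, range(1, 9) creates eight per-test subdirs so the
+ # throttle tests below never share a counter directory; under linear all
+ # tests share a single directory (subdir resolves to '').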
+
+- hosts: localhosts
+ gather_facts: false
+ strategy: "{{ lookup('env', 'SELECTED_STRATEGY') }}"
+ tasks:
+ - block:
+ - name: "Test 1 (max throttle: 3)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 3"
+ vars:
+ test_id: 1
+ throttle: 3
+ - block:
+ - name: "Test 2 (max throttle: 5)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 5"
+ throttle: 5
+ vars:
+ test_id: 2
+ - block:
+ - name: "Test 3 (max throttle: 8)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 8"
+ throttle: 8
+ throttle: 6
+ vars:
+ test_id: 3
+ - block:
+ - block:
+ - name: "Test 4 (max throttle: 8)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 8"
+ throttle: 8
+ vars:
+ test_id: 4
+ throttle: 6
+ throttle: 12
+ throttle: 15
+ - block:
+ - name: "Teat 5 (max throttle: 3)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 3"
+ vars:
+ test_id: 5
+ throttle: 3
+ - block:
+ - name: "Test 6 (max throttle: 5)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 5"
+ throttle: 5
+ vars:
+ test_id: 6
+ - block:
+ - name: "Test 7 (max throttle: 6)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 6"
+ throttle: 6
+ vars:
+ test_id: 7
+ throttle: 3
+ - block:
+ - block:
+ - name: "Test 8 (max throttle: 8)"
+ script: "test_throttle.py {{throttledir}} {{inventory_hostname}} 8"
+ throttle: 8
+ vars:
+ test_id: 8
+ throttle: 6
+ throttle: 4
+ throttle: 2
diff --git a/test/integration/targets/unarchive/aliases b/test/integration/targets/unarchive/aliases
new file mode 100644
index 00000000..db9bbd8c
--- /dev/null
+++ b/test/integration/targets/unarchive/aliases
@@ -0,0 +1,4 @@
+needs/root
+shippable/posix/group2
+destructive
+skip/aix
diff --git a/test/integration/targets/unarchive/files/foo.txt b/test/integration/targets/unarchive/files/foo.txt
new file mode 100644
index 00000000..7c6ded14
--- /dev/null
+++ b/test/integration/targets/unarchive/files/foo.txt
@@ -0,0 +1 @@
+foo.txt
diff --git a/test/integration/targets/unarchive/files/test-unarchive-nonascii-くらとみ.tar.gz b/test/integration/targets/unarchive/files/test-unarchive-nonascii-くらとみ.tar.gz
new file mode 100644
index 00000000..4882b920
--- /dev/null
+++ b/test/integration/targets/unarchive/files/test-unarchive-nonascii-くらとみ.tar.gz
Binary files differ
diff --git a/test/integration/targets/unarchive/meta/main.yml b/test/integration/targets/unarchive/meta/main.yml
new file mode 100644
index 00000000..56245b3d
--- /dev/null
+++ b/test/integration/targets/unarchive/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_remote_tmp_dir
+ - setup_gnutar
diff --git a/test/integration/targets/unarchive/tasks/main.yml b/test/integration/targets/unarchive/tasks/main.yml
new file mode 100644
index 00000000..7051539c
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/main.yml
@@ -0,0 +1,16 @@
+- import_tasks: prepare_tests.yml
+- import_tasks: test_tar.yml
+- import_tasks: test_tar_gz.yml
+- import_tasks: test_tar_gz_creates.yml
+- import_tasks: test_tar_gz_owner_group.yml
+- import_tasks: test_tar_gz_keep_newer.yml
+- import_tasks: test_zip.yml
+- import_tasks: test_exclude.yml
+- import_tasks: test_parent_not_writeable.yml
+- import_tasks: test_mode.yml
+- import_tasks: test_quotable_characters.yml
+- import_tasks: test_non_ascii_filename.yml
+- import_tasks: test_missing_files.yml
+- import_tasks: test_symlink.yml
+- import_tasks: test_download.yml
+- import_tasks: test_unprivileged_user.yml
diff --git a/test/integration/targets/unarchive/tasks/prepare_tests.yml b/test/integration/targets/unarchive/tasks/prepare_tests.yml
new file mode 100644
index 00000000..783d77d3
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/prepare_tests.yml
@@ -0,0 +1,92 @@
+# Need unzip for unarchive module, and zip for archive creation.
+- name: Ensure zip & unzip are present
+ package:
+ name:
+ - zip
+ - unzip
+ when: ansible_pkg_mgr in ('yum', 'dnf', 'apt', 'pkgng')
+
+- name: prep our file
+ copy:
+ src: foo.txt
+ dest: "{{remote_tmp_dir}}/foo-unarchive.txt"
+ mode: preserve
+
+- name: prep a tar file
+ shell: tar cvf test-unarchive.tar foo-unarchive.txt chdir={{remote_tmp_dir}}
+
+- name: prep a tar.gz file
+ shell: tar czvf test-unarchive.tar.gz foo-unarchive.txt chdir={{remote_tmp_dir}}
+
+- name: prep a chmodded file for zip
+ copy:
+ src: foo.txt
+ dest: '{{remote_tmp_dir}}/foo-unarchive-777.txt'
+ mode: '0777'
+
+- name: prep a windows permission file for our zip
+ copy:
+ src: foo.txt
+ dest: '{{remote_tmp_dir}}/FOO-UNAR.TXT'
+ mode: preserve
+
+# This gets around an unzip timestamp bug in some distributions
+# Recent unzip on Ubuntu and BSD will randomly round some timestamps up.
+# But that doesn't seem to happen when the timestamp has an even second.
+- name: Bug workaround
+ command: touch -t "201705111530.00" {{remote_tmp_dir}}/foo-unarchive.txt {{remote_tmp_dir}}/foo-unarchive-777.txt {{remote_tmp_dir}}/FOO-UNAR.TXT
+# See Ubuntu bug 1691636: https://bugs.launchpad.net/ubuntu/+source/unzip/+bug/1691636
+# When these are fixed, this code should be removed.
+
+- name: prep a zip file
+ shell: zip test-unarchive.zip foo-unarchive.txt foo-unarchive-777.txt chdir={{remote_tmp_dir}}
+
+- name: Prepare - Create test dirs
+ file:
+ path: "{{remote_tmp_dir}}/{{item}}"
+ state: directory
+ with_items:
+ - created/include
+ - created/exclude
+ - created/other
+
+- name: Prepare - Create test files
+ file:
+ path: "{{remote_tmp_dir}}/created/{{item}}"
+ state: touch
+ with_items:
+ - include/include-1.txt
+ - include/include-2.txt
+ - include/include-3.txt
+ - exclude/exclude-1.txt
+ - exclude/exclude-2.txt
+ - exclude/exclude-3.txt
+ - other/include-1.ext
+ - other/include-2.ext
+ - other/exclude-1.ext
+ - other/exclude-2.ext
+ - other/other-1.ext
+ - other/other-2.ext
+
+- name: Prepare - zip file
+ shell: zip -r {{remote_tmp_dir}}/unarchive-00.zip * chdir={{remote_tmp_dir}}/created/
+
+- name: Prepare - tar file
+ shell: tar czvf {{remote_tmp_dir}}/unarchive-00.tar * chdir={{remote_tmp_dir}}/created/
+
+- name: add a file with Windows permissions to zip file
+ shell: zip -k test-unarchive.zip FOO-UNAR.TXT chdir={{remote_tmp_dir}}
+
+- name: prep a subdirectory
+ file:
+ path: '{{remote_tmp_dir}}/unarchive-dir'
+ state: directory
+
+- name: prep our file
+ copy:
+ src: foo.txt
+ dest: '{{remote_tmp_dir}}/unarchive-dir/foo-unarchive.txt'
+ mode: preserve
+
+- name: prep a tar.gz file with directory
+ shell: tar czvf test-unarchive-dir.tar.gz unarchive-dir chdir={{remote_tmp_dir}}
diff --git a/test/integration/targets/unarchive/tasks/test_download.yml b/test/integration/targets/unarchive/tasks/test_download.yml
new file mode 100644
index 00000000..6b17449b
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_download.yml
@@ -0,0 +1,34 @@
+# Test downloading a file before unarchiving it
+- name: create our unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: Install packages to make TLS connections work on CentOS 6
+ pip:
+ name:
+ - urllib3==1.10.2
+ - ndg_httpsclient==0.4.4
+ - pyOpenSSL==16.2.0
+ state: present
+ when:
+ - ansible_facts.distribution == 'CentOS'
+ - not ansible_facts.python.has_sslcontext
+
+- name: unarchive a tar from an URL
+ unarchive:
+ src: "https://releases.ansible.com/ansible/ansible-latest.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ mode: "0700"
+ remote_src: yes
+ register: unarchive13
+
+- name: Test that unarchive succeeded
+ assert:
+ that:
+ - "unarchive13.changed == true"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_exclude.yml b/test/integration/targets/unarchive/tasks/test_exclude.yml
new file mode 100644
index 00000000..be24756c
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_exclude.yml
@@ -0,0 +1,48 @@
+- name: "Create {{ remote_tmp_dir }}/exclude directory"
+ file:
+ state: directory
+ path: "{{ remote_tmp_dir }}/exclude-{{item}}"
+ with_items:
+ - zip
+ - tar
+
+- name: Unpack archive file excluding regular and glob files.
+ unarchive:
+ src: "{{ remote_tmp_dir }}/unarchive-00.{{item}}"
+ dest: "{{ remote_tmp_dir }}/exclude-{{item}}"
+ remote_src: yes
+ exclude:
+ - "exclude/exclude-*.txt"
+ - "other/exclude-1.ext"
+ with_items:
+ - zip
+ - tar
+
+- name: verify that the file was unarchived
+ shell: find {{ remote_tmp_dir }}/exclude-{{item}} chdir={{ remote_tmp_dir }}
+ register: unarchive00
+ with_items:
+ - zip
+ - tar
+
+- name: verify that archive extraction excluded the files
+ assert:
+ that:
+ - "'exclude/exclude-1.txt' not in item.stdout"
+ - "'other/exclude-1.ext' not in item.stdout"
+ with_items:
+ - "{{ unarchive00.results }}"
+
+- name: remove our zip unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-zip'
+ state: absent
+
+- name: remove our test files for the archive
+ file:
+ path: '{{remote_tmp_dir}}/{{item}}'
+ state: absent
+ with_items:
+ - foo-unarchive.txt
+ - foo-unarchive-777.txt
+ - FOO-UNAR.TXT
diff --git a/test/integration/targets/unarchive/tasks/test_missing_files.yml b/test/integration/targets/unarchive/tasks/test_missing_files.yml
new file mode 100644
index 00000000..4f57e184
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_missing_files.yml
@@ -0,0 +1,47 @@
+# Test that unarchiving is performed if files are missing
+# https://github.com/ansible/ansible-modules-core/issues/1064
+- name: create our unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: unarchive a tar that has directories
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive-dir.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ mode: "0700"
+ remote_src: yes
+ register: unarchive10
+
+- name: Test that unarchive succeeded
+ assert:
+ that:
+ - "unarchive10.changed == true"
+
+- name: Change the mode of the toplevel dir
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/unarchive-dir"
+ mode: "0701"
+
+- name: Remove a file from the extraction point
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/unarchive-dir/foo-unarchive.txt"
+ state: absent
+
+- name: unarchive a tar that has directories
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive-dir.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ mode: "0700"
+ remote_src: yes
+ register: unarchive10_1
+
+- name: Test that unarchive succeeded
+ assert:
+ that:
+ - "unarchive10_1.changed == true"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_mode.yml b/test/integration/targets/unarchive/tasks/test_mode.yml
new file mode 100644
index 00000000..c69e3bd2
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_mode.yml
@@ -0,0 +1,151 @@
+- name: create our unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: unarchive and set mode to 0600, directories 0700
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ remote_src: yes
+ mode: "u+rwX,g-rwx,o-rwx"
+ list_files: True
+ register: unarchive06
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/foo-unarchive.txt"
+ register: unarchive06_stat
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - "unarchive06.changed == true"
+ - "unarchive06_stat.stat.mode == '0600'"
+ # Verify that file list is generated
+ - "'files' in unarchive06"
+ - "{{unarchive06['files']| length}} == 1"
+ - "'foo-unarchive.txt' in unarchive06['files']"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz'
+ state: absent
+
+- name: create our unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: unarchive over existing extraction and set mode to 0644
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ remote_src: yes
+ mode: "u+rwX,g-wx,o-wx,g+r,o+r"
+ register: unarchive06_2
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/foo-unarchive.txt"
+ register: unarchive06_2_stat
+
+- debug:
+ var: unarchive06_2_stat.stat.mode
+
+- name: Test that the files were changed
+ assert:
+ that:
+ - "unarchive06_2.changed == true"
+ - "unarchive06_2_stat.stat.mode == '0644'"
+
+- name: Repeat the last request to verify no changes
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ remote_src: yes
+ mode: "u+rwX-x,g-wx,o-wx,g+r,o+r"
+ list_files: True
+ register: unarchive07
+
+- name: Test that the files were not changed
+ assert:
+ that:
+ - "unarchive07.changed == false"
+ # Verify that file list is generated
+ - "'files' in unarchive07"
+ - "{{unarchive07['files']| length}} == 1"
+ - "'foo-unarchive.txt' in unarchive07['files']"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz'
+ state: absent
+
+- name: create our unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-zip'
+ state: directory
+
+- name: unarchive and set mode to 0601, directories 0700
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.zip"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-zip"
+ remote_src: yes
+ mode: "u+rwX-x,g-rwx,o=x"
+ list_files: True
+ register: unarchive08
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-zip/foo-unarchive.txt"
+ register: unarchive08_stat
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - "unarchive08.changed == true"
+ - "unarchive08_stat.stat.mode == '0601'"
+ # Verify that file list is generated
+ - "'files' in unarchive08"
+ - "{{unarchive08['files']| length}} == 3"
+ - "'foo-unarchive.txt' in unarchive08['files']"
+ - "'foo-unarchive-777.txt' in unarchive08['files']"
+ - "'FOO-UNAR.TXT' in unarchive08['files']"
+
+- name: unarchive zipfile a second time and set mode to 0601, directories 0700
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.zip"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-zip"
+ remote_src: yes
+ mode: "u+rwX-x,g-rwx,o=x"
+ list_files: True
+ register: unarchive08
+
+- name: Test that the file modes were not changed
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-zip/foo-unarchive.txt"
+ register: unarchive08_stat
+
+- debug:
+ var: unarchive08
+
+- debug:
+ var: unarchive08_stat
+
+- name: Test that the files did not change
+ assert:
+ that:
+ - "unarchive08.changed == false"
+ - "unarchive08_stat.stat.mode == '0601'"
+ # Verify that file list is generated
+ - "'files' in unarchive08"
+ - "{{unarchive08['files']| length}} == 3"
+ - "'foo-unarchive.txt' in unarchive08['files']"
+ - "'foo-unarchive-777.txt' in unarchive08['files']"
+ - "'FOO-UNAR.TXT' in unarchive08['files']"
+
+- name: remove our zip unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-zip'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml b/test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml
new file mode 100644
index 00000000..c884f49a
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_non_ascii_filename.yml
@@ -0,0 +1,66 @@
+- name: create our unarchive destination
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz"
+ state: directory
+
+- name: test that unarchive works with an archive that contains non-ascii filenames
+ unarchive:
+ # Both the filename of the tarball and the filename inside the tarball have
+ # nonascii chars
+ src: "test-unarchive-nonascii-ãらã¨ã¿.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz"
+ mode: "u+rwX,go+rX"
+ remote_src: no
+ register: nonascii_result0
+
+- name: Check that file is really there
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz/storage/aÌ€âæçeÌeÌ€ïiÌ‚oÌ‚Å“(copy)!@#$%^&-().jpg"
+ register: nonascii_stat0
+
+- name: Assert that nonascii tests succeeded
+ assert:
+ that:
+ - "nonascii_result0.changed == true"
+ - "nonascii_stat0.stat.exists == true"
+
+- name: remove nonascii test
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz"
+ state: absent
+
+- name: test non-ascii with different LC_ALL
+ block:
+ - name: create our unarchive destination
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz"
+ state: directory
+
+ - name: test that unarchive works with an archive that contains non-ascii filenames
+ unarchive:
+ # Both the filename of the tarball and the filename inside the tarball have
+ # nonascii chars
+ src: "test-unarchive-nonascii-ãらã¨ã¿.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz"
+ mode: "u+rwX,go+rX"
+ remote_src: no
+ register: nonascii_result0
+
+ - name: Check that file is really there
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz/storage/aÌ€âæçeÌeÌ€ïiÌ‚oÌ‚Å“(copy)!@#$%^&-().jpg"
+ register: nonascii_stat0
+
+ - name: Assert that nonascii tests succeeded
+ assert:
+ that:
+ - "nonascii_result0.changed == true"
+ - "nonascii_stat0.stat.exists == true"
+
+ - name: remove nonascii test
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-nonascii-ãらã¨ã¿-tar-gz"
+ state: absent
+
+ environment:
+ LC_ALL: C
diff --git a/test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml b/test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml
new file mode 100644
index 00000000..bfb082c6
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_parent_not_writeable.yml
@@ -0,0 +1,32 @@
+- name: check if /tmp/foo-unarchive.txt exists
+ stat:
+ path: /tmp/foo-unarchive.txt
+ ignore_errors: True
+ register: unarchive04
+
+- name: fail if the proposed destination file exists, for safety
+ fail:
+ msg: /tmp/foo-unarchive.txt already exists, aborting
+ when: unarchive04.stat.exists
+
+- name: try unarchiving to /tmp
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.tar.gz'
+ dest: /tmp
+ remote_src: true
+ register: unarchive05
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "unarchive05.changed == true"
+
+- name: verify that the file was unarchived
+ file:
+ path: /tmp/foo-unarchive.txt
+ state: file
+
+- name: remove our unarchive destination
+ file:
+ path: /tmp/foo-unarchive.txt
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_quotable_characters.yml b/test/integration/targets/unarchive/tasks/test_quotable_characters.yml
new file mode 100644
index 00000000..0a3c2cc3
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_quotable_characters.yml
@@ -0,0 +1,38 @@
+- name: create our unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: create a directory with quotable chars
+ file:
+ path: '{{ remote_tmp_dir }}/test-quotes~root'
+ state: directory
+
+- name: unarchive into directory with quotable chars
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-quotes~root"
+ remote_src: yes
+ register: unarchive08
+
+- name: Test that unarchive succeeded
+ assert:
+ that:
+ - "unarchive08.changed == true"
+
+- name: unarchive into directory with quotable chars a second time
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/test-quotes~root"
+ remote_src: yes
+ register: unarchive09
+
+- name: Test that unarchive did nothing
+ assert:
+ that:
+ - "unarchive09.changed == false"
+
+- name: remove quotable chars test
+ file:
+ path: '{{ remote_tmp_dir }}/test-quotes~root'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_symlink.yml b/test/integration/targets/unarchive/tasks/test_symlink.yml
new file mode 100644
index 00000000..fcb72828
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_symlink.yml
@@ -0,0 +1,64 @@
+- name: Create a destination dir
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ state: directory
+
+- name: Create a symlink to the destination dir
+ file:
+ path: "{{ remote_tmp_dir }}/link-to-unarchive-dir"
+ src: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ state: "link"
+
+- name: test that unarchive works when dest is a symlink to a dir
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/link-to-unarchive-dir"
+ mode: "u+rwX,go+rX"
+ remote_src: yes
+ register: unarchive_11
+
+- name: Check that file is really there
+ stat:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz/foo-unarchive.txt"
+ register: unarchive11_stat0
+
+- name: Assert that unarchive when dest is a symlink to a dir worked
+ assert:
+ that:
+ - "unarchive_11.changed == true"
+ - "unarchive11_stat0.stat.exists == true"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz'
+ state: absent
+
+- name: Create a file
+ file:
+ path: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ state: touch
+
+- name: Create a symlink to the file
+ file:
+ src: "{{ remote_tmp_dir }}/test-unarchive-tar-gz"
+ path: "{{ remote_tmp_dir }}/link-to-unarchive-file"
+ state: "link"
+
+- name: test that unarchive fails when dest is a link to a file
+ unarchive:
+ src: "{{ remote_tmp_dir }}/test-unarchive.tar.gz"
+ dest: "{{ remote_tmp_dir }}/link-to-unarchive-file"
+ mode: "u+rwX,go+rX"
+ remote_src: yes
+ ignore_errors: True
+ register: unarchive_12
+
+- name: Assert that unarchive when dest is a file failed
+ assert:
+ that:
+ - "unarchive_12.failed == true"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{ remote_tmp_dir }}/test-unarchive-tar-gz'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_tar.yml b/test/integration/targets/unarchive/tasks/test_tar.yml
new file mode 100644
index 00000000..09105c60
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_tar.yml
@@ -0,0 +1,26 @@
+- name: create our tar unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar'
+ state: directory
+
+- name: unarchive a tar file
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.tar'
+ dest: '{{remote_tmp_dir}}/test-unarchive-tar'
+ remote_src: yes
+ register: unarchive01
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "unarchive01.changed == true"
+
+- name: verify that the file was unarchived
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar/foo-unarchive.txt'
+ state: file
+
+- name: remove our tar unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz.yml b/test/integration/targets/unarchive/tasks/test_tar_gz.yml
new file mode 100644
index 00000000..ac9e9a15
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_tar_gz.yml
@@ -0,0 +1,28 @@
+- name: create our tar.gz unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: unarchive a tar.gz file
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.tar.gz'
+ dest: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ remote_src: yes
+ register: unarchive02
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "unarchive02.changed == true"
+ # Verify that no file list is generated
+ - "'files' not in unarchive02"
+
+- name: verify that the file was unarchived
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt'
+ state: file
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml b/test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml
new file mode 100644
index 00000000..fa3a23f8
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_tar_gz_creates.yml
@@ -0,0 +1,53 @@
+- name: create our tar.gz unarchive destination for creates
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: directory
+
+- name: unarchive a tar.gz file with creates set
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.tar.gz'
+ dest: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ creates: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt'
+ remote_src: yes
+ register: unarchive02b
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "unarchive02b.changed == true"
+
+- name: verify that the file was unarchived
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt'
+ state: file
+
+- name: unarchive a tar.gz file with creates over an existing file
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.tar.gz'
+ dest: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ creates: '{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt'
+ remote_src: yes
+ register: unarchive02c
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "unarchive02c.changed == false"
+
+- name: unarchive a tar.gz file with creates over an existing file using complex_args
+ unarchive:
+ src: "{{remote_tmp_dir}}/test-unarchive.tar.gz"
+ dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ remote_src: yes
+ creates: "{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt"
+ register: unarchive02d
+
+- name: verify that the file was not marked as changed
+ assert:
+ that:
+ - "unarchive02d.changed == false"
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-tar-gz'
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml b/test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml
new file mode 100644
index 00000000..aec94545
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_tar_gz_keep_newer.yml
@@ -0,0 +1,57 @@
+- name: create our tar.gz unarchive destination for keep-newer
+ file:
+ path: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ state: directory
+
+- name: Create a newer file that we would replace
+ copy:
+ dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt"
+ content: boo
+ mode: preserve
+
+- name: unarchive a tar.gz file but avoid overwriting newer files (keep_newer=true)
+ unarchive:
+ src: "{{remote_tmp_dir}}/test-unarchive.tar.gz"
+ dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ remote_src: yes
+ keep_newer: true
+ register: unarchive02f
+
+- name: Make sure the file still contains 'boo'
+ shell: cat {{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt
+ register: unarchive02f_cat
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ state: absent
+
+- name: create our tar.gz unarchive destination for keep-newer (take 2)
+ file:
+ path: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ state: directory
+
+- name: unarchive a tar.gz file and overwrite newer files (keep_newer=false)
+ unarchive:
+ src: "{{remote_tmp_dir}}/test-unarchive.tar.gz"
+ dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ remote_src: yes
+ keep_newer: false
+ register: unarchive02g
+
+- name: Make sure the file still contains 'boo'
+ shell: cat {{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt
+ register: unarchive02g_cat
+
+- name: remove our tar.gz unarchive destination
+ file:
+ path: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ state: absent
+
+- name: verify results
+ assert:
+ that:
+ - unarchive02f is changed
+ - unarchive02f_cat.stdout == 'boo'
+ - unarchive02g is changed
+ - unarchive02g_cat.stdout != 'boo'
diff --git a/test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml b/test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml
new file mode 100644
index 00000000..257692e1
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_tar_gz_owner_group.yml
@@ -0,0 +1,48 @@
+- block:
+ - name: Create a group to chown to
+ group:
+ name: testgroup
+
+ - name: Create a user to chown to
+ user:
+ name: testuser
+ groups:
+ - testgroup
+
+ - name: create our tar.gz unarchive destination for chown
+ file:
+ path: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ state: directory
+
+ - name: unarchive a tar.gz file with owner and group set to the above user
+ unarchive:
+ src: "{{remote_tmp_dir}}/test-unarchive.tar.gz"
+ dest: "{{remote_tmp_dir}}/test-unarchive-tar-gz"
+ remote_src: yes
+ owner: testuser
+ group: testgroup
+ register: unarchive02e
+
+ - name: Stat a file in the directory we unarchived to
+ stat:
+ path: "{{remote_tmp_dir}}/test-unarchive-tar-gz/foo-unarchive.txt"
+ register: unarchive02e_file_stat
+
+ - name: verify results
+ assert:
+ that:
+ - unarchive02e is changed
+ - unarchive02e_file_stat.stat.exists
+ - unarchive02e_file_stat.stat.pw_name == 'testuser'
+ - unarchive02e_file_stat.stat.gr_name == 'testgroup'
+
+ always:
+ - name: Remove testuser
+ user:
+ name: testuser
+ state: absent
+
+ - name: Remove testgroup
+ group:
+ name: testgroup
+ state: absent
diff --git a/test/integration/targets/unarchive/tasks/test_unprivileged_user.yml b/test/integration/targets/unarchive/tasks/test_unprivileged_user.yml
new file mode 100644
index 00000000..6181e3bd
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_unprivileged_user.yml
@@ -0,0 +1,86 @@
+- name: Create unarchivetest1 user
+ user:
+ name: unarchivetest1
+ uid: 1002610001
+ group: "{{ group_table[ansible_facts['distribution']] | default(omit) }}"
+ register: user
+ vars:
+ group_table:
+ MacOSX: staff
+
+- name: Test unarchiving twice as unprivileged user
+ become: yes
+ become_user: unarchivetest1
+ block:
+ - name: prep our file
+ copy:
+ src: foo.txt
+ dest: "{{ user.home }}/foo-unarchive.txt"
+ mode: preserve
+
+ - name: Prep a zip file as unarchivetest1 user
+ shell: zip unarchivetest1-unarchive.zip foo-unarchive.txt
+ args:
+ chdir: "{{ user.home }}"
+ creates: "{{ user.home }}/unarchivetest1-unarchive.zip"
+
+ - name: create our zip unarchive destination as unarchivetest1 user
+ file:
+ path: "{{ user.home }}/unarchivetest1-unarchive-zip"
+ state: directory
+
+ - name: unarchive a zip file as unarchivetest1 user
+ unarchive:
+ src: "{{ user.home }}/unarchivetest1-unarchive.zip"
+ dest: "{{ user.home }}/unarchivetest1-unarchive-zip"
+ remote_src: yes
+ list_files: True
+ register: unarchive10
+
+ - name: stat the unarchived file
+ stat:
+ path: "{{ user.home }}/unarchivetest1-unarchive-zip/foo-unarchive.txt"
+ register: archive_path
+
+ - name: verify that the tasks performed as expected
+ assert:
+ that:
+ - unarchive10 is changed
+ # Verify that file list is generated
+ - "'files' in unarchive10"
+ - "{{unarchive10['files']| length}} == 1"
+ - "'foo-unarchive.txt' in unarchive10['files']"
+ - archive_path.stat.exists
+
+ - name: repeat the last request to verify no changes
+ unarchive:
+ src: "{{ user.home }}/unarchivetest1-unarchive.zip"
+ dest: "{{ user.home }}/unarchivetest1-unarchive-zip"
+ remote_src: yes
+ list_files: True
+ register: unarchive10b
+
+ # Due to a bug in the date calculation used to determine if a change
+ # was made or not, this check is unreliable. This seems to only happen on
+ # Ubuntu 16.04.
+ # https://github.com/ansible/ansible/blob/58145dff9ca1a713f8ed295a0076779a91c41cba/lib/ansible/modules/unarchive.py#L472-L474
+ - name: Check that unarchiving again reports no change
+ assert:
+ that:
+ - unarchive10b is not changed
+ ignore_errors: yes
+
+ always:
+ - name: remove our unarchivetest1 user and files
+ user:
+ name: unarchivetest1
+ state: absent
+ remove: yes
+ become: no
+
+ - name: Remove user home directory on macOS
+ file:
+ path: /Users/unarchivetest1
+ state: absent
+ become: no
+ when: ansible_facts.distribution == 'MacOSX'
diff --git a/test/integration/targets/unarchive/tasks/test_zip.yml b/test/integration/targets/unarchive/tasks/test_zip.yml
new file mode 100644
index 00000000..aae57d8e
--- /dev/null
+++ b/test/integration/targets/unarchive/tasks/test_zip.yml
@@ -0,0 +1,45 @@
+- name: create our zip unarchive destination
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-zip'
+ state: directory
+
+- name: unarchive a zip file
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.zip'
+ dest: '{{remote_tmp_dir}}/test-unarchive-zip'
+ list_files: True
+ remote_src: yes
+ register: unarchive03
+
+- name: verify that the file was marked as changed
+ assert:
+ that:
+ - "unarchive03.changed == true"
+ # Verify that file list is generated
+ - "'files' in unarchive03"
+ - "{{unarchive03['files']| length}} == 3"
+ - "'foo-unarchive.txt' in unarchive03['files']"
+ - "'foo-unarchive-777.txt' in unarchive03['files']"
+ - "'FOO-UNAR.TXT' in unarchive03['files']"
+
+- name: verify that the file was unarchived
+ file:
+ path: '{{remote_tmp_dir}}/test-unarchive-zip/{{item}}'
+ state: file
+ with_items:
+ - foo-unarchive.txt
+ - foo-unarchive-777.txt
+ - FOO-UNAR.TXT
+
+- name: repeat the last request to verify no changes
+ unarchive:
+ src: '{{remote_tmp_dir}}/test-unarchive.zip'
+ dest: '{{remote_tmp_dir}}/test-unarchive-zip'
+ list_files: true
+ remote_src: true
+ register: unarchive03b
+
+- name: verify that the task was not marked as changed
+ assert:
+ that:
+ - "unarchive03b.changed == false"
diff --git a/test/integration/targets/undefined/aliases b/test/integration/targets/undefined/aliases
new file mode 100644
index 00000000..70a7b7a9
--- /dev/null
+++ b/test/integration/targets/undefined/aliases
@@ -0,0 +1 @@
+shippable/posix/group5
diff --git a/test/integration/targets/undefined/tasks/main.yml b/test/integration/targets/undefined/tasks/main.yml
new file mode 100644
index 00000000..de6681a0
--- /dev/null
+++ b/test/integration/targets/undefined/tasks/main.yml
@@ -0,0 +1,18 @@
+- when: lookup('pipe', ansible_playbook_python ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=')
+ block:
+ - set_fact:
+ names: '{{ things|map(attribute="name") }}'
+ vars:
+ things:
+ - name: one
+ - name: two
+ - notname: three
+ - name: four
+
+ - assert:
+ that:
+ - '"%r"|format(undef) == "AnsibleUndefined"'
+ # The existence of AnsibleUndefined in a templating result
+ # prevents safe_eval from turning the value into a python object
+ - names is string
+ - '", AnsibleUndefined," in names'
diff --git a/test/integration/targets/unicode/aliases b/test/integration/targets/unicode/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/unicode/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/unicode/inventory b/test/integration/targets/unicode/inventory
new file mode 100644
index 00000000..11b35606
--- /dev/null
+++ b/test/integration/targets/unicode/inventory
@@ -0,0 +1,5 @@
+[local]
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+
+[all:vars]
+unicode_host_var=CaféEñyei
diff --git a/test/integration/targets/unicode/křížek-ansible-project/ansible.cfg b/test/integration/targets/unicode/křížek-ansible-project/ansible.cfg
new file mode 100644
index 00000000..6775889f
--- /dev/null
+++ b/test/integration/targets/unicode/křížek-ansible-project/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+library=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:.
diff --git a/test/integration/targets/unicode/runme.sh b/test/integration/targets/unicode/runme.sh
new file mode 100755
index 00000000..aa14783b
--- /dev/null
+++ b/test/integration/targets/unicode/runme.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook unicode.yml -i inventory -v -e 'extra_var=café' "$@"
+# Test the start-at-task flag #9571
+ANSIBLE_HOST_PATTERN_MISMATCH=warning ansible-playbook unicode.yml -i inventory -v --start-at-task '*¶' -e 'start_at_task=True' "$@"
+
+# Test --version works with non-ascii ansible project paths #66617
+# Unset these so values from the project dir are used
+unset ANSIBLE_CONFIG
+unset ANSIBLE_LIBRARY
+pushd křížek-ansible-project && ansible --version; popd
diff --git a/test/integration/targets/unicode/unicode-test-script b/test/integration/targets/unicode/unicode-test-script
new file mode 100755
index 00000000..340f2a9f
--- /dev/null
+++ b/test/integration/targets/unicode/unicode-test-script
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+echo "Non-ascii arguments:"
+echo $@
+
+echo "Non-ascii Env var:"
+echo $option
diff --git a/test/integration/targets/unicode/unicode.yml b/test/integration/targets/unicode/unicode.yml
new file mode 100644
index 00000000..672133d5
--- /dev/null
+++ b/test/integration/targets/unicode/unicode.yml
@@ -0,0 +1,149 @@
+---
+- name: 'A play with unicode: ¢ £ ¤ ¥'
+ hosts: localhost
+ vars:
+ test_var: 'Ī ī Ĭ ĭ Į į İ ı Ĳ ĳ Ĵ ĵ Ķ ķ ĸ Ĺ ĺ Ļ ļ Ľ ľ Ŀ ŀ Ł ł Ń ń Ņ ņ Ň ň ŉ Ŋ ŋ Ō ō Ŏ ŏ Ő ő Œ'
+ hostnames:
+ - 'host-ϬϭϮϯϰ'
+ - 'host-fóöbär'
+ - 'host-ΙΚΛΜΝΞ'
+ - 'host-στυφχψ'
+ - 'host-ϬϭϮϯϰϱ'
+
+ tasks:
+ - name: 'A task name with unicode: è é ê ë'
+ debug: msg='hi there'
+
+ - name: 'A task with unicode parameters'
+ debug: var=test_var
+
+ # € ‚ ƒ „ … † ‡ ˆ ‰ Š ‹ Œ Ž ‘ ’ “ ” • – — ˜ ™ š › œ ž Ÿ ¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬ ­ ®'
+
+ - name: 'A task using with_items containing unicode'
+ debug: msg='{{item}}'
+ with_items:
+ - '¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×'
+ - 'Ø Ù Ú Û Ü Ý Þ ß à á â ã ä å æ ç è é ê ë ì í î ï ð ñ ò ó ô õ ö ÷ ø ù ú û ü ý þ ÿ Ā'
+ - 'ā Ă ă Ą ą Ć ć Ĉ ĉ Ċ ċ Č č Ď ď Đ đ Ē ē Ĕ ĕ Ė ė Ę ę Ě ě Ĝ ĝ Ğ ğ Ġ ġ Ģ ģ Ĥ ĥ Ħ ħ Ĩ ĩ'
+
+ - add_host:
+ name: '{{item}}'
+ groups: 'ĪīĬĭ'
+ ansible_ssh_host: 127.0.0.1
+ ansible_connection: local
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ with_items: "{{ hostnames }}"
+
+ - name: 'A task with unicode extra vars'
+ debug: var=extra_var
+
+ - name: 'A task with unicode host vars'
+ debug: var=unicode_host_var
+
+ - name: 'A task with unicode shell parameters'
+ shell: echo '¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×'
+ register: output
+
+ - name: 'Assert that the unicode was echoed'
+ assert:
+ that:
+ - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À à Â Ã Ä Å Æ Ç È É Ê Ë Ì à Î à à Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines"
+
+ - name: Run raw with non-ascii options
+ raw: "/bin/echo Zażółć gęślą jaźń"
+ register: results
+
+ - name: Check that raw output the right thing
+ assert:
+ that:
+ - "'Zażółć gęślą jaźń' in results.stdout_lines"
+
+ - name: Run a script with non-ascii options and environment
+ script: unicode-test-script --option "Zażółć gęślą jaźń"
+ environment:
+ option: Zażółć
+ register: results
+
+ - name: Check that script output includes the nonascii arguments and environment values
+ assert:
+ that:
+ - "'--option Zażółć gęślą jaźń' in results.stdout_lines"
+ - "'Zażółć' in results.stdout_lines"
+
+ - name: Ping with non-ascii environment variable and option
+ ping:
+ data: "Zażółć gęślą jaźń"
+ environment:
+ option: Zażółć
+ register: results
+
+ - name: Check that ping with non-ascii data was correct
+ assert:
+ that:
+ - "'Zażółć gęślą jaźń' == results.ping"
+
+ - name: Command that echos a non-ascii env var
+ command: "echo $option"
+ environment:
+ option: Zażółć
+ register: results
+
+ - name: Check that a non-ascii env var was passed to the command module
+ assert:
+ that:
+ - "'Zażółć' in results.stdout_lines"
+
+ - name: Clean a temp directory
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put
+ state: absent
+
+ - name: Create a temp directory
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put
+ state: directory
+
+ - name: Create a file with a non-ascii filename
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put/Zażółć
+ state: touch
+ delegate_to: localhost
+
+ - name: Put with unicode filename
+ copy:
+ src: /var/tmp/ansible_test_unicode_get_put/Zażółć
+ dest: /var/tmp/ansible_test_unicode_get_put/Zażółć2
+
+ - name: Fetch with unicode filename
+ fetch:
+ src: /var/tmp/ansible_test_unicode_get_put/Zażółć2
+ dest: /var/tmp/ansible_test_unicode_get_put/
+
+ - name: Clean a temp directory
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put
+ state: absent
+
+- name: 'A play for hosts in group: ĪīĬĭ'
+ hosts: 'ĪīĬĭ'
+ gather_facts: true
+ tasks:
+ - debug: msg='Unicode is a good thing ™'
+ - debug: msg=АБВГД
+
+# Run this test by adding to the CLI: -e start_at_task=True --start-at-task '*¶'
+- name: 'Show that we can skip to unicode named tasks'
+ hosts: localhost
+ gather_facts: false
+ vars:
+ flag: 'original'
+ start_at_task: False
+ tasks:
+ - name: 'Override flag var'
+ set_fact: flag='new'
+
+ - name: 'A unicode task at the end of the playbook: ¶'
+ assert:
+ that:
+ - 'flag == "original"'
+ when: start_at_task|bool
diff --git a/test/integration/targets/until/aliases b/test/integration/targets/until/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/test/integration/targets/until/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/test/integration/targets/until/tasks/main.yml b/test/integration/targets/until/tasks/main.yml
new file mode 100644
index 00000000..4a09ff3b
--- /dev/null
+++ b/test/integration/targets/until/tasks/main.yml
@@ -0,0 +1,73 @@
+- shell: '{{ ansible_python.executable }} -c "import tempfile; print(tempfile.mkstemp()[1])"'
+ register: tempfilepath
+
+- set_fact:
+ until_tempfile_path: "{{ tempfilepath.stdout }}"
+
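+# Each loop iteration appends one "run" line to the temp file, so the file's
+# word/line count doubles as a counter of how many times the task ran.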
+- name: loop with default retries
+ shell: echo "run" >> {{ until_tempfile_path }} && wc -w < {{ until_tempfile_path }} | tr -d ' '
+ register: runcount
+ until: runcount.stdout | int == 3
+ delay: 0.01
+
+- assert:
+ that: runcount.stdout | int == 3
+
+- file: path="{{ until_tempfile_path }}" state=absent
+
+- name: loop with specified max retries
+ shell: echo "run" >> {{ until_tempfile_path }}
+ until: 1==0
+ retries: 5
+ delay: 0.01
+ ignore_errors: true
+
+- name: validate output
+ shell: wc -l < {{ until_tempfile_path }}
+ register: runcount
+
+- assert:
+ that: runcount.stdout | int == 6 # initial + 5 retries
+
+- file:
+ path: "{{ until_tempfile_path }}"
+ state: absent
+
+- name: Test failed_when impacting until
+ shell: 'true'
+ register: failed_when_until
+ failed_when: True
+ until: failed_when_until is successful
+ retries: 3
+ delay: 0.5
+ ignore_errors: True
+
+- assert:
+ that:
+ - failed_when_until.attempts == 3
+
+- name: Test changed_when impacting until
+ shell: 'true'
+ register: changed_when_until
+ changed_when: False
+ until: changed_when_until is changed
+ retries: 3
+ delay: 0.5
+ ignore_errors: True
+
+- assert:
+ that:
+ - changed_when_until.attempts == 3
+
+# This task shouldn't fail; previously, .attempts was not available to changed_when/failed_when
+# and the conditional would fail with ``'dict object' has no attribute 'attempts'``
+# https://github.com/ansible/ansible/issues/34139
+- name: Test access to attempts in changed_when/failed_when
+ shell: 'true'
+ register: changed_when_attempts
+ until: 1 == 0
+ retries: 5
+ delay: 0.5
+ failed_when: changed_when_attempts.attempts > 6
diff --git a/test/integration/targets/uri/aliases b/test/integration/targets/uri/aliases
new file mode 100644
index 00000000..11e91ee7
--- /dev/null
+++ b/test/integration/targets/uri/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group4
+needs/httptester
+skip/aix
diff --git a/test/integration/targets/uri/files/README b/test/integration/targets/uri/files/README
new file mode 100644
index 00000000..ef779126
--- /dev/null
+++ b/test/integration/targets/uri/files/README
@@ -0,0 +1,18 @@
+The files were taken from http://www.json.org/JSON_checker/
+> If the JSON_checker is working correctly, it must accept all of the pass*.json files and reject all of the fail*.json files.
+
+Differences from the JSON_checker dataset:
+ - *${n}.json renamed to *${n-1}.json to be 0-based
+ - fail0.json renamed to pass3.json as the python json module allows a JSON payload to be a bare string (see the snippet below)
+ - fail17.json renamed to pass4.json as the python json module has no problem with deep structures
+ - fail32.json renamed to fail0.json to fill the gap
+ - fail31.json renamed to fail17.json to fill the gap
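+
+A quick illustration (not from the upstream dataset) of why those two files
+pass here: python's json module accepts both constructs.
+
+    import json
+    # a bare string is a valid top-level payload for python's json module
+    json.loads('"A JSON payload should be an object or array, not a string."')
+    # twenty levels of nesting parse without hitting a depth limit
+    json.loads('[' * 20 + '"Too deep"' + ']' * 20)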
diff --git a/test/integration/targets/uri/files/fail0.json b/test/integration/targets/uri/files/fail0.json
new file mode 100644
index 00000000..ca5eb19d
--- /dev/null
+++ b/test/integration/targets/uri/files/fail0.json
@@ -0,0 +1 @@
+["mismatch"} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail1.json b/test/integration/targets/uri/files/fail1.json
new file mode 100644
index 00000000..6b7c11e5
--- /dev/null
+++ b/test/integration/targets/uri/files/fail1.json
@@ -0,0 +1 @@
+["Unclosed array" \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail10.json b/test/integration/targets/uri/files/fail10.json
new file mode 100644
index 00000000..76eb95b4
--- /dev/null
+++ b/test/integration/targets/uri/files/fail10.json
@@ -0,0 +1 @@
+{"Illegal expression": 1 + 2} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail11.json b/test/integration/targets/uri/files/fail11.json
new file mode 100644
index 00000000..77580a45
--- /dev/null
+++ b/test/integration/targets/uri/files/fail11.json
@@ -0,0 +1 @@
+{"Illegal invocation": alert()} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail12.json b/test/integration/targets/uri/files/fail12.json
new file mode 100644
index 00000000..379406b5
--- /dev/null
+++ b/test/integration/targets/uri/files/fail12.json
@@ -0,0 +1 @@
+{"Numbers cannot have leading zeroes": 013} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail13.json b/test/integration/targets/uri/files/fail13.json
new file mode 100644
index 00000000..0ed366b3
--- /dev/null
+++ b/test/integration/targets/uri/files/fail13.json
@@ -0,0 +1 @@
+{"Numbers cannot be hex": 0x14} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail14.json b/test/integration/targets/uri/files/fail14.json
new file mode 100644
index 00000000..fc8376b6
--- /dev/null
+++ b/test/integration/targets/uri/files/fail14.json
@@ -0,0 +1 @@
+["Illegal backslash escape: \x15"] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail15.json b/test/integration/targets/uri/files/fail15.json
new file mode 100644
index 00000000..3fe21d4b
--- /dev/null
+++ b/test/integration/targets/uri/files/fail15.json
@@ -0,0 +1 @@
+[\naked] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail16.json b/test/integration/targets/uri/files/fail16.json
new file mode 100644
index 00000000..62b9214a
--- /dev/null
+++ b/test/integration/targets/uri/files/fail16.json
@@ -0,0 +1 @@
+["Illegal backslash escape: \017"] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail17.json b/test/integration/targets/uri/files/fail17.json
new file mode 100644
index 00000000..45cba739
--- /dev/null
+++ b/test/integration/targets/uri/files/fail17.json
@@ -0,0 +1 @@
+{"Comma instead if closing brace": true, \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail18.json b/test/integration/targets/uri/files/fail18.json
new file mode 100644
index 00000000..3b9c46fa
--- /dev/null
+++ b/test/integration/targets/uri/files/fail18.json
@@ -0,0 +1 @@
+{"Missing colon" null} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail19.json b/test/integration/targets/uri/files/fail19.json
new file mode 100644
index 00000000..27c1af3e
--- /dev/null
+++ b/test/integration/targets/uri/files/fail19.json
@@ -0,0 +1 @@
+{"Double colon":: null} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail2.json b/test/integration/targets/uri/files/fail2.json
new file mode 100644
index 00000000..168c81eb
--- /dev/null
+++ b/test/integration/targets/uri/files/fail2.json
@@ -0,0 +1 @@
+{unquoted_key: "keys must be quoted"} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail20.json b/test/integration/targets/uri/files/fail20.json
new file mode 100644
index 00000000..62474573
--- /dev/null
+++ b/test/integration/targets/uri/files/fail20.json
@@ -0,0 +1 @@
+{"Comma instead of colon", null} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail21.json b/test/integration/targets/uri/files/fail21.json
new file mode 100644
index 00000000..a7752581
--- /dev/null
+++ b/test/integration/targets/uri/files/fail21.json
@@ -0,0 +1 @@
+["Colon instead of comma": false] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail22.json b/test/integration/targets/uri/files/fail22.json
new file mode 100644
index 00000000..494add1c
--- /dev/null
+++ b/test/integration/targets/uri/files/fail22.json
@@ -0,0 +1 @@
+["Bad value", truth] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail23.json b/test/integration/targets/uri/files/fail23.json
new file mode 100644
index 00000000..caff239b
--- /dev/null
+++ b/test/integration/targets/uri/files/fail23.json
@@ -0,0 +1 @@
+['single quote'] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail24.json b/test/integration/targets/uri/files/fail24.json
new file mode 100644
index 00000000..8b7ad23e
--- /dev/null
+++ b/test/integration/targets/uri/files/fail24.json
@@ -0,0 +1 @@
+[" tab character in string "] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail25.json b/test/integration/targets/uri/files/fail25.json
new file mode 100644
index 00000000..845d26a6
--- /dev/null
+++ b/test/integration/targets/uri/files/fail25.json
@@ -0,0 +1 @@
+["tab\ character\ in\ string\ "] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail26.json b/test/integration/targets/uri/files/fail26.json
new file mode 100644
index 00000000..6b01a2ca
--- /dev/null
+++ b/test/integration/targets/uri/files/fail26.json
@@ -0,0 +1,2 @@
+["line
+break"] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail27.json b/test/integration/targets/uri/files/fail27.json
new file mode 100644
index 00000000..621a0101
--- /dev/null
+++ b/test/integration/targets/uri/files/fail27.json
@@ -0,0 +1,2 @@
+["line\
+break"] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail28.json b/test/integration/targets/uri/files/fail28.json
new file mode 100644
index 00000000..47ec421b
--- /dev/null
+++ b/test/integration/targets/uri/files/fail28.json
@@ -0,0 +1 @@
+[0e] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail29.json b/test/integration/targets/uri/files/fail29.json
new file mode 100644
index 00000000..8ab0bc4b
--- /dev/null
+++ b/test/integration/targets/uri/files/fail29.json
@@ -0,0 +1 @@
+[0e+] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail3.json b/test/integration/targets/uri/files/fail3.json
new file mode 100644
index 00000000..9de168bf
--- /dev/null
+++ b/test/integration/targets/uri/files/fail3.json
@@ -0,0 +1 @@
+["extra comma",] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail30.json b/test/integration/targets/uri/files/fail30.json
new file mode 100644
index 00000000..1cce602b
--- /dev/null
+++ b/test/integration/targets/uri/files/fail30.json
@@ -0,0 +1 @@
+[0e+-1] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail4.json b/test/integration/targets/uri/files/fail4.json
new file mode 100644
index 00000000..ddf3ce3d
--- /dev/null
+++ b/test/integration/targets/uri/files/fail4.json
@@ -0,0 +1 @@
+["double extra comma",,] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail5.json b/test/integration/targets/uri/files/fail5.json
new file mode 100644
index 00000000..ed91580e
--- /dev/null
+++ b/test/integration/targets/uri/files/fail5.json
@@ -0,0 +1 @@
+[ , "<-- missing value"] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail6.json b/test/integration/targets/uri/files/fail6.json
new file mode 100644
index 00000000..8a96af3e
--- /dev/null
+++ b/test/integration/targets/uri/files/fail6.json
@@ -0,0 +1 @@
+["Comma after the close"], \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail7.json b/test/integration/targets/uri/files/fail7.json
new file mode 100644
index 00000000..b28479c6
--- /dev/null
+++ b/test/integration/targets/uri/files/fail7.json
@@ -0,0 +1 @@
+["Extra close"]] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail8.json b/test/integration/targets/uri/files/fail8.json
new file mode 100644
index 00000000..5815574f
--- /dev/null
+++ b/test/integration/targets/uri/files/fail8.json
@@ -0,0 +1 @@
+{"Extra comma": true,} \ No newline at end of file
diff --git a/test/integration/targets/uri/files/fail9.json b/test/integration/targets/uri/files/fail9.json
new file mode 100644
index 00000000..5d8c0047
--- /dev/null
+++ b/test/integration/targets/uri/files/fail9.json
@@ -0,0 +1 @@
+{"Extra value after close": true} "misplaced quoted value" \ No newline at end of file
diff --git a/test/integration/targets/uri/files/formdata.txt b/test/integration/targets/uri/files/formdata.txt
new file mode 100644
index 00000000..974c0f97
--- /dev/null
+++ b/test/integration/targets/uri/files/formdata.txt
@@ -0,0 +1 @@
+_multipart/form-data_
diff --git a/test/integration/targets/uri/files/pass0.json b/test/integration/targets/uri/files/pass0.json
new file mode 100644
index 00000000..70e26854
--- /dev/null
+++ b/test/integration/targets/uri/files/pass0.json
@@ -0,0 +1,58 @@
+[
+ "JSON Test Pattern pass1",
+ {"object with 1 member":["array with 1 element"]},
+ {},
+ [],
+ -42,
+ true,
+ false,
+ null,
+ {
+ "integer": 1234567890,
+ "real": -9876.543210,
+ "e": 0.123456789e-12,
+ "E": 1.234567890E+34,
+ "": 23456789012E66,
+ "zero": 0,
+ "one": 1,
+ "space": " ",
+ "quote": "\"",
+ "backslash": "\\",
+ "controls": "\b\f\n\r\t",
+ "slash": "/ & \/",
+ "alpha": "abcdefghijklmnopqrstuvwyz",
+ "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
+ "digit": "0123456789",
+ "0123456789": "digit",
+ "special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
+ "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
+ "true": true,
+ "false": false,
+ "null": null,
+ "array":[ ],
+ "object":{ },
+ "address": "50 St. James Street",
+ "url": "http://www.JSON.org/",
+ "comment": "// /* <!-- --",
+ "# -- --> */": " ",
+ " s p a c e d " :[1,2 , 3
+
+,
+
+4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7],
+ "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
+ "quotes": "&#34; \u0022 %22 0x22 034 &#x22;",
+ "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
+: "A key can be any string"
+ },
+ 0.5 ,98.6
+,
+99.44
+,
+
+1066,
+1e1,
+0.1e1,
+1e-1,
+1e00,2e+00,2e-00
+,"rosebud"] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/pass1.json b/test/integration/targets/uri/files/pass1.json
new file mode 100644
index 00000000..d3c63c7a
--- /dev/null
+++ b/test/integration/targets/uri/files/pass1.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/pass2.json b/test/integration/targets/uri/files/pass2.json
new file mode 100644
index 00000000..4528d51f
--- /dev/null
+++ b/test/integration/targets/uri/files/pass2.json
@@ -0,0 +1,6 @@
+{
+ "JSON Test Pattern pass3": {
+ "The outermost value": "must be an object or array.",
+ "In this test": "It is an object."
+ }
+}
diff --git a/test/integration/targets/uri/files/pass3.json b/test/integration/targets/uri/files/pass3.json
new file mode 100644
index 00000000..6216b865
--- /dev/null
+++ b/test/integration/targets/uri/files/pass3.json
@@ -0,0 +1 @@
+"A JSON payload should be an object or array, not a string." \ No newline at end of file
diff --git a/test/integration/targets/uri/files/pass4.json b/test/integration/targets/uri/files/pass4.json
new file mode 100644
index 00000000..edac9271
--- /dev/null
+++ b/test/integration/targets/uri/files/pass4.json
@@ -0,0 +1 @@
+[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]] \ No newline at end of file
diff --git a/test/integration/targets/uri/files/testserver.py b/test/integration/targets/uri/files/testserver.py
new file mode 100644
index 00000000..81043b66
--- /dev/null
+++ b/test/integration/targets/uri/files/testserver.py
@@ -0,0 +1,23 @@
+import sys
+
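+# Minimal static file server for the uri test target: serves the current
+# working directory on the port given as argv[1], and registers the .json
+# extension so the fixtures are served with an application/json Content-Type.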
+if __name__ == '__main__':
+ if sys.version_info[0] >= 3:
+ import http.server
+ import socketserver
+ PORT = int(sys.argv[1])
+
+ class Handler(http.server.SimpleHTTPRequestHandler):
+ pass
+
+ Handler.extensions_map['.json'] = 'application/json'
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ httpd.serve_forever()
+ else:
+ import mimetypes
+ mimetypes.init()
+ mimetypes.add_type('application/json', '.json')
+ import SimpleHTTPServer
+ SimpleHTTPServer.test()
diff --git a/test/integration/targets/uri/meta/main.yml b/test/integration/targets/uri/meta/main.yml
new file mode 100644
index 00000000..39b94950
--- /dev/null
+++ b/test/integration/targets/uri/meta/main.yml
@@ -0,0 +1,5 @@
+dependencies:
+ - prepare_tests
+ - prepare_http_tests
+ - setup_remote_tmp_dir
+ - setup_remote_constraints
diff --git a/test/integration/targets/uri/tasks/main.yml b/test/integration/targets/uri/tasks/main.yml
new file mode 100644
index 00000000..409607af
--- /dev/null
+++ b/test/integration/targets/uri/tasks/main.yml
@@ -0,0 +1,602 @@
+# test code for the uri module
+# (c) 2014, Leonid Evdokimov <leon@darkk.net.ru>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+- name: set role facts
+ set_fact:
+ http_port: 15260
+ files_dir: '{{ output_dir|expanduser }}/files'
+ checkout_dir: '{{ output_dir }}/git'
+
+- name: create a directory to serve files from
+ file:
+ dest: "{{ files_dir }}"
+ state: directory
+
+- copy:
+ src: "{{ item }}"
+ dest: "{{ files_dir }}/{{ item }}"
+ with_sequence: start=0 end=4 format=pass%d.json
+
+- copy:
+ src: "{{ item }}"
+ dest: "{{ files_dir }}/{{ item }}"
+ with_sequence: start=0 end=30 format=fail%d.json
+
+- copy:
+ src: "testserver.py"
+ dest: "{{ output_dir }}/testserver.py"
+
+- name: start SimpleHTTPServer
+ shell: cd {{ files_dir }} && {{ ansible_python.executable }} {{ output_dir }}/testserver.py {{ http_port }}
+ async: 120 # this test set can take ~1m to run on FreeBSD (via Shippable)
+ poll: 0
+
+- wait_for: port={{ http_port }}
+
+
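+# Every pass*.json fixture must be fetched intact and parsed (a 'json' field
+# is present on the result); every fail*.json must be fetched intact but left unparsed.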
+- name: checksum pass_json
+ stat: path={{ files_dir }}/{{ item }}.json get_checksum=yes
+ register: pass_checksum
+ with_sequence: start=0 end=4 format=pass%d
+
+- name: fetch pass_json
+ uri: return_content=yes url=http://localhost:{{ http_port }}/{{ item }}.json
+ register: fetch_pass_json
+ with_sequence: start=0 end=4 format=pass%d
+
+- name: check pass_json
+ assert:
+ that:
+ - '"json" in item.1'
+ - item.0.stat.checksum == item.1.content | checksum
+ with_together:
+ - "{{pass_checksum.results}}"
+ - "{{fetch_pass_json.results}}"
+
+
+- name: checksum fail_json
+ stat: path={{ files_dir }}/{{ item }}.json get_checksum=yes
+ register: fail_checksum
+ with_sequence: start=0 end=30 format=fail%d
+
+- name: fetch fail_json
+ uri: return_content=yes url=http://localhost:{{ http_port }}/{{ item }}.json
+ register: fail
+ with_sequence: start=0 end=30 format=fail%d
+
+- name: check fail_json
+ assert:
+ that:
+ - item.0.stat.checksum == item.1.content | checksum
+ - '"json" not in item.1'
+ with_together:
+ - "{{fail_checksum.results}}"
+ - "{{fail.results}}"
+
+- name: test https fetch to a site with mismatched hostname and certificate
+ uri:
+ url: "https://{{ badssl_host }}/"
+ dest: "{{ output_dir }}/shouldnotexist.html"
+ ignore_errors: True
+ register: result
+
+- stat:
+ path: "{{ output_dir }}/shouldnotexist.html"
+ register: stat_result
+
+- name: Assert that the file was not downloaded
+ assert:
+ that:
+ - result.failed == true
+ - "'Failed to validate the SSL certificate' in result.msg or 'Hostname mismatch' in result.msg or (result.msg is match('hostname .* doesn.t match .*'))"
+ - stat_result.stat.exists == false
+ - result.status is defined
+ - result.status == -1
+ - result.url == 'https://' ~ badssl_host ~ '/'
+
+- name: Clean up any cruft from the results directory
+ file:
+ name: "{{ output_dir }}/kreitz.html"
+ state: absent
+
+- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no
+ uri:
+ url: "https://{{ badssl_host }}/"
+ dest: "{{ output_dir }}/kreitz.html"
+ validate_certs: no
+ register: result
+
+- stat:
+ path: "{{ output_dir }}/kreitz.html"
+ register: stat_result
+
+- name: Assert that the file was downloaded
+ assert:
+ that:
+ - "stat_result.stat.exists == true"
+ - "result.changed == true"
+
+- name: test redirect without follow_redirects
+ uri:
+ url: 'https://{{ httpbin_host }}/redirect/2'
+ follow_redirects: 'none'
+ status_code: 302
+ register: result
+
+- name: Assert location header
+ assert:
+ that:
+ - 'result.location|default("") == "https://{{ httpbin_host }}/relative-redirect/1"'
+
+- name: Check SSL with redirect
+ uri:
+ url: 'https://{{ httpbin_host }}/redirect/2'
+ register: result
+
+- name: Assert SSL with redirect
+ assert:
+ that:
+ - 'result.url|default("") == "https://{{ httpbin_host }}/get"'
+
+- name: redirect to bad SSL site
+ uri:
+ url: 'http://{{ badssl_host }}'
+ register: result
+ ignore_errors: true
+
+- name: Ensure bad SSL site redirect fails
+ assert:
+ that:
+ - result is failed
+ - 'badssl_host in result.msg'
+
+- name: test basic auth
+ uri:
+ url: 'https://{{ httpbin_host }}/basic-auth/user/passwd'
+ user: user
+ password: passwd
+
+- name: test basic forced auth
+ uri:
+ url: 'https://{{ httpbin_host }}/hidden-basic-auth/user/passwd'
+ force_basic_auth: true
+ user: user
+ password: passwd
+
+- name: test digest auth
+ uri:
+ url: 'https://{{ httpbin_host }}/digest-auth/auth/user/passwd'
+ user: user
+ password: passwd
+ headers:
+ Cookie: "fake=fake_value"
+
+- name: test PUT
+ uri:
+ url: 'https://{{ httpbin_host }}/put'
+ method: PUT
+ body: 'foo=bar'
+
+- name: test OPTIONS
+ uri:
+ url: 'https://{{ httpbin_host }}/'
+ method: OPTIONS
+ register: result
+
+- name: Assert we got an allow header
+ assert:
+ that:
+ - 'result.allow.split(", ")|sort == ["GET", "HEAD", "OPTIONS"]'
+
+# Ubuntu 12.04 doesn't have python-urllib3, which makes handling the required dependencies a pain across all variations.
+# We'll use this fact to skip 12.04 on those tests; the other OSes and versions should give us sufficient coverage.
+- name: Set fact if running on Ubuntu 12.04
+ set_fact:
+ is_ubuntu_precise: "{{ ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'precise' }}"
+
+- name: Test that SNI succeeds on python versions that have SNI
+ uri:
+ url: 'https://{{ sni_host }}/'
+ return_content: true
+ when: ansible_python.has_sslcontext
+ register: result
+
+- name: Assert SNI verification succeeds on new python
+ assert:
+ that:
+ - result is successful
+ - 'sni_host in result.content'
+ when: ansible_python.has_sslcontext
+
+- name: Verify SNI verification fails on old python without urllib3 contrib
+ uri:
+ url: 'https://{{ sni_host }}'
+ ignore_errors: true
+ when: not ansible_python.has_sslcontext
+ register: result
+
+- name: Assert SNI verification fails on old python
+ assert:
+ that:
+ - result is failed
+ when: result is not skipped
+
+- name: check if urllib3 is installed as an OS package
+ package:
+ name: "{{ uri_os_packages[ansible_os_family].urllib3 }}"
+ check_mode: yes
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool and uri_os_packages[ansible_os_family].urllib3|default
+ register: urllib3
+
+- name: uninstall conflicting urllib3 pip package
+ pip:
+ name: urllib3
+ state: absent
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool and uri_os_packages[ansible_os_family].urllib3|default and urllib3.changed
+
+- name: install OS packages that are needed for SNI on old python
+ package:
+ name: "{{ item }}"
+ with_items: "{{ uri_os_packages[ansible_os_family].step1 | default([]) }}"
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: install python modules for older Python SNI verification
+ pip:
+ name: "{{ item }}"
+ with_items:
+ - ndg-httpsclient
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: Verify SNI verification succeeds on old python with urllib3 contrib
+ uri:
+ url: 'https://{{ sni_host }}'
+ return_content: true
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+ register: result
+
+- name: Assert SNI verification succeeds on old python
+ assert:
+ that:
+ - result is successful
+ - 'sni_host in result.content'
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: Uninstall ndg-httpsclient
+ pip:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - ndg-httpsclient
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: uninstall OS packages that are needed for SNI on old python
+ package:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ uri_os_packages[ansible_os_family].step1 | default([]) }}"
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: install OS packages that are needed for building cryptography
+ package:
+ name: "{{ item }}"
+ with_items: "{{ uri_os_packages[ansible_os_family].step2 | default([]) }}"
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: install urllib3 and pyopenssl via pip
+ pip:
+ name: "{{ item }}"
+ state: latest
+ extra_args: "-c {{ remote_constraints }}"
+ with_items:
+ - urllib3
+ - PyOpenSSL
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: Verify SNI verification succeeds on old python with pip urllib3 contrib
+ uri:
+ url: 'https://{{ sni_host }}'
+ return_content: true
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+ register: result
+
+- name: Assert SNI verification succeeds on old python with pip urllib3 contrib
+ assert:
+ that:
+ - result is successful
+ - 'sni_host in result.content'
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: Uninstall urllib3 and PyOpenSSL
+ pip:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - urllib3
+ - PyOpenSSL
+ when: not ansible_python.has_sslcontext and not is_ubuntu_precise|bool
+
+- name: validate the status_codes are correct
+ uri:
+ url: "https://{{ httpbin_host }}/status/202"
+ status_code: 202
+ method: POST
+ body: foo
+
+- name: Validate body_format json does not override content-type in 2.3 or newer
+ uri:
+ url: "https://{{ httpbin_host }}/post"
+ method: POST
+ body:
+ foo: bar
+ body_format: json
+ headers:
+ 'Content-Type': 'text/json'
+ return_content: true
+ register: result
+ failed_when: result.json.headers['Content-Type'] != 'text/json'
+
+- name: Validate body_format form-urlencoded using dicts works
+ uri:
+ url: https://{{ httpbin_host }}/post
+ method: POST
+ body:
+ user: foo
+ password: bar!#@ |&82$M
+ submit: Sign in
+ body_format: form-urlencoded
+ return_content: yes
+ register: result
+
+- name: Assert form-urlencoded dict input
+ assert:
+ that:
+ - result is successful
+ - result.json.headers['Content-Type'] == 'application/x-www-form-urlencoded'
+ - result.json.form.password == 'bar!#@ |&82$M'
+
+- name: Validate body_format form-urlencoded using lists works
+ uri:
+ url: https://{{ httpbin_host }}/post
+ method: POST
+ body:
+ - [ user, foo ]
+ - [ password, bar!#@ |&82$M ]
+ - [ submit, Sign in ]
+ body_format: form-urlencoded
+ return_content: yes
+ register: result
+
+- name: Assert form-urlencoded list input
+ assert:
+ that:
+ - result is successful
+ - result.json.headers['Content-Type'] == 'application/x-www-form-urlencoded'
+ - result.json.form.password == 'bar!#@ |&82$M'
+
+- name: Validate body_format form-urlencoded of invalid input fails
+ uri:
+ url: https://{{ httpbin_host }}/post
+ method: POST
+ body:
+ - foo
+ - bar: baz
+ body_format: form-urlencoded
+ return_content: yes
+ register: result
+ ignore_errors: yes
+
+- name: Assert invalid input fails
+ assert:
+ that:
+ - result is failure
+ - "'failed to parse body as form_urlencoded: too many values to unpack' in result.msg"
+
+- name: multipart/form-data
+ uri:
+ url: https://{{ httpbin_host }}/post
+ method: POST
+ body_format: form-multipart
+ body:
+ file1:
+ filename: formdata.txt
+ file2:
+ content: text based file content
+ filename: fake.txt
+ mime_type: text/plain
+ text_form_field1: value1
+ text_form_field2:
+ content: value2
+ mime_type: text/plain
+ register: multipart
+
+- name: Assert multipart/form-data
+ assert:
+ that:
+ - multipart.json.files.file1 == '_multipart/form-data_\n'
+ - multipart.json.files.file2 == 'text based file content'
+ - multipart.json.form.text_form_field1 == 'value1'
+ - multipart.json.form.text_form_field2 == 'value2'
+
+
+- name: Validate invalid method
+ uri:
+ url: https://{{ httpbin_host }}/anything
+ method: UNKNOWN
+ register: result
+ ignore_errors: yes
+
+- name: Assert invalid method fails
+ assert:
+ that:
+ - result is failure
+ - result.status == 405
+ - "'METHOD NOT ALLOWED' in result.msg"
+
+- name: Test client cert auth, no certs
+ uri:
+ url: "https://ansible.http.tests/ssl_client_verify"
+ status_code: 200
+ return_content: true
+ register: result
+ failed_when: result.content != "ansible.http.tests:NONE"
+ when: has_httptester
+
+- name: Test client cert auth, with certs
+ uri:
+ url: "https://ansible.http.tests/ssl_client_verify"
+ client_cert: "{{ remote_tmp_dir }}/client.pem"
+ client_key: "{{ remote_tmp_dir }}/client.key"
+ return_content: true
+ register: result
+ failed_when: result.content != "ansible.http.tests:SUCCESS"
+ when: has_httptester
+
+- name: Test client cert auth, with no validation
+ uri:
+ url: "https://fail.ansible.http.tests/ssl_client_verify"
+ client_cert: "{{ remote_tmp_dir }}/client.pem"
+ client_key: "{{ remote_tmp_dir }}/client.key"
+ return_content: true
+ validate_certs: no
+ register: result
+ failed_when: result.content != "ansible.http.tests:SUCCESS"
+ when: has_httptester
+
+- name: Test client cert auth, with validation and ssl mismatch
+ uri:
+ url: "https://fail.ansible.http.tests/ssl_client_verify"
+ client_cert: "{{ remote_tmp_dir }}/client.pem"
+ client_key: "{{ remote_tmp_dir }}/client.key"
+ return_content: true
+ validate_certs: yes
+ register: result
+ failed_when: result is not failed
+ when: has_httptester
+
+- uri:
+ url: https://{{ httpbin_host }}/response-headers?Set-Cookie=Foo%3Dbar&Set-Cookie=Baz%3Dqux
+ register: result
+
+- assert:
+ that:
+ - result['set_cookie'] == 'Foo=bar, Baz=qux'
+ # Python sorts cookies in order of most specific (i.e. longest) path first;
+ # items with the same path are reversed from response order
+ - result['cookies_string'] == 'Baz=qux; Foo=bar'
+
+- name: Write out netrc template
+ template:
+ src: netrc.j2
+ dest: "{{ remote_tmp_dir }}/netrc"
+
+- name: Test netrc with port
+ uri:
+ url: "https://{{ httpbin_host }}:443/basic-auth/user/passwd"
+ environment:
+ NETRC: "{{ remote_tmp_dir }}/netrc"
+
+- name: Test JSON POST with src
+ uri:
+ url: "https://{{ httpbin_host}}/post"
+ src: pass0.json
+ method: POST
+ return_content: true
+ body_format: json
+ register: result
+
+- name: Validate POST with src works
+ assert:
+ that:
+ - result.json.json[0] == 'JSON Test Pattern pass1'
+
+- name: Copy file pass0.json to remote
+ copy:
+ src: "{{ role_path }}/files/pass0.json"
+ dest: "{{ remote_tmp_dir }}/pass0.json"
+
+- name: Test JSON POST with src and remote_src=True
+ uri:
+ url: "https://{{ httpbin_host}}/post"
+ src: "{{ remote_tmp_dir }}/pass0.json"
+ remote_src: true
+ method: POST
+ return_content: true
+ body_format: json
+ register: result
+
+- name: Validate POST with src and remote_src=True works
+ assert:
+ that:
+ - result.json.json[0] == 'JSON Test Pattern pass1'
+
+- name: Make request that includes password in JSON keys
+ uri:
+ url: "https://{{ httpbin_host}}/get?key-password=value-password"
+ user: admin
+ password: password
+ register: sanitize_keys
+
+- name: assert that keys were sanitized
+ assert:
+ that:
+ - sanitize_keys.json.args['key-********'] == 'value-********'
+
+- name: Create a testing file
+ copy:
+ content: "content"
+ dest: "{{ output_dir }}/output"
+
+- name: Download a file from a nonexistent location
+ uri:
+ url: http://does/not/exist
+ dest: "{{ output_dir }}/output"
+ ignore_errors: yes
+
+- name: Save testing file's output
+ command: "cat {{ output_dir }}/output"
+ register: file_out
+
+- name: Test that the testing file was not overwritten
+ assert:
+ that:
+ - "'content' in file_out.stdout"
+
+- name: Clean up
+ file:
+ dest: "{{ output_dir }}/output"
+ state: absent
+
+- name: Test follow_redirects=none
+ import_tasks: redirect-none.yml
+
+- name: Test follow_redirects=safe
+ import_tasks: redirect-safe.yml
+
+- name: Test follow_redirects=urllib2
+ import_tasks: redirect-urllib2.yml
+
+- name: Test follow_redirects=all
+ import_tasks: redirect-all.yml
+
+- name: Check unexpected failures
+ import_tasks: unexpected-failures.yml
+
+- name: Check return-content
+ import_tasks: return-content.yml
diff --git a/test/integration/targets/uri/tasks/redirect-all.yml b/test/integration/targets/uri/tasks/redirect-all.yml
new file mode 100644
index 00000000..d5b47a1c
--- /dev/null
+++ b/test/integration/targets/uri/tasks/redirect-all.yml
@@ -0,0 +1,272 @@
+- name: Test HTTP 301 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: HEAD
+ register: http_301_head
+
+- assert:
+ that:
+ - http_301_head is successful
+ - http_301_head.json is not defined
+ - http_301_head.redirected == true
+ - http_301_head.status == 200
+ - http_301_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 301 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: GET
+ register: http_301_get
+
+- assert:
+ that:
+ - http_301_get is successful
+ - http_301_get.json.data == ''
+ - http_301_get.json.method == 'GET'
+ - http_301_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_301_get.redirected == true
+ - http_301_get.status == 200
+ - http_301_get.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP POST turns into an HTTP GET: clients historically rewrite 301/302/303 redirects to GET, while 307/308 preserve the method and body
+- name: Test HTTP 301 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_301_post
+
+- assert:
+ that:
+ - http_301_post is successful
+ - http_301_post.json.data == ''
+ - http_301_post.json.method == 'GET'
+ - http_301_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_301_post.redirected == true
+ - http_301_post.status == 200
+ - http_301_post.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: HEAD
+ register: http_302_head
+
+- assert:
+ that:
+ - http_302_head is successful
+ - http_302_head.json is not defined
+ - http_302_head.redirected == true
+ - http_302_head.status == 200
+ - http_302_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: GET
+ register: http_302_get
+
+- assert:
+ that:
+ - http_302_get is successful
+ - http_302_get.json.data == ''
+ - http_302_get.json.method == 'GET'
+ - http_302_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_302_get.redirected == true
+ - http_302_get.status == 200
+ - http_302_get.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP POST turns into an HTTP GET
+- name: Test HTTP 302 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_302_post
+
+- assert:
+ that:
+ - http_302_post is successful
+ - http_302_post.json.data == ''
+ - http_302_post.json.method == 'GET'
+ - http_302_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_302_post.redirected == true
+ - http_302_post.status == 200
+ - http_302_post.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: HEAD
+ register: http_303_head
+
+- assert:
+ that:
+ - http_303_head is successful
+ - http_303_head.json is not defined
+ - http_303_head.redirected == true
+ - http_303_head.status == 200
+ - http_303_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: GET
+ register: http_303_get
+
+- assert:
+ that:
+ - http_303_get is successful
+ - http_303_get.json.data == ''
+ - http_303_get.json.method == 'GET'
+ - http_303_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_303_get.redirected == true
+ - http_303_get.status == 200
+ - http_303_get.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP POST turns into an HTTP GET
+- name: Test HTTP 303 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_303_post
+
+- assert:
+ that:
+ - http_303_post is successful
+ - http_303_post.json.data == ''
+ - http_303_post.json.method == 'GET'
+ - http_303_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_303_post.redirected == true
+ - http_303_post.status == 200
+ - http_303_post.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: HEAD
+ register: http_307_head
+
+- assert:
+ that:
+ - http_307_head is successful
+ - http_307_head.json is not defined
+ - http_307_head.redirected == true
+ - http_307_head.status == 200
+ - http_307_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: GET
+ register: http_307_get
+
+- assert:
+ that:
+ - http_307_get is successful
+ - http_307_get.json.data == ''
+ - http_307_get.json.method == 'GET'
+ - http_307_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_307_get.redirected == true
+ - http_307_get.status == 200
+ - http_307_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_307_post
+
+- assert:
+ that:
+ - http_307_post is successful
+ - http_307_post.json.json.foo == 'bar'
+ - http_307_post.json.method == 'POST'
+ - http_307_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_307_post.redirected == true
+ - http_307_post.status == 200
+ - http_307_post.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: HEAD
+ register: http_308_head
+
+- assert:
+ that:
+ - http_308_head is successful
+ - http_308_head.json is not defined
+ - http_308_head.redirected == true
+ - http_308_head.status == 200
+ - http_308_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: GET
+ register: http_308_get
+
+- assert:
+ that:
+ - http_308_get is successful
+ - http_308_get.json.data == ''
+ - http_308_get.json.method == 'GET'
+ - http_308_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_308_get.redirected == true
+ - http_308_get.status == 200
+ - http_308_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: all
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_308_post
+
+- assert:
+ that:
+ - http_308_post is successful
+ - http_308_post.json.json.foo == 'bar'
+ - http_308_post.json.method == 'POST'
+ - http_308_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_308_post.redirected == true
+ - http_308_post.status == 200
+ - http_308_post.url == 'https://{{ httpbin_host }}/anything'
diff --git a/test/integration/targets/uri/tasks/redirect-none.yml b/test/integration/targets/uri/tasks/redirect-none.yml
new file mode 100644
index 00000000..0f5ec68b
--- /dev/null
+++ b/test/integration/targets/uri/tasks/redirect-none.yml
@@ -0,0 +1,296 @@
+- name: Test HTTP 301 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: HEAD
+ ignore_errors: yes
+ register: http_301_head
+
+- assert:
+ that:
+ - http_301_head is failure
+ - http_301_head.json is not defined
+ - http_301_head.location == 'https://{{ httpbin_host }}/anything'
+ - "http_301_head.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'"
+ - http_301_head.redirected == false
+ - http_301_head.status == 301
+ - http_301_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 301 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: GET
+ ignore_errors: yes
+ register: http_301_get
+
+- assert:
+ that:
+ - http_301_get is failure
+ - http_301_get.json is not defined
+ - http_301_get.location == 'https://{{ httpbin_host }}/anything'
+ - "http_301_get.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'"
+ - http_301_get.redirected == false
+ - http_301_get.status == 301
+ - http_301_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 301 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_301_post
+
+- assert:
+ that:
+ - http_301_post is failure
+ - http_301_post.json is not defined
+ - http_301_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_301_post.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'"
+ - http_301_post.redirected == false
+ - http_301_post.status == 301
+ - http_301_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: HEAD
+ ignore_errors: yes
+ register: http_302_head
+
+- assert:
+ that:
+ - http_302_head is failure
+ - http_302_head.json is not defined
+ - http_302_head.location == 'https://{{ httpbin_host }}/anything'
+ - "http_302_head.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'"
+ - http_302_head.redirected == false
+ - http_302_head.status == 302
+ - http_302_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: GET
+ ignore_errors: yes
+ register: http_302_get
+
+- assert:
+ that:
+ - http_302_get is failure
+ - http_302_get.json is not defined
+ - http_302_get.location == 'https://{{ httpbin_host }}/anything'
+ - "http_302_get.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'"
+ - http_302_get.redirected == false
+ - http_302_get.status == 302
+ - http_302_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_302_post
+
+- assert:
+ that:
+ - http_302_post is failure
+ - http_302_post.json is not defined
+ - http_302_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_302_post.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'"
+ - http_302_post.redirected == false
+ - http_302_post.status == 302
+ - http_302_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: HEAD
+ ignore_errors: yes
+ register: http_303_head
+
+- assert:
+ that:
+ - http_303_head is failure
+ - http_303_head.json is not defined
+ - http_303_head.location == 'https://{{ httpbin_host }}/anything'
+ - "http_303_head.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'"
+ - http_303_head.redirected == false
+ - http_303_head.status == 303
+ - http_303_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: GET
+ ignore_errors: yes
+ register: http_303_get
+
+- assert:
+ that:
+ - http_303_get is failure
+ - http_303_get.json is not defined
+ - http_303_get.location == 'https://{{ httpbin_host }}/anything'
+ - "http_303_get.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'"
+ - http_303_get.redirected == false
+ - http_303_get.status == 303
+ - http_303_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_303_post
+
+- assert:
+ that:
+ - http_303_post is failure
+ - http_303_post.json is not defined
+ - http_303_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_303_post.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'"
+ - http_303_post.redirected == false
+ - http_303_post.status == 303
+ - http_303_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: HEAD
+ ignore_errors: yes
+ register: http_307_head
+
+- assert:
+ that:
+ - http_307_head is failure
+ - http_307_head.json is not defined
+ - http_307_head.location == 'https://{{ httpbin_host }}/anything'
+ - "http_307_head.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'"
+ - http_307_head.redirected == false
+ - http_307_head.status == 307
+ - http_307_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: GET
+ ignore_errors: yes
+ register: http_307_get
+
+- assert:
+ that:
+ - http_307_get is failure
+ - http_307_get.json is not defined
+ - http_307_get.location == 'https://{{ httpbin_host }}/anything'
+ - "http_307_get.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'"
+ - http_307_get.redirected == false
+ - http_307_get.status == 307
+ - http_307_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_307_post
+
+- assert:
+ that:
+ - http_307_post is failure
+ - http_307_post.json is not defined
+ - http_307_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_307_post.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'"
+ - http_307_post.redirected == false
+ - http_307_post.status == 307
+ - http_307_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything'
+
+# NOTE: This is a bug, fixed in https://github.com/ansible/ansible/pull/36809
+- name: Test HTTP 308 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: HEAD
+ ignore_errors: yes
+ register: http_308_head
+
+- assert:
+ that:
+ - http_308_head is failure
+ - http_308_head.json is not defined
+ - http_308_head.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_head.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_head.redirected == false
+ - http_308_head.status == 308
+ - http_308_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
+
+# NOTE: This is a bug, fixed in https://github.com/ansible/ansible/pull/36809
+- name: Test HTTP 308 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: GET
+ ignore_errors: yes
+ register: http_308_get
+
+- assert:
+ that:
+ - http_308_get is failure
+ - http_308_get.json is not defined
+ - http_308_get.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_get.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_get.redirected == false
+ - http_308_get.status == 308
+ - http_308_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: none
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_308_post
+
+- assert:
+ that:
+ - http_308_post is failure
+ - http_308_post.json is not defined
+ - http_308_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_post.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_post.redirected == false
+ - http_308_post.status == 308
+ - http_308_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
diff --git a/test/integration/targets/uri/tasks/redirect-safe.yml b/test/integration/targets/uri/tasks/redirect-safe.yml
new file mode 100644
index 00000000..c95dd5aa
--- /dev/null
+++ b/test/integration/targets/uri/tasks/redirect-safe.yml
@@ -0,0 +1,274 @@
+- name: Test HTTP 301 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: HEAD
+ register: http_301_head
+
+- assert:
+ that:
+ - http_301_head is successful
+ - http_301_head.json is not defined
+ - http_301_head.redirected == true
+ - http_301_head.status == 200
+ - http_301_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 301 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: GET
+ register: http_301_get
+
+- assert:
+ that:
+ - http_301_get is successful
+ - http_301_get.json.data == ''
+ - http_301_get.json.method == 'GET'
+ - http_301_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_301_get.redirected == true
+ - http_301_get.status == 200
+ - http_301_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 301 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_301_post
+
+- assert:
+ that:
+ - http_301_post is failure
+ - http_301_post.json is not defined
+ - http_301_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_301_post.msg == 'Status code was 301 and not [200]: HTTP Error 301: MOVED PERMANENTLY'"
+ - http_301_post.redirected == false
+ - http_301_post.status == 301
+ - http_301_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: HEAD
+ register: http_302_head
+
+- assert:
+ that:
+ - http_302_head is successful
+ - http_302_head.json is not defined
+ - http_302_head.redirected == true
+ - http_302_head.status == 200
+ - http_302_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: GET
+ register: http_302_get
+
+- assert:
+ that:
+ - http_302_get is successful
+ - http_302_get.json.data == ''
+ - http_302_get.json.method == 'GET'
+ - http_302_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_302_get.redirected == true
+ - http_302_get.status == 200
+ - http_302_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_302_post
+
+- assert:
+ that:
+ - http_302_post is failure
+ - http_302_post.json is not defined
+ - http_302_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_302_post.msg == 'Status code was 302 and not [200]: HTTP Error 302: FOUND'"
+ - http_302_post.redirected == false
+ - http_302_post.status == 302
+ - http_302_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: HEAD
+ register: http_303_head
+
+- assert:
+ that:
+ - http_303_head is successful
+ - http_303_head.json is not defined
+ - http_303_head.redirected == true
+ - http_303_head.status == 200
+ - http_303_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: GET
+ register: http_303_get
+
+- assert:
+ that:
+ - http_303_get is successful
+ - http_303_get.json.data == ''
+ - http_303_get.json.method == 'GET'
+ - http_303_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_303_get.redirected == true
+ - http_303_get.status == 200
+ - http_303_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_303_post
+
+- assert:
+ that:
+ - http_303_post is failure
+ - http_303_post.json is not defined
+ - http_303_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_303_post.msg == 'Status code was 303 and not [200]: HTTP Error 303: SEE OTHER'"
+ - http_303_post.redirected == false
+ - http_303_post.status == 303
+ - http_303_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: HEAD
+ register: http_307_head
+
+- assert:
+ that:
+ - http_307_head is successful
+ - http_307_head.json is not defined
+ - http_307_head.redirected == true
+ - http_307_head.status == 200
+ - http_307_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: GET
+ register: http_307_get
+
+- assert:
+ that:
+ - http_307_get is successful
+ - http_307_get.json.data == ''
+ - http_307_get.json.method == 'GET'
+ - http_307_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_307_get.redirected == true
+ - http_307_get.status == 200
+ - http_307_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_307_post
+
+- assert:
+ that:
+ - http_307_post is failure
+ - http_307_post.json is not defined
+ - http_307_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_307_post.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'"
+ - http_307_post.redirected == false
+ - http_307_post.status == 307
+ - http_307_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: HEAD
+ register: http_308_head
+
+- assert:
+ that:
+ - http_308_head is successful
+ - http_308_head.json is not defined
+ - http_308_head.redirected == true
+ - http_308_head.status == 200
+ - http_308_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: GET
+ register: http_308_get
+
+- assert:
+ that:
+ - http_308_get is successful
+ - http_308_get.json.data == ''
+ - http_308_get.json.method == 'GET'
+ - http_308_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_308_get.redirected == true
+ - http_308_get.status == 200
+ - http_308_get.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 308 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: safe
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_308_post
+
+- assert:
+ that:
+ - http_308_post is failure
+ - http_308_post.json is not defined
+ - http_308_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_post.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_post.redirected == false
+ - http_308_post.status == 308
+ - http_308_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
diff --git a/test/integration/targets/uri/tasks/redirect-urllib2.yml b/test/integration/targets/uri/tasks/redirect-urllib2.yml
new file mode 100644
index 00000000..10b115ee
--- /dev/null
+++ b/test/integration/targets/uri/tasks/redirect-urllib2.yml
@@ -0,0 +1,294 @@
+# NOTE: The HTTP HEAD turns into an HTTP GET
+- name: Test HTTP 301 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: HEAD
+ register: http_301_head
+
+- assert:
+ that:
+ - http_301_head is successful
+ - http_301_head.json.data == ''
+ - http_301_head.json.method == 'GET'
+ - http_301_head.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_301_head.redirected == true
+ - http_301_head.status == 200
+ - http_301_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 301 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: GET
+ register: http_301_get
+
+- assert:
+ that:
+ - http_301_get is successful
+ - http_301_get.json.data == ''
+ - http_301_get.json.method == 'GET'
+ - http_301_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_301_get.redirected == true
+ - http_301_get.status == 200
+ - http_301_get.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP POST turns into an HTTP GET
+- name: Test HTTP 301 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=301&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_301_post
+
+- assert:
+ that:
+ - http_301_post is successful
+ - http_301_post.json.data == ''
+ - http_301_post.json.method == 'GET'
+ - http_301_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_301_post.redirected == true
+ - http_301_post.status == 200
+ - http_301_post.url == 'https://{{ httpbin_host }}/anything'
+
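+# A rough sketch (not part of this suite) of why urllib behaves this way:
+# CPython's HTTPRedirectHandler.redirect_request() only follows 301/302/303
+# for POST and 301/302/303/307 for GET/HEAD, and it builds the follow-up
+# Request without data or an explicit method, so HEAD and POST both come
+# back as GET:
+#
+#   from urllib.request import HTTPRedirectHandler, Request
+#   from urllib.error import HTTPError
+#
+#   class Sketch(HTTPRedirectHandler):
+#       def redirect_request(self, req, fp, code, msg, headers, newurl):
+#           m = req.get_method()
+#           if not (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
+#                   or code in (301, 302, 303) and m == "POST"):
+#               # 307 POST and any 308 end up here -> the task fails
+#               raise HTTPError(req.full_url, code, msg, headers, fp)
+#           # no data= and no method= -> the follow-up request is a GET
+#           return Request(newurl, headers=req.headers)
+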
+# NOTE: The HTTP HEAD turns into an HTTP GET
+- name: Test HTTP 302 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: HEAD
+ register: http_302_head
+
+- assert:
+ that:
+ - http_302_head is successful
+ - http_302_head.json.data == ''
+ - http_302_head.json.method == 'GET'
+ - http_302_head.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_302_head.redirected == true
+ - http_302_head.status == 200
+ - http_302_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 302 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: GET
+ register: http_302_get
+
+- assert:
+ that:
+ - http_302_get is successful
+ - http_302_get.json.data == ''
+ - http_302_get.json.method == 'GET'
+ - http_302_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_302_get.redirected == true
+ - http_302_get.status == 200
+ - http_302_get.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP POST turns into an HTTP GET
+- name: Test HTTP 302 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=302&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_302_post
+
+- assert:
+ that:
+ - http_302_post is successful
+ - http_302_post.json.data == ''
+ - http_302_post.json.method == 'GET'
+ - http_302_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_302_post.redirected == true
+ - http_302_post.status == 200
+ - http_302_post.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP HEAD turns into an HTTP GET
+- name: Test HTTP 303 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: HEAD
+ register: http_303_head
+
+- assert:
+ that:
+ - http_303_head is successful
+ - http_303_head.json.data == ''
+ - http_303_head.json.method == 'GET'
+ - http_303_head.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_303_head.redirected == true
+ - http_303_head.status == 200
+ - http_303_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 303 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: GET
+ register: http_303_get
+
+- assert:
+ that:
+ - http_303_get is successful
+ - http_303_get.json.data == ''
+ - http_303_get.json.method == 'GET'
+ - http_303_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_303_get.redirected == true
+ - http_303_get.status == 200
+ - http_303_get.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP POST turns into an HTTP GET
+- name: Test HTTP 303 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=303&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ register: http_303_post
+
+- assert:
+ that:
+ - http_303_post is successful
+ - http_303_post.json.data == ''
+ - http_303_post.json.method == 'GET'
+ - http_303_post.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_303_post.redirected == true
+ - http_303_post.status == 200
+ - http_303_post.url == 'https://{{ httpbin_host }}/anything'
+
+# NOTE: The HTTP HEAD turns into an HTTP GET
+- name: Test HTTP 307 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: HEAD
+ register: http_307_head
+
+- assert:
+ that:
+ - http_307_head is successful
+ - http_307_head.json.data == ''
+ - http_307_head.json.method == 'GET'
+ - http_307_head.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_307_head.redirected == true
+ - http_307_head.status == 200
+ - http_307_head.url == 'https://{{ httpbin_host }}/anything'
+
+- name: Test HTTP 307 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: GET
+ register: http_307_get
+
+- assert:
+ that:
+ - http_307_get is successful
+ - http_307_get.json.data == ''
+ - http_307_get.json.method == 'GET'
+ - http_307_get.json.url == 'https://{{ httpbin_host }}/anything'
+ - http_307_get.redirected == true
+ - http_307_get.status == 200
+ - http_307_get.url == 'https://{{ httpbin_host }}/anything'
+
+# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809
+- name: Test HTTP 307 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_307_post
+
+- assert:
+ that:
+ - http_307_post is failure
+ - http_307_post.json is not defined
+ - http_307_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_307_post.msg == 'Status code was 307 and not [200]: HTTP Error 307: TEMPORARY REDIRECT'"
+ - http_307_post.redirected == false
+ - http_307_post.status == 307
+ - http_307_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=307&url=https://{{ httpbin_host }}/anything'
+
+# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809
+- name: Test HTTP 308 using HEAD
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: HEAD
+ ignore_errors: yes
+ register: http_308_head
+
+- assert:
+ that:
+ - http_308_head is failure
+ - http_308_head.json is not defined
+ - http_308_head.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_head.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_head.redirected == false
+ - http_308_head.status == 308
+ - http_308_head.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
+
+# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809
+- name: Test HTTP 308 using GET
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: GET
+ ignore_errors: yes
+ register: http_308_get
+
+- assert:
+ that:
+ - http_308_get is failure
+ - http_308_get.json is not defined
+ - http_308_get.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_get.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_get.redirected == false
+ - http_308_get.status == 308
+ - http_308_get.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
+
+# FIXME: This is fixed in https://github.com/ansible/ansible/pull/36809
+- name: Test HTTP 308 using POST
+ uri:
+ url: https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything
+ follow_redirects: urllib2
+ return_content: yes
+ method: POST
+ body: '{ "foo": "bar" }'
+ body_format: json
+ ignore_errors: yes
+ register: http_308_post
+
+- assert:
+ that:
+ - http_308_post is failure
+ - http_308_post.json is not defined
+ - http_308_post.location == 'https://{{ httpbin_host }}/anything'
+ - "http_308_post.msg == 'Status code was 308 and not [200]: HTTP Error 308: UNKNOWN'"
+ - http_308_post.redirected == false
+ - http_308_post.status == 308
+ - http_308_post.url == 'https://{{ httpbin_host }}/redirect-to?status_code=308&url=https://{{ httpbin_host }}/anything'
diff --git a/test/integration/targets/uri/tasks/return-content.yml b/test/integration/targets/uri/tasks/return-content.yml
new file mode 100644
index 00000000..5a9b97e6
--- /dev/null
+++ b/test/integration/targets/uri/tasks/return-content.yml
@@ -0,0 +1,49 @@
+- name: Test when return_content is yes and request succeeds
+ uri:
+ url: https://{{ httpbin_host }}/get
+ return_content: yes
+ register: result
+
+- name: Assert content exists when return_content is yes and request succeeds
+ assert:
+ that:
+ - result is successful
+ - "'content' in result"
+
+- name: Test when return_content is yes and request fails
+ uri:
+ url: http://does/not/exist
+ return_content: yes
+ register: result
+ ignore_errors: true
+
+- name: Assert content exists when return_content is yes and request fails
+ assert:
+ that:
+ - result is failed
+ - "'content' in result"
+
+- name: Test when return_content is no and request succeeds
+ uri:
+ url: https://{{ httpbin_host }}/get
+ return_content: no
+ register: result
+
+- name: Assert content does not exist when return_content is no and request succeeds
+ assert:
+ that:
+ - result is successful
+ - "'content' not in result"
+
+- name: Test when return_content is no and request fails
+ uri:
+ url: http://does/not/exist
+ return_content: no
+ register: result
+ ignore_errors: true
+
+- name: Assert content does not exist when return_content is no and request fails
+ assert:
+ that:
+ - result is failed
+ - "'content' not in result" \ No newline at end of file
diff --git a/test/integration/targets/uri/tasks/unexpected-failures.yml b/test/integration/targets/uri/tasks/unexpected-failures.yml
new file mode 100644
index 00000000..ac38871c
--- /dev/null
+++ b/test/integration/targets/uri/tasks/unexpected-failures.yml
@@ -0,0 +1,27 @@
+---
+# expand output_dir the same way expanduser & expandvars would on the managed host
+- command: 'echo {{ output_dir }}'
+ register: echo
+
+- set_fact:
+ remote_dir_expanded: '{{ echo.stdout }}'
+
+- name: ensure test directory doesn't exist
+ file:
+ path: '{{ output_dir }}/non/existent/path'
+ state: absent
+
+- name: destination doesn't exist
+ uri:
+ url: 'https://{{ httpbin_host }}/get'
+ dest: '{{ output_dir }}/non/existent/path'
+ ignore_errors: true
+ register: ret
+
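+# The GET itself succeeds (hence status 200 below); the module then fails
+# cleanly when it cannot write dest, instead of raising MODULE FAILURE.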
+- name: check that unexpected failure didn't happen
+ assert:
+ that:
+ - ret is failed
+ - "not ret.msg.startswith('MODULE FAILURE')"
+ - '"Destination dir ''" ~ remote_dir_expanded ~ "/non/existent'' not writable" in ret.msg'
+ - ret.status == 200
diff --git a/test/integration/targets/uri/templates/netrc.j2 b/test/integration/targets/uri/templates/netrc.j2
new file mode 100644
index 00000000..3a100d51
--- /dev/null
+++ b/test/integration/targets/uri/templates/netrc.j2
@@ -0,0 +1,3 @@
+machine {{ httpbin_host }}
+login user
+password passwd
diff --git a/test/integration/targets/uri/vars/main.yml b/test/integration/targets/uri/vars/main.yml
new file mode 100644
index 00000000..83a740bc
--- /dev/null
+++ b/test/integration/targets/uri/vars/main.yml
@@ -0,0 +1,20 @@
+uri_os_packages:
+ RedHat:
+ urllib3: python-urllib3
+ step1:
+ - python-pyasn1
+ - pyOpenSSL
+ - python-urllib3
+ step2:
+ - libffi-devel
+ - openssl-devel
+ - python-devel
+ Debian:
+ step1:
+ - python-pyasn1
+ - python-openssl
+ - python-urllib3
+ step2:
+ - libffi-dev
+ - libssl-dev
+ - python-dev
diff --git a/test/integration/targets/user/aliases b/test/integration/targets/user/aliases
new file mode 100644
index 00000000..3a07aab3
--- /dev/null
+++ b/test/integration/targets/user/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group1
+skip/aix
diff --git a/test/integration/targets/user/files/userlist.sh b/test/integration/targets/user/files/userlist.sh
new file mode 100644
index 00000000..96a83b20
--- /dev/null
+++ b/test/integration/targets/user/files/userlist.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
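+# Print local user names, one per line. Usage: userlist.sh <distribution>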
+
+#- name: make a list of groups
+# shell: |
+# cat /etc/group | cut -d: -f1
+# register: group_names
+# when: 'ansible_distribution != "MacOSX"'
+
+#- name: make a list of groups [mac]
+# shell: dscl localhost -list /Local/Default/Groups
+# register: group_names
+# when: 'ansible_distribution == "MacOSX"'
+
+DISTRO="$*"
+
+if [[ "$DISTRO" == "MacOSX" ]]; then
+ dscl localhost -list /Local/Default/Users
+else
+ grep -E -v ^\# /etc/passwd | cut -d: -f1
+fi
diff --git a/test/integration/targets/user/meta/main.yml b/test/integration/targets/user/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/user/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/user/tasks/expires_local.yml b/test/integration/targets/user/tasks/expires_local.yml
new file mode 100644
index 00000000..e6620353
--- /dev/null
+++ b/test/integration/targets/user/tasks/expires_local.yml
@@ -0,0 +1,333 @@
+---
+## local user expires
+# Date is March 3, 2050
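+# 2529881062 is that date as epoch seconds; /etc/shadow stores the expiry in
+# days since epoch, and 2529881062 // 86400 == 29281, the value asserted below.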
+
+- name: Remove local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: absent
+ remove: yes
+ local: yes
+ tags:
+ - user_test_local_mode
+
+- name: Set user expiration
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: 2529881062
+ register: user_test_local_expires1
+ tags:
+ - timezone
+ - user_test_local_mode
+
+- name: Set user expiration again to ensure no change is made
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: 2529881062
+ register: user_test_local_expires2
+ tags:
+ - timezone
+ - user_test_local_mode
+
+- name: Ensure that account with expiration was created and did not change on subsequent run
+ assert:
+ that:
+ - user_test_local_expires1 is changed
+ - user_test_local_expires2 is not changed
+ tags:
+ - user_test_local_mode
+
+- name: Verify expiration date for Linux
+ block:
+ - name: LINUX | Get expiration date for local_ansibulluser
+ getent:
+ database: shadow
+ key: local_ansibulluser
+ tags:
+ - user_test_local_mode
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ that:
+ - getent_shadow['local_ansibulluser'][6] == '29281'
+ tags:
+ - user_test_local_mode
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+- name: Change timezone
+ timezone:
+ name: America/Denver
+ register: original_timezone
+ tags:
+ - timezone
+ - user_test_local_mode
+
+- name: Change system timezone to make sure expiration comparison works properly
+ block:
+ - name: Create user with expiration again to ensure no change is made in a new timezone
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: 2529881062
+ register: user_test_local_different_tz
+ tags:
+ - timezone
+ - user_test_local_mode
+
+ - name: Ensure that no change was reported
+ assert:
+ that:
+ - user_test_local_different_tz is not changed
+ tags:
+ - timezone
+ - user_test_local_mode
+
+ always:
+ - name: Restore original timezone - {{ original_timezone.diff.before.name }}
+ timezone:
+ name: "{{ original_timezone.diff.before.name }}"
+ when: original_timezone.diff.before.name != "n/a"
+ tags:
+ - timezone
+ - user_test_local_mode
+
+ - name: Restore original timezone when n/a
+ file:
+ path: /etc/sysconfig/clock
+ state: absent
+ when:
+ - original_timezone.diff.before.name == "n/a"
+ - "'/etc/sysconfig/clock' in original_timezone.msg"
+ tags:
+ - timezone
+ - user_test_local_mode
+
+
+- name: Unexpire user
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: -1
+ register: user_test_local_expires3
+ tags:
+ - user_test_local_mode
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Get expiration date for local_ansibulluser
+ getent:
+ database: shadow
+ key: local_ansibulluser
+ tags:
+ - user_test_local_mode
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+ tags:
+ - user_test_local_mode
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+- name: Verify unexpired state for Linux/BSD
+ block:
+ - name: Unexpire user again to check for change
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: -1
+ register: user_test_local_expires4
+ tags:
+ - user_test_local_mode
+
+ - name: Ensure first expiration reported a change and second did not
+ assert:
+ msg: The second run of the expiration removal task reported a change when it should not
+ that:
+ - user_test_local_expires3 is changed
+ - user_test_local_expires4 is not changed
+ tags:
+ - user_test_local_mode
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse', 'FreeBSD']
+
+# Test setting no expiration when creating a new account
+# https://github.com/ansible/ansible/issues/44155
+- name: Remove local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: absent
+ remove: yes
+ local: yes
+ tags:
+ - user_test_local_mode
+
+- name: Create user account without expiration
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: -1
+ register: user_test_local_create_no_expires_1
+ tags:
+ - user_test_local_mode
+
+- name: Create user account without expiration again
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: -1
+ register: user_test_local_create_no_expires_2
+ tags:
+ - user_test_local_mode
+
+- name: Ensure changes were made appropriately
+ assert:
+ msg: Setting 'expires=-1' resulted in incorrect changes
+ that:
+ - user_test_local_create_no_expires_1 is changed
+ - user_test_local_create_no_expires_2 is not changed
+ tags:
+ - user_test_local_mode
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Get expiration date for local_ansibulluser
+ getent:
+ database: shadow
+ key: local_ansibulluser
+ tags:
+ - user_test_local_mode
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+ tags:
+ - user_test_local_mode
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+# Test setting epoch 0 expiration when creating a new account, then removing the expiry
+# https://github.com/ansible/ansible/issues/47114
+- name: Remove local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: absent
+ remove: yes
+ local: yes
+ tags:
+ - user_test_local_mode
+
+- name: Create user account with epoch 0 expiration
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: 0
+ register: user_test_local_expires_create0_1
+ tags:
+ - user_test_local_mode
+
+- name: Create user account with epoch 0 expiration again
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: 0
+ register: user_test_local_expires_create0_2
+ tags:
+ - user_test_local_mode
+
+- name: Change the user account to remove the expiry time
+ user:
+ name: local_ansibulluser
+ expires: -1
+ local: yes
+ register: user_test_local_remove_expires_1
+ tags:
+ - user_test_local_mode
+
+- name: Change the user account to remove the expiry time again
+ user:
+ name: local_ansibulluser
+ expires: -1
+ local: yes
+ register: user_test_local_remove_expires_2
+ tags:
+ - user_test_local_mode
+
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Ensure changes were made appropriately
+ assert:
+ msg: Creating an account with 'expires=0' then removing that expiration with 'expires=-1' resulted in incorrect changes
+ that:
+ - user_test_local_expires_create0_1 is changed
+ - user_test_local_expires_create0_2 is not changed
+ - user_test_local_remove_expires_1 is changed
+ - user_test_local_remove_expires_2 is not changed
+ tags:
+ - user_test_local_mode
+
+ - name: LINUX | Get expiration date for local_ansibulluser
+ getent:
+ database: shadow
+ key: local_ansibulluser
+ tags:
+ - user_test_local_mode
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+ tags:
+ - user_test_local_mode
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+# Test expiration with a very large negative number. This should have the same
+# result as setting -1.
+- name: Set expiration date using a very large negative number
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ expires: -2529881062
+ register: user_test_local_expires5
+ tags:
+ - user_test_local_mode
+
+- name: Ensure no change was made
+ assert:
+ that:
+ - user_test_local_expires5 is not changed
+ tags:
+ - user_test_local_mode
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Get expiration date for local_ansibulluser
+ getent:
+ database: shadow
+ key: local_ansibulluser
+ tags:
+ - user_test_local_mode
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['local_ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['local_ansibulluser'][6] or getent_shadow['local_ansibulluser'][6] | int < 0
+ tags:
+ - user_test_local_mode
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
diff --git a/test/integration/targets/user/tasks/main.yml b/test/integration/targets/user/tasks/main.yml
new file mode 100644
index 00000000..19b12742
--- /dev/null
+++ b/test/integration/targets/user/tasks/main.yml
@@ -0,0 +1,1136 @@
+# Test code for the user module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+## user add
+
+- name: remove the test user
+ user:
+ name: ansibulluser
+ state: absent
+
+- name: try to create a user
+ user:
+ name: ansibulluser
+ state: present
+ register: user_test0_0
+
+- name: create the user again
+ user:
+ name: ansibulluser
+ state: present
+ register: user_test0_1
+
+- debug:
+ var: user_test0_0
+ verbosity: 2
+
+- name: make a list of users
+ script: userlist.sh {{ ansible_facts.distribution }}
+ register: user_names
+
+- debug:
+ var: user_names
+ verbosity: 2
+
+- name: validate results for testcase 0
+ assert:
+ that:
+ - user_test0_0 is changed
+ - user_test0_1 is not changed
+ - '"ansibulluser" in user_names.stdout_lines'
+
+# create system user
+
+- name: remove user
+ user:
+ name: ansibulluser
+ state: absent
+
+- name: create system user
+ user:
+ name: ansibulluser
+ state: present
+ system: yes
+
+# test adding user with uid
+# https://github.com/ansible/ansible/issues/62969
+- name: remove the test user
+ user:
+ name: ansibulluser
+ state: absent
+
+- name: try to create a user with uid
+ user:
+ name: ansibulluser
+ state: present
+ uid: 572
+ register: user_test01_0
+
+- name: create the user again
+ user:
+ name: ansibulluser
+ state: present
+ uid: 572
+ register: user_test01_1
+
+- name: validate results for testcase 0
+ assert:
+ that:
+ - user_test01_0 is changed
+ - user_test01_1 is not changed
+
+# test user add with password
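+# NOTE: the value below is a SHA-512 crypt hash ('$6$...'); a hash like this
+# can be generated with, for example: openssl passwd -6 <password>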
+- name: add an encrypted password for user
+ user:
+ name: ansibulluser
+ password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
+ state: present
+ update_password: always
+ register: test_user_encrypt0
+
+- name: there should not be warnings
+ assert:
+ that: "'warnings' not in test_user_encrypt0"
+
+# https://github.com/ansible/ansible/issues/65711
+- name: Test updating password only on creation
+ user:
+ name: ansibulluser
+ password: '*'
+ update_password: on_create
+ register: test_user_update_password
+
+- name: Ensure password was not changed
+ assert:
+ that:
+ - test_user_update_password is not changed
+
+- name: Verify password hash for Linux
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+ block:
+ - name: LINUX | Get shadow entry for ansibulluser
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure password hash was not removed
+ assert:
+ that:
+ - getent_shadow['ansibulluser'][1] != '*'
+
+- block:
+ - name: add a plaintext password for user
+ user:
+ name: ansibulluser
+ password: "plaintextpassword"
+ state: present
+ update_password: always
+ register: test_user_encrypt1
+
+ - name: there should be a warning complaining that the password is plaintext
+ assert:
+ that: "'warnings' in test_user_encrypt1"
+
+ - name: add an invalid hashed password
+ user:
+ name: ansibulluser
+ password: "$6$rounds=656000$tgK3gYTyRLUmhyv2$lAFrYUQwn7E6VsjPOwQwoSx30lmpiU9r/E0Al7tzKrR9mkodcMEZGe9OXD0H/clOn6qdsUnaL4zefy5fG+++++"
+ state: present
+ update_password: always
+ register: test_user_encrypt2
+
+ - name: there should be a warning complaining about the character set of the password
+ assert:
+ that: "'warnings' in test_user_encrypt2"
+
+ - name: change password to '!'
+ user:
+ name: ansibulluser
+ password: '!'
+ register: test_user_encrypt3
+
+ - name: change password to '*'
+ user:
+ name: ansibulluser
+ password: '*'
+ register: test_user_encrypt4
+
+ - name: change password to '*************'
+ user:
+ name: ansibulluser
+ password: '*************'
+ register: test_user_encrypt5
+
+ - name: there should be no warnings when setting the password to '!', '*' or '*************'
+ assert:
+ that:
+ - "'warnings' not in test_user_encrypt3"
+ - "'warnings' not in test_user_encrypt4"
+ - "'warnings' not in test_user_encrypt5"
+ when: ansible_facts.system != 'Darwin'
+
+
+# https://github.com/ansible/ansible/issues/42484
+# Skipping macOS for now since there is a bug when changing home directory
+- block:
+ - name: create user specifying home
+ user:
+ name: ansibulluser
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser"
+ register: user_test3_0
+
+ - name: create user again specifying home
+ user:
+ name: ansibulluser
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser"
+ register: user_test3_1
+
+ - name: change user home
+ user:
+ name: ansibulluser
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser-mod"
+ register: user_test3_2
+
+ - name: change user home back
+ user:
+ name: ansibulluser
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/ansibulluser"
+ register: user_test3_3
+
+ - name: validate results for testcase 3
+ assert:
+ that:
+ - user_test3_0 is not changed
+ - user_test3_1 is not changed
+ - user_test3_2 is changed
+ - user_test3_3 is changed
+ when: ansible_facts.system != 'Darwin'
+
+# https://github.com/ansible/ansible/issues/41393
+# Create a new user account with a path that has parent directories that do not exist
+- name: Create user with home path that has parents that do not exist
+ user:
+ name: ansibulluser2
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2"
+ register: create_home_with_no_parent_1
+
+- name: Create user with home path that has parents that do not exist again
+ user:
+ name: ansibulluser2
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2"
+ register: create_home_with_no_parent_2
+
+- name: Check the created home directory
+ stat:
+ path: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2"
+ register: home_with_no_parent_3
+
+- name: Ensure user with non-existing parent paths was created successfully
+ assert:
+ that:
+ - create_home_with_no_parent_1 is changed
+ - create_home_with_no_parent_1.home == user_home_prefix[ansible_facts.system] ~ '/in2deep/ansibulluser2'
+ - create_home_with_no_parent_2 is not changed
+ - home_with_no_parent_3.stat.uid == create_home_with_no_parent_1.uid
+ - home_with_no_parent_3.stat.gr_name == default_user_group[ansible_facts.distribution] | default('ansibulluser2')
+
+- name: Cleanup test account
+ user:
+ name: ansibulluser2
+ home: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/ansibulluser2"
+ state: absent
+ remove: yes
+
+- name: Remove testing dir
+ file:
+ path: "{{ user_home_prefix[ansible_facts.system] }}/in2deep/"
+ state: absent
+
+
+# https://github.com/ansible/ansible/issues/60307
+# Make sure we can create a user when the home directory is missing
+- name: Create user with home path that does not exist
+ user:
+ name: ansibulluser3
+ state: present
+ home: "{{ user_home_prefix[ansible_facts.system] }}/nosuchdir"
+ createhome: no
+
+- name: Cleanup test account
+ user:
+ name: ansibulluser3
+ state: absent
+ remove: yes
+
+# https://github.com/ansible/ansible/issues/70589
+# Create user with create_home: no and parent directory does not exist.
+- name: "Check if parent dir for home dir for user exists (before)"
+ stat:
+ path: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir"
+ register: create_user_no_create_home_with_no_parent_parent_dir_before
+
+- name: "Create user with create_home == no and home path parent dir does not exist"
+ user:
+ name: randomuser
+ state: present
+ create_home: false
+ home: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir/randomuser"
+ register: create_user_no_create_home_with_no_parent
+
+- name: "Check if parent dir for home dir for user exists (after)"
+ stat:
+ path: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir"
+ register: create_user_no_create_home_with_no_parent_parent_dir_after
+
+- name: "Check if home for user is created"
+ stat:
+ path: "{{ user_home_prefix[ansible_facts.system] }}/thereisnodir/randomuser"
+ register: create_user_no_create_home_with_no_parent_home_dir
+
+- name: "Ensure user with non-existing parent paths with create_home: no was created successfully"
+ assert:
+ that:
+ - not create_user_no_create_home_with_no_parent_parent_dir_before.stat.exists
+ - create_user_no_create_home_with_no_parent_parent_dir_after.stat.isdir is not defined
+ - not create_user_no_create_home_with_no_parent_home_dir.stat.exists
+
+- name: Cleanup test account
+ user:
+ name: randomuser
+ state: absent
+ remove: yes
+
+## user check
+
+- name: run existing user check tests
+ user:
+ name: "{{ user_names.stdout_lines | random }}"
+ state: present
+ create_home: no
+ loop: "{{ range(1, 5+1) | list }}"
+ register: user_test1
+
+- debug:
+ var: user_test1
+ verbosity: 2
+
+- name: validate results for testcase 1
+ assert:
+ that:
+ - user_test1.results is defined
+ - user_test1.results | length == 5
+
+- name: validate changed results for testcase 1
+ assert:
+ that:
+ - "user_test1.results[0] is not changed"
+ - "user_test1.results[1] is not changed"
+ - "user_test1.results[2] is not changed"
+ - "user_test1.results[3] is not changed"
+ - "user_test1.results[4] is not changed"
+ - "user_test1.results[0]['state'] == 'present'"
+ - "user_test1.results[1]['state'] == 'present'"
+ - "user_test1.results[2]['state'] == 'present'"
+ - "user_test1.results[3]['state'] == 'present'"
+ - "user_test1.results[4]['state'] == 'present'"
+
+
+## user remove
+
+- name: try to delete the user
+ user:
+ name: ansibulluser
+ state: absent
+ force: true
+ register: user_test2
+
+- name: make a new list of users
+ script: userlist.sh {{ ansible_facts.distribution }}
+ register: user_names2
+
+- debug:
+ var: user_names2
+ verbosity: 2
+
+- name: validate results for testcase 2
+ assert:
+ that:
+ - '"ansibulluser" not in user_names2.stdout_lines'
+
+
+## create user without home and test fallback home dir create
+
+- block:
+ - name: create the user
+ user:
+ name: ansibulluser
+
+ - name: delete the user and home dir
+ user:
+ name: ansibulluser
+ state: absent
+ force: true
+ remove: true
+
+ - name: create the user without home
+ user:
+ name: ansibulluser
+ create_home: no
+
+ - name: create the user home dir
+ user:
+ name: ansibulluser
+ register: user_create_home_fallback
+
+ - name: stat home dir
+ stat:
+ path: '{{ user_create_home_fallback.home }}'
+ register: user_create_home_fallback_dir
+
+ - name: read UMASK from /etc/login.defs and return mode
+ shell: |
+ import re
+ import os
+ # fall back to the process umask if login.defs is missing or has no UMASK line
+ umask = None
+ try:
+     for line in open('/etc/login.defs').readlines():
+         m = re.match(r'^UMASK\s+(\d+)$', line)
+         if m:
+             umask = int(m.group(1), 8)
+ except OSError:
+     pass
+ if umask is None:
+     umask = os.umask(0)
+ mode = oct(0o777 & ~umask)
+ print(str(mode).replace('o', ''))
+ args:
+ executable: "{{ ansible_python_interpreter }}"
+ register: user_login_defs_umask
+
+ - name: validate that user home dir is created
+ assert:
+ that:
+ - user_create_home_fallback is changed
+ - user_create_home_fallback_dir.stat.exists
+ - user_create_home_fallback_dir.stat.isdir
+ - user_create_home_fallback_dir.stat.pw_name == 'ansibulluser'
+ - user_create_home_fallback_dir.stat.mode == user_login_defs_umask.stdout
+ when: ansible_facts.system != 'Darwin'
+
+- block:
+ - name: create non-system user on macOS to test the shell is set to /bin/bash
+ user:
+ name: macosuser
+ register: macosuser_output
+
+ - name: validate the shell is set to /bin/bash
+ assert:
+ that:
+ - 'macosuser_output.shell == "/bin/bash"'
+
+ - name: cleanup
+ user:
+ name: macosuser
+ state: absent
+
+ - name: create system user on macOS to test the shell is set to /usr/bin/false
+ user:
+ name: macosuser
+ system: yes
+ register: macosuser_output
+
+ - name: validate the shell is set to /usr/bin/false
+ assert:
+ that:
+ - 'macosuser_output.shell == "/usr/bin/false"'
+
+ - name: cleanup
+ user:
+ name: macosuser
+ state: absent
+
+ - name: create non-system user on macOS and set the shell to /bin/sh
+ user:
+ name: macosuser
+ shell: /bin/sh
+ register: macosuser_output
+
+ - name: validate the shell is set to /bin/sh
+ assert:
+ that:
+ - 'macosuser_output.shell == "/bin/sh"'
+
+ - name: cleanup
+ user:
+ name: macosuser
+ state: absent
+ when: ansible_facts.distribution == "MacOSX"
+
+
+## user expires
+# Date is March 3, 2050
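+# 2529881062 is that date as epoch seconds; /etc/shadow stores the expiry in
+# days since epoch, and 2529881062 // 86400 == 29281, the value asserted below.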
+- name: Set user expiration
+ user:
+ name: ansibulluser
+ state: present
+ expires: 2529881062
+ register: user_test_expires1
+ tags:
+ - timezone
+
+- name: Set user expiration again to ensure no change is made
+ user:
+ name: ansibulluser
+ state: present
+ expires: 2529881062
+ register: user_test_expires2
+ tags:
+ - timezone
+
+- name: Ensure that account with expiration was created and did not change on subsequent run
+ assert:
+ that:
+ - user_test_expires1 is changed
+ - user_test_expires2 is not changed
+
+- name: Verify expiration date for Linux
+ block:
+ - name: LINUX | Get expiration date for ansibulluser
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ that:
+ - getent_shadow['ansibulluser'][6] == '29281'
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+
+- name: Verify expiration date for BSD
+ block:
+ - name: BSD | Get expiration date for ansibulluser
+ shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7'
+ changed_when: no
+ register: bsd_account_expiration
+
+ - name: BSD | Ensure proper expiration date was set
+ assert:
+ that:
+ - bsd_account_expiration.stdout == '2529881062'
+ when: ansible_facts.os_family == 'FreeBSD'
+
+- name: Change timezone
+ timezone:
+ name: America/Denver
+ register: original_timezone
+ tags:
+ - timezone
+
+- name: Change system timezone to make sure expiration comparison works properly
+ block:
+ - name: Create user with expiration again to ensure no change is made in a new timezone
+ user:
+ name: ansibulluser
+ state: present
+ expires: 2529881062
+ register: user_test_different_tz
+ tags:
+ - timezone
+
+ - name: Ensure that no change was reported
+ assert:
+ that:
+ - user_test_different_tz is not changed
+ tags:
+ - timezone
+
+ always:
+ - name: Restore original timezone - {{ original_timezone.diff.before.name }}
+ timezone:
+ name: "{{ original_timezone.diff.before.name }}"
+ when: original_timezone.diff.before.name != "n/a"
+ tags:
+ - timezone
+
+ - name: Restore original timezone when n/a
+ file:
+ path: /etc/sysconfig/clock
+ state: absent
+ when:
+ - original_timezone.diff.before.name == "n/a"
+ - "'/etc/sysconfig/clock' in original_timezone.msg"
+ tags:
+ - timezone
+
+
+- name: Unexpire user
+ user:
+ name: ansibulluser
+ state: present
+ expires: -1
+ register: user_test_expires3
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Get expiration date for ansibulluser
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+- name: Verify unexpired state for Linux/BSD
+ block:
+ - name: Unexpire user again to check for change
+ user:
+ name: ansibulluser
+ state: present
+ expires: -1
+ register: user_test_expires4
+
+ - name: Ensure first expiration reported a change and second did not
+ assert:
+ msg: The second run of the expiration removal task reported a change when it should not
+ that:
+ - user_test_expires3 is changed
+ - user_test_expires4 is not changed
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse', 'FreeBSD']
+
+- name: Verify unexpired state for BSD
+ block:
+ - name: BSD | Get expiration date for ansibulluser
+ shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7'
+ changed_when: no
+ register: bsd_account_expiration
+
+ - name: BSD | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be '0', not {{ bsd_account_expiration.stdout }}"
+ that:
+ - bsd_account_expiration.stdout == '0'
+ when: ansible_facts.os_family == 'FreeBSD'
+
+# Test setting no expiration when creating a new account
+# https://github.com/ansible/ansible/issues/44155
+- name: Remove ansibulluser
+ user:
+ name: ansibulluser
+ state: absent
+
+- name: Create user account without expiration
+ user:
+ name: ansibulluser
+ state: present
+ expires: -1
+ register: user_test_create_no_expires_1
+
+- name: Create user account without expiration again
+ user:
+ name: ansibulluser
+ state: present
+ expires: -1
+ register: user_test_create_no_expires_2
+
+- name: Ensure changes were made appropriately
+ assert:
+ msg: Setting 'expires=-1' resulted in incorrect changes
+ that:
+ - user_test_create_no_expires_1 is changed
+ - user_test_create_no_expires_2 is not changed
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Get expiration date for ansibulluser
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+- name: Verify unexpired state for BSD
+ block:
+ - name: BSD | Get expiration date for ansibulluser
+ shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7'
+ changed_when: no
+ register: bsd_account_expiration
+
+ - name: BSD | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be '0', not {{ bsd_account_expiration.stdout }}"
+ that:
+ - bsd_account_expiration.stdout == '0'
+ when: ansible_facts.os_family == 'FreeBSD'
+
+# Test setting epoch 0 expiration when creating a new account, then removing the expiry
+# https://github.com/ansible/ansible/issues/47114
+- name: Remove ansibulluser
+ user:
+ name: ansibulluser
+ state: absent
+
+- name: Create user account with epoch 0 expiration
+ user:
+ name: ansibulluser
+ state: present
+ expires: 0
+ register: user_test_expires_create0_1
+
+- name: Create user account with epoch 0 expiration again
+ user:
+ name: ansibulluser
+ state: present
+ expires: 0
+ register: user_test_expires_create0_2
+
+- name: Change the user account to remove the expiry time
+ user:
+ name: ansibulluser
+ expires: -1
+ register: user_test_remove_expires_1
+
+- name: Change the user account to remove the expiry time again
+ user:
+ name: ansibulluser
+ expires: -1
+ register: user_test_remove_expires_2
+
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Ensure changes were made appropriately
+ assert:
+ msg: Creating an account with 'expires=0' then removing that expiration with 'expires=-1' resulted in incorrect changes
+ that:
+ - user_test_expires_create0_1 is changed
+ - user_test_expires_create0_2 is not changed
+ - user_test_remove_expires_1 is changed
+ - user_test_remove_expires_2 is not changed
+
+ - name: LINUX | Get expiration date for ansibulluser
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+
+- name: Verify proper expiration behavior for BSD
+ block:
+ - name: BSD | Ensure changes were made appropriately
+ assert:
+ msg: Creating an account with 'expires=0' then removing that expiration with 'expires=-1' resulted in incorrect changes
+ that:
+ - user_test_expires_create0_1 is changed
+ - user_test_expires_create0_2 is not changed
+ - user_test_remove_expires_1 is not changed
+ - user_test_remove_expires_2 is not changed
+ when: ansible_facts.os_family == 'FreeBSD'
+
+# Test expiration with a very large negative number. This should have the same
+# result as setting -1.
+- name: Set expiration date using a very large negative number
+ user:
+ name: ansibulluser
+ state: present
+ expires: -2529881062
+ register: user_test_expires5
+
+- name: Ensure no change was made
+ assert:
+ that:
+ - user_test_expires5 is not changed
+
+- name: Verify unexpired state for Linux
+ block:
+ - name: LINUX | Get expiration date for ansibulluser
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be empty or -1, not {{ getent_shadow['ansibulluser'][6] }}"
+ that:
+ - not getent_shadow['ansibulluser'][6] or getent_shadow['ansibulluser'][6] | int < 0
+ when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
+
+- name: Verify unexpired state for BSD
+ block:
+ - name: BSD | Get expiration date for ansibulluser
+ shell: 'grep ansibulluser /etc/master.passwd | cut -d: -f 7'
+ changed_when: no
+ register: bsd_account_expiration
+
+ - name: BSD | Ensure proper expiration date was set
+ assert:
+ msg: "expiry is supposed to be '0', not {{ bsd_account_expiration.stdout }}"
+ that:
+ - bsd_account_expiration.stdout == '0'
+ when: ansible_facts.os_family == 'FreeBSD'
+
+
+## shadow backup
+- block:
+ - name: Create a user to test shadow file backup
+ user:
+ name: ansibulluser
+ state: present
+ register: result
+
+ - name: Find shadow backup files
+ find:
+ path: /etc
+ patterns: 'shadow\..*~$'
+ use_regex: yes
+ register: shadow_backups
+
+ - name: Assert that a backup file was created
+ assert:
+ that:
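+ # 'bakup' (sic) matches the misspelled key the user module returns for the shadow backup path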
+ - result.bakup
+ - shadow_backups.files | map(attribute='path') | list | length > 0
+ when: ansible_facts.os_family == 'Solaris'
+
+
+# Test creating ssh key with passphrase
+- name: Remove ansibulluser
+ user:
+ name: ansibulluser
+ state: absent
+
+- name: Create user with ssh key
+ user:
+ name: ansibulluser
+ state: present
+ generate_ssh_key: yes
+ force: yes
+ ssh_key_file: "{{ output_dir }}/test_id_rsa"
+ ssh_key_passphrase: secret_passphrase
+
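+# ssh-keygen -y prints the public key and exits 0 only when the given
+# passphrase actually decrypts the private key.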
+- name: Unlock ssh key
+ command: "ssh-keygen -y -f {{ output_dir }}/test_id_rsa -P secret_passphrase"
+ register: result
+
+- name: Check that ssh key was unlocked successfully
+ assert:
+ that:
+ - result.rc == 0
+
+- name: Clean ssh key
+ file:
+ path: "{{ output_dir }}/test_id_rsa"
+ state: absent
+ when: ansible_os_family == 'FreeBSD'
+
+
+## password lock
+- block:
+ - name: Set password for ansibulluser
+ user:
+ name: ansibulluser
+ password: "$6$rounds=656000$TT4O7jz2M57npccl$33LF6FcUMSW11qrESXL1HX0BS.bsiT6aenFLLiVpsQh6hDtI9pJh5iY7x8J7ePkN4fP8hmElidHXaeD51pbGS."
+
+ - name: Lock account
+ user:
+ name: ansibulluser
+ password_lock: yes
+ register: password_lock_1
+
+ - name: Lock account again
+ user:
+ name: ansibulluser
+ password_lock: yes
+ register: password_lock_2
+
+ - name: Unlock account
+ user:
+ name: ansibulluser
+ password_lock: no
+ register: password_lock_3
+
+ - name: Unlock account again
+ user:
+ name: ansibulluser
+ password_lock: no
+ register: password_lock_4
+
+ - name: Ensure task reported changes appropriately
+ assert:
+ msg: The password_lock tasks did not make changes appropriately
+ that:
+ - password_lock_1 is changed
+ - password_lock_2 is not changed
+ - password_lock_3 is changed
+ - password_lock_4 is not changed
+
+ - name: Lock account
+ user:
+ name: ansibulluser
+ password_lock: yes
+
+ - name: Verify account lock for BSD
+ block:
+ - name: BSD | Get account status
+ shell: "{{ status_command[ansible_facts['system']] }}"
+ register: account_status_locked
+
+ - name: Unlock account
+ user:
+ name: ansibulluser
+ password_lock: no
+
+ - name: BSD | Get account status
+ shell: "{{ status_command[ansible_facts['system']] }}"
+ register: account_status_unlocked
+
+ - name: FreeBSD | Ensure account is locked
+ assert:
+ that:
+ - "'LOCKED' in account_status_locked.stdout"
+ - "'LOCKED' not in account_status_unlocked.stdout"
+ when: ansible_facts['system'] == 'FreeBSD'
+
+ when: ansible_facts['system'] in ['FreeBSD', 'OpenBSD']
+
+ - name: Verify account lock for Linux
+ block:
+ - name: LINUX | Get account status
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure account is locked
+ assert:
+ that:
+ - getent_shadow['ansibulluser'][0].startswith('!')
+
+ - name: Unlock account
+ user:
+ name: ansibulluser
+ password_lock: no
+
+ - name: LINUX | Get account status
+ getent:
+ database: shadow
+ key: ansibulluser
+
+ - name: LINUX | Ensure account is unlocked
+ assert:
+ that:
+ - not getent_shadow['ansibulluser'][0].startswith('!')
+
+ when: ansible_facts['system'] == 'Linux'
+
+ always:
+ - name: Unlock account
+ user:
+ name: ansibulluser
+ password_lock: no
+
+ when: ansible_facts['system'] in ['FreeBSD', 'OpenBSD', 'Linux']
+
+
+## Check local mode
+# Even if we don't have a system that is bound to a directory, it's useful
+# to run with local: true to exercise the code path that reads through the local
+# user database file.
+# https://github.com/ansible/ansible/issues/50947
+
+- name: Create /etc/gshadow
+ file:
+ path: /etc/gshadow
+ state: touch
+ when: ansible_facts.os_family == 'Suse'
+ tags:
+ - user_test_local_mode
+
+- name: Create /etc/libuser.conf
+ file:
+ path: /etc/libuser.conf
+ state: touch
+ when:
+ - ansible_facts.distribution == 'Ubuntu'
+ - ansible_facts.distribution_major_version is version_compare('16', '==')
+ tags:
+ - user_test_local_mode
+
+- name: Ensure luseradd is present
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: libuser
+ state: present
+ when: ansible_facts.system in ['Linux']
+ tags:
+ - user_test_local_mode
+
+- name: Create local account that already exists to check for warning
+ user:
+ name: root
+ local: yes
+ register: local_existing
+ tags:
+ - user_test_local_mode
+
+- name: Create local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ register: local_user_test_1
+ tags:
+ - user_test_local_mode
+
+- name: Create local_ansibulluser again
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ register: local_user_test_2
+ tags:
+ - user_test_local_mode
+
+- name: Remove local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: absent
+ remove: yes
+ local: yes
+ register: local_user_test_remove_1
+ tags:
+ - user_test_local_mode
+
+- name: Remove local_ansibulluser again
+ user:
+ name: local_ansibulluser
+ state: absent
+ remove: yes
+ local: yes
+ register: local_user_test_remove_2
+ tags:
+ - user_test_local_mode
+
+- name: Create test groups
+ group:
+ name: "{{ item }}"
+ loop:
+ - testgroup1
+ - testgroup2
+ - testgroup3
+ - testgroup4
+ tags:
+ - user_test_local_mode
+
+- name: Create local_ansibulluser with groups
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ groups: ['testgroup1', 'testgroup2']
+ register: local_user_test_3
+ ignore_errors: yes
+ tags:
+ - user_test_local_mode
+
+- name: Append groups for local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: present
+ local: yes
+ groups: ['testgroup3', 'testgroup4']
+ append: yes
+ register: local_user_test_4
+ ignore_errors: yes
+ tags:
+ - user_test_local_mode
+
+- name: Test append without groups for local_ansibulluser
+ user:
+ name: local_ansibulluser
+ state: present
+ append: yes
+ register: local_user_test_5
+ ignore_errors: yes
+ tags:
+ - user_test_local_mode
+
+- name: Remove local_ansibulluser again
+ user:
+ name: local_ansibulluser
+ state: absent
+ remove: yes
+ local: yes
+ tags:
+ - user_test_local_mode
+
+- name: Remove test groups
+ group:
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - testgroup1
+ - testgroup2
+ - testgroup3
+ - testgroup4
+ tags:
+ - user_test_local_mode
+
+- name: Ensure local user accounts were created and removed properly
+ assert:
+ that:
+ - local_user_test_1 is changed
+ - local_user_test_2 is not changed
+ - local_user_test_3 is changed
+ - local_user_test_4 is changed
+ - local_user_test_remove_1 is changed
+ - local_user_test_remove_2 is not changed
+ tags:
+ - user_test_local_mode
+
+- name: Ensure warnings were displayed properly
+ assert:
+ that:
+ - local_user_test_1['warnings'] | length > 0
+ - local_user_test_1['warnings'] | first is search('The local user account may already exist')
+ - local_user_test_5['warnings'] is search("'append' is set, but no 'groups' are specified. Use 'groups'")
+ - local_existing['warnings'] is not defined
+ when: ansible_facts.system in ['Linux']
+ tags:
+ - user_test_local_mode
+
+- name: Test expires for local users
+ import_tasks: expires_local.yml
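The local-mode tests above hinge on the difference between an NSS lookup and a read of the files themselves: a host bound to a directory can resolve an account through LDAP or AD even when nothing exists in /etc/passwd, which is also why the tasks make sure libuser's luseradd is available, since it operates on the local files directly. A minimal sketch of that contrast (illustrative helpers, not the user module's internals):

    import pwd

    def exists_via_nss(name):
        # pwd.getpwnam() resolves through NSS, so a directory service
        # may answer even when the account is not in the local files
        try:
            pwd.getpwnam(name)
            return True
        except KeyError:
            return False

    def exists_in_local_files(name):
        # the 'local: yes' code path instead reads /etc/passwd itself
        with open('/etc/passwd') as f:
            return any(line.split(':', 1)[0] == name for line in f)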
diff --git a/test/integration/targets/user/vars/main.yml b/test/integration/targets/user/vars/main.yml
new file mode 100644
index 00000000..4b328f71
--- /dev/null
+++ b/test/integration/targets/user/vars/main.yml
@@ -0,0 +1,13 @@
+user_home_prefix:
+ Linux: '/home'
+ FreeBSD: '/home'
+ SunOS: '/home'
+ Darwin: '/Users'
+
+status_command:
+ OpenBSD: "grep ansibulluser /etc/master.passwd | cut -d ':' -f 2"
+ FreeBSD: 'pw user show ansibulluser'
+
+default_user_group:
+ openSUSE Leap: users
+ MacOSX: admin
diff --git a/test/integration/targets/var_blending/aliases b/test/integration/targets/var_blending/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/var_blending/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/var_blending/group_vars/all b/test/integration/targets/var_blending/group_vars/all
new file mode 100644
index 00000000..30aa3d6d
--- /dev/null
+++ b/test/integration/targets/var_blending/group_vars/all
@@ -0,0 +1,9 @@
+a: 999
+b: 998
+c: 997
+d: 996
+uno: 1
+dos: 2
+tres: 3
+etest: 'from group_vars'
+inventory_beats_default: 'narf'
diff --git a/test/integration/targets/var_blending/group_vars/local b/test/integration/targets/var_blending/group_vars/local
new file mode 100644
index 00000000..8feb93fc
--- /dev/null
+++ b/test/integration/targets/var_blending/group_vars/local
@@ -0,0 +1 @@
+tres: 'three'
diff --git a/test/integration/targets/var_blending/host_vars/testhost b/test/integration/targets/var_blending/host_vars/testhost
new file mode 100644
index 00000000..49271aef
--- /dev/null
+++ b/test/integration/targets/var_blending/host_vars/testhost
@@ -0,0 +1,4 @@
+a: 1
+b: 2
+c: 3
+d: 4
diff --git a/test/integration/targets/var_blending/inventory b/test/integration/targets/var_blending/inventory
new file mode 100644
index 00000000..f0afb18d
--- /dev/null
+++ b/test/integration/targets/var_blending/inventory
@@ -0,0 +1,26 @@
+[local]
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+testhost2 ansible_connection=local # connections are never made to this host, only host vars are accessed
+
+# the following inline declarations are accompanied
+# by (preferred) group_vars/ and host_vars/ variables
+# and are used in testing of variable precedence
+
+[arbitrary_parent:children]
+local
+
+[local:vars]
+parent_var=6000
+groups_tree_var=5000
+
+[arbitrary_parent:vars]
+groups_tree_var=4000
+overridden_in_parent=1000
+
+[arbitrary_grandparent:children]
+arbitrary_parent
+
+[arbitrary_grandparent:vars]
+groups_tree_var=3000
+grandparent_var=2000
+overridden_in_parent=2000
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml b/test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml
new file mode 100644
index 00000000..671a1271
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/defaults/main.yml
@@ -0,0 +1,4 @@
+etest: "from role defaults"
+role_var_beats_default: "shouldn't see this"
+parameterized_beats_default: "shouldn't see this"
+inventory_beats_default: "shouldn't see this"
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt b/test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt
new file mode 100644
index 00000000..d51be39b
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/files/foo.txt
@@ -0,0 +1,77 @@
+The value of groups_tree_var = 5000.
+This comes from host, not the parents or grandparents.
+
+The value of the grandparent variable grandparent_var is
+not overridden and is = 2000
+
+The value of the parent variable is not overridden and
+is = 6000
+
+The variable 'overridden_in_parent' is set in the parent
+and grandparent, so the parent wins. Its value is = 1000.
+
+The values of 'uno', 'dos', and 'tres' are set in group_vars/all but 'tres' is
+set to the value of 'three' in group_vars/local, which should override it.
+
+uno = 1
+dos = 2
+tres = three
+
+The values of 'a', 'b', 'c', and 'd' are set in host_vars/local and should not
+be clobbered by values that are also set in group_vars.
+
+a = 1
+b = 2
+c = 3
+d = 4
+
+The value of 'badwolf' is set via the include_vars plugin.
+
+badwolf = badwolf
+
+The value of 'winter' is set via the main.yml in the role.
+
+winter = coming
+
+Here's an arbitrary variable set as vars_files in the playbook.
+
+vars_file_var = 321
+
+And vars.
+
+vars = 123
+
+Variables about other hosts can be looked up via hostvars. This includes
+facts but here we'll just access a variable defined in the groups.
+
+999
+
+Ansible has pretty basic precedence rules for variable overriding. We already have
+some tests above about group order. Here are a few more.
+
+ * -e variables always win
+ * then comes "most everything else"
+ * then comes variables defined in inventory
+ * then "role defaults", which are the most "defaulty" and lose in priority to everything.
+
+Given the above rules, here's a test that a -e variable overrides inventory,
+and also defaults, and role vars.
+
+etest = from -e
+
+Now a test to make sure role variables can override inventory variables.
+
+role_var_beats_inventory = chevron 5 encoded
+
+Role variables should also beat defaults.
+
+role_var_beats_default = chevron 6 encoded
+
+But defaults are lower priority than inventory, so inventory should win.
+
+inventory_beats_default = narf
+
+That's the end of the precedence tests for now, but more are welcome.
+
+
+
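The ladder in the fixture above is easiest to picture as a dict merge in which higher-precedence layers are applied last. A toy model using the values this target actually defines (four layers only; the real engine has many more):

    # toy precedence merge: later (higher-precedence) layers override earlier ones
    layers = [
        ('role_defaults', {'etest': 'from role defaults'}),
        ('inventory',     {'etest': 'from group_vars'}),
        ('role_vars',     {'etest': 'from role vars'}),
        ('extra_vars',    {'etest': 'from -e'}),  # -e always wins
    ]
    merged = {}
    for _name, layer in layers:
        merged.update(layer)
    print(merged['etest'])  # -> from -e

which reproduces the etest = from -e line that the template comparison checks.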
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml b/test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml
new file mode 100644
index 00000000..f2b2e54a
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/tasks/main.yml
@@ -0,0 +1,57 @@
+# test code
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include_vars: more_vars.yml
+
+- set_fact:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- name: deploy a template that will use variables at various levels
+ template: src=foo.j2 dest={{output_dir}}/foo.templated
+ register: template_result
+
+- name: copy known good into place
+ copy: src=foo.txt dest={{output_dir}}/foo.txt
+
+- name: compare templated file to known good
+ shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt
+ register: diff_result
+
+- name: verify templated file matches known good
+ assert:
+ that:
+ - 'diff_result.stdout == ""'
+
+- name: check debug variable with same name as var content
+ debug: var=same_value_as_var_name_var
+ register: same_value_as_var_name
+
+- name: check debug variable output when variable is undefined
+ debug: var=undefined_variable
+ register: var_undefined
+
+- assert:
+ that:
+ - "'VARIABLE IS NOT DEFINED!' in var_undefined.undefined_variable"
+ - same_value_as_var_name.same_value_as_var_name_var == 'same_value_as_var_name_var'
+
+- name: cleanup temporary template output
+ file: path={{output_dir}}/foo.templated state=absent
+
+- name: cleanup temporary copy
+ file: path={{output_dir}}/foo.txt state=absent
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2 b/test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2
new file mode 100644
index 00000000..10709b1a
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/templates/foo.j2
@@ -0,0 +1,77 @@
+The value of groups_tree_var = {{ groups_tree_var }}.
+This comes from host, not the parents or grandparents.
+
+The value of the grandparent variable grandparent_var is
+not overridden and is = {{ grandparent_var }}
+
+The value of the parent variable is not overridden and
+is = {{ parent_var }}
+
+The variable 'overridden_in_parent' is set in the parent
+and grandparent, so the parent wins. Its value is = {{ overridden_in_parent }}.
+
+The values of 'uno', 'dos', and 'tres' are set in group_vars/all but 'tres' is
+set to the value of 'three' in group_vars/local, which should override it.
+
+uno = {{ uno }}
+dos = {{ dos }}
+tres = {{ tres }}
+
+The values of 'a', 'b', 'c', and 'd' are set in host_vars/local and should not
+be clobbered by values that are also set in group_vars.
+
+a = {{ a }}
+b = {{ b }}
+c = {{ c }}
+d = {{ d }}
+
+The value of 'badwolf' is set via the include_vars plugin.
+
+badwolf = {{ badwolf }}
+
+The value of 'winter' is set via the main.yml in the role.
+
+winter = {{ winter }}
+
+Here's an arbitrary variable set as vars_files in the playbook.
+
+vars_file_var = {{ vars_file_var }}
+
+And vars.
+
+vars = {{ vars_var }}
+
+Variables about other hosts can be looked up via hostvars. This includes
+facts but here we'll just access a variable defined in the groups.
+
+{{ hostvars['testhost2']['a'] }}
+
+Ansible has pretty basic precedence rules for variable overriding. We already have
+some tests above about group order. Here are a few more.
+
+ * -e variables always win
+ * then comes "most everything else"
+ * then comes variables defined in inventory
+ * then "role defaults", which are the most "defaulty" and lose in priority to everything.
+
+Given the above rules, here's a test that a -e variable overrides inventory,
+and also defaults, and role vars.
+
+etest = {{ etest }}
+
+Now a test to make sure role variables can override inventory variables.
+
+role_var_beats_inventory = {{ role_var_beats_inventory }}
+
+Role variables should also beat defaults.
+
+role_var_beats_default = {{ role_var_beats_default }}
+
+But defaults are lower priority than inventory, so inventory should win.
+
+inventory_beats_default = {{ inventory_beats_default }}
+
+That's the end of the precedence tests for now, but more are welcome.
+
+
+
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml b/test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml
new file mode 100644
index 00000000..1bb08bf8
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/vars/main.yml
@@ -0,0 +1,4 @@
+winter: coming
+etest: 'from role vars'
+role_var_beats_inventory: 'chevron 5 encoded'
+role_var_beats_default: 'chevron 6 encoded'
diff --git a/test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml b/test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml
new file mode 100644
index 00000000..bac93d3e
--- /dev/null
+++ b/test/integration/targets/var_blending/roles/test_var_blending/vars/more_vars.yml
@@ -0,0 +1,3 @@
+badwolf: badwolf
+
+same_value_as_var_name_var: "same_value_as_var_name_var"
diff --git a/test/integration/targets/var_blending/runme.sh b/test/integration/targets/var_blending/runme.sh
new file mode 100755
index 00000000..d0cf7f09
--- /dev/null
+++ b/test/integration/targets/var_blending/runme.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_var_blending.yml -i inventory -e @test_vars.yml -v "$@"
diff --git a/test/integration/targets/var_blending/test_var_blending.yml b/test/integration/targets/var_blending/test_var_blending.yml
new file mode 100644
index 00000000..88a35b2c
--- /dev/null
+++ b/test/integration/targets/var_blending/test_var_blending.yml
@@ -0,0 +1,8 @@
+- hosts: testhost
+ vars_files:
+ - vars_file.yml
+ vars:
+ vars_var: 123
+ gather_facts: True
+ roles:
+ - { role: test_var_blending, parameterized_beats_default: 1234, tags: test_var_blending }
diff --git a/test/integration/targets/var_blending/test_vars.yml b/test/integration/targets/var_blending/test_vars.yml
new file mode 100644
index 00000000..abb71a55
--- /dev/null
+++ b/test/integration/targets/var_blending/test_vars.yml
@@ -0,0 +1 @@
+etest: 'from -e'
diff --git a/test/integration/targets/var_blending/vars_file.yml b/test/integration/targets/var_blending/vars_file.yml
new file mode 100644
index 00000000..971e16a7
--- /dev/null
+++ b/test/integration/targets/var_blending/vars_file.yml
@@ -0,0 +1,12 @@
+# this file is here to support testing vars_files in the blending tests only.
+# in general define test data in the individual role:
+# roles/role_name/vars/main.yml
+
+foo: "Hello"
+things1:
+ - 1
+ - 2
+things2:
+ - "{{ foo }}"
+ - "{{ foob | default('') }}"
+vars_file_var: 321
diff --git a/test/integration/targets/var_precedence/aliases b/test/integration/targets/var_precedence/aliases
new file mode 100644
index 00000000..3005e4b2
--- /dev/null
+++ b/test/integration/targets/var_precedence/aliases
@@ -0,0 +1 @@
+shippable/posix/group4
diff --git a/test/integration/targets/var_precedence/ansible-var-precedence-check.py b/test/integration/targets/var_precedence/ansible-var-precedence-check.py
new file mode 100755
index 00000000..f19cd1c5
--- /dev/null
+++ b/test/integration/targets/var_precedence/ansible-var-precedence-check.py
@@ -0,0 +1,541 @@
+#!/usr/bin/env python
+
+# A tool to check the order of precedence for ansible variables
+# https://github.com/ansible/ansible/blob/devel/test/integration/test_var_precedence.yml
+
+import json
+import os
+import sys
+import shutil
+import stat
+import subprocess
+import tempfile
+import yaml
+from pprint import pprint
+from optparse import OptionParser
+from jinja2 import Environment
+
+ENV = Environment()
+TESTDIR = tempfile.mkdtemp()
+
+
+def run_command(args, cwd=None):
+ p = subprocess.Popen(
+ args,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ shell=True,
+ cwd=cwd,
+ )
+ (so, se) = p.communicate()
+ return (p.returncode, so, se)
+
+
+def clean_test_dir():
+ if os.path.isdir(TESTDIR):
+ shutil.rmtree(TESTDIR)
+ os.makedirs(TESTDIR)
+
+
+class Role(object):
+ def __init__(self, name):
+ self.name = name
+ self.load = True
+ self.dependencies = []
+ self.defaults = False
+ self.vars = False
+ self.tasks = []
+ self.params = dict()
+
+ def write_role(self):
+
+ fpath = os.path.join(TESTDIR, 'roles', self.name)
+ if not os.path.isdir(fpath):
+ os.makedirs(fpath)
+
+ if self.defaults:
+ # roles/x/defaults/main.yml
+ fpath = os.path.join(TESTDIR, 'roles', self.name, 'defaults')
+ if not os.path.isdir(fpath):
+ os.makedirs(fpath)
+ fname = os.path.join(fpath, 'main.yml')
+ with open(fname, 'w') as f:
+ f.write('findme: %s\n' % self.name)
+
+ if self.vars:
+ # roles/x/vars/main.yml
+ fpath = os.path.join(TESTDIR, 'roles', self.name, 'vars')
+ if not os.path.isdir(fpath):
+ os.makedirs(fpath)
+ fname = os.path.join(fpath, 'main.yml')
+ with open(fname, 'w') as f:
+ f.write('findme: %s\n' % self.name)
+
+ if self.dependencies:
+ fpath = os.path.join(TESTDIR, 'roles', self.name, 'meta')
+ if not os.path.isdir(fpath):
+ os.makedirs(fpath)
+ fname = os.path.join(fpath, 'main.yml')
+ with open(fname, 'w') as f:
+ f.write('dependencies:\n')
+ for dep in self.dependencies:
+ f.write('- { role: %s }\n' % dep)
+
+
+class DynamicInventory(object):
+ BASESCRIPT = '''#!/usr/bin/python
+import json
+data = """{{ data }}"""
+data = json.loads(data)
+print(json.dumps(data, indent=2, sort_keys=True))
+'''
+
+ BASEINV = {
+ '_meta': {
+ 'hostvars': {
+ 'testhost': {}
+ }
+ }
+ }
+
+ def __init__(self, features):
+ self.ENV = Environment()
+ self.features = features
+ self.fpath = None
+ self.inventory = self.BASEINV.copy()
+ self.build()
+
+ def build(self):
+ xhost = 'testhost'
+ if 'script_host' in self.features:
+ self.inventory['_meta']['hostvars'][xhost]['findme'] = 'script_host'
+ else:
+ self.inventory['_meta']['hostvars'][xhost] = {}
+
+ if 'script_child' in self.features:
+ self.inventory['child'] = {
+ 'hosts': [xhost],
+ 'vars': {'findme': 'script_child'}
+ }
+
+ if 'script_parent' in self.features:
+
+ self.inventory['parent'] = {
+ 'vars': {'findme': 'script_parent'}
+ }
+
+ if 'script_child' in self.features:
+ self.inventory['parent']['children'] = ['child']
+ else:
+ self.inventory['parent']['hosts'] = [xhost]
+
+ if 'script_all' in self.features:
+ self.inventory['all'] = {
+ 'hosts': [xhost],
+ 'vars': {
+ 'findme': 'script_all'
+ },
+ }
+ else:
+ self.inventory['all'] = {
+ 'hosts': [xhost],
+ }
+
+ def write_script(self):
+ fdir = os.path.join(TESTDIR, 'inventory')
+ if not os.path.isdir(fdir):
+ os.makedirs(fdir)
+ fpath = os.path.join(fdir, 'hosts')
+ # fpath = os.path.join(TESTDIR, 'inventory')
+ self.fpath = fpath
+
+ data = json.dumps(self.inventory)
+ t = self.ENV.from_string(self.BASESCRIPT)
+ fdata = t.render(data=data)
+ with open(fpath, 'w') as f:
+ f.write(fdata + '\n')
+ st = os.stat(fpath)
+ os.chmod(fpath, st.st_mode | stat.S_IEXEC)
+
+
+class VarTestMaker(object):
+ def __init__(self, features, dynamic_inventory=False):
+ clean_test_dir()
+ self.dynamic_inventory = dynamic_inventory
+ self.di = None
+ self.features = features[:]
+ self.inventory = ''
+ self.playvars = dict()
+ self.varsfiles = []
+ self.playbook = dict(hosts='testhost', gather_facts=False)
+ self.tasks = []
+ self.roles = []
+ self.ansible_command = None
+ self.stdout = None
+
+ def write_playbook(self):
+ fname = os.path.join(TESTDIR, 'site.yml')
+ pb_copy = self.playbook.copy()
+
+ if self.playvars:
+ pb_copy['vars'] = self.playvars
+ if self.varsfiles:
+ pb_copy['vars_files'] = self.varsfiles
+ if self.roles:
+ pb_copy['roles'] = []
+ for role in self.roles:
+ role.write_role()
+ role_def = dict(role=role.name)
+ role_def.update(role.params)
+ pb_copy['roles'].append(role_def)
+ if self.tasks:
+ pb_copy['tasks'] = self.tasks
+
+ with open(fname, 'w') as f:
+ yaml.dump([pb_copy], f, default_flow_style=False, indent=2)
+
+ def build(self):
+
+ if self.dynamic_inventory:
+ # python based inventory file
+ self.di = DynamicInventory(self.features)
+ self.di.write_script()
+ else:
+ # ini based inventory file
+ if 'ini_host' in self.features:
+ self.inventory += 'testhost findme=ini_host\n'
+ else:
+ self.inventory += 'testhost\n'
+ self.inventory += '\n'
+
+ if 'ini_child' in self.features:
+ self.inventory += '[child]\n'
+ self.inventory += 'testhost\n'
+ self.inventory += '\n'
+ self.inventory += '[child:vars]\n'
+ self.inventory += 'findme=ini_child\n'
+ self.inventory += '\n'
+
+ if 'ini_parent' in self.features:
+ if 'ini_child' in self.features:
+ self.inventory += '[parent:children]\n'
+ self.inventory += 'child\n'
+ else:
+ self.inventory += '[parent]\n'
+ self.inventory += 'testhost\n'
+ self.inventory += '\n'
+ self.inventory += '[parent:vars]\n'
+ self.inventory += 'findme=ini_parent\n'
+ self.inventory += '\n'
+
+ if 'ini_all' in self.features:
+ self.inventory += '[all:vars]\n'
+ self.inventory += 'findme=ini_all\n'
+ self.inventory += '\n'
+
+ # default to a single file called inventory
+ invfile = os.path.join(TESTDIR, 'inventory', 'hosts')
+ ipath = os.path.join(TESTDIR, 'inventory')
+ if not os.path.isdir(ipath):
+ os.makedirs(ipath)
+
+ with open(invfile, 'w') as f:
+ f.write(self.inventory)
+
+ hpath = os.path.join(TESTDIR, 'inventory', 'host_vars')
+ if not os.path.isdir(hpath):
+ os.makedirs(hpath)
+ gpath = os.path.join(TESTDIR, 'inventory', 'group_vars')
+ if not os.path.isdir(gpath):
+ os.makedirs(gpath)
+
+ if 'ini_host_vars_file' in self.features:
+ hfile = os.path.join(hpath, 'testhost')
+ with open(hfile, 'w') as f:
+ f.write('findme: ini_host_vars_file\n')
+
+ if 'ini_group_vars_file_all' in self.features:
+ hfile = os.path.join(gpath, 'all')
+ with open(hfile, 'w') as f:
+ f.write('findme: ini_group_vars_file_all\n')
+
+ if 'ini_group_vars_file_child' in self.features:
+ hfile = os.path.join(gpath, 'child')
+ with open(hfile, 'w') as f:
+ f.write('findme: ini_group_vars_file_child\n')
+
+ if 'ini_group_vars_file_parent' in self.features:
+ hfile = os.path.join(gpath, 'parent')
+ with open(hfile, 'w') as f:
+ f.write('findme: ini_group_vars_file_parent\n')
+
+ if 'pb_host_vars_file' in self.features:
+ os.makedirs(os.path.join(TESTDIR, 'host_vars'))
+ fname = os.path.join(TESTDIR, 'host_vars', 'testhost')
+ with open(fname, 'w') as f:
+ f.write('findme: pb_host_vars_file\n')
+
+ if 'pb_group_vars_file_parent' in self.features:
+ if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')):
+ os.makedirs(os.path.join(TESTDIR, 'group_vars'))
+ fname = os.path.join(TESTDIR, 'group_vars', 'parent')
+ with open(fname, 'w') as f:
+ f.write('findme: pb_group_vars_file_parent\n')
+
+ if 'pb_group_vars_file_child' in self.features:
+ if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')):
+ os.makedirs(os.path.join(TESTDIR, 'group_vars'))
+ fname = os.path.join(TESTDIR, 'group_vars', 'child')
+ with open(fname, 'w') as f:
+ f.write('findme: pb_group_vars_file_child\n')
+
+ if 'pb_group_vars_file_all' in self.features:
+ if not os.path.isdir(os.path.join(TESTDIR, 'group_vars')):
+ os.makedirs(os.path.join(TESTDIR, 'group_vars'))
+ fname = os.path.join(TESTDIR, 'group_vars', 'all')
+ with open(fname, 'w') as f:
+ f.write('findme: pb_group_vars_file_all\n')
+
+ if 'play_var' in self.features:
+ self.playvars['findme'] = 'play_var'
+
+ if 'set_fact' in self.features:
+ self.tasks.append(dict(set_fact='findme="set_fact"'))
+
+ if 'vars_file' in self.features:
+ self.varsfiles.append('varsfile.yml')
+ fname = os.path.join(TESTDIR, 'varsfile.yml')
+ with open(fname, 'w') as f:
+ f.write('findme: vars_file\n')
+
+ if 'include_vars' in self.features:
+ self.tasks.append(dict(include_vars='included_vars.yml'))
+ fname = os.path.join(TESTDIR, 'included_vars.yml')
+ with open(fname, 'w') as f:
+ f.write('findme: include_vars\n')
+
+ if 'role_var' in self.features:
+ role = Role('role_var')
+ role.vars = True
+ role.load = True
+ self.roles.append(role)
+
+ if 'role_parent_default' in self.features:
+ role = Role('role_default')
+ role.load = False
+ role.defaults = True
+ self.roles.append(role)
+
+ role = Role('role_parent_default')
+ role.dependencies.append('role_default')
+ role.defaults = True
+ role.load = True
+ if 'role_params' in self.features:
+ role.params = dict(findme='role_params')
+ self.roles.append(role)
+
+ elif 'role_default' in self.features:
+ role = Role('role_default')
+ role.defaults = True
+ role.load = True
+ if 'role_params' in self.features:
+ role.params = dict(findme='role_params')
+ self.roles.append(role)
+
+ debug_task = dict(debug='var=findme')
+ test_task = {'assert': dict(that=['findme == "%s"' % self.features[0]])}
+ if 'task_vars' in self.features:
+ test_task['vars'] = dict(findme="task_vars")
+ if 'registered_vars' in self.features:
+ test_task['register'] = 'findme'
+
+ if 'block_vars' in self.features:
+ block_wrapper = [
+ debug_task,
+ {
+ 'block': [test_task],
+ 'vars': dict(findme="block_vars"),
+ }
+ ]
+ else:
+ block_wrapper = [debug_task, test_task]
+
+ if 'include_params' in self.features:
+ self.tasks.append(dict(name='including tasks', include='included_tasks.yml', vars=dict(findme='include_params')))
+ else:
+ self.tasks.append(dict(include='included_tasks.yml'))
+
+ fname = os.path.join(TESTDIR, 'included_tasks.yml')
+ with open(fname, 'w') as f:
+ f.write(yaml.dump(block_wrapper))
+
+ self.write_playbook()
+
+ def run(self):
+ '''
+ if self.dynamic_inventory:
+ cmd = 'ansible-playbook -c local -i inventory/hosts site.yml'
+ else:
+ cmd = 'ansible-playbook -c local -i inventory site.yml'
+ '''
+ cmd = 'ansible-playbook -c local -i inventory site.yml'
+ if 'extra_vars' in self.features:
+ cmd += ' --extra-vars="findme=extra_vars"'
+ cmd = cmd + ' -vvvvv'
+ self.ansible_command = cmd
+ (rc, so, se) = run_command(cmd, cwd=TESTDIR)
+ self.stdout = so
+
+ if rc != 0:
+ raise Exception("playbook failed (rc=%s), stdout: '%s' stderr: '%s'" % (rc, so, se))
+
+ def show_tree(self):
+ print('## TREE')
+ cmd = 'tree %s' % TESTDIR
+ (rc, so, se) = run_command(cmd)
+ lines = so.split('\n')
+ lines = lines[:-3]
+ print('\n'.join(lines))
+
+ def show_content(self):
+ print('## CONTENT')
+ cmd = 'find %s -type f | xargs tail -n +1' % TESTDIR
+ (rc, so, se) = run_command(cmd)
+ print(so)
+
+ def show_stdout(self):
+ print('## COMMAND')
+ print(self.ansible_command)
+ print('## STDOUT')
+ print(self.stdout)
+
+
+def main():
+ features = [
+ 'extra_vars',
+ 'include_params',
+ # 'role_params', # FIXME: we don't yet validate tasks within a role
+ 'set_fact',
+ # 'registered_vars', # FIXME: hard to simulate
+ 'include_vars',
+ # 'role_dep_params',
+ 'task_vars',
+ 'block_vars',
+ 'role_var',
+ 'vars_file',
+ 'play_var',
+ # 'host_facts', # FIXME: hard to simulate
+ 'pb_host_vars_file',
+ 'ini_host_vars_file',
+ 'ini_host',
+ 'pb_group_vars_file_child',
+ # 'ini_group_vars_file_child',  # FIXME: this contradicts documented precedence; playbook group_vars files should override inventory ones
+ 'pb_group_vars_file_parent',
+ 'ini_group_vars_file_parent',
+ 'pb_group_vars_file_all',
+ 'ini_group_vars_file_all',
+ 'ini_child',
+ 'ini_parent',
+ 'ini_all',
+ 'role_parent_default',
+ 'role_default',
+ ]
+
+ parser = OptionParser()
+ parser.add_option('-f', '--feature', action='append')
+ parser.add_option('--use_dynamic_inventory', action='store_true')
+ parser.add_option('--show_tree', action='store_true')
+ parser.add_option('--show_content', action='store_true')
+ parser.add_option('--show_stdout', action='store_true')
+ parser.add_option('--copy_testcases_to_local_dir', action='store_true')
+ (options, args) = parser.parse_args()
+
+ if options.feature:
+ for f in options.feature:
+ if f not in features:
+ print('%s is not a valid feature' % f)
+ sys.exit(1)
+ features = list(options.feature)
+
+ fdesc = {
+ 'ini_host': 'host var inside the ini',
+ 'script_host': 'host var inside the script _meta',
+ 'ini_child': 'child group var inside the ini',
+ 'script_child': 'child group var inside the script',
+ 'ini_parent': 'parent group var inside the ini',
+ 'script_parent': 'parent group var inside the script',
+ 'ini_all': 'all group var inside the ini',
+ 'script_all': 'all group var inside the script',
+ 'ini_host_vars_file': 'var in inventory/host_vars/host',
+ 'ini_group_vars_file_parent': 'var in inventory/group_vars/parent',
+ 'ini_group_vars_file_child': 'var in inventory/group_vars/child',
+ 'ini_group_vars_file_all': 'var in inventory/group_vars/all',
+ 'pb_group_vars_file_parent': 'var in playbook/group_vars/parent',
+ 'pb_group_vars_file_child': 'var in playbook/group_vars/child',
+ 'pb_group_vars_file_all': 'var in playbook/group_vars/all',
+ 'pb_host_vars_file': 'var in playbook/host_vars/host',
+ 'play_var': 'var set in playbook header',
+ 'role_parent_default': 'var in roles/role_parent/defaults/main.yml',
+ 'role_default': 'var in roles/role/defaults/main.yml',
+ 'role_var': 'var in roles/role/vars/main.yml',
+ 'include_vars': 'var in included file',
+ 'set_fact': 'var made by set_fact',
+ 'vars_file': 'var in file added by vars_file',
+ 'block_vars': 'vars defined on the block',
+ 'task_vars': 'vars defined on the task',
+ 'extra_vars': 'var passed via the cli'
+ }
+
+ dinv = options.use_dynamic_inventory
+ if dinv:
+ # some features are specific to ini, so swap those
+ for (idx, x) in enumerate(features):
+ if x.startswith('ini_') and 'vars_file' not in x:
+ features[idx] = x.replace('ini_', 'script_')
+
+ index = 1
+ while features:
+ VTM = VarTestMaker(features, dynamic_inventory=dinv)
+ VTM.build()
+
+ if options.show_tree or options.show_content or options.show_stdout:
+ print('')
+ if options.show_tree:
+ VTM.show_tree()
+ if options.show_content:
+ VTM.show_content()
+
+ try:
+ print("CHECKING: %s (%s)" % (features[0], fdesc.get(features[0], '')))
+ res = VTM.run()
+ if options.show_stdout:
+ VTM.show_stdout()
+
+ features.pop(0)
+
+ if options.copy_testcases_to_local_dir:
+ topdir = 'testcases'
+ if index == 1 and os.path.isdir(topdir):
+ shutil.rmtree(topdir)
+ if not os.path.isdir(topdir):
+ os.makedirs(topdir)
+ thisindex = str(index)
+ if len(thisindex) == 1:
+ thisindex = '0' + thisindex
+ thisdir = os.path.join(topdir, '%s.%s' % (thisindex, res))
+ shutil.copytree(TESTDIR, thisdir)
+
+ except Exception as e:
+ print("ERROR !!!")
+ print(e)
+ print('feature: %s failed' % features[0])
+ sys.exit(1)
+ finally:
+ shutil.rmtree(TESTDIR)
+ index += 1
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/integration/targets/var_precedence/host_vars/testhost b/test/integration/targets/var_precedence/host_vars/testhost
new file mode 100644
index 00000000..7d533554
--- /dev/null
+++ b/test/integration/targets/var_precedence/host_vars/testhost
@@ -0,0 +1,2 @@
+# Var precedence testing
+defaults_file_var_role3: "overridden from inventory"
diff --git a/test/integration/targets/var_precedence/inventory b/test/integration/targets/var_precedence/inventory
new file mode 100644
index 00000000..3b52d041
--- /dev/null
+++ b/test/integration/targets/var_precedence/inventory
@@ -0,0 +1,13 @@
+[local]
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+
+[all:vars]
+extra_var_override=FROM_INVENTORY
+inven_var=inventory_var
+
+[inven_overridehosts]
+invenoverride ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
+
+[inven_overridehosts:vars]
+foo=foo
+var_dir=vars
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml
new file mode 100644
index 00000000..423b94e3
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - { role: test_var_precedence_role1, param_var: "param_var_role1" }
+ - { role: test_var_precedence_role2, param_var: "param_var_role2" }
+ - { role: test_var_precedence_role3, param_var: "param_var_role3" }
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml
new file mode 100644
index 00000000..7850e6b6
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence/tasks/main.yml
@@ -0,0 +1,10 @@
+- debug: var=extra_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- assert:
+ that:
+ - 'extra_var == "extra_var"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_role3"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml
new file mode 100644
index 00000000..dda4224c
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# should be overridden by vars_files in the main play
+vars_files_var: "BAD!"
+# should be seen in role1 (no override)
+defaults_file_var_role1: "defaults_file_var_role1"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml
new file mode 100644
index 00000000..2f8e1709
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: var=extra_var
+- debug: var=param_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- debug: var=defaults_file_var_role1
+- assert:
+ that:
+ - 'extra_var == "extra_var"'
+ - 'param_var == "param_var_role1"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_dep"'
+ - 'defaults_file_var_role1 == "defaults_file_var_role1"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml
new file mode 100644
index 00000000..a69efad5
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_dep/vars/main.yml
@@ -0,0 +1,4 @@
+---
+# should override the global vars_files_var since it's local to the role
+# but will be set to the value from the last included role that defines it
+vars_files_var_role: "vars_files_var_dep"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml
new file mode 100644
index 00000000..942ae4ec
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_inven_override/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- debug: var=foo
+- assert:
+ that:
+ - 'foo == "bar"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml
new file mode 100644
index 00000000..dda4224c
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# should be overridden by vars_files in the main play
+vars_files_var: "BAD!"
+# should be seen in role1 (no override)
+defaults_file_var_role1: "defaults_file_var_role1"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml
new file mode 100644
index 00000000..c8b410b5
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - test_var_precedence_dep
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml
new file mode 100644
index 00000000..95b2a0bb
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: var=extra_var
+- debug: var=param_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- debug: var=defaults_file_var_role1
+- assert:
+ that:
+ - 'extra_var == "extra_var"'
+ - 'param_var == "param_var_role1"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_role1"'
+ - 'defaults_file_var_role1 == "defaults_file_var_role1"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml
new file mode 100644
index 00000000..2f7613d3
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role1/vars/main.yml
@@ -0,0 +1,4 @@
+---
+# should override the global vars_files_var since it's local to the role
+# but will be set to the value from the last included role that defines it
+vars_files_var_role: "vars_files_var_role1"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml
new file mode 100644
index 00000000..8ed63ced
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# should be overridden by vars_files in the main play
+vars_files_var: "BAD!"
+# should be overridden by the vars file in role2
+defaults_file_var_role2: "BAD!"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml
new file mode 100644
index 00000000..a862389c
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: var=extra_var
+- debug: var=param_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- debug: var=defaults_file_var_role1
+- assert:
+ that:
+ - 'extra_var == "extra_var"'
+ - 'param_var == "param_var_role2"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_role2"'
+ - 'defaults_file_var_role2 == "overridden by role vars"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml
new file mode 100644
index 00000000..483c5ea2
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role2/vars/main.yml
@@ -0,0 +1,5 @@
+---
+# should override the global vars_files_var since it's local to the role
+vars_files_var_role: "vars_files_var_role2"
+# should override the value in defaults/main.yml for role 2
+defaults_file_var_role2: "overridden by role vars"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml
new file mode 100644
index 00000000..763b0d50
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# should be overridden by vars_files in the main play
+vars_files_var: "BAD!"
+# should override the defaults var for role 1 and 2
+defaults_file_var: "last one wins"
+# should be overridden from the inventory value
+defaults_file_var_role3: "BAD!"
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml
new file mode 100644
index 00000000..12346ecd
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: var=extra_var
+- debug: var=param_var
+- debug: var=vars_var
+- debug: var=vars_files_var
+- debug: var=vars_files_var_role
+- debug: var=defaults_file_var_role1
+- assert:
+ that:
+ - 'extra_var == "extra_var"'
+ - 'param_var == "param_var_role3"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_role3"'
+ - 'defaults_file_var_role3 == "overridden from inventory"'
diff --git a/test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml
new file mode 100644
index 00000000..3cfb1b1c
--- /dev/null
+++ b/test/integration/targets/var_precedence/roles/test_var_precedence_role3/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# should override the global vars_files_var since it's local to the role
+vars_files_var_role: "vars_files_var_role3"
diff --git a/test/integration/targets/var_precedence/runme.sh b/test/integration/targets/var_precedence/runme.sh
new file mode 100755
index 00000000..0f0811c3
--- /dev/null
+++ b/test/integration/targets/var_precedence/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -eux
+
+ansible-playbook test_var_precedence.yml -i inventory -v "$@" \
+ -e 'extra_var=extra_var' \
+ -e 'extra_var_override=extra_var_override'
+
+./ansible-var-precedence-check.py
diff --git a/test/integration/targets/var_precedence/test_var_precedence.yml b/test/integration/targets/var_precedence/test_var_precedence.yml
new file mode 100644
index 00000000..58584bfb
--- /dev/null
+++ b/test/integration/targets/var_precedence/test_var_precedence.yml
@@ -0,0 +1,44 @@
+---
+- hosts: testhost
+ vars:
+ - ansible_hostname: "BAD!"
+ - vars_var: "vars_var"
+ - param_var: "BAD!"
+ - vars_files_var: "BAD!"
+ - extra_var_override_once_removed: "{{ extra_var_override }}"
+ - from_inventory_once_removed: "{{ inven_var | default('BAD!') }}"
+ vars_files:
+ - vars/test_var_precedence.yml
+ roles:
+ - { role: test_var_precedence, param_var: "param_var" }
+ tasks:
+ - name: register a result
+ command: echo 'BAD!'
+ register: registered_var
+ - name: use set_fact to override the registered_var
+ set_fact: registered_var="this is from set_fact"
+ - debug: var=extra_var
+ - debug: var=extra_var_override_once_removed
+ - debug: var=vars_var
+ - debug: var=vars_files_var
+ - debug: var=vars_files_var_role
+ - debug: var=registered_var
+ - debug: var=from_inventory_once_removed
+ - assert:
+ that: item
+ with_items:
+ - 'extra_var == "extra_var"'
+ - 'extra_var_override == "extra_var_override"'
+ - 'extra_var_override_once_removed == "extra_var_override"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_role3"'
+ - 'registered_var == "this is from set_fact"'
+ - 'from_inventory_once_removed == "inventory_var"'
+
+- hosts: inven_overridehosts
+ vars_files:
+ - "test_var_precedence.yml"
+ roles:
+ - role: test_var_precedence_inven_override
+ foo: bar
diff --git a/test/integration/targets/var_precedence/vars/test_var_precedence.yml b/test/integration/targets/var_precedence/vars/test_var_precedence.yml
new file mode 100644
index 00000000..19d65cba
--- /dev/null
+++ b/test/integration/targets/var_precedence/vars/test_var_precedence.yml
@@ -0,0 +1,5 @@
+---
+extra_var: "BAD!"
+role_var: "BAD!"
+vars_files_var: "vars_files_var"
+vars_files_var_role: "should be overridden by roles"
diff --git a/test/integration/targets/var_templating/aliases b/test/integration/targets/var_templating/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/var_templating/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/var_templating/group_vars/all.yml b/test/integration/targets/var_templating/group_vars/all.yml
new file mode 100644
index 00000000..4eae7c1b
--- /dev/null
+++ b/test/integration/targets/var_templating/group_vars/all.yml
@@ -0,0 +1,7 @@
+---
+x: 100
+y: "{{ x }}"
+nested_x:
+ value:
+ x: 100
+nested_y: "{{ nested_x }}"
diff --git a/test/integration/targets/var_templating/runme.sh b/test/integration/targets/var_templating/runme.sh
new file mode 100755
index 00000000..0d3ac6bb
--- /dev/null
+++ b/test/integration/targets/var_templating/runme.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# this should succeed since we override the undefined variable
+ansible-playbook undefined.yml -i inventory -v "$@" -e '{"mytest": False}'
+
+# this should still work, just show that var is undefined in debug
+ansible-playbook undefined.yml -i inventory -v "$@"
+
+# this should work since we don't use the variable
+ansible-playbook undall.yml -i inventory -v "$@"
+
+# test hostvars templating
+ansible-playbook task_vars_templating.yml -v "$@"
+
+ansible-playbook test_connection_vars.yml -v "$@" 2>&1 | grep 'sudo'
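The two undefined-variable runs above rely on debug degrading gracefully: when templating raises an undefined error, the module prints a marker string instead of failing the task, and undefined.yml below asserts on exactly that marker. A rough analogue in plain Jinja2:

    from jinja2 import Environment, StrictUndefined
    from jinja2.exceptions import UndefinedError

    env = Environment(undefined=StrictUndefined)
    try:
        env.from_string('{{ und }}').render()
    except UndefinedError:
        # mirror debug's fallback rather than erroring out
        print('VARIABLE IS NOT DEFINED!')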
diff --git a/test/integration/targets/var_templating/task_vars_templating.yml b/test/integration/targets/var_templating/task_vars_templating.yml
new file mode 100644
index 00000000..88e1e604
--- /dev/null
+++ b/test/integration/targets/var_templating/task_vars_templating.yml
@@ -0,0 +1,58 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - add_host:
+ name: host1
+ ansible_connection: local
+ ansible_host: 127.0.0.1
+
+- hosts: all
+ gather_facts: no
+ tasks:
+ - debug:
+ msg: "{{ hostvars['host1']['x'] }}"
+ register: x_1
+ - debug:
+ msg: "{{ hostvars['host1']['y'] }}"
+ register: y_1
+ - debug:
+ msg: "{{ hostvars_['x'] }}"
+ vars:
+ hostvars_: "{{ hostvars['host1'] }}"
+ register: x_2
+ - debug:
+ msg: "{{ hostvars_['y'] }}"
+ vars:
+ hostvars_: "{{ hostvars['host1'] }}"
+ register: y_2
+
+ - assert:
+ that:
+ - x_1 == x_2
+ - y_1 == y_2
+ - x_1 == y_1
+
+ - debug:
+ msg: "{{ hostvars['host1']['nested_x']['value'] }}"
+ register: x_1
+ - debug:
+ msg: "{{ hostvars['host1']['nested_y']['value'] }}"
+ register: y_1
+ - debug:
+ msg: "{{ hostvars_['nested_x']['value'] }}"
+ vars:
+ hostvars_: "{{ hostvars['host1'] }}"
+ register: x_2
+ - debug:
+ msg: "{{ hostvars_['nested_y']['value'] }}"
+ vars:
+ hostvars_: "{{ hostvars['host1'] }}"
+ register: y_2
+
+ - assert:
+ that:
+ - x_1 == x_2
+ - y_1 == y_2
+ - x_1 == y_1
diff --git a/test/integration/targets/var_templating/test_connection_vars.yml b/test/integration/targets/var_templating/test_connection_vars.yml
new file mode 100644
index 00000000..2b22eea6
--- /dev/null
+++ b/test/integration/targets/var_templating/test_connection_vars.yml
@@ -0,0 +1,26 @@
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ my_var:
+ become_method: sudo
+ connection: local
+ become: 1
+ tasks:
+
+ - include_vars: "./vars/connection.yml"
+
+ - command: whoami
+ ignore_errors: yes
+ register: result
+ failed_when: result is not success and (result.module_stderr is defined or result.module_stdout is defined)
+
+ - assert:
+ that:
+ - "'sudo' in result.module_stderr"
+ when: result is not success and result.module_stderr is defined
+
+ - assert:
+ that:
+ - "'Invalid become method specified' not in result.msg"
+ when: result is not success and result.msg is defined
diff --git a/test/integration/targets/var_templating/undall.yml b/test/integration/targets/var_templating/undall.yml
new file mode 100644
index 00000000..9ea9f1d1
--- /dev/null
+++ b/test/integration/targets/var_templating/undall.yml
@@ -0,0 +1,6 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - debug:
+ vars:
+ mytest: '{{ und }}'
diff --git a/test/integration/targets/var_templating/undefined.yml b/test/integration/targets/var_templating/undefined.yml
new file mode 100644
index 00000000..cf083d5f
--- /dev/null
+++ b/test/integration/targets/var_templating/undefined.yml
@@ -0,0 +1,13 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: show defined/undefined var
+ debug: var=mytest
+ vars:
+ mytest: '{{ und }}'
+ register: var_undefined
+
+ - name: ensure either mytest is defined or debug finds it to be undefined
+ assert:
+ that:
+ - mytest is defined or 'VARIABLE IS NOT DEFINED!' in var_undefined['mytest']
diff --git a/test/integration/targets/var_templating/vars/connection.yml b/test/integration/targets/var_templating/vars/connection.yml
new file mode 100644
index 00000000..263929a8
--- /dev/null
+++ b/test/integration/targets/var_templating/vars/connection.yml
@@ -0,0 +1,3 @@
+ansible_become: "{{ my_var.become }}"
+ansible_become_method: "{{ my_var.become_method }}"
+ansible_connection: "{{ my_var.connection }}"
diff --git a/test/integration/targets/vault/aliases b/test/integration/targets/vault/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/test/integration/targets/vault/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/test/integration/targets/vault/empty-password b/test/integration/targets/vault/empty-password
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/vault/empty-password
diff --git a/test/integration/targets/vault/encrypted-vault-password b/test/integration/targets/vault/encrypted-vault-password
new file mode 100644
index 00000000..7aa4e4be
--- /dev/null
+++ b/test/integration/targets/vault/encrypted-vault-password
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+34353166613539646338666531633061646161663836373965663032313466613135313130383133
+3634383331386336333436323832356264343033323166370a323737396234376132353731643863
+62386335616635363062613562666561643931626332623464306666636131356134386531363533
+3831323230353333620a616633376363373830346332663733316634663937336663633631326361
+62343638656532393932643530633133326233316134383036316333373962626164
diff --git a/test/integration/targets/vault/encrypted_file_encrypted_var_password b/test/integration/targets/vault/encrypted_file_encrypted_var_password
new file mode 100644
index 00000000..57bc06e3
--- /dev/null
+++ b/test/integration/targets/vault/encrypted_file_encrypted_var_password
@@ -0,0 +1 @@
+test-encrypted-file-password
diff --git a/test/integration/targets/vault/example1_password b/test/integration/targets/vault/example1_password
new file mode 100644
index 00000000..e723c8f9
--- /dev/null
+++ b/test/integration/targets/vault/example1_password
@@ -0,0 +1 @@
+example1
diff --git a/test/integration/targets/vault/example2_password b/test/integration/targets/vault/example2_password
new file mode 100644
index 00000000..7b010f87
--- /dev/null
+++ b/test/integration/targets/vault/example2_password
@@ -0,0 +1 @@
+example2
diff --git a/test/integration/targets/vault/example3_password b/test/integration/targets/vault/example3_password
new file mode 100644
index 00000000..f5bc5a8c
--- /dev/null
+++ b/test/integration/targets/vault/example3_password
@@ -0,0 +1 @@
+example3
diff --git a/test/integration/targets/vault/faux-editor.py b/test/integration/targets/vault/faux-editor.py
new file mode 100755
index 00000000..68f62590
--- /dev/null
+++ b/test/integration/targets/vault/faux-editor.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-vault is a script that encrypts/decrypts YAML files. See
+# https://docs.ansible.com/playbooks_vault.html for more details.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+import os
+
+
+def main(args):
+ path = os.path.abspath(args[1])
+
+ fo = open(path, 'r+')
+
+ content = fo.readlines()
+
+ content.append('faux editor added at %s\n' % time.time())
+
+ fo.seek(0)
+ fo.write(''.join(content))
+ fo.close()
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[:]))
diff --git a/test/integration/targets/vault/files/test_assemble/nonsecret.txt b/test/integration/targets/vault/files/test_assemble/nonsecret.txt
new file mode 100644
index 00000000..320b6b4c
--- /dev/null
+++ b/test/integration/targets/vault/files/test_assemble/nonsecret.txt
@@ -0,0 +1 @@
+THIS IS OK
diff --git a/test/integration/targets/vault/files/test_assemble/secret.vault b/test/integration/targets/vault/files/test_assemble/secret.vault
new file mode 100644
index 00000000..fd278564
--- /dev/null
+++ b/test/integration/targets/vault/files/test_assemble/secret.vault
@@ -0,0 +1,7 @@
+$ANSIBLE_VAULT;1.1;AES256
+37626439373465656332623633333336353334326531333666363766303339336134313136616165
+6561333963343739386334653636393363396366396338660a663537666561643862343233393265
+33336436633864323935356337623861663631316530336532633932623635346364363338363437
+3365313831366365350a613934313862313538626130653539303834656634353132343065633162
+34316135313837623735653932663139353164643834303534346238386435373832366564646236
+3461333465343434666639373432366139363566303564643066
diff --git a/test/integration/targets/vault/format_1_1_AES256.yml b/test/integration/targets/vault/format_1_1_AES256.yml
new file mode 100644
index 00000000..5616605e
--- /dev/null
+++ b/test/integration/targets/vault/format_1_1_AES256.yml
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+33613463343938323434396164663236376438313435633837336438366530666431643031333734
+6463646538393331333239393363333830613039376562360a396635393636636539346332336364
+35303039353164386461326439346165656463383137663932323930666632326263636266656461
+3232663537653637640a643166666232633936636664376435316664656631633166323237356163
+6138
diff --git a/test/integration/targets/vault/format_1_2_AES256.yml b/test/integration/targets/vault/format_1_2_AES256.yml
new file mode 100644
index 00000000..1e3795fb
--- /dev/null
+++ b/test/integration/targets/vault/format_1_2_AES256.yml
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.2;AES256;test_vault_id
+30383835613535356232333534303264656530633664616233386138396563623939626136366537
+3635323530646538626138383136636437616637616430610a386661346563346136326637656461
+64393364343964633364336666333630383164643662343930663432316333633537353938376437
+6134656262373731390a363166356461376663313532343733326438386632623930313366643038
+6133
diff --git a/test/integration/targets/vault/host_vars/myhost.yml b/test/integration/targets/vault/host_vars/myhost.yml
new file mode 100644
index 00000000..1434ec15
--- /dev/null
+++ b/test/integration/targets/vault/host_vars/myhost.yml
@@ -0,0 +1,7 @@
+myvar: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 31356335363836383937363933366135623233343830326234633633623734336636343630396464
+ 3234343638313166663237343536646336323862613739380a346266316336356230643838663031
+ 34623034383639323062373235356564393337346666393665313237313231306131356637346537
+ 3966393238666430310a363462326639323033653237373036643936613234623063643761663033
+ 3832
diff --git a/test/integration/targets/vault/host_vars/testhost.yml b/test/integration/targets/vault/host_vars/testhost.yml
new file mode 100644
index 00000000..b3e569ad
--- /dev/null
+++ b/test/integration/targets/vault/host_vars/testhost.yml
@@ -0,0 +1,7 @@
+vaulted_utf8_value: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 39313961356631343234656136636231663539363963386364653436346133366366633031366364
+ 3332376636333837333036633662316135383365343335380a393331663434663238666537343163
+ 62363561336431623666633735313766613663333736653064373632666131356434336537383336
+ 3333343436613232330a643461363831633166333237653530353131316361643465353132616362
+ 3461
diff --git a/test/integration/targets/vault/invalid_format/README.md b/test/integration/targets/vault/invalid_format/README.md
new file mode 100644
index 00000000..cbbc07a9
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/README.md
@@ -0,0 +1 @@
+Based on https://github.com/yves-vogl/ansible-inline-vault-issue
diff --git a/test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml b/test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml
new file mode 100644
index 00000000..71dbacc0
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/broken-group-vars-tasks.yml
@@ -0,0 +1,23 @@
+---
+- hosts: broken-group-vars
+ gather_facts: false
+ tasks:
+ - name: EXPECTED FAILURE
+ debug:
+ msg: "some_var_that_fails: {{ some_var_that_fails }}"
+
+ - name: EXPECTED FAILURE Display hostvars
+ debug:
+ msg: "{{inventory_hostname}} hostvars: {{ hostvars[inventory_hostname] }}"
+
+
+# ansible-vault --vault-password-file=vault-secret encrypt_string test
+# !vault |
+# $ANSIBLE_VAULT;1.1;AES256
+# 64323332393930623633306662363165386332376638653035356132646165663632616263653366
+# 6233383362313531623238613461323861376137656265380a366464663835633065616361636231
+# 39653230653538366165623664326661653135306132313730393232343432333635326536373935
+# 3366323866663763660a323766383531396433663861656532373663373134376263383263316261
+# 3137
+
+# $ ansible-playbook -i inventory --vault-password-file=vault-secret tasks.yml
diff --git a/test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml b/test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml
new file mode 100644
index 00000000..9afbd58e
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/broken-host-vars-tasks.yml
@@ -0,0 +1,7 @@
+---
+- hosts: broken-host-vars
+ gather_facts: false
+ tasks:
+ - name: EXPECTED FAILURE Display hostvars
+ debug:
+ msg: "{{inventory_hostname}} hostvars: {{ hostvars[inventory_hostname] }}"
diff --git a/test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml b/test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml
new file mode 100644
index 00000000..5f477431
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/group_vars/broken-group-vars.yml
@@ -0,0 +1,8 @@
+$ANSIBLE_VAULT;1.1;AES256
+64306566356165343030353932383461376334336665626135343932356431383134306338353664
+6435326361306561633165633536333234306665346437330a366265346466626464396264393262
+34616366626565336637653032336465363165363334356535353833393332313239353736623237
+6434373738633039650a353435303366323139356234616433613663626334643939303361303764
+3636363333333333333333333
+36313937643431303637353931366363643661396238303530323262326334343432383637633439
+6365373237336535353661356430313965656538363436333836
diff --git a/test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars b/test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars
new file mode 100644
index 00000000..2d309eb5
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/host_vars/broken-host-vars.example.com/vars
@@ -0,0 +1,11 @@
+---
+example_vars:
+ some_key:
+ another_key: some_value
+ bad_vault_dict_key: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 64323332393930623633306662363165386332376638653035356132646165663632616263653366
+ 623338xyz2313531623238613461323861376137656265380a366464663835633065616361636231
+ 3366323866663763660a323766383531396433663861656532373663373134376263383263316261
+ 3137
+
diff --git a/test/integration/targets/vault/invalid_format/inventory b/test/integration/targets/vault/invalid_format/inventory
new file mode 100644
index 00000000..e6e259a4
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/inventory
@@ -0,0 +1,5 @@
+[broken-group-vars]
+broken.example.com
+
+[broken-host-vars]
+broken-host-vars.example.com
diff --git a/test/integration/targets/vault/invalid_format/original-broken-host-vars b/test/integration/targets/vault/invalid_format/original-broken-host-vars
new file mode 100644
index 00000000..6be696b5
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/original-broken-host-vars
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+64323332393930623633306662363165386332376638653035356132646165663632616263653366
+6233383362313531623238613461323861376137656265380a366464663835633065616361636231
+3366323866663763660a323766383531396433663861656532373663373134376263383263316261
+3137
+
diff --git a/test/integration/targets/vault/invalid_format/original-group-vars.yml b/test/integration/targets/vault/invalid_format/original-group-vars.yml
new file mode 100644
index 00000000..817557be
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/original-group-vars.yml
@@ -0,0 +1,2 @@
+---
+some_var_that_fails: blippy
diff --git a/test/integration/targets/vault/invalid_format/some-vars b/test/integration/targets/vault/invalid_format/some-vars
new file mode 100644
index 00000000..e841a262
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/some-vars
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+37303462633933386339386465613039363964643466663866356261313966663465646262636333
+3965643566363764356563363334363431656661636634380a333837343065326239336639373238
+64316236383836383434366662626339643561616630326137383262396331396538363136323063
+6236616130383264620a613863373631316234656236323332633166623738356664353531633239
+3533
diff --git a/test/integration/targets/vault/invalid_format/vault-secret b/test/integration/targets/vault/invalid_format/vault-secret
new file mode 100644
index 00000000..4406e35c
--- /dev/null
+++ b/test/integration/targets/vault/invalid_format/vault-secret
@@ -0,0 +1 @@
+enemenemu \ No newline at end of file
diff --git a/test/integration/targets/vault/inventory.toml b/test/integration/targets/vault/inventory.toml
new file mode 100644
index 00000000..d97ed398
--- /dev/null
+++ b/test/integration/targets/vault/inventory.toml
@@ -0,0 +1,5 @@
+[vauled_group.hosts]
+vaulted_host_toml={ ansible_host="localhost", ansible_connection="local" }
+
+[vauled_group.vars]
+hello="world"
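# Note: this TOML inventory is encrypted and later decrypted in place by runme.sh, which
# first installs the 'toml' library so the TOML inventory plugin can parse it. A sketch:
# $ pip install toml
# $ ansible-vault encrypt ./inventory.toml --vault-password-file=./vault-password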
diff --git a/test/integration/targets/vault/password-script.py b/test/integration/targets/vault/password-script.py
new file mode 100755
index 00000000..c47fdfb9
--- /dev/null
+++ b/test/integration/targets/vault/password-script.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-vault is a script that encrypts/decrypts YAML files. See
+# https://docs.ansible.com/playbooks_vault.html for more details.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+PASSWORD = 'test-vault-password'
+
+
+def main(args):
+ print(PASSWORD)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[:]))
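# Note: ansible-vault treats an executable password file as a script and uses its stdout
# as the vault password. A usage sketch, mirroring the runme.sh calls below:
# $ ansible-vault view --vault-password-file password-script.py format_1_1_AES256.yml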
diff --git a/test/integration/targets/vault/roles/test_vault/tasks/main.yml b/test/integration/targets/vault/roles/test_vault/tasks/main.yml
new file mode 100644
index 00000000..4e5551d9
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault/tasks/main.yml
@@ -0,0 +1,9 @@
+- assert:
+ that:
+ - 'secret_var == "secret"'
+
+
+- copy: src=vault-secret.txt dest={{output_dir}}/secret.txt
+
+- name: cleanup decrypted file
+ file: path={{ output_dir }}/secret.txt state=absent
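# Note: 'src=vault-secret.txt' above is itself a vault-encrypted file; the copy action
# decrypts it transparently, so the play only needs a vault secret supplied, e.g.:
# $ ansible-playbook test_vault.yml -i ../../inventory --vault-password-file vault-password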
diff --git a/test/integration/targets/vault/roles/test_vault/vars/main.yml b/test/integration/targets/vault/roles/test_vault/vars/main.yml
new file mode 100644
index 00000000..cfac107a
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault/vars/main.yml
@@ -0,0 +1,9 @@
+$ANSIBLE_VAULT;1.1;AES256
+31626536666232643662346539623662393436386162643439643434656231343435653936343235
+6139346364396166336636383734333430373763336434310a303137623539653939336132626234
+64613232396532313731313935333433353330666466646663303233323331636234326464643166
+6538653264636166370a613161313064653566323037393962643032353230396536313865326362
+34396262303130326632623162623230346238633932393938393766313036643835613936356233
+33323730373331386337353339613165373064323134343930333031623036326164353534646631
+31313963666234623731316238656233396638643331306231373539643039383434373035306233
+30386230363730643561
diff --git a/test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml b/test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml
new file mode 100644
index 00000000..eba93896
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_embedded/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: Assert that an embedded vault of a string with no newline works
+ assert:
+ that:
+ - '"{{ vault_encrypted_one_line_var }}" == "Setec Astronomy"'
+
+- name: Assert that a multi-line embedded vault works, including the newline
+ assert:
+ that:
+ - vault_encrypted_var == "Setec Astronomy\n"
+
+# TODO: add an expected fail here
+# - debug: var=vault_encrypted_one_line_var_with_embedded_template
diff --git a/test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml b/test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml
new file mode 100644
index 00000000..54e6004f
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_embedded/vars/main.yml
@@ -0,0 +1,17 @@
+# If you use the normal 'ansible-vault create' or 'ansible-vault edit', files always have at least one
+# trailing newline, so copy-and-pasting from a vault-encrypted file that wasn't specifically created
+# without one ends up with a newline.
+# (specifically created, as in 'echo -n "just one line" > my_secret.yml'; a sketch follows)
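+# A sketch of creating such a one-line secret via stdin instead (variable name illustrative):
+#   echo -n "just one line" | ansible-vault encrypt_string --vault-password-file vault-password --stdin-name vault_encrypted_one_line_var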
+vault_encrypted_var: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 66386439653236336462626566653063336164663966303231363934653561363964363833313662
+ 6431626536303530376336343832656537303632313433360a626438346336353331386135323734
+ 62656361653630373231613662633962316233633936396165386439616533353965373339616234
+ 3430613539666330390a313736323265656432366236633330313963326365653937323833366536
+ 34623731376664623134383463316265643436343438623266623965636363326136
+vault_encrypted_one_line_var: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 33363965326261303234626463623963633531343539616138316433353830356566396130353436
+ 3562643163366231316662386565383735653432386435610a306664636137376132643732393835
+ 63383038383730306639353234326630666539346233376330303938323639306661313032396437
+ 6233623062366136310a633866373936313238333730653739323461656662303864663666653563
+ 3138
diff --git a/test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml b/test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml
new file mode 100644
index 00000000..9aeaf240
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_embedded_ids/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+- name: set a fact from vault_encrypted_example1_releases
+ set_fact:
+ example1_releases: "{{ vault_encrypted_example1_releases }}"
+
+- name: Assert that an embedded vault of a multi-line string with a vault id works
+ assert:
+ that:
+ - "vault_encrypted_example1_releases is defined"
+ - "example1_releases is defined"
+ - "example1_releases.startswith('Ansible Releases')"
+ # - '"{{ vault_encrypted_example1_releases }}" == "Setec Astronomy"'
+
+- name: Assert that an embedded vault with a different vault id works
+ assert:
+ that:
+ - "vault_encrypted_example2_hello == 'Hello world'"
+
+- name: Assert that an embedded vault with no vault id and format 1.2 works
+ assert:
+ that:
+ - "vault_encrypted_example3_foobar == 'Foobar'"
+ #- name: Assert that a multi line embedded vault works, including new line
+ # assert:
+ # that:
+ # - vault_encrypted_var == "Setec Astronomy\n"
+
+# TODO: add an expected fail here
+# - debug: var=vault_encrypted_one_line_var_with_embedded_template
diff --git a/test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml b/test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml
new file mode 100644
index 00000000..9c8fa4b2
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_embedded_ids/vars/main.yml
@@ -0,0 +1,194 @@
+vault_encrypted_example2_hello: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;example2
+ 30383930326535616363383537613266376364323738313835353566633533353364363837383638
+ 3737633764613862343666346337353964613138653036610a313663393231386139343835626436
+ 66633336303866323335616661366363333463616530326635383836656432396665313338313737
+ 6539616630663262650a383762303362356438616261646564303230633930336563373566623235
+ 3566
+vault_encrypted_example1_releases: !vault |
+ $ANSIBLE_VAULT;1.2;AES256;example1
+ 63643833646565393535303862343135326261343362396234656137313731313864316539616462
+ 3333313439353638393963643535633835643035383331340a393639386166313838326336363032
+ 65396565616531663839316132646230316561613865333437653666323034396337626431663931
+ 3339363233356438350a363734616337306136376139346162376334343537613032633563666361
+ 36386437356463616563646336393064626131363963643434376439346331663836663961353533
+ 62343663623863663830663531663930636532653165636238636433373835623435313632313030
+ 33333734343566393739393661383430623063323132303132306361666433386166633564626434
+ 62666361653465616636646335353230373961393863373261633461303233313965346565643434
+ 63383633303131643730366233383264373865376562623962636562343732343266636535356362
+ 62396635613231336162393630343136663731366665623835303762636161393163373361383634
+ 65333739326264346136333337363666396336353065366161316130653738356133646364316130
+ 32346636386665633131376662356238386161373565336430623263353036323561633235303135
+ 35333031316366373636326665656230343934383334303863643364613364663436383030373237
+ 35323964376564313636643633303262633033363633663966393535613064343364313161383061
+ 66393733366463393936663033633038653465636539356266353936373162303661613962393662
+ 61313534643064366432333166666130663730653333613964316130363135646532303531376537
+ 63313339623337363464343637323431336438636337386264303961333139326666306365363937
+ 36386437343036346165366439636533666237393535316536333966376536623030643663343561
+ 64626362363736316234356639663039396634653766646237376636653062383530366562323138
+ 61343537616263373137613232393731363866653038633932643163633732326463656365346535
+ 63316337346636326631326134633339363133393337393035333730663133646332343536636337
+ 36626566633162333463613735656564393764356337346535646539373536363933326139626239
+ 35386434663636343366303830663531616530616563343737653761616232303865626634646537
+ 38383430366131396133636530383865356430343965633062373366383261383231663162323566
+ 30373061366533643938383363333266636463383134393264343662623465323164356464666364
+ 35636135316333636266313038613239616638343761326332663933356164323635653861346430
+ 65616661353162633765666633393139613830626535633462633166376563313236623465626339
+ 38663138633664613738656166356431343438653833623132383330656637343661616432623362
+ 66643466343663306434353237343737633535343233653765356134373739316234353836303034
+ 37336435376135363362323130316338316135633633303861303665393766616537356666653238
+ 63366461383334356666633134616436663731633666323261393761363264333430366234353732
+ 66333732373236303338333862626537326638393964363965303532353465613638393934313538
+ 66323366353064666334626461313933333961613637663332656131383038393264636537643730
+ 35626265346363393665663431663036633461613362343330643133333232326664623833626336
+ 65353363373962383561396163653361663736383235376661626132386131353137303764623231
+ 63326538623231396366356432663537333331343335633531326331616531313039393335313139
+ 65376461323434383065383834626535393063363432326233383930626437343961313538303135
+ 39386561623662333335313661636637656336353537313466386239613166396436626630376337
+ 36633739326336366530643733393962633737343035346536366336643266346162333931633235
+ 66643966626262343862393832663132356435343561646634373835306130623637633836633166
+ 30313732333963383565373261306232663365363033376431313437326366656264346532666561
+ 63386231636634613235333363326166616238613734643739343237303963663539633535356232
+ 66393365616165393130356561363733313735336132336166353839303230643437643165353338
+ 39663138313130366635386365663830336365646562666635323361373362626339306536313664
+ 32383934623533373361666536326131316630616661623839666137656330306433326637386134
+ 34393162343535633438643036613831303265646632383231306239646132393338663564653939
+ 63613232646230616338316434376663613266303362386631353733623335643034356631383139
+ 62613932396132636339393337383065613061306162633831386236323163633439303263393663
+ 38616237313761306533636361386161666264333839616463386631633233343132373732636639
+ 61326239383961656437646236656336303638656665316633643630393063373964323534643961
+ 39383538303234343438363736373136316464643165383361336262303231353937316432366639
+ 36613662393736386433356532626162643462313234316230643639333535653064303830373166
+ 31393332336539313362373136326639386566343637623633396134643533393839353934613064
+ 65396233353363393763363231633462663537626165646666633937343733653932633733313237
+ 31323633326463333938343062626361313761646133633865623130323665336634356364366566
+ 31626562373662313064306239356336376136306336643961323839313964393734343265306137
+ 62663563306665636463356465663432346331323832666163623530666265393164336466383936
+ 64653831316162313861373462643264373965623632653430373439656535636365383066643464
+ 61366436613631386161306631386331656632636337653864343261643433363438396361373831
+ 37363532346564343562356132306432303933643431636539303039306638356537353237323036
+ 63366334623438393838383561383937313330303832326330326366303264303437646666613638
+ 37653266633362636330656666303437323138346666373265663466616635326366313233323430
+ 62616165626239363833613565326264373063376232303837363062616663333461373062323266
+ 32626636316465666230626634396431323032323962313437323837336562313438346634656335
+ 33613566636461663334623966646465623531653631653565333836613261633534393439613738
+ 66356364383637666465336666333962393735643766633836383833396533626635633734326136
+ 65656562366337326161303466336232646533346135353332643030383433643662363465633931
+ 63323761623537383438333837333733363263663630336264376239336234663866633131376463
+ 66663438313439643565316138383439353839366365393238376439626537656535643739373237
+ 66666266366533393738363138613437666435366163643835383830643333323730303537313139
+ 32313436663932633933353265356431336138306437353936363638643539383236323232326630
+ 62323963626138633865376238666264666531613237636232373938303030393632643230336138
+ 38663237646637616232343664396136376534313533613364663062356535313766343331616431
+ 36616237336532333239386663643538643239613866393631393364306463303131643863363533
+ 31356436373062666266656431643038323766383632613939616539663637623164323161633464
+ 39666663353339383164363534616330323936333865663564646334373438303061656662656331
+ 37633530663666323834383333623136633164326632313938643234326235616461323734353638
+ 63393365313334646538373631643266383936333533383630623861343764373863346161316333
+ 38356466626234653336326433353234613430623135343739323433326435373663363237643531
+ 36626238613832633661343263383962373536353766653631323431393330623634656166333437
+ 66376537643836626264383961303465363035336666306165316631316661366637303361656332
+ 36616463626135653235393562343464353262616331326539316361393036623134623361383635
+ 39383565313433653139663963306362373233313738613933626563333230656239613462363164
+ 65396539333833633137313163396635373433303164633463383935663939343266396366666231
+ 30353434323837343563613662643632386662616363646630353530386466643939623866626331
+ 63613266366135646562653064333166356561626138343364373631376336393931313262323063
+ 32653938333837366231343865656239353433663537313763376132613366363333313137323065
+ 31666663656539333438343664323062323238353061663439326333366162303636626634313037
+ 38366631306438393333356138393730316161336233656239626565366134643535383536613034
+ 37343733663631663863643337373462633462666234393063336330306465366637653136393533
+ 63336535316438303564613366343565363831666233626466623161356635363464343634303136
+ 61616561393861393036353433356364376533656334326433323934643236346133363535613334
+ 32626332653362313731643035653335383164303534616537333132356535376233343566313736
+ 39353037636530376338383739366230346134643738313037386438613461323934663537666164
+ 66353330303730336435313735343333316364373432313030396361343061343632653765646336
+ 39666537366537343635396235373433363438393637663166666530356339316334313834363938
+ 33393837336265353265303635663363353439343062316363643637623564353261643637306434
+ 36393662363737316234323461373763663364356535313165656661613137396366386464663866
+ 63653562313539313839613436653137663262346233626464616237373737373736306231383265
+ 35323532373631613762616234386162643035613838376264343532396263626562623262363532
+ 36303530353137616134346262646464633462646662323262633366393736383834616665666466
+ 34393363353135616437346332386634396635363130623337653230666334303630653738633334
+ 33316162326335373838643261656561303736363331316134363736393362313734346236306638
+ 65343163646264643539643635633761393665623039653232623435383062363462346336613238
+ 38306138353832306263356265316236303065626566643134373836303933323130303634393931
+ 31633334373064353263353135656433623863636261633664646439336539343636656464306531
+ 36373364323637393634623666353730626532613534343638663966313332636437383233303864
+ 33356432613638303936653134373338626261353662653930333534643732656130653636316433
+ 33653364373636613739353439383066646530303565383432356134396436306134643030643034
+ 63323433396238636330383836396364613738616338356563633565613537313138346661636164
+ 34333566393738343661663062346433396532613032663331313566333161396230343336346264
+ 66333935316630653936346336366336303363376633623034346536643731313136363835303964
+ 37346537373236343832306637653563386435363435333537393733333966643461623064316639
+ 65323363343338326435633631303037623234303334353366303936373664383762316364663036
+ 61353638376335333663343066303961616234336664313732366630343331613537633336316534
+ 31656561626430383338353231376263383362333966666363316435373533613138323039363463
+ 33363031373035316431353930626632666165376538303638353631303931326262386363376330
+ 36333531303235306532363763313233616165646234343235306332383262663261366164623130
+ 66613232636264636336313230303261626639316465383265373762346434616362383562633533
+ 64346438653161306266663634623666646239383363313862383563386461626264383165373561
+ 64383431653061393132623833653337643266663462666462366339363233353335386264383936
+ 38396264373833343935653264373631626662653962353438313262633339316537306463663930
+ 31613634613535346364643930613739383035336164303064653736663031633135613966656463
+ 64333539643534376662666539653766666532333832333430346333613236356534643964383135
+ 38326235626164663364366163353434613530306531343735353761396563326536636335326336
+ 34613835333362346363623235316564363934333732646435373033613863346565353034306333
+ 33643763363838656339396435316162616539623764366163376438656266353137633262613464
+ 31393434646435623032383934373262666430616262353165343231666631666238653134396539
+ 32323137616639306262366638366536366665633331653363643234643238656338316133613166
+ 38343566623137353566306538616639363935303766633732633638356362373463616563663438
+ 66346133636562373031316363616662663132636263653037343962313630313535396563313230
+ 34613735663838613130346461343166663830623861393634353438376336363961326263333634
+ 34646465326238636630316164316339333961333939363139623262396531303665383230363562
+ 63626431333365663337323430653230613837396133636431303863366239303531653966653932
+ 65363139366637623531306333363465386636366334383734353330626566346532653263633238
+ 39383434346665323730366261316433303739313032653638636232666432323930653837643831
+ 63393565306538663365616364326334306333346463343330316161616362323063666666373035
+ 66383938383238353134386333343437623030363032303531643736353636643165373362363666
+ 31363037613064633164346638306231663161626265663535363634336665656163636637393161
+ 64313363373965396262386337613533393639353332316234643666613065343939393336366633
+ 64303637323531393936386365316366656432346230653066306334626431366335353130663233
+ 62303961663362623637303535333432313635303936363462336438663232333862303934383166
+ 31626438623963346262376135633434643533316162376633353661356463616538363733346464
+ 65646563626139356264363132616161303438653133353961636135333833376364333138353263
+ 36613437373365666665643664343666366234636164626437396139393864653031396331303938
+ 35323839646265393232326434616233323535396134346465363131366165373163353932363538
+ 39353764623463393732346134656539353966643366653765663038323631373432663839396239
+ 35623665623661326231643734346134623961663539363436323134333630306663653039653062
+ 36623730663538666166363436616131363233643739393966333437643637303737383733356138
+ 34343733623137326265343332326437316365346439316137663361373066333166383032396636
+ 35623561626139666264373363363965383633653633656464393932666634353962623637643262
+ 32323663303861376166656266653962643166326535363237316333663631323235333833636361
+ 31633038353265386439313766313966633536346230646566633333646632383938363761373363
+ 38353931343136633062303366643930323034616265653030643062333461616637366666336437
+ 36346330636666313833346534363461336366393533346338653061356333653839623364336266
+ 32373965346363613165383639366365396665353966393262393562353664623231326132363735
+ 38386238336135306464366332353035613938313262323739326638623733663030656533383438
+ 38316364393030376436313031613936363435633562633862323063643035383030313865396666
+ 66646338316262653734633431393862626633643163313732343638313066646163353264653531
+ 64346265656363323666656239333466313666373234626261633630653133316639313233303466
+ 62353735626634616661396238356138343064386332366361643530613364366365663764393037
+ 31613730313234393263653964376262373131383064393133636533656534343431613964663634
+ 65656365393439306433313333346234333332346230666462633132313863623765306665306461
+ 65633862656637646134353030393637353339646265613731646564333561313431346135626532
+ 66646363383932636562343731626164633138386463356634353062323965376235383130633231
+ 61623537333030383130623064356662356463646532613339303336666631366539613835646364
+ 37636634353430386632656331313936393261643638326162376238326139643939636333366364
+ 31626163376436336631
+vault_encrypted_example3_foobar: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 37336431373836376339373763306436396334623061366266353763363766313063363230636138
+ 3665663061366436306232323636376261303064616339620a333365323266643364396136626665
+ 62363862653134623665326635396563643832636234386266616436626334363839326434383431
+ 3330373333366233380a363431386334636164643936313430623661633265346632343331373866
+ 3732
+# We don't have a secret for this vaulttext, but nothing references it,
+# so nothing should ever try to decrypt it. So this is testing that
+# we don't require all vaulted vars to be decrypted.
+vault_encrypted_example4_unknown_password: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 64316436303566666563393931613833316533346539373635663031376664366131353264366132
+ 3637623935356263643639313562366434383234633232660a353636666134353030646539643139
+ 65376235333932353531356666363434313066366161383532363166653762326533323233623431
+ 3934393962633637330a356337626634343736313339316365373239663031663938353063326665
+ 30643339386131663336366531663031383030313936356631613432336338313962
diff --git a/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md
new file mode 100644
index 00000000..4a75cece
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/README.md
@@ -0,0 +1 @@
+The vars file is encrypted with the password 'test-encrypted-file-password'.
diff --git a/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml
new file mode 100644
index 00000000..e09004a1
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: Assert that a vault-encrypted file with an embedded vault of a string with no newline works
+ assert:
+ that:
+ - '"{{ vault_file_encrypted_with_encrypted_one_line_var }}" == "Setec Astronomy"'
+
+- name: Assert that a vault-encrypted file with a multi-line embedded vault works, including the newline
+ assert:
+ that:
+ - vault_file_encrypted_with_encrypted_var == "Setec Astronomy\n"
+
+# TODO: add an expected fail here
+# - debug: var=vault_encrypted_one_line_var_with_embedded_template
diff --git a/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml
new file mode 100644
index 00000000..89cc4a0f
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vault_file_encrypted_embedded/vars/main.yml
@@ -0,0 +1,76 @@
+$ANSIBLE_VAULT;1.1;AES256
+31613535653961393639346266636234373833316530373965356161373735666662613137386466
+3365303539306132613861646362396161323962373839640a653030376530316136643961623665
+65643665616338363432383264363730386538353635663339633932353933653132343430613332
+6136663837306333370a643139336230663465346637663032613231656364316533613235623532
+65643738663735636662363565313561646162343865393733663838393239646634633936336262
+39626235616537663934363932323831376539666331353334386636663738643932306239663265
+64646664616331643663326561386638393764313737303865326166373031336665663533373431
+35353736346264616135656164636337363966323935643032646138366166636537333565306230
+65646533623134393633623663336263393533613632663464653663313835306265333139646563
+35393061343266343138333936646364333735373930666262376137396562356231393330313731
+36363164623939393436363564353162373364626536376434626463343161646437316665613662
+38343534363965373735316339643061333931666264353566316235616433666536313065306132
+31623933633533366162323961343662323364353065316235303162306635663435663066393865
+64356634363761333838326331343865653633396665353638633730663134313565653166656131
+33366464396532313635326237363135316230663838393030303963616161393966393836633237
+30333338343031366235396438663838633136666563646161363332663533626662663531653439
+63643435383931663038613637346637383365336431646663366436626333313536396135636566
+31373133363661636338376166356664353366343730373164663361623338383636336464373038
+36306437363139346233623036636330333664323165636538666138306465653435666132623835
+30363266333666626363366465313165643761396562653761313764616562666439366437623766
+33343666623866653461376137353731356530363732386261383863666439333735666638653533
+38393430323961356333383464643036383739663064633461363937336538373539666662653764
+36376266333230666232396665616434303432653562353131383430643533623932363537346435
+33326335663561643564663936323832376634336363373531363666333732643363646130383464
+30656366633863643966656134653833343634383136363539366330336261313736343838663936
+39333835353035386664633331303264356339613933393162393037306565636563386436633532
+34376564343237303166613461383963353030383166326538643932323130643830376165366564
+30366432623761623366653966313865653262363064316130393339393366323539373338306265
+31626564393065303032383161343137636432353061333964613935363865356139313766303039
+32333863353465306265653237396232383330333438303866316362353161383266316633663364
+66353130326237376331656334633965633339303138656263616239323261663864666236323662
+33643463303965313264396463333963376464313838373765633463396534363836366132653437
+30303132633232623265303966316639373664656262636166653438323534326435363966616133
+33663463626536643930623034343237613933623462346635306565623834346532613539383838
+39356339303930663739333236316234666633623961323362323537313833383538363132636165
+31396433386664356532383432666464613137376561396534316665386134333665626430373064
+30626561363731326635393334633837303934653062616461303732316239663764633565353633
+33336161623332383064376538353531343534333836313139376439316564313436623462396134
+31643831656135653234396362653861643933346433646633383130323139353465616430383061
+34623164376436326466333765353037323630356662646364366265303534313764393862653238
+66376365323561643030343534636263386338333566613436383630613561646639616265313465
+66336239303432666361383038323038383663346561356664626634333037313838363732643463
+33373734663933373238363635623336323232313161353861306430323334353836616265623639
+65613436323939643932383537666530306134633435373331623963633436386162306565656433
+35383962633163643837343436383664313565656134646633393237353065666535316561613266
+64653234366462623764313438666466616664303138656565663036376230323763393135323330
+35383861306262356430656531343938643763306663323031636638383762626564616366393434
+33373035363633396230396161623433336530326432343666346332613262376338313731626462
+63616463363831333239643535383936646264336466616635353063383163306564373263656265
+65383466653162626132633463613037343865316639653931633965323637373733653131666233
+35643831646638383232616538656265663365306136343733633535323537653165636665383832
+65303162656238303665346232353136346639316263636264346533356263353066353438323535
+36303236326663303763653137656264336566646161663538383361306138323064336235616438
+32373731643331373239383339326365366337646237643836373238656339646362366239623533
+33306531353863653834666361393161366465626632643061363266353465653964363263613430
+32323132613866343733376437643239316661313330323661633234343630626132383434343461
+61663765383134666330316237633963323463363762383666323866386336316438373461306138
+38613266346532313134386236386131626262663534313935623635343533383831386332343534
+65333963353861656232383134396438613034663333633661346465636436373533346561306661
+33656535613963663938313233333736343036393734373363316236373765343736633635386336
+30323036393431363636316466393561626365366333623431353435633963613935346239666534
+33623037306334343464633932313430616666633631313366356532643938333835333231313039
+65363734336630303861626636613139663130616362333662616532313734393636353963643032
+39626162623933616561383736636466316331346135613063383261373865366232376562316237
+65393563633131653761646365313831646265316233343833653363626465363863363936316664
+63363863363761353264316662643338656432356336326339623961396538643838666330303934
+62343537653262353737316266366134623961323637613338303164383734613034383964623135
+35646130363038356530383638663431663238336337313034303631366538326361646530626138
+34653533383964353866653562666463333961313434373063333163346537636631393138316465
+62656361613365366137346337363830356263633162623466373564346437653036386136333333
+32323863393866373932353534343133306333303265336564383132616365363439393364336562
+62333130343664343436356338623336643735373164373962313762333763343137626238316536
+36376539666331376162376361646631396231306165316362343164616232393864656161393735
+63313439643865346231346363376137306464396637356539353139343932333438323964323035
+326532383066643037653036333166346238
diff --git a/test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml b/test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml
new file mode 100644
index 00000000..b4af5efc
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vaulted_template/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Template from a vaulted template file
+ template:
+ src: vaulted_template.j2
+ dest: "{{ output_dir }}/vaulted_template.out"
+ vars:
+ vaulted_template_var: "here_i_am"
+
+- name: Get output template contents
+ slurp:
+ path: "{{ output_dir }}/vaulted_template.out"
+ register: vaulted_template_out
+
+- debug:
+ msg: "{{ vaulted_template_out.content|b64decode }}"
+
+- assert:
+ that:
+ - vaulted_template_out.content|b64decode == 'here_i_am\n'
diff --git a/test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2 b/test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2
new file mode 100644
index 00000000..af9c3eb1
--- /dev/null
+++ b/test/integration/targets/vault/roles/test_vaulted_template/templates/vaulted_template.j2
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+65626437623461633630303033303939616334373263633438623938396564376435366534303865
+6363663439346464336437346263343235626463663130640a373233623733653830306262376430
+31666538323132343039613537323761343234613531353035373434666632333932623064316564
+3532363462643736380a303136353830636635313662663065343066323631633562356663633536
+31343265376433633234656432393066393865613235303165666338663930303035
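# Note: the Jinja2 template above is vault-encrypted in its entirety; the template action
# decrypts it transparently before rendering. A sketch of encrypting a plaintext template,
# assuming the same 'vault-password' file:
# $ ansible-vault encrypt vaulted_template.j2 --vault-password-file vault-password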
diff --git a/test/integration/targets/vault/runme.sh b/test/integration/targets/vault/runme.sh
new file mode 100755
index 00000000..e3b21d7f
--- /dev/null
+++ b/test/integration/targets/vault/runme.sh
@@ -0,0 +1,524 @@
+#!/usr/bin/env bash
+
+set -euvx
+source virtualenv.sh
+
+
+MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
+trap 'rm -rf "${MYTMPDIR}"' EXIT
+
+# create a test file
+TEST_FILE="${MYTMPDIR}/test_file"
+echo "This is a test file" > "${TEST_FILE}"
+
+TEST_FILE_1_2="${MYTMPDIR}/test_file_1_2"
+echo "This is a test file for format 1.2" > "${TEST_FILE_1_2}"
+
+TEST_FILE_ENC_PASSWORD="${MYTMPDIR}/test_file_enc_password"
+echo "This is a test file for encrypted with a vault password that is itself vault encrypted" > "${TEST_FILE_ENC_PASSWORD}"
+
+TEST_FILE_ENC_PASSWORD_DEFAULT="${MYTMPDIR}/test_file_enc_password_default"
+echo "This is a test file for encrypted with a vault password that is itself vault encrypted using --encrypted-vault-id default" > "${TEST_FILE_ENC_PASSWORD_DEFAULT}"
+
+TEST_FILE_OUTPUT="${MYTMPDIR}/test_file_output"
+
+TEST_FILE_EDIT="${MYTMPDIR}/test_file_edit"
+echo "This is a test file for edit" > "${TEST_FILE_EDIT}"
+
+TEST_FILE_EDIT2="${MYTMPDIR}/test_file_edit2"
+echo "This is a test file for edit2" > "${TEST_FILE_EDIT2}"
+
+# test case for https://github.com/ansible/ansible/issues/35834
+# (being prompted for new password on vault-edit with no configured passwords)
+
+TEST_FILE_EDIT3="${MYTMPDIR}/test_file_edit3"
+echo "This is a test file for edit3" > "${TEST_FILE_EDIT3}"
+
+# ansible-config view
+ansible-config view
+
+# ansible-config
+ansible-config dump --only-changed
+ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_EDIT3}"
+# EDITOR=./faux-editor.py ansible-vault edit "$@" "${TEST_FILE_EDIT3}"
+EDITOR=./faux-editor.py ansible-vault edit --vault-id vault-password -vvvvv "${TEST_FILE_EDIT3}"
+echo $?
+
+# view the vault encrypted password file
+ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
+
+# encrypt with a password from a vault encrypted password file and multiple vault-ids
+# should fail because we don't know which vault id to use to encrypt with
+ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (5 is expected)"
+[ $WRONG_RC -eq 5 ]
+
+# try to view the file encrypted with the vault-password we didn't specify
+# to verify we didn't choose the wrong vault-id
+ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
+
+FORMAT_1_1_HEADER="\$ANSIBLE_VAULT;1.1;AES256"
+FORMAT_1_2_HEADER="\$ANSIBLE_VAULT;1.2;AES256"
+
+
+VAULT_PASSWORD_FILE=vault-password
+# new format, view, using password client script
+ansible-vault view "$@" --vault-id vault-password@test-vault-client.py format_1_1_AES256.yml
+
+# view, using password client script, unknown vault/keyname
+ansible-vault view "$@" --vault-id some_unknown_vault_id@test-vault-client.py format_1_1_AES256.yml && :
+
+# Use Linux setsid to test without a tty. No setsid on macOS/BSD though...
+if [ -x "$(command -v setsid)" ]; then
+ # tests related to https://github.com/ansible/ansible/issues/30993
+ CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml'
+ setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
+ WRONG_RC=$?
+ cat log
+ echo "rc was $WRONG_RC (0 is expected)"
+ [ $WRONG_RC -eq 0 ]
+
+ setsid sh -c 'tty; ansible-vault view --ask-vault-pass -vvvvv test_vault.yml' < /dev/null > log 2>&1 && :
+ WRONG_RC=$?
+ echo "rc was $WRONG_RC (1 is expected)"
+ [ $WRONG_RC -eq 1 ]
+ cat log
+
+ setsid sh -c 'tty; echo passbhkjhword|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1 && :
+ WRONG_RC=$?
+ echo "rc was $WRONG_RC (1 is expected)"
+ [ $WRONG_RC -eq 1 ]
+ cat log
+
+ setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
+ echo $?
+ cat log
+
+ setsid sh -c 'tty; echo test-vault-password|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
+ echo $?
+ cat log
+
+ setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
+ echo $?
+ cat log
+
+ setsid sh -c 'tty; echo test-vault-password|ansible-vault view --ask-vault-pass -vvvvv vaulted.inventory' < /dev/null > log 2>&1
+ echo $?
+ cat log
+
+ # test using --ask-vault-password option
+ CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-password test_vault.yml'
+ setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
+ WRONG_RC=$?
+ cat log
+ echo "rc was $WRONG_RC (0 is expected)"
+ [ $WRONG_RC -eq 0 ]
+fi
+
+ansible-vault view "$@" --vault-password-file vault-password-wrong format_1_1_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+set -eux
+
+
+# new format, view
+ansible-vault view "$@" --vault-password-file vault-password format_1_1_AES256.yml
+
+# new format, view with vault-id
+ansible-vault view "$@" --vault-id=vault-password format_1_1_AES256.yml
+
+# new format, view, using password script
+ansible-vault view "$@" --vault-password-file password-script.py format_1_1_AES256.yml
+
+# new format, view, using password script with vault-id
+ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
+
+# new 1.2 format, view
+ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml
+
+# new 1.2 format, view with vault-id
+ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
+
+# new 1.2 format, view, using password script
+ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml
+
+# new 1.2 format, view, using password script with vault-id
+ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml
+
+# newish 1.1 format, view, using a vault-id list from config env var
+ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
+
+# new 1.2 format, view, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id' vault_id
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# new 1.2 format, view with vault-id, ENFORCE_IDENTITY_MATCH=true, should work, 'test_vault_id' is provided
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
+
+# new 1.2 format, view, using password script, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id'
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+
+# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, should fail
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, 'test_vault_id' provided should work
+ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id=test_vault_id@password-script.py format_1_2_AES256.yml
+
+# test with a default vault password set via config/env, right password
+ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault view "$@" format_1_1_AES256.yml
+
+# test with a default vault password set via config/env, wrong password
+ANSIBLE_VAULT_PASSWORD_FILE=vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# test with a default vault-id list set via config/env, right password
+ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,correct@vault-password ansible-vault view "$@" format_1_1_AES.yml && :
+
+# test with a default vault-id list set via config/env, wrong passwords
+ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,alsowrong@vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# try specifying an --encrypt-vault-id that doesn't exist; should exit with an error indicating
+# that --encrypt-vault-id didn't match any of the known vault-ids
+ansible-vault encrypt "$@" --vault-password-file vault-password --encrypt-vault-id doesnt_exist "${TEST_FILE}" && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# encrypt it
+ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
+
+ansible-vault view "$@" --vault-password-file vault-password "${TEST_FILE}"
+
+# view with multiple vault-password files, including a wrong one
+ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong "${TEST_FILE}"
+
+# view with multiple vault-password files, including a wrong one, using vault-id
+ansible-vault view "$@" --vault-id vault-password --vault-id vault-password-wrong "${TEST_FILE}"
+
+# And with the password files specified in a different order
+ansible-vault view "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}"
+
+# And with the password files specified in a different order, using vault-id
+ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE}"
+
+# And with the password files specified in a different order, using --vault-id and non default vault_ids
+ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE}"
+
+ansible-vault decrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
+
+# encrypt it, using a vault_id so we write a 1.2 format file
+ansible-vault encrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
+
+ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_1_2}"
+ansible-vault view "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
+
+# view with multiple vault-password files, including a wrong one
+ansible-vault view "$@" --vault-id vault-password --vault-id wrong_password@vault-password-wrong "${TEST_FILE_1_2}"
+
+# And with the password files specified in a different order, using vault-id
+ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE_1_2}"
+
+# And with the password files specified in a different order, using --vault-id and non default vault_ids
+ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE_1_2}"
+
+ansible-vault decrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
+
+# multiple vault passwords
+ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong format_1_1_AES256.yml
+
+# multiple vault passwords, --vault-id
+ansible-vault view "$@" --vault-id test_vault_id@vault-password --vault-id test_vault_id@vault-password-wrong format_1_1_AES256.yml
+
+# encrypt it, with password from password script
+ansible-vault encrypt "$@" --vault-password-file password-script.py "${TEST_FILE}"
+
+ansible-vault view "$@" --vault-password-file password-script.py "${TEST_FILE}"
+
+ansible-vault decrypt "$@" --vault-password-file password-script.py "${TEST_FILE}"
+
+# encrypt it, with password from password script
+ansible-vault encrypt "$@" --vault-id test_vault_id@password-script.py "${TEST_FILE}"
+
+ansible-vault view "$@" --vault-id test_vault_id@password-script.py "${TEST_FILE}"
+
+ansible-vault decrypt "$@" --vault-id test_vault_id@password-script.py "${TEST_FILE}"
+
+# new password file for rekeyed file
+NEW_VAULT_PASSWORD="${MYTMPDIR}/new-vault-password"
+echo "newpassword" > "${NEW_VAULT_PASSWORD}"
+
+ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
+
+ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
+
+# --new-vault-password-file and --new-vault-id together should cause an options error
+ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-id=foobar --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (2 is expected)"
+[ $WRONG_RC -eq 2 ]
+
+ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
+
+# view file with unicode in filename
+ansible-vault view "$@" --vault-password-file vault-password vault-café.yml
+
+# view with old password file and new password file
+ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password "${TEST_FILE}"
+
+# view with old password file and new password file, different order
+ansible-vault view "$@" --vault-password-file vault-password --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
+
+# view with old password file and new password file and another wrong
+ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}"
+
+# view with old password file and new password file and another wrong, using --vault-id
+ansible-vault view "$@" --vault-id "tmp_new_password@${NEW_VAULT_PASSWORD}" --vault-id wrong_password@vault-password-wrong --vault-id myorg@vault-password "${TEST_FILE}"
+
+ansible-vault decrypt "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
+
+# reading from stdin and writing to stdout (see https://github.com/ansible/ansible/issues/23567)
+ansible-vault encrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output="${TEST_FILE_OUTPUT}" < "${TEST_FILE}"
+OUTPUT=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- < "${TEST_FILE_OUTPUT}")
+echo "${OUTPUT}" | grep 'This is a test file'
+
+OUTPUT_DASH=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- "${TEST_FILE_OUTPUT}")
+echo "${OUTPUT_DASH}" | grep 'This is a test file'
+
+OUTPUT_DASH_SPACE=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output - "${TEST_FILE_OUTPUT}")
+echo "${OUTPUT_DASH_SPACE}" | grep 'This is a test file'
+
+
+# test using an empty vault password file
+ansible-vault view "$@" --vault-password-file empty-password format_1_1_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+ansible-vault view "$@" --vault-id=empty@empty-password --vault-password-file empty-password format_1_1_AES256.yml && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+echo 'foo' > some_file.txt
+ansible-vault encrypt "$@" --vault-password-file empty-password some_file.txt && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+
+ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "a test string"
+
+# Test with multiple vault password files
+# https://github.com/ansible/ansible/issues/57172
+env ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --encrypt-vault-id default "a test string"
+
+ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy"
+
+ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" "a test string"
+
+ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy"
+
+
+# from stdin
+ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" < "${TEST_FILE}"
+
+ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --stdin-name "the_var_from_stdin" < "${TEST_FILE}"
+
+# write to file
+ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy" --output "${MYTMPDIR}/enc_string_test_file"
+
+# test ansible-vault edit with a faux editor
+ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}"
+
+# edit a 1.1 format with no vault-id, should stay 1.1
+EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}"
+head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}"
+
+# edit a 1.1 format with vault-id, should stay 1.1
+cat "${TEST_FILE_EDIT}"
+EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT}"
+cat "${TEST_FILE_EDIT}"
+head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}"
+
+ansible-vault encrypt "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}"
+
+# verify that we aren't prompted for a new vault password on edit if we are running interactively (i.e., with prompts)
+# have to use setsid and --ask-vault-pass to force a prompt to simulate it.
+# See https://github.com/ansible/ansible/issues/35834
+setsid sh -c 'tty; echo password |ansible-vault edit --ask-vault-pass vault_test.yml' < /dev/null > log 2>&1 && :
+grep 'New Vault password' log && :
+WRONG_RC=$?
+echo "The stdout log had 'New Vault password' in it and it is not supposed to. rc of grep was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# edit a 1.2 format with vault id, should keep vault id and 1.2 format
+EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}"
+head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password"
+
+# edit a 1.2 file with no vault-id, should keep vault id and 1.2 format
+EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT2}"
+head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password"
+
+# encrypt with a password from a vault encrypted password file and multiple vault-ids
+# should fail because we don't know which vault id to use to encrypt with
+ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (5 is expected)"
+[ $WRONG_RC -eq 5 ]
+
+
+# encrypt with a password from a vault encrypted password file and multiple vault-ids
+# but this time select one with --encrypt-vault-id, using vault-id names (instead of default)
+# ansible-vault encrypt "$@" --vault-id from_vault_password@vault-password --vault-id from_encrypted_vault_password@encrypted-vault-password --encrypt-vault-id from_encrypted_vault_password "${TEST_FILE_ENC_PASSWORD}"
+
+# try to view the file encrypted with the vault-password we didn't specify
+# to verify we didn't choose the wrong vault-id
+# ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}" && :
+# WRONG_RC=$?
+# echo "rc was $WRONG_RC (1 is expected)"
+# [ $WRONG_RC -eq 1 ]
+
+ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}"
+
+# view the file encrypted with a password from a vault encrypted password file
+ansible-vault view "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}"
+
+# try to view the file encrypted with a password from a vault encrypted password file but without the password to the password file.
+# This should fail with an error.
+ansible-vault view "$@" --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+
+# test playbooks using vaulted files
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-tasks
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-hosts
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password
+ansible-playbook test_vaulted_inventory.yml -i vaulted.inventory -v "$@" --vault-password-file vault-password
+ansible-playbook test_vaulted_template.yml -i ../../inventory -v "$@" --vault-password-file vault-password
+
+# test using --vault-pass-file option
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-pass-file vault-password
+
+# install the toml library so the TOML inventory can be parsed
+# test playbooks using vaulted files (TOML)
+pip install toml
+ansible-vault encrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password
+ansible-playbook test_vaulted_inventory_toml.yml -i ./inventory.toml -v "$@" --vault-password-file vault-password
+ansible-vault decrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password
+
+# test a playbook with a host_var whose value is non-ascii utf8 (see https://github.com/ansible/ansible/issues/37258)
+ansible-playbook -i ../../inventory -v "$@" --vault-id vault-password test_vaulted_utf8_value.yml
+
+# test with password from password script
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file password-script.py
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file password-script.py
+
+# with multiple password files
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password
+
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong --syntax-check
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password
+
+# test with a default vault password file set in config
+ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong
+
+# test using vault_identity_list config
+ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-playbook test_vault.yml -i ../../inventory -v "$@"
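+# (For reference, the same identity list can be set persistently in ansible.cfg,
+# equivalent to the ANSIBLE_VAULT_IDENTITY_LIST env var used above:
+#   [defaults]
+#   vault_identity_list = wrong-password@vault-password-wrong, default@vault-password )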
+
+# test that we can have a vault encrypted yaml file that includes embedded vault vars
+# that were encrypted with a different vault secret
+ansible-playbook test_vault_file_encrypted_embedded.yml -i ../../inventory "$@" --vault-id encrypted_file_encrypted_var_password --vault-id vault-password
+
+# with multiple password files, --vault-id, ordering
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
+
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong --syntax-check
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
+
+# test with multiple password files, including a script, and a wrong password
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file password-script.py --vault-password-file vault-password
+
+# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
+ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-id password-script.py --vault-id vault-password
+
+# test with multiple password files, a script, and a wrong password, mixing --vault-id and --vault-password-file, against embedded vault ids
+ansible-playbook test_vault_embedded_ids.yml -i ../../inventory -v "$@" \
+ --vault-password-file vault-password-wrong \
+ --vault-id password-script.py --vault-id example1@example1_password \
+ --vault-id example2@example2_password --vault-password-file example3_password \
+ --vault-id vault-password
+
+# with wrong password
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with multiple wrong passwords
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with wrong password, --vault-id
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with multiple wrong passwords with --vault-id
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with multiple wrong passwords with --vault-id
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id wrong1@vault-password-wrong --vault-id wrong2@vault-password-wrong && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# with empty password file
+ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id empty@empty-password && :
+WRONG_RC=$?
+echo "rc was $WRONG_RC (1 is expected)"
+[ $WRONG_RC -eq 1 ]
+
+# test invalid format a la https://github.com/ansible/ansible/issues/28038
+EXPECTED_ERROR='Vault format unhexlify error: Non-hexadecimal digit found'
+ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-host-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
+
+EXPECTED_ERROR='Vault format unhexlify error: Odd-length string'
+ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-group-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
+
+# Run playbook with vault file with unicode in filename (https://github.com/ansible/ansible/issues/50316)
+ansible-playbook -i ../../inventory -v "$@" --vault-password-file vault-password test_utf8_value_in_filename.yml
+
+# Ensure we don't leave unencrypted temp files dangling
+ansible-playbook -v "$@" --vault-password-file vault-password test_dangling_temp.yml
+
+ansible-playbook "$@" --vault-password-file vault-password single_vault_as_string.yml \ No newline at end of file
diff --git a/test/integration/targets/vault/runme_change_pip_installed.sh b/test/integration/targets/vault/runme_change_pip_installed.sh
new file mode 100755
index 00000000..5ab2a8ec
--- /dev/null
+++ b/test/integration/targets/vault/runme_change_pip_installed.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
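+# Run the full vault test suite under each crypto-backend combination:
+# neither library installed, pycrypto only, cryptography only, then both.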
+# start by removing pycrypto and cryptography
+
+pip uninstall -y cryptography
+pip uninstall -y pycrypto
+
+./runme.sh
+
+# now just pycrypto
+pip install --user pycrypto
+
+./runme.sh
+
+
+# now just cryptography
+
+pip uninstall -y pycrypto
+pip install --user cryptography
+
+./runme.sh
+
+# now both
+
+pip install --user pycrypto
+
+./runme.sh
diff --git a/test/integration/targets/vault/single_vault_as_string.yml b/test/integration/targets/vault/single_vault_as_string.yml
new file mode 100644
index 00000000..1eb17d04
--- /dev/null
+++ b/test/integration/targets/vault/single_vault_as_string.yml
@@ -0,0 +1,123 @@
+- hosts: localhost
+ vars:
+ vaulted_value: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 35323961353038346165643738646465376139363061353835303739663538343266303232326635
+ 3365353662646236356665323135633630656238316530640a663362363763633436373439663031
+ 33663433383037396438656464636433653837376361313638366362333037323961316364363363
+ 3835616438623261650a636164376534376661393134326662326362323131373964313961623365
+ 3833
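+    # (The vaulted scalar above decrypts to 'foo bar'; values like this can be
+    # generated with, e.g.: ansible-vault encrypt_string 'foo bar' --name vaulted_value)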
+ tasks:
+ - debug:
+ msg: "{{ vaulted_value }}"
+
+ - debug:
+ msg: "{{ vaulted_value|type_debug }}"
+
+ - assert:
+ that:
+ - vaulted_value is vault_encrypted
+ - vaulted_value == 'foo bar'
+ - vaulted_value|string == 'foo bar'
+ - vaulted_value|quote == "'foo bar'"
+ - vaulted_value|capitalize == 'Foo bar'
+ - vaulted_value|center(width=9) == ' foo bar '
+ - vaulted_value|default('monkey') == 'foo bar'
+ - vaulted_value|escape == 'foo bar'
+ - vaulted_value|forceescape == 'foo bar'
+ - vaulted_value|first == 'f'
+ - "'%s'|format(vaulted_value) == 'foo bar'"
+ - vaulted_value|indent(indentfirst=True) == ' foo bar'
+ - vaulted_value.split() == ['foo', 'bar']
+ - vaulted_value|join('-') == 'f-o-o- -b-a-r'
+ - vaulted_value|last == 'r'
+ - vaulted_value|length == 7
+ - vaulted_value|list == ['f', 'o', 'o', ' ', 'b', 'a', 'r']
+ - vaulted_value|lower == 'foo bar'
+ - vaulted_value|replace('foo', 'baz') == 'baz bar'
+ - vaulted_value|reverse|string == 'rab oof'
+ - vaulted_value|safe == 'foo bar'
+ - vaulted_value|slice(2)|list == [['f', 'o', 'o', ' '], ['b', 'a', 'r']]
+ - vaulted_value|sort|list == [" ", "a", "b", "f", "o", "o", "r"]
+ - vaulted_value|trim == 'foo bar'
+ - vaulted_value|upper == 'FOO BAR'
+ # jinja2.filters.do_urlencode uses an isinstance against string_types
+ # - vaulted_value|urlencode == 'foo%20bar'
+ - vaulted_value|urlize == 'foo bar'
+ - vaulted_value is not callable
+ - vaulted_value is iterable
+ - vaulted_value is lower
+ - vaulted_value is not none
+ # This is not exactly a string, and UserString doesn't fulfill this
+ # - vaulted_value is string
+ - vaulted_value is not upper
+
+ - vaulted_value|b64encode == 'Zm9vIGJhcg=='
+ - vaulted_value|to_uuid == '0271fe51-bb26-560f-b118-5d6513850860'
+ - vaulted_value|string|to_json == '"foo bar"'
+ - vaulted_value|md5 == '327b6f07435811239bc47e1544353273'
+ - vaulted_value|sha1 == '3773dea65156909838fa6c22825cafe090ff8030'
+ - vaulted_value|hash == '3773dea65156909838fa6c22825cafe090ff8030'
+ - vaulted_value|regex_replace('foo', 'baz') == 'baz bar'
+ - vaulted_value|regex_escape == 'foo\ bar'
+ - vaulted_value|regex_search('foo') == 'foo'
+ - vaulted_value|regex_findall('foo') == ['foo']
+ - vaulted_value|comment == '#\n# foo bar\n#'
+
+ - assert:
+ that:
+ - vaulted_value|random(seed='foo') == ' '
+ - vaulted_value|shuffle(seed='foo') == ["o", "f", "r", "b", "o", "a", " "]
+ - vaulted_value|pprint == "'foo bar'"
+ when: ansible_python.version.major == 3
+
+ - assert:
+ that:
+ - vaulted_value|random(seed='foo') == 'r'
+ - vaulted_value|shuffle(seed='foo') == ["b", "o", "a", " ", "o", "f", "r"]
+ - vaulted_value|pprint == "u'foo bar'"
+ when: ansible_python.version.major == 2
+
+ - assert:
+ that:
+ - vaulted_value|map('upper')|list == ['F', 'O', 'O', ' ', 'B', 'A', 'R']
+ when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.7', '>=')
+
+
+ - assert:
+ that:
+ - vaulted_value.split()|first|int(base=36) == 20328
+ - vaulted_value|select('equalto', 'o')|list == ['o', 'o']
+ - vaulted_value|title == 'Foo Bar'
+ - vaulted_value is equalto('foo bar')
+ when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.8', '>=')
+
+ - assert:
+ that:
+ - vaulted_value|string|tojson == '"foo bar"'
+ - vaulted_value|truncate(4) == 'foo bar'
+ when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.9', '>=')
+
+ - assert:
+ that:
+ - vaulted_value|wordwrap(4) == 'foo\nbar'
+ when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.11', '>=')
+
+ - assert:
+ that:
+ - vaulted_value|wordcount == 2
+ when: lookup('pipe', ansible_python.executable ~ ' -c "import jinja2; print(jinja2.__version__)"') is version('2.11.2', '>=')
+
+ - ping:
+ data: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 35323961353038346165643738646465376139363061353835303739663538343266303232326635
+ 3365353662646236356665323135633630656238316530640a663362363763633436373439663031
+ 33663433383037396438656464636433653837376361313638366362333037323961316364363363
+ 3835616438623261650a636164376534376661393134326662326362323131373964313961623365
+ 3833
+ register: ping_result
+
+ - assert:
+ that:
+ - ping_result.ping == 'foo bar'
diff --git a/test/integration/targets/vault/test-vault-client.py b/test/integration/targets/vault/test-vault-client.py
new file mode 100755
index 00000000..a2f17dc5
--- /dev/null
+++ b/test/integration/targets/vault/test-vault-client.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+import argparse
+import sys
+
+# TODO: could read these from the files I suppose...
+secrets = {'vault-password': 'test-vault-password',
+ 'vault-password-wrong': 'hunter42',
+ 'vault-password-ansible': 'ansible',
+ 'password': 'password',
+ 'vault-client-password-1': 'password-1',
+ 'vault-client-password-2': 'password-2'}
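+# Example: './test-vault-client.py --vault-id vault-client-password-1' prints
+# 'password-1' on stdout; an unknown --vault-id exits with rc 2 (see main()).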
+
+
+def build_arg_parser():
+ parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
+
+ parser.add_argument('--vault-id', action='store', default=None,
+ dest='vault_id',
+ help='name of the vault secret to get from keyring')
+ parser.add_argument('--username', action='store', default=None,
+ help='the username whose keyring is queried')
+ parser.add_argument('--set', action='store_true', default=False,
+ dest='set_password',
+ help='set the password instead of getting it')
+ return parser
+
+
+def get_secret(keyname):
+ return secrets.get(keyname, None)
+
+
+def main():
+ rc = 0
+
+ arg_parser = build_arg_parser()
+ args = arg_parser.parse_args()
+ # print('args: %s' % args)
+
+ keyname = args.vault_id or 'ansible'
+
+ if args.set_password:
+ print('--set is not supported yet')
+ sys.exit(1)
+
+ secret = get_secret(keyname)
+ if secret is None:
+ sys.stderr.write('test-vault-client could not find key for vault-id="%s"\n' % keyname)
+ # key not found rc=2
+ return 2
+
+ sys.stdout.write('%s\n' % secret)
+
+ return rc
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/test/integration/targets/vault/test_dangling_temp.yml b/test/integration/targets/vault/test_dangling_temp.yml
new file mode 100644
index 00000000..71a9d73a
--- /dev/null
+++ b/test/integration/targets/vault/test_dangling_temp.yml
@@ -0,0 +1,34 @@
+- hosts: localhost
+ gather_facts: False
+ vars:
+ od: "{{output_dir|default('/tmp')}}/test_vault_assemble"
+ tasks:
+ - name: create target directory
+ file:
+ path: "{{od}}"
+ state: directory
+
+  - name: assemble file with secret
+ assemble:
+ src: files/test_assemble
+ dest: "{{od}}/dest_file"
+ remote_src: no
+ mode: 0600
+
+ - name: remove assembled file with secret (so nothing should have unencrypted secret)
+ file: path="{{od}}/dest_file" state=absent
+
+ - name: find temp files with secrets
+ find:
+ paths: '{{temp_paths}}'
+ contains: 'VAULT TEST IN WHICH BAD THING HAPPENED'
+ recurse: yes
+ register: badthings
+ vars:
+ temp_paths: "{{[lookup('env', 'TMP'), lookup('env', 'TEMP'), hardcoded]|flatten(1)|unique|list}}"
+ hardcoded: ['/tmp', '/var/tmp']
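+      # temp_paths: $TMP and $TEMP from the environment plus the hardcoded
+      # defaults, flattened and de-duplicated before searching.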
+
+ - name: ensure we failed to find any
+ assert:
+ that:
+ - badthings['matched'] == 0
diff --git a/test/integration/targets/vault/test_utf8_value_in_filename.yml b/test/integration/targets/vault/test_utf8_value_in_filename.yml
new file mode 100644
index 00000000..9bd394dc
--- /dev/null
+++ b/test/integration/targets/vault/test_utf8_value_in_filename.yml
@@ -0,0 +1,16 @@
+- name: "Test that the vaulted file with UTF-8 in filename decrypts correctly"
+ gather_facts: false
+ hosts: testhost
+ vars:
+ expected: "my_secret"
+ vars_files:
+ - vault-café.yml
+ tasks:
+ - name: decrypt vaulted file with utf8 in filename and show it in debug
+ debug:
+ var: vault_string
+
+ - name: assert decrypted value matches expected
+ assert:
+ that:
+ - "vault_string == expected"
diff --git a/test/integration/targets/vault/test_vault.yml b/test/integration/targets/vault/test_vault.yml
new file mode 100644
index 00000000..7f8ed115
--- /dev/null
+++ b/test/integration/targets/vault/test_vault.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+ gather_facts: False
+ vars:
+ - output_dir: .
+ roles:
+ - { role: test_vault, tags: test_vault}
diff --git a/test/integration/targets/vault/test_vault_embedded.yml b/test/integration/targets/vault/test_vault_embedded.yml
new file mode 100644
index 00000000..ee9739f8
--- /dev/null
+++ b/test/integration/targets/vault/test_vault_embedded.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: False
+ roles:
+ - { role: test_vault_embedded, tags: test_vault_embedded}
diff --git a/test/integration/targets/vault/test_vault_embedded_ids.yml b/test/integration/targets/vault/test_vault_embedded_ids.yml
new file mode 100644
index 00000000..23ebbb96
--- /dev/null
+++ b/test/integration/targets/vault/test_vault_embedded_ids.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: False
+ roles:
+ - { role: test_vault_embedded_ids, tags: test_vault_embedded_ids}
diff --git a/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml b/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml
new file mode 100644
index 00000000..685d20ef
--- /dev/null
+++ b/test/integration/targets/vault/test_vault_file_encrypted_embedded.yml
@@ -0,0 +1,4 @@
+- hosts: testhost
+ gather_facts: False
+ roles:
+ - { role: test_vault_file_encrypted_embedded, tags: test_vault_file_encrypted_embedded}
diff --git a/test/integration/targets/vault/test_vaulted_inventory.yml b/test/integration/targets/vault/test_vaulted_inventory.yml
new file mode 100644
index 00000000..06b6582b
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_inventory.yml
@@ -0,0 +1,5 @@
+- hosts: vaulted_host
+ gather_facts: no
+ tasks:
+ - name: See if we knew vaulted_host
+ debug: msg="Found vaulted_host from vaulted.inventory"
diff --git a/test/integration/targets/vault/test_vaulted_inventory_toml.yml b/test/integration/targets/vault/test_vaulted_inventory_toml.yml
new file mode 100644
index 00000000..f6e2c5d6
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_inventory_toml.yml
@@ -0,0 +1,9 @@
+- hosts: vaulted_host_toml
+ gather_facts: no
+ tasks:
+ - name: See if we knew vaulted_host_toml
+ debug: msg="Found vaulted_host from vaulted.inventory.toml"
+
+ - assert:
+ that:
+ - 'hello=="world"'
diff --git a/test/integration/targets/vault/test_vaulted_template.yml b/test/integration/targets/vault/test_vaulted_template.yml
new file mode 100644
index 00000000..b495211d
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_template.yml
@@ -0,0 +1,6 @@
+- hosts: testhost
+ gather_facts: False
+ vars:
+ - output_dir: .
+ roles:
+ - { role: test_vaulted_template, tags: test_vaulted_template}
diff --git a/test/integration/targets/vault/test_vaulted_utf8_value.yml b/test/integration/targets/vault/test_vaulted_utf8_value.yml
new file mode 100644
index 00000000..63b602b1
--- /dev/null
+++ b/test/integration/targets/vault/test_vaulted_utf8_value.yml
@@ -0,0 +1,15 @@
+- name: "test that the vaulted_utf8_value decrypts correctly"
+ gather_facts: false
+ hosts: testhost
+ vars:
+ expected: "aöffü"
+ tasks:
+ - name: decrypt vaulted_utf8_value and show it in debug
+ debug:
+ var: vaulted_utf8_value
+
+ - name: assert decrypted vaulted_utf8_value matches expected
+ assert:
+ that:
+ - "vaulted_utf8_value == expected"
+ - "vaulted_utf8_value == 'aöffü'"
diff --git a/test/integration/targets/vault/vault-café.yml b/test/integration/targets/vault/vault-café.yml
new file mode 100644
index 00000000..0d179aec
--- /dev/null
+++ b/test/integration/targets/vault/vault-café.yml
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+63363732353363646661643038636339343263303161346533393636336562336465396265373834
+6366313833613236356666646532613636303532366231340a316238666435306332656662613731
+31623433613434633539333564613564656439343661363831336364376266653462366161383038
+6530386533363933350a336631653833666663643166303932653261323431623333356539666265
+37316464303231366163333430346537353631376538393939646362313337363866
diff --git a/test/integration/targets/vault/vault-password b/test/integration/targets/vault/vault-password
new file mode 100644
index 00000000..96973929
--- /dev/null
+++ b/test/integration/targets/vault/vault-password
@@ -0,0 +1 @@
+test-vault-password
diff --git a/test/integration/targets/vault/vault-password-ansible b/test/integration/targets/vault/vault-password-ansible
new file mode 100644
index 00000000..90d40550
--- /dev/null
+++ b/test/integration/targets/vault/vault-password-ansible
@@ -0,0 +1 @@
+ansible
diff --git a/test/integration/targets/vault/vault-password-wrong b/test/integration/targets/vault/vault-password-wrong
new file mode 100644
index 00000000..50e2efad
--- /dev/null
+++ b/test/integration/targets/vault/vault-password-wrong
@@ -0,0 +1 @@
+hunter42
diff --git a/test/integration/targets/vault/vault-secret.txt b/test/integration/targets/vault/vault-secret.txt
new file mode 100644
index 00000000..b6bc9bfb
--- /dev/null
+++ b/test/integration/targets/vault/vault-secret.txt
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+39303432393062643236616234306333383838333662386165616633303735336537613337396337
+6662666233356462326631653161663663363166323338320a653131656636666339633863346530
+32326238646631653133643936306666643065393038386234343736663239363665613963343661
+3230353633643361650a363034323631613864326438396665343237383566336339323837326464
+3930
diff --git a/test/integration/targets/vault/vaulted.inventory b/test/integration/targets/vault/vaulted.inventory
new file mode 100644
index 00000000..1ed258b6
--- /dev/null
+++ b/test/integration/targets/vault/vaulted.inventory
@@ -0,0 +1,8 @@
+$ANSIBLE_VAULT;1.1;AES256
+62663838646564656432633932396339666332653932656230356332316530613665336461653731
+3839393466623734663861313636356530396434376462320a623966363661306334333639356263
+37366332626434326537353562636139333835613961333635633333313832666432396361393861
+3538626339636634360a396239383139646438323662383637663138646439306532613732306263
+64666237366334663931363462313131323861613237613337366562373532373537613531636334
+64653938333938313539653539303031393936306432623862363263663438653932643338373338
+633436626431656361633934363263303962
diff --git a/test/integration/targets/wait_for/aliases b/test/integration/targets/wait_for/aliases
new file mode 100644
index 00000000..a4c92ef8
--- /dev/null
+++ b/test/integration/targets/wait_for/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/test/integration/targets/wait_for/files/testserver.py b/test/integration/targets/wait_for/files/testserver.py
new file mode 100644
index 00000000..1f6f1187
--- /dev/null
+++ b/test/integration/targets/wait_for/files/testserver.py
@@ -0,0 +1,16 @@
+import sys
+
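+# Minimal HTTP server used by the wait_for tests.
+# Usage: testserver.py PORT (serves the current working directory over HTTP).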
+if __name__ == '__main__':
+ if sys.version_info[0] >= 3:
+ import http.server
+ import socketserver
+ PORT = int(sys.argv[1])
+ Handler = http.server.SimpleHTTPRequestHandler
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ httpd.serve_forever()
+ else:
+ import mimetypes
+ mimetypes.init()
+ mimetypes.add_type('application/json', '.json')
+ import SimpleHTTPServer
+ SimpleHTTPServer.test()
diff --git a/test/integration/targets/wait_for/files/zombie.py b/test/integration/targets/wait_for/files/zombie.py
new file mode 100644
index 00000000..913074eb
--- /dev/null
+++ b/test/integration/targets/wait_for/files/zombie.py
@@ -0,0 +1,13 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import sys
+import time
+
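+# Fork once: the parent sleeps for 60 seconds without reaping the child, while
+# the child exits immediately and is left as a zombie process.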
+child_pid = os.fork()
+
+if child_pid > 0:
+ time.sleep(60)
+else:
+ sys.exit()
diff --git a/test/integration/targets/wait_for/meta/main.yml b/test/integration/targets/wait_for/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/test/integration/targets/wait_for/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/targets/wait_for/tasks/main.yml b/test/integration/targets/wait_for/tasks/main.yml
new file mode 100644
index 00000000..67e07786
--- /dev/null
+++ b/test/integration/targets/wait_for/tasks/main.yml
@@ -0,0 +1,177 @@
+---
+- name: test wait_for with delegate_to
+ wait_for:
+ timeout: 2
+ delegate_to: localhost
+ register: waitfor
+
+- assert:
+ that:
+ - waitfor is successful
+ - waitfor.elapsed >= 2
+
+- name: setup create a directory to serve files from
+ file:
+ dest: "{{ files_dir }}"
+ state: directory
+
+- name: setup webserver
+ copy:
+ src: "testserver.py"
+ dest: "{{ output_dir }}/testserver.py"
+
+- name: setup a path
+ file:
+ path: "{{ output_dir }}/wait_for_file"
+ state: touch
+
+- name: setup remove a file after 3s
+ shell: sleep 3 && rm {{ output_dir }}/wait_for_file
+ async: 20
+ poll: 0
+
+- name: test for absent path
+ wait_for:
+ path: "{{ output_dir }}/wait_for_file"
+ state: absent
+ timeout: 20
+ register: waitfor
+- name: verify test for absent path
+ assert:
+ that:
+ - waitfor is successful
+ - waitfor.path == "{{ output_dir | expanduser }}/wait_for_file"
+ - waitfor.elapsed >= 2
+ - waitfor.elapsed <= 15
+
+- name: setup create a file after 3s
+ shell: sleep 3 && touch {{ output_dir }}/wait_for_file
+ async: 20
+ poll: 0
+
+- name: test for present path
+ wait_for:
+ path: "{{ output_dir }}/wait_for_file"
+ timeout: 5
+ register: waitfor
+- name: verify test for present path
+ assert:
+ that:
+ - waitfor is successful
+ - waitfor.path == "{{ output_dir | expanduser }}/wait_for_file"
+ - waitfor.elapsed >= 2
+ - waitfor.elapsed <= 15
+
+- name: setup write keyword to file after 3s
+ shell: sleep 3 && echo completed > {{output_dir}}/wait_for_keyword
+ async: 20
+ poll: 0
+
+- name: test wait for keyword in file
+ wait_for:
+ path: "{{output_dir}}/wait_for_keyword"
+ search_regex: completed
+ timeout: 5
+ register: waitfor
+
+- name: verify test wait for keyword in file
+ assert:
+ that:
+ - waitfor is successful
+ - "waitfor.search_regex == 'completed'"
+ - waitfor.elapsed >= 2
+ - waitfor.elapsed <= 15
+
+- name: setup write keyword to file after 3s
+ shell: sleep 3 && echo "completed data 123" > {{output_dir}}/wait_for_keyword
+ async: 20
+ poll: 0
+
+- name: test wait for keyword in file with match groups
+ wait_for:
+ path: "{{output_dir}}/wait_for_keyword"
+ search_regex: completed (?P<foo>\w+) ([0-9]+)
+ timeout: 5
+ register: waitfor
+
+- name: verify test wait for keyword in file with match groups
+ assert:
+ that:
+ - waitfor is successful
+ - waitfor.elapsed >= 2
+ - waitfor.elapsed <= 15
+ - waitfor['match_groupdict'] | length == 1
+ - waitfor['match_groupdict']['foo'] == 'data'
+ - waitfor['match_groups'] == ['data', '123']
+
+- name: test wait for port timeout
+ wait_for:
+ port: 12121
+ timeout: 3
+ register: waitfor
+ ignore_errors: true
+- name: verify test wait for port timeout
+ assert:
+ that:
+ - waitfor is failed
+ - waitfor.elapsed == 3
+ - "waitfor.msg == 'Timeout when waiting for 127.0.0.1:12121'"
+
+- name: test fail with custom msg
+ wait_for:
+ port: 12121
+ msg: fail with custom message
+ timeout: 3
+ register: waitfor
+ ignore_errors: true
+- name: verify test fail with custom msg
+ assert:
+ that:
+ - waitfor is failed
+ - waitfor.elapsed == 3
+ - "waitfor.msg == 'fail with custom message'"
+
+- name: setup start SimpleHTTPServer
+ shell: sleep 3 && cd {{ files_dir }} && {{ ansible_python.executable }} {{ output_dir}}/testserver.py {{ http_port }}
+ async: 120 # this test set can take ~1m to run on FreeBSD (via Shippable)
+ poll: 0
+
+- name: test wait for port with sleep
+ wait_for:
+ port: "{{ http_port }}"
+ sleep: 3
+ register: waitfor
+- name: verify test wait for port sleep
+ assert:
+ that:
+ - waitfor is successful
+ - waitfor is not changed
+ - "waitfor.port == {{ http_port }}"
+
+- name: install psutil using pip (non-Linux only)
+ pip:
+ name: psutil
+ when: ansible_system != 'Linux'
+
+- name: Copy zombie.py
+ copy:
+ src: zombie.py
+ dest: "{{ output_dir }}"
+
+- name: Create zombie process
+ shell: "{{ ansible_python.executable }} {{ output_dir }}/zombie"
+ async: 90
+ poll: 0
+
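+# state=drained waits until no active connections remain on the port; the
+# zombie created above is there to exercise the connection scan against defunct processes.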
+- name: test wait for port drained
+ wait_for:
+ port: "{{ http_port }}"
+ state: drained
+ register: waitfor
+
+- name: verify test wait for port
+ assert:
+ that:
+ - waitfor is successful
+ - waitfor is not changed
+ - "waitfor.port == {{ http_port }}"
diff --git a/test/integration/targets/wait_for/vars/main.yml b/test/integration/targets/wait_for/vars/main.yml
new file mode 100644
index 00000000..c2732948
--- /dev/null
+++ b/test/integration/targets/wait_for/vars/main.yml
@@ -0,0 +1,4 @@
+---
+http_port: 15261
+files_dir: '{{ output_dir|expanduser }}/files'
+checkout_dir: '{{ output_dir }}/git'
diff --git a/test/integration/targets/wait_for_connection/aliases b/test/integration/targets/wait_for_connection/aliases
new file mode 100644
index 00000000..7ab3bd0a
--- /dev/null
+++ b/test/integration/targets/wait_for_connection/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+shippable/windows/group1
diff --git a/test/integration/targets/wait_for_connection/tasks/main.yml b/test/integration/targets/wait_for_connection/tasks/main.yml
new file mode 100644
index 00000000..19749e68
--- /dev/null
+++ b/test/integration/targets/wait_for_connection/tasks/main.yml
@@ -0,0 +1,30 @@
+- name: Test normal connection to target node
+ wait_for_connection:
+ connect_timeout: 5
+ sleep: 1
+ timeout: 10
+
+- name: Test normal connection to target node with delay
+ wait_for_connection:
+ connect_timeout: 5
+ sleep: 1
+ timeout: 10
+ delay: 3
+ register: result
+
+- name: Verify delay was honored
+ assert:
+ that:
+ - result.elapsed >= 3
+
+- name: Use invalid parameter
+ wait_for_connection:
+ foo: bar
+ ignore_errors: yes
+ register: invalid_parameter
+
+- name: Ensure task fails with error
+ assert:
+ that:
+ - invalid_parameter is failed
+ - "invalid_parameter.msg == 'Invalid options for wait_for_connection: foo'"
diff --git a/test/integration/targets/want_json_modules_posix/aliases b/test/integration/targets/want_json_modules_posix/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/test/integration/targets/want_json_modules_posix/library/helloworld.py b/test/integration/targets/want_json_modules_posix/library/helloworld.py
new file mode 100644
index 00000000..ad0301cb
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/library/helloworld.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# WANT_JSON
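+# (The WANT_JSON marker above tells Ansible to pass this module its arguments
+# as the path to a JSON file in argv[1], which is parsed below.)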
+
+import json
+import sys
+
+try:
+ with open(sys.argv[1], 'r') as f:
+ data = json.load(f)
+except (IOError, OSError, IndexError):
+ print(json.dumps(dict(msg="No argument file provided", failed=True)))
+ sys.exit(1)
+
+salutation = data.get('salutation', 'Hello')
+name = data.get('name', 'World')
+print(json.dumps(dict(msg='%s, %s!' % (salutation, name))))
diff --git a/test/integration/targets/want_json_modules_posix/meta/main.yml b/test/integration/targets/want_json_modules_posix/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/want_json_modules_posix/tasks/main.yml b/test/integration/targets/want_json_modules_posix/tasks/main.yml
new file mode 100644
index 00000000..27e9f781
--- /dev/null
+++ b/test/integration/targets/want_json_modules_posix/tasks/main.yml
@@ -0,0 +1,43 @@
+- name: Hello, World!
+ helloworld:
+ register: hello_world
+
+- assert:
+ that:
+ - 'hello_world.msg == "Hello, World!"'
+
+- name: Hello, Ansible!
+ helloworld:
+ args:
+ name: Ansible
+ register: hello_ansible
+
+- assert:
+ that:
+ - 'hello_ansible.msg == "Hello, Ansible!"'
+
+- name: Goodbye, Ansible!
+ helloworld:
+ args:
+ salutation: Goodbye
+ name: Ansible
+ register: goodbye_ansible
+
+- assert:
+ that:
+ - 'goodbye_ansible.msg == "Goodbye, Ansible!"'
+
+- name: Copy module to remote
+ copy:
+ src: "{{ role_path }}/library/helloworld.py"
+ dest: "{{ remote_tmp_dir }}/helloworld.py"
+
+- name: Execute module directly
+ command: '{{ ansible_python_interpreter|default(ansible_playbook_python) }} {{ remote_tmp_dir }}/helloworld.py'
+ register: direct
+ ignore_errors: true
+
+- assert:
+ that:
+ - direct is failed
+ - 'direct.stdout | from_json == {"msg": "No argument file provided", "failed": true}'
diff --git a/test/integration/targets/win_async_wrapper/aliases b/test/integration/targets/win_async_wrapper/aliases
new file mode 100644
index 00000000..59dda5e6
--- /dev/null
+++ b/test/integration/targets/win_async_wrapper/aliases
@@ -0,0 +1,3 @@
+async_status
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_async_wrapper/library/async_test.ps1 b/test/integration/targets/win_async_wrapper/library/async_test.ps1
new file mode 100644
index 00000000..fa41b3e8
--- /dev/null
+++ b/test/integration/targets/win_async_wrapper/library/async_test.ps1
@@ -0,0 +1,48 @@
+#!powershell
+
+# Copyright: (c) 2018, Ansible Project
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$parsed_args = Parse-Args $args
+
+$sleep_delay_sec = Get-AnsibleParam -obj $parsed_args -name "sleep_delay_sec" -type "int" -default 0
+$fail_mode = Get-AnsibleParam -obj $parsed_args -name "fail_mode" -type "str" -default "success" -validateset "success","graceful","exception"
+
+If($fail_mode -isnot [array]) {
+ $fail_mode = @($fail_mode)
+}
+
+$result = @{
+ changed = $true
+ module_pid = $pid
+ module_tempdir = $PSScriptRoot
+}
+
+If($sleep_delay_sec -gt 0) {
+ Sleep -Seconds $sleep_delay_sec
+ $result["slept_sec"] = $sleep_delay_sec
+}
+
+If($fail_mode -contains "leading_junk") {
+ Write-Output "leading junk before module output"
+}
+
+If($fail_mode -contains "graceful") {
+ Fail-Json $result "failed gracefully"
+}
+
+Try {
+
+ If($fail_mode -contains "exception") {
+ Throw "failing via exception"
+ }
+
+ Exit-Json $result
+}
+Finally
+{
+ If($fail_mode -contains "trailing_junk") {
+ Write-Output "trailing junk after module output"
+ }
+}
diff --git a/test/integration/targets/win_async_wrapper/tasks/main.yml b/test/integration/targets/win_async_wrapper/tasks/main.yml
new file mode 100644
index 00000000..91b45846
--- /dev/null
+++ b/test/integration/targets/win_async_wrapper/tasks/main.yml
@@ -0,0 +1,257 @@
+- name: capture timestamp before fire and forget
+ set_fact:
+ start_timestamp: "{{ lookup('pipe', 'date +%s') }}"
+
+- name: async fire and forget
+ async_test:
+ sleep_delay_sec: 15
+ async: 20
+ poll: 0
+ register: asyncresult
+
+- name: validate response
+ assert:
+ that:
+ - asyncresult.ansible_job_id is match('\d+\.\d+')
+ - asyncresult.started == 1
+ - asyncresult is started
+ - asyncresult.finished == 0
+ - asyncresult is not finished
+ - asyncresult.results_file is search('\.ansible_async.+\d+\.\d+')
+      # ensure that async is actually async - this test will fail if the number of hosts exceeds forks or if the target host is VERY slow
+ - (lookup('pipe', 'date +%s') | int) - (start_timestamp | int) < 15
+
+- name: async poll immediate success
+ async_test:
+ sleep_delay_sec: 0
+ async: 10
+ poll: 1
+ register: asyncresult
+
+- name: validate response
+ assert:
+ that:
+ - asyncresult.ansible_job_id is match('\d+\.\d+')
+ - asyncresult.finished == 1
+ - asyncresult is finished
+ - asyncresult is changed
+ - asyncresult.ansible_async_watchdog_pid is number
+# - asyncresult.module_tempdir is search('ansible-tmp-')
+ - asyncresult.module_pid is number
+
+# this part of the test is flaky - Windows PIDs are reused aggressively, so this occasionally fails due to a new process with the same ID
+# FUTURE: consider having the test module hook to a kernel object we can poke at that gets signaled/released on exit
+#- name: ensure that watchdog and module procs have exited
+# raw: Get-Process | Where { $_.Id -in ({{ asyncresult.ansible_async_watchdog_pid }}, {{ asyncresult.module_pid }}) }
+# register: proclist
+#
+#- name: validate no running watchdog/module processes were returned
+# assert:
+# that:
+# - proclist.stdout.strip() == ''
+
+#- name: ensure that module_tempdir was deleted
+# raw: Test-Path {{ asyncresult.module_tempdir }}
+# register: tempdircheck
+#
+#- name: validate tempdir response
+# assert:
+# that:
+# - tempdircheck.stdout is search('False')
+
+- name: async poll retry
+ async_test:
+ sleep_delay_sec: 5
+ async: 10
+ poll: 1
+ register: asyncresult
+
+- name: validate response
+ assert:
+ that:
+ - asyncresult.ansible_job_id is match('\d+\.\d+')
+ - asyncresult.finished == 1
+ - asyncresult is finished
+ - asyncresult is changed
+# - asyncresult.module_tempdir is search('ansible-tmp-')
+ - asyncresult.module_pid is number
+
+# this part of the test is flaky - Windows PIDs are reused aggressively, so this occasionally fails due to a new process with the same ID
+# FUTURE: consider having the test module hook to a kernel object we can poke at that gets signaled/released on exit
+#- name: ensure that watchdog and module procs have exited
+# raw: Get-Process | Where { $_.Id -in ({{ asyncresult.ansible_async_watchdog_pid }}, {{ asyncresult.module_pid }}) }
+# register: proclist
+#
+#- name: validate no running watchdog/module processes were returned
+# assert:
+# that:
+# - proclist.stdout.strip() == ''
+
+#- name: ensure that module_tempdir was deleted
+# raw: Test-Path {{ asyncresult.module_tempdir }}
+# register: tempdircheck
+#
+#- name: validate tempdir response
+# assert:
+# that:
+# - tempdircheck.stdout is search('False')
+
+- name: async poll timeout
+ async_test:
+ sleep_delay_sec: 5
+ async: 3
+ poll: 1
+ register: asyncresult
+ ignore_errors: true
+
+- name: validate response
+ assert:
+ that:
+ - asyncresult.ansible_job_id is match('\d+\.\d+')
+ - asyncresult.finished == 1
+ - asyncresult is finished
+ - asyncresult is not changed
+ - asyncresult is failed
+ - asyncresult.msg is search('timed out')
+
+- name: async poll graceful module failure
+ async_test:
+ fail_mode: graceful
+ async: 5
+ poll: 1
+ register: asyncresult
+ ignore_errors: true
+
+- name: validate response
+ assert:
+ that:
+ - asyncresult.ansible_job_id is match('\d+\.\d+')
+ - asyncresult.finished == 1
+ - asyncresult is finished
+ - asyncresult is changed
+ - asyncresult is failed
+ - asyncresult.msg == 'failed gracefully'
+
+- name: async poll exception module failure
+ async_test:
+ fail_mode: exception
+ async: 5
+ poll: 1
+ register: asyncresult
+ ignore_errors: true
+
+- name: validate response
+ assert:
+ that:
+ - asyncresult.ansible_job_id is match('\d+\.\d+')
+ - asyncresult.finished == 1
+ - asyncresult is finished
+ - asyncresult is not changed
+ - asyncresult is failed
+ - 'asyncresult.msg == "Unhandled exception while executing module: failing via exception"'
+
+- name: echo some non ascii characters
+ win_command: cmd.exe /c echo über den Fußgängerübergang gehen
+ async: 10
+ poll: 1
+ register: nonascii_output
+
+- name: assert echo some non ascii characters
+ assert:
+ that:
+ - nonascii_output is changed
+ - nonascii_output.rc == 0
+ - nonascii_output.stdout_lines|count == 1
+ - nonascii_output.stdout_lines[0] == 'über den Fußgängerübergang gehen'
+ - nonascii_output.stderr == ''
+
+- name: test async with custom async dir
+ win_shell: echo hi
+ register: async_custom_dir
+ async: 5
+ vars:
+ ansible_async_dir: '{{win_output_dir}}'
+
+- name: assert results file is in the remote tmp specified
+ assert:
+ that:
+ - async_custom_dir.results_file == win_output_dir + '\\' + async_custom_dir.ansible_job_id
+
+- name: test async fire and forget with custom async dir
+ win_shell: echo hi
+ register: async_custom_dir_poll
+ async: 5
+ poll: 0
+ vars:
+ ansible_async_dir: '{{win_output_dir}}'
+
+- name: poll with different dir - fail
+ async_status:
+ jid: '{{ async_custom_dir_poll.ansible_job_id }}'
+ register: fail_async_custom_dir_poll
+ ignore_errors: yes
+
+- name: poll with different dir - success
+ async_status:
+ jid: '{{ async_custom_dir_poll.ansible_job_id }}'
+ register: success_async_custom_dir_poll
+ vars:
+ ansible_async_dir: '{{win_output_dir}}'
+
+- name: assert test async fire and forget with custom async dir
+ assert:
+ that:
+ - fail_async_custom_dir_poll.failed
+ - '"could not find job at ''" + nonascii_output.results_file|win_dirname + "''" in fail_async_custom_dir_poll.msg'
+ - not success_async_custom_dir_poll.failed
+ - success_async_custom_dir_poll.results_file == win_output_dir + '\\' + async_custom_dir_poll.ansible_job_id
+
+# FUTURE: figure out why the last iteration of this test often fails on shippable
+#- name: loop async success
+# async_test:
+# sleep_delay_sec: 3
+# async: 10
+# poll: 0
+# with_sequence: start=1 end=4
+# register: async_many
+#
+#- name: wait for completion
+# async_status:
+# jid: "{{ item }}"
+# register: asyncout
+# until: asyncout is finished
+# retries: 10
+# delay: 1
+# with_items: "{{ async_many.results | map(attribute='ansible_job_id') | list }}"
+#
+#- name: validate results
+# assert:
+# that:
+# - item.finished == 1
+# - item is finished
+# - item.slept_sec == 3
+# - item is changed
+# - item.ansible_job_id is match('\d+\.\d+')
+# with_items: "{{ asyncout.results }}"
+
+# this part of the test is flaky - Windows PIDs are reused aggressively, so this occasionally fails due to a new process with the same ID
+# FUTURE: consider having the test module hook to a kernel object we can poke at that gets signaled/released on exit
+#- name: ensure that all watchdog and module procs have exited
+# raw: Get-Process | Where { $_.Id -in ({{ asyncout.results | join(',', attribute='ansible_async_watchdog_pid') }}, {{ asyncout.results | join(',', attribute='module_pid') }}) }
+# register: proclist
+#
+#- name: validate no processes were returned
+# assert:
+# that:
+# - proclist.stdout.strip() == ""
+
+# FUTURE: test junk before/after JSON
+# FUTURE: verify tempdir stays through module exec
+# FUTURE: verify tempdir is deleted after module exec
+# FUTURE: verify tempdir is permanent with ANSIBLE_KEEP_REMOTE_FILES=1 (how?)
+# FUTURE: verify binary modules work
+
+# FUTURE: test status/return
+# FUTURE: test status/cleanup
+# FUTURE: test reboot/connection failure
+# FUTURE: figure out how to ensure that processes and tempdirs are cleaned up in all exceptional cases
diff --git a/test/integration/targets/win_become/aliases b/test/integration/targets/win_become/aliases
new file mode 100644
index 00000000..1eed2ecf
--- /dev/null
+++ b/test/integration/targets/win_become/aliases
@@ -0,0 +1,2 @@
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_become/tasks/main.yml b/test/integration/targets/win_become/tasks/main.yml
new file mode 100644
index 00000000..a0759580
--- /dev/null
+++ b/test/integration/targets/win_become/tasks/main.yml
@@ -0,0 +1,251 @@
+- set_fact:
+ become_test_username: ansible_become_test
+ become_test_admin_username: ansible_become_admin
+ gen_pw: "{{ 'password123!' + lookup('password', '/dev/null chars=ascii_letters,digits length=8') }}"
+
+- name: create unprivileged user
+ win_user:
+ name: "{{ become_test_username }}"
+ password: "{{ gen_pw }}"
+ update_password: always
+ groups: Users
+ register: user_limited_result
+
+- name: create a privileged user
+ win_user:
+ name: "{{ become_test_admin_username }}"
+ password: "{{ gen_pw }}"
+ update_password: always
+ groups: Administrators
+ register: user_admin_result
+
+- name: add requisite logon rights for test user
+ win_user_right:
+ name: '{{item}}'
+ users: '{{become_test_username}}'
+ action: add
+ with_items:
+ - SeNetworkLogonRight
+ - SeInteractiveLogonRight
+ - SeBatchLogonRight
+
+- name: fetch current target date/time for log filtering
+ raw: '[datetime]::now | Out-String'
+ register: test_starttime
+
+- name: execute tests and ensure that test user is deleted regardless of success/failure
+ block:
+ - name: ensure current user is not the become user
+ win_whoami:
+ register: whoami_out
+ failed_when: whoami_out.account.sid == user_limited_result.sid or whoami_out.account.sid == user_admin_result.sid
+
+ - name: get become user profile dir so we can clean it up later
+ vars: &become_vars
+ ansible_become_user: "{{ become_test_username }}"
+ ansible_become_password: "{{ gen_pw }}"
+ ansible_become_method: runas
+ ansible_become: yes
+ win_shell: $env:USERPROFILE
+ register: profile_dir_out
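+    # (&become_vars above is a YAML anchor; later tasks re-apply the same
+    # become settings via the *become_vars alias.)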
+
+  - name: ensure profile dir contains test username (e.g. if become fails silently, prevent deletion of the real user profile)
+ assert:
+ that:
+ - become_test_username in profile_dir_out.stdout_lines[0]
+
+ - name: get become admin user profile dir so we can clean it up later
+ vars: &admin_become_vars
+ ansible_become_user: "{{ become_test_admin_username }}"
+ ansible_become_password: "{{ gen_pw }}"
+ ansible_become_method: runas
+ ansible_become: yes
+ win_shell: $env:USERPROFILE
+ register: admin_profile_dir_out
+
+ - name: ensure profile dir contains admin test username
+ assert:
+ that:
+ - become_test_admin_username in admin_profile_dir_out.stdout_lines[0]
+
+ - name: test become runas via task vars (underprivileged user)
+ vars: *become_vars
+ win_whoami:
+ register: whoami_out
+
+ - name: verify output
+ assert:
+ that:
+ - whoami_out.account.sid == user_limited_result.sid
+ - whoami_out.account.account_name == become_test_username
+ - whoami_out.label.account_name == 'Medium Mandatory Level'
+ - whoami_out.label.sid == 'S-1-16-8192'
+ - whoami_out.logon_type == 'Interactive'
+
+ - name: test become runas via task vars (privileged user)
+ vars: *admin_become_vars
+ win_whoami:
+ register: whoami_out
+
+ - name: verify output
+ assert:
+ that:
+ - whoami_out.account.sid == user_admin_result.sid
+ - whoami_out.account.account_name == become_test_admin_username
+ - whoami_out.label.account_name == 'High Mandatory Level'
+ - whoami_out.label.sid == 'S-1-16-12288'
+ - whoami_out.logon_type == 'Interactive'
+
+ - name: test become runas via task keywords
+ vars:
+ ansible_become_password: "{{ gen_pw }}"
+ become: yes
+ become_method: runas
+ become_user: "{{ become_test_username }}"
+ win_shell: whoami
+ register: whoami_out
+
+ - name: verify output
+ assert:
+ that:
+ - whoami_out.stdout_lines[0].endswith(become_test_username)
+
+ - name: test become via block vars
+ vars: *become_vars
+ block:
+ - name: ask who the current user is
+ win_whoami:
+ register: whoami_out
+
+ - name: verify output
+ assert:
+ that:
+ - whoami_out.account.sid == user_limited_result.sid
+ - whoami_out.account.account_name == become_test_username
+ - whoami_out.label.account_name == 'Medium Mandatory Level'
+ - whoami_out.label.sid == 'S-1-16-8192'
+ - whoami_out.logon_type == 'Interactive'
+
+ - name: test with module that will return non-zero exit code (https://github.com/ansible/ansible/issues/30468)
+ vars: *become_vars
+ setup:
+
+ - name: test become with invalid password
+ win_whoami:
+ vars:
+ ansible_become_pass: '{{ gen_pw }}abc'
+ become: yes
+ become_method: runas
+ become_user: '{{ become_test_username }}'
+ register: become_invalid_pass
+ failed_when:
+ - '"Failed to become user " + become_test_username not in become_invalid_pass.msg'
+ - '"LogonUser failed" not in become_invalid_pass.msg'
+ - '"Win32ErrorCode 1326 - 0x0000052E)" not in become_invalid_pass.msg'
+
+ - name: test become password precedence
+ win_whoami:
+ become: yes
+ become_method: runas
+ become_user: '{{ become_test_username }}'
+ vars:
+ ansible_become_pass: broken
+ ansible_runas_pass: '{{ gen_pw }}' # should have a higher precedence than ansible_become_pass
+
+ - name: test become + async
+ vars: *become_vars
+ win_command: whoami
+ async: 10
+ register: whoami_out
+
+ - name: verify become + async worked
+ assert:
+ that:
+ - whoami_out is successful
+ - become_test_username in whoami_out.stdout
+
+ - name: test failure with string become invalid key
+ vars: *become_vars
+ win_whoami:
+ become_flags: logon_type=batch invalid_flags=a
+ become_method: runas
+ register: failed_flags_invalid_key
+ failed_when: "failed_flags_invalid_key.msg != \"internal error: failed to parse become_flags 'logon_type=batch invalid_flags=a': become_flags key 'invalid_flags' is not a valid runas flag, must be 'logon_type' or 'logon_flags'\""
+
+ - name: test failure with invalid logon_type
+ vars: *become_vars
+ win_whoami:
+ become_flags: logon_type=invalid
+ register: failed_flags_invalid_type
+ failed_when: "failed_flags_invalid_type.msg != \"internal error: failed to parse become_flags 'logon_type=invalid': become_flags logon_type value 'invalid' is not valid, valid values are: interactive, network, batch, service, unlock, network_cleartext, new_credentials\""
+
+ - name: test failure with invalid logon_flag
+ vars: *become_vars
+ win_whoami:
+ become_flags: logon_flags=with_profile,invalid
+ register: failed_flags_invalid_flag
+ failed_when: "failed_flags_invalid_flag.msg != \"internal error: failed to parse become_flags 'logon_flags=with_profile,invalid': become_flags logon_flags value 'invalid' is not valid, valid values are: with_profile, netcredentials_only\""
+
+ - name: echo some non ascii characters
+ win_command: cmd.exe /c echo über den Fußgängerübergang gehen
+ vars: *become_vars
+ register: nonascii_output
+
+ - name: assert echo some non ascii characters
+ assert:
+ that:
+ - nonascii_output is changed
+ - nonascii_output.rc == 0
+ - nonascii_output.stdout_lines|count == 1
+ - nonascii_output.stdout_lines[0] == 'über den Fußgängerübergang gehen'
+ - nonascii_output.stderr == ''
+
+ - name: get PS events containing password or module args created since test start
+ raw: |
+ $dt=[datetime]"{{ test_starttime.stdout|trim }}"
+ (Get-WinEvent -LogName Microsoft-Windows-Powershell/Operational |
+ ? { $_.TimeCreated -ge $dt -and $_.Message -match "{{ gen_pw }}" }).Count
+ register: ps_log_count
+
+ - name: assert no PS events contain password or module args
+ assert:
+ that:
+ - ps_log_count.stdout | int == 0
+
+# FUTURE: test raw + script become behavior once they're running under the exec wrapper again
+# FUTURE: add standalone playbook tests to include password prompting and play become keywords
+
+ always:
+ - name: remove explicit logon rights for test user
+ win_user_right:
+ name: '{{item}}'
+ users: '{{become_test_username}}'
+ action: remove
+ with_items:
+ - SeNetworkLogonRight
+ - SeInteractiveLogonRight
+ - SeBatchLogonRight
+
+ - name: ensure underprivileged test user is deleted
+ win_user:
+ name: "{{ become_test_username }}"
+ state: absent
+
+ - name: ensure privileged test user is deleted
+ win_user:
+ name: "{{ become_test_admin_username }}"
+ state: absent
+
+ - name: ensure underprivileged test user profile is deleted
+ # NB: have to work around powershell limitation of long filenames until win_file fixes it
+ win_shell: rmdir /S /Q {{ profile_dir_out.stdout_lines[0] }}
+ args:
+ executable: cmd.exe
+ when: become_test_username in profile_dir_out.stdout_lines[0]
+
+ - name: ensure privileged test user profile is deleted
+ # NB: have to work around powershell limitation of long filenames until win_file fixes it
+ win_shell: rmdir /S /Q {{ admin_profile_dir_out.stdout_lines[0] }}
+ args:
+ executable: cmd.exe
+ when: become_test_admin_username in admin_profile_dir_out.stdout_lines[0]
diff --git a/test/integration/targets/win_exec_wrapper/aliases b/test/integration/targets/win_exec_wrapper/aliases
new file mode 100644
index 00000000..1eed2ecf
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/aliases
@@ -0,0 +1,2 @@
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_exec_wrapper/library/test_all_options.ps1 b/test/integration/targets/win_exec_wrapper/library/test_all_options.ps1
new file mode 100644
index 00000000..7c2c9c7b
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/library/test_all_options.ps1
@@ -0,0 +1,12 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.SID
+#Requires -Version 3.0
+#AnsibleRequires -OSVersion 6
+#AnsibleRequires -Become
+
+$output = &whoami.exe
+$sid = Convert-ToSID -account_name $output.Trim()
+
+Exit-Json -obj @{ output = $sid; changed = $false }
diff --git a/test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1 b/test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1
new file mode 100644
index 00000000..9a5918f9
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/library/test_common_functions.ps1
@@ -0,0 +1,40 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$ErrorActionPreference = "Stop"
+
+Function Assert-Equals($actual, $expected) {
+ if ($actual -cne $expected) {
+ $call_stack = (Get-PSCallStack)[1]
+ $error_msg = "AssertionError:`r`nActual: `"$actual`" != Expected: `"$expected`"`r`nLine: $($call_stack.ScriptLineNumber), Method: $($call_stack.Position.Text)"
+ Fail-Json -obj $result -message $error_msg
+ }
+}
+
+$result = @{
+ changed = $false
+}
+
+# ConvertFrom-AnsibleJson
+$input_json = '{"string":"string","float":3.1415926,"dict":{"string":"string","int":1},"list":["entry 1","entry 2"],"null":null,"int":1}'
+$actual = ConvertFrom-AnsibleJson -InputObject $input_json
+Assert-Equals -actual $actual.GetType() -expected ([Hashtable])
+Assert-Equals -actual $actual.string.GetType() -expected ([String])
+Assert-Equals -actual $actual.string -expected "string"
+Assert-Equals -actual $actual.int.GetType() -expected ([Int32])
+Assert-Equals -actual $actual.int -expected 1
+Assert-Equals -actual $actual.null -expected $null
+Assert-Equals -actual $actual.float.GetType() -expected ([Decimal])
+Assert-Equals -actual $actual.float -expected 3.1415926
+Assert-Equals -actual $actual.list.GetType() -expected ([Object[]])
+Assert-Equals -actual $actual.list.Count -expected 2
+Assert-Equals -actual $actual.list[0] -expected "entry 1"
+Assert-Equals -actual $actual.list[1] -expected "entry 2"
+Assert-Equals -actual $actual.GetType() -expected ([Hashtable])
+Assert-Equals -actual $actual.dict.string -expected "string"
+Assert-Equals -actual $actual.dict.int -expected 1
+
+$result.msg = "good"
+Exit-Json -obj $result
+
diff --git a/test/integration/targets/win_exec_wrapper/library/test_fail.ps1 b/test/integration/targets/win_exec_wrapper/library/test_fail.ps1
new file mode 100644
index 00000000..06c63f72
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/library/test_fail.ps1
@@ -0,0 +1,58 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$params = Parse-Args $args -supports_check_mode $true
+
+$data = Get-AnsibleParam -obj $params -name "data" -type "str" -default "normal"
+$result = @{
+ changed = $false
+}
+
+<#
+This module tests various error events in PowerShell to verify our hidden trap
+catches them all and outputs a pretty error message with a traceback to help
+users debug the actual issue
+
+normal - normal execution, no errors
+fail - Calls Fail-Json like normal
+throw - throws an exception
+error - Write-Error with ErrorActionPreference set to Stop
+cmdlet_error - Calls a Cmdlet that fails with an error
+dotnet_exception - Calls a .NET function that will throw an error
+function_throw - Throws an exception in a function
+proc_exit_fine - calls an executable with a non-zero exit code with Exit-Json
+proc_exit_fail - calls an executable with a non-zero exit code with Fail-Json
+#>
+
+Function Test-ThrowException {
+ throw "exception in function"
+}
+
+if ($data -eq "normal") {
+ Exit-Json -obj $result
+} elseif ($data -eq "fail") {
+ Fail-Json -obj $result -message "fail message"
+} elseif ($data -eq "throw") {
+ throw [ArgumentException]"module is thrown"
+} elseif ($data -eq "error") {
+ Write-Error -Message $data
+} elseif ($data -eq "cmdlet_error") {
+ Get-Item -Path "fake:\path"
+} elseif ($data -eq "dotnet_exception") {
+ [System.IO.Path]::GetFullPath($null)
+} elseif ($data -eq "function_throw") {
+ Test-ThrowException
+} elseif ($data -eq "proc_exit_fine") {
+    # verifies that if no error was actually raised and we have output, we
+    # don't use the RC to decide whether the module failed
+ &cmd.exe /c exit 2
+ Exit-Json -obj $result
+} elseif ($data -eq "proc_exit_fail") {
+ &cmd.exe /c exit 2
+ Fail-Json -obj $result -message "proc_exit_fail"
+}
+
+# verify no exception were silently caught during our tests
+Fail-Json -obj $result -message "end of module"
+
diff --git a/test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1 b/test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1
new file mode 100644
index 00000000..89727ef1
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/library/test_invalid_requires.ps1
@@ -0,0 +1,9 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+# Requires -Version 20
+# AnsibleRequires -OSVersion 20
+
+# a requires statement must come straight after the '#' with no space, so the
+# two lines above are ignored as plain comments and this module won't fail
+# (see the sketch at the end of this file)
+
+Exit-Json -obj @{ output = "output"; changed = $false }
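+# Sketch of the parsing rule noted above (illustration only):
+#   #Requires -Module Ansible.ModuleUtils.Legacy   <- no space after '#', honoured
+#   # Requires -Version 20                         <- space after '#', plain comment, ignored
+#   # AnsibleRequires -OSVersion 20                <- likewise ignored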
diff --git a/test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1 b/test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1
new file mode 100644
index 00000000..39b1ded1
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/library/test_min_os_version.ps1
@@ -0,0 +1,8 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#AnsibleRequires -OSVersion 20.0
+
+# this shouldn't run as no Windows OS will meet the minimum version of 20.0
+
+Exit-Json -obj @{ output = "output"; changed = $false }
diff --git a/test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1 b/test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1
new file mode 100644
index 00000000..bb5fd0f2
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/library/test_min_ps_version.ps1
@@ -0,0 +1,8 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Version 20.0.0.0
+
+# this shouldn't run as no PowerShell version will reach 20 in the near future
+
+Exit-Json -obj @{ output = "output"; changed = $false }
diff --git a/test/integration/targets/win_exec_wrapper/tasks/main.yml b/test/integration/targets/win_exec_wrapper/tasks/main.yml
new file mode 100644
index 00000000..8fc54f7c
--- /dev/null
+++ b/test/integration/targets/win_exec_wrapper/tasks/main.yml
@@ -0,0 +1,274 @@
+---
+- name: fetch current target date/time for log filtering
+ raw: '[datetime]::now | Out-String'
+ register: test_starttime
+
+- name: test normal module execution
+ test_fail:
+ register: normal
+
+- name: assert test normal module execution
+ assert:
+ that:
+ - not normal is failed
+
+- name: test fail module execution
+ test_fail:
+ data: fail
+ register: fail_module
+ ignore_errors: yes
+
+- name: assert test fail module execution
+ assert:
+ that:
+ - fail_module is failed
+ - fail_module.msg == "fail message"
+ - not fail_module.exception is defined
+
+- name: test module with exception thrown
+ test_fail:
+ data: throw
+ register: throw_module
+ ignore_errors: yes
+
+- name: assert test module with exception thrown
+ assert:
+ that:
+ - throw_module is failed
+ - 'throw_module.msg == "Unhandled exception while executing module: module is thrown"'
+ - '"throw [ArgumentException]\"module is thrown\"" in throw_module.exception'
+
+- name: test module with error msg
+ test_fail:
+ data: error
+ register: error_module
+ ignore_errors: yes
+ vars:
+ # Running with coverage means the module is run from a script and not as a pseudo script in a pipeline. This
+ # results in a different error message being returned, so we disable coverage collection for this task.
+ _ansible_coverage_remote_output: ''
+
+- name: assert test module with error msg
+ assert:
+ that:
+ - error_module is failed
+ - 'error_module.msg == "Unhandled exception while executing module: error"'
+ - '"Write-Error -Message $data" in error_module.exception'
+
+- name: test module with cmdlet error
+ test_fail:
+ data: cmdlet_error
+ register: cmdlet_error
+ ignore_errors: yes
+
+- name: assert test module with cmdlet error
+ assert:
+ that:
+ - cmdlet_error is failed
+ - 'cmdlet_error.msg == "Unhandled exception while executing module: Cannot find drive. A drive with the name ''fake'' does not exist."'
+ - '"Get-Item -Path \"fake:\\path\"" in cmdlet_error.exception'
+
+- name: test module with .NET exception
+ test_fail:
+ data: dotnet_exception
+ register: dotnet_exception
+ ignore_errors: yes
+
+- name: assert test module with .NET exception
+ assert:
+ that:
+ - dotnet_exception is failed
+ - 'dotnet_exception.msg == "Unhandled exception while executing module: Exception calling \"GetFullPath\" with \"1\" argument(s): \"The path is not of a legal form.\""'
+ - '"[System.IO.Path]::GetFullPath($null)" in dotnet_exception.exception'
+
+- name: test module with function exception
+ test_fail:
+ data: function_throw
+ register: function_exception
+ ignore_errors: yes
+ vars:
+ _ansible_coverage_remote_output: ''
+
+- name: assert test module with function exception
+ assert:
+ that:
+ - function_exception is failed
+ - 'function_exception.msg == "Unhandled exception while executing module: exception in function"'
+ - '"throw \"exception in function\"" in function_exception.exception'
+ - '"at Test-ThrowException, <No file>: line" in function_exception.exception'
+
+- name: test module with fail process but Exit-Json
+ test_fail:
+ data: proc_exit_fine
+ register: proc_exit_fine
+
+- name: assert test module with fail process but Exit-Json
+ assert:
+ that:
+ - not proc_exit_fine is failed
+
+- name: test module with fail process but Fail-Json
+ test_fail:
+ data: proc_exit_fail
+ register: proc_exit_fail
+ ignore_errors: yes
+
+- name: assert test module with fail process but Fail-Json
+ assert:
+ that:
+ - proc_exit_fail is failed
+ - proc_exit_fail.msg == "proc_exit_fail"
+ - not proc_exit_fail.exception is defined
+
+- name: test out invalid options
+ test_invalid_requires:
+ register: invalid_options
+
+- name: assert test out invalid options
+ assert:
+ that:
+ - invalid_options is successful
+ - invalid_options.output == "output"
+
+- name: test out invalid os version
+ test_min_os_version:
+ register: invalid_os_version
+ ignore_errors: yes
+
+- name: assert test out invalid os version
+ assert:
+ that:
+ - invalid_os_version is failed
+ - '"This module cannot run on this OS as it requires a minimum version of 20.0, actual was " in invalid_os_version.msg'
+
+- name: test out invalid powershell version
+ test_min_ps_version:
+ register: invalid_ps_version
+ ignore_errors: yes
+
+- name: assert test out invalid powershell version
+ assert:
+ that:
+ - invalid_ps_version is failed
+ - '"This module cannot run as it requires a minimum PowerShell version of 20.0.0.0, actual was " in invalid_ps_version.msg'
+
+- name: test out environment block for task
+ win_shell: set
+ args:
+ executable: cmd.exe
+ environment:
+ String: string value
+ Int: 1234
+ Bool: True
+ double_quote: 'double " quote'
+ single_quote: "single ' quote"
+ hyphen-var: abc@123
+ '_-(){}[]<>*+-/\?"''!@#$%^&|;:i,.`~0': '_-(){}[]<>*+-/\?"''!@#$%^&|;:i,.`~0'
+ '‘key': 'value‚'
+ register: environment_block
+
+- name: assert environment block for task
+ assert:
+ that:
+ - '"String=string value" in environment_block.stdout_lines'
+ - '"Int=1234" in environment_block.stdout_lines'
+ - '"Bool=True" in environment_block.stdout_lines'
+ - '"double_quote=double \" quote" in environment_block.stdout_lines'
+ - '"single_quote=single '' quote" in environment_block.stdout_lines'
+ - '"hyphen-var=abc@123" in environment_block.stdout_lines'
+ # yaml escaping rules - (\\ == \), (\" == "), ('' == ')
+ - '"_-(){}[]<>*+-/\\?\"''!@#$%^&|;:i,.`~0=_-(){}[]<>*+-/\\?\"''!@#$%^&|;:i,.`~0" in environment_block.stdout_lines'
+ - '"‘key=value‚" in environment_block.stdout_lines'
+
+- name: test out become requires without become_user set
+ test_all_options:
+ register: become_system
+
+- name: assert become requires without become_user set
+ assert:
+ that:
+ - become_system is successful
+ - become_system.output == "S-1-5-18"
+
+- set_fact:
+ become_test_username: ansible_become_test
+ gen_pw: "{{ 'password123!' + lookup('password', '/dev/null chars=ascii_letters,digits length=8') }}"
+
+- name: create unprivileged user
+ win_user:
+ name: "{{ become_test_username }}"
+ password: "{{ gen_pw }}"
+ update_password: always
+ groups: Users
+ register: become_test_user_result
+
+- name: execute tests and ensure that test user is deleted regardless of success/failure
+ block:
+ - name: ensure current user is not the become user
+ win_shell: whoami
+ register: whoami_out
+
+ - name: verify output
+ assert:
+ that:
+ - not whoami_out.stdout_lines[0].endswith(become_test_username)
+
+ - name: get become user profile dir so we can clean it up later
+ vars: &become_vars
+ ansible_become_user: "{{ become_test_username }}"
+ ansible_become_password: "{{ gen_pw }}"
+ ansible_become_method: runas
+ ansible_become: yes
+ win_shell: $env:USERPROFILE
+ register: profile_dir_out
+
+ - name: ensure profile dir contains test username (e.g. if become fails silently, prevent deletion of the real user profile)
+ assert:
+ that:
+ - become_test_username in profile_dir_out.stdout_lines[0]
+
+ - name: test out become requires when become_user set
+ test_all_options:
+ vars: *become_vars
+ register: become_system
+
+ - name: assert become requires when become_user set
+ assert:
+ that:
+ - become_system is successful
+ - become_system.output == become_test_user_result.sid
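+ # The two tasks above share become settings through a YAML anchor: '&become_vars'
+ # names the vars mapping where it is first written and '*become_vars' reuses the
+ # same mapping, so the credentials are defined once.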
+
+ always:
+ - name: ensure test user is deleted
+ win_user:
+ name: "{{ become_test_username }}"
+ state: absent
+
+ - name: ensure test user profile is deleted
+ # NB: we have to work around a PowerShell limitation with long file names until win_file fixes it
+ win_shell: rmdir /S /Q {{ profile_dir_out.stdout_lines[0] }}
+ args:
+ executable: cmd.exe
+ when: become_test_username in profile_dir_out.stdout_lines[0]
+
+- name: test common functions in exec
+ test_common_functions:
+ register: common_functions_res
+
+- name: assert test common functions in exec
+ assert:
+ that:
+ - not common_functions_res is failed
+ - common_functions_res.msg == "good"
+
+- name: get PS events containing module args or envvars created since test start
+ raw: |
+ $dt=[datetime]"{{ test_starttime.stdout|trim }}"
+ (Get-WinEvent -LogName Microsoft-Windows-Powershell/Operational |
+ ? { $_.TimeCreated -ge $dt -and $_.Message -match "fail_module|hyphen-var" }).Count
+ register: ps_log_count
+
+- name: assert no PS events contain module args or envvars
+ assert:
+ that:
+ - ps_log_count.stdout | int == 0
diff --git a/test/integration/targets/win_fetch/aliases b/test/integration/targets/win_fetch/aliases
new file mode 100644
index 00000000..4cd27b3c
--- /dev/null
+++ b/test/integration/targets/win_fetch/aliases
@@ -0,0 +1 @@
+shippable/windows/group1
diff --git a/test/integration/targets/win_fetch/meta/main.yml b/test/integration/targets/win_fetch/meta/main.yml
new file mode 100644
index 00000000..9f37e96c
--- /dev/null
+++ b/test/integration/targets/win_fetch/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_remote_tmp_dir
diff --git a/test/integration/targets/win_fetch/tasks/main.yml b/test/integration/targets/win_fetch/tasks/main.yml
new file mode 100644
index 00000000..78b6fa02
--- /dev/null
+++ b/test/integration/targets/win_fetch/tasks/main.yml
@@ -0,0 +1,212 @@
+# test code for the fetch module when using winrm connection
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: define host-specific host_output_dir
+ set_fact:
+ host_output_dir: "{{ output_dir }}/{{ inventory_hostname }}"
+
+- name: clean out the test directory
+ file: name={{ host_output_dir|mandatory }} state=absent
+ delegate_to: localhost
+ run_once: true
+
+- name: create the test directory
+ file: name={{ host_output_dir }} state=directory
+ delegate_to: localhost
+ run_once: true
+
+- name: fetch a small file
+ fetch: src="C:/Windows/win.ini" dest={{ host_output_dir }}
+ register: fetch_small
+
+- name: check fetch small result
+ assert:
+ that:
+ - "fetch_small.changed"
+
+- name: check file created by fetch small
+ stat: path={{ fetch_small.dest }}
+ delegate_to: localhost
+ register: fetch_small_stat
+
+- name: verify fetched small file exists locally
+ assert:
+ that:
+ - "fetch_small_stat.stat.exists"
+ - "fetch_small_stat.stat.isreg"
+ - "fetch_small_stat.stat.checksum == fetch_small.checksum"
+
+- name: fetch the same small file
+ fetch: src="C:/Windows/win.ini" dest={{ host_output_dir }}
+ register: fetch_small_again
+
+- name: check fetch small result again
+ assert:
+ that:
+ - "not fetch_small_again.changed"
+
+- name: fetch a small file to flat namespace
+ fetch: src="C:/Windows/win.ini" dest="{{ host_output_dir }}/" flat=yes
+ register: fetch_flat
+
+- name: check fetch flat result
+ assert:
+ that:
+ - "fetch_flat.changed"
+
+- name: check file created by fetch flat
+ stat: path="{{ host_output_dir }}/win.ini"
+ delegate_to: localhost
+ register: fetch_flat_stat
+
+- name: verify fetched file exists locally in host_output_dir
+ assert:
+ that:
+ - "fetch_flat_stat.stat.exists"
+ - "fetch_flat_stat.stat.isreg"
+ - "fetch_flat_stat.stat.checksum == fetch_flat.checksum"
+
+#- name: fetch a small file to flat directory (without trailing slash)
+# fetch: src="C:/Windows/win.ini" dest="{{ host_output_dir }}" flat=yes
+# register: fetch_flat_dir
+
+#- name: check fetch flat to directory result
+# assert:
+# that:
+# - "fetch_flat_dir is not changed"
+
+- name: fetch a large binary file
+ fetch: src="C:/Windows/explorer.exe" dest={{ host_output_dir }}
+ register: fetch_large
+
+- name: check fetch large binary file result
+ assert:
+ that:
+ - "fetch_large.changed"
+
+- name: check file created by fetch large binary
+ stat: path={{ fetch_large.dest }}
+ delegate_to: localhost
+ register: fetch_large_stat
+
+- name: verify fetched large file exists locally
+ assert:
+ that:
+ - "fetch_large_stat.stat.exists"
+ - "fetch_large_stat.stat.isreg"
+ - "fetch_large_stat.stat.checksum == fetch_large.checksum"
+
+- name: fetch a large binary file again
+ fetch: src="C:/Windows/explorer.exe" dest={{ host_output_dir }}
+ register: fetch_large_again
+
+- name: check fetch large binary file result again
+ assert:
+ that:
+ - "not fetch_large_again.changed"
+
+- name: fetch a small file using backslashes in src path
+ fetch: src="C:\\Windows\\system.ini" dest={{ host_output_dir }}
+ register: fetch_small_bs
+
+- name: check fetch small result with backslashes
+ assert:
+ that:
+ - "fetch_small_bs.changed"
+
+- name: check file created by fetch small with backslashes
+ stat: path={{ fetch_small_bs.dest }}
+ delegate_to: localhost
+ register: fetch_small_bs_stat
+
+- name: verify fetched small file with backslashes exists locally
+ assert:
+ that:
+ - "fetch_small_bs_stat.stat.exists"
+ - "fetch_small_bs_stat.stat.isreg"
+ - "fetch_small_bs_stat.stat.checksum == fetch_small_bs.checksum"
+
+- name: attempt to fetch a non-existent file - do not fail on missing
+ fetch: src="C:/this_file_should_not_exist.txt" dest={{ host_output_dir }} fail_on_missing=no
+ register: fetch_missing_nofail
+
+- name: check fetch missing no fail result
+ assert:
+ that:
+ - "fetch_missing_nofail is not failed"
+ - "fetch_missing_nofail.msg"
+ - "fetch_missing_nofail is not changed"
+
+- name: attempt to fetch a non-existent file - fail on missing
+ fetch: src="~/this_file_should_not_exist.txt" dest={{ host_output_dir }} fail_on_missing=yes
+ register: fetch_missing
+ ignore_errors: true
+
+- name: check fetch missing with failure
+ assert:
+ that:
+ - "fetch_missing is failed"
+ - "fetch_missing.msg"
+ - "fetch_missing is not changed"
+
+- name: attempt to fetch a non-existent file - fail on missing implicit
+ fetch: src="~/this_file_should_not_exist.txt" dest={{ host_output_dir }}
+ register: fetch_missing_implicit
+ ignore_errors: true
+
+- name: check fetch missing with failure on implicit
+ assert:
+ that:
+ - "fetch_missing_implicit is failed"
+ - "fetch_missing_implicit.msg"
+ - "fetch_missing_implicit is not changed"
+
+- name: attempt to fetch a directory
+ fetch: src="C:\\Windows" dest={{ host_output_dir }}
+ register: fetch_dir
+ ignore_errors: true
+
+- name: check fetch directory result
+ assert:
+ that:
+ # Doesn't fail anymore, only returns a message.
+ - "fetch_dir is not changed"
+ - "fetch_dir.msg"
+
+- name: create file with special characters
+ raw: Set-Content -LiteralPath '{{ remote_tmp_dir }}\abc$not var''quote‘‘' -Value 'abc'
+
+- name: fetch file with special characters
+ fetch:
+ src: '{{ remote_tmp_dir }}\abc$not var''quote‘'
+ dest: '{{ host_output_dir }}/'
+ flat: yes
+ register: fetch_special_file
+
+- name: get content of fetched file
+ command: cat {{ (host_output_dir ~ "/abc$not var'quote‘") | quote }}
+ register: fetch_special_file_actual
+ delegate_to: localhost
+
+- name: assert fetch file with special characters
+ assert:
+ that:
+ - fetch_special_file is changed
+ - fetch_special_file.checksum == '34d4150adc3347f1dd8ce19fdf65b74d971ab602'
+ - fetch_special_file.dest == host_output_dir + "/abc$not var'quote‘"
+ - fetch_special_file_actual.stdout == 'abc'
diff --git a/test/integration/targets/win_module_utils/aliases b/test/integration/targets/win_module_utils/aliases
new file mode 100644
index 00000000..1eed2ecf
--- /dev/null
+++ b/test/integration/targets/win_module_utils/aliases
@@ -0,0 +1,2 @@
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_module_utils/library/csharp_util.ps1 b/test/integration/targets/win_module_utils/library/csharp_util.ps1
new file mode 100644
index 00000000..cf2dc452
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/csharp_util.ps1
@@ -0,0 +1,12 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#AnsibleRequires -CSharpUtil Ansible.Test
+
+$result = @{
+ res = [Ansible.Test.OutputTest]::GetString()
+ changed = $false
+}
+
+Exit-Json -obj $result
+
diff --git a/test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1
new file mode 100644
index 00000000..8ea3e061
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/legacy_only_new_way.ps1
@@ -0,0 +1,5 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+Exit-Json @{ data="success" }
diff --git a/test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1
new file mode 100644
index 00000000..d9c2e008
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1
@@ -0,0 +1,6 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+Exit-Json @{ data="success" }
+
diff --git a/test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1
new file mode 100644
index 00000000..652e1281
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/legacy_only_old_way.ps1
@@ -0,0 +1,5 @@
+#!powershell
+
+# POWERSHELL_COMMON
+
+Exit-Json @{ data="success" }
diff --git a/test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 b/test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1
new file mode 100644
index 00000000..d5d328a5
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1
@@ -0,0 +1,4 @@
+#!powershell
+# POWERSHELL_COMMON
+
+Exit-Json @{ data="success" }
diff --git a/test/integration/targets/win_module_utils/library/recursive_requires.ps1 b/test/integration/targets/win_module_utils/library/recursive_requires.ps1
new file mode 100644
index 00000000..db8c23e9
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/recursive_requires.ps1
@@ -0,0 +1,13 @@
+#!powershell
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.Recursive3
+#Requires -Version 2
+
+$ErrorActionPreference = "Stop"
+
+$result = @{
+ changed = $false
+ value = Get-Test3
+}
+Exit-Json -obj $result
diff --git a/test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1 b/test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1
new file mode 100644
index 00000000..0a1c21a3
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/uses_bogus_utils.ps1
@@ -0,0 +1,6 @@
+#!powershell
+
+# this should fail
+#Requires -Module Ansible.ModuleUtils.BogusModule
+
+Exit-Json @{ data="success" }
diff --git a/test/integration/targets/win_module_utils/library/uses_local_utils.ps1 b/test/integration/targets/win_module_utils/library/uses_local_utils.ps1
new file mode 100644
index 00000000..3dfc940c
--- /dev/null
+++ b/test/integration/targets/win_module_utils/library/uses_local_utils.ps1
@@ -0,0 +1,9 @@
+#!powershell
+
+# use different cases, spacing and the plural of 'module' to exercise the flexible powershell requires dialect
+#ReQuiReS -ModUleS Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.ValidTestModule
+
+$o = CustomFunction
+
+Exit-Json @{data=$o}
diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1
new file mode 100644
index 00000000..a63ece34
--- /dev/null
+++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive1.psm1
@@ -0,0 +1,9 @@
+Function Get-Test1 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "Get-Test1"
+}
+
+Export-ModuleMember -Function Get-Test1
diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1
new file mode 100644
index 00000000..f9c07ca7
--- /dev/null
+++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive2.psm1
@@ -0,0 +1,12 @@
+#Requires -Module Ansible.ModuleUtils.Recursive1
+#Requires -Module Ansible.ModuleUtils.Recursive3
+
+Function Get-Test2 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "Get-Test2, 1: $(Get-Test1), 3: $(Get-NewTest3)"
+}
+
+Export-ModuleMember -Function Get-Test2
diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1
new file mode 100644
index 00000000..ce6e70c1
--- /dev/null
+++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.Recursive3.psm1
@@ -0,0 +1,20 @@
+#Requires -Module Ansible.ModuleUtils.Recursive2
+#Requires -Version 3.0
+
+Function Get-Test3 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "Get-Test3: 2: $(Get-Test2)"
+}
+
+Function Get-NewTest3 {
+ <#
+ .SYNOPSIS
+ Test function
+ #>
+ return "Get-NewTest3"
+}
+
+Export-ModuleMember -Function Get-Test3, Get-NewTest3
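+# Resolution sketch (derived from the modules in this target): recursive_requires.ps1
+# requires Recursive3, which requires Recursive2, which in turn requires both
+# Recursive1 and Recursive3 again, so Get-Test3 expands to
+# "Get-Test3: 2: Get-Test2, 1: Get-Test1, 3: Get-NewTest3".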
diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1 b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1
new file mode 100644
index 00000000..a60b799f
--- /dev/null
+++ b/test/integration/targets/win_module_utils/module_utils/Ansible.ModuleUtils.ValidTestModule.psm1
@@ -0,0 +1,3 @@
+Function CustomFunction {
+ return "ValueFromCustomFunction"
+}
diff --git a/test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs b/test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs
new file mode 100644
index 00000000..9556d9af
--- /dev/null
+++ b/test/integration/targets/win_module_utils/module_utils/Ansible.Test.cs
@@ -0,0 +1,26 @@
+//AssemblyReference -Name System.Web.Extensions.dll
+
+using System;
+using System.Collections.Generic;
+using System.Web.Script.Serialization;
+
+namespace Ansible.Test
+{
+ public class OutputTest
+ {
+ public static string GetString()
+ {
+ Dictionary<string, object> obj = new Dictionary<string, object>();
+ obj["a"] = "a";
+ obj["b"] = 1;
+ return ToJson(obj);
+ }
+
+ private static string ToJson(object obj)
+ {
+ JavaScriptSerializer jss = new JavaScriptSerializer();
+ return jss.Serialize(obj);
+ }
+ }
+}
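+// Usage sketch (derived from csharp_util.ps1 in this target): a module opts in
+// with '#AnsibleRequires -CSharpUtil Ansible.Test' and then
+// [Ansible.Test.OutputTest]::GetString() yields {"a":"a","b":1}.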
+
diff --git a/test/integration/targets/win_module_utils/tasks/main.yml b/test/integration/targets/win_module_utils/tasks/main.yml
new file mode 100644
index 00000000..87f2592c
--- /dev/null
+++ b/test/integration/targets/win_module_utils/tasks/main.yml
@@ -0,0 +1,71 @@
+- name: call old WANTS_JSON module
+ legacy_only_old_way:
+ register: old_way
+
+- assert:
+ that:
+ - old_way.data == 'success'
+
+- name: call module with only legacy requires
+ legacy_only_new_way:
+ register: new_way
+
+- assert:
+ that:
+ - new_way.data == 'success'
+
+- name: call old WANTS_JSON module with windows line endings
+ legacy_only_old_way_win_line_ending:
+ register: old_way_win
+
+- assert:
+ that:
+ - old_way_win.data == 'success'
+
+- name: call module with only legacy requires and windows line endings
+ legacy_only_new_way_win_line_ending:
+ register: new_way_win
+
+- assert:
+ that:
+ - new_way_win.data == 'success'
+
+- name: call module with local module_utils
+ uses_local_utils:
+ register: local_utils
+
+- assert:
+ that:
+ - local_utils.data == "ValueFromCustomFunction"
+
+- name: call module that imports bogus Ansible-named module_utils
+ uses_bogus_utils:
+ ignore_errors: true
+ register: bogus_utils
+
+- assert:
+ that:
+ - bogus_utils is failed
+ - bogus_utils.msg is search("Could not find")
+
+- name: call module that imports module_utils with further imports
+ recursive_requires:
+ register: recursive_requires
+ vars:
+ # Our coverage runner does not work with recursive requires. This is a limitation of PowerShell, so we need to
+ # disable coverage for this task.
+ _ansible_coverage_remote_output: ''
+
+- assert:
+ that:
+ - 'recursive_requires.value == "Get-Test3: 2: Get-Test2, 1: Get-Test1, 3: Get-NewTest3"'
+
+- name: call module with C# reference
+ csharp_util:
+ register: csharp_res
+
+- name: assert call module with C# reference
+ assert:
+ that:
+ - not csharp_res is failed
+ - csharp_res.res == '{"a":"a","b":1}'
diff --git a/test/integration/targets/win_raw/aliases b/test/integration/targets/win_raw/aliases
new file mode 100644
index 00000000..1eed2ecf
--- /dev/null
+++ b/test/integration/targets/win_raw/aliases
@@ -0,0 +1,2 @@
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_raw/tasks/main.yml b/test/integration/targets/win_raw/tasks/main.yml
new file mode 100644
index 00000000..31f90b85
--- /dev/null
+++ b/test/integration/targets/win_raw/tasks/main.yml
@@ -0,0 +1,143 @@
+# test code for the raw module when using winrm connection
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: run getmac
+ raw: getmac
+ register: getmac_result
+
+- name: assert that getmac ran
+ assert:
+ that:
+ - "getmac_result.rc == 0"
+ - "getmac_result.stdout"
+ - "not getmac_result.stderr"
+ - "getmac_result is not failed"
+ - "getmac_result is changed"
+
+- name: run ipconfig with /all argument
+ raw: ipconfig /all
+ register: ipconfig_result
+
+- name: assert that ipconfig ran with /all argument
+ assert:
+ that:
+ - "ipconfig_result.rc == 0"
+ - "ipconfig_result.stdout"
+ - "'Physical Address' in ipconfig_result.stdout"
+ - "not ipconfig_result.stderr"
+ - "ipconfig_result is not failed"
+ - "ipconfig_result is changed"
+
+- name: run ipconfig with invalid argument
+ raw: ipconfig /badswitch
+ register: ipconfig_invalid_result
+ ignore_errors: true
+
+- name: assert that ipconfig with invalid argument failed
+ assert:
+ that:
+ - "ipconfig_invalid_result.rc != 0"
+ - "ipconfig_invalid_result.stdout" # ipconfig displays errors on stdout.
+# - "not ipconfig_invalid_result.stderr"
+ - "ipconfig_invalid_result is failed"
+ - "ipconfig_invalid_result is changed"
+
+- name: run an unknown command
+ raw: uname -a
+ register: unknown_result
+ ignore_errors: true
+
+- name: assert that an unknown command failed
+ assert:
+ that:
+ - "unknown_result.rc != 0"
+ - "not unknown_result.stdout"
+ - "unknown_result.stderr" # An unknown command displays error on stderr.
+ - "unknown_result is failed"
+ - "unknown_result is changed"
+
+- name: run a command that takes longer than 60 seconds
+ raw: Start-Sleep -s 75
+ register: sleep_command
+
+- name: assert that the sleep command ran
+ assert:
+ that:
+ - "sleep_command.rc == 0"
+ - "not sleep_command.stdout"
+ - "not sleep_command.stderr"
+ - "sleep_command is not failed"
+ - "sleep_command is changed"
+
+- name: run a raw command with key=value arguments
+ raw: echo wwe=raw
+ register: raw_result
+
+- name: make sure raw is really raw and not removing key=value arguments
+ assert:
+ that:
+ - "raw_result.stdout_lines[0] == 'wwe=raw'"
+
+- name: unicode tests for winrm
+ when: ansible_connection != 'psrp' # Write-Host does not work over PSRP
+ block:
+ - name: run a raw command with unicode chars and quoted args (from https://github.com/ansible/ansible-modules-core/issues/1929)
+ raw: Write-Host --% icacls D:\somedir\ /grant "! ЗАО. Руководство":F
+ register: raw_result2
+
+ - name: make sure raw passes command as-is and doesn't split/rejoin args
+ assert:
+ that:
+ - "raw_result2.stdout_lines[0] == '--% icacls D:\\\\somedir\\\\ /grant \"! ЗÐО. РуководÑтво\":F'"
+
+- name: unicode tests for psrp
+ when: ansible_connection == 'psrp'
+ block:
+ # Cannot test unicode passed into a separate exec as PSRP doesn't run with a preset CP of 65001, which results in '?' for unicode chars
+ - name: run a raw command with unicode chars
+ raw: Write-Output "! ЗАО. Руководство"
+ register: raw_result2
+
+ - name: make sure raw passes command as-is and doesn't split/rejoin args
+ assert:
+ that:
+ - "raw_result2.stdout_lines[0] == '! ЗÐО. РуководÑтво'"
+
+# Assumes MaxShellsPerUser == 30 (the default); running 32 items would exhaust
+# that quota if each item opened a new shell, so success implies the connection
+# is reused
+
+- name: test raw + with_items to verify that winrm connection is reused for each item
+ raw: echo "{{item}}"
+ with_items: "{{range(32)|list}}"
+ register: raw_with_items_result
+
+- name: check raw + with_items result
+ assert:
+ that:
+ - "raw_with_items_result is not failed"
+ - "raw_with_items_result.results|length == 32"
+
+# TODO: this test fails, since we're back to passing raw commands without modification
+#- name: test raw with job to ensure that preamble-free InputEncoding is working
+# raw: Start-Job { echo yo } | Receive-Job -Wait
+# register: raw_job_result
+#
+#- name: check raw with job result
+# assert:
+# that:
+# - raw_job_result is successful
+# - raw_job_result.stdout_lines[0] == 'yo'
diff --git a/test/integration/targets/win_script/aliases b/test/integration/targets/win_script/aliases
new file mode 100644
index 00000000..1eed2ecf
--- /dev/null
+++ b/test/integration/targets/win_script/aliases
@@ -0,0 +1,2 @@
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/win_script/defaults/main.yml b/test/integration/targets/win_script/defaults/main.yml
new file mode 100644
index 00000000..a2c6475e
--- /dev/null
+++ b/test/integration/targets/win_script/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+
+# Parameters to pass to test scripts.
+test_win_script_value: VaLuE
+test_win_script_splat: "@{This='THIS'; That='THAT'; Other='OTHER'}"
diff --git a/test/integration/targets/win_script/files/fail.bat b/test/integration/targets/win_script/files/fail.bat
new file mode 100644
index 00000000..02562a8a
--- /dev/null
+++ b/test/integration/targets/win_script/files/fail.bat
@@ -0,0 +1 @@
+bang-run-a-thing-that-doesnt-exist
diff --git a/test/integration/targets/win_script/files/space path/test_script.ps1 b/test/integration/targets/win_script/files/space path/test_script.ps1
new file mode 100644
index 00000000..10dd9c8c
--- /dev/null
+++ b/test/integration/targets/win_script/files/space path/test_script.ps1
@@ -0,0 +1 @@
+Write-Output "Ansible supports spaces in the path to the script."
diff --git a/test/integration/targets/win_script/files/test_script.bat b/test/integration/targets/win_script/files/test_script.bat
new file mode 100644
index 00000000..05cc2d19
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script.bat
@@ -0,0 +1,2 @@
+@ECHO OFF
+ECHO We can even run a batch file!
diff --git a/test/integration/targets/win_script/files/test_script.cmd b/test/integration/targets/win_script/files/test_script.cmd
new file mode 100644
index 00000000..0e36312d
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script.cmd
@@ -0,0 +1,2 @@
+@ECHO OFF
+ECHO We can even run a batch file with cmd extension!
diff --git a/test/integration/targets/win_script/files/test_script.ps1 b/test/integration/targets/win_script/files/test_script.ps1
new file mode 100644
index 00000000..9978f363
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script.ps1
@@ -0,0 +1,2 @@
+# Test script to make sure the Ansible script module works.
+Write-Host "Woohoo! We can run a PowerShell script via Ansible!"
diff --git a/test/integration/targets/win_script/files/test_script_bool.ps1 b/test/integration/targets/win_script/files/test_script_bool.ps1
new file mode 100644
index 00000000..970dedce
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_bool.ps1
@@ -0,0 +1,6 @@
+Param(
+[bool]$boolvariable
+)
+
+Write-Output $boolvariable.GetType().FullName
+Write-Output $boolvariable
diff --git a/test/integration/targets/win_script/files/test_script_creates_file.ps1 b/test/integration/targets/win_script/files/test_script_creates_file.ps1
new file mode 100644
index 00000000..47f85a2d
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_creates_file.ps1
@@ -0,0 +1,3 @@
+# Test script to create a file.
+
+echo $null > $args[0]
diff --git a/test/integration/targets/win_script/files/test_script_removes_file.ps1 b/test/integration/targets/win_script/files/test_script_removes_file.ps1
new file mode 100644
index 00000000..f0549a5b
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_removes_file.ps1
@@ -0,0 +1,3 @@
+# Test script to remove a file.
+
+Remove-Item $args[0] -Force
diff --git a/test/integration/targets/win_script/files/test_script_whoami.ps1 b/test/integration/targets/win_script/files/test_script_whoami.ps1
new file mode 100644
index 00000000..79a1c475
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_whoami.ps1
@@ -0,0 +1,2 @@
+whoami.exe
+Write-Output "finished"
diff --git a/test/integration/targets/win_script/files/test_script_with_args.ps1 b/test/integration/targets/win_script/files/test_script_with_args.ps1
new file mode 100644
index 00000000..520aafa3
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_with_args.ps1
@@ -0,0 +1,7 @@
+# Test script to make sure the Ansible script module works when arguments are
+# passed to the script.
+
+foreach ($i in $args)
+{
+ Write-Host $i;
+}
diff --git a/test/integration/targets/win_script/files/test_script_with_env.ps1 b/test/integration/targets/win_script/files/test_script_with_env.ps1
new file mode 100644
index 00000000..b54fd928
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_with_env.ps1
@@ -0,0 +1 @@
+$env:taskenv
\ No newline at end of file
diff --git a/test/integration/targets/win_script/files/test_script_with_errors.ps1 b/test/integration/targets/win_script/files/test_script_with_errors.ps1
new file mode 100644
index 00000000..2d60dc1f
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_with_errors.ps1
@@ -0,0 +1,9 @@
+# Test script to make sure we handle non-zero exit codes.
+
+trap
+{
+ Write-Error -ErrorRecord $_
+ exit 1;
+}
+
+throw "Oh noes I has an error"
diff --git a/test/integration/targets/win_script/files/test_script_with_splatting.ps1 b/test/integration/targets/win_script/files/test_script_with_splatting.ps1
new file mode 100644
index 00000000..429a9a3b
--- /dev/null
+++ b/test/integration/targets/win_script/files/test_script_with_splatting.ps1
@@ -0,0 +1,6 @@
+# Test script to make sure the Ansible script module works when arguments are
+# passed via splatting (http://technet.microsoft.com/en-us/magazine/gg675931.aspx)
+
+Write-Host $args.This
+Write-Host $args.That
+Write-Host $args.Other
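+# Invocation sketch (taken from tasks/main.yml in this target):
+#   test_script_with_splatting.ps1 @{ This = 'this'; That = 'that'; Other = 'other' }
+# the hashtable lands in $args, so its keys are read via $args.This and so on.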
diff --git a/test/integration/targets/win_script/tasks/main.yml b/test/integration/targets/win_script/tasks/main.yml
new file mode 100644
index 00000000..4d57eda2
--- /dev/null
+++ b/test/integration/targets/win_script/tasks/main.yml
@@ -0,0 +1,316 @@
+# test code for the script module when using winrm connection
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: run setup to allow skipping OS-specific tests
+ setup:
+ gather_subset: min
+
+- name: get tempdir path
+ raw: $env:TEMP
+ register: tempdir
+
+- name: set script path dynamically
+ set_fact:
+ test_win_script_filename: "{{ tempdir.stdout_lines[0] }}/testing_win_script.txt"
+
+- name: run simple test script
+ script: test_script.ps1
+ register: test_script_result
+
+- name: check that script ran
+ assert:
+ that:
+ - "test_script_result.rc == 0"
+ - "test_script_result.stdout"
+ - "'Woohoo' in test_script_result.stdout"
+ - "not test_script_result.stderr"
+ - "test_script_result is not failed"
+ - "test_script_result is changed"
+
+- name: run test script that takes arguments including a unicode char
+ script: test_script_with_args.ps1 /this /that /Ӧther
+ register: test_script_with_args_result
+
+- name: check that script ran and received arguments and returned unicode
+ assert:
+ that:
+ - "test_script_with_args_result.rc == 0"
+ - "test_script_with_args_result.stdout"
+ - "test_script_with_args_result.stdout_lines[0] == '/this'"
+ - "test_script_with_args_result.stdout_lines[1] == '/that'"
+ - "test_script_with_args_result.stdout_lines[2] == '/Ó¦ther'"
+ - "not test_script_with_args_result.stderr"
+ - "test_script_with_args_result is not failed"
+ - "test_script_with_args_result is changed"
+
+# Bug: https://github.com/ansible/ansible/issues/32850
+- name: set fact of long string
+ set_fact:
+ long_string: "{{ lookup('pipe', 'printf \"a%.0s\" {1..1000}') }}"
+
+- name: run test script with args that exceed the stdin buffer
+ script: test_script_with_args.ps1 {{ long_string }}
+ register: test_script_with_large_args_result
+
+- name: check that script ran and received arguments correctly
+ assert:
+ that:
+ - test_script_with_large_args_result.rc == 0
+ - not test_script_with_large_args_result.stderr
+ - test_script_with_large_args_result is not failed
+ - test_script_with_large_args_result is changed
+
+- name: check that script ran and received arguments correctly with winrm output
+ assert:
+ that:
+ - test_script_with_large_args_result.stdout == long_string + "\r\n"
+ when: ansible_connection != 'psrp'
+
+- name: check that script ran and received arguments correctly with psrp output
+ assert:
+ that:
+ - test_script_with_large_args_result.stdout == long_string
+ when: ansible_connection == 'psrp'
+
+- name: run test script that takes parameters passed via splatting
+ script: test_script_with_splatting.ps1 @{ This = 'this'; That = '{{ test_win_script_value }}'; Other = 'other'}
+ register: test_script_with_splatting_result
+
+- name: check that script ran and received parameters via splatting
+ assert:
+ that:
+ - "test_script_with_splatting_result.rc == 0"
+ - "test_script_with_splatting_result.stdout"
+ - "test_script_with_splatting_result.stdout_lines[0] == 'this'"
+ - "test_script_with_splatting_result.stdout_lines[1] == test_win_script_value"
+ - "test_script_with_splatting_result.stdout_lines[2] == 'other'"
+ - "not test_script_with_splatting_result.stderr"
+ - "test_script_with_splatting_result is not failed"
+ - "test_script_with_splatting_result is changed"
+
+- name: run test script that takes splatted parameters from a variable
+ script: test_script_with_splatting.ps1 {{ test_win_script_splat }}
+ register: test_script_with_splatting2_result
+
+- name: check that script ran and received parameters via splatting from a variable
+ assert:
+ that:
+ - "test_script_with_splatting2_result.rc == 0"
+ - "test_script_with_splatting2_result.stdout"
+ - "test_script_with_splatting2_result.stdout_lines[0] == 'THIS'"
+ - "test_script_with_splatting2_result.stdout_lines[1] == 'THAT'"
+ - "test_script_with_splatting2_result.stdout_lines[2] == 'OTHER'"
+ - "not test_script_with_splatting2_result.stderr"
+ - "test_script_with_splatting2_result is not failed"
+ - "test_script_with_splatting2_result is changed"
+
+- name: run test script that has errors
+ script: test_script_with_errors.ps1
+ register: test_script_with_errors_result
+ ignore_errors: true
+
+- name: check that script ran but failed with errors
+ assert:
+ that:
+ - "test_script_with_errors_result.rc != 0"
+ - "not test_script_with_errors_result.stdout"
+ - "test_script_with_errors_result.stderr"
+ - "test_script_with_errors_result is failed"
+ - "test_script_with_errors_result is changed"
+
+- name: cleanup test file if it exists
+ raw: Remove-Item "{{ test_win_script_filename }}" -Force
+ ignore_errors: true
+
+- name: run test script that creates a file
+ script: test_script_creates_file.ps1 {{ test_win_script_filename }}
+ args:
+ creates: "{{ test_win_script_filename }}"
+ register: test_script_creates_file_result
+
+- name: check that script ran and indicated a change
+ assert:
+ that:
+ - "test_script_creates_file_result.rc == 0"
+ - "not test_script_creates_file_result.stdout"
+ - "not test_script_creates_file_result.stderr"
+ - "test_script_creates_file_result is not failed"
+ - "test_script_creates_file_result is changed"
+
+- name: run test script that creates a file again
+ script: test_script_creates_file.ps1 {{ test_win_script_filename }}
+ args:
+ creates: "{{ test_win_script_filename }}"
+ register: test_script_creates_file_again_result
+
+- name: check that the script did not run since the remote file exists
+ assert:
+ that:
+ - "test_script_creates_file_again_result is not failed"
+ - "test_script_creates_file_again_result is not changed"
+ - "test_script_creates_file_again_result is skipped"
+
+- name: run test script that removes a file
+ script: test_script_removes_file.ps1 {{ test_win_script_filename }}
+ args:
+ removes: "{{ test_win_script_filename }}"
+ register: test_script_removes_file_result
+
+- name: check that the script ran since the remote file exists
+ assert:
+ that:
+ - "test_script_removes_file_result.rc == 0"
+ - "not test_script_removes_file_result.stdout"
+ - "not test_script_removes_file_result.stderr"
+ - "test_script_removes_file_result is not failed"
+ - "test_script_removes_file_result is changed"
+
+- name: run test script that removes a file again
+ script: test_script_removes_file.ps1 {{ test_win_script_filename }}
+ args:
+ removes: "{{ test_win_script_filename }}"
+ register: test_script_removes_file_again_result
+
+- name: check that the script did not run since the remote file does not exist
+ assert:
+ that:
+ - "test_script_removes_file_again_result is not failed"
+ - "test_script_removes_file_again_result is not changed"
+ - "test_script_removes_file_again_result is skipped"
+
+- name: skip batch tests on 6.0 (UTF8 codepage prevents it from working, see https://github.com/ansible/ansible/issues/21915)
+ block:
+ - name: run simple batch file
+ script: test_script.bat
+ register: test_batch_result
+
+ - name: check that batch file ran
+ assert:
+ that:
+ - "test_batch_result.rc == 0"
+ - "test_batch_result.stdout"
+ - "'batch' in test_batch_result.stdout"
+ - "not test_batch_result.stderr"
+ - "test_batch_result is not failed"
+ - "test_batch_result is changed"
+
+ - name: run simple batch file with .cmd extension
+ script: test_script.cmd
+ register: test_cmd_result
+
+ - name: check that batch file with .cmd extension ran
+ assert:
+ that:
+ - "test_cmd_result.rc == 0"
+ - "test_cmd_result.stdout"
+ - "'cmd extension' in test_cmd_result.stdout"
+ - "not test_cmd_result.stderr"
+ - "test_cmd_result is not failed"
+ - "test_cmd_result is changed"
+
+ - name: run simple batch file with .bat extension that fails
+ script: fail.bat
+ ignore_errors: true
+ register: test_batch_result
+
+ - name: check that batch file with .bat extension reported failure
+ assert:
+ that:
+ - test_batch_result.rc == 1
+ - test_batch_result.stdout
+ - test_batch_result.stderr
+ - test_batch_result is failed
+ - test_batch_result is changed
+ when: not ansible_distribution_version.startswith('6.0')
+
+- name: run test script that takes a boolean parameter
+ script: test_script_bool.ps1 $false # use false as that can pick up more errors
+ register: test_script_bool_result
+
+- name: check that the script ran and the parameter was treated as a boolean
+ assert:
+ that:
+ - test_script_bool_result.stdout_lines[0] == 'System.Boolean'
+ - test_script_bool_result.stdout_lines[1] == 'False'
+
+- name: run test script that uses envvars
+ script: test_script_with_env.ps1
+ environment:
+ taskenv: task
+ register: test_script_env_result
+
+- name: ensure that script ran and that environment var was passed
+ assert:
+ that:
+ - test_script_env_result is successful
+ - test_script_env_result.stdout_lines[0] == 'task'
+
+# check mode
+- name: Run test script that creates a file in check mode
+ script: test_script_creates_file.ps1 {{ test_win_script_filename }}
+ args:
+ creates: "{{ test_win_script_filename }}"
+ check_mode: yes
+ register: test_script_creates_file_check_mode
+
+- name: Get state of file created by script
+ win_stat:
+ path: "{{ test_win_script_filename }}"
+ register: create_file_stat
+
+- name: Assert that a change was reported but the script did not make changes
+ assert:
+ that:
+ - test_script_creates_file_check_mode is changed
+ - not create_file_stat.stat.exists
+
+- name: Run test script that creates a file
+ script: test_script_creates_file.ps1 {{ test_win_script_filename }}
+ args:
+ creates: "{{ test_win_script_filename }}"
+
+- name: Run test script that removes a file in check mode
+ script: test_script_removes_file.ps1 {{ test_win_script_filename }}
+ args:
+ removes: "{{ test_win_script_filename }}"
+ check_mode: yes
+ register: test_script_removes_file_check_mode
+
+- name: Get state of file removed by script
+ win_stat:
+ path: "{{ test_win_script_filename }}"
+ register: remove_file_stat
+
+- name: Assert that a change was reported but the script did not make changes
+ assert:
+ that:
+ - test_script_removes_file_check_mode is changed
+ - remove_file_stat.stat.exists
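+# What the two check-mode runs above establish: with 'creates'/'removes' set,
+# check mode reports 'changed' from the path test alone and never executes the
+# script, leaving the files on disk untouched.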
+
+- name: run test script with become that outputs 2 lines
+ script: test_script_whoami.ps1
+ register: test_script_result_become
+ become: yes
+ become_user: SYSTEM
+ become_method: runas
+
+- name: check that the script ran and we get both outputs on new lines
+ assert:
+ that:
+ - test_script_result_become.stdout_lines[0]|lower == 'nt authority\\system'
+ - test_script_result_become.stdout_lines[1] == 'finished'
diff --git a/test/integration/targets/windows-minimal/aliases b/test/integration/targets/windows-minimal/aliases
new file mode 100644
index 00000000..479948a1
--- /dev/null
+++ b/test/integration/targets/windows-minimal/aliases
@@ -0,0 +1,4 @@
+shippable/windows/group1
+shippable/windows/minimal
+shippable/windows/smoketest
+windows
diff --git a/test/integration/targets/windows-minimal/library/win_ping.ps1 b/test/integration/targets/windows-minimal/library/win_ping.ps1
new file mode 100644
index 00000000..c848b912
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping.ps1
@@ -0,0 +1,21 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$spec = @{
+ options = @{
+ data = @{ type = "str"; default = "pong" }
+ }
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+ throw "boom"
+}
+
+$module.Result.ping = $data
+$module.ExitJson()
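+# Note on the pattern above (sketch, derived from this file): the $spec
+# hashtable is the argument spec: each option declares a type and an optional
+# default, supports_check_mode opts the module into check mode, and
+# [Ansible.Basic.AnsibleModule]::Create($args, $spec) validates the incoming
+# parameters against it before the module body runs.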
diff --git a/test/integration/targets/windows-minimal/library/win_ping.py b/test/integration/targets/windows-minimal/library/win_ping.py
new file mode 100644
index 00000000..6d35f379
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_ping
+version_added: "1.7"
+short_description: A windows version of the classic ping module
+description:
+ - Checks management connectivity of a windows host.
+ - This is NOT ICMP ping, this is just a trivial test module.
+ - For non-Windows targets, use the M(ping) module instead.
+ - For Network targets, use the M(net_ping) module instead.
+options:
+ data:
+ description:
+ - Alternate data to return instead of 'pong'.
+ - If this parameter is set to C(crash), the module will cause an exception.
+ type: str
+ default: pong
+seealso:
+- module: ping
+author:
+- Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+# Test connectivity to a windows host
+# ansible winserver -m win_ping
+
+- name: Example from an Ansible Playbook
+ win_ping:
+
+- name: Induce an exception to see what happens
+ win_ping:
+ data: crash
+'''
+
+RETURN = r'''
+ping:
+ description: Value provided with the data parameter.
+ returned: success
+ type: str
+ sample: pong
+'''
diff --git a/test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1 b/test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1
new file mode 100644
index 00000000..f1704964
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_set_attr.ps1
@@ -0,0 +1,31 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = "pong"
+};
+
+# Test that Set-Attr will replace an existing attribute.
+Set-Attr $result "ping" $data
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1 b/test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1
new file mode 100644
index 00000000..508174af
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_strict_mode_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args $true;
+
+$params.thisPropertyDoesNotExist
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1 b/test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1
new file mode 100644
index 00000000..d4c9f07a
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+$blah = 'I can't quote my strings correctly.'
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_throw.ps1 b/test/integration/targets/windows-minimal/library/win_ping_throw.ps1
new file mode 100644
index 00000000..7306f4d2
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_throw.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+throw
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1 b/test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1
new file mode 100644
index 00000000..09e3b7cb
--- /dev/null
+++ b/test/integration/targets/windows-minimal/library/win_ping_throw_string.ps1
@@ -0,0 +1,30 @@
+#!powershell
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# POWERSHELL_COMMON
+
+throw "no ping for you"
+
+$params = Parse-Args $args $true;
+
+$data = Get-Attr $params "data" "pong";
+
+$result = @{
+ changed = $false
+ ping = $data
+};
+
+Exit-Json $result;
diff --git a/test/integration/targets/windows-minimal/tasks/main.yml b/test/integration/targets/windows-minimal/tasks/main.yml
new file mode 100644
index 00000000..a7e6ba7f
--- /dev/null
+++ b/test/integration/targets/windows-minimal/tasks/main.yml
@@ -0,0 +1,67 @@
+# test code for the win_ping module
+# (c) 2014, Chris Church <chris@ninemoreminutes.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: test win_ping
+ action: win_ping
+ register: win_ping_result
+
+- name: check win_ping result
+ assert:
+ that:
+ - win_ping_result is not failed
+ - win_ping_result is not changed
+ - win_ping_result.ping == 'pong'
+
+- name: test win_ping with data
+ win_ping:
+ data: ☠
+ register: win_ping_with_data_result
+
+- name: check win_ping result with data
+ assert:
+ that:
+ - win_ping_with_data_result is not failed
+ - win_ping_with_data_result is not changed
+ - win_ping_with_data_result.ping == '☠'
+
+- name: test win_ping.ps1 with data as complex args
+ # win_ping.ps1: # TODO: do we want to actually support this? no other tests that I can see...
+ win_ping:
+ data: bleep
+ register: win_ping_ps1_result
+
+- name: check win_ping.ps1 result with data
+ assert:
+ that:
+ - win_ping_ps1_result is not failed
+ - win_ping_ps1_result is not changed
+ - win_ping_ps1_result.ping == 'bleep'
+
+- name: test win_ping using data=crash so that it throws an exception
+ win_ping:
+ data: crash
+ register: win_ping_crash_result
+ ignore_errors: yes
+
+- name: check win_ping_crash result
+ assert:
+ that:
+ - win_ping_crash_result is failed
+ - win_ping_crash_result is not changed
+ - 'win_ping_crash_result.msg == "Unhandled exception while executing module: boom"'
+ - '"throw \"boom\"" in win_ping_crash_result.exception'
diff --git a/test/integration/targets/windows-paths/aliases b/test/integration/targets/windows-paths/aliases
new file mode 100644
index 00000000..cf714783
--- /dev/null
+++ b/test/integration/targets/windows-paths/aliases
@@ -0,0 +1,3 @@
+windows
+shippable/windows/group1
+shippable/windows/smoketest
diff --git a/test/integration/targets/windows-paths/tasks/main.yml b/test/integration/targets/windows-paths/tasks/main.yml
new file mode 100644
index 00000000..4d222659
--- /dev/null
+++ b/test/integration/targets/windows-paths/tasks/main.yml
@@ -0,0 +1,191 @@
+- name: Set variables in YAML syntax
+ set_fact:
+ no_quotes_single: C:\Windows\Temp
+ single_quotes_single: 'C:\Windows\Temp'
+# double_quotes_single: "C:\Windows\Temp"
+ no_quotes_double: C:\\Windows\\Temp
+ single_quotes_double: 'C:\\Windows\\Temp'
+ double_quotes_double: "C:\\Windows\\Temp"
+ no_quotes_slash: C:/Windows/Temp
+ no_quotes_trailing: C:\Windows\Temp\
+ single_quotes_trailing: 'C:\Windows\Temp\'
+# double_quotes_trailing: "C:\Windows\Temp\"
+ good: C:\Windows\Temp
+ works1: C:\\Windows\\Temp
+ works2: C:/Windows/Temp
+# fail: "C:\Windows\Temp"
+ trailing: C:\Windows\Temp\
+ register: yaml_syntax
+
+- assert:
+ that:
+ - no_quotes_single == good
+ - single_quotes_single == good
+# - double_quotes_single == fail
+ - no_quotes_double == works1
+ - single_quotes_double == works1
+ - double_quotes_double == good
+ - no_quotes_slash == works2
+ - no_quotes_trailing == trailing
+ - single_quotes_trailing == trailing
+# - double_quotes_trailing == fail
+ - good != works1
+ - good != works2
+ - good != trailing
+ - works1 != works2
+ - works1 != trailing
+ - works2 != trailing
+
+- name: Test good path {{ good }}
+ win_stat:
+ path: '{{ good }}'
+ register: good_result
+
+- assert:
+ that:
+ - good_result is successful
+ - good_result.stat.attributes == 'Directory'
+ - good_result.stat.exists == true
+ - good_result.stat.path == good
+
+- name: Test works1 path {{ works1 }}
+ win_stat:
+ path: '{{ works1 }}'
+ register: works1_result
+
+- assert:
+ that:
+ - works1_result is successful
+ - works1_result.stat.attributes == 'Directory'
+ - works1_result.stat.exists == true
+ - works1_result.stat.path == good
+
+- name: Test works2 path {{ works2 }}
+ win_stat:
+ path: '{{ works2 }}'
+ register: works2_result
+
+- assert:
+ that:
+ - works2_result is successful
+ - works2_result.stat.attributes == 'Directory'
+ - works2_result.stat.exists == true
+ - works2_result.stat.path == good
+
+- name: Test trailing path {{ trailing }}
+ win_stat:
+ path: '{{ trailing }}'
+ register: trailing_result
+
+- assert:
+ that:
+ - trailing_result is successful
+ - trailing_result.stat.attributes == 'Directory'
+ - trailing_result.stat.exists == true
+ - trailing_result.stat.path == trailing
+
+- name: Set variables in key=value syntax
+ set_fact:
+ no_quotes_single=C:\Windows\Temp
+ single_quotes_single='C:\Windows\Temp'
+ double_quotes_single="C:\Windows\Temp"
+ no_quotes_single_tab=C:\Windows\temp
+ single_quotes_single_tab='C:\Windows\temp'
+ double_quotes_single_tab="C:\Windows\temp"
+ no_quotes_double=C:\\Windows\\Temp
+ single_quotes_double='C:\\Windows\\Temp'
+ double_quotes_double="C:\\Windows\\Temp"
+ no_quotes_slash=C:/Windows/Temp
+ no_quotes_trailing=C:\Windows\Temp\
+ good=C:\Windows\Temp
+ works1=C:\\Windows\\Temp
+ works2=C:/Windows/Temp
+ fail="C:\Windows\Temp"
+ trailing=C:\Windows\Temp\
+ tab=C:\Windows\x09emp
+ eof=foobar
+# single_quotes_trailing='C:\Windows\Temp\'
+# double_quotes_trailing="C:\Windows\Temp\"
+ register: legacy_syntax
+
+- assert:
+ that:
+ - no_quotes_single == good
+ - single_quotes_single == good
+ - double_quotes_single == good
+ - no_quotes_double == works1
+ - single_quotes_double == works1
+ - double_quotes_double == works1
+ - no_quotes_slash == works2
+ - no_quotes_single_tab == tab
+ - single_quotes_single_tab == tab
+ - double_quotes_single_tab == tab
+ - no_quotes_trailing == trailing
+ - good == works1
+ - good != works2
+ - good != tab
+ - good != trailing
+ - works1 != works2
+ - works1 != tab
+ - works1 != trailing
+ - works2 != tab
+ - works2 != trailing
+ - tab != trailing
+
+- name: Test good path {{ good }}
+ win_stat:
+ path: '{{ good }}'
+ register: good_result
+
+- assert:
+ that:
+ - good_result is successful
+ - good_result.stat.attributes == 'Directory'
+ - good_result.stat.exists == true
+ - good_result.stat.path == good
+
+- name: Test works1 path {{ works1 }}
+ win_stat:
+ path: '{{ works1 }}'
+ register: works1_result
+
+- assert:
+ that:
+ - works1_result is successful
+ - works1_result.stat.attributes == 'Directory'
+ - works1_result.stat.exists == true
+ - works1_result.stat.path == good
+
+- name: Test works2 path {{ works2 }}
+ win_stat:
+ path: '{{ works2 }}'
+ register: works2_result
+
+- assert:
+ that:
+ - works2_result is successful
+ - works2_result.stat.attributes == 'Directory'
+ - works2_result.stat.exists == true
+ - works2_result.stat.path == good
+
+- name: Test trailing path {{ trailing }}
+ win_stat:
+ path: '{{ trailing }}'
+ register: trailing_result
+
+- assert:
+ that:
+ - trailing_result is successful
+ - trailing_result.stat.attributes == 'Directory'
+ - trailing_result.stat.exists == true
+ - trailing_result.stat.path == trailing
+
+- name: Test tab path {{ tab }}
+ win_stat:
+ path: '{{ tab }}'
+ register: tab_result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - tab_result is failed
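The pattern behind the commented-out cases above: in YAML syntax, double-quoted scalars treat backslash as an escape character, so "C:\Windows\Temp" fails to parse (\W and \T are invalid escapes), while the key=value tests show \t turning into a literal tab regardless of quoting. A minimal sketch of the safe spellings, restating only what the tests above establish:

    - set_fact:
        ok_plain: C:\Windows\Temp         # unquoted: backslashes kept literally
        ok_single: 'C:\Windows\Temp'      # single-quoted: backslashes kept literally
        ok_double: "C:\\Windows\\Temp"    # double-quoted: backslashes must be doubled
        ok_slash: C:/Windows/Temp         # forward slashes also resolve on Windows
    # double_quotes_single: "C:\Windows\Temp" would be a YAML parse error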
diff --git a/test/integration/targets/yum/aliases b/test/integration/targets/yum/aliases
new file mode 100644
index 00000000..5aba303d
--- /dev/null
+++ b/test/integration/targets/yum/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/test/integration/targets/yum/files/yum.conf b/test/integration/targets/yum/files/yum.conf
new file mode 100644
index 00000000..5a5fca60
--- /dev/null
+++ b/test/integration/targets/yum/files/yum.conf
@@ -0,0 +1,5 @@
+[main]
+gpgcheck=1
+installonly_limit=3
+clean_requirements_on_remove=True
+tsflags=nodocs
diff --git a/test/integration/targets/yum/meta/main.yml b/test/integration/targets/yum/meta/main.yml
new file mode 100644
index 00000000..34d81261
--- /dev/null
+++ b/test/integration/targets/yum/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - prepare_tests
+ - setup_rpm_repo
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/yum/tasks/check_mode_consistency.yml b/test/integration/targets/yum/tasks/check_mode_consistency.yml
new file mode 100644
index 00000000..e2a99d95
--- /dev/null
+++ b/test/integration/targets/yum/tasks/check_mode_consistency.yml
@@ -0,0 +1,61 @@
+- name: install htop in check mode to verify changes dict returned
+ yum:
+ name: htop
+ state: present
+ check_mode: yes
+ register: yum_changes_check_mode_result
+
+- name: verify changes dict returned by install in check mode
+ assert:
+ that:
+ - "yum_changes_check_mode_result is success"
+ - "yum_changes_check_mode_result is changed"
+ - "'changes' in yum_changes_check_mode_result"
+ - "'installed' in yum_changes_check_mode_result['changes']"
+ - "'htop' in yum_changes_check_mode_result['changes']['installed']"
+
+- name: install htop to verify changes dict returned
+ yum:
+ name: htop
+ state: present
+ register: yum_changes_result
+
+- name: verify changes dict returned by install
+ assert:
+ that:
+ - "yum_changes_result is success"
+ - "yum_changes_result is changed"
+ - "'changes' in yum_changes_result"
+ - "'installed' in yum_changes_result['changes']"
+ - "'htop' in yum_changes_result['changes']['installed']"
+
+- name: remove htop in check mode to verify changes dict returned
+ yum:
+ name: htop
+ state: absent
+ check_mode: yes
+ register: yum_changes_check_mode_result
+
+- name: verify changes dict returned by remove in check mode
+ assert:
+ that:
+ - "yum_changes_check_mode_result is success"
+ - "yum_changes_check_mode_result is changed"
+ - "'changes' in yum_changes_check_mode_result"
+ - "'removed' in yum_changes_check_mode_result['changes']"
+ - "'htop' in yum_changes_check_mode_result['changes']['removed']"
+
+- name: remove htop to verify changes dict returned
+ yum:
+ name: htop
+ state: absent
+ register: yum_changes_result
+
+- name: verify changes dict returned by remove
+ assert:
+ that:
+ - "yum_changes_result is success"
+ - "yum_changes_result is changed"
+ - "'changes' in yum_changes_result"
+ - "'removed' in yum_changes_result['changes']"
+ - "'htop' in yum_changes_result['changes']['removed']"
diff --git a/test/integration/targets/yum/tasks/lock.yml b/test/integration/targets/yum/tasks/lock.yml
new file mode 100644
index 00000000..3f585c1d
--- /dev/null
+++ b/test/integration/targets/yum/tasks/lock.yml
@@ -0,0 +1,28 @@
+- block:
+ - name: Make sure testing package is not installed
+ yum:
+ name: sos
+ state: absent
+
+ - name: Create bogus lock file
+ copy:
+ content: bogus content for this lock file
+ dest: /var/run/yum.pid
+
+ - name: Install a package, lock file should be deleted by the module
+ yum:
+ name: sos
+ state: present
+ register: yum_result
+
+ - assert:
+ that:
+ - yum_result is success
+
+ always:
+ - name: Clean up
+ yum:
+ name: sos
+ state: absent
+
+ when: ansible_pkg_mgr == 'yum'
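The test above only asserts that the install succeeds despite the bogus /var/run/yum.pid. A follow-up check that the stale lock file is actually gone could look like the sketch below -- an illustrative addition, not part of the upstream test, and it assumes yum removes its own pid file once the transaction finishes:

    - name: confirm the stale lock file was cleaned up
      stat:
        path: /var/run/yum.pid
      register: yum_pid

    - assert:
        that:
          - not yum_pid.stat.exists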
diff --git a/test/integration/targets/yum/tasks/main.yml b/test/integration/targets/yum/tasks/main.yml
new file mode 100644
index 00000000..3a7f4cf5
--- /dev/null
+++ b/test/integration/targets/yum/tasks/main.yml
@@ -0,0 +1,71 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Note: We install the yum package onto Fedora so that this will work on dnf systems.
+# We want to keep testing yum for people who don't want to upgrade their systems.
+
+- block:
+ - name: ensure test packages are removed before starting
+ yum:
+ name:
+ - sos
+ state: absent
+
+ - import_tasks: yum.yml
+ always:
+ - name: remove installed packages
+ yum:
+ name:
+ - sos
+ state: absent
+
+ - name: remove installed group
+ yum:
+ name: "@Custom Group"
+ state: absent
+
+    - name: On Fedora 28 the above won't remove the group, which results in a failure in repo.yml below
+ yum:
+ name: dinginessentail
+ state: absent
+ when:
+ - ansible_distribution in ['Fedora']
+
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora']
+
+
+- block:
+ - import_tasks: repo.yml
+ - import_tasks: yum_group_remove.yml
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux']
+ always:
+ - yum_repository:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ repos }}"
+
+ - command: yum clean metadata
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora']
+
+
+- import_tasks: yuminstallroot.yml
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora']
+
+
+- import_tasks: proxy.yml
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora']
+
+
+- import_tasks: check_mode_consistency.yml
+ when:
+ - (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version|int == 7)
+
+
+- import_tasks: lock.yml
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux']
diff --git a/test/integration/targets/yum/tasks/proxy.yml b/test/integration/targets/yum/tasks/proxy.yml
new file mode 100644
index 00000000..f42eb179
--- /dev/null
+++ b/test/integration/targets/yum/tasks/proxy.yml
@@ -0,0 +1,186 @@
+- name: test yum proxy settings
+ block:
+ - name: install tinyproxy
+ yum:
+ name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/tinyproxy-1.10.0-3.el7.x86_64.rpm'
+ state: installed
+
+ # systemd doesn't play nice with this in a container for some reason
+    - name: start tinyproxy (systemd with tinyproxy does not work in a container)
+ shell: tinyproxy
+ changed_when: false
+
+ # test proxy without auth
+ - name: set unauthenticated proxy in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://127.0.0.1:8888"
+ state: present
+
+ - name: clear proxy logs
+ shell: ': > /var/log/tinyproxy/tinyproxy.log'
+ changed_when: false
+ args:
+ executable: /usr/bin/bash
+
+ - name: install ninvaders with unauthenticated proxy
+ yum:
+ name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/ninvaders-0.1.1-18.el7.x86_64.rpm'
+ state: installed
+ register: yum_proxy_result
+
+ - assert:
+ that:
+ - "yum_proxy_result.changed"
+ - "'msg' in yum_proxy_result"
+ - "'rc' in yum_proxy_result"
+
+    - name: check that it installed via the unauthenticated proxy
+ command: grep -q Request /var/log/tinyproxy/tinyproxy.log
+
+ - name: uninstall ninvaders with unauthenticated proxy
+ yum:
+ name: ninvaders
+ state: absent
+ register: yum_proxy_result
+
+ - assert:
+ that:
+ - "yum_proxy_result.changed"
+ - "'msg' in yum_proxy_result"
+ - "'rc' in yum_proxy_result"
+
+ - name: unset unauthenticated proxy in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://127.0.0.1:8888"
+ state: absent
+
+ # test proxy with auth
+ - name: set authenticated proxy config in tinyproxy.conf
+ lineinfile:
+ path: /etc/tinyproxy/tinyproxy.conf
+ line: "BasicAuth 1testuser 1testpassword"
+ state: present
+
+ # systemd doesn't play nice with this in a container for some reason
+ - name: SIGHUP tinyproxy to reload config (workaround because of systemd+tinyproxy in container)
+ shell: kill -HUP $(ps -ef | grep tinyproxy | grep -v grep | awk '{print $2}')
+ changed_when: false
+ args:
+ executable: /usr/bin/bash
+
+ - name: set authenticated proxy config in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://1testuser:1testpassword@127.0.0.1:8888"
+ state: present
+
+ - name: clear proxy logs
+ shell: ': > /var/log/tinyproxy/tinyproxy.log'
+ changed_when: false
+ args:
+ executable: /usr/bin/bash
+
+ - name: install ninvaders with authenticated proxy
+ yum:
+ name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/ninvaders-0.1.1-18.el7.x86_64.rpm'
+ state: installed
+ register: yum_proxy_result
+
+ - assert:
+ that:
+ - "yum_proxy_result.changed"
+ - "'msg' in yum_proxy_result"
+ - "'rc' in yum_proxy_result"
+
+    - name: check that it installed via the authenticated proxy
+ command: grep -q Request /var/log/tinyproxy/tinyproxy.log
+
+ - name: uninstall ninvaders with authenticated proxy
+ yum:
+ name: ninvaders
+ state: absent
+
+ - name: unset authenticated proxy config in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://1testuser:1testpassword@127.0.0.1:8888"
+ state: absent
+
+ - name: set proxy config in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://127.0.0.1:8888"
+ state: present
+
+ - name: set proxy_username config in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy_username=1testuser"
+ state: present
+
+ - name: set proxy_password config in yum.conf
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy_password=1testpassword"
+ state: present
+
+ - name: clear proxy logs
+ shell: ': > /var/log/tinyproxy/tinyproxy.log'
+ changed_when: false
+ args:
+ executable: /usr/bin/bash
+
+ - name: install ninvaders with proxy, proxy_username, and proxy_password config in yum.conf
+ yum:
+ name: 'https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/ninvaders-0.1.1-18.el7.x86_64.rpm'
+ state: installed
+ register: yum_proxy_result
+
+ - assert:
+ that:
+ - "yum_proxy_result.changed"
+ - "'msg' in yum_proxy_result"
+ - "'rc' in yum_proxy_result"
+
+    - name: check that it installed via proxy with proxy_username and proxy_password config in yum.conf
+ command: grep -q Request /var/log/tinyproxy/tinyproxy.log
+
+ always:
+    # cleanup
+ - name: uninstall tinyproxy
+ yum:
+ name: tinyproxy
+ state: absent
+
+ - name: uninstall ninvaders
+ yum:
+ name: ninvaders
+ state: absent
+
+ - name: ensure unset authenticated proxy
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://1testuser:1testpassword@127.0.0.1:8888"
+ state: absent
+
+ - name: ensure unset proxy
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy=http://127.0.0.1:8888"
+ state: absent
+
+ - name: ensure unset proxy_username
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy_username=1testuser"
+ state: absent
+
+ - name: ensure unset proxy_password
+ lineinfile:
+ path: /etc/yum.conf
+ line: "proxy_password=1testpassword"
+ state: absent
+ when:
+ - (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version|int == 7 and ansible_architecture in ['x86_64'])
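The file above exercises three proxy shapes in yum.conf: an unauthenticated proxy= line, credentials inline in the proxy URL, and a bare proxy= plus separate proxy_username/proxy_password keys. A compact sketch that sets the third shape in one loop, reusing the fixture values from the tasks above (illustrative only; the upstream test sets each line individually):

    - name: set split proxy credentials in yum.conf
      lineinfile:
        path: /etc/yum.conf
        line: "{{ item }}"
        state: present
      loop:
        - "proxy=http://127.0.0.1:8888"
        - "proxy_username=1testuser"
        - "proxy_password=1testpassword"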
diff --git a/test/integration/targets/yum/tasks/repo.yml b/test/integration/targets/yum/tasks/repo.yml
new file mode 100644
index 00000000..c1a7a016
--- /dev/null
+++ b/test/integration/targets/yum/tasks/repo.yml
@@ -0,0 +1,705 @@
+- block:
+ - name: Install dinginessentail-1.0-1
+ yum:
+ name: dinginessentail-1.0-1
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 again
+ yum:
+ name: dinginessentail-1.0-1
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Install dinginessentail-1:1.0-2
+ yum:
+ name: "dinginessentail-1:1.0-2.{{ ansible_architecture }}"
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+
+ - name: Remove dinginessentail
+ yum:
+ name: dinginessentail
+ state: absent
+ # ============================================================================
+ - name: Downgrade dinginessentail
+ yum:
+ name: dinginessentail-1.0-1
+ state: present
+ allow_downgrade: yes
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Update to the latest dinginessentail
+ yum:
+ name: dinginessentail
+ state: latest
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.1-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 from a file (higher version is already installed)
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.1-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+
+ - name: Remove dinginessentail
+ yum:
+ name: dinginessentail
+ state: absent
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 from a file
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-1 from a file again
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-2 from a file
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Install dinginessentail-1.0-2 from a file again
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.0-2.{{ ansible_architecture }}.rpm"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Try to downgrade dinginessentail without allow_downgrade being set
+ yum:
+ name: dinginessentail-1.0-1
+ state: present
+ allow_downgrade: no
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Update dinginessentail with update_only set
+ yum:
+ name: dinginessentail
+ state: latest
+ update_only: yes
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.1-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+
+ - name: Remove dinginessentail
+ yum:
+ name: dinginessentail
+ state: absent
+ # ============================================================================
+ - name: Try to update dinginessentail which is not installed, update_only is set
+ yum:
+ name: dinginessentail
+ state: latest
+ update_only: yes
+ register: yum_result
+ ignore_errors: yes
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+ ignore_errors: yes
+
+ - name: Verify installation
+ assert:
+ that:
+ - "rpm_result.rc == 1"
+ - "yum_result.rc == 0"
+ - "not yum_result.changed"
+ - "not yum_result is failed"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Try to install incompatible arch
+ yum:
+ name: "{{ repodir_ppc64 }}/dinginessentail-1.0-1.ppc64.rpm"
+ state: present
+ register: yum_result
+ ignore_errors: yes
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+ ignore_errors: yes
+
+ - name: Verify installation
+ assert:
+ that:
+ - "rpm_result.rc == 1"
+ - "yum_result.rc == 1"
+ - "not yum_result.changed"
+ - "yum_result is failed"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - name: Make sure latest dinginessentail is installed
+ yum:
+ name: dinginessentail
+ state: latest
+
+ - name: Downgrade dinginessentail using rpm file
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.0-1.{{ ansible_architecture }}.rpm"
+ state: present
+ allow_downgrade: yes
+ disable_gpg_check: yes
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ # ============================================================================
+ - block:
+ - name: make sure dinginessentail is not installed
+ yum:
+ name: dinginessentail
+ state: absent
+
+ - name: install dinginessentail both archs
+ yum:
+ name: "{{ pkgs }}"
+ state: present
+ disable_gpg_check: true
+ vars:
+ pkgs:
+ - "{{ repodir }}/dinginessentail-1.1-1.x86_64.rpm"
+ - "{{ repodir_i686 }}/dinginessentail-1.1-1.i686.rpm"
+
+ - name: try to install lower version of dinginessentail from rpm file, without allow_downgrade, just one arch
+ yum:
+ name: "{{ repodir_i686 }}/dinginessentail-1.0-1.i686.rpm"
+ state: present
+ register: yum_result
+
+ - name: check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout_lines[0].startswith('dinginessentail-1.1-1')"
+ - "rpm_result.stdout_lines[1].startswith('dinginessentail-1.1-1')"
+
+ - name: verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ when: ansible_architecture == "x86_64"
+ # ============================================================================
+ - block:
+ - name: make sure dinginessentail is not installed
+ yum:
+ name: dinginessentail
+ state: absent
+
+ - name: install dinginessentail both archs
+ yum:
+ name: "{{ pkgs }}"
+ state: present
+ disable_gpg_check: true
+ vars:
+ pkgs:
+ - "{{ repodir }}/dinginessentail-1.0-1.x86_64.rpm"
+ - "{{ repodir_i686 }}/dinginessentail-1.0-1.i686.rpm"
+
+ - name: Update both arch in one task using rpm files
+ yum:
+ name: "{{ repodir }}/dinginessentail-1.1-1.x86_64.rpm,{{ repodir_i686 }}/dinginessentail-1.1-1.i686.rpm"
+ state: present
+ disable_gpg_check: yes
+ register: yum_result
+
+ - name: check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout_lines[0].startswith('dinginessentail-1.1-1')"
+ - "rpm_result.stdout_lines[1].startswith('dinginessentail-1.1-1')"
+
+ - name: verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ when: ansible_architecture == "x86_64"
+ # ============================================================================
+ always:
+ - name: Clean up
+ yum:
+ name: dinginessentail
+ state: absent
+
+# FIXME: dnf currently doesn't support epoch as part of its pkg_spec for
+# finding install candidates
+# https://bugzilla.redhat.com/show_bug.cgi?id=1619687
+- block:
+ - name: Install 1:dinginessentail-1.0-2
+ yum:
+ name: "1:dinginessentail-1.0-2.{{ ansible_architecture }}"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ always:
+ - name: Clean up
+ yum:
+ name: dinginessentail
+ state: absent
+
+ when: ansible_pkg_mgr == 'yum'
+
+# DNF1 (Fedora < 26) had some issues:
+# - did not accept architecture tag as valid component of a package spec unless
+# installing a file (i.e. can't search the repo)
+# - doesn't handle downgrade transactions via the API properly, marks it as a
+# conflict
+#
+# NOTE: Both DNF1 and Fedora < 26 have long been EOL'd by their respective
+# upstreams
+- block:
+ # ============================================================================
+ - name: Install dinginessentail-1.0-2
+ yum:
+ name: "dinginessentail-1.0-2.{{ ansible_architecture }}"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+
+ - name: Install dinginessentail-1.0-2 again
+ yum:
+ name: dinginessentail-1.0-2
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "not yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-2')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ always:
+ - name: Clean up
+ yum:
+ name: dinginessentail
+ state: absent
+ when: not (ansible_distribution == "Fedora" and ansible_distribution_major_version|int < 26)
+
+# https://github.com/ansible/ansible/issues/47689
+- block:
+ - name: Install dinginessentail == 1.0
+ yum:
+ name: "dinginessentail == 1.0"
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ always:
+ - name: Clean up
+ yum:
+ name: dinginessentail
+ state: absent
+
+ when: ansible_pkg_mgr == 'yum'
+
+
+# https://github.com/ansible/ansible/pull/54603
+- block:
+ - name: Install dinginessentail < 1.1
+ yum:
+ name: "dinginessentail < 1.1"
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.0')"
+
+ - name: Install dinginessentail >= 1.1
+ yum:
+ name: "dinginessentail >= 1.1"
+ state: present
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify installation
+ assert:
+ that:
+ - "yum_result.changed"
+ - "rpm_result.stdout.startswith('dinginessentail-1.1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+
+ always:
+ - name: Clean up
+ yum:
+ name: dinginessentail
+ state: absent
+
+ when: ansible_pkg_mgr == 'yum'
+
+# https://github.com/ansible/ansible/issues/45250
+- block:
+ - name: Install dinginessentail-1.0, dinginessentail-olive-1.0, landsidescalping-1.0
+ yum:
+ name: "dinginessentail-1.0,dinginessentail-olive-1.0,landsidescalping-1.0"
+ state: present
+
+ - name: Upgrade dinginessentail*
+ yum:
+ name: dinginessentail*
+ state: latest
+ register: yum_result
+
+ - name: Check dinginessentail with rpm
+ shell: rpm -q dinginessentail
+ register: rpm_result
+
+ - name: Verify update of dinginessentail
+ assert:
+ that:
+ - "rpm_result.stdout.startswith('dinginessentail-1.1-1')"
+
+ - name: Check dinginessentail-olive with rpm
+ shell: rpm -q dinginessentail-olive
+ register: rpm_result
+
+ - name: Verify update of dinginessentail-olive
+ assert:
+ that:
+ - "rpm_result.stdout.startswith('dinginessentail-olive-1.1-1')"
+
+ - name: Check landsidescalping with rpm
+ shell: rpm -q landsidescalping
+ register: rpm_result
+
+ - name: Verify landsidescalping did NOT get updated
+ assert:
+ that:
+ - "rpm_result.stdout.startswith('landsidescalping-1.0-1')"
+
+ - name: Verify yum module outputs
+ assert:
+ that:
+ - "yum_result is changed"
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+ always:
+ - name: Clean up
+ yum:
+ name: dinginessentail,dinginessentail-olive,landsidescalping
+ state: absent
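As the FIXME above notes, epoch-qualified package specs (1:name-version-release.arch) resolve with the yum backend but not with dnf, so any play relying on them needs a guard. A minimal sketch of that guard, using the fixture package from this file:

    - name: epoch-qualified spec works on the yum backend only (see FIXME above)
      yum:
        name: "1:dinginessentail-1.0-2.{{ ansible_architecture }}"
        state: present
        disable_gpg_check: true
      when: ansible_pkg_mgr == 'yum'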
diff --git a/test/integration/targets/yum/tasks/yum.yml b/test/integration/targets/yum/tasks/yum.yml
new file mode 100644
index 00000000..9ed00af8
--- /dev/null
+++ b/test/integration/targets/yum/tasks/yum.yml
@@ -0,0 +1,873 @@
+# Setup by setup_rpm_repo
+- set_fact:
+ package1: dinginessentail
+ package2: dinginessentail-olive
+
+# UNINSTALL
+- name: uninstall {{ package1 }}
+ yum: name={{ package1 }} state=removed
+ register: yum_result
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }}
+ ignore_errors: True
+ register: rpm_result
+
+- name: verify uninstallation of {{ package1 }}
+ assert:
+ that:
+ - "yum_result is success"
+ - "rpm_result is failed"
+
+# UNINSTALL AGAIN
+- name: uninstall {{ package1 }} again in check mode
+ yum: name={{ package1 }} state=removed
+ check_mode: true
+ register: yum_result
+
+- name: verify no change on re-uninstall in check mode
+ assert:
+ that:
+ - "not yum_result is changed"
+
+- name: uninstall {{ package1 }} again
+ yum: name={{ package1 }} state=removed
+ register: yum_result
+
+- name: verify no change on re-uninstall
+ assert:
+ that:
+ - "not yum_result is changed"
+
+# INSTALL
+- name: install {{ package1 }} in check mode
+ yum: name={{ package1 }} state=present
+ check_mode: true
+ register: yum_result
+
+- name: verify installation of {{ package1 }} in check mode
+ assert:
+ that:
+ - "yum_result is changed"
+
+- name: install {{ package1 }}
+ yum: name={{ package1 }} state=present
+ register: yum_result
+
+- name: verify installation of {{ package1 }}
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }}
+
+# INSTALL AGAIN
+- name: install {{ package1 }} again in check mode
+ yum: name={{ package1 }} state=present
+ check_mode: true
+ register: yum_result
+- name: verify no change on second install in check mode
+ assert:
+ that:
+ - "not yum_result is changed"
+
+- name: install {{ package1 }} again
+ yum: name={{ package1 }} state=present
+ register: yum_result
+- name: verify no change on second install
+ assert:
+ that:
+ - "not yum_result is changed"
+
+- name: install {{ package1 }} again with empty string enablerepo
+ yum: name={{ package1 }} state=present enablerepo=""
+ register: yum_result
+- name: verify no change on third install with empty string enablerepo
+ assert:
+ that:
+ - "yum_result is success"
+ - "not yum_result is changed"
+
+# This test case is unfortunately distro specific because we have to specify
+# repo names which are not the same across Fedora/RHEL/CentOS for base/updates
+- name: install {{ package1 }} again with missing repo enablerepo
+ yum:
+ name: '{{ package1 }}'
+ state: present
+ enablerepo: '{{ repos + ["thisrepodoesnotexist"] }}'
+ disablerepo: "*"
+ register: yum_result
+ when: ansible_distribution == 'CentOS'
+- name: verify no change on fourth install with missing repo enablerepo (yum)
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is not changed"
+ when: ansible_distribution == 'CentOS'
+
+# This test case is unfortunately distro specific because we have to specify
+# repo names which are not the same across Fedora/RHEL/CentOS for base/updates
+- name: install repos again with disable all and enable select repo(s)
+ yum:
+ name: '{{ package1 }}'
+ state: present
+ enablerepo: '{{ repos }}'
+ disablerepo: "*"
+ register: yum_result
+ when: ansible_distribution == 'CentOS'
+- name: verify no change on install with disablerepo=* and select repos enabled (yum)
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is not changed"
+ when: ansible_distribution == 'CentOS'
+
+- name: install {{ package1 }} again with only missing repo enablerepo
+ yum:
+ name: '{{ package1 }}'
+ state: present
+ enablerepo: "thisrepodoesnotexist"
+ ignore_errors: true
+ register: yum_result
+- name: verify failure on fifth install with only missing repo enablerepo (yum)
+ assert:
+ that:
+ - "yum_result is not success"
+ when: ansible_pkg_mgr == 'yum'
+- name: verify success on fifth install with only missing repo enablerepo (dnf)
+ assert:
+ that:
+ - "yum_result is success"
+ when: ansible_pkg_mgr == 'dnf'
+
+# INSTALL AGAIN WITH LATEST
+- name: install {{ package1 }} again with state latest in check mode
+ yum: name={{ package1 }} state=latest
+ check_mode: true
+ register: yum_result
+- name: verify install {{ package1 }} again with state latest in check mode
+ assert:
+ that:
+ - "not yum_result is changed"
+
+- name: install {{ package1 }} again with state latest idempotence
+ yum: name={{ package1 }} state=latest
+ register: yum_result
+- name: verify install {{ package1 }} again with state latest idempotence
+ assert:
+ that:
+ - "not yum_result is changed"
+
+# INSTALL WITH LATEST
+- name: uninstall {{ package1 }}
+ yum: name={{ package1 }} state=removed
+ register: yum_result
+- name: verify uninstall {{ package1 }}
+ assert:
+ that:
+ - "yum_result is successful"
+
+- name: copy yum.conf file in case it is missing
+ copy:
+ src: yum.conf
+ dest: /etc/yum.conf
+ force: False
+ register: yum_conf_copy
+
+- block:
+ - name: install {{ package1 }} with state latest in check mode with config file param
+ yum: name={{ package1 }} state=latest conf_file=/etc/yum.conf
+ check_mode: true
+ register: yum_result
+ - name: verify install {{ package1 }} with state latest in check mode with config file param
+ assert:
+ that:
+ - "yum_result is changed"
+
+ always:
+ - name: remove tmp yum.conf file if we created it
+ file:
+ path: /etc/yum.conf
+ state: absent
+ when: yum_conf_copy is changed
+
+- name: install {{ package1 }} with state latest in check mode
+ yum: name={{ package1 }} state=latest
+ check_mode: true
+ register: yum_result
+- name: verify install {{ package1 }} with state latest in check mode
+ assert:
+ that:
+ - "yum_result is changed"
+
+- name: install {{ package1 }} with state latest
+ yum: name={{ package1 }} state=latest
+ register: yum_result
+- name: verify install {{ package1 }} with state latest
+ assert:
+ that:
+ - "yum_result is changed"
+
+- name: install {{ package1 }} with state latest idempotence
+ yum: name={{ package1 }} state=latest
+ register: yum_result
+- name: verify install {{ package1 }} with state latest idempotence
+ assert:
+ that:
+ - "not yum_result is changed"
+
+- name: install {{ package1 }} with state latest idempotence with config file param
+ yum: name={{ package1 }} state=latest
+ register: yum_result
+- name: verify install {{ package1 }} with state latest idempotence with config file param
+ assert:
+ that:
+ - "not yum_result is changed"
+
+
+# Multiple packages
+- name: uninstall {{ package1 }} and {{ package2 }}
+ yum: name={{ package1 }},{{ package2 }} state=removed
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }}
+ ignore_errors: True
+ register: rpm_package1_result
+
+- name: check {{ package2 }} with rpm
+ shell: rpm -q {{ package2 }}
+ ignore_errors: True
+ register: rpm_package2_result
+
+- name: verify packages removed
+ assert:
+ that:
+ - "rpm_package1_result is failed"
+ - "rpm_package2_result is failed"
+
+- name: install {{ package1 }} and {{ package2 }} as comma-separated
+ yum: name={{ package1 }},{{ package2 }} state=present
+ register: yum_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }}
+
+- name: check {{ package2 }} with rpm
+ shell: rpm -q {{ package2 }}
+
+- name: uninstall {{ package1 }} and {{ package2 }}
+ yum: name={{ package1 }},{{ package2 }} state=removed
+ register: yum_result
+
+- name: install {{ package1 }} and {{ package2 }} as list
+ yum:
+ name:
+ - '{{ package1 }}'
+ - '{{ package2 }}'
+ state: present
+ register: yum_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }}
+
+- name: check {{ package2 }} with rpm
+ shell: rpm -q {{ package2 }}
+
+- name: uninstall {{ package1 }} and {{ package2 }}
+ yum: name={{ package1 }},{{ package2 }} state=removed
+ register: yum_result
+
+- name: install {{ package1 }} and {{ package2 }} as comma-separated with spaces
+ yum:
+ name: "{{ package1 }}, {{ package2 }}"
+ state: present
+ register: yum_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }}
+
+- name: check {{ package2 }} with rpm
+ shell: rpm -q {{ package2 }}
+
+- name: uninstall {{ package1 }} and {{ package2 }}
+ yum: name={{ package1 }},{{ package2 }} state=removed
+
+- name: install non-existent rpm
+ yum:
+ name: does-not-exist
+ register: non_existent_rpm
+ ignore_errors: True
+
+- name: check non-existent rpm install failed
+ assert:
+ that:
+ - non_existent_rpm is failed
+
+# Install in installroot='/'
+- name: install {{ package1 }}
+ yum: name={{ package1 }} state=present installroot='/'
+ register: yum_result
+
+- name: verify installation of {{ package1 }}
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: check {{ package1 }} with rpm
+ shell: rpm -q {{ package1 }} --root=/
+
+- name: uninstall {{ package1 }}
+ yum:
+ name: '{{ package1 }}'
+ installroot: '/'
+ state: removed
+ register: yum_result
+
+# Some yum versions won't download a package from a local file repository, so continue to use sos for this test.
+# https://stackoverflow.com/questions/58295660/yum-downloadonly-ignores-packages-in-local-repo
+- name: Test download_only
+ yum:
+ name: sos
+ state: latest
+ download_only: true
+ register: yum_result
+
+- name: verify download of sos (part 1 -- yum "install" succeeded)
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: uninstall sos (noop)
+ yum:
+ name: sos
+ state: removed
+ register: yum_result
+
+- name: verify download of sos (part 2 -- nothing removed during uninstall)
+ assert:
+ that:
+ - "yum_result is success"
+ - "not yum_result is changed"
+
+- name: uninstall sos for downloadonly/downloaddir test
+ yum:
+ name: sos
+ state: absent
+
+- name: Test download_only/download_dir
+ yum:
+ name: sos
+ state: latest
+ download_only: true
+ download_dir: "/var/tmp/packages"
+ register: yum_result
+
+- name: verify yum output
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- command: "ls /var/tmp/packages"
+ register: ls_out
+
+- name: Verify specified download_dir was used
+ assert:
+ that:
+ - "'sos' in ls_out.stdout"
+
+- name: install group
+ yum:
+ name: "@Custom Group"
+ state: present
+ register: yum_result
+
+- name: verify installation of the group
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: install the group again
+ yum:
+ name: "@Custom Group"
+ state: present
+ register: yum_result
+
+- name: verify nothing changed
+ assert:
+ that:
+ - "yum_result is success"
+ - "not yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: install the group again but also with a package that is not yet installed
+ yum:
+ name:
+ - "@Custom Group"
+ - '{{ package2 }}'
+ state: present
+ register: yum_result
+
+- name: verify {{ package2 }} is installed
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: try to install the group again, with --check to check 'changed'
+ yum:
+ name: "@Custom Group"
+ state: present
+ check_mode: yes
+ register: yum_result
+
+- name: verify nothing changed
+ assert:
+ that:
+ - "not yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: try to install non-existing group
+ yum:
+ name: "@non-existing-group"
+ state: present
+ register: yum_result
+ ignore_errors: True
+
+- name: verify installation of the non-existing group failed
+  assert:
+    that:
+      - "yum_result is failed"
+      - "not yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: try to install non-existing file
+ yum:
+ name: /tmp/non-existing-1.0.0.fc26.noarch.rpm
+ state: present
+ register: yum_result
+ ignore_errors: yes
+
+- name: verify installation failed
+ assert:
+ that:
+ - "yum_result is failed"
+ - "not yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+
+- name: try to install from non-existing URL
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/non-existing-1.0.0.fc26.noarch.rpm
+ state: present
+ register: yum_result
+ ignore_errors: yes
+
+- name: verify installation failed
+ assert:
+ that:
+ - "yum_result is failed"
+ - "not yum_result is changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+
+- name: use latest to install httpd
+ yum:
+ name: httpd
+ state: latest
+ register: yum_result
+
+- name: verify httpd was installed
+ assert:
+ that:
+ - "'changed' in yum_result"
+
+- name: uninstall httpd
+ yum:
+ name: httpd
+ state: removed
+
+- name: update httpd only if it exists
+ yum:
+ name: httpd
+ state: latest
+ update_only: yes
+ register: yum_result
+
+- name: verify httpd not installed
+ assert:
+ that:
+ - "not yum_result is changed"
+ - "'Packages providing httpd not installed due to update_only specified' in yum_result.results"
+
+- name: try to install incompatible arch rpm on non-ppc64le, should fail
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/banner-1.3.4-3.el7.ppc64le.rpm
+ state: present
+ register: yum_result
+ ignore_errors: True
+ when:
+ - ansible_architecture not in ['ppc64le']
+
+- name: verify that yum failed on non-ppc64le
+ assert:
+ that:
+ - "not yum_result is changed"
+ - "yum_result is failed"
+ when:
+ - ansible_architecture not in ['ppc64le']
+
+- name: try to install incompatible arch rpm on ppc64le, should fail
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/tinyproxy-1.10.0-3.el7.x86_64.rpm
+ state: present
+ register: yum_result
+ ignore_errors: True
+ when:
+ - ansible_architecture in ['ppc64le']
+
+- name: verify that yum failed on ppc64le
+ assert:
+ that:
+ - "not yum_result is changed"
+ - "yum_result is failed"
+ when:
+ - ansible_architecture in ['ppc64le']
+
+# setup for testing installing an RPM from url
+
+- set_fact:
+ pkg_name: fpaste
+
+- name: cleanup
+ yum:
+ name: "{{ pkg_name }}"
+ state: absent
+
+- set_fact:
+ pkg_url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/fpaste-0.3.7.4.1-2.el7.noarch.rpm
+ when: ansible_python.version.major == 2
+
+- set_fact:
+ pkg_url: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/yum/fpaste-0.3.9.2-1.fc28.noarch.rpm
+ when: ansible_python.version.major == 3
+# setup end
+
+- name: download an rpm
+ get_url:
+ url: "{{ pkg_url }}"
+ dest: "/tmp/{{ pkg_name }}.rpm"
+
+- name: install the downloaded rpm
+ yum:
+ name: "/tmp/{{ pkg_name }}.rpm"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+- name: verify installation
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+ - "yum_result is not failed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: install the downloaded rpm again
+ yum:
+ name: "/tmp/{{ pkg_name }}.rpm"
+ state: present
+ register: yum_result
+
+- name: verify installation
+ assert:
+ that:
+ - "yum_result is success"
+ - "not yum_result is changed"
+ - "yum_result is not failed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: clean up
+ yum:
+ name: "{{ pkg_name }}"
+ state: absent
+
+- name: install from url
+ yum:
+ name: "{{ pkg_url }}"
+ state: present
+ disable_gpg_check: true
+ register: yum_result
+
+- name: verify installation
+ assert:
+ that:
+ - "yum_result is success"
+ - "yum_result is changed"
+ - "yum_result is not failed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: Create a temp RPM file which does not contain nevra information
+ file:
+ name: "/tmp/non_existent_pkg.rpm"
+ state: touch
+
+- name: Try installing RPM file which does not contain nevra information
+ yum:
+ name: "/tmp/non_existent_pkg.rpm"
+ state: present
+ register: no_nevra_info_result
+ ignore_errors: yes
+
+- name: Verify RPM failed to install
+ assert:
+ that:
+ - "'changed' in no_nevra_info_result"
+ - "'msg' in no_nevra_info_result"
+
+- name: Delete a temp RPM file
+ file:
+ name: "/tmp/non_existent_pkg.rpm"
+ state: absent
+
+- name: get yum version
+ yum:
+ list: yum
+ register: yum_version
+
+- name: set yum_version of installed version
+ set_fact:
+ yum_version: "{%- if item.yumstate == 'installed' -%}{{ item.version }}{%- else -%}{{ yum_version }}{%- endif -%}"
+ with_items: "{{ yum_version.results }}"
+
+- name: Ensure double uninstall of wildcard globs works
+ block:
+ - name: "Install lohit-*-fonts"
+ yum:
+ name: "lohit-*-fonts"
+ state: present
+
+ - name: "Remove lohit-*-fonts (1st time)"
+ yum:
+ name: "lohit-*-fonts"
+ state: absent
+ register: remove_lohit_fonts_1
+
+ - name: "Verify lohit-*-fonts (1st time)"
+ assert:
+ that:
+ - "remove_lohit_fonts_1 is changed"
+ - "'msg' in remove_lohit_fonts_1"
+ - "'results' in remove_lohit_fonts_1"
+
+ - name: "Remove lohit-*-fonts (2nd time)"
+ yum:
+ name: "lohit-*-fonts"
+ state: absent
+ register: remove_lohit_fonts_2
+
+ - name: "Verify lohit-*-fonts (2nd time)"
+ assert:
+ that:
+ - "remove_lohit_fonts_2 is not changed"
+ - "'msg' in remove_lohit_fonts_2"
+ - "'results' in remove_lohit_fonts_2"
+ - "'lohit-*-fonts is not installed' in remove_lohit_fonts_2['results']"
+
+- block:
+ - name: uninstall {{ package2 }}
+ yum: name={{ package2 }} state=removed
+
+ - name: check {{ package2 }} with rpm
+ shell: rpm -q {{ package2 }}
+ ignore_errors: True
+ register: rpm_package2_result
+
+ - name: verify {{ package2 }} is uninstalled
+ assert:
+ that:
+ - "rpm_package2_result is failed"
+
+ - name: exclude {{ package2 }} (yum backend)
+ lineinfile:
+ dest: /etc/yum.conf
+ regexp: (^exclude=)(.)*
+ line: "exclude={{ package2 }}*"
+ state: present
+ when: ansible_pkg_mgr == 'yum'
+
+ - name: exclude {{ package2 }} (dnf backend)
+ lineinfile:
+ dest: /etc/dnf/dnf.conf
+ regexp: (^excludepkgs=)(.)*
+ line: "excludepkgs={{ package2 }}*"
+ state: present
+ when: ansible_pkg_mgr == 'dnf'
+
+ # begin test case where disable_excludes is supported
+ - name: Try install {{ package2 }} without disable_excludes
+ yum: name={{ package2 }} state=latest
+ register: yum_package2_result
+ ignore_errors: True
+
+ - name: verify {{ package2 }} did not install because it is in the exclude list
+ assert:
+ that:
+ - "yum_package2_result is failed"
+
+ - name: install {{ package2 }} with disable_excludes
+ yum: name={{ package2 }} state=latest disable_excludes=all
+ register: yum_package2_result_using_excludes
+
+ - name: verify {{ package2 }} did install using disable_excludes=all
+ assert:
+ that:
+ - "yum_package2_result_using_excludes is success"
+ - "yum_package2_result_using_excludes is changed"
+ - "yum_package2_result_using_excludes is not failed"
+
+ - name: remove exclude {{ package2 }} (cleanup yum.conf)
+ lineinfile:
+ dest: /etc/yum.conf
+ regexp: (^exclude={{ package2 }}*)
+ line: "exclude="
+ state: present
+ when: ansible_pkg_mgr == 'yum'
+
+ - name: remove exclude {{ package2 }} (cleanup dnf.conf)
+ lineinfile:
+ dest: /etc/dnf/dnf.conf
+ regexp: (^excludepkgs={{ package2 }}*)
+ line: "excludepkgs="
+ state: present
+ when: ansible_pkg_mgr == 'dnf'
+
+ # Fedora < 26 has a bug in dnf where package excludes in dnf.conf aren't
+ # actually honored, and those releases are EOL'd, so we have no expectation
+ # they will ever be fixed.
+ when: not ((ansible_distribution == "Fedora") and (ansible_distribution_major_version|int < 26))
+
+- name: Check that packages with Provides are handled correctly in state=absent
+ block:
+ - name: Install test packages
+ yum:
+ name:
+ - https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yum/test-package-that-provides-toaster-1.3.3.7-1.el7.noarch.rpm
+ - https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yum/toaster-1.2.3.4-1.el7.noarch.rpm
+ disable_gpg_check: true
+ register: install
+
+ - name: Remove toaster
+ yum:
+ name: toaster
+ state: absent
+ register: remove
+
+ - name: rpm -qa
+ command: rpm -qa
+ register: rpmqa
+
+ - assert:
+ that:
+ - install is successful
+ - install is changed
+ - remove is successful
+ - remove is changed
+ - "'toaster-1.2.3.4' not in rpmqa.stdout"
+ - "'test-package-that-provides-toaster' in rpmqa.stdout"
diff --git a/test/integration/targets/yum/tasks/yum_group_remove.yml b/test/integration/targets/yum/tasks/yum_group_remove.yml
new file mode 100644
index 00000000..22c6dcb1
--- /dev/null
+++ b/test/integration/targets/yum/tasks/yum_group_remove.yml
@@ -0,0 +1,152 @@
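+# Names prefixed with "@" address yum/dnf package groups rather than single
+# packages; the tasks below exercise group removal in both check and run mode.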
+- name: install a group to test and yum-utils
+ yum:
+ name: "{{ pkgs }}"
+ state: present
+ vars:
+ pkgs:
+ - "@Custom Group"
+ - yum-utils
+ when: ansible_pkg_mgr == "yum"
+
+- name: install a group to test and dnf-utils
+ yum:
+ name: "{{ pkgs }}"
+ state: present
+ vars:
+ pkgs:
+ - "@Custom Group"
+ - dnf-utils
+ when: ansible_pkg_mgr == "dnf"
+
+- name: check mode remove the group
+ yum:
+ name: "@Custom Group"
+ state: absent
+ check_mode: yes
+ register: yum_result
+
+- name: verify changed
+ assert:
+ that:
+ - "yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'results' in yum_result"
+
+- name: remove the group
+ yum:
+ name: "@Custom Group"
+ state: absent
+ register: yum_result
+
+- name: verify changed
+ assert:
+ that:
+ - "yum_result.rc == 0"
+ - "yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: remove the group again
+ yum:
+ name: "@Custom Group"
+ state: absent
+ register: yum_result
+
+- name: verify not changed
+ assert:
+ that:
+ - "not yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: check mode remove the group again
+ yum:
+ name: "@Custom Group"
+ state: absent
+ check_mode: yes
+ register: yum_result
+
+- name: verify not changed
+ assert:
+ that:
+ - "not yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'results' in yum_result"
+
+- name: install a group and a package to test
+ yum:
+ name: "@Custom Group,sos"
+ state: present
+ register: yum_output
+
+- name: check mode remove the group along with the package
+ yum:
+ name: "@Custom Group,sos"
+ state: absent
+ register: yum_result
+ check_mode: yes
+
+- name: verify changed
+ assert:
+ that:
+ - "yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'results' in yum_result"
+
+- name: remove the group along with the package
+ yum:
+ name: "@Custom Group,sos"
+ state: absent
+ register: yum_result
+
+- name: verify changed
+ assert:
+ that:
+ - "yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'results' in yum_result"
+
+- name: check mode remove the group along with the package
+ yum:
+ name: "@Custom Group,sos"
+ state: absent
+ register: yum_result
+ check_mode: yes
+
+- name: verify not changed
+ assert:
+ that:
+ - "not yum_result.changed"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'results' in yum_result"
diff --git a/test/integration/targets/yum/tasks/yuminstallroot.yml b/test/integration/targets/yum/tasks/yuminstallroot.yml
new file mode 100644
index 00000000..f9bee6f9
--- /dev/null
+++ b/test/integration/targets/yum/tasks/yuminstallroot.yml
@@ -0,0 +1,122 @@
+# make an installroot
+- name: Create installroot
+ command: mktemp -d "{{ remote_tmp_dir }}/ansible.test.XXXXXX"
+ register: yumroot
+
+#- name: Populate directory
+# file:
+# path: "/{{ yumroot.stdout }}/etc/"
+# state: directory
+# mode: 0755
+#
+#- name: Populate directory2
+# copy:
+# content: "[main]\ndistropkgver={{ ansible_distribution_version }}\n"
+# dest: "/{{ yumroot.stdout }}/etc/yum.conf"
+
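+# A freshly created installroot contains no distribution metadata, so yum
+# cannot determine $releasever on its own; seed it from the host system below.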
+- name: Make a necessary directory
+ file:
+ path: "{{ yumroot.stdout }}/etc/yum/vars/"
+ state: directory
+ mode: 0755
+
+- name: get yum releasever
+ command: "{{ ansible_python_interpreter }} -c 'import yum; yb = yum.YumBase(); print(yb.conf.yumvar[\"releasever\"])'"
+ register: releasever
+ ignore_errors: yes
+
+- name: Populate directory
+ copy:
+ content: "{{ releasever.stdout_lines[-1] }}\n"
+ dest: "/{{ yumroot.stdout }}/etc/yum/vars/releasever"
+ when: releasever is successful
+
+# This will drag in > 200 MB.
+- name: attempt installroot
+ yum: name=zlib installroot="{{ yumroot.stdout }}/" disable_gpg_check=yes
+ register: yum_result
+
+- name: check zlib with rpm in installroot
+ shell: rpm -q zlib --root="{{ yumroot.stdout }}/"
+ failed_when: False
+ register: rpm_result
+
+- name: verify installation of zlib
+ assert:
+ that:
+ - "yum_result.rc == 0"
+ - "yum_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: verify yum module outputs
+ assert:
+ that:
+ - "'changed' in yum_result"
+ - "'msg' in yum_result"
+ - "'rc' in yum_result"
+ - "'results' in yum_result"
+
+- name: cleanup installroot
+ file:
+ path: "{{ yumroot.stdout }}/"
+ state: absent
+
+# Test for releasever working correctly
+#
+# Bugfix: https://github.com/ansible/ansible/issues/67050
+#
+# This test case is based on a reproducer originally reported on Reddit:
+# https://www.reddit.com/r/ansible/comments/g2ps32/ansible_yum_module_throws_up_an_error_when/
+#
+# NOTE: For the Ansible upstream CI we can only run this for RHEL7 because the
+# containerized runtimes in shippable don't allow the nested mounting of
+# buildah container volumes.
+- name: perform yuminstallroot in a buildah mount with releasever
+ when:
+ - ansible_facts["distribution_major_version"] == "7"
+ - ansible_facts["distribution"] == "RedHat"
+ block:
+ # Need to enable this RHUI repo for RHEL7 testing in AWS, CentOS has Extras
+ # enabled by default and this is not needed there.
+ - name: enable rhel-7-server-rhui-extras-rpms repo for RHEL7
+ command: yum-config-manager --enable rhel-7-server-rhui-extras-rpms
+ - name: update cache to pull repodata
+ yum:
+ update_cache: yes
+ - name: install required packages for buildah test
+ yum:
+ state: present
+ name:
+ - buildah
+ - name: create buildah container from scratch
+ command: "buildah --name yum_installroot_releasever_test from scratch"
+ - name: mount the buildah container
+ command: "buildah mount yum_installroot_releasever_test"
+ register: buildah_mount
+ - name: figure out yum value of $releasever
+ shell: python -c 'import yum; yb = yum.YumBase(); print(yb.conf.yumvar["releasever"])' | tail -1
+ register: buildah_host_releasever
+ - name: test yum install of python using releasever
+ yum:
+ name: 'python'
+ state: present
+ installroot: "{{ buildah_mount.stdout }}"
+ releasever: "{{ buildah_host_releasever.stdout }}"
+ register: yum_result
+ - name: verify installation of python
+ assert:
+ that:
+ - "yum_result.rc == 0"
+ - "yum_result.changed"
+ - "rpm_result.rc == 0"
+ always:
+ - name: remove buildah container
+ command: "buildah rm yum_installroot_releasever_test"
+ ignore_errors: yes
+ - name: remove buildah from CI system
+ yum:
+ state: absent
+ name:
+ - buildah
+ - name: disable rhel-7-server-rhui-extras-rpms repo for RHEL7
+ command: yum-config-manager --disable rhel-7-server-rhui-extras-rpms
diff --git a/test/integration/targets/yum_repository/aliases b/test/integration/targets/yum_repository/aliases
new file mode 100644
index 00000000..0b484bba
--- /dev/null
+++ b/test/integration/targets/yum_repository/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+destructive
+skip/aix
diff --git a/test/integration/targets/yum_repository/tasks/main.yml b/test/integration/targets/yum_repository/tasks/main.yml
new file mode 100644
index 00000000..3884d46c
--- /dev/null
+++ b/test/integration/targets/yum_repository/tasks/main.yml
@@ -0,0 +1,218 @@
+- name: Run tests
+ when: ansible_facts.distribution in ['CentOS', 'Fedora']
+ block:
+ - name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - default.yml
+ paths:
+ - vars
+
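+ # Using "action" with a templated module name runs whichever package manager
+ # the target uses (yum or dnf) without duplicating the task for each backend.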
+ - name: ensure {{ yum_repository_test_package }} is uninstalled to begin with
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ yum_repository_test_package }}"
+ state: absent
+
+ - name: disable {{ yum_repository_test_repo.name }}
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ state: absent
+
+ - name: disable {{ yum_repository_test_repo.name }} (Idempotent)
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ state: absent
+ register: test_repo_remove
+
+ - name: check return values
+ assert:
+ that:
+ - "test_repo_remove.repo == yum_repository_test_repo.name"
+ - "test_repo_remove.state == 'absent'"
+
+ - name: check idempotency
+ assert:
+ that: not test_repo_remove.changed
+
+ - name: install {{ yum_repository_test_package }}, which should fail
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ yum_repository_test_package }}"
+ state: present
+ ignore_errors: yes
+ register: test_package_result
+
+ - name: check that install failed
+ assert:
+ that:
+ - test_package_result.failed
+ - test_package_result.msg in expected_messages
+ vars:
+ expected_messages:
+ - No package matching '{{ yum_repository_test_package }}' found available, installed or updated
+ - Failed to install some of the specified packages
+
+ - name: re-add {{ yum_repository_test_repo.name }}
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ description: "{{ yum_repository_test_repo.description }}"
+ baseurl: "{{ yum_repository_test_repo.baseurl }}"
+ gpgcheck: no
+ state: present
+ register: test_repo_add
+
+ - name: check return values
+ assert:
+ that:
+ - test_repo_add.repo == yum_repository_test_repo.name
+ - test_repo_add.state == 'present'
+
+ - name: get repolist
+ shell: yum repolist
+ register: repolist
+ until: repolist.rc == 0
+ retries: 5
+ args:
+ warn: no
+
+ - name: ensure {{ yum_repository_test_repo.name }} was added
+ assert:
+ that:
+ - yum_repository_test_repo.name in repolist.stdout
+ - test_repo_add.changed
+
+ - name: install {{ yum_repository_test_package }}
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ yum_repository_test_package }}"
+ state: present
+ register: test_package_result
+
+ - name: check that {{ yum_repository_test_package }} was successfully installed
+ assert:
+ that:
+ - test_package_result.changed
+
+ - name: remove {{ yum_repository_test_package }}
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ yum_repository_test_package }}"
+ state: absent
+
+ - name: change configuration of {{ yum_repository_test_repo.name }} repo
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ baseurl: "{{ yum_repository_test_repo.baseurl }}"
+ description: New description
+ async: no
+ enablegroups: no
+ file: "{{ yum_repository_test_repo.name ~ 2 }}"
+ ip_resolve: 4
+ keepalive: no
+ register: test_repo_add1
+
+ - name: check that options are correctly written to the repo file
+ assert:
+ that:
+ - "'async = 0' in repo_file_contents"
+ - "'name = New description' in repo_file_contents"
+ - "'enablegroups = 0' in repo_file_contents"
+ - "'ip_resolve = 4' in repo_file_contents"
+ - "'keepalive = 0' in repo_file_contents"
+ vars:
+ repo_file: "{{ '/etc/yum.repos.d/' ~ yum_repository_test_repo.name ~ '2.repo' }}"
+ repo_file_contents: "{{ lookup('file', repo_file) }}"
+
+ - name: check new config doesn't change (Idempotent)
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ baseurl: "{{ yum_repository_test_repo.baseurl }}"
+ description: New description
+ async: no
+ enablegroups: no
+ file: "{{ yum_repository_test_repo.name ~ 2 }}"
+ ip_resolve: 4
+ keepalive: no
+ register: test_repo_add2
+
+ - name: check idempotency
+ assert:
+ that:
+ - test_repo_add1 is changed
+ - test_repo_add2 is not changed
+
+ - name: re-enable the {{ yum_repository_test_repo.name }} repo
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ description: "{{ yum_repository_test_repo.description }}"
+ baseurl: "{{ yum_repository_test_repo.baseurl }}"
+ state: present
+
+ - name: re-enable the {{ yum_repository_test_repo.name }} repo (Idempotent)
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ description: "{{ yum_repository_test_repo.description }}"
+ baseurl: "{{ yum_repository_test_repo.baseurl }}"
+ state: present
+ register: test_repo_add
+
+ - name: check idempotency
+ assert:
+ that: test_repo_add is not changed
+
+ - name: Test list options
+ yum_repository:
+ name: listtest
+ description: Testing list feature
+ baseurl:
+ - "{{ yum_repository_test_repo.baseurl }}"
+ - "{{ yum_repository_test_repo.baseurl | replace('download[0-9]?\\.', 'download2\\.', 1) }}"
+ gpgkey:
+ - file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }}
+ - file:///etc/pki/rpm-gpg/RPM-GPG2-KEY-EPEL-{{ ansible_facts.distribution_major_version }}
+ exclude:
+ - aaa
+ - bbb
+ includepkgs:
+ - ccc
+ - ddd
+
+ - name: Assert that lists were properly inserted
+ assert:
+ that:
+ - url_hostname in repofile
+ - url_hostname2 in repofile
+ - "'RPM-GPG-KEY-EPEL' in repofile"
+ - "'RPM-GPG2-KEY-EPEL' in repofile"
+ - "'aaa bbb' in repofile"
+ - "'ccc ddd' in repofile"
+ vars:
+ repofile: "{{ lookup('file', '/etc/yum.repos.d/listtest.repo') }}"
+ url_hostname: "{{ yum_repository_test_repo.baseurl | urlsplit('hostname') }}"
+ url_hostname2: "{{ url_hostname | replace('download[0-9]?\\.', 'download2\\.', 1) }}"
+
+ - name: CLEANUP | Remove list test repo
+ yum_repository:
+ name: listtest
+ state: absent
+
+ - name: CLEANUP | Remove {{ yum_repository_test_repo.name }}
+ yum_repository:
+ name: "{{ yum_repository_test_repo.name }}"
+ state: absent
+
+ - name: CLEANUP | Enable EPEL
+ yum_repository:
+ name: epel
+ state: present
+ description: "{{ yum_repository_epel.description | default(omit) }}"
+ metalink: "{{ yum_repository_epel.metalink | default(omit) }}"
+ mirrorlist: "{{ yum_repository_epel.mirrorlist | default(omit) }}"
+ gpgkey: "{{ yum_repository_epel.gpgkey }}"
+ gpgcheck: yes
+ when: ansible_facts.distribution == 'CentOS'
diff --git a/test/integration/targets/yum_repository/vars/CentOS-8.yml b/test/integration/targets/yum_repository/vars/CentOS-8.yml
new file mode 100644
index 00000000..22d4d13e
--- /dev/null
+++ b/test/integration/targets/yum_repository/vars/CentOS-8.yml
@@ -0,0 +1,10 @@
+yum_repository_test_package: sshpass
+yum_repository_test_repo:
+ name: epel
+ description: EPEL yum repo
+ baseurl: https://download.fedoraproject.org/pub/epel/$releasever/Everything/$basearch
+
+yum_repository_epel:
+ description: Extra Packages for Enterprise Linux $releasever - $basearch
+ metalink: https://mirrors.fedoraproject.org/metalink?repo=epel-$releasever&arch=$basearch&infra=$infra&content=$contentdir
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }}
diff --git a/test/integration/targets/yum_repository/vars/CentOS.yml b/test/integration/targets/yum_repository/vars/CentOS.yml
new file mode 100644
index 00000000..db9947d5
--- /dev/null
+++ b/test/integration/targets/yum_repository/vars/CentOS.yml
@@ -0,0 +1,10 @@
+yum_repository_test_package: sl
+yum_repository_test_repo:
+ name: epel
+ description: EPEL yum repo
+ baseurl: https://archives.fedoraproject.org/pub/archive/epel/{{ ansible_facts.distribution_major_version }}/$basearch
+
+yum_repository_epel:
+ description: Extra Packages for Enterprise Linux {{ ansible_facts.distribution_major_version }} - $basearch
+ mirrorlist: https://mirrors.fedoraproject.org/metalink?repo=epel-{{ ansible_facts.distribution_major_version }}&arch=$basearch
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }}
diff --git a/test/integration/targets/yum_repository/vars/Fedora.yml b/test/integration/targets/yum_repository/vars/Fedora.yml
new file mode 100644
index 00000000..8c37eaa1
--- /dev/null
+++ b/test/integration/targets/yum_repository/vars/Fedora.yml
@@ -0,0 +1,5 @@
+yum_repository_test_package: libbdplus
+yum_repository_test_repo:
+ name: rpmfusion-free
+ description: RPM Fusion for Fedora {{ ansible_distribution_major_version }} - Free
+ baseurl: http://download1.rpmfusion.org/free/fedora/releases/{{ ansible_distribution_major_version }}/Everything/{{ ansible_architecture }}/os/
diff --git a/test/integration/targets/yum_repository/vars/default.yml b/test/integration/targets/yum_repository/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/integration/targets/yum_repository/vars/default.yml
diff --git a/test/lib/ansible_test/__init__.py b/test/lib/ansible_test/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/lib/ansible_test/__init__.py
diff --git a/test/lib/ansible_test/_data/ansible.cfg b/test/lib/ansible_test/_data/ansible.cfg
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/lib/ansible_test/_data/ansible.cfg
diff --git a/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py b/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py
new file mode 100755
index 00000000..d12b6334
--- /dev/null
+++ b/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# PYTHON_ARGCOMPLETE_OK
+"""Command line entry point for ansible-test."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main program entry point."""
+ ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ source_root = os.path.join(ansible_root, 'test', 'lib')
+
+ if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', 'cli.py')):
+ # running from source, use that version of ansible-test instead of any version that may already be installed
+ sys.path.insert(0, source_root)
+
+ # noinspection PyProtectedMember
+ from ansible_test._internal.cli import main as cli_main
+
+ cli_main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/collection_detail.py b/test/lib/ansible_test/_data/collection_detail.py
new file mode 100644
index 00000000..e7c883ca
--- /dev/null
+++ b/test/lib/ansible_test/_data/collection_detail.py
@@ -0,0 +1,95 @@
+"""Retrieve collection detail."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import sys
+
+import yaml
+
+
+# See semantic versioning specification (https://semver.org/)
+NUMERIC_IDENTIFIER = r'(?:0|[1-9][0-9]*)'
+ALPHANUMERIC_IDENTIFIER = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)'
+
+PRE_RELEASE_IDENTIFIER = r'(?:' + NUMERIC_IDENTIFIER + r'|' + ALPHANUMERIC_IDENTIFIER + r')'
+BUILD_IDENTIFIER = r'[a-zA-Z0-9-]+' # equivalent to r'(?:[0-9]+|' + ALPHANUMERIC_IDENTIFIER + r')'
+
+VERSION_CORE = NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER
+PRE_RELEASE = r'(?:-' + PRE_RELEASE_IDENTIFIER + r'(?:\.' + PRE_RELEASE_IDENTIFIER + r')*)?'
+BUILD = r'(?:\+' + BUILD_IDENTIFIER + r'(?:\.' + BUILD_IDENTIFIER + r')*)?'
+
+SEMVER_REGULAR_EXPRESSION = r'^' + VERSION_CORE + PRE_RELEASE + BUILD + r'$'
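+# For illustration: '1.2.3', '1.0.0-alpha.1' and '1.2.3+build.5' all match this
+# pattern, while '1.2' (missing patch) and '01.2.3' (leading zero) do not.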
+
+
+def validate_version(version):
+ """Raise exception if the provided version is not None or a valid semantic version."""
+ if version is None:
+ return
+ if not re.match(SEMVER_REGULAR_EXPRESSION, version):
+ raise Exception('Invalid version number "{0}". Collection version numbers must '
+ 'follow semantic versioning (https://semver.org/).'.format(version))
+
+
+def read_manifest_json(collection_path):
+ """Return collection information from the MANIFEST.json file."""
+ manifest_path = os.path.join(collection_path, 'MANIFEST.json')
+
+ if not os.path.exists(manifest_path):
+ return None
+
+ try:
+ with open(manifest_path) as manifest_file:
+ manifest = json.load(manifest_file)
+
+ collection_info = manifest.get('collection_info') or dict()
+
+ result = dict(
+ version=collection_info.get('version'),
+ )
+ validate_version(result['version'])
+ except Exception as ex: # pylint: disable=broad-except
+ raise Exception('{0}: {1}'.format(os.path.basename(manifest_path), ex))
+
+ return result
+
+
+def read_galaxy_yml(collection_path):
+ """Return collection information from the galaxy.yml file."""
+ galaxy_path = os.path.join(collection_path, 'galaxy.yml')
+
+ if not os.path.exists(galaxy_path):
+ return None
+
+ try:
+ with open(galaxy_path) as galaxy_file:
+ galaxy = yaml.safe_load(galaxy_file)
+
+ result = dict(
+ version=galaxy.get('version'),
+ )
+ validate_version(result['version'])
+ except Exception as ex: # pylint: disable=broad-except
+ raise Exception('{0}: {1}'.format(os.path.basename(galaxy_path), ex))
+
+ return result
+
+
+def main():
+ """Retrieve collection detail."""
+ collection_path = sys.argv[1]
+
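+ # Prefer MANIFEST.json (present in built and installed collections) and
+ # fall back to galaxy.yml (used in collection source trees).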
+ try:
+ result = read_manifest_json(collection_path) or read_galaxy_yml(collection_path) or dict()
+ except Exception as ex: # pylint: disable=broad-except
+ result = dict(
+ error='{0}'.format(ex),
+ )
+
+ print(json.dumps(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/completion/docker.txt b/test/lib/ansible_test/_data/completion/docker.txt
new file mode 100644
index 00000000..978ba703
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/docker.txt
@@ -0,0 +1,12 @@
+default name=quay.io/ansible/default-test-container:2.9.0 python=3.6,2.6,2.7,3.5,3.7,3.8,3.9 seccomp=unconfined context=collection
+default name=quay.io/ansible/ansible-base-test-container:1.7.0 python=3.6,2.6,2.7,3.5,3.7,3.8,3.9 seccomp=unconfined context=ansible-base
+centos6 name=quay.io/ansible/centos6-test-container:1.26.0 python=2.6 seccomp=unconfined
+centos7 name=quay.io/ansible/centos7-test-container:1.17.0 python=2.7 seccomp=unconfined
+centos8 name=quay.io/ansible/centos8-test-container:1.21.0 python=3.6 seccomp=unconfined
+fedora30 name=quay.io/ansible/fedora30-test-container:1.17.0 python=3.7
+fedora31 name=quay.io/ansible/fedora31-test-container:1.17.0 python=3.7
+fedora32 name=quay.io/ansible/fedora32-test-container:1.17.0 python=3.8
+opensuse15py2 name=quay.io/ansible/opensuse15py2-test-container:1.21.0 python=2.7
+opensuse15 name=quay.io/ansible/opensuse15-test-container:1.21.0 python=3.6
+ubuntu1604 name=quay.io/ansible/ubuntu1604-test-container:1.21.0 python=2.7 seccomp=unconfined
+ubuntu1804 name=quay.io/ansible/ubuntu1804-test-container:1.21.0 python=3.6 seccomp=unconfined
diff --git a/test/lib/ansible_test/_data/completion/network.txt b/test/lib/ansible_test/_data/completion/network.txt
new file mode 100644
index 00000000..dca911f8
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/network.txt
@@ -0,0 +1,2 @@
+ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli
+vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli
diff --git a/test/lib/ansible_test/_data/completion/remote.txt b/test/lib/ansible_test/_data/completion/remote.txt
new file mode 100644
index 00000000..109a8088
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/remote.txt
@@ -0,0 +1,11 @@
+freebsd/11.1 python=2.7,3.6 python_dir=/usr/local/bin
+freebsd/12.1 python=3.6,2.7 python_dir=/usr/local/bin
+osx/10.11 python=2.7 python_dir=/usr/local/bin
+macos/10.15 python=3.8 python_dir=/usr/local/bin
+rhel/7.6 python=2.7
+rhel/7.8 python=2.7
+rhel/7.9 python=2.7
+rhel/8.1 python=3.6
+rhel/8.2 python=3.6
+aix/7.2 python=2.7 httptester=disabled temp-unicode=disabled pip-check=disabled
+power/centos/7 python=2.7
diff --git a/test/lib/ansible_test/_data/completion/windows.txt b/test/lib/ansible_test/_data/completion/windows.txt
new file mode 100644
index 00000000..a4f3bf58
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/windows.txt
@@ -0,0 +1,6 @@
+2008
+2008-R2
+2012
+2012-R2
+2016
+2019 \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/coveragerc b/test/lib/ansible_test/_data/coveragerc
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/lib/ansible_test/_data/coveragerc
diff --git a/test/lib/ansible_test/_data/cryptography-constraints.txt b/test/lib/ansible_test/_data/cryptography-constraints.txt
new file mode 100644
index 00000000..8e3e99b4
--- /dev/null
+++ b/test/lib/ansible_test/_data/cryptography-constraints.txt
@@ -0,0 +1,3 @@
+# do not add a cryptography constraint here, see the get_cryptography_requirement function in executor.py for details
+idna < 2.8 ; python_version < '2.7' # idna 2.8+ requires python 2.7+
+cffi != 1.14.4 # Fails on systems with older gcc. Should be fixed in the next release. https://foss.heptapod.net/pypy/cffi/-/issues/480
diff --git a/test/lib/ansible_test/_data/injector/ansible b/test/lib/ansible_test/_data/injector/ansible
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-config b/test/lib/ansible_test/_data/injector/ansible-config
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-config
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-connection b/test/lib/ansible_test/_data/injector/ansible-connection
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-connection
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-console b/test/lib/ansible_test/_data/injector/ansible-console
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-console
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-doc b/test/lib/ansible_test/_data/injector/ansible-doc
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-doc
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-galaxy b/test/lib/ansible_test/_data/injector/ansible-galaxy
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-galaxy
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-inventory b/test/lib/ansible_test/_data/injector/ansible-inventory
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-inventory
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-playbook b/test/lib/ansible_test/_data/injector/ansible-playbook
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-playbook
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-pull b/test/lib/ansible_test/_data/injector/ansible-pull
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-pull
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-test b/test/lib/ansible_test/_data/injector/ansible-test
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-test
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-vault b/test/lib/ansible_test/_data/injector/ansible-vault
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-vault
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/importer.py b/test/lib/ansible_test/_data/injector/importer.py
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/importer.py
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/pytest b/test/lib/ansible_test/_data/injector/pytest
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/pytest
@@ -0,0 +1 @@
+python.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/python.py b/test/lib/ansible_test/_data/injector/python.py
new file mode 100755
index 00000000..290b995c
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/python.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""Provides an entry point for python scripts and python modules on the controller with the current python interpreter and optional code coverage collection."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ name = os.path.basename(__file__)
+ args = [sys.executable]
+
+ coverage_config = os.environ.get('COVERAGE_CONF')
+ coverage_output = os.environ.get('COVERAGE_FILE')
+
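+ # When COVERAGE_CONF is set without COVERAGE_FILE, coverage is not run,
+ # but the coverage module must still be importable (checked below).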
+ if coverage_config:
+ if coverage_output:
+ args += ['-m', 'coverage.__main__', 'run', '--rcfile', coverage_config]
+ else:
+ if sys.version_info >= (3, 4):
+ # noinspection PyUnresolvedReferences
+ import importlib.util
+
+ # noinspection PyUnresolvedReferences
+ found = bool(importlib.util.find_spec('coverage'))
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ try:
+ # noinspection PyDeprecation
+ imp.find_module('coverage')
+ found = True
+ except ImportError:
+ found = False
+
+ if not found:
+ sys.exit('ERROR: Could not find `coverage` module. '
+ 'Did you use a virtualenv created without --system-site-packages or with the wrong interpreter?')
+
+ if name == 'python.py':
+ if len(sys.argv) > 1 and sys.argv[1] == '-c':
+ # prevent simple misuse of python.py with -c which does not work with coverage
+ sys.exit('ERROR: Use `python -c` instead of `python.py -c` to avoid errors when code coverage is collected.')
+ elif name == 'pytest':
+ args += ['-m', 'pytest']
+ else:
+ args += [find_executable(name)]
+
+ args += sys.argv[1:]
+
+ os.execv(args[0], args)
+
+
+def find_executable(name):
+ """
+ :type name: str
+ :rtype: str
+ """
+ path = os.environ.get('PATH', os.path.defpath)
+ seen = set([os.path.abspath(__file__)])
+
+ for base in path.split(os.path.pathsep):
+ candidate = os.path.abspath(os.path.join(base, name))
+
+ if candidate in seen:
+ continue
+
+ seen.add(candidate)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ return candidate
+
+ raise Exception('Executable "%s" not found in path: %s' % (name, path))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh b/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh
new file mode 100644
index 00000000..82f79980
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+# Create and activate a fresh virtual environment with `source virtualenv-isolated.sh`.
+
+rm -rf "${OUTPUT_DIR}/venv"
+"${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv"
+set +ux
+source "${OUTPUT_DIR}/venv/bin/activate"
+set -ux
+
+if [[ "${ANSIBLE_TEST_COVERAGE}" ]]; then
+ pip install coverage -c ../../../runner/requirements/constraints.txt --disable-pip-version-check
+fi
diff --git a/test/lib/ansible_test/_data/injector/virtualenv.sh b/test/lib/ansible_test/_data/injector/virtualenv.sh
new file mode 100644
index 00000000..ccde2974
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/virtualenv.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Create and activate a fresh virtual environment with `source virtualenv.sh`.
+
+rm -rf "${OUTPUT_DIR}/venv"
+"${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --system-site-packages --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv"
+set +ux
+source "${OUTPUT_DIR}/venv/bin/activate"
+set -ux
diff --git a/test/lib/ansible_test/_data/inventory b/test/lib/ansible_test/_data/inventory
new file mode 100644
index 00000000..1b77a7ea
--- /dev/null
+++ b/test/lib/ansible_test/_data/inventory
@@ -0,0 +1,6 @@
+# Do not put test specific entries in this inventory file.
+# For script based test targets (using runme.sh) put the inventory file in the test's directory instead.
+
+[testgroup]
+# ansible_python_interpreter must be set to avoid interpreter discovery
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
new file mode 100644
index 00000000..2e5ff9c6
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
@@ -0,0 +1,19 @@
+---
+- name: set up the global coverage directory for Windows test targets
+ hosts: windows
+ gather_facts: no
+ tasks:
+ - name: create temp directory
+ ansible.windows.win_file:
+ path: '{{ remote_temp_path }}'
+ state: directory
+
+ - name: allow everyone to write to coverage test dir
+ ansible.windows.win_acl:
+ path: '{{ remote_temp_path }}'
+ user: Everyone
+ rights: Modify
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+ type: allow
+ state: present \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
new file mode 100644
index 00000000..ab34dc27
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
@@ -0,0 +1,77 @@
+---
+- name: collect the coverage files from the Windows host
+ hosts: windows
+ gather_facts: no
+ tasks:
+ - name: make sure all vars have been set
+ assert:
+ that:
+ - local_temp_path is defined
+ - remote_temp_path is defined
+
+ - name: zip up all coverage files in the remote coverage directory
+ ansible.windows.win_shell: |
+ $coverage_dir = '{{ remote_temp_path }}'
+ $zip_file = Join-Path -Path $coverage_dir -ChildPath 'coverage.zip'
+ if (Test-Path -LiteralPath $zip_file) {
+ Remove-Item -LiteralPath $zip_file -Force
+ }
+
+ $coverage_files = Get-ChildItem -LiteralPath $coverage_dir -Include '*=coverage*' -File
+
+ $legacy = $false
+ try {
+ # Requires .NET 4.5+, which isn't present on older Windows versions. Remove once 2008/R2 is EOL.
+ # We also can't use the Shell.Application as it will fail on GUI-less servers (Server Core).
+ Add-Type -AssemblyName System.IO.Compression -ErrorAction Stop > $null
+ } catch {
+ $legacy = $true
+ }
+
+ if ($legacy) {
+ New-Item -Path $zip_file -ItemType File > $null
+ $shell = New-Object -ComObject Shell.Application
+ $zip = $shell.Namespace($zip_file)
+ foreach ($file in $coverage_files) {
+ $zip.CopyHere($file.FullName)
+ }
+ } else {
+ $fs = New-Object -TypeName System.IO.FileStream -ArgumentList $zip_file, 'CreateNew'
+ try {
+ $archive = New-Object -TypeName System.IO.Compression.ZipArchive -ArgumentList @(
+ $fs,
+ [System.IO.Compression.ZipArchiveMode]::Create
+ )
+ try {
+ foreach ($file in $coverage_files) {
+ $archive_entry = $archive.CreateEntry($file.Name, 'Optimal')
+ $entry_fs = $archive_entry.Open()
+ try {
+ $file_fs = [System.IO.File]::OpenRead($file.FullName)
+ try {
+ $file_fs.CopyTo($entry_fs)
+ } finally {
+ $file_fs.Dispose()
+ }
+ } finally {
+ $entry_fs.Dispose()
+ }
+ }
+ } finally {
+ $archive.Dispose()
+ }
+ } finally {
+ $fs.Dispose()
+ }
+ }
+
+ - name: fetch coverage zip file to localhost
+ fetch:
+ src: '{{ remote_temp_path }}\coverage.zip'
+ dest: '{{ local_temp_path }}/coverage-{{ inventory_hostname }}.zip'
+ flat: yes
+
+ - name: remove the temporary coverage directory
+ ansible.windows.win_file:
+ path: '{{ remote_temp_path }}'
+ state: absent \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/pytest.ini b/test/lib/ansible_test/_data/pytest.ini
new file mode 100644
index 00000000..2ac56423
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest.ini
@@ -0,0 +1,9 @@
+[pytest]
+xfail_strict = true
+mock_use_standalone_module = true
+# It was decided to stick with "legacy" (aka "xunit1") for now.
+# Currently used pytest versions all support xunit2 format too.
+# Except the one used under Python 2.6 — it doesn't process this option
+# at all. Ref:
+# https://github.com/ansible/ansible/pull/66445#discussion_r372530176
+junit_family = xunit1
diff --git a/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
new file mode 100644
index 00000000..67c69f15
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
@@ -0,0 +1,67 @@
+"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
+ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')
+
+
+# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0
+# NB: this code should never run under py2
+def collection_resolve_package_path(path):
+ """Configure the Python package path so that pytest can find our collections."""
+ for parent in path.parents:
+ if str(parent) == ANSIBLE_COLLECTIONS_PATH:
+ return parent
+
+ raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH))
+
+
+# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0
+def collection_pypkgpath(self):
+ """Configure the Python package path so that pytest can find our collections."""
+ for parent in self.parts(reverse=True):
+ if str(parent) == ANSIBLE_COLLECTIONS_PATH:
+ return parent
+
+ raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))
+
+
+def pytest_configure():
+ """Configure this pytest plugin."""
+ try:
+ if pytest_configure.executed:
+ return
+ except AttributeError:
+ pytest_configure.executed = True
+
+ # noinspection PyProtectedMember
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+
+ # allow unit tests to import code from collections
+
+ # noinspection PyProtectedMember
+ _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install() # pylint: disable=protected-access
+
+ try:
+ # noinspection PyProtectedMember
+ from _pytest import pathlib as _pytest_pathlib
+ except ImportError:
+ _pytest_pathlib = None
+
+ if hasattr(_pytest_pathlib, 'resolve_package_path'):
+ _pytest_pathlib.resolve_package_path = collection_resolve_package_path
+ else:
+ # looks like pytest < 6.0.0, use the old hack against py.path
+ # noinspection PyProtectedMember
+ import py._path.local
+
+ # force collections unit tests to be loaded with the ansible_collections namespace
+ # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
+ # noinspection PyProtectedMember
+ py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access
+
+
+pytest_configure()
diff --git a/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py
new file mode 100644
index 00000000..b05298ab
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py
@@ -0,0 +1,68 @@
+"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`. PYTEST_DONT_REWRITE"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def pytest_configure():
+ """Configure this pytest plugin."""
+ try:
+ if pytest_configure.executed:
+ return
+ except AttributeError:
+ pytest_configure.executed = True
+
+ try:
+ import coverage
+ except ImportError:
+ coverage = None
+
+ try:
+ coverage.Coverage
+ except AttributeError:
+ coverage = None
+
+ if not coverage:
+ return
+
+ import gc
+ import os
+
+ coverage_instances = []
+
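+ # Look through live objects for Coverage instances that are already running
+ # (for example, one started by the test runner) so their data is saved too.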
+ for obj in gc.get_objects():
+ if isinstance(obj, coverage.Coverage):
+ coverage_instances.append(obj)
+
+ if not coverage_instances:
+ coverage_config = os.environ.get('COVERAGE_CONF')
+
+ if not coverage_config:
+ return
+
+ coverage_output = os.environ.get('COVERAGE_FILE')
+
+ if not coverage_output:
+ return
+
+ cov = coverage.Coverage(config_file=coverage_config)
+ coverage_instances.append(cov)
+ else:
+ cov = None
+
+ # noinspection PyProtectedMember
+ os_exit = os._exit # pylint: disable=protected-access
+
+ def coverage_exit(*args, **kwargs):
+ for instance in coverage_instances:
+ instance.stop()
+ instance.save()
+
+ os_exit(*args, **kwargs)
+
+ os._exit = coverage_exit # pylint: disable=protected-access
+
+ if cov:
+ cov.start()
+
+
+pytest_configure()
diff --git a/test/lib/ansible_test/_data/quiet_pip.py b/test/lib/ansible_test/_data/quiet_pip.py
new file mode 100644
index 00000000..7d2a6d16
--- /dev/null
+++ b/test/lib/ansible_test/_data/quiet_pip.py
@@ -0,0 +1,70 @@
+"""Custom entry-point for pip that filters out unwanted logging and warnings."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import re
+import runpy
+import warnings
+
+BUILTIN_FILTERER_FILTER = logging.Filterer.filter
+
+LOGGING_MESSAGE_FILTER = re.compile("^("
+ ".*Running pip install with root privileges is generally not a good idea.*|" # custom Fedora patch [1]
+ "DEPRECATION: Python 2.7 will reach the end of its life .*|" # pip 19.2.3
+ "Ignoring .*: markers .* don't match your environment|"
+ "Requirement already satisfied.*"
+ ")$")
+
+# [1] https://src.fedoraproject.org/rpms/python-pip/blob/master/f/emit-a-warning-when-running-with-root-privileges.patch
+
+WARNING_MESSAGE_FILTERS = (
+ # DEPRECATION: Python 2.6 is no longer supported by the Python core team, please upgrade your Python.
+ # A future version of pip will drop support for Python 2.6
+ 'Python 2.6 is no longer supported by the Python core team, ',
+
+ # {path}/python2.6/lib/python2.6/site-packages/pip/_vendor/urllib3/util/ssl_.py:137: InsecurePlatformWarning:
+ # A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail.
+ # You can upgrade to a newer version of Python to solve this.
+ # For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
+ 'A true SSLContext object is not available. ',
+
+ # {path}/python2.6/lib/python2.6/site-packages/pip/_vendor/urllib3/util/ssl_.py:339: SNIMissingWarning:
+ # An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform.
+ # This may cause the server to present an incorrect TLS certificate, which can cause validation failures.
+ # You can upgrade to a newer version of Python to solve this.
+ # For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
+ 'An HTTPS request has been made, but the SNI ',
+
+ # DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained.
+ # pip 21.0 will drop support for Python 2.7 in January 2021.
+ # More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support
+ 'DEPRECATION: Python 2.7 reached the end of its life ',
+)
+
+
+def custom_filterer_filter(self, record):
+ """Globally omit logging of unwanted messages."""
+ if LOGGING_MESSAGE_FILTER.search(record.getMessage()):
+ return 0
+
+ return BUILTIN_FILTERER_FILTER(self, record)
+
+
+def main():
+ """Main program entry point."""
+ # Filtering logging output globally avoids having to intercept stdout/stderr.
+ # It also avoids problems with loss of color output and mixing up the order of stdout/stderr messages.
+ logging.Filterer.filter = custom_filterer_filter
+
+ for message_filter in WARNING_MESSAGE_FILTERS:
+ # Setting filterwarnings in code is necessary because of the following:
+ # Python 2.6 does not support the PYTHONWARNINGS environment variable. It does support the -W option.
+ # Python 2.7 cannot use the -W option to match warning text after a colon. This makes it impossible to match specific warning messages.
+ warnings.filterwarnings('ignore', message_filter)
+
+ runpy.run_module('pip.__main__', run_name='__main__', alter_sys=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/requirements/ansible-test.txt b/test/lib/ansible_test/_data/requirements/ansible-test.txt
new file mode 100644
index 00000000..7b596e1b
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/ansible-test.txt
@@ -0,0 +1,6 @@
+argparse ; python_version < '2.7'
+
+# pip 7.1 added support for constraints, which are required by ansible-test to install most python requirements
+# see https://github.com/pypa/pip/blame/e648e00dc0226ade30ade99591b245b0c98e86c9/NEWS.rst#L1258
+pip >= 7.1, < 10 ; python_version < '2.7' # pip 10+ drops support for python 2.6 (sanity_ok)
+pip >= 7.1 ; python_version >= '2.7' # sanity_ok
diff --git a/test/lib/ansible_test/_data/requirements/constraints.txt b/test/lib/ansible_test/_data/requirements/constraints.txt
new file mode 100644
index 00000000..f613ef0e
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/constraints.txt
@@ -0,0 +1,64 @@
+coverage >= 4.5.1, < 5.0.0 ; python_version < '3.7' # coverage 4.4 required for "disable_warnings" support but 4.5.1 needed for bug fixes, coverage 5.0+ incompatible
+coverage >= 4.5.2, < 5.0.0 ; python_version == '3.7' # coverage 4.5.2 fixes bugs in support for python 3.7, coverage 5.0+ incompatible
+coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible
+cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6
+# do not add a cryptography constraint here unless it is for python version incompatibility, see the get_cryptography_requirement function in executor.py for details
+deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3
+jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
+urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
+pywinrm >= 0.3.0 # message encryption support
+sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
+sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
+pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers
+wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
+yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
+pycrypto >= 2.6 # Need features found in 2.6 and greater
+ncclient >= 0.5.2 # Need features added in 0.5.2 and greater
+idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead
+paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
+pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6
+pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
+pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later
+pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+)
+ntlm-auth >= 1.3.0 # message encryption support using cryptography
+requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
+requests-ntlm >= 1.1.0 # message encryption support
+requests-credssp >= 0.1.0 # message encryption support
+voluptuous >= 0.11.0 # Schema recursion via Self
+openshift >= 0.6.2, < 0.9.0 # merge_type support
+virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
+pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later
+pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later
+pyparsing < 3.0.0 ; python_version < '3.5' # pyparsing 3 and later require python 3.5 or later
+pyfmg == 0.6.1 # newer versions do not pass current unit tests
+pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
+pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later
+mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
+xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later
+lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
+pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later
+pyone == 1.1.9 # newer versions do not pass current integration tests
+boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support
+botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support
+botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca
+setuptools < 37 ; python_version == '2.6' # setuptools 37 and later require python 2.7 or later
+setuptools < 45 ; python_version == '2.7' # setuptools 45 and later require python 3.5 or later
+
+# freeze antsibull-changelog for consistent test results
+antsibull-changelog == 0.7.0
+
+# Make sure we have a new enough antsibull for the CLI args we use
+antsibull >= 0.21.0
+
+# freeze pylint and its requirements for consistent test results
+astroid == 2.2.5
+isort == 4.3.15
+lazy-object-proxy == 1.3.1
+mccabe == 0.6.1
+pylint == 2.3.1
+typed-ast == 1.4.0 # 1.4.0 is required to compile on Python 3.8
+wrapt == 1.11.1
+
+# freeze pycodestyle for consistent test results
+pycodestyle == 2.6.0
diff --git a/test/lib/ansible_test/_data/requirements/coverage.txt b/test/lib/ansible_test/_data/requirements/coverage.txt
new file mode 100644
index 00000000..4ebc8aea
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/coverage.txt
@@ -0,0 +1 @@
+coverage
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt
new file mode 100644
index 00000000..aa2f71cc
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt
@@ -0,0 +1,3 @@
+boto
+boto3
+botocore
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt
new file mode 100644
index 00000000..6df1a4e8
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt
@@ -0,0 +1,39 @@
+packaging
+requests[security]
+xmltodict
+azure-cli-core==2.0.35
+azure-cli-nspkg==3.0.2
+azure-common==1.1.11
+azure-mgmt-authorization==0.51.1
+azure-mgmt-batch==5.0.1
+azure-mgmt-cdn==3.0.0
+azure-mgmt-compute==10.0.0
+azure-mgmt-containerinstance==1.4.0
+azure-mgmt-containerregistry==2.0.0
+azure-mgmt-containerservice==4.4.0
+azure-mgmt-dns==2.1.0
+azure-mgmt-keyvault==1.1.0
+azure-mgmt-marketplaceordering==0.1.0
+azure-mgmt-monitor==0.5.2
+azure-mgmt-network==4.0.0
+azure-mgmt-nspkg==2.0.0
+azure-mgmt-redis==5.0.0
+azure-mgmt-resource==2.1.0
+azure-mgmt-rdbms==1.4.1
+azure-mgmt-servicebus==0.5.3
+azure-mgmt-sql==0.10.0
+azure-mgmt-storage==3.1.0
+azure-mgmt-trafficmanager==0.50.0
+azure-mgmt-web==0.41.0
+azure-nspkg==2.0.0
+azure-storage==0.35.1
+msrest==0.6.10
+msrestazure==0.6.2
+azure-keyvault==1.0.0a1
+azure-graphrbac==0.40.0
+azure-mgmt-cosmosdb==0.5.2
+azure-mgmt-hdinsight==0.1.0
+azure-mgmt-devtestlabs==3.0.0
+azure-mgmt-loganalytics==0.2.0
+azure-mgmt-automation==0.1.1
+azure-mgmt-iothub==0.7.0
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt
new file mode 100644
index 00000000..f0a89b91
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt
@@ -0,0 +1,2 @@
+cs
+sshpubkeys
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt
new file mode 100644
index 00000000..a6580e69
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt
@@ -0,0 +1 @@
+hcloud>=1.6.0 ; python_version >= '2.7' and python_version < '3.9' # Python 2.6 is not supported (sanity_ok); Only hcloud >= 1.6.0 supports Floating IPs with names; Python 3.9 and later are not supported
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt
new file mode 100644
index 00000000..be611454
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt
@@ -0,0 +1 @@
+infoblox-client
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt
new file mode 100644
index 00000000..acd34668
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt
@@ -0,0 +1 @@
+pyone \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt
new file mode 100644
index 00000000..269bf090
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt
@@ -0,0 +1 @@
+openshift
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt
new file mode 100644
index 00000000..fd8f1398
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt
@@ -0,0 +1,2 @@
+pyvmomi
+git+https://github.com/vmware/vsphere-automation-sdk-python.git ; python_version >= '2.7' # Python 2.6 is not supported
diff --git a/test/lib/ansible_test/_data/requirements/integration.txt b/test/lib/ansible_test/_data/requirements/integration.txt
new file mode 100644
index 00000000..2c562615
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.txt
@@ -0,0 +1,6 @@
+cryptography
+jinja2
+junit-xml
+ordereddict ; python_version < '2.7'
+packaging
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/network-integration.txt b/test/lib/ansible_test/_data/requirements/network-integration.txt
new file mode 100644
index 00000000..726d2943
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/network-integration.txt
@@ -0,0 +1,7 @@
+cryptography
+jinja2
+junit-xml
+ordereddict ; python_version < '2.7' # ansible-test junit callback plugin requirement
+packaging
+paramiko
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
new file mode 100644
index 00000000..abd6c5fd
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
@@ -0,0 +1,2 @@
+jinja2 # ansible-base requirement
+pyyaml # ansible-base requirement
diff --git a/test/lib/ansible_test/_data/requirements/sanity.changelog.txt b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt
new file mode 100644
index 00000000..8a98acc9
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt
@@ -0,0 +1,2 @@
+# changelog build requires python 3.6+
+antsibull-changelog ; python_version >= '3.6'
diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.txt b/test/lib/ansible_test/_data/requirements/sanity.import.txt
new file mode 100644
index 00000000..17e375ce
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.import.txt
@@ -0,0 +1,2 @@
+pyyaml # required for the collection loader to parse yaml for plugin routing
+virtualenv ; python_version <= '2.7' # virtualenv required on Python 2.x, but on Python 3.x we can use the built-in venv instead
diff --git a/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
new file mode 100644
index 00000000..c3726e8b
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
@@ -0,0 +1 @@
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pep8.txt b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt
new file mode 100644
index 00000000..282a93fb
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt
@@ -0,0 +1 @@
+pycodestyle
diff --git a/test/lib/ansible_test/_data/requirements/sanity.ps1 b/test/lib/ansible_test/_data/requirements/sanity.ps1
new file mode 100755
index 00000000..1ea1f8e5
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.ps1
@@ -0,0 +1,45 @@
+#!/usr/bin/env pwsh
+param (
+ [Switch]
+ $IsContainer
+)
+
+#Requires -Version 6
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$ProgressPreference = 'SilentlyContinue'
+
+Function Install-PSModule {
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [String]
+ $Name,
+
+ [Parameter(Mandatory=$true)]
+ [Version]
+ $RequiredVersion
+ )
+
+ # In case PSGallery is down, check whether the module is already installed before trying to install it.
+ $installedModule = Get-Module -Name $Name -ListAvailable | Where-Object Version -eq $RequiredVersion
+ if (-not $installedModule) {
+ Install-Module -Name $Name -RequiredVersion $RequiredVersion -Scope CurrentUser
+ }
+}
+
+Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
+Install-PSModule -Name PSScriptAnalyzer -RequiredVersion 1.18.0
+
+if ($IsContainer) {
+ # PSScriptAnalyzer contains lots of JSON files for the UseCompatibleCommands check. We don't use this rule, so by
+ # removing the contents we can save 200MB in the docker image (or more in the future).
+ # https://github.com/PowerShell/PSScriptAnalyzer/blob/master/RuleDocumentation/UseCompatibleCommands.md
+ $pssaPath = (Get-Module -ListAvailable -Name PSScriptAnalyzer).ModuleBase
+ $compatPath = Join-Path -Path $pssaPath -ChildPath compatibility_profiles -AdditionalChildPath '*'
+ Remove-Item -Path $compatPath -Recurse -Force
+}
+
+# Install the PSCustomUseLiteralPath rule
+Install-PSModule -Name PSSA-PSCustomUseLiteralPath -RequiredVersion 0.1.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pylint.txt b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt
new file mode 100644
index 00000000..1b800bd0
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt
@@ -0,0 +1,3 @@
+pylint ; python_version < '3.9' # installation fails on python 3.9.0b1
+pyyaml # needed for collection_detail.py
+mccabe # pylint complexity testing
diff --git a/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt b/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt
new file mode 100644
index 00000000..3a5eeed1
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt
@@ -0,0 +1 @@
+rstcheck
diff --git a/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
new file mode 100644
index 00000000..edd96991
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
@@ -0,0 +1,2 @@
+pyyaml
+voluptuous
diff --git a/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
new file mode 100644
index 00000000..5c0fca78
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
@@ -0,0 +1,3 @@
+jinja2 # ansible-base requirement
+pyyaml # needed for collection_detail.py
+voluptuous
diff --git a/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
new file mode 100644
index 00000000..b2c729ca
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
@@ -0,0 +1 @@
+yamllint
diff --git a/test/lib/ansible_test/_data/requirements/units.txt b/test/lib/ansible_test/_data/requirements/units.txt
new file mode 100644
index 00000000..307d7c35
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/units.txt
@@ -0,0 +1,7 @@
+cryptography
+jinja2
+mock
+pytest
+pytest-mock
+pytest-xdist
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/windows-integration.txt b/test/lib/ansible_test/_data/requirements/windows-integration.txt
new file mode 100644
index 00000000..86de35ee
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/windows-integration.txt
@@ -0,0 +1,11 @@
+cryptography
+jinja2
+junit-xml
+ntlm-auth
+ordereddict ; python_version < '2.7' # ansible-test junit callback plugin requirement
+requests-ntlm
+requests-credssp
+packaging
+pypsrp
+pywinrm[credssp]
+pyyaml
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json
new file mode 100644
index 00000000..12bbe0d1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json
@@ -0,0 +1,13 @@
+{
+ "all_targets": true,
+ "prefixes": [
+ "lib/ansible/modules/",
+ "lib/ansible/plugins/action/",
+ "plugins/modules/",
+ "plugins/action/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py
new file mode 100755
index 00000000..65142e00
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""Test to verify action plugins have an associated module to provide documentation."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ module_names = set()
+
+ module_prefixes = {
+ 'lib/ansible/modules/': True,
+ 'plugins/modules/': False,
+ }
+
+ action_prefixes = {
+ 'lib/ansible/plugins/action/': True,
+ 'plugins/action/': False,
+ }
+
+ for path in paths:
+ full_name = get_full_name(path, module_prefixes)
+
+ if full_name:
+ module_names.add(full_name)
+
+ for path in paths:
+ full_name = get_full_name(path, action_prefixes)
+
+ if full_name and full_name not in module_names:
+ print('%s: action plugin has no matching module to provide documentation' % path)
+
+
+def get_full_name(path, prefixes):
+ """Return the full name of the plugin at the given path by matching against the given path prefixes, or None if no match is found."""
+ for prefix, flat in prefixes.items():
+ if path.startswith(prefix):
+ relative_path = os.path.relpath(path, prefix)
+
+ if flat:
+ full_name = os.path.basename(relative_path)
+ else:
+ full_name = relative_path
+
+ full_name = os.path.splitext(full_name)[0]
+
+ name = os.path.basename(full_name)
+
+ if name == '__init__':
+ return None
+
+ if name.startswith('_'):
+ name = name[1:]
+
+ full_name = os.path.join(os.path.dirname(full_name), name).replace(os.path.sep, '.')
+
+ return full_name
+
+ return None
+
+
+if __name__ == '__main__':
+ main()
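
For illustration, a standalone sketch of the flat-versus-nested prefix mapping that
get_full_name implements above; the sample paths are invented:

    import os

    def name_for(path, prefix, flat):
        # flat prefixes (ansible-base) keep only the basename; collections keep the subpath
        rel = os.path.relpath(path, prefix)
        base = os.path.basename(rel) if flat else rel
        return os.path.splitext(base)[0].replace(os.path.sep, '.')

    print(name_for('lib/ansible/modules/ping.py', 'lib/ansible/modules/', True))     # ping
    print(name_for('plugins/modules/cloud/instance.py', 'plugins/modules/', False))  # cloud.instance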
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/changelog.json b/test/lib/ansible_test/_data/sanity/code-smell/changelog.json
new file mode 100644
index 00000000..87f223b1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/changelog.json
@@ -0,0 +1,9 @@
+{
+ "intercept": true,
+ "minimum_python_version": "3.6",
+ "prefixes": [
+ "changelogs/config.yaml",
+ "changelogs/fragments/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/changelog.py b/test/lib/ansible_test/_data/sanity/code-smell/changelog.py
new file mode 100755
index 00000000..710b10f6
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/changelog.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import subprocess
+
+
+def main():
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = ('.yml', '.yaml')
+ config_path = 'changelogs/config.yaml'
+
+ # the config must be detected independently of the file list, since the file list only contains the files under test (the changed files)
+ has_config = os.path.exists(config_path)
+ paths_to_check = []
+ for path in paths:
+ if path == config_path:
+ continue
+
+ if path.startswith('changelogs/fragments/.'):
+ if path in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'):
+ continue
+
+ print('%s:%d:%d: file must not be a dotfile' % (path, 0, 0))
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s:%d:%d: extension must be one of: %s' % (path, 0, 0, ', '.join(allowed_extensions)))
+
+ paths_to_check.append(path)
+
+ if not has_config:
+ print('changelogs/config.yaml:0:0: config file does not exist')
+ return
+
+ if not paths_to_check:
+ return
+
+ cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + paths_to_check
+ subprocess.call(cmd) # ignore the return code, rely on the output instead
+
+
+if __name__ == '__main__':
+ main()
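
The lint step simply delegates to antsibull-changelog, so a single fragment can be
checked by hand the same way; a hedged example (the fragment filename is hypothetical):

    import subprocess
    import sys

    # Same module invocation the script builds above; findings go to stdout
    # and the return code is deliberately ignored.
    cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint',
           'changelogs/fragments/99999-example-fix.yml']
    subprocess.call(cmd)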
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json
new file mode 100644
index 00000000..9835f9b6
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json
@@ -0,0 +1,14 @@
+{
+ "prefixes": [
+ "lib/ansible/modules/",
+ "lib/ansible/module_utils/",
+ "plugins/modules/",
+ "plugins/module_utils/",
+ "test/units/",
+ "tests/unit/"
+ ],
+ "files": [
+ "__init__.py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py
new file mode 100755
index 00000000..8bcd7f9e
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ if os.path.getsize(path) > 0:
+ print('%s: empty __init__.py required' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json
new file mode 100644
index 00000000..6f1edb78
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py
new file mode 100755
index 00000000..81081eed
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ lines = path_fd.read().splitlines()
+
+ missing = True
+ if not lines:
+ # Files are allowed to be empty of everything including boilerplate
+ missing = False
+
+ for text in lines:
+ if text in (b'from __future__ import (absolute_import, division, print_function)',
+ b'from __future__ import absolute_import, division, print_function'):
+ missing = False
+ break
+
+ if missing:
+ with open(path) as file:
+ contents = file.read()
+
+ # noinspection PyBroadException
+ try:
+ node = ast.parse(contents)
+
+ # files consisting of only assignments have no need for future import boilerplate
+ # the only exception would be division during assignment, but we'll overlook that for simplicity
+ # the most likely case is that of a documentation only python file
+ if all(isinstance(statement, ast.Assign) for statement in node.body):
+ missing = False
+ except Exception: # pylint: disable=broad-except
+ pass # the compile sanity test will report this error
+
+ if missing:
+ print('%s: missing: from __future__ import (absolute_import, division, print_function)' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json
new file mode 100644
index 00000000..db5c3c98
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json
@@ -0,0 +1,4 @@
+{
+ "text": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py
new file mode 100755
index 00000000..1e4212d1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ contents = path_fd.read()
+
+ if b'\r' in contents:
+ print('%s: use "\\n" for line endings instead of "\\r\\n"' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json
new file mode 100644
index 00000000..6f1edb78
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py
new file mode 100755
index 00000000..28d06f36
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ lines = path_fd.read().splitlines()
+
+ missing = True
+ if not lines:
+ # Files are allowed to be empty of everything including boilerplate
+ missing = False
+
+ for text in lines:
+ if text == b'__metaclass__ = type':
+ missing = False
+ break
+
+ if missing:
+ with open(path) as file:
+ contents = file.read()
+
+ # noinspection PyBroadException
+ try:
+ node = ast.parse(contents)
+
+ # files consisting of only assignments have no need for metaclass boilerplate
+ # the most likely case is that of a documentation only python file
+ if all(isinstance(statement, ast.Assign) for statement in node.body):
+ missing = False
+ except Exception: # pylint: disable=broad-except
+ pass # the compile sanity test will report this error
+
+ if missing:
+ print('%s: missing: __metaclass__ = type' % path)
+
+
+if __name__ == '__main__':
+ main()
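
Taken together with the future-import check earlier, a file that passes both
boilerplate tests begins with exactly these two lines:

    from __future__ import (absolute_import, division, print_function)
    __metaclass__ = type

Documentation-only files consisting purely of assignments are exempt, as the
ast.Assign fallback in both checks shows.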
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json
new file mode 100644
index 00000000..ccee80a2
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json
@@ -0,0 +1,10 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/",
+ "plugins/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py
new file mode 100755
index 00000000..78561d96
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+ASSERT_RE = re.compile(r'^\s*assert[^a-z0-9_:]')
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as f:
+ for i, line in enumerate(f.readlines()):
+ matches = ASSERT_RE.findall(line)
+
+ if matches:
+ lineno = i + 1
+ colno = line.index('assert') + 1
+ print('%s:%d:%d: raise AssertionError instead of: %s' % (path, lineno, colno, matches[0][colno - 1:]))
+
+
+if __name__ == '__main__':
+ main()
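
A quick look at what ASSERT_RE does and does not flag; the sample lines are illustrative:

    import re

    ASSERT_RE = re.compile(r'^\s*assert[^a-z0-9_:]')

    print(bool(ASSERT_RE.search('assert x == 1')))           # True: bare assert, flagged
    print(bool(ASSERT_RE.search('    assert(x)')))           # True: flagged
    print(bool(ASSERT_RE.search('assertion = check()')))     # False: lowercase continues the word
    print(bool(ASSERT_RE.search('self.assertEqual(a, b)')))  # False: not at line start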
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py
new file mode 100755
index 00000000..a35650ef
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(isinstance.*basestring)', text)
+
+ if match:
+ print('%s:%d:%d: do not use `isinstance(s, basestring)`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py
new file mode 100755
index 00000000..e28b24f4
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(?<! six)\.(iteritems)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.items` or `ansible.module_utils.six.iteritems` instead of `dict.iteritems`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
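
The two recommended replacements side by side (the dict is illustrative); the
(?<! six) look-behind above exists precisely so the second form is not flagged:

    from ansible.module_utils.six import iteritems

    counts = {'a': 1, 'b': 2}

    for key, value in counts.items():     # plain Python 3 spelling
        print(key, value)

    for key, value in iteritems(counts):  # portable spelling for 2-and-3 code
        print(key, value)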
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py
new file mode 100755
index 00000000..237ee5b1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'\.(iterkeys)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.keys` or `for key in dict:` instead of `dict.iterkeys`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py
new file mode 100755
index 00000000..4bf92ea9
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(?<! six)\.(itervalues)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.values` or `ansible.module_utils.six.itervalues` instead of `dict.itervalues`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py
new file mode 100755
index 00000000..c925f5b7
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ basic_allow_once = True
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'([^a-zA-Z0-9_]get_exception[^a-zA-Z0-9_])', text)
+
+ if match:
+ if path == 'lib/ansible/module_utils/basic.py' and basic_allow_once:
+ # basic.py is allowed to import get_exception for backwards compatibility but should not call it anywhere
+ basic_allow_once = False
+ continue
+
+ print('%s:%d:%d: do not use `get_exception`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json
new file mode 100644
index 00000000..6f13c86b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json
@@ -0,0 +1,5 @@
+{
+ "include_directories": true,
+ "include_symlinks": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py
new file mode 100755
index 00000000..99432ea1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+# A script to check for illegal filenames on various operating systems. The
+# main rules are derived from restrictions on Windows:
+# https://msdn.microsoft.com/en-us/library/aa365247#naming_conventions
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import struct
+import sys
+
+from ansible.module_utils.basic import to_bytes
+
+ILLEGAL_CHARS = [
+ b'<',
+ b'>',
+ b':',
+ b'"',
+ b'/',
+ b'\\',
+ b'|',
+ b'?',
+ b'*'
+] + [struct.pack("b", i) for i in range(32)]
+
+ILLEGAL_NAMES = [
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+]
+
+ILLEGAL_END_CHARS = [
+ '.',
+ ' ',
+]
+
+
+def check_path(path, is_dir=False):
+ type_name = 'directory' if is_dir else 'file'
+ file_name = os.path.basename(path.rstrip(os.path.sep))
+ name = os.path.splitext(file_name)[0]
+
+ if name.upper() in ILLEGAL_NAMES:
+ print("%s: illegal %s name %s" % (path, type_name, name.upper()))
+
+ if file_name[-1] in ILLEGAL_END_CHARS:
+ print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))
+
+ bfile = to_bytes(file_name, encoding='utf-8')
+ for char in ILLEGAL_CHARS:
+ if char in bfile:
+ bpath = to_bytes(path, encoding='utf-8')
+ print("%s: illegal char '%s' in %s name" % (bpath, char, type_name))
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ check_path(path, is_dir=path.endswith(os.path.sep))
+
+
+if __name__ == '__main__':
+ main()
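
A self-contained illustration of the Windows reserved-name rule above, using only a
subset of ILLEGAL_NAMES and made-up paths:

    import os

    RESERVED = {'CON', 'PRN', 'AUX', 'NUL'}  # subset of ILLEGAL_NAMES above

    for path in ('docs/con.py', 'docs/README', 'logs/aux.txt'):
        stem = os.path.splitext(os.path.basename(path))[0]
        print(path, '->', 'illegal' if stem.upper() in RESERVED else 'ok')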
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json
new file mode 100644
index 00000000..ccee80a2
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json
@@ -0,0 +1,10 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/",
+ "plugins/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py
new file mode 100755
index 00000000..74a36ecc
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+MAIN_DISPLAY_IMPORT = 'from __main__ import display'
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as f:
+ for i, line in enumerate(f.readlines()):
+ if MAIN_DISPLAY_IMPORT in line:
+ lineno = i + 1
+ colno = line.index(MAIN_DISPLAY_IMPORT) + 1
+ print('%s:%d:%d: Display is a singleton, just import and instantiate' % (path, lineno, colno))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json
new file mode 100644
index 00000000..54d9fff5
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json
@@ -0,0 +1,5 @@
+{
+ "text": true,
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py
new file mode 100755
index 00000000..e44005a5
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ try:
+ text = text.decode('utf-8')
+ except UnicodeDecodeError as ex:
+ print('%s:%d:%d: UnicodeDecodeError: %s' % (path, line + 1, ex.start + 1, ex))
+ continue
+
+ match = re.search(u'([‘’“”])', text)
+
+ if match:
+ print('%s:%d:%d: use ASCII quotes `\'` and `"` instead of Unicode quotes' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
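
The detection reduces to a single character-class search; a runnable illustration with
the smart-quote class written as escapes and a made-up input line:

    import re

    text = u'It\u2019s a \u201csmart\u201d quote.'
    match = re.search(u'([\u2018\u2019\u201c\u201d])', text)
    if match:
        print('column %d' % (match.start(1) + 1))  # 1-based column of the first offender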
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py
new file mode 100755
index 00000000..e2201ab1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(unicode_literals)', text)
+
+ if match:
+ print('%s:%d:%d: do not use `unicode_literals`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py
new file mode 100755
index 00000000..b2de1ba8
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'^(?:[^#]*?)(urlopen)', text)
+
+ if match:
+ print('%s:%d:%d: use `ansible.module_utils.urls.open_url` instead of `urlopen`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
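
The replacement named in the message looks like this in module code; open_url adds,
among other things, certificate validation and proxy handling that a bare urlopen
call lacks (the URL is illustrative):

    from ansible.module_utils.urls import open_url

    response = open_url('https://example.com/api/status', timeout=10)
    print(response.read()[:100])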
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json
new file mode 100644
index 00000000..44003ec0
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json
@@ -0,0 +1,11 @@
+{
+ "prefixes": [
+ "lib/ansible/config/ansible_builtin_runtime.yml",
+ "meta/routing.yml",
+ "meta/runtime.yml"
+ ],
+ "extensions": [
+ ".yml"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py
new file mode 100755
index 00000000..b986db2b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+"""Schema validation of ansible-base's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import os
+import re
+import sys
+import yaml
+
+from voluptuous import Any, MultipleInvalid, PREVENT_EXTRA
+from voluptuous import Required, Schema, Invalid
+from voluptuous.humanize import humanize_error
+
+from ansible.module_utils.six import string_types
+
+
+def isodate(value):
+ """Validate a datetime.date or ISO 8601 date string."""
+ # datetime.date objects come from YAML dates, these are ok
+ if isinstance(value, datetime.date):
+ return value
+ # make sure we have a string
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
+ if not isinstance(value, string_types):
+ raise Invalid(msg)
+ try:
+ datetime.datetime.strptime(value, '%Y-%m-%d').date()
+ except ValueError:
+ raise Invalid(msg)
+ return value
+
+
+def validate_metadata_file(path):
+ """Validate explicit runtime metadata file"""
+ try:
+ with open(path, 'r') as f_path:
+ routing = yaml.safe_load(f_path)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +
+ 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ return
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' %
+ (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+ return
+
+ # Updates to schema MUST also be reflected in the documentation
+ # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html
+
+ # plugin_routing schema
+
+ deprecation_tombstoning_schema = Any(Schema(
+ {
+ Required('removal_date'): Any(isodate),
+ 'warning_text': Any(*string_types),
+ },
+ extra=PREVENT_EXTRA
+ ), Schema(
+ {
+ Required('removal_version'): Any(*string_types),
+ 'warning_text': Any(*string_types),
+ },
+ extra=PREVENT_EXTRA
+ ))
+
+ plugin_routing_schema = Any(
+ Schema({
+ ('deprecation'): Any(deprecation_tombstoning_schema),
+ ('tombstone'): Any(deprecation_tombstoning_schema),
+ ('redirect'): Any(*string_types),
+ }, extra=PREVENT_EXTRA),
+ )
+
+ list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
+ for str_type in string_types]
+
+ plugin_schema = Schema({
+ ('action'): Any(None, *list_dict_plugin_routing_schema),
+ ('become'): Any(None, *list_dict_plugin_routing_schema),
+ ('cache'): Any(None, *list_dict_plugin_routing_schema),
+ ('callback'): Any(None, *list_dict_plugin_routing_schema),
+ ('cliconf'): Any(None, *list_dict_plugin_routing_schema),
+ ('connection'): Any(None, *list_dict_plugin_routing_schema),
+ ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
+ ('filter'): Any(None, *list_dict_plugin_routing_schema),
+ ('httpapi'): Any(None, *list_dict_plugin_routing_schema),
+ ('inventory'): Any(None, *list_dict_plugin_routing_schema),
+ ('lookup'): Any(None, *list_dict_plugin_routing_schema),
+ ('module_utils'): Any(None, *list_dict_plugin_routing_schema),
+ ('modules'): Any(None, *list_dict_plugin_routing_schema),
+ ('netconf'): Any(None, *list_dict_plugin_routing_schema),
+ ('shell'): Any(None, *list_dict_plugin_routing_schema),
+ ('strategy'): Any(None, *list_dict_plugin_routing_schema),
+ ('terminal'): Any(None, *list_dict_plugin_routing_schema),
+ ('test'): Any(None, *list_dict_plugin_routing_schema),
+ ('vars'): Any(None, *list_dict_plugin_routing_schema),
+ }, extra=PREVENT_EXTRA)
+
+ # import_redirection schema
+
+ import_redirection_schema = Any(
+ Schema({
+ ('redirect'): Any(*string_types),
+ # import_redirect doesn't currently support deprecation
+ }, extra=PREVENT_EXTRA)
+ )
+
+ list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
+ for str_type in string_types]
+
+ # top level schema
+
+ schema = Schema({
+ # All of these are optional
+ ('plugin_routing'): Any(plugin_schema),
+ ('import_redirection'): Any(None, *list_dict_import_redirection_schema),
+ # requires_ansible: In the future we should validate this with SpecifierSet
+ ('requires_ansible'): Any(*string_types),
+ ('action_groups'): dict,
+ }, extra=PREVENT_EXTRA)
+
+ # Ensure schema is valid
+
+ try:
+ schema(routing)
+ except MultipleInvalid as ex:
+ for error in ex.errors:
+ # No way to get line/column numbers
+ print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
+
+
+def main():
+ """Validate runtime metadata"""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ collection_legacy_file = 'meta/routing.yml'
+ collection_runtime_file = 'meta/runtime.yml'
+
+ for path in paths:
+ if path == collection_legacy_file:
+ print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
+ continue
+
+ validate_metadata_file(path)
+
+
+if __name__ == '__main__':
+ main()
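
For reference, a minimal meta/runtime.yml that satisfies the schema above; every
name in it is invented:

    import yaml

    document = yaml.safe_load('''
    requires_ansible: ">=2.10"
    plugin_routing:
      modules:
        old_module:
          redirect: my_ns.my_coll.new_module
        dead_module:
          tombstone:
            removal_version: "2.0.0"
            warning_text: use new_module instead
    ''')
    print(document['plugin_routing']['modules']['old_module']['redirect'])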
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/shebang.json b/test/lib/ansible_test/_data/sanity/code-smell/shebang.json
new file mode 100644
index 00000000..5648429e
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/shebang.json
@@ -0,0 +1,4 @@
+{
+ "text": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/shebang.py b/test/lib/ansible_test/_data/sanity/code-smell/shebang.py
new file mode 100755
index 00000000..7cf3cf72
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/shebang.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import stat
+import sys
+
+
+def main():
+ standard_shebangs = set([
+ b'#!/bin/bash -eu',
+ b'#!/bin/bash -eux',
+ b'#!/bin/sh',
+ b'#!/usr/bin/env bash',
+ b'#!/usr/bin/env fish',
+ b'#!/usr/bin/env pwsh',
+ b'#!/usr/bin/env python',
+ b'#!/usr/bin/make -f',
+ ])
+
+ integration_shebangs = set([
+ b'#!/bin/sh',
+ b'#!/usr/bin/env bash',
+ b'#!/usr/bin/env python',
+ ])
+
+ module_shebangs = {
+ '': b'#!/usr/bin/python',
+ '.py': b'#!/usr/bin/python',
+ '.ps1': b'#!powershell',
+ }
+
+ # see https://unicode.org/faq/utf_bom.html#bom1
+ byte_order_marks = (
+ (b'\x00\x00\xFE\xFF', 'UTF-32 (BE)'),
+ (b'\xFF\xFE\x00\x00', 'UTF-32 (LE)'),
+ (b'\xFE\xFF', 'UTF-16 (BE)'),
+ (b'\xFF\xFE', 'UTF-16 (LE)'),
+ (b'\xEF\xBB\xBF', 'UTF-8'),
+ )
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ shebang = path_fd.readline().strip()
+ mode = os.stat(path).st_mode
+ executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode
+
+ if not shebang or not shebang.startswith(b'#!'):
+ if executable:
+ print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0))
+
+ for mark, name in byte_order_marks:
+ if shebang.startswith(mark):
+ print('%s:%d:%d: file starts with a %s byte order mark' % (path, 0, 0, name))
+ break
+
+ continue
+
+ is_module = False
+ is_integration = False
+
+ dirname = os.path.dirname(path)
+
+ if path.startswith('lib/ansible/modules/'):
+ is_module = True
+ elif re.search('^test/support/[^/]+/plugins/modules/', path):
+ is_module = True
+ elif re.search('^test/support/[^/]+/collections/ansible_collections/[^/]+/[^/]+/plugins/modules/', path):
+ is_module = True
+ elif path.startswith('test/lib/ansible_test/_data/'):
+ pass
+ elif path.startswith('lib/') or path.startswith('test/lib/'):
+ if executable:
+ print('%s:%d:%d: should not be executable' % (path, 0, 0))
+
+ if shebang:
+ print('%s:%d:%d: should not have a shebang' % (path, 0, 0))
+
+ continue
+ elif path.startswith('test/integration/targets/') or path.startswith('tests/integration/targets/'):
+ is_integration = True
+
+ if dirname.endswith('/library') or '/plugins/modules' in dirname or dirname in (
+ # non-standard module library directories
+ 'test/integration/targets/module_precedence/lib_no_extension',
+ 'test/integration/targets/module_precedence/lib_with_extension',
+ ):
+ is_module = True
+ elif path.startswith('plugins/modules/'):
+ is_module = True
+
+ if is_module:
+ if executable:
+ print('%s:%d:%d: module should not be executable' % (path, 0, 0))
+
+ ext = os.path.splitext(path)[1]
+ expected_shebang = module_shebangs.get(ext)
+ expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs])
+
+ if expected_shebang:
+ if shebang == expected_shebang:
+ continue
+
+ print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang))
+ else:
+ print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext))
+ else:
+ if is_integration:
+ allowed = integration_shebangs
+ else:
+ allowed = standard_shebangs
+
+ if shebang not in allowed:
+ print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang))
+
+
+if __name__ == '__main__':
+ main()
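
For the module case, the rules above distill to a small lookup keyed on the file
extension; a short illustration with invented paths:

    import os

    module_shebangs = {
        '': b'#!/usr/bin/python',
        '.py': b'#!/usr/bin/python',
        '.ps1': b'#!powershell',
    }

    for path in ('plugins/modules/ping.py', 'plugins/modules/win_ping.ps1'):
        ext = os.path.splitext(path)[1]
        print(path, '->', module_shebangs[ext].decode())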
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json
new file mode 100644
index 00000000..6f13c86b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json
@@ -0,0 +1,5 @@
+{
+ "include_directories": true,
+ "include_symlinks": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py
new file mode 100755
index 00000000..0585c6b1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ root_dir = os.getcwd() + os.path.sep
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ if not os.path.islink(path.rstrip(os.path.sep)):
+ continue
+
+ if not os.path.exists(path):
+ print('%s: broken symlinks are not allowed' % path)
+ continue
+
+ if path.endswith(os.path.sep):
+ print('%s: symlinks to directories are not allowed' % path)
+ continue
+
+ real_path = os.path.realpath(path)
+
+ if not real_path.startswith(root_dir):
+ print('%s: symlinks outside content tree are not allowed: %s' % (path, os.path.relpath(real_path, os.path.dirname(path))))
+ continue
+
+
+if __name__ == '__main__':
+ main()
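
The containment test hinges on os.path.realpath resolving the link target before the
prefix comparison; a small sketch in a throwaway directory (all paths are synthetic):

    import os
    import tempfile

    root = os.path.realpath(tempfile.mkdtemp())
    open(os.path.join(root, 'target.txt'), 'w').close()
    os.symlink('target.txt', os.path.join(root, 'link.txt'))

    real = os.path.realpath(os.path.join(root, 'link.txt'))
    print(real.startswith(root + os.path.sep))  # True: resolves inside the tree, so allowed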
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json
new file mode 100644
index 00000000..36103051
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json
@@ -0,0 +1,10 @@
+{
+ "prefixes": [
+ "lib/ansible/modules/",
+ "plugins/modules/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py
new file mode 100755
index 00000000..687136dc
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(expanduser)', text)
+
+ if match:
+ print('%s:%d:%d: use argspec type="path" instead of type="str" to avoid use of `expanduser`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
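
Declaring an option as type='path' lets AnsibleModule do the expansion itself, roughly
equivalent to the following, so module code never needs to call expanduser directly
(the sample value is illustrative):

    import os

    value = '~/output/$USER'
    print(os.path.expanduser(os.path.expandvars(value)))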
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json
new file mode 100644
index 00000000..776590b7
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py
new file mode 100755
index 00000000..49cb76c5
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'((^\s*import\s+six\b)|(^\s*from\s+six\b))', text)
+
+ if match:
+ print('%s:%d:%d: use `ansible.module_utils.six` instead of `six`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/compile/compile.py b/test/lib/ansible_test/_data/sanity/compile/compile.py
new file mode 100755
index 00000000..61910eee
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/compile/compile.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""Python syntax checker with lint friendly output."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import warnings
+
+with warnings.catch_warnings():
+ # The parser module is deprecated as of Python 3.9.
+ # This implementation will need to be updated to use another solution.
+ # Until then, disable the deprecation warnings to prevent test failures.
+ warnings.simplefilter('ignore', DeprecationWarning)
+ import parser
+
+import sys
+
+
+def main():
+ status = 0
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as source_fd:
+ if sys.version_info[0] == 3:
+ source = source_fd.read().decode('utf-8')
+ else:
+ source = source_fd.read()
+
+ try:
+ parser.suite(source)
+ except SyntaxError:
+ ex = sys.exc_info()[1]
+ status = 1
+ message = ex.text.splitlines()[0].strip()
+ sys.stdout.write("%s:%d:%d: SyntaxError: %s\n" % (path, ex.lineno, ex.offset, message))
+ sys.stdout.flush()
+
+ sys.exit(status)
+
+
+if __name__ == '__main__':
+ main()
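
The parser module was removed entirely in Python 3.10, so this check will eventually
need a different front end; one possible sketch based on ast, not the shipped
implementation:

    import ast
    import sys

    def check_syntax(path, source):
        """Return 0 if source parses, otherwise print a lint-style message and return 1."""
        try:
            ast.parse(source, filename=path)
            return 0
        except SyntaxError as ex:
            sys.stdout.write('%s:%d:%d: SyntaxError: %s\n' % (path, ex.lineno or 0, ex.offset or 0, ex.msg))
            return 1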
diff --git a/test/lib/ansible_test/_data/sanity/import/importer.py b/test/lib/ansible_test/_data/sanity/import/importer.py
new file mode 100755
index 00000000..ef8db71b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/import/importer.py
@@ -0,0 +1,467 @@
+#!/usr/bin/env python
+"""Import the given python module(s) and report error(s) encountered."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def main():
+ """
+ Main program function used to isolate globals from imported code.
+ Changes to globals in imported modules on Python 2.x will overwrite our own globals.
+ """
+ import ansible
+ import contextlib
+ import datetime
+ import json
+ import os
+ import re
+ import runpy
+ import subprocess
+ import sys
+ import traceback
+ import types
+ import warnings
+
+ ansible_path = os.path.dirname(os.path.dirname(ansible.__file__))
+ temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep
+ external_python = os.environ.get('SANITY_EXTERNAL_PYTHON') or sys.executable
+ collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME')
+ collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH')
+
+ try:
+ # noinspection PyCompatibility
+ from importlib import import_module
+ except ImportError:
+ def import_module(name):
+ __import__(name)
+ return sys.modules[name]
+
+ try:
+ # noinspection PyCompatibility
+ from StringIO import StringIO
+ except ImportError:
+ from io import StringIO
+
+ if collection_full_name:
+ # allow importing code from collections when testing a collection
+ from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+ from ansible.utils.collection_loader import _collection_finder
+
+ yaml_to_json_path = os.path.join(os.path.dirname(__file__), 'yaml_to_json.py')
+ yaml_to_dict_cache = {}
+
+ # unique ISO date marker matching the one present in yaml_to_json.py
+ iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
+ iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker)
+
+ def parse_value(value):
+ """Custom value parser for JSON deserialization that recognizes our internal ISO date format."""
+ if isinstance(value, text_type):
+ match = iso_date_re.search(value)
+
+ if match:
+ value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
+
+ return value
+
+ def object_hook(data):
+ """Object hook for custom ISO date deserialization from JSON."""
+ return dict((key, parse_value(value)) for key, value in data.items())
+
+ def yaml_to_dict(yaml, content_id):
+ """
+ Return a Python dict version of the provided YAML.
+ Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML.
+ """
+ if content_id in yaml_to_dict_cache:
+ return yaml_to_dict_cache[content_id]
+
+ try:
+ cmd = [external_python, yaml_to_json_path]
+ proc = subprocess.Popen([to_bytes(c) for c in cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml))
+
+ if proc.returncode != 0:
+ raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes)))
+
+ data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook)
+
+ return data
+ except Exception as ex:
+ raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex))
+
+ _collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access
+
+ collection_loader = _AnsibleCollectionFinder(paths=[collection_root])
+ collection_loader._install() # pylint: disable=protected-access
+ else:
+ # do not support collection loading when not testing a collection
+ collection_loader = None
+
+ # remove all modules under the ansible package
+ list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__]))
+
+ # pre-load an empty ansible package to prevent unwanted code in __init__.py from loading
+ # this more accurately reflects the environment that AnsiballZ runs modules under
+ # it also avoids issues with imports in the ansible package that are not allowed
+ ansible_module = types.ModuleType(ansible.__name__)
+ ansible_module.__file__ = ansible.__file__
+ ansible_module.__path__ = ansible.__path__
+ ansible_module.__package__ = ansible.__package__
+
+ sys.modules[ansible.__name__] = ansible_module
+
+ class ImporterAnsibleModuleException(Exception):
+ """Exception thrown during initialization of ImporterAnsibleModule."""
+
+ class ImporterAnsibleModule:
+ """Replacement for AnsibleModule to support import testing."""
+ def __init__(self, *args, **kwargs):
+ raise ImporterAnsibleModuleException()
+
+ class ImportBlacklist:
+ """Blacklist inappropriate imports."""
+ def __init__(self, path, name):
+ self.path = path
+ self.name = name
+ self.loaded_modules = set()
+
+ def find_module(self, fullname, path=None):
+ """Return self if the given fullname is blacklisted, otherwise return None.
+ :param fullname: str
+ :param path: str
+ :return: ImportBlacklist | None
+ """
+ if fullname in self.loaded_modules:
+ return None # ignore modules that are already being loaded
+
+ if is_name_in_namepace(fullname, ['ansible']):
+ if fullname in ('ansible.module_utils.basic', 'ansible.module_utils.common.removed'):
+ return self # intercept loading so we can modify the result
+
+ if is_name_in_namepace(fullname, ['ansible.module_utils', self.name]):
+ return None # module_utils and module under test are always allowed
+
+ if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)):
+ return self # blacklist ansible files that exist
+
+ return None # ansible file does not exist, do not blacklist
+
+ if is_name_in_namepace(fullname, ['ansible_collections']):
+ if not collection_loader:
+ return self # blacklist collections when we are not testing a collection
+
+ if is_name_in_namepace(fullname, ['ansible_collections...plugins.module_utils', self.name]):
+ return None # module_utils and module under test are always allowed
+
+ if collection_loader.find_module(fullname, path):
+ return self # blacklist collection files that exist
+
+ return None # collection file does not exist, do not blacklist
+
+ # not a namespace we care about
+ return None
+
+ def load_module(self, fullname):
+ """Raise an ImportError.
+ :type fullname: str
+ """
+ if fullname == 'ansible.module_utils.basic':
+ module = self.__load_module(fullname)
+
+ # stop Ansible module execution during AnsibleModule instantiation
+ module.AnsibleModule = ImporterAnsibleModule
+ # no-op for _load_params since it may be called before instantiating AnsibleModule
+ module._load_params = lambda *args, **kwargs: {} # pylint: disable=protected-access
+
+ return module
+
+ if fullname == 'ansible.module_utils.common.removed':
+ module = self.__load_module(fullname)
+
+ # no-op for removed_module since it is called in place of AnsibleModule instantiation
+ module.removed_module = lambda *args, **kwargs: None
+
+ return module
+
+ raise ImportError('import of "%s" is not allowed in this context' % fullname)
+
+ def __load_module(self, fullname):
+ """Load the requested module while avoiding infinite recursion.
+ :type fullname: str
+ :rtype: module
+ """
+ self.loaded_modules.add(fullname)
+ return import_module(fullname)
+
+ def run():
+ """Main program function."""
+ base_dir = os.getcwd()
+ messages = set()
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ name = convert_relative_path_to_name(path)
+ test_python_module(path, name, base_dir, messages)
+
+ if messages:
+ sys.exit(10)
+
+ def test_python_module(path, name, base_dir, messages):
+ """Test the given python module by importing it.
+ :type path: str
+ :type name: str
+ :type base_dir: str
+ :type messages: set[str]
+ """
+ if name in sys.modules:
+ return # cannot be tested because it has already been loaded
+
+ is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py'
+ run_main = is_ansible_module
+
+ if path == 'lib/ansible/modules/async_wrapper.py':
+ # async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function
+ run_main = False
+
+ capture_normal = Capture()
+ capture_main = Capture()
+
+ try:
+ with monitor_sys_modules(path, messages):
+ with blacklist_imports(path, name, messages):
+ with capture_output(capture_normal):
+ import_module(name)
+
+ if run_main:
+ with monitor_sys_modules(path, messages):
+ with blacklist_imports(path, name, messages):
+ with capture_output(capture_main):
+ runpy.run_module(name, run_name='__main__', alter_sys=True)
+ except ImporterAnsibleModuleException:
+ # module instantiated AnsibleModule without raising an exception
+ pass
+ except BaseException as ex: # pylint: disable=locally-disabled, broad-except
+ # intentionally catch all exceptions, including calls to sys.exit
+ exc_type, _exc, exc_tb = sys.exc_info()
+ message = str(ex)
+ results = list(reversed(traceback.extract_tb(exc_tb)))
+ line = 0
+ offset = 0
+ full_path = os.path.join(base_dir, path)
+ base_path = base_dir + os.path.sep
+ source = None
+
+ # avoid line wraps in messages
+ message = re.sub(r'\n *', ': ', message)
+
+ for result in results:
+ if result[0] == full_path:
+ # save the line number for the file under test
+ line = result[1] or 0
+
+ if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path):
+ # save the first path and line number in the traceback which is in our source tree
+ source = (os.path.relpath(result[0], base_path), result[1] or 0, 0)
+
+ if isinstance(ex, SyntaxError):
+ # SyntaxError has better information than the traceback
+ if ex.filename == full_path: # pylint: disable=locally-disabled, no-member
+ # syntax error was reported in the file under test
+ line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
+ offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
+ elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member
+ # syntax error was reported in our source tree
+ source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member
+
+ # remove the filename and line number from the message
+ # either it was extracted above, or it's not really useful information
+ message = re.sub(r' \(.*?, line [0-9]+\)$', '', message)
+
+ if source and source[0] != path:
+ message += ' (at %s:%d:%d)' % (source[0], source[1], source[2])
+
+ report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages)
+ finally:
+ capture_report(path, capture_normal, messages)
+ capture_report(path, capture_main, messages)
+
+ def is_name_in_namepace(name, namespaces):
+ """Returns True if the given name is one of the given namespaces, otherwise returns False."""
+ name_parts = name.split('.')
+
+ for namespace in namespaces:
+ namespace_parts = namespace.split('.')
+ length = min(len(name_parts), len(namespace_parts))
+
+ truncated_name = name_parts[0:length]
+ truncated_namespace = namespace_parts[0:length]
+
+ # empty parts in the namespace are treated as wildcards
+ # to simplify the comparison, use those empty parts to indicate the positions in the name to be empty as well
+ for idx, part in enumerate(truncated_namespace):
+ if not part:
+ truncated_name[idx] = part
+
+ # example: name=ansible, allowed_name=ansible.module_utils
+ # example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils
+ if truncated_name == truncated_namespace:
+ return True
+
+ return False
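+ # illustrative examples (not part of the original source):
+ # is_name_in_namepace('ansible.module_utils.basic', ['ansible.module_utils']) -> True
+ # is_name_in_namepace('ansible.playbook', ['ansible.module_utils']) -> False
+ # empty parts act as wildcards: 'ansible_collections...plugins.module_utils' matches
+ # 'ansible_collections.ns.col.plugins.module_utils.foo' for a hypothetical ns.col collection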
+
+ def check_sys_modules(path, before, messages):
+ """Check for unwanted changes to sys.modules.
+ :type path: str
+ :type before: dict[str, module]
+ :type messages: set[str]
+ """
+ after = sys.modules
+ removed = set(before.keys()) - set(after.keys())
+ changed = set(key for key, value in before.items() if key in after and value != after[key])
+
+ # additions are checked by our custom PEP 302 loader, so we don't need to check them again here
+
+ for module in sorted(removed):
+ report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages)
+
+ for module in sorted(changed):
+ report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages)
+
+ def convert_ansible_name_to_absolute_paths(name):
+ """Calculate the module path from the given name.
+ :type name: str
+ :rtype: list[str]
+ """
+ return [
+ os.path.join(ansible_path, name.replace('.', os.path.sep)),
+ os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py',
+ ]
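+ # illustrative: a name such as 'ansible.module_utils.basic' yields two candidates
+ # under ansible_path, the package directory form and the '.py' module file form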
+
+ def convert_relative_path_to_name(path):
+ """Calculate the module name from the given path.
+ :type path: str
+ :rtype: str
+ """
+ if path.endswith('/__init__.py'):
+ clean_path = os.path.dirname(path)
+ else:
+ clean_path = path
+
+ clean_path = os.path.splitext(clean_path)[0]
+
+ name = clean_path.replace(os.path.sep, '.')
+
+ if collection_loader:
+ # when testing collections the relative paths (and names) being tested are within the collection under test
+ name = 'ansible_collections.%s.%s' % (collection_full_name, name)
+ else:
+ # when testing ansible all files being imported reside under the lib directory
+ name = name[len('lib/'):]
+
+ return name
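+ # illustrative examples when testing ansible-base itself (no collection loader):
+ # 'lib/ansible/modules/ping.py' -> 'ansible.modules.ping'
+ # 'lib/ansible/module_utils/common/__init__.py' -> 'ansible.module_utils.common'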
+
+ class Capture:
+ """Captured output and/or exception."""
+ def __init__(self):
+ self.stdout = StringIO()
+ self.stderr = StringIO()
+
+ def capture_report(path, capture, messages):
+ """Report on captured output.
+ :type path: str
+ :type capture: Capture
+ :type messages: set[str]
+ """
+ if capture.stdout.getvalue():
+ first = capture.stdout.getvalue().strip().splitlines()[0].strip()
+ report_message(path, 0, 0, 'stdout', first, messages)
+
+ if capture.stderr.getvalue():
+ first = capture.stderr.getvalue().strip().splitlines()[0].strip()
+ report_message(path, 0, 0, 'stderr', first, messages)
+
+ def report_message(path, line, column, code, message, messages):
+ """Report message if not already reported.
+ :type path: str
+ :type line: int
+ :type column: int
+ :type code: str
+ :type message: str
+ :type messages: set[str]
+ """
+ message = '%s:%d:%d: %s: %s' % (path, line, column, code, message)
+
+ if message not in messages:
+ messages.add(message)
+ print(message)
+
+ @contextlib.contextmanager
+ def blacklist_imports(path, name, messages):
+ """Blacklist imports.
+ :type path: str
+ :type name: str
+ :type messages: set[str]
+ """
+ blacklist = ImportBlacklist(path, name)
+
+ sys.meta_path.insert(0, blacklist)
+ sys.path_importer_cache.clear()
+
+ try:
+ yield
+ finally:
+ if sys.meta_path[0] != blacklist:
+ report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages)
+
+ while blacklist in sys.meta_path:
+ sys.meta_path.remove(blacklist)
+
+ sys.path_importer_cache.clear()
+
+ @contextlib.contextmanager
+ def monitor_sys_modules(path, messages):
+ """Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces."""
+ snapshot = sys.modules.copy()
+
+ try:
+ yield
+ finally:
+ check_sys_modules(path, snapshot, messages)
+
+ for key in set(sys.modules.keys()) - set(snapshot.keys()):
+ if is_name_in_namepace(key, ('ansible', 'ansible_collections')):
+ del sys.modules[key] # only unload our own code since we know it's native Python
+
+ @contextlib.contextmanager
+ def capture_output(capture):
+ """Capture sys.stdout and sys.stderr.
+ :type capture: Capture
+ """
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+
+ sys.stdout = capture.stdout
+ sys.stderr = capture.stderr
+
+ # clear all warnings registries to make all warnings available
+ for module in sys.modules.values():
+ try:
+ module.__warningregistry__.clear()
+ except AttributeError:
+ pass
+
+ with warnings.catch_warnings():
+ warnings.simplefilter('error')
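+ # with the 'error' filter every warning raised during import becomes an
+ # exception, which test_python_module() then reports as a traceback message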
+
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+
+ run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py b/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py
new file mode 100644
index 00000000..09be9576
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py
@@ -0,0 +1,27 @@
+"""Read YAML from stdin and write JSON to stdout."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import json
+import sys
+
+from yaml import load
+
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+# unique ISO date marker matching the one present in importer.py
+ISO_DATE_MARKER = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
+
+
+def default(value):
+ if isinstance(value, datetime.date):
+ return '%s%s' % (ISO_DATE_MARKER, value.isoformat())
+
+ raise TypeError('cannot serialize type: %s' % type(value))
+
+
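+# dates are serialized as ISO_DATE_MARKER plus isoformat(), e.g. the marker followed
+# by '2020-12-19', so the matching marker handling in importer.py can identify them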
+json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout, default=default)
diff --git a/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py b/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py
new file mode 100644
index 00000000..74a45f00
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py
@@ -0,0 +1,15 @@
+"""Read YAML from stdin and write JSON to stdout."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from yaml import load
+
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout)
diff --git a/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt b/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt
new file mode 100644
index 00000000..659c7f59
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt
@@ -0,0 +1,4 @@
+E402
+W503
+W504
+E741
diff --git a/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1 b/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1
new file mode 100755
index 00000000..1ef2743a
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1
@@ -0,0 +1,43 @@
+#!/usr/bin/env pwsh
+#Requires -Version 6
+#Requires -Modules PSScriptAnalyzer, PSSA-PSCustomUseLiteralPath
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$WarningPreference = "Stop"
+
+# Until https://github.com/PowerShell/PSScriptAnalyzer/issues/1217 is fixed we need to import Pester if it's
+# available.
+if (Get-Module -Name Pester -ListAvailable -ErrorAction SilentlyContinue) {
+ Import-Module -Name Pester
+}
+
+$LiteralPathRule = Import-Module -Name PSSA-PSCustomUseLiteralPath -PassThru
+$LiteralPathRulePath = Join-Path -Path $LiteralPathRule.ModuleBase -ChildPath $LiteralPathRule.RootModule
+
+$PSSAParams = @{
+ CustomRulePath = @($LiteralPathRulePath)
+ IncludeDefaultRules = $true
+ Setting = (Join-Path -Path $PSScriptRoot -ChildPath "settings.psd1")
+}
+
+$Results = @()
+
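+# Each path gets up to 3 attempts, presumably to paper over intermittent
+# Invoke-ScriptAnalyzer failures; the error from the final attempt is rethrown.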
+ForEach ($Path in $Args) {
+ $Retries = 3
+
+ Do {
+ Try {
+ $Results += Invoke-ScriptAnalyzer -Path $Path @PSSAParams 3> $null
+ $Retries = 0
+ }
+ Catch {
+ If (--$Retries -le 0) {
+ Throw
+ }
+ }
+ }
+ Until ($Retries -le 0)
+}
+
+ConvertTo-Json -InputObject $Results
diff --git a/test/lib/ansible_test/_data/sanity/pslint/settings.psd1 b/test/lib/ansible_test/_data/sanity/pslint/settings.psd1
new file mode 100644
index 00000000..7646ec35
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pslint/settings.psd1
@@ -0,0 +1,13 @@
+@{
+ ExcludeRules=@(
+ 'PSUseOutputTypeCorrectly',
+ 'PSUseShouldProcessForStateChangingFunctions',
+ # We send strings as plaintext, so these three password rules will always fire
+ 'PSAvoidUsingPlainTextForPassword',
+ 'PSAvoidUsingConvertToSecureStringWithPlainText',
+ 'PSAvoidUsingUserNameAndPassWordParams',
+ # We send the module as a base64 encoded string and a BOM will cause
+ # issues here
+ 'PSUseBOMForUnicodeEncodedFile'
+ )
+}
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg
new file mode 100644
index 00000000..d3643162
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg
@@ -0,0 +1,39 @@
+[MESSAGES CONTROL]
+
+disable=
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ too-few-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ no-self-use,
+ unused-import, # pylint does not understand PEP 484 type hints
+ consider-using-dict-comprehension, # the fix requires Python 2.7+, but we still support Python 2.6
+ consider-using-set-comprehension, # the fix requires Python 2.7+, but we still support Python 2.6
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg
new file mode 100644
index 00000000..c2d75b1c
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg
@@ -0,0 +1,135 @@
+[MESSAGES CONTROL]
+
+disable=
+ abstract-method,
+ access-member-before-definition,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-beyond-top-level, # https://github.com/PyCQA/pylint/issues/2967
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+
+[TYPECHECK]
+
+ignored-modules=
+ _MovedItems,
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg
new file mode 100644
index 00000000..45199078
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg
@@ -0,0 +1,135 @@
+[MESSAGES CONTROL]
+
+disable=
+ abstract-method,
+ access-member-before-definition,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-import,
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+
+[TYPECHECK]
+
+ignored-modules=
+ _MovedItems,
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg
new file mode 100644
index 00000000..f601ab57
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg
@@ -0,0 +1,42 @@
+[MESSAGES CONTROL]
+
+disable=
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ too-few-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ missing-docstring,
+ unused-import, # pylint does not understand PEP 484 type hints
+ consider-using-dict-comprehension, # the fix requires Python 2.7+, but we still support Python 2.6
+ consider-using-set-comprehension, # the fix requires Python 2.7+, but we still support Python 2.6
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ f,
+ e,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+module-rgx=[a-z_][a-z0-9_-]{2,40}$
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/blacklist.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/blacklist.py
new file mode 100644
index 00000000..ac53aeda
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/blacklist.py
@@ -0,0 +1,242 @@
+"""A plugin for pylint to identify imports and functions which should not be used."""
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import os
+
+import astroid
+
+from pylint.checkers import BaseChecker
+from pylint.interfaces import IAstroidChecker
+
+ANSIBLE_TEST_MODULES_PATH = os.environ['ANSIBLE_TEST_MODULES_PATH']
+ANSIBLE_TEST_MODULE_UTILS_PATH = os.environ['ANSIBLE_TEST_MODULE_UTILS_PATH']
+
+
+class BlacklistEntry:
+ """Defines a import blacklist entry."""
+ def __init__(self, alternative, modules_only=False, names=None, ignore_paths=None):
+ """
+ :type alternative: str
+ :type modules_only: bool
+ :type names: tuple[str] | None
+ :type ignore_paths: tuple[str] | None
+ """
+ self.alternative = alternative
+ self.modules_only = modules_only
+ self.names = set(names) if names else set()
+ self.ignore_paths = ignore_paths
+
+ def applies_to(self, path, name=None):
+ """
+ :type path: str
+ :type name: str | None
+ :rtype: bool
+ """
+ if self.names:
+ if not name:
+ return False
+
+ if name not in self.names:
+ return False
+
+ if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths):
+ return False
+
+ if self.modules_only:
+ return is_module_path(path)
+
+ return True
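+ # illustrative behaviour: an entry without names applies to every checked file
+ # (unless excluded via ignore_paths); an entry with names applies only when one
+ # of those names is involved; modules_only further limits it to module code paths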
+
+
+def is_module_path(path):
+ """
+ :type path: str
+ :rtype: bool
+ """
+ return path.startswith(ANSIBLE_TEST_MODULES_PATH) or path.startswith(ANSIBLE_TEST_MODULE_UTILS_PATH)
+
+
+class AnsibleBlacklistChecker(BaseChecker):
+ """Checker for blacklisted imports and functions."""
+ __implements__ = (IAstroidChecker,)
+
+ name = 'blacklist'
+
+ BAD_IMPORT = 'ansible-bad-import'
+ BAD_IMPORT_FROM = 'ansible-bad-import-from'
+ BAD_FUNCTION = 'ansible-bad-function'
+ BAD_MODULE_IMPORT = 'ansible-bad-module-import'
+
+ msgs = dict(
+ E5101=('Import %s instead of %s',
+ BAD_IMPORT,
+ 'Identifies imports which should not be used.'),
+ E5102=('Import %s from %s instead of %s',
+ BAD_IMPORT_FROM,
+ 'Identifies imports which should not be used.'),
+ E5103=('Call %s instead of %s',
+ BAD_FUNCTION,
+ 'Identifies functions which should not be used.'),
+ E5104=('Import external package or ansible.module_utils not %s',
+ BAD_MODULE_IMPORT,
+ 'Identifies imports which should not be used.'),
+ )
+
+ blacklist_imports = dict(
+ # Additional imports that we may want to start checking:
+ # boto=BlacklistEntry('boto3', modules_only=True),
+ # requests=BlacklistEntry('ansible.module_utils.urls', modules_only=True),
+ # urllib=BlacklistEntry('ansible.module_utils.urls', modules_only=True),
+
+ # see https://docs.python.org/2/library/urllib2.html
+ urllib2=BlacklistEntry('ansible.module_utils.urls',
+ ignore_paths=(
+ '/lib/ansible/module_utils/urls.py',
+ )),
+
+ # see https://docs.python.org/3.7/library/collections.abc.html
+ collections=BlacklistEntry('ansible.module_utils.common._collections_compat',
+ ignore_paths=(
+ '/lib/ansible/module_utils/common/_collections_compat.py',
+ ),
+ names=(
+ 'MappingView',
+ 'ItemsView',
+ 'KeysView',
+ 'ValuesView',
+ 'Mapping', 'MutableMapping',
+ 'Sequence', 'MutableSequence',
+ 'Set', 'MutableSet',
+ 'Container',
+ 'Hashable',
+ 'Sized',
+ 'Callable',
+ 'Iterable',
+ 'Iterator',
+ )),
+ )
+
+ blacklist_functions = {
+ # see https://docs.python.org/2/library/tempfile.html#tempfile.mktemp
+ 'tempfile.mktemp': BlacklistEntry('tempfile.mkstemp'),
+
+ 'sys.exit': BlacklistEntry('exit_json or fail_json',
+ ignore_paths=(
+ '/lib/ansible/module_utils/basic.py',
+ '/lib/ansible/modules/async_wrapper.py',
+ '/lib/ansible/module_utils/common/removed.py',
+ ),
+ modules_only=True),
+
+ 'builtins.print': BlacklistEntry('module.log or module.debug',
+ ignore_paths=(
+ '/lib/ansible/module_utils/basic.py',
+ '/lib/ansible/module_utils/common/removed.py',
+ ),
+ modules_only=True),
+ }
+
+ def visit_import(self, node):
+ """
+ :type node: astroid.node_classes.Import
+ """
+ for name in node.names:
+ self._check_import(node, name[0])
+
+ def visit_importfrom(self, node):
+ """
+ :type node: astroid.node_classes.ImportFrom
+ """
+ self._check_importfrom(node, node.modname, node.names)
+
+ def visit_attribute(self, node):
+ """
+ :type node: astroid.node_classes.Attribute
+ """
+ last_child = node.last_child()
+
+ # this is faster than using type inference and will catch the most common cases
+ if not isinstance(last_child, astroid.node_classes.Name):
+ return
+
+ module = last_child.name
+
+ entry = self.blacklist_imports.get(module)
+
+ if entry and entry.names:
+ if entry.applies_to(self.linter.current_file, node.attrname):
+ self.add_message(self.BAD_IMPORT_FROM, args=(node.attrname, entry.alternative, module), node=node)
+
+ def visit_call(self, node):
+ """
+ :type node: astroid.node_classes.Call
+ """
+ try:
+ for i in node.func.inferred():
+ func = None
+
+ if isinstance(i, astroid.scoped_nodes.FunctionDef) and isinstance(i.parent, astroid.scoped_nodes.Module):
+ func = '%s.%s' % (i.parent.name, i.name)
+
+ if not func:
+ continue
+
+ entry = self.blacklist_functions.get(func)
+
+ if entry and entry.applies_to(self.linter.current_file):
+ self.add_message(self.BAD_FUNCTION, args=(entry.alternative, func), node=node)
+ except astroid.exceptions.InferenceError:
+ pass
+
+ def _check_import(self, node, modname):
+ """
+ :type node: astroid.node_classes.Import
+ :type modname: str
+ """
+ self._check_module_import(node, modname)
+
+ entry = self.blacklist_imports.get(modname)
+
+ if not entry:
+ return
+
+ if entry.applies_to(self.linter.current_file):
+ self.add_message(self.BAD_IMPORT, args=(entry.alternative, modname), node=node)
+
+ def _check_importfrom(self, node, modname, names):
+ """
+ :type node: astroid.node_classes.ImportFrom
+ :type modname: str
+ :type names: list[tuple[str, str | None]]
+ """
+ self._check_module_import(node, modname)
+
+ entry = self.blacklist_imports.get(modname)
+
+ if not entry:
+ return
+
+ for name in names:
+ if entry.applies_to(self.linter.current_file, name[0]):
+ self.add_message(self.BAD_IMPORT_FROM, args=(name[0], entry.alternative, modname), node=node)
+
+ def _check_module_import(self, node, modname):
+ """
+ :type node: astroid.node_classes.Import | astroid.node_classes.ImportFrom
+ :type modname: str
+ """
+ if not is_module_path(self.linter.current_file):
+ return
+
+ if modname == 'ansible.module_utils' or modname.startswith('ansible.module_utils.'):
+ return
+
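+ # any other import from the ansible namespace is flagged below; e.g. an
+ # 'import ansible.playbook' inside a module triggers ansible-bad-module-import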
+ if modname == 'ansible' or modname.startswith('ansible.'):
+ self.add_message(self.BAD_MODULE_IMPORT, args=(modname,), node=node)
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleBlacklistChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py
new file mode 100644
index 00000000..c88e5e5a
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py
@@ -0,0 +1,250 @@
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import re
+
+from distutils.version import LooseVersion
+
+import astroid
+
+from pylint.interfaces import IAstroidChecker
+from pylint.checkers import BaseChecker
+from pylint.checkers.utils import check_messages
+
+from ansible.module_utils.six import string_types
+from ansible.release import __version__ as ansible_version_raw
+from ansible.utils.version import SemanticVersion
+
+MSGS = {
+ 'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "ansible-deprecated-version",
+ "Used when a call to Display.deprecated specifies a version "
+ "less than or equal to the current version of Ansible",
+ {'minversion': (2, 6)}),
+ 'E9502': ("Display.deprecated call without a version or date",
+ "ansible-deprecated-no-version",
+ "Used when a call to Display.deprecated does not specify a "
+ "version or date",
+ {'minversion': (2, 6)}),
+ 'E9503': ("Invalid deprecated version (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-invalid-deprecated-version",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "Ansible version number",
+ {'minversion': (2, 6)}),
+ 'E9504': ("Deprecated version (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "collection-deprecated-version",
+ "Used when a call to Display.deprecated specifies a collection "
+ "version less than or equal to the current version of this "
+ "collection",
+ {'minversion': (2, 6)}),
+ 'E9505': ("Invalid deprecated version (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "collection-invalid-deprecated-version",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "collection version number",
+ {'minversion': (2, 6)}),
+ 'E9506': ("No collection name found in call to Display.deprecated or "
+ "AnsibleModule.deprecate",
+ "ansible-deprecated-no-collection-name",
+ "The current collection name in format `namespace.name` must "
+ "be provided as collection_name when calling Display.deprecated "
+ "or AnsibleModule.deprecate (`ansible.builtin` for ansible-base)",
+ {'minversion': (2, 6)}),
+ 'E9507': ("Wrong collection name (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "wrong-collection-deprecated",
+ "The name of the current collection must be passed to the "
+ "Display.deprecated resp. AnsibleModule.deprecate calls "
+ "(`ansible.builtin` for ansible-base)",
+ {'minversion': (2, 6)}),
+ 'E9508': ("Expired date (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "ansible-deprecated-date",
+ "Used when a call to Display.deprecated specifies a date "
+ "before today",
+ {'minversion': (2, 6)}),
+ 'E9509': ("Invalid deprecated date (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-invalid-deprecated-date",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "date. It must be a string in format `YYYY-MM-DD` (ISO 8601)",
+ {'minversion': (2, 6)}),
+ 'E9510': ("Both version and date found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-deprecated-both-version-and-date",
+ "Only one of version and date must be specified",
+ {'minversion': (2, 6)}),
+}
+
+
+ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
+
+
+def _get_expr_name(node):
+ """Funciton to get either ``attrname`` or ``name`` from ``node.func.expr``
+
+ Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
+ """
+ try:
+ return node.func.expr.attrname
+ except AttributeError:
+ # If this fails too, we'll let it raise, the caller should catch it
+ return node.func.expr.name
+
+
+def parse_isodate(value):
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+ if not isinstance(value, string_types):
+ raise ValueError(msg)
+ # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
+ # we have to do things manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
+ raise ValueError(msg)
+ try:
+ return datetime.datetime.strptime(value, '%Y-%m-%d').date()
+ except ValueError:
+ raise ValueError(msg)
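+# e.g. parse_isodate('2020-12-19') -> datetime.date(2020, 12, 19), while an
+# input such as '12/19/2020' raises ValueError (illustrative values)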
+
+
+class AnsibleDeprecatedChecker(BaseChecker):
+ """Checks for Display.deprecated calls to ensure that the ``version``
+ has not passed or met the time for removal
+ """
+
+ __implements__ = (IAstroidChecker,)
+ name = 'deprecated'
+ msgs = MSGS
+
+ options = (
+ ('collection-name', {
+ 'default': None,
+ 'type': 'string',
+ 'metavar': '<name>',
+ 'help': 'The collection\'s name used to check collection names in deprecations.',
+ }),
+ ('collection-version', {
+ 'default': None,
+ 'type': 'string',
+ 'metavar': '<version>',
+ 'help': 'The collection\'s version number used to check deprecations.',
+ }),
+ )
+
+ def __init__(self, *args, **kwargs):
+ self.collection_version = None
+ self.collection_name = None
+ super(AnsibleDeprecatedChecker, self).__init__(*args, **kwargs)
+
+ def set_option(self, optname, value, action=None, optdict=None):
+ super(AnsibleDeprecatedChecker, self).set_option(optname, value, action, optdict)
+ if optname == 'collection-version' and value is not None:
+ self.collection_version = SemanticVersion(self.config.collection_version)
+ if optname == 'collection-name' and value is not None:
+ self.collection_name = self.config.collection_name
+
+ def _check_date(self, node, date):
+ if not isinstance(date, str):
+ self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+ return
+
+ try:
+ date_parsed = parse_isodate(date)
+ except ValueError:
+ self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+ return
+
+ if date_parsed < datetime.date.today():
+ self.add_message('ansible-deprecated-date', node=node, args=(date,))
+
+ def _check_version(self, node, version, collection_name):
+ if not isinstance(version, (str, float)):
+ self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
+ return
+
+ version_no = str(version)
+
+ if collection_name == 'ansible.builtin':
+ # Ansible-base
+ try:
+ if not version_no:
+ raise ValueError('Version string should not be empty')
+ loose_version = LooseVersion(str(version_no))
+ if ANSIBLE_VERSION >= loose_version:
+ self.add_message('ansible-deprecated-version', node=node, args=(version,))
+ except ValueError:
+ self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
+ elif collection_name:
+ # Collections
+ try:
+ if not version_no:
+ raise ValueError('Version string should not be empty')
+ semantic_version = SemanticVersion(version_no)
+ if collection_name == self.collection_name and self.collection_version is not None:
+ if self.collection_version >= semantic_version:
+ self.add_message('collection-deprecated-version', node=node, args=(version,))
+ except ValueError:
+ self.add_message('collection-invalid-deprecated-version', node=node, args=(version,))
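+ # e.g. with ansible-base 2.10.x, a call deprecating with version='2.9' under
+ # collection_name 'ansible.builtin' is flagged ansible-deprecated-version,
+ # since ANSIBLE_VERSION >= LooseVersion('2.9') (illustrative values)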
+
+ @check_messages(*(MSGS.keys()))
+ def visit_call(self, node):
+ version = None
+ date = None
+ collection_name = None
+ try:
+ if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or
+ node.func.attrname == 'deprecate' and _get_expr_name(node)):
+ if node.keywords:
+ for keyword in node.keywords:
+ if len(node.keywords) == 1 and keyword.arg is None:
+ # This is likely a **kwargs splat
+ return
+ if keyword.arg == 'version':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ version = keyword.value.value
+ if keyword.arg == 'date':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ date = keyword.value.value
+ if keyword.arg == 'collection_name':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ collection_name = keyword.value.value
+ if not version and not date:
+ try:
+ version = node.args[1].value
+ except IndexError:
+ self.add_message('ansible-deprecated-no-version', node=node)
+ return
+ if version and date:
+ self.add_message('ansible-deprecated-both-version-and-date', node=node)
+
+ if collection_name:
+ this_collection = collection_name == (self.collection_name or 'ansible.builtin')
+ if not this_collection:
+ self.add_message('wrong-collection-deprecated', node=node, args=(collection_name,))
+ else:
+ self.add_message('ansible-deprecated-no-collection-name', node=node)
+
+ if date:
+ self._check_date(node, date)
+ elif version:
+ self._check_version(node, version, collection_name)
+ except AttributeError:
+ # Not the type of node we are interested in
+ pass
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleDeprecatedChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py
new file mode 100644
index 00000000..eafde73b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py
@@ -0,0 +1,90 @@
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import six
+
+import astroid
+from pylint.interfaces import IAstroidChecker
+from pylint.checkers import BaseChecker
+from pylint.checkers import utils
+from pylint.checkers.utils import check_messages
+try:
+ from pylint.checkers.utils import parse_format_method_string
+except ImportError:
+ # noinspection PyUnresolvedReferences
+ from pylint.checkers.strings import parse_format_method_string
+
+_PY3K = sys.version_info[:2] >= (3, 0)
+
+MSGS = {
+ 'E9305': ("Format string contains automatic field numbering "
+ "specification",
+ "ansible-format-automatic-specification",
+ "Used when a PEP 3101 format string contains automatic "
+ "field numbering (e.g. '{}').",
+ {'minversion': (2, 6)}),
+ 'E9390': ("bytes object has no .format attribute",
+ "ansible-no-format-on-bytestring",
+ "Used when a bytestring was used as a PEP 3101 format string "
+ "as Python3 bytestrings do not have a .format attribute",
+ {'minversion': (3, 0)}),
+}
+
+
+class AnsibleStringFormatChecker(BaseChecker):
+ """Checks string formatting operations to ensure that the format string
+ is valid and the arguments match the format string.
+ """
+
+ __implements__ = (IAstroidChecker,)
+ name = 'string'
+ msgs = MSGS
+
+ @check_messages(*(MSGS.keys()))
+ def visit_call(self, node):
+ func = utils.safe_infer(node.func)
+ if (isinstance(func, astroid.BoundMethod)
+ and isinstance(func.bound, astroid.Instance)
+ and func.bound.name in ('str', 'unicode', 'bytes')):
+ if func.name == 'format':
+ self._check_new_format(node, func)
+
+ def _check_new_format(self, node, func):
+ """ Check the new string formatting """
+ if (isinstance(node.func, astroid.Attribute)
+ and not isinstance(node.func.expr, astroid.Const)):
+ return
+ try:
+ strnode = next(func.bound.infer())
+ except astroid.InferenceError:
+ return
+ if not isinstance(strnode, astroid.Const):
+ return
+
+ if _PY3K and isinstance(strnode.value, six.binary_type):
+ self.add_message('ansible-no-format-on-bytestring', node=node)
+ return
+ if not isinstance(strnode.value, six.string_types):
+ return
+
+ if node.starargs or node.kwargs:
+ return
+ try:
+ num_args = parse_format_method_string(strnode.value)[1]
+ except utils.IncompleteFormatString:
+ return
+
+ if num_args:
+ self.add_message('ansible-format-automatic-specification',
+ node=node)
+ return
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleStringFormatChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt b/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
new file mode 100644
index 00000000..961e9bd9
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
@@ -0,0 +1,5 @@
+version
+release
+today
+br
+_
diff --git a/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt b/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
new file mode 100644
index 00000000..29588ddd
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
@@ -0,0 +1,3 @@
+SC1090
+SC1091
+SC2164
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/main.py b/test/lib/ansible_test/_data/sanity/validate-modules/main.py
new file mode 100755
index 00000000..c1e2bdaa
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/main.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from validate_modules.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules b/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
new file mode 120000
index 00000000..11a5d8e1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
@@ -0,0 +1 @@
+main.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
new file mode 100644
index 00000000..d8ff2dc0
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+__version__ = '0.0.1b'
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
new file mode 100644
index 00000000..79614211
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
@@ -0,0 +1,2442 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import argparse
+import ast
+import datetime
+import json
+import errno
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import traceback
+
+from collections import OrderedDict
+from contextlib import contextmanager
+from distutils.version import StrictVersion, LooseVersion
+from fnmatch import fnmatch
+
+import yaml
+
+from ansible import __version__ as ansible_version
+from ansible.executor.module_common import REPLACER_WINDOWS
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils._text import to_native
+from ansible.plugins.loader import fragment_loader
+from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+from ansible.utils.plugin_docs import BLACKLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring
+from ansible.utils.version import SemanticVersion
+
+from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec
+
+from .schema import ansible_module_kwargs_schema, doc_schema, return_schema
+
+from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate
+from voluptuous.humanize import humanize_error
+
+from ansible.module_utils.six import PY3, with_metaclass, string_types
+
+if PY3:
+ # Because there is no ast.TryExcept in Python 3 ast module
+ TRY_EXCEPT = ast.Try
+ # REPLACER_WINDOWS from ansible.executor.module_common is a byte
+ # string, but we need unicode for Python 3
+ REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
+else:
+ TRY_EXCEPT = ast.TryExcept
+
+BLACKLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
+INDENT_REGEX = re.compile(r'([\t]*)')
+TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*')
+SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
+BLACKLIST_IMPORTS = {
+ 'requests': {
+ 'new_only': True,
+ 'error': {
+ 'code': 'use-module-utils-urls',
+ 'msg': ('requests import found, should use '
+ 'ansible.module_utils.urls instead')
+ }
+ },
+ r'boto(?:\.|$)': {
+ 'new_only': True,
+ 'error': {
+ 'code': 'use-boto3',
+ 'msg': 'boto import found, new modules should use boto3'
+ }
+ },
+}
+SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
+OS_CALL_REGEX = re.compile(r'os\.call.*')
+
+
+LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
+
+
+def compare_dates(d1, d2):
+ try:
+ date1 = parse_isodate(d1, allow_date=True)
+ date2 = parse_isodate(d2, allow_date=True)
+ return date1 == date2
+ except ValueError:
+ # At least one of d1 and d2 cannot be parsed. Simply compare values.
+ return d1 == d2
+
+
+class ReporterEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, Exception):
+ return str(o)
+
+ return json.JSONEncoder.default(self, o)
+
+
+class Reporter:
+ def __init__(self):
+ self.files = OrderedDict()
+
+ def _ensure_default_entry(self, path):
+ try:
+ self.files[path]
+ except KeyError:
+ self.files[path] = {
+ 'errors': [],
+ 'warnings': [],
+ 'traces': [],
+ 'warning_traces': []
+ }
+
+ def _log(self, path, code, msg, level='error', line=0, column=0):
+ self._ensure_default_entry(path)
+ lvl_dct = self.files[path]['%ss' % level]
+ lvl_dct.append({
+ 'code': code,
+ 'msg': msg,
+ 'line': line,
+ 'column': column
+ })
+
+ def error(self, *args, **kwargs):
+ self._log(*args, level='error', **kwargs)
+
+ def warning(self, *args, **kwargs):
+ self._log(*args, level='warning', **kwargs)
+
+ def trace(self, path, tracebk):
+ self._ensure_default_entry(path)
+ self.files[path]['traces'].append(tracebk)
+
+ def warning_trace(self, path, tracebk):
+ self._ensure_default_entry(path)
+ self.files[path]['warning_traces'].append(tracebk)
+
+ @staticmethod
+ @contextmanager
+ def _output_handle(output):
+ if output != '-':
+ handle = open(output, 'w+')
+ else:
+ handle = sys.stdout
+
+ yield handle
+
+ handle.flush()
+ handle.close()
+
+ @staticmethod
+ def _filter_out_ok(reports):
+ temp_reports = OrderedDict()
+ for path, report in reports.items():
+ if report['errors'] or report['warnings']:
+ temp_reports[path] = report
+
+ return temp_reports
+
+ def plain(self, warnings=False, output='-'):
+ """Print out the test results in plain format
+
+ output is ignored here for now
+ """
+ ret = []
+
+ for path, report in Reporter._filter_out_ok(self.files).items():
+ traces = report['traces'][:]
+ if warnings and report['warnings']:
+ traces.extend(report['warning_traces'])
+
+ for trace in traces:
+ print('TRACE:')
+ print('\n '.join((' %s' % trace).splitlines()))
+ for error in report['errors']:
+ error['path'] = path
+ print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
+ ret.append(1)
+ if warnings:
+ for warning in report['warnings']:
+ warning['path'] = path
+ print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
+
+ return 3 if ret else 0
+
+ def json(self, warnings=False, output='-'):
+ """Print out the test results in json format
+
+ warnings is not respected in this output
+ """
+ ret = [len(r['errors']) for r in self.files.values()]
+
+ with Reporter._output_handle(output) as handle:
+ print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
+
+ return 3 if sum(ret) else 0
+
+
+class Validator(with_metaclass(abc.ABCMeta, object)):
+ """Validator instances are intended to be run on a single object. if you
+ are scanning multiple objects for problems, you'll want to have a separate
+ Validator for each one."""
+
+ def __init__(self, reporter=None):
+ self.reporter = reporter
+
+ @abc.abstractproperty
+ def object_name(self):
+ """Name of the object we validated"""
+ pass
+
+ @abc.abstractproperty
+ def object_path(self):
+ """Path of the object we validated"""
+ pass
+
+ @abc.abstractmethod
+ def validate(self):
+ """Run this method to generate the test results"""
+ pass
+
+
+class ModuleValidator(Validator):
+ BLACKLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
+ BLACKLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
+ 'shippable.yml',
+ '.gitattributes', '.gitmodules', 'COPYING',
+ '__init__.py', 'VERSION', 'test-docs.sh'))
+ BLACKLIST = BLACKLIST_FILES.union(BLACKLIST['MODULE'])
+
+ PS_DOC_BLACKLIST = frozenset((
+ 'async_status.ps1',
+ 'slurp.ps1',
+ 'setup.ps1'
+ ))
+ PS_ARG_VALIDATE_BLACKLIST = frozenset((
+ 'win_dsc.ps1', # win_dsc is a dynamic arg spec, the docs won't ever match
+ ))
+
+ WHITELIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
+
+ def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None,
+ base_branch=None, git_cache=None, reporter=None, routing=None):
+ super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
+
+ self.path = path
+ self.basename = os.path.basename(self.path)
+ self.name = os.path.splitext(self.basename)[0]
+
+ self.analyze_arg_spec = analyze_arg_spec
+
+ self._Version = LooseVersion
+ self._StrictVersion = StrictVersion
+
+ self.collection = collection
+ self.collection_name = 'ansible.builtin'
+ if self.collection:
+ self._Version = SemanticVersion
+ self._StrictVersion = SemanticVersion
+ collection_namespace_path, collection_name = os.path.split(self.collection)
+ self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
+ self.routing = routing
+ self.collection_version = None
+ if collection_version is not None:
+ self.collection_version_str = collection_version
+ self.collection_version = SemanticVersion(collection_version)
+
+ self.base_branch = base_branch
+ self.git_cache = git_cache or GitCache()
+
+ self._python_module_override = False
+
+ with open(path) as f:
+ self.text = f.read()
+ self.length = len(self.text.splitlines())
+ try:
+ self.ast = ast.parse(self.text)
+ except Exception:
+ self.ast = None
+
+ if base_branch:
+ self.base_module = self._get_base_file()
+ else:
+ self.base_module = None
+
+ def _create_version(self, v, collection_name=None):
+ if not v:
+ raise ValueError('Empty string is not a valid version')
+ if collection_name == 'ansible.builtin':
+ return LooseVersion(v)
+ if collection_name is not None:
+ return SemanticVersion(v)
+ return self._Version(v)
+
+ def _create_strict_version(self, v, collection_name=None):
+ if not v:
+ raise ValueError('Empty string is not a valid version')
+ if collection_name == 'ansible.builtin':
+ return StrictVersion(v)
+ if collection_name is not None:
+ return SemanticVersion(v)
+ return self._StrictVersion(v)
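+ # net effect: ansible.builtin content is compared with LooseVersion/StrictVersion,
+ # collection content with SemanticVersion; without a collection_name the defaults
+ # chosen in __init__ (self._Version / self._StrictVersion) are used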
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if not self.base_module:
+ return
+
+ try:
+ os.remove(self.base_module)
+ except Exception:
+ pass
+
+ @property
+ def object_name(self):
+ return self.basename
+
+ @property
+ def object_path(self):
+ return self.path
+
+ def _get_collection_meta(self):
+ """Implement if we need this for version_added comparisons
+ """
+ pass
+
+ def _python_module(self):
+ if self.path.endswith('.py') or self._python_module_override:
+ return True
+ return False
+
+ def _powershell_module(self):
+ if self.path.endswith('.ps1'):
+ return True
+ return False
+
+ def _just_docs(self):
+ """Module can contain just docs and from __future__ boilerplate
+ """
+ try:
+ for child in self.ast.body:
+ if not isinstance(child, ast.Assign):
+ # allowed from __future__ imports
+ if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+ for future_import in child.names:
+ if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
+ break
+ else:
+ continue
+ return False
+ return True
+ except AttributeError:
+ return False
+
+ def _get_base_branch_module_path(self):
+ """List all paths within lib/ansible/modules to try and match a moved module"""
+ return self.git_cache.base_module_paths.get(self.object_name)
+
+ def _has_alias(self):
+ """Return true if the module has any aliases."""
+ return self.object_name in self.git_cache.head_aliased_modules
+
+ def _get_base_file(self):
+ # In case of module moves, look for the original location
+ base_path = self._get_base_branch_module_path()
+
+ command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
+ p = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if int(p.returncode) != 0:
+ return None
+
+ t = tempfile.NamedTemporaryFile(delete=False)
+ t.write(stdout)
+ t.close()
+
+ return t.name
+
+ def _is_new_module(self):
+ if self._has_alias():
+ return False
+
+ return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
+
+ def _check_interpreter(self, powershell=False):
+ if powershell:
+ if not self.text.startswith('#!powershell\n'):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-powershell-interpreter',
+ msg='Interpreter line is not "#!powershell"'
+ )
+ return
+
+ if not self.text.startswith('#!/usr/bin/python'):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-python-interpreter',
+ msg='Interpreter line is not "#!/usr/bin/python"',
+ )
+
+ def _check_type_instead_of_isinstance(self, powershell=False):
+ if powershell:
+ return
+ for line_no, line in enumerate(self.text.splitlines()):
+ typekeyword = TYPE_REGEX.match(line)
+ if typekeyword:
+ # TODO: add column
+ self.reporter.error(
+ path=self.object_path,
+ code='unidiomatic-typecheck',
+ msg=('Type comparison using type() found. '
+ 'Use isinstance() instead'),
+ line=line_no + 1
+ )
+
+ def _check_for_sys_exit(self):
+ # Optimize out the happy path
+ if 'sys.exit' not in self.text:
+ return
+
+ for line_no, line in enumerate(self.text.splitlines()):
+ sys_exit_usage = SYS_EXIT_REGEX.match(line)
+ if sys_exit_usage:
+ # TODO: add column
+ self.reporter.error(
+ path=self.object_path,
+ code='use-fail-json-not-sys-exit',
+ msg='sys.exit() call found. Should be exit_json/fail_json',
+ line=line_no + 1
+ )
+
+ def _check_gpl3_header(self):
+ header = '\n'.join(self.text.split('\n')[:20])
+ if ('GNU General Public License' not in header or
+ ('version 3' not in header and 'v3.0' not in header)):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-gplv3-license',
+ msg='GPLv3 license header not found in the first 20 lines of the module'
+ )
+ elif self._is_new_module():
+ if len([line for line in header
+ if 'GNU General Public License' in line]) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-short-gplv3-license',
+ msg='Found old style GPLv3 license header: '
+ 'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright'
+ )
+
+ def _check_for_subprocess(self):
+ for child in self.ast.body:
+ if isinstance(child, ast.Import):
+ if child.names[0].name == 'subprocess':
+ for line_no, line in enumerate(self.text.splitlines()):
+ sp_match = SUBPROCESS_REGEX.search(line)
+ if sp_match:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-run-command-not-popen',
+ msg=('subprocess.Popen call found. Should be module.run_command'),
+ line=(line_no + 1),
+ column=(sp_match.span()[0] + 1)
+ )
+
+ def _check_for_os_call(self):
+ if 'os.call' in self.text:
+ for line_no, line in enumerate(self.text.splitlines()):
+ os_call_match = OS_CALL_REGEX.search(line)
+ if os_call_match:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-run-command-not-os-call',
+ msg=('os.call() call found. Should be module.run_command'),
+ line=(line_no + 1),
+ column=(os_call_match.span()[0] + 1)
+ )
+
+ def _find_blacklist_imports(self):
+ for child in self.ast.body:
+ names = []
+ if isinstance(child, ast.Import):
+ names.extend(child.names)
+ elif isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, ast.Import):
+ names.extend(grandchild.names)
+ for name in names:
+ # TODO: Add line/col
+ for blacklist_import, options in BLACKLIST_IMPORTS.items():
+ if re.search(blacklist_import, name.name):
+ new_only = options['new_only']
+ if self._is_new_module() and new_only:
+ self.reporter.error(
+ path=self.object_path,
+ **options['error']
+ )
+ elif not new_only:
+ self.reporter.error(
+ path=self.object_path,
+ **options['error']
+ )
+
+ def _find_module_utils(self, main):
+ linenos = []
+ found_basic = False
+ for child in self.ast.body:
+ if isinstance(child, (ast.Import, ast.ImportFrom)):
+ names = []
+ try:
+ names.append(child.module)
+ if child.module.endswith('.basic'):
+ found_basic = True
+ except AttributeError:
+ pass
+ names.extend([n.name for n in child.names])
+
+ if [n for n in names if n.startswith('ansible.module_utils')]:
+ linenos.append(child.lineno)
+
+ for name in child.names:
+ if ('module_utils' in getattr(child, 'module', '') and
+ isinstance(name, ast.alias) and
+ name.name == '*'):
+ msg = (
+ 'module-utils-specific-import',
+ ('module_utils imports should import specific '
+ 'components, not "*"')
+ )
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=child.lineno
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=child.lineno
+ )
+
+ if (isinstance(name, ast.alias) and
+ name.name == 'basic'):
+ found_basic = True
+
+ if not found_basic:
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-module-utils-basic-import',
+ msg='Did not find "ansible.module_utils.basic" import'
+ )
+
+ return linenos
+
+ def _get_first_callable(self):
+ linenos = []
+ for child in self.ast.body:
+ if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
+ linenos.append(child.lineno)
+
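+ # NOTE: this assumes the module defines at least one function or class;
+ # min() would raise ValueError on an empty list.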
+ return min(linenos)
+
+ def _find_main_call(self, look_for="main"):
+ """ Ensure that the module ends with:
+ if __name__ == '__main__':
+ main()
+ OR, in the case of modules that are in the docs-only deprecation phase
+ if __name__ == '__main__':
+ removed_module()
+ """
+ lineno = False
+ if_bodies = []
+ for child in self.ast.body:
+ if isinstance(child, ast.If):
+ try:
+ if child.test.left.id == '__name__':
+ if_bodies.extend(child.body)
+ except AttributeError:
+ pass
+
+ bodies = self.ast.body
+ bodies.extend(if_bodies)
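+ # NOTE: `bodies` aliases self.ast.body, so the extend() above mutates the AST body in place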
+
+ for child in bodies:
+
+ # validate that the next to last line is 'if __name__ == "__main__"'
+ if child.lineno == (self.length - 1):
+
+ mainchecked = False
+ try:
+ if isinstance(child, ast.If) and \
+ child.test.left.id == '__name__' and \
+ len(child.test.ops) == 1 and \
+ isinstance(child.test.ops[0], ast.Eq) and \
+ child.test.comparators[0].s == '__main__':
+ mainchecked = True
+ except Exception:
+ pass
+
+ if not mainchecked:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-if-name-main',
+ msg='Next to last line should be: if __name__ == "__main__":',
+ line=child.lineno
+ )
+
+ # validate that the final line is a call to main()
+ if isinstance(child, ast.Expr):
+ if isinstance(child.value, ast.Call):
+ if (isinstance(child.value.func, ast.Name) and
+ child.value.func.id == look_for):
+ lineno = child.lineno
+ if lineno < self.length - 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='last-line-main-call',
+ msg=('Call to %s() not the last line' % look_for),
+ line=lineno
+ )
+
+ if not lineno:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-main-call',
+ msg=('Did not find a call to %s()' % look_for)
+ )
+
+ return lineno or 0
+
+ def _find_has_import(self):
+ for child in self.ast.body:
+ found_try_except_import = False
+ found_has = False
+ if isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, ast.Import):
+ found_try_except_import = True
+ if isinstance(grandchild, ast.Assign):
+ for target in grandchild.targets:
+ if target.id.lower().startswith('has_'):
+ found_has = True
+ if found_try_except_import and not found_has:
+ # TODO: Add line/col
+ self.reporter.warning(
+ path=self.object_path,
+ code='try-except-missing-has',
+ msg='Found Try/Except block without HAS_ assignment'
+ )
+
+ def _ensure_imports_below_docs(self, doc_info, first_callable):
+ try:
+ min_doc_line = min(
+ [doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
+ )
+ except ValueError:
+ # We can't perform this validation, as there are no DOCs provided at all
+ return
+
+ max_doc_line = max(
+ [doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
+ )
+
+ import_lines = []
+
+ for child in self.ast.body:
+ if isinstance(child, (ast.Import, ast.ImportFrom)):
+ if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+ # allowed from __future__ imports
+ for future_import in child.names:
+ if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
+ self.reporter.error(
+ path=self.object_path,
+ code='illegal-future-imports',
+ msg=('Only the following from __future__ imports are allowed: %s'
+ % ', '.join(self.WHITELIST_FUTURE_IMPORTS)),
+ line=child.lineno
+ )
+ break
+ else: # for-else. If we didn't find a problem and break out of the loop, then this is a legal import
+ continue
+ import_lines.append(child.lineno)
+ if child.lineno < min_doc_line:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-before-documentation',
+ msg=('Import found before documentation variables. '
+ 'All imports must appear below '
+ 'DOCUMENTATION/EXAMPLES/RETURN.'),
+ line=child.lineno
+ )
+ break
+ elif isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
+ import_lines.append(grandchild.lineno)
+ if grandchild.lineno < min_doc_line:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-before-documentation',
+ msg=('Import found before documentation '
+ 'variables. All imports must appear below '
+ 'DOCUMENTATION/EXAMPLES/RETURN.'),
+ line=child.lineno
+ )
+ break
+
+ for import_line in import_lines:
+ if not (max_doc_line < import_line < first_callable):
+ msg = (
+ 'import-placement',
+ ('Imports should be directly below DOCUMENTATION/EXAMPLES/'
+ 'RETURN.')
+ )
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=import_line
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=import_line
+ )
+
+ def _validate_ps_replacers(self):
+ # loop all (for/else + error)
+ # get module list for each
+ # check "shape" of each module name
+
+ module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
+ csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
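+ # For reference, these patterns match requirement lines such as:
+ # #Requires -Module Ansible.ModuleUtils.Legacy
+ # #AnsibleRequires -CSharpUtil Ansible.Basic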
+ found_requires = False
+
+ for req_stmt in re.finditer(module_requires, self.text):
+ found_requires = True
+ # this will bomb on dictionary format - "don't do that"
+ module_list = [x.strip() for x in req_stmt.group(1).split(',')]
+ if len(module_list) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='multiple-utils-per-requires',
+ msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
+ )
+ continue
+
+ module_name = module_list[0]
+
+ if module_name.lower().endswith('.psm1'):
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-requires-extension',
+ msg='Module #Requires should not end in .psm1: "%s"' % module_name
+ )
+
+ for req_stmt in re.finditer(csharp_requires, self.text):
+ found_requires = True
+ # this will bomb on dictionary format - "don't do that"
+ module_list = [x.strip() for x in req_stmt.group(1).split(',')]
+ if len(module_list) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='multiple-csharp-utils-per-requires',
+ msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
+ )
+ continue
+
+ module_name = module_list[0]
+
+ if module_name.lower().endswith('.cs'):
+ self.reporter.error(
+ path=self.object_path,
+ code='illegal-extension-cs',
+ msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
+ )
+
+ # also accept the legacy #POWERSHELL_COMMON replacer signal
+ if not found_requires and REPLACER_WINDOWS not in self.text:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-module-utils-import-csharp-requirements',
+ msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
+ )
+
+ def _find_ps_docs_py_file(self):
+ if self.object_name in self.PS_DOC_BLACKLIST:
+ return
+ py_path = self.path.replace('.ps1', '.py')
+ if not os.path.isfile(py_path):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-python-doc',
+ msg='Missing python documentation file'
+ )
+ return py_path
+
+ def _get_docs(self):
+ docs = {
+ 'DOCUMENTATION': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ 'EXAMPLES': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ 'RETURN': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ }
+ for child in self.ast.body:
+ if isinstance(child, ast.Assign):
+ for grandchild in child.targets:
+ if not isinstance(grandchild, ast.Name):
+ continue
+
+ if grandchild.id == 'DOCUMENTATION':
+ docs['DOCUMENTATION']['value'] = child.value.s
+ docs['DOCUMENTATION']['lineno'] = child.lineno
+ docs['DOCUMENTATION']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+ elif grandchild.id == 'EXAMPLES':
+ docs['EXAMPLES']['value'] = child.value.s
+ docs['EXAMPLES']['lineno'] = child.lineno
+ docs['EXAMPLES']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+ elif grandchild.id == 'RETURN':
+ docs['RETURN']['value'] = child.value.s
+ docs['RETURN']['lineno'] = child.lineno
+ docs['RETURN']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+
+ return docs
+
+ def _validate_docs_schema(self, doc, schema, name, error_code):
+ # TODO: Add line/col
+ errors = []
+ try:
+ schema(doc)
+ except Exception as e:
+ for error in e.errors:
+ error.data = doc
+ errors.extend(e.errors)
+
+ for error in errors:
+ path = [str(p) for p in error.path]
+
+ local_error_code = getattr(error, 'ansible_error_code', error_code)
+
+ if isinstance(error.data, dict):
+ error_message = humanize_error(error.data, error)
+ else:
+ error_message = error
+
+ if path:
+ combined_path = '%s.%s' % (name, '.'.join(path))
+ else:
+ combined_path = name
+
+ self.reporter.error(
+ path=self.object_path,
+ code=local_error_code,
+ msg='%s: %s' % (combined_path, error_message)
+ )
+
+ def _validate_docs(self):
+ doc_info = self._get_docs()
+ doc = None
+ documentation_exists = False
+ examples_exist = False
+ returns_exist = False
+ # We have three ways of marking deprecated/removed files. Have to check each one
+ # individually and then make sure they all agree
+ filename_deprecated_or_removed = False
+ deprecated = False
+ removed = False
+ doc_deprecated = None # doc legally might not exist
+ routing_says_deprecated = False
+
+ if self.object_name.startswith('_') and not os.path.islink(self.object_path):
+ filename_deprecated_or_removed = True
+
+ # We are testing a collection
+ if self.routing:
+ routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {})
+ if routing_deprecation:
+ # meta/runtime.yml says this is deprecated
+ routing_says_deprecated = True
+ deprecated = True
+
+ if not removed:
+ if not bool(doc_info['DOCUMENTATION']['value']):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-documentation',
+ msg='No DOCUMENTATION provided'
+ )
+ else:
+ documentation_exists = True
+ doc, errors, traces = parse_yaml(
+ doc_info['DOCUMENTATION']['value'],
+ doc_info['DOCUMENTATION']['lineno'],
+ self.name, 'DOCUMENTATION'
+ )
+ if doc:
+ add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True)
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+ if not errors and not traces:
+ missing_fragment = False
+ with CaptureStd():
+ try:
+ get_docstring(self.path, fragment_loader, verbose=True,
+ collection_name=self.collection_name, is_module=True)
+ except AssertionError:
+ fragment = doc['extends_documentation_fragment']
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-doc-fragment',
+ msg='DOCUMENTATION fragment missing: %s' % fragment
+ )
+ missing_fragment = True
+ except Exception as e:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-error',
+ msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
+ )
+
+ if not missing_fragment:
+ add_fragments(doc, self.object_path, fragment_loader=fragment_loader, is_module=True)
+
+ if 'options' in doc and doc['options'] is None:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-documentation-options',
+ msg='DOCUMENTATION.options must be a dictionary/hash when used',
+ )
+
+ if 'deprecated' in doc and doc.get('deprecated'):
+ doc_deprecated = True
+ doc_deprecation = doc['deprecated']
+ documentation_collection = doc_deprecation.get('removed_from_collection')
+ if documentation_collection != self.collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-wrong-collection',
+ msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
+ documentation_collection, self.collection_name)
+ )
+ else:
+ doc_deprecated = False
+
+ if os.path.islink(self.object_path):
+ # This module has an alias, which we can tell as it's a symlink
+ # Rather than checking for `module: $filename` we need to check against the true filename
+ self._validate_docs_schema(
+ doc,
+ doc_schema(
+ os.readlink(self.object_path).split('.')[0],
+ for_collection=bool(self.collection),
+ deprecated_module=deprecated,
+ ),
+ 'DOCUMENTATION',
+ 'invalid-documentation',
+ )
+ else:
+ # This is the normal case
+ self._validate_docs_schema(
+ doc,
+ doc_schema(
+ self.object_name.split('.')[0],
+ for_collection=bool(self.collection),
+ deprecated_module=deprecated,
+ ),
+ 'DOCUMENTATION',
+ 'invalid-documentation',
+ )
+
+ if not self.collection:
+ existing_doc = self._check_for_new_args(doc)
+ self._check_version_added(doc, existing_doc)
+
+ if not bool(doc_info['EXAMPLES']['value']):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-examples',
+ msg='No EXAMPLES provided'
+ )
+ else:
+ _doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
+ doc_info['EXAMPLES']['lineno'],
+ self.name, 'EXAMPLES', load_all=True)
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-examples',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ if not bool(doc_info['RETURN']['value']):
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-return',
+ msg='No RETURN provided'
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-return-legacy',
+ msg='No RETURN provided'
+ )
+ else:
+ data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
+ doc_info['RETURN']['lineno'],
+ self.name, 'RETURN')
+ if data:
+ add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True)
+ self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)),
+ 'RETURN', 'return-syntax-error')
+
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='return-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ # Check for mismatched deprecation
+ if not self.collection:
+ mismatched_deprecation = True
+ if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated):
+ mismatched_deprecation = False
+ else:
+ if (filename_deprecated_or_removed and deprecated and doc_deprecated):
+ mismatched_deprecation = False
+ if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)):
+ mismatched_deprecation = False
+
+ if mismatched_deprecation:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='Module deprecation/removed must agree in documentation, by prepending filename with'
+ ' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
+ ' documentation for removed'
+ )
+ else:
+ # We are testing a collection
+ if self.object_name.startswith('_'):
+ self.reporter.error(
+ path=self.object_path,
+ code='collections-no-underscore-on-deprecation',
+ msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead',
+ )
+
+ if doc_deprecated != routing_says_deprecated:
+ # DOCUMENTATION.deprecated and meta/runtime.yml disagree
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
+ )
+ elif routing_says_deprecated:
+ # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
+ # Make sure they give the same version or date.
+ routing_date = routing_deprecation.get('removal_date')
+ routing_version = routing_deprecation.get('removal_version')
+ # The versions and dates in the module documentation are auto-tagged, so remove the tag
+ # to make comparison possible and to avoid confusing the user.
+ documentation_date = doc_deprecation.get('removed_at_date')
+ documentation_version = doc_deprecation.get('removed_in')
+ if not compare_dates(routing_date, documentation_date):
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
+ routing_date, documentation_date)
+ )
+ if routing_version != documentation_version:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
+ routing_version, documentation_version)
+ )
+
+ # In the future we should error if ANSIBLE_METADATA exists in a collection
+
+ return doc_info, doc
+
+ def _check_version_added(self, doc, existing_doc):
+ version_added_raw = doc.get('version_added')
+ try:
+ collection_name = doc.get('version_added_collection')
+ version_added = self._create_strict_version(
+ str(version_added_raw or '0.0'),
+ collection_name=collection_name)
+ except ValueError as e:
+ version_added = version_added_raw or '0.0'
+ if self._is_new_module() or version_added != 'historical':
+ # already reported during schema validation, except:
+ if version_added == 'historical':
+ self.reporter.error(
+ path=self.object_path,
+ code='module-invalid-version-added',
+ msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
+ )
+ return
+
+ if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
+ self.reporter.error(
+ path=self.object_path,
+ code='module-incorrect-version-added',
+ msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
+ )
+
+ if not self._is_new_module():
+ return
+
+ should_be = '.'.join(ansible_version.split('.')[:2])
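+ # e.g. an ansible_version of '2.10.4' yields a should_be of '2.10'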
+ strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
+
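+ # written as two one-sided comparisons; for these version objects this
+ # amounts to version_added != strict_ansible_version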
+ if (version_added < strict_ansible_version or
+ strict_ansible_version < version_added):
+ self.reporter.error(
+ path=self.object_path,
+ code='module-incorrect-version-added',
+ msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
+ )
+
+ def _validate_ansible_module_call(self, docs):
+ try:
+ spec, args, kwargs = get_argument_spec(self.path, self.collection)
+ except AnsibleModuleNotInitialized:
+ self.reporter.error(
+ path=self.object_path,
+ code='ansible-module-not-initialized',
+ msg="Execution of the module did not result in initialization of AnsibleModule",
+ )
+ return
+ except AnsibleModuleImportError as e:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-error',
+ msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
+ )
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ return
+
+ self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)),
+ 'AnsibleModule', 'invalid-ansiblemodule-schema')
+
+ self._validate_argument_spec(docs, spec, kwargs)
+
+ def _validate_list_of_module_args(self, name, terms, spec, context):
+ if terms is None:
+ return
+ if not isinstance(terms, (list, tuple)):
+ # This is already reported by schema checking
+ return
+ for check in terms:
+ if not isinstance(check, (list, tuple)):
+ # This is already reported by schema checking
+ continue
+ bad_term = False
+ for term in check:
+ if not isinstance(term, string_types):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must contain strings in the lists or tuples; found value %r" % (term, )
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-type',
+ msg=msg,
+ )
+ bad_term = True
+ if bad_term:
+ continue
+ if len(set(check)) != len(check):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms"
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-collision',
+ msg=msg,
+ )
+ if not set(check) <= set(spec):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-unknown',
+ msg=msg,
+ )
+
+ def _validate_required_if(self, terms, spec, context, module):
+ if terms is None:
+ return
+ if not isinstance(terms, (list, tuple)):
+ # This is already reported by schema checking
+ return
+ for check in terms:
+ if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
+ # This is already reported by schema checking
+ continue
+ if len(check) == 4 and not isinstance(check[3], bool):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-is_one_of-type',
+ msg=msg,
+ )
+ requirements = check[2]
+ if not isinstance(requirements, (list, tuple)):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-type',
+ msg=msg,
+ )
+ continue
+ bad_term = False
+ for term in requirements:
+ if not isinstance(term, string_types):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have only strings in third value (requirements); got %r" % (term, )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-type',
+ msg=msg,
+ )
+ bad_term = True
+ if bad_term:
+ continue
+ if len(set(requirements)) != len(requirements):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms in requirements"
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-collision',
+ msg=msg,
+ )
+ if not set(requirements) <= set(spec):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-unknown',
+ msg=msg,
+ )
+ key = check[0]
+ if key not in spec:
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have its key %s in argument_spec" % key
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-unknown-key',
+ msg=msg,
+ )
+ continue
+ if key in requirements:
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains its key %s in requirements" % key
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-key-in-requirements',
+ msg=msg,
+ )
+ value = check[1]
+ if value is not None:
+ _type = spec[key].get('type', 'str')
+ if callable(_type):
+ _type_checker = _type
+ else:
+ _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
+ try:
+ with CaptureStd():
+ dummy = _type_checker(value)
+ except (Exception, SystemExit):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-value-type',
+ msg=msg,
+ )
+
+ def _validate_required_by(self, terms, spec, context):
+ if terms is None:
+ return
+ if not isinstance(terms, Mapping):
+ # This is already reported by schema checking
+ return
+ for key, value in terms.items():
+ if isinstance(value, string_types):
+ value = [value]
+ if not isinstance(value, (list, tuple)):
+ # This is already reported by schema checking
+ continue
+ for term in value:
+ if not isinstance(term, string_types):
+ # This is already reported by schema checking
+ continue
+ if len(set(value)) != len(value) or key in value:
+ msg = "required_by"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms"
+ self.reporter.error(
+ path=self.object_path,
+ code='required_by-collision',
+ msg=msg,
+ )
+ if not set(value) <= set(spec) or key not in spec:
+ msg = "required_by"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code='required_by-unknown',
+ msg=msg,
+ )
+
+ def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
+ if not self.analyze_arg_spec:
+ return
+
+ if docs is None:
+ docs = {}
+
+ if context is None:
+ context = []
+
+ if last_context_spec is None:
+ last_context_spec = kwargs
+
+ try:
+ if not context:
+ add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True)
+ except Exception:
+ # Cannot merge fragments
+ return
+
+ # Use this to access type checkers later
+ module = NoArgsAnsibleModule({})
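+ # _CHECK_ARGUMENT_TYPES_DISPATCHER maps type names such as 'str', 'list'
+ # or 'dict' to AnsibleModule's internal type-checker callables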
+
+ self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
+ self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
+ self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
+ self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
+ self._validate_required_by(last_context_spec.get('required_by'), spec, context)
+
+ provider_args = set()
+ args_from_argspec = set()
+ deprecated_args_from_argspec = set()
+ doc_options = docs.get('options', {})
+ if doc_options is None:
+ doc_options = {}
+ for arg, data in spec.items():
+ restricted_argument_names = ('message', 'syslog_facility')
+ if arg.lower() in restricted_argument_names:
+ msg = "Argument '%s' in argument_spec " % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += "must not be one of %s as it is used " \
+ "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-name',
+ msg=msg,
+ )
+ continue
+ if 'aliases' in data:
+ for al in data['aliases']:
+ if al.lower() in restricted_argument_names:
+ msg = "Argument alias '%s' in argument_spec " % al
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += "must not be one of %s as it is used " \
+ "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-name',
+ msg=msg,
+ )
+ continue
+
+ if not isinstance(data, dict):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must be a dictionary/hash when used"
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-spec',
+ msg=msg,
+ )
+ continue
+
+ removed_at_date = data.get('removed_at_date', None)
+ if removed_at_date is not None:
+ try:
+ if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has a removed_at_date '%s' before today" % removed_at_date
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecated-date',
+ msg=msg,
+ )
+ except ValueError:
+ # This should only happen when removed_at_date is not in ISO format. Since schema
+ # validation already reported this as an error, don't report it a second time.
+ pass
+
+ deprecated_aliases = data.get('deprecated_aliases', None)
+ if deprecated_aliases is not None:
+ for deprecated_alias in deprecated_aliases:
+ if 'name' in deprecated_alias and 'date' in deprecated_alias:
+ try:
+ date = deprecated_alias['date']
+ if parse_isodate(date, allow_date=False) < datetime.date.today():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
+ deprecated_alias['name'], deprecated_alias['date'])
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecated-date',
+ msg=msg,
+ )
+ except ValueError:
+ # This should only happen when deprecated_alias['date'] is not in ISO format. Since
+ # schema validation already reported this as an error, don't report it a second
+ # time.
+ pass
+
+ has_version = False
+ if self.collection and self.collection_version is not None:
+ compare_version = self.collection_version
+ version_of_what = "this collection (%s)" % self.collection_version_str
+ code_prefix = 'collection'
+ has_version = True
+ elif not self.collection:
+ compare_version = LOOSE_ANSIBLE_VERSION
+ version_of_what = "Ansible (%s)" % ansible_version
+ code_prefix = 'ansible'
+ has_version = True
+
+ removed_in_version = data.get('removed_in_version', None)
+ if removed_in_version is not None:
+ try:
+ collection_name = data.get('removed_from_collection')
+ removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
+ if has_version and collection_name == self.collection_name and compare_version >= removed_in:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has a deprecated removed_in_version %r," % removed_in_version
+ msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code=code_prefix + '-deprecated-version',
+ msg=msg,
+ )
+ except ValueError as e:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+ except TypeError:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
+ msg += " error while comparing to version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+
+ if deprecated_aliases is not None:
+ for deprecated_alias in deprecated_aliases:
+ if 'name' in deprecated_alias and 'version' in deprecated_alias:
+ try:
+ collection_name = deprecated_alias.get('collection_name')
+ version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
+ if has_version and collection_name == self.collection_name and compare_version >= version:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with removal in version %r," % (
+ deprecated_alias['name'], deprecated_alias['version'])
+ msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code=code_prefix + '-deprecated-version',
+ msg=msg,
+ )
+ except ValueError as e:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
+ deprecated_alias['name'], deprecated_alias['version'], e)
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+ except TypeError:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
+ deprecated_alias['name'], deprecated_alias['version'])
+ msg += " error while comparing to version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+
+ aliases = data.get('aliases', [])
+ if arg in aliases:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is specified as its own alias"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-alias-self',
+ msg=msg
+ )
+ if len(aliases) > len(set(aliases)):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has at least one alias specified multiple times in aliases"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-alias-repeated',
+ msg=msg
+ )
+ if not context and arg == 'state':
+ bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
+ for bad_state in bad_states:
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-state-invalid-choice',
+ msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
+ if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
+ args_from_argspec.add(arg)
+ args_from_argspec.update(aliases)
+ else:
+ deprecated_args_from_argspec.add(arg)
+ deprecated_args_from_argspec.update(aliases)
+ if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
+ if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-spec-options',
+ msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
+ )
+ elif data.get('options'):
+ # Record provider options from network modules, for later comparison
+ for provider_arg, provider_data in data.get('options', {}).items():
+ provider_args.add(provider_arg)
+ provider_args.update(provider_data.get('aliases', []))
+
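+ # `object` serves as a sentinel default so that an explicit `default=None`
+ # in the spec still counts as specifying a default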
+ if data.get('required') and data.get('default', object) != object:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is marked as required but specifies a default. Arguments with a" \
+ " default should not be marked as required"
+ self.reporter.error(
+ path=self.object_path,
+ code='no-default-for-required-parameter',
+ msg=msg
+ )
+
+ if arg in provider_args:
+ # Provider args are being removed from network module top level
+ # don't validate docs<->arg_spec checks below
+ continue
+
+ _type = data.get('type', 'str')
+ if callable(_type):
+ _type_checker = _type
+ else:
+ _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
+
+ _elements = data.get('elements')
+ if (_type == 'list') and not _elements:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as list but elements is not defined"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-list-no-elements',
+ msg=msg
+ )
+ if _elements:
+ if not callable(_elements):
+ module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_elements)
+ if _type != 'list':
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-invalid-elements',
+ msg=msg
+ )
+
+ arg_default = None
+ if 'default' in data and not is_empty(data['default']):
+ try:
+ with CaptureStd():
+ arg_default = _type_checker(data['default'])
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='incompatible-default-type',
+ msg=msg
+ )
+ continue
+
+ doc_options_args = []
+ for alias in sorted(set([arg] + list(aliases))):
+ if alias in doc_options:
+ doc_options_args.append(alias)
+ if len(doc_options_args) == 0:
+ # Undocumented arguments will be handled later (search for undocumented-parameter)
+ doc_options_arg = {}
+ else:
+ doc_options_arg = doc_options[doc_options_args[0]]
+ if len(doc_options_args) > 1:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " with aliases %s is documented multiple times, namely as %s" % (
+ ", ".join([("'%s'" % alias) for alias in aliases]),
+ ", ".join([("'%s'" % alias) for alias in doc_options_args])
+ )
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-documented-multiple-times',
+ msg=msg
+ )
+
+ try:
+ doc_default = None
+ if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']):
+ with CaptureStd():
+ doc_default = _type_checker(doc_options_arg['default'])
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in documentation" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-default-incompatible-type',
+ msg=msg
+ )
+ continue
+
+ if arg_default != doc_default:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-default-does-not-match-spec',
+ msg=msg
+ )
+
+ doc_type = doc_options_arg.get('type')
+ if 'type' in data and data['type'] is not None:
+ if doc_type is None:
+ if not arg.startswith('_'): # hidden parameter, for example _raw_params
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as %r but documentation doesn't define type" % (data['type'])
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-type-not-in-doc',
+ msg=msg
+ )
+ elif data['type'] != doc_type:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-type-does-not-match-spec',
+ msg=msg
+ )
+ else:
+ if doc_type is None:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " uses default type ('str') but documentation doesn't define type"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-missing-type',
+ msg=msg
+ )
+ elif doc_type != 'str':
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " implies type as 'str' but documentation defines as %r" % doc_type
+ self.reporter.error(
+ path=self.object_path,
+ code='implied-parameter-type-mismatch',
+ msg=msg
+ )
+
+ doc_choices = []
+ try:
+ for choice in doc_options_arg.get('choices', []):
+ try:
+ with CaptureStd():
+ doc_choices.append(_type_checker(choice))
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in documentation" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-choices-incompatible-type',
+ msg=msg
+ )
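+ # bail out of the choices loop; the except below continues
+ # with the next argument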
+ raise StopIteration()
+ except StopIteration:
+ continue
+
+ arg_choices = []
+ try:
+ for choice in data.get('choices', []):
+ try:
+ with CaptureStd():
+ arg_choices.append(_type_checker(choice))
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='incompatible-choices',
+ msg=msg
+ )
+ raise StopIteration()
+ except StopIteration:
+ continue
+
+ if not compare_unordered_lists(arg_choices, doc_choices):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-choices-do-not-match-spec',
+ msg=msg
+ )
+
+ doc_required = doc_options_arg.get('required', False)
+ data_required = data.get('required', False)
+ if (doc_required or data_required) and not (doc_required and data_required):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ if doc_required:
+ msg += " is not required, but is documented as being required"
+ else:
+ msg += " is required, but is not documented as being required"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-required-mismatch',
+ msg=msg
+ )
+
+ doc_elements = doc_options_arg.get('elements', None)
+ doc_type = doc_options_arg.get('type', 'str')
+ data_elements = data.get('elements', None)
+ if (doc_elements and not doc_type == 'list'):
+ msg = "Argument '%s " % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % doc_elements
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-elements-invalid',
+ msg=msg
+ )
+ if (doc_elements or data_elements) and not (doc_elements == data_elements):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ if data_elements:
+ msg += " specifies elements as %s," % data_elements
+ else:
+ msg += " does not specify elements,"
+ if doc_elements:
+ msg += "but elements is documented as being %s" % doc_elements
+ else:
+ msg += "but elements is not documented"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-elements-mismatch',
+ msg=msg
+ )
+
+ spec_suboptions = data.get('options')
+ doc_suboptions = doc_options_arg.get('suboptions', {})
+ if spec_suboptions:
+ if not doc_suboptions:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has sub-options but documentation does not define it"
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-suboption-docs',
+ msg=msg
+ )
+ self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
+ context=context + [arg], last_context_spec=data)
+
+ for arg in args_from_argspec:
+ if not str(arg).isidentifier():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is not a valid python identifier"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-invalid',
+ msg=msg
+ )
+
+ if docs:
+ args_from_docs = set()
+ for arg, data in doc_options.items():
+ args_from_docs.add(arg)
+ args_from_docs.update(data.get('aliases', []))
+
+ args_missing_from_docs = args_from_argspec.difference(args_from_docs)
+ docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
+ for arg in args_missing_from_docs:
+ if arg in provider_args:
+ # Provider args are being removed from network module top level
+ # So they are likely not documented on purpose
+ continue
+ msg = "Argument '%s'" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is listed in the argument_spec, but not documented in the module documentation"
+ self.reporter.error(
+ path=self.object_path,
+ code='undocumented-parameter',
+ msg=msg
+ )
+ for arg in docs_missing_from_args:
+ msg = "Argument '%s'" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
+ self.reporter.error(
+ path=self.object_path,
+ code='nonexistent-parameter-documented',
+ msg=msg
+ )
+
+ def _check_for_new_args(self, doc):
+ if not self.base_branch or self._is_new_module():
+ return
+
+ with CaptureStd():
+ try:
+ existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
+ self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True)
+ existing_options = existing_doc.get('options', {}) or {}
+ except AssertionError:
+ fragment = doc['extends_documentation_fragment']
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-existing-doc-fragment',
+ msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
+ )
+ return
+ except Exception as e:
+ self.reporter.warning_trace(
+ path=self.object_path,
+ tracebk=e
+ )
+ self.reporter.warning(
+ path=self.object_path,
+ code='unknown-doc-fragment',
+ msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need to be updated')
+ )
+ return
+
+ try:
+ mod_collection_name = existing_doc.get('version_added_collection')
+ mod_version_added = self._create_strict_version(
+ str(existing_doc.get('version_added', '0.0')),
+ collection_name=mod_collection_name)
+ except ValueError:
+ mod_collection_name = self.collection_name
+ mod_version_added = self._create_strict_version('0.0')
+
+ options = doc.get('options', {}) or {}
+
+ should_be = '.'.join(ansible_version.split('.')[:2])
+ strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
+
+ for option, details in options.items():
+ try:
+ names = [option] + details.get('aliases', [])
+ except (TypeError, AttributeError):
+ # Reporting of this syntax error will be handled by schema validation.
+ continue
+
+ if any(name in existing_options for name in names):
+ # The option already existed. Make sure version_added didn't change.
+ for name in names:
+ existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
+ existing_version = existing_options.get(name, {}).get('version_added')
+ if existing_version:
+ break
+ current_collection_name = details.get('version_added_collection')
+ current_version = details.get('version_added')
+ if current_collection_name != existing_collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added-collection',
+ msg=('version_added for existing option (%s) should '
+ 'belong to collection %r. Currently belongs to %r' %
+ (option, current_collection_name, existing_collection_name))
+ )
+ elif str(current_version) != str(existing_version):
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added',
+ msg=('version_added for existing option (%s) should '
+ 'be %r. Currently %r' %
+ (option, existing_version, current_version))
+ )
+ continue
+
+ try:
+ collection_name = details.get('version_added_collection')
+ version_added = self._create_strict_version(
+ str(details.get('version_added', '0.0')),
+ collection_name=collection_name)
+ except ValueError as e:
+ # already reported during schema validation
+ continue
+
+ if collection_name != self.collection_name:
+ continue
+ if (strict_ansible_version != mod_version_added and
+ (version_added < strict_ansible_version or
+ strict_ansible_version < version_added)):
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added',
+ msg=('version_added for new option (%s) should '
+ 'be %r. Currently %r' %
+ (option, should_be, version_added))
+ )
+
+ return existing_doc
+
+ @staticmethod
+ def is_blacklisted(path):
+ base_name = os.path.basename(path)
+ file_name = os.path.splitext(base_name)[0]
+
+ if file_name.startswith('_') and os.path.islink(path):
+ return True
+
+ if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.BLACKLIST):
+ return True
+
+ for pat in ModuleValidator.BLACKLIST_PATTERNS:
+ if fnmatch(base_name, pat):
+ return True
+
+ return False
+
+ def validate(self):
+ super(ModuleValidator, self).validate()
+ if not self._python_module() and not self._powershell_module():
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-extension',
+ msg=('Official Ansible modules must have a .py '
+ 'extension for python modules or a .ps1 '
+ 'for powershell modules')
+ )
+ self._python_module_override = True
+
+ if self._python_module() and self.ast is None:
+ self.reporter.error(
+ path=self.object_path,
+ code='python-syntax-error',
+ msg='Python SyntaxError while parsing module'
+ )
+ try:
+ compile(self.text, self.path, 'exec')
+ except Exception:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ return
+
+ end_of_deprecation_should_be_removed_only = False
+ if self._python_module():
+ doc_info, docs = self._validate_docs()
+
+ # See if current version => deprecated.removed_in, ie, should be docs only
+ if docs and docs.get('deprecated', False):
+
+ if 'removed_in' in docs['deprecated']:
+ removed_in = None
+ collection_name = docs['deprecated'].get('removed_from_collection')
+ version = docs['deprecated']['removed_in']
+ if collection_name != self.collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-module-deprecation-source',
+ msg=('The deprecation version for a module must be added in this collection')
+ )
+ else:
+ try:
+ removed_in = self._create_strict_version(str(version), collection_name=collection_name)
+ except ValueError as e:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-module-deprecation-version',
+ msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
+ )
+
+ if removed_in:
+ if not self.collection:
+ strict_ansible_version = self._create_strict_version(
+ '.'.join(ansible_version.split('.')[:2]), self.collection_name)
+ end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
+ elif self.collection_version:
+ strict_ansible_version = self.collection_version
+ end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
+
+ # handle deprecation by date
+ if 'removed_at_date' in docs['deprecated']:
+ try:
+ removed_at_date = docs['deprecated']['removed_at_date']
+ if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
+ msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
+ self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
+ except ValueError:
+ # This happens if the date cannot be parsed. This is already checked by the schema.
+ pass
+
+ if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
+ self._validate_ansible_module_call(docs)
+ self._check_for_sys_exit()
+ self._find_blacklist_imports()
+ main = self._find_main_call()
+ self._find_module_utils(main)
+ self._find_has_import()
+ first_callable = self._get_first_callable()
+ self._ensure_imports_below_docs(doc_info, first_callable)
+ self._check_for_subprocess()
+ self._check_for_os_call()
+
+ if self._powershell_module():
+ if self.basename in self.PS_DOC_BLACKLIST:
+ return
+
+ self._validate_ps_replacers()
+ docs_path = self._find_ps_docs_py_file()
+
+ # We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
+ pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
+ if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_BLACKLIST:
+ with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv:
+ docs = docs_mv._validate_docs()[1]
+ self._validate_ansible_module_call(docs)
+
+ self._check_gpl3_header()
+ if not self._just_docs() and not end_of_deprecation_should_be_removed_only:
+ self._check_interpreter(powershell=self._powershell_module())
+ self._check_type_instead_of_isinstance(
+ powershell=self._powershell_module()
+ )
+ if end_of_deprecation_should_be_removed_only:
+ # Ensure that `if __name__ == '__main__':` calls `removed_module()`, which ensures that the module has no code in it
+ main = self._find_main_call('removed_module')
+ # FIXME: Ensure that the version in the call to removed_module is less than +2.
+ # Otherwise it's time to remove the file (This may need to be done in another test to
+ # avoid breaking whenever the Ansible version bumps)
+
+
+class PythonPackageValidator(Validator):
+ BLACKLIST_FILES = frozenset(('__pycache__',))
+
+ def __init__(self, path, reporter=None):
+ super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
+
+ self.path = path
+ self.basename = os.path.basename(path)
+
+ @property
+ def object_name(self):
+ return self.basename
+
+ @property
+ def object_path(self):
+ return self.path
+
+ def validate(self):
+ super(PythonPackageValidator, self).validate()
+
+ if self.basename in self.BLACKLIST_FILES:
+ return
+
+ init_file = os.path.join(self.path, '__init__.py')
+ if not os.path.exists(init_file):
+ self.reporter.error(
+ path=self.object_path,
+ code='subdirectory-missing-init',
+ msg='Ansible module subdirectories must contain an __init__.py'
+ )
+
+
+def setup_collection_loader():
+ collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep)
+ _AnsibleCollectionFinder(collections_paths)
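+ # ANSIBLE_COLLECTIONS_PATH holds os.pathsep-separated roots, e.g.
+ # ANSIBLE_COLLECTIONS_PATH=/usr/share/ansible/collections:~/.ansible/collections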
+
+
+def re_compile(value):
+ """
+ Argparse expects type callables to raise TypeError, but re.compile raises
+ re.error on an invalid pattern.
+
+ This function is a shorthand that converts the re.error exception into a
+ TypeError
+ """
+
+ try:
+ return re.compile(value)
+ except re.error as e:
+ raise TypeError(e)
+
+
+def run():
+ parser = argparse.ArgumentParser(prog="validate-modules")
+ parser.add_argument('modules', nargs='+',
+ help='Path to module or module directory')
+ parser.add_argument('-w', '--warnings', help='Show warnings',
+ action='store_true')
+ parser.add_argument('--exclude', help='RegEx exclusion pattern',
+ type=re_compile)
+ parser.add_argument('--arg-spec', help='Analyze module argument spec',
+ action='store_true', default=False)
+ parser.add_argument('--base-branch', default=None,
+ help='Used in determining if new options were added')
+ parser.add_argument('--format', choices=['json', 'plain'], default='plain',
+ help='Output format. Default: "%(default)s"')
+ parser.add_argument('--output', default='-',
+ help='Output location, use "-" for stdout. '
+ 'Default "%(default)s"')
+ parser.add_argument('--collection',
+ help='Specifies the path to the collection, when '
+ 'validating files within a collection. Ensure '
+ 'that ANSIBLE_COLLECTIONS_PATH is set so the '
+ 'contents of the collection can be located')
+ parser.add_argument('--collection-version',
+ help='The collection\'s version number used to check '
+ 'deprecations')
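+ # Example invocation (flags as defined above):
+ # validate-modules --arg-spec --format json lib/ansible/modules/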
+
+ args = parser.parse_args()
+
+ args.modules = [m.rstrip('/') for m in args.modules]
+
+ reporter = Reporter()
+ git_cache = GitCache(args.base_branch)
+
+ check_dirs = set()
+
+ routing = None
+ if args.collection:
+ setup_collection_loader()
+ routing_file = 'meta/runtime.yml'
+ # Load meta/runtime.yml if it exists, as it may contain deprecation information
+ if os.path.isfile(routing_file):
+ try:
+ with open(routing_file) as f:
+ routing = yaml.safe_load(f)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+
+ for module in args.modules:
+ if os.path.isfile(module):
+ path = module
+ if args.exclude and args.exclude.search(path):
+ continue
+ if ModuleValidator.is_blacklisted(path):
+ continue
+ with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
+ analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
+ git_cache=git_cache, reporter=reporter, routing=routing) as mv1:
+ mv1.validate()
+ check_dirs.add(os.path.dirname(path))
+
+ for root, dirs, files in os.walk(module):
+ basedir = root[len(module) + 1:].split('/', 1)[0]
+ if basedir in BLACKLIST_DIRS:
+ continue
+ for dirname in dirs:
+ if root == module and dirname in BLACKLIST_DIRS:
+ continue
+ path = os.path.join(root, dirname)
+ if args.exclude and args.exclude.search(path):
+ continue
+ check_dirs.add(path)
+
+ for filename in files:
+ path = os.path.join(root, filename)
+ if args.exclude and args.exclude.search(path):
+ continue
+ if ModuleValidator.is_blacklisted(path):
+ continue
+ with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
+ analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
+ git_cache=git_cache, reporter=reporter, routing=routing) as mv2:
+ mv2.validate()
+
+ if not args.collection:
+ for path in sorted(check_dirs):
+ pv = PythonPackageValidator(path, reporter=reporter)
+ pv.validate()
+
+ if args.format == 'plain':
+ sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
+ else:
+ sys.exit(reporter.json(warnings=args.warnings, output=args.output))
+
+
+class GitCache:
+ def __init__(self, base_branch):
+ self.base_branch = base_branch
+
+ if self.base_branch:
+ self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/'])
+ else:
+ self.base_tree = []
+
+ try:
+ self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/'])
+ except GitError as ex:
+ if ex.status == 128:
+ # fallback when there is no .git directory
+ self.head_tree = self._get_module_files()
+ else:
+ raise
+ except OSError as ex:
+ if ex.errno == errno.ENOENT:
+ # fallback when git is not installed
+ self.head_tree = self._get_module_files()
+ else:
+ raise
+
+ self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1'))
+
+ self.base_module_paths.pop('__init__.py', None)
+
+ self.head_aliased_modules = set()
+
+ for path in self.head_tree:
+ filename = os.path.basename(path)
+
+ if filename.startswith('_') and filename != '__init__.py':
+ if os.path.islink(path):
+ self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
+
+ @staticmethod
+ def _get_module_files():
+ module_files = []
+
+ for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'):
+ for file_name in file_names:
+ module_files.append(os.path.join(dir_path, file_name))
+
+ return module_files
+
+ @staticmethod
+ def _git(args):
+ cmd = ['git'] + args
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise GitError(stderr, p.returncode)
+ return stdout.decode('utf-8').splitlines()
+
+
+class GitError(Exception):
+ def __init__(self, message, status):
+ super(GitError, self).__init__(message)
+
+ self.status = status
+
+
+def main():
+ try:
+ run()
+ except KeyboardInterrupt:
+ pass
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py
new file mode 100644
index 00000000..ac025291
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2016 Matt Martz <matt@sivel.net>
+# Copyright (C) 2016 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import runpy
+import json
+import os
+import subprocess
+import sys
+
+from contextlib import contextmanager
+
+from ansible.executor.powershell.module_manifest import PSModuleDepFinder
+from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
+from ansible.module_utils.six import reraise
+from ansible.module_utils._text import to_bytes, to_text
+
+from .utils import CaptureStd, find_executable, get_module_name_from_filename
+
+
+class AnsibleModuleCallError(RuntimeError):
+ pass
+
+
+class AnsibleModuleImportError(ImportError):
+ pass
+
+
+class AnsibleModuleNotInitialized(Exception):
+ pass
+
+
+class _FakeAnsibleModuleInit:
+ def __init__(self):
+ self.args = tuple()
+ self.kwargs = {}
+ self.called = False
+
+ def __call__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ self.called = True
+ raise AnsibleModuleCallError('AnsibleModuleCallError')
+
+
+def _fake_load_params():
+ pass
+
+
+@contextmanager
+def setup_env(filename):
+ # Used to clean up imports later
+ pre_sys_modules = list(sys.modules.keys())
+
+ fake = _FakeAnsibleModuleInit()
+ module = __import__('ansible.module_utils.basic').module_utils.basic
+ _original_init = module.AnsibleModule.__init__
+ _original_load_params = module._load_params
+ setattr(module.AnsibleModule, '__init__', fake)
+ setattr(module, '_load_params', _fake_load_params)
+
+ try:
+ yield fake
+ finally:
+ setattr(module.AnsibleModule, '__init__', _original_init)
+ setattr(module, '_load_params', _original_load_params)
+
+ # Clean up imports to prevent issues with mutable data being used in modules
+ for k in list(sys.modules.keys()):
+ # It's faster if we limit to items in ansible.module_utils
+ # But if this causes problems later, we should remove it
+ if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
+ del sys.modules[k]
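+
+# Interception pattern used above, summarized (names as defined in this file):
+# while setup_env() is active, any module run via runpy constructs
+# AnsibleModule, hits _FakeAnsibleModuleInit.__call__, which records the
+# args/kwargs and raises AnsibleModuleCallError before the real module logic
+# executes; get_py_argument_spec() below relies on this to capture the
+# argument_spec without side effects.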
+
+
+def get_ps_argument_spec(filename, collection):
+ fqc_name = get_module_name_from_filename(filename, collection)
+
+ pwsh = find_executable('pwsh')
+ if not pwsh:
+ raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')
+
+ module_path = os.path.join(os.getcwd(), filename)
+ b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
+ with open(b_module_path, mode='rb') as module_fd:
+ b_module_data = module_fd.read()
+
+ ps_dep_finder = PSModuleDepFinder()
+ ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)
+
+ # For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
+ ps_dep_finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None), wrapper=False)
+
+ util_manifest = json.dumps({
+        'module_path': to_text(module_path, errors='surrogate_or_strict'),
+ 'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
+ 'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]),
+ })
+
+ script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
+ proc = subprocess.Popen([script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=False)
+ stdout, stderr = proc.communicate()
+
+ if proc.returncode != 0:
+ raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8')))
+
+ kwargs = json.loads(stdout)
+
+    # the validate-modules code expects the options spec to be under the argument_spec key, not the options key as set in PS
+ kwargs['argument_spec'] = kwargs.pop('options', {})
+
+ return kwargs['argument_spec'], (), kwargs
+
+
+def get_py_argument_spec(filename, collection):
+ name = get_module_name_from_filename(filename, collection)
+
+ with setup_env(filename) as fake:
+ try:
+ with CaptureStd():
+ runpy.run_module(name, run_name='__main__', alter_sys=True)
+ except AnsibleModuleCallError:
+ pass
+ except BaseException as e:
+ # we want to catch all exceptions here, including sys.exit
+ reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])
+
+ if not fake.called:
+ raise AnsibleModuleNotInitialized()
+
+ try:
+ # for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
+ if 'argument_spec' in fake.kwargs:
+ argument_spec = fake.kwargs['argument_spec']
+ else:
+ argument_spec = fake.args[0]
+ # If add_file_common_args is truish, add options from FILE_COMMON_ARGUMENTS when not present.
+ # This is the only modification to argument_spec done by AnsibleModule itself, and which is
+ # not caught by setup_env's AnsibleModule replacement
+ if fake.kwargs.get('add_file_common_args'):
+ for k, v in FILE_COMMON_ARGUMENTS.items():
+ if k not in argument_spec:
+ argument_spec[k] = v
+ return argument_spec, fake.args, fake.kwargs
+ except (TypeError, IndexError):
+ return {}, (), {}
+
+
+def get_argument_spec(filename, collection):
+ if filename.endswith('.py'):
+ return get_py_argument_spec(filename, collection)
+ else:
+ return get_ps_argument_spec(filename, collection)
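+
+
+# Hedged usage sketch (paths illustrative): the dispatcher above is driven as
+#     spec, args, kwargs = get_argument_spec('lib/ansible/modules/system/ping.py', None)
+# after which spec['data']['default'] == 'pong'; '.py' files are executed
+# in-process with the faked AnsibleModule, anything else is assumed to be
+# PowerShell and shelled out to pwsh via ps_argspec.ps1.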
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1 b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1
new file mode 100755
index 00000000..5ceb9d50
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1
@@ -0,0 +1,110 @@
+#!/usr/bin/env pwsh
+#Requires -Version 6
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$WarningPreference = "Stop"
+
+Function Resolve-CircularReference {
+ <#
+ .SYNOPSIS
+ Removes known types that cause a circular reference in their json serialization.
+
+ .PARAMETER Hash
+ The hash to scan for circular references
+ #>
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [System.Collections.IDictionary]
+ $Hash
+ )
+
+ foreach ($key in [String[]]$Hash.Keys) {
+ $value = $Hash[$key]
+ if ($value -is [System.Collections.IDictionary]) {
+ Resolve-CircularReference -Hash $value
+ } elseif ($value -is [Array] -or $value -is [System.Collections.IList]) {
+ $values = @(foreach ($v in $value) {
+ if ($v -is [System.Collections.IDictionary]) {
+ Resolve-CircularReference -Hash $v
+ }
+ ,$v
+ })
+ $Hash[$key] = $values
+ } elseif ($value -is [DateTime]) {
+ $Hash[$key] = $value.ToString("yyyy-MM-dd")
+ } elseif ($value -is [delegate]) {
+            # Type can be set to a delegate function which defines its own type. For the documentation we just
+            # reflect that as raw
+ if ($key -eq 'type') {
+ $Hash[$key] = 'raw'
+ } else {
+ $Hash[$key] = $value.ToString() # Shouldn't ever happen but just in case.
+ }
+ }
+ }
+}
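+
+# Illustrative effect (hypothetical input): a spec entry such as
+#   @{ type = { param($v) $v } }
+# has its delegate 'type' value rewritten to the string 'raw', and DateTime
+# values collapse to 'yyyy-MM-dd' strings, so the ConvertTo-Json call at the
+# bottom of this script can serialize the hashtable without recursing forever.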
+
+$manifest = ConvertFrom-Json -InputObject $args[0] -AsHashtable
+if (-not $manifest.Contains('module_path') -or -not $manifest.module_path) {
+ Write-Error -Message "No module specified."
+ exit 1
+}
+$module_path = $manifest.module_path
+
+# Check if the path is relative and get the full path to the module
+if (-not ([System.IO.Path]::IsPathRooted($module_path))) {
+ $module_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($module_path)
+}
+
+if (-not (Test-Path -LiteralPath $module_path -PathType Leaf)) {
+ Write-Error -Message "The module at '$module_path' does not exist."
+ exit 1
+}
+
+$module_code = Get-Content -LiteralPath $module_path -Raw
+
+$powershell = [PowerShell]::Create()
+$powershell.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop")
+
+# Load the PowerShell module utils as the module may be using them to refer to shared module options. Currently we
+# can only load the PowerShell utils due to cross-platform compatibility issues.
+if ($manifest.Contains('ps_utils')) {
+ foreach ($util_info in $manifest.ps_utils.GetEnumerator()) {
+ $util_name = $util_info.Key
+ $util_path = $util_info.Value
+
+ if (-not (Test-Path -LiteralPath $util_path -PathType Leaf)) {
+ # Failed to find the util path, just silently ignore for now and hope for the best.
+ continue
+ }
+
+ $util_sb = [ScriptBlock]::Create((Get-Content -LiteralPath $util_path -Raw))
+ $powershell.AddCommand('New-Module').AddParameters(@{
+ Name = $util_name
+ ScriptBlock = $util_sb
+ }) > $null
+ $powershell.AddCommand('Import-Module').AddParameter('WarningAction', 'SilentlyContinue') > $null
+ $powershell.AddCommand('Out-Null').AddStatement() > $null
+
+ # Also import it into the current runspace in case ps_argspec.ps1 needs to use it.
+ $null = New-Module -Name $util_name -ScriptBlock $util_sb | Import-Module -WarningAction SilentlyContinue
+ }
+}
+
+Add-CSharpType -References @(Get-Content -LiteralPath $manifest.ansible_basic -Raw)
+[Ansible.Basic.AnsibleModule]::_DebugArgSpec = $true
+
+$powershell.AddScript($module_code) > $null
+$powershell.Invoke() > $null
+
+if ($powershell.HadErrors) {
+ $powershell.Streams.Error
+ exit 1
+}
+
+$arg_spec = $powershell.Runspace.SessionStateProxy.GetVariable('ansibleTestArgSpec')
+Resolve-CircularReference -Hash $arg_spec
+
+ConvertTo-Json -InputObject $arg_spec -Compress -Depth 99
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py
new file mode 100644
index 00000000..42a2ada4
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Martz <matt@sivel.net>
+# Copyright: (c) 2015, Rackspace US, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from distutils.version import StrictVersion
+from functools import partial
+
+from voluptuous import ALLOW_EXTRA, PREVENT_EXTRA, All, Any, Invalid, Length, Required, Schema, Self, ValueInvalid
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.collections import is_iterable
+from ansible.utils.version import SemanticVersion
+
+from .utils import parse_isodate
+
+list_string_types = list(string_types)
+tuple_string_types = tuple(string_types)
+any_string_types = Any(*string_types)
+
+# Valid DOCUMENTATION.author lines
+# Based on Ansibulbot's extract_github_id()
+# author: First Last (@name) [optional anything]
+# "Ansible Core Team" - Used by the Bot
+# "Michael DeHaan" - nop
+# "OpenStack Ansible SIG" - OpenStack does not use GitHub
+# "Name (!UNKNOWN)" - For the few untraceable authors
+author_line = re.compile(r'^\w.*(\(@([\w-]+)\)|!UNKNOWN)(?![\w.])|^Ansible Core Team$|^Michael DeHaan$|^OpenStack Ansible SIG$')
+
+
+def _add_ansible_error_code(exception, error_code):
+ setattr(exception, 'ansible_error_code', error_code)
+ return exception
+
+
+def isodate(v, error_code=None):
+ try:
+ parse_isodate(v, allow_date=True)
+ except ValueError as e:
+ raise _add_ansible_error_code(Invalid(str(e)), error_code or 'ansible-invalid-date')
+ return v
+
+
+COLLECTION_NAME_RE = re.compile(r'^([^.]+\.[^.]+)$')
+
+
+def collection_name(v, error_code=None):
+ if not isinstance(v, string_types):
+ raise _add_ansible_error_code(
+ Invalid('Collection name must be a string'), error_code or 'collection-invalid-name')
+ m = COLLECTION_NAME_RE.match(v)
+ if not m:
+ raise _add_ansible_error_code(
+ Invalid('Collection name must be of format `<namespace>.<name>`'), error_code or 'collection-invalid-name')
+ return v
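+
+# For illustration: 'community.general' passes, while 'communitygeneral' (no
+# dot) or a non-string value raises Invalid with the 'collection-invalid-name'
+# error code.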
+
+
+def version(for_collection=False):
+ if for_collection:
+ # We do not accept floats for versions in collections
+ return Any(*string_types)
+ return Any(float, *string_types)
+
+
+def date(error_code=None):
+ return Any(isodate, error_code=error_code)
+
+
+def is_callable(v):
+ if not callable(v):
+ raise ValueInvalid('not a valid value')
+ return v
+
+
+def sequence_of_sequences(min=None, max=None):
+ return All(
+ Any(
+ None,
+ [Any(list, tuple)],
+ tuple([Any(list, tuple)]),
+ ),
+ Any(
+ None,
+ [Length(min=min, max=max)],
+ tuple([Length(min=min, max=max)]),
+ ),
+ )
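+
+# Illustrative values for sequence_of_sequences(min=2): None and
+# [['path', 'content'], ['state', 'src']] both validate, while a flat list
+# such as ['path', 'content'] is rejected because every element must itself
+# be a list or tuple of at least two items.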
+
+
+seealso_schema = Schema(
+ [
+ Any(
+ {
+ Required('module'): Any(*string_types),
+ 'description': Any(*string_types),
+ },
+ {
+ Required('ref'): Any(*string_types),
+ Required('description'): Any(*string_types),
+ },
+ {
+ Required('name'): Any(*string_types),
+ Required('link'): Any(*string_types),
+ Required('description'): Any(*string_types),
+ },
+ ),
+ ]
+)
+
+
+argument_spec_types = ['bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw',
+ 'sid', 'str']
+
+
+argument_spec_modifiers = {
+ 'mutually_exclusive': sequence_of_sequences(min=2),
+ 'required_together': sequence_of_sequences(min=2),
+ 'required_one_of': sequence_of_sequences(min=2),
+ 'required_if': sequence_of_sequences(min=3, max=4),
+ 'required_by': Schema({str: Any(list_string_types, tuple_string_types, *string_types)}),
+}
+
+
+def no_required_with_default(v):
+ if v.get('default') and v.get('required'):
+ raise Invalid('required=True cannot be supplied with a default')
+ return v
+
+
+def elements_with_list(v):
+ if v.get('elements') and v.get('type') != 'list':
+ raise Invalid('type must be list to use elements')
+ return v
+
+
+def options_with_apply_defaults(v):
+ if v.get('apply_defaults') and not v.get('options'):
+ raise Invalid('apply_defaults=True requires options to be set')
+ return v
+
+
+def option_deprecation(v):
+ if v.get('removed_in_version') or v.get('removed_at_date'):
+ if v.get('removed_in_version') and v.get('removed_at_date'):
+ raise _add_ansible_error_code(
+                Invalid('Only one of removed_in_version and removed_at_date may be specified'),
+ error_code='deprecation-either-date-or-version')
+ if not v.get('removed_from_collection'):
+ raise _add_ansible_error_code(
+ Invalid('If removed_in_version or removed_at_date is specified, '
+ 'removed_from_collection must be specified as well'),
+ error_code='deprecation-collection-missing')
+ return
+ if v.get('removed_from_collection'):
+ raise Invalid('removed_from_collection cannot be specified without either '
+ 'removed_in_version or removed_at_date')
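+
+# For illustration: {'removed_in_version': '2.0.0',
+# 'removed_from_collection': 'community.general'} passes, while supplying both
+# removed_in_version and removed_at_date, or either without
+# removed_from_collection, raises Invalid with the error codes above.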
+
+
+def argument_spec_schema(for_collection):
+ any_string_types = Any(*string_types)
+ schema = {
+ any_string_types: {
+ 'type': Any(is_callable, *argument_spec_types),
+ 'elements': Any(*argument_spec_types),
+ 'default': object,
+ 'fallback': Any(
+ (is_callable, list_string_types),
+ [is_callable, list_string_types],
+ ),
+ 'choices': Any([object], (object,)),
+ 'required': bool,
+ 'no_log': bool,
+ 'aliases': Any(list_string_types, tuple(list_string_types)),
+ 'apply_defaults': bool,
+ 'removed_in_version': version(for_collection),
+ 'removed_at_date': date(),
+ 'removed_from_collection': collection_name,
+ 'options': Self,
+ 'deprecated_aliases': Any([Any(
+ {
+ Required('name'): Any(*string_types),
+ Required('date'): date(),
+ Required('collection_name'): collection_name,
+ },
+ {
+ Required('name'): Any(*string_types),
+ Required('version'): version(for_collection),
+ Required('collection_name'): collection_name,
+ },
+ )]),
+ }
+ }
+ schema[any_string_types].update(argument_spec_modifiers)
+ schemas = All(
+ schema,
+ Schema({any_string_types: no_required_with_default}),
+ Schema({any_string_types: elements_with_list}),
+ Schema({any_string_types: options_with_apply_defaults}),
+ Schema({any_string_types: option_deprecation}),
+ )
+ return Schema(schemas)
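+
+# A minimal spec that satisfies the composed schema above (hedged example,
+# not taken from a real module):
+#     argument_spec_schema(for_collection=True)({
+#         'state': {'type': 'str', 'choices': ['present', 'absent'], 'default': 'present'},
+#         'force': {'type': 'bool'},
+#     })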
+
+
+def ansible_module_kwargs_schema(for_collection):
+ schema = {
+ 'argument_spec': argument_spec_schema(for_collection),
+ 'bypass_checks': bool,
+ 'no_log': bool,
+ 'check_invalid_arguments': Any(None, bool),
+ 'add_file_common_args': bool,
+ 'supports_check_mode': bool,
+ }
+ schema.update(argument_spec_modifiers)
+ return Schema(schema)
+
+
+json_value = Schema(Any(
+ None,
+ int,
+ float,
+ [Self],
+ *(list({str_type: Self} for str_type in string_types) + list(string_types))
+))
+
+
+def version_added(v, error_code='version-added-invalid', accept_historical=False):
+ if 'version_added' in v:
+ version_added = v.get('version_added')
+ if isinstance(version_added, string_types):
+ # If it is not a string, schema validation will have already complained
+ # - or we have a float and we are in ansible/ansible, in which case we're
+ # also happy.
+ if v.get('version_added_collection') == 'ansible.builtin':
+ if version_added == 'historical' and accept_historical:
+ return v
+ try:
+ version = StrictVersion()
+ version.parse(version_added)
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) is not a valid ansible-base version: '
+ '%s' % (version_added, exc)),
+ error_code=error_code)
+ else:
+ try:
+ version = SemanticVersion()
+ version.parse(version_added)
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) is not a valid collection version '
+ '(see specification at https://semver.org/): '
+ '%s' % (version_added, exc)),
+ error_code=error_code)
+ elif 'version_added_collection' in v:
+ # Must have been manual intervention, since version_added_collection is only
+ # added automatically when version_added is present
+ raise Invalid('version_added_collection cannot be specified without version_added')
+ return v
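+
+# For illustration: {'version_added': '1.2.0',
+# 'version_added_collection': 'community.general'} is checked as a
+# SemanticVersion, {'version_added': '2.10',
+# 'version_added_collection': 'ansible.builtin'} as a StrictVersion, and
+# {'version_added_collection': 'community.general'} alone raises Invalid.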
+
+
+def list_dict_option_schema(for_collection):
+ suboption_schema = Schema(
+ {
+ Required('description'): Any(list_string_types, *string_types),
+ 'required': bool,
+ 'choices': list,
+ 'aliases': Any(list_string_types),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'default': json_value,
+            # Note: types are given as strings such as 'bool', not as literal booleans like True or False
+ 'type': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+            # in case of type='list', elements defines the type of each individual item in the list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ # Recursive suboptions
+ 'suboptions': Any(None, *list({str_type: Self} for str_type in string_types)),
+ },
+ extra=PREVENT_EXTRA
+ )
+
+    # This generates a list of dicts with keys from string_types and suboption_schema as the value
+ # for example in Python 3: {str: suboption_schema}
+ list_dict_suboption_schema = [{str_type: suboption_schema} for str_type in string_types]
+
+ option_schema = Schema(
+ {
+ Required('description'): Any(list_string_types, *string_types),
+ 'required': bool,
+ 'choices': list,
+ 'aliases': Any(list_string_types),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'default': json_value,
+ 'suboptions': Any(None, *list_dict_suboption_schema),
+            # Note: types are given as strings such as 'bool', not as literal booleans like True or False
+ 'type': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+            # in case of type='list', elements defines the type of each individual item in the list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ },
+ extra=PREVENT_EXTRA
+ )
+
+ option_version_added = Schema(
+ All({
+ 'suboptions': Any(None, *[{str_type: Self} for str_type in string_types]),
+ }, partial(version_added, error_code='option-invalid-version-added')),
+ extra=ALLOW_EXTRA
+ )
+
+    # This generates a list of dicts with keys from string_types and option_schema as the value
+ # for example in Python 3: {str: option_schema}
+ return [{str_type: All(option_schema, option_version_added)} for str_type in string_types]
+
+
+def return_contains(v):
+ schema = Schema(
+ {
+ Required('contains'): Any(dict, list, *string_types)
+ },
+ extra=ALLOW_EXTRA
+ )
+ if v.get('type') == 'complex':
+ return schema(v)
+ return v
+
+
+def return_schema(for_collection):
+ return_contains_schema = Any(
+ All(
+ Schema(
+ {
+ Required('description'): Any(list_string_types, *string_types),
+ 'returned': Any(*string_types), # only returned on top level
+ Required('type'): Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'str'),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'sample': json_value,
+ 'example': json_value,
+ 'contains': Any(None, *list({str_type: Self} for str_type in string_types)),
+                # in case of type='list', elements defines the type of each individual item in the list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ }
+ ),
+ Schema(return_contains),
+ Schema(partial(version_added, error_code='option-invalid-version-added')),
+ ),
+ Schema(type(None)),
+ )
+
+    # This generates a list of dicts with keys from string_types and return_contains_schema as the value
+ # for example in Python 3: {str: return_contains_schema}
+ list_dict_return_contains_schema = [{str_type: return_contains_schema} for str_type in string_types]
+
+ return Any(
+ All(
+ Schema(
+ {
+ any_string_types: {
+ Required('description'): Any(list_string_types, *string_types),
+ Required('returned'): Any(*string_types),
+ Required('type'): Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'str'),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'sample': json_value,
+ 'example': json_value,
+ 'contains': Any(None, *list_dict_return_contains_schema),
+                        # in case of type='list', elements defines the type of each individual item in the list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ }
+ }
+ ),
+ Schema({any_string_types: return_contains}),
+ Schema({any_string_types: partial(version_added, error_code='option-invalid-version-added')}),
+ ),
+ Schema(type(None)),
+ )
+
+
+def deprecation_schema(for_collection):
+ main_fields = {
+ Required('why'): Any(*string_types),
+ Required('alternative'): Any(*string_types),
+ Required('removed_from_collection'): collection_name,
+ 'removed': Any(True),
+ }
+
+ date_schema = {
+ Required('removed_at_date'): date(),
+ }
+ date_schema.update(main_fields)
+
+ if for_collection:
+ version_schema = {
+ Required('removed_in'): version(for_collection),
+ }
+ else:
+ version_schema = {
+            # Only list branches that are deprecated or may have docs stubs in them
+ # Deprecation cycle changed at 2.4 (though not retroactively)
+ # 2.3 -> removed_in: "2.5" + n for docs stub
+ # 2.4 -> removed_in: "2.8" + n for docs stub
+ Required('removed_in'): Any(
+ "2.2", "2.3", "2.4", "2.5", "2.6", "2.8", "2.9", "2.10", "2.11", "2.12", "2.13", "2.14"),
+ }
+ version_schema.update(main_fields)
+
+ return Any(
+ Schema(version_schema, extra=PREVENT_EXTRA),
+ Schema(date_schema, extra=PREVENT_EXTRA),
+ )
+
+
+def author(value):
+ if value is None:
+ return value # let schema checks handle
+
+ if not is_iterable(value):
+ value = [value]
+
+ for line in value:
+ if not isinstance(line, string_types):
+ continue # let schema checks handle
+ m = author_line.search(line)
+ if not m:
+ raise Invalid("Invalid author")
+
+ return value
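+
+# Lines accepted by author_line, for illustration: 'First Last (@github-id)',
+# 'First Last (!UNKNOWN)' and 'Ansible Core Team'; a bare 'First Last' with no
+# GitHub id raises Invalid("Invalid author").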
+
+
+def doc_schema(module_name, for_collection=False, deprecated_module=False):
+
+ if module_name.startswith('_'):
+ module_name = module_name[1:]
+ deprecated_module = True
+ doc_schema_dict = {
+ Required('module'): module_name,
+ Required('short_description'): Any(*string_types),
+ Required('description'): Any(list_string_types, *string_types),
+ Required('author'): All(Any(None, list_string_types, *string_types), author),
+ 'notes': Any(None, list_string_types),
+ 'seealso': Any(None, seealso_schema),
+ 'requirements': list_string_types,
+ 'todo': Any(None, list_string_types, *string_types),
+ 'options': Any(None, *list_dict_option_schema(for_collection)),
+ 'extends_documentation_fragment': Any(list_string_types, *string_types),
+ 'version_added_collection': collection_name,
+ }
+
+ if for_collection:
+ # Optional
+ doc_schema_dict['version_added'] = version(for_collection=True)
+ else:
+ doc_schema_dict[Required('version_added')] = version(for_collection=False)
+
+ if deprecated_module:
+ deprecation_required_scheme = {
+ Required('deprecated'): Any(deprecation_schema(for_collection=for_collection)),
+ }
+
+ doc_schema_dict.update(deprecation_required_scheme)
+ return Schema(
+ All(
+ Schema(
+ doc_schema_dict,
+ extra=PREVENT_EXTRA
+ ),
+ partial(version_added, error_code='module-invalid-version-added', accept_historical=not for_collection),
+ )
+ )
+
+
+# Things to add soon
+####################
+# 1) Recursively validate `type: complex` fields
+# This will improve documentation, though it will require a fair amount of module tidy-up
+
+# Possible Future Enhancements
+##############################
+
+# 1) Don't allow empty options for choices, aliases, etc
+# 2) If type: bool ensure choices isn't set - perhaps use Exclusive
+# 3) both version_added should be quoted floats
+
+# Tool that takes JSON and generates RETURN skeleton (needs to support complex structures)
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py
new file mode 100644
index 00000000..939ae651
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import datetime
+import os
+import re
+import sys
+
+from io import BytesIO, TextIOWrapper
+
+import yaml
+import yaml.reader
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+class AnsibleTextIOWrapper(TextIOWrapper):
+ def write(self, s):
+ super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
+
+
+def find_executable(executable, cwd=None, path=None):
+ """Finds the full path to the executable specified"""
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ return match
+
+
+def find_globals(g, tree):
+ """Uses AST to find globals in an ast tree"""
+ for child in tree:
+ if hasattr(child, 'body') and isinstance(child.body, list):
+ find_globals(g, child.body)
+ elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
+ g.add(child.name)
+ continue
+ elif isinstance(child, ast.Assign):
+ try:
+ g.add(child.targets[0].id)
+ except (IndexError, AttributeError):
+ pass
+ elif isinstance(child, ast.Import):
+ g.add(child.names[0].name)
+ elif isinstance(child, ast.ImportFrom):
+ for name in child.names:
+ g_name = name.asname or name.name
+ if g_name == '*':
+ continue
+ g.add(g_name)
+
+
+class CaptureStd():
+ """Context manager to handle capturing stderr and stdout"""
+
+ def __enter__(self):
+ self.sys_stdout = sys.stdout
+ self.sys_stderr = sys.stderr
+ sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding)
+ sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ sys.stdout = self.sys_stdout
+ sys.stderr = self.sys_stderr
+
+ def get(self):
+ """Return ``(stdout, stderr)``"""
+
+ return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue()
+
+
+def get_module_name_from_filename(filename, collection):
+ # Calculate the module's name so that relative imports work correctly
+ if collection:
+ # collection is a relative path, example: ansible_collections/my_namespace/my_collection
+ # filename is a relative path, example: plugins/modules/my_module.py
+ path = os.path.join(collection, filename)
+ else:
+ # filename is a relative path, example: lib/ansible/modules/system/ping.py
+ path = os.path.relpath(filename, 'lib')
+
+ name = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ return name
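+
+# For illustration, using the relative paths described above:
+#     get_module_name_from_filename('lib/ansible/modules/system/ping.py', None)
+# returns 'ansible.modules.system.ping', while with
+# collection='ansible_collections/my_namespace/my_collection' and
+# filename='plugins/modules/my_module.py' it returns
+# 'ansible_collections.my_namespace.my_collection.plugins.modules.my_module'.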
+
+
+def parse_yaml(value, lineno, module, name, load_all=False):
+ traces = []
+ errors = []
+ data = None
+
+ if load_all:
+ loader = yaml.safe_load_all
+ else:
+ loader = yaml.safe_load
+
+ try:
+ data = loader(value)
+ if load_all:
+ data = list(data)
+ except yaml.MarkedYAMLError as e:
+ e.problem_mark.line += lineno - 1
+ e.problem_mark.name = '%s.%s' % (module, name)
+ errors.append({
+ 'msg': '%s is not valid YAML' % name,
+ 'line': e.problem_mark.line + 1,
+ 'column': e.problem_mark.column + 1
+ })
+ traces.append(e)
+ except yaml.reader.ReaderError as e:
+ traces.append(e)
+ # TODO: Better line/column detection
+ errors.append({
+ 'msg': ('%s is not valid YAML. Character '
+ '0x%x at position %d.' % (name, e.character, e.position)),
+ 'line': lineno
+ })
+ except yaml.YAMLError as e:
+ traces.append(e)
+ errors.append({
+ 'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e),
+ 'line': lineno
+ })
+
+ return data, errors, traces
+
+
+def is_empty(value):
+ """Evaluate null like values excluding False"""
+ if value is False:
+ return False
+ return not bool(value)
+
+
+def compare_unordered_lists(a, b):
+ """Safe list comparisons
+
+ Supports:
+ - unordered lists
+ - unhashable elements
+ """
+ return len(a) == len(b) and all(x in b for x in a)
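+
+# Note: this membership-based comparison treats lists as unordered and copes
+# with unhashable elements, but it does not distinguish multiplicity:
+# compare_unordered_lists(['a', 'a', 'b'], ['a', 'b', 'b']) returns True even
+# though the multisets differ.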
+
+
+class NoArgsAnsibleModule(AnsibleModule):
+ """AnsibleModule that does not actually load params. This is used to get access to the
+ methods within AnsibleModule without having to fake a bunch of data
+ """
+ def _load_params(self):
+ self.params = {'_ansible_selinux_special_fs': [], '_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False, '_ansible_check_mode': False}
+
+
+def parse_isodate(v, allow_date):
+ if allow_date:
+ if isinstance(v, datetime.date):
+ return v
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD) or YAML date'
+ else:
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+ if not isinstance(v, string_types):
+ raise ValueError(msg)
+    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older
+    # versions, we have to parse manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
+ raise ValueError(msg)
+ try:
+ return datetime.datetime.strptime(v, '%Y-%m-%d').date()
+ except ValueError:
+ raise ValueError(msg)
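+
+# For illustration: parse_isodate('2020-12-19', allow_date=False) returns
+# datetime.date(2020, 12, 19), while '2020-1-1' (single-digit fields) and
+# '2020-13-01' (no such month) both raise ValueError with the message built
+# above.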
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml
new file mode 100644
index 00000000..45d8b7ad
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+ colons: {max-spaces-after: -1, level: error}
+ commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: {max: 3, level: error}
+ hyphens: {level: error}
+ indentation: disable
+ key-duplicates: enable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml
new file mode 100644
index 00000000..da7e6049
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: disable
+ brackets: disable
+ colons: disable
+ commas: disable
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: disable
+ hyphens: disable
+ indentation: disable
+ key-duplicates: enable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml
new file mode 100644
index 00000000..6d418137
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: disable
+ brackets: disable
+ colons: disable
+ commas: disable
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: disable
+ hyphens: disable
+ indentation: disable
+ key-duplicates: disable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py b/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py
new file mode 100644
index 00000000..933debe7
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python
+"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import json
+import os
+import sys
+
+import yaml
+from yaml.resolver import Resolver
+from yaml.constructor import SafeConstructor
+from yaml.error import MarkedYAMLError
+from _yaml import CParser # pylint: disable=no-name-in-module
+
+from yamllint import linter
+from yamllint.config import YamlLintConfig
+
+
+def main():
+ """Main program body."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ checker = YamlChecker()
+ checker.check(paths)
+ checker.report()
+
+
+class TestConstructor(SafeConstructor):
+ """Yaml Safe Constructor that knows about Ansible tags"""
+
+
+TestConstructor.add_constructor(
+ u'!unsafe',
+ TestConstructor.construct_yaml_str)
+
+
+TestConstructor.add_constructor(
+ u'!vault',
+ TestConstructor.construct_yaml_str)
+
+
+TestConstructor.add_constructor(
+ u'!vault-encrypted',
+ TestConstructor.construct_yaml_str)
+
+
+class TestLoader(CParser, TestConstructor, Resolver):
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ TestConstructor.__init__(self)
+ Resolver.__init__(self)
+
+
+class YamlChecker:
+ """Wrapper around yamllint that supports YAML embedded in Ansible modules."""
+ def __init__(self):
+ self.messages = []
+
+ def report(self):
+ """Print yamllint report to stdout."""
+ report = dict(
+ messages=self.messages,
+ )
+
+ print(json.dumps(report, indent=4, sort_keys=True))
+
+ def check(self, paths):
+ """
+        :type paths: list[str]
+ """
+ config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
+
+ yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml'))
+ module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml'))
+ plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml'))
+
+ for path in paths:
+ extension = os.path.splitext(path)[1]
+
+ with open(path) as f:
+ contents = f.read()
+
+ if extension in ('.yml', '.yaml'):
+ self.check_yaml(yaml_conf, path, contents)
+ elif extension == '.py':
+ if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'):
+ conf = module_conf
+ else:
+ conf = plugin_conf
+
+ self.check_module(conf, path, contents)
+ else:
+ raise Exception('unsupported extension: %s' % extension)
+
+ def check_yaml(self, conf, path, contents):
+ """
+ :type conf: YamlLintConfig
+ :type path: str
+ :type contents: str
+ """
+ self.check_parsable(path, contents)
+ self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)]
+
+ def check_module(self, conf, path, contents):
+ """
+ :type conf: YamlLintConfig
+ :type path: str
+ :type contents: str
+ """
+ docs = self.get_module_docs(path, contents)
+
+ for key, value in docs.items():
+ yaml_data = value['yaml']
+ lineno = value['lineno']
+
+ if yaml_data.startswith('\n'):
+ yaml_data = yaml_data[1:]
+ lineno += 1
+
+ self.check_parsable(path, yaml_data, lineno)
+
+ messages = list(linter.run(yaml_data, conf, path))
+
+ self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages]
+
+ def check_parsable(self, path, contents, lineno=1):
+ """
+ :type path: str
+ :type contents: str
+ :type lineno: int
+ """
+ try:
+ yaml.load(contents, Loader=TestLoader)
+ except MarkedYAMLError as e:
+ self.messages += [{'code': 'unparsable-with-libyaml',
+ 'message': '%s - %s' % (e.args[0], e.args[2]),
+ 'path': path,
+ 'line': e.problem_mark.line + lineno,
+ 'column': e.problem_mark.column + 1,
+ 'level': 'error',
+ }]
+
+ @staticmethod
+ def result_to_message(result, path, line_offset=0, prefix=''):
+ """
+ :type result: any
+ :type path: str
+ :type line_offset: int
+ :type prefix: str
+ :rtype: dict[str, any]
+ """
+ if prefix:
+ prefix = '%s: ' % prefix
+
+ return dict(
+ code=result.rule or result.level,
+ message=prefix + result.desc,
+ path=path,
+ line=result.line + line_offset,
+ column=result.column,
+ level=result.level,
+ )
+
+ def get_module_docs(self, path, contents):
+ """
+ :type path: str
+ :type contents: str
+ :rtype: dict[str, any]
+ """
+ module_doc_types = [
+ 'DOCUMENTATION',
+ 'EXAMPLES',
+ 'RETURN',
+ ]
+
+ docs = {}
+
+ def check_assignment(statement, doc_types=None):
+ """Check the given statement for a documentation assignment."""
+ for target in statement.targets:
+ if isinstance(target, ast.Tuple):
+ continue
+
+ if doc_types and target.id not in doc_types:
+ continue
+
+ docs[target.id] = dict(
+ yaml=statement.value.s,
+ lineno=statement.lineno,
+ end_lineno=statement.lineno + len(statement.value.s.splitlines())
+ )
+
+ module_ast = self.parse_module(path, contents)
+
+ if not module_ast:
+ return {}
+
+ is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/')
+ is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/')
+
+ if is_plugin and not is_doc_fragment:
+ for body_statement in module_ast.body:
+ if isinstance(body_statement, ast.Assign):
+ check_assignment(body_statement, module_doc_types)
+ elif is_doc_fragment:
+ for body_statement in module_ast.body:
+ if isinstance(body_statement, ast.ClassDef):
+ for class_statement in body_statement.body:
+ if isinstance(class_statement, ast.Assign):
+ check_assignment(class_statement)
+ else:
+ raise Exception('unsupported path: %s' % path)
+
+ return docs
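+
+    # For a module assigning DOCUMENTATION/EXAMPLES/RETURN at the top level,
+    # the dict returned by get_module_docs() looks roughly like (values
+    # illustrative):
+    #     {'DOCUMENTATION': {'yaml': '...', 'lineno': 5, 'end_lineno': 42}, ...}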
+
+ def parse_module(self, path, contents):
+ """
+ :type path: str
+ :type contents: str
+ :rtype: ast.Module | None
+ """
+ try:
+ return ast.parse(contents)
+ except SyntaxError as ex:
+ self.messages.append(dict(
+ code='python-syntax-error',
+ message=str(ex),
+ path=path,
+ line=ex.lineno,
+ column=ex.offset,
+ level='error',
+ ))
+ except Exception as ex: # pylint: disable=broad-except
+ self.messages.append(dict(
+ code='python-parse-error',
+ message=str(ex),
+ path=path,
+ line=0,
+ column=0,
+ level='error',
+ ))
+
+ return None
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 b/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1
new file mode 100644
index 00000000..7e039bb4
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1
@@ -0,0 +1,453 @@
+#Requires -Version 3.0
+
+# Configure a Windows host for remote management with Ansible
+# -----------------------------------------------------------
+#
+# This script checks the current WinRM (PS Remoting) configuration and makes
+# the necessary changes to allow Ansible to connect, authenticate and
+# execute PowerShell commands.
+#
+# All events are logged to the Windows EventLog, useful for unattended runs.
+#
+# Use option -Verbose in order to see the verbose output messages.
+#
+# Use option -CertValidityDays to specify how long this certificate is valid
+# starting from today. So you would specify -CertValidityDays 3650 to get
+# a 10-year valid certificate.
+#
+# Use option -ForceNewSSLCert if the system has been SysPreped and a new
+# SSL Certificate must be forced on the WinRM Listener when re-running this
+# script. This is necessary when a new SID and CN name is created.
+#
+# Use option -EnableCredSSP to enable CredSSP as an authentication option.
+#
+# Use option -DisableBasicAuth to disable basic authentication.
+#
+# Use option -SkipNetworkProfileCheck to skip the network profile check.
+# Without specifying this the script will only run if the device's interfaces
+# are in DOMAIN or PRIVATE zones. Provide this switch if you want to enable
+# WinRM on a device with an interface in PUBLIC zone.
+#
+# Use option -SubjectName to specify the CN name of the certificate. This
+# defaults to the system's hostname and generally should not be specified.
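+#
+# Example invocation (from an elevated PowerShell prompt; parameter values
+# are illustrative):
+#   powershell.exe -ExecutionPolicy ByPass -File ConfigureRemotingForAnsible.ps1 -Verbose -CertValidityDays 3650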
+
+# Written by Trond Hindenes <trond@hindenes.com>
+# Updated by Chris Church <cchurch@ansible.com>
+# Updated by Michael Crilly <mike@autologic.cm>
+# Updated by Anton Ouzounov <Anton.Ouzounov@careerbuilder.com>
+# Updated by Nicolas Simond <contact@nicolas-simond.com>
+# Updated by Dag Wieërs <dag@wieers.com>
+# Updated by Jordan Borean <jborean93@gmail.com>
+# Updated by Erwan Quélin <erwan.quelin@gmail.com>
+# Updated by David Norman <david@dkn.email>
+#
+# Version 1.0 - 2014-07-06
+# Version 1.1 - 2014-11-11
+# Version 1.2 - 2015-05-15
+# Version 1.3 - 2016-04-04
+# Version 1.4 - 2017-01-05
+# Version 1.5 - 2017-02-09
+# Version 1.6 - 2017-04-18
+# Version 1.7 - 2017-11-23
+# Version 1.8 - 2018-02-23
+# Version 1.9 - 2018-09-21
+
+# Support -Verbose option
+[CmdletBinding()]
+
+Param (
+ [string]$SubjectName = $env:COMPUTERNAME,
+ [int]$CertValidityDays = 1095,
+ [switch]$SkipNetworkProfileCheck,
+ $CreateSelfSignedCert = $true,
+ [switch]$ForceNewSSLCert,
+ [switch]$GlobalHttpFirewallAccess,
+ [switch]$DisableBasicAuth = $false,
+ [switch]$EnableCredSSP
+)
+
+Function Write-Log
+{
+ $Message = $args[0]
+ Write-EventLog -LogName Application -Source $EventSource -EntryType Information -EventId 1 -Message $Message
+}
+
+Function Write-VerboseLog
+{
+ $Message = $args[0]
+ Write-Verbose $Message
+ Write-Log $Message
+}
+
+Function Write-HostLog
+{
+ $Message = $args[0]
+ Write-Output $Message
+ Write-Log $Message
+}
+
+Function New-LegacySelfSignedCert
+{
+ Param (
+ [string]$SubjectName,
+ [int]$ValidDays = 1095
+ )
+
+ $hostnonFQDN = $env:computerName
+ $hostFQDN = [System.Net.Dns]::GetHostByName(($env:computerName)).Hostname
+ $SignatureAlgorithm = "SHA256"
+
+ $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1"
+ $name.Encode("CN=$SubjectName", 0)
+
+ $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1"
+ $key.ProviderName = "Microsoft Enhanced RSA and AES Cryptographic Provider"
+ $key.KeySpec = 1
+ $key.Length = 4096
+ $key.SecurityDescriptor = "D:PAI(A;;0xd01f01ff;;;SY)(A;;0xd01f01ff;;;BA)(A;;0x80120089;;;NS)"
+ $key.MachineContext = 1
+ $key.Create()
+
+ $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1"
+ $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1")
+ $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1"
+ $ekuoids.Add($serverauthoid)
+ $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1"
+ $ekuext.InitializeEncode($ekuoids)
+
+ $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1"
+ $cert.InitializeFromPrivateKey(2, $key, "")
+ $cert.Subject = $name
+ $cert.Issuer = $cert.Subject
+ $cert.NotBefore = (Get-Date).AddDays(-1)
+ $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays)
+
+ $SigOID = New-Object -ComObject X509Enrollment.CObjectId
+ $SigOID.InitializeFromValue(([Security.Cryptography.Oid]$SignatureAlgorithm).Value)
+
+ [string[]] $AlternativeName += $hostnonFQDN
+ $AlternativeName += $hostFQDN
+ $IAlternativeNames = New-Object -ComObject X509Enrollment.CAlternativeNames
+
+ foreach ($AN in $AlternativeName)
+ {
+ $AltName = New-Object -ComObject X509Enrollment.CAlternativeName
+ $AltName.InitializeFromString(0x3,$AN)
+ $IAlternativeNames.Add($AltName)
+ }
+
+ $SubjectAlternativeName = New-Object -ComObject X509Enrollment.CX509ExtensionAlternativeNames
+ $SubjectAlternativeName.InitializeEncode($IAlternativeNames)
+
+ [String[]]$KeyUsage = ("DigitalSignature", "KeyEncipherment")
+ $KeyUsageObj = New-Object -ComObject X509Enrollment.CX509ExtensionKeyUsage
+ $KeyUsageObj.InitializeEncode([int][Security.Cryptography.X509Certificates.X509KeyUsageFlags]($KeyUsage))
+ $KeyUsageObj.Critical = $true
+
+ $cert.X509Extensions.Add($KeyUsageObj)
+ $cert.X509Extensions.Add($ekuext)
+ $cert.SignatureInformation.HashAlgorithm = $SigOID
+    $cert.X509Extensions.Add($SubjectAlternativeName)
+ $cert.Encode()
+
+ $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1"
+ $enrollment.InitializeFromRequest($cert)
+ $certdata = $enrollment.CreateRequest(0)
+ $enrollment.InstallResponse(2, $certdata, 0, "")
+
+ # extract/return the thumbprint from the generated cert
+ $parsed_cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2
+ $parsed_cert.Import([System.Text.Encoding]::UTF8.GetBytes($certdata))
+
+ return $parsed_cert.Thumbprint
+}
+
+Function Enable-GlobalHttpFirewallAccess
+{
+ Write-Verbose "Forcing global HTTP firewall access"
+ # this is a fairly naive implementation; could be more sophisticated about rule matching/collapsing
+ $fw = New-Object -ComObject HNetCfg.FWPolicy2
+
+ # try to find/enable the default rule first
+ $add_rule = $false
+ $matching_rules = $fw.Rules | Where-Object { $_.Name -eq "Windows Remote Management (HTTP-In)" }
+ $rule = $null
+ If ($matching_rules) {
+ If ($matching_rules -isnot [Array]) {
+ Write-Verbose "Editing existing single HTTP firewall rule"
+ $rule = $matching_rules
+ }
+ Else {
+ # try to find one with the All or Public profile first
+ Write-Verbose "Found multiple existing HTTP firewall rules..."
+ $rule = $matching_rules | ForEach-Object { $_.Profiles -band 4 }[0]
+
+ If (-not $rule -or $rule -is [Array]) {
+ Write-Verbose "Editing an arbitrary single HTTP firewall rule (multiple existed)"
+ # oh well, just pick the first one
+ $rule = $matching_rules[0]
+ }
+ }
+ }
+
+ If (-not $rule) {
+ Write-Verbose "Creating a new HTTP firewall rule"
+ $rule = New-Object -ComObject HNetCfg.FWRule
+ $rule.Name = "Windows Remote Management (HTTP-In)"
+ $rule.Description = "Inbound rule for Windows Remote Management via WS-Management. [TCP 5985]"
+ $add_rule = $true
+ }
+
+ $rule.Profiles = 0x7FFFFFFF
+ $rule.Protocol = 6
+ $rule.LocalPorts = 5985
+ $rule.RemotePorts = "*"
+ $rule.LocalAddresses = "*"
+ $rule.RemoteAddresses = "*"
+ $rule.Enabled = $true
+ $rule.Direction = 1
+ $rule.Action = 1
+ $rule.Grouping = "Windows Remote Management"
+
+ If ($add_rule) {
+ $fw.Rules.Add($rule)
+ }
+
+ Write-Verbose "HTTP firewall rule $($rule.Name) updated"
+}
+
+# Setup error handling.
+Trap
+{
+ $_
+ Exit 1
+}
+$ErrorActionPreference = "Stop"
+
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if (-Not $myWindowsPrincipal.IsInRole($adminRole))
+{
+ Write-Output "ERROR: You need elevated Administrator privileges in order to run this script."
+ Write-Output " Start Windows PowerShell by using the Run as Administrator option."
+ Exit 2
+}
+
+$EventSource = $MyInvocation.MyCommand.Name
+If (-Not $EventSource)
+{
+ $EventSource = "Powershell CLI"
+}
+
+If ([System.Diagnostics.EventLog]::Exists('Application') -eq $False -or [System.Diagnostics.EventLog]::SourceExists($EventSource) -eq $False)
+{
+ New-EventLog -LogName Application -Source $EventSource
+}
+
+# Detect PowerShell version.
+If ($PSVersionTable.PSVersion.Major -lt 3)
+{
+ Write-Log "PowerShell version 3 or higher is required."
+ Throw "PowerShell version 3 or higher is required."
+}
+
+# Find and start the WinRM service.
+Write-Verbose "Verifying WinRM service."
+If (!(Get-Service "WinRM"))
+{
+ Write-Log "Unable to find the WinRM service."
+ Throw "Unable to find the WinRM service."
+}
+ElseIf ((Get-Service "WinRM").Status -ne "Running")
+{
+ Write-Verbose "Setting WinRM service to start automatically on boot."
+ Set-Service -Name "WinRM" -StartupType Automatic
+ Write-Log "Set WinRM service to start automatically on boot."
+ Write-Verbose "Starting WinRM service."
+ Start-Service -Name "WinRM" -ErrorAction Stop
+ Write-Log "Started WinRM service."
+
+}
+
+# WinRM should be running; check that we have a PS session config.
+If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener)))
+{
+ If ($SkipNetworkProfileCheck) {
+ Write-Verbose "Enabling PS Remoting without checking Network profile."
+ Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop
+ Write-Log "Enabled PS Remoting without checking Network profile."
+ }
+ Else {
+ Write-Verbose "Enabling PS Remoting."
+ Enable-PSRemoting -Force -ErrorAction Stop
+ Write-Log "Enabled PS Remoting."
+ }
+}
+Else
+{
+ Write-Verbose "PS Remoting is already enabled."
+}
+
+# Ensure LocalAccountTokenFilterPolicy is set to 1
+# https://github.com/ansible/ansible/issues/42978
+$token_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
+$token_prop_name = "LocalAccountTokenFilterPolicy"
+$token_key = Get-Item -Path $token_path
+$token_value = $token_key.GetValue($token_prop_name, $null)
+if ($token_value -ne 1) {
+ Write-Verbose "Setting LocalAccountTOkenFilterPolicy to 1"
+ if ($null -ne $token_value) {
+ Remove-ItemProperty -Path $token_path -Name $token_prop_name
+ }
+ New-ItemProperty -Path $token_path -Name $token_prop_name -Value 1 -PropertyType DWORD > $null
+}
+
+# Make sure there is an SSL listener.
+$listeners = Get-ChildItem WSMan:\localhost\Listener
+If (!($listeners | Where-Object {$_.Keys -like "TRANSPORT=HTTPS"}))
+{
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ # Create the hashtables of settings to be used.
+ $valueset = @{
+ Hostname = $SubjectName
+ CertificateThumbprint = $thumbprint
+ }
+
+ $selectorset = @{
+ Transport = "HTTPS"
+ Address = "*"
+ }
+
+ Write-Verbose "Enabling SSL listener."
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ Write-Log "Enabled SSL listener."
+}
+Else
+{
+ Write-Verbose "SSL listener is already active."
+
+    # Force a new SSL cert on the listener if the $ForceNewSSLCert switch is set
+ If ($ForceNewSSLCert)
+ {
+
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ $valueset = @{
+ CertificateThumbprint = $thumbprint
+ Hostname = $SubjectName
+ }
+
+ # Delete the listener for SSL
+ $selectorset = @{
+ Address = "*"
+ Transport = "HTTPS"
+ }
+ Remove-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset
+
+ # Add new Listener with new SSL cert
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ }
+}
+
+# Check for basic authentication.
+$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "Basic"}
+
+If ($DisableBasicAuth)
+{
+ If (($basicAuthSetting.Value) -eq $true)
+ {
+ Write-Verbose "Disabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $false
+ Write-Log "Disabled basic auth support."
+ }
+ Else
+ {
+ Write-Verbose "Basic auth is already disabled."
+ }
+}
+Else
+{
+ If (($basicAuthSetting.Value) -eq $false)
+ {
+ Write-Verbose "Enabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true
+ Write-Log "Enabled basic auth support."
+ }
+ Else
+ {
+ Write-Verbose "Basic auth is already enabled."
+ }
+}
+
+# If EnableCredSSP is set to true
+If ($EnableCredSSP)
+{
+ # Check for CredSSP authentication
+ $credsspAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "CredSSP"}
+ If (($credsspAuthSetting.Value) -eq $false)
+ {
+ Write-Verbose "Enabling CredSSP auth support."
+ Enable-WSManCredSSP -role server -Force
+ Write-Log "Enabled CredSSP auth support."
+ }
+}
+
+If ($GlobalHttpFirewallAccess) {
+ Enable-GlobalHttpFirewallAccess
+}
+
+# Configure firewall to allow WinRM HTTPS connections.
+$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS"
+$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any
+If ($fwtest1.count -lt 5)
+{
+ Write-Verbose "Adding firewall rule to allow WinRM HTTPS."
+ netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow
+ Write-Log "Added firewall rule to allow WinRM HTTPS."
+}
+ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5))
+{
+ Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile."
+ netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any
+ Write-Log "Updated firewall rule to allow WinRM HTTPS for any profile."
+}
+Else
+{
+ Write-Verbose "Firewall rule already exists to allow WinRM HTTPS."
+}
+
+# Test a remoting connection to localhost, which should work.
+$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue
+$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
+
+$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue
+
+If ($httpResult -and $httpsResult)
+{
+ Write-Verbose "HTTP: Enabled | HTTPS: Enabled"
+}
+ElseIf ($httpsResult -and !$httpResult)
+{
+ Write-Verbose "HTTP: Disabled | HTTPS: Enabled"
+}
+ElseIf ($httpResult -and !$httpsResult)
+{
+ Write-Verbose "HTTP: Enabled | HTTPS: Disabled"
+}
+Else
+{
+ Write-Log "Unable to establish an HTTP or HTTPS remoting session."
+ Throw "Unable to establish an HTTP or HTTPS remoting session."
+}
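+
+# At this point the host can typically be reached from the Ansible control
+# machine over WinRM; a minimal check from that side (inventory details are
+# illustrative):
+#   ansible windows-hosts -m win_ping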
+Write-VerboseLog "PS Remoting has been successfully configured for Ansible."
diff --git a/test/lib/ansible_test/_data/setup/docker.sh b/test/lib/ansible_test/_data/setup/docker.sh
new file mode 100644
index 00000000..c65e8ac5
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/docker.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -eu
+
+# Required for newer mysql-server packages to install/upgrade on Ubuntu 16.04.
+rm -f /usr/sbin/policy-rc.d
+
+# Improve prompts on remote host for interactive use.
+# shellcheck disable=SC1117
+cat << EOF > ~/.bashrc
+alias ls='ls --color=auto'
+export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+cd ~/ansible/
+EOF
diff --git a/test/lib/ansible_test/_data/setup/remote.sh b/test/lib/ansible_test/_data/setup/remote.sh
new file mode 100644
index 00000000..654f678d
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/remote.sh
@@ -0,0 +1,162 @@
+#!/bin/sh
+
+set -eu
+
+platform="$1"
+python_version="$2"
+python_interpreter="python${python_version}"
+
+cd ~/
+
+install_pip () {
+ if ! "${python_interpreter}" -m pip.__main__ --version --disable-pip-version-check 2>/dev/null; then
+ curl --silent --show-error https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py
+ "${python_interpreter}" /tmp/get-pip.py --disable-pip-version-check --quiet
+ rm /tmp/get-pip.py
+ fi
+}
+
+if [ "${platform}" = "freebsd" ]; then
+ py_version="$(echo "${python_version}" | tr -d '.')"
+
+ while true; do
+ env ASSUME_ALWAYS_YES=YES pkg bootstrap && \
+ pkg install -q -y \
+ bash \
+ curl \
+ gtar \
+ "python${py_version}" \
+ "py${py_version}-Jinja2" \
+ "py${py_version}-virtualenv" \
+ "py${py_version}-cryptography" \
+ sudo \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+
+ if ! grep '^PermitRootLogin yes$' /etc/ssh/sshd_config > /dev/null; then
+ sed -i '' 's/^# *PermitRootLogin.*$/PermitRootLogin yes/;' /etc/ssh/sshd_config
+ service sshd restart
+ fi
+elif [ "${platform}" = "rhel" ]; then
+ if grep '8\.' /etc/redhat-release; then
+ while true; do
+ yum module install -q -y python36 && \
+ yum install -q -y \
+ gcc \
+ python3-devel \
+ python3-jinja2 \
+ python3-virtualenv \
+ python3-cryptography \
+ iptables \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+ else
+ while true; do
+ yum install -q -y \
+ gcc \
+ python-devel \
+ python-virtualenv \
+ python2-cryptography \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+ fi
+
+ # pin packaging and pyparsing to match the downstream vendored versions
+ "${python_interpreter}" -m pip install packaging==20.4 pyparsing==2.4.7 --disable-pip-version-check
+elif [ "${platform}" = "centos" ]; then
+ while true; do
+ yum install -q -y \
+ gcc \
+ python-devel \
+ python-virtualenv \
+ python2-cryptography \
+ libffi-devel \
+ openssl-devel \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+elif [ "${platform}" = "macos" ]; then
+ while true; do
+ pip3 install --disable-pip-version-check --quiet \
+ 'virtualenv<20' \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+elif [ "${platform}" = "osx" ]; then
+ while true; do
+ pip install --disable-pip-version-check --quiet \
+ 'virtualenv<20' \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+elif [ "${platform}" = "aix" ]; then
+ chfs -a size=1G /
+ chfs -a size=4G /usr
+ chfs -a size=1G /var
+ chfs -a size=1G /tmp
+ chfs -a size=2G /opt
+ while true; do
+ yum install -q -y \
+ gcc \
+ libffi-devel \
+ python-jinja2 \
+ python-cryptography \
+ python-pip && \
+ pip install --disable-pip-version-check --quiet virtualenv \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+fi
+
+# Generate our ssh key and add it to our authorized_keys file.
+# We also need to add localhost's server keys to known_hosts.
+
+if [ ! -f "${HOME}/.ssh/id_rsa.pub" ]; then
+ ssh-keygen -m PEM -q -t rsa -N '' -f "${HOME}/.ssh/id_rsa"
+ # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
+ touch "${HOME}/.ssh/id_rsa.new"
+ chmod 0600 "${HOME}/.ssh/id_rsa.new"
+ sed 's/\(BEGIN\|END\) PRIVATE KEY/\1 RSA PRIVATE KEY/' "${HOME}/.ssh/id_rsa" > "${HOME}/.ssh/id_rsa.new"
+ mv "${HOME}/.ssh/id_rsa.new" "${HOME}/.ssh/id_rsa"
+ cat "${HOME}/.ssh/id_rsa.pub" >> "${HOME}/.ssh/authorized_keys"
+ chmod 0600 "${HOME}/.ssh/authorized_keys"
+ for key in /etc/ssh/ssh_host_*_key.pub; do
+ pk=$(cat "${key}")
+ echo "localhost ${pk}" >> "${HOME}/.ssh/known_hosts"
+ done
+fi
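+
+# Sanity check (illustrative): after the conversion above the private key should
+# use the PEM header paramiko understands.
+# head -n 1 "${HOME}/.ssh/id_rsa"   # expect: -----BEGIN RSA PRIVATE KEY-----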
+
+# Improve prompts on remote host for interactive use.
+# shellcheck disable=SC1117
+cat << EOF > ~/.bashrc
+if ls --color > /dev/null 2>&1; then
+ alias ls='ls --color'
+elif ls -G > /dev/null 2>&1; then
+ alias ls='ls -G'
+fi
+export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+EOF
+
+# Make sure ~/ansible/ is the starting directory for interactive shells.
+if [ "${platform}" = "osx" ]; then
+ echo "cd ~/ansible/" >> ~/.bashrc
+elif [ "${platform}" = "macos" ] ; then
+ echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bashrc
+ echo "cd ~/ansible/" >> ~/.bashrc
+fi
diff --git a/test/lib/ansible_test/_data/setup/windows-httptester.ps1 b/test/lib/ansible_test/_data/setup/windows-httptester.ps1
new file mode 100644
index 00000000..70bdb332
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/windows-httptester.ps1
@@ -0,0 +1,228 @@
+<#
+.SYNOPSIS
+Configures a Windows host to connect to the httptester container running on
+the Ansible host. This sets up the Windows hosts file and forwards the local
+ports to use this connection. The script continues to run in the background
+until it is deleted.
+
+Run this over SSH with the -R arguments to forward ports 8080 and 8443 to the
+httptester container.
+
+.PARAMETER Hosts
+A list of hostnames, delimited by '|', to add to the Windows hosts file for the
+httptester container, e.g. 'ansible.host.com|secondary.host.test'.
+#>
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory=$true, Position=0)][String]$Hosts
+)
+$Hosts = $Hosts.Split('|')
+
+$ProgressPreference = "SilentlyContinue"
+$ErrorActionPreference = "Stop"
+$os_version = [Version](Get-Item -Path "$env:SystemRoot\System32\kernel32.dll").VersionInfo.ProductVersion
+Write-Verbose -Message "Configuring HTTP Tester on Windows $os_version for '$($Hosts -join "', '")'"
+
+Function Get-PmapperRuleBytes {
+ <#
+ .SYNOPSIS
+ Create the byte values that configure a rule in the PMapper configuration
+ file. The format isn't really documented, but because PMapper is only used
+ for Server 2008 R2 we stick to a single version and live with the legacy
+ format for now.
+
+ .PARAMETER ListenPort
+ The port to listen on locally; traffic to it is forwarded to the host defined
+ by ConnectAddress and ConnectPort.
+
+ .PARAMETER ConnectAddress
+ The hostname or IP to map the traffic to.
+
+ .PARAMETER ConnectPort
+ The port on ConnectAddress to map the traffic to.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][UInt16]$ListenPort,
+ [Parameter(Mandatory=$true)][String]$ConnectAddress,
+ [Parameter(Mandatory=$true)][Int]$ConnectPort
+ )
+
+ $connect_field = "$($ConnectAddress):$ConnectPort"
+ $connect_bytes = [System.Text.Encoding]::ASCII.GetBytes($connect_field)
+ $data_length = [byte]($connect_bytes.Length + 6) # size of payload minus header, length, and footer
+ $port_bytes = [System.BitConverter]::GetBytes($ListenPort)
+
+ $payload = [System.Collections.Generic.List`1[Byte]]@()
+ $payload.Add([byte]16) > $null # header is \x10, means Configure Mapping rule
+ $payload.Add($data_length) > $null
+ $payload.AddRange($connect_bytes)
+ $payload.AddRange($port_bytes)
+ $payload.AddRange([byte[]]@(0, 0)) # 2 extra bytes of padding
+ $payload.Add([byte]0) > $null # 0 is TCP, 1 is UDP
+ $payload.Add([byte]0) > $null # 0 is Any, 1 is Internet
+ $payload.Add([byte]31) > $null # footer is \x1f, means end of Configure Mapping rule
+
+ return ,$payload.ToArray()
+}
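+
+# Worked example of the layout above (illustrative): a rule forwarding
+# 80 -> 127.0.0.1:8080 yields the bytes
+#   0x10 0x14 '127.0.0.1:8080' 0x50 0x00 0x00 0x00 0x00 0x00 0x1F
+# where 0x14 (20) is the 14-byte address:port field plus the 6 bytes after it.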
+
+Write-Verbose -Message "Adding host file entries"
+$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts"
+$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
+$changed = $false
+foreach ($httptester_host in $Hosts) {
+ $host_line = "127.0.0.1 $httptester_host # ansible-test httptester"
+ if ($host_line -notin $hosts_file_lines) {
+ $hosts_file_lines += $host_line
+ $changed = $true
+ }
+}
+if ($changed) {
+ Write-Verbose -Message "Host file is missing entries, adding missing entries"
+ [System.IO.File]::WriteAllLines($hosts_file, $hosts_file_lines)
+}
+
+# forward ports
+$forwarded_ports = @{
+ 80 = 8080
+ 443 = 8443
+}
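+# i.e. local requests to ports 80/443 are proxied to 8080/8443, which SSH -R
+# forwards back to the httptester container on the Ansible host.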
+if ($os_version -ge [Version]"6.2") {
+ Write-Verbose -Message "Using netsh to configure forwarded ports"
+ foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) {
+ $port_set = netsh interface portproxy show v4tov4 | `
+ Where-Object { $_ -match "127.0.0.1\s*$($forwarded_port.Key)\s*127.0.0.1\s*$($forwarded_port.Value)" }
+
+ if (-not $port_set) {
+ Write-Verbose -Message "Adding netsh portproxy rule for $($forwarded_port.Key) -> $($forwarded_port.Value)"
+ $add_args = @(
+ "interface",
+ "portproxy",
+ "add",
+ "v4tov4",
+ "listenaddress=127.0.0.1",
+ "listenport=$($forwarded_port.Key)",
+ "connectaddress=127.0.0.1",
+ "connectport=$($forwarded_port.Value)"
+ )
+ $null = netsh $add_args 2>&1
+ }
+ }
+} else {
+ Write-Verbose -Message "Using Port Mapper to configure forwarded ports"
+ # netsh interface portproxy doesn't work on local addresses in older
+ # versions of Windows. Use the custom application Port Mapper to achieve
+ # the same outcome.
+ # http://www.analogx.com/contents/download/Network/pmapper/Freeware.htm
+ $s3_url = "https://ansible-ci-files.s3.amazonaws.com/ansible-test/pmapper-1.04.exe"
+
+ # download the Port Mapper executable to a temporary directory
+ $pmapper_folder = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath ([System.IO.Path]::GetRandomFileName())
+ $pmapper_exe = Join-Path -Path $pmapper_folder -ChildPath pmapper.exe
+ $pmapper_config = Join-Path -Path $pmapper_folder -ChildPath pmapper.dat
+ New-Item -Path $pmapper_folder -ItemType Directory > $null
+
+ $stop = $false
+ do {
+ try {
+ Write-Verbose -Message "Attempting download of '$s3_url'"
+ (New-Object -TypeName System.Net.WebClient).DownloadFile($s3_url, $pmapper_exe)
+ $stop = $true
+ } catch { Start-Sleep -Second 5 }
+ } until ($stop)
+
+ # create the Port Mapper rule file that contains our forwarded ports
+ $fs = [System.IO.File]::Create($pmapper_config)
+ try {
+ foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) {
+ Write-Verbose -Message "Creating forwarded port rule for $($forwarded_port.Key) -> $($forwarded_port.Value)"
+ $pmapper_rule = Get-PmapperRuleBytes -ListenPort $forwarded_port.Key -ConnectAddress 127.0.0.1 -ConnectPort $forwarded_port.Value
+ $fs.Write($pmapper_rule, 0, $pmapper_rule.Length)
+ }
+ } finally {
+ $fs.Close()
+ }
+
+ Write-Verbose -Message "Starting Port Mapper '$pmapper_exe' in the background"
+ $start_args = @{
+ CommandLine = $pmapper_exe
+ CurrentDirectory = $pmapper_folder
+ }
+ $res = Invoke-CimMethod -ClassName Win32_Process -MethodName Create -Arguments $start_args
+ if ($res.ReturnValue -ne 0) {
+ $error_msg = switch($res.ReturnValue) {
+ 2 { "Access denied" }
+ 3 { "Insufficient privilege" }
+ 8 { "Unknown failure" }
+ 9 { "Path not found" }
+ 21 { "Invalid parameter" }
+ default { "Undefined Error: $($res.ReturnValue)" }
+ }
+ Write-Error -Message "Failed to start pmapper: $error_msg"
+ }
+ $pmapper_pid = $res.ProcessId
+ Write-Verbose -Message "Port Mapper PID: $pmapper_pid"
+}
+
+Write-Verbose -Message "Wait for current script at '$PSCommandPath' to be deleted before running cleanup"
+$fsw = New-Object -TypeName System.IO.FileSystemWatcher
+$fsw.Path = Split-Path -Path $PSCommandPath -Parent
+$fsw.Filter = Split-Path -Path $PSCommandPath -Leaf
+$fsw.WaitForChanged([System.IO.WatcherChangeTypes]::Deleted, 3600000) > $null
+Write-Verbose -Message "Script delete or timeout reached, cleaning up Windows httptester artifacts"
+
+Write-Verbose -Message "Cleanup host file entries"
+$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
+$new_lines = [System.Collections.ArrayList]@()
+$changed = $false
+foreach ($host_line in $hosts_file_lines) {
+ if ($host_line.EndsWith("# ansible-test httptester")) {
+ $changed = $true
+ continue
+ }
+ $new_lines.Add($host_line) > $null
+}
+if ($changed) {
+ Write-Verbose -Message "Host file has extra entries, removing extra entries"
+ [System.IO.File]::WriteAllLines($hosts_file, $new_lines)
+}
+
+if ($os_version -ge [Version]"6.2") {
+ Write-Verbose -Message "Cleanup of forwarded port configured in netsh"
+ foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) {
+ $port_set = netsh interface portproxy show v4tov4 | `
+ Where-Object { $_ -match "127.0.0.1\s*$($forwarded_port.Key)\s*127.0.0.1\s*$($forwarded_port.Value)" }
+
+ if ($port_set) {
+ Write-Verbose -Message "Removing netsh portproxy rule for $($forwarded_port.Key) -> $($forwarded_port.Value)"
+ $delete_args = @(
+ "interface",
+ "portproxy",
+ "delete",
+ "v4tov4",
+ "listenaddress=127.0.0.1",
+ "listenport=$($forwarded_port.Key)"
+ )
+ $null = netsh $delete_args 2>&1
+ }
+ }
+} else {
+ Write-Verbose -Message "Stopping Port Mapper executable based on pid $pmapper_pid"
+ Stop-Process -Id $pmapper_pid -Force
+
+ # the process may not stop straight away, try multiple times to delete the Port Mapper folder
+ $attempts = 1
+ do {
+ try {
+ Write-Verbose -Message "Cleanup temporary files for Port Mapper at '$pmapper_folder' - Attempt: $attempts"
+ Remove-Item -Path $pmapper_folder -Force -Recurse
+ break
+ } catch {
+ Write-Verbose -Message "Cleanup temporary files for Port Mapper failed, waiting 5 seconds before trying again:$($_ | Out-String)"
+ if ($attempts -ge 5) {
+ break
+ }
+ $attempts += 1
+ Start-Sleep -Second 5
+ }
+ } until ($false)
+}
diff --git a/test/lib/ansible_test/_data/sslcheck.py b/test/lib/ansible_test/_data/sslcheck.py
new file mode 100755
index 00000000..37b82279
--- /dev/null
+++ b/test/lib/ansible_test/_data/sslcheck.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""Show openssl version."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+# noinspection PyBroadException
+try:
+ from ssl import OPENSSL_VERSION_INFO
+ VERSION = list(OPENSSL_VERSION_INFO[:3])
+except Exception: # pylint: disable=broad-except
+ VERSION = None
+
+
+def main():
+ """Main program entry point."""
+ print(json.dumps(dict(
+ version=VERSION,
+ )))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/versions.py b/test/lib/ansible_test/_data/versions.py
new file mode 100755
index 00000000..4babef01
--- /dev/null
+++ b/test/lib/ansible_test/_data/versions.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+"""Show python and pip versions."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import warnings
+
+warnings.simplefilter('ignore') # avoid python version deprecation warnings when using newer pip dependencies
+
+try:
+ import pip
+except ImportError:
+ pip = None
+
+print(sys.version)
+
+if pip:
+ print('pip %s from %s' % (pip.__version__, os.path.dirname(pip.__file__)))
diff --git a/test/lib/ansible_test/_data/virtualenvcheck.py b/test/lib/ansible_test/_data/virtualenvcheck.py
new file mode 100755
index 00000000..552b6e7d
--- /dev/null
+++ b/test/lib/ansible_test/_data/virtualenvcheck.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+"""Detect the real python interpreter when running in a virtual environment created by the 'virtualenv' module."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+try:
+ from sys import real_prefix
+except ImportError:
+ real_prefix = None
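+
+# note: environments created by the stdlib 'venv' module do not set
+# sys.real_prefix, so this reports None for them by design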
+
+print(json.dumps(dict(
+ real_prefix=real_prefix,
+)))
diff --git a/test/lib/ansible_test/_data/yamlcheck.py b/test/lib/ansible_test/_data/yamlcheck.py
new file mode 100755
index 00000000..591842f4
--- /dev/null
+++ b/test/lib/ansible_test/_data/yamlcheck.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+"""Show python and pip versions."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+try:
+ import yaml
+except ImportError:
+ yaml = None
+
+try:
+ from yaml import CLoader
+except ImportError:
+ CLoader = None
+
+print(json.dumps(dict(
+ yaml=bool(yaml),
+ cloader=bool(CLoader),
+)))
diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py
new file mode 100644
index 00000000..35f04422
--- /dev/null
+++ b/test/lib/ansible_test/_internal/__init__.py
@@ -0,0 +1,3 @@
+"""Support code for Ansible testing infrastructure."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py
new file mode 100644
index 00000000..5e9b5d7d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ansible_util.py
@@ -0,0 +1,260 @@
+"""Miscellaneous utility functions and classes specific to ansible cli tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from . import types as t
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+from .util import (
+ common_environment,
+ display,
+ find_python,
+ ApplicationError,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_SOURCE_ROOT,
+)
+
+from .util_common import (
+ create_temp_dir,
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ IntegrationConfig,
+ PosixIntegrationConfig,
+ EnvironmentConfig,
+ CommonConfig,
+)
+
+from .data import (
+ data_context,
+)
+
+CHECK_YAML_VERSIONS = {}
+
+
+def ansible_environment(args, color=True, ansible_config=None):
+ """
+ :type args: CommonConfig
+ :type color: bool
+ :type ansible_config: str | None
+ :rtype: dict[str, str]
+ """
+ env = common_environment()
+ path = env['PATH']
+
+ if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep):
+ path = ANSIBLE_BIN_PATH + os.path.pathsep + path
+
+ if not ansible_config:
+ # use the default empty configuration unless one has been provided
+ ansible_config = args.get_ansible_config()
+
+ if not args.explain and not os.path.exists(ansible_config):
+ raise ApplicationError('Configuration not found: %s' % ansible_config)
+
+ ansible = dict(
+ ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE),
+ ANSIBLE_FORCE_COLOR='true' if args.color and color else 'false',
+ ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail
+ ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found
+ ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory
+ ANSIBLE_DEPRECATION_WARNINGS='false',
+ ANSIBLE_HOST_KEY_CHECKING='false',
+ ANSIBLE_RETRY_FILES_ENABLED='false',
+ ANSIBLE_CONFIG=ansible_config,
+ ANSIBLE_LIBRARY='/dev/null',
+ ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel
+ PYTHONPATH=get_ansible_python_path(),
+ PAGER='/bin/cat',
+ PATH=path,
+ # give TQM worker processes time to report code coverage results
+ # without this the last task in a play may write no coverage file, an empty file, or an incomplete file
+ # enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly
+ ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100',
+ ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1',
+ )
+
+ if isinstance(args, IntegrationConfig) and args.coverage:
+ # standard path injection is not effective for ansible-connection, instead the location must be configured
+ # ansible-connection only requires the injector for code coverage
+ # the correct python interpreter is already selected using the sys.executable used to invoke ansible
+ ansible.update(dict(
+ ANSIBLE_CONNECTION_PATH=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector', 'ansible-connection'),
+ ))
+
+ if isinstance(args, PosixIntegrationConfig):
+ ansible.update(dict(
+ ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory
+ ))
+
+ env.update(ansible)
+
+ if args.debug:
+ env.update(dict(
+ ANSIBLE_DEBUG='true',
+ ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'),
+ ))
+
+ if data_context().content.collection:
+ env.update(dict(
+ ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root,
+ ))
+
+ if data_context().content.is_ansible:
+ env.update(configure_plugin_paths(args))
+
+ return env
+
+
+def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str]
+ """Return environment variables with paths to plugins relevant for the current command."""
+ if not isinstance(args, IntegrationConfig):
+ return {}
+
+ support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command)
+
+ # provide private copies of collections for integration tests
+ collection_root = os.path.join(support_path, 'collections')
+
+ env = dict(
+ ANSIBLE_COLLECTIONS_PATH=collection_root,
+ )
+
+ # provide private copies of plugins for integration tests
+ plugin_root = os.path.join(support_path, 'plugins')
+
+ plugin_list = [
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'netconf',
+ # 'shell' is not configurable
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ ]
+
+ # most plugins follow a standard naming convention
+ plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list)
+
+ # these plugins do not follow the standard naming convention
+ plugin_map.update(
+ doc_fragment='doc_fragments',
+ library='modules',
+ module_utils='module_utils',
+ )
+
+ env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items()))
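+ # e.g. ANSIBLE_CALLBACK_PLUGINS=<plugin_root>/callback, ANSIBLE_LIBRARY=<plugin_root>/modules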
+
+ # only configure directories which exist
+ env = dict((key, value) for key, value in env.items() if os.path.isdir(value))
+
+ return env
+
+
+def get_ansible_python_path(): # type: () -> str
+ """
+ Return a directory usable for PYTHONPATH, containing only the ansible package.
+ If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit.
+ """
+ if ANSIBLE_SOURCE_ROOT:
+ # when running from source there is no need for a temporary directory to isolate the ansible package
+ return os.path.dirname(ANSIBLE_LIB_ROOT)
+
+ try:
+ return get_ansible_python_path.python_path
+ except AttributeError:
+ pass
+
+ python_path = create_temp_dir(prefix='ansible-test-')
+ get_ansible_python_path.python_path = python_path
+
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible'))
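+ # the temporary directory now contains a single 'ansible' symlink pointing at the real package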
+
+ return python_path
+
+
+def check_pyyaml(args, version, required=True, quiet=False):
+ """
+ :type args: EnvironmentConfig
+ :type version: str
+ :type required: bool
+ :type quiet: bool
+ """
+ try:
+ return CHECK_YAML_VERSIONS[version]
+ except KeyError:
+ pass
+
+ python = find_python(version)
+ stdout, _dummy = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'yamlcheck.py')],
+ capture=True, always=True)
+
+ result = json.loads(stdout)
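+ # mirrors yamlcheck.py output, e.g. {'yaml': True, 'cloader': False}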
+
+ yaml = result['yaml']
+ cloader = result['cloader']
+
+ if yaml or required:
+ # results are cached only if pyyaml is required or present
+ # it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected
+ CHECK_YAML_VERSIONS[version] = result
+
+ if not quiet:
+ if not yaml and required:
+ display.warning('PyYAML is not installed for interpreter: %s' % python)
+ elif not cloader:
+ display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python)
+
+ return result
+
+
+class CollectionDetail:
+ """Collection detail."""
+ def __init__(self): # type: () -> None
+ self.version = None # type: t.Optional[str]
+
+
+class CollectionDetailError(ApplicationError):
+ """An error occurred retrieving collection detail."""
+ def __init__(self, reason): # type: (str) -> None
+ super(CollectionDetailError, self).__init__('Error collecting collection detail: %s' % reason)
+ self.reason = reason
+
+
+def get_collection_detail(args, python): # type: (EnvironmentConfig, str) -> CollectionDetail
+ """Return collection detail."""
+ collection = data_context().content.collection
+ directory = os.path.join(collection.root, collection.directory)
+
+ stdout = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0]
+ result = json.loads(stdout)
+ error = result.get('error')
+
+ if error:
+ raise CollectionDetailError(error)
+
+ version = result.get('version')
+
+ detail = CollectionDetail()
+ detail.version = str(version) if version is not None else None
+
+ return detail
diff --git a/test/lib/ansible_test/_internal/cache.py b/test/lib/ansible_test/_internal/cache.py
new file mode 100644
index 00000000..85fdbb1f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cache.py
@@ -0,0 +1,35 @@
+"""Cache for commonly shared data that is intended to be immutable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class CommonCache:
+ """Common cache."""
+ def __init__(self, args):
+ """
+ :param args: CommonConfig
+ """
+ self.args = args
+
+ def get(self, key, factory):
+ """
+ :param key: str
+ :param factory: () -> any
+ :rtype: any
+ """
+ if key not in self.args.cache:
+ self.args.cache[key] = factory()
+
+ return self.args.cache[key]
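+
+ # usage sketch (names illustrative): cache.get('targets', load_targets)
+ # invokes load_targets() once and returns the cached value afterwards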
+
+ def get_with_args(self, key, factory):
+ """
+ :param key: str
+ :param factory: (CommonConfig) -> any
+ :rtype: any
+ """
+
+ if key not in self.args.cache:
+ self.args.cache[key] = factory(self.args)
+
+ return self.args.cache[key]
diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py
new file mode 100644
index 00000000..d6e2ad6e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/__init__.py
@@ -0,0 +1,227 @@
+"""Support code for CI environments."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import base64
+import json
+import os
+import tempfile
+
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+ to_text,
+)
+
+from ..io import (
+ read_text_file,
+ write_text_file,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..util import (
+ ABC,
+ ApplicationError,
+ display,
+ get_subclasses,
+ import_plugins,
+ raw_command,
+)
+
+
+class ChangeDetectionNotSupported(ApplicationError):
+ """Exception for cases where change detection is not supported."""
+
+
+class AuthContext:
+ """Context information required for Ansible Core CI authentication."""
+ def __init__(self): # type: () -> None
+ self.region = None # type: t.Optional[str]
+
+
+class CIProvider(ABC):
+ """Base class for CI provider plugins."""
+ priority = 500
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+
+ @property
+ @abc.abstractmethod
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+
+ @property
+ @abc.abstractmethod
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+
+ @abc.abstractmethod
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+
+ @abc.abstractmethod
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+
+ @abc.abstractmethod
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+
+ @abc.abstractmethod
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+
+ @abc.abstractmethod
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+
+ @abc.abstractmethod
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+
+
+def get_ci_provider(): # type: () -> CIProvider
+ """Return a CI provider instance for the current environment."""
+ try:
+ return get_ci_provider.provider
+ except AttributeError:
+ pass
+
+ provider = None
+
+ import_plugins('ci')
+
+ candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))
+
+ for candidate in candidates:
+ if candidate.is_supported():
+ provider = candidate()
+ break
+
+ if provider.code:
+ display.info('Detected CI provider: %s' % provider.name)
+
+ get_ci_provider.provider = provider
+
+ return provider
+
+
+class AuthHelper(ABC):
+ """Public key based authentication helper for Ansible Core CI."""
+ def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None
+ """Sign the given auth request and make the public key available."""
+ payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
+ signature_raw_bytes = self.sign_bytes(payload_bytes)
+ signature = to_text(base64.b64encode(signature_raw_bytes))
+
+ request.update(signature=signature)
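+ # e.g. {'run_id': '123'} becomes {'run_id': '123', 'signature': '<base64>'} (values illustrative)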
+
+ def initialize_private_key(self): # type: () -> str
+ """
+ Initialize and publish a new key pair (if needed) and return the private key.
+ The private key is cached across ansible-test invocations so it is only generated and published once per CI job.
+ """
+ path = os.path.expanduser('~/.ansible-core-ci-private.key')
+
+ if os.path.exists(to_bytes(path)):
+ private_key_pem = read_text_file(path)
+ else:
+ private_key_pem = self.generate_private_key()
+ write_text_file(path, private_key_pem)
+
+ return private_key_pem
+
+ @abc.abstractmethod
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+
+ @abc.abstractmethod
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+
+ @abc.abstractmethod
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+
+
+class CryptographyAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method
+ """Cryptography based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives.asymmetric import ec
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+ private_key_pem = self.initialize_private_key()
+ private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
+
+ signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
+
+ return signature_raw_bytes
+
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric import ec
+
+ private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
+ public_key = private_key.public_key()
+
+ private_key_pem = to_text(private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=serialization.NoEncryption(),
+ ))
+
+ public_key_pem = to_text(public_key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+ ))
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
+
+
+class OpenSSLAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method
+ """OpenSSL based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ private_key_pem = self.initialize_private_key()
+
+ with tempfile.NamedTemporaryFile() as private_key_file:
+ private_key_file.write(to_bytes(private_key_pem))
+ private_key_file.flush()
+
+ with tempfile.NamedTemporaryFile() as payload_file:
+ payload_file.write(payload_bytes)
+ payload_file.flush()
+
+ with tempfile.NamedTemporaryFile() as signature_file:
+ raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
+ signature_raw_bytes = signature_file.read()
+
+ return signature_raw_bytes
+
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
+ public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
diff --git a/test/lib/ansible_test/_internal/ci/azp.py b/test/lib/ansible_test/_internal/ci/azp.py
new file mode 100644
index 00000000..f2a9d206
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/azp.py
@@ -0,0 +1,268 @@
+"""Support code for working with Azure Pipelines."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import tempfile
+import uuid
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+ urlencode,
+)
+
+from ..util import (
+ display,
+ MissingEnvironmentVariable,
+)
+
+from . import (
+ AuthContext,
+ ChangeDetectionNotSupported,
+ CIProvider,
+ CryptographyAuthHelper,
+)
+
+CODE = 'azp'
+
+
+class AzurePipelines(CIProvider):
+ """CI provider implementation for Azure Pipelines."""
+ def __init__(self):
+ self.auth = AzurePipelinesAuthHelper()
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Azure Pipelines'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'azp-%s-%s-%s' % (
+ os.environ['BUILD_BUILDID'],
+ os.environ['SYSTEM_JOBATTEMPT'],
+ os.environ['SYSTEM_JOBIDENTIFIER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ prefix = re.sub(r'[^a-zA-Z0-9]+', '-', prefix).lower()
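+ # e.g. 'azp-5432-1-Job_1.Test' -> 'azp-5432-1-job-1-test' (values illustrative)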
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = AzurePipelinesChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+ # - Too many pull requests passed since the last merge run passed.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
+ project_name=os.environ['SYSTEM_TEAMPROJECT'],
+ build_id=int(os.environ['BUILD_BUILDID']),
+ task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ azp=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ changes = AzurePipelinesChanges(args)
+
+ details = dict(
+ base_commit=changes.base_commit,
+ commit=changes.commit,
+ )
+
+ return details
+
+
+class AzurePipelinesAuthHelper(CryptographyAuthHelper):
+ """
+ Authentication helper for Azure Pipelines.
+ Based on cryptography since it is provided by the default Azure Pipelines environment.
+ """
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+ try:
+ agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ # the temporary file cannot be deleted because we do not know when the agent has processed it
+ # placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
+ with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
+ public_key_file.write(to_bytes(public_key_pem))
+ public_key_file.flush()
+
+ # make the agent aware of the public key by declaring it as an attachment
+ vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
+
+
+class AzurePipelinesChanges:
+ """Change information for an Azure Pipelines build."""
+ def __init__(self, args): # type: (CommonConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
+ self.project = os.environ['SYSTEM_TEAMPROJECT']
+ self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
+ self.source_branch = os.environ['BUILD_SOURCEBRANCH']
+ self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
+ self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.source_branch.startswith('refs/tags/'):
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ self.org = self.org_uri.strip('/').split('/')[-1]
+ self.is_pr = self.pr_branch_name is not None
+
+ if self.is_pr:
+ # HEAD is a merge commit of the PR branch into the target branch
+ # HEAD^1 is HEAD of the target branch (first parent of merge commit)
+ # HEAD^2 is HEAD of the PR branch (second parent of merge commit)
+ # see: https://git-scm.com/docs/gitrevisions
+ self.branch = self.pr_branch_name
+ self.base_commit = 'HEAD^1'
+ self.commit = 'HEAD^2'
+ else:
+ commits = self.get_successful_merge_run_commits()
+
+ self.branch = self.source_branch_name
+ self.base_commit = self.get_last_successful_commit(commits)
+ self.commit = 'HEAD'
+
+ self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
+
+ if self.base_commit:
+ self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
+
+ # <commit>...<commit>
+ # This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
+ # see: https://git-scm.com/docs/git-diff
+ dot_range = '%s...%s' % (self.base_commit, self.commit)
+
+ self.paths = sorted(self.git.get_diff_names([dot_range]))
+ self.diff = self.git.get_diff([dot_range])
+ else:
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self): # type: () -> t.Set[str]
+ """Return a set of recent successsful merge commits from Azure Pipelines."""
+ parameters = dict(
+ maxBuildsPerDefinition=100, # max 5000
+ queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
+ resultFilter='succeeded',
+ reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
+ repositoryType=self.repo_type,
+ repositoryId='%s/%s' % (self.org, self.project),
+ )
+
+ url = '%s%s/build/builds?%s' % (self.org_uri, self.project, urlencode(parameters))
+
+ http = HttpClient(self.args)
+ response = http.get(url)
+
+ # noinspection PyBroadException
+ try:
+ result = response.json()
+ except Exception: # pylint: disable=broad-except
+ # most likely due to a private project, which returns an HTTP 203 response with HTML
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
+ commits = set(build['sourceVersion'] for build in result['value'])
+
+ return commits
+
+ def get_last_successful_commit(self, commits): # type: (t.Set[str]) -> t.Optional[str]
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
+ ordered_successful_commits = [commit for commit in commit_history if commit in commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
+
+
+def vso_add_attachment(file_type, file_name, path): # type: (str, str, str) -> None
+ """Upload and attach a file to the current timeline record."""
+ vso('task.addattachment', dict(type=file_type, name=file_name), path)
+
+
+def vso(name, data, message): # type: (str, t.Dict[str, str], str) -> None
+ """
+ Write a logging command for the Azure Pipelines agent to process.
+ See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
+ """
+ display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
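+
+
+# For example (illustrative values), vso_add_attachment('ansible-core-ci', 'public-key.pem', '/tmp/key.pem')
+# writes: ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/tmp/key.pem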
diff --git a/test/lib/ansible_test/_internal/ci/local.py b/test/lib/ansible_test/_internal/ci/local.py
new file mode 100644
index 00000000..5f605c86
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/local.py
@@ -0,0 +1,217 @@
+"""Support code for working without a supported CI provider."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import random
+import re
+
+from .. import types as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ is_binary_file,
+ SubprocessError,
+)
+
+from . import (
+ AuthContext,
+ CIProvider,
+)
+
+CODE = '' # not really a CI provider, so use an empty string for the code
+
+
+class Local(CIProvider):
+ """CI provider implementation when not using CI."""
+ priority = 1000
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return True
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Local'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ node = re.sub(r'[^a-zA-Z0-9]+', '-', platform.node().split('.')[0]).lower()
+
+ prefix = 'ansible-test-%s-%d' % (node, random.randint(10000000, 99999999))
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ return ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = LocalChanges(args)
+
+ display.info('Detected branch %s forked from %s at commit %s' % (
+ result.current_branch, result.fork_branch, result.fork_point))
+
+ if result.untracked and not args.untracked:
+ display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
+ len(result.untracked))
+
+ if result.committed and not args.committed:
+ display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
+ len(result.committed))
+
+ if result.staged and not args.staged:
+ display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
+ len(result.staged))
+
+ if result.unstaged and not args.unstaged:
+ display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
+ len(result.unstaged))
+
+ names = set()
+
+ if args.tracked:
+ names |= set(result.tracked)
+ if args.untracked:
+ names |= set(result.untracked)
+ if args.committed:
+ names |= set(result.committed)
+ if args.staged:
+ names |= set(result.staged)
+ if args.unstaged:
+ names |= set(result.unstaged)
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ for path in result.untracked:
+ if is_binary_file(path):
+ args.metadata.changes[path] = ((0, 0),)
+ continue
+
+ line_count = len(read_text_file(path).splitlines())
+
+ args.metadata.changes[path] = ((1, line_count),)
+
+ return sorted(names)
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ path = self._get_aci_key_path(context)
+ return os.path.exists(path)
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ path = self._get_aci_key_path(context)
+ auth_key = read_text_file(path).strip()
+
+ request = dict(
+ key=auth_key,
+ nonce=None,
+ )
+
+ auth = dict(
+ remote=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ return None # not yet implemented for local
+
+ def _get_aci_key_path(self, context): # type: (AuthContext) -> str
+ path = os.path.expanduser('~/.ansible-core-ci.key')
+
+ if context.region:
+ path += '.%s' % context.region
+
+ return path
+
+
+class InvalidBranch(ApplicationError):
+ """Exception for invalid branch specification."""
+ def __init__(self, branch, reason): # type: (str, str) -> None
+ message = 'Invalid branch: %s\n%s' % (branch, reason)
+
+ super(InvalidBranch, self).__init__(message)
+
+ self.branch = branch
+
+
+class LocalChanges:
+ """Change information for local work."""
+ def __init__(self, args): # type: (TestConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ self.current_branch = self.git.get_branch()
+
+ if self.is_official_branch(self.current_branch):
+ raise InvalidBranch(branch=self.current_branch,
+ reason='Current branch is not a feature branch.')
+
+ self.fork_branch = None
+ self.fork_point = None
+
+ self.local_branches = sorted(self.git.get_branches())
+ self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
+
+ for self.fork_branch in self.official_branches:
+ try:
+ self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
+ break
+ except SubprocessError:
+ pass
+
+ if self.fork_point is None:
+ raise ApplicationError('Unable to auto-detect fork branch and fork point.')
+
+ # tracked files (including unchanged)
+ self.tracked = sorted(self.git.get_file_names(['--cached']))
+ # untracked files (except ignored)
+ self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
+ # tracked changes (including deletions) committed since the branch was forked
+ self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
+ # tracked changes (including deletions) which are staged
+ self.staged = sorted(self.git.get_diff_names(['--cached']))
+ # tracked changes (including deletions) which are not staged
+ self.unstaged = sorted(self.git.get_diff_names([]))
+ # diff of all tracked files from fork point to working copy
+ self.diff = self.git.get_diff([self.fork_point])
+
+ def is_official_branch(self, name): # type: (str) -> bool
+ """Return True if the given branch name an official branch for development or releases."""
+ if self.args.base_branch:
+ return name == self.args.base_branch
+
+ if name == 'devel':
+ return True
+
+ if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
+ return True
+
+ return False
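+
+# e.g. 'devel' and 'stable-2.10' count as official branches; a feature branch
+# such as 'fix-winrm' (name illustrative) does not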
diff --git a/test/lib/ansible_test/_internal/ci/shippable.py b/test/lib/ansible_test/_internal/ci/shippable.py
new file mode 100644
index 00000000..f9f0a192
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/shippable.py
@@ -0,0 +1,269 @@
+"""Support code for working with Shippable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import time
+
+from .. import types as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+ urlencode,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ MissingEnvironmentVariable,
+ SubprocessError,
+)
+
+from . import (
+ AuthContext,
+ ChangeDetectionNotSupported,
+ CIProvider,
+ OpenSSLAuthHelper,
+)
+
+
+CODE = 'shippable'
+
+
+class Shippable(CIProvider):
+ """CI provider implementation for Shippable."""
+ def __init__(self):
+ self.auth = ShippableAuthHelper()
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SHIPPABLE') == 'true'
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Shippable'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'shippable-%s-%s' % (
+ os.environ['SHIPPABLE_BUILD_NUMBER'],
+ os.environ['SHIPPABLE_JOB_NUMBER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('BASE_BRANCH')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = ShippableChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ elif result.is_tag:
+ job_type = 'tag'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+ # - Too many pull requests passed since the last merge run passed.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ run_id=os.environ['SHIPPABLE_BUILD_ID'],
+ job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ shippable=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ commit = os.environ.get('COMMIT')
+ base_commit = os.environ.get('BASE_COMMIT')
+
+ details = dict(
+ base_commit=base_commit,
+ commit=commit,
+ merged_commit=self._get_merged_commit(args, commit),
+ )
+
+ return details
+
+ # noinspection PyUnusedLocal
+ def _get_merged_commit(self, args, commit): # type: (CommonConfig, str) -> t.Optional[str] # pylint: disable=unused-argument
+ """Find the merged commit that should be present."""
+ if not commit:
+ return None
+
+ git = Git()
+
+ try:
+ show_commit = git.run_git(['show', '--no-patch', '--no-abbrev', commit])
+ except SubprocessError as ex:
+ # This should only fail for pull requests where the commit does not exist.
+ # Merge runs would fail much earlier when attempting to check out the commit.
+ raise ApplicationError('Commit %s was not found:\n\n%s\n\n'
+ 'GitHub may not have fully replicated the commit across their infrastructure.\n'
+ 'It is also possible the commit was removed by a force push between job creation and execution.\n'
+ 'Find the latest run for the pull request and restart failed jobs as needed.'
+ % (commit, ex.stderr.strip()))
+
+ head_commit = git.run_git(['show', '--no-patch', '--no-abbrev', 'HEAD'])
+
+ if show_commit == head_commit:
+ # Commit is HEAD, so this is not a pull request or the base branch for the pull request is up-to-date.
+ return None
+
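+ # Match the 'Merge:' header emitted by 'git show' for a merge commit, e.g. (illustrative hashes):
+ # Merge: 1111111111111111111111111111111111111111 2222222222222222222222222222222222222222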
+ match_merge = re.search(r'^Merge: (?P<parents>[0-9a-f]{40} [0-9a-f]{40})$', head_commit, flags=re.MULTILINE)
+
+ if not match_merge:
+ # The most likely scenarios resulting in a failure here are:
+ # - A new run should or does supersede this job, but it wasn't cancelled in time.
+ # - A job was superseded and then later restarted.
+ raise ApplicationError('HEAD is not commit %s or a merge commit:\n\n%s\n\n'
+ 'This job has likely been superseded by another run due to additional commits being pushed.\n'
+ 'Find the latest run for the pull request and restart failed jobs as needed.'
+ % (commit, head_commit.strip()))
+
+ parents = set(match_merge.group('parents').split(' '))
+
+ if len(parents) != 2:
+ raise ApplicationError('HEAD is a %d-way octopus merge.' % len(parents))
+
+ if commit not in parents:
+ raise ApplicationError('Commit %s is not a parent of HEAD.' % commit)
+
+ parents.remove(commit)
+
+ last_commit = parents.pop()
+
+ return last_commit
+
+
+class ShippableAuthHelper(OpenSSLAuthHelper):
+ """
+ Authentication helper for Shippable.
+ Based on OpenSSL since cryptography is not provided by the default Shippable environment.
+ """
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+ # display the public key as a single line to avoid mangling, such as each line being prefixed with a timestamp
+ display.info(public_key_pem.replace('\n', ' '))
+ # allow time for logs to become available to reduce repeated API calls
+ time.sleep(3)
+
+
+class ShippableChanges:
+ """Change information for Shippable build."""
+ def __init__(self, args): # type: (TestConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.branch = os.environ['BRANCH']
+ self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
+ self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
+ self.commit = os.environ['COMMIT']
+ self.project_id = os.environ['PROJECT_ID']
+ self.commit_range = os.environ['SHIPPABLE_COMMIT_RANGE']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.is_tag:
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ if self.is_pr:
+ self.paths = sorted(self.git.get_diff_names([self.commit_range]))
+ self.diff = self.git.get_diff([self.commit_range])
+ else:
+ commits = self.get_successful_merge_run_commits(self.project_id, self.branch)
+ last_successful_commit = self.get_last_successful_commit(commits)
+
+ if last_successful_commit:
+ self.paths = sorted(self.git.get_diff_names([last_successful_commit, self.commit]))
+ self.diff = self.git.get_diff([last_successful_commit, self.commit])
+ else:
+ # first run for branch
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self, project_id, branch): # type: (str, str) -> t.Set[str]
+ """Return a set of recent successsful merge commits from Shippable for the given project and branch."""
+ parameters = dict(
+ isPullRequest='false',
+ projectIds=project_id,
+ branch=branch,
+ )
+
+ url = 'https://api.shippable.com/runs?%s' % urlencode(parameters)
+
+ http = HttpClient(self.args, always=True)
+ response = http.get(url)
+ result = response.json()
+
+ if 'id' in result and result['id'] == 4004:
+ # most likely due to a private project, which returns an HTTP 200 response with JSON
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
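+ # statusCode 30 is taken to denote a successful run (an assumption implied by this function's purpose).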
+ commits = set(run['commitSha'] for run in result if run['statusCode'] == 30)
+
+ return commits
+
+ def get_last_successful_commit(self, successful_commits): # type: (t.Set[str]) -> t.Optional[str]
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
+ ordered_successful_commits = [commit for commit in commit_history if commit in successful_commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
diff --git a/test/lib/ansible_test/_internal/classification.py b/test/lib/ansible_test/_internal/classification.py
new file mode 100644
index 00000000..bfe6ccc3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification.py
@@ -0,0 +1,975 @@
+"""Classify changes in Ansible code."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import time
+
+from . import types as t
+
+from .target import (
+ walk_module_targets,
+ walk_integration_targets,
+ walk_units_targets,
+ walk_compile_targets,
+ walk_sanity_targets,
+ load_integration_prefixes,
+ analyze_integration_target_dependencies,
+)
+
+from .util import (
+ display,
+ is_subdir,
+)
+
+from .import_analysis import (
+ get_python_module_utils_imports,
+ get_python_module_utils_name,
+)
+
+from .csharp_import_analysis import (
+ get_csharp_module_utils_imports,
+ get_csharp_module_utils_name,
+)
+
+from .powershell_import_analysis import (
+ get_powershell_module_utils_imports,
+ get_powershell_module_utils_name,
+)
+
+from .config import (
+ TestConfig,
+ IntegrationConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .data import (
+ data_context,
+)
+
+FOCUSED_TARGET = '__focused__'
+
+
+def categorize_changes(args, paths, verbose_command=None):
+ """
+ :type args: TestConfig
+ :type paths: list[str]
+ :type verbose_command: str
+ :rtype: ChangeDescription
+ """
+ mapper = PathMapper(args)
+
+ commands = {
+ 'sanity': set(),
+ 'units': set(),
+ 'integration': set(),
+ 'windows-integration': set(),
+ 'network-integration': set(),
+ }
+
+ focused_commands = collections.defaultdict(set)
+
+ deleted_paths = set()
+ original_paths = set()
+ additional_paths = set()
+ no_integration_paths = set()
+
+ for path in paths:
+ if not os.path.exists(path):
+ deleted_paths.add(path)
+ continue
+
+ original_paths.add(path)
+
+ dependent_paths = mapper.get_dependent_paths(path)
+
+ if not dependent_paths:
+ continue
+
+ display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
+
+ for dependent_path in dependent_paths:
+ display.info(dependent_path, verbosity=2)
+ additional_paths.add(dependent_path)
+
+ additional_paths -= set(paths) # don't count changed paths as additional paths
+
+ if additional_paths:
+ display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
+ paths = sorted(set(paths) | additional_paths)
+
+ display.info('Mapping %d changed file(s) to tests.' % len(paths))
+
+ none_count = 0
+
+ for path in paths:
+ tests = mapper.classify(path)
+
+ if tests is None:
+ focused_target = False
+
+ display.info('%s -> all' % path, verbosity=1)
+ tests = all_tests(args) # not categorized, run all tests
+ display.warning('Path not categorized: %s' % path)
+ else:
+ focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
+
+ tests = dict((key, value) for key, value in tests.items() if value)
+
+ if focused_target and not any('integration' in command for command in tests):
+ no_integration_paths.add(path) # path triggers no integration tests
+
+ if verbose_command:
+ result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
+
+ # identify targeted integration tests (those which only target a single integration command)
+ if 'integration' in verbose_command and tests.get(verbose_command):
+ if not any('integration' in command for command in tests if command != verbose_command):
+ if focused_target:
+ result += ' (focused)'
+
+ result += ' (targeted)'
+ else:
+ result = '%s' % tests
+
+ if not tests.get(verbose_command):
+ # minimize excessive output from potentially thousands of files which do not trigger tests
+ none_count += 1
+ verbosity = 2
+ else:
+ verbosity = 1
+
+ if args.verbosity >= verbosity:
+ display.info('%s -> %s' % (path, result), verbosity=1)
+
+ for command, target in tests.items():
+ commands[command].add(target)
+
+ if focused_target:
+ focused_commands[command].add(target)
+
+ if none_count > 0 and args.verbosity < 2:
+ display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
+
+ for command in commands:
+ commands[command].discard('none')
+
+ if any(target == 'all' for target in commands[command]):
+ commands[command] = set(['all'])
+
+ commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
+ focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
+
+ for command in commands:
+ if commands[command] == ['all']:
+ commands[command] = [] # changes require testing all targets, do not filter targets
+
+ changes = ChangeDescription()
+ changes.command = verbose_command
+ changes.changed_paths = sorted(original_paths)
+ changes.deleted_paths = sorted(deleted_paths)
+ changes.regular_command_targets = commands
+ changes.focused_command_targets = focused_commands
+ changes.no_integration_paths = sorted(no_integration_paths)
+
+ return changes
+
+
+class PathMapper:
+ """Map file paths to test commands and targets."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ self.args = args
+ self.integration_all_target = get_integration_all_target(self.args)
+
+ self.integration_targets = list(walk_integration_targets())
+ self.module_targets = list(walk_module_targets())
+ self.compile_targets = list(walk_compile_targets())
+ self.units_targets = list(walk_units_targets())
+ self.sanity_targets = list(walk_sanity_targets())
+ self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
+ self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
+
+ self.units_modules = set(target.module for target in self.units_targets if target.module)
+ self.units_paths = set(a for target in self.units_targets for a in target.aliases)
+ self.sanity_paths = set(target.path for target in self.sanity_targets)
+
+ self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
+ self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
+ self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
+
+ self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'posix/' in target.aliases for m in target.modules)
+ self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'windows/' in target.aliases for m in target.modules)
+ self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'network/' in target.aliases for m in target.modules)
+
+ self.prefixes = load_integration_prefixes()
+ self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
+
+ self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+ self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+ self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+
+ self.paths_to_dependent_targets = {}
+
+ for target in self.integration_targets:
+ for path in target.needs_file:
+ if path not in self.paths_to_dependent_targets:
+ self.paths_to_dependent_targets[path] = set()
+
+ self.paths_to_dependent_targets[path].add(target)
+
+ def get_dependent_paths(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
+ paths = set()
+
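+ # Fixed-point expansion: keep expanding newly discovered dependent paths until no new ones appear.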
+ while unprocessed_paths:
+ queued_paths = list(unprocessed_paths)
+ paths |= unprocessed_paths
+ unprocessed_paths = set()
+
+ for queued_path in queued_paths:
+ new_paths = self.get_dependent_paths_non_recursive(queued_path)
+
+ for new_path in new_paths:
+ if new_path not in paths:
+ unprocessed_paths.add(new_path)
+
+ return sorted(paths)
+
+ def get_dependent_paths_non_recursive(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ paths = self.get_dependent_paths_internal(path)
+ paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
+ paths = sorted(set(paths))
+
+ return paths
+
+ def get_dependent_paths_internal(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ ext = os.path.splitext(os.path.split(path)[1])[1]
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.py':
+ return self.get_python_module_utils_usage(path)
+
+ if ext == '.psm1':
+ return self.get_powershell_module_utils_usage(path)
+
+ if ext == '.cs':
+ return self.get_csharp_module_utils_usage(path)
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ return self.get_integration_target_usage(path)
+
+ return []
+
+ def get_python_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.python_module_utils_imports:
+ display.info('Analyzing python module_utils imports...')
+ before = time.time()
+ self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
+ after = time.time()
+ display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
+
+ name = get_python_module_utils_name(path)
+
+ return sorted(self.python_module_utils_imports[name])
+
+ def get_powershell_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.powershell_module_utils_imports:
+ display.info('Analyzing powershell module_utils imports...')
+ before = time.time()
+ self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
+ after = time.time()
+ display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
+
+ name = get_powershell_module_utils_name(path)
+
+ return sorted(self.powershell_module_utils_imports[name])
+
+ def get_csharp_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.csharp_module_utils_imports:
+ display.info('Analyzing C# module_utils imports...')
+ before = time.time()
+ self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
+ after = time.time()
+ display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
+
+ name = get_csharp_module_utils_name(path)
+
+ return sorted(self.csharp_module_utils_imports[name])
+
+ def get_integration_target_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ target_name = path.split('/')[3]
+ dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
+ for target in sorted(self.integration_dependencies.get(target_name, set()))]
+
+ return dependents
+
+ def classify(self, path):
+ """
+ :type path: str
+ :rtype: dict[str, str] | None
+ """
+ result = self._classify(path)
+
+ # run all tests when no result given
+ if result is None:
+ return None
+
+ # run sanity on path unless result specified otherwise
+ if path in self.sanity_paths and 'sanity' not in result:
+ result['sanity'] = path
+
+ return result
+
+ def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path."""
+ if data_context().content.is_ansible:
+ return self._classify_ansible(path)
+
+ if data_context().content.collection:
+ return self._classify_collection(path)
+
+ return None
+
+ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules common to all layouts."""
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if os.path.sep not in path:
+ if filename in (
+ 'azure-pipelines.yml',
+ 'shippable.yml',
+ ):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.azure-pipelines'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.github'):
+ return minimal
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ if not os.path.exists(path):
+ return minimal
+
+ target = self.integration_targets_by_name.get(path.split('/')[3])
+
+ if not target:
+ display.warning('Unexpected non-target found: %s' % path)
+ return minimal
+
+ if 'hidden/' in target.aliases:
+ return minimal # already expanded using get_dependent_paths
+
+ return {
+ 'integration': target.name if 'posix/' in target.aliases else None,
+ 'windows-integration': target.name if 'windows/' in target.aliases else None,
+ 'network-integration': target.name if 'network/' in target.aliases else None,
+ FOCUSED_TARGET: True,
+ }
+
+ if is_subdir(path, data_context().content.integration_path):
+ if dirname == data_context().content.integration_path:
+ for command in (
+ 'integration',
+ 'windows-integration',
+ 'network-integration',
+ ):
+ if name == command and ext == '.cfg':
+ return {
+ command: self.integration_all_target,
+ }
+
+ if name == command + '.requirements' and ext == '.txt':
+ return {
+ command: self.integration_all_target,
+ }
+
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ }
+
+ if is_subdir(path, data_context().content.sanity_path):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ }
+
+ if is_subdir(path, data_context().content.unit_path):
+ if path in self.units_paths:
+ return {
+ 'units': path,
+ }
+
+ # changes to files which are not unit tests should trigger tests from the nearest parent directory
+
+ test_path = os.path.dirname(path)
+
+ while test_path:
+ if test_path + '/' in self.units_paths:
+ return {
+ 'units': test_path + '/',
+ }
+
+ test_path = os.path.dirname(test_path)
+
+ if is_subdir(path, data_context().content.module_path):
+ module_name = self.module_names_by_path.get(path)
+
+ if module_name:
+ return {
+ 'units': module_name if module_name in self.units_modules else None,
+ 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
+ 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
+ 'network-integration': self.network_integration_by_module.get(module_name),
+ FOCUSED_TARGET: True,
+ }
+
+ return minimal
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.cs':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.psm1':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.py':
+ return minimal # already expanded using get_dependent_paths
+
+ if is_subdir(path, data_context().content.plugin_paths['action']):
+ if ext == '.py':
+ if name.startswith('net_'):
+ network_target = 'network/.*_%s' % name[4:]
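+ # e.g. an action plugin named 'net_banner' yields the alias pattern 'network/.*_banner' (illustrative name)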
+
+ if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if self.prefixes.get(name) == 'network':
+ network_platform = name
+ elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
+ network_platform = name[:-7]
+ elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
+ network_platform = name[:-9]
+ else:
+ network_platform = None
+
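+ # network_platform is the plugin name stripped of any '_config'/'_template' suffix, e.g. 'eos_config' -> 'eos'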
+ if network_platform:
+ network_target = 'network/%s/' % network_platform
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['connection']):
+ if name == '__init__':
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': 'test/units/plugins/connection/',
+ }
+
+ units_path = 'test/units/plugins/connection/test_%s.py' % name
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ integration_name = 'connection_%s' % name
+
+ if integration_name not in self.integration_targets_by_name:
+ integration_name = None
+
+ windows_integration_name = 'connection_windows_%s' % name
+
+ if windows_integration_name not in self.integration_targets_by_name:
+ windows_integration_name = None
+
+ # entire integration test commands depend on these connection plugins
+
+ if name in ['winrm', 'psrp']:
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'local':
+ return {
+ 'integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'network_cli':
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'paramiko_ssh':
+ return {
+ 'integration': integration_name,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ # other connection plugins have isolated integration and unit tests
+
+ return {
+ 'integration': integration_name,
+ 'windows-integration': windows_integration_name,
+ 'units': units_path,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
+ return {
+ 'sanity': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['inventory']):
+ if name == '__init__':
+ return all_tests(self.args) # broad impact, run all tests
+
+ # These inventory plugins are enabled by default (see INVENTORY_ENABLED).
+ # Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
+ test_all = [
+ 'host_list',
+ 'script',
+ 'yaml',
+ 'ini',
+ 'auto',
+ ]
+
+ if name in test_all:
+ posix_integration_fallback = get_integration_all_target(self.args)
+ else:
+ posix_integration_fallback = None
+
+ target = self.integration_targets_by_name.get('inventory_%s' % name)
+ units_path = 'test/units/plugins/inventory/test_%s.py' % name
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
+ 'windows-integration': target.name if target and 'windows/' in target.aliases else None,
+ 'network-integration': target.name if target and 'network/' in target.aliases else None,
+ 'units': units_path,
+ FOCUSED_TARGET: target is not None,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['filter']):
+ return self._simple_plugin_tests('filter', name)
+
+ if is_subdir(path, data_context().content.plugin_paths['lookup']):
+ return self._simple_plugin_tests('lookup', name)
+
+ if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
+ is_subdir(path, data_context().content.plugin_paths['cliconf']) or
+ is_subdir(path, data_context().content.plugin_paths['netconf'])):
+ if ext == '.py':
+ if name in self.prefixes and self.prefixes[name] == 'network':
+ network_target = 'network/%s/' % name
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['test']):
+ return self._simple_plugin_tests('test', name)
+
+ return None
+
+ def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules specific to collections."""
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ filename = os.path.basename(path)
+ dummy, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitignore',
+ 'COPYING',
+ 'LICENSE',
+ 'Makefile',
+ ):
+ return minimal
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None
+
+ def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules specific to Ansible."""
+ if path.startswith('test/units/compat/'):
+ return {
+ 'units': 'test/units/',
+ }
+
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if path.startswith('bin/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('contrib/'):
+ return {
+ 'units': 'test/units/contrib/'
+ }
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if path.startswith('examples/'):
+ if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
+ return {
+ 'windows-integration': 'connection_winrm',
+ }
+
+ return minimal
+
+ if path.startswith('hacking/'):
+ return minimal
+
+ if path.startswith('lib/ansible/executor/powershell/'):
+ units_path = 'test/units/executor/powershell/'
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if path.startswith('lib/ansible/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('licenses/'):
+ return minimal
+
+ if path.startswith('packaging/'):
+ if path.startswith('packaging/requirements/'):
+ if name.startswith('requirements-') and ext == '.txt':
+ component = name.split('-', 1)[1]
+
+ candidates = (
+ 'cloud/%s/' % component,
+ )
+
+ for candidate in candidates:
+ if candidate in self.integration_targets_by_alias:
+ return {
+ 'integration': candidate,
+ }
+
+ return all_tests(self.args) # broad impact, run all tests
+
+ return minimal
+
+ if path.startswith('test/ansible_test/'):
+ return minimal # these tests are not invoked from ansible-test
+
+ if path.startswith('test/lib/ansible_test/config/'):
+ if name.startswith('cloud-config-'):
+ # noinspection PyTypeChecker
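+ # e.g. filename 'cloud-config-aws.ini.template' -> name 'cloud-config-aws.ini' -> cloud target 'cloud/aws/'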
+ cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/completion/'):
+ if path == 'test/lib/ansible_test/_data/completion/docker.txt':
+ return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
+
+ if path.startswith('test/lib/ansible_test/_internal/cloud/'):
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/lib/ansible_test/_internal/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_internal/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/pytest/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/requirements/'):
+ if name in (
+ 'integration',
+ 'network-integration',
+ 'windows-integration',
+ ):
+ return {
+ name: self.integration_all_target,
+ }
+
+ if name in (
+ 'sanity',
+ 'units',
+ ):
+ return {
+ name: 'all',
+ }
+
+ if name.startswith('integration.cloud.'):
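+ # e.g. 'integration.cloud.aws' -> 'cloud/aws/' (illustrative cloud name)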
+ cloud_target = 'cloud/%s/' % name.split('.')[2]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/support/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/shippable/'):
+ if dirname == 'test/utils/shippable':
+ test_map = {
+ 'cloud.sh': 'integration:cloud/',
+ 'linux.sh': 'integration:all',
+ 'network.sh': 'network-integration:all',
+ 'remote.sh': 'integration:all',
+ 'sanity.sh': 'sanity:all',
+ 'units.sh': 'units:all',
+ 'windows.sh': 'windows-integration:all',
+ }
+
+ test_match = test_map.get(filename)
+
+ if test_match:
+ test_command, test_target = test_match.split(':')
+
+ return {
+ test_command: test_target,
+ }
+
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitattributes',
+ '.gitignore',
+ '.mailmap',
+ 'COPYING',
+ 'Makefile',
+ ):
+ return minimal
+
+ if path in (
+ 'setup.py',
+ ):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None # unknown, will result in fall-back to run all tests
+
+ def _simple_plugin_tests(self, plugin_type, plugin_name): # type: (str, str) -> t.Dict[str, t.Optional[str]]
+ """
+ Return tests for the given plugin type and plugin name.
+ This function is useful for plugin types which do not require special processing.
+ """
+ if plugin_name == '__init__':
+ return all_tests(self.args, True)
+
+ integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
+
+ if integration_target:
+ integration_name = integration_target.name
+ else:
+ integration_name = None
+
+ units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return dict(
+ integration=integration_name,
+ units=units_path,
+ )
+
+
+def all_tests(args, force=False):
+ """
+ :type args: TestConfig
+ :type force: bool
+ :rtype: dict[str, str]
+ """
+ if force:
+ integration_all_target = 'all'
+ else:
+ integration_all_target = get_integration_all_target(args)
+
+ return {
+ 'sanity': 'all',
+ 'units': 'all',
+ 'integration': integration_all_target,
+ 'windows-integration': integration_all_target,
+ 'network-integration': integration_all_target,
+ }
+
+
+def get_integration_all_target(args):
+ """
+ :type args: TestConfig
+ :rtype: str
+ """
+ if isinstance(args, IntegrationConfig):
+ return args.changed_all_target
+
+ return 'all'
diff --git a/test/lib/ansible_test/_internal/cli.py b/test/lib/ansible_test/_internal/cli.py
new file mode 100644
index 00000000..c12b6488
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli.py
@@ -0,0 +1,1217 @@
+"""Test runner for all Ansible tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import sys
+
+# This import should occur as early as possible.
+# It must occur before subprocess has been imported anywhere in the current process.
+from .init import (
+ CURRENT_RLIMIT_NOFILE,
+)
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+ display,
+ raw_command,
+ generate_pip_command,
+ read_lines_without_comments,
+ MAXFD,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .delegation import (
+ check_delegation_args,
+ delegate,
+)
+
+from .executor import (
+ command_posix_integration,
+ command_network_integration,
+ command_windows_integration,
+ command_shell,
+ SUPPORTED_PYTHON_VERSIONS,
+ ApplicationWarning,
+ Delegate,
+ generate_pip_install,
+ check_startup,
+)
+
+from .config import (
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+ NetworkIntegrationConfig,
+ SanityConfig,
+ UnitsConfig,
+ ShellConfig,
+)
+
+from .env import (
+ EnvConfig,
+ command_env,
+ configure_timeout,
+)
+
+from .sanity import (
+ command_sanity,
+ sanity_init,
+ sanity_get_tests,
+)
+
+from .units import (
+ command_units,
+)
+
+from .target import (
+ find_target_completion,
+ walk_posix_integration_targets,
+ walk_network_integration_targets,
+ walk_windows_integration_targets,
+ walk_units_targets,
+ walk_sanity_targets,
+)
+
+from .core_ci import (
+ AWS_ENDPOINTS,
+)
+
+from .cloud import (
+ initialize_cloud_plugins,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ get_docker_completion,
+ get_network_completion,
+ get_remote_completion,
+ CommonConfig,
+)
+
+from .coverage.combine import (
+ command_coverage_combine,
+)
+
+from .coverage.erase import (
+ command_coverage_erase,
+)
+
+from .coverage.html import (
+ command_coverage_html,
+)
+
+from .coverage.report import (
+ command_coverage_report,
+ CoverageReportConfig,
+)
+
+from .coverage.xml import (
+ command_coverage_xml,
+)
+
+from .coverage.analyze.targets.generate import (
+ command_coverage_analyze_targets_generate,
+ CoverageAnalyzeTargetsGenerateConfig,
+)
+
+from .coverage.analyze.targets.expand import (
+ command_coverage_analyze_targets_expand,
+ CoverageAnalyzeTargetsExpandConfig,
+)
+
+from .coverage.analyze.targets.filter import (
+ command_coverage_analyze_targets_filter,
+ CoverageAnalyzeTargetsFilterConfig,
+)
+
+from .coverage.analyze.targets.combine import (
+ command_coverage_analyze_targets_combine,
+ CoverageAnalyzeTargetsCombineConfig,
+)
+
+from .coverage.analyze.targets.missing import (
+ command_coverage_analyze_targets_missing,
+ CoverageAnalyzeTargetsMissingConfig,
+)
+
+from .coverage import (
+ COVERAGE_GROUPS,
+ CoverageConfig,
+)
+
+if t.TYPE_CHECKING:
+ import argparse as argparse_module
+
+
+def main():
+ """Main program function."""
+ try:
+ os.chdir(data_context().content.root)
+ initialize_cloud_plugins()
+ sanity_init()
+ args = parse_args()
+ config = args.config(args) # type: CommonConfig
+ display.verbosity = config.verbosity
+ display.truncate = config.truncate
+ display.redact = config.redact
+ display.color = config.color
+ display.info_stderr = config.info_stderr
+ check_startup()
+ check_delegation_args(config)
+ configure_timeout(config)
+
+ display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
+ display.info('MAXFD: %d' % MAXFD, verbosity=2)
+
+ try:
+ args.func(config)
+ delegate_args = None
+ except Delegate as ex:
+ # save delegation args for use once we exit the exception handler
+ delegate_args = (ex.exclude, ex.require, ex.integration_targets)
+
+ if delegate_args:
+ # noinspection PyTypeChecker
+ delegate(config, *delegate_args)
+
+ display.review_warnings()
+ except ApplicationWarning as ex:
+ display.warning(u'%s' % ex)
+ sys.exit(0)
+ except ApplicationError as ex:
+ display.error(u'%s' % ex)
+ sys.exit(1)
+ except KeyboardInterrupt:
+ sys.exit(2)
+ except IOError as ex:
+ if ex.errno == errno.EPIPE:
+ sys.exit(3)
+ raise
+
+
+def parse_args():
+ """Parse command line arguments."""
+ try:
+ import argparse
+ except ImportError:
+ if '--requirements' not in sys.argv:
+ raise
+ # install argparse without using constraints since pip may be too old to support them
+ # not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different)
+ # argparse has no special requirements, so upgrading pip is not required here
+ raw_command(generate_pip_install(generate_pip_command(sys.executable), '', packages=['argparse'], use_constraints=False))
+ import argparse
+
+ try:
+ import argcomplete
+ except ImportError:
+ argcomplete = None
+
+ if argcomplete:
+ epilog = 'Tab completion available using the "argcomplete" python package.'
+ else:
+ epilog = 'Install the "argcomplete" python package to enable tab completion.'
+
+ def key_value_type(value): # type: (str) -> t.Tuple[str, str]
+ """Wrapper around key_value."""
+ return key_value(argparse, value)
+
+ parser = argparse.ArgumentParser(epilog=epilog)
+
+ common = argparse.ArgumentParser(add_help=False)
+
+ common.add_argument('-e', '--explain',
+ action='store_true',
+ help='explain commands that would be executed')
+
+ common.add_argument('-v', '--verbose',
+ dest='verbosity',
+ action='count',
+ default=0,
+ help='display more output')
+
+ common.add_argument('--color',
+ metavar='COLOR',
+ nargs='?',
+ help='generate color output: %(choices)s',
+ choices=('yes', 'no', 'auto'),
+ const='yes',
+ default='auto')
+
+ common.add_argument('--debug',
+ action='store_true',
+ help='run ansible commands in debug mode')
+
+ # noinspection PyTypeChecker
+ common.add_argument('--truncate',
+ dest='truncate',
+ metavar='COLUMNS',
+ type=int,
+ default=display.columns,
+ help='truncate some long output (0=disabled) (default: auto)')
+
+ common.add_argument('--redact',
+ dest='redact',
+ action='store_true',
+ default=True,
+ help='redact sensitive values in output')
+
+ common.add_argument('--no-redact',
+ dest='redact',
+ action='store_false',
+ default=False,
+ help='show sensitive values in output')
+
+ common.add_argument('--check-python',
+ choices=SUPPORTED_PYTHON_VERSIONS,
+ help=argparse.SUPPRESS)
+
+ test = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ test.add_argument('include',
+ metavar='TARGET',
+ nargs='*',
+ help='test the specified target').completer = complete_target
+
+ test.add_argument('--include',
+ metavar='TARGET',
+ action='append',
+ help='include the specified target').completer = complete_target
+
+ test.add_argument('--exclude',
+ metavar='TARGET',
+ action='append',
+ help='exclude the specified target').completer = complete_target
+
+ test.add_argument('--require',
+ metavar='TARGET',
+ action='append',
+ help='require the specified target').completer = complete_target
+
+ test.add_argument('--coverage',
+ action='store_true',
+ help='analyze code coverage when running tests')
+
+ test.add_argument('--coverage-label',
+ default='',
+ help='label to include in coverage output file names')
+
+ test.add_argument('--coverage-check',
+ action='store_true',
+ help='only verify code coverage can be enabled')
+
+ test.add_argument('--metadata',
+ help=argparse.SUPPRESS)
+
+ test.add_argument('--base-branch',
+ help='base branch used for change detection')
+
+ add_changes(test, argparse)
+ add_environments(test)
+
+ integration = argparse.ArgumentParser(add_help=False, parents=[test])
+
+ integration.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ integration.add_argument('--start-at',
+ metavar='TARGET',
+ help='start at the specified target').completer = complete_target
+
+ integration.add_argument('--start-at-task',
+ metavar='TASK',
+ help='start at the specified task')
+
+ integration.add_argument('--tags',
+ metavar='TAGS',
+ help='only run plays and tasks tagged with these values')
+
+ integration.add_argument('--skip-tags',
+ metavar='TAGS',
+ help='only run plays and tasks whose tags do not match these values')
+
+ integration.add_argument('--diff',
+ action='store_true',
+ help='show diff output')
+
+ integration.add_argument('--allow-destructive',
+ action='store_true',
+ help='allow destructive tests')
+
+ integration.add_argument('--allow-root',
+ action='store_true',
+ help='allow tests requiring root when not root')
+
+ integration.add_argument('--allow-disabled',
+ action='store_true',
+ help='allow tests which have been marked as disabled')
+
+ integration.add_argument('--allow-unstable',
+ action='store_true',
+ help='allow tests which have been marked as unstable')
+
+ integration.add_argument('--allow-unstable-changed',
+ action='store_true',
+ help='allow tests which have been marked as unstable when focused changes are detected')
+
+ integration.add_argument('--allow-unsupported',
+ action='store_true',
+ help='allow tests which have been marked as unsupported')
+
+ integration.add_argument('--retry-on-error',
+ action='store_true',
+ help='retry failed test with increased verbosity')
+
+ integration.add_argument('--continue-on-error',
+ action='store_true',
+ help='continue after failed test')
+
+ integration.add_argument('--debug-strategy',
+ action='store_true',
+ help='run test playbooks using the debug strategy')
+
+ integration.add_argument('--changed-all-target',
+ metavar='TARGET',
+ default='all',
+ help='target to run when all tests are needed')
+
+ integration.add_argument('--changed-all-mode',
+ metavar='MODE',
+ choices=('default', 'include', 'exclude'),
+ help='include/exclude behavior with --changed-all-target: %(choices)s')
+
+ integration.add_argument('--list-targets',
+ action='store_true',
+ help='list matching targets instead of running tests')
+
+ integration.add_argument('--no-temp-workdir',
+ action='store_true',
+ help='do not run tests from a temporary directory (use only for verifying broken tests)')
+
+ integration.add_argument('--no-temp-unicode',
+ action='store_true',
+ help='avoid unicode characters in temporary directory (use only for verifying broken tests)')
+
+ subparsers = parser.add_subparsers(metavar='COMMAND')
+ subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ posix_integration = subparsers.add_parser('integration',
+ parents=[integration],
+ help='posix integration tests')
+
+ posix_integration.set_defaults(func=command_posix_integration,
+ targets=walk_posix_integration_targets,
+ config=PosixIntegrationConfig)
+
+ add_extra_docker_options(posix_integration)
+ add_httptester_options(posix_integration, argparse)
+
+ network_integration = subparsers.add_parser('network-integration',
+ parents=[integration],
+ help='network integration tests')
+
+ network_integration.set_defaults(func=command_network_integration,
+ targets=walk_network_integration_targets,
+ config=NetworkIntegrationConfig)
+
+ add_extra_docker_options(network_integration, integration=False)
+
+ network_integration.add_argument('--platform',
+ metavar='PLATFORM',
+ action='append',
+ help='network platform/version').completer = complete_network_platform
+
+ network_integration.add_argument('--platform-collection',
+ type=key_value_type,
+ metavar='PLATFORM=COLLECTION',
+ action='append',
+ help='collection used to test platform').completer = complete_network_platform_collection
+
+ network_integration.add_argument('--platform-connection',
+ type=key_value_type,
+ metavar='PLATFORM=CONNECTION',
+ action='append',
+ help='connection used to test platform').completer = complete_network_platform_connection
+
+ network_integration.add_argument('--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests')
+
+ network_integration.add_argument('--testcase',
+ metavar='TESTCASE',
+ help='limit a test to a specified testcase').completer = complete_network_testcase
+
+ windows_integration = subparsers.add_parser('windows-integration',
+ parents=[integration],
+ help='windows integration tests')
+
+ windows_integration.set_defaults(func=command_windows_integration,
+ targets=walk_windows_integration_targets,
+ config=WindowsIntegrationConfig)
+
+ add_extra_docker_options(windows_integration, integration=False)
+ add_httptester_options(windows_integration, argparse)
+
+ windows_integration.add_argument('--windows',
+ metavar='VERSION',
+ action='append',
+ help='windows version').completer = complete_windows
+
+ windows_integration.add_argument('--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests')
+
+ units = subparsers.add_parser('units',
+ parents=[test],
+ help='unit tests')
+
+ units.set_defaults(func=command_units,
+ targets=walk_units_targets,
+ config=UnitsConfig)
+
+ units.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ units.add_argument('--collect-only',
+ action='store_true',
+ help='collect tests but do not execute them')
+
+ # noinspection PyTypeChecker
+ units.add_argument('--num-workers',
+ type=int,
+ help='number of workers to use (default: auto)')
+
+ units.add_argument('--requirements-mode',
+ choices=('only', 'skip'),
+ help=argparse.SUPPRESS)
+
+ add_extra_docker_options(units, integration=False)
+
+ sanity = subparsers.add_parser('sanity',
+ parents=[test],
+ help='sanity tests')
+
+ sanity.set_defaults(func=command_sanity,
+ targets=walk_sanity_targets,
+ config=SanityConfig)
+
+ sanity.add_argument('--test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to run').completer = complete_sanity_test
+
+ sanity.add_argument('--skip-test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to skip').completer = complete_sanity_test
+
+ sanity.add_argument('--allow-disabled',
+ action='store_true',
+ help='allow tests to run which are disabled by default')
+
+ sanity.add_argument('--list-tests',
+ action='store_true',
+ help='list available tests')
+
+ sanity.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ sanity.add_argument('--enable-optional-errors',
+ action='store_true',
+ help='enable optional errors')
+
+ add_lint(sanity)
+ add_extra_docker_options(sanity, integration=False)
+
+ shell = subparsers.add_parser('shell',
+ parents=[common],
+ help='open an interactive shell')
+
+ shell.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ shell.set_defaults(func=command_shell,
+ config=ShellConfig)
+
+ shell.add_argument('--raw',
+ action='store_true',
+ help='direct to shell with no setup')
+
+ add_environments(shell)
+ add_extra_docker_options(shell)
+ add_httptester_options(shell, argparse)
+
+ coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ add_environments(coverage_common, isolated_delegation=False)
+
+ coverage = subparsers.add_parser('coverage',
+ help='code coverage management and reporting')
+
+ coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
+ coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ add_coverage_analyze(coverage_subparsers, coverage_common)
+
+ coverage_combine = coverage_subparsers.add_parser('combine',
+ parents=[coverage_common],
+ help='combine coverage data and rewrite remote paths')
+
+ coverage_combine.set_defaults(func=command_coverage_combine,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_combine)
+
+ coverage_erase = coverage_subparsers.add_parser('erase',
+ parents=[coverage_common],
+ help='erase coverage data files')
+
+ coverage_erase.set_defaults(func=command_coverage_erase,
+ config=CoverageConfig)
+
+ coverage_report = coverage_subparsers.add_parser('report',
+ parents=[coverage_common],
+ help='generate console coverage report')
+
+ coverage_report.set_defaults(func=command_coverage_report,
+ config=CoverageReportConfig)
+
+ coverage_report.add_argument('--show-missing',
+ action='store_true',
+ help='show line numbers of statements not executed')
+ coverage_report.add_argument('--include',
+ metavar='PAT1,PAT2,...',
+ help='include only files whose paths match one of these '
+ 'patterns. Accepts shell-style wildcards, which must be '
+ 'quoted.')
+ coverage_report.add_argument('--omit',
+ metavar='PAT1,PAT2,...',
+ help='omit files whose paths match one of these patterns. '
+ 'Accepts shell-style wildcards, which must be quoted.')
+
+ add_extra_coverage_options(coverage_report)
+
+ coverage_html = coverage_subparsers.add_parser('html',
+ parents=[coverage_common],
+ help='generate html coverage report')
+
+ coverage_html.set_defaults(func=command_coverage_html,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_html)
+
+ coverage_xml = coverage_subparsers.add_parser('xml',
+ parents=[coverage_common],
+ help='generate xml coverage report')
+
+ coverage_xml.set_defaults(func=command_coverage_xml,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_xml)
+
+ env = subparsers.add_parser('env',
+ parents=[common],
+ help='show information about the test environment')
+
+ env.set_defaults(func=command_env,
+ config=EnvConfig)
+
+ env.add_argument('--show',
+ action='store_true',
+ help='show environment on stdout')
+
+ env.add_argument('--dump',
+ action='store_true',
+ help='dump environment to disk')
+
+ env.add_argument('--list-files',
+ action='store_true',
+ help='list files on stdout')
+
+ # noinspection PyTypeChecker
+ env.add_argument('--timeout',
+ type=int,
+ metavar='MINUTES',
+ help='timeout for future ansible-test commands (0 clears)')
+
+ if argcomplete:
+ argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
+
+ args = parser.parse_args()
+
+ if args.explain and not args.verbosity:
+ args.verbosity = 1
+
+ if args.color == 'yes':
+ args.color = True
+ elif args.color == 'no':
+ args.color = False
+ else:
+ args.color = sys.stdout.isatty()
+
+ return args
+
+
+def key_value(argparse, value): # type: (argparse_module, str) -> t.Tuple[str, str]
+ """Type parsing and validation for argparse key/value pairs separated by an '=' character."""
+ parts = value.split('=')
+
+ if len(parts) != 2:
+ raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value)
+
+ return parts[0], parts[1]
+
+
+# noinspection PyProtectedMember
+def add_coverage_analyze(coverage_subparsers, coverage_common): # type: (argparse_module._SubParsersAction, argparse_module.ArgumentParser) -> None
+ """Add the `coverage analyze` subcommand."""
+ analyze = coverage_subparsers.add_parser(
+ 'analyze',
+ help='analyze collected coverage data',
+ )
+
+ analyze_subparsers = analyze.add_subparsers(metavar='COMMAND')
+ analyze_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ targets = analyze_subparsers.add_parser(
+ 'targets',
+ help='analyze integration test target coverage',
+ )
+
+ targets_subparsers = targets.add_subparsers(metavar='COMMAND')
+ targets_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ targets_generate = targets_subparsers.add_parser(
+ 'generate',
+ parents=[coverage_common],
+ help='aggregate coverage by integration test target',
+ )
+
+ targets_generate.set_defaults(
+ func=command_coverage_analyze_targets_generate,
+ config=CoverageAnalyzeTargetsGenerateConfig,
+ )
+
+ targets_generate.add_argument(
+ 'input_dir',
+ nargs='?',
+ help='directory to read coverage from',
+ )
+
+ targets_generate.add_argument(
+ 'output_file',
+ help='output file for aggregated coverage',
+ )
+
+ targets_expand = targets_subparsers.add_parser(
+ 'expand',
+ parents=[coverage_common],
+ help='expand target names from integers in aggregated coverage',
+ )
+
+ targets_expand.set_defaults(
+ func=command_coverage_analyze_targets_expand,
+ config=CoverageAnalyzeTargetsExpandConfig,
+ )
+
+ targets_expand.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_expand.add_argument(
+ 'output_file',
+ help='output file to write expanded coverage to',
+ )
+
+ targets_filter = targets_subparsers.add_parser(
+ 'filter',
+ parents=[coverage_common],
+ help='filter aggregated coverage data',
+ )
+
+ targets_filter.set_defaults(
+ func=command_coverage_analyze_targets_filter,
+ config=CoverageAnalyzeTargetsFilterConfig,
+ )
+
+ targets_filter.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_filter.add_argument(
+ 'output_file',
+ help='output file to write filtered coverage to',
+ )
+
+ targets_filter.add_argument(
+ '--include-target',
+ dest='include_targets',
+ action='append',
+ help='include the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-target',
+ dest='exclude_targets',
+ action='append',
+ help='exclude the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--include-path',
+ help='include paths matching the given regex',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-path',
+ help='exclude paths matching the given regex',
+ )
+
+ targets_combine = targets_subparsers.add_parser(
+ 'combine',
+ parents=[coverage_common],
+ help='combine multiple aggregated coverage files',
+ )
+
+ targets_combine.set_defaults(
+ func=command_coverage_analyze_targets_combine,
+ config=CoverageAnalyzeTargetsCombineConfig,
+ )
+
+ targets_combine.add_argument(
+ 'input_file',
+ nargs='+',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_combine.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing = targets_subparsers.add_parser(
+ 'missing',
+ parents=[coverage_common],
+ help='identify coverage in one file missing in another',
+ )
+
+ targets_missing.set_defaults(
+ func=command_coverage_analyze_targets_missing,
+ config=CoverageAnalyzeTargetsMissingConfig,
+ )
+
+ targets_missing.add_argument(
+ 'from_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'to_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing.add_argument(
+ '--only-gaps',
+ action='store_true',
+ help='report only arcs/lines not hit by any target',
+ )
+
+ targets_missing.add_argument(
+ '--only-exists',
+ action='store_true',
+ help='limit results to files that exist',
+ )
+
+
+def add_lint(parser):
+ """
+ :type parser: argparse.ArgumentParser
+ """
+ parser.add_argument('--lint',
+ action='store_true',
+ help='write lint output to stdout, everything else to stderr')
+
+ parser.add_argument('--junit',
+ action='store_true',
+ help='write test failures to junit xml files')
+
+ parser.add_argument('--failure-ok',
+ action='store_true',
+ help='exit successfully on failed tests after saving results')
+
+
+def add_changes(parser, argparse):
+ """
+ :type parser: argparse.ArgumentParser
+ :type argparse: argparse
+ """
+ parser.add_argument('--changed', action='store_true', help='limit targets based on changes')
+
+ changes = parser.add_argument_group(title='change detection arguments')
+
+ changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS)
+ changes.add_argument('--untracked', action='store_true', help='include untracked files')
+ changes.add_argument('--ignore-committed', dest='committed', action='store_false', help='exclude committed files')
+ changes.add_argument('--ignore-staged', dest='staged', action='store_false', help='exclude staged files')
+ changes.add_argument('--ignore-unstaged', dest='unstaged', action='store_false', help='exclude unstaged files')
+
+ changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS)
+ changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
+
+
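+# The --ignore-* flags above use store_false with an explicit dest, so each flag
+# flips a default of True (argparse defaults store_false options to True). A
+# standalone sketch:
+import argparse as _example_argparse
+_example_parser = _example_argparse.ArgumentParser()
+_example_parser.add_argument('--ignore-staged', dest='staged', action='store_false')
+assert _example_parser.parse_args([]).staged is True
+assert _example_parser.parse_args(['--ignore-staged']).staged is False
+
+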
+def add_environments(parser, isolated_delegation=True):
+ """
+ :type parser: argparse.ArgumentParser
+ :type isolated_delegation: bool
+ """
+ parser.add_argument('--requirements',
+ action='store_true',
+ help='install command requirements')
+
+ parser.add_argument('--python-interpreter',
+ metavar='PATH',
+ default=None,
+ help='path to the docker or remote python interpreter')
+
+ parser.add_argument('--no-pip-check',
+ dest='pip_check',
+ default=True,
+ action='store_false',
+ help='do not run "pip check" to verify requirements')
+
+ environments = parser.add_mutually_exclusive_group()
+
+ environments.add_argument('--local',
+ action='store_true',
+ help='run from the local environment')
+
+ environments.add_argument('--venv',
+ action='store_true',
+ help='run from ansible-test managed virtual environments')
+
+ venv = parser.add_argument_group(title='venv arguments')
+
+ venv.add_argument('--venv-system-site-packages',
+ action='store_true',
+ help='enable system site packages')
+
+ if not isolated_delegation:
+ environments.set_defaults(
+ docker=None,
+ remote=None,
+ remote_stage=None,
+ remote_provider=None,
+ remote_aws_region=None,
+ remote_terminate=None,
+ remote_endpoint=None,
+ python_interpreter=None,
+ )
+
+ return
+
+ environments.add_argument('--docker',
+ metavar='IMAGE',
+ nargs='?',
+ default=None,
+ const='default',
+ help='run from a docker container').completer = complete_docker
+
+ environments.add_argument('--remote',
+ metavar='PLATFORM',
+ default=None,
+ help='run from a remote instance').completer = complete_remote_shell if parser.prog.endswith(' shell') else complete_remote
+
+ remote = parser.add_argument_group(title='remote arguments')
+
+ remote.add_argument('--remote-stage',
+ metavar='STAGE',
+ help='remote stage to use: prod, dev',
+ default='prod').completer = complete_remote_stage
+
+ remote.add_argument('--remote-provider',
+ metavar='PROVIDER',
+ help='remote provider to use: %(choices)s',
+ choices=['default', 'aws', 'azure', 'parallels', 'ibmvpc', 'ibmps'],
+ default='default')
+
+ remote.add_argument('--remote-endpoint',
+ metavar='ENDPOINT',
+ help='remote provisioning endpoint to use (default: auto)',
+ default=None)
+
+ remote.add_argument('--remote-aws-region',
+ metavar='REGION',
+ help='remote aws region to use: %(choices)s (default: auto)',
+ choices=sorted(AWS_ENDPOINTS),
+ default=None)
+
+ remote.add_argument('--remote-terminate',
+ metavar='WHEN',
+ help='terminate remote instance: %(choices)s (default: %(default)s)',
+ choices=['never', 'always', 'success'],
+ default='never')
+
+
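+# --local, --venv, --docker and --remote above share a mutually exclusive group,
+# so argparse rejects combinations at parse time. A standalone sketch:
+import argparse as _example_argparse
+_example_parser = _example_argparse.ArgumentParser()
+_example_group = _example_parser.add_mutually_exclusive_group()
+_example_group.add_argument('--local', action='store_true')
+_example_group.add_argument('--docker', nargs='?', const='default')
+assert _example_parser.parse_args(['--docker']).docker == 'default'  # bare flag takes const
+# parsing ['--local', '--docker'] would exit: "not allowed with argument --local"
+
+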
+def add_extra_coverage_options(parser):
+ """
+ :type parser: argparse.ArgumentParser
+ """
+ parser.add_argument('--group-by',
+ metavar='GROUP',
+ action='append',
+ choices=COVERAGE_GROUPS,
+ help='group output by: %s' % ', '.join(COVERAGE_GROUPS))
+
+ parser.add_argument('--all',
+ action='store_true',
+ help='include all python/powershell source files')
+
+ parser.add_argument('--stub',
+ action='store_true',
+ help='generate empty report of all python/powershell source files')
+
+ parser.add_argument('--export',
+ help='directory to export combined coverage files to')
+
+
+def add_httptester_options(parser, argparse):
+ """
+ :type parser: argparse.ArgumentParser
+ :type argparse: argparse
+ """
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument('--httptester',
+ metavar='IMAGE',
+ default='quay.io/ansible/http-test-container:1.0.0',
+ help='docker image to use for the httptester container')
+
+ group.add_argument('--disable-httptester',
+ dest='httptester',
+ action='store_const',
+ const='',
+ help='do not use the httptester container')
+
+ parser.add_argument('--inject-httptester',
+ action='store_true',
+ help=argparse.SUPPRESS) # internal use only
+
+
+def add_extra_docker_options(parser, integration=True):
+ """
+ :type parser: argparse.ArgumentParser
+ :type integration: bool
+ """
+ docker = parser.add_argument_group(title='docker arguments')
+
+ docker.add_argument('--docker-no-pull',
+ action='store_false',
+ dest='docker_pull',
+ help='do not explicitly pull the latest docker images')
+
+ if data_context().content.is_ansible:
+ docker.add_argument('--docker-keep-git',
+ action='store_true',
+ help='transfer git related files into the docker container')
+ else:
+ docker.set_defaults(
+ docker_keep_git=False,
+ )
+
+ docker.add_argument('--docker-seccomp',
+ metavar='SC',
+ choices=('default', 'unconfined'),
+ default=None,
+ help='set seccomp confinement for the test container: %(choices)s')
+
+ docker.add_argument('--docker-terminate',
+ metavar='WHEN',
+ help='terminate docker container: %(choices)s (default: %(default)s)',
+ choices=['never', 'always', 'success'],
+ default='always')
+
+ if not integration:
+ return
+
+ docker.add_argument('--docker-privileged',
+ action='store_true',
+ help='run docker container in privileged mode')
+
+ docker.add_argument('--docker-network',
+ help='run using the specified docker network')
+
+ # noinspection PyTypeChecker
+ docker.add_argument('--docker-memory',
+ help='memory limit for docker in bytes', type=int)
+
+
+# noinspection PyUnusedLocal
+def complete_remote_stage(prefix, parsed_args, **_): # pylint: disable=unused-argument
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
+
+
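+# A quick illustration of the prefix filtering these completers share (a sketch;
+# argcomplete normally supplies the prefix and the parsed arguments):
+assert complete_remote_stage('p', None) == ['prod']
+assert complete_remote_stage('', None) == ['prod', 'dev']
+
+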
+def complete_target(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ return find_target_completion(parsed_args.targets, prefix)
+
+
+# noinspection PyUnusedLocal
+def complete_remote(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_remote_completion().keys())
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+# noinspection PyUnusedLocal
+def complete_remote_shell(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_remote_completion().keys())
+
+    # Windows Server 2008 does not support SSH, so it is not added to the list of valid images
+ windows_completion_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt')
+ images.extend(["windows/%s" % i for i in read_lines_without_comments(windows_completion_path, remove_blank_lines=True) if i != '2008'])
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+# noinspection PyUnusedLocal
+def complete_docker(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_docker_completion().keys())
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+def complete_windows(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt'), remove_blank_lines=True)
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
+
+
+def complete_network_platform(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ images = sorted(get_network_completion())
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
+
+
+def complete_network_platform_collection(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ left = prefix.split('=')[0]
+ images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
+
+
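+# These completers offer '<platform>=' stems so the user can append a value after
+# the equals sign. A standalone sketch with hypothetical completion entries:
+_example_entries = ('vyos/1.1.8', 'ios/csr1000v')
+_example_stems = sorted(set(entry.split('/')[0] for entry in _example_entries))
+assert [stem + '=' for stem in _example_stems if stem.startswith('vy')] == ['vyos=']
+
+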
+def complete_network_platform_connection(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ left = prefix.split('=')[0]
+ images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
+
+
+def complete_network_testcase(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ testcases = []
+
+    # since testcases are module specific, don't autocomplete if more than one
+    # module is specified
+ if len(parsed_args.include) != 1:
+ return []
+
+ test_dir = os.path.join(data_context().content.integration_targets_path, parsed_args.include[0], 'tests')
+ connection_dirs = data_context().content.get_dirs(test_dir)
+
+ for connection_dir in connection_dirs:
+ for testcase in [os.path.basename(path) for path in data_context().content.get_files(connection_dir)]:
+ if testcase.startswith(prefix):
+ testcases.append(testcase.split('.')[0])
+
+ return testcases
+
+
+# noinspection PyUnusedLocal
+def complete_sanity_test(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ tests = sorted(test.name for test in sanity_get_tests())
+
+ return [i for i in tests if i.startswith(prefix)]
diff --git a/test/lib/ansible_test/_internal/cloud/__init__.py b/test/lib/ansible_test/_internal/cloud/__init__.py
new file mode 100644
index 00000000..04f592c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/__init__.py
@@ -0,0 +1,429 @@
+"""Plugin system for cloud providers and environments for use in integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import atexit
+import datetime
+import time
+import os
+import re
+import tempfile
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ import_plugins,
+ load_plugins,
+ ABC,
+ ANSIBLE_TEST_CONFIG_ROOT,
+)
+
+from ..util_common import (
+ write_json_test_results,
+ ResultType,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..config import (
+ IntegrationConfig,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+from ..data import (
+ data_context,
+)
+
+PROVIDERS = {}
+ENVIRONMENTS = {}
+
+
+def initialize_cloud_plugins():
+ """Import cloud plugins and load them into the plugin dictionaries."""
+ import_plugins('cloud')
+
+ load_plugins(CloudProvider, PROVIDERS)
+ load_plugins(CloudEnvironment, ENVIRONMENTS)
+
+
+def get_cloud_platforms(args, targets=None):
+ """
+ :type args: TestConfig
+ :type targets: tuple[IntegrationTarget] | None
+ :rtype: list[str]
+ """
+ if isinstance(args, IntegrationConfig):
+ if args.list_targets:
+ return []
+
+ if targets is None:
+ cloud_platforms = set(args.metadata.cloud_config or [])
+ else:
+ cloud_platforms = set(get_cloud_platform(target) for target in targets)
+
+ cloud_platforms.discard(None)
+
+ return sorted(cloud_platforms)
+
+
+def get_cloud_platform(target):
+ """
+ :type target: IntegrationTarget
+ :rtype: str | None
+ """
+ cloud_platforms = set(a.split('/')[1] for a in target.aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/')
+
+ if not cloud_platforms:
+ return None
+
+ if len(cloud_platforms) == 1:
+ cloud_platform = cloud_platforms.pop()
+
+ if cloud_platform not in PROVIDERS:
+            raise ApplicationError('Target %s aliases contain an unknown cloud platform: %s' % (target.name, cloud_platform))
+
+ return cloud_platform
+
+    raise ApplicationError('Target %s aliases contain multiple cloud platforms: %s' % (target.name, ', '.join(sorted(cloud_platforms))))
+
+
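+# The alias convention is 'cloud/<platform>/'; a standalone sketch of the parsing
+# above, using hypothetical aliases:
+_example_aliases = ('cloud/aws/', 'destructive/', 'shippable/posix/group1/')
+_example_platforms = set(a.split('/')[1] for a in _example_aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/')
+assert _example_platforms == set(['aws'])
+
+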
+def get_cloud_providers(args, targets=None):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget] | None
+ :rtype: list[CloudProvider]
+ """
+ return [PROVIDERS[p](args) for p in get_cloud_platforms(args, targets)]
+
+
+def get_cloud_environment(args, target):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :rtype: CloudEnvironment
+ """
+ cloud_platform = get_cloud_platform(target)
+
+ if not cloud_platform:
+ return None
+
+ return ENVIRONMENTS[cloud_platform](args)
+
+
+def cloud_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :return: list[str]
+ """
+ if args.metadata.cloud_config is not None:
+ return [] # cloud filter already performed prior to delegation
+
+ exclude = []
+
+ for provider in get_cloud_providers(args, targets):
+ provider.filter(targets, exclude)
+
+ return exclude
+
+
+def cloud_init(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ """
+ if args.metadata.cloud_config is not None:
+ return # cloud configuration already established prior to delegation
+
+ args.metadata.cloud_config = {}
+
+ results = {}
+
+ for provider in get_cloud_providers(args, targets):
+ args.metadata.cloud_config[provider.platform] = {}
+
+ start_time = time.time()
+ provider.setup()
+ end_time = time.time()
+
+ results[provider.platform] = dict(
+ platform=provider.platform,
+ setup_seconds=int(end_time - start_time),
+ targets=[target.name for target in targets],
+ )
+
+ if not args.explain and results:
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ clouds=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+
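+# The results filename above embeds a UTC timestamp with every non-digit replaced
+# by '-'. A standalone sketch with a fixed timestamp:
+_example_stamp = datetime.datetime(2020, 12, 19, 18, 12, 24)
+assert re.sub(r'[^0-9]', '-', str(_example_stamp.replace(microsecond=0))) == '2020-12-19-18-12-24'
+
+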
+class CloudBase(ABC):
+ """Base class for cloud plugins."""
+ __metaclass__ = abc.ABCMeta
+
+ _CONFIG_PATH = 'config_path'
+ _RESOURCE_PREFIX = 'resource_prefix'
+ _MANAGED = 'managed'
+ _SETUP_EXECUTED = 'setup_executed'
+
+ def __init__(self, args):
+ """
+ :type args: IntegrationConfig
+ """
+ self.args = args
+ self.platform = self.__module__.split('.')[-1]
+
+ def config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add the config file to the payload file list."""
+ if self._get_cloud_config(self._CONFIG_PATH, ''):
+ pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root))
+
+ if pair not in files:
+ display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3)
+ files.append(pair)
+
+ data_context().register_payload_callback(config_callback)
+
+ @property
+ def setup_executed(self):
+ """
+ :rtype: bool
+ """
+ return self._get_cloud_config(self._SETUP_EXECUTED, False)
+
+ @setup_executed.setter
+ def setup_executed(self, value):
+ """
+ :type value: bool
+ """
+ self._set_cloud_config(self._SETUP_EXECUTED, value)
+
+ @property
+ def config_path(self):
+ """
+ :rtype: str
+ """
+ return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH))
+
+ @config_path.setter
+ def config_path(self, value):
+ """
+ :type value: str
+ """
+ self._set_cloud_config(self._CONFIG_PATH, value)
+
+ @property
+ def resource_prefix(self):
+ """
+ :rtype: str
+ """
+ return self._get_cloud_config(self._RESOURCE_PREFIX)
+
+ @resource_prefix.setter
+ def resource_prefix(self, value):
+ """
+ :type value: str
+ """
+ self._set_cloud_config(self._RESOURCE_PREFIX, value)
+
+ @property
+ def managed(self):
+ """
+ :rtype: bool
+ """
+ return self._get_cloud_config(self._MANAGED)
+
+ @managed.setter
+ def managed(self, value):
+ """
+ :type value: bool
+ """
+ self._set_cloud_config(self._MANAGED, value)
+
+ def _get_cloud_config(self, key, default=None):
+ """
+ :type key: str
+ :type default: str | int | bool | None
+ :rtype: str | int | bool
+ """
+ if default is not None:
+ return self.args.metadata.cloud_config[self.platform].get(key, default)
+
+ return self.args.metadata.cloud_config[self.platform][key]
+
+ def _set_cloud_config(self, key, value):
+ """
+ :type key: str
+ :type value: str | int | bool
+ """
+ self.args.metadata.cloud_config[self.platform][key] = value
+
+
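+# A minimal sketch of the metadata round-trip the two helpers above perform,
+# using a plain dict in place of args.metadata.cloud_config:
+_example_cloud_config = {'acme': {}}
+_example_cloud_config['acme']['setup_executed'] = True             # _set_cloud_config
+assert _example_cloud_config['acme'].get('config_path', '') == ''  # lookup with a default
+assert _example_cloud_config['acme']['setup_executed'] is True     # lookup without one
+
+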
+class CloudProvider(CloudBase):
+ """Base class for cloud provider plugins. Sets up cloud resources before delegation."""
+ def __init__(self, args, config_extension='.ini'):
+ """
+ :type args: IntegrationConfig
+ :type config_extension: str
+ """
+ super(CloudProvider, self).__init__(args)
+
+ self.ci_provider = get_ci_provider()
+ self.remove_config = False
+ self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
+ self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name)
+ self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
+ self.config_extension = config_extension
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ self.resource_prefix = self.ci_provider.generate_resource_prefix()
+
+ atexit.register(self.cleanup)
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.remove_config:
+ os.remove(self.config_path)
+
+ def _use_static_config(self):
+ """
+ :rtype: bool
+ """
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
+ self.config_path = self.config_static_path
+ static = True
+ else:
+ static = False
+
+ self.managed = not static
+
+ return static
+
+ def _write_config(self, content):
+ """
+ :type content: str
+ """
+ prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]
+
+ with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
+ filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name))
+
+ self.config_path = filename
+ self.remove_config = True
+
+ display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)
+
+ config_fd.write(to_bytes(content))
+ config_fd.flush()
+
+ def _read_config_template(self):
+ """
+ :rtype: str
+ """
+ lines = read_text_file(self.config_template_path).splitlines()
+ lines = [line for line in lines if not line.startswith('#')]
+ config = '\n'.join(lines).strip() + '\n'
+ return config
+
+ @staticmethod
+ def _populate_config_template(template, values):
+ """
+ :type template: str
+ :type values: dict[str, str]
+ :rtype: str
+ """
+ for key in sorted(values):
+ value = values[key]
+ template = template.replace('@%s' % key, value)
+
+ return template
+
+
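+# The config templates use an '@KEY' placeholder convention; substitution above is
+# plain string replacement in sorted key order. A standalone sketch:
+_example_values = dict(ACCESS_KEY='AKIAEXAMPLE', REGION='us-east-1')
+_example_template = 'aws_access_key: @ACCESS_KEY\naws_region: @REGION\n'
+for _example_key in sorted(_example_values):
+    _example_template = _example_template.replace('@%s' % _example_key, _example_values[_example_key])
+assert _example_template == 'aws_access_key: AKIAEXAMPLE\naws_region: us-east-1\n'
+
+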
+class CloudEnvironment(CloudBase):
+ """Base class for cloud environment plugins. Updates integration test environment after delegation."""
+ def setup_once(self):
+ """Run setup if it has not already been run."""
+ if self.setup_executed:
+ return
+
+ self.setup()
+ self.setup_executed = True
+
+ def setup(self):
+ """Setup which should be done once per environment instead of once per test target."""
+
+ @abc.abstractmethod
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+
+ def on_failure(self, target, tries):
+ """
+ :type target: IntegrationTarget
+ :type tries: int
+ """
+
+
+class CloudEnvironmentConfig:
+ """Configuration for the environment."""
+ def __init__(self, env_vars=None, ansible_vars=None, module_defaults=None, callback_plugins=None):
+ """
+ :type env_vars: dict[str, str] | None
+ :type ansible_vars: dict[str, any] | None
+ :type module_defaults: dict[str, dict[str, any]] | None
+ :type callback_plugins: list[str] | None
+ """
+ self.env_vars = env_vars
+ self.ansible_vars = ansible_vars
+ self.module_defaults = module_defaults
+ self.callback_plugins = callback_plugins
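+
+
+# A usage sketch of the container above with hypothetical values, mirroring what
+# environment plugins return from get_environment_config():
+_example_environment_config = CloudEnvironmentConfig(
+    env_vars={'EXAMPLE_ENDPOINT': 'http://localhost:8080'},
+    ansible_vars={'resource_prefix': 'ansible-test-prefix'},
+    callback_plugins=['aws_resource_actions'],
+)
+assert _example_environment_config.module_defaults is None  # unset sections stay None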
diff --git a/test/lib/ansible_test/_internal/cloud/acme.py b/test/lib/ansible_test/_internal/cloud/acme.py
new file mode 100644
index 00000000..3d0ace24
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/acme.py
@@ -0,0 +1,193 @@
+"""ACME plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+ ApplicationError,
+ SubprocessError,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class ACMEProvider(CloudProvider):
+ """ACME plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'acme-simulator'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(ACMEProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/acme-test-container:2.0.0'
+ self.container_name = ''
+
+ def _wait_for_service(self, protocol, acme_host, port, local_part, name):
+ """Wait for an endpoint to accept connections."""
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True, insecure=True)
+ endpoint = '%s://%s:%d/%s' % (protocol, acme_host, port, local_part)
+
+ for dummy in range(1, 30):
+ display.info('Waiting for %s: %s' % (name, endpoint), verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(1)
+
+ raise ApplicationError('Timeout waiting for %s.' % name)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(ACMEProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(ACMEProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Create a ACME test container using docker."""
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing ACME docker test container.', verbosity=1)
+ else:
+ display.info('Starting a new ACME docker test container.', verbosity=1)
+
+ if not container_id:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', '5000:5000', # control port for flask app in container
+ '-p', '14000:14000', # Pebble ACME CA
+ ]
+ else:
+ publish_ports = []
+
+ if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ acme_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ acme_host = self._get_simulator_address()
+ display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
+ else:
+ acme_host = get_docker_hostname()
+
+ if container_id:
+ acme_host_ip = self._get_simulator_address()
+ else:
+ acme_host_ip = get_docker_hostname()
+
+ self._set_cloud_config('acme_host', acme_host)
+
+ self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
+ self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError()
+
+
+class ACMEEnvironment(CloudEnvironment):
+ """ACME environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ ansible_vars = dict(
+ acme_host=self._get_cloud_config('acme_host'),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/aws.py b/test/lib/ansible_test/_internal/cloud/aws.py
new file mode 100644
index 00000000..190ef488
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/aws.py
@@ -0,0 +1,124 @@
+"""AWS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ ApplicationError,
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class AwsCloudProvider(CloudProvider):
+ """AWS cloud provider plugin. Sets up cloud resources before delegation."""
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super(AwsCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(AwsCloudProvider, self).setup()
+
+ aws_config_path = os.path.expanduser('~/.aws')
+
+ if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
+ raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ """Request AWS credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ credentials = response['aws']['credentials']
+
+ values = dict(
+ ACCESS_KEY=credentials['access_key'],
+ SECRET_KEY=credentials['secret_key'],
+ SECURITY_TOKEN=credentials['session_token'],
+ REGION='us-east-1',
+ )
+
+ display.sensitive.add(values['SECRET_KEY'])
+ display.sensitive.add(values['SECURITY_TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class AwsCloudEnvironment(CloudEnvironment):
+ """AWS cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('aws_secret_key'))
+ display.sensitive.add(ansible_vars.get('security_token'))
+
+ if 'aws_cleanup' not in ansible_vars:
+ ansible_vars['aws_cleanup'] = not self.managed
+
+ env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ callback_plugins=['aws_resource_actions'],
+ )
+
+ def on_failure(self, target, tries):
+ """
+ :type target: TestTarget
+ :type tries: int
+ """
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
+ 'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.'
+ % target.name)
diff --git a/test/lib/ansible_test/_internal/cloud/azure.py b/test/lib/ansible_test/_internal/cloud/azure.py
new file mode 100644
index 00000000..02465eed
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/azure.py
@@ -0,0 +1,213 @@
+"""Azure plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..http import (
+ HttpClient,
+ urlparse,
+ urlunparse,
+ parse_qs,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class AzureCloudProvider(CloudProvider):
+ """Azure cloud provider plugin. Sets up cloud resources before delegation."""
+ SHERLOCK_CONFIG_PATH = os.path.expanduser('~/.ansible-sherlock-ci.cfg')
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(AzureCloudProvider, self).__init__(args)
+
+ self.aci = None
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
+ return
+
+ super(AzureCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(AzureCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ get_config(self.config_path) # check required variables
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.aci:
+ self.aci.stop()
+
+ super(AzureCloudProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Request Azure credentials through Sherlock."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+ response = {}
+
+ if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
+ sherlock_uri = read_text_file(self.SHERLOCK_CONFIG_PATH).splitlines()[0].strip() + '&rgcount=2'
+
+ parts = urlparse(sherlock_uri)
+ query_string = parse_qs(parts.query)
+ base_uri = urlunparse(parts[:4] + ('', ''))
+
+ if 'code' not in query_string:
+ example_uri = 'https://example.azurewebsites.net/api/sandbox-provisioning'
+ raise ApplicationError('The Sherlock URI must include the API key in the query string. Example: %s?code=xxx' % example_uri)
+
+ display.info('Initializing azure/sherlock from: %s' % base_uri, verbosity=1)
+
+ http = HttpClient(self.args)
+ result = http.get(sherlock_uri)
+
+ display.info('Started azure/sherlock from: %s' % base_uri, verbosity=1)
+
+ if not self.args.explain:
+ response = result.json()
+ else:
+ aci = self._create_ansible_core_ci()
+
+ aci_result = aci.start()
+
+ if not self.args.explain:
+ response = aci_result['azure']
+ self.aci = aci
+
+ if not self.args.explain:
+ values = dict(
+ AZURE_CLIENT_ID=response['clientId'],
+ AZURE_SECRET=response['clientSecret'],
+ AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
+ AZURE_TENANT=response['tenantId'],
+ RESOURCE_GROUP=response['resourceGroupNames'][0],
+ RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
+ )
+
+ display.sensitive.add(values['AZURE_SECRET'])
+
+ config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))
+
+ config = '[default]\n' + config
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'azure', 'azure', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class AzureCloudEnvironment(CloudEnvironment):
+ """Azure cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = get_config(self.config_path)
+
+ display.sensitive.add(env_vars.get('AZURE_SECRET'))
+ display.sensitive.add(env_vars.get('AZURE_PASSWORD'))
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+ def on_failure(self, target, tries):
+ """
+ :type target: TestTarget
+ :type tries: int
+ """
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the test policy may need to be updated. '
+ 'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
+
+
+def get_config(config_path):
+ """
+ :type config_path: str
+ :rtype: dict[str, str]
+ """
+ parser = ConfigParser()
+ parser.read(config_path)
+
+ config = dict((key.upper(), value) for key, value in parser.items('default'))
+
+ rg_vars = (
+ 'RESOURCE_GROUP',
+ 'RESOURCE_GROUP_SECONDARY',
+ )
+
+ sp_vars = (
+ 'AZURE_CLIENT_ID',
+ 'AZURE_SECRET',
+ 'AZURE_SUBSCRIPTION_ID',
+ 'AZURE_TENANT',
+ )
+
+ ad_vars = (
+ 'AZURE_AD_USER',
+ 'AZURE_PASSWORD',
+ 'AZURE_SUBSCRIPTION_ID',
+ )
+
+ rg_ok = all(var in config for var in rg_vars)
+ sp_ok = all(var in config for var in sp_vars)
+ ad_ok = all(var in config for var in ad_vars)
+
+ if not rg_ok:
+ raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))
+
+ if not sp_ok and not ad_ok:
+ raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
+ ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))
+
+ return config
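+
+
+# A quick check of the validation above against a hypothetical service-principal
+# config: the resource groups plus the four sp_vars satisfy get_config().
+_example_azure_config = {
+    'RESOURCE_GROUP': 'rg-one',
+    'RESOURCE_GROUP_SECONDARY': 'rg-two',
+    'AZURE_CLIENT_ID': 'example-id',
+    'AZURE_SECRET': 'example-secret',
+    'AZURE_SUBSCRIPTION_ID': 'example-sub',
+    'AZURE_TENANT': 'example-tenant',
+}
+assert all(var in _example_azure_config for var in ('RESOURCE_GROUP', 'RESOURCE_GROUP_SECONDARY'))
+assert all(var in _example_azure_config for var in ('AZURE_CLIENT_ID', 'AZURE_SECRET', 'AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT'))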
diff --git a/test/lib/ansible_test/_internal/cloud/cloudscale.py b/test/lib/ansible_test/_internal/cloud/cloudscale.py
new file mode 100644
index 00000000..8e5885b2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/cloudscale.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Cloudscale plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import ConfigParser, display
+
+
+class CloudscaleCloudProvider(CloudProvider):
+ """Cloudscale cloud provider plugin. Sets up cloud resources before
+ delegation.
+ """
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(CloudscaleCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(CloudscaleCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(CloudscaleCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s'
+ % (self.platform, self.config_static_path),
+ verbosity=1)
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class CloudscaleCloudEnvironment(CloudEnvironment):
+ """Cloudscale cloud environment plugin. Updates integration test environment
+ after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ CLOUDSCALE_API_TOKEN=parser.get('default', 'cloudscale_api_token'),
+ )
+
+ display.sensitive.add(env_vars['CLOUDSCALE_API_TOKEN'])
+
+ ansible_vars = dict(
+ cloudscale_resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/cs.py b/test/lib/ansible_test/_internal/cloud/cs.py
new file mode 100644
index 00000000..d028d9c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/cs.py
@@ -0,0 +1,300 @@
+"""CloudStack plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ ApplicationError,
+ display,
+ SubprocessError,
+ ConfigParser,
+)
+
+from ..http import (
+ HttpClient,
+ HttpError,
+ urlparse,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ docker_network_inspect,
+ docker_exec,
+ get_docker_container_id,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+
+class CsCloudProvider(CloudProvider):
+ """CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(CsCloudProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'quay.io/ansible/cloudstack-test-container:1.2.0'
+ self.container_name = ''
+ self.endpoint = ''
+ self.host = ''
+ self.port = 0
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(CsCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ if self.managed:
+ return ['-R', '8888:%s:8888' % get_docker_hostname()]
+
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ if self.ci_provider.code:
+ docker_rm(self.args, self.container_name)
+ elif not self.args.explain:
+ display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
+
+ super(CsCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ """Configure CloudStack tests for use with static configuration."""
+ parser = ConfigParser()
+ parser.read(self.config_static_path)
+
+ self.endpoint = parser.get('cloudstack', 'endpoint')
+
+ parts = urlparse(self.endpoint)
+
+ self.host = parts.hostname
+
+ if not self.host:
+ raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
+
+ if parts.port:
+ self.port = parts.port
+ elif parts.scheme == 'http':
+ self.port = 80
+ elif parts.scheme == 'https':
+ self.port = 443
+ else:
+ raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
+
+ display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
+
+ self._wait_for_service()
+
+ def _setup_dynamic(self):
+ """Create a CloudStack simulator using docker."""
+ config = self._read_config_template()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0]['State']['Running']:
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
+ else:
+ display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
+ docker_pull(self.args, self.image)
+ docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
+
+ # apply work-around for OverlayFS issue
+ # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
+ docker_exec(self.args, self.container_name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
+
+ if not self.args.explain:
+ display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')
+
+ container_id = get_docker_container_id()
+
+ if container_id:
+ self.host = self._get_simulator_address()
+ display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
+ else:
+ self.host = get_docker_hostname()
+
+ self.port = 8888
+ self.endpoint = 'http://%s:%d' % (self.host, self.port)
+
+ self._wait_for_service()
+
+ if self.args.explain:
+ values = dict(
+ HOST=self.host,
+ PORT=str(self.port),
+ )
+ else:
+ credentials = self._get_credentials()
+
+ if self.args.docker:
+ host = self.DOCKER_SIMULATOR_NAME
+ elif self.args.remote:
+ host = 'localhost'
+ else:
+ host = self.host
+
+ values = dict(
+ HOST=host,
+ PORT=str(self.port),
+ KEY=credentials['apikey'],
+ SECRET=credentials['secretkey'],
+ )
+
+ display.sensitive.add(values['SECRET'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _get_simulator_address(self):
+ current_network = get_docker_preferred_network_name(self.args)
+ networks = docker_network_inspect(self.args, current_network)
+
+ try:
+ network = [network for network in networks if network['Name'] == current_network][0]
+ containers = network['Containers']
+ container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
+ return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+ except Exception:
+ display.error('Failed to process the following docker network inspect output:\n%s' %
+ json.dumps(networks, indent=4, sort_keys=True))
+ raise
+
+ def _wait_for_service(self):
+ """Wait for the CloudStack service endpoint to accept connections."""
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True)
+ endpoint = self.endpoint
+
+ for _iteration in range(1, 30):
+ display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for CloudStack service.')
+
+ def _get_credentials(self):
+ """Wait for the CloudStack simulator to return credentials.
+ :rtype: dict[str, str]
+ """
+ client = HttpClient(self.args, always=True)
+ endpoint = '%s/admin.json' % self.endpoint
+
+ for _iteration in range(1, 30):
+ display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
+
+ response = client.get(endpoint)
+
+ if response.status_code == 200:
+ try:
+ return response.json()
+ except HttpError as ex:
+ display.error(ex)
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for CloudStack credentials.')
+
+
+class CsCloudEnvironment(CloudEnvironment):
+ """CloudStack cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ config = dict(parser.items('default'))
+
+ env_vars = dict(
+ CLOUDSTACK_ENDPOINT=config['endpoint'],
+ CLOUDSTACK_KEY=config['key'],
+ CLOUDSTACK_SECRET=config['secret'],
+ CLOUDSTACK_TIMEOUT=config['timeout'],
+ )
+
+ display.sensitive.add(env_vars['CLOUDSTACK_SECRET'])
+
+ ansible_vars = dict(
+ cs_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
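+
+
+# A standalone sketch of the port derivation in CsCloudProvider._setup_static:
+# urlparse yields an explicit port when present, otherwise the scheme implies one.
+_example_parts = urlparse('https://cloudstack.example.com/client/api')
+_example_port = _example_parts.port or {'http': 80, 'https': 443}[_example_parts.scheme]
+assert (_example_parts.hostname, _example_port) == ('cloudstack.example.com', 443)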
diff --git a/test/lib/ansible_test/_internal/cloud/fallaxy.py b/test/lib/ansible_test/_internal/cloud/fallaxy.py
new file mode 100644
index 00000000..504094bd
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/fallaxy.py
@@ -0,0 +1,177 @@
+"""Fallaxy (ansible-galaxy) plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import uuid
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+)
+
+
+class FallaxyProvider(CloudProvider):
+ """Fallaxy plugin.
+
+ Sets up Fallaxy (ansible-galaxy) stub server for tests.
+
+    Its source resides at: https://github.com/ansible/fallaxy-test-container
+ """
+
+ DOCKER_SIMULATOR_NAME = 'fallaxy-stub'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(FallaxyProvider, self).__init__(args)
+
+ if os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_FALLAXY_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/fallaxy-test-container:2.0.1'
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(FallaxyProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(FallaxyProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ container_id = get_docker_container_id()
+
+ if container_id:
+ display.info('Running in docker container: %s' % container_id, verbosity=1)
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info('%s Fallaxy simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1)
+
+ fallaxy_port = 8080
+ fallaxy_token = str(uuid.uuid4()).replace('-', '')
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(fallaxy_port),) * 2),
+ ]
+
+ if not os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name, '-e', 'FALLAXY_TOKEN=%s' % fallaxy_token] + publish_ports,
+ )
+
+ if self.args.docker:
+ fallaxy_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ fallaxy_host = self._get_simulator_address()
+ display.info('Found Fallaxy simulator container address: %s' % fallaxy_host, verbosity=1)
+ else:
+ fallaxy_host = 'localhost'
+
+ self._set_cloud_config('FALLAXY_HOST', fallaxy_host)
+ self._set_cloud_config('FALLAXY_PORT', str(fallaxy_port))
+ self._set_cloud_config('FALLAXY_TOKEN', fallaxy_token)
+
+ def _get_simulator_address(self):
+ results = docker_inspect(self.args, self.container_name)
+ ipaddress = results[0]['NetworkSettings']['IPAddress']
+ return ipaddress
+
+ def _setup_static(self):
+ raise NotImplementedError()
+
+
+class FallaxyEnvironment(CloudEnvironment):
+ """Fallaxy environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ fallaxy_token = self._get_cloud_config('FALLAXY_TOKEN')
+ fallaxy_host = self._get_cloud_config('FALLAXY_HOST')
+ fallaxy_port = self._get_cloud_config('FALLAXY_PORT')
+
+ return CloudEnvironmentConfig(
+ ansible_vars=dict(
+ fallaxy_token=fallaxy_token,
+ fallaxy_galaxy_server='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+ fallaxy_ah_server='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+ ),
+ env_vars=dict(
+ FALLAXY_TOKEN=fallaxy_token,
+ FALLAXY_GALAXY_SERVER='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+ FALLAXY_AH_SERVER='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+ ),
+ )
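+
+
+# The stub's auth token (see _setup_dynamic above) is just a dash-stripped UUID4;
+# a standalone sketch:
+_example_token = str(uuid.uuid4()).replace('-', '')
+assert len(_example_token) == 32  # 32 hex characters once the four dashes are removed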
diff --git a/test/lib/ansible_test/_internal/cloud/foreman.py b/test/lib/ansible_test/_internal/cloud/foreman.py
new file mode 100644
index 00000000..7517f1f6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/foreman.py
@@ -0,0 +1,191 @@
+"""Foreman plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class ForemanProvider(CloudProvider):
+ """Foreman plugin.
+
+ Sets up Foreman stub server for tests.
+ """
+
+ DOCKER_SIMULATOR_NAME = 'foreman-stub'
+
+ DOCKER_IMAGE = 'quay.io/ansible/foreman-test-container:1.4.0'
+ """Default image to run Foreman stub from.
+
+ The simulator must be pinned to a specific version
+ to guarantee CI passes with the version used.
+
+    Its source resides at:
+ https://github.com/ansible/foreman-test-container
+ """
+
+ def __init__(self, args):
+ """Set up container references for provider.
+
+ :type args: TestConfig
+ """
+ super(ForemanProvider, self).__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_FRMNSIM_CONTAINER')
+ """Overrides target container, might be used for development.
+
+ Use ANSIBLE_FRMNSIM_CONTAINER=whatever_you_want if you want
+ to use other image. Omit/empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning(
+ 'Excluding tests marked "%s" '
+ 'which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+ )
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(ForemanProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(ForemanProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Spawn a Foreman stub within docker container."""
+ foreman_port = 8080
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info(
+ '%s Foreman simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1,
+ )
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(foreman_port), ) * 2),
+ ]
+
+ if not self.__container_from_env:
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ foreman_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ foreman_host = self._get_simulator_address()
+ display.info(
+ 'Found Foreman simulator container address: %s'
+ % foreman_host, verbosity=1
+ )
+ else:
+ foreman_host = get_docker_hostname()
+
+ self._set_cloud_config('FOREMAN_HOST', foreman_host)
+ self._set_cloud_config('FOREMAN_PORT', str(foreman_port))
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError
+
+
+class ForemanEnvironment(CloudEnvironment):
+ """Foreman environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = dict(
+ FOREMAN_HOST=self._get_cloud_config('FOREMAN_HOST'),
+ FOREMAN_PORT=self._get_cloud_config('FOREMAN_PORT'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/gcp.py b/test/lib/ansible_test/_internal/cloud/gcp.py
new file mode 100644
index 00000000..c8de1835
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/gcp.py
@@ -0,0 +1,62 @@
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""GCP plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+
+class GcpCloudProvider(CloudProvider):
+ """GCP cloud provider plugin. Sets up cloud resources before delegation."""
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(GcpCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(GcpCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ display.notice(
+ 'Static configuration could not be used. Are you missing a template file?'
+ )
+
+
+class GcpCloudEnvironment(CloudEnvironment):
+ """GCP cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
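+ # merge every key under the [default] section of the config file
+ # into the Ansible variables for the integration tests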
+ ansible_vars.update(dict(parser.items('default')))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/hcloud.py b/test/lib/ansible_test/_internal/cloud/hcloud.py
new file mode 100644
index 00000000..5902b566
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/hcloud.py
@@ -0,0 +1,116 @@
+"""Hetzner Cloud plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class HcloudCloudProvider(CloudProvider):
+ """Hetzner Cloud provider plugin. Sets up cloud resources before
+ delegation.
+ """
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(HcloudCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super(HcloudCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(HcloudCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ """Request Hetzner credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ token = response['hetzner']['token']
+
+ display.sensitive.add(token)
+ display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
+
+ values = dict(
+ TOKEN=token,
+ )
+
+ display.sensitive.add(values['TOKEN'])
+
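+ # substitute the issued token for its placeholder in the config template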
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class HcloudCloudEnvironment(CloudEnvironment):
+ """Hetzner Cloud cloud environment plugin. Updates integration test environment
+ after delegation.
+ """
+
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
+ )
+
+ display.sensitive.add(env_vars['HCLOUD_TOKEN'])
+
+ ansible_vars = dict(
+ hcloud_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/nios.py b/test/lib/ansible_test/_internal/cloud/nios.py
new file mode 100644
index 00000000..b9a1a4e4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/nios.py
@@ -0,0 +1,193 @@
+"""NIOS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class NiosProvider(CloudProvider):
+ """Nios plugin.
+
+ Sets up NIOS mock server for tests.
+ """
+
+ DOCKER_SIMULATOR_NAME = 'nios-simulator'
+
+ DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.3.0'
+ """Default image to run the nios simulator.
+
+ The simulator must be pinned to a specific version
+ to guarantee CI passes with the version used.
+
+ Its source resides at:
+ https://github.com/ansible/nios-test-container
+ """
+
+ def __init__(self, args):
+ """Set up container references for provider.
+
+ :type args: TestConfig
+ """
+ super(NiosProvider, self).__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
+ """Overrides target container, might be used for development.
+
+ Use ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want if you want
+ to use other image. Omit/empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning(
+ 'Excluding tests marked "%s" '
+ 'which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+ )
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(NiosProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(NiosProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Spawn a NIOS simulator within docker container."""
+ nios_port = 443
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info(
+ '%s NIOS simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1,
+ )
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(nios_port), ) * 2),
+ ]
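+ # e.g. publish_ports == ['-p', '443:443'] here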
+
+ if not self.__container_from_env:
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ nios_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ nios_host = self._get_simulator_address()
+ display.info(
+ 'Found NIOS simulator container address: %s'
+ % nios_host, verbosity=1
+ )
+ else:
+ nios_host = get_docker_hostname()
+
+ self._set_cloud_config('NIOS_HOST', nios_host)
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError
+
+
+class NiosEnvironment(CloudEnvironment):
+ """NIOS environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ ansible_vars = dict(
+ nios_provider=dict(
+ host=self._get_cloud_config('NIOS_HOST'),
+ username='admin',
+ password='infoblox',
+ ),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/opennebula.py b/test/lib/ansible_test/_internal/cloud/opennebula.py
new file mode 100644
index 00000000..559093e3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/opennebula.py
@@ -0,0 +1,66 @@
+"""OpenNebula plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+
+class OpenNebulaCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def filter(self, targets, exclude):
+ """ no need to filter modules, they can either run from config file or from fixtures"""
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(OpenNebulaCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ display.info('No config file provided, will run tests from fixtures')
+
+ config = self._read_config_template()
+ values = dict(
+ URL="http://localhost/RPC2",
+ USERNAME='oneadmin',
+ PASSWORD='onepass',
+ FIXTURES='true',
+ REPLAY='true',
+ )
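+ # FIXTURES/REPLAY point the tests at recorded fixture data
+ # instead of a live OpenNebula endpoint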
+ config = self._populate_config_template(config, values)
+ self._write_config(config)
+
+
+class OpenNebulaCloudEnvironment(CloudEnvironment):
+ """
+ Updates the integration test environment after delegation. Will set up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('opennebula_password'))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/openshift.py b/test/lib/ansible_test/_internal/cloud/openshift.py
new file mode 100644
index 00000000..450816bf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/openshift.py
@@ -0,0 +1,236 @@
+"""OpenShift plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ find_executable,
+ ApplicationError,
+ display,
+ SubprocessError,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..docker_util import (
+ docker_exec,
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ docker_network_inspect,
+ get_docker_container_id,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+
+class OpenShiftCloudProvider(CloudProvider):
+ """OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_CONTAINER_NAME = 'openshift-origin'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(OpenShiftCloudProvider, self).__init__(args, config_extension='.kubeconfig')
+
+ # The image must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'openshift/origin:v3.9.0'
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(OpenShiftCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ if self.managed:
+ return ['-R', '8443:%s:8443' % get_docker_hostname()]
+
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_CONTAINER_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(OpenShiftCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ """Configure OpenShift tests for use with static configuration."""
+ config = read_text_file(self.config_static_path)
+
+ match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
+
+ if match:
+ endpoint = match.group('server')
+ self._wait_for_service(endpoint)
+ else:
+ display.warning('Could not find OpenShift endpoint in kubeconfig. Skipping check for OpenShift service availability.')
+
+ def _setup_dynamic(self):
+ """Create a OpenShift container using docker."""
+ self.container_name = self.DOCKER_CONTAINER_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0]['State']['Running']:
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing OpenShift docker container.', verbosity=1)
+ else:
+ display.info('Starting a new OpenShift docker container.', verbosity=1)
+ docker_pull(self.args, self.image)
+ cmd = ['start', 'master', '--listen', 'https://0.0.0.0:8443']
+ docker_run(self.args, self.image, ['-d', '-p', '8443:8443', '--name', self.container_name], cmd)
+
+ container_id = get_docker_container_id()
+
+ if container_id:
+ host = self._get_container_address()
+ display.info('Found OpenShift container address: %s' % host, verbosity=1)
+ else:
+ host = get_docker_hostname()
+
+ port = 8443
+ endpoint = 'https://%s:%s/' % (host, port)
+
+ self._wait_for_service(endpoint)
+
+ if self.args.explain:
+ config = '# Unknown'
+ else:
+ if self.args.docker:
+ host = self.DOCKER_CONTAINER_NAME
+ elif self.args.remote:
+ host = 'localhost'
+
+ server = 'https://%s:%s' % (host, port)
+ config = self._get_config(server)
+
+ self._write_config(config)
+
+ def _get_container_address(self):
+ current_network = get_docker_preferred_network_name(self.args)
+ networks = docker_network_inspect(self.args, current_network)
+
+ try:
+ network = [network for network in networks if network['Name'] == current_network][0]
+ containers = network['Containers']
+ container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_CONTAINER_NAME][0]
+ return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+ except Exception:
+ display.error('Failed to process the following docker network inspect output:\n%s' %
+ json.dumps(networks, indent=4, sort_keys=True))
+ raise
+
+ def _wait_for_service(self, endpoint):
+ """Wait for the OpenShift service endpoint to accept connections.
+ :type endpoint: str
+ """
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True, insecure=True)
+
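+ # poll up to 29 times, 10 seconds apart: just under five minutes overall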
+ for dummy in range(1, 30):
+ display.info('Waiting for OpenShift service: %s' % endpoint, verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for OpenShift service.')
+
+ def _get_config(self, server):
+ """Get OpenShift config from container.
+ :type server: str
+ :rtype: dict[str, str]
+ """
+ cmd = ['cat', '/var/lib/origin/openshift.local.config/master/admin.kubeconfig']
+
+ stdout, dummy = docker_exec(self.args, self.container_name, cmd, capture=True)
+
+ config = stdout
+ config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
+ config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
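+ # rewrite the kubeconfig so it targets the reachable server address and
+ # skips TLS verification, since the container's CA data would not match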
+
+ return config
+
+
+class OpenShiftCloudEnvironment(CloudEnvironment):
+ """OpenShift cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = dict(
+ K8S_AUTH_KUBECONFIG=self.config_path,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/scaleway.py b/test/lib/ansible_test/_internal/cloud/scaleway.py
new file mode 100644
index 00000000..22abe197
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/scaleway.py
@@ -0,0 +1,72 @@
+"""Scaleway plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ ConfigParser,
+ display,
+)
+
+
+class ScalewayCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(ScalewayCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(ScalewayCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(ScalewayCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class ScalewayCloudEnvironment(CloudEnvironment):
+ """
+ Updates the integration test environment after delegation. Will set up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ SCW_API_KEY=parser.get('default', 'key'),
+ SCW_ORG=parser.get('default', 'org')
+ )
+
+ display.sensitive.add(env_vars['SCW_API_KEY'])
+
+ ansible_vars = dict(
+ scw_org=parser.get('default', 'org'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/tower.py b/test/lib/ansible_test/_internal/cloud/tower.py
new file mode 100644
index 00000000..227d170c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/tower.py
@@ -0,0 +1,255 @@
+"""Tower plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from ..util import (
+ display,
+ ApplicationError,
+ SubprocessError,
+ ConfigParser,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class TowerCloudProvider(CloudProvider):
+ """Tower cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(TowerCloudProvider, self).__init__(args)
+
+ self.aci = None
+ self.version = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = get_tower_aci(self.args)
+
+ if aci.available:
+ return
+
+ super(TowerCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(TowerCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def check_tower_version(self, fallback=None):
+ """Check the Tower version being tested and determine the correct CLI version to use.
+ :type fallback: str | None
+ """
+ tower_cli_version_map = {
+ '3.1.5': '3.1.8',
+ '3.2.3': '3.3.0',
+ '3.3.5': '3.3.3',
+ '3.4.3': '3.3.3',
+ '3.6.3': '3.3.8',
+ }
+
+ cli_version = tower_cli_version_map.get(self.version, fallback)
+
+ if not cli_version:
+ raise ApplicationError('Mapping to ansible-tower-cli version required for Tower version: %s' % self.version)
+
+ self._set_cloud_config('tower_cli_version', cli_version)
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ # cleanup on success or failure is not yet supported due to how cleanup is called
+ if self.aci and self.args.remote_terminate == 'always':
+ self.aci.stop()
+
+ super(TowerCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ config = TowerConfig.parse(self.config_static_path)
+
+ self.version = config.version
+ self.check_tower_version()
+
+ def _setup_dynamic(self):
+ """Request Tower credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ # temporary solution to allow version selection
+ self.version = os.environ.get('TOWER_VERSION', '3.6.3')
+ self.check_tower_version(os.environ.get('TOWER_CLI_VERSION'))
+
+ aci = get_tower_aci(self.args, self.version)
+ aci.start()
+ aci.wait()
+
+ connection = aci.get()
+
+ config = self._read_config_template()
+
+ if not self.args.explain:
+ self.aci = aci
+
+ values = dict(
+ VERSION=self.version,
+ HOST=connection.hostname,
+ USERNAME=connection.username,
+ PASSWORD=connection.password,
+ )
+
+ display.sensitive.add(values['PASSWORD'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+
+class TowerCloudEnvironment(CloudEnvironment):
+ """Tower cloud environment plugin. Updates integration test environment after delegation."""
+ def setup(self):
+ """Setup which should be done once per environment instead of once per test target."""
+ self.setup_cli()
+ self.disable_pendo()
+
+ def setup_cli(self):
+ """Install the correct Tower CLI for the version of Tower being tested."""
+ tower_cli_version = self._get_cloud_config('tower_cli_version')
+
+ display.info('Installing Tower CLI version: %s' % tower_cli_version)
+
+ cmd = self.args.pip_command + ['install', '--disable-pip-version-check', 'ansible-tower-cli==%s' % tower_cli_version]
+
+ run_command(self.args, cmd)
+
+ cmd = ['tower-cli', 'config', 'verify_ssl', 'false']
+ run_command(self.args, cmd, capture=True)
+
+ def disable_pendo(self):
+ """Disable Pendo tracking."""
+ display.info('Disabling Pendo tracking')
+
+ config = TowerConfig.parse(self.config_path)
+
+ # tower-cli does not recognize TOWER_ environment variables
+ cmd = ['tower-cli', 'setting', 'modify', 'PENDO_TRACKING_STATE', 'off',
+ '-h', config.host, '-u', config.username, '-p', config.password]
+
+ attempts = 60
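+ # retry for up to five minutes (60 attempts, 5 seconds apart)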
+
+ while True:
+ attempts -= 1
+
+ try:
+ run_command(self.args, cmd, capture=True)
+ return
+ except SubprocessError as ex:
+ if not attempts:
+ raise ApplicationError('Timed out trying to disable Pendo tracking:\n%s' % ex)
+
+ time.sleep(5)
+
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ config = TowerConfig.parse(self.config_path)
+
+ env_vars = config.environment
+
+ ansible_vars = dict((key.lower(), value) for key, value in env_vars.items())
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+
+class TowerConfig:
+ """Tower settings."""
+ def __init__(self, values):
+ self.version = values.get('version')
+ self.host = values.get('host')
+ self.username = values.get('username')
+ self.password = values.get('password')
+
+ if self.password:
+ display.sensitive.add(self.password)
+
+ @property
+ def environment(self):
+ """Tower settings as environment variables.
+ :rtype: dict[str, str]
+ """
+ env = dict(
+ TOWER_VERSION=self.version,
+ TOWER_HOST=self.host,
+ TOWER_USERNAME=self.username,
+ TOWER_PASSWORD=self.password,
+ )
+
+ return env
+
+ @staticmethod
+ def parse(path):
+ """
+ :type path: str
+ :rtype: TowerConfig
+ """
+ parser = ConfigParser()
+ parser.read(path)
+
+ keys = (
+ 'version',
+ 'host',
+ 'username',
+ 'password',
+ )
+
+ values = dict((k, parser.get('default', k)) for k in keys)
+ config = TowerConfig(values)
+
+ missing = [k for k in keys if not values.get(k)]
+
+ if missing:
+ raise ApplicationError('Missing or empty Tower configuration value(s): %s' % ', '.join(missing))
+
+ return config
+
+
+def get_tower_aci(args, version=None):
+ """
+ :type args: EnvironmentConfig
+ :type version: str | None
+ :rtype: AnsibleCoreCI
+ """
+ if version:
+ persist = True
+ else:
+ version = ''
+ persist = False
+
+ return AnsibleCoreCI(args, 'tower', version, persist=persist, stage=args.remote_stage, provider=args.remote_provider)
diff --git a/test/lib/ansible_test/_internal/cloud/vcenter.py b/test/lib/ansible_test/_internal/cloud/vcenter.py
new file mode 100644
index 00000000..3b38a19e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/vcenter.py
@@ -0,0 +1,232 @@
+"""VMware vCenter plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+ ConfigParser,
+ ApplicationError,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class VcenterProvider(CloudProvider):
+ """VMware vcenter/esx plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(VcenterProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/vcenter-test-container:1.7.0'
+ self.container_name = ''
+
+ # VMware tests can be run on govcsim or BYO with a static config file.
+ # The simulator is the default if no config is provided.
+ self.vmware_test_platform = os.environ.get('VMWARE_TEST_PLATFORM', 'govcsim')
+ self.insecure = False
+ self.proxy = None
+ self.platform = 'vcenter'
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if self.vmware_test_platform == 'govcsim' or (self.vmware_test_platform == '' and not os.path.isfile(self.config_static_path)):
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ elif self.vmware_test_platform == 'static':
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(VcenterProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(VcenterProvider, self).setup()
+
+ self._set_cloud_config('vmware_test_platform', self.vmware_test_platform)
+ if self.vmware_test_platform == 'govcsim':
+ self._setup_dynamic_simulator()
+ self.managed = True
+ elif self.vmware_test_platform == 'static':
+ self._use_static_config()
+ self._setup_static()
+ else:
+ raise ApplicationError('Unknown vmware_test_platform: %s' % self.vmware_test_platform)
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(VcenterProvider, self).cleanup()
+
+ def _setup_dynamic_simulator(self):
+ """Create a vcenter simulator using docker."""
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing vCenter simulator docker container.', verbosity=1)
+ else:
+ display.info('Starting a new vCenter simulator docker container.', verbosity=1)
+
+ if not self.args.docker and not container_id:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', '1443:443',
+ '-p', '8080:8080',
+ '-p', '8989:8989',
+ '-p', '5000:5000', # control port for flask app in simulator
+ ]
+ else:
+ publish_ports = []
+
+ if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ vcenter_hostname = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ vcenter_hostname = self._get_simulator_address()
+ display.info('Found vCenter simulator container address: %s' % vcenter_hostname, verbosity=1)
+ else:
+ vcenter_hostname = get_docker_hostname()
+
+ self._set_cloud_config('vcenter_hostname', vcenter_hostname)
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ if not os.path.exists(self.config_static_path):
+ raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
+
+ parser = ConfigParser({
+ 'vcenter_port': '443',
+ 'vmware_proxy_host': '',
+ 'vmware_proxy_port': '8080'})
+ parser.read(self.config_static_path)
+
+ if parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false'):
+ self.insecure = True
+ proxy_host = parser.get('DEFAULT', 'vmware_proxy_host')
+ proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port'))
+ if proxy_host and proxy_port:
+ self.proxy = 'http://%s:%d' % (proxy_host, proxy_port)
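+ # e.g. 'http://proxy.example.com:8080' (hypothetical host and port values)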
+
+
+class VcenterEnvironment(CloudEnvironment):
+ """VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ try:
+ # We may be in a container, so we cannot simply read VMWARE_TEST_PLATFORM;
+ # we use a try/except instead.
+ parser = ConfigParser()
+ parser.read(self.config_path) # static
+
+ env_vars = dict()
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+ ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
+ except KeyError: # govcsim
+ env_vars = dict(
+ VCENTER_HOSTNAME=self._get_cloud_config('vcenter_hostname'),
+ VCENTER_USERNAME='user',
+ VCENTER_PASSWORD='pass',
+ )
+
+ ansible_vars = dict(
+ vcsim=self._get_cloud_config('vcenter_hostname'),
+ vcenter_hostname=self._get_cloud_config('vcenter_hostname'),
+ vcenter_username='user',
+ vcenter_password='pass',
+ )
+ # Shippable starts ansible-test from within an existing container,
+ # and in that case we don't have to change the vCenter port.
+ if not self.args.docker and not get_docker_container_id():
+ ansible_vars['vcenter_port'] = '1443'
+
+ for key, value in ansible_vars.items():
+ if key.endswith('_password'):
+ display.sensitive.add(value)
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ module_defaults={
+ 'group/vmware': {
+ 'hostname': ansible_vars['vcenter_hostname'],
+ 'username': ansible_vars['vcenter_username'],
+ 'password': ansible_vars['vcenter_password'],
+ 'port': ansible_vars.get('vcenter_port', '443'),
+ 'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),
+ },
+ },
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/vultr.py b/test/lib/ansible_test/_internal/cloud/vultr.py
new file mode 100644
index 00000000..ce6184f7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/vultr.py
@@ -0,0 +1,71 @@
+"""Vultr plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ ConfigParser,
+ display,
+)
+
+
+class VultrCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(VultrCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(VultrCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(VultrCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class VultrCloudEnvironment(CloudEnvironment):
+ """
+ Updates the integration test environment after delegation. Will set up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ VULTR_API_KEY=parser.get('default', 'key'),
+ )
+
+ display.sensitive.add(env_vars['VULTR_API_KEY'])
+
+ ansible_vars = dict(
+ vultr_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
new file mode 100644
index 00000000..a3c31959
--- /dev/null
+++ b/test/lib/ansible_test/_internal/config.py
@@ -0,0 +1,356 @@
+"""Configuration classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from . import types as t
+
+from .util import (
+ find_python,
+ generate_pip_command,
+ ApplicationError,
+)
+
+from .util_common import (
+ docker_qualify_image,
+ get_docker_completion,
+ get_remote_completion,
+ CommonConfig,
+)
+
+from .metadata import (
+ Metadata,
+)
+
+from .data import (
+ data_context,
+)
+
+try:
+ TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound='IntegrationConfig')
+except AttributeError:
+ TIntegrationConfig = None # pylint: disable=invalid-name
+
+
+class ParsedRemote:
+ """A parsed version of a "remote" string."""
+ def __init__(self, arch, platform, version): # type: (t.Optional[str], str, str) -> None
+ self.arch = arch
+ self.platform = platform
+ self.version = version
+
+ @staticmethod
+ def parse(value): # type: (str) -> t.Optional['ParsedRemote']
+ """Return a ParsedRemote from the given value or None if the syntax is invalid."""
+ parts = value.split('/')
+
+ if len(parts) == 2:
+ arch = None
+ platform, version = parts
+ elif len(parts) == 3:
+ arch, platform, version = parts
+ else:
+ return None
+
+ return ParsedRemote(arch, platform, version)
+
+
+class EnvironmentConfig(CommonConfig):
+ """Configuration common to all commands which execute in an environment."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(EnvironmentConfig, self).__init__(args, command)
+
+ self.local = args.local is True
+ self.venv = args.venv
+ self.venv_system_site_packages = args.venv_system_site_packages
+
+ self.python = args.python if 'python' in args else None # type: str
+
+ self.docker = docker_qualify_image(args.docker) # type: str
+ self.docker_raw = args.docker # type: str
+ self.remote = args.remote # type: str
+
+ if self.remote:
+ self.parsed_remote = ParsedRemote.parse(self.remote)
+
+ if not self.parsed_remote or not self.parsed_remote.platform or not self.parsed_remote.version:
+ raise ApplicationError('Unrecognized remote "%s" syntax. Use "platform/version" or "arch/platform/version".' % self.remote)
+ else:
+ self.parsed_remote = None
+
+ self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool
+ self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool
+ self.docker_keep_git = args.docker_keep_git if 'docker_keep_git' in args else False # type: bool
+ self.docker_seccomp = args.docker_seccomp if 'docker_seccomp' in args else None # type: str
+ self.docker_memory = args.docker_memory if 'docker_memory' in args else None
+ self.docker_terminate = args.docker_terminate if 'docker_terminate' in args else None # type: str
+ self.docker_network = args.docker_network if 'docker_network' in args else None # type: str
+
+ if self.docker_seccomp is None:
+ self.docker_seccomp = get_docker_completion().get(self.docker_raw, {}).get('seccomp', 'default')
+
+ self.remote_stage = args.remote_stage # type: str
+ self.remote_provider = args.remote_provider # type: str
+ self.remote_endpoint = args.remote_endpoint # type: t.Optional[str]
+ self.remote_aws_region = args.remote_aws_region # type: str
+ self.remote_terminate = args.remote_terminate # type: str
+
+ if self.remote_provider == 'default':
+ self.remote_provider = None
+
+ self.requirements = args.requirements # type: bool
+
+ if self.python == 'default':
+ self.python = None
+
+ actual_major_minor = '.'.join(str(i) for i in sys.version_info[:2])
+
+ self.python_version = self.python or actual_major_minor
+ self.python_interpreter = args.python_interpreter
+
+ self.pip_check = args.pip_check
+
+ self.delegate = self.docker or self.remote or self.venv
+ self.delegate_args = [] # type: t.List[str]
+
+ if self.delegate:
+ self.requirements = True
+
+ self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool
+ self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str
+
+ if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled':
+ self.httptester = False
+
+ if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled':
+ self.pip_check = False
+
+ if args.check_python and args.check_python != actual_major_minor:
+ raise ApplicationError('Running under Python %s instead of Python %s as expected.' % (actual_major_minor, args.check_python))
+
+ if self.docker_keep_git:
+ def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add files from the content root .git directory to the payload file list."""
+ for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):
+ paths = [os.path.join(dirpath, filename) for filename in filenames]
+ files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)
+
+ data_context().register_payload_callback(git_callback)
+
+ @property
+ def python_executable(self):
+ """
+ :rtype: str
+ """
+ return find_python(self.python_version)
+
+ @property
+ def pip_command(self):
+ """
+ :rtype: list[str]
+ """
+ return generate_pip_command(self.python_executable)
+
+ def get_delegated_completion(self):
+ """Returns a dictionary of settings specific to the selected delegation system, if any. Otherwise returns an empty dictionary.
+ :rtype: dict[str, str]
+ """
+ if self.docker:
+ return get_docker_completion().get(self.docker_raw, {})
+
+ if self.remote:
+ return get_remote_completion().get(self.remote, {})
+
+ return {}
+
+
+class TestConfig(EnvironmentConfig):
+ """Configuration common to all test commands."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(TestConfig, self).__init__(args, command)
+
+ self.coverage = args.coverage # type: bool
+ self.coverage_label = args.coverage_label # type: str
+ self.coverage_check = args.coverage_check # type: bool
+ self.coverage_config_base_path = None # type: t.Optional[str]
+ self.include = args.include or [] # type: t.List[str]
+ self.exclude = args.exclude or [] # type: t.List[str]
+ self.require = args.require or [] # type: t.List[str]
+
+ self.changed = args.changed # type: bool
+ self.tracked = args.tracked # type: bool
+ self.untracked = args.untracked # type: bool
+ self.committed = args.committed # type: bool
+ self.staged = args.staged # type: bool
+ self.unstaged = args.unstaged # type: bool
+ self.changed_from = args.changed_from # type: str
+ self.changed_path = args.changed_path # type: t.List[str]
+ self.base_branch = args.base_branch # type: str
+
+ self.lint = args.lint if 'lint' in args else False # type: bool
+ self.junit = args.junit if 'junit' in args else False # type: bool
+ self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool
+
+ self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
+ self.metadata_path = None
+
+ if self.coverage_check:
+ self.coverage = True
+
+ def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add the metadata file to the payload file list."""
+ config = self
+
+ if self.metadata_path:
+ files.append((os.path.abspath(config.metadata_path), config.metadata_path))
+
+ data_context().register_payload_callback(metadata_callback)
+
+
+class ShellConfig(EnvironmentConfig):
+ """Configuration for the shell command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(ShellConfig, self).__init__(args, 'shell')
+
+ self.raw = args.raw # type: bool
+
+ if self.raw:
+ self.httptester = False
+
+
+class SanityConfig(TestConfig):
+ """Configuration for the sanity command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(SanityConfig, self).__init__(args, 'sanity')
+
+ self.test = args.test # type: t.List[str]
+ self.skip_test = args.skip_test # type: t.List[str]
+ self.list_tests = args.list_tests # type: bool
+ self.allow_disabled = args.allow_disabled # type: bool
+ self.enable_optional_errors = args.enable_optional_errors # type: bool
+ self.info_stderr = self.lint
+
+
+class IntegrationConfig(TestConfig):
+ """Configuration for the integration command."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(IntegrationConfig, self).__init__(args, command)
+
+ self.start_at = args.start_at # type: str
+ self.start_at_task = args.start_at_task # type: str
+ self.allow_destructive = args.allow_destructive # type: bool
+ self.allow_root = args.allow_root # type: bool
+ self.allow_disabled = args.allow_disabled # type: bool
+ self.allow_unstable = args.allow_unstable # type: bool
+ self.allow_unstable_changed = args.allow_unstable_changed # type: bool
+ self.allow_unsupported = args.allow_unsupported # type: bool
+ self.retry_on_error = args.retry_on_error # type: bool
+ self.continue_on_error = args.continue_on_error # type: bool
+ self.debug_strategy = args.debug_strategy # type: bool
+ self.changed_all_target = args.changed_all_target # type: str
+ self.changed_all_mode = args.changed_all_mode # type: str
+ self.list_targets = args.list_targets # type: bool
+ self.tags = args.tags
+ self.skip_tags = args.skip_tags
+ self.diff = args.diff
+ self.no_temp_workdir = args.no_temp_workdir
+ self.no_temp_unicode = args.no_temp_unicode
+
+ if self.get_delegated_completion().get('temp-unicode', 'enabled') == 'disabled':
+ self.no_temp_unicode = True
+
+ if self.list_targets:
+ self.explain = True
+ self.info_stderr = True
+
+ def get_ansible_config(self): # type: () -> str
+ """Return the path to the Ansible config for the given config."""
+ ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)
+ ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)
+
+ if not os.path.exists(ansible_config_path):
+ # use the default empty configuration unless one has been provided
+ ansible_config_path = super(IntegrationConfig, self).get_ansible_config()
+
+ return ansible_config_path
+
+
+class PosixIntegrationConfig(IntegrationConfig):
+ """Configuration for the posix integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(PosixIntegrationConfig, self).__init__(args, 'integration')
+
+
+class WindowsIntegrationConfig(IntegrationConfig):
+ """Configuration for the windows integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration')
+
+ self.windows = args.windows # type: t.List[str]
+ self.inventory = args.inventory # type: str
+
+ if self.windows:
+ self.allow_destructive = True
+
+
+class NetworkIntegrationConfig(IntegrationConfig):
+ """Configuration for the network integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(NetworkIntegrationConfig, self).__init__(args, 'network-integration')
+
+ self.platform = args.platform # type: t.List[str]
+ self.platform_collection = dict(args.platform_collection or []) # type: t.Dict[str, str]
+ self.platform_connection = dict(args.platform_connection or []) # type: t.Dict[str, str]
+ self.inventory = args.inventory # type: str
+ self.testcase = args.testcase # type: str
+
+
+class UnitsConfig(TestConfig):
+ """Configuration for the units command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(UnitsConfig, self).__init__(args, 'units')
+
+ self.collect_only = args.collect_only # type: bool
+ self.num_workers = args.num_workers # type: int
+
+ self.requirements_mode = args.requirements_mode if 'requirements_mode' in args else ''
+
+ if self.requirements_mode == 'only':
+ self.requirements = True
+ elif self.requirements_mode == 'skip':
+ self.requirements = False
diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py
new file mode 100644
index 00000000..f4307822
--- /dev/null
+++ b/test/lib/ansible_test/_internal/constants.py
@@ -0,0 +1,10 @@
+"""Constants used by ansible-test. Imports should not be used in this file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
+# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
+SOFT_RLIMIT_NOFILE = 1024
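+# A consumer would apply it roughly like this (sketch, not part of this file):
+#   import resource
+#   _soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+#   resource.setrlimit(resource.RLIMIT_NOFILE, (min(SOFT_RLIMIT_NOFILE, _hard), _hard))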
+
+# File used to track the ansible-test test execution timeout.
+TIMEOUT_PATH = '.ansible-test-timeout.json'
diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py
new file mode 100644
index 00000000..c984f4fe
--- /dev/null
+++ b/test/lib/ansible_test/_internal/core_ci.py
@@ -0,0 +1,680 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import traceback
+import uuid
+import errno
+import time
+
+from . import types as t
+
+from .http import (
+ HttpClient,
+ HttpResponse,
+ HttpError,
+)
+
+from .io import (
+ make_dirs,
+ read_text_file,
+ write_json_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .ci import (
+ AuthContext,
+ get_ci_provider,
+)
+
+from .data import (
+ data_context,
+)
+
+AWS_ENDPOINTS = {
+ 'us-east-1': 'https://ansible-core-ci.testing.ansible.com',
+}
+
+
+class AnsibleCoreCI:
+ """Client for Ansible Core CI services."""
+ def __init__(self, args, platform, version, stage='prod', persist=True, load=True, provider=None, arch=None):
+ """
+ :type args: EnvironmentConfig
+ :type platform: str
+ :type version: str
+ :type stage: str
+ :type persist: bool
+ :type load: bool
+ :type provider: str | None
+ :type arch: str | None
+ """
+ self.args = args
+ self.arch = arch
+ self.platform = platform
+ self.version = version
+ self.stage = stage
+ self.client = HttpClient(args)
+ self.connection = None
+ self.instance_id = None
+ self.endpoint = None
+ self.max_threshold = 1
+ self.retries = 3
+ self.ci_provider = get_ci_provider()
+ self.auth_context = AuthContext()
+
+ if self.arch:
+ self.name = '%s-%s-%s' % (self.arch, self.platform, self.version)
+ else:
+ self.name = '%s-%s' % (self.platform, self.version)
+
+ # Assign each supported platform to one provider.
+ # This is used to determine the provider from the platform when no provider is specified.
+ providers = dict(
+ aws=(
+ 'aws',
+ 'windows',
+ 'freebsd',
+ 'vyos',
+ 'junos',
+ 'ios',
+ 'tower',
+ 'rhel',
+ 'hetzner',
+ ),
+ azure=(
+ 'azure',
+ ),
+ ibmps=(
+ 'aix',
+ 'ibmi',
+ ),
+ ibmvpc=(
+ 'centos arch=power', # matched only when arch=power is requested, so plain centos does not default to ibmvpc (and thus to power)
+ ),
+ parallels=(
+ 'macos',
+ 'osx',
+ ),
+ )
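+ # e.g. platform 'freebsd' selects the 'aws' provider by default,
+ # while 'macos' selects 'parallels'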
+
+ # Currently ansible-core-ci has no concept of arch selection. This effectively means each provider only supports one arch.
+ # The list below identifies which platforms accept an arch, and which one. These platforms can only be used with the specified arch.
+ provider_arches = dict(
+ ibmvpc='power',
+ )
+
+ if provider:
+ # override default provider selection (not all combinations are valid)
+ self.provider = provider
+ else:
+ self.provider = None
+
+ for candidate in providers:
+ choices = [
+ platform,
+ '%s arch=%s' % (platform, arch),
+ ]
+
+ if any(choice in providers[candidate] for choice in choices):
+ # assign default provider based on platform
+ self.provider = candidate
+ break
+
+ # If a provider has been selected, make sure the correct arch (or none) has been selected.
+ if self.provider:
+ required_arch = provider_arches.get(self.provider)
+
+ if self.arch != required_arch:
+ if required_arch:
+ if self.arch:
+ raise ApplicationError('Provider "%s" requires the "%s" arch instead of "%s".' % (self.provider, required_arch, self.arch))
+
+ raise ApplicationError('Provider "%s" requires the "%s" arch.' % (self.provider, required_arch))
+
+ raise ApplicationError('Provider "%s" does not support specification of an arch.' % self.provider)
+
+ self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage))
+
+ if self.provider in ('aws', 'azure', 'ibmps', 'ibmvpc'):
+ if args.remote_aws_region:
+ display.warning('The --remote-aws-region option is obsolete and will be removed in a future version of ansible-test.')
+ # permit command-line override of region selection
+ region = args.remote_aws_region
+ # use a dedicated CI key when overriding the region selection
+ self.auth_context.region = args.remote_aws_region
+ else:
+ region = 'us-east-1'
+
+ self.path = "%s-%s" % (self.path, region)
+
+ if self.args.remote_endpoint:
+ self.endpoints = (self.args.remote_endpoint,)
+ else:
+ self.endpoints = (AWS_ENDPOINTS[region],)
+
+ self.ssh_key = SshKey(args)
+
+ if self.platform == 'windows':
+ self.port = 5986
+ else:
+ self.port = 22
+
+ if self.provider == 'ibmps':
+ # Additional retries are needed to accommodate images transitioning
+ # to the active state in the IBM cloud. This operation can take up
+ # to 90 seconds.
+ self.retries = 7
+ elif self.provider == 'parallels':
+ if self.args.remote_endpoint:
+ self.endpoints = (self.args.remote_endpoint,)
+ else:
+ self.endpoints = (AWS_ENDPOINTS['us-east-1'],)
+
+ self.ssh_key = SshKey(args)
+ self.port = None
+ else:
+ if self.arch:
+ raise ApplicationError('Provider not detected for platform "%s" on arch "%s".' % (self.platform, self.arch))
+
+ raise ApplicationError('Provider not detected for platform "%s" with no arch specified.' % self.platform)
+
+ if persist and load and self._load():
+ try:
+ display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+
+ self.connection = self.get(always_raise_on=[404])
+
+ display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
+ except HttpError as ex:
+ if ex.status != 404:
+ raise
+
+ self._clear()
+
+ display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+
+ self.instance_id = None
+ self.endpoint = None
+ elif not persist:
+ self.instance_id = None
+ self.endpoint = None
+ self._clear()
+
+ if self.instance_id:
+ self.started = True
+ else:
+ self.started = False
+ self.instance_id = str(uuid.uuid4())
+ self.endpoint = None
+
+ display.sensitive.add(self.instance_id)
+
+ def _get_parallels_endpoints(self):
+ """
+ :rtype: tuple[str]
+ """
+ client = HttpClient(self.args, always=True)
+ display.info('Getting available endpoints...', verbosity=1)
+ sleep = 3
+
+ for _iteration in range(1, 10):
+ response = client.get('https://ansible-ci-files.s3.amazonaws.com/ansible-test/parallels-endpoints.txt')
+
+ if response.status_code == 200:
+ endpoints = tuple(response.response.splitlines())
+ display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
+ return endpoints
+
+ display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
+ time.sleep(sleep)
+
+ raise ApplicationError('Unable to get available endpoints.')
+
+ @property
+ def available(self):
+ """Return True if Ansible Core CI is supported."""
+ return self.ci_provider.supports_core_ci_auth(self.auth_context)
+
+ def start(self):
+ """Start instance."""
+ if self.started:
+ display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return None
+
+ return self._start(self.ci_provider.prepare_core_ci_auth(self.auth_context))
+
+ def stop(self):
+ """Stop instance."""
+ if not self.started:
+ display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ response = self.client.delete(self._uri)
+
+ if response.status_code == 404:
+ self._clear()
+ display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ if response.status_code == 200:
+ self._clear()
+ display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ raise self._create_http_error(response)
+
+ def get(self, tries=3, sleep=15, always_raise_on=None):
+ """
+ Get instance connection information.
+ :type tries: int
+ :type sleep: int
+ :type always_raise_on: list[int] | None
+ :rtype: InstanceConnection
+ """
+ if not self.started:
+ display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return None
+
+ if not always_raise_on:
+ always_raise_on = []
+
+ if self.connection and self.connection.running:
+ return self.connection
+
+ while True:
+ tries -= 1
+ response = self.client.get(self._uri)
+
+ if response.status_code == 200:
+ break
+
+ error = self._create_http_error(response)
+
+ if not tries or response.status_code in always_raise_on:
+ raise error
+
+ display.warning('%s. Trying again after %d seconds.' % (error, sleep))
+ time.sleep(sleep)
+
+ if self.args.explain:
+ self.connection = InstanceConnection(
+ running=True,
+ hostname='cloud.example.com',
+ port=self.port or 12345,
+ username='username',
+ password='password' if self.platform == 'windows' else None,
+ )
+ else:
+ response_json = response.json()
+ status = response_json['status']
+ con = response_json.get('connection')
+
+ if con:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ hostname=con['hostname'],
+ port=int(con.get('port', self.port)),
+ username=con['username'],
+ password=con.get('password'),
+ response_json=response_json,
+ )
+ else:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ response_json=response_json,
+ )
+
+ if self.connection.password:
+ display.sensitive.add(str(self.connection.password))
+
+ status = 'running' if self.connection.running else 'starting'
+
+ display.info('Status update: %s/%s on instance %s is %s.' %
+ (self.platform, self.version, self.instance_id, status),
+ verbosity=1)
+
+ return self.connection
+
+ def wait(self, iterations=90): # type: (t.Optional[int]) -> None
+ """Wait for the instance to become ready."""
+ for _iteration in range(1, iterations):
+ if self.get().running:
+ return
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.platform, self.version, self.instance_id))
+
+ @property
+ def _uri(self):
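+ """Return the REST API URI for this instance: <endpoint>/<stage>/<provider>/<instance_id>."""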
+ return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.provider, self.instance_id)
+
+ def _start(self, auth):
+ """Start instance."""
+ display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
+
+ if self.platform == 'windows':
+ winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1'))
+ else:
+ winrm_config = None
+
+ data = dict(
+ config=dict(
+ platform=self.platform,
+ version=self.version,
+ public_key=self.ssh_key.pub_contents if self.ssh_key else None,
+ query=False,
+ winrm_config=winrm_config,
+ )
+ )
+
+ data.update(dict(auth=auth))
+
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ response = self._start_try_endpoints(data, headers)
+
+ self.started = True
+ self._save()
+
+ display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
+
+ if self.args.explain:
+ return {}
+
+ return response.json()
+
+ def _start_try_endpoints(self, data, headers):
+ """
+ :type data: dict[str, any]
+ :type headers: dict[str, str]
+ :rtype: HttpResponse
+ """
+ threshold = 1
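+ # try every endpoint at the current threshold before raising it; a 503 response
+ # (presumably a capacity signal from the service) moves on to the next endpoint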
+
+ while threshold <= self.max_threshold:
+ for self.endpoint in self.endpoints:
+ try:
+ return self._start_at_threshold(data, headers, threshold)
+ except CoreHttpError as ex:
+ if ex.status == 503:
+ display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1)
+ continue
+ display.error(ex.remote_message)
+ except HttpError as ex:
+ display.error(u'%s' % ex)
+
+ time.sleep(3)
+
+ threshold += 1
+
+ raise ApplicationError('Maximum threshold reached and all endpoints exhausted.')
+
+ def _start_at_threshold(self, data, headers, threshold):
+ """
+ :type data: dict[str, any]
+ :type headers: dict[str, str]
+ :type threshold: int
+ :rtype: HttpResponse | None
+ """
+ tries = self.retries
+ sleep = 15
+
+ data['threshold'] = threshold
+
+ display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)
+
+ while True:
+ tries -= 1
+ response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
+
+ if response.status_code == 200:
+ return response
+
+ error = self._create_http_error(response)
+
+ if response.status_code == 503:
+ raise error
+
+ if not tries:
+ raise error
+
+ display.warning('%s. Trying again after %d seconds.' % (error, sleep))
+ time.sleep(sleep)
+
+ def _clear(self):
+ """Clear instance information."""
+ try:
+ self.connection = None
+ os.remove(self.path)
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ def _load(self):
+ """Load instance information."""
+ try:
+ data = read_text_file(self.path)
+ except IOError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ return False
+
+ if not data.startswith('{'):
+ return False # legacy format
+
+ config = json.loads(data)
+
+ return self.load(config)
+
+ def load(self, config):
+ """
+ :type config: dict[str, str]
+ :rtype: bool
+ """
+ self.instance_id = str(config['instance_id'])
+ self.endpoint = config['endpoint']
+ self.started = True
+
+ display.sensitive.add(self.instance_id)
+
+ return True
+
+ def _save(self):
+ """Save instance information."""
+ if self.args.explain:
+ return
+
+ config = self.save()
+
+ write_json_file(self.path, config, create_directories=True)
+
+ def save(self):
+ """
+ :rtype: dict[str, str]
+ """
+ return dict(
+ platform_version='%s/%s' % (self.platform, self.version),
+ instance_id=self.instance_id,
+ endpoint=self.endpoint,
+ )
+
+ @staticmethod
+ def _create_http_error(response):
+ """
+ :type response: HttpResponse
+ :rtype: ApplicationError
+ """
+ response_json = response.json()
+ stack_trace = ''
+
+ if 'message' in response_json:
+ message = response_json['message']
+ elif 'errorMessage' in response_json:
+ message = response_json['errorMessage'].strip()
+ if 'stackTrace' in response_json:
+ traceback_lines = response_json['stackTrace']
+
+ # AWS Lambda on Python 2.7 returns a list of tuples
+ # AWS Lambda on Python 3.7 returns a list of strings
+ if traceback_lines and isinstance(traceback_lines[0], list):
+ traceback_lines = traceback.format_list(traceback_lines)
+
+ trace = '\n'.join([x.rstrip() for x in traceback_lines])
+ stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
+ else:
+ message = str(response_json)
+
+ return CoreHttpError(response.status_code, message, stack_trace)
+
+
+class CoreHttpError(HttpError):
+ """HTTP response as an error."""
+ def __init__(self, status, remote_message, remote_stack_trace):
+ """
+ :type status: int
+ :type remote_message: str
+ :type remote_stack_trace: str
+ """
+ super(CoreHttpError, self).__init__(status, '%s%s' % (remote_message, remote_stack_trace))
+
+ self.remote_message = remote_message
+ self.remote_stack_trace = remote_stack_trace
+
+
+class SshKey:
+ """Container for SSH key used to connect to remote instances."""
+ KEY_NAME = 'id_rsa'
+ PUB_NAME = 'id_rsa.pub'
+
+ def __init__(self, args):
+ """
+ :type args: EnvironmentConfig
+ """
+ key_pair = self.get_key_pair()
+
+ if not key_pair:
+ key_pair = self.generate_key_pair(args)
+
+ key, pub = key_pair
+ key_dst, pub_dst = self.get_in_tree_key_pair_paths()
+
+ def ssh_key_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the SSH keys to the payload file list.
+ They are either outside the source tree or in the cache dir which is ignored by default.
+ """
+ files.append((key, os.path.relpath(key_dst, data_context().content.root)))
+ files.append((pub, os.path.relpath(pub_dst, data_context().content.root)))
+
+ data_context().register_payload_callback(ssh_key_callback)
+
+ self.key, self.pub = key, pub
+
+ if args.explain:
+ self.pub_contents = None
+ else:
+ self.pub_contents = read_text_file(self.pub).strip()
+
+ def get_in_tree_key_pair_paths(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths from the content tree."""
+ temp_dir = ResultType.TMP.path
+
+ key = os.path.join(temp_dir, self.KEY_NAME)
+ pub = os.path.join(temp_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_source_key_pair_paths(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths for the current user."""
+ base_dir = os.path.expanduser('~/.ansible/test/')
+
+ key = os.path.join(base_dir, self.KEY_NAME)
+ pub = os.path.join(base_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_key_pair(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths if present, otherwise return None."""
+ key, pub = self.get_in_tree_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ key, pub = self.get_source_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ return None
+
+ def generate_key_pair(self, args): # type: (EnvironmentConfig) -> t.Tuple[str, str]
+ """Generate an SSH key pair for use by all ansible-test invocations for the current user."""
+ key, pub = self.get_source_key_pair_paths()
+
+ if not args.explain:
+ make_dirs(os.path.dirname(key))
+
+ if not os.path.isfile(key) or not os.path.isfile(pub):
+ run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', 'rsa', '-N', '', '-f', key])
+
+ # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
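+ # rewrite the header/footer, e.g. '-----BEGIN PRIVATE KEY-----' -> '-----BEGIN RSA PRIVATE KEY-----'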
+ key_contents = read_text_file(key)
+ key_contents = re.sub(r'(BEGIN|END) PRIVATE KEY', r'\1 RSA PRIVATE KEY', key_contents)
+
+ write_text_file(key, key_contents)
+
+ return key, pub
+
+
+class InstanceConnection:
+ """Container for remote instance status and connection details."""
+ def __init__(self,
+ running, # type: bool
+ hostname=None, # type: t.Optional[str]
+ port=None, # type: t.Optional[int]
+ username=None, # type: t.Optional[str]
+ password=None, # type: t.Optional[str]
+ response_json=None, # type: t.Optional[t.Dict[str, t.Any]]
+ ): # type: (...) -> None
+ self.running = running
+ self.hostname = hostname
+ self.port = port
+ self.username = username
+ self.password = password
+ self.response_json = response_json or {}
+
+ def __str__(self):
+ if self.password:
+ return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password)
+
+ return '%s:%s [%s]' % (self.hostname, self.port, self.username)
diff --git a/test/lib/ansible_test/_internal/coverage/__init__.py b/test/lib/ansible_test/_internal/coverage/__init__.py
new file mode 100644
index 00000000..462d672e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/__init__.py
@@ -0,0 +1,323 @@
+"""Common logic for the coverage subcommand."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..io import (
+ open_binary_file,
+ read_json_file,
+)
+
+from ..util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ..util_common import (
+ intercept_command,
+ ResultType,
+)
+
+from ..config import (
+ EnvironmentConfig,
+)
+
+from ..executor import (
+ Delegate,
+ install_command_requirements,
+)
+
+from ..target import (
+ walk_module_targets,
+)
+
+from ..data import (
+ data_context,
+)
+
+if t.TYPE_CHECKING:
+ import coverage as coverage_module
+
+COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
+COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
+COVERAGE_OUTPUT_FILE_NAME = 'coverage'
+
+
+class CoverageConfig(EnvironmentConfig):
+ """Configuration for the coverage command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageConfig, self).__init__(args, 'coverage')
+
+ self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: t.FrozenSet[str]
+ self.all = args.all if 'all' in args else False # type: bool
+ self.stub = args.stub if 'stub' in args else False # type: bool
+ self.export = args.export if 'export' in args else None # type: str
+ self.coverage = False # temporary work-around to support intercept_command in cover.py
+
+
+def initialize_coverage(args): # type: (CoverageConfig) -> coverage_module
+ """Delegate execution if requested, install requirements, then import and return the coverage module. Raises an exception if coverage is not available."""
+ if args.delegate:
+ raise Delegate()
+
+ if args.requirements:
+ install_command_requirements(args)
+
+ try:
+ import coverage
+ except ImportError:
+ coverage = None
+
+ if not coverage:
+ raise ApplicationError('You must install the "coverage" python module to use this command.')
+
+ coverage_version_string = coverage.__version__
+ coverage_version = tuple(int(v) for v in coverage_version_string.split('.'))
+
+ min_version = (4, 2)
+ max_version = (5, 0)
+
+ supported_version = True
+ recommended_version = '4.5.4'
+
+ if coverage_version < min_version or coverage_version >= max_version:
+ supported_version = False
+
+ if not supported_version:
+ raise ApplicationError('Version %s of "coverage" is not supported. Version %s is known to work and is recommended.' % (
+ coverage_version_string, recommended_version))
+
+ return coverage
+
+
+def run_coverage(args, output_file, command, cmd): # type: (CoverageConfig, str, str, t.List[str]) -> None
+ """Run the coverage cli tool with the specified options."""
+ env = common_environment()
+ env.update(dict(COVERAGE_FILE=output_file))
+
+ cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd
+
+ intercept_command(args, target_name='coverage', env=env, cmd=cmd, disable_coverage=True)
+
+
+def get_python_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str]
+ """Return the list of Python coverage file paths."""
+ return get_coverage_files('python', path)
+
+
+def get_powershell_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str]
+ """Return the list of PowerShell coverage file paths."""
+ return get_coverage_files('powershell', path)
+
+
+def get_coverage_files(language, path=None): # type: (str, t.Optional[str]) -> t.List[str]
+ """Return the list of coverage file paths for the given language."""
+ coverage_dir = path or ResultType.COVERAGE.path
+ coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
+ if '=coverage.' in f and '=%s' % language in f]
+
+ return coverage_files
+
+
+def get_collection_path_regexes(): # type: () -> t.Tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]]
+ """Return a pair of regexes used for identifying and manipulating collection paths."""
+ if data_context().content.collection:
+ collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
+ collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
+ else:
+ collection_search_re = None
+ collection_sub_re = None
+
+ return collection_search_re, collection_sub_re
+
+
+def get_python_modules(): # type: () -> t.Dict[str, str]
+ """Return a dictionary of Ansible module names and their paths."""
+ return dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
+
+
+def enumerate_python_arcs(
+ path, # type: str
+ coverage, # type: coverage_module
+ modules, # type: t.Dict[str, str]
+ collection_search_re, # type: t.Optional[t.Pattern]
+ collection_sub_re, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]]
+ """Enumerate Python code coverage arcs in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ original = coverage.CoverageData()
+
+ try:
+ original.read_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ with open_binary_file(path) as file:
+ header = file.read(6)
+
+ if header == b'SQLite':
+ display.error('File created by "coverage" 5.0+: %s' % os.path.relpath(path))
+ else:
+ display.error(u'%s' % ex)
+
+ return
+
+ for filename in original.measured_files():
+ arcs = original.arcs(filename)
+
+ if not arcs:
+ # This is most likely due to using an unsupported version of coverage.
+ display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path))
+ continue
+
+ filename = sanitize_filename(filename, modules=modules, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ yield filename, set(arcs)
+
+
+def enumerate_powershell_lines(
+ path, # type: str
+ collection_search_re, # type: t.Optional[t.Pattern]
+ collection_sub_re, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Generator[t.Tuple[str, t.Dict[int, int]]]
+ """Enumerate PowerShell code coverage lines in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ try:
+ coverage_run = read_json_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ display.error(u'%s' % ex)
+ return
+
+ for filename, hits in coverage_run.items():
+ filename = sanitize_filename(filename, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ if isinstance(hits, dict) and not hits.get('Line'):
+ # Input data was previously aggregated and thus uses the standard ansible-test output format for PowerShell coverage.
+ # This format differs from the more verbose format of raw coverage data from the remote Windows hosts.
+ hits = dict((int(key), value) for key, value in hits.items())
+
+ yield filename, hits
+ continue
+
+ # PowerShell unpacks arrays if there's only a single entry so this is a defensive check on that
+ if not isinstance(hits, list):
+ hits = [hits]
+
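+ # e.g. [{'Line': 10, 'HitCount': 2}] -> {10: 2}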
+ hits = dict((hit['Line'], hit['HitCount']) for hit in hits if hit)
+
+ yield filename, hits
+
+
+def sanitize_filename(
+ filename, # type: str
+ modules=None, # type: t.Optional[t.Dict[str, str]]
+ collection_search_re=None, # type: t.Optional[t.Pattern]
+ collection_sub_re=None, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Optional[str]
+ """Convert the given code coverage path to a local absolute path and return its, or None if the path is not valid."""
+ ansible_path = os.path.abspath('lib/ansible/') + '/'
+ root_path = data_context().content.root + '/'
+ integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep
+
+ if modules is None:
+ modules = {}
+
+ if '/ansible_modlib.zip/ansible/' in filename:
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
+ new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif collection_search_re and collection_search_re.search(filename):
+ new_name = os.path.abspath(collection_sub_re.sub('', filename))
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
+ new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif '/ansible_module_' in filename:
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
+ module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
+ # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
+ # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
+ module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
+ '\\g<module>', filename).rstrip('_')
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search('^(/.*?)?/root/ansible/', filename):
+ # Rewrite the path of code running on a remote host or in a docker container as root.
+ new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif integration_temp_path in filename:
+ # Rewrite the path of code running from an integration test temporary directory.
+ new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+
+ return filename
+
+
+class PathChecker:
+ """Checks code coverage paths to verify they are valid and reports on the findings."""
+ def __init__(self, args, collection_search_re=None): # type: (CoverageConfig, t.Optional[t.Pattern]) -> None
+ self.args = args
+ self.collection_search_re = collection_search_re
+ self.invalid_paths = []
+ self.invalid_path_chars = 0
+
+ def check_path(self, path): # type: (str) -> bool
+ """Return True if the given coverage path is valid, otherwise display a warning and return False."""
+ if os.path.isfile(to_bytes(path)):
+ return True
+
+ if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py':
+ # the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
+ # coverage is still reported for these non-existent files, but warnings are not needed
+ return False
+
+ self.invalid_paths.append(path)
+ self.invalid_path_chars += len(path)
+
+ if self.args.verbosity > 1:
+ display.warning('Invalid coverage path: %s' % path)
+
+ return False
+
+ def report(self): # type: () -> None
+ """Display a warning regarding invalid paths if any were found."""
+ if self.invalid_paths:
+ display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths)))
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py
new file mode 100644
index 00000000..45770373
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py
@@ -0,0 +1,19 @@
+"""Common logic for the `coverage analyze` subcommand."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ... import types as t
+
+from .. import (
+ CoverageConfig,
+)
+
+
+class CoverageAnalyzeConfig(CoverageConfig):
+ """Configuration for the `coverage analyze` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeConfig, self).__init__(args)
+
+ # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands
+ # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used
+ self.info_stderr = True
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
new file mode 100644
index 00000000..8fe571b8
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
@@ -0,0 +1,154 @@
+"""Analyze integration test target code coverage."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....io import (
+ read_json_file,
+ write_json_file,
+)
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from .. import (
+ CoverageAnalyzeConfig,
+)
+
+if t.TYPE_CHECKING:
+ TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int])
+ NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]]
+ IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
+ Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]]
+ Lines = t.Dict[str, t.Dict[int, t.Set[int]]]
+ TargetIndexes = t.Dict[str, int]
+ TargetSetIndexes = t.Dict[t.FrozenSet[int], int]
+
+
+class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig):
+ """Configuration for the `coverage analyze targets` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsConfig, self).__init__(args)
+
+ self.info_stderr = True
+
+
+def make_report(target_indexes, arcs, lines): # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any]
+ """Condense target indexes, arcs and lines into a compact report."""
+ set_indexes = {}
+ arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items())
+ line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items())
+
+ report = dict(
+ targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
+ target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
+ arcs=arc_refs,
+ lines=line_refs,
+ )
+
+ return report
+
+
+def load_report(report): # type: (t.Dict[str, t.Any]) -> t.Tuple[t.List[str], Arcs, Lines]
+ """Extract target indexes, arcs and lines from an existing report."""
+ try:
+ target_indexes = report['targets'] # type: t.List[str]
+ target_sets = report['target_sets'] # type: t.List[t.List[int]]
+ arc_data = report['arcs'] # type: t.Dict[str, t.Dict[str, int]]
+ line_data = report['lines'] # type: t.Dict[str, t.Dict[int, int]]
+ except KeyError as ex:
+ raise ApplicationError('Document is missing key "%s".' % ex.args)
+ except TypeError:
+ raise ApplicationError('Document is type "%s" instead of "dict".' % type(report).__name__)
+
+ arcs = dict((path, dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())) for path, data in arc_data.items())
+ lines = dict((path, dict((int(line), set(target_sets[index])) for line, index in data.items())) for path, data in line_data.items())
+
+ return target_indexes, arcs, lines
+
+
+def read_report(path): # type: (str) -> t.Tuple[t.List[str], Arcs, Lines]
+ """Read a JSON report from disk."""
+ try:
+ report = read_json_file(path)
+ except Exception as ex:
+ raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex))
+
+ try:
+ return load_report(report)
+ except ApplicationError as ex:
+ raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex))
+
+
+def write_report(args, report, path): # type: (CoverageAnalyzeTargetsConfig, t.Dict[str, t.Any], str) -> None
+ """Write a JSON report to disk."""
+ if args.explain:
+ return
+
+ write_json_file(path, report, formatted=False)
+
+ display.info('Generated %d byte report with %d targets covering %d files.' % (
+ os.path.getsize(path), len(report['targets']), len(set(report['arcs'].keys()) | set(report['lines'].keys())),
+ ), verbosity=1)
+
+
+def format_arc(value): # type: (t.Tuple[int, int]) -> str
+ """Format an arc tuple as a string."""
+ return '%d:%d' % value
+
+
+def parse_arc(value): # type: (str) -> t.Tuple[int, int]
+ """Parse an arc string into a tuple."""
+ first, last = tuple(map(int, value.split(':')))
+ return first, last
+
+
+def get_target_set_index(data, target_set_indexes): # type: (t.Set[int], TargetSetIndexes) -> int
+ """Find or add the target set in the result set and return the target set index."""
+ return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes))
+
+
+def get_target_index(name, target_indexes): # type: (str, TargetIndexes) -> int
+ """Find or add the target in the result set and return the target index."""
+ return target_indexes.setdefault(name, len(target_indexes))
+
+
+def expand_indexes(
+ source_data, # type: IndexedPoints
+ source_index, # type: t.List[str]
+ format_func, # type: t.Callable[[t.Any], str]
+): # type: (...) -> NamedPoints
+ """Expand indexes from the source into target names for easier processing of the data (arcs or lines)."""
+ combined_data = {} # type: t.Dict[str, t.Dict[t.Any, t.Set[str]]]
+
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(format_func(covered_point), set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(source_index[covered_target_index])
+
+ return combined_data
+
+
+def generate_indexes(target_indexes, data): # type: (TargetIndexes, NamedPoints) -> IndexedPoints
+ """Return an indexed version of the given data (arcs or points)."""
+ results = {} # type: IndexedPoints
+
+ for path, points in data.items():
+ result_points = results[path] = {}
+
+ for point, target_names in points.items():
+ result_point = result_points[point] = set()
+
+ for target_name in target_names:
+ result_point.add(get_target_index(target_name, target_indexes))
+
+ return results
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
new file mode 100644
index 00000000..35148ff6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
@@ -0,0 +1,64 @@
+"""Combine integration test target code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .... import types as t
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ Arcs,
+ IndexedPoints,
+ Lines,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets combine` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args)
+
+ self.input_files = args.input_file # type: t.List[str]
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_combine(args): # type: (CoverageAnalyzeTargetsCombineConfig) -> None
+ """Combine integration test target code coverage reports."""
+ combined_target_indexes = {} # type: TargetIndexes
+ combined_path_arcs = {} # type: Arcs
+ combined_path_lines = {} # type: Lines
+
+ for report_path in args.input_files:
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)
+
+ merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
+ merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)
+
+ report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def merge_indexes(
+ source_data, # type: IndexedPoints
+ source_index, # type: t.List[str]
+ combined_data, # type: IndexedPoints
+ combined_index, # type: TargetIndexes
+): # type: (...) -> None
+ """Merge indexes from the source into the combined data set (arcs or lines)."""
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(covered_point, set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(get_target_index(source_index[covered_target_index], combined_index))
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
new file mode 100644
index 00000000..388dd6cb
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
@@ -0,0 +1,39 @@
+"""Expand target names in an aggregated coverage file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .... import types as t
+
+from ....io import (
+ SortedSetEncoder,
+ write_json_file,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ format_arc,
+ read_report,
+)
+
+
+class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets expand` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsExpandConfig, self).__init__(args)
+
+ self.input_file = args.input_file # type: str
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_expand(args): # type: (CoverageAnalyzeTargetsExpandConfig) -> None
+ """Expand target names in an aggregated coverage file."""
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ report = dict(
+ arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc),
+ lines=expand_indexes(covered_path_lines, covered_targets, str),
+ )
+
+ if not args.explain:
+ write_json_file(args.output_file, report, encoder=SortedSetEncoder)
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
new file mode 100644
index 00000000..e90fb227
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
@@ -0,0 +1,104 @@
+"""Filter an aggregated coverage file, keeping only the specified targets."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from .... import types as t
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ generate_indexes,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ NamedPoints,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets filter` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsFilterConfig, self).__init__(args)
+
+ self.input_file = args.input_file # type: str
+ self.output_file = args.output_file # type: str
+ self.include_targets = args.include_targets # type: t.List[str]
+ self.exclude_targets = args.exclude_targets # type: t.List[str]
+ self.include_path = args.include_path # type: t.Optional[str]
+ self.exclude_path = args.exclude_path # type: t.Optional[str]
+
+
+def command_coverage_analyze_targets_filter(args): # type: (CoverageAnalyzeTargetsFilterConfig) -> None
+ """Filter target names in an aggregated coverage file."""
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ filtered_path_arcs = expand_indexes(covered_path_arcs, covered_targets, lambda v: v)
+ filtered_path_lines = expand_indexes(covered_path_lines, covered_targets, lambda v: v)
+
+ include_targets = set(args.include_targets) if args.include_targets else None
+ exclude_targets = set(args.exclude_targets) if args.exclude_targets else None
+
+ include_path = re.compile(args.include_path) if args.include_path else None
+ exclude_path = re.compile(args.exclude_path) if args.exclude_path else None
+
+ def path_filter_func(path):
+ if include_path and not re.search(include_path, path):
+ return False
+
+ if exclude_path and re.search(exclude_path, path):
+ return False
+
+ return True
+
+ def target_filter_func(targets):
+ if include_targets:
+ targets &= include_targets
+
+ if exclude_targets:
+ targets -= exclude_targets
+
+ return targets
+
+ filtered_path_arcs = filter_data(filtered_path_arcs, path_filter_func, target_filter_func)
+ filtered_path_lines = filter_data(filtered_path_lines, path_filter_func, target_filter_func)
+
+ target_indexes = {} # type: TargetIndexes
+ indexed_path_arcs = generate_indexes(target_indexes, filtered_path_arcs)
+ indexed_path_lines = generate_indexes(target_indexes, filtered_path_lines)
+
+ report = make_report(target_indexes, indexed_path_arcs, indexed_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def filter_data(
+ data, # type: NamedPoints
+ path_filter_func, # type: t.Callable[[str], bool]
+ target_filter_func, # type: t.Callable[[t.Set[str]], t.Set[str]]
+): # type: (...) -> NamedPoints
+ """Filter the data set using the specified filter function."""
+ result = {} # type: NamedPoints
+
+ for src_path, src_points in data.items():
+ if not path_filter_func(src_path):
+ continue
+
+ dst_points = {}
+
+ for src_point, src_targets in src_points.items():
+ dst_targets = target_filter_func(src_targets)
+
+ if dst_targets:
+ dst_points[src_point] = dst_targets
+
+ if dst_points:
+ result[src_path] = dst_points
+
+ return result
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
new file mode 100644
index 00000000..a14b6f55
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
@@ -0,0 +1,146 @@
+"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....encoding import (
+ to_text,
+)
+
+from ....data import (
+ data_context,
+)
+
+from ....util_common import (
+ ResultType,
+)
+
+from ... import (
+ enumerate_powershell_lines,
+ enumerate_python_arcs,
+ get_collection_path_regexes,
+ get_powershell_coverage_files,
+ get_python_coverage_files,
+ get_python_modules,
+ initialize_coverage,
+ PathChecker,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ Arcs,
+ Lines,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets generate` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsGenerateConfig, self).__init__(args)
+
+ self.input_dir = args.input_dir or ResultType.COVERAGE.path # type: str
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_generate(args): # type: (CoverageAnalyzeTargetsGenerateConfig) -> None
+ """Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+ root = data_context().content.root
+ target_indexes = {}
+ arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, args.input_dir, target_indexes).items())
+ lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items())
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def analyze_python_coverage(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ path, # type: str
+ target_indexes, # type: TargetIndexes
+): # type: (...) -> Arcs
+ """Analyze Python code coverage."""
+ results = {} # type: Arcs
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ modules = get_python_modules()
+ python_files = get_python_coverage_files(path)
+ coverage = initialize_coverage(args)
+
+ for python_file in python_files:
+ if not is_integration_coverage_file(python_file):
+ continue
+
+ target_name = get_target_name(python_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
+ arcs = results.setdefault(filename, {})
+
+ for covered_arc in covered_arcs:
+ arc = arcs.setdefault(covered_arc, set())
+ arc.add(target_index)
+
+ prune_invalid_filenames(args, results, collection_search_re=collection_search_re)
+
+ return results
+
+
+def analyze_powershell_coverage(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ path, # type: str
+ target_indexes, # type: TargetIndexes
+): # type: (...) -> Lines
+ """Analyze PowerShell code coverage"""
+ results = {} # type: Lines
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ powershell_files = get_powershell_coverage_files(path)
+
+ for powershell_file in powershell_files:
+ if not is_integration_coverage_file(powershell_file):
+ continue
+
+ target_name = get_target_name(powershell_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re):
+ lines = results.setdefault(filename, {})
+
+ for covered_line in hits:
+ line = lines.setdefault(covered_line, set())
+ line.add(target_index)
+
+ prune_invalid_filenames(args, results)
+
+ return results
+
+
+def prune_invalid_filenames(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ results, # type: t.Dict[str, t.Any]
+ collection_search_re=None, # type: t.Optional[str]
+): # type: (...) -> None
+ """Remove invalid filenames from the given result set."""
+ path_checker = PathChecker(args, collection_search_re)
+
+ for path in list(results.keys()):
+ if not path_checker.check_path(path):
+ del results[path]
+
+
+def get_target_name(path): # type: (str) -> str
+ """Extract the test target name from the given coverage path."""
+ return to_text(os.path.basename(path).split('=')[1])
+
+
+def is_integration_coverage_file(path): # type: (str) -> bool
+ """Returns True if the coverage file came from integration tests, otherwise False."""
+ return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
new file mode 100644
index 00000000..613a0ef2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
@@ -0,0 +1,109 @@
+"""Identify aggregated coverage in one file missing from another."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....encoding import (
+ to_bytes,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ TargetIndexes,
+ IndexedPoints,
+ )
+
+
+class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets missing` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsMissingConfig, self).__init__(args)
+
+ self.from_file = args.from_file # type: str
+ self.to_file = args.to_file # type: str
+ self.output_file = args.output_file # type: str
+
+ self.only_gaps = args.only_gaps # type: bool
+ self.only_exists = args.only_exists # type: bool
+
+
+def command_coverage_analyze_targets_missing(args): # type: (CoverageAnalyzeTargetsMissingConfig) -> None
+ """Identify aggregated coverage in one file missing from another."""
+ from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
+ to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
+ target_indexes = {}
+
+ if args.only_gaps:
+ arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
+ lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
+ else:
+ arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
+ lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)
+
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def find_gaps(
+ from_data, # type: IndexedPoints
+ from_index, # type: t.List[str]
+ to_data, # type: IndexedPoints
+ target_indexes, # type: TargetIndexes
+ only_exists, # type: bool
+): # type: (...) -> IndexedPoints
+ """Find gaps in coverage between the from and to data sets."""
+ target_data = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ gaps = set(from_points.keys()) - set(to_points.keys())
+
+ if gaps:
+ gap_points = dict((key, value) for key, value in from_points.items() if key in gaps)
+ target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items())
+
+ return target_data
+
+
+def find_missing(
+ from_data, # type: IndexedPoints
+ from_index, # type: t.List[str]
+ to_data, # type: IndexedPoints
+ to_index, # type: t.List[str]
+ target_indexes, # type: TargetIndexes
+ only_exists, # type: bool
+): # type: (...) -> IndexedPoints
+ """Find coverage in from_data not present in to_data (arcs or lines)."""
+ target_data = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ for from_point, from_target_indexes in from_points.items():
+ to_target_indexes = to_points.get(from_point, set())
+
+ remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes)
+
+ if remaining_targets:
+ target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set())
+ target_index.update(get_target_index(name, target_indexes) for name in remaining_targets)
+
+ return target_data
diff --git a/test/lib/ansible_test/_internal/coverage/combine.py b/test/lib/ansible_test/_internal/coverage/combine.py
new file mode 100644
index 00000000..fa0b8f7d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/combine.py
@@ -0,0 +1,297 @@
+"""Combine code coverage files."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..target import (
+ walk_compile_targets,
+ walk_powershell_targets,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+)
+
+from ..util_common import (
+ ResultType,
+ write_json_file,
+ write_json_test_results,
+)
+
+from . import (
+ enumerate_python_arcs,
+ enumerate_powershell_lines,
+ get_collection_path_regexes,
+ get_python_coverage_files,
+ get_python_modules,
+ get_powershell_coverage_files,
+ initialize_coverage,
+ COVERAGE_OUTPUT_FILE_NAME,
+ COVERAGE_GROUPS,
+ CoverageConfig,
+ PathChecker,
+)
+
+
+def command_coverage_combine(args):
+ """Patch paths in coverage files and merge into a single file.
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args)
+
+ for path in paths:
+ display.info('Generated combined output: %s' % path, verbosity=1)
+
+ return paths
+
+
+def _command_coverage_combine_python(args):
+ """
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ coverage = initialize_coverage(args)
+
+ modules = get_python_modules()
+
+ coverage_files = get_python_coverage_files()
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_compile_targets)
+ groups = _build_stub_groups(args, sources, lambda line_count: set())
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
+ if group not in groups:
+ groups[group] = {}
+
+ arc_data = groups[group]
+
+ if filename not in arc_data:
+ arc_data[filename] = set()
+
+ arc_data[filename].update(arcs)
+
+ output_files = []
+
+ if args.export:
+ coverage_file = os.path.join(args.export, '')
+ suffix = '=coverage.combined'
+ else:
+ coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
+ suffix = ''
+
+ path_checker = PathChecker(args, collection_search_re)
+
+ for group in sorted(groups):
+ arc_data = groups[group]
+
+ updated = coverage.CoverageData()
+
+ for filename in arc_data:
+ if not path_checker.check_path(filename):
+ continue
+
+ updated.add_arcs({filename: list(arc_data[filename])})
+
+ if args.all:
+ updated.add_arcs(dict((source[0], []) for source in sources))
+
+ if not args.explain:
+ output_file = coverage_file + group + suffix
+ updated.write_file(output_file) # always write files to make sure stale files do not exist
+
+ if updated:
+ # only report files which are non-empty to prevent coverage from reporting errors
+ output_files.append(output_file)
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _command_coverage_combine_powershell(args):
+ """
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ coverage_files = get_powershell_coverage_files()
+
+ def _default_stub_value(lines):
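+ # pre-fill every line with a hit count of 0 so untested files still appear in the combined report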
+ val = {}
+ for line in range(lines):
+ val[line] = 0
+ return val
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_powershell_targets)
+ groups = _build_stub_groups(args, sources, _default_stub_value)
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
+ if group not in groups:
+ groups[group] = {}
+
+ coverage_data = groups[group]
+
+ if filename not in coverage_data:
+ coverage_data[filename] = {}
+
+ file_coverage = coverage_data[filename]
+
+ for line_no, hit_count in hits.items():
+ file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count
+
+ output_files = []
+
+ path_checker = PathChecker(args)
+
+ for group in sorted(groups):
+ coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
+
+ if args.all:
+ # Add 0 line entries for files not in coverage_data
+ for source, source_line_count in sources:
+ if source in coverage_data:
+ continue
+
+ coverage_data[source] = _default_stub_value(source_line_count)
+
+ if not args.explain:
+ if args.export:
+ output_file = os.path.join(args.export, group + '=coverage.combined')
+ write_json_file(output_file, coverage_data, formatted=False)
+ output_files.append(output_file)
+ continue
+
+ output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
+
+ write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False)
+
+ output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _get_coverage_targets(args, walk_func):
+ """
+ :type args: CoverageConfig
+ :type walk_func: Func
+ :rtype: list[tuple[str, int]]
+ """
+ sources = []
+
+ if args.all or args.stub:
+ # excludes symlinks of regular files to avoid reporting on the same file multiple times
+ # in the future it would be nice to merge any coverage for symlinks into the real files
+ for target in walk_func(include_symlinks=False):
+ target_path = os.path.abspath(target.path)
+
+ target_lines = len(read_text_file(target_path).splitlines())
+
+ sources.append((target_path, target_lines))
+
+ sources.sort()
+
+ return sources
+
+
+def _build_stub_groups(args, sources, default_stub_value):
+ """
+ :type args: CoverageConfig
+ :type sources: List[tuple[str, int]]
+ :type default_stub_value: Func[int]
+ :rtype: dict
+ """
+ groups = {}
+
+ if args.stub:
+ stub_group = []
+ stub_groups = [stub_group]
+ stub_line_limit = 500000
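+ # chunk sources so each stub group covers at most ~500k lines; groups are named '=stub-01', '=stub-02', ...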
+ stub_line_count = 0
+
+ for source, source_line_count in sources:
+ stub_group.append((source, source_line_count))
+ stub_line_count += source_line_count
+
+ if stub_line_count > stub_line_limit:
+ stub_line_count = 0
+ stub_group = []
+ stub_groups.append(stub_group)
+
+ for stub_index, stub_group in enumerate(stub_groups):
+ if not stub_group:
+ continue
+
+ groups['=stub-%02d' % (stub_index + 1)] = dict((source, default_stub_value(line_count))
+ for source, line_count in stub_group)
+
+ return groups
+
+
+def get_coverage_group(args, coverage_file):
+ """
+ :type args: CoverageConfig
+ :type coverage_file: str
+ :rtype: str
+ """
+ parts = os.path.basename(coverage_file).split('=', 4)
+
+ # noinspection PyTypeChecker
+ if len(parts) != 5 or not parts[4].startswith('coverage.'):
+ return None
+
+ names = dict(
+ command=parts[0],
+ target=parts[1],
+ environment=parts[2],
+ version=parts[3],
+ )
+
+ export_names = dict(
+ version=parts[3],
+ )
+
+ group = ''
+
+ for part in COVERAGE_GROUPS:
+ if part in args.group_by:
+ group += '=%s' % names[part]
+ elif args.export:
+ group += '=%s' % export_names.get(part, 'various')
+
+ if args.export:
+ group = group.lstrip('=')
+
+ return group
diff --git a/test/lib/ansible_test/_internal/coverage/erase.py b/test/lib/ansible_test/_internal/coverage/erase.py
new file mode 100644
index 00000000..92d241c7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/erase.py
@@ -0,0 +1,27 @@
+"""Erase code coverage files."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util_common import (
+ ResultType,
+)
+
+from . import (
+ CoverageConfig,
+)
+
+
+def command_coverage_erase(args): # type: (CoverageConfig) -> None
+ """Erase code coverage data files collected during test runs."""
+ coverage_dir = ResultType.COVERAGE.path
+
+ for name in os.listdir(coverage_dir):
+ if not name.startswith('coverage') and '=coverage.' not in name:
+ continue
+
+ path = os.path.join(coverage_dir, name)
+
+ if not args.explain:
+ os.remove(path)
diff --git a/test/lib/ansible_test/_internal/coverage/html.py b/test/lib/ansible_test/_internal/coverage/html.py
new file mode 100644
index 00000000..63956a19
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/html.py
@@ -0,0 +1,45 @@
+"""Generate HTML code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ make_dirs,
+)
+
+from ..util import (
+ display,
+)
+
+from ..util_common import (
+ ResultType,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_html(args):
+ """
+ :type args: CoverageConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ if output_file.endswith('-powershell'):
+ # coverage.py does not support non-Python files so we just skip the local html report.
+ display.info("Skipping output file %s in html generation" % output_file, verbosity=3)
+ continue
+
+ dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file))
+ make_dirs(dir_name)
+ run_coverage(args, output_file, 'html', ['-i', '-d', dir_name])
+
+ display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html'))
diff --git a/test/lib/ansible_test/_internal/coverage/report.py b/test/lib/ansible_test/_internal/coverage/report.py
new file mode 100644
index 00000000..24efa637
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/report.py
@@ -0,0 +1,156 @@
+"""Generate console code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ read_json_file,
+)
+
+from ..util import (
+ display,
+)
+
+from ..data import (
+ data_context,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_report(args):
+ """
+ :type args: CoverageReportConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ if args.group_by or args.stub:
+ display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
+
+ if output_file.endswith('-powershell'):
+ display.info(_generate_powershell_output_report(args, output_file))
+ else:
+ options = []
+
+ if args.show_missing:
+ options.append('--show-missing')
+
+ if args.include:
+ options.extend(['--include', args.include])
+
+ if args.omit:
+ options.extend(['--omit', args.omit])
+
+ run_coverage(args, output_file, 'report', options)
+
+
+def _generate_powershell_output_report(args, coverage_file):
+ """
+ :type args: CoverageReportConfig
+ :type coverage_file: str
+ :rtype: str
+ """
+ coverage_info = read_json_file(coverage_file)
+
+ root_path = data_context().content.root + '/'
+
+ name_padding = 7
+ cover_padding = 8
+
+ file_report = []
+ total_stmts = 0
+ total_miss = 0
+
+ for filename in sorted(coverage_info.keys()):
+ hit_info = coverage_info[filename]
+
+ if filename.startswith(root_path):
+ filename = filename[len(root_path):]
+
+ if args.omit and filename in args.omit:
+ continue
+ if args.include and filename not in args.include:
+ continue
+
+ stmts = len(hit_info)
+ miss = len([c for c in hit_info.values() if c == 0])
+
+ name_padding = max(name_padding, len(filename) + 3)
+
+ total_stmts += stmts
+ total_miss += miss
+
+ cover = "{0}%".format(int((stmts - miss) / stmts * 100))
+
+ missing = []
+ current_missing = None
+ sorted_lines = sorted([int(x) for x in hit_info.keys()])
+ for idx, line in enumerate(sorted_lines):
+ hit = hit_info[str(line)]
+ if hit == 0 and current_missing is None:
+ current_missing = line
+ elif hit != 0 and current_missing is not None:
+ end_line = sorted_lines[idx - 1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+ current_missing = None
+
+ if current_missing is not None:
+ end_line = sorted_lines[-1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+
+ file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
+
+ if total_stmts == 0:
+ return ''
+
+ total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
+ stmts_padding = max(8, len(str(total_stmts)))
+ miss_padding = max(7, len(str(total_miss)))
+
+ line_length = name_padding + stmts_padding + miss_padding + cover_padding
+
+ header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
+ 'Cover'.rjust(cover_padding)
+
+ if args.show_missing:
+ header += 'Lines Missing'.rjust(16)
+ line_length += 16
+
+ line_break = '-' * line_length
+ lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
+ str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
+ ' ' + ', '.join(f['missing']) if args.show_missing else '')
+ for f in file_report]
+ totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
+ str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
+
+ report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
+ return report
+
+
+class CoverageReportConfig(CoverageConfig):
+ """Configuration for the coverage report command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(CoverageReportConfig, self).__init__(args)
+
+ self.show_missing = args.show_missing # type: bool
+ self.include = args.include # type: str
+ self.omit = args.omit # type: str
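
The current_missing bookkeeping above collapses consecutive uncovered lines into start-end ranges for the Lines Missing column. The same loop in isolation, with made-up hit counts:

    hit_info = {'1': 1, '2': 0, '3': 0, '4': 1, '5': 0}  # line number -> hit count

    missing = []
    current_missing = None
    sorted_lines = sorted(int(x) for x in hit_info)
    for idx, line in enumerate(sorted_lines):
        if hit_info[str(line)] == 0 and current_missing is None:
            current_missing = line  # open a new range
        elif hit_info[str(line)] != 0 and current_missing is not None:
            end_line = sorted_lines[idx - 1]  # close the open range
            missing.append(str(current_missing) if current_missing == end_line
                           else '%s-%s' % (current_missing, end_line))
            current_missing = None
    if current_missing is not None:  # a range can run to the last line
        end_line = sorted_lines[-1]
        missing.append(str(current_missing) if current_missing == end_line
                       else '%s-%s' % (current_missing, end_line))

    print(', '.join(missing))  # -> 2-3, 5
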
diff --git a/test/lib/ansible_test/_internal/coverage/xml.py b/test/lib/ansible_test/_internal/coverage/xml.py
new file mode 100644
index 00000000..94b5abc5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/xml.py
@@ -0,0 +1,191 @@
+"""Generate XML code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from xml.etree.ElementTree import (
+ Comment,
+ Element,
+ SubElement,
+ tostring,
+)
+
+from xml.dom import (
+ minidom,
+)
+
+from ..io import (
+ make_dirs,
+ read_json_file,
+)
+
+from ..util_common import (
+ ResultType,
+ write_text_test_results,
+)
+
+from ..env import (
+ get_ansible_version,
+)
+
+from ..data import (
+ data_context,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_xml(args):
+ """
+ :type args: CoverageConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ xml_name = '%s.xml' % os.path.basename(output_file)
+ if output_file.endswith('-powershell'):
+ report = _generate_powershell_xml(output_file)
+
+ rough_string = tostring(report, 'utf-8')
+ reparsed = minidom.parseString(rough_string)
+ pretty = reparsed.toprettyxml(indent=' ')
+
+ write_text_test_results(ResultType.REPORTS, xml_name, pretty)
+ else:
+ xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
+ make_dirs(ResultType.REPORTS.path)
+ run_coverage(args, output_file, 'xml', ['-i', '-o', xml_path])
+
+
+def _generate_powershell_xml(coverage_file):
+ """
+ :type coverage_file: str
+ :rtype: Element
+ """
+ coverage_info = read_json_file(coverage_file)
+
+ content_root = data_context().content.root
+ is_ansible = data_context().content.is_ansible
+
+ packages = {}
+ for path, results in coverage_info.items():
+ filename = os.path.splitext(os.path.basename(path))[0]
+
+ if filename.startswith('Ansible.ModuleUtils'):
+ package = 'ansible.module_utils'
+ elif is_ansible:
+ package = 'ansible.modules'
+ else:
+ rel_path = path[len(content_root) + 1:]
+ plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
+ package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)
+
+ if package not in packages:
+ packages[package] = {}
+
+ packages[package][path] = results
+
+ elem_coverage = Element('coverage')
+ elem_coverage.append(
+ Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
+ elem_coverage.append(
+ Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))
+
+ elem_sources = SubElement(elem_coverage, 'sources')
+
+ elem_source = SubElement(elem_sources, 'source')
+ elem_source.text = data_context().content.root
+
+ elem_packages = SubElement(elem_coverage, 'packages')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for package_name, package_data in packages.items():
+ lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_coverage.attrib.update({
+ 'branch-rate': '0',
+ 'branches-covered': '0',
+ 'branches-valid': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+        'lines-covered': str(total_lines_hit),
+        'lines-valid': str(total_line_count),
+ 'timestamp': str(int(time.time())),
+ 'version': get_ansible_version(),
+ })
+
+ return elem_coverage
+
+
+def _add_cobertura_package(packages, package_name, package_data):
+ """
+ :type packages: SubElement
+ :type package_name: str
+ :type package_data: Dict[str, Dict[str, int]]
+ :rtype: Tuple[int, int]
+ """
+ elem_package = SubElement(packages, 'package')
+ elem_classes = SubElement(elem_package, 'classes')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for path, results in package_data.items():
+ lines_hit = len([True for hits in results.values() if hits])
+ line_count = len(results)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_class = SubElement(elem_classes, 'class')
+
+ class_name = os.path.splitext(os.path.basename(path))[0]
+ if class_name.startswith("Ansible.ModuleUtils"):
+ class_name = class_name[20:]
+
+ content_root = data_context().content.root
+ filename = path
+ if filename.startswith(content_root):
+ filename = filename[len(content_root) + 1:]
+
+ elem_class.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'filename': filename,
+ 'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
+ 'name': class_name,
+ })
+
+ SubElement(elem_class, 'methods')
+
+ elem_lines = SubElement(elem_class, 'lines')
+
+ for number, hits in results.items():
+ elem_line = SubElement(elem_lines, 'line')
+ elem_line.attrib.update(
+ hits=str(hits),
+ number=str(number),
+ )
+
+ elem_package.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+ 'name': package_name,
+ })
+
+ return total_lines_hit, total_line_count
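
The line-rate attributes above are plain hit ratios computed from the PowerShell coverage dictionaries, which map line numbers to hit counts. The arithmetic with made-up data for one file:

    results = {'10': 3, '11': 0, '12': 1}  # line number -> hit count

    lines_hit = len([True for hits in results.values() if hits])
    line_count = len(results)

    print(lines_hit, line_count)             # -> 2 3
    print(round(lines_hit / line_count, 4))  # -> 0.6667
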
diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py
new file mode 100644
index 00000000..e5434231
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage_util.py
@@ -0,0 +1,125 @@
+"""Utility code for facilitating collection of code coverage when running tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import os
+import tempfile
+
+from .config import (
+ IntegrationConfig,
+ SanityConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_text_file,
+)
+
+from .util import (
+ COVERAGE_CONFIG_NAME,
+ remove_tree,
+)
+
+from .data import (
+ data_context,
+)
+
+
+@contextlib.contextmanager
+def coverage_context(args): # type: (TestConfig) -> None
+ """Content to set up and clean up code coverage configuration for tests."""
+ coverage_setup(args)
+
+ try:
+ yield
+ finally:
+ coverage_cleanup(args)
+
+
+def coverage_setup(args): # type: (TestConfig) -> None
+ """Set up code coverage configuration before running tests."""
+ if not args.coverage:
+ return
+
+ coverage_config = generate_coverage_config(args)
+
+ if args.explain:
+ args.coverage_config_base_path = '/tmp/coverage-temp-dir'
+ else:
+ args.coverage_config_base_path = tempfile.mkdtemp()
+
+ write_text_file(os.path.join(args.coverage_config_base_path, COVERAGE_CONFIG_NAME), coverage_config)
+
+
+def coverage_cleanup(args): # type: (TestConfig) -> None
+ """Clean up code coverage configuration after tests have finished."""
+ if args.coverage_config_base_path and not args.explain:
+ remove_tree(args.coverage_config_base_path)
+ args.coverage_config_base_path = None
+
+
+def generate_coverage_config(args): # type: (TestConfig) -> str
+ """Generate code coverage configuration for tests."""
+ if data_context().content.collection:
+ coverage_config = generate_collection_coverage_config(args)
+ else:
+ coverage_config = generate_ansible_coverage_config()
+
+ return coverage_config
+
+
+def generate_ansible_coverage_config(): # type: () -> str
+ """Generate code coverage configuration for Ansible tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+
+omit =
+ */python*/dist-packages/*
+ */python*/site-packages/*
+ */python*/distutils/*
+ */pyshared/*
+ */pytest
+ */AnsiballZ_*.py
+ */test/results/*
+'''
+
+ return coverage_config
+
+
+def generate_collection_coverage_config(args): # type: (TestConfig) -> str
+ """Generate code coverage configuration for Ansible Collection tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+disable_warnings =
+ no-data-collected
+'''
+
+ if isinstance(args, IntegrationConfig):
+ coverage_config += '''
+include =
+ %s/*
+ */%s/*
+''' % (data_context().content.root, data_context().content.collection.directory)
+ elif isinstance(args, SanityConfig):
+ # temporary work-around for import sanity test
+ coverage_config += '''
+include =
+ %s/*
+
+omit =
+ %s/*
+''' % (data_context().content.root, os.path.join(data_context().content.root, data_context().content.results_path))
+ else:
+ coverage_config += '''
+include =
+ %s/*
+''' % data_context().content.root
+
+ return coverage_config
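
The interesting part of the integration-test branch above is the include substitution: the content root is matched both as an absolute path on the controller and as a relative suffix on delegated hosts. For a hypothetical collection:

    root = '/root/ansible_collections/ns/col'  # hypothetical content root
    directory = 'ansible_collections/ns/col'   # hypothetical collection directory

    print('include =\n    %s/*\n    */%s/*' % (root, directory))
    # include =
    #     /root/ansible_collections/ns/col/*
    #     */ansible_collections/ns/col/*
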
diff --git a/test/lib/ansible_test/_internal/csharp_import_analysis.py b/test/lib/ansible_test/_internal/csharp_import_analysis.py
new file mode 100644
index 00000000..daa8892c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/csharp_import_analysis.py
@@ -0,0 +1,106 @@
+"""Analyze C# import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .io import (
+ open_text_file,
+)
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ resolve_csharp_ps_util,
+)
+
+from .data import (
+ data_context,
+)
+
+
+def get_csharp_module_utils_imports(powershell_targets, csharp_targets):
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths.
+ :type powershell_targets: list[TestTarget] - C# files
+ :type csharp_targets: list[TestTarget] - PS files
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
+
+ for target in csharp_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
+
+ imports = dict([(module_util, set()) for module_util in module_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_csharp_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_csharp_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ return set(get_csharp_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
+ if os.path.splitext(p)[1] == '.cs')
+
+
+def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :type is_pure_csharp: bool
+ :rtype: set[str]
+ """
+ imports = set()
+ if is_pure_csharp:
+ pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
+ else:
+ pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')
+
+ with open_text_file(path) as module_file:
+ for line_number, line in enumerate(module_file, 1):
+ match = re.search(pattern, line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
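
The two patterns above can be exercised directly. Against representative source lines (the util names are made up):

    import re

    pure_cs = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
    requires = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')

    print(pure_cs.search('using Ansible.Basic;').group(1))
    # -> Ansible.Basic
    print(requires.search('#AnsibleRequires -CSharpUtil Ansible.Become').group(1))
    # -> Ansible.Become
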
diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py
new file mode 100644
index 00000000..38ae6d21
--- /dev/null
+++ b/test/lib/ansible_test/_internal/data.py
@@ -0,0 +1,200 @@
+"""Context information for the current invocation of ansible-test."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+ import_plugins,
+ is_subdir,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ ANSIBLE_SOURCE_ROOT,
+ display,
+)
+
+from .provider import (
+ find_path_provider,
+ get_path_provider_classes,
+ ProviderNotFoundForPath,
+)
+
+from .provider.source import (
+ SourceProvider,
+)
+
+from .provider.source.unversioned import (
+ UnversionedSource,
+)
+
+from .provider.source.installed import (
+ InstalledSource,
+)
+
+from .provider.layout import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class DataContext:
+ """Data context providing details about the current execution environment for ansible-test."""
+ def __init__(self):
+ content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
+ current_path = os.getcwd()
+
+ layout_providers = get_path_provider_classes(LayoutProvider)
+ source_providers = get_path_provider_classes(SourceProvider)
+
+ self.__layout_providers = layout_providers
+ self.__source_providers = source_providers
+ self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]]
+
+        self.payload_callbacks = []  # type: t.List[t.Callable[[t.List[t.Tuple[str, str]]], None]]
+
+ if content_path:
+ content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
+ elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
+ content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
+ else:
+ content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
+
+ self.content = content # type: ContentLayout
+
+ def create_collection_layouts(self): # type: () -> t.List[ContentLayout]
+ """
+ Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
+ An empty list is returned if the current content layout is not a collection layout.
+ """
+ layout = self.content
+ collection = layout.collection
+
+ if not collection:
+ return []
+
+ root_path = os.path.join(collection.root, 'ansible_collections')
+ display.info('Scanning collection root: %s' % root_path, verbosity=1)
+ namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
+ collections = []
+
+ for namespace_name in namespace_names:
+ namespace_path = os.path.join(root_path, namespace_name)
+ collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
+
+ for collection_name in collection_names:
+ collection_path = os.path.join(namespace_path, collection_name)
+
+ if collection_path == os.path.join(collection.root, collection.directory):
+ collection_layout = layout
+ else:
+ collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
+
+ file_count = len(collection_layout.all_files())
+
+ if not file_count:
+ continue
+
+ display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
+ collections.append(collection_layout)
+
+ return collections
+
+ @staticmethod
+ def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]]
+ source_providers, # type: t.List[t.Type[SourceProvider]]
+ root, # type: str
+ walk, # type: bool
+ ): # type: (...) -> ContentLayout
+ """Create a content layout using the given providers and root path."""
+ layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
+
+ try:
+ # Begin the search for the source provider at the layout provider root.
+ # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
+ # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
+ # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
+ source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(layout_provider.root)
+
+ layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
+
+ return layout
+
+ def __create_ansible_source(self):
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not ANSIBLE_SOURCE_ROOT:
+ sources = []
+
+ source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ return tuple(sources)
+
+ if self.content.is_ansible:
+ return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
+
+ try:
+ source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
+
+ return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
+
+ @property
+ def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...]
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not self.__ansible_source:
+ self.__ansible_source = self.__create_ansible_source()
+
+ return self.__ansible_source
+
+    def register_payload_callback(self, callback):  # type: (t.Callable[[t.List[t.Tuple[str, str]]], None]) -> None
+ """Register the given payload callback."""
+ self.payload_callbacks.append(callback)
+
+
+def data_init(): # type: () -> DataContext
+ """Initialize provider plugins."""
+ provider_types = (
+ 'layout',
+ 'source',
+ )
+
+ for provider_type in provider_types:
+ import_plugins('provider/%s' % provider_type)
+
+ try:
+ context = DataContext()
+ except ProviderNotFoundForPath:
+ options = [
+ ' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/',
+ ]
+
+ if ANSIBLE_SOURCE_ROOT:
+ options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT)
+
+ raise ApplicationError('''The current working directory must be at or below:
+
+%s
+
+Current working directory: %s''' % ('\n'.join(options), os.getcwd()))
+
+ return context
+
+
+def data_context(): # type: () -> DataContext
+ """Return the current data context."""
+ try:
+ return data_context.instance
+ except AttributeError:
+ data_context.instance = data_init()
+ return data_context.instance
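
data_context() memoizes its result as an attribute on the function object itself rather than through a module-level global, so the DataContext is built once per process on first use. The same pattern in isolation:

    def get_context():
        try:
            return get_context.instance
        except AttributeError:
            get_context.instance = object()  # stands in for DataContext()
            return get_context.instance

    assert get_context() is get_context()  # one instance per process
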
diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py
new file mode 100644
index 00000000..3262dd51
--- /dev/null
+++ b/test/lib/ansible_test/_internal/delegation.py
@@ -0,0 +1,667 @@
+"""Delegate test execution to another environment."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import tempfile
+
+from . import types as t
+
+from .io import (
+ make_dirs,
+)
+
+from .executor import (
+ SUPPORTED_PYTHON_VERSIONS,
+ HTTPTESTER_HOSTS,
+ create_shell_command,
+ run_httptester,
+ start_httptester,
+ get_python_interpreter,
+ get_python_version,
+)
+
+from .config import (
+ TestConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ WindowsIntegrationConfig,
+ NetworkIntegrationConfig,
+ ShellConfig,
+ SanityConfig,
+ UnitsConfig,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+)
+
+from .manage_ci import (
+ ManagePosixCI,
+ ManageWindowsCI,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ tempdir,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+ create_interpreter_wrapper,
+ get_docker_completion,
+ get_remote_completion,
+)
+
+from .docker_util import (
+ docker_exec,
+ docker_get,
+ docker_pull,
+ docker_put,
+ docker_rm,
+ docker_run,
+ docker_available,
+ docker_network_disconnect,
+ get_docker_networks,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+from .cloud import (
+ get_cloud_providers,
+)
+
+from .target import (
+ IntegrationTarget,
+)
+
+from .data import (
+ data_context,
+)
+
+from .payload import (
+ create_payload,
+)
+
+from .venv import (
+ create_virtual_environment,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+
+def check_delegation_args(args):
+ """
+ :type args: CommonConfig
+ """
+ if not isinstance(args, EnvironmentConfig):
+ return
+
+ if args.docker:
+ get_python_version(args, get_docker_completion(), args.docker_raw)
+ elif args.remote:
+ get_python_version(args, get_remote_completion(), args.remote)
+
+
+def delegate(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ :rtype: bool
+ """
+ if isinstance(args, TestConfig):
+ args.metadata.ci_provider = get_ci_provider().code
+
+ make_dirs(ResultType.TMP.path)
+
+ with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
+ args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
+ args.metadata.to_file(args.metadata_path)
+
+ try:
+ return delegate_command(args, exclude, require, integration_targets)
+ finally:
+ args.metadata_path = None
+ else:
+ return delegate_command(args, exclude, require, integration_targets)
+
+
+def delegate_command(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ :rtype: bool
+ """
+ if args.venv:
+ delegate_venv(args, exclude, require, integration_targets)
+ return True
+
+ if args.docker:
+ delegate_docker(args, exclude, require, integration_targets)
+ return True
+
+ if args.remote:
+ delegate_remote(args, exclude, require, integration_targets)
+ return True
+
+ return False
+
+
+def delegate_venv(args, # type: EnvironmentConfig
+ exclude, # type: t.List[str]
+ require, # type: t.List[str]
+ integration_targets, # type: t.Tuple[IntegrationTarget, ...]
+ ): # type: (...) -> None
+ """Delegate ansible-test execution to a virtual environment using venv or virtualenv."""
+ if args.python:
+ versions = (args.python_version,)
+ else:
+ versions = SUPPORTED_PYTHON_VERSIONS
+
+ if args.httptester:
+ needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases)
+
+ if needs_httptester:
+ display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester))
+
+ if args.venv_system_site_packages:
+ suffix = '-ssp'
+ else:
+ suffix = ''
+
+ venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s%s' % (version, suffix))) for version in versions)
+ venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path, args.venv_system_site_packages))
+
+ if not venvs:
+ raise ApplicationError('No usable virtual environment support found.')
+
+ options = {
+ '--venv': 0,
+ '--venv-system-site-packages': 0,
+ }
+
+ with tempdir() as inject_path:
+ for version, path in venvs.items():
+ create_interpreter_wrapper(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version))
+
+ python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version)
+
+ cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require)
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ cmd += ['--coverage-label', 'venv']
+
+ env = common_environment()
+
+ with tempdir() as library_path:
+ # expose ansible and ansible_test to the virtual environment (only required when running from an install)
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible'))
+ os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test'))
+
+ env.update(
+ PATH=inject_path + os.path.pathsep + env['PATH'],
+ PYTHONPATH=library_path,
+ )
+
+ run_command(args, cmd, env=env)
+
+
+def delegate_docker(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ """
+ test_image = args.docker
+ privileged = args.docker_privileged
+
+ if isinstance(args, ShellConfig):
+ use_httptester = args.httptester
+ else:
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
+
+ if use_httptester:
+ docker_pull(args, args.httptester)
+
+ docker_pull(args, test_image)
+
+ httptester_id = None
+ test_id = None
+ success = False
+
+ options = {
+ '--docker': 1,
+ '--docker-privileged': 0,
+ '--docker-util': 1,
+ }
+
+ python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw)
+
+ pwd = '/root'
+ ansible_root = os.path.join(pwd, 'ansible')
+
+ if data_context().content.collection:
+ content_root = os.path.join(pwd, data_context().content.collection.directory)
+ else:
+ content_root = ansible_root
+
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+
+ cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ image_label = args.docker_raw
+ image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label)
+ cmd += ['--coverage-label', 'docker-%s' % image_label]
+
+ if isinstance(args, IntegrationConfig):
+ if not args.allow_destructive:
+ cmd.append('--allow-destructive')
+
+ cmd_options = []
+
+ if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy):
+ cmd_options.append('-it')
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
+ try:
+ create_payload(args, local_source_fd.name)
+
+ if use_httptester:
+ httptester_id = run_httptester(args)
+ else:
+ httptester_id = None
+
+ test_options = [
+ '--detach',
+ '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
+ '--privileged=%s' % str(privileged).lower(),
+ ]
+
+ if args.docker_memory:
+ test_options.extend([
+ '--memory=%d' % args.docker_memory,
+ '--memory-swap=%d' % args.docker_memory,
+ ])
+
+ docker_socket = '/var/run/docker.sock'
+
+ if args.docker_seccomp != 'default':
+ test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp]
+
+ if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
+ test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)]
+
+ if httptester_id:
+ test_options += ['--env', 'HTTPTESTER=1']
+
+ network = get_docker_preferred_network_name(args)
+
+ if not is_docker_user_defined_network(network):
+ # legacy links are required when using the default bridge network instead of user-defined networks
+ for host in HTTPTESTER_HOSTS:
+ test_options += ['--link', '%s:%s' % (httptester_id, host)]
+
+ if isinstance(args, IntegrationConfig):
+ cloud_platforms = get_cloud_providers(args)
+
+ for cloud_platform in cloud_platforms:
+ test_options += cloud_platform.get_docker_run_options()
+
+ test_id = docker_run(args, test_image, options=test_options)[0]
+
+ if args.explain:
+ test_id = 'test_id'
+ else:
+ test_id = test_id.strip()
+
+ # write temporary files to /root since /tmp isn't ready immediately on container start
+ docker_put(args, test_id, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'), '/root/docker.sh')
+ docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh'])
+ docker_put(args, test_id, local_source_fd.name, '/root/test.tgz')
+ docker_exec(args, test_id, ['tar', 'oxzf', '/root/test.tgz', '-C', '/root'])
+
+ # docker images are only expected to have a single python version available
+ if isinstance(args, UnitsConfig) and not args.python:
+ cmd += ['--python', 'default']
+
+ # run unit tests unprivileged to prevent stray writes to the source tree
+ # also disconnect from the network once requirements have been installed
+ if isinstance(args, UnitsConfig):
+ writable_dirs = [
+ os.path.join(content_root, ResultType.JUNIT.relative_path),
+ os.path.join(content_root, ResultType.COVERAGE.relative_path),
+ ]
+
+ docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs)
+ docker_exec(args, test_id, ['chmod', '777'] + writable_dirs)
+ docker_exec(args, test_id, ['chmod', '755', '/root'])
+ docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)])
+
+ docker_exec(args, test_id, ['useradd', 'pytest', '--create-home'])
+
+ docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options)
+
+ networks = get_docker_networks(args, test_id)
+
+ if networks is not None:
+ for network in networks:
+ docker_network_disconnect(args, test_id, network)
+ else:
+ display.warning('Network disconnection is not supported (this is normal under podman). '
+ 'Tests will not be isolated from the network. Network-related tests may misbehave.')
+
+ cmd += ['--requirements-mode', 'skip']
+
+ cmd_options += ['--user', 'pytest']
+
+ try:
+ docker_exec(args, test_id, cmd, options=cmd_options)
+ # docker_exec will throw SubprocessError if not successful
+ # If we make it here, all the prep work earlier and the docker_exec line above were all successful.
+ success = True
+ finally:
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_test_root = os.path.dirname(remote_results_root)
+ remote_results_name = os.path.basename(remote_results_root)
+ remote_temp_file = os.path.join('/root', remote_results_name + '.tgz')
+
+ make_dirs(local_test_root) # make sure directory exists for collections which have no tests
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
+ docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root, remote_results_name])
+ docker_get(args, test_id, remote_temp_file, local_result_fd.name)
+ run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root])
+ finally:
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+ if test_id:
+ if args.docker_terminate == 'always' or (args.docker_terminate == 'success' and success):
+ docker_rm(args, test_id)
+
+
+def delegate_remote(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ """
+ remote = args.parsed_remote
+
+ core_ci = AnsibleCoreCI(args, remote.platform, remote.version, stage=args.remote_stage, provider=args.remote_provider, arch=remote.arch)
+ success = False
+ raw = False
+
+ if isinstance(args, ShellConfig):
+ use_httptester = args.httptester
+ raw = args.raw
+ else:
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
+
+ if use_httptester and not docker_available():
+ display.warning('Assuming --disable-httptester since `docker` is not available.')
+ use_httptester = False
+
+ httptester_id = None
+ ssh_options = []
+ content_root = None
+
+ try:
+ core_ci.start()
+
+ if use_httptester:
+ httptester_id, ssh_options = start_httptester(args)
+
+ core_ci.wait()
+
+ python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+ if remote.platform == 'windows':
+            # Windows doesn't need the ansible-test fluff; just run the SSH command
+ manage = ManageWindowsCI(core_ci)
+ manage.setup(python_version)
+
+ cmd = ['powershell.exe']
+ elif raw:
+ manage = ManagePosixCI(core_ci)
+ manage.setup(python_version)
+
+ cmd = create_shell_command(['bash'])
+ else:
+ manage = ManagePosixCI(core_ci)
+ pwd = manage.setup(python_version)
+
+ options = {
+ '--remote': 1,
+ }
+
+ python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
+
+ ansible_root = os.path.join(pwd, 'ansible')
+
+ if data_context().content.collection:
+ content_root = os.path.join(pwd, data_context().content.collection.directory)
+ else:
+ content_root = ansible_root
+
+ cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
+
+ if httptester_id:
+ cmd += ['--inject-httptester']
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ cmd += ['--coverage-label', 'remote-%s-%s' % (remote.platform, remote.version)]
+
+ if isinstance(args, IntegrationConfig):
+ if not args.allow_destructive:
+ cmd.append('--allow-destructive')
+
+ # remote instances are only expected to have a single python version available
+ if isinstance(args, UnitsConfig) and not args.python:
+ cmd += ['--python', 'default']
+
+ if isinstance(args, IntegrationConfig):
+ cloud_platforms = get_cloud_providers(args)
+
+ for cloud_platform in cloud_platforms:
+ ssh_options += cloud_platform.get_remote_ssh_options()
+
+ try:
+ manage.ssh(cmd, ssh_options)
+ success = True
+ finally:
+ download = False
+
+ if remote.platform != 'windows':
+ download = True
+
+ if isinstance(args, ShellConfig):
+ if args.raw:
+ download = False
+
+ if download and content_root:
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+ remote_results_name = os.path.basename(remote_results_root)
+ remote_temp_path = os.path.join('/tmp', remote_results_name)
+
+                # AIX cp and GNU cp accept different options; no single invocation
+                # works for both, so choose the flags per platform.
+ cp_opts = '-hr' if remote.platform in ['aix', 'ibmi'] else '-a'
+
+ manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
+ manage.download(remote_temp_path, local_test_root)
+ finally:
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ core_ci.stop()
+
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+
+def generate_command(args, python_interpreter, ansible_bin_path, content_root, options, exclude, require):
+ """
+ :type args: EnvironmentConfig
+ :type python_interpreter: str | None
+ :type ansible_bin_path: str
+ :type content_root: str
+ :type options: dict[str, int]
+ :type exclude: list[str]
+ :type require: list[str]
+ :rtype: list[str]
+ """
+ options['--color'] = 1
+
+ cmd = [os.path.join(ansible_bin_path, 'ansible-test')]
+
+ if python_interpreter:
+ cmd = [python_interpreter] + cmd
+
+ # Force the encoding used during delegation.
+ # This is only needed because ansible-test relies on Python's file system encoding.
+ # Environments that do not have the locale configured are thus unable to work with unicode file paths.
+ # Examples include FreeBSD and some Linux containers.
+ env_vars = dict(
+ LC_ALL='en_US.UTF-8',
+ ANSIBLE_TEST_CONTENT_ROOT=content_root,
+ )
+
+ env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)]
+
+ cmd = ['/usr/bin/env'] + env_args + cmd
+
+ cmd += list(filter_options(args, sys.argv[1:], options, exclude, require))
+ cmd += ['--color', 'yes' if args.color else 'no']
+
+ if args.requirements:
+ cmd += ['--requirements']
+
+ if isinstance(args, ShellConfig):
+ cmd = create_shell_command(cmd)
+ elif isinstance(args, SanityConfig):
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd += ['--base-branch', base_branch]
+
+ return cmd
+
+
+def filter_options(args, argv, options, exclude, require):
+ """
+ :type args: EnvironmentConfig
+ :type argv: list[str]
+ :type options: dict[str, int]
+ :type exclude: list[str]
+ :type require: list[str]
+ :rtype: collections.Iterable[str]
+ """
+ options = options.copy()
+
+ options['--requirements'] = 0
+ options['--truncate'] = 1
+ options['--redact'] = 0
+ options['--no-redact'] = 0
+
+ if isinstance(args, TestConfig):
+ options.update({
+ '--changed': 0,
+ '--tracked': 0,
+ '--untracked': 0,
+ '--ignore-committed': 0,
+ '--ignore-staged': 0,
+ '--ignore-unstaged': 0,
+ '--changed-from': 1,
+ '--changed-path': 1,
+ '--metadata': 1,
+ '--exclude': 1,
+ '--require': 1,
+ })
+ elif isinstance(args, SanityConfig):
+ options.update({
+ '--base-branch': 1,
+ })
+
+ if isinstance(args, IntegrationConfig):
+ options.update({
+ '--no-temp-unicode': 0,
+ '--no-pip-check': 0,
+ })
+
+ if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)):
+ options.update({
+ '--inventory': 1,
+ })
+
+ remaining = 0
+
+ for arg in argv:
+ if not arg.startswith('-') and remaining:
+ remaining -= 1
+ continue
+
+ remaining = 0
+
+ parts = arg.split('=', 1)
+ key = parts[0]
+
+ if key in options:
+ remaining = options[key] - len(parts) + 1
+ continue
+
+ yield arg
+
+ for arg in args.delegate_args:
+ yield arg
+
+ for target in exclude:
+ yield '--exclude'
+ yield target
+
+ for target in require:
+ yield '--require'
+ yield target
+
+ if isinstance(args, TestConfig):
+ if args.metadata_path:
+ yield '--metadata'
+ yield args.metadata_path
+
+ yield '--truncate'
+ yield '%d' % args.truncate
+
+ if args.redact:
+ yield '--redact'
+ else:
+ yield '--no-redact'
+
+ if isinstance(args, IntegrationConfig):
+ if args.no_temp_unicode:
+ yield '--no-temp-unicode'
+
+ if not args.pip_check:
+ yield '--no-pip-check'
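
filter_options() above re-parses sys.argv to drop options that only make sense on the originating side, using the option table to know how many values each dropped option consumes. A condensed sketch of the argv scan (the option table and argv are made up):

    options = {'--docker': 1, '--docker-privileged': 0, '--color': 1}

    def strip(argv):
        remaining = 0
        for arg in argv:
            if not arg.startswith('-') and remaining:
                remaining -= 1  # consume a value belonging to a dropped option
                continue
            remaining = 0
            parts = arg.split('=', 1)
            if parts[0] in options:
                remaining = options[parts[0]] - len(parts) + 1
                continue
            yield arg

    print(list(strip(['units', '--docker', 'centos8', '--docker-privileged', '-v'])))
    # -> ['units', '-v']
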
diff --git a/test/lib/ansible_test/_internal/diff.py b/test/lib/ansible_test/_internal/diff.py
new file mode 100644
index 00000000..1e2038b9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/diff.py
@@ -0,0 +1,256 @@
+"""Diff parsing functions and classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import textwrap
+import traceback
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+)
+
+
+def parse_diff(lines):
+ """
+ :type lines: list[str]
+ :rtype: list[FileDiff]
+ """
+ return DiffParser(lines).files
+
+
+class FileDiff:
+ """Parsed diff for a single file."""
+ def __init__(self, old_path, new_path):
+ """
+ :type old_path: str
+ :type new_path: str
+ """
+ self.old = DiffSide(old_path, new=False)
+ self.new = DiffSide(new_path, new=True)
+ self.headers = [] # type: t.List[str]
+ self.binary = False
+
+ def append_header(self, line):
+ """
+ :type line: str
+ """
+ self.headers.append(line)
+
+ @property
+ def is_complete(self):
+ """
+ :rtype: bool
+ """
+ return self.old.is_complete and self.new.is_complete
+
+
+class DiffSide:
+ """Parsed diff for a single 'side' of a single file."""
+ def __init__(self, path, new):
+ """
+ :type path: str
+ :type new: bool
+ """
+ self.path = path
+ self.new = new
+ self.prefix = '+' if self.new else '-'
+ self.eof_newline = True
+ self.exists = True
+
+ self.lines = [] # type: t.List[t.Tuple[int, str]]
+ self.lines_and_context = [] # type: t.List[t.Tuple[int, str]]
+ self.ranges = [] # type: t.List[t.Tuple[int, int]]
+
+ self._next_line_number = 0
+ self._lines_remaining = 0
+ self._range_start = 0
+
+ def set_start(self, line_start, line_count):
+ """
+ :type line_start: int
+ :type line_count: int
+ """
+ self._next_line_number = line_start
+ self._lines_remaining = line_count
+ self._range_start = 0
+
+ def append(self, line):
+ """
+ :type line: str
+ """
+ if self._lines_remaining <= 0:
+ raise Exception('Diff range overflow.')
+
+ entry = self._next_line_number, line
+
+ if line.startswith(' '):
+ pass
+ elif line.startswith(self.prefix):
+ self.lines.append(entry)
+
+ if not self._range_start:
+ self._range_start = self._next_line_number
+ else:
+ raise Exception('Unexpected diff content prefix.')
+
+ self.lines_and_context.append(entry)
+
+ self._lines_remaining -= 1
+
+ if self._range_start:
+ if self.is_complete:
+ range_end = self._next_line_number
+ elif line.startswith(' '):
+ range_end = self._next_line_number - 1
+ else:
+ range_end = 0
+
+ if range_end:
+ self.ranges.append((self._range_start, range_end))
+ self._range_start = 0
+
+ self._next_line_number += 1
+
+ @property
+ def is_complete(self):
+ """
+ :rtype: bool
+ """
+ return self._lines_remaining == 0
+
+ def format_lines(self, context=True):
+ """
+ :type context: bool
+ :rtype: list[str]
+ """
+ if context:
+ lines = self.lines_and_context
+ else:
+ lines = self.lines
+
+ return ['%s:%4d %s' % (self.path, line[0], line[1]) for line in lines]
+
+
+class DiffParser:
+ """Parse diff lines."""
+ def __init__(self, lines):
+ """
+ :type lines: list[str]
+ """
+ self.lines = lines
+ self.files = [] # type: t.List[FileDiff]
+
+ self.action = self.process_start
+ self.line_number = 0
+ self.previous_line = None # type: t.Optional[str]
+ self.line = None # type: t.Optional[str]
+ self.file = None # type: t.Optional[FileDiff]
+
+ for self.line in self.lines:
+ self.line_number += 1
+
+ try:
+ self.action()
+ except Exception as ex:
+ message = textwrap.dedent('''
+ %s
+
+ Line: %d
+ Previous: %s
+ Current: %s
+ %s
+ ''').strip() % (
+ ex,
+ self.line_number,
+ self.previous_line or '',
+ self.line or '',
+ traceback.format_exc(),
+ )
+
+ raise ApplicationError(message.strip())
+
+ self.previous_line = self.line
+
+ self.complete_file()
+
+ def process_start(self):
+ """Process a diff start line."""
+ self.complete_file()
+
+ match = re.search(r'^diff --git "?a/(?P<old_path>.*)"? "?b/(?P<new_path>.*)"?$', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff start line.')
+
+ self.file = FileDiff(match.group('old_path'), match.group('new_path'))
+ self.action = self.process_continue
+
+ def process_range(self):
+ """Process a diff range line."""
+ match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff range line.')
+
+ self.file.old.set_start(int(match.group('old_start') or 1), int(match.group('old_count')))
+ self.file.new.set_start(int(match.group('new_start') or 1), int(match.group('new_count')))
+ self.action = self.process_content
+
+ def process_continue(self):
+ """Process a diff start, range or header line."""
+ if self.line.startswith('diff '):
+ self.process_start()
+ elif self.line.startswith('@@ '):
+ self.process_range()
+ else:
+ self.process_header()
+
+ def process_header(self):
+ """Process a diff header line."""
+ if self.line.startswith('Binary files '):
+ self.file.binary = True
+ elif self.line == '--- /dev/null':
+ self.file.old.exists = False
+ elif self.line == '+++ /dev/null':
+ self.file.new.exists = False
+ else:
+ self.file.append_header(self.line)
+
+ def process_content(self):
+ """Process a diff content line."""
+ if self.line == r'\ No newline at end of file':
+ if self.previous_line.startswith(' '):
+ self.file.old.eof_newline = False
+ self.file.new.eof_newline = False
+ elif self.previous_line.startswith('-'):
+ self.file.old.eof_newline = False
+ elif self.previous_line.startswith('+'):
+ self.file.new.eof_newline = False
+ else:
+ raise Exception('Unexpected previous diff content line.')
+
+ return
+
+ if self.file.is_complete:
+ self.process_continue()
+ return
+
+ if self.line.startswith(' '):
+ self.file.old.append(self.line)
+ self.file.new.append(self.line)
+ elif self.line.startswith('-'):
+ self.file.old.append(self.line)
+ elif self.line.startswith('+'):
+ self.file.new.append(self.line)
+ else:
+ raise Exception('Unexpected diff content line.')
+
+ def complete_file(self):
+ """Complete processing of the current file, if any."""
+ if not self.file:
+ return
+
+ self.files.append(self.file)
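
Given a minimal unified diff, the parser above records changed lines with their line numbers and collapses them into ranges. A sketch of what parse_diff() would extract (the file contents are made up):

    diff_lines = [
        'diff --git a/hello.py b/hello.py',
        'index 1111111..2222222 100644',
        '--- a/hello.py',
        '+++ b/hello.py',
        '@@ -1,2 +1,2 @@',
        ' import sys',
        '-print "hi"',
        '+print("hi")',
    ]

    # parse_diff(diff_lines) yields a single FileDiff where:
    #   file.old.lines  == [(2, '-print "hi"')]
    #   file.new.lines  == [(2, '+print("hi")')]
    #   file.old.ranges == [(2, 2)] and file.new.ranges == [(2, 2)]
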
diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py
new file mode 100644
index 00000000..54007d1c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/docker_util.py
@@ -0,0 +1,399 @@
+"""Functions for accessing docker via the docker cli."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ find_executable,
+ SubprocessError,
+)
+
+from .http import (
+ urlparse,
+)
+
+from .util_common import (
+ run_command,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+BUFFER_SIZE = 256 * 256
+
+
+def docker_available():
+ """
+ :rtype: bool
+ """
+    return find_executable('docker', required=False) is not None
+
+
+def get_docker_hostname(): # type: () -> str
+ """Return the hostname of the Docker service."""
+ try:
+ return get_docker_hostname.hostname
+ except AttributeError:
+ pass
+
+ docker_host = os.environ.get('DOCKER_HOST')
+
+ if docker_host and docker_host.startswith('tcp://'):
+ try:
+ hostname = urlparse(docker_host)[1].split(':')[0]
+ display.info('Detected Docker host: %s' % hostname, verbosity=1)
+ except ValueError:
+ hostname = 'localhost'
+ display.warning('Could not parse DOCKER_HOST environment variable "%s", falling back to localhost.' % docker_host)
+ else:
+ hostname = 'localhost'
+ display.info('Assuming Docker is available on localhost.', verbosity=1)
+
+ get_docker_hostname.hostname = hostname
+
+ return hostname
+
+
+def get_docker_container_id():
+ """
+ :rtype: str | None
+ """
+ try:
+ return get_docker_container_id.container_id
+ except AttributeError:
+ pass
+
+ path = '/proc/self/cpuset'
+ container_id = None
+
+ if os.path.exists(path):
+ # File content varies based on the environment:
+ # No Container: /
+ # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
+ # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
+ # Podman: /../../../../../..
+ contents = read_text_file(path)
+
+ cgroup_path, cgroup_name = os.path.split(contents.strip())
+
+ if cgroup_path in ('/docker', '/azpl_job'):
+ container_id = cgroup_name
+
+ get_docker_container_id.container_id = container_id
+
+ if container_id:
+ display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)
+
+ return container_id
+
+
+def get_docker_container_ip(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :rtype: str
+ """
+ results = docker_inspect(args, container_id)
+ network_settings = results[0]['NetworkSettings']
+ networks = network_settings.get('Networks')
+
+ if networks:
+ network_name = get_docker_preferred_network_name(args)
+ ipaddress = networks[network_name]['IPAddress']
+ else:
+ # podman doesn't provide Networks, fall back to using IPAddress
+ ipaddress = network_settings['IPAddress']
+
+ if not ipaddress:
+ raise ApplicationError('Cannot retrieve IP address for container: %s' % container_id)
+
+ return ipaddress
+
+
+def get_docker_network_name(args, container_id): # type: (EnvironmentConfig, str) -> str
+ """
+ Return the network name of the specified container.
+ Raises an exception if zero or more than one network is found.
+ """
+ networks = get_docker_networks(args, container_id)
+
+ if not networks:
+ raise ApplicationError('No network found for Docker container: %s.' % container_id)
+
+ if len(networks) > 1:
+ raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (container_id, ', '.join(networks)))
+
+ return networks[0]
+
+
+def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str
+ """
+ Return the preferred network name for use with Docker. The selection logic is:
+ - the network selected by the user with `--docker-network`
+ - the network of the currently running docker container (if any)
+ - the default docker network (returns None)
+ """
+ network = None
+
+ if args.docker_network:
+ network = args.docker_network
+ else:
+ current_container_id = get_docker_container_id()
+
+ if current_container_id:
+ # Make sure any additional containers we launch use the same network as the current container we're running in.
+ # This is needed when ansible-test is running in a container that is not connected to Docker's default network.
+ network = get_docker_network_name(args, current_container_id)
+
+ return network
+
+
+def is_docker_user_defined_network(network): # type: (str) -> bool
+ """Return True if the network being used is a user-defined network."""
+ return network and network != 'bridge'
+
+
+def get_docker_networks(args, container_id):
+ """
+    :type args: EnvironmentConfig
+    :type container_id: str
+ :rtype: list[str]
+ """
+ results = docker_inspect(args, container_id)
+    # podman doesn't return Networks; just silently return None if it's missing
+ networks = results[0]['NetworkSettings'].get('Networks')
+ if networks is None:
+ return None
+ return sorted(networks)
+
+
+def docker_pull(args, image):
+ """
+ :type args: EnvironmentConfig
+ :type image: str
+ """
+ if ('@' in image or ':' in image) and docker_images(args, image):
+ display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2)
+ return
+
+ if not args.docker_pull:
+ display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
+ return
+
+ for _iteration in range(1, 10):
+ try:
+ docker_command(args, ['pull', image])
+ return
+ except SubprocessError:
+ display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
+ time.sleep(3)
+
+ raise ApplicationError('Failed to pull docker image "%s".' % image)
+
+
+def docker_put(args, container_id, src, dst):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type src: str
+ :type dst: str
+ """
+ # avoid 'docker cp' due to a bug which causes 'docker rm' to fail
+ with open_binary_file(src) as src_fd:
+ docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
+ options=['-i'], stdin=src_fd, capture=True)
+
+
+def docker_get(args, container_id, src, dst):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type src: str
+ :type dst: str
+ """
+ # avoid 'docker cp' due to a bug which causes 'docker rm' to fail
+ with open_binary_file(dst, 'wb') as dst_fd:
+ docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
+ options=['-i'], stdout=dst_fd, capture=True)
+
+
+def docker_run(args, image, options, cmd=None):
+ """
+ :type args: EnvironmentConfig
+ :type image: str
+ :type options: list[str] | None
+ :type cmd: list[str] | None
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ if not cmd:
+ cmd = []
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # Only when the network is not the default bridge network.
+ # Using this with the default bridge network results in an error when using --link: links are only supported for user-defined networks
+ options.extend(['--network', network])
+
+ for _iteration in range(1, 3):
+ try:
+ return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
+ except SubprocessError as ex:
+ display.error(ex)
+ display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
+ time.sleep(3)
+
+ raise ApplicationError('Failed to run docker image "%s".' % image)
+
+
+def docker_images(args, image):
+ """
+    :type args: CommonConfig
+    :type image: str
+ :rtype: list[dict[str, any]]
+ """
+ try:
+ stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
+ except SubprocessError as ex:
+ if 'no such image' in ex.stderr:
+ stdout = '' # podman does not handle this gracefully, exits 125
+ else:
+ raise ex
+ results = [json.loads(line) for line in stdout.splitlines()]
+ return results
+
+
+def docker_rm(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ """
+ try:
+ docker_command(args, ['rm', '-f', container_id], capture=True)
+ except SubprocessError as ex:
+ if 'no such container' in ex.stderr:
+ pass # podman does not handle this gracefully, exits 1
+ else:
+ raise ex
+
+
+def docker_inspect(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :rtype: list[dict]
+ """
+ if args.explain:
+ return []
+
+ try:
+ stdout = docker_command(args, ['inspect', container_id], capture=True)[0]
+ return json.loads(stdout)
+ except SubprocessError as ex:
+ if 'no such image' in ex.stderr:
+ return [] # podman does not handle this gracefully, exits 125
+ try:
+ return json.loads(ex.stdout)
+ except Exception:
+ raise ex
+
+
+def docker_network_disconnect(args, container_id, network):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type network: str
+ """
+ docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
+
+
+def docker_network_inspect(args, network):
+ """
+ :type args: EnvironmentConfig
+ :type network: str
+ :rtype: list[dict]
+ """
+ if args.explain:
+ return []
+
+ try:
+ stdout = docker_command(args, ['network', 'inspect', network], capture=True)[0]
+ return json.loads(stdout)
+ except SubprocessError as ex:
+ try:
+ return json.loads(ex.stdout)
+ except Exception:
+ raise ex
+
+
+def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type cmd: list[str]
+ :type options: list[str] | None
+ :type capture: bool
+ :type stdin: BinaryIO | None
+ :type stdout: BinaryIO | None
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
+
+
+def docker_info(args):
+ """
+ :type args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)
+ return json.loads(stdout)
+
+
+def docker_version(args):
+ """
+ :type args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)
+ return json.loads(stdout)
+
+
+def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=False):
+ """
+ :type args: CommonConfig
+ :type cmd: list[str]
+ :type capture: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type always: bool
+ :rtype: str | None, str | None
+ """
+ env = docker_environment()
+ return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always)
+
+
+def docker_environment():
+ """
+ :rtype: dict[str, str]
+ """
+ env = common_environment()
+ env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
+ return env
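+
+
+# Behaviour sketch (illustrative): with DOCKER_HOST=tcp://127.0.0.1:2375 exported,
+# docker_environment() returns the common environment plus that DOCKER_* variable,
+# so docker_command() above runs 'docker' against the configured daemon.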
diff --git a/test/lib/ansible_test/_internal/encoding.py b/test/lib/ansible_test/_internal/encoding.py
new file mode 100644
index 00000000..8e014794
--- /dev/null
+++ b/test/lib/ansible_test/_internal/encoding.py
@@ -0,0 +1,41 @@
+"""Functions for encoding and decoding strings."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import types as t
+
+ENCODING = 'utf-8'
+
+Text = type(u'')
+
+
+def to_optional_bytes(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[bytes]
+ """Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
+ return None if value is None else to_bytes(value, errors)
+
+
+def to_optional_text(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[t.Text]
+ """Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
+ return None if value is None else to_text(value, errors)
+
+
+def to_bytes(value, errors='strict'): # type: (t.AnyStr, str) -> bytes
+ """Return the given value as bytes encoded using UTF-8 if not already bytes."""
+ if isinstance(value, bytes):
+ return value
+
+ if isinstance(value, Text):
+ return value.encode(ENCODING, errors)
+
+ raise Exception('value is not bytes or text: %s' % type(value))
+
+
+def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> t.Text
+ """Return the given value as text decoded using UTF-8 if not already text."""
+ if isinstance(value, bytes):
+ return value.decode(ENCODING, errors)
+
+ if isinstance(value, Text):
+ return value
+
+ raise Exception('value is not bytes or text: %s' % type(value))
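+
+
+# Usage sketch (doctest-style illustration, not part of the original patch):
+#
+#   >>> to_bytes(u'caf\xe9') == b'caf\xc3\xa9'
+#   True
+#   >>> to_text(b'caf\xc3\xa9') == u'caf\xe9'
+#   True
+#
+# Both helpers are no-ops when the value is already the requested type.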
diff --git a/test/lib/ansible_test/_internal/env.py b/test/lib/ansible_test/_internal/env.py
new file mode 100644
index 00000000..60c0245e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/env.py
@@ -0,0 +1,293 @@
+"""Show information about the test environment."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import functools
+import os
+import platform
+import signal
+import sys
+import time
+
+from .config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .util import (
+ display,
+ find_executable,
+ SubprocessError,
+ ApplicationError,
+ get_ansible_version,
+ get_available_python_versions,
+)
+
+from .util_common import (
+ data_context,
+ write_json_test_results,
+ ResultType,
+)
+
+from .docker_util import (
+ docker_info,
+ docker_version
+)
+
+from .thread import (
+ WrappedThread,
+)
+
+from .constants import (
+ TIMEOUT_PATH,
+)
+
+from .test import (
+ TestTimeout,
+)
+
+from .executor import (
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+
+class EnvConfig(CommonConfig):
+ """Configuration for the tools command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(EnvConfig, self).__init__(args, 'env')
+
+ self.show = args.show
+ self.dump = args.dump
+ self.timeout = args.timeout
+ self.list_files = args.list_files
+
+ if not self.show and not self.dump and self.timeout is None and not self.list_files:
+ # default to --show if no options were given
+ self.show = True
+
+
+def command_env(args):
+ """
+ :type args: EnvConfig
+ """
+ show_dump_env(args)
+ list_files_env(args)
+ set_timeout(args)
+
+
+def show_dump_env(args):
+ """
+ :type args: EnvConfig
+ """
+ if not args.show and not args.dump:
+ return
+
+ data = dict(
+ ansible=dict(
+ version=get_ansible_version(),
+ ),
+ docker=get_docker_details(args),
+ environ=os.environ.copy(),
+ location=dict(
+ pwd=os.environ.get('PWD', None),
+ cwd=os.getcwd(),
+ ),
+ git=get_ci_provider().get_git_details(args),
+ platform=dict(
+ datetime=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
+ platform=platform.platform(),
+ uname=platform.uname(),
+ ),
+ python=dict(
+ executable=sys.executable,
+ version=platform.python_version(),
+ ),
+ interpreters=get_available_python_versions(SUPPORTED_PYTHON_VERSIONS),
+ )
+
+ if args.show:
+ verbose = {
+ 'docker': 3,
+ 'docker.executable': 0,
+ 'environ': 2,
+ 'platform.uname': 1,
+ }
+
+ show_dict(data, verbose)
+
+ if args.dump and not args.explain:
+ write_json_test_results(ResultType.BOT, 'data-environment.json', data)
+
+
+def list_files_env(args): # type: (EnvConfig) -> None
+ """List files on stdout."""
+ if not args.list_files:
+ return
+
+ for path in data_context().content.all_files():
+ display.info(path)
+
+
+def set_timeout(args):
+ """
+ :type args: EnvConfig
+ """
+ if args.timeout is None:
+ return
+
+ if args.timeout:
+ deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=args.timeout)).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ display.info('Setting a %d minute test timeout which will end at: %s' % (args.timeout, deadline), verbosity=1)
+ else:
+ deadline = None
+
+ display.info('Clearing existing test timeout.', verbosity=1)
+
+ if args.explain:
+ return
+
+ if deadline:
+ data = dict(
+ duration=args.timeout,
+ deadline=deadline,
+ )
+
+ write_json_file(TIMEOUT_PATH, data)
+ elif os.path.exists(TIMEOUT_PATH):
+ os.remove(TIMEOUT_PATH)
+
+
+def get_timeout():
+ """
+ :rtype: dict[str, any] | None
+ """
+ if not os.path.exists(TIMEOUT_PATH):
+ return None
+
+ data = read_json_file(TIMEOUT_PATH)
+ data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')
+
+ return data
+
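+# Sketch of the timeout file written by set_timeout() above (values are
+# hypothetical; the real location is TIMEOUT_PATH from .constants):
+#
+#   {"duration": 60, "deadline": "2020-01-01T01:00:00Z"}
+#
+# get_timeout() above parses "deadline" back into a datetime for comparison.
+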
+
+def configure_timeout(args):
+ """
+ :type args: CommonConfig
+ """
+ if isinstance(args, TestConfig):
+ configure_test_timeout(args) # only tests are subject to the timeout
+
+
+def configure_test_timeout(args):
+ """
+ :type args: TestConfig
+ """
+ timeout = get_timeout()
+
+ if not timeout:
+ return
+
+ timeout_start = datetime.datetime.utcnow()
+ timeout_duration = timeout['duration']
+ timeout_deadline = timeout['deadline']
+ timeout_remaining = timeout_deadline - timeout_start
+
+ test_timeout = TestTimeout(timeout_duration)
+
+ if timeout_remaining <= datetime.timedelta():
+ test_timeout.write(args)
+
+ raise ApplicationError('The %d minute test timeout expired %s ago at %s.' % (
+ timeout_duration, timeout_remaining * -1, timeout_deadline))
+
+ display.info('The %d minute test timeout expires in %s at %s.' % (
+ timeout_duration, timeout_remaining, timeout_deadline), verbosity=1)
+
+ def timeout_handler(_dummy1, _dummy2):
+ """Runs when SIGUSR1 is received."""
+ test_timeout.write(args)
+
+ raise ApplicationError('Tests aborted after exceeding the %d minute time limit.' % timeout_duration)
+
+ def timeout_waiter(timeout_seconds):
+ """
+ :type timeout_seconds: int
+ """
+ time.sleep(timeout_seconds)
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+ signal.signal(signal.SIGUSR1, timeout_handler)
+
+ instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.seconds))
+ instance.daemon = True
+ instance.start()
+
+
+def show_dict(data, verbose, root_verbosity=0, path=None):
+ """
+ :type data: dict[str, any]
+ :type verbose: dict[str, int]
+ :type root_verbosity: int
+ :type path: list[str] | None
+ """
+ path = path if path else []
+
+ for key, value in sorted(data.items()):
+ indent = ' ' * len(path)
+ key_path = path + [key]
+ key_name = '.'.join(key_path)
+ verbosity = verbose.get(key_name, root_verbosity)
+
+ if isinstance(value, (tuple, list)):
+ display.info(indent + '%s:' % key, verbosity=verbosity)
+ for item in value:
+ display.info(indent + ' - %s' % item, verbosity=verbosity)
+ elif isinstance(value, dict):
+ min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)])
+ display.info(indent + '%s:' % key, verbosity=min_verbosity)
+ show_dict(value, verbose, verbosity, key_path)
+ else:
+ display.info(indent + '%s: %s' % (key, value), verbosity=verbosity)
+
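+# Behaviour sketch for show_dict() above (restating the logic, not new behavior):
+# given verbose={'environ': 2}, the 'environ' subtree is only printed at -vv or
+# higher, keys absent from the map inherit root_verbosity, and a parent dict is
+# shown at the minimum verbosity of any of its children.
+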
+
+def get_docker_details(args):
+ """
+ :type args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ docker = find_executable('docker', required=False)
+ info = None
+ version = None
+
+ if docker:
+ try:
+ info = docker_info(args)
+ except SubprocessError as ex:
+ display.warning('Failed to collect docker info:\n%s' % ex)
+
+ try:
+ version = docker_version(args)
+ except SubprocessError as ex:
+ display.warning('Failed to collect docker version:\n%s' % ex)
+
+ docker_details = dict(
+ executable=docker,
+ info=info,
+ version=version,
+ )
+
+ return docker_details
diff --git a/test/lib/ansible_test/_internal/executor.py b/test/lib/ansible_test/_internal/executor.py
new file mode 100644
index 00000000..881439ef
--- /dev/null
+++ b/test/lib/ansible_test/_internal/executor.py
@@ -0,0 +1,2186 @@
+"""Execute Ansible tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import datetime
+import re
+import time
+import textwrap
+import functools
+import hashlib
+import difflib
+import filecmp
+import random
+import string
+import shutil
+
+from . import types as t
+
+from .thread import (
+ WrappedThread,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+ SshKey,
+)
+
+from .manage_ci import (
+ ManageWindowsCI,
+ ManageNetworkCI,
+)
+
+from .cloud import (
+ cloud_filter,
+ cloud_init,
+ get_cloud_environment,
+ get_cloud_platforms,
+ CloudEnvironmentConfig,
+)
+
+from .io import (
+ make_dirs,
+ open_text_file,
+ read_binary_file,
+ read_text_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationWarning,
+ ApplicationError,
+ SubprocessError,
+ display,
+ remove_tree,
+ find_executable,
+ raw_command,
+ get_available_port,
+ generate_pip_command,
+ find_python,
+ cmd_quote,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_TEST_CONFIG_ROOT,
+ get_ansible_version,
+ tempdir,
+ open_zipfile,
+ SUPPORTED_PYTHON_VERSIONS,
+ str_to_version,
+ version_to_str,
+)
+
+from .util_common import (
+ get_docker_completion,
+ get_network_settings,
+ get_remote_completion,
+ get_python_path,
+ intercept_command,
+ named_temporary_file,
+ run_command,
+ write_json_test_results,
+ ResultType,
+ handle_layout_messages,
+)
+
+from .docker_util import (
+ docker_pull,
+ docker_run,
+ docker_available,
+ docker_rm,
+ get_docker_container_id,
+ get_docker_container_ip,
+ get_docker_hostname,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+from .ansible_util import (
+ ansible_environment,
+ check_pyyaml,
+)
+
+from .target import (
+ IntegrationTarget,
+ walk_internal_targets,
+ walk_posix_integration_targets,
+ walk_network_integration_targets,
+ walk_windows_integration_targets,
+ TIntegrationTarget,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+from .classification import (
+ categorize_changes,
+)
+
+from .config import (
+ TestConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ ShellConfig,
+ WindowsIntegrationConfig,
+ TIntegrationConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .integration import (
+ integration_test_environment,
+ integration_test_config_file,
+ setup_common_temp_dir,
+ get_inventory_relative_path,
+ check_inventory,
+ delegate_inventory,
+)
+
+from .data import (
+ data_context,
+)
+
+HTTPTESTER_HOSTS = (
+ 'ansible.http.tests',
+ 'sni1.ansible.http.tests',
+ 'fail.ansible.http.tests',
+)
+
+
+def check_startup():
+ """Checks to perform at startup before running commands."""
+ check_legacy_modules()
+
+
+def check_legacy_modules():
+ """Detect conflicts with legacy core/extras module directories to avoid problems later."""
+ for directory in 'core', 'extras':
+ path = 'lib/ansible/modules/%s' % directory
+
+ for root, _dir_names, file_names in os.walk(path):
+ if file_names:
+ # the directory shouldn't exist, but if it does, it must contain no files
+ raise ApplicationError('Files prohibited in "%s". '
+ 'These are most likely legacy modules from version 2.2 or earlier.' % root)
+
+
+def create_shell_command(command):
+ """
+ :type command: list[str]
+ :rtype: list[str]
+ """
+ optional_vars = (
+ 'TERM',
+ )
+
+ cmd = ['/usr/bin/env']
+ cmd += ['%s=%s' % (var, os.environ[var]) for var in optional_vars if var in os.environ]
+ cmd += command
+
+ return cmd
+
+
+def get_openssl_version(args, python, python_version): # type: (EnvironmentConfig, str, str) -> t.Optional[t.Tuple[int, ...]]
+ """Return the openssl version."""
+ if not python_version.startswith('2.'):
+ # OpenSSL version checking only works on Python 3.x.
+ # This should be the most accurate, since it is the Python we will be using.
+ version = json.loads(run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sslcheck.py')], capture=True, always=True)[0])['version']
+
+ if version:
+ display.info('Detected OpenSSL version %s under Python %s.' % (version_to_str(version), python_version), verbosity=1)
+
+ return tuple(version)
+
+ # Fall back to detecting the OpenSSL version from the CLI.
+ # This should provide an adequate solution on Python 2.x.
+ openssl_path = find_executable('openssl', required=False)
+
+ if openssl_path:
+ try:
+ result = raw_command([openssl_path, 'version'], capture=True)[0]
+ except SubprocessError:
+ result = ''
+
+ match = re.search(r'^OpenSSL (?P<version>[0-9]+\.[0-9]+\.[0-9]+)', result)
+
+ if match:
+ version = str_to_version(match.group('version'))
+
+ display.info('Detected OpenSSL version %s using the openssl CLI.' % version_to_str(version), verbosity=1)
+
+ return version
+
+ display.info('Unable to detect OpenSSL version.', verbosity=1)
+
+ return None
+
+
+def get_setuptools_version(args, python): # type: (EnvironmentConfig, str) -> t.Tuple[int, ...]
+ """Return the setuptools version for the given python."""
+ try:
+ return str_to_version(raw_command([python, '-c', 'import setuptools; print(setuptools.__version__)'], capture=True)[0])
+ except SubprocessError:
+ if args.explain:
+ return tuple() # ignore errors in explain mode in case setuptools is not already installed
+
+ raise
+
+
+def get_cryptography_requirement(args, python_version): # type: (EnvironmentConfig, str) -> str
+ """
+ Return the correct cryptography requirement for the given python version.
+ The version of cryptography installed depends on the python version, setuptools version and openssl version.
+ """
+ python = find_python(python_version)
+ setuptools_version = get_setuptools_version(args, python)
+ openssl_version = get_openssl_version(args, python, python_version)
+
+ if setuptools_version >= (18, 5):
+ if python_version == '2.6':
+ # cryptography 2.2+ requires python 2.7+
+ # see https://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#22---2018-03-19
+ cryptography = 'cryptography < 2.2'
+ elif openssl_version and openssl_version < (1, 1, 0):
+ # cryptography 3.2 requires openssl 1.1.x or later
+ # see https://cryptography.io/en/latest/changelog.html#v3-2
+ cryptography = 'cryptography < 3.2'
+ else:
+ cryptography = 'cryptography'
+ else:
+ # cryptography 2.1+ requires setuptools 18.5+
+ # see https://github.com/pyca/cryptography/blob/62287ae18383447585606b9d0765c0f1b8a9777c/setup.py#L26
+ cryptography = 'cryptography < 2.1'
+
+ return cryptography
+
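+# Decision summary (restating the constraints above, for quick reference):
+#
+#   setuptools < 18.5                    -> 'cryptography < 2.1'
+#   setuptools >= 18.5, python == 2.6    -> 'cryptography < 2.2'
+#   setuptools >= 18.5, openssl < 1.1.0  -> 'cryptography < 3.2'
+#   otherwise                            -> 'cryptography' (latest available)
+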
+
+def install_command_requirements(args, python_version=None, context=None, enable_pyyaml_check=False):
+ """
+ :type args: EnvironmentConfig
+ :type python_version: str | None
+ :type context: str | None
+ :type enable_pyyaml_check: bool
+ """
+ if not args.explain:
+ make_dirs(ResultType.COVERAGE.path)
+ make_dirs(ResultType.DATA.path)
+
+ if isinstance(args, ShellConfig):
+ if args.raw:
+ return
+
+ generate_egg_info(args)
+
+ if not args.requirements:
+ return
+
+ if isinstance(args, ShellConfig):
+ return
+
+ packages = []
+
+ if isinstance(args, TestConfig):
+ if args.coverage:
+ packages.append('coverage')
+ if args.junit:
+ packages.append('junit-xml')
+
+ if not python_version:
+ python_version = args.python_version
+
+ pip = generate_pip_command(find_python(python_version))
+
+ # skip packages which have already been installed for python_version
+
+ try:
+ package_cache = install_command_requirements.package_cache
+ except AttributeError:
+ package_cache = install_command_requirements.package_cache = {}
+
+ installed_packages = package_cache.setdefault(python_version, set())
+ skip_packages = [package for package in packages if package in installed_packages]
+
+ for package in skip_packages:
+ packages.remove(package)
+
+ installed_packages.update(packages)
+
+ if args.command != 'sanity':
+ install_ansible_test_requirements(args, pip)
+
+ # make sure setuptools is available before trying to install cryptography
+ # the installed version of setuptools affects the version of cryptography to install
+ run_command(args, generate_pip_install(pip, '', packages=['setuptools']))
+
+ # install the latest cryptography version that the current requirements can support
+ # use a custom constraints file to avoid the normal constraints file overriding the chosen version of cryptography
+ # if not installed here, later install commands may try to install an unsupported version due to the presence of older setuptools
+ # this is done instead of upgrading setuptools to allow tests to function with older distribution provided versions of setuptools
+ run_command(args, generate_pip_install(pip, '',
+ packages=[get_cryptography_requirement(args, python_version)],
+ constraints=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'cryptography-constraints.txt')))
+
+ commands = [generate_pip_install(pip, args.command, packages=packages, context=context)]
+
+ if isinstance(args, IntegrationConfig):
+ for cloud_platform in get_cloud_platforms(args):
+ commands.append(generate_pip_install(pip, '%s.cloud.%s' % (args.command, cloud_platform)))
+
+ commands = [cmd for cmd in commands if cmd]
+
+ if not commands:
+ return # no need to detect changes or run pip check since we are not making any changes
+
+ # only look for changes when more than one requirements file is needed
+ detect_pip_changes = len(commands) > 1
+
+ # first pass to install requirements, changes expected unless environment is already set up
+ install_ansible_test_requirements(args, pip)
+ changes = run_pip_commands(args, pip, commands, detect_pip_changes)
+
+ if changes:
+ # second pass to check for conflicts in requirements, changes are not expected here
+ changes = run_pip_commands(args, pip, commands, detect_pip_changes)
+
+ if changes:
+ raise ApplicationError('Conflicts detected in requirements. The following commands reported changes during verification:\n%s' %
+ '\n'.join((' '.join(cmd_quote(c) for c in cmd) for cmd in changes)))
+
+ if args.pip_check:
+ # ask pip to check for conflicts between installed packages
+ try:
+ run_command(args, pip + ['check', '--disable-pip-version-check'], capture=True)
+ except SubprocessError as ex:
+ if ex.stderr.strip() == 'ERROR: unknown command "check"':
+ display.warning('Cannot check pip requirements for conflicts because "pip check" is not supported.')
+ else:
+ raise
+
+ if enable_pyyaml_check:
+ # pyyaml may have been one of the requirements that was installed, so perform an optional check for it
+ check_pyyaml(args, python_version, required=False)
+
+
+def install_ansible_test_requirements(args, pip): # type: (EnvironmentConfig, t.List[str]) -> None
+ """Install requirements for ansible-test for the given pip if not already installed."""
+ try:
+ installed = install_command_requirements.installed
+ except AttributeError:
+ installed = install_command_requirements.installed = set()
+
+ if tuple(pip) in installed:
+ return
+
+ # make sure basic ansible-test requirements are met, including making sure that pip is recent enough to support constraints
+ # virtualenvs created by older distributions may include very old pip versions, such as those created in the centos6 test container (pip 6.0.8)
+ run_command(args, generate_pip_install(pip, 'ansible-test', use_constraints=False))
+
+ installed.add(tuple(pip))
+
+
+def run_pip_commands(args, pip, commands, detect_pip_changes=False):
+ """
+ :type args: EnvironmentConfig
+ :type pip: list[str]
+ :type commands: list[list[str]]
+ :type detect_pip_changes: bool
+ :rtype: list[list[str]]
+ """
+ changes = []
+
+ after_list = pip_list(args, pip) if detect_pip_changes else None
+
+ for cmd in commands:
+ if not cmd:
+ continue
+
+ before_list = after_list
+
+ run_command(args, cmd)
+
+ after_list = pip_list(args, pip) if detect_pip_changes else None
+
+ if before_list != after_list:
+ changes.append(cmd)
+
+ return changes
+
+
+def pip_list(args, pip):
+ """
+ :type args: EnvironmentConfig
+ :type pip: list[str]
+ :rtype: str
+ """
+ stdout = run_command(args, pip + ['list'], capture=True)[0]
+ return stdout
+
+
+def generate_egg_info(args):
+ """
+ :type args: EnvironmentConfig
+ """
+ if args.explain:
+ return
+
+ ansible_version = get_ansible_version()
+
+ # inclusion of the version number in the path is optional
+ # see: https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata
+ egg_info_path = ANSIBLE_LIB_ROOT + '_base-%s.egg-info' % ansible_version
+
+ if os.path.exists(egg_info_path):
+ return
+
+ egg_info_path = ANSIBLE_LIB_ROOT + '_base.egg-info'
+
+ if os.path.exists(egg_info_path):
+ return
+
+ # minimal PKG-INFO stub following the format defined in PEP 241
+ # required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography
+ # newer setuptools versions are happy with an empty directory
+ # including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source
+ pkg_info = '''
+Metadata-Version: 1.0
+Name: ansible
+Version: %s
+Platform: UNKNOWN
+Summary: Radically simple IT automation
+Author-email: info@ansible.com
+License: GPLv3+
+''' % get_ansible_version()
+
+ pkg_info_path = os.path.join(egg_info_path, 'PKG-INFO')
+
+ write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True)
+
+
+def generate_pip_install(pip, command, packages=None, constraints=None, use_constraints=True, context=None):
+ """
+ :type pip: list[str]
+ :type command: str
+ :type packages: list[str] | None
+ :type constraints: str | None
+ :type use_constraints: bool
+ :type context: str | None
+ :rtype: list[str] | None
+ """
+ constraints = constraints or os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')
+ requirements = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', '%s.txt' % ('%s.%s' % (command, context) if context else command))
+ content_constraints = None
+
+ options = []
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ if command == 'sanity' and data_context().content.is_ansible:
+ requirements = os.path.join(data_context().content.sanity_path, 'code-smell', '%s.requirements.txt' % context)
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ if command == 'units':
+ requirements = os.path.join(data_context().content.unit_path, 'requirements.txt')
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ content_constraints = os.path.join(data_context().content.unit_path, 'constraints.txt')
+
+ if command in ('integration', 'windows-integration', 'network-integration'):
+ requirements = os.path.join(data_context().content.integration_path, 'requirements.txt')
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ requirements = os.path.join(data_context().content.integration_path, '%s.requirements.txt' % command)
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')
+
+ if command.startswith('integration.cloud.'):
+ content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')
+
+ if packages:
+ options += packages
+
+ if not options:
+ return None
+
+ if use_constraints:
+ if content_constraints and os.path.exists(content_constraints) and os.path.getsize(content_constraints):
+ # listing content constraints first gives them priority over constraints provided by ansible-test
+ options.extend(['-c', content_constraints])
+
+ options.extend(['-c', constraints])
+
+ return pip + ['install', '--disable-pip-version-check'] + options
+
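+# Example of a generated command (hypothetical interpreter and paths, shown only
+# to illustrate the option ordering produced above):
+#
+#   generate_pip_install(['/usr/bin/python3.6', '-m', 'pip'], 'units')
+#   -> ['/usr/bin/python3.6', '-m', 'pip', 'install', '--disable-pip-version-check',
+#      '-r', '.../requirements/units.txt', '-c', '.../requirements/constraints.txt']
+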
+
+def command_shell(args):
+ """
+ :type args: ShellConfig
+ """
+ if args.delegate:
+ raise Delegate()
+
+ install_command_requirements(args)
+
+ if args.inject_httptester:
+ inject_httptester(args)
+
+ cmd = create_shell_command(['bash', '-i'])
+ run_command(args, cmd)
+
+
+def command_posix_integration(args):
+ """
+ :type args: PosixIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, os.path.basename(inventory_relative_path))
+
+ all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets)
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+
+
+def command_network_integration(args):
+ """
+ :type args: NetworkIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if args.inventory:
+ inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if args.no_temp_workdir:
+ # temporary solution to keep DCI tests working
+ inventory_exists = os.path.exists(inventory_path)
+ else:
+ inventory_exists = os.path.isfile(inventory_path)
+
+ if not args.explain and not args.platform and not inventory_exists:
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --platform to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_network_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets, init_callback=network_init)
+ instances = [] # type: t.List[WrappedThread]
+
+ if args.platform:
+ get_python_path(args, args.python_executable) # initialize before starting threads
+
+ configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+ for platform_version in args.platform:
+ platform, version = platform_version.split('/', 1)
+ config = configs.get(platform_version)
+
+ if not config:
+ continue
+
+ instance = WrappedThread(functools.partial(network_run, args, platform, version, config))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ remotes = [instance.wait_for_result() for instance in instances]
+ inventory = network_inventory(remotes)
+
+ display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+ if not args.explain:
+ write_text_file(inventory_path, inventory)
+
+ success = False
+
+ try:
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+ success = True
+ finally:
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ for instance in instances:
+ instance.result.stop()
+
+
+def network_init(args, internal_targets): # type: (NetworkIntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> None
+ """Initialize platforms for network integration tests."""
+ if not args.platform:
+ return
+
+ if args.metadata.instance_config is not None:
+ return
+
+ platform_targets = set(a for target in internal_targets for a in target.aliases if a.startswith('network/'))
+
+ instances = [] # type: t.List[WrappedThread]
+
+ # generate an ssh key (if needed) up front once, instead of for each instance
+ SshKey(args)
+
+ for platform_version in args.platform:
+ platform, version = platform_version.split('/', 1)
+ platform_target = 'network/%s/' % platform
+
+ if platform_target not in platform_targets:
+ display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
+ platform_version, platform))
+ continue
+
+ instance = WrappedThread(functools.partial(network_start, args, platform, version))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def network_start(args, platform, version):
+ """
+ :type args: NetworkIntegrationConfig
+ :type platform: str
+ :type version: str
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
+ core_ci.start()
+
+ return core_ci.save()
+
+
+def network_run(args, platform, version, config):
+ """
+ :type args: NetworkIntegrationConfig
+ :type platform: str
+ :type version: str
+ :type config: dict[str, str]
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+ core_ci.load(config)
+ core_ci.wait()
+
+ manage = ManageNetworkCI(core_ci)
+ manage.wait()
+
+ return core_ci
+
+
+def network_inventory(remotes):
+ """
+ :type remotes: list[AnsibleCoreCI]
+ :rtype: str
+ """
+ groups = dict([(remote.platform, []) for remote in remotes])
+ net = []
+
+ for remote in remotes:
+ options = dict(
+ ansible_host=remote.connection.hostname,
+ ansible_user=remote.connection.username,
+ ansible_ssh_private_key_file=os.path.abspath(remote.ssh_key.key),
+ )
+
+ settings = get_network_settings(remote.args, remote.platform, remote.version)
+
+ options.update(settings.inventory_vars)
+
+ groups[remote.platform].append(
+ '%s %s' % (
+ remote.name.replace('.', '-'),
+ ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+ )
+ )
+
+ net.append(remote.platform)
+
+ groups['net:children'] = net
+
+ template = ''
+
+ for group in groups:
+ hosts = '\n'.join(groups[group])
+
+ template += textwrap.dedent("""
+ [%s]
+ %s
+ """) % (group, hosts)
+
+ inventory = template
+
+ return inventory
+
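+# Example of the generated inventory text (hypothetical platform and host values):
+#
+#   [vyos]
+#   vyos-1-2-0 ansible_host="203.0.113.10" ansible_user="ansible" ...
+#
+#   [net:children]
+#   vyos
+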
+
+def command_windows_integration(args):
+ """
+ :type args: WindowsIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if args.inventory:
+ inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if not args.explain and not args.windows and not os.path.isfile(inventory_path):
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --windows to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
+ instances = [] # type: t.List[WrappedThread]
+ pre_target = None
+ post_target = None
+ httptester_id = None
+
+ if args.windows:
+ get_python_path(args, args.python_executable) # initialize before starting threads
+
+ configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+ for version in args.windows:
+ config = configs['windows/%s' % version]
+
+ instance = WrappedThread(functools.partial(windows_run, args, version, config))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ remotes = [instance.wait_for_result() for instance in instances]
+ inventory = windows_inventory(remotes)
+
+ display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+ if not args.explain:
+ write_text_file(inventory_path, inventory)
+
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in internal_targets)
+ # if running under Docker delegation, the httptester may have already been started
+ docker_httptester = bool(os.environ.get("HTTPTESTER", False))
+
+ if use_httptester and not docker_available() and not docker_httptester:
+ display.warning('Assuming --disable-httptester since `docker` is not available.')
+ elif use_httptester:
+ if docker_httptester:
+ # we are running in a Docker container that is linked to the httptester container, we just need to
+ # forward these requests to the linked hostname
+ first_host = HTTPTESTER_HOSTS[0]
+ ssh_options = ["-R", "8080:%s:80" % first_host, "-R", "8443:%s:443" % first_host]
+ else:
+ # we are running directly and need to start the httptester container ourselves and forward the ports
+ # from there; manually set args.inject_httptester so the HTTPTESTER env var is set during the run
+ args.inject_httptester = True
+ httptester_id, ssh_options = start_httptester(args)
+
+ # to get this SSH command to run in the background we need to tell ssh to background itself (-f) and
+ # disable pty allocation (-T)
+ ssh_options.insert(0, "-fT")
+
+ # create a script that will continue to run in the background until the script is deleted; deleting
+ # the script cleans up and closes the connection
+ def forward_ssh_ports(target):
+ """
+ :type target: IntegrationTarget
+ """
+ if 'needs/httptester/' not in target.aliases:
+ return
+
+ for remote in [r for r in remotes if r.version != '2008']:
+ manage = ManageWindowsCI(remote)
+ manage.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'windows-httptester.ps1'), watcher_path)
+
+ # We cannot pass an array of strings with -File so we just use a delimiter for multiple values
+ script = "powershell.exe -NoProfile -ExecutionPolicy Bypass -File .\\%s -Hosts \"%s\"" \
+ % (watcher_path, "|".join(HTTPTESTER_HOSTS))
+ if args.verbosity > 3:
+ script += " -Verbose"
+ manage.ssh(script, options=ssh_options, force_pty=False)
+
+ def cleanup_ssh_ports(target):
+ """
+ :type target: IntegrationTarget
+ """
+ if 'needs/httptester/' not in target.aliases:
+ return
+
+ for remote in [r for r in remotes if r.version != '2008']:
+ # delete the tmp file that keeps the http-tester alive
+ manage = ManageWindowsCI(remote)
+ manage.ssh("cmd.exe /c \"del %s /F /Q\"" % watcher_path, force_pty=False)
+
+ watcher_path = "ansible-test-http-watcher-%s.ps1" % time.time()
+ pre_target = forward_ssh_ports
+ post_target = cleanup_ssh_ports
+
+ def run_playbook(playbook, run_playbook_vars): # type: (str, t.Dict[str, t.Any]) -> None
+ playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
+ command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
+ if args.verbosity:
+ command.append('-%s' % ('v' * args.verbosity))
+
+ env = ansible_environment(args)
+ intercept_command(args, command, '', env, disable_coverage=True)
+
+ remote_temp_path = None
+
+ if args.coverage and not args.coverage_check:
+ # Create the remote directory that is writable by everyone. Use Ansible to talk to the remote host.
+ remote_temp_path = 'C:\\ansible_test_coverage_%s' % time.time()
+ playbook_vars = {'remote_temp_path': remote_temp_path}
+ run_playbook('windows_coverage_setup.yml', playbook_vars)
+
+ success = False
+
+ try:
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path, pre_target=pre_target,
+ post_target=post_target, remote_temp_path=remote_temp_path)
+ success = True
+ finally:
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+ if remote_temp_path:
+ # Zip up the coverage files that were generated and fetch it back to localhost.
+ with tempdir() as local_temp_path:
+ playbook_vars = {'remote_temp_path': remote_temp_path, 'local_temp_path': local_temp_path}
+ run_playbook('windows_coverage_teardown.yml', playbook_vars)
+
+ for filename in os.listdir(local_temp_path):
+ with open_zipfile(os.path.join(local_temp_path, filename)) as coverage_zip:
+ coverage_zip.extractall(ResultType.COVERAGE.path)
+
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ for instance in instances:
+ instance.result.stop()
+
+
+# noinspection PyUnusedLocal
+def windows_init(args, internal_targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: WindowsIntegrationConfig
+ :type internal_targets: tuple[IntegrationTarget]
+ """
+ if not args.windows:
+ return
+
+ if args.metadata.instance_config is not None:
+ return
+
+ instances = [] # type: t.List[WrappedThread]
+
+ for version in args.windows:
+ instance = WrappedThread(functools.partial(windows_start, args, version))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def windows_start(args, version):
+ """
+ :type args: WindowsIntegrationConfig
+ :type version: str
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider)
+ core_ci.start()
+
+ return core_ci.save()
+
+
+def windows_run(args, version, config):
+ """
+ :type args: WindowsIntegrationConfig
+ :type version: str
+ :type config: dict[str, str]
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+ core_ci.load(config)
+ core_ci.wait()
+
+ manage = ManageWindowsCI(core_ci)
+ manage.wait()
+
+ return core_ci
+
+
+def windows_inventory(remotes):
+ """
+ :type remotes: list[AnsibleCoreCI]
+ :rtype: str
+ """
+ hosts = []
+
+ for remote in remotes:
+ options = dict(
+ ansible_host=remote.connection.hostname,
+ ansible_user=remote.connection.username,
+ ansible_password=remote.connection.password,
+ ansible_port=remote.connection.port,
+ )
+
+ # used for the connection_windows_ssh test target
+ if remote.ssh_key:
+ options["ansible_ssh_private_key_file"] = os.path.abspath(remote.ssh_key.key)
+
+ if remote.name == 'windows-2008':
+ options.update(
+ # force 2008 to use PSRP for the connection plugin
+ ansible_connection='psrp',
+ ansible_psrp_auth='basic',
+ ansible_psrp_cert_validation='ignore',
+ )
+ elif remote.name == 'windows-2016':
+ options.update(
+ # force 2016 to use NTLM + HTTP message encryption
+ ansible_connection='winrm',
+ ansible_winrm_server_cert_validation='ignore',
+ ansible_winrm_transport='ntlm',
+ ansible_winrm_scheme='http',
+ ansible_port='5985',
+ )
+ else:
+ options.update(
+ ansible_connection='winrm',
+ ansible_winrm_server_cert_validation='ignore',
+ )
+
+ hosts.append(
+ '%s %s' % (
+ remote.name.replace('/', '_'),
+ ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+ )
+ )
+
+ template = """
+ [windows]
+ %s
+
+ # support winrm binary module tests (temporary solution)
+ [testhost:children]
+ windows
+ """
+
+ template = textwrap.dedent(template)
+ inventory = template % ('\n'.join(hosts))
+
+ return inventory
+
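+# Example of a generated host line (hypothetical values; options are sorted and the
+# connection settings vary by host name as implemented above):
+#
+#   windows_2016 ansible_connection="winrm" ansible_host="198.51.100.5" ansible_port="5985" ...
+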
+
+def command_integration_filter(args, # type: TIntegrationConfig
+ targets, # type: t.Iterable[TIntegrationTarget]
+ init_callback=None, # type: t.Callable[[TIntegrationConfig, t.Tuple[TIntegrationTarget, ...]], None]
+ ): # type: (...) -> t.Tuple[TIntegrationTarget, ...]
+ """Filter the given integration test targets."""
+ targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
+ changes = get_changes_filter(args)
+
+ # special behavior when the --changed-all-target target is selected based on changes
+ if args.changed_all_target in changes:
+ # act as though the --changed-all-target target was in the include list
+ if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
+ args.include.append(args.changed_all_target)
+ args.delegate_args += ['--include', args.changed_all_target]
+ # act as though the --changed-all-target target was in the exclude list
+ elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
+ args.exclude.append(args.changed_all_target)
+
+ require = args.require + changes
+ exclude = args.exclude
+
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+ environment_exclude = get_integration_filter(args, internal_targets)
+
+ environment_exclude += cloud_filter(args, internal_targets)
+
+ if environment_exclude:
+ exclude += environment_exclude
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+
+ if not internal_targets:
+ raise AllTargetsSkipped()
+
+ if args.start_at and not any(target.name == args.start_at for target in internal_targets):
+ raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
+
+ if init_callback:
+ init_callback(args, internal_targets)
+
+ cloud_init(args, internal_targets)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ if os.path.exists(vars_file_src):
+ def integration_config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the integration config vars file to the payload file list.
+ This will preserve the file during delegation even if the file is ignored by source control.
+ """
+ files.append((vars_file_src, data_context().content.integration_vars_path))
+
+ data_context().register_payload_callback(integration_config_callback)
+
+ if args.delegate:
+ raise Delegate(require=require, exclude=exclude, integration_targets=internal_targets)
+
+ install_command_requirements(args)
+
+ return internal_targets
+
+
+def command_integration_filtered(args, targets, all_targets, inventory_path, pre_target=None, post_target=None,
+ remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :type all_targets: tuple[IntegrationTarget]
+ :type inventory_path: str
+ :type pre_target: (IntegrationTarget) -> None | None
+ :type post_target: (IntegrationTarget) -> None | None
+ :type remote_temp_path: str | None
+ """
+ found = False
+ passed = []
+ failed = []
+
+ targets_iter = iter(targets)
+ all_targets_dict = dict((target.name, target) for target in all_targets)
+
+ setup_errors = []
+ setup_targets_executed = set()
+
+ for target in all_targets:
+ for setup_target in target.setup_once + target.setup_always:
+ if setup_target not in all_targets_dict:
+ setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
+
+ if setup_errors:
+ raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
+
+ check_pyyaml(args, args.python_version)
+
+ test_dir = os.path.join(ResultType.TMP.path, 'output_dir')
+
+ if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
+ max_tries = 20
+ display.info('SSH service required for tests. Checking to make sure we can connect.')
+ for i in range(1, max_tries + 1):
+ try:
+ run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
+ display.info('SSH service responded.')
+ break
+ except SubprocessError:
+ if i == max_tries:
+ raise
+ seconds = 3
+ display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
+ time.sleep(seconds)
+
+ # Windows is different as Ansible execution is done locally but the host is remote
+ if args.inject_httptester and not isinstance(args, WindowsIntegrationConfig):
+ inject_httptester(args)
+
+ start_at_task = args.start_at_task
+
+ results = {}
+
+ current_environment = None # type: t.Optional[EnvironmentDescription]
+
+ # common temporary directory path that will be valid on both the controller and the remote
+ # it must be common because it will be referenced in environment variables that are shared across multiple hosts
+ common_temp_path = '/tmp/ansible-test-%s' % ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(8))
+
+ setup_common_temp_dir(args, common_temp_path)
+
+ try:
+ for target in targets_iter:
+ if args.start_at and not found:
+ found = target.name == args.start_at
+
+ if not found:
+ continue
+
+ if args.list_targets:
+ print(target.name)
+ continue
+
+ tries = 2 if args.retry_on_error else 1
+ verbosity = args.verbosity
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ original_environment = current_environment if current_environment else EnvironmentDescription(args)
+ current_environment = None
+
+ display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)
+
+ try:
+ while tries:
+ tries -= 1
+
+ try:
+ if cloud_environment:
+ cloud_environment.setup_once()
+
+ run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, False)
+
+ start_time = time.time()
+
+ run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, True)
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if pre_target:
+ pre_target(target)
+
+ try:
+ if target.script_path:
+ command_integration_script(args, target, test_dir, inventory_path, common_temp_path,
+ remote_temp_path=remote_temp_path)
+ else:
+ command_integration_role(args, target, start_at_task, test_dir, inventory_path,
+ common_temp_path, remote_temp_path=remote_temp_path)
+ start_at_task = None
+ finally:
+ if post_target:
+ post_target(target)
+
+ end_time = time.time()
+
+ results[target.name] = dict(
+ name=target.name,
+ type=target.type,
+ aliases=target.aliases,
+ modules=target.modules,
+ run_time_seconds=int(end_time - start_time),
+ setup_once=target.setup_once,
+ setup_always=target.setup_always,
+ coverage=args.coverage,
+ coverage_label=args.coverage_label,
+ python_version=args.python_version,
+ )
+
+ break
+ except SubprocessError:
+ if cloud_environment:
+ cloud_environment.on_failure(target, tries)
+
+ if not original_environment.validate(target.name, throw=False):
+ raise
+
+ if not tries:
+ raise
+
+ display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
+ display.verbosity = args.verbosity = 6
+
+ start_time = time.time()
+ current_environment = EnvironmentDescription(args)
+ end_time = time.time()
+
+ EnvironmentDescription.check(original_environment, current_environment, target.name, throw=True)
+
+ results[target.name]['validation_seconds'] = int(end_time - start_time)
+
+ passed.append(target)
+ except Exception as ex:
+ failed.append(target)
+
+ if args.continue_on_error:
+ display.error(ex)
+ continue
+
+ display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
+
+ next_target = next(targets_iter, None)
+
+ if next_target:
+ display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
+
+ raise
+ finally:
+ display.verbosity = args.verbosity = verbosity
+
+ finally:
+ if not args.explain:
+ if args.coverage:
+ coverage_temp_path = os.path.join(common_temp_path, ResultType.COVERAGE.name)
+ coverage_save_path = ResultType.COVERAGE.path
+
+ for filename in os.listdir(coverage_temp_path):
+ shutil.copy(os.path.join(coverage_temp_path, filename), os.path.join(coverage_save_path, filename))
+
+ remove_tree(common_temp_path)
+
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ targets=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+ if failed:
+ raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
+ len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
+
+
+def start_httptester(args):
+ """
+ :type args: EnvironmentConfig
+ :rtype: str, list[str]
+ """
+
+ # map ports from remote -> localhost -> container
+ # passing through localhost is only used when ansible-test is not already running inside a docker container
+ ports = [
+ dict(
+ remote=8080,
+ container=80,
+ ),
+ dict(
+ remote=8443,
+ container=443,
+ ),
+ ]
+
+ container_id = get_docker_container_id()
+
+ if not container_id:
+ for item in ports:
+ item['localhost'] = get_available_port()
+
+ docker_pull(args, args.httptester)
+
+ httptester_id = run_httptester(args, dict((port['localhost'], port['container']) for port in ports if 'localhost' in port))
+
+ if container_id:
+ container_host = get_docker_container_ip(args, httptester_id)
+ display.info('Found httptester container address: %s' % container_host, verbosity=1)
+ else:
+ container_host = get_docker_hostname()
+
+ ssh_options = []
+
+ for port in ports:
+ ssh_options += ['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))]
+
+ return httptester_id, ssh_options
+
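+# Port-mapping sketch (illustrative, with hypothetical port numbers): when not
+# already running inside a container, each remote port is forwarded through a
+# random free local port, e.g. remote 8080 -> localhost:49152 -> container:80,
+# yielding ssh options such as ['-R', '8080:<docker_hostname>:49152'].
+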
+
+def run_httptester(args, ports=None):
+ """
+ :type args: EnvironmentConfig
+ :type ports: dict[int, int] | None
+ :rtype: str
+ """
+ options = [
+ '--detach',
+ ]
+
+ if ports:
+ for localhost_port, container_port in ports.items():
+ options += ['-p', '%d:%d' % (localhost_port, container_port)]
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # network-scoped aliases are only supported for containers in user defined networks
+ for alias in HTTPTESTER_HOSTS:
+ options.extend(['--network-alias', alias])
+
+ httptester_id = docker_run(args, args.httptester, options=options)[0]
+
+ if args.explain:
+ httptester_id = 'httptester_id'
+ else:
+ httptester_id = httptester_id.strip()
+
+ return httptester_id
+
+
+def inject_httptester(args):
+ """
+ :type args: CommonConfig
+ """
+ comment = ' # ansible-test httptester\n'
+ append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
+ hosts_path = '/etc/hosts'
+
+ original_lines = read_text_file(hosts_path).splitlines(True)
+
+ if not any(line.endswith(comment) for line in original_lines):
+ write_text_file(hosts_path, ''.join(original_lines + append_lines))
+
+ # determine which forwarding mechanism to use
+ pfctl = find_executable('pfctl', required=False)
+ iptables = find_executable('iptables', required=False)
+
+ if pfctl:
+ kldload = find_executable('kldload', required=False)
+
+ if kldload:
+ try:
+ run_command(args, ['kldload', 'pf'], capture=True)
+ except SubprocessError:
+ pass # already loaded
+
+ rules = '''
+rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
+rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
+'''
+ cmd = ['pfctl', '-ef', '-']
+
+ try:
+ run_command(args, cmd, capture=True, data=rules)
+ except SubprocessError:
+ pass # non-zero exit status on success
+
+ elif iptables:
+ ports = [
+ (80, 8080),
+ (443, 8443),
+ ]
+
+ for src, dst in ports:
+ rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]
+
+ try:
+ # check for existing rule
+ cmd = ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule
+ run_command(args, cmd, capture=True)
+ except SubprocessError:
+ # append rule when it does not exist
+ cmd = ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule
+ run_command(args, cmd, capture=True)
+ else:
+ raise ApplicationError('No supported port forwarding mechanism detected.')
+
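+# Net effect (illustrative): each httptester hostname resolves locally, e.g.
+#
+#   127.0.0.1 ansible.http.tests # ansible-test httptester
+#
+# while pfctl or iptables redirects loopback traffic on ports 80/443 to 8080/8443.
+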
+
+def run_setup_targets(args, test_dir, target_names, targets_dict, targets_executed, inventory_path, temp_path, always):
+ """
+ :type args: IntegrationConfig
+ :type test_dir: str
+ :type target_names: list[str]
+ :type targets_dict: dict[str, IntegrationTarget]
+ :type targets_executed: set[str]
+ :type inventory_path: str
+ :type temp_path: str
+ :type always: bool
+ """
+ for target_name in target_names:
+ if not always and target_name in targets_executed:
+ continue
+
+ target = targets_dict[target_name]
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if target.script_path:
+ command_integration_script(args, target, test_dir, inventory_path, temp_path)
+ else:
+ command_integration_role(args, target, None, test_dir, inventory_path, temp_path)
+
+ targets_executed.add(target_name)
+
+
+def integration_environment(args, target, test_dir, inventory_path, ansible_config, env_config):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type test_dir: str
+ :type inventory_path: str
+ :type ansible_config: str | None
+ :type env_config: CloudEnvironmentConfig | None
+ :rtype: dict[str, str]
+ """
+ env = ansible_environment(args, ansible_config=ansible_config)
+
+ if args.inject_httptester:
+ env.update(dict(
+ HTTPTESTER='1',
+ ))
+
+ callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
+
+ integration = dict(
+ JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
+ ANSIBLE_CALLBACK_WHITELIST=','.join(sorted(set(callback_plugins))),
+ ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
+ ANSIBLE_TEST_COVERAGE='check' if args.coverage_check else ('yes' if args.coverage else ''),
+ OUTPUT_DIR=test_dir,
+ INVENTORY_PATH=os.path.abspath(inventory_path),
+ )
+
+ if args.debug_strategy:
+ env.update(dict(ANSIBLE_STRATEGY='debug'))
+
+ if 'non_local/' in target.aliases:
+ if args.coverage:
+ display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)
+
+ env.update(dict(ANSIBLE_TEST_REMOTE_INTERPRETER=''))
+
+ env.update(integration)
+
+ return env
+
+
+def command_integration_script(args, target, test_dir, inventory_path, temp_path, remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type test_dir: str
+ :type inventory_path: str
+ :type temp_path: str
+ :type remote_temp_path: str | None
+ """
+ display.info('Running %s integration test script' % target.name)
+
+ env_config = None
+
+ if isinstance(args, PosixIntegrationConfig):
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ with integration_test_environment(args, target, inventory_path) as test_env:
+ cmd = ['./%s' % os.path.basename(target.script_path)]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+ cwd = os.path.join(test_env.targets_dir, target.relative_path)
+
+ env.update(dict(
+ # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ if env_config and env_config.env_vars:
+ env.update(env_config.env_vars)
+
+ with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path:
+ if config_path:
+ cmd += ['-e', '@%s' % config_path]
+
+ module_coverage = 'non_local/' not in target.aliases
+ intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+ remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def command_integration_role(args, target, start_at_task, test_dir, inventory_path, temp_path, remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type start_at_task: str | None
+ :type test_dir: str
+ :type inventory_path: str
+ :type temp_path: str
+ :type remote_temp_path: str | None
+ """
+ display.info('Running %s integration test role' % target.name)
+
+ env_config = None
+
+ vars_files = []
+ variables = dict(
+ output_dir=test_dir,
+ )
+
+ if isinstance(args, WindowsIntegrationConfig):
+ hosts = 'windows'
+ gather_facts = False
+ variables.update(dict(
+ win_output_dir=r'C:\ansible_testing',
+ ))
+ elif isinstance(args, NetworkIntegrationConfig):
+ hosts = target.network_platform
+ gather_facts = False
+ else:
+ hosts = 'testhost'
+ gather_facts = True
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ with integration_test_environment(args, target, inventory_path) as test_env:
+ if os.path.exists(test_env.vars_file):
+ vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
+
+ play = dict(
+ hosts=hosts,
+ gather_facts=gather_facts,
+ vars_files=vars_files,
+ vars=variables,
+ roles=[
+ target.name,
+ ],
+ )
+
+ if env_config:
+ if env_config.ansible_vars:
+ variables.update(env_config.ansible_vars)
+
+ play.update(dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ ))
+
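+ # JSON is a subset of YAML, so ansible-playbook can consume the json.dumps output directly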
+ playbook = json.dumps([play], indent=4, sort_keys=True)
+
+ with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
+ filename = os.path.basename(playbook_path)
+
+ display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
+
+ cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]
+
+ if start_at_task:
+ cmd += ['--start-at-task', start_at_task]
+
+ if args.tags:
+ cmd += ['--tags', args.tags]
+
+ if args.skip_tags:
+ cmd += ['--skip-tags', args.skip_tags]
+
+ if args.diff:
+ cmd += ['--diff']
+
+ if isinstance(args, NetworkIntegrationConfig):
+ if args.testcase:
+ cmd += ['-e', 'testcase=%s' % args.testcase]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+ cwd = test_env.integration_dir
+
+ env.update(dict(
+ # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
+
+ module_coverage = 'non_local/' not in target.aliases
+ intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+ remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def get_changes_filter(args):
+ """
+ :type args: TestConfig
+ :rtype: list[str]
+ """
+ paths = detect_changes(args)
+
+ if not args.metadata.change_description:
+ if paths:
+ changes = categorize_changes(args, paths, args.command)
+ else:
+ changes = ChangeDescription()
+
+ args.metadata.change_description = changes
+
+ if paths is None:
+ return [] # change detection not enabled, do not filter targets
+
+ if not paths:
+ raise NoChangesDetected()
+
+ if args.metadata.change_description.targets is None:
+ raise NoTestsForChanges()
+
+ return args.metadata.change_description.targets
+
+
+def detect_changes(args):
+ """
+ :type args: TestConfig
+ :rtype: list[str] | None
+ """
+ if args.changed:
+ paths = get_ci_provider().detect_changes(args)
+ elif args.changed_from or args.changed_path:
+ paths = args.changed_path or []
+ if args.changed_from:
+ paths += read_text_file(args.changed_from).splitlines()
+ else:
+ return None # change detection not enabled
+
+ if paths is None:
+ return None # act as though change detection not enabled, do not filter targets
+
+ display.info('Detected changes in %d file(s).' % len(paths))
+
+ for path in paths:
+ display.info(path, verbosity=1)
+
+ return paths
+
+
+def get_integration_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ if args.docker:
+ return get_integration_docker_filter(args, targets)
+
+ if args.remote:
+ return get_integration_remote_filter(args, targets)
+
+ return get_integration_local_filter(args, targets)
+
+
+def common_integration_filter(args, targets, exclude):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :type exclude: list[str]
+ """
+ override_disabled = set(target for target in args.include if target.startswith('disabled/'))
+
+ if not args.allow_disabled:
+ skip = 'disabled/'
+ override = [target.name for target in targets if override_disabled & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-disabled or prefixing with "disabled/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_unsupported = set(target for target in args.include if target.startswith('unsupported/'))
+
+ if not args.allow_unsupported:
+ skip = 'unsupported/'
+ override = [target.name for target in targets if override_unsupported & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-unsupported or prefixing with "unsupported/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_unstable = set(target for target in args.include if target.startswith('unstable/'))
+
+ if args.allow_unstable_changed:
+ override_unstable |= set(args.metadata.change_description.focused_targets or [])
+
+ if not args.allow_unstable:
+ skip = 'unstable/'
+ override = [target.name for target in targets if override_unstable & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-unstable or prefixing with "unstable/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ # only skip a Windows test if using --windows and all the --windows versions are defined in the aliases as skip/windows/%s
+ if isinstance(args, WindowsIntegrationConfig) and args.windows:
+ all_skipped = []
+ not_skipped = []
+
+ for target in targets:
+ if "skip/windows/" not in target.aliases:
+ continue
+
+ skip_valid = []
+ skip_missing = []
+ for version in args.windows:
+ if "skip/windows/%s/" % version in target.aliases:
+ skip_valid.append(version)
+ else:
+ skip_missing.append(version)
+
+ if skip_missing and skip_valid:
+ not_skipped.append((target.name, skip_valid, skip_missing))
+ elif skip_valid:
+ all_skipped.append(target.name)
+
+ if all_skipped:
+ exclude.extend(all_skipped)
+ skip_aliases = ["skip/windows/%s/" % w for w in args.windows]
+ display.warning('Excluding tests marked "%s" which are set to skip with --windows %s: %s'
+ % ('", "'.join(skip_aliases), ', '.join(args.windows), ', '.join(all_skipped)))
+
+ if not_skipped:
+ for target, skip_valid, skip_missing in not_skipped:
+ # warn when failing to skip due to lack of support for skipping only some versions
+ display.warning('Including test "%s" which was marked to skip for --windows %s but not %s.'
+ % (target, ', '.join(skip_valid), ', '.join(skip_missing)))
+
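+# For illustration (editorial sketch): with --windows 2012,2016 a target aliased both
+# 'skip/windows/2012/' and 'skip/windows/2016/' is excluded entirely, while a target aliased
+# only 'skip/windows/2012/' is still run and a warning notes the partial skip.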
+
+def get_integration_local_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ if not args.allow_root and os.getuid() != 0:
+ skip = 'needs/root/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require --allow-root or running as root: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_destructive = set(target for target in args.include if target.startswith('destructive/'))
+
+ if not args.allow_destructive:
+ skip = 'destructive/'
+ override = [target.name for target in targets if override_destructive & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-destructive or prefixing with "destructive/" to run locally: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ exclude_targets_by_python_version(targets, args.python_version, exclude)
+
+ return exclude
+
+
+def get_integration_docker_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ skip = 'skip/docker/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which cannot run under docker: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ if not args.docker_privileged:
+ skip = 'needs/privileged/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require --docker-privileged to run under docker: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ python_version = get_python_version(args, get_docker_completion(), args.docker_raw)
+
+ exclude_targets_by_python_version(targets, python_version, exclude)
+
+ return exclude
+
+
+def get_integration_remote_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ remote = args.parsed_remote
+
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ skips = {
+ 'skip/%s' % remote.platform: remote.platform,
+ 'skip/%s/%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version),
+ 'skip/%s%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version), # legacy syntax, use above format
+ }
+
+ if remote.arch:
+ skips.update({
+ 'skip/%s/%s' % (remote.arch, remote.platform): '%s on %s' % (remote.platform, remote.arch),
+ 'skip/%s/%s/%s' % (remote.arch, remote.platform, remote.version): '%s %s on %s' % (remote.platform, remote.version, remote.arch),
+ })
+
+ for skip, description in skips.items():
+ skipped = [target.name for target in targets if skip in target.skips]
+ if skipped:
+ exclude.append(skip + '/')
+ display.warning('Excluding tests marked "%s" which are not supported on %s: %s' % (skip, description, ', '.join(skipped)))
+
+ python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+ exclude_targets_by_python_version(targets, python_version, exclude)
+
+ return exclude
+
+
+def exclude_targets_by_python_version(targets, python_version, exclude):
+ """
+ :type targets: tuple[IntegrationTarget]
+ :type python_version: str
+ :type exclude: list[str]
+ """
+ if not python_version:
+ display.warning('Python version unknown. Unable to skip tests based on Python version.')
+ return
+
+ python_major_version = python_version.split('.')[0]
+
+ skip = 'skip/python%s/' % python_version
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+ % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
+ skip = 'skip/python%s/' % python_major_version
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+ % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
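+# For illustration (editorial sketch): with python_version '2.7', targets aliased
+# 'skip/python2.7/' or 'skip/python2/' both end up on the exclude list.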
+
+def get_python_version(args, configs, name):
+ """
+ :type args: EnvironmentConfig
+ :type configs: dict[str, dict[str, str]]
+ :type name: str
+ :rtype: str
+ """
+ config = configs.get(name, {})
+ config_python = config.get('python')
+
+ if not config or not config_python:
+ if args.python:
+ return args.python
+
+ display.warning('No Python version specified. '
+ 'Use completion config or the --python option to specify one.', unique=True)
+
+ return '' # a missing version may result in failures or reduced functionality later
+
+ supported_python_versions = config_python.split(',')
+ default_python_version = supported_python_versions[0]
+
+ if args.python and args.python not in supported_python_versions:
+ raise ApplicationError('Python %s is not supported by %s. Supported Python version(s) are: %s' % (
+ args.python, name, ', '.join(sorted(supported_python_versions))))
+
+ python_version = args.python or default_python_version
+
+ return python_version
+
+
+def get_python_interpreter(args, configs, name):
+ """
+ :type args: EnvironmentConfig
+ :type configs: dict[str, dict[str, str]]
+ :type name: str
+ """
+ if args.python_interpreter:
+ return args.python_interpreter
+
+ config = configs.get(name, {})
+
+ if not config:
+ if args.python:
+ guess = 'python%s' % args.python
+ else:
+ guess = 'python'
+
+ display.warning('Using "%s" as the Python interpreter. '
+ 'Use completion config or the --python-interpreter option to specify the path.' % guess, unique=True)
+
+ return guess
+
+ python_version = get_python_version(args, configs, name)
+
+ python_dir = config.get('python_dir', '/usr/bin')
+ python_interpreter = os.path.join(python_dir, 'python%s' % python_version)
+ python_interpreter = config.get('python%s' % python_version, python_interpreter)
+
+ return python_interpreter
+
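+# For illustration (editorial sketch, hypothetical completion entry): given
+# configs = {'centos8': {'python': '3.6,2.7', 'python_dir': '/usr/libexec/platform-python'}}
+# and no --python or --python-interpreter option, get_python_interpreter(args, configs, 'centos8')
+# returns '/usr/libexec/platform-python/python3.6'.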
+
+class EnvironmentDescription:
+ """Description of current running environment."""
+ def __init__(self, args):
+ """Initialize snapshot of environment configuration.
+ :type args: IntegrationConfig
+ """
+ self.args = args
+
+ if self.args.explain:
+ self.data = {}
+ return
+
+ warnings = []
+
+ versions = ['']
+ versions += SUPPORTED_PYTHON_VERSIONS
+ versions += list(set(v.split('.')[0] for v in SUPPORTED_PYTHON_VERSIONS))
+
+ version_check = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'versions.py')
+ python_paths = dict((v, find_executable('python%s' % v, required=False)) for v in sorted(versions))
+ pip_paths = dict((v, find_executable('pip%s' % v, required=False)) for v in sorted(versions))
+ program_versions = dict((v, self.get_version([python_paths[v], version_check], warnings)) for v in sorted(python_paths) if python_paths[v])
+ pip_interpreters = dict((v, self.get_shebang(pip_paths[v])) for v in sorted(pip_paths) if pip_paths[v])
+ known_hosts_hash = self.get_hash(os.path.expanduser('~/.ssh/known_hosts'))
+
+ for version in sorted(versions):
+ self.check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings)
+
+ for warning in warnings:
+ display.warning(warning, unique=True)
+
+ self.data = dict(
+ python_paths=python_paths,
+ pip_paths=pip_paths,
+ program_versions=program_versions,
+ pip_interpreters=pip_interpreters,
+ known_hosts_hash=known_hosts_hash,
+ warnings=warnings,
+ )
+
+ @staticmethod
+ def check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings):
+ """
+ :type version: str
+ :param python_paths: dict[str, str]
+ :param pip_paths: dict[str, str]
+ :param pip_interpreters: dict[str, str]
+ :param warnings: list[str]
+ """
+ python_label = 'Python%s' % (' %s' % version if version else '')
+
+ pip_path = pip_paths.get(version)
+ python_path = python_paths.get(version)
+
+ if not python_path and not pip_path:
+ # neither python nor pip is present for this version
+ return
+
+ if not python_path:
+ warnings.append('A %s interpreter was not found, yet a matching pip was found at "%s".' % (python_label, pip_path))
+ return
+
+ if not pip_path:
+ warnings.append('A %s interpreter was found at "%s", yet a matching pip was not found.' % (python_label, python_path))
+ return
+
+ pip_shebang = pip_interpreters.get(version)
+
+ match = re.search(r'#!\s*(?P<command>[^\s]+)', pip_shebang)
+
+ if not match:
+ warnings.append('A %s pip was found at "%s", but it does not have a valid shebang: %s' % (python_label, pip_path, pip_shebang))
+ return
+
+ pip_interpreter = os.path.realpath(match.group('command'))
+ python_interpreter = os.path.realpath(python_path)
+
+ if pip_interpreter == python_interpreter:
+ return
+
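+ # the resolved paths differ, but the interpreters may still be identical copies of one file rather than symlinks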
+ try:
+ identical = filecmp.cmp(pip_interpreter, python_interpreter)
+ except OSError:
+ identical = False
+
+ if identical:
+ return
+
+ warnings.append('A %s pip was found at "%s", but it uses interpreter "%s" instead of "%s".' % (
+ python_label, pip_path, pip_interpreter, python_interpreter))
+
+ def __str__(self):
+ """
+ :rtype: str
+ """
+ return json.dumps(self.data, sort_keys=True, indent=4)
+
+ def validate(self, target_name, throw):
+ """
+ :type target_name: str
+ :type throw: bool
+ :rtype: bool
+ """
+ current = EnvironmentDescription(self.args)
+
+ return self.check(self, current, target_name, throw)
+
+ @staticmethod
+ def check(original, current, target_name, throw):
+ """
+ :type original: EnvironmentDescription
+ :type current: EnvironmentDescription
+ :type target_name: str
+ :type throw: bool
+ :rtype: bool
+ """
+ original_json = str(original)
+ current_json = str(current)
+
+ if original_json == current_json:
+ return True
+
+ unified_diff = '\n'.join(difflib.unified_diff(
+ a=original_json.splitlines(),
+ b=current_json.splitlines(),
+ fromfile='original.json',
+ tofile='current.json',
+ lineterm='',
+ ))
+
+ message = ('Test target "%s" has changed the test environment!\n'
+ 'If these changes are necessary, they must be reverted before the test finishes.\n'
+ '>>> Original Environment\n'
+ '%s\n'
+ '>>> Current Environment\n'
+ '%s\n'
+ '>>> Environment Diff\n'
+ '%s'
+ % (target_name, original_json, current_json, unified_diff))
+
+ if throw:
+ raise ApplicationError(message)
+
+ display.error(message)
+
+ return False
+
+ @staticmethod
+ def get_version(command, warnings):
+ """
+ :type command: list[str]
+ :type warnings: list[str]
+ :rtype: list[str] | None
+ """
+ try:
+ stdout, stderr = raw_command(command, capture=True, cmd_verbosity=2)
+ except SubprocessError as ex:
+ warnings.append(u'%s' % ex)
+ return None # all failures are equal, we don't care why it failed, only that it did
+
+ return [line.strip() for line in ((stdout or '').strip() + (stderr or '').strip()).splitlines()]
+
+ @staticmethod
+ def get_shebang(path):
+ """
+ :type path: str
+ :rtype: str
+ """
+ with open_text_file(path) as script_fd:
+ return script_fd.readline().strip()
+
+ @staticmethod
+ def get_hash(path):
+ """
+ :type path: str
+ :rtype: str | None
+ """
+ if not os.path.exists(path):
+ return None
+
+ file_hash = hashlib.md5()
+
+ file_hash.update(read_binary_file(path))
+
+ return file_hash.hexdigest()
+
+
+class NoChangesDetected(ApplicationWarning):
+ """Exception when change detection was performed, but no changes were found."""
+ def __init__(self):
+ super(NoChangesDetected, self).__init__('No changes detected.')
+
+
+class NoTestsForChanges(ApplicationWarning):
+ """Exception when changes detected, but no tests trigger as a result."""
+ def __init__(self):
+ super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
+
+
+class Delegate(Exception):
+ """Trigger command delegation."""
+ def __init__(self, exclude=None, require=None, integration_targets=None):
+ """
+ :type exclude: list[str] | None
+ :type require: list[str] | None
+ :type integration_targets: tuple[IntegrationTarget] | None
+ """
+ super(Delegate, self).__init__()
+
+ self.exclude = exclude or []
+ self.require = require or []
+ self.integration_targets = integration_targets or tuple()
+
+
+class AllTargetsSkipped(ApplicationWarning):
+ """All targets skipped."""
+ def __init__(self):
+ super(AllTargetsSkipped, self).__init__('All targets skipped.')
diff --git a/test/lib/ansible_test/_internal/git.py b/test/lib/ansible_test/_internal/git.py
new file mode 100644
index 00000000..acc39f3f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/git.py
@@ -0,0 +1,137 @@
+"""Wrapper around git command-line tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from . import types as t
+
+from .util import (
+ SubprocessError,
+ raw_command,
+)
+
+
+class Git:
+ """Wrapper around git command-line tools."""
+ def __init__(self, root=None): # type: (t.Optional[str]) -> None
+ self.git = 'git'
+ self.root = root
+
+ def get_diff(self, args, git_options=None):
+ """
+ :type args: list[str]
+ :type git_options: list[str] | None
+ :rtype: list[str]
+ """
+ cmd = ['diff'] + args
+ if git_options is None:
+ git_options = ['-c', 'core.quotePath=']
+ return self.run_git_split(git_options + cmd, '\n', str_errors='replace')
+
+ def get_diff_names(self, args):
+ """
+ :type args: list[str]
+ :rtype: list[str]
+ """
+ cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_submodule_paths(self): # type: () -> t.List[str]
+ """Return a list of submodule paths recursively."""
+ cmd = ['submodule', 'status', '--recursive']
+ output = self.run_git_split(cmd, '\n')
+ submodule_paths = [re.search(r'^.[0-9a-f]+ (?P<path>[^ ]+)', line).group('path') for line in output]
+
+ # status is returned for all submodules in the current git repository relative to the current directory
+ # when the current directory is not the root of the git repository this can yield relative paths which are not below the current directory
+ # this can occur when multiple collections are in a git repo and some collections are submodules when others are not
+ # specifying "." as the path to enumerate would limit results to the current directory, but can cause the git command to fail with the error:
+ # error: pathspec '.' did not match any file(s) known to git
+ # this can occur when the current directory contains no files tracked by git
+ # instead we'll filter out the relative paths, since we're only interested in those at or below the current directory
+ submodule_paths = [path for path in submodule_paths if not path.startswith('../')]
+
+ return submodule_paths
+
+ def get_file_names(self, args):
+ """
+ :type args: list[str]
+ :rtype: list[str]
+ """
+ cmd = ['ls-files', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_branches(self):
+ """
+ :rtype: list[str]
+ """
+ cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
+ return self.run_git_split(cmd)
+
+ def get_branch(self):
+ """
+ :rtype: str
+ """
+ cmd = ['symbolic-ref', '--short', 'HEAD']
+ return self.run_git(cmd).strip()
+
+ def get_rev_list(self, commits=None, max_count=None):
+ """
+ :type commits: list[str] | None
+ :type max_count: int | None
+ :rtype: list[str]
+ """
+ cmd = ['rev-list']
+
+ if commits:
+ cmd += commits
+ else:
+ cmd += ['HEAD']
+
+ if max_count:
+ cmd += ['--max-count', '%s' % max_count]
+
+ return self.run_git_split(cmd)
+
+ def get_branch_fork_point(self, branch):
+ """
+ :type branch: str
+ :rtype: str
+ """
+ cmd = ['merge-base', '--fork-point', branch]
+ return self.run_git(cmd).strip()
+
+ def is_valid_ref(self, ref):
+ """
+ :type ref: str
+ :rtype: bool
+ """
+ cmd = ['show', ref]
+ try:
+ self.run_git(cmd, str_errors='replace')
+ return True
+ except SubprocessError:
+ return False
+
+ def run_git_split(self, cmd, separator=None, str_errors='strict'):
+ """
+ :type cmd: list[str]
+ :type separator: str | None
+ :type str_errors: str
+ :rtype: list[str]
+ """
+ output = self.run_git(cmd, str_errors=str_errors).strip(separator)
+
+ if not output:
+ return []
+
+ return output.split(separator)
+
+ def run_git(self, cmd, str_errors='strict'):
+ """
+ :type cmd: list[str]
+ :type str_errors: str
+ :rtype: str
+ """
+ return raw_command([self.git] + cmd, cwd=self.root, capture=True, str_errors=str_errors)[0]
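+
+
+# Illustrative usage (a sketch, not part of the original module):
+#
+#   git = Git()
+#   if git.is_valid_ref('HEAD~1'):
+#       paths = git.get_diff_names(['HEAD~1']) # files changed relative to the parent commit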
diff --git a/test/lib/ansible_test/_internal/http.py b/test/lib/ansible_test/_internal/http.py
new file mode 100644
index 00000000..6607a10b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/http.py
@@ -0,0 +1,181 @@
+"""
+Primitive replacement for requests to avoid extra dependency.
+Avoids use of urllib2 due to lack of SNI support.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import time
+
+try:
+ from urllib import urlencode
+except ImportError:
+ # noinspection PyCompatibility, PyUnresolvedReferences
+ from urllib.parse import urlencode # pylint: disable=locally-disabled, import-error, no-name-in-module
+
+try:
+ # noinspection PyCompatibility
+ from urlparse import urlparse, urlunparse, parse_qs
+except ImportError:
+ # noinspection PyCompatibility, PyUnresolvedReferences
+ from urllib.parse import urlparse, urlunparse, parse_qs # pylint: disable=locally-disabled, ungrouped-imports
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+)
+
+from .util_common import (
+ CommonConfig,
+ run_command,
+)
+
+
+class HttpClient:
+ """Make HTTP requests via curl."""
+ def __init__(self, args, always=False, insecure=False, proxy=None):
+ """
+ :type args: CommonConfig
+ :type always: bool
+ :type insecure: bool
+ """
+ self.args = args
+ self.always = always
+ self.insecure = insecure
+ self.proxy = proxy
+
+ self.username = None
+ self.password = None
+
+ def get(self, url):
+ """
+ :type url: str
+ :rtype: HttpResponse
+ """
+ return self.request('GET', url)
+
+ def delete(self, url):
+ """
+ :type url: str
+ :rtype: HttpResponse
+ """
+ return self.request('DELETE', url)
+
+ def put(self, url, data=None, headers=None):
+ """
+ :type url: str
+ :type data: str | None
+ :type headers: dict[str, str] | None
+ :rtype: HttpResponse
+ """
+ return self.request('PUT', url, data, headers)
+
+ def request(self, method, url, data=None, headers=None):
+ """
+ :type method: str
+ :type url: str
+ :type data: str | None
+ :type headers: dict[str, str] | None
+ :rtype: HttpResponse
+ """
+ cmd = ['curl', '-s', '-S', '-i', '-X', method]
+
+ if self.insecure:
+ cmd += ['--insecure']
+
+ if headers is None:
+ headers = {}
+
+ headers['Expect'] = '' # suppress the 'Expect: 100-continue' header which curl would otherwise send
+
+ if self.username:
+ if self.password:
+ display.sensitive.add(self.password)
+ cmd += ['-u', '%s:%s' % (self.username, self.password)]
+ else:
+ cmd += ['-u', self.username]
+
+ for header in headers.keys():
+ cmd += ['-H', '%s: %s' % (header, headers[header])]
+
+ if data is not None:
+ cmd += ['-d', data]
+
+ if self.proxy:
+ cmd += ['-x', self.proxy]
+
+ cmd += [url]
+
+ attempts = 0
+ max_attempts = 3
+ sleep_seconds = 3
+
+ # curl error codes which are safe to retry (request never sent to server)
+ retry_on_status = (
+ 6, # CURLE_COULDNT_RESOLVE_HOST
+ )
+
+ stdout = ''
+
+ while True:
+ attempts += 1
+
+ try:
+ stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
+ break
+ except SubprocessError as ex:
+ if ex.status in retry_on_status and attempts < max_attempts:
+ display.warning(u'%s' % ex)
+ time.sleep(sleep_seconds)
+ continue
+
+ raise
+
+ if self.args.explain and not self.always:
+ return HttpResponse(method, url, 200, '')
+
+ header, body = stdout.split('\r\n\r\n', 1)
+
+ response_headers = header.split('\r\n')
+ first_line = response_headers[0]
+ http_response = first_line.split(' ')
+ status_code = int(http_response[1])
+
+ return HttpResponse(method, url, status_code, body)
+
+
+class HttpResponse:
+ """HTTP response from curl."""
+ def __init__(self, method, url, status_code, response):
+ """
+ :type method: str
+ :type url: str
+ :type status_code: int
+ :type response: str
+ """
+ self.method = method
+ self.url = url
+ self.status_code = status_code
+ self.response = response
+
+ def json(self):
+ """
+ :rtype: any
+ """
+ try:
+ return json.loads(self.response)
+ except ValueError:
+ raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response))
+
+
+class HttpError(ApplicationError):
+ """HTTP response as an error."""
+ def __init__(self, status, message):
+ """
+ :type status: int
+ :type message: str
+ """
+ super(HttpError, self).__init__('%s: %s' % (status, message))
+ self.status = status
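+
+
+# Illustrative usage (a sketch, assuming args is a CommonConfig and the URL is reachable):
+#
+#   client = HttpClient(args)
+#   response = client.get('https://api.github.com/repos/ansible/ansible')
+#   data = response.json() # raises HttpError if the body is not valid JSON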
diff --git a/test/lib/ansible_test/_internal/import_analysis.py b/test/lib/ansible_test/_internal/import_analysis.py
new file mode 100644
index 00000000..9cc5376f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/import_analysis.py
@@ -0,0 +1,362 @@
+"""Analyze python import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import os
+import re
+
+from . import types as t
+
+from .io import (
+ read_binary_file,
+)
+
+from .util import (
+ display,
+ ApplicationError,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+VIRTUAL_PACKAGES = set([
+ 'ansible.module_utils.six',
+])
+
+
+def get_python_module_utils_imports(compile_targets):
+ """Return a dictionary of module_utils names mapped to sets of python file paths.
+ :type compile_targets: list[TestTarget]
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
+ module_utils -= virtual_utils
+
+ imports_by_target_path = {}
+
+ for target in compile_targets:
+ imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
+
+ def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
+ """Recursively expand module_utils imports from module_utils files."""
+ display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
+
+ if seen is None:
+ seen = set([import_name])
+
+ results = set([import_name])
+
+ # virtual packages depend on the modules they contain instead of the reverse
+ if import_name in VIRTUAL_PACKAGES:
+ for sub_import in sorted(virtual_utils):
+ if sub_import.startswith('%s.' % import_name):
+ if sub_import in seen:
+ continue
+
+ seen.add(sub_import)
+
+ matches = sorted(recurse_import(sub_import, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ import_path = get_import_path(import_name)
+
+ if import_path not in imports_by_target_path:
+ import_path = get_import_path(import_name, package=True)
+
+ if import_path not in imports_by_target_path:
+ raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
+
+ # process imports in reverse so the deepest imports come first
+ for name in sorted(imports_by_target_path[import_path], reverse=True):
+ if name in virtual_utils:
+ continue
+
+ if name in seen:
+ continue
+
+ seen.add(name)
+
+ matches = sorted(recurse_import(name, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ return results
+
+ for module_util in module_utils:
+ # recurse over module_utils imports while excluding self
+ module_util_imports = recurse_import(module_util)
+ module_util_imports.remove(module_util)
+
+ # add recursive imports to all path entries which import this module_util
+ for target_path in imports_by_target_path:
+ if module_util in imports_by_target_path[target_path]:
+ for module_util_import in sorted(module_util_imports):
+ if module_util_import not in imports_by_target_path[target_path]:
+ display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
+ imports_by_target_path[target_path].add(module_util_import)
+
+ imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
+ for virtual_util in virtual_utils:
+ parent_package = '.'.join(virtual_util.split('.')[:-1])
+ imports[virtual_util] = imports[parent_package]
+ display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ package_path = get_import_path(module_util, package=True)
+
+ if os.path.exists(package_path) and not os.path.getsize(package_path):
+ continue # ignore empty __init__.py files
+
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_python_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
+ else:
+ prefix = 'ansible.module_utils'
+
+ if path.endswith('/__init__.py'):
+ path = os.path.dirname(path)
+
+ if path == base_path:
+ name = prefix
+ else:
+ name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
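+# For illustration (editorial sketch): when testing ansible-core itself,
+# 'lib/ansible/module_utils/common/text/converters.py' maps to the name
+# 'ansible.module_utils.common.text.converters'.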
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ module_utils = []
+
+ for path in data_context().content.walk_files(data_context().content.module_utils_path):
+ ext = os.path.splitext(path)[1]
+
+ if ext != '.py':
+ continue
+
+ module_utils.append(get_python_module_utils_name(path))
+
+ return set(module_utils)
+
+
+def extract_python_module_utils_imports(path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :rtype: set[str]
+ """
+ # Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
+ # See: https://www.python.org/dev/peps/pep-0263
+ # Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
+ code = read_binary_file(path)
+
+ try:
+ tree = ast.parse(code)
+ except SyntaxError as ex:
+ # Treat this error as a warning so tests can be executed as best as possible.
+ # The compile test will detect and report this syntax error.
+ display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
+ return set()
+
+ finder = ModuleUtilFinder(path, module_utils)
+ finder.visit(tree)
+ return finder.imports
+
+
+def get_import_path(name, package=False): # type: (str, bool) -> str
+ """Return a path from an import name."""
+ if package:
+ filename = os.path.join(name.replace('.', '/'), '__init__.py')
+ else:
+ filename = '%s.py' % name.replace('.', '/')
+
+ if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
+ path = os.path.join('lib', filename)
+ elif data_context().content.collection and (
+ name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
+ name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
+ path = '/'.join(filename.split('/')[3:])
+ else:
+ raise Exception('Unexpected import name: %s' % name)
+
+ return path
+
+
+def path_to_module(path): # type: (str) -> str
+ """Convert the given path to a module name."""
+ module = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ if module.endswith('.__init__'):
+ module = module[:-9]
+
+ return module
+
+
+def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
+ """Convert a relative import to an absolute import."""
+ if level <= 0:
+ absolute_name = name
+ elif not module:
+ display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
+ absolute_name = 'relative.nomodule'
+ else:
+ parts = module.split('.')
+
+ if level >= len(parts):
+ display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
+ absolute_name = 'relative.abovelevel'
+ else:
+ absolute_name = '.'.join(parts[:-level] + [name])
+
+ return absolute_name
+
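+# For illustration (editorial sketch): inside module 'ansible.module_utils.common.collections',
+# the statement "from .text import converters" arrives as name='text', level=1,
+# and resolves to 'ansible.module_utils.common.text'.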
+
+class ModuleUtilFinder(ast.NodeVisitor):
+ """AST visitor to find valid module_utils imports."""
+ def __init__(self, path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ """
+ self.path = path
+ self.module_utils = module_utils
+ self.imports = set()
+
+ # implicitly import parent package
+
+ if path.endswith('/__init__.py'):
+ path = os.path.split(path)[0]
+
+ if path.startswith('lib/ansible/module_utils/'):
+ package = os.path.split(path)[0].replace('/', '.')[4:]
+
+ if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
+ self.add_import(package, 0)
+
+ self.module = None
+
+ if data_context().content.is_ansible:
+ # Various parts of the Ansible source tree execute within different modules.
+ # To support import analysis, each file which uses relative imports must reside under a path defined here.
+ # The mapping is a tuple consisting of a path pattern to match and a replacement path.
+ # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
+ path_map = (
+ ('^hacking/build_library/build_ansible/', 'build_ansible/'),
+ ('^lib/ansible/', 'ansible/'),
+ ('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
+ ('^test/units/', 'test/units/'),
+ ('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
+ ('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
+ ('^test/integration/targets/.*/library/', 'ansible/modules/'),
+ )
+
+ for pattern, replacement in path_map:
+ if re.search(pattern, self.path):
+ revised_path = re.sub(pattern, replacement, self.path)
+ self.module = path_to_module(revised_path)
+ break
+ else:
+ # This assumes that all files within the collection are executed by Ansible as part of the collection.
+ # While that will usually be true, there are exceptions which will result in this resolution being incorrect.
+ self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
+
+ # noinspection PyPep8Naming
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_Import(self, node):
+ """
+ :type node: ast.Import
+ """
+ self.generic_visit(node)
+
+ # import ansible.module_utils.MODULE[.MODULE]
+ # import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
+ self.add_imports([alias.name for alias in node.names], node.lineno)
+
+ # noinspection PyPep8Naming
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_ImportFrom(self, node):
+ """
+ :type node: ast.ImportFrom
+ """
+ self.generic_visit(node)
+
+ if not node.module:
+ return
+
+ module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
+
+ if not module.startswith('ansible'):
+ return
+
+ # from ansible.module_utils import MODULE[, MODULE]
+ # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
+
+ def add_import(self, name, line_number):
+ """
+ :type name: str
+ :type line_number: int
+ """
+ import_name = name
+
+ while self.is_module_util_name(name):
+ if name in self.module_utils:
+ if name not in self.imports:
+ display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
+ self.imports.add(name)
+
+ return # stop once the owning module_util is found; duplicate imports are ignored
+
+ name = '.'.join(name.split('.')[:-1])
+
+ if is_subdir(self.path, data_context().content.test_path):
+ return # invalid imports in tests are ignored
+
+ # Treat this error as a warning so tests can be executed as best as possible.
+ # This error should be detected by unit or integration tests.
+ display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
+
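+ # For illustration (editorial sketch): given 'ansible.module_utils.common.text.converters.to_native',
+ # the loop above strips trailing components until it finds a known module_util,
+ # recording 'ansible.module_utils.common.text.converters'.
+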
+ def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
+ """Add the given import names if they are module_utils imports."""
+ for name in names:
+ if self.is_module_util_name(name):
+ self.add_import(name, line_no)
+
+ @staticmethod
+ def is_module_util_name(name): # type: (str) -> bool
+ """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
+ if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
+ return True
+
+ if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
+ return True
+
+ return False
diff --git a/test/lib/ansible_test/_internal/init.py b/test/lib/ansible_test/_internal/init.py
new file mode 100644
index 00000000..682e6b0c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/init.py
@@ -0,0 +1,16 @@
+"""Early initialization for ansible-test before most other imports have been performed."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import resource
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
+DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
+
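+# Tuples compare lexicographically here, so the soft limit is only ever lowered toward
+# SOFT_RLIMIT_NOFILE; the hard limit (the second element) is left unchanged.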
+if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
+ resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
+ CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
diff --git a/test/lib/ansible_test/_internal/integration/__init__.py b/test/lib/ansible_test/_internal/integration/__init__.py
new file mode 100644
index 00000000..f7be34e7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/integration/__init__.py
@@ -0,0 +1,349 @@
+"""Ansible integration test infrastructure."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import json
+import os
+import shutil
+import tempfile
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..target import (
+ analyze_integration_target_dependencies,
+ walk_integration_targets,
+)
+
+from ..config import (
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+)
+
+from ..io import (
+ make_dirs,
+ write_text_file,
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ COVERAGE_CONFIG_NAME,
+ MODE_DIRECTORY,
+ MODE_DIRECTORY_WRITE,
+ MODE_FILE,
+)
+
+from ..util_common import (
+ named_temporary_file,
+ ResultType,
+)
+
+from ..coverage_util import (
+ generate_coverage_config,
+)
+
+from ..cache import (
+ CommonCache,
+)
+
+from ..cloud import (
+ CloudEnvironmentConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+def setup_common_temp_dir(args, path):
+ """
+ :type args: IntegrationConfig
+ :type path: str
+ """
+ if args.explain:
+ return
+
+ os.mkdir(path)
+ os.chmod(path, MODE_DIRECTORY)
+
+ if args.coverage:
+ coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME)
+
+ coverage_config = generate_coverage_config(args)
+
+ write_text_file(coverage_config_path, coverage_config)
+
+ os.chmod(coverage_config_path, MODE_FILE)
+
+ coverage_output_path = os.path.join(path, ResultType.COVERAGE.name)
+
+ os.mkdir(coverage_output_path)
+ os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
+
+
+def generate_dependency_map(integration_targets):
+ """
+ :type integration_targets: list[IntegrationTarget]
+ :rtype: dict[str, set[IntegrationTarget]]
+ """
+ targets_dict = dict((target.name, target) for target in integration_targets)
+ target_dependencies = analyze_integration_target_dependencies(integration_targets)
+ dependency_map = {}
+
+ invalid_targets = set()
+
+ for dependency, dependents in target_dependencies.items():
+ dependency_target = targets_dict.get(dependency)
+
+ if not dependency_target:
+ invalid_targets.add(dependency)
+ continue
+
+ for dependent in dependents:
+ if dependent not in dependency_map:
+ dependency_map[dependent] = set()
+
+ dependency_map[dependent].add(dependency_target)
+
+ if invalid_targets:
+ raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
+
+ return dependency_map
+
+
+def get_files_needed(target_dependencies):
+ """
+ :type target_dependencies: list[IntegrationTarget]
+ :rtype: list[str]
+ """
+ files_needed = []
+
+ for target_dependency in target_dependencies:
+ files_needed += target_dependency.needs_file
+
+ files_needed = sorted(set(files_needed))
+
+ invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
+
+ if invalid_paths:
+ raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
+
+ return files_needed
+
+
+def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None
+ """Check the given inventory for issues."""
+ if args.docker or args.remote:
+ if os.path.exists(inventory_path):
+ inventory = read_text_file(inventory_path)
+
+ if 'ansible_ssh_private_key_file' in inventory:
+ display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
+
+
+def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str
+ """Return the inventory path used for the given integration configuration relative to the content root."""
+ inventory_names = {
+ PosixIntegrationConfig: 'inventory',
+ WindowsIntegrationConfig: 'inventory.winrm',
+ NetworkIntegrationConfig: 'inventory.networking',
+ } # type: t.Dict[t.Type[IntegrationConfig], str]
+
+ return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
+
+
+def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None
+ """Make the given inventory available during delegation."""
+ if isinstance(args, PosixIntegrationConfig):
+ return
+
+ def inventory_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the inventory file to the payload file list.
+ This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
+ """
+ inventory_path = get_inventory_relative_path(args)
+ inventory_tuple = inventory_path_src, inventory_path
+
+ if os.path.isfile(inventory_path_src) and inventory_tuple not in files:
+ originals = [item for item in files if item[1] == inventory_path]
+
+ if originals:
+ for original in originals:
+ files.remove(original)
+
+ display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
+ else:
+ display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
+
+ files.append(inventory_tuple)
+
+ data_context().register_payload_callback(inventory_callback)
+
+
+@contextlib.contextmanager
+def integration_test_environment(args, target, inventory_path_src):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type inventory_path_src: str
+ """
+ ansible_config_src = args.get_ansible_config()
+ ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
+
+ if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
+ display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+
+ integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
+ targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
+ inventory_path = inventory_path_src
+ ansible_config = ansible_config_src
+ vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ return
+
+ # When testing a collection, the temporary directory must reside within the collection.
+ # This is necessary to enable support for the default collection for non-collection content (playbooks and roles).
+ root_temp_dir = os.path.join(ResultType.TMP.path, 'integration')
+
+ prefix = '%s-' % target.name
+ suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
+
+ if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
+ display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+ suffix = '-ansible'
+
+ if args.explain:
+ temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
+ else:
+ make_dirs(root_temp_dir)
+ temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+
+ try:
+ display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(temp_dir, inventory_relative_path)
+
+ cache = IntegrationCache(args)
+
+ target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
+
+ files_needed = get_files_needed(target_dependencies)
+
+ integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
+ targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path)
+ ansible_config = os.path.join(temp_dir, ansible_config_relative)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+ vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path)
+
+ file_copies = [
+ (ansible_config_src, ansible_config),
+ (inventory_path_src, inventory_path),
+ ]
+
+ if os.path.exists(vars_file_src):
+ file_copies.append((vars_file_src, vars_file))
+
+ file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
+
+ integration_targets_relative_path = data_context().content.integration_targets_path
+
+ directory_copies = [
+ (
+ os.path.join(integration_targets_relative_path, target.relative_path),
+ os.path.join(temp_dir, integration_targets_relative_path, target.relative_path)
+ )
+ for target in target_dependencies
+ ]
+
+ directory_copies = sorted(set(directory_copies))
+ file_copies = sorted(set(file_copies))
+
+ if not args.explain:
+ make_dirs(integration_dir)
+
+ for dir_src, dir_dst in directory_copies:
+ display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
+
+ if not args.explain:
+ shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True)
+
+ for file_src, file_dst in file_copies:
+ display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
+
+ if not args.explain:
+ make_dirs(os.path.dirname(file_dst))
+ shutil.copy2(file_src, file_dst)
+
+ yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ finally:
+ if not args.explain:
+ shutil.rmtree(temp_dir)
+
+
+@contextlib.contextmanager
+def integration_test_config_file(args, env_config, integration_dir):
+ """
+ :type args: IntegrationConfig
+ :type env_config: CloudEnvironmentConfig
+ :type integration_dir: str
+ """
+ if not env_config:
+ yield None
+ return
+
+ config_vars = (env_config.ansible_vars or {}).copy()
+
+ config_vars.update(dict(
+ ansible_test=dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ )
+ ))
+
+ config_file = json.dumps(config_vars, indent=4, sort_keys=True)
+
+ with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path:
+ filename = os.path.relpath(path, integration_dir)
+
+ display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
+
+ yield path
+
+
+class IntegrationEnvironment:
+ """Details about the integration environment."""
+ def __init__(self, integration_dir, targets_dir, inventory_path, ansible_config, vars_file):
+ self.integration_dir = integration_dir
+ self.targets_dir = targets_dir
+ self.inventory_path = inventory_path
+ self.ansible_config = ansible_config
+ self.vars_file = vars_file
+
+
+class IntegrationCache(CommonCache):
+ """Integration cache."""
+ @property
+ def integration_targets(self):
+ """
+ :rtype: list[IntegrationTarget]
+ """
+ return self.get('integration_targets', lambda: list(walk_integration_targets()))
+
+ @property
+ def dependency_map(self):
+ """
+ :rtype: dict[str, set[IntegrationTarget]]
+ """
+ return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py
new file mode 100644
index 00000000..0f61cd2d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/io.py
@@ -0,0 +1,94 @@
+"""Functions for disk IO."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import io
+import json
+import os
+
+from . import types as t
+
+from .encoding import (
+ ENCODING,
+ to_bytes,
+ to_text,
+)
+
+
+def read_json_file(path): # type: (t.AnyStr) -> t.Any
+ """Parse and return the json content from the specified path."""
+ return json.loads(read_text_file(path))
+
+
+def read_text_file(path): # type: (t.AnyStr) -> t.Text
+ """Return the contents of the specified path as text."""
+ return to_text(read_binary_file(path))
+
+
+def read_binary_file(path): # type: (t.AnyStr) -> bytes
+ """Return the contents of the specified path as bytes."""
+ with open_binary_file(path) as file:
+ return file.read()
+
+
+def make_dirs(path): # type: (str) -> None
+ """Create a directory at path, including any necessary parent directories."""
+ try:
+ os.makedirs(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+
+def write_json_file(path, # type: str
+ content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+ create_directories=False, # type: bool
+ formatted=True, # type: bool
+ encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]]
+ ): # type: (...) -> None
+ """Write the given json content to the specified path, optionally creating missing directories."""
+ text_content = json.dumps(content,
+ sort_keys=formatted,
+ indent=4 if formatted else None,
+ separators=(', ', ': ') if formatted else (',', ':'),
+ cls=encoder,
+ ) + '\n'
+
+ write_text_file(path, text_content, create_directories=create_directories)
+
+
+def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None
+ """Write the given text content to the specified path, optionally creating missing directories."""
+ if create_directories:
+ make_dirs(os.path.dirname(path))
+
+ with open_binary_file(path, 'wb') as file:
+ file.write(to_bytes(content))
+
+
+def open_text_file(path, mode='r'): # type: (str, str) -> t.TextIO
+ """Open the given path for text access."""
+ if 'b' in mode:
+ raise Exception('mode cannot include "b" for text files: %s' % mode)
+
+ # noinspection PyTypeChecker
+ return io.open(to_bytes(path), mode, encoding=ENCODING)
+
+
+def open_binary_file(path, mode='rb'): # type: (str, str) -> t.BinaryIO
+ """Open the given path for binary access."""
+ if 'b' not in mode:
+ raise Exception('mode must include "b" for binary files: %s' % mode)
+
+ # noinspection PyTypeChecker
+ return io.open(to_bytes(path), mode)
+
+
+class SortedSetEncoder(json.JSONEncoder):
+ """Encode sets as sorted lists."""
+ def default(self, obj): # pylint: disable=method-hidden, arguments-differ
+ if isinstance(obj, set):
+ return sorted(obj)
+
+ return super(SortedSetEncoder, self).default(obj)
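
To make the encoder's role concrete, here is a self-contained sketch showing how json.dumps() falls back to default() for otherwise unserializable types such as set; the sample data is invented:

    import json

    class SortedSetEncoder(json.JSONEncoder):
        """Encode sets as sorted lists (mirrors the class above)."""
        def default(self, obj):
            if isinstance(obj, set):
                return sorted(obj)
            return super(SortedSetEncoder, self).default(obj)

    # sets serialize deterministically: {"targets": ["a", "b"]}
    print(json.dumps({'targets': {'b', 'a'}}, cls=SortedSetEncoder))
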
diff --git a/test/lib/ansible_test/_internal/manage_ci.py b/test/lib/ansible_test/_internal/manage_ci.py
new file mode 100644
index 00000000..e81dad68
--- /dev/null
+++ b/test/lib/ansible_test/_internal/manage_ci.py
@@ -0,0 +1,335 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+import time
+
+from .util import (
+ SubprocessError,
+ ApplicationError,
+ cmd_quote,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+ intercept_command,
+ get_network_settings,
+ run_command,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+)
+
+from .ansible_util import (
+ ansible_environment,
+)
+
+from .config import (
+ ShellConfig,
+)
+
+from .payload import (
+ create_payload,
+)
+
+
+class ManageWindowsCI:
+ """Manage access to a Windows instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+ self.ssh_args = ['-i', self.core_ci.ssh_key.key]
+
+ ssh_options = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ for ssh_option in sorted(ssh_options):
+ self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
+
+ def setup(self, python_version):
+ """Used in delegate_remote to setup the host, no action is required for Windows.
+ :type python_version: str
+ """
+
+ def wait(self):
+ """Wait for instance to respond to ansible ping."""
+ extra_vars = [
+ 'ansible_connection=winrm',
+ 'ansible_host=%s' % self.core_ci.connection.hostname,
+ 'ansible_user=%s' % self.core_ci.connection.username,
+ 'ansible_password=%s' % self.core_ci.connection.password,
+ 'ansible_port=%s' % self.core_ci.connection.port,
+ 'ansible_winrm_server_cert_validation=ignore',
+ ]
+
+ name = 'windows_%s' % self.core_ci.version
+
+ env = ansible_environment(self.core_ci.args)
+ cmd = ['ansible', '-m', 'ansible.windows.win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)]
+
+ for dummy in range(1, 120):
+ try:
+ intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+ def download(self, remote, local):
+ """
+ :type remote: str
+ :type local: str
+ """
+ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
+
+ def upload(self, local, remote):
+ """
+ :type local: str
+ :type remote: str
+ """
+ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
+
+ def ssh(self, command, options=None, force_pty=True):
+ """
+ :type command: str | list[str]
+ :type options: list[str] | None
+ :type force_pty: bool
+ """
+ if not options:
+ options = []
+ if force_pty:
+ options.append('-tt')
+
+ if isinstance(command, list):
+ command = ' '.join(cmd_quote(c) for c in command)
+
+ run_command(self.core_ci.args,
+ ['ssh', '-q'] + self.ssh_args +
+ options +
+ ['-p', '22',
+ '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
+ [command])
+
+ def scp(self, src, dst):
+ """
+ :type src: str
+ :type dst: str
+ """
+ for dummy in range(1, 10):
+ try:
+ run_command(self.core_ci.args,
+ ['scp'] + self.ssh_args +
+ ['-P', '22', '-q', '-r', src, dst])
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
+
+
+class ManageNetworkCI:
+ """Manage access to a network instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+
+ def wait(self):
+ """Wait for instance to respond to ansible ping."""
+ settings = get_network_settings(self.core_ci.args, self.core_ci.platform, self.core_ci.version)
+
+ extra_vars = [
+ 'ansible_host=%s' % self.core_ci.connection.hostname,
+ 'ansible_port=%s' % self.core_ci.connection.port,
+ 'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key,
+ ] + [
+ '%s=%s' % (key, value) for key, value in settings.inventory_vars.items()
+ ]
+
+ name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '-'))
+
+ env = ansible_environment(self.core_ci.args)
+ cmd = [
+ 'ansible',
+ '-m', '%s%s_command' % (settings.collection + '.' if settings.collection else '', self.core_ci.platform),
+ '-a', 'commands=?',
+ '-u', self.core_ci.connection.username,
+ '-i', '%s,' % name,
+ '-e', ' '.join(extra_vars),
+ name,
+ ]
+
+ for dummy in range(1, 90):
+ try:
+ intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+
+class ManagePosixCI:
+ """Manage access to a POSIX instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+ self.ssh_args = ['-i', self.core_ci.ssh_key.key]
+
+ ssh_options = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ for ssh_option in sorted(ssh_options):
+ self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
+
+ if self.core_ci.platform == 'freebsd':
+ if self.core_ci.provider == 'aws':
+ self.become = ['su', '-l', 'root', '-c']
+ elif self.core_ci.provider == 'azure':
+ self.become = ['sudo', '-in', 'sh', '-c']
+ else:
+ raise NotImplementedError('provider %s has not been implemented' % self.core_ci.provider)
+ elif self.core_ci.platform == 'macos':
+ self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH', 'sh', '-c']
+ elif self.core_ci.platform == 'osx':
+ self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH']
+ elif self.core_ci.platform == 'rhel' or self.core_ci.platform == 'centos':
+ self.become = ['sudo', '-in', 'bash', '-c']
+ elif self.core_ci.platform in ['aix', 'ibmi']:
+ self.become = []
+
+ def setup(self, python_version):
+ """Start instance and wait for it to become ready and respond to an ansible ping.
+ :type python_version: str
+ :rtype: str
+ """
+ pwd = self.wait()
+
+ display.info('Remote working directory: %s' % pwd, verbosity=1)
+
+ if isinstance(self.core_ci.args, ShellConfig):
+ if self.core_ci.args.raw:
+ return pwd
+
+ self.configure(python_version)
+ self.upload_source()
+
+ return pwd
+
+ def wait(self): # type: () -> str
+ """Wait for instance to respond to SSH."""
+ for dummy in range(1, 90):
+ try:
+ stdout = self.ssh('pwd', capture=True)[0]
+
+ if self.core_ci.args.explain:
+ return '/pwd'
+
+ pwd = stdout.strip().splitlines()[-1]
+
+ if not pwd.startswith('/'):
+ raise Exception('Unexpected current working directory "%s" from "pwd" command output:\n%s' % (pwd, stdout))
+
+ return pwd
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+ def configure(self, python_version):
+ """Configure remote host for testing.
+ :type python_version: str
+ """
+ self.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'remote.sh'), '/tmp')
+ self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s %s' % (self.core_ci.platform, python_version))
+
+ def upload_source(self):
+ """Upload and extract source."""
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
+ remote_source_dir = '/tmp'
+ remote_source_path = os.path.join(remote_source_dir, os.path.basename(local_source_fd.name))
+
+ create_payload(self.core_ci.args, local_source_fd.name)
+
+ self.upload(local_source_fd.name, remote_source_dir)
+ # AIX does not ship GNU tar, so its tar accepts different options
+ # and does not recognize -z. The gunzip | tar pipeline below works
+ # with both GNU and AIX tar.
+ self.ssh(
+ 'rm -rf ~/ansible ~/ansible_collections && cd ~/ && gunzip --stdout %s | tar oxf - && rm %s' %
+ (remote_source_path, remote_source_path)
+ )
+
+ def download(self, remote, local):
+ """
+ :type remote: str
+ :type local: str
+ """
+ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
+
+ def upload(self, local, remote):
+ """
+ :type local: str
+ :type remote: str
+ """
+ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
+
+ def ssh(self, command, options=None, capture=False):
+ """
+ :type command: str | list[str]
+ :type options: list[str] | None
+ :type capture: bool
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ if isinstance(command, list):
+ command = ' '.join(cmd_quote(c) for c in command)
+
+ command = cmd_quote(command) if self.become else command
+ return run_command(self.core_ci.args,
+ ['ssh', '-tt', '-q'] + self.ssh_args +
+ options +
+ ['-p', str(self.core_ci.connection.port),
+ '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
+ self.become + [command], capture=capture)
+
+ def scp(self, src, dst):
+ """
+ :type src: str
+ :type dst: str
+ """
+ for dummy in range(1, 10):
+ try:
+ run_command(self.core_ci.args,
+ ['scp'] + self.ssh_args +
+ ['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst])
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
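
All three managers share the same retry shape: attempt the operation, sleep on SubprocessError, and raise ApplicationError once the attempts run out. A distilled, standalone version of that loop (attempt count and delay are illustrative, not the values used above):

    import time

    def retry(func, attempts=10, delay=10, exceptions=(Exception,)):
        """Retry func with a fixed delay between failures, as wait()/scp() do above."""
        for _ in range(attempts - 1):
            try:
                return func()
            except exceptions:
                time.sleep(delay)
        return func()  # the final attempt propagates its exception
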
diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py
new file mode 100644
index 00000000..36575d0c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/metadata.py
@@ -0,0 +1,151 @@
+"""Test metadata for passing data to delegated tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import types as t
+
+from .util import (
+ display,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .diff import (
+ parse_diff,
+ FileDiff,
+)
+
+
+class Metadata:
+ """Metadata object for passing data to delegated tests."""
+ def __init__(self):
+ """Initialize metadata."""
+ self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int]]]
+ self.cloud_config = None # type: t.Optional[t.Dict[str, str]]
+ self.instance_config = None # type: t.Optional[t.List[t.Dict[str, str]]]
+ self.change_description = None # type: t.Optional[ChangeDescription]
+ self.ci_provider = None # type: t.Optional[str]
+
+ def populate_changes(self, diff):
+ """
+ :type diff: list[str] | None
+ """
+ patches = parse_diff(diff)
+ patches = sorted(patches, key=lambda k: k.new.path) # type: t.List[FileDiff]
+
+ self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
+
+ renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
+ deletes = [patch.old.path for patch in patches if not patch.new.exists]
+
+ # make sure old paths which were renamed or deleted are registered in changes
+ for path in renames + deletes:
+ if path in self.changes:
+ # old path was replaced with another file
+ continue
+
+ # failed tests involving deleted files should use line 0, since no content remains
+ self.changes[path] = ((0, 0),)
+
+ def to_dict(self):
+ """
+ :rtype: dict[str, any]
+ """
+ return dict(
+ changes=self.changes,
+ cloud_config=self.cloud_config,
+ instance_config=self.instance_config,
+ ci_provider=self.ci_provider,
+ change_description=self.change_description.to_dict(),
+ )
+
+ def to_file(self, path):
+ """
+ :type path: str
+ """
+ data = self.to_dict()
+
+ display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
+
+ write_json_file(path, data)
+
+ @staticmethod
+ def from_file(path):
+ """
+ :type path: str
+ :rtype: Metadata
+ """
+ data = read_json_file(path)
+ return Metadata.from_dict(data)
+
+ @staticmethod
+ def from_dict(data):
+ """
+ :type data: dict[str, any]
+ :rtype: Metadata
+ """
+ metadata = Metadata()
+ metadata.changes = data['changes']
+ metadata.cloud_config = data['cloud_config']
+ metadata.instance_config = data['instance_config']
+ metadata.ci_provider = data['ci_provider']
+ metadata.change_description = ChangeDescription.from_dict(data['change_description'])
+
+ return metadata
+
+
+class ChangeDescription:
+ """Description of changes."""
+ def __init__(self):
+ self.command = '' # type: str
+ self.changed_paths = [] # type: t.List[str]
+ self.deleted_paths = [] # type: t.List[str]
+ self.regular_command_targets = {} # type: t.Dict[str, t.List[str]]
+ self.focused_command_targets = {} # type: t.Dict[str, t.List[str]]
+ self.no_integration_paths = [] # type: t.List[str]
+
+ @property
+ def targets(self):
+ """
+ :rtype: list[str] | None
+ """
+ return self.regular_command_targets.get(self.command)
+
+ @property
+ def focused_targets(self):
+ """
+ :rtype: list[str] | None
+ """
+ return self.focused_command_targets.get(self.command)
+
+ def to_dict(self):
+ """
+ :rtype: dict[str, any]
+ """
+ return dict(
+ command=self.command,
+ changed_paths=self.changed_paths,
+ deleted_paths=self.deleted_paths,
+ regular_command_targets=self.regular_command_targets,
+ focused_command_targets=self.focused_command_targets,
+ no_integration_paths=self.no_integration_paths,
+ )
+
+ @staticmethod
+ def from_dict(data):
+ """
+ :type data: dict[str, any]
+ :rtype: ChangeDescription
+ """
+ changes = ChangeDescription()
+ changes.command = data['command']
+ changes.changed_paths = data['changed_paths']
+ changes.deleted_paths = data['deleted_paths']
+ changes.regular_command_targets = data['regular_command_targets']
+ changes.focused_command_targets = data['focused_command_targets']
+ changes.no_integration_paths = data['no_integration_paths']
+
+ return changes
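
The bookkeeping in populate_changes() is easy to miss: old paths that were renamed or deleted stay in the changes map with a (0, 0) line range, so later failures can still be attributed to them. A standalone sketch of just that step (the paths are made up):

    changes = {'lib/new_name.py': ((12, 20),)}  # line ranges from the parsed diff

    for old_path in ('lib/old_name.py', 'lib/deleted.py'):
        if old_path not in changes:
            changes[old_path] = ((0, 0),)  # no content remains, so record line 0

    print(changes)
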
diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py
new file mode 100644
index 00000000..161faba0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/payload.py
@@ -0,0 +1,146 @@
+"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import os
+import stat
+import tarfile
+import tempfile
+import time
+
+from . import types as t
+
+from .config import (
+ IntegrationConfig,
+ ShellConfig,
+)
+
+from .util import (
+ display,
+ ANSIBLE_SOURCE_ROOT,
+ remove_tree,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ CommonConfig,
+)
+
+# improve performance by disabling uid/gid lookups
+tarfile.pwd = None
+tarfile.grp = None
+
+# this bin symlink map must exactly match the contents of the bin directory
+# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
+ANSIBLE_BIN_SYMLINK_MAP = {
+ 'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
+ 'ansible-config': 'ansible',
+ 'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
+ 'ansible-console': 'ansible',
+ 'ansible-doc': 'ansible',
+ 'ansible-galaxy': 'ansible',
+ 'ansible-inventory': 'ansible',
+ 'ansible-playbook': 'ansible',
+ 'ansible-pull': 'ansible',
+ 'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
+ 'ansible-vault': 'ansible',
+}
+
+
+def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
+ """Create a payload for delegation."""
+ if args.explain:
+ return
+
+ files = list(data_context().ansible_source)
+ filters = {}
+
+ def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
+ """Make the given file executable."""
+ tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
+ return tar_info
+
+ if not ANSIBLE_SOURCE_ROOT:
+ # reconstruct the bin directory which is not available when running from an ansible install
+ files.extend(create_temporary_bin_files(args))
+ filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
+
+ if not data_context().content.is_ansible:
+ # exclude unnecessary files when not testing ansible itself
+ files = [f for f in files if
+ is_subdir(f[1], 'bin/') or
+ is_subdir(f[1], 'lib/ansible/') or
+ is_subdir(f[1], 'test/lib/ansible_test/')]
+
+ if not isinstance(args, (ShellConfig, IntegrationConfig)):
+ # exclude built-in ansible modules when they are not needed
+ files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
+
+ collection_layouts = data_context().create_collection_layouts()
+
+ content_files = []
+ extra_files = []
+
+ for layout in collection_layouts:
+ if layout == data_context().content:
+ # include files from the current collection (layout.collection.directory will be added later)
+ content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
+ else:
+ # include files from each collection in the same collection root as the content being tested
+ extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
+ else:
+ # when testing ansible itself the ansible source is the content
+ content_files = files
+ # there are no extra files when testing ansible itself
+ extra_files = []
+
+ for callback in data_context().payload_callbacks:
+ # execute callbacks only on the content paths
+ # this is done before placing them in the appropriate subdirectory (see below)
+ callback(content_files)
+
+ # place ansible source files under the 'ansible' directory on the delegated host
+ files = [(src, os.path.join('ansible', dst)) for src, dst in files]
+
+ if data_context().content.collection:
+ # place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
+ files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
+ # extra files already have the correct destination path
+ files.extend(extra_files)
+
+ # maintain predictable file order
+ files = sorted(set(files))
+
+ display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
+
+ start = time.time()
+
+ with tarfile.TarFile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
+ for src, dst in files:
+ display.info('%s -> %s' % (src, dst), verbosity=4)
+ tar.add(src, dst, filter=filters.get(dst))
+
+ duration = time.time() - start
+ payload_size_bytes = os.path.getsize(dst_path)
+
+ display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
+
+
+def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
+ """Create a temporary ansible bin directory populated using the symlink map."""
+ if args.explain:
+ temp_path = '/tmp/ansible-tmp-bin'
+ else:
+ temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
+ atexit.register(remove_tree, temp_path)
+
+ for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ path = os.path.join(temp_path, name)
+ os.symlink(dest, path)
+
+ return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
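
The filters dict above maps archive destination paths to tarfile filter callables, which is how create_payload() restores the executable bit on the reconstructed bin stubs. A minimal sketch of the same tarfile mechanism; the file names here are placeholders, not paths from the code above:

    import stat
    import tarfile

    def make_executable(tar_info):
        """Set the executable bits on an archive member, as in create_payload()."""
        tar_info.mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        return tar_info

    # 'cli_stub.py' is a placeholder source file; the filter runs once per member
    with tarfile.open('payload.tgz', mode='w:gz') as tar:
        tar.add('cli_stub.py', 'ansible/bin/ansible', filter=make_executable)
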
diff --git a/test/lib/ansible_test/_internal/powershell_import_analysis.py b/test/lib/ansible_test/_internal/powershell_import_analysis.py
new file mode 100644
index 00000000..cfc61859
--- /dev/null
+++ b/test/lib/ansible_test/_internal/powershell_import_analysis.py
@@ -0,0 +1,105 @@
+"""Analyze powershell import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ resolve_csharp_ps_util,
+)
+
+from .data import (
+ data_context,
+)
+
+
+def get_powershell_module_utils_imports(powershell_targets):
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths.
+ :type powershell_targets: list[TestTarget]
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
+
+ imports = dict([(module_util, set()) for module_util in module_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_powershell_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_powershell_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ return set(get_powershell_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
+ if os.path.splitext(p)[1] == '.psm1')
+
+
+def extract_powershell_module_utils_imports(path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :rtype: set[str]
+ """
+ imports = set()
+
+ code = read_text_file(path)
+
+ if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
+ imports.add('Ansible.ModuleUtils.Legacy')
+
+ lines = code.splitlines()
+ line_number = 0
+
+ for line in lines:
+ line_number += 1
+ match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
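
The import regex accepts both '#Requires -Module(s)' and '#AnsibleRequires -PowerShell' directives, case-insensitively, and captures the module_utils name. A quick standalone check against two representative lines (the collection name is invented):

    import re

    PATTERN = r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)'

    for line in (
        '#Requires -Module Ansible.ModuleUtils.Legacy',
        '#AnsibleRequires -PowerShell ansible_collections.ns.col.plugins.module_utils.Helper',
    ):
        # prints the captured module_utils name for each directive
        print(re.search(PATTERN, line).group(1))
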
diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py
new file mode 100644
index 00000000..6e034b53
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/__init__.py
@@ -0,0 +1,78 @@
+"""Provider (plugin) infrastructure for ansible-test."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import os
+
+from .. import types as t
+
+from ..util import (
+ ABC,
+ ApplicationError,
+ get_subclasses,
+)
+
+
+try:
+ TPathProvider = t.TypeVar('TPathProvider', bound='PathProvider')
+except AttributeError:
+ TPathProvider = None # pylint: disable=invalid-name
+
+
+def get_path_provider_classes(provider_type): # type: (t.Type[TPathProvider]) -> t.List[t.Type[TPathProvider]]
+ """Return a list of path provider classes of the given type."""
+ return sorted(get_subclasses(provider_type), key=lambda c: (c.priority, c.__name__))
+
+
+def find_path_provider(provider_type, # type: t.Type[TPathProvider]
+ provider_classes, # type: t.List[t.Type[TPathProvider]]
+ path, # type: str
+ walk, # type: bool
+ ): # type: (...) -> TPathProvider
+ """Return the first found path provider of the given type for the given path."""
+ sequences = sorted(set(pc.sequence for pc in provider_classes if pc.sequence > 0))
+
+ for sequence in sequences:
+ candidate_path = path
+ tier_classes = [pc for pc in provider_classes if pc.sequence == sequence]
+
+ while True:
+ for provider_class in tier_classes:
+ if provider_class.is_content_root(candidate_path):
+ return provider_class(candidate_path)
+
+ if not walk:
+ break
+
+ parent_path = os.path.dirname(candidate_path)
+
+ if parent_path == candidate_path:
+ break
+
+ candidate_path = parent_path
+
+ raise ProviderNotFoundForPath(provider_type, path)
+
+
+class ProviderNotFoundForPath(ApplicationError):
+ """Exception generated when a path based provider cannot be found for a given path."""
+ def __init__(self, provider_type, path): # type: (t.Type, str) -> None
+ super(ProviderNotFoundForPath, self).__init__('No %s found for path: %s' % (provider_type.__name__, path))
+
+ self.provider_type = provider_type
+ self.path = path
+
+
+class PathProvider(ABC):
+ """Base class for provider plugins that are path based."""
+ sequence = 500
+ priority = 500
+
+ def __init__(self, root): # type: (str) -> None
+ self.root = root
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py
new file mode 100644
index 00000000..03d596fc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py
@@ -0,0 +1,232 @@
+"""Code for finding content."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import collections
+import os
+
+from ... import types as t
+
+from ...util import (
+ ANSIBLE_SOURCE_ROOT,
+)
+
+from .. import (
+ PathProvider,
+)
+
+
+class Layout:
+ """Description of content locations and helper methods to access content."""
+ def __init__(self,
+ root, # type: str
+ paths, # type: t.List[str]
+ ): # type: (...) -> None
+ self.root = root
+
+ self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
+ self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
+ self.__paths_tree = paths_to_tree(self.__paths)
+ self.__files_tree = paths_to_tree(self.__files)
+
+ def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
+ """Return a list of all file paths."""
+ if include_symlinked_directories:
+ return self.__paths
+
+ return self.__files
+
+ def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
+ """Return a list of file paths found recursively under the given directory."""
+ if include_symlinked_directories:
+ tree = self.__paths_tree
+ else:
+ tree = self.__files_tree
+
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(tree, parts)
+
+ if not item:
+ return []
+
+ directories = collections.deque(item[0].values())
+
+ files = list(item[1])
+
+ while directories:
+ item = directories.pop()
+ directories.extend(item[0].values())
+ files.extend(item[1])
+
+ return files
+
+ def get_dirs(self, directory): # type: (str) -> t.List[str]
+ """Return a list directory paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return [os.path.join(directory, key) for key in item[0].keys()] if item else []
+
+ def get_files(self, directory): # type: (str) -> t.List[str]
+ """Return a list of file paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return item[1] if item else []
+
+
+class ContentLayout(Layout):
+ """Information about the current Ansible content being tested."""
+ def __init__(self,
+ root, # type: str
+ paths, # type: t.List[str]
+ plugin_paths, # type: t.Dict[str, str]
+ collection, # type: t.Optional[CollectionDetail]
+ test_path, # type: str
+ results_path, # type: str
+ sanity_path, # type: str
+ sanity_messages, # type: t.Optional[LayoutMessages]
+ integration_path, # type: str
+ integration_targets_path, # type: str
+ integration_vars_path, # type: str
+ integration_messages, # type: t.Optional[LayoutMessages]
+ unit_path, # type: str
+ unit_module_path, # type: str
+ unit_module_utils_path, # type: str
+ unit_messages, # type: t.Optional[LayoutMessages]
+ ): # type: (...) -> None
+ super(ContentLayout, self).__init__(root, paths)
+
+ self.plugin_paths = plugin_paths
+ self.collection = collection
+ self.test_path = test_path
+ self.results_path = results_path
+ self.sanity_path = sanity_path
+ self.sanity_messages = sanity_messages
+ self.integration_path = integration_path
+ self.integration_targets_path = integration_targets_path
+ self.integration_vars_path = integration_vars_path
+ self.integration_messages = integration_messages
+ self.unit_path = unit_path
+ self.unit_module_path = unit_module_path
+ self.unit_module_utils_path = unit_module_utils_path
+ self.unit_messages = unit_messages
+
+ self.is_ansible = root == ANSIBLE_SOURCE_ROOT
+
+ @property
+ def prefix(self): # type: () -> str
+ """Return the collection prefix or an empty string if not a collection."""
+ if self.collection:
+ return self.collection.prefix
+
+ return ''
+
+ @property
+ def module_path(self): # type: () -> t.Optional[str]
+ """Return the path where modules are found, if any."""
+ return self.plugin_paths.get('modules')
+
+ @property
+ def module_utils_path(self): # type: () -> t.Optional[str]
+ """Return the path where module_utils are found, if any."""
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_powershell_path(self): # type: () -> t.Optional[str]
+ """Return the path where powershell module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'powershell')
+
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_csharp_path(self): # type: () -> t.Optional[str]
+ """Return the path where csharp module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'csharp')
+
+ return self.plugin_paths.get('module_utils')
+
+
+class LayoutMessages:
+ """Messages generated during layout creation that should be deferred for later display."""
+ def __init__(self):
+ self.info = [] # type: t.List[str]
+ self.warning = [] # type: t.List[str]
+ self.error = [] # type: t.List[str]
+
+
+class CollectionDetail:
+ """Details about the layout of the current collection."""
+ def __init__(self,
+ name, # type: str
+ namespace, # type: str
+ root, # type: str
+ ): # type: (...) -> None
+ self.name = name
+ self.namespace = namespace
+ self.root = root
+ self.full_name = '%s.%s' % (namespace, name)
+ self.prefix = '%s.' % self.full_name
+ self.directory = os.path.join('ansible_collections', namespace, name)
+
+
+class LayoutProvider(PathProvider):
+ """Base class for layout providers."""
+ PLUGIN_TYPES = (
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'doc_fragments',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'module_utils',
+ 'modules',
+ 'netconf',
+ 'shell',
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ )
+
+ @abc.abstractmethod
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a layout using the given root and paths."""
+
+
+def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]]
+ """Return a filesystem tree from the given list of paths."""
+ tree = {}, []
+
+ for path in paths:
+ parts = path.split(os.path.sep)
+ root = tree
+
+ for part in parts[:-1]:
+ if part not in root[0]:
+ root[0][part] = {}, []
+
+ root = root[0][part]
+
+ root[1].append(path)
+
+ return tree
+
+
+def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]]
+ """Return the portion of the tree found under the path given by parts, or None if it does not exist."""
+ root = tree
+
+ for part in parts:
+ root = root[0].get(part)
+
+ if not root:
+ return None
+
+ return root
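
paths_to_tree() builds nested (directories, files) two-tuples keyed by path component, which is what makes walk_files() and get_files() cheap dictionary lookups instead of filesystem scans. A standalone copy with sample paths to show the shape:

    import os

    def paths_to_tree(paths):
        """Standalone copy of the helper above: nested ({dirs}, [files]) tuples."""
        tree = {}, []
        for path in paths:
            root = tree
            for part in path.split(os.path.sep)[:-1]:
                if part not in root[0]:
                    root[0][part] = {}, []
                root = root[0][part]
            root[1].append(path)
        return tree

    tree = paths_to_tree(['lib/ansible/cli.py', 'lib/ansible/config.py', 'bin/ansible'])
    print(sorted(tree[0]))                  # ['bin', 'lib']
    print(tree[0]['lib'][0]['ansible'][1])  # ['lib/ansible/cli.py', 'lib/ansible/config.py']
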
diff --git a/test/lib/ansible_test/_internal/provider/layout/ansible.py b/test/lib/ansible_test/_internal/provider/layout/ansible.py
new file mode 100644
index 00000000..49ca482b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/ansible.py
@@ -0,0 +1,47 @@
+"""Layout provider for Ansible source."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class AnsibleLayout(LayoutProvider):
+ """Layout provider for Ansible source."""
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, 'setup.py')) and os.path.exists(os.path.join(path, 'bin/ansible-test'))
+
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES)
+
+ plugin_paths.update(dict(
+ modules='lib/ansible/modules',
+ module_utils='lib/ansible/module_utils',
+ ))
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=None,
+ test_path='test',
+ results_path='test/results',
+ sanity_path='test/sanity',
+ sanity_messages=None,
+ integration_path='test/integration',
+ integration_targets_path='test/integration/targets',
+ integration_vars_path='test/integration/integration_config.yml',
+ integration_messages=None,
+ unit_path='test/units',
+ unit_module_path='test/units/modules',
+ unit_module_utils_path='test/units/module_utils',
+ unit_messages=None,
+ )
diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py
new file mode 100644
index 00000000..ffad29f2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/collection.py
@@ -0,0 +1,123 @@
+"""Layout provider for Ansible collections."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+ CollectionDetail,
+ LayoutMessages,
+)
+
+
+class CollectionLayout(LayoutProvider):
+ """Layout provider for Ansible collections."""
+ __module_path = 'plugins/modules'
+ __unit_path = 'test/unit'
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
+ return True
+
+ return False
+
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
+
+ collection_root = os.path.dirname(os.path.dirname(root))
+ collection_dir = os.path.relpath(root, collection_root)
+ collection_namespace, collection_name = collection_dir.split(os.path.sep)
+
+ collection_root = os.path.dirname(collection_root)
+
+ sanity_messages = LayoutMessages()
+ integration_messages = LayoutMessages()
+ unit_messages = LayoutMessages()
+
+ # these apply to all test commands
+ self.__check_test_path(paths, sanity_messages)
+ self.__check_test_path(paths, integration_messages)
+ self.__check_test_path(paths, unit_messages)
+
+ # these apply to specific test commands
+ integration_targets_path = self.__check_integration_path(paths, integration_messages)
+ self.__check_unit_path(paths, unit_messages)
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=CollectionDetail(
+ name=collection_name,
+ namespace=collection_namespace,
+ root=collection_root,
+ ),
+ test_path='tests',
+ results_path='tests/output',
+ sanity_path='tests/sanity',
+ sanity_messages=sanity_messages,
+ integration_path='tests/integration',
+ integration_targets_path=integration_targets_path.rstrip(os.path.sep),
+ integration_vars_path='tests/integration/integration_config.yml',
+ integration_messages=integration_messages,
+ unit_path='tests/unit',
+ unit_module_path='tests/unit/plugins/modules',
+ unit_module_utils_path='tests/unit/plugins/module_utils',
+ unit_messages=unit_messages,
+ )
+
+ @staticmethod
+ def __check_test_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
+ modern_test_path = 'tests/'
+ modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
+ legacy_test_path = 'test/'
+ legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
+
+ if modern_test_path_found and legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
+ elif legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
+
+ @staticmethod
+ def __check_integration_path(paths, messages): # type: (t.List[str], LayoutMessages) -> str
+ modern_integration_path = 'roles/test/'
+ modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
+ legacy_integration_path = 'tests/integration/targets/'
+ legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
+
+ if modern_integration_path_found and legacy_integration_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = modern_integration_path
+ elif legacy_integration_path_found:
+ messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = legacy_integration_path
+ elif modern_integration_path_found:
+ messages.info.append('Loading tests from "%s".' % modern_integration_path)
+ integration_targets_path = modern_integration_path
+ else:
+ messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
+ integration_targets_path = modern_integration_path
+
+ return integration_targets_path
+
+ @staticmethod
+ def __check_unit_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
+ modern_unit_path = 'tests/unit/'
+ modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
+ legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
+ legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
+
+ if modern_unit_path_found and legacy_unit_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
+ elif legacy_unit_path_found:
+ messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
+ elif modern_unit_path_found:
+ pass # unit tests only run from one directory so no message is needed
+ else:
+ messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
diff --git a/test/lib/ansible_test/_internal/provider/source/__init__.py b/test/lib/ansible_test/_internal/provider/source/__init__.py
new file mode 100644
index 00000000..fab28b09
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/__init__.py
@@ -0,0 +1,18 @@
+"""Common code for source providers."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+
+from ... import types as t
+
+from .. import (
+ PathProvider,
+)
+
+
+class SourceProvider(PathProvider):
+ """Base class for source providers."""
+ @abc.abstractmethod
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
diff --git a/test/lib/ansible_test/_internal/provider/source/git.py b/test/lib/ansible_test/_internal/provider/source/git.py
new file mode 100644
index 00000000..0bf81a1c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/git.py
@@ -0,0 +1,72 @@
+"""Source provider for a content root managed by git version control."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from ...git import (
+ Git,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...util import (
+ SubprocessError,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class GitSource(SourceProvider):
+ """Source provider for a content root managed by git version control."""
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, '.git'))
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = self.__get_paths(path)
+
+ try:
+ submodule_paths = Git(path).get_submodule_paths()
+ except SubprocessError:
+ if path == self.root:
+ raise
+
+ # older versions of git require submodule commands to be executed from the top level of the working tree
+ # git version 2.18.1 (centos8) does not have this restriction
+ # git version 1.8.3.1 (centos7) does
+ # fall back to using the top level directory of the working tree only when needed
+ # this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules
+ rel_path = os.path.relpath(path, self.root) + os.path.sep
+
+ submodule_paths = Git(self.root).get_submodule_paths()
+ submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)]
+
+ for submodule_path in submodule_paths:
+ paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path)))
+
+ # git reports submodule directories as regular files
+ paths = [p for p in paths if p not in submodule_paths]
+
+ return paths
+
+ @staticmethod
+ def __get_paths(path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ git = Git(path)
+ paths = git.get_file_names(['--cached', '--others', '--exclude-standard'])
+ deleted_paths = git.get_file_names(['--deleted'])
+ paths = sorted(set(paths) - set(deleted_paths))
+
+ # directory symlinks are reported by git as regular files but they need to be treated as directories
+ paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths]
+
+ return paths
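
__get_paths() treats git as the source of truth: tracked files plus untracked-but-not-ignored files, minus anything reported deleted. Assuming get_file_names() wraps 'git ls-files' (the wrapper itself is defined elsewhere in this patch), an equivalent direct invocation would look like this:

    import subprocess

    def list_content_paths(repo_path):
        """Approximate GitSource.__get_paths() with a direct git call (assumes ls-files semantics)."""
        cmd = ['git', '-C', repo_path, 'ls-files', '-z', '--cached', '--others', '--exclude-standard']
        names = subprocess.check_output(cmd).decode().split('\x00')
        return sorted(name for name in names if name)
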
diff --git a/test/lib/ansible_test/_internal/provider/source/installed.py b/test/lib/ansible_test/_internal/provider/source/installed.py
new file mode 100644
index 00000000..d24a6e3d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/installed.py
@@ -0,0 +1,43 @@
+"""Source provider for content which has been installed."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ SourceProvider,
+)
+
+
+class InstalledSource(SourceProvider):
+ """Source provider for content which has been installed."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ )
+
+ for root, _dummy, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions])
+
+ # NOTE: directory symlinks are ignored as there should be no directory symlinks for an install
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/provider/source/unversioned.py b/test/lib/ansible_test/_internal/provider/source/unversioned.py
new file mode 100644
index 00000000..09105789
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/unversioned.py
@@ -0,0 +1,87 @@
+"""Fallback source provider when no other provider matches the content root."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from ...constants import (
+ TIMEOUT_PATH,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class UnversionedSource(SourceProvider):
+ """Fallback source provider when no other provider matches the content root."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_any_dir = (
+ '.idea',
+ '.pytest_cache',
+ '__pycache__',
+ 'ansible.egg-info',
+ 'ansible_base.egg-info',
+ )
+
+ kill_sub_dir = {
+ 'test': (
+ 'results',
+ 'cache',
+ 'output',
+ ),
+ 'tests': (
+ 'output',
+ ),
+ 'docs/docsite': (
+ '_build',
+ ),
+ }
+
+ kill_sub_file = {
+ '': (
+ TIMEOUT_PATH,
+ ),
+ }
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ '.retry',
+ )
+
+ for root, dir_names, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()):
+ if kill in dir_names:
+ dir_names.remove(kill)
+
+ kill_files = kill_sub_file.get(rel_root, ())
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files])
+
+ # include directory symlinks since they will not be traversed and would otherwise go undetected
+ paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(dir_name))])
+
+ return paths
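
The pruning above relies on a documented os.walk() property: mutating dir_names in place stops the walk from descending into the removed directories. Demonstrated standalone, with an illustrative skip list:

    import os

    skip_dirs = ('__pycache__', '.idea')

    for root, dir_names, file_names in os.walk('.'):
        # in-place removal prunes the traversal, exactly as kill_any_dir does above
        for name in list(dir_names):
            if name in skip_dirs:
                dir_names.remove(name)
        print(root, file_names)
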
diff --git a/test/lib/ansible_test/_internal/sanity/__init__.py b/test/lib/ansible_test/_internal/sanity/__init__.py
new file mode 100644
index 00000000..976bbb2f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/__init__.py
@@ -0,0 +1,946 @@
+"""Execute Ansible sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import glob
+import os
+import re
+import collections
+
+from .. import types as t
+
+from ..io import (
+ read_json_file,
+)
+
+from ..util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+ import_plugins,
+ load_plugins,
+ parse_to_list_of_dict,
+ ABC,
+ ANSIBLE_TEST_DATA_ROOT,
+ is_binary_file,
+ read_lines_without_comments,
+ get_available_python_versions,
+ find_python,
+ is_subdir,
+ paths_to_dirs,
+ get_ansible_version,
+ str_to_version,
+)
+
+from ..util_common import (
+ run_command,
+ intercept_command,
+ handle_layout_messages,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..target import (
+ walk_internal_targets,
+ walk_sanity_targets,
+ TestTarget,
+)
+
+from ..executor import (
+ get_changes_filter,
+ AllTargetsSkipped,
+ Delegate,
+ install_command_requirements,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..test import (
+ TestSuccess,
+ TestFailure,
+ TestSkipped,
+ TestMessage,
+ calculate_best_confidence,
+)
+
+from ..data import (
+ data_context,
+)
+
+COMMAND = 'sanity'
+SANITY_ROOT = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sanity')
+
+
+def command_sanity(args):
+ """
+ :type args: SanityConfig
+ """
+ handle_layout_messages(data_context().content.sanity_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ targets = SanityTargets.create(args.include, args.exclude, require)
+
+ if not targets.include:
+ raise AllTargetsSkipped()
+
+ if args.delegate:
+ raise Delegate(require=changes, exclude=args.exclude)
+
+ tests = sanity_get_tests()
+
+ if args.test:
+ tests = [target for target in tests if target.name in args.test]
+ else:
+ disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled]
+ tests = [target for target in tests if target.enabled or args.allow_disabled]
+
+ if disabled:
+ display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))
+
+ if args.skip_test:
+ tests = [target for target in tests if target.name not in args.skip_test]
+
+ total = 0
+ failed = []
+
+ for test in tests:
+ if args.list_tests:
+ display.info(test.name)
+ continue
+
+ available_versions = sorted(get_available_python_versions(SUPPORTED_PYTHON_VERSIONS).keys())
+
+ if args.python:
+ # specific version selected
+ versions = (args.python,)
+ elif isinstance(test, SanityMultipleVersion):
+ # try all supported versions for multi-version tests when a specific version has not been selected
+ versions = test.supported_python_versions
+ elif not test.supported_python_versions or args.python_version in test.supported_python_versions:
+ # the test works with any version or the version we're already running
+ versions = (args.python_version,)
+ else:
+ # available versions supported by the test
+ versions = tuple(sorted(set(available_versions) & set(test.supported_python_versions)))
+ # use the lowest available version supported by the test or the current version as a fallback (which will be skipped)
+ versions = versions[:1] or (args.python_version,)
+
+ for version in versions:
+ if isinstance(test, SanityMultipleVersion):
+ skip_version = version
+ else:
+ skip_version = None
+
+ options = ''
+
+ if test.supported_python_versions and version not in test.supported_python_versions:
+ display.warning("Skipping sanity test '%s' on unsupported Python %s." % (test.name, version))
+ result = SanitySkipped(test.name, skip_version)
+ elif not args.python and version not in available_versions:
+ display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." % (test.name, version))
+ result = SanitySkipped(test.name, skip_version)
+ else:
+ if test.supported_python_versions:
+ display.info("Running sanity test '%s' with Python %s" % (test.name, version))
+ else:
+ display.info("Running sanity test '%s'" % test.name)
+
+ if isinstance(test, SanityCodeSmellTest):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityMultipleVersion):
+ settings = test.load_processor(args, version)
+ elif isinstance(test, SanitySingleVersion):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityVersionNeutral):
+ settings = test.load_processor(args)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+
+ all_targets = targets.targets
+
+ if test.all_targets:
+ usable_targets = targets.targets
+ elif test.no_targets:
+ usable_targets = tuple()
+ else:
+ usable_targets = targets.include
+
+ all_targets = SanityTargets.filter_and_inject_targets(test, all_targets)
+ usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets)
+
+ usable_targets = sorted(test.filter_targets(list(usable_targets)))
+ usable_targets = settings.filter_skipped_targets(usable_targets)
+ sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets))
+
+ if usable_targets or test.no_targets:
+ install_command_requirements(args, version, context=test.name, enable_pyyaml_check=True)
+
+ if isinstance(test, SanityCodeSmellTest):
+ result = test.test(args, sanity_targets, version)
+ elif isinstance(test, SanityMultipleVersion):
+ result = test.test(args, sanity_targets, version)
+ options = ' --python %s' % version
+ elif isinstance(test, SanitySingleVersion):
+ result = test.test(args, sanity_targets, version)
+ elif isinstance(test, SanityVersionNeutral):
+ result = test.test(args, sanity_targets)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+ else:
+ result = SanitySkipped(test.name, skip_version)
+
+ result.write(args)
+
+ total += 1
+
+ if isinstance(result, SanityFailure):
+ failed.append(result.test + options)
+
+ if failed:
+ message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
+ len(failed), total, '\n'.join(failed))
+
+ if args.failure_ok:
+ display.error(message)
+ else:
+ raise ApplicationError(message)
+
+
+def collect_code_smell_tests(): # type: () -> t.Tuple[SanityFunc, ...]
+ """Return a tuple of available code smell sanity tests."""
+ paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py'))
+
+ if data_context().content.is_ansible:
+ # include Ansible specific code-smell tests which are not configured to be skipped
+ ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell')
+ skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True)
+ paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests)
+
+ paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p))
+ tests = tuple(SanityCodeSmellTest(p) for p in paths)
+
+ return tests
+
+
+def sanity_get_tests():
+ """
+ :rtype: tuple[SanityFunc]
+ """
+ return SANITY_TESTS
+
+
+class SanityIgnoreParser:
+ """Parser for the consolidated sanity test ignore file."""
+ NO_CODE = '_'
+
+ def __init__(self, args): # type: (SanityConfig) -> None
+ if data_context().content.collection:
+ ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2])
+
+ ansible_label = 'Ansible %s' % ansible_version
+ file_name = 'ignore-%s.txt' % ansible_version
+ else:
+ ansible_label = 'Ansible'
+ file_name = 'ignore.txt'
+
+ self.args = args
+ self.relative_path = os.path.join(data_context().content.sanity_path, file_name)
+ self.path = os.path.join(data_context().content.root, self.relative_path)
+ self.ignores = collections.defaultdict(lambda: collections.defaultdict(dict)) # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]]
+ self.skips = collections.defaultdict(lambda: collections.defaultdict(int)) # type: t.Dict[str, t.Dict[str, int]]
+ self.parse_errors = [] # type: t.List[t.Tuple[int, int, str]]
+ self.file_not_found_errors = [] # type: t.List[t.Tuple[int, str]]
+
+ lines = read_lines_without_comments(self.path, optional=True)
+ targets = SanityTargets.get_targets()
+ paths = set(target.path for target in targets)
+ tests_by_name = {} # type: t.Dict[str, SanityTest]
+ versioned_test_names = set() # type: t.Set[str]
+ unversioned_test_names = {} # type: t.Dict[str, str]
+ directories = paths_to_dirs(list(paths))
+ paths_by_test = {} # type: t.Dict[str, t.Set[str]]
+
+ display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1)
+
+ for test in sanity_get_tests():
+ test_targets = SanityTargets.filter_and_inject_targets(test, targets)
+
+ paths_by_test[test.name] = set(target.path for target in test.filter_targets(test_targets))
+
+ if isinstance(test, SanityMultipleVersion):
+ versioned_test_names.add(test.name)
+ tests_by_name.update(dict(('%s-%s' % (test.name, python_version), test) for python_version in test.supported_python_versions))
+ else:
+ unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))
+ tests_by_name[test.name] = test
+
+ for line_no, line in enumerate(lines, start=1):
+ if not line:
+ self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment"))
+ continue
+
+ parts = line.split(' ')
+ path = parts[0]
+ codes = parts[1:]
+
+ if not path:
+ self.parse_errors.append((line_no, 1, "Line cannot start with a space"))
+ continue
+
+ if path.endswith(os.path.sep):
+ if path not in directories:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+ else:
+ if path not in paths:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+
+ if not codes:
+ self.parse_errors.append((line_no, len(path), "Error code required after path"))
+ continue
+
+ code = codes[0]
+
+ if not code:
+ self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty"))
+ continue
+
+ if len(codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces"))
+ continue
+
+ parts = code.split('!')
+ code = parts[0]
+ commands = parts[1:]
+
+ parts = code.split(':')
+ test_name = parts[0]
+ error_codes = parts[1:]
+
+ test = tests_by_name.get(test_name)
+
+ if not test:
+ unversioned_name = unversioned_test_names.get(test_name)
+
+ if unversioned_name:
+ self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % (
+ unversioned_name, test_name)))
+ elif test_name in versioned_test_names:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % (
+ test_name, test_name, args.python_version)))
+ else:
+ self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name))
+
+ continue
+
+ if path.endswith(os.path.sep) and not test.include_directories:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name))
+ continue
+
+ if path not in paths_by_test[test.name] and not test.no_targets:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path)))
+ continue
+
+ if commands and error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters"))
+ continue
+
+ if commands:
+ command = commands[0]
+
+ if len(commands) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' characters"))
+ continue
+
+ if command == 'skip':
+ if not test.can_skip:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name))
+ continue
+
+ existing_line_no = self.skips.get(test_name, {}).get(path)
+
+ if existing_line_no:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no)))
+ continue
+
+ self.skips[test_name][path] = line_no
+ continue
+
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command))
+ continue
+
+ if not test.can_ignore:
+ self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name))
+ continue
+
+ if test.error_code:
+ if not error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name))
+ continue
+
+ error_code = error_codes[0]
+
+ if len(error_codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters"))
+ continue
+
+ if error_code in test.optional_error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % (
+ error_code)))
+ continue
+ else:
+ if error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name))
+ continue
+
+ error_code = self.NO_CODE
+
+ existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code)
+
+ if existing:
+ if test.error_code:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % (
+ test_name, error_code, path, existing)))
+ else:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % (
+ test_name, path, existing)))
+
+ continue
+
+ self.ignores[test_name][path][error_code] = line_no
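+
+        # Illustrative entries this parser accepts (paths and codes below are
+        # hypothetical): each line is "<path> <code>", where <code> is either
+        # "test-name" (for tests without error codes), "test-name:error-code",
+        # or "test-name!skip"; multi-version tests use names like "import-3.6".
+        #
+        #   lib/ansible/modules/foo.py pep8:E501
+        #   lib/ansible/modules/bar.py import-3.6!skip
+        #   lib/ansible/plugins/baz.py shebang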
+
+ @staticmethod
+ def load(args): # type: (SanityConfig) -> SanityIgnoreParser
+        """Return the current SanityIgnoreParser instance, initializing it if needed."""
+ try:
+ return SanityIgnoreParser.instance
+ except AttributeError:
+ pass
+
+ SanityIgnoreParser.instance = SanityIgnoreParser(args)
+ return SanityIgnoreParser.instance
+
+
+class SanityIgnoreProcessor:
+ """Processor for sanity test ignores for a single run of one sanity test."""
+ def __init__(self,
+ args, # type: SanityConfig
+ test, # type: SanityTest
+ python_version, # type: t.Optional[str]
+ ): # type: (...) -> None
+ name = test.name
+ code = test.error_code
+
+ if python_version:
+ full_name = '%s-%s' % (name, python_version)
+ else:
+ full_name = name
+
+ self.args = args
+ self.test = test
+ self.code = code
+ self.parser = SanityIgnoreParser.load(args)
+ self.ignore_entries = self.parser.ignores.get(full_name, {})
+ self.skip_entries = self.parser.skips.get(full_name, {})
+ self.used_line_numbers = set() # type: t.Set[int]
+
+ def filter_skipped_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given targets, with any skipped paths filtered out."""
+ return sorted(target for target in targets if target.path not in self.skip_entries)
+
+ def process_errors(self, errors, paths): # type: (t.List[SanityMessage], t.List[str]) -> t.List[SanityMessage]
+ """Return the given errors filtered for ignores and with any settings related errors included."""
+ errors = self.filter_messages(errors)
+ errors.extend(self.get_errors(paths))
+
+ errors = sorted(set(errors))
+
+ return errors
+
+ def filter_messages(self, messages): # type: (t.List[SanityMessage]) -> t.List[SanityMessage]
+ """Return a filtered list of the given messages using the entries that have been loaded."""
+ filtered = []
+
+ for message in messages:
+ if message.code in self.test.optional_error_codes and not self.args.enable_optional_errors:
+ continue
+
+ path_entry = self.ignore_entries.get(message.path)
+
+ if path_entry:
+ code = message.code if self.code else SanityIgnoreParser.NO_CODE
+ line_no = path_entry.get(code)
+
+ if line_no:
+ self.used_line_numbers.add(line_no)
+ continue
+
+ filtered.append(message)
+
+ return filtered
+
+ def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage]
+ """Return error messages related to issues with the file."""
+ messages = []
+
+ # unused errors
+
+ unused = [] # type: t.List[t.Tuple[int, str, str]]
+
+ if self.test.no_targets or self.test.all_targets:
+ # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked
+ targets = SanityTargets.get_targets()
+ test_targets = SanityTargets.filter_and_inject_targets(self.test, targets)
+ paths = [target.path for target in test_targets]
+
+ for path in paths:
+ path_entry = self.ignore_entries.get(path)
+
+ if not path_entry:
+ continue
+
+ unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers)
+
+ messages.extend(SanityMessage(
+ code=self.code,
+ message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path,
+ path=self.parser.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None,
+ ) for line, path, code in unused)
+
+ return messages
+
+
+class SanitySuccess(TestSuccess):
+ """Sanity test success."""
+ def __init__(self, test, python_version=None):
+ """
+ :type test: str
+ :type python_version: str
+ """
+ super(SanitySuccess, self).__init__(COMMAND, test, python_version)
+
+
+class SanitySkipped(TestSkipped):
+ """Sanity test skipped."""
+ def __init__(self, test, python_version=None):
+ """
+ :type test: str
+ :type python_version: str
+ """
+ super(SanitySkipped, self).__init__(COMMAND, test, python_version)
+
+
+class SanityFailure(TestFailure):
+ """Sanity test failure."""
+ def __init__(self, test, python_version=None, messages=None, summary=None):
+ """
+ :type test: str
+ :type python_version: str
+ :type messages: list[SanityMessage]
+ :type summary: unicode
+ """
+ super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary)
+
+
+class SanityMessage(TestMessage):
+ """Single sanity test message for one file."""
+
+
+class SanityTargets:
+ """Sanity test target information."""
+    def __init__(self, targets, include): # type: (t.Tuple[TestTarget, ...], t.Tuple[TestTarget, ...]) -> None
+ self.targets = targets
+ self.include = include
+
+ @staticmethod
+ def create(include, exclude, require): # type: (t.List[str], t.List[str], t.List[str]) -> SanityTargets
+ """Create a SanityTargets instance from the given include, exclude and require lists."""
+ _targets = SanityTargets.get_targets()
+ _include = walk_internal_targets(_targets, include, exclude, require)
+ return SanityTargets(_targets, _include)
+
+ @staticmethod
+ def filter_and_inject_targets(test, targets): # type: (SanityTest, t.Iterable[TestTarget]) -> t.List[TestTarget]
+ """Filter and inject targets based on test requirements and the given target list."""
+ test_targets = list(targets)
+
+ if not test.include_symlinks:
+ # remove all symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ if not test.include_directories or not test.include_symlinks:
+ # exclude symlinked directories unless supported by the test
+ test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)]
+
+ if test.include_directories:
+ # include directories containing any of the included files
+ test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets]))
+
+ if not test.include_symlinks:
+ # remove all directory symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ return test_targets
+
+ @staticmethod
+ def get_targets(): # type: () -> t.Tuple[TestTarget, ...]
+ """Return a tuple of sanity test targets. Uses a cached version when available."""
+ try:
+ return SanityTargets.get_targets.targets
+ except AttributeError:
+ SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets()))
+
+ return SanityTargets.get_targets.targets
+
+
+class SanityTest(ABC):
+ """Sanity test base class."""
+ __metaclass__ = abc.ABCMeta
+
+ ansible_only = False
+
+ def __init__(self, name):
+ self.name = name
+ self.enabled = True
+
+ # Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date.
+ # Because these errors can be unpredictable they behave differently than normal error codes:
+ # * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
+ # * They cannot be ignored. This is done to maintain the integrity of the ignore system.
+ self.optional_error_codes = set()
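+
+        # For example (hypothetical), a date-sensitive test could declare:
+        #     self.optional_error_codes = set(['A201'])
+        # Its 'A201' findings would then appear only with --enable-optional-errors
+        # and could never be suppressed through ignore entries.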
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return None
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return True
+
+ @property
+ def can_skip(self): # type: () -> bool
+ """True if the test supports skip entries."""
+ return not self.all_targets and not self.no_targets
+
+ @property
+ def all_targets(self): # type: () -> bool
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return False
+
+ @property
+ def include_directories(self): # type: () -> bool
+ """True if the test targets should include directories."""
+ return False
+
+ @property
+ def include_symlinks(self): # type: () -> bool
+ """True if the test targets should include symlinks."""
+ return False
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return tuple(python_version for python_version in SUPPORTED_PYTHON_VERSIONS if python_version.startswith('3.'))
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] # pylint: disable=unused-argument
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)
+
+
+class SanityCodeSmellTest(SanityTest):
+ """Sanity test script."""
+ def __init__(self, path):
+ name = os.path.splitext(os.path.basename(path))[0]
+ config_path = os.path.splitext(path)[0] + '.json'
+
+ super(SanityCodeSmellTest, self).__init__(name)
+
+ self.path = path
+ self.config_path = config_path if os.path.exists(config_path) else None
+ self.config = None
+
+ if self.config_path:
+ self.config = read_json_file(self.config_path)
+
+ if self.config:
+ self.enabled = not self.config.get('disabled')
+
+ self.output = self.config.get('output') # type: t.Optional[str]
+ self.extensions = self.config.get('extensions') # type: t.List[str]
+ self.prefixes = self.config.get('prefixes') # type: t.List[str]
+ self.files = self.config.get('files') # type: t.List[str]
+ self.text = self.config.get('text') # type: t.Optional[bool]
+ self.ignore_self = self.config.get('ignore_self') # type: bool
+ self.intercept = self.config.get('intercept') # type: bool
+ self.minimum_python_version = self.config.get('minimum_python_version') # type: t.Optional[str]
+
+ self.__all_targets = self.config.get('all_targets') # type: bool
+ self.__no_targets = self.config.get('no_targets') # type: bool
+ self.__include_directories = self.config.get('include_directories') # type: bool
+ self.__include_symlinks = self.config.get('include_symlinks') # type: bool
+ else:
+ self.output = None
+ self.extensions = []
+ self.prefixes = []
+ self.files = []
+ self.text = None # type: t.Optional[bool]
+ self.ignore_self = False
+ self.intercept = False
+ self.minimum_python_version = None # type: t.Optional[str]
+
+ self.__all_targets = False
+ self.__no_targets = True
+ self.__include_directories = False
+ self.__include_symlinks = False
+
+ if self.no_targets:
+ mutually_exclusive = (
+ 'extensions',
+ 'prefixes',
+ 'files',
+ 'text',
+ 'ignore_self',
+ 'all_targets',
+ 'include_directories',
+ 'include_symlinks',
+ )
+
+ problems = sorted(name for name in mutually_exclusive if getattr(self, name))
+
+ if problems:
+ raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems)))
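+
+    # Illustrative companion config ("<test-name>.json"; all keys are optional
+    # and the values below are hypothetical):
+    #
+    #   {
+    #       "output": "path-line-column-message",
+    #       "extensions": [".py"],
+    #       "prefixes": ["lib/ansible/"],
+    #       "text": true
+    #   }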
+
+ @property
+ def all_targets(self): # type: () -> bool
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return self.__all_targets
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return self.__no_targets
+
+ @property
+ def include_directories(self): # type: () -> bool
+ """True if the test targets should include directories."""
+ return self.__include_directories
+
+ @property
+ def include_symlinks(self): # type: () -> bool
+ """True if the test targets should include symlinks."""
+ return self.__include_symlinks
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ if self.text is not None:
+ if self.text:
+ targets = [target for target in targets if not is_binary_file(target.path)]
+ else:
+ targets = [target for target in targets if is_binary_file(target.path)]
+
+ if self.extensions:
+ targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions
+ or (is_subdir(target.path, 'bin') and '.py' in self.extensions)]
+
+ if self.prefixes:
+ targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)]
+
+ if self.files:
+ targets = [target for target in targets if os.path.basename(target.path) in self.files]
+
+ if self.ignore_self and data_context().content.is_ansible:
+ relative_self_path = os.path.relpath(self.path, data_context().content.root)
+ targets = [target for target in targets if target.path != relative_self_path]
+
+ return targets
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ if self.minimum_python_version:
+ if str_to_version(python_version) < str_to_version(self.minimum_python_version):
+ display.warning("Skipping sanity test '%s' on unsupported Python %s; requires Python %s or newer." % (
+ self.name, python_version, self.minimum_python_version))
+ return SanitySkipped(self.name, 'Test requires Python %s or newer' % (self.minimum_python_version, ))
+
+ cmd = [find_python(python_version), self.path]
+
+ env = ansible_environment(args, color=False)
+
+ pattern = None
+ data = None
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if self.config:
+ if self.output == 'path-line-column-message':
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+ elif self.output == 'path-message':
+ pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
+ else:
+                raise ApplicationError('Unsupported output type: %s' % self.output)
+
+ if not self.no_targets:
+ data = '\n'.join(paths)
+
+ if data:
+ display.info(data, verbosity=4)
+
+ try:
+ if self.intercept:
+ stdout, stderr = intercept_command(args, cmd, target_name='sanity.%s' % self.name, data=data, env=env, capture=True, disable_coverage=True)
+ else:
+ stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
+
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout and not stderr:
+ if pattern:
+ matches = parse_to_list_of_dict(pattern, stdout)
+
+ messages = [SanityMessage(
+ message=m['message'],
+ path=m['path'],
+ line=int(m.get('line', 0)),
+ column=int(m.get('column', 0)),
+ ) for m in matches]
+
+ messages = settings.process_errors(messages, paths)
+
+ if not messages:
+ return SanitySuccess(self.name)
+
+ return SanityFailure(self.name, messages=messages)
+
+ if stderr or status:
+ summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ return SanityFailure(self.name, summary=summary)
+
+ messages = settings.process_errors([], paths)
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
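+
+        # Example output lines (illustrative) matching the two patterns above:
+        #   path-line-column-message: "lib/ansible/foo.py:10:1: trailing whitespace"
+        #   path-message: "lib/ansible/foo.py: missing license header"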
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityFunc(SanityTest):
+ """Base class for sanity test plugins."""
+ def __init__(self):
+ name = self.__class__.__name__
+ name = re.sub(r'Test$', '', name) # drop Test suffix
+ name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization
+
+ super(SanityFunc, self).__init__(name)
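+        # e.g. 'AnsibleDocTest' -> 'ansible-doc', 'BinSymlinksTest' -> 'bin-symlinks'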
+
+
+class SanityVersionNeutral(SanityFunc):
+    """Base class for sanity test plugins which are independent of the python version being used."""
+ @abc.abstractmethod
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return None
+
+
+class SanitySingleVersion(SanityFunc):
+ """Base class for sanity test plugins which should run on a single python version."""
+ @abc.abstractmethod
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityMultipleVersion(SanityFunc):
+ """Base class for sanity test plugins which should run on multiple python versions."""
+ @abc.abstractmethod
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args, python_version): # type: (SanityConfig, str) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, python_version)
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return SUPPORTED_PYTHON_VERSIONS
+
+
+SANITY_TESTS = (
+)
+
+
+def sanity_init():
+ """Initialize full sanity test list (includes code-smell scripts determined at runtime)."""
+ import_plugins('sanity')
+ sanity_plugins = {} # type: t.Dict[str, t.Type[SanityFunc]]
+ load_plugins(SanityFunc, sanity_plugins)
+ sanity_tests = tuple([plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only])
+ global SANITY_TESTS # pylint: disable=locally-disabled, global-statement
+ SANITY_TESTS = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
diff --git a/test/lib/ansible_test/_internal/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/sanity/ansible_doc.py
new file mode 100644
index 00000000..c6b997cf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/ansible_doc.py
@@ -0,0 +1,144 @@
+"""Sanity test for ansible-doc."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityFailure,
+ SanitySuccess,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ is_subdir,
+)
+
+from ..util_common import (
+ intercept_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+
+class AnsibleDocTest(SanitySingleVersion):
+ """Sanity test for ansible-doc."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ # This should use documentable plugins from constants instead
+ unsupported_plugin_types = set([
+ # not supported by ansible-doc
+ 'action',
+ 'doc_fragments',
+ 'filter',
+ 'module_utils',
+ 'terminal',
+ 'test',
+ ])
+
+ plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type not in unsupported_plugin_types]
+
+ return [target for target in targets
+ if os.path.splitext(target.path)[1] == '.py'
+ and os.path.basename(target.path) != '__init__.py'
+ and any(is_subdir(target.path, path) for path in plugin_paths)
+ ]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ doc_targets = collections.defaultdict(list)
+ target_paths = collections.defaultdict(dict)
+
+ remap_types = dict(
+ modules='module',
+ )
+
+ for plugin_type, plugin_path in data_context().content.plugin_paths.items():
+ plugin_type = remap_types.get(plugin_type, plugin_type)
+
+ for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
+ plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
+
+ if plugin_name.startswith('_'):
+ plugin_name = plugin_name[1:]
+
+ doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
+ target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
+
+ env = ansible_environment(args, color=False)
+ error_messages = []
+
+ for doc_type in sorted(doc_targets):
+ for format_option in [None, '--json']:
+ cmd = ['ansible-doc', '-t', doc_type]
+ if format_option is not None:
+ cmd.append(format_option)
+ cmd.extend(sorted(doc_targets[doc_type]))
+
+ try:
+ with coverage_context(args):
+ stdout, stderr = intercept_command(args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version)
+
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if status:
+ summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if stdout:
+ display.info(stdout.strip(), verbosity=3)
+
+ if stderr:
+ # ignore removed module/plugin warnings
+ stderr = re.sub(r'\[WARNING\]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()
+
+ if stderr:
+ summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ error_messages = settings.process_errors(error_messages, paths)
+
+ if error_messages:
+ return SanityFailure(self.name, messages=error_messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/bin_symlinks.py b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py
new file mode 100644
index 00000000..bd0ba58e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py
@@ -0,0 +1,110 @@
+"""Sanity test for symlinks in the bin directory."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..payload import (
+ ANSIBLE_BIN_SYMLINK_MAP,
+ __file__ as symlink_map_full_path,
+)
+
+from ..util import (
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+
+class BinSymlinksTest(SanityVersionNeutral):
+ """Sanity test for symlinks in the bin directory."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ bin_root = ANSIBLE_BIN_PATH
+ bin_names = os.listdir(bin_root)
+ bin_paths = sorted(os.path.join(bin_root, path) for path in bin_names)
+
+ injector_root = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
+ injector_names = os.listdir(injector_root)
+
+ errors = [] # type: t.List[t.Tuple[str, str]]
+
+ symlink_map_path = os.path.relpath(symlink_map_full_path, data_context().content.root)
+
+ for bin_path in bin_paths:
+ if not os.path.islink(bin_path):
+ errors.append((bin_path, 'not a symbolic link'))
+ continue
+
+ dest = os.readlink(bin_path)
+
+ if not os.path.exists(bin_path):
+ errors.append((bin_path, 'points to non-existent path "%s"' % dest))
+ continue
+
+ if not os.path.isfile(bin_path):
+ errors.append((bin_path, 'points to non-file "%s"' % dest))
+ continue
+
+ map_dest = ANSIBLE_BIN_SYMLINK_MAP.get(os.path.basename(bin_path))
+
+ if not map_dest:
+ errors.append((bin_path, 'missing from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % symlink_map_path))
+ continue
+
+ if dest != map_dest:
+ errors.append((bin_path, 'points to "%s" instead of "%s" from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, map_dest, symlink_map_path)))
+ continue
+
+ if not os.access(bin_path, os.X_OK):
+ errors.append((bin_path, 'points to non-executable file "%s"' % dest))
+ continue
+
+ for bin_name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ if bin_name not in bin_names:
+ bin_path = os.path.join(bin_root, bin_name)
+ errors.append((bin_path, 'missing symlink to "%s" defined in ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, symlink_map_path)))
+
+ if bin_name not in injector_names:
+ injector_path = os.path.join(injector_root, bin_name)
+ errors.append((injector_path, 'missing symlink to "python.py"'))
+
+ messages = [SanityMessage(message=message, path=os.path.relpath(path, data_context().content.root), confidence=100) for path, message in errors]
+
+ if errors:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
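+
+# Illustrative ANSIBLE_BIN_SYMLINK_MAP entries checked above (the real map is
+# defined in ..payload; the destinations shown here are hypothetical):
+#   'ansible':      '../lib/ansible/cli/scripts/ansible_cli_stub.py'
+#   'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py'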
diff --git a/test/lib/ansible_test/_internal/sanity/compile.py b/test/lib/ansible_test/_internal/sanity/compile.py
new file mode 100644
index 00000000..5a517272
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/compile.py
@@ -0,0 +1,92 @@
+"""Sanity test for proper python syntax."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ find_python,
+ parse_to_list_of_dict,
+ is_subdir,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class CompileTest(SanityMultipleVersion):
+ """Sanity test for proper python syntax."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args, python_version)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [find_python(python_version), os.path.join(SANITY_ROOT, 'compile', 'compile.py')]
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name, python_version=python_version)
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'].replace('./', ''),
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python_version)
+
+ return SanitySuccess(self.name, python_version=python_version)
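+
+# Example compile.py output line (illustrative) matching the pattern above;
+# a leading './' is stripped from reported paths:
+#   ./lib/ansible/modules/foo.py:42:13: invalid syntax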
diff --git a/test/lib/ansible_test/_internal/sanity/ignores.py b/test/lib/ansible_test/_internal/sanity/ignores.py
new file mode 100644
index 00000000..8b6df50c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/ignores.py
@@ -0,0 +1,89 @@
+"""Sanity test for the sanity ignore file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+ SanityFailure,
+ SanityIgnoreParser,
+ SanityVersionNeutral,
+ SanitySuccess,
+ SanityMessage,
+)
+
+from ..test import (
+ calculate_confidence,
+ calculate_best_confidence,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class IgnoresTest(SanityVersionNeutral):
+ """Sanity test for sanity test ignore entries."""
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ sanity_ignore = SanityIgnoreParser.load(args)
+
+ messages = []
+
+ # parse errors
+
+ messages.extend(SanityMessage(
+ message=message,
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=column,
+ confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
+ ) for line, column, message in sanity_ignore.parse_errors)
+
+ # file not found errors
+
+ messages.extend(SanityMessage(
+ message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
+ ) for line, path in sanity_ignore.file_not_found_errors)
+
+ # conflicting ignores and skips
+
+ for test_name, ignores in sanity_ignore.ignores.items():
+ for ignore_path, ignore_entry in ignores.items():
+ skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
+
+ if not skip_line_no:
+ continue
+
+ for ignore_line_no in ignore_entry.values():
+ messages.append(SanityMessage(
+ message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
+ path=sanity_ignore.relative_path,
+ line=ignore_line_no,
+ column=1,
+ confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
+ ))
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/import.py b/test/lib/ansible_test/_internal/sanity/import.py
new file mode 100644
index 00000000..7d4776ae
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/import.py
@@ -0,0 +1,184 @@
+"""Sanity test for proper import exception handling."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ remove_tree,
+ display,
+ parse_to_list_of_dict,
+ is_subdir,
+ generate_pip_command,
+ find_python,
+)
+
+from ..util_common import (
+ intercept_command,
+ run_command,
+ ResultType,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..executor import (
+ generate_pip_install,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+from ..venv import (
+ create_virtual_environment,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class ImportTest(SanityMultipleVersion):
+ """Sanity test for proper import exception handling."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and
+ (is_subdir(target.path, data_context().content.module_path) or is_subdir(target.path, data_context().content.module_utils_path))]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ capture_pip = args.verbosity < 2
+
+ python = find_python(python_version)
+
+ if python_version.startswith('2.') and args.requirements:
+ # hack to make sure that virtualenv is available under Python 2.x
+ # on Python 3.x we can use the built-in venv
+ pip = generate_pip_command(python)
+ run_command(args, generate_pip_install(pip, '', packages=['virtualenv']), capture=capture_pip)
+
+ settings = self.load_processor(args, python_version)
+
+ paths = [target.path for target in targets.include]
+
+ env = ansible_environment(args, color=False)
+
+ temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
+
+ # create a clean virtual environment to minimize the available imports beyond the python standard library
+ virtual_environment_path = os.path.join(temp_root, 'minimal-py%s' % python_version.replace('.', ''))
+ virtual_environment_bin = os.path.join(virtual_environment_path, 'bin')
+
+ remove_tree(virtual_environment_path)
+
+ if not create_virtual_environment(args, python_version, virtual_environment_path):
+ display.warning("Skipping sanity test '%s' on Python %s due to missing virtual environment support." % (self.name, python_version))
+ return SanitySkipped(self.name, python_version)
+
+ # add the importer to our virtual environment so it can be accessed through the coverage injector
+ importer_path = os.path.join(virtual_environment_bin, 'importer.py')
+ yaml_to_json_path = os.path.join(virtual_environment_bin, 'yaml_to_json.py')
+ if not args.explain:
+ os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'importer.py')), importer_path)
+ os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'yaml_to_json.py')), yaml_to_json_path)
+
+ # activate the virtual environment
+ env['PATH'] = '%s:%s' % (virtual_environment_bin, env['PATH'])
+
+ env.update(
+ SANITY_TEMP_PATH=ResultType.TMP.path,
+ )
+
+ if data_context().content.collection:
+ env.update(
+ SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
+ SANITY_EXTERNAL_PYTHON=python,
+ )
+
+ virtualenv_python = os.path.join(virtual_environment_bin, 'python')
+ virtualenv_pip = generate_pip_command(virtualenv_python)
+
+ # make sure coverage is available in the virtual environment if needed
+ if args.coverage:
+ run_command(args, generate_pip_install(virtualenv_pip, '', packages=['setuptools']), env=env, capture=capture_pip)
+ run_command(args, generate_pip_install(virtualenv_pip, '', packages=['coverage']), env=env, capture=capture_pip)
+
+ try:
+ # In some environments pkg_resources is installed as a separate pip package which needs to be removed.
+ # For example, using Python 3.8 on Ubuntu 18.04 a virtualenv is created with only pip and setuptools.
+ # However, a venv is created with an additional pkg-resources package which is independent of setuptools.
+ # Making sure pkg-resources is removed preserves the import test consistency between venv and virtualenv.
+ # Additionally, in the above example, the pyparsing package vendored with pkg-resources is out-of-date and generates deprecation warnings.
+ # Thus it is important to remove pkg-resources to prevent system installed packages from generating deprecation warnings.
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pkg-resources'], env=env, capture=capture_pip)
+ except SubprocessError:
+ pass
+
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env, capture=capture_pip)
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env, capture=capture_pip)
+
+ cmd = ['importer.py']
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ results = []
+
+ try:
+ with coverage_context(args):
+ stdout, stderr = intercept_command(args, cmd, self.name, env, capture=True, data=data, python_version=python_version,
+ virtualenv=virtualenv_python)
+
+ if stdout or stderr:
+ raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
+ except SubprocessError as ex:
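+            # importer.py is assumed to exit with status 10 and report findings
+            # on stdout when it detects import errors; any other outcome is
+            # treated as a genuine failure (inferred from this check).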
+ if ex.status != 10 or ex.stderr or not ex.stdout:
+ raise
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, ex.stdout)
+
+ relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python_version)
+
+ return SanitySuccess(self.name, python_version=python_version)
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
new file mode 100644
index 00000000..e21c093a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -0,0 +1,399 @@
+"""Sanity test to check integration test aliases."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import textwrap
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..target import (
+ filter_targets,
+ walk_posix_integration_targets,
+ walk_windows_integration_targets,
+ walk_integration_targets,
+ walk_module_targets,
+)
+
+from ..cloud import (
+ get_cloud_platforms,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+ find_python,
+ raw_command,
+)
+
+from ..util_common import (
+ write_json_test_results,
+ ResultType,
+)
+
+
+class IntegrationAliasesTest(SanityVersionNeutral):
+ """Sanity test to evaluate integration test aliases."""
+ CI_YML = '.azure-pipelines/azure-pipelines.yml'
+ TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
+
+ DISABLED = 'disabled/'
+ UNSTABLE = 'unstable/'
+ UNSUPPORTED = 'unsupported/'
+
+ EXPLAIN_URL = 'https://docs.ansible.com/ansible/devel/dev_guide/testing/sanity/integration-aliases.html'
+
+ TEMPLATE_DISABLED = """
+ The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
+
+ {tests}
+
+ Consider fixing the integration tests before or alongside changes.
+ """
+
+ TEMPLATE_UNSTABLE = """
+ The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
+
+ {tests}
+
+ Tests may need to be restarted due to failures unrelated to changes.
+ """
+
+ TEMPLATE_UNSUPPORTED = """
+ The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
+
+ {tests}
+
+ Consider running the tests manually or extending test infrastructure to add support.
+ """
+
+ TEMPLATE_UNTESTED = """
+ The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
+
+ {tests}
+
+ Consider adding integration tests before or alongside changes.
+ """
+
+ ansible_only = True
+
+ def __init__(self):
+ super(IntegrationAliasesTest, self).__init__()
+
+ self._ci_config = {} # type: t.Dict[str, t.Any]
+ self._ci_test_groups = {} # type: t.Dict[str, t.List[int]]
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ def load_ci_config(self, args): # type: (SanityConfig) -> t.Dict[str, t.Any]
+ """Load and return the CI YAML configuration."""
+ if not self._ci_config:
+ self._ci_config = self.load_yaml(args, self.CI_YML)
+
+ return self._ci_config
+
+ @property
+ def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]]
+ """Return a dictionary of CI test names and their group(s)."""
+ if not self._ci_test_groups:
+ test_groups = {}
+
+ for stage in self._ci_config['stages']:
+ for job in stage['jobs']:
+ if job.get('template') != 'templates/matrix.yml':
+ continue
+
+ parameters = job['parameters']
+
+ groups = parameters.get('groups', [])
+ test_format = parameters.get('testFormat', '{0}')
+ test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
+
+ for target in parameters['targets']:
+ test = target.get('test') or target.get('name')
+
+ if groups:
+ tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
+ else:
+ tests_formatted = [test_format.format(test)]
+
+ for test_formatted in tests_formatted:
+ parts = test_formatted.split('/')
+ key = parts[0]
+
+ if key in ('sanity', 'units'):
+ continue
+
+ try:
+ group = int(parts[-1])
+ except ValueError:
+ continue
+
+ if group < 1 or group > 99:
+ continue
+
+ group_set = test_groups.setdefault(key, set())
+ group_set.add(group)
+
+ self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
+
+ return self._ci_test_groups
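+
+        # Illustrative matrix entry (values hypothetical): with testFormat '{0}',
+        # groupFormat '{0}/{{1}}', groups [1, 2] and a target test 'posix', the
+        # expansion yields 'posix/1' and 'posix/2', recorded as {'posix': [1, 2]}.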
+
+ def format_test_group_alias(self, name, fallback=''):
+ """
+ :type name: str
+ :type fallback: str
+ :rtype: str
+ """
+ group_numbers = self.ci_test_groups.get(name, None)
+
+ if group_numbers:
+ if min(group_numbers) != 1:
+ display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
+
+ if max(group_numbers) != len(group_numbers):
+ display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
+
+ if max(group_numbers) > 9:
+ alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
+ elif len(group_numbers) > 1:
+ alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
+ else:
+ alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
+ elif fallback:
+ alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
+ else:
+ raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
+
+ return alias
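+
+        # e.g. groups [1, 2, 3] yield 'shippable/posix/group[1-3]/', a single
+        # group yields 'shippable/generic/group1/', and more than nine groups
+        # yield the regex-style 'shippable/<name>/group(1|2|...|10)/' form.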
+
+ def load_yaml(self, args, path): # type: (SanityConfig, str) -> t.Dict[str, t.Any]
+ """Load the specified YAML file and return the contents."""
+ yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
+ python = find_python(args.python_version)
+
+ return json.loads(raw_command([python, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if not os.path.isfile(self.CI_YML):
+ return SanityFailure(self.name, messages=[SanityMessage(
+ message='file missing',
+ path=self.CI_YML,
+ )])
+
+ results = dict(
+ comments=[],
+ labels={},
+ )
+
+ self.load_ci_config(args)
+ self.check_changes(args, results)
+
+ write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results)
+
+ messages = []
+
+ messages += self.check_posix_targets(args)
+ messages += self.check_windows_targets()
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
+
+ def check_posix_targets(self, args):
+ """
+ :type args: SanityConfig
+ :rtype: list[SanityMessage]
+ """
+ posix_targets = tuple(walk_posix_integration_targets())
+
+ clouds = get_cloud_platforms(args, posix_targets)
+ cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
+
+ all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], include=True, directories=False, errors=False))
+ invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, directories=False, errors=False))
+
+ messages = []
+
+ for target in invalid_cloud_targets:
+ for alias in target.aliases:
+ if alias.startswith('cloud/') and alias != 'cloud/':
+ if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
+ continue
+
+ messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
+
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False,
+ directories=False, errors=False)),
+ find=self.format_test_group_alias('linux').replace('linux', 'posix'),
+ find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], include=True, directories=False,
+ errors=False)),
+ find=self.format_test_group_alias('generic'),
+ )
+
+ for cloud in clouds:
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
+ find=self.format_test_group_alias(cloud, 'cloud'),
+ find_incidental=['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ return messages
+
+ def check_windows_targets(self):
+ """
+ :rtype: list[SanityMessage]
+ """
+ windows_targets = tuple(walk_windows_integration_targets())
+
+ messages = []
+
+ messages += self.check_ci_group(
+ targets=windows_targets,
+ find=self.format_test_group_alias('windows'),
+ find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ return messages
+
+ def check_ci_group(self, targets, find, find_incidental=None):
+ """
+ :type targets: tuple[CompletionTarget]
+ :type find: str
+ :type find_incidental: list[str] | None
+ :rtype: list[SanityMessage]
+ """
+ all_paths = set(target.path for target in targets)
+ supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False))
+ unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False))
+
+ if find_incidental:
+ incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False))
+ else:
+ incidental_paths = set()
+
+ unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
+ conflicting_paths = supported_paths & unsupported_paths
+
+ unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
+ conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
+
+ messages = []
+
+ for path in unassigned_paths:
+ messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
+
+ for path in conflicting_paths:
+ messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
+
+ return messages
+
+ def check_changes(self, args, results):
+ """
+ :type args: SanityConfig
+ :type results: dict[str, any]
+ """
+ integration_targets = list(walk_integration_targets())
+ module_targets = list(walk_module_targets())
+
+ integration_targets_by_name = dict((target.name, target) for target in integration_targets)
+ module_names_by_path = dict((target.path, target.module) for target in module_targets)
+
+ disabled_targets = []
+ unstable_targets = []
+ unsupported_targets = []
+
+ for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
+ for target in args.metadata.change_description.focused_command_targets[command]:
+ if self.DISABLED in integration_targets_by_name[target].aliases:
+ disabled_targets.append(target)
+ elif self.UNSTABLE in integration_targets_by_name[target].aliases:
+ unstable_targets.append(target)
+ elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
+ unsupported_targets.append(target)
+
+ untested_modules = []
+
+ for path in args.metadata.change_description.no_integration_paths:
+ module = module_names_by_path.get(path)
+
+ if module:
+ untested_modules.append(module)
+
+ comments = [
+ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
+ self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
+ self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
+ self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
+ ]
+
+ comments = [comment for comment in comments if comment]
+
+ labels = dict(
+ needs_tests=bool(untested_modules),
+ disabled_tests=bool(disabled_targets),
+ unstable_tests=bool(unstable_targets),
+ unsupported_tests=bool(unsupported_targets),
+ )
+
+ results['comments'] += comments
+ results['labels'].update(labels)
+
+ def format_comment(self, template, targets):
+ """
+ :type template: str
+ :type targets: list[str]
+ :rtype: str | None
+ """
+ if not targets:
+ return None
+
+ tests = '\n'.join('- %s' % target for target in targets)
+
+ data = dict(
+ explain_url=self.EXPLAIN_URL,
+ tests=tests,
+ )
+
+ message = textwrap.dedent(template).strip().format(**data)
+
+ return message
diff --git a/test/lib/ansible_test/_internal/sanity/pep8.py b/test/lib/ansible_test/_internal/sanity/pep8.py
new file mode 100644
index 00000000..9eb40dbc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pep8.py
@@ -0,0 +1,109 @@
+"""Sanity test for PEP 8 style guidelines using pycodestyle."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ read_lines_without_comments,
+ parse_to_list_of_dict,
+ find_python,
+ is_subdir,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class Pep8Test(SanitySingleVersion):
+ """Sanity test for PEP 8 style guidelines using pycodestyle."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
+ current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ find_python(python_version),
+ '-m', 'pycodestyle',
+ '--max-line-length', '160',
+ '--config', '/dev/null',
+ '--ignore', ','.join(sorted(current_ignore)),
+ ] + paths
+
+ if paths:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout:
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+ else:
+ results = []
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level='warning' if r['code'].startswith('W') else 'error',
+ code=r['code'],
+ ) for r in results]
+
+ errors = settings.process_errors(results, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
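
The pycodestyle report format parsed above, applied to a made-up output line; parse_to_list_of_dict runs the same named-group regex against each line of stdout:

import re

pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
line = 'lib/example.py:14:80: E501 line too long (92 > 79 characters)'  # illustrative

print(re.match(pattern, line).groupdict())
# {'path': 'lib/example.py', 'line': '14', 'column': '80', 'code': 'E501',
#  'message': 'line too long (92 > 79 characters)'}
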
diff --git a/test/lib/ansible_test/_internal/sanity/pslint.py b/test/lib/ansible_test/_internal/sanity/pslint.py
new file mode 100644
index 00000000..256eee04
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pslint.py
@@ -0,0 +1,121 @@
+"""Sanity test using PSScriptAnalyzer."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ find_executable,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class PslintTest(SanityVersionNeutral):
+ """Sanity test using PSScriptAnalyzer."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AnsibleTest'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')]
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('pwsh', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmds = []
+
+ if args.requirements:
+ cmds.append([os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.ps1')])
+
+ cmds.append([os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths)
+
+ stdout = ''
+
+ for cmd in cmds:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ severity = [
+ 'Information',
+ 'Warning',
+ 'Error',
+ 'ParseError',
+ ]
+
+ cwd = data_context().content.root + '/'
+
+ # replace unicode smart quotes and ellipsis with ascii versions
+ stdout = re.sub(u'[\u2018\u2019]', "'", stdout)
+ stdout = re.sub(u'[\u201c\u201d]', '"', stdout)
+ stdout = re.sub(u'[\u2026]', '...', stdout)
+
+ messages = json.loads(stdout)
+
+ errors = [SanityMessage(
+ code=m['RuleName'],
+ message=m['Message'],
+ path=m['ScriptPath'].replace(cwd, ''),
+ line=m['Line'] or 0,
+ column=m['Column'] or 0,
+ level=severity[m['Severity']],
+ ) for m in messages]
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
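
A sketch of the smart-quote normalization and severity lookup above, on made-up PSScriptAnalyzer JSON; Severity is a numeric index into the severity list, so 1 maps to 'Warning':

import json
import re

severity = ['Information', 'Warning', 'Error', 'ParseError']

stdout = u'[{"RuleName": "PSAvoidUsingCmdletAliases", "Message": "\u2018gci\u2019 is an alias", "Severity": 1}]'
stdout = re.sub(u'[\u2018\u2019]', "'", stdout)  # normalize quotes before parsing

for m in json.loads(stdout):
    print('%s (%s): %s' % (m['RuleName'], severity[m['Severity']], m['Message']))
# PSAvoidUsingCmdletAliases (Warning): 'gci' is an alias
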
diff --git a/test/lib/ansible_test/_internal/sanity/pylint.py b/test/lib/ansible_test/_internal/sanity/pylint.py
new file mode 100644
index 00000000..769a1717
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pylint.py
@@ -0,0 +1,281 @@
+"""Sanity test using pylint."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import itertools
+import json
+import os
+import datetime
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ ConfigParser,
+ is_subdir,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetail,
+ CollectionDetailError,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class PylintTest(SanitySingleVersion):
+ """Sanity test using pylint."""
+
+ def __init__(self):
+ super(PylintTest, self).__init__()
+ self.optional_error_codes.update([
+ 'ansible-deprecated-date',
+ 'too-complex',
+ ])
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ plugin_dir = os.path.join(SANITY_ROOT, 'pylint', 'plugins')
+ plugin_names = sorted(name for name, ext in (
+ os.path.splitext(filename) for filename in os.listdir(plugin_dir)) if ext == '.py' and name != '__init__')
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in
+ paths if is_subdir(p, data_context().content.module_path)]
+ module_dirs = sorted(set([p[0] for p in module_paths if len(p) > 1]))
+
+ large_module_group_threshold = 500
+ large_module_groups = [key for key, value in
+ itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold]
+
+ large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths
+ if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)]
+ large_module_group_dirs = sorted(set([os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2]))
+
+ contexts = []
+ remaining_paths = set(paths)
+
+ def add_context(available_paths, context_name, context_filter):
+ """
+ :type available_paths: set[str]
+ :type context_name: str
+ :type context_filter: (str) -> bool
+ """
+ filtered_paths = set(p for p in available_paths if context_filter(p))
+ contexts.append((context_name, sorted(filtered_paths)))
+ available_paths -= filtered_paths
+
+ def filter_path(path_filter=None):
+ """
+ :type path_filter: str
+ :rtype: (str) -> bool
+ """
+ def context_filter(path_to_filter):
+ """
+ :type path_to_filter: str
+ :rtype: bool
+ """
+ return is_subdir(path_to_filter, path_filter)
+
+ return context_filter
+
+ for large_module_dir in large_module_group_dirs:
+ add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir)))
+
+ for module_dir in module_dirs:
+ add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir)))
+
+ add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
+ add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))
+
+ add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))
+
+ if data_context().content.collection:
+ add_context(remaining_paths, 'collection', lambda p: True)
+ else:
+ add_context(remaining_paths, 'validate-modules', filter_path('test/lib/ansible_test/_data/sanity/validate-modules/'))
+ add_context(remaining_paths, 'validate-modules-unit', filter_path('test/lib/ansible_test/tests/validate-modules-unit/'))
+ add_context(remaining_paths, 'sanity', filter_path('test/lib/ansible_test/_data/sanity/'))
+ add_context(remaining_paths, 'ansible-test', filter_path('test/lib/'))
+ add_context(remaining_paths, 'test', filter_path('test/'))
+ add_context(remaining_paths, 'hacking', filter_path('hacking/'))
+ add_context(remaining_paths, 'ansible', lambda p: True)
+
+ messages = []
+ context_times = []
+
+ python = find_python(python_version)
+
+ collection_detail = None
+
+ if data_context().content.collection:
+ try:
+ collection_detail = get_collection_detail(args, python)
+
+ if not collection_detail.version:
+ display.warning('Skipping pylint collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping pylint collection version checks since collection detail loading failed: %s' % ex.reason)
+
+ test_start = datetime.datetime.utcnow()
+
+ for context, context_paths in sorted(contexts):
+ if not context_paths:
+ continue
+
+ context_start = datetime.datetime.utcnow()
+ messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names, python, collection_detail)
+ context_end = datetime.datetime.utcnow()
+
+ context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))
+
+ test_end = datetime.datetime.utcnow()
+
+ for context_time in context_times:
+ display.info(context_time, verbosity=4)
+
+ display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)
+
+ errors = [SanityMessage(
+ message=m['message'].replace('\n', ' '),
+ path=m['path'],
+ line=int(m['line']),
+ column=int(m['column']),
+ level=m['type'],
+ code=m['symbol'],
+ ) for m in messages]
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def pylint(
+ args, # type: SanityConfig
+ context, # type: str
+ paths, # type: t.List[str]
+ plugin_dir, # type: str
+ plugin_names, # type: t.List[str]
+ python, # type: str
+ collection_detail, # type: CollectionDetail
+ ): # type: (...) -> t.List[t.Dict[str, str]]
+ """Run pylint using the config specified by the context on the specified paths."""
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', context.split('/')[0] + '.cfg')
+
+ if not os.path.exists(rcfile):
+ if data_context().content.collection:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'collection.cfg')
+ else:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'default.cfg')
+
+ parser = ConfigParser()
+ parser.read(rcfile)
+
+ if parser.has_section('ansible-test'):
+ config = dict(parser.items('ansible-test'))
+ else:
+ config = dict()
+
+ disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i)
+ load_plugins = set(plugin_names + ['pylint.extensions.mccabe']) - disable_plugins
+
+ cmd = [
+ python,
+ '-m', 'pylint',
+ '--jobs', '0',
+ '--reports', 'n',
+ '--max-line-length', '160',
+ '--max-complexity', '20',
+ '--rcfile', rcfile,
+ '--output-format', 'json',
+ '--load-plugins', ','.join(load_plugins),
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection-name', data_context().content.collection.full_name])
+
+ if collection_detail and collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+
+ append_python_path = [plugin_dir]
+
+ if data_context().content.collection:
+ append_python_path.append(data_context().content.collection.root)
+
+ env = ansible_environment(args)
+ env['PYTHONPATH'] += os.path.pathsep + os.path.pathsep.join(append_python_path)
+
+ # expose plugin paths for use in custom plugins
+ env.update(dict(('ANSIBLE_TEST_%s_PATH' % k.upper(), os.path.abspath(v) + os.path.sep) for k, v in data_context().content.plugin_paths.items()))
+
+ if paths:
+ display.info('Checking %d file(s) in context "%s" with config: %s' % (len(paths), context, rcfile), verbosity=1)
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr or status >= 32:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if not args.explain and stdout:
+ messages = json.loads(stdout)
+ else:
+ messages = []
+
+ return messages
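
How a 'disable-plugins' entry in the rcfile's [ansible-test] section drops plugins before loading, mirroring the set arithmetic above (plugin and option values here are illustrative):

config = {'disable-plugins': 'string_format, missing'}  # stands in for dict(parser.items('ansible-test'))
plugin_names = ['deprecated', 'string_format']

disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i)
load_plugins = set(plugin_names + ['pylint.extensions.mccabe']) - disable_plugins

print(sorted(load_plugins))
# ['deprecated', 'pylint.extensions.mccabe']
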
diff --git a/test/lib/ansible_test/_internal/sanity/rstcheck.py b/test/lib/ansible_test/_internal/sanity/rstcheck.py
new file mode 100644
index 00000000..2d8a01d5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/rstcheck.py
@@ -0,0 +1,95 @@
+"""Sanity test using rstcheck."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ parse_to_list_of_dict,
+ read_lines_without_comments,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class RstcheckTest(SanitySingleVersion):
+ """Sanity test using rstcheck."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.rst',)]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ ignore_file = os.path.join(SANITY_ROOT, 'rstcheck', 'ignore-substitutions.txt')
+ ignore_substitutions = sorted(set(read_lines_without_comments(ignore_file, remove_blank_lines=True)))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ find_python(python_version),
+ '-m', 'rstcheck',
+ '--report', 'warning',
+ '--ignore-substitutions', ','.join(ignore_substitutions),
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stdout:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stderr)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=0,
+ level=r['level'],
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
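
rstcheck reports through docutils on stderr; a made-up diagnostic in that format, parsed with the same pattern the test uses:

import re

pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
line = 'docs/docsite/rst/example.rst:12: (WARNING/2) Title underline too short.'  # illustrative

print(re.match(pattern, line).groupdict())
# {'path': 'docs/docsite/rst/example.rst', 'line': '12', 'level': 'WARNING',
#  'message': 'Title underline too short.'}
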
diff --git a/test/lib/ansible_test/_internal/sanity/sanity_docs.py b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
new file mode 100644
index 00000000..44638075
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
@@ -0,0 +1,62 @@
+"""Sanity test for documentation of sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ sanity_get_tests,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class SanityDocsTest(SanityVersionNeutral):
+ """Sanity test for documentation of sanity tests."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ sanity_dir = 'docs/docsite/rst/dev_guide/testing/sanity'
+ sanity_docs = set(part[0] for part in (os.path.splitext(os.path.basename(path)) for path in data_context().content.get_files(sanity_dir))
+ if part[1] == '.rst')
+ sanity_tests = set(sanity_test.name for sanity_test in sanity_get_tests())
+
+ missing = sanity_tests - sanity_docs
+
+ results = []
+
+ results += [SanityMessage(
+ message='missing docs for ansible-test sanity --test %s' % r,
+ path=os.path.join(sanity_dir, '%s.rst' % r),
+ ) for r in sorted(missing)]
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
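
The check above reduces to a set difference between registered test names and the .rst files that document them; a sketch with illustrative names:

sanity_tests = set(['pep8', 'pslint', 'shellcheck'])  # registered tests
sanity_docs = set(['pep8', 'pslint'])                 # documented tests

for name in sorted(sanity_tests - sanity_docs):
    print('missing docs for ansible-test sanity --test %s' % name)
# missing docs for ansible-test sanity --test shellcheck
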
diff --git a/test/lib/ansible_test/_internal/sanity/shellcheck.py b/test/lib/ansible_test/_internal/sanity/shellcheck.py
new file mode 100644
index 00000000..82689ced
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/shellcheck.py
@@ -0,0 +1,110 @@
+"""Sanity test using shellcheck."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from xml.etree.ElementTree import (
+ fromstring,
+ Element,
+)
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ read_lines_without_comments,
+ find_executable,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class ShellcheckTest(SanityVersionNeutral):
+ """Sanity test using shellcheck."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AT1000'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.sh']
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt')
+ exclude = set(read_lines_without_comments(exclude_file, remove_blank_lines=True, optional=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('shellcheck', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmd = [
+ 'shellcheck',
+ '-e', ','.join(sorted(exclude)),
+ '--format', 'checkstyle',
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr or status > 1:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ # json output is missing file paths in older versions of shellcheck, so we'll use xml instead
+ root = fromstring(stdout) # type: Element
+
+ results = []
+
+ for item in root: # type: Element
+ for entry in item: # type: Element
+ results.append(SanityMessage(
+ message=entry.attrib['message'],
+ path=item.attrib['name'],
+ line=int(entry.attrib['line']),
+ column=int(entry.attrib['column']),
+ level=entry.attrib['severity'],
+ code=entry.attrib['source'].replace('ShellCheck.', ''),
+ ))
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
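
A sketch of the checkstyle XML walk above, on made-up shellcheck output; file elements wrap error elements, and the 'ShellCheck.' prefix is stripped from the source code:

from xml.etree.ElementTree import fromstring

stdout = '''<checkstyle version="4.3">
  <file name="hacking/example.sh">
    <error line="3" column="8" severity="warning" message="Double quote to prevent globbing." source="ShellCheck.SC2086"/>
  </file>
</checkstyle>'''

root = fromstring(stdout)

for item in root:
    for entry in item:
        print('%s:%s %s %s' % (item.attrib['name'], entry.attrib['line'],
                               entry.attrib['source'].replace('ShellCheck.', ''), entry.attrib['message']))
# hacking/example.sh:3 SC2086 Double quote to prevent globbing.
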
diff --git a/test/lib/ansible_test/_internal/sanity/validate_modules.py b/test/lib/ansible_test/_internal/sanity/validate_modules.py
new file mode 100644
index 00000000..add3cdc7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/validate_modules.py
@@ -0,0 +1,149 @@
+"""Sanity test using validate-modules."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetailError,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class ValidateModulesTest(SanitySingleVersion):
+ """Sanity test using validate-modules."""
+
+ def __init__(self):
+ super(ValidateModulesTest, self).__init__()
+ self.optional_error_codes.update([
+ 'deprecated-date',
+ ])
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if target.module]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ env = ansible_environment(args, color=False)
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ python = find_python(python_version)
+
+ cmd = [
+ python,
+ os.path.join(SANITY_ROOT, 'validate-modules', 'validate-modules'),
+ '--format', 'json',
+ '--arg-spec',
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection', data_context().content.collection.directory])
+
+ try:
+ collection_detail = get_collection_detail(args, python)
+
+ if collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+ else:
+ display.warning('Skipping validate-modules collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason)
+ else:
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd.extend([
+ '--base-branch', base_branch,
+ ])
+ else:
+ display.warning('Cannot perform module comparison against the base branch because the base branch was not detected.')
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr or status not in (0, 3):
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ messages = json.loads(stdout)
+
+ errors = []
+
+ for filename in messages:
+ output = messages[filename]
+
+ for item in output['errors']:
+ errors.append(SanityMessage(
+ path=filename,
+ line=int(item['line']) if 'line' in item else 0,
+ column=int(item['column']) if 'column' in item else 0,
+ level='error',
+ code='%s' % item['code'],
+ message=item['msg'],
+ ))
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
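
validate-modules emits a JSON mapping of file name to error records; a made-up document in that shape (the code value is illustrative), walked the same way the test does:

import json

stdout = json.dumps({
    'plugins/modules/example.py': {
        'errors': [
            {'code': 'missing-documentation', 'msg': 'No DOCUMENTATION provided', 'line': 1, 'column': 1},
        ],
    },
})

messages = json.loads(stdout)

for filename in messages:
    for item in messages[filename]['errors']:
        print('%s:%d:%d %s %s' % (filename, item.get('line', 0), item.get('column', 0), item['code'], item['msg']))
# plugins/modules/example.py:1:1 missing-documentation No DOCUMENTATION provided
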
diff --git a/test/lib/ansible_test/_internal/sanity/yamllint.py b/test/lib/ansible_test/_internal/sanity/yamllint.py
new file mode 100644
index 00000000..85a576d0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/yamllint.py
@@ -0,0 +1,136 @@
+"""Sanity test using yamllint."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from .. import types as t
+
+from .. import ansible_util
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySkipped,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ is_subdir,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class YamllintTest(SanitySingleVersion):
+ """Sanity test using yamllint."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
+
+ for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
+ if plugin_type == 'module_utils':
+ continue
+
+ yaml_targets.extend([target for target in targets if
+ os.path.splitext(target.path)[1] == '.py' and
+ os.path.basename(target.path) != '__init__.py' and
+ is_subdir(target.path, plugin_path)])
+
+ return yaml_targets
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ pyyaml_presence = ansible_util.check_pyyaml(args, python_version, quiet=True)
+ if not pyyaml_presence['cloader']:
+ display.warning("Skipping sanity test '%s' due to missing libyaml support in PyYAML."
+ % self.name)
+ return SanitySkipped(self.name)
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ python = find_python(python_version)
+
+ results = self.test_paths(args, paths, python)
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def test_paths(args, paths, python):
+ """
+ :type args: SanityConfig
+ :type paths: list[str]
+ :type python: str
+ :rtype: list[SanityMessage]
+ """
+ cmd = [
+ python,
+ os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
+ ]
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return []
+
+ results = json.loads(stdout)['messages']
+
+ results = [SanityMessage(
+ code=r['code'],
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level=r['level'],
+ ) for r in results]
+
+ return results
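
yamllinter.py receives the newline-joined paths on stdin and answers with a JSON document holding a 'messages' list; a made-up response (field values are illustrative) parsed as above:

import json

stdout = json.dumps({
    'messages': [
        {'code': 'trailing-spaces', 'message': 'trailing spaces', 'path': 'roles/example/defaults/main.yml',
         'line': 7, 'column': 21, 'level': 'error'},
    ],
})

for r in json.loads(stdout)['messages']:
    print('%s:%d:%d [%s] %s (%s)' % (r['path'], int(r['line']), int(r['column']), r['level'], r['message'], r['code']))
# roles/example/defaults/main.yml:7:21 [error] trailing spaces (trailing-spaces)
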
diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py
new file mode 100644
index 00000000..7bafd717
--- /dev/null
+++ b/test/lib/ansible_test/_internal/target.py
@@ -0,0 +1,694 @@
+"""Test target identification, iteration and inclusion/exclusion."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import itertools
+import abc
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_text,
+)
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ read_lines_without_comments,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+MODULE_EXTENSIONS = '.py', '.ps1'
+
+try:
+ TCompletionTarget = t.TypeVar('TCompletionTarget', bound='CompletionTarget')
+except AttributeError:
+ TCompletionTarget = None # pylint: disable=invalid-name
+
+try:
+ TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound='IntegrationTarget')
+except AttributeError:
+ TIntegrationTarget = None # pylint: disable=invalid-name
+
+
+def find_target_completion(target_func, prefix):
+ """
+ :type target_func: () -> collections.Iterable[CompletionTarget]
+ :type prefix: unicode
+ :rtype: list[str]
+ """
+ try:
+ targets = target_func()
+ short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash
+ matches = walk_completion_targets(targets, prefix, short)
+ return matches
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ return [u'%s' % ex]
+
+
+def walk_completion_targets(targets, prefix, short=False):
+ """
+ :type targets: collections.Iterable[CompletionTarget]
+ :type prefix: str
+ :type short: bool
+ :rtype: tuple[str]
+ """
+ aliases = set(alias for target in targets for alias in target.aliases)
+
+ if prefix.endswith('/') and prefix in aliases:
+ aliases.remove(prefix)
+
+ matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
+
+ if short:
+ offset = len(os.path.dirname(prefix))
+ if offset:
+ offset += 1
+ relative_matches = [match[offset:] for match in matches if len(match) > offset]
+ if len(relative_matches) > 1:
+ matches = relative_matches
+
+ return tuple(sorted(matches))
+
+
+def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
+ """
+ :type targets: collections.Iterable[T <= CompletionTarget]
+ :type includes: list[str]
+ :type excludes: list[str]
+ :type requires: list[str]
+ :rtype: tuple[T <= CompletionTarget]
+ """
+ targets = tuple(targets)
+
+ include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda include_target: include_target.name)
+
+ if requires:
+ require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
+ include_targets = [require_target for require_target in include_targets if require_target in require_targets]
+
+ if excludes:
+ list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
+
+ internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False))
+ return tuple(sorted(internal_targets, key=lambda sort_target: sort_target.name))
+
+
+def filter_targets(targets, # type: t.Iterable[TCompletionTarget]
+ patterns, # type: t.List[str]
+ include=True, # type: bool
+ directories=True, # type: bool
+ errors=True, # type: bool
+ ): # type: (...) -> t.Iterable[TCompletionTarget]
+ """Iterate over the given targets and filter them based on the supplied arguments."""
+ unmatched = set(patterns or ())
+ compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
+
+ for target in targets:
+ matched_directories = set()
+ match = False
+
+ if patterns:
+ for alias in target.aliases:
+ for pattern in patterns:
+ if compiled_patterns[pattern].match(alias):
+ match = True
+
+ try:
+ unmatched.remove(pattern)
+ except KeyError:
+ pass
+
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+ elif include:
+ match = True
+ if not target.base_path:
+ matched_directories.add('.')
+ for alias in target.aliases:
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+
+ if match != include:
+ continue
+
+ if directories and matched_directories:
+ yield DirectoryTarget(to_text(sorted(matched_directories, key=len)[0]), target.modules)
+ else:
+ yield target
+
+ if errors:
+ if unmatched:
+ raise TargetPatternsNotMatched(unmatched)
+
+
+def walk_module_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ for target in walk_test_targets(path=data_context().content.module_path, module_path=data_context().content.module_path, extensions=MODULE_EXTENSIONS):
+ if not target.module:
+ continue
+
+ yield target
+
+
+def walk_units_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_')
+
+
+def walk_compile_targets(include_symlinks=True):
+ """
+ :type include_symlinks: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks)
+
+
+def walk_powershell_targets(include_symlinks=True):
+ """
+ :type include_symlinks: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks)
+
+
+def walk_sanity_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, include_symlinks=True, include_symlinked_directories=True)
+
+
+def walk_posix_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
+ yield target
+
+
+def walk_network_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
+ yield target
+
+
+def walk_windows_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
+ yield target
+
+
+def walk_integration_targets():
+ """
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ path = data_context().content.integration_targets_path
+ modules = frozenset(target.module for target in walk_module_targets())
+ paths = data_context().content.walk_files(path)
+ prefixes = load_integration_prefixes()
+ targets_path_tuple = tuple(path.split(os.path.sep))
+
+ entry_dirs = (
+ 'defaults',
+ 'files',
+ 'handlers',
+ 'meta',
+ 'tasks',
+ 'templates',
+ 'vars',
+ )
+
+ entry_files = (
+ 'main.yml',
+ 'main.yaml',
+ )
+
+ entry_points = []
+
+ for entry_dir in entry_dirs:
+ for entry_file in entry_files:
+ entry_points.append(os.path.join(os.path.sep, entry_dir, entry_file))
+
+ # any directory with at least one file is a target
+ path_tuples = set(tuple(os.path.dirname(p).split(os.path.sep))
+ for p in paths)
+
+ # also detect targets which are ansible roles, looking for standard entry points
+ path_tuples.update(tuple(os.path.dirname(os.path.dirname(p)).split(os.path.sep))
+ for p in paths if any(p.endswith(entry_point) for entry_point in entry_points))
+
+ # remove the top-level directory if it was included
+ if targets_path_tuple in path_tuples:
+ path_tuples.remove(targets_path_tuple)
+
+ previous_path_tuple = None
+ paths = []
+
+ for path_tuple in sorted(path_tuples):
+ if previous_path_tuple and previous_path_tuple == path_tuple[:len(previous_path_tuple)]:
+ # ignore nested directories
+ continue
+
+ previous_path_tuple = path_tuple
+ paths.append(os.path.sep.join(path_tuple))
+
+ for path in paths:
+ yield IntegrationTarget(to_text(path), modules, prefixes)
+
+
+def load_integration_prefixes():
+ """
+ :rtype: dict[str, str]
+ """
+ path = data_context().content.integration_path
+ file_paths = sorted(f for f in data_context().content.get_files(path) if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes')
+ prefixes = {}
+
+ for file_path in file_paths:
+ prefix = os.path.splitext(file_path)[1][1:]
+ prefixes.update(dict((k, prefix) for k in read_text_file(file_path).splitlines()))
+
+ return prefixes
+
+
+def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None, include_symlinks=False, include_symlinked_directories=False):
+ """
+ :type path: str | None
+ :type module_path: str | None
+ :type extensions: tuple[str] | None
+ :type prefix: str | None
+ :type extra_dirs: tuple[str] | None
+ :type include_symlinks: bool
+ :type include_symlinked_directories: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ if path:
+ file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories)
+ else:
+ file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories)
+
+ for file_path in file_paths:
+ name, ext = os.path.splitext(os.path.basename(file_path))
+
+ if extensions and ext not in extensions:
+ continue
+
+ if prefix and not name.startswith(prefix):
+ continue
+
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(to_text(file_path), module_path, prefix, path, symlink)
+
+ file_paths = []
+
+ if extra_dirs:
+ for extra_dir in extra_dirs:
+ for file_path in data_context().content.get_files(extra_dir):
+ file_paths.append(file_path)
+
+ for file_path in file_paths:
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(file_path, module_path, prefix, path, symlink)
+
+
+def analyze_integration_target_dependencies(integration_targets):
+ """
+ :type integration_targets: list[IntegrationTarget]
+ :rtype: dict[str,set[str]]
+ """
+ real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
+
+ role_targets = [target for target in integration_targets if target.type == 'role']
+ hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
+
+ dependencies = collections.defaultdict(set)
+
+ # handle setup dependencies
+ for target in integration_targets:
+ for setup_target_name in target.setup_always + target.setup_once:
+ dependencies[setup_target_name].add(target.name)
+
+ # handle target dependencies
+ for target in integration_targets:
+ for need_target in target.needs_target:
+ dependencies[need_target].add(target.name)
+
+ # handle symlink dependencies between targets
+ # this use case is supported, but discouraged
+ for target in integration_targets:
+ for path in data_context().content.walk_files(target.path):
+ if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
+ continue
+
+ real_link_path = os.path.realpath(path)
+
+ if not real_link_path.startswith(real_target_root):
+ continue
+
+ link_target = real_link_path[len(real_target_root):].split('/')[0]
+
+ if link_target == target.name:
+ continue
+
+ dependencies[link_target].add(target.name)
+
+ # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
+ # script-based targets are scanned as well, since they may execute a playbook with role dependencies
+ for target in integration_targets:
+ meta_dir = os.path.join(target.path, 'meta')
+
+ if not os.path.isdir(meta_dir):
+ continue
+
+ meta_paths = data_context().content.get_files(meta_dir)
+
+ for meta_path in meta_paths:
+ if os.path.exists(meta_path):
+ # try to decode the file as a utf-8 string; skip if it contains invalid chars (binary file)
+ try:
+ meta_lines = read_text_file(meta_path).splitlines()
+ except UnicodeDecodeError:
+ continue
+
+ for meta_line in meta_lines:
+ if re.search(r'^ *#.*$', meta_line):
+ continue
+
+ if not meta_line.strip():
+ continue
+
+ for hidden_target_name in hidden_role_target_names:
+ if hidden_target_name in meta_line:
+ dependencies[hidden_target_name].add(target.name)
+
+ while True:
+ changes = 0
+
+ for dummy, dependent_target_names in dependencies.items():
+ for dependent_target_name in list(dependent_target_names):
+ new_target_names = dependencies.get(dependent_target_name)
+
+ if new_target_names:
+ for new_target_name in new_target_names:
+ if new_target_name not in dependent_target_names:
+ dependent_target_names.add(new_target_name)
+ changes += 1
+
+ if not changes:
+ break
+
+ for target_name in sorted(dependencies):
+ consumers = dependencies[target_name]
+
+ if not consumers:
+ continue
+
+ display.info('%s:' % target_name, verbosity=4)
+
+ for consumer in sorted(consumers):
+ display.info(' %s' % consumer, verbosity=4)
+
+ return dependencies
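+
+# A worked sketch of the fixed-point loop above, with illustrative names: if
+# 'b' depends on 'setup_a' and 'c' depends on 'b', each pass copies consumers
+# of consumers upward until a pass makes no changes:
+#
+#   >>> deps = {'setup_a': set(['b']), 'b': set(['c'])}
+#   >>> # ... after the while-loop converges ...
+#   >>> sorted(deps['setup_a'])
+#   ['b', 'c']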
+
+
+class CompletionTarget:
+ """Command-line argument completion target base class."""
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self):
+ self.name = None
+ self.path = None
+ self.base_path = None
+ self.modules = tuple()
+ self.aliases = tuple()
+
+ def __eq__(self, other):
+ if isinstance(other, CompletionTarget):
+ return self.__repr__() == other.__repr__()
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ return self.name.__lt__(other.name)
+
+ def __gt__(self, other):
+ return self.name.__gt__(other.name)
+
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def __repr__(self):
+ if self.modules:
+ return '%s (%s)' % (self.name, ', '.join(self.modules))
+
+ return self.name
+
+
+class DirectoryTarget(CompletionTarget):
+ """Directory target."""
+ def __init__(self, path, modules):
+ """
+ :type path: str
+ :type modules: tuple[str]
+ """
+ super(DirectoryTarget, self).__init__()
+
+ self.name = path
+ self.path = path
+ self.modules = modules
+
+
+class TestTarget(CompletionTarget):
+ """Generic test target."""
+ def __init__(self, path, module_path, module_prefix, base_path, symlink=None):
+ """
+ :type path: str
+ :type module_path: str | None
+ :type module_prefix: str | None
+ :type base_path: str
+ :type symlink: bool | None
+ """
+ super(TestTarget, self).__init__()
+
+ if symlink is None:
+ symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
+
+ self.name = path
+ self.path = path
+ self.base_path = base_path + '/' if base_path else None
+ self.symlink = symlink
+
+ name, ext = os.path.splitext(os.path.basename(self.path))
+
+ if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
+ self.module = name[len(module_prefix or ''):].lstrip('_')
+ self.modules = (self.module,)
+ else:
+ self.module = None
+ self.modules = tuple()
+
+ aliases = [self.path, self.module]
+ parts = self.path.split('/')
+
+ for i in range(1, len(parts)):
+ alias = '%s/' % '/'.join(parts[:i])
+ aliases.append(alias)
+
+ aliases = [a for a in aliases if a]
+
+ self.aliases = tuple(sorted(aliases))
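+
+# Alias construction sketch for an illustrative path: each parent directory
+# becomes an alias with a trailing slash, alongside the path itself (and the
+# module name, if any):
+#
+#   >>> parts = 'lib/ansible/modules/ping.py'.split('/')
+#   >>> ['%s/' % '/'.join(parts[:i]) for i in range(1, len(parts))]
+#   ['lib/', 'lib/ansible/', 'lib/ansible/modules/']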
+
+
+class IntegrationTarget(CompletionTarget):
+ """Integration test target."""
+ non_posix = frozenset((
+ 'network',
+ 'windows',
+ ))
+
+ categories = frozenset(non_posix | frozenset((
+ 'posix',
+ 'module',
+ 'needs',
+ 'skip',
+ )))
+
+ def __init__(self, path, modules, prefixes):
+ """
+ :type path: str
+ :type modules: frozenset[str]
+ :type prefixes: dict[str, str]
+ """
+ super(IntegrationTarget, self).__init__()
+
+ self.relative_path = os.path.relpath(path, data_context().content.integration_targets_path)
+ self.name = self.relative_path.replace(os.path.sep, '.')
+ self.path = path
+
+ # script_path and type
+
+ file_paths = data_context().content.get_files(path)
+ runme_path = os.path.join(path, 'runme.sh')
+
+ if runme_path in file_paths:
+ self.type = 'script'
+ self.script_path = runme_path
+ else:
+ self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
+ self.script_path = None
+
+ # static_aliases
+
+ aliases_path = os.path.join(path, 'aliases')
+
+ if aliases_path in file_paths:
+ static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
+ else:
+ static_aliases = tuple()
+
+ # modules
+
+ if self.name in modules:
+ module_name = self.name
+ elif self.name.startswith('win_') and self.name[4:] in modules:
+ module_name = self.name[4:]
+ else:
+ module_name = None
+
+ self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
+
+ # groups
+
+ groups = [self.type]
+ groups += [a for a in static_aliases if a not in modules]
+ groups += ['module/%s' % m for m in self.modules]
+
+ if not self.modules:
+ groups.append('non_module')
+
+ if 'destructive' not in groups:
+ groups.append('non_destructive')
+
+ if '_' in self.name:
+ prefix = self.name[:self.name.find('_')]
+ else:
+ prefix = None
+
+ if prefix in prefixes:
+ group = prefixes[prefix]
+
+ if group != prefix:
+ group = '%s/%s' % (group, prefix)
+
+ groups.append(group)
+
+ if self.name.startswith('win_'):
+ groups.append('windows')
+
+ if self.name.startswith('connection_'):
+ groups.append('connection')
+
+ if self.name.startswith('setup_') or self.name.startswith('prepare_'):
+ groups.append('hidden')
+
+ if self.type not in ('script', 'role'):
+ groups.append('hidden')
+
+ targets_relative_path = data_context().content.integration_targets_path
+
+ # Collect skip entries before group expansion to avoid registering more specific skip entries as less specific versions.
+ self.skips = tuple(g for g in groups if g.startswith('skip/'))
+
+ # Collect file paths before group expansion to avoid including the directories.
+ # Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
+ self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
+ g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path))))
+
+ # network platform
+ networks = [g.split('/')[1] for g in groups if g.startswith('network/')]
+ self.network_platform = networks[0] if networks else None
+
+ # iterate over a snapshot of the current groups so the prefix expansion below does not process its own additions
+ for group in itertools.islice(groups, 0, len(groups)):
+ if '/' in group:
+ parts = group.split('/')
+ for i in range(1, len(parts)):
+ groups.append('/'.join(parts[:i]))
+
+ if not any(g in self.non_posix for g in groups):
+ groups.append('posix')
+
+ # aliases
+
+ aliases = [self.name] + \
+ ['%s/' % g for g in groups] + \
+ ['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
+
+ if 'hidden/' in aliases:
+ aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
+
+ self.aliases = tuple(sorted(set(aliases)))
+
+ # configuration
+
+ self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
+ self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
+ self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
+
+
+class TargetPatternsNotMatched(ApplicationError):
+ """One or more targets were not matched when a match was required."""
+ def __init__(self, patterns):
+ """
+ :type patterns: set[str]
+ """
+ self.patterns = sorted(patterns)
+
+ if len(patterns) > 1:
+ message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
+ else:
+ message = 'Target pattern not matched: %s' % self.patterns[0]
+
+ super(TargetPatternsNotMatched, self).__init__(message)
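
The group expansion near the end of IntegrationTarget.__init__ iterates over a snapshot of the list (islice bounded by the current length) and appends every prefix of each nested group; the same effect with an explicit copy, on an illustrative group:

groups = ['needs/target/setup_example']  # illustrative

for group in list(groups):  # snapshot, as islice(groups, 0, len(groups)) does
    if '/' in group:
        parts = group.split('/')
        for i in range(1, len(parts)):
            groups.append('/'.join(parts[:i]))

print(sorted(set(groups)))
# ['needs', 'needs/target', 'needs/target/setup_example']
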
diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py
new file mode 100644
index 00000000..8d9629a9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/test.py
@@ -0,0 +1,524 @@
+"""Classes for storing and processing test results."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import re
+
+from . import types as t
+
+from .util import (
+ display,
+ get_ansible_version,
+)
+
+from .util_common import (
+ write_text_test_results,
+ write_json_test_results,
+ ResultType,
+)
+
+from .config import (
+ TestConfig,
+)
+
+
+def calculate_best_confidence(choices, metadata):
+ """
+ :type choices: tuple[tuple[str, int]]
+ :type metadata: Metadata
+ :rtype: int
+ """
+ best_confidence = 0
+
+ for path, line in choices:
+ confidence = calculate_confidence(path, line, metadata)
+ best_confidence = max(confidence, best_confidence)
+
+ return best_confidence
+
+
+def calculate_confidence(path, line, metadata):
+ """
+ :type path: str
+ :type line: int
+ :type metadata: Metadata
+ :rtype: int
+ """
+ ranges = metadata.changes.get(path)
+
+ # no changes were made to the file
+ if not ranges:
+ return 0
+
+ # changes were made to the same file and line
+ if any(r[0] <= line <= r[1] for r in ranges):
+ return 100
+
+ # changes were made to the same file and the line number is unknown
+ if line == 0:
+ return 75
+
+ # changes were made to the same file and the line number is different
+ return 50
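+
+# Worked example with illustrative metadata: given
+# metadata.changes == {'a.py': [(10, 20)]}, a message at a.py:15 falls inside
+# a changed range:
+#
+#   >>> ranges = [(10, 20)]
+#   >>> any(r[0] <= 15 <= r[1] for r in ranges)
+#   True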
+
+
+class TestResult:
+ """Base class for test results."""
+ def __init__(self, command, test, python_version=None):
+ """
+ :type command: str
+ :type test: str
+ :type python_version: str
+ """
+ self.command = command
+ self.test = test
+ self.python_version = python_version
+ self.name = self.test or self.command
+
+ if self.python_version:
+ self.name += '-python-%s' % self.python_version
+
+ try:
+ import junit_xml
+ except ImportError:
+ junit_xml = None
+
+ self.junit = junit_xml
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ self.write_console()
+ self.write_bot(args)
+
+ if args.lint:
+ self.write_lint()
+
+ if args.junit:
+ if self.junit:
+ self.write_junit(args)
+ else:
+ display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
+
+ def write_console(self):
+ """Write results to console."""
+
+ def write_lint(self):
+ """Write lint results to stdout."""
+
+ def write_bot(self, args):
+ """
+ :type args: TestConfig
+ """
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+
+ def create_result_name(self, extension):
+ """
+ :type extension: str
+ :rtype: str
+ """
+ name = 'ansible-test-%s' % self.command
+
+ if self.test:
+ name += '-%s' % self.test
+
+ if self.python_version:
+ name += '-python-%s' % self.python_version
+
+ name += extension
+
+ return name
+
+ def save_junit(self, args, test_case, properties=None):
+ """
+ :type args: TestConfig
+ :type test_case: junit_xml.TestCase
+ :type properties: dict[str, str] | None
+ :rtype: str | None
+ """
+ test_suites = [
+ self.junit.TestSuite(
+ name='ansible-test',
+ test_cases=[test_case],
+ timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
+ properties=properties,
+ ),
+ ]
+
+ # the junit_xml API is changing in version 2.0.0
+ # TestSuite.to_xml_string is being replaced with to_xml_report_string
+ # see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
+ try:
+ to_xml_string = self.junit.to_xml_report_string
+ except AttributeError:
+ to_xml_string = self.junit.TestSuite.to_xml_string
+
+ report = to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
+
+ if args.explain:
+ return
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
+
+
+class TestTimeout(TestResult):
+ """Test timeout."""
+ def __init__(self, timeout_duration):
+ """
+ :type timeout_duration: int
+ """
+ super(TestTimeout, self).__init__(command='timeout', test='')
+
+ self.timeout_duration = timeout_duration
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '''
+One or more of the following situations may be responsible:
+
+- Code changes have resulted in tests that hang or run for an excessive amount of time.
+- Tests have been added which exceed the time limit when combined with existing tests.
+- Test infrastructure and/or external dependencies are operating slower than normal.'''
+
+ if args.coverage:
+ output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
+
+ output += '\n\nConsult the console log for additional details on where the timeout occurred.'
+
+ timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+ # hack to avoid requiring junit-xml, which may not be pre-installed outside our test containers
+ xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
+
+
+class TestSuccess(TestResult):
+ """Test success."""
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+ self.save_junit(args, test_case)
+
+
+class TestSkipped(TestResult):
+ """Test skipped."""
+ def write_console(self):
+ """Write results to console."""
+ display.info('No tests applicable.', verbosity=1)
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+ test_case.add_skipped_info('No tests applicable.')
+
+ self.save_junit(args, test_case)
+
+
+class TestFailure(TestResult):
+ """Test failure."""
+ def __init__(self, command, test, python_version=None, messages=None, summary=None):
+ """
+ :type command: str
+ :type test: str
+ :type python_version: str | None
+ :type messages: list[TestMessage] | None
+ :type summary: unicode | None
+ """
+ super(TestFailure, self).__init__(command, test, python_version)
+
+ if messages:
+ messages = sorted(messages)
+ else:
+ messages = []
+
+ self.messages = messages
+ self.summary = summary
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ if args.metadata.changes:
+ self.populate_confidence(args.metadata)
+
+ super(TestFailure, self).write(args)
+
+ def write_console(self):
+ """Write results to console."""
+ if self.summary:
+ display.error(self.summary)
+ else:
+ if self.python_version:
+ specifier = ' on python %s' % self.python_version
+ else:
+ specifier = ''
+
+ display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
+
+ for message in self.messages:
+ display.error(message.format(show_confidence=True))
+
+ doc_url = self.find_docs()
+ if doc_url:
+ display.info('See documentation for help: %s' % doc_url)
+
+ def write_lint(self):
+ """Write lint results to stdout."""
+ if self.summary:
+ command = self.format_command()
+ message = 'The test `%s` failed. See stderr output for details.' % command
+ path = ''
+ message = TestMessage(message, path)
+ print(message)
+ else:
+ for message in self.messages:
+ print(message)
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ title = self.format_title()
+ output = self.format_block()
+
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ test_case.add_failure_info(message=title, output='\n%s' % output)
+
+ self.save_junit(args, test_case)
+
+ def write_bot(self, args):
+ """
+ :type args: TestConfig
+ """
+ docs = self.find_docs()
+ message = self.format_title(help_link=docs)
+ output = self.format_block()
+
+ if self.messages:
+ verified = all((m.confidence or 0) >= 50 for m in self.messages)
+ else:
+ verified = False
+
+ bot_data = dict(
+ verified=verified,
+ docs=docs,
+ results=[
+ dict(
+ message=message,
+ output=output,
+ ),
+ ],
+ )
+
+ if args.explain:
+ return
+
+ write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
+
+ def populate_confidence(self, metadata):
+ """
+ :type metadata: Metadata
+ """
+ for message in self.messages:
+ if message.confidence is None:
+ message.confidence = calculate_confidence(message.path, message.line, metadata)
+
+ def format_command(self):
+ """
+ :rtype: str
+ """
+ command = 'ansible-test %s' % self.command
+
+ if self.test:
+ command += ' --test %s' % self.test
+
+ if self.python_version:
+ command += ' --python %s' % self.python_version
+
+ return command
+
+ def find_docs(self):
+ """
+ :rtype: str
+ """
+ if self.command != 'sanity':
+ return None # only sanity tests have docs links
+
+ # Use the major.minor version for the URL only if this is a release that
+ # matches the pattern 2.4.0; otherwise, use 'devel'.
+ ansible_version = get_ansible_version()
+ url_version = 'devel'
+ if re.search(r'^[0-9.]+$', ansible_version):
+ url_version = '.'.join(ansible_version.split('.')[:2])
+
+ testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
+
+ url = '%s/%s/' % (testing_docs_url, self.command)
+
+ if self.test:
+ url += '%s.html' % self.test
+
+ return url
+
+ def format_title(self, help_link=None):
+ """
+ :type help_link: str | None
+ :rtype: str
+ """
+ command = self.format_command()
+
+ if self.summary:
+ reason = 'the error'
+ else:
+ reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
+
+ if help_link:
+ help_link_markup = ' [[explain](%s)]' % help_link
+ else:
+ help_link_markup = ''
+
+ title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
+
+ return title
+
+ def format_block(self):
+ """
+ :rtype: str
+ """
+ if self.summary:
+ block = self.summary
+ else:
+ block = '\n'.join(m.format() for m in self.messages)
+
+ message = block.strip()
+
+ # Hack to remove ANSI color reset code from SubprocessError messages.
+ message = message.replace(display.clear, '')
+
+ return message
+
+
+class TestMessage:
+ """Single test message for one file."""
+ def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
+ """
+ :type message: str
+ :type path: str
+ :type line: int
+ :type column: int
+ :type level: str
+ :type code: str | None
+ :type confidence: int | None
+ """
+ self.__path = path
+ self.__line = line
+ self.__column = column
+ self.__level = level
+ self.__code = code
+ self.__message = message
+
+ self.confidence = confidence
+
+ @property
+ def path(self): # type: () -> str
+ """Return the path."""
+ return self.__path
+
+ @property
+ def line(self): # type: () -> int
+ """Return the line number, or 0 if none is available."""
+ return self.__line
+
+ @property
+ def column(self): # type: () -> int
+ """Return the column number, or 0 if none is available."""
+ return self.__column
+
+ @property
+ def level(self): # type: () -> str
+ """Return the level."""
+ return self.__level
+
+ @property
+ def code(self): # type: () -> t.Optional[str]
+ """Return the code, if any."""
+ return self.__code
+
+ @property
+ def message(self): # type: () -> str
+ """Return the message."""
+ return self.__message
+
+ @property
+ def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
+ """Return a tuple with all the immutable values of this test message."""
+ return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
+
+ def __lt__(self, other):
+ return self.tuple < other.tuple
+
+ def __le__(self, other):
+ return self.tuple <= other.tuple
+
+ def __eq__(self, other):
+ return self.tuple == other.tuple
+
+ def __ne__(self, other):
+ return self.tuple != other.tuple
+
+ def __gt__(self, other):
+ return self.tuple > other.tuple
+
+ def __ge__(self, other):
+ return self.tuple >= other.tuple
+
+ def __hash__(self):
+ return hash(self.tuple)
+
+ def __str__(self):
+ return self.format()
+
+ def format(self, show_confidence=False):
+ """
+ :type show_confidence: bool
+ :rtype: str
+ """
+ if self.__code:
+ msg = '%s: %s' % (self.__code, self.__message)
+ else:
+ msg = self.__message
+
+ if show_confidence and self.confidence is not None:
+ msg += ' (%d%%)' % self.confidence
+
+ return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
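+
+
+# Illustrative usage sketch (values are hypothetical): messages sort by the
+# immutable tuple (path, line, column, level, code, message), which is how
+# TestFailure orders its output above.
+#
+#   messages = [
+#       TestMessage('unused import', 'lib/mod.py', line=10, column=1, code='F401'),
+#       TestMessage('undefined name', 'lib/mod.py', line=2, column=5, code='F821'),
+#   ]
+#   for message in sorted(messages):
+#       print(message.format())
+#
+# which prints:
+#
+#   lib/mod.py:2:5: F821: undefined name
+#   lib/mod.py:10:1: F401: unused import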
diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py
new file mode 100644
index 00000000..49fbc1ba
--- /dev/null
+++ b/test/lib/ansible_test/_internal/thread.py
@@ -0,0 +1,57 @@
+"""Python threading tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import threading
+import sys
+
+try:
+ # noinspection PyPep8Naming
+ import Queue as queue
+except ImportError:
+ # noinspection PyUnresolvedReferences
+ import queue # pylint: disable=locally-disabled, import-error
+
+
+class WrappedThread(threading.Thread):
+ """Wrapper around Thread which captures results and exceptions."""
+ def __init__(self, action):
+ """
+ :type action: () -> any
+ """
+ # noinspection PyOldStyleClasses
+ super(WrappedThread, self).__init__()
+ self._result = queue.Queue()
+ self.action = action
+ self.result = None
+
+ def run(self):
+ """
+ Run action and capture results or exception.
+ Do not override. Do not call directly. Executed by the start() method.
+ """
+ # We truly want to catch anything that the worker thread might do, including calls to sys.exit().
+ # Therefore we catch *everything* (including old-style class exceptions).
+ # noinspection PyBroadException, PyPep8
+ try:
+ self._result.put((self.action(), None))
+ # pylint: disable=locally-disabled, bare-except
+ except: # noqa
+ self._result.put((None, sys.exc_info()))
+
+ def wait_for_result(self):
+ """
+ Wait for thread to exit and return the result or raise an exception.
+ :rtype: any
+ """
+ result, exception = self._result.get()
+
+ if exception:
+ if sys.version_info[0] > 2:
+ raise exception[1].with_traceback(exception[2])
+ # noinspection PyRedundantParentheses
+ exec('raise exception[0], exception[1], exception[2]') # pylint: disable=locally-disabled, exec-used
+
+ self.result = result
+
+ return result
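+
+
+# Illustrative usage sketch (the action is hypothetical): run work on a worker
+# thread, then block for the result; an exception raised by the action is
+# re-raised here with its original traceback.
+#
+#   thread = WrappedThread(lambda: sum(range(1000)))
+#   thread.start()
+#   total = thread.wait_for_result()   # 499500, or raises if the action failed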
diff --git a/test/lib/ansible_test/_internal/types.py b/test/lib/ansible_test/_internal/types.py
new file mode 100644
index 00000000..46ef7066
--- /dev/null
+++ b/test/lib/ansible_test/_internal/types.py
@@ -0,0 +1,32 @@
+"""Import wrapper for type hints when available."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+TYPE_CHECKING = False
+
+try:
+ from typing import (
+ Any,
+ AnyStr,
+ BinaryIO,
+ Callable,
+ Dict,
+ FrozenSet,
+ Generator,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Text,
+ TextIO,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ TypeVar,
+ Union,
+ )
+except ImportError:
+ pass
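+
+
+# Consumers import this module as `from . import types as t` and reference the
+# names only in comment-based annotations, e.g. `# type: (str) -> t.Optional[str]`.
+# Since type comments are never evaluated at runtime, code using them still runs
+# on interpreters without the `typing` module; only static analysis needs it.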
diff --git a/test/lib/ansible_test/_internal/units/__init__.py b/test/lib/ansible_test/_internal/units/__init__.py
new file mode 100644
index 00000000..22145431
--- /dev/null
+++ b/test/lib/ansible_test/_internal/units/__init__.py
@@ -0,0 +1,159 @@
+"""Execute unit tests using pytest."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ..util import (
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ get_available_python_versions,
+ is_subdir,
+ SubprocessError,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+)
+
+from ..util_common import (
+ intercept_command,
+ ResultType,
+ handle_layout_messages,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ check_pyyaml,
+)
+
+from ..target import (
+ walk_internal_targets,
+ walk_units_targets,
+)
+
+from ..config import (
+ UnitsConfig,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..executor import (
+ AllTargetsSkipped,
+ Delegate,
+ get_changes_filter,
+ install_command_requirements,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+
+def command_units(args):
+ """
+ :type args: UnitsConfig
+ """
+ handle_layout_messages(data_context().content.unit_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)
+
+ paths = [target.path for target in include]
+ remote_paths = [path for path in paths
+ if is_subdir(path, data_context().content.unit_module_path)
+ or is_subdir(path, data_context().content.unit_module_utils_path)]
+
+ if not paths:
+ raise AllTargetsSkipped()
+
+ if args.python and args.python in REMOTE_ONLY_PYTHON_VERSIONS and not remote_paths:
+ raise AllTargetsSkipped()
+
+ if args.delegate:
+ raise Delegate(require=changes, exclude=args.exclude)
+
+ version_commands = []
+
+ available_versions = sorted(get_available_python_versions(list(SUPPORTED_PYTHON_VERSIONS)).keys())
+
+ for version in SUPPORTED_PYTHON_VERSIONS:
+ # run all versions unless a version was given, in which case run only that version
+ if args.python and version != args.python_version:
+ continue
+
+ if not args.python and version not in available_versions:
+ display.warning("Skipping unit tests on Python %s due to missing interpreter." % version)
+ continue
+
+ if args.requirements_mode != 'skip':
+ install_command_requirements(args, version)
+
+ env = ansible_environment(args)
+
+ cmd = [
+ 'pytest',
+ '--boxed',
+ '-r', 'a',
+ '-n', str(args.num_workers) if args.num_workers else 'auto',
+ '--color',
+ 'yes' if args.color else 'no',
+ '-p', 'no:cacheprovider',
+ '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest.ini'),
+ '--junit-xml', os.path.join(ResultType.JUNIT.path, 'python%s-units.xml' % version),
+ ]
+
+ if not data_context().content.collection:
+ cmd.append('--durations=25')
+
+ if version != '2.6':
+ # added in pytest 4.5.0, which requires python 2.7+
+ cmd.append('--strict-markers')
+
+ plugins = []
+
+ if args.coverage:
+ plugins.append('ansible_pytest_coverage')
+
+ if data_context().content.collection:
+ plugins.append('ansible_pytest_collections')
+
+ if plugins:
+ env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest/plugins')
+ env['PYTEST_PLUGINS'] = ','.join(plugins)
+
+ if args.collect_only:
+ cmd.append('--collect-only')
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ if version in REMOTE_ONLY_PYTHON_VERSIONS:
+ test_paths = remote_paths
+ else:
+ test_paths = paths
+
+ if not test_paths:
+ continue
+
+ cmd.extend(test_paths)
+
+ version_commands.append((version, cmd, env))
+
+ if args.requirements_mode == 'only':
+ sys.exit()
+
+ for version, command, env in version_commands:
+ check_pyyaml(args, version)
+
+ display.info('Unit test with Python %s' % version)
+
+ try:
+ with coverage_context(args):
+ intercept_command(args, command, target_name='units', env=env, python_version=version)
+ except SubprocessError as ex:
+ # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
+ if ex.status != 5:
+ raise
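+
+
+# For reference, the command assembled above resolves to roughly the following
+# (paths, version and worker count are illustrative):
+#
+#   pytest --boxed -r a -n auto --color yes -p no:cacheprovider \
+#       -c .../_data/pytest.ini \
+#       --junit-xml .../junit/python3.8-units.xml \
+#       --durations=25 --strict-markers test/units/...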
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
new file mode 100644
index 00000000..005c3e05
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util.py
@@ -0,0 +1,853 @@
+"""Miscellaneous utility functions and classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import errno
+import fcntl
+import inspect
+import os
+import pkgutil
+import random
+import re
+import shutil
+import socket
+import stat
+import string
+import subprocess
+import sys
+import tempfile
+import time
+import zipfile
+
+from struct import unpack, pack
+from termios import TIOCGWINSZ
+
+try:
+ from abc import ABC
+except ImportError:
+ from abc import ABCMeta
+ ABC = ABCMeta('ABC', (), {})
+
+try:
+ # noinspection PyCompatibility
+ from configparser import ConfigParser
+except ImportError:
+ # noinspection PyCompatibility,PyUnresolvedReferences
+ from ConfigParser import SafeConfigParser as ConfigParser
+
+try:
+ # noinspection PyProtectedMember
+ from shlex import quote as cmd_quote
+except ImportError:
+ # noinspection PyProtectedMember
+ from pipes import quote as cmd_quote
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_optional_bytes,
+ to_optional_text,
+)
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+try:
+ C = t.TypeVar('C')
+except AttributeError:
+ C = None
+
+
+PYTHON_PATHS = {} # type: t.Dict[str, str]
+
+try:
+ # noinspection PyUnresolvedReferences
+ MAXFD = subprocess.MAXFD
+except AttributeError:
+ MAXFD = -1
+
+COVERAGE_CONFIG_NAME = 'coveragerc'
+
+ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+# assume running from install
+ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
+ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
+ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
+ANSIBLE_SOURCE_ROOT = None
+
+if not os.path.exists(ANSIBLE_LIB_ROOT):
+ # running from source
+ ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
+ ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
+ ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
+ ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
+
+ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
+ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
+
+# Modes are set to allow all users the same level of access.
+# This permits files to be used in tests that change users.
+# The only exception is write access to directories for the user creating them.
+# This avoids having to modify the directory permissions a second time.
+
+MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+
+MODE_FILE = MODE_READ
+MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+
+MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
+
+REMOTE_ONLY_PYTHON_VERSIONS = (
+ '2.6',
+)
+
+SUPPORTED_PYTHON_VERSIONS = (
+ '2.6',
+ '2.7',
+ '3.5',
+ '3.6',
+ '3.7',
+ '3.8',
+ '3.9',
+)
+
+
+def remove_file(path):
+ """
+ :type path: str
+ """
+ if os.path.isfile(path):
+ os.remove(path)
+
+
+def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str]
+ """
+ Returns lines from the specified text file with comments removed.
+ Comments are any content from a hash symbol to the end of a line.
+ Any spaces immediately before a comment are also removed.
+ """
+ if optional and not os.path.exists(path):
+ return []
+
+ lines = read_text_file(path).splitlines()
+
+ lines = [re.sub(r' *#.*$', '', line) for line in lines]
+
+ if remove_blank_lines:
+ lines = [line for line in lines if line]
+
+ return lines
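+
+
+# Example behavior (file contents are illustrative): given a file containing
+#
+#   default   # container image
+#
+#   centos8
+#
+# read_lines_without_comments(path, remove_blank_lines=True) returns
+# ['default', 'centos8']; the substitution strips everything from the first
+# hash symbol, plus any spaces immediately before it, to the end of each line.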
+
+
+def find_executable(executable, cwd=None, path=None, required=True):
+ """
+ :type executable: str
+ :type cwd: str
+ :type path: str
+ :type required: bool | str
+ :rtype: str | None
+ """
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ if path is None:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if path:
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ if not match and required:
+ message = 'Required program "%s" not found.' % executable
+
+ if required != 'warning':
+ raise ApplicationError(message)
+
+ display.warning(message)
+
+ return match
+
+
+def find_python(version, path=None, required=True):
+ """
+ :type version: str
+ :type path: str | None
+ :type required: bool
+ :rtype: str
+ """
+ version_info = tuple(int(n) for n in version.split('.'))
+
+ if not path and version_info == sys.version_info[:len(version_info)]:
+ python_bin = sys.executable
+ else:
+ python_bin = find_executable('python%s' % version, path=path, required=required)
+
+ return python_bin
+
+
+def get_ansible_version(): # type: () -> str
+ """Return the Ansible version."""
+ try:
+ return get_ansible_version.version
+ except AttributeError:
+ pass
+
+ # ansible may not be in our sys.path
+ # avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation
+ load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
+
+ # noinspection PyUnresolvedReferences
+ from ansible_release import __version__ as ansible_version # pylint: disable=import-error
+
+ get_ansible_version.version = ansible_version
+
+ return ansible_version
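+
+
+# The attribute lookup above is a minimal function-attribute memoization idiom:
+# the first call pays for load_module(), every later call returns the cached
+# value. A generic sketch of the same pattern (compute() is hypothetical):
+#
+#   def cached_value():
+#       try:
+#           return cached_value.result
+#       except AttributeError:
+#           cached_value.result = compute()
+#       return cached_value.result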
+
+
+def get_available_python_versions(versions): # type: (t.List[str]) -> t.Dict[str, str]
+ """Return a dictionary indicating which of the requested Python versions are available."""
+ try:
+ return get_available_python_versions.result
+ except AttributeError:
+ pass
+
+ get_available_python_versions.result = dict((version, path) for version, path in
+ ((version, find_python(version, required=False)) for version in versions) if path)
+
+ return get_available_python_versions.result
+
+
+def generate_pip_command(python):
+ """
+ :type python: str
+ :rtype: list[str]
+ """
+ return [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'quiet_pip.py')]
+
+
+def raw_command(cmd, capture=False, env=None, data=None, cwd=None, explain=False, stdin=None, stdout=None,
+ cmd_verbosity=1, str_errors='strict'):
+ """
+ :type cmd: collections.Iterable[str]
+ :type capture: bool
+ :type env: dict[str, str] | None
+ :type data: str | None
+ :type cwd: str | None
+ :type explain: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type cmd_verbosity: int
+ :type str_errors: str
+ :rtype: str | None, str | None
+ """
+ if not cwd:
+ cwd = os.getcwd()
+
+ if not env:
+ env = common_environment()
+
+ cmd = list(cmd)
+
+ escaped_cmd = ' '.join(cmd_quote(c) for c in cmd)
+
+ display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True)
+ display.info('Working directory: %s' % cwd, verbosity=2)
+
+ program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
+
+ if program:
+ display.info('Program found: %s' % program, verbosity=2)
+
+ for key in sorted(env.keys()):
+ display.info('%s=%s' % (key, env[key]), verbosity=2)
+
+ if explain:
+ return None, None
+
+ communicate = False
+
+ if stdin is not None:
+ data = None
+ communicate = True
+ elif data is not None:
+ stdin = subprocess.PIPE
+ communicate = True
+
+ if stdout:
+ communicate = True
+
+ if capture:
+ stdout = stdout or subprocess.PIPE
+ stderr = subprocess.PIPE
+ communicate = True
+ else:
+ stderr = None
+
+ start = time.time()
+ process = None
+
+ try:
+ try:
+ cmd_bytes = [to_bytes(c) for c in cmd]
+ env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
+ process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)
+ except OSError as ex:
+ if ex.errno == errno.ENOENT:
+ raise ApplicationError('Required program "%s" not found.' % cmd[0])
+ raise
+
+ if communicate:
+ data_bytes = to_optional_bytes(data)
+ stdout_bytes, stderr_bytes = process.communicate(data_bytes)
+ stdout_text = to_optional_text(stdout_bytes, str_errors) or u''
+ stderr_text = to_optional_text(stderr_bytes, str_errors) or u''
+ else:
+ process.wait()
+ stdout_text, stderr_text = None, None
+ finally:
+ if process and process.returncode is None:
+ process.kill()
+ display.info('') # the process we're interrupting may have completed a partial line of output
+ display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
+
+ status = process.returncode
+ runtime = time.time() - start
+
+ display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
+
+ if status == 0:
+ return stdout_text, stderr_text
+
+ raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime)
+
+
+def common_environment():
+ """Common environment used for executing all programs."""
+ env = dict(
+ LC_ALL='en_US.UTF-8',
+ PATH=os.environ.get('PATH', os.path.defpath),
+ )
+
+ required = (
+ 'HOME',
+ )
+
+ optional = (
+ 'HTTPTESTER',
+ 'LD_LIBRARY_PATH',
+ 'SSH_AUTH_SOCK',
+ # MacOS High Sierra Compatibility
+ # http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
+ # Example configuration for macOS:
+ # export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
+ 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
+ 'ANSIBLE_KEEP_REMOTE_FILES',
+ # MacOS Homebrew Compatibility
+ # https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
+ # This may also be required to install pyyaml with libyaml support when libyaml is installed in a non-standard location.
+ # Example configuration for brew on macOS:
+ # export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
+ # export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
+ # However, this is not adequate for PyYAML 3.13, which is the latest version supported on Python 2.6.
+ # For that version the standard location must be used, or `pip install` must be invoked with additional options:
+ # --global-option=build_ext --global-option=-L{path_to_lib_dir}
+ 'LDFLAGS',
+ 'CFLAGS',
+ )
+
+ env.update(pass_vars(required=required, optional=optional))
+
+ return env
+
+
+def pass_vars(required, optional):
+ """
+ :type required: collections.Iterable[str]
+ :type optional: collections.Iterable[str]
+ :rtype: dict[str, str]
+ """
+ env = {}
+
+ for name in required:
+ if name not in os.environ:
+ raise MissingEnvironmentVariable(name)
+ env[name] = os.environ[name]
+
+ for name in optional:
+ if name not in os.environ:
+ continue
+ env[name] = os.environ[name]
+
+ return env
+
+
+def deepest_path(path_a, path_b):
+ """Return the deepest of two paths, or None if the paths are unrelated.
+ :type path_a: str
+ :type path_b: str
+ :rtype: str | None
+ """
+ if path_a == '.':
+ path_a = ''
+
+ if path_b == '.':
+ path_b = ''
+
+ if path_a.startswith(path_b):
+ return path_a or '.'
+
+ if path_b.startswith(path_a):
+ return path_b or '.'
+
+ return None
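+
+
+# Examples (paths are illustrative):
+#
+#   deepest_path('lib/ansible/modules', 'lib/ansible')  ->  'lib/ansible/modules'
+#   deepest_path('lib', 'lib/ansible')                  ->  'lib/ansible'
+#   deepest_path('bin', 'lib')                          ->  None
+#   deepest_path('.', 'lib')                            ->  'lib'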
+
+
+def remove_tree(path):
+ """
+ :type path: str
+ """
+ try:
+ shutil.rmtree(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+
+def is_binary_file(path):
+ """
+ :type path: str
+ :rtype: bool
+ """
+ assume_text = set([
+ '.cfg',
+ '.conf',
+ '.crt',
+ '.cs',
+ '.css',
+ '.html',
+ '.ini',
+ '.j2',
+ '.js',
+ '.json',
+ '.md',
+ '.pem',
+ '.ps1',
+ '.psm1',
+ '.py',
+ '.rst',
+ '.sh',
+ '.txt',
+ '.xml',
+ '.yaml',
+ '.yml',
+ ])
+
+ assume_binary = set([
+ '.bin',
+ '.eot',
+ '.gz',
+ '.ico',
+ '.iso',
+ '.jpg',
+ '.otf',
+ '.p12',
+ '.png',
+ '.pyc',
+ '.rpm',
+ '.ttf',
+ '.woff',
+ '.woff2',
+ '.zip',
+ ])
+
+ ext = os.path.splitext(path)[1]
+
+ if ext in assume_text:
+ return False
+
+ if ext in assume_binary:
+ return True
+
+ with open_binary_file(path) as path_fd:
+ # noinspection PyTypeChecker
+ return b'\0' in path_fd.read(4096)
+
+
+def generate_password():
+ """Generate a random password.
+ :rtype: str
+ """
+ chars = [
+ string.ascii_letters,
+ string.digits,
+ string.ascii_letters,
+ string.digits,
+ '-',
+ ] * 4
+
+ password = ''.join([random.choice(char) for char in chars[:-1]])
+
+ display.sensitive.add(password)
+
+ return password
+
+
+class Display:
+ """Manages color console output."""
+ clear = '\033[0m'
+ red = '\033[31m'
+ green = '\033[32m'
+ yellow = '\033[33m'
+ blue = '\033[34m'
+ purple = '\033[35m'
+ cyan = '\033[36m'
+
+ verbosity_colors = {
+ 0: None,
+ 1: green,
+ 2: blue,
+ 3: cyan,
+ }
+
+ def __init__(self):
+ self.verbosity = 0
+ self.color = sys.stdout.isatty()
+ self.warnings = []
+ self.warnings_unique = set()
+ self.info_stderr = False
+ self.rows = 0
+ self.columns = 0
+ self.truncate = 0
+ self.redact = True
+ self.sensitive = set()
+
+ if os.isatty(0):
+ self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
+
+ def __warning(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr)
+
+ def review_warnings(self):
+ """Review all warnings which previously occurred."""
+ if not self.warnings:
+ return
+
+ self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
+
+ for warning in self.warnings:
+ self.__warning(warning)
+
+ def warning(self, message, unique=False, verbosity=0):
+ """
+ :type message: str
+ :type unique: bool
+ :type verbosity: int
+ """
+ if verbosity > self.verbosity:
+ return
+
+ if unique:
+ if message in self.warnings_unique:
+ return
+
+ self.warnings_unique.add(message)
+
+ self.__warning(message)
+ self.warnings.append(message)
+
+ def notice(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr)
+
+ def error(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr)
+
+ def info(self, message, verbosity=0, truncate=False):
+ """
+ :type message: str
+ :type verbosity: int
+ :type truncate: bool
+ """
+ if self.verbosity >= verbosity:
+ color = self.verbosity_colors.get(verbosity, self.yellow)
+ self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate)
+
+ def print_message(self, message, color=None, fd=sys.stdout, truncate=False): # pylint: disable=locally-disabled, invalid-name
+ """
+ :type message: str
+ :type color: str | None
+ :type fd: file
+ :type truncate: bool
+ """
+ if self.redact and self.sensitive:
+ for item in self.sensitive:
+ if not item:
+ continue
+
+ message = message.replace(item, '*' * len(item))
+
+ if truncate:
+ if len(message) > self.truncate > 5:
+ message = message[:self.truncate - 5] + ' ...'
+
+ if color and self.color:
+ # convert color resets in message to desired color
+ message = message.replace(self.clear, color)
+ message = '%s%s%s' % (color, message, self.clear)
+
+ if sys.version_info[0] == 2:
+ message = to_bytes(message)
+
+ print(message, file=fd)
+ fd.flush()
+
+
+class ApplicationError(Exception):
+ """General application error."""
+
+
+class ApplicationWarning(Exception):
+ """General application warning which interrupts normal program flow."""
+
+
+class SubprocessError(ApplicationError):
+ """Error resulting from failed subprocess execution."""
+ def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None):
+ """
+ :type cmd: list[str]
+ :type status: int
+ :type stdout: str | None
+ :type stderr: str | None
+ :type runtime: float | None
+ """
+ message = 'Command "%s" returned exit status %s.\n' % (' '.join(cmd_quote(c) for c in cmd), status)
+
+ if stderr:
+ message += '>>> Standard Error\n'
+ message += '%s%s\n' % (stderr.strip(), Display.clear)
+
+ if stdout:
+ message += '>>> Standard Output\n'
+ message += '%s%s\n' % (stdout.strip(), Display.clear)
+
+ message = message.strip()
+
+ super(SubprocessError, self).__init__(message)
+
+ self.cmd = cmd
+ self.message = message
+ self.status = status
+ self.stdout = stdout
+ self.stderr = stderr
+ self.runtime = runtime
+
+
+class MissingEnvironmentVariable(ApplicationError):
+ """Error caused by missing environment variable."""
+ def __init__(self, name):
+ """
+ :type name: str
+ """
+ super(MissingEnvironmentVariable, self).__init__('Missing environment variable: %s' % name)
+
+ self.name = name
+
+
+def parse_to_list_of_dict(pattern, value):
+ """
+ :type pattern: str
+ :type value: str
+ :return: list[dict[str, str]]
+ """
+ matched = []
+ unmatched = []
+
+ for line in value.splitlines():
+ match = re.search(pattern, line)
+
+ if match:
+ matched.append(match.groupdict())
+ else:
+ unmatched.append(line)
+
+ if unmatched:
+ raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
+
+ return matched
+
+
+def get_available_port():
+ """
+ :rtype: int
+ """
+ # this relies on the kernel not reusing previously assigned ports immediately
+ socket_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ with contextlib.closing(socket_fd):
+ socket_fd.bind(('', 0))
+ return socket_fd.getsockname()[1]
+
+
+def get_subclasses(class_type): # type: (t.Type[C]) -> t.Set[t.Type[C]]
+ """Returns the set of types that are concrete subclasses of the given type."""
+ subclasses = set() # type: t.Set[t.Type[C]]
+ queue = [class_type] # type: t.List[t.Type[C]]
+
+ while queue:
+ parent = queue.pop()
+
+ for child in parent.__subclasses__():
+ if child not in subclasses:
+ if not inspect.isabstract(child):
+ subclasses.add(child)
+ queue.append(child)
+
+ return subclasses
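+
+
+# Illustrative sketch of the plugin pattern this enables (the classes are
+# hypothetical): concrete subclasses are discovered without explicit
+# registration, while abstract intermediates are walked but excluded.
+#
+#   from abc import abstractmethod
+#
+#   class Provider(ABC):
+#       @abstractmethod
+#       def provision(self):
+#           pass
+#
+#   class AwsProvider(Provider):
+#       def provision(self):
+#           return 'aws'
+#
+#   get_subclasses(Provider)  ->  {AwsProvider}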
+
+
+def is_subdir(candidate_path, path): # type: (str, str) -> bool
+ """Returns true if candidate_path is path or a subdirectory of path."""
+ if not path.endswith(os.path.sep):
+ path += os.path.sep
+
+ if not candidate_path.endswith(os.path.sep):
+ candidate_path += os.path.sep
+
+ return candidate_path.startswith(path)
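+
+
+# Examples (paths are illustrative); the appended separators keep the string
+# comparison from matching sibling directories that share a prefix:
+#
+#   is_subdir('lib/ansible/modules', 'lib/ansible')  ->  True
+#   is_subdir('lib/ansible', 'lib/ansible')          ->  True
+#   is_subdir('lib/ansible2', 'lib/ansible')         ->  False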
+
+
+def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
+ """Returns a list of directories extracted from the given list of paths."""
+ dir_names = set()
+
+ for path in paths:
+ while True:
+ path = os.path.dirname(path)
+
+ if not path or path == os.path.sep:
+ break
+
+ dir_names.add(path + os.path.sep)
+
+ return sorted(dir_names)
+
+
+def str_to_version(version): # type: (str) -> t.Tuple[int, ...]
+ """Return a version tuple from a version string."""
+ return tuple(int(n) for n in version.split('.'))
+
+
+def version_to_str(version): # type: (t.Tuple[int, ...]) -> str
+ """Return a version string from a version tuple."""
+ return '.'.join(str(n) for n in version)
+
+
+def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None
+ """
+ Import plugins from the given directory relative to the given root.
+ If the root is not provided, the 'lib' directory for the test runner will be used.
+ """
+ if root is None:
+ root = os.path.dirname(__file__)
+
+ path = os.path.join(root, directory)
+ package = __name__.rsplit('.', 1)[0]
+ prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.'))
+
+ for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
+ module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py')
+ load_module(module_path, name)
+
+
+def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
+ """
+ Load plugins of the specified type and track them in the specified database.
+ Only plugins which have already been imported will be loaded.
+ """
+ plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]]
+
+ for plugin in plugins:
+ database[plugin] = plugins[plugin]
+
+
+def load_module(path, name): # type: (str, str) -> None
+ """Load a Python module using the given name and path."""
+ if name in sys.modules:
+ return
+
+ if sys.version_info >= (3, 4):
+ # noinspection PyUnresolvedReferences
+ import importlib.util
+
+ # noinspection PyUnresolvedReferences
+ spec = importlib.util.spec_from_file_location(name, path)
+ # noinspection PyUnresolvedReferences
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ sys.modules[name] = module
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ # load_source (and thus load_module) require a file opened with `open` in text mode
+ with open(to_bytes(path)) as module_file:
+ # noinspection PyDeprecation
+ imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE))
+
+
+@contextlib.contextmanager
+def tempdir(): # type: () -> str
+ """Creates a temporary directory that is deleted outside the context scope."""
+ temp_path = tempfile.mkdtemp()
+ yield temp_path
+ shutil.rmtree(temp_path)
+
+
+@contextlib.contextmanager
+def open_zipfile(path, mode='r'):
+ """Opens a zip file and closes the file automatically."""
+ zip_obj = zipfile.ZipFile(path, mode=mode)
+ yield zip_obj
+ zip_obj.close()
+
+
+display = Display() # pylint: disable=locally-disabled, invalid-name
diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py
new file mode 100644
index 00000000..1ac2e60d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util_common.py
@@ -0,0 +1,487 @@
+"""Common utility code that depends on CommonConfig."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import contextlib
+import os
+import shutil
+import sys
+import tempfile
+import textwrap
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+)
+
+from .util import (
+ common_environment,
+ COVERAGE_CONFIG_NAME,
+ display,
+ find_python,
+ remove_tree,
+ MODE_DIRECTORY,
+ MODE_FILE_EXECUTE,
+ PYTHON_PATHS,
+ raw_command,
+ read_lines_without_comments,
+ ANSIBLE_TEST_DATA_ROOT,
+ ApplicationError,
+)
+
+from .io import (
+ write_text_file,
+ write_json_file,
+)
+
+from .data import (
+ data_context,
+)
+
+from .provider.layout import (
+ LayoutMessages,
+)
+
+DOCKER_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+REMOTE_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+NETWORK_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+
+
+class ResultType:
+ """Test result type."""
+ BOT = None # type: ResultType
+ COVERAGE = None # type: ResultType
+ DATA = None # type: ResultType
+ JUNIT = None # type: ResultType
+ LOGS = None # type: ResultType
+ REPORTS = None # type: ResultType
+ TMP = None # type: ResultType
+
+ @staticmethod
+ def _populate():
+ ResultType.BOT = ResultType('bot')
+ ResultType.COVERAGE = ResultType('coverage')
+ ResultType.DATA = ResultType('data')
+ ResultType.JUNIT = ResultType('junit')
+ ResultType.LOGS = ResultType('logs')
+ ResultType.REPORTS = ResultType('reports')
+ ResultType.TMP = ResultType('.tmp')
+
+ def __init__(self, name): # type: (str) -> None
+ self.name = name
+
+ @property
+ def relative_path(self): # type: () -> str
+ """The content relative path to the results."""
+ return os.path.join(data_context().content.results_path, self.name)
+
+ @property
+ def path(self): # type: () -> str
+ """The absolute path to the results."""
+ return os.path.join(data_context().content.root, self.relative_path)
+
+ def __str__(self): # type: () -> str
+ return self.name
+
+
+# noinspection PyProtectedMember
+ResultType._populate() # pylint: disable=protected-access
+
+
+class CommonConfig:
+ """Configuration common to all commands."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ self.command = command
+
+ self.color = args.color # type: bool
+ self.explain = args.explain # type: bool
+ self.verbosity = args.verbosity # type: int
+ self.debug = args.debug # type: bool
+ self.truncate = args.truncate # type: int
+ self.redact = args.redact # type: bool
+
+ self.info_stderr = False # type: bool
+
+ self.cache = {}
+
+ def get_ansible_config(self): # type: () -> str
+ """Return the path to the Ansible config for the given config."""
+ return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg')
+
+
+class NetworkPlatformSettings:
+ """Settings required for provisioning a network platform."""
+ def __init__(self, collection, inventory_vars): # type: (str, t.Dict[str, str]) -> None
+ self.collection = collection
+ self.inventory_vars = inventory_vars
+
+
+def get_docker_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(DOCKER_COMPLETION, 'docker')
+
+
+def get_remote_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(REMOTE_COMPLETION, 'remote')
+
+
+def get_network_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(NETWORK_COMPLETION, 'network')
+
+
+def get_parameterized_completion(cache, name):
+ """
+ :type cache: dict[str, dict[str, str]]
+ :type name: str
+ :rtype: dict[str, dict[str, str]]
+ """
+ if not cache:
+ if data_context().content.collection:
+ context = 'collection'
+ else:
+ context = 'ansible-base'
+
+ images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
+
+ cache.update(dict(kvp for kvp in [parse_parameterized_completion(i) for i in images] if kvp and kvp[1].get('context', context) == context))
+
+ return cache
+
+
+def parse_parameterized_completion(value): # type: (str) -> t.Optional[t.Tuple[str, t.Dict[str, str]]]
+ """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+ values = value.split()
+
+ if not values:
+ return None
+
+ name = values[0]
+ data = dict((kvp[0], kvp[1] if len(kvp) > 1 else '') for kvp in [item.split('=', 1) for item in values[1:]])
+
+ return name, data
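+
+
+# Example (the entry is illustrative): a completion line such as
+#
+#   'centos8 python=3.6 context=ansible-base'
+#
+# parses to ('centos8', {'python': '3.6', 'context': 'ansible-base'});
+# a bare key without '=' maps to the empty string.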
+
+
+def docker_qualify_image(name):
+ """
+ :type name: str
+ :rtype: str
+ """
+ config = get_docker_completion().get(name, {})
+
+ return config.get('name', name)
+
+
+def get_network_settings(args, platform, version): # type: (NetworkIntegrationConfig, str, str) -> NetworkPlatformSettings
+ """Returns settings for the given network platform and version."""
+ platform_version = '%s/%s' % (platform, version)
+ completion = get_network_completion().get(platform_version, {})
+ collection = args.platform_collection.get(platform, completion.get('collection'))
+
+ settings = NetworkPlatformSettings(
+ collection,
+ dict(
+ ansible_connection=args.platform_connection.get(platform, completion.get('connection')),
+ ansible_network_os='%s.%s' % (collection, platform) if collection else platform,
+ )
+ )
+
+ return settings
+
+
+def handle_layout_messages(messages): # type: (t.Optional[LayoutMessages]) -> None
+ """Display the given layout messages."""
+ if not messages:
+ return
+
+ for message in messages.info:
+ display.info(message, verbosity=1)
+
+ for message in messages.warning:
+ display.warning(message)
+
+ if messages.error:
+ raise ApplicationError('\n'.join(messages.error))
+
+
+@contextlib.contextmanager
+def named_temporary_file(args, prefix, suffix, directory, content):
+ """
+ :param args: CommonConfig
+ :param prefix: str
+ :param suffix: str
+ :param directory: str
+ :param content: str | bytes | unicode
+ :rtype: str
+ """
+ if args.explain:
+ yield os.path.join(directory, '%stemp%s' % (prefix, suffix))
+ else:
+ with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
+ tempfile_fd.write(to_bytes(content))
+ tempfile_fd.flush()
+
+ yield tempfile_fd.name
+
+
+def write_json_test_results(category, # type: ResultType
+ name, # type: str
+ content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+ formatted=True, # type: bool
+ encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]]
+ ): # type: (...) -> None
+ """Write the given json content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
+
+
+def write_text_test_results(category, name, content): # type: (ResultType, str, str) -> None
+ """Write the given text content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_text_file(path, content, create_directories=True)
+
+
+def get_python_path(args, interpreter):
+ """
+ :type args: TestConfig
+ :type interpreter: str
+ :rtype: str
+ """
+ python_path = PYTHON_PATHS.get(interpreter)
+
+ if python_path:
+ return python_path
+
+ prefix = 'python-'
+ suffix = '-ansible'
+
+ root_temp_dir = '/tmp'
+
+ if args.explain:
+ return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix)))
+
+ python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+ injected_interpreter = os.path.join(python_path, 'python')
+
+ # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments.
+ # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter.
+ use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter)
+
+ if use_symlink:
+ display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ os.symlink(interpreter, injected_interpreter)
+ else:
+ display.info('Injecting "%s" as a execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ create_interpreter_wrapper(interpreter, injected_interpreter)
+
+ os.chmod(python_path, MODE_DIRECTORY)
+
+ if not PYTHON_PATHS:
+ atexit.register(cleanup_python_paths)
+
+ PYTHON_PATHS[interpreter] = python_path
+
+ return python_path
+
+
+def create_temp_dir(prefix=None, suffix=None, base_dir=None): # type: (t.Optional[str], t.Optional[str], t.Optional[str]) -> str
+ """Create a temporary directory that persists until the current process exits."""
+ temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir)
+ atexit.register(remove_tree, temp_path)
+ return temp_path
+
+
+def create_interpreter_wrapper(interpreter, injected_interpreter): # type: (str, str) -> None
+ """Create a wrapper for the given Python interpreter at the specified path."""
+ # sys.executable is used for the shebang to guarantee it is a binary instead of a script
+ # injected_interpreter could be a script from the system or our own wrapper created for the --venv option
+ shebang_interpreter = sys.executable
+
+ code = textwrap.dedent('''
+ #!%s
+
+ from __future__ import absolute_import
+
+ from os import execv
+ from sys import argv
+
+ python = '%s'
+
+ execv(python, [python] + argv[1:])
+ ''' % (shebang_interpreter, interpreter)).lstrip()
+
+ write_text_file(injected_interpreter, code)
+
+ os.chmod(injected_interpreter, MODE_FILE_EXECUTE)
+
+
+def cleanup_python_paths():
+ """Clean up all temporary python directories."""
+ for path in sorted(PYTHON_PATHS.values()):
+ display.info('Cleaning up temporary python directory: %s' % path, verbosity=2)
+ shutil.rmtree(path)
+
+
+def get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=None):
+ """
+ :type args: TestConfig
+ :type target_name: str
+ :type version: str
+ :type temp_path: str
+ :type module_coverage: bool
+ :type remote_temp_path: str | None
+ :rtype: dict[str, str]
+ """
+ if temp_path:
+ # integration tests (both localhost and the optional testhost)
+ # config and results are in a temporary directory
+ coverage_config_base_path = temp_path
+ coverage_output_base_path = temp_path
+ elif args.coverage_config_base_path:
+ # unit tests, sanity tests and other special cases (localhost only)
+ # config is in a temporary directory
+ # results are in the source tree
+ coverage_config_base_path = args.coverage_config_base_path
+ coverage_output_base_path = os.path.join(data_context().content.root, data_context().content.results_path)
+ else:
+ raise Exception('No temp path and no coverage config base path. Check for missing coverage_context usage.')
+
+ config_file = os.path.join(coverage_config_base_path, COVERAGE_CONFIG_NAME)
+ coverage_file = os.path.join(coverage_output_base_path, ResultType.COVERAGE.name, '%s=%s=%s=%s=coverage' % (
+ args.command, target_name, args.coverage_label or 'local-%s' % version, 'python-%s' % version))
+
+ if not args.explain and not os.path.exists(config_file):
+ raise Exception('Missing coverage config file: %s' % config_file)
+
+ if args.coverage_check:
+ # cause the 'coverage' module to be found, but not imported or enabled
+ coverage_file = ''
+
+ # Enable code coverage collection on local Python programs (this does not include Ansible modules).
+ # Used by the injectors to support code coverage.
+ # Used by the pytest unit test plugin to support code coverage.
+ # The COVERAGE_FILE variable is also used directly by the 'coverage' module.
+ env = dict(
+ COVERAGE_CONF=config_file,
+ COVERAGE_FILE=coverage_file,
+ )
+
+ if module_coverage:
+ # Enable code coverage collection on Ansible modules (both local and remote).
+ # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage.
+ env.update(dict(
+ _ANSIBLE_COVERAGE_CONFIG=config_file,
+ _ANSIBLE_COVERAGE_OUTPUT=coverage_file,
+ ))
+
+ if remote_temp_path:
+ # Include the command, target and label so the remote host can create a filename with that info. The remote
+ # is responsible for adding '={language version}=coverage.{hostname}.{pid}.{id}'
+ env['_ANSIBLE_COVERAGE_REMOTE_OUTPUT'] = os.path.join(remote_temp_path, '%s=%s=%s' % (
+ args.command, target_name, args.coverage_label or 'remote'))
+ env['_ANSIBLE_COVERAGE_REMOTE_WHITELIST'] = os.path.join(data_context().content.root, '*')
+
+ return env
+
+
+def intercept_command(args, cmd, target_name, env, capture=False, data=None, cwd=None, python_version=None, temp_path=None, module_coverage=True,
+ virtualenv=None, disable_coverage=False, remote_temp_path=None):
+ """
+ :type args: TestConfig
+ :type cmd: collections.Iterable[str]
+ :type target_name: str
+ :type env: dict[str, str]
+ :type capture: bool
+ :type data: str | None
+ :type cwd: str | None
+ :type python_version: str | None
+ :type temp_path: str | None
+ :type module_coverage: bool
+ :type virtualenv: str | None
+ :type disable_coverage: bool
+ :type remote_temp_path: str | None
+ :rtype: str | None, str | None
+ """
+ if not env:
+ env = common_environment()
+ else:
+ env = env.copy()
+
+ cmd = list(cmd)
+ version = python_version or args.python_version
+ interpreter = virtualenv or find_python(version)
+ inject_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
+
+ if not virtualenv:
+ # injection of python into the path is required when not activating a virtualenv
+ # otherwise scripts may find the wrong interpreter or possibly no interpreter
+ python_path = get_python_path(args, interpreter)
+ inject_path = python_path + os.path.pathsep + inject_path
+
+ env['PATH'] = inject_path + os.path.pathsep + env['PATH']
+ env['ANSIBLE_TEST_PYTHON_VERSION'] = version
+ env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = interpreter
+
+ if args.coverage and not disable_coverage:
+ # add the necessary environment variables to enable code coverage collection
+ env.update(get_coverage_environment(args, target_name, version, temp_path, module_coverage,
+ remote_temp_path=remote_temp_path))
+
+ return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd)
+
+
+def resolve_csharp_ps_util(import_name, path):
+ """
+ :type import_name: str
+ :type path: str
+ """
+ if data_context().content.is_ansible or not import_name.startswith('.'):
+ # We don't support relative paths for builtin utils, there's no point.
+ return import_name
+
+ packages = import_name.split('.')
+ module_packages = path.split(os.path.sep)
+
+ for package in packages:
+ if not module_packages or package:
+ break
+ del module_packages[-1]
+
+ return 'ansible_collections.%s%s' % (data_context().content.prefix,
+ '.'.join(module_packages + [p for p in packages if p]))
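+
+
+# Example (names and collection prefix are illustrative): inside a collection
+# whose prefix is 'ns.col.', a relative import from a module resolves as:
+#
+#   resolve_csharp_ps_util('..module_utils.MyUtil', 'plugins/modules/my_module.ps1')
+#       ->  'ansible_collections.ns.col.plugins.module_utils.MyUtil'
+#
+# Each leading dot in the import discards one trailing component of the path.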
+
+
+def run_command(args, cmd, capture=False, env=None, data=None, cwd=None, always=False, stdin=None, stdout=None,
+ cmd_verbosity=1, str_errors='strict'):
+ """
+ :type args: CommonConfig
+ :type cmd: collections.Iterable[str]
+ :type capture: bool
+ :type env: dict[str, str] | None
+ :type data: str | None
+ :type cwd: str | None
+ :type always: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type cmd_verbosity: int
+ :type str_errors: str
+ :rtype: str | None, str | None
+ """
+ explain = args.explain and not always
+ return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout,
+ cmd_verbosity=cmd_verbosity, str_errors=str_errors)
diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py
new file mode 100644
index 00000000..37eef367
--- /dev/null
+++ b/test/lib/ansible_test/_internal/venv.py
@@ -0,0 +1,227 @@
+"""Virtual environment management."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+from . import types as t
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ find_python,
+ SubprocessError,
+ get_available_python_versions,
+ SUPPORTED_PYTHON_VERSIONS,
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ remove_tree,
+)
+
+from .util_common import (
+ run_command,
+)
+
+
+def create_virtual_environment(args, # type: EnvironmentConfig
+ version, # type: str
+ path, # type: str
+ system_site_packages=False, # type: bool
+ pip=True, # type: bool
+ ): # type: (...) -> bool
+ """Create a virtual environment using venv or virtualenv for the requested Python version."""
+ if os.path.isdir(path):
+ display.info('Using existing Python %s virtual environment: %s' % (version, path), verbosity=1)
+ return True
+
+ python = find_python(version, required=False)
+ python_version = tuple(int(v) for v in version.split('.'))
+
+ if not python:
+ # the requested python version could not be found
+ return False
+
+ if python_version >= (3, 0):
+ # use the built-in 'venv' module on Python 3.x
+ # Creating a virtual environment using 'venv' while running in a virtual environment created by 'virtualenv'
+ # results in a copy of the original virtual environment instead of a new one.
+ # Avoid this issue by only using "real" python interpreters to invoke 'venv'.
+ for real_python in iterate_real_pythons(args, version):
+ if run_venv(args, real_python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "venv": %s' % (version, path), verbosity=1)
+ return True
+
+ # Something went wrong. Most likely the package maintainer for the Python installation removed ensurepip,
+ # which prevents creation of a virtual environment without installation of other OS packages.
+
+ # use the installed 'virtualenv' module on the Python requested version
+ if run_virtualenv(args, python, python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv": %s' % (version, path), verbosity=1)
+ return True
+
+ available_pythons = get_available_python_versions(SUPPORTED_PYTHON_VERSIONS)
+
+ for available_python_version, available_python_interpreter in sorted(available_pythons.items()):
+ virtualenv_version = get_virtualenv_version(args, available_python_interpreter)
+
+ if not virtualenv_version:
+ # virtualenv not available for this Python or we were unable to detect the version
+ continue
+
+ if python_version == (2, 6) and virtualenv_version >= (16, 0, 0):
+ # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16
+ continue
+
+ # try using 'virtualenv' from another Python to set up the desired version
+ if run_virtualenv(args, available_python_interpreter, python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv" on Python %s: %s' % (version, available_python_version, path), verbosity=1)
+ return True
+
+ # no suitable 'virtualenv' available
+ return False
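+
+
+# Usage sketch (the path is hypothetical): callers treat a False return as
+# "no virtualenv strategy available" and raise their own error, for example:
+#
+#   if not create_virtual_environment(args, '3.8', '/tmp/ansible-venv-3.8'):
+#       raise ApplicationError('No virtual environment support for Python 3.8.')
+#
+# (ApplicationError here would come from .util.)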
+
+
+def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t.Iterable[str]
+ """
+ Iterate through available real python interpreters of the requested version.
+ The current interpreter will be checked and then the path will be searched.
+ """
+ version_info = tuple(int(n) for n in version.split('.'))
+ current_python = None
+
+ if version_info == sys.version_info[:len(version_info)]:
+ current_python = sys.executable
+ real_prefix = get_python_real_prefix(args, current_python)
+
+ if real_prefix:
+ current_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if current_python:
+ yield current_python
+
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if not path:
+ return
+
+ found_python = find_python(version, path)
+
+ if not found_python:
+ return
+
+ if found_python == current_python:
+ return
+
+ real_prefix = get_python_real_prefix(args, found_python)
+
+ if real_prefix:
+ found_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if found_python:
+ yield found_python
+
+
+def get_python_real_prefix(args, path): # type: (EnvironmentConfig, str) -> t.Optional[str]
+ """
+ Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'.
+ """
+ cmd = [path, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'virtualenvcheck.py')]
+ check_result = json.loads(run_command(args, cmd, capture=True, always=True)[0])
+ real_prefix = check_result['real_prefix']
+ return real_prefix
+
+
+def run_venv(args, # type: EnvironmentConfig
+ run_python, # type: str
+ system_site_packages, # type: bool
+ pip, # type: bool
+ path, # type: str
+ ): # type: (...) -> bool
+ """Create a virtual environment using the 'venv' module. Not available on Python 2.x."""
+ cmd = [run_python, '-m', 'venv']
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--without-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex)
+
+ return False
+
+ return True
+
+
+def run_virtualenv(args, # type: EnvironmentConfig
+ run_python, # type: str
+ env_python, # type: str
+ system_site_packages, # type: bool
+ pip, # type: bool
+ path, # type: str
+ ): # type: (...) -> bool
+ """Create a virtual environment using the 'virtualenv' module."""
+ # always specify --python to guarantee the desired interpreter is provided
+ # otherwise virtualenv may select a different interpreter than the one running virtualenv
+ cmd = [run_python, '-m', 'virtualenv', '--python', env_python]
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--no-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex)
+
+ return False
+
+ return True
+
+
+def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]]
+ """Get the virtualenv version for the given python intepreter, if available."""
+ try:
+ return get_virtualenv_version.result
+ except AttributeError:
+ pass
+
+ get_virtualenv_version.result = None
+
+ cmd = [python, '-m', 'virtualenv', '--version']
+
+ try:
+ stdout = run_command(args, cmd, capture=True)[0]
+ except SubprocessError as ex:
+ if args.verbosity > 1:
+ display.error(ex)
+
+ stdout = ''
+
+ if stdout:
+ # noinspection PyBroadException
+ try:
+ get_virtualenv_version.result = tuple(int(v) for v in stdout.strip().split('.'))
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ return get_virtualenv_version.result
diff --git a/test/lib/ansible_test/config/cloud-config-aws.ini.template b/test/lib/ansible_test/config/cloud-config-aws.ini.template
new file mode 100644
index 00000000..88b9fea6
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-aws.ini.template
@@ -0,0 +1,26 @@
+# This is the configuration template for ansible-test AWS integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned AWS credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary AWS credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of AWS credentials requires an ansible-core-ci API key.
+
+[default]
+aws_access_key: @ACCESS_KEY
+aws_secret_key: @SECRET_KEY
+security_token: @SECURITY_TOKEN
+aws_region: @REGION
+# aws_cleanup controls whether the environment is cleaned up after tests have completed
+# This only applies to tests that have a cleanup stage
+# Defaults to true when using this template
+# aws_cleanup: true
+# aliases for backwards compatibility with older integration test playbooks
+ec2_access_key: {{ aws_access_key }}
+ec2_secret_key: {{ aws_secret_key }}
+ec2_region: {{ aws_region }}
diff --git a/test/lib/ansible_test/config/cloud-config-azure.ini.template b/test/lib/ansible_test/config/cloud-config-azure.ini.template
new file mode 100644
index 00000000..ac5266ba
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-azure.ini.template
@@ -0,0 +1,32 @@
+# This is the configuration template for ansible-test Azure integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Azure credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Azure credentials,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Azure credentials requires one of:
+# 1) ansible-core-ci API key in ~/.ansible-core-ci.key
+# 2) Sherlock URL (including API key) in ~/.ansible-sherlock-ci.cfg
+
+[default]
+# Provide either Service Principal or Active Directory credentials below.
+
+# Service Principal
+AZURE_CLIENT_ID:
+AZURE_SECRET:
+AZURE_SUBSCRIPTION_ID:
+AZURE_TENANT:
+
+# Active Directory
+AZURE_AD_USER:
+AZURE_PASSWORD:
+AZURE_SUBSCRIPTION_ID:
+
+# Resource Groups
+RESOURCE_GROUP:
+RESOURCE_GROUP_SECONDARY:
diff --git a/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
new file mode 100644
index 00000000..1c99e9b8
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
@@ -0,0 +1,9 @@
+# This is the configuration template for ansible-test cloudscale integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+
+[default]
+cloudscale_api_token = @API_TOKEN
diff --git a/test/lib/ansible_test/config/cloud-config-cs.ini.template b/test/lib/ansible_test/config/cloud-config-cs.ini.template
new file mode 100644
index 00000000..f8d8a915
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-cs.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test CloudStack integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned cloudstack-sim docker container in ansible-test.
+#
+# If you do not want to use the automatically provided CloudStack simulator,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the simulator.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the simulator.
+
+[default]
+endpoint = http://@HOST:@PORT/client/api
+key = @KEY
+secret = @SECRET
+timeout = 60
diff --git a/test/lib/ansible_test/config/cloud-config-gcp.ini.template b/test/lib/ansible_test/config/cloud-config-gcp.ini.template
new file mode 100644
index 00000000..00a20971
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-gcp.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test GCP integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned GCP simulator in ansible-test.
+#
+# If you do not want to use the automatically provided GCP simulator,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the simulator.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the simulator.
+
+[default]
+gcp_project: @PROJECT
+gcp_cred_file: @CRED_FILE
+gcp_cred_kind: @CRED_KIND
+gcp_cred_email: @CRED_EMAIL
diff --git a/test/lib/ansible_test/config/cloud-config-hcloud.ini.template b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template
new file mode 100644
index 00000000..8db658db
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template
@@ -0,0 +1,15 @@
+# This is the configuration template for ansible-test Hetzner Cloud integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Hetzner Cloud credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Hetzner Cloud credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Hetzner Cloud credentials requires an ansible-core-ci API key.
+
+[default]
+hcloud_api_token = @TOKEN
diff --git a/test/lib/ansible_test/config/cloud-config-opennebula.ini.template b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template
new file mode 100644
index 00000000..00c56db1
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template
@@ -0,0 +1,20 @@
+# This is the configuration template for ansible-test OpenNebula integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Running integration tests against previously recorded XMLRPC fixtures.
+#
+# If you want to test against a Live OpenNebula platform,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+#
+# If you run with @FIXTURES enabled (true), you can choose between @REPLAY mode (true)
+# and record mode (false).
+
+[default]
+opennebula_url: @URL
+opennebula_username: @USERNAME
+opennebula_password: @PASSWORD
+opennebula_test_fixture: @FIXTURES
+opennebula_test_fixture_replay: @REPLAY
\ No newline at end of file
diff --git a/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
new file mode 100644
index 00000000..0a10f23b
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
@@ -0,0 +1,12 @@
+# This is the configuration template for ansible-test OpenShift integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned openshift-origin docker container in ansible-test.
+#
+# If you do not want to use the automatically provided OpenShift container,
+# place your kubeconfig file next to this file, with the same name, but without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the automatically provided container.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the automatically provided container.
diff --git a/test/lib/ansible_test/config/cloud-config-scaleway.ini.template b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template
new file mode 100644
index 00000000..f10419e0
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template
@@ -0,0 +1,13 @@
+# This is the configuration template for ansible-test Scaleway integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+# If you want to test against the Scaleway public API,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+
+[default]
+key = @KEY
+org = @ORG
diff --git a/test/lib/ansible_test/config/cloud-config-tower.ini.template b/test/lib/ansible_test/config/cloud-config-tower.ini.template
new file mode 100644
index 00000000..c76740ab
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-tower.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test Tower integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Tower credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Tower credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Tower credentials requires an ansible-core-ci API key.
+
+[default]
+version=@VERSION
+host=@HOST
+username=@USERNAME
+password=@PASSWORD
diff --git a/test/lib/ansible_test/config/cloud-config-vcenter.ini.template b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template
new file mode 100644
index 00000000..eff8bf74
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template
@@ -0,0 +1,26 @@
+# This is the configuration template for ansible-test VMware integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned VMware credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary VMware credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of VMware credentials requires an ansible-core-ci API key.
+
+[DEFAULT]
+vcenter_username: @VMWARE_USERNAME
+vcenter_password: @VMWARE_PASSWORD
+vcenter_hostname: @VMWARE_HOSTNAME
+vmware_validate_certs: @VMWARE_VALIDATE_CERTS
+esxi1_username: @ESXI1_USERNAME
+esxi1_hostname: @ESXI1_HOSTNAME
+esxi1_password: @ESXI1_PASSWORD
+esxi2_username: @ESXI2_USERNAME
+esxi2_hostname: @ESXI2_HOSTNAME
+esxi2_password: @ESXI2_PASSWORD
+vmware_proxy_host: @VMWARE_PROXY_HOST
+vmware_proxy_port: @VMWARE_PROXY_PORT
diff --git a/test/lib/ansible_test/config/cloud-config-vultr.ini.template b/test/lib/ansible_test/config/cloud-config-vultr.ini.template
new file mode 100644
index 00000000..48b82108
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-vultr.ini.template
@@ -0,0 +1,12 @@
+# This is the configuration template for ansible-test Vultr integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+# If you want to test against the Vultr public API,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+
+[default]
+key = @KEY
diff --git a/test/lib/ansible_test/config/inventory.networking.template b/test/lib/ansible_test/config/inventory.networking.template
new file mode 100644
index 00000000..a1545684
--- /dev/null
+++ b/test/lib/ansible_test/config/inventory.networking.template
@@ -0,0 +1,42 @@
+# This is the configuration template for ansible-test network-integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the `--platform` option to provision temporary network instances on EC2.
+#
+# If you do not want to use the automatically provisioned temporary network instances,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+#
+# NOTE: Automatic provisioning of network instances on EC2 requires an ansible-core-ci API key.
+
+[@OS]
+@NAME ansible_connection="local" ansible_host=@HOST ansible_network_os="@OS" ansible_user="@USER" ansible_ssh_private_key_file="@KEY_FILE"
+
+[aci:vars]
+aci_hostname=your-apic-1
+aci_username=admin
+aci_password=your-apic-password
+aci_validate_certs=no
+aci_use_ssl=yes
+aci_use_proxy=no
+
+[aci]
+localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+[mso:vars]
+mso_hostname=your-mso-1
+mso_username=admin
+mso_password=your-mso-password
+mso_validate_certs=no
+mso_use_ssl=yes
+mso_use_proxy=no
+
+[mso]
+localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+###
+# Example
+#
+# [vyos]
+# vyos01.example.net ansible_connection=local ansible_network_os="vyos" ansible_user=admin ansible_ssh_pass=mypassword
diff --git a/test/lib/ansible_test/config/inventory.winrm.template b/test/lib/ansible_test/config/inventory.winrm.template
new file mode 100644
index 00000000..34bbee2d
--- /dev/null
+++ b/test/lib/ansible_test/config/inventory.winrm.template
@@ -0,0 +1,28 @@
+# This is the configuration template for ansible-test windows-integration tests.
+# It can also be used with the legacy `make`-based method of running tests.
+#
+# You do not need this template if you are:
+#
+# 1) Using the `--windows` option to provision temporary Windows instances on EC2.
+#
+# If you do not want to use the automatically provisioned temporary Windows instances,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+#
+# NOTE: Automatic provisioning of Windows instances on EC2 requires an ansible-core-ci API key.
+#
+# REMINDER: Standard ports for winrm are 5985 (HTTP) and 5986 (HTTPS).
+
+[windows]
+@NAME ansible_host=@HOST ansible_user=@USER ansible_password=@PASSWORD ansible_port=@PORT
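+# example with illustrative values only:
+# win2019 ansible_host=203.0.113.10 ansible_user=Administrator ansible_password=ExamplePass123! ansible_port=5986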
+
+[windows:vars]
+ansible_connection=winrm
+ansible_winrm_server_cert_validation=ignore
+
+# support winrm connection tests (temporary solution, does not support testing enable/disable of pipelining)
+[winrm:children]
+windows
+
+# support tests that target testhost
+[testhost:children]
+windows
diff --git a/test/sanity/code-smell/configure-remoting-ps1.json b/test/sanity/code-smell/configure-remoting-ps1.json
new file mode 100644
index 00000000..593b765d
--- /dev/null
+++ b/test/sanity/code-smell/configure-remoting-ps1.json
@@ -0,0 +1,4 @@
+{
+ "no_targets": true,
+ "output": "path-message"
+}
diff --git a/test/sanity/code-smell/configure-remoting-ps1.py b/test/sanity/code-smell/configure-remoting-ps1.py
new file mode 100755
index 00000000..51dff20c
--- /dev/null
+++ b/test/sanity/code-smell/configure-remoting-ps1.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+
+def main():
+ # required by external automated processes and should not be moved, renamed or converted to a symbolic link
+ original = 'examples/scripts/ConfigureRemotingForAnsible.ps1'
+ # required to be packaged with ansible-test and must match the original file, but cannot be a symbolic link
+ # the packaged version is needed to run tests when ansible-test has been installed
+ # keeping the packaged version identical to the original makes sure tests cover both files
+ packaged = 'test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1'
+
+ copy_valid = False
+
+ if os.path.isfile(original) and os.path.isfile(packaged):
+ with open(original, 'rb') as original_file:
+ original_content = original_file.read()
+
+ with open(packaged, 'rb') as packaged_file:
+ packaged_content = packaged_file.read()
+
+ if original_content == packaged_content:
+ copy_valid = True
+
+ if not copy_valid:
+ print('%s: must be an exact copy of "%s"' % (packaged, original))
+
+ for path in [original, packaged]:
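+        # walk up through every ancestor directory; each component must exist as a
+        # real directory and must not be a symbolic link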
+ directory = path
+
+ while True:
+ directory = os.path.dirname(directory)
+
+ if not directory:
+ break
+
+ if not os.path.isdir(directory):
+ print('%s: must be a directory' % directory)
+
+ if os.path.islink(directory):
+ print('%s: cannot be a symbolic link' % directory)
+
+ if not os.path.isfile(path):
+ print('%s: must be a file' % path)
+
+ if os.path.islink(path):
+ print('%s: cannot be a symbolic link' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/deprecated-config.json b/test/sanity/code-smell/deprecated-config.json
new file mode 100644
index 00000000..4a884860
--- /dev/null
+++ b/test/sanity/code-smell/deprecated-config.json
@@ -0,0 +1,10 @@
+{
+ "all_targets": true,
+ "output": "path-message",
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/"
+ ]
+}
diff --git a/test/sanity/code-smell/deprecated-config.py b/test/sanity/code-smell/deprecated-config.py
new file mode 100755
index 00000000..08e93c36
--- /dev/null
+++ b/test/sanity/code-smell/deprecated-config.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2018, Matt Martz <matt@sivel.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import mmap
+import os
+import re
+import sys
+
+from distutils.version import StrictVersion
+
+import yaml
+
+import ansible.config
+
+from ansible.plugins.loader import fragment_loader
+from ansible.release import __version__ as ansible_version
+from ansible.utils.plugin_docs import get_docstring
+
+DOC_RE = re.compile(b'^DOCUMENTATION', flags=re.M)
+ANSIBLE_MAJOR = StrictVersion('.'.join(ansible_version.split('.')[:2]))
+
+
+def find_deprecations(obj, path=None):
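+    # depth-first walk over nested dicts/lists, yielding a (path, version) pair for each
+    # 'deprecated' entry whose removal version is at or below the current major version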
+ if not isinstance(obj, (list, dict)):
+ return
+
+ try:
+ items = obj.items()
+ except AttributeError:
+ items = enumerate(obj)
+
+ for key, value in items:
+ if path is None:
+ this_path = []
+ else:
+ this_path = path[:]
+
+ this_path.append(key)
+
+ if key != 'deprecated':
+ for result in find_deprecations(value, path=this_path):
+ yield result
+ else:
+ try:
+ version = value['version']
+ this_path.append('version')
+ except KeyError:
+ version = value['removed_in']
+ this_path.append('removed_in')
+ if StrictVersion(version) <= ANSIBLE_MAJOR:
+ yield (this_path, version)
+
+
+def main():
+ plugins = []
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
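+        # memory-map each file and scan it for a DOCUMENTATION block; empty files
+        # cannot be mapped (ValueError) and are skipped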
+ with open(path, 'rb') as f:
+ try:
+ mm_file = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
+ except ValueError:
+ continue
+ if DOC_RE.search(mm_file):
+ plugins.append(path)
+ mm_file.close()
+
+ for plugin in plugins:
+ data = {}
+ data['doc'], data['examples'], data['return'], data['metadata'] = get_docstring(plugin, fragment_loader)
+ for result in find_deprecations(data['doc']):
+ print(
+ '%s: %s is scheduled for removal in %s' % (plugin, '.'.join(str(i) for i in result[0][:-2]), result[1])
+ )
+
+ base = os.path.join(os.path.dirname(ansible.config.__file__), 'base.yml')
+ with open(base) as f:
+ data = yaml.safe_load(f)
+
+ for result in find_deprecations(data):
+ print('%s: %s is scheduled for removal in %s' % (base, '.'.join(str(i) for i in result[0][:-2]), result[1]))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/deprecated-config.requirements.txt b/test/sanity/code-smell/deprecated-config.requirements.txt
new file mode 100644
index 00000000..cfefdeec
--- /dev/null
+++ b/test/sanity/code-smell/deprecated-config.requirements.txt
@@ -0,0 +1,2 @@
+jinja2 # ansible-base requirement
+pyyaml
diff --git a/test/sanity/code-smell/docs-build.json b/test/sanity/code-smell/docs-build.json
new file mode 100644
index 00000000..0218bfc5
--- /dev/null
+++ b/test/sanity/code-smell/docs-build.json
@@ -0,0 +1,6 @@
+{
+ "intercept": true,
+ "disabled": true,
+ "no_targets": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/sanity/code-smell/docs-build.py b/test/sanity/code-smell/docs-build.py
new file mode 100755
index 00000000..9b6cbd3f
--- /dev/null
+++ b/test/sanity/code-smell/docs-build.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import subprocess
+import sys
+
+
+def main():
+ base_dir = os.getcwd() + os.path.sep
+ docs_dir = os.path.abspath('docs/docsite')
+ cmd = ['make', 'base_singlehtmldocs']
+
+ sphinx = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=docs_dir)
+ stdout, stderr = sphinx.communicate()
+
+ stdout = stdout.decode('utf-8')
+ stderr = stderr.decode('utf-8')
+
+ if sphinx.returncode != 0:
+ sys.stderr.write("Command '%s' failed with status code: %d\n" % (' '.join(cmd), sphinx.returncode))
+
+ if stdout.strip():
+ stdout = simplify_stdout(stdout)
+
+ sys.stderr.write("--> Standard Output\n")
+ sys.stderr.write("%s\n" % stdout.strip())
+
+ if stderr.strip():
+ sys.stderr.write("--> Standard Error\n")
+ sys.stderr.write("%s\n" % stderr.strip())
+
+ sys.exit(1)
+
+ with open('docs/docsite/rst_warnings', 'r') as warnings_fd:
+ output = warnings_fd.read().strip()
+ lines = output.splitlines()
+
+ known_warnings = {
+ 'block-quote-missing-blank-line': r'^Block quote ends without a blank line; unexpected unindent.$',
+ 'literal-block-lex-error': r'^Could not lex literal_block as "[^"]*". Highlighting skipped.$',
+ 'duplicate-label': r'^duplicate label ',
+ 'undefined-label': r'undefined label: ',
+ 'unknown-document': r'unknown document: ',
+ 'toc-tree-missing-document': r'toctree contains reference to nonexisting document ',
+ 'reference-target-not-found': r'[^ ]* reference target not found: ',
+ 'not-in-toc-tree': r"document isn't included in any toctree$",
+ 'unexpected-indentation': r'^Unexpected indentation.$',
+ 'definition-list-missing-blank-line': r'^Definition list ends without a blank line; unexpected unindent.$',
+ 'explicit-markup-missing-blank-line': r'Explicit markup ends without a blank line; unexpected unindent.$',
+ 'toc-tree-glob-pattern-no-match': r"^toctree glob pattern '[^']*' didn't match any documents$",
+ 'unknown-interpreted-text-role': '^Unknown interpreted text role "[^"]*".$',
+ }
+
+ for line in lines:
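+        # sphinx reports lines as "path:line:column: LEVEL: message", with the
+        # line and column parts optional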
+ match = re.search('^(?P<path>[^:]+):((?P<line>[0-9]+):)?((?P<column>[0-9]+):)? (?P<level>WARNING|ERROR): (?P<message>.*)$', line)
+
+ if not match:
+ path = 'docs/docsite/rst/index.rst'
+ lineno = 0
+ column = 0
+ code = 'unknown'
+ message = line
+
+ # surface unknown lines while filtering out known lines to avoid excessive output
+ print('%s:%d:%d: %s: %s' % (path, lineno, column, code, message))
+ continue
+
+ path = match.group('path')
+ lineno = int(match.group('line') or 0)
+ column = int(match.group('column') or 0)
+ level = match.group('level').lower()
+ message = match.group('message')
+
+ path = os.path.abspath(path)
+
+ if path.startswith(base_dir):
+ path = path[len(base_dir):]
+
+ if path.startswith('rst/'):
+ path = 'docs/docsite/' + path # fix up paths reported relative to `docs/docsite/`
+
+ if level == 'warning':
+ code = 'warning'
+
+ for label, pattern in known_warnings.items():
+ if re.search(pattern, message):
+ code = label
+ break
+ else:
+ code = 'error'
+
+ print('%s:%d:%d: %s: %s' % (path, lineno, column, code, message))
+
+
+def simplify_stdout(value):
+ """Simplify output by omitting earlier 'rendering: ...' messages."""
+ lines = value.strip().splitlines()
+
+ rendering = []
+ keep = []
+
+ def truncate_rendering():
+ """Keep last rendering line (if any) with a message about omitted lines as needed."""
+ if not rendering:
+ return
+
+ notice = rendering[-1]
+
+ if len(rendering) > 1:
+ notice += ' (%d previous rendering line(s) omitted)' % (len(rendering) - 1)
+
+ keep.append(notice)
+        # Could change to rendering.clear() once Python 2 support is dropped
+ rendering[:] = []
+
+ for line in lines:
+ if line.startswith('rendering: '):
+ rendering.append(line)
+ continue
+
+ truncate_rendering()
+ keep.append(line)
+
+ truncate_rendering()
+
+ result = '\n'.join(keep)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/docs-build.requirements.txt b/test/sanity/code-smell/docs-build.requirements.txt
new file mode 100644
index 00000000..5e458795
--- /dev/null
+++ b/test/sanity/code-smell/docs-build.requirements.txt
@@ -0,0 +1,6 @@
+jinja2
+pyyaml
+sphinx
+sphinx-notfound-page
+straight.plugin
+antsibull
diff --git a/test/sanity/code-smell/no-unwanted-files.json b/test/sanity/code-smell/no-unwanted-files.json
new file mode 100644
index 00000000..7a89ebbe
--- /dev/null
+++ b/test/sanity/code-smell/no-unwanted-files.json
@@ -0,0 +1,7 @@
+{
+ "include_symlinks": true,
+ "prefixes": [
+ "lib/"
+ ],
+ "output": "path-message"
+}
diff --git a/test/sanity/code-smell/no-unwanted-files.py b/test/sanity/code-smell/no-unwanted-files.py
new file mode 100755
index 00000000..bff09152
--- /dev/null
+++ b/test/sanity/code-smell/no-unwanted-files.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""Prevent unwanted files from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = (
+ '.cs',
+ '.ps1',
+ '.psm1',
+ '.py',
+ )
+
+ skip_paths = set([
+ 'lib/ansible/config/ansible_builtin_runtime.yml', # not included in the sanity ignore file since it won't exist until after migration
+ ])
+
+ skip_directories = (
+ 'lib/ansible/galaxy/data/',
+ )
+
+ for path in paths:
+ if path in skip_paths:
+ continue
+
+ if any(path.startswith(skip_directory) for skip_directory in skip_directories):
+ continue
+
+ if path.startswith('lib/') and not path.startswith('lib/ansible/'):
+ print('%s: all "lib" content must reside in the "lib/ansible" directory' % path)
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/obsolete-files.json b/test/sanity/code-smell/obsolete-files.json
new file mode 100644
index 00000000..02d39204
--- /dev/null
+++ b/test/sanity/code-smell/obsolete-files.json
@@ -0,0 +1,17 @@
+{
+ "include_symlinks": true,
+ "prefixes": [
+ "test/runner/",
+ "test/sanity/ansible-doc/",
+ "test/sanity/compile/",
+ "test/sanity/import/",
+ "test/sanity/pep8/",
+ "test/sanity/pslint/",
+ "test/sanity/pylint/",
+ "test/sanity/rstcheck/",
+ "test/sanity/shellcheck/",
+ "test/sanity/validate-modules/",
+ "test/sanity/yamllint/"
+ ],
+ "output": "path-message"
+}
diff --git a/test/sanity/code-smell/obsolete-files.py b/test/sanity/code-smell/obsolete-files.py
new file mode 100755
index 00000000..e9ddc8a5
--- /dev/null
+++ b/test/sanity/code-smell/obsolete-files.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""Prevent files from being added to directories that are now obsolete."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ for path in paths:
+ print('%s: directory "%s/" is obsolete and should not contain any files' % (path, os.path.dirname(path)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/package-data.json b/test/sanity/code-smell/package-data.json
new file mode 100644
index 00000000..2b8a5326
--- /dev/null
+++ b/test/sanity/code-smell/package-data.json
@@ -0,0 +1,6 @@
+{
+ "intercept": true,
+ "disabled": true,
+ "all_targets": true,
+ "output": "path-message"
+}
diff --git a/test/sanity/code-smell/package-data.py b/test/sanity/code-smell/package-data.py
new file mode 100755
index 00000000..822c11d5
--- /dev/null
+++ b/test/sanity/code-smell/package-data.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import fnmatch
+import glob
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tarfile
+import tempfile
+
+
+def assemble_files_to_ship(complete_file_list):
+ """
+ This looks for all files which should be shipped in the sdist
+ """
+ # All files which are in the repository except these:
+ ignore_patterns = (
+ # Developer-only tools
+ '.azure-pipelines/*',
+ '.github/*',
+ '.github/*/*',
+ 'changelogs/fragments/*',
+ 'hacking/backport/*',
+ 'hacking/shippable/*',
+ 'hacking/tests/*',
+ 'hacking/ticket_stubs/*',
+ 'test/sanity/code-smell/botmeta.*',
+ 'test/utils/*',
+ 'test/utils/*/*',
+ 'test/utils/*/*/*',
+ '.git*',
+ )
+ ignore_files = frozenset((
+ # Developer-only tools
+ 'changelogs/config.yaml',
+ 'hacking/README.md',
+ 'hacking/ansible-profile',
+ 'hacking/cgroup_perf_recap_graph.py',
+ 'hacking/create_deprecated_issues.py',
+ 'hacking/deprecated_issue_template.md',
+ 'hacking/fix_test_syntax.py',
+ 'hacking/get_library.py',
+ 'hacking/metadata-tool.py',
+ 'hacking/report.py',
+ 'hacking/return_skeleton_generator.py',
+ 'hacking/test-module',
+ 'hacking/test-module.py',
+ 'test/support/README.md',
+ '.cherry_picker.toml',
+ '.mailmap',
+ # Possibly should be included
+ 'examples/scripts/uptime.py',
+ 'examples/scripts/my_test.py',
+ 'examples/scripts/my_test_info.py',
+ 'examples/scripts/my_test_facts.py',
+ 'examples/DOCUMENTATION.yml',
+ 'examples/play.yml',
+ 'examples/hosts.yaml',
+ 'examples/hosts.yml',
+ 'examples/inventory_script_schema.json',
+ 'examples/plugin_filters.yml',
+ 'hacking/env-setup',
+ 'hacking/env-setup.fish',
+ 'MANIFEST',
+ ))
+
+ # These files are generated and then intentionally added to the sdist
+
+ # Manpages
+ manpages = ['docs/man/man1/ansible.1']
+ for dirname, dummy, files in os.walk('bin'):
+ for filename in files:
+ path = os.path.join(dirname, filename)
+ if os.path.islink(path):
+ if os.readlink(path) == 'ansible':
+ manpages.append('docs/man/man1/%s.1' % filename)
+
+ # Misc
+ misc_generated_files = [
+ 'SYMLINK_CACHE.json',
+ 'PKG-INFO',
+ ]
+
+ shipped_files = manpages + misc_generated_files
+
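+    # ship everything in the repository except the explicit ignores; the inner for/else
+    # appends a path only when no ignore pattern matched (the loop ended without break)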
+ for path in complete_file_list:
+ if path not in ignore_files:
+ for ignore in ignore_patterns:
+ if fnmatch.fnmatch(path, ignore):
+ break
+ else:
+ shipped_files.append(path)
+
+ return shipped_files
+
+
+def assemble_files_to_install(complete_file_list):
+ """
+ This looks for all of the files which should show up in an installation of ansible
+ """
+ ignore_patterns = tuple()
+
+ pkg_data_files = []
+ for path in complete_file_list:
+
+ if path.startswith("lib/ansible"):
+ prefix = 'lib'
+ elif path.startswith("test/lib/ansible_test"):
+ prefix = 'test/lib'
+ else:
+ continue
+
+ for ignore in ignore_patterns:
+ if fnmatch.fnmatch(path, ignore):
+ break
+ else:
+ pkg_data_files.append(os.path.relpath(path, prefix))
+
+ return pkg_data_files
+
+
+@contextlib.contextmanager
+def clean_repository(file_list):
+ """Copy the repository to clean it of artifacts"""
+ # Create a tempdir that will be the clean repo
+ with tempfile.TemporaryDirectory() as repo_root:
+ directories = set((repo_root + os.path.sep,))
+
+ for filename in file_list:
+ # Determine if we need to create the directory
+ directory = os.path.dirname(filename)
+ dest_dir = os.path.join(repo_root, directory)
+ if dest_dir not in directories:
+ os.makedirs(dest_dir)
+
+ # Keep track of all the directories that now exist
+ path_components = directory.split(os.path.sep)
+ path = repo_root
+ for component in path_components:
+ path = os.path.join(path, component)
+ if path not in directories:
+ directories.add(path)
+
+ # Copy the file
+ shutil.copy2(filename, dest_dir, follow_symlinks=False)
+
+ yield repo_root
+
+
+def create_sdist(tmp_dir):
+ """Create an sdist in the repository"""
+ create = subprocess.Popen(
+ ['make', 'snapshot', 'SDIST_DIR=%s' % tmp_dir],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+
+ stderr = create.communicate()[1]
+
+ if create.returncode != 0:
+ raise Exception('make snapshot failed:\n%s' % stderr)
+
+ # Determine path to sdist
+ tmp_dir_files = os.listdir(tmp_dir)
+
+ if not tmp_dir_files:
+ raise Exception('sdist was not created in the temp dir')
+ elif len(tmp_dir_files) > 1:
+ raise Exception('Unexpected extra files in the temp dir')
+
+ return os.path.join(tmp_dir, tmp_dir_files[0])
+
+
+def extract_sdist(sdist_path, tmp_dir):
+ """Untar the sdist"""
+ # Untar the sdist from the tmp_dir
+ with tarfile.open(os.path.join(tmp_dir, sdist_path), 'r|*') as sdist:
+ sdist.extractall(path=tmp_dir)
+
+ # Determine the sdist directory name
+ sdist_filename = os.path.basename(sdist_path)
+ tmp_dir_files = os.listdir(tmp_dir)
+ try:
+ tmp_dir_files.remove(sdist_filename)
+ except ValueError:
+        # Unexpected: the original sdist was not found in the temp dir
+ raise
+
+ if len(tmp_dir_files) > 1:
+ raise Exception('Unexpected extra files in the temp dir')
+ elif len(tmp_dir_files) < 1:
+        raise Exception('sdist extraction did not occur in the temp dir')
+
+ return os.path.join(tmp_dir, tmp_dir_files[0])
+
+
+def install_sdist(tmp_dir, sdist_dir):
+ """Install the extracted sdist into the temporary directory"""
+ install = subprocess.Popen(
+ ['python', 'setup.py', 'install', '--root=%s' % tmp_dir],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ cwd=os.path.join(tmp_dir, sdist_dir),
+ )
+
+ stdout, stderr = install.communicate()
+
+ if install.returncode != 0:
+ raise Exception('sdist install failed:\n%s' % stderr)
+
+ # Determine the prefix for the installed files
+ match = re.search('^creating (%s/.*?/(?:site|dist)-packages)/ansible$' %
+ tmp_dir, stdout, flags=re.M)
+ return match.group(1)
+
+
+def check_sdist_contains_expected(sdist_dir, to_ship_files):
+ """Check that the files we expect to ship are present in the sdist"""
+ results = []
+ for filename in to_ship_files:
+ path = os.path.join(sdist_dir, filename)
+ if not os.path.exists(path):
+ results.append('%s: File was not added to sdist' % filename)
+
+    # Also check that exactly one changelog file made it into the sdist
+ changelog_files = glob.glob(os.path.join(sdist_dir, 'changelogs/CHANGELOG-v2.[0-9]*.rst'))
+ if not changelog_files:
+ results.append('changelogs/CHANGELOG-v2.*.rst: Changelog file was not added to the sdist')
+ elif len(changelog_files) > 1:
+ results.append('changelogs/CHANGELOG-v2.*.rst: Too many changelog files: %s'
+ % changelog_files)
+
+ return results
+
+
+def check_sdist_files_are_wanted(sdist_dir, to_ship_files):
+ """Check that all files in the sdist are desired"""
+ results = []
+ for dirname, dummy, files in os.walk(sdist_dir):
+ dirname = os.path.relpath(dirname, start=sdist_dir)
+ if dirname == '.':
+ dirname = ''
+
+ for filename in files:
+ path = os.path.join(dirname, filename)
+ if path not in to_ship_files:
+ if fnmatch.fnmatch(path, 'changelogs/CHANGELOG-v2.[0-9]*.rst'):
+ # changelog files are expected
+ continue
+
+ # FIXME: ansible-test doesn't pass the paths of symlinks to us so we aren't
+ # checking those
+ if not os.path.islink(os.path.join(sdist_dir, path)):
+ results.append('%s: File in sdist was not in the repository' % path)
+
+ return results
+
+
+def check_installed_contains_expected(install_dir, to_install_files):
+ """Check that all the files we expect to be installed are"""
+ results = []
+ for filename in to_install_files:
+ path = os.path.join(install_dir, filename)
+ if not os.path.exists(path):
+ results.append('%s: File not installed' % os.path.join('lib', filename))
+
+ return results
+
+
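+# setuptools-generated egg-info files that are expected to show up in an install tree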
+EGG_RE = re.compile('ansible[^/]+\\.egg-info/(PKG-INFO|SOURCES.txt|'
+ 'dependency_links.txt|not-zip-safe|requires.txt|top_level.txt)$')
+
+
+def check_installed_files_are_wanted(install_dir, to_install_files):
+ """Check that all installed files were desired"""
+ results = []
+
+ for dirname, dummy, files in os.walk(install_dir):
+ dirname = os.path.relpath(dirname, start=install_dir)
+ if dirname == '.':
+ dirname = ''
+
+ for filename in files:
+ # If this is a byte code cache, look for the python file's name
+ directory = dirname
+ if filename.endswith('.pyc') or filename.endswith('.pyo'):
+ # Remove the trailing "o" or c"
+ filename = filename[:-1]
+
+ if directory.endswith('%s__pycache__' % os.path.sep):
+ # Python3 byte code cache, look for the basename of
+ # __pycache__/__init__.cpython-36.py
+ segments = filename.rsplit('.', 2)
+ if len(segments) >= 3:
+ filename = '.'.join((segments[0], segments[2]))
+ directory = os.path.dirname(directory)
+
+ path = os.path.join(directory, filename)
+
+ # Test that the file was listed for installation
+ if path not in to_install_files:
+ # FIXME: ansible-test doesn't pass the paths of symlinks to us so we
+ # aren't checking those
+ if not os.path.islink(os.path.join(install_dir, path)):
+ if not EGG_RE.match(path):
+ results.append('%s: File was installed but was not supposed to be' % path)
+
+ return results
+
+
+def _find_symlinks():
+ symlink_list = []
+ for dirname, directories, filenames in os.walk('.'):
+ for filename in filenames:
+ path = os.path.join(dirname, filename)
+ # Strip off "./" from the front
+ path = path[2:]
+ if os.path.islink(path):
+ symlink_list.append(path)
+
+ return symlink_list
+
+
+def main():
+ """All of the files in the repository"""
+ complete_file_list = []
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ complete_file_list.append(path)
+
+ # ansible-test isn't currently passing symlinks to us so construct those ourselves for now
+ for filename in _find_symlinks():
+ if filename not in complete_file_list:
+ # For some reason ansible-test is passing us lib/ansible/module_utils/ansible_release.py
+ # which is a symlink even though it doesn't pass any others
+ complete_file_list.append(filename)
+
+ # We may run this after docs sanity tests so get a clean repository to run in
+ with clean_repository(complete_file_list) as clean_repo_dir:
+ os.chdir(clean_repo_dir)
+
+ to_ship_files = assemble_files_to_ship(complete_file_list)
+ to_install_files = assemble_files_to_install(complete_file_list)
+
+ results = []
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ sdist_path = create_sdist(tmp_dir)
+ sdist_dir = extract_sdist(sdist_path, tmp_dir)
+
+ # Check that the files that are supposed to be in the sdist are there
+ results.extend(check_sdist_contains_expected(sdist_dir, to_ship_files))
+
+ # Check that the files that are in the sdist are in the repository
+ results.extend(check_sdist_files_are_wanted(sdist_dir, to_ship_files))
+
+ # install the sdist
+ install_dir = install_sdist(tmp_dir, sdist_dir)
+
+ # Check that the files that are supposed to be installed are there
+ results.extend(check_installed_contains_expected(install_dir, to_install_files))
+
+ # Check that the files that are installed are supposed to be installed
+ results.extend(check_installed_files_are_wanted(install_dir, to_install_files))
+
+ for message in results:
+ print(message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/package-data.requirements.txt b/test/sanity/code-smell/package-data.requirements.txt
new file mode 100644
index 00000000..5d74c715
--- /dev/null
+++ b/test/sanity/code-smell/package-data.requirements.txt
@@ -0,0 +1,10 @@
+docutils
+jinja2
+packaging
+pyyaml # ansible-base requirement
+rstcheck
+setuptools > 39.2
+straight.plugin
+
+# changelog build requires python 3.6+
+antsibull-changelog ; python_version >= '3.6'
diff --git a/test/sanity/code-smell/release-names.json b/test/sanity/code-smell/release-names.json
new file mode 100644
index 00000000..593b765d
--- /dev/null
+++ b/test/sanity/code-smell/release-names.json
@@ -0,0 +1,4 @@
+{
+ "no_targets": true,
+ "output": "path-message"
+}
diff --git a/test/sanity/code-smell/release-names.py b/test/sanity/code-smell/release-names.py
new file mode 100755
index 00000000..f8003320
--- /dev/null
+++ b/test/sanity/code-smell/release-names.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2019, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+Test that the release name is present in the list of used up release names
+"""
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from yaml import safe_load
+
+from ansible.release import __codename__
+
+
+def main():
+ """Entrypoint to the script"""
+
+ with open('.github/RELEASE_NAMES.yml') as f:
+ releases = safe_load(f.read())
+
+ # Why this format? The file's sole purpose is to be read by a human when they need to know
+ # which release names have already been used. So:
+ # 1) It's easier for a human to find the release names when there's one on each line
+ # 2) It helps keep other people from using the file and then asking for new features in it
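+    # each line is assumed to look like "<version> <release name>"; split once on
+    # whitespace and compare the name portion against the current codename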
+ for name in (r.split(maxsplit=1)[1] for r in releases):
+ if __codename__ == name:
+ break
+ else:
+ print('.github/RELEASE_NAMES.yml: Current codename was not present in the file')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/release-names.requirements.txt b/test/sanity/code-smell/release-names.requirements.txt
new file mode 100644
index 00000000..c3726e8b
--- /dev/null
+++ b/test/sanity/code-smell/release-names.requirements.txt
@@ -0,0 +1 @@
+pyyaml
diff --git a/test/sanity/code-smell/required-and-default-attributes.json b/test/sanity/code-smell/required-and-default-attributes.json
new file mode 100644
index 00000000..dd9ac7b1
--- /dev/null
+++ b/test/sanity/code-smell/required-and-default-attributes.json
@@ -0,0 +1,9 @@
+{
+ "prefixes": [
+ "lib/ansible/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/sanity/code-smell/required-and-default-attributes.py b/test/sanity/code-smell/required-and-default-attributes.py
new file mode 100755
index 00000000..5ef410bd
--- /dev/null
+++ b/test/sanity/code-smell/required-and-default-attributes.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
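+                # flag FieldAttribute(...) declarations that combine `default` and
+                # `required` (the regex matches any two occurrences in either order)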
+ match = re.search(r'(FieldAttribute.*(default|required).*(default|required))', text)
+
+ if match:
+ print('%s:%d:%d: use only one of `default` or `required` with `FieldAttribute`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/skip.txt b/test/sanity/code-smell/skip.txt
new file mode 100644
index 00000000..6fb327b3
--- /dev/null
+++ b/test/sanity/code-smell/skip.txt
@@ -0,0 +1,2 @@
+deprecated-config.py # disabled by default, to be enabled by the release manager after branching
+update-bundled.py # disabled by default, to be enabled by the release manager after branching
diff --git a/test/sanity/code-smell/test-constraints.json b/test/sanity/code-smell/test-constraints.json
new file mode 100644
index 00000000..69b07bf3
--- /dev/null
+++ b/test/sanity/code-smell/test-constraints.json
@@ -0,0 +1,9 @@
+{
+ "prefixes": [
+ "test/lib/ansible_test/_data/requirements/"
+ ],
+ "extensions": [
+ ".txt"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/sanity/code-smell/test-constraints.py b/test/sanity/code-smell/test-constraints.py
new file mode 100755
index 00000000..e8b9c795
--- /dev/null
+++ b/test/sanity/code-smell/test-constraints.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
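+                # report the first <, > or = appearing before any ';' or '#' comment,
+                # unless the remainder of the line carries a sanity_ok marker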
+ match = re.search(r'^[^;#]*?([<>=])(?!.*sanity_ok.*)', text)
+
+ if match:
+ print('%s:%d:%d: put constraints in `test/lib/ansible_test/_data/requirements/constraints.txt`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/update-bundled.json b/test/sanity/code-smell/update-bundled.json
new file mode 100644
index 00000000..379bf4d7
--- /dev/null
+++ b/test/sanity/code-smell/update-bundled.json
@@ -0,0 +1,8 @@
+{
+ "all_targets": true,
+ "ignore_self": true,
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/sanity/code-smell/update-bundled.py b/test/sanity/code-smell/update-bundled.py
new file mode 100755
index 00000000..121e225f
--- /dev/null
+++ b/test/sanity/code-smell/update-bundled.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2018, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+This test checks whether the libraries we're bundling are out of date and need to be synced with
+a newer upstream release.
+"""
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import fnmatch
+import json
+import re
+import sys
+from distutils.version import LooseVersion
+
+import packaging.specifiers
+
+from ansible.module_utils.urls import open_url
+
+
+BUNDLED_RE = re.compile(b'\\b_BUNDLED_METADATA\\b')
+
+
+def get_bundled_libs(paths):
+ """
+ Return the set of known bundled libraries
+
+ :arg paths: The paths which the test has been instructed to check
+ :returns: The list of all files which we know to contain bundled libraries. If a bundled
+ library consists of multiple files, this should be the file which has metadata included.
+ """
+ bundled_libs = set()
+ for filename in fnmatch.filter(paths, 'lib/ansible/compat/*/__init__.py'):
+ bundled_libs.add(filename)
+
+ bundled_libs.add('lib/ansible/module_utils/distro/__init__.py')
+ bundled_libs.add('lib/ansible/module_utils/six/__init__.py')
+ bundled_libs.add('lib/ansible/module_utils/compat/ipaddress.py')
+ # backports.ssl_match_hostname should be moved to its own file in the future
+ bundled_libs.add('lib/ansible/module_utils/urls.py')
+
+ return bundled_libs
+
+
+def get_files_with_bundled_metadata(paths):
+ """
+ Search for any files which have bundled metadata inside of them
+
+ :arg paths: Iterable of filenames to search for metadata inside of
+ :returns: A set of pathnames which contained metadata
+ """
+
+ with_metadata = set()
+ for path in paths:
+ with open(path, 'rb') as f:
+ body = f.read()
+
+ if BUNDLED_RE.search(body):
+ with_metadata.add(path)
+
+ return with_metadata
+
+
+def get_bundled_metadata(filename):
+ """
+ Retrieve the metadata about a bundled library from a python file
+
+ :arg filename: The filename to look inside for the metadata
+ :raises ValueError: If we're unable to extract metadata from the file
+ :returns: The metadata from the python file
+ """
+ with open(filename, 'r') as module:
+ for line in module:
+ if line.strip().startswith('_BUNDLED_METADATA'):
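+                # take everything from the first '{' to the end of the line; the
+                # metadata must be a one-line JSON object literal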
+ data = line[line.index('{'):].strip()
+ break
+ else:
+ raise ValueError('Unable to check bundled library for update. Please add'
+ ' _BUNDLED_METADATA dictionary to the library file with'
+ ' information on pypi name and bundled version.')
+ metadata = json.loads(data)
+ return metadata
+
+
+def get_latest_applicable_version(pypi_data, constraints=None):
+ """Get the latest pypi version of the package that we allow
+
+ :arg pypi_data: Pypi information about the data as returned by
+ ``https://pypi.org/pypi/{pkg_name}/json``
+ :kwarg constraints: version constraints on what we're allowed to use as specified by
+ the bundled metadata
+    :returns: The most recent version on pypi that is allowed by ``constraints``
+ """
+ latest_version = "0"
+ if constraints:
+ version_specification = packaging.specifiers.SpecifierSet(constraints)
+ for version in pypi_data['releases']:
+ if version in version_specification:
+ if LooseVersion(version) > LooseVersion(latest_version):
+ latest_version = version
+ else:
+ latest_version = pypi_data['info']['version']
+
+ return latest_version
+
+
+def main():
+ """Entrypoint to the script"""
+
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ bundled_libs = get_bundled_libs(paths)
+ files_with_bundled_metadata = get_files_with_bundled_metadata(paths)
+
+ for filename in files_with_bundled_metadata.difference(bundled_libs):
+ print('{0}: ERROR: File contains _BUNDLED_METADATA but needs to be added to'
+ ' test/sanity/code-smell/update-bundled.py'.format(filename))
+
+ for filename in bundled_libs:
+ try:
+ metadata = get_bundled_metadata(filename)
+ except ValueError as e:
+ print('{0}: ERROR: {1}'.format(filename, e))
+ continue
+        except (IOError, OSError) as e:
+            if e.errno == 2:
+                print('{0}: ERROR: {1}. Perhaps the bundled library has been removed'
+                      ' or moved and the bundled library test needs to be modified as'
+                      ' well?'.format(filename, e))
+                # without metadata there is nothing to compare against pypi, so skip the file
+                continue
+            raise
+
+ pypi_fh = open_url('https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name']))
+ pypi_data = json.loads(pypi_fh.read().decode('utf-8'))
+
+ constraints = metadata.get('version_constraints', None)
+ latest_version = get_latest_applicable_version(pypi_data, constraints)
+
+ if LooseVersion(metadata['version']) < LooseVersion(latest_version):
+ print('{0}: UPDATE {1} from {2} to {3} {4}'.format(
+ filename,
+ metadata['pypi_name'],
+ metadata['version'],
+ latest_version,
+ 'https://pypi.org/pypi/{0}/json'.format(metadata['pypi_name'])))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/sanity/code-smell/update-bundled.requirements.txt b/test/sanity/code-smell/update-bundled.requirements.txt
new file mode 100644
index 00000000..748809f7
--- /dev/null
+++ b/test/sanity/code-smell/update-bundled.requirements.txt
@@ -0,0 +1 @@
+packaging
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
new file mode 100644
index 00000000..e1b2b007
--- /dev/null
+++ b/test/sanity/ignore.txt
@@ -0,0 +1,426 @@
+docs/bin/find-plugin-refs.py future-import-boilerplate
+docs/bin/find-plugin-refs.py metaclass-boilerplate
+docs/docsite/_extensions/pygments_lexer.py future-import-boilerplate
+docs/docsite/_extensions/pygments_lexer.py metaclass-boilerplate
+docs/docsite/_themes/sphinx_rtd_theme/__init__.py future-import-boilerplate
+docs/docsite/_themes/sphinx_rtd_theme/__init__.py metaclass-boilerplate
+docs/docsite/rst/conf.py future-import-boilerplate
+docs/docsite/rst/conf.py metaclass-boilerplate
+docs/docsite/rst/dev_guide/testing/sanity/no-smart-quotes.rst no-smart-quotes
+examples/play.yml shebang
+examples/scripts/my_test.py shebang # example module but not in a normal module location
+examples/scripts/my_test_facts.py shebang # example module but not in a normal module location
+examples/scripts/my_test_info.py shebang # example module but not in a normal module location
+examples/scripts/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
+examples/scripts/upgrade_to_ps3.ps1 pslint:PSCustomUseLiteralPath
+examples/scripts/upgrade_to_ps3.ps1 pslint:PSUseApprovedVerbs
+hacking/build-ansible.py shebang # only run by release engineers, Python 3.6+ required
+hacking/build_library/build_ansible/announce.py compile-2.6!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/announce.py compile-2.7!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/announce.py compile-3.5!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/commands.py compile-2.6!skip # release and docs process only, 3.6+ required
+hacking/build_library/build_ansible/commands.py compile-2.7!skip # release and docs process only, 3.6+ required
+hacking/build_library/build_ansible/commands.py compile-3.5!skip # release and docs process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_config.py compile-2.6!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_config.py compile-2.7!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_config.py compile-3.5!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_keywords.py compile-2.6!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_keywords.py compile-2.7!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/dump_keywords.py compile-3.5!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/generate_man.py compile-2.6!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/generate_man.py compile-2.7!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/generate_man.py compile-3.5!skip # docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/porting_guide.py compile-2.6!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/porting_guide.py compile-2.7!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/porting_guide.py compile-3.5!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/release_announcement.py compile-2.6!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/release_announcement.py compile-2.7!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/release_announcement.py compile-3.5!skip # release process only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/update_intersphinx.py compile-2.6!skip # release process and docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/update_intersphinx.py compile-2.7!skip # release process and docs build only, 3.6+ required
+hacking/build_library/build_ansible/command_plugins/update_intersphinx.py compile-3.5!skip # release process and docs build only, 3.6+ required
+hacking/fix_test_syntax.py future-import-boilerplate
+hacking/fix_test_syntax.py metaclass-boilerplate
+hacking/get_library.py future-import-boilerplate
+hacking/get_library.py metaclass-boilerplate
+hacking/report.py future-import-boilerplate
+hacking/report.py metaclass-boilerplate
+hacking/return_skeleton_generator.py future-import-boilerplate
+hacking/return_skeleton_generator.py metaclass-boilerplate
+hacking/test-module.py future-import-boilerplate
+hacking/test-module.py metaclass-boilerplate
+hacking/tests/gen_distribution_version_testcase.py future-import-boilerplate
+hacking/tests/gen_distribution_version_testcase.py metaclass-boilerplate
+lib/ansible/cli/console.py pylint:blacklisted-name
+lib/ansible/cli/scripts/ansible_cli_stub.py shebang
+lib/ansible/cli/scripts/ansible_connection_cli_stub.py shebang
+lib/ansible/config/base.yml no-unwanted-files
+lib/ansible/executor/playbook_executor.py pylint:blacklisted-name
+lib/ansible/executor/powershell/async_watchdog.ps1 pslint:PSCustomUseLiteralPath
+lib/ansible/executor/powershell/async_wrapper.ps1 pslint:PSCustomUseLiteralPath
+lib/ansible/executor/powershell/exec_wrapper.ps1 pslint:PSCustomUseLiteralPath
+lib/ansible/executor/task_queue_manager.py pylint:blacklisted-name
+lib/ansible/galaxy/collection.py compile-2.6!skip # 'ansible-galaxy collection' requires 2.7+
+lib/ansible/module_utils/compat/_selectors2.py future-import-boilerplate # ignore bundled
+lib/ansible/module_utils/compat/_selectors2.py metaclass-boilerplate # ignore bundled
+lib/ansible/module_utils/compat/_selectors2.py pylint:blacklisted-name
+lib/ansible/module_utils/distro/__init__.py empty-init # breaks namespacing, bundled, do not override
+lib/ansible/module_utils/distro/_distro.py future-import-boilerplate # ignore bundled
+lib/ansible/module_utils/distro/_distro.py metaclass-boilerplate # ignore bundled
+lib/ansible/module_utils/distro/_distro.py no-assert
+lib/ansible/module_utils/distro/_distro.py pep8!skip # bundled code we don't want to modify
+lib/ansible/module_utils/facts/__init__.py empty-init # breaks namespacing, deprecate and eventually remove
+lib/ansible/module_utils/facts/network/linux.py pylint:blacklisted-name
+lib/ansible/module_utils/facts/system/distribution.py pylint:ansible-bad-function
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.ArgvParser.psm1 pslint:PSUseApprovedVerbs
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 pslint:PSProvideCommentHelp # need to agree on best format for comment location
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.CommandUtil.psm1 pslint:PSUseApprovedVerbs
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 pslint:PSCustomUseLiteralPath
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.FileUtil.psm1 pslint:PSProvideCommentHelp
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 pslint:PSCustomUseLiteralPath
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.Legacy.psm1 pslint:PSUseApprovedVerbs
+lib/ansible/module_utils/powershell/Ansible.ModuleUtils.LinkUtil.psm1 pslint:PSUseApprovedVerbs
+lib/ansible/module_utils/pycompat24.py no-get-exception
+lib/ansible/module_utils/six/__init__.py empty-init # breaks namespacing, bundled, do not override
+lib/ansible/module_utils/six/__init__.py future-import-boilerplate # ignore bundled
+lib/ansible/module_utils/six/__init__.py metaclass-boilerplate # ignore bundled
+lib/ansible/module_utils/six/__init__.py no-basestring
+lib/ansible/module_utils/six/__init__.py no-dict-iteritems
+lib/ansible/module_utils/six/__init__.py no-dict-iterkeys
+lib/ansible/module_utils/six/__init__.py no-dict-itervalues
+lib/ansible/module_utils/six/__init__.py replace-urlopen
+lib/ansible/module_utils/urls.py pylint:blacklisted-name
+lib/ansible/module_utils/urls.py replace-urlopen
+lib/ansible/modules/command.py validate-modules:doc-default-does-not-match-spec # _uses_shell is undocumented
+lib/ansible/modules/command.py validate-modules:doc-missing-type
+lib/ansible/modules/command.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/command.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/command.py validate-modules:undocumented-parameter
+lib/ansible/modules/expect.py validate-modules:doc-missing-type
+lib/ansible/modules/assemble.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/blockinfile.py validate-modules:doc-choices-do-not-match-spec
+lib/ansible/modules/blockinfile.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/copy.py pylint:blacklisted-name
+lib/ansible/modules/copy.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/copy.py validate-modules:doc-type-does-not-match-spec
+lib/ansible/modules/copy.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/copy.py validate-modules:undocumented-parameter
+lib/ansible/modules/file.py pylint:ansible-bad-function
+lib/ansible/modules/file.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/file.py validate-modules:undocumented-parameter
+lib/ansible/modules/find.py use-argspec-type-path # fix needed
+lib/ansible/modules/find.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/find.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/lineinfile.py validate-modules:doc-choices-do-not-match-spec
+lib/ansible/modules/lineinfile.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/lineinfile.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/replace.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/stat.py validate-modules:doc-default-does-not-match-spec # get_md5 is undocumented
+lib/ansible/modules/stat.py validate-modules:parameter-invalid
+lib/ansible/modules/stat.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/stat.py validate-modules:undocumented-parameter
+lib/ansible/modules/unarchive.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/unarchive.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/get_url.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/uri.py pylint:blacklisted-name
+lib/ansible/modules/uri.py validate-modules:doc-required-mismatch
+lib/ansible/modules/uri.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/uri.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/pip.py pylint:blacklisted-name
+lib/ansible/modules/pip.py validate-modules:doc-elements-mismatch
+lib/ansible/modules/pip.py validate-modules:invalid-ansiblemodule-schema
+lib/ansible/modules/apt.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/apt.py validate-modules:parameter-invalid
+lib/ansible/modules/apt.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/apt.py validate-modules:undocumented-parameter
+lib/ansible/modules/apt_key.py validate-modules:mutually_exclusive-unknown
+lib/ansible/modules/apt_key.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/apt_key.py validate-modules:undocumented-parameter
+lib/ansible/modules/apt_repository.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/apt_repository.py validate-modules:parameter-invalid
+lib/ansible/modules/apt_repository.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/apt_repository.py validate-modules:undocumented-parameter
+lib/ansible/modules/dnf.py validate-modules:doc-missing-type
+lib/ansible/modules/dnf.py validate-modules:doc-required-mismatch
+lib/ansible/modules/dnf.py validate-modules:parameter-invalid
+lib/ansible/modules/dnf.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/dnf.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/dpkg_selections.py validate-modules:doc-missing-type
+lib/ansible/modules/dpkg_selections.py validate-modules:doc-required-mismatch
+lib/ansible/modules/package_facts.py validate-modules:doc-choices-do-not-match-spec
+lib/ansible/modules/package_facts.py validate-modules:doc-missing-type
+lib/ansible/modules/package_facts.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/rpm_key.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/yum.py pylint:blacklisted-name
+lib/ansible/modules/yum.py validate-modules:doc-missing-type
+lib/ansible/modules/yum.py validate-modules:parameter-invalid
+lib/ansible/modules/yum.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/yum.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/yum_repository.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/yum_repository.py validate-modules:doc-missing-type
+lib/ansible/modules/yum_repository.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/yum_repository.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/yum_repository.py validate-modules:undocumented-parameter
+lib/ansible/modules/git.py pylint:blacklisted-name
+lib/ansible/modules/git.py use-argspec-type-path
+lib/ansible/modules/git.py validate-modules:doc-missing-type
+lib/ansible/modules/git.py validate-modules:doc-required-mismatch
+lib/ansible/modules/git.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/git.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/subversion.py validate-modules:doc-required-mismatch
+lib/ansible/modules/subversion.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/subversion.py validate-modules:undocumented-parameter
+lib/ansible/modules/getent.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/hostname.py validate-modules:invalid-ansiblemodule-schema
+lib/ansible/modules/hostname.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/iptables.py pylint:blacklisted-name
+lib/ansible/modules/iptables.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/service.py validate-modules:nonexistent-parameter-documented
+lib/ansible/modules/service.py validate-modules:use-run-command-not-popen
+lib/ansible/modules/setup.py validate-modules:doc-missing-type
+lib/ansible/modules/setup.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/setup.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/systemd.py validate-modules:parameter-invalid
+lib/ansible/modules/systemd.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/systemd.py validate-modules:return-syntax-error
+lib/ansible/modules/sysvinit.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/sysvinit.py validate-modules:parameter-type-not-in-doc
+lib/ansible/modules/sysvinit.py validate-modules:return-syntax-error
+lib/ansible/modules/user.py validate-modules:doc-default-does-not-match-spec
+lib/ansible/modules/user.py validate-modules:doc-default-incompatible-type
+lib/ansible/modules/user.py validate-modules:parameter-list-no-elements
+lib/ansible/modules/user.py validate-modules:use-run-command-not-popen
+lib/ansible/modules/async_status.py use-argspec-type-path
+lib/ansible/modules/async_status.py validate-modules!skip
+lib/ansible/modules/async_wrapper.py ansible-doc!skip # not an actual module
+lib/ansible/modules/async_wrapper.py pylint:ansible-bad-function
+lib/ansible/modules/async_wrapper.py use-argspec-type-path
+lib/ansible/modules/wait_for.py validate-modules:parameter-list-no-elements
+lib/ansible/parsing/vault/__init__.py pylint:blacklisted-name
+lib/ansible/playbook/base.py pylint:blacklisted-name
+lib/ansible/playbook/collectionsearch.py required-and-default-attributes # https://github.com/ansible/ansible/issues/61460
+lib/ansible/playbook/helpers.py pylint:blacklisted-name
+lib/ansible/playbook/role/__init__.py pylint:blacklisted-name
+lib/ansible/plugins/action/normal.py action-plugin-docs # default action plugin for modules without a dedicated action plugin
+lib/ansible/plugins/cache/base.py ansible-doc!skip # not a plugin, but a stub for backwards compatibility
+lib/ansible/plugins/lookup/sequence.py pylint:blacklisted-name
+lib/ansible/plugins/strategy/__init__.py pylint:blacklisted-name
+lib/ansible/plugins/strategy/linear.py pylint:blacklisted-name
+lib/ansible/vars/hostvars.py pylint:blacklisted-name
+test/integration/targets/ansible-runner/files/adhoc_example1.py future-import-boilerplate
+test/integration/targets/ansible-runner/files/adhoc_example1.py metaclass-boilerplate
+test/integration/targets/ansible-runner/files/playbook_example1.py future-import-boilerplate
+test/integration/targets/ansible-runner/files/playbook_example1.py metaclass-boilerplate
+test/integration/targets/ansible-test/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level
+test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level
+test/integration/targets/ansible-test/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level
+test/integration/targets/ansible-test-docker/ansible_collections/ns/col/plugins/modules/hello.py pylint:relative-beyond-top-level
+test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/module_utils/test_my_util.py pylint:relative-beyond-top-level
+test/integration/targets/ansible-test-docker/ansible_collections/ns/col/tests/unit/plugins/modules/test_hello.py pylint:relative-beyond-top-level
+test/integration/targets/async_fail/library/async_test.py future-import-boilerplate
+test/integration/targets/async_fail/library/async_test.py metaclass-boilerplate
+test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py future-import-boilerplate
+test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py pylint:relative-beyond-top-level
+test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py pylint:relative-beyond-top-level
+test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/modules/my_module.py pylint:relative-beyond-top-level
+test/integration/targets/expect/files/test_command.py future-import-boilerplate
+test/integration/targets/expect/files/test_command.py metaclass-boilerplate
+test/integration/targets/gathering_facts/library/bogus_facts shebang
+test/integration/targets/gathering_facts/library/facts_one shebang
+test/integration/targets/gathering_facts/library/facts_two shebang
+test/integration/targets/get_url/files/testserver.py future-import-boilerplate
+test/integration/targets/get_url/files/testserver.py metaclass-boilerplate
+test/integration/targets/group/files/gidget.py future-import-boilerplate
+test/integration/targets/group/files/gidget.py metaclass-boilerplate
+test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py future-import-boilerplate
+test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py metaclass-boilerplate
+test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py future-import-boilerplate
+test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py metaclass-boilerplate
+test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xSetReboot/ANSIBLE_xSetReboot.psm1 pslint!skip
+test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 pslint!skip
+test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.0/xTestDsc.psd1 pslint!skip
+test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/DSCResources/ANSIBLE_xTestResource/ANSIBLE_xTestResource.psm1 pslint!skip
+test/integration/targets/incidental_win_dsc/files/xTestDsc/1.0.1/xTestDsc.psd1 pslint!skip
+test/integration/targets/incidental_win_ping/library/win_ping_syntax_error.ps1 pslint!skip
+test/integration/targets/incidental_win_reboot/templates/post_reboot.ps1 pslint!skip
+test/integration/targets/lookup_ini/lookup-8859-15.ini no-smart-quotes
+test/integration/targets/module_precedence/lib_with_extension/a.ini shebang
+test/integration/targets/module_precedence/lib_with_extension/ping.ini shebang
+test/integration/targets/module_precedence/lib_with_extension/ping.py future-import-boilerplate
+test/integration/targets/module_precedence/lib_with_extension/ping.py metaclass-boilerplate
+test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py future-import-boilerplate
+test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py metaclass-boilerplate
+test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py future-import-boilerplate
+test/integration/targets/module_precedence/multiple_roles/foo/library/ping.py metaclass-boilerplate
+test/integration/targets/module_precedence/roles_with_extension/foo/library/a.ini shebang
+test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.ini shebang
+test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py future-import-boilerplate
+test/integration/targets/module_precedence/roles_with_extension/foo/library/ping.py metaclass-boilerplate
+test/integration/targets/module_utils/library/test.py future-import-boilerplate
+test/integration/targets/module_utils/library/test.py metaclass-boilerplate
+test/integration/targets/module_utils/library/test_env_override.py future-import-boilerplate
+test/integration/targets/module_utils/library/test_env_override.py metaclass-boilerplate
+test/integration/targets/module_utils/library/test_failure.py future-import-boilerplate
+test/integration/targets/module_utils/library/test_failure.py metaclass-boilerplate
+test/integration/targets/module_utils/library/test_override.py future-import-boilerplate
+test/integration/targets/module_utils/library/test_override.py metaclass-boilerplate
+test/integration/targets/module_utils/module_utils/bar0/foo.py pylint:blacklisted-name
+test/integration/targets/module_utils/module_utils/foo.py pylint:blacklisted-name
+test/integration/targets/module_utils/module_utils/sub/bar/__init__.py pylint:blacklisted-name
+test/integration/targets/module_utils/module_utils/sub/bar/bar.py pylint:blacklisted-name
+test/integration/targets/module_utils/module_utils/yak/zebra/foo.py pylint:blacklisted-name
+test/integration/targets/old_style_modules_posix/library/helloworld.sh shebang
+test/integration/targets/pause/test-pause.py future-import-boilerplate
+test/integration/targets/pause/test-pause.py metaclass-boilerplate
+test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py future-import-boilerplate
+test/integration/targets/pip/files/ansible_test_pip_chdir/__init__.py metaclass-boilerplate
+test/integration/targets/pip/files/setup.py future-import-boilerplate
+test/integration/targets/pip/files/setup.py metaclass-boilerplate
+test/integration/targets/run_modules/library/test.py future-import-boilerplate
+test/integration/targets/run_modules/library/test.py metaclass-boilerplate
+test/integration/targets/script/files/no_shebang.py future-import-boilerplate
+test/integration/targets/script/files/no_shebang.py metaclass-boilerplate
+test/integration/targets/service/files/ansible_test_service.py future-import-boilerplate
+test/integration/targets/service/files/ansible_test_service.py metaclass-boilerplate
+test/integration/targets/setup_rpm_repo/files/create-repo.py future-import-boilerplate
+test/integration/targets/setup_rpm_repo/files/create-repo.py metaclass-boilerplate
+test/integration/targets/template/files/encoding_1252_utf-8.expected no-smart-quotes
+test/integration/targets/template/files/encoding_1252_windows-1252.expected no-smart-quotes
+test/integration/targets/template/files/foo.dos.txt line-endings
+test/integration/targets/template/role_filter/filter_plugins/myplugin.py future-import-boilerplate
+test/integration/targets/template/role_filter/filter_plugins/myplugin.py metaclass-boilerplate
+test/integration/targets/template/templates/encoding_1252.j2 no-smart-quotes
+test/integration/targets/infra/library/test.py future-import-boilerplate
+test/integration/targets/infra/library/test.py metaclass-boilerplate
+test/integration/targets/unicode/unicode.yml no-smart-quotes
+test/integration/targets/uri/files/testserver.py future-import-boilerplate
+test/integration/targets/uri/files/testserver.py metaclass-boilerplate
+test/integration/targets/var_precedence/ansible-var-precedence-check.py future-import-boilerplate
+test/integration/targets/var_precedence/ansible-var-precedence-check.py metaclass-boilerplate
+test/integration/targets/builtin_vars_prompt/test-vars_prompt.py future-import-boilerplate
+test/integration/targets/builtin_vars_prompt/test-vars_prompt.py metaclass-boilerplate
+test/integration/targets/vault/test-vault-client.py future-import-boilerplate
+test/integration/targets/vault/test-vault-client.py metaclass-boilerplate
+test/integration/targets/wait_for/files/testserver.py future-import-boilerplate
+test/integration/targets/wait_for/files/testserver.py metaclass-boilerplate
+test/integration/targets/want_json_modules_posix/library/helloworld.py future-import-boilerplate
+test/integration/targets/want_json_modules_posix/library/helloworld.py metaclass-boilerplate
+test/integration/targets/win_exec_wrapper/library/test_fail.ps1 pslint:PSCustomUseLiteralPath
+test/integration/targets/win_exec_wrapper/tasks/main.yml no-smart-quotes # We are explicitly testing smart quote support for env vars
+test/integration/targets/win_fetch/tasks/main.yml no-smart-quotes # We are explicitly testing smart quotes in the file name to fetch
+test/integration/targets/win_module_utils/library/legacy_only_new_way_win_line_ending.ps1 line-endings # Explicitly tests that we still work with Windows line endings
+test/integration/targets/win_module_utils/library/legacy_only_old_way_win_line_ending.ps1 line-endings # Explicitly tests that we still work with Windows line endings
+test/integration/targets/win_script/files/test_script.ps1 pslint:PSAvoidUsingWriteHost # Keep
+test/integration/targets/win_script/files/test_script_creates_file.ps1 pslint:PSAvoidUsingCmdletAliases
+test/integration/targets/win_script/files/test_script_removes_file.ps1 pslint:PSCustomUseLiteralPath
+test/integration/targets/win_script/files/test_script_with_args.ps1 pslint:PSAvoidUsingWriteHost # Keep
+test/integration/targets/win_script/files/test_script_with_splatting.ps1 pslint:PSAvoidUsingWriteHost # Keep
+test/integration/targets/windows-minimal/library/win_ping_syntax_error.ps1 pslint!skip
+test/lib/ansible_test/_data/requirements/constraints.txt test-constraints
+test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt test-constraints
+test/lib/ansible_test/_data/requirements/sanity.ps1 pslint:PSCustomUseLiteralPath # Uses wildcards on purpose
+test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py use-compat-six
+test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
+test/lib/ansible_test/_data/setup/windows-httptester.ps1 pslint:PSCustomUseLiteralPath
+test/support/integration/plugins/module_utils/azure_rm_common.py future-import-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common_rest.py future-import-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common_rest.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/cloud.py future-import-boilerplate
+test/support/integration/plugins/module_utils/cloud.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
+test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
+test/support/integration/plugins/module_utils/database.py future-import-boilerplate
+test/support/integration/plugins/module_utils/database.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/mysql.py future-import-boilerplate
+test/support/integration/plugins/module_utils/mysql.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate
+test/support/integration/plugins/module_utils/network/common/utils.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/postgres.py future-import-boilerplate
+test/support/integration/plugins/module_utils/postgres.py metaclass-boilerplate
+test/support/integration/plugins/modules/lvg.py pylint:blacklisted-name
+test/support/integration/plugins/modules/synchronize.py pylint:blacklisted-name
+test/support/integration/plugins/modules/timezone.py pylint:blacklisted-name
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py no-unicode-literals
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py pep8:E203
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py pep8:E501
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pep8:E231
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pylint:blacklisted-name
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py metaclass-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py future-import-boilerplate
+test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py metaclass-boilerplate
+test/support/windows-integration/plugins/modules/async_status.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/setup.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_copy.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_dsc.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_feature.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_find.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_lineinfile.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_regedit.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_security_policy.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_shell.ps1 pslint!skip
+test/support/windows-integration/plugins/modules/win_wait_for.ps1 pslint!skip
+test/units/executor/test_play_iterator.py pylint:blacklisted-name
+test/units/module_utils/basic/test_deprecate_warn.py pylint:ansible-deprecated-no-version
+test/units/module_utils/basic/test_deprecate_warn.py pylint:ansible-deprecated-version
+test/units/module_utils/basic/test_run_command.py pylint:blacklisted-name
+test/units/module_utils/urls/fixtures/multipart.txt line-endings # Fixture for HTTP tests that use CRLF
+test/units/module_utils/urls/test_Request.py replace-urlopen
+test/units/module_utils/urls/test_fetch_url.py replace-urlopen
+test/units/modules/test_apt.py pylint:blacklisted-name
+test/units/modules/test_known_hosts.py pylint:ansible-bad-function
+test/units/parsing/vault/test_vault.py pylint:blacklisted-name
+test/units/playbook/role/test_role.py pylint:blacklisted-name
+test/units/plugins/test_plugins.py pylint:blacklisted-name
+test/units/template/test_templar.py pylint:blacklisted-name
+test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py future-import-boilerplate # test expects no boilerplate
+test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py metaclass-boilerplate # test expects no boilerplate
+test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py pylint:relative-beyond-top-level
+test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py empty-init # testing that collections don't need inits
+test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py empty-init # testing that collections don't need inits
+test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py empty-init # testing that collections don't need inits
+test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py empty-init # testing that collections don't need inits
+test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py empty-init # testing that collections don't need inits
+test/units/utils/collection_loader/test_collection_loader.py pylint:undefined-variable # magic runtime local var splatting
+test/utils/shippable/check_matrix.py replace-urlopen
+test/utils/shippable/timing.py shebang
diff --git a/test/support/integration/plugins/cache/jsonfile.py b/test/support/integration/plugins/cache/jsonfile.py
new file mode 100644
index 00000000..80b16f55
--- /dev/null
+++ b/test/support/integration/plugins/cache/jsonfile.py
@@ -0,0 +1,63 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: jsonfile
+ short_description: JSON formatted files.
+ description:
+ - This cache uses JSON formatted, per host, files saved to the filesystem.
+ version_added: "1.9"
+ author: Ansible Core (@ansible-core)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
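+
+# A minimal configuration sketch (illustrative values, not part of the plugin
+# docs): the ini keys declared above map to an ansible.cfg like
+#
+#   [defaults]
+#   fact_caching = jsonfile
+#   fact_caching_connection = /tmp/ansible_facts
+#   fact_caching_prefix = facts_
+#   fact_caching_timeout = 86400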
+
+import codecs
+import json
+
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by json files.
+ """
+
+ def _load(self, filepath):
+ # Valid JSON is always UTF-8 encoded.
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return json.load(f, cls=AnsibleJSONDecoder)
+
+ def _dump(self, value, filepath):
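+        # AnsibleJSONEncoder also serializes Ansible-specific types (e.g. vaulted
+        # strings and datetimes) that the stock json encoder would reject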
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
diff --git a/test/support/integration/plugins/filter/json_query.py b/test/support/integration/plugins/filter/json_query.py
new file mode 100644
index 00000000..d1da71b4
--- /dev/null
+++ b/test/support/integration/plugins/filter/json_query.py
@@ -0,0 +1,53 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+ import jmespath
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def json_query(data, expr):
+    '''Query data using the jmespath query language (http://jmespath.org). Example:
+    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+ '''
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jmespath" prior to running '
+ 'json_query filter')
+
+ try:
+ return jmespath.search(expr, data)
+ except jmespath.exceptions.JMESPathError as e:
+ raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ except Exception as e:
+ # For older jmespath, we can get ValueError and TypeError without much info.
+ raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'json_query': json_query
+ }
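+
+# A hedged usage sketch (variable and field names below are illustrative only):
+#
+#   - debug:
+#       msg: "{{ instances | json_query('[?state.name==`running`].instance_id') }}"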
diff --git a/test/support/integration/plugins/inventory/aws_ec2.py b/test/support/integration/plugins/inventory/aws_ec2.py
new file mode 100644
index 00000000..09c42cf9
--- /dev/null
+++ b/test/support/integration/plugins/inventory/aws_ec2.py
@@ -0,0 +1,760 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: aws_ec2
+ plugin_type: inventory
+ short_description: EC2 inventory source
+ requirements:
+ - boto3
+ - botocore
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from Amazon Web Services EC2.
+ - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
+ notes:
+ - If no credentials are provided and the control node has an associated IAM instance profile then the
+ role will be used for authentication.
+ author:
+ - Sloane Hertel (@s-hertel)
+ options:
+ aws_profile:
+ description: The AWS profile
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+ plugin:
+ description: Token that ensures this is a source file for the plugin.
+ required: True
+ choices: ['aws_ec2']
+ iam_role_arn:
+ description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+ credentials with enough privilege to perform the AssumeRole action.
+ version_added: '2.9'
+ regions:
+ description:
+ - A list of regions in which to describe EC2 instances.
+      - If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+ type: list
+ default: []
+ hostnames:
+ description:
+ - A list in order of precedence for hostname variables.
+ - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+ type: list
+ default: []
+ filters:
+ description:
+ - A dictionary of filter value pairs.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ type: dict
+ default: {}
+ include_extra_api_calls:
+ description:
+ - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+ - Spot instances may be persistent and instances may have associated events.
+ type: bool
+ default: False
+ version_added: '2.8'
+ strict_permissions:
+ description:
+ - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+ - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+ type: bool
+ default: True
+ use_contrib_script_compatible_sanitization:
+ description:
+      - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
+        This option allows you to override that, to ease migration from the old inventory script: it
+        matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+ To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
+ you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+ - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+ otherwise the core engine will just use the standard sanitization on top.
+      - This is not the default because such names break certain functionality, since group names end up
+        being used as Python identifiers and not all characters in them are valid for that.
+ type: bool
+ default: False
+ version_added: '2.8'
+'''
+
+EXAMPLES = '''
+# Minimal example using environment vars or instance role credentials
+# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
+plugin: aws_ec2
+regions:
+ - us-east-1
+
+# Example using filters, ignoring permission errors, and specifying the hostname precedence
+plugin: aws_ec2
+boto_profile: aws_profile
+# Populate inventory with instances in these regions
+regions:
+ - us-east-1
+ - us-east-2
+filters:
+ # All instances with their `Environment` tag set to `dev`
+ tag:Environment: dev
+ # All dev and QA hosts
+ tag:Environment:
+ - dev
+ - qa
+ instance.group-id: sg-xxxxxxxx
+# Ignores 403 errors rather than failing
+strict_permissions: False
+# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
+# inventory_hostname use compose (see example below).
+hostnames:
+ - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
+ - tag:CustomDNSName
+ - dns-name
+ - private-ip-address
+
+# Example using constructed features to create groups and set ansible_host
+plugin: aws_ec2
+regions:
+ - us-east-1
+ - us-west-1
+# keyed_groups may be used to create custom groups
+strict: False
+keyed_groups:
+ # Add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'architecture'
+ # Add hosts to tag_Name_Value groups for each Name/Value tag pair
+ - prefix: tag
+ key: tags
+ # Add hosts to e.g. instance_type_z3_tiny
+ - prefix: instance_type
+ key: instance_type
+ # Create security_groups_sg_abcd1234 group for each SG
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ # Create a group for each value of the Application tag
+ - key: tags.Application
+ separator: ''
+ # Create a group per region e.g. aws_region_us_east_2
+ - key: placement.region
+ prefix: aws_region
+ # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
+ - key: tags['Role']
+ prefix: foo
+ parent_group: "project"
+# Set individual variables with compose
+compose:
+ # Use the private IP address to connect to the host
+ # (note: this does not modify inventory_hostname, which is set via I(hostnames))
+ ansible_host: private_ip_address
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+from ansible.module_utils.six import string_types
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict  # used by _get_tag_hostname() and _add_hosts()
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
+
+display = Display()
+
+# The mappings give an array of keys to get from the filter name to the value
+# returned by boto3's EC2 describe_instances method.
+
+instance_meta_filter_to_boto_attr = {
+ 'group-id': ('Groups', 'GroupId'),
+ 'group-name': ('Groups', 'GroupName'),
+ 'network-interface.attachment.instance-owner-id': ('OwnerId',),
+ 'owner-id': ('OwnerId',),
+ 'requester-id': ('RequesterId',),
+ 'reservation-id': ('ReservationId',),
+}
+
+instance_data_filter_to_boto_attr = {
+ 'affinity': ('Placement', 'Affinity'),
+ 'architecture': ('Architecture',),
+ 'availability-zone': ('Placement', 'AvailabilityZone'),
+ 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
+ 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
+ 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
+ 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
+ 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
+ 'client-token': ('ClientToken',),
+ 'dns-name': ('PublicDnsName',),
+ 'host-id': ('Placement', 'HostId'),
+ 'hypervisor': ('Hypervisor',),
+ 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
+ 'image-id': ('ImageId',),
+ 'instance-id': ('InstanceId',),
+ 'instance-lifecycle': ('InstanceLifecycle',),
+ 'instance-state-code': ('State', 'Code'),
+ 'instance-state-name': ('State', 'Name'),
+ 'instance-type': ('InstanceType',),
+ 'instance.group-id': ('SecurityGroups', 'GroupId'),
+ 'instance.group-name': ('SecurityGroups', 'GroupName'),
+ 'ip-address': ('PublicIpAddress',),
+ 'kernel-id': ('KernelId',),
+ 'key-name': ('KeyName',),
+ 'launch-index': ('AmiLaunchIndex',),
+ 'launch-time': ('LaunchTime',),
+ 'monitoring-state': ('Monitoring', 'State'),
+ 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
+ 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
+ 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
+ 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
+ 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
+ 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
+ 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
+ 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
+ 'network-interface.attachment.instance-id': ('InstanceId',),
+ 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
+ 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
+ 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
+ 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
+ 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
+ 'network-interface.description': ('NetworkInterfaces', 'Description'),
+ 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
+ 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
+ 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
+ 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
+ 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
+ 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
+ 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+ # 'network-interface.requester-id': (),
+ 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.status': ('NetworkInterfaces', 'Status'),
+ 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
+ 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
+ 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
+ 'placement-group-name': ('Placement', 'GroupName'),
+ 'platform': ('Platform',),
+ 'private-dns-name': ('PrivateDnsName',),
+ 'private-ip-address': ('PrivateIpAddress',),
+ 'product-code': ('ProductCodes', 'ProductCodeId'),
+ 'product-code.type': ('ProductCodes', 'ProductCodeType'),
+ 'ramdisk-id': ('RamdiskId',),
+ 'reason': ('StateTransitionReason',),
+ 'root-device-name': ('RootDeviceName',),
+ 'root-device-type': ('RootDeviceType',),
+ 'source-dest-check': ('SourceDestCheck',),
+ 'spot-instance-request-id': ('SpotInstanceRequestId',),
+ 'state-reason-code': ('StateReason', 'Code'),
+ 'state-reason-message': ('StateReason', 'Message'),
+ 'subnet-id': ('SubnetId',),
+ 'tag': ('Tags',),
+ 'tag-key': ('Tags',),
+ 'tag-value': ('Tags',),
+ 'tenancy': ('Placement', 'Tenancy'),
+ 'virtualization-type': ('VirtualizationType',),
+ 'vpc-id': ('VpcId',),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'aws_ec2'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ self.group_prefix = 'aws_ec2_'
+
+ # credentials
+ self.boto_profile = None
+ self.aws_secret_access_key = None
+ self.aws_access_key_id = None
+ self.aws_security_token = None
+ self.iam_role_arn = None
+
+ def _compile_values(self, obj, attr):
+ '''
+ :param obj: A list or dict of instance attributes
+ :param attr: A key
+ :return The value(s) found via the attr
+ '''
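+        # illustrative trace: for obj=[{'GroupId': 'sg-1'}, {'GroupId': 'sg-2'}]
+        # and attr='GroupId' this returns ['sg-1', 'sg-2']; a single-element
+        # result is unwrapped to plain 'sg-1'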
+ if obj is None:
+ return
+
+ temp_obj = []
+
+ if isinstance(obj, list) or isinstance(obj, tuple):
+ for each in obj:
+ value = self._compile_values(each, attr)
+ if value:
+ temp_obj.append(value)
+ else:
+ temp_obj = obj.get(attr)
+
+ has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
+ if has_indexes and len(temp_obj) == 1:
+ return temp_obj[0]
+
+ return temp_obj
+
+ def _get_boto_attr_chain(self, filter_name, instance):
+ '''
+ :param filter_name: The filter
+ :param instance: instance dict returned by boto3 ec2 describe_instances()
+ '''
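+        # e.g. the filter 'instance-state-name' maps to ('State', 'Name'), so
+        # the chain walk below effectively returns instance['State']['Name']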
+ allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+ if filter_name not in allowed_filters:
+ raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
+ allowed_filters))
+ if filter_name in instance_data_filter_to_boto_attr:
+ boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+ else:
+ boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+ instance_value = instance
+ for attribute in boto_attr_list:
+ instance_value = self._compile_values(instance_value, attribute)
+ return instance_value
+
+ def _get_credentials(self):
+ '''
+ :return A dictionary of boto client credentials
+ '''
+ boto_params = {}
+ for credential in (('aws_access_key_id', self.aws_access_key_id),
+ ('aws_secret_access_key', self.aws_secret_access_key),
+ ('aws_session_token', self.aws_security_token)):
+ if credential[1]:
+ boto_params[credential[0]] = credential[1]
+
+ return boto_params
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions to create a boto3 client
+
+ Generator that yields a boto3 client and the region
+ '''
+
+ credentials = self._get_credentials()
+ iam_role_arn = self.iam_role_arn
+
+ if not regions:
+ try:
+ # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
+ client = self._get_connection(credentials)
+ resp = client.describe_regions()
+ regions = [x['RegionName'] for x in resp.get('Regions', [])]
+ except botocore.exceptions.NoRegionError:
+                # the above can fail depending on the boto3 version; ignore it and try something else
+ pass
+
+ # fallback to local list hardcoded in boto3 if still no regions
+ if not regions:
+ session = boto3.Session()
+ regions = session.get_available_regions('ec2')
+
+ # I give up, now you MUST give me regions
+ if not regions:
+ raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
+
+ for region in regions:
+ connection = self._get_connection(credentials, region)
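+            # when iam_role_arn is set, this client is replaced below with one
+            # built from the short-lived credentials returned by STS AssumeRole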
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_instances_by_region(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions in which to describe instances
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ :return A list of instance dictionaries
+ '''
+ all_instances = []
+
+ for connection, region in self._boto3_conn(regions):
+ try:
+ # By default find non-terminated/terminating instances
+ if not any([f['Name'] == 'instance-state-name' for f in filters]):
+ filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
+ paginator = connection.get_paginator('describe_instances')
+ reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+ instances = []
+ for r in reservations:
+ new_instances = r['Instances']
+ for instance in new_instances:
+ instance.update(self._get_reservation_details(r))
+ if self.get_option('include_extra_api_calls'):
+ instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
+ instances.extend(new_instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
+ instances = []
+ else:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+ except botocore.exceptions.BotoCoreError as e:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+
+ all_instances.extend(instances)
+
+ return sorted(all_instances, key=lambda x: x['InstanceId'])
+
+ def _get_reservation_details(self, reservation):
+ return {
+ 'OwnerId': reservation['OwnerId'],
+ 'RequesterId': reservation.get('RequesterId', ''),
+ 'ReservationId': reservation['ReservationId']
+ }
+
+ def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
+ host_vars = {'Events': '', 'Persistent': False}
+ try:
+ kwargs = {'InstanceIds': [instance_id]}
+ host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not self.get_option('strict_permissions'):
+ pass
+ else:
+ raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
+ if spot_instance:
+ try:
+ kwargs = {'SpotInstanceRequestIds': [spot_instance]}
+ host_vars['Persistent'] = bool(
+ connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not self.get_option('strict_permissions'):
+ pass
+ else:
+ raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
+ return host_vars
+
+ def _get_tag_hostname(self, preference, instance):
+ tag_hostnames = preference.split('tag:', 1)[1]
+ if ',' in tag_hostnames:
+ tag_hostnames = tag_hostnames.split(',')
+ else:
+ tag_hostnames = [tag_hostnames]
+ tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+ for v in tag_hostnames:
+ if '=' in v:
+ tag_name, tag_value = v.split('=', 1)
+ if tags.get(tag_name) == tag_value:
+ return to_text(tag_name) + "_" + to_text(tag_value)
+ else:
+ tag_value = tags.get(v)
+ if tag_value:
+ return to_text(tag_value)
+ return None
+
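+ # Hedged example of the 'tag:' preference syntax handled above (values are
+ # illustrative): for an instance tagged {'Name': 'web1', 'env': 'prod'},
+ #   'tag:Name=web1'   -> 'Name_web1'
+ #   'tag:env'         -> 'prod'
+ #   'tag:Name=db,env' -> 'prod' (the first matching entry wins)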
+ def _get_hostname(self, instance, hostnames):
+ '''
+ :param instance: an instance dict returned by boto3 ec2 describe_instances()
+ :param hostnames: a list of hostname destination variables in order of preference
+ :return the preferred identifier for the host
+ '''
+ if not hostnames:
+ hostnames = ['dns-name', 'private-dns-name']
+
+ hostname = None
+ for preference in hostnames:
+ if 'tag' in preference:
+ if not preference.startswith('tag:'):
+ raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
+ hostname = self._get_tag_hostname(preference, instance)
+ else:
+ hostname = self._get_boto_attr_chain(preference, instance)
+ if hostname:
+ break
+ if hostname:
+ if ':' in to_text(hostname):
+ return self._sanitize_group_name((to_text(hostname)))
+ else:
+ return to_text(hostname)
+
+ def _query(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions to query
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ '''
+ return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
+
+ def _populate(self, groups, hostnames):
+ for group in groups:
+ group = self.inventory.add_group(group)
+ self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
+ self.inventory.add_child('all', group)
+
+ def _add_hosts(self, hosts, group, hostnames):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ :param hostnames: a list of hostname destination variables in order of preference
+ '''
+ for host in hosts:
+ hostname = self._get_hostname(host, hostnames)
+
+ host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+ host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+ # Allow easier grouping by region
+ host['placement']['region'] = host['placement']['availability_zone'][:-1]
+
+ if not hostname:
+ continue
+ self.inventory.add_host(hostname, group=group)
+ for hostvar, hostval in host.items():
+ self.inventory.set_variable(hostname, hostvar, hostval)
+
+ # Use constructed if applicable
+
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def _set_credentials(self):
+ '''
+ Set the boto credential attributes from the plugin options or the botocore session.
+ '''
+
+ self.boto_profile = self.get_option('aws_profile')
+ self.aws_access_key_id = self.get_option('aws_access_key')
+ self.aws_secret_access_key = self.get_option('aws_secret_key')
+ self.aws_security_token = self.get_option('aws_security_token')
+ self.iam_role_arn = self.get_option('iam_role_arn')
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ session = botocore.session.get_session()
+ try:
+ credentials = session.get_credentials().get_frozen_credentials()
+ except AttributeError:
+ pass
+ else:
+ self.aws_access_key_id = credentials.access_key
+ self.aws_secret_access_key = credentials.secret_key
+ self.aws_security_token = credentials.token
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ def verify_file(self, path):
+ '''
+ :param path: the path to the inventory config file
+ :return True if the file name is valid for this plugin, else False
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
+ return True
+ display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+
+ if self.get_option('use_contrib_script_compatible_sanitization'):
+ self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
+
+ self._set_credentials()
+
+ # get user specifications
+ regions = self.get_option('regions')
+ filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
+ hostnames = self.get_option('hostnames')
+ strict_permissions = self.get_option('strict_permissions')
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query(regions, filters, strict_permissions)
+
+ self._populate(results, hostnames)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+
+ @staticmethod
+ def _legacy_script_compatible_group_sanitization(name):
+
+ # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
+ regex = re.compile(r"[^A-Za-z0-9\_\-]")
+
+ return regex.sub('_', name)
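+ # e.g. (illustrative) 'rack:1/a' sanitizes to 'rack_1_a'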
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': ['i-01234567']
+ }
+ ]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+ {
+ 'MyTagKey': 'MyTagValue'
+ }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ if not tags_list:
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
diff --git a/test/support/integration/plugins/inventory/docker_swarm.py b/test/support/integration/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..d0a95ca0
--- /dev/null
+++ b/test/support/integration/plugins/inventory/docker_swarm.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ plugin_type: inventory
+ version_added: '2.8'
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+ - Uses a YAML configuration file docker_swarm.[yml|yaml].
+ - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
+ I(managers) - all manager nodes; I(leader) - the swarm leader node;
+ I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+ description: The name of this plugin; it should always be set to C(docker_swarm) for this plugin to
+ recognize it as its own.
+ type: str
+ required: true
+ choices: docker_swarm
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+ description: Toggle whether to include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
+ C(EngineVersion))
+ type: bool
+ default: yes
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: no
+ validate_certs:
+ description: Toggle whether to verify the authenticity of the Docker host server when connecting
+ using TLS.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+ description: Provide a valid SSL version number. Default value determined by ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by docker-py.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+ swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without additional
+ modification as the value of option I(docker_host) in Docker Swarm modules when connecting via the API.
+ The port always defaults to C(2376).
+ type: bool
+ default: no
+ include_host_uri_port:
+ description: Override the detected port number included in I(ansible_host_uri)
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: yes
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: yes
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: False
+keyed_groups:
+ # add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+ # e.g. a node labeled w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ from docker.errors import TLSParameterError
+ from docker.tls import TLSConfig
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+ result['tls_hostname'] = parsed_url.netloc
+
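+# Hedged example (host name is illustrative): for docker_host
+# 'tcp://my-docker-host:2376', update_tls_hostname derives the
+# tls_hostname 'my-docker-host' from the URL's netloc.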
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
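+# Hedged usage sketch (values are illustrative): given
+#   auth = {'docker_host': 'tcp://host:2376', 'tls': False, 'tls_verify': True,
+#           'cacert_path': '/ca.pem', 'cert_path': None, 'key_path': None,
+#           'tls_hostname': 'host', 'ssl_version': None,
+#           'api_version': 'auto', 'timeout': 60}
+# get_connect_params(auth, fail_function) takes the "TLS with cacert only"
+# branch above and returns
+#   dict(base_url='https://host:2376', tls=<TLSConfig>, version='auto', timeout=60).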
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
+ if self.get_option('include_host_uri'):
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+ # This is a workaround for a Docker bug where in some cases the leader IP is reported as 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith((self.NAME + '.yaml', self.NAME + '.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/test/support/integration/plugins/inventory/foreman.py b/test/support/integration/plugins/inventory/foreman.py
new file mode 100644
index 00000000..43073f81
--- /dev/null
+++ b/test/support/integration/plugins/inventory/foreman.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: foreman
+ plugin_type: inventory
+ short_description: foreman inventory source
+ version_added: "2.6"
+ requirements:
+ - requests >= 1.1
+ description:
+ - Get inventory hosts from the foreman service.
+ - "Uses a configuration file as an inventory source, it must end in ``.foreman.yml`` or ``.foreman.yaml`` and has a ``plugin: foreman`` entry."
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ options:
+ plugin:
+ description: the name of this plugin; it should always be set to 'foreman' for this plugin to recognize it as its own.
+ required: True
+ choices: ['foreman']
+ url:
+ description: url to foreman
+ default: 'http://localhost:3000'
+ env:
+ - name: FOREMAN_SERVER
+ version_added: "2.8"
+ user:
+ description: foreman authentication user
+ required: True
+ env:
+ - name: FOREMAN_USER
+ version_added: "2.8"
+ password:
+ description: foreman authentication password
+ required: True
+ env:
+ - name: FOREMAN_PASSWORD
+ version_added: "2.8"
+ validate_certs:
+ description: verify SSL certificate if using https
+ type: boolean
+ default: False
+ group_prefix:
+ description: prefix to apply to foreman groups
+ default: foreman_
+ vars_prefix:
+ description: prefix to apply to host variables, does not include facts nor params
+ default: foreman_
+ want_facts:
+ description: Toggle, if True the plugin will retrieve host facts from the server
+ type: boolean
+ default: False
+ want_params:
+ description: Toggle, if true the inventory will retrieve 'all_parameters' information as host vars
+ type: boolean
+ default: False
+ want_hostcollections:
+ description: Toggle, if true the plugin will create Ansible groups for host collections
+ type: boolean
+ default: False
+ version_added: '2.10'
+ want_ansible_ssh_host:
+ description: Toggle, if true the plugin will populate the ansible_ssh_host variable to explicitly specify the connection target
+ type: boolean
+ default: False
+ version_added: '2.10'
+
+'''
+
+EXAMPLES = '''
+# my.foreman.yml
+plugin: foreman
+url: http://localhost:2222
+user: ansible-tester
+password: secure
+validate_certs: False
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name, Constructable
+
+# 3rd party imports
+try:
+ import requests
+ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ raise ImportError
+except ImportError:
+ raise AnsibleError('This script requires python-requests 1.1 as a minimum version')
+
+from requests.auth import HTTPBasicAuth
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable, Constructable):
+ ''' Host inventory parser for ansible using foreman as source. '''
+
+ NAME = 'foreman'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.foreman_url = None
+
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('foreman.yaml', 'foreman.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "foreman.yaml" or "foreman.yml"')
+ return valid
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.auth = HTTPBasicAuth(self.get_option('user'), to_bytes(self.get_option('password')))
+ self.session.verify = self.get_option('validate_certs')
+ return self.session
+
+ def _get_json(self, url, ignore_errors=None):
+
+ if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {url: ''}
+
+ results = []
+ s = self._get_session()
+ params = {'page': 1, 'per_page': 250}
+ while True:
+ ret = s.get(url, params=params)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+
+ # process results
+ # FIXME: This assumes 'return type' matches a specific query,
+ # it will break if we expand the queries and they don't have different types
+ if 'results' not in json:
+ # /hosts/:id does not have a 'results' key
+ results = json
+ break
+ elif isinstance(json['results'], MutableMapping):
+ # /facts are returned as dict in 'results'
+ results = json['results']
+ break
+ else:
+ # for /hosts, 'results' is a paginated list of all hosts
+ results = results + json['results']
+
+ # check for end of paging
+ if len(results) >= json['subtotal']:
+ break
+ if len(json['results']) == 0:
+ self.display.warning("Did not make any progress during loop. expected %d got %d" % (json['subtotal'], len(results)))
+ break
+
+ # get next page
+ params['page'] += 1
+
+ self._cache[self.cache_key][url] = results
+
+ return self._cache[self.cache_key][url]
+
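+ # Hedged example (URL is illustrative): a call such as
+ #   self._get_json('https://foreman.example.com/api/v2/hosts')
+ # pages through ?page=N&per_page=250 until 'subtotal' results are
+ # collected, then caches the merged list under the url key.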
+ def _get_hosts(self):
+ return self._get_json("%s/api/v2/hosts" % self.foreman_url)
+
+ def _get_all_params_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ ret = self._get_json(url, [404])
+ if not ret or not isinstance(ret, MutableMapping) or not ret.get('all_parameters', False):
+ return {}
+ return ret.get('all_parameters')
+
+ def _get_facts_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_host_data_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_facts(self, host):
+ """Fetch all host facts of the host"""
+
+ ret = self._get_facts_by_id(host['id'])
+ if len(ret.values()) == 0:
+ facts = {}
+ elif len(ret.values()) == 1:
+ facts = list(ret.values())[0]
+ else:
+ raise ValueError("More than one set of facts returned for '%s'" % host)
+ return facts
+
+ def _populate(self):
+
+ for host in self._get_hosts():
+
+ if host.get('name'):
+ host_name = self.inventory.add_host(host['name'])
+
+ # create directly mapped groups
+ group_name = host.get('hostgroup_title', host.get('hostgroup_name'))
+ if group_name:
+ group_name = to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group_name.lower().replace(" ", "")))
+ group_name = self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, host_name)
+
+ # set host vars from host info
+ try:
+ for k, v in host.items():
+ if k not in ('name', 'hostgroup_title', 'hostgroup_name'):
+ try:
+ self.inventory.set_variable(host_name, self.get_option('vars_prefix') + k, v)
+ except ValueError as e:
+ self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (host, k, to_text(e)))
+ except ValueError as e:
+ self.display.warning("Could not get host info for %s, skipping: %s" % (host_name, to_text(e)))
+
+ # set host vars from params
+ if self.get_option('want_params'):
+ for p in self._get_all_params_by_id(host['id']):
+ try:
+ self.inventory.set_variable(host_name, p['name'], p['value'])
+ except ValueError as e:
+ self.display.warning("Could not set hostvar %s to '%s' for the '%s' host, skipping: %s" %
+ (p['name'], to_native(p['value']), host, to_native(e)))
+
+ # set host vars from facts
+ if self.get_option('want_facts'):
+ self.inventory.set_variable(host_name, 'foreman_facts', self._get_facts(host))
+
+ # create group for host collections
+ if self.get_option('want_hostcollections'):
+ host_data = self._get_host_data_by_id(host['id'])
+ hostcollections = host_data.get('host_collections')
+ if hostcollections:
+ # Create Ansible groups for host collections
+ for hostcollection in hostcollections:
+ try:
+ hostcollection_group = to_safe_group_name('%shostcollection_%s' % (self.get_option('group_prefix'),
+ hostcollection['name'].lower().replace(" ", "")))
+ hostcollection_group = self.inventory.add_group(hostcollection_group)
+ self.inventory.add_child(hostcollection_group, host_name)
+ except ValueError as e:
+ self.display.warning("Could not create groups for host collections for %s, skipping: %s" % (host_name, to_text(e)))
+
+ # put ansible_ssh_host as hostvar
+ if self.get_option('want_ansible_ssh_host'):
+ for key in ('ip', 'ipv4', 'ipv6'):
+ if host.get(key):
+ try:
+ self.inventory.set_variable(host_name, 'ansible_ssh_host', host[key])
+ break
+ except ValueError as e:
+ self.display.warning("Could not set hostvar ansible_ssh_host to '%s' for the '%s' host, skipping: %s" %
+ (host[key], host_name, to_text(e)))
+
+ strict = self.get_option('strict')
+
+ hostvars = self.inventory.get_host(host_name).get_vars()
+ self._set_composite_vars(self.get_option('compose'), hostvars, host_name, strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host_name, strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host_name, strict)
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.foreman_url = self.get_option('url')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ # actually populate inventory
+ self._populate()
diff --git a/test/support/integration/plugins/lookup/rabbitmq.py b/test/support/integration/plugins/lookup/rabbitmq.py
new file mode 100644
index 00000000..7c2745f4
--- /dev/null
+++ b/test/support/integration/plugins/lookup/rabbitmq.py
@@ -0,0 +1,190 @@
+# (c) 2018, John Imison <john+github@imison.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: rabbitmq
+ author: John Imison <@Im0>
+ version_added: "2.8"
+ short_description: Retrieve messages from an AMQP/AMQPS RabbitMQ queue.
+ description:
+ - This lookup uses a basic get to retrieve all messages, or a limited number C(count) of messages, from a RabbitMQ queue.
+ options:
+ url:
+ description:
+ - A URI connection string to connect to the AMQP/AMQPS RabbitMQ server.
+ - For more information refer to the URI spec U(https://www.rabbitmq.com/uri-spec.html).
+ required: True
+ queue:
+ description:
+ - The queue to get messages from.
+ required: True
+ count:
+ description:
+ - How many messages to collect from the queue.
+ - If not set, defaults to retrieving all the messages from the queue.
+ requirements:
+ - The python pika package U(https://pypi.org/project/pika/).
+ notes:
+ - This lookup implements BlockingChannel.basic_get to get messages from a RabbitMQ server.
+ - After retrieving a message from the server, receipt of the message is acknowledged and the message on the server is deleted.
+ - Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library.
+ - More information about pika can be found at U(https://pika.readthedocs.io/en/stable/).
+ - This plugin is tested against RabbitMQ. Other AMQP 0.9.1 protocol based servers may work but are not tested/guaranteed.
+ - Assigning the returned messages to a variable under C(vars) may result in unexpected results as the lookup is evaluated every time the
+ variable is referenced.
+ - Currently this plugin only handles text based messages from a queue. Unexpected results may occur when retrieving binary data.
+"""
+
+
+EXAMPLES = """
+- name: Get all messages off a queue
+ debug:
+ msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello') }}"
+
+
+# If you intend to use the returned messages as a variable in more than
+# one task (e.g. debug, template), it is recommended to use set_fact.
+
+- name: Get 2 messages off a queue and set a fact for re-use
+ set_fact:
+ messages: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello', count=2) }}"
+
+- name: Dump out contents of the messages
+ debug:
+ var: messages
+
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list of dictionaries with keys and values from the queue.
+ type: list
+ contains:
+ content_type:
+ description: The content_type on the message in the queue.
+ type: str
+ delivery_mode:
+ description: The delivery_mode on the message in the queue.
+ type: str
+ delivery_tag:
+ description: The delivery_tag on the message in the queue.
+ type: str
+ exchange:
+ description: The exchange the message came from.
+ type: str
+ message_count:
+ description: The message_count for the message on the queue.
+ type: str
+ msg:
+ description: The content of the message.
+ type: str
+ redelivered:
+ description: The redelivered flag. True if the message has been delivered before.
+ type: bool
+ routing_key:
+ description: The routing_key on the message in the queue.
+ type: str
+ headers:
+ description: The headers for the message returned from the queue.
+ type: dict
+ json:
+ description: If application/json is specified in content_type, json will be loaded into variables.
+ type: dict
+
+"""
+
+import json
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+try:
+ import pika
+ from pika import spec
+ HAS_PIKA = True
+except ImportError:
+ HAS_PIKA = False
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, url=None, queue=None, count=None):
+ if not HAS_PIKA:
+ raise AnsibleError('pika python package is required for rabbitmq lookup.')
+ if not url:
+ raise AnsibleError('URL is required for rabbitmq lookup.')
+ if not queue:
+ raise AnsibleError('Queue is required for rabbitmq lookup.')
+
+ display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))
+
+ try:
+ parameters = pika.URLParameters(url)
+ except Exception as e:
+ raise AnsibleError("URL malformed: %s" % to_native(e))
+
+ try:
+ connection = pika.BlockingConnection(parameters)
+ except Exception as e:
+ raise AnsibleError("Connection issue: %s" % to_native(e))
+
+ try:
+ conn_channel = connection.channel()
+ except pika.exceptions.AMQPChannelError as e:
+ try:
+ connection.close()
+ except pika.exceptions.AMQPConnectionError as ie:
+ raise AnsibleError("Channel and connection closing issues: %s / %s" % to_native(e), to_native(ie))
+ raise AnsibleError("Channel issue: %s" % to_native(e))
+
+ ret = []
+ idx = 0
+
+ while True:
+ method_frame, properties, body = conn_channel.basic_get(queue=queue)
+ if method_frame:
+ display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))
+
+ # TODO: In the future consider checking content_type and handle text/binary data differently.
+ msg_details = dict({
+ 'msg': to_text(body),
+ 'message_count': method_frame.message_count,
+ 'routing_key': method_frame.routing_key,
+ 'delivery_tag': method_frame.delivery_tag,
+ 'redelivered': method_frame.redelivered,
+ 'exchange': method_frame.exchange,
+ 'delivery_mode': properties.delivery_mode,
+ 'content_type': properties.content_type,
+ 'headers': properties.headers
+ })
+ if properties.content_type == 'application/json':
+ try:
+ msg_details['json'] = json.loads(msg_details['msg'])
+ except ValueError as e:
+ raise AnsibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e)))
+
+ ret.append(msg_details)
+ conn_channel.basic_ack(method_frame.delivery_tag)
+ idx += 1
+ if method_frame.message_count == 0 or idx == count:
+ break
+ # If we didn't get a method_frame, exit.
+ else:
+ break
+
+ if connection.is_closed:
+ return [ret]
+ else:
+ try:
+ connection.close()
+ except pika.exceptions.AMQPConnectionError:
+ pass
+ return [ret]
diff --git a/test/support/integration/plugins/module_utils/aws/__init__.py b/test/support/integration/plugins/module_utils/aws/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/__init__.py
diff --git a/test/support/integration/plugins/module_utils/aws/core.py b/test/support/integration/plugins/module_utils/aws/core.py
new file mode 100644
index 00000000..c4527b6d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/core.py
@@ -0,0 +1,335 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+**This code is not yet ready for use in user modules. As of 2017**
+**and through to 2018, the interface is likely to change**
+**aggressively as the exact correct interface for ansible AWS modules**
+**is identified. In particular, until this notice goes away or is**
+**changed, methods may disappear from the interface. Please don't**
+**publish modules using this except directly to the main Ansible**
+**development repository.**
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible.module_utils.aws import AnsibleAWSModule
+ module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+ mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+module-created client. To add retries to a client, create a client:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import logging
+import traceback
+from functools import wraps
+from distutils.version import LooseVersion
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ # Python 3
+ from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
+
+
+class AnsibleAWSModule(object):
+ """An ansible module class for AWS modules
+
+ AnsibleAWSModule provides a class for building modules which
+ connect to Amazon Web Services. The interface is currently more
+ restricted than the basic module class with the aim that later the
+ basic module class can be reduced. If you find that any key
+ feature is missing please contact the author/Ansible AWS team
+ (available on #ansible-aws on IRC) to request the additional
+ features needed.
+ """
+ default_settings = {
+ "default_args": True,
+ "check_boto3": True,
+ "auto_retry": True,
+ "module_class": AnsibleModule
+ }
+
+ def __init__(self, **kwargs):
+ local_settings = {}
+ for key in AnsibleAWSModule.default_settings:
+ try:
+ local_settings[key] = kwargs.pop(key)
+ except KeyError:
+ local_settings[key] = AnsibleAWSModule.default_settings[key]
+ self.settings = local_settings
+
+ if local_settings["default_args"]:
+ # ec2_argument_spec contains the region so we use that; there's a patch coming which
+ # will add it to aws_argument_spec so if that's accepted then later we should change
+ # over
+ argument_spec_full = ec2_argument_spec()
+ try:
+ argument_spec_full.update(kwargs["argument_spec"])
+ except (TypeError, NameError):
+ pass
+ kwargs["argument_spec"] = argument_spec_full
+
+ self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+ if local_settings["check_boto3"] and not HAS_BOTO3:
+ self._module.fail_json(
+ msg=missing_required_lib('botocore or boto3'))
+
+ self.check_mode = self._module.check_mode
+ self._diff = self._module._diff
+ self._name = self._module._name
+
+ self._botocore_endpoint_log_stream = StringIO()
+ self.logger = None
+ if self.params.get('debug_botocore_endpoint_logs'):
+ self.logger = logging.getLogger('botocore.endpoint')
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
+
+ @property
+ def params(self):
+ return self._module.params
+
+ def _get_resource_action_list(self):
+ actions = []
+ for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
+ ln = ln.strip()
+ if not ln:
+ continue
+ found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
+ if found_operational_request:
+ operation_request = found_operational_request.group(0)[20:-1]
+ resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
+ actions.append("{0}:{1}".format(resource, operation_request))
+ return list(set(actions))
+
+ def exit_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.exit_json(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.fail_json(*args, **kwargs)
+
+ def debug(self, *args, **kwargs):
+ return self._module.debug(*args, **kwargs)
+
+ def warn(self, *args, **kwargs):
+ return self._module.warn(*args, **kwargs)
+
+ def deprecate(self, *args, **kwargs):
+ return self._module.deprecate(*args, **kwargs)
+
+ def boolean(self, *args, **kwargs):
+ return self._module.boolean(*args, **kwargs)
+
+ def md5(self, *args, **kwargs):
+ return self._module.md5(*args, **kwargs)
+
+ def client(self, service, retry_decorator=None):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ conn = boto3_conn(self, conn_type='client', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+
+ def resource(self, service):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ return boto3_conn(self, conn_type='resource', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+
+ @property
+ def region(self, boto3=True):
+ return get_aws_region(self, boto3)
+
+ def fail_json_aws(self, exception, msg=None):
+ """call fail_json with processed exception
+
+ function for converting exceptions thrown by AWS SDK modules,
+ botocore, boto3 and boto, into nice error messages.
+ """
+ last_traceback = traceback.format_exc()
+
+ # to_native is trusted to handle exceptions that str() could
+ # convert to text.
+ try:
+ except_msg = to_native(exception.message)
+ except AttributeError:
+ except_msg = to_native(exception)
+
+ if msg is not None:
+ message = '{0}: {1}'.format(msg, except_msg)
+ else:
+ message = except_msg
+
+ try:
+ response = exception.response
+ except AttributeError:
+ response = None
+
+ failure = dict(
+ msg=message,
+ exception=last_traceback,
+ **self._gather_versions()
+ )
+
+ if response is not None:
+ failure.update(**camel_dict_to_snake_dict(response))
+
+ self.fail_json(**failure)
+
+ def _gather_versions(self):
+ """Gather AWS SDK (boto3 and botocore) dependency versions
+
+ Returns {'boto3_version': str, 'botocore_version': str}
+ Returns {} if neither are installed
+ """
+ if not HAS_BOTO3:
+ return {}
+ import boto3
+ import botocore
+ return dict(boto3_version=boto3.__version__,
+ botocore_version=botocore.__version__)
+
+ def boto3_at_least(self, desired):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ Usage:
+ if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+ # conditionally fail on old boto3 versions if a specific feature is not supported
+ module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+
+ def botocore_at_least(self, desired):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ Usage:
+ if not module.botocore_at_least('1.2.3'):
+ module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+ if not module.botocore_at_least('1.5.3'):
+ module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+ 'To wait until Service X resources are fully available, update botocore.')
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
+
+
+class _RetryingBotoClientWrapper(object):
+ __never_wait = (
+ 'get_paginator', 'can_paginate',
+ 'get_waiter', 'generate_presigned_url',
+ )
+
+ def __init__(self, client, retry):
+ self.client = client
+ self.retry = retry
+
+ def _create_optional_retry_wrapper_function(self, unwrapped):
+ retrying_wrapper = self.retry(unwrapped)
+
+ @wraps(unwrapped)
+ def deciding_wrapper(aws_retry=False, *args, **kwargs):
+ if aws_retry:
+ return retrying_wrapper(*args, **kwargs)
+ else:
+ return unwrapped(*args, **kwargs)
+ return deciding_wrapper
+
+ def __getattr__(self, name):
+ unwrapped = getattr(self.client, name)
+ if name in self.__never_wait:
+ return unwrapped
+ elif callable(unwrapped):
+ wrapped = self._create_optional_retry_wrapper_function(unwrapped)
+ setattr(self, name, wrapped)
+ return wrapped
+ else:
+ return unwrapped
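+
+ # Hedged sketch of the wrapper's behaviour (client and decorator are illustrative):
+ #   wrapped = _RetryingBotoClientWrapper(client, AWSRetry.jittered_backoff(retries=10))
+ #   wrapped.describe_instances(aws_retry=True)   # retried via the decorator
+ #   wrapped.describe_instances()                 # passed through unwrapped
+ #   wrapped.get_paginator('describe_instances')  # never wrapped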
+
+
+def is_boto3_error_code(code, e=None):
+ """Check if the botocore exception is raised by a specific error code.
+
+ Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match
+
+ Example:
+ try:
+ ec2.describe_instances(InstanceIds=['potato'])
+ except is_boto3_error_code('InvalidInstanceID.Malformed'):
+ # handle the error for that code case
+ pass
+ except botocore.exceptions.ClientError as e:
+ # handle the generic error case for all other codes
+ pass
+ """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if isinstance(e, ClientError) and e.response['Error']['Code'] == code:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
+
+
+def get_boto3_client_method_parameters(client, method_name, required=False):
+ op = client.meta.method_to_api_mapping.get(method_name)
+ input_shape = client._service_model.operation_model(op).input_shape
+ if not input_shape:
+ parameters = []
+ elif required:
+ parameters = list(input_shape.required_members)
+ else:
+ parameters = list(input_shape.members.keys())
+ return parameters
diff --git a/test/support/integration/plugins/module_utils/aws/iam.py b/test/support/integration/plugins/module_utils/aws/iam.py
new file mode 100644
index 00000000..f05999aa
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/iam.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import traceback
+
+try:
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+from ansible.module_utils._text import to_native
+
+
+def get_aws_account_id(module):
+ """ Given AnsibleAWSModule instance, get the active AWS account ID
+
+ get_account_id tries to find out the account that we are working
+ on. It's not guaranteed that this will be easy so we try in
+ several different ways. Giving either IAM or STS privileges to
+ the account should be enough to permit this.
+ """
+ account_id = None
+ try:
+ sts_client = module.client('sts')
+ account_id = sts_client.get_caller_identity().get('Account')
+ # non-STS sessions may also get NoCredentialsError from this STS call, so
+ # we must catch that too and try the IAM version
+ except (ClientError, NoCredentialsError):
+ try:
+ iam_client = module.client('iam')
+ account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
+ except ClientError as e:
+ if (e.response['Error']['Code'] == 'AccessDenied'):
+ except_msg = to_native(e)
+ # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
+ match = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg)
+ if match:
+ account_id = match.group(1)
+ if account_id is None:
+ module.fail_json_aws(e, msg="Could not get AWS account information")
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
+ exception=traceback.format_exc()
+ )
+ if not account_id:
+ module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
+ return to_native(account_id)
diff --git a/test/support/integration/plugins/module_utils/aws/s3.py b/test/support/integration/plugins/module_utils/aws/s3.py
new file mode 100644
index 00000000..2185869d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/s3.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by the calling module
+
+HAS_MD5 = True
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ HAS_MD5 = False
+
+
+def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ with open(filename, 'rb') as f:
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ digests.append(md5(f.read(int(head['ContentLength']))))
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(module.md5(filename))
diff --git a/test/support/integration/plugins/module_utils/aws/waiters.py b/test/support/integration/plugins/module_utils/aws/waiters.py
new file mode 100644
index 00000000..25db598b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/waiters.py
@@ -0,0 +1,405 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore.waiter as core_waiter
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+
+ec2_data = {
+ "version": 2,
+ "waiters": {
+ "InternetGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInternetGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(InternetGateways) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "RouteTableExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeRouteTables",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(RouteTables[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidRouteTableID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SecurityGroupExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSecurityGroups",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(SecurityGroups[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidGroup.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetHasMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetHasAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "success"
+ },
+ ]
+ },
+ "VpnGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpnGateways[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpnGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayDetached": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "VpnGateways[0].State == 'available'",
+ "state": "success"
+ },
+ ]
+ },
+ }
+}
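+
+# Note on the waiter configs in this file: botocore polls the named
+# "operation" every "delay" seconds, up to "maxAttempts" times. A "path"
+# acceptor evaluates the JMESPath "argument" against the response and
+# compares the result to "expected" ("pathAll" requires every element to
+# match), while an "error" acceptor matches a raised service error code.
+# The first matching acceptor moves the waiter into its "state".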
+
+
+waf_data = {
+ "version": 2,
+ "waiters": {
+ "ChangeTokenInSync": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "GetChangeTokenStatus",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "ChangeTokenStatus == 'INSYNC'",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "WAFInternalErrorException",
+ "state": "retry"
+ }
+ ]
+ }
+ }
+}
+
+eks_data = {
+ "version": 2,
+ "waiters": {
+ "ClusterActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "cluster.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "ClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "cluster.status != 'DELETED'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ }
+ }
+}
+
+
+rds_data = {
+ "version": 2,
+ "waiters": {
+ "DBInstanceStopped": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].DBInstanceStatus",
+ "expected": "stopped"
+ },
+ ]
+ }
+ }
+}
+
+
+def ec2_model(name):
+ ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
+ return ec2_models.get_waiter(name)
+
+
+def waf_model(name):
+ waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
+ return waf_models.get_waiter(name)
+
+
+def eks_model(name):
+ eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
+ return eks_models.get_waiter(name)
+
+
+def rds_model(name):
+ rds_models = core_waiter.WaiterModel(waiter_config=rds_data)
+ return rds_models.get_waiter(name)
+
+
+waiters_by_name = {
+ ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'internet_gateway_exists',
+ ec2_model('InternetGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_internet_gateways
+ )),
+ ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
+ 'route_table_exists',
+ ec2_model('RouteTableExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_route_tables
+ )),
+ ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
+ 'security_group_exists',
+ ec2_model('SecurityGroupExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_security_groups
+ )),
+ ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
+ 'subnet_exists',
+ ec2_model('SubnetExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_map_public',
+ ec2_model('SubnetHasMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_map_public',
+ ec2_model('SubnetNoMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_assign_ipv6',
+ ec2_model('SubnetHasAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_assign_ipv6',
+ ec2_model('SubnetNoAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
+ 'subnet_deleted',
+ ec2_model('SubnetDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_exists',
+ ec2_model('VpnGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_detached',
+ ec2_model('VpnGatewayDetached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
+ 'cluster_active',
+ eks_model('ClusterActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
+ 'cluster_deleted',
+ eks_model('ClusterDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
+ 'db_instance_stopped',
+ rds_model('DBInstanceStopped'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+}
+
+
+def get_waiter(client, waiter_name):
+ try:
+ return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
+ except KeyError:
+ raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
+ waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
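+
+
+# Hedged usage sketch (assumes boto3 is installed and AWS credentials are
+# configured; the region and subnet ID are illustrative):
+#
+#   import boto3
+#   ec2 = boto3.client('ec2', region_name='us-east-1')
+#   waiter = get_waiter(ec2, 'subnet_exists')
+#   waiter.wait(SubnetIds=['subnet-0123456789abcdef0'])
+#
+# get_waiter keys on the client's class name ('EC2' here), so any client
+# whose type name matches an entry in waiters_by_name can be passed in.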
diff --git a/test/support/integration/plugins/module_utils/azure_rm_common.py b/test/support/integration/plugins/module_utils/azure_rm_common.py
new file mode 100644
index 00000000..a7b55e97
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/azure_rm_common.py
@@ -0,0 +1,1473 @@
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import os
+import re
+import types
+import copy
+import inspect
+import traceback
+import json
+
+from os.path import expanduser
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+try:
+ from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+except Exception:
+ ANSIBLE_VERSION = 'unknown'
+from ansible.module_utils.six.moves import configparser
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+AZURE_COMMON_ARGS = dict(
+ auth_source=dict(
+ type='str',
+ choices=['auto', 'cli', 'env', 'credential_file', 'msi']
+ ),
+ profile=dict(type='str'),
+ subscription_id=dict(type='str'),
+ client_id=dict(type='str', no_log=True),
+ secret=dict(type='str', no_log=True),
+ tenant=dict(type='str', no_log=True),
+ ad_user=dict(type='str', no_log=True),
+ password=dict(type='str', no_log=True),
+ cloud_environment=dict(type='str', default='AzureCloud'),
+ cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
+ api_profile=dict(type='str', default='latest'),
+ adfs_authority_url=dict(type='str', default=None)
+)
+
+AZURE_CREDENTIAL_ENV_MAPPING = dict(
+ profile='AZURE_PROFILE',
+ subscription_id='AZURE_SUBSCRIPTION_ID',
+ client_id='AZURE_CLIENT_ID',
+ secret='AZURE_SECRET',
+ tenant='AZURE_TENANT',
+ ad_user='AZURE_AD_USER',
+ password='AZURE_PASSWORD',
+ cloud_environment='AZURE_CLOUD_ENVIRONMENT',
+ cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
+ adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
+)
+
+
+class SDKProfile(object): # pylint: disable=too-few-public-methods
+
+ def __init__(self, default_api_version, profile=None):
+ """Constructor.
+
+ :param str default_api_version: Default API version if not overridden by a profile. Nullable.
+ :param profile: A dict operation group name to API version.
+ :type profile: dict[str, str]
+ """
+ self.profile = profile if profile is not None else {}
+ self.profile[None] = default_api_version
+
+ @property
+ def default_api_version(self):
+ return self.profile[None]
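+
+    # For illustration (a hedged sketch mirroring the profile tables below):
+    #   profile = SDKProfile('2017-12-01', {'disks': '2017-03-30'})
+    #   profile.default_api_version  # -> '2017-12-01'
+    #   profile.profile['disks']     # -> '2017-03-30'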
+
+
+# FUTURE: this should come from the SDK or an external location.
+# For now, we have to copy from azure-cli
+AZURE_API_PROFILES = {
+ 'latest': {
+ 'ContainerInstanceManagementClient': '2018-02-01-preview',
+ 'ComputeManagementClient': dict(
+ default_api_version='2018-10-01',
+ resource_skus='2018-10-01',
+ disks='2018-06-01',
+ snapshots='2018-10-01',
+ virtual_machine_run_commands='2018-10-01'
+ ),
+ 'NetworkManagementClient': '2018-08-01',
+ 'ResourceManagementClient': '2017-05-10',
+ 'StorageManagementClient': '2017-10-01',
+ 'WebSiteManagementClient': '2018-02-01',
+ 'PostgreSQLManagementClient': '2017-12-01',
+ 'MySQLManagementClient': '2017-12-01',
+ 'MariaDBManagementClient': '2019-03-01',
+ 'ManagementLockClient': '2016-09-01'
+ },
+ '2019-03-01-hybrid': {
+ 'StorageManagementClient': '2017-10-01',
+ 'NetworkManagementClient': '2017-10-01',
+ 'ComputeManagementClient': SDKProfile('2017-12-01', {
+ 'resource_skus': '2017-09-01',
+ 'disks': '2017-03-30',
+ 'snapshots': '2017-03-30'
+ }),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2016-09-01',
+ 'PolicyClient': '2016-12-01',
+ 'ResourceManagementClient': '2018-05-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01',
+ 'policy_assignments': '2016-12-01',
+ 'policy_definitions': '2016-12-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2017-11-09',
+ 'azure.multiapi.cosmosdb': '2017-04-17'
+ },
+ '2018-03-01-hybrid': {
+ 'StorageManagementClient': '2016-01-01',
+ 'NetworkManagementClient': '2017-10-01',
+ 'ComputeManagementClient': SDKProfile('2017-03-30'),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2016-09-01',
+ 'PolicyClient': '2016-12-01',
+ 'ResourceManagementClient': '2018-02-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2017-04-17',
+ 'azure.multiapi.cosmosdb': '2017-04-17'
+ },
+ '2017-03-09-profile': {
+ 'StorageManagementClient': '2016-01-01',
+ 'NetworkManagementClient': '2015-06-15',
+ 'ComputeManagementClient': SDKProfile('2016-03-30'),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2015-01-01',
+ 'PolicyClient': '2015-10-01-preview',
+ 'ResourceManagementClient': '2016-02-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2015-04-05'
+ }
+}
+
+AZURE_TAG_ARGS = dict(
+ tags=dict(type='dict'),
+ append_tags=dict(type='bool', default=True),
+)
+
+AZURE_COMMON_REQUIRED_IF = [
+ ('log_mode', 'file', ['log_path'])
+]
+
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
+CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
+VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
+
+CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
+ r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
+
+AZURE_SUCCESS_STATE = "Succeeded"
+AZURE_FAILED_STATE = "Failed"
+
+HAS_AZURE = True
+HAS_AZURE_EXC = None
+HAS_AZURE_CLI_CORE = True
+HAS_AZURE_CLI_CORE_EXC = None
+
+HAS_MSRESTAZURE = True
+HAS_MSRESTAZURE_EXC = None
+
+try:
+ import importlib
+except ImportError:
+    # This passes the sanity import test, but does not provide a user-friendly error message.
+    # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
+ importlib = None
+
+try:
+ from packaging.version import Version
+ HAS_PACKAGING_VERSION = True
+ HAS_PACKAGING_VERSION_EXC = None
+except ImportError:
+ Version = None
+ HAS_PACKAGING_VERSION = False
+ HAS_PACKAGING_VERSION_EXC = traceback.format_exc()
+
+# NB: packaging issues sometimes cause msrestazure not to be installed; check it separately
+try:
+ from msrest.serialization import Serializer
+except ImportError:
+ HAS_MSRESTAZURE_EXC = traceback.format_exc()
+ HAS_MSRESTAZURE = False
+
+try:
+ from enum import Enum
+ from msrestazure.azure_active_directory import AADTokenCredentials
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_active_directory import MSIAuthentication
+ from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
+ from msrestazure import azure_cloud
+ from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
+ from azure.mgmt.monitor.version import VERSION as monitor_client_version
+ from azure.mgmt.network.version import VERSION as network_client_version
+ from azure.mgmt.storage.version import VERSION as storage_client_version
+ from azure.mgmt.compute.version import VERSION as compute_client_version
+ from azure.mgmt.resource.version import VERSION as resource_client_version
+ from azure.mgmt.dns.version import VERSION as dns_client_version
+ from azure.mgmt.web.version import VERSION as web_client_version
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.resource.subscriptions import SubscriptionClient
+ from azure.mgmt.storage import StorageManagementClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.mgmt.dns import DnsManagementClient
+ from azure.mgmt.monitor import MonitorManagementClient
+ from azure.mgmt.web import WebSiteManagementClient
+ from azure.mgmt.containerservice import ContainerServiceClient
+ from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
+ from azure.mgmt.trafficmanager import TrafficManagerManagementClient
+ from azure.storage.cloudstorageaccount import CloudStorageAccount
+ from azure.storage.blob import PageBlobService, BlockBlobService
+ from adal.authentication_context import AuthenticationContext
+ from azure.mgmt.sql import SqlManagementClient
+ from azure.mgmt.servicebus import ServiceBusManagementClient
+ import azure.mgmt.servicebus.models as ServicebusModel
+ from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
+ from azure.mgmt.rdbms.mysql import MySQLManagementClient
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from azure.mgmt.containerregistry import ContainerRegistryManagementClient
+ from azure.mgmt.containerinstance import ContainerInstanceManagementClient
+ from azure.mgmt.loganalytics import LogAnalyticsManagementClient
+ import azure.mgmt.loganalytics.models as LogAnalyticsModels
+ from azure.mgmt.automation import AutomationClient
+ import azure.mgmt.automation.models as AutomationModel
+ from azure.mgmt.iothub import IotHubClient
+ from azure.mgmt.iothub import models as IoTHubModels
+ from msrest.service_client import ServiceClient
+ from msrestazure import AzureConfiguration
+ from msrest.authentication import Authentication
+ from azure.mgmt.resource.locks import ManagementLockClient
+except ImportError as exc:
+ Authentication = object
+ HAS_AZURE_EXC = traceback.format_exc()
+ HAS_AZURE = False
+
+from base64 import b64encode, b64decode
+from hashlib import sha256
+from hmac import HMAC
+from time import time
+
+try:
+ from urllib import (urlencode, quote_plus)
+except ImportError:
+ from urllib.parse import (urlencode, quote_plus)
+
+try:
+ from azure.cli.core.util import CLIError
+ from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
+ from azure.common.cloud import get_cli_active_cloud
+except ImportError:
+ HAS_AZURE_CLI_CORE = False
+ HAS_AZURE_CLI_CORE_EXC = None
+ CLIError = Exception
+
+
+def azure_id_to_dict(id):
+ pieces = re.sub(r'^\/', '', id).split('/')
+ result = {}
+ index = 0
+ while index < len(pieces) - 1:
+ result[pieces[index]] = pieces[index + 1]
+ index += 1
+ return result
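+    # Illustration: the loop walks the pieces pairwise with overlap, so
+    # '/subscriptions/xxx/resourceGroups/myrg' yields
+    # {'subscriptions': 'xxx', 'xxx': 'resourceGroups', 'resourceGroups': 'myrg'};
+    # callers should therefore read only the well-known keys such as
+    # 'resourceGroups'.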
+
+
+def format_resource_id(val, subscription_id, namespace, types, resource_group):
+ return resource_id(name=val,
+ resource_group=resource_group,
+ namespace=namespace,
+ type=types,
+ subscription=subscription_id) if not is_valid_resource_id(val) else val
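+    # Illustration (values hypothetical): format_resource_id('myvm', sub,
+    # 'Microsoft.Compute', 'virtualMachines', 'myrg') builds
+    # '/subscriptions/<sub>/resourceGroups/myrg/providers/Microsoft.Compute/virtualMachines/myvm',
+    # while an input that is already a valid resource ID is returned unchanged.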
+
+
+def normalize_location_name(name):
+ return name.replace(' ', '').lower()
+
+
+# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
+# or generate the requirements files from this so we only have one source of truth to maintain...
+AZURE_PKG_VERSIONS = {
+ 'StorageManagementClient': {
+ 'package_name': 'storage',
+ 'expected_version': '3.1.0'
+ },
+ 'ComputeManagementClient': {
+ 'package_name': 'compute',
+ 'expected_version': '4.4.0'
+ },
+ 'ContainerInstanceManagementClient': {
+ 'package_name': 'containerinstance',
+ 'expected_version': '0.4.0'
+ },
+ 'NetworkManagementClient': {
+ 'package_name': 'network',
+ 'expected_version': '2.3.0'
+ },
+ 'ResourceManagementClient': {
+ 'package_name': 'resource',
+ 'expected_version': '2.1.0'
+ },
+ 'DnsManagementClient': {
+ 'package_name': 'dns',
+ 'expected_version': '2.1.0'
+ },
+ 'WebSiteManagementClient': {
+ 'package_name': 'web',
+ 'expected_version': '0.41.0'
+ },
+ 'TrafficManagerManagementClient': {
+ 'package_name': 'trafficmanager',
+ 'expected_version': '0.50.0'
+ },
+} if HAS_AZURE else {}
+
+
+AZURE_MIN_RELEASE = '2.0.0'
+
+
+class AzureRMModuleBase(object):
+ def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None,
+ required_one_of=None, add_file_common_args=False, supports_check_mode=False,
+ required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(AZURE_COMMON_ARGS)
+ if supports_tags:
+ merged_arg_spec.update(AZURE_TAG_ARGS)
+
+ if derived_arg_spec:
+ merged_arg_spec.update(derived_arg_spec)
+
+ merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
+ if required_if:
+ merged_required_if += required_if
+
+ self.module = AnsibleModule(argument_spec=merged_arg_spec,
+ bypass_checks=bypass_checks,
+ no_log=no_log,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_one_of=required_one_of,
+ add_file_common_args=add_file_common_args,
+ supports_check_mode=supports_check_mode,
+ required_if=merged_required_if)
+
+ if not HAS_PACKAGING_VERSION:
+ self.fail(msg=missing_required_lib('packaging'),
+ exception=HAS_PACKAGING_VERSION_EXC)
+
+ if not HAS_MSRESTAZURE:
+ self.fail(msg=missing_required_lib('msrestazure'),
+ exception=HAS_MSRESTAZURE_EXC)
+
+ if not HAS_AZURE:
+ self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)),
+ exception=HAS_AZURE_EXC)
+
+ self._network_client = None
+ self._storage_client = None
+ self._resource_client = None
+ self._compute_client = None
+ self._dns_client = None
+ self._web_client = None
+ self._marketplace_client = None
+ self._sql_client = None
+ self._mysql_client = None
+ self._mariadb_client = None
+ self._postgresql_client = None
+ self._containerregistry_client = None
+ self._containerinstance_client = None
+ self._containerservice_client = None
+ self._managedcluster_client = None
+ self._traffic_manager_management_client = None
+ self._monitor_client = None
+ self._resource = None
+ self._log_analytics_client = None
+ self._servicebus_client = None
+ self._automation_client = None
+ self._IoThub_client = None
+ self._lock_client = None
+
+ self.check_mode = self.module.check_mode
+ self.api_profile = self.module.params.get('api_profile')
+ self.facts_module = facts_module
+ # self.debug = self.module.params.get('debug')
+
+ # delegate auth to AzureRMAuth class (shared with all plugin types)
+ self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params)
+
+ # common parameter validation
+ if self.module.params.get('tags'):
+ self.validate_tags(self.module.params['tags'])
+
+ if not skip_exec:
+ res = self.exec_module(**self.module.params)
+ self.module.exit_json(**res)
+
+ def check_client_version(self, client_type):
+        # Ensure the installed azure-mgmt client package meets the expected minimum version.
+ package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
+ if package_version is not None:
+ client_name = package_version.get('package_name')
+ try:
+ client_module = importlib.import_module(client_type.__module__)
+ client_version = client_module.VERSION
+ except (RuntimeError, AttributeError):
+ # can't get at the module version for some reason, just fail silently...
+ return
+ expected_version = package_version.get('expected_version')
+ if Version(client_version) < Version(expected_version):
+ self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try "
+ "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
+ if Version(client_version) != Version(expected_version):
+ self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try "
+ "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
+
+ def exec_module(self, **kwargs):
+ self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
+
+ def fail(self, msg, **kwargs):
+ '''
+ Shortcut for calling module.fail()
+
+ :param msg: Error message text.
+ :param kwargs: Any key=value pairs
+ :return: None
+ '''
+ self.module.fail_json(msg=msg, **kwargs)
+
+ def deprecate(self, msg, version=None, collection_name=None):
+ self.module.deprecate(msg, version, collection_name=collection_name)
+
+ def log(self, msg, pretty_print=False):
+ if pretty_print:
+ self.module.debug(json.dumps(msg, indent=4, sort_keys=True))
+ else:
+ self.module.debug(msg)
+
+ def validate_tags(self, tags):
+ '''
+ Check if tags dictionary contains string:string pairs.
+
+ :param tags: dictionary of string:string pairs
+ :return: None
+ '''
+ if not self.facts_module:
+ if not isinstance(tags, dict):
+ self.fail("Tags must be a dictionary of string:string values.")
+ for key, value in tags.items():
+ if not isinstance(value, str):
+ self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
+
+ def update_tags(self, tags):
+ '''
+        Call from the module to update metadata tags. Returns a tuple
+        with a bool indicating whether there was a change and a dict of
+        new tags to assign to the object.
+
+ :param tags: metadata tags from the object
+ :return: bool, dict
+ '''
+ tags = tags or dict()
+ new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
+ param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()
+ append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True
+ changed = False
+ # check add or update
+ for key, value in param_tags.items():
+ if not new_tags.get(key) or new_tags[key] != value:
+ changed = True
+ new_tags[key] = value
+ # check remove
+ if not append_tags:
+ for key, value in tags.items():
+ if not param_tags.get(key):
+ new_tags.pop(key)
+ changed = True
+ return changed, new_tags
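+        # Example of the merge semantics (values illustrative): with object
+        # tags {'a': '1', 'b': '2'} and module tags {'b': '3'},
+        # append_tags=True yields (True, {'a': '1', 'b': '3'}) while
+        # append_tags=False yields (True, {'b': '3'}), since keys absent
+        # from the module parameters are removed.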
+
+ def has_tags(self, obj_tags, tag_list):
+ '''
+        Used in fact modules to compare object tags to a list of parameter tags. Return True if every
+        parameter tag exists in the object tags.
+
+ :param obj_tags: dictionary of tags from an Azure object.
+ :param tag_list: list of tag keys or tag key:value pairs
+ :return: bool
+ '''
+
+ if not obj_tags and tag_list:
+ return False
+
+ if not tag_list:
+ return True
+
+ matches = 0
+ result = False
+ for tag in tag_list:
+ tag_key = tag
+ tag_value = None
+ if ':' in tag:
+ tag_key, tag_value = tag.split(':')
+ if tag_value and obj_tags.get(tag_key) == tag_value:
+ matches += 1
+ elif not tag_value and obj_tags.get(tag_key):
+ matches += 1
+ if matches == len(tag_list):
+ result = True
+ return result
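+        # e.g. has_tags({'env': 'prod', 'owner': 'ops'}, ['env:prod', 'owner'])
+        # returns True: 'env:prod' matches on key and value, 'owner' on key
+        # alone.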
+
+ def get_resource_group(self, resource_group):
+ '''
+ Fetch a resource group.
+
+ :param resource_group: name of a resource group
+ :return: resource group object
+ '''
+ try:
+ return self.rm_client.resource_groups.get(resource_group)
+ except CloudError as cloud_error:
+ self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
+ except Exception as exc:
+ self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
+
+ def parse_resource_to_dict(self, resource):
+ '''
+        Return a dict of the given resource, containing its name and resource group.
+
+        :param resource: a resource name, ID, or dict containing name and resource group.
+ '''
+ resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource
+ resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group)
+ resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id)
+ return resource_dict
+
+ def serialize_obj(self, obj, class_name, enum_modules=None):
+ '''
+ Return a JSON representation of an Azure object.
+
+ :param obj: Azure object
+ :param class_name: Name of the object's class
+ :param enum_modules: List of module names to build enum dependencies from.
+ :return: serialized result
+ '''
+ enum_modules = [] if enum_modules is None else enum_modules
+
+ dependencies = dict()
+ if enum_modules:
+ for module_name in enum_modules:
+ mod = importlib.import_module(module_name)
+ for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
+ dependencies[mod_class_name] = mod_class_obj
+ self.log("dependencies: ")
+ self.log(str(dependencies))
+ serializer = Serializer(classes=dependencies)
+ return serializer.body(obj, class_name, keep_readonly=True)
+
+ def get_poller_result(self, poller, wait=5):
+ '''
+ Consistent method of waiting on and retrieving results from Azure's long poller
+
+        :param poller: Azure poller object
+        :return: object resulting from the original request
+ '''
+ try:
+ delay = wait
+ while not poller.done():
+ self.log("Waiting for {0} sec".format(delay))
+ poller.wait(timeout=delay)
+ return poller.result()
+ except Exception as exc:
+ self.log(str(exc))
+ raise
+
+ def check_provisioning_state(self, azure_object, requested_state='present'):
+ '''
+ Check an Azure object's provisioning state. If something did not complete the provisioning
+ process, then we cannot operate on it.
+
+        :param azure_object: An object such as a subnet, storage account, etc. Must have provisioning_state
+            and name attributes.
+        :return: None
+ '''
+
+ if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
+ hasattr(azure_object, 'name'):
+ # resource group object fits this model
+ if isinstance(azure_object.properties.provisioning_state, Enum):
+ if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
+ requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+ if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
+ requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+
+ if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
+ if isinstance(azure_object.provisioning_state, Enum):
+ if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+ if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+
+ def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
+ keys = dict()
+ try:
+ # Get keys from the storage account
+ self.log('Getting keys')
+ account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
+ except Exception as exc:
+ self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
+
+ try:
+ self.log('Create blob service')
+ if storage_blob_type == 'page':
+ return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+ account_name=storage_account_name,
+ account_key=account_keys.keys[0].value)
+ elif storage_blob_type == 'block':
+ return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+ account_name=storage_account_name,
+ account_key=account_keys.keys[0].value)
+ else:
+ raise Exception("Invalid storage blob type defined.")
+ except Exception as exc:
+ self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
+ str(exc)))
+
+ def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):
+ '''
+ Create a default public IP address <public_ip_name> to associate with a network interface.
+ If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.
+
+ :param resource_group: name of an existing resource group
+ :param location: a valid azure location
+ :param public_ip_name: base name to assign the public IP address
+ :param allocation_method: one of 'Static' or 'Dynamic'
+        :param sku: public IP SKU, for example 'Basic' or 'Standard'
+ :return: PIP object
+ '''
+ pip = None
+
+ self.log("Starting create_default_pip {0}".format(public_ip_name))
+ self.log("Check to see if public IP {0} exists".format(public_ip_name))
+ try:
+ pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
+ except CloudError:
+ pass
+
+ if pip:
+ self.log("Public ip {0} found.".format(public_ip_name))
+ self.check_provisioning_state(pip)
+ return pip
+
+ params = self.network_models.PublicIPAddress(
+ location=location,
+ public_ip_allocation_method=allocation_method,
+ sku=sku
+ )
+ self.log('Creating default public IP {0}'.format(public_ip_name))
+ try:
+ poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
+ except Exception as exc:
+ self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
+
+ return self.get_poller_result(poller)
+
+ def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
+ '''
+ Create a default security group <security_group_name> to associate with a network interface. If a security group matching
+ <security_group_name> exists, return it. Otherwise, create one.
+
+ :param resource_group: Resource group name
+ :param location: azure location name
+ :param security_group_name: base name to use for the security group
+        :param os_type: one of 'Windows' or 'Linux'. Determines the default rules added to the security group.
+        :param open_ports: optional list of port numbers to open instead of the OS-based defaults
+            (SSH for Linux; RDP and WinRM for Windows).
+ :return: security_group object
+ '''
+ group = None
+
+ self.log("Create security group {0}".format(security_group_name))
+ self.log("Check to see if security group {0} exists".format(security_group_name))
+ try:
+ group = self.network_client.network_security_groups.get(resource_group, security_group_name)
+ except CloudError:
+ pass
+
+ if group:
+ self.log("Security group {0} found.".format(security_group_name))
+ self.check_provisioning_state(group)
+ return group
+
+ parameters = self.network_models.NetworkSecurityGroup()
+ parameters.location = location
+
+ if not open_ports:
+ # Open default ports based on OS type
+ if os_type == 'Linux':
+ # add an inbound SSH rule
+ parameters.security_rules = [
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow SSH Access',
+ source_port_range='*',
+ destination_port_range='22',
+ priority=100,
+ name='SSH')
+ ]
+ parameters.location = location
+ else:
+ # for windows add inbound RDP and WinRM rules
+ parameters.security_rules = [
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow RDP port 3389',
+ source_port_range='*',
+ destination_port_range='3389',
+ priority=100,
+ name='RDP01'),
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow WinRM HTTPS port 5986',
+ source_port_range='*',
+ destination_port_range='5986',
+ priority=101,
+ name='WinRM01'),
+ ]
+ else:
+ # Open custom ports
+ parameters.security_rules = []
+ priority = 100
+ for port in open_ports:
+ priority += 1
+ rule_name = "Rule_{0}".format(priority)
+ parameters.security_rules.append(
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ source_port_range='*',
+ destination_port_range=str(port),
+ priority=priority,
+ name=rule_name)
+ )
+
+ self.log('Creating default security group {0}'.format(security_group_name))
+ try:
+ poller = self.network_client.network_security_groups.create_or_update(resource_group,
+ security_group_name,
+ parameters)
+ except Exception as exc:
+ self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
+
+ return self.get_poller_result(poller)
+
+ @staticmethod
+ def _validation_ignore_callback(session, global_config, local_config, **kwargs):
+ session.verify = False
+
+ def get_api_profile(self, client_type_name, api_profile_name):
+ profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)
+
+ if not profile_all_clients:
+ raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))
+
+ profile_raw = profile_all_clients.get(client_type_name, None)
+
+ if not profile_raw:
+ self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))
+
+ if isinstance(profile_raw, dict):
+ if not profile_raw.get('default_api_version'):
+ raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
+ return profile_raw
+
+ # wrap basic strings in a dict that just defines the default
+ return dict(default_api_version=profile_raw)
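+        # e.g. get_api_profile('NetworkManagementClient', 'latest') returns
+        # dict(default_api_version='2018-08-01') from the table above, while
+        # dict-valued entries such as ComputeManagementClient pass through
+        # with their per-operation versions intact.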
+
+ def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
+ self.log('Getting management service client {0}'.format(client_type.__name__))
+ self.check_client_version(client_type)
+
+ client_argspec = inspect.getargspec(client_type.__init__)
+
+ if not base_url:
+ # most things are resource_manager, don't make everyone specify
+ base_url = self.azure_auth._cloud_environment.endpoints.resource_manager
+
+ client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url)
+
+ api_profile_dict = {}
+
+ if self.api_profile:
+ api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)
+
+ # unversioned clients won't accept profile; only send it if necessary
+ # clients without a version specified in the profile will use the default
+ if api_profile_dict and 'profile' in client_argspec.args:
+ client_kwargs['profile'] = api_profile_dict
+
+ # If the client doesn't accept api_version, it's unversioned.
+ # If it does, favor explicitly-specified api_version, fall back to api_profile
+ if 'api_version' in client_argspec.args:
+ profile_default_version = api_profile_dict.get('default_api_version', None)
+ if api_version or profile_default_version:
+ client_kwargs['api_version'] = api_version or profile_default_version
+ if 'profile' in client_kwargs:
+ # remove profile; only pass API version if specified
+ client_kwargs.pop('profile')
+
+ client = client_type(**client_kwargs)
+
+ # FUTURE: remove this once everything exposes models directly (eg, containerinstance)
+ try:
+ getattr(client, "models")
+ except AttributeError:
+ def _ansible_get_models(self, *arg, **kwarg):
+ return self._ansible_models
+
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ client.models = types.MethodType(_ansible_get_models, client)
+
+ client.config = self.add_user_agent(client.config)
+
+ if self.azure_auth._cert_validation_mode == 'ignore':
+ client.config.session_configuration_callback = self._validation_ignore_callback
+
+ return client
+
+ def add_user_agent(self, config):
+ # Add user agent for Ansible
+ config.add_user_agent(ANSIBLE_USER_AGENT)
+ # Add user agent when running from Cloud Shell
+ if CLOUDSHELL_USER_AGENT_KEY in os.environ:
+ config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
+ # Add user agent when running from VSCode extension
+ if VSCODEEXT_USER_AGENT_KEY in os.environ:
+ config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
+ return config
+
+    def generate_sas_token(self, **kwargs):
+        base_url = kwargs.get('base_url', None)
+        expiry = kwargs.get('expiry', time() + 3600)
+        key = kwargs.get('key', None)
+        policy = kwargs.get('policy', None)
+        url = quote_plus(base_url)
+        ttl = int(expiry)
+        sign_key = '{0}\n{1}'.format(url, ttl)
+        signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
+        result = {
+            'sr': url,
+            'sig': signature,
+            'se': str(ttl),
+        }
+        if policy:
+            result['skn'] = policy
+        return 'SharedAccessSignature ' + urlencode(result)
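+        # The returned header value follows the Service Bus/Event Hubs SAS
+        # convention: 'SharedAccessSignature sr=<url>&sig=<hmac>&se=<expiry>[&skn=<policy>]',
+        # where the signature is an HMAC-SHA256 over '<url>\n<expiry>' keyed
+        # with the base64-decoded access key.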
+
+    def get_data_svc_client(self, **kwargs):
+        url = kwargs.get('base_url', None)
+        config = AzureConfiguration(base_url='https://{0}'.format(url))
+        config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwargs))
+        config = self.add_user_agent(config)
+        return ServiceClient(creds=config.credentials, config=config)
+
+ # passthru methods to AzureAuth instance for backcompat
+ @property
+ def credentials(self):
+ return self.azure_auth.credentials
+
+ @property
+ def _cloud_environment(self):
+ return self.azure_auth._cloud_environment
+
+ @property
+ def subscription_id(self):
+ return self.azure_auth.subscription_id
+
+ @property
+ def storage_client(self):
+ self.log('Getting storage client...')
+ if not self._storage_client:
+ self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-07-01')
+ return self._storage_client
+
+ @property
+ def storage_models(self):
+ return StorageManagementClient.models("2018-07-01")
+
+ @property
+ def network_client(self):
+ self.log('Getting network client')
+ if not self._network_client:
+ self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2019-06-01')
+ return self._network_client
+
+ @property
+ def network_models(self):
+ self.log("Getting network models...")
+ return NetworkManagementClient.models("2018-08-01")
+
+ @property
+ def rm_client(self):
+ self.log('Getting resource manager client')
+ if not self._resource_client:
+ self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-05-10')
+ return self._resource_client
+
+ @property
+ def rm_models(self):
+ self.log("Getting resource manager models")
+ return ResourceManagementClient.models("2017-05-10")
+
+ @property
+ def compute_client(self):
+ self.log('Getting compute client')
+ if not self._compute_client:
+ self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2019-07-01')
+ return self._compute_client
+
+ @property
+ def compute_models(self):
+ self.log("Getting compute models")
+ return ComputeManagementClient.models("2019-07-01")
+
+ @property
+ def dns_client(self):
+ self.log('Getting dns client')
+ if not self._dns_client:
+ self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-05-01')
+ return self._dns_client
+
+ @property
+ def dns_models(self):
+ self.log("Getting dns models...")
+ return DnsManagementClient.models('2018-05-01')
+
+ @property
+ def web_client(self):
+ self.log('Getting web client')
+ if not self._web_client:
+ self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-02-01')
+ return self._web_client
+
+ @property
+ def containerservice_client(self):
+ self.log('Getting container service client')
+ if not self._containerservice_client:
+ self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-07-01')
+ return self._containerservice_client
+
+ @property
+ def managedcluster_models(self):
+ self.log("Getting container service models")
+ return ContainerServiceClient.models('2018-03-31')
+
+ @property
+ def managedcluster_client(self):
+ self.log('Getting container service client')
+ if not self._managedcluster_client:
+ self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-03-31')
+ return self._managedcluster_client
+
+ @property
+ def sql_client(self):
+ self.log('Getting SQL client')
+ if not self._sql_client:
+ self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._sql_client
+
+ @property
+ def postgresql_client(self):
+ self.log('Getting PostgreSQL client')
+ if not self._postgresql_client:
+ self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._postgresql_client
+
+ @property
+ def mysql_client(self):
+ self.log('Getting MySQL client')
+ if not self._mysql_client:
+ self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._mysql_client
+
+ @property
+ def mariadb_client(self):
+ self.log('Getting MariaDB client')
+ if not self._mariadb_client:
+ self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._mariadb_client
+
+ @property
+ def containerregistry_client(self):
+ self.log('Getting container registry mgmt client')
+ if not self._containerregistry_client:
+ self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-10-01')
+
+ return self._containerregistry_client
+
+ @property
+ def containerinstance_client(self):
+ self.log('Getting container instance mgmt client')
+ if not self._containerinstance_client:
+ self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-06-01')
+
+ return self._containerinstance_client
+
+ @property
+ def marketplace_client(self):
+ self.log('Getting marketplace agreement client')
+ if not self._marketplace_client:
+ self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._marketplace_client
+
+ @property
+ def traffic_manager_management_client(self):
+ self.log('Getting traffic manager client')
+ if not self._traffic_manager_management_client:
+ self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._traffic_manager_management_client
+
+ @property
+ def monitor_client(self):
+ self.log('Getting monitor client')
+ if not self._monitor_client:
+ self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._monitor_client
+
+ @property
+ def log_analytics_client(self):
+ self.log('Getting log analytics client')
+ if not self._log_analytics_client:
+ self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._log_analytics_client
+
+ @property
+ def log_analytics_models(self):
+ self.log('Getting log analytics models')
+ return LogAnalyticsModels
+
+ @property
+ def servicebus_client(self):
+ self.log('Getting servicebus client')
+ if not self._servicebus_client:
+ self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._servicebus_client
+
+ @property
+ def servicebus_models(self):
+ return ServicebusModel
+
+ @property
+ def automation_client(self):
+ self.log('Getting automation client')
+ if not self._automation_client:
+ self._automation_client = self.get_mgmt_svc_client(AutomationClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._automation_client
+
+ @property
+ def automation_models(self):
+ return AutomationModel
+
+ @property
+ def IoThub_client(self):
+ self.log('Getting iothub client')
+ if not self._IoThub_client:
+ self._IoThub_client = self.get_mgmt_svc_client(IotHubClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._IoThub_client
+
+ @property
+ def IoThub_models(self):
+ return IoTHubModels
+
+ @property
+ def lock_client(self):
+ self.log('Getting lock client')
+ if not self._lock_client:
+ self._lock_client = self.get_mgmt_svc_client(ManagementLockClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2016-09-01')
+ return self._lock_client
+
+ @property
+ def lock_models(self):
+ self.log("Getting lock models")
+ return ManagementLockClient.models('2016-09-01')
+
+
+class AzureSASAuthentication(Authentication):
+ """Simple SAS Authentication.
+ An implementation of Authentication in
+ https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py
+
+ :param str token: SAS token
+ """
+ def __init__(self, token):
+ self.token = token
+
+ def signed_session(self):
+ session = super(AzureSASAuthentication, self).signed_session()
+ session.headers['Authorization'] = self.token
+ return session
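+        # Hedged usage sketch (token value illustrative): pair this class
+        # with generate_sas_token above, e.g.
+        #   auth = AzureSASAuthentication(token='SharedAccessSignature sr=...')
+        #   session = auth.signed_session()  # Authorization header now set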
+
+
+class AzureRMAuthException(Exception):
+ pass
+
+
+class AzureRMAuth(object):
+ def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,
+ tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',
+ api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):
+
+ if fail_impl:
+ self._fail_impl = fail_impl
+ else:
+ self._fail_impl = self._default_fail_impl
+
+ self._cloud_environment = None
+ self._adfs_authority_url = None
+
+ # authenticate
+ self.credentials = self._get_credentials(
+ dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,
+ tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,
+ cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))
+
+ if not self.credentials:
+ if HAS_AZURE_CLI_CORE:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
+ else:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
+
+ # cert validation mode precedence: module-arg, credential profile, env, "validate"
+ self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \
+ os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
+
+ if self._cert_validation_mode not in ['validate', 'ignore']:
+ self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
+
+ # if cloud_environment specified, look up/build Cloud object
+ raw_cloud_env = self.credentials.get('cloud_environment')
+ if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
+ self._cloud_environment = raw_cloud_env
+ elif not raw_cloud_env:
+ self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
+ else:
+ # try to look up "well-known" values via the name attribute on azure_cloud members
+ all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
+ matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
+ if len(matched_clouds) == 1:
+ self._cloud_environment = matched_clouds[0]
+ elif len(matched_clouds) > 1:
+ self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
+ else:
+ if not urlparse.urlparse(raw_cloud_env).scheme:
+ self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
+ try:
+ self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
+ except Exception as e:
+ self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc())
+
+ if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
+ self.fail("Credentials did not include a subscription_id value.")
+ self.log("setting subscription_id")
+ self.subscription_id = self.credentials['subscription_id']
+
+ # get authentication authority
+ # for adfs, user could pass in authority or not.
+ # for others, use default authority from cloud environment
+ if self.credentials.get('adfs_authority_url') is None:
+ self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
+ else:
+ self._adfs_authority_url = self.credentials.get('adfs_authority_url')
+
+ # get resource from cloud environment
+ self._resource = self._cloud_environment.endpoints.active_directory_resource_id
+
+ if self.credentials.get('credentials') is not None:
+ # AzureCLI credentials
+ self.azure_credentials = self.credentials['credentials']
+ elif self.credentials.get('client_id') is not None and \
+ self.credentials.get('secret') is not None and \
+ self.credentials.get('tenant') is not None:
+ self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
+ secret=self.credentials['secret'],
+ tenant=self.credentials['tenant'],
+ cloud_environment=self._cloud_environment,
+ verify=self._cert_validation_mode == 'validate')
+
+ elif self.credentials.get('ad_user') is not None and \
+ self.credentials.get('password') is not None and \
+ self.credentials.get('client_id') is not None and \
+ self.credentials.get('tenant') is not None:
+
+ self.azure_credentials = self.acquire_token_with_username_password(
+ self._adfs_authority_url,
+ self._resource,
+ self.credentials['ad_user'],
+ self.credentials['password'],
+ self.credentials['client_id'],
+ self.credentials['tenant'])
+
+ elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
+ tenant = self.credentials.get('tenant')
+ if not tenant:
+ tenant = 'common' # SDK default
+
+ self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
+ self.credentials['password'],
+ tenant=tenant,
+ cloud_environment=self._cloud_environment,
+ verify=self._cert_validation_mode == 'validate')
+ else:
+ self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
+ "Credentials must include client_id, secret and tenant or ad_user and password, or "
+ "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
+ "be logged in using AzureCLI.")
+
+ def fail(self, msg, exception=None, **kwargs):
+ self._fail_impl(msg)
+
+ def _default_fail_impl(self, msg, exception=None, **kwargs):
+ raise AzureRMAuthException(msg)
+
+ def _get_profile(self, profile="default"):
+ path = expanduser("~/.azure/credentials")
+ try:
+ config = configparser.ConfigParser()
+ config.read(path)
+ except Exception as exc:
+ self.fail("Failed to access {0}. Check that the file exists and you have read "
+ "access. {1}".format(path, str(exc)))
+ credentials = dict()
+ for key in AZURE_CREDENTIAL_ENV_MAPPING:
+ try:
+ credentials[key] = config.get(profile, key, raw=True)
+ except Exception:
+ pass
+
+ if credentials.get('subscription_id'):
+ return credentials
+
+ return None
+
+ def _get_msi_credentials(self, subscription_id_param=None, **kwargs):
+ client_id = kwargs.get('client_id', None)
+ credentials = MSIAuthentication(client_id=client_id)
+ subscription_id = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
+ if not subscription_id:
+ try:
+ # use the first subscription of the MSI
+ subscription_client = SubscriptionClient(credentials)
+ subscription = next(subscription_client.subscriptions.list())
+ subscription_id = str(subscription.subscription_id)
+ except Exception as exc:
+ self.fail("Failed to get MSI token: {0}. "
+ "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc)))
+ return {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id
+ }
+
+ def _get_azure_cli_credentials(self):
+ credentials, subscription_id = get_azure_cli_credentials()
+ cloud_environment = get_cli_active_cloud()
+
+ cli_credentials = {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id,
+ 'cloud_environment': cloud_environment
+ }
+ return cli_credentials
+
+ def _get_env_credentials(self):
+ env_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ env_credentials[attribute] = os.environ.get(env_variable, None)
+
+ if env_credentials['profile']:
+ credentials = self._get_profile(env_credentials['profile'])
+ return credentials
+
+ if env_credentials.get('subscription_id') is not None:
+ return env_credentials
+
+ return None
+
+ # TODO: use explicit kwargs instead of intermediate dict
+ def _get_credentials(self, params):
+ # Get authentication credentials.
+ self.log('Getting credentials')
+
+ arg_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ arg_credentials[attribute] = params.get(attribute, None)
+
+ auth_source = params.get('auth_source', None)
+ if not auth_source:
+ auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
+
+ if auth_source == 'msi':
+ self.log('Retrieving credentials from MSI')
+ return self._get_msi_credentials(arg_credentials['subscription_id'], client_id=params.get('client_id', None))
+
+ if auth_source == 'cli':
+ if not HAS_AZURE_CLI_CORE:
+ self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'),
+ exception=HAS_AZURE_CLI_CORE_EXC)
+ try:
+ self.log('Retrieving credentials from Azure CLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as err:
+ self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
+
+ if auth_source == 'env':
+ self.log('Retrieving credentials from environment')
+ env_credentials = self._get_env_credentials()
+ return env_credentials
+
+ if auth_source == 'credential_file':
+ self.log("Retrieving credentials from credential file")
+ profile = params.get('profile') or 'default'
+ default_credentials = self._get_profile(profile)
+ return default_credentials
+
+ # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
+ # try module params
+ if arg_credentials['profile'] is not None:
+ self.log('Retrieving credentials with profile parameter.')
+ credentials = self._get_profile(arg_credentials['profile'])
+ return credentials
+
+ if arg_credentials['subscription_id']:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ # try environment
+ env_credentials = self._get_env_credentials()
+ if env_credentials:
+ self.log('Received credentials from env.')
+ return env_credentials
+
+ # try default profile from ~/.azure/credentials
+ default_credentials = self._get_profile()
+ if default_credentials:
+ self.log('Retrieved default profile credentials from ~/.azure/credentials.')
+ return default_credentials
+
+ try:
+ if HAS_AZURE_CLI_CORE:
+ self.log('Retrieving credentials from AzureCLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as ce:
+ self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
+
+ return None
+
+ def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
+ authority_uri = authority
+
+ if tenant is not None:
+ authority_uri = authority + '/' + tenant
+
+ context = AuthenticationContext(authority_uri)
+ token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+
+ return AADTokenCredentials(token_response)
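+
+ # Illustrative call (every value below is a placeholder, not part of this
+ # module):
+ #
+ # creds = self.acquire_token_with_username_password(
+ # 'https://login.microsoftonline.com', 'https://management.core.windows.net/',
+ # 'user@example.com', '<password>', '<client-id>', '<tenant-id>')
+ #
+ # With a tenant supplied, the authority URI becomes
+ # 'https://login.microsoftonline.com/<tenant-id>'; with tenant=None the bare
+ # authority is used as-is.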
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # Use only during module development
+ # if self.debug:
+ # log_file = open('azure_rm.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, indent=4, sort_keys=True))
+ # else:
+ # log_file.write(msg + u'\n')
diff --git a/test/support/integration/plugins/module_utils/azure_rm_common_rest.py b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py
new file mode 100644
index 00000000..4fd7eaa3
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_configuration import AzureConfiguration
+ from msrest.service_client import ServiceClient
+ from msrest.pipeline import ClientRawResponse
+ from msrest.polling import LROPoller
+ from msrestazure.polling.arm_polling import ARMPolling
+ import uuid
+ import json
+except ImportError:
+ # This is handled in azure_rm_common
+ AzureConfiguration = object
+
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
+
+
+class GenericRestClientConfiguration(AzureConfiguration):
+
+ def __init__(self, credentials, subscription_id, base_url=None):
+
+ if credentials is None:
+ raise ValueError("Parameter 'credentials' must not be None.")
+ if subscription_id is None:
+ raise ValueError("Parameter 'subscription_id' must not be None.")
+ if not base_url:
+ base_url = 'https://management.azure.com'
+
+ super(GenericRestClientConfiguration, self).__init__(base_url)
+
+ self.add_user_agent(ANSIBLE_USER_AGENT)
+
+ self.credentials = credentials
+ self.subscription_id = subscription_id
+
+
+class GenericRestClient(object):
+
+ def __init__(self, credentials, subscription_id, base_url=None):
+ self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url)
+ self._client = ServiceClient(self.config.credentials, self.config)
+ self.models = None
+
+ def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval):
+ # Construct and send request
+ operation_config = {}
+
+ request = None
+
+ if header_parameters is None:
+ header_parameters = {}
+
+ header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
+
+ if method == 'GET':
+ request = self._client.get(url, query_parameters)
+ elif method == 'PUT':
+ request = self._client.put(url, query_parameters)
+ elif method == 'POST':
+ request = self._client.post(url, query_parameters)
+ elif method == 'HEAD':
+ request = self._client.head(url, query_parameters)
+ elif method == 'PATCH':
+ request = self._client.patch(url, query_parameters)
+ elif method == 'DELETE':
+ request = self._client.delete(url, query_parameters)
+ elif method == 'MERGE':
+ request = self._client.merge(url, query_parameters)
+
+ response = self._client.send(request, header_parameters, body, **operation_config)
+
+ if response.status_code not in expected_status_codes:
+ exp = CloudError(response)
+ exp.request_id = response.headers.get('x-ms-request-id')
+ raise exp
+ elif response.status_code == 202 and polling_timeout > 0:
+ def get_long_running_output(response):
+ return response
+ poller = LROPoller(self._client,
+ ClientRawResponse(None, response),
+ get_long_running_output,
+ ARMPolling(polling_interval, **operation_config))
+ response = self.get_poller_result(poller, polling_timeout)
+
+ return response
+
+ def get_poller_result(self, poller, timeout):
+ try:
+ poller.wait(timeout=timeout)
+ return poller.result()
+ except Exception:
+ # propagate polling errors unchanged (bare raise preserves the traceback)
+ raise
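+
+
+# Illustrative use of GenericRestClient.query() (a sketch only; 'credentials'
+# and 'subscription_id' must come from the azure_rm_common authentication
+# helpers, which are not part of this file):
+#
+# client = GenericRestClient(credentials, subscription_id)
+# response = client.query('/subscriptions/{0}/resourceGroups'.format(subscription_id),
+# 'GET', {'api-version': '<api-version>'}, None, None,
+# [200], polling_timeout=0, polling_interval=0)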
diff --git a/test/support/integration/plugins/module_utils/cloud.py b/test/support/integration/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..0d29071f
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/cloud.py
@@ -0,0 +1,217 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+ @AWSRetry.exponential_backoff(retries=10, delay=3)
+ def get_ec2_security_group_ids_from_names():
+ ...
+
+ @AWSRetry.jittered_backoff()
+ def get_ec2_security_group_ids_from_names():
+ ...
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_gen at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+ allowing developers to explicitly pass in a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_gen at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ catch_extra_error_codes (list): Optional, additional error codes to treat as retryable.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+ else:
+ # Re-raise the exception if its status code is not one we retry on
+ raise e
+ else:
+ # Re-raise the exception if it is not an instance of base_class
+ raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
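+
+
+# Illustrative provider subclass (a sketch, not shipped with this module): a
+# concrete provider supplies base_class, status_code_from_exception() and
+# found(), then decorates call sites with one of the backoff classmethods.
+# The sketch assumes 'import socket' and 'import errno':
+#
+# class SocketRetry(CloudRetry):
+# base_class = socket.error
+#
+# @staticmethod
+# def status_code_from_exception(error):
+# return error.errno
+#
+# @staticmethod
+# def found(response_code, catch_extra_error_codes=None):
+# retryable = set([errno.ETIMEDOUT, errno.ECONNREFUSED])
+# if catch_extra_error_codes:
+# retryable.update(catch_extra_error_codes)
+# return response_code in retryable
+#
+# @SocketRetry.jittered_backoff(retries=5, delay=1)
+# def fetch_data():
+# ...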
diff --git a/test/support/integration/plugins/module_utils/compat/__init__.py b/test/support/integration/plugins/module_utils/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/__init__.py
diff --git a/test/support/integration/plugins/module_utils/compat/ipaddress.py b/test/support/integration/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 00000000..c46ad72a
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2476 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import unicode_literals
+
+
+import itertools
+import struct
+
+
+# The following makes it easier for us to script updates of the bundled code and is not part of
+# upstream
+_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
+
+__version__ = '1.0.22'
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+ _compat_int_types = (int, long)
+except NameError:
+ pass
+try:
+ _compat_str = unicode
+except NameError:
+ _compat_str = str
+ assert bytes != str
+if b'\0'[0] == 0: # Python 3 semantics
+ def _compat_bytes_to_byte_vals(byt):
+ return byt
+else:
+ def _compat_bytes_to_byte_vals(byt):
+ return [struct.unpack(b'!B', b)[0] for b in byt]
+try:
+ _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+ def _compat_int_from_byte_vals(bytvals, endianess):
+ assert endianess == 'big'
+ res = 0
+ for bv in bytvals:
+ assert isinstance(bv, _compat_int_types)
+ res = (res << 8) + bv
+ return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+ assert isinstance(intval, _compat_int_types)
+ assert endianess == 'big'
+ if length == 4:
+ if intval < 0 or intval >= 2 ** 32:
+ raise struct.error("integer out of range for 'I' format code")
+ return struct.pack(b'!I', intval)
+ elif length == 16:
+ if intval < 0 or intval >= 2 ** 128:
+ raise struct.error("integer out of range for 'QQ' format code")
+ return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
+ else:
+ raise NotImplementedError()
+
+
+if hasattr(int, 'bit_length'):
+ # Not int.bit_length, since that won't work in 2.7 where long exists
+ def _compat_bit_length(i):
+ return i.bit_length()
+else:
+ def _compat_bit_length(i):
+ for res in itertools.count():
+ if i >> res == 0:
+ return res
+
+
+def _compat_range(start, end, step=1):
+ assert step > 0
+ i = start
+ while i < end:
+ yield i
+ i += step
+
+
+class _TotalOrderingMixin(object):
+ __slots__ = ()
+
+ # Helper that derives the other comparison operations from
+ # __lt__ and __eq__
+ # We avoid functools.total_ordering because it doesn't handle
+ # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+ def __eq__(self, other):
+ raise NotImplementedError
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not equal
+
+ def __lt__(self, other):
+ raise NotImplementedError
+
+ def __le__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented or not less:
+ return self.__eq__(other)
+ return less
+
+ def __gt__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not (less or equal)
+
+ def __ge__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ '%r does not appear to be an IPv4 or IPv6 address. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?' % address)
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
+ address)
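+
+# For example, per the docstring above, both forms are equivalent:
+# ip_address(u'192.0.2.1') -> IPv4Address('192.0.2.1')
+# ip_address(3221225985) -> IPv4Address('192.0.2.1')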
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ '%r does not appear to be an IPv4 or IPv6 network. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?' % address)
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
+ address)
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
+ address)
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return _compat_to_bytes(address, 4, 'big')
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return _compat_to_bytes(address, 16, 'big')
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = _compat_str(address).split('/')
+ if len(addr) > 2:
+ raise AddressValueError("Only one '/' permitted in %r" % address)
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it) # pylint: disable=stop-iteration-return
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, _compat_bit_length(~number & (number - 1)))
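+
+# For example, _count_righthand_zero_bits(0b10111000, 8) == 3, and
+# _count_righthand_zero_bits(0, 8) == 8 via the zero short-circuit above.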
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raise:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if (not (isinstance(first, _BaseAddress) and
+ isinstance(last, _BaseAddress))):
+ raise TypeError('first and last must be IP addresses, not networks')
+ if first.version != last.version:
+ raise TypeError("%s and %s are not of the same version" % (
+ first, last))
+ if first > last:
+ raise ValueError('last IP address must be greater than first')
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError('unknown IP version')
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
+ _compat_bit_length(last_int - first_int + 1) - 1)
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ A list of IPv4Network's or IPv6Network's depending on what we were
+ passed.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted,
+ # last.network_address <= net.network_address is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterator of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, nets[-1]))
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. There are some times however, where you may wish
+ to have ipaddress sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
+
+
+class _IPAddressBase(_TotalOrderingMixin):
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return _compat_str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = '%200s has no version specified' % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._max_prefixlen,
+ self._version))
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = (
+ '%r (len %d != %d) is not permitted as an IPv%d address. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?')
+ raise AddressValueError(msg % (address, address_len,
+ expected_len, self._version))
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
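+
+ # For IPv4, e.g., _ip_int_from_prefix(24) == 0xffffff00 -- the integer
+ # form of the 255.255.255.0 netmask.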
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(ip_int,
+ cls._max_prefixlen)
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = _compat_to_bytes(ip_int, byteslen, 'big')
+ msg = 'Netmask pattern %r mixes zeroes & ones'
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = '%r is not a valid netmask' % netmask_str
+ raise NetmaskValueError(msg)
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ def __reduce__(self):
+ return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return (self._ip == other._ip and
+ self._version == other._version)
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseAddress):
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return _compat_str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return '%s/%d' % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast):
+ yield self._address_class(x)
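+
+ # For example, list(ip_network(u'192.0.2.0/30').hosts()) yields
+ # [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]; the network and
+ # broadcast addresses are omitted.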
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError('address out of range')
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError('address out of range')
+ return self._address_class(broadcast + n)
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (self._version == other._version and
+ self.network_address == other.network_address and
+ int(self.netmask) == int(other.netmask))
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return (int(self.network_address) <= int(other._ip) <=
+ int(self.broadcast_address))
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other or (
+ other.network_address in self or (
+ other.broadcast_address in self)))
+
+ @property
+ def broadcast_address(self):
+ x = self._cache.get('broadcast_address')
+ if x is None:
+ x = self._address_class(int(self.network_address) |
+ int(self.hostmask))
+ self._cache['broadcast_address'] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get('hostmask')
+ if x is None:
+ x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+ self._cache['hostmask'] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%d' % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = '%200s has no associated address class' % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::1/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::1/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ self, other))
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError('%s not contained in %s' % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__('%s/%s' % (other.network_address,
+ other.prefixlen))
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), yield an iterator with just ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (smaller prefix) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError('new prefix must be longer')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError('prefix length diff must be > 0')
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ 'prefix length diff %d is invalid for netblock %s' % (
+ new_prefixlen, self))
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in _compat_range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
+
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError('new prefix must be shorter')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+ (self.prefixlen, prefixlen_diff))
+ return self.__class__((
+ int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen))
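+
+ # For example, ip_network(u'192.0.2.0/24').supernet(prefixlen_diff=3)
+ # returns IPv4Network('192.0.0.0/21').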
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (self.network_address.is_multicast and
+ self.broadcast_address.is_multicast)
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError("%s and %s are not of the same version" % (a, b))
+ return (b.network_address <= a.network_address and
+ b.broadcast_address >= a.broadcast_address)
+ except AttributeError:
+ raise TypeError("Unable to test subnet containment "
+ "between %s and %s" % (a, b))
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
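+
+ # For example:
+ # ip_network(u'192.0.2.0/28').subnet_of(ip_network(u'192.0.2.0/24')) -> True
+ # ip_network(u'192.0.2.0/24').supernet_of(ip_network(u'192.0.2.0/28')) -> True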
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (self.network_address.is_reserved and
+ self.broadcast_address.is_reserved)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (self.network_address.is_link_local and
+ self.broadcast_address.is_link_local)
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (self.network_address.is_private and
+ self.broadcast_address.is_private)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (self.network_address.is_unspecified and
+ self.broadcast_address.is_unspecified)
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (self.network_address.is_loopback and
+ self.broadcast_address.is_loopback)
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2 ** IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset('0123456789')
+
+ # the valid octets for host and netmasks. only useful for IPv4.
+ _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return _compat_str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
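+
+ # For example, _make_netmask(24), _make_netmask('24') and
+ # _make_netmask('255.255.255.0') all return
+ # (IPv4Address('255.255.255.0'), 24), and the result is cached.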
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ octets = ip_str.split('.')
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return _compat_int_from_byte_vals(
+ map(cls._parse_octet, octets), 'big')
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ # Any octets that look like they *might* be written in octal,
+ # and which don't look exactly the same in both octal and
+ # decimal are rejected as ambiguous
+ if octet_int > 7 and octet_str[0] == '0':
+ msg = "Ambiguous (octal/decimal) value in %r not permitted"
+ raise ValueError(msg % octet_str)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
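+
+ # For example, _parse_octet('21') == 21, while '021' is rejected as
+ # ambiguous octal/decimal and '256' is rejected as out of range.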
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
+ if isinstance(b, bytes)
+ else b)
+ for b in _compat_to_bytes(ip_int, 4, 'big'))
+
+ def _is_hostmask(self, ip_str):
+ """Test if the IP string is a hostmask (rather than a netmask).
+
+ Args:
+ ip_str: A string, the potential hostmask.
+
+ Returns:
+ A boolean, True if the IP string is a hostmask.
+
+ """
+ bits = ip_str.split('.')
+ try:
+ parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+ except ValueError:
+ return False
+ if len(parts) != len(bits):
+ return False
+ if parts[0] < parts[-1]:
+ return True
+ return False
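+
+ # For example, _is_hostmask('0.0.0.255') is True (octets ascend), while
+ # _is_hostmask('255.255.255.0') is False (octets descend: a netmask).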
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = _compat_str(self).split('.')[::-1]
+ return '.'.join(reverse_octets) + '.in-addr.arpa'
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if '/' in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
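+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, True if the address is neither in the shared
+            address space (100.64.0.0/10) nor reserved per
+            iana-ipv4-special-registry.
+
+        """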
+ return (
+ self not in self._constants._public_network and
+ not self.is_private)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+        if address_equal is NotImplemented or not address_equal:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+        .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.0.2/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+            starts with a zero field (e.g. /0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+            AddressValueError: If address isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Constructing from a packed address or integer
+ if isinstance(address, (_compat_int_types, bytes)):
+ self.network_address = IPv4Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen)
+ # fixme: address/network test here.
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ # We weren't given an address[1]
+ arg = self._max_prefixlen
+ self.network_address = IPv4Address(address[0])
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv4Address(packed &
+ int(self.netmask))
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+ self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv4Address(int(self.network_address) &
+ int(self.netmask))
+
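+        # For a /31 (RFC 3021 point-to-point link) every address is a
+        # usable host, so hosts() must not skip the network and broadcast
+        # addresses; __iter__ already yields the full range.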
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+ self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+ not self.is_private)
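+        # Illustrative examples: IPv4Network('8.8.8.0/24').is_global is
+        # True, while IPv4Network('10.0.0.0/8').is_global and
+        # IPv4Network('100.64.0.0/10').is_global are False.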
+
+
+class _IPv4Constants(object):
+
+ _linklocal_network = IPv4Network('169.254.0.0/16')
+
+ _loopback_network = IPv4Network('127.0.0.0/8')
+
+ _multicast_network = IPv4Network('224.0.0.0/4')
+
+ _public_network = IPv4Network('100.64.0.0/10')
+
+ _private_networks = [
+ IPv4Network('0.0.0.0/8'),
+ IPv4Network('10.0.0.0/8'),
+ IPv4Network('127.0.0.0/8'),
+ IPv4Network('169.254.0.0/16'),
+ IPv4Network('172.16.0.0/12'),
+ IPv4Network('192.0.0.0/29'),
+ IPv4Network('192.0.0.170/31'),
+ IPv4Network('192.0.2.0/24'),
+ IPv4Network('192.168.0.0/16'),
+ IPv4Network('198.18.0.0/15'),
+ IPv4Network('198.51.100.0/24'),
+ IPv4Network('203.0.113.0/24'),
+ IPv4Network('240.0.0.0/4'),
+ IPv4Network('255.255.255.255/32'),
+ ]
+
+ _reserved_network = IPv4Network('240.0.0.0/4')
+
+ _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2 ** IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+ _max_prefixlen = IPV6LENGTH
+
+    # There are only a handful of valid v6 netmasks, so we cache them all
+    # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "64")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
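+        # Illustrative example: _make_netmask(64) returns
+        # (IPv6Address('ffff:ffff:ffff:ffff::'), 64); later calls with the
+        # same argument are served from _netmask_cache.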
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+        Returns:
+            An int, the IPv6 address.
+
+        Raises:
+            AddressValueError: if ip_str isn't a valid IPv6 address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ parts = ip_str.split(':')
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if '.' in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+ parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append('%x' % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (
+ _max_parts - 1, ip_str)
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in _compat_range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
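+        # Illustrative examples: _parse_hextet('db8') == 0xdb8 == 3512,
+        # while '12345' (too long) and 'fg' (non-hex) raise ValueError.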
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+        Compresses a list of strings, replacing the longest continuous
+        sequence of "0" in the list with "" and adding empty strings at
+        the beginning or at the end of the list such that subsequently
+        calling ":".join(hextets) will produce the compressed version of
+        the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == '0':
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (best_doublecolon_start +
+ best_doublecolon_len)
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += ['']
+ hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [''] + hextets
+
+ return hextets
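+        # Illustrative example: ['2001', 'db8', '0', '0', '0', '0', '0', '1']
+        # becomes ['2001', 'db8', '', '1'], which joins to '2001:db8::1'.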
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError('IPv6 address is too large')
+
+ hex_str = '%032x' % ip_int
+ hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ':'.join(hextets)
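+        # Illustrative examples: _string_from_ip_int(1) returns '::1' and
+        # _string_from_ip_int(0) returns '::'.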
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = _compat_str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = _compat_str(self.ip)
+ else:
+ ip_str = _compat_str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = '%032x' % ip_int
+ parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return '%s/%d' % (':'.join(parts), self._prefixlen)
+ return ':'.join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+        This implements the method described in RFC 3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(':', '')
+ return '.'.join(reverse_chars) + '.ip6.arpa'
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if '/' in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+            A boolean, True if the address is not reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
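+        # Illustrative example: IPv6Address('::ffff:192.0.2.1').ipv4_mapped
+        # == IPv4Address('192.0.2.1').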
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF))
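+        # Illustrative example (values derived from the bit layout above):
+        # IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo ==
+        # (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45')).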
+
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
+
+class IPv6Interface(IPv6Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv6Address.__init__(self, address)
+ self.network = IPv6Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+ if isinstance(address, tuple):
+ IPv6Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv6Address.__init__(self, addr[0])
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+        if address_equal is NotImplemented or not address_equal:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+            strict: A boolean. If true, ensure that we have been passed
+                a true network address, e.g., 2001:db8::1000/124, and not
+                an IP address on a network, e.g., 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+            ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Efficient constructor from integer or packed address
+ if isinstance(address, (bytes, _compat_int_types)):
+ self.network_address = IPv6Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen)
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ self.network_address = IPv6Address(address[0])
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv6Address(packed &
+ int(self.netmask))
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv6Address(int(self.network_address) &
+ int(self.netmask))
+
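+        # For a /127 (RFC 6164 inter-router link) both addresses are usable
+        # hosts, so hosts() must not skip the Subnet-Router anycast address;
+        # __iter__ already yields the full range.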
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
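+        # Illustrative example: IPv6Network('2001:db8::/126').hosts() yields
+        # 2001:db8::1, 2001:db8::2 and 2001:db8::3, skipping the
+        # Subnet-Router anycast address 2001:db8::.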
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (self.network_address.is_site_local and
+ self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network('fe80::/10')
+
+ _multicast_network = IPv6Network('ff00::/8')
+
+ _private_networks = [
+ IPv6Network('::1/128'),
+ IPv6Network('::/128'),
+ IPv6Network('::ffff:0:0/96'),
+ IPv6Network('100::/64'),
+ IPv6Network('2001::/23'),
+ IPv6Network('2001:2::/48'),
+ IPv6Network('2001:db8::/32'),
+ IPv6Network('2001:10::/28'),
+ IPv6Network('fc00::/7'),
+ IPv6Network('fe80::/10'),
+ ]
+
+ _reserved_networks = [
+ IPv6Network('::/8'), IPv6Network('100::/8'),
+ IPv6Network('200::/7'), IPv6Network('400::/6'),
+ IPv6Network('800::/5'), IPv6Network('1000::/4'),
+ IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+ IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+ IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+ IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+ IPv6Network('FE00::/9'),
+ ]
+
+ _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/test/support/integration/plugins/module_utils/crypto.py b/test/support/integration/plugins/module_utils/crypto.py
new file mode 100644
index 00000000..e67eeff1
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/crypto.py
@@ -0,0 +1,2125 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ----------------------------------------------------------------------
+# A clearly marked portion of this file is licensed under the BSD license
+# Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# For more details, search for the function _obj2txt().
+# ---------------------------------------------------------------------
+# A clearly marked portion of this file is extracted from a project that
+# is licensed under the Apache License 2.0
+# Copyright (c) the OpenSSL contributors
+# For more details, search for the function _OID_MAP.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import sys
+from distutils.version import LooseVersion
+
+try:
+ import OpenSSL
+ from OpenSSL import crypto
+except ImportError:
+ # An error will be raised in the calling class to let the end
+ # user know that OpenSSL couldn't be found.
+ pass
+
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend as cryptography_backend
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives import serialization
+ import ipaddress
+
+ # Older versions of cryptography (< 2.1) do not have __hash__ functions for
+ # general name objects (DNSName, IPAddress, ...), while providing overloaded
+ # equality and string representation operations. This makes it impossible to
+ # use them in hash-based data structures such as set or dict. Since we are
+ # actually doing that in openssl_certificate, and potentially in other code,
+ # we need to monkey-patch __hash__ for these classes to make sure our code
+ # works fine.
+ if LooseVersion(cryptography.__version__) < LooseVersion('2.1'):
+        # A very simple hash function which relies on the repr() of an
+        # object being implemented. This is the case since at least
+        # cryptography 1.0, see
+ # https://github.com/pyca/cryptography/commit/7a9abce4bff36c05d26d8d2680303a6f64a0e84f
+ def simple_hash(self):
+ return hash(repr(self))
+
+ # The hash functions for the following types were added for cryptography 2.1:
+ # https://github.com/pyca/cryptography/commit/fbfc36da2a4769045f2373b004ddf0aff906cf38
+ x509.DNSName.__hash__ = simple_hash
+ x509.DirectoryName.__hash__ = simple_hash
+ x509.GeneralName.__hash__ = simple_hash
+ x509.IPAddress.__hash__ = simple_hash
+ x509.OtherName.__hash__ = simple_hash
+ x509.RegisteredID.__hash__ = simple_hash
+
+ if LooseVersion(cryptography.__version__) < LooseVersion('1.2'):
+ # The hash functions for the following types were added for cryptography 1.2:
+ # https://github.com/pyca/cryptography/commit/b642deed88a8696e5f01ce6855ccf89985fc35d0
+ # https://github.com/pyca/cryptography/commit/d1b5681f6db2bde7a14625538bd7907b08dfb486
+ x509.RFC822Name.__hash__ = simple_hash
+ x509.UniformResourceIdentifier.__hash__ = simple_hash
+
+ # Test whether we have support for X25519, X448, Ed25519 and/or Ed448
+ try:
+ import cryptography.hazmat.primitives.asymmetric.x25519
+ CRYPTOGRAPHY_HAS_X25519 = True
+ try:
+ cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
+ CRYPTOGRAPHY_HAS_X25519_FULL = True
+ except AttributeError:
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ except ImportError:
+ CRYPTOGRAPHY_HAS_X25519 = False
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.x448
+ CRYPTOGRAPHY_HAS_X448 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_X448 = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.ed25519
+ CRYPTOGRAPHY_HAS_ED25519 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_ED25519 = False
+ try:
+ import cryptography.hazmat.primitives.asymmetric.ed448
+ CRYPTOGRAPHY_HAS_ED448 = True
+ except ImportError:
+ CRYPTOGRAPHY_HAS_ED448 = False
+
+ HAS_CRYPTOGRAPHY = True
+except ImportError:
+ # Error handled in the calling module.
+ CRYPTOGRAPHY_HAS_X25519 = False
+ CRYPTOGRAPHY_HAS_X25519_FULL = False
+ CRYPTOGRAPHY_HAS_X448 = False
+ CRYPTOGRAPHY_HAS_ED25519 = False
+ CRYPTOGRAPHY_HAS_ED448 = False
+ HAS_CRYPTOGRAPHY = False
+
+
+import abc
+import base64
+import binascii
+import datetime
+import errno
+import hashlib
+import os
+import re
+import tempfile
+
+from ansible.module_utils import six
+from ansible.module_utils._text import to_native, to_bytes, to_text
+
+
+class OpenSSLObjectError(Exception):
+ pass
+
+
+class OpenSSLBadPassphraseError(OpenSSLObjectError):
+ pass
+
+
+def get_fingerprint_of_bytes(source):
+ """Generate the fingerprint of the given bytes."""
+
+ fingerprint = {}
+
+ try:
+ algorithms = hashlib.algorithms
+ except AttributeError:
+ try:
+ algorithms = hashlib.algorithms_guaranteed
+ except AttributeError:
+ return None
+
+ for algo in algorithms:
+ f = getattr(hashlib, algo)
+ try:
+ h = f(source)
+ except ValueError:
+ # This can happen for hash algorithms not supported in FIPS mode
+ # (https://github.com/ansible/ansible/issues/67213)
+ continue
+ try:
+ # Certain hash functions have a hexdigest() which expects a length parameter
+ pubkey_digest = h.hexdigest()
+ except TypeError:
+ pubkey_digest = h.hexdigest(32)
+ fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2))
+
+ return fingerprint
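+    # The result maps algorithm names to colon-separated hex digests,
+    # e.g. {'md5': '...', 'sha256': 'ab:cd:...'} (shape illustration only);
+    # algorithms rejected in FIPS mode are simply absent from the dict.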
+
+
+def get_fingerprint(path, passphrase=None, content=None, backend='pyopenssl'):
+ """Generate the fingerprint of the public key. """
+
+ privatekey = load_privatekey(path, passphrase=passphrase, content=content, check_passphrase=False, backend=backend)
+
+ if backend == 'pyopenssl':
+ try:
+ publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
+ except AttributeError:
+ # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
+ try:
+ bio = crypto._new_mem_buf()
+ rc = crypto._lib.i2d_PUBKEY_bio(bio, privatekey._pkey)
+ if rc != 1:
+ crypto._raise_current_error()
+ publickey = crypto._bio_to_string(bio)
+ except AttributeError:
+                # Return None instead of raising an error, so the caller
+                # simply ends up without a fingerprint.
+ return None
+ elif backend == 'cryptography':
+ publickey = privatekey.public_key().public_bytes(
+ serialization.Encoding.DER,
+ serialization.PublicFormat.SubjectPublicKeyInfo
+ )
+
+ return get_fingerprint_of_bytes(publickey)
+
+
+def load_file_if_exists(path, module=None, ignore_errors=False):
+ try:
+ with open(path, 'rb') as f:
+ return f.read()
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ return None
+ if ignore_errors:
+ return None
+ if module is None:
+ raise
+ module.fail_json('Error while loading {0} - {1}'.format(path, str(exc)))
+ except Exception as exc:
+ if ignore_errors:
+ return None
+ if module is None:
+ raise
+ module.fail_json('Error while loading {0} - {1}'.format(path, str(exc)))
+
+
+def load_privatekey(path, passphrase=None, check_passphrase=True, content=None, backend='pyopenssl'):
+ """Load the specified OpenSSL private key.
+
+ The content can also be specified via content; in that case,
+ this function will not load the key from disk.
+ """
+
+ try:
+ if content is None:
+ with open(path, 'rb') as b_priv_key_fh:
+ priv_key_detail = b_priv_key_fh.read()
+ else:
+ priv_key_detail = content
+
+ if backend == 'pyopenssl':
+
+            # First try: load the key with the supplied passphrase (or the
+            # empty string if none was given). This succeeds if the
+            # passphrase is correct or the key is not password-protected.
+ try:
+ result = crypto.load_privatekey(crypto.FILETYPE_PEM,
+ priv_key_detail,
+ to_bytes(passphrase or ''))
+ except crypto.Error as e:
+ if len(e.args) > 0 and len(e.args[0]) > 0:
+ if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
+ # This happens in case we have the wrong passphrase.
+ if passphrase is not None:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key!')
+ else:
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ raise OpenSSLObjectError('Error while deserializing key: {0}'.format(e))
+ if check_passphrase:
+                # Next, make sure that the key actually is protected by a
+                # passphrase (if we tried the empty string above, verify
+                # that the key is not protected by the empty string).
+ try:
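+                    # Deliberately use a wrong passphrase ('x', or 'y' if
+                    # the caller passed 'x'): if loading succeeds anyway,
+                    # the key cannot be password-protected.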
+ crypto.load_privatekey(crypto.FILETYPE_PEM,
+ priv_key_detail,
+ to_bytes('y' if passphrase == 'x' else 'x'))
+ if passphrase is not None:
+ # Since we can load the key without an exception, the
+ # key isn't password-protected
+ raise OpenSSLBadPassphraseError('Passphrase provided, but private key is not password-protected!')
+ except crypto.Error as e:
+ if passphrase is None and len(e.args) > 0 and len(e.args[0]) > 0:
+ if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
+ # The key is obviously protected by the empty string.
+ # Don't do this at home (if it's possible at all)...
+ raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
+ elif backend == 'cryptography':
+ try:
+ result = load_pem_private_key(priv_key_detail,
+ None if passphrase is None else to_bytes(passphrase),
+ cryptography_backend())
+ except TypeError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong or empty passphrase provided for private key')
+ except ValueError as dummy:
+ raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key')
+
+ return result
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate."""
+
+ try:
+ if content is None:
+ with open(path, 'rb') as cert_fh:
+ cert_content = cert_fh.read()
+ else:
+ cert_content = content
+ if backend == 'pyopenssl':
+ return crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_certificate(cert_content, cryptography_backend())
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+
+
+def load_certificate_request(path, content=None, backend='pyopenssl'):
+ """Load the specified certificate signing request."""
+ try:
+ if content is None:
+ with open(path, 'rb') as csr_fh:
+ csr_content = csr_fh.read()
+ else:
+ csr_content = content
+ except (IOError, OSError) as exc:
+ raise OpenSSLObjectError(exc)
+ if backend == 'pyopenssl':
+ return crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
+ elif backend == 'cryptography':
+ return x509.load_pem_x509_csr(csr_content, cryptography_backend())
+
+
+def parse_name_field(input_dict):
+ """Take a dict with key: value or key: list_of_values mappings and return a list of tuples"""
+
+ result = []
+ for key in input_dict:
+ if isinstance(input_dict[key], list):
+ for entry in input_dict[key]:
+ result.append((key, entry))
+ else:
+ result.append((key, input_dict[key]))
+ return result
+
+
+def convert_relative_to_datetime(relative_time_string):
+ """Get a datetime.datetime or None from a string in the time format described in sshd_config(5)"""
+
+ parsed_result = re.match(
+ r"^(?P<prefix>[+-])((?P<weeks>\d+)[wW])?((?P<days>\d+)[dD])?((?P<hours>\d+)[hH])?((?P<minutes>\d+)[mM])?((?P<seconds>\d+)[sS]?)?$",
+ relative_time_string)
+
+ if parsed_result is None or len(relative_time_string) == 1:
+ # not matched or only a single "+" or "-"
+ return None
+
+ offset = datetime.timedelta(0)
+ if parsed_result.group("weeks") is not None:
+ offset += datetime.timedelta(weeks=int(parsed_result.group("weeks")))
+ if parsed_result.group("days") is not None:
+ offset += datetime.timedelta(days=int(parsed_result.group("days")))
+ if parsed_result.group("hours") is not None:
+ offset += datetime.timedelta(hours=int(parsed_result.group("hours")))
+ if parsed_result.group("minutes") is not None:
+ offset += datetime.timedelta(
+ minutes=int(parsed_result.group("minutes")))
+ if parsed_result.group("seconds") is not None:
+ offset += datetime.timedelta(
+ seconds=int(parsed_result.group("seconds")))
+
+ if parsed_result.group("prefix") == "+":
+ return datetime.datetime.utcnow() + offset
+ else:
+ return datetime.datetime.utcnow() - offset
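+    # Illustrative examples: "+1w3d" yields utcnow() + 10 days, "-1h30m"
+    # yields utcnow() - 90 minutes, and a bare "+" or "-" returns None.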
+
+
+def get_relative_time_option(input_string, input_name, backend='cryptography'):
+ """Return an absolute timespec if a relative timespec or an ASN1 formatted
+ string is provided.
+
+ The return value will be a datetime object for the cryptography backend,
+ and a ASN1 formatted string for the pyopenssl backend."""
+ result = to_native(input_string)
+ if result is None:
+        raise OpenSSLObjectError(
+            'The timespec "%s" for %s is not valid' %
+            (input_string, input_name))
+ # Relative time
+ if result.startswith("+") or result.startswith("-"):
+ result_datetime = convert_relative_to_datetime(result)
+ if backend == 'pyopenssl':
+ return result_datetime.strftime("%Y%m%d%H%M%SZ")
+ elif backend == 'cryptography':
+ return result_datetime
+ # Absolute time
+ if backend == 'pyopenssl':
+ return input_string
+ elif backend == 'cryptography':
+ for date_fmt in ['%Y%m%d%H%M%SZ', '%Y%m%d%H%MZ', '%Y%m%d%H%M%S%z', '%Y%m%d%H%M%z']:
+ try:
+ return datetime.datetime.strptime(result, date_fmt)
+ except ValueError:
+ pass
+
+ raise OpenSSLObjectError(
+ 'The time spec "%s" for %s is invalid' %
+ (input_string, input_name)
+ )
+
+
+def select_message_digest(digest_string):
+ digest = None
+ if digest_string == 'sha256':
+ digest = hashes.SHA256()
+ elif digest_string == 'sha384':
+ digest = hashes.SHA384()
+ elif digest_string == 'sha512':
+ digest = hashes.SHA512()
+ elif digest_string == 'sha1':
+ digest = hashes.SHA1()
+ elif digest_string == 'md5':
+ digest = hashes.MD5()
+ return digest
+
+
+def write_file(module, content, default_mode=None, path=None):
+    """Write content into the destination file as securely as possible.
+
+    Uses file arguments from module.
+    """
+ # Find out parameters for file
+ file_args = module.load_file_common_arguments(module.params, path=path)
+ if file_args['mode'] is None:
+ file_args['mode'] = default_mode
+ # Create tempfile name
+ tmp_fd, tmp_name = tempfile.mkstemp(prefix=b'.ansible_tmp')
+ try:
+ os.close(tmp_fd)
+ except Exception as dummy:
+ pass
+ module.add_cleanup_file(tmp_name) # if we fail, let Ansible try to remove the file
+ try:
+ try:
+ # Create tempfile
+ file = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ os.write(file, content)
+ os.close(file)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result into temporary file: {0}'.format(e))
+ # Update destination to wanted permissions
+ if os.path.exists(file_args['path']):
+ module.set_fs_attributes_if_different(file_args, False)
+ # Move tempfile to final destination
+ module.atomic_move(tmp_name, file_args['path'])
+ # Try to update permissions again
+ module.set_fs_attributes_if_different(file_args, False)
+ except Exception as e:
+ try:
+ os.remove(tmp_name)
+ except Exception as dummy:
+ pass
+ module.fail_json(msg='Error while writing result: {0}'.format(e))
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpenSSLObject(object):
+
+ def __init__(self, path, state, force, check_mode):
+ self.path = path
+ self.state = state
+ self.force = force
+ self.name = os.path.basename(path)
+ self.changed = False
+ self.check_mode = check_mode
+
+ def check(self, module, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ def _check_state():
+ return os.path.exists(self.path)
+
+ def _check_perms(module):
+ file_args = module.load_file_common_arguments(module.params)
+ return not module.set_fs_attributes_if_different(file_args, False)
+
+ if not perms_required:
+ return _check_state()
+
+ return _check_state() and _check_perms(module)
+
+ @abc.abstractmethod
+ def dump(self):
+ """Serialize the object into a dictionary."""
+
+ pass
+
+ @abc.abstractmethod
+ def generate(self):
+ """Generate the resource."""
+
+ pass
+
+ def remove(self, module):
+ """Remove the resource from the filesystem."""
+
+ try:
+ os.remove(self.path)
+ self.changed = True
+        except OSError as exc:
+            if exc.errno != errno.ENOENT:
+                raise OpenSSLObjectError(exc)
+
+
+# #####################################################################################
+# #####################################################################################
+# This has been extracted from the OpenSSL project's objects.txt:
+# https://github.com/openssl/openssl/blob/9537fe5757bb07761fa275d779bbd40bcf5530e4/crypto/objects/objects.txt
+# Extracted with https://gist.github.com/felixfontein/376748017ad65ead093d56a45a5bf376
+#
+# In case the following data structure has any copyrightable content, note that it is licensed as follows:
+# Copyright (c) the OpenSSL contributors
+# Licensed under the Apache License 2.0
+# https://github.com/openssl/openssl/blob/master/LICENSE
+_OID_MAP = {
+ '0': ('itu-t', 'ITU-T', 'ccitt'),
+ '0.3.4401.5': ('ntt-ds', ),
+ '0.3.4401.5.3.1.9': ('camellia', ),
+ '0.3.4401.5.3.1.9.1': ('camellia-128-ecb', 'CAMELLIA-128-ECB'),
+ '0.3.4401.5.3.1.9.3': ('camellia-128-ofb', 'CAMELLIA-128-OFB'),
+ '0.3.4401.5.3.1.9.4': ('camellia-128-cfb', 'CAMELLIA-128-CFB'),
+ '0.3.4401.5.3.1.9.6': ('camellia-128-gcm', 'CAMELLIA-128-GCM'),
+ '0.3.4401.5.3.1.9.7': ('camellia-128-ccm', 'CAMELLIA-128-CCM'),
+ '0.3.4401.5.3.1.9.9': ('camellia-128-ctr', 'CAMELLIA-128-CTR'),
+ '0.3.4401.5.3.1.9.10': ('camellia-128-cmac', 'CAMELLIA-128-CMAC'),
+ '0.3.4401.5.3.1.9.21': ('camellia-192-ecb', 'CAMELLIA-192-ECB'),
+ '0.3.4401.5.3.1.9.23': ('camellia-192-ofb', 'CAMELLIA-192-OFB'),
+ '0.3.4401.5.3.1.9.24': ('camellia-192-cfb', 'CAMELLIA-192-CFB'),
+ '0.3.4401.5.3.1.9.26': ('camellia-192-gcm', 'CAMELLIA-192-GCM'),
+ '0.3.4401.5.3.1.9.27': ('camellia-192-ccm', 'CAMELLIA-192-CCM'),
+ '0.3.4401.5.3.1.9.29': ('camellia-192-ctr', 'CAMELLIA-192-CTR'),
+ '0.3.4401.5.3.1.9.30': ('camellia-192-cmac', 'CAMELLIA-192-CMAC'),
+ '0.3.4401.5.3.1.9.41': ('camellia-256-ecb', 'CAMELLIA-256-ECB'),
+ '0.3.4401.5.3.1.9.43': ('camellia-256-ofb', 'CAMELLIA-256-OFB'),
+ '0.3.4401.5.3.1.9.44': ('camellia-256-cfb', 'CAMELLIA-256-CFB'),
+ '0.3.4401.5.3.1.9.46': ('camellia-256-gcm', 'CAMELLIA-256-GCM'),
+ '0.3.4401.5.3.1.9.47': ('camellia-256-ccm', 'CAMELLIA-256-CCM'),
+ '0.3.4401.5.3.1.9.49': ('camellia-256-ctr', 'CAMELLIA-256-CTR'),
+ '0.3.4401.5.3.1.9.50': ('camellia-256-cmac', 'CAMELLIA-256-CMAC'),
+ '0.9': ('data', ),
+ '0.9.2342': ('pss', ),
+ '0.9.2342.19200300': ('ucl', ),
+ '0.9.2342.19200300.100': ('pilot', ),
+ '0.9.2342.19200300.100.1': ('pilotAttributeType', ),
+ '0.9.2342.19200300.100.1.1': ('userId', 'UID'),
+ '0.9.2342.19200300.100.1.2': ('textEncodedORAddress', ),
+ '0.9.2342.19200300.100.1.3': ('rfc822Mailbox', 'mail'),
+ '0.9.2342.19200300.100.1.4': ('info', ),
+ '0.9.2342.19200300.100.1.5': ('favouriteDrink', ),
+ '0.9.2342.19200300.100.1.6': ('roomNumber', ),
+ '0.9.2342.19200300.100.1.7': ('photo', ),
+ '0.9.2342.19200300.100.1.8': ('userClass', ),
+ '0.9.2342.19200300.100.1.9': ('host', ),
+ '0.9.2342.19200300.100.1.10': ('manager', ),
+ '0.9.2342.19200300.100.1.11': ('documentIdentifier', ),
+ '0.9.2342.19200300.100.1.12': ('documentTitle', ),
+ '0.9.2342.19200300.100.1.13': ('documentVersion', ),
+ '0.9.2342.19200300.100.1.14': ('documentAuthor', ),
+ '0.9.2342.19200300.100.1.15': ('documentLocation', ),
+ '0.9.2342.19200300.100.1.20': ('homeTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.21': ('secretary', ),
+ '0.9.2342.19200300.100.1.22': ('otherMailbox', ),
+ '0.9.2342.19200300.100.1.23': ('lastModifiedTime', ),
+ '0.9.2342.19200300.100.1.24': ('lastModifiedBy', ),
+ '0.9.2342.19200300.100.1.25': ('domainComponent', 'DC'),
+ '0.9.2342.19200300.100.1.26': ('aRecord', ),
+ '0.9.2342.19200300.100.1.27': ('pilotAttributeType27', ),
+ '0.9.2342.19200300.100.1.28': ('mXRecord', ),
+ '0.9.2342.19200300.100.1.29': ('nSRecord', ),
+ '0.9.2342.19200300.100.1.30': ('sOARecord', ),
+ '0.9.2342.19200300.100.1.31': ('cNAMERecord', ),
+ '0.9.2342.19200300.100.1.37': ('associatedDomain', ),
+ '0.9.2342.19200300.100.1.38': ('associatedName', ),
+ '0.9.2342.19200300.100.1.39': ('homePostalAddress', ),
+ '0.9.2342.19200300.100.1.40': ('personalTitle', ),
+ '0.9.2342.19200300.100.1.41': ('mobileTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.42': ('pagerTelephoneNumber', ),
+ '0.9.2342.19200300.100.1.43': ('friendlyCountryName', ),
+ '0.9.2342.19200300.100.1.44': ('uniqueIdentifier', 'uid'),
+ '0.9.2342.19200300.100.1.45': ('organizationalStatus', ),
+ '0.9.2342.19200300.100.1.46': ('janetMailbox', ),
+ '0.9.2342.19200300.100.1.47': ('mailPreferenceOption', ),
+ '0.9.2342.19200300.100.1.48': ('buildingName', ),
+ '0.9.2342.19200300.100.1.49': ('dSAQuality', ),
+ '0.9.2342.19200300.100.1.50': ('singleLevelQuality', ),
+ '0.9.2342.19200300.100.1.51': ('subtreeMinimumQuality', ),
+ '0.9.2342.19200300.100.1.52': ('subtreeMaximumQuality', ),
+ '0.9.2342.19200300.100.1.53': ('personalSignature', ),
+ '0.9.2342.19200300.100.1.54': ('dITRedirect', ),
+ '0.9.2342.19200300.100.1.55': ('audio', ),
+ '0.9.2342.19200300.100.1.56': ('documentPublisher', ),
+ '0.9.2342.19200300.100.3': ('pilotAttributeSyntax', ),
+ '0.9.2342.19200300.100.3.4': ('iA5StringSyntax', ),
+ '0.9.2342.19200300.100.3.5': ('caseIgnoreIA5StringSyntax', ),
+ '0.9.2342.19200300.100.4': ('pilotObjectClass', ),
+ '0.9.2342.19200300.100.4.3': ('pilotObject', ),
+ '0.9.2342.19200300.100.4.4': ('pilotPerson', ),
+ '0.9.2342.19200300.100.4.5': ('account', ),
+ '0.9.2342.19200300.100.4.6': ('document', ),
+ '0.9.2342.19200300.100.4.7': ('room', ),
+ '0.9.2342.19200300.100.4.9': ('documentSeries', ),
+ '0.9.2342.19200300.100.4.13': ('Domain', 'domain'),
+ '0.9.2342.19200300.100.4.14': ('rFC822localPart', ),
+ '0.9.2342.19200300.100.4.15': ('dNSDomain', ),
+ '0.9.2342.19200300.100.4.17': ('domainRelatedObject', ),
+ '0.9.2342.19200300.100.4.18': ('friendlyCountry', ),
+ '0.9.2342.19200300.100.4.19': ('simpleSecurityObject', ),
+ '0.9.2342.19200300.100.4.20': ('pilotOrganization', ),
+ '0.9.2342.19200300.100.4.21': ('pilotDSA', ),
+ '0.9.2342.19200300.100.4.22': ('qualityLabelledData', ),
+ '0.9.2342.19200300.100.10': ('pilotGroups', ),
+ '1': ('iso', 'ISO'),
+ '1.0.9797.3.4': ('gmac', 'GMAC'),
+ '1.0.10118.3.0.55': ('whirlpool', ),
+ '1.2': ('ISO Member Body', 'member-body'),
+ '1.2.156': ('ISO CN Member Body', 'ISO-CN'),
+ '1.2.156.10197': ('oscca', ),
+ '1.2.156.10197.1': ('sm-scheme', ),
+ '1.2.156.10197.1.104.1': ('sm4-ecb', 'SM4-ECB'),
+ '1.2.156.10197.1.104.2': ('sm4-cbc', 'SM4-CBC'),
+ '1.2.156.10197.1.104.3': ('sm4-ofb', 'SM4-OFB'),
+ '1.2.156.10197.1.104.4': ('sm4-cfb', 'SM4-CFB'),
+ '1.2.156.10197.1.104.5': ('sm4-cfb1', 'SM4-CFB1'),
+ '1.2.156.10197.1.104.6': ('sm4-cfb8', 'SM4-CFB8'),
+ '1.2.156.10197.1.104.7': ('sm4-ctr', 'SM4-CTR'),
+ '1.2.156.10197.1.301': ('sm2', 'SM2'),
+ '1.2.156.10197.1.401': ('sm3', 'SM3'),
+ '1.2.156.10197.1.501': ('SM2-with-SM3', 'SM2-SM3'),
+ '1.2.156.10197.1.504': ('sm3WithRSAEncryption', 'RSA-SM3'),
+ '1.2.392.200011.61.1.1.1.2': ('camellia-128-cbc', 'CAMELLIA-128-CBC'),
+ '1.2.392.200011.61.1.1.1.3': ('camellia-192-cbc', 'CAMELLIA-192-CBC'),
+ '1.2.392.200011.61.1.1.1.4': ('camellia-256-cbc', 'CAMELLIA-256-CBC'),
+ '1.2.392.200011.61.1.1.3.2': ('id-camellia128-wrap', ),
+ '1.2.392.200011.61.1.1.3.3': ('id-camellia192-wrap', ),
+ '1.2.392.200011.61.1.1.3.4': ('id-camellia256-wrap', ),
+ '1.2.410.200004': ('kisa', 'KISA'),
+ '1.2.410.200004.1.3': ('seed-ecb', 'SEED-ECB'),
+ '1.2.410.200004.1.4': ('seed-cbc', 'SEED-CBC'),
+ '1.2.410.200004.1.5': ('seed-cfb', 'SEED-CFB'),
+ '1.2.410.200004.1.6': ('seed-ofb', 'SEED-OFB'),
+ '1.2.410.200046.1.1': ('aria', ),
+ '1.2.410.200046.1.1.1': ('aria-128-ecb', 'ARIA-128-ECB'),
+ '1.2.410.200046.1.1.2': ('aria-128-cbc', 'ARIA-128-CBC'),
+ '1.2.410.200046.1.1.3': ('aria-128-cfb', 'ARIA-128-CFB'),
+ '1.2.410.200046.1.1.4': ('aria-128-ofb', 'ARIA-128-OFB'),
+ '1.2.410.200046.1.1.5': ('aria-128-ctr', 'ARIA-128-CTR'),
+ '1.2.410.200046.1.1.6': ('aria-192-ecb', 'ARIA-192-ECB'),
+ '1.2.410.200046.1.1.7': ('aria-192-cbc', 'ARIA-192-CBC'),
+ '1.2.410.200046.1.1.8': ('aria-192-cfb', 'ARIA-192-CFB'),
+ '1.2.410.200046.1.1.9': ('aria-192-ofb', 'ARIA-192-OFB'),
+ '1.2.410.200046.1.1.10': ('aria-192-ctr', 'ARIA-192-CTR'),
+ '1.2.410.200046.1.1.11': ('aria-256-ecb', 'ARIA-256-ECB'),
+ '1.2.410.200046.1.1.12': ('aria-256-cbc', 'ARIA-256-CBC'),
+ '1.2.410.200046.1.1.13': ('aria-256-cfb', 'ARIA-256-CFB'),
+ '1.2.410.200046.1.1.14': ('aria-256-ofb', 'ARIA-256-OFB'),
+ '1.2.410.200046.1.1.15': ('aria-256-ctr', 'ARIA-256-CTR'),
+ '1.2.410.200046.1.1.34': ('aria-128-gcm', 'ARIA-128-GCM'),
+ '1.2.410.200046.1.1.35': ('aria-192-gcm', 'ARIA-192-GCM'),
+ '1.2.410.200046.1.1.36': ('aria-256-gcm', 'ARIA-256-GCM'),
+ '1.2.410.200046.1.1.37': ('aria-128-ccm', 'ARIA-128-CCM'),
+ '1.2.410.200046.1.1.38': ('aria-192-ccm', 'ARIA-192-CCM'),
+ '1.2.410.200046.1.1.39': ('aria-256-ccm', 'ARIA-256-CCM'),
+ '1.2.643.2.2': ('cryptopro', ),
+ '1.2.643.2.2.3': ('GOST R 34.11-94 with GOST R 34.10-2001', 'id-GostR3411-94-with-GostR3410-2001'),
+ '1.2.643.2.2.4': ('GOST R 34.11-94 with GOST R 34.10-94', 'id-GostR3411-94-with-GostR3410-94'),
+ '1.2.643.2.2.9': ('GOST R 34.11-94', 'md_gost94'),
+ '1.2.643.2.2.10': ('HMAC GOST 34.11-94', 'id-HMACGostR3411-94'),
+ '1.2.643.2.2.14.0': ('id-Gost28147-89-None-KeyMeshing', ),
+ '1.2.643.2.2.14.1': ('id-Gost28147-89-CryptoPro-KeyMeshing', ),
+ '1.2.643.2.2.19': ('GOST R 34.10-2001', 'gost2001'),
+ '1.2.643.2.2.20': ('GOST R 34.10-94', 'gost94'),
+ '1.2.643.2.2.20.1': ('id-GostR3410-94-a', ),
+ '1.2.643.2.2.20.2': ('id-GostR3410-94-aBis', ),
+ '1.2.643.2.2.20.3': ('id-GostR3410-94-b', ),
+ '1.2.643.2.2.20.4': ('id-GostR3410-94-bBis', ),
+ '1.2.643.2.2.21': ('GOST 28147-89', 'gost89'),
+ '1.2.643.2.2.22': ('GOST 28147-89 MAC', 'gost-mac'),
+ '1.2.643.2.2.23': ('GOST R 34.11-94 PRF', 'prf-gostr3411-94'),
+ '1.2.643.2.2.30.0': ('id-GostR3411-94-TestParamSet', ),
+ '1.2.643.2.2.30.1': ('id-GostR3411-94-CryptoProParamSet', ),
+ '1.2.643.2.2.31.0': ('id-Gost28147-89-TestParamSet', ),
+ '1.2.643.2.2.31.1': ('id-Gost28147-89-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.31.2': ('id-Gost28147-89-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.31.3': ('id-Gost28147-89-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.31.4': ('id-Gost28147-89-CryptoPro-D-ParamSet', ),
+ '1.2.643.2.2.31.5': ('id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet', ),
+ '1.2.643.2.2.31.6': ('id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet', ),
+ '1.2.643.2.2.31.7': ('id-Gost28147-89-CryptoPro-RIC-1-ParamSet', ),
+ '1.2.643.2.2.32.0': ('id-GostR3410-94-TestParamSet', ),
+ '1.2.643.2.2.32.2': ('id-GostR3410-94-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.32.3': ('id-GostR3410-94-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.32.4': ('id-GostR3410-94-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.32.5': ('id-GostR3410-94-CryptoPro-D-ParamSet', ),
+ '1.2.643.2.2.33.1': ('id-GostR3410-94-CryptoPro-XchA-ParamSet', ),
+ '1.2.643.2.2.33.2': ('id-GostR3410-94-CryptoPro-XchB-ParamSet', ),
+ '1.2.643.2.2.33.3': ('id-GostR3410-94-CryptoPro-XchC-ParamSet', ),
+ '1.2.643.2.2.35.0': ('id-GostR3410-2001-TestParamSet', ),
+ '1.2.643.2.2.35.1': ('id-GostR3410-2001-CryptoPro-A-ParamSet', ),
+ '1.2.643.2.2.35.2': ('id-GostR3410-2001-CryptoPro-B-ParamSet', ),
+ '1.2.643.2.2.35.3': ('id-GostR3410-2001-CryptoPro-C-ParamSet', ),
+ '1.2.643.2.2.36.0': ('id-GostR3410-2001-CryptoPro-XchA-ParamSet', ),
+ '1.2.643.2.2.36.1': ('id-GostR3410-2001-CryptoPro-XchB-ParamSet', ),
+ '1.2.643.2.2.98': ('GOST R 34.10-2001 DH', 'id-GostR3410-2001DH'),
+ '1.2.643.2.2.99': ('GOST R 34.10-94 DH', 'id-GostR3410-94DH'),
+ '1.2.643.2.9': ('cryptocom', ),
+ '1.2.643.2.9.1.3.3': ('GOST R 34.11-94 with GOST R 34.10-94 Cryptocom', 'id-GostR3411-94-with-GostR3410-94-cc'),
+ '1.2.643.2.9.1.3.4': ('GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom', 'id-GostR3411-94-with-GostR3410-2001-cc'),
+ '1.2.643.2.9.1.5.3': ('GOST 34.10-94 Cryptocom', 'gost94cc'),
+ '1.2.643.2.9.1.5.4': ('GOST 34.10-2001 Cryptocom', 'gost2001cc'),
+ '1.2.643.2.9.1.6.1': ('GOST 28147-89 Cryptocom ParamSet', 'id-Gost28147-89-cc'),
+ '1.2.643.2.9.1.8.1': ('GOST R 3410-2001 Parameter Set Cryptocom', 'id-GostR3410-2001-ParamSet-cc'),
+ '1.2.643.3.131.1.1': ('INN', 'INN'),
+ '1.2.643.7.1': ('id-tc26', ),
+ '1.2.643.7.1.1': ('id-tc26-algorithms', ),
+ '1.2.643.7.1.1.1': ('id-tc26-sign', ),
+ '1.2.643.7.1.1.1.1': ('GOST R 34.10-2012 with 256 bit modulus', 'gost2012_256'),
+ '1.2.643.7.1.1.1.2': ('GOST R 34.10-2012 with 512 bit modulus', 'gost2012_512'),
+ '1.2.643.7.1.1.2': ('id-tc26-digest', ),
+ '1.2.643.7.1.1.2.2': ('GOST R 34.11-2012 with 256 bit hash', 'md_gost12_256'),
+ '1.2.643.7.1.1.2.3': ('GOST R 34.11-2012 with 512 bit hash', 'md_gost12_512'),
+ '1.2.643.7.1.1.3': ('id-tc26-signwithdigest', ),
+ '1.2.643.7.1.1.3.2': ('GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)', 'id-tc26-signwithdigest-gost3410-2012-256'),
+ '1.2.643.7.1.1.3.3': ('GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)', 'id-tc26-signwithdigest-gost3410-2012-512'),
+ '1.2.643.7.1.1.4': ('id-tc26-mac', ),
+ '1.2.643.7.1.1.4.1': ('HMAC GOST 34.11-2012 256 bit', 'id-tc26-hmac-gost-3411-2012-256'),
+ '1.2.643.7.1.1.4.2': ('HMAC GOST 34.11-2012 512 bit', 'id-tc26-hmac-gost-3411-2012-512'),
+ '1.2.643.7.1.1.5': ('id-tc26-cipher', ),
+ '1.2.643.7.1.1.5.1': ('id-tc26-cipher-gostr3412-2015-magma', ),
+ '1.2.643.7.1.1.5.1.1': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm', ),
+ '1.2.643.7.1.1.5.1.2': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac', ),
+ '1.2.643.7.1.1.5.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik', ),
+ '1.2.643.7.1.1.5.2.1': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm', ),
+ '1.2.643.7.1.1.5.2.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac', ),
+ '1.2.643.7.1.1.6': ('id-tc26-agreement', ),
+ '1.2.643.7.1.1.6.1': ('id-tc26-agreement-gost-3410-2012-256', ),
+ '1.2.643.7.1.1.6.2': ('id-tc26-agreement-gost-3410-2012-512', ),
+ '1.2.643.7.1.1.7': ('id-tc26-wrap', ),
+ '1.2.643.7.1.1.7.1': ('id-tc26-wrap-gostr3412-2015-magma', ),
+ '1.2.643.7.1.1.7.1.1': ('id-tc26-wrap-gostr3412-2015-magma-kexp15', 'id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15'),
+ '1.2.643.7.1.1.7.2': ('id-tc26-wrap-gostr3412-2015-kuznyechik', ),
+ '1.2.643.7.1.2': ('id-tc26-constants', ),
+ '1.2.643.7.1.2.1': ('id-tc26-sign-constants', ),
+ '1.2.643.7.1.2.1.1': ('id-tc26-gost-3410-2012-256-constants', ),
+ '1.2.643.7.1.2.1.1.1': ('GOST R 34.10-2012 (256 bit) ParamSet A', 'id-tc26-gost-3410-2012-256-paramSetA'),
+ '1.2.643.7.1.2.1.1.2': ('GOST R 34.10-2012 (256 bit) ParamSet B', 'id-tc26-gost-3410-2012-256-paramSetB'),
+ '1.2.643.7.1.2.1.1.3': ('GOST R 34.10-2012 (256 bit) ParamSet C', 'id-tc26-gost-3410-2012-256-paramSetC'),
+ '1.2.643.7.1.2.1.1.4': ('GOST R 34.10-2012 (256 bit) ParamSet D', 'id-tc26-gost-3410-2012-256-paramSetD'),
+ '1.2.643.7.1.2.1.2': ('id-tc26-gost-3410-2012-512-constants', ),
+ '1.2.643.7.1.2.1.2.0': ('GOST R 34.10-2012 (512 bit) testing parameter set', 'id-tc26-gost-3410-2012-512-paramSetTest'),
+ '1.2.643.7.1.2.1.2.1': ('GOST R 34.10-2012 (512 bit) ParamSet A', 'id-tc26-gost-3410-2012-512-paramSetA'),
+ '1.2.643.7.1.2.1.2.2': ('GOST R 34.10-2012 (512 bit) ParamSet B', 'id-tc26-gost-3410-2012-512-paramSetB'),
+ '1.2.643.7.1.2.1.2.3': ('GOST R 34.10-2012 (512 bit) ParamSet C', 'id-tc26-gost-3410-2012-512-paramSetC'),
+ '1.2.643.7.1.2.2': ('id-tc26-digest-constants', ),
+ '1.2.643.7.1.2.5': ('id-tc26-cipher-constants', ),
+ '1.2.643.7.1.2.5.1': ('id-tc26-gost-28147-constants', ),
+ '1.2.643.7.1.2.5.1.1': ('GOST 28147-89 TC26 parameter set', 'id-tc26-gost-28147-param-Z'),
+ '1.2.643.100.1': ('OGRN', 'OGRN'),
+ '1.2.643.100.3': ('SNILS', 'SNILS'),
+ '1.2.643.100.111': ('Signing Tool of Subject', 'subjectSignTool'),
+ '1.2.643.100.112': ('Signing Tool of Issuer', 'issuerSignTool'),
+ '1.2.804': ('ISO-UA', ),
+ '1.2.804.2.1.1.1': ('ua-pki', ),
+ '1.2.804.2.1.1.1.1.1.1': ('DSTU Gost 28147-2009', 'dstu28147'),
+ '1.2.804.2.1.1.1.1.1.1.2': ('DSTU Gost 28147-2009 OFB mode', 'dstu28147-ofb'),
+ '1.2.804.2.1.1.1.1.1.1.3': ('DSTU Gost 28147-2009 CFB mode', 'dstu28147-cfb'),
+ '1.2.804.2.1.1.1.1.1.1.5': ('DSTU Gost 28147-2009 key wrap', 'dstu28147-wrap'),
+ '1.2.804.2.1.1.1.1.1.2': ('HMAC DSTU Gost 34311-95', 'hmacWithDstu34311'),
+ '1.2.804.2.1.1.1.1.2.1': ('DSTU Gost 34311-95', 'dstu34311'),
+ '1.2.804.2.1.1.1.1.3.1.1': ('DSTU 4145-2002 little endian', 'dstu4145le'),
+ '1.2.804.2.1.1.1.1.3.1.1.1.1': ('DSTU 4145-2002 big endian', 'dstu4145be'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.0': ('DSTU curve 0', 'uacurve0'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.1': ('DSTU curve 1', 'uacurve1'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.2': ('DSTU curve 2', 'uacurve2'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.3': ('DSTU curve 3', 'uacurve3'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.4': ('DSTU curve 4', 'uacurve4'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.5': ('DSTU curve 5', 'uacurve5'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.6': ('DSTU curve 6', 'uacurve6'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.7': ('DSTU curve 7', 'uacurve7'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.8': ('DSTU curve 8', 'uacurve8'),
+ '1.2.804.2.1.1.1.1.3.1.1.2.9': ('DSTU curve 9', 'uacurve9'),
+ '1.2.840': ('ISO US Member Body', 'ISO-US'),
+ '1.2.840.10040': ('X9.57', 'X9-57'),
+ '1.2.840.10040.2': ('holdInstruction', ),
+ '1.2.840.10040.2.1': ('Hold Instruction None', 'holdInstructionNone'),
+ '1.2.840.10040.2.2': ('Hold Instruction Call Issuer', 'holdInstructionCallIssuer'),
+ '1.2.840.10040.2.3': ('Hold Instruction Reject', 'holdInstructionReject'),
+ '1.2.840.10040.4': ('X9.57 CM ?', 'X9cm'),
+ '1.2.840.10040.4.1': ('dsaEncryption', 'DSA'),
+ '1.2.840.10040.4.3': ('dsaWithSHA1', 'DSA-SHA1'),
+ '1.2.840.10045': ('ANSI X9.62', 'ansi-X9-62'),
+ '1.2.840.10045.1': ('id-fieldType', ),
+ '1.2.840.10045.1.1': ('prime-field', ),
+ '1.2.840.10045.1.2': ('characteristic-two-field', ),
+ '1.2.840.10045.1.2.3': ('id-characteristic-two-basis', ),
+ '1.2.840.10045.1.2.3.1': ('onBasis', ),
+ '1.2.840.10045.1.2.3.2': ('tpBasis', ),
+ '1.2.840.10045.1.2.3.3': ('ppBasis', ),
+ '1.2.840.10045.2': ('id-publicKeyType', ),
+ '1.2.840.10045.2.1': ('id-ecPublicKey', ),
+ '1.2.840.10045.3': ('ellipticCurve', ),
+ '1.2.840.10045.3.0': ('c-TwoCurve', ),
+ '1.2.840.10045.3.0.1': ('c2pnb163v1', ),
+ '1.2.840.10045.3.0.2': ('c2pnb163v2', ),
+ '1.2.840.10045.3.0.3': ('c2pnb163v3', ),
+ '1.2.840.10045.3.0.4': ('c2pnb176v1', ),
+ '1.2.840.10045.3.0.5': ('c2tnb191v1', ),
+ '1.2.840.10045.3.0.6': ('c2tnb191v2', ),
+ '1.2.840.10045.3.0.7': ('c2tnb191v3', ),
+ '1.2.840.10045.3.0.8': ('c2onb191v4', ),
+ '1.2.840.10045.3.0.9': ('c2onb191v5', ),
+ '1.2.840.10045.3.0.10': ('c2pnb208w1', ),
+ '1.2.840.10045.3.0.11': ('c2tnb239v1', ),
+ '1.2.840.10045.3.0.12': ('c2tnb239v2', ),
+ '1.2.840.10045.3.0.13': ('c2tnb239v3', ),
+ '1.2.840.10045.3.0.14': ('c2onb239v4', ),
+ '1.2.840.10045.3.0.15': ('c2onb239v5', ),
+ '1.2.840.10045.3.0.16': ('c2pnb272w1', ),
+ '1.2.840.10045.3.0.17': ('c2pnb304w1', ),
+ '1.2.840.10045.3.0.18': ('c2tnb359v1', ),
+ '1.2.840.10045.3.0.19': ('c2pnb368w1', ),
+ '1.2.840.10045.3.0.20': ('c2tnb431r1', ),
+ '1.2.840.10045.3.1': ('primeCurve', ),
+ '1.2.840.10045.3.1.1': ('prime192v1', ),
+ '1.2.840.10045.3.1.2': ('prime192v2', ),
+ '1.2.840.10045.3.1.3': ('prime192v3', ),
+ '1.2.840.10045.3.1.4': ('prime239v1', ),
+ '1.2.840.10045.3.1.5': ('prime239v2', ),
+ '1.2.840.10045.3.1.6': ('prime239v3', ),
+ '1.2.840.10045.3.1.7': ('prime256v1', ),
+ '1.2.840.10045.4': ('id-ecSigType', ),
+ '1.2.840.10045.4.1': ('ecdsa-with-SHA1', ),
+ '1.2.840.10045.4.2': ('ecdsa-with-Recommended', ),
+ '1.2.840.10045.4.3': ('ecdsa-with-Specified', ),
+ '1.2.840.10045.4.3.1': ('ecdsa-with-SHA224', ),
+ '1.2.840.10045.4.3.2': ('ecdsa-with-SHA256', ),
+ '1.2.840.10045.4.3.3': ('ecdsa-with-SHA384', ),
+ '1.2.840.10045.4.3.4': ('ecdsa-with-SHA512', ),
+ '1.2.840.10046.2.1': ('X9.42 DH', 'dhpublicnumber'),
+ '1.2.840.113533.7.66.10': ('cast5-cbc', 'CAST5-CBC'),
+ '1.2.840.113533.7.66.12': ('pbeWithMD5AndCast5CBC', ),
+ '1.2.840.113533.7.66.13': ('password based MAC', 'id-PasswordBasedMAC'),
+ '1.2.840.113533.7.66.30': ('Diffie-Hellman based MAC', 'id-DHBasedMac'),
+ '1.2.840.113549': ('RSA Data Security, Inc.', 'rsadsi'),
+ '1.2.840.113549.1': ('RSA Data Security, Inc. PKCS', 'pkcs'),
+ '1.2.840.113549.1.1': ('pkcs1', ),
+ '1.2.840.113549.1.1.1': ('rsaEncryption', ),
+ '1.2.840.113549.1.1.2': ('md2WithRSAEncryption', 'RSA-MD2'),
+ '1.2.840.113549.1.1.3': ('md4WithRSAEncryption', 'RSA-MD4'),
+ '1.2.840.113549.1.1.4': ('md5WithRSAEncryption', 'RSA-MD5'),
+ '1.2.840.113549.1.1.5': ('sha1WithRSAEncryption', 'RSA-SHA1'),
+ '1.2.840.113549.1.1.6': ('rsaOAEPEncryptionSET', ),
+ '1.2.840.113549.1.1.7': ('rsaesOaep', 'RSAES-OAEP'),
+ '1.2.840.113549.1.1.8': ('mgf1', 'MGF1'),
+ '1.2.840.113549.1.1.9': ('pSpecified', 'PSPECIFIED'),
+ '1.2.840.113549.1.1.10': ('rsassaPss', 'RSASSA-PSS'),
+ '1.2.840.113549.1.1.11': ('sha256WithRSAEncryption', 'RSA-SHA256'),
+ '1.2.840.113549.1.1.12': ('sha384WithRSAEncryption', 'RSA-SHA384'),
+ '1.2.840.113549.1.1.13': ('sha512WithRSAEncryption', 'RSA-SHA512'),
+ '1.2.840.113549.1.1.14': ('sha224WithRSAEncryption', 'RSA-SHA224'),
+ '1.2.840.113549.1.1.15': ('sha512-224WithRSAEncryption', 'RSA-SHA512/224'),
+ '1.2.840.113549.1.1.16': ('sha512-256WithRSAEncryption', 'RSA-SHA512/256'),
+ '1.2.840.113549.1.3': ('pkcs3', ),
+ '1.2.840.113549.1.3.1': ('dhKeyAgreement', ),
+ '1.2.840.113549.1.5': ('pkcs5', ),
+ '1.2.840.113549.1.5.1': ('pbeWithMD2AndDES-CBC', 'PBE-MD2-DES'),
+ '1.2.840.113549.1.5.3': ('pbeWithMD5AndDES-CBC', 'PBE-MD5-DES'),
+ '1.2.840.113549.1.5.4': ('pbeWithMD2AndRC2-CBC', 'PBE-MD2-RC2-64'),
+ '1.2.840.113549.1.5.6': ('pbeWithMD5AndRC2-CBC', 'PBE-MD5-RC2-64'),
+ '1.2.840.113549.1.5.10': ('pbeWithSHA1AndDES-CBC', 'PBE-SHA1-DES'),
+ '1.2.840.113549.1.5.11': ('pbeWithSHA1AndRC2-CBC', 'PBE-SHA1-RC2-64'),
+ '1.2.840.113549.1.5.12': ('PBKDF2', ),
+ '1.2.840.113549.1.5.13': ('PBES2', ),
+ '1.2.840.113549.1.5.14': ('PBMAC1', ),
+ '1.2.840.113549.1.7': ('pkcs7', ),
+ '1.2.840.113549.1.7.1': ('pkcs7-data', ),
+ '1.2.840.113549.1.7.2': ('pkcs7-signedData', ),
+ '1.2.840.113549.1.7.3': ('pkcs7-envelopedData', ),
+ '1.2.840.113549.1.7.4': ('pkcs7-signedAndEnvelopedData', ),
+ '1.2.840.113549.1.7.5': ('pkcs7-digestData', ),
+ '1.2.840.113549.1.7.6': ('pkcs7-encryptedData', ),
+ '1.2.840.113549.1.9': ('pkcs9', ),
+ '1.2.840.113549.1.9.1': ('emailAddress', ),
+ '1.2.840.113549.1.9.2': ('unstructuredName', ),
+ '1.2.840.113549.1.9.3': ('contentType', ),
+ '1.2.840.113549.1.9.4': ('messageDigest', ),
+ '1.2.840.113549.1.9.5': ('signingTime', ),
+ '1.2.840.113549.1.9.6': ('countersignature', ),
+ '1.2.840.113549.1.9.7': ('challengePassword', ),
+ '1.2.840.113549.1.9.8': ('unstructuredAddress', ),
+ '1.2.840.113549.1.9.9': ('extendedCertificateAttributes', ),
+ '1.2.840.113549.1.9.14': ('Extension Request', 'extReq'),
+ '1.2.840.113549.1.9.15': ('S/MIME Capabilities', 'SMIME-CAPS'),
+ '1.2.840.113549.1.9.16': ('S/MIME', 'SMIME'),
+ '1.2.840.113549.1.9.16.0': ('id-smime-mod', ),
+ '1.2.840.113549.1.9.16.0.1': ('id-smime-mod-cms', ),
+ '1.2.840.113549.1.9.16.0.2': ('id-smime-mod-ess', ),
+ '1.2.840.113549.1.9.16.0.3': ('id-smime-mod-oid', ),
+ '1.2.840.113549.1.9.16.0.4': ('id-smime-mod-msg-v3', ),
+ '1.2.840.113549.1.9.16.0.5': ('id-smime-mod-ets-eSignature-88', ),
+ '1.2.840.113549.1.9.16.0.6': ('id-smime-mod-ets-eSignature-97', ),
+ '1.2.840.113549.1.9.16.0.7': ('id-smime-mod-ets-eSigPolicy-88', ),
+ '1.2.840.113549.1.9.16.0.8': ('id-smime-mod-ets-eSigPolicy-97', ),
+ '1.2.840.113549.1.9.16.1': ('id-smime-ct', ),
+ '1.2.840.113549.1.9.16.1.1': ('id-smime-ct-receipt', ),
+ '1.2.840.113549.1.9.16.1.2': ('id-smime-ct-authData', ),
+ '1.2.840.113549.1.9.16.1.3': ('id-smime-ct-publishCert', ),
+ '1.2.840.113549.1.9.16.1.4': ('id-smime-ct-TSTInfo', ),
+ '1.2.840.113549.1.9.16.1.5': ('id-smime-ct-TDTInfo', ),
+ '1.2.840.113549.1.9.16.1.6': ('id-smime-ct-contentInfo', ),
+ '1.2.840.113549.1.9.16.1.7': ('id-smime-ct-DVCSRequestData', ),
+ '1.2.840.113549.1.9.16.1.8': ('id-smime-ct-DVCSResponseData', ),
+ '1.2.840.113549.1.9.16.1.9': ('id-smime-ct-compressedData', ),
+ '1.2.840.113549.1.9.16.1.19': ('id-smime-ct-contentCollection', ),
+ '1.2.840.113549.1.9.16.1.23': ('id-smime-ct-authEnvelopedData', ),
+ '1.2.840.113549.1.9.16.1.27': ('id-ct-asciiTextWithCRLF', ),
+ '1.2.840.113549.1.9.16.1.28': ('id-ct-xml', ),
+ '1.2.840.113549.1.9.16.2': ('id-smime-aa', ),
+ '1.2.840.113549.1.9.16.2.1': ('id-smime-aa-receiptRequest', ),
+ '1.2.840.113549.1.9.16.2.2': ('id-smime-aa-securityLabel', ),
+ '1.2.840.113549.1.9.16.2.3': ('id-smime-aa-mlExpandHistory', ),
+ '1.2.840.113549.1.9.16.2.4': ('id-smime-aa-contentHint', ),
+ '1.2.840.113549.1.9.16.2.5': ('id-smime-aa-msgSigDigest', ),
+ '1.2.840.113549.1.9.16.2.6': ('id-smime-aa-encapContentType', ),
+ '1.2.840.113549.1.9.16.2.7': ('id-smime-aa-contentIdentifier', ),
+ '1.2.840.113549.1.9.16.2.8': ('id-smime-aa-macValue', ),
+ '1.2.840.113549.1.9.16.2.9': ('id-smime-aa-equivalentLabels', ),
+ '1.2.840.113549.1.9.16.2.10': ('id-smime-aa-contentReference', ),
+ '1.2.840.113549.1.9.16.2.11': ('id-smime-aa-encrypKeyPref', ),
+ '1.2.840.113549.1.9.16.2.12': ('id-smime-aa-signingCertificate', ),
+ '1.2.840.113549.1.9.16.2.13': ('id-smime-aa-smimeEncryptCerts', ),
+ '1.2.840.113549.1.9.16.2.14': ('id-smime-aa-timeStampToken', ),
+ '1.2.840.113549.1.9.16.2.15': ('id-smime-aa-ets-sigPolicyId', ),
+ '1.2.840.113549.1.9.16.2.16': ('id-smime-aa-ets-commitmentType', ),
+ '1.2.840.113549.1.9.16.2.17': ('id-smime-aa-ets-signerLocation', ),
+ '1.2.840.113549.1.9.16.2.18': ('id-smime-aa-ets-signerAttr', ),
+ '1.2.840.113549.1.9.16.2.19': ('id-smime-aa-ets-otherSigCert', ),
+ '1.2.840.113549.1.9.16.2.20': ('id-smime-aa-ets-contentTimestamp', ),
+ '1.2.840.113549.1.9.16.2.21': ('id-smime-aa-ets-CertificateRefs', ),
+ '1.2.840.113549.1.9.16.2.22': ('id-smime-aa-ets-RevocationRefs', ),
+ '1.2.840.113549.1.9.16.2.23': ('id-smime-aa-ets-certValues', ),
+ '1.2.840.113549.1.9.16.2.24': ('id-smime-aa-ets-revocationValues', ),
+ '1.2.840.113549.1.9.16.2.25': ('id-smime-aa-ets-escTimeStamp', ),
+ '1.2.840.113549.1.9.16.2.26': ('id-smime-aa-ets-certCRLTimestamp', ),
+ '1.2.840.113549.1.9.16.2.27': ('id-smime-aa-ets-archiveTimeStamp', ),
+ '1.2.840.113549.1.9.16.2.28': ('id-smime-aa-signatureType', ),
+ '1.2.840.113549.1.9.16.2.29': ('id-smime-aa-dvcs-dvc', ),
+ '1.2.840.113549.1.9.16.2.47': ('id-smime-aa-signingCertificateV2', ),
+ '1.2.840.113549.1.9.16.3': ('id-smime-alg', ),
+ '1.2.840.113549.1.9.16.3.1': ('id-smime-alg-ESDHwith3DES', ),
+ '1.2.840.113549.1.9.16.3.2': ('id-smime-alg-ESDHwithRC2', ),
+ '1.2.840.113549.1.9.16.3.3': ('id-smime-alg-3DESwrap', ),
+ '1.2.840.113549.1.9.16.3.4': ('id-smime-alg-RC2wrap', ),
+ '1.2.840.113549.1.9.16.3.5': ('id-smime-alg-ESDH', ),
+ '1.2.840.113549.1.9.16.3.6': ('id-smime-alg-CMS3DESwrap', ),
+ '1.2.840.113549.1.9.16.3.7': ('id-smime-alg-CMSRC2wrap', ),
+ '1.2.840.113549.1.9.16.3.8': ('zlib compression', 'ZLIB'),
+ '1.2.840.113549.1.9.16.3.9': ('id-alg-PWRI-KEK', ),
+ '1.2.840.113549.1.9.16.4': ('id-smime-cd', ),
+ '1.2.840.113549.1.9.16.4.1': ('id-smime-cd-ldap', ),
+ '1.2.840.113549.1.9.16.5': ('id-smime-spq', ),
+ '1.2.840.113549.1.9.16.5.1': ('id-smime-spq-ets-sqt-uri', ),
+ '1.2.840.113549.1.9.16.5.2': ('id-smime-spq-ets-sqt-unotice', ),
+ '1.2.840.113549.1.9.16.6': ('id-smime-cti', ),
+ '1.2.840.113549.1.9.16.6.1': ('id-smime-cti-ets-proofOfOrigin', ),
+ '1.2.840.113549.1.9.16.6.2': ('id-smime-cti-ets-proofOfReceipt', ),
+ '1.2.840.113549.1.9.16.6.3': ('id-smime-cti-ets-proofOfDelivery', ),
+ '1.2.840.113549.1.9.16.6.4': ('id-smime-cti-ets-proofOfSender', ),
+ '1.2.840.113549.1.9.16.6.5': ('id-smime-cti-ets-proofOfApproval', ),
+ '1.2.840.113549.1.9.16.6.6': ('id-smime-cti-ets-proofOfCreation', ),
+ '1.2.840.113549.1.9.20': ('friendlyName', ),
+ '1.2.840.113549.1.9.21': ('localKeyID', ),
+ '1.2.840.113549.1.9.22': ('certTypes', ),
+ '1.2.840.113549.1.9.22.1': ('x509Certificate', ),
+ '1.2.840.113549.1.9.22.2': ('sdsiCertificate', ),
+ '1.2.840.113549.1.9.23': ('crlTypes', ),
+ '1.2.840.113549.1.9.23.1': ('x509Crl', ),
+ '1.2.840.113549.1.12': ('pkcs12', ),
+ '1.2.840.113549.1.12.1': ('pkcs12-pbeids', ),
+ '1.2.840.113549.1.12.1.1': ('pbeWithSHA1And128BitRC4', 'PBE-SHA1-RC4-128'),
+ '1.2.840.113549.1.12.1.2': ('pbeWithSHA1And40BitRC4', 'PBE-SHA1-RC4-40'),
+ '1.2.840.113549.1.12.1.3': ('pbeWithSHA1And3-KeyTripleDES-CBC', 'PBE-SHA1-3DES'),
+ '1.2.840.113549.1.12.1.4': ('pbeWithSHA1And2-KeyTripleDES-CBC', 'PBE-SHA1-2DES'),
+ '1.2.840.113549.1.12.1.5': ('pbeWithSHA1And128BitRC2-CBC', 'PBE-SHA1-RC2-128'),
+ '1.2.840.113549.1.12.1.6': ('pbeWithSHA1And40BitRC2-CBC', 'PBE-SHA1-RC2-40'),
+ '1.2.840.113549.1.12.10': ('pkcs12-Version1', ),
+ '1.2.840.113549.1.12.10.1': ('pkcs12-BagIds', ),
+ '1.2.840.113549.1.12.10.1.1': ('keyBag', ),
+ '1.2.840.113549.1.12.10.1.2': ('pkcs8ShroudedKeyBag', ),
+ '1.2.840.113549.1.12.10.1.3': ('certBag', ),
+ '1.2.840.113549.1.12.10.1.4': ('crlBag', ),
+ '1.2.840.113549.1.12.10.1.5': ('secretBag', ),
+ '1.2.840.113549.1.12.10.1.6': ('safeContentsBag', ),
+ '1.2.840.113549.2.2': ('md2', 'MD2'),
+ '1.2.840.113549.2.4': ('md4', 'MD4'),
+ '1.2.840.113549.2.5': ('md5', 'MD5'),
+ '1.2.840.113549.2.6': ('hmacWithMD5', ),
+ '1.2.840.113549.2.7': ('hmacWithSHA1', ),
+ '1.2.840.113549.2.8': ('hmacWithSHA224', ),
+ '1.2.840.113549.2.9': ('hmacWithSHA256', ),
+ '1.2.840.113549.2.10': ('hmacWithSHA384', ),
+ '1.2.840.113549.2.11': ('hmacWithSHA512', ),
+ '1.2.840.113549.2.12': ('hmacWithSHA512-224', ),
+ '1.2.840.113549.2.13': ('hmacWithSHA512-256', ),
+ '1.2.840.113549.3.2': ('rc2-cbc', 'RC2-CBC'),
+ '1.2.840.113549.3.4': ('rc4', 'RC4'),
+ '1.2.840.113549.3.7': ('des-ede3-cbc', 'DES-EDE3-CBC'),
+ '1.2.840.113549.3.8': ('rc5-cbc', 'RC5-CBC'),
+ '1.2.840.113549.3.10': ('des-cdmf', 'DES-CDMF'),
+ '1.3': ('identified-organization', 'org', 'ORG'),
+ '1.3.6': ('dod', 'DOD'),
+ '1.3.6.1': ('iana', 'IANA', 'internet'),
+ '1.3.6.1.1': ('Directory', 'directory'),
+ '1.3.6.1.2': ('Management', 'mgmt'),
+ '1.3.6.1.3': ('Experimental', 'experimental'),
+ '1.3.6.1.4': ('Private', 'private'),
+ '1.3.6.1.4.1': ('Enterprises', 'enterprises'),
+ '1.3.6.1.4.1.188.7.1.1.2': ('idea-cbc', 'IDEA-CBC'),
+ '1.3.6.1.4.1.311.2.1.14': ('Microsoft Extension Request', 'msExtReq'),
+ '1.3.6.1.4.1.311.2.1.21': ('Microsoft Individual Code Signing', 'msCodeInd'),
+ '1.3.6.1.4.1.311.2.1.22': ('Microsoft Commercial Code Signing', 'msCodeCom'),
+ '1.3.6.1.4.1.311.10.3.1': ('Microsoft Trust List Signing', 'msCTLSign'),
+ '1.3.6.1.4.1.311.10.3.3': ('Microsoft Server Gated Crypto', 'msSGC'),
+ '1.3.6.1.4.1.311.10.3.4': ('Microsoft Encrypted File System', 'msEFS'),
+ '1.3.6.1.4.1.311.17.1': ('Microsoft CSP Name', 'CSPName'),
+ '1.3.6.1.4.1.311.17.2': ('Microsoft Local Key set', 'LocalKeySet'),
+ '1.3.6.1.4.1.311.20.2.2': ('Microsoft Smartcardlogin', 'msSmartcardLogin'),
+ '1.3.6.1.4.1.311.20.2.3': ('Microsoft Universal Principal Name', 'msUPN'),
+ '1.3.6.1.4.1.311.60.2.1.1': ('jurisdictionLocalityName', 'jurisdictionL'),
+ '1.3.6.1.4.1.311.60.2.1.2': ('jurisdictionStateOrProvinceName', 'jurisdictionST'),
+ '1.3.6.1.4.1.311.60.2.1.3': ('jurisdictionCountryName', 'jurisdictionC'),
+ '1.3.6.1.4.1.1466.344': ('dcObject', 'dcobject'),
+ '1.3.6.1.4.1.1722.12.2.1.16': ('blake2b512', 'BLAKE2b512'),
+ '1.3.6.1.4.1.1722.12.2.2.8': ('blake2s256', 'BLAKE2s256'),
+ '1.3.6.1.4.1.3029.1.2': ('bf-cbc', 'BF-CBC'),
+ '1.3.6.1.4.1.11129.2.4.2': ('CT Precertificate SCTs', 'ct_precert_scts'),
+ '1.3.6.1.4.1.11129.2.4.3': ('CT Precertificate Poison', 'ct_precert_poison'),
+ '1.3.6.1.4.1.11129.2.4.4': ('CT Precertificate Signer', 'ct_precert_signer'),
+ '1.3.6.1.4.1.11129.2.4.5': ('CT Certificate SCTs', 'ct_cert_scts'),
+ '1.3.6.1.4.1.11591.4.11': ('scrypt', 'id-scrypt'),
+ '1.3.6.1.5': ('Security', 'security'),
+ '1.3.6.1.5.2.3': ('id-pkinit', ),
+ '1.3.6.1.5.2.3.4': ('PKINIT Client Auth', 'pkInitClientAuth'),
+ '1.3.6.1.5.2.3.5': ('Signing KDC Response', 'pkInitKDC'),
+ '1.3.6.1.5.5.7': ('PKIX', ),
+ '1.3.6.1.5.5.7.0': ('id-pkix-mod', ),
+ '1.3.6.1.5.5.7.0.1': ('id-pkix1-explicit-88', ),
+ '1.3.6.1.5.5.7.0.2': ('id-pkix1-implicit-88', ),
+ '1.3.6.1.5.5.7.0.3': ('id-pkix1-explicit-93', ),
+ '1.3.6.1.5.5.7.0.4': ('id-pkix1-implicit-93', ),
+ '1.3.6.1.5.5.7.0.5': ('id-mod-crmf', ),
+ '1.3.6.1.5.5.7.0.6': ('id-mod-cmc', ),
+ '1.3.6.1.5.5.7.0.7': ('id-mod-kea-profile-88', ),
+ '1.3.6.1.5.5.7.0.8': ('id-mod-kea-profile-93', ),
+ '1.3.6.1.5.5.7.0.9': ('id-mod-cmp', ),
+ '1.3.6.1.5.5.7.0.10': ('id-mod-qualified-cert-88', ),
+ '1.3.6.1.5.5.7.0.11': ('id-mod-qualified-cert-93', ),
+ '1.3.6.1.5.5.7.0.12': ('id-mod-attribute-cert', ),
+ '1.3.6.1.5.5.7.0.13': ('id-mod-timestamp-protocol', ),
+ '1.3.6.1.5.5.7.0.14': ('id-mod-ocsp', ),
+ '1.3.6.1.5.5.7.0.15': ('id-mod-dvcs', ),
+ '1.3.6.1.5.5.7.0.16': ('id-mod-cmp2000', ),
+ '1.3.6.1.5.5.7.1': ('id-pe', ),
+ '1.3.6.1.5.5.7.1.1': ('Authority Information Access', 'authorityInfoAccess'),
+ '1.3.6.1.5.5.7.1.2': ('Biometric Info', 'biometricInfo'),
+ '1.3.6.1.5.5.7.1.3': ('qcStatements', ),
+ '1.3.6.1.5.5.7.1.4': ('ac-auditEntity', ),
+ '1.3.6.1.5.5.7.1.5': ('ac-targeting', ),
+ '1.3.6.1.5.5.7.1.6': ('aaControls', ),
+ '1.3.6.1.5.5.7.1.7': ('sbgp-ipAddrBlock', ),
+ '1.3.6.1.5.5.7.1.8': ('sbgp-autonomousSysNum', ),
+ '1.3.6.1.5.5.7.1.9': ('sbgp-routerIdentifier', ),
+ '1.3.6.1.5.5.7.1.10': ('ac-proxying', ),
+ '1.3.6.1.5.5.7.1.11': ('Subject Information Access', 'subjectInfoAccess'),
+ '1.3.6.1.5.5.7.1.14': ('Proxy Certificate Information', 'proxyCertInfo'),
+ '1.3.6.1.5.5.7.1.24': ('TLS Feature', 'tlsfeature'),
+ '1.3.6.1.5.5.7.2': ('id-qt', ),
+ '1.3.6.1.5.5.7.2.1': ('Policy Qualifier CPS', 'id-qt-cps'),
+ '1.3.6.1.5.5.7.2.2': ('Policy Qualifier User Notice', 'id-qt-unotice'),
+ '1.3.6.1.5.5.7.2.3': ('textNotice', ),
+ '1.3.6.1.5.5.7.3': ('id-kp', ),
+ '1.3.6.1.5.5.7.3.1': ('TLS Web Server Authentication', 'serverAuth'),
+ '1.3.6.1.5.5.7.3.2': ('TLS Web Client Authentication', 'clientAuth'),
+ '1.3.6.1.5.5.7.3.3': ('Code Signing', 'codeSigning'),
+ '1.3.6.1.5.5.7.3.4': ('E-mail Protection', 'emailProtection'),
+ '1.3.6.1.5.5.7.3.5': ('IPSec End System', 'ipsecEndSystem'),
+ '1.3.6.1.5.5.7.3.6': ('IPSec Tunnel', 'ipsecTunnel'),
+ '1.3.6.1.5.5.7.3.7': ('IPSec User', 'ipsecUser'),
+ '1.3.6.1.5.5.7.3.8': ('Time Stamping', 'timeStamping'),
+ '1.3.6.1.5.5.7.3.9': ('OCSP Signing', 'OCSPSigning'),
+ '1.3.6.1.5.5.7.3.10': ('dvcs', 'DVCS'),
+ '1.3.6.1.5.5.7.3.17': ('ipsec Internet Key Exchange', 'ipsecIKE'),
+ '1.3.6.1.5.5.7.3.18': ('Ctrl/provision WAP Access', 'capwapAC'),
+ '1.3.6.1.5.5.7.3.19': ('Ctrl/Provision WAP Termination', 'capwapWTP'),
+ '1.3.6.1.5.5.7.3.21': ('SSH Client', 'secureShellClient'),
+ '1.3.6.1.5.5.7.3.22': ('SSH Server', 'secureShellServer'),
+ '1.3.6.1.5.5.7.3.23': ('Send Router', 'sendRouter'),
+ '1.3.6.1.5.5.7.3.24': ('Send Proxied Router', 'sendProxiedRouter'),
+ '1.3.6.1.5.5.7.3.25': ('Send Owner', 'sendOwner'),
+ '1.3.6.1.5.5.7.3.26': ('Send Proxied Owner', 'sendProxiedOwner'),
+ '1.3.6.1.5.5.7.3.27': ('CMC Certificate Authority', 'cmcCA'),
+ '1.3.6.1.5.5.7.3.28': ('CMC Registration Authority', 'cmcRA'),
+ '1.3.6.1.5.5.7.4': ('id-it', ),
+ '1.3.6.1.5.5.7.4.1': ('id-it-caProtEncCert', ),
+ '1.3.6.1.5.5.7.4.2': ('id-it-signKeyPairTypes', ),
+ '1.3.6.1.5.5.7.4.3': ('id-it-encKeyPairTypes', ),
+ '1.3.6.1.5.5.7.4.4': ('id-it-preferredSymmAlg', ),
+ '1.3.6.1.5.5.7.4.5': ('id-it-caKeyUpdateInfo', ),
+ '1.3.6.1.5.5.7.4.6': ('id-it-currentCRL', ),
+ '1.3.6.1.5.5.7.4.7': ('id-it-unsupportedOIDs', ),
+ '1.3.6.1.5.5.7.4.8': ('id-it-subscriptionRequest', ),
+ '1.3.6.1.5.5.7.4.9': ('id-it-subscriptionResponse', ),
+ '1.3.6.1.5.5.7.4.10': ('id-it-keyPairParamReq', ),
+ '1.3.6.1.5.5.7.4.11': ('id-it-keyPairParamRep', ),
+ '1.3.6.1.5.5.7.4.12': ('id-it-revPassphrase', ),
+ '1.3.6.1.5.5.7.4.13': ('id-it-implicitConfirm', ),
+ '1.3.6.1.5.5.7.4.14': ('id-it-confirmWaitTime', ),
+ '1.3.6.1.5.5.7.4.15': ('id-it-origPKIMessage', ),
+ '1.3.6.1.5.5.7.4.16': ('id-it-suppLangTags', ),
+ '1.3.6.1.5.5.7.5': ('id-pkip', ),
+ '1.3.6.1.5.5.7.5.1': ('id-regCtrl', ),
+ '1.3.6.1.5.5.7.5.1.1': ('id-regCtrl-regToken', ),
+ '1.3.6.1.5.5.7.5.1.2': ('id-regCtrl-authenticator', ),
+ '1.3.6.1.5.5.7.5.1.3': ('id-regCtrl-pkiPublicationInfo', ),
+ '1.3.6.1.5.5.7.5.1.4': ('id-regCtrl-pkiArchiveOptions', ),
+ '1.3.6.1.5.5.7.5.1.5': ('id-regCtrl-oldCertID', ),
+ '1.3.6.1.5.5.7.5.1.6': ('id-regCtrl-protocolEncrKey', ),
+ '1.3.6.1.5.5.7.5.2': ('id-regInfo', ),
+ '1.3.6.1.5.5.7.5.2.1': ('id-regInfo-utf8Pairs', ),
+ '1.3.6.1.5.5.7.5.2.2': ('id-regInfo-certReq', ),
+ '1.3.6.1.5.5.7.6': ('id-alg', ),
+ '1.3.6.1.5.5.7.6.1': ('id-alg-des40', ),
+ '1.3.6.1.5.5.7.6.2': ('id-alg-noSignature', ),
+ '1.3.6.1.5.5.7.6.3': ('id-alg-dh-sig-hmac-sha1', ),
+ '1.3.6.1.5.5.7.6.4': ('id-alg-dh-pop', ),
+ '1.3.6.1.5.5.7.7': ('id-cmc', ),
+ '1.3.6.1.5.5.7.7.1': ('id-cmc-statusInfo', ),
+ '1.3.6.1.5.5.7.7.2': ('id-cmc-identification', ),
+ '1.3.6.1.5.5.7.7.3': ('id-cmc-identityProof', ),
+ '1.3.6.1.5.5.7.7.4': ('id-cmc-dataReturn', ),
+ '1.3.6.1.5.5.7.7.5': ('id-cmc-transactionId', ),
+ '1.3.6.1.5.5.7.7.6': ('id-cmc-senderNonce', ),
+ '1.3.6.1.5.5.7.7.7': ('id-cmc-recipientNonce', ),
+ '1.3.6.1.5.5.7.7.8': ('id-cmc-addExtensions', ),
+ '1.3.6.1.5.5.7.7.9': ('id-cmc-encryptedPOP', ),
+ '1.3.6.1.5.5.7.7.10': ('id-cmc-decryptedPOP', ),
+ '1.3.6.1.5.5.7.7.11': ('id-cmc-lraPOPWitness', ),
+ '1.3.6.1.5.5.7.7.15': ('id-cmc-getCert', ),
+ '1.3.6.1.5.5.7.7.16': ('id-cmc-getCRL', ),
+ '1.3.6.1.5.5.7.7.17': ('id-cmc-revokeRequest', ),
+ '1.3.6.1.5.5.7.7.18': ('id-cmc-regInfo', ),
+ '1.3.6.1.5.5.7.7.19': ('id-cmc-responseInfo', ),
+ '1.3.6.1.5.5.7.7.21': ('id-cmc-queryPending', ),
+ '1.3.6.1.5.5.7.7.22': ('id-cmc-popLinkRandom', ),
+ '1.3.6.1.5.5.7.7.23': ('id-cmc-popLinkWitness', ),
+ '1.3.6.1.5.5.7.7.24': ('id-cmc-confirmCertAcceptance', ),
+ '1.3.6.1.5.5.7.8': ('id-on', ),
+ '1.3.6.1.5.5.7.8.1': ('id-on-personalData', ),
+ '1.3.6.1.5.5.7.8.3': ('Permanent Identifier', 'id-on-permanentIdentifier'),
+ '1.3.6.1.5.5.7.9': ('id-pda', ),
+ '1.3.6.1.5.5.7.9.1': ('id-pda-dateOfBirth', ),
+ '1.3.6.1.5.5.7.9.2': ('id-pda-placeOfBirth', ),
+ '1.3.6.1.5.5.7.9.3': ('id-pda-gender', ),
+ '1.3.6.1.5.5.7.9.4': ('id-pda-countryOfCitizenship', ),
+ '1.3.6.1.5.5.7.9.5': ('id-pda-countryOfResidence', ),
+ '1.3.6.1.5.5.7.10': ('id-aca', ),
+ '1.3.6.1.5.5.7.10.1': ('id-aca-authenticationInfo', ),
+ '1.3.6.1.5.5.7.10.2': ('id-aca-accessIdentity', ),
+ '1.3.6.1.5.5.7.10.3': ('id-aca-chargingIdentity', ),
+ '1.3.6.1.5.5.7.10.4': ('id-aca-group', ),
+ '1.3.6.1.5.5.7.10.5': ('id-aca-role', ),
+ '1.3.6.1.5.5.7.10.6': ('id-aca-encAttrs', ),
+ '1.3.6.1.5.5.7.11': ('id-qcs', ),
+ '1.3.6.1.5.5.7.11.1': ('id-qcs-pkixQCSyntax-v1', ),
+ '1.3.6.1.5.5.7.12': ('id-cct', ),
+ '1.3.6.1.5.5.7.12.1': ('id-cct-crs', ),
+ '1.3.6.1.5.5.7.12.2': ('id-cct-PKIData', ),
+ '1.3.6.1.5.5.7.12.3': ('id-cct-PKIResponse', ),
+ '1.3.6.1.5.5.7.21': ('id-ppl', ),
+ '1.3.6.1.5.5.7.21.0': ('Any language', 'id-ppl-anyLanguage'),
+ '1.3.6.1.5.5.7.21.1': ('Inherit all', 'id-ppl-inheritAll'),
+ '1.3.6.1.5.5.7.21.2': ('Independent', 'id-ppl-independent'),
+ '1.3.6.1.5.5.7.48': ('id-ad', ),
+ '1.3.6.1.5.5.7.48.1': ('OCSP', 'OCSP', 'id-pkix-OCSP'),
+ '1.3.6.1.5.5.7.48.1.1': ('Basic OCSP Response', 'basicOCSPResponse'),
+ '1.3.6.1.5.5.7.48.1.2': ('OCSP Nonce', 'Nonce'),
+ '1.3.6.1.5.5.7.48.1.3': ('OCSP CRL ID', 'CrlID'),
+ '1.3.6.1.5.5.7.48.1.4': ('Acceptable OCSP Responses', 'acceptableResponses'),
+ '1.3.6.1.5.5.7.48.1.5': ('OCSP No Check', 'noCheck'),
+ '1.3.6.1.5.5.7.48.1.6': ('OCSP Archive Cutoff', 'archiveCutoff'),
+ '1.3.6.1.5.5.7.48.1.7': ('OCSP Service Locator', 'serviceLocator'),
+ '1.3.6.1.5.5.7.48.1.8': ('Extended OCSP Status', 'extendedStatus'),
+ '1.3.6.1.5.5.7.48.1.9': ('valid', ),
+ '1.3.6.1.5.5.7.48.1.10': ('path', ),
+ '1.3.6.1.5.5.7.48.1.11': ('Trust Root', 'trustRoot'),
+ '1.3.6.1.5.5.7.48.2': ('CA Issuers', 'caIssuers'),
+ '1.3.6.1.5.5.7.48.3': ('AD Time Stamping', 'ad_timestamping'),
+ '1.3.6.1.5.5.7.48.4': ('ad dvcs', 'AD_DVCS'),
+ '1.3.6.1.5.5.7.48.5': ('CA Repository', 'caRepository'),
+ '1.3.6.1.5.5.8.1.1': ('hmac-md5', 'HMAC-MD5'),
+ '1.3.6.1.5.5.8.1.2': ('hmac-sha1', 'HMAC-SHA1'),
+ '1.3.6.1.6': ('SNMPv2', 'snmpv2'),
+ '1.3.6.1.7': ('Mail', ),
+ '1.3.6.1.7.1': ('MIME MHS', 'mime-mhs'),
+ '1.3.6.1.7.1.1': ('mime-mhs-headings', 'mime-mhs-headings'),
+ '1.3.6.1.7.1.1.1': ('id-hex-partial-message', 'id-hex-partial-message'),
+ '1.3.6.1.7.1.1.2': ('id-hex-multipart-message', 'id-hex-multipart-message'),
+ '1.3.6.1.7.1.2': ('mime-mhs-bodies', 'mime-mhs-bodies'),
+ '1.3.14.3.2': ('algorithm', 'algorithm'),
+ '1.3.14.3.2.3': ('md5WithRSA', 'RSA-NP-MD5'),
+ '1.3.14.3.2.6': ('des-ecb', 'DES-ECB'),
+ '1.3.14.3.2.7': ('des-cbc', 'DES-CBC'),
+ '1.3.14.3.2.8': ('des-ofb', 'DES-OFB'),
+ '1.3.14.3.2.9': ('des-cfb', 'DES-CFB'),
+ '1.3.14.3.2.11': ('rsaSignature', ),
+ '1.3.14.3.2.12': ('dsaEncryption-old', 'DSA-old'),
+ '1.3.14.3.2.13': ('dsaWithSHA', 'DSA-SHA'),
+ '1.3.14.3.2.15': ('shaWithRSAEncryption', 'RSA-SHA'),
+ '1.3.14.3.2.17': ('des-ede', 'DES-EDE'),
+ '1.3.14.3.2.18': ('sha', 'SHA'),
+ '1.3.14.3.2.26': ('sha1', 'SHA1'),
+ '1.3.14.3.2.27': ('dsaWithSHA1-old', 'DSA-SHA1-old'),
+ '1.3.14.3.2.29': ('sha1WithRSA', 'RSA-SHA1-2'),
+ '1.3.36.3.2.1': ('ripemd160', 'RIPEMD160'),
+ '1.3.36.3.3.1.2': ('ripemd160WithRSA', 'RSA-RIPEMD160'),
+ '1.3.36.3.3.2.8.1.1.1': ('brainpoolP160r1', ),
+ '1.3.36.3.3.2.8.1.1.2': ('brainpoolP160t1', ),
+ '1.3.36.3.3.2.8.1.1.3': ('brainpoolP192r1', ),
+ '1.3.36.3.3.2.8.1.1.4': ('brainpoolP192t1', ),
+ '1.3.36.3.3.2.8.1.1.5': ('brainpoolP224r1', ),
+ '1.3.36.3.3.2.8.1.1.6': ('brainpoolP224t1', ),
+ '1.3.36.3.3.2.8.1.1.7': ('brainpoolP256r1', ),
+ '1.3.36.3.3.2.8.1.1.8': ('brainpoolP256t1', ),
+ '1.3.36.3.3.2.8.1.1.9': ('brainpoolP320r1', ),
+ '1.3.36.3.3.2.8.1.1.10': ('brainpoolP320t1', ),
+ '1.3.36.3.3.2.8.1.1.11': ('brainpoolP384r1', ),
+ '1.3.36.3.3.2.8.1.1.12': ('brainpoolP384t1', ),
+ '1.3.36.3.3.2.8.1.1.13': ('brainpoolP512r1', ),
+ '1.3.36.3.3.2.8.1.1.14': ('brainpoolP512t1', ),
+ '1.3.36.8.3.3': ('Professional Information or basis for Admission', 'x509ExtAdmission'),
+ '1.3.101.1.4.1': ('Strong Extranet ID', 'SXNetID'),
+ '1.3.101.110': ('X25519', ),
+ '1.3.101.111': ('X448', ),
+ '1.3.101.112': ('ED25519', ),
+ '1.3.101.113': ('ED448', ),
+ '1.3.111': ('ieee', ),
+ '1.3.111.2.1619': ('IEEE Security in Storage Working Group', 'ieee-siswg'),
+ '1.3.111.2.1619.0.1.1': ('aes-128-xts', 'AES-128-XTS'),
+ '1.3.111.2.1619.0.1.2': ('aes-256-xts', 'AES-256-XTS'),
+ '1.3.132': ('certicom-arc', ),
+ '1.3.132.0': ('secg_ellipticCurve', ),
+ '1.3.132.0.1': ('sect163k1', ),
+ '1.3.132.0.2': ('sect163r1', ),
+ '1.3.132.0.3': ('sect239k1', ),
+ '1.3.132.0.4': ('sect113r1', ),
+ '1.3.132.0.5': ('sect113r2', ),
+ '1.3.132.0.6': ('secp112r1', ),
+ '1.3.132.0.7': ('secp112r2', ),
+ '1.3.132.0.8': ('secp160r1', ),
+ '1.3.132.0.9': ('secp160k1', ),
+ '1.3.132.0.10': ('secp256k1', ),
+ '1.3.132.0.15': ('sect163r2', ),
+ '1.3.132.0.16': ('sect283k1', ),
+ '1.3.132.0.17': ('sect283r1', ),
+ '1.3.132.0.22': ('sect131r1', ),
+ '1.3.132.0.23': ('sect131r2', ),
+ '1.3.132.0.24': ('sect193r1', ),
+ '1.3.132.0.25': ('sect193r2', ),
+ '1.3.132.0.26': ('sect233k1', ),
+ '1.3.132.0.27': ('sect233r1', ),
+ '1.3.132.0.28': ('secp128r1', ),
+ '1.3.132.0.29': ('secp128r2', ),
+ '1.3.132.0.30': ('secp160r2', ),
+ '1.3.132.0.31': ('secp192k1', ),
+ '1.3.132.0.32': ('secp224k1', ),
+ '1.3.132.0.33': ('secp224r1', ),
+ '1.3.132.0.34': ('secp384r1', ),
+ '1.3.132.0.35': ('secp521r1', ),
+ '1.3.132.0.36': ('sect409k1', ),
+ '1.3.132.0.37': ('sect409r1', ),
+ '1.3.132.0.38': ('sect571k1', ),
+ '1.3.132.0.39': ('sect571r1', ),
+ '1.3.132.1': ('secg-scheme', ),
+ '1.3.132.1.11.0': ('dhSinglePass-stdDH-sha224kdf-scheme', ),
+ '1.3.132.1.11.1': ('dhSinglePass-stdDH-sha256kdf-scheme', ),
+ '1.3.132.1.11.2': ('dhSinglePass-stdDH-sha384kdf-scheme', ),
+ '1.3.132.1.11.3': ('dhSinglePass-stdDH-sha512kdf-scheme', ),
+ '1.3.132.1.14.0': ('dhSinglePass-cofactorDH-sha224kdf-scheme', ),
+ '1.3.132.1.14.1': ('dhSinglePass-cofactorDH-sha256kdf-scheme', ),
+ '1.3.132.1.14.2': ('dhSinglePass-cofactorDH-sha384kdf-scheme', ),
+ '1.3.132.1.14.3': ('dhSinglePass-cofactorDH-sha512kdf-scheme', ),
+ '1.3.133.16.840.63.0': ('x9-63-scheme', ),
+ '1.3.133.16.840.63.0.2': ('dhSinglePass-stdDH-sha1kdf-scheme', ),
+ '1.3.133.16.840.63.0.3': ('dhSinglePass-cofactorDH-sha1kdf-scheme', ),
+ '2': ('joint-iso-itu-t', 'JOINT-ISO-ITU-T', 'joint-iso-ccitt'),
+ '2.5': ('directory services (X.500)', 'X500'),
+ '2.5.1.5': ('Selected Attribute Types', 'selected-attribute-types'),
+ '2.5.1.5.55': ('clearance', ),
+ '2.5.4': ('X509', ),
+ '2.5.4.3': ('commonName', 'CN'),
+ '2.5.4.4': ('surname', 'SN'),
+ '2.5.4.5': ('serialNumber', ),
+ '2.5.4.6': ('countryName', 'C'),
+ '2.5.4.7': ('localityName', 'L'),
+ '2.5.4.8': ('stateOrProvinceName', 'ST'),
+ '2.5.4.9': ('streetAddress', 'street'),
+ '2.5.4.10': ('organizationName', 'O'),
+ '2.5.4.11': ('organizationalUnitName', 'OU'),
+ '2.5.4.12': ('title', 'title'),
+ '2.5.4.13': ('description', ),
+ '2.5.4.14': ('searchGuide', ),
+ '2.5.4.15': ('businessCategory', ),
+ '2.5.4.16': ('postalAddress', ),
+ '2.5.4.17': ('postalCode', ),
+ '2.5.4.18': ('postOfficeBox', ),
+ '2.5.4.19': ('physicalDeliveryOfficeName', ),
+ '2.5.4.20': ('telephoneNumber', ),
+ '2.5.4.21': ('telexNumber', ),
+ '2.5.4.22': ('teletexTerminalIdentifier', ),
+ '2.5.4.23': ('facsimileTelephoneNumber', ),
+ '2.5.4.24': ('x121Address', ),
+ '2.5.4.25': ('internationaliSDNNumber', ),
+ '2.5.4.26': ('registeredAddress', ),
+ '2.5.4.27': ('destinationIndicator', ),
+ '2.5.4.28': ('preferredDeliveryMethod', ),
+ '2.5.4.29': ('presentationAddress', ),
+ '2.5.4.30': ('supportedApplicationContext', ),
+ '2.5.4.31': ('member', ),
+ '2.5.4.32': ('owner', ),
+ '2.5.4.33': ('roleOccupant', ),
+ '2.5.4.34': ('seeAlso', ),
+ '2.5.4.35': ('userPassword', ),
+ '2.5.4.36': ('userCertificate', ),
+ '2.5.4.37': ('cACertificate', ),
+ '2.5.4.38': ('authorityRevocationList', ),
+ '2.5.4.39': ('certificateRevocationList', ),
+ '2.5.4.40': ('crossCertificatePair', ),
+ '2.5.4.41': ('name', 'name'),
+ '2.5.4.42': ('givenName', 'GN'),
+ '2.5.4.43': ('initials', 'initials'),
+ '2.5.4.44': ('generationQualifier', ),
+ '2.5.4.45': ('x500UniqueIdentifier', ),
+ '2.5.4.46': ('dnQualifier', 'dnQualifier'),
+ '2.5.4.47': ('enhancedSearchGuide', ),
+ '2.5.4.48': ('protocolInformation', ),
+ '2.5.4.49': ('distinguishedName', ),
+ '2.5.4.50': ('uniqueMember', ),
+ '2.5.4.51': ('houseIdentifier', ),
+ '2.5.4.52': ('supportedAlgorithms', ),
+ '2.5.4.53': ('deltaRevocationList', ),
+ '2.5.4.54': ('dmdName', ),
+ '2.5.4.65': ('pseudonym', ),
+ '2.5.4.72': ('role', 'role'),
+ '2.5.4.97': ('organizationIdentifier', ),
+ '2.5.4.98': ('countryCode3c', 'c3'),
+ '2.5.4.99': ('countryCode3n', 'n3'),
+ '2.5.4.100': ('dnsName', ),
+ '2.5.8': ('directory services - algorithms', 'X500algorithms'),
+ '2.5.8.1.1': ('rsa', 'RSA'),
+ '2.5.8.3.100': ('mdc2WithRSA', 'RSA-MDC2'),
+ '2.5.8.3.101': ('mdc2', 'MDC2'),
+ '2.5.29': ('id-ce', ),
+ '2.5.29.9': ('X509v3 Subject Directory Attributes', 'subjectDirectoryAttributes'),
+ '2.5.29.14': ('X509v3 Subject Key Identifier', 'subjectKeyIdentifier'),
+ '2.5.29.15': ('X509v3 Key Usage', 'keyUsage'),
+ '2.5.29.16': ('X509v3 Private Key Usage Period', 'privateKeyUsagePeriod'),
+ '2.5.29.17': ('X509v3 Subject Alternative Name', 'subjectAltName'),
+ '2.5.29.18': ('X509v3 Issuer Alternative Name', 'issuerAltName'),
+ '2.5.29.19': ('X509v3 Basic Constraints', 'basicConstraints'),
+ '2.5.29.20': ('X509v3 CRL Number', 'crlNumber'),
+ '2.5.29.21': ('X509v3 CRL Reason Code', 'CRLReason'),
+ '2.5.29.23': ('Hold Instruction Code', 'holdInstructionCode'),
+ '2.5.29.24': ('Invalidity Date', 'invalidityDate'),
+ '2.5.29.27': ('X509v3 Delta CRL Indicator', 'deltaCRL'),
+ '2.5.29.28': ('X509v3 Issuing Distribution Point', 'issuingDistributionPoint'),
+ '2.5.29.29': ('X509v3 Certificate Issuer', 'certificateIssuer'),
+ '2.5.29.30': ('X509v3 Name Constraints', 'nameConstraints'),
+ '2.5.29.31': ('X509v3 CRL Distribution Points', 'crlDistributionPoints'),
+ '2.5.29.32': ('X509v3 Certificate Policies', 'certificatePolicies'),
+ '2.5.29.32.0': ('X509v3 Any Policy', 'anyPolicy'),
+ '2.5.29.33': ('X509v3 Policy Mappings', 'policyMappings'),
+ '2.5.29.35': ('X509v3 Authority Key Identifier', 'authorityKeyIdentifier'),
+ '2.5.29.36': ('X509v3 Policy Constraints', 'policyConstraints'),
+ '2.5.29.37': ('X509v3 Extended Key Usage', 'extendedKeyUsage'),
+ '2.5.29.37.0': ('Any Extended Key Usage', 'anyExtendedKeyUsage'),
+ '2.5.29.46': ('X509v3 Freshest CRL', 'freshestCRL'),
+ '2.5.29.54': ('X509v3 Inhibit Any Policy', 'inhibitAnyPolicy'),
+ '2.5.29.55': ('X509v3 AC Targeting', 'targetInformation'),
+ '2.5.29.56': ('X509v3 No Revocation Available', 'noRevAvail'),
+ '2.16.840.1.101.3': ('csor', ),
+ '2.16.840.1.101.3.4': ('nistAlgorithms', ),
+ '2.16.840.1.101.3.4.1': ('aes', ),
+ '2.16.840.1.101.3.4.1.1': ('aes-128-ecb', 'AES-128-ECB'),
+ '2.16.840.1.101.3.4.1.2': ('aes-128-cbc', 'AES-128-CBC'),
+ '2.16.840.1.101.3.4.1.3': ('aes-128-ofb', 'AES-128-OFB'),
+ '2.16.840.1.101.3.4.1.4': ('aes-128-cfb', 'AES-128-CFB'),
+ '2.16.840.1.101.3.4.1.5': ('id-aes128-wrap', ),
+ '2.16.840.1.101.3.4.1.6': ('aes-128-gcm', 'id-aes128-GCM'),
+ '2.16.840.1.101.3.4.1.7': ('aes-128-ccm', 'id-aes128-CCM'),
+ '2.16.840.1.101.3.4.1.8': ('id-aes128-wrap-pad', ),
+ '2.16.840.1.101.3.4.1.21': ('aes-192-ecb', 'AES-192-ECB'),
+ '2.16.840.1.101.3.4.1.22': ('aes-192-cbc', 'AES-192-CBC'),
+ '2.16.840.1.101.3.4.1.23': ('aes-192-ofb', 'AES-192-OFB'),
+ '2.16.840.1.101.3.4.1.24': ('aes-192-cfb', 'AES-192-CFB'),
+ '2.16.840.1.101.3.4.1.25': ('id-aes192-wrap', ),
+ '2.16.840.1.101.3.4.1.26': ('aes-192-gcm', 'id-aes192-GCM'),
+ '2.16.840.1.101.3.4.1.27': ('aes-192-ccm', 'id-aes192-CCM'),
+ '2.16.840.1.101.3.4.1.28': ('id-aes192-wrap-pad', ),
+ '2.16.840.1.101.3.4.1.41': ('aes-256-ecb', 'AES-256-ECB'),
+ '2.16.840.1.101.3.4.1.42': ('aes-256-cbc', 'AES-256-CBC'),
+ '2.16.840.1.101.3.4.1.43': ('aes-256-ofb', 'AES-256-OFB'),
+ '2.16.840.1.101.3.4.1.44': ('aes-256-cfb', 'AES-256-CFB'),
+ '2.16.840.1.101.3.4.1.45': ('id-aes256-wrap', ),
+ '2.16.840.1.101.3.4.1.46': ('aes-256-gcm', 'id-aes256-GCM'),
+ '2.16.840.1.101.3.4.1.47': ('aes-256-ccm', 'id-aes256-CCM'),
+ '2.16.840.1.101.3.4.1.48': ('id-aes256-wrap-pad', ),
+ '2.16.840.1.101.3.4.2': ('nist_hashalgs', ),
+ '2.16.840.1.101.3.4.2.1': ('sha256', 'SHA256'),
+ '2.16.840.1.101.3.4.2.2': ('sha384', 'SHA384'),
+ '2.16.840.1.101.3.4.2.3': ('sha512', 'SHA512'),
+ '2.16.840.1.101.3.4.2.4': ('sha224', 'SHA224'),
+ '2.16.840.1.101.3.4.2.5': ('sha512-224', 'SHA512-224'),
+ '2.16.840.1.101.3.4.2.6': ('sha512-256', 'SHA512-256'),
+ '2.16.840.1.101.3.4.2.7': ('sha3-224', 'SHA3-224'),
+ '2.16.840.1.101.3.4.2.8': ('sha3-256', 'SHA3-256'),
+ '2.16.840.1.101.3.4.2.9': ('sha3-384', 'SHA3-384'),
+ '2.16.840.1.101.3.4.2.10': ('sha3-512', 'SHA3-512'),
+ '2.16.840.1.101.3.4.2.11': ('shake128', 'SHAKE128'),
+ '2.16.840.1.101.3.4.2.12': ('shake256', 'SHAKE256'),
+ '2.16.840.1.101.3.4.2.13': ('hmac-sha3-224', 'id-hmacWithSHA3-224'),
+ '2.16.840.1.101.3.4.2.14': ('hmac-sha3-256', 'id-hmacWithSHA3-256'),
+ '2.16.840.1.101.3.4.2.15': ('hmac-sha3-384', 'id-hmacWithSHA3-384'),
+ '2.16.840.1.101.3.4.2.16': ('hmac-sha3-512', 'id-hmacWithSHA3-512'),
+ '2.16.840.1.101.3.4.3': ('dsa_with_sha2', 'sigAlgs'),
+ '2.16.840.1.101.3.4.3.1': ('dsa_with_SHA224', ),
+ '2.16.840.1.101.3.4.3.2': ('dsa_with_SHA256', ),
+ '2.16.840.1.101.3.4.3.3': ('dsa_with_SHA384', 'id-dsa-with-sha384'),
+ '2.16.840.1.101.3.4.3.4': ('dsa_with_SHA512', 'id-dsa-with-sha512'),
+ '2.16.840.1.101.3.4.3.5': ('dsa_with_SHA3-224', 'id-dsa-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.6': ('dsa_with_SHA3-256', 'id-dsa-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.7': ('dsa_with_SHA3-384', 'id-dsa-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.8': ('dsa_with_SHA3-512', 'id-dsa-with-sha3-512'),
+ '2.16.840.1.101.3.4.3.9': ('ecdsa_with_SHA3-224', 'id-ecdsa-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.10': ('ecdsa_with_SHA3-256', 'id-ecdsa-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.11': ('ecdsa_with_SHA3-384', 'id-ecdsa-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.12': ('ecdsa_with_SHA3-512', 'id-ecdsa-with-sha3-512'),
+ '2.16.840.1.101.3.4.3.13': ('RSA-SHA3-224', 'id-rsassa-pkcs1-v1_5-with-sha3-224'),
+ '2.16.840.1.101.3.4.3.14': ('RSA-SHA3-256', 'id-rsassa-pkcs1-v1_5-with-sha3-256'),
+ '2.16.840.1.101.3.4.3.15': ('RSA-SHA3-384', 'id-rsassa-pkcs1-v1_5-with-sha3-384'),
+ '2.16.840.1.101.3.4.3.16': ('RSA-SHA3-512', 'id-rsassa-pkcs1-v1_5-with-sha3-512'),
+ '2.16.840.1.113730': ('Netscape Communications Corp.', 'Netscape'),
+ '2.16.840.1.113730.1': ('Netscape Certificate Extension', 'nsCertExt'),
+ '2.16.840.1.113730.1.1': ('Netscape Cert Type', 'nsCertType'),
+ '2.16.840.1.113730.1.2': ('Netscape Base Url', 'nsBaseUrl'),
+ '2.16.840.1.113730.1.3': ('Netscape Revocation Url', 'nsRevocationUrl'),
+ '2.16.840.1.113730.1.4': ('Netscape CA Revocation Url', 'nsCaRevocationUrl'),
+ '2.16.840.1.113730.1.7': ('Netscape Renewal Url', 'nsRenewalUrl'),
+ '2.16.840.1.113730.1.8': ('Netscape CA Policy Url', 'nsCaPolicyUrl'),
+ '2.16.840.1.113730.1.12': ('Netscape SSL Server Name', 'nsSslServerName'),
+ '2.16.840.1.113730.1.13': ('Netscape Comment', 'nsComment'),
+ '2.16.840.1.113730.2': ('Netscape Data Type', 'nsDataType'),
+ '2.16.840.1.113730.2.5': ('Netscape Certificate Sequence', 'nsCertSequence'),
+ '2.16.840.1.113730.4.1': ('Netscape Server Gated Crypto', 'nsSGC'),
+ '2.23': ('International Organizations', 'international-organizations'),
+ '2.23.42': ('Secure Electronic Transactions', 'id-set'),
+ '2.23.42.0': ('content types', 'set-ctype'),
+ '2.23.42.0.0': ('setct-PANData', ),
+ '2.23.42.0.1': ('setct-PANToken', ),
+ '2.23.42.0.2': ('setct-PANOnly', ),
+ '2.23.42.0.3': ('setct-OIData', ),
+ '2.23.42.0.4': ('setct-PI', ),
+ '2.23.42.0.5': ('setct-PIData', ),
+ '2.23.42.0.6': ('setct-PIDataUnsigned', ),
+ '2.23.42.0.7': ('setct-HODInput', ),
+ '2.23.42.0.8': ('setct-AuthResBaggage', ),
+ '2.23.42.0.9': ('setct-AuthRevReqBaggage', ),
+ '2.23.42.0.10': ('setct-AuthRevResBaggage', ),
+ '2.23.42.0.11': ('setct-CapTokenSeq', ),
+ '2.23.42.0.12': ('setct-PInitResData', ),
+ '2.23.42.0.13': ('setct-PI-TBS', ),
+ '2.23.42.0.14': ('setct-PResData', ),
+ '2.23.42.0.16': ('setct-AuthReqTBS', ),
+ '2.23.42.0.17': ('setct-AuthResTBS', ),
+ '2.23.42.0.18': ('setct-AuthResTBSX', ),
+ '2.23.42.0.19': ('setct-AuthTokenTBS', ),
+ '2.23.42.0.20': ('setct-CapTokenData', ),
+ '2.23.42.0.21': ('setct-CapTokenTBS', ),
+ '2.23.42.0.22': ('setct-AcqCardCodeMsg', ),
+ '2.23.42.0.23': ('setct-AuthRevReqTBS', ),
+ '2.23.42.0.24': ('setct-AuthRevResData', ),
+ '2.23.42.0.25': ('setct-AuthRevResTBS', ),
+ '2.23.42.0.26': ('setct-CapReqTBS', ),
+ '2.23.42.0.27': ('setct-CapReqTBSX', ),
+ '2.23.42.0.28': ('setct-CapResData', ),
+ '2.23.42.0.29': ('setct-CapRevReqTBS', ),
+ '2.23.42.0.30': ('setct-CapRevReqTBSX', ),
+ '2.23.42.0.31': ('setct-CapRevResData', ),
+ '2.23.42.0.32': ('setct-CredReqTBS', ),
+ '2.23.42.0.33': ('setct-CredReqTBSX', ),
+ '2.23.42.0.34': ('setct-CredResData', ),
+ '2.23.42.0.35': ('setct-CredRevReqTBS', ),
+ '2.23.42.0.36': ('setct-CredRevReqTBSX', ),
+ '2.23.42.0.37': ('setct-CredRevResData', ),
+ '2.23.42.0.38': ('setct-PCertReqData', ),
+ '2.23.42.0.39': ('setct-PCertResTBS', ),
+ '2.23.42.0.40': ('setct-BatchAdminReqData', ),
+ '2.23.42.0.41': ('setct-BatchAdminResData', ),
+ '2.23.42.0.42': ('setct-CardCInitResTBS', ),
+ '2.23.42.0.43': ('setct-MeAqCInitResTBS', ),
+ '2.23.42.0.44': ('setct-RegFormResTBS', ),
+ '2.23.42.0.45': ('setct-CertReqData', ),
+ '2.23.42.0.46': ('setct-CertReqTBS', ),
+ '2.23.42.0.47': ('setct-CertResData', ),
+ '2.23.42.0.48': ('setct-CertInqReqTBS', ),
+ '2.23.42.0.49': ('setct-ErrorTBS', ),
+ '2.23.42.0.50': ('setct-PIDualSignedTBE', ),
+ '2.23.42.0.51': ('setct-PIUnsignedTBE', ),
+ '2.23.42.0.52': ('setct-AuthReqTBE', ),
+ '2.23.42.0.53': ('setct-AuthResTBE', ),
+ '2.23.42.0.54': ('setct-AuthResTBEX', ),
+ '2.23.42.0.55': ('setct-AuthTokenTBE', ),
+ '2.23.42.0.56': ('setct-CapTokenTBE', ),
+ '2.23.42.0.57': ('setct-CapTokenTBEX', ),
+ '2.23.42.0.58': ('setct-AcqCardCodeMsgTBE', ),
+ '2.23.42.0.59': ('setct-AuthRevReqTBE', ),
+ '2.23.42.0.60': ('setct-AuthRevResTBE', ),
+ '2.23.42.0.61': ('setct-AuthRevResTBEB', ),
+ '2.23.42.0.62': ('setct-CapReqTBE', ),
+ '2.23.42.0.63': ('setct-CapReqTBEX', ),
+ '2.23.42.0.64': ('setct-CapResTBE', ),
+ '2.23.42.0.65': ('setct-CapRevReqTBE', ),
+ '2.23.42.0.66': ('setct-CapRevReqTBEX', ),
+ '2.23.42.0.67': ('setct-CapRevResTBE', ),
+ '2.23.42.0.68': ('setct-CredReqTBE', ),
+ '2.23.42.0.69': ('setct-CredReqTBEX', ),
+ '2.23.42.0.70': ('setct-CredResTBE', ),
+ '2.23.42.0.71': ('setct-CredRevReqTBE', ),
+ '2.23.42.0.72': ('setct-CredRevReqTBEX', ),
+ '2.23.42.0.73': ('setct-CredRevResTBE', ),
+ '2.23.42.0.74': ('setct-BatchAdminReqTBE', ),
+ '2.23.42.0.75': ('setct-BatchAdminResTBE', ),
+ '2.23.42.0.76': ('setct-RegFormReqTBE', ),
+ '2.23.42.0.77': ('setct-CertReqTBE', ),
+ '2.23.42.0.78': ('setct-CertReqTBEX', ),
+ '2.23.42.0.79': ('setct-CertResTBE', ),
+ '2.23.42.0.80': ('setct-CRLNotificationTBS', ),
+ '2.23.42.0.81': ('setct-CRLNotificationResTBS', ),
+ '2.23.42.0.82': ('setct-BCIDistributionTBS', ),
+ '2.23.42.1': ('message extensions', 'set-msgExt'),
+ '2.23.42.1.1': ('generic cryptogram', 'setext-genCrypt'),
+ '2.23.42.1.3': ('merchant initiated auth', 'setext-miAuth'),
+ '2.23.42.1.4': ('setext-pinSecure', ),
+ '2.23.42.1.5': ('setext-pinAny', ),
+ '2.23.42.1.7': ('setext-track2', ),
+ '2.23.42.1.8': ('additional verification', 'setext-cv'),
+ '2.23.42.3': ('set-attr', ),
+ '2.23.42.3.0': ('setAttr-Cert', ),
+ '2.23.42.3.0.0': ('set-rootKeyThumb', ),
+ '2.23.42.3.0.1': ('set-addPolicy', ),
+ '2.23.42.3.1': ('payment gateway capabilities', 'setAttr-PGWYcap'),
+ '2.23.42.3.2': ('setAttr-TokenType', ),
+ '2.23.42.3.2.1': ('setAttr-Token-EMV', ),
+ '2.23.42.3.2.2': ('setAttr-Token-B0Prime', ),
+ '2.23.42.3.3': ('issuer capabilities', 'setAttr-IssCap'),
+ '2.23.42.3.3.3': ('setAttr-IssCap-CVM', ),
+ '2.23.42.3.3.3.1': ('generate cryptogram', 'setAttr-GenCryptgrm'),
+ '2.23.42.3.3.4': ('setAttr-IssCap-T2', ),
+ '2.23.42.3.3.4.1': ('encrypted track 2', 'setAttr-T2Enc'),
+ '2.23.42.3.3.4.2': ('cleartext track 2', 'setAttr-T2cleartxt'),
+ '2.23.42.3.3.5': ('setAttr-IssCap-Sig', ),
+ '2.23.42.3.3.5.1': ('ICC or token signature', 'setAttr-TokICCsig'),
+ '2.23.42.3.3.5.2': ('secure device signature', 'setAttr-SecDevSig'),
+ '2.23.42.5': ('set-policy', ),
+ '2.23.42.5.0': ('set-policy-root', ),
+ '2.23.42.7': ('certificate extensions', 'set-certExt'),
+ '2.23.42.7.0': ('setCext-hashedRoot', ),
+ '2.23.42.7.1': ('setCext-certType', ),
+ '2.23.42.7.2': ('setCext-merchData', ),
+ '2.23.42.7.3': ('setCext-cCertRequired', ),
+ '2.23.42.7.4': ('setCext-tunneling', ),
+ '2.23.42.7.5': ('setCext-setExt', ),
+ '2.23.42.7.6': ('setCext-setQualf', ),
+ '2.23.42.7.7': ('setCext-PGWYcapabilities', ),
+ '2.23.42.7.8': ('setCext-TokenIdentifier', ),
+ '2.23.42.7.9': ('setCext-Track2Data', ),
+ '2.23.42.7.10': ('setCext-TokenType', ),
+ '2.23.42.7.11': ('setCext-IssuerCapabilities', ),
+ '2.23.42.8': ('set-brand', ),
+ '2.23.42.8.1': ('set-brand-IATA-ATA', ),
+ '2.23.42.8.4': ('set-brand-Visa', ),
+ '2.23.42.8.5': ('set-brand-MasterCard', ),
+ '2.23.42.8.30': ('set-brand-Diners', ),
+ '2.23.42.8.34': ('set-brand-AmericanExpress', ),
+ '2.23.42.8.35': ('set-brand-JCB', ),
+ '2.23.42.8.6011': ('set-brand-Novus', ),
+ '2.23.43': ('wap', ),
+ '2.23.43.1': ('wap-wsg', ),
+ '2.23.43.1.4': ('wap-wsg-idm-ecid', ),
+ '2.23.43.1.4.1': ('wap-wsg-idm-ecid-wtls1', ),
+ '2.23.43.1.4.3': ('wap-wsg-idm-ecid-wtls3', ),
+ '2.23.43.1.4.4': ('wap-wsg-idm-ecid-wtls4', ),
+ '2.23.43.1.4.5': ('wap-wsg-idm-ecid-wtls5', ),
+ '2.23.43.1.4.6': ('wap-wsg-idm-ecid-wtls6', ),
+ '2.23.43.1.4.7': ('wap-wsg-idm-ecid-wtls7', ),
+ '2.23.43.1.4.8': ('wap-wsg-idm-ecid-wtls8', ),
+ '2.23.43.1.4.9': ('wap-wsg-idm-ecid-wtls9', ),
+ '2.23.43.1.4.10': ('wap-wsg-idm-ecid-wtls10', ),
+ '2.23.43.1.4.11': ('wap-wsg-idm-ecid-wtls11', ),
+ '2.23.43.1.4.12': ('wap-wsg-idm-ecid-wtls12', ),
+}
+# #####################################################################################
+# #####################################################################################
+
+_OID_LOOKUP = dict()
+_NORMALIZE_NAMES = dict()
+_NORMALIZE_NAMES_SHORT = dict()
+
+for dotted, names in _OID_MAP.items():
+ for name in names:
+ if name in _NORMALIZE_NAMES and _OID_LOOKUP[name] != dotted:
+ raise AssertionError(
+ 'Name collision during setup: "{0}" for OIDs {1} and {2}'
+ .format(name, dotted, _OID_LOOKUP[name])
+ )
+ _NORMALIZE_NAMES[name] = names[0]
+ _NORMALIZE_NAMES_SHORT[name] = names[-1]
+ _OID_LOOKUP[name] = dotted
+for alias, original in [('userID', 'userId')]:
+ if alias in _NORMALIZE_NAMES:
+ raise AssertionError(
+            'Name collision while adding aliases: "{0}" (alias for "{1}") is already mapped to OID {2}'
+ .format(alias, original, _OID_LOOKUP[alias])
+ )
+ _NORMALIZE_NAMES[alias] = original
+ _NORMALIZE_NAMES_SHORT[alias] = _NORMALIZE_NAMES_SHORT[original]
+ _OID_LOOKUP[alias] = _OID_LOOKUP[original]
+
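+# The tables built above behave as follows (a sketch; names[0] is the long
+# name and names[-1] the short name of each _OID_MAP entry):
+#
+#   _OID_LOOKUP['commonName']             # -> '2.5.4.3'
+#   _OID_LOOKUP['CN']                     # -> '2.5.4.3'
+#   _NORMALIZE_NAMES['CN']                # -> 'commonName'
+#   _NORMALIZE_NAMES_SHORT['commonName']  # -> 'CN'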
+
+def pyopenssl_normalize_name(name, short=False):
+ nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(name))
+ if nid != 0:
+ b_name = OpenSSL._util.lib.OBJ_nid2ln(nid)
+ name = to_text(OpenSSL._util.ffi.string(b_name))
+ if short:
+ return _NORMALIZE_NAMES_SHORT.get(name, name)
+ else:
+ return _NORMALIZE_NAMES.get(name, name)
+
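+# For example (a sketch, assuming the linked OpenSSL knows the name):
+#
+#   pyopenssl_normalize_name('CN')              # -> 'commonName'
+#   pyopenssl_normalize_name('CN', short=True)  # -> 'CN'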
+
+# #####################################################################################
+# #####################################################################################
+# # This excerpt is dual licensed under the terms of the Apache License, Version
+# # 2.0, and the BSD License. See the LICENSE file at
+# # https://github.com/pyca/cryptography/blob/master/LICENSE for complete details.
+# #
+# # Adapted from cryptography's hazmat/backends/openssl/decode_asn1.py
+# #
+# # Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
+# # Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
+# #
+# # Relevant commits from cryptography project (https://github.com/pyca/cryptography):
+# # pyca/cryptography@719d536dd691e84e208534798f2eb4f82aaa2e07
+# # pyca/cryptography@5ab6d6a5c05572bd1c75f05baf264a2d0001894a
+# # pyca/cryptography@2e776e20eb60378e0af9b7439000d0e80da7c7e3
+# # pyca/cryptography@fb309ed24647d1be9e319b61b1f2aa8ebb87b90b
+# # pyca/cryptography@2917e460993c475c72d7146c50dc3bbc2414280d
+# # pyca/cryptography@3057f91ea9a05fb593825006d87a391286a4d828
+# # pyca/cryptography@d607dd7e5bc5c08854ec0c9baff70ba4a35be36f
+def _obj2txt(openssl_lib, openssl_ffi, obj):
+ # Set to 80 on the recommendation of
+ # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
+ #
+ # But OIDs longer than this occur in real life (e.g. Active
+ # Directory makes some very long OIDs). So we need to detect
+ # and properly handle the case where the default buffer is not
+ # big enough.
+ #
+ buf_len = 80
+ buf = openssl_ffi.new("char[]", buf_len)
+
+ # 'res' is the number of bytes that *would* be written if the
+ # buffer is large enough. If 'res' > buf_len - 1, we need to
+ # alloc a big-enough buffer and go again.
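+    # For example, an OID whose dotted string is 100 characters long makes
+    # the first call return res == 100 > 79, so we retry with a 101-byte
+    # buffer (100 characters plus the terminating null byte).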
+ res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+ if res > buf_len - 1: # account for terminating null byte
+ buf_len = res + 1
+ buf = openssl_ffi.new("char[]", buf_len)
+ res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
+ return openssl_ffi.buffer(buf, res)[:].decode()
+# #####################################################################################
+# #####################################################################################
+
+
+def cryptography_get_extensions_from_cert(cert):
+ # Since cryptography won't give us the DER value for an extension
+ # (that is only stored for unrecognized extensions), we have to re-do
+    # the extension parsing ourselves.
+ result = dict()
+ backend = cert._backend
+ x509_obj = cert._x509
+
+ for i in range(backend._lib.X509_get_ext_count(x509_obj)):
+ ext = backend._lib.X509_get_ext(x509_obj, i)
+ if ext == backend._ffi.NULL:
+ continue
+ crit = backend._lib.X509_EXTENSION_get_critical(ext)
+ data = backend._lib.X509_EXTENSION_get_data(ext)
+ backend.openssl_assert(data != backend._ffi.NULL)
+ der = backend._ffi.buffer(data.data, data.length)[:]
+ entry = dict(
+ critical=(crit == 1),
+ value=base64.b64encode(der),
+ )
+ oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
+ result[oid] = entry
+ return result
+
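+# The result maps dotted OID strings to the raw extension data. For example
+# (an illustrative sketch, not actual output), a critical basicConstraints
+# CA:TRUE extension would appear as
+#
+#   {'2.5.29.19': {'critical': True, 'value': b'MAMBAf8='}}
+#
+# where 'value' is the base64-encoded DER payload of the extension.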
+
+def cryptography_get_extensions_from_csr(csr):
+ # Since cryptography won't give us the DER value for an extension
+ # (that is only stored for unrecognized extensions), we have to re-do
+    # the extension parsing ourselves.
+ result = dict()
+ backend = csr._backend
+
+ extensions = backend._lib.X509_REQ_get_extensions(csr._x509_req)
+ extensions = backend._ffi.gc(
+ extensions,
+ lambda ext: backend._lib.sk_X509_EXTENSION_pop_free(
+ ext,
+ backend._ffi.addressof(backend._lib._original_lib, "X509_EXTENSION_free")
+ )
+ )
+
+ for i in range(backend._lib.sk_X509_EXTENSION_num(extensions)):
+ ext = backend._lib.sk_X509_EXTENSION_value(extensions, i)
+ if ext == backend._ffi.NULL:
+ continue
+ crit = backend._lib.X509_EXTENSION_get_critical(ext)
+ data = backend._lib.X509_EXTENSION_get_data(ext)
+ backend.openssl_assert(data != backend._ffi.NULL)
+ der = backend._ffi.buffer(data.data, data.length)[:]
+ entry = dict(
+ critical=(crit == 1),
+ value=base64.b64encode(der),
+ )
+ oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
+ result[oid] = entry
+ return result
+
+
+def pyopenssl_get_extensions_from_cert(cert):
+ # While pyOpenSSL allows us to get an extension's DER value, it won't
+ # give us the dotted string for an OID. So we have to do some magic to
+ # get hold of it.
+ result = dict()
+ ext_count = cert.get_extension_count()
+ for i in range(0, ext_count):
+ ext = cert.get_extension(i)
+ entry = dict(
+ critical=bool(ext.get_critical()),
+ value=base64.b64encode(ext.get_data()),
+ )
+ oid = _obj2txt(
+ OpenSSL._util.lib,
+ OpenSSL._util.ffi,
+ OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
+ )
+ # This could also be done a bit simpler:
+ #
+ # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
+ #
+    # Unfortunately this gives the wrong result if the linked OpenSSL
+    # doesn't know the OID. That's why we have to obtain the dotted string
+    # the same way cryptography does.
+ result[oid] = entry
+ return result
+
+
+def pyopenssl_get_extensions_from_csr(csr):
+ # While pyOpenSSL allows us to get an extension's DER value, it won't
+ # give us the dotted string for an OID. So we have to do some magic to
+ # get hold of it.
+ result = dict()
+ for ext in csr.get_extensions():
+ entry = dict(
+ critical=bool(ext.get_critical()),
+ value=base64.b64encode(ext.get_data()),
+ )
+ oid = _obj2txt(
+ OpenSSL._util.lib,
+ OpenSSL._util.ffi,
+ OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
+ )
+ # This could also be done a bit simpler:
+ #
+ # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
+ #
+    # Unfortunately this gives the wrong result if the linked OpenSSL
+    # doesn't know the OID. That's why we have to obtain the dotted string
+    # the same way cryptography does.
+ result[oid] = entry
+ return result
+
+
+def cryptography_name_to_oid(name):
+ dotted = _OID_LOOKUP.get(name)
+ if dotted is None:
+ raise OpenSSLObjectError('Cannot find OID for "{0}"'.format(name))
+ return x509.oid.ObjectIdentifier(dotted)
+
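+# For example (a sketch):
+#
+#   cryptography_name_to_oid('commonName')    # -> ObjectIdentifier('2.5.4.3')
+#   cryptography_name_to_oid('CN')            # -> ObjectIdentifier('2.5.4.3')
+#   cryptography_name_to_oid('no-such-name')  # raises OpenSSLObjectError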
+
+def cryptography_oid_to_name(oid, short=False):
+ dotted_string = oid.dotted_string
+ names = _OID_MAP.get(dotted_string)
+ name = names[0] if names else oid._name
+ if short:
+ return _NORMALIZE_NAMES_SHORT.get(name, name)
+ else:
+ return _NORMALIZE_NAMES.get(name, name)
+
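+# For example (a sketch):
+#
+#   cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.4.3'))              # -> 'commonName'
+#   cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.4.3'), short=True)  # -> 'CN'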
+
+def cryptography_get_name(name):
+ '''
+    Given a name string, returns a cryptography x509.GeneralName object
+    (one of x509.DNSName, x509.IPAddress, x509.RFC822Name or
+    x509.UniformResourceIdentifier).
+ Raises an OpenSSLObjectError if the name is unknown or cannot be parsed.
+ '''
+ try:
+ if name.startswith('DNS:'):
+ return x509.DNSName(to_text(name[4:]))
+ if name.startswith('IP:'):
+ return x509.IPAddress(ipaddress.ip_address(to_text(name[3:])))
+ if name.startswith('email:'):
+ return x509.RFC822Name(to_text(name[6:]))
+ if name.startswith('URI:'):
+ return x509.UniformResourceIdentifier(to_text(name[4:]))
+ except Exception as e:
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}": {1}'.format(name, e))
+ if ':' not in name:
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (forgot "DNS:" prefix?)'.format(name))
+ raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (potentially unsupported by cryptography backend)'.format(name))
+
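+# For example (a sketch):
+#
+#   cryptography_get_name('DNS:example.com')  # -> x509.DNSName(u'example.com')
+#   cryptography_get_name('IP:192.0.2.1')     # -> x509.IPAddress(IPv4Address(u'192.0.2.1'))
+#   cryptography_get_name('example.com')      # raises OpenSSLObjectError (missing prefix)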
+
+def _get_hex(bytesstr):
+ if bytesstr is None:
+ return bytesstr
+ data = binascii.hexlify(bytesstr)
+ data = to_text(b':'.join(data[i:i + 2] for i in range(0, len(data), 2)))
+ return data
+
+
+def cryptography_decode_name(name):
+ '''
+    Given a cryptography x509 general name object (such as x509.DNSName), returns its string representation.
+ Raises an OpenSSLObjectError if the name is not supported.
+ '''
+ if isinstance(name, x509.DNSName):
+ return 'DNS:{0}'.format(name.value)
+ if isinstance(name, x509.IPAddress):
+ return 'IP:{0}'.format(name.value.compressed)
+ if isinstance(name, x509.RFC822Name):
+ return 'email:{0}'.format(name.value)
+ if isinstance(name, x509.UniformResourceIdentifier):
+ return 'URI:{0}'.format(name.value)
+ if isinstance(name, x509.DirectoryName):
+ # FIXME: test
+ return 'DirName:' + ''.join(['/{0}:{1}'.format(attribute.oid._name, attribute.value) for attribute in name.value])
+ if isinstance(name, x509.RegisteredID):
+ # FIXME: test
+ return 'RegisteredID:{0}'.format(name.value)
+ if isinstance(name, x509.OtherName):
+ # FIXME: test
+ return '{0}:{1}'.format(name.type_id.dotted_string, _get_hex(name.value))
+ raise OpenSSLObjectError('Cannot decode name "{0}"'.format(name))
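+
+# Illustrative round trip (editor's sketch): for the common name types this is
+# the inverse of cryptography_get_name(), e.g.
+#   cryptography_decode_name(x509.DNSName(u'www.ansible.com'))     == 'DNS:www.ansible.com'
+#   cryptography_decode_name(x509.RFC822Name(u'root@ansible.com')) == 'email:root@ansible.com'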
+
+
+def _cryptography_get_keyusage(usage):
+ '''
+ Given a key usage identifier string, returns the parameter name used by cryptography's x509.KeyUsage().
+ Raises an OpenSSLObjectError if the identifier is unknown.
+ '''
+ if usage in ('Digital Signature', 'digitalSignature'):
+ return 'digital_signature'
+ if usage in ('Non Repudiation', 'nonRepudiation'):
+ return 'content_commitment'
+ if usage in ('Key Encipherment', 'keyEncipherment'):
+ return 'key_encipherment'
+ if usage in ('Data Encipherment', 'dataEncipherment'):
+ return 'data_encipherment'
+ if usage in ('Key Agreement', 'keyAgreement'):
+ return 'key_agreement'
+ if usage in ('Certificate Sign', 'keyCertSign'):
+ return 'key_cert_sign'
+ if usage in ('CRL Sign', 'cRLSign'):
+ return 'crl_sign'
+ if usage in ('Encipher Only', 'encipherOnly'):
+ return 'encipher_only'
+ if usage in ('Decipher Only', 'decipherOnly'):
+ return 'decipher_only'
+ raise OpenSSLObjectError('Unknown key usage "{0}"'.format(usage))
+
+
+def cryptography_parse_key_usage_params(usages):
+ '''
+ Given a list of key usage identifier strings, returns the parameters for cryptography's x509.KeyUsage().
+ Raises an OpenSSLObjectError if an identifier is unknown.
+ '''
+ params = dict(
+ digital_signature=False,
+ content_commitment=False,
+ key_encipherment=False,
+ data_encipherment=False,
+ key_agreement=False,
+ key_cert_sign=False,
+ crl_sign=False,
+ encipher_only=False,
+ decipher_only=False,
+ )
+ for usage in usages:
+ params[_cryptography_get_keyusage(usage)] = True
+ return params
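+
+# Illustrative usage (editor's sketch): both OpenSSL-style and human-readable
+# identifiers are accepted, e.g.
+#   cryptography_parse_key_usage_params(['digitalSignature', 'Key Encipherment'])
+# returns the full parameter dict with digital_signature=True and
+# key_encipherment=True, and every other usage False.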
+
+
+def cryptography_get_basic_constraints(constraints):
+ '''
+ Given a list of constraints, returns a tuple (ca, path_length).
+ Raises an OpenSSLObjectError if a constraint is unknown or cannot be parsed.
+ '''
+ ca = False
+ path_length = None
+ if constraints:
+ for constraint in constraints:
+ if constraint.startswith('CA:'):
+ if constraint == 'CA:TRUE':
+ ca = True
+ elif constraint == 'CA:FALSE':
+ ca = False
+ else:
+ raise OpenSSLObjectError('Unknown basic constraint value "{0}" for CA'.format(constraint[3:]))
+ elif constraint.startswith('pathlen:'):
+ v = constraint[len('pathlen:'):]
+ try:
+ path_length = int(v)
+ except Exception as e:
+ raise OpenSSLObjectError('Cannot parse path length constraint "{0}" ({1})'.format(v, e))
+ else:
+ raise OpenSSLObjectError('Unknown basic constraint "{0}"'.format(constraint))
+ return ca, path_length
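+
+# Illustrative usage (editor's sketch):
+#   cryptography_get_basic_constraints(['CA:TRUE', 'pathlen:1'])  == (True, 1)
+#   cryptography_get_basic_constraints(['CA:FALSE'])              == (False, None)
+#   cryptography_get_basic_constraints(None)                      == (False, None)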
+
+
+def binary_exp_mod(f, e, m):
+ '''Computes f^e mod m in O(log e) multiplications modulo m.'''
+ # Compute len_e = floor(log_2(e))
+ len_e = -1
+ x = e
+ while x > 0:
+ x >>= 1
+ len_e += 1
+ # Compute f**e mod m
+ result = 1
+ for k in range(len_e, -1, -1):
+ result = (result * result) % m
+ if ((e >> k) & 1) != 0:
+ result = (result * f) % m
+ return result
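+
+# Illustrative check (editor's sketch): the square-and-multiply loop above
+# agrees with Python's built-in three-argument pow(), e.g.
+#   binary_exp_mod(4, 13, 497) == pow(4, 13, 497) == 445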
+
+
+def simple_gcd(a, b):
+ '''Compute GCD of its two inputs.'''
+ while b != 0:
+ a, b = b, a % b
+ return a
+
+
+def quick_is_not_prime(n):
+ '''Does some quick checks to see if we can poke a hole into the primality of n.
+
+ A result of `False` does **not** mean that the number is prime; it just means
+ that we couldn't detect quickly whether it is not prime.
+ '''
+    if n <= 1:
+        return True
+    if n in (2, 3):
+        return False
+    # The constant in the next line is the product of all primes < 200. A
+    # non-trivial GCD means n has a prime factor below 200. (A prime n below
+    # 200 would itself trigger this test, but callers only pass large
+    # candidates, for which the check is sound.)
+    if simple_gcd(n, 7799922041683461553249199106329813876687996789903550945093032474868511536164700810) > 1:
+        return True
+ # TODO: maybe do some iterations of Miller-Rabin to increase confidence
+ # (https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test)
+ return False
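+
+# Illustrative usage (editor's sketch):
+#   simple_gcd(12, 18)           == 6
+#   quick_is_not_prime(2 ** 16)  is True   (even, so it has a prime factor < 200)
+#   quick_is_not_prime(65537)    is False  (65537 is prime; no quick disproof found)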
+
+
+python_version = (sys.version_info[0], sys.version_info[1])
+if (2, 7) <= python_version < (3, 0) or python_version >= (3, 1):
+    # int.bit_length() exists since Python 2.7/3.1; Ansible still supports
+    # Python 2.6 on remote nodes, which needs the fallback implementation below
+ def count_bits(no):
+ no = abs(no)
+ if no == 0:
+ return 0
+ return no.bit_length()
+else:
+ # Slow, but works
+ def count_bits(no):
+ no = abs(no)
+ count = 0
+ while no > 0:
+ no >>= 1
+ count += 1
+ return count
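+
+# Illustrative usage (editor's sketch): both implementations agree, e.g.
+#   count_bits(0)   == 0
+#   count_bits(255) == 8
+#   count_bits(256) == 9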
+
+
+PEM_START = '-----BEGIN '
+PEM_END = '-----'
+PKCS8_PRIVATEKEY_NAMES = ('PRIVATE KEY', 'ENCRYPTED PRIVATE KEY')
+PKCS1_PRIVATEKEY_SUFFIX = ' PRIVATE KEY'
+
+
+def identify_private_key_format(content):
+ '''Given the contents of a private key file, identifies its format.'''
+ # See https://github.com/openssl/openssl/blob/master/crypto/pem/pem_pkey.c#L40-L85
+ # (PEM_read_bio_PrivateKey)
+ # and https://github.com/openssl/openssl/blob/master/include/openssl/pem.h#L46-L47
+ # (PEM_STRING_PKCS8, PEM_STRING_PKCS8INF)
+ try:
+ lines = content.decode('utf-8').splitlines(False)
+        if lines and lines[0].startswith(PEM_START) and lines[0].endswith(PEM_END) and len(lines[0]) > len(PEM_START) + len(PEM_END):
+ name = lines[0][len(PEM_START):-len(PEM_END)]
+ if name in PKCS8_PRIVATEKEY_NAMES:
+ return 'pkcs8'
+ if len(name) > len(PKCS1_PRIVATEKEY_SUFFIX) and name.endswith(PKCS1_PRIVATEKEY_SUFFIX):
+ return 'pkcs1'
+ return 'unknown-pem'
+ except UnicodeDecodeError:
+ pass
+ return 'raw'
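+
+# Illustrative usage (editor's sketch):
+#   identify_private_key_format(b'-----BEGIN RSA PRIVATE KEY-----\n...') == 'pkcs1'
+#   identify_private_key_format(b'-----BEGIN PRIVATE KEY-----\n...')     == 'pkcs8'
+#   identify_private_key_format(b'\x30\x82...')                          == 'raw'  (DER)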
+
+
+def cryptography_key_needs_digest_for_signing(key):
+ '''Tests whether the given private key requires a digest algorithm for signing.
+
+ Ed25519 and Ed448 keys do not; they need None to be passed as the digest algorithm.
+ '''
+ if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
+ return False
+ if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
+ return False
+ return True
+
+
+def cryptography_compare_public_keys(key1, key2):
+ '''Tests whether two public keys are the same.
+
+ Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers().
+ '''
+ if CRYPTOGRAPHY_HAS_ED25519:
+ a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
+ b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
+ if a or b:
+ if not a or not b:
+ return False
+ a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ return a == b
+ if CRYPTOGRAPHY_HAS_ED448:
+ a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
+ b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
+ if a or b:
+ if not a or not b:
+ return False
+ a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
+ return a == b
+ return key1.public_numbers() == key2.public_numbers()
+
+
+if HAS_CRYPTOGRAPHY:
+ REVOCATION_REASON_MAP = {
+ 'unspecified': x509.ReasonFlags.unspecified,
+ 'key_compromise': x509.ReasonFlags.key_compromise,
+ 'ca_compromise': x509.ReasonFlags.ca_compromise,
+ 'affiliation_changed': x509.ReasonFlags.affiliation_changed,
+ 'superseded': x509.ReasonFlags.superseded,
+ 'cessation_of_operation': x509.ReasonFlags.cessation_of_operation,
+ 'certificate_hold': x509.ReasonFlags.certificate_hold,
+ 'privilege_withdrawn': x509.ReasonFlags.privilege_withdrawn,
+ 'aa_compromise': x509.ReasonFlags.aa_compromise,
+ 'remove_from_crl': x509.ReasonFlags.remove_from_crl,
+ }
+ REVOCATION_REASON_MAP_INVERSE = dict()
+ for k, v in REVOCATION_REASON_MAP.items():
+ REVOCATION_REASON_MAP_INVERSE[v] = k
+
+
+def cryptography_decode_revoked_certificate(cert):
+ result = {
+ 'serial_number': cert.serial_number,
+ 'revocation_date': cert.revocation_date,
+ 'issuer': None,
+ 'issuer_critical': False,
+ 'reason': None,
+ 'reason_critical': False,
+ 'invalidity_date': None,
+ 'invalidity_date_critical': False,
+ }
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.CertificateIssuer)
+ result['issuer'] = list(ext.value)
+ result['issuer_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.CRLReason)
+ result['reason'] = ext.value.reason
+ result['reason_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.InvalidityDate)
+ result['invalidity_date'] = ext.value.invalidity_date
+ result['invalidity_date_critical'] = ext.critical
+ except x509.ExtensionNotFound:
+ pass
+ return result
diff --git a/test/support/integration/plugins/module_utils/database.py b/test/support/integration/plugins/module_utils/database.py
new file mode 100644
index 00000000..014939a2
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/database.py
@@ -0,0 +1,142 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class SQLParseError(Exception):
+ pass
+
+
+class UnclosedQuoteError(SQLParseError):
+ pass
+
+
+# maps a type of identifier to the maximum number of dot levels that are
+# allowed to specify that identifier. For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+ database=1,
+ schema=2,
+ table=3,
+ column=4,
+ role=1,
+ tablespace=1,
+ sequence=3,
+ publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+ if not identifier:
+ raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+ already_quoted = False
+ if identifier.startswith(quote_char):
+ already_quoted = True
+ try:
+ end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+ except UnclosedQuoteError:
+ already_quoted = False
+ else:
+ if end_quote < len(identifier) - 1:
+ if identifier[end_quote + 1] == '.':
+ dot = end_quote + 1
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ further_identifiers.insert(0, first_identifier)
+ else:
+ raise SQLParseError('User escaped identifiers must escape extra quotes')
+ else:
+ further_identifiers = [identifier]
+
+ if not already_quoted:
+ try:
+ dot = identifier.index('.')
+ except ValueError:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ if dot == 0 or dot >= len(identifier) - 1:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+ first_identifier = ''.join((quote_char, first_identifier, quote_char))
+ further_identifiers.insert(0, first_identifier)
+
+ return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='"')
+ if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+ return '.'.join(identifier_fragments)
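+
+# Illustrative usage (editor's sketch):
+#   pg_quote_identifier('db.schema.table', 'table')  == '"db"."schema"."table"'
+#   pg_quote_identifier('bad"name', 'role')          == '"bad""name"'
+#   pg_quote_identifier('a.b.c.d.e', 'column')       raises SQLParseError (too many dots)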
+
+
+def mysql_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='`')
+ if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+ special_cased_fragments = []
+ for fragment in identifier_fragments:
+ if fragment == '`*`':
+ special_cased_fragments.append('*')
+ else:
+ special_cased_fragments.append(fragment)
+
+ return '.'.join(special_cased_fragments)
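+
+# Illustrative usage (editor's sketch):
+#   mysql_quote_identifier('mydb.mytable', 'table')  == '`mydb`.`mytable`'
+#   mysql_quote_identifier('mydb.*', 'table')        == '`mydb`.*'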
diff --git a/test/support/integration/plugins/module_utils/docker/__init__.py b/test/support/integration/plugins/module_utils/docker/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/__init__.py
diff --git a/test/support/integration/plugins/module_utils/docker/common.py b/test/support/integration/plugins/module_utils/docker/common.py
new file mode 100644
index 00000000..03307250
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/common.py
@@ -0,0 +1,1022 @@
+#
+# Copyright 2016 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException
+except ImportError:
+ # Either docker-py is no longer using requests, or docker-py isn't around either,
+ # or docker-py's dependency requests is missing. In any case, define an exception
+ # class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+    # No Docker SDK for Python. Create a placeholder client to allow
+    # instantiation of AnsibleModule and proper error handling
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+    # See here ("Extended description") for a definition of what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
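+
+# Illustrative usage (editor's sketch):
+#   is_valid_tag('v1.0')      is True
+#   is_valid_tag('.invalid')  is False  (first character must be alphanumeric or '_')
+#   is_valid_tag('')          is False, unless called with allow_empty=True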
+
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
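+
+# Illustrative usage (editor's sketch): given a value such as
+#   {'HostConfig': HostConfig(...), 'Mounts': (mount1, mount2)}
+# sanitize_result() returns a plain dict whose 'Mounts' value is a plain list,
+# so it can safely be passed to exit_json()/fail_json().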
+
+
+class DockerBaseClass(object):
+
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+            result['tls_hostname'] = parsed_url.netloc
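+
+# Illustrative usage (editor's sketch):
+#   result = {'tls_hostname': None, 'docker_host': 'tcp://192.0.2.10:2376'}
+#   update_tls_hostname(result)  # result['tls_hostname'] becomes '192.0.2.10'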
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+        # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
+ "Hint: if you do not need Python 2.6 support, try "
+ "`pip uninstall docker-py` instead, followed by `pip install docker`.")
+
+
+class AnsibleDockerClient(Client):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if)
+
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module if no "
+ "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
+ "can leave the other module in a broken state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ if NEEDS_DOCKER_PY2:
+ msg = missing_required_lib("Docker SDK for Python: docker")
+ msg = msg + ", for example via `pip install docker`. The error was: %s"
+ else:
+ msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
+ msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py) for non-Python-2.6 users.
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+        elif self.docker_py_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClient, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.version()['ApiVersion']
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ if min_docker_api_version is not None:
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value):
+ if param_value is not None:
+ # take module parameter value
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return env_value
+
+ # take the default
+ return default_value
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters-> environment variables-> defaults.
+
+ self.log('Getting credentials')
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = self.module.params.get(key)
+
+ if self.module.params.get('use_tls'):
+ # support use_tls option in docker_image.py. This will be deprecated.
+ use_tls = self.module.params.get('use_tls')
+ if use_tls == 'encrypt':
+ params['tls'] = True
+ if use_tls == 'verify':
+ params['validate_certs'] = True
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+            self.fail("You asked for verification that the Docker daemon certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+            # With API <= 1.20, images pulled from Docker Hub are reported with 'docker.io/<name>' as their name
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+                # If docker.io is explicitly part of the name, the image
+                # is not found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+                    # Last case: if the name did not contain docker.io explicitly,
+                    # the image may only be found under its fully qualified name (#15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest"):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
+
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+      - ``strict`` (only compare if really equal);
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+ - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
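+
+# Illustrative usage (editor's sketch):
+#   compare_generic('a', 'b', 'ignore', 'value')                      is True
+#   compare_generic([1, 2], [1, 3, 2], 'allow_more_present', 'list')  is True   (ordered subsequence)
+#   compare_generic([1, 2], [2, 1], 'strict', 'set')                  is True   (order is ignored)
+#   compare_generic([1, 2], [2, 1], 'strict', 'list')                 is False  (order matters)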
+
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+ Returns a boolean if a difference exists for name
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
+
+
+def clean_dict_booleans_for_docker_api(data):
+ '''
+ Go doesn't like Python booleans 'True' or 'False', while Ansible is just
+ fine with them in YAML. As such, they need to be converted in cases where
+ we pass dictionaries to the Docker API (e.g. docker_network's
+ driver_options and docker_prune's filters).
+ '''
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ if v is True:
+ v = 'true'
+ elif v is False:
+ v = 'false'
+ else:
+ v = str(v)
+ result[str(k)] = v
+ return result
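+
+# Illustrative usage (editor's sketch):
+#   clean_dict_booleans_for_docker_api({'enable_icc': True, 'mtu': 1500})
+# returns {'enable_icc': 'true', 'mtu': '1500'}.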
+
+
+def convert_duration_to_nanosecond(time_str):
+ """
+ Return time duration in nanosecond.
+ """
+ if not isinstance(time_str, str):
+        raise ValueError('Time duration must be a string - got %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
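+
+# Illustrative usage (editor's sketch):
+#   convert_duration_to_nanosecond('1h30m') == 5400000000000
+#   convert_duration_to_nanosecond('10s')   == 10000000000
+#   convert_duration_to_nanosecond(90)      raises ValueError (input must be a string)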
+
+
+def parse_healthcheck(healthcheck):
+ """
+ Return dictionary of healthcheck parameters and boolean if
+ healthcheck defined in image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = dict(
+ test='test',
+ interval='interval',
+ timeout='timeout',
+ start_period='start_period',
+ retries='retries'
+ )
+
+ duration_options = ['interval', 'timeout', 'start_period']
+
+ for (key, value) in options.items():
+ if value in healthcheck:
+ if healthcheck.get(value) is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value in duration_options:
+ time = convert_duration_to_nanosecond(healthcheck.get(value))
+ if time:
+ result[key] = time
+ elif healthcheck.get(value):
+ result[key] = healthcheck.get(value)
+ if key == 'test':
+ if isinstance(result[key], (tuple, list)):
+ result[key] = [str(e) for e in result[key]]
+ else:
+ result[key] = ['CMD-SHELL', str(result[key])]
+ elif key == 'retries':
+ try:
+ result[key] = int(result[key])
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(result[key])
+ )
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
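+
+# Illustrative usage (editor's sketch):
+#   parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s',
+#                      'timeout': '10s', 'retries': 3})
+# returns ({'test': ['CMD-SHELL', 'curl -f http://localhost/'],
+#           'interval': 30000000000, 'timeout': 10000000000, 'retries': 3}, False),
+# while a healthcheck of {'test': ['NONE']} yields (None, True), i.e. the
+# image's healthcheck was explicitly disabled.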
+
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
diff --git a/test/support/integration/plugins/module_utils/docker/swarm.py b/test/support/integration/plugins/module_utils/docker/swarm.py
new file mode 100644
index 00000000..55d94db0
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/docker/swarm.py
@@ -0,0 +1,280 @@
+# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.docker.common import (
+ AnsibleDockerClient,
+ LooseVersion,
+)
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+        Get the 'NodeID' of the Swarm node, that is, of the Docker host the module is executed on.
+        :return:
+            NodeID of the host, or 'None' if the host is not part of a Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+ self.fail("Failed to get node information for %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+        Checks if the host is part of a Docker Swarm. If 'node_id' is not provided, it reads the Docker host's
+        system information and looks for the relevant key in the output. If 'node_id' is provided, it tries to
+        read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles
+        the exception if it is not executed on a Swarm manager
+
+ :param node_id: Node identifier
+ :return:
+ bool: True if node is part of Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+ return
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+        Checks if the node's role is set to Manager in the Swarm. The node is the Docker host on which the
+        module action is performed. The inspect_swarm() call will fail if the node is not a manager
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+ If host is not a swarm manager then Ansible task on this host should end with 'failed' state
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+        Checks if the node's role is set to Worker in the Swarm. The node is the Docker host on which the
+        module action is performed. Fails, via check_if_swarm_node(), if run on a host that is not part of a Swarm
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+        Checks if the node status on the Swarm manager is 'down'. If node_id is provided, it queries the manager
+        about the node specified in the parameter; otherwise it queries the manager about itself. If run on a
+        Swarm worker node or on a host that is not part of a Swarm, it will fail the playbook
+
+ :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ True if node is part of swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+        Returns Swarm node info about a single node, as from the 'docker node inspect' command
+
+ :param skip_missing: if True then function will return None instead of failing the task
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+                # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
+                # Check moby/moby#35437 for details
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+        Returns Swarm node info about all registered nodes, as from the 'docker node inspect' command
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+ Returns list of nodes registered in Swarm
+
+ :param output: Defines format of returned data
+ :return:
+            If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered
+            in the Swarm; if 'output' is 'long', it is a list of dicts containing the attributes as in
+            the output of the command 'docker node ls'
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
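+
+ # Shape of the returned data (illustrative values): with output='short' this
+ # is e.g. ['node-1', 'node-2']; with output='long' it is e.g.
+ # [{'ID': 'abc123', 'Hostname': 'node-1', 'Status': 'ready',
+ # 'Availability': 'active', 'Leader': True, 'ManagerStatus': 'reachable',
+ # 'EngineVersion': '19.03.5'}].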
+
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+ Returns information about a single Swarm service, as reported by the 'docker service inspect' command
+
+ :param service_id: service ID or name
+ :param skip_missing: if True, return None instead of failing the task when the service cannot be found
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
diff --git a/test/support/integration/plugins/module_utils/ec2.py b/test/support/integration/plugins/module_utils/ec2.py
new file mode 100644
index 00000000..0d28108d
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ec2.py
@@ -0,0 +1,758 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import traceback
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import missing_required_lib, env_fallback
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.cloud import CloudRetry
+from ansible.module_utils.six import string_types, binary_type, text_type
+from ansible.module_utils.common.dict_transformations import (
+ camel_dict_to_snake_dict, snake_dict_to_camel_dict,
+ _camel_to_snake, _snake_to_camel,
+)
+
+BOTO_IMP_ERR = None
+try:
+ import boto
+ import boto.ec2 # boto does weird import stuff
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+BOTO3_IMP_ERR = None
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except Exception:
+ BOTO3_IMP_ERR = traceback.format_exc()
+ HAS_BOTO3 = False
+
+try:
+ # Although this exists to let Python 3 use the custom comparison as a key,
+ # Python 2.7 also uses it (and it works as expected). Python 2.6 will trigger the ImportError.
+ from functools import cmp_to_key
+ PY3_COMPARISON = True
+except ImportError:
+ PY3_COMPARISON = False
+
+
+class AnsibleAWSError(Exception):
+ pass
+
+
+def _botocore_exception_maybe():
+ """
+ Allow for boto3 not being installed when using these utils by wrapping
+ botocore.exceptions instead of assigning from it directly.
+ """
+ if HAS_BOTO3:
+ return botocore.exceptions.ClientError
+ return type(None)
+
+
+class AWSRetry(CloudRetry):
+ base_class = _botocore_exception_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.response['Error']['Code']
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This list of failures is based on this API Reference
+ # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+ #
+ # TooManyRequestsException comes from inside botocore when it
+ # does retries, but unfortunately it does not retry long
+ # enough to allow some services, such as API Gateway, to
+ # complete configuration. At the time of writing there is a
+ # botocore/boto3 bug open to fix this.
+ #
+ # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
+ retry_on = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling'
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
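+
+ # A minimal usage sketch, assuming the CloudRetry base class provides its
+ # usual decorator factories (e.g. jittered_backoff); the function name and
+ # parameters here are illustrative:
+ #
+ # @AWSRetry.jittered_backoff(retries=10)
+ # def describe_instances(connection, **params):
+ # return connection.describe_instances(**params)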
+
+
+def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ try:
+ return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
+ except ValueError as e:
+ module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
+ module.fail_json(msg=to_native(e))
+ except botocore.exceptions.NoRegionError as e:
+ module.fail_json(msg="The %s module requires a region and none was found in configuration, "
+ "environment variables or module parameters" % module._name)
+
+
+def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
+ profile = params.pop('profile_name', None)
+
+ if conn_type not in ['both', 'resource', 'client']:
+ raise ValueError('There is an issue in the calling code. You '
+ 'must specify either both, resource, or client to '
+ 'the conn_type parameter in the boto3_conn function '
+ 'call')
+
+ config = botocore.config.Config(
+ user_agent_extra='Ansible/{0}'.format(__version__),
+ )
+
+ if params.get('config') is not None:
+ config = config.merge(params.pop('config'))
+ if params.get('aws_config') is not None:
+ config = config.merge(params.pop('aws_config'))
+
+ session = boto3.session.Session(
+ profile_name=profile,
+ )
+
+ if conn_type == 'resource':
+ return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ elif conn_type == 'client':
+ return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ else:
+ client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ return client, resource
+
+
+boto3_inventory_conn = _boto3_conn
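+
+ # Illustrative calls: conn_type='client' returns a low-level client,
+ # conn_type='resource' a boto3 resource, and conn_type='both' a
+ # (client, resource) pair. 'region', 'ec2_url' and 'aws_connect_params'
+ # would typically come from get_aws_connection_info() below:
+ #
+ # s3 = boto3_conn(module, conn_type='client', resource='s3',
+ # region=region, endpoint=ec2_url, **aws_connect_params)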
+
+
+def boto_exception(err):
+ """
+ Extracts the error message from a boto exception.
+
+ :param err: Exception from boto
+ :return: Error message
+ """
+ if hasattr(err, 'error_message'):
+ error = err.error_message
+ elif hasattr(err, 'message'):
+ error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
+ else:
+ error = '%s: %s' % (Exception, err)
+
+ return error
+
+
+def aws_common_argument_spec():
+ return dict(
+ debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
+ ec2_url=dict(),
+ aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
+ aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
+ validate_certs=dict(default=True, type='bool'),
+ security_token=dict(aliases=['access_token'], no_log=True),
+ profile=dict(),
+ aws_config=dict(type='dict'),
+ )
+
+
+def ec2_argument_spec():
+ spec = aws_common_argument_spec()
+ spec.update(
+ dict(
+ region=dict(aliases=['aws_region', 'ec2_region']),
+ )
+ )
+ return spec
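+
+ # Typical use in a module (sketch; the 'instance_ids' option is purely
+ # illustrative): extend the shared spec before constructing AnsibleModule.
+ #
+ # argument_spec = ec2_argument_spec()
+ # argument_spec.update(dict(instance_ids=dict(type='list')))
+ # module = AnsibleModule(argument_spec=argument_spec)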
+
+
+def get_aws_region(module, boto3=False):
+ region = module.params.get('region')
+
+ if region:
+ return region
+
+ if 'AWS_REGION' in os.environ:
+ return os.environ['AWS_REGION']
+ if 'AWS_DEFAULT_REGION' in os.environ:
+ return os.environ['AWS_DEFAULT_REGION']
+ if 'EC2_REGION' in os.environ:
+ return os.environ['EC2_REGION']
+
+ if not boto3:
+ if not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
+ # boto.config.get returns None if config not found
+ region = boto.config.get('Boto', 'aws_region')
+ if region:
+ return region
+ return boto.config.get('Boto', 'ec2_region')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
+
+ # No additional API call is needed here; botocore defaults to 'us-east-1' if the lookup below evaluates to None.
+ try:
+ profile_name = module.params.get('profile')
+ return botocore.session.Session(profile=profile_name).get_config_variable('region')
+ except botocore.exceptions.ProfileNotFound as e:
+ return None
+
+
+def get_aws_connection_info(module, boto3=False):
+
+ # Check module args for credentials, then check environment vars
+ # access_key
+
+ ec2_url = module.params.get('ec2_url')
+ access_key = module.params.get('aws_access_key')
+ secret_key = module.params.get('aws_secret_key')
+ security_token = module.params.get('security_token')
+ region = get_aws_region(module, boto3)
+ profile_name = module.params.get('profile')
+ validate_certs = module.params.get('validate_certs')
+ config = module.params.get('aws_config')
+
+ if not ec2_url:
+ if 'AWS_URL' in os.environ:
+ ec2_url = os.environ['AWS_URL']
+ elif 'EC2_URL' in os.environ:
+ ec2_url = os.environ['EC2_URL']
+
+ if not access_key:
+ if os.environ.get('AWS_ACCESS_KEY_ID'):
+ access_key = os.environ['AWS_ACCESS_KEY_ID']
+ elif os.environ.get('AWS_ACCESS_KEY'):
+ access_key = os.environ['AWS_ACCESS_KEY']
+ elif os.environ.get('EC2_ACCESS_KEY'):
+ access_key = os.environ['EC2_ACCESS_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
+ access_key = boto.config.get('Credentials', 'aws_access_key_id')
+ elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
+ access_key = boto.config.get('default', 'aws_access_key_id')
+ else:
+ # in case access_key came in as empty string
+ access_key = None
+
+ if not secret_key:
+ if os.environ.get('AWS_SECRET_ACCESS_KEY'):
+ secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
+ elif os.environ.get('AWS_SECRET_KEY'):
+ secret_key = os.environ['AWS_SECRET_KEY']
+ elif os.environ.get('EC2_SECRET_KEY'):
+ secret_key = os.environ['EC2_SECRET_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
+ secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
+ elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
+ secret_key = boto.config.get('default', 'aws_secret_access_key')
+ else:
+ # in case secret_key came in as empty string
+ secret_key = None
+
+ if not security_token:
+ if os.environ.get('AWS_SECURITY_TOKEN'):
+ security_token = os.environ['AWS_SECURITY_TOKEN']
+ elif os.environ.get('AWS_SESSION_TOKEN'):
+ security_token = os.environ['AWS_SESSION_TOKEN']
+ elif os.environ.get('EC2_SECURITY_TOKEN'):
+ security_token = os.environ['EC2_SECURITY_TOKEN']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
+ security_token = boto.config.get('Credentials', 'aws_security_token')
+ elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
+ security_token = boto.config.get('default', 'aws_security_token')
+ else:
+ # in case security_token came in as empty string
+ security_token = None
+
+ if HAS_BOTO3 and boto3:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ aws_session_token=security_token)
+ boto_params['verify'] = validate_certs
+
+ if profile_name:
+ boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
+ boto_params['profile_name'] = profile_name
+
+ else:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ security_token=security_token)
+
+ # only set profile_name if passed as an argument
+ if profile_name:
+ boto_params['profile_name'] = profile_name
+
+ boto_params['validate_certs'] = validate_certs
+
+ if config is not None:
+ if HAS_BOTO3 and boto3:
+ boto_params['aws_config'] = botocore.config.Config(**config)
+ elif HAS_BOTO and not boto3:
+ if 'user_agent' in config:
+ sys.modules["boto.connection"].UserAgent = config['user_agent']
+
+ for param, value in boto_params.items():
+ if isinstance(value, binary_type):
+ boto_params[param] = text_type(value, 'utf-8', 'strict')
+
+ return region, ec2_url, boto_params
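+
+ # Usage sketch: the returned triple feeds directly into boto3_conn() above.
+ #
+ # region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ # connection = boto3_conn(module, conn_type='client', resource='ec2',
+ # region=region, endpoint=ec2_url, **aws_connect_params)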
+
+
+def get_ec2_creds(module):
+ ''' For compatibility with old modules that don't/can't yet
+ use the ec2_connect method '''
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
+
+
+def boto_fix_security_token_in_profile(conn, profile_name):
+ ''' monkey patch for boto issue boto/boto#2100 '''
+ profile = 'profile ' + profile_name
+ if boto.config.has_option(profile, 'aws_security_token'):
+ conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
+ return conn
+
+
+def connect_to_aws(aws_module, region, **params):
+ try:
+ conn = aws_module.connect_to_region(region, **params)
+ except(boto.provider.ProfileNotFoundError):
+ raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
+ if not conn:
+ if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
+ raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
+ "boto or extend with endpoints_path" % (region, aws_module.__name__))
+ else:
+ raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
+ if params.get('profile_name'):
+ conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
+ return conn
+
+
+def ec2_connect(module):
+
+ """ Return an ec2 connection"""
+
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+
+ # If we have a region specified, connect to its endpoint.
+ if region:
+ try:
+ ec2 = connect_to_aws(boto.ec2, region, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ # Otherwise, no region so we fall back to the old connection method
+ elif ec2_url:
+ try:
+ ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="Either region or ec2_url must be specified")
+
+ return ec2
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': ['i-01234567']
+ }
+ ]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+ {
+ 'MyTagKey': 'MyTagValue'
+ }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ if not tags_list:
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+
+ """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+ >>> ansible_dict_to_boto3_tag_list(tags_dict)
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ Returns:
+ List: List of dicts containing tag keys and values
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ """
+
+ tags_list = []
+ for k, v in tags_dict.items():
+ tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
+
+ return tags_list
+
+
+def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
+
+ """ Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+ will probably lead to a boto exception if you attempt to assign both IDs to a resource, so ensure you wrap
+ the call in a try block.
+ """
+
+ def get_sg_name(sg, boto3):
+
+ if boto3:
+ return sg['GroupName']
+ else:
+ return sg.name
+
+ def get_sg_id(sg, boto3):
+
+ if boto3:
+ return sg['GroupId']
+ else:
+ return sg.id
+
+ sec_group_id_list = []
+
+ if isinstance(sec_group_list, string_types):
+ sec_group_list = [sec_group_list]
+
+ # Get all security groups
+ if boto3:
+ if vpc_id:
+ filters = [
+ {
+ 'Name': 'vpc-id',
+ 'Values': [
+ vpc_id,
+ ]
+ }
+ ]
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ else:
+ all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+ else:
+ if vpc_id:
+ filters = {'vpc-id': vpc_id}
+ all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
+ else:
+ all_sec_groups = ec2_connection.get_all_security_groups()
+
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ sec_group_name_list = list(set(sec_group_list) - set(unmatched))
+
+ if len(unmatched) > 0:
+ # If we have unmatched names that look like an ID, assume they are
+ sec_group_id_list = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ if len(still_unmatched) > 0:
+ raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+
+ sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
+
+ return sec_group_id_list
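+
+ # As the docstring advises, wrap calls in a try block; ambiguous or invalid
+ # names raise ValueError (sketch, with illustrative names):
+ #
+ # try:
+ # group_ids = get_ec2_security_group_ids_from_names(['web-sg'], ec2_connection, vpc_id=vpc_id)
+ # except ValueError as e:
+ # module.fail_json(msg=to_native(e))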
+
+
+def _hashable_policy(policy, policy_list):
+ """
+ Takes a policy and returns a list, the contents of which are all hashable and sorted.
+ Example input policy:
+ {'Version': '2012-10-17',
+ 'Statement': [{'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }]}
+ Returned value:
+ [('Statement', ((('Action', (u's3:PutObjectAcl',)),
+ ('Effect', (u'Allow',)),
+ ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
+ ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
+ ('Version', (u'2012-10-17',)))]
+
+ """
+ # Amazon will automatically convert bool and int to strings for us
+ if isinstance(policy, bool):
+ return tuple([str(policy).lower()])
+ elif isinstance(policy, int):
+ return tuple([str(policy)])
+
+ if isinstance(policy, list):
+ for each in policy:
+ tupleified = _hashable_policy(each, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append(tupleified)
+ elif isinstance(policy, string_types) or isinstance(policy, binary_type):
+ policy = to_text(policy)
+ # convert root account ARNs to just account IDs
+ if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
+ policy = policy.split(':')[4]
+ return [policy]
+ elif isinstance(policy, dict):
+ sorted_keys = list(policy.keys())
+ sorted_keys.sort()
+ for key in sorted_keys:
+ tupleified = _hashable_policy(policy[key], [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append((key, tupleified))
+
+ # ensure we aren't returning deeply nested structures of length 1
+ if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
+ policy_list = policy_list[0]
+ if isinstance(policy_list, list):
+ if PY3_COMPARISON:
+ policy_list.sort(key=cmp_to_key(py3cmp))
+ else:
+ policy_list.sort()
+ return policy_list
+
+
+def py3cmp(a, b):
+ """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
+ try:
+ if a > b:
+ return 1
+ elif a < b:
+ return -1
+ else:
+ return 0
+ except TypeError as e:
+ # check to see if they're tuple-string
+ # always say strings are less than tuples (to maintain compatibility with python2)
+ str_ind = to_text(e).find('str')
+ tup_ind = to_text(e).find('tuple')
+ if -1 not in (str_ind, tup_ind):
+ if str_ind < tup_ind:
+ return -1
+ elif tup_ind < str_ind:
+ return 1
+ raise
+
+
+def compare_policies(current_policy, new_policy):
+ """ Compares the existing policy and the updated policy
+ Returns True if there is a difference between policies.
+ """
+ return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
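+
+ # Example (illustrative): two policies that differ only in key ordering
+ # produce the same hashable form, so no difference is reported:
+ #
+ # compare_policies({'Version': '2012-10-17', 'Statement': []},
+ # {'Statement': [], 'Version': '2012-10-17'}) # -> False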
+
+
+def sort_json_policy_dict(policy_dict):
+
+ """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
+ different orders will return true
+ Args:
+ policy_dict (dict): Dict representing IAM JSON policy.
+ Basic Usage:
+ >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
+ >>> sort_json_policy_dict(my_iam_policy)
+ Returns:
+ Dict: A copy of the policy as a dict, with any lists sorted (note that lists of strings sort lexicographically)
+ {
+ 'Principle': {
+ 'AWS': ['101', '14', '31', '7']
+ }
+ }
+ """
+
+ def value_is_list(my_list):
+
+ checked_list = []
+ for item in my_list:
+ if isinstance(item, dict):
+ checked_list.append(sort_json_policy_dict(item))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ # Sort list. If it's a list of dictionaries, sort by tuple of key-value
+ # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
+ checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
+ return checked_list
+
+ ordered_policy_dict = {}
+ for key, value in policy_dict.items():
+ if isinstance(value, dict):
+ ordered_policy_dict[key] = sort_json_policy_dict(value)
+ elif isinstance(value, list):
+ ordered_policy_dict[key] = value_is_list(value)
+ else:
+ ordered_policy_dict[key] = value
+
+ return ordered_policy_dict
+
+
+def map_complex_type(complex_type, type_map):
+ """
+ Casts elements within a dictionary to a specific type
+ Example of usage:
+
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int'
+ }
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+ This ensures the values for all mapped keys within the root element are cast to valid integers
+ """
+
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ if key in type_map:
+ if isinstance(type_map[key], list):
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key][0])
+ else:
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key])
+ else:
+ return complex_type
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(map_complex_type(
+ complex_type[i],
+ type_map))
+ elif type_map:
+ return globals()['__builtins__'][type_map](complex_type)
+ return new_type
+
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+ """
+ Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
+ Two dicts are returned - the first is tags to be set, the second is tags to remove. Since the AWS APIs differ,
+ these may not be usable out of the box.
+
+ :param current_tags_dict:
+ :param new_tags_dict:
+ :param purge_tags:
+ :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+ :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
+ """
+
+ tag_key_value_pairs_to_set = {}
+ tag_keys_to_unset = []
+
+ for key in current_tags_dict.keys():
+ if key not in new_tags_dict and purge_tags:
+ tag_keys_to_unset.append(key)
+
+ for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
+ if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
+ tag_key_value_pairs_to_set[key] = new_tags_dict[key]
+
+ return tag_key_value_pairs_to_set, tag_keys_to_unset
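+
+ # Example (illustrative values):
+ #
+ # current = {'Name': 'web', 'Env': 'prod'}
+ # new = {'Name': 'web', 'Owner': 'ops'}
+ # compare_aws_tags(current, new) # -> ({'Owner': 'ops'}, ['Env'])
+ # compare_aws_tags(current, new, purge_tags=False) # -> ({'Owner': 'ops'}, [])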
diff --git a/test/support/integration/plugins/module_utils/ecs/__init__.py b/test/support/integration/plugins/module_utils/ecs/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ecs/__init__.py
diff --git a/test/support/integration/plugins/module_utils/ecs/api.py b/test/support/integration/plugins/module_utils/ecs/api.py
new file mode 100644
index 00000000..d89b0333
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ecs/api.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is licensed under the
+# Modified BSD License. Modules you write using this snippet, which is embedded
+# dynamically by Ansible, still belong to the author of the module, and may assign
+# their own license to the complete work.
+#
+# Copyright (c), Entrust Datacard Corporation, 2019
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+import traceback
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.urls import Request
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+except ImportError:
+ YAML_FOUND = False
+ YAML_IMP_ERR = traceback.format_exc()
+else:
+ YAML_FOUND = True
+
+valid_file_format = re.compile(r".*(\.)(yml|yaml|json)$")
+
+
+def ecs_client_argument_spec():
+ return dict(
+ entrust_api_user=dict(type='str', required=True),
+ entrust_api_key=dict(type='str', required=True, no_log=True),
+ entrust_api_client_cert_path=dict(type='path', required=True),
+ entrust_api_client_cert_key_path=dict(type='path', required=True, no_log=True),
+ entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
+ )
+
+
+class SessionConfigurationException(Exception):
+ """ Raised if we cannot configure a session with the API """
+
+ pass
+
+
+class RestOperationException(Exception):
+ """ Encapsulate a REST API error """
+
+ def __init__(self, error):
+ self.status = to_native(error.get("status", None))
+ self.errors = [to_native(err.get("message")) for err in error.get("errors", {})]
+ self.message = to_native(" ".join(self.errors))
+
+
+def generate_docstring(operation_spec):
+ """Generate a docstring for an operation defined in operation_spec (swagger)"""
+ # Description of the operation
+ docs = operation_spec.get("description", "No Description")
+ docs += "\n\n"
+
+ # Parameters of the operation
+ parameters = operation_spec.get("parameters", [])
+ if len(parameters) != 0:
+ docs += "\tArguments:\n\n"
+ for parameter in parameters:
+ docs += "{0} ({1}:{2}): {3}\n".format(
+ parameter.get("name"),
+ parameter.get("type", "No Type"),
+ "Required" if parameter.get("required", False) else "Not Required",
+ parameter.get("description"),
+ )
+
+ return docs
+
+
+def bind(instance, method, operation_spec):
+ def binding_scope_fn(*args, **kwargs):
+ return method(instance, *args, **kwargs)
+
+ # Make sure we don't confuse users; add the proper name and documentation to the function.
+ # Users can use !help(<function>) to get help on the function from interactive python or pdb
+ operation_name = operation_spec.get("operationId").split("Using")[0]
+ binding_scope_fn.__name__ = str(operation_name)
+ binding_scope_fn.__doc__ = generate_docstring(operation_spec)
+
+ return binding_scope_fn
+
+
+class RestOperation(object):
+ def __init__(self, session, uri, method, parameters=None):
+ self.session = session
+ self.method = method
+ if parameters is None:
+ self.parameters = {}
+ else:
+ self.parameters = parameters
+ self.url = "{scheme}://{host}{base_path}{uri}".format(scheme="https", host=session._spec.get("host"), base_path=session._spec.get("basePath"), uri=uri)
+
+ def restmethod(self, *args, **kwargs):
+ """Do the hard work of making the request here"""
+
+ # gather named path parameters and do substitution on the URL
+ if self.parameters:
+ path_parameters = {}
+ body_parameters = {}
+ query_parameters = {}
+ for x in self.parameters:
+ expected_location = x.get("in")
+ key_name = x.get("name", None)
+ key_value = kwargs.get(key_name, None)
+ if expected_location == "path" and key_name and key_value:
+ path_parameters.update({key_name: key_value})
+ elif expected_location == "body" and key_name and key_value:
+ body_parameters.update({key_name: key_value})
+ elif expected_location == "query" and key_name and key_value:
+ query_parameters.update({key_name: key_value})
+
+ if len(body_parameters.keys()) >= 1:
+ body_parameters = body_parameters.get(list(body_parameters.keys())[0])
+ else:
+ body_parameters = None
+ else:
+ path_parameters = {}
+ query_parameters = {}
+ body_parameters = None
+
+ # This will fail with a KeyError if the required path parameters have not been set
+ url = self.url.format(**path_parameters)
+ if query_parameters:
+ # modify the URL to add the query parameters
+ url = url + "?" + urlencode(query_parameters)
+
+ try:
+ if body_parameters:
+ body_parameters_json = json.dumps(body_parameters)
+ response = self.session.request.open(method=self.method, url=url, data=body_parameters_json)
+ else:
+ response = self.session.request.open(method=self.method, url=url)
+ request_error = False
+ except HTTPError as e:
+ # An HTTPError has the same methods available as a valid response from request.open
+ response = e
+ request_error = True
+
+ # Return the result if JSON and success ({} for empty responses)
+ # Raise an exception if there was a failure.
+ try:
+ result_code = response.getcode()
+ result = json.loads(response.read())
+ except ValueError:
+ result = {}
+
+ if result or result == {}:
+ if result_code and result_code < 400:
+ return result
+ else:
+ raise RestOperationException(result)
+
+ # Raise a generic RestOperationException if this fails
+ raise RestOperationException({"status": result_code, "errors": [{"message": "REST Operation Failed"}]})
+
+
+class Resource(object):
+ """ Implement basic CRUD operations against a path. """
+
+ def __init__(self, session):
+ self.session = session
+ self.parameters = {}
+
+ for url in session._spec.get("paths").keys():
+ methods = session._spec.get("paths").get(url)
+ for method in methods.keys():
+ operation_spec = methods.get(method)
+ operation_name = operation_spec.get("operationId", None)
+ parameters = operation_spec.get("parameters")
+
+ if not operation_name:
+ if method.lower() == "post":
+ operation_name = "Create"
+ elif method.lower() == "get":
+ operation_name = "Get"
+ elif method.lower() == "put":
+ operation_name = "Update"
+ elif method.lower() == "delete":
+ operation_name = "Delete"
+ elif method.lower() == "patch":
+ operation_name = "Patch"
+ else:
+ raise SessionConfigurationException(to_native("Invalid REST method type {0}".format(method)))
+
+ # Get the non-parameter parts of the URL and append to the operation name
+ # e.g /application/version -> GetApplicationVersion
+ # e.g. /application/{id} -> GetApplication
+ # This may lead to duplicates, which we must prevent.
+ operation_name += re.sub(r"{(.*)}", "", url).replace("/", " ").title().replace(" ", "")
+ operation_spec["operationId"] = operation_name
+
+ op = RestOperation(session, url, method, parameters)
+ setattr(self, operation_name, bind(self, op.restmethod, operation_spec))
+
+
+# Session to encapsulate the connection parameters of the module_utils Request object, the api spec, etc
+class ECSSession(object):
+ def __init__(self, name, **kwargs):
+ """
+ Initialize our session
+ """
+
+ self._set_config(name, **kwargs)
+
+ def client(self):
+ resource = Resource(self)
+ return resource
+
+ def _set_config(self, name, **kwargs):
+ headers = {
+ "Content-Type": "application/json",
+ "Connection": "keep-alive",
+ }
+ self.request = Request(headers=headers, timeout=60)
+
+ configurators = [self._read_config_vars]
+ for configurator in configurators:
+ self._config = configurator(name, **kwargs)
+ if self._config:
+ break
+ if self._config is None:
+ raise SessionConfigurationException(to_native("No Configuration Found."))
+
+ # set up auth if passed
+ entrust_api_user = self.get_config("entrust_api_user")
+ entrust_api_key = self.get_config("entrust_api_key")
+ if entrust_api_user and entrust_api_key:
+ self.request.url_username = entrust_api_user
+ self.request.url_password = entrust_api_key
+ else:
+ raise SessionConfigurationException(to_native("User and key must be provided."))
+
+ # set up client certificate if passed (support all-in one or cert + key)
+ entrust_api_cert = self.get_config("entrust_api_cert")
+ entrust_api_cert_key = self.get_config("entrust_api_cert_key")
+ if entrust_api_cert:
+ self.request.client_cert = entrust_api_cert
+ if entrust_api_cert_key:
+ self.request.client_key = entrust_api_cert_key
+ else:
+ raise SessionConfigurationException(to_native("Client certificate for authentication to the API must be provided."))
+
+ # set up the spec
+ entrust_api_specification_path = self.get_config("entrust_api_specification_path")
+
+ if not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path):
+ raise SessionConfigurationException(to_native("OpenAPI specification was not found at location {0}.".format(entrust_api_specification_path)))
+ if not valid_file_format.match(entrust_api_specification_path):
+ raise SessionConfigurationException(to_native("OpenAPI specification filename must end in .json, .yml or .yaml"))
+
+ self.verify = True
+
+ if entrust_api_specification_path.startswith("http"):
+ try:
+ http_response = Request().open(method="GET", url=entrust_api_specification_path)
+ http_response_contents = http_response.read()
+ if entrust_api_specification_path.endswith(".json"):
+ self._spec = json.loads(http_response_contents) # parse the already-read bytes; json.load expects a file object
+ elif entrust_api_specification_path.endswith(".yml") or entrust_api_specification_path.endswith(".yaml"):
+ self._spec = yaml.safe_load(http_response_contents)
+ except HTTPError as e:
+ raise SessionConfigurationException(to_native("Error downloading specification from address '{0}', received error code '{1}'".format(
+ entrust_api_specification_path, e.getcode())))
+ else:
+ with open(entrust_api_specification_path) as f:
+ if ".json" in entrust_api_specification_path:
+ self._spec = json.load(f)
+ elif ".yml" in entrust_api_specification_path or ".yaml" in entrust_api_specification_path:
+ self._spec = yaml.safe_load(f)
+
+ def get_config(self, item):
+ return self._config.get(item, None)
+
+ def _read_config_vars(self, name, **kwargs):
+ """ Read configuration from variables passed to the module. """
+ config = {}
+
+ entrust_api_specification_path = kwargs.get("entrust_api_specification_path")
+ if not entrust_api_specification_path or (not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path)):
+ raise SessionConfigurationException(
+ to_native(
+ "Parameter provided for entrust_api_specification_path of value '{0}' was not a valid file path or HTTPS address.".format(
+ entrust_api_specification_path
+ )
+ )
+ )
+
+ for required_file in ["entrust_api_cert", "entrust_api_cert_key"]:
+ file_path = kwargs.get(required_file)
+ if not file_path or not os.path.isfile(file_path):
+ raise SessionConfigurationException(
+ to_native("Parameter provided for {0} of value '{1}' was not a valid file path.".format(required_file, file_path))
+ )
+
+ for required_var in ["entrust_api_user", "entrust_api_key"]:
+ if not kwargs.get(required_var):
+ raise SessionConfigurationException(to_native("Parameter provided for {0} was missing.".format(required_var)))
+
+ config["entrust_api_cert"] = kwargs.get("entrust_api_cert")
+ config["entrust_api_cert_key"] = kwargs.get("entrust_api_cert_key")
+ config["entrust_api_specification_path"] = kwargs.get("entrust_api_specification_path")
+ config["entrust_api_user"] = kwargs.get("entrust_api_user")
+ config["entrust_api_key"] = kwargs.get("entrust_api_key")
+
+ return config
+
+
+def ECSClient(entrust_api_user=None, entrust_api_key=None, entrust_api_cert=None, entrust_api_cert_key=None, entrust_api_specification_path=None):
+ """Create an ECS client"""
+
+ if not YAML_FOUND:
+ # plain Exception subclasses accept no keyword arguments, so fold the import error into the message
+ raise SessionConfigurationException(to_native("%s: %s" % (missing_required_lib("PyYAML"), YAML_IMP_ERR)))
+
+ if entrust_api_specification_path is None:
+ entrust_api_specification_path = "https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml"
+
+ # Not functionally necessary with current uses of this module_util, but better to be explicit for future use cases
+ entrust_api_user = to_text(entrust_api_user)
+ entrust_api_key = to_text(entrust_api_key)
+ entrust_api_cert_key = to_text(entrust_api_cert_key)
+ entrust_api_specification_path = to_text(entrust_api_specification_path)
+
+ return ECSSession(
+ "ecs",
+ entrust_api_user=entrust_api_user,
+ entrust_api_key=entrust_api_key,
+ entrust_api_cert=entrust_api_cert,
+ entrust_api_cert_key=entrust_api_cert_key,
+ entrust_api_specification_path=entrust_api_specification_path,
+ ).client()
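+
+ # Usage sketch (the operation name is hypothetical; real names are derived at
+ # runtime from the operationId entries, or the HTTP method plus path, of the
+ # downloaded OpenAPI specification):
+ #
+ # client = ECSClient(
+ # entrust_api_user=api_user,
+ # entrust_api_key=api_key,
+ # entrust_api_cert=cert_path,
+ # entrust_api_cert_key=cert_key_path,
+ # )
+ # result = client.GetCertificates() # method bound dynamically by Resource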
diff --git a/test/support/integration/plugins/module_utils/mysql.py b/test/support/integration/plugins/module_utils/mysql.py
new file mode 100644
index 00000000..46198f36
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/mysql.py
@@ -0,0 +1,106 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
+# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+try:
+ import pymysql as mysql_driver
+ _mysql_cursor_param = 'cursor'
+except ImportError:
+ try:
+ import MySQLdb as mysql_driver
+ import MySQLdb.cursors
+ _mysql_cursor_param = 'cursorclass'
+ except ImportError:
+ mysql_driver = None
+
+mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.'
+
+
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
+ connect_timeout=30, autocommit=False):
+ config = {}
+
+ if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
+ config['ssl'] = {}
+
+ if module.params['login_unix_socket']:
+ config['unix_socket'] = module.params['login_unix_socket']
+ else:
+ config['host'] = module.params['login_host']
+ config['port'] = module.params['login_port']
+
+ if os.path.exists(config_file):
+ config['read_default_file'] = config_file
+
+ # If login_user or login_password are given, they should override the
+ # config file
+ if login_user is not None:
+ config['user'] = login_user
+ if login_password is not None:
+ config['passwd'] = login_password
+ if ssl_cert is not None:
+ config['ssl']['cert'] = ssl_cert
+ if ssl_key is not None:
+ config['ssl']['key'] = ssl_key
+ if ssl_ca is not None:
+ config['ssl']['ca'] = ssl_ca
+ if db is not None:
+ config['db'] = db
+ if connect_timeout is not None:
+ config['connect_timeout'] = connect_timeout
+
+ if _mysql_cursor_param == 'cursor':
+ # In case of PyMySQL driver:
+ db_connection = mysql_driver.connect(autocommit=autocommit, **config)
+ else:
+ # In case of MySQLdb driver
+ db_connection = mysql_driver.connect(**config)
+ if autocommit:
+ db_connection.autocommit(True)
+
+ if cursor_class == 'DictCursor':
+ return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection
+ else:
+ return db_connection.cursor(), db_connection
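+
+ # Usage sketch from a module (illustrative); the parameters usually come from
+ # mysql_common_argument_spec() below:
+ #
+ # cursor, db_connection = mysql_connect(
+ # module,
+ # login_user=module.params['login_user'],
+ # login_password=module.params['login_password'],
+ # config_file=module.params['config_file'],
+ # cursor_class='DictCursor')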
+
+
+def mysql_common_argument_spec():
+ return dict(
+ login_user=dict(type='str', default=None),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=3306),
+ login_unix_socket=dict(type='str'),
+ config_file=dict(type='path', default='~/.my.cnf'),
+ connect_timeout=dict(type='int', default=30),
+ client_cert=dict(type='path', aliases=['ssl_cert']),
+ client_key=dict(type='path', aliases=['ssl_key']),
+ ca_cert=dict(type='path', aliases=['ssl_ca']),
+ )
diff --git a/test/support/integration/plugins/module_utils/net_tools/__init__.py b/test/support/integration/plugins/module_utils/net_tools/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/net_tools/__init__.py
diff --git a/test/support/integration/plugins/module_utils/network/__init__.py b/test/support/integration/plugins/module_utils/network/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/__init__.py
diff --git a/test/support/integration/plugins/module_utils/network/common/__init__.py b/test/support/integration/plugins/module_utils/network/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/common/__init__.py
diff --git a/test/support/integration/plugins/module_utils/network/common/utils.py b/test/support/integration/plugins/module_utils/network/common/utils.py
new file mode 100644
index 00000000..80317387
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/common/utils.py
@@ -0,0 +1,643 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Networking tools for network modules only
+
+import re
+import ast
+import operator
+import socket
+import json
+
+from itertools import chain
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils import basic
+from ansible.module_utils.parsing.convert_bool import boolean
+
+# Backwards compatibility for 3rd party modules
+ # TODO(pabelanger): With the move to ansible.netcommon, we should clean this code
+ # up and have modules import it directly themselves.
+from ansible.module_utils.common.network import ( # noqa: F401
+ to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
+)
+
+try:
+ from jinja2 import Environment, StrictUndefined
+ from jinja2.exceptions import UndefinedError
+ HAS_JINJA2 = True
+except ImportError:
+ HAS_JINJA2 = False
+
+
+OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
+ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple, set)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, string_types):
+ item = to_text(item).split('\n')
+ yield item
+
+
+def transform_commands(module):
+ transform = ComplexList(dict(
+ command=dict(key=True),
+ output=dict(),
+ prompt=dict(type='list'),
+ answer=dict(type='list'),
+ newline=dict(type='bool', default=True),
+ sendonly=dict(type='bool', default=False),
+ check_all=dict(type='bool', default=False),
+ ), module)
+
+ return transform(module.params['commands'])
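+
+ # Example (illustrative): module.params['commands'] of ['show version'] is
+ # expanded to [{'command': 'show version', 'output': None, 'prompt': None,
+ # 'answer': None, 'newline': True, 'sendonly': False, 'check_all': False}].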
+
+
+def sort_list(val):
+ if isinstance(val, list):
+ return sorted(val)
+ return val
+
+
+class Entity(object):
+ """Transforms a dict to with an argument spec
+
+ This class will take a dict and apply an Ansible argument spec to the
+ values. The resulting dict will contain all of the keys in the param
+ with appropriate values set.
+
+ Example::
+
+ argument_spec = dict(
+ command=dict(key=True),
+ display=dict(default='text', choices=['text', 'json']),
+ validate=dict(type='bool')
+ )
+ transform = Entity(module, argument_spec)
+ value = dict(command='foo')
+ result = transform(value)
+ print(result)
+ {'command': 'foo', 'display': 'text', 'validate': None}
+
+ Supported argument spec:
+ * key - specifies how to map a single value to a dict
+ * read_from - read and apply the argument_spec from the module
+ * required - a value is required
+ * type - type of value (uses AnsibleModule type checker)
+ * fallback - implements fallback function
+ * choices - set of valid options
+ * default - default value
+ """
+
+ def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
+ args = [] if args is None else args
+
+ self._attributes = attrs or {}
+ self._module = module
+
+ for arg in args:
+ self._attributes[arg] = dict()
+ if from_argspec:
+ self._attributes[arg]['read_from'] = arg
+ if keys and arg in keys:
+ self._attributes[arg]['key'] = True
+
+ self.attr_names = frozenset(self._attributes.keys())
+
+ _has_key = False
+
+ for name, attr in iteritems(self._attributes):
+ if attr.get('read_from'):
+ if attr['read_from'] not in self._module.argument_spec:
+ module.fail_json(msg='argument %s does not exist' % attr['read_from'])
+ spec = self._module.argument_spec.get(attr['read_from'])
+ for key, value in iteritems(spec):
+ if key not in attr:
+ attr[key] = value
+
+ if attr.get('key'):
+ if _has_key:
+ module.fail_json(msg='only one key value can be specified')
+ _has_key = True
+ attr['required'] = True
+
+ def serialize(self):
+ return self._attributes
+
+ def to_dict(self, value):
+ obj = {}
+ for name, attr in iteritems(self._attributes):
+ if attr.get('key'):
+ obj[name] = value
+ else:
+ obj[name] = attr.get('default')
+ return obj
+
+ def __call__(self, value, strict=True):
+ if not isinstance(value, dict):
+ value = self.to_dict(value)
+
+ if strict:
+ unknown = set(value).difference(self.attr_names)
+ if unknown:
+ self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown))
+
+ for name, attr in iteritems(self._attributes):
+ if value.get(name) is None:
+ value[name] = attr.get('default')
+
+ if attr.get('fallback') and not value.get(name):
+ fallback = attr.get('fallback', (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
+ except basic.AnsibleFallbackNotFound:
+ continue
+
+ if attr.get('required') and value.get(name) is None:
+ self._module.fail_json(msg='missing required attribute %s' % name)
+
+ if 'choices' in attr:
+ if value[name] not in attr['choices']:
+ self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name]))
+
+ if value[name] is not None:
+ value_type = attr.get('type', 'str')
+ type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
+ type_checker(value[name])
+ elif value.get(name):
+ value[name] = self._module.params[name]
+
+ return value
+
+
+class EntityCollection(Entity):
+ """Extends ```Entity``` to handle a list of dicts """
+
+ def __call__(self, iterable, strict=True):
+ if iterable is None:
+ iterable = [super(EntityCollection, self).__call__(self._module.params, strict)]
+
+ if not isinstance(iterable, (list, tuple)):
+ self._module.fail_json(msg='value must be an iterable')
+
+ return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
+
+
+# these two are for backwards compatibility and can be removed once all of the
+# modules that use them are updated
+class ComplexDict(Entity):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
+
+
+class ComplexList(EntityCollection):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
+
+
+def dict_diff(base, comparable):
+ """ Generate a dict object of differences
+
+ This function will compare two dict objects and return the difference
+ between them as a dict object. For scalar values, the key will reflect
+ the updated value. If the key does not exist in `comparable`, then no
+ key will be returned. For lists, the value in comparable will wholly replace
+ the value in base for the key. For dicts, the returned value will only
+ return keys that are different.
+
+ :param base: dict object to base the diff on
+ :param comparable: dict object to compare against base
+
+ :returns: new dict object with differences
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(comparable, dict):
+ if comparable is None:
+ comparable = dict()
+ else:
+ raise AssertionError("`comparable` must be of type <dict>")
+
+ updates = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ item = comparable.get(key)
+ if item is not None:
+ sub_diff = dict_diff(value, comparable[key])
+ if sub_diff:
+ updates[key] = sub_diff
+ else:
+ comparable_value = comparable.get(key)
+ if comparable_value is not None:
+ if sort_list(base[key]) != sort_list(comparable_value):
+ updates[key] = comparable_value
+
+ for key in set(comparable.keys()).difference(base.keys()):
+ updates[key] = comparable.get(key)
+
+ return updates
+
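+# Illustrative example (hypothetical values, not part of the API): scalars
+# reflect the updated value, lists are replaced wholly, and nested dicts
+# are diffed recursively:
+#
+#   dict_diff({'a': 1, 'b': [1, 2], 'c': {'d': 3}},
+#             {'a': 2, 'b': [9], 'c': {'d': 3}})
+#   => {'a': 2, 'b': [9]}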
+
+def dict_merge(base, other):
+ """ Return a new dict object that combines base and other
+
+ This will create a new dict object that is a combination of the key/value
+ pairs from base and other. When both keys exist, the value will be
+ selected from other. If the value is a list object, the two lists will
+ be combined and duplicate entries removed.
+
+ :param base: dict object to serve as base
+ :param other: dict object to combine with base
+
+ :returns: new combined dict object
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(other, dict):
+ raise AssertionError("`other` must be of type <dict>")
+
+ combined = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ if isinstance(other[key], Mapping):
+ combined[key] = dict_merge(value, other[key])
+ else:
+ combined[key] = other[key]
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ elif isinstance(value, list):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ try:
+ combined[key] = list(set(chain(value, item)))
+ except TypeError:
+ value.extend([i for i in item if i not in value])
+ combined[key] = value
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ else:
+ if key in other:
+ other_value = other.get(key)
+ if other_value is not None:
+ if sort_list(base[key]) != sort_list(other_value):
+ combined[key] = other_value
+ else:
+ combined[key] = value
+ else:
+ combined[key] = other_value
+ else:
+ combined[key] = value
+
+ for key in set(other.keys()).difference(base.keys()):
+ combined[key] = other.get(key)
+
+ return combined
+
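+# Illustrative example (hypothetical values): scalars from `other` win,
+# and lists are combined via set(), so the merged list order is not
+# guaranteed:
+#
+#   dict_merge({'a': 1, 'b': [1, 2]}, {'a': 2, 'b': [2, 3], 'c': 4})
+#   => {'a': 2, 'b': [1, 2, 3], 'c': 4}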
+
+def param_list_to_dict(param_list, unique_key="name", remove_key=True):
+ """Rotates a list of dictionaries to be a dictionary of dictionaries.
+
+ :param param_list: The aforementioned list of dictionaries
+ :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
+ behind this key will be the key each dictionary can be found at in the new root dictionary
+ :param remove_key: If True, remove unique_key from the individual dictionaries before returning.
+ """
+ param_dict = {}
+ for params in param_list:
+ params = params.copy()
+ if remove_key:
+ name = params.pop(unique_key)
+ else:
+ name = params.get(unique_key)
+ param_dict[name] = params
+
+ return param_dict
+
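+# Illustrative example (hypothetical values), using the default
+# unique_key of "name":
+#
+#   param_list_to_dict([{'name': 'eth0', 'mtu': 1500},
+#                       {'name': 'eth1', 'mtu': 9000}])
+#   => {'eth0': {'mtu': 1500}, 'eth1': {'mtu': 9000}}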
+
+def conditional(expr, val, cast=None):
+ match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I)
+ if match:
+ op, arg = match.groups()
+ else:
+ op = 'eq'
+ if ' ' in str(expr):
+ raise AssertionError('invalid expression: cannot contain spaces')
+ arg = expr
+
+ if cast is None and val is not None:
+ arg = type(val)(arg)
+ elif callable(cast):
+ arg = cast(arg)
+ val = cast(val)
+
+ op = next((oper for alias, oper in ALIASES if op == alias), op)
+
+ if not hasattr(operator, op) and op not in OPERATORS:
+ raise ValueError('unknown operator: %s' % op)
+
+ func = getattr(operator, op)
+ return func(val, arg)
+
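+# Illustrative examples (hypothetical values): the expression is either an
+# operator call such as ge(2) or a bare value (implicit equality):
+#
+#   conditional('ge(2)', 3)  => True   (operator.ge(3, 2))
+#   conditional('1', 1)      => True   (arg is cast to type(val) first)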
+
+def ternary(value, true_val, false_val):
+ ''' value ? true_val : false_val '''
+ if value:
+ return true_val
+ else:
+ return false_val
+
+
+def remove_default_spec(spec):
+ for item in spec:
+ if 'default' in spec[item]:
+ del spec[item]['default']
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count('.') == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def validate_prefix(prefix):
+ if prefix and not 0 <= int(prefix) <= 32:
+ return False
+ return True
+
+
+def load_provider(spec, args):
+ provider = args.get('provider') or {}
+ for key, value in iteritems(spec):
+ if key not in provider:
+ if 'fallback' in value:
+ provider[key] = _fallback(value['fallback'])
+ elif 'default' in value:
+ provider[key] = value['default']
+ else:
+ provider[key] = None
+ if 'authorize' in provider:
+ # Coerce authorize to a boolean if a string has somehow snuck in.
+ provider['authorize'] = boolean(provider['authorize'] or False)
+ args['provider'] = provider
+ return provider
+
+
+def _fallback(fallback):
+ strategy = fallback[0]
+ args = []
+ kwargs = {}
+
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ kwargs = item
+ else:
+ args = item
+ try:
+ return strategy(*args, **kwargs)
+ except basic.AnsibleFallbackNotFound:
+ pass
+
+
+def generate_dict(spec):
+ """
+ Generate a dictionary that is in sync with the argspec
+
+ :param spec: A dictionary that is the argspec of the module
+ :rtype: A dictionary
+ :returns: A dictionary in sync with the argspec, populated with default values
+ """
+ obj = {}
+ if not spec:
+ return obj
+
+ for key, val in iteritems(spec):
+ if 'default' in val:
+ dct = {key: val['default']}
+ elif 'type' in val and val['type'] == 'dict':
+ dct = {key: generate_dict(val['options'])}
+ else:
+ dct = {key: None}
+ obj.update(dct)
+ return obj
+
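+# Illustrative example (hypothetical spec): defaults are carried over,
+# nested 'dict' options are expanded, everything else becomes None:
+#
+#   generate_dict({'mtu': {'type': 'int', 'default': 1500},
+#                  'name': {'type': 'str'}})
+#   => {'mtu': 1500, 'name': None}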
+
+def parse_conf_arg(cfg, arg):
+ """
+ Parse config based on argument
+
+ :param cfg: A text string which is a line of configuration.
+ :param arg: A text string which is to be matched.
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r'%s (.+)(\n|$)' % arg, cfg, re.M)
+ if match:
+ result = match.group(1).strip()
+ else:
+ result = None
+ return result
+
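+# Illustrative example (hypothetical config text):
+#
+#   parse_conf_arg('interface eth0\n mtu 1500\n', 'mtu')  => '1500'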
+
+def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str='no'):
+ """
+ Parse config based on command
+
+ :param cfg: A text string which is a line of configuration.
+ :param cmd: A text string which is the command to be matched
+ :param res1: A text string to be returned if the command is present
+ :param res2: A text string to be returned if the negate command
+ is present
+ :param delete_str: A text string to identify the start of the
+ negate command
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r'\n\s+%s(\n|$)' % cmd, cfg)
+ if match:
+ return res1
+ if res2 is not None:
+ match = re.search(r'\n\s+%s %s(\n|$)' % (delete_str, cmd), cfg)
+ if match:
+ return res2
+ return None
+
+
+def get_xml_conf_arg(cfg, path, data='text'):
+ """
+ :param cfg: The top level configuration lxml Element tree object
+ :param path: The relative xpath w.r.t to top level element (cfg)
+ to be searched in the xml hierarchy
+ :param data: The type of data to be returned for the matched xml node.
+ Valid values are text, tag, attrib, with default as text.
+ :return: Returns the required type for the matched xml node or else None
+ """
+ match = cfg.xpath(path)
+ if len(match):
+ if data == 'tag':
+ result = getattr(match[0], 'tag')
+ elif data == 'attrib':
+ result = getattr(match[0], 'attrib')
+ else:
+ result = getattr(match[0], 'text')
+ else:
+ result = None
+ return result
+
+
+def remove_empties(cfg_dict):
+ """
+ Generate final config dictionary
+
+ :param cfg_dict: A dictionary parsed in the facts system
+ :rtype: A dictionary
+ :returns: A dictionary with keys that have null values eliminated
+ """
+ final_cfg = {}
+ if not cfg_dict:
+ return final_cfg
+
+ for key, val in iteritems(cfg_dict):
+ dct = None
+ if isinstance(val, dict):
+ child_val = remove_empties(val)
+ if child_val:
+ dct = {key: child_val}
+ elif (isinstance(val, list) and val
+ and all([isinstance(x, dict) for x in val])):
+ child_val = [remove_empties(x) for x in val]
+ if child_val:
+ dct = {key: child_val}
+ elif val not in [None, [], {}, (), '']:
+ dct = {key: val}
+ if dct:
+ final_cfg.update(dct)
+ return final_cfg
+
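+# Illustrative example (hypothetical facts dict): None values and empty
+# containers are pruned recursively:
+#
+#   remove_empties({'name': 'eth0', 'description': None,
+#                   'vlans': [], 'options': {'mtu': None}})
+#   => {'name': 'eth0'}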
+
+def validate_config(spec, data):
+ """
+ Validate the input data against the AnsibleModule spec format
+ :param spec: Ansible argument spec
+ :param data: Data to be validated
+ :return: Validated data
+ """
+ params = basic._ANSIBLE_ARGS
+ basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': data}))
+ validated_data = basic.AnsibleModule(spec).params
+ basic._ANSIBLE_ARGS = params
+ return validated_data
+
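+# Usage sketch (hypothetical spec): the data is round-tripped through a
+# temporary AnsibleModule so the normal argspec validation applies:
+#
+#   spec = {'name': {'type': 'str', 'required': True}}
+#   validate_config(spec, {'name': 'eth0'})  # => validated params dict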
+
+def search_obj_in_list(name, lst, key='name'):
+ if not lst:
+ return None
+ else:
+ for item in lst:
+ if item.get(key) == name:
+ return item
+
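+# Illustrative example (hypothetical values):
+#
+#   search_obj_in_list('eth0', [{'name': 'eth0'}, {'name': 'eth1'}])
+#   => {'name': 'eth0'}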
+
+class Template:
+
+ def __init__(self):
+ if not HAS_JINJA2:
+ raise ImportError("jinja2 is required but does not appear to be installed. "
+ "It can be installed using `pip install jinja2`")
+
+ self.env = Environment(undefined=StrictUndefined)
+ self.env.filters.update({'ternary': ternary})
+
+ def __call__(self, value, variables=None, fail_on_undefined=True):
+ variables = variables or {}
+
+ if not self.contains_vars(value):
+ return value
+
+ try:
+ value = self.env.from_string(value).render(variables)
+ except UndefinedError:
+ if not fail_on_undefined:
+ return None
+ raise
+
+ if value:
+ try:
+ return ast.literal_eval(value)
+ except Exception:
+ return str(value)
+ else:
+ return None
+
+ def contains_vars(self, data):
+ if isinstance(data, string_types):
+ for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string):
+ if marker in data:
+ return True
+ return False
diff --git a/test/support/integration/plugins/module_utils/postgres.py b/test/support/integration/plugins/module_utils/postgres.py
new file mode 100644
index 00000000..63811c30
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/postgres.py
@@ -0,0 +1,330 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+psycopg2 = None  # This line is needed for unit tests
+try:
+ import psycopg2
+ HAS_PSYCOPG2 = True
+except ImportError:
+ HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+ """
+ Return a dictionary with connection options.
+
+ The options are commonly used by most of the PostgreSQL modules.
+ """
+ return dict(
+ login_user=dict(default='postgres'),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default=''),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ )
+
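+# Typical usage sketch (the 'db' option here is a hypothetical
+# module-specific addition):
+#
+#   argument_spec = postgres_common_argument_spec()
+#   argument_spec.update(db=dict(type='str'))
+#   module = AnsibleModule(argument_spec=argument_spec)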
+
+def ensure_required_libs(module):
+ """Check required libraries."""
+ if not HAS_PSYCOPG2:
+ module.fail_json(msg=missing_required_lib('psycopg2'))
+
+ if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+ module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+ """Connect to a PostgreSQL database.
+
+ Return psycopg2 connection object.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ conn_params (dict) -- dictionary with connection parameters
+
+ Kwargs:
+ autocommit (bool) -- commit automatically (default False)
+ fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
+ """
+ ensure_required_libs(module)
+
+ db_connection = None
+ try:
+ db_connection = psycopg2.connect(**conn_params)
+ if autocommit:
+ if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ # Switch role, if specified:
+ if module.params.get('session_role'):
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ try:
+ cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e))
+ finally:
+ cursor.close()
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least '
+ 'version 8.4 to support sslrootcert')
+
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ except Exception as e:
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ return db_connection
+
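+# Usage sketch, assuming module.params carries the common connection
+# options and using get_conn_params() defined below:
+#
+#   conn_params = get_conn_params(module, module.params)
+#   db_connection = connect_to_db(module, conn_params, autocommit=True)
+#   cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)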
+
+def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
+ """Execute SQL.
+
+ Auxiliary function for PostgreSQL user classes.
+
+ Returns a query result if possible or True/False if ddl=True arg was passed.
+ This is necessary for statements that don't return any result (like DDL queries).
+
+ Args:
+ obj (obj) -- must be an object of a user class.
+ The object must have module (AnsibleModule class object) and
+ cursor (psycopg cursor object) attributes
+ query (str) -- SQL query to execute
+
+ Kwargs:
+ query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+ could be a dict or tuple
+ ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
+ (default False)
+ add_to_executed (bool) -- append the query to obj.executed_queries attribute
+ dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+ to obj.executed_queries list and return True (default False)
+ """
+
+ if dont_exec:
+ # This is usually needed to return queries in check_mode
+ # without execution
+ query = obj.cursor.mogrify(query, query_params)
+ if add_to_executed:
+ obj.executed_queries.append(query)
+
+ return True
+
+ try:
+ if query_params is not None:
+ obj.cursor.execute(query, query_params)
+ else:
+ obj.cursor.execute(query)
+
+ if add_to_executed:
+ if query_params is not None:
+ obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+ else:
+ obj.executed_queries.append(query)
+
+ if not ddl:
+ res = obj.cursor.fetchall()
+ return res
+ return True
+ except Exception as e:
+ obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ return False
+
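+# Usage sketch from a user class that exposes module, cursor and
+# executed_queries attributes (the role name is hypothetical):
+#
+#   res = exec_sql(self, 'SELECT rolname FROM pg_roles WHERE rolname = %(name)s',
+#                  query_params={'name': 'alice'})
+#   changed = exec_sql(self, 'ALTER ROLE "alice" NOLOGIN', ddl=True)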
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+ """Get connection parameters from the passed dictionary.
+
+ Return a dictionary with parameters to connect to PostgreSQL server.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ params_dict (dict) -- dictionary with variables
+
+ Kwargs:
+ warn_db_default (bool) -- warn that the default DB is used (default True)
+ """
+ # To use default values, keyword arguments must be absent, so check
+ # which values are empty and don't include them in the return dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ # Might be different in the modules:
+ if params_dict.get('db'):
+ params_map['db'] = 'database'
+ elif params_dict.get('database'):
+ params_map['database'] = 'database'
+ elif params_dict.get('login_db'):
+ params_map['login_db'] = 'database'
+ else:
+ if warn_db_default:
+ module.warn('Database name has not been passed, '
+ 'using the default database to connect.')
+
+ kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
+ if is_localhost and params_dict["login_unix_socket"] != "":
+ kw["host"] = params_dict["login_unix_socket"]
+
+ return kw
+
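+# Illustrative example (hypothetical values): empty strings and None are
+# dropped so that libpq defaults apply:
+#
+#   get_conn_params(module, {'login_user': 'postgres', 'login_password': '',
+#                            'login_host': '', 'login_unix_socket': '',
+#                            'port': 5432, 'ssl_mode': 'prefer',
+#                            'ca_cert': None, 'db': 'test'})
+#   => {'user': 'postgres', 'port': 5432, 'sslmode': 'prefer', 'database': 'test'}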
+
+class PgMembership(object):
+ def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
+ self.module = module
+ self.cursor = cursor
+ self.target_roles = [r.strip() for r in target_roles]
+ self.groups = [r.strip() for r in groups]
+ self.executed_queries = []
+ self.granted = {}
+ self.revoked = {}
+ self.fail_on_role = fail_on_role
+ self.non_existent_roles = []
+ self.changed = False
+ self.__check_roles_exist()
+
+ def grant(self):
+ for group in self.groups:
+ self.granted[group] = []
+
+ for role in self.target_roles:
+ # If role is in a group now, pass:
+ if self.__check_membership(group, role):
+ continue
+
+ query = 'GRANT "%s" TO "%s"' % (group, role)
+ self.changed = exec_sql(self, query, ddl=True)
+
+ if self.changed:
+ self.granted[group].append(role)
+
+ return self.changed
+
+ def revoke(self):
+ for group in self.groups:
+ self.revoked[group] = []
+
+ for role in self.target_roles:
+ # If role is not in a group now, pass:
+ if not self.__check_membership(group, role):
+ continue
+
+ query = 'REVOKE "%s" FROM "%s"' % (group, role)
+ self.changed = exec_sql(self, query, ddl=True)
+
+ if self.changed:
+ self.revoked[group].append(role)
+
+ return self.changed
+
+ def __check_membership(self, src_role, dst_role):
+ query = ("SELECT ARRAY(SELECT b.rolname FROM "
+ "pg_catalog.pg_auth_members m "
+ "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(dst_role)s")
+
+ res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
+ membership = []
+ if res:
+ membership = res[0][0]
+
+ if not membership:
+ return False
+
+ if src_role in membership:
+ return True
+
+ return False
+
+ def __check_roles_exist(self):
+ existent_groups = self.__roles_exist(self.groups)
+ existent_roles = self.__roles_exist(self.target_roles)
+
+ for group in self.groups:
+ if group not in existent_groups:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % group)
+ else:
+ self.module.warn("Role %s does not exist, pass" % group)
+ self.non_existent_roles.append(group)
+
+ for role in self.target_roles:
+ if role not in existent_roles:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % role)
+ else:
+ self.module.warn("Role %s does not exist, pass" % role)
+
+ if role not in self.groups:
+ self.non_existent_roles.append(role)
+
+ else:
+ if self.fail_on_role:
+ self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
+ else:
+ self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))
+
+ # Update role lists, excluding non existent roles:
+ self.groups = [g for g in self.groups if g not in self.non_existent_roles]
+
+ self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
+
+ def __roles_exist(self, roles):
+ tmp = ["'" + x + "'" for x in roles]
+ query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
+ return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
diff --git a/test/support/integration/plugins/module_utils/rabbitmq.py b/test/support/integration/plugins/module_utils/rabbitmq.py
new file mode 100644
index 00000000..cf764006
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/rabbitmq.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Jorge Rodriguez <jorge.rodriguez@tiriel.eu>
+# Copyright: (c) 2018, John Imison <john+github@imison.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.six.moves.urllib import parse as urllib_parse
+from mimetypes import MimeTypes
+
+import os
+import json
+import traceback
+
+PIKA_IMP_ERR = None
+try:
+ import pika
+ import pika.exceptions
+ from pika import spec
+ HAS_PIKA = True
+except ImportError:
+ PIKA_IMP_ERR = traceback.format_exc()
+ HAS_PIKA = False
+
+
+def rabbitmq_argument_spec():
+ return dict(
+ login_user=dict(type='str', default='guest'),
+ login_password=dict(type='str', default='guest', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='str', default='15672'),
+ login_protocol=dict(type='str', default='http', choices=['http', 'https']),
+ ca_cert=dict(type='path', aliases=['cacert']),
+ client_cert=dict(type='path', aliases=['cert']),
+ client_key=dict(type='path', aliases=['key']),
+ vhost=dict(type='str', default='/'),
+ )
+
+
+# notification/rabbitmq_basic_publish.py
+class RabbitClient():
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.check_required_library()
+ self.check_host_params()
+ self.url = self.params['url']
+ self.proto = self.params['proto']
+ self.username = self.params['username']
+ self.password = self.params['password']
+ self.host = self.params['host']
+ self.port = self.params['port']
+ self.vhost = self.params['vhost']
+ self.queue = self.params['queue']
+ self.headers = self.params['headers']
+ self.cafile = self.params['cafile']
+ self.certfile = self.params['certfile']
+ self.keyfile = self.params['keyfile']
+
+ if self.host is not None:
+ self.build_url()
+
+ if self.cafile is not None:
+ self.append_ssl_certs()
+
+ self.connect_to_rabbitmq()
+
+ def check_required_library(self):
+ if not HAS_PIKA:
+ self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR)
+
+ def check_host_params(self):
+ # Fail if url is specified and other conflicting parameters have been specified
+ if self.params['url'] is not None and any(self.params[k] is not None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
+ self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.")
+
+ # Fail if url not specified and there is a missing parameter to build the url
+ if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
+ self.module.fail_json(msg="Connection parameters must be passed via url, or, proto, host, port, vhost, username or password.")
+
+ def append_ssl_certs(self):
+ ssl_options = {}
+ if self.cafile:
+ ssl_options['cafile'] = self.cafile
+ if self.certfile:
+ ssl_options['certfile'] = self.certfile
+ if self.keyfile:
+ ssl_options['keyfile'] = self.keyfile
+
+ self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options))
+
+ @staticmethod
+ def rabbitmq_argument_spec():
+ return dict(
+ url=dict(type='str'),
+ proto=dict(type='str', choices=['amqp', 'amqps']),
+ host=dict(type='str'),
+ port=dict(type='int'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ vhost=dict(type='str'),
+ queue=dict(type='str')
+ )
+
+ # Consider some file size limits here
+ def _read_file(self, path):
+ try:
+ with open(path, "rb") as file_handle:
+ return file_handle.read()
+ except IOError as e:
+ self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e)))
+
+ @staticmethod
+ def _check_file_mime_type(path):
+ mime = MimeTypes()
+ return mime.guess_type(path)
+
+ def build_url(self):
+ self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto,
+ self.username,
+ self.password,
+ self.host,
+ self.port,
+ self.vhost)
+
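+ # Illustrative result of the format above, assuming proto='amqp',
+ # username='guest', password='guest', host='localhost', port=5672 and
+ # vhost='%2F' (a URL-encoded vhost value):
+ #   amqp://guest:guest@localhost:5672/%2F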
+ def connect_to_rabbitmq(self):
+ """
+ Function to connect to rabbitmq using username and password
+ """
+ try:
+ parameters = pika.URLParameters(self.url)
+ except Exception as e:
+ self.module.fail_json(msg="URL malformed: %s" % to_native(e))
+
+ try:
+ self.connection = pika.BlockingConnection(parameters)
+ except Exception as e:
+ self.module.fail_json(msg="Connection issue: %s" % to_native(e))
+
+ try:
+ self.conn_channel = self.connection.channel()
+ except pika.exceptions.AMQPChannelError as e:
+ self.close_connection()
+ self.module.fail_json(msg="Channel issue: %s" % to_native(e))
+
+ def close_connection(self):
+ try:
+ self.connection.close()
+ except pika.exceptions.AMQPConnectionError:
+ pass
+
+ def basic_publish(self):
+ self.content_type = self.params.get("content_type")
+
+ if self.params.get("body") is not None:
+ args = dict(
+ body=self.params.get("body"),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers=self.headers))
+
+ # If src (file) is defined and content_type is left as default, do a mime lookup on the file
+ if self.params.get("src") is not None and self.content_type == 'text/plain':
+ self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0]
+ self.headers.update(
+ filename=os.path.basename(self.params.get("src"))
+ )
+
+ args = dict(
+ body=self._read_file(self.params.get("src")),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type,
+ delivery_mode=1,
+ headers=self.headers
+ ))
+ elif self.params.get("src") is not None:
+ args = dict(
+ body=self._read_file(self.params.get("src")),
+ exchange=self.params.get("exchange"),
+ routing_key=self.params.get("routing_key"),
+ properties=pika.BasicProperties(content_type=self.content_type,
+ delivery_mode=1,
+ headers=self.headers
+ ))
+
+ try:
+ # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue.
+ if self.queue is None:
+ result = self.conn_channel.queue_declare(durable=self.params.get("durable"),
+ exclusive=self.params.get("exclusive"),
+ auto_delete=self.params.get("auto_delete"))
+ self.conn_channel.confirm_delivery()
+ self.queue = result.method.queue
+ else:
+ self.conn_channel.queue_declare(queue=self.queue,
+ durable=self.params.get("durable"),
+ exclusive=self.params.get("exclusive"),
+ auto_delete=self.params.get("auto_delete"))
+ self.conn_channel.confirm_delivery()
+ except Exception as e:
+ self.module.fail_json(msg="Queue declare issue: %s" % to_native(e))
+
+ # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150
+ if args['routing_key'] is None:
+ args['routing_key'] = self.queue
+
+ if args['exchange'] is None:
+ args['exchange'] = ''
+
+ try:
+ self.conn_channel.basic_publish(**args)
+ return True
+ except pika.exceptions.UnroutableError:
+ return False
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
new file mode 120000
index 00000000..f9993bfb
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbconfiguration_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
new file mode 120000
index 00000000..b8293e64
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbdatabase_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
new file mode 120000
index 00000000..4311a0c1
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbfirewallrule_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
new file mode 120000
index 00000000..5f76e0e9
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbserver_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_resource_facts.py b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py
new file mode 120000
index 00000000..710fda10
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py
@@ -0,0 +1 @@
+azure_rm_resource_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
new file mode 120000
index 00000000..ead87c85
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
@@ -0,0 +1 @@
+azure_rm_webapp_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/aws_az_info.py b/test/support/integration/plugins/modules/aws_az_info.py
new file mode 100644
index 00000000..c1efed6f
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_az_info.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'supported_by': 'community',
+ 'status': ['preview']
+}
+
+DOCUMENTATION = '''
+module: aws_az_info
+short_description: Gather information about availability zones in AWS.
+description:
+ - Gather information about availability zones in AWS.
+ - This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change.
+version_added: '2.5'
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all availability zones
+- aws_az_info:
+
+# Gather information about a single availability zone
+- aws_az_info:
+ filters:
+ zone-name: eu-west-1a
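+
+# The same filter written with an underscore; the module converts
+# underscores in filter keys to dashes before calling AWS
+- aws_az_info:
+ filters:
+ zone_name: eu-west-1a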
+'''
+
+RETURN = '''
+availability_zones:
+ returned: on success
+ description: >
+ Availability zones that match the provided filters. Each element consists of a dict with all the information
+ related to that availability zone.
+ type: list
+ sample: "[
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1b'
+ },
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1c'
+ }
+ ]"
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'aws_az_facts':
+ module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'",
+ version='2.14', collection_name='ansible.builtin')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
+
+ try:
+ availability_zones = connection.describe_availability_zones(
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe availability zones.")
+
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+
+ module.exit_json(availability_zones=snaked_availability_zones)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/aws_s3.py b/test/support/integration/plugins/modules/aws_s3.py
new file mode 100644
index 00000000..54874f05
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_s3.py
@@ -0,0 +1,925 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3
+short_description: manage objects in S3.
+description:
+ - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
+ deleting both objects and buckets, retrieving objects as files or strings and generating download links.
+ This module has a dependency on boto3 and botocore.
+notes:
+ - In 2.4, this module has been renamed from C(s3) into M(aws_s3).
+version_added: "1.1"
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ dest:
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ version_added: "1.3"
+ type: path
+ encrypt:
+ description:
+ - When set for PUT mode, asks for server-side encryption.
+ default: true
+ version_added: "2.0"
+ type: bool
+ encryption_mode:
+ description:
+ - What encryption mode to use if I(encrypt=true).
+ default: AES256
+ choices:
+ - AES256
+ - aws:kms
+ version_added: "2.7"
+ type: str
+ expiry:
+ description:
+ - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
+ default: 600
+ aliases: ['expiration']
+ type: int
+ headers:
+ description:
+ - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ version_added: "2.0"
+ type: dict
+ marker:
+ description:
+ - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
+ version_added: "2.0"
+ type: str
+ max_keys:
+ description:
+ - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
+ default: 1000
+ version_added: "2.0"
+ type: int
+ metadata:
+ description:
+ - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ version_added: "1.6"
+ type: dict
+ mode:
+ description:
+ - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
+ getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
+ and delobj (delete object, Ansible 2.0+).
+ required: true
+ choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
+ type: str
+ object:
+ description:
+ - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
+ type: str
+ permission:
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created.
+ The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
+ C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
+ C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
+ default: ['private']
+ version_added: "2.0"
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Limits the response to keys that begin with the specified prefix for list mode.
+ default: ""
+ version_added: "2.0"
+ type: str
+ version:
+ description:
+ - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
+ version_added: "2.0"
+ type: str
+ overwrite:
+ description:
+ - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0.
+ When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
+ The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
+ U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html)
+ default: 'always'
+ aliases: ['force']
+ version_added: "1.2"
+ type: str
+ retries:
+ description:
+ - On recoverable failure, how many times to retry before actually failing.
+ default: 0
+ version_added: "2.0"
+ type: int
+ aliases: ['retry']
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
+ aliases: [ S3_URL ]
+ type: str
+ dualstack:
+ description:
+ - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+ - Requires at least botocore version 1.4.45.
+ type: bool
+ default: false
+ version_added: "2.7"
+ rgw:
+ description:
+ - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
+ default: false
+ version_added: "2.2"
+ type: bool
+ src:
+ description:
+ - The source file path when performing a PUT operation.
+ version_added: "1.3"
+ type: str
+ ignore_nonexistent_bucket:
+ description:
+ - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
+ GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
+ I(ignore_nonexistent_bucket=true)."
+ version_added: "2.3"
+ type: bool
+ encryption_kms_key_id:
+ description:
+ - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms)
+ version_added: "2.7"
+ type: str
+requirements: [ "boto3", "botocore" ]
+author:
+ - "Lester Wade (@lwade)"
+ - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: Simple PUT operation
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ rgw: true
+ s3_url: "http://localhost:8000"
+
+- name: Simple GET operation
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Get a specific version of an object.
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ version: 48c9ee5131af7a716edc22df9772aa6f
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: PUT/upload with metadata
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+ aws_s3:
+ bucket: mybucket
+ mode: list
+
+- name: List keys all options
+ aws_s3:
+ bucket: mybucket
+ mode: list
+ prefix: /my/desired/
+ marker: /my/desired/0023.txt
+ max_keys: 472
+
+- name: Create an empty bucket
+ aws_s3:
+ bucket: mybucket
+ mode: create
+ permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+ aws_s3:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+ region: eu-west-1
+
+- name: Delete a bucket and all contents
+ aws_s3:
+ bucket: mybucket
+ mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+ overwrite: different
+
+- name: Delete an object from a bucket
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: delobj
+'''
+
+RETURN = '''
+msg:
+ description: Message indicating the status of the operation.
+ returned: always
+ type: str
+ sample: PUT operation complete
+url:
+ description: URL of the object.
+ returned: (for put and geturl operations)
+ type: str
+ sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
+expiry:
+ description: Number of seconds the presigned url is valid for.
+ returned: (for geturl operation)
+ type: int
+ sample: 600
+contents:
+ description: Contents of the object as string.
+ returned: (for getstr operation)
+ type: str
+ sample: "Hello, world!"
+s3_keys:
+ description: List of object keys.
+ returned: (for list operation)
+ type: list
+ elements: str
+ sample:
+ - prefix1/
+ - prefix1/key1
+ - prefix1/key2
+'''
+
+import mimetypes
+import os
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ssl import SSLError
+from ansible.module_utils.basic import to_text, to_native
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
+
+try:
+ import botocore
+except ImportError:
+ pass # will be detected by imported AnsibleAWSModule
+
+IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
+
+
+class Sigv4Required(Exception):
+ pass
+
+
+def key_check(module, s3, bucket, obj, version=None, validate=True):
+ exists = True
+ try:
+ if version:
+ s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ s3.head_object(Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ # if a client error is thrown, check if it's a 404 error
+ # if it's a 404 error, then the object does not exist
+ error_code = int(e.response['Error']['Code'])
+ if error_code == 404:
+ exists = False
+ elif error_code == 403 and validate is False:
+ pass
+ else:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+ return exists
+
+
+def etag_compare(module, local_file, s3, bucket, obj, version=None):
+ s3_etag = get_etag(s3, bucket, obj, version=version)
+ local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
+
+ return s3_etag == local_etag
+
+
+def get_etag(s3, bucket, obj, version=None):
+ if version:
+ key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key_check = s3.head_object(Bucket=bucket, Key=obj)
+ if not key_check:
+ return None
+ return key_check['ETag']
+
+
+def bucket_check(module, s3, bucket, validate=True):
+ exists = True
+ try:
+ s3.head_bucket(Bucket=bucket)
+ except botocore.exceptions.ClientError as e:
+ # If a client error is thrown, then check that it was a 404 error.
+ # If it was a 404 error, then the bucket does not exist.
+ error_code = int(e.response['Error']['Code'])
+ if error_code == 404:
+ exists = False
+ elif error_code == 403 and validate is False:
+ pass
+ else:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided")
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ return exists
+
+
+def create_bucket(module, s3, bucket, location=None):
+ if module.check_mode:
+ module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ try:
+ if len(configuration) > 0:
+ s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
+ else:
+ s3.create_bucket(Bucket=bucket)
+ if module.params.get('permission'):
+ # Wait for the bucket to exist before setting ACLs
+ s3.get_waiter('bucket_exists').wait(Bucket=bucket)
+ for acl in module.params.get('permission'):
+ s3.put_bucket_acl(ACL=acl, Bucket=bucket)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+ module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ else:
+ module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
+
+ if bucket:
+ return True
+
+
+def paginated_list(s3, **pagination_params):
+ pg = s3.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
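+# Usage sketch (hypothetical bucket and prefix): flatten the per-page key
+# lists into a single list, as list_keys() does below:
+#
+#   keys = sum(paginated_list(s3, Bucket='mybucket', Prefix='logs/'), [])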
+
+def paginated_versioned_list_with_fallback(s3, **pagination_params):
+ try:
+ versioned_pg = s3.get_paginator('list_object_versions')
+ for page in versioned_pg.paginate(**pagination_params):
+ delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
+ current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
+ yield delete_markers + current_objects
+ except botocore.exceptions.ClientError as e:
+ if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
+ for page in paginated_list(s3, **pagination_params):
+ yield [{'Key': data['Key']} for data in page]
+
+
+def list_keys(module, s3, bucket, prefix, marker, max_keys):
+ pagination_params = {'Bucket': bucket}
+ for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
+ pagination_params[param_name] = param_value
+ try:
+ keys = sum(paginated_list(s3, **pagination_params), [])
+ module.exit_json(msg="LIST operation complete", s3_keys=keys)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
+
+
+def delete_bucket(module, s3, bucket):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ exists = bucket_check(module, s3, bucket)
+ if exists is False:
+ return False
+ # if there are contents then we need to delete them before we can delete the bucket
+ for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
+ if keys:
+ s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
+ s3.delete_bucket(Bucket=bucket)
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
+
+
+def delete_key(module, s3, bucket, obj):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ s3.delete_object(Bucket=bucket, Key=obj)
+ module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
+
+
+def create_dirkey(module, s3, bucket, obj, encrypt):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
+ if encrypt:
+ params['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+
+ s3.put_object(**params)
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
+ else:
+ module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
+def option_in_extra_args(option):
+ temp_option = option.replace('-', '').lower()
+
+ allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
+ 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
+ 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
+ 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
+ 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
+ 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
+ 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
+
+ if temp_option in allowed_extra_args:
+ return allowed_extra_args[temp_option]
+
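+# Illustrative examples (hypothetical keys): recognised headers map to
+# boto3 ExtraArgs names, anything else falls through to None and is
+# treated as plain object metadata by the caller:
+#
+#   option_in_extra_args('Content-Type')  => 'ContentType'
+#   option_in_extra_args('x-custom')      => None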
+
+def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ extra = {}
+ if encrypt:
+ extra['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+ if metadata:
+ extra['Metadata'] = {}
+
+ # determine object metadata and extra arguments
+ for option in metadata:
+ extra_args_option = option_in_extra_args(option)
+ if extra_args_option is not None:
+ extra[extra_args_option] = metadata[option]
+ else:
+ extra['Metadata'][option] = metadata[option]
+
+ if 'ContentType' not in extra:
+ content_type = mimetypes.guess_type(src)[0]
+ if content_type is None:
+ # s3 default content type
+ content_type = 'binary/octet-stream'
+ extra['ContentType'] = content_type
+
+ s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to complete PUT operation.")
+ try:
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ else:
+ module.fail_json_aws(e, msg="Unable to set object ACL")
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to set object ACL")
+ try:
+ url = s3.generate_presigned_url(ClientMethod='put_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to generate presigned URL")
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+
+
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ # retries is the number of retries; iterate over range(retries + 1) so
+ # the initial attempt plus each retry is performed.
+ try:
+ if version:
+ key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key = s3.get_object(Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
+ raise Sigv4Required()
+ elif e.response['Error']['Code'] not in ("403", "404"):
+ # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+ # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+
+ optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
+ for x in range(0, retries + 1):
+ try:
+ s3.download_file(bucket, obj, dest, **optional_kwargs)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
+ # otherwise, try again, this may be a transient timeout.
+ except SSLError as e: # will ClientError catch SSLError?
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="s3 download failed")
+ # otherwise, try again, this may be a transient timeout.
+
+
+def download_s3str(module, s3, bucket, obj, version=None, validate=True):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ try:
+ if version:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
+ else:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
+ raise Sigv4Required()
+ else:
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+
+
+def get_download_url(module, s3, bucket, obj, expiry, changed=True):
+ try:
+ url = s3.generate_presigned_url(ClientMethod='get_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while getting download url.")
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
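+    # e.g. is_fakes3('fakes3://localhost:4567') -> True
+    #      is_fakes3('https://s3.amazonaws.com') -> False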
+ if s3_url is not None:
+ return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
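+# Build the boto3 S3 client, choosing endpoint and signature settings for the target:
+# Ceph RGW (explicit s3_url with rgw=true), fakes3/fakes3s URLs, or regular AWS S3.
+# SigV4 is forced for KMS uploads and, via sig_4, for SigV4-only regions; dualstack
+# support is layered on top through a merged botocore Config.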
+def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
+    if s3_url and rgw:  # TODO - test this
+        rgw_url = urlparse(s3_url)
+        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw_url.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse(s3_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ elif module.params['mode'] in ('get', 'getstr') and sig_4:
+ params['config'] = botocore.client.Config(signature_version='s3v4')
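+    # botocore Config objects are immutable; merge() returns a new Config that layers
+    # the dualstack flag on top of any signature configuration chosen above.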
+ if module.params['dualstack']:
+ dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+ if 'config' in params:
+ params['config'] = params['config'].merge(dualconf)
+ else:
+ params['config'] = dualconf
+ return boto3_conn(**params)
+
+
+def main():
+ argument_spec = dict(
+ bucket=dict(required=True),
+ dest=dict(default=None, type='path'),
+ encrypt=dict(default=True, type='bool'),
+ encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
+ expiry=dict(default=600, type='int', aliases=['expiration']),
+ headers=dict(type='dict'),
+ marker=dict(default=""),
+ max_keys=dict(default=1000, type='int'),
+ metadata=dict(type='dict'),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+ object=dict(),
+ permission=dict(type='list', default=['private']),
+ version=dict(default=None),
+ overwrite=dict(aliases=['force'], default='always'),
+ prefix=dict(default=""),
+ retries=dict(aliases=['retry'], type='int', default=0),
+ s3_url=dict(aliases=['S3_URL']),
+ dualstack=dict(default='no', type='bool'),
+ rgw=dict(default='no', type='bool'),
+ src=dict(),
+ ignore_nonexistent_bucket=dict(default=False, type='bool'),
+ encryption_kms_key_id=dict()
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['mode', 'put', ['src', 'object']],
+ ['mode', 'get', ['dest', 'object']],
+ ['mode', 'getstr', ['object']],
+ ['mode', 'geturl', ['object']]],
+ )
+
+ bucket = module.params.get('bucket')
+ encrypt = module.params.get('encrypt')
+ expiry = module.params.get('expiry')
+ dest = module.params.get('dest', '')
+ headers = module.params.get('headers')
+ marker = module.params.get('marker')
+ max_keys = module.params.get('max_keys')
+ metadata = module.params.get('metadata')
+ mode = module.params.get('mode')
+ obj = module.params.get('object')
+ version = module.params.get('version')
+ overwrite = module.params.get('overwrite')
+ prefix = module.params.get('prefix')
+ retries = module.params.get('retries')
+ s3_url = module.params.get('s3_url')
+ dualstack = module.params.get('dualstack')
+ rgw = module.params.get('rgw')
+ src = module.params.get('src')
+ ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
+
+ object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
+ bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
+
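+    # overwrite historically accepted booleans; fold anything outside
+    # always/never/different back onto 'always' (truthy) or 'never' (falsey).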
+ if overwrite not in ['always', 'never', 'different']:
+ if module.boolean(overwrite):
+ overwrite = 'always'
+ else:
+ overwrite = 'never'
+
+ if overwrite == 'different' and not HAS_MD5:
+ module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ if module.params.get('object'):
+ obj = module.params['object']
+        # Strip a leading slash from the object key to maintain compatibility
+        # with Ansible versions < 2.4.
+ if obj.startswith('/'):
+ obj = obj[1:]
+
+ # Bucket deletion does not require obj. Prevents ambiguity with delobj.
+ if obj and mode == "delete":
+ module.fail_json(msg='Parameter obj cannot be used with mode=delete')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
+ module.fail_json(msg='dualstack only applies to AWS S3')
+
+ if dualstack and not module.botocore_at_least('1.4.45'):
+ module.fail_json(msg='dualstack requires botocore >= 1.4.45')
+
+ # rgw requires an explicit url
+ if rgw and not s3_url:
+ module.fail_json(msg='rgw flavour requires s3_url')
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to RGW, Walrus or fakes3
+ if s3_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
+
+ validate = not ignore_nonexistent_bucket
+
+ # separate types of ACLs
+ bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
+ object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
+ error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
+ if error_acl:
+ module.fail_json(msg='Unknown permission specified: %s' % error_acl)
+
+    # First, check whether the bucket exists; bucket_check returns a truthy value if it does.
+ bucketrtn = bucket_check(module, s3, bucket, validate=validate)
+
+ if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
+ module.fail_json(msg="Source bucket cannot be found.")
+
+ if mode == 'get':
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn is False:
+ if version:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if path_check(dest) and overwrite != 'always':
+ if overwrite == 'never':
+ module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
+ if etag_compare(module, dest, s3, bucket, obj, version=version):
+ module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
+
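+        # Buckets in some regions only accept Signature Version 4; when the first
+        # attempt raises Sigv4Required, rebuild the client with sig_4=True and retry.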
+ try:
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+
+ if mode == 'put':
+
+ # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+ # these were separated into the variables bucket_acl and object_acl above
+
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist")
+
+ if bucketrtn:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+        else:
+            # If the bucket doesn't exist we should create it.
+            # only use valid bucket acls for create_bucket function
+            module.params['permission'] = bucket_acl
+            create_bucket(module, s3, bucket, location)
+            # the bucket was just created, so the key cannot already exist
+            keyrtn = None
+
+ if keyrtn and overwrite != 'always':
+ if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
+ # Return the download URL for the existing object
+ get_download_url(module, s3, bucket, obj, expiry, changed=False)
+
+ # only use valid object acls for the upload_s3file function
+ module.params['permission'] = object_acl
+ upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+
+ # Delete an object from a bucket, not the entire bucket
+ if mode == 'delobj':
+ if obj is None:
+ module.fail_json(msg="object parameter is required")
+ if bucket:
+ deletertn = delete_key(module, s3, bucket, obj)
+ if deletertn is True:
+ module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Delete an entire bucket, including all objects in the bucket
+ if mode == 'delete':
+ if bucket:
+ deletertn = delete_bucket(module, s3, bucket)
+ if deletertn is True:
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Support for listing a set of keys
+ if mode == 'list':
+ exists = bucket_check(module, s3, bucket)
+
+ # If the bucket does not exist then bail out
+ if not exists:
+ module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
+
+ list_keys(module, s3, bucket, prefix, marker, max_keys)
+
+    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+    # TODO: support creating an empty key to represent a "directory", the way the AWS console does.
+ if mode == 'create':
+
+ # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
+ # these were separated above into the variables bucket_acl and object_acl
+
+ if bucket and not obj:
+ if bucketrtn:
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ # only use valid bucket acls when creating the bucket
+ module.params['permission'] = bucket_acl
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+ if bucketrtn:
+ if key_check(module, s3, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ # setting valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt)
+ else:
+ # only use valid bucket acls for the create_bucket function
+ module.params['permission'] = bucket_acl
+                create_bucket(module, s3, bucket, location)
+ # only use valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt)
+
+ # Support for grabbing the time-expired URL for an object in S3/Walrus.
+ if mode == 'geturl':
+ if not bucket and not obj:
+ module.fail_json(msg="Bucket and Object parameters must be set")
+
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ get_download_url(module, s3, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if mode == 'getstr':
+ if bucket and obj:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ try:
+ download_s3str(module, s3, bucket, obj, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+ download_s3str(module, s3, bucket, obj, version=version)
+ elif version is not None:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_appserviceplan.py b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py
new file mode 100644
index 00000000..ee871c35
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_appserviceplan
+version_added: "2.7"
+short_description: Manage App Service Plan
+description:
+ - Create, update and delete instance of App Service Plan.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+
+ name:
+ description:
+ - Unique name of the app service plan to create or update.
+ required: True
+
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+
+ sku:
+ description:
+ - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc.
+ - Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail.
+ - For Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail.
+ is_linux:
+ description:
+            - Whether to host the web app on a Linux worker.
+ type: bool
+ default: false
+
+ number_of_workers:
+ description:
+            - Number of workers to allocate.
+
+ state:
+ description:
+ - Assert the state of the app service plan.
+ - Use C(present) to create or update an app service plan and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a windows app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S1
+
+ - name: Create a linux app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S1
+ is_linux: true
+ number_of_workers: 1
+
+    - name: Update the sku of an existing windows app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S2
+'''
+
+RETURN = '''
+azure_appserviceplan:
+ description: Facts about the current state of the app service plan.
+ returned: always
+ type: dict
+ sample: {
+ "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan"
+ }
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrestazure.azure_operation import AzureOperationPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ app_service_plan, AppServicePlan, SkuDescription
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
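+# Canonicalize friendly SKU aliases ('Free', 'Shared') to their size codes ('F1', 'D1');
+# any other value is simply upper-cased.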
+def _normalize_sku(sku):
+ if sku is None:
+ return sku
+
+ sku = sku.upper()
+ if sku == 'FREE':
+ return 'F1'
+ elif sku == 'SHARED':
+ return 'D1'
+ return sku
+
+
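+# Map an SKU size code (e.g. 'S1') to its pricing tier name (e.g. 'STANDARD');
+# returns None for unrecognized sizes.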
+def get_sku_name(tier):
+ tier = tier.upper()
+ if tier == 'F1' or tier == "FREE":
+ return 'FREE'
+ elif tier == 'D1' or tier == "SHARED":
+ return 'SHARED'
+ elif tier in ['B1', 'B2', 'B3', 'BASIC']:
+ return 'BASIC'
+ elif tier in ['S1', 'S2', 'S3']:
+ return 'STANDARD'
+ elif tier in ['P1', 'P2', 'P3']:
+ return 'PREMIUM'
+ elif tier in ['P1V2', 'P2V2', 'P3V2']:
+ return 'PREMIUMV2'
+ else:
+ return None
+
+
+def appserviceplan_to_dict(plan):
+ return dict(
+ id=plan.id,
+ name=plan.name,
+ kind=plan.kind,
+ location=plan.location,
+ reserved=plan.reserved,
+ is_linux=plan.reserved,
+ provisioning_state=plan.provisioning_state,
+ status=plan.status,
+ target_worker_count=plan.target_worker_count,
+ sku=dict(
+ name=plan.sku.name,
+ size=plan.sku.size,
+ tier=plan.sku.tier,
+ family=plan.sku.family,
+ capacity=plan.sku.capacity
+ ),
+ resource_group=plan.resource_group,
+ number_of_sites=plan.number_of_sites,
+ tags=plan.tags if plan.tags else None
+ )
+
+
+class AzureRMAppServicePlans(AzureRMModuleBase):
+ """Configuration class for an Azure RM App Service Plan resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ location=dict(
+ type='str'
+ ),
+ sku=dict(
+ type='str'
+ ),
+ is_linux=dict(
+ type='bool',
+ default=False
+ ),
+ number_of_workers=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.name = None
+ self.location = None
+
+ self.sku = None
+ self.is_linux = None
+ self.number_of_workers = 1
+
+ self.tags = None
+
+ self.results = dict(
+ changed=False,
+ ansible_facts=dict(azure_appserviceplan=None)
+ )
+ self.state = None
+
+ super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if kwargs[key]:
+ setattr(self, key, kwargs[key])
+
+ old_response = None
+ response = None
+ to_be_updated = False
+
+ # set location
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ self.location = resource_group.location
+
+ # get app service plan
+ old_response = self.get_plan()
+
+ # if not existing
+ if not old_response:
+ self.log("App Service plan doesn't exist")
+
+ if self.state == "present":
+ to_be_updated = True
+
+ if not self.sku:
+                    self.fail('Please specify sku when creating an app service plan')
+
+ else:
+ # existing app service plan, do update
+ self.log("App Service Plan already exists")
+
+ if self.state == 'present':
+ self.log('Result: {0}'.format(old_response))
+
+ update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
+
+ if update_tags:
+ to_be_updated = True
+ self.tags = newtags
+
+ # check if sku changed
+ if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']:
+ to_be_updated = True
+
+ # check if number_of_workers changed
+ if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']:
+ to_be_updated = True
+
+ if self.is_linux and self.is_linux != old_response['reserved']:
+                    self.fail("Operation not allowed: the reserved (is_linux) property of an existing app service plan cannot be updated.")
+
+ if old_response:
+ self.results['id'] = old_response['id']
+
+ if to_be_updated:
+ self.log('Need to Create/Update app service plan')
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ response = self.create_or_update_plan()
+ self.results['id'] = response['id']
+
+ if self.state == 'absent' and old_response:
+ self.log("Delete app service plan")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_plan()
+
+ self.log('App service plan instance deleted')
+
+ return self.results
+
+ def get_plan(self):
+ '''
+ Gets app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Get App Service Plan {0}".format(self.name))
+
+ try:
+ response = self.web_client.app_service_plans.get(self.resource_group, self.name)
+ if response:
+ self.log("Response : {0}".format(response))
+ self.log("App Service Plan : {0} found".format(response.name))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group))
+
+ return False
+
+ def create_or_update_plan(self):
+ '''
+ Creates app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Create App Service Plan {0}".format(self.name))
+
+ try:
+ # normalize sku
+ sku = _normalize_sku(self.sku)
+
+ sku_def = SkuDescription(tier=get_sku_name(
+ sku), name=sku, capacity=self.number_of_workers)
+ plan_def = AppServicePlan(
+ location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None)
+
+ response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def)
+
+ if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
+ response = self.get_poller_result(response)
+
+ self.log("Response : {0}".format(response))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
+
+ def delete_plan(self):
+ '''
+ Deletes specified App service plan in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the App service plan {0}".format(self.name))
+ try:
+ response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group,
+ name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete App service plan.')
+ self.fail(
+ "Error deleting the App service plan : {0}".format(str(e)))
+
+ return True
+
+
+def main():
+ """Main execution"""
+ AzureRMAppServicePlans()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp.py b/test/support/integration/plugins/modules/azure_rm_functionapp.py
new file mode 100644
index 00000000..0c372a88
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_functionapp.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_functionapp
+version_added: "2.4"
+short_description: Manage Azure Function Apps
+description:
+ - Create, update or delete an Azure Function App.
+options:
+ resource_group:
+ description:
+ - Name of resource group.
+ required: true
+ aliases:
+ - resource_group_name
+ name:
+ description:
+ - Name of the Azure Function App.
+ required: true
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ plan:
+ description:
+            - App service plan.
+            - It can be the name of an existing app service plan in the same resource group as the function app.
+            - It can be the resource id of an existing app service plan, for example
+              /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+            - It can be a dict containing C(name) and C(resource_group).
+            - C(name). Name of the app service plan.
+            - C(resource_group). Resource group name of the app service plan.
+ version_added: "2.8"
+ container_settings:
+ description: Web app container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container. For example "imagename:tag".
+ registry_server_url:
+ description:
+ - Container registry server url. For example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+ version_added: "2.8"
+ storage_account:
+ description:
+ - Name of the storage account to use.
+ required: true
+ aliases:
+ - storage
+ - storage_account_name
+ app_settings:
+ description:
+ - Dictionary containing application settings.
+ state:
+ description:
+ - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Thomas Stringer (@trstringer)
+'''
+
+EXAMPLES = '''
+- name: Create a function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+
+- name: Create a function app with app settings
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+ app_settings:
+ setting1: value1
+ setting2: value2
+
+- name: Create container based function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+ plan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ container_settings:
+ name: httpd
+ registry_server_url: index.docker.io
+
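+# The plan option also accepts the resource id of an existing plan (see the plan
+# option documentation above); <subs_id> below is a placeholder.
+- name: Create a function app in an existing app service plan referenced by id
+  azure_rm_functionapp:
+    resource_group: myResourceGroup
+    name: myFunctionApp
+    storage_account: myStorageAccount
+    plan: /subscriptions/<subs_id>/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverFarms/myAppPlan
+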
+- name: Delete a function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Function App.
+ returned: success
+ type: dict
+ example:
+ id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp
+ name: myfunctionapp
+ kind: functionapp
+ location: East US
+ type: Microsoft.Web/sites
+ state: Running
+ host_names:
+ - myfunctionapp.azurewebsites.net
+ repository_site_name: myfunctionapp
+ usage_state: Normal
+ enabled: true
+ enabled_host_names:
+ - myfunctionapp.azurewebsites.net
+ - myfunctionapp.scm.azurewebsites.net
+ availability_state: Normal
+ host_name_ssl_states:
+ - name: myfunctionapp.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Standard
+ - name: myfunctionapp.scm.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Repository
+ server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan
+ reserved: false
+ last_modified_time_utc: 2017-08-22T18:54:01.190Z
+ scm_site_also_stopped: false
+ client_affinity_enabled: true
+ client_cert_enabled: false
+ host_names_disabled: false
+ outbound_ip_addresses: ............
+ container_size: 1536
+ daily_memory_time_quota: 0
+ resource_group: myResourceGroup
+ default_host_name: myfunctionapp.azurewebsites.net
+''' # NOQA
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl,
+ AppServicePlan, SkuDescription
+ )
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from msrest.polling import LROPoller
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+
+class AzureRMFunctionApp(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ location=dict(type='str'),
+ storage_account=dict(
+ type='str',
+ aliases=['storage', 'storage_account_name']
+ ),
+ app_settings=dict(type='dict'),
+ plan=dict(
+ type='raw'
+ ),
+ container_settings=dict(
+ type='dict',
+ options=container_settings_spec
+ )
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.resource_group = None
+ self.name = None
+ self.state = None
+ self.location = None
+ self.storage_account = None
+ self.app_settings = None
+ self.plan = None
+ self.container_settings = None
+
+ required_if = [('state', 'present', ['storage_account'])]
+
+ super(AzureRMFunctionApp, self).__init__(
+ self.module_arg_spec,
+ supports_check_mode=True,
+ required_if=required_if
+ )
+
+ def exec_module(self, **kwargs):
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ if self.app_settings is None:
+ self.app_settings = dict()
+
+ try:
+ resource_group = self.rm_client.resource_groups.get(self.resource_group)
+ except CloudError:
+ self.fail('Unable to retrieve resource group')
+
+ self.location = self.location or resource_group.location
+
+ try:
+ function_app = self.web_client.web_apps.get(
+ resource_group_name=self.resource_group,
+ name=self.name
+ )
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ exists = function_app is not None
+        except CloudError as exc:
+            # get() raised, so the function app does not exist; bind function_app
+            # so later references (e.g. the plan fallback below) do not fail.
+            function_app = None
+            exists = False
+
+ if self.state == 'absent':
+ if exists:
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+ try:
+ self.web_client.web_apps.delete(
+ resource_group_name=self.resource_group,
+ name=self.name
+ )
+ self.results['changed'] = True
+ except CloudError as exc:
+ self.fail('Failure while deleting web app: {0}'.format(exc))
+ else:
+ self.results['changed'] = False
+ else:
+ kind = 'functionapp'
+ linux_fx_version = None
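+            # Linux container apps use kind 'functionapp,linux,container' and a
+            # linux_fx_version of the form 'DOCKER|<registry_server_url>/<name>',
+            # with registry credentials passed through app settings.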
+ if self.container_settings and self.container_settings.get('name'):
+ kind = 'functionapp,linux,container'
+ linux_fx_version = 'DOCKER|'
+ if self.container_settings.get('registry_server_url'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
+ linux_fx_version += self.container_settings['registry_server_url'] + '/'
+ linux_fx_version += self.container_settings['name']
+ if self.container_settings.get('registry_server_user'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings.get('registry_server_user')
+
+ if self.container_settings.get('registry_server_password'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password')
+
+ if not self.plan and function_app:
+ self.plan = function_app.server_farm_id
+
+ if not exists:
+ function_app = Site(
+ location=self.location,
+ kind=kind,
+ site_config=SiteConfig(
+ app_settings=self.aggregated_app_settings(),
+ scm_type='LocalGit'
+ )
+ )
+ self.results['changed'] = True
+ else:
+ self.results['changed'], function_app = self.update(function_app)
+
+ # get app service plan
+ if self.plan:
+ if isinstance(self.plan, dict):
+ self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format(
+ self.subscription_id,
+ self.plan.get('resource_group', self.resource_group),
+ self.plan.get('name')
+ )
+ function_app.server_farm_id = self.plan
+
+ # set linux fx version
+ if linux_fx_version:
+ function_app.site_config.linux_fx_version = linux_fx_version
+
+ if self.check_mode:
+ self.results['state'] = function_app.as_dict()
+ elif self.results['changed']:
+ try:
+ new_function_app = self.web_client.web_apps.create_or_update(
+ resource_group_name=self.resource_group,
+ name=self.name,
+ site_envelope=function_app
+ ).result()
+ self.results['state'] = new_function_app.as_dict()
+ except CloudError as exc:
+ self.fail('Error creating or updating web app: {0}'.format(exc))
+
+ return self.results
+
+ def update(self, source_function_app):
+ """Update the Site object if there are any changes"""
+
+ source_app_settings = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group,
+ name=self.name
+ )
+
+ changed, target_app_settings = self.update_app_settings(source_app_settings.properties)
+
+ source_function_app.site_config = SiteConfig(
+ app_settings=target_app_settings,
+ scm_type='LocalGit'
+ )
+
+ return changed, source_function_app
+
+ def update_app_settings(self, source_app_settings):
+ """Update app settings"""
+
+ target_app_settings = self.aggregated_app_settings()
+ target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings])
+ return target_app_settings_dict != source_app_settings, target_app_settings
+
+ def necessary_functionapp_settings(self):
+ """Construct the necessary app settings required for an Azure Function App"""
+
+ function_app_settings = []
+
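+        # Non-container apps are pinned to the v1 (~1) runtime and need the storage,
+        # content-share and dashboard connection settings; container apps target the
+        # v2 (~2) runtime with app service storage disabled.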
+ if self.container_settings is None:
+ for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']:
+ function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string))
+ function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1'))
+ function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0'))
+ function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name))
+ else:
+ function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
+ function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False))
+ function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string))
+
+ return function_app_settings
+
+ def aggregated_app_settings(self):
+ """Combine both system and user app settings"""
+
+ function_app_settings = self.necessary_functionapp_settings()
+ for app_setting_key in self.app_settings:
+ found_setting = None
+ for s in function_app_settings:
+ if s.name == app_setting_key:
+ found_setting = s
+ break
+ if found_setting:
+ found_setting.value = self.app_settings[app_setting_key]
+ else:
+ function_app_settings.append(NameValuePair(
+ name=app_setting_key,
+ value=self.app_settings[app_setting_key]
+ ))
+ return function_app_settings
+
+ @property
+ def storage_connection_string(self):
+ """Construct the storage account connection string"""
+
+ return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format(
+ self.storage_account,
+ self.storage_key
+ )
+
+ @property
+ def storage_key(self):
+ """Retrieve the storage account key"""
+
+ return self.storage_client.storage_accounts.list_keys(
+ resource_group_name=self.resource_group,
+ account_name=self.storage_account
+ ).keys[0].value
+
+
+def main():
+ """Main function execution"""
+
+ AzureRMFunctionApp()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp_info.py b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py
new file mode 100644
index 00000000..40672f95
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_functionapp_info
+version_added: "2.9"
+short_description: Get Azure Function App facts
+description:
+ - Get facts for one Azure Function App or all Function Apps within a resource group.
+options:
+ name:
+ description:
+ - Only show results for a specific Function App.
+ resource_group:
+ description:
+ - Limit results to a resource group. Required when filtering by name.
+ aliases:
+ - resource_group_name
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Thomas Stringer (@trstringer)
+'''
+
+EXAMPLES = '''
+ - name: Get facts for one Function App
+ azure_rm_functionapp_info:
+ resource_group: myResourceGroup
+ name: myfunctionapp
+
+ - name: Get facts for all Function Apps in a resource group
+ azure_rm_functionapp_info:
+ resource_group: myResourceGroup
+
+ - name: Get facts for all Function Apps by tags
+ azure_rm_functionapp_info:
+ tags:
+ - testing
+'''
+
+RETURN = '''
+azure_functionapps:
+ description:
+ - List of Azure Function Apps dicts.
+ returned: always
+ type: list
+ example:
+ id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
+ name: myfunctionapp
+ kind: functionapp
+ location: East US
+ type: Microsoft.Web/sites
+ state: Running
+ host_names:
+ - myfunctionapp.azurewebsites.net
+ repository_site_name: myfunctionapp
+ usage_state: Normal
+ enabled: true
+ enabled_host_names:
+ - myfunctionapp.azurewebsites.net
+ - myfunctionapp.scm.azurewebsites.net
+ availability_state: Normal
+ host_name_ssl_states:
+ - name: myfunctionapp.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Standard
+ - name: myfunctionapp.scm.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Repository
+ server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
+ reserved: false
+ last_modified_time_utc: 2017-08-22T18:54:01.190Z
+ scm_site_also_stopped: false
+ client_affinity_enabled: true
+ client_cert_enabled: false
+ host_names_disabled: false
+ outbound_ip_addresses: ............
+ container_size: 1536
+ daily_memory_time_quota: 0
+ resource_group: myResourceGroup
+ default_host_name: myfunctionapp.azurewebsites.net
+'''
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+
+class AzureRMFunctionAppInfo(AzureRMModuleBase):
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str', aliases=['resource_group_name']),
+ tags=dict(type='list'),
+ )
+
+ self.results = dict(
+ changed=False,
+ ansible_info=dict(azure_functionapps=[])
+ )
+
+ self.name = None
+ self.resource_group = None
+ self.tags = None
+
+ super(AzureRMFunctionAppInfo, self).__init__(
+ self.module_arg_spec,
+ supports_tags=False,
+ facts_module=True
+ )
+
+ def exec_module(self, **kwargs):
+
+ is_old_facts = self.module._name == 'azure_rm_functionapp_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.name and not self.resource_group:
+ self.fail("Parameter error: resource group required when filtering by name.")
+
+ if self.name:
+ self.results['ansible_info']['azure_functionapps'] = self.get_functionapp()
+ elif self.resource_group:
+ self.results['ansible_info']['azure_functionapps'] = self.list_resource_group()
+ else:
+ self.results['ansible_info']['azure_functionapps'] = self.list_all()
+
+ return self.results
+
+ def get_functionapp(self):
+ self.log('Get properties for Function App {0}'.format(self.name))
+ function_app = None
+ result = []
+
+ try:
+ function_app = self.web_client.web_apps.get(
+ self.resource_group,
+ self.name
+ )
+ except CloudError:
+ pass
+
+ if function_app and self.has_tags(function_app.tags, self.tags):
+ result = function_app.as_dict()
+
+ return [result]
+
+ def list_resource_group(self):
+ self.log('List items')
+ try:
+ response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
+ except Exception as exc:
+ self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(item.as_dict())
+ return results
+
+ def list_all(self):
+ self.log('List all items')
+        try:
+            # resource_group is not set on this path; enumerate the whole subscription
+            response = self.web_client.web_apps.list()
+ except Exception as exc:
+ self.fail("Error listing all items - {0}".format(str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(item.as_dict())
+ return results
+
+
+def main():
+ AzureRMFunctionAppInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
new file mode 100644
index 00000000..212cf795
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration
+version_added: "2.8"
+short_description: Manage Configuration instance
+description:
+ - Create, update and delete instance of Configuration.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the server configuration.
+ required: True
+ value:
+ description:
+ - Value of the configuration.
+ state:
+ description:
+ - Assert the state of the MariaDB configuration. Use C(present) to update setting, or C(absent) to reset to default value.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+'''
+
+EXAMPLES = '''
+ - name: Update SQL Server setting
+ azure_rm_mariadbconfiguration:
+ resource_group: myResourceGroup
+ server_name: myServer
+ name: event_scheduler
+ value: "ON"
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/configurations/event_scheduler"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+    from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbConfiguration(AzureRMModuleBase):
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ value=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ self.value = None
+
+ self.results = dict(changed=False)
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()):
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+
+ old_response = None
+ response = None
+
+ old_response = self.get_configuration()
+
+ if not old_response:
+ self.log("Configuration instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("Configuration instance already exists")
+ if self.state == 'absent' and old_response['source'] == 'user-override':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if Configuration instance has to be deleted or may be updated")
+ if self.value != old_response.get('value'):
+ self.to_do = Actions.Update
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the Configuration instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_configuration()
+
+ self.results['changed'] = True
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+            self.log("Configuration instance will be deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_configuration()
+ else:
+ self.log("Configuration instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+
+ return self.results
+
+ def create_update_configuration(self):
+ self.log("Creating / Updating the Configuration instance {0}".format(self.name))
+
+ try:
+ response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name,
+ value=self.value,
+ source='user-override')
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the Configuration instance.')
+ self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_configuration(self):
+ self.log("Deleting the Configuration instance {0}".format(self.name))
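+        # A setting is 'deleted' by writing it back with source='system-default',
+        # which resets it to the server default value.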
+ try:
+ response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name,
+ source='system-default')
+ except CloudError as e:
+ self.log('Error attempting to delete the Configuration instance.')
+ self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
+
+ return True
+
+ def get_configuration(self):
+ self.log("Checking if the Configuration instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("Configuration instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the Configuration instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbConfiguration()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
new file mode 100644
index 00000000..3faac5eb
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Configuration facts
+description:
+ - Get facts of Azure MariaDB Configuration.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - Setting name.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get specific setting of MariaDB Server
+ azure_rm_mariadbconfiguration_info:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: deadlock_timeout
+
+ - name: Get all settings of MariaDB Server
+ azure_rm_mariadbconfiguration_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+settings:
+ description:
+ - A list of dictionaries containing MariaDB Server settings.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Setting resource ID.
+ returned: always
+ type: str
+            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/configurations/deadlock_timeout"
+ name:
+ description:
+ - Setting name.
+ returned: always
+ type: str
+ sample: deadlock_timeout
+ value:
+ description:
+ - Setting value.
+ returned: always
+ type: raw
+ sample: 1000
+ description:
+ description:
+ - Description of the configuration.
+ returned: always
+ type: str
+ sample: Deadlock timeout.
+ source:
+ description:
+ - Source of the configuration.
+ returned: always
+ type: str
+ sample: system-default
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_operation import AzureOperationPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(changed=False)
+ self.mgmt_client = None
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.name is not None:
+ self.results['settings'] = self.get()
+ else:
+ self.results['settings'] = self.list_by_server()
+ return self.results
+
+ def get(self):
+ '''
+ Gets facts of the specified MariaDB Configuration.
+
+        :return: deserialized MariaDB Configuration instance state dictionary
+ '''
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for Configurations.')
+
+ if response is not None:
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_server(self):
+ '''
+        Lists all configuration settings of the specified MariaDB server.
+
+        :return: list of deserialized MariaDB Configuration state dictionaries
+ '''
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
+ server_name=self.server_name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for Configurations.')
+
+ if response is not None:
+ for item in response:
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'server_name': self.server_name,
+ 'id': d['id'],
+ 'name': d['name'],
+ 'value': d['value'],
+ 'description': d['description'],
+ 'source': d['source']
+ }
+ return d
+
+
+def main():
+ AzureRMMariaDbConfigurationInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
new file mode 100644
index 00000000..8492b968
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbdatabase
+version_added: "2.8"
+short_description: Manage MariaDB Database instance
+description:
+ - Create, update and delete instance of MariaDB Database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the database.
+ required: True
+ charset:
+ description:
+ - The charset of the database. Check MariaDB documentation for possible values.
+ - This is only set on creation, use I(force_update) to recreate a database if the values don't match.
+ collation:
+ description:
+ - The collation of the database. Check MariaDB documentation for possible values.
+ - This is only set on creation, use I(force_update) to recreate a database if the values don't match.
+ force_update:
+ description:
+ - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set.
+ - When set to C(false), no change will occur to the database even if any of the properties do not match.
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: db1
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1
+name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: db1
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbDatabase(AzureRMModuleBase):
+ """Configuration class for an Azure RM MariaDB Database resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ charset=dict(
+ type='str'
+ ),
+ collation=dict(
+ type='str'
+ ),
+ force_update=dict(
+ type='bool',
+ default=False
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ self.force_update = None
+ self.parameters = dict()
+
+ self.results = dict(changed=False)
+ self.mgmt_client = None
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()):
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "charset":
+ self.parameters["charset"] = kwargs[key]
+ elif key == "collation":
+ self.parameters["collation"] = kwargs[key]
+
+ old_response = None
+ response = None
+
+ self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ resource_group = self.get_resource_group(self.resource_group)
+
+ old_response = self.get_mariadbdatabase()
+
+ if not old_response:
+ self.log("MariaDB Database instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("MariaDB Database instance already exists")
+ if self.state == 'absent':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if MariaDB Database instance has to be deleted or may be updated")
+ if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
+ self.to_do = Actions.Update
+ if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
+ self.to_do = Actions.Update
+ if self.to_do == Actions.Update:
+ if self.force_update:
+ if not self.check_mode:
+ self.delete_mariadbdatabase()
+ else:
+ self.fail("Database properties cannot be updated without setting 'force_update' option")
+ self.to_do = Actions.NoAction
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the MariaDB Database instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_mariadbdatabase()
+ self.results['changed'] = True
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("MariaDB Database instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_mariadbdatabase()
+ # make sure the instance is actually deleted; some Azure resources linger
+ # for a while after deletion -- this should really be fixed in Azure
+ while self.get_mariadbdatabase():
+ time.sleep(20)
+ else:
+ self.log("MariaDB Database instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+ self.results["name"] = response["name"]
+
+ return self.results
+
+ def create_update_mariadbdatabase(self):
+ '''
+ Creates or updates MariaDB Database with the specified configuration.
+
+ :return: deserialized MariaDB Database instance state dictionary
+ '''
+ self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name))
+
+ try:
+ response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name,
+ parameters=self.parameters)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the MariaDB Database instance.')
+ self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_mariadbdatabase(self):
+ '''
+ Deletes specified MariaDB Database instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the MariaDB Database instance {0}".format(self.name))
+ try:
+ response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the MariaDB Database instance.')
+ self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e)))
+
+ return True
+
+ def get_mariadbdatabase(self):
+ '''
+ Gets the properties of the specified MariaDB Database.
+
+ :return: deserialized MariaDB Database instance state dictionary
+ '''
+ self.log("Checking if the MariaDB Database instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("MariaDB Database instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the MariaDB Database instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbDatabase()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
new file mode 100644
index 00000000..e9c99c14
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbdatabase_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Database facts
+description:
+ - Get facts of MariaDB Database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the database.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Database
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: database_name
+
+ - name: List instances of MariaDB Database
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
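+
+ # Illustrative follow-up: register the result and read the facts documented
+ # under RETURN.
+ - name: Get instance of MariaDB Database and inspect its collation
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: database_name
+ register: db_info
+
+ - name: Show the collation of the returned database
+ debug:
+ msg: "{{ db_info.databases[0].collation }}"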
+'''
+
+RETURN = '''
+databases:
+ description:
+ - A list of dictionaries containing facts for MariaDB Databases.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
+ ver/databases/db1"
+ resource_group:
+ description:
+ - Resource group name.
+ returned: always
+ type: str
+ sample: testrg
+ server_name:
+ description:
+ - Server name.
+ returned: always
+ type: str
+ sample: testserver
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: db1
+ charset:
+ description:
+ - The charset of the database.
+ returned: always
+ type: str
+ sample: UTF8
+ collation:
+ description:
+ - The collation of the database.
+ returned: always
+ type: str
+ sample: English_United States.1252
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if (self.resource_group is not None and
+ self.server_name is not None and
+ self.name is not None):
+ self.results['databases'] = self.get()
+ elif (self.resource_group is not None and
+ self.server_name is not None):
+ self.results['databases'] = self.list_by_server()
+ return self.results
+
+ def get(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for Databases.')
+
+ if response is not None:
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_server(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
+ server_name=self.server_name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
+
+ if response is not None:
+ for item in response:
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'server_name': self.server_name,
+ 'name': d['name'],
+ 'charset': d['charset'],
+ 'collation': d['collation']
+ }
+ return d
+
+
+def main():
+ AzureRMMariaDbDatabaseInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
new file mode 100644
index 00000000..1fc8c5e7
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbfirewallrule
+version_added: "2.8"
+short_description: Manage MariaDB firewall rule instance
+description:
+ - Create, update and delete instance of MariaDB firewall rule.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the MariaDB firewall rule.
+ required: True
+ start_ip_address:
+ description:
+ - The start IP address of the MariaDB firewall rule. Must be IPv4 format.
+ end_ip_address:
+ description:
+ - The end IP address of the MariaDB firewall rule. Must be IPv4 format.
+ state:
+ description:
+ - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB firewall rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: rule1
+ start_ip_address: 10.0.0.17
+ end_ip_address: 10.0.0.20
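+
+ # Illustrative sketch: remove the same rule again with state=absent.
+ - name: Delete MariaDB firewall rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: rule1
+ state: absent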
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
+ wallRules/rule1"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbFirewallRule(AzureRMModuleBase):
+ """Configuration class for an Azure RM MariaDB firewall rule resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ start_ip_address=dict(
+ type='str'
+ ),
+ end_ip_address=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ self.start_ip_address = None
+ self.end_ip_address = None
+
+ self.results = dict(changed=False)
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()):
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+
+ old_response = None
+ response = None
+
+ resource_group = self.get_resource_group(self.resource_group)
+
+ old_response = self.get_firewallrule()
+
+ if not old_response:
+ self.log("MariaDB firewall rule instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("MariaDB firewall rule instance already exists")
+ if self.state == 'absent':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated")
+ if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
+ self.to_do = Actions.Update
+ if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
+ self.to_do = Actions.Update
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the MariaDB firewall rule instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_firewallrule()
+
+ if not old_response:
+ self.results['changed'] = True
+ else:
+ self.results['changed'] = (old_response != response)
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("MariaDB firewall rule instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_firewallrule()
+ # make sure the instance is actually deleted; some Azure resources linger
+ # for a while after deletion -- this should really be fixed in Azure
+ while self.get_firewallrule():
+ time.sleep(20)
+ else:
+ self.log("MariaDB firewall rule instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+
+ return self.results
+
+ def create_update_firewallrule(self):
+ '''
+ Creates or updates MariaDB firewall rule with the specified configuration.
+
+ :return: deserialized MariaDB firewall rule instance state dictionary
+ '''
+ self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name))
+
+ try:
+ response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name,
+ start_ip_address=self.start_ip_address,
+ end_ip_address=self.end_ip_address)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the MariaDB firewall rule instance.')
+ self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_firewallrule(self):
+ '''
+ Deletes specified MariaDB firewall rule instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name))
+ try:
+ response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the MariaDB firewall rule instance.')
+ self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e)))
+
+ return True
+
+ def get_firewallrule(self):
+ '''
+ Gets the properties of the specified MariaDB firewall rule.
+
+ :return: deserialized MariaDB firewall rule instance state dictionary
+ '''
+ self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("MariaDB firewall rule instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the MariaDB firewall rule instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbFirewallRule()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
new file mode 100644
index 00000000..ef71be8d
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbfirewallrule_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Firewall Rule facts
+description:
+ - Get facts of Azure MariaDB Firewall Rule.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the server firewall rule.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: firewall_rule_name
+
+ - name: List instances of MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
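+
+ # Illustrative follow-up: register the result and iterate over the rules
+ # documented under RETURN.
+ - name: List rules and show each allowed IP range
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ register: fw_info
+
+ - name: Print start and end address of every rule
+ debug:
+ msg: "{{ item.start_ip_address }} - {{ item.end_ip_address }}"
+ loop: "{{ fw_info.rules }}"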
+'''
+
+RETURN = '''
+rules:
+ description:
+ - A list of dictionaries containing facts for MariaDB Firewall Rule.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
+ wallRules/rule1"
+ server_name:
+ description:
+ - The name of the server.
+ returned: always
+ type: str
+ sample: testserver
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: rule1
+ start_ip_address:
+ description:
+ - The start IP address of the MariaDB firewall rule.
+ returned: always
+ type: str
+ sample: 10.0.0.16
+ end_ip_address:
+ description:
+ - The end IP address of the MariaDB firewall rule.
+ returned: always
+ type: str
+ sample: 10.0.0.18
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_operation import AzureOperationPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.mgmt_client = None
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.name is not None:
+ self.results['rules'] = self.get()
+ else:
+ self.results['rules'] = self.list_by_server()
+ return self.results
+
+ def get(self):
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for FirewallRules.')
+
+ if response is not None:
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_server(self):
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
+ server_name=self.server_name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for FirewallRules.')
+
+ if response is not None:
+ for item in response:
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'id': d['id'],
+ 'server_name': self.server_name,
+ 'name': d['name'],
+ 'start_ip_address': d['start_ip_address'],
+ 'end_ip_address': d['end_ip_address']
+ }
+ return d
+
+
+def main():
+ AzureRMMariaDbFirewallRuleInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py
new file mode 100644
index 00000000..30a29988
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbserver
+version_added: "2.8"
+short_description: Manage MariaDB Server instance
+description:
+ - Create, update and delete instance of MariaDB Server.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ name:
+ description:
+ - The name of the server.
+ required: True
+ sku:
+ description:
+ - The SKU (pricing tier) of the server.
+ suboptions:
+ name:
+ description:
+ - The name of the SKU, typically tier + family + cores, for example C(B_Gen4_1) or C(GP_Gen5_8).
+ tier:
+ description:
+ - The tier of the particular SKU, for example C(Basic).
+ choices:
+ - basic
+ - standard
+ capacity:
+ description:
+ - The scale up/out capacity, representing server's compute units.
+ type: int
+ size:
+ description:
+ - The size code, to be interpreted by resource as appropriate.
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+ storage_mb:
+ description:
+ - The maximum storage allowed for a server.
+ type: int
+ version:
+ description:
+ - Server version.
+ choices:
+ - 10.2
+ enforce_ssl:
+ description:
+ - Enable SSL enforcement.
+ type: bool
+ default: False
+ admin_username:
+ description:
+ - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
+ admin_password:
+ description:
+ - The password of the administrator login.
+ create_mode:
+ description:
+ - Create mode of MariaDB Server.
+ default: Default
+ state:
+ description:
+ - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: myResourceGroup
+ name: testserver
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: eastus
+ storage_mb: 1024
+ enforce_ssl: True
+ version: 10.2
+ admin_username: cloudsa
+ admin_password: password
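+
+ # Illustrative sketch: remove the server again with state=absent.
+ - name: Delete MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: myResourceGroup
+ name: testserver
+ state: absent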
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593
+version:
+ description:
+ - Server version. Possible values include C(10.2).
+ returned: always
+ type: str
+ sample: 10.2
+state:
+ description:
+ - The state of the server that is visible to the user. Possible values include C(Ready), C(Dropping), C(Disabled).
+ returned: always
+ type: str
+ sample: Ready
+fully_qualified_domain_name:
+ description:
+ - The fully qualified domain name of a server.
+ returned: always
+ type: str
+ sample: mariadbsrv1b6dd89593.mariadb.database.azure.com
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbServers(AzureRMModuleBase):
+ """Configuration class for an Azure RM MariaDB Server resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ sku=dict(
+ type='dict'
+ ),
+ location=dict(
+ type='str'
+ ),
+ storage_mb=dict(
+ type='int'
+ ),
+ version=dict(
+ type='str',
+ choices=['10.2']
+ ),
+ enforce_ssl=dict(
+ type='bool',
+ default=False
+ ),
+ create_mode=dict(
+ type='str',
+ default='Default'
+ ),
+ admin_username=dict(
+ type='str'
+ ),
+ admin_password=dict(
+ type='str',
+ no_log=True
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.name = None
+ self.parameters = dict()
+ self.tags = None
+
+ self.results = dict(changed=False)
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "sku":
+ ev = kwargs[key]
+ if 'tier' in ev:
+ if ev['tier'] == 'basic':
+ ev['tier'] = 'Basic'
+ elif ev['tier'] == 'standard':
+ ev['tier'] = 'Standard'
+ self.parameters["sku"] = ev
+ elif key == "location":
+ self.parameters["location"] = kwargs[key]
+ elif key == "storage_mb":
+ self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
+ elif key == "version":
+ self.parameters.setdefault("properties", {})["version"] = kwargs[key]
+ elif key == "enforce_ssl":
+ self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
+ elif key == "create_mode":
+ self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
+ elif key == "admin_username":
+ self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
+ elif key == "admin_password":
+ self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
+
+ old_response = None
+ response = None
+
+ resource_group = self.get_resource_group(self.resource_group)
+
+ if "location" not in self.parameters:
+ self.parameters["location"] = resource_group.location
+
+ old_response = self.get_mariadbserver()
+
+ if not old_response:
+ self.log("MariaDB Server instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("MariaDB Server instance already exists")
+ if self.state == 'absent':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if MariaDB Server instance has to be deleted or may be updated")
+ update_tags, newtags = self.update_tags(old_response.get('tags', {}))
+ if update_tags:
+ self.tags = newtags
+ self.to_do = Actions.Update
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the MariaDB Server instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_mariadbserver()
+
+ if not old_response:
+ self.results['changed'] = True
+ else:
+ self.results['changed'] = (old_response != response)
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("MariaDB Server instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_mariadbserver()
+ # make sure the instance is actually deleted; some Azure resources linger
+ # for a while after deletion -- this should really be fixed in Azure
+ while self.get_mariadbserver():
+ time.sleep(20)
+ else:
+ self.log("MariaDB Server instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+ self.results["version"] = response["version"]
+ self.results["state"] = response["user_visible_state"]
+ self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
+
+ return self.results
+
+ def create_update_mariadbserver(self):
+ '''
+ Creates or updates MariaDB Server with the specified configuration.
+
+ :return: deserialized MariaDB Server instance state dictionary
+ '''
+ self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name))
+
+ try:
+ self.parameters['tags'] = self.tags
+ if self.to_do == Actions.Create:
+ response = self.mariadb_client.servers.create(resource_group_name=self.resource_group,
+ server_name=self.name,
+ parameters=self.parameters)
+ else:
+ # structure of parameters for update must be changed
+ self.parameters.update(self.parameters.pop("properties", {}))
+ response = self.mariadb_client.servers.update(resource_group_name=self.resource_group,
+ server_name=self.name,
+ parameters=self.parameters)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the MariaDB Server instance.')
+ self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_mariadbserver(self):
+ '''
+ Deletes specified MariaDB Server instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the MariaDB Server instance {0}".format(self.name))
+ try:
+ response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group,
+ server_name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the MariaDB Server instance.')
+ self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e)))
+
+ return True
+
+ def get_mariadbserver(self):
+ '''
+ Gets the properties of the specified MariaDB Server.
+
+ :return: deserialized MariaDB Server instance state dictionary
+ '''
+ self.log("Checking if the MariaDB Server instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
+ server_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("MariaDB Server instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the MariaDB Server instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbServers()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
new file mode 100644
index 00000000..464aa4d8
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbserver_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Server facts
+description:
+ - Get facts of MariaDB Server.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the server.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Server
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
+ name: server_name
+
+ - name: List instances of MariaDB Server
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
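+
+ # Illustrative sketch: the tag value is an assumption; tags are given as
+ # 'key' or 'key:value' strings, as described under the tags option.
+ - name: List MariaDB Servers carrying a given tag
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
+ tags:
+ - environment:dev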
+'''
+
+RETURN = '''
+servers:
+ description:
+ - A list of dictionaries containing facts for MariaDB servers.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223
+ resource_group:
+ description:
+ - Resource group name.
+ returned: always
+ type: str
+ sample: myResourceGroup
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: myabdud1223
+ location:
+ description:
+ - The location the resource resides in.
+ returned: always
+ type: str
+ sample: eastus
+ sku:
+ description:
+ - The SKU of the server.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - The name of the SKU.
+ returned: always
+ type: str
+ sample: GP_Gen4_2
+ tier:
+ description:
+ - The tier of the particular SKU.
+ returned: always
+ type: str
+ sample: GeneralPurpose
+ capacity:
+ description:
+ - The scale capacity.
+ returned: always
+ type: int
+ sample: 2
+ storage_mb:
+ description:
+ - The maximum storage allowed for a server.
+ returned: always
+ type: int
+ sample: 128000
+ enforce_ssl:
+ description:
+ - Enable SSL enforcement.
+ returned: always
+ type: bool
+ sample: False
+ admin_username:
+ description:
+ - The administrator's login name of a server.
+ returned: always
+ type: str
+ sample: serveradmin
+ version:
+ description:
+ - Server version.
+ returned: always
+ type: str
+ sample: "9.6"
+ user_visible_state:
+ description:
+ - The state of the server that is visible to the user.
+ returned: always
+ type: str
+ sample: Ready
+ fully_qualified_domain_name:
+ description:
+ - The fully qualified domain name of a server.
+ returned: always
+ type: str
+ sample: myabdud1223.mys.database.azure.com
+ tags:
+ description:
+ - Tags assigned to the resource. Dictionary of string:string pairs.
+ type: dict
+ sample: { tag1: abc }
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbServerInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ ),
+ tags=dict(
+ type='list'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.name = None
+ self.tags = None
+ super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if (self.resource_group is not None and
+ self.name is not None):
+ self.results['servers'] = self.get()
+ elif self.resource_group is not None:
+ self.results['servers'] = self.list_by_resource_group()
+ return self.results
+
+ def get(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
+ server_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for MariaDB Server.')
+
+ if response and self.has_tags(response.tags, self.tags):
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_resource_group(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for MariaDB Servers.')
+
+ if response is not None:
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'id': d['id'],
+ 'resource_group': self.resource_group,
+ 'name': d['name'],
+ 'sku': d['sku'],
+ 'location': d['location'],
+ 'storage_mb': d['storage_profile']['storage_mb'],
+ 'version': d['version'],
+ 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
+ 'admin_username': d['administrator_login'],
+ 'user_visible_state': d['user_visible_state'],
+ 'fully_qualified_domain_name': d['fully_qualified_domain_name'],
+ 'tags': d.get('tags')
+ }
+
+ return d
+
+
+def main():
+ AzureRMMariaDbServerInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource.py b/test/support/integration/plugins/modules/azure_rm_resource.py
new file mode 100644
index 00000000..6ea3e3bb
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource
+version_added: "2.6"
+short_description: Create any Azure resource
+description:
+ - Create, update or delete any Azure resource using Azure REST API.
+ - This module gives access to resources that are not supported via Ansible modules.
+ - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+ url:
+ description:
+ - Azure RM Resource URL.
+ api_version:
+ description:
+ - Specific API version to be used.
+ provider:
+ description:
+ - Provider type.
+ - Required if URL is not specified.
+ resource_group:
+ description:
+ - Resource group to be used.
+ - Required if URL is not specified.
+ resource_type:
+ description:
+ - Resource type.
+ - Required if URL is not specified.
+ resource_name:
+ description:
+ - Resource name.
+ - Required if URL Is not specified.
+ subresource:
+ description:
+ - List of subresources.
+ suboptions:
+ namespace:
+ description:
+ - Subresource namespace.
+ type:
+ description:
+ - Subresource type.
+ name:
+ description:
+ - Subresource name.
+ body:
+ description:
+ - The body of the HTTP request/response to the web service.
+ method:
+ description:
+ - The HTTP method of the request or response. It must be uppercase.
+ choices:
+ - GET
+ - PUT
+ - POST
+ - HEAD
+ - PATCH
+ - DELETE
+ - MERGE
+ default: "PUT"
+ status_code:
+ description:
+ - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma-separated list of status codes.
+ type: list
+ default: [ 200, 201, 202 ]
+ idempotency:
+ description:
+ - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body).
+ default: no
+ type: bool
+ polling_timeout:
+ description:
+ - Time in seconds to wait for a long-running operation to complete.
+ default: 0
+ type: int
+ version_added: "2.8"
+ polling_interval:
+ description:
+ - Time in seconds to wait between polls of a long-running operation's status.
+ default: 60
+ type: int
+ version_added: "2.8"
+ state:
+ description:
+ - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+
+'''
+
+EXAMPLES = '''
+ - name: Update scaleset info using azure_rm_resource
+ azure_rm_resource:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+ body: { body }
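+
+ # Illustrative sketch: the resource names and api_version are assumptions.
+ # With state=absent the module issues a DELETE against the constructed URL.
+ - name: Delete a storage account with azure_rm_resource
+ azure_rm_resource:
+ resource_group: myResourceGroup
+ provider: storage
+ resource_type: storageaccounts
+ resource_name: mystorageaccount
+ api_version: "2019-06-01"
+ state: absent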
+'''
+
+RETURN = '''
+response:
+ description:
+ - Response specific to resource type.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183"
+ kind:
+ description:
+ - The kind of storage.
+ type: str
+ returned: always
+ sample: Storage
+ location:
+ description:
+ - The resource location, defaults to location of the resource group.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The storage account name.
+ type: str
+ returned: always
+ sample: staccb57dc95183
+ properties:
+ description:
+ - The storage account's related properties.
+ type: dict
+ returned: always
+ sample: {
+ "creationTime": "2019-06-13T06:34:33.0996676Z",
+ "encryption": {
+ "keySource": "Microsoft.Storage",
+ "services": {
+ "blob": {
+ "enabled": true,
+ "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
+ },
+ "file": {
+ "enabled": true,
+ "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
+ }
+ }
+ },
+ "networkAcls": {
+ "bypass": "AzureServices",
+ "defaultAction": "Allow",
+ "ipRules": [],
+ "virtualNetworkRules": []
+ },
+ "primaryEndpoints": {
+ "blob": "https://staccb57dc95183.blob.core.windows.net/",
+ "file": "https://staccb57dc95183.file.core.windows.net/",
+ "queue": "https://staccb57dc95183.queue.core.windows.net/",
+ "table": "https://staccb57dc95183.table.core.windows.net/"
+ },
+ "primaryLocation": "eastus",
+ "provisioningState": "Succeeded",
+ "secondaryLocation": "westus",
+ "statusOfPrimary": "available",
+ "statusOfSecondary": "available",
+ "supportsHttpsTrafficOnly": false
+ }
+ sku:
+ description:
+ - The storage account SKU.
+ type: dict
+ returned: always
+ sample: {
+ "name": "Standard_GRS",
+ "tier": "Standard"
+ }
+ tags:
+ description:
+ - Resource tags.
+ type: dict
+ returned: always
+ sample: { 'key1': 'value1' }
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Storage/storageAccounts"
+
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.service_client import ServiceClient
+ from msrestazure.tools import resource_id, is_valid_resource_id
+ import json
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMResource(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ url=dict(
+ type='str'
+ ),
+ provider=dict(
+ type='str',
+ ),
+ resource_group=dict(
+ type='str',
+ ),
+ resource_type=dict(
+ type='str',
+ ),
+ resource_name=dict(
+ type='str',
+ ),
+ subresource=dict(
+ type='list',
+ default=[]
+ ),
+ api_version=dict(
+ type='str'
+ ),
+ method=dict(
+ type='str',
+ default='PUT',
+ choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"]
+ ),
+ body=dict(
+ type='raw'
+ ),
+ status_code=dict(
+ type='list',
+ default=[200, 201, 202]
+ ),
+ idempotency=dict(
+ type='bool',
+ default=False
+ ),
+ polling_timeout=dict(
+ type='int',
+ default=0
+ ),
+ polling_interval=dict(
+ type='int',
+ default=60
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False,
+ response=None
+ )
+ self.mgmt_client = None
+ self.url = None
+ self.api_version = None
+ self.provider = None
+ self.resource_group = None
+ self.resource_type = None
+ self.resource_name = None
+ self.subresource_type = None
+ self.subresource_name = None
+ self.subresource = []
+ self.method = None
+ self.status_code = []
+ self.idempotency = False
+ self.polling_timeout = None
+ self.polling_interval = None
+ self.state = None
+ self.body = None
+ super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.state == 'absent':
+ self.method = 'DELETE'
+ self.status_code.append(204)
+
+ if self.url is None:
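+ # No explicit URL was given: assemble a full resource ID from the parts.
+ # 'orphan' holds a trailing type segment that has no matching name (for
+ # example when listing a whole resource type) so it can be appended to the
+ # generated resource ID afterwards.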
+ orphan = None
+ rargs = dict()
+ rargs['subscription'] = self.subscription_id
+ rargs['resource_group'] = self.resource_group
+ if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
+ rargs['namespace'] = "Microsoft." + self.provider
+ else:
+ rargs['namespace'] = self.provider
+
+ if self.resource_type is not None and self.resource_name is not None:
+ rargs['type'] = self.resource_type
+ rargs['name'] = self.resource_name
+ for i in range(len(self.subresource)):
+ resource_ns = self.subresource[i].get('namespace', None)
+ resource_type = self.subresource[i].get('type', None)
+ resource_name = self.subresource[i].get('name', None)
+ if resource_type is not None and resource_name is not None:
+ rargs['child_namespace_' + str(i + 1)] = resource_ns
+ rargs['child_type_' + str(i + 1)] = resource_type
+ rargs['child_name_' + str(i + 1)] = resource_name
+ else:
+ orphan = resource_type
+ else:
+ orphan = self.resource_type
+
+ self.url = resource_id(**rargs)
+
+ if orphan is not None:
+ self.url += '/' + orphan
+
+ # if api_version was not specified, get latest one
+ if not self.api_version:
+ try:
+ # extract provider and resource type
+ if "/providers/" in self.url:
+ provider = self.url.split("/providers/")[1].split("/")[0]
+ resourceType = self.url.split(provider + "/")[1].split("/")[0]
+ url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
+ api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
+ for rt in api_versions['resourceTypes']:
+ if rt['resourceType'].lower() == resourceType.lower():
+ self.api_version = rt['apiVersions'][0]
+ break
+ else:
+ # if there's no provider in the URL, assume Microsoft.Resources
+ self.api_version = '2018-05-01'
+ if not self.api_version:
+ self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
+ except Exception as exc:
+ self.fail("Failed to obtain API version: {0}".format(str(exc)))
+
+ query_parameters = {}
+ query_parameters['api-version'] = self.api_version
+
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+
+ needs_update = True
+ response = None
+
+ if self.idempotency:
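+ # The idempotency check is approximate: fetch the current resource and see
+ # whether merging the requested body into it would change anything. A 404
+ # means the resource does not exist yet.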
+ original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0)
+
+ if original.status_code == 404:
+ if self.state == 'absent':
+ needs_update = False
+ else:
+ try:
+ response = json.loads(original.text)
+ needs_update = (dict_merge(response, self.body) != response)
+ except Exception:
+ pass
+
+ if needs_update:
+ response = self.mgmt_client.query(self.url,
+ self.method,
+ query_parameters,
+ header_parameters,
+ self.body,
+ self.status_code,
+ self.polling_timeout,
+ self.polling_interval)
+ if self.state == 'present':
+ try:
+ response = json.loads(response.text)
+ except Exception:
+ response = response.text
+ else:
+ response = None
+
+ self.results['response'] = response
+ self.results['changed'] = needs_update
+
+ return self.results
+
+
+def main():
+ AzureRMResource()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource_info.py b/test/support/integration/plugins/modules/azure_rm_resource_info.py
new file mode 100644
index 00000000..f797f662
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource_info.py
@@ -0,0 +1,432 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource_info
+version_added: "2.9"
+short_description: Generic facts of Azure resources
+description:
+ - Obtain facts of any resource using Azure REST API.
+ - This module gives access to resources that are not supported via Ansible modules.
+ - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+ url:
+ description:
+ - Azure RM Resource URL.
+ api_version:
+ description:
+ - Specific API version to be used.
+ provider:
+ description:
+ - Provider type.
+ - Required if URL is not specified.
+ resource_group:
+ description:
+ - Resource group to be used.
+ - Required if URL is not specified.
+ resource_type:
+ description:
+ - Resource type.
+ resource_name:
+ description:
+ - Resource name.
+ subresource:
+ description:
+ - List of subresources.
+ suboptions:
+ namespace:
+ description:
+ - Subresource namespace.
+ type:
+ description:
+ - Subresource type.
+ name:
+ description:
+ - Subresource name.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+
+'''
+
+EXAMPLES = '''
+ - name: Get scaleset info
+ azure_rm_resource_info:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+
+ - name: Query all the resources in the resource group
+ azure_rm_resource_info:
+ resource_group: "{{ resource_group }}"
+ resource_type: resources
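+
+ # Illustrative sketch: the subresource names and api_version are assumptions.
+ - name: Get a virtual machine extension via subresource
+ azure_rm_resource_info:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachines
+ resource_name: myVM
+ subresource:
+ - type: extensions
+ name: myExtension
+ api_version: "2018-06-01"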
+'''
+
+RETURN = '''
+response:
+ description:
+ - Response specific to resource type.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - ID of the Azure resource.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM"
+ location:
+ description:
+ - Resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - Resource name.
+ type: str
+ returned: always
+ sample: myVM
+ properties:
+ description:
+ - Specifies the virtual machine's properties.
+ type: complex
+ returned: always
+ contains:
+ diagnosticsProfile:
+ description:
+ - Specifies the boot diagnostic settings state.
+ type: complex
+ returned: always
+ contains:
+ bootDiagnostics:
+ description:
+ - A debugging feature that lets you view console output and a screenshot to diagnose VM status.
+ type: dict
+ returned: always
+ sample: {
+ "enabled": true,
+ "storageUri": "https://vxisurgdiag.blob.core.windows.net/"
+ }
+ hardwareProfile:
+ description:
+ - Specifies the hardware settings for the virtual machine.
+ type: dict
+ returned: always
+ sample: {
+ "vmSize": "Standard_D2s_v3"
+ }
+ networkProfile:
+ description:
+ - Specifies the network interfaces of the virtual machine.
+ type: complex
+ returned: always
+ contains:
+ networkInterfaces:
+ description:
+ - Describes a network interface reference.
+ type: list
+ returned: always
+ sample:
+ - {
+ "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441"
+ }
+ osProfile:
+ description:
+ - Specifies the operating system settings for the virtual machine.
+ type: complex
+ returned: always
+ contains:
+ adminUsername:
+ description:
+ - Specifies the name of the administrator account.
+ type: str
+ returned: always
+ sample: azureuser
+ allowExtensionOperations:
+ description:
+ - Specifies whether extension operations should be allowed on the virtual machine.
+ - This may only be set to False when no extensions are present on the virtual machine.
+ type: bool
+ returned: always
+ sample: true
+ computerName:
+ description:
+ - Specifies the host OS name of the virtual machine.
+ type: str
+ returned: always
+ sample: myVM
+ requireGuestProvisionSignal:
+ description:
+ - Specifies whether the host requires a guest provision signal.
+ type: bool
+ returned: always
+ sample: true
+ secrets:
+ description:
+ - Specifies set of certificates that should be installed onto the virtual machine.
+ type: list
+ returned: always
+ sample: []
+ linuxConfiguration:
+ description:
+ - Specifies the Linux operating system settings on the virtual machine.
+ type: dict
+ returned: when OS type is Linux
+ sample: {
+ "disablePasswordAuthentication": false,
+ "provisionVMAgent": true
+ }
+ provisioningState:
+ description:
+ - The provisioning state.
+ type: str
+ returned: always
+ sample: Succeeded
+ vmID:
+ description:
+ - Specifies the VM unique ID, a 128-bit identifier that is encoded and stored in the SMBIOS of all Azure IaaS VMs.
+ - It can be read using platform BIOS commands.
+ type: str
+ returned: always
+ sample: "eb86d9bb-6725-4787-a487-2e497d5b340c"
+ storageProfile:
+ description:
+ - Specifies the storage settings for the virtual machine disks.
+ type: complex
+ returned: always
+ contains:
+ dataDisks:
+ description:
+ - Specifies the parameters that are used to add a data disk to a virtual machine.
+ type: list
+ returned: always
+ sample:
+ - {
+ "caching": "None",
+ "createOption": "Attach",
+ "diskSizeGB": 1023,
+ "lun": 2,
+ "managedDisk": {
+ "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2",
+ "storageAccountType": "StandardSSD_LRS"
+ },
+ "name": "testdisk2"
+ }
+ - {
+ "caching": "None",
+ "createOption": "Attach",
+ "diskSizeGB": 1023,
+ "lun": 1,
+ "managedDisk": {
+ "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3",
+ "storageAccountType": "StandardSSD_LRS"
+ },
+ "name": "testdisk3"
+ }
+
+ imageReference:
+ description:
+ - Specifies information about the image to use.
+ type: dict
+ returned: always
+ sample: {
+ "offer": "UbuntuServer",
+ "publisher": "Canonical",
+ "sku": "18.04-LTS",
+ "version": "latest"
+ }
+ osDisk:
+ description:
+ - Specifies information about the operating system disk used by the virtual machine.
+ type: dict
+ returned: always
+ sample: {
+ "caching": "ReadWrite",
+ "createOption": "FromImage",
+ "diskSizeGB": 30,
+ "managedDisk": {
+ "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx",
+ "storageAccountType": "Premium_LRS"
+ },
+ "name": "myVM_disk1_xxx",
+ "osType": "Linux"
+ }
+ type:
+ description:
+ - The resource type of the virtual machine.
+ type: str
+ returned: always
+ sample: "Microsoft.Compute/virtualMachines"
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.service_client import ServiceClient
+ from msrestazure.tools import resource_id, is_valid_resource_id
+ import json
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMResourceInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ url=dict(
+ type='str'
+ ),
+ provider=dict(
+ type='str'
+ ),
+ resource_group=dict(
+ type='str'
+ ),
+ resource_type=dict(
+ type='str'
+ ),
+ resource_name=dict(
+ type='str'
+ ),
+ subresource=dict(
+ type='list',
+ default=[]
+ ),
+ api_version=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ response=[]
+ )
+ self.mgmt_client = None
+ self.url = None
+ self.api_version = None
+ self.provider = None
+ self.resource_group = None
+ self.resource_type = None
+ self.resource_name = None
+ self.subresource = []
+ super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_resource_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.url is None:
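+ # Build the URL from its parts. A resource_type supplied without a matching
+ # resource_name is kept aside as an 'orphan' and appended as a trailing
+ # segment, e.g. to list all resources of that type.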
+ orphan = None
+ rargs = dict()
+ rargs['subscription'] = self.subscription_id
+ rargs['resource_group'] = self.resource_group
+ if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
+ rargs['namespace'] = "Microsoft." + self.provider
+ else:
+ rargs['namespace'] = self.provider
+
+ if self.resource_type is not None and self.resource_name is not None:
+ rargs['type'] = self.resource_type
+ rargs['name'] = self.resource_name
+ for i in range(len(self.subresource)):
+ resource_ns = self.subresource[i].get('namespace', None)
+ resource_type = self.subresource[i].get('type', None)
+ resource_name = self.subresource[i].get('name', None)
+ if resource_type is not None and resource_name is not None:
+ rargs['child_namespace_' + str(i + 1)] = resource_ns
+ rargs['child_type_' + str(i + 1)] = resource_type
+ rargs['child_name_' + str(i + 1)] = resource_name
+ else:
+ orphan = resource_type
+ else:
+ orphan = self.resource_type
+
+ self.url = resource_id(**rargs)
+
+ if orphan is not None:
+ self.url += '/' + orphan
+
+ # if api_version was not specified, get latest one
+ if not self.api_version:
+ try:
+ # extract provider and resource type
+ if "/providers/" in self.url:
+ provider = self.url.split("/providers/")[1].split("/")[0]
+ resourceType = self.url.split(provider + "/")[1].split("/")[0]
+ url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
+ api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
+ for rt in api_versions['resourceTypes']:
+ if rt['resourceType'].lower() == resourceType.lower():
+ self.api_version = rt['apiVersions'][0]
+ break
+ else:
+ # if there's no provider in the URL, assume Microsoft.Resources
+ self.api_version = '2018-05-01'
+ if not self.api_version:
+ self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
+ except Exception as exc:
+ self.fail("Failed to obtain API version: {0}".format(str(exc)))
+
+ self.results['url'] = self.url
+
+ query_parameters = {}
+ query_parameters['api-version'] = self.api_version
+
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ skiptoken = None
+
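+ # Page through list results: a response may carry items in 'value' plus a
+ # 'nextLink' continuation token; keep querying until no token remains.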
+ while True:
+ if skiptoken:
+ query_parameters['skiptoken'] = skiptoken
+ response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0)
+ try:
+ response = json.loads(response.text)
+ if isinstance(response, dict):
+ if response.get('value'):
+ self.results['response'] = self.results['response'] + response['value']
+ skiptoken = response.get('nextLink')
+ else:
+ self.results['response'] = self.results['response'] + [response]
+ except Exception as e:
+ self.fail('Failed to parse response: ' + str(e))
+ if not skiptoken:
+ break
+ return self.results
+
+
+def main():
+ AzureRMResourceInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_storageaccount.py b/test/support/integration/plugins/modules/azure_rm_storageaccount.py
new file mode 100644
index 00000000..d4158bbd
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_storageaccount.py
@@ -0,0 +1,684 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccount
+version_added: "2.1"
+short_description: Manage Azure storage accounts
+description:
+ - Create, update or delete a storage account.
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ aliases:
+ - resource_group_name
+ name:
+ description:
+ - Name of the storage account to update or create.
+ state:
+ description:
+ - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account.
+ default: present
+ choices:
+ - absent
+ - present
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ account_type:
+ description:
+ - Type of storage account. Required when creating a storage account.
+ - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types.
+ - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS).
+ choices:
+ - Premium_LRS
+ - Standard_GRS
+ - Standard_LRS
+ - StandardSSD_LRS
+ - Standard_RAGRS
+ - Standard_ZRS
+ - Premium_ZRS
+ aliases:
+ - type
+ custom_domain:
+ description:
+ - User domain assigned to the storage account.
+ - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source.
+ - Only one custom domain is supported per storage account at this time.
+ - To clear the existing custom domain, use an empty string for the custom domain name property.
+ - Can be added to an existing storage account. Will be ignored during storage account creation.
+ aliases:
+ - custom_dns_domain_suffix
+ kind:
+ description:
+ - The kind of storage.
+ default: 'Storage'
+ choices:
+ - Storage
+ - StorageV2
+ - BlobStorage
+ version_added: "2.2"
+ access_tier:
+ description:
+ - The access tier for this storage account. Required when I(kind=BlobStorage).
+ choices:
+ - Hot
+ - Cool
+ version_added: "2.4"
+ force_delete_nonempty:
+ description:
+ - Attempt deletion if resource already exists and cannot be updated.
+ type: bool
+ aliases:
+ - force
+ https_only:
+ description:
+ - Allows only HTTPS traffic to the storage service when set to C(true).
+ type: bool
+ version_added: "2.8"
+ blob_cors:
+ description:
+ - Specifies CORS rules for the Blob service.
+ - You can include up to five CorsRule elements in the request.
+ - If no blob_cors elements are included in the argument list, nothing about CORS will be changed.
+ - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]).
+ type: list
+ version_added: "2.8"
+ suboptions:
+ allowed_origins:
+ description:
+ - A list of origin domains that will be allowed via CORS, or "*" to allow all domains.
+ type: list
+ required: true
+ allowed_methods:
+ description:
+ - A list of HTTP methods that are allowed to be executed by the origin.
+ type: list
+ required: true
+ max_age_in_seconds:
+ description:
+ - The number of seconds that the client/browser should cache a preflight response.
+ type: int
+ required: true
+ exposed_headers:
+ description:
+ - A list of response headers to expose to CORS clients.
+ type: list
+ required: true
+ allowed_headers:
+ description:
+ - A list of headers allowed to be part of the cross-origin request.
+ type: list
+ required: true
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = '''
+ - name: remove account, if it exists
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ state: absent
+
+ - name: create an account
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ type: Standard_RAGRS
+ tags:
+ testing: testing
+ delete: on-exit
+
+ - name: create an account with blob CORS
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh002
+ type: Standard_RAGRS
+ blob_cors:
+ - allowed_origins:
+ - http://www.example.com/
+ allowed_methods:
+ - GET
+ - POST
+ allowed_headers:
+ - x-ms-meta-data*
+ - x-ms-meta-target*
+ - x-ms-meta-abc
+ exposed_headers:
+ - x-ms-meta-*
+ max_age_in_seconds: 200
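+
+ # A hypothetical sketch (the domain is a placeholder) assigning a custom
+ # domain to an existing account; custom_domain is ignored at creation time:
+ - name: Add a custom domain to an existing account
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ custom_domain:
+ name: blobs.example.com
+ use_sub_domain: false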
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - Current state of the storage account.
+ returned: always
+ type: complex
+ contains:
+ account_type:
+ description:
+ - Type of storage account.
+ returned: always
+ type: str
+ sample: Standard_RAGRS
+ custom_domain:
+ description:
+ - User domain assigned to the storage account.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - CNAME source.
+ returned: always
+ type: str
+ sample: testaccount
+ use_sub_domain:
+ description:
+ - Whether to use sub domain.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003"
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ returned: always
+ type: str
+ sample: eastus2
+ name:
+ description:
+ - Name of the storage account to update or create.
+ returned: always
+ type: str
+ sample: clh0003
+ primary_endpoints:
+ description:
+ - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location.
+ returned: always
+ type: dict
+ sample: {
+ "blob": "https://clh0003.blob.core.windows.net/",
+ "queue": "https://clh0003.queue.core.windows.net/",
+ "table": "https://clh0003.table.core.windows.net/"
+ }
+ primary_location:
+ description:
+ - The location of the primary data center for the storage account.
+ returned: always
+ type: str
+ sample: eastus2
+ provisioning_state:
+ description:
+ - The status of the storage account.
+ - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded).
+ returned: always
+ type: str
+ sample: Succeeded
+ resource_group:
+ description:
+ - The resource group's name.
+ returned: always
+ type: str
+ sample: Testing
+ secondary_endpoints:
+ description:
+ - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location.
+ returned: always
+ type: dict
+ sample: {
+ "blob": "https://clh0003-secondary.blob.core.windows.net/",
+ "queue": "https://clh0003-secondary.queue.core.windows.net/",
+ "table": "https://clh0003-secondary.table.core.windows.net/"
+ }
+ secondary_location:
+ description:
+ - The location of the geo-replicated secondary for the storage account.
+ returned: always
+ type: str
+ sample: centralus
+ status_of_primary:
+ description:
+ - The status of the primary location of the storage account; either C(available) or C(unavailable).
+ returned: always
+ type: str
+ sample: available
+ status_of_secondary:
+ description:
+ - The status of the secondary location of the storage account; either C(available) or C(unavailable).
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description:
+ - Resource tags.
+ returned: always
+ type: dict
+ sample: { 'tags1': 'value1' }
+ type:
+ description:
+ - The storage account type.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts"
+'''
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.storage.cloudstorageaccount import CloudStorageAccount
+ from azure.common import AzureMissingResourceHttpError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+import copy
+from ansible.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase
+from ansible.module_utils._text import to_native
+
+cors_rule_spec = dict(
+ allowed_origins=dict(type='list', elements='str', required=True),
+ allowed_methods=dict(type='list', elements='str', required=True),
+ max_age_in_seconds=dict(type='int', required=True),
+ exposed_headers=dict(type='list', elements='str', required=True),
+ allowed_headers=dict(type='list', elements='str', required=True),
+)
+
+
+def compare_cors(cors1, cors2):
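+ # Order-insensitive comparison: every rule in cors1 must match a distinct
+ # rule in cors2, with list-valued fields compared as sets.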
+ if len(cors1) != len(cors2):
+ return False
+ copy2 = copy.copy(cors2)
+ for rule1 in cors1:
+ matched = False
+ for rule2 in copy2:
+ if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds']
+ and set(rule1['allowed_methods']) == set(rule2['allowed_methods'])
+ and set(rule1['allowed_origins']) == set(rule2['allowed_origins'])
+ and set(rule1['allowed_headers']) == set(rule2['allowed_headers'])
+ and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])):
+ matched = True
+ copy2.remove(rule2)
+ if not matched:
+ return False
+ return True
+
+
+class AzureRMStorageAccount(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ account_type=dict(type='str',
+ choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'StandardSSD_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS'],
+ aliases=['type']),
+ custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']),
+ location=dict(type='str'),
+ name=dict(type='str', required=True),
+ resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
+ state=dict(default='present', choices=['present', 'absent']),
+ force_delete_nonempty=dict(type='bool', default=False, aliases=['force']),
+ tags=dict(type='dict'),
+ kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage']),
+ access_tier=dict(type='str', choices=['Hot', 'Cool']),
+ https_only=dict(type='bool', default=False),
+ blob_cors=dict(type='list', options=cors_rule_spec, elements='dict')
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.account_dict = None
+ self.resource_group = None
+ self.name = None
+ self.state = None
+ self.location = None
+ self.account_type = None
+ self.custom_domain = None
+ self.tags = None
+ self.force_delete_nonempty = None
+ self.kind = None
+ self.access_tier = None
+ self.https_only = None
+ self.blob_cors = None
+
+ super(AzureRMStorageAccount, self).__init__(self.module_arg_spec,
+ supports_check_mode=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ setattr(self, key, kwargs[key])
+
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ # Set default location
+ self.location = resource_group.location
+
+ if len(self.name) < 3 or len(self.name) > 24:
+ self.fail("Parameter error: name length must be between 3 and 24 characters.")
+
+ if self.custom_domain:
+ if self.custom_domain.get('name', None) is None:
+ self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.")
+ if self.custom_domain.get('use_sub_domain', None) is None:
+ self.fail("Parameter error: expecting custom_domain to have a use_sub_domain "
+ "attribute of type boolean.")
+
+ self.account_dict = self.get_account()
+
+ if self.state == 'present' and self.account_dict and \
+ self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
+ self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
+ "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))
+
+ if self.account_dict is not None:
+ self.results['state'] = self.account_dict
+ else:
+ self.results['state'] = dict()
+
+ if self.state == 'present':
+ if not self.account_dict:
+ self.results['state'] = self.create_account()
+ else:
+ self.update_account()
+ elif self.state == 'absent' and self.account_dict:
+ self.delete_account()
+ self.results['state'] = dict(Status='Deleted')
+
+ return self.results
+
+ def check_name_availability(self):
+ self.log('Checking name availability for {0}'.format(self.name))
+ try:
+ response = self.storage_client.storage_accounts.check_name_availability(self.name)
+ except CloudError as e:
+ self.log('Error attempting to validate name.')
+ self.fail("Error checking name availability: {0}".format(str(e)))
+ if not response.name_available:
+ self.log('Error name not available.')
+ self.fail("{0} - {1}".format(response.message, response.reason))
+
+ def get_account(self):
+ self.log('Get properties for account {0}'.format(self.name))
+ account_obj = None
+ blob_service_props = None
+ account_dict = None
+
+ try:
+ account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
+ blob_service_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name)
+ except CloudError:
+ pass
+
+ if account_obj:
+ account_dict = self.account_obj_to_dict(account_obj, blob_service_props)
+
+ return account_dict
+
+ def account_obj_to_dict(self, account_obj, blob_service_props=None):
+ account_dict = dict(
+ id=account_obj.id,
+ name=account_obj.name,
+ location=account_obj.location,
+ resource_group=self.resource_group,
+ type=account_obj.type,
+ access_tier=(account_obj.access_tier.value
+ if account_obj.access_tier is not None else None),
+ sku_tier=account_obj.sku.tier.value,
+ sku_name=account_obj.sku.name.value,
+ provisioning_state=account_obj.provisioning_state.value,
+ secondary_location=account_obj.secondary_location,
+ status_of_primary=(account_obj.status_of_primary.value
+ if account_obj.status_of_primary is not None else None),
+ status_of_secondary=(account_obj.status_of_secondary.value
+ if account_obj.status_of_secondary is not None else None),
+ primary_location=account_obj.primary_location,
+ https_only=account_obj.enable_https_traffic_only
+ )
+ account_dict['custom_domain'] = None
+ if account_obj.custom_domain:
+ account_dict['custom_domain'] = dict(
+ name=account_obj.custom_domain.name,
+ use_sub_domain=account_obj.custom_domain.use_sub_domain
+ )
+
+ account_dict['primary_endpoints'] = None
+ if account_obj.primary_endpoints:
+ account_dict['primary_endpoints'] = dict(
+ blob=account_obj.primary_endpoints.blob,
+ queue=account_obj.primary_endpoints.queue,
+ table=account_obj.primary_endpoints.table
+ )
+ account_dict['secondary_endpoints'] = None
+ if account_obj.secondary_endpoints:
+ account_dict['secondary_endpoints'] = dict(
+ blob=account_obj.secondary_endpoints.blob,
+ queue=account_obj.secondary_endpoints.queue,
+ table=account_obj.secondary_endpoints.table
+ )
+ account_dict['tags'] = None
+ if account_obj.tags:
+ account_dict['tags'] = account_obj.tags
+ if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
+ account_dict['blob_cors'] = [dict(
+ allowed_origins=[to_native(y) for y in x.allowed_origins],
+ allowed_methods=[to_native(y) for y in x.allowed_methods],
+ max_age_in_seconds=x.max_age_in_seconds,
+ exposed_headers=[to_native(y) for y in x.exposed_headers],
+ allowed_headers=[to_native(y) for y in x.allowed_headers]
+ ) for x in blob_service_props.cors.cors_rules]
+ return account_dict
+
+ def update_account(self):
+ self.log('Update storage account {0}'.format(self.name))
+ if bool(self.https_only) != bool(self.account_dict.get('https_only')):
+ self.results['changed'] = True
+ self.account_dict['https_only'] = self.https_only
+ if not self.check_mode:
+ try:
+ parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only)
+ self.storage_client.storage_accounts.update(self.resource_group,
+ self.name,
+ parameters)
+ except Exception as exc:
+ self.fail("Failed to update account type: {0}".format(str(exc)))
+
+ if self.account_type:
+ if self.account_type != self.account_dict['sku_name']:
+ # change the account type
+ SkuName = self.storage_models.SkuName
+ if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]:
+ self.fail("Storage accounts of type {0} and {1} cannot be changed.".format(
+ SkuName.premium_lrs, SkuName.standard_zrs))
+ if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]:
+ self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format(
+ self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs))
+
+ self.results['changed'] = True
+ self.account_dict['sku_name'] = self.account_type
+
+ if self.results['changed'] and not self.check_mode:
+ # Perform the update. The API only allows changing one attribute per call.
+ try:
+ self.log("sku_name: %s" % self.account_dict['sku_name'])
+ self.log("sku_tier: %s" % self.account_dict['sku_tier'])
+ sku = self.storage_models.Sku(name=SkuName(self.account_dict['sku_name']))
+ sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier'])
+ parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku)
+ self.storage_client.storage_accounts.update(self.resource_group,
+ self.name,
+ parameters)
+ except Exception as exc:
+ self.fail("Failed to update account type: {0}".format(str(exc)))
+
+ if self.custom_domain:
+ if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain:
+ self.results['changed'] = True
+ self.account_dict['custom_domain'] = self.custom_domain
+
+ if self.results['changed'] and not self.check_mode:
+ new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'],
+ use_sub_domain=self.custom_domain['use_sub_domain'])
+ parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain)
+ try:
+ self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
+ except Exception as exc:
+ self.fail("Failed to update custom domain: {0}".format(str(exc)))
+
+ if self.access_tier:
+ if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier:
+ self.results['changed'] = True
+ self.account_dict['access_tier'] = self.access_tier
+
+ if self.results['changed'] and not self.check_mode:
+ parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier)
+ try:
+ self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
+ except Exception as exc:
+ self.fail("Failed to update access tier: {0}".format(str(exc)))
+
+ update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
+ if update_tags:
+ self.results['changed'] = True
+ if not self.check_mode:
+ parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags'])
+ try:
+ self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
+ except Exception as exc:
+ self.fail("Failed to update tags: {0}".format(str(exc)))
+
+ if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors):
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.set_blob_cors()
+
+ def create_account(self):
+ self.log("Creating account {0}".format(self.name))
+
+ if not self.location:
+ self.fail('Parameter error: location required when creating a storage account.')
+
+ if not self.account_type:
+ self.fail('Parameter error: account_type required when creating a storage account.')
+
+ if not self.access_tier and self.kind == 'BlobStorage':
+ self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.')
+
+ self.check_name_availability()
+ self.results['changed'] = True
+
+ if self.check_mode:
+ account_dict = dict(
+ location=self.location,
+ account_type=self.account_type,
+ name=self.name,
+ resource_group=self.resource_group,
+ enable_https_traffic_only=self.https_only,
+ tags=dict()
+ )
+ if self.tags:
+ account_dict['tags'] = self.tags
+ if self.blob_cors:
+ account_dict['blob_cors'] = self.blob_cors
+ return account_dict
+ sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type))
+ sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \
+ self.storage_models.SkuTier.premium
+ parameters = self.storage_models.StorageAccountCreateParameters(sku=sku,
+ kind=self.kind,
+ location=self.location,
+ tags=self.tags,
+ access_tier=self.access_tier)
+ self.log(str(parameters))
+ try:
+ poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters)
+ self.get_poller_result(poller)
+ except CloudError as e:
+ self.log('Error creating storage account.')
+ self.fail("Failed to create account: {0}".format(str(e)))
+ if self.blob_cors:
+ self.set_blob_cors()
+ # the poller doesn't actually return anything
+ return self.get_account()
+
+ def delete_account(self):
+ if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \
+ not self.force_delete_nonempty and self.account_has_blob_containers():
+ self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.")
+
+ self.log('Delete storage account {0}'.format(self.name))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ status = self.storage_client.storage_accounts.delete(self.resource_group, self.name)
+ self.log("delete status: ")
+ self.log(str(status))
+ except CloudError as e:
+ self.fail("Failed to delete the account: {0}".format(str(e)))
+ return True
+
+ def account_has_blob_containers(self):
+ '''
+ If there are blob containers, then there are likely VMs depending on this account and it should
+ not be deleted.
+ '''
+ self.log('Checking for existing blob containers')
+ blob_service = self.get_blob_client(self.resource_group, self.name)
+ try:
+ response = blob_service.list_containers()
+ except AzureMissingResourceHttpError:
+ # No blob storage available?
+ return False
+
+ if len(response.items) > 0:
+ return True
+ return False
+
+ def set_blob_cors(self):
+ try:
+ cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors])
+ self.storage_client.blob_services.set_service_properties(self.resource_group,
+ self.name,
+ self.storage_models.BlobServiceProperties(cors=cors_rules))
+ except Exception as exc:
+ self.fail("Failed to set CORS rules: {0}".format(str(exc)))
+
+
+def main():
+ AzureRMStorageAccount()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webapp.py b/test/support/integration/plugins/modules/azure_rm_webapp.py
new file mode 100644
index 00000000..4f185f45
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webapp.py
@@ -0,0 +1,1070 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webapp
+version_added: "2.7"
+short_description: Manage Web App instances
+description:
+ - Create, update and delete instance of Web App.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+ name:
+ description:
+ - Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
+ required: True
+
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+
+ plan:
+ description:
+ - App service plan. Required for creation.
+ - Can be name of existing app service plan in same resource group as web app.
+ - Can be the resource ID of an existing app service plan. For example
+ /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+ - Can be a dict containing five parameters, defined below.
+ - C(name), name of app service plan.
+ - C(resource_group), resource group of the app service plan.
+ - C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
+ - C(is_linux), whether or not the app service plan is Linux. Defaults to C(False).
+ - C(number_of_workers), number of workers for app service plan.
+
+ frameworks:
+ description:
+ - Set of run time framework settings. Each setting is a dictionary.
+ - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+ suboptions:
+ name:
+ description:
+ - Name of the framework.
+ - Supported framework list for Windows web app and Linux web app is different.
+ - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+ - Windows web apps support multiple frameworks at the same time.
+ - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+ - Linux web apps support only one framework.
+ - Java framework is mutually exclusive with others.
+ choices:
+ - java
+ - net_framework
+ - php
+ - python
+ - ruby
+ - dotnetcore
+ - node
+ version:
+ description:
+ - Version of the framework. For values supported on Linux web apps, see U(https://aka.ms/linux-stacks) for more info.
+ - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+ - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(python) supported value sample, C(2.7), C(3.4).
+ - C(node) supported value sample, C(6.6), C(6.9).
+ - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+ - C(ruby) supported value sample, C(2.3).
+ - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+ settings:
+ description:
+ - List of settings of the framework.
+ suboptions:
+ java_container:
+ description:
+ - Name of Java container.
+ - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
+ java_container_version:
+ description:
+ - Version of Java container.
+ - Supported only when I(frameworks=java).
+ - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty), C(9.1), C(9.3).
+
+ container_settings:
+ description:
+ - Web app container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container, for example C(imagename:tag).
+ registry_server_url:
+ description:
+ - Container registry server URL, for example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+
+ scm_type:
+ description:
+ - Repository type of deployment source, for example C(LocalGit), C(GitHub).
+ - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
+
+ deployment_source:
+ description:
+ - Deployment source for git.
+ suboptions:
+ url:
+ description:
+ - Repository URL of the deployment source.
+
+ branch:
+ description:
+ - The branch name of the repository.
+ startup_file:
+ description:
+ - The web's startup file.
+ - Used only for Linux web apps.
+
+ client_affinity_enabled:
+ description:
+ - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
+ type: bool
+ default: True
+
+ https_only:
+ description:
+ - Configures web site to accept only https requests.
+ type: bool
+
+ dns_registration:
+ description:
+ - If set to C(true), DNS registration of the web app hostname is skipped on creation. Set to C(false) to register.
+ type: bool
+
+ skip_custom_domain_verification:
+ description:
+ - Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
+ type: bool
+
+ ttl_in_seconds:
+ description:
+ - Time to live in seconds for web app default domain name.
+
+ app_settings:
+ description:
+ - Configure web app application settings. Suboptions are in key value pair format.
+
+ purge_app_settings:
+ description:
+ - Purge any existing application settings. Replace web app application settings with app_settings.
+ type: bool
+
+ app_state:
+ description:
+ - Start/Stop/Restart the web app.
+ type: str
+ choices:
+ - started
+ - stopped
+ - restarted
+ default: started
+
+ state:
+ description:
+ - State of the Web App.
+ - Use C(present) to create or update a Web App and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a windows web app with a non-existent app service plan
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWinWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ is_linux: false
+ sku: S1
+
+ - name: Create a docker web app with some app settings, using a docker image
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myDockerWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ is_linux: true
+ sku: S1
+ number_of_workers: 2
+ app_settings:
+ testkey: testvalue
+ testkey2: testvalue2
+ container_settings:
+ name: ansible/ansible:ubuntu1404
+
+ - name: Create a docker web app with private acr registry
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myDockerWebapp
+ plan: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ container_settings:
+ name: ansible/ubuntu1404
+ registry_server_url: myregistry.io
+ registry_server_user: user
+ registry_server_password: pass
+
+ - name: Create a linux web app with Node 6.6 framework
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myLinuxWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ frameworks:
+ - name: "node"
+ version: "6.6"
+
+ - name: Create a windows web app with node, php
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWinWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ frameworks:
+ - name: "node"
+ version: "6.6"
+ - name: "php"
+ version: "7.0"
+
+ - name: Create a stage deployment slot for an existing web app
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWebapp/slots/stage
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+
+ - name: Create a linux web app with java framework
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myLinuxWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ frameworks:
+ - name: "java"
+ version: "8"
+ settings:
+ java_container: "Tomcat"
+ java_container_version: "8.5"
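+
+ # A minimal sketch (names are placeholders) using app_state to stop an
+ # existing web app without changing anything else:
+ - name: Stop an existing web app
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWebapp
+ app_state: stopped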
+'''
+
+RETURN = '''
+azure_webapp:
+ description:
+ - ID of current web app.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site,
+ AppServicePlan, SkuDescription, NameValuePair
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+deployment_source_spec = dict(
+ url=dict(type='str'),
+ branch=dict(type='str')
+)
+
+
+framework_settings_spec = dict(
+ java_container=dict(type='str', required=True),
+ java_container_version=dict(type='str', required=True)
+)
+
+
+framework_spec = dict(
+ name=dict(
+ type='str',
+ required=True,
+ choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
+ version=dict(type='str', required=True),
+ settings=dict(type='dict', options=framework_settings_spec)
+)
+
+
+def _normalize_sku(sku):
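+ # Map the legacy 'FREE'/'SHARED' names to their SKU codes (F1/D1);
+ # any other value passes through uppercased.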
+ if sku is None:
+ return sku
+
+ sku = sku.upper()
+ if sku == 'FREE':
+ return 'F1'
+ elif sku == 'SHARED':
+ return 'D1'
+ return sku
+
+
+def get_sku_name(tier):
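+ # Map an SKU size (e.g. S1) to its pricing tier name (e.g. STANDARD);
+ # returns None for unrecognized sizes.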
+ tier = tier.upper()
+ if tier == 'F1' or tier == "FREE":
+ return 'FREE'
+ elif tier == 'D1' or tier == "SHARED":
+ return 'SHARED'
+ elif tier in ['B1', 'B2', 'B3', 'BASIC']:
+ return 'BASIC'
+ elif tier in ['S1', 'S2', 'S3']:
+ return 'STANDARD'
+ elif tier in ['P1', 'P2', 'P3']:
+ return 'PREMIUM'
+ elif tier in ['P1V2', 'P2V2', 'P3V2']:
+ return 'PREMIUMV2'
+ else:
+ return None
+
+
+def appserviceplan_to_dict(plan):
+ return dict(
+ id=plan.id,
+ name=plan.name,
+ kind=plan.kind,
+ location=plan.location,
+ reserved=plan.reserved,
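+ # in the App Service API, reserved=True denotes a Linux plan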
+ is_linux=plan.reserved,
+ provisioning_state=plan.provisioning_state,
+ tags=plan.tags if plan.tags else None
+ )
+
+
+def webapp_to_dict(webapp):
+ return dict(
+ id=webapp.id,
+ name=webapp.name,
+ location=webapp.location,
+ client_cert_enabled=webapp.client_cert_enabled,
+ enabled=webapp.enabled,
+ reserved=webapp.reserved,
+ client_affinity_enabled=webapp.client_affinity_enabled,
+ server_farm_id=webapp.server_farm_id,
+ host_names_disabled=webapp.host_names_disabled,
+ https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
+ skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
+ ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
+ state=webapp.state,
+ tags=webapp.tags if webapp.tags else None
+ )
+
+
+class Actions:
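+ # lightweight enum of the pending operations collected in self.to_do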
+ CreateOrUpdate, UpdateAppSettings, Delete = range(3)
+
+
+class AzureRMWebApps(AzureRMModuleBase):
+ """Configuration class for an Azure RM Web App resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ location=dict(
+ type='str'
+ ),
+ plan=dict(
+ type='raw'
+ ),
+ frameworks=dict(
+ type='list',
+ elements='dict',
+ options=framework_spec
+ ),
+ container_settings=dict(
+ type='dict',
+ options=container_settings_spec
+ ),
+ scm_type=dict(
+ type='str',
+ ),
+ deployment_source=dict(
+ type='dict',
+ options=deployment_source_spec
+ ),
+ startup_file=dict(
+ type='str'
+ ),
+ client_affinity_enabled=dict(
+ type='bool',
+ default=True
+ ),
+ dns_registration=dict(
+ type='bool'
+ ),
+ https_only=dict(
+ type='bool'
+ ),
+ skip_custom_domain_verification=dict(
+ type='bool'
+ ),
+ ttl_in_seconds=dict(
+ type='int'
+ ),
+ app_settings=dict(
+ type='dict'
+ ),
+ purge_app_settings=dict(
+ type='bool',
+ default=False
+ ),
+ app_state=dict(
+ type='str',
+ choices=['started', 'stopped', 'restarted'],
+ default='started'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ mutually_exclusive = [['container_settings', 'frameworks']]
+
+ self.resource_group = None
+ self.name = None
+ self.location = None
+
+ # update in create_or_update as parameters
+ self.client_affinity_enabled = True
+ self.dns_registration = None
+ self.skip_custom_domain_verification = None
+ self.ttl_in_seconds = None
+ self.https_only = None
+
+ self.tags = None
+
+ # site config, e.g. app settings, ssl
+ self.site_config = dict()
+ self.app_settings = dict()
+ self.app_settings_strDic = None
+
+ # app service plan
+ self.plan = None
+
+ # siteSourceControl
+ self.deployment_source = dict()
+
+ # site, used at creation or update; holds first-level args such as windows/linux and client_affinity
+ self.site = None
+
+ # property for internal usage, not used by the SDK
+ self.container_settings = None
+
+ self.purge_app_settings = False
+ self.app_state = 'started'
+
+ self.results = dict(
+ changed=False,
+ id=None,
+ )
+ self.state = None
+ self.to_do = []
+
+ self.frameworks = None
+
+ # set site_config value from kwargs
+ self.site_config_updatable_properties = ["net_framework_version",
+ "java_version",
+ "php_version",
+ "python_version",
+ "scm_type"]
+
+ # updatable_properties
+ self.updatable_properties = ["client_affinity_enabled",
+ "force_dns_registration",
+ "https_only",
+ "skip_custom_domain_verification",
+ "ttl_in_seconds"]
+
+ self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
+ self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
+
+ super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
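+ # scm_type has no matching instance attribute; route it into site_config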
+ if key == "scm_type":
+ self.site_config[key] = kwargs[key]
+
+ old_response = None
+ response = None
+ to_be_updated = False
+
+ # set location
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ self.location = resource_group.location
+
+ # get existing web app
+ old_response = self.get_webapp()
+
+ if old_response:
+ self.results['id'] = old_response['id']
+
+ if self.state == 'present':
+ if not self.plan and not old_response:
+ self.fail("Please specify plan for newly created web app.")
+
+ if not self.plan:
+ self.plan = old_response['server_farm_id']
+
+ self.plan = self.parse_resource_to_dict(self.plan)
+
+ # get app service plan
+ is_linux = False
+ old_plan = self.get_app_service_plan()
+ if old_plan:
+ is_linux = old_plan['reserved']
+ else:
+ is_linux = self.plan['is_linux'] if 'is_linux' in self.plan else False
+
+ if self.frameworks:
+ # java is mutually exclusive with other frameworks
+ if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
+ self.fail('Java is mutually exclusive with other frameworks.')
+
+ if is_linux:
+ if len(self.frameworks) != 1:
+ self.fail('Only one framework can be specified for a Linux web app.')
+
+ if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
+ self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
+
+ self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
+
+ if self.frameworks[0]['name'] == 'java':
+ if self.frameworks[0]['version'] != '8':
+ self.fail("Linux web app only supports java 8.")
+ if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
+ self.fail("Linux web app only supports tomcat container.")
+
+ if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
+ self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
+ else:
+ self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
+ else:
+ for fx in self.frameworks:
+ if fx.get('name') not in self.supported_windows_frameworks:
+ self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
+ else:
+ self.site_config[fx.get('name') + '_version'] = fx.get('version')
+
+ if 'settings' in fx and fx['settings'] is not None:
+ for key, value in fx['settings'].items():
+ self.site_config[key] = value
+
+ if not self.app_settings:
+ self.app_settings = dict()
+
+ if self.container_settings:
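+ # containers are expressed as linux_fx_version = 'DOCKER|[<registry_server_url>/]<name>'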
+ linux_fx_version = 'DOCKER|'
+
+ if self.container_settings.get('registry_server_url'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
+
+ linux_fx_version += self.container_settings['registry_server_url'] + '/'
+
+ linux_fx_version += self.container_settings['name']
+
+ self.site_config['linux_fx_version'] = linux_fx_version
+
+ if self.container_settings.get('registry_server_user'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
+
+ if self.container_settings.get('registry_server_password'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
+
+ # init site
+ self.site = Site(location=self.location, site_config=self.site_config)
+
+ if self.https_only is not None:
+ self.site.https_only = self.https_only
+
+ if self.client_affinity_enabled:
+ self.site.client_affinity_enabled = self.client_affinity_enabled
+
+ # check if the web app already present in the resource group
+ if not old_response:
+ self.log("Web App instance doesn't exist")
+
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+ self.site.tags = self.tags
+
+ # service plan is required for creation
+ if not self.plan:
+ self.fail("Please specify app service plan in plan parameter.")
+
+ if not old_plan:
+ # no existing service plan, create one
+ if (not self.plan.get('name') or not self.plan.get('sku')):
+ self.fail('Please specify name, is_linux, sku in plan')
+
+ if 'location' not in self.plan:
+ plan_resource_group = self.get_resource_group(self.plan['resource_group'])
+ self.plan['location'] = plan_resource_group.location
+
+ old_plan = self.create_app_service_plan()
+
+ self.site.server_farm_id = old_plan['id']
+
+ # if linux, setup startup_file
+ if old_plan['is_linux']:
+ if hasattr(self, 'startup_file'):
+ self.site_config['app_command_line'] = self.startup_file
+
+ # set app setting
+ if self.app_settings:
+ app_settings = []
+ for key in self.app_settings.keys():
+ app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
+
+ self.site_config['app_settings'] = app_settings
+ else:
+ # existing web app, do update
+ self.log("Web App instance already exists")
+
+ self.log('Result: {0}'.format(old_response))
+
+ update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
+
+ if update_tags:
+ to_be_updated = True
+
+ # check if root level property changed
+ if self.is_updatable_property_changed(old_response):
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+
+ # check if site_config changed
+ old_config = self.get_webapp_configuration()
+
+ if self.is_site_config_changed(old_config):
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+
+ # check if linux_fx_version changed
+ if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+
+ self.app_settings_strDic = self.list_app_settings()
+
+ # purge existing app_settings:
+ if self.purge_app_settings:
+ to_be_updated = True
+ self.app_settings_strDic = dict()
+ self.to_do.append(Actions.UpdateAppSettings)
+
+ # check if app settings changed
+ if self.purge_app_settings or self.is_app_settings_changed():
+ to_be_updated = True
+ self.to_do.append(Actions.UpdateAppSettings)
+
+ if self.app_settings:
+ for key in self.app_settings.keys():
+ self.app_settings_strDic[key] = self.app_settings[key]
+
+ elif self.state == 'absent':
+ if old_response:
+ self.log("Delete Web App instance")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_webapp()
+
+ self.log('Web App instance deleted')
+
+ else:
+ self.fail("Web app {0} not exists.".format(self.name))
+
+ if to_be_updated:
+ self.log('Need to Create/Update web app')
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ if Actions.CreateOrUpdate in self.to_do:
+ response = self.create_update_webapp()
+
+ self.results['id'] = response['id']
+
+ if Actions.UpdateAppSettings in self.to_do:
+ update_response = self.update_app_settings()
+ self.results['id'] = update_response.id
+
+ webapp = None
+ if old_response:
+ webapp = old_response
+ if response:
+ webapp = response
+
+ if webapp:
+ if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
+ (webapp['state'] != 'Running' and self.app_state == 'started') or \
+ self.app_state == 'restarted':
+
+ self.results['changed'] = True
+ if self.check_mode:
+ return self.results
+
+ self.set_webapp_state(self.app_state)
+
+ return self.results
+
+ # compare existing web app with input, determine whether it's an update operation
+ def is_updatable_property_changed(self, existing_webapp):
+ for property_name in self.updatable_properties:
+ if hasattr(self, property_name) and getattr(self, property_name) is not None and \
+ getattr(self, property_name) != existing_webapp.get(property_name, None):
+ return True
+
+ return False
+
+ # compare xxx_version
+ def is_site_config_changed(self, existing_config):
+ for fx_version in self.site_config_updatable_properties:
+ if self.site_config.get(fx_version):
+ if not getattr(existing_config, fx_version) or \
+ getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
+ return True
+
+ return False
+
+ # comparing existing app settings with input, determine whether they have changed
+ def is_app_settings_changed(self):
+ if self.app_settings:
+ if self.app_settings_strDic:
+ for key in self.app_settings.keys():
+ if self.app_settings[key] != self.app_settings_strDic.get(key, None):
+ return True
+ else:
+ return True
+ return False
+
+ # comparing deployment source with input, determine whether it's changed
+ def is_deployment_source_changed(self, existing_webapp):
+ if self.deployment_source:
+ if self.deployment_source.get('url') \
+ and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+ return True
+
+ if self.deployment_source.get('branch') \
+ and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+ return True
+
+ return False
+
+ def create_update_webapp(self):
+ '''
+ Creates or updates Web App with the specified configuration.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Creating / Updating the Web App instance {0}".format(self.name))
+
+ try:
+ skip_dns_registration = self.dns_registration
+ force_dns_registration = None if self.dns_registration is None else not self.dns_registration
+
+ response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
+ name=self.name,
+ site_envelope=self.site,
+ skip_dns_registration=skip_dns_registration,
+ skip_custom_domain_verification=self.skip_custom_domain_verification,
+ force_dns_registration=force_dns_registration,
+ ttl_in_seconds=self.ttl_in_seconds)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the Web App instance.')
+ self.fail(
+ "Error creating the Web App instance: {0}".format(str(exc)))
+ return webapp_to_dict(response)
+
+ def delete_webapp(self):
+ '''
+ Deletes specified Web App instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the Web App instance {0}".format(self.name))
+ try:
+ response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
+ name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the Web App instance.')
+ self.fail(
+ "Error deleting the Web App instance: {0}".format(str(e)))
+
+ return True
+
+ def get_webapp(self):
+ '''
+ Gets the properties of the specified Web App.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Checking if the Web App instance {0} is present".format(self.name))
+
+ response = None
+
+ try:
+ response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
+ name=self.name)
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("Web App instance : {0} found".format(response.name))
+ return webapp_to_dict(response)
+
+ except CloudError as ex:
+ pass
+
+ self.log("Didn't find web app {0} in resource group {1}".format(
+ self.name, self.resource_group))
+
+ return False
+
+ def get_app_service_plan(self):
+ '''
+ Gets app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Get App Service Plan {0}".format(self.plan['name']))
+
+ try:
+ response = self.web_client.app_service_plans.get(
+ resource_group_name=self.plan['resource_group'],
+ name=self.plan['name'])
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("App Service Plan : {0} found".format(response.name))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ pass
+
+ self.log("Didn't find app service plan {0} in resource group {1}".format(
+ self.plan['name'], self.plan['resource_group']))
+
+ return False
+
+ def create_app_service_plan(self):
+ '''
+ Creates app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Create App Service Plan {0}".format(self.plan['name']))
+
+ try:
+ # normalize sku
+ sku = _normalize_sku(self.plan['sku'])
+
+ sku_def = SkuDescription(tier=get_sku_name(
+ sku), name=sku, capacity=(self.plan.get('number_of_workers', None)))
+ plan_def = AppServicePlan(
+ location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None)))
+
+ poller = self.web_client.app_service_plans.create_or_update(
+ self.plan['resource_group'], self.plan['name'], plan_def)
+
+            response = poller
+            if isinstance(poller, LROPoller):
+                response = self.get_poller_result(poller)
+
+ self.log("Response : {0}".format(response))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(
+ self.plan['name'], self.plan['resource_group'], str(ex)))
+
+ def list_app_settings(self):
+ '''
+ List application settings
+ :return: deserialized list response
+ '''
+        self.log("List application settings")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group, name=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def update_app_settings(self):
+ '''
+ Update application settings
+ :return: deserialized updating response
+ '''
+        self.log("Update application settings")
+
+ try:
+ response = self.web_client.web_apps.update_application_settings(
+ resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def create_or_update_source_control(self):
+ '''
+ Update site source control
+ :return: deserialized updating response
+ '''
+ self.log("Update site source control")
+
+ if self.deployment_source is None:
+ return False
+
+ self.deployment_source['is_manual_integration'] = False
+ self.deployment_source['is_mercurial'] = False
+
+ try:
+            response = self.web_client.web_apps.create_or_update_source_control(
+                self.resource_group, self.name, self.deployment_source)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+            self.fail("Failed to update site source control for web app {0} in resource group {1}: {2}".format(
+                self.name, self.resource_group, str(ex)))
+
+ def get_webapp_configuration(self):
+ '''
+ Get web app configuration
+ :return: deserialized web app configuration response
+ '''
+ self.log("Get web app configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration(
+ resource_group_name=self.resource_group, name=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ return False
+
+ def set_webapp_state(self, appstate):
+ '''
+ Start/stop/restart web app
+ :return: deserialized updating response
+ '''
+ try:
+ if appstate == 'started':
+ response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name)
+ elif appstate == 'stopped':
+ response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name)
+ elif appstate == 'restarted':
+ response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name)
+ else:
+ self.fail("Invalid web app state {0}".format(appstate))
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format(
+ appstate, self.name, self.resource_group, request_id, str(ex)))
+
+
+def main():
+ """Main execution"""
+ AzureRMWebApps()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webapp_info.py b/test/support/integration/plugins/modules/azure_rm_webapp_info.py
new file mode 100644
index 00000000..22286803
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webapp_info.py
@@ -0,0 +1,489 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webapp_info
+
+version_added: "2.9"
+
+short_description: Get Azure web app facts
+
+description:
+    - Get facts for a specific web app, for all web apps in a resource group, or for all web apps in the current subscription.
+
+options:
+ name:
+ description:
+ - Only show results for a specific web app.
+ resource_group:
+ description:
+ - Limit results by resource group.
+ return_publish_profile:
+ description:
+            - Indicate whether to return the publishing profile of the web app.
+ default: False
+ type: bool
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Yunge Zhu (@yungezz)
+'''
+
+EXAMPLES = '''
+ - name: Get facts for web app by name
+ azure_rm_webapp_info:
+ resource_group: myResourceGroup
+ name: winwebapp1
+
+ - name: Get facts for web apps in resource group
+ azure_rm_webapp_info:
+ resource_group: myResourceGroup
+
+ - name: Get facts for web apps with tags
+ azure_rm_webapp_info:
+ tags:
+ - testtag
+ - foo:bar
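+
+    # Hedged example: return_publish_profile adds publishing_username and
+    # publishing_password to the returned facts; the web app name is illustrative.
+    - name: Get facts for web app including its publishing profile
+      azure_rm_webapp_info:
+        resource_group: myResourceGroup
+        name: winwebapp1
+        return_publish_profile: true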
+'''
+
+RETURN = '''
+webapps:
+ description:
+ - List of web apps.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - ID of the web app.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp
+ name:
+ description:
+ - Name of the web app.
+ returned: always
+ type: str
+ sample: winwebapp1
+ resource_group:
+ description:
+ - Resource group of the web app.
+ returned: always
+ type: str
+ sample: myResourceGroup
+ location:
+ description:
+ - Location of the web app.
+ returned: always
+ type: str
+ sample: eastus
+ plan:
+ description:
+ - ID of app service plan used by the web app.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan
+ app_settings:
+ description:
+ - App settings of the application. Only returned when web app has app settings.
+ returned: always
+ type: dict
+ sample: {
+ "testkey": "testvalue",
+ "testkey2": "testvalue2"
+ }
+ frameworks:
+ description:
+ - Frameworks of the application. Only returned when web app has frameworks.
+ returned: always
+ type: list
+ sample: [
+ {
+ "name": "net_framework",
+ "version": "v4.0"
+ },
+ {
+ "name": "java",
+ "settings": {
+ "java_container": "tomcat",
+ "java_container_version": "8.5"
+ },
+ "version": "1.7"
+ },
+ {
+ "name": "php",
+ "version": "5.6"
+ }
+ ]
+ availability_state:
+ description:
+ - Availability of this web app.
+ returned: always
+ type: str
+ sample: Normal
+ default_host_name:
+ description:
+ - Host name of the web app.
+ returned: always
+ type: str
+ sample: vxxisurg397winapp4.azurewebsites.net
+ enabled:
+ description:
+                - Indicates whether the web app is enabled.
+ returned: always
+ type: bool
+ sample: true
+ enabled_host_names:
+ description:
+ - Enabled host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ "vxxisurg397winapp4.azurewebsites.net",
+ "vxxisurg397winapp4.scm.azurewebsites.net"
+ ]
+ host_name_ssl_states:
+ description:
+ - SSL state per host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ {
+ "hostType": "Standard",
+ "name": "vxxisurg397winapp4.azurewebsites.net",
+ "sslState": "Disabled"
+ },
+ {
+ "hostType": "Repository",
+ "name": "vxxisurg397winapp4.scm.azurewebsites.net",
+ "sslState": "Disabled"
+ }
+ ]
+ host_names:
+ description:
+ - Host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ "vxxisurg397winapp4.azurewebsites.net"
+ ]
+ outbound_ip_addresses:
+ description:
+ - Outbound IP address of the web app.
+ returned: always
+ type: str
+ sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45"
+ ftp_publish_url:
+ description:
+ - Publishing URL of the web app when deployment type is FTP.
+ returned: always
+ type: str
+ sample: ftp://xxxx.ftp.azurewebsites.windows.net
+ state:
+ description:
+ - State of the web app.
+ returned: always
+ type: str
+ sample: running
+ publishing_username:
+ description:
+ - Publishing profile user name.
+ returned: only when I(return_publish_profile=True).
+ type: str
+ sample: "$vxxisuRG397winapp4"
+ publishing_password:
+ description:
+ - Publishing profile password.
+ returned: only when I(return_publish_profile=True).
+ type: str
+ sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A"
+ tags:
+ description:
+ - Tags assigned to the resource. Dictionary of string:string pairs.
+ returned: always
+ type: dict
+ sample: { tag1: abc }
+'''
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.common import AzureMissingResourceHttpError, AzureHttpError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+AZURE_OBJECT_CLASS = 'WebApp'
+
+
+class AzureRMWebAppInfo(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str'),
+ tags=dict(type='list'),
+ return_publish_profile=dict(type='bool', default=False),
+ )
+
+ self.results = dict(
+ changed=False,
+ webapps=[],
+ )
+
+ self.name = None
+ self.resource_group = None
+ self.tags = None
+ self.return_publish_profile = False
+
+ self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']
+
+ super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec,
+ supports_tags=False,
+ facts_module=True)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_webapp_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.name:
+ self.results['webapps'] = self.list_by_name()
+ elif self.resource_group:
+ self.results['webapps'] = self.list_by_resource_group()
+ else:
+ self.results['webapps'] = self.list_all()
+
+ return self.results
+
+ def list_by_name(self):
+ self.log('Get web app {0}'.format(self.name))
+ item = None
+ result = []
+
+ try:
+ item = self.web_client.web_apps.get(self.resource_group, self.name)
+ except CloudError:
+ pass
+
+ if item and self.has_tags(item.tags, self.tags):
+ curated_result = self.get_curated_webapp(self.resource_group, self.name, item)
+ result = [curated_result]
+
+ return result
+
+ def list_by_resource_group(self):
+        self.log('List web apps in resource group {0}'.format(self.resource_group))
+ try:
+ response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group))
+ except CloudError as exc:
+ request_id = exc.request_id if exc.request_id else ''
+            self.fail("Error listing web apps in resource group {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ curated_output = self.get_curated_webapp(self.resource_group, item.name, item)
+ results.append(curated_output)
+ return results
+
+ def list_all(self):
+ self.log('List web apps in current subscription')
+ try:
+ response = list(self.web_client.web_apps.list())
+ except CloudError as exc:
+ request_id = exc.request_id if exc.request_id else ''
+ self.fail("Error listing web apps, request id {0} - {1}".format(request_id, str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ curated_output = self.get_curated_webapp(item.resource_group, item.name, item)
+ results.append(curated_output)
+ return results
+
+ def list_webapp_configuration(self, resource_group, name):
+ self.log('Get web app {0} configuration'.format(name))
+
+ response = []
+
+ try:
+ response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name)
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex)))
+
+ return response.as_dict()
+
+ def list_webapp_appsettings(self, resource_group, name):
+ self.log('Get web app {0} app settings'.format(name))
+
+ response = []
+
+ try:
+ response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name)
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex)))
+
+ return response.as_dict()
+
+    def get_publish_credentials(self, resource_group, name):
+        self.log('Get web app {0} publish credentials'.format(name))
+        response = None
+        try:
+            poller = self.web_client.web_apps.list_publishing_credentials(resource_group, name)
+            if isinstance(poller, LROPoller):
+                response = self.get_poller_result(poller)
+        except CloudError as ex:
+            request_id = ex.request_id if ex.request_id else ''
+            self.fail('Error getting web app {0} publishing credentials, request id {1} - {2}'.format(name, request_id, str(ex)))
+        return response
+
+ def get_webapp_ftp_publish_url(self, resource_group, name):
+ import xmltodict
+
+ self.log('Get web app {0} app publish profile'.format(name))
+
+ url = None
+ try:
+ content = self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, name=name)
+ if not content:
+ return url
+
+ full_xml = ''
+ for f in content:
+ full_xml += f.decode()
+ profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
+
+ if not profiles:
+ return url
+
+ for profile in profiles:
+ if profile['@publishMethod'] == 'FTP':
+ url = profile['@publishUrl']
+
+ except CloudError as ex:
+            self.fail('Error getting web app {0} publishing profile: {1}'.format(name, str(ex)))
+
+ return url
+
+ def get_curated_webapp(self, resource_group, name, webapp):
+        pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS)
+
+        # default each lookup to None so the curated output can still be built if one fails
+        site_config = None
+        app_settings = None
+        publish_cred = None
+        ftp_publish_url = None
+        try:
+            site_config = self.list_webapp_configuration(resource_group, name)
+            app_settings = self.list_webapp_appsettings(resource_group, name)
+            publish_cred = self.get_publish_credentials(resource_group, name)
+            ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name)
+        except CloudError:
+            pass
+ return self.construct_curated_webapp(webapp=pip,
+ configuration=site_config,
+ app_settings=app_settings,
+ deployment_slot=None,
+ ftp_publish_url=ftp_publish_url,
+ publish_credentials=publish_cred)
+
+ def construct_curated_webapp(self,
+ webapp,
+ configuration=None,
+ app_settings=None,
+ deployment_slot=None,
+ ftp_publish_url=None,
+ publish_credentials=None):
+ curated_output = dict()
+ curated_output['id'] = webapp['id']
+ curated_output['name'] = webapp['name']
+ curated_output['resource_group'] = webapp['properties']['resourceGroup']
+ curated_output['location'] = webapp['location']
+ curated_output['plan'] = webapp['properties']['serverFarmId']
+ curated_output['tags'] = webapp.get('tags', None)
+
+        # important properties from the output; these do not match the input arguments
+ curated_output['app_state'] = webapp['properties']['state']
+ curated_output['availability_state'] = webapp['properties']['availabilityState']
+ curated_output['default_host_name'] = webapp['properties']['defaultHostName']
+ curated_output['host_names'] = webapp['properties']['hostNames']
+ curated_output['enabled'] = webapp['properties']['enabled']
+ curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames']
+ curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates']
+ curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses']
+
+ # curated site_config
+ if configuration:
+ curated_output['frameworks'] = []
+ for fx_name in self.framework_names:
+ fx_version = configuration.get(fx_name + '_version', None)
+ if fx_version:
+ fx = {
+ 'name': fx_name,
+ 'version': fx_version
+ }
+ # java container setting
+ if fx_name == 'java':
+ if configuration['java_container'] and configuration['java_container_version']:
+ settings = {
+ 'java_container': configuration['java_container'].lower(),
+ 'java_container_version': configuration['java_container_version']
+ }
+ fx['settings'] = settings
+
+ curated_output['frameworks'].append(fx)
+
+ # linux_fx_version
+ if configuration.get('linux_fx_version', None):
+ tmp = configuration.get('linux_fx_version').split("|")
+ if len(tmp) == 2:
+ curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]})
+
+ # curated app_settings
+ if app_settings and app_settings.get('properties', None):
+ curated_output['app_settings'] = dict()
+ for item in app_settings['properties']:
+ curated_output['app_settings'][item] = app_settings['properties'][item]
+
+        # curated deployment_slot
+ if deployment_slot:
+ curated_output['deployment_slot'] = deployment_slot
+
+ # ftp_publish_url
+ if ftp_publish_url:
+ curated_output['ftp_publish_url'] = ftp_publish_url
+
+ # curated publish credentials
+ if publish_credentials and self.return_publish_profile:
+ curated_output['publishing_username'] = publish_credentials.publishing_user_name
+ curated_output['publishing_password'] = publish_credentials.publishing_password
+ return curated_output
+
+
+def main():
+ AzureRMWebAppInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webappslot.py b/test/support/integration/plugins/modules/azure_rm_webappslot.py
new file mode 100644
index 00000000..ddba710b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webappslot.py
@@ -0,0 +1,1058 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webappslot
+version_added: "2.8"
+short_description: Manage Azure Web App slot
+description:
+ - Create, update and delete Azure Web App slot.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+ name:
+ description:
+ - Unique name of the deployment slot to create or update.
+ required: True
+ webapp_name:
+ description:
+ - Web app name which this deployment slot belongs to.
+ required: True
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+ configuration_source:
+ description:
+            - Source slot to clone configurations from when creating the slot. Use the webapp's name to refer to the production slot.
+ auto_swap_slot_name:
+ description:
+            - Used to configure the target slot name to auto swap, or to disable auto swap.
+            - Set it to the target slot name to enable auto swap.
+            - Set it to C(False) to disable auto slot swap.
+ swap:
+ description:
+ - Swap deployment slots of a web app.
+ suboptions:
+ action:
+ description:
+ - Swap types.
+ - C(preview) is to apply target slot settings on source slot first.
+ - C(swap) is to complete swapping.
+ - C(reset) is to reset the swap.
+ choices:
+ - preview
+ - swap
+ - reset
+ default: preview
+ target_slot:
+ description:
+                    - Name of the target slot to swap with. If set to None, the slot swaps with the production slot.
+ preserve_vnet:
+ description:
+ - C(True) to preserve virtual network to the slot during swap. Otherwise C(False).
+ type: bool
+ default: True
+ frameworks:
+ description:
+ - Set of run time framework settings. Each setting is a dictionary.
+ - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+ suboptions:
+ name:
+ description:
+ - Name of the framework.
+                    - The lists of supported frameworks differ between Windows and Linux web apps.
+                    - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+                    - Windows web apps support multiple frameworks at the same time.
+                    - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+                    - Linux web apps support only one framework.
+ - Java framework is mutually exclusive with others.
+ choices:
+ - java
+ - net_framework
+ - php
+ - python
+ - ruby
+ - dotnetcore
+ - node
+ version:
+ description:
+                    - Version of the framework. For values supported on Linux web apps, see U(https://aka.ms/linux-stacks) for more info.
+ - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+ - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(node) supported value sample, C(6.6), C(6.9).
+ - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+ - C(ruby) supported value sample, 2.3.
+ - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+ settings:
+ description:
+ - List of settings of the framework.
+ suboptions:
+ java_container:
+ description:
+                            - Name of Java container. This is supported by specific framework C(java) only, for example C(Tomcat), C(Jetty).
+ java_container_version:
+ description:
+ - Version of Java container. This is supported by specific framework C(java) only.
+ - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3).
+ container_settings:
+ description:
+ - Web app slot container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container, for example C(imagename:tag).
+ registry_server_url:
+ description:
+ - Container registry server URL, for example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+ startup_file:
+ description:
+ - The slot startup file.
+ - This only applies for Linux web app slot.
+ app_settings:
+ description:
+ - Configure web app slot application settings. Suboptions are in key value pair format.
+ purge_app_settings:
+ description:
+ - Purge any existing application settings. Replace slot application settings with app_settings.
+ type: bool
+ deployment_source:
+ description:
+ - Deployment source for git.
+ suboptions:
+ url:
+ description:
+ - Repository URL of deployment source.
+ branch:
+ description:
+ - The branch name of the repository.
+ app_state:
+ description:
+ - Start/Stop/Restart the slot.
+ type: str
+ choices:
+ - started
+ - stopped
+ - restarted
+ default: started
+ state:
+ description:
+ - State of the Web App deployment slot.
+ - Use C(present) to create or update a slot and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+    - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a webapp slot
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ configuration_source: myJavaWebApp
+ app_settings:
+ testkey: testvalue
+
+  - name: Swap the slot with the production slot
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ swap:
+ action: swap
+
+  - name: Stop the slot
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ app_state: stopped
+
+  - name: Update a webapp slot's app settings
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ app_settings:
+ testkey: testvalue2
+
+  - name: Update a webapp slot's frameworks
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ frameworks:
+ - name: "node"
+ version: "10.1"
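+
+  # Hedged example: "stage2" is a hypothetical second slot; setting
+  # swap.target_slot swaps this slot with another slot instead of production.
+  - name: Swap the slot with another slot
+    azure_rm_webappslot:
+      resource_group: myResourceGroup
+      webapp_name: myJavaWebApp
+      name: stage
+      swap:
+        action: swap
+        target_slot: stage2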
+'''
+
+RETURN = '''
+id:
+ description:
+ - ID of current slot.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site,
+ AppServicePlan, SkuDescription, NameValuePair
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+swap_spec = dict(
+ action=dict(
+ type='str',
+ choices=[
+ 'preview',
+ 'swap',
+ 'reset'
+ ],
+ default='preview'
+ ),
+ target_slot=dict(
+ type='str'
+ ),
+ preserve_vnet=dict(
+ type='bool',
+ default=True
+ )
+)
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+deployment_source_spec = dict(
+ url=dict(type='str'),
+ branch=dict(type='str')
+)
+
+
+framework_settings_spec = dict(
+ java_container=dict(type='str', required=True),
+ java_container_version=dict(type='str', required=True)
+)
+
+
+framework_spec = dict(
+ name=dict(
+ type='str',
+ required=True,
+ choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
+ version=dict(type='str', required=True),
+ settings=dict(type='dict', options=framework_settings_spec)
+)
+
+
+def webapp_to_dict(webapp):
+ return dict(
+ id=webapp.id,
+ name=webapp.name,
+ location=webapp.location,
+ client_cert_enabled=webapp.client_cert_enabled,
+ enabled=webapp.enabled,
+ reserved=webapp.reserved,
+ client_affinity_enabled=webapp.client_affinity_enabled,
+ server_farm_id=webapp.server_farm_id,
+ host_names_disabled=webapp.host_names_disabled,
+ https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
+ skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
+ ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
+ state=webapp.state,
+ tags=webapp.tags if webapp.tags else None
+ )
+
+
+def slot_to_dict(slot):
+ return dict(
+ id=slot.id,
+ resource_group=slot.resource_group,
+ server_farm_id=slot.server_farm_id,
+ target_swap_slot=slot.target_swap_slot,
+ enabled_host_names=slot.enabled_host_names,
+ slot_swap_status=slot.slot_swap_status,
+ name=slot.name,
+ location=slot.location,
+ enabled=slot.enabled,
+ reserved=slot.reserved,
+ host_names_disabled=slot.host_names_disabled,
+ state=slot.state,
+ repository_site_name=slot.repository_site_name,
+ default_host_name=slot.default_host_name,
+ kind=slot.kind,
+ site_config=slot.site_config,
+ tags=slot.tags if slot.tags else None
+ )
+
+
+class Actions:
+ NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4)
+
+
+class AzureRMWebAppSlots(AzureRMModuleBase):
+ """Configuration class for an Azure RM Web App slot resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ webapp_name=dict(
+ type='str',
+ required=True
+ ),
+ location=dict(
+ type='str'
+ ),
+ configuration_source=dict(
+ type='str'
+ ),
+ auto_swap_slot_name=dict(
+ type='raw'
+ ),
+ swap=dict(
+ type='dict',
+ options=swap_spec
+ ),
+ frameworks=dict(
+ type='list',
+ elements='dict',
+ options=framework_spec
+ ),
+ container_settings=dict(
+ type='dict',
+ options=container_settings_spec
+ ),
+ deployment_source=dict(
+ type='dict',
+ options=deployment_source_spec
+ ),
+ startup_file=dict(
+ type='str'
+ ),
+ app_settings=dict(
+ type='dict'
+ ),
+ purge_app_settings=dict(
+ type='bool',
+ default=False
+ ),
+ app_state=dict(
+ type='str',
+ choices=['started', 'stopped', 'restarted'],
+ default='started'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ mutually_exclusive = [['container_settings', 'frameworks']]
+
+ self.resource_group = None
+ self.name = None
+ self.webapp_name = None
+ self.location = None
+
+ self.auto_swap_slot_name = None
+ self.swap = None
+ self.tags = None
+ self.startup_file = None
+ self.configuration_source = None
+ self.clone = False
+
+ # site config, e.g app settings, ssl
+ self.site_config = dict()
+ self.app_settings = dict()
+ self.app_settings_strDic = None
+
+ # siteSourceControl
+ self.deployment_source = dict()
+
+        # site, used for site-level creation or update
+ self.site = None
+
+ # property for internal usage, not used for sdk
+ self.container_settings = None
+
+ self.purge_app_settings = False
+ self.app_state = 'started'
+
+ self.results = dict(
+ changed=False,
+ id=None,
+ )
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ self.frameworks = None
+
+        # site config properties that can be updated via frameworks
+ self.site_config_updatable_frameworks = ["net_framework_version",
+ "java_version",
+ "php_version",
+ "python_version",
+ "linux_fx_version"]
+
+ self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
+ self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
+
+ super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "scm_type":
+ self.site_config[key] = kwargs[key]
+
+ old_response = None
+ response = None
+ to_be_updated = False
+
+ # set location
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ self.location = resource_group.location
+
+ # get web app
+ webapp_response = self.get_webapp()
+
+ if not webapp_response:
+ self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))
+
+ # get slot
+ old_response = self.get_slot()
+
+ # set is_linux
+ is_linux = True if webapp_response['reserved'] else False
+
+ if self.state == 'present':
+ if self.frameworks:
+ # java is mutually exclusive with other frameworks
+ if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
+ self.fail('Java is mutually exclusive with other frameworks.')
+
+ if is_linux:
+ if len(self.frameworks) != 1:
+ self.fail('Can specify one framework only for Linux web app.')
+
+ if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
+ self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
+
+ self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
+
+ if self.frameworks[0]['name'] == 'java':
+ if self.frameworks[0]['version'] != '8':
+ self.fail("Linux web app only supports java 8.")
+
+ if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
+ self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
+ self.fail("Linux web app only supports tomcat container.")
+
+ if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
+ self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
+ self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
+ else:
+ self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
+ else:
+ for fx in self.frameworks:
+ if fx.get('name') not in self.supported_windows_frameworks:
+ self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
+ else:
+ self.site_config[fx.get('name') + '_version'] = fx.get('version')
+
+ if 'settings' in fx and fx['settings'] is not None:
+ for key, value in fx['settings'].items():
+ self.site_config[key] = value
+
+ if not self.app_settings:
+ self.app_settings = dict()
+
+ if self.container_settings:
+ linux_fx_version = 'DOCKER|'
+
+ if self.container_settings.get('registry_server_url'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
+
+ linux_fx_version += self.container_settings['registry_server_url'] + '/'
+
+ linux_fx_version += self.container_settings['name']
+
+ self.site_config['linux_fx_version'] = linux_fx_version
+
+ if self.container_settings.get('registry_server_user'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
+
+ if self.container_settings.get('registry_server_password'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
+
+ # set auto_swap_slot_name
+ if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str):
+ self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name
+ if self.auto_swap_slot_name is False:
+ self.site_config['auto_swap_slot_name'] = None
+
+ # init site
+ self.site = Site(location=self.location, site_config=self.site_config)
+
+ # check if the slot already present in the webapp
+ if not old_response:
+ self.log("Web App slot doesn't exist")
+
+ to_be_updated = True
+ self.to_do = Actions.CreateOrUpdate
+ self.site.tags = self.tags
+
+ # if linux, setup startup_file
+ if self.startup_file:
+ self.site_config['app_command_line'] = self.startup_file
+
+ # set app setting
+ if self.app_settings:
+ app_settings = []
+ for key in self.app_settings.keys():
+ app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
+
+ self.site_config['app_settings'] = app_settings
+
+ # clone slot
+ if self.configuration_source:
+ self.clone = True
+
+ else:
+ # existing slot, do update
+ self.log("Web App slot already exists")
+
+ self.log('Result: {0}'.format(old_response))
+
+ update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
+
+ if update_tags:
+ to_be_updated = True
+
+ # check if site_config changed
+ old_config = self.get_configuration_slot(self.name)
+
+ if self.is_site_config_changed(old_config):
+ to_be_updated = True
+ self.to_do = Actions.CreateOrUpdate
+
+ self.app_settings_strDic = self.list_app_settings_slot(self.name)
+
+ # purge existing app_settings:
+ if self.purge_app_settings:
+ to_be_updated = True
+ self.to_do = Actions.UpdateAppSettings
+ self.app_settings_strDic = dict()
+
+ # check if app settings changed
+ if self.purge_app_settings or self.is_app_settings_changed():
+ to_be_updated = True
+ self.to_do = Actions.UpdateAppSettings
+
+ if self.app_settings:
+ for key in self.app_settings.keys():
+ self.app_settings_strDic[key] = self.app_settings[key]
+
+ elif self.state == 'absent':
+ if old_response:
+ self.log("Delete Web App slot")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_slot()
+
+ self.log('Web App slot deleted')
+
+ else:
+                self.log("Web app slot {0} does not exist.".format(self.name))
+
+ if to_be_updated:
+ self.log('Need to Create/Update web app')
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ if self.to_do == Actions.CreateOrUpdate:
+ response = self.create_update_slot()
+
+ self.results['id'] = response['id']
+
+ if self.clone:
+ self.clone_slot()
+
+ if self.to_do == Actions.UpdateAppSettings:
+ self.update_app_settings_slot()
+
+ slot = None
+ if response:
+ slot = response
+ if old_response:
+ slot = old_response
+
+ if slot:
+ if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \
+ (slot['state'] != 'Running' and self.app_state == 'started') or \
+ self.app_state == 'restarted':
+
+ self.results['changed'] = True
+ if self.check_mode:
+ return self.results
+
+ self.set_state_slot(self.app_state)
+
+ if self.swap:
+ self.results['changed'] = True
+ if self.check_mode:
+ return self.results
+
+ self.swap_slot()
+
+ return self.results
+
+ # compare site config
+ def is_site_config_changed(self, existing_config):
+ for fx_version in self.site_config_updatable_frameworks:
+ if self.site_config.get(fx_version):
+ if not getattr(existing_config, fx_version) or \
+ getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
+ return True
+
+ if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None:
+ return True
+ elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None):
+ return True
+ return False
+
+ # comparing existing app setting with input, determine whether it's changed
+ def is_app_settings_changed(self):
+ if self.app_settings:
+ if len(self.app_settings_strDic) != len(self.app_settings):
+ return True
+
+ if self.app_settings_strDic != self.app_settings:
+ return True
+ return False
+
+ # comparing deployment source with input, determine whether it's changed
+ def is_deployment_source_changed(self, existing_webapp):
+ if self.deployment_source:
+ if self.deployment_source.get('url') \
+ and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+ return True
+
+ if self.deployment_source.get('branch') \
+ and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+ return True
+
+ return False
+
+ def create_update_slot(self):
+ '''
+ Creates or updates Web App slot with the specified configuration.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Creating / Updating the Web App slot {0}".format(self.name))
+
+ try:
+ response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group,
+ slot=self.name,
+ name=self.webapp_name,
+ site_envelope=self.site)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the Web App slot instance.')
+ self.fail("Error creating the Web App slot: {0}".format(str(exc)))
+ return slot_to_dict(response)
+
+ def delete_slot(self):
+ '''
+ Deletes specified Web App slot in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the Web App slot {0}".format(self.name))
+ try:
+ response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the Web App slot.')
+ self.fail(
+ "Error deleting the Web App slots: {0}".format(str(e)))
+
+ return True
+
+ def get_webapp(self):
+ '''
+ Gets the properties of the specified Web App.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Checking if the Web App instance {0} is present".format(self.webapp_name))
+
+ response = None
+
+ try:
+ response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
+ name=self.webapp_name)
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("Web App instance : {0} found".format(response.name))
+ return webapp_to_dict(response)
+
+ except CloudError as ex:
+ pass
+
+ self.log("Didn't find web app {0} in resource group {1}".format(
+ self.webapp_name, self.resource_group))
+
+ return False
+
+ def get_slot(self):
+ '''
+ Gets the properties of the specified Web App slot.
+
+ :return: deserialized Web App slot state dictionary
+ '''
+ self.log(
+ "Checking if the Web App slot {0} is present".format(self.name))
+
+ response = None
+
+ try:
+ response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name)
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("Web App slot: {0} found".format(response.name))
+ return slot_to_dict(response)
+
+ except CloudError as ex:
+ pass
+
+        self.log("Didn't find web app slot {0} in resource group {1}".format(self.name, self.resource_group))
+
+ return False
+
+ def list_app_settings(self):
+ '''
+ List webapp application settings
+ :return: deserialized list response
+ '''
+        self.log("List webapp application settings")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group, name=self.webapp_name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def list_app_settings_slot(self, slot_name):
+ '''
+ List application settings
+ :return: deserialized list response
+ '''
+        self.log("List application settings")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings_slot(
+ resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def update_app_settings_slot(self, slot_name=None, app_settings=None):
+ '''
+ Update application settings
+ :return: deserialized updating response
+ '''
+        self.log("Update application settings")
+
+ if slot_name is None:
+ slot_name = self.name
+ if app_settings is None:
+ app_settings = self.app_settings_strDic
+ try:
+ response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=slot_name,
+ kind=None,
+ properties=app_settings)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+ self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def create_or_update_source_control_slot(self):
+ '''
+ Update site source control
+ :return: deserialized updating response
+ '''
+ self.log("Update site source control")
+
+ if self.deployment_source is None:
+ return False
+
+ self.deployment_source['is_manual_integration'] = False
+ self.deployment_source['is_mercurial'] = False
+
+ try:
+            response = self.web_client.web_apps.create_or_update_source_control_slot(
+ resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ site_source_control=self.deployment_source,
+ slot=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+ self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def get_configuration(self):
+ '''
+ Get web app configuration
+ :return: deserialized web app configuration response
+ '''
+ self.log("Get web app configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration(
+ resource_group_name=self.resource_group, name=self.webapp_name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
+ self.webapp_name, self.resource_group, str(ex)))
+
+ def get_configuration_slot(self, slot_name):
+ '''
+ Get slot configuration
+ :return: deserialized slot configuration response
+ '''
+ self.log("Get web app slot configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration_slot(
+ resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format(
+ slot_name, self.resource_group, str(ex)))
+
+ def update_configuration_slot(self, slot_name=None, site_config=None):
+ '''
+ Update slot configuration
+ :return: deserialized slot configuration response
+ '''
+ self.log("Update web app slot configuration")
+
+ if slot_name is None:
+ slot_name = self.name
+ if site_config is None:
+ site_config = self.site_config
+ try:
+
+ response = self.web_client.web_apps.update_configuration_slot(
+ resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format(
+ slot_name, self.resource_group, str(ex)))
+
+ def set_state_slot(self, appstate):
+ '''
+ Start/stop/restart web app slot
+ :return: deserialized updating response
+ '''
+ try:
+ if appstate == 'started':
+ response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
+ elif appstate == 'stopped':
+ response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
+ elif appstate == 'restarted':
+ response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
+ else:
+ self.fail("Invalid web app slot state {0}".format(appstate))
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format(
+ appstate, self.name, self.resource_group, request_id, str(ex)))
+
+ def swap_slot(self):
+ '''
+ Swap slot
+ :return: deserialized response
+ '''
+ self.log("Swap slot")
+
+ try:
+ if self.swap['action'] == 'swap':
+ if self.swap['target_slot'] is None:
+ response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ target_slot=self.name,
+ preserve_vnet=self.swap['preserve_vnet'])
+ else:
+ response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name,
+ target_slot=self.swap['target_slot'],
+ preserve_vnet=self.swap['preserve_vnet'])
+ elif self.swap['action'] == 'preview':
+ if self.swap['target_slot'] is None:
+ response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ target_slot=self.name,
+ preserve_vnet=self.swap['preserve_vnet'])
+ else:
+ response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name,
+ target_slot=self.swap['target_slot'],
+ preserve_vnet=self.swap['preserve_vnet'])
+ elif self.swap['action'] == 'reset':
+ if self.swap['target_slot'] is None:
+ response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group,
+ name=self.webapp_name)
+ else:
+ response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.swap['target_slot'])
+ response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name)
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
+
+ def clone_slot(self):
+ if self.configuration_source:
+ src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source
+
+ if src_slot is None:
+ site_config_clone_from = self.get_configuration()
+ else:
+ site_config_clone_from = self.get_configuration_slot(slot_name=src_slot)
+
+ self.update_configuration_slot(site_config=site_config_clone_from)
+
+ if src_slot is None:
+ app_setting_clone_from = self.list_app_settings()
+ else:
+ app_setting_clone_from = self.list_app_settings_slot(src_slot)
+
+ if self.app_settings:
+ app_setting_clone_from.update(self.app_settings)
+
+ self.update_app_settings_slot(app_settings=app_setting_clone_from)
+
+
+def main():
+ """Main execution"""
+ AzureRMWebAppSlots()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cloud_init_data_facts.py b/test/support/integration/plugins/modules/cloud_init_data_facts.py
new file mode 100644
index 00000000..4f871b99
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloud_init_data_facts.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init.
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+version_added: 2.6
+author: René Moser (@resmo)
+options:
+ filter:
+ description:
+ - Filter facts
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ cloud_init_data_facts:
+ register: result
+
+- debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
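+
+# Companion sketch: filtering on "result" limits the gathered facts to result.json.
+- name: Gather only the result facts of cloud init
+  cloud_init_data_facts:
+    filter: result
+  register: result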
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+  sample: '{
+    "status": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "errors": []
+        }
+    },
+    "result": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "init": {
+                "errors": [],
+                "finished": 1522066377.0185432,
+                "start": 1522066375.2648022
+            },
+            "init-local": {
+                "errors": [],
+                "finished": 1522066373.70919,
+                "start": 1522066373.4726632
+            },
+            "modules-config": {
+                "errors": [],
+                "finished": 1522066380.9097016,
+                "start": 1522066379.0011985
+            },
+            "modules-final": {
+                "errors": [],
+                "finished": 1522066383.56594,
+                "start": 1522066382.3449218
+            },
+            "stage": null
+        }
+    }
+}'
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data/"
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+ for i in ['result', 'status']:
+        filter_param = module.params.get('filter')
+        if filter_param is None or filter_param == i:
+ res['cloud_init_data_facts'][i] = dict()
+ json_file = CLOUD_INIT_PATH + i + '.json'
+
+            if os.path.exists(json_file):
+                with open(json_file, 'rb') as f:
+                    contents = to_text(f.read(), errors='surrogate_or_strict')
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cloudformation.py b/test/support/integration/plugins/modules/cloudformation.py
new file mode 100644
index 00000000..cd031465
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation.py
@@ -0,0 +1,837 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation
+short_description: Create or delete an AWS CloudFormation stack
+description:
+    - Launches or updates an AWS CloudFormation stack and waits for it to complete.
+notes:
+ - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
+ The version listed in the requirements is the oldest version that works with the module as a whole.
+ Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
+ Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
+version_added: "1.1"
+options:
+ stack_name:
+ description:
+ - Name of the CloudFormation stack.
+ required: true
+ type: str
+ disable_rollback:
+ description:
+      - If a stack fails to form, rollback will remove the stack.
+ default: false
+ type: bool
+ on_create_failure:
+ description:
+ - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+ choices:
+ - DO_NOTHING
+ - ROLLBACK
+ - DELETE
+ version_added: "2.8"
+ type: str
+ create_timeout:
+ description:
+      - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
+ version_added: "2.6"
+ type: int
+ template_parameters:
+ description:
+ - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+ - If I(state=present), stack will be created.
+ - If I(state=present) and if stack exists and template has changed, it will be updated.
+ - If I(state=absent), stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template),
+ I(template_body) nor I(template_url) are specified, the previous template will be reused.
+ type: path
+ notification_arns:
+ description:
+ - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+ version_added: "2.0"
+ type: str
+ stack_policy:
+ description:
+      - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
+        For instance, to allow all updates, see U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+ version_added: "1.9"
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later, updating tags removes previous entries.
+ version_added: "1.4"
+ type: dict
+ template_url:
+ description:
+ - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+ S3 bucket in the same region as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+ the previous template will be reused.
+ version_added: "2.0"
+ type: str
+ create_changeset:
+ description:
+ - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+ - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
+ deleted immediately with no changeset."
+ type: bool
+ default: false
+ version_added: "2.4"
+ changeset_name:
+ description:
+ - Name given to the changeset when creating a changeset.
+ - Only used when I(create_changeset=true).
+ - By default, a name prefixed with Ansible-STACKNAME is generated based on the input parameters.
+ See the AWS Change Sets docs for more information
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html).
+ version_added: "2.4"
+ type: str
+ template_format:
+ description:
+ - This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14.
+ - Templates are now passed raw to CloudFormation regardless of format.
+ version_added: "2.0"
+ type: str
+ role_arn:
+ description:
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+ docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ version_added: "2.3"
+ type: str
+ termination_protection:
+ description:
+ - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
+ type: bool
+ version_added: "2.5"
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ version_added: "2.5"
+ type: str
+ events_limit:
+ description:
+ - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+ default: 200
+ version_added: "2.7"
+ type: int
+ backoff_delay:
+ description:
+ - Number of seconds to wait before the next retry.
+ default: 3
+ version_added: "2.8"
+ type: int
+ required: False
+ backoff_max_delay:
+ description:
+ - Maximum amount of time to wait between retries.
+ default: 30
+ version_added: "2.8"
+ type: int
+ required: False
+ backoff_retries:
+ description:
+ - Number of times to retry operation.
+ - The AWS API throttling mechanism can make CloudFormation calls fail, so the module retries them a few times.
+ default: 10
+ version_added: "2.8"
+ type: int
+ required: False
+ capabilities:
+ description:
+ - Specify the capabilities that the stack template contains.
+ - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+ type: list
+ elements: str
+ version_added: "2.8"
+ default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author: "James S. Martin (@jsmartin)"
+extends_documentation_fragment:
+- aws
+- ec2
+requirements: [ boto3, botocore>=1.5.45 ]
+'''
+
+EXAMPLES = '''
+- name: create a cloudformation stack
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ KeyName: "jmartin"
+ DiskType: "ephemeral"
+ InstanceType: "m1.small"
+ ClusterSize: 3
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Basic role example
+- name: create a stack, specify role that cloudformation assumes
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "roles/cloudformation/files/cloudformation-example.json"
+ role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
+
+- name: delete a stack
+ cloudformation:
+ stack_name: "ansible-cloudformation-old"
+ state: "absent"
+
+# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template via a URL
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template body via lookup template
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_body: "{{ lookup('template', 'cloudformation.j2') }}"
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
+# When use_previous_value is set to True, the given value will be ignored and
+# CloudFormation will use the value from a previously submitted template.
+# If use_previous_value is set to False (default) the given value is used.
+- cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ DBSnapshotIdentifier:
+ use_previous_value: True
+ value: arn:aws:rds:us-east-1:000000000000:snapshot:rds:my-db-snapshot
+ DBName:
+ use_previous_value: True
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Enable termination protection on a stack.
+# If the stack already exists, this will update its termination protection
+- name: enable termination protection during stack creation
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ termination_protection: yes
+
+# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
+# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
+- name: configure a timeout for stack creation
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ create_timeout: 5
+
+# Configure rollback behaviour on unsuccessful stack creation, letting
+# CloudFormation either clean up or do nothing after a failed deployment.
+# In this case, on_create_failure is set to "DELETE", so the stack is cleaned
+# up if it fails to create.
+- name: create stack which will delete on creation failure
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ on_create_failure: DELETE
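+
+# Publish stack events to an SNS topic and attach a stack policy.
+# Illustrative sketch only: the topic ARN and policy file below are
+# hypothetical placeholders.
+- name: create a stack that notifies SNS and carries a stack policy
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template: "files/cloudformation-example.json"
+ notification_arns: "arn:aws:sns:us-east-1:123456789012:my-cfn-topic"
+ stack_policy: "files/stack-policy.json"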
+'''
+
+RETURN = '''
+events:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+ description: Debugging logs. Useful when modifying or finding an error.
+ returned: always
+ type: list
+ sample: ["updating stack"]
+change_set_id:
+ description: The ID of the stack change set if one was created.
+ returned: I(state=present) and I(create_changeset=true)
+ type: str
+ sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+ description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+ returned: state == present
+ type: list
+ sample: [
+ {
+ "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+ "logical_resource_id": "CFTestSg",
+ "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+ "resource_type": "AWS::EC2::SecurityGroup",
+ "status": "UPDATE_COMPLETE",
+ "status_reason": null
+ }
+ ]
+stack_outputs:
+ type: dict
+ description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: state == present
+ sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+''' # NOQA
+
+import json
+import time
+import uuid
+import traceback
+from hashlib import sha1
+
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
+ '''Get stack events, optionally filtered by ClientRequestToken. The pre-2.3 event data was never correct and only worked as a side effect, so the v2.3 format is different.'''
+ ret = {'events': [], 'log': []}
+
+ try:
+ pg = cfn.get_paginator(
+ 'describe_stack_events'
+ ).paginate(
+ StackName=stack_name,
+ PaginationConfig={'MaxItems': events_limit}
+ )
+ if token_filter is not None:
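+ # JMESPath filter: keep only the events tagged with the given
+ # ClientRequestToken, i.e. the events caused by this module run.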
+ events = list(pg.search(
+ "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+ ))
+ else:
+ events = list(pg.search("StackEvents[*]"))
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ ret['log'].append('Stack does not exist.')
+ return ret
+ ret['log'].append('Unknown error: ' + str(error_msg))
+ return ret
+
+ for e in events:
+ eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
+ ret['events'].append(eventline)
+
+ if e['ResourceStatus'].endswith('FAILED'):
+ failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
+ ret['log'].append(failline)
+
+ return ret
+
+
+def create_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
+
+ # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
+ # 'OnFailure' only apply on creation, not update.
+ if module.params.get('on_create_failure') is not None:
+ stack_params['OnFailure'] = module.params['on_create_failure']
+ else:
+ stack_params['DisableRollback'] = module.params['disable_rollback']
+
+ if module.params.get('create_timeout') is not None:
+ stack_params['TimeoutInMinutes'] = module.params['create_timeout']
+ if module.params.get('termination_protection') is not None:
+ if boto_supports_termination_protection(cfn):
+ stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
+ else:
+ module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+
+ try:
+ response = cfn.create_stack(**stack_params)
+ # Use stack ID to follow stack state in case of on_create_failure = DELETE
+ result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def list_changesets(cfn, stack_name):
+ res = cfn.list_change_sets(StackName=stack_name)
+ return [cs['ChangeSetName'] for cs in res['Summaries']]
+
+
+def create_changeset(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template' or 'template_url' is required.")
+ if module.params['changeset_name'] is not None:
+ stack_params['ChangeSetName'] = module.params['changeset_name']
+
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ changeset_name = build_changeset_name(stack_params)
+ stack_params['ChangeSetName'] = changeset_name
+
+ # Determine if this changeset already exists
+ pending_changesets = list_changesets(cfn, stack_params['StackName'])
+ if changeset_name in pending_changesets:
+ warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
+ result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
+ else:
+ cs = cfn.create_change_set(**stack_params)
+ # Make sure we don't enter an infinite loop
+ time_end = time.time() + 600
+ while time.time() < time_end:
+ try:
+ newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
+ except botocore.exceptions.BotoCoreError as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg=error_msg)
+ if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
+ time.sleep(1)
+ elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
+ cfn.delete_change_set(ChangeSetName=cs['Id'])
+ result = dict(changed=False,
+ output='The created Change Set did not contain any changes to this stack and was deleted.')
+ # a failed change set does not trigger any stack events, so skip
+ # any further processing and return the result directly
+ return result
+ else:
+ break
+ # Let's not hog the CPU / spam the AWS API
+ time.sleep(1)
+ result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
+ result['change_set_id'] = cs['Id']
+ result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
+ 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+ 'NOTE that dependencies on this stack might fail due to pending changes!']
+ except Exception as err:
+ error_msg = boto_exception(err)
+ if 'No updates are to be performed.' in error_msg:
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ else:
+ module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())
+
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ stack_params['UsePreviousTemplate'] = True
+
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
+ try:
+ cfn.update_stack(**stack_params)
+ result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ error_msg = boto_exception(err)
+ if 'No updates are to be performed.' in error_msg:
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ else:
+ module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
+ '''updates termination protection of a stack'''
+ if not boto_supports_termination_protection(cfn):
+ module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+ stack = get_stack_facts(cfn, stack_name)
+ if stack:
+ if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
+ try:
+ cfn.update_termination_protection(
+ EnableTerminationProtection=desired_termination_protection_state,
+ StackName=stack_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
+
+
+def boto_supports_termination_protection(cfn):
+ '''termination protection was added in botocore 1.7.18'''
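+ # Feature-detect on the client instead of parsing version strings: botocore
+ # generates client methods from its bundled API models, so the method exists
+ # exactly when the installed botocore supports the call.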
+ return hasattr(cfn, "update_termination_protection")
+
+
+def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
+ '''gets the status of a stack while it is created/updated/deleted'''
+ existed = []
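+ # 'existed' records whether the stack was ever observed during polling; a
+ # stack that vanishes mid-loop was deleted rather than never created.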
+ while True:
+ try:
+ stack = get_stack_facts(cfn, stack_name)
+ existed.append('yes')
+ except Exception:
+ # If the stack previously existed, and now can't be found then it's
+ # been deleted successfully.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ if not stack:
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
+ return ret
+ # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
+ # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
+ elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
+ ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
+ return ret
+ elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
+ return ret
+ # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
+ elif stack['StackStatus'].endswith('_COMPLETE'):
+ ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
+ return ret
+ elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
+ return ret
+ # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
+ elif stack['StackStatus'].endswith('_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
+ return ret
+ else:
+ # this can loop forever :/
+ time.sleep(5)
+ return {'failed': True, 'output': 'Failed for unknown reasons.'}
+
+
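+# Changeset names must be unique within a stack. Deriving the name from a hash
+# of the request parameters keeps it deterministic, so repeated runs with the
+# same input reuse one changeset. Illustration (hypothetical values): for
+# StackName 'demo' this yields 'Ansible-demo-<40-char sha1 hex digest>'.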
+def build_changeset_name(stack_params):
+ if 'ChangeSetName' in stack_params:
+ return stack_params['ChangeSetName']
+
+ json_params = json.dumps(stack_params, sort_keys=True)
+
+ return 'Ansible-{0}-{1}'.format(
+ stack_params['StackName'],
+ sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
+ )
+
+
+def check_mode_changeset(module, stack_params, cfn):
+ """Create a change set, describe it and delete it before returning check mode outputs."""
+ stack_params['ChangeSetName'] = build_changeset_name(stack_params)
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ change_set = cfn.create_change_set(**stack_params)
+ for i in range(60): # total time 5 min
+ description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
+ if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
+ break
+ time.sleep(5)
+ else:
+ # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
+ module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
+
+ cfn.delete_change_set(ChangeSetName=change_set['Id'])
+
+ reason = description.get('StatusReason')
+
+ if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
+ return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
+ return {'changed': True, 'msg': reason, 'meta': description['Changes']}
+
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def get_stack_facts(cfn, stack_name):
+ try:
+ stack_response = cfn.describe_stacks(StackName=stack_name)
+ stack_info = stack_response['Stacks'][0]
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ return None
+
+ # other error, bail.
+ raise err
+
+ if stack_response and stack_response.get('Stacks', None):
+ stacks = stack_response['Stacks']
+ if len(stacks):
+ stack_info = stacks[0]
+
+ return stack_info
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ stack_name=dict(required=True),
+ template_parameters=dict(required=False, type='dict', default={}),
+ state=dict(default='present', choices=['present', 'absent']),
+ template=dict(default=None, required=False, type='path'),
+ notification_arns=dict(default=None, required=False),
+ stack_policy=dict(default=None, required=False),
+ disable_rollback=dict(default=False, type='bool'),
+ on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
+ create_timeout=dict(default=None, type='int'),
+ template_url=dict(default=None, required=False),
+ template_body=dict(default=None, required=False),
+ template_format=dict(removed_in_version='2.14'),
+ create_changeset=dict(default=False, type='bool'),
+ changeset_name=dict(default=None, required=False),
+ role_arn=dict(default=None, required=False),
+ tags=dict(default=None, type='dict'),
+ termination_protection=dict(default=None, type='bool'),
+ events_limit=dict(default=200, type='int'),
+ backoff_retries=dict(type='int', default=10, required=False),
+ backoff_delay=dict(type='int', default=3, required=False),
+ backoff_max_delay=dict(type='int', default=30, required=False),
+ capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['template_url', 'template', 'template_body'],
+ ['disable_rollback', 'on_create_failure']],
+ supports_check_mode=True
+ )
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 and botocore are required for this module')
+
+ invalid_capabilities = []
+ user_capabilities = module.params.get('capabilities')
+ for user_cap in user_capabilities:
+ if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
+ invalid_capabilities.append(user_cap)
+
+ if invalid_capabilities:
+ module.fail_json(msg="Specified capabilities are invalid : %r,"
+ " please check documentation for valid capabilities" % invalid_capabilities)
+
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {
+ 'Capabilities': user_capabilities,
+ 'ClientRequestToken': to_native(uuid.uuid4()),
+ }
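+ # The ClientRequestToken tags the stack operations started by this run,
+ # letting get_stack_events() filter the event log down to just our changes.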
+ state = module.params['state']
+ stack_params['StackName'] = module.params['stack_name']
+
+ if module.params['template'] is not None:
+ with open(module.params['template'], 'r') as template_fh:
+ stack_params['TemplateBody'] = template_fh.read()
+ elif module.params['template_body'] is not None:
+ stack_params['TemplateBody'] = module.params['template_body']
+ elif module.params['template_url'] is not None:
+ stack_params['TemplateURL'] = module.params['template_url']
+
+ if module.params.get('notification_arns'):
+ stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
+ else:
+ stack_params['NotificationARNs'] = []
+
+ # the stack policy can't be applied in check mode or when creating a changeset.
+ if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
+ with open(module.params['stack_policy'], 'r') as stack_policy_fh:
+ stack_params['StackPolicyBody'] = stack_policy_fh.read()
+
+ template_parameters = module.params['template_parameters']
+
+ stack_params['Parameters'] = []
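+ # Convert Ansible's template_parameters dict into boto3's list form, e.g.
+ # {'KeyName': 'jmartin'} -> [{'ParameterKey': 'KeyName', 'ParameterValue': 'jmartin'}]
+ # {'DBName': {'use_previous_value': True}} -> [{'ParameterKey': 'DBName', 'UsePreviousValue': True}]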
+ for k, v in template_parameters.items():
+ if isinstance(v, dict):
+ # set parameter based on a dict to allow additional CFN Parameter Attributes
+ param = dict(ParameterKey=k)
+
+ if 'value' in v:
+ param['ParameterValue'] = str(v['value'])
+
+ if 'use_previous_value' in v and bool(v['use_previous_value']):
+ param['UsePreviousValue'] = True
+ param.pop('ParameterValue', None)
+
+ stack_params['Parameters'].append(param)
+ else:
+ # allow default k/v configuration to set a template parameter
+ stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
+
+ if isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+
+ if module.params.get('role_arn'):
+ stack_params['RoleARN'] = module.params['role_arn']
+
+ result = {}
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg=boto_exception(e))
+
+ # Wrap the cloudformation client methods that this module uses with
+ # automatic backoff / retry for throttling error codes
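+ # (each retry sleeps a random interval that grows roughly as delay * 2**attempt,
+ # capped at backoff_max_delay -- the "full jitter" strategy)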
+ backoff_wrapper = AWSRetry.jittered_backoff(
+ retries=module.params.get('backoff_retries'),
+ delay=module.params.get('backoff_delay'),
+ max_delay=module.params.get('backoff_max_delay')
+ )
+ cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
+ cfn.create_stack = backoff_wrapper(cfn.create_stack)
+ cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
+ cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
+ cfn.update_stack = backoff_wrapper(cfn.update_stack)
+ cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
+ cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
+ cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
+ if boto_supports_termination_protection(cfn):
+ cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
+
+ stack_info = get_stack_facts(cfn, stack_params['StackName'])
+
+ if module.check_mode:
+ if state == 'absent' and stack_info:
+ module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
+ elif state == 'absent' and not stack_info:
+ module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
+ elif state == 'present' and not stack_info:
+ module.exit_json(changed=True, msg='New stack would be created', meta=[])
+ else:
+ module.exit_json(**check_mode_changeset(module, stack_params, cfn))
+
+ if state == 'present':
+ if not stack_info:
+ result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
+ elif module.params.get('create_changeset'):
+ result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
+ else:
+ if module.params.get('termination_protection') is not None:
+ update_termination_protection(module, cfn, stack_params['StackName'],
+ bool(module.params.get('termination_protection')))
+ result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
+
+ # format the stack output
+
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if stack is not None:
+ if result.get('stack_outputs') is None:
+ # always define stack_outputs, but it may be empty
+ result['stack_outputs'] = {}
+ for output in stack.get('Outputs', []):
+ result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+ stack_resources = []
+ reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
+ for res in reslist.get('StackResourceSummaries', []):
+ stack_resources.append({
+ "logical_resource_id": res['LogicalResourceId'],
+ "physical_resource_id": res.get('PhysicalResourceId', ''),
+ "resource_type": res['ResourceType'],
+ "last_updated_time": res['LastUpdatedTimestamp'],
+ "status": res['ResourceStatus'],
+ "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
+ })
+ result['stack_resources'] = stack_resources
+
+ elif state == 'absent':
+ # absent state is different because of the way delete_stack works.
+ # The problem is it doesn't give an error if the stack isn't found,
+ # so we must describe the stack first.
+
+ try:
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if not stack:
+ result = {'changed': False, 'output': 'Stack not found.'}
+ else:
+ if stack_params.get('RoleARN') is None:
+ cfn.delete_stack(StackName=stack_params['StackName'])
+ else:
+ cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
+ result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
+ stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cloudformation_info.py b/test/support/integration/plugins/modules/cloudformation_info.py
new file mode 100644
index 00000000..ee2e5c17
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation_info.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack.
+ - This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+version_added: "2.2"
+author:
+ - Justin Menga (@jmenga)
+ - Kevin Coming (@waffie1)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
+ type: str
+ all_facts:
+ description:
+ - Get all stack information for the stack.
+ type: bool
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack.
+ type: bool
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack.
+ type: bool
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack.
+ type: bool
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack.
+ type: bool
+ default: false
+ stack_change_sets:
+ description:
+ - Get stack change sets for the stack.
+ type: bool
+ default: false
+ version_added: '2.10'
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get summary information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ register: output
+
+- debug:
+ msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# When the module is called as cloudformation_facts, return values are published
+# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+- debug:
+ msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+ stack_name: my-awesome-stack
+
+- cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: my_stack
+
+- debug:
+ msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: try to get facts about a stack but fail if it doesn't exist
+ cloudformation_info:
+ stack_name: nonexistent-stack
+ all_facts: yes
+ failed_when: cloudformation['nonexistent-stack'] is undefined
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: if the stack exists
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs, keyed by each output's 'OutputKey' with the
+ corresponding 'OutputValue' as its value
+ returned: if the stack exists
+ type: dict
+ sample:
+ ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
+stack_parameters:
+ description: Dictionary of stack parameters, keyed by each parameter's 'ParameterKey' with the
+ corresponding 'ParameterValue' as its value
+ returned: if the stack exists
+ type: dict
+ sample:
+ DatabaseEngine: mysql
+ DatabasePassword: "***"
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true and the stack exists
+ type: list
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true and the stack exists
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true and the stack exists
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: list
+stack_resources:
+ description: Dictionary of stack resources, keyed by each resource's 'LogicalResourceId' with the
+ corresponding 'PhysicalResourceId' as its value
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample:
+ AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
+ AutoScalingSecurityGroup: "sg-abcd1234"
+ ApplicationDatabase: "dazvlpr01xj55a"
+stack_change_sets:
+ description: A list of stack change sets. Each item in the list represents the details of a specific changeset
+
+ returned: only if all_facts or stack_change_sets is true and the stack exists
+ type: list
+'''
+
+import json
+import traceback
+
+from functools import partial
+from ansible.module_utils._text import to_native
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict)
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudformation')
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stacks_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_stacks')
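+ # build_full_result() drains every page into one merged response, so the
+ # caller sees the complete stack list regardless of API page size.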
+ return paginator.paginate(**kwargs).build_full_result()['Stacks']
+
+ def describe_stacks(self, stack_name=None):
+ try:
+ kwargs = {'StackName': stack_name} if stack_name else {}
+ response = self.describe_stacks_with_backoff(**kwargs)
+ if response is not None:
+ return response
+ self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ if 'does not exist' in e.response['Error']['Message']:
+ # missing stack, don't bail.
+ return {}
+ self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_resources_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_stack_resources')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
+
+ def list_stack_resources(self, stack_name):
+ try:
+ return self.list_stack_resources_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_events_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('describe_stack_events')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
+
+ def describe_stack_events(self, stack_name):
+ try:
+ return self.describe_stack_events_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_change_sets_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_change_sets')
+ return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_change_set_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_change_set')
+ return paginator.paginate(**kwargs).build_full_result()
+
+ def describe_stack_change_sets(self, stack_name):
+ changes = []
+ try:
+ change_sets = self.list_stack_change_sets_with_backoff(stack_name)
+ for item in change_sets:
+ changes.append(self.describe_stack_change_set_with_backoff(
+ StackName=stack_name,
+ ChangeSetName=item['ChangeSetName']))
+ return changes
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_stack_policy_with_backoff(self, stack_name):
+ return self.client.get_stack_policy(StackName=stack_name)
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.get_stack_policy_with_backoff(stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_template_with_backoff(self, stack_name):
+ return self.client.get_template(StackName=stack_name)
+
+ def get_template(self, stack_name):
+ try:
+ response = self.get_template_with_backoff(stack_name)
+ return response.get('TemplateBody')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
+
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
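+ # e.g. to_dict([{'OutputKey': 'Name', 'OutputValue': 'x'}], 'OutputKey', 'OutputValue')
+ # returns {'Name': 'x'}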
+ if items:
+ return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
+ else:
+ return dict()
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ stack_change_sets=dict(required=False, default=False, type='bool'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ is_old_facts = module._name == 'cloudformation_facts'
+ if is_old_facts:
+ module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='2.13', collection_name='ansible.builtin')
+
+ service_mgr = CloudFormationServiceManager(module)
+
+ if is_old_facts:
+ result = {'ansible_facts': {'cloudformation': {}}}
+ else:
+ result = {'cloudformation': {}}
+
+ for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
+ facts = {'stack_description': stack_description}
+ stack_name = stack_description.get('StackName')
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
+ 'ParameterKey', 'ParameterValue')
+ facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
+ 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+ if all_facts or module.params.get('stack_change_sets'):
+ facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
+
+ if is_old_facts:
+ result['ansible_facts']['cloudformation'][stack_name] = facts
+ else:
+ result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
+ 'stack_parameters',
+ 'stack_policy',
+ 'stack_resources',
+ 'stack_tags',
+ 'stack_template'))
+
+ module.exit_json(changed=False, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/deploy_helper.py b/test/support/integration/plugins/modules/deploy_helper.py
new file mode 100644
index 00000000..38594dde
--- /dev/null
+++ b/test/support/integration/plugins/modules/deploy_helper.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+version_added: "2.0"
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+ C(absent) will remove the project folder (synonymous with the M(file) module with C(state=absent))
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ description:
+ - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (for example, '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+ type: bool
+ default: 'yes'
+
+ keep_releases:
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+- deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- deploy_helper:
+ path: /path/to/root
+- debug:
+ var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
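+ # query/present may auto-generate a timestamp release when none is given;
+ # finalize expects the release name to be passed in explicitly.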
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if not self.module.check_mode and os.path.exists(tmp_link_name):
+ changed = True
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
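+ # Sort newest-first by ctime so the slice keeps the keep_releases most
+ # recent directories and deletes everything older.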
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(required=False, type='str', default=None),
+ releases_path=dict(required=False, type='str', default='releases'),
+ shared_path=dict(required=False, type='path', default='shared'),
+ current_path=dict(required=False, type='path', default='current'),
+ keep_releases=dict(required=False, type='int', default=5),
+ clean=dict(required=False, type='bool', default=True),
+ unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/docker_swarm.py b/test/support/integration/plugins/modules/docker_swarm.py
new file mode 100644
index 00000000..a2c076c5
--- /dev/null
+++ b/test/support/integration/plugins/modules/docker_swarm.py
@@ -0,0 +1,681 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+version_added: "2.7"
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ version_added: "2.8"
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ version_added: "2.8"
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+ - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+ - Set to C(inspect) to display swarm information.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ - inspect
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+ - Maximum number of tasks history stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+ - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+ - Number of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+ - Number of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+ - Automatic expiry for node certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+ - docker
+ - docker.docker_py_1_documentation
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Inspect swarm
+ docker_swarm:
+ state: inspect
+ register: swarm_info
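+
+# Hypothetical illustration (not part of the original examples): initialise a
+# swarm with autolocked managers; the unlock key is returned in
+# swarm_facts.UnlockKey.
+- name: Init a swarm with autolocked managers
+ docker_swarm:
+ state: present
+ autolock_managers: yes
+ register: swarm_info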
+'''
+
+RETURN = '''
+swarm_facts:
+ description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description: Token to create a new *worker* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description: Token to create a new *manager* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions performed on the swarm.
+ returned: when action failed.
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils.docker.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
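+ # Copy only the module parameters that map onto a TaskParameters
+ # attribute; connection-level options are ignored here.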
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
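+ # Fill in any parameter the user left unset with the value from the
+ # active swarm spec, so the later comparison only flags real differences.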
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
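+ # Options that only apply at init/join time, or that are handled
+ # separately below, are excluded from the comparison with the active swarm.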
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ "inspect": self.inspect_swarm
+ }
+
+ if self.state == 'inspect':
+ self.client.module.deprecate(
+ "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
+ version='2.12', collection_name='ansible.builtin')
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
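+ # The spec's version index is required by the update call; Docker rejects
+ # the update if the spec has changed since this inspection.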
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+ self.client.fail("Can not remove the node. The status node is ready and not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str'),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['advertise_addr', 'remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
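+ # Minimum Docker SDK / API versions per option; the client only fails when
+ # an option below its minimum supported version is actually used.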
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2.py b/test/support/integration/plugins/modules/ec2.py
new file mode 100644
index 00000000..952aa5a1
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2.py
@@ -0,0 +1,1766 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2
+short_description: create, terminate, start or stop an instance in ec2
+description:
+ - Creates or terminates ec2 instances.
+ - >
+ Note: This module uses the older boto Python module to interact with the EC2 API.
+ M(ec2) will still receive bug fixes, but no new features.
+ Consider using the M(ec2_instance) module instead.
+ If M(ec2_instance) does not support a feature you need that is available in M(ec2), please
+ file a feature request.
+version_added: "0.9"
+options:
+ key_name:
+ description:
+ - Key pair to use on the instance.
+ - The SSH key must already exist in AWS in order to use this argument.
+ - Keys can be created / deleted using the M(ec2_key) module.
+ aliases: ['keypair']
+ type: str
+ id:
+ version_added: "1.1"
+ description:
+ - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
+ - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
+ - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ type: str
+ group:
+ description:
+ - Security group (or list of groups) to use with the instance.
+ aliases: [ 'groups' ]
+ type: list
+ elements: str
+ group_id:
+ version_added: "1.1"
+ description:
+ - Security group id (or list of ids) to use with the instance.
+ type: list
+ elements: str
+ zone:
+ version_added: "1.2"
+ description:
+ - AWS availability zone in which to launch the instance.
+ aliases: [ 'aws_zone', 'ec2_zone' ]
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ - Required when creating a new instance.
+ type: str
+ aliases: ['type']
+ tenancy:
+ version_added: "1.9"
+ description:
+ - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC.
+ - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well.
+ - Dedicated tenancy is not available for EC2 "micro" instances.
+ default: default
+ choices: [ "default", "dedicated" ]
+ type: str
+ spot_price:
+ version_added: "1.5"
+ description:
+ - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
+ - A spot request is made with this maximum bid. When it is filled, the instance is started.
+ type: str
+ spot_type:
+ version_added: "2.0"
+ description:
+ - The type of spot request.
+ - After being interrupted, a C(persistent) spot instance will be started once there is capacity to fill the request again.
+ default: "one-time"
+ choices: [ "one-time", "persistent" ]
+ type: str
+ image:
+ description:
+ - I(ami) ID to use for the instance.
+ - Required when I(state=present).
+ type: str
+ kernel:
+ description:
+ - Kernel eki to use for the instance.
+ type: str
+ ramdisk:
+ description:
+ - Ramdisk eri to use for the instance.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ - Does not wait for SSH, see the 'wait_for_connection' example for details.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+ spot_wait_timeout:
+ version_added: "1.5"
+ description:
+ - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan.
+ default: 600
+ type: int
+ count:
+ description:
+ - Number of instances to launch.
+ default: 1
+ type: int
+ monitoring:
+ version_added: "1.1"
+ description:
+ - Enable detailed monitoring (CloudWatch) for instance.
+ type: bool
+ default: false
+ user_data:
+ version_added: "0.9"
+ description:
+ - Opaque blob of data which is made available to the EC2 instance.
+ type: str
+ instance_tags:
+ version_added: "1.0"
+ description:
+ - A hash/dictionary of tags to add to the new instance, or used to select instances to start/stop by tag; for example, '{"key":"value"}' or '{"key":"value","key2":"value2"}'.
+ type: dict
+ placement_group:
+ version_added: "1.3"
+ description:
+ - Placement group for the instance when using EC2 Clustered Compute.
+ type: str
+ vpc_subnet_id:
+ version_added: "1.1"
+ description:
+ - The subnet ID in which to launch the instance (VPC).
+ type: str
+ assign_public_ip:
+ version_added: "1.5"
+ description:
+ - When provisioning within a VPC, assign a public IP address. The Boto library must be 2.13.0+.
+ type: bool
+ private_ip:
+ version_added: "1.2"
+ description:
+ - The private IP address to assign to the instance (from the VPC subnet).
+ type: str
+ instance_profile_name:
+ version_added: "1.3"
+ description:
+ - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+.
+ type: str
+ instance_ids:
+ version_added: "1.3"
+ description:
+ - "list of instance ids, currently used for states: absent, running, stopped"
+ aliases: ['instance_id']
+ type: list
+ elements: str
+ source_dest_check:
+ version_added: "1.6"
+ description:
+ - Enable or disable the Source/Destination checks (for NAT instances and Virtual Routers).
+ When initially creating an instance, the EC2 API defaults this to C(True).
+ type: bool
+ termination_protection:
+ version_added: "2.0"
+ description:
+ - Enable or disable termination protection.
+ type: bool
+ default: false
+ instance_initiated_shutdown_behavior:
+ version_added: "2.2"
+ description:
+ - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
+ images (which require termination on shutdown).
+ default: 'stop'
+ choices: [ "stop", "terminate" ]
+ type: str
+ state:
+ version_added: "1.3"
+ description:
+ - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2.
+ - When I(state=absent), I(instance_ids) is required.
+ - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required.
+ default: 'present'
+ choices: ['absent', 'present', 'restarted', 'running', 'stopped']
+ type: str
+ volumes:
+ version_added: "1.5"
+ description:
+ - A list of hash/dictionaries of volumes to add to the new instance.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ required: true
+ description:
+ - A name for the device (For example C(/dev/sda)).
+ delete_on_termination:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be automatically deleted when the instance is terminated.
+ ephemeral:
+ type: str
+ description:
+ - Whether the volume should be ephemeral.
+ - Data on ephemeral volumes is lost when the instance is stopped.
+ - Mutually exclusive with the I(snapshot) parameter.
+ encrypted:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+ snapshot:
+ type: str
+ description:
+ - The ID of an EBS snapshot to copy when creating the volume.
+ - Mutually exclusive with the I(ephemeral) parameter.
+ volume_type:
+ type: str
+ description:
+ - The type of volume to create.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+ volume_size:
+ type: int
+ description:
+ - The size of the volume (in GiB).
+ iops:
+ type: int
+ description:
+ - The number of IOPS per second to provision for the volume.
+ - Required when I(volume_type=io1).
+ ebs_optimized:
+ version_added: "1.6"
+ description:
+ - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ default: false
+ type: bool
+ exact_count:
+ version_added: "1.5"
+ description:
+ - An integer value which indicates how many instances matching the 'count_tag' parameter should be running.
+ Instances are either created or terminated based on this value.
+ type: int
+ count_tag:
+ version_added: "1.5"
+ description:
+ - Used with I(exact_count) to determine how many nodes matching a specific tag criterion should be running.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
+ that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option.
+ type: raw
+ network_interfaces:
+ version_added: "2.0"
+ description:
+ - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
+ none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are
+ for creating a new network interface at launch.)
+ aliases: ['network_interface']
+ type: list
+ elements: str
+ spot_launch_group:
+ version_added: "2.1"
+ description:
+ - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
+ type: str
+author:
+ - "Tim Gerla (@tgerla)"
+ - "Lester Wade (@lwade)"
+ - "Seth Vidal (@skvidal)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic provisioning example
+- ec2:
+ key_name: mykey
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ group: webserver
+ count: 3
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Advanced example with tagging and CloudWatch
+- ec2:
+ key_name: mykey
+ group: databases
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with additional IOPS volume from snapshot and volume delete on termination
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_type: io1
+ iops: 1000
+ volume_size: 100
+ delete_on_termination: true
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with ssd gp2 root volume
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/xvda
+ volume_type: gp2
+ volume_size: 8
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ count_tag:
+ Name: dbserver
+ exact_count: 1
+
+# Multiple groups example
+- ec2:
+ key_name: mykey
+ group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Multiple instances with additional volume from snapshot
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_size: 10
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Dedicated tenancy example
+- local_action:
+ module: ec2
+ assign_public_ip: yes
+ group_id: sg-1dc53f72
+ key_name: mykey
+ image: ami-6e649707
+ instance_type: m1.small
+ tenancy: dedicated
+ vpc_subnet_id: subnet-29e63245
+ wait: yes
+
+# Spot instance example
+- ec2:
+ spot_price: 0.24
+ spot_wait_timeout: 600
+ keypair: mykey
+ group_id: sg-1dc53f72
+ instance_type: m1.small
+ image: ami-6e649707
+ wait: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ spot_launch_group: report_generators
+ instance_initiated_shutdown_behavior: terminate
+
+# Examples using pre-existing network interfaces
+- ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interface: eni-deadbeef
+
+- ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
+
+# Launch instances, runs some tasks
+# and then terminate them
+
+- name: Create a sandbox instance
+ hosts: localhost
+ gather_facts: False
+ vars:
+ keypair: my_keypair
+ instance_type: m1.small
+ security_group: my_securitygroup
+ image: my_ami_id
+ region: us-east-1
+ tasks:
+ - name: Launch instance
+ ec2:
+ key_name: "{{ keypair }}"
+ group: "{{ security_group }}"
+ instance_type: "{{ instance_type }}"
+ image: "{{ image }}"
+ wait: true
+ region: "{{ region }}"
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ register: ec2
+
+ - name: Add new instance to host group
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: launched
+ loop: "{{ ec2.instances }}"
+
+ - name: Wait for SSH to come up
+ delegate_to: "{{ item.public_dns_name }}"
+ wait_for_connection:
+ delay: 60
+ timeout: 320
+ loop: "{{ ec2.instances }}"
+
+- name: Configure instance(s)
+ hosts: launched
+ become: True
+ gather_facts: True
+ roles:
+ - my_awesome_role
+ - my_awesome_test
+
+- name: Terminate instances
+ hosts: localhost
+ tasks:
+ - name: Terminate instances that were previously launched
+ ec2:
+ state: 'absent'
+ instance_ids: '{{ ec2.instance_ids }}'
+
+# Start a few existing instances, run some tasks
+# and stop the instances
+
+- name: Start sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Start the sandbox instances
+ ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: running
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ roles:
+ - do_neat_stuff
+ - do_more_neat_stuff
+
+- name: Stop sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Stop the sandbox instances
+ ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: stopped
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Start stopped instances specified by tag
+#
+- local_action:
+ module: ec2
+ instance_tags:
+ Name: ExtraPower
+ state: running
+
+#
+# Restart instances specified by tag
+#
+- local_action:
+ module: ec2
+ instance_tags:
+ Name: ExtraPower
+ state: restarted
+
+#
+# Enforce that 5 instances with a tag "foo" are running
+# (Highly recommended!)
+#
+
+- ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ foo: bar
+ exact_count: 5
+ count_tag: foo
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running
+#
+
+- ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ Name: database
+ dbtype: postgres
+ exact_count: 5
+ count_tag:
+ Name: database
+ dbtype: postgres
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# count_tag complex argument examples
+#
+
+ # instances with tag foo
+- ec2:
+ count_tag:
+ foo:
+
+ # instances with tag foo=bar
+- ec2:
+ count_tag:
+ foo: bar
+
+ # instances with tags foo=bar & baz
+- ec2:
+ count_tag:
+ foo: bar
+ baz:
+
+ # instances with tags foo & bar & baz=bang
+- ec2:
+ count_tag:
+ - foo
+ - bar
+ - baz: bang
+
+'''
+
+import time
+import datetime
+import traceback
+from ast import literal_eval
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
+from ansible.module_utils.six import get_function_code, string_types
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ import boto.ec2
+ from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
+ from boto.exception import EC2ResponseError
+ from boto import connect_ec2_endpoint
+ from boto import connect_vpc
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
+
+ # get reservations for instances that match tag(s) and are in the desired state
+ state = module.params.get('state')
+ if state not in ['running', 'stopped']:
+ state = None
+ reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
+
+ instances = []
+ for res in reservations:
+ if hasattr(res, 'instances'):
+ for inst in res.instances:
+ if inst.state == 'terminated' or inst.state == 'shutting-down':
+ continue
+ instances.append(inst)
+
+ return reservations, instances
+
+
+def _set_none_to_blank(dictionary):
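+ # Recursively replace empty/None tag values with empty strings so they
+ # match EC2's representation of value-less tags.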
+ result = dictionary
+ for k in result:
+ if isinstance(result[k], dict):
+ result[k] = _set_none_to_blank(result[k])
+ elif not result[k]:
+ result[k] = ""
+ return result
+
+
+def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
+ # TODO: filters do not work with tags that have underscores
+ filters = dict()
+
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ vpc_id = None
+ if vpc_subnet_id:
+ filters.update({"subnet-id": vpc_subnet_id})
+ if vpc:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+
+ if vpc_id:
+ filters.update({"vpc-id": vpc_id})
+
+ if tags is not None:
+
+ if isinstance(tags, str):
+ try:
+ tags = literal_eval(tags)
+ except Exception:
+ pass
+
+ # if not a string type, convert and make sure it's a text string
+ if isinstance(tags, int):
+ tags = to_text(tags)
+
+ # if string, we only care that a tag of that name exists
+ if isinstance(tags, str):
+ filters.update({"tag-key": tags})
+
+ # if list, append each item to filters
+ if isinstance(tags, list):
+ for x in tags:
+ if isinstance(x, dict):
+ x = _set_none_to_blank(x)
+ filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
+ else:
+ filters.update({"tag-key": x})
+
+ # if dict, add the key and value to the filter
+ if isinstance(tags, dict):
+ tags = _set_none_to_blank(tags)
+ filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
+
+ # check whether the filters dict is empty; if so, stop
+ if not filters:
+ module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))
+
+ if state:
+ # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
+ filters.update({'instance-state-name': state})
+
+ if zone:
+ filters.update({'availability-zone': zone})
+
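+ # The 'id' parameter is passed to EC2 as the client token, so a rerun with
+ # the same id finds the instances created by an earlier call (idempotency).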
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ results = ec2.get_all_instances(filters=filters)
+
+ return results
+
+
+def get_instance_info(inst):
+ """
+ Retrieves instance information from an instance
+ ID and returns it as a dictionary
+ """
+ instance_info = {'id': inst.id,
+ 'ami_launch_index': inst.ami_launch_index,
+ 'private_ip': inst.private_ip_address,
+ 'private_dns_name': inst.private_dns_name,
+ 'public_ip': inst.ip_address,
+ 'dns_name': inst.dns_name,
+ 'public_dns_name': inst.public_dns_name,
+ 'state_code': inst.state_code,
+ 'architecture': inst.architecture,
+ 'image_id': inst.image_id,
+ 'key_name': inst.key_name,
+ 'placement': inst.placement,
+ 'region': inst.placement[:-1],
+ 'kernel': inst.kernel,
+ 'ramdisk': inst.ramdisk,
+ 'launch_time': inst.launch_time,
+ 'instance_type': inst.instance_type,
+ 'root_device_type': inst.root_device_type,
+ 'root_device_name': inst.root_device_name,
+ 'state': inst.state,
+ 'hypervisor': inst.hypervisor,
+ 'tags': inst.tags,
+ 'groups': dict((group.id, group.name) for group in inst.groups),
+ }
+ try:
+ instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
+ except AttributeError:
+ instance_info['virtualization_type'] = None
+
+ try:
+ instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
+ except AttributeError:
+ instance_info['ebs_optimized'] = False
+
+ try:
+ bdm_dict = {}
+ bdm = getattr(inst, 'block_device_mapping')
+ for device_name in bdm.keys():
+ bdm_dict[device_name] = {
+ 'status': bdm[device_name].status,
+ 'volume_id': bdm[device_name].volume_id,
+ 'delete_on_termination': bdm[device_name].delete_on_termination
+ }
+ instance_info['block_device_mapping'] = bdm_dict
+ except AttributeError:
+ instance_info['block_device_mapping'] = False
+
+ try:
+ instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
+ except AttributeError:
+ instance_info['tenancy'] = 'default'
+
+ return instance_info
+
+
+def boto_supports_associate_public_ip_address(ec2):
+ """
+ Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
+ class. Added in Boto 2.13.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts associate_public_ip_address argument, else false
+ """
+
+ try:
+ network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
+ getattr(network_interface, "associate_public_ip_address")
+ return True
+ except AttributeError:
+ return False
+
+
+def boto_supports_profile_name_arg(ec2):
+ """
+ Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accept instance_profile_name argument, else false
+ """
+ run_instances_method = getattr(ec2, 'run_instances')
+ return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
+
+
+def boto_supports_volume_encryption():
+ """
+ Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
+
+ Returns:
+ True if the installed Boto version is at least 2.29.0, else False
+ """
+ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
+
+def create_block_device(module, ec2, volume):
+ # Not aware of a way to determine this programmatically
+ # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
+ MAX_IOPS_TO_SIZE_RATIO = 30
+
+ volume_type = volume.get('volume_type')
+
+ if 'snapshot' not in volume and 'ephemeral' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if volume_type == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'iops' in volume:
+ snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
+ size = volume.get('volume_size', snapshot.volume_size)
+ if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
+ module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+ if boto_supports_volume_encryption():
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'),
+ encrypted=volume.get('encrypted', None))
+ else:
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'))
+
+
+def boto_supports_param_in_spot_request(ec2, param):
+ """
+ Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if boto library has the named param as an argument on the request_spot_instances method, else False
+ """
+ method = getattr(ec2, 'request_spot_instances')
+ return param in get_function_code(method).co_varnames
+
+
+def await_spot_requests(module, ec2, spot_requests, count):
+ """
+ Wait for a group of spot requests to be fulfilled, or fail.
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
+ count: Total number of instances to be created by the spot requests
+
+ Returns:
+ list of instance ID's created by the spot request(s)
+ """
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ wait_complete = time.time() + spot_wait_timeout
+
+ spot_req_inst_ids = dict()
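+ # Poll all outstanding requests until each reports an instance id, sleeping
+ # five seconds between rounds; fail once spot_wait_timeout elapses.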
+ while time.time() < wait_complete:
+ reqs = ec2.get_all_spot_instance_requests()
+ for sirb in spot_requests:
+ if sirb.id in spot_req_inst_ids:
+ continue
+ for sir in reqs:
+ if sir.id != sirb.id:
+ continue # this is not our spot instance
+ if sir.instance_id is not None:
+ spot_req_inst_ids[sirb.id] = sir.instance_id
+ elif sir.state == 'open':
+ continue # still waiting, nothing to do here
+ elif sir.state == 'active':
+ continue # Instance is created already, nothing to do here
+ elif sir.state == 'failed':
+ module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
+ sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+ elif sir.state == 'cancelled':
+ module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
+ elif sir.state == 'closed':
+ # instance is terminating or marked for termination
+ # this may be intentional on the part of the operator,
+ # or it may have been terminated by AWS due to capacity,
+ # price, or group constraints. In this case, we'll fail
+ # the module if the reason for the state is anything
+ # other than termination by user. Codes are documented at
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
+ if sir.status.code == 'instance-terminated-by-user':
+ # do nothing, since the user likely did this on purpose
+ pass
+ else:
+ spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
+ module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+
+ if len(spot_req_inst_ids) < count:
+ time.sleep(5)
+ else:
+ return list(spot_req_inst_ids.values())
+ module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
+
+
+def enforce_count(module, ec2, vpc):
+
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ zone = module.params.get('zone')
+
+ # fail here if the exact count was specified without filtering
+ # on a tag, as this may lead to an undesired removal of instances
+ if exact_count and count_tag is None:
+ module.fail_json(msg="you must use the 'count_tag' option with exact_count")
+
+ reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
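+ # Converge on exact_count: launch the shortfall, or terminate the surplus
+ # starting with the lexically smallest instance ids.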
+
+ changed = None
+ checkmode = False
+ instance_dict_array = []
+ changed_instance_ids = None
+
+ if len(instances) == exact_count:
+ changed = False
+ elif len(instances) < exact_count:
+ changed = True
+ to_create = exact_count - len(instances)
+ if not checkmode:
+ (instance_dict_array, changed_instance_ids, changed) \
+ = create_instances(module, ec2, vpc, override_count=to_create)
+
+ for inst in instance_dict_array:
+ instances.append(inst)
+ elif len(instances) > exact_count:
+ changed = True
+ to_remove = len(instances) - exact_count
+ if not checkmode:
+ all_instance_ids = sorted([x.id for x in instances])
+ remove_ids = all_instance_ids[0:to_remove]
+
+ instances = [x for x in instances if x.id not in remove_ids]
+
+ (changed, instance_dict_array, changed_instance_ids) \
+ = terminate_instances(module, ec2, remove_ids)
+ terminated_list = []
+ for inst in instance_dict_array:
+ inst['state'] = "terminated"
+ terminated_list.append(inst)
+ instance_dict_array = terminated_list
+
+ # ensure all instances are dictionaries
+ all_instances = []
+ for inst in instances:
+
+ if not isinstance(inst, dict):
+ warn_if_public_ip_assignment_changed(module, inst)
+ inst = get_instance_info(inst)
+ all_instances.append(inst)
+
+ return (all_instances, instance_dict_array, changed_instance_ids, changed)
+
+
+def create_instances(module, ec2, vpc, override_count=None):
+ """
+ Creates new instances
+
+ module : AnsibleModule object
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched
+ """
+
+ key_name = module.params.get('key_name')
+ id = module.params.get('id')
+ group_name = module.params.get('group')
+ group_id = module.params.get('group_id')
+ zone = module.params.get('zone')
+ instance_type = module.params.get('instance_type')
+ tenancy = module.params.get('tenancy')
+ spot_price = module.params.get('spot_price')
+ spot_type = module.params.get('spot_type')
+ image = module.params.get('image')
+ if override_count:
+ count = override_count
+ else:
+ count = module.params.get('count')
+ monitoring = module.params.get('monitoring')
+ kernel = module.params.get('kernel')
+ ramdisk = module.params.get('ramdisk')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ placement_group = module.params.get('placement_group')
+ user_data = module.params.get('user_data')
+ instance_tags = module.params.get('instance_tags')
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ private_ip = module.params.get('private_ip')
+ instance_profile_name = module.params.get('instance_profile_name')
+ volumes = module.params.get('volumes')
+ ebs_optimized = module.params.get('ebs_optimized')
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ source_dest_check = module.boolean(module.params.get('source_dest_check'))
+ termination_protection = module.boolean(module.params.get('termination_protection'))
+ network_interfaces = module.params.get('network_interfaces')
+ spot_launch_group = module.params.get('spot_launch_group')
+ instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
+
+ vpc_id = None
+ if vpc_subnet_id:
+ if not vpc:
+ module.fail_json(msg="region must be specified")
+ else:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+ else:
+ vpc_id = None
+
+ try:
+ # Here we try to lookup the group id from the security group name - if group is set.
+ if group_name:
+ if vpc_id:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
+ else:
+ grp_details = ec2.get_all_security_groups()
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
+ if len(unmatched) > 0:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+ # Now we try to lookup the group id testing if group exists.
+ elif group_id:
+ # wrap the group_id in a list if it's not one already
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_name = [grp_item.name for grp_item in grp_details]
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+    # Look up any instances that match our run id.
+
+ running_instances = []
+ count_remaining = int(count)
+
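+    # If a client token ('id') was supplied, count instances launched with the
+    # same token that are still running toward the requested count, so that
+    # re-running the task is idempotent.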
+ if id is not None:
+ filter_dict = {'client-token': id, 'instance-state-name': 'running'}
+ previous_reservations = ec2.get_all_instances(None, filter_dict)
+ for res in previous_reservations:
+ for prev_instance in res.instances:
+ running_instances.append(prev_instance)
+ count_remaining = count_remaining - len(running_instances)
+
+    # Both min_count and max_count are set to the count parameter, so the launch request is explicit: launch exactly count instances, or fail.
+
+ if count_remaining == 0:
+ changed = False
+ else:
+ changed = True
+ try:
+ params = {'image_id': image,
+ 'key_name': key_name,
+ 'monitoring_enabled': monitoring,
+ 'placement': zone,
+ 'instance_type': instance_type,
+ 'kernel_id': kernel,
+ 'ramdisk_id': ramdisk}
+ if user_data is not None:
+ params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict')
+
+ if ebs_optimized:
+ params['ebs_optimized'] = ebs_optimized
+
+ # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
+ if not spot_price:
+ params['tenancy'] = tenancy
+
+ if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+ else:
+ if instance_profile_name is not None:
+ module.fail_json(
+ msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
+
+ if assign_public_ip is not None:
+ if not boto_supports_associate_public_ip_address(ec2):
+ module.fail_json(
+ msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
+ elif not vpc_subnet_id:
+ module.fail_json(
+ msg="assign_public_ip only available with vpc_subnet_id")
+
+ else:
+ if private_ip:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ private_ip_address=private_ip,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ else:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
+ params['network_interfaces'] = interfaces
+ else:
+ if network_interfaces:
+ if isinstance(network_interfaces, string_types):
+ network_interfaces = [network_interfaces]
+ interfaces = []
+ for i, network_interface_id in enumerate(network_interfaces):
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ network_interface_id=network_interface_id,
+ device_index=i)
+ interfaces.append(interface)
+ params['network_interfaces'] = \
+ boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
+ else:
+ params['subnet_id'] = vpc_subnet_id
+ if vpc_subnet_id:
+ params['security_group_ids'] = group_id
+ else:
+ params['security_groups'] = group_name
+
+ if volumes:
+ bdm = BlockDeviceMapping()
+ for volume in volumes:
+ if 'device_name' not in volume:
+ module.fail_json(msg='Device name must be set for volume')
+                # The minimum volume size is 1 GiB; a volume_size explicitly
+                # set to 0 is treated as a signal not to create this volume.
+ if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ bdm[volume['device_name']] = create_block_device(module, ec2, volume)
+
+ params['block_device_map'] = bdm
+
+ # check to see if we're using spot pricing first before starting instances
+ if not spot_price:
+ if assign_public_ip is not None and private_ip:
+ params.update(
+ dict(
+ min_count=count_remaining,
+ max_count=count_remaining,
+ client_token=id,
+ placement_group=placement_group,
+ )
+ )
+ else:
+ params.update(
+ dict(
+ min_count=count_remaining,
+ max_count=count_remaining,
+ client_token=id,
+ placement_group=placement_group,
+ private_ip_address=private_ip,
+ )
+ )
+
+ # For ordinary (not spot) instances, we can select 'stop'
+ # (the default) or 'terminate' here.
+ params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
+
+ try:
+ res = ec2.run_instances(**params)
+ except boto.exception.EC2ResponseError as e:
+ if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
+ "InvalidParameterCombination" == e.error_code):
+ params['instance_initiated_shutdown_behavior'] = 'terminate'
+ res = ec2.run_instances(**params)
+ else:
+ raise
+
+ instids = [i.id for i in res.instances]
+ while True:
+ try:
+ ec2.get_all_instances(instids)
+ break
+ except boto.exception.EC2ResponseError as e:
+ if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
+ # there's a race between start and get an instance
+ continue
+ else:
+ module.fail_json(msg=str(e))
+
+ # The instances returned through ec2.run_instances above can be in
+ # terminated state due to idempotency. See commit 7f11c3d for a complete
+ # explanation.
+ terminated_instances = [
+ str(instance.id) for instance in res.instances if instance.state == 'terminated'
+ ]
+ if terminated_instances:
+ module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
+ "were created previously but have since been terminated - " +
+                                      "use a (possibly different) 'id' parameter")
+
+ else:
+ if private_ip:
+ module.fail_json(
+ msg='private_ip only available with on-demand (non-spot) instances')
+ if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+ params['placement_group'] = placement_group
+ elif placement_group:
+ module.fail_json(
+ msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
+            # You can't tell spot instances to 'stop'; they will always be
+            # 'terminate'd. For convenience, we accept and ignore 'terminate' here.
+ if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
+ module.fail_json(
+ msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
+
+ if spot_launch_group and isinstance(spot_launch_group, string_types):
+ params['launch_group'] = spot_launch_group
+
+ params.update(dict(
+ count=count_remaining,
+ type=spot_type,
+ ))
+
+ # Set spot ValidUntil
+            # ValidUntil -> (timestamp). The end date of the request, in
+            # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
+ utc_valid_until = (
+ datetime.datetime.utcnow()
+ + datetime.timedelta(seconds=spot_wait_timeout))
+ params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
+
+ res = ec2.request_spot_instances(spot_price, **params)
+
+ # Now we have to do the intermediate waiting
+ if wait:
+ instids = await_spot_requests(module, ec2, res, count)
+ else:
+ instids = []
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
+
+ # wait here until the instances are up
+ num_running = 0
+ wait_timeout = time.time() + wait_timeout
+ res_list = ()
+ while wait_timeout > time.time() and num_running < len(instids):
+ try:
+ res_list = ec2.get_all_instances(instids)
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'InvalidInstanceID.NotFound':
+ time.sleep(1)
+ continue
+ else:
+ raise
+
+ num_running = 0
+ for res in res_list:
+ num_running += len([i for i in res.instances if i.state == 'running'])
+ if len(res_list) <= 0:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. Wait a second and then try again
+ time.sleep(1)
+ continue
+ if wait and num_running < len(instids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ # We do this after the loop ends so that we end up with one list
+ for res in res_list:
+ running_instances.extend(res.instances)
+
+ # Enabled by default by AWS
+ if source_dest_check is False:
+        for inst in running_instances:
+ inst.modify_attribute('sourceDestCheck', False)
+
+ # Disabled by default by AWS
+ if termination_protection is True:
+        for inst in running_instances:
+ inst.modify_attribute('disableApiTermination', True)
+
+ # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
+ if instance_tags and instids:
+ try:
+ ec2.create_tags(instids, instance_tags)
+ except boto.exception.EC2ResponseError as e:
+ module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
+
+ instance_dict_array = []
+ created_instance_ids = []
+ for inst in running_instances:
+ inst.update()
+ d = get_instance_info(inst)
+ created_instance_ids.append(inst.id)
+ instance_dict_array.append(d)
+
+ return (instance_dict_array, created_instance_ids, changed)
+
+
+def terminate_instances(module, ec2, instance_ids):
+    """
+    Terminates a list of instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: a list of ids of the instances to terminate, in the form
+      [ "<inst-id>", ..]
+
+    Returns a tuple of (changed, a list of dictionaries with instance
+    information about the instances terminated, and the list of
+    terminated instance ids).
+
+    If none of the instances are in a running or stopped state,
+    "changed" will remain False.
+
+    """
+
+ # Whether to wait for termination to complete before returning
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ terminated_instance_ids = []
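+    # Only instances in the 'running' or 'stopped' state can be terminated;
+    # anything else (e.g. already terminated) is skipped.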
+ for res in ec2.get_all_instances(instance_ids):
+ for inst in res.instances:
+ if inst.state == 'running' or inst.state == 'stopped':
+ terminated_instance_ids.append(inst.id)
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ ec2.terminate_instances([inst.id])
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+
+ # wait here until the instances are 'terminated'
+ if wait:
+ num_terminated = 0
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+ response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
+ filters={'instance-state-name': 'terminated'})
+ try:
+ num_terminated = sum([len(res.instances) for res in response])
+ except Exception as e:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. Wait a second and then try again
+ time.sleep(1)
+ continue
+
+ if num_terminated < len(terminated_instance_ids):
+ time.sleep(5)
+
+ # waiting took too long
+ if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+ module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
+ # Lets get the current state of the instances after terminating - issue600
+ instance_dict_array = []
+ for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
+ for inst in res.instances:
+ instance_dict_array.append(get_instance_info(inst))
+
+ return (changed, instance_dict_array, terminated_instance_ids)
+
+
+def startstop_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Starts or stops a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+    instance_ids: The list of ids of the instances to start or stop, in the
+      form [ "<inst-id>", ..]
+    instance_tags: A dict of tag keys and values in the form of
+      {key: value, ... }
+    state: Intended state ("running" or "stopped")
+
+    Returns a tuple of (changed, a list of dictionaries with instance
+    information about the instances started/stopped, and the list of
+    affected instance ids).
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two
+ """
+
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ group_id = module.params.get('group_id')
+ group_name = module.params.get('group')
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+ # Check that our instances are not in the state we want to take
+
+ # Check (and eventually change) instances attributes and instances state
+ existing_instances_array = []
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified
+ if inst.vpc_id and group_name:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
+ if unmatched:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
+ elif inst.vpc_id and group_id:
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_ids = [grp_item.id for grp_item in grp_details]
+ if inst.vpc_id and (group_name or group_id):
+ if set(sg.id for sg in inst.groups) != set(group_ids):
+ changed = inst.modify_attribute('groupSet', group_ids)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ if state == 'running':
+ inst.start()
+ else:
+ inst.stop()
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+ existing_instances_array.append(inst.id)
+
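+    # Merge the ids discovered via tag filtering with any explicitly supplied
+    # ids, so the wait loop below covers both sets.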
+ instance_ids = list(set(existing_instances_array + (instance_ids or [])))
+ # Wait for all the instances to finish starting or stopping
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ instance_dict_array = []
+ matched_instances = []
+ for res in ec2.get_all_instances(instance_ids):
+ for i in res.instances:
+ if i.state == state:
+ instance_dict_array.append(get_instance_info(i))
+ matched_instances.append(i)
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def restart_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Restarts a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+    instance_ids: The list of ids of the instances to restart, in the form
+      [ "<inst-id>", ..]
+    instance_tags: A dict of tag keys and values in the form of
+      {key: value, ... }
+    state: Intended state ("restarted")
+
+    Returns a tuple of (changed, a list of dictionaries with instance
+    information about the instances, and the list of affected instance ids).
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+    Wait does not apply here, as a reboot is an OS-level operation.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two.
+ """
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ # Check that our instances are not in the state we want to take
+
+ # Check (and eventually change) instances attributes and instances state
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ inst.reboot()
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def check_termination_protection(module, inst):
+ """
+ Check the instance disableApiTermination attribute.
+
+ module: Ansible module object
+ inst: EC2 instance object
+
+    returns: True if state changed, None otherwise
+ """
+
+ termination_protection = module.params.get('termination_protection')
+
+    if (termination_protection is not None and
+            inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection):
+ inst.modify_attribute('disableApiTermination', termination_protection)
+ return True
+
+
+def check_source_dest_attr(module, inst, ec2):
+ """
+ Check the instance sourceDestCheck attribute.
+
+ module: Ansible module object
+    inst: EC2 instance object
+    ec2: authenticated ec2 connection object
+
+    returns: True if state changed, None otherwise
+ """
+
+ source_dest_check = module.params.get('source_dest_check')
+
+ if source_dest_check is not None:
+ try:
+ if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+ inst.modify_attribute('sourceDestCheck', source_dest_check)
+ return True
+ except boto.exception.EC2ResponseError as exc:
+ # instances with more than one Elastic Network Interface will
+ # fail, because they have the sourceDestCheck attribute defined
+ # per-interface
+ if exc.code == 'InvalidInstanceID':
+ for interface in inst.interfaces:
+ if interface.source_dest_check != source_dest_check:
+ ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+ return True
+ else:
+ module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
+ exception=traceback.format_exc())
+
+
+def warn_if_public_ip_assignment_changed(module, instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = module.params.get('assign_public_ip')
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = getattr(instance, 'public_dns_name', None)
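+    # A public DNS name implies a public IP was assigned; warn when the
+    # requested assignment disagrees with what the instance actually has.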
+ if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
+ module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ key_name=dict(aliases=['keypair']),
+ id=dict(),
+ group=dict(type='list', aliases=['groups']),
+ group_id=dict(type='list'),
+ zone=dict(aliases=['aws_zone', 'ec2_zone']),
+ instance_type=dict(aliases=['type']),
+ spot_price=dict(),
+ spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
+ spot_launch_group=dict(),
+ image=dict(),
+ kernel=dict(),
+ count=dict(type='int', default='1'),
+ monitoring=dict(type='bool', default=False),
+ ramdisk=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ spot_wait_timeout=dict(type='int', default=600),
+ placement_group=dict(),
+ user_data=dict(),
+ instance_tags=dict(type='dict'),
+ vpc_subnet_id=dict(),
+ assign_public_ip=dict(type='bool'),
+ private_ip=dict(),
+ instance_profile_name=dict(),
+ instance_ids=dict(type='list', aliases=['instance_id']),
+ source_dest_check=dict(type='bool', default=None),
+ termination_protection=dict(type='bool', default=None),
+ state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
+ instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
+ exact_count=dict(type='int', default=None),
+ count_tag=dict(type='raw'),
+ volumes=dict(type='list'),
+ ebs_optimized=dict(type='bool', default=False),
+ tenancy=dict(default='default', choices=['default', 'dedicated']),
+ network_interfaces=dict(type='list', aliases=['network_interface'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ # Can be uncommented when we finish the deprecation cycle.
+ # ['group', 'group_id'],
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['exact_count', 'instance_ids'],
+ ['network_interfaces', 'assign_public_ip'],
+ ['network_interfaces', 'group'],
+ ['network_interfaces', 'group_id'],
+ ['network_interfaces', 'private_ip'],
+ ['network_interfaces', 'vpc_subnet_id'],
+ ],
+ )
+
+ if module.params.get('group') and module.params.get('group_id'):
+ module.deprecate(
+ msg='Support for passing both group and group_id has been deprecated. '
+ 'Currently group_id is ignored, in future passing both will result in an error',
+ version='2.14', collection_name='ansible.builtin')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
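+        # Prefer a region-based connection; only fall back to an explicit
+        # endpoint when ec2_url is set and no region is given.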
+ if module.params.get('region') or not module.params.get('ec2_url'):
+ ec2 = ec2_connect(module)
+ elif module.params.get('ec2_url'):
+ ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
+
+ if 'region' not in aws_connect_kwargs:
+ aws_connect_kwargs['region'] = ec2.region
+
+ vpc = connect_vpc(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
+
+ tagged_instances = []
+
+ state = module.params['state']
+
+ if state == 'absent':
+ instance_ids = module.params['instance_ids']
+ if not instance_ids:
+ module.fail_json(msg='instance_ids list is required for absent state')
+
+ (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
+
+ elif state in ('running', 'stopped'):
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+            module.fail_json(msg='running/stopped state requires a list of instance ids or a dict of instance tags: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
+
+    elif state == 'restarted':
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+            module.fail_json(msg='restarted state requires a list of instance ids or a dict of instance tags: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+
+ if module.params.get('exact_count') is None:
+ (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
+ else:
+ (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
+
+ # Always return instances in the same order
+ if new_instance_ids:
+ new_instance_ids.sort()
+ if instance_dict_array:
+ instance_dict_array.sort(key=lambda x: x['id'])
+ if tagged_instances:
+ tagged_instances.sort(key=lambda x: x['id'])
+
+ module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py
new file mode 100644
index 00000000..53c2374d
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_ami_info.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: '2.5'
+short_description: Gather information about ec2 AMIs
+description:
+ - Gather information about ec2 AMIs
+ - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Prasad Katti (@prasadkatti)
+requirements: [ boto3 ]
+options:
+ image_ids:
+ description: One or more image IDs.
+ aliases: [image_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
+ - Filter names and values are case sensitive.
+ type: dict
+ owners:
+ description:
+      - Filter the images by the owner. Valid options are an AWS account ID, self,
+        or an AWS owner alias (amazon | aws-marketplace | microsoft).
+ aliases: [owner]
+ type: list
+ elements: str
+ executable_users:
+ description:
+ - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
+ aliases: [executable_user]
+ type: list
+ elements: str
+ describe_image_attributes:
+ description:
+ - Describe attributes (like launchPermission) of the images found.
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: gather information about an AMI using ami-id
+ ec2_ami_info:
+ image_ids: ami-5b488823
+
+- name: gather information about all AMIs with tag key Name and value webapp
+ ec2_ami_info:
+ filters:
+ "tag:Name": webapp
+
+- name: gather information about an AMI with 'AMI Name' equal to foobar
+ ec2_ami_info:
+ filters:
+ name: foobar
+
+- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
+ ec2_ami_info:
+ owners: 099720109477
+ filters:
+ name: "ubuntu/images/ubuntu-zesty-17.04-*"
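+
+# A hedged sketch (the AMI ID below is hypothetical): setting
+# describe_image_attributes also fetches the launchPermission attribute
+# for images owned by the calling account.
+- name: gather information about an AMI including its launch permissions
+  ec2_ami_info:
+    image_ids: ami-00000000
+    describe_image_attributes: yes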
+'''
+
+RETURN = '''
+images:
+ description: A list of images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: EBS volumes
+ returned: always
+ type: complex
+ creation_date:
+ description: The date and time the image was created.
+ returned: always
+ type: str
+ sample: '2017-10-16T19:22:13.000Z'
+ description:
+ description: The description of the AMI.
+ returned: always
+ type: str
+ sample: ''
+ ena_support:
+ description: Whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ hypervisor:
+ description: The hypervisor type of the image.
+ returned: always
+ type: str
+ sample: xen
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ sample: ami-5b466623
+ image_location:
+ description: The location of the AMI.
+ returned: always
+ type: str
+ sample: 408466080000/Webapp
+ image_type:
+ description: The type of image.
+ returned: always
+ type: str
+ sample: machine
+ launch_permissions:
+      description: A list of AWS accounts that may launch the AMI.
+      returned: When the image is owned by the calling account and I(describe_image_attributes) is yes.
+ type: list
+ elements: dict
+ contains:
+ group:
+ description: A value of 'all' means the AMI is public.
+ type: str
+ user_id:
+ description: An AWS account ID with permissions to launch the AMI.
+ type: str
+ sample: [{"group": "all"}, {"user_id": "408466080000"}]
+ name:
+ description: The name of the AMI that was provided during image creation.
+ returned: always
+ type: str
+ sample: Webapp
+ owner_id:
+ description: The AWS account ID of the image owner.
+ returned: always
+ type: str
+ sample: '408466080000'
+ public:
+ description: Whether the image has public launch permissions.
+ returned: always
+ type: bool
+ sample: true
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ sriov_net_support:
+ description: Whether enhanced networking is enabled.
+ returned: always
+ type: str
+ sample: simple
+ state:
+ description: The current state of the AMI.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the image.
+ returned: always
+ type: dict
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+ image_ids = module.params.get("image_ids")
+ owners = module.params.get("owners")
+ executable_users = module.params.get("executable_users")
+ filters = module.params.get("filters")
+ owner_param = []
+
+ # describe_images is *very* slow if you pass the `Owners`
+ # param (unless it's self), for some reason.
+ # Converting the owners to filters and removing from the
+ # owners param greatly speeds things up.
+ # Implementation based on aioue's suggestion in #24886
+ for owner in owners:
+ if owner.isdigit():
+ if 'owner-id' not in filters:
+ filters['owner-id'] = list()
+ filters['owner-id'].append(owner)
+ elif owner == 'self':
+            # 'self' is not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ owner_param.append(owner)
+ else:
+ if 'owner-alias' not in filters:
+ filters['owner-alias'] = list()
+ filters['owner-alias'].append(owner)
+
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
+ images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error describing images")
+ for image in images:
+ try:
+ image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
+ if module.params.get("describe_image_attributes"):
+ launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
+ image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
+ except (ClientError, BotoCoreError) as err:
+ # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
+ pass
+
+    images.sort(key=lambda e: e.get('creation_date', ''))  # creation_date may not always be present
+ module.exit_json(images=images)
+
+
+def main():
+
+ argument_spec = dict(
+ image_ids=dict(default=[], type='list', aliases=['image_id']),
+ filters=dict(default={}, type='dict'),
+ owners=dict(default=[], type='list', aliases=['owner']),
+ executable_users=dict(default=[], type='list', aliases=['executable_user']),
+ describe_image_attributes=dict(default=False, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._module._name == 'ec2_ami_facts':
+ module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'",
+ version='2.13', collection_name='ansible.builtin')
+
+ ec2_client = module.client('ec2')
+
+ list_ec2_images(ec2_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_group.py b/test/support/integration/plugins/modules/ec2_group.py
new file mode 100644
index 00000000..bc416f66
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_group.py
@@ -0,0 +1,1345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+    - Maintains ec2 security groups. This module has a dependency on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ type: str
+ group_id:
+ description:
+      - ID of the security group to delete (used only when I(state=absent)).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ type: str
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ type: str
+ rules:
+ description:
+      - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+        no inbound rules will be enabled. The rules list may include the group's own name in C(group_name).
+        This allows idempotent loopback additions (e.g. allow group to access itself).
+        Rule sources list support was added in version 2.4. This allows defining multiple sources per
+        source type, as well as multiple source types per rule. Prior to 2.4 only an individual source was allowed.
+        In version 2.5 support for rule descriptions was added.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is coming from.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
+ was added.
+ required: false
+ version_added: "1.6"
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is going to.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group.
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ type: str
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ type: dict
+ aliases: ['resource_tags']
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+ - Preview diff mode support is added in version 2.7.
+'''
+
+EXAMPLES = '''
+- name: example using security group rule descriptions
+ ec2_group:
+ name: "{{ name }}"
+ description: sg with rule descriptions
+ vpc_id: vpc-xxxxxxxx
+ profile: "{{ aws_profile }}"
+ region: us-east-1
+ rules:
+ - proto: tcp
+ ports:
+ - 80
+ cidr_ip: 0.0.0.0/0
+ rule_desc: allow all on port 80
+
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ # this should only be needed for EC2 Classic security group rules
+ # because in a VPC an ELB will use a user-account security group
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ - proto: all
+ # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
+ # traffic on all ports is allowed, regardless of any ports you specify
+ from_port: 10050 # this value is ignored
+ to_port: 10050 # this value is ignored
+ cidr_ip: 10.0.0.0/8
+
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+ diff: True
+
+- name: "Delete group by its id"
+ ec2_group:
+ region: eu-west-1
+ group_id: sg-33b4ee5b
+ state: absent
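+
+# A hedged sketch (the group name is hypothetical): per the rules_egress
+# documentation above, an explicit empty list removes the default
+# allow-all outbound rule.
+- name: example group with all egress traffic blocked
+  ec2_group:
+    name: example-no-egress
+    description: an example EC2 group without egress rules
+    region: eu-west-1
+    rules_egress: []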
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: str
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: str
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: str
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: str
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+import itertools
+from copy import deepcopy
+from time import sleep
+from collections import namedtuple
+from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.aws.iam import get_aws_account_id
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
+from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
+valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
+current_account_id = None
+
+
+def rule_cmp(a, b):
+ """Compare rules without descriptions"""
+ for prop in ['port_range', 'protocol', 'target', 'target_type']:
+ if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
+ # equal protocols can interchange `(-1, -1)` and `(None, None)`
+ if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
+ continue
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ return True
+
+
+def rules_to_permissions(rules):
+ return [to_permission(rule) for rule in rules]
+
+
+def to_permission(rule):
+ # take a Rule, output the serialized grant
+ perm = {
+ 'IpProtocol': rule.protocol,
+ }
+ perm['FromPort'], perm['ToPort'] = rule.port_range
+ if rule.target_type == 'ipv4':
+ perm['IpRanges'] = [{
+ 'CidrIp': rule.target,
+ }]
+ if rule.description:
+ perm['IpRanges'][0]['Description'] = rule.description
+ elif rule.target_type == 'ipv6':
+ perm['Ipv6Ranges'] = [{
+ 'CidrIpv6': rule.target,
+ }]
+ if rule.description:
+ perm['Ipv6Ranges'][0]['Description'] = rule.description
+ elif rule.target_type == 'group':
+ if isinstance(rule.target, tuple):
+ pair = {}
+ if rule.target[0]:
+ pair['UserId'] = rule.target[0]
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if rule.target[1]:
+ pair['GroupId'] = rule.target[1]
+ elif rule.target[2]:
+ pair['GroupName'] = rule.target[2]
+ perm['UserIdGroupPairs'] = [pair]
+ else:
+ perm['UserIdGroupPairs'] = [{
+ 'GroupId': rule.target
+ }]
+ if rule.description:
+ perm['UserIdGroupPairs'][0]['Description'] = rule.description
+ elif rule.target_type == 'ip_prefix':
+ perm['PrefixListIds'] = [{
+ 'PrefixListId': rule.target,
+ }]
+ if rule.description:
+ perm['PrefixListIds'][0]['Description'] = rule.description
+ elif rule.target_type not in valid_targets:
+ raise ValueError('Invalid target type for rule {0}'.format(rule))
+ return fix_port_and_protocol(perm)
+
+
+def rule_from_group_permission(perm):
+ def ports_from_permission(p):
+ if 'FromPort' not in p and 'ToPort' not in p:
+ return (None, None)
+        return (int(p['FromPort']), int(p['ToPort']))
+
+ # outputs a rule tuple
+ for target_key, target_subkey, target_type in [
+ ('IpRanges', 'CidrIp', 'ipv4'),
+ ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
+ ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
+ ]:
+ if target_key not in perm:
+ continue
+ for r in perm[target_key]:
+ # there may be several IP ranges here, which is ok
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ r[target_subkey],
+ target_type,
+ r.get('Description')
+ )
+ if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
+ for pair in perm['UserIdGroupPairs']:
+ target = (
+ pair.get('UserId', None),
+ pair.get('GroupId', None),
+ pair.get('GroupName', None),
+ )
+ if pair.get('UserId', '').startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need
+ # group-id specified, so remove it when querying
+ # from permission
+ target = (
+ target[0],
+ None,
+ target[2],
+ )
+ elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id:
+ target = (
+ pair.get('UserId', None),
+ pair.get('GroupId', None),
+ pair.get('GroupName', None),
+ )
+
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ target,
+ 'group',
+ pair.get('Description')
+ )
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound'])
+def get_security_groups_with_backoff(connection, **kwargs):
+ return connection.describe_security_groups(**kwargs)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def sg_exists_with_backoff(connection, **kwargs):
+ try:
+ return connection.describe_security_groups(**kwargs)
+ except is_boto3_error_code('InvalidGroup.NotFound'):
+ return {'SecurityGroups': []}
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
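+    # Serializing each rule to canonical JSON and using it as a dict key
+    # collapses duplicates while preserving one instance of each rule.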
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port', 'rule_desc')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (target_type, target, group_created) after validating rule params.
+
+    rule: Dict describing a rule.
+    name: Name of the security group being managed.
+    group: Dict describing the security group being managed, if it exists.
+    groups: Dict of all available security groups.
+    vpc_id: ID of the VPC the group belongs to, if any.
+
+ AWS accepts an ip range or a security group as target of a rule. This
+    function validates the rule specification and returns either a non-None
+    group_id or a non-None ip range.
+ """
+ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
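+    # Matches targets of the form 'owner-id/sg-id/group-name', e.g. the
+    # 'amazon-elb/sg-87654321/amazon-elb-sg' style used in the EXAMPLES above.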
+ group_id = None
+ group_name = None
+ target_group_created = False
+
+ validate_rule(module, rule)
+ if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if group_id and group_name:
+ group_name = None
+ return 'group', (owner_id, group_id, group_name), False
+ elif 'group_id' in rule:
+ return 'group', rule['group_id'], False
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ auto_group = None
+ filters = {'group-name': group_name}
+ if vpc_id:
+ filters['vpc-id'] = vpc_id
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ # retry describing the group once
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
+ module.fail_json(msg="group %s will be automatically created by rule %s but "
+ "no description was provided" % (group_name, rule))
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ elif not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ auto_group = client.create_security_group(**params)
+ get_waiter(
+ client, 'security_group_exists',
+ ).wait(
+ GroupIds=[auto_group['GroupId']],
+ )
+ except is_boto3_error_code('InvalidGroup.Duplicate'):
+ # The group exists, but didn't show up in any of our describe-security-groups calls
+ # Try searching on a filter for the name, and allow a retry window for AWS to update
+ # the model on their end.
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except IndexError as e:
+ module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ except ClientError as e:
+ module.fail_json_aws(
+ e,
+ msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ if auto_group is not None:
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ return 'group', group_id, target_group_created
+ elif 'cidr_ip' in rule:
+ return 'ipv4', validate_ip(module, rule['cidr_ip']), False
+ elif 'cidr_ipv6' in rule:
+ return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
+ elif 'ip_prefix' in rule:
+ return 'ip_prefix', rule['ip_prefix'], False
+
+ module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
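+    # e.g. ports_expand([22, '80-82']) -> [(22, 22), (80, 82)]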
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, string_types):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((int(port.strip()),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
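+    # e.g. a rule with ports: [22, '8080-8099'] becomes two rules, one with
+    # from_port/to_port of 22/22 and one with 8080/8099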
+ if 'ports' not in rule:
+ if isinstance(rule.get('from_port'), string_types):
+ rule['from_port'] = int(rule.get('from_port'))
+ if isinstance(rule.get('to_port'), string_types):
+ rule['to_port'] = int(rule.get('to_port'))
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
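+# Illustrative sketch only: a rule such as
+#   {'proto': 'tcp', 'ports': ['22', '80-81'], 'cidr_ip': '10.0.0.0/8'}
+# expands into two rules whose (from_port, to_port) are (22, 22) and (80, 81).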
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+    # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
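+# Illustrative sketch only: a rule listing several sources, e.g.
+#   {'proto': 'tcp', 'ports': [22], 'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']}
+# becomes two single-source rules, one per cidr_ip entry.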
+
+def update_rules_description(module, client, rule_type, group_id, ip_permissions):
+ if module.check_mode:
+ return
+ try:
+ if rule_type == "in":
+ client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ if rule_type == "out":
+ client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
+
+
+def fix_port_and_protocol(permission):
+ for key in ('FromPort', 'ToPort'):
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = to_text(permission['IpProtocol'])
+
+ return permission
+
+
+def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
+ if revoke_ingress:
+ revoke(client, module, revoke_ingress, group_id, 'in')
+ if revoke_egress:
+ revoke(client, module, revoke_egress, group_id, 'out')
+ return bool(revoke_ingress or revoke_egress)
+
+
+def revoke(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
+
+
+def add_new_permissions(client, module, new_ingress, new_egress, group_id):
+ if new_ingress:
+ authorize(client, module, new_ingress, group_id, 'in')
+ if new_egress:
+ authorize(client, module, new_egress, group_id, 'out')
+ return bool(new_ingress or new_egress)
+
+
+def authorize(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
+
+
+def validate_ip(module, cidr_ip):
+ split_addr = cidr_ip.split('/')
+ if len(split_addr) == 2:
+        # this_ip is an IPv4 or IPv6 CIDR that may or may not have host bits set
+ # Get the network bits if IPv4, and validate if IPv6.
+ try:
+ ip = to_subnet(split_addr[0], split_addr[1])
+ if ip != cidr_ip:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(
+ cidr_ip, ip))
+ except ValueError:
+ # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
+ try:
+ isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
+ ip = cidr_ip
+ except ValueError:
+ # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
+ # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
+ ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
+ if ip6 != cidr_ip:
+ module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
+ return ip6
+ return ip
+ return cidr_ip
+
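+# Illustrative sketch only (module is assumed to be the AnsibleAWSModule
+# instance): validate_ip(module, '10.1.2.3/8') warns that host bits are set
+# and returns the normalized network '10.0.0.0/8'.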
+
+def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if not module.check_mode:
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except (BotoCoreError, ClientError) as e:
+                module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+ return bool(tags_need_modify or tags_to_delete)
+
+
+def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+ changed = False
+ client = module.client('ec2')
+ ingress_needs_desc_update = []
+ egress_needs_desc_update = []
+
+ for present_rule in present_egress:
+ needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_egress_list.remove(r)
+ egress_needs_desc_update.extend(needs_update)
+ for present_rule in present_ingress:
+ needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_ingress_list.remove(r)
+ ingress_needs_desc_update.extend(needs_update)
+
+ if ingress_needs_desc_update:
+ update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+ changed |= True
+ if egress_needs_desc_update:
+ update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+ changed |= True
+ return changed
+
+
+def create_security_group(client, module, name, description, vpc_id):
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ group = client.create_security_group(**params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create security group")
+        # When a group is created, an ALLOW ALL egress rule to 0.0.0.0/0 is
+        # added automatically, but it is not reflected in the object returned
+        # by the AWS API call, so we re-read the group to get an updated object.
+        # AWS sometimes takes a couple of seconds to update the security group,
+        # so wait until it exists.
+ while True:
+ sleep(3)
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+            if not group.get('VpcId') or group.get('IpPermissionsEgress'):
+                break
+ return group
+ return None
+
+
+def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+ group_id = group['GroupId']
+ tries = 6
+
+ def await_rules(group, desired_rules, purge, rule_key):
+ for i in range(tries):
+ current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+ if purge and len(current_rules ^ set(desired_rules)) == 0:
+ return group
+ elif purge:
+ conflicts = current_rules ^ set(desired_rules)
+ # For cases where set comparison is equivalent, but invalid port/proto exist
+ for a, b in itertools.combinations(conflicts, 2):
+ if rule_cmp(a, b):
+ conflicts.discard(a)
+ conflicts.discard(b)
+ if not len(conflicts):
+ return group
+ elif current_rules.issuperset(desired_rules) and not purge:
+ return group
+ sleep(10)
+ group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
+ module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+ return group
+
+ group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
+ if 'VpcId' in group and module.params.get('rules_egress') is not None:
+ group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+ return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
+def group_exists(client, module, vpc_id, group_id, name):
+ params = {'Filters': []}
+ if group_id:
+ params['GroupIds'] = [group_id]
+ if name:
+ # Add name to filters rather than params['GroupNames']
+ # because params['GroupNames'] only checks the default vpc if no vpc is provided
+ params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+ if vpc_id:
+ params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ # Don't filter by description to maintain backwards compatibility
+
+ try:
+ security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+ all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+ if security_groups:
+ groups = dict((group['GroupId'], group) for group in all_groups)
+ groups.update(dict((group['GroupName'], group) for group in all_groups))
+ if vpc_id:
+ vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+ groups.update(vpc_wins)
+ # maintain backwards compatibility by using the last matching group
+ return security_groups[-1], groups
+ return None, {}
+
+
+def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
+ if not hasattr(client, "update_security_group_rule_descriptions_egress"):
+        all_rules = (rules or []) + (rules_egress or [])
+ if any('rule_desc' in rule for rule in all_rules):
+ module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
+
+
+def get_diff_final_resource(client, module, security_group):
+ def get_account_id(security_group, module):
+ try:
+ owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
+ except (BotoCoreError, ClientError) as e:
+ owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
+ return owner_id
+
+ def get_final_tags(security_group_tags, specified_tags, purge_tags):
+ if specified_tags is None:
+ return security_group_tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
+ end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
+ end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
+ end_result_tags.update(tags_need_modify)
+ return end_result_tags
+
+ def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
+ if specified_rules is None:
+ return security_group_rules
+ if purge_rules:
+ final_rules = []
+ else:
+ final_rules = list(security_group_rules)
+ specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
+ for rule in specified_rules:
+ format_rule = {
+ 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
+ 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
+ }
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ format_rule['ip_protocol'] = '-1'
+ format_rule.pop('from_port')
+ format_rule.pop('to_port')
+ elif rule.get('ports'):
+ if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
+ rule['ports'] = [rule['ports']]
+ for port in rule.get('ports'):
+ if isinstance(port, string_types) and '-' in port:
+ format_rule['from_port'], format_rule['to_port'] = port.split('-')
+ else:
+ format_rule['from_port'] = format_rule['to_port'] = port
+ elif rule.get('from_port') or rule.get('to_port'):
+ format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
+ format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
+ for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
+ if rule.get(source_type):
+ rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
+ if rule.get('rule_desc'):
+ format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
+ else:
+ if not isinstance(rule[source_type], list):
+ rule[source_type] = [rule[source_type]]
+ format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
+ if rule.get('group_id') or rule.get('group_name'):
+ rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
+ format_rule['user_id_group_pairs'] = [{
+ 'description': rule_sg.get('description', rule_sg.get('group_desc')),
+ 'group_id': rule_sg.get('group_id', rule.get('group_id')),
+ 'group_name': rule_sg.get('group_name', rule.get('group_name')),
+ 'peering_status': rule_sg.get('peering_status'),
+ 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
+ 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
+ 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
+ }]
+ for k, v in list(format_rule['user_id_group_pairs'][0].items()):
+ if v is None:
+ format_rule['user_id_group_pairs'][0].pop(k)
+ final_rules.append(format_rule)
+ # Order final rules consistently
+ final_rules.sort(key=get_ip_permissions_sort_key)
+ return final_rules
+
+    security_group_ingress = security_group.get('ip_permissions', [])
+ specified_ingress = module.params['rules']
+ purge_ingress = module.params['purge_rules']
+ security_group_egress = security_group.get('ip_permissions_egress', [])
+ specified_egress = module.params['rules_egress']
+ purge_egress = module.params['purge_rules_egress']
+ return {
+ 'description': module.params['description'],
+ 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
+ 'group_name': security_group.get('group_name', module.params['name']),
+ 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
+ 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
+ 'owner_id': get_account_id(security_group, module),
+ 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
+ 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
+
+
+def flatten_nested_targets(module, rules):
+ def _flatten(targets):
+ for target in targets:
+ if isinstance(target, list):
+ for t in _flatten(target):
+ yield t
+ elif isinstance(target, string_types):
+ yield target
+
+ if rules is not None:
+ for rule in rules:
+ target_list_type = None
+ if isinstance(rule.get('cidr_ip'), list):
+ target_list_type = 'cidr_ip'
+ elif isinstance(rule.get('cidr_ipv6'), list):
+ target_list_type = 'cidr_ipv6'
+ if target_list_type is not None:
+ rule[target_list_type] = list(_flatten(rule[target_list_type]))
+ return rules
+
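+# Illustrative sketch only: a nested target list such as
+#   cidr_ip: [['10.0.0.0/8', '172.16.0.0/12'], '192.168.0.0/16']
+# is flattened to ['10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16'].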
+
+def get_rule_sort_key(dicts):
+ if dicts.get('cidr_ip'):
+ return dicts.get('cidr_ip')
+ elif dicts.get('cidr_ipv6'):
+ return dicts.get('cidr_ipv6')
+ elif dicts.get('prefix_list_id'):
+ return dicts.get('prefix_list_id')
+ elif dicts.get('group_id'):
+ return dicts.get('group_id')
+ return None
+
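+# Illustrative sketch only: the sort key falls back through cidr_ip,
+# cidr_ipv6, prefix_list_id, then group_id, so {'cidr_ip': '10.0.0.0/8'}
+# sorts by '10.0.0.0/8'.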
+
+def get_ip_permissions_sort_key(rule):
+ if rule.get('ip_ranges'):
+ rule.get('ip_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ip_ranges')[0]['cidr_ip']
+ elif rule.get('ipv6_ranges'):
+ rule.get('ipv6_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ipv6_ranges')[0]['cidr_ipv6']
+ elif rule.get('prefix_list_ids'):
+ rule.get('prefix_list_ids').sort(key=get_rule_sort_key)
+ return rule.get('prefix_list_ids')[0]['prefix_list_id']
+ elif rule.get('user_id_group_pairs'):
+ rule.get('user_id_group_pairs').sort(key=get_rule_sort_key)
+ return rule.get('user_id_group_pairs')[0]['group_id']
+ return None
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list'),
+ rules_egress=dict(type='list'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
+ rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ client = module.client('ec2')
+
+ verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
+ group, groups = group_exists(client, module, vpc_id, group_id, name)
+ group_created_new = not bool(group)
+
+ global current_account_id
+ current_account_id = get_aws_account_id(module)
+
+ before = {}
+ after = {}
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ try:
+ if not module.check_mode:
+ client.delete_security_group(GroupId=group['GroupId'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+ else:
+ # no match found, create it
+ group = create_security_group(client, module, name, description, vpc_id)
+ changed = True
+
+ if tags is not None and group is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+ if group:
+ named_tuple_ingress_list = []
+ named_tuple_egress_list = []
+ current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+ current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+ for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+ (rules_egress, 'out', named_tuple_egress_list)]:
+ if new_rules is None:
+ continue
+ for rule in new_rules:
+ target_type, target, target_group_created = get_target_from_rule(
+ module, client, rule, name, group, groups, vpc_id)
+ changed |= target_group_created
+
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ rule['proto'] = '-1'
+ rule['from_port'] = None
+ rule['to_port'] = None
+ try:
+ int(rule.get('proto', 'tcp'))
+ rule['proto'] = to_text(rule.get('proto', 'tcp'))
+ rule['from_port'] = None
+ rule['to_port'] = None
+ except ValueError:
+ # rule does not use numeric protocol spec
+ pass
+
+ named_tuple_rule_list.append(
+ Rule(
+ port_range=(rule['from_port'], rule['to_port']),
+ protocol=to_text(rule.get('proto', 'tcp')),
+ target=target, target_type=target_type,
+ description=rule.get('rule_desc'),
+ )
+ )
+
+ if module.params.get('rules_egress') is None and 'VpcId' in group:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ if rule in current_egress:
+ named_tuple_egress_list.append(rule)
+ if rule not in current_egress:
+ current_egress.append(rule)
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+ present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+ if purge_rules:
+ revoke_ingress = []
+ for p in present_ingress:
+ if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
+ revoke_ingress.append(to_permission(p))
+ else:
+ revoke_ingress = []
+ if purge_rules_egress and module.params.get('rules_egress') is not None:
+            if module.params.get('rules_egress') == []:
+ revoke_egress = [
+ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+ if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ ]
+ else:
+ revoke_egress = []
+ for p in present_egress:
+ if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
+ revoke_egress.append(to_permission(p))
+ else:
+ revoke_egress = []
+
+            # named_tuple_ingress_list and named_tuple_egress_list are updated by
+            # update_rule_descriptions below; deep copy these two lists into new
+            # variables to record the 'desired' ingress and egress sg permissions
+ desired_ingress = deepcopy(named_tuple_ingress_list)
+ desired_egress = deepcopy(named_tuple_egress_list)
+
+ changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+ # Revoke old rules
+ changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+
+            new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+ new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+ # Authorize new rules
+ changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+ if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+ # A new group with no rules provided is already being awaited.
+ # When it is created we wait for the default egress rule to be added by AWS
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ elif changed and not module.check_mode:
+            # keep polling until the current security group rules match the desired ingress and egress rules
+ security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+ else:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+ else:
+ security_group = {'group_id': None}
+
+ if module._diff:
+ if module.params['state'] == 'present':
+ after = get_diff_final_resource(client, module, security_group)
+ if before.get('ip_permissions'):
+ before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+ security_group['diff'] = [{'before': before, 'after': after}]
+
+ module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_net.py b/test/support/integration/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 00000000..30e4b1e9
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+short_description: Configure AWS virtual private clouds
+description:
+ - Create, modify, and terminate AWS virtual private clouds.
+version_added: "2.0"
+author:
+ - Jonathan Davila (@defionscode)
+ - Sloane Hertel (@s-hertel)
+options:
+ name:
+ description:
+ - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
+ required: yes
+ type: str
+ cidr_block:
+ description:
+      - The primary CIDR of the VPC. Since Ansible 2.5, a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
+ and is used in conjunction with the C(name) to ensure idempotence.
+ required: yes
+ type: list
+ elements: str
+ ipv6_cidr:
+ description:
+ - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
+ or the size of the CIDR block.
+ default: False
+ type: bool
+ version_added: '2.10'
+ purge_cidrs:
+ description:
+ - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
+ default: no
+ type: bool
+ version_added: '2.5'
+ tenancy:
+ description:
+ - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
+ default: default
+ choices: [ 'default', 'dedicated' ]
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ default: yes
+ type: bool
+ dns_hostnames:
+ description:
+ - Whether to enable AWS hostname support.
+ default: yes
+ type: bool
+ dhcp_opts_id:
+ description:
+ - The id of the DHCP options to use for this VPC.
+ type: str
+ tags:
+ description:
+      - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the Name of
+        the VPC if it is different.
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - The state of the VPC. Either absent or present.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ multi_ok:
+ description:
+ - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
+ duplicate VPCs created.
+ type: bool
+ default: false
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a VPC with dedicated tenancy and a couple of tags
+ ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ region: us-east-1
+ tags:
+ module: ec2_vpc_net
+ this: works
+ tenancy: dedicated
+
+- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
+ ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ ipv6_cidr: True
+ region: us-east-1
+ tenancy: dedicated
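+
+# Illustrative example only (values are placeholders): combine the documented
+# cidr_block list form with purge_cidrs to manage secondary CIDRs
+- name: associate a second CIDR and purge any CIDRs not listed
+  ec2_vpc_net:
+    name: Module_dev2
+    cidr_block:
+      - 10.10.0.0/16
+      - 10.20.0.0/16
+    purge_cidrs: yes
+    region: us-east-1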
+'''
+
+RETURN = '''
+vpc:
+ description: info about the VPC that was created or deleted
+ returned: always
+ type: complex
+ contains:
+ cidr_block:
+ description: The CIDR of the VPC
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ cidr_block_association_set:
+ description: IPv4 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "cidr_block": "20.0.0.0/24",
+ "cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ classic_link_enabled:
+ description: indicates whether ClassicLink is enabled
+ returned: always
+ type: bool
+ sample: false
+ dhcp_options_id:
+ description: the id of the DHCP options associated with this VPC
+ returned: always
+ type: str
+ sample: dopt-0fb8bd6b
+ id:
+ description: VPC resource id
+ returned: always
+ type: str
+ sample: vpc-c2e00da5
+ instance_tenancy:
+ description: indicates whether VPC uses default or dedicated tenancy
+ returned: always
+ type: str
+ sample: default
+ ipv6_cidr_block_association_set:
+ description: IPv6 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "ipv6_cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "ipv6_cidr_block": "2001:db8::/56",
+ "ipv6_cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ is_default:
+ description: indicates whether this is the default VPC
+ returned: always
+ type: bool
+ sample: false
+ state:
+ description: state of the VPC
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the VPC, includes name
+ returned: always
+ type: complex
+ contains:
+ Name:
+ description: name tag for the VPC
+ returned: always
+ type: str
+ sample: pk_vpc4
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from time import sleep, time
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
+ ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.network.common.utils import to_subnet
+
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+ """Returns None or a vpc object depending on the existence of a VPC. When supplied
+ with a CIDR, it will check for matching tags to determine if it is a match
+ otherwise it will assume the VPC does not exist and thus return None.
+ """
+ try:
+ matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
+        # If an exact match using a list of CIDRs isn't found, check for a match with the first CIDR, as is documented for C(cidr_block)
+ if not matching_vpcs:
+ matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ if multi:
+ return None
+ elif len(matching_vpcs) == 1:
+ return matching_vpcs[0]['VpcId']
+ elif len(matching_vpcs) > 1:
+ module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+ 'CIDR block you specified. If you would like to create '
+ 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
+ return None
+
+
+@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
+def get_classic_link_with_backoff(connection, vpc_id):
+ try:
+ return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
+ except botocore.exceptions.ClientError as e:
+ if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
+ return False
+ else:
+ raise
+
+
+def get_vpc(module, connection, vpc_id):
+ # wait for vpc to be available
+ try:
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
+
+ try:
+ vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+ try:
+ vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ return vpc_obj
+
+
+def update_vpc_tags(connection, module, vpc_id, tags, name):
+ if tags is None:
+ tags = dict()
+
+ tags.update({'Name': name})
+ tags = dict((k, to_native(v)) for k, v in tags.items())
+ try:
+ current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
+ tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
+ if tags_to_update:
+ if not module.check_mode:
+ tags = ansible_dict_to_boto3_tag_list(tags_to_update)
+ vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
+
+ # Wait for tags to be updated
+ expected_tags = boto3_tag_list_to_ansible_dict(tags)
+ filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
+
+ return True
+ else:
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+
+def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ if vpc_obj['DhcpOptionsId'] != dhcp_id:
+ if not module.check_mode:
+ try:
+ connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
+
+ try:
+ # Wait for DhcpOptionsId to be updated
+ filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")
+
+ return True
+ else:
+ return False
+
+
+def create_vpc(connection, module, cidr_block, tenancy):
+ try:
+ if not module.check_mode:
+ vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
+ else:
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to create the VPC")
+
+ # wait for vpc to exist
+ try:
+ connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
+
+ return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+ start_time = time()
+ updated = False
+ while time() < start_time + 300:
+ current_value = connection.describe_vpc_attribute(
+ Attribute=attribute,
+ VpcId=vpc_id
+ )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+ if current_value != expected_value:
+ sleep(3)
+ else:
+ updated = True
+ break
+ if not updated:
+ module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def get_cidr_network_bits(module, cidr_block):
+ fixed_cidrs = []
+ for cidr in cidr_block:
+ split_addr = cidr.split('/')
+ if len(split_addr) == 2:
+            # this_ip is an IPv4 CIDR that may or may not have host bits set
+ # Get the network bits.
+ valid_cidr = to_subnet(split_addr[0], split_addr[1])
+ if cidr != valid_cidr:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
+ fixed_cidrs.append(valid_cidr)
+ else:
+ # let AWS handle invalid CIDRs
+ fixed_cidrs.append(cidr)
+ return fixed_cidrs
+
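+# Illustrative sketch only: get_cidr_network_bits(module, ['10.10.1.5/16'])
+# warns that host bits are set and returns ['10.10.0.0/16'].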
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ cidr_block=dict(type='list', required=True),
+ ipv6_cidr=dict(type='bool', default=False),
+ tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ dns_support=dict(type='bool', default=True),
+ dns_hostnames=dict(type='bool', default=True),
+ dhcp_opts_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ multi_ok=dict(type='bool', default=False),
+ purge_cidrs=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params.get('name')
+ cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
+ ipv6_cidr = module.params.get('ipv6_cidr')
+ purge_cidrs = module.params.get('purge_cidrs')
+ tenancy = module.params.get('tenancy')
+ dns_support = module.params.get('dns_support')
+ dns_hostnames = module.params.get('dns_hostnames')
+ dhcp_id = module.params.get('dhcp_opts_id')
+ tags = module.params.get('tags')
+ state = module.params.get('state')
+ multi = module.params.get('multi_ok')
+
+ changed = False
+
+ connection = module.client(
+ 'ec2',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
+ )
+ )
+
+ if dns_hostnames and not dns_support:
+ module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
+
+ if state == 'present':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is None:
+ vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
+ changed = True
+
+ vpc_obj = get_vpc(module, connection, vpc_id)
+
+ associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
+ if cidr['CidrBlockState']['State'] != 'disassociated')
+ to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
+ to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
+ expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
+
+ if len(cidr_block) > 1:
+ for cidr in to_add:
+ changed = True
+ try:
+ connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+ if ipv6_cidr:
+ if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+ module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
+ vpc_id,
+ vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
+ else:
+ try:
+ connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+
+ if purge_cidrs:
+ for association_id in to_remove:
+ changed = True
+ try:
+ connection.disassociate_vpc_cidr_block(AssociationId=association_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+ "are associated with the CIDR block before you can disassociate it.".format(association_id))
+
+ if dhcp_id is not None:
+ try:
+ if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update DHCP options")
+
+ if tags is not None or name is not None:
+ try:
+ if update_vpc_tags(connection, module, vpc_id, tags, name):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+ current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+ current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+ if current_dns_enabled != dns_support:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns support attribute")
+ if current_dns_hostnames != dns_hostnames:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
+
+ # wait for associated cidrs to match
+ if to_add or to_remove:
+ try:
+ connection.get_waiter('vpc_available').wait(
+ VpcIds=[vpc_id],
+ Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to wait for CIDRs to update")
+
+ # try to wait for enableDnsSupport and enableDnsHostnames to match
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+ final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
+ final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
+ final_state['id'] = final_state.pop('vpc_id')
+
+ module.exit_json(changed=changed, vpc=final_state)
+
+ elif state == 'absent':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is not None:
+ try:
+ if not module.check_mode:
+ connection.delete_vpc(VpcId=vpc_id)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+ "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
+
+ module.exit_json(changed=changed, vpc={})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_subnet.py b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
new file mode 100644
index 00000000..5085e99b
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds.
+version_added: "2.0"
+author:
+- Robert Estelle (@erydo)
+- Brad Davidson (@brandond)
+requirements: [ boto3 ]
+options:
+ az:
+ description:
+ - "The availability zone for the subnet."
+ type: str
+ cidr:
+ description:
+ - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
+ type: str
+ required: true
+ ipv6_cidr:
+ description:
+ - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
+ - "Required if I(assign_instances_ipv6=true)"
+ version_added: "2.5"
+ type: str
+ tags:
+ description:
+ - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - "Create or remove the subnet."
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create or delete the subnet."
+ required: true
+ type: str
+ map_public:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ assign_instances_ipv6:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
+ type: bool
+ default: false
+ version_added: "2.5"
+ wait:
+ description:
+ - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
+ type: bool
+ default: true
+ version_added: "2.5"
+ wait_timeout:
+ description:
+ - "Number of seconds to wait for subnet to become available I(wait=True)."
+ default: 300
+ version_added: "2.5"
+ type: int
+ purge_tags:
+ description:
+ - Whether or not to remove tags that do not appear in the I(tags) list.
+ type: bool
+ default: true
+ version_added: "2.5"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+- name: Create subnet with IPv6 block assigned
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: 2001:db8:0:102::/64
+
+- name: Remove IPv6 block assigned to subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: ''
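+
+# Illustrative example only (values are placeholders): combine the documented
+# az and map_public options to create a public subnet in a specific zone
+- name: Create a public subnet in a specific availability zone
+  ec2_vpc_subnet:
+    state: present
+    vpc_id: vpc-123456
+    cidr: 10.0.2.0/24
+    az: us-east-1a
+    map_public: yes
+    tags:
+      Name: Public Subnet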
+'''
+
+RETURN = '''
+subnet:
+ description: Dictionary of subnet values
+ returned: I(state=present)
+ type: complex
+ contains:
+ id:
+ description: Subnet resource id
+ returned: I(state=present)
+ type: str
+ sample: subnet-b883b2c4
+ cidr_block:
+ description: The IPv4 CIDR of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "10.0.0.0/16"
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block actively associated with the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "2001:db8:0:102::/64"
+ availability_zone:
+ description: Availability zone of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: us-east-1a
+ state:
+ description: state of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the Subnet, includes name
+ returned: I(state=present)
+ type: dict
+ sample: {"Name": "My Subnet", "env": "staging"}
+ map_public_ip_on_launch:
+ description: whether public IP is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ assign_ipv6_address_on_creation:
+ description: whether IPv6 address is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ vpc_id:
+ description: the id of the VPC where this Subnet exists
+ returned: I(state=present)
+ type: str
+ sample: vpc-67236184
+ available_ip_address_count:
+ description: number of available IPv4 addresses
+ returned: I(state=present)
+ type: str
+ sample: 251
+ default_for_az:
+ description: indicates whether this is the default Subnet for this Availability Zone
+ returned: I(state=present)
+ type: bool
+ sample: false
+ ipv6_association_id:
+ description: The IPv6 association ID for the currently associated CIDR
+ returned: I(state=present)
+ type: str
+ sample: subnet-cidr-assoc-b85c74d2
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: I(state=present)
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
+
+
+def get_subnet_info(subnet):
+ if 'Subnets' in subnet:
+ return [get_subnet_info(s) for s in subnet['Subnets']]
+ elif 'Subnet' in subnet:
+ subnet = camel_dict_to_snake_dict(subnet['Subnet'])
+ else:
+ subnet = camel_dict_to_snake_dict(subnet)
+
+ if 'tags' in subnet:
+ subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
+ else:
+ subnet['tags'] = dict()
+
+ if 'subnet_id' in subnet:
+ subnet['id'] = subnet['subnet_id']
+ del subnet['subnet_id']
+
+ subnet['ipv6_cidr_block'] = ''
+ subnet['ipv6_association_id'] = ''
+ ipv6set = subnet.get('ipv6_cidr_block_association_set')
+ if ipv6set:
+ for item in ipv6set:
+ if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
+ subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
+ subnet['ipv6_association_id'] = item['association_id']
+
+ return subnet
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(client, **params):
+ return client.describe_subnets(**params)
+
+
+def waiter_params(module, params, start_time):
+ if not module.botocore_at_least("1.7.0"):
+ remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
+ params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
+ return params
+
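+# Illustrative sketch only: on botocore older than 1.7.0, with
+# wait_timeout=300 and roughly 40 seconds already elapsed, waiter_params adds
+# WaiterConfig={'Delay': 5, 'MaxAttempts': 52} (260 // 5).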
+
+def handle_waiter(conn, module, waiter_name, params, start_time):
+ try:
+ get_waiter(conn, waiter_name).wait(
+ **waiter_params(module, params, start_time)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, "Failed to wait for updates to complete")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "An exception happened while trying to wait for updates")
+
+
+def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout']
+
+ params = dict(VpcId=vpc_id,
+ CidrBlock=cidr)
+
+ if ipv6_cidr:
+ params['Ipv6CidrBlock'] = ipv6_cidr
+
+ if az:
+ params['AvailabilityZone'] = az
+
+ try:
+ subnet = get_subnet_info(conn.create_subnet(**params))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create subnet")
+
+    # Sometimes AWS takes its time to create a subnet, so using the new
+    # subnet's id to do things like create tags results in an exception.
+ if wait and subnet.get('state') != 'available':
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+ try:
+ conn.get_waiter('subnet_available').wait(
+ **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
+ )
+ subnet['state'] = 'available'
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
+
+ return subnet
+
+
+def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
+ changed = False
+
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
+ try:
+ cur_tags = conn.describe_tags(Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+ if to_update:
+ try:
+ if not module.check_mode:
+ AWSRetry.exponential_backoff(
+ catch_extra_error_codes=['InvalidSubnetID.NotFound']
+ )(conn.create_tags)(
+ Resources=[subnet['id']],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create tags")
+
+ if to_delete:
+ try:
+ if not module.check_mode:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ AWSRetry.exponential_backoff(
+ catch_extra_error_codes=['InvalidSubnetID.NotFound']
+ )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags")
+
+ if module.params['wait'] and not module.check_mode:
+ # Wait for tags to be updated
+ filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+ return changed
+
+
+def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def disassociate_ipv6_cidr(conn, module, subnet, start_time):
+ if subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
+
+ try:
+ conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
+ .format(subnet['ipv6_association_id'], subnet['id']))
+
+ # Wait for cidr block to be disassociated
+ if module.params['wait']:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['disassociated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+
+def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
+ wait = module.params['wait']
+ changed = False
+
+ if subnet['ipv6_association_id'] and not ipv6_cidr:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ if ipv6_cidr:
+ filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
+ 'vpc-id': subnet['vpc_id']})
+
+ try:
+ check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get subnet info")
+
+ if check_subnets and check_subnets[0]['ipv6_cidr_block']:
+ module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
+
+ if subnet['ipv6_association_id']:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ try:
+ if not check_mode:
+ associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
+ else:
+ if not check_mode and wait:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['associated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+            if not check_mode and associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
+ subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
+ subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+ if subnet['ipv6_cidr_block_association_set']:
+ subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
+ else:
+ subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
+
+ return changed
+
+
+def get_matching_subnet(conn, module, vpc_id, cidr):
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
+ try:
+ subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get matching subnet")
+
+ if subnets:
+ return subnets[0]
+
+ return None
+
+
+def ensure_subnet_present(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ changed = False
+
+    # Record the start time so the combined waits across multiple operations do not exceed the specified wait_timeout
+ start_time = time.time()
+
+ if subnet is None:
+ if not module.check_mode:
+ subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
+ ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
+ changed = True
+ # Subnet will be None when check_mode is true
+ if subnet is None:
+ return {
+ 'changed': changed,
+ 'subnet': {}
+ }
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
+ if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
+ changed = True
+
+ if module.params['map_public'] != subnet['map_public_ip_on_launch']:
+ ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['tags'] != subnet['tags']:
+ stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
+ if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
+ changed = True
+
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if not module.check_mode and module.params['wait']:
+ # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
+ # so we only wait for those if necessary just before returning the subnet
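+        # ensure_final_subnet() below re-reads the subnet and re-runs the
+        # attribute waiters until both flags match the requested values.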
+ subnet = ensure_final_subnet(conn, module, subnet, start_time)
+
+ return {
+ 'changed': changed,
+ 'subnet': subnet
+ }
+
+
+def ensure_final_subnet(conn, module, subnet, start_time):
+ for rewait in range(0, 30):
+ map_public_correct = False
+ assign_ipv6_correct = False
+
+ if module.params['map_public'] == subnet['map_public_ip_on_launch']:
+ map_public_correct = True
+ else:
+ if module.params['map_public']:
+ handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
+ assign_ipv6_correct = True
+ else:
+ if module.params['assign_instances_ipv6']:
+ handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if map_public_correct and assign_ipv6_correct:
+ break
+
+ time.sleep(5)
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+
+ return subnet
+
+
+def ensure_subnet_absent(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if subnet is None:
+ return {'changed': False}
+
+ try:
+ if not module.check_mode:
+ conn.delete_subnet(SubnetId=subnet['id'])
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
+ return {'changed': True}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete subnet")
+
+
+def main():
+ argument_spec = dict(
+ az=dict(default=None, required=False),
+ cidr=dict(required=True),
+ ipv6_cidr=dict(default='', required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
+ vpc_id=dict(required=True),
+ map_public=dict(default=False, required=False, type='bool'),
+ assign_instances_ipv6=dict(default=False, required=False, type='bool'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300, required=False),
+ purge_tags=dict(default=True, type='bool')
+ )
+
+ required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
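+    # i.e. when assign_instances_ipv6 is set to true, ipv6_cidr must also be supplied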
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+ if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
+ module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
+
+ if not module.botocore_at_least("1.7.0"):
+ module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
+
+ connection = module.client('ec2')
+
+ state = module.params.get('state')
+
+ try:
+ if state == 'present':
+ result = ensure_subnet_present(connection, module)
+ elif state == 'absent':
+ result = ensure_subnet_absent(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/flatpak_remote.py b/test/support/integration/plugins/modules/flatpak_remote.py
new file mode 100644
index 00000000..db208f1b
--- /dev/null
+++ b/test/support/integration/plugins/modules/flatpak_remote.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+version_added: '2.6'
+short_description: Manage flatpak repository remotes
+description:
+- Allows users to add or remove flatpak remotes.
+- The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+- Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+- Existing remotes will not be updated.
+- See the M(flatpak) module for managing flatpaks.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+ - When used with I(state=absent) the remote with that name will be removed.
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ flatpak_remote:
+ name: flathub
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+import subprocess
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result
+ command = "{0} remote-add --{1} {2} {3}".format(
+ binary, method, name, flatpakrepo_url)
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result
+    command = "{0} remote-delete --{1} --force {2}".format(
+ binary, method, name)
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
+ command = "{0} remote-list -d --{1}".format(binary, method)
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
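+    # "flatpak remote-list -d" prints one tab-separated line per remote, e.g.
+    #   flathub  Flathub  https://dl.flathub.org/repo/  1
+    # (compare the RETURN sample above); the remote name is the first column.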
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
+
+
+def _flatpak_command(module, noop, command):
+ global result
+ if noop:
+ result['rc'] = 0
+ result['command'] = command
+ return ""
+
+ process = subprocess.Popen(
+ command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout_data, stderr_data = process.communicate()
+ result['rc'] = process.returncode
+ result['command'] = command
+    result['stdout'] = to_native(stdout_data)
+    result['stderr'] = to_native(stderr_data)
+ if result['rc'] != 0:
+ module.fail_json(msg="Failed to execute flatpak command", **result)
+ return to_native(stdout_data)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/htpasswd.py b/test/support/integration/plugins/modules/htpasswd.py
new file mode 100644
index 00000000..ad12b0c0
--- /dev/null
+++ b/test/support/integration/plugins/modules/htpasswd.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+module: htpasswd
+version_added: "1.3"
+short_description: manage user files for basic authentication
+description:
+ - Add and remove username/password entries in a password file using htpasswd.
+ - This is used by web servers such as Apache and Nginx for basic authentication.
+options:
+ path:
+ required: true
+ aliases: [ dest, destfile ]
+ description:
+ - Path to the file that contains the usernames and passwords
+ name:
+ required: true
+ aliases: [ username ]
+ description:
+ - User name to add or remove
+ password:
+ required: false
+ description:
+ - Password associated with user.
+ - Must be specified if user does not exist yet.
+ crypt_scheme:
+ required: false
+ choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
+ default: "apr_md5_crypt"
+ description:
+      - Encryption scheme to be used. As well as the four choices listed
+        here, you can also use any other hash supported by passlib, such as
+        md5_crypt and sha256_crypt, which are Linux passwd hashes. If you
+        do so, the password file will not be compatible with Apache or Nginx.
+ state:
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the user entry should be present or not
+ create:
+ required: false
+ type: bool
+ default: "yes"
+ description:
+      - Used with C(state=present). If specified, the file will be created
+        if it does not already exist. If set to "no", the module will fail
+        if the file does not exist.
+notes:
+ - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+requirements: [ passlib>=1.6 ]
+author: "Ansible Core Team"
+extends_documentation_fragment: files
+"""
+
+EXAMPLES = """
+# Add a user to a password file and ensure permissions are set
+- htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: 0640
+
+# Remove a user from a password file
+- htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
+# Add a user to a password file suitable for use by libpam-pwdfile
+- htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
+"""
+
+
+import os
+import tempfile
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+PASSLIB_IMP_ERR = None
+try:
+ from passlib.apache import HtpasswdFile, htpasswd_context
+ from passlib.context import CryptContext
+ import passlib
+except ImportError:
+ PASSLIB_IMP_ERR = traceback.format_exc()
+ passlib_installed = False
+else:
+ passlib_installed = True
+
+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
+
+
+def create_missing_directories(dest):
+ destpath = os.path.dirname(dest)
+ if not os.path.exists(destpath):
+ os.makedirs(destpath)
+
+
+def present(dest, username, password, crypt_scheme, create, check_mode):
+ """ Ensures user is present
+
+ Returns (msg, changed) """
+ if crypt_scheme in apache_hashes:
+ context = htpasswd_context
+ else:
+ context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
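+    # Keeping the Apache schemes in the context lets passlib still verify any
+    # existing entries hashed with an Apache-compatible scheme, while new
+    # entries are hashed with the requested crypt_scheme (the default scheme).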
+ if not os.path.exists(dest):
+ if not create:
+ raise ValueError('Destination %s does not exist' % dest)
+ if check_mode:
+ return ("Create %s" % dest, True)
+ create_missing_directories(dest)
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
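+        # passlib 1.6 renamed update()/verify() to set_password()/check_password(),
+        # hence the getattr() probing here and below.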
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Created %s and added %s" % (dest, username), True)
+ else:
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+
+ found = None
+ if getattr(ht, 'check_password', None):
+ found = ht.check_password(username, password)
+ else:
+ found = ht.verify(username, password)
+
+ if found:
+ return ("%s already present" % username, False)
+ else:
+ if not check_mode:
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Add/update %s" % username, True)
+
+
+def absent(dest, username, check_mode):
+ """ Ensures user is absent
+
+ Returns (msg, changed) """
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False)
+ else:
+ ht = HtpasswdFile(dest)
+
+ if username not in ht.users():
+ return ("%s not present" % username, False)
+ else:
+ if not check_mode:
+ ht.delete(username)
+ ht.save()
+ return ("Remove %s" % username, True)
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ arg_spec = dict(
+ path=dict(required=True, aliases=["dest", "destfile"]),
+ name=dict(required=True, aliases=["username"]),
+ password=dict(required=False, default=None, no_log=True),
+ crypt_scheme=dict(required=False, default="apr_md5_crypt"),
+ state=dict(required=False, default="present"),
+ create=dict(type='bool', default='yes'),
+
+ )
+ module = AnsibleModule(argument_spec=arg_spec,
+ add_file_common_args=True,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ username = module.params['name']
+ password = module.params['password']
+ crypt_scheme = module.params['crypt_scheme']
+ state = module.params['state']
+ create = module.params['create']
+ check_mode = module.check_mode
+
+ if not passlib_installed:
+ module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+
+    # Check the file for blank lines in an effort to avoid a "need more than 1 value to unpack" error.
+ try:
+ f = open(path, "r")
+ except IOError:
+ # No preexisting file to remove blank lines from
+ f = None
+ else:
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+
+        # Rewriting the file always reports a change, so only rewrite it if it actually contains blank lines
+ strip = False
+ for line in lines:
+ if not line.strip():
+ strip = True
+ break
+
+ if strip:
+ # If check mode, create a temporary file
+ if check_mode:
+ temp = tempfile.NamedTemporaryFile()
+ path = temp.name
+ f = open(path, "w")
+ try:
+                for line in lines:
+                    if line.strip():
+                        f.write(line)
+ finally:
+ f.close()
+
+ try:
+ if state == 'present':
+ (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ elif state == 'absent':
+ if not os.path.exists(path):
+ module.exit_json(msg="%s not present" % username,
+ warnings="%s does not exist" % path, changed=False)
+ (msg, changed) = absent(path, username, check_mode)
+ else:
+ module.fail_json(msg="Invalid state: %s" % state)
+
+ check_file_attrs(module, changed, msg)
+ module.exit_json(msg=msg, changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/locale_gen.py b/test/support/integration/plugins/modules/locale_gen.py
new file mode 100644
index 00000000..4968b834
--- /dev/null
+++ b/test/support/integration/plugins/modules/locale_gen.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+version_added: "1.6"
+author:
+- Augustus Kling (@AugustusKling)
+options:
+ name:
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ locale_gen:
+ name: de_CH.UTF-8
+ state: present
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+    checking either:
+    * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
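+    # Typical entries the pattern must match, e.g. in /etc/locale.gen:
+    #   "# de_CH.UTF-8 UTF-8"   (available but not generated)
+    #   "en_GB.UTF-8 UTF-8"     (enabled)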
+ re_compiled = re.compile(__regexp)
+    with open(__locales_available, 'r') as fd:
+        for line in fd:
+            result = re_compiled.match(line)
+            if result and result.group('locale') == name:
+                return True
+    return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
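+    # e.g. fix_case("de_CH.utf8") -> "de_CH.UTF-8"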
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+    with open("/etc/locale.gen", "r") as f:
+        lines = [line.replace(existing_line, new_line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
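+    # e.g. enabling "de_CH.UTF-8" rewrites '# de_CH.UTF-8 UTF-8' to 'de_CH.UTF-8 UTF-8',
+    # while disabling prepends the '# ' marker again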
+    with open("/etc/locale.gen", "r") as f:
+        lines = [re.sub(search_string, new_string, line) for line in f]
+    with open("/etc/locale.gen", "w") as f:
+        f.write("".join(lines))
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute; it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+        with open("/var/lib/locales/supported.d/local", "r") as f:
+            content = f.readlines()
+        with open("/var/lib/locales/supported.d/local", "w") as f:
+            for line in content:
+                locale, charset = line.split(' ')
+                if locale != name:
+                    f.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute; it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/lvg.py b/test/support/integration/plugins/modules/lvg.py
new file mode 100644
index 00000000..e2035f68
--- /dev/null
+++ b/test/support/integration/plugins/modules/lvg.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+version_added: "1.1"
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ version_added: "2.4"
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ version_added: "1.6"
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+    - If C(yes), allows removal of a volume group that still contains logical volumes.
+ type: bool
+ default: no
+seealso:
+- module: filesystem
+- module: lvol
+- module: parted
+notes:
+  - This module does not modify PE size for an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ lvg:
+ vg: vg.services
+ state: absent
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
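+    # With the documented example (pvs: /dev/sdb1,/dev/sdc5 and vg: vg.services),
+    # the filter expands to roughly:
+    #   --select 'pv_name = /dev/sdb1 || pv_name = /dev/sdc5 || vg_name = vg.services'
+    # so pvs reports both the requested devices and any PVs already in this VG.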
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+                vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+                    module.fail_json(msg="Refusing to remove non-empty volume group %s without force=yes" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/mongodb_parameter.py b/test/support/integration/plugins/modules/mongodb_parameter.py
new file mode 100644
index 00000000..05de42b2
--- /dev/null
+++ b/test/support/integration/plugins/modules/mongodb_parameter.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: mongodb_parameter
+short_description: Change an administrative parameter on a MongoDB server
+description:
+ - Change an administrative parameter on a MongoDB server.
+version_added: "2.1"
+options:
+ login_user:
+ description:
+ - The MongoDB username used to authenticate with.
+ type: str
+ login_password:
+ description:
+ - The login user's password used to authenticate with.
+ type: str
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The MongoDB port to connect to.
+ default: 27017
+ type: int
+ login_database:
+ description:
+ - The database where login credentials are stored.
+ type: str
+ replica_set:
+ description:
+ - Replica set to connect to (automatically connects to primary for writes).
+ type: str
+ ssl:
+ description:
+ - Whether to use an SSL connection when connecting to the database.
+ type: bool
+ default: no
+ param:
+ description:
+ - MongoDB administrative parameter to modify.
+ type: str
+ required: true
+ value:
+ description:
+ - MongoDB administrative parameter value to set.
+ type: str
+ required: true
+ param_type:
+ description:
+ - Define the type of parameter value.
+ default: str
+ type: str
+ choices: [int, str]
+
+notes:
+ - Requires the pymongo Python package on the remote host, version 2.4.2+.
+ - This can be installed using pip or the OS package manager.
+ - See also U(http://api.mongodb.org/python/current/installation.html)
+requirements: [ "pymongo" ]
+author: "Loic Blot (@nerzhul)"
+'''
+
+EXAMPLES = r'''
+- name: Set MongoDB syncdelay to 60 (this is an int)
+ mongodb_parameter:
+ param: syncdelay
+ value: 60
+ param_type: int
+'''
+
+RETURN = r'''
+before:
+ description: value before modification
+ returned: success
+ type: str
+after:
+ description: value after modification
+ returned: success
+ type: str
+'''
+
+import os
+import traceback
+
+try:
+ from pymongo.errors import ConnectionFailure
+ from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
+ from pymongo import MongoClient
+except ImportError:
+ try: # for older PyMongo 2.2
+ from pymongo import Connection as MongoClient
+ except ImportError:
+ pymongo_found = False
+ else:
+ pymongo_found = True
+else:
+ pymongo_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+# =========================================
+# MongoDB module specific support methods.
+#
+
+def load_mongocnf():
+ config = configparser.RawConfigParser()
+ mongocnf = os.path.expanduser('~/.mongodb.cnf')
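+    # Expected ~/.mongodb.cnf layout (INI style), with illustrative values:
+    #   [client]
+    #   user = admin
+    #   pass = secret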
+
+ try:
+ config.readfp(open(mongocnf))
+ creds = dict(
+ user=config.get('client', 'user'),
+ password=config.get('client', 'pass')
+ )
+ except (configparser.NoOptionError, IOError):
+ return False
+
+ return creds
+
+
+# =========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default=None),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default=27017, type='int'),
+ login_database=dict(default=None),
+ replica_set=dict(default=None),
+ param=dict(required=True),
+ value=dict(required=True),
+ param_type=dict(default="str", choices=['str', 'int']),
+ ssl=dict(default=False, type='bool'),
+ )
+ )
+
+ if not pymongo_found:
+ module.fail_json(msg=missing_required_lib('pymongo'))
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_database = module.params['login_database']
+
+ replica_set = module.params['replica_set']
+ ssl = module.params['ssl']
+
+ param = module.params['param']
+ param_type = module.params['param_type']
+ value = module.params['value']
+
+ # Verify parameter is coherent with specified type
+ try:
+ if param_type == 'int':
+ value = int(value)
+ except ValueError:
+ module.fail_json(msg="value '%s' is not %s" % (value, param_type))
+
+ try:
+ if replica_set:
+ client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
+ else:
+ client = MongoClient(login_host, int(login_port), ssl=ssl)
+
+ if login_user is None and login_password is None:
+ mongocnf_creds = load_mongocnf()
+ if mongocnf_creds is not False:
+ login_user = mongocnf_creds['user']
+ login_password = mongocnf_creds['password']
+ elif login_password is None or login_user is None:
+ module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+ if login_user is not None and login_password is not None:
+ client.admin.authenticate(login_user, login_password, source=login_database)
+
+ except ConnectionFailure as e:
+ module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
+
+ db = client.admin
+
+ try:
+ after_value = db.command("setParameter", **{param: value})
+ except OperationFailure as e:
+ module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc())
+
+ if "was" not in after_value:
+ module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
+ else:
+ module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
+ after=value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/mongodb_user.py b/test/support/integration/plugins/modules/mongodb_user.py
new file mode 100644
index 00000000..362b3aa4
--- /dev/null
+++ b/test/support/integration/plugins/modules/mongodb_user.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+
+# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
+# Sponsored by Four Kitchens http://fourkitchens.com.
+# (c) 2014, Epic Games, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: mongodb_user
+short_description: Adds or removes a user from a MongoDB database
+description:
+ - Adds or removes a user from a MongoDB database.
+version_added: "1.1"
+options:
+ login_user:
+ description:
+ - The MongoDB username used to authenticate with.
+ type: str
+ login_password:
+ description:
+ - The login user's password used to authenticate with.
+ type: str
+ login_host:
+ description:
+ - The host running the database.
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - The MongoDB port to connect to.
+ default: '27017'
+ type: str
+ login_database:
+ version_added: "2.0"
+ description:
+ - The database where login credentials are stored.
+ type: str
+ replica_set:
+ version_added: "1.6"
+ description:
+ - Replica set to connect to (automatically connects to primary for writes).
+ type: str
+ database:
+ description:
+ - The name of the database to add/remove the user from.
+ required: true
+ type: str
+ aliases: [db]
+ name:
+ description:
+ - The name of the user to add or remove.
+ required: true
+ aliases: [user]
+ type: str
+ password:
+ description:
+ - The password to use for the user.
+ type: str
+ aliases: [pass]
+ ssl:
+ version_added: "1.8"
+ description:
+ - Whether to use an SSL connection when connecting to the database.
+ type: bool
+ ssl_cert_reqs:
+ version_added: "2.2"
+ description:
+ - Specifies whether a certificate is required from the other side of the connection,
+ and whether it will be validated if provided.
+ default: CERT_REQUIRED
+ choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED]
+ type: str
+ roles:
+ version_added: "1.3"
+ type: list
+ elements: raw
+ description:
+ - >
+        The database user roles. Valid values are one or more of the following strings:
+ 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
+ 'dbAdminAnyDatabase'
+ - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
+ - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
+ state:
+ description:
+ - The database user state.
+ default: present
+ choices: [absent, present]
+ type: str
+ update_password:
+ default: always
+ choices: [always, on_create]
+ version_added: "2.1"
+ description:
+ - C(always) will update passwords if they differ.
+ - C(on_create) will only set the password for newly created users.
+ type: str
+
+notes:
+  - Requires the pymongo Python package on the remote host, version 2.4.2+. This
+    can be installed using pip or the OS package manager. See U(http://api.mongodb.org/python/current/installation.html)
+requirements: [ "pymongo" ]
+author:
+ - "Elliott Foster (@elliotttf)"
+ - "Julien Thebault (@Lujeni)"
+'''
+
+EXAMPLES = '''
+- name: Create 'burgers' database user with name 'bob' and password '12345'.
+ mongodb_user:
+ database: burgers
+ name: bob
+ password: 12345
+ state: present
+
+- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
+ mongodb_user:
+ database: burgers
+ name: bob
+ password: 12345
+ state: present
+ ssl: True
+
+- name: Delete 'burgers' database user with name 'bob'.
+ mongodb_user:
+ database: burgers
+ name: bob
+ state: absent
+
+- name: Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via pre-MongoDB 2.2 style)
+ mongodb_user:
+ database: burgers
+ name: ben
+ password: 12345
+ roles: read
+ state: present
+
+- name: Define roles
+ mongodb_user:
+ database: burgers
+ name: jim
+ password: 12345
+ roles: readWrite,dbAdmin,userAdmin
+ state: present
+
+- name: Define roles
+ mongodb_user:
+ database: burgers
+ name: joe
+ password: 12345
+ roles: readWriteAnyDatabase
+ state: present
+
+- name: Add a user to database in a replica set, the primary server is automatically discovered and written to
+ mongodb_user:
+ database: burgers
+ name: bob
+ replica_set: belcher
+ password: 12345
+ roles: readWriteAnyDatabase
+ state: present
+
+# Add a user 'oplog_reader' with read-only access to the 'local' database on the replica set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
+# Note that the credentials must be added to the 'admin' database because the 'local' database is not synchronized and cannot receive user credentials.
+# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
+# This syntax requires MongoDB 2.6+ and pymongo 2.5+.
+- name: Roles as a dictionary
+ mongodb_user:
+ login_user: root
+ login_password: root_password
+ database: admin
+ user: oplog_reader
+ password: oplog_reader_password
+ state: present
+ replica_set: belcher
+ roles:
+ - db: local
+ role: read
+
+'''
+
+RETURN = '''
+user:
+ description: The name of the user to add or remove.
+ returned: success
+ type: str
+'''
+
+import os
+import ssl as ssl_lib
+import traceback
+from distutils.version import LooseVersion
+from operator import itemgetter
+
+try:
+ from pymongo.errors import ConnectionFailure
+ from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
+ from pymongo import MongoClient
+except ImportError:
+ try: # for older PyMongo 2.2
+ from pymongo import Connection as MongoClient
+ except ImportError:
+ pymongo_found = False
+ else:
+ pymongo_found = True
+else:
+ pymongo_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_native
+
+
+# =========================================
+# MongoDB module specific support methods.
+#
+
+def check_compatibility(module, client):
+ """Check the compatibility between the driver and the database.
+
+ See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
+
+ Args:
+ module: Ansible module.
+ client (cursor): Mongodb cursor on admin database.
+ """
+ loose_srv_version = LooseVersion(client.server_info()['version'])
+ loose_driver_version = LooseVersion(PyMongoVersion)
+
+ if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
+ module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
+
+ elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
+ module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
+
+ elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
+ module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
+
+ elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
+ module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
+
+
+def user_find(client, user, db_name):
+ """Check if the user exists.
+
+ Args:
+ client (cursor): Mongodb cursor on admin database.
+ user (str): User to check.
+ db_name (str): User's database.
+
+ Returns:
+        dict: the user document when the user exists, False otherwise.
+ """
+ for mongo_user in client["admin"].system.users.find():
+ if mongo_user['user'] == user:
+ # NOTE: there is no 'db' field in mongo 2.4.
+ if 'db' not in mongo_user:
+ return mongo_user
+
+ if mongo_user["db"] == db_name:
+ return mongo_user
+ return False
+
+
+def user_add(module, client, db_name, user, password, roles):
+    # pymongo's add_user wraps _create_or_update_user, so we cannot tell whether the user
+    # was created or updated without reproducing a lot of the logic in pymongo's database.py
+ db = client[db_name]
+
+ if roles is None:
+ db.add_user(user, password, False)
+ else:
+ db.add_user(user, password, None, roles=roles)
+
+
+def user_remove(module, client, db_name, user):
+ exists = user_find(client, user, db_name)
+ if exists:
+ if module.check_mode:
+ module.exit_json(changed=True, user=user)
+ db = client[db_name]
+ db.remove_user(user)
+ else:
+ module.exit_json(changed=False, user=user)
+
+
+def load_mongocnf():
+ config = configparser.RawConfigParser()
+ mongocnf = os.path.expanduser('~/.mongodb.cnf')
+
+ try:
+ config.readfp(open(mongocnf))
+ creds = dict(
+ user=config.get('client', 'user'),
+ password=config.get('client', 'pass')
+ )
+ except (configparser.NoOptionError, IOError):
+ return False
+
+ return creds
+
+
+def check_if_roles_changed(uinfo, roles, db_name):
+ # We must be aware of users which can read the oplog on a replicaset
+ # Such users must have access to the local DB, but since this DB does not store users credentials
+ # and is not synchronized among replica sets, the user must be stored on the admin db
+ # Therefore their structure is the following :
+ # {
+ # "_id" : "admin.oplog_reader",
+ # "user" : "oplog_reader",
+ # "db" : "admin", # <-- admin DB
+ # "roles" : [
+ # {
+ # "role" : "read",
+ # "db" : "local" # <-- local DB
+ # }
+ # ]
+ # }
+
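+    # Plain string roles are normalized to the dict form before comparison,
+    # e.g. roles=["read"] with db_name="burgers" becomes [{"role": "read", "db": "burgers"}].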
+ def make_sure_roles_are_a_list_of_dict(roles, db_name):
+ output = list()
+ for role in roles:
+ if isinstance(role, (binary_type, text_type)):
+ new_role = {"role": role, "db": db_name}
+ output.append(new_role)
+ else:
+ output.append(role)
+ return output
+
+ roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
+ uinfo_roles = uinfo.get('roles', [])
+
+ if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')):
+ return False
+ return True
+
+
+# =========================================
+# Module execution.
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_user=dict(default=None),
+ login_password=dict(default=None, no_log=True),
+ login_host=dict(default='localhost'),
+ login_port=dict(default='27017'),
+ login_database=dict(default=None),
+ replica_set=dict(default=None),
+ database=dict(required=True, aliases=['db']),
+ name=dict(required=True, aliases=['user']),
+ password=dict(aliases=['pass'], no_log=True),
+ ssl=dict(default=False, type='bool'),
+ roles=dict(default=None, type='list', elements='raw'),
+ state=dict(default='present', choices=['absent', 'present']),
+ update_password=dict(default="always", choices=["always", "on_create"]),
+ ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not pymongo_found:
+ module.fail_json(msg=missing_required_lib('pymongo'))
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_database = module.params['login_database']
+
+ replica_set = module.params['replica_set']
+ db_name = module.params['database']
+ user = module.params['name']
+ password = module.params['password']
+ ssl = module.params['ssl']
+ roles = module.params['roles'] or []
+ state = module.params['state']
+ update_password = module.params['update_password']
+
+ try:
+ connection_params = {
+ "host": login_host,
+ "port": int(login_port),
+ }
+
+ if replica_set:
+ connection_params["replicaset"] = replica_set
+
+ if ssl:
+ connection_params["ssl"] = ssl
+ connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
+
+ client = MongoClient(**connection_params)
+
+ # NOTE: this check must be done ASAP.
+        # We don't need to be authenticated for it (that ability was lost in PyMongo 3.6)
+ if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
+ check_compatibility(module, client)
+
+ if login_user is None and login_password is None:
+ mongocnf_creds = load_mongocnf()
+ if mongocnf_creds is not False:
+ login_user = mongocnf_creds['user']
+ login_password = mongocnf_creds['password']
+ elif login_password is None or login_user is None:
+ module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
+
+ if login_user is not None and login_password is not None:
+ client.admin.authenticate(login_user, login_password, source=login_database)
+ elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
+ if db_name != "admin":
+ module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
+ # else: this has to be the first admin user added
+
+ except Exception as e:
+ module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
+
+ if state == 'present':
+ if password is None and update_password == 'always':
+ module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
+
+ try:
+ if update_password != 'always':
+ uinfo = user_find(client, user, db_name)
+ if uinfo:
+ password = None
+ if not check_if_roles_changed(uinfo, roles, db_name):
+ module.exit_json(changed=False, user=user)
+
+ if module.check_mode:
+ module.exit_json(changed=True, user=user)
+
+ user_add(module, client, db_name, user, password, roles)
+ except Exception as e:
+ module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
+ finally:
+ try:
+ client.close()
+ except Exception:
+ pass
+        # Here we could check for a password change once MongoDB provides a query for that: https://jira.mongodb.org/browse/SERVER-22848
+ # newuinfo = user_find(client, user, db_name)
+ # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
+ # module.exit_json(changed=False, user=user)
+
+ elif state == 'absent':
+ try:
+ user_remove(module, client, db_name, user)
+ except Exception as e:
+ module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
+ finally:
+ try:
+ client.close()
+ except Exception:
+ pass
+ module.exit_json(changed=True, user=user)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/pids.py b/test/support/integration/plugins/modules/pids.py
new file mode 100644
index 00000000..4cbf45a9
--- /dev/null
+++ b/test/support/integration/plugins/modules/pids.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: pids
+version_added: 2.8
+description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
+short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+ - psutil (Python module)
+options:
+ name:
+    description: The name of the process for which you want the PIDs.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+# Pass the process name
+- name: Getting process IDs of the process
+ pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ debug:
+ msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+  returned: always (the list may contain none, one, or more process IDs)
+ type: list
+ sample: [100,200]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ import psutil
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
+
+def get_pid(name):
+ pids = []
+
+ for proc in psutil.process_iter(attrs=['name', 'cmdline']):
+ if compare_lower(proc.info['name'], name) or \
+ proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
+ pids.append(proc.pid)
+
+ return pids
+
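+# Illustrative sketch (standalone, assumes psutil >= 5.3 for the attrs
+# argument): the same name matching outside the module would be
+#
+#     import psutil
+#     pids = [p.pid for p in psutil.process_iter(attrs=['name'])
+#             if p.info['name'] == 'python']
+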
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type="str"),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_PSUTIL:
+ module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
+ name = module.params["name"]
+ response = dict(pids=get_pid(name))
+ module.exit_json(**response)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/pkgng.py b/test/support/integration/plugins/modules/pkgng.py
new file mode 100644
index 00000000..11363479
--- /dev/null
+++ b/test/support/integration/plugins/modules/pkgng.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+ - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0.
+version_added: "1.2"
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ required: true
+ state:
+ description:
+ - State of the package.
+ - 'Note: "latest" added in 2.7'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: no
+ annotation:
+ description:
+      - A comma-separated list of key/value pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ version_added: "1.6"
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+      - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ chroot:
+ version_added: "2.1"
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ jail:
+ version_added: "2.4"
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ autoremove:
+ version_added: "2.2"
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: no
+author: "bleader (@bleader)"
+notes:
+  - When using pkgsite, be aware that packages already in the cache will not be downloaded again.
+  - When used with a C(loop:), each package is processed individually;
+    it is much more efficient to pass the list directly to the C(name) option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ pkgng:
+ name: foo,bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ pkgng:
+ name: baz
+ state: latest
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
+ else:
+ rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
+
+ if rc == 1:
+ return True
+
+ return False
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = [int(x) for x in re.split(r'[\._]', out)]
+
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
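+# Illustrative sketch: with `pkg -v` printing "1.5.6" (a hypothetical value),
+# the parsed version is [1, 5, 6]; the loop above walks the components until
+# they differ (5 > 1 against [1, 1, 4]) and returns False, i.e. not older.
+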
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ return (True, "removed %s package(s)" % remove_c)
+
+ return (False, "package(s) already absent")
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state):
+
+ install_c = 0
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+ batch_var = 'env BATCH=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err))
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c))
+
+ return (False, "package(s) already %s" % (state))
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
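+# Illustrative sketch (hypothetical `pkg info -A` output): given a line like
+#
+#     test1               : baz
+#
+# the regex above captures tag "test1" and returns the value "baz"; a missing
+# tag yields no match and the function returns False.
+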
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
+    # Build a list (not a lazy map) so the annotations can be iterated once per
+    # package below; keep '-' at the edge of the character class so it is not
+    # parsed as a range.
+    annotations = [re.match(r'(?P<operation>[+:-])(?P<tag>\w+)(=(?P<value>\w+))?',
+                            _annotation).groupdict()
+                   for _annotation in re.split(r',', annotation)]
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+            if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
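+# Illustrative sketch: the annotation string '+test1=baz,-test2,:test3=foobar'
+# (as in the EXAMPLES above) parses into three operations dispatched through
+# the `operation` table: add test1=baz, delete test2, modify test3 to foobar.
+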
+
+def autoremove_packages(module, pkgng_path, dir_arg):
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return False, "no package(s) to autoremove"
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+
+ return True, "autoremoved %d package(s)" % (autoremove_c)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list'),
+ cached=dict(default=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["jail"] != "":
+ dir_arg = '--jail %s' % (p["jail"])
+
+ if p["state"] in ("present", "latest"):
+ _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg, p["state"])
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent":
+ _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_db.py b/test/support/integration/plugins/modules/postgresql_db.py
new file mode 100644
index 00000000..40858d99
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_db.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+version_added: '0.6'
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect (if needed)
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ version_added: '2.8'
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+      Note that some versions of pg_dump, the embedded PostgreSQL utility used by the module,
+      return rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf),
+      so the module reports changed=True although the dump has not actually been done. Please make
+      sure that your version of pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ version_added: '2.4'
+ target_opts:
+ description:
+ - Further arguments for pg_dump or pg_restore.
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ version_added: '2.4'
+ maintenance_db:
+ description:
+      - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ version_added: '2.5'
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ version_added: '2.8'
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ version_added: '2.9'
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+      - Cannot be used with dump-file-format-related arguments like C(--format=d).
+ type: str
+ version_added: '2.10'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: postgresql_tablespace
+- module: postgresql_info
+- module: postgresql_ping
+notes:
+- States C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- postgres
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale settings
+ postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+  description: List of commands the module tried to execute.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '2.10'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import SQLParseError, pg_quote_identifier
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+ query = 'ALTER DATABASE %s OWNER TO "%s"' % (
+ pg_quote_identifier(db, 'database'),
+ owner)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+ query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
+ pg_quote_identifier(db, 'database'),
+ conn_limit)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def get_encoding_id(cursor, encoding):
+ query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+ cursor.execute(query, {'encoding': encoding})
+ return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+ query = """
+ SELECT rolname AS owner,
+ pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+ datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+ spcname AS tablespace
+ FROM pg_database
+ JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+ JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+ WHERE datname = %(db)s
+ """
+ cursor.execute(query, {'db': db})
+ return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+ query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+ cursor.execute(query, {'db': db})
+ return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+ if db_exists(cursor, db):
+ query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
+ if not db_exists(cursor, db):
+ query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
+ if owner:
+ query_fragments.append('OWNER "%s"' % owner)
+ if template:
+ query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
+ if encoding:
+ query_fragments.append('ENCODING %(enc)s')
+ if lc_collate:
+ query_fragments.append('LC_COLLATE %(collate)s')
+ if lc_ctype:
+ query_fragments.append('LC_CTYPE %(ctype)s')
+ if tablespace:
+ query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
+ if conn_limit:
+ query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query = ' '.join(query_fragments)
+ executed_commands.append(cursor.mogrify(query, params))
+ cursor.execute(query, params)
+ return True
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ raise NotSupportedError(
+ 'Changing database encoding is not supported. '
+ 'Current encoding: %s' % db_info['encoding']
+ )
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ raise NotSupportedError(
+ 'Changing LC_COLLATE is not supported. '
+ 'Current LC_COLLATE: %s' % db_info['lc_collate']
+ )
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ raise NotSupportedError(
+ 'Changing LC_CTYPE is not supported.'
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Use a fifo to be notified of an error in pg_dump
+ # Using shell pipe has no way to return the code of the first command
+ # in a portable way.
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
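+# Illustrative sketch (hypothetical paths): for target '/tmp/acme.sql.gz' the
+# assembled shell line looks like
+#
+#     gzip <{tmpdir}/pg_fifo > /tmp/acme.sql.gz & pg_dump ... >{tmpdir}/pg_fifo
+#
+# so pg_dump's own exit code is what gets reported, instead of being masked by
+# the compressor sitting at the end of a plain pipe.
+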
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ env = os.environ.copy()
+ if password:
+ env = {"PGPASSWORD": password}
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+ returns a list of connection argument strings each prefixed
+ with a space and quoted where necessary to later be combined
+ in a single shell string with `"".join(rv)`
+
+ db_prefix determines if "--dbname" is prefixed to the db argument,
+ since the argument was introduced in 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
+
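+# Illustrative sketch (hypothetical values):
+#
+#     login_flags('acme', 'db.example.com', 5432, 'bob')
+#     -> [' --dbname=acme', ' --host=db.example.com',
+#         ' --port=5432', ' --username=bob']
+#
+# which "".join(flags) collapses into a single argument string for the command.
+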
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+    # module.run_command() returns (rc, stdout, stderr); name them accordingly so
+    # callers unpacking (rc, stdout, stderr, cmd) get the streams they expect.
+    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+    return rc, stdout, stderr, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+ query = "ALTER DATABASE %s SET TABLESPACE %s" % (
+ pg_quote_identifier(db, 'database'),
+ pg_quote_identifier(tablespace, 'tablespace'))
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = pgutils.postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', required=True, aliases=['name']),
+ owner=dict(type='str', default=''),
+ template=dict(type='str', default=''),
+ encoding=dict(type='str', default=''),
+ lc_collate=dict(type='str', default=''),
+ lc_ctype=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
+ target=dict(type='path', default=''),
+ target_opts=dict(type='str', default=''),
+ maintenance_db=dict(type='str', default="postgres"),
+ session_role=dict(type='str'),
+ conn_limit=dict(type='str', default=''),
+ tablespace=dict(type='path', default=''),
+ dump_extra_args=dict(type='str', default=None),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ db = module.params["db"]
+ owner = module.params["owner"]
+ template = module.params["template"]
+ encoding = module.params["encoding"]
+ lc_collate = module.params["lc_collate"]
+ lc_ctype = module.params["lc_ctype"]
+ target = module.params["target"]
+ target_opts = module.params["target_opts"]
+ state = module.params["state"]
+ changed = False
+ maintenance_db = module.params['maintenance_db']
+ session_role = module.params["session_role"]
+ conn_limit = module.params['conn_limit']
+ tablespace = module.params['tablespace']
+ dump_extra_args = module.params['dump_extra_args']
+
+ raw_connection = state in ("dump", "restore")
+
+ if not raw_connection:
+ pgutils.ensure_required_libs(module)
+
+ # To use defaults values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+ kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+
+ if is_localhost and module.params["login_unix_socket"] != "":
+ kw["host"] = module.params["login_unix_socket"]
+
+ if target == "":
+ target = "{0}/{1}.sql".format(os.getcwd(), db)
+ target = os.path.expanduser(target)
+
+ if not raw_connection:
+ try:
+ db_connection = psycopg2.connect(database=maintenance_db, **kw)
+
+ # Enable autocommit so we can create databases
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ if session_role:
+ try:
+ cursor.execute('SET ROLE "%s"' % session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = db_exists(cursor, db)
+ elif state == "present":
+ changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+ if state == "absent":
+ try:
+ changed = db_delete(cursor, db)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state in ("dump", "restore"):
+            method = db_dump if state == "dump" else db_restore
+ try:
+ if state == 'dump':
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
+ else:
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
+
+ if rc != 0:
+ module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
+ else:
+ module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
+ executed_commands=executed_commands)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_privs.py b/test/support/integration/plugins/modules/postgresql_privs.py
new file mode 100644
index 00000000..ba8324dd
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_privs.py
@@ -0,0 +1,1097 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+version_added: '1.2'
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version '2.8'.
+ - The C(type) choice is available from Ansible version '2.10'.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type ]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence) or C(function),
+      the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) from version 2.8)
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - 'If I(type) is I(function), colons (":") in object names will be
+ replaced with commas (needed to specify function signatures, see examples)'
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Pay attention, for embedded types when I(type=type)
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ version_added: '2.8'
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ version_added: '2.8'
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names to set as the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ version_added: '2.8'
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ version_added: '2.3'
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ version_added: '2.3'
+ type: str
+ aliases:
+ - ssl_rootcert
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+  access via privileges granted to any role R is a member of, including C(PUBLIC).
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: postgresql_user
+- module: postgresql_owner
+- module: postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- postgres
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since version 2.10
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges for new TABLES objects created by librarian as
+# default to the role reader.
+# For specific
+- name: ALTER privs
+ postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE SELECT privileges for new TABLES objects created by librarian as
+# default from the role reader.
+# For specific
+- name: ALTER privs
+ postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.10
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+ version_added: '2.8'
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.database import pg_quote_identifier
+from ansible.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+ """Check user exists or not"""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+ """Partial function application"""
+
+ def g(*g_args, **g_kwargs):
+ new_kwargs = kwargs.copy()
+ new_kwargs.update(g_kwargs)
+        return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
+
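+# Illustrative sketch: partial(f, 1, x=2)(3, y=4) calls f(1, 3, x=2, y=4),
+# mirroring functools.partial for the old Python versions noted above.
+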
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+ # To use defaults values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+ if is_localhost and params.unix_socket != "":
+ kw["host"] = params.unix_socket
+
+ sslrootcert = params.ca_cert
+ if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+            raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+ self.connection = psycopg2.connect(**kw)
+ self.cursor = self.connection.cursor()
+
+ def commit(self):
+ self.connection.commit()
+
+ def rollback(self):
+ self.connection.rollback()
+
+ @property
+ def encoding(self):
+ """Connection encoding in Python-compatible form"""
+ return psycopg2.extensions.encodings[self.connection.encoding]
+
+ # Methods for querying database objects
+
+ # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+ # phrases in GRANT or REVOKE statements, therefore alternative methods are
+ # provided here.
+
+ def schema_exists(self, schema):
+ query = """SELECT count(*)
+ FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return self.cursor.fetchone()[0] > 0
+
+ def get_all_tables_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_sequences_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S'"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_functions_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT p.proname, oidvectortypes(p.proargtypes)
+ FROM pg_catalog.pg_proc p
+ JOIN pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ # Methods for getting access control lists and group membership info
+
+ # To determine whether anything has changed after granting/revoking
+ # privileges, we compare the access control lists of the specified database
+ # objects before and afterwards. Python's list/string comparison should
+    # suffice for change detection; we should not actually have to parse ACLs.
+ # The same should apply to group membership information.
+
+ def get_table_acls(self, schema, tables):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, tables))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_sequence_acls(self, schema, sequences):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, sequences))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_function_acls(self, schema, function_signatures):
+ funcnames = [f.split('(', 1)[0] for f in function_signatures]
+ query = """SELECT proacl
+ FROM pg_catalog.pg_proc p
+ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s AND proname = ANY (%s)
+ ORDER BY proname, proargtypes"""
+ self.cursor.execute(query, (schema, funcnames))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_schema_acls(self, schemas):
+ query = """SELECT nspacl FROM pg_catalog.pg_namespace
+ WHERE nspname = ANY (%s) ORDER BY nspname"""
+ self.cursor.execute(query, (schemas,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_language_acls(self, languages):
+ query = """SELECT lanacl FROM pg_catalog.pg_language
+ WHERE lanname = ANY (%s) ORDER BY lanname"""
+ self.cursor.execute(query, (languages,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_tablespace_acls(self, tablespaces):
+ query = """SELECT spcacl FROM pg_catalog.pg_tablespace
+ WHERE spcname = ANY (%s) ORDER BY spcname"""
+ self.cursor.execute(query, (tablespaces,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_database_acls(self, databases):
+ query = """SELECT datacl FROM pg_catalog.pg_database
+ WHERE datname = ANY (%s) ORDER BY datname"""
+ self.cursor.execute(query, (databases,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_group_memberships(self, groups):
+ query = """SELECT roleid, grantor, member, admin_option
+ FROM pg_catalog.pg_auth_members am
+ JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
+ WHERE r.rolname = ANY(%s)
+ ORDER BY roleid, grantor, member"""
+ self.cursor.execute(query, (groups,))
+ return self.cursor.fetchall()
+
+ def get_default_privs(self, schema, *args):
+ query = """SELECT defaclacl
+ FROM pg_default_acl a
+ JOIN pg_namespace b ON a.defaclnamespace=b.oid
+ WHERE b.nspname = %s;"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_data_wrapper_acls(self, fdws):
+ query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (%s) ORDER BY fdwname"""
+ self.cursor.execute(query, (fdws,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_server_acls(self, fs):
+ query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (%s) ORDER BY srvname"""
+ self.cursor.execute(query, (fs,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_type_acls(self, schema, types):
+ query = """SELECT t.typacl FROM pg_catalog.pg_type t
+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
+ self.cursor.execute(query, (schema, types))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ # Manipulating privileges
+
+ def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
+ state, grant_option, schema_qualifier=None, fail_on_role=True):
+ """Manipulate database object privileges.
+
+ :param obj_type: Type of database object to grant/revoke
+ privileges for.
+ :param privs: Either a list of privileges to grant/revoke
+ or None if type is "group".
+ :param objs: List of database objects to grant/revoke
+ privileges for.
+ :param roles: Either a list of role names or "PUBLIC"
+ for the implicitly defined "PUBLIC" group
+ :param target_roles: List of role names to grant/revoke
+ default privileges as.
+ :param state: "present" to grant privileges, "absent" to revoke.
+ :param grant_option: Only for state "present": If True, set
+ grant/admin option. If False, revoke it.
+ If None, don't change grant option.
+ :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
+ "FUNCTION") must be qualified by schema.
+                                 Ignored for other types.
+        :param fail_on_role: If True, fail when a role does not exist;
+                             otherwise warn and skip it.
+        """
+ # get_status: function to get current status
+ if obj_type == 'table':
+ get_status = partial(self.get_table_acls, schema_qualifier)
+ elif obj_type == 'sequence':
+ get_status = partial(self.get_sequence_acls, schema_qualifier)
+ elif obj_type == 'function':
+ get_status = partial(self.get_function_acls, schema_qualifier)
+ elif obj_type == 'schema':
+ get_status = self.get_schema_acls
+ elif obj_type == 'language':
+ get_status = self.get_language_acls
+ elif obj_type == 'tablespace':
+ get_status = self.get_tablespace_acls
+ elif obj_type == 'database':
+ get_status = self.get_database_acls
+ elif obj_type == 'group':
+ get_status = self.get_group_memberships
+ elif obj_type == 'default_privs':
+ get_status = partial(self.get_default_privs, schema_qualifier)
+ elif obj_type == 'foreign_data_wrapper':
+ get_status = self.get_foreign_data_wrapper_acls
+ elif obj_type == 'foreign_server':
+ get_status = self.get_foreign_server_acls
+ elif obj_type == 'type':
+ get_status = partial(self.get_type_acls, schema_qualifier)
+ else:
+ raise Error('Unsupported database object type "%s".' % obj_type)
+
+ # Return False (nothing has changed) if there are no objs to work on.
+ if not objs:
+ return False
+
+ # obj_ids: quoted db object identifiers (sometimes schema-qualified)
+ if obj_type == 'function':
+ obj_ids = []
+ for obj in objs:
+ try:
+ f, args = obj.split('(', 1)
+ except Exception:
+ raise Error('Illegal function signature: "%s".' % obj)
+ obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+ elif obj_type in ['table', 'sequence', 'type']:
+ obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+ else:
+ obj_ids = ['"%s"' % o for o in objs]
+
+ # set_what: SQL-fragment specifying what to set for the target roles:
+ # Either group membership or privileges on objects of a certain type
+ if obj_type == 'group':
+ set_what = ','.join('"%s"' % i for i in obj_ids)
+ elif obj_type == 'default_privs':
+ # We don't want privs to be quoted here
+ set_what = ','.join(privs)
+ else:
+ # function types are already quoted above
+ if obj_type != 'function':
+ obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+ # Note: obj_type has been checked against a set of string literals
+ # and privs was escaped when it was parsed
+ # Note: Underscores are replaced with spaces to support multi-word obj_type
+ set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+ ','.join(obj_ids))
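+            # e.g. privs=['SELECT'], obj_type='table' and
+            # obj_ids=['"public"."books"'] yield:
+            #   SELECT ON table "public"."books"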
+
+ # for_whom: SQL-fragment specifying for whom to set the above
+ if roles == 'PUBLIC':
+ for_whom = 'PUBLIC'
+ else:
+ for_whom = []
+ for r in roles:
+ if not role_exists(self.module, self.cursor, r):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+ else:
+ self.module.warn("Role '%s' does not exist, pass it" % r.strip())
+ else:
+ for_whom.append('"%s"' % r)
+
+ if not for_whom:
+ return False
+
+ for_whom = ','.join(for_whom)
+
+ # as_who:
+ as_who = None
+ if target_roles:
+ as_who = ','.join('"%s"' % r for r in target_roles)
+
+ status_before = get_status(objs)
+
+ query = QueryBuilder(state) \
+ .for_objtype(obj_type) \
+ .with_grant_option(grant_option) \
+ .for_whom(for_whom) \
+ .as_who(as_who) \
+ .for_schema(schema_qualifier) \
+ .set_what(set_what) \
+ .for_objs(objs) \
+ .build()
+
+ executed_queries.append(query)
+ self.cursor.execute(query)
+ status_after = get_status(objs)
+
+        def nonesorted(e):
+            # On Python 3, sorting fails when comparing NoneType elements,
+            # so map None to an empty string for comparison purposes.
+            if e is None:
+                return ''
+            return e
+
+ status_before.sort(key=nonesorted)
+ status_after.sort(key=nonesorted)
+ return status_before != status_after
+
+
+class QueryBuilder(object):
+ def __init__(self, state):
+ self._grant_option = None
+ self._for_whom = None
+ self._as_who = None
+ self._set_what = None
+ self._obj_type = None
+ self._state = state
+ self._schema = None
+ self._objs = None
+ self.query = []
+
+ def for_objs(self, objs):
+ self._objs = objs
+ return self
+
+ def for_schema(self, schema):
+ self._schema = schema
+ return self
+
+ def with_grant_option(self, option):
+ self._grant_option = option
+ return self
+
+ def for_whom(self, who):
+ self._for_whom = who
+ return self
+
+ def as_who(self, target_roles):
+ self._as_who = target_roles
+ return self
+
+ def set_what(self, what):
+ self._set_what = what
+ return self
+
+ def for_objtype(self, objtype):
+ self._obj_type = objtype
+ return self
+
+    def build(self):
+        if self._state == 'present':
+            self.build_present()
+        else:
+            self.build_absent()
+        return '\n'.join(self.query)
+
+ def add_default_revoke(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+
+ def add_grant_option(self):
+ if self._grant_option:
+ if self._obj_type == 'group':
+ self.query[-1] += ' WITH ADMIN OPTION;'
+ else:
+ self.query[-1] += ' WITH GRANT OPTION;'
+ else:
+ self.query[-1] += ';'
+ if self._obj_type == 'group':
+ self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+        elif self._obj_type != 'default_privs':
+ self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+ def add_default_priv(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
+ self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ self.add_grant_option()
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
+ self._schema,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
+ self.add_grant_option()
+
+ def build_present(self):
+ if self._obj_type == 'default_privs':
+ self.add_default_revoke()
+ self.add_default_priv()
+ else:
+ self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
+ self.add_grant_option()
+
+ def build_absent(self):
+ if self._obj_type == 'default_privs':
+ self.query = []
+ for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
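+
+# A minimal usage sketch of QueryBuilder with hypothetical inputs (outside the
+# normal manipulate_privs() flow):
+#
+#   query = QueryBuilder('present') \
+#       .for_objtype('table') \
+#       .with_grant_option(None) \
+#       .for_whom('"reader"') \
+#       .set_what('SELECT ON table "public"."books"') \
+#       .build()
+#
+# produces:
+#   GRANT SELECT ON table "public"."books" TO "reader";
+#   REVOKE GRANT OPTION FOR SELECT ON table "public"."books" FROM "reader";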
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ database=dict(required=True, aliases=['db', 'login_db']),
+ state=dict(default='present', choices=['present', 'absent']),
+ privs=dict(required=False, aliases=['priv']),
+ type=dict(default='table',
+ choices=['table',
+ 'sequence',
+ 'function',
+ 'database',
+ 'schema',
+ 'language',
+ 'tablespace',
+ 'group',
+ 'default_privs',
+ 'foreign_data_wrapper',
+ 'foreign_server',
+ 'type', ]),
+ objs=dict(required=False, aliases=['obj']),
+ schema=dict(required=False),
+ roles=dict(required=True, aliases=['role']),
+ session_role=dict(required=False),
+ target_roles=dict(required=False),
+ grant_option=dict(required=False, type='bool',
+ aliases=['admin_option']),
+ host=dict(default='', aliases=['login_host']),
+ unix_socket=dict(default='', aliases=['login_unix_socket']),
+ login=dict(default='postgres', aliases=['login_user']),
+ password=dict(default='', aliases=['login_password'], no_log=True),
+ fail_on_role=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ fail_on_role = module.params['fail_on_role']
+
+ # Create type object as namespace for module params
+ p = type('Params', (), module.params)
+ # param "schema": default, allowed depends on param "type"
+ if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']:
+ p.schema = p.schema or 'public'
+ elif p.schema:
+ module.fail_json(msg='Argument "schema" is not allowed '
+ 'for type "%s".' % p.type)
+
+ # param "objs": default, required depends on param "type"
+ if p.type == 'database':
+ p.objs = p.objs or p.database
+ elif not p.objs:
+ module.fail_json(msg='Argument "objs" is required '
+ 'for type "%s".' % p.type)
+
+ # param "privs": allowed, required depends on param "type"
+ if p.type == 'group':
+ if p.privs:
+ module.fail_json(msg='Argument "privs" is not allowed '
+ 'for type "group".')
+ elif not p.privs:
+ module.fail_json(msg='Argument "privs" is required '
+ 'for type "%s".' % p.type)
+
+ # Connect to Database
+ if not psycopg2:
+ module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
+ try:
+ conn = Connection(p, module)
+ except psycopg2.Error as e:
+ module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+ except ValueError as e:
+ # We raise this when the psycopg library is too old
+ module.fail_json(msg=to_native(e))
+
+ if p.session_role:
+ try:
+ conn.cursor.execute('SET ROLE "%s"' % p.session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ # privs
+ if p.privs:
+ privs = frozenset(pr.upper() for pr in p.privs.split(','))
+ if not privs.issubset(VALID_PRIVS):
+ module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
+ else:
+ privs = None
+ # objs:
+ if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_tables_in_schema(p.schema)
+ elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_sequences_in_schema(p.schema)
+ elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_functions_in_schema(p.schema)
+ elif p.type == 'default_privs':
+ if p.objs == 'ALL_DEFAULT':
+ objs = frozenset(VALID_DEFAULT_OBJS.keys())
+ else:
+ objs = frozenset(obj.upper() for obj in p.objs.split(','))
+ if not objs.issubset(VALID_DEFAULT_OBJS):
+ module.fail_json(
+ msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
+ # Again, do we have valid privs specified for object type:
+ valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
+ if not valid_objects_for_priv == objs:
+ module.fail_json(
+ msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
+ valid_objects_for_priv, objs))
+ else:
+ objs = p.objs.split(',')
+
+ # function signatures are encoded using ':' to separate args
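+        # e.g. the object string 'add(int4:int4)' becomes 'add(int4,int4)'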
+ if p.type == 'function':
+ objs = [obj.replace(':', ',') for obj in objs]
+
+ # roles
+ if p.roles == 'PUBLIC':
+ roles = 'PUBLIC'
+ else:
+ roles = p.roles.split(',')
+
+            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+                if fail_on_role:
+                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+
+                module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+                module.exit_json(changed=False)
+
+ # check if target_roles is set with type: default_privs
+            if p.target_roles and p.type != 'default_privs':
+                module.warn('"target_roles" will be ignored. '
+                            'Argument "type: default_privs" is required for usage of "target_roles".')
+
+ # target roles
+ if p.target_roles:
+ target_roles = p.target_roles.split(',')
+ else:
+ target_roles = None
+
+ changed = conn.manipulate_privs(
+ obj_type=p.type,
+ privs=privs,
+ objs=objs,
+ roles=roles,
+ target_roles=target_roles,
+ state=p.state,
+ grant_option=p.grant_option,
+ schema_qualifier=p.schema,
+ fail_on_role=fail_on_role,
+ )
+
+    except Error as e:
+        conn.rollback()
+        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+    except psycopg2.Error as e:
+        conn.rollback()
+        module.fail_json(msg=to_native(e))
+
+ if module.check_mode:
+ conn.rollback()
+ else:
+ conn.commit()
+ module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_query.py b/test/support/integration/plugins/modules/postgresql_query.py
new file mode 100644
index 00000000..18d63e33
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_query.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'supported_by': 'community',
+ 'status': ['preview']
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+version_added: '2.8'
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to SQL script on the remote host.
+ - Returns result of the last query in the script.
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ version_added: '2.9'
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '2.10'
+seealso:
+- module: postgresql_db
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+ Insert data to the column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+'''
+
+RETURN = r'''
+query:
+  description: The query that the module tried to execute.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description: Attribute containing the message returned by the command.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+ returned: changed
+ type: list
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+rowcount:
+ description: Number of affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+    # This import is only needed for checking 'no results to fetch' in main();
+    # psycopg2 availability itself is checked by connect_to_db() from
+    # ansible.module_utils.postgres
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
+def list_to_pg_array(elem):
+ """Convert the passed list to PostgreSQL array
+ represented as a string.
+
+ Args:
+ elem (list): List that needs to be converted.
+
+ Returns:
+ elem (str): String representation of PostgreSQL array.
+ """
+ elem = str(elem).strip('[]')
+ elem = '{' + elem + '}'
+ return elem
+
+
+def convert_elements_to_pg_arrays(obj):
+ """Convert list elements of the passed object
+ to PostgreSQL arrays represented as strings.
+
+ Args:
+ obj (dict or list): Object whose elements need to be converted.
+
+ Returns:
+ obj (dict or list): Object with converted elements.
+ """
+ if isinstance(obj, dict):
+ for (key, elem) in iteritems(obj):
+ if isinstance(elem, list):
+ obj[key] = list_to_pg_array(elem)
+
+ elif isinstance(obj, list):
+ for i, elem in enumerate(obj):
+ if isinstance(elem, list):
+ obj[i] = list_to_pg_array(elem)
+
+ return obj
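+
+# For example (hypothetical values for illustration):
+#   list_to_pg_array([1, 2, 3]) returns '{1, 2, 3}'
+#   convert_elements_to_pg_arrays({'ids': [1, 2, 3]}) returns {'ids': '{1, 2, 3}'}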
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ query=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ positional_args=dict(type='list', elements='raw'),
+ named_args=dict(type='dict'),
+ session_role=dict(type='str'),
+ path_to_script=dict(type='path'),
+ autocommit=dict(type='bool', default=False),
+ encoding=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ query = module.params["query"]
+ positional_args = module.params["positional_args"]
+ named_args = module.params["named_args"]
+ path_to_script = module.params["path_to_script"]
+ autocommit = module.params["autocommit"]
+ encoding = module.params["encoding"]
+
+ if autocommit and module.check_mode:
+ module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
+
+ if path_to_script and query:
+ module.fail_json(msg="path_to_script is mutually exclusive with query")
+
+ if positional_args:
+ positional_args = convert_elements_to_pg_arrays(positional_args)
+
+ elif named_args:
+ named_args = convert_elements_to_pg_arrays(named_args)
+
+ if path_to_script:
+ try:
+ with open(path_to_script, 'rb') as f:
+ query = to_native(f.read())
+ except Exception as e:
+ module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ if encoding is not None:
+ db_connection.set_client_encoding(encoding)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Prepare args:
+ if module.params.get("positional_args"):
+ arguments = module.params["positional_args"]
+ elif module.params.get("named_args"):
+ arguments = module.params["named_args"]
+ else:
+ arguments = None
+
+ # Set defaults:
+ changed = False
+
+ # Execute query:
+ try:
+ cursor.execute(query, arguments)
+ except Exception as e:
+ if not autocommit:
+ db_connection.rollback()
+
+ cursor.close()
+ db_connection.close()
+ module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
+
+ statusmessage = cursor.statusmessage
+ rowcount = cursor.rowcount
+
+    try:
+        query_result = [dict(row) for row in cursor.fetchall()]
+    except Psycopg2ProgrammingError as e:
+        if to_native(e) == 'no results to fetch':
+            query_result = []
+        else:
+            module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+    except Exception as e:
+        module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
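+    # Infer 'changed' from the status message: e.g. 'INSERT 0 1' carries the
+    # affected-row count in its third token and 'UPDATE 5' in its second;
+    # a non-zero count means rows were modified.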
+ if 'SELECT' not in statusmessage:
+ if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+ s = statusmessage.split()
+ if len(s) == 3:
+ if statusmessage.split()[2] != '0':
+ changed = True
+
+ elif len(s) == 2:
+ if statusmessage.split()[1] != '0':
+ changed = True
+
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ if not autocommit:
+ db_connection.commit()
+
+ kw = dict(
+ changed=changed,
+ query=cursor.query,
+ statusmessage=statusmessage,
+ query_result=query_result,
+ rowcount=rowcount if rowcount >= 0 else 0,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_set.py b/test/support/integration/plugins/modules/postgresql_set.py
new file mode 100644
index 00000000..cfbdae64
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_set.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+  - Allows changing a PostgreSQL server configuration parameter.
+  - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+  - ALTER SYSTEM changes server configuration parameters across the entire database cluster.
+  - It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
+  - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+    which is read in addition to postgresql.conf.
+  - The module allows resetting a parameter to boot_val (the cluster initial value) with I(reset=yes), or removing the parameter
+    string from postgresql.auto.conf and reloading the configuration with I(value=default) (for settings with the postmaster context a restart is required).
+  - After a change you can see the previous and new parameter values and other information in the Ansible
+    output by using the returned values with the M(debug) module.
+version_added: '2.8'
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+    - To remove the parameter string from postgresql.auto.conf and
+      reload the server configuration you must pass I(value=default).
+      With I(value=default) the task always returns C(changed=true).
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect.
+ type: str
+ aliases:
+ - login_db
+notes:
+- PostgreSQL 9.4 and later are supported.
+- Note that changing a setting with the 'postmaster' context can return C(changed=true)
+  when actually nothing changes, because the same value may be presented in
+  several different forms, for example, 1024MB, 1GB, etc., whereas the pg_settings
+  system view stores it as a number of 8kB pages (for example, 131072).
+  The final check of the parameter value cannot compare these forms because the server was
+  not restarted and the value in pg_settings is not updated yet.
+- For some parameters a restart of the PostgreSQL server is required.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set the work_mem parameter to 32MB, show what has changed, and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Note that a restart of the PostgreSQL server is required for some parameters.
+# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value that was different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+  description: Whether a restart of the PostgreSQL server is required to apply the change.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+  description: Previous value of the parameter in a human-readable form.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+  description: Current value of the parameter in a human-readable form.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+  - Dictionary that contains the current parameter value (at the time the playbook finishes).
+  - Note that for some parameters a restart of the PostgreSQL server is required for the change to take effect.
+  - Returns the current value in check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# Allow setting values like 1mb instead of 1MB, etc.:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+ query = ("SELECT name, setting, unit, context, boot_val "
+ "FROM pg_settings WHERE name = %(name)s")
+ try:
+ cursor.execute(query, {'name': name})
+ info = cursor.fetchall()
+ cursor.execute("SHOW %s" % name)
+ val = cursor.fetchone()
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ raw_val = info[0][1]
+ unit = info[0][2]
+ context = info[0][3]
+ boot_val = info[0][4]
+
+ if val[0] == 'True':
+ val[0] = 'on'
+ elif val[0] == 'False':
+ val[0] = 'off'
+
+ if unit == 'kB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024
+
+ unit = 'b'
+
+ elif unit == 'MB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024 * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024 * 1024
+
+ unit = 'b'
+
+ return (val[0], raw_val, unit, boot_val, context)
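+
+# For example, if work_mem is currently 4MB (a hypothetical server state),
+# param_get(cursor, module, 'work_mem') returns
+# ('4MB', 4194304, 'b', 4194304, 'user').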
+
+
+def pretty_to_bytes(pretty_val):
+ # The function returns a value in bytes
+ # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+ # Otherwise it returns the passed argument.
+
+ val_in_bytes = None
+
+ if 'kB' in pretty_val:
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024
+
+ elif 'MB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024
+
+ elif 'GB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024
+
+ elif 'TB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+ elif 'B' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part
+
+ else:
+ return pretty_val
+
+ return val_in_bytes
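+
+# For example: pretty_to_bytes('4MB') returns 4194304, pretty_to_bytes('8kB')
+# returns 8192, and pretty_to_bytes('on') is returned unchanged.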
+
+
+def param_set(cursor, module, name, value, context):
+ try:
+ if str(value).lower() == 'default':
+ query = "ALTER SYSTEM SET %s = DEFAULT" % name
+ else:
+ query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+ cursor.execute(query)
+
+ if context != 'postmaster':
+ cursor.execute("SELECT pg_reload_conf()")
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ return True
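+
+# For example, param_set(cursor, module, 'work_mem', '64MB', 'user') executes
+# ALTER SYSTEM SET work_mem = '64MB' and then SELECT pg_reload_conf().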
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', aliases=['login_db']),
+ value=dict(type='str'),
+ reset=dict(type='bool'),
+ session_role=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ value = module.params["value"]
+ reset = module.params["reset"]
+
+    # Allow passing values like 1mb instead of 1MB, etc.:
+ if value:
+ for unit in POSSIBLE_SIZE_UNITS:
+ if value[:-2].isdigit() and unit in value[-2:]:
+ value = value.upper()
+
+ if value and reset:
+ module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
+
+ if not value and not reset:
+ module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ kw = {}
+ # Check server version (needs 9.4 or later):
+ ver = db_connection.server_version
+ if ver < PG_REQ_VER:
+ module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
+ kw = dict(
+ changed=False,
+ restart_required=False,
+ value_pretty="",
+ prev_val_pretty="",
+ value={"value": "", "unit": ""},
+ )
+ kw['name'] = name
+ db_connection.close()
+ module.exit_json(**kw)
+
+ # Set default returned values:
+ restart_required = False
+ changed = False
+ kw['name'] = name
+ kw['restart_required'] = False
+
+ # Get info about param state:
+ res = param_get(cursor, module, name)
+ current_value = res[0]
+ raw_val = res[1]
+ unit = res[2]
+ boot_val = res[3]
+ context = res[4]
+
+ if value == 'True':
+ value = 'on'
+ elif value == 'False':
+ value = 'off'
+
+ kw['prev_val_pretty'] = current_value
+ kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
+ kw['context'] = context
+
+ # Do job
+ if context == "internal":
+ module.fail_json(msg="%s: cannot be changed (internal context). See "
+ "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+ if context == "postmaster":
+ restart_required = True
+
+ # If check_mode, just compare and exit:
+ if module.check_mode:
+ if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+ kw['changed'] = False
+
+ else:
+ kw['value_pretty'] = value
+ kw['changed'] = True
+
+        # Always return the current raw value in check mode:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+ # Set param:
+ if value and value != current_value:
+ changed = param_set(cursor, module, name, value, context)
+
+ kw['value_pretty'] = value
+
+ # Reset param:
+ elif reset:
+ if raw_val == boot_val:
+ # nothing to change, exit:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ module.exit_json(**kw)
+
+ changed = param_set(cursor, module, name, boot_val, context)
+
+ if restart_required:
+ module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+ cursor.close()
+ db_connection.close()
+
+ # Reconnect and recheck current value:
+ if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ res = param_get(cursor, module, name)
+ # f_ means 'final'
+ f_value = res[0]
+ f_raw_val = res[1]
+
+ if raw_val == f_raw_val:
+ changed = False
+
+ else:
+ changed = True
+
+ kw['value_pretty'] = f_value
+ kw['value'] = dict(
+ value=f_raw_val,
+ unit=unit,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_table.py b/test/support/integration/plugins/modules/postgresql_table.py
new file mode 100644
index 00000000..3bef03b0
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_table.py
@@ -0,0 +1,601 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows you to create, drop, rename, or truncate a table, and to change some table attributes.
+version_added: '2.8'
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+    - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+      I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ required: false
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
+ Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+    - Column definitions for the table, for example C(id bigserial primary key).
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+    - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+    - Name of the database to connect to and where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ version_added: '2.9'
+notes:
+- If you do not pass the db parameter, tables will be created in the database
+  named postgres.
+- PostgreSQL allows creating columnless tables, so the columns param is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: postgresql_sequence
+- module: postgresql_idx
+- module: postgresql_info
+- module: postgresql_tablespace
+- module: postgresql_owner
+- module: postgresql_privs
+- module: postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import pg_quote_identifier
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+            if tblspace == 'pg_default' and not self.info['tblspace']:
+                pass  # Because an empty tblspace and pg_default have the same meaning
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
+
+ if exec_sql(self, query, ddl=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
+
+ if exec_sql(self, query, ddl=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
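+
+    # For example, on a Table named tbl2, create_like('tbl1',
+    # including='comments,indexes') generates roughly:
+    #   CREATE TABLE "tbl2" (LIKE "tbl1" INCLUDING comments INCLUDING indexes)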
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, ddl=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, ddl=True)
+
+ def set_owner(self, username):
+ query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(username, 'role'))
+ return exec_sql(self, query, ddl=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, ddl=True)
+
+ def set_tblspace(self, tblspace):
+ query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(tblspace, 'database'))
+ return exec_sql(self, query, ddl=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, ddl=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params["table"]
+ state = module.params["state"]
+ tablespace = module.params["tablespace"]
+ owner = module.params["owner"]
+ unlogged = module.params["unlogged"]
+ like = module.params["like"]
+ including = module.params["including"]
+ newname = module.params["rename"]
+ storage_params = module.params["storage_params"]
+ truncate = module.params["truncate"]
+ columns = module.params["columns"]
+ cascade = module.params["cascade"]
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+ changed = table_obj.create_like(like, including, tablespace,
+ unlogged, storage_params)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note: if the table has been renamed, the info is fetched by newname:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/postgresql_user.py b/test/support/integration/plugins/modules/postgresql_user.py
new file mode 100644
index 00000000..10afd0a0
--- /dev/null
+++ b/test/support/integration/plugins/modules/postgresql_user.py
@@ -0,0 +1,927 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Add or remove a user (role) from a PostgreSQL server instance
+description:
+- Adds or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- The fundamental function of the module is to create, or delete, users from
+ a PostgreSQL instance. Privilege assignment, or removal, is an optional
+ step, which works on one database at a time. This allows the module to
+ be called several times in the same playbook to modify the permissions on
+ different databases, or to grant permissions to already existing users.
+- A user cannot be removed until all the privileges have been stripped from
+ the user. In such a situation, if the module tries to remove the user it
+ will fail. To prevent this, the fail_on_user option signals the module to
+ try to remove the user, but to keep going if removal is not possible; the
+ module will report whether changes happened and, separately, whether the
+ user was removed or not.
+version_added: '0.6'
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+ - Set the user's password. Before Ansible 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password will automatically be hashed when saved into the
+ database if the C(encrypted) parameter is set, otherwise it will be
+ saved in plain text format.
+ - When passing a hashed password it must be generated with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is C(echo "md5$(echo -n
+ 'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of C(encrypted) parameter.
+ type: str
+ db:
+ description:
+ - Name of the database to connect to, where the user's permissions will be granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+ - If C(yes), fail when user (role) can't be removed. Otherwise just log and continue.
+ default: 'yes'
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ privileges can be defined for database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+ - To create a simple role to be used as a group, use the C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ version_added: '2.8'
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - Passwords can be passed already hashed or unhashed, and PostgreSQL
+ ensures the stored password is hashed when C(encrypted) is set.
+ - "Note: PostgreSQL 10 and newer does not support unhashed passwords."
+ - Prior to Ansible 2.6, this was C(no) by default.
+ default: 'yes'
+ type: bool
+ version_added: '1.4'
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), the user's password never expires.
+ - Note that this value should be a valid SQL date and time type.
+ type: str
+ version_added: '1.4'
+ no_password_changes:
+ description:
+ - If C(yes), don't inspect database for password changes. Effective when
+ C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make
+ password changes as necessary.
+ default: 'no'
+ type: bool
+ version_added: '2.0'
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ version_added: '2.4'
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ version_added: '2.3'
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version_added: '2.3'
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to the user.
+ type: list
+ elements: str
+ version_added: '2.9'
+ comment:
+ description:
+ - Add a comment on the user (equal to the COMMENT ON ROLE statement result).
+ type: str
+ version_added: '2.10'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use NOLOGIN role_attr_flags to change this behaviour.
+- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
+ You may not specify password or role_attr_flags when the PUBLIC user is specified.
+seealso:
+- module: postgresql_privs
+- module: postgresql_membership
+- module: postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+author:
+- Ansible Core Team
+extends_documentation_fragment: postgres
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from superuser status if the user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
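+#
+# A task using that mixed format (the table names products and orders are
+# illustrative only):
+- name: Connect to acme database and grant mixed database/table privs to django
+ postgresql_user:
+ db: acme
+ name: django
+ priv: "INSERT,UPDATE/products:SELECT/orders:ALL"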
+
+- name: Connect to test database and remove an existing user's password
+ postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+ version_added: '2.8'
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.database import pg_quote_identifier, SQLParseError
+from ansible.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import iteritems
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
+class InvalidFlagsError(Exception):
+ pass
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def user_exists(cursor, user):
+ # The PUBLIC user is a special case that is always there
+ if user == 'PUBLIC':
+ return True
+ query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
+ cursor.execute(query, {'user': user})
+ return cursor.rowcount > 0
+
+
+def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
+ """Create a new database user (role)."""
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ query_password_data = dict(password=password, expires=expires)
+ query = ['CREATE USER "%(user)s"' %
+ {"user": user}]
+ if password is not None and password != '':
+ query.append("WITH %(crypt)s" % {"crypt": encrypted})
+ query.append("PASSWORD %(password)s")
+ if expires is not None:
+ query.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query.append(role_attr_flags)
+ query = ' '.join(query)
+ executed_queries.append(query)
+ cursor.execute(query, query_password_data)
+ return True
+
+
+def user_should_we_change_password(current_role_attrs, user, password, encrypted):
+ """Check if we should change the user's password.
+
+ Compare the proposed password with the existing one, comparing
+ hashes if encrypted. If we can't access it, assume yes.
+ """
+
+ if current_role_attrs is None:
+ # on some databases, e.g. AWS RDS instances, there is no access to
+ # the pg_authid relation to check the pre-existing password, so we
+ # just assume the password is different
+ return True
+
+ # Do we actually need to do anything?
+ pwchanging = False
+ if password is not None:
+ # Empty password means that the role shouldn't have a password, which
+ # means we need to check if the current password is None.
+ if password == '':
+ if current_role_attrs['rolpassword'] is not None:
+ pwchanging = True
+ # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+ # 3: The size of the 'md5' prefix
+ # When the provided password looks like a MD5-hash, value of
+ # 'encrypted' is ignored.
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+ if password != current_role_attrs['rolpassword']:
+ pwchanging = True
+ elif encrypted == 'ENCRYPTED':
+ hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+ if hashed_password != current_role_attrs['rolpassword']:
+ pwchanging = True
+
+ return pwchanging
+
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+ """Change user password and/or attributes. Return True if changed, False otherwise."""
+ changed = False
+
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ if user == 'PUBLIC':
+ if password is not None:
+ module.fail_json(msg="cannot change the password for PUBLIC user")
+ elif role_attr_flags != '':
+ module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+ else:
+ return False
+
+ # Handle passwords.
+ if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+ # Select password and all flag-like columns in order to verify changes.
+ try:
+ select = "SELECT * FROM pg_authid where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError:
+ current_role_attrs = None
+ db_connection.rollback()
+
+ pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+ if current_role_attrs is None:
+ try:
+ # AWS RDS instances do not allow the user to access pg_authid,
+ # so try to get current_role_attrs from the pg_roles table
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes from pg_roles
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError as e:
+ db_connection.rollback()
+ module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+ role_attr_flags_changing = False
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if expires is not None:
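+ # Cast the requested expiry through the server so differently
+ # spelled timestamps compare equal to rolvaliduntil.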
+ cursor.execute("SELECT %s::timestamptz;", (expires,))
+ expires_with_tz = cursor.fetchone()[0]
+ expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+ else:
+ expires_changing = False
+
+ conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+ if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' % {"user": user}]
+ if pwchanging:
+ if password != '':
+ alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+ alter.append("PASSWORD %(password)s")
+ else:
+ alter.append("WITH PASSWORD NULL")
+ alter.append(role_attr_flags)
+ elif role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+ if expires is not None:
+ alter.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+
+ query_password_data = dict(password=password, expires=expires)
+ try:
+ cursor.execute(' '.join(alter), query_password_data)
+ changed = True
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+ except psycopg2.NotSupportedError as e:
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+
+ elif no_password_changes and role_attr_flags != '':
+ # Grab role information from pg_roles instead of pg_authid
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+
+ role_attr_flags_changing = False
+
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if not role_attr_flags_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' %
+ {"user": user}]
+ if role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+
+ try:
+ cursor.execute(' '.join(alter))
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+
+ # Grab new role attributes.
+ cursor.execute(select, {"user": user})
+ new_role_attrs = cursor.fetchone()
+
+ # Detect any differences between current_ and new_role_attrs.
+ changed = current_role_attrs != new_role_attrs
+
+ return changed
+
+
+def user_delete(cursor, user):
+ """Try to remove a user. Returns True if successful otherwise False"""
+ cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
+ try:
+ query = 'DROP USER "%s"' % user
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return False
+
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return True
+
+
+def has_table_privileges(cursor, user, table, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_table_privileges(cursor, user, table)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def get_table_privileges(cursor, user, table):
+ if '.' in table:
+ schema, table = table.split('.', 1)
+ else:
+ schema = 'public'
+ query = ("SELECT privilege_type FROM information_schema.role_table_grants "
+ "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
+ cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
+ return frozenset([x[0] for x in cursor.fetchall()])
+
+
+def grant_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'GRANT %s ON TABLE %s TO "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def get_database_privileges(cursor, user, db):
+ priv_map = {
+ 'C': 'CREATE',
+ 'T': 'TEMPORARY',
+ 'c': 'CONNECT',
+ }
+ query = 'SELECT datacl FROM pg_database WHERE datname = %s'
+ cursor.execute(query, (db,))
+ datacl = cursor.fetchone()[0]
+ if datacl is None:
+ return set()
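+ # datacl is an aclitem[] rendered like '{alice=CTc/owner,...}'; pull out
+ # the privilege letters granted to this user.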
+ r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
+ if r is None:
+ return set()
+ o = set()
+ for v in r.group(1):
+ o.add(priv_map[v])
+ return normalize_privileges(o, 'database')
+
+
+def has_database_privileges(cursor, user, db, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_database_privileges(cursor, user, db)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def grant_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'GRANT %s ON DATABASE %s TO "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ revoke_funcs = dict(table=revoke_table_privileges,
+ database=revoke_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested to be removed are
+ # currently granted to the user
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[0]:
+ revoke_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def grant_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ grant_funcs = dict(table=grant_table_privileges,
+ database=grant_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested for the user are
+ # currently missing
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[2]:
+ grant_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def parse_role_attrs(cursor, role_attr_flags):
+ """
+ Parse role attributes string for user creation.
+ Format:
+
+ attributes[,attributes,...]
+
+ Where:
+
+ attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
+ "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
+ "[NO]BYPASSRLS" ]
+
+ Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
+ Note: "[NO]CREATEUSER" role attribute is deprecated.
+
+ """
+ flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
+
+ valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
+ valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
+
+ if not flags.issubset(valid_flags):
+ raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
+ ' '.join(flags.difference(valid_flags)))
+
+ return ' '.join(flags)
+
+
+def normalize_privileges(privs, type_):
+ new_privs = set(privs)
+ if 'ALL' in new_privs:
+ new_privs.update(VALID_PRIVS[type_])
+ new_privs.remove('ALL')
+ if 'TEMP' in new_privs:
+ new_privs.add('TEMPORARY')
+ new_privs.remove('TEMP')
+
+ return new_privs
+
+
+def parse_privs(privs, db):
+ """
+ Parse privilege string to determine permissions for database db.
+ Format:
+
+ privileges[/privileges/...]
+
+ Where:
+
+ privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
+ TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
+ """
+ if privs is None:
+ return privs
+
+ o_privs = {
+ 'database': {},
+ 'table': {}
+ }
+ for token in privs.split('/'):
+ if ':' not in token:
+ type_ = 'database'
+ name = db
+ priv_set = frozenset(x.strip().upper()
+ for x in token.split(',') if x.strip())
+ else:
+ type_ = 'table'
+ name, privileges = token.split(':', 1)
+ priv_set = frozenset(x.strip().upper()
+ for x in privileges.split(',') if x.strip())
+
+ if not priv_set.issubset(VALID_PRIVS[type_]):
+ raise InvalidPrivsError('Invalid privs specified for %s: %s' %
+ (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
+
+ priv_set = normalize_privileges(priv_set, type_)
+ o_privs[type_][name] = priv_set
+
+ return o_privs
+
+
+def get_valid_flags_by_version(cursor):
+ """
+ Some role attributes were introduced after certain versions. We want to
+ compile a list of valid flags against the current Postgres version.
+ """
+ current_version = cursor.connection.server_version
+
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if current_version >= version_introduced
+ ]
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default='yes'),
+ no_password_changes=dict(type='bool', default='no'),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str', default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+ module.fail_json(msg="Unable to add user with given requirement "
+ "due to : %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if groups:
+ target_roles = []
+ target_roles.append(user)
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_plugin.py b/test/support/integration/plugins/modules/rabbitmq_plugin.py
new file mode 100644
index 00000000..301bbfe2
--- /dev/null
+++ b/test/support/integration/plugins/modules/rabbitmq_plugin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Chatham Financial <oss@chathamfinancial.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_plugin
+short_description: Manage RabbitMQ plugins
+description:
+ - This module can be used to enable or disable RabbitMQ plugins.
+version_added: "1.1"
+author:
+ - Chris Hoffman (@chrishoffman)
+options:
+ names:
+ description:
+ - Comma-separated list of plugin names. A single plugin name is also accepted.
+ required: true
+ aliases: [name]
+ new_only:
+ description:
+ - Only enable missing plugins.
+ - Does not disable plugins that are not in the names list.
+ type: bool
+ default: "no"
+ state:
+ description:
+ - Specify if plugins are to be enabled or disabled.
+ default: enabled
+ choices: [enabled, disabled]
+ prefix:
+ description:
+ - Specify a custom install prefix of a RabbitMQ installation.
+ version_added: "1.3"
+'''
+
+EXAMPLES = '''
+- name: Enables the rabbitmq_management plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: enabled
+
+- name: Enable multiple rabbitmq plugins
+ rabbitmq_plugin:
+ names: rabbitmq_management,rabbitmq_management_visualiser
+ state: enabled
+
+- name: Disable plugin
+ rabbitmq_plugin:
+ names: rabbitmq_management
+ state: disabled
+
+- name: Enable every plugin in the list, keeping other enabled plugins
+ rabbitmq_plugin:
+ names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management
+ state: enabled
+ new_only: 'yes'
+'''
+
+RETURN = '''
+enabled:
+ description: list of plugins enabled during task run
+ returned: always
+ type: list
+ sample: ["rabbitmq_management"]
+disabled:
+ description: list of plugins disabled during task run
+ returned: always
+ type: list
+ sample: ["rabbitmq_management"]
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class RabbitMqPlugins(object):
+
+ def __init__(self, module):
+ self.module = module
+ bin_path = ''
+ if module.params['prefix']:
+ if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
+ bin_path = os.path.join(module.params['prefix'], 'bin')
+ elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
+ bin_path = os.path.join(module.params['prefix'], 'sbin')
+ else:
+ # No such path exists.
+ module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix'])
+
+ self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins")
+ else:
+ self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
+
+ def _exec(self, args, run_in_check_mode=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = [self._rabbitmq_plugins]
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return out.splitlines()
+ return list()
+
+ def get_all(self):
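+ # 'list -E -m' prints only explicitly Enabled plugins in Minimal form
+ # (one bare name per line); it is read-only, so it also runs in check mode.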
+ list_output = self._exec(['list', '-E', '-m'], True)
+ plugins = []
+ for plugin in list_output:
+ if not plugin:
+ break
+ plugins.append(plugin)
+
+ return plugins
+
+ def enable(self, name):
+ self._exec(['enable', name])
+
+ def disable(self, name):
+ self._exec(['disable', name])
+
+
+def main():
+ arg_spec = dict(
+ names=dict(required=True, aliases=['name']),
+ new_only=dict(default='no', type='bool'),
+ state=dict(default='enabled', choices=['enabled', 'disabled']),
+ prefix=dict(required=False, default=None)
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ result = dict()
+ names = module.params['names'].split(',')
+ new_only = module.params['new_only']
+ state = module.params['state']
+
+ rabbitmq_plugins = RabbitMqPlugins(module)
+ enabled_plugins = rabbitmq_plugins.get_all()
+
+ enabled = []
+ disabled = []
+ if state == 'enabled':
+ if not new_only:
+ for plugin in enabled_plugins:
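+ # Lines containing spaces are not bare plugin names (e.g. stray
+ # warning output), so leave them alone.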
+ if " " in plugin:
+ continue
+ if plugin not in names:
+ rabbitmq_plugins.disable(plugin)
+ disabled.append(plugin)
+
+ for name in names:
+ if name not in enabled_plugins:
+ rabbitmq_plugins.enable(name)
+ enabled.append(name)
+ else:
+ for plugin in enabled_plugins:
+ if plugin in names:
+ rabbitmq_plugins.disable(plugin)
+ disabled.append(plugin)
+
+ result['changed'] = len(enabled) > 0 or len(disabled) > 0
+ result['enabled'] = enabled
+ result['disabled'] = disabled
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_queue.py b/test/support/integration/plugins/modules/rabbitmq_queue.py
new file mode 100644
index 00000000..567ec813
--- /dev/null
+++ b/test/support/integration/plugins/modules/rabbitmq_queue.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: rabbitmq_queue
+author: Manuel Sousa (@manuel-sousa)
+version_added: "2.0"
+
+short_description: Manage RabbitMQ queues
+description:
+ - This module uses the RabbitMQ REST API to create/delete queues.
+requirements: [ "requests >= 1.0.0" ]
+options:
+ name:
+ description:
+ - Name of the queue
+ required: true
+ state:
+ description:
+ - Whether the queue should be present or absent
+ choices: [ "present", "absent" ]
+ default: present
+ durable:
+ description:
+ - Whether the queue is durable or not.
+ type: bool
+ default: 'yes'
+ auto_delete:
+ description:
+ - If the queue should delete itself after the last consumer has unsubscribed.
+ type: bool
+ default: 'no'
+ message_ttl:
+ description:
+ - How long a message can live in the queue before it is discarded (milliseconds).
+ default: forever
+ auto_expires:
+ description:
+ - How long a queue can be unused before it is automatically deleted (milliseconds)
+ default: forever
+ max_length:
+ description:
+ - How many messages the queue can contain before it starts rejecting new ones.
+ default: no limit
+ dead_letter_exchange:
+ description:
+ - Optional name of an exchange to which messages will be republished if they
+ - are rejected or expire
+ dead_letter_routing_key:
+ description:
+ - Optional replacement routing key to use when a message is dead-lettered.
+ - Original routing key will be used if unset
+ max_priority:
+ description:
+ - Maximum number of priority levels for the queue to support.
+ - If not set, the queue will not support message priorities.
+ - Larger numbers indicate higher priority.
+ version_added: "2.4"
+ arguments:
+ description:
+ - Extra arguments for the queue. If defined, this argument is a key/value dictionary.
+ default: {}
+extends_documentation_fragment:
+ - rabbitmq
+'''
+
+EXAMPLES = '''
+# Create a queue
+- rabbitmq_queue:
+ name: myQueue
+
+# Create a queue on remote host
+- rabbitmq_queue:
+ name: myRemoteQueue
+ login_user: user
+ login_password: secret
+ login_host: remote.example.org
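+
+# Create a queue with a message TTL and dead-lettering (the queue and
+# exchange names here are illustrative; the options map onto the
+# x-arguments described above)
+- rabbitmq_queue:
+ name: myDelayQueue
+ message_ttl: 60000
+ dead_letter_exchange: myDLX
+ dead_letter_routing_key: expired
+ max_priority: 10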
+'''
+
+import json
+import traceback
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib import parse as urllib_parse
+from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
+
+
+def main():
+
+ argument_spec = rabbitmq_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ name=dict(required=True, type='str'),
+ durable=dict(default=True, type='bool'),
+ auto_delete=dict(default=False, type='bool'),
+ message_ttl=dict(default=None, type='int'),
+ auto_expires=dict(default=None, type='int'),
+ max_length=dict(default=None, type='int'),
+ dead_letter_exchange=dict(default=None, type='str'),
+ dead_letter_routing_key=dict(default=None, type='str'),
+ arguments=dict(default=dict(), type='dict'),
+ max_priority=dict(default=None, type='int')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
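+ # Build the management-API endpoint for this queue; the vhost is
+ # URL-quoted with no safe characters, so the default vhost '/' becomes '%2F'.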
+ url = "%s://%s:%s/api/queues/%s/%s" % (
+ module.params['login_protocol'],
+ module.params['login_host'],
+ module.params['login_port'],
+ urllib_parse.quote(module.params['vhost'], ''),
+ module.params['name']
+ )
+
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)
+
+ result = dict(changed=False, name=module.params['name'])
+
+ # Check if queue already exists
+ r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
+ verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
+
+ if r.status_code == 200:
+ queue_exists = True
+ response = r.json()
+ elif r.status_code == 404:
+ queue_exists = False
+ response = r.text
+ else:
+ module.fail_json(
+ msg="Invalid response from RESTAPI when trying to check if queue exists",
+ details=r.text
+ )
+
+ if module.params['state'] == 'present':
+ change_required = not queue_exists
+ else:
+ change_required = queue_exists
+
+ # Check if attributes change on existing queue
+ if not change_required and r.status_code == 200 and module.params['state'] == 'present':
+ if not (
+ response['durable'] == module.params['durable'] and
+ response['auto_delete'] == module.params['auto_delete'] and
+ (
+ ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
+ ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
+ ) and
+ (
+ ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
+ ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
+ ) and
+ (
+ ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
+ ('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
+ ) and
+ (
+ ('x-dead-letter-exchange' in response['arguments'] and
+ response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
+ ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
+ ) and
+ (
+ ('x-dead-letter-routing-key' in response['arguments'] and
+ response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
+ ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
+ ) and
+ (
+ ('x-max-priority' in response['arguments'] and
+ response['arguments']['x-max-priority'] == module.params['max_priority']) or
+ ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
+ )
+ ):
+ module.fail_json(
+ msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
+ )
+
+ # Copy parameters to arguments as used by RabbitMQ
+ for k, v in {
+ 'message_ttl': 'x-message-ttl',
+ 'auto_expires': 'x-expires',
+ 'max_length': 'x-max-length',
+ 'dead_letter_exchange': 'x-dead-letter-exchange',
+ 'dead_letter_routing_key': 'x-dead-letter-routing-key',
+ 'max_priority': 'x-max-priority'
+ }.items():
+ if module.params[k] is not None:
+ module.params['arguments'][v] = module.params[k]
+
+ # Exit if check_mode
+ if module.check_mode:
+ result['changed'] = change_required
+ result['details'] = response
+ result['arguments'] = module.params['arguments']
+ module.exit_json(**result)
+
+ # Do changes
+ if change_required:
+ if module.params['state'] == 'present':
+ r = requests.put(
+ url,
+ auth=(module.params['login_user'], module.params['login_password']),
+ headers={"content-type": "application/json"},
+ data=json.dumps({
+ "durable": module.params['durable'],
+ "auto_delete": module.params['auto_delete'],
+ "arguments": module.params['arguments']
+ }),
+ verify=module.params['ca_cert'],
+ cert=(module.params['client_cert'], module.params['client_key'])
+ )
+ elif module.params['state'] == 'absent':
+ r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
+ verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
+
+ # RabbitMQ 3.6.7 changed this response code from 204 to 201
+ if r.status_code == 204 or r.status_code == 201:
+ result['changed'] = True
+ module.exit_json(**result)
+ else:
+ module.fail_json(
+ msg="Error creating queue",
+ status=r.status_code,
+ details=r.text
+ )
+
+ else:
+ module.exit_json(
+ changed=False,
+ name=module.params['name']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/s3_bucket.py b/test/support/integration/plugins/modules/s3_bucket.py
new file mode 100644
index 00000000..f35cf53b
--- /dev/null
+++ b/test/support/integration/plugins/modules/s3_bucket.py
@@ -0,0 +1,740 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+description:
+ - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+version_added: "2.0"
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys (including versions and delete markers)
+ in the bucket first (an S3 bucket must be empty for a successful deletion).
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - Name of the S3 bucket.
+ required: true
+ type: str
+ policy:
+ description:
+ - The JSON policy as a string.
+ type: json
+ s3_url:
+ description:
+ - S3 URL endpoint for use with DigitalOcean, Ceph, Eucalyptus, FakeS3, etc.
+ - Assumes AWS if not specified.
+ - For Walrus, use the FQDN of the endpoint without scheme or path.
+ aliases: [ S3_URL ]
+ type: str
+ ceph:
+ description:
+ - Enable API compatibility with Ceph. It takes into account the S3 API subset working
+ with Ceph in order to provide the same module behaviour where possible.
+ type: bool
+ version_added: "2.2"
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+ of the request and the data download from the bucket.
+ type: bool
+ default: False
+ state:
+ description:
+ - Create or remove the S3 bucket.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ tags:
+ description:
+ - tags dict to apply to bucket
+ type: dict
+ purge_tags:
+ description:
+ - whether to remove tags that aren't present in the C(tags) parameter
+ type: bool
+ default: True
+ version_added: "2.9"
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ encryption:
+ description:
+ - Describes the default server-side encryption to apply to new objects in the bucket.
+ In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
+ choices: [ 'none', 'AES256', 'aws:kms' ]
+ version_added: "2.9"
+ type: str
+ encryption_key_id:
+ description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
+ not specified then it will default to the AWS provided KMS key.
+ version_added: "2.9"
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+notes:
+ - If the C(requestPayment), C(policy), C(tagging) or C(versioning)
+ operations/API aren't implemented by the endpoint, the module doesn't fail
+ as long as I(requester_pays) is C(False) and I(policy), I(tags), and
+ I(versioning) are C(None).
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple s3 bucket
+- s3_bucket:
+ name: mys3bucket
+ state: present
+
+# Create a simple s3 bucket on Ceph Rados Gateway
+- s3_bucket:
+ name: mys3bucket
+ s3_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an s3 bucket and any keys it contains
+- s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+
+# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
+- s3_bucket:
+ name: mydobucket
+ s3_url: 'https://nyc3.digitaloceanspaces.com'
+
+# Create a bucket with AES256 encryption
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "AES256"
+
+# Create a bucket with aws:kms encryption, KMS key
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+ encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
+
+# Create a bucket with aws:kms encryption, default key
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+'''
+
+import json
+import os
+import time
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def create_or_update_bucket(s3_client, module, location):
+
+ policy = module.params.get("policy")
+ name = module.params.get("name")
+ requester_pays = module.params.get("requester_pays")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ versioning = module.params.get("versioning")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+ changed = False
+ result = {}
+
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ try:
+ bucket_changed = create_bucket(s3_client, name, location)
+ s3_client.get_waiter('bucket_exists').wait(Bucket=name)
+ changed = changed or bucket_changed
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket")
+
+ # Versioning
+ try:
+ versioning_status = get_bucket_versioning(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+ else:
+ if versioning is not None:
+ required_versioning = None
+ if versioning and versioning_status.get('Status') != "Enabled":
+ required_versioning = 'Enabled'
+ elif not versioning and versioning_status.get('Status') == "Enabled":
+ required_versioning = 'Suspended'
+
+ if required_versioning:
+ try:
+ put_bucket_versioning(s3_client, name, required_versioning)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+ versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+ # This output format is there to ensure compatibility with previous versions of the module
+ result['versioning'] = {
+ 'Versioning': versioning_status.get('Status', 'Disabled'),
+ 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+ }
+
+ # Requester pays
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
+ module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+ else:
+ if requester_pays:
+ payer = 'Requester' if requester_pays else 'BucketOwner'
+ if requester_pays_status != payer:
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+ if requester_pays_status is None:
+ # We have often seen the put request not being taken into
+ # account, so we retry one more time
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+ changed = True
+
+ result['requester_pays'] = requester_pays
+
+ # Policy
+ try:
+ current_policy = get_bucket_policy(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket policy")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket policy")
+ else:
+ if policy is not None:
+ if isinstance(policy, string_types):
+ policy = json.loads(policy)
+
+ if not policy and current_policy:
+ try:
+ delete_bucket_policy(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy)
+ changed = True
+ elif compare_policies(current_policy, policy):
+ try:
+ put_bucket_policy(s3_client, name, policy)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+ if current_policy is None:
+ # As with request payment, the put request is often not taken into
+ # account right away, so we retry one more time
+ put_bucket_policy(s3_client, name, policy)
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+ changed = True
+
+ result['policy'] = current_policy
+
+ # Tags
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket tags")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket tags")
+ else:
+ if tags is not None:
+ # Tags are always returned as text
+ tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
+ if not purge_tags:
+ # Ensure existing tags that aren't updated by desired tags remain
+ current_copy = current_tags_dict.copy()
+ current_copy.update(tags)
+ tags = current_copy
+ if current_tags_dict != tags:
+ if tags:
+ try:
+ put_bucket_tagging(s3_client, name, tags)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket tags")
+ else:
+ if purge_tags:
+ try:
+ delete_bucket_tagging(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket tags")
+ current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
+ changed = True
+
+ result['tags'] = current_tags_dict
+
+ # Encryption
+ if hasattr(s3_client, "get_bucket_encryption"):
+ try:
+ current_encryption = get_bucket_encryption(s3_client, name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket encryption")
+ elif encryption is not None:
+ module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
+
+ if encryption is not None:
+ current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+ current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+ if encryption == 'none' and current_encryption_algorithm is not None:
+ try:
+ delete_bucket_encryption(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+ changed = True
+ elif encryption != 'none' and (encryption != current_encryption_algorithm or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
+ expected_encryption = {'SSEAlgorithm': encryption}
+ if encryption == 'aws:kms' and encryption_key_id is not None:
+ expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+ try:
+ put_bucket_encryption(s3_client, name, expected_encryption)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to set bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
+ changed = True
+
+ result['encryption'] = current_encryption
+
+ module.exit_json(changed=changed, name=name, **result)
+
+
+def bucket_exists(s3_client, bucket_name):
+ # head_bucket appeared to be really inconsistent, so we use list_buckets instead
+ # and loop over all the buckets, even though we know it's less performant :(
+ all_buckets = s3_client.list_buckets()['Buckets']
+ return any(bucket['Name'] == bucket_name for bucket in all_buckets)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location):
+ try:
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ if len(configuration) > 0:
+ s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
+ else:
+ s3_client.create_bucket(Bucket=bucket_name)
+ return True
+ except ClientError as e:
+ error_code = e.response['Error']['Code']
+ if error_code == 'BucketAlreadyOwnedByYou':
+ # We should never get here since we check the bucket presence before calling the create_or_update_bucket
+ # method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
+ return False
+ else:
+ raise e
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+ s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+ s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_policy(s3_client, bucket_name):
+ s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_policy(s3_client, bucket_name):
+ try:
+ current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
+ current_policy = None
+ else:
+ raise e
+ return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+ s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_request_payment(s3_client, bucket_name):
+ return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_versioning(s3_client, bucket_name):
+ return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+ s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_encryption(s3_client, bucket_name):
+ try:
+ result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+ return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
+ return None
+ else:
+ raise e
+ except (IndexError, KeyError):
+ return None
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+ server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+ s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_tagging(s3_client, bucket_name):
+ s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_encryption(s3_client, bucket_name):
+ s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def delete_bucket(s3_client, bucket_name):
+ try:
+ s3_client.delete_bucket(Bucket=bucket_name)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchBucket':
+ # This means the bucket was already in a deleting state when we checked its existence
+ # We just ignore the error
+ pass
+ else:
+ raise e
+
+
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ current_policy = get_bucket_policy(s3_client, bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+ if compare_policies(current_policy, expected_policy):
+ time.sleep(5)
+ else:
+ return current_policy
+ if should_fail:
+ module.fail_json(msg="Bucket policy failed to apply in the expected time")
+ else:
+ return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket request payment")
+ if requester_pays_status != expected_payer:
+ time.sleep(5)
+ else:
+ return requester_pays_status
+ if should_fail:
+ module.fail_json(msg="Bucket request payment failed to apply in the expected time")
+ else:
+ return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
+ for dummy in range(0, 12):
+ try:
+ encryption = get_bucket_encryption(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+ if encryption != expected_encryption:
+ time.sleep(5)
+ else:
+ return encryption
+ module.fail_json(msg="Bucket encryption failed to apply in the expected time")
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+ for dummy in range(0, 24):
+ try:
+ versioning_status = get_bucket_versioning(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+ if versioning_status.get('Status') != required_versioning:
+ time.sleep(8)
+ else:
+ return versioning_status
+ module.fail_json(msg="Bucket versioning failed to apply in the expected time")
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+ for dummy in range(0, 12):
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+ if current_tags_dict != expected_tags_dict:
+ time.sleep(5)
+ else:
+ return current_tags_dict
+ module.fail_json(msg="Bucket tags failed to apply in the expected time")
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+ try:
+ current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchTagSet':
+ return {}
+ raise e
+
+ return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+def paginated_list(s3_client, **pagination_params):
+ pg = s3_client.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versions_list(s3_client, **pagination_params):
+ try:
+ pg = s3_client.get_paginator('list_object_versions')
+ for page in pg.paginate(**pagination_params):
+ # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
+ yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
+ except is_boto3_error_code('NoSuchBucket'):
+ yield []
+
+
+def destroy_bucket(s3_client, module):
+
+ force = module.params.get("force")
+ name = module.params.get("name")
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ module.exit_json(changed=False)
+
+ if force:
+ # if there are contents then we need to delete them (including versions) before we can delete the bucket
+ try:
+ for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+ formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
+ for fk in formatted_keys:
+ # remove VersionId from cases where they are `None` so that
+ # unversioned objects are deleted using `DeleteObject`
+ # rather than `DeleteObjectVersion`, improving backwards
+ # compatibility with older IAM policies.
+ if not fk.get('VersionId'):
+ fk.pop('VersionId')
+
+ if formatted_keys:
+ resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+ if resp.get('Errors'):
+ module.fail_json(
+ msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
+ ', '.join([k['Key'] for k in resp['Errors']])
+ ),
+ errors=resp['Errors'], response=resp
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+ try:
+ delete_bucket(s3_client, name)
+ s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket")
+
+ module.exit_json(changed=True)
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
+def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
+ if s3_url and ceph: # TODO - test this
+ ceph_url = urlparse(s3_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph_url.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse(s3_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ return boto3_conn(**params)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ force=dict(default=False, type='bool'),
+ policy=dict(type='json'),
+ name=dict(required=True),
+ requester_pays=dict(default=False, type='bool'),
+ s3_url=dict(aliases=['S3_URL']),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ versioning=dict(type='bool'),
+ ceph=dict(default=False, type='bool'),
+ encryption=dict(choices=['none', 'AES256', 'aws:kms']),
+ encryption_key_id=dict()
+ )
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ s3_url = module.params.get('s3_url')
+ ceph = module.params.get('ceph')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ if ceph and not s3_url:
+ module.fail_json(msg='ceph flavour requires s3_url')
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to Ceph RGW, Walrus or fakes3
+ if s3_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
+
+ if s3_client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
+
+ state = module.params.get("state")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+
+ # Parameter validation
+ if encryption_key_id is not None and encryption is None:
+ module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.")
+ elif encryption_key_id is not None and encryption != 'aws:kms':
+ module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
+
+ if state == 'present':
+ create_or_update_bucket(s3_client, module, location)
+ elif state == 'absent':
+ destroy_bucket(s3_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/sefcontext.py b/test/support/integration/plugins/modules/sefcontext.py
new file mode 100644
index 00000000..33e3fd2e
--- /dev/null
+++ b/test/support/integration/plugins/modules/sefcontext.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+- Manages SELinux file context mapping definitions.
+- Similar to the C(semanage fcontext) command.
+version_added: '2.2'
+options:
+ target:
+ description:
+ - Target path (expression).
+ type: str
+ required: yes
+ aliases: [ path ]
+ ftype:
+ description:
+ - The file type that should have SELinux contexts applied.
+ - "The following file type options are available:"
+ - C(a) for all files,
+ - C(b) for block devices,
+ - C(c) for character devices,
+ - C(d) for directories,
+ - C(f) for regular files,
+ - C(l) for symbolic links,
+ - C(p) for named pipes,
+ - C(s) for socket files.
+ type: str
+ choices: [ a, b, c, d, f, l, p, s ]
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ type: str
+ required: yes
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ type: str
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ type: str
+ aliases: [ serange ]
+ state:
+ description:
+ - Whether the SELinux file context must be C(absent) or C(present).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ - Note that this does not apply SELinux file contexts to existing files.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+ - Useful for scenarios (such as chrooted environments) where you can't get the real SELinux state.
+ type: bool
+ default: no
+ version_added: '2.8'
+notes:
+- The changes are persistent across reboots.
+- The M(sefcontext) module does not modify existing files to the new
+ SELinux context(s), so it is advisable to first create the SELinux
+ file contexts before creating files, or run C(restorecon) manually
+ for the existing files that require the new SELinux file contexts.
+- Not applying SELinux fcontexts to existing files is a deliberate
+  decision, as it would be unclear what the reported changes would
+  entail, and there is no guarantee that applying the SELinux fcontext
+  would not pick up other unrelated prior changes.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Allow apache to modify files in /srv/git_repos
+ sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: present
+
+- name: Apply new SELinux file context to filesystem
+ command: restorecon -irv /srv/git_repos
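+
+# A hedged sketch, not part of the original examples; the target path and
+# setype are illustrative. ftype 's' limits the mapping to socket files.
+- name: Apply a context to socket files only
+ sefcontext:
+ target: '/var/run/myapp(/.*)?'
+ ftype: s
+ setype: httpd_var_run_t
+ state: present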
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+# Add missing entries (backward compatible)
+if HAVE_SEOBJECT:
+ seobject.file_types.update(
+ a=seobject.SEMANAGE_FCONTEXT_ALL,
+ b=seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c=seobject.SEMANAGE_FCONTEXT_CHAR,
+ d=seobject.SEMANAGE_FCONTEXT_DIR,
+ f=seobject.SEMANAGE_FCONTEXT_REG,
+ l=seobject.SEMANAGE_FCONTEXT_LINK,
+ p=seobject.SEMANAGE_FCONTEXT_PIPE,
+ s=seobject.SEMANAGE_FCONTEXT_SOCK,
+ )
+
+# Make backward compatible
+option_to_file_type_str = dict(
+ a='all files',
+ b='block device',
+ c='character device',
+ d='directory',
+ f='regular file',
+ l='symbolic link',
+ p='named pipe',
+ s='socket',
+)
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+ # Beware that records use a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ target=dict(type='str', required=True, aliases=['path']),
+ ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())),
+ setype=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/selogin.py b/test/support/integration/plugins/modules/selogin.py
new file mode 100644
index 00000000..6429ef36
--- /dev/null
+++ b/test/support/integration/plugins/modules/selogin.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+
+# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+ - Manages Linux user to SELinux user mapping
+version_added: "2.8"
+options:
+ login:
+ description:
+ - A Linux user.
+ required: true
+ seuser:
+ description:
+ - SELinux user name
+ required: true
+ selevel:
+ aliases: [ serange ]
+ description:
+ - MLS/MCS Security Range (MLS/MCS Systems only). SELinux Range for SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ description:
+ - Desired mapping value.
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ default: yes
+ ignore_selinux_state:
+ description:
+ - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+ - The changes are persistent across reboots.
+ - Not tested on any Debian-based system.
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+# Modify the default user on the system to the guest_u user
+- selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+# Assign gijoe user on an MLS machine a range and to the staff_u user
+- selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+# Assign all users in the engineering group to the staff_u user
+- selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
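+
+# A hedged sketch, not part of the original examples; the login name is
+# illustrative. Remove a previously created mapping.
+- selogin:
+ login: gijoe
+ seuser: staff_u
+ state: absent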
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/synchronize.py b/test/support/integration/plugins/modules/synchronize.py
new file mode 100644
index 00000000..e4c520b7
--- /dev/null
+++ b/test/support/integration/plugins/modules/synchronize.py
@@ -0,0 +1,618 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012-2013, Timothy Appnel <tim@appnel.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: synchronize
+version_added: "1.4"
+short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy
+description:
+ - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy.
+ - It is run and originates on the local host where Ansible is being run.
+ - Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of
+ boilerplate options and host facts.
+ - This module is not intended to provide access to the full power of rsync, but does make the most common
+ invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
+options:
+ src:
+ description:
+ - Path on the source host that will be synchronized to the destination.
+ - The path can be absolute or relative.
+ type: str
+ required: true
+ dest:
+ description:
+ - Path on the destination host that will be synchronized from the source.
+ - The path can be absolute or relative.
+ type: str
+ required: true
+ dest_port:
+ description:
+ - Port number for ssh on the destination host.
+ - Prior to Ansible 2.0, the ansible_ssh_port inventory var took precedence over this value.
+ - This parameter defaults to the value of C(ansible_ssh_port) or C(ansible_port),
+ the C(remote_port) config setting or the value from ssh client configuration
+ if none of the former have been set.
+ type: int
+ version_added: "1.5"
+ mode:
+ description:
+ - Specify the direction of the synchronization.
+ - In push mode the localhost or delegate is the source.
+ - In pull mode the remote host in context is the source.
+ type: str
+ choices: [ pull, push ]
+ default: push
+ archive:
+ description:
+ - Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D.
+ type: bool
+ default: yes
+ checksum:
+ description:
+ - Skip based on checksum, rather than mod-time & size; note that the "archive" option is still enabled by default - the "checksum" option will
+ not disable it.
+ type: bool
+ default: no
+ version_added: "1.6"
+ compress:
+ description:
+ - Compress file data during the transfer.
+ - In most cases, leave this enabled unless it causes problems.
+ type: bool
+ default: yes
+ version_added: "1.7"
+ existing_only:
+ description:
+ - Skip creating new files on receiver.
+ type: bool
+ default: no
+ version_added: "1.5"
+ delete:
+ description:
+ - Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path.
+ - This option requires C(recursive=yes).
+ - This option ignores excluded files and behaves like the rsync opt --delete-excluded.
+ type: bool
+ default: no
+ dirs:
+ description:
+ - Transfer directories without recursing.
+ type: bool
+ default: no
+ recursive:
+ description:
+ - Recurse into directories.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ links:
+ description:
+ - Copy symlinks as symlinks.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ copy_links:
+ description:
+ - Copy the items that symlinks point to (the referents) rather than the symlinks themselves.
+ type: bool
+ default: no
+ perms:
+ description:
+ - Preserve permissions.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ times:
+ description:
+ - Preserve modification times.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ owner:
+ description:
+ - Preserve owner (super user only).
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ group:
+ description:
+ - Preserve group.
+ - This parameter defaults to the value of the archive option.
+ type: bool
+ rsync_path:
+ description:
+ - Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page.
+ - To specify the rsync command to run on the local host, you need to set the task variable C(ansible_rsync_path).
+ type: str
+ rsync_timeout:
+ description:
+ - Specify a C(--timeout) for the rsync command in seconds.
+ type: int
+ default: 0
+ set_remote_user:
+ description:
+ - Put user@ for the remote paths.
+ - If you have a custom ssh config to define the remote user for a host
+ that does not match the inventory user, you should set this parameter to C(no).
+ type: bool
+ default: yes
+ use_ssh_args:
+ description:
+ - Use the ssh_args specified in ansible.cfg.
+ type: bool
+ default: no
+ version_added: "2.0"
+ rsync_opts:
+ description:
+ - Specify additional rsync options by passing in an array.
+ - Note that an empty string in C(rsync_opts) will end up transferring the current working directory.
+ type: list
+ default:
+ version_added: "1.6"
+ partial:
+ description:
+ - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster.
+ type: bool
+ default: no
+ version_added: "2.0"
+ verify_host:
+ description:
+ - Verify destination host key.
+ type: bool
+ default: no
+ version_added: "2.0"
+ private_key:
+ description:
+ - Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)).
+ type: path
+ version_added: "1.6"
+ link_dest:
+ description:
+ - Add a destination to hard link against during the rsync.
+ type: list
+ default:
+ version_added: "2.5"
+notes:
+ - rsync must be installed on both the local and remote host.
+ - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host
+ `synchronize is connecting to`.
+ - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one
+ remote machine.
+ - >
+ The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a
+ delegate_to host when delegate_to is used).
+ - The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active.
+ - In Ansible 2.0 a bug in the synchronize module made become occur on the "local host". This was fixed in Ansible 2.0.1.
+ - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine
+ and rsync doesn't give us a way to pass sudo credentials in.
+ - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been
+ determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and
+ rsync does not provide us a way to pass a password to the connection.
+ - Expect that dest=~/x will be ~<remote_user>/x even if using sudo.
+ - Inspect the verbose output to validate the destination user/host/path are what was expected.
+ - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory.
+ - The rsync daemon must be up and running with correct permissions when using the rsync protocol in the source or destination path.
+ - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process
+ encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
+ - C(link_dest) is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees
+ of the source and destination are the same. Attempts to hardlink into a directory that is a subdirectory of the source will be prevented.
+seealso:
+- module: copy
+- module: win_robocopy
+author:
+- Timothy Appnel (@tima)
+'''
+
+EXAMPLES = '''
+- name: Synchronization of src on the control machine to dest on the remote hosts
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+
+- name: Synchronization using rsync protocol (push)
+ synchronize:
+ src: some/relative/path/
+ dest: rsync://somehost.com/path/
+
+- name: Synchronization using rsync protocol (pull)
+ synchronize:
+ mode: pull
+ src: rsync://somehost.com/path/
+ dest: /some/absolute/path/
+
+- name: Synchronization using rsync protocol on delegate host (push)
+ synchronize:
+ src: /some/absolute/path/
+ dest: rsync://somehost.com/path/
+ delegate_to: delegate.host
+
+- name: Synchronization using rsync protocol on delegate host (pull)
+ synchronize:
+ mode: pull
+ src: rsync://somehost.com/path/
+ dest: /some/absolute/path/
+ delegate_to: delegate.host
+
+- name: Synchronization without any --archive options enabled
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ archive: no
+
+- name: Synchronization with --archive options enabled except for --recursive
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ recursive: no
+
+- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ checksum: yes
+ times: no
+
+- name: Synchronization without --archive options enabled except use --links
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ archive: no
+ links: yes
+
+- name: Synchronization of two paths both on the control machine
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ delegate_to: localhost
+
+- name: Synchronization of src on the inventory host to the dest on the localhost in pull mode
+ synchronize:
+ mode: pull
+ src: some/relative/path
+ dest: /some/absolute/path
+
+- name: Synchronization of src on delegate host to dest on the current inventory host.
+ synchronize:
+ src: /first/absolute/path
+ dest: /second/absolute/path
+ delegate_to: delegate.host
+
+- name: Synchronize two directories on one remote host.
+ synchronize:
+ src: /first/absolute/path
+ dest: /second/absolute/path
+ delegate_to: "{{ inventory_hostname }}"
+
+- name: Synchronize and delete files in dest on the remote host that are not found in src of localhost.
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ delete: yes
+ recursive: yes
+
+# This specific command is granted su privileges on the destination
+- name: Synchronize using an alternate rsync command
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ rsync_path: su -c rsync
+
+# Example .rsync-filter file in the source directory
+# - var # exclude any path whose last part is 'var'
+# - /var # exclude any path starting with 'var' starting at the source directory
+# + /var/conf # include /var/conf even though it was previously excluded
+
+- name: Synchronize passing in extra rsync options
+ synchronize:
+ src: /tmp/helloworld
+ dest: /var/www/helloworld
+ rsync_opts:
+ - "--no-motd"
+ - "--exclude=.git"
+
+# Hardlink files if they didn't change
+- name: Use hardlinks when synchronizing filesystems
+ synchronize:
+ src: /tmp/path_a/foo.txt
+ dest: /tmp/path_b/foo.txt
+ link_dest: /tmp/path_a/
+
+# Specify the rsync binary to use on remote host and on local host
+- hosts: groupofhosts
+ vars:
+ ansible_rsync_path: /usr/gnu/bin/rsync
+
+ tasks:
+ - name: copy /tmp/localpath/ to remote location /tmp/remotepath
+ synchronize:
+ src: /tmp/localpath/
+ dest: /tmp/remotepath
+ rsync_path: /usr/gnu/bin/rsync
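+
+# A hedged sketch, not part of the original examples; the key path and
+# timeout value are illustrative. private_key and rsync_timeout are
+# documented options of this module.
+- name: Synchronize using a dedicated SSH private key and a 60 second rsync timeout
+ synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+ private_key: /home/deploy/.ssh/id_rsa_sync
+ rsync_timeout: 60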
+'''
+
+
+import os
+import errno
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six.moves import shlex_quote
+
+
+client_addr = None
+
+
+def substitute_controller(path):
+ global client_addr
+ if not client_addr:
+ ssh_env_string = os.environ.get('SSH_CLIENT', None)
+ try:
+ client_addr, _ = ssh_env_string.split(None, 1)
+ except AttributeError:
+ ssh_env_string = os.environ.get('SSH_CONNECTION', None)
+ try:
+ client_addr, _ = ssh_env_string.split(None, 1)
+ except AttributeError:
+ pass
+ if not client_addr:
+ raise ValueError
+
+ if path.startswith('localhost:'):
+ path = path.replace('localhost', client_addr, 1)
+ return path
+
+
+def is_rsh_needed(source, dest):
+ if source.startswith('rsync://') or dest.startswith('rsync://'):
+ return False
+ if ':' in source or ':' in dest:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='str', required=True),
+ dest=dict(type='str', required=True),
+ dest_port=dict(type='int'),
+ delete=dict(type='bool', default=False),
+ private_key=dict(type='path'),
+ rsync_path=dict(type='str'),
+ _local_rsync_path=dict(type='path', default='rsync'),
+ _local_rsync_password=dict(type='str', no_log=True),
+ _substitute_controller=dict(type='bool', default=False),
+ archive=dict(type='bool', default=True),
+ checksum=dict(type='bool', default=False),
+ compress=dict(type='bool', default=True),
+ existing_only=dict(type='bool', default=False),
+ dirs=dict(type='bool', default=False),
+ recursive=dict(type='bool'),
+ links=dict(type='bool'),
+ copy_links=dict(type='bool', default=False),
+ perms=dict(type='bool'),
+ times=dict(type='bool'),
+ owner=dict(type='bool'),
+ group=dict(type='bool'),
+ set_remote_user=dict(type='bool', default=True),
+ rsync_timeout=dict(type='int', default=0),
+ rsync_opts=dict(type='list', default=[]),
+ ssh_args=dict(type='str'),
+ partial=dict(type='bool', default=False),
+ verify_host=dict(type='bool', default=False),
+ mode=dict(type='str', default='push', choices=['pull', 'push']),
+ link_dest=dict(type='list')
+ ),
+ supports_check_mode=True,
+ )
+
+ if module.params['_substitute_controller']:
+ try:
+ source = substitute_controller(module.params['src'])
+ dest = substitute_controller(module.params['dest'])
+ except ValueError:
+ module.fail_json(msg='Could not determine controller hostname for rsync to send to')
+ else:
+ source = module.params['src']
+ dest = module.params['dest']
+ dest_port = module.params['dest_port']
+ delete = module.params['delete']
+ private_key = module.params['private_key']
+ rsync_path = module.params['rsync_path']
+ rsync = module.params.get('_local_rsync_path', 'rsync')
+ rsync_password = module.params.get('_local_rsync_password')
+ rsync_timeout = module.params['rsync_timeout']
+ archive = module.params['archive']
+ checksum = module.params['checksum']
+ compress = module.params['compress']
+ existing_only = module.params['existing_only']
+ dirs = module.params['dirs']
+ partial = module.params['partial']
+ # the default of these params depends on the value of archive
+ recursive = module.params['recursive']
+ links = module.params['links']
+ copy_links = module.params['copy_links']
+ perms = module.params['perms']
+ times = module.params['times']
+ owner = module.params['owner']
+ group = module.params['group']
+ rsync_opts = module.params['rsync_opts']
+ ssh_args = module.params['ssh_args']
+ verify_host = module.params['verify_host']
+ link_dest = module.params['link_dest']
+
+ if '/' not in rsync:
+ rsync = module.get_bin_path(rsync, required=True)
+
+ cmd = [rsync, '--delay-updates', '-F']
+ _sshpass_pipe = None
+ if rsync_password:
+ try:
+ module.run_command(["sshpass"])
+ except OSError:
+ module.fail_json(
+ msg="to use rsync connection with passwords, you must install the sshpass program"
+ )
+ _sshpass_pipe = os.pipe()
+ cmd = ['sshpass', '-d' + to_native(_sshpass_pipe[0], errors='surrogate_or_strict')] + cmd
+ if compress:
+ cmd.append('--compress')
+ if rsync_timeout:
+ cmd.append('--timeout=%s' % rsync_timeout)
+ if module.check_mode:
+ cmd.append('--dry-run')
+ if delete:
+ cmd.append('--delete-after')
+ if existing_only:
+ cmd.append('--existing')
+ if checksum:
+ cmd.append('--checksum')
+ if copy_links:
+ cmd.append('--copy-links')
+ if archive:
+ cmd.append('--archive')
+ if recursive is False:
+ cmd.append('--no-recursive')
+ if links is False:
+ cmd.append('--no-links')
+ if perms is False:
+ cmd.append('--no-perms')
+ if times is False:
+ cmd.append('--no-times')
+ if owner is False:
+ cmd.append('--no-owner')
+ if group is False:
+ cmd.append('--no-group')
+ else:
+ if recursive is True:
+ cmd.append('--recursive')
+ if links is True:
+ cmd.append('--links')
+ if perms is True:
+ cmd.append('--perms')
+ if times is True:
+ cmd.append('--times')
+ if owner is True:
+ cmd.append('--owner')
+ if group is True:
+ cmd.append('--group')
+ if dirs:
+ cmd.append('--dirs')
+
+ if source.startswith('rsync://') and dest.startswith('rsync://'):
+ module.fail_json(msg='either src or dest must be a localhost', rc=1)
+
+ if is_rsh_needed(source, dest):
+
+ # https://github.com/ansible/ansible/issues/15907
+ has_rsh = False
+ for rsync_opt in rsync_opts:
+ if '--rsh' in rsync_opt:
+ has_rsh = True
+ break
+
+ # if the user has not supplied an --rsh option go ahead and add ours
+ if not has_rsh:
+ ssh_cmd = [module.get_bin_path('ssh', required=True), '-S', 'none']
+ if private_key is not None:
+ ssh_cmd.extend(['-i', private_key])
+ # If the user specified a port value
+ # Note: The action plugin takes care of setting this to a port from
+ # inventory if the user didn't specify an explicit dest_port
+ if dest_port is not None:
+ ssh_cmd.extend(['-o', 'Port=%s' % dest_port])
+ if not verify_host:
+ ssh_cmd.extend(['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null'])
+ ssh_cmd_str = ' '.join(shlex_quote(arg) for arg in ssh_cmd)
+ if ssh_args:
+ ssh_cmd_str += ' %s' % ssh_args
+ cmd.append('--rsh=%s' % ssh_cmd_str)
+
+ if rsync_path:
+ cmd.append('--rsync-path=%s' % rsync_path)
+
+ if rsync_opts:
+ if '' in rsync_opts:
+ module.warn('The empty string is present in rsync_opts which will cause rsync to'
+ ' transfer the current working directory. If this is intended, use "."'
+ ' instead to get rid of this warning. If this is unintended, check for'
+ ' problems in your playbook leading to empty string in rsync_opts.')
+ cmd.extend(rsync_opts)
+
+ if partial:
+ cmd.append('--partial')
+
+ if link_dest:
+ cmd.append('-H')
+ # verbose required because rsync does not believe that adding a
+ # hardlink is actually a change
+ cmd.append('-vv')
+ for x in link_dest:
+ link_path = os.path.abspath(os.path.expanduser(x))
+ destination_path = os.path.abspath(os.path.dirname(dest))
+ if destination_path.startswith(link_path):
+ module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest))
+ cmd.append('--link-dest=%s' % link_path)
+
+ changed_marker = '<<CHANGED>>'
+ cmd.append('--out-format=' + changed_marker + '%i %n%L')
+
+ # expand the paths
+ if '@' not in source:
+ source = os.path.expanduser(source)
+ if '@' not in dest:
+ dest = os.path.expanduser(dest)
+
+ cmd.append(source)
+ cmd.append(dest)
+ cmdstr = ' '.join(cmd)
+
+ # If we are using password authentication, write the password into the pipe
+ if rsync_password:
+ def _write_password_to_pipe(proc):
+ os.close(_sshpass_pipe[0])
+ try:
+ os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n')
+ except OSError as exc:
+ # Ignore broken pipe errors if the sshpass process has exited.
+ if exc.errno != errno.EPIPE or proc.poll() is None:
+ raise
+
+ (rc, out, err) = module.run_command(
+ cmd, pass_fds=_sshpass_pipe,
+ before_communicate_callback=_write_password_to_pipe)
+ else:
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc:
+ return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
+
+ if link_dest:
+ # a leading period indicates no change
+ changed = (changed_marker + '.') not in out
+ else:
+ changed = changed_marker in out
+
+ out_clean = out.replace(changed_marker, '')
+ out_lines = out_clean.split('\n')
+ while '' in out_lines:
+ out_lines.remove('')
+ if module._diff:
+ diff = {'prepared': out_clean}
+ return module.exit_json(changed=changed, msg=out_clean,
+ rc=rc, cmd=cmdstr, stdout_lines=out_lines,
+ diff=diff)
+
+ return module.exit_json(changed=changed, msg=out_clean,
+ rc=rc, cmd=cmdstr, stdout_lines=out_lines)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/timezone.py b/test/support/integration/plugins/modules/timezone.py
new file mode 100644
index 00000000..b7439a12
--- /dev/null
+++ b/test/support/integration/plugins/modules/timezone.py
@@ -0,0 +1,909 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock and of the hardware clock. If you want to set up NTP, use the M(service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+ For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+ On SmartOS, C(sm-set-timezone) is used; on macOS, C(systemsetup); on BSD, C(/etc/localtime) is modified.
+ On AIX, C(chtz) is used.
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+ - As of Ansible 2.9 support was added for AIX 6.1+.
+ - Windows and HP-UX are not supported; please let us know if you find any other OS/distro in which this fails.
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock is required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+ - Note that changing this option is not recommended, and the change may fail
+ to apply, especially in virtual environments such as AWS.
+ - B(At least one of name and hwclock is required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+ - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the timezone of the zone.
+ - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+ description: The differences about the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ timezone:
+ name: Asia/Tokyo
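+
+# A hedged sketch, not part of the original examples; combines the two
+# documented options (hwclock applies to Linux only).
+- name: Set timezone to Asia/Tokyo and keep the hardware clock in UTC
+ timezone:
+ name: Asia/Tokyo
+ hwclock: UTC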
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists and is available.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ module.warn('timedatectl command was found but not usable: %s. Using other method.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, _) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif platform.system() == 'Darwin':
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+ # `self.value` holds the values for each param in each phase.
+ # Initially only the "planned" phase is populated; the
+ # `self.check()` function fills in the others.
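+ # For example (illustrative), after the before/after checks:
+ # self.value = {'name': {'planned': 'Asia/Tokyo', 'before': 'UTC', 'after': 'Asia/Tokyo'}}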
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just the wrapper of module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just the wrapper of module.run_command().
+
+ Args:
+ *commands: The command to execute, given as one or more fragments.
+ The fragments will be concatenated with a single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of phase to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+ For the timezone setting, it edits one of the following files and reflects the changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+ # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
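+ # e.g. matches the value in a line like "UTC=yes" in /etc/default/rcS or /etc/sysconfig/clock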
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ distribution = get_distribution()
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matches more than once, all lines other than the first match will be deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+ key: For what key the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+ # Find all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+ # If we cannot find UTC/LOCAL in /etc/adjtime, UTC
+ # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+ # In the 'before' phase the timezone name doesn't need to be
+ # set in the config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the planned value is the same as the one in the config file
+ # we need to check if /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+ linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+ # If /etc/localtime is not a symlink best we can do is compare it with
+ # the 'planned' zone info file and return 'n/a' if they are different.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+ implementations, uses the `systemsetup` command on Darwin to check/set
+ the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = [line.strip() for line in out]
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+ # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+ zoneinfo_file = os.readlink(localtime_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
+ for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+ # Now (somewhat) atomically update the symlink by creating a new
+ # symlink and moving it into place. Otherwise we would have to remove the
+ # original symlink and create the new symlink; however, that would
+ # create a race condition in case another process tries to read
+ # /etc/localtime between removal and creation.
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+ While AIX time zones can be set using two formats (POSIX and
+ Olson), the preferred method is Olson.
+ See the following article for more information:
+ https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ f = open('/etc/environment', 'r')
+ etcenvironment = f.read()
+ f.close()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+ # It will only return non-zero if the chtz command itself fails; it does not check for
+ # valid timezones. We need to perform a basic check to confirm that the timezone
+ # definition exists in /usr/share/lib/zoneinfo.
+ # This does mean that we can only support Olson for now. The commented-out regex
+ # below detects Olson date formats, so in the future we could detect POSIX or Olson and
+ # act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+ # msg = 'Supplied timezone (%s) does not appear to a be valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ # Unpack explicitly instead of relying on dict value ordering
+ diff_ap = tz.diff('after', 'planned')
+ (after, planned) = (diff_ap['after'], diff_ap['planned'])
+ if after != planned:
+ tz.abort('still not in the desired state, though changes have been made - '
+ 'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/x509_crl.py b/test/support/integration/plugins/modules/x509_crl.py
new file mode 100644
index 00000000..ef601eda
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl.py
@@ -0,0 +1,783 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl
+version_added: "2.10"
+short_description: Generate Certificate Revocation Lists (CRLs)
+description:
+ - This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
+ - Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
+ or as a path to a certificate file in PEM format.
+requirements:
+ - cryptography >= 1.2
+author:
+ - Felix Fontein (@felixfontein)
+options:
+ state:
+ description:
+ - Whether the CRL file should exist or not, taking action if the state is different from what is stated.
+ type: str
+ default: present
+ choices: [ absent, present ]
+
+ mode:
+ description:
+ - Defines how to process entries of existing CRLs.
+ - If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
+ as specified in I(revoked_certificates).
+ - If set to C(update), makes sure that the CRL contains the revoked certificates from
+ I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
+ already exists, all entries from the existing CRL will also be included in the new CRL.
+ When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
+ type: str
+ default: generate
+ choices: [ generate, update ]
+
+ force:
+ description:
+ - Should the CRL be forced to be regenerated.
+ type: bool
+ default: no
+
+ backup:
+ description:
+ - Create a backup file including a timestamp so you can get the original
+ CRL back if you overwrote it with a new one by accident.
+ type: bool
+ default: no
+
+ path:
+ description:
+ - Remote absolute path where the generated CRL file should be created or is already located.
+ type: path
+ required: yes
+
+ privatekey_path:
+ description:
+ - Path to the CA's private key to use when signing the CRL.
+ - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+ type: path
+
+ privatekey_content:
+ description:
+ - The content of the CA's private key to use when signing the CRL.
+ - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
+ type: str
+
+ privatekey_passphrase:
+ description:
+ - The passphrase for the I(privatekey_path).
+ - This is required if the private key is password protected.
+ type: str
+
+ issuer:
+ description:
+ - Key/value pairs that will be present in the issuer name field of the CRL.
+ - If you need to specify more than one value with the same key, use a list as value.
+ - Required if I(state) is C(present).
+ type: dict
+
+ last_update:
+ description:
+ - The point in time from which this CRL can be trusted.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent, except when
+ I(ignore_timestamps) is set to C(yes).
+ type: str
+ default: "+0s"
+
+ next_update:
+ description:
+ - "The absolute latest point in time by which this I(issuer) is expected to have issued
+ another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent, except when
+ I(ignore_timestamps) is set to C(yes).
+ - Required if I(state) is C(present).
+ type: str
+
+ digest:
+ description:
+ - Digest algorithm to be used when signing the CRL.
+ type: str
+ default: sha256
+
+ revoked_certificates:
+ description:
+ - List of certificates to be revoked.
+ - Required if I(state) is C(present).
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Path to a certificate in PEM format.
+ - The serial number and issuer will be extracted from the certificate.
+ - Mutually exclusive with I(content) and I(serial_number). One of these three options
+ must be specified.
+ type: path
+ content:
+ description:
+ - Content of a certificate in PEM format.
+ - The serial number and issuer will be extracted from the certificate.
+ - Mutually exclusive with I(path) and I(serial_number). One of these three options
+ must be specified.
+ type: str
+ serial_number:
+ description:
+ - Serial number of the certificate.
+ - Mutually exclusive with I(path) and I(content). One of these three options must
+ be specified.
+ type: int
+ revocation_date:
+ description:
+ - The point in time the certificate was revoked.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent, except when
+ I(ignore_timestamps) is set to C(yes).
+ type: str
+ default: "+0s"
+ issuer:
+ description:
+ - The certificate's issuer.
+ - "Example: C(DNS:ca.example.org)"
+ type: list
+ elements: str
+ issuer_critical:
+ description:
+ - Whether the certificate issuer extension should be critical.
+ type: bool
+ default: no
+ reason:
+ description:
+ - The value for the revocation reason extension.
+ type: str
+ choices:
+ - unspecified
+ - key_compromise
+ - ca_compromise
+ - affiliation_changed
+ - superseded
+ - cessation_of_operation
+ - certificate_hold
+ - privilege_withdrawn
+ - aa_compromise
+ - remove_from_crl
+ reason_critical:
+ description:
+ - Whether the revocation reason extension should be critical.
+ type: bool
+ default: no
+ invalidity_date:
+ description:
+ - The point in time it was known/suspected that the private key was compromised
+ or that the certificate otherwise became invalid.
+ - Time can be specified either as relative time or as absolute timestamp.
+ - Time will always be interpreted as UTC.
+ - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
+ - Note that if using relative time this module is NOT idempotent. This will NOT
+ change when I(ignore_timestamps) is set to C(yes).
+ type: str
+ invalidity_date_critical:
+ description:
+ - Whether the invalidity date extension should be critical.
+ type: bool
+ default: no
+
+ ignore_timestamps:
+ description:
+ - Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
+ I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
+ I(invalidity_date) in I(revoked_certificates) will never be ignored.
+ - Use this in combination with relative timestamps for these values to get idempotency.
+ type: bool
+ default: no
+
+ return_content:
+ description:
+ - If set to C(yes), will return the (current or generated) CRL's content as I(crl).
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+ - files
+
+notes:
+ - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
+ - Dates specified should be UTC. Minutes and seconds are mandatory.
+'''
+
+EXAMPLES = r'''
+- name: Generate a CRL
+ x509_crl:
+ path: /etc/ssl/my-ca.crl
+ privatekey_path: /etc/ssl/private/my-ca.pem
+ issuer:
+ CN: My CA
+ last_update: "+0s"
+ next_update: "+7d"
+ revoked_certificates:
+ - serial_number: 1234
+ revocation_date: 20190331202428Z
+ issuer:
+ CN: My CA
+ - serial_number: 2345
+ revocation_date: 20191013152910Z
+ reason: affiliation_changed
+ invalidity_date: 20191001000000Z
+ - path: /etc/ssl/crt/revoked-cert.pem
+ revocation_date: 20191010010203Z
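+
+# An illustrative variant (assuming the same CA key and CRL path as above):
+# mode=update merges the new entries into the existing CRL, and
+# ignore_timestamps keeps the relative timestamps idempotent.
+- name: Add one revoked certificate to the existing CRL
+ x509_crl:
+ path: /etc/ssl/my-ca.crl
+ privatekey_path: /etc/ssl/private/my-ca.pem
+ issuer:
+ CN: My CA
+ next_update: "+7d"
+ mode: update
+ ignore_timestamps: yes
+ revoked_certificates:
+ - serial_number: 3456
+ reason: key_compromise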
+'''
+
+RETURN = r'''
+filename:
+ description: Path to the generated CRL
+ returned: changed or success
+ type: str
+ sample: /path/to/my-ca.crl
+backup_file:
+ description: Name of backup file created.
+ returned: changed and if I(backup) is C(yes)
+ type: str
+ sample: /path/to/my-ca.crl.2019-03-09@11:22~
+privatekey:
+ description: Path to the private CA key
+ returned: changed or success
+ type: str
+ sample: /path/to/my-ca.pem
+issuer:
+ description:
+ - The CRL's issuer.
+ - Note that for repeated values, only the last one will be returned.
+ returned: success
+ type: dict
+ sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+ description: The CRL's issuer as an ordered list of tuples.
+ returned: success
+ type: list
+ elements: list
+ sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
+last_update:
+ description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+next_update:
+ description: The point in time by which a new CRL will be issued, and by which the client has to check for it, as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+digest:
+ description: The signature algorithm used to sign the CRL.
+ returned: success
+ type: str
+ sample: sha256WithRSAEncryption
+revoked_certificates:
+ description: List of certificates to be revoked.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ serial_number:
+ description: Serial number of the certificate.
+ type: int
+ sample: 1234
+ revocation_date:
+ description: The point in time the certificate was revoked as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ issuer:
+ description: The certificate's issuer.
+ type: list
+ elements: str
+ sample: '["DNS:ca.example.org"]'
+ issuer_critical:
+ description: Whether the certificate issuer extension is critical.
+ type: bool
+ sample: no
+ reason:
+ description:
+ - The value for the revocation reason extension.
+ - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
+ C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
+ C(remove_from_crl).
+ type: str
+ sample: key_compromise
+ reason_critical:
+ description: Whether the revocation reason extension is critical.
+ type: bool
+ sample: no
+ invalidity_date:
+ description: |
+ The point in time it was known/suspected that the private key was compromised
+ or that the certificate otherwise became invalid as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ invalidity_date_critical:
+ description: Whether the invalidity date extension is critical.
+ type: bool
+ sample: no
+crl:
+ description: The (current or generated) CRL's content.
+ returned: if I(state) is C(present) and I(return_content) is C(yes)
+ type: str
+'''
+
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives.serialization import Encoding
+ from cryptography.x509 import (
+ CertificateRevocationListBuilder,
+ RevokedCertificateBuilder,
+ NameAttribute,
+ Name,
+ )
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+
+
+TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
+
+
+class CRLError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class CRL(crypto_utils.OpenSSLObject):
+
+ def __init__(self, module):
+ super(CRL, self).__init__(
+ module.params['path'],
+ module.params['state'],
+ module.params['force'],
+ module.check_mode
+ )
+
+ self.update = module.params['mode'] == 'update'
+ self.ignore_timestamps = module.params['ignore_timestamps']
+ self.return_content = module.params['return_content']
+ self.crl_content = None
+
+ self.privatekey_path = module.params['privatekey_path']
+ self.privatekey_content = module.params['privatekey_content']
+ if self.privatekey_content is not None:
+ self.privatekey_content = self.privatekey_content.encode('utf-8')
+ self.privatekey_passphrase = module.params['privatekey_passphrase']
+
+ self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
+ self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
+
+ self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update')
+ self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update')
+
+ self.digest = crypto_utils.select_message_digest(module.params['digest'])
+ if self.digest is None:
+ raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
+
+ self.revoked_certificates = []
+ for i, rc in enumerate(module.params['revoked_certificates']):
+ result = {
+ 'serial_number': None,
+ 'revocation_date': None,
+ 'issuer': None,
+ 'issuer_critical': False,
+ 'reason': None,
+ 'reason_critical': False,
+ 'invalidity_date': None,
+ 'invalidity_date_critical': False,
+ }
+ path_prefix = 'revoked_certificates[{0}].'.format(i)
+ if rc['path'] is not None or rc['content'] is not None:
+ # Load certificate from file or content
+ try:
+ if rc['content'] is not None:
+ rc['content'] = rc['content'].encode('utf-8')
+ cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography')
+ try:
+ result['serial_number'] = cert.serial_number
+ except AttributeError:
+ # The property was called "serial" before cryptography 1.4
+ result['serial_number'] = cert.serial
+ except crypto_utils.OpenSSLObjectError as e:
+ if rc['content'] is not None:
+ module.fail_json(
+ msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
+ )
+ else:
+ module.fail_json(
+ msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
+ )
+ else:
+ # Specify serial_number (and potentially issuer) directly
+ result['serial_number'] = rc['serial_number']
+ # All other options
+ if rc['issuer']:
+ result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']]
+ result['issuer_critical'] = rc['issuer_critical']
+ result['revocation_date'] = crypto_utils.get_relative_time_option(
+ rc['revocation_date'],
+ path_prefix + 'revocation_date'
+ )
+ if rc['reason']:
+ result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']]
+ result['reason_critical'] = rc['reason_critical']
+ if rc['invalidity_date']:
+ result['invalidity_date'] = crypto_utils.get_relative_time_option(
+ rc['invalidity_date'],
+ path_prefix + 'invalidity_date'
+ )
+ result['invalidity_date_critical'] = rc['invalidity_date_critical']
+ self.revoked_certificates.append(result)
+
+ self.module = module
+
+ self.backup = module.params['backup']
+ self.backup_file = None
+
+ try:
+ self.privatekey = crypto_utils.load_privatekey(
+ path=self.privatekey_path,
+ content=self.privatekey_content,
+ passphrase=self.privatekey_passphrase,
+ backend='cryptography'
+ )
+ except crypto_utils.OpenSSLBadPassphraseError as exc:
+ raise CRLError(exc)
+
+ self.crl = None
+ try:
+ with open(self.path, 'rb') as f:
+ data = f.read()
+ self.crl = x509.load_pem_x509_crl(data, default_backend())
+ if self.return_content:
+ self.crl_content = data
+ except Exception as dummy:
+ self.crl_content = None
+
+ def remove(self):
+ if self.backup:
+ self.backup_file = self.module.backup_local(self.path)
+ super(CRL, self).remove(self.module)
+
+ def _compress_entry(self, entry):
+ if self.ignore_timestamps:
+ # Throw out revocation_date
+ return (
+ entry['serial_number'],
+ tuple(entry['issuer']) if entry['issuer'] is not None else None,
+ entry['issuer_critical'],
+ entry['reason'],
+ entry['reason_critical'],
+ entry['invalidity_date'],
+ entry['invalidity_date_critical'],
+ )
+ else:
+ return (
+ entry['serial_number'],
+ entry['revocation_date'],
+ tuple(entry['issuer']) if entry['issuer'] is not None else None,
+ entry['issuer_critical'],
+ entry['reason'],
+ entry['reason_critical'],
+ entry['invalidity_date'],
+ entry['invalidity_date_critical'],
+ )
+
+ def check(self, perms_required=True):
+ """Ensure the resource is in its desired state."""
+
+ state_and_perms = super(CRL, self).check(self.module, perms_required)
+
+ if not state_and_perms:
+ return False
+
+ if self.crl is None:
+ return False
+
+ if self.last_update != self.crl.last_update and not self.ignore_timestamps:
+ return False
+ if self.next_update != self.crl.next_update and not self.ignore_timestamps:
+ return False
+ if self.digest.name != self.crl.signature_hash_algorithm.name:
+ return False
+
+ want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
+ if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
+ return False
+
+ old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
+ new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
+ if self.update:
+ # We don't simply use a set so that duplicate entries are treated correctly
+ for entry in new_entries:
+ try:
+ old_entries.remove(entry)
+ except ValueError:
+ return False
+ else:
+ if old_entries != new_entries:
+ return False
+
+ return True
+
+ def _generate_crl(self):
+ backend = default_backend()
+ crl = CertificateRevocationListBuilder()
+
+ try:
+ crl = crl.issuer_name(Name([
+ NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), to_text(entry[1]))
+ for entry in self.issuer
+ ]))
+ except ValueError as e:
+ raise CRLError(e)
+
+ crl = crl.last_update(self.last_update)
+ crl = crl.next_update(self.next_update)
+
+ if self.update and self.crl:
+ new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
+ for entry in self.crl:
+ decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
+ if decoded_entry not in new_entries:
+ crl = crl.add_revoked_certificate(entry)
+ for entry in self.revoked_certificates:
+ revoked_cert = RevokedCertificateBuilder()
+ revoked_cert = revoked_cert.serial_number(entry['serial_number'])
+ revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
+ if entry['issuer'] is not None:
+ revoked_cert = revoked_cert.add_extension(
+ x509.CertificateIssuer([
+ crypto_utils.cryptography_get_name(name) for name in entry['issuer']
+ ]),
+ entry['issuer_critical']
+ )
+ if entry['reason'] is not None:
+ revoked_cert = revoked_cert.add_extension(
+ x509.CRLReason(entry['reason']),
+ entry['reason_critical']
+ )
+ if entry['invalidity_date'] is not None:
+ revoked_cert = revoked_cert.add_extension(
+ x509.InvalidityDate(entry['invalidity_date']),
+ entry['invalidity_date_critical']
+ )
+ crl = crl.add_revoked_certificate(revoked_cert.build(backend))
+
+ self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
+ return self.crl.public_bytes(Encoding.PEM)
+
+ def generate(self):
+ if not self.check(perms_required=False) or self.force:
+ result = self._generate_crl()
+ if self.return_content:
+ self.crl_content = result
+ if self.backup:
+ self.backup_file = self.module.backup_local(self.path)
+ crypto_utils.write_file(self.module, result)
+ self.changed = True
+
+ file_args = self.module.load_file_common_arguments(self.module.params)
+ if self.module.set_fs_attributes_if_different(file_args, False):
+ self.changed = True
+
+ def _dump_revoked(self, entry):
+ return {
+ 'serial_number': entry['serial_number'],
+ 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
+ 'issuer':
+ [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
+ if entry['issuer'] is not None else None,
+ 'issuer_critical': entry['issuer_critical'],
+ 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
+ 'reason_critical': entry['reason_critical'],
+ 'invalidity_date':
+ entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
+ if entry['invalidity_date'] is not None else None,
+ 'invalidity_date_critical': entry['invalidity_date_critical'],
+ }
+
+ def dump(self, check_mode=False):
+ result = {
+ 'changed': self.changed,
+ 'filename': self.path,
+ 'privatekey': self.privatekey_path,
+ 'last_update': None,
+ 'next_update': None,
+ 'digest': None,
+ 'issuer_ordered': None,
+ 'issuer': None,
+ 'revoked_certificates': [],
+ }
+ if self.backup_file:
+ result['backup_file'] = self.backup_file
+
+ if check_mode:
+ result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
+ result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
+ # result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+ result['digest'] = self.module.params['digest']
+ result['issuer_ordered'] = self.issuer
+ result['issuer'] = {}
+ for k, v in self.issuer:
+ result['issuer'][k] = v
+ result['revoked_certificates'] = []
+ for entry in self.revoked_certificates:
+ result['revoked_certificates'].append(self._dump_revoked(entry))
+ elif self.crl:
+ result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
+ result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
+ try:
+ result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+ except AttributeError:
+ # Older cryptography versions don't have signature_algorithm_oid yet
+ dotted = crypto_utils._obj2txt(
+ self.crl._backend._lib,
+ self.crl._backend._ffi,
+ self.crl._x509_crl.sig_alg.algorithm
+ )
+ oid = x509.oid.ObjectIdentifier(dotted)
+ result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
+ issuer = []
+ for attribute in self.crl.issuer:
+ issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
+ result['issuer_ordered'] = issuer
+ result['issuer'] = {}
+ for k, v in issuer:
+ result['issuer'][k] = v
+ result['revoked_certificates'] = []
+ for cert in self.crl:
+ entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
+ result['revoked_certificates'].append(self._dump_revoked(entry))
+
+ if self.return_content:
+ result['crl'] = self.crl_content
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mode=dict(type='str', default='generate', choices=['generate', 'update']),
+ force=dict(type='bool', default=False),
+ backup=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ privatekey_path=dict(type='path'),
+ privatekey_content=dict(type='str'),
+ privatekey_passphrase=dict(type='str', no_log=True),
+ issuer=dict(type='dict'),
+ last_update=dict(type='str', default='+0s'),
+ next_update=dict(type='str'),
+ digest=dict(type='str', default='sha256'),
+ ignore_timestamps=dict(type='bool', default=False),
+ return_content=dict(type='bool', default=False),
+ revoked_certificates=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ path=dict(type='path'),
+ content=dict(type='str'),
+ serial_number=dict(type='int'),
+ revocation_date=dict(type='str', default='+0s'),
+ issuer=dict(type='list', elements='str'),
+ issuer_critical=dict(type='bool', default=False),
+ reason=dict(
+ type='str',
+ choices=[
+ 'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
+ 'superseded', 'cessation_of_operation', 'certificate_hold',
+ 'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
+ ]
+ ),
+ reason_critical=dict(type='bool', default=False),
+ invalidity_date=dict(type='str'),
+ invalidity_date_critical=dict(type='bool', default=False),
+ ),
+ required_one_of=[['path', 'content', 'serial_number']],
+ mutually_exclusive=[['path', 'content', 'serial_number']],
+ ),
+ ),
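+ # required_if entries below use the 4-tuple form (key, value, requirements, is_one_of):
+ # with is_one_of=True any single listed option satisfies the requirement,
+ # with False all listed options are required.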
+ required_if=[
+ ('state', 'present', ['privatekey_path', 'privatekey_content'], True),
+ ('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
+ ],
+ mutually_exclusive=(
+ ['privatekey_path', 'privatekey_content'],
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+
+ try:
+ crl = CRL(module)
+
+ if module.params['state'] == 'present':
+ if module.check_mode:
+ result = crl.dump(check_mode=True)
+ result['changed'] = module.params['force'] or not crl.check()
+ module.exit_json(**result)
+
+ crl.generate()
+ else:
+ if module.check_mode:
+ result = crl.dump(check_mode=True)
+ result['changed'] = os.path.exists(module.params['path'])
+ module.exit_json(**result)
+
+ crl.remove()
+
+ result = crl.dump()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/x509_crl_info.py b/test/support/integration/plugins/modules/x509_crl_info.py
new file mode 100644
index 00000000..b61db26f
--- /dev/null
+++ b/test/support/integration/plugins/modules/x509_crl_info.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: x509_crl_info
+version_added: "2.10"
+short_description: Retrieve information on Certificate Revocation Lists (CRLs)
+description:
+ - This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
+requirements:
+ - cryptography >= 1.2
+author:
+ - Felix Fontein (@felixfontein)
+options:
+ path:
+ description:
+ - Remote absolute path where the CRL file is located.
+ - Either I(path) or I(content) must be specified, but not both.
+ type: path
+ content:
+ description:
+ - Content of the X.509 CRL in PEM format.
+ - Either I(path) or I(content) must be specified, but not both.
+ type: str
+
+notes:
+ - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
+ They are all in UTC.
+seealso:
+ - module: x509_crl
+'''
+
+EXAMPLES = r'''
+- name: Get information on CRL
+ x509_crl_info:
+ path: /etc/ssl/my-ca.crl
+ register: result
+
+- debug:
+ msg: "{{ result }}"
+'''
+
+RETURN = r'''
+issuer:
+ description:
+ - The CRL's issuer.
+ - Note that for repeated values, only the last one will be returned.
+ returned: success
+ type: dict
+ sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
+issuer_ordered:
+ description: The CRL's issuer as an ordered list of tuples.
+ returned: success
+ type: list
+ elements: list
+ sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
+last_update:
+ description: The point in time from which this CRL can be trusted as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+next_update:
+ description: The point in time by which a new CRL will be issued, and by which the client has to check for it, as ASN.1 TIME.
+ returned: success
+ type: str
+ sample: 20190413202428Z
+digest:
+ description: The signature algorithm used to sign the CRL.
+ returned: success
+ type: str
+ sample: sha256WithRSAEncryption
+revoked_certificates:
+ description: List of certificates to be revoked.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ serial_number:
+ description: Serial number of the certificate.
+ type: int
+ sample: 1234
+ revocation_date:
+ description: The point in time the certificate was revoked as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ issuer:
+ description: The certificate's issuer.
+ type: list
+ elements: str
+ sample: '["DNS:ca.example.org"]'
+ issuer_critical:
+ description: Whether the certificate issuer extension is critical.
+ type: bool
+ sample: no
+ reason:
+ description:
+ - The value for the revocation reason extension.
+ - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
+ C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
+ C(remove_from_crl).
+ type: str
+ sample: key_compromise
+ reason_critical:
+ description: Whether the revocation reason extension is critical.
+ type: bool
+ sample: no
+ invalidity_date:
+ description: |
+ The point in time it was known/suspected that the private key was compromised
+ or that the certificate otherwise became invalid as ASN.1 TIME.
+ type: str
+ sample: 20190413202428Z
+ invalidity_date_critical:
+ description: Whether the invalidity date extension is critical.
+ type: bool
+ sample: no
+'''
+
+
+import traceback
+from distutils.version import LooseVersion
+
+from ansible.module_utils import crypto as crypto_utils
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
+
+CRYPTOGRAPHY_IMP_ERR = None
+try:
+ import cryptography
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+ CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
+except ImportError:
+ CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
+ CRYPTOGRAPHY_FOUND = False
+else:
+ CRYPTOGRAPHY_FOUND = True
+
+
+TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
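+
+# Illustrative note (not part of the original module): ASN.1 TIME values such
+# as '20190413202428Z' are produced with this format string and can be parsed
+# back into naive UTC datetimes the same way, for example:
+# from datetime import datetime
+# datetime.strptime('20190413202428Z', TIMESTAMP_FORMAT)
+# -> datetime.datetime(2019, 4, 13, 20, 24, 28)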
+
+
+class CRLError(crypto_utils.OpenSSLObjectError):
+ pass
+
+
+class CRLInfo(crypto_utils.OpenSSLObject):
+ """The main module implementation."""
+
+ def __init__(self, module):
+ super(CRLInfo, self).__init__(
+ module.params['path'] or '',
+ 'present',
+ False,
+ module.check_mode
+ )
+
+ self.content = module.params['content']
+
+ self.module = module
+
+ self.crl = None
+ if self.content is None:
+ try:
+ with open(self.path, 'rb') as f:
+ data = f.read()
+ except Exception as e:
+ self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e))
+ else:
+ data = self.content.encode('utf-8')
+
+ try:
+ self.crl = x509.load_pem_x509_crl(data, default_backend())
+ except Exception as e:
+ self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e))
+
+ def _dump_revoked(self, entry):
+ return {
+ 'serial_number': entry['serial_number'],
+ 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
+ 'issuer':
+ [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
+ if entry['issuer'] is not None else None,
+ 'issuer_critical': entry['issuer_critical'],
+ 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
+ 'reason_critical': entry['reason_critical'],
+ 'invalidity_date':
+ entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
+ if entry['invalidity_date'] is not None else None,
+ 'invalidity_date_critical': entry['invalidity_date_critical'],
+ }
+
+ def get_info(self):
+ result = {
+ 'changed': False,
+ 'last_update': None,
+ 'next_update': None,
+ 'digest': None,
+ 'issuer_ordered': None,
+ 'issuer': None,
+ 'revoked_certificates': [],
+ }
+
+ result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
+ result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
+ try:
+ result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
+ except AttributeError:
+ # Older cryptography versions don't have signature_algorithm_oid yet
+ dotted = crypto_utils._obj2txt(
+ self.crl._backend._lib,
+ self.crl._backend._ffi,
+ self.crl._x509_crl.sig_alg.algorithm
+ )
+ oid = x509.oid.ObjectIdentifier(dotted)
+ result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
+ issuer = []
+ for attribute in self.crl.issuer:
+ issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
+ result['issuer_ordered'] = issuer
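+ # Collapse the ordered issuer list into a plain dict; for repeated
+ # attribute types only the last value survives, as documented in RETURN.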
+ result['issuer'] = {}
+ for k, v in issuer:
+ result['issuer'][k] = v
+ result['revoked_certificates'] = []
+ for cert in self.crl:
+ entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
+ result['revoked_certificates'].append(self._dump_revoked(entry))
+
+ return result
+
+ def generate(self):
+ # Empty method because crypto_utils.OpenSSLObject wants this
+ pass
+
+ def dump(self):
+ # Empty method because crypto_utils.OpenSSLObject wants this
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path'),
+ content=dict(type='str'),
+ ),
+ required_one_of=(
+ ['path', 'content'],
+ ),
+ mutually_exclusive=(
+ ['path', 'content'],
+ ),
+ supports_check_mode=True,
+ )
+
+ if not CRYPTOGRAPHY_FOUND:
+ module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
+ exception=CRYPTOGRAPHY_IMP_ERR)
+
+ try:
+ crl = CRLInfo(module)
+ result = crl.get_info()
+ module.exit_json(**result)
+ except crypto_utils.OpenSSLObjectError as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/xml.py b/test/support/integration/plugins/modules/xml.py
new file mode 100644
index 00000000..b5b35a38
--- /dev/null
+++ b/test/support/integration/plugins/modules/xml.py
@@ -0,0 +1,966 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Red Hat, Inc.
+# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+- A CRUD-like interface to managing bits of XML files.
+version_added: '2.4'
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless C(xmlstring) is given.
+ type: path
+ required: yes
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ required: yes
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML equivalent: C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ version_added: '2.7'
+ insertbefore:
+ description:
+ - Add additional child-element(s), as given in C(add_children), before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ version_added: '2.8'
+ insertafter:
+ description:
+ - Add additional child-element(s), as given in C(add_children), after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ version_added: '2.8'
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that if your XML elements are namespaced, you need to use the C(namespaces) parameter; see the examples.
+- Namespace prefixes should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: hits
+
+- debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ set_children: []
+
+# In case of namespaces, like in the XML below, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+ sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created.
+ type: str
+ returned: when backup=yes
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the XPath trick for strings that need to contain both ' and "
+# (concatenating substrings wrapped in the other delimiter), especially as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
+
+
+def has_changed(doc):
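+ # Compares a re-serialized (lxml.objectify round-tripped) copy of 'doc'
+ # against the module-level 'orig_doc' snapshot taken in main(), so the
+ # comparison is made on canonicalized output rather than raw input bytes.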
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=True)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # xpaths always return matches as a list, so....
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the elements matched by the given xpath. If insertbefore is True, they are
+ inserted before the first xpath hit; with insertafter, they are inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
+ xpath = xpath.strip()
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
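+
+# Illustrative examples (assumed behavior of the patterns above, not part of
+# the original module):
+# split_xpath_last('/business/rating') -> ('/business', [('rating', None)])
+# split_xpath_last('/business/rating/@subjective') -> ('/business/rating', [('@subjective', None)])
+# split_xpath_last('/business/beers/beer[text()="Schlitz"]') -> ('/business/beers', [('/beer', ['text()="Schlitz"'])])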
+
+
+def nsnameToClark(name, namespaces):
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
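+
+# Illustrative example (not part of the original module):
+# nsnameToClark('y:baz', {'y': 'http://y.test'}) -> '{http://y.test}baz'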
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+ # NOTE: Modifying a string is not considered a change!
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list'),
+ set_children=dict(type='list'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ # TODO: Reinstate this in Ansible v2.12 when we have deprecated the incorrect use below
+ # attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Report wrongly used attribute parameter when using content=attribute
+ # TODO: Remove this in Ansible v2.12 (and reinstate strict parameter test above) and remove the integration test example
+ if content == 'attribute' and attribute is not None:
+ module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute,
+ '2.12', collection_name='ansible.builtin')
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/zypper.py b/test/support/integration/plugins/modules/zypper.py
new file mode 100644
index 00000000..bfb31819
--- /dev/null
+++ b/test/support/integration/plugins/modules/zypper.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+version_added: "1.2"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+ - You can also pass a URL or a local path to an RPM file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade ]
+ default: "present"
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ version_added: "2.0"
+ extra_args_precommand:
+ version_added: "2.6"
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG signature checking of the package
+ being installed. Has an effect only if state is
+ I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ version_added: "1.8"
+ description:
+ - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) adds C(--no-recommends) so recommended
+ packages are not installed; C(no) lets zypper install recommended packages.
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ version_added: "2.2"
+ description:
+ - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ version_added: "2.10"
+ description:
+ - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ update_cache:
+ version_added: "2.2"
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ version_added: "2.2"
+ description:
+ - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+ version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ version_added: "2.4"
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+# Install "nmap"
+- zypper:
+ name: nmap
+ state: present
+
+# Install apache2 with recommended packages
+- zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+# Apply a given patch
+- zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+# Remove the "nmap" package
+- zypper:
+ name: nmap
+ state: absent
+
+# Install the nginx rpm from a remote repo
+- zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+# Install local rpm file
+- zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+# Update all packages
+- zypper:
+ name: '*'
+ state: latest
+
+# Apply all available patches
+- zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+# Perform a dist-upgrade with additional arguments
+- zypper:
+ name: '*'
+ state: dist-upgrade
+ extra_args: '--no-allow-vendor-change --allow-arch-change'
+
+# Refresh repositories and update package "openssl"
+- zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+# Install specific version (possible comparisons: <, >, <=, >=, =)
+- zypper:
+ name: 'docker>=1.10'
+ state: present
+
+# Wait 20 seconds to acquire the lock before failing
+- zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
+'''
+
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating removal ("-", "~") or installation ("+").
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
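+
+# Illustrative examples (not part of the original module):
+# split_name_version('docker>=1.10') -> ('', 'docker', '>=1.10')
+# split_name_version('-nmap') -> ('-', 'nmap', '')
+# split_name_version('~nmap') -> ('-', 'nmap', '') # '~' is an alias for '-'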
+
+
+def get_want_state(names, remove=False):
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
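+
+# Illustrative example (not part of the original module):
+# get_want_state(['nmap', '-exim', '/tmp/foo.rpm']) returns
+# ([Package('nmap', '+', ''), Package('exim', '-', '')], ['/tmp/foo.rpm'])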
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+ firstrun = packages is None
+ if firstrun:
+ packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
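+
+# Illustrative example (not part of the original module): with default module
+# parameters, get_cmd(m, 'install') assembles roughly:
+# ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout',
+# 'install', '--type', 'package', '--auto-agree-with-licenses',
+# '--no-recommends']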
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default='no', type='bool'),
+ disable_recommends=dict(required=False, default='yes', type='bool'),
+ force=dict(required=False, default='no', type='bool'),
+ force_resolution=dict(required=False, default='no', type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default='no', type='bool'),
+ oldpackage=dict(required=False, default='no', type='bool'),
+ extra_args=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py
new file mode 100644
index 00000000..089b339f
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/cli_config.py
@@ -0,0 +1,40 @@
+#
+# Copyright 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.action.network import (
+ ActionModule as ActionNetworkModule,
+)
+
+
+class ActionModule(ActionNetworkModule):
+ def run(self, tmp=None, task_vars=None):
+ del tmp # tmp no longer has any effect
+
+ self._config_module = True
+ if self._play_context.connection.split(".")[-1] != "network_cli":
+ return {
+ "failed": True,
+ "msg": "Connection type %s is not valid for cli_config module"
+ % self._play_context.connection,
+ }
+
+ return super(ActionModule, self).run(task_vars=task_vars)
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py
new file mode 100644
index 00000000..542dcfef
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py
@@ -0,0 +1,90 @@
+# Copyright: (c) 2015, Ansible Inc,
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import copy
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ del tmp # tmp no longer has any effect
+
+ result = {}
+ play_context = copy.deepcopy(self._play_context)
+ play_context.network_os = self._get_network_os(task_vars)
+ new_task = self._task.copy()
+
+ module = self._get_implementation_module(
+ play_context.network_os, self._task.action
+ )
+ if not module:
+ if self._task.args["fail_on_missing_module"]:
+ result["failed"] = True
+ else:
+ result["failed"] = False
+
+ result["msg"] = (
+ "Could not find implementation module %s for %s"
+ % (self._task.action, play_context.network_os)
+ )
+ return result
+
+ new_task.action = module
+
+ action = self._shared_loader_obj.action_loader.get(
+ play_context.network_os,
+ task=new_task,
+ connection=self._connection,
+ play_context=play_context,
+ loader=self._loader,
+ templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+ display.vvvv("Running implementation module %s" % module)
+ return action.run(task_vars=task_vars)
+
+ def _get_network_os(self, task_vars):
+ if "network_os" in self._task.args and self._task.args["network_os"]:
+ display.vvvv("Getting network OS from task argument")
+ network_os = self._task.args["network_os"]
+ elif self._play_context.network_os:
+ display.vvvv("Getting network OS from inventory")
+ network_os = self._play_context.network_os
+ elif (
+ "network_os" in task_vars.get("ansible_facts", {})
+ and task_vars["ansible_facts"]["network_os"]
+ ):
+ display.vvvv("Getting network OS from fact")
+ network_os = task_vars["ansible_facts"]["network_os"]
+ else:
+ raise AnsibleError(
+ "ansible_network_os must be specified on this host to use platform agnostic modules"
+ )
+
+ return network_os
+
+ def _get_implementation_module(self, network_os, platform_agnostic_module):
+ module_name = (
+ network_os.split(".")[-1]
+ + "_"
+ + platform_agnostic_module.partition("_")[2]
+ )
+ if "." in network_os:
+ fqcn_module = ".".join(network_os.split(".")[0:-1])
+ implementation_module = fqcn_module + "." + module_name
+ else:
+ implementation_module = module_name
+
+ if implementation_module not in self._shared_loader_obj.module_loader:
+ implementation_module = None
+
+ return implementation_module
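To see what _get_implementation_module above computes: the last component of network_os is joined to the part of the platform-agnostic module name after the first underscore, and any collection namespace is carried over. A standalone sketch with illustrative platform and module names:

    def resolve(network_os, agnostic_module):
        module_name = (
            network_os.split(".")[-1] + "_" + agnostic_module.partition("_")[2]
        )
        if "." in network_os:
            return ".".join(network_os.split(".")[:-1] + [module_name])
        return module_name

    assert resolve("cisco.ios.ios", "net_banner") == "cisco.ios.ios_banner"
    assert resolve("ios", "net_banner") == "ios_banner"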
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py
new file mode 100644
index 00000000..40205a46
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_get.py
@@ -0,0 +1,199 @@
+# (c) 2018, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+import re
+import uuid
+import hashlib
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ socket_path = None
+ self._get_network_os(task_vars)
+ persistent_connection = self._play_context.connection.split(".")[-1]
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+
+ if persistent_connection != "network_cli":
+ # It is supported only with network_cli
+ result["failed"] = True
+ result["msg"] = (
+ "connection type %s is not valid for net_get module,"
+ " please use fully qualified name of network_cli connection type"
+ % self._play_context.connection
+ )
+ return result
+
+ try:
+ src = self._task.args["src"]
+ except KeyError as exc:
+ return {
+ "failed": True,
+ "msg": "missing required argument: %s" % exc,
+ }
+
+ # Get destination file if specified
+ dest = self._task.args.get("dest")
+
+ if dest is None:
+ dest = self._get_default_dest(src)
+ else:
+ dest = self._handle_dest_path(dest)
+
+ # Get proto
+ proto = self._task.args.get("protocol")
+ if proto is None:
+ proto = "scp"
+
+ if socket_path is None:
+ socket_path = self._connection.socket_path
+
+ conn = Connection(socket_path)
+ sock_timeout = conn.get_option("persistent_command_timeout")
+
+ try:
+ changed = self._handle_existing_file(
+ conn, src, dest, proto, sock_timeout
+ )
+ if changed is False:
+ result["changed"] = changed
+ result["destination"] = dest
+ return result
+        except Exception as exc:
+            # If the check itself fails, assume a transfer is needed and continue.
+            changed = True
+            result["msg"] = (
+                "Warning: %s idempotency check failed. Check dest" % exc
+            )
+
+ try:
+ conn.get_file(
+ source=src, destination=dest, proto=proto, timeout=sock_timeout
+ )
+ except Exception as exc:
+ result["failed"] = True
+ result["msg"] = "Exception received: %s" % exc
+
+ result["changed"] = changed
+ result["destination"] = dest
+ return result
+
+ def _handle_dest_path(self, dest):
+ working_path = self._get_working_path()
+
+        if os.path.isabs(dest) or urlsplit(dest).scheme:
+ dst = dest
+ else:
+ dst = self._loader.path_dwim_relative(working_path, "", dest)
+
+ return dst
+
+ def _get_src_filename_from_path(self, src_path):
+ filename_list = re.split("/|:", src_path)
+ return filename_list[-1]
+
+ def _get_default_dest(self, src_path):
+ dest_path = self._get_working_path()
+ src_fname = self._get_src_filename_from_path(src_path)
+ filename = "%s/%s" % (dest_path, src_fname)
+ return filename
+
+ def _handle_existing_file(self, conn, source, dest, proto, timeout):
+ """
+ Determines whether the source and destination file match.
+
+ :return: False if source and dest both exist and have matching sha1 sums, True otherwise.
+ """
+ if not os.path.exists(dest):
+ return True
+
+ cwd = self._loader.get_basedir()
+ filename = str(uuid.uuid4())
+ tmp_dest_file = os.path.join(cwd, filename)
+ try:
+ conn.get_file(
+ source=source,
+ destination=tmp_dest_file,
+ proto=proto,
+ timeout=timeout,
+ )
+ except ConnectionError as exc:
+ error = to_text(exc)
+ if error.endswith("No such file or directory"):
+ if os.path.exists(tmp_dest_file):
+ os.remove(tmp_dest_file)
+ return True
+
+ try:
+ with open(tmp_dest_file, "r") as f:
+ new_content = f.read()
+ with open(dest, "r") as f:
+ old_content = f.read()
+ except (IOError, OSError):
+ os.remove(tmp_dest_file)
+ raise
+
+ sha1 = hashlib.sha1()
+ old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
+ sha1.update(old_content_b)
+ checksum_old = sha1.digest()
+
+ sha1 = hashlib.sha1()
+ new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
+ sha1.update(new_content_b)
+ checksum_new = sha1.digest()
+ os.remove(tmp_dest_file)
+ if checksum_old == checksum_new:
+ return False
+ return True
+
+ def _get_working_path(self):
+ cwd = self._loader.get_basedir()
+ if self._task._role is not None:
+ cwd = self._task._role._role_path
+ return cwd
+
+ def _get_network_os(self, task_vars):
+ if "network_os" in self._task.args and self._task.args["network_os"]:
+ display.vvvv("Getting network OS from task argument")
+ network_os = self._task.args["network_os"]
+ elif self._play_context.network_os:
+ display.vvvv("Getting network OS from inventory")
+ network_os = self._play_context.network_os
+ elif (
+ "network_os" in task_vars.get("ansible_facts", {})
+ and task_vars["ansible_facts"]["network_os"]
+ ):
+ display.vvvv("Getting network OS from fact")
+ network_os = task_vars["ansible_facts"]["network_os"]
+ else:
+ raise AnsibleError(
+ "ansible_network_os must be specified on this host"
+ )
+
+ return network_os
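The idempotency check in _handle_existing_file above reduces to comparing SHA-1 digests of the two files' contents; only a mismatch triggers a new transfer. A standalone sketch of the comparison:

    import hashlib

    def same_content(old_text, new_text):
        # Files are considered identical when their SHA-1 digests match.
        def digest(text):
            return hashlib.sha1(text.encode("utf-8")).digest()
        return digest(old_text) == digest(new_text)

    assert same_content("hostname R1\n", "hostname R1\n")
    assert not same_content("hostname R1\n", "hostname R2\n")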
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py
new file mode 100644
index 00000000..955329d4
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_put.py
@@ -0,0 +1,235 @@
+# (c) 2018, Ansible Inc,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+import uuid
+import hashlib
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible.plugins.action import ActionBase
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+ def run(self, tmp=None, task_vars=None):
+ socket_path = None
+ network_os = self._get_network_os(task_vars).split(".")[-1]
+ persistent_connection = self._play_context.connection.split(".")[-1]
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+
+ if persistent_connection != "network_cli":
+ # It is supported only with network_cli
+ result["failed"] = True
+ result["msg"] = (
+ "connection type %s is not valid for net_put module,"
+ " please use fully qualified name of network_cli connection type"
+ % self._play_context.connection
+ )
+ return result
+
+ try:
+ src = self._task.args["src"]
+ except KeyError as exc:
+ return {
+ "failed": True,
+ "msg": "missing required argument: %s" % exc,
+ }
+
+ src_file_path_name = src
+
+ # Get destination file if specified
+ dest = self._task.args.get("dest")
+
+ # Get proto
+ proto = self._task.args.get("protocol")
+ if proto is None:
+ proto = "scp"
+
+ # Get mode if set
+ mode = self._task.args.get("mode")
+ if mode is None:
+ mode = "binary"
+
+ if mode == "text":
+ try:
+ self._handle_template(convert_data=False)
+ except ValueError as exc:
+ return dict(failed=True, msg=to_text(exc))
+
+            # Now that src holds the resolved file contents, write it to disk in the current directory for scp
+ src = self._task.args.get("src")
+ filename = str(uuid.uuid4())
+ cwd = self._loader.get_basedir()
+ output_file = os.path.join(cwd, filename)
+ try:
+ with open(output_file, "wb") as f:
+ f.write(to_bytes(src, encoding="utf-8"))
+ except Exception:
+ os.remove(output_file)
+ raise
+ else:
+ try:
+ output_file = self._get_binary_src_file(src)
+ except ValueError as exc:
+ return dict(failed=True, msg=to_text(exc))
+
+ if socket_path is None:
+ socket_path = self._connection.socket_path
+
+ conn = Connection(socket_path)
+ sock_timeout = conn.get_option("persistent_command_timeout")
+
+ if dest is None:
+ dest = src_file_path_name
+
+ try:
+ changed = self._handle_existing_file(
+ conn, output_file, dest, proto, sock_timeout
+ )
+ if changed is False:
+ result["changed"] = changed
+ result["destination"] = dest
+ return result
+        except Exception as exc:
+            # If the check itself fails, assume a transfer is needed and continue.
+            changed = True
+            result["msg"] = (
+                "Warning: %s idempotency check failed. Check dest" % exc
+            )
+
+ try:
+ conn.copy_file(
+ source=output_file,
+ destination=dest,
+ proto=proto,
+ timeout=sock_timeout,
+ )
+ except Exception as exc:
+ if to_text(exc) == "No response from server":
+ if network_os == "iosxr":
+ # IOSXR sometimes closes socket prematurely after completion
+ # of file transfer
+ result[
+ "msg"
+ ] = "Warning: iosxr scp server pre close issue. Please check dest"
+ else:
+ result["failed"] = True
+ result["msg"] = "Exception received: %s" % exc
+
+ if mode == "text":
+            # Clean up the tmp file expanded with ansible vars
+ os.remove(output_file)
+
+ result["changed"] = changed
+ result["destination"] = dest
+ return result
+
+ def _handle_existing_file(self, conn, source, dest, proto, timeout):
+ """
+ Determines whether the source and destination file match.
+
+ :return: False if source and dest both exist and have matching sha1 sums, True otherwise.
+ """
+ cwd = self._loader.get_basedir()
+ filename = str(uuid.uuid4())
+ tmp_source_file = os.path.join(cwd, filename)
+ try:
+ conn.get_file(
+ source=dest,
+ destination=tmp_source_file,
+ proto=proto,
+ timeout=timeout,
+ )
+ except ConnectionError as exc:
+ error = to_text(exc)
+ if error.endswith("No such file or directory"):
+ if os.path.exists(tmp_source_file):
+ os.remove(tmp_source_file)
+ return True
+
+ try:
+ with open(source, "r") as f:
+ new_content = f.read()
+ with open(tmp_source_file, "r") as f:
+ old_content = f.read()
+ except (IOError, OSError):
+ os.remove(tmp_source_file)
+ raise
+
+ sha1 = hashlib.sha1()
+ old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
+ sha1.update(old_content_b)
+ checksum_old = sha1.digest()
+
+ sha1 = hashlib.sha1()
+ new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
+ sha1.update(new_content_b)
+ checksum_new = sha1.digest()
+ os.remove(tmp_source_file)
+ if checksum_old == checksum_new:
+ return False
+ return True
+
+ def _get_binary_src_file(self, src):
+ working_path = self._get_working_path()
+
+        if os.path.isabs(src) or urlsplit(src).scheme:
+ source = src
+ else:
+ source = self._loader.path_dwim_relative(
+ working_path, "templates", src
+ )
+ if not source:
+ source = self._loader.path_dwim_relative(working_path, src)
+
+ if not os.path.exists(source):
+ raise ValueError("path specified in src not found")
+
+ return source
+
+ def _get_working_path(self):
+ cwd = self._loader.get_basedir()
+ if self._task._role is not None:
+ cwd = self._task._role._role_path
+ return cwd
+
+ def _get_network_os(self, task_vars):
+ if "network_os" in self._task.args and self._task.args["network_os"]:
+ display.vvvv("Getting network OS from task argument")
+ network_os = self._task.args["network_os"]
+ elif self._play_context.network_os:
+ display.vvvv("Getting network OS from inventory")
+ network_os = self._play_context.network_os
+ elif (
+ "network_os" in task_vars.get("ansible_facts", {})
+ and task_vars["ansible_facts"]["network_os"]
+ ):
+ display.vvvv("Getting network OS from fact")
+ network_os = task_vars["ansible_facts"]["network_os"]
+ else:
+ raise AnsibleError(
+ "ansible_network_os must be specified on this host"
+ )
+
+ return network_os
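In text mode above, the templated source is written to a uniquely named temporary file so scp has a concrete file to copy, and the file is deleted after the transfer. A standalone sketch of that round trip (the function name is illustrative):

    import os
    import uuid

    def write_scp_source(rendered_text, cwd="."):
        # A unique name avoids collisions with concurrent task runs.
        output_file = os.path.join(cwd, str(uuid.uuid4()))
        with open(output_file, "wb") as f:
            f.write(rendered_text.encode("utf-8"))
        return output_file

    path = write_scp_source("interface GigabitEthernet0/0\n")
    # ... conn.copy_file(source=path, destination=dest, ...) would run here ...
    os.remove(path)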
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py
new file mode 100644
index 00000000..5d05d338
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/network.py
@@ -0,0 +1,209 @@
+#
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+import time
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.six.moves.urllib.parse import urlsplit
+from ansible.plugins.action.normal import ActionModule as _ActionModule
+from ansible.utils.display import Display
+
+display = Display()
+
+PRIVATE_KEYS_RE = re.compile("__.+__")
+
+
+class ActionModule(_ActionModule):
+ def run(self, task_vars=None):
+ config_module = hasattr(self, "_config_module") and self._config_module
+ if config_module and self._task.args.get("src"):
+ try:
+ self._handle_src_option()
+ except AnsibleError as e:
+ return {"failed": True, "msg": e.message, "changed": False}
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+
+ if (
+ config_module
+ and self._task.args.get("backup")
+ and not result.get("failed")
+ ):
+ self._handle_backup_option(result, task_vars)
+
+ return result
+
+ def _handle_backup_option(self, result, task_vars):
+
+ filename = None
+ backup_path = None
+ try:
+ content = result["__backup__"]
+ except KeyError:
+ raise AnsibleError("Failed while reading configuration backup")
+
+ backup_options = self._task.args.get("backup_options")
+ if backup_options:
+ filename = backup_options.get("filename")
+ backup_path = backup_options.get("dir_path")
+
+ if not backup_path:
+ cwd = self._get_working_path()
+ backup_path = os.path.join(cwd, "backup")
+ if not filename:
+ tstamp = time.strftime(
+ "%Y-%m-%d@%H:%M:%S", time.localtime(time.time())
+ )
+ filename = "%s_config.%s" % (
+ task_vars["inventory_hostname"],
+ tstamp,
+ )
+
+ dest = os.path.join(backup_path, filename)
+ backup_path = os.path.expanduser(
+ os.path.expandvars(
+ to_bytes(backup_path, errors="surrogate_or_strict")
+ )
+ )
+
+ if not os.path.exists(backup_path):
+ os.makedirs(backup_path)
+
+ new_task = self._task.copy()
+ for item in self._task.args:
+ if not item.startswith("_"):
+ new_task.args.pop(item, None)
+
+ new_task.args.update(dict(content=content, dest=dest))
+ copy_action = self._shared_loader_obj.action_loader.get(
+ "copy",
+ task=new_task,
+ connection=self._connection,
+ play_context=self._play_context,
+ loader=self._loader,
+ templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+ copy_result = copy_action.run(task_vars=task_vars)
+ if copy_result.get("failed"):
+ result["failed"] = copy_result["failed"]
+ result["msg"] = copy_result.get("msg")
+ return
+
+ result["backup_path"] = dest
+ if copy_result.get("changed", False):
+ result["changed"] = copy_result["changed"]
+
+ if backup_options and backup_options.get("filename"):
+ result["date"] = time.strftime(
+ "%Y-%m-%d",
+ time.gmtime(os.stat(result["backup_path"]).st_ctime),
+ )
+ result["time"] = time.strftime(
+ "%H:%M:%S",
+ time.gmtime(os.stat(result["backup_path"]).st_ctime),
+ )
+
+ else:
+ result["date"] = tstamp.split("@")[0]
+ result["time"] = tstamp.split("@")[1]
+ result["shortname"] = result["backup_path"][::-1].split(".", 1)[1][
+ ::-1
+ ]
+ result["filename"] = result["backup_path"].split("/")[-1]
+
+ # strip out any keys that have two leading and two trailing
+ # underscore characters
+ for key in list(result.keys()):
+ if PRIVATE_KEYS_RE.match(key):
+ del result[key]
+
+ def _get_working_path(self):
+ cwd = self._loader.get_basedir()
+ if self._task._role is not None:
+ cwd = self._task._role._role_path
+ return cwd
+
+ def _handle_src_option(self, convert_data=True):
+ src = self._task.args.get("src")
+ working_path = self._get_working_path()
+
+        if os.path.isabs(src) or urlsplit(src).scheme:
+ source = src
+ else:
+ source = self._loader.path_dwim_relative(
+ working_path, "templates", src
+ )
+ if not source:
+ source = self._loader.path_dwim_relative(working_path, src)
+
+ if not os.path.exists(source):
+ raise AnsibleError("path specified in src not found")
+
+ try:
+ with open(source, "r") as f:
+ template_data = to_text(f.read())
+ except IOError as e:
+ raise AnsibleError(
+ "unable to load src file {0}, I/O error({1}): {2}".format(
+ source, e.errno, e.strerror
+ )
+ )
+
+ # Create a template search path in the following order:
+ # [working_path, self_role_path, dependent_role_paths, dirname(source)]
+ searchpath = [working_path]
+ if self._task._role is not None:
+ searchpath.append(self._task._role._role_path)
+ if hasattr(self._task, "_block:"):
+ dep_chain = self._task._block.get_dep_chain()
+ if dep_chain is not None:
+ for role in dep_chain:
+ searchpath.append(role._role_path)
+ searchpath.append(os.path.dirname(source))
+ with self._templar.set_temporary_context(searchpath=searchpath):
+ self._task.args["src"] = self._templar.template(
+ template_data, convert_data=convert_data
+ )
+
+ def _get_network_os(self, task_vars):
+ if "network_os" in self._task.args and self._task.args["network_os"]:
+ display.vvvv("Getting network OS from task argument")
+ network_os = self._task.args["network_os"]
+ elif self._play_context.network_os:
+ display.vvvv("Getting network OS from inventory")
+ network_os = self._play_context.network_os
+ elif (
+ "network_os" in task_vars.get("ansible_facts", {})
+ and task_vars["ansible_facts"]["network_os"]
+ ):
+ display.vvvv("Getting network OS from fact")
+ network_os = task_vars["ansible_facts"]["network_os"]
+ else:
+ raise AnsibleError(
+ "ansible_network_os must be specified on this host"
+ )
+
+ return network_os
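When no backup_options are supplied, _handle_backup_option above derives both parts of the destination: a ./backup directory under the working path and a <inventory_hostname>_config.<timestamp> file name. A standalone sketch of that default naming:

    import os
    import time

    def default_backup_dest(cwd, inventory_hostname):
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = "%s_config.%s" % (inventory_hostname, tstamp)
        return os.path.join(cwd, "backup", filename)

    print(default_backup_dest("/tmp", "sw01"))
    # e.g. /tmp/backup/sw01_config.2020-12-19@18:12:24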
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py
new file mode 100644
index 00000000..33938fd1
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/become/enable.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """become: enable
+short_description: Switch to elevated permissions on a network device
+description:
+- This become plugin allows elevated permissions on a remote network device.
+author: ansible (@core)
+options:
+ become_pass:
+ description: password
+ ini:
+ - section: enable_become_plugin
+ key: password
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_enable_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_ENABLE_PASS
+notes:
+- enable is really implemented in the network connection handler and as such can only
+ be used with network connections.
+- This plugin ignores the 'become_exe' and 'become_user' settings as it uses an API
+ and not an executable.
+"""
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = "ansible.netcommon.enable"
+
+ def build_become_command(self, cmd, shell):
+ # enable is implemented inside the network connection plugins
+ return cmd
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py
new file mode 100644
index 00000000..b063ef0d
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/httpapi.py
@@ -0,0 +1,324 @@
+# (c) 2018 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """author: Ansible Networking Team
+connection: httpapi
+short_description: Use httpapi to run command on network appliances
+description:
+- This connection plugin provides a connection to remote devices over an HTTP(S)-based
+  API.
+options:
+ host:
+ description:
+ - Specifies the remote device FQDN or IP address to establish the HTTP(S) connection
+ to.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ port:
+ type: int
+ description:
+ - Specifies the port on the remote device that listens for connections when establishing
+ the HTTP(S) connection.
+ - When unspecified, will pick 80 or 443 based on the value of use_ssl.
+ ini:
+ - section: defaults
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ vars:
+ - name: ansible_httpapi_port
+ network_os:
+ description:
+ - Configures the device platform network operating system. This value is used
+ to load the correct httpapi plugin to communicate with the remote device
+ vars:
+ - name: ansible_network_os
+ remote_user:
+ description:
+ - The username used to authenticate to the remote device when the API connection
+ is first established. If the remote_user is not specified, the connection will
+ use the username of the logged in user.
+ - Can be configured from the CLI via the C(--user) or C(-u) options.
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ vars:
+ - name: ansible_user
+ password:
+ description:
+ - Configures the user password used to authenticate to the remote device when
+ needed for the device API.
+ vars:
+ - name: ansible_password
+ - name: ansible_httpapi_pass
+ - name: ansible_httpapi_password
+ use_ssl:
+ type: boolean
+ description:
+ - Whether to connect using SSL (HTTPS) or not (HTTP).
+ default: false
+ vars:
+ - name: ansible_httpapi_use_ssl
+ validate_certs:
+ type: boolean
+ description:
+ - Whether to validate SSL certificates
+ default: true
+ vars:
+ - name: ansible_httpapi_validate_certs
+ use_proxy:
+ type: boolean
+ description:
+ - Whether to use https_proxy for requests.
+ default: true
+ vars:
+ - name: ansible_httpapi_use_proxy
+ become:
+ type: boolean
+ description:
+ - The become option will instruct the CLI session to attempt privilege escalation
+ on platforms that support it. Normally this means transitioning from user mode
+ to C(enable) mode in the CLI session. If become is set to True and the remote
+ device does not support privilege escalation or the privilege has already been
+ elevated, then this option is silently ignored.
+ - Can be configured from the CLI via the C(--become) or C(-b) options.
+ default: false
+ ini:
+ - section: privilege_escalation
+ key: become
+ env:
+ - name: ANSIBLE_BECOME
+ vars:
+ - name: ansible_become
+ become_method:
+ description:
+    - This option allows the become method to be specified for handling privilege
+ escalation. Typically the become_method value is set to C(enable) but could
+ be defined as other values.
+ default: sudo
+ ini:
+ - section: privilege_escalation
+ key: become_method
+ env:
+ - name: ANSIBLE_BECOME_METHOD
+ vars:
+ - name: ansible_become_method
+ persistent_connect_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait when trying to initially
+ establish a persistent connection. If this value expires before the connection
+ to the remote device is completed, the connection will fail.
+ default: 30
+ ini:
+ - section: persistent_connection
+ key: connect_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
+ vars:
+ - name: ansible_connect_timeout
+ persistent_command_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait for a command to return from
+ the remote device. If this timer is exceeded before the command returns, the
+ connection plugin will raise an exception and close.
+ default: 30
+ ini:
+ - section: persistent_connection
+ key: command_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
+ vars:
+ - name: ansible_command_timeout
+ persistent_log_messages:
+ type: boolean
+ description:
+    - This flag will enable logging the command executed and the response received from
+      the target device in the ansible log file. For this option to work, the 'log_path'
+      ansible configuration option is required to be set to a file path with write access.
+    - Be sure to fully understand the security implications of enabling this option,
+      as it could create a security vulnerability by logging sensitive information
+      in the log file.
+ default: false
+ ini:
+ - section: persistent_connection
+ key: log_messages
+ env:
+ - name: ANSIBLE_PERSISTENT_LOG_MESSAGES
+ vars:
+ - name: ansible_persistent_log_messages
+"""
+
+from io import BytesIO
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves import cPickle
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import httpapi_loader
+from ansible.plugins.connection import NetworkConnectionBase, ensure_connect
+
+
+class Connection(NetworkConnectionBase):
+ """Network API connection"""
+
+ transport = "ansible.netcommon.httpapi"
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(
+ play_context, new_stdin, *args, **kwargs
+ )
+
+ self._url = None
+ self._auth = None
+
+ if self._network_os:
+
+ self.httpapi = httpapi_loader.get(self._network_os, self)
+ if self.httpapi:
+ self._sub_plugin = {
+ "type": "httpapi",
+ "name": self.httpapi._load_name,
+ "obj": self.httpapi,
+ }
+ self.queue_message(
+ "vvvv",
+ "loaded API plugin %s from path %s for network_os %s"
+ % (
+ self.httpapi._load_name,
+ self.httpapi._original_path,
+ self._network_os,
+ ),
+ )
+ else:
+ raise AnsibleConnectionFailure(
+ "unable to load API plugin for network_os %s"
+ % self._network_os
+ )
+
+ else:
+ raise AnsibleConnectionFailure(
+ "Unable to automatically determine host network os. Please "
+ "manually configure ansible_network_os value for this host"
+ )
+ self.queue_message("log", "network_os is set to %s" % self._network_os)
+
+ def update_play_context(self, pc_data):
+ """Updates the play context information for the connection"""
+ pc_data = to_bytes(pc_data)
+ if PY3:
+ pc_data = cPickle.loads(pc_data, encoding="bytes")
+ else:
+ pc_data = cPickle.loads(pc_data)
+ play_context = PlayContext()
+ play_context.deserialize(pc_data)
+
+ self.queue_message("vvvv", "updating play_context for connection")
+ if self._play_context.become ^ play_context.become:
+ self.set_become(play_context)
+ if play_context.become is True:
+ self.queue_message("vvvv", "authorizing connection")
+ else:
+ self.queue_message("vvvv", "deauthorizing connection")
+
+ self._play_context = play_context
+
+ def _connect(self):
+ if not self.connected:
+ protocol = "https" if self.get_option("use_ssl") else "http"
+ host = self.get_option("host")
+ port = self.get_option("port") or (
+ 443 if protocol == "https" else 80
+ )
+ self._url = "%s://%s:%s" % (protocol, host, port)
+
+ self.queue_message(
+ "vvv",
+ "ESTABLISH HTTP(S) CONNECTFOR USER: %s TO %s"
+ % (self._play_context.remote_user, self._url),
+ )
+ self.httpapi.set_become(self._play_context)
+ self._connected = True
+
+ self.httpapi.login(
+ self.get_option("remote_user"), self.get_option("password")
+ )
+
+ def close(self):
+ """
+ Close the active session to the device
+ """
+        # only close the connection if it's connected.
+ if self._connected:
+ self.queue_message("vvvv", "closing http(s) connection to device")
+ self.logout()
+
+ super(Connection, self).close()
+
+ @ensure_connect
+ def send(self, path, data, **kwargs):
+ """
+ Sends the command to the device over api
+ """
+ url_kwargs = dict(
+ timeout=self.get_option("persistent_command_timeout"),
+ validate_certs=self.get_option("validate_certs"),
+ use_proxy=self.get_option("use_proxy"),
+ headers={},
+ )
+ url_kwargs.update(kwargs)
+ if self._auth:
+ # Avoid modifying passed-in headers
+ headers = dict(kwargs.get("headers", {}))
+ headers.update(self._auth)
+ url_kwargs["headers"] = headers
+ else:
+ url_kwargs["force_basic_auth"] = True
+ url_kwargs["url_username"] = self.get_option("remote_user")
+ url_kwargs["url_password"] = self.get_option("password")
+
+ try:
+ url = self._url + path
+ self._log_messages(
+ "send url '%s' with data '%s' and kwargs '%s'"
+ % (url, data, url_kwargs)
+ )
+ response = open_url(url, data=data, **url_kwargs)
+ except HTTPError as exc:
+ is_handled = self.handle_httperror(exc)
+ if is_handled is True:
+ return self.send(path, data, **kwargs)
+ elif is_handled is False:
+ raise
+ else:
+ response = is_handled
+ except URLError as exc:
+ raise AnsibleConnectionFailure(
+ "Could not connect to {0}: {1}".format(
+ self._url + path, exc.reason
+ )
+ )
+
+ response_buffer = BytesIO()
+ resp_data = response.read()
+ self._log_messages("received response: '%s'" % resp_data)
+ response_buffer.write(resp_data)
+
+ # Try to assign a new auth token if one is given
+ self._auth = self.update_auth(response, response_buffer) or self._auth
+
+ response_buffer.seek(0)
+
+ return response, response_buffer
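The base URL assembled in _connect above falls back to port 443 or 80 depending on use_ssl when no explicit port is configured. A standalone sketch of that defaulting:

    def build_url(host, port=None, use_ssl=False):
        protocol = "https" if use_ssl else "http"
        # An explicit port wins; otherwise pick the protocol default.
        port = port or (443 if protocol == "https" else 80)
        return "%s://%s:%s" % (protocol, host, port)

    assert build_url("10.0.0.1") == "http://10.0.0.1:80"
    assert build_url("10.0.0.1", use_ssl=True) == "https://10.0.0.1:443"
    assert build_url("10.0.0.1", port=8443, use_ssl=True) == "https://10.0.0.1:8443"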
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py
new file mode 100644
index 00000000..1e2d3caa
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/netconf.py
@@ -0,0 +1,404 @@
+# (c) 2016 Red Hat Inc.
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """author: Ansible Networking Team
+connection: netconf
+short_description: Provides a persistent connection using the netconf protocol
+description:
+- This connection plugin provides a connection to remote devices over the SSH NETCONF
+ subsystem. This connection plugin is typically used by network devices for sending
+ and receiving RPC calls over NETCONF.
+- Note this connection plugin requires ncclient to be installed on the local Ansible
+ controller.
+requirements:
+- ncclient
+options:
+ host:
+ description:
+ - Specifies the remote device FQDN or IP address to establish the SSH connection
+ to.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ port:
+ type: int
+ description:
+ - Specifies the port on the remote device that listens for connections when establishing
+ the SSH connection.
+ default: 830
+ ini:
+ - section: defaults
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ vars:
+ - name: ansible_port
+ network_os:
+ description:
+ - Configures the device platform network operating system. This value is used
+ to load a device specific netconf plugin. If this option is not configured
+ (or set to C(auto)), then Ansible will attempt to guess the correct network_os
+      to use. If it cannot guess a network_os correctly it will use C(default).
+ vars:
+ - name: ansible_network_os
+ remote_user:
+ description:
+ - The username used to authenticate to the remote device when the SSH connection
+ is first established. If the remote_user is not specified, the connection will
+ use the username of the logged in user.
+ - Can be configured from the CLI via the C(--user) or C(-u) options.
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ vars:
+ - name: ansible_user
+ password:
+ description:
+ - Configures the user password used to authenticate to the remote device when
+ first establishing the SSH connection.
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ - name: ansible_netconf_password
+ private_key_file:
+ description:
+ - The private SSH key or certificate file used to authenticate to the remote device
+ when first establishing the SSH connection.
+ ini:
+ - section: defaults
+ key: private_key_file
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_FILE
+ vars:
+ - name: ansible_private_key_file
+ look_for_keys:
+ default: true
+ description:
+ - Enables looking for ssh keys in the usual locations for ssh keys (e.g. :file:`~/.ssh/id_*`).
+ env:
+ - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
+ ini:
+ - section: paramiko_connection
+ key: look_for_keys
+ type: boolean
+ host_key_checking:
+ description: Set this to "False" if you want to avoid host key checking by the
+ underlying tools Ansible uses to connect to the host
+ type: boolean
+ default: true
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ - name: ANSIBLE_NETCONF_HOST_KEY_CHECKING
+ ini:
+ - section: defaults
+ key: host_key_checking
+ - section: paramiko_connection
+ key: host_key_checking
+ vars:
+ - name: ansible_host_key_checking
+ - name: ansible_ssh_host_key_checking
+ - name: ansible_netconf_host_key_checking
+ persistent_connect_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait when trying to initially
+ establish a persistent connection. If this value expires before the connection
+ to the remote device is completed, the connection will fail.
+ default: 30
+ ini:
+ - section: persistent_connection
+ key: connect_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
+ vars:
+ - name: ansible_connect_timeout
+ persistent_command_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait for a command to return from
+ the remote device. If this timer is exceeded before the command returns, the
+ connection plugin will raise an exception and close.
+ default: 30
+ ini:
+ - section: persistent_connection
+ key: command_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
+ vars:
+ - name: ansible_command_timeout
+ netconf_ssh_config:
+ description:
+    - This variable is used to enable a bastion/jump host with the netconf connection. If
+      set to True, the bastion/jump host ssh settings should be present in the ~/.ssh/config
+      file; alternatively, it can be set to a custom ssh configuration file path from which
+      to read the bastion/jump host settings.
+ ini:
+ - section: netconf_connection
+ key: ssh_config
+ version_added: '2.7'
+ env:
+ - name: ANSIBLE_NETCONF_SSH_CONFIG
+ vars:
+ - name: ansible_netconf_ssh_config
+ version_added: '2.7'
+ persistent_log_messages:
+ type: boolean
+ description:
+  - This flag will enable logging the command executed and the response received from
+    the target device in the ansible log file. For this option to work, the 'log_path'
+    ansible configuration option is required to be set to a file path with write access.
+  - Be sure to fully understand the security implications of enabling this option,
+    as it could create a security vulnerability by logging sensitive information
+    in the log file.
+ default: false
+ ini:
+ - section: persistent_connection
+ key: log_messages
+ env:
+ - name: ANSIBLE_PERSISTENT_LOG_MESSAGES
+ vars:
+ - name: ansible_persistent_log_messages
+"""
+
+import os
+import logging
+import json
+
+from ansible.errors import AnsibleConnectionFailure, AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.parsing.convert_bool import (
+ BOOLEANS_TRUE,
+ BOOLEANS_FALSE,
+)
+from ansible.plugins.loader import netconf_loader
+from ansible.plugins.connection import NetworkConnectionBase, ensure_connect
+
+try:
+ from ncclient import manager
+ from ncclient.operations import RPCError
+ from ncclient.transport.errors import SSHUnknownHostError
+ from ncclient.xml_ import to_ele, to_xml
+
+ HAS_NCCLIENT = True
+ NCCLIENT_IMP_ERR = None
+except (
+ ImportError,
+ AttributeError,
+) as err: # paramiko and gssapi are incompatible and raise AttributeError not ImportError
+ HAS_NCCLIENT = False
+ NCCLIENT_IMP_ERR = err
+
+logging.getLogger("ncclient").setLevel(logging.INFO)
+
+
+class Connection(NetworkConnectionBase):
+ """NetConf connections"""
+
+ transport = "ansible.netcommon.netconf"
+ has_pipelining = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(
+ play_context, new_stdin, *args, **kwargs
+ )
+
+ # If network_os is not specified then set the network os to auto
+ # This will be used to trigger the use of guess_network_os when connecting.
+ self._network_os = self._network_os or "auto"
+
+ self.netconf = netconf_loader.get(self._network_os, self)
+ if self.netconf:
+ self._sub_plugin = {
+ "type": "netconf",
+ "name": self.netconf._load_name,
+ "obj": self.netconf,
+ }
+ self.queue_message(
+ "vvvv",
+ "loaded netconf plugin %s from path %s for network_os %s"
+ % (
+ self.netconf._load_name,
+ self.netconf._original_path,
+ self._network_os,
+ ),
+ )
+ else:
+ self.netconf = netconf_loader.get("default", self)
+ self._sub_plugin = {
+ "type": "netconf",
+ "name": "default",
+ "obj": self.netconf,
+ }
+ self.queue_message(
+ "display",
+ "unable to load netconf plugin for network_os %s, falling back to default plugin"
+ % self._network_os,
+ )
+
+ self.queue_message("log", "network_os is set to %s" % self._network_os)
+ self._manager = None
+ self.key_filename = None
+ self._ssh_config = None
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """Sends the request to the node and returns the reply
+        The method accepts two forms of request. The first form is a byte
+        string that represents the XML request to be sent over the netconf session.
+ The second form is a json-rpc (2.0) byte string.
+ """
+ if self._manager:
+ # to_ele operates on native strings
+ request = to_ele(to_native(cmd, errors="surrogate_or_strict"))
+
+ if request is None:
+ return "unable to parse request"
+
+ try:
+ reply = self._manager.rpc(request)
+ except RPCError as exc:
+ error = self.internal_error(
+ data=to_text(to_xml(exc.xml), errors="surrogate_or_strict")
+ )
+ return json.dumps(error)
+
+ return reply.data_xml
+ else:
+ return super(Connection, self).exec_command(cmd, in_data, sudoable)
+
+ @property
+ @ensure_connect
+ def manager(self):
+ return self._manager
+
+ def _connect(self):
+ if not HAS_NCCLIENT:
+ raise AnsibleError(
+ "%s: %s"
+ % (
+ missing_required_lib("ncclient"),
+ to_native(NCCLIENT_IMP_ERR),
+ )
+ )
+
+ self.queue_message("log", "ssh connection done, starting ncclient")
+
+ allow_agent = True
+ if self._play_context.password is not None:
+ allow_agent = False
+ setattr(self._play_context, "allow_agent", allow_agent)
+
+ self.key_filename = (
+ self._play_context.private_key_file
+ or self.get_option("private_key_file")
+ )
+ if self.key_filename:
+ self.key_filename = str(os.path.expanduser(self.key_filename))
+
+ self._ssh_config = self.get_option("netconf_ssh_config")
+ if self._ssh_config in BOOLEANS_TRUE:
+ self._ssh_config = True
+ elif self._ssh_config in BOOLEANS_FALSE:
+ self._ssh_config = None
+
+ # Try to guess the network_os if the network_os is set to auto
+ if self._network_os == "auto":
+ for cls in netconf_loader.all(class_only=True):
+ network_os = cls.guess_network_os(self)
+ if network_os:
+ self.queue_message(
+ "vvv", "discovered network_os %s" % network_os
+ )
+ self._network_os = network_os
+
+        # If we have tried to detect the network_os but were unable to (i.e. network_os
+        # is still 'auto'), then use 'default' as the network_os
+
+ if self._network_os == "auto":
+ # Network os not discovered. Set it to default
+ self.queue_message(
+ "vvv",
+ "Unable to discover network_os. Falling back to default.",
+ )
+ self._network_os = "default"
+ try:
+ ncclient_device_handler = self.netconf.get_option(
+ "ncclient_device_handler"
+ )
+ except KeyError:
+ ncclient_device_handler = "default"
+ self.queue_message(
+ "vvv",
+ "identified ncclient device handler: %s."
+ % ncclient_device_handler,
+ )
+ device_params = {"name": ncclient_device_handler}
+
+ try:
+ port = self._play_context.port or 830
+ self.queue_message(
+ "vvv",
+ "ESTABLISH NETCONF SSH CONNECTION FOR USER: %s on PORT %s TO %s WITH SSH_CONFIG = %s"
+ % (
+ self._play_context.remote_user,
+ port,
+ self._play_context.remote_addr,
+ self._ssh_config,
+ ),
+ )
+ self._manager = manager.connect(
+ host=self._play_context.remote_addr,
+ port=port,
+ username=self._play_context.remote_user,
+ password=self._play_context.password,
+ key_filename=self.key_filename,
+ hostkey_verify=self.get_option("host_key_checking"),
+ look_for_keys=self.get_option("look_for_keys"),
+ device_params=device_params,
+ allow_agent=self._play_context.allow_agent,
+ timeout=self.get_option("persistent_connect_timeout"),
+ ssh_config=self._ssh_config,
+ )
+
+ self._manager._timeout = self.get_option(
+ "persistent_command_timeout"
+ )
+ except SSHUnknownHostError as exc:
+ raise AnsibleConnectionFailure(to_native(exc))
+ except ImportError:
+ raise AnsibleError(
+ "connection=netconf is not supported on {0}".format(
+ self._network_os
+ )
+ )
+
+ if not self._manager.connected:
+ return 1, b"", b"not connected"
+
+ self.queue_message(
+ "log", "ncclient manager object created successfully"
+ )
+
+ self._connected = True
+
+ super(Connection, self)._connect()
+
+ return (
+ 0,
+ to_bytes(self._manager.session_id, errors="surrogate_or_strict"),
+ b"",
+ )
+
+ def close(self):
+ if self._manager:
+ self._manager.close_session()
+ super(Connection, self).close()
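The netconf_ssh_config handling in _connect above is effectively tri-state: boolean-true strings mean "use the default ~/.ssh/config", boolean-false strings disable ssh config entirely, and anything else passes through as a custom config path. A standalone sketch (the truthy/falsy sets are an illustrative subset of ansible's BOOLEANS_TRUE/BOOLEANS_FALSE):

    TRUE_VALUES = frozenset(("y", "yes", "on", "1", "true", "t", True))
    FALSE_VALUES = frozenset(("n", "no", "off", "0", "false", "f", False))

    def normalize_ssh_config(value):
        if value in TRUE_VALUES:
            return True   # ncclient reads the default ~/.ssh/config
        if value in FALSE_VALUES:
            return None   # ssh config disabled entirely
        return value      # treated as a path to a custom ssh config file

    assert normalize_ssh_config("yes") is True
    assert normalize_ssh_config("no") is None
    assert normalize_ssh_config("~/jump_host_ssh_cfg") == "~/jump_host_ssh_cfg"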
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py
new file mode 100644
index 00000000..8abcf8e8
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py
@@ -0,0 +1,924 @@
+# (c) 2016 Red Hat Inc.
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """author: Ansible Networking Team
+connection: network_cli
+short_description: Use network_cli to run command on network appliances
+description:
+- This connection plugin provides a connection to remote devices over SSH and
+  implements a CLI shell. It is typically used for sending and receiving CLI
+  commands to network devices.
+options:
+ host:
+ description:
+ - Specifies the remote device FQDN or IP address to establish the SSH connection
+ to.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ port:
+ type: int
+ description:
+ - Specifies the port on the remote device that listens for connections when establishing
+ the SSH connection.
+ default: 22
+ ini:
+ - section: defaults
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ vars:
+ - name: ansible_port
+ network_os:
+ description:
+ - Configures the device platform network operating system. This value is used
+ to load the correct terminal and cliconf plugins to communicate with the remote
+ device.
+ vars:
+ - name: ansible_network_os
+ remote_user:
+ description:
+ - The username used to authenticate to the remote device when the SSH connection
+ is first established. If the remote_user is not specified, the connection will
+ use the username of the logged in user.
+ - Can be configured from the CLI via the C(--user) or C(-u) options.
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ vars:
+ - name: ansible_user
+ password:
+ description:
+ - Configures the user password used to authenticate to the remote device when
+ first establishing the SSH connection.
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ private_key_file:
+ description:
+ - The private SSH key or certificate file used to authenticate to the remote device
+ when first establishing the SSH connection.
+ ini:
+ - section: defaults
+ key: private_key_file
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_FILE
+ vars:
+ - name: ansible_private_key_file
+ become:
+ type: boolean
+ description:
+ - The become option will instruct the CLI session to attempt privilege escalation
+ on platforms that support it. Normally this means transitioning from user mode
+ to C(enable) mode in the CLI session. If become is set to True and the remote
+ device does not support privilege escalation or the privilege has already been
+ elevated, then this option is silently ignored.
+ - Can be configured from the CLI via the C(--become) or C(-b) options.
+ default: false
+ ini:
+ - section: privilege_escalation
+ key: become
+ env:
+ - name: ANSIBLE_BECOME
+ vars:
+ - name: ansible_become
+ become_method:
+ description:
+    - This option allows the become method to be specified for handling privilege
+ escalation. Typically the become_method value is set to C(enable) but could
+ be defined as other values.
+ default: sudo
+ ini:
+ - section: privilege_escalation
+ key: become_method
+ env:
+ - name: ANSIBLE_BECOME_METHOD
+ vars:
+ - name: ansible_become_method
+ host_key_auto_add:
+ type: boolean
+ description:
+ - By default, Ansible will prompt the user before adding SSH keys to the known
+ hosts file. Since persistent connections such as network_cli run in background
+ processes, the user will never be prompted. By enabling this option, unknown
+ host keys will automatically be added to the known hosts file.
+ - Be sure to fully understand the security implications of enabling this option
+ on production systems as it could create a security vulnerability.
+ default: false
+ ini:
+ - section: paramiko_connection
+ key: host_key_auto_add
+ env:
+ - name: ANSIBLE_HOST_KEY_AUTO_ADD
+ persistent_connect_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait when trying to initially
+ establish a persistent connection. If this value expires before the connection
+ to the remote device is completed, the connection will fail.
+ default: 30
+ ini:
+ - section: persistent_connection
+ key: connect_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
+ vars:
+ - name: ansible_connect_timeout
+ persistent_command_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait for a command to return from
+ the remote device. If this timer is exceeded before the command returns, the
+ connection plugin will raise an exception and close.
+ default: 30
+ ini:
+ - section: persistent_connection
+ key: command_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
+ vars:
+ - name: ansible_command_timeout
+ persistent_buffer_read_timeout:
+ type: float
+ description:
+    - Configures, in seconds, the amount of time to wait for data to be read from
+      the Paramiko channel after the command prompt is matched. This timeout value ensures
+      that the matched command prompt is correct and there is no more data left to be
+      received from the remote host.
+ default: 0.1
+ ini:
+ - section: persistent_connection
+ key: buffer_read_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT
+ vars:
+ - name: ansible_buffer_read_timeout
+ persistent_log_messages:
+ type: boolean
+ description:
+    - This flag will enable logging the command executed and the response received from
+      the target device in the ansible log file. For this option to work, the 'log_path'
+      ansible configuration option is required to be set to a file path with write access.
+    - Be sure to fully understand the security implications of enabling this option,
+      as it could create a security vulnerability by logging sensitive information
+      in the log file.
+ default: false
+ ini:
+ - section: persistent_connection
+ key: log_messages
+ env:
+ - name: ANSIBLE_PERSISTENT_LOG_MESSAGES
+ vars:
+ - name: ansible_persistent_log_messages
+ terminal_stdout_re:
+ type: list
+ elements: dict
+ description:
+ - A single regex pattern or a sequence of patterns along with optional flags to
+ match the command prompt from the received response chunk. This option accepts
+ C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern
+ to match the response and the value of C(flags) is the value accepted by I(flags)
+ argument of I(re.compile) python method to control the way regex is matched
+ with the response, for example I('re.I').
+ vars:
+ - name: ansible_terminal_stdout_re
+ terminal_stderr_re:
+ type: list
+ elements: dict
+ description:
+ - This option provides the regex pattern and optional flags to match the error
+ string from the received response chunk. This option accepts C(pattern) and
+ C(flags) keys. The value of C(pattern) is a python regex pattern to match the
+ response and the value of C(flags) is the value accepted by I(flags) argument
+ of I(re.compile) python method to control the way regex is matched with the
+ response, for example I('re.I').
+ vars:
+ - name: ansible_terminal_stderr_re
+ terminal_initial_prompt:
+ type: list
+ description:
+ - A single regex pattern or a sequence of patterns to evaluate the expected prompt
+ at the time of initial login to the remote host.
+ vars:
+ - name: ansible_terminal_initial_prompt
+ terminal_initial_answer:
+ type: list
+ description:
+ - The answer to reply with if the C(terminal_initial_prompt) is matched. The value
+ can be a single answer or a list of answers for multiple terminal_initial_prompt.
+      In case the login menu has multiple prompts, the sequence of the prompts and expected
+      answers should be in the same order, and the value of I(terminal_prompt_checkall)
+ should be set to I(True) if all the values in C(terminal_initial_prompt) are
+ expected to be matched and set to I(False) if any one login prompt is to be
+ matched.
+ vars:
+ - name: ansible_terminal_initial_answer
+ terminal_initial_prompt_checkall:
+ type: boolean
+ description:
+    - By default the value is set to I(False) and, once any one of the prompts mentioned
+      in the C(terminal_initial_prompt) option is matched, it won't check for other prompts.
+      When set to I(True) it will check for all the prompts mentioned in the C(terminal_initial_prompt)
+      option in the given order, and all the prompts should be received from the remote
+      host; if not, it will result in a timeout.
+ default: false
+ vars:
+ - name: ansible_terminal_initial_prompt_checkall
+ terminal_inital_prompt_newline:
+ type: boolean
+ description:
+    - This boolean flag, when set to I(True), will send a newline in the response
+      if any of the values in I(terminal_initial_prompt) is matched.
+ default: true
+ vars:
+ - name: ansible_terminal_initial_prompt_newline
+ network_cli_retries:
+ description:
+  - Number of attempts to connect to the remote host. The delay time between the retries
+    increases after every attempt by a power of 2 in seconds, until either the maximum
+    attempts are exhausted or any of the C(persistent_command_timeout) or C(persistent_connect_timeout)
+    timers are triggered.
+ default: 3
+ type: integer
+ env:
+ - name: ANSIBLE_NETWORK_CLI_RETRIES
+ ini:
+ - section: persistent_connection
+ key: network_cli_retries
+ vars:
+ - name: ansible_network_cli_retries
+"""
+
+from functools import wraps
+import getpass
+import json
+import logging
+import re
+import os
+import signal
+import socket
+import time
+import traceback
+from io import BytesIO
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves import cPickle
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.connection import NetworkConnectionBase
+from ansible.plugins.loader import (
+ cliconf_loader,
+ terminal_loader,
+ connection_loader,
+)
+
+
+def ensure_connect(func):
+ @wraps(func)
+ def wrapped(self, *args, **kwargs):
+ if not self._connected:
+ self._connect()
+ self.update_cli_prompt_context()
+ return func(self, *args, **kwargs)
+
+ return wrapped
+
+
+class AnsibleCmdRespRecv(Exception):
+ pass
+
+
+class Connection(NetworkConnectionBase):
+ """ CLI (shell) SSH connections on Paramiko """
+
+ transport = "ansible.netcommon.network_cli"
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(
+ play_context, new_stdin, *args, **kwargs
+ )
+ self._ssh_shell = None
+
+ self._matched_prompt = None
+ self._matched_cmd_prompt = None
+ self._matched_pattern = None
+ self._last_response = None
+ self._history = list()
+ self._command_response = None
+ self._last_recv_window = None
+
+ self._terminal = None
+ self.cliconf = None
+ self._paramiko_conn = None
+
+ # Managing prompt context
+ self._check_prompt = False
+ self._task_uuid = to_text(kwargs.get("task_uuid", ""))
+
+ if self._play_context.verbosity > 3:
+ logging.getLogger("paramiko").setLevel(logging.DEBUG)
+
+ if self._network_os:
+ self._terminal = terminal_loader.get(self._network_os, self)
+ if not self._terminal:
+ raise AnsibleConnectionFailure(
+ "network os %s is not supported" % self._network_os
+ )
+
+ self.cliconf = cliconf_loader.get(self._network_os, self)
+ if self.cliconf:
+ self._sub_plugin = {
+ "type": "cliconf",
+ "name": self.cliconf._load_name,
+ "obj": self.cliconf,
+ }
+ self.queue_message(
+ "vvvv",
+ "loaded cliconf plugin %s from path %s for network_os %s"
+ % (
+ self.cliconf._load_name,
+ self.cliconf._original_path,
+ self._network_os,
+ ),
+ )
+ else:
+ self.queue_message(
+ "vvvv",
+ "unable to load cliconf for network_os %s"
+ % self._network_os,
+ )
+ else:
+ raise AnsibleConnectionFailure(
+ "Unable to automatically determine host network os. Please "
+ "manually configure ansible_network_os value for this host"
+ )
+ self.queue_message("log", "network_os is set to %s" % self._network_os)
+
+ @property
+ def paramiko_conn(self):
+ if self._paramiko_conn is None:
+ self._paramiko_conn = connection_loader.get(
+ "paramiko", self._play_context, "/dev/null"
+ )
+ self._paramiko_conn.set_options(
+ direct={
+ "look_for_keys": not bool(
+ self._play_context.password
+ and not self._play_context.private_key_file
+ )
+ }
+ )
+ return self._paramiko_conn
+
+ def _get_log_channel(self):
+ name = "p=%s u=%s | " % (os.getpid(), getpass.getuser())
+ name += "paramiko [%s]" % self._play_context.remote_addr
+ return name
+
+ @ensure_connect
+ def get_prompt(self):
+ """Returns the current prompt from the device"""
+ return self._matched_prompt
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ # this try..except block is just to handle the transition to supporting
+ # network_cli as a toplevel connection. Once connection=local is gone,
+ # this block can be removed as well and all calls passed directly to
+ # the local connection
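+ # A request arriving over the persistent socket is a JSON object; an
+ # illustrative payload (keys other than "command" are optional):
+ #   {"command": "show version", "prompt": "[y/n]", "answer": "y",
+ #    "sendonly": false, "newline": true}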
+ if self._ssh_shell:
+ try:
+ cmd = json.loads(to_text(cmd, errors="surrogate_or_strict"))
+ kwargs = {
+ "command": to_bytes(
+ cmd["command"], errors="surrogate_or_strict"
+ )
+ }
+ for key in (
+ "prompt",
+ "answer",
+ "sendonly",
+ "newline",
+ "prompt_retry_check",
+ ):
+ if cmd.get(key) is True or cmd.get(key) is False:
+ kwargs[key] = cmd[key]
+ elif cmd.get(key) is not None:
+ kwargs[key] = to_bytes(
+ cmd[key], errors="surrogate_or_strict"
+ )
+ return self.send(**kwargs)
+ except ValueError:
+ cmd = to_bytes(cmd, errors="surrogate_or_strict")
+ return self.send(command=cmd)
+
+ else:
+ return super(Connection, self).exec_command(cmd, in_data, sudoable)
+
+ def update_play_context(self, pc_data):
+ """Updates the play context information for the connection"""
+ pc_data = to_bytes(pc_data)
+ if PY3:
+ pc_data = cPickle.loads(pc_data, encoding="bytes")
+ else:
+ pc_data = cPickle.loads(pc_data)
+ play_context = PlayContext()
+ play_context.deserialize(pc_data)
+
+ self.queue_message("vvvv", "updating play_context for connection")
+ if self._play_context.become ^ play_context.become:
+ if play_context.become is True:
+ auth_pass = play_context.become_pass
+ self._terminal.on_become(passwd=auth_pass)
+ self.queue_message("vvvv", "authorizing connection")
+ else:
+ self._terminal.on_unbecome()
+ self.queue_message("vvvv", "deauthorizing connection")
+
+ self._play_context = play_context
+
+ if hasattr(self, "reset_history"):
+ self.reset_history()
+ if hasattr(self, "disable_response_logging"):
+ self.disable_response_logging()
+
+ def set_check_prompt(self, task_uuid):
+ self._check_prompt = task_uuid
+
+ def update_cli_prompt_context(self):
+ # set cli prompt context at the start of new task run only
+ if self._check_prompt and self._task_uuid != self._check_prompt:
+ self._task_uuid, self._check_prompt = self._check_prompt, False
+ self.set_cli_prompt_context()
+
+ def _connect(self):
+ """
+ Connects to the remote device and starts the terminal
+ """
+ if not self.connected:
+ self.paramiko_conn._set_log_channel(self._get_log_channel())
+ self.paramiko_conn.force_persistence = self.force_persistence
+
+ command_timeout = self.get_option("persistent_command_timeout")
+ max_pause = min(
+ [
+ self.get_option("persistent_connect_timeout"),
+ command_timeout,
+ ]
+ )
+ retries = self.get_option("network_cli_retries")
+ total_pause = 0
+
+ for attempt in range(retries + 1):
+ try:
+ ssh = self.paramiko_conn._connect()
+ break
+ except Exception as e:
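+ # exponential backoff between attempts: 2, 4, 8, ... seconds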
+ pause = 2 ** (attempt + 1)
+ if attempt == retries or total_pause >= max_pause:
+ raise AnsibleConnectionFailure(
+ to_text(e, errors="surrogate_or_strict")
+ )
+ else:
+ msg = (
+ u"network_cli_retry: attempt: %d, caught exception(%s), "
+ u"pausing for %d seconds"
+ % (
+ attempt + 1,
+ to_text(e, errors="surrogate_or_strict"),
+ pause,
+ )
+ )
+
+ self.queue_message("vv", msg)
+ time.sleep(pause)
+ total_pause += pause
+ continue
+
+ self.queue_message("vvvv", "ssh connection done, setting terminal")
+ self._connected = True
+
+ self._ssh_shell = ssh.ssh.invoke_shell()
+ self._ssh_shell.settimeout(command_timeout)
+
+ self.queue_message(
+ "vvvv",
+ "loaded terminal plugin for network_os %s" % self._network_os,
+ )
+
+ terminal_initial_prompt = (
+ self.get_option("terminal_initial_prompt")
+ or self._terminal.terminal_initial_prompt
+ )
+ terminal_initial_answer = (
+ self.get_option("terminal_initial_answer")
+ or self._terminal.terminal_initial_answer
+ )
+ newline = (
+ self.get_option("terminal_inital_prompt_newline")
+ or self._terminal.terminal_inital_prompt_newline
+ )
+ check_all = (
+ self.get_option("terminal_initial_prompt_checkall") or False
+ )
+
+ self.receive(
+ prompts=terminal_initial_prompt,
+ answer=terminal_initial_answer,
+ newline=newline,
+ check_all=check_all,
+ )
+
+ if self._play_context.become:
+ self.queue_message("vvvv", "firing event: on_become")
+ auth_pass = self._play_context.become_pass
+ self._terminal.on_become(passwd=auth_pass)
+
+ self.queue_message("vvvv", "firing event: on_open_shell()")
+ self._terminal.on_open_shell()
+
+ self.queue_message(
+ "vvvv", "ssh connection has completed successfully"
+ )
+
+ return self
+
+ def close(self):
+ """
+ Close the active connection to the device
+ """
+ # only close the connection if it's connected.
+ if self._connected:
+ self.queue_message("debug", "closing ssh connection to device")
+ if self._ssh_shell:
+ self.queue_message("debug", "firing event: on_close_shell()")
+ self._terminal.on_close_shell()
+ self._ssh_shell.close()
+ self._ssh_shell = None
+ self.queue_message("debug", "cli session is now closed")
+
+ self.paramiko_conn.close()
+ self._paramiko_conn = None
+ self.queue_message(
+ "debug", "ssh connection has been closed successfully"
+ )
+ super(Connection, self).close()
+
+ def receive(
+ self,
+ command=None,
+ prompts=None,
+ answer=None,
+ newline=True,
+ prompt_retry_check=False,
+ check_all=False,
+ ):
+ """
+ Handles receiving of output from command
+ """
+ self._matched_prompt = None
+ self._matched_cmd_prompt = None
+ recv = BytesIO()
+ handled = False
+ command_prompt_matched = False
+ matched_prompt_window = window_count = 0
+
+ # set terminal regex values for command prompt and errors in response
+ self._terminal_stderr_re = self._get_terminal_std_re(
+ "terminal_stderr_re"
+ )
+ self._terminal_stdout_re = self._get_terminal_std_re(
+ "terminal_stdout_re"
+ )
+
+ cache_socket_timeout = self._ssh_shell.gettimeout()
+ command_timeout = self.get_option("persistent_command_timeout")
+ self._validate_timeout_value(
+ command_timeout, "persistent_command_timeout"
+ )
+ if cache_socket_timeout != command_timeout:
+ self._ssh_shell.settimeout(command_timeout)
+
+ buffer_read_timeout = self.get_option("persistent_buffer_read_timeout")
+ self._validate_timeout_value(
+ buffer_read_timeout, "persistent_buffer_read_timeout"
+ )
+
+ self._log_messages("command: %s" % command)
+ while True:
+ if command_prompt_matched:
+ try:
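+ # arm a one-shot timer; if no further data arrives within
+ # buffer_read_timeout the handler raises AnsibleCmdRespRecv and the
+ # buffered response is returned below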
+ signal.signal(
+ signal.SIGALRM, self._handle_buffer_read_timeout
+ )
+ signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout)
+ data = self._ssh_shell.recv(256)
+ signal.alarm(0)
+ self._log_messages(
+ "response-%s: %s" % (window_count + 1, data)
+ )
+ # if data is still received on the channel, the prompt string was
+ # wrongly matched in between response chunks; continue to read the
+ # remaining response.
+ command_prompt_matched = False
+
+ # restart command_timeout timer
+ signal.signal(signal.SIGALRM, self._handle_command_timeout)
+ signal.alarm(command_timeout)
+
+ except AnsibleCmdRespRecv:
+ # reset socket timeout to global timeout
+ self._ssh_shell.settimeout(cache_socket_timeout)
+ return self._command_response
+ else:
+ data = self._ssh_shell.recv(256)
+ self._log_messages(
+ "response-%s: %s" % (window_count + 1, data)
+ )
+ # when a channel stream is closed, received data will be empty
+ if not data:
+ break
+
+ recv.write(data)
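+ # only the trailing 256 bytes need to be scanned for a prompt;
+ # earlier output was already inspected in previous iterations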
+ offset = recv.tell() - 256 if recv.tell() > 256 else 0
+ recv.seek(offset)
+
+ window = self._strip(recv.read())
+ self._last_recv_window = window
+ window_count += 1
+
+ if prompts and not handled:
+ handled = self._handle_prompt(
+ window, prompts, answer, newline, False, check_all
+ )
+ matched_prompt_window = window_count
+ elif (
+ prompts
+ and handled
+ and prompt_retry_check
+ and matched_prompt_window + 1 == window_count
+ ):
+ # check again even when handled; if the same prompt repeats in the next
+ # window (as in the case of a wrong enable password, etc.) the value of
+ # the answer is wrong, so report this as an error.
+ if self._handle_prompt(
+ window,
+ prompts,
+ answer,
+ newline,
+ prompt_retry_check,
+ check_all,
+ ):
+ raise AnsibleConnectionFailure(
+ "For matched prompt '%s', answer is not valid"
+ % self._matched_cmd_prompt
+ )
+
+ if self._find_prompt(window):
+ self._last_response = recv.getvalue()
+ resp = self._strip(self._last_response)
+ self._command_response = self._sanitize(resp, command)
+ if buffer_read_timeout == 0.0:
+ # reset socket timeout to global timeout
+ self._ssh_shell.settimeout(cache_socket_timeout)
+ return self._command_response
+ else:
+ command_prompt_matched = True
+
+ @ensure_connect
+ def send(
+ self,
+ command,
+ prompt=None,
+ answer=None,
+ newline=True,
+ sendonly=False,
+ prompt_retry_check=False,
+ check_all=False,
+ ):
+ """
+ Sends the command to the device in the opened shell
+ """
+ if check_all:
+ prompt_len = len(to_list(prompt))
+ answer_len = len(to_list(answer))
+ if prompt_len != answer_len:
+ raise AnsibleConnectionFailure(
+ "Number of prompts (%s) is not same as that of answers (%s)"
+ % (prompt_len, answer_len)
+ )
+ try:
+ cmd = b"%s\r" % command
+ self._history.append(cmd)
+ self._ssh_shell.sendall(cmd)
+ self._log_messages("send command: %s" % cmd)
+ if sendonly:
+ return
+ response = self.receive(
+ command, prompt, answer, newline, prompt_retry_check, check_all
+ )
+ return to_text(response, errors="surrogate_then_replace")
+ except (socket.timeout, AttributeError):
+ self.queue_message("error", traceback.format_exc())
+ raise AnsibleConnectionFailure(
+ "timeout value %s seconds reached while trying to send command: %s"
+ % (self._ssh_shell.gettimeout(), command.strip())
+ )
+
+ def _handle_buffer_read_timeout(self, signum, frame):
+ self.queue_message(
+ "vvvv",
+ "Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds"
+ % self.get_option("persistent_buffer_read_timeout"),
+ )
+ raise AnsibleCmdRespRecv()
+
+ def _handle_command_timeout(self, signum, frame):
+ msg = (
+ "command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide."
+ % self.get_option("persistent_command_timeout")
+ )
+ self.queue_message("log", msg)
+ raise AnsibleConnectionFailure(msg)
+
+ def _strip(self, data):
+ """
+ Removes ANSI codes from device response
+ """
+ for regex in self._terminal.ansi_re:
+ data = regex.sub(b"", data)
+ return data
+
+ def _handle_prompt(
+ self,
+ resp,
+ prompts,
+ answer,
+ newline,
+ prompt_retry_check=False,
+ check_all=False,
+ ):
+ """
+ Matches the command prompt and responds
+
+ :arg resp: Byte string containing the raw response from the remote
+ :arg prompts: Sequence of byte strings that we consider prompts for input
+ :arg answer: Sequence of byte strings to send back to the remote if we find a prompt.
+ A carriage return is automatically appended to this string.
+ :param prompt_retry_check: Bool value for trying to detect more prompts
+ :param check_all: Bool value to indicate if all the values in the prompt sequence
+ should be matched or any one of the given prompts.
+ :returns: True if a prompt was found in ``resp``. If check_all is True,
+ returns True only after all the prompts in the prompts list are matched. False otherwise.
+ """
+ single_prompt = False
+ if not isinstance(prompts, list):
+ prompts = [prompts]
+ single_prompt = True
+ if not isinstance(answer, list):
+ answer = [answer]
+ prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts]
+ for index, regex in enumerate(prompts_regex):
+ match = regex.search(resp)
+ if match:
+ self._matched_cmd_prompt = match.group()
+ self._log_messages(
+ "matched command prompt: %s" % self._matched_cmd_prompt
+ )
+
+ # if prompt_retry_check is enabled (checking whether the same prompt is
+ # repeated), don't send the answer again.
+ if not prompt_retry_check:
+ prompt_answer = (
+ answer[index] if len(answer) > index else answer[0]
+ )
+ self._ssh_shell.sendall(b"%s" % prompt_answer)
+ if newline:
+ self._ssh_shell.sendall(b"\r")
+ prompt_answer += b"\r"
+ self._log_messages(
+ "matched command prompt answer: %s" % prompt_answer
+ )
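+ # with check_all, consume the matched prompt/answer pair and keep
+ # waiting until every prompt in the list has been seen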
+ if check_all and prompts and not single_prompt:
+ prompts.pop(0)
+ answer.pop(0)
+ return False
+ return True
+ return False
+
+ def _sanitize(self, resp, command=None):
+ """
+ Removes elements from the response before returning to the caller
+ """
+ cleaned = []
+ for line in resp.splitlines():
+ if command and line.strip() == command.strip():
+ continue
+
+ for prompt in self._matched_prompt.strip().splitlines():
+ if prompt.strip() in line:
+ break
+ else:
+ cleaned.append(line)
+ return b"\n".join(cleaned).strip()
+
+ def _find_prompt(self, response):
+ """Searches the buffered response for a matching command prompt
+ """
+ errored_response = None
+ is_error_message = False
+
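+ # first pass: flag the window as an error if any stderr regex matches,
+ # then look for a trailing prompt so the complete error text is captured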
+ for regex in self._terminal_stderr_re:
+ if regex.search(response):
+ is_error_message = True
+
+ # Check whether the error response ends with the command prompt; if it
+ # does not, keep receiving until the prompt is buffered
+ for regex in self._terminal_stdout_re:
+ match = regex.search(response)
+ if match:
+ errored_response = response
+ self._matched_pattern = regex.pattern
+ self._matched_prompt = match.group()
+ self._log_messages(
+ "matched error regex '%s' from response '%s'"
+ % (self._matched_pattern, errored_response)
+ )
+ break
+
+ if not is_error_message:
+ for regex in self._terminal_stdout_re:
+ match = regex.search(response)
+ if match:
+ self._matched_pattern = regex.pattern
+ self._matched_prompt = match.group()
+ self._log_messages(
+ "matched cli prompt '%s' with regex '%s' from response '%s'"
+ % (
+ self._matched_prompt,
+ self._matched_pattern,
+ response,
+ )
+ )
+ if not errored_response:
+ return True
+
+ if errored_response:
+ raise AnsibleConnectionFailure(errored_response)
+
+ return False
+
+ def _validate_timeout_value(self, timeout, timer_name):
+ if timeout < 0:
+ raise AnsibleConnectionFailure(
+ "'%s' timer value '%s' is invalid, value should be greater than or equal to zero."
+ % (timer_name, timeout)
+ )
+
+ def transport_test(self, connect_timeout):
+ """This method enables wait_for_connection to work.
+
+ As it is used by wait_for_connection, it is called by that module's action plugin,
+ which is on the controller process, which means that nothing done on this instance
+ should impact the actual persistent connection... this check is for informational
+ purposes only and should be properly cleaned up.
+ """
+
+ # Force a fresh connect if for some reason we have connected before.
+ self.close()
+ self._connect()
+ self.close()
+
+ def _get_terminal_std_re(self, option):
+ terminal_std_option = self.get_option(option)
+ terminal_std_re = []
+
+ if terminal_std_option:
+ for item in terminal_std_option:
+ if "pattern" not in item:
+ raise AnsibleConnectionFailure(
+ "'pattern' is a required key for option '%s',"
+ " received option value is %s" % (option, item)
+ )
+ pattern = br"%s" % to_bytes(item["pattern"])
+ flag = item.get("flags", 0)
+ if flag:
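+ # flags are given as strings such as "re.I"; map them back to
+ # the corresponding re module constant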
+ flag = getattr(re, flag.split(".")[1])
+ terminal_std_re.append(re.compile(pattern, flag))
+ else:
+ # To maintain backward compatibility
+ terminal_std_re = getattr(self._terminal, option)
+
+ return terminal_std_re
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py
new file mode 100644
index 00000000..b29b4872
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/persistent.py
@@ -0,0 +1,97 @@
+# 2017 Red Hat Inc.
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """author: Ansible Core Team
+connection: persistent
+short_description: Use a persistent unix socket for connection
+description:
+- This is a helper plugin to allow making other connections persistent.
+options:
+ persistent_command_timeout:
+ type: int
+ description:
+ - Configures, in seconds, the amount of time to wait for a command to return from
+ the remote device. If this timer is exceeded before the command returns, the
+ connection plugin will raise an exception and close the connection.
+ default: 10
+ ini:
+ - section: persistent_connection
+ key: command_timeout
+ env:
+ - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
+ vars:
+ - name: ansible_command_timeout
+"""
+from ansible.executor.task_executor import start_connection
+from ansible.plugins.connection import ConnectionBase
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import Connection as SocketConnection
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ """ Local based connections """
+
+ transport = "ansible.netcommon.persistent"
+ has_pipelining = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(
+ play_context, new_stdin, *args, **kwargs
+ )
+ self._task_uuid = to_text(kwargs.get("task_uuid", ""))
+
+ def _connect(self):
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ display.vvvv(
+ "exec_command(), socket_path=%s" % self.socket_path,
+ host=self._play_context.remote_addr,
+ )
+ connection = SocketConnection(self.socket_path)
+ out = connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
+ return 0, out, ""
+
+ def put_file(self, in_path, out_path):
+ pass
+
+ def fetch_file(self, in_path, out_path):
+ pass
+
+ def close(self):
+ self._connected = False
+
+ def run(self):
+ """Returns the path of the persistent connection socket.
+
+ Attempts to ensure (within playcontext.timeout seconds) that the
+ socket path exists. If the path exists (or the timeout has expired),
+ returns the socket path.
+ """
+ display.vvvv(
+ "starting connection from persistent connection plugin",
+ host=self._play_context.remote_addr,
+ )
+ variables = {
+ "ansible_command_timeout": self.get_option(
+ "persistent_command_timeout"
+ )
+ }
+ socket_path = start_connection(
+ self._play_context, variables, self._task_uuid
+ )
+ display.vvvv(
+ "local domain socket path is %s" % socket_path,
+ host=self._play_context.remote_addr,
+ )
+ setattr(self, "_socket_path", socket_path)
+ return socket_path
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py
new file mode 100644
index 00000000..8789075a
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r"""options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote device over
+ the specified transport. The value of host is used as the destination address
+ for the transport.
+ type: str
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote device. The
+ port value will default to 830.
+ type: int
+ default: 830
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to the remote
+ device. This value is used to authenticate the SSH session. If the value is
+ not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
+ will be used instead.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to the remote device. This
+ value is used to authenticate the SSH session. If the value is not specified
+ in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will
+ be used instead.
+ type: str
+ timeout:
+ description:
+ - Specifies the timeout in seconds for communicating with the network device for
+ either connecting or sending commands. If the timeout is exceeded before the
+ operation is completed, the module will error.
+ type: int
+ default: 10
+ ssh_keyfile:
+ description:
+ - Specifies the SSH key to use to authenticate the connection to the remote device. This
+ value is the path to the key used to authenticate the SSH session. If the value
+ is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
+ will be used instead.
+ type: path
+ hostkey_verify:
+ description:
+ - If set to C(yes), the ssh host key of the device must match an ssh key present
+ on the host. If set to C(no), the ssh host key of the device is not checked.
+ type: bool
+ default: true
+ look_for_keys:
+ description:
+ - Enables looking in the usual locations for the ssh keys (e.g. :file:`~/.ssh/id_*`)
+ type: bool
+ default: true
+notes:
+- For information on using netconf see the :ref:`Platform Options guide using Netconf<netconf_enabled_platform_options>`
+- For more information on using Ansible to manage network devices see the :ref:`Ansible
+ Network Guide <network_guide>`
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py
new file mode 100644
index 00000000..ad65f6ef
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019 Ansible, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r"""options: {}
+notes:
+- This module is supported on C(ansible_network_os) network platforms. See the :ref:`Network
+ Platform Options <platform_options>` for details.
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py
new file mode 100644
index 00000000..6ae47a73
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/ipaddr.py
@@ -0,0 +1,1186 @@
+# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from functools import partial
+import types
+
+try:
+ import netaddr
+except ImportError:
+ # in this case, we'll make the filters return error messages (see bottom)
+ netaddr = None
+else:
+
+ class mac_linux(netaddr.mac_unix):
+ pass
+
+ mac_linux.word_fmt = "%.2x"
+
+from ansible import errors
+
+
+# ---- IP address and network query helpers ----
+def _empty_ipaddr_query(v, vtype):
+ # We don't have any query to process, so just check what type the user
+ # expects, and return the IP address in a correct format
+ if v:
+ if vtype == "address":
+ return str(v.ip)
+ elif vtype == "network":
+ return str(v)
+
+
+def _first_last(v):
+ if v.size == 2:
+ first_usable = int(netaddr.IPAddress(v.first))
+ last_usable = int(netaddr.IPAddress(v.last))
+ return first_usable, last_usable
+ elif v.size > 1:
+ first_usable = int(netaddr.IPAddress(v.first + 1))
+ last_usable = int(netaddr.IPAddress(v.last - 1))
+ return first_usable, last_usable
+
+
+def _6to4_query(v, vtype, value):
+ if v.version == 4:
+
+ if v.size == 1:
+ ipconv = str(v.ip)
+ elif v.size > 1:
+ if v.ip != v.network:
+ ipconv = str(v.ip)
+ else:
+ ipconv = False
+
+ if ipaddr(ipconv, "public"):
+ numbers = list(map(int, ipconv.split(".")))
+
+ try:
+ return "2002:{:02x}{:02x}:{:02x}{:02x}::1/48".format(*numbers)
+ except Exception:
+ return False
+
+ elif v.version == 6:
+ if vtype == "address":
+ if ipaddr(str(v), "2002::/16"):
+ return value
+ elif vtype == "network":
+ if v.ip != v.network:
+ if ipaddr(str(v.ip), "2002::/16"):
+ return value
+ else:
+ return False
+
+
+def _ip_query(v):
+ if v.size == 1:
+ return str(v.ip)
+ if v.size > 1:
+ # /31 networks in netaddr have no broadcast address
+ if v.ip != v.network or not v.broadcast:
+ return str(v.ip)
+
+
+def _gateway_query(v):
+ if v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + "/" + str(v.prefixlen)
+
+
+def _address_prefix_query(v):
+ if v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + "/" + str(v.prefixlen)
+
+
+def _bool_ipaddr_query(v):
+ if v:
+ return True
+
+
+def _broadcast_query(v):
+ if v.size > 2:
+ return str(v.broadcast)
+
+
+def _cidr_query(v):
+ return str(v)
+
+
+def _cidr_lookup_query(v, iplist, value):
+ try:
+ if v in iplist:
+ return value
+ except Exception:
+ return False
+
+
+def _first_usable_query(v, vtype):
+ if vtype == "address":
+ "Does it make sense to raise an error"
+ raise errors.AnsibleFilterError("Not a network address")
+ elif vtype == "network":
+ if v.size == 2:
+ return str(netaddr.IPAddress(int(v.network)))
+ elif v.size > 1:
+ return str(netaddr.IPAddress(int(v.network) + 1))
+
+
+def _host_query(v):
+ if v.size == 1:
+ return str(v)
+ elif v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + "/" + str(v.prefixlen)
+
+
+def _hostmask_query(v):
+ return str(v.hostmask)
+
+
+def _int_query(v, vtype):
+ if vtype == "address":
+ return int(v.ip)
+ elif vtype == "network":
+ return str(int(v.ip)) + "/" + str(int(v.prefixlen))
+
+
+def _ip_prefix_query(v):
+ if v.size == 2:
+ return str(v.ip) + "/" + str(v.prefixlen)
+ elif v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + "/" + str(v.prefixlen)
+
+
+def _ip_netmask_query(v):
+ if v.size == 2:
+ return str(v.ip) + " " + str(v.netmask)
+ elif v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + " " + str(v.netmask)
+
+
+"""
+def _ip_wildcard_query(v):
+ if v.size == 2:
+ return str(v.ip) + ' ' + str(v.hostmask)
+ elif v.size > 1:
+ if v.ip != v.network:
+ return str(v.ip) + ' ' + str(v.hostmask)
+"""
+
+
+def _ipv4_query(v, value):
+ if v.version == 6:
+ try:
+ return str(v.ipv4())
+ except Exception:
+ return False
+ else:
+ return value
+
+
+def _ipv6_query(v, value):
+ if v.version == 4:
+ return str(v.ipv6())
+ else:
+ return value
+
+
+def _last_usable_query(v, vtype):
+ if vtype == "address":
+ "Does it make sense to raise an error"
+ raise errors.AnsibleFilterError("Not a network address")
+ elif vtype == "network":
+ if v.size > 1:
+ first_usable, last_usable = _first_last(v)
+ return str(netaddr.IPAddress(last_usable))
+
+
+def _link_local_query(v, value):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ if v.version == 4:
+ if ipaddr(str(v_ip), "169.254.0.0/24"):
+ return value
+
+ elif v.version == 6:
+ if ipaddr(str(v_ip), "fe80::/10"):
+ return value
+
+
+def _loopback_query(v, value):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ if v_ip.is_loopback():
+ return value
+
+
+def _multicast_query(v, value):
+ if v.is_multicast():
+ return value
+
+
+def _net_query(v):
+ if v.size > 1:
+ if v.ip == v.network:
+ return str(v.network) + "/" + str(v.prefixlen)
+
+
+def _netmask_query(v):
+ return str(v.netmask)
+
+
+def _network_query(v):
+ """Return the network of a given IP or subnet"""
+ return str(v.network)
+
+
+def _network_id_query(v):
+ """Return the network of a given IP or subnet"""
+ return str(v.network)
+
+
+def _network_netmask_query(v):
+ return str(v.network) + " " + str(v.netmask)
+
+
+def _network_wildcard_query(v):
+ return str(v.network) + " " + str(v.hostmask)
+
+
+def _next_usable_query(v, vtype):
+ if vtype == "address":
+ "Does it make sense to raise an error"
+ raise errors.AnsibleFilterError("Not a network address")
+ elif vtype == "network":
+ if v.size > 1:
+ first_usable, last_usable = _first_last(v)
+ next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
+ if next_ip >= first_usable and next_ip <= last_usable:
+ return str(netaddr.IPAddress(int(v.ip) + 1))
+
+
+def _peer_query(v, vtype):
+ if vtype == "address":
+ raise errors.AnsibleFilterError("Not a network address")
+ elif vtype == "network":
+ if v.size == 2:
+ return str(netaddr.IPAddress(int(v.ip) ^ 1))
+ if v.size == 4:
+ if int(v.ip) % 4 == 0:
+ raise errors.AnsibleFilterError(
+ "Network address of /30 has no peer"
+ )
+ if int(v.ip) % 4 == 3:
+ raise errors.AnsibleFilterError(
+ "Broadcast address of /30 has no peer"
+ )
+ return str(netaddr.IPAddress(int(v.ip) ^ 3))
+ raise errors.AnsibleFilterError("Not a point-to-point network")
+
+
+def _prefix_query(v):
+ return int(v.prefixlen)
+
+
+def _previous_usable_query(v, vtype):
+ if vtype == "address":
+ "Does it make sense to raise an error"
+ raise errors.AnsibleFilterError("Not a network address")
+ elif vtype == "network":
+ if v.size > 1:
+ first_usable, last_usable = _first_last(v)
+ previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
+ if previous_ip >= first_usable and previous_ip <= last_usable:
+ return str(netaddr.IPAddress(int(v.ip) - 1))
+
+
+def _private_query(v, value):
+ if v.is_private():
+ return value
+
+
+def _public_query(v, value):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ if (
+ v_ip.is_unicast()
+ and not v_ip.is_private()
+ and not v_ip.is_loopback()
+ and not v_ip.is_netmask()
+ and not v_ip.is_hostmask()
+ ):
+ return value
+
+
+def _range_usable_query(v, vtype):
+ if vtype == "address":
+ "Does it make sense to raise an error"
+ raise errors.AnsibleFilterError("Not a network address")
+ elif vtype == "network":
+ if v.size > 1:
+ first_usable, last_usable = _first_last(v)
+ first_usable = str(netaddr.IPAddress(first_usable))
+ last_usable = str(netaddr.IPAddress(last_usable))
+ return "{0}-{1}".format(first_usable, last_usable)
+
+
+def _revdns_query(v):
+ v_ip = netaddr.IPAddress(str(v.ip))
+ return v_ip.reverse_dns
+
+
+def _size_query(v):
+ return v.size
+
+
+def _size_usable_query(v):
+ if v.size == 1:
+ return 0
+ elif v.size == 2:
+ return 2
+ return v.size - 2
+
+
+def _subnet_query(v):
+ return str(v.cidr)
+
+
+def _type_query(v):
+ if v.size == 1:
+ return "address"
+ if v.size > 1:
+ if v.ip != v.network:
+ return "address"
+ else:
+ return "network"
+
+
+def _unicast_query(v, value):
+ if v.is_unicast():
+ return value
+
+
+def _version_query(v):
+ return v.version
+
+
+def _wrap_query(v, vtype, value):
+ if v.version == 6:
+ if vtype == "address":
+ return "[" + str(v.ip) + "]"
+ elif vtype == "network":
+ return "[" + str(v.ip) + "]/" + str(v.prefixlen)
+ else:
+ return value
+
+
+# ---- HWaddr query helpers ----
+def _bare_query(v):
+ v.dialect = netaddr.mac_bare
+ return str(v)
+
+
+def _bool_hwaddr_query(v):
+ if v:
+ return True
+
+
+def _int_hwaddr_query(v):
+ return int(v)
+
+
+def _cisco_query(v):
+ v.dialect = netaddr.mac_cisco
+ return str(v)
+
+
+def _empty_hwaddr_query(v, value):
+ if v:
+ return value
+
+
+def _linux_query(v):
+ v.dialect = mac_linux
+ return str(v)
+
+
+def _postgresql_query(v):
+ v.dialect = netaddr.mac_pgsql
+ return str(v)
+
+
+def _unix_query(v):
+ v.dialect = netaddr.mac_unix
+ return str(v)
+
+
+def _win_query(v):
+ v.dialect = netaddr.mac_eui48
+ return str(v)
+
+
+# ---- IP address and network filters ----
+
+# Returns a minified list of subnets or a single subnet that spans all of
+# the inputs.
+def cidr_merge(value, action="merge"):
+ if not hasattr(value, "__iter__"):
+ raise errors.AnsibleFilterError(
+ "cidr_merge: expected iterable, got " + repr(value)
+ )
+
+ if action == "merge":
+ try:
+ return [str(ip) for ip in netaddr.cidr_merge(value)]
+ except Exception as e:
+ raise errors.AnsibleFilterError(
+ "cidr_merge: error in netaddr:\n%s" % e
+ )
+
+ elif action == "span":
+ # spanning_cidr needs at least two values
+ if len(value) == 0:
+ return None
+ elif len(value) == 1:
+ try:
+ return str(netaddr.IPNetwork(value[0]))
+ except Exception as e:
+ raise errors.AnsibleFilterError(
+ "cidr_merge: error in netaddr:\n%s" % e
+ )
+ else:
+ try:
+ return str(netaddr.spanning_cidr(value))
+ except Exception as e:
+ raise errors.AnsibleFilterError(
+ "cidr_merge: error in netaddr:\n%s" % e
+ )
+
+ else:
+ raise errors.AnsibleFilterError(
+ "cidr_merge: invalid action '%s'" % action
+ )
+
+
+def ipaddr(value, query="", version=False, alias="ipaddr"):
+ """ Check if string is an IP address or network and filter it """
+
+ query_func_extra_args = {
+ "": ("vtype",),
+ "6to4": ("vtype", "value"),
+ "cidr_lookup": ("iplist", "value"),
+ "first_usable": ("vtype",),
+ "int": ("vtype",),
+ "ipv4": ("value",),
+ "ipv6": ("value",),
+ "last_usable": ("vtype",),
+ "link-local": ("value",),
+ "loopback": ("value",),
+ "lo": ("value",),
+ "multicast": ("value",),
+ "next_usable": ("vtype",),
+ "peer": ("vtype",),
+ "previous_usable": ("vtype",),
+ "private": ("value",),
+ "public": ("value",),
+ "unicast": ("value",),
+ "range_usable": ("vtype",),
+ "wrap": ("vtype", "value"),
+ }
+
+ query_func_map = {
+ "": _empty_ipaddr_query,
+ "6to4": _6to4_query,
+ "address": _ip_query,
+ "address/prefix": _address_prefix_query, # deprecate
+ "bool": _bool_ipaddr_query,
+ "broadcast": _broadcast_query,
+ "cidr": _cidr_query,
+ "cidr_lookup": _cidr_lookup_query,
+ "first_usable": _first_usable_query,
+ "gateway": _gateway_query, # deprecate
+ "gw": _gateway_query, # deprecate
+ "host": _host_query,
+ "host/prefix": _address_prefix_query, # deprecate
+ "hostmask": _hostmask_query,
+ "hostnet": _gateway_query, # deprecate
+ "int": _int_query,
+ "ip": _ip_query,
+ "ip/prefix": _ip_prefix_query,
+ "ip_netmask": _ip_netmask_query,
+ # 'ip_wildcard': _ip_wildcard_query, built then could not think of use case
+ "ipv4": _ipv4_query,
+ "ipv6": _ipv6_query,
+ "last_usable": _last_usable_query,
+ "link-local": _link_local_query,
+ "lo": _loopback_query,
+ "loopback": _loopback_query,
+ "multicast": _multicast_query,
+ "net": _net_query,
+ "next_usable": _next_usable_query,
+ "netmask": _netmask_query,
+ "network": _network_query,
+ "network_id": _network_id_query,
+ "network/prefix": _subnet_query,
+ "network_netmask": _network_netmask_query,
+ "network_wildcard": _network_wildcard_query,
+ "peer": _peer_query,
+ "prefix": _prefix_query,
+ "previous_usable": _previous_usable_query,
+ "private": _private_query,
+ "public": _public_query,
+ "range_usable": _range_usable_query,
+ "revdns": _revdns_query,
+ "router": _gateway_query, # deprecate
+ "size": _size_query,
+ "size_usable": _size_usable_query,
+ "subnet": _subnet_query,
+ "type": _type_query,
+ "unicast": _unicast_query,
+ "v4": _ipv4_query,
+ "v6": _ipv6_query,
+ "version": _version_query,
+ "wildcard": _hostmask_query,
+ "wrap": _wrap_query,
+ }
+
+ vtype = None
+
+ if not value:
+ return False
+
+ elif value is True:
+ return False
+
+ # Check if value is a list and parse each element
+ elif isinstance(value, (list, tuple, types.GeneratorType)):
+
+ _ret = []
+ for element in value:
+ if ipaddr(element, str(query), version):
+ _ret.append(ipaddr(element, str(query), version))
+
+ if _ret:
+ return _ret
+ else:
+ return list()
+
+ # Check if value is a number and convert it to an IP address
+ elif str(value).isdigit():
+
+ # We don't know what IP version to assume, so let's check IPv4 first,
+ # then IPv6
+ try:
+ if (not version) or (version and version == 4):
+ v = netaddr.IPNetwork("0.0.0.0/0")
+ v.value = int(value)
+ v.prefixlen = 32
+ elif version and version == 6:
+ v = netaddr.IPNetwork("::/0")
+ v.value = int(value)
+ v.prefixlen = 128
+
+ # IPv4 didn't work the first time, so it definitely has to be IPv6
+ except Exception:
+ try:
+ v = netaddr.IPNetwork("::/0")
+ v.value = int(value)
+ v.prefixlen = 128
+
+ # The value is too big for IPv6. Are you a nanobot?
+ except Exception:
+ return False
+
+ # We got an IP address, let's mark it as such
+ value = str(v)
+ vtype = "address"
+
+ # value has not been recognized, check if it's a valid IP string
+ else:
+ try:
+ v = netaddr.IPNetwork(value)
+
+ # value is a valid IP string, check if user specified
+ # CIDR prefix or just an IP address, this will indicate default
+ # output format
+ try:
+ address, prefix = value.split("/")
+ vtype = "network"
+ except Exception:
+ vtype = "address"
+
+ # value hasn't been recognized, maybe it's a numerical CIDR?
+ except Exception:
+ try:
+ address, prefix = value.split("/")
+ address.isdigit()
+ address = int(address)
+ prefix.isdigit()
+ prefix = int(prefix)
+
+ # It's not numerical CIDR, give up
+ except Exception:
+ return False
+
+ # It is something, so let's try and build a CIDR from the parts
+ try:
+ v = netaddr.IPNetwork("0.0.0.0/0")
+ v.value = address
+ v.prefixlen = prefix
+
+ # It's not a valid IPv4 CIDR
+ except Exception:
+ try:
+ v = netaddr.IPNetwork("::/0")
+ v.value = address
+ v.prefixlen = prefix
+
+ # It's not a valid IPv6 CIDR. Give up.
+ except Exception:
+ return False
+
+ # We have a valid CIDR, so let's write it in correct format
+ value = str(v)
+ vtype = "network"
+
+ # We have a query string but it's not in the known query types. Check if
+ # that string is a valid subnet, if so, we can check later if given IP
+ # address/network is inside that specific subnet
+ try:
+ # ?? 6to4 and link-local were True here before. Should they still?
+ if (
+ query
+ and (query not in query_func_map or query == "cidr_lookup")
+ and not str(query).isdigit()
+ and ipaddr(query, "network")
+ ):
+ iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
+ query = "cidr_lookup"
+ except Exception:
+ pass
+
+# This code checks if the value matches the IP version the user wants, i.e. if
+# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()").
+# If the version does not match, return False
+ if version and v.version != version:
+ return False
+
+ extras = []
+ for arg in query_func_extra_args.get(query, tuple()):
+ extras.append(locals()[arg])
+ try:
+ return query_func_map[query](v, *extras)
+ except KeyError:
+ try:
+ float(query)
+ if v.size == 1:
+ if vtype == "address":
+ return str(v.ip)
+ elif vtype == "network":
+ return str(v)
+
+ elif v.size > 1:
+ try:
+ return str(v[query]) + "/" + str(v.prefixlen)
+ except Exception:
+ return False
+
+ else:
+ return value
+
+ except Exception:
+ raise errors.AnsibleFilterError(
+ alias + ": unknown filter type: %s" % query
+ )
+
+ return False
+
+
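+# Add (or subtract) an integer offset to an IP address.
+# Usage (values are illustrative):
+#
+#   - '192.0.2.5' | ipmath(10)  -> '192.0.2.15'
+#   - '192.0.2.5' | ipmath(-5)  -> '192.0.2.0'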
+def ipmath(value, amount):
+ try:
+ if "/" in value:
+ ip = netaddr.IPNetwork(value).ip
+ else:
+ ip = netaddr.IPAddress(value)
+ except (netaddr.AddrFormatError, ValueError):
+ msg = "You must pass a valid IP address; {0} is invalid".format(value)
+ raise errors.AnsibleFilterError(msg)
+
+ if not isinstance(amount, int):
+ msg = (
+ "You must pass an integer for arithmetic; "
+ "{0} is not a valid integer"
+ ).format(amount)
+ raise errors.AnsibleFilterError(msg)
+
+ return str(ip + amount)
+
+
+def ipwrap(value, query=""):
+ try:
+ if isinstance(value, (list, tuple, types.GeneratorType)):
+ _ret = []
+ for element in value:
+ if ipaddr(element, query, version=False, alias="ipwrap"):
+ _ret.append(ipaddr(element, "wrap"))
+ else:
+ _ret.append(element)
+
+ return _ret
+ else:
+ _ret = ipaddr(value, query, version=False, alias="ipwrap")
+ if _ret:
+ return ipaddr(_ret, "wrap")
+ else:
+ return value
+
+ except Exception:
+ return value
+
+
+def ipv4(value, query=""):
+ return ipaddr(value, query, version=4, alias="ipv4")
+
+
+def ipv6(value, query=""):
+ return ipaddr(value, query, version=6, alias="ipv6")
+
+
+# Split given subnet into smaller subnets or find out the biggest subnet of
+# a given IP address with given CIDR prefix
+# Usage:
+#
+# - address or address/prefix | ipsubnet
+# returns CIDR subnet of a given input
+#
+# - address/prefix | ipsubnet(cidr)
+# returns number of possible subnets for given CIDR prefix
+#
+# - address/prefix | ipsubnet(cidr, index)
+# returns new subnet with given CIDR prefix
+#
+# - address | ipsubnet(cidr)
+# returns biggest subnet with given CIDR prefix that address belongs to
+#
+# - address | ipsubnet(cidr, index)
+# returns next indexed subnet which contains given address
+#
+# - address/prefix | ipsubnet(subnet/prefix)
+# return the index of the subnet in the subnet
+def ipsubnet(value, query="", index="x"):
+ """ Manipulate IPv4/IPv6 subnets """
+
+ try:
+ vtype = ipaddr(value, "type")
+ if vtype == "address":
+ v = ipaddr(value, "cidr")
+ elif vtype == "network":
+ v = ipaddr(value, "subnet")
+
+ value = netaddr.IPNetwork(v)
+ except Exception:
+ return False
+ query_string = str(query)
+ if not query:
+ return str(value)
+
+ elif query_string.isdigit():
+ vsize = ipaddr(v, "size")
+ query = int(query)
+
+ try:
+ float(index)
+ index = int(index)
+
+ if vsize > 1:
+ try:
+ return str(list(value.subnet(query))[index])
+ except Exception:
+ return False
+
+ elif vsize == 1:
+ try:
+ return str(value.supernet(query)[index])
+ except Exception:
+ return False
+
+ except Exception:
+ if vsize > 1:
+ try:
+ return str(len(list(value.subnet(query))))
+ except Exception:
+ return False
+
+ elif vsize == 1:
+ try:
+ return str(value.supernet(query)[0])
+ except Exception:
+ return False
+
+ elif query_string:
+ vtype = ipaddr(query, "type")
+ if vtype == "address":
+ v = ipaddr(query, "cidr")
+ elif vtype == "network":
+ v = ipaddr(query, "subnet")
+ else:
+ msg = "You must pass a valid subnet or IP address; {0} is invalid".format(
+ query_string
+ )
+ raise errors.AnsibleFilterError(msg)
+ query = netaddr.IPNetwork(v)
+ for i, subnet in enumerate(query.subnet(value.prefixlen), 1):
+ if subnet == value:
+ return str(i)
+ msg = "{0} is not in the subnet {1}".format(value.cidr, query.cidr)
+ raise errors.AnsibleFilterError(msg)
+ return False
+
+
+# Returns the nth host within a network described by value.
+# Usage:
+#
+# - address or address/prefix | nthhost(nth)
+# returns the nth host within the given network
+def nthhost(value, query=""):
+ """ Get the nth host within a given network """
+ try:
+ vtype = ipaddr(value, "type")
+ if vtype == "address":
+ v = ipaddr(value, "cidr")
+ elif vtype == "network":
+ v = ipaddr(value, "subnet")
+
+ value = netaddr.IPNetwork(v)
+ except Exception:
+ return False
+
+ if not query:
+ return False
+
+ try:
+ nth = int(query)
+ if value.size > nth:
+ return value[nth]
+
+ except ValueError:
+ return False
+
+ return False
+
+
+# Returns the next nth usable ip within a network described by value.
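+# Usage (values are illustrative):
+#
+#   - '192.0.2.0/24' | next_nth_usable(2)  -> '192.0.2.2'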
+def next_nth_usable(value, offset):
+ try:
+ vtype = ipaddr(value, "type")
+ if vtype == "address":
+ v = ipaddr(value, "cidr")
+ elif vtype == "network":
+ v = ipaddr(value, "subnet")
+
+ v = netaddr.IPNetwork(v)
+ except Exception:
+ return False
+
+ if type(offset) != int:
+ raise errors.AnsibleFilterError("Must pass in an integer")
+ if v.size > 1:
+ first_usable, last_usable = _first_last(v)
+ nth_ip = int(netaddr.IPAddress(int(v.ip) + offset))
+ if nth_ip >= first_usable and nth_ip <= last_usable:
+ return str(netaddr.IPAddress(int(v.ip) + offset))
+
+
+# Returns the previous nth usable ip within a network described by value.
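+# Usage (values are illustrative):
+#
+#   - '192.0.2.10/24' | previous_nth_usable(5)  -> '192.0.2.5'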
+def previous_nth_usable(value, offset):
+ try:
+ vtype = ipaddr(value, "type")
+ if vtype == "address":
+ v = ipaddr(value, "cidr")
+ elif vtype == "network":
+ v = ipaddr(value, "subnet")
+
+ v = netaddr.IPNetwork(v)
+ except Exception:
+ return False
+
+ if type(offset) != int:
+ raise errors.AnsibleFilterError("Must pass in an integer")
+ if v.size > 1:
+ first_usable, last_usable = _first_last(v)
+ nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
+ if nth_ip >= first_usable and nth_ip <= last_usable:
+ return str(netaddr.IPAddress(int(v.ip) - offset))
+
+
+def _range_checker(ip_check, first, last):
+ """
+ Tests whether an ip address is within the bounds of the first and last address.
+
+ :param ip_check: The ip to test if it is within first and last.
+ :param first: The first IP in the range to test against.
+ :param last: The last IP in the range to test against.
+
+ :return: bool
+ """
+ if ip_check >= first and ip_check <= last:
+ return True
+ else:
+ return False
+
+
+def _address_normalizer(value):
+ """
+ Used to validate an address or network type and return it in a consistent format.
+ This is intended for future use cases that are not currently available, such as an address range.
+
+ :param value: The string representation of an address or network.
+
+ :return: The address or network in the normalized form.
+ """
+ try:
+ vtype = ipaddr(value, "type")
+ if vtype == "address" or vtype == "network":
+ v = ipaddr(value, "subnet")
+ except Exception:
+ return False
+
+ return v
+
+
+def network_in_usable(value, test):
+ """
+ Checks whether 'test' is a usable address or set of addresses within 'value'
+
+ :param value: The string representation of an address or network to test against.
+ :param test: The string representation of an address or network to validate if it is within the range of 'value'.
+
+ :return: bool
+ """
+ # normalize value and test variables into an ipaddr
+ v = _address_normalizer(value)
+ w = _address_normalizer(test)
+
+ # get first and last addresses as integers to compare value and test; or catch the value when the case is /32
+ v_first = ipaddr(ipaddr(v, "first_usable") or ipaddr(v, "address"), "int")
+ v_last = ipaddr(ipaddr(v, "last_usable") or ipaddr(v, "address"), "int")
+ w_first = ipaddr(ipaddr(w, "network") or ipaddr(w, "address"), "int")
+ w_last = ipaddr(ipaddr(w, "broadcast") or ipaddr(w, "address"), "int")
+
+ if _range_checker(w_first, v_first, v_last) and _range_checker(
+ w_last, v_first, v_last
+ ):
+ return True
+ else:
+ return False
+
+
+def network_in_network(value, test):
+ """
+ Checks whether the 'test' address or addresses are in 'value', including broadcast and network
+
+ :param value: The network address or range to test against.
+ :param test: The address or network to validate if it is within the range of 'value'.
+
+ :return: bool
+ """
+ # normalize value and test variables into an ipaddr
+ v = _address_normalizer(value)
+ w = _address_normalizer(test)
+
+ # get first and last addresses as integers to compare value and test; or catch the value when the case is /32
+ v_first = ipaddr(ipaddr(v, "network") or ipaddr(v, "address"), "int")
+ v_last = ipaddr(ipaddr(v, "broadcast") or ipaddr(v, "address"), "int")
+ w_first = ipaddr(ipaddr(w, "network") or ipaddr(w, "address"), "int")
+ w_last = ipaddr(ipaddr(w, "broadcast") or ipaddr(w, "address"), "int")
+
+ if _range_checker(w_first, v_first, v_last) and _range_checker(
+ w_last, v_first, v_last
+ ):
+ return True
+ else:
+ return False
+
+
+def reduce_on_network(value, network):
+ """
+ Reduces a list of addresses to only the addresses that match a given network.
+
+ :param value: The list of addresses to filter on.
+ :param network: The network to validate against.
+
+ :return: The reduced list of addresses.
+ """
+ # normalize network variable into an ipaddr
+ n = _address_normalizer(network)
+
+ # get first and last addresses as integers to compare value and test; or catch the value when the case is /32
+ n_first = ipaddr(ipaddr(n, "network") or ipaddr(n, "address"), "int")
+ n_last = ipaddr(ipaddr(n, "broadcast") or ipaddr(n, "address"), "int")
+
+ # create an empty list to fill and return
+ r = []
+
+ for address in value:
+ # normalize address variables into an ipaddr
+ a = _address_normalizer(address)
+
+ # get first and last addresses as integers to compare value and test; or catch the value when the case is /32
+ a_first = ipaddr(ipaddr(a, "network") or ipaddr(a, "address"), "int")
+ a_last = ipaddr(ipaddr(a, "broadcast") or ipaddr(a, "address"), "int")
+
+ if _range_checker(a_first, n_first, n_last) and _range_checker(
+ a_last, n_first, n_last
+ ):
+ r.append(address)
+
+ return r
+
+
+# Returns the SLAAC address within a network for a given HW/MAC address.
+# Usage:
+#
+# - prefix | slaac(mac)
+def slaac(value, query=""):
+ """ Get the SLAAC address within given network """
+ try:
+ vtype = ipaddr(value, "type")
+ if vtype == "address":
+ v = ipaddr(value, "cidr")
+ elif vtype == "network":
+ v = ipaddr(value, "subnet")
+
+ if ipaddr(value, "version") != 6:
+ return False
+
+ value = netaddr.IPNetwork(v)
+ except Exception:
+ return False
+
+ if not query:
+ return False
+
+ try:
+ mac = hwaddr(query, alias="slaac")
+
+ eui = netaddr.EUI(mac)
+ except Exception:
+ return False
+
+ return eui.ipv6(value.network)
+
+
+# ---- HWaddr / MAC address filters ----
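+# Usage (values are illustrative):
+#
+#   - '08:00:27:0d:82:2a' | hwaddr('cisco')  -> '0800.270d.822a'
+#   - '08:00:27:0d:82:2a' | hwaddr('bare')   -> '0800270D822A'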
+def hwaddr(value, query="", alias="hwaddr"):
+ """ Check if string is a HW/MAC address and filter it """
+
+ query_func_extra_args = {"": ("value",)}
+
+ query_func_map = {
+ "": _empty_hwaddr_query,
+ "bare": _bare_query,
+ "bool": _bool_hwaddr_query,
+ "int": _int_hwaddr_query,
+ "cisco": _cisco_query,
+ "eui48": _win_query,
+ "linux": _linux_query,
+ "pgsql": _postgresql_query,
+ "postgresql": _postgresql_query,
+ "psql": _postgresql_query,
+ "unix": _unix_query,
+ "win": _win_query,
+ }
+
+ try:
+ v = netaddr.EUI(value)
+ except Exception:
+ if query and query != "bool":
+ raise errors.AnsibleFilterError(
+ alias + ": not a hardware address: %s" % value
+ )
+
+ extras = []
+ for arg in query_func_extra_args.get(query, tuple()):
+ extras.append(locals()[arg])
+ try:
+ return query_func_map[query](v, *extras)
+ except KeyError:
+ raise errors.AnsibleFilterError(
+ alias + ": unknown filter type: %s" % query
+ )
+
+ return False
+
+
+def macaddr(value, query=""):
+ return hwaddr(value, query, alias="macaddr")
+
+
+def _need_netaddr(f_name, *args, **kwargs):
+ raise errors.AnsibleFilterError(
+ "The %s filter requires python's netaddr be "
+ "installed on the ansible controller" % f_name
+ )
+
+
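+# Usage (values are illustrative):
+#
+#   - '192.0.2.1' | ip4_hex       -> 'c0000201'
+#   - '192.0.2.1' | ip4_hex(':')  -> 'c0:00:02:01'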
+def ip4_hex(arg, delimiter=""):
+ """ Convert an IPv4 address to Hexadecimal notation """
+ numbers = list(map(int, arg.split(".")))
+ return "{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}".format(
+ *numbers, sep=delimiter
+ )
+
+
+# ---- Ansible filters ----
+class FilterModule(object):
+ """ IP address and network manipulation filters """
+
+ filter_map = {
+ # IP addresses and networks
+ "cidr_merge": cidr_merge,
+ "ipaddr": ipaddr,
+ "ipmath": ipmath,
+ "ipwrap": ipwrap,
+ "ip4_hex": ip4_hex,
+ "ipv4": ipv4,
+ "ipv6": ipv6,
+ "ipsubnet": ipsubnet,
+ "next_nth_usable": next_nth_usable,
+ "network_in_network": network_in_network,
+ "network_in_usable": network_in_usable,
+ "reduce_on_network": reduce_on_network,
+ "nthhost": nthhost,
+ "previous_nth_usable": previous_nth_usable,
+ "slaac": slaac,
+ # MAC / HW addresses
+ "hwaddr": hwaddr,
+ "macaddr": macaddr,
+ }
+
+ def filters(self):
+ if netaddr:
+ return self.filter_map
+ else:
+ # Need to install python's netaddr for these filters to work
+ return dict(
+ (f, partial(_need_netaddr, f)) for f in self.filter_map
+ )
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py
new file mode 100644
index 00000000..f99e6e76
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py
@@ -0,0 +1,531 @@
+#
+# (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+import os
+import traceback
+import string
+
+from xml.etree.ElementTree import fromstring
+
+from ansible.module_utils._text import to_native, to_text
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ Template,
+)
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.errors import AnsibleError, AnsibleFilterError
+from ansible.utils.display import Display
+from ansible.utils.encrypt import passlib_or_crypt, random_password
+
+try:
+ import yaml
+
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+try:
+ import textfsm
+
+ HAS_TEXTFSM = True
+except ImportError:
+ HAS_TEXTFSM = False
+
+display = Display()
+
+
+def re_matchall(regex, value):
+ objects = list()
+ for match in re.findall(regex.pattern, value, re.M):
+ obj = {}
+ if regex.groupindex:
+ for name, index in iteritems(regex.groupindex):
+ if len(regex.groupindex) == 1:
+ obj[name] = match
+ else:
+ obj[name] = match[index - 1]
+ objects.append(obj)
+ return objects
+
+
+def re_search(regex, value):
+ obj = {}
+ # use module-level re.search so re.M is treated as a flag; a compiled
+ # pattern's second positional argument is a start position, not flags
+ match = re.search(regex.pattern, value, re.M)
+ if match:
+ items = list(match.groups())
+ if regex.groupindex:
+ for name, index in iteritems(regex.groupindex):
+ obj[name] = items[index - 1]
+ return obj
+
+
+def parse_cli(output, tmpl):
+ if not isinstance(output, string_types):
+ raise AnsibleError(
+ "parse_cli input should be a string, but was given a input of %s"
+ % (type(output))
+ )
+
+ if not os.path.exists(tmpl):
+ raise AnsibleError("unable to locate parse_cli template: %s" % tmpl)
+
+ try:
+ template = Template()
+ except ImportError as exc:
+ raise AnsibleError(to_native(exc))
+
+ with open(tmpl) as tmpl_fh:
+ tmpl_content = tmpl_fh.read()
+
+ spec = yaml.safe_load(tmpl_content)
+ obj = {}
+
+ for name, attrs in iteritems(spec["keys"]):
+ value = attrs["value"]
+
+ try:
+ variables = spec.get("vars", {})
+ value = template(value, variables)
+ except Exception:
+ pass
+
+ if "start_block" in attrs and "end_block" in attrs:
+ start_block = re.compile(attrs["start_block"])
+ end_block = re.compile(attrs["end_block"])
+
+ blocks = list()
+ lines = None
+ block_started = False
+
+ for line in output.split("\n"):
+ match_start = start_block.match(line)
+ match_end = end_block.match(line)
+
+ if match_start:
+ lines = list()
+ lines.append(line)
+ block_started = True
+
+ elif match_end:
+ if lines:
+ lines.append(line)
+ blocks.append("\n".join(lines))
+ block_started = False
+
+ elif block_started:
+ if lines:
+ lines.append(line)
+
+ regex_items = [re.compile(r) for r in attrs["items"]]
+ objects = list()
+
+ for block in blocks:
+ if isinstance(value, Mapping) and "key" not in value:
+ items = list()
+ for regex in regex_items:
+ match = regex.search(block)
+ if match:
+ item_values = match.groupdict()
+ item_values["match"] = list(match.groups())
+ items.append(item_values)
+ else:
+ items.append(None)
+
+ obj = {}
+ for k, v in iteritems(value):
+ try:
+ obj[k] = template(
+ v, {"item": items}, fail_on_undefined=False
+ )
+ except Exception:
+ obj[k] = None
+ objects.append(obj)
+
+ elif isinstance(value, Mapping):
+ items = list()
+ for regex in regex_items:
+ match = regex.search(block)
+ if match:
+ item_values = match.groupdict()
+ item_values["match"] = list(match.groups())
+ items.append(item_values)
+ else:
+ items.append(None)
+
+ key = template(value["key"], {"item": items})
+ values = dict(
+ [
+ (k, template(v, {"item": items}))
+ for k, v in iteritems(value["values"])
+ ]
+ )
+ objects.append({key: values})
+
+ return objects
+
+ elif "items" in attrs:
+ regexp = re.compile(attrs["items"])
+ when = attrs.get("when")
+ conditional = (
+ "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
+ )
+
+ if isinstance(value, Mapping) and "key" not in value:
+ values = list()
+
+ for item in re_matchall(regexp, output):
+ entry = {}
+
+ for item_key, item_value in iteritems(value):
+ entry[item_key] = template(item_value, {"item": item})
+
+ if when:
+ if template(conditional, {"item": entry}):
+ values.append(entry)
+ else:
+ values.append(entry)
+
+ obj[name] = values
+
+ elif isinstance(value, Mapping):
+ values = dict()
+
+ for item in re_matchall(regexp, output):
+ entry = {}
+
+ for item_key, item_value in iteritems(value["values"]):
+ entry[item_key] = template(item_value, {"item": item})
+
+ key = template(value["key"], {"item": item})
+
+ if when:
+ if template(
+ conditional, {"item": {"key": key, "value": entry}}
+ ):
+ values[key] = entry
+ else:
+ values[key] = entry
+
+ obj[name] = values
+
+ else:
+ item = re_search(regexp, output)
+ obj[name] = template(value, {"item": item})
+
+ else:
+ obj[name] = value
+
+ return obj
+
+
+def parse_cli_textfsm(value, template):
+ if not HAS_TEXTFSM:
+ raise AnsibleError(
+ "parse_cli_textfsm filter requires TextFSM library to be installed"
+ )
+
+ if not isinstance(value, string_types):
+ raise AnsibleError(
+ "parse_cli_textfsm input should be a string, but was given a input of %s"
+ % (type(value))
+ )
+
+ if not os.path.exists(template):
+ raise AnsibleError(
+ "unable to locate parse_cli_textfsm template: %s" % template
+ )
+
+ try:
+ template = open(template)
+ except IOError as exc:
+ raise AnsibleError(to_native(exc))
+
+ re_table = textfsm.TextFSM(template)
+ fsm_results = re_table.ParseText(value)
+
+ results = list()
+ for item in fsm_results:
+ results.append(dict(zip(re_table.header, item)))
+
+ return results
+
+
+def _extract_param(template, root, attrs, value):
+
+ key = None
+ when = attrs.get("when")
+ conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
+ param_to_xpath_map = attrs["items"]
+
+ if isinstance(value, Mapping):
+ key = value.get("key", None)
+ if key:
+ value = value["values"]
+
+ entries = dict() if key else list()
+
+ for element in root.findall(attrs["top"]):
+ entry = dict()
+ item_dict = dict()
+ for param, param_xpath in iteritems(param_to_xpath_map):
+ fields = None
+ try:
+ fields = element.findall(param_xpath)
+ except Exception:
+ display.warning(
+ "Failed to evaluate value of '%s' with XPath '%s'.\nUnexpected error: %s."
+ % (param, param_xpath, traceback.format_exc())
+ )
+
+ tags = param_xpath.split("/")
+
+ # Check whether the XPath ends with an attribute selector.
+ # If it does, set the matching attribute key/value dict as the param
+ # value; otherwise assign the matched element's text value.
+ if len(tags) and tags[-1].endswith("]"):
+ if fields:
+ if len(fields) > 1:
+ item_dict[param] = [field.attrib for field in fields]
+ else:
+ item_dict[param] = fields[0].attrib
+ else:
+ item_dict[param] = {}
+ else:
+ if fields:
+ if len(fields) > 1:
+ item_dict[param] = [field.text for field in fields]
+ else:
+ item_dict[param] = fields[0].text
+ else:
+ item_dict[param] = None
+
+ if isinstance(value, Mapping):
+ for item_key, item_value in iteritems(value):
+ entry[item_key] = template(item_value, {"item": item_dict})
+ else:
+ entry = template(value, {"item": item_dict})
+
+ if key:
+ expanded_key = template(key, {"item": item_dict})
+ if when:
+ if template(
+ conditional,
+ {"item": {"key": expanded_key, "value": entry}},
+ ):
+ entries[expanded_key] = entry
+ else:
+ entries[expanded_key] = entry
+ else:
+ if when:
+ if template(conditional, {"item": entry}):
+ entries.append(entry)
+ else:
+ entries.append(entry)
+
+ return entries
+
+
+def parse_xml(output, tmpl):
+ if not os.path.exists(tmpl):
+ raise AnsibleError("unable to locate parse_xml template: %s" % tmpl)
+
+ if not isinstance(output, string_types):
+ raise AnsibleError(
+ "parse_xml works on string input, but given input of : %s"
+ % type(output)
+ )
+
+ root = fromstring(output)
+ try:
+ template = Template()
+ except ImportError as exc:
+ raise AnsibleError(to_native(exc))
+
+ with open(tmpl) as tmpl_fh:
+ tmpl_content = tmpl_fh.read()
+
+ spec = yaml.safe_load(tmpl_content)
+ obj = {}
+
+ for name, attrs in iteritems(spec["keys"]):
+ value = attrs["value"]
+
+ try:
+ variables = spec.get("vars", {})
+ value = template(value, variables)
+ except Exception:
+ pass
+
+ if "items" in attrs:
+ obj[name] = _extract_param(template, root, attrs, value)
+ else:
+ obj[name] = value
+
+ return obj
+
+
+def type5_pw(password, salt=None):
+ if not isinstance(password, string_types):
+ raise AnsibleFilterError(
+ "type5_pw password input should be a string, but was given a input of %s"
+ % (type(password).__name__)
+ )
+
+ salt_chars = u"".join(
+ (to_text(string.ascii_letters), to_text(string.digits), u"./")
+ )
+ if salt is not None and not isinstance(salt, string_types):
+ raise AnsibleFilterError(
+ "type5_pw salt input should be a string, but was given a input of %s"
+ % (type(salt).__name__)
+ )
+ elif not salt:
+ salt = random_password(length=4, chars=salt_chars)
+ elif not set(salt) <= set(salt_chars):
+ raise AnsibleFilterError(
+ "type5_pw salt used inproper characters, must be one of %s"
+ % (salt_chars)
+ )
+
+ encrypted_password = passlib_or_crypt(password, "md5_crypt", salt=salt)
+
+ return encrypted_password
+
+
+def hash_salt(password):
+
+ split_password = password.split("$")
+ if len(split_password) != 4:
+ raise AnsibleFilterError(
+ "Could not parse salt out password correctly from {0}".format(
+ password
+ )
+ )
+ else:
+ return split_password[2]
+
+
+def comp_type5(
+ unencrypted_password, encrypted_password, return_original=False
+):
+
+ salt = hash_salt(encrypted_password)
+ if type5_pw(unencrypted_password, salt) == encrypted_password:
+ if return_original is True:
+ return encrypted_password
+ else:
+ return True
+ return False
+
+
+def vlan_parser(vlan_list, first_line_len=48, other_line_len=44):
+
+ """
+ Input: Unsorted list of VLAN integers
+ Output: Sorted string list of integers according to IOS-like VLAN list rules
+
+ 1. VLANs are listed in ascending order
+ 2. Runs of 3 or more consecutive VLANs are listed with a dash
+ 3. The first line of the list can be first_line_len characters long
+ 4. Subsequent list lines can be other_line_len characters long
+ """
+
+ # Sort and remove duplicates
+ sorted_list = sorted(set(vlan_list))
+
+ if not sorted_list:
+ raise AnsibleFilterError("Empty VLAN list supplied")
+
+ if sorted_list[0] < 1 or sorted_list[-1] > 4094:
+ raise AnsibleFilterError("Valid VLAN range is 1-4094")
+
+ parse_list = []
+ idx = 0
+ while idx < len(sorted_list):
+ start = idx
+ end = start
+ while end < len(sorted_list) - 1:
+ if sorted_list[end + 1] - sorted_list[end] == 1:
+ end += 1
+ else:
+ break
+
+ if start == end:
+ # Single VLAN
+ parse_list.append(str(sorted_list[idx]))
+ elif start + 1 == end:
+ # Run of 2 VLANs
+ parse_list.append(str(sorted_list[start]))
+ parse_list.append(str(sorted_list[end]))
+ else:
+ # Run of 3 or more VLANs
+ parse_list.append(
+ str(sorted_list[start]) + "-" + str(sorted_list[end])
+ )
+ idx = end + 1
+
+ line_count = 0
+ result = [""]
+ for vlans in parse_list:
+ # First line (" switchport trunk allowed vlan ")
+ if line_count == 0:
+ if len(result[line_count] + vlans) > first_line_len:
+ result.append("")
+ line_count += 1
+ result[line_count] += vlans + ","
+ else:
+ result[line_count] += vlans + ","
+
+ # Subsequent lines (" switchport trunk allowed vlan add ")
+ else:
+ if len(result[line_count] + vlans) > other_line_len:
+ result.append("")
+ line_count += 1
+ result[line_count] += vlans + ","
+ else:
+ result[line_count] += vlans + ","
+
+ # Remove trailing orphan commas
+ for idx in range(0, len(result)):
+ result[idx] = result[idx].rstrip(",")
+
+ # Sometimes text wraps to the next line, but there are no remaining VLANs
+ if "" in result:
+ result.remove("")
+
+ return result
+
+
+class FilterModule(object):
+ """Filters for working with output from network devices"""
+
+ filter_map = {
+ "parse_cli": parse_cli,
+ "parse_cli_textfsm": parse_cli_textfsm,
+ "parse_xml": parse_xml,
+ "type5_pw": type5_pw,
+ "hash_salt": hash_salt,
+ "comp_type5": comp_type5,
+ "vlan_parser": vlan_parser,
+ }
+
+ def filters(self):
+ return self.filter_map
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py
new file mode 100644
index 00000000..8afb3e5e
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/httpapi/restconf.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """author: Ansible Networking Team
+httpapi: restconf
+short_description: HttpApi Plugin for devices supporting the RESTCONF API
+description:
+- This HttpApi plugin provides methods to connect to RESTCONF API endpoints.
+options:
+ root_path:
+ type: str
+ description:
+ - Specifies the location of the RESTCONF root.
+ default: /restconf
+ vars:
+ - name: ansible_httpapi_restconf_root
+"""
+
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import ConnectionError
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.plugins.httpapi import HttpApiBase
+
+
+CONTENT_TYPE = "application/yang-data+json"
+
+
+class HttpApi(HttpApiBase):
+ def send_request(self, data, **message_kwargs):
+ if data:
+ data = json.dumps(data)
+
+ path = "/".join(
+ [
+ self.get_option("root_path").rstrip("/"),
+ message_kwargs.get("path", "").lstrip("/"),
+ ]
+ )
+
+ headers = {
+ "Content-Type": message_kwargs.get("content_type") or CONTENT_TYPE,
+ "Accept": message_kwargs.get("accept") or CONTENT_TYPE,
+ }
+ response, response_data = self.connection.send(
+ path, data, headers=headers, method=message_kwargs.get("method")
+ )
+
+ return handle_response(response, response_data)
+
+
+def handle_response(response, response_data):
+ # Read the body once; a second read() on the same buffer would return
+ # an empty payload rather than the original one.
+ raw = response_data.read()
+ try:
+ response_data = json.loads(raw)
+ except ValueError:
+ response_data = raw
+
+ if isinstance(response, HTTPError):
+ if response_data:
+ if "errors" in response_data:
+ errors = response_data["errors"]["error"]
+ error_text = "\n".join(
+ (error["error-message"] for error in errors)
+ )
+ else:
+ error_text = response_data
+
+ raise ConnectionError(error_text, code=response.code)
+ raise ConnectionError(to_text(response), code=response.code)
+
+ return response_data
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 00000000..dc0a19f7
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2578 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import unicode_literals
+
+
+import itertools
+import struct
+
+
+# The following makes it easier for us to script updates of the bundled code and is not part of
+# upstream
+_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
+
+__version__ = "1.0.22"
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+ _compat_int_types = (int, long)
+except NameError:
+ pass
+try:
+ _compat_str = unicode
+except NameError:
+ _compat_str = str
+ assert bytes != str
+if b"\0"[0] == 0: # Python 3 semantics
+
+ def _compat_bytes_to_byte_vals(byt):
+ return byt
+
+
+else:
+
+ def _compat_bytes_to_byte_vals(byt):
+ return [struct.unpack(b"!B", b)[0] for b in byt]
+
+
+try:
+ _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+
+ def _compat_int_from_byte_vals(bytvals, endianess):
+ assert endianess == "big"
+ res = 0
+ for bv in bytvals:
+ assert isinstance(bv, _compat_int_types)
+ res = (res << 8) + bv
+ return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+ assert isinstance(intval, _compat_int_types)
+ assert endianess == "big"
+ if length == 4:
+ if intval < 0 or intval >= 2 ** 32:
+ raise struct.error("integer out of range for 'I' format code")
+ return struct.pack(b"!I", intval)
+ elif length == 16:
+ if intval < 0 or intval >= 2 ** 128:
+ raise struct.error("integer out of range for 'QQ' format code")
+ return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF)
+ else:
+ raise NotImplementedError()
+
+
+if hasattr(int, "bit_length"):
+ # Not int.bit_length, since that won't work in 2.7 where long exists
+ def _compat_bit_length(i):
+ return i.bit_length()
+
+
+else:
+
+ def _compat_bit_length(i):
+ for res in itertools.count():
+ if i >> res == 0:
+ return res
+
+
+def _compat_range(start, end, step=1):
+ assert step > 0
+ i = start
+ while i < end:
+ yield i
+ i += step
+
+
+class _TotalOrderingMixin(object):
+ __slots__ = ()
+
+ # Helper that derives the other comparison operations from
+ # __lt__ and __eq__
+ # We avoid functools.total_ordering because it doesn't handle
+ # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+ def __eq__(self, other):
+ raise NotImplementedError
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not equal
+
+ def __lt__(self, other):
+ raise NotImplementedError
+
+ def __le__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented or not less:
+ return self.__eq__(other)
+ return less
+
+ def __gt__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not (less or equal)
+
+ def __ge__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ "%r does not appear to be an IPv4 or IPv6 address. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?" % address
+ )
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 address" % address
+ )
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ "%r does not appear to be an IPv4 or IPv6 network. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?" % address
+ )
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 network" % address
+ )
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 interface" % address
+ )
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return _compat_to_bytes(address, 4, "big")
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return _compat_to_bytes(address, 16, "big")
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = _compat_str(address).split("/")
+ if len(addr) > 2:
+ raise AddressValueError("Only one '/' permitted in %r" % address)
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it) # pylint: disable=stop-iteration-return
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, _compat_bit_length(~number & (number - 1)))
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raise:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if not (
+ isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress)
+ ):
+ raise TypeError("first and last must be IP addresses, not networks")
+ if first.version != last.version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (first, last)
+ )
+ if first > last:
+ raise ValueError("last IP address must be greater than first")
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError("unknown IP version")
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(
+ _count_righthand_zero_bits(first_int, ip_bits),
+ _compat_bit_length(last_int - first_int + 1) - 1,
+ )
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ A list of IPv4Network's or IPv6Network's depending on what we were
+ passed.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted,
+ # last.network_address <= net.network_address is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterator of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, ips[-1])
+ )
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, ips[-1])
+ )
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, nets[-1])
+ )
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. There are some times however, where you may wish
+ to have ipaddress sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
+
+
+class _IPAddressBase(_TotalOrderingMixin):
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return _compat_str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = "%200s has no version specified" % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(
+ msg % (address, self._max_prefixlen, self._version)
+ )
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = (
+ "%r (len %d != %d) is not permitted as an IPv%d address. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?"
+ )
+ raise AddressValueError(
+ msg % (address, address_len, expected_len, self._version)
+ )
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(
+ ip_int, cls._max_prefixlen
+ )
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = _compat_to_bytes(ip_int, byteslen, "big")
+ msg = "Netmask pattern %r mixes zeroes & ones"
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = "%r is not a valid netmask" % netmask_str
+ raise NetmaskValueError(msg)
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ def __reduce__(self):
+ return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return self._ip == other._ip and self._version == other._version
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseAddress):
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return _compat_str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return "%s/%d" % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast):
+ yield self._address_class(x)
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError("address out of range")
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError("address out of range")
+ return self._address_class(broadcast + n)
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (
+ self._version == other._version
+ and self.network_address == other.network_address
+ and int(self.netmask) == int(other.netmask)
+ )
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return (
+ int(self.network_address)
+ <= int(other._ip)
+ <= int(self.broadcast_address)
+ )
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other
+ or (
+ other.network_address in self
+ or (other.broadcast_address in self)
+ )
+ )
+
+ @property
+ def broadcast_address(self):
+ x = self._cache.get("broadcast_address")
+ if x is None:
+ x = self._address_class(
+ int(self.network_address) | int(self.hostmask)
+ )
+ self._cache["broadcast_address"] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get("hostmask")
+ if x is None:
+ x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+ self._cache["hostmask"] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%d" % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = "%200s has no associated address class" % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::1/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::1/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError("%s not contained in %s" % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__(
+ "%s/%s" % (other.network_address, other.prefixlen)
+ )
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError(
+ "Error performing exclusion: "
+ "s1: %s s2: %s other: %s" % (s1, s2, other)
+ )
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError(
+ "Error performing exclusion: "
+ "s1: %s s2: %s other: %s" % (s1, s2, other)
+ )
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), yield an iterator with just ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (smaller prefix) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError("new prefix must be longer")
+ if prefixlen_diff != 1:
+ raise ValueError("cannot set prefixlen_diff and new_prefix")
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError("prefix length diff must be > 0")
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ "prefix length diff %d is invalid for netblock %s"
+ % (new_prefixlen, self)
+ )
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in _compat_range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
+
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError("new prefix must be shorter")
+ if prefixlen_diff != 1:
+ raise ValueError("cannot set prefixlen_diff and new_prefix")
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ "current prefixlen is %d, cannot have a prefixlen_diff of %d"
+ % (self.prefixlen, prefixlen_diff)
+ )
+ return self.__class__(
+ (
+ int(self.network_address)
+ & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen,
+ )
+ )
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (
+ self.network_address.is_multicast
+ and self.broadcast_address.is_multicast
+ )
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (a, b)
+ )
+ return (
+ b.network_address <= a.network_address
+ and b.broadcast_address >= a.broadcast_address
+ )
+ except AttributeError:
+ raise TypeError(
+ "Unable to test subnet containment "
+ "between %s and %s" % (a, b)
+ )
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (
+ self.network_address.is_reserved
+ and self.broadcast_address.is_reserved
+ )
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (
+ self.network_address.is_link_local
+ and self.broadcast_address.is_link_local
+ )
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (
+ self.network_address.is_private
+ and self.broadcast_address.is_private
+ )
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (
+ self.network_address.is_unspecified
+ and self.broadcast_address.is_unspecified
+ )
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (
+ self.network_address.is_loopback
+ and self.broadcast_address.is_loopback
+ )
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2 ** IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset("0123456789")
+
+ # the valid octets for host and netmasks. only useful for IPv4.
+ _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return _compat_str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError("Address cannot be empty")
+
+ octets = ip_str.split(".")
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return _compat_int_from_byte_vals(
+ map(cls._parse_octet, octets), "big"
+ )
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ # Any octets that look like they *might* be written in octal,
+ # and which don't look exactly the same in both octal and
+ # decimal are rejected as ambiguous
+ if octet_int > 7 and octet_str[0] == "0":
+ msg = "Ambiguous (octal/decimal) value in %r not permitted"
+ raise ValueError(msg % octet_str)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return ".".join(
+ _compat_str(
+ struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b
+ )
+ for b in _compat_to_bytes(ip_int, 4, "big")
+ )
+
+ def _is_hostmask(self, ip_str):
+ """Test if the IP string is a hostmask (rather than a netmask).
+
+ Args:
+ ip_str: A string, the potential hostmask.
+
+ Returns:
+ A boolean, True if the IP string is a hostmask.
+
+ """
+ bits = ip_str.split(".")
+ try:
+ parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+ except ValueError:
+ return False
+ if len(parts) != len(bits):
+ return False
+ if parts[0] < parts[-1]:
+ return True
+ return False
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = _compat_str(self).split(".")[::-1]
+ return ".".join(reverse_octets) + ".in-addr.arpa"
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ("_ip", "__weakref__")
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, "big")
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if "/" in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ return (
+ self not in self._constants._public_network and not self.is_private
+ )
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return "%s/%d" % (
+ self._string_from_ip_int(self._ip),
+ self.network.prefixlen,
+ )
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (
+ self.network < other.network
+ or self.network == other.network
+ and address_less
+ )
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
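+ # Example of the views above (a sketch): for IPv4Interface('192.0.2.5/24'):
+ #   .ip             -> IPv4Address('192.0.2.5')
+ #   .network        -> IPv4Network('192.0.2.0/24')
+ #   .with_prefixlen -> '192.0.2.5/24'
+ #   .with_netmask   -> '192.0.2.5/255.255.255.0'
+ #   .with_hostmask  -> '192.0.2.5/0.0.0.255'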
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.2.0/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Constructing from a packed address or integer
+ if isinstance(address, (_compat_int_types, bytes)):
+ self.network_address = IPv4Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen
+ )
+ # fixme: address/network test here.
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ # We weren't given an address[1]
+ arg = self._max_prefixlen
+ self.network_address = IPv4Address(address[0])
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError("%s has host bits set" % self)
+ else:
+ self.network_address = IPv4Address(
+ packed & int(self.netmask)
+ )
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+ self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (
+ IPv4Address(int(self.network_address) & int(self.netmask))
+ != self.network_address
+ ):
+ raise ValueError("%s has host bits set" % self)
+ self.network_address = IPv4Address(
+ int(self.network_address) & int(self.netmask)
+ )
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
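+ # Behaviour of the strict flag handled above (a sketch):
+ #   IPv4Network('192.0.2.1/24')               # raises ValueError, host bits set
+ #   IPv4Network('192.0.2.1/24', strict=False) # -> IPv4Network('192.0.2.0/24')
+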
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (
+ not (
+ self.network_address in IPv4Network("100.64.0.0/10")
+ and self.broadcast_address in IPv4Network("100.64.0.0/10")
+ )
+ and not self.is_private
+ )
+
+
+class _IPv4Constants(object):
+
+ _linklocal_network = IPv4Network("169.254.0.0/16")
+
+ _loopback_network = IPv4Network("127.0.0.0/8")
+
+ _multicast_network = IPv4Network("224.0.0.0/4")
+
+ _public_network = IPv4Network("100.64.0.0/10")
+
+ _private_networks = [
+ IPv4Network("0.0.0.0/8"),
+ IPv4Network("10.0.0.0/8"),
+ IPv4Network("127.0.0.0/8"),
+ IPv4Network("169.254.0.0/16"),
+ IPv4Network("172.16.0.0/12"),
+ IPv4Network("192.0.0.0/29"),
+ IPv4Network("192.0.0.170/31"),
+ IPv4Network("192.0.2.0/24"),
+ IPv4Network("192.168.0.0/16"),
+ IPv4Network("198.18.0.0/15"),
+ IPv4Network("198.51.100.0/24"),
+ IPv4Network("203.0.113.0/24"),
+ IPv4Network("240.0.0.0/4"),
+ IPv4Network("255.255.255.255/32"),
+ ]
+
+ _reserved_network = IPv4Network("240.0.0.0/4")
+
+ _unspecified_address = IPv4Address("0.0.0.0")
+
+
+IPv4Address._constants = _IPv4Constants
+
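+# The accepted prefix notations in one place (a sketch; all three forms
+# describe the same /24, per the IPv4Network constructor docstring above):
+#   IPv4Network('192.0.2.0/24').prefixlen            -> 24
+#   IPv4Network('192.0.2.0/255.255.255.0').prefixlen -> 24
+#   IPv4Network('192.0.2.0/0.0.0.255').prefixlen     -> 24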
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2 ** IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
+ _max_prefixlen = IPV6LENGTH
+
+ # There are only a handful of valid v6 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ Unlike the IPv4 variant, netmask strings are not accepted here;
+ only the two prefix-length forms above are valid.
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
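+ # Example (a sketch): _make_netmask(64) and _make_netmask('64') both
+ # return (IPv6Address('ffff:ffff:ffff:ffff::'), 64); the tuple is then
+ # cached, so repeated lookups are plain dictionary hits.
+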
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+ Returns:
+ An int, the IPv6 address
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError("Address cannot be empty")
+
+ parts = ip_str.split(":")
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if "." in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+ parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append("%x" % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (
+ _max_parts - 1,
+ ip_str,
+ )
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in _compat_range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
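+ # Example (a sketch): '2001:db8::1'.split(':') gives
+ # ['2001', 'db8', '', '1']; the '::' expands to five zero hextets, so
+ # the result is int(IPv6Address('2001:db8::')) + 1, i.e.
+ # 42540766411282592856903984951653826561.
+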
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == "0":
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (
+ best_doublecolon_start + best_doublecolon_len
+ )
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += [""]
+ hextets[best_doublecolon_start:best_doublecolon_end] = [""]
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [""] + hextets
+
+ return hextets
+
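+ # Example (a sketch): the longest run of '0' hextets collapses, so
+ #   _compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
+ # returns ['2001', 'db8', '', '1'] and ':'.join(...) yields '2001:db8::1'.
+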
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError("IPv6 address is too large")
+
+ hex_str = "%032x" % ip_int
+ hextets = ["%x" % int(hex_str[x : x + 4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ":".join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = _compat_str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = _compat_str(self.ip)
+ else:
+ ip_str = _compat_str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = "%032x" % ip_int
+ parts = [hex_str[x : x + 4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return "%s/%d" % (":".join(parts), self._prefixlen)
+ return ":".join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+ This implements the method described in RFC3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(":", "")
+ return ".".join(reverse_chars) + ".ip6.arpa"
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ("_ip", "__weakref__")
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, "big")
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if "/" in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (
+ IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF),
+ )
+
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
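+ # Examples for the three embedded-IPv4 views above (a sketch; the Teredo
+ # line uses a commonly cited sample address):
+ #   IPv6Address('::ffff:192.0.2.1').ipv4_mapped -> IPv4Address('192.0.2.1')
+ #   IPv6Address('2002:c000:201::').sixtofour    -> IPv4Address('192.0.2.1')
+ #   IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
+ #       -> (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))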
+
+class IPv6Interface(IPv6Address):
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv6Address.__init__(self, address)
+ self.network = IPv6Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+ if isinstance(address, tuple):
+ IPv6Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv6Address.__init__(self, addr[0])
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return "%s/%d" % (
+ self._string_from_ip_int(self._ip),
+ self.network.prefixlen,
+ )
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (
+ self.network < other.network
+ or self.network == other.network
+ and address_less
+ )
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6Network('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+ strict: A boolean. If true, ensure that we have been passed
+ a true network address, e.g., 2001:db8::1000/124, and not an
+ IP address on a network, e.g., 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Efficient constructor from integer or packed address
+ if isinstance(address, (bytes, _compat_int_types)):
+ self.network_address = IPv6Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen
+ )
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ self.network_address = IPv6Address(address[0])
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError("%s has host bits set" % self)
+ else:
+ self.network_address = IPv6Address(
+ packed & int(self.netmask)
+ )
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (
+ IPv6Address(int(self.network_address) & int(self.netmask))
+ != self.network_address
+ ):
+ raise ValueError("%s has host bits set" % self)
+ self.network_address = IPv6Address(
+ int(self.network_address) & int(self.netmask)
+ )
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
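+ # Example (a sketch): unlike __iter__, hosts() skips the all-zero
+ # Subnet-Router anycast address, so
+ #   list(IPv6Network('2001:db8::/126').hosts())
+ # yields only 2001:db8::1, 2001:db8::2 and 2001:db8::3.
+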
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (
+ self.network_address.is_site_local
+ and self.broadcast_address.is_site_local
+ )
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network("fe80::/10")
+
+ _multicast_network = IPv6Network("ff00::/8")
+
+ _private_networks = [
+ IPv6Network("::1/128"),
+ IPv6Network("::/128"),
+ IPv6Network("::ffff:0:0/96"),
+ IPv6Network("100::/64"),
+ IPv6Network("2001::/23"),
+ IPv6Network("2001:2::/48"),
+ IPv6Network("2001:db8::/32"),
+ IPv6Network("2001:10::/28"),
+ IPv6Network("fc00::/7"),
+ IPv6Network("fe80::/10"),
+ ]
+
+ _reserved_networks = [
+ IPv6Network("::/8"),
+ IPv6Network("100::/8"),
+ IPv6Network("200::/7"),
+ IPv6Network("400::/6"),
+ IPv6Network("800::/5"),
+ IPv6Network("1000::/4"),
+ IPv6Network("4000::/3"),
+ IPv6Network("6000::/3"),
+ IPv6Network("8000::/3"),
+ IPv6Network("A000::/3"),
+ IPv6Network("C000::/3"),
+ IPv6Network("E000::/4"),
+ IPv6Network("F000::/5"),
+ IPv6Network("F800::/6"),
+ IPv6Network("FE00::/9"),
+ ]
+
+ _sitelocal_network = IPv6Network("fec0::/10")
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py
new file mode 100644
index 00000000..68608d1b
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py
@@ -0,0 +1,27 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The base class for all resource modules
+"""
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import (
+ get_resource_connection,
+)
+
+
+class ConfigBase(object):
+ """ The base class for all resource modules
+ """
+
+ ACTION_STATES = ["merged", "replaced", "overridden", "deleted"]
+
+ def __init__(self, module):
+ self._module = module
+ self.state = module.params["state"]
+ self._connection = None
+
+ if self.state not in ["rendered", "parsed"]:
+ self._connection = get_resource_connection(module)
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py
new file mode 100644
index 00000000..bc458eb5
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py
@@ -0,0 +1,473 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+import re
+import hashlib
+
+from ansible.module_utils.six.moves import zip
+from ansible.module_utils._text import to_bytes, to_native
+
+DEFAULT_COMMENT_TOKENS = ["#", "!", "/*", "*/", "echo"]
+
+DEFAULT_IGNORE_LINES_RE = set(
+ [
+ re.compile(r"Using \d+ out of \d+ bytes"),
+ re.compile(r"Building configuration"),
+ re.compile(r"Current configuration : \d+ bytes"),
+ ]
+)
+
+
+try:
+ Pattern = re._pattern_type
+except AttributeError:
+ Pattern = re.Pattern
+
+
+class ConfigLine(object):
+ def __init__(self, raw):
+ self.text = str(raw).strip()
+ self.raw = raw
+ self._children = list()
+ self._parents = list()
+
+ def __str__(self):
+ return self.raw
+
+ def __eq__(self, other):
+ return self.line == other.line
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __getitem__(self, key):
+ for item in self._children:
+ if item.text == key:
+ return item
+ raise KeyError(key)
+
+ @property
+ def line(self):
+ line = self.parents
+ line.append(self.text)
+ return " ".join(line)
+
+ @property
+ def children(self):
+ return _obj_to_text(self._children)
+
+ @property
+ def child_objs(self):
+ return self._children
+
+ @property
+ def parents(self):
+ return _obj_to_text(self._parents)
+
+ @property
+ def path(self):
+ config = _obj_to_raw(self._parents)
+ config.append(self.raw)
+ return "\n".join(config)
+
+ @property
+ def has_children(self):
+ return len(self._children) > 0
+
+ @property
+ def has_parents(self):
+ return len(self._parents) > 0
+
+ def add_child(self, obj):
+ if not isinstance(obj, ConfigLine):
+ raise AssertionError("child must be of type `ConfigLine`")
+ self._children.append(obj)
+
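+# Example of the hierarchy ConfigLine models (a sketch): for a child line
+# 'description uplink' whose _parents is [ConfigLine('interface Eth1')]:
+#   child.line    -> 'interface Eth1 description uplink'
+#   child.parents -> ['interface Eth1']
+#   child.path    -> 'interface Eth1\ndescription uplink'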
+
+def ignore_line(text, tokens=None):
+ for item in tokens or DEFAULT_COMMENT_TOKENS:
+ if text.startswith(item):
+ return True
+ for regex in DEFAULT_IGNORE_LINES_RE:
+ if regex.match(text):
+ return True
+
+
+def _obj_to_text(x):
+ return [o.text for o in x]
+
+
+def _obj_to_raw(x):
+ return [o.raw for o in x]
+
+
+def _obj_to_block(objects, visited=None):
+ items = list()
+ for o in objects:
+ if o not in items:
+ items.append(o)
+ for child in o._children:
+ if child not in items:
+ items.append(child)
+ return _obj_to_raw(items)
+
+
+def dumps(objects, output="block", comments=False):
+ if output == "block":
+ items = _obj_to_block(objects)
+ elif output == "commands":
+ items = _obj_to_text(objects)
+ elif output == "raw":
+ items = _obj_to_raw(objects)
+ else:
+ raise TypeError("unknown value supplied for keyword output")
+
+ if output == "block":
+ if comments:
+ for index, item in enumerate(items):
+ nextitem = index + 1
+ if (
+ nextitem < len(items)
+ and not item.startswith(" ")
+ and items[nextitem].startswith(" ")
+ ):
+ item = "!\n%s" % item
+ items[index] = item
+ items.append("!")
+ items.append("end")
+
+ return "\n".join(items)
+
+
+class NetworkConfig(object):
+ def __init__(self, indent=1, contents=None, ignore_lines=None):
+ self._indent = indent
+ self._items = list()
+ self._config_text = None
+
+ if ignore_lines:
+ for item in ignore_lines:
+ if not isinstance(item, Pattern):
+ item = re.compile(item)
+ DEFAULT_IGNORE_LINES_RE.add(item)
+
+ if contents:
+ self.load(contents)
+
+ @property
+ def items(self):
+ return self._items
+
+ @property
+ def config_text(self):
+ return self._config_text
+
+ @property
+ def sha1(self):
+ sha1 = hashlib.sha1()
+ sha1.update(to_bytes(str(self), errors="surrogate_or_strict"))
+ return sha1.digest()
+
+ def __getitem__(self, key):
+ for line in self:
+ if line.text == key:
+ return line
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(self._items)
+
+ def __str__(self):
+ return "\n".join([c.raw for c in self.items])
+
+ def __len__(self):
+ return len(self._items)
+
+ def load(self, s):
+ self._config_text = s
+ self._items = self.parse(s)
+
+ def loadfp(self, fp):
+ with open(fp) as f:
+ return self.load(f.read())
+
+ def parse(self, lines, comment_tokens=None):
+ toplevel = re.compile(r"\S")
+ childline = re.compile(r"^\s*(.+)$")
+ entry_reg = re.compile(r"([{};])")
+
+ ancestors = list()
+ config = list()
+
+ indents = [0]
+
+ for linenum, line in enumerate(
+ to_native(lines, errors="surrogate_or_strict").split("\n")
+ ):
+ text = entry_reg.sub("", line).strip()
+
+ cfg = ConfigLine(line)
+
+ if not text or ignore_line(text, comment_tokens):
+ continue
+
+ # handle top level commands
+ if toplevel.match(line):
+ ancestors = [cfg]
+ indents = [0]
+
+ # handle sub level commands
+ else:
+ match = childline.match(line)
+ line_indent = match.start(1)
+
+ if line_indent < indents[-1]:
+ while indents[-1] > line_indent:
+ indents.pop()
+
+ if line_indent > indents[-1]:
+ indents.append(line_indent)
+
+ curlevel = len(indents) - 1
+ parent_level = curlevel - 1
+
+ cfg._parents = ancestors[:curlevel]
+
+ if curlevel > len(ancestors):
+ config.append(cfg)
+ continue
+
+ for i in range(curlevel, len(ancestors)):
+ ancestors.pop()
+
+ ancestors.append(cfg)
+ ancestors[parent_level].add_child(cfg)
+
+ config.append(cfg)
+
+ return config
+
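+ # Example (a sketch): parsing an indented, IOS-style blob builds the
+ # parent/child tree used by the lookup helpers below:
+ #   cfg = NetworkConfig(indent=1)
+ #   cfg.load('interface Eth1\n description uplink')
+ #   cfg['interface Eth1'].children -> ['description uplink']
+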
+ def get_object(self, path):
+ for item in self.items:
+ if item.text == path[-1]:
+ if item.parents == path[:-1]:
+ return item
+
+ def get_block(self, path):
+ if not isinstance(path, list):
+ raise AssertionError("path argument must be a list object")
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError("path does not exist in config")
+ return self._expand_block(obj)
+
+ def get_block_config(self, path):
+ block = self.get_block(path)
+ return dumps(block, "block")
+
+ def _expand_block(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj._children:
+ if child in S:
+ continue
+ self._expand_block(child, S)
+ return S
+
+ def _diff_line(self, other):
+ updates = list()
+ for item in self.items:
+ if item not in other:
+ updates.append(item)
+ return updates
+
+ def _diff_strict(self, other):
+ updates = list()
+ # The block extracted from other carries only its immediate
+ # parent, not the full ancestry. With multiple levels of
+ # parents we need to prepend the missing ones.
+ if other and isinstance(other, list) and len(other) > 0:
+ start_other = other[0]
+ if start_other.parents:
+ for parent in start_other.parents:
+ other.insert(0, ConfigLine(parent))
+ for index, line in enumerate(self.items):
+ try:
+ if str(line).strip() != str(other[index]).strip():
+ updates.append(line)
+ except (AttributeError, IndexError):
+ updates.append(line)
+ return updates
+
+ def _diff_exact(self, other):
+ updates = list()
+ if len(other) != len(self.items):
+ updates.extend(self.items)
+ else:
+ for ours, theirs in zip(self.items, other):
+ if ours != theirs:
+ updates.extend(self.items)
+ break
+ return updates
+
+ def difference(self, other, match="line", path=None, replace=None):
+ """Perform a config diff against the another network config
+
+ :param other: instance of NetworkConfig to diff against
+ :param match: type of diff to perform. valid values are 'line',
+ 'strict', 'exact'
+ :param path: context in the network config to filter the diff
+ :param replace: the method used to generate the replacement lines.
+ valid values are 'block', 'line'
+
+ :returns: a string of lines that are different
+ """
+ if path and match != "line":
+ try:
+ other = other.get_block(path)
+ except ValueError:
+ other = list()
+ else:
+ other = other.items
+
+ # generate a list of ConfigLines that aren't in other
+ meth = getattr(self, "_diff_%s" % match)
+ updates = meth(other)
+
+ if replace == "block":
+ parents = list()
+ for item in updates:
+ if not item.has_parents:
+ parents.append(item)
+ else:
+ for p in item._parents:
+ if p not in parents:
+ parents.append(p)
+
+ updates = list()
+ for item in parents:
+ updates.extend(self._expand_block(item))
+
+ visited = set()
+ expanded = list()
+
+ for item in updates:
+ for p in item._parents:
+ if p.line not in visited:
+ visited.add(p.line)
+ expanded.append(p)
+ expanded.append(item)
+ visited.add(item.line)
+
+ return expanded
+
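+ # Example (a sketch): a default line-wise diff, rendered back into
+ # commands with dumps():
+ #   running   = NetworkConfig(indent=1, contents='hostname r1')
+ #   candidate = NetworkConfig(indent=1, contents='hostname r2')
+ #   updates = candidate.difference(running)
+ #   dumps(updates, 'commands')  -> 'hostname r2'
+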
+ def add(self, lines, parents=None):
+ ancestors = list()
+ offset = 0
+ obj = None
+
+ # global config command
+ if not parents:
+ for line in lines:
+ # handle ignore lines
+ if ignore_line(line):
+ continue
+
+ item = ConfigLine(line)
+ item.raw = line
+ if item not in self.items:
+ self.items.append(item)
+
+ else:
+ for index, p in enumerate(parents):
+ try:
+ i = index + 1
+ obj = self.get_block(parents[:i])[0]
+ ancestors.append(obj)
+
+ except ValueError:
+ # add parent to config
+ offset = index * self._indent
+ obj = ConfigLine(p)
+ obj.raw = p.rjust(len(p) + offset)
+ if ancestors:
+ obj._parents = list(ancestors)
+ ancestors[-1]._children.append(obj)
+ self.items.append(obj)
+ ancestors.append(obj)
+
+ # add child objects
+ for line in lines:
+ # handle ignore lines
+ if ignore_line(line):
+ continue
+
+ # check if child already exists
+ for child in ancestors[-1]._children:
+ if child.text == line:
+ break
+ else:
+ offset = len(parents) * self._indent
+ item = ConfigLine(line)
+ item.raw = line.rjust(len(line) + offset)
+ item._parents = ancestors
+ ancestors[-1]._children.append(item)
+ self.items.append(item)
+
+
+class CustomNetworkConfig(NetworkConfig):
+ def items_text(self):
+ return [item.text for item in self.items]
+
+ def expand_section(self, configobj, S=None):
+ if S is None:
+ S = list()
+ S.append(configobj)
+ for child in configobj.child_objs:
+ if child in S:
+ continue
+ self.expand_section(child, S)
+ return S
+
+ def to_block(self, section):
+ return "\n".join([item.raw for item in section])
+
+ def get_section(self, path):
+ try:
+ section = self.get_section_objects(path)
+ return self.to_block(section)
+ except ValueError:
+ return list()
+
+ def get_section_objects(self, path):
+ if not isinstance(path, list):
+ path = [path]
+ obj = self.get_object(path)
+ if not obj:
+ raise ValueError("path does not exist in config")
+ return self.expand_section(obj)
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py
new file mode 100644
index 00000000..477d3184
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py
@@ -0,0 +1,162 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The facts base class.
+This contains methods common to all facts subsets.
+"""
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import (
+ get_resource_connection,
+)
+from ansible.module_utils.six import iteritems
+
+
+class FactsBase(object):
+ """
+ The facts base class
+ """
+
+ def __init__(self, module):
+ self._module = module
+ self._warnings = []
+ self._gather_subset = module.params.get("gather_subset")
+ self._gather_network_resources = module.params.get(
+ "gather_network_resources"
+ )
+ self._connection = None
+ if module.params.get("state") not in ["rendered", "parsed"]:
+ self._connection = get_resource_connection(module)
+
+ self.ansible_facts = {"ansible_network_resources": {}}
+ self.ansible_facts["ansible_net_gather_network_resources"] = list()
+ self.ansible_facts["ansible_net_gather_subset"] = list()
+
+ if not self._gather_subset:
+ self._gather_subset = ["!config"]
+ if not self._gather_network_resources:
+ self._gather_network_resources = ["!all"]
+
+ def gen_runable(self, subsets, valid_subsets, resource_facts=False):
+ """ Generate the runable subset
+
+ :param module: The module instance
+ :param subsets: The provided subsets
+ :param valid_subsets: The valid subsets
+ :param resource_facts: A boolean flag
+ :rtype: list
+ :returns: The runable subsets
+ """
+ runable_subsets = set()
+ exclude_subsets = set()
+ minimal_gather_subset = set()
+ if not resource_facts:
+ minimal_gather_subset = frozenset(["default"])
+
+ for subset in subsets:
+ if subset == "all":
+ runable_subsets.update(valid_subsets)
+ continue
+ if subset == "min" and minimal_gather_subset:
+ runable_subsets.update(minimal_gather_subset)
+ continue
+ if subset.startswith("!"):
+ subset = subset[1:]
+ if subset == "min":
+ exclude_subsets.update(minimal_gather_subset)
+ continue
+ if subset == "all":
+ exclude_subsets.update(
+ valid_subsets - minimal_gather_subset
+ )
+ continue
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in valid_subsets:
+ self._module.fail_json(
+ msg="Subset must be one of [%s], got %s"
+ % (
+ ", ".join(sorted([item for item in valid_subsets])),
+ subset,
+ )
+ )
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(valid_subsets)
+ runable_subsets.difference_update(exclude_subsets)
+ return runable_subsets
+
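+ # Example (a sketch): with valid_subsets frozenset(['default', 'config',
+ # 'interfaces']) and subsets ['!config'], nothing is explicitly included,
+ # so all valid subsets minus the exclusions are returned:
+ # set(['default', 'interfaces']).
+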
+ def get_network_resources_facts(
+ self, facts_resource_obj_map, resource_facts_type=None, data=None
+ ):
+ """
+ :param facts_resource_obj_map: map of resource subset names to the
+ classes that gather their facts
+ :param resource_facts_type: the resource subsets to gather; defaults
+ to the module's gather_network_resources parameter
+ :param data: previously collected configuration
+ :return: None; the collected facts are stored in self.ansible_facts
+ """
+ if not resource_facts_type:
+ resource_facts_type = self._gather_network_resources
+
+ restorun_subsets = self.gen_runable(
+ resource_facts_type,
+ frozenset(facts_resource_obj_map.keys()),
+ resource_facts=True,
+ )
+ if restorun_subsets:
+ self.ansible_facts["ansible_net_gather_network_resources"] = list(
+ restorun_subsets
+ )
+ instances = list()
+ for key in restorun_subsets:
+ fact_cls_obj = facts_resource_obj_map.get(key)
+ if fact_cls_obj:
+ instances.append(fact_cls_obj(self._module))
+ else:
+ self._warnings.extend(
+ [
+ "network resource fact gathering for '%s' is not supported"
+ % key
+ ]
+ )
+
+ for inst in instances:
+ inst.populate_facts(self._connection, self.ansible_facts, data)
+
+ def get_network_legacy_facts(
+ self, fact_legacy_obj_map, legacy_facts_type=None
+ ):
+ if not legacy_facts_type:
+ legacy_facts_type = self._gather_subset
+
+ runable_subsets = self.gen_runable(
+ legacy_facts_type, frozenset(fact_legacy_obj_map.keys())
+ )
+ if runable_subsets:
+ facts = dict()
+ # default subset should always be returned with legacy facts subsets
+ if "default" not in runable_subsets:
+ runable_subsets.add("default")
+ self.ansible_facts["ansible_net_gather_subset"] = list(
+ runable_subsets
+ )
+
+ instances = list()
+ for key in runable_subsets:
+ instances.append(fact_legacy_obj_map[key](self._module))
+
+ for inst in instances:
+ inst.populate()
+ facts.update(inst.facts)
+ self._warnings.extend(inst.warnings)
+
+ for key, value in iteritems(facts):
+ key = "ansible_net_%s" % key
+ self.ansible_facts[key] = value
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py
new file mode 100644
index 00000000..53a91e8c
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py
@@ -0,0 +1,179 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2017 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+import sys
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.connection import Connection, ConnectionError
+
+try:
+ from ncclient.xml_ import NCElement, new_ele, sub_ele
+
+ HAS_NCCLIENT = True
+except (ImportError, AttributeError):
+ HAS_NCCLIENT = False
+
+try:
+ from lxml.etree import Element, fromstring, XMLSyntaxError
+except ImportError:
+ from xml.etree.ElementTree import Element, fromstring
+
+ if sys.version_info < (2, 7):
+ from xml.parsers.expat import ExpatError as XMLSyntaxError
+ else:
+ from xml.etree.ElementTree import ParseError as XMLSyntaxError
+
+NS_MAP = {"nc": "urn:ietf:params:xml:ns:netconf:base:1.0"}
+
+
+def exec_rpc(module, *args, **kwargs):
+ connection = NetconfConnection(module._socket_path)
+ return connection.execute_rpc(*args, **kwargs)
+
+
+class NetconfConnection(Connection):
+ def __init__(self, socket_path):
+ super(NetconfConnection, self).__init__(socket_path)
+
+ def __rpc__(self, name, *args, **kwargs):
+ """Executes the json-rpc and returns the output received
+ from remote device.
+ :name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
+ :args: Ordered list of params passed as arguments to rpc method
+ :kwargs: Dict of valid key, value pairs passed as arguments to rpc method
+
+ For usage refer the respective connection plugin docs.
+ """
+ self.check_rc = kwargs.pop("check_rc", True)
+ self.ignore_warning = kwargs.pop("ignore_warning", True)
+
+ response = self._exec_jsonrpc(name, *args, **kwargs)
+ if "error" in response:
+ rpc_error = response["error"].get("data")
+ return self.parse_rpc_error(
+ to_bytes(rpc_error, errors="surrogate_then_replace")
+ )
+
+ return fromstring(
+ to_bytes(response["result"], errors="surrogate_then_replace")
+ )
+
+ def parse_rpc_error(self, rpc_error):
+ if self.check_rc:
+ try:
+ error_root = fromstring(rpc_error)
+ root = Element("root")
+ root.append(error_root)
+
+ error_list = root.findall(".//nc:rpc-error", NS_MAP)
+ if not error_list:
+ raise ConnectionError(
+ to_text(rpc_error, errors="surrogate_then_replace")
+ )
+
+ warnings = []
+ for error in error_list:
+ message_ele = error.find("./nc:error-message", NS_MAP)
+
+ if message_ele is None:
+ message_ele = error.find("./nc:error-info", NS_MAP)
+
+ message = (
+ message_ele.text if message_ele is not None else None
+ )
+
+ severity = error.find("./nc:error-severity", NS_MAP).text
+
+ if (
+ severity == "warning"
+ and self.ignore_warning
+ and message is not None
+ ):
+ warnings.append(message)
+ else:
+ raise ConnectionError(
+ to_text(rpc_error, errors="surrogate_then_replace")
+ )
+ return warnings
+ except XMLSyntaxError:
+ raise ConnectionError(rpc_error)
+
+
+def transform_reply():
+ return b"""<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+ <xsl:output method="xml" indent="no"/>
+
+ <xsl:template match="/|comment()|processing-instruction()">
+ <xsl:copy>
+ <xsl:apply-templates/>
+ </xsl:copy>
+ </xsl:template>
+
+ <xsl:template match="*">
+ <xsl:element name="{local-name()}">
+ <xsl:apply-templates select="@*|node()"/>
+ </xsl:element>
+ </xsl:template>
+
+ <xsl:template match="@*">
+ <xsl:attribute name="{local-name()}">
+ <xsl:value-of select="."/>
+ </xsl:attribute>
+ </xsl:template>
+ </xsl:stylesheet>
+ """
+
+
+# Note: Workaround for ncclient 0.5.3
+def remove_namespaces(data):
+ if not HAS_NCCLIENT:
+ raise ImportError(
+ "ncclient is required but does not appear to be installed. "
+ "It can be installed using `pip install ncclient`"
+ )
+ return NCElement(data, transform_reply()).data_xml
+
+
+def build_root_xml_node(tag):
+ return new_ele(tag)
+
+
+def build_child_xml_node(parent, tag, text=None, attrib=None):
+ element = sub_ele(parent, tag)
+ if text:
+ element.text = to_text(text)
+ if attrib:
+ element.attrib.update(attrib)
+ return element
+
+
+def build_subtree(parent, path):
+ element = parent
+ for field in path.split("/"):
+ sub_element = build_child_xml_node(element, field)
+ element = sub_element
+ return element
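+
+
+# Example (a sketch): build a nested XML skeleton and fill in one leaf:
+#   root = build_root_xml_node('config')
+#   leaf = build_subtree(root, 'system/login/user')
+#   build_child_xml_node(leaf, 'name', 'admin')
+# which serializes as
+# <config><system><login><user><name>admin</name></user></login></system></config>.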
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py
new file mode 100644
index 00000000..555fc713
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py
@@ -0,0 +1,275 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import traceback
+import json
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import (
+ NetconfConnection,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
+ Cli,
+)
+from ansible.module_utils.six import iteritems
+
+
+NET_TRANSPORT_ARGS = dict(
+ host=dict(required=True),
+ port=dict(type="int"),
+ username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
+ password=dict(
+ no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"])
+ ),
+ ssh_keyfile=dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
+ ),
+ authorize=dict(
+ default=False,
+ fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]),
+ type="bool",
+ ),
+ auth_pass=dict(
+ no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"])
+ ),
+ provider=dict(type="dict", no_log=True),
+ transport=dict(choices=list()),
+ timeout=dict(default=10, type="int"),
+)
+
+NET_CONNECTION_ARGS = dict()
+
+NET_CONNECTIONS = dict()
+
+
+def _transitional_argument_spec():
+ argument_spec = {}
+ for key, value in iteritems(NET_TRANSPORT_ARGS):
+ value["required"] = False
+ argument_spec[key] = value
+ return argument_spec
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class ModuleStub(object):
+ def __init__(self, argument_spec, fail_json):
+ self.params = dict()
+ for key, value in argument_spec.items():
+ self.params[key] = value.get("default")
+ self.fail_json = fail_json
+
+
+class NetworkError(Exception):
+ def __init__(self, msg, **kwargs):
+ super(NetworkError, self).__init__(msg)
+ self.kwargs = kwargs
+
+
+class Config(object):
+ def __init__(self, connection):
+ self.connection = connection
+
+ def __call__(self, commands, **kwargs):
+ lines = to_list(commands)
+ return self.connection.configure(lines, **kwargs)
+
+ def load_config(self, commands, **kwargs):
+ commands = to_list(commands)
+ return self.connection.load_config(commands, **kwargs)
+
+ def get_config(self, **kwargs):
+ return self.connection.get_config(**kwargs)
+
+ def save_config(self):
+ return self.connection.save_config()
+
+
+class NetworkModule(AnsibleModule):
+ def __init__(self, *args, **kwargs):
+ connect_on_load = kwargs.pop("connect_on_load", True)
+
+ argument_spec = NET_TRANSPORT_ARGS.copy()
+ argument_spec["transport"]["choices"] = NET_CONNECTIONS.keys()
+ argument_spec.update(NET_CONNECTION_ARGS.copy())
+
+ if kwargs.get("argument_spec"):
+ argument_spec.update(kwargs["argument_spec"])
+ kwargs["argument_spec"] = argument_spec
+
+ super(NetworkModule, self).__init__(*args, **kwargs)
+
+ self.connection = None
+ self._cli = None
+ self._config = None
+
+ try:
+ transport = self.params["transport"] or "__default__"
+ cls = NET_CONNECTIONS[transport]
+ self.connection = cls()
+ except KeyError:
+ self.fail_json(
+ msg="Unknown transport or no default transport specified"
+ )
+ except (TypeError, NetworkError) as exc:
+ self.fail_json(
+ msg=to_native(exc), exception=traceback.format_exc()
+ )
+
+ if connect_on_load:
+ self.connect()
+
+ @property
+ def cli(self):
+ if not self.connected:
+ self.connect()
+ if self._cli:
+ return self._cli
+ self._cli = Cli(self.connection)
+ return self._cli
+
+ @property
+ def config(self):
+ if not self.connected:
+ self.connect()
+ if self._config:
+ return self._config
+ self._config = Config(self.connection)
+ return self._config
+
+ @property
+ def connected(self):
+ return self.connection._connected
+
+ def _load_params(self):
+ super(NetworkModule, self)._load_params()
+ provider = self.params.get("provider") or dict()
+ for key, value in provider.items():
+ for args in [NET_TRANSPORT_ARGS, NET_CONNECTION_ARGS]:
+ if key in args:
+ if self.params.get(key) is None and value is not None:
+ self.params[key] = value
+
+ def connect(self):
+ try:
+ if not self.connected:
+ self.connection.connect(self.params)
+ if self.params["authorize"]:
+ self.connection.authorize(self.params)
+ self.log(
+ "connected to %s:%s using %s"
+ % (
+ self.params["host"],
+ self.params["port"],
+ self.params["transport"],
+ )
+ )
+ except NetworkError as exc:
+ self.fail_json(
+ msg=to_native(exc), exception=traceback.format_exc()
+ )
+
+ def disconnect(self):
+ try:
+ if self.connected:
+ self.connection.disconnect()
+ self.log("disconnected from %s" % self.params["host"])
+ except NetworkError as exc:
+ self.fail_json(
+ msg=to_native(exc), exception=traceback.format_exc()
+ )
+
+
+def register_transport(transport, default=False):
+ def register(cls):
+ NET_CONNECTIONS[transport] = cls
+ if default:
+ NET_CONNECTIONS["__default__"] = cls
+ return cls
+
+ return register
+
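+# Illustrative usage of the decorator above (assumed, not part of the original
+# file): a transport class registers itself under a name, optionally as the
+# fallback used when no transport is given.
+#
+#     @register_transport("cli", default=True)
+#     class Cli(CliBase):
+#         ...
+#
+# NetworkModule later resolves self.params["transport"] against NET_CONNECTIONS.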
+
+def add_argument(key, value):
+ NET_CONNECTION_ARGS[key] = value
+
+
+def get_resource_connection(module):
+ if hasattr(module, "_connection"):
+ return module._connection
+
+ capabilities = get_capabilities(module)
+ network_api = capabilities.get("network_api")
+ if network_api in ("cliconf", "nxapi", "eapi", "exosapi"):
+ module._connection = Connection(module._socket_path)
+ elif network_api == "netconf":
+ module._connection = NetconfConnection(module._socket_path)
+ elif network_api == "local":
+ # This isn't supported, but we shouldn't fail here.
+ # Set the connection to a fake connection so it fails sensibly.
+ module._connection = LocalResourceConnection(module)
+ else:
+ module.fail_json(
+ msg="Invalid connection type {0!s}".format(network_api)
+ )
+
+ return module._connection
+
+
+def get_capabilities(module):
+ if hasattr(module, "capabilities"):
+ return module._capabilities
+ try:
+ capabilities = Connection(module._socket_path).get_capabilities()
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ except AssertionError:
+ # No socket_path, connection most likely local.
+ return dict(network_api="local")
+ module._capabilities = json.loads(capabilities)
+
+ return module._capabilities
+
+
+class LocalResourceConnection:
+ def __init__(self, module):
+ self.module = module
+
+ def get(self, *args, **kwargs):
+ self.module.fail_json(
+ msg="Network resource modules not supported over local connection."
+ )
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py
new file mode 100644
index 00000000..2dd1de9e
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py
@@ -0,0 +1,316 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import shlex
+import time
+
+from ansible.module_utils.parsing.convert_bool import (
+ BOOLEANS_TRUE,
+ BOOLEANS_FALSE,
+)
+from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six.moves import zip
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+class FailedConditionsError(Exception):
+ def __init__(self, msg, failed_conditions):
+ super(FailedConditionsError, self).__init__(msg)
+ self.failed_conditions = failed_conditions
+
+
+class FailedConditionalError(Exception):
+ def __init__(self, msg, failed_conditional):
+ super(FailedConditionalError, self).__init__(msg)
+ self.failed_conditional = failed_conditional
+
+
+class AddCommandError(Exception):
+ def __init__(self, msg, command):
+ super(AddCommandError, self).__init__(msg)
+ self.command = command
+
+
+class AddConditionError(Exception):
+ def __init__(self, msg, condition):
+ super(AddConditionError, self).__init__(msg)
+ self.condition = condition
+
+
+class Cli(object):
+ def __init__(self, connection):
+ self.connection = connection
+ self.default_output = connection.default_output or "text"
+ self._commands = list()
+
+ @property
+ def commands(self):
+ return [str(c) for c in self._commands]
+
+ def __call__(self, commands, output=None):
+ objects = list()
+ for cmd in to_list(commands):
+ objects.append(self.to_command(cmd, output))
+ return self.connection.run_commands(objects)
+
+ def to_command(
+ self, command, output=None, prompt=None, response=None, **kwargs
+ ):
+ output = output or self.default_output
+ if isinstance(command, Command):
+ return command
+ if isinstance(prompt, string_types):
+ prompt = re.compile(re.escape(prompt))
+ return Command(
+ command, output, prompt=prompt, response=response, **kwargs
+ )
+
+ def add_commands(self, commands, output=None, **kwargs):
+ for cmd in commands:
+ self._commands.append(self.to_command(cmd, output, **kwargs))
+
+ def run_commands(self):
+ responses = self.connection.run_commands(self._commands)
+ for resp, cmd in zip(responses, self._commands):
+ cmd.response = resp
+
+ # wipe out the commands list to avoid issues if additional
+ # commands are executed later
+ self._commands = list()
+
+ return responses
+
+
+class Command(object):
+ def __init__(
+ self, command, output=None, prompt=None, response=None, **kwargs
+ ):
+
+ self.command = command
+ self.output = output
+ self.command_string = command
+
+ self.prompt = prompt
+ self.response = response
+
+ self.args = kwargs
+
+ def __str__(self):
+ return self.command_string
+
+
+class CommandRunner(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.items = list()
+ self.conditionals = set()
+
+ self.commands = list()
+
+ self.retries = 10
+ self.interval = 1
+
+ self.match = "all"
+
+ self._default_output = module.connection.default_output
+
+ def add_command(
+ self, command, output=None, prompt=None, response=None, **kwargs
+ ):
+ if command in [str(c) for c in self.commands]:
+ raise AddCommandError(
+ "duplicated command detected", command=command
+ )
+ cmd = self.module.cli.to_command(
+ command, output=output, prompt=prompt, response=response, **kwargs
+ )
+ self.commands.append(cmd)
+
+ def get_command(self, command, output=None):
+ for cmd in self.commands:
+ if cmd.command == command:
+ return cmd.response
+ raise ValueError("command '%s' not found" % command)
+
+ def get_responses(self):
+ return [cmd.response for cmd in self.commands]
+
+ def add_conditional(self, condition):
+ try:
+ self.conditionals.add(Conditional(condition))
+ except AttributeError as exc:
+ raise AddConditionError(msg=str(exc), condition=condition)
+
+ def run(self):
+ while self.retries > 0:
+ self.module.cli.add_commands(self.commands)
+ responses = self.module.cli.run_commands()
+
+ for item in list(self.conditionals):
+ if item(responses):
+ if self.match == "any":
+ return item
+ self.conditionals.remove(item)
+
+ if not self.conditionals:
+ break
+
+ time.sleep(self.interval)
+ self.retries -= 1
+ else:
+ failed_conditions = [item.raw for item in self.conditionals]
+ errmsg = (
+ "One or more conditional statements have not been satisfied"
+ )
+ raise FailedConditionsError(errmsg, failed_conditions)
+
+
+class Conditional(object):
+ """Used in command modules to evaluate waitfor conditions
+ """
+
+ OPERATORS = {
+ "eq": ["eq", "=="],
+ "neq": ["neq", "ne", "!="],
+ "gt": ["gt", ">"],
+ "ge": ["ge", ">="],
+ "lt": ["lt", "<"],
+ "le": ["le", "<="],
+ "contains": ["contains"],
+ "matches": ["matches"],
+ }
+
+ def __init__(self, conditional, encoding=None):
+ self.raw = conditional
+ self.negate = False
+ try:
+ components = shlex.split(conditional)
+ key, val = components[0], components[-1]
+ op_components = components[1:-1]
+ if "not" in op_components:
+ self.negate = True
+ op_components.pop(op_components.index("not"))
+ op = op_components[0]
+
+ except ValueError:
+ raise ValueError("failed to parse conditional")
+
+ self.key = key
+ self.func = self._func(op)
+ self.value = self._cast_value(val)
+
+ def __call__(self, data):
+ value = self.get_value(dict(result=data))
+ if not self.negate:
+ return self.func(value)
+ else:
+ return not self.func(value)
+
+ def _cast_value(self, value):
+ if value in BOOLEANS_TRUE:
+ return True
+ elif value in BOOLEANS_FALSE:
+ return False
+ elif re.match(r"^\d+\.d+$", value):
+ return float(value)
+ elif re.match(r"^\d+$", value):
+ return int(value)
+ else:
+ return text_type(value)
+
+ def _func(self, oper):
+ for func, operators in self.OPERATORS.items():
+ if oper in operators:
+ return getattr(self, func)
+ raise AttributeError("unknown operator: %s" % oper)
+
+ def get_value(self, result):
+ try:
+ return self.get_json(result)
+ except (IndexError, TypeError, AttributeError):
+ msg = "unable to apply conditional to result"
+ raise FailedConditionalError(msg, self.raw)
+
+ def get_json(self, result):
+ string = re.sub(r"\[[\'|\"]", ".", self.key)
+ string = re.sub(r"[\'|\"]\]", ".", string)
+ parts = re.split(r"\.(?=[^\]]*(?:\[|$))", string)
+ for part in parts:
+ match = re.findall(r"\[(\S+?)\]", part)
+ if match:
+ key = part[: part.find("[")]
+ result = result[key]
+ for m in match:
+ try:
+ m = int(m)
+ except ValueError:
+ m = str(m)
+ result = result[m]
+ else:
+ result = result.get(part)
+ return result
+
+ def number(self, value):
+ if "." in str(value):
+ return float(value)
+ else:
+ return int(value)
+
+ def eq(self, value):
+ return value == self.value
+
+ def neq(self, value):
+ return value != self.value
+
+ def gt(self, value):
+ return self.number(value) > self.value
+
+ def ge(self, value):
+ return self.number(value) >= self.value
+
+ def lt(self, value):
+ return self.number(value) < self.value
+
+ def le(self, value):
+ return self.number(value) <= self.value
+
+ def contains(self, value):
+ return str(self.value) in value
+
+ def matches(self, value):
+ match = re.search(self.value, value, re.M)
+ return match is not None
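+
+# Illustrative usage of Conditional (assumed, not part of the original file):
+#
+#     c = Conditional("result[0] contains Ethernet")
+#     c(["interface Ethernet1"])   # -> True
+#
+# The key ("result[0]") indexes into dict(result=data), and the operator/value
+# pair ("contains Ethernet") is applied to the extracted string.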
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py
new file mode 100644
index 00000000..64eca157
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py
@@ -0,0 +1,686 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Networking tools for network modules only
+
+import re
+import ast
+import operator
+import socket
+import json
+
+from itertools import chain
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils import basic
+from ansible.module_utils.parsing.convert_bool import boolean
+
+# Backwards compatibility for 3rd party modules
+# TODO(pabelanger): With move to ansible.netcommon, we should clean this code
+# up and have modules import directly themself.
+from ansible.module_utils.common.network import ( # noqa: F401
+ to_bits,
+ is_netmask,
+ is_masklen,
+ to_netmask,
+ to_masklen,
+ to_subnet,
+ to_ipv6_network,
+ VALID_MASKS,
+)
+
+try:
+ from jinja2 import Environment, StrictUndefined
+ from jinja2.exceptions import UndefinedError
+
+ HAS_JINJA2 = True
+except ImportError:
+ HAS_JINJA2 = False
+
+
+OPERATORS = frozenset(["ge", "gt", "eq", "neq", "lt", "le"])
+ALIASES = frozenset(
+ [("min", "ge"), ("max", "le"), ("exactly", "eq"), ("neq", "ne")]
+)
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple, set)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, string_types):
+ item = to_text(item).split("\n")
+ yield item
+
+
+def transform_commands(module):
+ transform = ComplexList(
+ dict(
+ command=dict(key=True),
+ output=dict(),
+ prompt=dict(type="list"),
+ answer=dict(type="list"),
+ newline=dict(type="bool", default=True),
+ sendonly=dict(type="bool", default=False),
+ check_all=dict(type="bool", default=False),
+ ),
+ module,
+ )
+
+ return transform(module.params["commands"])
+
+
+def sort_list(val):
+ if isinstance(val, list):
+ return sorted(val)
+ return val
+
+
+class Entity(object):
+ """Transforms a dict to with an argument spec
+
+ This class will take a dict and apply an Ansible argument spec to the
+ values. The resulting dict will contain all of the keys in the param
+ with appropriate values set.
+
+ Example::
+
+ argument_spec = dict(
+ command=dict(key=True),
+ display=dict(default='text', choices=['text', 'json']),
+ validate=dict(type='bool')
+ )
+ transform = Entity(module, argument_spec)
+ value = dict(command='foo')
+ result = transform(value)
+        print(result)
+ {'command': 'foo', 'display': 'text', 'validate': None}
+
+ Supported argument spec:
+ * key - specifies how to map a single value to a dict
+ * read_from - read and apply the argument_spec from the module
+ * required - a value is required
+ * type - type of value (uses AnsibleModule type checker)
+ * fallback - implements fallback function
+ * choices - set of valid options
+ * default - default value
+ """
+
+ def __init__(
+ self, module, attrs=None, args=None, keys=None, from_argspec=False
+ ):
+ args = [] if args is None else args
+
+ self._attributes = attrs or {}
+ self._module = module
+
+ for arg in args:
+ self._attributes[arg] = dict()
+ if from_argspec:
+ self._attributes[arg]["read_from"] = arg
+ if keys and arg in keys:
+ self._attributes[arg]["key"] = True
+
+ self.attr_names = frozenset(self._attributes.keys())
+
+ _has_key = False
+
+ for name, attr in iteritems(self._attributes):
+ if attr.get("read_from"):
+ if attr["read_from"] not in self._module.argument_spec:
+ module.fail_json(
+ msg="argument %s does not exist" % attr["read_from"]
+ )
+ spec = self._module.argument_spec.get(attr["read_from"])
+ for key, value in iteritems(spec):
+ if key not in attr:
+ attr[key] = value
+
+ if attr.get("key"):
+ if _has_key:
+ module.fail_json(msg="only one key value can be specified")
+ _has_key = True
+ attr["required"] = True
+
+ def serialize(self):
+ return self._attributes
+
+ def to_dict(self, value):
+ obj = {}
+ for name, attr in iteritems(self._attributes):
+ if attr.get("key"):
+ obj[name] = value
+ else:
+ obj[name] = attr.get("default")
+ return obj
+
+ def __call__(self, value, strict=True):
+ if not isinstance(value, dict):
+ value = self.to_dict(value)
+
+ if strict:
+ unknown = set(value).difference(self.attr_names)
+ if unknown:
+ self._module.fail_json(
+ msg="invalid keys: %s" % ",".join(unknown)
+ )
+
+ for name, attr in iteritems(self._attributes):
+ if value.get(name) is None:
+ value[name] = attr.get("default")
+
+ if attr.get("fallback") and not value.get(name):
+ fallback = attr.get("fallback", (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ value[name] = fallback_strategy(
+ *fallback_args, **fallback_kwargs
+ )
+ except basic.AnsibleFallbackNotFound:
+ continue
+
+ if attr.get("required") and value.get(name) is None:
+ self._module.fail_json(
+ msg="missing required attribute %s" % name
+ )
+
+ if "choices" in attr:
+ if value[name] not in attr["choices"]:
+ self._module.fail_json(
+ msg="%s must be one of %s, got %s"
+ % (name, ", ".join(attr["choices"]), value[name])
+ )
+
+ if value[name] is not None:
+ value_type = attr.get("type", "str")
+ type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[
+ value_type
+ ]
+ type_checker(value[name])
+ elif value.get(name):
+ value[name] = self._module.params[name]
+
+ return value
+
+
+class EntityCollection(Entity):
+ """Extends ```Entity``` to handle a list of dicts """
+
+ def __call__(self, iterable, strict=True):
+ if iterable is None:
+ iterable = [
+ super(EntityCollection, self).__call__(
+ self._module.params, strict
+ )
+ ]
+
+ if not isinstance(iterable, (list, tuple)):
+ self._module.fail_json(msg="value must be an iterable")
+
+ return [
+ (super(EntityCollection, self).__call__(i, strict))
+ for i in iterable
+ ]
+
+
+# these two are for backwards compatibility and can be removed once all of the
+# modules that use them are updated
+class ComplexDict(Entity):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
+
+
+class ComplexList(EntityCollection):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
+
+
+def dict_diff(base, comparable):
+ """ Generate a dict object of differences
+
+ This function will compare two dict objects and return the difference
+ between them as a dict object. For scalar values, the key will reflect
+    the updated value. If the key does not exist in `comparable`, then no
+ key will be returned. For lists, the value in comparable will wholly replace
+ the value in base for the key. For dicts, the returned value will only
+ return keys that are different.
+
+ :param base: dict object to base the diff on
+ :param comparable: dict object to compare against base
+
+ :returns: new dict object with differences
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(comparable, dict):
+ if comparable is None:
+ comparable = dict()
+ else:
+ raise AssertionError("`comparable` must be of type <dict>")
+
+ updates = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ item = comparable.get(key)
+ if item is not None:
+ sub_diff = dict_diff(value, comparable[key])
+ if sub_diff:
+ updates[key] = sub_diff
+ else:
+ comparable_value = comparable.get(key)
+ if comparable_value is not None:
+ if sort_list(base[key]) != sort_list(comparable_value):
+ updates[key] = comparable_value
+
+ for key in set(comparable.keys()).difference(base.keys()):
+ updates[key] = comparable.get(key)
+
+ return updates
+
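+# Illustrative example (not part of the original file):
+#
+#     dict_diff({"a": 1, "b": 2}, {"b": 3, "c": 4})
+#     # -> {"b": 3, "c": 4}  (changed keys plus keys only present in comparable)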
+
+def dict_merge(base, other):
+ """ Return a new dict object that combines base and other
+
+ This will create a new dict object that is a combination of the key/value
+ pairs from base and other. When both keys exist, the value will be
+ selected from other. If the value is a list object, the two lists will
+ be combined and duplicate entries removed.
+
+ :param base: dict object to serve as base
+ :param other: dict object to combine with base
+
+ :returns: new combined dict object
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(other, dict):
+ raise AssertionError("`other` must be of type <dict>")
+
+ combined = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ if isinstance(other[key], Mapping):
+ combined[key] = dict_merge(value, other[key])
+ else:
+ combined[key] = other[key]
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ elif isinstance(value, list):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ try:
+ combined[key] = list(set(chain(value, item)))
+ except TypeError:
+ value.extend([i for i in item if i not in value])
+ combined[key] = value
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ else:
+ if key in other:
+ other_value = other.get(key)
+ if other_value is not None:
+ if sort_list(base[key]) != sort_list(other_value):
+ combined[key] = other_value
+ else:
+ combined[key] = value
+ else:
+ combined[key] = other_value
+ else:
+ combined[key] = value
+
+ for key in set(other.keys()).difference(base.keys()):
+ combined[key] = other.get(key)
+
+ return combined
+
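+# Illustrative example (not part of the original file); note that list order is
+# not guaranteed, because hashable list items are deduplicated through a set:
+#
+#     dict_merge({"a": 1, "b": [1, 2]}, {"b": [2, 3], "c": 4})
+#     # -> {"a": 1, "b": [1, 2, 3], "c": 4}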
+
+def param_list_to_dict(param_list, unique_key="name", remove_key=True):
+ """Rotates a list of dictionaries to be a dictionary of dictionaries.
+
+ :param param_list: The aforementioned list of dictionaries
+ :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
+ behind this key will be the key each dictionary can be found at in the new root dictionary
+ :param remove_key: If True, remove unique_key from the individual dictionaries before returning.
+ """
+ param_dict = {}
+ for params in param_list:
+ params = params.copy()
+ if remove_key:
+ name = params.pop(unique_key)
+ else:
+ name = params.get(unique_key)
+ param_dict[name] = params
+
+ return param_dict
+
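+# Illustrative example (not part of the original file):
+#
+#     param_list_to_dict([{"name": "eth0", "mtu": 1500}])
+#     # -> {"eth0": {"mtu": 1500}}  (the unique_key "name" becomes the outer key)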
+
+def conditional(expr, val, cast=None):
+ match = re.match(r"^(.+)\((.+)\)$", str(expr), re.I)
+ if match:
+ op, arg = match.groups()
+ else:
+ op = "eq"
+ if " " in str(expr):
+ raise AssertionError("invalid expression: cannot contain spaces")
+ arg = expr
+
+ if cast is None and val is not None:
+ arg = type(val)(arg)
+ elif callable(cast):
+ arg = cast(arg)
+ val = cast(val)
+
+ op = next((oper for alias, oper in ALIASES if op == alias), op)
+
+ if not hasattr(operator, op) and op not in OPERATORS:
+ raise ValueError("unknown operator: %s" % op)
+
+ func = getattr(operator, op)
+ return func(val, arg)
+
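+# Illustrative examples (not part of the original file):
+#
+#     conditional("ge(2)", 3)    # -> True, evaluates operator.ge(3, 2)
+#     conditional("foo", "foo")  # -> True, a bare value defaults to "eq"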
+
+def ternary(value, true_val, false_val):
+ """ value ? true_val : false_val """
+ if value:
+ return true_val
+ else:
+ return false_val
+
+
+def remove_default_spec(spec):
+ for item in spec:
+ if "default" in spec[item]:
+ del spec[item]["default"]
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count(".") == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def validate_prefix(prefix):
+ if prefix and not 0 <= int(prefix) <= 32:
+ return False
+ return True
+
+
+def load_provider(spec, args):
+ provider = args.get("provider") or {}
+ for key, value in iteritems(spec):
+ if key not in provider:
+ if "fallback" in value:
+ provider[key] = _fallback(value["fallback"])
+ elif "default" in value:
+ provider[key] = value["default"]
+ else:
+ provider[key] = None
+ if "authorize" in provider:
+ # Coerce authorize to provider if a string has somehow snuck in.
+ provider["authorize"] = boolean(provider["authorize"] or False)
+ args["provider"] = provider
+ return provider
+
+
+def _fallback(fallback):
+ strategy = fallback[0]
+ args = []
+ kwargs = {}
+
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ kwargs = item
+ else:
+ args = item
+ try:
+ return strategy(*args, **kwargs)
+ except basic.AnsibleFallbackNotFound:
+ pass
+
+
+def generate_dict(spec):
+ """
+ Generate dictionary which is in sync with argspec
+
+ :param spec: A dictionary that is the argspec of the module
+ :rtype: A dictionary
+ :returns: A dictionary in sync with argspec with default value
+ """
+ obj = {}
+ if not spec:
+ return obj
+
+ for key, val in iteritems(spec):
+ if "default" in val:
+ dct = {key: val["default"]}
+ elif "type" in val and val["type"] == "dict":
+ dct = {key: generate_dict(val["options"])}
+ else:
+ dct = {key: None}
+ obj.update(dct)
+ return obj
+
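+# Illustrative example (not part of the original file):
+#
+#     generate_dict({"host": {"default": "localhost"}, "port": {"type": "int"}})
+#     # -> {"host": "localhost", "port": None}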
+
+def parse_conf_arg(cfg, arg):
+ """
+ Parse config based on argument
+
+ :param cfg: A text string which is a line of configuration.
+ :param arg: A text string which is to be matched.
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r"%s (.+)(\n|$)" % arg, cfg, re.M)
+ if match:
+ result = match.group(1).strip()
+ else:
+ result = None
+ return result
+
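+# Illustrative example (not part of the original file):
+#
+#     parse_conf_arg("interface Eth1/1\n description uplink\n", "description")
+#     # -> "uplink"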
+
+def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str="no"):
+ """
+ Parse config based on command
+
+ :param cfg: A text string which is a line of configuration.
+ :param cmd: A text string which is the command to be matched
+ :param res1: A text string to be returned if the command is present
+ :param res2: A text string to be returned if the negate command
+ is present
+ :param delete_str: A text string to identify the start of the
+ negate command
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r"\n\s+%s(\n|$)" % cmd, cfg)
+ if match:
+ return res1
+ if res2 is not None:
+ match = re.search(r"\n\s+%s %s(\n|$)" % (delete_str, cmd), cfg)
+ if match:
+ return res2
+ return None
+
+
+def get_xml_conf_arg(cfg, path, data="text"):
+ """
+ :param cfg: The top level configuration lxml Element tree object
+ :param path: The relative xpath w.r.t to top level element (cfg)
+ to be searched in the xml hierarchy
+ :param data: The type of data to be returned for the matched xml node.
+ Valid values are text, tag, attrib, with default as text.
+ :return: Returns the required type for the matched xml node or else None
+ """
+ match = cfg.xpath(path)
+ if len(match):
+ if data == "tag":
+ result = getattr(match[0], "tag")
+ elif data == "attrib":
+ result = getattr(match[0], "attrib")
+ else:
+ result = getattr(match[0], "text")
+ else:
+ result = None
+ return result
+
+
+def remove_empties(cfg_dict):
+ """
+ Generate final config dictionary
+
+ :param cfg_dict: A dictionary parsed in the facts system
+ :rtype: A dictionary
+ :returns: A dictionary by eliminating keys that have null values
+ """
+ final_cfg = {}
+ if not cfg_dict:
+ return final_cfg
+
+ for key, val in iteritems(cfg_dict):
+ dct = None
+ if isinstance(val, dict):
+ child_val = remove_empties(val)
+ if child_val:
+ dct = {key: child_val}
+ elif (
+ isinstance(val, list)
+ and val
+ and all([isinstance(x, dict) for x in val])
+ ):
+ child_val = [remove_empties(x) for x in val]
+ if child_val:
+ dct = {key: child_val}
+ elif val not in [None, [], {}, (), ""]:
+ dct = {key: val}
+ if dct:
+ final_cfg.update(dct)
+ return final_cfg
+
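+# Illustrative example (not part of the original file):
+#
+#     remove_empties({"a": None, "b": "", "c": 1, "d": {"e": None}})
+#     # -> {"c": 1}  (keys with null/empty values are dropped recursively)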
+
+def validate_config(spec, data):
+ """
+ Validate if the input data against the AnsibleModule spec format
+ :param spec: Ansible argument spec
+ :param data: Data to be validated
+ :return:
+ """
+ params = basic._ANSIBLE_ARGS
+ basic._ANSIBLE_ARGS = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": data}))
+ validated_data = basic.AnsibleModule(spec).params
+ basic._ANSIBLE_ARGS = params
+ return validated_data
+
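+# Illustrative example (not part of the original file): the helper above
+# temporarily swaps the global module arguments so that the spec's defaults and
+# type checks are applied to arbitrary data:
+#
+#     validate_config({"host": {"type": "str", "default": "localhost"}}, {})
+#     # -> {"host": "localhost"}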
+
+def search_obj_in_list(name, lst, key="name"):
+ if not lst:
+ return None
+ else:
+ for item in lst:
+ if item.get(key) == name:
+ return item
+
+
+class Template:
+ def __init__(self):
+ if not HAS_JINJA2:
+ raise ImportError(
+ "jinja2 is required but does not appear to be installed. "
+ "It can be installed using `pip install jinja2`"
+ )
+
+ self.env = Environment(undefined=StrictUndefined)
+ self.env.filters.update({"ternary": ternary})
+
+ def __call__(self, value, variables=None, fail_on_undefined=True):
+ variables = variables or {}
+
+ if not self.contains_vars(value):
+ return value
+
+ try:
+ value = self.env.from_string(value).render(variables)
+ except UndefinedError:
+ if not fail_on_undefined:
+ return None
+ raise
+
+ if value:
+ try:
+ return ast.literal_eval(value)
+ except Exception:
+ return str(value)
+ else:
+ return None
+
+ def contains_vars(self, data):
+ if isinstance(data, string_types):
+ for marker in (
+ self.env.block_start_string,
+ self.env.variable_start_string,
+ self.env.comment_start_string,
+ ):
+ if marker in data:
+ return True
+ return False
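+
+# Illustrative usage of Template (not part of the original file):
+#
+#     t = Template()
+#     t("{{ a + 1 }}", {"a": 1})   # -> 2 (rendered, then literal_eval'd)
+#     t("no vars here")            # -> returned unchanged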
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py
new file mode 100644
index 00000000..1f03299b
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py
@@ -0,0 +1,147 @@
+#
+# (c) 2018 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+import json
+
+from copy import deepcopy
+from contextlib import contextmanager
+
+try:
+ from lxml.etree import fromstring, tostring
+except ImportError:
+ from xml.etree.ElementTree import fromstring, tostring
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.connection import Connection, ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import (
+ NetconfConnection,
+)
+
+
+IGNORE_XML_ATTRIBUTE = ()
+
+
+def get_connection(module):
+ if hasattr(module, "_netconf_connection"):
+ return module._netconf_connection
+
+ capabilities = get_capabilities(module)
+ network_api = capabilities.get("network_api")
+ if network_api == "netconf":
+ module._netconf_connection = NetconfConnection(module._socket_path)
+ else:
+ module.fail_json(msg="Invalid connection type %s" % network_api)
+
+ return module._netconf_connection
+
+
+def get_capabilities(module):
+ if hasattr(module, "_netconf_capabilities"):
+ return module._netconf_capabilities
+
+ capabilities = Connection(module._socket_path).get_capabilities()
+ module._netconf_capabilities = json.loads(capabilities)
+ return module._netconf_capabilities
+
+
+def lock_configuration(module, target=None):
+ conn = get_connection(module)
+ return conn.lock(target=target)
+
+
+def unlock_configuration(module, target=None):
+ conn = get_connection(module)
+ return conn.unlock(target=target)
+
+
+@contextmanager
+def locked_config(module, target=None):
+ try:
+ lock_configuration(module, target=target)
+ yield
+ finally:
+ unlock_configuration(module, target=target)
+
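+# Illustrative usage of the context manager above (not part of the original
+# file); the target datastore is unlocked again even if the block raises:
+#
+#     with locked_config(module, target="candidate"):
+#         conn.edit_config(...)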
+
+def get_config(module, source, filter=None, lock=False):
+ conn = get_connection(module)
+ try:
+ locked = False
+ if lock:
+ conn.lock(target=source)
+ locked = True
+ response = conn.get_config(source=source, filter=filter)
+
+ except ConnectionError as e:
+ module.fail_json(
+ msg=to_text(e, errors="surrogate_then_replace").strip()
+ )
+
+ finally:
+ if locked:
+ conn.unlock(target=source)
+
+ return response
+
+
+def get(module, filter, lock=False):
+ conn = get_connection(module)
+ try:
+ locked = False
+ if lock:
+ conn.lock(target="running")
+ locked = True
+
+ response = conn.get(filter=filter)
+
+ except ConnectionError as e:
+ module.fail_json(
+ msg=to_text(e, errors="surrogate_then_replace").strip()
+ )
+
+ finally:
+ if locked:
+ conn.unlock(target="running")
+
+ return response
+
+
+def dispatch(module, request):
+ conn = get_connection(module)
+ try:
+ response = conn.dispatch(request)
+ except ConnectionError as e:
+ module.fail_json(
+ msg=to_text(e, errors="surrogate_then_replace").strip()
+ )
+
+ return response
+
+
+def sanitize_xml(data):
+ tree = fromstring(
+ to_bytes(deepcopy(data), errors="surrogate_then_replace")
+ )
+    for element in tree.iter():  # getiterator() was removed in Python 3.9
+ # remove attributes
+ attribute = element.attrib
+ if attribute:
+ for key in list(attribute):
+ if key not in IGNORE_XML_ATTRIBUTE:
+ attribute.pop(key)
+ return to_text(tostring(tree), errors="surrogate_then_replace").strip()
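+
+# Illustrative example (not part of the original file):
+#
+#     sanitize_xml('<rpc-reply id="1"><ok/></rpc-reply>')
+#     # -> '<rpc-reply><ok/></rpc-reply>'  (attributes are stripped)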
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py
new file mode 100644
index 00000000..fba46be0
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py
@@ -0,0 +1,61 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2018 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from ansible.module_utils.connection import Connection
+
+
+def get(module, path=None, content=None, fields=None, output="json"):
+ if path is None:
+ raise ValueError("path value must be provided")
+    if content:
+        path += "?" + "content=%s" % content
+    if fields:
+        # append with '&' when a query string has already been started
+        path += ("&" if "?" in path else "?") + "field=%s" % fields
+
+ accept = None
+ if output == "xml":
+ accept = "application/yang-data+xml"
+
+ connection = Connection(module._socket_path)
+ return connection.send_request(
+ None, path=path, method="GET", accept=accept
+ )
+
+
+def edit_config(module, path=None, content=None, method="GET", format="json"):
+ if path is None:
+ raise ValueError("path value must be provided")
+
+ content_type = None
+ if format == "xml":
+ content_type = "application/yang-data+xml"
+
+ connection = Connection(module._socket_path)
+ return connection.send_request(
+ content, path=path, method=method, content_type=content_type
+ )
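+
+# Illustrative usage (not part of the original file); paths are RESTCONF
+# resource paths relative to the API root, and "payload" is a placeholder:
+#
+#     get(module, path="data/ietf-interfaces:interfaces")
+#     edit_config(module, path="data/ietf-interfaces:interfaces",
+#                 content=payload, method="PATCH")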
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py
new file mode 100644
index 00000000..c1384c1d
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/cli_config.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Ansible by Red Hat, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: cli_config
+author: Trishna Guha (@trishnaguha)
+notes:
+- The commands will be returned only for platforms that do not support onbox diff.
+ The C(--diff) option with the playbook will return the difference in configuration
+  for devices that have support for onbox diff.
+short_description: Push text based configuration to network devices over network_cli
+description:
+- This module provides a platform-agnostic way of pushing text-based configuration to
+  network devices over the network_cli connection plugin.
+extends_documentation_fragment:
+- ansible.netcommon.network_agnostic
+options:
+ config:
+ description:
+ - The config to be pushed to the network device. This argument is mutually exclusive
+    with C(rollback) and either one of the options should be given as input. The
+ config should have indentation that the device uses.
+ type: str
+ commit:
+ description:
+ - The C(commit) argument instructs the module to push the configuration to the
+ device. This is mapped to module check mode.
+ type: bool
+ replace:
+ description:
+ - If the C(replace) argument is set to C(yes), it will replace the entire running-config
+ of the device with the C(config) argument value. For devices that support replacing
+ running configuration from file on device like NXOS/JUNOS, the C(replace) argument
+ takes path to the file on the device that will be used for replacing the entire
+ running-config. The value of C(config) option should be I(None) for such devices.
+ Nexus 9K devices only support replace. Use I(net_put) or I(nxos_file_copy) in
+      case of NXOS module to copy the flat file to the remote device and then set
+      the full path to the file in this argument.
+ type: str
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of the current running
+ config from the remote device before any changes are made. If the C(backup_options)
+ value is not given, the backup file is written to the C(backup) folder in the
+ playbook root directory or role root directory, if playbook is part of an ansible
+ role. If the directory does not exist, it is created.
+ type: bool
+ default: 'no'
+ rollback:
+ description:
+    - The C(rollback) argument instructs the module to roll back the current configuration
+ to the identifier specified in the argument. If the specified rollback identifier
+ does not exist on the remote device, the module will fail. To rollback to the
+ most recent commit, set the C(rollback) argument to 0. This option is mutually
+ exclusive with C(config).
+ commit_comment:
+ description:
+ - The C(commit_comment) argument specifies a text string to be used when committing
+ the configuration. If the C(commit) argument is set to False, this argument
+ is silently ignored. This argument is only valid for the platforms that support
+ commit operation with comment.
+ type: str
+ defaults:
+ description:
+ - The I(defaults) argument will influence how the running-config is collected
+ from the device. When the value is set to true, the command used to collect
+      the running-config is appended with the all keyword. When the value is set to
+ false, the command is issued without the all keyword.
+ default: 'no'
+ type: bool
+ multiline_delimiter:
+ description:
+ - This argument is used when pushing a multiline configuration element to the
+ device. It specifies the character to use as the delimiting character. This
+ only applies to the configuration action.
+ type: str
+ diff_replace:
+ description:
+ - Instructs the module on the way to perform the configuration on the device.
+ If the C(diff_replace) argument is set to I(line) then the modified lines are
+ pushed to the device in configuration mode. If the argument is set to I(block)
+ then the entire command block is pushed to the device in configuration mode
+ if any line is not correct. Note that this parameter will be ignored if the
+ platform has onbox diff support.
+ choices:
+ - line
+ - block
+ - config
+ diff_match:
+ description:
+ - Instructs the module on the way to perform the matching of the set of commands
+ against the current device config. If C(diff_match) is set to I(line), commands
+ are matched line by line. If C(diff_match) is set to I(strict), command lines
+ are matched with respect to position. If C(diff_match) is set to I(exact), command
+ lines must be an equal match. Finally, if C(diff_match) is set to I(none), the
+ module will not attempt to compare the source configuration with the running
+ configuration on the remote device. Note that this parameter will be ignored
+ if the platform has onbox diff support.
+ choices:
+ - line
+ - strict
+ - exact
+ - none
+ diff_ignore_lines:
+ description:
+ - Use this argument to specify one or more lines that should be ignored during
+ the diff. This is used for lines in the configuration that are automatically
+ updated by the system. This argument takes a list of regular expressions or
+ exact line matches. Note that this parameter will be ignored if the platform
+ has onbox diff support.
+ backup_options:
+ description:
+ - This is a dict object containing configurable options related to backup file
+ path. The value of this option is read only when C(backup) is set to I(yes),
+ if C(backup) is set to I(no) this option will be silently ignored.
+ suboptions:
+ filename:
+ description:
+ - The filename to be used to store the backup configuration. If the filename
+ is not given it will be generated based on the hostname, current time and
+ date in format defined by <hostname>_config.<current-date>@<current-time>
+ dir_path:
+ description:
+      - This option provides the path ending with directory name in which the backup
+        configuration file will be stored. If the directory does not exist it will
+        be created first, and the filename is either the value of C(filename) or the
+        default filename as described in the C(filename) option's description. If the
+        path value is not given, a I(backup) directory will be created in the current
+        working directory and the backup configuration will be copied to C(filename)
+        within the I(backup) directory.
+ type: path
+ type: dict
+"""
+
+EXAMPLES = """
+- name: configure device with config
+ cli_config:
+ config: "{{ lookup('template', 'basic/config.j2') }}"
+
+- name: multiline config
+ cli_config:
+ config: |
+ hostname foo
+ feature nxapi
+
+- name: configure device with config with defaults enabled
+ cli_config:
+ config: "{{ lookup('template', 'basic/config.j2') }}"
+ defaults: yes
+
+- name: Use diff_match
+ cli_config:
+ config: "{{ lookup('file', 'interface_config') }}"
+ diff_match: none
+
+- name: nxos replace config
+ cli_config:
+ replace: 'bootflash:nxoscfg'
+
+- name: junos replace config
+ cli_config:
+ replace: '/var/home/ansible/junos01.cfg'
+
+- name: commit with comment
+ cli_config:
+ config: set system host-name foo
+ commit_comment: this is a test
+
+- name: configurable backup path
+ cli_config:
+ config: "{{ lookup('template', 'basic/config.j2') }}"
+ backup: yes
+ backup_options:
+ filename: backup.cfg
+ dir_path: /home/user
+"""
+
+RETURN = """
+commands:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['interface Loopback999', 'no shutdown']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: str
+ sample: /playbooks/ansible/backup/hostname_config.2016-07-16@22:28:34
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.connection import Connection
+from ansible.module_utils._text import to_text
+
+
+def validate_args(module, device_operations):
+ """validate param if it is supported on the platform
+ """
+ feature_list = [
+ "replace",
+ "rollback",
+ "commit_comment",
+ "defaults",
+ "multiline_delimiter",
+ "diff_replace",
+ "diff_match",
+ "diff_ignore_lines",
+ ]
+
+ for feature in feature_list:
+ if module.params[feature]:
+ supports_feature = device_operations.get("supports_%s" % feature)
+ if supports_feature is None:
+ module.fail_json(
+ "This platform does not specify whether %s is supported or not. "
+ "Please report an issue against this platform's cliconf plugin."
+ % feature
+ )
+ elif not supports_feature:
+ module.fail_json(
+ msg="Option %s is not supported on this platform" % feature
+ )
+
+
+def run(
+ module, device_operations, connection, candidate, running, rollback_id
+):
+ result = {}
+ resp = {}
+ config_diff = []
+ banner_diff = {}
+
+ replace = module.params["replace"]
+ commit_comment = module.params["commit_comment"]
+ multiline_delimiter = module.params["multiline_delimiter"]
+ diff_replace = module.params["diff_replace"]
+ diff_match = module.params["diff_match"]
+ diff_ignore_lines = module.params["diff_ignore_lines"]
+
+ commit = not module.check_mode
+
+ if replace in ("yes", "true", "True"):
+ replace = True
+ elif replace in ("no", "false", "False"):
+ replace = False
+
+ if (
+ replace is not None
+ and replace not in [True, False]
+ and candidate is not None
+ ):
+ module.fail_json(
+ msg="Replace value '%s' is a configuration file path already"
+ " present on the device. Hence 'replace' and 'config' options"
+ " are mutually exclusive" % replace
+ )
+
+ if rollback_id is not None:
+ resp = connection.rollback(rollback_id, commit)
+ if "diff" in resp:
+ result["changed"] = True
+
+ elif device_operations.get("supports_onbox_diff"):
+ if diff_replace:
+ module.warn(
+ "diff_replace is ignored as the device supports onbox diff"
+ )
+ if diff_match:
+ module.warn(
+ "diff_mattch is ignored as the device supports onbox diff"
+ )
+ if diff_ignore_lines:
+ module.warn(
+ "diff_ignore_lines is ignored as the device supports onbox diff"
+ )
+
+ if candidate and not isinstance(candidate, list):
+ candidate = candidate.strip("\n").splitlines()
+
+ kwargs = {
+ "candidate": candidate,
+ "commit": commit,
+ "replace": replace,
+ "comment": commit_comment,
+ }
+ resp = connection.edit_config(**kwargs)
+
+ if "diff" in resp:
+ result["changed"] = True
+
+ elif device_operations.get("supports_generate_diff"):
+ kwargs = {"candidate": candidate, "running": running}
+ if diff_match:
+ kwargs.update({"diff_match": diff_match})
+ if diff_replace:
+ kwargs.update({"diff_replace": diff_replace})
+ if diff_ignore_lines:
+ kwargs.update({"diff_ignore_lines": diff_ignore_lines})
+
+ diff_response = connection.get_diff(**kwargs)
+
+ config_diff = diff_response.get("config_diff")
+ banner_diff = diff_response.get("banner_diff")
+
+ if config_diff:
+ if isinstance(config_diff, list):
+ candidate = config_diff
+ else:
+ candidate = config_diff.splitlines()
+
+ kwargs = {
+ "candidate": candidate,
+ "commit": commit,
+ "replace": replace,
+ "comment": commit_comment,
+ }
+ if commit:
+ connection.edit_config(**kwargs)
+ result["changed"] = True
+ result["commands"] = config_diff.split("\n")
+
+ if banner_diff:
+ candidate = json.dumps(banner_diff)
+
+ kwargs = {"candidate": candidate, "commit": commit}
+ if multiline_delimiter:
+ kwargs.update({"multiline_delimiter": multiline_delimiter})
+ if commit:
+ connection.edit_banner(**kwargs)
+ result["changed"] = True
+
+ if module._diff:
+ if "diff" in resp:
+ result["diff"] = {"prepared": resp["diff"]}
+ else:
+ diff = ""
+ if config_diff:
+ if isinstance(config_diff, list):
+ diff += "\n".join(config_diff)
+ else:
+ diff += config_diff
+ if banner_diff:
+ diff += json.dumps(banner_diff)
+ result["diff"] = {"prepared": diff}
+
+ return result
+
+
+def main():
+ """main entry point for execution
+ """
+ backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
+ argument_spec = dict(
+ backup=dict(default=False, type="bool"),
+ backup_options=dict(type="dict", options=backup_spec),
+ config=dict(type="str"),
+ commit=dict(type="bool"),
+ replace=dict(type="str"),
+ rollback=dict(type="int"),
+ commit_comment=dict(type="str"),
+ defaults=dict(default=False, type="bool"),
+ multiline_delimiter=dict(type="str"),
+ diff_replace=dict(choices=["line", "block", "config"]),
+ diff_match=dict(choices=["line", "strict", "exact", "none"]),
+ diff_ignore_lines=dict(type="list"),
+ )
+
+ mutually_exclusive = [("config", "rollback")]
+ required_one_of = [["backup", "config", "rollback"]]
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ supports_check_mode=True,
+ )
+
+ result = {"changed": False}
+
+ connection = Connection(module._socket_path)
+ capabilities = module.from_json(connection.get_capabilities())
+
+ if capabilities:
+ device_operations = capabilities.get("device_operations", dict())
+ validate_args(module, device_operations)
+ else:
+ device_operations = dict()
+
+ if module.params["defaults"]:
+ if "get_default_flag" in capabilities.get("rpc"):
+ flags = connection.get_default_flag()
+ else:
+ flags = "all"
+ else:
+ flags = []
+
+ candidate = module.params["config"]
+ candidate = (
+ to_text(candidate, errors="surrogate_then_replace")
+ if candidate
+ else None
+ )
+ running = connection.get_config(flags=flags)
+ rollback_id = module.params["rollback"]
+
+ if module.params["backup"]:
+ result["__backup__"] = running
+
+ if candidate or rollback_id or module.params["replace"]:
+ try:
+ result.update(
+ run(
+ module,
+ device_operations,
+ connection,
+ candidate,
+ running,
+ rollback_id,
+ )
+ )
+ except Exception as exc:
+ module.fail_json(msg=to_text(exc))
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py
new file mode 100644
index 00000000..f0910f52
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_get.py
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Ansible by Red Hat, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: net_get
+author: Deepak Agrawal (@dagrawal)
+short_description: Copy a file from a network device to Ansible Controller
+description:
+- This module provides functionality to copy a file from a network device to the Ansible controller.
+extends_documentation_fragment:
+- ansible.netcommon.network_agnostic
+options:
+ src:
+ description:
+ - Specifies the source file. The path to the source file can either be the full
+      path on the network device or a relative path, as supported by the destination
+      network device.
+ required: true
+ protocol:
+ description:
+ - Protocol used to transfer file.
+ default: scp
+ choices:
+ - scp
+ - sftp
+ dest:
+ description:
+ - Specifies the destination file. The path to the destination file can either
+ be the full path on the Ansible control host or a relative path from the playbook
+ or role root directory.
+ default:
+ - Same filename as specified in I(src). The path will be playbook root or role
+ root directory if playbook is part of a role.
+requirements:
+- scp
+notes:
+- Some devices need specific configurations to be enabled before scp can work. These
+  configurations should be pre-configured before using this module, e.g. ios - C(ip scp
+  server enable).
+- User privilege to do scp on the network device should be pre-configured, e.g. ios -
+  needs user privilege 15 by default for allowing scp.
+- Default destination of source file.
+"""
+
+EXAMPLES = """
+- name: copy file from the network device to Ansible controller
+ net_get:
+ src: running_cfg_ios1.txt
+
+- name: copy file from ios to common location at /tmp
+ net_get:
+ src: running_cfg_sw1.txt
+ dest : /tmp/ios1.txt
+"""
+
+RETURN = """
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py
new file mode 100644
index 00000000..2fc4a98c
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/modules/net_put.py
@@ -0,0 +1,82 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Ansible by Red Hat, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: net_put
+author: Deepak Agrawal (@dagrawal)
+short_description: Copy a file from Ansible Controller to a network device
+description:
+- This module provides functionality to copy a file from the Ansible controller to
+  network devices.
+extends_documentation_fragment:
+- ansible.netcommon.network_agnostic
+options:
+ src:
+ description:
+ - Specifies the source file. The path to the source file can either be the full
+ path on the Ansible control host or a relative path from the playbook or role
+ root directory.
+ required: true
+ protocol:
+ description:
+ - Protocol used to transfer file.
+ default: scp
+ choices:
+ - scp
+ - sftp
+ dest:
+ description:
+    - Specifies the destination file. The path to the destination file can either be the
+ full path or relative path as supported by network_os.
+ default:
+ - Filename from src and at default directory of user shell on network_os.
+ required: false
+ mode:
+ description:
+    - Set the file transfer mode. If mode is set to I(text) then the I(src) file will
+      go through the Jinja2 template engine to replace any vars present in the src
+      file. If mode is set to I(binary) then the file will be copied as-is to the
+      destination device.
+ default: binary
+ choices:
+ - binary
+ - text
+requirements:
+- scp
+notes:
+- Some devices need specific configurations to be enabled before scp can work. These
+  configurations should be pre-configured before using this module, e.g. ios - C(ip scp
+  server enable).
+- User privilege to do scp on the network device should be pre-configured, e.g. ios -
+  needs user privilege 15 by default for allowing scp.
+- Default destination of source file.
+"""
+
+EXAMPLES = """
+- name: copy file from ansible controller to a network device
+ net_put:
+ src: running_cfg_ios1.txt
+
+- name: copy file at root dir of flash in slot 3 of sw1(ios)
+ net_put:
+ src: running_cfg_sw1.txt
+ protocol: sftp
+ dest : flash3:/running_cfg_sw1.txt
+"""
+
+RETURN = """
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py
new file mode 100644
index 00000000..e9332f26
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/netconf/default.py
@@ -0,0 +1,70 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """author: Ansible Networking Team
+netconf: default
+short_description: Use default netconf plugin to run standard netconf commands as
+ per RFC
+description:
+- This default plugin provides low-level abstraction APIs for sending and receiving
+ netconf commands as per the Netconf RFC specification.
+options:
+ ncclient_device_handler:
+ type: str
+ default: default
+ description:
+ - Specifies the ncclient device handler name for network OSes that support the default
+ netconf implementation as per the Netconf RFC specification. To identify the ncclient
+ device handler name, refer to the ncclient library documentation.
+"""
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.plugins.netconf import NetconfBase
+
+
+class Netconf(NetconfBase):
+ def get_text(self, ele, tag):
+ try:
+ return to_text(
+ ele.find(tag).text, errors="surrogate_then_replace"
+ ).strip()
+ except AttributeError:
+ pass
+
+ def get_device_info(self):
+ device_info = dict()
+ device_info["network_os"] = "default"
+ return device_info
+
+ def get_capabilities(self):
+ result = dict()
+ result["rpc"] = self.get_base_rpc()
+ result["network_api"] = "netconf"
+ result["device_info"] = self.get_device_info()
+ result["server_capabilities"] = [c for c in self.m.server_capabilities]
+ result["client_capabilities"] = [c for c in self.m.client_capabilities]
+ result["session_id"] = self.m.session_id
+ result["device_operations"] = self.get_device_operations(
+ result["server_capabilities"]
+ )
+ return json.dumps(result)
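+
+ # Illustrative shape of the JSON document returned by get_capabilities()
+ # (keys taken from the code above; values are examples only):
+ #
+ #   {"rpc": [...], "network_api": "netconf",
+ #    "device_info": {"network_os": "default"},
+ #    "server_capabilities": [...], "client_capabilities": [...],
+ #    "session_id": "17", "device_operations": {...}}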
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py
new file mode 100644
index 00000000..e5ac2cd1
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/action/ios.py
@@ -0,0 +1,133 @@
+#
+# (c) 2016 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import sys
+import copy
+
+from ansible_collections.ansible.netcommon.plugins.action.network import (
+ ActionModule as ActionNetworkModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ load_provider,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+ ios_provider_spec,
+)
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionNetworkModule):
+ def run(self, tmp=None, task_vars=None):
+ del tmp # tmp no longer has any effect
+
+ module_name = self._task.action.split(".")[-1]
+ self._config_module = module_name == "ios_config"
+ persistent_connection = self._play_context.connection.split(".")[-1]
+ warnings = []
+
+ if persistent_connection == "network_cli":
+ provider = self._task.args.get("provider", {})
+ if any(provider.values()):
+ display.warning(
+ "provider is unnecessary when using network_cli and will be ignored"
+ )
+ del self._task.args["provider"]
+ elif self._play_context.connection == "local":
+ provider = load_provider(ios_provider_spec, self._task.args)
+ pc = copy.deepcopy(self._play_context)
+ pc.connection = "ansible.netcommon.network_cli"
+ pc.network_os = "cisco.ios.ios"
+ pc.remote_addr = provider["host"] or self._play_context.remote_addr
+ pc.port = int(provider["port"] or self._play_context.port or 22)
+ pc.remote_user = (
+ provider["username"] or self._play_context.connection_user
+ )
+ pc.password = provider["password"] or self._play_context.password
+ pc.private_key_file = (
+ provider["ssh_keyfile"] or self._play_context.private_key_file
+ )
+ pc.become = provider["authorize"] or False
+ if pc.become:
+ pc.become_method = "enable"
+ pc.become_pass = provider["auth_pass"]
+
+ connection = self._shared_loader_obj.connection_loader.get(
+ "ansible.netcommon.persistent",
+ pc,
+ sys.stdin,
+ task_uuid=self._task._uuid,
+ )
+
+ # TODO: Remove below code after ansible minimal is cut out
+ if connection is None:
+ pc.connection = "network_cli"
+ pc.network_os = "ios"
+ connection = self._shared_loader_obj.connection_loader.get(
+ "persistent", pc, sys.stdin, task_uuid=self._task._uuid
+ )
+
+ display.vvv(
+ "using connection plugin %s (was local)" % pc.connection,
+ pc.remote_addr,
+ )
+
+ command_timeout = (
+ int(provider["timeout"])
+ if provider["timeout"]
+ else connection.get_option("persistent_command_timeout")
+ )
+ connection.set_options(
+ direct={"persistent_command_timeout": command_timeout}
+ )
+
+ socket_path = connection.run()
+ display.vvvv("socket_path: %s" % socket_path, pc.remote_addr)
+ if not socket_path:
+ return {
+ "failed": True,
+ "msg": "unable to open shell. Please see: "
+ + "https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell",
+ }
+
+ task_vars["ansible_socket"] = socket_path
+ warnings.append(
+ [
+ "connection local support for this module is deprecated and will be removed in version 2.14, use connection %s"
+ % pc.connection
+ ]
+ )
+ else:
+ return {
+ "failed": True,
+ "msg": "Connection type %s is not valid for this module"
+ % self._play_context.connection,
+ }
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+ if warnings:
+ if "warnings" in result:
+ result["warnings"].extend(warnings)
+ else:
+ result["warnings"] = warnings
+ return result
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py
new file mode 100644
index 00000000..8a390034
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py
@@ -0,0 +1,465 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+author: Ansible Networking Team
+cliconf: ios
+short_description: Use ios cliconf to run commands on Cisco IOS platform
+description:
+ - This ios plugin provides low-level abstraction APIs for
+ sending CLI commands to and receiving responses from Cisco IOS network devices.
+version_added: "2.4"
+"""
+
+import re
+import time
+import json
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
+ NetworkConfig,
+ dumps,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible.plugins.cliconf import CliconfBase, enable_mode
+
+
+class Cliconf(CliconfBase):
+ @enable_mode
+ def get_config(self, source="running", flags=None, format=None):
+ if source not in ("running", "startup"):
+ raise ValueError(
+ "fetching configuration from %s is not supported" % source
+ )
+
+ if format:
+ raise ValueError(
+ "'format' value %s is not supported for get_config" % format
+ )
+
+ if not flags:
+ flags = []
+ if source == "running":
+ cmd = "show running-config "
+ else:
+ cmd = "show startup-config "
+
+ cmd += " ".join(to_list(flags))
+ cmd = cmd.strip()
+
+ return self.send_command(cmd)
+
+ def get_diff(
+ self,
+ candidate=None,
+ running=None,
+ diff_match="line",
+ diff_ignore_lines=None,
+ path=None,
+ diff_replace="line",
+ ):
+ """
+ Generate diff between candidate and running configuration. If the
+ remote host supports onbox diff capabilities (i.e. supports_onbox_diff),
+ the candidate and running configurations are not required to be passed
+ as arguments. If onbox diff capability is not supported, the candidate
+ argument is mandatory and the running argument is optional.
+ :param candidate: The configuration which is expected to be present on remote host.
+ :param running: The base configuration which is used to generate diff.
+ :param diff_match: Instructs how to match the candidate configuration with current device configuration
+ Valid values are 'line', 'strict', 'exact', 'none'.
+ 'line' - commands are matched line by line
+ 'strict' - command lines are matched with respect to position
+ 'exact' - command lines must be an equal match
+ 'none' - will not compare the candidate configuration with the running configuration
+ :param diff_ignore_lines: Use this argument to specify one or more lines that should be
+ ignored during the diff. This is used for lines in the configuration
+ that are automatically updated by the system. This argument takes
+ a list of regular expressions or exact line matches.
+ :param path: The ordered set of parents that uniquely identify the section or hierarchy
+ the commands should be checked against. If the parents argument
+ is omitted, the commands are checked against the set of top
+ level or global commands.
+ :param diff_replace: Instructs on the way to perform the configuration on the device.
+ If the replace argument is set to I(line) then the modified lines are
+ pushed to the device in configuration mode. If the replace argument is
+ set to I(block) then the entire command block is pushed to the device in
+ configuration mode if any line is not correct.
+ :return: Configuration diff in json format.
+ {
+ 'config_diff': '',
+ 'banner_diff': {}
+ }
+
+ """
+ diff = {}
+ device_operations = self.get_device_operations()
+ option_values = self.get_option_values()
+
+ if candidate is None and device_operations["supports_generate_diff"]:
+ raise ValueError(
+ "candidate configuration is required to generate diff"
+ )
+
+ if diff_match not in option_values["diff_match"]:
+ raise ValueError(
+ "'match' value %s in invalid, valid values are %s"
+ % (diff_match, ", ".join(option_values["diff_match"]))
+ )
+
+ if diff_replace not in option_values["diff_replace"]:
+ raise ValueError(
+ "'replace' value %s in invalid, valid values are %s"
+ % (diff_replace, ", ".join(option_values["diff_replace"]))
+ )
+
+ # prepare candidate configuration
+ candidate_obj = NetworkConfig(indent=1)
+ want_src, want_banners = self._extract_banners(candidate)
+ candidate_obj.load(want_src)
+
+ if running and diff_match != "none":
+ # running configuration
+ have_src, have_banners = self._extract_banners(running)
+ running_obj = NetworkConfig(
+ indent=1, contents=have_src, ignore_lines=diff_ignore_lines
+ )
+ configdiffobjs = candidate_obj.difference(
+ running_obj, path=path, match=diff_match, replace=diff_replace
+ )
+
+ else:
+ configdiffobjs = candidate_obj.items
+ have_banners = {}
+
+ diff["config_diff"] = (
+ dumps(configdiffobjs, "commands") if configdiffobjs else ""
+ )
+ banners = self._diff_banners(want_banners, have_banners)
+ diff["banner_diff"] = banners if banners else {}
+ return diff
+
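+ # Illustrative usage sketch (an assumption, not part of the original file):
+ # generating a line-based diff between a candidate and the running config.
+ #
+ #   diff = cliconf.get_diff(
+ #       candidate="interface GigabitEthernet0/1\n description uplink",
+ #       running=running_cfg,
+ #       diff_match="line",
+ #       diff_replace="line",
+ #   )
+ #   commands = diff["config_diff"].split("\n")
+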
+ @enable_mode
+ def edit_config(
+ self, candidate=None, commit=True, replace=None, comment=None
+ ):
+ resp = {}
+ operations = self.get_device_operations()
+ self.check_edit_config_capability(
+ operations, candidate, commit, replace, comment
+ )
+
+ results = []
+ requests = []
+ if commit:
+ self.send_command("configure terminal")
+ for line in to_list(candidate):
+ if not isinstance(line, Mapping):
+ line = {"command": line}
+
+ cmd = line["command"]
+ if cmd != "end" and cmd[0] != "!":
+ results.append(self.send_command(**line))
+ requests.append(cmd)
+
+ self.send_command("end")
+ else:
+ raise ValueError("check mode is not supported")
+
+ resp["request"] = requests
+ resp["response"] = results
+ return resp
+
+ def edit_macro(
+ self, candidate=None, commit=True, replace=None, comment=None
+ ):
+ """
+ ios_config:
+ lines: "{{ macro_lines }}"
+ parents: "macro name {{ macro_name }}"
+ after: '@'
+ match: line
+ replace: block
+ """
+ resp = {}
+ operations = self.get_device_operations()
+ self.check_edit_config_capability(
+ operations, candidate, commit, replace, comment
+ )
+
+ results = []
+ requests = []
+ if commit:
+ commands = ""
+ self.send_command("config terminal")
+ time.sleep(0.1)
+ # first item: macro command
+ commands += candidate.pop(0) + "\n"
+ multiline_delimiter = candidate.pop(-1)
+ for line in candidate:
+ commands += " " + line + "\n"
+ commands += multiline_delimiter + "\n"
+ obj = {"command": commands, "sendonly": True}
+ results.append(self.send_command(**obj))
+ requests.append(commands)
+
+ time.sleep(0.1)
+ self.send_command("end", sendonly=True)
+ time.sleep(0.1)
+ results.append(self.send_command("\n"))
+ requests.append("\n")
+
+ resp["request"] = requests
+ resp["response"] = results
+ return resp
+
+ def get(
+ self,
+ command=None,
+ prompt=None,
+ answer=None,
+ sendonly=False,
+ output=None,
+ newline=True,
+ check_all=False,
+ ):
+ if not command:
+ raise ValueError("must provide value of command to execute")
+ if output:
+ raise ValueError(
+ "'output' value %s is not supported for get" % output
+ )
+
+ return self.send_command(
+ command=command,
+ prompt=prompt,
+ answer=answer,
+ sendonly=sendonly,
+ newline=newline,
+ check_all=check_all,
+ )
+
+ def get_device_info(self):
+ device_info = {}
+
+ device_info["network_os"] = "ios"
+ reply = self.get(command="show version")
+ data = to_text(reply, errors="surrogate_or_strict").strip()
+
+ match = re.search(r"Version (\S+)", data)
+ if match:
+ device_info["network_os_version"] = match.group(1).strip(",")
+
+ model_search_strs = [
+ r"^[Cc]isco (.+) \(revision",
+ r"^[Cc]isco (\S+).+bytes of .*memory",
+ ]
+ for item in model_search_strs:
+ match = re.search(item, data, re.M)
+ if match:
+ version = match.group(1).split(" ")
+ device_info["network_os_model"] = version[0]
+ break
+
+ match = re.search(r"^(.+) uptime", data, re.M)
+ if match:
+ device_info["network_os_hostname"] = match.group(1)
+
+ match = re.search(r'image file is "(.+)"', data)
+ if match:
+ device_info["network_os_image"] = match.group(1)
+
+ return device_info
+
+ def get_device_operations(self):
+ return {
+ "supports_diff_replace": True,
+ "supports_commit": False,
+ "supports_rollback": False,
+ "supports_defaults": True,
+ "supports_onbox_diff": False,
+ "supports_commit_comment": False,
+ "supports_multiline_delimiter": True,
+ "supports_diff_match": True,
+ "supports_diff_ignore_lines": True,
+ "supports_generate_diff": True,
+ "supports_replace": False,
+ }
+
+ def get_option_values(self):
+ return {
+ "format": ["text"],
+ "diff_match": ["line", "strict", "exact", "none"],
+ "diff_replace": ["line", "block"],
+ "output": [],
+ }
+
+ def get_capabilities(self):
+ result = super(Cliconf, self).get_capabilities()
+ result["rpc"] += [
+ "edit_banner",
+ "get_diff",
+ "run_commands",
+ "get_defaults_flag",
+ ]
+ result["device_operations"] = self.get_device_operations()
+ result.update(self.get_option_values())
+ return json.dumps(result)
+
+ def edit_banner(
+ self, candidate=None, multiline_delimiter="@", commit=True
+ ):
+ """
+ Edit banner on remote device
+ :param candidate: Banners to be loaded, in json format
+ :param multiline_delimiter: Line delimiter for banner
+ :param commit: Boolean value that indicates if the candidate banner
+ configuration should be pushed into the running configuration or discarded.
+ :return: Returns response of executing the configuration command received
+ from remote host
+ """
+ resp = {}
+ banners_obj = json.loads(candidate)
+ results = []
+ requests = []
+ if commit:
+ for key, value in iteritems(banners_obj):
+ key += " %s" % multiline_delimiter
+ self.send_command("config terminal", sendonly=True)
+ for cmd in [key, value, multiline_delimiter]:
+ obj = {"command": cmd, "sendonly": True}
+ results.append(self.send_command(**obj))
+ requests.append(cmd)
+
+ self.send_command("end", sendonly=True)
+ time.sleep(0.1)
+ results.append(self.send_command("\n"))
+ requests.append("\n")
+
+ resp["request"] = requests
+ resp["response"] = results
+
+ return resp
+
+ def run_commands(self, commands=None, check_rc=True):
+ if commands is None:
+ raise ValueError("'commands' value is required")
+
+ responses = list()
+ for cmd in to_list(commands):
+ if not isinstance(cmd, Mapping):
+ cmd = {"command": cmd}
+
+ output = cmd.pop("output", None)
+ if output:
+ raise ValueError(
+ "'output' value %s is not supported for run_commands"
+ % output
+ )
+
+ try:
+ out = self.send_command(**cmd)
+ except AnsibleConnectionFailure as e:
+ if check_rc:
+ raise
+ out = getattr(e, "err", to_text(e))
+
+ responses.append(out)
+
+ return responses
+
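+ # Illustrative usage sketch (an assumption, not part of the original file):
+ # commands may be plain strings or dicts that answer a device prompt, e.g.
+ #
+ #   cliconf.run_commands([
+ #       "show version",
+ #       {"command": "clear counters GigabitEthernet0/1",
+ #        "prompt": r"\[confirm\]", "answer": "y"},
+ #   ])
+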
+ def get_defaults_flag(self):
+ """
+ The method identifies the filter that should be used to fetch running-configuration
+ with defaults.
+ :return: valid default filter
+ """
+ out = self.get("show running-config ?")
+ out = to_text(out, errors="surrogate_then_replace")
+
+ commands = set()
+ for line in out.splitlines():
+ if line.strip():
+ commands.add(line.strip().split()[0])
+
+ if "all" in commands:
+ return "all"
+ else:
+ return "full"
+
+ def set_cli_prompt_context(self):
+ """
+ Make sure we are in the operational cli mode
+ :return: None
+ """
+ if self._connection.connected:
+ out = self._connection.get_prompt()
+
+ if out is None:
+ raise AnsibleConnectionFailure(
+ message=u"cli prompt is not identified from the last received"
+ u" response window: %s"
+ % self._connection._last_recv_window
+ )
+
+ if re.search(
+ r"config.*\)#",
+ to_text(out, errors="surrogate_then_replace").strip(),
+ ):
+ self._connection.queue_message(
+ "vvvv", "wrong context, sending end to device"
+ )
+ self._connection.send_command("end")
+
+ def _extract_banners(self, config):
+ banners = {}
+ banner_cmds = re.findall(r"^banner (\w+)", config, re.M)
+ for cmd in banner_cmds:
+ regex = r"banner %s \^C(.+?)(?=\^C)" % cmd
+ match = re.search(regex, config, re.S)
+ if match:
+ key = "banner %s" % cmd
+ banners[key] = match.group(1).strip()
+
+ for cmd in banner_cmds:
+ regex = r"banner %s \^C(.+?)(?=\^C)" % cmd
+ match = re.search(regex, config, re.S)
+ if match:
+ config = config.replace(str(match.group(1)), "")
+
+ config = re.sub(r"banner \w+ \^C\^C", "!! banner removed", config)
+ return config, banners
+
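+ # Illustrative sketch of _extract_banners() behaviour: for a config fragment
+ # such as
+ #
+ #   banner motd ^CAuthorized access only^C
+ #
+ # it returns the config with the banner body replaced by "!! banner removed"
+ # and a banners dict like {"banner motd": "Authorized access only"}.
+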
+ def _diff_banners(self, want, have):
+ candidate = {}
+ for key, value in iteritems(want):
+ if value != have.get(key):
+ candidate[key] = value
+ return candidate
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py
new file mode 100644
index 00000000..ff22d27c
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r"""options:
+ provider:
+ description:
+ - B(Deprecated)
+ - 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).'
+ - For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html).
+ - HORIZONTALLINE
+ - A dict object containing connection details.
+ type: dict
+ suboptions:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote device
+ over the specified transport. The value of host is used as the destination
+ address for the transport.
+ type: str
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote device.
+ type: int
+ default: 22
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to the remote
+ device. This value is used to authenticate the SSH session. If the value
+ is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
+ will be used instead.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to the remote
+ device. This value is used to authenticate the SSH session. If the value
+ is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
+ will be used instead.
+ type: str
+ timeout:
+ description:
+ - Specifies the timeout in seconds for communicating with the network device
+ for either connecting or sending commands. If the timeout is exceeded before
+ the operation is completed, the module will error.
+ type: int
+ default: 10
+ ssh_keyfile:
+ description:
+ - Specifies the SSH key to use to authenticate the connection to the remote
+ device. This value is the path to the key used to authenticate the SSH
+ session. If the value is not specified in the task, the value of environment
+ variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ type: path
+ authorize:
+ description:
+ - Instructs the module to enter privileged mode on the remote device before
+ sending any commands. If not specified, the device will attempt to execute
+ all commands in non-privileged mode. If the value is not specified in the
+ task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be
+ used instead.
+ type: bool
+ default: false
+ auth_pass:
+ description:
+ - Specifies the password to use if required to enter privileged mode on the
+ remote device. If I(authorize) is false, then this argument does nothing.
+ If the value is not specified in the task, the value of environment variable
+ C(ANSIBLE_NET_AUTH_PASS) will be used instead.
+ type: str
+notes:
+- For more information on using Ansible to manage network devices see the :ref:`Ansible
+ Network Guide <network_guide>`
+- For more information on using Ansible to manage Cisco devices see the `Cisco integration
+ page <https://www.ansible.com/integrations/networks/cisco>`_.
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py
new file mode 100644
index 00000000..6818a0ce
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py
@@ -0,0 +1,197 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible.module_utils.connection import Connection, ConnectionError
+
+_DEVICE_CONFIGS = {}
+
+ios_provider_spec = {
+ "host": dict(),
+ "port": dict(type="int"),
+ "username": dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
+ "password": dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True
+ ),
+ "ssh_keyfile": dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
+ ),
+ "authorize": dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]), type="bool"
+ ),
+ "auth_pass": dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"]), no_log=True
+ ),
+ "timeout": dict(type="int"),
+}
+ios_argument_spec = {
+ "provider": dict(
+ type="dict", options=ios_provider_spec, removed_in_version=2.14
+ )
+}
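+
+# Note (descriptive, not in the original file): with connection local, provider
+# values left unset in the task fall back to the ANSIBLE_NET_* environment
+# variables declared above via env_fallback.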
+
+
+def get_provider_argspec():
+ return ios_provider_spec
+
+
+def get_connection(module):
+ if hasattr(module, "_ios_connection"):
+ return module._ios_connection
+
+ capabilities = get_capabilities(module)
+ network_api = capabilities.get("network_api")
+ if network_api == "cliconf":
+ module._ios_connection = Connection(module._socket_path)
+ else:
+ module.fail_json(msg="Invalid connection type %s" % network_api)
+
+ return module._ios_connection
+
+
+def get_capabilities(module):
+ if hasattr(module, "_ios_capabilities"):
+ return module._ios_capabilities
+ try:
+ capabilities = Connection(module._socket_path).get_capabilities()
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ module._ios_capabilities = json.loads(capabilities)
+ return module._ios_capabilities
+
+
+def get_defaults_flag(module):
+ connection = get_connection(module)
+ try:
+ out = connection.get_defaults_flag()
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ return to_text(out, errors="surrogate_then_replace").strip()
+
+
+def get_config(module, flags=None):
+ flags = to_list(flags)
+
+ section_filter = False
+ if flags and "section" in flags[-1]:
+ section_filter = True
+
+ flag_str = " ".join(flags)
+
+ try:
+ return _DEVICE_CONFIGS[flag_str]
+ except KeyError:
+ connection = get_connection(module)
+ try:
+ out = connection.get_config(flags=flags)
+ except ConnectionError as exc:
+ if section_filter:
+ # Some ios devices don't understand `| section foo`
+ out = get_config(module, flags=flags[:-1])
+ else:
+ module.fail_json(
+ msg=to_text(exc, errors="surrogate_then_replace")
+ )
+ cfg = to_text(out, errors="surrogate_then_replace").strip()
+ _DEVICE_CONFIGS[flag_str] = cfg
+ return cfg
+
+
+def run_commands(module, commands, check_rc=True):
+ connection = get_connection(module)
+ try:
+ return connection.run_commands(commands=commands, check_rc=check_rc)
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc))
+
+
+def load_config(module, commands):
+ connection = get_connection(module)
+
+ try:
+ resp = connection.edit_config(commands)
+ return resp.get("response")
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc))
+
+
+def normalize_interface(name):
+ """Return the normalized interface name
+ """
+ if not name:
+ return
+
+ def _get_number(name):
+ digits = ""
+ for char in name:
+ if char.isdigit() or char in "/.":
+ digits += char
+ return digits
+
+ if name.lower().startswith("gi"):
+ if_type = "GigabitEthernet"
+ elif name.lower().startswith("te"):
+ if_type = "TenGigabitEthernet"
+ elif name.lower().startswith("fa"):
+ if_type = "FastEthernet"
+ elif name.lower().startswith("fo"):
+ if_type = "FortyGigabitEthernet"
+ elif name.lower().startswith("et"):
+ if_type = "Ethernet"
+ elif name.lower().startswith("vl"):
+ if_type = "Vlan"
+ elif name.lower().startswith("lo"):
+ if_type = "loopback"
+ elif name.lower().startswith("po"):
+ if_type = "port-channel"
+ elif name.lower().startswith("nv"):
+ if_type = "nve"
+ elif name.lower().startswith("twe"):
+ if_type = "TwentyFiveGigE"
+ elif name.lower().startswith("hu"):
+ if_type = "HundredGigE"
+ else:
+ if_type = None
+
+ number_list = name.split(" ")
+ if len(number_list) == 2:
+ if_number = number_list[-1].strip()
+ else:
+ if_number = _get_number(name)
+
+ if if_type:
+ proper_interface = if_type + if_number
+ else:
+ proper_interface = name
+
+ return proper_interface
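+
+
+# Illustrative behaviour of normalize_interface(), derived from the prefix
+# table above (examples only):
+#
+#   normalize_interface("Gi0/1")     -> "GigabitEthernet0/1"
+#   normalize_interface("fa 2/0/1")  -> "FastEthernet2/0/1"
+#   normalize_interface("po10")      -> "port-channel10"
+#   normalize_interface("Serial0/0") -> "Serial0/0"  (unknown prefix, unchanged)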
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py
new file mode 100644
index 00000000..ef383fcc
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: ios_command
+author: Peter Sprygada (@privateip)
+short_description: Run commands on remote devices running Cisco IOS
+description:
+- Sends arbitrary commands to an ios node and returns the results read from the device.
+ This module includes an argument that will cause the module to wait for a specific
+ condition before returning or timing out if the condition is not met.
+- This module does not support running commands in configuration mode. Please use
+ M(ios_config) to configure IOS devices.
+extends_documentation_fragment:
+- cisco.ios.ios
+notes:
+- Tested against IOS 15.6
+options:
+ commands:
+ description:
+ - List of commands to send to the remote ios device over the configured provider.
+ The resulting output from the command is returned. If the I(wait_for) argument
+ is provided, the module is not returned until the condition is satisfied or
+ the number of retries has expired. If a command sent to the device requires
+ answering a prompt, it is possible to pass a dict containing I(command), I(answer)
+ and I(prompt). Common answers are 'y' or "\r" (carriage return, which must be
+ written in double quotes). See examples.
+ required: true
+ wait_for:
+ description:
+ - List of conditions to evaluate against the output of the command. The task will
+ wait for each condition to be true before moving forward. If the conditional
+ is not true within the configured number of retries, the task fails. See examples.
+ aliases:
+ - waitfor
+ match:
+ description:
+ - The I(match) argument is used in conjunction with the I(wait_for) argument to
+ specify the match policy. Valid values are C(all) or C(any). If the value
+ is set to C(all) then all conditionals in the wait_for must be satisfied. If
+ the value is set to C(any) then only one of the values must be satisfied.
+ default: all
+ choices:
+ - any
+ - all
+ retries:
+ description:
+ - Specifies the number of retries a command should be tried before it is considered
+ failed. The command is run on the target device every retry and evaluated against
+ the I(wait_for) conditions.
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between retries of the command. If
+ the command does not pass the specified conditions, the interval indicates how
+ long to wait before trying the command again.
+ default: 1
+"""
+
+EXAMPLES = r"""
+tasks:
+ - name: run show version on remote devices
+ ios_command:
+ commands: show version
+
+ - name: run show version and check to see if output contains IOS
+ ios_command:
+ commands: show version
+ wait_for: result[0] contains IOS
+
+ - name: run multiple commands on remote nodes
+ ios_command:
+ commands:
+ - show version
+ - show interfaces
+
+ - name: run multiple commands and evaluate the output
+ ios_command:
+ commands:
+ - show version
+ - show interfaces
+ wait_for:
+ - result[0] contains IOS
+ - result[1] contains Loopback0
+
+ - name: run commands that require answering a prompt
+ ios_command:
+ commands:
+ - command: 'clear counters GigabitEthernet0/1'
+ prompt: 'Clear "show interface" counters on this interface \[confirm\]'
+ answer: 'y'
+ - command: 'clear counters GigabitEthernet0/2'
+ prompt: '[confirm]'
+ answer: "\r"
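+
+ # Illustrative example (not from the original docs): tune retries and
+ # interval while waiting on a condition.
+ - name: wait for the device to report the expected version
+ ios_command:
+ commands: show version
+ wait_for: result[0] contains 15.6
+ retries: 5
+ interval: 2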
+"""
+
+RETURN = """
+stdout:
+ description: The set of responses from the commands
+ returned: always apart from low level errors (such as action plugin)
+ type: list
+ sample: ['...', '...']
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always apart from low level errors (such as action plugin)
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+ description: The list of conditionals that have failed
+ returned: failed
+ type: list
+ sample: ['...', '...']
+"""
+import time
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
+ Conditional,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ transform_commands,
+ to_lines,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+ run_commands,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+ ios_argument_spec,
+)
+
+
+def parse_commands(module, warnings):
+ commands = transform_commands(module)
+
+ if module.check_mode:
+ for item in list(commands):
+ if not item["command"].startswith("show"):
+ warnings.append(
+ "Only show commands are supported when using check mode, not "
+ "executing %s" % item["command"]
+ )
+ commands.remove(item)
+
+ return commands
+
+
+def main():
+ """main entry point for module execution
+ """
+ argument_spec = dict(
+ commands=dict(type="list", required=True),
+ wait_for=dict(type="list", aliases=["waitfor"]),
+ match=dict(default="all", choices=["all", "any"]),
+ retries=dict(default=10, type="int"),
+ interval=dict(default=1, type="int"),
+ )
+
+ argument_spec.update(ios_argument_spec)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec, supports_check_mode=True
+ )
+
+ warnings = list()
+ result = {"changed": False, "warnings": warnings}
+ commands = parse_commands(module, warnings)
+ wait_for = module.params["wait_for"] or list()
+
+ try:
+ conditionals = [Conditional(c) for c in wait_for]
+ except AttributeError as exc:
+ module.fail_json(msg=to_text(exc))
+
+ retries = module.params["retries"]
+ interval = module.params["interval"]
+ match = module.params["match"]
+
+ while retries > 0:
+ responses = run_commands(module, commands)
+
+ for item in list(conditionals):
+ if item(responses):
+ if match == "any":
+ conditionals = list()
+ break
+ conditionals.remove(item)
+
+ if not conditionals:
+ break
+
+ time.sleep(interval)
+ retries -= 1
+
+ if conditionals:
+ failed_conditions = [item.raw for item in conditionals]
+ msg = "One or more conditional statements have not been satisfied"
+ module.fail_json(msg=msg, failed_conditions=failed_conditions)
+
+ result.update(
+ {"stdout": responses, "stdout_lines": list(to_lines(responses))}
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py
new file mode 100644
index 00000000..beec5b8d
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py
@@ -0,0 +1,596 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: ios_config
+author: Peter Sprygada (@privateip)
+short_description: Manage Cisco IOS configuration sections
+description:
+- Cisco IOS configurations use a simple block indent file syntax for segmenting configuration
+ into sections. This module provides an implementation for working with IOS configuration
+ sections in a deterministic way.
+extends_documentation_fragment:
+- cisco.ios.ios
+notes:
+- Tested against IOS 15.6
+- Abbreviated commands are NOT idempotent, see L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
+options:
+ lines:
+ description:
+ - The ordered set of commands that should be configured in the section. The commands
+ must be the exact same commands as found in the device running-config. Be sure
+ to note the configuration command syntax as some commands are automatically
+ modified by the device config parser.
+ aliases:
+ - commands
+ parents:
+ description:
+ - The ordered set of parents that uniquely identify the section or hierarchy the
+ commands should be checked against. If the parents argument is omitted, the
+ commands are checked against the set of top level or global commands.
+ src:
+ description:
+ - Specifies the source path to the file that contains the configuration or configuration
+ template to load. The path to the source file can either be the full path on
+ the Ansible control host or a relative path from the playbook or role root directory. This
+ argument is mutually exclusive with I(lines), I(parents).
+ before:
+ description:
+ - The ordered set of commands to push on to the command stack if a change needs
+ to be made. This allows the playbook designer the opportunity to perform configuration
+ commands prior to pushing any changes without affecting how the set of commands
+ are matched against the system.
+ after:
+ description:
+ - The ordered set of commands to append to the end of the command stack if a change
+ needs to be made. Just like with I(before) this allows the playbook designer
+ to append a set of commands to be executed after the command set.
+ match:
+ description:
+ - Instructs the module on the way to perform the matching of the set of commands
+ against the current device config. If match is set to I(line), commands are
+ matched line by line. If match is set to I(strict), command lines are matched
+ with respect to position. If match is set to I(exact), command lines must be
+ an equal match. Finally, if match is set to I(none), the module will not attempt
+ to compare the source configuration with the running configuration on the remote
+ device.
+ choices:
+ - line
+ - strict
+ - exact
+ - none
+ default: line
+ replace:
+ description:
+ - Instructs the module on the way to perform the configuration on the device.
+ If the replace argument is set to I(line) then the modified lines are pushed
+ to the device in configuration mode. If the replace argument is set to I(block)
+ then the entire command block is pushed to the device in configuration mode
+ if any line is not correct.
+ default: line
+ choices:
+ - line
+ - block
+ multiline_delimiter:
+ description:
+ - This argument is used when pushing a multiline configuration element to the
+ IOS device. It specifies the character to use as the delimiting character. This
+ only applies to the configuration action.
+ default: '@'
+ backup:
+ description:
+ - This argument will cause the module to create a full backup of the current C(running-config)
+ from the remote device before any changes are made. If the C(backup_options)
+ value is not given, the backup file is written to the C(backup) folder in the
+ playbook root directory or role root directory, if the playbook is part of an Ansible
+ role. If the directory does not exist, it is created.
+ type: bool
+ default: 'no'
+ running_config:
+ description:
+ - The module, by default, will connect to the remote device and retrieve the current
+ running-config to use as a base for comparing against the contents of source.
+ There are times when it is not desirable to have the task get the current running-config
+ for every task in a playbook. The I(running_config) argument allows the implementer
+ to pass in the configuration to use as the base config for comparison.
+ aliases:
+ - config
+ defaults:
+ description:
+ - This argument specifies whether or not to collect all defaults when getting
+ the remote device running config. When enabled, the module will get the current
+ config by issuing the command C(show running-config all).
+ type: bool
+ default: 'no'
+ save_when:
+ description:
+ - When changes are made to the device running-configuration, the changes are not
+ copied to non-volatile storage by default. Using this argument will change
+ that behavior. If the argument is set to I(always), then the running-config will
+ always be copied to the startup-config and the I(modified) flag will always
+ be set to True. If the argument is set to I(modified), then the running-config
+ will only be copied to the startup-config if it has changed since the last save
+ to startup-config. If the argument is set to I(never), the running-config will
+ never be copied to the startup-config. If the argument is set to I(changed),
+ then the running-config will only be copied to the startup-config if the task
+ has made a change. I(changed) was added in Ansible 2.5.
+ default: never
+ choices:
+ - always
+ - never
+ - modified
+ - changed
+ diff_against:
+ description:
+ - When using the C(ansible-playbook --diff) command line argument the module can
+ generate diffs against different sources.
+ - When this option is configured as I(startup), the module will return the diff
+ of the running-config against the startup-config.
+ - When this option is configured as I(intended), the module will return the diff
+ of the running-config against the configuration provided in the C(intended_config)
+ argument.
+ - When this option is configured as I(running), the module will return the before
+ and after diff of the running-config with respect to any changes made to the
+ device configuration.
+ choices:
+ - running
+ - startup
+ - intended
+ diff_ignore_lines:
+ description:
+ - Use this argument to specify one or more lines that should be ignored during
+ the diff. This is used for lines in the configuration that are automatically
+ updated by the system. This argument takes a list of regular expressions or
+ exact line matches.
+ intended_config:
+ description:
+ - The C(intended_config) provides the master configuration that the node should
+ conform to and is used to check the final running-config against. This argument
+ will not modify any settings on the remote device and is strictly used to check
+ the current device's configuration for compliance. When specifying
+ this argument, the task should also modify the C(diff_against) value and set
+ it to I(intended).
+ backup_options:
+ description:
+ - This is a dict object containing configurable options related to backup file
+ path. The value of this option is read only when C(backup) is set to I(yes),
+ if C(backup) is set to I(no) this option will be silently ignored.
+ suboptions:
+ filename:
+ description:
+ - The filename to be used to store the backup configuration. If the filename
+ is not given, it will be generated based on the hostname, current time and
+ date, in the format <hostname>_config.<current-date>@<current-time>
+ dir_path:
+ description:
+ - This option provides the path ending with directory name in which the backup
+ configuration file will be stored. If the directory does not exist it will
+ be first created and the filename is either the value of C(filename) or
+ default filename as described in C(filename) options description. If the
+ path value is not given in that case a I(backup) directory will be created
+ in the current working directory and backup configuration will be copied
+ in C(filename) within I(backup) directory.
+ type: path
+ type: dict
+"""
+
+EXAMPLES = """
+- name: configure top level configuration
+ ios_config:
+ lines: hostname {{ inventory_hostname }}
+
+- name: configure interface settings
+ ios_config:
+ lines:
+ - description test interface
+ - ip address 172.31.1.1 255.255.255.0
+ parents: interface Ethernet1
+
+- name: configure ip helpers on multiple interfaces
+ ios_config:
+ lines:
+ - ip helper-address 172.26.1.10
+ - ip helper-address 172.26.3.8
+ parents: "{{ item }}"
+ with_items:
+ - interface Ethernet1
+ - interface Ethernet2
+ - interface GigabitEthernet1
+
+- name: configure policer in Scavenger class
+ ios_config:
+ lines:
+ - conform-action transmit
+ - exceed-action drop
+ parents:
+ - policy-map Foo
+ - class Scavenger
+ - police cir 64000
+
+- name: load new acl into device
+ ios_config:
+ lines:
+ - 10 permit ip host 192.0.2.1 any log
+ - 20 permit ip host 192.0.2.2 any log
+ - 30 permit ip host 192.0.2.3 any log
+ - 40 permit ip host 192.0.2.4 any log
+ - 50 permit ip host 192.0.2.5 any log
+ parents: ip access-list extended test
+ before: no ip access-list extended test
+ match: exact
+
+- name: check the running-config against master config
+ ios_config:
+ diff_against: intended
+ intended_config: "{{ lookup('file', 'master.cfg') }}"
+
+- name: check the startup-config against the running-config
+ ios_config:
+ diff_against: startup
+ diff_ignore_lines:
+ - ntp clock .*
+
+- name: save running to startup when modified
+ ios_config:
+ save_when: modified
+
+- name: for idempotency, use full-form commands
+ ios_config:
+ lines:
+ # - shut
+ - shutdown
+ # parents: int gig1/0/11
+ parents: interface GigabitEthernet1/0/11
+
+# Set boot image based on comparison to a group_var (version) and the version
+# that is returned from the `ios_facts` module
+- name: SETTING BOOT IMAGE
+ ios_config:
+ lines:
+ - no boot system
+ - boot system flash bootflash:{{new_image}}
+ host: "{{ inventory_hostname }}"
+ when: ansible_net_version != version
+
+- name: render a Jinja2 template onto an IOS device
+ ios_config:
+ backup: yes
+ src: ios_template.j2
+
+- name: configurable backup path
+ ios_config:
+ src: ios_template.j2
+ backup: yes
+ backup_options:
+ filename: backup.cfg
+ dir_path: /home/user
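+
+# Illustrative example (not from the original docs): combine a configuration
+# change with an automatic save that runs only when the task changed the device.
+- name: set the domain name and save only on change
+ ios_config:
+ lines:
+ - ip domain name example.com
+ save_when: changed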
+"""
+
+RETURN = """
+updates:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
+commands:
+ description: The set of commands that will be pushed to the remote device
+ returned: always
+ type: list
+ sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: str
+ sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34
+filename:
+ description: The name of the backup file
+ returned: when backup is yes and filename is not specified in backup options
+ type: str
+ sample: ios_config.2016-07-16@22:28:34
+shortname:
+ description: The full path to the backup file excluding the timestamp
+ returned: when backup is yes and filename is not specified in backup options
+ type: str
+ sample: /playbooks/ansible/backup/ios_config
+date:
+ description: The date extracted from the backup file name
+ returned: when backup is yes
+ type: str
+ sample: "2016-07-16"
+time:
+ description: The time extracted from the backup file name
+ returned: when backup is yes
+ type: str
+ sample: "22:28:34"
+"""
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+ run_commands,
+ get_config,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+ get_defaults_flag,
+ get_connection,
+)
+from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
+ ios_argument_spec,
+)
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
+ NetworkConfig,
+ dumps,
+)
+
+
+def check_args(module, warnings):
+ if module.params["multiline_delimiter"]:
+ if len(module.params["multiline_delimiter"]) != 1:
+ module.fail_json(
+ msg="multiline_delimiter value can only be a "
+ "single character"
+ )
+
+
+def edit_config_or_macro(connection, commands):
+ # only catch the macro configuration command,
+ # not negated 'no' variation.
+ if commands[0].startswith("macro name"):
+ connection.edit_macro(candidate=commands)
+ else:
+ connection.edit_config(candidate=commands)
+
+
+def get_candidate_config(module):
+ candidate = ""
+ if module.params["src"]:
+ candidate = module.params["src"]
+
+ elif module.params["lines"]:
+ candidate_obj = NetworkConfig(indent=1)
+ parents = module.params["parents"] or list()
+ candidate_obj.add(module.params["lines"], parents=parents)
+ candidate = dumps(candidate_obj, "raw")
+
+ return candidate
+
+
+def get_running_config(module, current_config=None, flags=None):
+ running = module.params["running_config"]
+ if not running:
+ if not module.params["defaults"] and current_config:
+ running = current_config
+ else:
+ running = get_config(module, flags=flags)
+
+ return running
+
+
+def save_config(module, result):
+ result["changed"] = True
+ if not module.check_mode:
+ run_commands(module, "copy running-config startup-config\r")
+ else:
+ module.warn(
+ "Skipping command `copy running-config startup-config` "
+ "due to check_mode. Configuration not copied to "
+ "non-volatile storage"
+ )
+
+
+def main():
+ """ main entry point for module execution
+ """
+ backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
+ argument_spec = dict(
+ src=dict(type="path"),
+ lines=dict(aliases=["commands"], type="list"),
+ parents=dict(type="list"),
+ before=dict(type="list"),
+ after=dict(type="list"),
+ match=dict(
+ default="line", choices=["line", "strict", "exact", "none"]
+ ),
+ replace=dict(default="line", choices=["line", "block"]),
+ multiline_delimiter=dict(default="@"),
+ running_config=dict(aliases=["config"]),
+ intended_config=dict(),
+ defaults=dict(type="bool", default=False),
+ backup=dict(type="bool", default=False),
+ backup_options=dict(type="dict", options=backup_spec),
+ save_when=dict(
+ choices=["always", "never", "modified", "changed"], default="never"
+ ),
+ diff_against=dict(choices=["startup", "intended", "running"]),
+ diff_ignore_lines=dict(type="list"),
+ )
+
+ argument_spec.update(ios_argument_spec)
+
+ mutually_exclusive = [("lines", "src"), ("parents", "src")]
+
+ required_if = [
+ ("match", "strict", ["lines"]),
+ ("match", "exact", ["lines"]),
+ ("replace", "block", ["lines"]),
+ ("diff_against", "intended", ["intended_config"]),
+ ]
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ result = {"changed": False}
+
+ warnings = list()
+ check_args(module, warnings)
+ result["warnings"] = warnings
+
+ diff_ignore_lines = module.params["diff_ignore_lines"]
+ config = None
+ contents = None
+ flags = get_defaults_flag(module) if module.params["defaults"] else []
+ connection = get_connection(module)
+
+ if module.params["backup"] or (
+ module._diff and module.params["diff_against"] == "running"
+ ):
+ contents = get_config(module, flags=flags)
+ config = NetworkConfig(indent=1, contents=contents)
+ if module.params["backup"]:
+ result["__backup__"] = contents
+
+ if any((module.params["lines"], module.params["src"])):
+ match = module.params["match"]
+ replace = module.params["replace"]
+ path = module.params["parents"]
+
+ candidate = get_candidate_config(module)
+ running = get_running_config(module, contents, flags=flags)
+ try:
+ response = connection.get_diff(
+ candidate=candidate,
+ running=running,
+ diff_match=match,
+ diff_ignore_lines=diff_ignore_lines,
+ path=path,
+ diff_replace=replace,
+ )
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ config_diff = response["config_diff"]
+ banner_diff = response["banner_diff"]
+
+ if config_diff or banner_diff:
+ commands = config_diff.split("\n")
+
+ if module.params["before"]:
+ commands[:0] = module.params["before"]
+
+ if module.params["after"]:
+ commands.extend(module.params["after"])
+
+ result["commands"] = commands
+ result["updates"] = commands
+ result["banners"] = banner_diff
+
+ # send the configuration commands to the device and merge
+ # them with the current running config
+ if not module.check_mode:
+ if commands:
+ edit_config_or_macro(connection, commands)
+ if banner_diff:
+ connection.edit_banner(
+ candidate=json.dumps(banner_diff),
+ multiline_delimiter=module.params[
+ "multiline_delimiter"
+ ],
+ )
+
+ result["changed"] = True
+
+ running_config = module.params["running_config"]
+ startup_config = None
+
+ if module.params["save_when"] == "always":
+ save_config(module, result)
+ elif module.params["save_when"] == "modified":
+ output = run_commands(
+ module, ["show running-config", "show startup-config"]
+ )
+
+ running_config = NetworkConfig(
+ indent=1, contents=output[0], ignore_lines=diff_ignore_lines
+ )
+ startup_config = NetworkConfig(
+ indent=1, contents=output[1], ignore_lines=diff_ignore_lines
+ )
+
+ if running_config.sha1 != startup_config.sha1:
+ save_config(module, result)
+ elif module.params["save_when"] == "changed" and result["changed"]:
+ save_config(module, result)
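+
+ # In short: save_when=always copies running to startup unconditionally,
+ # "modified" saves only when the two configs' SHA1 digests differ, and
+ # "changed" saves only when this run actually pushed commands.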
+
+ if module._diff:
+ if not running_config:
+ output = run_commands(module, "show running-config")
+ contents = output[0]
+ else:
+ contents = running_config
+
+ # recreate the object in order to process diff_ignore_lines
+ running_config = NetworkConfig(
+ indent=1, contents=contents, ignore_lines=diff_ignore_lines
+ )
+
+ if module.params["diff_against"] == "running":
+ if module.check_mode:
+ module.warn(
+ "unable to perform diff against running-config due to check mode"
+ )
+ contents = None
+ else:
+ contents = config.config_text
+
+ elif module.params["diff_against"] == "startup":
+ if not startup_config:
+ output = run_commands(module, "show startup-config")
+ contents = output[0]
+ else:
+ contents = startup_config.config_text
+
+ elif module.params["diff_against"] == "intended":
+ contents = module.params["intended_config"]
+
+ if contents is not None:
+ base_config = NetworkConfig(
+ indent=1, contents=contents, ignore_lines=diff_ignore_lines
+ )
+
+ if running_config.sha1 != base_config.sha1:
+ if module.params["diff_against"] == "intended":
+ before = running_config
+ after = base_config
+ elif module.params["diff_against"] in ("startup", "running"):
+ before = base_config
+ after = running_config
+
+ result.update(
+ {
+ "changed": True,
+ "diff": {"before": str(before), "after": str(after)},
+ }
+ )
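+
+ # Note the orientation: for diff_against=intended the running config is
+ # the "before" and the intended config the "after"; for startup/running
+ # the comparison base is "before" and the live running config "after".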
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py
new file mode 100644
index 00000000..29f31b0e
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/terminal/ios.py
@@ -0,0 +1,115 @@
+#
+# (c) 2016 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+import re
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.plugins.terminal import TerminalBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TerminalModule(TerminalBase):
+
+ terminal_stdout_re = [
+ re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
+ ]
+
+ terminal_stderr_re = [
+ re.compile(br"% ?Error"),
+ # re.compile(br"^% \w+", re.M),
+ re.compile(br"% ?Bad secret"),
+ re.compile(br"[\r\n%] Bad passwords"),
+ re.compile(br"invalid input", re.I),
+ re.compile(br"(?:incomplete|ambiguous) command", re.I),
+ re.compile(br"connection timed out", re.I),
+ re.compile(br"[^\r\n]+ not found"),
+ re.compile(br"'[^']' +returned error code: ?\d+"),
+ re.compile(br"Bad mask", re.I),
+ re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
+ re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
+ re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
+ re.compile(br"Command authorization failed"),
+ ]
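+
+ # Rough examples (illustrative) of prompts the stdout patterns accept:
+ # b"Router>", b"Router#", b"Switch(config)#", b"rtr-1(config-if)#"
+ # and of errors the stderr patterns trap: b"% Invalid input detected",
+ # b"Command authorization failed".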
+
+ def on_open_shell(self):
+ try:
+ self._exec_cli_command(b"terminal length 0")
+ except AnsibleConnectionFailure:
+ raise AnsibleConnectionFailure("unable to set terminal parameters")
+
+ try:
+ self._exec_cli_command(b"terminal width 512")
+ try:
+ self._exec_cli_command(b"terminal width 0")
+ except AnsibleConnectionFailure:
+ pass
+ except AnsibleConnectionFailure:
+ display.display(
+ "WARNING: Unable to set terminal width, command responses may be truncated"
+ )
+
+ def on_become(self, passwd=None):
+ if self._get_prompt().endswith(b"#"):
+ return
+
+ cmd = {u"command": u"enable"}
+ if passwd:
+ # Note: python-3.5 cannot combine u"" and r"" together. Thus make
+ # (ur"" prefixes were removed in Python 3)
+ # an r string and use to_text to ensure it's text on both py2 and py3.
+ cmd[u"prompt"] = to_text(
+ r"[\r\n]?(?:.*)?[Pp]assword: ?$", errors="surrogate_or_strict"
+ )
+ cmd[u"answer"] = passwd
+ cmd[u"prompt_retry_check"] = True
+ try:
+ self._exec_cli_command(
+ to_bytes(json.dumps(cmd), errors="surrogate_or_strict")
+ )
+ prompt = self._get_prompt()
+ if prompt is None or not prompt.endswith(b"#"):
+ raise AnsibleConnectionFailure(
+ "failed to elevate privilege to enable mode still at prompt [%s]"
+ % prompt
+ )
+ except AnsibleConnectionFailure as e:
+ prompt = self._get_prompt()
+ raise AnsibleConnectionFailure(
+ "unable to elevate privilege to enable mode, at prompt [%s] with error: %s"
+ % (prompt, e.message)
+ )
+
+ def on_unbecome(self):
+ prompt = self._get_prompt()
+ if prompt is None:
+ # if prompt is None most likely the terminal is hung up at a prompt
+ return
+
+ if b"(config" in prompt:
+ self._exec_cli_command(b"end")
+ self._exec_cli_command(b"disable")
+
+ elif prompt.endswith(b"#"):
+ self._exec_cli_command(b"disable")
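+
+ # Taken together: on_become sends "enable" (answering a password prompt
+ # when one is supplied) and verifies the prompt ends in b"#"; on_unbecome
+ # backs out of any config mode with "end" before dropping privileges
+ # with "disable".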
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py
new file mode 100644
index 00000000..cab2f3fd
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/action/vyos.py
@@ -0,0 +1,129 @@
+#
+# (c) 2016 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import sys
+import copy
+
+from ansible_collections.ansible.netcommon.plugins.action.network import (
+ ActionModule as ActionNetworkModule,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ load_provider,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ vyos_provider_spec,
+)
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionNetworkModule):
+ def run(self, tmp=None, task_vars=None):
+ del tmp # tmp no longer has any effect
+
+ module_name = self._task.action.split(".")[-1]
+ self._config_module = True if module_name == "vyos_config" else False
+ persistent_connection = self._play_context.connection.split(".")[-1]
+ warnings = []
+
+ if persistent_connection == "network_cli":
+ provider = self._task.args.get("provider", {})
+ if any(provider.values()):
+ display.warning(
+ "provider is unnecessary when using network_cli and will be ignored"
+ )
+ del self._task.args["provider"]
+ elif self._play_context.connection == "local":
+ provider = load_provider(vyos_provider_spec, self._task.args)
+ pc = copy.deepcopy(self._play_context)
+ pc.connection = "ansible.netcommon.network_cli"
+ pc.network_os = "vyos.vyos.vyos"
+ pc.remote_addr = provider["host"] or self._play_context.remote_addr
+ pc.port = int(provider["port"] or self._play_context.port or 22)
+ pc.remote_user = (
+ provider["username"] or self._play_context.connection_user
+ )
+ pc.password = provider["password"] or self._play_context.password
+ pc.private_key_file = (
+ provider["ssh_keyfile"] or self._play_context.private_key_file
+ )
+
+ connection = self._shared_loader_obj.connection_loader.get(
+ "ansible.netcommon.persistent",
+ pc,
+ sys.stdin,
+ task_uuid=self._task._uuid,
+ )
+
+ # TODO: Remove below code after ansible minimal is cut out
+ if connection is None:
+ pc.connection = "network_cli"
+ pc.network_os = "vyos"
+ connection = self._shared_loader_obj.connection_loader.get(
+ "persistent", pc, sys.stdin, task_uuid=self._task._uuid
+ )
+
+ display.vvv(
+ "using connection plugin %s (was local)" % pc.connection,
+ pc.remote_addr,
+ )
+
+ command_timeout = (
+ int(provider["timeout"])
+ if provider["timeout"]
+ else connection.get_option("persistent_command_timeout")
+ )
+ connection.set_options(
+ direct={"persistent_command_timeout": command_timeout}
+ )
+
+ socket_path = connection.run()
+ display.vvvv("socket_path: %s" % socket_path, pc.remote_addr)
+ if not socket_path:
+ return {
+ "failed": True,
+ "msg": "unable to open shell. Please see: "
+ + "https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell",
+ }
+
+ task_vars["ansible_socket"] = socket_path
+ warnings.append(
+ [
+ "connection local support for this module is deprecated and will be removed in version 2.14, use connection %s"
+ % pc.connection
+ ]
+ )
+ else:
+ return {
+ "failed": True,
+ "msg": "Connection type %s is not valid for this module"
+ % self._play_context.connection,
+ }
+
+ result = super(ActionModule, self).run(task_vars=task_vars)
+ if warnings:
+ if "warnings" in result:
+ result["warnings"].extend(warnings)
+ else:
+ result["warnings"] = warnings
+ return result
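+
+ # Control flow sketch: with network_cli the provider argument is ignored
+ # (a warning is emitted); with connection=local the provider dict is
+ # promoted into a persistent network_cli connection. Hypothetical
+ # provider block (values invented):
+ #
+ # provider:
+ #   host: 192.0.2.1
+ #   username: vyos
+ #   password: vyos
+ #   timeout: 30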
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py
new file mode 100644
index 00000000..30336031
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py
@@ -0,0 +1,342 @@
+#
+# (c) 2017 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+author: Ansible Networking Team
+cliconf: vyos
+short_description: Use vyos cliconf to run commands on VyOS platform
+description:
+ - This vyos plugin provides low-level abstraction APIs for
+ sending CLI commands to, and receiving responses from, VyOS network devices.
+version_added: "2.4"
+"""
+
+import re
+import json
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
+ NetworkConfig,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible.plugins.cliconf import CliconfBase
+
+
+class Cliconf(CliconfBase):
+ def get_device_info(self):
+ device_info = {}
+
+ device_info["network_os"] = "vyos"
+ reply = self.get("show version")
+ data = to_text(reply, errors="surrogate_or_strict").strip()
+
+ match = re.search(r"Version:\s*(.*)", data)
+ if match:
+ device_info["network_os_version"] = match.group(1)
+
+ match = re.search(r"HW model:\s*(\S+)", data)
+ if match:
+ device_info["network_os_model"] = match.group(1)
+
+ reply = self.get("show host name")
+ device_info["network_os_hostname"] = to_text(
+ reply, errors="surrogate_or_strict"
+ ).strip()
+
+ return device_info
+
+ def get_config(self, flags=None, format=None):
+ if format:
+ option_values = self.get_option_values()
+ if format not in option_values["format"]:
+ raise ValueError(
+ "'format' value %s is invalid. Valid values of format are %s"
+ % (format, ", ".join(option_values["format"]))
+ )
+
+ if not flags:
+ flags = []
+
+ if format == "text":
+ command = "show configuration"
+ else:
+ command = "show configuration commands"
+
+ command += " ".join(to_list(flags))
+ command = command.strip()
+
+ out = self.send_command(command)
+ return out
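+
+ # Illustrative mapping: format="text" runs "show configuration", any
+ # other format (including the default of None) runs
+ # "show configuration commands", with any flags appended to the command.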
+
+ def edit_config(
+ self, candidate=None, commit=True, replace=None, comment=None
+ ):
+ resp = {}
+ operations = self.get_device_operations()
+ self.check_edit_config_capability(
+ operations, candidate, commit, replace, comment
+ )
+
+ results = []
+ requests = []
+ self.send_command("configure")
+ for cmd in to_list(candidate):
+ if not isinstance(cmd, Mapping):
+ cmd = {"command": cmd}
+
+ results.append(self.send_command(**cmd))
+ requests.append(cmd["command"])
+ out = self.get("compare")
+ out = to_text(out, errors="surrogate_or_strict")
+ diff_config = out if not out.startswith("No changes") else None
+
+ if diff_config:
+ if commit:
+ try:
+ self.commit(comment)
+ except AnsibleConnectionFailure as e:
+ msg = "commit failed: %s" % e.message
+ self.discard_changes()
+ raise AnsibleConnectionFailure(msg)
+ else:
+ self.send_command("exit")
+ else:
+ self.discard_changes()
+ else:
+ self.send_command("exit")
+ if (
+ to_text(
+ self._connection.get_prompt(), errors="surrogate_or_strict"
+ )
+ .strip()
+ .endswith("#")
+ ):
+ self.discard_changes()
+
+ if diff_config:
+ resp["diff"] = diff_config
+ resp["response"] = results
+ resp["request"] = requests
+ return resp
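+
+ # A minimal usage sketch (hypothetical candidate, run against a live
+ # connection):
+ #
+ # resp = cliconf.edit_config(
+ #     candidate=["set system host-name 'r1'"],
+ #     commit=True,
+ #     comment="rename host",
+ # )
+ # resp.get("diff")  # "compare" output, present only if something changed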
+
+ def get(
+ self,
+ command=None,
+ prompt=None,
+ answer=None,
+ sendonly=False,
+ output=None,
+ newline=True,
+ check_all=False,
+ ):
+ if not command:
+ raise ValueError("must provide value of command to execute")
+ if output:
+ raise ValueError(
+ "'output' value %s is not supported for get" % output
+ )
+
+ return self.send_command(
+ command=command,
+ prompt=prompt,
+ answer=answer,
+ sendonly=sendonly,
+ newline=newline,
+ check_all=check_all,
+ )
+
+ def commit(self, comment=None):
+ if comment:
+ command = 'commit comment "{0}"'.format(comment)
+ else:
+ command = "commit"
+ self.send_command(command)
+
+ def discard_changes(self):
+ self.send_command("exit discard")
+
+ def get_diff(
+ self,
+ candidate=None,
+ running=None,
+ diff_match="line",
+ diff_ignore_lines=None,
+ path=None,
+ diff_replace=None,
+ ):
+ diff = {}
+ device_operations = self.get_device_operations()
+ option_values = self.get_option_values()
+
+ if candidate is None and device_operations["supports_generate_diff"]:
+ raise ValueError(
+ "candidate configuration is required to generate diff"
+ )
+
+ if diff_match not in option_values["diff_match"]:
+ raise ValueError(
+ "'match' value %s in invalid, valid values are %s"
+ % (diff_match, ", ".join(option_values["diff_match"]))
+ )
+
+ if diff_replace:
+ raise ValueError("'replace' in diff is not supported")
+
+ if diff_ignore_lines:
+ raise ValueError("'diff_ignore_lines' in diff is not supported")
+
+ if path:
+ raise ValueError("'path' in diff is not supported")
+
+ set_format = candidate.startswith("set") or candidate.startswith(
+ "delete"
+ )
+ candidate_obj = NetworkConfig(indent=4, contents=candidate)
+ if not set_format:
+ config = [c.line for c in candidate_obj.items]
+ commands = list()
+ # this filters out less specific lines
+ for item in config:
+ for index, entry in enumerate(commands):
+ if item.startswith(entry):
+ del commands[index]
+ break
+ commands.append(item)
+
+ candidate_commands = [
+ "set %s" % cmd.replace(" {", "") for cmd in commands
+ ]
+
+ else:
+ candidate_commands = str(candidate).strip().split("\n")
+
+ if diff_match == "none":
+ diff["config_diff"] = list(candidate_commands)
+ return diff
+
+ running_commands = [
+ str(c).replace("'", "") for c in running.splitlines()
+ ]
+
+ updates = list()
+ visited = set()
+
+ for line in candidate_commands:
+ item = str(line).replace("'", "")
+
+ if not item.startswith("set") and not item.startswith("delete"):
+ raise ValueError(
+ "line must start with either `set` or `delete`"
+ )
+
+ elif item.startswith("set") and item not in running_commands:
+ updates.append(line)
+
+ elif item.startswith("delete"):
+ if not running_commands:
+ updates.append(line)
+ else:
+ item = re.sub(r"delete", "set", item)
+ for entry in running_commands:
+ if entry.startswith(item) and line not in visited:
+ updates.append(line)
+ visited.add(line)
+
+ diff["config_diff"] = list(updates)
+ return diff
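+
+ # Rough worked example (hypothetical candidate): a hierarchical config
+ #   interfaces ethernet eth0 {
+ #       address 10.0.0.1/24
+ #   }
+ # flattens to "interfaces ethernet eth0 {" and
+ # "interfaces ethernet eth0 { address 10.0.0.1/24"; the loop above drops
+ # the less specific prefix line, and once " {" is stripped the result is
+ #   set interfaces ethernet eth0 address 10.0.0.1/24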
+
+ def run_commands(self, commands=None, check_rc=True):
+ if commands is None:
+ raise ValueError("'commands' value is required")
+
+ responses = list()
+ for cmd in to_list(commands):
+ if not isinstance(cmd, Mapping):
+ cmd = {"command": cmd}
+
+ output = cmd.pop("output", None)
+ if output:
+ raise ValueError(
+ "'output' value %s is not supported for run_commands"
+ % output
+ )
+
+ try:
+ out = self.send_command(**cmd)
+ except AnsibleConnectionFailure as e:
+ if check_rc:
+ raise
+ out = getattr(e, "err", e)
+
+ responses.append(out)
+
+ return responses
+
+ def get_device_operations(self):
+ return {
+ "supports_diff_replace": False,
+ "supports_commit": True,
+ "supports_rollback": False,
+ "supports_defaults": False,
+ "supports_onbox_diff": True,
+ "supports_commit_comment": True,
+ "supports_multiline_delimiter": False,
+ "supports_diff_match": True,
+ "supports_diff_ignore_lines": False,
+ "supports_generate_diff": False,
+ "supports_replace": False,
+ }
+
+ def get_option_values(self):
+ return {
+ "format": ["text", "set"],
+ "diff_match": ["line", "none"],
+ "diff_replace": [],
+ "output": [],
+ }
+
+ def get_capabilities(self):
+ result = super(Cliconf, self).get_capabilities()
+ result["rpc"] += [
+ "commit",
+ "discard_changes",
+ "get_diff",
+ "run_commands",
+ ]
+ result["device_operations"] = self.get_device_operations()
+ result.update(self.get_option_values())
+ return json.dumps(result)
+
+ def set_cli_prompt_context(self):
+ """
+ Make sure we are in the operational cli mode
+ :return: None
+ """
+ if self._connection.connected:
+ self._update_cli_prompt_context(
+ config_context="#", exit_command="exit discard"
+ )
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py
new file mode 100644
index 00000000..094963f1
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r"""options:
+ provider:
+ description:
+ - B(Deprecated)
+ - 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).'
+ - For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols).
+ - HORIZONTALLINE
+ - A dict object containing connection details.
+ type: dict
+ suboptions:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote device
+ over the specified transport. The value of host is used as the destination
+ address for the transport.
+ type: str
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote device.
+ type: int
+ default: 22
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to the remote
+ device. This value is used to authenticate the SSH session. If the value
+ is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
+ will be used instead.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to the remote
+ device. This value is used to authenticate the SSH session. If the value
+ is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
+ will be used instead.
+ type: str
+ timeout:
+ description:
+ - Specifies the timeout in seconds for communicating with the network device
+ for either connecting or sending commands. If the timeout is exceeded before
+ the operation is completed, the module will error.
+ type: int
+ default: 10
+ ssh_keyfile:
+ description:
+ - Specifies the SSH key to use to authenticate the connection to the remote
+ device. This value is the path to the key used to authenticate the SSH
+ session. If the value is not specified in the task, the value of environment
+ variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ type: path
+notes:
+- For more information on using Ansible to manage network devices see the :ref:`Ansible
+ Network Guide <network_guide>`
+"""
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py
new file mode 100644
index 00000000..46fabaa2
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/facts/facts.py
@@ -0,0 +1,22 @@
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The arg spec for the vyos facts module.
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class FactsArgs(object): # pylint: disable=R0903
+ """ The arg spec for the vyos facts module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "gather_subset": dict(default=["!config"], type="list"),
+ "gather_network_resources": dict(type="list"),
+ }
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py
new file mode 100644
index 00000000..a018cc0b
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/firewall_rules/firewall_rules.py
@@ -0,0 +1,263 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the vyos_firewall_rules module
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class Firewall_rulesArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_firewall_rules module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "afi": {
+ "choices": ["ipv4", "ipv6"],
+ "required": True,
+ "type": "str",
+ },
+ "rule_sets": {
+ "elements": "dict",
+ "options": {
+ "default_action": {
+ "choices": ["drop", "reject", "accept"],
+ "type": "str",
+ },
+ "description": {"type": "str"},
+ "enable_default_log": {"type": "bool"},
+ "name": {"type": "str"},
+ "rules": {
+ "elements": "dict",
+ "options": {
+ "action": {
+ "choices": [
+ "drop",
+ "reject",
+ "accept",
+ "inspect",
+ ],
+ "type": "str",
+ },
+ "description": {"type": "str"},
+ "destination": {
+ "options": {
+ "address": {"type": "str"},
+ "group": {
+ "options": {
+ "address_group": {
+ "type": "str"
+ },
+ "network_group": {
+ "type": "str"
+ },
+ "port_group": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "port": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "disabled": {"type": "bool"},
+ "fragment": {
+ "choices": [
+ "match-frag",
+ "match-non-frag",
+ ],
+ "type": "str",
+ },
+ "icmp": {
+ "options": {
+ "code": {"type": "int"},
+ "type": {"type": "int"},
+ "type_name": {
+ "choices": [
+ "any",
+ "echo-reply",
+ "destination-unreachable",
+ "network-unreachable",
+ "host-unreachable",
+ "protocol-unreachable",
+ "port-unreachable",
+ "fragmentation-needed",
+ "source-route-failed",
+ "network-unknown",
+ "host-unknown",
+ "network-prohibited",
+ "host-prohibited",
+ "TOS-network-unreachable",
+ "TOS-host-unreachable",
+ "communication-prohibited",
+ "host-precedence-violation",
+ "precedence-cutoff",
+ "source-quench",
+ "redirect",
+ "network-redirect",
+ "host-redirect",
+ "TOS-network-redirect",
+ "TOS-host-redirect",
+ "echo-request",
+ "router-advertisement",
+ "router-solicitation",
+ "time-exceeded",
+ "ttl-zero-during-transit",
+ "ttl-zero-during-reassembly",
+ "parameter-problem",
+ "ip-header-bad",
+ "required-option-missing",
+ "timestamp-request",
+ "timestamp-reply",
+ "address-mask-request",
+ "address-mask-reply",
+ "ping",
+ "pong",
+ "ttl-exceeded",
+ ],
+ "type": "str",
+ },
+ },
+ "type": "dict",
+ },
+ "ipsec": {
+ "choices": ["match-ipsec", "match-none"],
+ "type": "str",
+ },
+ "limit": {
+ "options": {
+ "burst": {"type": "int"},
+ "rate": {
+ "options": {
+ "number": {"type": "int"},
+ "unit": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "dict",
+ },
+ "number": {"required": True, "type": "int"},
+ "p2p": {
+ "elements": "dict",
+ "options": {
+ "application": {
+ "choices": [
+ "all",
+ "applejuice",
+ "bittorrent",
+ "directconnect",
+ "edonkey",
+ "gnutella",
+ "kazaa",
+ ],
+ "type": "str",
+ }
+ },
+ "type": "list",
+ },
+ "protocol": {"type": "str"},
+ "recent": {
+ "options": {
+ "count": {"type": "int"},
+ "time": {"type": "int"},
+ },
+ "type": "dict",
+ },
+ "source": {
+ "options": {
+ "address": {"type": "str"},
+ "group": {
+ "options": {
+ "address_group": {
+ "type": "str"
+ },
+ "network_group": {
+ "type": "str"
+ },
+ "port_group": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "mac_address": {"type": "str"},
+ "port": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "state": {
+ "options": {
+ "established": {"type": "bool"},
+ "invalid": {"type": "bool"},
+ "new": {"type": "bool"},
+ "related": {"type": "bool"},
+ },
+ "type": "dict",
+ },
+ "tcp": {
+ "options": {"flags": {"type": "str"}},
+ "type": "dict",
+ },
+ "time": {
+ "options": {
+ "monthdays": {"type": "str"},
+ "startdate": {"type": "str"},
+ "starttime": {"type": "str"},
+ "stopdate": {"type": "str"},
+ "stoptime": {"type": "str"},
+ "utc": {"type": "bool"},
+ "weekdays": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ "running_config": {"type": "str"},
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
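+
+ # A minimal config payload this spec would accept (values invented):
+ #
+ # config:
+ #   - afi: ipv4
+ #     rule_sets:
+ #       - name: INBOUND
+ #         default_action: drop
+ #         rules:
+ #           - number: 10
+ #             action: accept
+ #             protocol: tcp
+ # state: merged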
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py
new file mode 100644
index 00000000..3542cb19
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/interfaces/interfaces.py
@@ -0,0 +1,69 @@
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the vyos_interfaces module
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class InterfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_interfaces module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "description": {"type": "str"},
+ "duplex": {"choices": ["full", "half", "auto"]},
+ "enabled": {"default": True, "type": "bool"},
+ "mtu": {"type": "int"},
+ "name": {"required": True, "type": "str"},
+ "speed": {
+ "choices": ["auto", "10", "100", "1000", "2500", "10000"],
+ "type": "str",
+ },
+ "vifs": {
+ "elements": "dict",
+ "options": {
+ "vlan_id": {"type": "int"},
+ "description": {"type": "str"},
+ "enabled": {"default": True, "type": "bool"},
+ "mtu": {"type": "int"},
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": ["merged", "replaced", "overridden", "deleted"],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
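+
+ # Illustrative payload (values invented):
+ #
+ # config:
+ #   - name: eth1
+ #     description: uplink
+ #     enabled: true
+ #     mtu: 1500
+ #     vifs:
+ #       - vlan_id: 100
+ #         description: voice
+ # state: merged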
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py
new file mode 100644
index 00000000..91434e4b
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py
@@ -0,0 +1,81 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the vyos_l3_interfaces module
+"""
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class L3_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_l3_interfaces module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "ipv4": {
+ "elements": "dict",
+ "options": {"address": {"type": "str"}},
+ "type": "list",
+ },
+ "ipv6": {
+ "elements": "dict",
+ "options": {"address": {"type": "str"}},
+ "type": "list",
+ },
+ "name": {"required": True, "type": "str"},
+ "vifs": {
+ "elements": "dict",
+ "options": {
+ "ipv4": {
+ "elements": "dict",
+ "options": {"address": {"type": "str"}},
+ "type": "list",
+ },
+ "ipv6": {
+ "elements": "dict",
+ "options": {"address": {"type": "str"}},
+ "type": "list",
+ },
+ "vlan_id": {"type": "int"},
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": ["merged", "replaced", "overridden", "deleted"],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py
new file mode 100644
index 00000000..97c5d5a2
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lag_interfaces/lag_interfaces.py
@@ -0,0 +1,80 @@
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the vyos_lag_interfaces module
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class Lag_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_lag_interfaces module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "arp_monitor": {
+ "options": {
+ "interval": {"type": "int"},
+ "target": {"type": "list"},
+ },
+ "type": "dict",
+ },
+ "hash_policy": {
+ "choices": ["layer2", "layer2+3", "layer3+4"],
+ "type": "str",
+ },
+ "members": {
+ "elements": "dict",
+ "options": {"member": {"type": "str"}},
+ "type": "list",
+ },
+ "mode": {
+ "choices": [
+ "802.3ad",
+ "active-backup",
+ "broadcast",
+ "round-robin",
+ "transmit-load-balance",
+ "adaptive-load-balance",
+ "xor-hash",
+ ],
+ "type": "str",
+ },
+ "name": {"required": True, "type": "str"},
+ "primary": {"type": "str"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": ["merged", "replaced", "overridden", "deleted"],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py
new file mode 100644
index 00000000..84bbc00c
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_global/lldp_global.py
@@ -0,0 +1,56 @@
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the vyos_lldp_global module
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class Lldp_globalArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_lldp_global module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "options": {
+ "address": {"type": "str"},
+ "enable": {"type": "bool"},
+ "legacy_protocols": {
+ "choices": ["cdp", "edp", "fdp", "sonmp"],
+ "type": "list",
+ },
+ "snmp": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "state": {
+ "choices": ["merged", "replaced", "deleted"],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..2976fc09
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,89 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the vyos_lldp_interfaces module
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class Lldp_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_lldp_interfaces module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "enable": {"default": True, "type": "bool"},
+ "location": {
+ "options": {
+ "civic_based": {
+ "options": {
+ "ca_info": {
+ "elements": "dict",
+ "options": {
+ "ca_type": {"type": "int"},
+ "ca_value": {"type": "str"},
+ },
+ "type": "list",
+ },
+ "country_code": {
+ "required": True,
+ "type": "str",
+ },
+ },
+ "type": "dict",
+ },
+ "coordinate_based": {
+ "options": {
+ "altitude": {"type": "int"},
+ "datum": {
+ "choices": ["WGS84", "NAD83", "MLLW"],
+ "type": "str",
+ },
+ "latitude": {"required": True, "type": "str"},
+ "longitude": {"required": True, "type": "str"},
+ },
+ "type": "dict",
+ },
+ "elin": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "name": {"required": True, "type": "str"},
+ },
+ "type": "list",
+ },
+ "state": {
+ "choices": ["merged", "replaced", "overridden", "deleted"],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py
new file mode 100644
index 00000000..8ecd955a
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/static_routes/static_routes.py
@@ -0,0 +1,99 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+"""
+The arg spec for the vyos_static_routes module
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class Static_routesArgs(object): # pylint: disable=R0903
+ """The arg spec for the vyos_static_routes module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ "config": {
+ "elements": "dict",
+ "options": {
+ "address_families": {
+ "elements": "dict",
+ "options": {
+ "afi": {
+ "choices": ["ipv4", "ipv6"],
+ "required": True,
+ "type": "str",
+ },
+ "routes": {
+ "elements": "dict",
+ "options": {
+ "blackhole_config": {
+ "options": {
+ "distance": {"type": "int"},
+ "type": {"type": "str"},
+ },
+ "type": "dict",
+ },
+ "dest": {"required": True, "type": "str"},
+ "next_hops": {
+ "elements": "dict",
+ "options": {
+ "admin_distance": {"type": "int"},
+ "enabled": {"type": "bool"},
+ "forward_router_address": {
+ "required": True,
+ "type": "str",
+ },
+ "interface": {"type": "str"},
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ },
+ },
+ "type": "list",
+ }
+ },
+ "type": "list",
+ },
+ "running_config": {"type": "str"},
+ "state": {
+ "choices": [
+ "merged",
+ "replaced",
+ "overridden",
+ "deleted",
+ "gathered",
+ "rendered",
+ "parsed",
+ ],
+ "default": "merged",
+ "type": "str",
+ },
+ } # pylint: disable=C0301
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..377fec9a
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/config/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,438 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos_lldp_interfaces class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.facts import (
+ Facts,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ dict_diff,
+)
+from ansible.module_utils.six import iteritems
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.utils.utils import (
+ search_obj_in_list,
+ search_dict_tv_in_list,
+ key_value_in_dict,
+ is_dict_element_present,
+)
+
+
+class Lldp_interfaces(ConfigBase):
+ """
+ The vyos_lldp_interfaces class
+ """
+
+ gather_subset = [
+ "!all",
+ "!min",
+ ]
+
+ gather_network_resources = [
+ "lldp_interfaces",
+ ]
+
+ params = ["enable", "location", "name"]
+
+ def __init__(self, module):
+ super(Lldp_interfaces, self).__init__(module)
+
+ def get_lldp_interfaces_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset, self.gather_network_resources
+ )
+ lldp_interfaces_facts = facts["ansible_network_resources"].get(
+ "lldp_interfaces"
+ )
+ if not lldp_interfaces_facts:
+ return []
+ return lldp_interfaces_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ commands = list()
+ warnings = list()
+ existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
+ commands.extend(self.set_config(existing_lldp_interfaces_facts))
+ if commands:
+ if self._module.check_mode:
+ resp = self._connection.edit_config(commands, commit=False)
+ else:
+ resp = self._connection.edit_config(commands)
+ result["changed"] = True
+
+ result["commands"] = commands
+
+ if self._module._diff:
+ result["diff"] = resp["diff"] if result["changed"] else None
+
+ changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
+ result["before"] = existing_lldp_interfaces_facts
+ if result["changed"]:
+ result["after"] = changed_lldp_interfaces_facts
+
+ result["warnings"] = warnings
+ return result
+
+ def set_config(self, existing_lldp_interfaces_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params["config"]
+ have = existing_lldp_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ state = self._module.params["state"]
+ if state in ("merged", "replaced", "overridden") and not want:
+ self._module.fail_json(
+ msg="value of config parameter must not be empty for state {0}".format(
+ state
+ )
+ )
+ if state == "overridden":
+ commands.extend(self._state_overridden(want=want, have=have))
+ elif state == "deleted":
+ if want:
+ for item in want:
+ name = item["name"]
+ have_item = search_obj_in_list(name, have)
+ commands.extend(
+ self._state_deleted(want=None, have=have_item)
+ )
+ else:
+ for have_item in have:
+ commands.extend(
+ self._state_deleted(want=None, have=have_item)
+ )
+ else:
+ for want_item in want:
+ name = want_item["name"]
+ have_item = search_obj_in_list(name, have)
+ if state == "merged":
+ commands.extend(
+ self._state_merged(want=want_item, have=have_item)
+ )
+ else:
+ commands.extend(
+ self._state_replaced(want=want_item, have=have_item)
+ )
+ return commands
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ if have:
+ commands.extend(self._state_deleted(want, have))
+ commands.extend(self._state_merged(want, have))
+ return commands
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ for have_item in have:
+ lldp_name = have_item["name"]
+ lldp_in_want = search_obj_in_list(lldp_name, want)
+ if not lldp_in_want:
+ commands.append(
+ self._compute_command(have_item["name"], remove=True)
+ )
+
+ for want_item in want:
+ name = want_item["name"]
+ lldp_in_have = search_obj_in_list(name, have)
+ commands.extend(self._state_replaced(want_item, lldp_in_have))
+ return commands
+
+ def _state_merged(self, want, have):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = []
+ if have:
+ commands.extend(self._render_updates(want, have))
+ else:
+ commands.extend(self._render_set_commands(want))
+ return commands
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ if want:
+ params = Lldp_interfaces.params
+ for attrib in params:
+ if attrib == "location":
+ commands.extend(
+ self._update_location(have["name"], want, have)
+ )
+
+ elif have:
+ commands.append(self._compute_command(have["name"], remove=True))
+ return commands
+
+ def _render_updates(self, want, have):
+ commands = []
+ lldp_name = have["name"]
+ commands.extend(self._configure_status(lldp_name, want, have))
+ commands.extend(self._add_location(lldp_name, want, have))
+
+ return commands
+
+ def _render_set_commands(self, want):
+ commands = []
+ have = {}
+ lldp_name = want["name"]
+ params = Lldp_interfaces.params
+
+ commands.extend(self._add_location(lldp_name, want, have))
+ for attrib in params:
+ value = want[attrib]
+ if value:
+ if attrib == "location":
+ commands.extend(self._add_location(lldp_name, want, have))
+ elif attrib == "enable":
+ if not value:
+ commands.append(
+ self._compute_command(lldp_name, value="disable")
+ )
+ else:
+ commands.append(self._compute_command(lldp_name))
+
+ return commands
+
+ def _configure_status(self, name, want_item, have_item):
+ commands = []
+ if is_dict_element_present(have_item, "enable"):
+ temp_have_item = False
+ else:
+ temp_have_item = True
+ if want_item["enable"] != temp_have_item:
+ if want_item["enable"]:
+ commands.append(
+ self._compute_command(name, value="disable", remove=True)
+ )
+ else:
+ commands.append(self._compute_command(name, value="disable"))
+ return commands
+
+ def _add_location(self, name, want_item, have_item):
+ commands = []
+ have_dict = {}
+ have_ca = {}
+ set_cmd = name + " location "
+ want_location_type = want_item.get("location") or {}
+ have_location_type = have_item.get("location") or {}
+
+ if want_location_type["coordinate_based"]:
+ want_dict = want_location_type.get("coordinate_based") or {}
+ if is_dict_element_present(have_location_type, "coordinate_based"):
+ have_dict = have_location_type.get("coordinate_based") or {}
+ location_type = "coordinate-based"
+ updates = dict_diff(have_dict, want_dict)
+ for key, value in iteritems(updates):
+ if value:
+ commands.append(
+ self._compute_command(
+ set_cmd + location_type, key, str(value)
+ )
+ )
+
+ elif want_location_type["civic_based"]:
+ location_type = "civic-based"
+ want_dict = want_location_type.get("civic_based") or {}
+ want_ca = want_dict.get("ca_info") or []
+ if is_dict_element_present(have_location_type, "civic_based"):
+ have_dict = have_location_type.get("civic_based") or {}
+ have_ca = have_dict.get("ca_info") or []
+ if want_dict["country_code"] != have_dict["country_code"]:
+ commands.append(
+ self._compute_command(
+ set_cmd + location_type,
+ "country-code",
+ str(want_dict["country_code"]),
+ )
+ )
+ else:
+ commands.append(
+ self._compute_command(
+ set_cmd + location_type,
+ "country-code",
+ str(want_dict["country_code"]),
+ )
+ )
+ commands.extend(self._add_civic_address(name, want_ca, have_ca))
+
+ elif want_location_type["elin"]:
+ location_type = "elin"
+ if is_dict_element_present(have_location_type, "elin"):
+ if want_location_type.get("elin") != have_location_type.get(
+ "elin"
+ ):
+ commands.append(
+ self._compute_command(
+ set_cmd + location_type,
+ value=str(want_location_type["elin"]),
+ )
+ )
+ else:
+ commands.append(
+ self._compute_command(
+ set_cmd + location_type,
+ value=str(want_location_type["elin"]),
+ )
+ )
+ return commands
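+
+ # For instance (hypothetical want/have): an elin location of "0000000911"
+ # on eth1 renders as
+ #   set service lldp interface eth1 location elin '0000000911'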
+
+ def _update_location(self, name, want_item, have_item):
+ commands = []
+ del_cmd = name + " location"
+ want_location_type = want_item.get("location") or {}
+ have_location_type = have_item.get("location") or {}
+
+ if want_location_type["coordinate_based"]:
+ want_dict = want_location_type.get("coordinate_based") or {}
+ if is_dict_element_present(have_location_type, "coordinate_based"):
+ have_dict = have_location_type.get("coordinate_based") or {}
+ location_type = "coordinate-based"
+ for key, value in iteritems(have_dict):
+ only_in_have = key_value_in_dict(key, value, want_dict)
+ if not only_in_have:
+ commands.append(
+ self._compute_command(
+ del_cmd + location_type, key, str(value), True
+ )
+ )
+ else:
+ commands.append(self._compute_command(del_cmd, remove=True))
+
+ elif want_location_type["civic_based"]:
+ want_dict = want_location_type.get("civic_based") or {}
+ want_ca = want_dict.get("ca_info") or []
+ if is_dict_element_present(have_location_type, "civic_based"):
+ have_dict = have_location_type.get("civic_based") or {}
+ have_ca = have_dict.get("ca_info")
+ commands.extend(
+ self._update_civic_address(name, want_ca, have_ca)
+ )
+ else:
+ commands.append(self._compute_command(del_cmd, remove=True))
+
+ else:
+ if is_dict_element_present(have_location_type, "elin"):
+ if want_location_type.get("elin") != have_location_type.get(
+ "elin"
+ ):
+ commands.append(
+ self._compute_command(del_cmd, remove=True)
+ )
+ else:
+ commands.append(self._compute_command(del_cmd, remove=True))
+ return commands
+
+ def _add_civic_address(self, name, want, have):
+ commands = []
+ for item in want:
+ ca_type = item["ca_type"]
+ ca_value = item["ca_value"]
+ obj_in_have = search_dict_tv_in_list(
+ ca_type, ca_value, have, "ca_type", "ca_value"
+ )
+ if not obj_in_have:
+ commands.append(
+ self._compute_command(
+ key=name + " location civic-based ca-type",
+ attrib=str(ca_type) + " ca-value",
+ value=ca_value,
+ )
+ )
+ return commands
+
+ def _update_civic_address(self, name, want, have):
+ commands = []
+ for item in have:
+ ca_type = item["ca_type"]
+ ca_value = item["ca_value"]
+ in_want = search_dict_tv_in_list(
+ ca_type, ca_value, want, "ca_type", "ca_value"
+ )
+ if not in_want:
+ commands.append(
+ self._compute_command(
+ name,
+ "location civic-based ca-type",
+ str(ca_type),
+ remove=True,
+ )
+ )
+ return commands
+
+ def _compute_command(self, key, attrib=None, value=None, remove=False):
+ if remove:
+ cmd = "delete service lldp interface "
+ else:
+ cmd = "set service lldp interface "
+ cmd += key
+ if attrib:
+ cmd += " " + attrib
+ if value:
+ cmd += " '" + value + "'"
+ return cmd
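+
+ # _compute_command examples (illustrative):
+ #   _compute_command("eth1") -> "set service lldp interface eth1"
+ #   _compute_command("eth1", value="disable")
+ #       -> "set service lldp interface eth1 'disable'"
+ #   _compute_command("eth1", remove=True)
+ #       -> "delete service lldp interface eth1"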
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py
new file mode 100644
index 00000000..8f0a3bb6
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/facts.py
@@ -0,0 +1,83 @@
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The facts class for vyos
+this file validates each subset of facts and selectively
+calls the appropriate facts gathering function
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.facts.facts import (
+ FactsBase,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.interfaces.interfaces import (
+ InterfacesFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.l3_interfaces.l3_interfaces import (
+ L3_interfacesFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.lag_interfaces.lag_interfaces import (
+ Lag_interfacesFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.lldp_global.lldp_global import (
+ Lldp_globalFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.lldp_interfaces.lldp_interfaces import (
+ Lldp_interfacesFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.firewall_rules.firewall_rules import (
+ Firewall_rulesFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.static_routes.static_routes import (
+ Static_routesFacts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.legacy.base import (
+ Default,
+ Neighbors,
+ Config,
+)
+
+
+FACT_LEGACY_SUBSETS = dict(default=Default, neighbors=Neighbors, config=Config)
+FACT_RESOURCE_SUBSETS = dict(
+ interfaces=InterfacesFacts,
+ l3_interfaces=L3_interfacesFacts,
+ lag_interfaces=Lag_interfacesFacts,
+ lldp_global=Lldp_globalFacts,
+ lldp_interfaces=Lldp_interfacesFacts,
+ static_routes=Static_routesFacts,
+ firewall_rules=Firewall_rulesFacts,
+)
+
+
+class Facts(FactsBase):
+ """ The fact class for vyos
+ """
+
+ VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())
+ VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys())
+
+ def __init__(self, module):
+ super(Facts, self).__init__(module)
+
+ def get_facts(
+ self, legacy_facts_type=None, resource_facts_type=None, data=None
+ ):
+ """ Collect the facts for vyos
+ :param legacy_facts_type: List of legacy facts types
+ :param resource_facts_type: List of resource fact types
+ :param data: previously collected conf
+ :rtype: dict
+ :return: the facts gathered
+ """
+ if self.VALID_RESOURCE_SUBSETS:
+ self.get_network_resources_facts(
+ FACT_RESOURCE_SUBSETS, resource_facts_type, data
+ )
+ if self.VALID_LEGACY_GATHER_SUBSETS:
+ self.get_network_legacy_facts(
+ FACT_LEGACY_SUBSETS, legacy_facts_type
+ )
+ return self.ansible_facts, self._warnings
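+
+# A hedged usage sketch (the `module` object is assumed to come from an
+# AnsibleModule with an active network connection):
+#
+#   facts, warnings = Facts(module).get_facts(
+#       legacy_facts_type=["default"],
+#       resource_facts_type=["interfaces", "static_routes"],
+#   )
+#   # facts["ansible_network_resources"]["interfaces"] then holds the
+#   # parsed interfaces resource, per FACT_RESOURCE_SUBSETS above.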
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py
new file mode 100644
index 00000000..971ea6fe
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/firewall_rules/firewall_rules.py
@@ -0,0 +1,380 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos firewall_rules fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from re import findall, search, M
+from copy import deepcopy
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.firewall_rules.firewall_rules import (
+ Firewall_rulesArgs,
+)
+
+
+class Firewall_rulesFacts(object):
+ """ The vyos firewall_rules fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Firewall_rulesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection):
+ return connection.get_config()
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for firewall_rules
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ # data is normally populated from the running device
+ # configuration via the connection (see get_device_data above)
+ data = self.get_device_data(connection)
+ # split the config into instances of the resource
+ objs = []
+ v6_rules = findall(
+ r"^set firewall ipv6-name (?:\'*)(\S+)(?:\'*)", data, M
+ )
+ v4_rules = findall(r"^set firewall name (?:\'*)(\S+)(?:\'*)", data, M)
+ if v6_rules:
+ config = self.get_rules(data, v6_rules, type="ipv6")
+ if config:
+ config = utils.remove_empties(config)
+ objs.append(config)
+ if v4_rules:
+ config = self.get_rules(data, v4_rules, type="ipv4")
+ if config:
+ config = utils.remove_empties(config)
+ objs.append(config)
+
+ ansible_facts["ansible_network_resources"].pop("firewall_rules", None)
+ facts = {}
+ if objs:
+ facts["firewall_rules"] = []
+ params = utils.validate_config(
+ self.argument_spec, {"config": objs}
+ )
+ for cfg in params["config"]:
+ facts["firewall_rules"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def get_rules(self, data, rules, type):
+ """
+ This function performs the following:
+ - Form regex to fetch 'rule-sets' specific config from data.
+ - Form the rule-set list based on ip address.
+ :param data: configuration.
+ :param rules: list of rule-sets.
+ :param type: ip address type.
+ :return: generated rule-sets configuration.
+ """
+ r_v4 = []
+ r_v6 = []
+ for r in set(rules):
+ rule_regex = r" %s .+$" % r.strip("'")
+ cfg = findall(rule_regex, data, M)
+ fr = self.render_config(cfg, r.strip("'"))
+ fr["name"] = r.strip("'")
+ if type == "ipv6":
+ r_v6.append(fr)
+ else:
+ r_v4.append(fr)
+ if r_v4:
+ config = {"afi": "ipv4", "rule_sets": r_v4}
+ if r_v6:
+ config = {"afi": "ipv6", "rule_sets": r_v6}
+ return config
+
+ def render_config(self, conf, match):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ conf = "\n".join(filter(lambda x: x, conf))
+ a_lst = ["description", "default_action", "enable_default_log"]
+ config = self.parse_attr(conf, a_lst, match)
+ if not config:
+ config = {}
+ config["rules"] = self.parse_rules_lst(conf)
+ return config
+
+ def parse_rules_lst(self, conf):
+ """
+ This function forms the regex to fetch the 'rules' within
+ 'rule-sets'.
+ :param conf: configuration data.
+ :return: generated rule list configuration.
+ """
+ r_lst = []
+ rules = findall(r"rule (?:\'*)(\d+)(?:\'*)", conf, M)
+ if rules:
+ rules_lst = []
+ for r in set(rules):
+ r_regex = r" %s .+$" % r
+ cfg = "\n".join(findall(r_regex, conf, M))
+ obj = self.parse_rules(cfg)
+ obj["number"] = int(r)
+ if obj:
+ rules_lst.append(obj)
+ r_lst = sorted(rules_lst, key=lambda i: i["number"])
+ return r_lst
+
+ def parse_rules(self, conf):
+ """
+ This function triggers the parsing of 'rule' attributes.
+ a_lst is a list of rule attributes that have no further
+ sub-attributes.
+ :param conf: configuration
+ :return: generated rule configuration dictionary.
+ """
+ a_lst = [
+ "ipsec",
+ "action",
+ "protocol",
+ "fragment",
+ "disabled",
+ "description",
+ ]
+ rule = self.parse_attr(conf, a_lst)
+ r_sub = {
+ "p2p": self.parse_p2p(conf),
+ "tcp": self.parse_tcp(conf, "tcp"),
+ "icmp": self.parse_icmp(conf, "icmp"),
+ "time": self.parse_time(conf, "time"),
+ "limit": self.parse_limit(conf, "limit"),
+ "state": self.parse_state(conf, "state"),
+ "recent": self.parse_recent(conf, "recent"),
+ "source": self.parse_src_or_dest(conf, "source"),
+ "destination": self.parse_src_or_dest(conf, "destination"),
+ }
+ rule.update(r_sub)
+ return rule
+
+ def parse_p2p(self, conf):
+ """
+ This function forms the regex to fetch the 'p2p' applications
+ within 'rules'.
+ :param conf: configuration data.
+ :return: generated list of p2p application dictionaries.
+ """
+ a_lst = []
+ applications = findall(r"p2p (?:\'*)(\d+)(?:\'*)", conf, M)
+ if applications:
+ app_lst = []
+ for r in set(applications):
+ obj = {"application": r.strip("'")}
+ app_lst.append(obj)
+ a_lst = sorted(app_lst, key=lambda i: i["application"])
+ return a_lst
+
+ def parse_src_or_dest(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'source or
+ destination' attributes.
+ :param conf: configuration.
+ :param attrib:'source/destination'.
+ :return:generated source/destination configuration dictionary.
+ """
+ a_lst = ["port", "address", "mac_address"]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ cfg_dict["group"] = self.parse_group(conf, attrib + " group")
+ return cfg_dict
+
+ def parse_recent(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'recent' attributes
+ :param conf: configuration.
+ :param attrib: 'recent'.
+ :return: generated config dictionary.
+ """
+ a_lst = ["time", "count"]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ return cfg_dict
+
+ def parse_tcp(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'tcp' attributes.
+ :param conf: configuration.
+ :param attrib: 'tcp'.
+ :return: generated config dictionary.
+ """
+ cfg_dict = self.parse_attr(conf, ["flags"], match=attrib)
+ return cfg_dict
+
+ def parse_time(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'time' attributes.
+ :param conf: configuration.
+ :param attrib: 'time'.
+ :return: generated config dictionary.
+ """
+ a_lst = [
+ "stopdate",
+ "stoptime",
+ "weekdays",
+ "monthdays",
+ "startdate",
+ "starttime",
+ ]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ return cfg_dict
+
+ def parse_state(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'state' attributes.
+ :param conf: configuration
+ :param attrib: 'state'.
+ :return: generated config dictionary.
+ """
+ a_lst = ["new", "invalid", "related", "established"]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ return cfg_dict
+
+ def parse_group(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'group' attributes.
+ :param conf: configuration.
+ :param attrib: 'group'.
+ :return: generated config dictionary.
+ """
+ a_lst = ["port_group", "address_group", "network_group"]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ return cfg_dict
+
+ def parse_icmp(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'icmp' attributes.
+ :param conf: configuration to be parsed.
+ :param attrib: 'icmp'.
+ :return: generated config dictionary.
+ """
+ a_lst = ["code", "type", "type_name"]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ return cfg_dict
+
+ def parse_limit(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'limit' attributes.
+ :param conf: configuration to be parsed.
+ :param attrib: 'limit'
+ :return: generated config dictionary.
+ """
+ cfg_dict = self.parse_attr(conf, ["burst"], match=attrib)
+ cfg_dict["rate"] = self.parse_rate(conf, "rate")
+ return cfg_dict
+
+ def parse_rate(self, conf, attrib=None):
+ """
+ This function triggers the parsing of 'rate' attributes.
+ :param conf: configuration.
+ :param attrib: 'rate'
+ :return: generated config dictionary.
+ """
+ a_lst = ["unit", "number"]
+ cfg_dict = self.parse_attr(conf, a_lst, match=attrib)
+ return cfg_dict
+
+ def parse_attr(self, conf, attr_list, match=None):
+ """
+ This function performs the following:
+ - Form the regex to fetch the required attribute config.
+ - Type cast the output in desired format.
+ :param conf: configuration.
+ :param attr_list: list of attributes.
+ :param match: parent node/attribute name.
+ :return: generated config dictionary.
+ """
+ config = {}
+ for attrib in attr_list:
+ regex = self.map_regex(attrib)
+ if match:
+ regex = match + " " + regex
+ if conf:
+ if self.is_bool(attrib):
+     # str.find() returns -1 when absent; config lines always
+     # carry leading context, so a hit lands at index >= 1
+     out = conf.find(attrib.replace("_", "-"))
+     dis = conf.find(attrib.replace("_", "-") + " 'disable'")
+     if out >= 1:
+         config[attrib] = dis < 1
+ else:
+ out = search(r"^.*" + regex + " (.+)", conf, M)
+ if out:
+ val = out.group(1).strip("'")
+ if self.is_num(attrib):
+ val = int(val)
+ config[attrib] = val
+ return config
+
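+ # A minimal sketch of parse_attr on mock config text (values are
+ # illustrative, not from a device):
+ #
+ #   conf = "source address '192.0.2.1'\nsource port '22'"
+ #   self.parse_attr(conf, ["address", "port"], match="source")
+ #   # -> {"address": "192.0.2.1", "port": "22"}
+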
+ def map_regex(self, attrib):
+ """
+ This function constructs the regex string,
+ replacing underscores with hyphens.
+ :param attrib: attribute
+ :return: regex string
+ """
+ regex = attrib.replace("_", "-")
+ if attrib == "disabled":
+ regex = "disable"
+ return regex
+
+ def is_bool(self, attrib):
+ """
+ This function looks for the attribute in predefined bool type set.
+ :param attrib: attribute.
+ :return: True/False
+ """
+ bool_set = (
+ "new",
+ "invalid",
+ "related",
+ "disabled",
+ "established",
+ "enable_default_log",
+ )
+ return attrib in bool_set
+
+ def is_num(self, attrib):
+ """
+ This function looks for the attribute in predefined integer type set.
+ :param attrib: attribute.
+ :return: True/False.
+ """
+ num_set = ("time", "code", "type", "count", "burst", "number")
+ return attrib in num_set
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py
new file mode 100644
index 00000000..4b24803b
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/interfaces/interfaces.py
@@ -0,0 +1,134 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos interfaces fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+from re import findall, M
+from copy import deepcopy
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.interfaces.interfaces import (
+ InterfacesArgs,
+)
+
+
+class InterfacesFacts(object):
+ """ The vyos interfaces fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = InterfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = connection.get_config(flags=["| grep interfaces"])
+
+ objs = []
+ interface_names = findall(
+ r"^set interfaces (?:ethernet|bonding|vti|loopback|vxlan) (?:\'*)(\S+)(?:\'*)",
+ data,
+ M,
+ )
+ if interface_names:
+ for interface in set(interface_names):
+ intf_regex = r" %s .+$" % interface.strip("'")
+ cfg = findall(intf_regex, data, M)
+ obj = self.render_config(cfg)
+ obj["name"] = interface.strip("'")
+ if obj:
+ objs.append(obj)
+ facts = {}
+ if objs:
+ facts["interfaces"] = []
+ params = utils.validate_config(
+ self.argument_spec, {"config": objs}
+ )
+ for cfg in params["config"]:
+ facts["interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ vif_conf = "\n".join(filter(lambda x: ("vif" in x), conf))
+ eth_conf = "\n".join(filter(lambda x: ("vif" not in x), conf))
+ config = self.parse_attribs(
+ ["description", "speed", "mtu", "duplex"], eth_conf
+ )
+ config["vifs"] = self.parse_vifs(vif_conf)
+
+ return utils.remove_empties(config)
+
+ def parse_vifs(self, conf):
+ vif_names = findall(r"vif (?:\'*)(\d+)(?:\'*)", conf, M)
+ vifs_list = None
+
+ if vif_names:
+ vifs_list = []
+ for vif in set(vif_names):
+ vif_regex = r" %s .+$" % vif
+ cfg = "\n".join(findall(vif_regex, conf, M))
+ obj = self.parse_attribs(["description", "mtu"], cfg)
+ obj["vlan_id"] = int(vif)
+ if obj:
+ vifs_list.append(obj)
+ vifs_list = sorted(vifs_list, key=lambda i: i["vlan_id"])
+
+ return vifs_list
+
+ def parse_attribs(self, attribs, conf):
+ config = {}
+ for item in attribs:
+ value = utils.parse_conf_arg(conf, item)
+ if value and item == "mtu":
+ config[item] = int(value.strip("'"))
+ elif value:
+ config[item] = value.strip("'")
+ else:
+ config[item] = None
+ if "disable" in conf:
+ config["enabled"] = False
+ else:
+ config["enabled"] = True
+
+ return utils.remove_empties(config)
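+
+# A hedged sketch (mock lines, not from a device): given conf lines such as
+#   [" eth0 mtu '1500'", " eth0 description 'uplink'", " eth0 vif 10 mtu '1400'"]
+# render_config would return roughly
+#   {"description": "uplink", "mtu": 1500, "enabled": True,
+#    "vifs": [{"vlan_id": 10, "mtu": 1400, "enabled": True}]}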
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py
new file mode 100644
index 00000000..d1d62c23
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/l3_interfaces/l3_interfaces.py
@@ -0,0 +1,143 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos l3_interfaces fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+import re
+from copy import deepcopy
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.compat import (
+ ipaddress,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.l3_interfaces.l3_interfaces import (
+ L3_interfacesArgs,
+)
+
+
+class L3_interfacesFacts(object):
+ """ The vyos l3_interfaces fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = L3_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for l3_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = connection.get_config()
+
+ # operate on a collection of resource x
+ objs = []
+ interface_names = re.findall(
+ r"set interfaces (?:ethernet|bonding|vti|vxlan) (?:\'*)(\S+)(?:\'*)",
+ data,
+ re.M,
+ )
+ if interface_names:
+ for interface in set(interface_names):
+ intf_regex = r" %s .+$" % interface
+ cfg = re.findall(intf_regex, data, re.M)
+ obj = self.render_config(cfg)
+ obj["name"] = interface.strip("'")
+ if obj:
+ objs.append(obj)
+
+ ansible_facts["ansible_network_resources"].pop("l3_interfaces", None)
+ facts = {}
+ if objs:
+ facts["l3_interfaces"] = []
+ params = utils.validate_config(
+ self.argument_spec, {"config": objs}
+ )
+ for cfg in params["config"]:
+ facts["l3_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys from spec for null values
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ vif_conf = "\n".join(filter(lambda x: ("vif" in x), conf))
+ eth_conf = "\n".join(filter(lambda x: ("vif" not in x), conf))
+ config = self.parse_attribs(eth_conf)
+ config["vifs"] = self.parse_vifs(vif_conf)
+
+ return utils.remove_empties(config)
+
+ def parse_vifs(self, conf):
+ vif_names = re.findall(r"vif (\d+)", conf, re.M)
+ vifs_list = None
+ if vif_names:
+ vifs_list = []
+ for vif in set(vif_names):
+ vif_regex = r" %s .+$" % vif
+ cfg = "\n".join(re.findall(vif_regex, conf, re.M))
+ obj = self.parse_attribs(cfg)
+ obj["vlan_id"] = vif
+ if obj:
+ vifs_list.append(obj)
+
+ return vifs_list
+
+ def parse_attribs(self, conf):
+ config = {}
+ ipaddrs = re.findall(r"address (\S+)", conf, re.M)
+ config["ipv4"] = []
+ config["ipv6"] = []
+
+ for item in ipaddrs:
+ item = item.strip("'")
+ if item == "dhcp":
+ config["ipv4"].append({"address": item})
+ elif item == "dhcpv6":
+ config["ipv6"].append({"address": item})
+ else:
+ ip_version = ipaddress.ip_address(item.split("/")[0]).version
+ if ip_version == 4:
+ config["ipv4"].append({"address": item})
+ else:
+ config["ipv6"].append({"address": item})
+
+ for key, value in iteritems(config):
+ if value == []:
+ config[key] = None
+
+ return utils.remove_empties(config)
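+
+# A hedged sketch: parse_attribs splits addresses by family using the
+# netcommon ipaddress compat shim. For
+#   conf = "address '192.0.2.1/24'\naddress 'dhcpv6'"
+# it would return roughly
+#   {"ipv4": [{"address": "192.0.2.1/24"}], "ipv6": [{"address": "dhcpv6"}]}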
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py
new file mode 100644
index 00000000..9201e5c6
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py
@@ -0,0 +1,152 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos lag_interfaces fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+from re import findall, search, M
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lag_interfaces.lag_interfaces import (
+ Lag_interfacesArgs,
+)
+
+
+class Lag_interfacesFacts(object):
+ """ The vyos lag_interfaces fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lag_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for lag_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = connection.get_config()
+
+ objs = []
+ lag_names = findall(r"^set interfaces bonding (\S+)", data, M)
+ if lag_names:
+ for lag in set(lag_names):
+ lag_regex = r" %s .+$" % lag
+ cfg = findall(lag_regex, data, M)
+ obj = self.render_config(cfg)
+
+ output = connection.run_commands(
+ ["show interfaces bonding " + lag + " slaves"]
+ )
+ lines = output[0].splitlines()
+ members = []
+ if len(lines) > 1:
+     for line in lines[2:]:
+         split_line = line.split()
+         if len(split_line) > 1:
+             # append a fresh dict per member so the
+             # entries do not alias one another
+             members.append({"member": split_line[0]})
+ obj["name"] = lag.strip("'")
+ if members:
+ obj["members"] = members
+
+ if obj:
+ objs.append(obj)
+
+ facts = {}
+ if objs:
+ facts["lag_interfaces"] = []
+ params = utils.validate_config(
+ self.argument_spec, {"config": objs}
+ )
+ for cfg in params["config"]:
+ facts["lag_interfaces"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ arp_monitor_conf = "\n".join(
+ filter(lambda x: ("arp-monitor" in x), conf)
+ )
+ hash_policy_conf = "\n".join(
+ filter(lambda x: ("hash-policy" in x), conf)
+ )
+ lag_conf = "\n".join(filter(lambda x: ("bond" in x), conf))
+ config = self.parse_attribs(["mode", "primary"], lag_conf)
+ config["arp_monitor"] = self.parse_arp_monitor(arp_monitor_conf)
+ config["hash_policy"] = self.parse_hash_policy(hash_policy_conf)
+
+ return utils.remove_empties(config)
+
+ def parse_attribs(self, attribs, conf):
+ config = {}
+ for item in attribs:
+ value = utils.parse_conf_arg(conf, item)
+ if value:
+ config[item] = value.strip("'")
+ else:
+ config[item] = None
+ return utils.remove_empties(config)
+
+ def parse_arp_monitor(self, conf):
+ arp_monitor = None
+ if conf:
+ arp_monitor = {}
+ target_list = []
+ interval = search(r"^.*arp-monitor interval (.+)", conf, M)
+ targets = findall(r"^.*arp-monitor target '(.+)'", conf, M)
+ if targets:
+ for target in targets:
+ target_list.append(target)
+ arp_monitor["target"] = target_list
+ if interval:
+ value = interval.group(1).strip("'")
+ arp_monitor["interval"] = int(value)
+ return arp_monitor
+
+ def parse_hash_policy(self, conf):
+ hash_policy = None
+ if conf:
+ hash_policy = search(r"^.*hash-policy (.+)", conf, M)
+ hash_policy = hash_policy.group(1).strip("'")
+ return hash_policy
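+
+# A hedged sketch (mock text): for
+#   conf = "bond0 arp-monitor interval '100'\nbond0 arp-monitor target '192.0.2.1'"
+# parse_arp_monitor would return {"target": ["192.0.2.1"], "interval": 100}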
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py
new file mode 100644
index 00000000..f6b343e0
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/legacy/base.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The VyOS legacy fact classes.
+This file collects the legacy facts (default, config and neighbors)
+from the device command output and populates the facts tree
+accordingly.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+import platform
+import re
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ run_commands,
+ get_capabilities,
+)
+
+
+class LegacyFactsBase(object):
+
+ COMMANDS = frozenset()
+
+ def __init__(self, module):
+ self.module = module
+ self.facts = dict()
+ self.warnings = list()
+ self.responses = None
+
+ def populate(self):
+ self.responses = run_commands(self.module, list(self.COMMANDS))
+
+
+class Default(LegacyFactsBase):
+
+ COMMANDS = [
+ "show version",
+ ]
+
+ def populate(self):
+ super(Default, self).populate()
+ data = self.responses[0]
+ self.facts["serialnum"] = self.parse_serialnum(data)
+ self.facts.update(self.platform_facts())
+
+ def parse_serialnum(self, data):
+ match = re.search(r"HW S/N:\s+(\S+)", data)
+ if match:
+ return match.group(1)
+
+ def platform_facts(self):
+ platform_facts = {}
+
+ resp = get_capabilities(self.module)
+ device_info = resp["device_info"]
+
+ platform_facts["system"] = device_info["network_os"]
+
+ for item in ("model", "image", "version", "platform", "hostname"):
+ val = device_info.get("network_os_%s" % item)
+ if val:
+ platform_facts[item] = val
+
+ platform_facts["api"] = resp["network_api"]
+ platform_facts["python_version"] = platform.python_version()
+
+ return platform_facts
+
+
+class Config(LegacyFactsBase):
+
+ COMMANDS = [
+ "show configuration commands",
+ "show system commit",
+ ]
+
+ def populate(self):
+ super(Config, self).populate()
+
+ self.facts["config"] = self.responses
+
+ commits = self.responses[1]
+ entries = list()
+ entry = None
+
+ for line in commits.split("\n"):
+     match = re.match(r"(\d+)\s+(.+)by(.+)via(.+)", line)
+     if match:
+         if entry:
+             entries.append(entry)
+
+         entry = dict(
+             revision=match.group(1),
+             datetime=match.group(2),
+             by=str(match.group(3)).strip(),
+             via=str(match.group(4)).strip(),
+             comment=None,
+         )
+     elif entry:
+         # skip header lines that appear before the first entry
+         entry["comment"] = line.strip()
+
+ # the loop only flushes an entry when the next one starts,
+ # so append the final entry here
+ if entry:
+     entries.append(entry)
+
+ self.facts["commits"] = entries
+
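+# A hedged sketch of the commit line format the regex above expects
+# (field values are illustrative):
+#
+#   "5   2020-01-01 00:00:00 by vyos via cli"
+#   # -> revision="5", by="vyos", via="cli", datetime holding the middle field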
+
+class Neighbors(LegacyFactsBase):
+
+ COMMANDS = [
+ "show lldp neighbors",
+ "show lldp neighbors detail",
+ ]
+
+ def populate(self):
+ super(Neighbors, self).populate()
+
+ all_neighbors = self.responses[0]
+ if "LLDP not configured" not in all_neighbors:
+ neighbors = self.parse(self.responses[1])
+ self.facts["neighbors"] = self.parse_neighbors(neighbors)
+
+ def parse(self, data):
+ parsed = list()
+ values = None
+ for line in data.split("\n"):
+ if not line:
+ continue
+ elif line[0] == " ":
+ values += "\n%s" % line
+ elif line.startswith("Interface"):
+ if values:
+ parsed.append(values)
+ values = line
+ if values:
+ parsed.append(values)
+ return parsed
+
+ def parse_neighbors(self, data):
+ facts = dict()
+ for item in data:
+ interface = self.parse_interface(item)
+ host = self.parse_host(item)
+ port = self.parse_port(item)
+ if interface not in facts:
+ facts[interface] = list()
+ facts[interface].append(dict(host=host, port=port))
+ return facts
+
+ def parse_interface(self, data):
+ match = re.search(r"^Interface:\s+(\S+),", data)
+ return match.group(1)
+
+ def parse_host(self, data):
+ match = re.search(r"SysName:\s+(.+)$", data, re.M)
+ if match:
+ return match.group(1)
+
+ def parse_port(self, data):
+ match = re.search(r"PortDescr:\s+(.+)$", data, re.M)
+ if match:
+ return match.group(1)
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py
new file mode 100644
index 00000000..3c7e2f93
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_global/lldp_global.py
@@ -0,0 +1,116 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos lldp_global fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from re import findall, M
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_global.lldp_global import (
+ Lldp_globalArgs,
+)
+
+
+class Lldp_globalFacts(object):
+ """ The vyos lldp_global fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lldp_globalArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for lldp_global
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = connection.get_config()
+
+ objs = {}
+ lldp_output = findall(r"^set service lldp (\S+)", data, M)
+ if lldp_output:
+ for item in set(lldp_output):
+ lldp_regex = r" %s .+$" % item
+ cfg = findall(lldp_regex, data, M)
+ obj = self.render_config(cfg)
+ if obj:
+ objs.update(obj)
+ lldp_service = findall(r"^set service (lldp)?('lldp')", data, M)
+ if lldp_service or lldp_output:
+ lldp_obj = {}
+ lldp_obj["enable"] = True
+ objs.update(lldp_obj)
+
+ facts = {}
+ params = utils.validate_config(self.argument_spec, {"config": objs})
+ facts["lldp_global"] = utils.remove_empties(params["config"])
+
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ protocol_conf = "\n".join(
+ filter(lambda x: ("legacy-protocols" in x), conf)
+ )
+ att_conf = "\n".join(
+ filter(lambda x: ("legacy-protocols" not in x), conf)
+ )
+ config = self.parse_attribs(["snmp", "address"], att_conf)
+ config["legacy_protocols"] = self.parse_protocols(protocol_conf)
+ return utils.remove_empties(config)
+
+ def parse_protocols(self, conf):
+ protocol_support = None
+ if conf:
+ protocols = findall(r"^.*legacy-protocols (.+)", conf, M)
+ if protocols:
+ protocol_support = []
+ for protocol in protocols:
+ protocol_support.append(protocol.strip("'"))
+ return protocol_support
+
+ def parse_attribs(self, attribs, conf):
+ config = {}
+ for item in attribs:
+ value = utils.parse_conf_arg(conf, item)
+ if value:
+ config[item] = value.strip("'")
+ else:
+ config[item] = None
+ return utils.remove_empties(config)
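+
+# A hedged sketch: for conf = "set service lldp legacy-protocols 'cdp'\n"
+#                             "set service lldp legacy-protocols 'fdp'"
+# parse_protocols would return ["cdp", "fdp"] (quotes stripped).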
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py
new file mode 100644
index 00000000..dcfbc6ee
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/lldp_interfaces/lldp_interfaces.py
@@ -0,0 +1,155 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos lldp_interfaces fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+from re import findall, search, M
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_interfaces.lldp_interfaces import (
+ Lldp_interfacesArgs,
+)
+
+
+class Lldp_interfacesFacts(object):
+ """ The vyos lldp_interfaces fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Lldp_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for lldp_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = connection.get_config()
+
+ objs = []
+ lldp_names = findall(r"^set service lldp interface (\S+)", data, M)
+ if lldp_names:
+ for lldp in set(lldp_names):
+ lldp_regex = r" %s .+$" % lldp
+ cfg = findall(lldp_regex, data, M)
+ obj = self.render_config(cfg)
+ obj["name"] = lldp.strip("'")
+ if obj:
+ objs.append(obj)
+ facts = {}
+ if objs:
+     facts["lldp_interfaces"] = objs
+
+ # a single update suffices; updating with an empty dict is a no-op
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = {}
+ location = {}
+
+ civic_conf = "\n".join(filter(lambda x: ("civic-based" in x), conf))
+ elin_conf = "\n".join(filter(lambda x: ("elin" in x), conf))
+ coordinate_conf = "\n".join(
+ filter(lambda x: ("coordinate-based" in x), conf)
+ )
+ disable = "\n".join(filter(lambda x: ("disable" in x), conf))
+
+ coordinate_based_conf = self.parse_attribs(
+ ["altitude", "datum", "longitude", "latitude"], coordinate_conf
+ )
+ elin_based_conf = self.parse_lldp_elin_based(elin_conf)
+ civic_based_conf = self.parse_lldp_civic_based(civic_conf)
+ if disable:
+ config["enable"] = False
+ if coordinate_conf:
+ location["coordinate_based"] = coordinate_based_conf
+ config["location"] = location
+ elif civic_based_conf:
+ location["civic_based"] = civic_based_conf
+ config["location"] = location
+ elif elin_conf:
+ location["elin"] = elin_based_conf
+ config["location"] = location
+
+ return utils.remove_empties(config)
+
+ def parse_attribs(self, attribs, conf):
+ config = {}
+ for item in attribs:
+ value = utils.parse_conf_arg(conf, item)
+ if value:
+ value = value.strip("'")
+ if item == "altitude":
+ value = int(value)
+ config[item] = value
+ else:
+ config[item] = None
+ return utils.remove_empties(config)
+
+ def parse_lldp_civic_based(self, conf):
+ civic_based = None
+ if conf:
+ civic_info_list = []
+ civic_add_list = findall(r"^.*civic-based ca-type (.+)", conf, M)
+ if civic_add_list:
+ for civic_add in civic_add_list:
+ ca = civic_add.split(" ")
+ c_add = {}
+ c_add["ca_type"] = int(ca[0].strip("'"))
+ c_add["ca_value"] = ca[2].strip("'")
+ civic_info_list.append(c_add)
+
+ country_code = search(
+ r"^.*civic-based country-code (.+)", conf, M
+ )
+ civic_based = {}
+ civic_based["ca_info"] = civic_info_list
+ civic_based["country_code"] = country_code.group(1).strip("'")
+ return civic_based
+
+ def parse_lldp_elin_based(self, conf):
+ elin_based = None
+ if conf:
+ e_num = search(r"^.* elin (.+)", conf, M)
+ elin_based = e_num.group(1).strip("'")
+
+ return elin_based
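+
+# A hedged sketch (mock text): for
+#   conf = "civic-based ca-type 0 ca-value 'UK'\ncivic-based country-code 'GB'"
+# parse_lldp_civic_based would return
+#   {"ca_info": [{"ca_type": 0, "ca_value": "UK"}], "country_code": "GB"}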
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py
new file mode 100644
index 00000000..00049475
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/facts/static_routes/static_routes.py
@@ -0,0 +1,181 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The vyos static_routes fact class.
+This file collects the configuration for a given resource from the
+device, parses it, and populates the facts tree based on that
+configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+from re import findall, search, M
+from copy import deepcopy
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.static_routes.static_routes import (
+ Static_routesArgs,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.utils.utils import (
+ get_route_type,
+)
+
+
+class Static_routesFacts(object):
+ """ The vyos static_routes fact class
+ """
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = Static_routesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def get_device_data(self, connection):
+ return connection.get_config()
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for static_routes
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ data = self.get_device_data(connection)
+ # data is normally populated from the running device
+ # configuration via the connection (see get_device_data above)
+ objs = []
+ r_v4 = []
+ r_v6 = []
+ af = []
+ static_routes = findall(
+ r"set protocols static route(6)? (\S+)", data, M
+ )
+ if static_routes:
+ for route in set(static_routes):
+ route_regex = r" %s .+$" % route[1]
+ cfg = findall(route_regex, data, M)
+ sr = self.render_config(cfg)
+ sr["dest"] = route[1].strip("'")
+ afi = self.get_afi(sr["dest"])
+ if afi == "ipv4":
+ r_v4.append(sr)
+ else:
+ r_v6.append(sr)
+ if r_v4:
+ afi_v4 = {"afi": "ipv4", "routes": r_v4}
+ af.append(afi_v4)
+ if r_v6:
+ afi_v6 = {"afi": "ipv6", "routes": r_v6}
+ af.append(afi_v6)
+ config = {"address_families": af}
+ if config:
+ objs.append(config)
+
+ ansible_facts["ansible_network_resources"].pop("static_routes", None)
+ facts = {}
+ if objs:
+ facts["static_routes"] = []
+ params = utils.validate_config(
+ self.argument_spec, {"config": objs}
+ )
+ for cfg in params["config"]:
+ facts["static_routes"].append(utils.remove_empties(cfg))
+
+ ansible_facts["ansible_network_resources"].update(facts)
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ next_hops_conf = "\n".join(filter(lambda x: ("next-hop" in x), conf))
+ blackhole_conf = "\n".join(filter(lambda x: ("blackhole" in x), conf))
+ routes_dict = {
+ "blackhole_config": self.parse_blackhole(blackhole_conf),
+ "next_hops": self.parse_next_hop(next_hops_conf),
+ }
+ return routes_dict
+
+ def parse_blackhole(self, conf):
+ blackhole = None
+ if conf:
+ distance = search(r"^.*blackhole distance (.\S+)", conf, M)
+ bh = conf.find("blackhole")
+ if distance is not None:
+ blackhole = {}
+ value = distance.group(1).strip("'")
+ blackhole["distance"] = int(value)
+ elif bh:
+ blackhole = {}
+ blackhole["type"] = "blackhole"
+ return blackhole
+
+ def get_afi(self, address):
+ route_type = get_route_type(address)
+ if route_type == "route":
+ return "ipv4"
+ elif route_type == "route6":
+ return "ipv6"
+
+ def parse_next_hop(self, conf):
+ nh_list = None
+ if conf:
+ nh_list = []
+ hop_list = findall(r"^.*next-hop (.+)", conf, M)
+ if hop_list:
+ for hop in hop_list:
+ distance = search(r"^.*distance (.\S+)", hop, M)
+ interface = search(r"^.*interface (.\S+)", hop, M)
+
+ dis = hop.find("disable")
+ hop_info = hop.split(" ")
+ nh_info = {
+ "forward_router_address": hop_info[0].strip("'")
+ }
+ if interface:
+ nh_info["interface"] = interface.group(1).strip("'")
+ if distance:
+ value = distance.group(1).strip("'")
+ nh_info["admin_distance"] = int(value)
+ elif dis >= 1:
+ nh_info["enabled"] = False
+ for element in nh_list:
+ if (
+ element["forward_router_address"]
+ == nh_info["forward_router_address"]
+ ):
+ if "interface" in nh_info.keys():
+ element["interface"] = nh_info["interface"]
+ if "admin_distance" in nh_info.keys():
+ element["admin_distance"] = nh_info[
+ "admin_distance"
+ ]
+ if "enabled" in nh_info.keys():
+ element["enabled"] = nh_info["enabled"]
+ nh_info = None
+ if nh_info is not None:
+ nh_list.append(nh_info)
+ return nh_list
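+
+# A hedged sketch (mock text): for conf = "next-hop '192.0.2.1' distance '2'"
+# parse_next_hop would return
+#   [{"forward_router_address": "192.0.2.1", "admin_distance": 2}]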
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py
new file mode 100644
index 00000000..402adfc9
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/utils/utils.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# utils
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+from ansible.module_utils.six import iteritems
+from ansible_collections.ansible.netcommon.plugins.module_utils.compat import (
+ ipaddress,
+)
+
+
+def search_obj_in_list(name, lst, key="name"):
+ for item in lst:
+ if item[key] == name:
+ return item
+ return None
+
+
+def get_interface_type(interface):
+ """Gets the type of interface
+ """
+ if interface.startswith("eth"):
+ return "ethernet"
+ elif interface.startswith("bond"):
+ return "bonding"
+ elif interface.startswith("vti"):
+ return "vti"
+ elif interface.startswith("lo"):
+ return "loopback"
+
+
+def dict_delete(base, comparable):
+ """
+ This function generates a dict containing key, value pairs for keys
+ that are present in the `base` dict but not present in the `comparable`
+ dict.
+
+ :param base: dict object to base the diff on
+ :param comparable: dict object to compare against base
+ :returns: new dict object with key, value pairs that needs to be deleted.
+
+ """
+ to_delete = dict()
+
+ for key in base:
+ if isinstance(base[key], dict):
+ sub_diff = dict_delete(base[key], comparable.get(key, {}))
+ if sub_diff:
+ to_delete[key] = sub_diff
+ else:
+ if key not in comparable:
+ to_delete[key] = base[key]
+
+ return to_delete
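+
+# A doctest-style sketch of dict_delete (values illustrative):
+#
+#   dict_delete({"a": 1, "b": {"c": 2, "d": 3}}, {"b": {"c": 2}})
+#   # -> {"a": 1, "b": {"d": 3}}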
+
+
+def diff_list_of_dicts(want, have):
+ diff = []
+
+ set_w = set(tuple(d.items()) for d in want)
+ set_h = set(tuple(d.items()) for d in have)
+ difference = set_w.difference(set_h)
+
+ for element in difference:
+ diff.append(dict((x, y) for x, y in element))
+
+ return diff
+
+
+def get_lst_diff_for_dicts(want, have, lst):
+ """
+ This function generates a list containing the values that are
+ present in the `want` dict's list but not in the `have` dict's list.
+ :param want: the want dict
+ :param have: the have dict
+ :param lst: key of the list to diff
+ :return: new list with the values that are only in want.
+ """
+ if not have:
+ diff = want.get(lst) or []
+
+ else:
+ want_elements = want.get(lst) or {}
+ have_elements = have.get(lst) or {}
+ diff = list_diff_want_only(want_elements, have_elements)
+ return diff
+
+
+def get_lst_same_for_dicts(want, have, lst):
+ """
+ This function generates a list containing the values that are
+ common to the lists in the want and have dicts.
+ :param want: the want dict
+ :param have: the have dict
+ :param lst: key of the list to compare
+ :return: new list with the values common to want and have.
+ """
+ diff = None
+ if want and have:
+ want_list = want.get(lst) or {}
+ have_list = have.get(lst) or {}
+ diff = [i for i in have_list if i in want_list]
+ return diff
+
+
+def list_diff_have_only(want_list, have_list):
+ """
+ This function generates a list containing the values
+ that are only in the have list.
+ :param want_list:
+ :param have_list:
+ :return: new list with the values that are only in the have list
+ """
+ if have_list and not want_list:
+ diff = have_list
+ elif not have_list:
+ diff = None
+ else:
+ diff = [i for i in have_list if i not in want_list]
+ return diff
+
+
+def list_diff_want_only(want_list, have_list):
+ """
+ This function generates a list containing the values
+ that are only in the want list.
+ :param want_list:
+ :param have_list:
+ :return: new list with the values that are only in the want list
+ """
+ if have_list and not want_list:
+ diff = None
+ elif not have_list:
+ diff = want_list
+ else:
+ diff = [i for i in want_list if i not in have_list]
+ return diff
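+
+# A hedged sketch: list_diff_want_only([1, 2, 3], [2, 4]) -> [1, 3],
+# i.e. the values present only in the want list.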
+
+
+def search_dict_tv_in_list(d_val1, d_val2, lst, key1, key2):
+ """
+ This function returns the dict object if it exists in the list.
+ :param d_val1:
+ :param d_val2:
+ :param lst:
+ :param key1:
+ :param key2:
+ :return:
+ """
+ obj = next(
+ (
+ item
+ for item in lst
+ if item[key1] == d_val1 and item[key2] == d_val2
+ ),
+ None,
+ )
+ return obj
+
+
+def key_value_in_dict(have_key, have_value, want_dict):
+ """
+ This function checks whether the given key/value pair exists in the dict.
+ :param have_key:
+ :param have_value:
+ :param want_dict:
+ :return:
+ """
+ for key, value in iteritems(want_dict):
+ if key == have_key and value == have_value:
+ return True
+ return False
+
+
+def is_dict_element_present(dict, key):
+ """
+ This function checks whether the key is present in dict.
+ :param dict:
+ :param key:
+ :return:
+ """
+ for item in dict:
+ if item == key:
+ return True
+ return False
+
+
+def get_ip_address_version(address):
+ """
+ This function returns the IP version of an address.
+ :param address: IP address
+ :return:
+ """
+ try:
+ address = unicode(address)
+ except NameError:
+ address = str(address)
+ version = ipaddress.ip_address(address.split("/")[0]).version
+ return version
+
+
+def get_route_type(address):
+ """
+ This function returns the route type based on IP address
+ :param address:
+ :return:
+ """
+ version = get_ip_address_version(address)
+ if version == 6:
+ return "route6"
+ elif version == 4:
+ return "route"
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py
new file mode 100644
index 00000000..908395a6
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py
@@ -0,0 +1,124 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.connection import Connection, ConnectionError
+
+_DEVICE_CONFIGS = {}
+
+vyos_provider_spec = {
+ "host": dict(),
+ "port": dict(type="int"),
+ "username": dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
+ "password": dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True
+ ),
+ "ssh_keyfile": dict(
+ fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
+ ),
+ "timeout": dict(type="int"),
+}
+vyos_argument_spec = {
+ "provider": dict(
+ type="dict", options=vyos_provider_spec, removed_in_version=2.14
+ ),
+}
+
+
+def get_provider_argspec():
+ return vyos_provider_spec
+
+
+def get_connection(module):
+ if hasattr(module, "_vyos_connection"):
+ return module._vyos_connection
+
+ capabilities = get_capabilities(module)
+ network_api = capabilities.get("network_api")
+ if network_api == "cliconf":
+ module._vyos_connection = Connection(module._socket_path)
+ else:
+ module.fail_json(msg="Invalid connection type %s" % network_api)
+
+ return module._vyos_connection
+
+
+def get_capabilities(module):
+ if hasattr(module, "_vyos_capabilities"):
+ return module._vyos_capabilities
+
+ try:
+ capabilities = Connection(module._socket_path).get_capabilities()
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ module._vyos_capabilities = json.loads(capabilities)
+ return module._vyos_capabilities
+
+
+def get_config(module, flags=None, format=None):
+ flags = [] if flags is None else flags
+ global _DEVICE_CONFIGS
+
+ # the fetched configuration is cached module-wide so repeated
+ # calls do not hit the device again
+ if _DEVICE_CONFIGS != {}:
+     return _DEVICE_CONFIGS
+ else:
+ connection = get_connection(module)
+ try:
+ out = connection.get_config(flags=flags, format=format)
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ cfg = to_text(out, errors="surrogate_then_replace").strip()
+ _DEVICE_CONFIGS = cfg
+ return cfg
+
+
+def run_commands(module, commands, check_rc=True):
+ connection = get_connection(module)
+ try:
+ response = connection.run_commands(
+ commands=commands, check_rc=check_rc
+ )
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+ return response
+
+
+def load_config(module, commands, commit=False, comment=None):
+ connection = get_connection(module)
+
+ try:
+ response = connection.edit_config(
+ candidate=commands, commit=commit, comment=comment
+ )
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ return response.get("diff")
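+
+# A hedged usage sketch inside a module (the AnsibleModule instance and
+# its network_cli connection are assumed):
+#
+#   commands = ["set system host-name 'vyos01'"]
+#   diff = load_config(module, commands, commit=True, comment="via ansible")
+#   # load_config returns the device-reported diff, if any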
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py
new file mode 100644
index 00000000..18538491
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: vyos_command
+author: Nathaniel Case (@Qalthos)
+short_description: Run one or more commands on VyOS devices
+description:
+- The command module allows running one or more commands on remote devices running
+ VyOS. This module can also be introspected to validate key parameters before returning
+ successfully. If the conditional statements are not met in the wait period, the
+ task fails.
+- Certain C(show) commands in VyOS produce many lines of output and use a custom pager
+ that can cause this module to hang. If the value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH)
+ is not set, the default number of 10000 is used.
+extends_documentation_fragment:
+- vyos.vyos.vyos
+options:
+ commands:
+ description:
+ - The ordered set of commands to execute on the remote device running VyOS. The
+ output from the command execution is returned to the playbook. If the I(wait_for)
+ argument is provided, the module does not return until the condition is satisfied
+ or the number of retries has been exceeded.
+ required: true
+ wait_for:
+ description:
+ - Specifies what to evaluate from the output of the command and what conditionals
+ to apply. This argument will cause the task to wait for a particular conditional
+ to be true before moving forward. If the conditional is not true by the configured
+ I(retries), the task fails. See examples.
+ aliases:
+ - waitfor
+ match:
+ description:
+ - The I(match) argument is used in conjunction with the I(wait_for) argument to
+ specify the match policy. Valid values are C(all) or C(any). If the value is
+ set to C(all) then all conditionals in the wait_for must be satisfied. If the
+ value is set to C(any) then only one of the values must be satisfied.
+ default: all
+ choices:
+ - any
+ - all
+ retries:
+ description:
+ - Specifies the number of times a command should be retried before it is considered
+ failed. The command is run on the target device on every retry and evaluated against
+ the I(wait_for) conditionals.
+ default: 10
+ interval:
+ description:
+ - Configures the interval in seconds to wait between I(retries) of the command.
+ If the command does not pass the specified conditions, the interval indicates
+ how long to wait before trying the command again.
+ default: 1
+notes:
+- Tested against VyOS 1.1.8 (helium).
+- Running C(show system boot-messages all) will cause the module to hang since VyOS
+ is using a custom pager setting to display the output of that command.
+- If a command sent to the device requires answering a prompt, it is possible to pass
+ a dict containing I(command), I(answer) and I(prompt). See examples.
+- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
+"""
+
+EXAMPLES = """
+tasks:
+ - name: show configuration on ethernet devices eth0 and eth1
+ vyos_command:
+ commands:
+ - show interfaces ethernet {{ item }}
+ with_items:
+ - eth0
+ - eth1
+
+ - name: run multiple commands and check if version output contains specific version string
+ vyos_command:
+ commands:
+ - show version
+ - show hardware cpu
+ wait_for:
+ - "result[0] contains 'VyOS 1.1.7'"
+
+ - name: run command that requires answering a prompt
+ vyos_command:
+ commands:
+ - command: 'rollback 1'
+ prompt: 'Proceed with reboot? [confirm][y]'
+ answer: y
+"""
+
+RETURN = """
+stdout:
+ description: The set of responses from the commands
+ returned: always apart from low level errors (such as action plugin)
+ type: list
+ sample: ['...', '...']
+stdout_lines:
+ description: The value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+failed_conditions:
+ description: The list of conditionals that have failed
+ returned: failed
+ type: list
+ sample: ['...', '...']
+warnings:
+ description: The list of warnings (if any) generated by module based on arguments
+ returned: always
+ type: list
+ sample: ['...', '...']
+"""
+import time
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
+ Conditional,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ transform_commands,
+ to_lines,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ run_commands,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ vyos_argument_spec,
+)
+
+
+def parse_commands(module, warnings):
+ commands = transform_commands(module)
+
+ if module.check_mode:
+ for item in list(commands):
+ if not item["command"].startswith("show"):
+ warnings.append(
+ "Only show commands are supported when using check mode, not "
+ "executing %s" % item["command"]
+ )
+ commands.remove(item)
+
+ return commands
+
+
+def main():
+ spec = dict(
+ commands=dict(type="list", required=True),
+ wait_for=dict(type="list", aliases=["waitfor"]),
+ match=dict(default="all", choices=["all", "any"]),
+ retries=dict(default=10, type="int"),
+ interval=dict(default=1, type="int"),
+ )
+
+ spec.update(vyos_argument_spec)
+
+ module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
+
+ warnings = list()
+ result = {"changed": False, "warnings": warnings}
+ commands = parse_commands(module, warnings)
+ wait_for = module.params["wait_for"] or list()
+
+ try:
+ conditionals = [Conditional(c) for c in wait_for]
+ except AttributeError as exc:
+ module.fail_json(msg=to_text(exc))
+
+ retries = module.params["retries"]
+ interval = module.params["interval"]
+ match = module.params["match"]
+
+ for _ in range(retries):
+ responses = run_commands(module, commands)
+
+ for item in list(conditionals):
+ if item(responses):
+ if match == "any":
+ conditionals = list()
+ break
+ conditionals.remove(item)
+
+ if not conditionals:
+ break
+
+ time.sleep(interval)
+
+ if conditionals:
+ failed_conditions = [item.raw for item in conditionals]
+ msg = "One or more conditional statements have not been satisfied"
+ module.fail_json(msg=msg, failed_conditions=failed_conditions)
+
+ result.update(
+ {"stdout": responses, "stdout_lines": list(to_lines(responses))}
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
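
The heart of this module is the retry loop in main(). Below is a standalone sketch of the same polling pattern, with a plain predicate standing in for netcommon's Conditional class (all names and sample outputs here are illustrative):

import time


def wait_until(run, conditionals, retries=10, interval=1, match="all"):
    """Poll run() until the conditionals pass, mirroring main() above."""
    pending = list(conditionals)
    responses = []
    for _ in range(retries):
        responses = run()
        for cond in list(pending):
            if cond(responses):
                if match == "any":
                    return responses, []
                pending.remove(cond)
        if not pending:
            break
        time.sleep(interval)
    return responses, pending  # non-empty pending == failed conditions


# Example: succeed once the fake device finally reports the version string.
outputs = iter(["still booting", "still booting", "Version: VyOS 1.1.8"])
responses, failed = wait_until(
    run=lambda: [next(outputs)],
    conditionals=[lambda r: "1.1.8" in r[0]],
    retries=3,
    interval=0,
)
print(responses, failed)  # ['Version: VyOS 1.1.8'] []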
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py
new file mode 100644
index 00000000..b899045a
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: vyos_config
+author: Nathaniel Case (@Qalthos)
+short_description: Manage VyOS configuration on remote device
+description:
+- This module provides configuration file management of VyOS devices. It provides
+ arguments for managing both the configuration file and state of the active configuration.
+ All configuration statements are based on `set` and `delete` commands in the device
+ configuration.
+extends_documentation_fragment:
+- vyos.vyos.vyos
+notes:
+- Tested against VyOS 1.1.8 (helium).
+- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
+options:
+ lines:
+ description:
+ - The ordered set of configuration lines to be managed and compared with the existing
+ configuration on the remote device.
+ src:
+ description:
+ - The C(src) argument specifies the path to the source config file to load. The
+ source config file can either be in bracket format or set format. The source
+ file can include Jinja2 template variables.
+ match:
+ description:
+ - The C(match) argument controls the method used to match against the current
+ active configuration. By default, the desired config is matched against the
+ active config and the deltas are loaded. If the C(match) argument is set to
+ C(none) the active configuration is ignored and the configuration is always
+ loaded.
+ default: line
+ choices:
+ - line
+ - none
+ backup:
+ description:
+ - The C(backup) argument will back up the current device's active configuration
+ to the Ansible control host prior to making any changes. If the C(backup_options)
+ value is not given, the backup file will be located in the backup folder in
+ the playbook root directory or role root directory, if the playbook is part of an
+ Ansible role. If the directory does not exist, it is created.
+ type: bool
+ default: 'no'
+ comment:
+ description:
+ - Allows a commit description to be included when the configuration is committed.
+ If the configuration is not changed or committed, this argument is ignored.
+ default: configured by vyos_config
+ config:
+ description:
+ - The C(config) argument specifies the base configuration to use to compare against
+ the desired configuration. If this value is not specified, the module will
+ automatically retrieve the current active configuration from the remote device.
+ save:
+ description:
+ - The C(save) argument controls whether or not changes made to the active configuration
+ are saved to disk. This is independent of committing the config. When set
+ to True, the active configuration is saved.
+ type: bool
+ default: 'no'
+ backup_options:
+ description:
+ - This is a dict object containing configurable options related to the backup file
+ path. The value of this option is read only when C(backup) is set to I(yes);
+ if C(backup) is set to I(no), this option is silently ignored.
+ suboptions:
+ filename:
+ description:
+ - The filename to be used to store the backup configuration. If the filename
+ is not given, it will be generated from the hostname and the current date and
+ time, in the format <hostname>_config.<current-date>@<current-time>
+ dir_path:
+ description:
+ - This option provides the path, ending with the directory name, in which the
+ backup configuration file will be stored. If the directory does not exist it
+ will be created first, and the filename is either the value of C(filename) or
+ the default filename described in the C(filename) option. If no path value is
+ given, a I(backup) directory will be created in the current working directory
+ and the backup configuration will be copied to C(filename) within that
+ I(backup) directory.
+ type: path
+ type: dict
+"""
+
+EXAMPLES = """
+- name: configure the remote device
+ vyos_config:
+ lines:
+ - set system host-name {{ inventory_hostname }}
+ - set service lldp
+ - delete service dhcp-server
+
+- name: backup and load from file
+ vyos_config:
+ src: vyos.cfg
+ backup: yes
+
+- name: render a Jinja2 template onto the VyOS router
+ vyos_config:
+ src: vyos_template.j2
+
+- name: for idempotency, use full-form commands
+ vyos_config:
+ lines:
+ # - set int eth eth2 description 'OUTSIDE'
+ - set interface ethernet eth2 description 'OUTSIDE'
+
+- name: configurable backup path
+ vyos_config:
+ backup: yes
+ backup_options:
+ filename: backup.cfg
+ dir_path: /home/user
+"""
+
+RETURN = """
+commands:
+ description: The list of configuration commands sent to the device
+ returned: always
+ type: list
+ sample: ['...', '...']
+filtered:
+ description: The list of configuration commands removed to avoid a load failure
+ returned: always
+ type: list
+ sample: ['...', '...']
+backup_path:
+ description: The full path to the backup file
+ returned: when backup is yes
+ type: str
+ sample: /playbooks/ansible/backup/vyos_config.2016-07-16@22:28:34
+filename:
+ description: The name of the backup file
+ returned: when backup is yes and filename is not specified in backup options
+ type: str
+ sample: vyos_config.2016-07-16@22:28:34
+shortname:
+ description: The full path to the backup file excluding the timestamp
+ returned: when backup is yes and filename is not specified in backup options
+ type: str
+ sample: /playbooks/ansible/backup/vyos_config
+date:
+ description: The date extracted from the backup file name
+ returned: when backup is yes
+ type: str
+ sample: "2016-07-16"
+time:
+ description: The time extracted from the backup file name
+ returned: when backup is yes
+ type: str
+ sample: "22:28:34"
+"""
+import re
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ load_config,
+ get_config,
+ run_commands,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ vyos_argument_spec,
+ get_connection,
+)
+
+
+DEFAULT_COMMENT = "configured by vyos_config"
+
+CONFIG_FILTERS = [
+ re.compile(r"set system login user \S+ authentication encrypted-password")
+]
+
+
+def get_candidate(module):
+ contents = module.params["src"] or module.params["lines"]
+
+ if module.params["src"]:
+ contents = format_commands(contents.splitlines())
+
+ contents = "\n".join(contents)
+ return contents
+
+
+def format_commands(commands):
+ """
+ This function formats the input commands: it strips leading white space
+ from lines starting with 'set' or 'delete' and skips empty lines.
+ :param commands:
+ :return: list of commands
+ """
+ return [
+ line.strip() if line.split()[0] in ("set", "delete") else line
+ for line in commands
+ if len(line.strip()) > 0
+ ]
+
+
+def diff_config(commands, config):
+ config = [str(c).replace("'", "") for c in config.splitlines()]
+
+ updates = list()
+ visited = set()
+
+ for line in commands:
+ item = str(line).replace("'", "")
+
+ if not item.startswith("set") and not item.startswith("delete"):
+ raise ValueError("line must start with either `set` or `delete`")
+
+ elif item.startswith("set") and item not in config:
+ updates.append(line)
+
+ elif item.startswith("delete"):
+ if not config:
+ updates.append(line)
+ else:
+ item = re.sub(r"delete", "set", item)
+ for entry in config:
+ if entry.startswith(item) and line not in visited:
+ updates.append(line)
+ visited.add(line)
+
+ return list(updates)
+
+
+def sanitize_config(config, result):
+ result["filtered"] = list()
+ index_to_filter = list()
+ for regex in CONFIG_FILTERS:
+ for index, line in enumerate(list(config)):
+ if regex.search(line):
+ result["filtered"].append(line)
+ index_to_filter.append(index)
+ # Delete all filtered configs
+ for filter_index in sorted(index_to_filter, reverse=True):
+ del config[filter_index]
+
+
+def run(module, result):
+ # get the current active config from the node or passed in via
+ # the config param
+ config = module.params["config"] or get_config(module)
+
+ # create the candidate config object from the arguments
+ candidate = get_candidate(module)
+
+ # create loadable config that includes only the configuration updates
+ connection = get_connection(module)
+ try:
+ response = connection.get_diff(
+ candidate=candidate,
+ running=config,
+ diff_match=module.params["match"],
+ )
+ except ConnectionError as exc:
+ module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
+
+ commands = response.get("config_diff")
+ sanitize_config(commands, result)
+
+ result["commands"] = commands
+
+ commit = not module.check_mode
+ comment = module.params["comment"]
+
+ diff = None
+ if commands:
+ diff = load_config(module, commands, commit=commit, comment=comment)
+
+ if result.get("filtered"):
+ result["warnings"].append(
+ "Some configuration commands were "
+ "removed, please see the filtered key"
+ )
+
+ result["changed"] = True
+
+ if module._diff:
+ result["diff"] = {"prepared": diff}
+
+
+def main():
+ backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
+ argument_spec = dict(
+ src=dict(type="path"),
+ lines=dict(type="list"),
+ match=dict(default="line", choices=["line", "none"]),
+ comment=dict(default=DEFAULT_COMMENT),
+ config=dict(),
+ backup=dict(type="bool", default=False),
+ backup_options=dict(type="dict", options=backup_spec),
+ save=dict(type="bool", default=False),
+ )
+
+ argument_spec.update(vyos_argument_spec)
+
+ mutually_exclusive = [("lines", "src")]
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+
+ warnings = list()
+
+ result = dict(changed=False, warnings=warnings)
+
+ if module.params["backup"]:
+ result["__backup__"] = get_config(module=module)
+
+ if any((module.params["src"], module.params["lines"])):
+ run(module, result)
+
+ if module.params["save"]:
+ diff = run_commands(module, commands=["configure", "compare saved"])[1]
+ if diff != "[edit]":
+ run_commands(module, commands=["save"])
+ result["changed"] = True
+ run_commands(module, commands=["exit"])
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
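
sanitize_config above strips commands that match CONFIG_FILTERS before they are reported, so encrypted passwords never land in results. A standalone sketch of that filtering (the sample commands are made up):

import re

CONFIG_FILTERS = [
    re.compile(r"set system login user \S+ authentication encrypted-password")
]


def sanitize(commands):
    """Split commands into (kept, filtered), like sanitize_config above."""
    kept, filtered = [], []
    for line in commands:
        if any(regex.search(line) for regex in CONFIG_FILTERS):
            filtered.append(line)
        else:
            kept.append(line)
    return kept, filtered


commands = [
    "set system host-name 'vyos'",
    "set system login user admin authentication encrypted-password '$6$xyz'",
]
kept, filtered = sanitize(commands)
print(kept)      # ["set system host-name 'vyos'"]
print(filtered)  # the encrypted-password line, reported but not echoed back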
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py
new file mode 100644
index 00000000..19fb727f
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The module file for vyos_facts
+"""
+
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": [u"preview"],
+ "supported_by": "network",
+}
+
+
+DOCUMENTATION = """module: vyos_facts
+short_description: Get facts about VyOS devices.
+description:
+- Collects facts from network devices running the vyos operating system. This module
+ places the facts gathered in the fact tree keyed by the respective resource name. The
+ facts module will always collect a base set of facts from the device and can enable
+ or disable collection of additional facts.
+author:
+- Nathaniel Case (@qalthos)
+- Nilashish Chakraborty (@Nilashishc)
+- Rohit Thakur (@rohitthakur2590)
+extends_documentation_fragment:
+- vyos.vyos.vyos
+notes:
+- Tested against VyOS 1.1.8 (helium).
+- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected to a given subset. Possible
+ values for this argument include all, default, config, and neighbors. Can specify
+ a list of values to include a larger subset. Values can also be used with an
+ initial C(!) to specify that a specific subset should not be collected.
+ required: false
+ default: '!config'
+ gather_network_resources:
+ description:
+ - When supplied, this argument will restrict the facts collected to a given subset.
+ Possible values for this argument include all and the resources like interfaces.
+ Can specify a list of values to include a larger subset. Values can also be
+ used with an initial C(!) to specify that a specific subset should not be
+ collected. Valid subsets are 'all', 'interfaces', 'l3_interfaces', 'lag_interfaces',
+ 'lldp_global', 'lldp_interfaces', 'static_routes', 'firewall_rules'.
+ required: false
+"""
+
+EXAMPLES = """
+# Gather all facts
+- vyos_facts:
+ gather_subset: all
+ gather_network_resources: all
+
+# collect only the config and default facts
+- vyos_facts:
+ gather_subset: config
+
+# collect everything except the config
+- vyos_facts:
+ gather_subset: "!config"
+
+# Collect only the interfaces facts
+- vyos_facts:
+ gather_subset:
+ - '!all'
+ - '!min'
+ gather_network_resources:
+ - interfaces
+
+# Do not collect interfaces facts
+- vyos_facts:
+ gather_network_resources:
+ - "!interfaces"
+
+# Collect interfaces and minimal default facts
+- vyos_facts:
+ gather_subset: min
+ gather_network_resources: interfaces
+"""
+
+RETURN = """
+ansible_net_config:
+ description: The running-config from the device
+ returned: when config is configured
+ type: str
+ansible_net_commits:
+ description: The set of available configuration revisions
+ returned: when present
+ type: list
+ansible_net_hostname:
+ description: The configured system hostname
+ returned: always
+ type: str
+ansible_net_model:
+ description: The device model string
+ returned: always
+ type: str
+ansible_net_serialnum:
+ description: The serial number of the device
+ returned: always
+ type: str
+ansible_net_version:
+ description: The version of the software running
+ returned: always
+ type: str
+ansible_net_neighbors:
+ description: The set of LLDP neighbors
+ returned: when interface is configured
+ type: list
+ansible_net_gather_subset:
+ description: The list of subsets gathered by the module
+ returned: always
+ type: list
+ansible_net_api:
+ description: The name of the transport
+ returned: always
+ type: str
+ansible_net_python_version:
+ description: The Python version Ansible controller is using
+ returned: always
+ type: str
+ansible_net_gather_network_resources:
+ description: The list of fact resource subsets collected from the device
+ returned: always
+ type: list
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.facts.facts import (
+ FactsArgs,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts.facts import (
+ Facts,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
+ vyos_argument_spec,
+)
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: ansible_facts
+ """
+ argument_spec = FactsArgs.argument_spec
+ argument_spec.update(vyos_argument_spec)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec, supports_check_mode=True
+ )
+
+ warnings = []
+ if module.params["gather_subset"] == "!config":
+ warnings.append(
+ "default value for `gather_subset` will be changed to `min` from `!config` v2.11 onwards"
+ )
+
+ result = Facts(module).get_facts()
+
+ ansible_facts, additional_warnings = result
+ warnings.extend(additional_warnings)
+
+ module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
+
+
+if __name__ == "__main__":
+ main()
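
The `!` prefix in gather_subset values (for example the `!config` default) excludes a subset. The actual resolution happens inside the vendored Facts/FactsArgs classes, which are not part of this patch; the sketch below only captures the gist of the negation handling:

def resolve_subsets(requested, valid):
    """Resolve a gather_subset-style list, honouring a leading '!'."""
    selected, excluded = set(), set()
    for item in requested:
        if item.startswith("!"):
            name = item[1:]
            excluded.update(valid if name == "all" else {name})
        else:
            selected.update(valid if item == "all" else {item})
    if not selected:  # nothing selected explicitly: start from everything
        selected = set(valid)
    return selected - excluded


valid = {"default", "config", "neighbors"}
print(resolve_subsets(["!config"], valid))  # {'default', 'neighbors'}
print(resolve_subsets(["all"], valid))      # all three subsets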
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py
new file mode 100644
index 00000000..8fe572b0
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_lldp_interfaces.py
@@ -0,0 +1,513 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2019 Red Hat
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for vyos_lldp_interfaces
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "network",
+}
+
+DOCUMENTATION = """module: vyos_lldp_interfaces
+short_description: Manages attributes of lldp interfaces on VyOS devices.
+description: This module manages attributes of lldp interfaces on VyOS network devices.
+notes:
+- Tested against VyOS 1.1.8 (helium).
+- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
+author:
+- Rohit Thakur (@rohitthakur2590)
+options:
+ config:
+ description: A list of lldp interfaces configurations.
+ type: list
+ suboptions:
+ name:
+ description:
+ - Name of the lldp interface.
+ type: str
+ required: true
+ enable:
+ description:
+ - Enable or disable LLDP on the interface.
+ type: bool
+ default: true
+ location:
+ description:
+ - LLDP-MED location data.
+ type: dict
+ suboptions:
+ civic_based:
+ description:
+ - Civic-based location data.
+ type: dict
+ suboptions:
+ ca_info:
+ description: LLDP-MED address info
+ type: list
+ suboptions:
+ ca_type:
+ description: LLDP-MED Civic Address type.
+ type: int
+ required: true
+ ca_value:
+ description: LLDP-MED Civic Address value.
+ type: str
+ required: true
+ country_code:
+ description: Country Code
+ type: str
+ required: true
+ coordinate_based:
+ description:
+ - Coordinate-based location.
+ type: dict
+ suboptions:
+ altitude:
+ description: Altitude in meters.
+ type: int
+ datum:
+ description: Coordinate datum type.
+ type: str
+ choices:
+ - WGS84
+ - NAD83
+ - MLLW
+ latitude:
+ description: Latitude.
+ type: str
+ required: true
+ longitude:
+ description: Longitude.
+ type: str
+ required: true
+ elin:
+ description: Emergency Call Service ELIN number (between 10 and 25 digits).
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before state:
+# -------------
+#
+# vyos@vyos:~$ show configuration commands | grep lldp
+#
+- name: Merge provided configuration with device configuration
+ vyos_lldp_interfaces:
+ config:
+ - name: 'eth1'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth2'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+ state: merged
+#
+#
+# -------------------------
+# Module Execution Result
+# -------------------------
+#
+# "before": []
+#
+# "commands": [
+# "set service lldp interface eth1 location civic-based country-code 'US'",
+# "set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'",
+# "set service lldp interface eth1",
+# "set service lldp interface eth2 location coordinate-based latitude '33.524449N'",
+# "set service lldp interface eth2 location coordinate-based altitude '2200'",
+# "set service lldp interface eth2 location coordinate-based datum 'WGS84'",
+# "set service lldp interface eth2 location coordinate-based longitude '222.267255W'",
+# "set service lldp interface eth2"
+# ]
+#
+# "after": [
+# {
+# "location": {
+# "coordinate_based": {
+# "altitude": 2200,
+# "datum": "WGS84",
+# "latitude": "33.524449N",
+# "longitude": "222.267255W"
+# }
+# },
+# "name": "eth2"
+# },
+# {
+# "location": {
+# "civic_based": {
+# "ca_info": [
+# {
+# "ca_type": 0,
+# "ca_value": "ENGLISH"
+# }
+# ],
+# "country_code": "US"
+# }
+# },
+# "name": "eth1"
+# }
+# ],
+#
+# After state:
+# -------------
+#
+# vyos@vyos:~$ show configuration commands | grep lldp
+# set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'
+# set service lldp interface eth1 location civic-based country-code 'US'
+# set service lldp interface eth2 location coordinate-based altitude '2200'
+# set service lldp interface eth2 location coordinate-based datum 'WGS84'
+# set service lldp interface eth2 location coordinate-based latitude '33.524449N'
+# set service lldp interface eth2 location coordinate-based longitude '222.267255W'
+
+
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# vyos@vyos:~$ show configuration commands | grep lldp
+# set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'
+# set service lldp interface eth1 location civic-based country-code 'US'
+# set service lldp interface eth2 location coordinate-based altitude '2200'
+# set service lldp interface eth2 location coordinate-based datum 'WGS84'
+# set service lldp interface eth2 location coordinate-based latitude '33.524449N'
+# set service lldp interface eth2 location coordinate-based longitude '222.267255W'
+#
+- name: Replace device configurations of listed LLDP interfaces with provided configurations
+ vyos_lldp_interfaces:
+ config:
+ - name: 'eth2'
+ location:
+ civic_based:
+ country_code: 'US'
+ ca_info:
+ - ca_type: 0
+ ca_value: 'ENGLISH'
+
+ - name: 'eth1'
+ location:
+ coordinate_based:
+ altitude: 2200
+ datum: 'WGS84'
+ longitude: '222.267255W'
+ latitude: '33.524449N'
+ state: replaced
+#
+#
+# -------------------------
+# Module Execution Result
+# -------------------------
+#
+# "before": [
+# {
+# "location": {
+# "coordinate_based": {
+# "altitude": 2200,
+# "datum": "WGS84",
+# "latitude": "33.524449N",
+# "longitude": "222.267255W"
+# }
+# },
+# "name": "eth2"
+# },
+# {
+# "location": {
+# "civic_based": {
+# "ca_info": [
+# {
+# "ca_type": 0,
+# "ca_value": "ENGLISH"
+# }
+# ],
+# "country_code": "US"
+# }
+# },
+# "name": "eth1"
+# }
+# ]
+#
+# "commands": [
+# "delete service lldp interface eth2 location",
+# "set service lldp interface eth2 'disable'",
+# "set service lldp interface eth2 location civic-based country-code 'US'",
+# "set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'",
+# "delete service lldp interface eth1 location",
+# "set service lldp interface eth1 'disable'",
+# "set service lldp interface eth1 location coordinate-based latitude '33.524449N'",
+# "set service lldp interface eth1 location coordinate-based altitude '2200'",
+# "set service lldp interface eth1 location coordinate-based datum 'WGS84'",
+# "set service lldp interface eth1 location coordinate-based longitude '222.267255W'"
+# ]
+#
+# "after": [
+# {
+# "location": {
+# "civic_based": {
+# "ca_info": [
+# {
+# "ca_type": 0,
+# "ca_value": "ENGLISH"
+# }
+# ],
+# "country_code": "US"
+# }
+# },
+# "name": "eth2"
+# },
+# {
+# "location": {
+# "coordinate_based": {
+# "altitude": 2200,
+# "datum": "WGS84",
+# "latitude": "33.524449N",
+# "longitude": "222.267255W"
+# }
+# },
+# "name": "eth1"
+# }
+# ]
+#
+# After state:
+# -------------
+#
+# vyos@vyos:~$ show configuration commands | grep lldp
+# set service lldp interface eth1 'disable'
+# set service lldp interface eth1 location coordinate-based altitude '2200'
+# set service lldp interface eth1 location coordinate-based datum 'WGS84'
+# set service lldp interface eth1 location coordinate-based latitude '33.524449N'
+# set service lldp interface eth1 location coordinate-based longitude '222.267255W'
+# set service lldp interface eth2 'disable'
+# set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'
+# set service lldp interface eth2 location civic-based country-code 'US'
+
+
+# Using overridden
+#
+# Before state
+# --------------
+#
+# vyos@vyos:~$ show configuration commands | grep lldp
+# set service lldp interface eth1 'disable'
+# set service lldp interface eth1 location coordinate-based altitude '2200'
+# set service lldp interface eth1 location coordinate-based datum 'WGS84'
+# set service lldp interface eth1 location coordinate-based latitude '33.524449N'
+# set service lldp interface eth1 location coordinate-based longitude '222.267255W'
+# set service lldp interface eth2 'disable'
+# set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'
+# set service lldp interface eth2 location civic-based country-code 'US'
+#
+- name: Override all device configuration with the provided configuration
+ vyos_lldp_interfaces:
+ config:
+ - name: 'eth2'
+ location:
+ elin: 0000000911
+
+ state: overridden
+#
+#
+# -------------------------
+# Module Execution Result
+# -------------------------
+#
+# "before": [
+# {
+# "enable": false,
+# "location": {
+# "civic_based": {
+# "ca_info": [
+# {
+# "ca_type": 0,
+# "ca_value": "ENGLISH"
+# }
+# ],
+# "country_code": "US"
+# }
+# },
+# "name": "eth2"
+# },
+# {
+# "enable": false,
+# "location": {
+# "coordinate_based": {
+# "altitude": 2200,
+# "datum": "WGS84",
+# "latitude": "33.524449N",
+# "longitude": "222.267255W"
+# }
+# },
+# "name": "eth1"
+# }
+# ]
+#
+# "commands": [
+# "delete service lldp interface eth2 location",
+# "delete service lldp interface eth2 disable",
+# "set service lldp interface eth2 location elin 0000000911"
+# ]
+#
+# "after": [
+# {
+# "location": {
+# "elin": 0000000911
+# },
+# "name": "eth2"
+# }
+# ]
+#
+#
+# After state
+# ------------
+#
+# vyos@vyos# run show configuration commands | grep lldp
+# set service lldp interface eth2 location elin '0000000911'
+
+
+# Using deleted
+#
+# Before state
+# -------------
+#
+# vyos@vyos# run show configuration commands | grep lldp
+# set service lldp interface eth2 location elin '0000000911'
+#
+- name: Delete LLDP interface attributes of the given interfaces
+ vyos_lldp_interfaces:
+ config:
+ - name: 'eth2'
+ state: deleted
+#
+#
+# ------------------------
+# Module Execution Results
+# ------------------------
+#
+# "before": [
+# {
+# "location": {
+# "elin": 0000000911
+# },
+# "name": "eth2"
+# }
+# ]
+# "commands": [
+# "delete service lldp interface eth2"
+# ]
+#
+# "after": []
+# After state
+# ------------
+# vyos@vyos# run show configuration commands | grep lldp
+# set service 'lldp'
+
+
+"""
+RETURN = """
+before:
+ description: The configuration as structured data prior to module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The configuration as structured data after module completion.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample:
+ - "set service lldp interface eth2 'disable'"
+ - "delete service lldp interface eth1 location"
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lldp_interfaces.lldp_interfaces import (
+ Lldp_interfacesArgs,
+)
+from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.config.lldp_interfaces.lldp_interfaces import (
+ Lldp_interfaces,
+)
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ required_if = [
+ ("state", "merged", ("config",)),
+ ("state", "replaced", ("config",)),
+ ("state", "overridden", ("config",)),
+ ]
+ module = AnsibleModule(
+ argument_spec=Lldp_interfacesArgs.argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ )
+
+ result = Lldp_interfaces(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
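
The required_if triples passed to AnsibleModule above mean that config must be supplied whenever state is merged, replaced, or overridden (but not deleted). A standalone sketch of the check this implies (simplified; AnsibleModule performs the real validation internally):

def check_required_if(required_if, params):
    """Mimic the required_if validation AnsibleModule applies above."""
    missing = []
    for key, value, requirements in required_if:
        if params.get(key) == value:
            missing.extend(r for r in requirements if params.get(r) is None)
    return missing


required_if = [
    ("state", "merged", ("config",)),
    ("state", "replaced", ("config",)),
    ("state", "overridden", ("config",)),
]
print(check_required_if(required_if, {"state": "deleted"}))  # []
print(check_required_if(required_if, {"state": "merged"}))   # ['config']
print(check_required_if(required_if, {"state": "merged", "config": [{"name": "eth1"}]}))  # []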
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py
new file mode 100644
index 00000000..fe7712f6
--- /dev/null
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/terminal/vyos.py
@@ -0,0 +1,53 @@
+#
+# (c) 2016 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+import re
+
+from ansible.plugins.terminal import TerminalBase
+from ansible.errors import AnsibleConnectionFailure
+
+
+class TerminalModule(TerminalBase):
+
+ terminal_stdout_re = [
+ re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
+ re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$"),
+ ]
+
+ terminal_stderr_re = [
+ re.compile(br"\n\s*Invalid command:"),
+ re.compile(br"\nCommit failed"),
+ re.compile(br"\n\s+Set failed"),
+ ]
+
+ terminal_length = int(os.getenv("ANSIBLE_VYOS_TERMINAL_LENGTH", 10000))  # env values are strings; the %d format below needs an int
+
+ def on_open_shell(self):
+ try:
+ for cmd in (b"set terminal length 0", b"set terminal width 512"):
+ self._exec_cli_command(cmd)
+ self._exec_cli_command(
+ b"set terminal length %d" % self.terminal_length
+ )
+ except AnsibleConnectionFailure:
+ raise AnsibleConnectionFailure("unable to set terminal parameters")
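
The terminal_stdout_re patterns decide when the device has finished emitting output: a match on the trailing bytes means a prompt is showing. A quick standalone check of the two patterns against made-up prompt samples:

import re

terminal_stdout_re = [
    re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
    re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$"),
]


def is_prompt(output):
    """True if the trailing bytes look like a VyOS prompt."""
    return any(regex.search(output) for regex in terminal_stdout_re)


print(is_prompt(b"vyos@vyos:~$ "))           # True: operational mode
print(is_prompt(b"vyos@vyos# "))             # True: configuration mode
print(is_prompt(b"Interface   IP Address"))  # False: still mid-output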
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py
new file mode 120000
index 00000000..0364d766
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py
@@ -0,0 +1 @@
+../../../../../../plugins/action/win_copy.py \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1
new file mode 120000
index 00000000..6fc438d6
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/async_status.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1
new file mode 120000
index 00000000..81d8afa3
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_acl.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py
new file mode 120000
index 00000000..3a2434cf
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_acl.py \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1
new file mode 120000
index 00000000..a34fb012
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_copy.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py
new file mode 120000
index 00000000..2d2c69a2
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_copy.py \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1
new file mode 120000
index 00000000..8ee5c2b5
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_file.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py
new file mode 120000
index 00000000..b4bc0583
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_file.py \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1
new file mode 120000
index 00000000..d7b25ed0
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_ping.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py
new file mode 120000
index 00000000..0b97c87b
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_ping.py \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1
new file mode 120000
index 00000000..eb07a017
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_shell.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py
new file mode 120000
index 00000000..3c6f0749
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_shell.py \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1
new file mode 120000
index 00000000..62a7a40a
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_stat.ps1 \ No newline at end of file
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py
new file mode 120000
index 00000000..1db4c95e
--- /dev/null
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py
@@ -0,0 +1 @@
+../../../../../../plugins/modules/win_stat.py \ No newline at end of file
diff --git a/test/support/windows-integration/plugins/action/win_copy.py b/test/support/windows-integration/plugins/action/win_copy.py
new file mode 100644
index 00000000..adb918be
--- /dev/null
+++ b/test/support/windows-integration/plugins/action/win_copy.py
@@ -0,0 +1,522 @@
+# This file is part of Ansible
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import os.path
+import shutil
+import tempfile
+import traceback
+import zipfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum
+
+
+def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
+ """
+ Walk a filesystem tree returning enough information to copy the files.
+ This is similar to the _walk_dirs function in ``copy.py`` but returns
+ a dict instead of a tuple for each entry and includes the checksum of
+ a local file if wanted.
+
+ :arg topdir: The directory that the filesystem tree is rooted at
+ :arg loader: The self._loader object from ActionBase
+ :kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault
+ :kwarg base_path: The initial directory structure to strip off of the
+ files for the destination directory. If this is None (the default),
+ the base_path is set to ``top_dir``.
+ :kwarg local_follow: Whether to follow symlinks on the source. When set
+ to False, no symlinks are dereferenced. When set to True (the
+ default), the code will dereference most symlinks. However, symlinks
+ can still be present if needed to break a circular link.
+ :kwarg trailing_slash_detector: Function to determine if a path has
+ a trailing directory separator. Only needed when dealing with paths on
+ a remote machine (in which case, pass in a function that is aware of the
+ directory separator conventions on the remote machine).
+ :kwarg checksum_check: Whether to get the checksum of the local file and add it to the dict
+ :returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
+ This separates all the files, directories, and symlinks along with
+ important information about each::
+
+ {
+ 'files': [{
+ src: '/absolute/path/to/copy/from',
+ dest: 'relative/path/to/copy/to',
+ checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
+ }, ...],
+ 'directories': [{
+ src: '/absolute/path/to/copy/from',
+ dest: 'relative/path/to/copy/to'
+ }, ...],
+ 'symlinks': [{
+ src: '/symlink/target/path',
+ dest: 'relative/path/to/copy/to'
+ }, ...],
+
+ }
+
+ The ``symlinks`` field is only populated if ``local_follow`` is set to False
+ *or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
+ to None if checksum_check=False.
+
+ """
+ # Convert the path segments into byte strings
+
+ r_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
+ """
+ This is a closure (function utilizing variables from its parent
+ function's scope) so that we only need one copy of all the containers.
+ Note that this function uses side effects (See the Variables used from
+ outer scope).
+
+ :arg topdir: The directory we are walking for files
+ :arg rel_offset: Integer defining how many characters to strip off of
+ the beginning of a path
+ :arg parent_dirs: Directories that we're copying that this directory is in.
+ :kwarg rel_base: String to prepend to the path after ``rel_offset`` is
+ applied to form the relative path.
+
+ Variables used from the outer scope
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :r_files: Dictionary of files in the hierarchy. See the return value
+ for :func:`walk` for the structure of this dictionary.
+ :local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
+ """
+ for base_path, sub_folders, files in os.walk(topdir):
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
+
+ if os.path.islink(filepath):
+ # Dereference the symlink
+ real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt)
+ if local_follow and os.path.isfile(real_file):
+ # Add the file pointed to by the symlink
+ r_files['files'].append(
+ {
+ "src": real_file,
+ "dest": dest_filepath,
+ "checksum": _get_local_checksum(checksum_check, real_file)
+ }
+ )
+ else:
+ # Mark this file as a symlink to copy
+ r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
+ else:
+ # Just a normal file
+ real_file = loader.get_real_file(filepath, decrypt=decrypt)
+ r_files['files'].append(
+ {
+ "src": real_file,
+ "dest": dest_filepath,
+ "checksum": _get_local_checksum(checksum_check, real_file)
+ }
+ )
+
+ for dirname in sub_folders:
+ dirpath = os.path.join(base_path, dirname)
+ dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
+ real_dir = os.path.realpath(dirpath)
+ dir_stats = os.stat(real_dir)
+
+ if os.path.islink(dirpath):
+ if local_follow:
+ if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
+ # Just insert the symlink if the target directory
+ # exists inside of the copy already
+ r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
+ else:
+ # Walk the dirpath to find all parent directories.
+ new_parents = set()
+ parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
+ for parent in range(len(parent_dir_list), 0, -1):
+ parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
+ if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
+ # Reached the point at which the directory
+ # tree is already known. Don't add any
+ # more or we might go to an ancestor that
+ # isn't being copied.
+ break
+ new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
+
+ if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
+ # This was a circular symlink, so add it as
+ # a symlink
+ r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
+ else:
+ # Walk the directory pointed to by the symlink
+ r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
+ offset = len(real_dir) + 1
+ _recurse(real_dir, offset, parent_dirs.union(new_parents),
+ rel_base=dest_dirpath,
+ checksum_check=checksum_check)
+ else:
+ # Add the symlink to the destination
+ r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
+ else:
+ # Just a normal directory
+ r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
+
+ # Check if the source ends with a "/" so that we know which directory
+ # level to work at (similar to rsync)
+ source_trailing_slash = False
+ if trailing_slash_detector:
+ source_trailing_slash = trailing_slash_detector(topdir)
+ else:
+ source_trailing_slash = topdir.endswith(os.path.sep)
+
+ # Calculate the offset needed to strip the base_path to make relative
+ # paths
+ if base_path is None:
+ base_path = topdir
+ if not source_trailing_slash:
+ base_path = os.path.dirname(base_path)
+ if topdir.startswith(base_path):
+ offset = len(base_path)
+
+ # Make sure we're making the new paths relative
+ if trailing_slash_detector and not trailing_slash_detector(base_path):
+ offset += 1
+ elif not base_path.endswith(os.path.sep):
+ offset += 1
+
+ if os.path.islink(topdir) and not local_follow:
+ r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
+ return r_files
+
+ dir_stats = os.stat(topdir)
+ parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
+ # Actually walk the directory hierarchy
+ _recurse(topdir, offset, parents, checksum_check=checksum_check)
+
+ return r_files
+
+
+def _get_local_checksum(get_checksum, local_path):
+ if get_checksum:
+ return checksum(local_path)
+ else:
+ return None
+
+
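
_walk_dirs above returns a dict with files, directories, and symlinks lists of src/dest pairs. A much-reduced standalone sketch of that shape using only os.walk, without the vault decryption, symlink dereferencing, or checksum handling of the real function:

import os


def walk_tree(topdir):
    """Collect src/dest pairs the way _walk_dirs does, minus vault
    decryption, symlink dereferencing, and checksum support."""
    offset = len(topdir.rstrip(os.path.sep)) + 1
    r_files = {"files": [], "directories": [], "symlinks": []}
    for base, dirs, files in os.walk(topdir):
        for name in dirs:
            path = os.path.join(base, name)
            r_files["directories"].append({"src": path, "dest": path[offset:]})
        for name in files:
            path = os.path.join(base, name)
            r_files["files"].append({"src": path, "dest": path[offset:]})
    return r_files


# e.g. walk_tree("/etc/ansible") yields absolute 'src' and relative 'dest' paths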
+class ActionModule(ActionBase):
+
+ WIN_PATH_SEPARATOR = "\\"
+
+ def _create_content_tempfile(self, content):
+ ''' Create a tempfile containing defined content '''
+ fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(content_tempfile)
+ raise Exception(err)
+ finally:
+ f.close()
+ return content_tempfile
+
+ def _create_zip_tempfile(self, files, directories):
+ tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
+ zip_file_path = os.path.join(tmpdir, "win_copy.zip")
+ zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True)
+
+ # encode the file/dir name with base64 so Windows can unzip a unicode
+ # filename and get the right name; Windows doesn't handle unicode names
+ # very well otherwise
+ for directory in directories:
+ directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
+ archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
+
+ encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
+ zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
+
+ for file in files:
+ file_path = to_bytes(file['src'], errors='surrogate_or_strict')
+ archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
+
+ encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
+ zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
+
+ return zip_file_path
+
+ def _remove_tempfile_if_content_defined(self, content, content_tempfile):
+ if content is not None:
+ os.remove(content_tempfile)
+
+ def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp, backup):
+ if self._play_context.check_mode:
+ module_return = dict(changed=True)
+ return module_return
+
+ # copy the file across to the server
+ tmp_src = self._connection._shell.join_path(tmp, 'source')
+ self._transfer_file(local_file, tmp_src)
+
+ copy_args = self._task.args.copy()
+ copy_args.update(
+ dict(
+ dest=dest,
+ src=tmp_src,
+ _original_basename=source_rel,
+ _copy_mode="single",
+ backup=backup,
+ )
+ )
+ copy_args.pop('content', None)
+
+ copy_result = self._execute_module(module_name="copy",
+ module_args=copy_args,
+ task_vars=task_vars)
+
+ return copy_result
+
+ def _copy_zip_file(self, dest, files, directories, task_vars, tmp, backup):
+ # create local zip file containing all the files and directories that
+ # need to be copied to the server
+ if self._play_context.check_mode:
+ module_return = dict(changed=True)
+ return module_return
+
+ try:
+ zip_file = self._create_zip_tempfile(files, directories)
+ except Exception as e:
+ module_return = dict(
+ changed=False,
+ failed=True,
+ msg="failed to create tmp zip file: %s" % to_text(e),
+ exception=traceback.format_exc()
+ )
+ return module_return
+
+ zip_path = self._loader.get_real_file(zip_file)
+
+ # send zip file to remote, file must end in .zip so
+ # Com Shell.Application works
+ tmp_src = self._connection._shell.join_path(tmp, 'source.zip')
+ self._transfer_file(zip_path, tmp_src)
+
+ # run the explode operation of win_copy on remote
+ copy_args = self._task.args.copy()
+ copy_args.update(
+ dict(
+ src=tmp_src,
+ dest=dest,
+ _copy_mode="explode",
+ backup=backup,
+ )
+ )
+ copy_args.pop('content', None)
+ module_return = self._execute_module(module_name='copy',
+ module_args=copy_args,
+ task_vars=task_vars)
+ shutil.rmtree(os.path.dirname(zip_path))
+ return module_return
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for file transfer operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+ content = self._task.args.get('content', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
+ local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
+ force = boolean(self._task.args.get('force', True), strict=False)
+ decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
+ backup = boolean(self._task.args.get('backup', False), strict=False)
+
+ result['src'] = source
+ result['dest'] = dest
+
+ result['failed'] = True
+ if (source is None and content is None) or dest is None:
+ result['msg'] = "src (or content) and dest are required"
+ elif source is not None and content is not None:
+ result['msg'] = "src and content are mutually exclusive"
+ elif content is not None and dest is not None and (
+ dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
+ result['msg'] = "dest must be a file if content is defined"
+ else:
+ del result['failed']
+
+ if result.get('failed'):
+ return result
+
+ # If content is defined make a temp file and write the content into it
+ content_tempfile = None
+ if content is not None:
+ try:
+                # if content came to us as a dict or list it was decoded JSON;
+                # we need to serialize it back into a string and write it out
+                if isinstance(content, (dict, list)):
+ content_tempfile = self._create_content_tempfile(json.dumps(content))
+ else:
+ content_tempfile = self._create_content_tempfile(content)
+ source = content_tempfile
+ except Exception as err:
+ result['failed'] = True
+ result['msg'] = "could not write content tmp file: %s" % to_native(err)
+ return result
+ # all actions should occur on the remote server, run win_copy module
+ elif remote_src:
+ new_module_args = self._task.args.copy()
+ new_module_args.update(
+ dict(
+ _copy_mode="remote",
+ dest=dest,
+ src=source,
+ force=force,
+ backup=backup,
+ )
+ )
+ new_module_args.pop('content', None)
+ result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
+ return result
+        # find_needle returns a path that may not have a trailing slash on a
+        # directory, so we record the original state first and restore it afterwards
+ else:
+ trailing_slash = source.endswith(os.path.sep)
+ try:
+ # find in expected paths
+ source = self._find_needle('files', source)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
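+            # restore the original trailing-slash state if find_needle changed it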
+ if trailing_slash != source.endswith(os.path.sep):
+ if source[-1] == os.path.sep:
+ source = source[:-1]
+ else:
+ source = source + os.path.sep
+
+        # The source files, directories and symlinks that we will try to copy to the destination
+ source_files = {'files': [], 'directories': [], 'symlinks': []}
+
+        # If source is a directory, populate our source dict; otherwise source is a file, so add its details directly.
+ if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
+ result['operation'] = 'folder_copy'
+
+ # Get a list of the files we want to replicate on the remote side
+ source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow,
+ trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
+ checksum_check=force)
+
+            # If it's a recursive copy, the destination is always a dir,
+ # explicitly mark it so (note - win_copy module relies on this).
+ if not self._connection._shell.path_has_trailing_slash(dest):
+ dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
+
+ check_dest = dest
+ # Source is a file, add details to source_files dict
+ else:
+ result['operation'] = 'file_copy'
+
+ # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
+ try:
+ source_full = self._loader.get_real_file(source, decrypt=decrypt)
+ except AnsibleFileNotFound as e:
+ result['failed'] = True
+ result['msg'] = "could not find src=%s, %s" % (source_full, to_text(e))
+ return result
+
+ original_basename = os.path.basename(source)
+ result['original_basename'] = original_basename
+
+ # check if dest ends with / or \ and append source filename to dest
+ if self._connection._shell.path_has_trailing_slash(dest):
+ check_dest = dest
+ filename = original_basename
+ result['dest'] = self._connection._shell.join_path(dest, filename)
+ else:
+ # replace \\ with / so we can use os.path to get the filename or dirname
+ unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
+ filename = os.path.basename(unix_path)
+ check_dest = os.path.dirname(unix_path)
+
+ file_checksum = _get_local_checksum(force, source_full)
+ source_files['files'].append(
+ dict(
+ src=source_full,
+ dest=filename,
+ checksum=file_checksum
+ )
+ )
+ result['checksum'] = file_checksum
+ result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
+
+ # find out the files/directories/symlinks that we need to copy to the server
+ query_args = self._task.args.copy()
+ query_args.update(
+ dict(
+ _copy_mode="query",
+ dest=check_dest,
+ force=force,
+ files=source_files['files'],
+ directories=source_files['directories'],
+ symlinks=source_files['symlinks'],
+ )
+ )
+        # src is not required for the query and would fail path validation if it contains characters only allowed on Unix
+ query_args.pop('src', None)
+
+ query_args.pop('content', None)
+ query_return = self._execute_module(module_args=query_args,
+ task_vars=task_vars)
+
+ if query_return.get('failed') is True:
+ result.update(query_return)
+ return result
+
+        if (len(query_return['files']) > 0 or len(query_return['directories']) > 0) and self._connection._shell.tmpdir is None:
+ self._connection._shell.tmpdir = self._make_tmp_path()
+
+ if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
+ # we only need to copy 1 file, don't mess around with zips
+ file_src = query_return['files'][0]['src']
+ file_dest = query_return['files'][0]['dest']
+ result.update(self._copy_single_file(file_src, dest, file_dest,
+ task_vars, self._connection._shell.tmpdir, backup))
+ if result.get('failed') is True:
+ result['msg'] = "failed to copy file %s: %s" % (file_src, result['msg'])
+ result['changed'] = True
+
+ elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
+ # either multiple files or directories need to be copied, compress
+ # to a zip and 'explode' the zip on the server
+ # TODO: handle symlinks
+ result.update(self._copy_zip_file(dest, source_files['files'],
+ source_files['directories'],
+ task_vars, self._connection._shell.tmpdir, backup))
+ result['changed'] = True
+ else:
+ # no operations need to occur
+ result['failed'] = False
+ result['changed'] = False
+
+        # remove the content tmp file and the remote tmp path if they were created
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ return result
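+
+
+# A minimal usage sketch (hypothetical task), assuming this plugin is loaded as
+# the win_copy action:
+#
+#   - name: copy a local directory tree to a Windows host
+#     win_copy:
+#       src: files/app/          # a trailing slash copies the directory contents
+#       dest: C:\app\
+#       backup: yes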
diff --git a/test/support/windows-integration/plugins/action/win_reboot.py b/test/support/windows-integration/plugins/action/win_reboot.py
new file mode 100644
index 00000000..c408f4f3
--- /dev/null
+++ b/test/support/windows-integration/plugins/action/win_reboot.py
@@ -0,0 +1,96 @@
+# Copyright: (c) 2018, Matt Davis <mdavis@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from datetime import datetime
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.action import ActionBase
+from ansible.plugins.action.reboot import ActionModule as RebootActionModule
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(RebootActionModule, ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'connect_timeout', 'connect_timeout_sec', 'msg', 'post_reboot_delay', 'post_reboot_delay_sec', 'pre_reboot_delay', 'pre_reboot_delay_sec',
+ 'reboot_timeout', 'reboot_timeout_sec', 'shutdown_timeout', 'shutdown_timeout_sec', 'test_command',
+ ))
+
+ DEFAULT_BOOT_TIME_COMMAND = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime"
+ DEFAULT_CONNECT_TIMEOUT = 5
+ DEFAULT_PRE_REBOOT_DELAY = 2
+ DEFAULT_SUDOABLE = False
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '/r /t {delay_sec} /c "{message}"'
+
+ DEPRECATED_ARGS = {
+ 'shutdown_timeout': '2.5',
+ 'shutdown_timeout_sec': '2.5',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ def get_distribution(self, task_vars):
+ return {'name': 'windows', 'version': '', 'family': ''}
+
+ def get_shutdown_command(self, task_vars, distribution):
+ return self.DEFAULT_SHUTDOWN_COMMAND
+
+ def run_test_command(self, distribution, **kwargs):
+ # Need to wrap the test_command in our PowerShell encoded wrapper. This is done to align the command input to a
+ # common shell and to allow the psrp connection plugin to report the correct exit code without manually setting
+ # $LASTEXITCODE for just that plugin.
+ test_command = self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND)
+ kwargs['test_command'] = self._connection._shell._encode_script(test_command)
+ super(ActionModule, self).run_test_command(distribution, **kwargs)
+
+ def perform_reboot(self, task_vars, distribution):
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ reboot_command = self._connection._shell._encode_script('{0} {1}'.format(shutdown_command, shutdown_command_args))
+
+ display.vvv("{action}: rebooting server...".format(action=self._task.action))
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command))
+
+ result = {}
+ reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
+ result['start'] = datetime.utcnow()
+
+ # Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
+ stdout = reboot_result['stdout']
+ stderr = reboot_result['stderr']
+ if reboot_result['rc'] == 1190 or (reboot_result['rc'] != 0 and "(1190)" in reboot_result['stderr']):
+ display.warning('A scheduled reboot was pre-empted by Ansible.')
+
+ # Try to abort (this may fail if it was already aborted)
+ result1 = self._low_level_execute_command(self._connection._shell._encode_script('shutdown /a'),
+ sudoable=self.DEFAULT_SUDOABLE)
+
+ # Initiate reboot again
+ result2 = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE)
+
+ reboot_result['rc'] = result2['rc']
+ stdout += result1['stdout'] + result2['stdout']
+ stderr += result1['stderr'] + result2['stderr']
+
+ if reboot_result['rc'] != 0:
+ result['failed'] = True
+ result['rebooted'] = False
+ result['msg'] = "Reboot command failed, error was: {stdout} {stderr}".format(
+ stdout=to_native(stdout.strip()),
+ stderr=to_native(stderr.strip()))
+ return result
+
+ result['failed'] = False
+ return result
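+
+
+# A minimal usage sketch (hypothetical task), assuming this plugin is loaded as
+# the win_reboot action:
+#
+#   - name: reboot the host and wait until it responds again
+#     win_reboot:
+#       reboot_timeout: 600
+#       test_command: whoami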
diff --git a/test/support/windows-integration/plugins/action/win_template.py b/test/support/windows-integration/plugins/action/win_template.py
new file mode 100644
index 00000000..20494b93
--- /dev/null
+++ b/test/support/windows-integration/plugins/action/win_template.py
@@ -0,0 +1,29 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.plugins.action.template import ActionModule as TemplateActionModule
+
+
+# Even though TemplateActionModule inherits from ActionBase, we still need to
+# directly inherit from ActionBase to appease the plugin loader.
+class ActionModule(TemplateActionModule, ActionBase):
+ DEFAULT_NEWLINE_SEQUENCE = '\r\n'
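+
+
+# A minimal usage sketch (hypothetical task); the only behavioural difference
+# from the core template action is the '\r\n' newline default:
+#
+#   - name: render a config file with CRLF line endings
+#     win_template:
+#       src: app.conf.j2
+#       dest: C:\app\app.conf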
diff --git a/test/support/windows-integration/plugins/become/runas.py b/test/support/windows-integration/plugins/become/runas.py
new file mode 100644
index 00000000..c8ae881c
--- /dev/null
+++ b/test/support/windows-integration/plugins/become/runas.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ become: runas
+ short_description: Run As user
+ description:
+        - This become plugin allows your remote/login user to execute commands as another user via the Windows runas facility.
+ author: ansible (@core)
+ version_added: "2.8"
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: runas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_runas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_RUNAS_USER
+ required: True
+ become_flags:
+ description: Options to pass to runas, a space delimited list of k=v pairs
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: runas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_runas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_RUNAS_FLAGS
+ become_pass:
+ description: password
+ ini:
+ - section: runas_become_plugin
+ key: password
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_runas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_RUNAS_PASS
+ notes:
+        - runas is really implemented in the PowerShell module handler and as such can only be used with winrm connections.
+        - This plugin ignores the 'become_exe' setting as it uses an API and not an executable.
+        - The Secondary Logon service (seclogon) must be running to use runas.
+"""
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'runas'
+
+ def build_become_command(self, cmd, shell):
+ # runas is implemented inside the winrm connection plugin
+ return cmd
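+
+
+# A minimal usage sketch (hypothetical play), assuming a winrm connection:
+#
+#   - hosts: windows
+#     become: yes
+#     become_method: runas
+#     vars:
+#       ansible_become_user: Administrator
+#       ansible_become_pass: '{{ admin_password }}'
+#     tasks:
+#       - win_whoami: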
diff --git a/test/support/windows-integration/plugins/module_utils/Ansible.Service.cs b/test/support/windows-integration/plugins/module_utils/Ansible.Service.cs
new file mode 100644
index 00000000..be0f3db3
--- /dev/null
+++ b/test/support/windows-integration/plugins/module_utils/Ansible.Service.cs
@@ -0,0 +1,1341 @@
+using Microsoft.Win32.SafeHandles;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.ConstrainedExecution;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+using Ansible.Privilege;
+
+namespace Ansible.Service
+{
+ internal class NativeHelpers
+ {
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct ENUM_SERVICE_STATUSW
+ {
+ public string lpServiceName;
+ public string lpDisplayName;
+ public SERVICE_STATUS ServiceStatus;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct QUERY_SERVICE_CONFIGW
+ {
+ public ServiceType dwServiceType;
+ public ServiceStartType dwStartType;
+ public ErrorControl dwErrorControl;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpBinaryPathName;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpLoadOrderGroup;
+ public Int32 dwTagId;
+ public IntPtr lpDependencies; // Can't rely on marshaling as dependencies are delimited by \0.
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpServiceStartName;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpDisplayName;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SC_ACTION
+ {
+ public FailureAction Type;
+ public UInt32 Delay;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_DELAYED_AUTO_START_INFO
+ {
+ public bool fDelayedAutostart;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct SERVICE_DESCRIPTIONW
+ {
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpDescription;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_FAILURE_ACTIONS_FLAG
+ {
+ public bool fFailureActionsOnNonCrashFailures;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct SERVICE_FAILURE_ACTIONSW
+ {
+ public UInt32 dwResetPeriod;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpRebootMsg;
+ [MarshalAs(UnmanagedType.LPWStr)] public string lpCommand;
+ public UInt32 cActions;
+ public IntPtr lpsaActions;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_LAUNCH_PROTECTED_INFO
+ {
+ public LaunchProtection dwLaunchProtected;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_PREFERRED_NODE_INFO
+ {
+ public UInt16 usPreferredNode;
+ public bool fDelete;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_PRESHUTDOWN_INFO
+ {
+ public UInt32 dwPreshutdownTimeout;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct SERVICE_REQUIRED_PRIVILEGES_INFOW
+ {
+ // Can't rely on marshaling as privileges are delimited by \0.
+ public IntPtr pmszRequiredPrivileges;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_SID_INFO
+ {
+ public ServiceSidInfo dwServiceSidType;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_STATUS
+ {
+ public ServiceType dwServiceType;
+ public ServiceStatus dwCurrentState;
+ public ControlsAccepted dwControlsAccepted;
+ public UInt32 dwWin32ExitCode;
+ public UInt32 dwServiceSpecificExitCode;
+ public UInt32 dwCheckPoint;
+ public UInt32 dwWaitHint;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_STATUS_PROCESS
+ {
+ public ServiceType dwServiceType;
+ public ServiceStatus dwCurrentState;
+ public ControlsAccepted dwControlsAccepted;
+ public UInt32 dwWin32ExitCode;
+ public UInt32 dwServiceSpecificExitCode;
+ public UInt32 dwCheckPoint;
+ public UInt32 dwWaitHint;
+ public UInt32 dwProcessId;
+ public ServiceFlags dwServiceFlags;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_TRIGGER
+ {
+ public TriggerType dwTriggerType;
+ public TriggerAction dwAction;
+ public IntPtr pTriggerSubtype;
+ public UInt32 cDataItems;
+ public IntPtr pDataItems;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_TRIGGER_SPECIFIC_DATA_ITEM
+ {
+ public TriggerDataType dwDataType;
+ public UInt32 cbData;
+ public IntPtr pData;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SERVICE_TRIGGER_INFO
+ {
+ public UInt32 cTriggers;
+ public IntPtr pTriggers;
+ public IntPtr pReserved;
+ }
+
+ public enum ConfigInfoLevel : uint
+ {
+ SERVICE_CONFIG_DESCRIPTION = 0x00000001,
+ SERVICE_CONFIG_FAILURE_ACTIONS = 0x00000002,
+ SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 0x00000003,
+ SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 0x00000004,
+ SERVICE_CONFIG_SERVICE_SID_INFO = 0x00000005,
+ SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 0x00000006,
+ SERVICE_CONFIG_PRESHUTDOWN_INFO = 0x00000007,
+ SERVICE_CONFIG_TRIGGER_INFO = 0x00000008,
+ SERVICE_CONFIG_PREFERRED_NODE = 0x00000009,
+ SERVICE_CONFIG_LAUNCH_PROTECTED = 0x0000000c,
+ }
+ }
+
+ internal class NativeMethods
+ {
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern bool ChangeServiceConfigW(
+ SafeHandle hService,
+ ServiceType dwServiceType,
+ ServiceStartType dwStartType,
+ ErrorControl dwErrorControl,
+ string lpBinaryPathName,
+ string lpLoadOrderGroup,
+ IntPtr lpdwTagId,
+ string lpDependencies,
+ string lpServiceStartName,
+ string lpPassword,
+ string lpDisplayName);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern bool ChangeServiceConfig2W(
+ SafeHandle hService,
+ NativeHelpers.ConfigInfoLevel dwInfoLevel,
+ IntPtr lpInfo);
+
+ [DllImport("Advapi32.dll", SetLastError = true)]
+ public static extern bool CloseServiceHandle(
+ IntPtr hSCObject);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern SafeServiceHandle CreateServiceW(
+ SafeHandle hSCManager,
+ string lpServiceName,
+ string lpDisplayName,
+ ServiceRights dwDesiredAccess,
+ ServiceType dwServiceType,
+ ServiceStartType dwStartType,
+ ErrorControl dwErrorControl,
+ string lpBinaryPathName,
+ string lpLoadOrderGroup,
+ IntPtr lpdwTagId,
+ string lpDependencies,
+ string lpServiceStartName,
+ string lpPassword);
+
+ [DllImport("Advapi32.dll", SetLastError = true)]
+ public static extern bool DeleteService(
+ SafeHandle hService);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern bool EnumDependentServicesW(
+ SafeHandle hService,
+ UInt32 dwServiceState,
+ SafeMemoryBuffer lpServices,
+ UInt32 cbBufSize,
+ out UInt32 pcbBytesNeeded,
+ out UInt32 lpServicesReturned);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern SafeServiceHandle OpenSCManagerW(
+ string lpMachineName,
+            string lpDatabaseName,
+ SCMRights dwDesiredAccess);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern SafeServiceHandle OpenServiceW(
+ SafeHandle hSCManager,
+ string lpServiceName,
+ ServiceRights dwDesiredAccess);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern bool QueryServiceConfigW(
+ SafeHandle hService,
+ IntPtr lpServiceConfig,
+ UInt32 cbBufSize,
+ out UInt32 pcbBytesNeeded);
+
+ [DllImport("Advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ public static extern bool QueryServiceConfig2W(
+ SafeHandle hservice,
+ NativeHelpers.ConfigInfoLevel dwInfoLevel,
+ IntPtr lpBuffer,
+ UInt32 cbBufSize,
+ out UInt32 pcbBytesNeeded);
+
+ [DllImport("Advapi32.dll", SetLastError = true)]
+ public static extern bool QueryServiceStatusEx(
+ SafeHandle hService,
+ UInt32 InfoLevel,
+ IntPtr lpBuffer,
+ UInt32 cbBufSize,
+ out UInt32 pcbBytesNeeded);
+ }
+
+ internal class SafeMemoryBuffer : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public UInt32 BufferLength { get; internal set; }
+
+ public SafeMemoryBuffer() : base(true) { }
+ public SafeMemoryBuffer(int cb) : base(true)
+ {
+ BufferLength = (UInt32)cb;
+ base.SetHandle(Marshal.AllocHGlobal(cb));
+ }
+ public SafeMemoryBuffer(IntPtr handle) : base(true)
+ {
+ base.SetHandle(handle);
+ }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ Marshal.FreeHGlobal(handle);
+ return true;
+ }
+ }
+
+ internal class SafeServiceHandle : SafeHandleZeroOrMinusOneIsInvalid
+ {
+ public SafeServiceHandle() : base(true) { }
+ public SafeServiceHandle(IntPtr handle) : base(true) { this.handle = handle; }
+
+ [ReliabilityContract(Consistency.WillNotCorruptState, Cer.MayFail)]
+ protected override bool ReleaseHandle()
+ {
+ return NativeMethods.CloseServiceHandle(handle);
+ }
+ }
+
+ [Flags]
+ public enum ControlsAccepted : uint
+ {
+ None = 0x00000000,
+ Stop = 0x00000001,
+ PauseContinue = 0x00000002,
+ Shutdown = 0x00000004,
+ ParamChange = 0x00000008,
+ NetbindChange = 0x00000010,
+ HardwareProfileChange = 0x00000020,
+ PowerEvent = 0x00000040,
+ SessionChange = 0x00000080,
+ PreShutdown = 0x00000100,
+ }
+
+ public enum ErrorControl : uint
+ {
+ Ignore = 0x00000000,
+ Normal = 0x00000001,
+ Severe = 0x00000002,
+ Critical = 0x00000003,
+ }
+
+ public enum FailureAction : uint
+ {
+ None = 0x00000000,
+ Restart = 0x00000001,
+ Reboot = 0x00000002,
+ RunCommand = 0x00000003,
+ }
+
+ public enum LaunchProtection : uint
+ {
+ None = 0,
+ Windows = 1,
+ WindowsLight = 2,
+ AntimalwareLight = 3,
+ }
+
+ [Flags]
+ public enum SCMRights : uint
+ {
+ Connect = 0x00000001,
+ CreateService = 0x00000002,
+ EnumerateService = 0x00000004,
+ Lock = 0x00000008,
+ QueryLockStatus = 0x00000010,
+ ModifyBootConfig = 0x00000020,
+ AllAccess = 0x000F003F,
+ }
+
+ [Flags]
+ public enum ServiceFlags : uint
+ {
+ None = 0x0000000,
+ RunsInSystemProcess = 0x00000001,
+ }
+
+ [Flags]
+ public enum ServiceRights : uint
+ {
+ QueryConfig = 0x00000001,
+ ChangeConfig = 0x00000002,
+ QueryStatus = 0x00000004,
+ EnumerateDependents = 0x00000008,
+ Start = 0x00000010,
+ Stop = 0x00000020,
+ PauseContinue = 0x00000040,
+ Interrogate = 0x00000080,
+ UserDefinedControl = 0x00000100,
+ Delete = 0x00010000,
+ ReadControl = 0x00020000,
+ WriteDac = 0x00040000,
+ WriteOwner = 0x00080000,
+ AllAccess = 0x000F01FF,
+ AccessSystemSecurity = 0x01000000,
+ }
+
+ public enum ServiceStartType : uint
+ {
+ BootStart = 0x00000000,
+ SystemStart = 0x00000001,
+ AutoStart = 0x00000002,
+ DemandStart = 0x00000003,
+ Disabled = 0x00000004,
+
+        // Not part of the ChangeServiceConfig enumeration but built by the Service class for the StartType property.
+ AutoStartDelayed = 0x1000000
+ }
+
+ [Flags]
+ public enum ServiceType : uint
+ {
+ KernelDriver = 0x00000001,
+ FileSystemDriver = 0x00000002,
+ Adapter = 0x00000004,
+ RecognizerDriver = 0x00000008,
+ Driver = KernelDriver | FileSystemDriver | RecognizerDriver,
+ Win32OwnProcess = 0x00000010,
+ Win32ShareProcess = 0x00000020,
+ Win32 = Win32OwnProcess | Win32ShareProcess,
+ UserProcess = 0x00000040,
+ UserOwnprocess = Win32OwnProcess | UserProcess,
+ UserShareProcess = Win32ShareProcess | UserProcess,
+ UserServiceInstance = 0x00000080,
+ InteractiveProcess = 0x00000100,
+ PkgService = 0x00000200,
+ }
+
+ public enum ServiceSidInfo : uint
+ {
+ None,
+ Unrestricted,
+ Restricted = 3,
+ }
+
+ public enum ServiceStatus : uint
+ {
+ Stopped = 0x00000001,
+ StartPending = 0x00000002,
+ StopPending = 0x00000003,
+ Running = 0x00000004,
+ ContinuePending = 0x00000005,
+ PausePending = 0x00000006,
+ Paused = 0x00000007,
+ }
+
+ public enum TriggerAction : uint
+ {
+ ServiceStart = 0x00000001,
+        ServiceStop = 0x00000002,
+ }
+
+ public enum TriggerDataType : uint
+ {
+        Binary = 0x00000001,
+ String = 0x00000002,
+ Level = 0x00000003,
+ KeywordAny = 0x00000004,
+ KeywordAll = 0x00000005,
+ }
+
+ public enum TriggerType : uint
+ {
+ DeviceInterfaceArrival = 0x00000001,
+ IpAddressAvailability = 0x00000002,
+ DomainJoin = 0x00000003,
+ FirewallPortEvent = 0x00000004,
+ GroupPolicy = 0x00000005,
+ NetworkEndpoint = 0x00000006,
+ Custom = 0x00000014,
+ }
+
+ public class ServiceManagerException : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+
+ public ServiceManagerException(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public ServiceManagerException(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2} - 0x{2:X8})", message, base.Message, errorCode);
+ }
+
+ public override string Message { get { return _msg; } }
+ public static explicit operator ServiceManagerException(string message)
+ {
+ return new ServiceManagerException(message);
+ }
+ }
+
+ public class Action
+ {
+ public FailureAction Type;
+ public UInt32 Delay;
+ }
+
+ public class FailureActions
+ {
+ public UInt32? ResetPeriod = null; // Get is always populated, can be null on set to preserve existing.
+ public string RebootMsg = null;
+ public string Command = null;
+ public List<Action> Actions = null;
+
+ public FailureActions() { }
+
+ internal FailureActions(NativeHelpers.SERVICE_FAILURE_ACTIONSW actions)
+ {
+ ResetPeriod = actions.dwResetPeriod;
+ RebootMsg = actions.lpRebootMsg;
+ Command = actions.lpCommand;
+ Actions = new List<Action>();
+
+ int actionLength = Marshal.SizeOf(typeof(NativeHelpers.SC_ACTION));
+ for (int i = 0; i < actions.cActions; i++)
+ {
+ IntPtr actionPtr = IntPtr.Add(actions.lpsaActions, i * actionLength);
+
+ NativeHelpers.SC_ACTION rawAction = (NativeHelpers.SC_ACTION)Marshal.PtrToStructure(
+ actionPtr, typeof(NativeHelpers.SC_ACTION));
+
+ Actions.Add(new Action()
+ {
+ Type = rawAction.Type,
+ Delay = rawAction.Delay,
+ });
+ }
+ }
+ }
+
+ public class TriggerItem
+ {
+ public TriggerDataType Type;
+ public object Data; // Can be string, List<string>, byte, byte[], or Int64 depending on Type.
+
+ public TriggerItem() { }
+
+ internal TriggerItem(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM dataItem)
+ {
+ Type = dataItem.dwDataType;
+
+ byte[] itemBytes = new byte[dataItem.cbData];
+ Marshal.Copy(dataItem.pData, itemBytes, 0, itemBytes.Length);
+
+ switch (dataItem.dwDataType)
+ {
+ case TriggerDataType.String:
+ string value = Encoding.Unicode.GetString(itemBytes, 0, itemBytes.Length);
+
+ if (value.EndsWith("\0\0"))
+ {
+ // Multistring with a delimiter of \0 and terminated with \0\0.
+ Data = new List<string>(value.Split(new char[1] { '\0' }, StringSplitOptions.RemoveEmptyEntries));
+ }
+ else
+ // Just a single string with null character at the end, strip it off.
+ Data = value.Substring(0, value.Length - 1);
+ break;
+ case TriggerDataType.Level:
+ Data = itemBytes[0];
+ break;
+ case TriggerDataType.KeywordAll:
+ case TriggerDataType.KeywordAny:
+ Data = BitConverter.ToUInt64(itemBytes, 0);
+ break;
+ default:
+ Data = itemBytes;
+ break;
+ }
+ }
+ }
+
+ public class Trigger
+ {
+ // https://docs.microsoft.com/en-us/windows/win32/api/winsvc/ns-winsvc-service_trigger
+ public const string NAMED_PIPE_EVENT_GUID = "1f81d131-3fac-4537-9e0c-7e7b0c2f4b55";
+ public const string RPC_INTERFACE_EVENT_GUID = "bc90d167-9470-4139-a9ba-be0bbbf5b74d";
+ public const string DOMAIN_JOIN_GUID = "1ce20aba-9851-4421-9430-1ddeb766e809";
+ public const string DOMAIN_LEAVE_GUID = "ddaf516e-58c2-4866-9574-c3b615d42ea1";
+ public const string FIREWALL_PORT_OPEN_GUID = "b7569e07-8421-4ee0-ad10-86915afdad09";
+ public const string FIREWALL_PORT_CLOSE_GUID = "a144ed38-8e12-4de4-9d96-e64740b1a524";
+ public const string MACHINE_POLICY_PRESENT_GUID = "659fcae6-5bdb-4da9-b1ff-ca2a178d46e0";
+ public const string NETWORK_MANAGER_FIRST_IP_ADDRESS_ARRIVAL_GUID = "4f27f2de-14e2-430b-a549-7cd48cbc8245";
+ public const string NETWORK_MANAGER_LAST_IP_ADDRESS_REMOVAL_GUID = "cc4ba62a-162e-4648-847a-b6bdf993e335";
+ public const string USER_POLICY_PRESENT_GUID = "54fb46c8-f089-464c-b1fd-59d1b62c3b50";
+
+ public TriggerType Type;
+ public TriggerAction Action;
+ public Guid SubType;
+ public List<TriggerItem> DataItems = new List<TriggerItem>();
+
+ public Trigger() { }
+
+ internal Trigger(NativeHelpers.SERVICE_TRIGGER trigger)
+ {
+ Type = trigger.dwTriggerType;
+ Action = trigger.dwAction;
+ SubType = (Guid)Marshal.PtrToStructure(trigger.pTriggerSubtype, typeof(Guid));
+
+ int dataItemLength = Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM));
+ for (int i = 0; i < trigger.cDataItems; i++)
+ {
+ IntPtr dataPtr = IntPtr.Add(trigger.pDataItems, i * dataItemLength);
+
+ var dataItem = (NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM)Marshal.PtrToStructure(
+ dataPtr, typeof(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM));
+
+ DataItems.Add(new TriggerItem(dataItem));
+ }
+ }
+ }
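+
+    // Sketch (hypothetical values): a trigger that starts the service when
+    // firewall port 1234/tcp is opened could be built as:
+    //
+    //   var trigger = new Trigger()
+    //   {
+    //       Type = TriggerType.FirewallPortEvent,
+    //       Action = TriggerAction.ServiceStart,
+    //       SubType = new Guid(Trigger.FIREWALL_PORT_OPEN_GUID),
+    //       DataItems = new List<TriggerItem>()
+    //       {
+    //           new TriggerItem()
+    //           {
+    //               Type = TriggerDataType.String,
+    //               Data = new List<string>() { "1234", "tcp" },
+    //           },
+    //       },
+    //   };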
+
+ public class Service : IDisposable
+ {
+ private const UInt32 SERVICE_NO_CHANGE = 0xFFFFFFFF;
+
+ private SafeServiceHandle _scmHandle;
+ private SafeServiceHandle _serviceHandle;
+ private SafeMemoryBuffer _rawServiceConfig;
+ private NativeHelpers.SERVICE_STATUS_PROCESS _statusProcess;
+
+ private NativeHelpers.QUERY_SERVICE_CONFIGW _ServiceConfig
+ {
+ get
+ {
+ return (NativeHelpers.QUERY_SERVICE_CONFIGW)Marshal.PtrToStructure(
+ _rawServiceConfig.DangerousGetHandle(), typeof(NativeHelpers.QUERY_SERVICE_CONFIGW));
+ }
+ }
+
+ // ServiceConfig
+ public string ServiceName { get; private set; }
+
+ public ServiceType ServiceType
+ {
+ get { return _ServiceConfig.dwServiceType; }
+ set { ChangeServiceConfig(serviceType: value); }
+ }
+
+ public ServiceStartType StartType
+ {
+ get
+ {
+ ServiceStartType startType = _ServiceConfig.dwStartType;
+ if (startType == ServiceStartType.AutoStart)
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_DELAYED_AUTO_START_INFO>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DELAYED_AUTO_START_INFO);
+
+ if (value.fDelayedAutostart)
+ startType = ServiceStartType.AutoStartDelayed;
+ }
+
+ return startType;
+ }
+ set
+ {
+ ServiceStartType newStartType = value;
+ bool delayedStart = false;
+ if (value == ServiceStartType.AutoStartDelayed)
+ {
+ newStartType = ServiceStartType.AutoStart;
+ delayedStart = true;
+ }
+
+ ChangeServiceConfig(startType: newStartType);
+
+ var info = new NativeHelpers.SERVICE_DELAYED_AUTO_START_INFO()
+ {
+ fDelayedAutostart = delayedStart,
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DELAYED_AUTO_START_INFO, info);
+ }
+ }
+
+ public ErrorControl ErrorControl
+ {
+ get { return _ServiceConfig.dwErrorControl; }
+ set { ChangeServiceConfig(errorControl: value); }
+ }
+
+ public string Path
+ {
+ get { return _ServiceConfig.lpBinaryPathName; }
+ set { ChangeServiceConfig(binaryPath: value); }
+ }
+
+ public string LoadOrderGroup
+ {
+ get { return _ServiceConfig.lpLoadOrderGroup; }
+ set { ChangeServiceConfig(loadOrderGroup: value); }
+ }
+
+ public List<string> DependentOn
+ {
+ get
+ {
+ StringBuilder deps = new StringBuilder();
+ IntPtr depPtr = _ServiceConfig.lpDependencies;
+
+ bool wasNull = false;
+ while (true)
+ {
+ // Get the current char at the pointer and add it to the StringBuilder.
+ byte[] charBytes = new byte[sizeof(char)];
+ Marshal.Copy(depPtr, charBytes, 0, charBytes.Length);
+ depPtr = IntPtr.Add(depPtr, charBytes.Length);
+ char currentChar = BitConverter.ToChar(charBytes, 0);
+ deps.Append(currentChar);
+
+ // If the previous and current char is \0 exit the loop.
+ if (currentChar == '\0' && wasNull)
+ break;
+ wasNull = currentChar == '\0';
+ }
+
+ return new List<string>(deps.ToString().Split(new char[1] { '\0' },
+ StringSplitOptions.RemoveEmptyEntries));
+ }
+ set { ChangeServiceConfig(dependencies: value); }
+ }
+
+ public IdentityReference Account
+ {
+ get
+ {
+ if (_ServiceConfig.lpServiceStartName == null)
+ // User services don't have the start name specified and will be null.
+ return null;
+ else if (_ServiceConfig.lpServiceStartName == "LocalSystem")
+                    // Special string used for the SYSTEM account; this is the same even for different localisations.
+ return (NTAccount)new SecurityIdentifier("S-1-5-18").Translate(typeof(NTAccount));
+ else
+ return new NTAccount(_ServiceConfig.lpServiceStartName);
+ }
+ set
+ {
+ string startName = null;
+ string pass = null;
+
+ if (value != null)
+ {
+ // Create a SID and convert back from a SID to get the Netlogon form regardless of the input
+ // specified.
+ SecurityIdentifier accountSid = (SecurityIdentifier)value.Translate(typeof(SecurityIdentifier));
+ NTAccount accountName = (NTAccount)accountSid.Translate(typeof(NTAccount));
+ string[] accountSplit = accountName.Value.Split(new char[1] { '\\' }, 2);
+
+ // SYSTEM, Local Service, Network Service
+ List<string> serviceAccounts = new List<string> { "S-1-5-18", "S-1-5-19", "S-1-5-20" };
+
+ // Well known service accounts and MSAs should have no password set. Explicitly blank out the
+ // existing password to ensure older passwords are no longer stored by Windows.
+ if (serviceAccounts.Contains(accountSid.Value) || accountSplit[1].EndsWith("$"))
+ pass = "";
+
+ // The SYSTEM account uses this special string to specify that account otherwise use the original
+ // NTAccount value in case it is in a custom format (not Netlogon) for a reason.
+ if (accountSid.Value == serviceAccounts[0])
+ startName = "LocalSystem";
+ else
+ startName = value.Translate(typeof(NTAccount)).Value;
+ }
+
+ ChangeServiceConfig(startName: startName, password: pass);
+ }
+ }
+
+ public string Password { set { ChangeServiceConfig(password: value); } }
+
+ public string DisplayName
+ {
+ get { return _ServiceConfig.lpDisplayName; }
+ set { ChangeServiceConfig(displayName: value); }
+ }
+
+ // ServiceConfig2
+
+ public string Description
+ {
+ get
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_DESCRIPTIONW>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DESCRIPTION);
+
+ return value.lpDescription;
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_DESCRIPTIONW()
+ {
+ lpDescription = value,
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_DESCRIPTION, info);
+ }
+ }
+
+ public FailureActions FailureActions
+ {
+ get
+ {
+ using (SafeMemoryBuffer b = QueryServiceConfig2(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS))
+ {
+ NativeHelpers.SERVICE_FAILURE_ACTIONSW value = (NativeHelpers.SERVICE_FAILURE_ACTIONSW)
+ Marshal.PtrToStructure(b.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_FAILURE_ACTIONSW));
+
+ return new FailureActions(value);
+ }
+ }
+ set
+ {
+                // dwResetPeriod and lpsaActions must be set together; we need to read the existing config if someone
+                // wants to update one or the other but both aren't explicitly defined.
+ UInt32? resetPeriod = value.ResetPeriod;
+ List<Action> actions = value.Actions;
+ if ((resetPeriod != null && actions == null) || (resetPeriod == null && actions != null))
+ {
+ FailureActions existingValue = this.FailureActions;
+
+ if (resetPeriod != null && existingValue.Actions.Count == 0)
+ throw new ArgumentException(
+ "Cannot set FailureAction ResetPeriod without explicit Actions and no existing Actions");
+ else if (resetPeriod == null)
+ resetPeriod = (UInt32)existingValue.ResetPeriod;
+
+ if (actions == null)
+ actions = existingValue.Actions;
+ }
+
+ var info = new NativeHelpers.SERVICE_FAILURE_ACTIONSW()
+ {
+ dwResetPeriod = resetPeriod == null ? 0 : (UInt32)resetPeriod,
+ lpRebootMsg = value.RebootMsg,
+ lpCommand = value.Command,
+ cActions = actions == null ? 0 : (UInt32)actions.Count,
+ lpsaActions = IntPtr.Zero,
+ };
+
+ // null means to keep the existing actions whereas an empty list deletes the actions.
+ if (actions == null)
+ {
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS, info);
+ return;
+ }
+
+ int actionLength = Marshal.SizeOf(typeof(NativeHelpers.SC_ACTION));
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer(actionLength * actions.Count))
+ {
+ info.lpsaActions = buffer.DangerousGetHandle();
+ HashSet<string> privileges = new HashSet<string>();
+
+ for (int i = 0; i < actions.Count; i++)
+ {
+ IntPtr actionPtr = IntPtr.Add(info.lpsaActions, i * actionLength);
+ NativeHelpers.SC_ACTION action = new NativeHelpers.SC_ACTION()
+ {
+ Delay = actions[i].Delay,
+ Type = actions[i].Type,
+ };
+ Marshal.StructureToPtr(action, actionPtr, false);
+
+ // Need to make sure the SeShutdownPrivilege is enabled when adding a reboot failure action.
+ if (action.Type == FailureAction.Reboot)
+ privileges.Add("SeShutdownPrivilege");
+ }
+
+ using (new PrivilegeEnabler(true, privileges.ToList().ToArray()))
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS, info);
+ }
+ }
+ }
+
+ public bool FailureActionsOnNonCrashFailures
+ {
+ get
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_FAILURE_ACTIONS_FLAG>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS_FLAG);
+
+ return value.fFailureActionsOnNonCrashFailures;
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_FAILURE_ACTIONS_FLAG()
+ {
+ fFailureActionsOnNonCrashFailures = value,
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_FAILURE_ACTIONS_FLAG, info);
+ }
+ }
+
+ public ServiceSidInfo ServiceSidInfo
+ {
+ get
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_SID_INFO>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_SERVICE_SID_INFO);
+
+ return value.dwServiceSidType;
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_SID_INFO()
+ {
+ dwServiceSidType = value,
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_SERVICE_SID_INFO, info);
+ }
+ }
+
+ public List<string> RequiredPrivileges
+ {
+ get
+ {
+ using (SafeMemoryBuffer buffer = QueryServiceConfig2(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO))
+ {
+ var value = (NativeHelpers.SERVICE_REQUIRED_PRIVILEGES_INFOW)Marshal.PtrToStructure(
+ buffer.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_REQUIRED_PRIVILEGES_INFOW));
+
+ int structLength = Marshal.SizeOf(value);
+ int stringLength = ((int)buffer.BufferLength - structLength) / sizeof(char);
+
+ if (stringLength > 0)
+ {
+ string privilegesString = Marshal.PtrToStringUni(value.pmszRequiredPrivileges, stringLength);
+ return new List<string>(privilegesString.Split(new char[1] { '\0' },
+ StringSplitOptions.RemoveEmptyEntries));
+ }
+ else
+ return new List<string>();
+ }
+ }
+ set
+ {
+ string privilegeString = String.Join("\0", value ?? new List<string>()) + "\0\0";
+
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer(Marshal.StringToHGlobalUni(privilegeString)))
+ {
+ var info = new NativeHelpers.SERVICE_REQUIRED_PRIVILEGES_INFOW()
+ {
+ pmszRequiredPrivileges = buffer.DangerousGetHandle(),
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO, info);
+ }
+ }
+ }
+
+ public UInt32 PreShutdownTimeout
+ {
+ get
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_PRESHUTDOWN_INFO>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PRESHUTDOWN_INFO);
+
+ return value.dwPreshutdownTimeout;
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_PRESHUTDOWN_INFO()
+ {
+ dwPreshutdownTimeout = value,
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PRESHUTDOWN_INFO, info);
+ }
+ }
+
+ public List<Trigger> Triggers
+ {
+ get
+ {
+ List<Trigger> triggers = new List<Trigger>();
+
+ using (SafeMemoryBuffer b = QueryServiceConfig2(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_TRIGGER_INFO))
+ {
+ var value = (NativeHelpers.SERVICE_TRIGGER_INFO)Marshal.PtrToStructure(
+ b.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_TRIGGER_INFO));
+
+ int triggerLength = Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER));
+ for (int i = 0; i < value.cTriggers; i++)
+ {
+ IntPtr triggerPtr = IntPtr.Add(value.pTriggers, i * triggerLength);
+ var trigger = (NativeHelpers.SERVICE_TRIGGER)Marshal.PtrToStructure(triggerPtr,
+ typeof(NativeHelpers.SERVICE_TRIGGER));
+
+ triggers.Add(new Trigger(trigger));
+ }
+ }
+
+ return triggers;
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_TRIGGER_INFO()
+ {
+ cTriggers = value == null ? 0 : (UInt32)value.Count,
+ pTriggers = IntPtr.Zero,
+ pReserved = IntPtr.Zero,
+ };
+
+ if (info.cTriggers == 0)
+ {
+ try
+ {
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_TRIGGER_INFO, info);
+ }
+ catch (ServiceManagerException e)
+ {
+                        // Can fail with ERROR_INVALID_PARAMETER if no triggers were already set on the service; just
+                        // continue as the service is already in the state we want.
+ if (e.NativeErrorCode != 87)
+ throw;
+ }
+ return;
+ }
+
+ // Due to the dynamic nature of the trigger structure(s) we need to manually calculate the size of the
+ // data items on each trigger if present. This also serializes the raw data items to bytes here.
+ int structDataLength = 0;
+ int dataLength = 0;
+ Queue<byte[]> dataItems = new Queue<byte[]>();
+ foreach (Trigger trigger in value)
+ {
+ if (trigger.DataItems == null || trigger.DataItems.Count == 0)
+ continue;
+
+ foreach (TriggerItem dataItem in trigger.DataItems)
+ {
+ structDataLength += Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM));
+
+ byte[] dataItemBytes;
+ Type dataItemType = dataItem.Data.GetType();
+ if (dataItemType == typeof(byte))
+ dataItemBytes = new byte[1] { (byte)dataItem.Data };
+ else if (dataItemType == typeof(byte[]))
+ dataItemBytes = (byte[])dataItem.Data;
+ else if (dataItemType == typeof(UInt64))
+ dataItemBytes = BitConverter.GetBytes((UInt64)dataItem.Data);
+ else if (dataItemType == typeof(string))
+ dataItemBytes = Encoding.Unicode.GetBytes((string)dataItem.Data + "\0");
+ else if (dataItemType == typeof(List<string>))
+ dataItemBytes = Encoding.Unicode.GetBytes(
+ String.Join("\0", (List<string>)dataItem.Data) + "\0");
+ else
+ throw new ArgumentException(String.Format("Trigger data type '{0}' not a value type",
+ dataItemType.Name));
+
+ dataLength += dataItemBytes.Length;
+ dataItems.Enqueue(dataItemBytes);
+ }
+ }
+
+ using (SafeMemoryBuffer triggerBuffer = new SafeMemoryBuffer(
+ value.Count * Marshal.SizeOf(typeof(NativeHelpers.SERVICE_TRIGGER))))
+ using (SafeMemoryBuffer triggerGuidBuffer = new SafeMemoryBuffer(
+ value.Count * Marshal.SizeOf(typeof(Guid))))
+ using (SafeMemoryBuffer dataItemBuffer = new SafeMemoryBuffer(structDataLength))
+ using (SafeMemoryBuffer dataBuffer = new SafeMemoryBuffer(dataLength))
+ {
+ info.pTriggers = triggerBuffer.DangerousGetHandle();
+
+ IntPtr triggerPtr = triggerBuffer.DangerousGetHandle();
+ IntPtr guidPtr = triggerGuidBuffer.DangerousGetHandle();
+ IntPtr dataItemPtr = dataItemBuffer.DangerousGetHandle();
+ IntPtr dataPtr = dataBuffer.DangerousGetHandle();
+
+ foreach (Trigger trigger in value)
+ {
+ int dataCount = trigger.DataItems == null ? 0 : trigger.DataItems.Count;
+ var rawTrigger = new NativeHelpers.SERVICE_TRIGGER()
+ {
+ dwTriggerType = trigger.Type,
+ dwAction = trigger.Action,
+ pTriggerSubtype = guidPtr,
+ cDataItems = (UInt32)dataCount,
+ pDataItems = dataCount == 0 ? IntPtr.Zero : dataItemPtr,
+ };
+ guidPtr = StructureToPtr(trigger.SubType, guidPtr);
+
+ for (int i = 0; i < rawTrigger.cDataItems; i++)
+ {
+ byte[] dataItemBytes = dataItems.Dequeue();
+ var rawTriggerData = new NativeHelpers.SERVICE_TRIGGER_SPECIFIC_DATA_ITEM()
+ {
+ dwDataType = trigger.DataItems[i].Type,
+ cbData = (UInt32)dataItemBytes.Length,
+ pData = dataPtr,
+ };
+ Marshal.Copy(dataItemBytes, 0, dataPtr, dataItemBytes.Length);
+ dataPtr = IntPtr.Add(dataPtr, dataItemBytes.Length);
+
+ dataItemPtr = StructureToPtr(rawTriggerData, dataItemPtr);
+ }
+
+ triggerPtr = StructureToPtr(rawTrigger, triggerPtr);
+ }
+
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_TRIGGER_INFO, info);
+ }
+ }
+ }
+
+ public UInt16? PreferredNode
+ {
+ get
+ {
+ try
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_PREFERRED_NODE_INFO>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PREFERRED_NODE);
+
+ return value.usPreferredNode;
+ }
+ catch (ServiceManagerException e)
+ {
+ // If host has no NUMA support this will fail with ERROR_INVALID_PARAMETER
+ if (e.NativeErrorCode == 0x00000057) // ERROR_INVALID_PARAMETER
+ return null;
+
+ throw;
+ }
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_PREFERRED_NODE_INFO();
+ if (value == null)
+ info.fDelete = true;
+ else
+ info.usPreferredNode = (UInt16)value;
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_PREFERRED_NODE, info);
+ }
+ }
+
+ public LaunchProtection LaunchProtection
+ {
+ get
+ {
+ var value = QueryServiceConfig2<NativeHelpers.SERVICE_LAUNCH_PROTECTED_INFO>(
+ NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_LAUNCH_PROTECTED);
+
+ return value.dwLaunchProtected;
+ }
+ set
+ {
+ var info = new NativeHelpers.SERVICE_LAUNCH_PROTECTED_INFO()
+ {
+ dwLaunchProtected = value,
+ };
+ ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel.SERVICE_CONFIG_LAUNCH_PROTECTED, info);
+ }
+ }
+
+ // ServiceStatus
+ public ServiceStatus State { get { return _statusProcess.dwCurrentState; } }
+
+ public ControlsAccepted ControlsAccepted { get { return _statusProcess.dwControlsAccepted; } }
+
+ public UInt32 Win32ExitCode { get { return _statusProcess.dwWin32ExitCode; } }
+
+ public UInt32 ServiceExitCode { get { return _statusProcess.dwServiceSpecificExitCode; } }
+
+ public UInt32 Checkpoint { get { return _statusProcess.dwCheckPoint; } }
+
+ public UInt32 WaitHint { get { return _statusProcess.dwWaitHint; } }
+
+ public UInt32 ProcessId { get { return _statusProcess.dwProcessId; } }
+
+ public ServiceFlags ServiceFlags { get { return _statusProcess.dwServiceFlags; } }
+
+ public Service(string name) : this(name, ServiceRights.AllAccess) { }
+
+ public Service(string name, ServiceRights access) : this(name, access, SCMRights.Connect) { }
+
+ public Service(string name, ServiceRights access, SCMRights scmAccess)
+ {
+ ServiceName = name;
+ _scmHandle = OpenSCManager(scmAccess);
+ _serviceHandle = NativeMethods.OpenServiceW(_scmHandle, name, access);
+ if (_serviceHandle.IsInvalid)
+ throw new ServiceManagerException(String.Format("Failed to open service '{0}'", name));
+
+ Refresh();
+ }
+
+ private Service(SafeServiceHandle scmHandle, SafeServiceHandle serviceHandle, string name)
+ {
+ ServiceName = name;
+ _scmHandle = scmHandle;
+ _serviceHandle = serviceHandle;
+
+ Refresh();
+ }
+
+ // EnumDependentServices
+ public List<string> DependedBy
+ {
+ get
+ {
+ UInt32 bytesNeeded = 0;
+ UInt32 numServices = 0;
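+                // first call with a null buffer to get the required size; 3 == SERVICE_STATE_ALL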
+ NativeMethods.EnumDependentServicesW(_serviceHandle, 3, new SafeMemoryBuffer(IntPtr.Zero), 0,
+ out bytesNeeded, out numServices);
+
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer((int)bytesNeeded))
+ {
+ if (!NativeMethods.EnumDependentServicesW(_serviceHandle, 3, buffer, bytesNeeded, out bytesNeeded,
+ out numServices))
+ {
+ throw new ServiceManagerException("Failed to enumerated dependent services");
+ }
+
+ List<string> dependents = new List<string>();
+ Type enumType = typeof(NativeHelpers.ENUM_SERVICE_STATUSW);
+ for (int i = 0; i < numServices; i++)
+ {
+ var service = (NativeHelpers.ENUM_SERVICE_STATUSW)Marshal.PtrToStructure(
+ IntPtr.Add(buffer.DangerousGetHandle(), i * Marshal.SizeOf(enumType)), enumType);
+
+ dependents.Add(service.lpServiceName);
+ }
+
+ return dependents;
+ }
+ }
+ }
+
+ public static Service Create(string name, string binaryPath, string displayName = null,
+ ServiceType serviceType = ServiceType.Win32OwnProcess,
+ ServiceStartType startType = ServiceStartType.DemandStart, ErrorControl errorControl = ErrorControl.Normal,
+ string loadOrderGroup = null, List<string> dependencies = null, string startName = null,
+ string password = null)
+ {
+ SafeServiceHandle scmHandle = OpenSCManager(SCMRights.CreateService | SCMRights.Connect);
+
+ if (displayName == null)
+ displayName = name;
+
+ string depString = null;
+ if (dependencies != null && dependencies.Count > 0)
+ depString = String.Join("\0", dependencies) + "\0\0";
+
+ SafeServiceHandle serviceHandle = NativeMethods.CreateServiceW(scmHandle, name, displayName,
+ ServiceRights.AllAccess, serviceType, startType, errorControl, binaryPath,
+ loadOrderGroup, IntPtr.Zero, depString, startName, password);
+
+ if (serviceHandle.IsInvalid)
+ throw new ServiceManagerException(String.Format("Failed to create new service '{0}'", name));
+
+ return new Service(scmHandle, serviceHandle, name);
+ }
+
+ public void Delete()
+ {
+ if (!NativeMethods.DeleteService(_serviceHandle))
+ throw new ServiceManagerException("Failed to delete service");
+ Dispose();
+ }
+
+ public void Dispose()
+ {
+ if (_serviceHandle != null)
+ _serviceHandle.Dispose();
+
+ if (_scmHandle != null)
+ _scmHandle.Dispose();
+ GC.SuppressFinalize(this);
+ }
+
+ public void Refresh()
+ {
+ UInt32 bytesNeeded;
+ NativeMethods.QueryServiceConfigW(_serviceHandle, IntPtr.Zero, 0, out bytesNeeded);
+
+ _rawServiceConfig = new SafeMemoryBuffer((int)bytesNeeded);
+ if (!NativeMethods.QueryServiceConfigW(_serviceHandle, _rawServiceConfig.DangerousGetHandle(), bytesNeeded,
+ out bytesNeeded))
+ {
+ throw new ServiceManagerException("Failed to query service config");
+ }
+
+ NativeMethods.QueryServiceStatusEx(_serviceHandle, 0, IntPtr.Zero, 0, out bytesNeeded);
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer((int)bytesNeeded))
+ {
+ if (!NativeMethods.QueryServiceStatusEx(_serviceHandle, 0, buffer.DangerousGetHandle(), bytesNeeded,
+ out bytesNeeded))
+ {
+ throw new ServiceManagerException("Failed to query service status");
+ }
+
+ _statusProcess = (NativeHelpers.SERVICE_STATUS_PROCESS)Marshal.PtrToStructure(
+ buffer.DangerousGetHandle(), typeof(NativeHelpers.SERVICE_STATUS_PROCESS));
+ }
+ }
+
+ private void ChangeServiceConfig(ServiceType serviceType = (ServiceType)SERVICE_NO_CHANGE,
+ ServiceStartType startType = (ServiceStartType)SERVICE_NO_CHANGE,
+ ErrorControl errorControl = (ErrorControl)SERVICE_NO_CHANGE, string binaryPath = null,
+ string loadOrderGroup = null, List<string> dependencies = null, string startName = null,
+ string password = null, string displayName = null)
+ {
+ string depString = null;
+ if (dependencies != null && dependencies.Count > 0)
+ depString = String.Join("\0", dependencies) + "\0\0";
+
+ if (!NativeMethods.ChangeServiceConfigW(_serviceHandle, serviceType, startType, errorControl, binaryPath,
+ loadOrderGroup, IntPtr.Zero, depString, startName, password, displayName))
+ {
+ throw new ServiceManagerException("Failed to change service config");
+ }
+
+ Refresh();
+ }
+
+ private void ChangeServiceConfig2(NativeHelpers.ConfigInfoLevel infoLevel, object info)
+ {
+ using (SafeMemoryBuffer buffer = new SafeMemoryBuffer(Marshal.SizeOf(info)))
+ {
+ Marshal.StructureToPtr(info, buffer.DangerousGetHandle(), false);
+
+ if (!NativeMethods.ChangeServiceConfig2W(_serviceHandle, infoLevel, buffer.DangerousGetHandle()))
+ throw new ServiceManagerException("Failed to change service config");
+ }
+ }
+
+ private static SafeServiceHandle OpenSCManager(SCMRights desiredAccess)
+ {
+ SafeServiceHandle handle = NativeMethods.OpenSCManagerW(null, null, desiredAccess);
+ if (handle.IsInvalid)
+ throw new ServiceManagerException("Failed to open SCManager");
+
+ return handle;
+ }
+
+ private T QueryServiceConfig2<T>(NativeHelpers.ConfigInfoLevel infoLevel)
+ {
+ using (SafeMemoryBuffer buffer = QueryServiceConfig2(infoLevel))
+ return (T)Marshal.PtrToStructure(buffer.DangerousGetHandle(), typeof(T));
+ }
+
+ private SafeMemoryBuffer QueryServiceConfig2(NativeHelpers.ConfigInfoLevel infoLevel)
+ {
+ UInt32 bytesNeeded = 0;
+ NativeMethods.QueryServiceConfig2W(_serviceHandle, infoLevel, IntPtr.Zero, 0, out bytesNeeded);
+
+ SafeMemoryBuffer buffer = new SafeMemoryBuffer((int)bytesNeeded);
+ if (!NativeMethods.QueryServiceConfig2W(_serviceHandle, infoLevel, buffer.DangerousGetHandle(), bytesNeeded,
+ out bytesNeeded))
+ {
+ throw new ServiceManagerException(String.Format("QueryServiceConfig2W({0}) failed",
+ infoLevel.ToString()));
+ }
+
+ return buffer;
+ }
+
+ private static IntPtr StructureToPtr(object structure, IntPtr ptr)
+ {
+ Marshal.StructureToPtr(structure, ptr, false);
+ return IntPtr.Add(ptr, Marshal.SizeOf(structure));
+ }
+
+ ~Service() { Dispose(); }
+ }
+}
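+
+// A minimal usage sketch (hypothetical values):
+//
+//   using (Service service = new Service("wuauserv", ServiceRights.QueryConfig | ServiceRights.QueryStatus))
+//   {
+//       Console.WriteLine("{0}: {1} ({2})", service.ServiceName, service.State, service.StartType);
+//   }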
diff --git a/test/support/windows-integration/plugins/modules/async_status.ps1 b/test/support/windows-integration/plugins/modules/async_status.ps1
new file mode 100644
index 00000000..1ce3ff40
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/async_status.ps1
@@ -0,0 +1,58 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$results = @{changed=$false}
+
+$parsed_args = Parse-Args $args
+$jid = Get-AnsibleParam $parsed_args "jid" -failifempty $true -resultobj $results
+$mode = Get-AnsibleParam $parsed_args "mode" -Default "status" -ValidateSet "status","cleanup"
+
+# parsed in from the async_status action plugin
+$async_dir = Get-AnsibleParam $parsed_args "_async_dir" -type "path" -failifempty $true
+
+$log_path = [System.IO.Path]::Combine($async_dir, $jid)
+
+If(-not $(Test-Path $log_path))
+{
+ Fail-Json @{ansible_job_id=$jid; started=1; finished=1} "could not find job at '$async_dir'"
+}
+
+If($mode -eq "cleanup") {
+ Remove-Item $log_path -Recurse
+ Exit-Json @{ansible_job_id=$jid; erased=$log_path}
+}
+
+# NOT in cleanup mode, assume regular status mode
+# no remote kill mode currently exists, but probably should
+# consider log_path + ".pid" file and also unlink that above
+
+$data = $null
+Try {
+ $data_raw = Get-Content $log_path
+
+ # TODO: move this into module_utils/powershell.ps1?
+ $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer
+ $data = $jss.DeserializeObject($data_raw)
+}
+Catch {
+ If(-not $data_raw) {
+ # file not written yet? That means it is running
+ Exit-Json @{results_file=$log_path; ansible_job_id=$jid; started=1; finished=0}
+ }
+ Else {
+        Fail-Json @{ansible_job_id=$jid; results_file=$log_path; started=1; finished=1} "Could not parse job output: $data_raw"
+ }
+}
+
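+# a finished job writes the module's full result, which has no 'started' key;
+# normalize the status keys before returning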
+If (-not $data.ContainsKey("started")) {
+ $data['finished'] = 1
+ $data['ansible_job_id'] = $jid
+}
+ElseIf (-not $data.ContainsKey("finished")) {
+ $data['finished'] = 0
+}
+
+Exit-Json $data
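+
+# A minimal usage sketch (hypothetical tasks); this module is normally invoked
+# through the async_status action plugin:
+#
+#   - name: start a long running operation
+#     win_shell: Start-Sleep -Seconds 120
+#     async: 300
+#     poll: 0
+#     register: job
+#
+#   - name: wait for the job to finish
+#     async_status:
+#       jid: '{{ job.ansible_job_id }}'
+#     register: result
+#     until: result.finished
+#     retries: 30
+#     delay: 10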
diff --git a/test/support/windows-integration/plugins/modules/setup.ps1 b/test/support/windows-integration/plugins/modules/setup.ps1
new file mode 100644
index 00000000..50647239
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/setup.ps1
@@ -0,0 +1,516 @@
+#!powershell
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+Function Get-CustomFacts {
+ [cmdletBinding()]
+ param (
+ [Parameter(mandatory=$false)]
+ $factpath = $null
+ )
+
+ if (Test-Path -Path $factpath) {
+ $FactsFiles = Get-ChildItem -Path $factpath | Where-Object -FilterScript {($PSItem.PSIsContainer -eq $false) -and ($PSItem.Extension -eq '.ps1')}
+
+ foreach ($FactsFile in $FactsFiles) {
+ $out = & $($FactsFile.FullName)
+ $result.ansible_facts.Add("ansible_$(($FactsFile.Name).Split('.')[0])", $out)
+ }
+ }
+ else
+ {
+ Add-Warning $result "Non-existent path was set for local facts - $factpath"
+ }
+}
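+# Illustrative sketch (hypothetical file): a custom fact script at
+#   C:\ansible\facts\uptime_note.ps1 that returns @{ note = 'example' }
+# would be executed by Get-CustomFacts above and surface as the fact
+# 'ansible_uptime_note'.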
+
+Function Get-MachineSid {
+ # The Machine SID is stored in HKLM:\SECURITY\SAM\Domains\Account and is
+ # only accessible by the Local System account. This method gets the local
+ # admin account (its SID ends with -500) and strips the RID to get the machine SID.
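+ # For example (hypothetical values): if the admin account resolves to
+ # S-1-5-21-111-222-333-500, AccountDomainSid yields the machine SID
+ # S-1-5-21-111-222-333.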
+
+ $machine_sid = $null
+
+ try {
+ $admins_sid = "S-1-5-32-544"
+ $admin_group = ([Security.Principal.SecurityIdentifier]$admins_sid).Translate([Security.Principal.NTAccount]).Value
+
+ Add-Type -AssemblyName System.DirectoryServices.AccountManagement
+ $principal_context = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine)
+ $group_principal = New-Object -TypeName System.DirectoryServices.AccountManagement.GroupPrincipal($principal_context, $admin_group)
+ $searcher = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalSearcher($group_principal)
+ $groups = $searcher.FindOne()
+
+ foreach ($user in $groups.Members) {
+ $user_sid = $user.Sid
+ if ($user_sid.Value.EndsWith("-500")) {
+ $machine_sid = $user_sid.AccountDomainSid.Value
+ break
+ }
+ }
+ } catch {
+ # can fail for any number of reasons; if it does, just return the original null
+ Add-Warning -obj $result -message "Error during machine sid retrieval: $($_.Exception.Message)"
+ }
+
+ return $machine_sid
+}
+
+$cim_instances = @{}
+
+Function Get-LazyCimInstance([string]$instance_name, [string]$namespace="Root\CIMV2") {
+ if(-not $cim_instances.ContainsKey($instance_name)) {
+ $cim_instances[$instance_name] = $(Get-CimInstance -Namespace $namespace -ClassName $instance_name)
+ }
+
+ return $cim_instances[$instance_name]
+}
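+# Usage sketch: repeated calls reuse the cached instance, e.g.
+#   $os = Get-LazyCimInstance Win32_OperatingSystem  # first call queries CIM
+#   $os = Get-LazyCimInstance Win32_OperatingSystem  # second call hits $cim_instances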
+
+$result = @{
+ ansible_facts = @{ }
+ changed = $false
+}
+
+$grouped_subsets = @{
+ min=[System.Collections.Generic.List[string]]@('date_time','distribution','dns','env','local','platform','powershell_version','user')
+ network=[System.Collections.Generic.List[string]]@('all_ipv4_addresses','all_ipv6_addresses','interfaces','windows_domain', 'winrm')
+ hardware=[System.Collections.Generic.List[string]]@('bios','memory','processor','uptime','virtual')
+ external=[System.Collections.Generic.List[string]]@('facter')
+}
+
+# build "all" set from everything mentioned in the group- this means every value must be in at least one subset to be considered legal
+$all_set = [System.Collections.Generic.HashSet[string]]@()
+
+foreach($kv in $grouped_subsets.GetEnumerator()) {
+ [void] $all_set.UnionWith($kv.Value)
+}
+
+# dynamically create an "all" subset now that we know what should be in it
+$grouped_subsets['all'] = [System.Collections.Generic.List[string]]$all_set
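+# e.g. $grouped_subsets['all'] now holds the union of the four lists above,
+# from 'date_time' through 'facter'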
+
+# start with all, build up gather and exclude subsets
+$gather_subset = [System.Collections.Generic.HashSet[string]]$grouped_subsets.all
+$explicit_subset = [System.Collections.Generic.HashSet[string]]@()
+$exclude_subset = [System.Collections.Generic.HashSet[string]]@()
+
+$params = Parse-Args $args -supports_check_mode $true
+$factpath = Get-AnsibleParam -obj $params -name "fact_path" -type "path"
+$gather_subset_source = Get-AnsibleParam -obj $params -name "gather_subset" -type "list" -default "all"
+
+foreach($item in $gather_subset_source) {
+ if(([string]$item).StartsWith("!")) {
+ $item = ([string]$item).Substring(1)
+ if($item -eq "all") {
+ $all_minus_min = [System.Collections.Generic.HashSet[string]]@($all_set)
+ [void] $all_minus_min.ExceptWith($grouped_subsets.min)
+ [void] $exclude_subset.UnionWith($all_minus_min)
+ }
+ elseif($grouped_subsets.ContainsKey($item)) {
+ [void] $exclude_subset.UnionWith($grouped_subsets[$item])
+ }
+ elseif($all_set.Contains($item)) {
+ [void] $exclude_subset.Add($item)
+ }
+ # NB: invalid exclude values are ignored, since that's what posix setup does
+ }
+ else {
+ if($grouped_subsets.ContainsKey($item)) {
+ [void] $explicit_subset.UnionWith($grouped_subsets[$item])
+ }
+ elseif($all_set.Contains($item)) {
+ [void] $explicit_subset.Add($item)
+ }
+ else {
+ # NB: POSIX setup fails on invalid value; we warn, because we don't implement the same set as POSIX
+ # and we don't have platform-specific config for this...
+ Add-Warning $result "invalid value $item specified in gather_subset"
+ }
+ }
+}
+
+[void] $gather_subset.ExceptWith($exclude_subset)
+[void] $gather_subset.UnionWith($explicit_subset)
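+# Illustration (assumed input): gather_subset=['!all', 'interfaces'] first
+# excludes everything outside 'min', then explicitly re-adds 'interfaces',
+# leaving the 'min' facts plus 'interfaces'.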
+
+$ansible_facts = @{
+ gather_subset=@($gather_subset_source)
+ module_setup=$true
+}
+
+$osversion = [Environment]::OSVersion
+
+if ($osversion.Version -lt [version]"6.2") {
+ # Server 2008, 2008 R2, and Windows 7 are not tested in CI and we want to let customers know about it before
+ # removing support altogether.
+ $version_string = "{0}.{1}" -f ($osversion.Version.Major, $osversion.Version.Minor)
+ $msg = "Windows version '$version_string' will no longer be supported or tested in the next Ansible release"
+ Add-DeprecationWarning -obj $result -message $msg -version "2.11"
+}
+
+if($gather_subset.Contains('all_ipv4_addresses') -or $gather_subset.Contains('all_ipv6_addresses')) {
+ $netcfg = Get-LazyCimInstance Win32_NetworkAdapterConfiguration
+
+ # TODO: split v4/v6 properly, return in separate keys
+ $ips = @()
+ Foreach ($ip in $netcfg.IPAddress) {
+ If ($ip) {
+ $ips += $ip
+ }
+ }
+
+ $ansible_facts += @{
+ ansible_ip_addresses = $ips
+ }
+}
+
+if($gather_subset.Contains('bios')) {
+ $win32_bios = Get-LazyCimInstance Win32_Bios
+ $win32_cs = Get-LazyCimInstance Win32_ComputerSystem
+ $ansible_facts += @{
+ ansible_bios_date = $win32_bios.ReleaseDate.ToString("MM/dd/yyyy")
+ ansible_bios_version = $win32_bios.SMBIOSBIOSVersion
+ ansible_product_name = $win32_cs.Model.Trim()
+ ansible_product_serial = $win32_bios.SerialNumber
+ # ansible_product_version = ([string] $win32_cs.SystemFamily)
+ }
+}
+
+if($gather_subset.Contains('date_time')) {
+ $datetime = (Get-Date)
+ $datetime_utc = $datetime.ToUniversalTime()
+ $date = @{
+ date = $datetime.ToString("yyyy-MM-dd")
+ day = $datetime.ToString("dd")
+ epoch = (Get-Date -UFormat "%s")
+ hour = $datetime.ToString("HH")
+ iso8601 = $datetime_utc.ToString("yyyy-MM-ddTHH:mm:ssZ")
+ iso8601_basic = $datetime.ToString("yyyyMMddTHHmmssffffff")
+ iso8601_basic_short = $datetime.ToString("yyyyMMddTHHmmss")
+ iso8601_micro = $datetime_utc.ToString("yyyy-MM-ddTHH:mm:ss.ffffffZ")
+ minute = $datetime.ToString("mm")
+ month = $datetime.ToString("MM")
+ second = $datetime.ToString("ss")
+ time = $datetime.ToString("HH:mm:ss")
+ tz = ([System.TimeZoneInfo]::Local.Id)
+ tz_offset = $datetime.ToString("zzzz")
+ # Ensure that the weekday is in English
+ weekday = $datetime.ToString("dddd", [System.Globalization.CultureInfo]::InvariantCulture)
+ weekday_number = (Get-Date -UFormat "%w")
+ weeknumber = (Get-Date -UFormat "%W")
+ year = $datetime.ToString("yyyy")
+ }
+
+ $ansible_facts += @{
+ ansible_date_time = $date
+ }
+}
+
+if($gather_subset.Contains('distribution')) {
+ $win32_os = Get-LazyCimInstance Win32_OperatingSystem
+ $product_type = switch($win32_os.ProductType) {
+ 1 { "workstation" }
+ 2 { "domain_controller" }
+ 3 { "server" }
+ default { "unknown" }
+ }
+
+ $installation_type = $null
+ $current_version_path = "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion"
+ if (Test-Path -LiteralPath $current_version_path) {
+ $install_type_prop = Get-ItemProperty -LiteralPath $current_version_path -ErrorAction SilentlyContinue
+ $installation_type = [String]$install_type_prop.InstallationType
+ }
+
+ $ansible_facts += @{
+ ansible_distribution = $win32_os.Caption
+ ansible_distribution_version = $osversion.Version.ToString()
+ ansible_distribution_major_version = $osversion.Version.Major.ToString()
+ ansible_os_family = "Windows"
+ ansible_os_name = ($win32_os.Name.Split('|')[0]).Trim()
+ ansible_os_product_type = $product_type
+ ansible_os_installation_type = $installation_type
+ }
+}
+
+if($gather_subset.Contains('env')) {
+ $env_vars = @{ }
+ foreach ($item in Get-ChildItem Env:) {
+ $name = $item | Select-Object -ExpandProperty Name
+ # PowerShell ConvertTo-Json fails if the string ends with \
+ $value = ($item | Select-Object -ExpandProperty Value).TrimEnd("\")
+ $env_vars.Add($name, $value)
+ }
+
+ $ansible_facts += @{
+ ansible_env = $env_vars
+ }
+}
+
+if($gather_subset.Contains('facter')) {
+ # See if Facter is on the System Path
+ Try {
+ Get-Command facter -ErrorAction Stop > $null
+ $facter_installed = $true
+ } Catch {
+ $facter_installed = $false
+ }
+
+ # Get JSON from Facter, and parse it out.
+ if ($facter_installed) {
+ &facter -j | Tee-Object -Variable facter_output > $null
+ $facts = "$facter_output" | ConvertFrom-Json
+ ForEach($fact in $facts.PSObject.Properties) {
+ $fact_name = $fact.Name
+ $ansible_facts.Add("facter_$fact_name", $fact.Value)
+ }
+ }
+}
+
+if($gather_subset.Contains('interfaces')) {
+ $netcfg = Get-LazyCimInstance Win32_NetworkAdapterConfiguration
+ $ActiveNetcfg = @()
+ $ActiveNetcfg += $netcfg | Where-Object {$_.ipaddress -ne $null}
+
+ $namespaces = Get-LazyCimInstance __Namespace -namespace root
+ if ($namespaces | Where-Object { $_.Name -eq "StandardCimv2" }) {
+ $net_adapters = Get-LazyCimInstance MSFT_NetAdapter -namespace Root\StandardCimv2
+ $guid_key = "InterfaceGUID"
+ $name_key = "Name"
+ } else {
+ $net_adapters = Get-LazyCimInstance Win32_NetworkAdapter
+ $guid_key = "GUID"
+ $name_key = "NetConnectionID"
+ }
+
+ $formattednetcfg = @()
+ foreach ($adapter in $ActiveNetcfg)
+ {
+ $thisadapter = @{
+ default_gateway = $null
+ connection_name = $null
+ dns_domain = $adapter.dnsdomain
+ interface_index = $adapter.InterfaceIndex
+ interface_name = $adapter.description
+ macaddress = $adapter.macaddress
+ }
+
+ if ($adapter.defaultIPGateway)
+ {
+ $thisadapter.default_gateway = $adapter.DefaultIPGateway[0].ToString()
+ }
+ $net_adapter = $net_adapters | Where-Object { $_.$guid_key -eq $adapter.SettingID }
+ if ($net_adapter) {
+ $thisadapter.connection_name = $net_adapter.$name_key
+ }
+
+ $formattednetcfg += $thisadapter
+ }
+
+ $ansible_facts += @{
+ ansible_interfaces = $formattednetcfg
+ }
+}
+
+if ($gather_subset.Contains("local") -and $null -ne $factpath) {
+ # Get any custom facts; results are updated in $result.ansible_facts
+ Get-CustomFacts -factpath $factpath
+}
+
+if($gather_subset.Contains('memory')) {
+ $win32_cs = Get-LazyCimInstance Win32_ComputerSystem
+ $win32_os = Get-LazyCimInstance Win32_OperatingSystem
+ $ansible_facts += @{
+ # Win32_PhysicalMemory is empty on some virtual platforms
+ ansible_memtotal_mb = ([math]::ceiling($win32_cs.TotalPhysicalMemory / 1024 / 1024))
+ ansible_memfree_mb = ([math]::ceiling($win32_os.FreePhysicalMemory / 1024))
+ ansible_swaptotal_mb = ([math]::round($win32_os.TotalSwapSpaceSize / 1024))
+ ansible_pagefiletotal_mb = ([math]::round($win32_os.SizeStoredInPagingFiles / 1024))
+ ansible_pagefilefree_mb = ([math]::round($win32_os.FreeSpaceInPagingFiles / 1024))
+ }
+}
+
+
+if($gather_subset.Contains('platform')) {
+ $win32_cs = Get-LazyCimInstance Win32_ComputerSystem
+ $win32_os = Get-LazyCimInstance Win32_OperatingSystem
+ $domain_suffix = $win32_cs.Domain.Substring($win32_cs.Workgroup.length)
+ $fqdn = $win32_cs.DNSHostname
+
+ if( $domain_suffix -ne "")
+ {
+ $fqdn = $win32_cs.DNSHostname + "." + $domain_suffix
+ }
+
+ try {
+ $ansible_reboot_pending = Get-PendingRebootStatus
+ } catch {
+ # fails for non-admin users, set to null in this case
+ $ansible_reboot_pending = $null
+ }
+
+ $ansible_facts += @{
+ ansible_architecture = $win32_os.OSArchitecture
+ ansible_domain = $domain_suffix
+ ansible_fqdn = $fqdn
+ ansible_hostname = $win32_cs.DNSHostname
+ ansible_netbios_name = $win32_cs.Name
+ ansible_kernel = $osversion.Version.ToString()
+ ansible_nodename = $fqdn
+ ansible_machine_id = Get-MachineSid
+ ansible_owner_contact = ([string] $win32_cs.PrimaryOwnerContact)
+ ansible_owner_name = ([string] $win32_cs.PrimaryOwnerName)
+ # FUTURE: should this live in its own subset?
+ ansible_reboot_pending = $ansible_reboot_pending
+ ansible_system = $osversion.Platform.ToString()
+ ansible_system_description = ([string] $win32_os.Description)
+ ansible_system_vendor = $win32_cs.Manufacturer
+ }
+}
+
+if($gather_subset.Contains('powershell_version')) {
+ $ansible_facts += @{
+ ansible_powershell_version = ($PSVersionTable.PSVersion.Major)
+ }
+}
+
+if($gather_subset.Contains('processor')) {
+ $win32_cs = Get-LazyCimInstance Win32_ComputerSystem
+ $win32_cpu = Get-LazyCimInstance Win32_Processor
+ if ($win32_cpu -is [array]) {
+ # multi-socket, pick first
+ $win32_cpu = $win32_cpu[0]
+ }
+
+ $cpu_list = @( )
+ for ($i=1; $i -le $win32_cs.NumberOfLogicalProcessors; $i++) {
+ $cpu_list += $win32_cpu.Manufacturer
+ $cpu_list += $win32_cpu.Name
+ }
+
+ $ansible_facts += @{
+ ansible_processor = $cpu_list
+ ansible_processor_cores = $win32_cpu.NumberOfCores
+ ansible_processor_count = $win32_cs.NumberOfProcessors
+ ansible_processor_threads_per_core = ($win32_cpu.NumberOfLogicalProcessors / $win32_cpu.NumberOfCores)
+ ansible_processor_vcpus = $win32_cs.NumberOfLogicalProcessors
+ }
+}
+
+if($gather_subset.Contains('uptime')) {
+ $win32_os = Get-LazyCimInstance Win32_OperatingSystem
+ $ansible_facts += @{
+ ansible_lastboot = $win32_os.lastbootuptime.ToString("u")
+ ansible_uptime_seconds = $([System.Convert]::ToInt64($(Get-Date).Subtract($win32_os.lastbootuptime).TotalSeconds))
+ }
+}
+
+if($gather_subset.Contains('user')) {
+ $user = [Security.Principal.WindowsIdentity]::GetCurrent()
+ $ansible_facts += @{
+ ansible_user_dir = $env:userprofile
+ # Win32_UserAccount.FullName is probably the right thing here, but it can be expensive to get on large domains
+ ansible_user_gecos = ""
+ ansible_user_id = $env:username
+ ansible_user_sid = $user.User.Value
+ }
+}
+
+if($gather_subset.Contains('windows_domain')) {
+ $win32_cs = Get-LazyCimInstance Win32_ComputerSystem
+ $domain_roles = @{
+ 0 = "Stand-alone workstation"
+ 1 = "Member workstation"
+ 2 = "Stand-alone server"
+ 3 = "Member server"
+ 4 = "Backup domain controller"
+ 5 = "Primary domain controller"
+ }
+
+ $domain_role = $domain_roles.Get_Item([Int32]$win32_cs.DomainRole)
+
+ $ansible_facts += @{
+ ansible_windows_domain = $win32_cs.Domain
+ ansible_windows_domain_member = $win32_cs.PartOfDomain
+ ansible_windows_domain_role = $domain_role
+ }
+}
+
+if($gather_subset.Contains('winrm')) {
+
+ $winrm_https_listener_parent_paths = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse -ErrorAction SilentlyContinue | `
+ Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | Select-Object PSParentPath
+ if ($winrm_https_listener_parent_paths -isnot [array]) {
+ $winrm_https_listener_parent_paths = @($winrm_https_listener_parent_paths)
+ }
+
+ $winrm_https_listener_paths = @()
+ foreach ($winrm_https_listener_parent_path in $winrm_https_listener_parent_paths) {
+ $winrm_https_listener_paths += $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\"))
+ }
+
+ $https_listeners = @()
+ foreach ($winrm_https_listener_path in $winrm_https_listener_paths) {
+ $https_listeners += Get-ChildItem -Path "WSMan:\localhost\Listener$winrm_https_listener_path"
+ }
+
+ $winrm_cert_thumbprints = @()
+ foreach ($https_listener in $https_listeners) {
+ $winrm_cert_thumbprints += $https_listener | Where-Object {$_.Name -EQ "CertificateThumbprint" } | Select-Object Value
+ }
+
+ $winrm_cert_expiry = @()
+ foreach ($winrm_cert_thumbprint in $winrm_cert_thumbprints) {
+ Try {
+ $winrm_cert_expiry += Get-ChildItem -Path Cert:\LocalMachine\My | Where-Object Thumbprint -EQ $winrm_cert_thumbprint.Value.ToString().ToUpper() | Select-Object NotAfter
+ } Catch {
+ Add-Warning -obj $result -message "Error during certificate expiration retrieval: $($_.Exception.Message)"
+ }
+ }
+
+ $winrm_cert_expirations = $winrm_cert_expiry | Sort-Object NotAfter
+ if ($winrm_cert_expirations) {
+ # this fact was renamed from ansible_winrm_certificate_expires due to collision with ansible_winrm_X connection var pattern
+ $ansible_facts.Add("ansible_win_rm_certificate_expires", $winrm_cert_expirations[0].NotAfter.ToString("yyyy-MM-dd HH:mm:ss"))
+ }
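+ # e.g. with a single HTTPS listener this yields something like
+ # ansible_win_rm_certificate_expires = '2025-01-01 00:00:00' (illustrative value)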
+}
+
+if($gather_subset.Contains('virtual')) {
+ $machine_info = Get-LazyCimInstance Win32_ComputerSystem
+
+ switch ($machine_info.model) {
+ "Virtual Machine" {
+ $machine_type="Hyper-V"
+ $machine_role="guest"
+ }
+
+ "VMware Virtual Platform" {
+ $machine_type="VMware"
+ $machine_role="guest"
+ }
+
+ "VirtualBox" {
+ $machine_type="VirtualBox"
+ $machine_role="guest"
+ }
+
+ "HVM domU" {
+ $machine_type="Xen"
+ $machine_role="guest"
+ }
+
+ default {
+ $machine_type="NA"
+ $machine_role="NA"
+ }
+ }
+
+ $ansible_facts += @{
+ ansible_virtualization_role = $machine_role
+ ansible_virtualization_type = $machine_type
+ }
+}
+
+$result.ansible_facts += $ansible_facts
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/slurp.ps1 b/test/support/windows-integration/plugins/modules/slurp.ps1
new file mode 100644
index 00000000..eb506c7c
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/slurp.ps1
@@ -0,0 +1,28 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$params = Parse-Args $args -supports_check_mode $true;
+$src = Get-AnsibleParam -obj $params -name "src" -type "path" -aliases "path" -failifempty $true;
+
+$result = @{
+ changed = $false;
+}
+
+If (Test-Path -LiteralPath $src -PathType Leaf)
+{
+ $bytes = [System.IO.File]::ReadAllBytes($src);
+ $result.content = [System.Convert]::ToBase64String($bytes);
+ $result.encoding = "base64";
+ Exit-Json $result;
+}
+ElseIf (Test-Path -LiteralPath $src -PathType Container)
+{
+ Fail-Json $result "Path $src is a directory";
+}
+Else
+{
+ Fail-Json $result "Path $src is not found";
+}
diff --git a/test/support/windows-integration/plugins/modules/win_acl.ps1 b/test/support/windows-integration/plugins/modules/win_acl.ps1
new file mode 100644
index 00000000..e3c38130
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_acl.ps1
@@ -0,0 +1,225 @@
+#!powershell
+
+# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+# Copyright: (c) 2015, Trond Hindenes
+# Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+#Requires -Module Ansible.ModuleUtils.SID
+
+$ErrorActionPreference = "Stop"
+
+# win_acl module (File/Resources Permission Additions/Removal)
+
+#Functions
+function Get-UserSID {
+ param(
+ [String]$AccountName
+ )
+
+ $userSID = $null
+ $searchAppPools = $false
+
+ if ($AccountName.Split("\").Count -gt 1) {
+ if ($AccountName.Split("\")[0] -eq "IIS APPPOOL") {
+ $searchAppPools = $true
+ $AccountName = $AccountName.Split("\")[1]
+ }
+ }
+
+ if ($searchAppPools) {
+ Import-Module -Name WebAdministration
+ $testIISPath = Test-Path -LiteralPath "IIS:"
+ if ($testIISPath) {
+ $appPoolObj = Get-ItemProperty -LiteralPath "IIS:\AppPools\$AccountName"
+ $userSID = $appPoolObj.applicationPoolSid
+ }
+ }
+ else {
+ $userSID = Convert-ToSID -account_name $AccountName
+ }
+
+ return $userSID
+}
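+<#
+ Sketch (hypothetical account names):
+   Get-UserSID -AccountName 'BUILTIN\Users'       # resolved via Convert-ToSID
+   Get-UserSID -AccountName 'IIS APPPOOL\MySite'  # resolved via IIS:\AppPools\MySite
+#>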
+
+$params = Parse-Args $args
+
+Function SetPrivilegeTokens() {
+ # Set privilege tokens only if running as admin.
+ # Admins would have these privileges, or be able to set them in the UI, anyway
+
+ $adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+ $myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+ $myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+
+ if ($myWindowsPrincipal.IsInRole($adminRole)) {
+ # Need to adjust token privs when executing Set-ACL in certain cases.
+ # e.g. d:\testdir is owned by a group in which the current user is not a member and no perms are inherited from d:\
+ # This also sets us up for setting the owner as a feature.
+ # See the following for details of each privilege
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/bb530716(v=vs.85).aspx
+ $privileges = @(
+ "SeRestorePrivilege", # Grants all write access control to any file, regardless of ACL.
+ "SeBackupPrivilege", # Grants all read access control to any file, regardless of ACL.
+ "SeTakeOwnershipPrivilege" # Grants ability to take owernship of an object w/out being granted discretionary access
+ )
+ foreach ($privilege in $privileges) {
+ $state = Get-AnsiblePrivilege -Name $privilege
+ if ($state -eq $false) {
+ Set-AnsiblePrivilege -Name $privilege -Value $true
+ }
+ }
+ }
+}
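+# Illustration: for each privilege above, Get-AnsiblePrivilege returns $true or
+# $false when the token holds the privilege (enabled or disabled), and
+# Set-AnsiblePrivilege enables it only when the check reports $false.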
+
+
+$result = @{
+ changed = $false
+}
+
+$path = Get-AnsibleParam -obj $params -name "path" -type "str" -failifempty $true
+$user = Get-AnsibleParam -obj $params -name "user" -type "str" -failifempty $true
+$rights = Get-AnsibleParam -obj $params -name "rights" -type "str" -failifempty $true
+
+$type = Get-AnsibleParam -obj $params -name "type" -type "str" -failifempty $true -validateset "allow","deny"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "absent","present"
+
+$inherit = Get-AnsibleParam -obj $params -name "inherit" -type "str"
+$propagation = Get-AnsibleParam -obj $params -name "propagation" -type "str" -default "None" -validateset "InheritOnly","None","NoPropagateInherit"
+
+# We mount the HKCR, HKU, and HKCC registry hives so PS can access them.
+# Network paths have no qualifiers so we use -EA SilentlyContinue to ignore that
+$path_qualifier = Split-Path -Path $path -Qualifier -ErrorAction SilentlyContinue
+if ($path_qualifier -eq "HKCR:" -and (-not (Test-Path -LiteralPath HKCR:\))) {
+ New-PSDrive -Name HKCR -PSProvider Registry -Root HKEY_CLASSES_ROOT > $null
+}
+if ($path_qualifier -eq "HKU:" -and (-not (Test-Path -LiteralPath HKU:\))) {
+ New-PSDrive -Name HKU -PSProvider Registry -Root HKEY_USERS > $null
+}
+if ($path_qualifier -eq "HKCC:" -and (-not (Test-Path -LiteralPath HKCC:\))) {
+ New-PSDrive -Name HKCC -PSProvider Registry -Root HKEY_CURRENT_CONFIG > $null
+}
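+# e.g. for a hypothetical path 'HKU:\S-1-5-20\Software', the qualifier is
+# 'HKU:' and the HKU: drive is created above only if not already mounted.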
+
+If (-Not (Test-Path -LiteralPath $path)) {
+ Fail-Json -obj $result -message "$path file or directory does not exist on the host"
+}
+
+# Test that the user/group is resolvable on the local machine
+$sid = Get-UserSID -AccountName $user
+if (!$sid) {
+ Fail-Json -obj $result -message "$user is not a valid user or group on the host machine or domain"
+}
+
+If (Test-Path -LiteralPath $path -PathType Leaf) {
+ $inherit = "None"
+}
+ElseIf ($null -eq $inherit) {
+ $inherit = "ContainerInherit, ObjectInherit"
+}
+
+# Bug in Set-Acl, Get-Acl where -LiteralPath only works for the Registry provider if the location is in that root
+# qualifier. We also don't have a qualifier for a network path, so only change location if the qualifier is not null
+if ($null -ne $path_qualifier) {
+ Push-Location -LiteralPath $path_qualifier
+}
+
+Try {
+ SetPrivilegeTokens
+ $path_item = Get-Item -LiteralPath $path -Force
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ $colRights = [System.Security.AccessControl.RegistryRights]$rights
+ }
+ Else {
+ $colRights = [System.Security.AccessControl.FileSystemRights]$rights
+ }
+
+ $InheritanceFlag = [System.Security.AccessControl.InheritanceFlags]$inherit
+ $PropagationFlag = [System.Security.AccessControl.PropagationFlags]$propagation
+
+ If ($type -eq "allow") {
+ $objType =[System.Security.AccessControl.AccessControlType]::Allow
+ }
+ Else {
+ $objType =[System.Security.AccessControl.AccessControlType]::Deny
+ }
+
+ $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ $objACE = New-Object System.Security.AccessControl.RegistryAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType)
+ }
+ Else {
+ $objACE = New-Object System.Security.AccessControl.FileSystemAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType)
+ }
+ $objACL = Get-ACL -LiteralPath $path
+
+ # Check if the ACE already exists in the object's ACL list
+ $match = $false
+
+ ForEach($rule in $objACL.GetAccessRules($true, $true, [System.Security.Principal.SecurityIdentifier])){
+
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ If (($rule.RegistryRights -eq $objACE.RegistryRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($rule.IdentityReference -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) {
+ $match = $true
+ Break
+ }
+ } else {
+ If (($rule.FileSystemRights -eq $objACE.FileSystemRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($rule.IdentityReference -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) {
+ $match = $true
+ Break
+ }
+ }
+ }
+
+ If ($state -eq "present" -And $match -eq $false) {
+ Try {
+ $objACL.AddAccessRule($objACE)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ Set-ACL -LiteralPath $path -AclObject $objACL
+ } else {
+ (Get-Item -LiteralPath $path).SetAccessControl($objACL)
+ }
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json -obj $result -message "an exception occurred when adding the specified rule - $($_.Exception.Message)"
+ }
+ }
+ ElseIf ($state -eq "absent" -And $match -eq $true) {
+ Try {
+ $objACL.RemoveAccessRule($objACE)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ Set-ACL -LiteralPath $path -AclObject $objACL
+ } else {
+ (Get-Item -LiteralPath $path).SetAccessControl($objACL)
+ }
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json -obj $result -message "an exception occurred when removing the specified rule - $($_.Exception.Message)"
+ }
+ }
+ Else {
+ # The rule to be added already exists
+ If ($match -eq $true) {
+ Exit-Json -obj $result -message "the specified rule already exists"
+ }
+ # The rule to be removed does not exist
+ Else {
+ Exit-Json -obj $result -message "the specified rule does not exist"
+ }
+ }
+}
+Catch {
+ Fail-Json -obj $result -message "an error occurred when attempting to $state $rights permission(s) on $path for $user - $($_.Exception.Message)"
+}
+Finally {
+ # Make sure we revert the location stack to the original path just for cleanup's sake
+ if ($null -ne $path_qualifier) {
+ Pop-Location
+ }
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_acl.py b/test/support/windows-integration/plugins/modules/win_acl.py
new file mode 100644
index 00000000..14fbd82f
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_acl.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+# Copyright: (c) 2015, Trond Hindenes
+# Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_acl
+version_added: "2.0"
+short_description: Set file/directory/registry permissions for a system user or group
+description:
+- Add or remove rights/permissions for a given user or group for the specified
+ file, folder, registry key or AppPool identity.
+options:
+ path:
+ description:
+ - The path to the file or directory.
+ type: str
+ required: yes
+ user:
+ description:
+ - User or Group to add specified rights to act on src file/folder or
+ registry key.
+ type: str
+ required: yes
+ state:
+ description:
+ - Specify whether to add C(present) or remove C(absent) the specified access rule.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ type:
+ description:
+ - Specify whether to allow or deny the rights specified.
+ type: str
+ required: yes
+ choices: [ allow, deny ]
+ rights:
+ description:
+ - The rights/permissions that are to be allowed/denied for the specified
+ user or group for the item at C(path).
+ - If C(path) is a file or directory, rights can be any right under MSDN
+ FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
+ - If C(path) is a registry key, rights can be any right under MSDN
+ RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
+ type: str
+ required: yes
+ inherit:
+ description:
+ - Inherit flags on the ACL rules.
+ - Can be specified as a comma separated list, e.g. C(ContainerInherit),
+ C(ObjectInherit).
+ - For more information on the choices see MSDN InheritanceFlags enumeration
+ at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
+ - Defaults to C(ContainerInherit, ObjectInherit) for Directories.
+ type: str
+ choices: [ ContainerInherit, ObjectInherit ]
+ propagation:
+ description:
+ - Propagation flag on the ACL rules.
+ - For more information on the choices see MSDN PropagationFlags enumeration
+ at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
+ type: str
+ choices: [ InheritOnly, None, NoPropagateInherit ]
+ default: "None"
+notes:
+- If adding ACL's for AppPool identities (available since 2.3), the Windows
+ Feature "Web-Scripting-Tools" must be enabled.
+seealso:
+- module: win_acl_inheritance
+- module: win_file
+- module: win_owner
+- module: win_stat
+author:
+- Phil Schwartz (@schwartzmx)
+- Trond Hindenes (@trondhindenes)
+- Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = r'''
+- name: Restrict write and execute access to User Fed-Phil
+ win_acl:
+ user: Fed-Phil
+ path: C:\Important\Executable.exe
+ type: deny
+ rights: ExecuteFile,Write
+
+- name: Add IIS_IUSRS allow rights
+ win_acl:
+ path: C:\inetpub\wwwroot\MySite
+ user: IIS_IUSRS
+ rights: FullControl
+ type: allow
+ state: present
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Set registry key right
+ win_acl:
+ path: HKCU:\Bovine\Key
+ user: BUILTIN\Users
+ rights: EnumerateSubKeys
+ type: allow
+ state: present
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Remove FullControl AccessRule for IIS_IUSRS
+ win_acl:
+ path: C:\inetpub\wwwroot\MySite
+ user: IIS_IUSRS
+ rights: FullControl
+ type: allow
+ state: absent
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Deny Intern
+ win_acl:
+ path: C:\Administrator\Documents
+ user: Intern
+ rights: Read,Write,Modify,FullControl,Delete
+ type: deny
+ state: present
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_certificate_store.ps1 b/test/support/windows-integration/plugins/modules/win_certificate_store.ps1
new file mode 100644
index 00000000..db984130
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_certificate_store.ps1
@@ -0,0 +1,260 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$store_name_values = ([System.Security.Cryptography.X509Certificates.StoreName]).GetEnumValues() | ForEach-Object { $_.ToString() }
+$store_location_values = ([System.Security.Cryptography.X509Certificates.StoreLocation]).GetEnumValues() | ForEach-Object { $_.ToString() }
+
+$spec = @{
+ options = @{
+ state = @{ type = "str"; default = "present"; choices = "absent", "exported", "present" }
+ path = @{ type = "path" }
+ thumbprint = @{ type = "str" }
+ store_name = @{ type = "str"; default = "My"; choices = $store_name_values }
+ store_location = @{ type = "str"; default = "LocalMachine"; choices = $store_location_values }
+ password = @{ type = "str"; no_log = $true }
+ key_exportable = @{ type = "bool"; default = $true }
+ key_storage = @{ type = "str"; default = "default"; choices = "default", "machine", "user" }
+ file_type = @{ type = "str"; default = "der"; choices = "der", "pem", "pkcs12" }
+ }
+ required_if = @(
+ @("state", "absent", @("path", "thumbprint"), $true),
+ @("state", "exported", @("path", "thumbprint")),
+ @("state", "present", @("path"))
+ )
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+Function Get-CertFile($module, $path, $password, $key_exportable, $key_storage) {
+ # parses a certificate file and returns X509Certificate2Collection
+ if (-not (Test-Path -LiteralPath $path -PathType Leaf)) {
+ $module.FailJson("File at '$path' either does not exist or is not a file")
+ }
+
+ # must set at least the PersistKeySet flag so that the PrivateKey
+ # is stored in a permanent container and not deleted once the handle
+ # is gone.
+ $store_flags = [System.Security.Cryptography.X509Certificates.X509KeyStorageFlags]::PersistKeySet
+
+ $key_storage = $key_storage.substring(0,1).ToUpper() + $key_storage.substring(1).ToLower()
+ $store_flags = $store_flags -bor [Enum]::Parse([System.Security.Cryptography.X509Certificates.X509KeyStorageFlags], "$($key_storage)KeySet")
+ if ($key_exportable) {
+ $store_flags = $store_flags -bor [System.Security.Cryptography.X509Certificates.X509KeyStorageFlags]::Exportable
+ }
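+ # e.g. key_storage 'machine' composes the MachineKeySet flag with PersistKeySet
+ # (plus Exportable when key_exportable is true)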
+
+ # TODO: If I'm feeling adventurous, write code to parse a PKCS#12 PEM encoded
+ # file, as .NET does not have an easy way to import this
+ $certs = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2Collection
+
+ try {
+ $certs.Import($path, $password, $store_flags)
+ } catch {
+ $module.FailJson("Failed to load cert from file: $($_.Exception.Message)", $_)
+ }
+
+ return $certs
+}
+
+Function New-CertFile($module, $cert, $path, $type, $password) {
+ $content_type = switch ($type) {
+ "pem" { [System.Security.Cryptography.X509Certificates.X509ContentType]::Cert }
+ "der" { [System.Security.Cryptography.X509Certificates.X509ContentType]::Cert }
+ "pkcs12" { [System.Security.Cryptography.X509Certificates.X509ContentType]::Pkcs12 }
+ }
+ if ($type -eq "pkcs12") {
+ $missing_key = $false
+ if ($null -eq $cert.PrivateKey) {
+ $missing_key = $true
+ } elseif ($cert.PrivateKey.CspKeyContainerInfo.Exportable -eq $false) {
+ $missing_key = $true
+ }
+ if ($missing_key) {
+ $module.FailJson("Cannot export cert with key as PKCS12 when the key is not marked as exportable or not accessible by the current user")
+ }
+ }
+
+ if (Test-Path -LiteralPath $path) {
+ Remove-Item -LiteralPath $path -Force
+ $module.Result.changed = $true
+ }
+ try {
+ $cert_bytes = $cert.Export($content_type, $password)
+ } catch {
+ $module.FailJson("Failed to export certificate as bytes: $($_.Exception.Message)", $_)
+ }
+
+ # Need to manually handle a PEM file
+ if ($type -eq "pem") {
+ $cert_content = "-----BEGIN CERTIFICATE-----`r`n"
+ $base64_string = [System.Convert]::ToBase64String($cert_bytes, [System.Base64FormattingOptions]::InsertLineBreaks)
+ $cert_content += $base64_string
+ $cert_content += "`r`n-----END CERTIFICATE-----"
+ $file_encoding = [System.Text.Encoding]::ASCII
+ $cert_bytes = $file_encoding.GetBytes($cert_content)
+ } elseif ($type -eq "pkcs12") {
+ $module.Result.key_exported = $false
+ if ($null -ne $cert.PrivateKey) {
+ $module.Result.key_exportable = $cert.PrivateKey.CspKeyContainerInfo.Exportable
+ }
+ }
+
+ if (-not $module.CheckMode) {
+ try {
+ [System.IO.File]::WriteAllBytes($path, $cert_bytes)
+ } catch [System.ArgumentNullException] {
+ $module.FailJson("Failed to write cert to file, cert was null: $($_.Exception.Message)", $_)
+ } catch [System.IO.IOException] {
+ $module.FailJson("Failed to write cert to file due to IO Exception: $($_.Exception.Message)", $_)
+ } catch [System.UnauthorizedAccessException] {
+ $module.FailJson("Failed to write cert to file due to permissions: $($_.Exception.Message)", $_)
+ } catch {
+ $module.FailJson("Failed to write cert to file: $($_.Exception.Message)", $_)
+ }
+ }
+ $module.Result.changed = $true
+}
+
+Function Get-CertFileType($path, $password) {
+ $certs = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Certificate2Collection
+ try {
+ $certs.Import($path, $password, 0)
+ } catch [System.Security.Cryptography.CryptographicException] {
+ # the file is a pkcs12; we just had the wrong password
+ return "pkcs12"
+ } catch {
+ return "unknown"
+ }
+
+ $file_contents = Get-Content -LiteralPath $path -Raw
+ if ($file_contents.StartsWith("-----BEGIN CERTIFICATE-----")) {
+ return "pem"
+ } elseif ($file_contents.StartsWith("-----BEGIN PKCS7-----")) {
+ return "pkcs7-ascii"
+ } elseif ($certs.Count -gt 1) {
+ # multiple certs must be pkcs7
+ return "pkcs7-binary"
+ } elseif ($certs[0].HasPrivateKey) {
+ return "pkcs12"
+ } elseif ($path.EndsWith(".pfx") -or $path.EndsWith(".p12")) {
+ # no way to differentiate a pfx from a der file, so we must rely on the
+ # extension
+ return "pkcs12"
+ } else {
+ return "der"
+ }
+}
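+# Detection sketch (hypothetical files): content starting with
+# '-----BEGIN CERTIFICATE-----' returns 'pem'; an import that throws a
+# CryptographicException is treated as a password-protected 'pkcs12'.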
+
+$state = $module.Params.state
+$path = $module.Params.path
+$thumbprint = $module.Params.thumbprint
+$store_name = [System.Security.Cryptography.X509Certificates.StoreName]"$($module.Params.store_name)"
+$store_location = [System.Security.Cryptography.X509Certificates.Storelocation]"$($module.Params.store_location)"
+$password = $module.Params.password
+$key_exportable = $module.Params.key_exportable
+$key_storage = $module.Params.key_storage
+$file_type = $module.Params.file_type
+
+$module.Result.thumbprints = @()
+
+$store = New-Object -TypeName System.Security.Cryptography.X509Certificates.X509Store -ArgumentList $store_name, $store_location
+try {
+ $store.Open([System.Security.Cryptography.X509Certificates.OpenFlags]::ReadWrite)
+} catch [System.Security.Cryptography.CryptographicException] {
+ $module.FailJson("Unable to open the store as it is not readable: $($_.Exception.Message)", $_)
+} catch [System.Security.SecurityException] {
+ $module.FailJson("Unable to open the store with the current permissions: $($_.Exception.Message)", $_)
+} catch {
+ $module.FailJson("Unable to open the store: $($_.Exception.Message)", $_)
+}
+$store_certificates = $store.Certificates
+
+try {
+ if ($state -eq "absent") {
+ $cert_thumbprints = @()
+
+ if ($null -ne $path) {
+ $certs = Get-CertFile -module $module -path $path -password $password -key_exportable $key_exportable -key_storage $key_storage
+ foreach ($cert in $certs) {
+ $cert_thumbprints += $cert.Thumbprint
+ }
+ } elseif ($null -ne $thumbprint) {
+ $cert_thumbprints += $thumbprint
+ }
+
+ foreach ($cert_thumbprint in $cert_thumbprints) {
+ $module.Result.thumbprints += $cert_thumbprint
+ $found_certs = $store_certificates.Find([System.Security.Cryptography.X509Certificates.X509FindType]::FindByThumbprint, $cert_thumbprint, $false)
+ if ($found_certs.Count -gt 0) {
+ foreach ($found_cert in $found_certs) {
+ try {
+ if (-not $module.CheckMode) {
+ $store.Remove($found_cert)
+ }
+ } catch [System.Security.SecurityException] {
+ $module.FailJson("Unable to remove cert with thumbprint '$cert_thumbprint' with current permissions: $($_.Exception.Message)", $_)
+ } catch {
+ $module.FailJson("Unable to remove cert with thumbprint '$cert_thumbprint': $($_.Exception.Message)", $_)
+ }
+ $module.Result.changed = $true
+ }
+ }
+ }
+ } elseif ($state -eq "exported") {
+ # TODO: Add support for PKCS7 and exporting a cert chain
+ $module.Result.thumbprints += $thumbprint
+ $export = $true
+ if (Test-Path -LiteralPath $path -PathType Container) {
+ $module.FailJson("Cannot export cert to path '$path' as it is a directory")
+ } elseif (Test-Path -LiteralPath $path -PathType Leaf) {
+ $actual_cert_type = Get-CertFileType -path $path -password $password
+ if ($actual_cert_type -eq $file_type) {
+ try {
+ $certs = Get-CertFile -module $module -path $path -password $password -key_exportable $key_exportable -key_storage $key_storage
+ } catch {
+ # failed to load the file so we set the thumbprint to something
+ # that will fail validation
+ $certs = @{Thumbprint = $null}
+ }
+
+ if ($certs.Thumbprint -eq $thumbprint) {
+ $export = $false
+ }
+ }
+ }
+
+ if ($export) {
+ $found_certs = $store_certificates.Find([System.Security.Cryptography.X509Certificates.X509FindType]::FindByThumbprint, $thumbprint, $false)
+ if ($found_certs.Count -ne 1) {
+ $module.FailJson("Found $($found_certs.Count) certs when only expecting 1")
+ }
+
+ New-CertFile -module $module -cert $found_certs -path $path -type $file_type -password $password
+ }
+ } else {
+ $certs = Get-CertFile -module $module -path $path -password $password -key_exportable $key_exportable -key_storage $key_storage
+ foreach ($cert in $certs) {
+ $module.Result.thumbprints += $cert.Thumbprint
+ $found_certs = $store_certificates.Find([System.Security.Cryptography.X509Certificates.X509FindType]::FindByThumbprint, $cert.Thumbprint, $false)
+ if ($found_certs.Count -eq 0) {
+ try {
+ if (-not $module.CheckMode) {
+ $store.Add($cert)
+ }
+ } catch [System.Security.Cryptography.CryptographicException] {
+ $module.FailJson("Unable to import certificate with thumbprint '$($cert.Thumbprint)' with the current permissions: $($_.Exception.Message)", $_)
+ } catch {
+ $module.FailJson("Unable to import certificate with thumbprint '$($cert.Thumbprint)': $($_.Exception.Message)", $_)
+ }
+ $module.Result.changed = $true
+ }
+ }
+ }
+} finally {
+ $store.Close()
+}
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_certificate_store.py b/test/support/windows-integration/plugins/modules/win_certificate_store.py
new file mode 100644
index 00000000..dc617e33
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_certificate_store.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_certificate_store
+version_added: '2.5'
+short_description: Manages the certificate store
+description:
+- Used to import/export and remove certificates and keys from the local
+ certificate store.
+- This module is not used to create certificates and will only manage existing
+ certs as a file or in the store.
+- It can be used to import PEM, DER, P7B, PKCS12 (PFX) certificates and export
+ PEM, DER and PKCS12 certificates.
+options:
+ state:
+ description:
+ - If C(present), will ensure that the certificate at I(path) is imported
+ into the certificate store specified.
+ - If C(absent), will ensure that the certificate specified by I(thumbprint)
+ or the thumbprint of the cert at I(path) is removed from the store
+ specified.
+ - If C(exported), will ensure the file at I(path) is a certificate
+ specified by I(thumbprint).
+ - When exporting a certificate, if I(path) is a directory then the module
+ will fail, otherwise the file will be replaced if needed.
+ type: str
+ choices: [ absent, exported, present ]
+ default: present
+ path:
+ description:
+ - The path to a certificate file.
+ - This is required when I(state) is C(present) or C(exported).
+ - When I(state) is C(absent) and I(thumbprint) is not specified, the
+ thumbprint is derived from the certificate at this path.
+ type: path
+ thumbprint:
+ description:
+ - The thumbprint as a hex string to either export or remove.
+ - See the examples for how to specify the thumbprint.
+ type: str
+ store_name:
+ description:
+ - The store name to use when importing a certificate or searching for a
+ certificate.
+ - "C(AddressBook): The X.509 certificate store for other users"
+ - "C(AuthRoot): The X.509 certificate store for third-party certificate authorities (CAs)"
+ - "C(CertificateAuthority): The X.509 certificate store for intermediate certificate authorities (CAs)"
+ - "C(Disallowed): The X.509 certificate store for revoked certificates"
+ - "C(My): The X.509 certificate store for personal certificates"
+ - "C(Root): The X.509 certificate store for trusted root certificate authorities (CAs)"
+ - "C(TrustedPeople): The X.509 certificate store for directly trusted people and resources"
+ - "C(TrustedPublisher): The X.509 certificate store for directly trusted publishers"
+ type: str
+ choices:
+ - AddressBook
+ - AuthRoot
+ - CertificateAuthority
+ - Disallowed
+ - My
+ - Root
+ - TrustedPeople
+ - TrustedPublisher
+ default: My
+ store_location:
+ description:
+ - The store location to use when importing a certificate or searching for a
+ certificate.
+ choices: [ CurrentUser, LocalMachine ]
+ default: LocalMachine
+ password:
+ description:
+ - The password of the pkcs12 certificate key.
+ - This is used when reading a pkcs12 certificate file or the password to
+ set when C(state=exported) and C(file_type=pkcs12).
+ - If the pkcs12 file has no password set or no password should be set on
+ the exported file, do not set this option.
+ type: str
+ key_exportable:
+ description:
+ - Whether to allow the private key to be exported.
+ - If C(no), then this module and other processes will only be able to export
+ the certificate, and the private key cannot be exported.
+ - Used when C(state=present) only.
+ type: bool
+ default: yes
+ key_storage:
+ description:
+ - Specifies where Windows will store the private key when it is imported.
+ - When set to C(default), the default option as set by Windows is used, typically C(user).
+ - When set to C(machine), the key is stored in a path accessible by various
+ users.
+ - When set to C(user), the key is stored in a path only accessible by the
+ current user.
+ - Used when C(state=present) only and cannot be changed once imported.
+ - See U(https://msdn.microsoft.com/en-us/library/system.security.cryptography.x509certificates.x509keystorageflags.aspx)
+ for more details.
+ type: str
+ choices: [ default, machine, user ]
+ default: default
+ file_type:
+ description:
+ - The file type to export the certificate as when C(state=exported).
+ - C(der) is a binary ASN.1 encoded file.
+ - C(pem) is a base64 encoded file of a der file in the OpenSSL form.
+ - C(pkcs12) (also known as pfx) is a binary container that contains both
+ the certificate and private key unlike the other options.
+ - When C(pkcs12) is set and the private key is not exportable or accessible
+ by the current user, it will throw an exception.
+ type: str
+ choices: [ der, pem, pkcs12 ]
+ default: der
+notes:
+- Some actions on PKCS12 certificates and keys may fail with the error
+ C(the specified network password is not correct), either use CredSSP or
+ Kerberos with credential delegation, or use C(become) to bypass these
+ restrictions.
+- The certificates must be located on the Windows host to be used with I(path).
+- When importing a certificate for usage in IIS, it is generally required
+ to use the C(machine) key_storage option, as both C(default) and C(user)
+ will make the private key unreadable to IIS APPPOOL identities and prevent
+ binding the certificate to the https endpoint.
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Import a certificate
+ win_certificate_store:
+ path: C:\Temp\cert.pem
+ state: present
+
+- name: Import pfx certificate that is password protected
+ win_certificate_store:
+ path: C:\Temp\cert.pfx
+ state: present
+ password: VeryStrongPasswordHere!
+ become: yes
+ become_method: runas
+
+- name: Import pfx certificate without password and set private key as un-exportable
+ win_certificate_store:
+ path: C:\Temp\cert.pfx
+ state: present
+ key_exportable: no
+ # usually you don't set this here but it is for illustrative purposes
+ vars:
+ ansible_winrm_transport: credssp
+
+- name: Remove a certificate based on file thumbprint
+ win_certificate_store:
+ path: C:\Temp\cert.pem
+ state: absent
+
+- name: Remove a certificate based on thumbprint
+ win_certificate_store:
+ thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
+ state: absent
+
+- name: Remove certificate based on thumbprint in CurrentUser/TrustedPublisher store
+ win_certificate_store:
+ thumbprint: BD7AF104CF1872BDB518D95C9534EA941665FD27
+ state: absent
+ store_location: CurrentUser
+ store_name: TrustedPublisher
+
+- name: Export certificate as der encoded file
+ win_certificate_store:
+ path: C:\Temp\cert.cer
+ state: exported
+ file_type: der
+
+- name: Export certificate and key as pfx encoded file
+ win_certificate_store:
+ path: C:\Temp\cert.pfx
+ state: exported
+ file_type: pkcs12
+ password: AnotherStrongPass!
+ become: yes
+ become_method: runas
+ become_user: SYSTEM
+
+- name: Import certificate to be used by IIS
+ win_certificate_store:
+ path: C:\Temp\cert.pfx
+ file_type: pkcs12
+ password: StrongPassword!
+ store_location: LocalMachine
+ key_storage: machine
+ state: present
+'''
+
+RETURN = r'''
+thumbprints:
+ description: A list of certificate thumbprints that were touched by the
+ module.
+ returned: success
+ type: list
+ sample: ["BC05633694E675449136679A658281F17A191087"]
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_command.ps1 b/test/support/windows-integration/plugins/modules/win_command.ps1
new file mode 100644
index 00000000..e2a30650
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_command.ps1
@@ -0,0 +1,78 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+#Requires -Module Ansible.ModuleUtils.FileUtil
+
+# TODO: add check mode support
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args $args -supports_check_mode $false
+
+$raw_command_line = Get-AnsibleParam -obj $params -name "_raw_params" -type "str" -failifempty $true
+$chdir = Get-AnsibleParam -obj $params -name "chdir" -type "path"
+$creates = Get-AnsibleParam -obj $params -name "creates" -type "path"
+$removes = Get-AnsibleParam -obj $params -name "removes" -type "path"
+$stdin = Get-AnsibleParam -obj $params -name "stdin" -type "str"
+$output_encoding_override = Get-AnsibleParam -obj $params -name "output_encoding_override" -type "str"
+
+$raw_command_line = $raw_command_line.Trim()
+
+$result = @{
+ changed = $true
+ cmd = $raw_command_line
+}
+
+if ($creates -and $(Test-AnsiblePath -Path $creates)) {
+ Exit-Json @{msg="skipped, since $creates exists";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+if ($removes -and -not $(Test-AnsiblePath -Path $removes)) {
+ Exit-Json @{msg="skipped, since $removes does not exist";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+$command_args = @{
+ command = $raw_command_line
+}
+if ($chdir) {
+ $command_args['working_directory'] = $chdir
+}
+if ($stdin) {
+ $command_args['stdin'] = $stdin
+}
+if ($output_encoding_override) {
+ $command_args['output_encoding_override'] = $output_encoding_override
+}
+
+$start_datetime = [DateTime]::UtcNow
+try {
+ $command_result = Run-Command @command_args
+} catch {
+ $result.changed = $false
+ try {
+ $result.rc = $_.Exception.NativeErrorCode
+ } catch {
+ $result.rc = 2
+ }
+ Fail-Json -obj $result -message $_.Exception.Message
+}
+
+$result.stdout = $command_result.stdout
+$result.stderr = $command_result.stderr
+$result.rc = $command_result.rc
+
+$end_datetime = [DateTime]::UtcNow
+$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff")
+$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff")
+$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff")
+
+If ($result.rc -ne 0) {
+ Fail-Json -obj $result -message "non-zero return code"
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_command.py b/test/support/windows-integration/plugins/modules/win_command.py
new file mode 100644
index 00000000..508419b2
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_command.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_command
+short_description: Executes a command on a remote Windows node
+version_added: 2.2
+description:
+ - The C(win_command) module takes the command name followed by a list of space-delimited arguments.
+ - The given command will be executed on all selected nodes. It will not be
+ processed through the shell, so variables like C($env:HOME) and operations
+ like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
+ module if you need these features).
+ - For non-Windows targets, use the M(command) module instead.
+options:
+ free_form:
+ description:
+ - The C(win_command) module takes a free form command to run.
+ - There is no parameter actually named 'free form'. See the examples!
+ type: str
+ required: yes
+ creates:
+ description:
+ - A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
+ type: path
+ removes:
+ description:
+ - A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
+ type: path
+ chdir:
+ description:
+ - Set the specified path as the current working directory before executing a command.
+ type: path
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: '2.5'
+ output_encoding_override:
+ description:
+ - This option overrides the encoding of stdout/stderr output.
+ - You can use this option when you need to run a command which ignores the console's codepage.
+ - You should only need to use this option in very rare circumstances.
+ - This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
+ See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
+ type: str
+ version_added: '2.10'
+notes:
+ - If you want to run a command through a shell (say you are using C(<),
+ C(>), C(|), etc), you actually want the M(win_shell) module instead. The
+ C(win_command) module is much more secure as it's not affected by the user's
+ environment.
+ - C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command when a certain file does not
+ exist, use C(creates).
+seealso:
+- module: command
+- module: psexec
+- module: raw
+- module: win_psexec
+- module: win_shell
+author:
+ - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = r'''
+- name: Save the result of 'whoami' in 'whoami_out'
+ win_command: whoami
+ register: whoami_out
+
+- name: Run command that only runs if folder exists and runs from a specific folder
+ win_command: wbadmin -backupTarget:C:\backup\
+ args:
+ chdir: C:\somedir\
+ creates: C:\backup\
+
+- name: Run an executable and send data to the stdin for the executable
+ win_command: powershell.exe -
+ args:
+ stdin: Write-Host test
+'''
+
+RETURN = r'''
+msg:
+ description: changed
+ returned: always
+ type: bool
+ sample: true
+start:
+ description: The command execution start time
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time
+ returned: always
+ type: str
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
+stderr:
+ description: The command standard error
+ returned: always
+ type: str
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task
+ returned: always
+ type: str
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_copy.ps1 b/test/support/windows-integration/plugins/modules/win_copy.ps1
new file mode 100644
index 00000000..6a26ee72
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_copy.ps1
@@ -0,0 +1,403 @@
+#!powershell
+
+# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.Backup
+
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args -arguments $args -supports_check_mode $true
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false
+
+# there are 4 modes to win_copy which are driven by the action plugins:
+# explode: src is a zip file which needs to be extracted to dest, for use with multiple files
+# query: win_copy action plugin wants to get the state of remote files to check whether it needs to send them
+# remote: all copy action is happening remotely (remote_src=True)
+# single: a single file has been copied, also used with template
+$copy_mode = Get-AnsibleParam -obj $params -name "_copy_mode" -type "str" -default "single" -validateset "explode","query","remote","single"
+
+# used in explode, remote and single mode
+$src = Get-AnsibleParam -obj $params -name "src" -type "path" -failifempty ($copy_mode -in @("explode","remote","single"))
+$dest = Get-AnsibleParam -obj $params -name "dest" -type "path" -failifempty $true
+$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false
+
+# used in single mode
+$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str"
+
+# used in query and remote mode
+$force = Get-AnsibleParam -obj $params -name "force" -type "bool" -default $true
+
+# used in query mode, contains the local files/directories/symlinks that are to be copied
+$files = Get-AnsibleParam -obj $params -name "files" -type "list"
+$directories = Get-AnsibleParam -obj $params -name "directories" -type "list"
+
+$result = @{
+ changed = $false
+}
+
+if ($diff_mode) {
+ $result.diff = @{}
+}
+
+Function Copy-File($source, $dest) {
+ $diff = ""
+ $copy_file = $false
+ $source_checksum = $null
+ if ($force) {
+ $source_checksum = Get-FileChecksum -path $source
+ }
+
+ if (Test-Path -LiteralPath $dest -PathType Container) {
+ Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': dest is already a folder"
+ } elseif (Test-Path -LiteralPath $dest -PathType Leaf) {
+ if ($force) {
+ $target_checksum = Get-FileChecksum -path $dest
+ if ($source_checksum -ne $target_checksum) {
+ $copy_file = $true
+ }
+ }
+ } else {
+ $copy_file = $true
+ }
+
+ if ($copy_file) {
+ $file_dir = [System.IO.Path]::GetDirectoryName($dest)
+ # validate the parent dir is not a file and that it exists
+ if (Test-Path -LiteralPath $file_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
+ } elseif (-not (Test-Path -LiteralPath $file_dir)) {
+ # directory doesn't exist, need to create
+ New-Item -Path $file_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
+ $diff += "+$file_dir\`n"
+ }
+
+ if ($backup) {
+ $result.backup_file = Backup-File -path $dest -WhatIf:$check_mode
+ }
+
+ if (Test-Path -LiteralPath $dest -PathType Leaf) {
+ Remove-Item -LiteralPath $dest -Force -Recurse -WhatIf:$check_mode | Out-Null
+ $diff += "-$dest`n"
+ }
+
+ if (-not $check_mode) {
+ # cannot run with -WhatIf:$check_mode; if the parent dir didn't
+ # exist and was "created" above, it would still not exist in
+ # check mode, so Copy-Item would fail
+ Copy-Item -LiteralPath $source -Destination $dest -Force | Out-Null
+ }
+ $diff += "+$dest`n"
+
+ $result.changed = $true
+ }
+
+ # ugly but to save us from running the checksum twice, let's return it for
+ # the main code to add it to $result
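+ # (the unary comma below wraps the hashtable in a one-element array so
+ # the pipeline unrolls back to the hashtable itself rather than mangling it)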
+ return ,@{ diff = $diff; checksum = $source_checksum }
+}
+
+Function Copy-Folder($source, $dest) {
+ $diff = ""
+
+ if (-not (Test-Path -LiteralPath $dest -PathType Container)) {
+ $parent_dir = [System.IO.Path]::GetDirectoryName($dest)
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
+ }
+ if (Test-Path -LiteralPath $dest -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy folder from '$source' to '$dest': dest is already a file"
+ }
+
+ New-Item -Path $dest -ItemType Container -WhatIf:$check_mode | Out-Null
+ $diff += "+$dest\`n"
+ $result.changed = $true
+ }
+
+ $child_items = Get-ChildItem -LiteralPath $source -Force
+ foreach ($child_item in $child_items) {
+ $dest_child_path = Join-Path -Path $dest -ChildPath $child_item.Name
+ if ($child_item.PSIsContainer) {
+ $diff += (Copy-Folder -source $child_item.Fullname -dest $dest_child_path)
+ } else {
+ $diff += (Copy-File -source $child_item.Fullname -dest $dest_child_path).diff
+ }
+ }
+
+ return $diff
+}
+
+Function Get-FileSize($path) {
+ $file = Get-Item -LiteralPath $path -Force
+ if ($file.PSIsContainer) {
+ $size = (Get-ChildItem -Literalpath $file.FullName -Recurse -Force | `
+ Where-Object { $_.PSObject.Properties.Name -contains 'Length' } | `
+ Measure-Object -Property Length -Sum).Sum
+ if ($null -eq $size) {
+ $size = 0
+ }
+ } else {
+ $size = $file.Length
+ }
+
+ $size
+}
+
+Function Extract-Zip($src, $dest) {
+ $archive = [System.IO.Compression.ZipFile]::Open($src, [System.IO.Compression.ZipArchiveMode]::Read, [System.Text.Encoding]::UTF8)
+ foreach ($entry in $archive.Entries) {
+ $archive_name = $entry.FullName
+
+ # FullName may be appended with / or \, determine if it is padded and remove it
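+ # (a base64 string's length is always a multiple of 4, so a remainder
+ # of 1 can only mean a single trailing directory separator was appended)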
+ $padding_length = $archive_name.Length % 4
+ if ($padding_length -eq 0) {
+ $is_dir = $false
+ $base64_name = $archive_name
+ } elseif ($padding_length -eq 1) {
+ $is_dir = $true
+ if ($archive_name.EndsWith("/") -or $archive_name.EndsWith("`\")) {
+ $base64_name = $archive_name.Substring(0, $archive_name.Length - 1)
+ } else {
+ throw "invalid base64 archive name '$archive_name'"
+ }
+ } else {
+ throw "invalid base64 length '$archive_name'"
+ }
+
+ # to handle unicode character, win_copy action plugin has encoded the filename
+ $decoded_archive_name = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($base64_name))
+ # re-add the / to the entry full name if it was a directory
+ if ($is_dir) {
+ $decoded_archive_name = "$decoded_archive_name/"
+ }
+ $entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_name)
+ $entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
+
+ if (-not (Test-Path -LiteralPath $entry_dir)) {
+ New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
+ }
+
+ if ($is_dir -eq $false) {
+ if (-not $check_mode) {
+ [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $entry_target_path, $true)
+ }
+ }
+ }
+ $archive.Dispose() # release the handle of the zip file
+}
+
+Function Extract-ZipLegacy($src, $dest) {
+ if (-not (Test-Path -LiteralPath $dest)) {
+ New-Item -Path $dest -ItemType Directory -WhatIf:$check_mode | Out-Null
+ }
+ $shell = New-Object -ComObject Shell.Application
+ $zip = $shell.NameSpace($src)
+ $dest_path = $shell.NameSpace($dest)
+
+ foreach ($entry in $zip.Items()) {
+ $is_dir = $entry.IsFolder
+ $encoded_archive_entry = $entry.Name
+ # to handle unicode character, win_copy action plugin has encoded the filename
+ $decoded_archive_entry = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($encoded_archive_entry))
+ if ($is_dir) {
+ $decoded_archive_entry = "$decoded_archive_entry/"
+ }
+
+ $entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_entry)
+ $entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
+
+ if (-not (Test-Path -LiteralPath $entry_dir)) {
+ New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
+ }
+
+ if ($is_dir -eq $false -and (-not $check_mode)) {
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/bb787866.aspx
+ # From Folder.CopyHere documentation, 1044 means:
+ # - 1024: do not display a user interface if an error occurs
+ # - 16: respond with "yes to all" for any dialog box that is displayed
+ # - 4: do not display a progress dialog box
+ $dest_path.CopyHere($entry, 1044)
+
+ # once the file is extracted, we need to rename it to its non-base64 name
+ $combined_encoded_path = [System.IO.Path]::Combine($dest, $encoded_archive_entry)
+ Move-Item -LiteralPath $combined_encoded_path -Destination $entry_target_path -Force | Out-Null
+ }
+ }
+}
+
+if ($copy_mode -eq "query") {
+ # we only return a list of files/directories that need to be copied over
+ # the source of the local file will be the key used
+ $changed_files = @()
+ $changed_directories = @()
+ $changed_symlinks = @()
+
+ foreach ($file in $files) {
+ $filename = $file.dest
+ $local_checksum = $file.checksum
+
+ $filepath = Join-Path -Path $dest -ChildPath $filename
+ if (Test-Path -LiteralPath $filepath -PathType Leaf) {
+ if ($force) {
+ $checksum = Get-FileChecksum -path $filepath
+ if ($checksum -ne $local_checksum) {
+ $changed_files += $file
+ }
+ }
+ } elseif (Test-Path -LiteralPath $filepath -PathType Container) {
+ Fail-Json -obj $result -message "cannot copy file to dest '$filepath': object at path is already a directory"
+ } else {
+ $changed_files += $file
+ }
+ }
+
+ foreach ($directory in $directories) {
+ $dirname = $directory.dest
+
+ $dirpath = Join-Path -Path $dest -ChildPath $dirname
+ $parent_dir = [System.IO.Path]::GetDirectoryName($dirpath)
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at parent directory path is already a file"
+ }
+ if (Test-Path -LiteralPath $dirpath -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at path is already a file"
+ } elseif (-not (Test-Path -LiteralPath $dirpath -PathType Container)) {
+ $changed_directories += $directory
+ }
+ }
+
+ # TODO: Handle symlinks
+
+ $result.files = $changed_files
+ $result.directories = $changed_directories
+ $result.symlinks = $changed_symlinks
+} elseif ($copy_mode -eq "explode") {
+ # a single zip file containing the files and directories needs to be
+ # expanded. This will always result in a change, as the calculation is
+ # done in the win_copy action plugin and this module is only run when a
+ # change needs to occur
+ if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
+ Fail-Json -obj $result -message "Cannot expand src zip file: '$src' as it does not exist"
+ }
+
+ # Detect if the PS zip assemblies are available or whether to use Shell
+ $use_legacy = $false
+ try {
+ Add-Type -AssemblyName System.IO.Compression.FileSystem | Out-Null
+ Add-Type -AssemblyName System.IO.Compression | Out-Null
+ } catch {
+ $use_legacy = $true
+ }
+ if ($use_legacy) {
+ Extract-ZipLegacy -src $src -dest $dest
+ } else {
+ Extract-Zip -src $src -dest $dest
+ }
+
+ $result.changed = $true
+} elseif ($copy_mode -eq "remote") {
+ # all copy actions are happening on the remote side (Windows host), need
+ # to copy from src to dest using PS code
+ $result.src = $src
+ $result.dest = $dest
+
+ if (-not (Test-Path -LiteralPath $src)) {
+ Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
+ }
+
+ if (Test-Path -LiteralPath $src -PathType Container) {
+ # we are copying a directory or the contents of a directory
+ $result.operation = 'folder_copy'
+ if ($src.EndsWith("/") -or $src.EndsWith("`\")) {
+ # copying the folder's contents to dest
+ $diff = ""
+ $child_files = Get-ChildItem -LiteralPath $src -Force
+ foreach ($child_file in $child_files) {
+ $dest_child_path = Join-Path -Path $dest -ChildPath $child_file.Name
+ if ($child_file.PSIsContainer) {
+ $diff += Copy-Folder -source $child_file.FullName -dest $dest_child_path
+ } else {
+ $diff += (Copy-File -source $child_file.FullName -dest $dest_child_path).diff
+ }
+ }
+ } else {
+ # copying the folder and its contents to dest
+ $dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
+ $result.dest = $dest
+ $diff = Copy-Folder -source $src -dest $dest
+ }
+ } else {
+ # we are just copying a single file to dest
+ $result.operation = 'file_copy'
+
+ $source_basename = (Get-Item -LiteralPath $src -Force).Name
+ $result.original_basename = $source_basename
+
+ if ($dest.EndsWith("/") -or $dest.EndsWith("`\")) {
+ $dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
+ $result.dest = $dest
+ } else {
+ # check if the parent dir exists, this is only done if src is a
+ # file and dest is the path to a file (doesn't end with \ or /)
+ $parent_dir = Split-Path -LiteralPath $dest
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
+ } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
+ Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
+ }
+ }
+ $copy_result = Copy-File -source $src -dest $dest
+ $diff = $copy_result.diff
+ $result.checksum = $copy_result.checksum
+ }
+
+ # the file might not exist if running in check mode
+ if (-not $check_mode -or (Test-Path -LiteralPath $dest -PathType Leaf)) {
+ $result.size = Get-FileSize -path $dest
+ } else {
+ $result.size = $null
+ }
+ if ($diff_mode) {
+ $result.diff.prepared = $diff
+ }
+} elseif ($copy_mode -eq "single") {
+ # a single file is located in src and we need to copy it to dest; this will
+ # always result in a change as the calculation is done on the Ansible side
+ # before this is run. This should also never run in check mode
+ if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
+ Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
+ }
+
+ # the dest parameter is a directory, we need to append original_basename
+ if ($dest.EndsWith("/") -or $dest.EndsWith("`\") -or (Test-Path -LiteralPath $dest -PathType Container)) {
+ $remote_dest = Join-Path -Path $dest -ChildPath $original_basename
+ $parent_dir = Split-Path -LiteralPath $remote_dest
+
+ # when dest ends with /, we need to create the destination directories
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
+ } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
+ New-Item -Path $parent_dir -ItemType Directory | Out-Null
+ }
+ } else {
+ $remote_dest = $dest
+ $parent_dir = Split-Path -LiteralPath $remote_dest
+
+ # check if the dest parent dirs exist, need to fail if they don't
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
+ } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
+ Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
+ }
+ }
+
+ if ($backup) {
+ $result.backup_file = Backup-File -path $remote_dest -WhatIf:$check_mode
+ }
+
+ Copy-Item -LiteralPath $src -Destination $remote_dest -Force | Out-Null
+ $result.changed = $true
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_copy.py b/test/support/windows-integration/plugins/modules/win_copy.py
new file mode 100644
index 00000000..a55f4c65
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_copy.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_copy
+version_added: '1.9.2'
+short_description: Copies files to remote locations on Windows hosts
+description:
+- The C(win_copy) module copies a file on the local box to remote Windows locations.
+- For non-Windows targets, use the M(copy) module instead.
+options:
+ content:
+ description:
+ - When used instead of C(src), sets the contents of a file directly to the
+ specified value.
+ - This is for simple values, for anything complex or with formatting please
+ switch to the M(template) module.
+ type: str
+ version_added: '2.3'
+ decrypt:
+ description:
+ - This option controls the autodecryption of source files using vault.
+ type: bool
+ default: yes
+ version_added: '2.5'
+ dest:
+ description:
+ - Remote absolute path where the file should be copied to.
+ - If C(src) is a directory, this must be a directory too.
+ - Use \ for path separators or \\ when in "double quotes".
+ - If C(dest) ends with \ then source or the contents of source will be
+ copied to the directory without renaming.
+ - If C(dest) is a nonexistent path, it will only be created if C(dest) ends
+ with "/" or "\", or C(src) is a directory.
+ - If C(src) and C(dest) are files and if the parent directory of C(dest)
+ doesn't exist, then the task will fail.
+ type: path
+ required: yes
+ backup:
+ description:
+ - Determine whether a backup should be created.
+ - When set to C(yes), create a backup file including the timestamp information
+ so you can get the original file back if you somehow clobbered it incorrectly.
+ - No backup is taken when C(remote_src=False) and multiple files are being
+ copied.
+ type: bool
+ default: no
+ version_added: '2.8'
+ force:
+ description:
+ - If set to C(yes), the file will only be transferred if the content
+ is different from the destination.
+ - If set to C(no), the file will only be transferred if the
+ destination does not exist.
+ - If set to C(no), no checksumming of the content is performed, which can
+ help improve performance on larger files.
+ type: bool
+ default: yes
+ version_added: '2.3'
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree, if they
+ exist, should be followed.
+ type: bool
+ default: yes
+ version_added: '2.4'
+ remote_src:
+ description:
+ - If C(no), it will search for C(src) on the originating/controller machine.
+ - If C(yes), it will go to the remote/target machine for the src.
+ type: bool
+ default: no
+ version_added: '2.3'
+ src:
+ description:
+ - Local path to a file to copy to the remote server; can be absolute or
+ relative.
+ - If path is a directory, it is copied (including the source folder name)
+ recursively to C(dest).
+ - If path is a directory and ends with "/", only the inside contents of
+ that directory are copied to the destination. Otherwise, if it does not
+ end with "/", the directory itself with all contents is copied.
+ - If path is a file and dest ends with "\", the file is copied to the
+ folder with the same filename.
+ - Required unless using C(content).
+ type: path
+notes:
+- Currently win_copy does not support copying symbolic links, either from
+ local to remote or from remote to remote.
+- It is recommended that backslashes C(\) are used instead of C(/) when dealing
+ with remote paths.
+- Because win_copy runs over WinRM, it is not a very efficient transfer
+ mechanism. If sending large files consider hosting them on a web service and
+ using M(win_get_url) instead.
+seealso:
+- module: assemble
+- module: copy
+- module: win_get_url
+- module: win_robocopy
+author:
+- Jon Hawkesworth (@jhawkesworth)
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Copy a single file
+ win_copy:
+ src: /srv/myfiles/foo.conf
+ dest: C:\Temp\renamed-foo.conf
+
+- name: Copy a single file, but keep a backup
+ win_copy:
+ src: /srv/myfiles/foo.conf
+ dest: C:\Temp\renamed-foo.conf
+ backup: yes
+
+- name: Copy a single file keeping the filename
+ win_copy:
+ src: /src/myfiles/foo.conf
+ dest: C:\Temp\
+
+- name: Copy folder to C:\Temp (results in C:\Temp\temp_files)
+ win_copy:
+ src: files/temp_files
+ dest: C:\Temp
+
+- name: Copy folder contents recursively
+ win_copy:
+ src: files/temp_files/
+ dest: C:\Temp
+
+- name: Copy a single file where the source is on the remote host
+ win_copy:
+ src: C:\Temp\foo.txt
+ dest: C:\ansible\foo.txt
+ remote_src: yes
+
+- name: Copy a folder recursively where the source is on the remote host
+ win_copy:
+ src: C:\Temp
+ dest: C:\ansible
+ remote_src: yes
+
+- name: Set the contents of a file
+ win_copy:
+ content: abc123
+ dest: C:\Temp\foo.txt
+
+- name: Copy a single file as another user
+ win_copy:
+ src: NuGet.config
+ dest: '%AppData%\NuGet\NuGet.config'
+ vars:
+ ansible_become_user: user
+ ansible_become_password: pass
+ # The tmp dir must be set when using win_copy as another user
+ # This ensures the become user will have permissions for the operation
+ # Make sure to specify a folder both the ansible_user and the become_user have access to (i.e not %TEMP% which is user specific and requires Admin)
+ ansible_remote_tmp: 'c:\tmp'
+'''
+
+RETURN = r'''
+backup_file:
+ description: Name of the backup file that was created.
+ returned: if backup=yes
+ type: str
+ sample: C:\Path\To\File.txt.11540.20150212-220915.bak
+dest:
+ description: Destination file/path.
+ returned: changed
+ type: str
+ sample: C:\Temp\
+src:
+ description: Source file used for the copy on the target machine.
+ returned: changed
+ type: str
+ sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
+checksum:
+ description: SHA1 checksum of the file after running copy.
+ returned: success, src is a file
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+size:
+ description: Size of the target, after execution.
+ returned: changed, src is a file
+ type: int
+ sample: 1220
+operation:
+ description: Whether a single file copy took place or a folder copy.
+ returned: success
+ type: str
+ sample: file_copy
+original_basename:
+ description: Basename of the copied file.
+ returned: changed, src is a file
+ type: str
+ sample: foo.txt
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 b/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1
new file mode 100644
index 00000000..593ee763
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1
@@ -0,0 +1,129 @@
+#!powershell
+
+# Copyright: 2019, rnsc(@rnsc) <github@rnsc.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -OSVersion 6.3
+
+$spec = @{
+ options = @{
+ drive_letter = @{ type = "str"; required = $true }
+ state = @{ type = "str"; choices = "absent", "present"; default = "present"; }
+ settings = @{
+ type = "dict"
+ required = $false
+ options = @{
+ minimum_file_size = @{ type = "int"; default = 32768 }
+ minimum_file_age_days = @{ type = "int"; default = 2 }
+ no_compress = @{ type = "bool"; required = $false; default = $false }
+ optimize_in_use_files = @{ type = "bool"; required = $false; default = $false }
+ verify = @{ type = "bool"; required = $false; default = $false }
+ }
+ }
+ }
+ supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$drive_letter = $module.Params.drive_letter
+$state = $module.Params.state
+$settings = $module.Params.settings
+
+$module.Result.changed = $false
+$module.Result.reboot_required = $false
+$module.Result.msg = ""
+
+function Set-DataDeduplication($volume, $state, $settings) {
+
+ $current_state = 'absent'
+
+ try {
+ $dedup_info = Get-DedupVolume -Volume "$($volume.DriveLetter):"
+ } catch {
+ $dedup_info = $null
+ }
+
+ if ($dedup_info.Enabled) {
+ $current_state = 'present'
+ }
+
+ if ( $state -ne $current_state ) {
+ if( -not $module.CheckMode) {
+ if($state -eq 'present') {
+ # Enable-DedupVolume -Volume <String>
+ Enable-DedupVolume -Volume "$($volume.DriveLetter):"
+ } elseif ($state -eq 'absent') {
+ Disable-DedupVolume -Volume "$($volume.DriveLetter):"
+ }
+ }
+ $module.Result.changed = $true
+ }
+
+ if ($state -eq 'present') {
+ if ($null -ne $settings) {
+ Set-DataDedupJobSettings -volume $volume -settings $settings
+ }
+ }
+}
+
+function Set-DataDedupJobSettings ($volume, $settings) {
+
+ try {
+ $dedup_info = Get-DedupVolume -Volume "$($volume.DriveLetter):"
+ } catch {
+ $dedup_info = $null
+ }
+
+ ForEach ($key in $settings.keys) {
+
+ # See Microsoft documentation:
+ # https://docs.microsoft.com/en-us/powershell/module/deduplication/set-dedupvolume?view=win10-ps
+
+ $update_key = $key
+ $update_value = $settings.$($key)
+ # Transform Ansible-style options to PowerShell params
+ $update_key = $update_key -replace('_', '')
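+ # e.g. minimum_file_size becomes 'minimumfilesize'; PowerShell parameter
+ # binding is case-insensitive, so this still matches -MinimumFileSize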
+
+ if ($update_key -eq "MinimumFileSize" -and $update_value -lt 32768) {
+ $update_value = 32768
+ }
+
+ $current_value = ($dedup_info | Select-Object -ExpandProperty $update_key)
+
+ if ($update_value -ne $current_value) {
+ $command_param = @{
+ $($update_key) = $update_value
+ }
+
+ # Set-DedupVolume -Volume <String>`
+ # -NoCompress <bool> `
+ # -MinimumFileAgeDays <UInt32> `
+ # -MinimumFileSize <UInt32> (minimum 32768)
+ if( -not $module.CheckMode ) {
+ Set-DedupVolume -Volume "$($volume.DriveLetter):" @command_param
+ }
+
+ $module.Result.changed = $true
+ }
+ }
+
+}
+
+# Install required feature
+$feature_name = "FS-Data-Deduplication"
+if( -not $module.CheckMode) {
+ $feature = Install-WindowsFeature -Name $feature_name
+
+ if ($feature.RestartNeeded -eq 'Yes') {
+ $module.Result.reboot_required = $true
+ $module.FailJson("$feature_name was installed but requires Windows to be rebooted to work.")
+ }
+}
+
+$volume = Get-Volume -DriveLetter $drive_letter
+
+Set-DataDeduplication -volume $volume -state $state -settings $settings
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_data_deduplication.py b/test/support/windows-integration/plugins/modules/win_data_deduplication.py
new file mode 100644
index 00000000..d320b9f7
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_data_deduplication.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: 2019, rnsc(@rnsc) <github@rnsc.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_data_deduplication
+version_added: "2.10"
+short_description: Module to enable Data Deduplication on a volume.
+description:
+- This module can be used to enable Data Deduplication on a Windows volume.
+- The module will install the FS-Data-Deduplication feature (a reboot will be necessary).
+options:
+ drive_letter:
+ description:
+ - Windows drive letter on which to enable data deduplication.
+ required: yes
+ type: str
+ state:
+ description:
+ - Whether to enable or disable data deduplication on the selected volume.
+ default: present
+ type: str
+ choices: [ present, absent ]
+ settings:
+ description:
+ - Dictionary of settings to pass to the Set-DedupVolume PowerShell cmdlet.
+ type: dict
+ suboptions:
+ minimum_file_size:
+ description:
+ - Minimum file size you want to target for deduplication.
+ - It will default to 32768 if not defined or if the value is less than 32768.
+ type: int
+ default: 32768
+ minimum_file_age_days:
+ description:
+ - Minimum file age you want to target for deduplication.
+ type: int
+ default: 2
+ no_compress:
+ description:
+ - Whether you want to enable filesystem compression or not.
+ type: bool
+ default: no
+ optimize_in_use_files:
+ description:
+ - Indicates that the server attempts to optimize currently open files.
+ type: bool
+ default: no
+ verify:
+ description:
+ - Indicates whether the deduplication engine performs a byte-for-byte verification for each duplicate chunk
+ that optimization creates, rather than relying on a cryptographically strong hash.
+ - This option is not recommended.
+ - Setting this parameter to True can degrade optimization performance.
+ type: bool
+ default: no
+author:
+- rnsc (@rnsc)
+'''
+
+EXAMPLES = r'''
+- name: Enable Data Deduplication on D
+ win_data_deduplication:
+ drive_letter: 'D'
+ state: present
+
+- name: Enable Data Deduplication on D
+ win_data_deduplication:
+ drive_letter: 'D'
+ state: present
+ settings:
+ no_compress: true
+ minimum_file_age_days: 1
+ minimum_file_size: 0
+'''
+
+RETURN = r'''
+#
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_dsc.ps1 b/test/support/windows-integration/plugins/modules/win_dsc.ps1
new file mode 100644
index 00000000..690f391a
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_dsc.ps1
@@ -0,0 +1,398 @@
+#!powershell
+
+# Copyright: (c) 2015, Trond Hindenes <trond@hindenes.com>, and others
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Version 5
+
+Function ConvertTo-ArgSpecType {
+ <#
+ .SYNOPSIS
+ Converts the DSC parameter type to the arg spec type required for Ansible.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][String]$CimType
+ )
+
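+ # Primitive CIM types map straight onto arg spec type names; the remaining
+ # types are returned as delegates that the arg spec uses to cast the raw
+ # value to the matching .NET type.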
+ $arg_type = switch($CimType) {
+ Boolean { "bool" }
+ Char16 { [Func[[Object], [Char]]]{ [System.Char]::Parse($args[0].ToString()) } }
+ DateTime { [Func[[Object], [DateTime]]]{ [System.DateTime]($args[0].ToString()) } }
+ Instance { "dict" }
+ Real32 { "float" }
+ Real64 { [Func[[Object], [Double]]]{ [System.Double]::Parse($args[0].ToString()) } }
+ Reference { "dict" }
+ SInt16 { [Func[[Object], [Int16]]]{ [System.Int16]::Parse($args[0].ToString()) } }
+ SInt32 { "int" }
+ SInt64 { [Func[[Object], [Int64]]]{ [System.Int64]::Parse($args[0].ToString()) } }
+ SInt8 { [Func[[Object], [SByte]]]{ [System.SByte]::Parse($args[0].ToString()) } }
+ String { "str" }
+ UInt16 { [Func[[Object], [UInt16]]]{ [System.UInt16]::Parse($args[0].ToString()) } }
+ UInt32 { [Func[[Object], [UInt32]]]{ [System.UInt32]::Parse($args[0].ToString()) } }
+ UInt64 { [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0].ToString()) } }
+ UInt8 { [Func[[Object], [Byte]]]{ [System.Byte]::Parse($args[0].ToString()) } }
+ Unknown { "raw" }
+ default { "raw" }
+ }
+ return $arg_type
+}
+
+Function Get-DscCimClassProperties {
+ <#
+ .SYNOPSIS
+ Gets a list of CimProperties of a CIM class. It filters out any magic or
+ read-only properties that we don't need to know about.
+ #>
+ param([Parameter(Mandatory=$true)][String]$ClassName)
+
+ $resource = Get-CimClass -ClassName $ClassName -Namespace root\Microsoft\Windows\DesiredStateConfiguration
+
+ # Filter out any magic properties that are used internally on an OMI_BaseResource
+ # https://github.com/PowerShell/PowerShell/blob/master/src/System.Management.Automation/DscSupport/CimDSCParser.cs#L1203
+ $magic_properties = @("ResourceId", "SourceInfo", "ModuleName", "ModuleVersion", "ConfigurationName")
+ $properties = $resource.CimClassProperties | Where-Object {
+
+ ($resource.CimSuperClassName -ne "OMI_BaseResource" -or $_.Name -notin $magic_properties) -and
+ -not $_.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::ReadOnly)
+ }
+
+ return ,$properties
+}
+
+Function Add-PropertyOption {
+ <#
+ .SYNOPSIS
+ Adds the spec for the property type to the existing module specification.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][Hashtable]$Spec,
+ [Parameter(Mandatory=$true)]
+ [Microsoft.Management.Infrastructure.CimPropertyDeclaration]$Property
+ )
+
+ $option = @{
+ required = $false
+ }
+ $property_name = $Property.Name
+ $property_type = $Property.CimType.ToString()
+
+ if ($Property.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::Key) -or
+ $Property.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::Required)) {
+ $option.required = $true
+ }
+
+ if ($null -ne $Property.Qualifiers['Values']) {
+ $option.choices = [System.Collections.Generic.List`1[Object]]$Property.Qualifiers['Values'].Value
+ }
+
+ if ($property_name -eq "Name") {
+ # For backwards compatibility we support specifying the Name DSC property as item_name
+ $option.aliases = @("item_name")
+ } elseif ($property_name -ceq "key") {
+ # There seems to be a bug in the CIM property parsing when the property name is 'Key'. The CIM instance will
+ # think the name is 'key' when the MOF actually defines it as 'Key'. We set the proper casing so the module arg
+ # validator won't fire a case sensitive warning
+ $property_name = "Key"
+ }
+
+ if ($Property.ReferenceClassName -eq "MSFT_Credential") {
+ # Special handling for the MSFT_Credential type (PSCredential), we handle this with having 2 options that
+ # have the suffix _username and _password.
+ $option_spec_pass = @{
+ type = "str"
+ required = $option.required
+ no_log = $true
+ }
+ $Spec.options."$($property_name)_password" = $option_spec_pass
+ $Spec.required_together.Add(@("$($property_name)_username", "$($property_name)_password")) > $null
+
+ $property_name = "$($property_name)_username"
+ $option.type = "str"
+ } elseif ($Property.ReferenceClassName -eq "MSFT_KeyValuePair") {
+ $option.type = "dict"
+ } elseif ($property_type.EndsWith("Array")) {
+ $option.type = "list"
+ $option.elements = ConvertTo-ArgSpecType -CimType $property_type.Substring(0, $property_type.Length - 5)
+ } else {
+ $option.type = ConvertTo-ArgSpecType -CimType $property_type
+ }
+
+ if (($option.type -eq "dict" -or ($option.type -eq "list" -and $option.elements -eq "dict")) -and
+ $Property.ReferenceClassName -ne "MSFT_KeyValuePair") {
+ # Get the sub spec if the type is a Instance (CimInstance/dict)
+ $sub_option_spec = Get-OptionSpec -ClassName $Property.ReferenceClassName
+ $option += $sub_option_spec
+ }
+
+ $Spec.options.$property_name = $option
+}
+
+Function Get-OptionSpec {
+ <#
+ .SYNOPSIS
+ Generates the spec used by AnsibleModule for a CIM MOF resource name.
+
+ .NOTES
+ This won't be able to retrieve the default values for an option as that is not defined in the MOF for a resource.
+ Default values are still preserved in the DSC engine if we don't pass in the property at all, we just can't report
+ on what they are automatically.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][String]$ClassName
+ )
+
+ $spec = @{
+ options = @{}
+ required_together = [System.Collections.ArrayList]@()
+ }
+ $properties = Get-DscCimClassProperties -ClassName $ClassName
+ foreach ($property in $properties) {
+ Add-PropertyOption -Spec $spec -Property $property
+ }
+
+ return $spec
+}
+
+Function ConvertTo-CimInstance {
+ <#
+ .SYNOPSIS
+ Converts a dict to a CimInstance of the specified class. Also provides a
+ better error message, containing the offending option name, if the
+ conversion fails.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][String]$Name,
+ [Parameter(Mandatory=$true)][String]$ClassName,
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Value,
+ [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
+ [Switch]$Recurse
+ )
+
+ $properties = @{}
+ foreach ($value_info in $Value.GetEnumerator()) {
+ # Need to remove all null values from existing dict so the conversion works
+ if ($null -eq $value_info.Value) {
+ continue
+ }
+ $properties.($value_info.Key) = $value_info.Value
+ }
+
+ if ($Recurse) {
+ # We want to validate and convert any values to what's required by DSC
+ $properties = ConvertTo-DscProperty -ClassName $ClassName -Params $properties -Module $Module
+ }
+
+ try {
+ return (New-CimInstance -ClassName $ClassName -Property $properties -ClientOnly)
+ } catch {
+ # New-CimInstance raises a poor error message, make sure we mention what option it is for
+ $Module.FailJson("Failed to cast dict value for option '$Name' to a CimInstance: $($_.Exception.Message)", $_)
+ }
+}
+
+Function ConvertTo-DscProperty {
+ <#
+ .SYNOPSIS
+ Converts the input module parameters that have been validated and cast
+ into the types expected by the DSC engine. This is mostly done to deal with
+ types like PSCredential and Dictionaries.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][String]$ClassName,
+ [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Params,
+ [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module
+ )
+ $properties = Get-DscCimClassProperties -ClassName $ClassName
+
+ $dsc_properties = @{}
+ foreach ($property in $properties) {
+ $property_name = $property.Name
+ $property_type = $property.CimType.ToString()
+
+ if ($property.ReferenceClassName -eq "MSFT_Credential") {
+ $username = $Params."$($property_name)_username"
+ $password = $Params."$($property_name)_password"
+
+ # No user set == No option set in playbook, skip this property
+ if ($null -eq $username) {
+ continue
+ }
+ $sec_password = ConvertTo-SecureString -String $password -AsPlainText -Force
+ $value = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $sec_password
+ } else {
+ $value = $Params.$property_name
+
+ # The actual value wasn't set, skip adding this property
+ if ($null -eq $value) {
+ continue
+ }
+
+ if ($property.ReferenceClassName -eq "MSFT_KeyValuePair") {
+ $key_value_pairs = [System.Collections.Generic.List`1[CimInstance]]@()
+ foreach ($value_info in $value.GetEnumerator()) {
+ $kvp = @{Key = $value_info.Key; Value = $value_info.Value.ToString()}
+ $cim_instance = ConvertTo-CimInstance -Name $property_name -ClassName MSFT_KeyValuePair `
+ -Value $kvp -Module $Module
+ $key_value_pairs.Add($cim_instance) > $null
+ }
+ $value = $key_value_pairs.ToArray()
+ } elseif ($null -ne $property.ReferenceClassName) {
+ # Convert the dict to a CimInstance (or list of CimInstances)
+ $convert_args = @{
+ ClassName = $property.ReferenceClassName
+ Module = $Module
+ Name = $property_name
+ Recurse = $true
+ }
+ if ($property_type.EndsWith("Array")) {
+ $value = [System.Collections.Generic.List`1[CimInstance]]@()
+ foreach ($raw in $Params.$property_name.GetEnumerator()) {
+ $cim_instance = ConvertTo-CimInstance -Value $raw @convert_args
+ $value.Add($cim_instance) > $null
+ }
+ $value = $value.ToArray() # Need to make sure we are dealing with an Array not a List
+ } else {
+ $value = ConvertTo-CimInstance -Value $value @convert_args
+ }
+ }
+ }
+ $dsc_properties.$property_name = $value
+ }
+
+ return $dsc_properties
+}
+
+Function Invoke-DscMethod {
+ <#
+ .SYNOPSIS
+ Invokes the specified DSC resource method in a separate PowerShell pipeline.
+ This is done so we can retrieve the Verbose stream and return it back to the
+ user for further debugging.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
+ [Parameter(Mandatory=$true)][String]$Method,
+ [Parameter(Mandatory=$true)][Hashtable]$Arguments
+ )
+
+ # Invoke the DSC resource in a separate runspace so we can capture the Verbose output
+ $ps = [PowerShell]::Create()
+ $ps.AddCommand("Invoke-DscResource").AddParameter("Method", $Method) > $null
+ $ps.AddParameters($Arguments) > $null
+
+ $result = $ps.Invoke()
+
+ # Pass the warnings through to the AnsibleModule return result
+ foreach ($warning in $ps.Streams.Warning) {
+ $Module.Warn($warning.Message)
+ }
+
+ # If running at a high enough verbosity, add the verbose output to the AnsibleModule return result
+ if ($Module.Verbosity -ge 3) {
+ $verbose_logs = [System.Collections.Generic.List`1[String]]@()
+ foreach ($verbosity in $ps.Streams.Verbose) {
+ $verbose_logs.Add($verbosity.Message) > $null
+ }
+ $Module.Result."verbose_$($Method.ToLower())" = $verbose_logs
+ }
+
+ if ($ps.HadErrors) {
+ # Cannot pass in the ErrorRecord as it's a RemotingErrorRecord and doesn't contain the ScriptStackTrace
+ # or other info that would be useful
+ $Module.FailJson("Failed to invoke DSC $Method method: $($ps.Streams.Error[0].Exception.Message)")
+ }
+
+ return $result
+}
+
+# win_dsc is unique in that it builds the arg spec based on DSC Resource input. To get this info
+# we need to read the resource_name and module_version values, which is done outside of Ansible.Basic
+if ($args.Length -gt 0) {
+ $params = Get-Content -Path $args[0] | ConvertFrom-Json
+} else {
+ $params = $complex_args
+}
+if (-not $params.ContainsKey("resource_name")) {
+ $res = @{
+ msg = "missing required argument: resource_name"
+ failed = $true
+ }
+ Write-Output -InputObject (ConvertTo-Json -Compress -InputObject $res)
+ exit 1
+}
+$resource_name = $params.resource_name
+
+if ($params.ContainsKey("module_version")) {
+ $module_version = $params.module_version
+} else {
+ $module_version = "latest"
+}
+
+$module_versions = (Get-DscResource -Name $resource_name -ErrorAction SilentlyContinue | Sort-Object -Property Version)
+$resource = $null
+if ($module_version -eq "latest" -and $null -ne $module_versions) {
+ $resource = $module_versions[-1]
+} elseif ($module_version -ne "latest") {
+ $resource = $module_versions | Where-Object { $_.Version -eq $module_version }
+}
+
+if (-not $resource) {
+ if ($module_version -eq "latest") {
+ $msg = "Resource '$resource_name' not found."
+ } else {
+ $msg = "Resource '$resource_name' with version '$module_version' not found."
+ $msg += " Versions installed: '$($module_versions.Version -join "', '")'."
+ }
+
+ Write-Output -InputObject (ConvertTo-Json -Compress -InputObject @{ failed = $true; msg = $msg })
+ exit 1
+}
+
+# Build the base args for the DSC Invocation based on the resource selected
+$dsc_args = @{
+ Name = $resource.Name
+}
+
+# Binary resources do not work very well with this approach; we need to guesstimate the module name/version
+$module_version = $null
+if ($resource.Module) {
+ $dsc_args.ModuleName = @{
+ ModuleName = $resource.Module.Name
+ ModuleVersion = $resource.Module.Version
+ }
+ $module_version = $resource.Module.Version.ToString()
+} else {
+ $dsc_args.ModuleName = "PSDesiredStateConfiguration"
+}
+
+# To ensure the class registered with CIM is the one based on our version, we want to run the Get method so the DSC
+# engine updates the metadata property. We don't care about any errors here
+try {
+ Invoke-DscResource -Method Get -Property @{Fake="Fake"} @dsc_args > $null
+} catch {}
+
+# Dynamically build the option spec based on the resource_name specified and create the module object
+$spec = Get-OptionSpec -ClassName $resource.ResourceType
+$spec.supports_check_mode = $true
+$spec.options.module_version = @{ type = "str"; default = "latest" }
+$spec.options.resource_name = @{ type = "str"; required = $true }
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$module.Result.reboot_required = $false
+$module.Result.module_version = $module_version
+
+# Build the DSC invocation arguments and invoke the resource
+$dsc_args.Property = ConvertTo-DscProperty -ClassName $resource.ResourceType -Module $module -Params $module.Params
+$dsc_args.Verbose = $true
+
+$test_result = Invoke-DscMethod -Module $module -Method Test -Arguments $dsc_args
+if ($test_result.InDesiredState -ne $true) {
+ if (-not $module.CheckMode) {
+ $result = Invoke-DscMethod -Module $module -Method Set -Arguments $dsc_args
+ $module.Result.reboot_required = $result.RebootRequired
+ }
+ $module.Result.changed = $true
+}
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_dsc.py b/test/support/windows-integration/plugins/modules/win_dsc.py
new file mode 100644
index 00000000..200d025e
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_dsc.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Trond Hindenes <trond@hindenes.com>, and others
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_dsc
+version_added: "2.4"
+short_description: Invokes a PowerShell DSC configuration
+description:
+- Configures a resource using PowerShell DSC.
+- Requires PowerShell version 5.0 or newer.
+- Most of the options for this module are dynamic and will vary depending on
+ the DSC Resource specified in I(resource_name).
+- See :doc:`/user_guide/windows_dsc` for more information on how to use this module.
+options:
+ resource_name:
+ description:
+ - The name of the DSC Resource to use.
+ - Must be accessible to PowerShell using any of the default paths.
+ type: str
+ required: yes
+ module_version:
+ description:
+ - Can be used to configure the exact version of the DSC resource to be
+ invoked.
+ - Useful if the target node has multiple versions installed of the module
+ containing the DSC resource.
+ - If not specified, the module will follow standard PowerShell convention
+ and use the highest version available.
+ type: str
+ default: latest
+ free_form:
+ description:
+ - The M(win_dsc) module takes in multiple free form options based on the
+ DSC resource being invoked by I(resource_name).
+ - There is no option actually named C(free_form) so see the examples.
+ - This module will try to convert the option to the correct type required
+ by the DSC resource and throw a warning if it fails.
+ - If the type of the DSC resource option is a C(CimInstance) or
+ C(CimInstance[]), this means the value should be a dictionary or list
+ of dictionaries based on the values required by that option.
+ - If the type of the DSC resource option is a C(PSCredential) then there
+ needs to be 2 options set in the Ansible task definition suffixed with
+ C(_username) and C(_password).
+ - If the type of the DSC resource option is an array, then a list should be
+ provided, but a comma separated string also works. Use a list where
+ possible, as no escaping is required and it works with more complex types
+ like C(CimInstance[]).
+ - If the type of the DSC resource option is a C(DateTime), you should use
+ a string in ISO 8601 format to ensure the exact date is
+ used.
+ - Since Ansible 2.8, Ansible will now validate the input fields against the
+ DSC resource definition automatically. Older versions will silently
+ ignore invalid fields.
+ type: str
+ required: true
+notes:
+- By default there are a few builtin resources that come with PowerShell 5.0,
+ see U(https://docs.microsoft.com/en-us/powershell/scripting/dsc/resources/resources) for
+ more information on these resources.
+- Custom DSC resources can be installed with M(win_psmodule) using the I(name)
+ option.
+- The DSC engine runs each task as the SYSTEM account; any resources that need
+ to be accessed with a different account need to have C(PsDscRunAsCredential)
+ set.
+- To see the valid options for a DSC resource, run the module with C(-vvv) to
+ show the possible module invocation. Default values are not shown in this
+ output but are applied within the DSC engine.
+author:
+- Trond Hindenes (@trondhindenes)
+'''
+
+EXAMPLES = r'''
+- name: Extract zip file
+ win_dsc:
+ resource_name: Archive
+ Ensure: Present
+ Path: C:\Temp\zipfile.zip
+ Destination: C:\Temp\Temp2
+
+- name: Install a Windows feature with the WindowsFeature resource
+ win_dsc:
+ resource_name: WindowsFeature
+ Name: telnet-client
+
+- name: Edit HKCU reg key under specific user
+ win_dsc:
+ resource_name: Registry
+ Ensure: Present
+ Key: HKEY_CURRENT_USER\ExampleKey
+ ValueName: TestValue
+ ValueData: TestData
+ PsDscRunAsCredential_username: '{{ansible_user}}'
+ PsDscRunAsCredential_password: '{{ansible_password}}'
+ no_log: true
+
+- name: Create file with multiple attributes
+ win_dsc:
+ resource_name: File
+ DestinationPath: C:\ansible\dsc
+ Attributes: # can also be a comma separated string, e.g. 'Hidden, System'
+ - Hidden
+ - System
+ Ensure: Present
+ Type: Directory
+
+- name: Call DSC resource with DateTime option
+ win_dsc:
+ resource_name: DateTimeResource
+ DateTimeOption: '2019-02-22T13:57:31.2311892+00:00'
+
+# more complex example using custom DSC resource and dict values
+- name: Setup the xWebAdministration module
+ win_psmodule:
+ name: xWebAdministration
+ state: present
+
+- name: Create IIS Website with Binding and Authentication options
+ win_dsc:
+ resource_name: xWebsite
+ Ensure: Present
+ Name: DSC Website
+ State: Started
+ PhysicalPath: C:\inetpub\wwwroot
+ BindingInfo: # Example of a CimInstance[] DSC parameter (list of dicts)
+ - Protocol: https
+ Port: 1234
+ CertificateStoreName: MY
+ CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659
+ HostName: DSCTest
+ IPAddress: '*'
+ SSLFlags: '1'
+ - Protocol: http
+ Port: 4321
+ IPAddress: '*'
+ AuthenticationInfo: # Example of a CimInstance DSC parameter (dict)
+ Anonymous: no
+ Basic: true
+ Digest: false
+ Windows: yes
+'''
+
+RETURN = r'''
+module_version:
+ description: The version of the dsc resource/module used.
+ returned: always
+ type: str
+ sample: "1.0.1"
+reboot_required:
+ description: Flag returned from the DSC engine indicating whether or not
+ the machine requires a reboot for the invoked changes to take effect.
+ returned: always
+ type: bool
+ sample: true
+verbose_test:
+ description: The verbose output as a list from executing the DSC test
+ method.
+ returned: Ansible verbosity is -vvv or greater
+ type: list
+ sample: [
+ "Perform operation 'Invoke CimMethod' with the following parameters, ",
+ "[SERVER]: LCM: [Start Test ] [[File]DirectResourceAccess]",
+ "Operation 'Invoke CimMethod' complete."
+ ]
+verbose_set:
+ description: The verbose output as a list from executing the DSC Set
+ method.
+ returned: Ansible verbosity is -vvv or greater and a change occurred
+ type: list
+ sample: [
+ "Perform operation 'Invoke CimMethod' with the following parameters, ",
+ "[SERVER]: LCM: [Start Set ] [[File]DirectResourceAccess]",
+ "Operation 'Invoke CimMethod' complete."
+ ]
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_feature.ps1 b/test/support/windows-integration/plugins/modules/win_feature.ps1
new file mode 100644
index 00000000..9a7e1c30
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_feature.ps1
@@ -0,0 +1,111 @@
+#!powershell
+
+# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+Import-Module -Name ServerManager
+
+$result = @{
+ changed = $false
+}
+
+$params = Parse-Args $args -supports_check_mode $true
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+
+$name = Get-AnsibleParam -obj $params -name "name" -type "list" -failifempty $true
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent"
+
+$include_sub_features = Get-AnsibleParam -obj $params -name "include_sub_features" -type "bool" -default $false
+$include_management_tools = Get-AnsibleParam -obj $params -name "include_management_tools" -type "bool" -default $false
+$source = Get-AnsibleParam -obj $params -name "source" -type "str"
+
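+# Install/Uninstall-WindowsFeature only exist on Windows Server 2012 and
+# newer; older hosts fall back to the Add/Remove-WindowsFeature cmdlets.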
+$install_cmdlet = $false
+if (Get-Command -Name Install-WindowsFeature -ErrorAction SilentlyContinue) {
+ Set-Alias -Name Install-AnsibleWindowsFeature -Value Install-WindowsFeature
+ Set-Alias -Name Uninstall-AnsibleWindowsFeature -Value Uninstall-WindowsFeature
+ $install_cmdlet = $true
+} elseif (Get-Command -Name Add-WindowsFeature -ErrorAction SilentlyContinue) {
+ Set-Alias -Name Install-AnsibleWindowsFeature -Value Add-WindowsFeature
+ Set-Alias -Name Uninstall-AnsibleWindowsFeature -Value Remove-WindowsFeature
+} else {
+ Fail-Json -obj $result -message "This version of Windows does not support the cmdlets Install-WindowsFeature or Add-WindowsFeature"
+}
+
+if ($state -eq "present") {
+ $install_args = @{
+ Name = $name
+ IncludeAllSubFeature = $include_sub_features
+ Restart = $false
+ WhatIf = $check_mode
+ ErrorAction = "Stop"
+ }
+
+ if ($install_cmdlet) {
+ $install_args.IncludeManagementTools = $include_management_tools
+ $install_args.Confirm = $false
+ if ($source) {
+ if (-not (Test-Path -Path $source)) {
+ Fail-Json -obj $result -message "Failed to find source path $source for feature install"
+ }
+ $install_args.Source = $source
+ }
+ }
+
+ try {
+ $action_results = Install-AnsibleWindowsFeature @install_args
+ } catch {
+ Fail-Json -obj $result -message "Failed to install Windows Feature: $($_.Exception.Message)"
+ }
+} else {
+ $uninstall_args = @{
+ Name = $name
+ Restart = $false
+ WhatIf = $check_mode
+ ErrorAction = "Stop"
+ }
+ if ($install_cmdlet) {
+ $uninstall_args.IncludeManagementTools = $include_management_tools
+ }
+
+ try {
+ $action_results = Uninstall-AnsibleWindowsFeature @uninstall_args
+ } catch {
+ Fail-Json -obj $result -message "Failed to uninstall Windows Feature: $($_.Exception.Message)"
+ }
+}
+
+# Loop through results and create a hash containing details about
+# each role/feature that is installed/removed
+# $action_results.FeatureResult is not empty if anything was changed
+$feature_results = @()
+foreach ($action_result in $action_results.FeatureResult) {
+ $message = @()
+ foreach ($msg in $action_result.Message) {
+ $message += @{
+ message_type = $msg.MessageType.ToString()
+ error_code = $msg.ErrorCode
+ text = $msg.Text
+ }
+ }
+
+ $feature_results += @{
+ id = $action_result.Id
+ display_name = $action_result.DisplayName
+ message = $message
+ reboot_required = ConvertTo-Bool -obj $action_result.RestartNeeded
+ skip_reason = $action_result.SkipReason.ToString()
+ success = ConvertTo-Bool -obj $action_result.Success
+ restart_needed = ConvertTo-Bool -obj $action_result.RestartNeeded
+ }
+ $result.changed = $true
+}
+$result.feature_result = $feature_results
+$result.success = ConvertTo-Bool -obj $action_results.Success
+$result.exitcode = $action_results.ExitCode.ToString()
+$result.reboot_required = ConvertTo-Bool -obj $action_results.RestartNeeded
+# controls whether Ansible will fail or not
+$result.failed = (-not $action_results.Success)
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_feature.py b/test/support/windows-integration/plugins/modules/win_feature.py
new file mode 100644
index 00000000..62e310b2
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_feature.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>
+# Copyright: (c) 2014, Trond Hindenes <trond@hindenes.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_feature
+version_added: "1.7"
+short_description: Installs and uninstalls Windows Features on Windows Server
+description:
+ - Installs or uninstalls Windows Roles or Features on Windows Server.
+ - This module uses the Add/Remove-WindowsFeature Cmdlets on Windows 2008 R2
+ and Install/Uninstall-WindowsFeature Cmdlets on Windows 2012 and later, which are not available on client OS machines.
+options:
+ name:
+ description:
+ - Names of roles or features to install as a single feature or a comma-separated list of features.
+ - To list all available features use the PowerShell command C(Get-WindowsFeature).
+ type: list
+ required: yes
+ state:
+ description:
+ - State of the features or roles on the system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ include_sub_features:
+ description:
+ - Adds all subfeatures of the specified feature.
+ type: bool
+ default: no
+ include_management_tools:
+ description:
+ - Adds the corresponding management tools to the specified feature.
+ - Not supported in Windows 2008 R2 and will be ignored.
+ type: bool
+ default: no
+ source:
+ description:
+ - Specify a source to install the feature from.
+ - Not supported in Windows 2008 R2 and will be ignored.
+ - Can either be C({driveletter}:\sources\sxs) or C(\\{IP}\share\sources\sxs).
+ type: str
+ version_added: "2.1"
+seealso:
+- module: win_chocolatey
+- module: win_package
+author:
+ - Paul Durivage (@angstwad)
+ - Trond Hindenes (@trondhindenes)
+'''
+
+EXAMPLES = r'''
+- name: Install IIS (Web-Server only)
+ win_feature:
+ name: Web-Server
+ state: present
+
+- name: Install IIS (Web-Server and Web-Common-Http)
+ win_feature:
+ name:
+ - Web-Server
+ - Web-Common-Http
+ state: present
+
+- name: Install NET-Framework-Core from file
+ win_feature:
+ name: NET-Framework-Core
+ source: C:\Temp\iso\sources\sxs
+ state: present
+
+- name: Install IIS Web-Server with sub features and management tools
+ win_feature:
+ name: Web-Server
+ state: present
+ include_sub_features: yes
+ include_management_tools: yes
+ register: win_feature
+
+- name: Reboot if installing Web-Server feature requires it
+ win_reboot:
+ when: win_feature.reboot_required
+'''
+
+RETURN = r'''
+exitcode:
+ description: The stringified exit code from the feature installation/removal command.
+ returned: always
+ type: str
+ sample: Success
+feature_result:
+ description: List of features that were installed or removed.
+ returned: success
+ type: complex
+ contains:
+ display_name:
+ description: Feature display name.
+ returned: always
+ type: str
+ sample: "Telnet Client"
+ id:
+ description: The numeric ID of the role or feature.
+ returned: always
+ type: int
+ sample: 44
+ message:
+ description: Any messages returned from the feature subsystem that occurred during installation or removal of this feature.
+ returned: always
+ type: list
+ elements: dict
+ sample: []
+ reboot_required:
+ description: True when the target server requires a reboot as a result of installing or removing this feature.
+ returned: always
+ type: bool
+ sample: true
+ restart_needed:
+ description: DEPRECATED in Ansible 2.4 (refer to C(reboot_required) instead). True when the target server requires a reboot as a
+ result of installing or removing this feature.
+ returned: always
+ type: bool
+ sample: true
+ skip_reason:
+ description: The reason a feature installation or removal was skipped.
+ returned: always
+ type: str
+ sample: NotSkipped
+ success:
+ description: If the feature installation or removal was successful.
+ returned: always
+ type: bool
+ sample: true
+reboot_required:
+ description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot).
+ returned: success
+ type: bool
+ sample: true
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_file.ps1 b/test/support/windows-integration/plugins/modules/win_file.ps1
new file mode 100644
index 00000000..54427549
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_file.ps1
@@ -0,0 +1,152 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$ErrorActionPreference = "Stop"
+
+$params = Parse-Args $args -supports_check_mode $true
+
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -default $false
+$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP
+
+$path = Get-AnsibleParam -obj $params -name "path" -type "path" -failifempty $true -aliases "dest","name"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -validateset "absent","directory","file","touch"
+
+# used in template/copy when dest is the path to a dir and source is a file
+$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str"
+if ((Test-Path -LiteralPath $path -PathType Container) -and ($null -ne $original_basename)) {
+ $path = Join-Path -Path $path -ChildPath $original_basename
+}
+
+$result = @{
+ changed = $false
+}
+
+# Used to delete symlinks as powershell cannot delete broken symlinks
+$symlink_util = @"
+using System;
+using System.ComponentModel;
+using System.Runtime.InteropServices;
+
+namespace Ansible.Command {
+ public class SymLinkHelper {
+ [DllImport("kernel32.dll", CharSet=CharSet.Unicode, SetLastError=true)]
+ public static extern bool DeleteFileW(string lpFileName);
+
+ [DllImport("kernel32.dll", CharSet=CharSet.Unicode, SetLastError=true)]
+ public static extern bool RemoveDirectoryW(string lpPathName);
+
+ public static void DeleteDirectory(string path) {
+ if (!RemoveDirectoryW(path))
+ throw new Exception(String.Format("RemoveDirectoryW({0}) failed: {1}", path, new Win32Exception(Marshal.GetLastWin32Error()).Message));
+ }
+
+ public static void DeleteFile(string path) {
+ if (!DeleteFileW(path))
+ throw new Exception(String.Format("DeleteFileW({0}) failed: {1}", path, new Win32Exception(Marshal.GetLastWin32Error()).Message));
+ }
+ }
+}
+"@
+$original_tmp = $env:TMP
+$env:TMP = $_remote_tmp
+Add-Type -TypeDefinition $symlink_util
+$env:TMP = $original_tmp
+
+# Used to delete directories and files with logic on handling symbolic links
+function Remove-File($file, $checkmode) {
+ try {
+ if ($file.Attributes -band [System.IO.FileAttributes]::ReparsePoint) {
+ # Bug with powershell, if you try and delete a symbolic link that is pointing
+ # to an invalid path it will fail, using Win32 API to do this instead
+ if ($file.PSIsContainer) {
+ if (-not $checkmode) {
+ [Ansible.Command.SymLinkHelper]::DeleteDirectory($file.FullName)
+ }
+ } else {
+ if (-not $checkmode) {
+ [Ansible.Command.SymLinkHelper]::DeleteFile($file.FullName)
+ }
+ }
+ } elseif ($file.PSIsContainer) {
+ Remove-Directory -directory $file -checkmode $checkmode
+ } else {
+ Remove-Item -LiteralPath $file.FullName -Force -WhatIf:$checkmode
+ }
+ } catch [Exception] {
+ Fail-Json $result "Failed to delete $($file.FullName): $($_.Exception.Message)"
+ }
+}
+
+function Remove-Directory($directory, $checkmode) {
+ foreach ($file in Get-ChildItem -LiteralPath $directory.FullName) {
+ Remove-File -file $file -checkmode $checkmode
+ }
+ Remove-Item -LiteralPath $directory.FullName -Force -Recurse -WhatIf:$checkmode
+}
+
+
+if ($state -eq "touch") {
+ if (Test-Path -LiteralPath $path) {
+ if (-not $check_mode) {
+ # Touch the item itself; Get-Item is used because Get-ChildItem on a directory would return its children instead
+ (Get-Item -LiteralPath $path -Force).LastWriteTime = Get-Date
+ }
+ $result.changed = $true
+ } else {
+ Write-Output $null | Out-File -LiteralPath $path -Encoding ASCII -WhatIf:$check_mode
+ $result.changed = $true
+ }
+}
+
+if (Test-Path -LiteralPath $path) {
+ $fileinfo = Get-Item -LiteralPath $path -Force
+ if ($state -eq "absent") {
+ Remove-File -file $fileinfo -checkmode $check_mode
+ $result.changed = $true
+ } else {
+ if ($state -eq "directory" -and -not $fileinfo.PsIsContainer) {
+ Fail-Json $result "path $path is not a directory"
+ }
+
+ if ($state -eq "file" -and $fileinfo.PsIsContainer) {
+ Fail-Json $result "path $path is not a file"
+ }
+ }
+
+} else {
+
+ # If state is not supplied, test the $path to see if it looks like
+ # a file or a directory and set state accordingly
+ if ($null -eq $state) {
+ $basename = Split-Path -Path $path -Leaf
+ if ($basename.length -gt 0) {
+ $state = "file"
+ } else {
+ $state = "directory"
+ }
+ }
+
+ if ($state -eq "directory") {
+ try {
+ New-Item -Path $path -ItemType Directory -WhatIf:$check_mode | Out-Null
+ } catch {
+ if ($_.CategoryInfo.Category -eq "ResourceExists") {
+ $fileinfo = Get-Item -LiteralPath $_.CategoryInfo.TargetName
+ if ($state -eq "directory" -and -not $fileinfo.PsIsContainer) {
+ Fail-Json $result "path $path is not a directory"
+ }
+ } else {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ $result.changed = $true
+ } elseif ($state -eq "file") {
+ Fail-Json $result "path $path will not be created"
+ }
+
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_file.py b/test/support/windows-integration/plugins/modules/win_file.py
new file mode 100644
index 00000000..28149579
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_file.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_file
+version_added: "1.9.2"
+short_description: Creates, touches or removes files or directories
+description:
+ - Creates (empty) files, updates file modification stamps of existing files,
+ and can create or remove directories.
+ - Unlike M(file), does not modify ownership, permissions or manipulate links.
+ - For non-Windows targets, use the M(file) module instead.
+options:
+ path:
+ description:
+ - Path to the file being managed.
+ required: yes
+ type: path
+ aliases: [ dest, name ]
+ state:
+ description:
+ - If C(directory), all immediate subdirectories will be created if they
+ do not exist.
+ - If C(file), the file will NOT be created if it does not exist, see the M(copy)
+ or M(template) module if you want that behavior.
+ - If C(absent), directories will be recursively deleted, and files will be removed.
+ - If C(touch), an empty file will be created if the C(path) does not
+ exist, while an existing file or directory will receive updated file access and
+ modification times (similar to the way C(touch) works from the command line).
+ type: str
+ choices: [ absent, directory, file, touch ]
+seealso:
+- module: file
+- module: win_acl
+- module: win_acl_inheritance
+- module: win_owner
+- module: win_stat
+author:
+- Jon Hawkesworth (@jhawkesworth)
+'''
+
+EXAMPLES = r'''
+- name: Touch a file (creates if not present, updates modification time if present)
+ win_file:
+ path: C:\Temp\foo.conf
+ state: touch
+
+- name: Remove a file, if present
+ win_file:
+ path: C:\Temp\foo.conf
+ state: absent
+
+- name: Create directory structure
+ win_file:
+ path: C:\Temp\folder\subfolder
+ state: directory
+
+- name: Remove directory structure
+ win_file:
+ path: C:\Temp
+ state: absent
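+
+# Illustrative only: state=file checks that an existing path is a regular
+# file; it never creates one (see M(copy) or M(template) for that).
+- name: Check that an existing path is a file
+ win_file:
+ path: C:\Temp\foo.conf
+ state: file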
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_find.ps1 b/test/support/windows-integration/plugins/modules/win_find.ps1
new file mode 100644
index 00000000..bc57c5ff
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_find.ps1
@@ -0,0 +1,416 @@
+#!powershell
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.LinkUtil
+
+$spec = @{
+ options = @{
+ paths = @{ type = "list"; elements = "str"; required = $true }
+ age = @{ type = "str" }
+ age_stamp = @{ type = "str"; default = "mtime"; choices = "mtime", "ctime", "atime" }
+ file_type = @{ type = "str"; default = "file"; choices = "file", "directory" }
+ follow = @{ type = "bool"; default = $false }
+ hidden = @{ type = "bool"; default = $false }
+ patterns = @{ type = "list"; elements = "str"; aliases = "regex", "regexp" }
+ recurse = @{ type = "bool"; default = $false }
+ size = @{ type = "str" }
+ use_regex = @{ type = "bool"; default = $false }
+ get_checksum = @{ type = "bool"; default = $true }
+ checksum_algorithm = @{ type = "str"; default = "sha1"; choices = "md5", "sha1", "sha256", "sha384", "sha512" }
+ }
+ supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$paths = $module.Params.paths
+$age = $module.Params.age
+$age_stamp = $module.Params.age_stamp
+$file_type = $module.Params.file_type
+$follow = $module.Params.follow
+$hidden = $module.Params.hidden
+$patterns = $module.Params.patterns
+$recurse = $module.Params.recurse
+$size = $module.Params.size
+$use_regex = $module.Params.use_regex
+$get_checksum = $module.Params.get_checksum
+$checksum_algorithm = $module.Params.checksum_algorithm
+
+$module.Result.examined = 0
+$module.Result.files = @()
+$module.Result.matched = 0
+
+Load-LinkUtils
+
+Function Assert-Age {
+ Param (
+ [System.IO.FileSystemInfo]$File,
+ [System.Int64]$Age,
+ [System.String]$AgeStamp
+ )
+
+ $actual_age = switch ($AgeStamp) {
+ mtime { $File.LastWriteTime.Ticks }
+ ctime { $File.CreationTime.Ticks }
+ atime { $File.LastAccessTime.Ticks }
+ }
+
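+ # A non-negative $Age holds a cutoff timestamp in ticks: files stamped at or
+ # before it (at least that old) match. A negative $Age holds the negated
+ # cutoff: files stamped at or after it (at most that old) match.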
+ if ($Age -ge 0) {
+ return $Age -ge $actual_age
+ } else {
+ return ($Age * -1) -le $actual_age
+ }
+}
+
+Function Assert-FileType {
+ Param (
+ [System.IO.FileSystemInfo]$File,
+ [System.String]$FileType
+ )
+
+ $is_dir = $File.Attributes.HasFlag([System.IO.FileAttributes]::Directory)
+ return ($FileType -eq 'directory' -and $is_dir) -or ($FileType -eq 'file' -and -not $is_dir)
+}
+
+Function Assert-FileHidden {
+ Param (
+ [System.IO.FileSystemInfo]$File,
+ [Switch]$IsHidden
+ )
+
+ $file_is_hidden = $File.Attributes.HasFlag([System.IO.FileAttributes]::Hidden)
+ return $IsHidden.IsPresent -eq $file_is_hidden
+}
+
+
+Function Assert-FileNamePattern {
+ Param (
+ [System.IO.FileSystemInfo]$File,
+ [System.String[]]$Patterns,
+ [Switch]$UseRegex
+ )
+
+ $valid_match = $false
+ foreach ($pattern in $Patterns) {
+ if ($UseRegex) {
+ if ($File.Name -match $pattern) {
+ $valid_match = $true
+ break
+ }
+ } else {
+ if ($File.Name -like $pattern) {
+ $valid_match = $true
+ break
+ }
+ }
+ }
+ return $valid_match
+}
+
+Function Assert-FileSize {
+ Param (
+ [System.IO.FileSystemInfo]$File,
+ [System.Int64]$Size
+ )
+
+ if ($Size -ge 0) {
+ return $File.Length -ge $Size
+ } else {
+ return $File.Length -le ($Size * -1)
+ }
+}
+
+Function Get-FileChecksum {
+ Param (
+ [System.String]$Path,
+ [System.String]$Algorithm
+ )
+
+ $sp = switch ($algorithm) {
+ 'md5' { New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
+ 'sha1' { New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
+ 'sha256' { New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
+ 'sha384' { New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
+ 'sha512' { New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
+ }
+
+ $fp = [System.IO.File]::Open($Path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite)
+ try {
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower()
+ } finally {
+ $fp.Dispose()
+ }
+
+ return $hash
+}
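+# Illustrative usage: Get-FileChecksum -Path C:\Temp\file.txt -Algorithm sha256
+# returns the lowercase hex digest of the file contents.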
+
+Function Search-Path {
+ [CmdletBinding()]
+ Param (
+ [Parameter(Mandatory=$true)]
+ [System.String]
+ $Path,
+
+ [Parameter(Mandatory=$true)]
+ [AllowEmptyCollection()]
+ [System.Collections.Generic.HashSet`1[System.String]]
+ $CheckedPaths,
+
+ [Parameter(Mandatory=$true)]
+ [Object]
+ $Module,
+
+ [System.Int64]
+ $Age,
+
+ [System.String]
+ $AgeStamp,
+
+ [System.String]
+ $FileType,
+
+ [Switch]
+ $Follow,
+
+ [Switch]
+ $GetChecksum,
+
+ [Switch]
+ $IsHidden,
+
+ [System.String[]]
+ $Patterns,
+
+ [Switch]
+ $Recurse,
+
+ [System.Int64]
+ $Size,
+
+ [Switch]
+ $UseRegex
+ )
+
+ $dir_obj = New-Object -TypeName System.IO.DirectoryInfo -ArgumentList $Path
+ if ([Int32]$dir_obj.Attributes -eq -1) {
+ $Module.Warn("Argument path '$Path' does not exist, skipping")
+ return
+ } elseif (-not $dir_obj.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
+ $Module.Warn("Argument path '$Path' is a file not a directory, skipping")
+ return
+ }
+
+ $dir_files = @()
+ try {
+ $dir_files = $dir_obj.EnumerateFileSystemInfos("*", [System.IO.SearchOption]::TopDirectoryOnly)
+ } catch [System.IO.DirectoryNotFoundException] { # Broken ReparsePoint/Symlink, cannot enumerate
+ } catch [System.UnauthorizedAccessException] {} # No ListDirectory permissions, Get-ChildItem ignored this
+
+ foreach ($dir_child in $dir_files) {
+ if ($dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -and $Recurse) {
+ if ($Follow -or -not $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::ReparsePoint)) {
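+ # Recurse with the same filter arguments; Path is removed from the bound
+ # parameters so the child directory path can be splatted in its place.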
+ $PSBoundParameters.Remove('Path') > $null
+ Search-Path -Path $dir_child.FullName @PSBoundParameters
+ }
+ }
+
+ # Check to see if we've already encountered this path and skip if we have.
+ if (-not $CheckedPaths.Add($dir_child.FullName.ToLowerInvariant())) {
+ continue
+ }
+
+ $Module.Result.examined++
+
+ if ($PSBoundParameters.ContainsKey('Age')) {
+ $age_match = Assert-Age -File $dir_child -Age $Age -AgeStamp $AgeStamp
+ } else {
+ $age_match = $true
+ }
+
+ $file_type_match = Assert-FileType -File $dir_child -FileType $FileType
+ $hidden_match = Assert-FileHidden -File $dir_child -IsHidden:$IsHidden
+
+ if ($PSBoundParameters.ContainsKey('Patterns')) {
+ $pattern_match = Assert-FileNamePattern -File $dir_child -Patterns $Patterns -UseRegex:$UseRegex.IsPresent
+ } else {
+ $pattern_match = $true
+ }
+
+ if ($PSBoundParameters.ContainsKey('Size')) {
+ $size_match = Assert-FileSize -File $dir_child -Size $Size
+ } else {
+ $size_match = $true
+ }
+
+ if (-not ($age_match -and $file_type_match -and $hidden_match -and $pattern_match -and $size_match)) {
+ continue
+ }
+
+ # It passed all our filters so add it
+ $module.Result.matched++
+
+ # TODO: Make this generic so it can be shared with win_find and win_stat.
+ $epoch = New-Object -Type System.DateTime -ArgumentList 1970, 1, 1, 0, 0, 0, 0
+ $file_info = @{
+ attributes = $dir_child.Attributes.ToString()
+ checksum = $null
+ creationtime = (New-TimeSpan -Start $epoch -End $dir_child.CreationTime).TotalSeconds
+ exists = $true
+ extension = $null
+ filename = $dir_child.Name
+ isarchive = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Archive)
+ isdir = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory)
+ ishidden = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Hidden)
+ isreadonly = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::ReadOnly)
+ isreg = $false
+ isshared = $false
+ lastaccesstime = (New-TimeSpan -Start $epoch -End $dir_child.LastAccessTime).TotalSeconds
+ lastwritetime = (New-TimeSpan -Start $epoch -End $dir_child.LastWriteTime).TotalSeconds
+ owner = $null
+ path = $dir_child.FullName
+ sharename = $null
+ size = $null
+ }
+
+ try {
+ $file_info.owner = $dir_child.GetAccessControl().Owner
+ } catch {} # May not have rights to get the Owner, historical behaviour is to ignore.
+
+ if ($dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
+ $share_info = Get-CimInstance -ClassName Win32_Share -Filter "Path='$($dir_child.FullName -replace '\\', '\\')'"
+ if ($null -ne $share_info) {
+ $file_info.isshared = $true
+ $file_info.sharename = $share_info.Name
+ }
+ } else {
+ $file_info.extension = $dir_child.Extension
+ $file_info.isreg = $true
+ $file_info.size = $dir_child.Length
+
+ if ($GetChecksum) {
+ try {
+ $file_info.checksum = Get-FileChecksum -Path $dir_child.FullName -Algorithm $checksum_algorithm
+ } catch {} # Just keep the checksum as $null in the case of a failure.
+ }
+ }
+
+ # Append the link information if the path is a link
+ $link_info = @{
+ isjunction = $false
+ islnk = $false
+ nlink = 1
+ lnk_source = $null
+ lnk_target = $null
+ hlnk_targets = @()
+ }
+ $link_stat = Get-Link -link_path $dir_child.FullName
+ if ($null -ne $link_stat) {
+ switch ($link_stat.Type) {
+ "SymbolicLink" {
+ $link_info.islnk = $true
+ $link_info.isreg = $false
+ $link_info.lnk_source = $link_stat.AbsolutePath
+ $link_info.lnk_target = $link_stat.TargetPath
+ break
+ }
+ "JunctionPoint" {
+ $link_info.isjunction = $true
+ $link_info.isreg = $false
+ $link_info.lnk_source = $link_stat.AbsolutePath
+ $link_info.lnk_target = $link_stat.TargetPath
+ break
+ }
+ "HardLink" {
+ $link_info.nlink = $link_stat.HardTargets.Count
+
+ # remove current path from the targets
+ $hlnk_targets = $link_stat.HardTargets | Where-Object { $_ -ne $dir_child.FullName }
+ $link_info.hlnk_targets = @($hlnk_targets)
+ break
+ }
+ }
+ }
+ foreach ($kv in $link_info.GetEnumerator()) {
+ $file_info.$($kv.Key) = $kv.Value
+ }
+
+ # Output the file_info object
+ $file_info
+ }
+}
+
+$search_params = @{
+ CheckedPaths = [System.Collections.Generic.HashSet`1[System.String]]@()
+ GetChecksum = $get_checksum
+ Module = $module
+ FileType = $file_type
+ Follow = $follow
+ IsHidden = $hidden
+ Recurse = $recurse
+}
+
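+# Convert the age string (e.g. '2s', '10d', '-1h'; a bare number means seconds)
+# into the tick-based cutoff that Assert-Age expects.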
+if ($null -ne $age) {
+ $seconds_per_unit = @{'s'=1; 'm'=60; 'h'=3600; 'd'=86400; 'w'=604800}
+ $seconds_pattern = '^(-?\d+)(s|m|h|d|w)?$'
+ $match = $age -match $seconds_pattern
+ if ($match) {
+ $specified_seconds = [Int64]$Matches[1]
+ if ($null -eq $Matches[2]) {
+ $chosen_unit = 's'
+ } else {
+ $chosen_unit = $Matches[2]
+ }
+
+ $total_seconds = $specified_seconds * ($seconds_per_unit.$chosen_unit)
+
+ if ($total_seconds -ge 0) {
+ $search_params.Age = (Get-Date).AddSeconds($total_seconds * -1).Ticks
+ } else {
+ # total_seconds is negative here: the cutoff lies in the past, and the negated tick value tells Assert-Age to match files newer than that cutoff.
+ $age = (Get-Date).AddSeconds($total_seconds).Ticks
+ $search_params.Age = $age * -1
+ }
+ $search_params.AgeStamp = $age_stamp
+ } else {
+ $module.FailJson("Invalid age pattern specified")
+ }
+}
+
+if ($null -ne $patterns) {
+ $search_params.Patterns = $patterns
+ $search_params.UseRegex = $use_regex
+}
+
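+# Convert the size string (e.g. '1g', '-1048576'; a bare number means bytes)
+# into a byte count for Assert-FileSize.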
+if ($null -ne $size) {
+ $bytes_per_unit = @{'b'=1; 'k'=1KB; 'm'=1MB; 'g'=1GB;'t'=1TB}
+ $size_pattern = '^(-?\d+)(b|k|m|g|t)?$'
+ $match = $size -match $size_pattern
+ if ($match) {
+ $specified_size = [Int64]$Matches[1]
+ if ($null -eq $Matches[2]) {
+ $chosen_byte = 'b'
+ } else {
+ $chosen_byte = $Matches[2]
+ }
+
+ $search_params.Size = $specified_size * ($bytes_per_unit.$chosen_byte)
+ } else {
+ $module.FailJson("Invalid size pattern specified")
+ }
+}
+
+$matched_files = foreach ($path in $paths) {
+ # Ensure we pass in an absolute path. We use the ExecutionContext as this is based on the PSProvider path not the
+ # process location which can be different.
+ $abs_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($path)
+ Search-Path -Path $abs_path @search_params
+}
+
+# Make sure we sort the files in alphabetical order.
+$module.Result.files = @() + ($matched_files | Sort-Object -Property {$_.path})
+
+$module.ExitJson()
+
diff --git a/test/support/windows-integration/plugins/modules/win_find.py b/test/support/windows-integration/plugins/modules/win_find.py
new file mode 100644
index 00000000..f506f956
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_find.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a Windows documentation stub. The actual code lives in the .ps1
+# file of the same name.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_find
+version_added: "2.3"
+short_description: Return a list of files based on specific criteria
+description:
+ - Return a list of files based on specified criteria.
+ - Multiple criteria are AND'd together.
+ - For non-Windows targets, use the M(find) module instead.
+options:
+ age:
+ description:
+ - Select files or folders whose age is equal to or greater than
+ the specified time.
+ - Use a negative age to find files equal to or less than
+ the specified time.
+ - You can choose seconds, minutes, hours, days or weeks
+ by specifying the first letter of any of
+ those words (e.g., "2s", "10d", "1w").
+ type: str
+ age_stamp:
+ description:
+ - Choose the file property against which we compare C(age).
+ - The default attribute we compare with is the last modification time.
+ type: str
+ choices: [ atime, ctime, mtime ]
+ default: mtime
+ checksum_algorithm:
+ description:
+ - Algorithm to determine the checksum of a file.
+ - Will throw an error if the host is unable to use the specified algorithm.
+ type: str
+ choices: [ md5, sha1, sha256, sha384, sha512 ]
+ default: sha1
+ file_type:
+ description: Type of file to search for.
+ type: str
+ choices: [ directory, file ]
+ default: file
+ follow:
+ description:
+ - Set this to C(yes) to follow symlinks in the path.
+ - This needs to be used in conjunction with C(recurse).
+ type: bool
+ default: no
+ get_checksum:
+ description:
+ - Whether to return a checksum of the file in the return info (default sha1),
+ use C(checksum_algorithm) to change from the default.
+ type: bool
+ default: yes
+ hidden:
+ description: Set this to C(yes) to include hidden files or folders.
+ type: bool
+ default: no
+ paths:
+ description:
+ - List of paths of directories to search for files or folders in.
+ - This can be supplied as a single path or a list of paths.
+ type: list
+ required: yes
+ patterns:
+ description:
+ - One or more (powershell or regex) patterns to compare filenames with.
+ - The type of pattern matching is controlled by C(use_regex) option.
+ - The patterns restrict the list of files or folders to be returned based on the filenames.
+ - For a file to be matched, it only has to match one pattern in the list provided.
+ type: list
+ aliases: [ "regex", "regexp" ]
+ recurse:
+ description:
+ - Will recursively descend into the directory looking for files or folders.
+ type: bool
+ default: no
+ size:
+ description:
+ - Select files or folders whose size is equal to or greater than the specified size.
+ - Use a negative value to find files equal to or less than the specified size.
+ - You can specify the size with a byte-type suffix, for example C(k) for kilobytes, C(m) for megabytes, C(g) for gigabytes or C(t) for terabytes.
+ - Size is not evaluated for symbolic links.
+ type: str
+ use_regex:
+ description:
+ - Will set patterns to run as a regex check if set to C(yes).
+ type: bool
+ default: no
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Find files in path
+ win_find:
+ paths: D:\Temp
+
+- name: Find hidden files in path
+ win_find:
+ paths: D:\Temp
+ hidden: yes
+
+- name: Find files in multiple paths
+ win_find:
+ paths:
+ - C:\Temp
+ - D:\Temp
+
+- name: Find files in directory while searching recursively
+ win_find:
+ paths: D:\Temp
+ recurse: yes
+
+- name: Find files in directory while following symlinks
+ win_find:
+ paths: D:\Temp
+ recurse: yes
+ follow: yes
+
+- name: Find files with .log and .out extension using powershell wildcards
+ win_find:
+ paths: D:\Temp
+ patterns: [ '*.log', '*.out' ]
+
+- name: Find files in path based on regex pattern
+ win_find:
+ paths: D:\Temp
+ patterns: out_\d{8}-\d{6}.log
+
+- name: Find files older than 1 day
+ win_find:
+ paths: D:\Temp
+ age: 86400
+
+- name: Find files older than 1 day based on create time
+ win_find:
+ paths: D:\Temp
+ age: 86400
+ age_stamp: ctime
+
+- name: Find files older than 1 day with unit syntax
+ win_find:
+ paths: D:\Temp
+ age: 1d
+
+- name: Find files newer than 1 hour
+ win_find:
+ paths: D:\Temp
+ age: -3600
+
+- name: Find files newer than 1 hour with unit syntax
+ win_find:
+ paths: D:\Temp
+ age: -1h
+
+- name: Find files larger than 1MB
+ win_find:
+ paths: D:\Temp
+ size: 1048576
+
+- name: Find files larger than 1GB with unit syntax
+ win_find:
+ paths: D:\Temp
+ size: 1g
+
+- name: Find files smaller than 1MB
+ win_find:
+ paths: D:\Temp
+ size: -1048576
+
+- name: Find files smaller than 1GB with unit syntax
+ win_find:
+ paths: D:\Temp
+ size: -1g
+
+- name: Find folders/symlinks in multiple paths
+ win_find:
+ paths:
+ - C:\Temp
+ - D:\Temp
+ file_type: directory
+
+- name: Find files and return SHA256 checksum of files found
+ win_find:
+ paths: C:\Temp
+ get_checksum: yes
+ checksum_algorithm: sha256
+
+- name: Find files and do not return the checksum
+ win_find:
+ paths: C:\Temp
+ get_checksum: no
+'''
+
+RETURN = r'''
+examined:
+ description: The number of files/folders that were checked.
+ returned: always
+ type: int
+ sample: 10
+matched:
+ description: The number of files/folders that match the criteria.
+ returned: always
+ type: int
+ sample: 2
+files:
+ description: Information on the files/folders that match the criteria returned as a list of dictionary elements
+ for each file matched. The entries are sorted by the path value alphabetically.
+ returned: success
+ type: complex
+ contains:
+ attributes:
+ description: attributes of the file at path in raw form.
+ returned: success, path exists
+ type: str
+ sample: "Archive, Hidden"
+ checksum:
+ description: The checksum of a file based on checksum_algorithm specified.
+ returned: success, path exists, path is a file, get_checksum == True
+ type: str
+ sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
+ creationtime:
+ description: The create time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ exists:
+ description: Whether the file exists, will always be true for M(win_find).
+ returned: success, path exists
+ type: bool
+ sample: true
+ extension:
+ description: The extension of the file at path.
+ returned: success, path exists, path is a file
+ type: str
+ sample: ".ps1"
+ filename:
+ description: The name of the file.
+ returned: success, path exists
+ type: str
+ sample: temp
+ hlnk_targets:
+ description: List of other files pointing to the same file (hard links), excludes the current file.
+ returned: success, path exists
+ type: list
+ sample:
+ - C:\temp\file.txt
+ - C:\Windows\update.log
+ isarchive:
+ description: If the path is ready for archiving or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isdir:
+ description: If the path is a directory or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ ishidden:
+ description: If the path is hidden or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isjunction:
+ description: If the path is a junction point.
+ returned: success, path exists
+ type: bool
+ sample: true
+ islnk:
+ description: If the path is a symbolic link.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isreadonly:
+ description: If the path is read only or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isreg:
+ description: If the path is a regular file or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isshared:
+ description: If the path is shared or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ lastaccesstime:
+ description: The last access time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ lastwritetime:
+ description: The last modification time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ lnk_source:
+ description: The target of the symlink normalized for the remote filesystem.
+ returned: success, path exists, path is a symbolic link or junction point
+ type: str
+ sample: C:\temp
+ lnk_target:
+ description: The target of the symlink. Note that relative paths remain relative, will return null if not a link.
+ returned: success, path exists, path is a symbolic link or junction point
+ type: str
+ sample: temp
+ nlink:
+ description: Number of links to the file (hard links)
+ returned: success, path exists
+ type: int
+ sample: 1
+ owner:
+ description: The owner of the file.
+ returned: success, path exists
+ type: str
+ sample: BUILTIN\Administrators
+ path:
+ description: The full absolute path to the file.
+ returned: success, path exists
+ type: str
+ sample: C:\Temp\test.ps1
+ sharename:
+ description: The name of share if folder is shared.
+ returned: success, path exists, path is a directory and isshared == True
+ type: str
+ sample: file-share
+ size:
+ description: The size in bytes of the file.
+ returned: success, path exists, path is a file
+ type: int
+ sample: 1024
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_format.ps1 b/test/support/windows-integration/plugins/modules/win_format.ps1
new file mode 100644
index 00000000..b5fd3ae0
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_format.ps1
@@ -0,0 +1,200 @@
+#!powershell
+
+# Copyright: (c) 2019, Varun Chopra (@chopraaa) <v@chopraaa.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#AnsibleRequires -OSVersion 6.2
+
+Set-StrictMode -Version 2
+
+$ErrorActionPreference = "Stop"
+
+$spec = @{
+ options = @{
+ drive_letter = @{ type = "str" }
+ path = @{ type = "str" }
+ label = @{ type = "str" }
+ new_label = @{ type = "str" }
+ file_system = @{ type = "str"; choices = "ntfs", "refs", "exfat", "fat32", "fat" }
+ allocation_unit_size = @{ type = "int" }
+ large_frs = @{ type = "bool" }
+ full = @{ type = "bool"; default = $false }
+ compress = @{ type = "bool" }
+ integrity_streams = @{ type = "bool" }
+ force = @{ type = "bool"; default = $false }
+ }
+ mutually_exclusive = @(
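+ # The leading comma makes the inner list a single element of the outer array.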
+ ,@('drive_letter', 'path', 'label')
+ )
+ required_one_of = @(
+ ,@('drive_letter', 'path', 'label')
+ )
+ supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$drive_letter = $module.Params.drive_letter
+$path = $module.Params.path
+$label = $module.Params.label
+$new_label = $module.Params.new_label
+$file_system = $module.Params.file_system
+$allocation_unit_size = $module.Params.allocation_unit_size
+$large_frs = $module.Params.large_frs
+$full_format = $module.Params.full
+$compress_volume = $module.Params.compress
+$integrity_streams = $module.Params.integrity_streams
+$force_format = $module.Params.force
+
+# Some pre-checks
+if ($null -ne $drive_letter -and $drive_letter -notmatch "^[a-zA-Z]$") {
+ $module.FailJson("The parameter drive_letter should be a single character A-Z")
+}
+if ($integrity_streams -eq $true -and $file_system -ne "refs") {
+ $module.FailJson("Integrity streams can be enabled only on ReFS volumes. You specified: $($file_system)")
+}
+if ($compress_volume -eq $true) {
+ if ($file_system -eq "ntfs") {
+ if ($null -ne $allocation_unit_size -and $allocation_unit_size -gt 4096) {
+ $module.FailJson("NTFS compression is not supported for allocation unit sizes above 4096")
+ }
+ }
+ else {
+ $module.FailJson("Compression can be enabled only on NTFS volumes. You specified: $($file_system)")
+ }
+}
+
+function Get-AnsibleVolume {
+ param(
+ $DriveLetter,
+ $Path,
+ $Label
+ )
+
+ if ($null -ne $DriveLetter) {
+ try {
+ $volume = Get-Volume -DriveLetter $DriveLetter
+ } catch {
+ $module.FailJson("There was an error retrieving the volume using drive_letter $($DriveLetter): $($_.Exception.Message)", $_)
+ }
+ }
+ elseif ($null -ne $Path) {
+ try {
+ $volume = Get-Volume -Path $Path
+ } catch {
+ $module.FailJson("There was an error retrieving the volume using path $($Path): $($_.Exception.Message)", $_)
+ }
+ }
+ elseif ($null -ne $Label) {
+ try {
+ $volume = Get-Volume -FileSystemLabel $Label
+ } catch {
+ $module.FailJson("There was an error retrieving the volume using label $($Label): $($_.Exception.Message)", $_)
+ }
+ }
+ else {
+ $module.FailJson("Unable to locate volume: drive_letter, path and label were not specified")
+ }
+
+ return $volume
+}
+
+function Format-AnsibleVolume {
+ param(
+ $Path,
+ $Label,
+ $FileSystem,
+ $Full,
+ $UseLargeFRS,
+ $Compress,
+ $SetIntegrityStreams,
+ $AllocationUnitSize
+ )
+ $parameters = @{
+ Path = $Path
+ Full = $Full
+ }
+ if ($null -ne $UseLargeFRS) {
+ $parameters.Add("UseLargeFRS", $UseLargeFRS)
+ }
+ if ($null -ne $SetIntegrityStreams) {
+ $parameters.Add("SetIntegrityStreams", $SetIntegrityStreams)
+ }
+ if ($null -ne $Compress){
+ $parameters.Add("Compress", $Compress)
+ }
+ if ($null -ne $Label) {
+ $parameters.Add("NewFileSystemLabel", $Label)
+ }
+ if ($null -ne $FileSystem) {
+ $parameters.Add("FileSystem", $FileSystem)
+ }
+ if ($null -ne $AllocationUnitSize) {
+ $parameters.Add("AllocationUnitSize", $AllocationUnitSize)
+ }
+
+ Format-Volume @parameters -Confirm:$false | Out-Null
+
+}
+
+$ansible_volume = Get-AnsibleVolume -DriveLetter $drive_letter -Path $path -Label $label
+$ansible_file_system = $ansible_volume.FileSystem
+$ansible_volume_size = $ansible_volume.Size
+$ansible_volume_alu = (Get-CimInstance -ClassName Win32_Volume -Filter "DeviceId = '$($ansible_volume.path.replace('\','\\'))'" -Property BlockSize).BlockSize
+
+$ansible_partition = Get-Partition -Volume $ansible_volume
+
+if (-not $force_format -and $null -ne $allocation_unit_size -and $ansible_volume_alu -ne 0 -and $null -ne $ansible_volume_alu -and $allocation_unit_size -ne $ansible_volume_alu) {
+ $module.FailJson("Force format must be specified since target allocation unit size: $($allocation_unit_size) is different from the current allocation unit size of the volume: $($ansible_volume_alu)")
+}
+
+foreach ($access_path in $ansible_partition.AccessPaths) {
+ if ($access_path -ne $path) {
+ if ($null -ne $file_system -and
+ -not [string]::IsNullOrEmpty($ansible_file_system) -and
+ $file_system -ne $ansible_file_system)
+ {
+ if (-not $force_format)
+ {
+ $no_files_in_volume = (Get-ChildItem -LiteralPath $access_path -ErrorAction SilentlyContinue | Measure-Object).Count -eq 0
+ if($no_files_in_volume)
+ {
+ $module.FailJson("Force format must be specified since target file system: $($file_system) is different from the current file system of the volume: $($ansible_file_system.ToLower())")
+ }
+ else
+ {
+ $module.FailJson("Force format must be specified to format non-pristine volumes")
+ }
+ }
+ }
+ else
+ {
+ $pristine = -not $force_format
+ }
+ }
+}
+
+if ($force_format) {
+ if (-not $module.CheckMode) {
+ Format-AnsibleVolume -Path $ansible_volume.Path -Full $full_format -Label $new_label -FileSystem $file_system -SetIntegrityStreams $integrity_streams -UseLargeFRS $large_frs -Compress $compress_volume -AllocationUnitSize $allocation_unit_size
+ }
+ $module.Result.changed = $true
+}
+else {
+ if ($pristine) {
+ if ($null -eq $new_label) {
+ $new_label = $ansible_volume.FileSystemLabel
+ }
+ # Conditions for formatting
+ if ($ansible_volume_size -eq 0 -or
+ $ansible_volume.FileSystemLabel -ne $new_label) {
+ if (-not $module.CheckMode) {
+ Format-AnsibleVolume -Path $ansible_volume.Path -Full $full_format -Label $new_label -FileSystem $file_system -SetIntegrityStreams $integrity_streams -UseLargeFRS $large_frs -Compress $compress_volume -AllocationUnitSize $allocation_unit_size
+ }
+ $module.Result.changed = $true
+ }
+ }
+}
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_format.py b/test/support/windows-integration/plugins/modules/win_format.py
new file mode 100644
index 00000000..f8f18ed7
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_format.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Varun Chopra (@chopraaa) <v@chopraaa.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+module: win_format
+version_added: '2.8'
+short_description: Formats an existing volume or a new volume on an existing partition on Windows
+description:
+ - The M(win_format) module formats an existing volume or a new volume on an existing partition on Windows.
+options:
+ drive_letter:
+ description:
+ - Used to specify the drive letter of the volume to be formatted.
+ type: str
+ path:
+ description:
+ - Used to specify the path to the volume to be formatted.
+ type: str
+ label:
+ description:
+ - Used to specify the label of the volume to be formatted.
+ type: str
+ new_label:
+ description:
+ - Used to specify the new file system label of the formatted volume.
+ type: str
+ file_system:
+ description:
+ - Used to specify the file system to be used when formatting the target volume.
+ type: str
+ choices: [ ntfs, refs, exfat, fat32, fat ]
+ allocation_unit_size:
+ description:
+ - Specifies the cluster size to use when formatting the volume.
+ - If no cluster size is specified when you format a partition, defaults are selected based on
+ the size of the partition.
+ - This value must be a multiple of the physical sector size of the disk.
+ type: int
+ large_frs:
+ description:
+ - Specifies that large File Record System (FRS) should be used.
+ type: bool
+ compress:
+ description:
+ - Enable compression on the resulting NTFS volume.
+ - NTFS compression is not supported where I(allocation_unit_size) is more than 4096.
+ type: bool
+ integrity_streams:
+ description:
+ - Enable integrity streams on the resulting ReFS volume.
+ type: bool
+ full:
+ description:
+ - A full format writes to every sector of the disk, takes much longer to perform than the
+ default (quick) format, and is not recommended on storage that is thinly provisioned.
+ - Specify C(true) for full format.
+ type: bool
+ force:
+ description:
+ - Specify if formatting should be forced for volumes that are not created from new partitions
+ or if the source and target file system are different.
+ type: bool
+notes:
+ - Microsoft Windows Server 2012 or Microsoft Windows 8 or newer is required to use this module. To check if your system is compatible, see
+ U(https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version).
+ - One of the three parameters (I(drive_letter), I(path) and I(label)) is mandatory to identify the target
+ volume, but more than one cannot be specified at the same time.
+ - This module is idempotent if I(force) is not specified and file system labels remain preserved.
+ - For more information, see U(https://docs.microsoft.com/en-us/previous-versions/windows/desktop/stormgmt/format-msft-volume)
+seealso:
+ - module: win_disk_facts
+ - module: win_partition
+author:
+ - Varun Chopra (@chopraaa) <v@chopraaa.com>
+'''
+
+EXAMPLES = r'''
+- name: Create a partition with drive letter D and size 5 GiB
+ win_partition:
+ drive_letter: D
+ partition_size: 5 GiB
+ disk_number: 1
+
+- name: Full format the newly created partition as NTFS and label it
+ win_format:
+ drive_letter: D
+ file_system: NTFS
+ new_label: Formatted
+ full: True
+'''
+
+RETURN = r'''
+#
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_get_url.ps1 b/test/support/windows-integration/plugins/modules/win_get_url.ps1
new file mode 100644
index 00000000..1d8dd5a3
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_get_url.ps1
@@ -0,0 +1,274 @@
+#!powershell
+
+# Copyright: (c) 2015, Paul Durivage <paul.durivage@rackspace.com>
+# Copyright: (c) 2015, Tal Auslander <tal@cloudshare.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2019, Viktor Utkin <viktor_utkin@epam.com>
+# Copyright: (c) 2019, Uladzimir Klybik <uladzimir_klybik@epam.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.FileUtil
+#Requires -Module Ansible.ModuleUtils.WebRequest
+
+$spec = @{
+ options = @{
+ url = @{ type="str"; required=$true }
+ dest = @{ type='path'; required=$true }
+ force = @{ type='bool'; default=$true }
+ checksum = @{ type='str' }
+ checksum_algorithm = @{ type='str'; default='sha1'; choices = @("md5", "sha1", "sha256", "sha384", "sha512") }
+ checksum_url = @{ type='str' }
+
+ # Defined for the alias backwards compatibility, remove once aliases are removed
+ url_username = @{
+ aliases = @("user", "username")
+ deprecated_aliases = @(
+ @{ name = "user"; version = "2.14" },
+ @{ name = "username"; version = "2.14" }
+ )
+ }
+ url_password = @{
+ aliases = @("password")
+ deprecated_aliases = @(
+ @{ name = "password"; version = "2.14" }
+ )
+ }
+ }
+ mutually_exclusive = @(
+ ,@('checksum', 'checksum_url')
+ )
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec, @(Get-AnsibleWebRequestSpec))
+
+$url = $module.Params.url
+$dest = $module.Params.dest
+$force = $module.Params.force
+$checksum = $module.Params.checksum
+$checksum_algorithm = $module.Params.checksum_algorithm
+$checksum_url = $module.Params.checksum_url
+
+$module.Result.elapsed = 0
+$module.Result.url = $url
+
+Function Get-ChecksumFromUri {
+ param(
+ [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
+ [Parameter(Mandatory=$true)][Uri]$Uri,
+ [Uri]$SourceUri
+ )
+
+ $script = {
+ param($Response, $Stream)
+
+ $read_stream = New-Object -TypeName System.IO.StreamReader -ArgumentList $Stream
+ $web_checksum = $read_stream.ReadToEnd()
+ $basename = (Split-Path -Path $SourceUri.LocalPath -Leaf)
+ $basename = [regex]::Escape($basename)
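+ # The checksum file is expected to hold sha1sum/sha256sum-style lines of
+ # "<hash> <filename>"; select the line whose filename column ends with the
+ # basename of the resource being downloaded.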
+ $web_checksum_str = $web_checksum -split '\r?\n' | Select-String -Pattern $("\s+\.?\/?\\?" + $basename + "\s*$")
+ if (-not $web_checksum_str) {
+ $Module.FailJson("Checksum record not found for file name '$basename' in file from url: '$Uri'")
+ }
+
+ $web_checksum_str_splitted = $web_checksum_str[0].ToString().split(" ", 2)
+ $hash_from_file = $web_checksum_str_splitted[0].Trim()
+ # Remove any non-alphanumeric characters
+ $hash_from_file = $hash_from_file -replace '\W+', ''
+
+ Write-Output -InputObject $hash_from_file
+ }
+ $web_request = Get-AnsibleWebRequest -Uri $Uri -Module $Module
+
+ try {
+ Invoke-WithWebRequest -Module $Module -Request $web_request -Script $script
+ } catch {
+ $Module.FailJson("Error when getting the remote checksum from '$Uri'. $($_.Exception.Message)", $_)
+ }
+}
+
+Function Compare-ModifiedFile {
+ <#
+ .SYNOPSIS
+ Compares the remote URI resource against the local Dest resource. Will
+ return true if the LastWriteTime/LastModificationDate of the remote is
+ newer than the local resource date.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
+ [Parameter(Mandatory=$true)][Uri]$Uri,
+ [Parameter(Mandatory=$true)][String]$Dest
+ )
+
+ $dest_last_mod = (Get-AnsibleItem -Path $Dest).LastWriteTimeUtc
+
+ # If the URI is a file we don't need to go through the whole WebRequest
+ if ($Uri.IsFile) {
+ $src_last_mod = (Get-AnsibleItem -Path $Uri.AbsolutePath).LastWriteTimeUtc
+ } else {
+ $web_request = Get-AnsibleWebRequest -Uri $Uri -Module $Module
+ $web_request.Method = switch ($web_request.GetType().Name) {
+ FtpWebRequest { [System.Net.WebRequestMethods+Ftp]::GetDateTimestamp }
+ HttpWebRequest { [System.Net.WebRequestMethods+Http]::Head }
+ }
+ $script = { param($Response, $Stream); $Response.LastModified }
+
+ try {
+ $src_last_mod = Invoke-WithWebRequest -Module $Module -Request $web_request -Script $script
+ } catch {
+ $Module.FailJson("Error when requesting 'Last-Modified' date from '$Uri'. $($_.Exception.Message)", $_)
+ }
+ }
+
+ # Return $true if the Uri LastModification date is newer than the Dest LastModification date
+ ((Get-Date -Date $src_last_mod).ToUniversalTime() -gt $dest_last_mod)
+}
+
+Function Get-Checksum {
+ param(
+ [Parameter(Mandatory=$true)][String]$Path,
+ [String]$Algorithm = "sha1"
+ )
+
+ switch ($Algorithm) {
+ 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
+ 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
+ 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
+ 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
+ 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
+ }
+
+ $fs = [System.IO.File]::Open($Path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read,
+ [System.IO.FileShare]::ReadWrite)
+ try {
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fs)).Replace("-", "").ToLower()
+ } finally {
+ $fs.Dispose()
+ }
+ return $hash
+}
+
+Function Invoke-DownloadFile {
+ param(
+ [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
+ [Parameter(Mandatory=$true)][Uri]$Uri,
+ [Parameter(Mandatory=$true)][String]$Dest,
+ [String]$Checksum,
+ [String]$ChecksumAlgorithm
+ )
+
+ # Check $dest parent folder exists before attempting download, which avoids unhelpful generic error message.
+ $dest_parent = Split-Path -LiteralPath $Dest
+ if (-not (Test-Path -LiteralPath $dest_parent -PathType Container)) {
+ $module.FailJson("The path '$dest_parent' does not exist for destination '$Dest', or is not visible to the current user. Ensure download destination folder exists (perhaps using win_file state=directory) before win_get_url runs.")
+ }
+
+ $download_script = {
+ param($Response, $Stream)
+
+ # Download the file to a temporary directory so we can compare it
+ $tmp_dest = Join-Path -Path $Module.Tmpdir -ChildPath ([System.IO.Path]::GetRandomFileName())
+ $fs = [System.IO.File]::Create($tmp_dest)
+ try {
+ $Stream.CopyTo($fs)
+ $fs.Flush()
+ } finally {
+ $fs.Dispose()
+ }
+ $tmp_checksum = Get-Checksum -Path $tmp_dest -Algorithm $ChecksumAlgorithm
+ $Module.Result.checksum_src = $tmp_checksum
+
+ # If the checksum has been set, verify the checksum of the remote against the input checksum.
+ if ($Checksum -and $Checksum -ne $tmp_checksum) {
+ $Module.FailJson(("The checksum for {0} did not match '{1}', it was '{2}'" -f $Uri, $Checksum, $tmp_checksum))
+ }
+
+ $download = $true
+ if (Test-Path -LiteralPath $Dest) {
+ # Validate the remote checksum against the existing downloaded file
+ $dest_checksum = Get-Checksum -Path $Dest -Algorithm $ChecksumAlgorithm
+
+ # If we don't need to download anything, save the dest checksum so we don't waste time calculating it
+ # again at the end of the script
+ if ($dest_checksum -eq $tmp_checksum) {
+ $download = $false
+ $Module.Result.checksum_dest = $dest_checksum
+ $Module.Result.size = (Get-AnsibleItem -Path $Dest).Length
+ }
+ }
+
+ if ($download) {
+ Copy-Item -LiteralPath $tmp_dest -Destination $Dest -Force -WhatIf:$Module.CheckMode > $null
+ $Module.Result.changed = $true
+ }
+ }
+ $web_request = Get-AnsibleWebRequest -Uri $Uri -Module $Module
+
+ try {
+ Invoke-WithWebRequest -Module $Module -Request $web_request -Script $download_script
+ } catch {
+ $Module.FailJson("Error downloading '$Uri' to '$Dest': $($_.Exception.Message)", $_)
+ }
+}
+
+# Use last part of url for dest file name if a directory is supplied for $dest
+if (Test-Path -LiteralPath $dest -PathType Container) {
+ $uri = [System.Uri]$url
+ $basename = Split-Path -Path $uri.LocalPath -Leaf
+ if ($uri.LocalPath -and $uri.LocalPath -ne '/' -and $basename) {
+ $dest = Join-Path -Path $dest -ChildPath $basename
+ } else {
+ $dest = Join-Path -Path $dest -ChildPath $uri.Host
+ }
+
+ # Ensure we have a string instead of a PS object to avoid serialization issues
+ $dest = $dest.ToString()
+} elseif (([System.IO.Path]::GetFileName($dest)) -eq '') {
+ # We have a trailing path separator
+ $module.FailJson("The destination path '$dest' does not exist, or is not visible to the current user. Ensure download destination folder exists (perhaps using win_file state=directory) before win_get_url runs.")
+}
+
+$module.Result.dest = $dest
+
+if ($checksum) {
+ $checksum = $checksum.Trim().ToLower()
+}
+if ($checksum_algorithm) {
+ $checksum_algorithm = $checksum_algorithm.Trim().ToLower()
+}
+if ($checksum_url) {
+ $checksum_url = $checksum_url.Trim()
+}
+
+# If checksum_url is set, fetch the checksum file from that URL and use its contents as the expected checksum.
+if ($checksum_url) {
+ $checksum_uri = [System.Uri]$checksum_url
+ if ($checksum_uri.Scheme -notin @("file", "ftp", "http", "https")) {
+ $module.FailJson("Unsupported 'checksum_url' value for '$dest': '$checksum_url'")
+ }
+
+ $checksum = Get-ChecksumFromUri -Module $module -Uri $checksum_uri -SourceUri $url
+}
+
+if ($force -or -not (Test-Path -LiteralPath $dest)) {
+ # force=yes or dest does not exist, download the file
+ # Note: Invoke-DownloadFile will compare the checksums internally if dest exists
+ Invoke-DownloadFile -Module $module -Uri $url -Dest $dest -Checksum $checksum `
+ -ChecksumAlgorithm $checksum_algorithm
+} else {
+ # force=no, we want to check the last modified dates and only download if they don't match
+ $is_modified = Compare-ModifiedFile -Module $module -Uri $url -Dest $dest
+ if ($is_modified) {
+ Invoke-DownloadFile -Module $module -Uri $url -Dest $dest -Checksum $checksum `
+ -ChecksumAlgorithm $checksum_algorithm
+ }
+}
+
+if ((-not $module.Result.ContainsKey("checksum_dest")) -and (Test-Path -LiteralPath $dest)) {
+ # Calculate the dest file checksum if it hasn't already been done
+ $module.Result.checksum_dest = Get-Checksum -Path $dest -Algorithm $checksum_algorithm
+ $module.Result.size = (Get-AnsibleItem -Path $dest).Length
+}
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_get_url.py b/test/support/windows-integration/plugins/modules/win_get_url.py
new file mode 100644
index 00000000..ef5b5f97
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_get_url.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a Windows documentation stub. The actual code lives in the .ps1
+# file of the same name.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_get_url
+version_added: "1.7"
+short_description: Downloads file from HTTP, HTTPS, or FTP to node
+description:
+- Downloads files from HTTP, HTTPS, or FTP to the remote server.
+- The remote server I(must) have direct access to the remote resource.
+- For non-Windows targets, use the M(get_url) module instead.
+options:
+ url:
+ description:
+ - The full URL of a file to download.
+ type: str
+ required: yes
+ dest:
+ description:
+ - The location to save the file at the URL.
+ - Be sure to include a filename and extension as appropriate.
+ type: path
+ required: yes
+ force:
+ description:
+ - If C(yes), will download the file every time and replace the file if the contents change. If C(no), will only
+ download the file if it does not exist or the remote file has been
+ modified more recently than the local file.
+ - This works by sending an http HEAD request to retrieve last modified
+ time of the requested resource, so for this to work, the remote web
+ server must support HEAD requests.
+ type: bool
+ default: yes
+ version_added: "2.0"
+ checksum:
+ description:
+ - If a I(checksum) is passed to this parameter, the digest of the
+ destination file will be calculated after it is downloaded to ensure
+ its integrity and verify that the transfer completed successfully.
+ - This option cannot be set with I(checksum_url).
+ type: str
+ version_added: "2.8"
+ checksum_algorithm:
+ description:
+ - Specifies the hashing algorithm used when calculating the checksum of
+ the remote and destination file.
+ type: str
+ choices:
+ - md5
+ - sha1
+ - sha256
+ - sha384
+ - sha512
+ default: sha1
+ version_added: "2.8"
+ checksum_url:
+ description:
+ - Specifies a URL that contains the checksum values for the resource at
+ I(url).
+ - Like C(checksum), this is used to verify the integrity of the remote
+ transfer.
+ - This option cannot be set with I(checksum).
+ type: str
+ version_added: "2.8"
+ url_username:
+ description:
+ - The username to use for authentication.
+ - The aliases I(user) and I(username) are deprecated and will be removed in
+ Ansible 2.14.
+ aliases:
+ - user
+ - username
+ url_password:
+ description:
+ - The password for I(url_username).
+ - The alias I(password) is deprecated and will be removed in Ansible 2.14.
+ aliases:
+ - password
+ proxy_url:
+ version_added: "2.0"
+ proxy_username:
+ version_added: "2.0"
+ proxy_password:
+ version_added: "2.0"
+ headers:
+ version_added: "2.4"
+ use_proxy:
+ version_added: "2.4"
+ follow_redirects:
+ version_added: "2.9"
+ maximum_redirection:
+ version_added: "2.9"
+ client_cert:
+ version_added: "2.9"
+ client_cert_password:
+ version_added: "2.9"
+ method:
+ description:
+ - This option is not for use with C(win_get_url) and should be ignored.
+ version_added: "2.9"
+notes:
+- If your URL includes an escaped slash character (%2F), this module will convert it to a real slash.
+ This is a result of the behaviour of the System.Uri class as described in
+ L(the documentation,https://docs.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/network/schemesettings-element-uri-settings#remarks).
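+ For example, a request for C(https://example.com/foo%2Fbar) is sent to the server as C(https://example.com/foo/bar).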
+- Since Ansible 2.8, the module will skip reporting a change if the remote
+ checksum is the same as the checksum of the local file, even when C(force=yes). This is to
+ better align with M(get_url).
+extends_documentation_fragment:
+- url_windows
+seealso:
+- module: get_url
+- module: uri
+- module: win_uri
+author:
+- Paul Durivage (@angstwad)
+- Takeshi Kuramochi (@tksarah)
+'''
+
+EXAMPLES = r'''
+- name: Download earthrise.jpg to specified path
+ win_get_url:
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\Users\RandomUser\earthrise.jpg
+
+- name: Download earthrise.jpg to specified path only if modified
+ win_get_url:
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\Users\RandomUser\earthrise.jpg
+ force: no
+
+- name: Download earthrise.jpg to specified path through a proxy server
+ win_get_url:
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\Users\RandomUser\earthrise.jpg
+ proxy_url: http://10.0.0.1:8080
+ proxy_username: username
+ proxy_password: password
+
+- name: Download file from FTP with authentication
+ win_get_url:
+ url: ftp://server/file.txt
+ dest: '%TEMP%\ftp-file.txt'
+ url_username: ftp-user
+ url_password: ftp-password
+
+- name: Download src with sha256 checksum url
+ win_get_url:
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\temp\earthrise.jpg
+ checksum_url: http://www.example.com/sha256sum.txt
+ checksum_algorithm: sha256
+    force: yes
+
+- name: Download src with sha1 checksum value
+ win_get_url:
+ url: http://www.example.com/earthrise.jpg
+ dest: C:\temp\earthrise.jpg
+ checksum: a97e6837f60cec6da4491bab387296bbcd72bdba
+ checksum_algorithm: sha1
+    force: yes
+'''
+
+RETURN = r'''
+dest:
+ description: destination file/path
+ returned: always
+ type: str
+ sample: C:\Users\RandomUser\earthrise.jpg
+checksum_dest:
+ description: <algorithm> checksum of the file after the download
+ returned: success and dest has been downloaded
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+checksum_src:
+ description: <algorithm> checksum of the remote resource
+ returned: force=yes or dest did not exist
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+elapsed:
+ description: The elapsed seconds between the start of poll and the end of the module.
+ returned: always
+ type: float
+ sample: 2.1406487
+size:
+ description: size of the dest file
+ returned: success
+ type: int
+ sample: 1220
+url:
+ description: requested url
+ returned: always
+ type: str
+ sample: http://www.example.com/earthrise.jpg
+msg:
+  description: Error message, or HTTP status message from the web server
+ returned: always
+ type: str
+ sample: OK
+status_code:
+ description: HTTP status code
+ returned: always
+ type: int
+ sample: 200
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_lineinfile.ps1 b/test/support/windows-integration/plugins/modules/win_lineinfile.ps1
new file mode 100644
index 00000000..38dd8b8b
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_lineinfile.ps1
@@ -0,0 +1,450 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.Backup
+
+function WriteLines($outlines, $path, $linesep, $encodingobj, $validate, $check_mode) {
+ Try {
+ $temppath = [System.IO.Path]::GetTempFileName();
+ }
+ Catch {
+ Fail-Json @{} "Cannot create temporary file! ($($_.Exception.Message))";
+ }
+ $joined = $outlines -join $linesep;
+ [System.IO.File]::WriteAllText($temppath, $joined, $encodingobj);
+
+ If ($validate) {
+
+ If (-not ($validate -like "*%s*")) {
+ Fail-Json @{} "validate must contain %s: $validate";
+ }
+
+ $validate = $validate.Replace("%s", $temppath);
+
+ $parts = [System.Collections.ArrayList] $validate.Split(" ");
+ $cmdname = $parts[0];
+
+ $cmdargs = $validate.Substring($cmdname.Length + 1);
+
+        $psi = New-Object Diagnostics.ProcessStartInfo;
+        $psi.FileName = $cmdname;
+        $psi.Arguments = $cmdargs;
+        $psi.UseShellExecute = $false;
+        # stdout/stderr must be redirected so they can be read and reported on failure
+        $psi.RedirectStandardOutput = $true;
+        $psi.RedirectStandardError = $true;
+        $process = [Diagnostics.Process]::Start($psi);
+        [string] $stdout = $process.StandardOutput.ReadToEnd();
+        [string] $stderr = $process.StandardError.ReadToEnd();
+        $process.WaitForExit();
+
+        If ($process.ExitCode -ne 0) {
+            Remove-Item $temppath -Force;
+            Fail-Json @{} "failed to validate $cmdname $cmdargs with error: $stdout $stderr";
+        }
+
+ }
+
+ # Commit changes to the path
+ $cleanpath = $path.Replace("/", "\");
+ Try {
+ Copy-Item -Path $temppath -Destination $cleanpath -Force -WhatIf:$check_mode;
+ }
+ Catch {
+ Fail-Json @{} "Cannot write to: $cleanpath ($($_.Exception.Message))";
+ }
+
+ Try {
+ Remove-Item -Path $temppath -Force -WhatIf:$check_mode;
+ }
+ Catch {
+ Fail-Json @{} "Cannot remove temporary file: $temppath ($($_.Exception.Message))";
+ }
+
+ return $joined;
+
+}
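+# A minimal sketch (hypothetical values, not part of the module flow) of how
+# the validate template handled above is expanded: '%s' is replaced with the
+# temp file path before the validator command is run against it.
+$example_validate = 'findstr.exe /c:"ok" %s'   # hypothetical validator command
+$example_temppath = 'C:\Temp\tmp1A2B.tmp'      # hypothetical temp file path
+$example_command = $example_validate.Replace("%s", $example_temppath)
+# $example_command -> findstr.exe /c:"ok" C:\Temp\tmp1A2B.tmp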
+
+
+# Implement the functionality for state == 'present'
+function Present($path, $regex, $line, $insertafter, $insertbefore, $create, $backup, $backrefs, $validate, $encodingobj, $linesep, $check_mode, $diff_support) {
+
+ # Note that we have to clean up the path because ansible wants to treat / and \ as
+ # interchangeable in windows pathnames, but .NET framework internals do not support that.
+ $cleanpath = $path.Replace("/", "\");
+
+ # Check if path exists. If it does not exist, either create it if create == "yes"
+ # was specified or fail with a reasonable error message.
+ If (-not (Test-Path -LiteralPath $path)) {
+ If (-not $create) {
+ Fail-Json @{} "Path $path does not exist !";
+ }
+ # Create new empty file, using the specified encoding to write correct BOM
+ [System.IO.File]::WriteAllLines($cleanpath, "", $encodingobj);
+ }
+
+ # Initialize result information
+ $result = @{
+ backup = "";
+ changed = $false;
+ msg = "";
+ }
+
+ # Read the dest file lines using the indicated encoding into a mutable ArrayList.
+ $before = [System.IO.File]::ReadAllLines($cleanpath, $encodingobj)
+ If ($null -eq $before) {
+ $lines = New-Object System.Collections.ArrayList;
+ }
+ Else {
+ $lines = [System.Collections.ArrayList] $before;
+ }
+
+ if ($diff_support) {
+ $result.diff = @{
+ before = $before -join $linesep;
+ }
+ }
+
+ # Compile the regex specified, if provided
+ $mre = $null;
+ If ($regex) {
+ $mre = New-Object Regex $regex, 'Compiled';
+ }
+
+ # Compile the regex for insertafter or insertbefore, if provided
+ $insre = $null;
+ If ($insertafter -and $insertafter -ne "BOF" -and $insertafter -ne "EOF") {
+ $insre = New-Object Regex $insertafter, 'Compiled';
+ }
+ ElseIf ($insertbefore -and $insertbefore -ne "BOF") {
+ $insre = New-Object Regex $insertbefore, 'Compiled';
+ }
+
+ # index[0] is the line num where regex has been found
+ # index[1] is the line num where insertafter/insertbefore has been found
+ $index = -1, -1;
+ $lineno = 0;
+
+    # The most recently matched line (kept for backrefs replacement)
+ $matched_line = "";
+
+ # Iterate through the lines in the file looking for matches
+ Foreach ($cur_line in $lines) {
+ If ($regex) {
+ $m = $mre.Match($cur_line);
+ $match_found = $m.Success;
+ If ($match_found) {
+ $matched_line = $cur_line;
+ }
+ }
+ Else {
+ $match_found = $line -ceq $cur_line;
+ }
+ If ($match_found) {
+ $index[0] = $lineno;
+ }
+ ElseIf ($insre -and $insre.Match($cur_line).Success) {
+ If ($insertafter) {
+ $index[1] = $lineno + 1;
+ }
+ If ($insertbefore) {
+ $index[1] = $lineno;
+ }
+ }
+ $lineno = $lineno + 1;
+ }
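+    # Example walk-through (hypothetical lines): for regex '^name=' against the
+    # lines 'x', 'name=a', 'name=b', the loop above leaves index[0] = 2 (the
+    # last regex match); with insertafter '^x' it would leave index[1] = 1.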
+
+ If ($index[0] -ne -1) {
+ If ($backrefs) {
+ $new_line = [regex]::Replace($matched_line, $regex, $line);
+ }
+ Else {
+ $new_line = $line;
+ }
+ If ($lines[$index[0]] -cne $new_line) {
+ $lines[$index[0]] = $new_line;
+ $result.changed = $true;
+ $result.msg = "line replaced";
+ }
+ }
+ ElseIf ($backrefs) {
+ # No matches - no-op
+ }
+ ElseIf ($insertbefore -eq "BOF" -or $insertafter -eq "BOF") {
+ $lines.Insert(0, $line);
+ $result.changed = $true;
+ $result.msg = "line added";
+ }
+ ElseIf ($insertafter -eq "EOF" -or $index[1] -eq -1) {
+ $lines.Add($line) > $null;
+ $result.changed = $true;
+ $result.msg = "line added";
+ }
+ Else {
+ $lines.Insert($index[1], $line);
+ $result.changed = $true;
+ $result.msg = "line added";
+ }
+
+ # Write changes to the path if changes were made
+ If ($result.changed) {
+
+ # Write backup file if backup == "yes"
+ If ($backup) {
+ $result.backup_file = Backup-File -path $path -WhatIf:$check_mode
+ # Ensure backward compatibility (deprecate in future)
+ $result.backup = $result.backup_file
+ }
+
+ $writelines_params = @{
+ outlines = $lines
+ path = $path
+ linesep = $linesep
+ encodingobj = $encodingobj
+ validate = $validate
+ check_mode = $check_mode
+ }
+ $after = WriteLines @writelines_params;
+
+ if ($diff_support) {
+ $result.diff.after = $after;
+ }
+ }
+
+ $result.encoding = $encodingobj.WebName;
+
+ Exit-Json $result;
+}
+
+
+# Implement the functionality for state == 'absent'
+function Absent($path, $regex, $line, $backup, $validate, $encodingobj, $linesep, $check_mode, $diff_support) {
+
+ # Check if path exists. If it does not exist, fail with a reasonable error message.
+ If (-not (Test-Path -LiteralPath $path)) {
+ Fail-Json @{} "Path $path does not exist !";
+ }
+
+ # Initialize result information
+ $result = @{
+ backup = "";
+ changed = $false;
+ msg = "";
+ }
+
+ # Read the dest file lines using the indicated encoding into a mutable ArrayList. Note
+ # that we have to clean up the path because ansible wants to treat / and \ as
+ # interchangeable in windows pathnames, but .NET framework internals do not support that.
+ $cleanpath = $path.Replace("/", "\");
+ $before = [System.IO.File]::ReadAllLines($cleanpath, $encodingobj);
+ If ($null -eq $before) {
+ $lines = New-Object System.Collections.ArrayList;
+ }
+ Else {
+ $lines = [System.Collections.ArrayList] $before;
+ }
+
+ if ($diff_support) {
+ $result.diff = @{
+ before = $before -join $linesep;
+ }
+ }
+
+ # Compile the regex specified, if provided
+ $cre = $null;
+ If ($regex) {
+ $cre = New-Object Regex $regex, 'Compiled';
+ }
+
+ $found = New-Object System.Collections.ArrayList;
+ $left = New-Object System.Collections.ArrayList;
+
+ Foreach ($cur_line in $lines) {
+ If ($regex) {
+ $m = $cre.Match($cur_line);
+ $match_found = $m.Success;
+ }
+ Else {
+ $match_found = $line -ceq $cur_line;
+ }
+ If ($match_found) {
+ $found.Add($cur_line) > $null;
+ $result.changed = $true;
+ }
+ Else {
+ $left.Add($cur_line) > $null;
+ }
+ }
+
+ # Write changes to the path if changes were made
+ If ($result.changed) {
+
+ # Write backup file if backup == "yes"
+ If ($backup) {
+ $result.backup_file = Backup-File -path $path -WhatIf:$check_mode
+ # Ensure backward compatibility (deprecate in future)
+ $result.backup = $result.backup_file
+ }
+
+ $writelines_params = @{
+ outlines = $left
+ path = $path
+ linesep = $linesep
+ encodingobj = $encodingobj
+ validate = $validate
+ check_mode = $check_mode
+ }
+ $after = WriteLines @writelines_params;
+
+ if ($diff_support) {
+ $result.diff.after = $after;
+ }
+ }
+
+ $result.encoding = $encodingobj.WebName;
+ $result.found = $found.Count;
+ $result.msg = "$($found.Count) line(s) removed";
+
+ Exit-Json $result;
+}
+
+
+# Parse the parameters file dropped by the Ansible machinery
+$params = Parse-Args $args -supports_check_mode $true;
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false;
+$diff_support = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false;
+
+# Initialize defaults for input parameters.
+$path = Get-AnsibleParam -obj $params -name "path" -type "path" -failifempty $true -aliases "dest","destfile","name";
+$regex = Get-AnsibleParam -obj $params -name "regex" -type "str" -aliases "regexp";
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent";
+$line = Get-AnsibleParam -obj $params -name "line" -type "str";
+$backrefs = Get-AnsibleParam -obj $params -name "backrefs" -type "bool" -default $false;
+$insertafter = Get-AnsibleParam -obj $params -name "insertafter" -type "str";
+$insertbefore = Get-AnsibleParam -obj $params -name "insertbefore" -type "str";
+$create = Get-AnsibleParam -obj $params -name "create" -type "bool" -default $false;
+$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false;
+$validate = Get-AnsibleParam -obj $params -name "validate" -type "str";
+$encoding = Get-AnsibleParam -obj $params -name "encoding" -type "str" -default "auto";
+$newline = Get-AnsibleParam -obj $params -name "newline" -type "str" -default "windows" -validateset "unix","windows";
+
+# Fail if the path is not a file
+If (Test-Path -LiteralPath $path -PathType "container") {
+ Fail-Json @{} "Path $path is a directory";
+}
+
+# Default to windows line separator - probably most common
+$linesep = "`r`n"
+If ($newline -eq "unix") {
+ $linesep = "`n";
+}
+
+# Figure out the proper encoding to use for reading / writing the target file.
+
+# The default encoding is UTF-8 without BOM
+$encodingobj = [System.Text.UTF8Encoding] $false;
+
+# If an explicit encoding is specified, use that instead
+If ($encoding -ne "auto") {
+ $encodingobj = [System.Text.Encoding]::GetEncoding($encoding);
+}
+
+# Otherwise see if we can determine the current encoding of the target file.
+# If the file doesn't exist yet (create == 'yes') we use the default or
+# explicitly specified encoding set above.
+ElseIf (Test-Path -LiteralPath $path) {
+
+ # Get a sorted list of encodings with preambles, longest first
+ $max_preamble_len = 0;
+ $sortedlist = New-Object System.Collections.SortedList;
+ Foreach ($encodinginfo in [System.Text.Encoding]::GetEncodings()) {
+ $encoding = $encodinginfo.GetEncoding();
+ $plen = $encoding.GetPreamble().Length;
+ If ($plen -gt $max_preamble_len) {
+ $max_preamble_len = $plen;
+ }
+ If ($plen -gt 0) {
+ $sortedlist.Add(-($plen * 1000000 + $encoding.CodePage), $encoding) > $null;
+ }
+ }
+
+ # Get the first N bytes from the file, where N is the max preamble length we saw
+ [Byte[]]$bom = Get-Content -Encoding Byte -ReadCount $max_preamble_len -TotalCount $max_preamble_len -LiteralPath $path;
+
+ # Iterate through the sorted encodings, looking for a full match.
+ $found = $false;
+ Foreach ($encoding in $sortedlist.GetValueList()) {
+ $preamble = $encoding.GetPreamble();
+ If ($preamble -and $bom) {
+ Foreach ($i in 0..($preamble.Length - 1)) {
+ If ($i -ge $bom.Length) {
+ break;
+ }
+ If ($preamble[$i] -ne $bom[$i]) {
+ break;
+ }
+ ElseIf ($i + 1 -eq $preamble.Length) {
+ $encodingobj = $encoding;
+ $found = $true;
+ }
+ }
+ If ($found) {
+ break;
+ }
+ }
+ }
+}
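+# Illustrative sketch of the preamble matching above (assumes a UTF-8 BOM
+# file): an encoding's preamble is the BOM byte sequence it writes, so a file
+# whose first bytes equal that sequence is detected as that encoding.
+$example_preamble = (New-Object System.Text.UTF8Encoding $true).GetPreamble()
+# $example_preamble -> 0xEF 0xBB 0xBF; a file starting with these three bytes
+# would be detected as UTF-8 with BOM by the loop above.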
+
+
+# Main dispatch - based on the value of 'state', perform argument validation and
+# call the appropriate handler function.
+If ($state -eq "present") {
+
+ If ($backrefs -and -not $regex) {
+ Fail-Json @{} "regexp= is required with backrefs=true";
+ }
+
+ If (-not $line) {
+ Fail-Json @{} "line= is required with state=present";
+ }
+
+ If ($insertbefore -and $insertafter) {
+ Add-Warning $result "Both insertbefore and insertafter parameters found, ignoring `"insertafter=$insertafter`""
+ }
+
+ If (-not $insertbefore -and -not $insertafter) {
+ $insertafter = "EOF";
+ }
+
+ $present_params = @{
+ path = $path
+ regex = $regex
+ line = $line
+ insertafter = $insertafter
+ insertbefore = $insertbefore
+ create = $create
+ backup = $backup
+ backrefs = $backrefs
+ validate = $validate
+ encodingobj = $encodingobj
+ linesep = $linesep
+ check_mode = $check_mode
+ diff_support = $diff_support
+ }
+ Present @present_params;
+
+}
+ElseIf ($state -eq "absent") {
+
+ If (-not $regex -and -not $line) {
+ Fail-Json @{} "one of line= or regexp= is required with state=absent";
+ }
+
+ $absent_params = @{
+ path = $path
+ regex = $regex
+ line = $line
+ backup = $backup
+ validate = $validate
+ encodingobj = $encodingobj
+ linesep = $linesep
+ check_mode = $check_mode
+ diff_support = $diff_support
+ }
+ Absent @absent_params;
+}
diff --git a/test/support/windows-integration/plugins/modules/win_lineinfile.py b/test/support/windows-integration/plugins/modules/win_lineinfile.py
new file mode 100644
index 00000000..f4fb7f5a
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_lineinfile.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_lineinfile
+short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression
+description:
+ - This module will search a file for a line, and ensure that it is present or absent.
+ - This is primarily useful when you want to change a single line in a file only.
+version_added: "2.0"
+options:
+ path:
+ description:
+ - The path of the file to modify.
+ - Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
+ - Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
+ type: path
+ required: yes
+ aliases: [ dest, destfile, name ]
+ backup:
+ description:
+ - Determine whether a backup should be created.
+ - When set to C(yes), create a backup file including the timestamp information
+ so you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ regex:
+ description:
+ - The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
+ will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
+ see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
+ aliases: [ "regexp" ]
+ state:
+ description:
+ - Whether the line should be there or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ line:
+ description:
+ - Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
+ expanded with the C(regexp) capture groups if the regexp matches.
+ - Be aware that the line is processed first on the controller and thus is dependent on yaml quoting rules. Any double quoted line
+ will have control characters, such as '\r\n', expanded. To print such characters literally, use single or no quotes.
+ type: str
+ backrefs:
+ description:
+ - Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
+ matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
+ doesn't match anywhere in the file, the file will be left unchanged.
+ - If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
+ type: bool
+ default: no
+ insertafter:
+ description:
+    - Used with C(state=present). If specified, the line will be inserted after the last match of the specified regular expression. A special value is
+      available; C(EOF) for inserting the line at the end of the file.
+    - If the specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
+ type: str
+ choices: [ EOF, '*regex*' ]
+ default: EOF
+ insertbefore:
+ description:
+    - Used with C(state=present). If specified, the line will be inserted before the last match of the specified regular expression. A special value is
+      available; C(BOF) for inserting the line at the beginning of the file.
+    - If the specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
+ type: str
+ choices: [ BOF, '*regex*' ]
+ create:
+ description:
+ - Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
+ type: bool
+ default: no
+ validate:
+ description:
+ - Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
+ - The command is passed securely so shell features like expansion and pipes won't work.
+ type: str
+ encoding:
+ description:
+ - Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
+ the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
+ - An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method -
+ see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
+ - This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
+ specific encoding, the default encoding (UTF-8, no BOM) will be used.
+ type: str
+ default: auto
+ newline:
+ description:
+ - Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
+ line separator will be used for file output regardless of the original line separator that appears in the input file.
+ type: str
+ choices: [ unix, windows ]
+ default: windows
+notes:
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+seealso:
+- module: assemble
+- module: lineinfile
+author:
+- Brian Lloyd (@brianlloyd)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
+- name: Insert path without converting \r\n
+ win_lineinfile:
+ path: c:\file.txt
+ line: c:\return\new
+
+- win_lineinfile:
+ path: C:\Temp\example.conf
+ regex: '^name='
+ line: 'name=JohnDoe'
+
+- win_lineinfile:
+ path: C:\Temp\example.conf
+ regex: '^name='
+ state: absent
+
+- win_lineinfile:
+ path: C:\Temp\example.conf
+ regex: '^127\.0\.0\.1'
+ line: '127.0.0.1 localhost'
+
+- win_lineinfile:
+ path: C:\Temp\httpd.conf
+ regex: '^Listen '
+ insertafter: '^#Listen '
+ line: Listen 8080
+
+- win_lineinfile:
+ path: C:\Temp\services
+ regex: '^# port for http'
+ insertbefore: '^www.*80/tcp'
+ line: '# port for http by default'
+
+- name: Create file if it doesn't exist with a specific encoding
+ win_lineinfile:
+ path: C:\Temp\utf16.txt
+ create: yes
+ encoding: utf-16
+ line: This is a utf-16 encoded file
+
+- name: Add a line to a file and ensure the resulting file uses unix line separators
+ win_lineinfile:
+ path: C:\Temp\testfile.txt
+ line: Line added to file
+ newline: unix
+
+- name: Update a line using backrefs
+ win_lineinfile:
+ path: C:\Temp\example.conf
+ backrefs: yes
+ regex: '(^name=)'
+ line: '$1JohnDoe'
+'''
+
+RETURN = r'''
+backup:
+ description:
+ - Name of the backup file that was created.
+ - This is now deprecated, use C(backup_file) instead.
+ returned: if backup=yes
+ type: str
+ sample: C:\Path\To\File.txt.11540.20150212-220915.bak
+backup_file:
+ description: Name of the backup file that was created.
+ returned: if backup=yes
+ type: str
+ sample: C:\Path\To\File.txt.11540.20150212-220915.bak
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_path.ps1 b/test/support/windows-integration/plugins/modules/win_path.ps1
new file mode 100644
index 00000000..04eb41a3
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_path.ps1
@@ -0,0 +1,145 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+$system_path = "System\CurrentControlSet\Control\Session Manager\Environment"
+$user_path = "Environment"
+
+# list/arraylist methods don't allow IEqualityComparer override for case/backslash/quote-insensitivity, roll our own search
+Function Get-IndexOfPathElement ($list, [string]$value) {
+ $idx = 0
+ $value = $value.Trim('"').Trim('\')
+ ForEach($el in $list) {
+ If ([string]$el.Trim('"').Trim('\') -ieq $value) {
+ return $idx
+ }
+
+ $idx++
+ }
+
+ return -1
+}
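+# Quick sketch of the normalization above (hypothetical values): surrounding
+# quotes and trailing backslashes are stripped and the comparison is
+# case-insensitive, so these two path elements compare as equal.
+$example_a = '"C:\Tools\"'.Trim('"').Trim('\')   # -> C:\Tools
+$example_b = 'c:\tools'.Trim('"').Trim('\')      # -> c:\tools
+# $example_a -ieq $example_b evaluates to $true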
+
+# alters list in place, returns true if at least one element was added
+Function Add-Elements ($existing_elements, $elements_to_add) {
+ $last_idx = -1
+ $changed = $false
+
+ ForEach($el in $elements_to_add) {
+ $idx = Get-IndexOfPathElement $existing_elements $el
+
+ # add missing elements at the end
+ If ($idx -eq -1) {
+ $last_idx = $existing_elements.Add($el)
+ $changed = $true
+ }
+ ElseIf ($idx -lt $last_idx) {
+ $existing_elements.RemoveAt($idx) | Out-Null
+ $existing_elements.Add($el) | Out-Null
+ $last_idx = $existing_elements.Count - 1
+ $changed = $true
+ }
+ Else {
+ $last_idx = $idx
+ }
+ }
+
+ return $changed
+}
+
+# alters list in place, returns true if at least one element was removed
+Function Remove-Elements ($existing_elements, $elements_to_remove) {
+ $count = $existing_elements.Count
+
+ ForEach($el in $elements_to_remove) {
+ $idx = Get-IndexOfPathElement $existing_elements $el
+ $result.removed_idx = $idx
+ If ($idx -gt -1) {
+ $existing_elements.RemoveAt($idx)
+ }
+ }
+
+ return $count -ne $existing_elements.Count
+}
+
+# PS registry provider doesn't allow access to unexpanded REG_EXPAND_SZ; fall back to .NET
+Function Get-RawPathVar ($scope) {
+ If ($scope -eq "user") {
+ $env_key = [Microsoft.Win32.Registry]::CurrentUser.OpenSubKey($user_path)
+ }
+ ElseIf ($scope -eq "machine") {
+ $env_key = [Microsoft.Win32.Registry]::LocalMachine.OpenSubKey($system_path)
+ }
+
+ return $env_key.GetValue($var_name, "", [Microsoft.Win32.RegistryValueOptions]::DoNotExpandEnvironmentNames)
+}
+
+Function Set-RawPathVar($path_value, $scope) {
+ If ($scope -eq "user") {
+ $var_path = "HKCU:\" + $user_path
+ }
+ ElseIf ($scope -eq "machine") {
+ $var_path = "HKLM:\" + $system_path
+ }
+
+ Set-ItemProperty $var_path -Name $var_name -Value $path_value -Type ExpandString | Out-Null
+
+ return $path_value
+}
+
+$parsed_args = Parse-Args $args -supports_check_mode $true
+
+$result = @{changed=$false}
+
+$var_name = Get-AnsibleParam $parsed_args "name" -Default "PATH"
+$elements = Get-AnsibleParam $parsed_args "elements" -FailIfEmpty $result
+$state = Get-AnsibleParam $parsed_args "state" -Default "present" -ValidateSet "present","absent"
+$scope = Get-AnsibleParam $parsed_args "scope" -Default "machine" -ValidateSet "machine","user"
+
+$check_mode = Get-AnsibleParam $parsed_args "_ansible_check_mode" -Default $false
+
+If ($elements -is [string]) {
+ $elements = @($elements)
+}
+
+If ($elements -isnot [Array]) {
+ Fail-Json $result "elements must be a string or list of path strings"
+}
+
+$current_value = Get-RawPathVar $scope
+$result.path_value = $current_value
+
+# TODO: test case-canonicalization on wacky unicode values (eg turkish i)
+# TODO: detect and warn/fail on unparseable path? (eg, unbalanced quotes, invalid path chars)
+# TODO: detect and warn/fail if system path and Powershell isn't on it?
+
+$existing_elements = New-Object System.Collections.ArrayList
+
+# split on semicolons, accounting for quoted values with embedded semicolons (which may or may not be wrapped in whitespace)
+$pathsplit_re = [regex] '((?<q>\s*"[^"]+"\s*)|(?<q>[^;]+))(;$|$|;)'
+
+ForEach ($m in $pathsplit_re.Matches($current_value)) {
+ $existing_elements.Add($m.Groups['q'].Value) | Out-Null
+}
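+# Illustrative split (hypothetical PATH value): quoted elements keep their
+# embedded semicolons while unquoted elements split on ';'.
+$example_value = 'C:\Windows;"C:\Odd;Dir";C:\Tools'
+$example_split = @($pathsplit_re.Matches($example_value) | ForEach-Object { $_.Groups['q'].Value })
+# $example_split -> 'C:\Windows', '"C:\Odd;Dir"', 'C:\Tools'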
+
+If ($state -eq "absent") {
+ $result.changed = Remove-Elements $existing_elements $elements
+}
+ElseIf ($state -eq "present") {
+ $result.changed = Add-Elements $existing_elements $elements
+}
+
+# calculate the new path value from the existing elements
+$path_value = [String]::Join(";", $existing_elements.ToArray())
+$result.path_value = $path_value
+
+If ($result.changed -and -not $check_mode) {
+ Set-RawPathVar $path_value $scope | Out-Null
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_path.py b/test/support/windows-integration/plugins/modules/win_path.py
new file mode 100644
index 00000000..6404504f
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_path.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a windows documentation stub. Actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_path
+version_added: "2.3"
+short_description: Manage Windows path environment variables
+description:
+ - Allows element-based ordering, addition, and removal of Windows path environment variables.
+options:
+ name:
+ description:
+ - Target path environment variable name.
+ type: str
+ default: PATH
+ elements:
+ description:
+ - A single path element, or a list of path elements (ie, directories) to add or remove.
+ - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order
+ in the resultant path value.
+ - Variable expansions (eg, C(%VARNAME%)) are allowed, and are stored unexpanded in the target path element.
+ - Any existing path elements not mentioned in C(elements) are always preserved in their current order.
+ - New path elements are appended to the path, and existing path elements may be moved closer to the end to satisfy the requested ordering.
+ - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing
+ backslashes in YAML require quotes.
+ type: list
+ required: yes
+ state:
+ description:
+ - Whether the path elements specified in C(elements) should be present or absent.
+ type: str
+ choices: [ absent, present ]
+ scope:
+ description:
+ - The level at which the environment variable specified by C(name) should be managed (either for the current user or global machine scope).
+ type: str
+ choices: [ machine, user ]
+ default: machine
+notes:
+ - This module is for modifying individual elements of path-like
+ environment variables. For general-purpose management of other
+ environment vars, use the M(win_environment) module.
+ - This module does not broadcast change events.
+    This means that the minority of Windows applications that can have
+    their environment changed without restarting will not be notified, and
+    will therefore need restarting to pick up new environment settings.
+ - User level environment variables will require an interactive user to
+ log out and in again before they become available.
+seealso:
+- module: win_environment
+author:
+- Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = r'''
+- name: Ensure that system32 and Powershell are present on the global system path, and in the specified order
+ win_path:
+ elements:
+ - '%SystemRoot%\system32'
+ - '%SystemRoot%\system32\WindowsPowerShell\v1.0'
+
+- name: Ensure that C:\Program Files\MyJavaThing is not on the current user's CLASSPATH
+ win_path:
+ name: CLASSPATH
+ elements: C:\Program Files\MyJavaThing
+ scope: user
+ state: absent
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_ping.ps1 b/test/support/windows-integration/plugins/modules/win_ping.ps1
new file mode 100644
index 00000000..c848b912
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_ping.ps1
@@ -0,0 +1,21 @@
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$spec = @{
+ options = @{
+ data = @{ type = "str"; default = "pong" }
+ }
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+ throw "boom"
+}
+
+$module.Result.ping = $data
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_ping.py b/test/support/windows-integration/plugins/modules/win_ping.py
new file mode 100644
index 00000000..6d35f379
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_ping.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_ping
+version_added: "1.7"
+short_description: A windows version of the classic ping module
+description:
+ - Checks management connectivity of a windows host.
+ - This is NOT ICMP ping, this is just a trivial test module.
+ - For non-Windows targets, use the M(ping) module instead.
+ - For Network targets, use the M(net_ping) module instead.
+options:
+ data:
+ description:
+ - Alternate data to return instead of 'pong'.
+ - If this parameter is set to C(crash), the module will cause an exception.
+ type: str
+ default: pong
+seealso:
+- module: ping
+author:
+- Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+# Test connectivity to a windows host
+# ansible winserver -m win_ping
+
+- name: Example from an Ansible Playbook
+ win_ping:
+
+- name: Induce an exception to see what happens
+ win_ping:
+ data: crash
+'''
+
+RETURN = r'''
+ping:
+ description: Value provided with the data parameter.
+ returned: success
+ type: str
+ sample: pong
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_psexec.ps1 b/test/support/windows-integration/plugins/modules/win_psexec.ps1
new file mode 100644
index 00000000..04a51270
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_psexec.ps1
@@ -0,0 +1,152 @@
+#!powershell
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.ArgvParser
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+
+# See also: https://technet.microsoft.com/en-us/sysinternals/psexec.aspx
+
+$spec = @{
+ options = @{
+ command = @{ type='str'; required=$true }
+ executable = @{ type='path'; default='psexec.exe' }
+ hostnames = @{ type='list' }
+ username = @{ type='str' }
+ password = @{ type='str'; no_log=$true }
+ chdir = @{ type='path' }
+ wait = @{ type='bool'; default=$true }
+ nobanner = @{ type='bool'; default=$false }
+ noprofile = @{ type='bool'; default=$false }
+ elevated = @{ type='bool'; default=$false }
+ limited = @{ type='bool'; default=$false }
+ system = @{ type='bool'; default=$false }
+ interactive = @{ type='bool'; default=$false }
+ session = @{ type='int' }
+ priority = @{ type='str'; choices=@( 'background', 'low', 'belownormal', 'abovenormal', 'high', 'realtime' ) }
+ timeout = @{ type='int' }
+ }
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$command = $module.Params.command
+$executable = $module.Params.executable
+$hostnames = $module.Params.hostnames
+$username = $module.Params.username
+$password = $module.Params.password
+$chdir = $module.Params.chdir
+$wait = $module.Params.wait
+$nobanner = $module.Params.nobanner
+$noprofile = $module.Params.noprofile
+$elevated = $module.Params.elevated
+$limited = $module.Params.limited
+$system = $module.Params.system
+$interactive = $module.Params.interactive
+$session = $module.Params.session
+$priority = $module.Params.priority
+$timeout = $module.Params.timeout
+
+$module.Result.changed = $true
+
+If (-Not (Get-Command $executable -ErrorAction SilentlyContinue)) {
+ $module.FailJson("Executable '$executable' was not found.")
+}
+
+$arguments = [System.Collections.Generic.List`1[String]]@($executable)
+
+If ($nobanner -eq $true) {
+ $arguments.Add("-nobanner")
+}
+
+# Support running on local system if no hostname is specified
+If ($hostnames) {
+    $hostname_argument = ($hostnames | Sort-Object -Unique) -join ','
+ $arguments.Add("\\$hostname_argument")
+}
+
+# Username is optional
+If ($null -ne $username) {
+ $arguments.Add("-u")
+ $arguments.Add($username)
+}
+
+# Password is optional
+If ($null -ne $password) {
+ $arguments.Add("-p")
+ $arguments.Add($password)
+}
+
+If ($null -ne $chdir) {
+ $arguments.Add("-w")
+ $arguments.Add($chdir)
+}
+
+If ($wait -eq $false) {
+ $arguments.Add("-d")
+}
+
+If ($noprofile -eq $true) {
+ $arguments.Add("-e")
+}
+
+If ($elevated -eq $true) {
+ $arguments.Add("-h")
+}
+
+If ($system -eq $true) {
+ $arguments.Add("-s")
+}
+
+If ($interactive -eq $true) {
+ $arguments.Add("-i")
+ If ($null -ne $session) {
+ $arguments.Add($session)
+ }
+}
+
+If ($limited -eq $true) {
+ $arguments.Add("-l")
+}
+
+If ($null -ne $priority) {
+ $arguments.Add("-$priority")
+}
+
+If ($null -ne $timeout) {
+ $arguments.Add("-n")
+ $arguments.Add($timeout)
+}
+
+$arguments.Add("-accepteula")
+
+$argument_string = Argv-ToString -arguments $arguments
+
+# Add the command at the end of the argument string; we don't escape it,
+# as psexec does not expect the command to be a single argument
+$argument_string += " $command"
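+# Sketch of a resulting invocation (hypothetical parameters): with
+# nobanner=yes, hostnames=[server1], username=admin and command='whoami.exe',
+# the string built above resembles:
+#   psexec.exe -nobanner \\server1 -u admin -accepteula whoami.exe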
+
+$start_datetime = [DateTime]::UtcNow
+$module.Result.psexec_command = $argument_string
+
+$command_result = Run-Command -command $argument_string
+
+$end_datetime = [DateTime]::UtcNow
+
+$module.Result.stdout = $command_result.stdout
+$module.Result.stderr = $command_result.stderr
+
+If ($wait -eq $true) {
+ $module.Result.rc = $command_result.rc
+} else {
+ $module.Result.rc = 0
+ $module.Result.pid = $command_result.rc
+}
+
+$module.Result.start = $start_datetime.ToString("yyyy-MM-dd HH:mm:ss.ffffff")
+$module.Result.end = $end_datetime.ToString("yyyy-MM-dd HH:mm:ss.ffffff")
+$module.Result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff")
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_psexec.py b/test/support/windows-integration/plugins/modules/win_psexec.py
new file mode 100644
index 00000000..c3fc37e4
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_psexec.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_psexec
+version_added: '2.3'
+short_description: Runs commands (remotely) as another (privileged) user
+description:
+- Run commands (remotely) through the PsExec service.
+- Run commands as another (domain) user (with elevated privileges).
+requirements:
+- Microsoft PsExec
+options:
+ command:
+ description:
+ - The command line to run through PsExec (limited to 260 characters).
+ type: str
+ required: yes
+ executable:
+ description:
+ - The location of the PsExec utility (in case it is not located in your PATH).
+ type: path
+ default: psexec.exe
+ hostnames:
+ description:
+    - The hostnames to run the command on.
+ - If not provided, the command is run locally.
+ type: list
+ username:
+ description:
+ - The (remote) user to run the command as.
+ - If not provided, the current user is used.
+ type: str
+ password:
+ description:
+ - The password for the (remote) user to run the command as.
+    - This is mandatory in order to authenticate yourself.
+ type: str
+ chdir:
+ description:
+ - Run the command from this (remote) directory.
+ type: path
+ nobanner:
+ description:
+ - Do not display the startup banner and copyright message.
+ - This only works for specific versions of the PsExec binary.
+ type: bool
+ default: no
+ version_added: '2.4'
+ noprofile:
+ description:
+ - Run the command without loading the account's profile.
+ type: bool
+ default: no
+ elevated:
+ description:
+ - Run the command with elevated privileges.
+ type: bool
+ default: no
+ interactive:
+ description:
+ - Run the program so that it interacts with the desktop on the remote system.
+ type: bool
+ default: no
+ session:
+ description:
+ - Specifies the session ID to use.
+ - This parameter works in conjunction with I(interactive).
+ - It has no effect when I(interactive) is set to C(no).
+ type: int
+ version_added: '2.7'
+ limited:
+ description:
+    - Run the command as a limited user (strips the Administrators group and allows only privileges assigned to the Users group).
+ type: bool
+ default: no
+ system:
+ description:
+ - Run the remote command in the System account.
+ type: bool
+ default: no
+ priority:
+ description:
+ - Used to run the command at a different priority.
+ choices: [ abovenormal, background, belownormal, high, low, realtime ]
+ timeout:
+ description:
+    - The connection timeout in seconds.
+ type: int
+ wait:
+ description:
+ - Wait for the application to terminate.
+ - Only use for non-interactive applications.
+ type: bool
+ default: yes
+notes:
+- More information related to Microsoft PsExec is available from
+ U(https://technet.microsoft.com/en-us/sysinternals/bb897553.aspx)
+seealso:
+- module: psexec
+- module: raw
+- module: win_command
+- module: win_shell
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Test the PsExec connection to the local system (target node) with your user
+ win_psexec:
+ command: whoami.exe
+
+- name: Run regedit.exe locally (on target node) as SYSTEM and interactively
+ win_psexec:
+ command: regedit.exe
+ interactive: yes
+ system: yes
+
+- name: Run the setup.exe installer on multiple servers using the Domain Administrator
+ win_psexec:
+ command: E:\setup.exe /i /IACCEPTEULA
+ hostnames:
+ - remote_server1
+ - remote_server2
+ username: DOMAIN\Administrator
+ password: some_password
+ priority: high
+
+- name: Run PsExec from custom location C:\Program Files\sysinternals\
+ win_psexec:
+ command: netsh advfirewall set allprofiles state off
+ executable: C:\Program Files\sysinternals\psexec.exe
+ hostnames: [ remote_server ]
+ password: some_password
+ priority: low
+'''
+
+RETURN = r'''
+cmd:
+ description: The complete command line used by the module, including PsExec call and additional options.
+ returned: always
+ type: str
+ sample: psexec.exe -nobanner \\remote_server -u "DOMAIN\Administrator" -p "some_password" -accepteula E:\setup.exe
+pid:
+ description: The PID of the async process created by PsExec.
+ returned: when C(wait=False)
+ type: int
+ sample: 1532
+rc:
+ description: The return code for the command.
+ returned: always
+ type: int
+ sample: 0
+stdout:
+ description: The standard output from the command.
+ returned: always
+ type: str
+ sample: Success.
+stderr:
+ description: The error output from the command.
+ returned: always
+ type: str
+ sample: Error 15 running E:\setup.exe
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_reboot.py b/test/support/windows-integration/plugins/modules/win_reboot.py
new file mode 100644
index 00000000..14318041
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_reboot.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_reboot
+short_description: Reboot a windows machine
+description:
+- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
+- For non-Windows targets, use the M(reboot) module instead.
+version_added: '2.1'
+options:
+ pre_reboot_delay:
+ description:
+ - Seconds to wait before reboot. Passed as a parameter to the reboot command.
+ type: int
+ default: 2
+ aliases: [ pre_reboot_delay_sec ]
+ post_reboot_delay:
+ description:
+ - Seconds to wait after the reboot command was successful before attempting to validate the system rebooted successfully.
+    - This is useful if you want to wait for something to settle despite your connection already working.
+ type: int
+ default: 0
+ version_added: '2.4'
+ aliases: [ post_reboot_delay_sec ]
+ shutdown_timeout:
+ description:
+ - Maximum seconds to wait for shutdown to occur.
+ - Increase this timeout for very slow hardware, large update applications, etc.
+ - This option has been removed since Ansible 2.5 as the win_reboot behavior has changed.
+ type: int
+ default: 600
+ aliases: [ shutdown_timeout_sec ]
+ reboot_timeout:
+ description:
+ - Maximum seconds to wait for machine to re-appear on the network and respond to a test command.
+ - This timeout is evaluated separately for both reboot verification and test command success so maximum clock time is actually twice this value.
+ type: int
+ default: 600
+ aliases: [ reboot_timeout_sec ]
+ connect_timeout:
+ description:
+ - Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again.
+ type: int
+ default: 5
+ aliases: [ connect_timeout_sec ]
+ test_command:
+ description:
+    - Command that must succeed for the machine to be considered ready for management.
+ type: str
+ default: whoami
+ msg:
+ description:
+ - Message to display to users.
+ type: str
+ default: Reboot initiated by Ansible
+ boot_time_command:
+ description:
+ - Command to run that returns a unique string indicating the last time the system was booted.
+ - Setting this to a command that has different output each time it is run will cause the task to fail.
+ type: str
+ default: '(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime'
+ version_added: '2.10'
+notes:
+- If a shutdown was already scheduled on the system, C(win_reboot) will abort the scheduled shutdown and enforce its own shutdown.
+- Beware that when C(win_reboot) returns, the Windows system may not have settled yet and some base services could be in limbo.
+ This can result in unexpected behavior. Check the examples for ways to mitigate this.
+- The connection user must have the C(SeRemoteShutdownPrivilege) privilege enabled, see
+ U(https://docs.microsoft.com/en-us/windows/security/threat-protection/security-policy-settings/force-shutdown-from-a-remote-system)
+ for more information.
+seealso:
+- module: reboot
+author:
+- Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = r'''
+- name: Reboot the machine with all defaults
+ win_reboot:
+
+- name: Reboot a slow machine that might have lots of updates to apply
+ win_reboot:
+ reboot_timeout: 3600
+
+# Install a Windows feature and reboot if necessary
+- name: Install IIS Web-Server
+ win_feature:
+ name: Web-Server
+ register: iis_install
+
+- name: Reboot when Web-Server feature requires it
+ win_reboot:
+ when: iis_install.reboot_required
+
+# One way to ensure the system is reliable, is to set WinRM to a delayed startup
+- name: Ensure WinRM starts when the system has settled and is ready to work reliably
+ win_service:
+ name: WinRM
+ start_mode: delayed
+
+
+# Additionally, you can add a delay before running the next task
+- name: Reboot a machine that takes time to settle after being booted
+ win_reboot:
+ post_reboot_delay: 120
+
+# Or you can make win_reboot validate exactly what you need to work before running the next task
+- name: Validate that the netlogon service has started, before running the next task
+ win_reboot:
+ test_command: 'exit (Get-Service -Name Netlogon).Status -ne "Running"'
+'''
+
+RETURN = r'''
+rebooted:
+ description: True if the machine was rebooted.
+ returned: always
+ type: bool
+ sample: true
+elapsed:
+ description: The number of seconds that elapsed waiting for the system to be rebooted.
+ returned: always
+ type: float
+ sample: 23.2
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_regedit.ps1 b/test/support/windows-integration/plugins/modules/win_regedit.ps1
new file mode 100644
index 00000000..c56b4833
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_regedit.ps1
@@ -0,0 +1,495 @@
+#!powershell
+
+# Copyright: (c) 2015, Adam Keech <akeech@chathamfinancial.com>
+# Copyright: (c) 2015, Josh Ludwig <jludwig@chathamfinancial.com>
+# Copyright: (c) 2017, Jordan Borean <jborean93@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+
+$params = Parse-Args -arguments $args -supports_check_mode $true
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false
+$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP
+
+$path = Get-AnsibleParam -obj $params -name "path" -type "str" -failifempty $true -aliases "key"
+$name = Get-AnsibleParam -obj $params -name "name" -type "str" -aliases "entry","value"
+$data = Get-AnsibleParam -obj $params -name "data"
+$type = Get-AnsibleParam -obj $params -name "type" -type "str" -default "string" -validateset "none","binary","dword","expandstring","multistring","string","qword" -aliases "datatype"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent"
+$delete_key = Get-AnsibleParam -obj $params -name "delete_key" -type "bool" -default $true
+$hive = Get-AnsibleParam -obj $params -name "hive" -type "path"
+
+$result = @{
+ changed = $false
+ data_changed = $false
+ data_type_changed = $false
+}
+
+if ($diff_mode) {
+ $result.diff = @{
+ before = ""
+ after = ""
+ }
+}
+
+$registry_util = @'
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+
+namespace Ansible.WinRegedit
+{
+ internal class NativeMethods
+ {
+ [DllImport("advapi32.dll", CharSet = CharSet.Unicode)]
+ public static extern int RegLoadKeyW(
+ UInt32 hKey,
+ string lpSubKey,
+ string lpFile);
+
+ [DllImport("advapi32.dll", CharSet = CharSet.Unicode)]
+ public static extern int RegUnLoadKeyW(
+ UInt32 hKey,
+ string lpSubKey);
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ public class Hive : IDisposable
+ {
+ private const UInt32 SCOPE = 0x80000002; // HKLM
+ private string hiveKey;
+ private bool loaded = false;
+
+ public Hive(string hiveKey, string hivePath)
+ {
+ this.hiveKey = hiveKey;
+ int ret = NativeMethods.RegLoadKeyW(SCOPE, hiveKey, hivePath);
+ if (ret != 0)
+ throw new Win32Exception(ret, String.Format("Failed to load registry hive at {0}", hivePath));
+ loaded = true;
+ }
+
+ public static void UnloadHive(string hiveKey)
+ {
+ int ret = NativeMethods.RegUnLoadKeyW(SCOPE, hiveKey);
+ if (ret != 0)
+ throw new Win32Exception(ret, String.Format("Failed to unload registry hive at {0}", hiveKey));
+ }
+
+ public void Dispose()
+ {
+ if (loaded)
+ {
+ // Make sure the garbage collector disposes all unused handles and waits until it is complete
+ GC.Collect();
+ GC.WaitForPendingFinalizers();
+
+ UnloadHive(hiveKey);
+ loaded = false;
+ }
+ GC.SuppressFinalize(this);
+ }
+ ~Hive() { this.Dispose(); }
+ }
+}
+'@
+
+# fire a warning if the property name isn't specified, the (Default) key ($null) can only be a string
+if ($null -eq $name -and $type -ne "string") {
+ Add-Warning -obj $result -message "the data type when name is not specified can only be 'string', the type has automatically been converted"
+ $type = "string"
+}
+
+# Check that the registry path is in PSDrive format: HKCC, HKCR, HKCU, HKLM, HKU
+if ($path -notmatch "^HK(CC|CR|CU|LM|U):\\") {
+ Fail-Json $result "path: $path is not a valid powershell path, see module documentation for examples."
+}
+
+# Add a warning if the path does not contain a '\' and is not the leaf path
+$registry_path = (Split-Path -Path $path -NoQualifier).Substring(1) # removes the hive: and leading \
+$registry_leaf = Split-Path -Path $path -Leaf
+if ($registry_path -ne $registry_leaf -and -not $registry_path.Contains('\')) {
+ $msg = "path is not using '\' as a separator, support for '/' as a separator will be removed in a future Ansible version"
+ Add-DeprecationWarning -obj $result -message $msg -version 2.12
+ $registry_path = $registry_path.Replace('/', '\')
+}
+
+# Simplified version of Convert-HexStringToByteArray from
+# https://cyber-defense.sans.org/blog/2010/02/11/powershell-byte-array-hex-convert
+# Expects a hex in the format you get when you run reg.exe export,
+# and converts to a byte array so powershell can modify binary registry entries
+# import format is like 'hex:be,ef,be,ef,be,ef,be,ef,be,ef'
+Function Convert-RegExportHexStringToByteArray($string) {
+ # Remove 'hex:' from the front of the string if present
+ $string = $string.ToLower() -replace '^hex\:',''
+
+ # Remove whitespace and any other non-hex crud.
+ $string = $string -replace '[^a-f0-9\\,x\-\:]',''
+
+ # Turn commas into colons
+ $string = $string -replace ',',':'
+
+ # Maybe there's nothing left over to convert...
+ if ($string.Length -eq 0) {
+ return ,@()
+ }
+
+ # Split string with or without colon delimiters.
+ if ($string.Length -eq 1) {
+ return ,@([System.Convert]::ToByte($string,16))
+ } elseif (($string.Length % 2 -eq 0) -and ($string.IndexOf(":") -eq -1)) {
+ return ,@($string -split '([a-f0-9]{2})' | foreach-object { if ($_) {[System.Convert]::ToByte($_,16)}})
+ } elseif ($string.IndexOf(":") -ne -1) {
+ return ,@($string -split ':+' | foreach-object {[System.Convert]::ToByte($_,16)})
+ } else {
+ return ,@()
+ }
+}
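+# Quick usage sketch (hypothetical input): a reg.exe style hex string is
+# normalized and split into a byte array by the function above.
+$example_bytes = Convert-RegExportHexStringToByteArray -string 'hex:be,ef'
+# $example_bytes -> 190, 239 (0xBE, 0xEF)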
+
+Function Compare-RegistryProperties($existing, $new) {
+ # Outputs $true if the property values don't match
+ if ($existing -is [Array]) {
+ (Compare-Object -ReferenceObject $existing -DifferenceObject $new -SyncWindow 0).Length -ne 0
+ } else {
+ $existing -cne $new
+ }
+}
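+# Sketch (hypothetical values): arrays are compared element-wise with ordering
+# enforced (-SyncWindow 0), scalars with a case-sensitive comparison.
+$example_mismatch = Compare-RegistryProperties -existing @("a", "b") -new @("b", "a")
+# $example_mismatch -> $true, because the element order differs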
+
+Function Get-DiffValue {
+ param(
+ [Parameter(Mandatory=$true)][Microsoft.Win32.RegistryValueKind]$Type,
+ [Parameter(Mandatory=$true)][Object]$Value
+ )
+
+ $diff = @{ type = $Type.ToString(); value = $Value }
+
+ $enum = [Microsoft.Win32.RegistryValueKind]
+ if ($Type -in @($enum::Binary, $enum::None)) {
+ $diff.value = [System.Collections.Generic.List`1[String]]@()
+ foreach ($dec_value in $Value) {
+ $diff.value.Add("0x{0:x2}" -f $dec_value)
+ }
+ } elseif ($Type -eq $enum::DWord) {
+ $diff.value = "0x{0:x8}" -f $Value
+ } elseif ($Type -eq $enum::QWord) {
+ $diff.value = "0x{0:x16}" -f $Value
+ }
+
+ return $diff
+}
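+# Sketch (hypothetical value): a DWord is rendered for diff output as a
+# zero-padded 8-digit hex string by the function above.
+$example_diff = Get-DiffValue -Type ([Microsoft.Win32.RegistryValueKind]::DWord) -Value 1
+# $example_diff.value -> 0x00000001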
+
+Function Set-StateAbsent {
+ param(
+ # Used for diffs and exception messages to match up against Ansible input
+ [Parameter(Mandatory=$true)][String]$PrintPath,
+ [Parameter(Mandatory=$true)][Microsoft.Win32.RegistryKey]$Hive,
+ [Parameter(Mandatory=$true)][String]$Path,
+ [String]$Name,
+ [Switch]$DeleteKey
+ )
+
+ $key = $Hive.OpenSubKey($Path, $true)
+ if ($null -eq $key) {
+ # Key does not exist, no need to delete anything
+ return
+ }
+
+ try {
+ if ($DeleteKey -and -not $Name) {
+ # delete_key=yes is set and name is null/empty, so delete the entire key
+ $key.Dispose()
+ $key = $null
+ if (-not $check_mode) {
+ try {
+ $Hive.DeleteSubKeyTree($Path, $false)
+ } catch {
+ Fail-Json -obj $result -message "failed to delete registry key at $($PrintPath): $($_.Exception.Message)"
+ }
+ }
+ $result.changed = $true
+
+ if ($diff_mode) {
+ $result.diff.before = @{$PrintPath = @{}}
+ $result.diff.after = @{}
+ }
+ } else {
+ # delete_key=no or name is not null/empty, delete the property not the full key
+ $property = $key.GetValue($Name)
+ if ($null -eq $property) {
+ # property does not exist
+ return
+ }
+ $property_type = $key.GetValueKind($Name) # used for the diff
+
+ if (-not $check_mode) {
+ try {
+ $key.DeleteValue($Name)
+ } catch {
+ Fail-Json -obj $result -message "failed to delete registry property '$Name' at $($PrintPath): $($_.Exception.Message)"
+ }
+ }
+
+ $result.changed = $true
+ if ($diff_mode) {
+ $diff_value = Get-DiffValue -Type $property_type -Value $property
+ $result.diff.before = @{ $PrintPath = @{ $Name = $diff_value } }
+ $result.diff.after = @{ $PrintPath = @{} }
+ }
+ }
+ } finally {
+ if ($key) {
+ $key.Dispose()
+ }
+ }
+}
+
+Function Set-StatePresent {
+ param(
+ [Parameter(Mandatory=$true)][String]$PrintPath,
+ [Parameter(Mandatory=$true)][Microsoft.Win32.RegistryKey]$Hive,
+ [Parameter(Mandatory=$true)][String]$Path,
+ [String]$Name,
+ [Object]$Data,
+ [Microsoft.Win32.RegistryValueKind]$Type
+ )
+
+ $key = $Hive.OpenSubKey($Path, $true)
+ try {
+ if ($null -eq $key) {
+ # the key does not exist, create it so the next steps work
+ if (-not $check_mode) {
+ try {
+ $key = $Hive.CreateSubKey($Path)
+ } catch {
+ Fail-Json -obj $result -message "failed to create registry key at $($PrintPath): $($_.Exception.Message)"
+ }
+ }
+ $result.changed = $true
+
+ if ($diff_mode) {
+ $result.diff.before = @{}
+ $result.diff.after = @{$PrintPath = @{}}
+ }
+ } elseif ($diff_mode) {
+ # Make sure the diff is in an expected state for the key
+ $result.diff.before = @{$PrintPath = @{}}
+ $result.diff.after = @{$PrintPath = @{}}
+ }
+
+ if ($null -eq $key -or $null -eq $Data) {
+ # $key is $null when check mode only notionally created the key above, so we
+ # cannot do any more work; $Data is $null when a new key is created without
+ # the data being explicitly set
+ return
+ }
+
+ $property = $key.GetValue($Name, $null, [Microsoft.Win32.RegistryValueOptions]::DoNotExpandEnvironmentNames)
+ if ($null -ne $property) {
+ # property exists, need to compare the values and type
+ $existing_type = $key.GetValueKind($Name)
+ $change_value = $false
+
+ if ($Type -ne $existing_type) {
+ $change_value = $true
+ $result.data_type_changed = $true
+ $data_mismatch = Compare-RegistryProperties -existing $property -new $Data
+ if ($data_mismatch) {
+ $result.data_changed = $true
+ }
+ } else {
+ $data_mismatch = Compare-RegistryProperties -existing $property -new $Data
+ if ($data_mismatch) {
+ $change_value = $true
+ $result.data_changed = $true
+ }
+ }
+
+ if ($change_value) {
+ if (-not $check_mode) {
+ try {
+ $key.SetValue($Name, $Data, $Type)
+ } catch {
+ Fail-Json -obj $result -message "failed to change registry property '$Name' at $($PrintPath): $($_.Exception.Message)"
+ }
+ }
+ $result.changed = $true
+
+ if ($diff_mode) {
+ $result.diff.before.$PrintPath.$Name = Get-DiffValue -Type $existing_type -Value $property
+ $result.diff.after.$PrintPath.$Name = Get-DiffValue -Type $Type -Value $Data
+ }
+ } elseif ($diff_mode) {
+ $diff_value = Get-DiffValue -Type $existing_type -Value $property
+ $result.diff.before.$PrintPath.$Name = $diff_value
+ $result.diff.after.$PrintPath.$Name = $diff_value
+ }
+ } else {
+ # property doesn't exist, just create a new one
+ if (-not $check_mode) {
+ try {
+ $key.SetValue($Name, $Data, $Type)
+ } catch {
+ Fail-Json -obj $result -message "failed to create registry property '$Name' at $($PrintPath): $($_.Exception.Message)"
+ }
+ }
+ $result.changed = $true
+
+ if ($diff_mode) {
+ $result.diff.after.$PrintPath.$Name = Get-DiffValue -Type $Type -Value $Data
+ }
+ }
+ } finally {
+ if ($key) {
+ $key.Dispose()
+ }
+ }
+}
+
+# convert property names "" to $null as "" refers to (Default)
+if ($name -eq "") {
+ $name = $null
+}
+
+# convert the data to the required format
+if ($type -in @("binary", "none")) {
+ if ($null -eq $data) {
+ $data = ""
+ }
+
+ # convert the data from string to byte array if in hex: format
+ if ($data -is [String]) {
+ $data = [byte[]](Convert-RegExportHexStringToByteArray -string $data)
+ } elseif ($data -is [Int]) {
+ if ($data -gt 255) {
+ Fail-Json $result "cannot convert binary data '$data' to byte array, please specify this value as a yaml byte array or a comma separated hex value string"
+ }
+ $data = [byte[]]@([byte]$data)
+ } elseif ($data -is [Array]) {
+ $data = [byte[]]$data
+ }
+} elseif ($type -in @("dword", "qword")) {
+ # dwords and qwords don't allow null values, set to 0
+ if ($null -eq $data) {
+ $data = 0
+ }
+
+ if ($data -is [String]) {
+ # if the data is a string we need to convert it to an unsigned int64;
+ # it must be unsigned because Ansible passes in an unsigned value while
+ # PowerShell uses a signed data type. The value is converted to the
+ # correct signed type below
+ $data = [UInt64]$data
+ }
+
+ if ($type -eq "dword") {
+ if ($data -gt [UInt32]::MaxValue) {
+ Fail-Json $result "data cannot be larger than 0xffffffff when type is dword"
+ } elseif ($data -gt [Int32]::MaxValue) {
+ # when a value is larger than a signed Int32 (> 2147483647 or 0x7FFFFFFF),
+ # PowerShell automatically promotes it to a signed Int64. We convert it
+ # back to a signed Int32 by parsing the hex string representation.
+ $data = "0x$("{0:x}" -f $data)"
+ }
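+ # Worked example (illustrative): data: 0xff2500ae arrives as 4280615086,
+ # which exceeds [Int32]::MaxValue, so it is rendered as the hex string
+ # "0xff2500ae" above; the cast below then yields the two's-complement
+ # Int32 -14352210, preserving the raw 32-bit pattern for the registry.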
+ $data = [Int32]$data
+ } else {
+ if ($data -gt [UInt64]::MaxValue) {
+ Fail-Json $result "data cannot be larger than 0xffffffffffffffff when type is qword"
+ } elseif ($data -gt [Int64]::MaxValue) {
+ $data = "0x$("{0:x}" -f $data)"
+ }
+ $data = [Int64]$data
+ }
+} elseif ($type -in @("string", "expandstring") -and $name) {
+ # a null string or expandstring value must become an empty string.
+ # Only do this if $name is defined (not the default key)
+ if ($null -eq $data) {
+ $data = ""
+ }
+} elseif ($type -eq "multistring") {
+ # convert the data for a multistring to a String[] array
+ if ($null -eq $data) {
+ $data = [String[]]@()
+ } elseif ($data -isnot [Array]) {
+ $new_data = New-Object -TypeName String[] -ArgumentList 1
+ $new_data[0] = $data.ToString([CultureInfo]::InvariantCulture)
+ $data = $new_data
+ } else {
+ $new_data = New-Object -TypeName String[] -ArgumentList $data.Count
+ for ($i = 0; $i -lt $data.Count; $i++) {
+ # index explicitly; IndexOf would map duplicate entries onto the first match
+ $new_data[$i] = $data[$i].ToString([CultureInfo]::InvariantCulture)
+ }
+ $data = $new_data
+ }
+}
+
+# convert the type string to the .NET class
+$type = [System.Enum]::Parse([Microsoft.Win32.RegistryValueKind], $type, $true)
+
+$registry_hive = switch(Split-Path -Path $path -Qualifier) {
+ "HKCR:" { [Microsoft.Win32.Registry]::ClassesRoot }
+ "HKCC:" { [Microsoft.Win32.Registry]::CurrentConfig }
+ "HKCU:" { [Microsoft.Win32.Registry]::CurrentUser }
+ "HKLM:" { [Microsoft.Win32.Registry]::LocalMachine }
+ "HKU:" { [Microsoft.Win32.Registry]::Users }
+}
+$loaded_hive = $null
+try {
+ if ($hive) {
+ if (-not (Test-Path -LiteralPath $hive)) {
+ Fail-Json -obj $result -message "hive at path '$hive' is not valid or accessible, cannot load hive"
+ }
+
+ $original_tmp = $env:TMP
+ $env:TMP = $_remote_tmp
+ Add-Type -TypeDefinition $registry_util
+ $env:TMP = $original_tmp
+
+ try {
+ Set-AnsiblePrivilege -Name SeBackupPrivilege -Value $true
+ Set-AnsiblePrivilege -Name SeRestorePrivilege -Value $true
+ } catch [System.ComponentModel.Win32Exception] {
+ Fail-Json -obj $result -message "failed to enable SeBackupPrivilege and SeRestorePrivilege for the current process: $($_.Exception.Message)"
+ }
+
+ if (Test-Path -Path HKLM:\ANSIBLE) {
+ Add-Warning -obj $result -message "hive already loaded at HKLM:\ANSIBLE, had to unload hive for win_regedit to continue"
+ try {
+ [Ansible.WinRegedit.Hive]::UnloadHive("ANSIBLE")
+ } catch [System.ComponentModel.Win32Exception] {
+ Fail-Json -obj $result -message "failed to unload registry hive HKLM:\ANSIBLE from $($hive): $($_.Exception.Message)"
+ }
+ }
+
+ try {
+ $loaded_hive = New-Object -TypeName Ansible.WinRegedit.Hive -ArgumentList "ANSIBLE", $hive
+ } catch [System.ComponentModel.Win32Exception] {
+ Fail-Json -obj $result -message "failed to load registry hive from '$hive' to HKLM:\ANSIBLE: $($_.Exception.Message)"
+ }
+ }
+
+ if ($state -eq "present") {
+ Set-StatePresent -PrintPath $path -Hive $registry_hive -Path $registry_path -Name $name -Data $data -Type $type
+ } else {
+ Set-StateAbsent -PrintPath $path -Hive $registry_hive -Path $registry_path -Name $name -DeleteKey:$delete_key
+ }
+} finally {
+ $registry_hive.Dispose()
+ if ($loaded_hive) {
+ $loaded_hive.Dispose()
+ }
+}
+
+Exit-Json $result
+
diff --git a/test/support/windows-integration/plugins/modules/win_regedit.py b/test/support/windows-integration/plugins/modules/win_regedit.py
new file mode 100644
index 00000000..2c0fff71
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_regedit.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Adam Keech <akeech@chathamfinancial.com>
+# Copyright: (c) 2015, Josh Ludwig <jludwig@chathamfinancial.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = r'''
+---
+module: win_regedit
+version_added: '2.0'
+short_description: Add, change, or remove registry keys and values
+description:
+- Add, modify or remove registry keys and values.
+- More information about the windows registry from Wikipedia
+ U(https://en.wikipedia.org/wiki/Windows_Registry).
+options:
+ path:
+ description:
+ - Name of the registry path.
+ - 'Should be in one of the following registry hives: HKCC, HKCR, HKCU,
+ HKLM, HKU.'
+ type: str
+ required: yes
+ aliases: [ key ]
+ name:
+ description:
+ - Name of the registry entry in the above C(path) parameter.
+ - If not provided or empty, the '(Default)' property for the key will
+ be used.
+ type: str
+ aliases: [ entry, value ]
+ data:
+ description:
+ - Value of the registry entry C(name) in C(path).
+ - If not specified then the value for the property will be null for the
+ corresponding C(type).
+ - Binary and None data should be expressed in a yaml byte array or as comma
+ separated hex values.
+ - An easy way to generate this is to run C(regedit.exe) and use the
+ I(export) option to save the registry values to a file.
+ - In the exported file, binary values look like C(hex:be,ef,be,ef); the
+ C(hex:) prefix is optional.
+ - DWORD and QWORD values should either be represented as a decimal number
+ or a hex value.
+ - Multistring values should be passed in as a list.
+ - See the examples for more details on how to format this data.
+ type: str
+ type:
+ description:
+ - The registry value data type.
+ type: str
+ choices: [ binary, dword, expandstring, multistring, string, qword ]
+ default: string
+ aliases: [ datatype ]
+ state:
+ description:
+ - The state of the registry entry.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ delete_key:
+ description:
+ - When C(state) is C(absent) and this is C(yes), the entire key will
+ be deleted.
+ - If C(no), only the '(Default)' property for that key is cleared out.
+ type: bool
+ default: yes
+ version_added: '2.4'
+ hive:
+ description:
+ - A path to a hive key like C:\Users\Default\NTUSER.DAT to load in the
+ registry.
+ - This hive is loaded under the HKLM:\ANSIBLE key which can then be used
+ in I(name) like any other path.
+ - This can be used to load the default user profile registry hive or any
+ other hive saved as a file.
+ - Using this function requires the user to have the C(SeRestorePrivilege)
+ and C(SeBackupPrivilege) privileges enabled.
+ type: path
+ version_added: '2.5'
+notes:
+- Check-mode C(-C/--check) and diff output C(-D/--diff) are supported, so that you can test every change against the active configuration before
+ applying changes.
+- Beware that some registry hives (C(HKEY_USERS) in particular) do not allow new registry paths to be created in the root folder.
+- Since Ansible 2.4, when checking if a string registry value has changed, a case-sensitive test is used. Previously the test was case-insensitive.
+seealso:
+- module: win_reg_stat
+- module: win_regmerge
+author:
+- Adam Keech (@smadam813)
+- Josh Ludwig (@joshludwig)
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Create registry path MyCompany
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+
+- name: Add or update registry path MyCompany, with entry 'hello', and containing 'world'
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: world
+
+- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 1337 as the decimal value
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: 1337
+ type: dword
+
+- name: Add or update registry path MyCompany, with dword entry 'hello', and containing 0xff2500ae as the hex value
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: 0xff2500ae
+ type: dword
+
+- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in hex-string format
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
+ type: binary
+
+- name: Add or update registry path MyCompany, with binary entry 'hello', and containing binary data in yaml format
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
+ type: binary
+
+- name: Add or update registry path MyCompany, with expand string entry 'hello'
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: '%appdata%\local'
+ type: expandstring
+
+- name: Add or update registry path MyCompany, with multi string entry 'hello'
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ data: ['hello', 'world']
+ type: multistring
+
+- name: Disable keyboard layout hotkey for all users (changes existing)
+ win_regedit:
+ path: HKU:\.DEFAULT\Keyboard Layout\Toggle
+ name: Layout Hotkey
+ data: 3
+ type: dword
+
+- name: Disable language hotkey for the current user (adds new)
+ win_regedit:
+ path: HKCU:\Keyboard Layout\Toggle
+ name: Language Hotkey
+ data: 3
+ type: dword
+
+- name: Remove registry path MyCompany (including all entries it contains)
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ state: absent
+ delete_key: yes
+
+- name: Clear the existing (Default) entry at path MyCompany
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ state: absent
+ delete_key: no
+
+- name: Remove entry 'hello' from registry path MyCompany
+ win_regedit:
+ path: HKCU:\Software\MyCompany
+ name: hello
+ state: absent
+
+- name: Change default mouse trailing settings for new users
+ win_regedit:
+ path: HKLM:\ANSIBLE\Control Panel\Mouse
+ name: MouseTrails
+ data: 10
+ type: string
+ state: present
+ hive: C:\Users\Default\NTUSER.dat
+'''
+
+RETURN = r'''
+data_changed:
+ description: Whether this invocation changed the data in the registry value.
+ returned: success
+ type: bool
+ sample: false
+data_type_changed:
+ description: Whether this invocation changed the datatype of the registry value.
+ returned: success
+ type: bool
+ sample: true
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_security_policy.ps1 b/test/support/windows-integration/plugins/modules/win_security_policy.ps1
new file mode 100644
index 00000000..274204b6
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_security_policy.ps1
@@ -0,0 +1,196 @@
+#!powershell
+
+# Copyright: (c) 2017, Jordan Borean <jborean93@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args $args -supports_check_mode $true
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+$diff_mode = Get-AnsibleParam -obj $Params -name "_ansible_diff" -type "bool" -default $false
+
+$section = Get-AnsibleParam -obj $params -name "section" -type "str" -failifempty $true
+$key = Get-AnsibleParam -obj $params -name "key" -type "str" -failifempty $true
+$value = Get-AnsibleParam -obj $params -name "value" -failifempty $true
+
+$result = @{
+ changed = $false
+ section = $section
+ key = $key
+ value = $value
+}
+
+if ($diff_mode) {
+ $result.diff = @{}
+}
+
+Function Run-SecEdit($arguments) {
+ $stdout = $null
+ $stderr = $null
+ $log_path = [IO.Path]::GetTempFileName()
+ $arguments = $arguments + @("/log", $log_path)
+
+ try {
+ $stdout = &SecEdit.exe $arguments | Out-String
+ } catch {
+ $stderr = $_.Exception.Message
+ }
+ $log = Get-Content -Path $log_path
+ Remove-Item -Path $log_path -Force
+
+ $return = @{
+ log = ($log -join "`n").Trim()
+ stdout = $stdout
+ stderr = $stderr
+ rc = $LASTEXITCODE
+ }
+
+ return $return
+}
+
+Function Export-SecEdit() {
+ $secedit_ini_path = [IO.Path]::GetTempFileName()
+ # while this will technically make a change to the system in check mode by
+ # creating a new file, we need these values to be able to do anything
+ # substantial in check mode
+ $export_result = Run-SecEdit -arguments @("/export", "/cfg", $secedit_ini_path, "/quiet")
+
+ # check the return code and if the file has been populated, otherwise error out
+ if (($export_result.rc -ne 0) -or ((Get-Item -Path $secedit_ini_path).Length -eq 0)) {
+ Remove-Item -Path $secedit_ini_path -Force
+ $result.rc = $export_result.rc
+ $result.stdout = $export_result.stdout
+ $result.stderr = $export_result.stderr
+ Fail-Json $result "Failed to export secedit.ini file to $($secedit_ini_path)"
+ }
+ $secedit_ini = ConvertFrom-Ini -file_path $secedit_ini_path
+
+ return $secedit_ini
+}
+
+Function Import-SecEdit($ini) {
+ $secedit_ini_path = [IO.Path]::GetTempFileName()
+ $secedit_db_path = [IO.Path]::GetTempFileName()
+ Remove-Item -Path $secedit_db_path -Force # needs to be deleted for SecEdit.exe /import to work
+
+ $ini_contents = ConvertTo-Ini -ini $ini
+ Set-Content -Path $secedit_ini_path -Value $ini_contents
+ $result.changed = $true
+
+ $import_result = Run-SecEdit -arguments @("/configure", "/db", $secedit_db_path, "/cfg", $secedit_ini_path, "/quiet")
+ $result.import_log = $import_result.log
+ Remove-Item -Path $secedit_ini_path -Force
+ if ($import_result.rc -ne 0) {
+ $result.rc = $import_result.rc
+ $result.stdout = $import_result.stdout
+ $result.stderr = $import_result.stderr
+ Fail-Json $result "Failed to import secedit.ini file from $($secedit_ini_path)"
+ }
+}
+
+Function ConvertTo-Ini($ini) {
+ $content = @()
+ foreach ($key in $ini.GetEnumerator()) {
+ $section = $key.Name
+ $values = $key.Value
+
+ $content += "[$section]"
+ foreach ($value in $values.GetEnumerator()) {
+ $value_key = $value.Name
+ $value_value = $value.Value
+
+ if ($null -ne $value_value) {
+ $content += "$value_key = $value_value"
+ }
+ }
+ }
+
+ return $content -join "`r`n"
+}
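+# Illustrative output (hypothetical values): ConvertTo-Ini on
+# @{ 'System Access' = @{ MaximumPasswordAge = 42 } } produces:
+#   [System Access]
+#   MaximumPasswordAge = 42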
+
+Function ConvertFrom-Ini($file_path) {
+ $ini = @{}
+ switch -Regex -File $file_path {
+ "^\[(.+)\]" {
+ $section = $matches[1]
+ $ini.$section = @{}
+ }
+ "(.+?)\s*=(.*)" {
+ $name = $matches[1].Trim()
+ $value = $matches[2].Trim()
+ if ($value -match "^\d+$") {
+ $value = [int]$value
+ } elseif ($value.StartsWith('"') -and $value.EndsWith('"')) {
+ $value = $value.Substring(1, $value.Length - 2)
+ }
+
+ $ini.$section.$name = $value
+ }
+ }
+
+ return $ini
+}
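+# Illustrative parse (hypothetical file content): a file containing
+#   [System Access]
+#   MaximumPasswordAge = 42
+# returns @{ 'System Access' = @{ MaximumPasswordAge = 42 } }, with the
+# value typed as [int] by the "^\d+$" match above.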
+
+if ($section -eq "Privilege Rights") {
+ Add-Warning -obj $result -message "Using this module to edit rights and privileges is error-prone, use the win_user_right module instead"
+}
+
+$will_change = $false
+$secedit_ini = Export-SecEdit
+if (-not ($secedit_ini.ContainsKey($section))) {
+ Fail-Json $result "The section '$section' does not exist in SecEdit.exe output ini"
+}
+
+if ($secedit_ini.$section.ContainsKey($key)) {
+ $current_value = $secedit_ini.$section.$key
+
+ if ($current_value -cne $value) {
+ if ($diff_mode) {
+ $result.diff.prepared = @"
+[$section]
+-$key = $current_value
++$key = $value
+"@
+ }
+
+ $secedit_ini.$section.$key = $value
+ $will_change = $true
+ }
+} elseif ([string]$value -eq "") {
+ # Value is requested to be removed, and has already been removed, do nothing
+} else {
+ if ($diff_mode) {
+ $result.diff.prepared = @"
+[$section]
++$key = $value
+"@
+ }
+ $secedit_ini.$section.$key = $value
+ $will_change = $true
+}
+
+if ($will_change -eq $true) {
+ $result.changed = $true
+ if (-not $check_mode) {
+ Import-SecEdit -ini $secedit_ini
+
+ # secedit doesn't error out on improper entries, re-export and verify
+ # the changes occurred
+ $verification_ini = Export-SecEdit
+ $new_section_values = $verification_ini.$section
+ if ($new_section_values.ContainsKey($key)) {
+ $new_value = $new_section_values.$key
+ if ($new_value -cne $value) {
+ Fail-Json $result "Failed to change the value for key '$key' in section '$section', the value is still $new_value"
+ }
+ } elseif ([string]$value -eq "") {
+ # Value was empty, so OK if no longer in the result
+ } else {
+ Fail-Json $result "The key '$key' in section '$section' is not a valid key, cannot set this value"
+ }
+ }
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_security_policy.py b/test/support/windows-integration/plugins/modules/win_security_policy.py
new file mode 100644
index 00000000..d582a532
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_security_policy.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub, actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_security_policy
+version_added: '2.4'
+short_description: Change local security policy settings
+description:
+- Allows you to set the local security policies that are configured by
+ SecEdit.exe.
+options:
+ section:
+ description:
+ - The ini section the key exists in.
+ - If the section does not exist then the module will return an error.
+ - Example sections to use are 'Account Policies', 'Local Policies',
+ 'Event Log', 'Restricted Groups', 'System Services', 'Registry' and
+ 'File System'.
+ - To edit the C(Privilege Rights) section, use the
+ M(win_user_right) module instead.
+ type: str
+ required: yes
+ key:
+ description:
+ - The ini key of the section or policy name to modify.
+ - The module will return an error if this key is invalid.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value for the ini key or policy name.
+ - If the key takes a boolean value, then 0 = False and 1 = True.
+ type: str
+ required: yes
+notes:
+- This module uses the SecEdit.exe tool to configure the values; more details
+ of the areas and keys that can be configured can be found at
+ U(https://msdn.microsoft.com/en-us/library/bb742512.aspx).
+- If you are in a domain environment these policies may be set by a GPO;
+ this module can temporarily change the values, but the GPO will override
+ them if they differ.
+- You can also run C(SecEdit.exe /export /cfg C:\temp\output.ini) to view the
+ current policies set on your system.
+- When assigning user rights, use the M(win_user_right) module instead.
+seealso:
+- module: win_user_right
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Change the guest account name
+ win_security_policy:
+ section: System Access
+ key: NewGuestName
+ value: Guest Account
+
+- name: Set the maximum password age
+ win_security_policy:
+ section: System Access
+ key: MaximumPasswordAge
+ value: 15
+
+- name: Do not store passwords using reversible encryption
+ win_security_policy:
+ section: System Access
+ key: ClearTextPassword
+ value: 0
+
+- name: Enable system events
+ win_security_policy:
+ section: Event Audit
+ key: AuditSystemEvents
+ value: 1
+'''
+
+RETURN = r'''
+rc:
+ description: The return code after a failure when running SecEdit.exe.
+ returned: failure with secedit calls
+ type: int
+ sample: -1
+stdout:
+ description: The output of the STDOUT buffer after a failure when running
+ SecEdit.exe.
+ returned: failure with secedit calls
+ type: str
+ sample: check log for error details
+stderr:
+ description: The output of the STDERR buffer after a failure when running
+ SecEdit.exe.
+ returned: failure with secedit calls
+ type: str
+ sample: failed to import security policy
+import_log:
+ description: The log of the SecEdit.exe /configure job that configured the
+ local policies. This is used for debugging purposes on failures.
+ returned: secedit.exe /configure run and change occurred
+ type: str
+ sample: Completed 6 percent (0/15) \tProcess Privilege Rights area.
+key:
+ description: The key in the section passed to the module to modify.
+ returned: success
+ type: str
+ sample: NewGuestName
+section:
+ description: The section passed to the module to modify.
+ returned: success
+ type: str
+ sample: System Access
+value:
+ description: The value passed to the module to modify to.
+ returned: success
+ type: str
+ sample: Guest Account
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_shell.ps1 b/test/support/windows-integration/plugins/modules/win_shell.ps1
new file mode 100644
index 00000000..54aef8de
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_shell.ps1
@@ -0,0 +1,138 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+#Requires -Module Ansible.ModuleUtils.FileUtil
+
+# TODO: add check mode support
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+# Cleanse CLIXML from stderr (sift out error stream data, discard others for now)
+Function Cleanse-Stderr($raw_stderr) {
+ Try {
+ # NB: this regex isn't perfect, but is decent at finding CLIXML amongst other stderr noise
+ If($raw_stderr -match "(?s)(?<prenoise1>.*)#< CLIXML(?<prenoise2>.*)(?<clixml><Objs.+</Objs>)(?<postnoise>.*)") {
+ $clixml = [xml]$matches["clixml"]
+
+ $merged_stderr = "{0}{1}{2}{3}" -f @(
+ $matches["prenoise1"],
+ $matches["prenoise2"],
+ # filter out just the Error-tagged strings for now, and zap embedded CRLF chars
+ ($clixml.Objs.ChildNodes | Where-Object { $_.Name -eq 'S' } | Where-Object { $_.S -eq 'Error' } | ForEach-Object { $_.'#text'.Replace('_x000D__x000A_','') } | Out-String),
+ $matches["postnoise"]) | Out-String
+
+ return $merged_stderr.Trim()
+
+ # FUTURE: parse/return other streams
+ }
+ Else {
+ $raw_stderr
+ }
+ }
+ Catch {
+ "***EXCEPTION PARSING CLIXML: $_***" + $raw_stderr
+ }
+}
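+# Rough sketch of the cleansing above (assumed CLIXML shape, heavily trimmed):
+# stderr of '#< CLIXML <Objs ...><S S="Error">oops_x000D__x000A_</S></Objs>'
+# is assumed to reduce to just 'oops' once the Error stream is extracted and
+# the embedded CRLF marker is stripped.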
+
+$params = Parse-Args $args -supports_check_mode $false
+
+$raw_command_line = Get-AnsibleParam -obj $params -name "_raw_params" -type "str" -failifempty $true
+$chdir = Get-AnsibleParam -obj $params -name "chdir" -type "path"
+$executable = Get-AnsibleParam -obj $params -name "executable" -type "path"
+$creates = Get-AnsibleParam -obj $params -name "creates" -type "path"
+$removes = Get-AnsibleParam -obj $params -name "removes" -type "path"
+$stdin = Get-AnsibleParam -obj $params -name "stdin" -type "str"
+$no_profile = Get-AnsibleParam -obj $params -name "no_profile" -type "bool" -default $false
+$output_encoding_override = Get-AnsibleParam -obj $params -name "output_encoding_override" -type "str"
+
+$raw_command_line = $raw_command_line.Trim()
+
+$result = @{
+ changed = $true
+ cmd = $raw_command_line
+}
+
+if ($creates -and $(Test-AnsiblePath -Path $creates)) {
+ Exit-Json @{msg="skipped, since $creates exists";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+if ($removes -and -not $(Test-AnsiblePath -Path $removes)) {
+ Exit-Json @{msg="skipped, since $removes does not exist";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+$exec_args = $null
+If(-not $executable -or $executable -eq "powershell") {
+ $exec_application = "powershell.exe"
+
+ # force input encoding to preamble-free UTF8 so PS sub-processes (eg, Start-Job) don't blow up
+ $raw_command_line = "[Console]::InputEncoding = New-Object Text.UTF8Encoding `$false; " + $raw_command_line
+
+ # Base64 encode the command so we don't have to worry about the various levels of escaping
+ $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_command_line))
+
+ if ($stdin) {
+ $exec_args = "-encodedcommand $encoded_command"
+ } else {
+ $exec_args = "-noninteractive -encodedcommand $encoded_command"
+ }
+
+ if ($no_profile) {
+ $exec_args = "-noprofile $exec_args"
+ }
+}
+Else {
+ # FUTURE: support arg translation from executable (or executable_args?) to process arguments for arbitrary interpreter?
+ $exec_application = $executable
+ if (-not ($exec_application.EndsWith(".exe"))) {
+ $exec_application = "$($exec_application).exe"
+ }
+ $exec_args = "/c $raw_command_line"
+}
+
+$command = "`"$exec_application`" $exec_args"
+$run_command_arg = @{
+ command = $command
+}
+if ($chdir) {
+ $run_command_arg['working_directory'] = $chdir
+}
+if ($stdin) {
+ $run_command_arg['stdin'] = $stdin
+}
+if ($output_encoding_override) {
+ $run_command_arg['output_encoding_override'] = $output_encoding_override
+}
+
+$start_datetime = [DateTime]::UtcNow
+try {
+ $command_result = Run-Command @run_command_arg
+} catch {
+ $result.changed = $false
+ try {
+ $result.rc = $_.Exception.NativeErrorCode
+ } catch {
+ $result.rc = 2
+ }
+ Fail-Json -obj $result -message $_.Exception.Message
+}
+
+# TODO: decode CLIXML stderr output (and other streams?)
+$result.stdout = $command_result.stdout
+$result.stderr = Cleanse-Stderr $command_result.stderr
+$result.rc = $command_result.rc
+
+$end_datetime = [DateTime]::UtcNow
+$result.start = $start_datetime.ToString("yyyy-MM-dd HH:mm:ss.ffffff")
+$result.end = $end_datetime.ToString("yyyy-MM-dd HH:mm:ss.ffffff")
+$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff")
+
+If ($result.rc -ne 0) {
+ Fail-Json -obj $result -message "non-zero return code"
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_shell.py b/test/support/windows-integration/plugins/modules/win_shell.py
new file mode 100644
index 00000000..ee2cd762
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_shell.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_shell
+short_description: Execute shell commands on target hosts
+version_added: 2.2
+description:
+ - The C(win_shell) module takes the command name followed by a list of space-delimited arguments.
+ It is similar to the M(win_command) module, but runs
+ the command via a shell (defaults to PowerShell) on the target host.
+ - For non-Windows targets, use the M(shell) module instead.
+options:
+ free_form:
+ description:
+ - The C(win_shell) module takes a free form command to run.
+ - There is no parameter actually named 'free form'. See the examples!
+ type: str
+ required: yes
+ creates:
+ description:
+ - A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
+ type: path
+ removes:
+ description:
+ - A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
+ type: path
+ chdir:
+ description:
+ - Set the specified path as the current working directory before executing a command
+ type: path
+ executable:
+ description:
+ - Change the shell used to execute the command (eg, C(cmd)).
+ - The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
+ type: path
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: '2.5'
+ no_profile:
+ description:
+ - Do not load the user profile before running a command. This is only valid
+ when using PowerShell as the executable.
+ type: bool
+ default: no
+ version_added: '2.8'
+ output_encoding_override:
+ description:
+ - This option overrides the encoding of stdout/stderr output.
+ - You can use this option when you need to run a command which ignores the console's codepage.
+ - You should only need to use this option in very rare circumstances.
+ - This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
+ See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
+ type: str
+ version_added: '2.10'
+notes:
+ - If you want to run an executable securely and predictably, it may be
+ better to use the M(win_command) module instead. Best practices when writing
+ playbooks will follow the trend of using M(win_command) unless C(win_shell) is
+ explicitly required. When running ad-hoc commands, use your best judgement.
+ - WinRM will not return from a command execution until all child processes created have exited.
+ Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
+ Consider creating a Windows service for managing background processes.
+seealso:
+- module: psexec
+- module: raw
+- module: script
+- module: shell
+- module: win_command
+- module: win_psexec
+author:
+ - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = r'''
+# Execute a command in the remote shell; stdout goes to the specified
+# file on the remote.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt
+
+# Change the working directory to somedir/ before executing the command.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
+
+# You can also use the 'args' form to provide the options. This command
+# will change the working directory to somedir/ and will only run when
+# somedir/somelog.txt doesn't exist.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt
+ args:
+ chdir: C:\somedir
+ creates: C:\somelog.txt
+
+# Run a command under a non-Powershell interpreter (cmd in this case)
+- win_shell: echo %HOMEDIR%
+ args:
+ executable: cmd
+ register: homedir_out
+
+- name: Run multi-lined shell commands
+ win_shell: |
+ $value = Test-Path -Path C:\temp
+ if ($value) {
+ Remove-Item -Path C:\temp -Force
+ }
+ New-Item -Path C:\temp -ItemType Directory
+
+- name: Retrieve the input based on stdin
+ win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
+ args:
+ stdin: Input message
+'''
+
+RETURN = r'''
+msg:
+ description: Whether the command changed anything.
+ returned: always
+ type: bool
+ sample: true
+start:
+ description: The command execution start time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time.
+ returned: always
+ type: str
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
+stderr:
+ description: The command standard error.
+ returned: always
+ type: str
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task.
+ returned: always
+ type: str
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines.
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_stat.ps1 b/test/support/windows-integration/plugins/modules/win_stat.ps1
new file mode 100644
index 00000000..071eb11c
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_stat.ps1
@@ -0,0 +1,186 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.FileUtil
+#Requires -Module Ansible.ModuleUtils.LinkUtil
+
+function ConvertTo-Timestamp($start_date, $end_date) {
+ if ($start_date -and $end_date) {
+ return (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds
+ }
+}
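+# e.g. ConvertTo-Timestamp -start_date (Get-Date -Date "01/01/1970") -end_date $date
+# yields seconds since the Unix epoch, such as 1477984205.15.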
+
+function Get-FileChecksum($path, $algorithm) {
+ switch ($algorithm) {
+ 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
+ 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
+ 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
+ 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
+ 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
+ default { Fail-Json -obj $result -message "Unsupported hash algorithm supplied '$algorithm'" }
+ }
+
+ $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite)
+ try {
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower()
+ } finally {
+ $fp.Dispose()
+ }
+
+ return $hash
+}
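+# Illustrative call: Get-FileChecksum -path 'C:\foo.ini' -algorithm 'sha1'
+# returns the lowercase hex digest; the file is opened with ReadWrite sharing
+# so files already opened by other processes can still be hashed.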
+
+function Get-FileInfo {
+ param([String]$Path, [Switch]$Follow)
+
+ $info = Get-AnsibleItem -Path $Path -ErrorAction SilentlyContinue
+ $link_info = $null
+ if ($null -ne $info) {
+ try {
+ $link_info = Get-Link -link_path $info.FullName
+ } catch {
+ $module.Warn("Failed to check/get link info for file: $($_.Exception.Message)")
+ }
+
+ # If follow=true we want to follow the link all the way back to root object
+ if ($Follow -and $null -ne $link_info -and $link_info.Type -in @("SymbolicLink", "JunctionPoint")) {
+ $info, $link_info = Get-FileInfo -Path $link_info.AbsolutePath -Follow
+ }
+ }
+
+ return $info, $link_info
+}
+
+$spec = @{
+ options = @{
+ path = @{ type='path'; required=$true; aliases=@( 'dest', 'name' ) }
+ get_checksum = @{ type='bool'; default=$true }
+ checksum_algorithm = @{ type='str'; default='sha1'; choices=@( 'md5', 'sha1', 'sha256', 'sha384', 'sha512' ) }
+ follow = @{ type='bool'; default=$false }
+ }
+ supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$path = $module.Params.path
+$get_checksum = $module.Params.get_checksum
+$checksum_algorithm = $module.Params.checksum_algorithm
+$follow = $module.Params.follow
+
+$module.Result.stat = @{ exists=$false }
+
+Load-LinkUtils
+$info, $link_info = Get-FileInfo -Path $path -Follow:$follow
+If ($null -ne $info) {
+ $epoch_date = Get-Date -Date "01/01/1970"
+ $attributes = @()
+ foreach ($attribute in ($info.Attributes -split ',')) {
+ $attributes += $attribute.Trim()
+ }
+
+ # default values that are always set; type-specific values are set further
+ # below but are kept here as comments for easier readability
+ $stat = @{
+ exists = $true
+ attributes = $info.Attributes.ToString()
+ isarchive = ($attributes -contains "Archive")
+ isdir = $false
+ ishidden = ($attributes -contains "Hidden")
+ isjunction = $false
+ islnk = $false
+ isreadonly = ($attributes -contains "ReadOnly")
+ isreg = $false
+ isshared = $false
+ nlink = 1 # Number of links to the file (hard links), overridden below if islnk
+ # lnk_target = islnk or isjunction Target of the symlink. Note that relative paths remain relative
+ # lnk_source = islnk or isjunction Target of the symlink normalized for the remote filesystem
+ hlnk_targets = @()
+ creationtime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.CreationTime)
+ lastaccesstime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.LastAccessTime)
+ lastwritetime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.LastWriteTime)
+ # size = a file and directory - calculated below
+ path = $info.FullName
+ filename = $info.Name
+ # extension = a file
+ # owner = set outside this dict in case it fails
+ # sharename = a directory and isshared is True
+ # checksum = a file and get_checksum: True
+ }
+ try {
+ $stat.owner = $info.GetAccessControl().Owner
+ } catch {
+ # may not have rights, historical behaviour was to just set to $null
+ # due to ErrorActionPreference being set to "Continue"
+ $stat.owner = $null
+ }
+
+ # values that are set according to the type of file
+ if ($info.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
+ $stat.isdir = $true
+ $share_info = Get-CimInstance -ClassName Win32_Share -Filter "Path='$($stat.path -replace '\\', '\\')'"
+ if ($null -ne $share_info) {
+ $stat.isshared = $true
+ $stat.sharename = $share_info.Name
+ }
+
+ try {
+ $size = 0
+ foreach ($file in $info.EnumerateFiles("*", [System.IO.SearchOption]::AllDirectories)) {
+ $size += $file.Length
+ }
+ $stat.size = $size
+ } catch {
+ $stat.size = 0
+ }
+ } else {
+ $stat.extension = $info.Extension
+ $stat.isreg = $true
+ $stat.size = $info.Length
+
+ if ($get_checksum) {
+ try {
+ $stat.checksum = Get-FileChecksum -path $path -algorithm $checksum_algorithm
+ } catch {
+ $module.FailJson("Failed to get hash of file, set get_checksum to False to ignore this error: $($_.Exception.Message)", $_)
+ }
+ }
+ }
+
+ # Get symbolic link, junction point, hard link info
+ if ($null -ne $link_info) {
+ switch ($link_info.Type) {
+ "SymbolicLink" {
+ $stat.islnk = $true
+ $stat.isreg = $false
+ $stat.lnk_target = $link_info.TargetPath
+ $stat.lnk_source = $link_info.AbsolutePath
+ break
+ }
+ "JunctionPoint" {
+ $stat.isjunction = $true
+ $stat.isreg = $false
+ $stat.lnk_target = $link_info.TargetPath
+ $stat.lnk_source = $link_info.AbsolutePath
+ break
+ }
+ "HardLink" {
+ $stat.lnk_type = "hard"
+ $stat.nlink = $link_info.HardTargets.Count
+
+ # remove current path from the targets
+ $hlnk_targets = $link_info.HardTargets | Where-Object { $_ -ne $stat.path }
+ $stat.hlnk_targets = @($hlnk_targets)
+ break
+ }
+ }
+ }
+
+ $module.Result.stat = $stat
+}
+
+$module.ExitJson()
+
diff --git a/test/support/windows-integration/plugins/modules/win_stat.py b/test/support/windows-integration/plugins/modules/win_stat.py
new file mode 100644
index 00000000..0676b5b2
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_stat.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_stat
+version_added: "1.7"
+short_description: Get information about Windows files
+description:
+ - Returns information about a Windows file.
+ - For non-Windows targets, use the M(stat) module instead.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of; both forward and
+ back slashes are accepted.
+ type: path
+ required: yes
+ aliases: [ dest, name ]
+ get_checksum:
+ description:
+ - Whether to return a checksum of the file (default sha1).
+ type: bool
+ default: yes
+ version_added: "2.1"
+ checksum_algorithm:
+ description:
+ - Algorithm to determine checksum of file.
+ - Will throw an error if the host is unable to use the specified algorithm.
+ type: str
+ default: sha1
+ choices: [ md5, sha1, sha256, sha384, sha512 ]
+ version_added: "2.3"
+ follow:
+ description:
+ - Whether to follow symlinks or junction points.
+ - If C(path) points to another link, it will be followed until
+ no more links are found.
+ type: bool
+ default: no
+ version_added: "2.8"
+seealso:
+- module: stat
+- module: win_acl
+- module: win_file
+- module: win_owner
+author:
+- Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+- name: Obtain information about a file
+ win_stat:
+ path: C:\foo.ini
+ register: file_info
+
+- name: Obtain information about a folder
+ win_stat:
+ path: C:\bar
+ register: folder_info
+
+- name: Get MD5 checksum of a file
+ win_stat:
+ path: C:\foo.ini
+ get_checksum: yes
+ checksum_algorithm: md5
+ register: md5_checksum
+
+- debug:
+ var: md5_checksum.stat.checksum
+
+- name: Get SHA1 checksum of file
+ win_stat:
+ path: C:\foo.ini
+ get_checksum: yes
+ register: sha1_checksum
+
+- debug:
+ var: sha1_checksum.stat.checksum
+
+- name: Get SHA256 checksum of file
+ win_stat:
+ path: C:\foo.ini
+ get_checksum: yes
+ checksum_algorithm: sha256
+ register: sha256_checksum
+
+- debug:
+ var: sha256_checksum.stat.checksum
+'''
+
+RETURN = r'''
+changed:
+ description: Whether anything was changed
+ returned: always
+ type: bool
+ sample: true
+stat:
+ description: dictionary containing all the stat data
+ returned: success
+ type: complex
+ contains:
+ attributes:
+ description: Attributes of the file at path in raw form.
+ returned: success, path exists
+ type: str
+ sample: "Archive, Hidden"
+ checksum:
+ description: The checksum of a file based on checksum_algorithm specified.
+ returned: success, path exists, path is a file, get_checksum == True, and
+ the specified checksum_algorithm is supported
+ type: str
+ sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
+ creationtime:
+ description: The create time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ exists:
+ description: If the path exists or not.
+ returned: success
+ type: bool
+ sample: true
+ extension:
+ description: The extension of the file at path.
+ returned: success, path exists, path is a file
+ type: str
+ sample: ".ps1"
+ filename:
+ description: The name of the file (without path).
+ returned: success, path exists, path is a file
+ type: str
+ sample: foo.ini
+ hlnk_targets:
+ description: List of other files pointing to the same file (hard links), excludes the current file.
+ returned: success, path exists
+ type: list
+ sample:
+ - C:\temp\file.txt
+ - C:\Windows\update.log
+ isarchive:
+ description: If the path is ready for archiving or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isdir:
+ description: If the path is a directory or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ ishidden:
+ description: If the path is hidden or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isjunction:
+ description: If the path is a junction point or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ islnk:
+ description: If the path is a symbolic link or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isreadonly:
+ description: If the path is read only or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isreg:
+ description: If the path is a regular file.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isshared:
+ description: If the path is shared or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ lastaccesstime:
+ description: The last access time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ lastwritetime:
+ description: The last modification time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ lnk_source:
+ description: Target of the symlink normalized for the remote filesystem.
+ returned: success, path exists and the path is a symbolic link or junction point
+ type: str
+ sample: C:\temp\link
+ lnk_target:
+ description: Target of the symlink. Note that relative paths remain relative.
+ returned: success, path exists and the path is a symbolic link or junction point
+ type: str
+ sample: ..\link
+ nlink:
+ description: Number of links to the file (hard links).
+ returned: success, path exists
+ type: int
+ sample: 1
+ owner:
+ description: The owner of the file.
+ returned: success, path exists
+ type: str
+ sample: BUILTIN\Administrators
+ path:
+ description: The full absolute path to the file.
+ returned: success, path exists, file exists
+ type: str
+ sample: C:\foo.ini
+ sharename:
+ description: The name of share if folder is shared.
+ returned: success, path exists, file is a directory and isshared == True
+ type: str
+ sample: file-share
+ size:
+ description: The size in bytes of a file or folder.
+ returned: success, path exists, file is not a link
+ type: int
+ sample: 1024
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_tempfile.ps1 b/test/support/windows-integration/plugins/modules/win_tempfile.ps1
new file mode 100644
index 00000000..9a1a7174
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_tempfile.ps1
@@ -0,0 +1,72 @@
+#!powershell
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+Function New-TempFile {
+ Param ([string]$path, [string]$prefix, [string]$suffix, [string]$type, [bool]$checkmode)
+ $temppath = $null
+ $curerror = $null
+ $attempt = 0
+
+ # Since we don't know if the file already exists, we try 5 times with a random name
+ do {
+ $attempt += 1
+ $randomname = [System.IO.Path]::GetRandomFileName()
+ $temppath = (Join-Path -Path $path -ChildPath "$prefix$randomname$suffix")
+ Try {
+ $file = New-Item -Path $temppath -ItemType $type -WhatIf:$checkmode
+ # Makes sure we get the full absolute path of the created temp file and not a relative or DOS 8.3 dir
+ if (-not $checkmode) {
+ $temppath = $file.FullName
+ } else {
+ # Just rely on GetFullPath for check mode
+ $temppath = [System.IO.Path]::GetFullPath($temppath)
+ }
+ } Catch {
+ $temppath = $null
+ $curerror = $_
+ }
+ } until (($null -ne $temppath) -or ($attempt -ge 5))
+
+ # If it fails 5 times, something is wrong and we have to report the details
+ if ($null -eq $temppath) {
+ $module.FailJson("No random temporary file worked in $attempt attempts. Error: $($curerror.Exception.Message)", $curerror)
+ }
+
+ return $temppath.ToString()
+}
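+# Illustrative call (hypothetical values):
+#   New-TempFile -path $env:TEMP -prefix 'ansible.' -suffix 'build' -type 'directory' -checkmode $false
+# returns the absolute path of the created directory,
+# e.g. C:\Users\Administrator\AppData\Local\Temp\ansible.<random>build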
+
+$spec = @{
+ options = @{
+ path = @{ type='path'; default='%TEMP%'; aliases=@( 'dest' ) }
+ state = @{ type='str'; default='file'; choices=@( 'directory', 'file') }
+ prefix = @{ type='str'; default='ansible.' }
+ suffix = @{ type='str' }
+ }
+ supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$path = $module.Params.path
+$state = $module.Params.state
+$prefix = $module.Params.prefix
+$suffix = $module.Params.suffix
+
+# Expand environment variables on non-path types
+if ($null -ne $prefix) {
+ $prefix = [System.Environment]::ExpandEnvironmentVariables($prefix)
+}
+if ($null -ne $suffix) {
+ $suffix = [System.Environment]::ExpandEnvironmentVariables($suffix)
+}
+
+$module.Result.changed = $true
+$module.Result.state = $state
+
+$module.Result.path = New-TempFile -Path $path -Prefix $prefix -Suffix $suffix -Type $state -CheckMode $module.CheckMode
+
+$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_tempfile.py b/test/support/windows-integration/plugins/modules/win_tempfile.py
new file mode 100644
index 00000000..58dd6501
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_tempfile.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_tempfile
+version_added: "2.3"
+short_description: Creates temporary files and directories
+description:
+ - Creates temporary files and directories.
+ - For non-Windows targets, please use the M(tempfile) module instead.
+options:
+ state:
+ description:
+ - Whether to create file or directory.
+ type: str
+ choices: [ directory, file ]
+ default: file
+ path:
+ description:
+ - Location where temporary file or directory should be created.
+ - If path is not specified, the default system temporary directory (%TEMP%) will be used.
+ type: path
+ default: '%TEMP%'
+ aliases: [ dest ]
+ prefix:
+ description:
+ - Prefix of file/directory name created by module.
+ type: str
+ default: ansible.
+ suffix:
+ description:
+ - Suffix of file/directory name created by module.
+ type: str
+ default: ''
+seealso:
+- module: tempfile
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r"""
+- name: Create temporary build directory
+ win_tempfile:
+ state: directory
+ suffix: build
+
+- name: Create temporary file
+ win_tempfile:
+ state: file
+ suffix: temp
+"""
+
+RETURN = r'''
+path:
+ description: The absolute path to the created file or directory.
+ returned: success
+ type: str
+ sample: C:\Users\Administrator\AppData\Local\Temp\ansible.bMlvdk
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_template.py b/test/support/windows-integration/plugins/modules/win_template.py
new file mode 100644
index 00000000..bd8b2492
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_template.py
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a virtual module that is entirely implemented server side
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_template
+version_added: "1.9.2"
+short_description: Template a file out to a remote server
+options:
+ backup:
+ description:
+ - Determine whether a backup should be created.
+ - When set to C(yes), create a backup file including the timestamp information
+ so you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ version_added: '2.8'
+ newline_sequence:
+ default: '\r\n'
+ force:
+ version_added: '2.4'
+notes:
+- Beware fetching files from Windows machines when creating templates, because certain tools, such as PowerShell ISE
+ and regedit's export facility, add a Byte Order Mark as the first character of the file, which can cause tracebacks.
+- You can use the M(win_copy) module with the C(content:) option if you prefer the template inline, as part of the
+ playbook.
+- For Linux you can use M(template) which uses '\\n' as C(newline_sequence) by default.
+seealso:
+- module: win_copy
+- module: copy
+- module: template
+author:
+- Jon Hawkesworth (@jhawkesworth)
+extends_documentation_fragment:
+- template_common
+'''
+
+EXAMPLES = r'''
+- name: Create a file from a Jinja2 template
+ win_template:
+ src: /mytemplates/file.conf.j2
+ dest: C:\Temp\file.conf
+
+- name: Create a Unix-style file from a Jinja2 template
+ win_template:
+ src: unix/config.conf.j2
+ dest: C:\share\unix\config.conf
+ newline_sequence: '\n'
+ backup: yes
+'''
+
+RETURN = r'''
+backup_file:
+ description: Name of the backup file that was created.
+ returned: if backup=yes
+ type: str
+ sample: C:\Path\To\File.txt.11540.20150212-220915.bak
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_user.ps1 b/test/support/windows-integration/plugins/modules/win_user.ps1
new file mode 100644
index 00000000..54905cb2
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_user.ps1
@@ -0,0 +1,273 @@
+#!powershell
+
+# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.AccessToken
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+########
+$ADS_UF_PASSWD_CANT_CHANGE = 64
+$ADS_UF_DONT_EXPIRE_PASSWD = 65536
+
+$adsi = [ADSI]"WinNT://$env:COMPUTERNAME"
+
+function Get-User($user) {
+ $adsi.Children | Where-Object {$_.SchemaClassName -eq 'user' -and $_.Name -eq $user }
+ return
+}
+
+function Get-UserFlag($user, $flag) {
+ If ($user.UserFlags[0] -band $flag) {
+ $true
+ }
+ Else {
+ $false
+ }
+}
+
+function Set-UserFlag($user, $flag) {
+ $user.UserFlags = ($user.UserFlags[0] -BOR $flag)
+}
+
+function Clear-UserFlag($user, $flag) {
+ $user.UserFlags = ($user.UserFlags[0] -BXOR $flag)
+}
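+# Note: -bxor toggles rather than clears a bit, but the callers below only
+# invoke Clear-UserFlag after Get-UserFlag showed the flag set, so it acts
+# as a clear in practice.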
+
+function Get-Group($grp) {
+ $adsi.Children | Where-Object { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp }
+ return
+}
+
+Function Test-LocalCredential {
+ param([String]$Username, [String]$Password)
+
+ try {
+ $handle = [Ansible.AccessToken.TokenUtil]::LogonUser($Username, $null, $Password, "Network", "Default")
+ $handle.Dispose()
+ $valid_credentials = $true
+ } catch [Ansible.AccessToken.Win32Exception] {
+ # following errors indicate the creds are correct but the user was
+ # unable to log on for other reasons, which we don't care about
+ $success_codes = @(
+ 0x0000052F, # ERROR_ACCOUNT_RESTRICTION
+ 0x00000530, # ERROR_INVALID_LOGON_HOURS
+ 0x00000531, # ERROR_INVALID_WORKSTATION
+ 0x00000569 # ERROR_LOGON_TYPE_GRANTED
+ )
+
+ if ($_.Exception.NativeErrorCode -eq 0x0000052E) {
+ # ERROR_LOGON_FAILURE - the user or pass was incorrect
+ $valid_credentials = $false
+ } elseif ($_.Exception.NativeErrorCode -in $success_codes) {
+ $valid_credentials = $true
+ } else {
+ # an unknown failure, reraise exception
+ throw $_
+ }
+ }
+ return $valid_credentials
+}
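+# Illustrative usage (hypothetical credentials):
+#   Test-LocalCredential -Username 'bob' -Password 'Secret123!'
+# returns $true when the password is correct (even if the account is merely
+# restricted, per the codes above) and $false only on ERROR_LOGON_FAILURE.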
+
+########
+
+$params = Parse-Args $args;
+
+$result = @{
+ changed = $false
+};
+
+$username = Get-AnsibleParam -obj $params -name "name" -type "str" -failifempty $true
+$fullname = Get-AnsibleParam -obj $params -name "fullname" -type "str"
+$description = Get-AnsibleParam -obj $params -name "description" -type "str"
+$password = Get-AnsibleParam -obj $params -name "password" -type "str"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent","query"
+$update_password = Get-AnsibleParam -obj $params -name "update_password" -type "str" -default "always" -validateset "always","on_create"
+$password_expired = Get-AnsibleParam -obj $params -name "password_expired" -type "bool"
+$password_never_expires = Get-AnsibleParam -obj $params -name "password_never_expires" -type "bool"
+$user_cannot_change_password = Get-AnsibleParam -obj $params -name "user_cannot_change_password" -type "bool"
+$account_disabled = Get-AnsibleParam -obj $params -name "account_disabled" -type "bool"
+$account_locked = Get-AnsibleParam -obj $params -name "account_locked" -type "bool"
+$groups = Get-AnsibleParam -obj $params -name "groups"
+$groups_action = Get-AnsibleParam -obj $params -name "groups_action" -type "str" -default "replace" -validateset "add","remove","replace"
+
+If ($null -ne $account_locked -and $account_locked) {
+ Fail-Json $result "account_locked must be set to 'no' if provided"
+}
+
+If ($null -ne $groups) {
+ If ($groups -is [System.String]) {
+ [string[]]$groups = $groups.Split(",")
+ }
+ ElseIf ($groups -isnot [System.Collections.IList]) {
+ Fail-Json $result "groups must be a string or array"
+ }
+ $groups = $groups | ForEach-Object { ([string]$_).Trim() } | Where-Object { $_ }
+ If ($null -eq $groups) {
+ $groups = @()
+ }
+}
+
+$user_obj = Get-User $username
+
+If ($state -eq 'present') {
+ # Add or update user
+ try {
+ If (-not $user_obj) {
+ $user_obj = $adsi.Create("User", $username)
+ If ($null -ne $password) {
+ $user_obj.SetPassword($password)
+ }
+ $user_obj.SetInfo()
+ $result.changed = $true
+ }
+ ElseIf (($null -ne $password) -and ($update_password -eq 'always')) {
+ # Test-LocalCredential will fail if either of these is true, so just force the update...
+ If($user_obj.AccountDisabled -or $user_obj.PasswordExpired) {
+ $password_match = $false
+ }
+ Else {
+ try {
+ $password_match = Test-LocalCredential -Username $username -Password $password
+ } catch [System.ComponentModel.Win32Exception] {
+ Fail-Json -obj $result -message "Failed to validate the user's credentials: $($_.Exception.Message)"
+ }
+ }
+
+ If (-not $password_match) {
+ $user_obj.SetPassword($password)
+ $result.changed = $true
+ }
+ }
+ If (($null -ne $fullname) -and ($fullname -ne $user_obj.FullName[0])) {
+ $user_obj.FullName = $fullname
+ $result.changed = $true
+ }
+ If (($null -ne $description) -and ($description -ne $user_obj.Description[0])) {
+ $user_obj.Description = $description
+ $result.changed = $true
+ }
+ If (($null -ne $password_expired) -and ($password_expired -ne ($user_obj.PasswordExpired | ConvertTo-Bool))) {
+ $user_obj.PasswordExpired = If ($password_expired) { 1 } Else { 0 }
+ $result.changed = $true
+ }
+ If (($null -ne $password_never_expires) -and ($password_never_expires -ne (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD))) {
+ If ($password_never_expires) {
+ Set-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD
+ }
+ Else {
+ Clear-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD
+ }
+ $result.changed = $true
+ }
+ If (($null -ne $user_cannot_change_password) -and ($user_cannot_change_password -ne (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE))) {
+ If ($user_cannot_change_password) {
+ Set-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE
+ }
+ Else {
+ Clear-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE
+ }
+ $result.changed = $true
+ }
+ If (($null -ne $account_disabled) -and ($account_disabled -ne $user_obj.AccountDisabled)) {
+ $user_obj.AccountDisabled = $account_disabled
+ $result.changed = $true
+ }
+ If (($null -ne $account_locked) -and ($account_locked -ne $user_obj.IsAccountLocked)) {
+ $user_obj.IsAccountLocked = $account_locked
+ $result.changed = $true
+ }
+ If ($result.changed) {
+ $user_obj.SetInfo()
+ }
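+ # Reconcile group membership: remove/replace first drops unwanted
+ # memberships, then add/replace joins any groups still missing.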
+ If ($null -ne $groups) {
+ [string[]]$current_groups = $user_obj.Groups() | ForEach-Object { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) }
+ If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) {
+ ForEach ($grp in $current_groups) {
+ If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) {
+ $group_obj = Get-Group $grp
+ If ($group_obj) {
+ $group_obj.Remove($user_obj.Path)
+ $result.changed = $true
+ }
+ Else {
+ Fail-Json $result "group '$grp' not found"
+ }
+ }
+ }
+ }
+ If (($groups_action -eq "add") -or ($groups_action -eq "replace")) {
+ ForEach ($grp in $groups) {
+ If ($current_groups -notcontains $grp) {
+ $group_obj = Get-Group $grp
+ If ($group_obj) {
+ $group_obj.Add($user_obj.Path)
+ $result.changed = $true
+ }
+ Else {
+ Fail-Json $result "group '$grp' not found"
+ }
+ }
+ }
+ }
+ }
+ }
+ catch {
+ Fail-Json $result $_.Exception.Message
+ }
+}
+ElseIf ($state -eq 'absent') {
+ # Remove user
+ try {
+ If ($user_obj) {
+ $username = $user_obj.Name.Value
+ $adsi.delete("User", $user_obj.Name.Value)
+ $result.changed = $true
+ $result.msg = "User '$username' deleted successfully"
+ $user_obj = $null
+ } else {
+ $result.msg = "User '$username' was not found"
+ }
+ }
+ catch {
+ Fail-Json $result $_.Exception.Message
+ }
+}
+
+try {
+ If ($user_obj -and $user_obj -is [System.DirectoryServices.DirectoryEntry]) {
+ $user_obj.RefreshCache()
+ $result.name = $user_obj.Name[0]
+ $result.fullname = $user_obj.FullName[0]
+ $result.path = $user_obj.Path
+ $result.description = $user_obj.Description[0]
+ $result.password_expired = ($user_obj.PasswordExpired | ConvertTo-Bool)
+ $result.password_never_expires = (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD)
+ $result.user_cannot_change_password = (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE)
+ $result.account_disabled = $user_obj.AccountDisabled
+ $result.account_locked = $user_obj.IsAccountLocked
+ $result.sid = (New-Object System.Security.Principal.SecurityIdentifier($user_obj.ObjectSid.Value, 0)).Value
+ $user_groups = @()
+ ForEach ($grp in $user_obj.Groups()) {
+ $group_result = @{
+ name = $grp.GetType().InvokeMember("Name", "GetProperty", $null, $grp, $null)
+ path = $grp.GetType().InvokeMember("ADsPath", "GetProperty", $null, $grp, $null)
+ }
+ $user_groups += $group_result
+ }
+ $result.groups = $user_groups
+ $result.state = "present"
+ }
+ Else {
+ $result.name = $username
+ if ($state -eq 'query') {
+ $result.msg = "User '$username' was not found"
+ }
+ $result.state = "absent"
+ }
+}
+catch {
+ Fail-Json $result $_.Exception.Message
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_user.py b/test/support/windows-integration/plugins/modules/win_user.py
new file mode 100644
index 00000000..5fc0633d
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_user.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Matt Martz <matt@sivel.net>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_user
+version_added: "1.7"
+short_description: Manages local Windows user accounts
+description:
+ - Manages local Windows user accounts.
+ - For non-Windows targets, use the M(user) module instead.
+options:
+ name:
+ description:
+ - Name of the user to create, remove or modify.
+ type: str
+ required: yes
+ fullname:
+ description:
+ - Full name of the user.
+ type: str
+ version_added: "1.9"
+ description:
+ description:
+ - Description of the user.
+ type: str
+ version_added: "1.9"
+ password:
+ description:
+ - Optionally set the user's password to this (plain text) value.
+ type: str
+ update_password:
+ description:
+ - C(always) will update passwords if they differ. C(on_create) will
+ only set the password for newly created users.
+ type: str
+ choices: [ always, on_create ]
+ default: always
+ version_added: "1.9"
+ password_expired:
+ description:
+ - C(yes) will require the user to change their password at next login.
+ - C(no) will clear the expired password flag.
+ type: bool
+ version_added: "1.9"
+ password_never_expires:
+ description:
+ - C(yes) will set the password to never expire.
+ - C(no) will allow the password to expire.
+ type: bool
+ version_added: "1.9"
+ user_cannot_change_password:
+ description:
+ - C(yes) will prevent the user from changing their password.
+ - C(no) will allow the user to change their password.
+ type: bool
+ version_added: "1.9"
+ account_disabled:
+ description:
+ - C(yes) will disable the user account.
+ - C(no) will clear the disabled flag.
+ type: bool
+ version_added: "1.9"
+ account_locked:
+ description:
+ - C(no) will unlock the user account if locked.
+ choices: [ 'no' ]
+ version_added: "1.9"
+ groups:
+ description:
+ - Adds or removes the user from this comma-separated list of groups,
+ depending on the value of I(groups_action).
+ - When I(groups_action) is C(replace) and I(groups) is set to the empty
+ string ('groups='), the user is removed from all groups.
+ version_added: "1.9"
+ groups_action:
+ description:
+ - If C(add), the user is added to each group in I(groups) of which they are
+ not already a member.
+ - If C(replace), the user is added as a member of each group in
+ I(groups) and removed from any other groups.
+ - If C(remove), the user is removed from each group in I(groups).
+ type: str
+ choices: [ add, replace, remove ]
+ default: replace
+ version_added: "1.9"
+ state:
+ description:
+ - When C(absent), removes the user account if it exists.
+ - When C(present), creates or updates the user account.
+ - When C(query) (new in 1.9), retrieves the user account details
+ without making any changes.
+ type: str
+ choices: [ absent, present, query ]
+ default: present
+seealso:
+- module: user
+- module: win_domain_membership
+- module: win_domain_user
+- module: win_group
+- module: win_group_membership
+- module: win_user_profile
+author:
+ - Paul Durivage (@angstwad)
+ - Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+- name: Ensure user bob is present
+ win_user:
+ name: bob
+ password: B0bP4ssw0rd
+ state: present
+ groups:
+ - Users
+
+- name: Ensure user bob is absent
+ win_user:
+ name: bob
+ state: absent
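+
+# A hedged sketch combining the group options documented above; the group
+# name is illustrative.
+- name: Add bob to Remote Desktop Users without touching other memberships
+ win_user:
+ name: bob
+ groups:
+ - Remote Desktop Users
+ groups_action: add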
+'''
+
+RETURN = r'''
+account_disabled:
+ description: Whether the user is disabled.
+ returned: user exists
+ type: bool
+ sample: false
+account_locked:
+ description: Whether the user is locked.
+ returned: user exists
+ type: bool
+ sample: false
+description:
+ description: The description set for the user.
+ returned: user exists
+ type: str
+ sample: Username for test
+fullname:
+ description: The full name set for the user.
+ returned: user exists
+ type: str
+ sample: Test Username
+groups:
+ description: A list of the groups the user is a member of, including each group's name and ADSI path.
+ returned: user exists
+ type: list
+ sample: [
+ {
+ "name": "Administrators",
+ "path": "WinNT://WORKGROUP/USER-PC/Administrators"
+ }
+ ]
+name:
+ description: The name of the user.
+ returned: always
+ type: str
+ sample: username
+password_expired:
+ description: Whether the password is expired.
+ returned: user exists
+ type: bool
+ sample: false
+password_never_expires:
+ description: Whether the password is set to never expire.
+ returned: user exists
+ type: bool
+ sample: true
+path:
+ description: The ADSI path for the user.
+ returned: user exists
+ type: str
+ sample: "WinNT://WORKGROUP/USER-PC/username"
+sid:
+ description: The SID for the user.
+ returned: user exists
+ type: str
+ sample: S-1-5-21-3322259488-2828151810-3939402796-1001
+user_cannot_change_password:
+ description: Whether the user can change their own password.
+ returned: user exists
+ type: bool
+ sample: false
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_user_right.ps1 b/test/support/windows-integration/plugins/modules/win_user_right.ps1
new file mode 100644
index 00000000..3fac52a8
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_user_right.ps1
@@ -0,0 +1,349 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.SID
+
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args $args -supports_check_mode $true
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false
+$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP
+
+$name = Get-AnsibleParam -obj $params -name "name" -type "str" -failifempty $true
+$users = Get-AnsibleParam -obj $params -name "users" -type "list" -failifempty $true
+$action = Get-AnsibleParam -obj $params -name "action" -type "str" -default "set" -validateset "add","remove","set"
+
+$result = @{
+ changed = $false
+ added = @()
+ removed = @()
+}
+
+if ($diff_mode) {
+ $result.diff = @{}
+}
+
+$sec_helper_util = @"
+using System;
+using System.ComponentModel;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+
+namespace Ansible
+{
+ public class LsaRightHelper : IDisposable
+ {
+ // Code modified from https://gallery.technet.microsoft.com/scriptcenter/Grant-Revoke-Query-user-26e259b0
+
+ enum Access : int
+ {
+ POLICY_READ = 0x20006,
+ POLICY_ALL_ACCESS = 0x00F0FFF,
+ POLICY_EXECUTE = 0X20801,
+ POLICY_WRITE = 0X207F8
+ }
+
+ IntPtr lsaHandle;
+
+ const string LSA_DLL = "advapi32.dll";
+ const CharSet DEFAULT_CHAR_SET = CharSet.Unicode;
+
+ const uint STATUS_NO_MORE_ENTRIES = 0x8000001a;
+ const uint STATUS_NO_SUCH_PRIVILEGE = 0xc0000060;
+
+ internal sealed class Sid : IDisposable
+ {
+ public IntPtr pSid = IntPtr.Zero;
+ public SecurityIdentifier sid = null;
+
+ public Sid(string sidString)
+ {
+ try
+ {
+ sid = new SecurityIdentifier(sidString);
+ } catch
+ {
+ throw new ArgumentException(String.Format("SID string {0} could not be converted to SecurityIdentifier", sidString));
+ }
+
+ Byte[] buffer = new Byte[sid.BinaryLength];
+ sid.GetBinaryForm(buffer, 0);
+
+ pSid = Marshal.AllocHGlobal(sid.BinaryLength);
+ Marshal.Copy(buffer, 0, pSid, sid.BinaryLength);
+ }
+
+ public void Dispose()
+ {
+ if (pSid != IntPtr.Zero)
+ {
+ Marshal.FreeHGlobal(pSid);
+ pSid = IntPtr.Zero;
+ }
+ GC.SuppressFinalize(this);
+ }
+ ~Sid() { Dispose(); }
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ private struct LSA_OBJECT_ATTRIBUTES
+ {
+ public int Length;
+ public IntPtr RootDirectory;
+ public IntPtr ObjectName;
+ public int Attributes;
+ public IntPtr SecurityDescriptor;
+ public IntPtr SecurityQualityOfService;
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = DEFAULT_CHAR_SET)]
+ private struct LSA_UNICODE_STRING
+ {
+ public ushort Length;
+ public ushort MaximumLength;
+ [MarshalAs(UnmanagedType.LPWStr)]
+ public string Buffer;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ private struct LSA_ENUMERATION_INFORMATION
+ {
+ public IntPtr Sid;
+ }
+
+ [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)]
+ private static extern uint LsaOpenPolicy(
+ LSA_UNICODE_STRING[] SystemName,
+ ref LSA_OBJECT_ATTRIBUTES ObjectAttributes,
+ int AccessMask,
+ out IntPtr PolicyHandle
+ );
+
+ [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)]
+ private static extern uint LsaAddAccountRights(
+ IntPtr PolicyHandle,
+ IntPtr pSID,
+ LSA_UNICODE_STRING[] UserRights,
+ int CountOfRights
+ );
+
+ [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)]
+ private static extern uint LsaRemoveAccountRights(
+ IntPtr PolicyHandle,
+ IntPtr pSID,
+ bool AllRights,
+ LSA_UNICODE_STRING[] UserRights,
+ int CountOfRights
+ );
+
+ [DllImport(LSA_DLL, CharSet = DEFAULT_CHAR_SET, SetLastError = true)]
+ private static extern uint LsaEnumerateAccountsWithUserRight(
+ IntPtr PolicyHandle,
+ LSA_UNICODE_STRING[] UserRights,
+ out IntPtr EnumerationBuffer,
+ out ulong CountReturned
+ );
+
+ [DllImport(LSA_DLL)]
+ private static extern int LsaNtStatusToWinError(int NTSTATUS);
+
+ [DllImport(LSA_DLL)]
+ private static extern int LsaClose(IntPtr PolicyHandle);
+
+ [DllImport(LSA_DLL)]
+ private static extern int LsaFreeMemory(IntPtr Buffer);
+
+ public LsaRightHelper()
+ {
+ LSA_OBJECT_ATTRIBUTES lsaAttr;
+ lsaAttr.RootDirectory = IntPtr.Zero;
+ lsaAttr.ObjectName = IntPtr.Zero;
+ lsaAttr.Attributes = 0;
+ lsaAttr.SecurityDescriptor = IntPtr.Zero;
+ lsaAttr.SecurityQualityOfService = IntPtr.Zero;
+ lsaAttr.Length = Marshal.SizeOf(typeof(LSA_OBJECT_ATTRIBUTES));
+
+ lsaHandle = IntPtr.Zero;
+
+ LSA_UNICODE_STRING[] system = new LSA_UNICODE_STRING[1];
+ system[0] = InitLsaString("");
+
+ uint ret = LsaOpenPolicy(system, ref lsaAttr, (int)Access.POLICY_ALL_ACCESS, out lsaHandle);
+ if (ret != 0)
+ throw new Win32Exception(LsaNtStatusToWinError((int)ret));
+ }
+
+ public void AddPrivilege(string sidString, string privilege)
+ {
+ uint ret = 0;
+ using (Sid sid = new Sid(sidString))
+ {
+ LSA_UNICODE_STRING[] privileges = new LSA_UNICODE_STRING[1];
+ privileges[0] = InitLsaString(privilege);
+ ret = LsaAddAccountRights(lsaHandle, sid.pSid, privileges, 1);
+ }
+ if (ret != 0)
+ throw new Win32Exception(LsaNtStatusToWinError((int)ret));
+ }
+
+ public void RemovePrivilege(string sidString, string privilege)
+ {
+ uint ret = 0;
+ using (Sid sid = new Sid(sidString))
+ {
+ LSA_UNICODE_STRING[] privileges = new LSA_UNICODE_STRING[1];
+ privileges[0] = InitLsaString(privilege);
+ ret = LsaRemoveAccountRights(lsaHandle, sid.pSid, false, privileges, 1);
+ }
+ if (ret != 0)
+ throw new Win32Exception(LsaNtStatusToWinError((int)ret));
+ }
+
+ public string[] EnumerateAccountsWithUserRight(string privilege)
+ {
+ uint ret = 0;
+ ulong count = 0;
+ LSA_UNICODE_STRING[] rights = new LSA_UNICODE_STRING[1];
+ rights[0] = InitLsaString(privilege);
+ IntPtr buffer = IntPtr.Zero;
+
+ ret = LsaEnumerateAccountsWithUserRight(lsaHandle, rights, out buffer, out count);
+ switch (ret)
+ {
+ case 0:
+ string[] accounts = new string[count];
+ for (int i = 0; i < (int)count; i++)
+ {
+ LSA_ENUMERATION_INFORMATION LsaInfo = (LSA_ENUMERATION_INFORMATION)Marshal.PtrToStructure(
+ IntPtr.Add(buffer, i * Marshal.SizeOf(typeof(LSA_ENUMERATION_INFORMATION))),
+ typeof(LSA_ENUMERATION_INFORMATION));
+
+ accounts[i] = new SecurityIdentifier(LsaInfo.Sid).ToString();
+ }
+ LsaFreeMemory(buffer);
+ return accounts;
+
+ case STATUS_NO_MORE_ENTRIES:
+ return new string[0];
+
+ case STATUS_NO_SUCH_PRIVILEGE:
+ throw new ArgumentException(String.Format("Invalid privilege {0} not found in LSA database", privilege));
+
+ default:
+ throw new Win32Exception(LsaNtStatusToWinError((int)ret));
+ }
+ }
+
+ static LSA_UNICODE_STRING InitLsaString(string s)
+ {
+ // Unicode strings max. 32KB
+ if (s.Length > 0x7ffe)
+ throw new ArgumentException("String too long");
+
+ LSA_UNICODE_STRING lus = new LSA_UNICODE_STRING();
+ lus.Buffer = s;
+ lus.Length = (ushort)(s.Length * sizeof(char));
+ lus.MaximumLength = (ushort)(lus.Length + sizeof(char));
+
+ return lus;
+ }
+
+ public void Dispose()
+ {
+ if (lsaHandle != IntPtr.Zero)
+ {
+ LsaClose(lsaHandle);
+ lsaHandle = IntPtr.Zero;
+ }
+ GC.SuppressFinalize(this);
+ }
+ ~LsaRightHelper() { Dispose(); }
+ }
+}
+"@
+
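+# Compile the C# helper with TMP pointed at Ansible's remote_tmp so the
+# compiler's scratch files land in a writable, expected location.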
+$original_tmp = $env:TMP
+$env:TMP = $_remote_tmp
+Add-Type -TypeDefinition $sec_helper_util
+$env:TMP = $original_tmp
+
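+# Set arithmetic via LINQ: "add" yields only the users not already present,
+# "remove" yields the intersection, and "set" diffs in both directions.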
+Function Compare-UserList($existing_users, $new_users) {
+ $added_users = [String[]]@()
+ $removed_users = [String[]]@()
+ if ($action -eq "add") {
+ $added_users = [Linq.Enumerable]::Except($new_users, $existing_users)
+ } elseif ($action -eq "remove") {
+ $removed_users = [Linq.Enumerable]::Intersect($new_users, $existing_users)
+ } else {
+ $added_users = [Linq.Enumerable]::Except($new_users, $existing_users)
+ $removed_users = [Linq.Enumerable]::Except($existing_users, $new_users)
+ }
+
+ $change_result = @{
+ added = $added_users
+ removed = $removed_users
+ }
+
+ return $change_result
+}
+
+# C# class we can use to enumerate/add/remove rights
+$lsa_helper = New-Object -TypeName Ansible.LsaRightHelper
+
+$new_users = [System.Collections.ArrayList]@()
+foreach ($user in $users) {
+ $user_sid = Convert-ToSID -account_name $user
+ $new_users.Add($user_sid) > $null
+}
+$new_users = [String[]]$new_users.ToArray()
+try {
+ $existing_users = $lsa_helper.EnumerateAccountsWithUserRight($name)
+} catch [ArgumentException] {
+ Fail-Json -obj $result -message "the specified right $name is not a valid right"
+} catch {
+ Fail-Json -obj $result -message "failed to enumerate existing accounts with right: $($_.Exception.Message)"
+}
+
+$change_result = Compare-UserList -existing_users $existing_users -new_users $new_users
+if (($change_result.added.Length -gt 0) -or ($change_result.removed.Length -gt 0)) {
+ $result.changed = $true
+ $diff_text = "[$name]`n"
+
+ # used in diff mode calculation
+ $new_user_list = [System.Collections.ArrayList]$existing_users
+ foreach ($user in $change_result.removed) {
+ if (-not $check_mode) {
+ $lsa_helper.RemovePrivilege($user, $name)
+ }
+ $user_name = Convert-FromSID -sid $user
+ $result.removed += $user_name
+ $diff_text += "-$user_name`n"
+ $new_user_list.Remove($user) > $null
+ }
+ foreach ($user in $change_result.added) {
+ if (-not $check_mode) {
+ $lsa_helper.AddPrivilege($user, $name)
+ }
+ $user_name = Convert-FromSID -sid $user
+ $result.added += $user_name
+ $diff_text += "+$user_name`n"
+ $new_user_list.Add($user) > $null
+ }
+
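+ # Prefix the whole diff block: "-" when the right ends up with no members,
+ # "+" when it previously had none.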
+ if ($diff_mode) {
+ if ($new_user_list.Count -eq 0) {
+ $diff_text = "-$diff_text"
+ } elseif ($existing_users.Count -eq 0) {
+ $diff_text = "+$diff_text"
+ }
+ $result.diff.prepared = $diff_text
+ }
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_user_right.py b/test/support/windows-integration/plugins/modules/win_user_right.py
new file mode 100644
index 00000000..55882083
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_user_right.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_user_right
+version_added: '2.4'
+short_description: Manage Windows User Rights
+description:
+- Add, remove or set User Rights for users or groups.
+- You can set user rights for both local and domain accounts.
+options:
+ name:
+ description:
+ - The name of the User Right as shown by the C(Constant Name) value from
+ U(https://technet.microsoft.com/en-us/library/dd349804.aspx).
+ - The module will return an error if the right is invalid.
+ type: str
+ required: yes
+ users:
+ description:
+ - A list of users or groups to add/remove on the User Right.
+ - These can be in the form DOMAIN\user-group, user-group@DOMAIN.COM for
+ domain users/groups.
+ - For local users/groups it can be in the form user-group, .\user-group,
+ SERVERNAME\user-group where SERVERNAME is the name of the remote server.
+ - You can also add special local accounts like SYSTEM and others.
+ - Can be set to an empty list with I(action=set) to remove all accounts
+ from the right.
+ type: list
+ required: yes
+ action:
+ description:
+ - C(add) will add the users/groups to the existing right.
+ - C(remove) will remove the users/groups from the existing right.
+ - C(set) will replace the users/groups of the existing right.
+ type: str
+ default: set
+ choices: [ add, remove, set ]
+notes:
+- If the server is domain joined, this module can change a right, but if a GPO
+ governs that right, the changes won't last.
+seealso:
+- module: win_group
+- module: win_group_membership
+- module: win_user
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+---
+- name: Replace the entries of Deny log on locally
+ win_user_right:
+ name: SeDenyInteractiveLogonRight
+ users:
+ - Guest
+ - Users
+ action: set
+
+- name: Add account to Log on as a service
+ win_user_right:
+ name: SeServiceLogonRight
+ users:
+ - .\Administrator
+ - '{{ ansible_hostname }}\local-user'
+ action: add
+
+- name: Remove accounts who can create Symbolic links
+ win_user_right:
+ name: SeCreateSymbolicLinkPrivilege
+ users:
+ - SYSTEM
+ - Administrators
+ - DOMAIN\User
+ - group@DOMAIN.COM
+ action: remove
+
+- name: Remove all accounts from the deny remote interactive logon right
+ win_user_right:
+ name: SeDenyRemoteInteractiveLogonRight
+ users: []
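+
+# A hedged sketch: preview a change in check mode before applying it; the
+# right and the account name are illustrative.
+- name: Preview who would gain Log on as a batch job
+ win_user_right:
+ name: SeBatchLogonRight
+ users:
+ - DOMAIN\svc-batch
+ action: add
+ check_mode: yes
+ register: right_preview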
+'''
+
+RETURN = r'''
+added:
+ description: A list of accounts that were added to the right; this is empty
+ if no accounts were added.
+ returned: success
+ type: list
+ sample: ["NT AUTHORITY\\SYSTEM", "DOMAIN\\User"]
+removed:
+ description: A list of accounts that were removed from the right; this is
+ empty if no accounts were removed.
+ returned: success
+ type: list
+ sample: ["SERVERNAME\\Administrator", "BUILTIN\\Administrators"]
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_wait_for.ps1 b/test/support/windows-integration/plugins/modules/win_wait_for.ps1
new file mode 100644
index 00000000..e0a9a720
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_wait_for.ps1
@@ -0,0 +1,259 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.FileUtil
+
+$ErrorActionPreference = "Stop"
+
+$params = Parse-Args -arguments $args -supports_check_mode $true
+
+$connect_timeout = Get-AnsibleParam -obj $params -name "connect_timeout" -type "int" -default 5
+$delay = Get-AnsibleParam -obj $params -name "delay" -type "int"
+$exclude_hosts = Get-AnsibleParam -obj $params -name "exclude_hosts" -type "list"
+$hostname = Get-AnsibleParam -obj $params -name "host" -type "str" -default "127.0.0.1"
+$path = Get-AnsibleParam -obj $params -name "path" -type "path"
+$port = Get-AnsibleParam -obj $params -name "port" -type "int"
+$regex = Get-AnsibleParam -obj $params -name "regex" -type "str" -aliases "search_regex","regexp"
+$sleep = Get-AnsibleParam -obj $params -name "sleep" -type "int" -default 1
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "started" -validateset "present","started","stopped","absent","drained"
+$timeout = Get-AnsibleParam -obj $params -name "timeout" -type "int" -default 300
+
+$result = @{
+ changed = $false
+ elapsed = 0
+}
+
+# validate the input with the various options
+if ($null -ne $port -and $null -ne $path) {
+ Fail-Json $result "the port and path parameters cannot both be passed to win_wait_for"
+}
+if ($null -ne $exclude_hosts -and $state -ne "drained") {
+ Fail-Json $result "exclude_hosts should only be used with state=drained"
+}
+if ($null -ne $path) {
+ if ($state -in @("stopped","drained")) {
+ Fail-Json $result "state=$state should only be used for checking a port in the win_wait_for module"
+ }
+
+ if ($null -ne $exclude_hosts) {
+ Fail-Json $result "exclude_hosts should only be used when checking a port and state=drained in the win_wait_for module"
+ }
+}
+
+if ($null -ne $port) {
+ if ($null -ne $regex) {
+ Fail-Json $result "regex should only be used when checking a string in a file in the win_wait_for module"
+ }
+
+ if ($null -ne $exclude_hosts -and $state -ne "drained") {
+ Fail-Json $result "exclude_hosts should only be used when state=drained in the win_wait_for module"
+ }
+}
+
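+# BeginConnect plus an AsyncWaitHandle bounds each attempt to connect_timeout;
+# a synchronous Connect() would block for the OS default timeout instead.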
+Function Test-Port($hostname, $port) {
+ $timeout = $connect_timeout * 1000
+ $socket = New-Object -TypeName System.Net.Sockets.TcpClient
+ $connect = $socket.BeginConnect($hostname, $port, $null, $null)
+ $wait = $connect.AsyncWaitHandle.WaitOne($timeout, $false)
+
+ if ($wait) {
+ try {
+ $socket.EndConnect($connect) | Out-Null
+ $valid = $true
+ } catch {
+ $valid = $false
+ }
+ } else {
+ $valid = $false
+ }
+
+ $socket.Close()
+ $socket.Dispose()
+
+ $valid
+}
+
+Function Get-PortConnections($hostname, $port) {
+ $connections = @()
+
+ $conn_info = [Net.NetworkInformation.IPGlobalProperties]::GetIPGlobalProperties()
+ if ($hostname -eq "0.0.0.0") {
+ $active_connections = $conn_info.GetActiveTcpConnections() | Where-Object { $_.LocalEndPoint.Port -eq $port }
+ } else {
+ $active_connections = $conn_info.GetActiveTcpConnections() | Where-Object { $_.LocalEndPoint.Address -eq $hostname -and $_.LocalEndPoint.Port -eq $port }
+ }
+
+ if ($null -ne $active_connections) {
+ foreach ($active_connection in $active_connections) {
+ $connections += $active_connection.RemoteEndPoint.Address
+ }
+ }
+
+ $connections
+}
+
+$module_start = Get-Date
+
+if ($null -ne $delay) {
+ Start-Sleep -Seconds $delay
+}
+
+$attempts = 0
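+# With neither path nor port to check, this is a plain timed wait.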
+if ($null -eq $path -and $null -eq $port -and $state -ne "drained") {
+ Start-Sleep -Seconds $timeout
+} elseif ($null -ne $path) {
+ if ($state -in @("present", "started")) {
+ # check if the file exists or string exists in file
+ $start_time = Get-Date
+ $complete = $false
+ while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) {
+ $attempts += 1
+ if (Test-AnsiblePath -Path $path) {
+ if ($null -eq $regex) {
+ $complete = $true
+ break
+ } else {
+ $file_contents = Get-Content -Path $path -Raw
+ if ($file_contents -match $regex) {
+ $complete = $true
+ break
+ }
+ }
+ }
+ Start-Sleep -Seconds $sleep
+ }
+
+ if ($complete -eq $false) {
+ $result.elapsed = ((Get-Date) - $module_start).TotalSeconds
+ $result.wait_attempts = $attempts
+ if ($null -eq $regex) {
+ Fail-Json $result "timeout while waiting for file $path to be present"
+ } else {
+ Fail-Json $result "timeout while waiting for string regex $regex in file $path to match"
+ }
+ }
+ } elseif ($state -in @("absent")) {
+ # check if the file is deleted or string doesn't exist in file
+ $start_time = Get-Date
+ $complete = $false
+ while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) {
+ $attempts += 1
+ if (Test-AnsiblePath -Path $path) {
+ if ($null -ne $regex) {
+ $file_contents = Get-Content -Path $path -Raw
+ if ($file_contents -notmatch $regex) {
+ $complete = $true
+ break
+ }
+ }
+ } else {
+ $complete = $true
+ break
+ }
+
+ Start-Sleep -Seconds $sleep
+ }
+
+ if ($complete -eq $false) {
+ $result.elapsed = ((Get-Date) - $module_start).TotalSeconds
+ $result.wait_attempts = $attempts
+ if ($null -eq $regex) {
+ Fail-Json $result "timeout while waiting for file $path to be absent"
+ } else {
+ Fail-Json $result "timeout while waiting for string regex $regex in file $path to not match"
+ }
+ }
+ }
+} elseif ($null -ne $port) {
+ if ($state -in @("started","present")) {
+ # check that the port is online and is listening
+ $start_time = Get-Date
+ $complete = $false
+ while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) {
+ $attempts += 1
+ $port_result = Test-Port -hostname $hostname -port $port
+ if ($port_result -eq $true) {
+ $complete = $true
+ break
+ }
+
+ Start-Sleep -Seconds $sleep
+ }
+
+ if ($complete -eq $false) {
+ $result.elapsed = ((Get-Date) - $module_start).TotalSeconds
+ $result.wait_attempts = $attempts
+ Fail-Json $result "timeout while waiting for $($hostname):$port to start listening"
+ }
+ } elseif ($state -in @("stopped","absent")) {
+ # check that the port is offline and is not listening
+ $start_time = Get-Date
+ $complete = $false
+ while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) {
+ $attempts += 1
+ $port_result = Test-Port -hostname $hostname -port $port
+ if ($port_result -eq $false) {
+ $complete = $true
+ break
+ }
+
+ Start-Sleep -Seconds $sleep
+ }
+
+ if ($complete -eq $false) {
+ $result.elapsed = ((Get-Date) - $module_start).TotalSeconds
+ $result.wait_attempts = $attempts
+ Fail-Json $result "timeout while waiting for $($hostname):$port to stop listening"
+ }
+ } elseif ($state -eq "drained") {
+ # check that the local port is online but has no active connections
+ $start_time = Get-Date
+ $complete = $false
+ while (((Get-Date) - $start_time).TotalSeconds -lt $timeout) {
+ $attempts += 1
+ $active_connections = Get-PortConnections -hostname $hostname -port $port
+ if ($null -eq $active_connections) {
+ $complete = $true
+ break
+ } elseif ($active_connections.Count -eq 0) {
+ # no connections on port
+ $complete = $true
+ break
+ } else {
+ # there are listeners, check if we should ignore any hosts
+ if ($null -ne $exclude_hosts) {
+ $connection_info = $active_connections
+ foreach ($exclude_host in $exclude_hosts) {
+ try {
+ $exclude_ips = [System.Net.Dns]::GetHostAddresses($exclude_host) | ForEach-Object { Write-Output $_.IPAddressToString }
+ $connection_info = $connection_info | Where-Object { $_ -notin $exclude_ips }
+ } catch { # ignore invalid hostnames
+ Add-Warning -obj $result -message "Invalid hostname specified $exclude_host"
+ }
+ }
+
+ if ($connection_info.Count -eq 0) {
+ $complete = $true
+ break
+ }
+ }
+ }
+
+ Start-Sleep -Seconds $sleep
+ }
+
+ if ($complete -eq $false) {
+ $result.elapsed = ((Get-Date) - $module_start).TotalSeconds
+ $result.wait_attempts = $attempts
+ Fail-Json $result "timeout while waiting for $($hostname):$port to drain"
+ }
+ }
+}
+
+$result.elapsed = ((Get-Date) - $module_start).TotalSeconds
+$result.wait_attempts = $attempts
+
+Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_wait_for.py b/test/support/windows-integration/plugins/modules/win_wait_for.py
new file mode 100644
index 00000000..85721e7d
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_wait_for.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub, actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_wait_for
+version_added: '2.4'
+short_description: Waits for a condition before continuing
+description:
+- You can wait for a set amount of time C(timeout); this is the default if
+ nothing is specified.
+- Waiting for a port to become available is useful when services are not
+ immediately available after their init scripts return, which is true of
+ certain Java application servers.
+- You can wait for a file to exist or not exist on the filesystem.
+- This module can also be used to wait for a regex match string to be present
+ in a file.
+- You can wait for active connections to be closed before continuing on a
+ local port.
+options:
+ connect_timeout:
+ description:
+ - The maximum number of seconds to wait for a connection to happen before
+ closing and retrying.
+ type: int
+ default: 5
+ delay:
+ description:
+ - The number of seconds to wait before starting to poll.
+ type: int
+ exclude_hosts:
+ description:
+ - The list of hosts or IPs to ignore when looking for active TCP
+ connections when C(state=drained).
+ type: list
+ host:
+ description:
+ - A resolvable hostname or IP address to wait for.
+ - If C(state=drained) then it will only check for connections on the IP
+ specified; you can use '0.0.0.0' to check all host IPs.
+ type: str
+ default: '127.0.0.1'
+ path:
+ description:
+ - The path to a file on the filesystem to check.
+ - If C(state) is present or started then it will wait until the file
+ exists.
+ - If C(state) is absent then it will wait until the file does not exist.
+ type: path
+ port:
+ description:
+ - The port number to poll on C(host).
+ type: int
+ regex:
+ description:
+ - Can be used to match a string in a file.
+ - If C(state) is present or started then it will wait until the regex
+ matches.
+ - If C(state) is absent then it will wait until the regex does not match.
+ - Defaults to a multiline regex.
+ type: str
+ aliases: [ "search_regex", "regexp" ]
+ sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ type: int
+ default: 1
+ state:
+ description:
+ - When checking a port, C(started) will ensure the port is open, C(stopped)
+ will check that it is closed, and C(drained) will check for active
+ connections.
+ - When checking for a file or a search string C(present) or C(started) will
+ ensure that the file or string is present, C(absent) will check that the
+ file or search string is absent or removed.
+ type: str
+ choices: [ absent, drained, present, started, stopped ]
+ default: started
+ timeout:
+ description:
+ - The maximum number of seconds to wait for.
+ type: int
+ default: 300
+seealso:
+- module: wait_for
+- module: win_wait_for_process
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
+ win_wait_for:
+ port: 8000
+ delay: 10
+
+- name: Wait 150 seconds for port 8000 of any IP to close active connections
+ win_wait_for:
+ host: 0.0.0.0
+ port: 8000
+ state: drained
+ timeout: 150
+
+- name: Wait for port 8000 of any IP to close active connection, ignoring certain hosts
+ win_wait_for:
+ host: 0.0.0.0
+ port: 8000
+ state: drained
+ exclude_hosts: ['10.2.1.2', '10.2.1.3']
+
+- name: Wait for file C:\temp\log.txt to exist before continuing
+ win_wait_for:
+ path: C:\temp\log.txt
+
+- name: Wait until the string "process complete" is in the file before continuing
+ win_wait_for:
+ path: C:\temp\log.txt
+ regex: process complete
+
+- name: Wait until file is removed
+ win_wait_for:
+ path: C:\temp\log.txt
+ state: absent
+
+- name: Wait until port 1234 is offline but try every 10 seconds
+ win_wait_for:
+ port: 1234
+ state: absent
+ sleep: 10
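+
+# A hedged sketch: register the result to use the elapsed and wait_attempts
+# values documented below; the port number is illustrative.
+- name: Wait for the app port and record how long it took
+ win_wait_for:
+ port: 8080
+ timeout: 120
+ register: wait_result
+
+- name: Show the measured wait
+ debug:
+ msg: "Waited {{ wait_result.elapsed }}s over {{ wait_result.wait_attempts }} attempts"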
+'''
+
+RETURN = r'''
+wait_attempts:
+ description: The number of attempts to poll the file or port before the
+ module finishes.
+ returned: always
+ type: int
+ sample: 1
+elapsed:
+ description: The elapsed seconds between the start of the poll and the end
+ of the module. This includes the delay if the option is set.
+ returned: always
+ type: float
+ sample: 2.1406487
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_whoami.ps1 b/test/support/windows-integration/plugins/modules/win_whoami.ps1
new file mode 100644
index 00000000..6c9965af
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_whoami.ps1
@@ -0,0 +1,837 @@
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CamelConversion
+
+$ErrorActionPreference = "Stop"
+
+$params = Parse-Args $args -supports_check_mode $true
+$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP
+
+$session_util = @'
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+
+namespace Ansible
+{
+ public class SessionInfo
+ {
+ // SECURITY_LOGON_SESSION_DATA
+ public UInt64 LogonId { get; internal set; }
+ public Sid Account { get; internal set; }
+ public string LoginDomain { get; internal set; }
+ public string AuthenticationPackage { get; internal set; }
+ public SECURITY_LOGON_TYPE LogonType { get; internal set; }
+ public string LoginTime { get; internal set; }
+ public string LogonServer { get; internal set; }
+ public string DnsDomainName { get; internal set; }
+ public string Upn { get; internal set; }
+ public ArrayList UserFlags { get; internal set; }
+
+ // TOKEN_STATISTICS
+ public SECURITY_IMPERSONATION_LEVEL ImpersonationLevel { get; internal set; }
+ public TOKEN_TYPE TokenType { get; internal set; }
+
+ // TOKEN_GROUPS
+ public ArrayList Groups { get; internal set; }
+ public ArrayList Rights { get; internal set; }
+
+ // TOKEN_MANDATORY_LABEL
+ public Sid Label { get; internal set; }
+
+ // TOKEN_PRIVILEGES
+ public Hashtable Privileges { get; internal set; }
+ }
+
+ public class Win32Exception : System.ComponentModel.Win32Exception
+ {
+ private string _msg;
+ public Win32Exception(string message) : this(Marshal.GetLastWin32Error(), message) { }
+ public Win32Exception(int errorCode, string message) : base(errorCode)
+ {
+ _msg = String.Format("{0} ({1}, Win32ErrorCode {2})", message, base.Message, errorCode);
+ }
+ public override string Message { get { return _msg; } }
+ public static explicit operator Win32Exception(string message) { return new Win32Exception(message); }
+ }
+
+ [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ public struct LSA_UNICODE_STRING
+ {
+ public UInt16 Length;
+ public UInt16 MaximumLength;
+ public IntPtr buffer;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID
+ {
+ public UInt32 LowPart;
+ public Int32 HighPart;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SECURITY_LOGON_SESSION_DATA
+ {
+ public UInt32 Size;
+ public LUID LogonId;
+ public LSA_UNICODE_STRING Username;
+ public LSA_UNICODE_STRING LoginDomain;
+ public LSA_UNICODE_STRING AuthenticationPackage;
+ public SECURITY_LOGON_TYPE LogonType;
+ public UInt32 Session;
+ public IntPtr Sid;
+ public UInt64 LoginTime;
+ public LSA_UNICODE_STRING LogonServer;
+ public LSA_UNICODE_STRING DnsDomainName;
+ public LSA_UNICODE_STRING Upn;
+ public UInt32 UserFlags;
+ public LSA_LAST_INTER_LOGON_INFO LastLogonInfo;
+ public LSA_UNICODE_STRING LogonScript;
+ public LSA_UNICODE_STRING ProfilePath;
+ public LSA_UNICODE_STRING HomeDirectory;
+ public LSA_UNICODE_STRING HomeDirectoryDrive;
+ public UInt64 LogoffTime;
+ public UInt64 KickOffTime;
+ public UInt64 PasswordLastSet;
+ public UInt64 PasswordCanChange;
+ public UInt64 PasswordMustChange;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LSA_LAST_INTER_LOGON_INFO
+ {
+ public UInt64 LastSuccessfulLogon;
+ public UInt64 LastFailedLogon;
+ public UInt32 FailedAttemptCountSinceLastSuccessfulLogon;
+ }
+
+ public enum TOKEN_TYPE
+ {
+ TokenPrimary = 1,
+ TokenImpersonation
+ }
+
+ public enum SECURITY_IMPERSONATION_LEVEL
+ {
+ SecurityAnonymous,
+ SecurityIdentification,
+ SecurityImpersonation,
+ SecurityDelegation
+ }
+
+ public enum SECURITY_LOGON_TYPE
+ {
+ System = 0, // Used only by the System account
+ Interactive = 2,
+ Network,
+ Batch,
+ Service,
+ Proxy,
+ Unlock,
+ NetworkCleartext,
+ NewCredentials,
+ RemoteInteractive,
+ CachedInteractive,
+ CachedRemoteInteractive,
+ CachedUnlock
+ }
+
+ [Flags]
+ public enum TokenGroupAttributes : uint
+ {
+ SE_GROUP_ENABLED = 0x00000004,
+ SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002,
+ SE_GROUP_INTEGRITY = 0x00000020,
+ SE_GROUP_INTEGRITY_ENABLED = 0x00000040,
+ SE_GROUP_LOGON_ID = 0xC0000000,
+ SE_GROUP_MANDATORY = 0x00000001,
+ SE_GROUP_OWNER = 0x00000008,
+ SE_GROUP_RESOURCE = 0x20000000,
+ SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010,
+ }
+
+ [Flags]
+ public enum UserFlags : uint
+ {
+ LOGON_OPTIMIZED = 0x4000,
+ LOGON_WINLOGON = 0x8000,
+ LOGON_PKINIT = 0x10000,
+ LOGON_NOT_OPTMIZED = 0x20000,
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct SID_AND_ATTRIBUTES
+ {
+ public IntPtr Sid;
+ public UInt32 Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LUID_AND_ATTRIBUTES
+ {
+ public LUID Luid;
+ public UInt32 Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_GROUPS
+ {
+ public UInt32 GroupCount;
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
+ public SID_AND_ATTRIBUTES[] Groups;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_MANDATORY_LABEL
+ {
+ public SID_AND_ATTRIBUTES Label;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_STATISTICS
+ {
+ public LUID TokenId;
+ public LUID AuthenticationId;
+ public UInt64 ExpirationTime;
+ public TOKEN_TYPE TokenType;
+ public SECURITY_IMPERSONATION_LEVEL ImpersonationLevel;
+ public UInt32 DynamicCharged;
+ public UInt32 DynamicAvailable;
+ public UInt32 GroupCount;
+ public UInt32 PrivilegeCount;
+ public LUID ModifiedId;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct TOKEN_PRIVILEGES
+ {
+ public UInt32 PrivilegeCount;
+ [MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
+ public LUID_AND_ATTRIBUTES[] Privileges;
+ }
+
+ public class AccessToken : IDisposable
+ {
+ public enum TOKEN_INFORMATION_CLASS
+ {
+ TokenUser = 1,
+ TokenGroups,
+ TokenPrivileges,
+ TokenOwner,
+ TokenPrimaryGroup,
+ TokenDefaultDacl,
+ TokenSource,
+ TokenType,
+ TokenImpersonationLevel,
+ TokenStatistics,
+ TokenRestrictedSids,
+ TokenSessionId,
+ TokenGroupsAndPrivileges,
+ TokenSessionReference,
+ TokenSandBoxInert,
+ TokenAuditPolicy,
+ TokenOrigin,
+ TokenElevationType,
+ TokenLinkedToken,
+ TokenElevation,
+ TokenHasRestrictions,
+ TokenAccessInformation,
+ TokenVirtualizationAllowed,
+ TokenVirtualizationEnabled,
+ TokenIntegrityLevel,
+ TokenUIAccess,
+ TokenMandatoryPolicy,
+ TokenLogonSid,
+ TokenIsAppContainer,
+ TokenCapabilities,
+ TokenAppContainerSid,
+ TokenAppContainerNumber,
+ TokenUserClaimAttributes,
+ TokenDeviceClaimAttributes,
+ TokenRestrictedUserClaimAttributes,
+ TokenRestrictedDeviceClaimAttributes,
+ TokenDeviceGroups,
+ TokenRestrictedDeviceGroups,
+ TokenSecurityAttributes,
+ TokenIsRestricted,
+ MaxTokenInfoClass
+ }
+
+ public IntPtr hToken = IntPtr.Zero;
+
+ [DllImport("kernel32.dll")]
+ private static extern IntPtr GetCurrentProcess();
+
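+ // Needed so Dispose() can release the process token handle.
+ [DllImport("kernel32.dll", SetLastError = true)]
+ private static extern bool CloseHandle(IntPtr hObject);
+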
+ [DllImport("advapi32.dll", SetLastError = true)]
+ private static extern bool OpenProcessToken(
+ IntPtr ProcessHandle,
+ TokenAccessLevels DesiredAccess,
+ out IntPtr TokenHandle);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ private static extern bool GetTokenInformation(
+ IntPtr TokenHandle,
+ TOKEN_INFORMATION_CLASS TokenInformationClass,
+ IntPtr TokenInformation,
+ UInt32 TokenInformationLength,
+ out UInt32 ReturnLength);
+
+ public AccessToken(TokenAccessLevels tokenAccessLevels)
+ {
+ IntPtr currentProcess = GetCurrentProcess();
+ if (!OpenProcessToken(currentProcess, tokenAccessLevels, out hToken))
+ throw new Win32Exception("OpenProcessToken() for current process failed");
+ }
+
+ public IntPtr GetTokenInformation<T>(out T tokenInformation, TOKEN_INFORMATION_CLASS tokenClass)
+ {
+ UInt32 tokenLength = 0;
+ GetTokenInformation(hToken, tokenClass, IntPtr.Zero, 0, out tokenLength);
+
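+ // The probe call above only reports the required buffer length;
+ // allocate it, then fetch the real structure.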
+ IntPtr infoPtr = Marshal.AllocHGlobal((int)tokenLength);
+
+ if (!GetTokenInformation(hToken, tokenClass, infoPtr, tokenLength, out tokenLength))
+ throw new Win32Exception(String.Format("GetTokenInformation() data for {0} failed", tokenClass.ToString()));
+
+ tokenInformation = (T)Marshal.PtrToStructure(infoPtr, typeof(T));
+ return infoPtr;
+ }
+
+ public void Dispose()
+ {
+ if (hToken != IntPtr.Zero)
+ {
+ CloseHandle(hToken);
+ hToken = IntPtr.Zero;
+ }
+ GC.SuppressFinalize(this);
+ }
+
+ ~AccessToken() { Dispose(); }
+ }
+
+ public class LsaHandle : IDisposable
+ {
+ [Flags]
+ public enum DesiredAccess : uint
+ {
+ POLICY_VIEW_LOCAL_INFORMATION = 0x00000001,
+ POLICY_VIEW_AUDIT_INFORMATION = 0x00000002,
+ POLICY_GET_PRIVATE_INFORMATION = 0x00000004,
+ POLICY_TRUST_ADMIN = 0x00000008,
+ POLICY_CREATE_ACCOUNT = 0x00000010,
+ POLICY_CREATE_SECRET = 0x00000020,
+ POLICY_CREATE_PRIVILEGE = 0x00000040,
+ POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080,
+ POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100,
+ POLICY_AUDIT_LOG_ADMIN = 0x00000200,
+ POLICY_SERVER_ADMIN = 0x00000400,
+ POLICY_LOOKUP_NAMES = 0x00000800,
+ POLICY_NOTIFICATION = 0x00001000
+ }
+
+ public IntPtr handle = IntPtr.Zero;
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ private static extern uint LsaOpenPolicy(
+ LSA_UNICODE_STRING[] SystemName,
+ ref LSA_OBJECT_ATTRIBUTES ObjectAttributes,
+ DesiredAccess AccessMask,
+ out IntPtr PolicyHandle);
+
+ [DllImport("advapi32.dll", SetLastError = true)]
+ private static extern uint LsaClose(
+ IntPtr ObjectHandle);
+
+ [DllImport("advapi32.dll", SetLastError = false)]
+ private static extern int LsaNtStatusToWinError(
+ uint Status);
+
+ [StructLayout(LayoutKind.Sequential)]
+ public struct LSA_OBJECT_ATTRIBUTES
+ {
+ public int Length;
+ public IntPtr RootDirectory;
+ public IntPtr ObjectName;
+ public int Attributes;
+ public IntPtr SecurityDescriptor;
+ public IntPtr SecurityQualityOfService;
+ }
+
+ public LsaHandle(DesiredAccess desiredAccess)
+ {
+ LSA_OBJECT_ATTRIBUTES lsaAttr;
+ lsaAttr.RootDirectory = IntPtr.Zero;
+ lsaAttr.ObjectName = IntPtr.Zero;
+ lsaAttr.Attributes = 0;
+ lsaAttr.SecurityDescriptor = IntPtr.Zero;
+ lsaAttr.SecurityQualityOfService = IntPtr.Zero;
+ lsaAttr.Length = Marshal.SizeOf(typeof(LSA_OBJECT_ATTRIBUTES));
+ LSA_UNICODE_STRING[] system = new LSA_UNICODE_STRING[1];
+ system[0].buffer = IntPtr.Zero;
+
+ uint res = LsaOpenPolicy(system, ref lsaAttr, desiredAccess, out handle);
+ if (res != 0)
+ throw new Win32Exception(LsaNtStatusToWinError(res), "LsaOpenPolicy() failed");
+ }
+
+ public void Dispose()
+ {
+ if (handle != IntPtr.Zero)
+ {
+ LsaClose(handle);
+ handle = IntPtr.Zero;
+ }
+ GC.SuppressFinalize(this);
+ }
+
+ ~LsaHandle() { Dispose(); }
+ }
+
+ public class Sid
+ {
+ public string SidString { get; internal set; }
+ public string DomainName { get; internal set; }
+ public string AccountName { get; internal set; }
+ public SID_NAME_USE SidType { get; internal set; }
+
+ public enum SID_NAME_USE
+ {
+ SidTypeUser = 1,
+ SidTypeGroup,
+ SidTypeDomain,
+ SidTypeAlias,
+ SidTypeWellKnownGroup,
+ SidTypeDeletedAccount,
+ SidTypeInvalid,
+ SidTypeUnknown,
+ SidTypeComputer,
+ SidTypeLabel,
+ SidTypeLogon,
+ }
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ private static extern bool LookupAccountSid(
+ string lpSystemName,
+ [MarshalAs(UnmanagedType.LPArray)]
+ byte[] Sid,
+ StringBuilder lpName,
+ ref UInt32 cchName,
+ StringBuilder ReferencedDomainName,
+ ref UInt32 cchReferencedDomainName,
+ out SID_NAME_USE peUse);
+
+ public Sid(IntPtr sidPtr)
+ {
+ SecurityIdentifier sid;
+ try
+ {
+ sid = new SecurityIdentifier(sidPtr);
+ }
+ catch (Exception e)
+ {
+ throw new ArgumentException(String.Format("Failed to cast IntPtr to SecurityIdentifier: {0}", e));
+ }
+
+ SetSidInfo(sid);
+ }
+
+ public Sid(SecurityIdentifier sid)
+ {
+ SetSidInfo(sid);
+ }
+
+ public override string ToString()
+ {
+ return SidString;
+ }
+
+ private void SetSidInfo(SecurityIdentifier sid)
+ {
+ byte[] sidBytes = new byte[sid.BinaryLength];
+ sid.GetBinaryForm(sidBytes, 0);
+
+ StringBuilder lpName = new StringBuilder();
+ UInt32 cchName = 0;
+ StringBuilder referencedDomainName = new StringBuilder();
+ UInt32 cchReferencedDomainName = 0;
+ SID_NAME_USE peUse;
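+ // The first lookup only sizes the name/domain buffers; after
+ // EnsureCapacity the second call fills in the account details.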
+ LookupAccountSid(null, sidBytes, lpName, ref cchName, referencedDomainName, ref cchReferencedDomainName, out peUse);
+
+ lpName.EnsureCapacity((int)cchName);
+ referencedDomainName.EnsureCapacity((int)cchReferencedDomainName);
+
+ SidString = sid.ToString();
+ if (!LookupAccountSid(null, sidBytes, lpName, ref cchName, referencedDomainName, ref cchReferencedDomainName, out peUse))
+ {
+ int lastError = Marshal.GetLastWin32Error();
+
+ if (lastError != 1332 && lastError != 1789) // Failed to look up the Logon SID
+ {
+ throw new Win32Exception(lastError, String.Format("LookupAccountSid() failed for SID: {0} {1}", sid.ToString(), lastError));
+ }
+ else if (SidString.StartsWith("S-1-5-5-"))
+ {
+ AccountName = String.Format("LogonSessionId_{0}", SidString.Substring(8));
+ DomainName = "NT AUTHORITY";
+ SidType = SID_NAME_USE.SidTypeLogon;
+ }
+ else
+ {
+ AccountName = null;
+ DomainName = null;
+ SidType = SID_NAME_USE.SidTypeUnknown;
+ }
+ }
+ else
+ {
+ AccountName = lpName.ToString();
+ DomainName = referencedDomainName.ToString();
+ SidType = peUse;
+ }
+ }
+ }
+
+ public class SessionUtil
+ {
+ [DllImport("secur32.dll", SetLastError = false)]
+ private static extern uint LsaFreeReturnBuffer(
+ IntPtr Buffer);
+
+ [DllImport("secur32.dll", SetLastError = false)]
+ private static extern uint LsaEnumerateLogonSessions(
+ out UInt64 LogonSessionCount,
+ out IntPtr LogonSessionList);
+
+ [DllImport("secur32.dll", SetLastError = false)]
+ private static extern uint LsaGetLogonSessionData(
+ IntPtr LogonId,
+ out IntPtr ppLogonSessionData);
+
+ [DllImport("advapi32.dll", SetLastError = false)]
+ private static extern int LsaNtStatusToWinError(
+ uint Status);
+
+ [DllImport("advapi32", SetLastError = true)]
+ private static extern uint LsaEnumerateAccountRights(
+ IntPtr PolicyHandle,
+ IntPtr AccountSid,
+ out IntPtr UserRights,
+ out UInt64 CountOfRights);
+
+ [DllImport("advapi32.dll", SetLastError = true, CharSet = CharSet.Unicode)]
+ private static extern bool LookupPrivilegeName(
+ string lpSystemName,
+ ref LUID lpLuid,
+ StringBuilder lpName,
+ ref UInt32 cchName);
+
+ private const UInt32 SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001;
+ private const UInt32 SE_PRIVILEGE_ENABLED = 0x00000002;
+ private const UInt32 STATUS_OBJECT_NAME_NOT_FOUND = 0xC0000034;
+ private const UInt32 STATUS_ACCESS_DENIED = 0xC0000022;
+
+ public static SessionInfo GetSessionInfo()
+ {
+ AccessToken accessToken = new AccessToken(TokenAccessLevels.Query);
+
+ // Get Privileges
+ Hashtable privilegeInfo = new Hashtable();
+ TOKEN_PRIVILEGES privileges;
+ IntPtr privilegesPtr = accessToken.GetTokenInformation(out privileges, AccessToken.TOKEN_INFORMATION_CLASS.TokenPrivileges);
+ LUID_AND_ATTRIBUTES[] luidAndAttributes = new LUID_AND_ATTRIBUTES[privileges.PrivilegeCount];
+ try
+ {
+ PtrToStructureArray(luidAndAttributes, privilegesPtr.ToInt64() + Marshal.SizeOf(privileges.PrivilegeCount));
+ }
+ finally
+ {
+ Marshal.FreeHGlobal(privilegesPtr);
+ }
+ foreach (LUID_AND_ATTRIBUTES luidAndAttribute in luidAndAttributes)
+ {
+ LUID privLuid = luidAndAttribute.Luid;
+ UInt32 privNameLen = 0;
+ StringBuilder privName = new StringBuilder();
+ LookupPrivilegeName(null, ref privLuid, null, ref privNameLen);
+ privName.EnsureCapacity((int)(privNameLen + 1));
+ if (!LookupPrivilegeName(null, ref privLuid, privName, ref privNameLen))
+ throw new Win32Exception("LookupPrivilegeName() failed");
+
+ string state = "disabled";
+ if ((luidAndAttribute.Attributes & SE_PRIVILEGE_ENABLED) == SE_PRIVILEGE_ENABLED)
+ state = "enabled";
+ if ((luidAndAttribute.Attributes & SE_PRIVILEGE_ENABLED_BY_DEFAULT) == SE_PRIVILEGE_ENABLED_BY_DEFAULT)
+ state = "enabled-by-default";
+ privilegeInfo.Add(privName.ToString(), state);
+ }
+
+ // Get Current Process LogonSID, User Rights and Groups
+ ArrayList userRights = new ArrayList();
+ ArrayList userGroups = new ArrayList();
+ TOKEN_GROUPS groups;
+ IntPtr groupsPtr = accessToken.GetTokenInformation(out groups, AccessToken.TOKEN_INFORMATION_CLASS.TokenGroups);
+ SID_AND_ATTRIBUTES[] sidAndAttributes = new SID_AND_ATTRIBUTES[groups.GroupCount];
+ LsaHandle lsaHandle = null;
+ // We can only get rights if we are an admin
+ if (new WindowsPrincipal(WindowsIdentity.GetCurrent()).IsInRole(WindowsBuiltInRole.Administrator))
+ lsaHandle = new LsaHandle(LsaHandle.DesiredAccess.POLICY_LOOKUP_NAMES);
+ try
+ {
+ PtrToStructureArray(sidAndAttributes, groupsPtr.ToInt64() + IntPtr.Size);
+ foreach (SID_AND_ATTRIBUTES sidAndAttribute in sidAndAttributes)
+ {
+ TokenGroupAttributes attributes = (TokenGroupAttributes)sidAndAttribute.Attributes;
+ if (attributes.HasFlag(TokenGroupAttributes.SE_GROUP_ENABLED) && lsaHandle != null)
+ {
+ ArrayList rights = GetAccountRights(lsaHandle.handle, sidAndAttribute.Sid);
+ foreach (string right in rights)
+ {
+ // Includes both Privileges and Account Rights, only add the ones with Logon in the name
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/bb545671(v=vs.85).aspx
+ if (!userRights.Contains(right) && right.Contains("Logon"))
+ userRights.Add(right);
+ }
+ }
+ // Do not include the Logon SID in the groups category
+ if (!attributes.HasFlag(TokenGroupAttributes.SE_GROUP_LOGON_ID))
+ {
+ Hashtable groupInfo = new Hashtable();
+ Sid group = new Sid(sidAndAttribute.Sid);
+ ArrayList groupAttributes = new ArrayList();
+ foreach (TokenGroupAttributes attribute in Enum.GetValues(typeof(TokenGroupAttributes)))
+ {
+ if (attributes.HasFlag(attribute))
+ {
+ string attributeName = attribute.ToString().Substring(9);
+ attributeName = attributeName.Replace('_', ' ');
+ attributeName = attributeName.First().ToString().ToUpper() + attributeName.Substring(1).ToLower();
+ groupAttributes.Add(attributeName);
+ }
+ }
+ // Using snake_case here as I can't generically convert all dict keys in PS (see Privileges)
+ groupInfo.Add("sid", group.SidString);
+ groupInfo.Add("domain_name", group.DomainName);
+ groupInfo.Add("account_name", group.AccountName);
+ groupInfo.Add("type", group.SidType);
+ groupInfo.Add("attributes", groupAttributes);
+ userGroups.Add(groupInfo);
+ }
+ }
+ }
+ finally
+ {
+ Marshal.FreeHGlobal(groupsPtr);
+ if (lsaHandle != null)
+ lsaHandle.Dispose();
+ }
+
+ // Get Integrity Level
+ Sid integritySid = null;
+ TOKEN_MANDATORY_LABEL mandatoryLabel;
+ IntPtr mandatoryLabelPtr = accessToken.GetTokenInformation(out mandatoryLabel, AccessToken.TOKEN_INFORMATION_CLASS.TokenIntegrityLevel);
+ // construct the Sid before freeing the buffer; Label.Sid points into it
+ integritySid = new Sid(mandatoryLabel.Label.Sid);
+ Marshal.FreeHGlobal(mandatoryLabelPtr);
+
+ // Get Token Statistics
+ TOKEN_STATISTICS tokenStats;
+ IntPtr tokenStatsPtr = accessToken.GetTokenInformation(out tokenStats, AccessToken.TOKEN_INFORMATION_CLASS.TokenStatistics);
+ Marshal.FreeHGlobal(tokenStatsPtr);
+
+ SessionInfo sessionInfo = GetSessionDataForLogonSession(tokenStats.AuthenticationId);
+ sessionInfo.Groups = userGroups;
+ sessionInfo.Label = integritySid;
+ sessionInfo.ImpersonationLevel = tokenStats.ImpersonationLevel;
+ sessionInfo.TokenType = tokenStats.TokenType;
+ sessionInfo.Privileges = privilegeInfo;
+ sessionInfo.Rights = userRights;
+ return sessionInfo;
+ }
+
+ private static ArrayList GetAccountRights(IntPtr lsaHandle, IntPtr sid)
+ {
+ UInt32 res;
+ ArrayList rights = new ArrayList();
+ IntPtr userRightsPointer = IntPtr.Zero;
+ UInt64 countOfRights = 0;
+
+ res = LsaEnumerateAccountRights(lsaHandle, sid, out userRightsPointer, out countOfRights);
+ if (res != 0 && res != STATUS_OBJECT_NAME_NOT_FOUND)
+ throw new Win32Exception(LsaNtStatusToWinError(res), "LsaEnumerateAccountRights() failed");
+ else if (res != STATUS_OBJECT_NAME_NOT_FOUND)
+ {
+ LSA_UNICODE_STRING[] userRights = new LSA_UNICODE_STRING[countOfRights];
+ PtrToStructureArray(userRights, userRightsPointer.ToInt64());
+ rights = new ArrayList();
+ foreach (LSA_UNICODE_STRING right in userRights)
+ rights.Add(Marshal.PtrToStringUni(right.buffer));
+ }
+
+ return rights;
+ }
+
+ private static SessionInfo GetSessionDataForLogonSession(LUID logonSession)
+ {
+ uint res;
+ UInt64 count = 0;
+ IntPtr luidPtr = IntPtr.Zero;
+ SessionInfo sessionInfo = null;
+ UInt64 processDataId = ConvertLuidToUint(logonSession);
+
+ res = LsaEnumerateLogonSessions(out count, out luidPtr);
+ if (res != 0)
+ throw new Win32Exception(LsaNtStatusToWinError(res), "LsaEnumerateLogonSessions() failed");
+ Int64 luidAddr = luidPtr.ToInt64();
+
+ try
+ {
+ for (UInt64 i = 0; i < count; i++)
+ {
+ IntPtr dataPointer = IntPtr.Zero;
+ res = LsaGetLogonSessionData(luidPtr, out dataPointer);
+ if (res == STATUS_ACCESS_DENIED) // Non-admins won't be able to get info for sessions that are not their own
+ {
+ luidPtr = new IntPtr(luidPtr.ToInt64() + Marshal.SizeOf(typeof(LUID)));
+ continue;
+ }
+ else if (res != 0)
+ throw new Win32Exception(LsaNtStatusToWinError(res), String.Format("LsaGetLogonSessionData() failed {0}", res));
+
+ SECURITY_LOGON_SESSION_DATA sessionData = (SECURITY_LOGON_SESSION_DATA)Marshal.PtrToStructure(dataPointer, typeof(SECURITY_LOGON_SESSION_DATA));
+ UInt64 sessionDataid = ConvertLuidToUint(sessionData.LogonId);
+
+ if (sessionDataid == processDataId)
+ {
+ ArrayList userFlags = new ArrayList();
+ UserFlags flags = (UserFlags)sessionData.UserFlags;
+ foreach (UserFlags flag in Enum.GetValues(typeof(UserFlags)))
+ {
+ if (flags.HasFlag(flag))
+ {
+ string flagName = flag.ToString().Substring(6);
+ flagName = flagName.Replace('_', ' ');
+ flagName = flagName.First().ToString().ToUpper() + flagName.Substring(1).ToLower();
+ userFlags.Add(flagName);
+ }
+ }
+
+ sessionInfo = new SessionInfo()
+ {
+ AuthenticationPackage = Marshal.PtrToStringUni(sessionData.AuthenticationPackage.buffer),
+ DnsDomainName = Marshal.PtrToStringUni(sessionData.DnsDomainName.buffer),
+ LoginDomain = Marshal.PtrToStringUni(sessionData.LoginDomain.buffer),
+ LoginTime = ConvertIntegerToDateString(sessionData.LoginTime),
+ LogonId = ConvertLuidToUint(sessionData.LogonId),
+ LogonServer = Marshal.PtrToStringUni(sessionData.LogonServer.buffer),
+ LogonType = sessionData.LogonType,
+ Upn = Marshal.PtrToStringUni(sessionData.Upn.buffer),
+ UserFlags = userFlags,
+ Account = new Sid(sessionData.Sid)
+ };
+ break;
+ }
+ luidPtr = new IntPtr(luidPtr.ToInt64() + Marshal.SizeOf(typeof(LUID)));
+ }
+ }
+ finally
+ {
+ LsaFreeReturnBuffer(new IntPtr(luidAddr));
+ }
+
+ if (sessionInfo == null)
+ throw new Exception(String.Format("Could not find the data for logon session {0}", processDataId));
+ return sessionInfo;
+ }
+
+ private static string ConvertIntegerToDateString(UInt64 time)
+ {
+ if (time == 0)
+ return null;
+ if (time > (UInt64)DateTime.MaxValue.ToFileTime())
+ return null;
+
+ DateTime dateTime = DateTime.FromFileTime((long)time);
+ return dateTime.ToString("o");
+ }
+
+ private static UInt64 ConvertLuidToUint(LUID luid)
+ {
+ UInt32 low = luid.LowPart;
+ UInt64 high = (UInt64)luid.HighPart;
+ high = high << 32;
+ UInt64 uintValue = (high | (UInt64)low);
+ return uintValue;
+ }
+
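+ // Worked example for ConvertLuidToUint above (hypothetical values): a LUID
+ // with HighPart = 0x1 and LowPart = 0x2A maps to
+ // (0x1UL << 32) | 0x2A = 0x10000002A (4294967338).
+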
+ private static void PtrToStructureArray<T>(T[] array, Int64 pointerAddress)
+ {
+ Int64 pointerOffset = pointerAddress;
+ for (int i = 0; i < array.Length; i++, pointerOffset += Marshal.SizeOf(typeof(T)))
+ array[i] = (T)Marshal.PtrToStructure(new IntPtr(pointerOffset), typeof(T));
+ }
+
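+ // For example, GetSessionInfo() uses this helper to walk the
+ // LUID_AND_ATTRIBUTES entries that sit immediately after the 4-byte
+ // PrivilegeCount field of the TOKEN_PRIVILEGES buffer, advancing the
+ // pointer by Marshal.SizeOf(T) for each element.
+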
+ public static IEnumerable<T> GetValues<T>()
+ {
+ return Enum.GetValues(typeof(T)).Cast<T>();
+ }
+ }
+}
+'@
+
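+# Add-Type compiles the C# above with the .NET compiler, which writes its
+# temporary build files to $env:TMP; temporarily pointing TMP at the Ansible
+# remote_tmp keeps those artifacts inside the module's working directory.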
+$original_tmp = $env:TMP
+$env:TMP = $_remote_tmp
+Add-Type -TypeDefinition $session_util
+$env:TMP = $original_tmp
+
+$session_info = [Ansible.SessionUtil]::GetSessionInfo()
+
+Function Convert-Value($value) {
+ $new_value = $value
+ if ($value -is [System.Collections.ArrayList]) {
+ $new_value = [System.Collections.ArrayList]@()
+ foreach ($list_value in $value) {
+ $new_list_value = Convert-Value -value $list_value
+ [void]$new_value.Add($new_list_value)
+ }
+ } elseif ($value -is [Hashtable]) {
+ $new_value = @{}
+ foreach ($entry in $value.GetEnumerator()) {
+ $entry_value = Convert-Value -value $entry.Value
+ # manually convert Sid type entry to remove the SidType prefix
+ if ($entry.Name -eq "type") {
+ $entry_value = $entry_value.Replace("SidType", "")
+ }
+ $new_value[$entry.Name] = $entry_value
+ }
+ } elseif ($value -is [Ansible.Sid]) {
+ $new_value = @{
+ sid = $value.SidString
+ account_name = $value.AccountName
+ domain_name = $value.DomainName
+ type = $value.SidType.ToString().Replace("SidType", "")
+ }
+ } elseif ($value -is [Enum]) {
+ $new_value = $value.ToString()
+ }
+
+ return ,$new_value
+}
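+
+# The leading comma in 'return ,$new_value' wraps the value so PowerShell does
+# not unroll single-element arrays on return. A usage sketch (hypothetical
+# values): Convert-Value -value $session_info.Account yields a plain hashtable
+# with sid/account_name/domain_name/type keys.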
+
+$result = @{
+ changed = $false
+}
+
+$properties = [type][Ansible.SessionInfo]
+foreach ($property in $properties.DeclaredProperties) {
+ $property_name = $property.Name
+ $property_value = $session_info.$property_name
+ $snake_name = Convert-StringToSnakeCase -string $property_name
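+ # e.g. the property 'DnsDomainName' becomes the result key 'dns_domain_name'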
+
+ $result.$snake_name = Convert-Value -value $property_value
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_whoami.py b/test/support/windows-integration/plugins/modules/win_whoami.py
new file mode 100644
index 00000000..d647374b
--- /dev/null
+++ b/test/support/windows-integration/plugins/modules/win_whoami.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This is a Windows documentation stub. The actual code lives in the .ps1
+# file of the same name.
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: win_whoami
+version_added: "2.5"
+short_description: Get information about the current user and process
+description:
+- Designed to return the same information as the C(whoami /all) command.
+- Also includes logon metadata that C(whoami) omits, such as the logon
+ rights, id, and type.
+notes:
+- If running this module with a non-admin user, the logon rights will be an
+ empty list as Administrator rights are required to query LSA for the
+ information.
+seealso:
+- module: win_credential
+- module: win_group_membership
+- module: win_user_right
+author:
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Get whoami information
+ win_whoami:
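+
+# A minimal sketch of consuming the returned facts; the task names and the
+# registered variable name are illustrative only.
+- name: Get whoami information and register the result
+ win_whoami:
+ register: whoami_out
+
+- name: Show the SID of the account running the play
+ debug:
+ var: whoami_out.account.sid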
+'''
+
+RETURN = r'''
+authentication_package:
+ description: The name of the authentication package used to authenticate the
+ user in the session.
+ returned: success
+ type: str
+ sample: Negotiate
+user_flags:
+ description: The user flags for the logon session; see UserFlags in
+ U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380128).
+ returned: success
+ type: str
+ sample: Winlogon
+upn:
+ description: The user principal name of the current user.
+ returned: success
+ type: str
+ sample: Administrator@DOMAIN.COM
+logon_type:
+ description: The logon type that identifies the logon method; see
+ U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa380129.aspx).
+ returned: success
+ type: str
+ sample: Network
+privileges:
+ description: A dictionary of privileges and their state on the logon token.
+ returned: success
+ type: dict
+ sample: {
+ "SeChangeNotifyPrivileges": "enabled-by-default",
+ "SeRemoteShutdownPrivilege": "disabled",
+ "SeDebugPrivilege": "enabled"
+ }
+label:
+ description: The mandatory label set to the logon session.
+ returned: success
+ type: complex
+ contains:
+ domain_name:
+ description: The domain name of the label SID.
+ returned: success
+ type: str
+ sample: Mandatory Label
+ sid:
+ description: The SID in string form.
+ returned: success
+ type: str
+ sample: S-1-16-12288
+ account_name:
+ description: The account name of the label SID.
+ returned: success
+ type: str
+ sample: High Mandatory Level
+ type:
+ description: The type of SID.
+ returned: success
+ type: str
+ sample: Label
+impersonation_level:
+ description: The impersonation level of the token; only valid if
+ C(token_type) is C(TokenImpersonation), see
+ U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa379572.aspx).
+ returned: success
+ type: str
+ sample: SecurityAnonymous
+login_time:
+ description: The logon time in ISO 8601 format.
+ returned: success
+ type: str
+ sample: '2017-11-27T06:24:14.3321665+10:00'
+groups:
+ description: A list of groups and attributes that the user is a member of.
+ returned: success
+ type: list
+ sample: [
+ {
+ "account_name": "Domain Users",
+ "domain_name": "DOMAIN",
+ "attributes": [
+ "Mandatory",
+ "Enabled by default",
+ "Enabled"
+ ],
+ "sid": "S-1-5-21-1654078763-769949647-2968445802-513",
+ "type": "Group"
+ },
+ {
+ "account_name": "Administrators",
+ "domain_name": "BUILTIN",
+ "attributes": [
+ "Mandatory",
+ "Enabled by default",
+ "Enabled",
+ "Owner"
+ ],
+ "sid": "S-1-5-32-544",
+ "type": "Alias"
+ }
+ ]
+account:
+ description: The running account SID details.
+ returned: success
+ type: complex
+ contains:
+ domain_name:
+ description: The domain name of the account SID.
+ returned: success
+ type: str
+ sample: DOMAIN
+ sid:
+ description: The SID in string form.
+ returned: success
+ type: str
+ sample: S-1-5-21-1654078763-769949647-2968445802-500
+ account_name:
+ description: The account name of the account SID.
+ returned: success
+ type: str
+ sample: Administrator
+ type:
+ description: The type of SID.
+ returned: success
+ type: str
+ sample: User
+login_domain:
+ description: The name of the domain used to authenticate the owner of the
+ session.
+ returned: success
+ type: str
+ sample: DOMAIN
+rights:
+ description: A list of logon rights assigned to the logon.
+ returned: success and running user is a member of the local Administrators group
+ type: list
+ sample: [
+ "SeNetworkLogonRight",
+ "SeInteractiveLogonRight",
+ "SeBatchLogonRight",
+ "SeRemoteInteractiveLogonRight"
+ ]
+logon_server:
+ description: The name of the server used to authenticate the owner of the
+ logon session.
+ returned: success
+ type: str
+ sample: DC01
+logon_id:
+ description: The unique identifier of the logon session.
+ returned: success
+ type: int
+ sample: 20470143
+dns_domain_name:
+ description: The DNS name of the logon session. This is an empty string if
+ it is not set.
+ returned: success
+ type: str
+ sample: DOMAIN.COM
+token_type:
+ description: The token type to indicate whether it is a primary or
+ impersonation token.
+ returned: success
+ type: str
+ sample: TokenPrimary
+'''
diff --git a/test/units/__init__.py b/test/units/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/__init__.py
diff --git a/test/units/_vendor/test_vendor.py b/test/units/_vendor/test_vendor.py
new file mode 100644
index 00000000..6a0fa385
--- /dev/null
+++ b/test/units/_vendor/test_vendor.py
@@ -0,0 +1,65 @@
+# (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pkgutil
+import pytest
+import sys
+
+from units.compat.mock import MagicMock, NonCallableMagicMock, patch
+
+
+def reset_internal_vendor_package():
+ import ansible
+ ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor')
+
+ if ansible_vendor_path in sys.path:
+ sys.path.remove(ansible_vendor_path)
+
+ for pkg in ['ansible._vendor', 'ansible']:
+ if pkg in sys.modules:
+ del sys.modules[pkg]
+
+
+def test_package_path_masking():
+ from ansible import _vendor
+
+ assert hasattr(_vendor, '__path__') and _vendor.__path__ == []
+
+
+def test_no_vendored():
+ reset_internal_vendor_package()
+ with patch.object(pkgutil, 'iter_modules', return_value=[]):
+ previous_path = list(sys.path)
+ import ansible
+ ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor')
+
+ assert ansible_vendor_path not in sys.path
+ assert sys.path == previous_path
+
+
+def test_vendored(vendored_pkg_names=None):
+ if not vendored_pkg_names:
+ vendored_pkg_names = ['boguspkg']
+ reset_internal_vendor_package()
+ with patch.object(pkgutil, 'iter_modules', return_value=list((None, p, None) for p in vendored_pkg_names)):
+ previous_path = list(sys.path)
+ import ansible
+ ansible_vendor_path = os.path.join(os.path.dirname(ansible.__file__), '_vendor')
+ assert sys.path[0] == ansible_vendor_path
+
+ if ansible_vendor_path in previous_path:
+ previous_path.remove(ansible_vendor_path)
+
+ assert sys.path[1:] == previous_path
+
+
+def test_vendored_conflict():
+ with pytest.warns(UserWarning) as w:
+ import pkgutil
+ import sys
+ test_vendored(vendored_pkg_names=['sys', 'pkgutil']) # pass a real package we know is already loaded
+ assert 'pkgutil, sys' in str(w[0].message) # ensure both conflicting modules are listed and sorted
diff --git a/test/units/ansible_test/__init__.py b/test/units/ansible_test/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/ansible_test/__init__.py
diff --git a/test/units/ansible_test/ci/__init__.py b/test/units/ansible_test/ci/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/ansible_test/ci/__init__.py
diff --git a/test/units/ansible_test/ci/test_azp.py b/test/units/ansible_test/ci/test_azp.py
new file mode 100644
index 00000000..69c4fa49
--- /dev/null
+++ b/test/units/ansible_test/ci/test_azp.py
@@ -0,0 +1,31 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .util import common_auth_test
+
+
+def test_auth():
+ # noinspection PyProtectedMember
+ from ansible_test._internal.ci.azp import (
+ AzurePipelinesAuthHelper,
+ )
+
+ class TestAzurePipelinesAuthHelper(AzurePipelinesAuthHelper):
+ def __init__(self):
+ self.public_key_pem = None
+ self.private_key_pem = None
+
+ def publish_public_key(self, public_key_pem):
+ # avoid publishing key
+ self.public_key_pem = public_key_pem
+
+ def initialize_private_key(self):
+ # cache in memory instead of on disk
+ if not self.private_key_pem:
+ self.private_key_pem = self.generate_private_key()
+
+ return self.private_key_pem
+
+ auth = TestAzurePipelinesAuthHelper()
+
+ common_auth_test(auth)
diff --git a/test/units/ansible_test/ci/test_shippable.py b/test/units/ansible_test/ci/test_shippable.py
new file mode 100644
index 00000000..08b276c7
--- /dev/null
+++ b/test/units/ansible_test/ci/test_shippable.py
@@ -0,0 +1,31 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .util import common_auth_test
+
+
+def test_auth():
+ # noinspection PyProtectedMember
+ from ansible_test._internal.ci.shippable import (
+ ShippableAuthHelper,
+ )
+
+ class TestShippableAuthHelper(ShippableAuthHelper):
+ def __init__(self):
+ self.public_key_pem = None
+ self.private_key_pem = None
+
+ def publish_public_key(self, public_key_pem):
+ # avoid publishing key
+ self.public_key_pem = public_key_pem
+
+ def initialize_private_key(self):
+ # cache in memory instead of on disk
+ if not self.private_key_pem:
+ self.private_key_pem = self.generate_private_key()
+
+ return self.private_key_pem
+
+ auth = TestShippableAuthHelper()
+
+ common_auth_test(auth)
diff --git a/test/units/ansible_test/ci/util.py b/test/units/ansible_test/ci/util.py
new file mode 100644
index 00000000..ba8e358b
--- /dev/null
+++ b/test/units/ansible_test/ci/util.py
@@ -0,0 +1,53 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import re
+
+
+def common_auth_test(auth):
+ private_key_pem = auth.initialize_private_key()
+ public_key_pem = auth.public_key_pem
+
+ extract_pem_key(private_key_pem, private=True)
+ extract_pem_key(public_key_pem, private=False)
+
+ request = dict(hello='World')
+ auth.sign_request(request)
+
+ verify_signature(request, public_key_pem)
+
+
+def extract_pem_key(value, private):
+ assert isinstance(value, type(u''))
+
+ key_type = '(EC )?PRIVATE' if private else 'PUBLIC'
+ pattern = r'^-----BEGIN ' + key_type + r' KEY-----\n(?P<key>.*?)\n-----END ' + key_type + r' KEY-----\n$'
+ match = re.search(pattern, value, flags=re.DOTALL)
+
+ assert match, 'key "%s" does not match pattern "%s"' % (value, pattern)
+
+ base64.b64decode(match.group('key')) # make sure the key can be decoded
+
+
+def verify_signature(request, public_key_pem):
+ signature = request.pop('signature')
+ payload_bytes = json.dumps(request, sort_keys=True).encode()
+
+ assert isinstance(signature, type(u''))
+
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives.asymmetric import ec
+ from cryptography.hazmat.primitives.serialization import load_pem_public_key
+
+ public_key = load_pem_public_key(public_key_pem.encode(), default_backend())
+
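+ # Note: verifier()/update()/verify() is an older "cryptography" API kept here
+ # for compatibility; newer releases verify in a single call with
+ # public_key.verify(signature, data, ec.ECDSA(hashes.SHA256())).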
+ verifier = public_key.verifier(
+ base64.b64decode(signature.encode()),
+ ec.ECDSA(hashes.SHA256()),
+ )
+
+ verifier.update(payload_bytes)
+ verifier.verify()
diff --git a/test/units/ansible_test/conftest.py b/test/units/ansible_test/conftest.py
new file mode 100644
index 00000000..9ec9a02f
--- /dev/null
+++ b/test/units/ansible_test/conftest.py
@@ -0,0 +1,14 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import pytest
+import sys
+
+
+@pytest.fixture(autouse=True, scope='session')
+def ansible_test():
+ """Make ansible_test available on sys.path for unit testing ansible-test."""
+ test_lib = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'lib')
+ sys.path.insert(0, test_lib)
diff --git a/test/units/cli/__init__.py b/test/units/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/__init__.py
diff --git a/test/units/cli/arguments/test_optparse_helpers.py b/test/units/cli/arguments/test_optparse_helpers.py
new file mode 100644
index 00000000..0e80fba9
--- /dev/null
+++ b/test/units/cli/arguments/test_optparse_helpers.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import pytest
+
+from ansible import constants as C
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible import __path__ as ansible_path
+from ansible.release import __version__ as ansible_version
+
+if C.DEFAULT_MODULE_PATH is None:
+ cpath = u'Default w/o overrides'
+else:
+ cpath = C.DEFAULT_MODULE_PATH
+
+FAKE_PROG = u'ansible-cli-test'
+VERSION_OUTPUT = opt_help.version(prog=FAKE_PROG)
+
+
+@pytest.mark.parametrize(
+ 'must_have', [
+ FAKE_PROG + u' %s' % ansible_version,
+ u'config file = %s' % C.CONFIG_FILE,
+ u'configured module search path = %s' % cpath,
+ u'ansible python module location = %s' % ':'.join(ansible_path),
+ u'executable location = ',
+ u'python version = %s' % ''.join(sys.version.splitlines()),
+ ]
+)
+def test_option_helper_version(must_have):
+ assert must_have in VERSION_OUTPUT
diff --git a/test/units/cli/galaxy/test_collection_extract_tar.py b/test/units/cli/galaxy/test_collection_extract_tar.py
new file mode 100644
index 00000000..526442cc
--- /dev/null
+++ b/test/units/cli/galaxy/test_collection_extract_tar.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.errors import AnsibleError
+from ansible.galaxy.collection import _extract_tar_dir
+
+
+@pytest.fixture
+def fake_tar_obj(mocker):
+ m_tarfile = mocker.Mock()
+ m_tarfile.type = mocker.Mock(return_value=b'99')
+ m_tarfile.SYMTYPE = mocker.Mock(return_value=b'22')
+
+ return m_tarfile
+
+
+def test_extract_tar_member_trailing_sep(mocker):
+ m_tarfile = mocker.Mock()
+ m_tarfile.getmember = mocker.Mock(side_effect=KeyError)
+
+ with pytest.raises(AnsibleError, match='Unable to extract'):
+ _extract_tar_dir(m_tarfile, '/some/dir/', b'/some/dest')
+
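+ # with a trailing separator only the literal member name is looked up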
+ assert m_tarfile.getmember.call_count == 1
+
+
+def test_extract_tar_member_no_trailing_sep(mocker):
+ m_tarfile = mocker.Mock()
+ m_tarfile.getmember = mocker.Mock(side_effect=KeyError)
+
+ with pytest.raises(AnsibleError, match='Unable to extract'):
+ _extract_tar_dir(m_tarfile, '/some/dir', b'/some/dest')
+
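+ # without a trailing separator the lookup is retried with a separator
+ # appended, hence two getmember calls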
+ assert m_tarfile.getmember.call_count == 2
+
+
+def test_extract_tar_dir_exists(mocker, fake_tar_obj):
+ mocker.patch('os.makedirs', return_value=None)
+ m_makedir = mocker.patch('os.mkdir', return_value=None)
+ mocker.patch('os.path.isdir', return_value=True)
+
+ _extract_tar_dir(fake_tar_obj, '/some/dir', b'/some/dest')
+
+ assert not m_makedir.called
+
+
+def test_extract_tar_dir_does_not_exist(mocker, fake_tar_obj):
+ mocker.patch('os.makedirs', return_value=None)
+ m_makedir = mocker.patch('os.mkdir', return_value=None)
+ mocker.patch('os.path.isdir', return_value=False)
+
+ _extract_tar_dir(fake_tar_obj, '/some/dir', b'/some/dest')
+
+ assert m_makedir.called
+ assert m_makedir.call_args[0] == (b'/some/dir', 0o0755)
diff --git a/test/units/cli/galaxy/test_display_collection.py b/test/units/cli/galaxy/test_display_collection.py
new file mode 100644
index 00000000..d4a3b31d
--- /dev/null
+++ b/test/units/cli/galaxy/test_display_collection.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.cli.galaxy import _display_collection
+
+
+@pytest.fixture
+def collection_object(mocker):
+ def _cobj(fqcn='sandwiches.ham'):
+ cobj = mocker.MagicMock(latest_version='1.5.0')
+ cobj.__str__.return_value = fqcn
+ return cobj
+ return _cobj
+
+
+def test_display_collection(capsys, collection_object):
+ _display_collection(collection_object())
+ out, err = capsys.readouterr()
+
+ assert out == 'sandwiches.ham 1.5.0 \n'
+
+
+def test_display_collections_small_max_widths(capsys, collection_object):
+ _display_collection(collection_object(), 1, 1)
+ out, err = capsys.readouterr()
+
+ assert out == 'sandwiches.ham 1.5.0 \n'
+
+
+def test_display_collections_large_max_widths(capsys, collection_object):
+ _display_collection(collection_object(), 20, 20)
+ out, err = capsys.readouterr()
+
+ assert out == 'sandwiches.ham 1.5.0 \n'
+
+
+def test_display_collection_small_minimum_widths(capsys, collection_object):
+ _display_collection(collection_object('a.b'), min_cwidth=0, min_vwidth=0)
+ out, err = capsys.readouterr()
+
+ assert out == 'a.b 1.5.0 \n'
diff --git a/test/units/cli/galaxy/test_display_header.py b/test/units/cli/galaxy/test_display_header.py
new file mode 100644
index 00000000..ae926b0d
--- /dev/null
+++ b/test/units/cli/galaxy/test_display_header.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.cli.galaxy import _display_header
+
+
+def test_display_header_default(capsys):
+ _display_header('/collections/path', 'h1', 'h2')
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /collections/path'
+ assert out_lines[2] == 'h1 h2 '
+ assert out_lines[3] == '---------- -------'
+
+
+def test_display_header_widths(capsys):
+ _display_header('/collections/path', 'Collection', 'Version', 18, 18)
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /collections/path'
+ assert out_lines[2] == 'Collection Version '
+ assert out_lines[3] == '------------------ ------------------'
+
+
+def test_display_header_small_widths(capsys):
+ _display_header('/collections/path', 'Col', 'Ver', 1, 1)
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /collections/path'
+ assert out_lines[2] == 'Col Ver'
+ assert out_lines[3] == '--- ---'
diff --git a/test/units/cli/galaxy/test_display_role.py b/test/units/cli/galaxy/test_display_role.py
new file mode 100644
index 00000000..e23a7725
--- /dev/null
+++ b/test/units/cli/galaxy/test_display_role.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.cli.galaxy import _display_role
+
+
+def test_display_role(mocker, capsys):
+ mocked_galaxy_role = mocker.Mock(install_info=None)
+ mocked_galaxy_role.name = 'testrole'
+ _display_role(mocked_galaxy_role)
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert out_lines[0] == '- testrole, (unknown version)'
+
+
+def test_display_role_known_version(mocker, capsys):
+ mocked_galaxy_role = mocker.Mock(install_info={'version': '1.0.0'})
+ mocked_galaxy_role.name = 'testrole'
+ _display_role(mocked_galaxy_role)
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert out_lines[0] == '- testrole, 1.0.0'
diff --git a/test/units/cli/galaxy/test_execute_list.py b/test/units/cli/galaxy/test_execute_list.py
new file mode 100644
index 00000000..41fee0bf
--- /dev/null
+++ b/test/units/cli/galaxy/test_execute_list.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible import context
+from ansible.cli.galaxy import GalaxyCLI
+
+
+def test_execute_list_role_called(mocker):
+ """Make sure the correct method is called for a role"""
+
+ gc = GalaxyCLI(['ansible-galaxy', 'role', 'list'])
+ context.CLIARGS._store = {'type': 'role'}
+ execute_list_role_mock = mocker.patch('ansible.cli.galaxy.GalaxyCLI.execute_list_role', side_effect=AttributeError('raised intentionally'))
+ execute_list_collection_mock = mocker.patch('ansible.cli.galaxy.GalaxyCLI.execute_list_collection', side_effect=AttributeError('raised intentionally'))
+ with pytest.raises(AttributeError):
+ gc.execute_list()
+
+ assert execute_list_role_mock.call_count == 1
+ assert execute_list_collection_mock.call_count == 0
+
+
+def test_execute_list_collection_called(mocker):
+ """Make sure the correct method is called for a collection"""
+
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list'])
+ context.CLIARGS._store = {'type': 'collection'}
+ execute_list_role_mock = mocker.patch('ansible.cli.galaxy.GalaxyCLI.execute_list_role', side_effect=AttributeError('raised intentionally'))
+ execute_list_collection_mock = mocker.patch('ansible.cli.galaxy.GalaxyCLI.execute_list_collection', side_effect=AttributeError('raised intentionally'))
+ with pytest.raises(AttributeError):
+ gc.execute_list()
+
+ assert execute_list_role_mock.call_count == 0
+ assert execute_list_collection_mock.call_count == 1
diff --git a/test/units/cli/galaxy/test_execute_list_collection.py b/test/units/cli/galaxy/test_execute_list_collection.py
new file mode 100644
index 00000000..040acf1e
--- /dev/null
+++ b/test/units/cli/galaxy/test_execute_list_collection.py
@@ -0,0 +1,278 @@
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible import context
+from ansible.cli.galaxy import GalaxyCLI
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.galaxy.collection import CollectionRequirement
+from ansible.module_utils._text import to_native
+
+
+def path_exists(path):
+ if to_native(path) == '/root/.ansible/collections/ansible_collections/sandwiches/ham':
+ return False
+ elif to_native(path) == '/usr/share/ansible/collections/ansible_collections/sandwiches/reuben':
+ return False
+ elif to_native(path) == 'nope':
+ return False
+ else:
+ return True
+
+
+def isdir(path):
+ if to_native(path) == 'nope':
+ return False
+ else:
+ return True
+
+
+def cliargs(collections_paths=None, collection_name=None):
+ if collections_paths is None:
+ collections_paths = ['~/root/.ansible/collections', '/usr/share/ansible/collections']
+
+ context.CLIARGS._store = {
+ 'collections_path': collections_paths,
+ 'collection': collection_name,
+ 'type': 'collection',
+ }
+
+
+@pytest.fixture
+def mock_collection_objects(mocker):
+ mocker.patch('ansible.cli.galaxy.GalaxyCLI._resolve_path', side_effect=['/root/.ansible/collections', '/usr/share/ansible/collections'])
+ mocker.patch('ansible.cli.galaxy.validate_collection_path',
+ side_effect=['/root/.ansible/collections/ansible_collections', '/usr/share/ansible/collections/ansible_collections'])
+
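+ # the positional args below appear to mirror CollectionRequirement(namespace,
+ # name, b_path, api, versions, requirement, force)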
+ collection_args = (
+ (
+ 'sandwiches',
+ 'pbj',
+ b'/usr/share/ansible/collections/ansible_collections/sandwiches/pbj',
+ mocker.Mock(),
+ ['1.0.0', '1.5.0'],
+ '1.0.0',
+ False,
+ ),
+ (
+ 'sandwiches',
+ 'pbj',
+ b'/root/.ansible/collections/ansible_collections/sandwiches/pbj',
+ mocker.Mock(),
+ ['1.0.0', '1.5.0'],
+ '1.5.0',
+ False,
+ ),
+ (
+ 'sandwiches',
+ 'ham',
+ b'/usr/share/ansible/collections/ansible_collections/sandwiches/ham',
+ mocker.Mock(),
+ ['1.0.0'],
+ '1.0.0',
+ False,
+ ),
+ (
+ 'sandwiches',
+ 'reuben',
+ b'/root/.ansible/collections/ansible_collections/sandwiches/reuben',
+ mocker.Mock(),
+ ['1.0.0', '2.5.0'],
+ '2.5.0',
+ False,
+ ),
+ )
+
+ collections_path_1 = [CollectionRequirement(*cargs) for cargs in collection_args if to_native(cargs[2]).startswith('/root')]
+ collections_path_2 = [CollectionRequirement(*cargs) for cargs in collection_args if to_native(cargs[2]).startswith('/usr/share')]
+ mocker.patch('ansible.cli.galaxy.find_existing_collections', side_effect=[collections_path_1, collections_path_2])
+
+
+@pytest.fixture
+def mock_from_path(mocker):
+ def _from_path(collection_name='pbj'):
+ collection_args = {
+ 'sandwiches.pbj': (
+ (
+ 'sandwiches',
+ 'pbj',
+ b'/root/.ansible/collections/ansible_collections/sandwiches/pbj',
+ mocker.Mock(),
+ ['1.0.0', '1.5.0'],
+ '1.5.0',
+ False,
+ ),
+ (
+ 'sandwiches',
+ 'pbj',
+ b'/usr/share/ansible/collections/ansible_collections/sandwiches/pbj',
+ mocker.Mock(),
+ ['1.0.0', '1.5.0'],
+ '1.0.0',
+ False,
+ ),
+ ),
+ 'sandwiches.ham': (
+ (
+ 'sandwiches',
+ 'ham',
+ b'/usr/share/ansible/collections/ansible_collections/sandwiches/ham',
+ mocker.Mock(),
+ ['1.0.0'],
+ '1.0.0',
+ False,
+ ),
+ ),
+ }
+
+ from_path_objects = [CollectionRequirement(*args) for args in collection_args[collection_name]]
+ mocker.patch('ansible.galaxy.collection.CollectionRequirement.from_path', side_effect=from_path_objects)
+
+ return _from_path
+
+
+def test_execute_list_collection_all(mocker, capsys, mock_collection_objects):
+ """Test listing all collections from multiple paths"""
+
+ cliargs()
+
+ mocker.patch('os.path.exists', return_value=True)
+ mocker.patch('os.path.isdir', return_value=True)
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list'])
+ gc.execute_list_collection()
+
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert len(out_lines) == 12
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
+ assert out_lines[2] == 'Collection Version'
+ assert out_lines[3] == '----------------- -------'
+ assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
+ assert out_lines[5] == 'sandwiches.reuben 2.5.0 '
+ assert out_lines[6] == ''
+ assert out_lines[7] == '# /usr/share/ansible/collections/ansible_collections'
+ assert out_lines[8] == 'Collection Version'
+ assert out_lines[9] == '-------------- -------'
+ assert out_lines[10] == 'sandwiches.ham 1.0.0 '
+ assert out_lines[11] == 'sandwiches.pbj 1.0.0 '
+
+
+def test_execute_list_collection_specific(mocker, capsys, mock_collection_objects, mock_from_path):
+ """Test listing a specific collection"""
+
+ collection_name = 'sandwiches.ham'
+ mock_from_path(collection_name)
+
+ cliargs(collection_name=collection_name)
+ mocker.patch('os.path.exists', path_exists)
+ mocker.patch('os.path.isdir', return_value=True)
+ mocker.patch('ansible.galaxy.collection.validate_collection_name', collection_name)
+ mocker.patch('ansible.cli.galaxy._get_collection_widths', return_value=(14, 5))
+
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name])
+ gc.execute_list_collection()
+
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert len(out_lines) == 5
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /usr/share/ansible/collections/ansible_collections'
+ assert out_lines[2] == 'Collection Version'
+ assert out_lines[3] == '-------------- -------'
+ assert out_lines[4] == 'sandwiches.ham 1.0.0 '
+
+
+def test_execute_list_collection_specific_duplicate(mocker, capsys, mock_collection_objects, mock_from_path):
+ """Test listing a specific collection that exists at multiple paths"""
+
+ collection_name = 'sandwiches.pbj'
+ mock_from_path(collection_name)
+
+ cliargs(collection_name=collection_name)
+ mocker.patch('os.path.exists', path_exists)
+ mocker.patch('os.path.isdir', return_value=True)
+ mocker.patch('ansible.galaxy.collection.validate_collection_name', collection_name)
+
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name])
+ gc.execute_list_collection()
+
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert len(out_lines) == 10
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
+ assert out_lines[2] == 'Collection Version'
+ assert out_lines[3] == '-------------- -------'
+ assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
+ assert out_lines[5] == ''
+ assert out_lines[6] == '# /usr/share/ansible/collections/ansible_collections'
+ assert out_lines[7] == 'Collection Version'
+ assert out_lines[8] == '-------------- -------'
+ assert out_lines[9] == 'sandwiches.pbj 1.0.0 '
+
+
+def test_execute_list_collection_specific_invalid_fqcn(mocker):
+ """Test an invalid fully qualified collection name (FQCN)"""
+
+ collection_name = 'no.good.name'
+
+ cliargs(collection_name=collection_name)
+ mocker.patch('os.path.exists', return_value=True)
+ mocker.patch('os.path.isdir', return_value=True)
+
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', collection_name])
+ with pytest.raises(AnsibleError, match='Invalid collection name'):
+ gc.execute_list_collection()
+
+
+def test_execute_list_collection_no_valid_paths(mocker, capsys):
+ """Test listing collections when no valid paths are given"""
+
+ cliargs()
+
+ mocker.patch('os.path.exists', return_value=True)
+ mocker.patch('os.path.isdir', return_value=False)
+ mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False)
+ mocker.patch('ansible.cli.galaxy.display.columns', 79)
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list'])
+
+ with pytest.raises(AnsibleOptionsError, match=r'None of the provided paths were usable.'):
+ gc.execute_list_collection()
+
+ out, err = capsys.readouterr()
+
+ assert '[WARNING]: - the configured path' in err
+ assert 'exists, but it\nis not a directory.' in err
+
+
+def test_execute_list_collection_one_invalid_path(mocker, capsys, mock_collection_objects):
+ """Test listing all collections when one invalid path is given"""
+
+ cliargs()
+ mocker.patch('os.path.exists', return_value=True)
+ mocker.patch('os.path.isdir', isdir)
+ mocker.patch('ansible.cli.galaxy.GalaxyCLI._resolve_path', side_effect=['/root/.ansible/collections', 'nope'])
+ mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False)
+
+ gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list', '-p', 'nope'])
+ gc.execute_list_collection()
+
+ out, err = capsys.readouterr()
+ out_lines = out.splitlines()
+
+ assert out_lines[0] == ''
+ assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
+ assert out_lines[2] == 'Collection Version'
+ assert out_lines[3] == '----------------- -------'
+ assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
+ # Only a partial test of the output
+
+ assert err == '[WARNING]: - the configured path nope, exists, but it is not a directory.\n'
diff --git a/test/units/cli/galaxy/test_get_collection_widths.py b/test/units/cli/galaxy/test_get_collection_widths.py
new file mode 100644
index 00000000..25649242
--- /dev/null
+++ b/test/units/cli/galaxy/test_get_collection_widths.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.cli.galaxy import _get_collection_widths
+
+
+@pytest.fixture
+def collection_objects(mocker):
+ collection_ham = mocker.MagicMock(latest_version='1.5.0')
+ collection_ham.__str__.return_value = 'sandwiches.ham'
+
+ collection_pbj = mocker.MagicMock(latest_version='2.5')
+ collection_pbj.__str__.return_value = 'sandwiches.pbj'
+
+ collection_reuben = mocker.MagicMock(latest_version='4')
+ collection_reuben.__str__.return_value = 'sandwiches.reuben'
+
+ return [collection_ham, collection_pbj, collection_reuben]
+
+
+def test_get_collection_widths(collection_objects):
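+ # 17 == len('sandwiches.reuben'), the longest FQCN; 5 == len('1.5.0'), the longest version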
+ assert _get_collection_widths(collection_objects) == (17, 5)
+
+
+def test_get_collection_widths_single_collection(mocker):
+ mocked_collection = mocker.MagicMock(latest_version='3.0.0')
+ mocked_collection.__str__.return_value = 'sandwiches.club'
+ # Make this look like it is not iterable
+ mocker.patch('ansible.cli.galaxy.is_iterable', return_value=False)
+
+ assert _get_collection_widths(mocked_collection) == (15, 5)
diff --git a/test/units/cli/test_adhoc.py b/test/units/cli/test_adhoc.py
new file mode 100644
index 00000000..0e7475c6
--- /dev/null
+++ b/test/units/cli/test_adhoc.py
@@ -0,0 +1,113 @@
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import re
+
+from ansible import context
+from ansible.cli.adhoc import AdHocCLI, display
+from ansible.errors import AnsibleOptionsError
+
+
+def test_parse():
+ """ Test adhoc parse"""
+ with pytest.raises(ValueError, match='A non-empty list for args is required'):
+ adhoc_cli = AdHocCLI([])
+
+ adhoc_cli = AdHocCLI(['ansibletest'])
+ with pytest.raises(SystemExit):
+ adhoc_cli.parse()
+
+
+def test_with_command():
+ """ Test simple adhoc command"""
+ module_name = 'command'
+ adhoc_cli = AdHocCLI(args=['ansible', '-m', module_name, '-vv', 'localhost'])
+ adhoc_cli.parse()
+ assert context.CLIARGS['module_name'] == module_name
+ assert display.verbosity == 2
+
+
+def test_simple_command():
+ """ Test valid command and its run"""
+ adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost', '-a', 'echo "hi"'])
+ adhoc_cli.parse()
+ ret = adhoc_cli.run()
+ assert ret == 0
+
+
+def test_no_argument():
+ """ Test no argument command"""
+ adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost'])
+ adhoc_cli.parse()
+ with pytest.raises(AnsibleOptionsError) as exec_info:
+ adhoc_cli.run()
+ assert 'No argument passed to command module' == str(exec_info.value)
+
+
+def test_did_you_mean_playbook():
+ """ Test adhoc with yml file as argument parameter"""
+ adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost.yml'])
+ adhoc_cli.parse()
+ with pytest.raises(AnsibleOptionsError) as exec_info:
+ adhoc_cli.run()
+ assert 'No argument passed to command module (did you mean to run ansible-playbook?)' == str(exec_info.value)
+
+
+def test_play_ds_positive():
+ """ Test _play_ds"""
+ adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-m', 'command'])
+ adhoc_cli.parse()
+ ret = adhoc_cli._play_ds('command', 10, 2)
+ assert ret['name'] == 'Ansible Ad-Hoc'
+ assert ret['tasks'] == [{'action': {'module': 'command', 'args': {}}, 'async_val': 10, 'poll': 2}]
+
+
+def test_play_ds_with_include_role():
+ """ Test include_role command with poll"""
+ adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-m', 'include_role'])
+ adhoc_cli.parse()
+ ret = adhoc_cli._play_ds('include_role', None, 2)
+ assert ret['name'] == 'Ansible Ad-Hoc'
+ assert ret['gather_facts'] == 'no'
+
+
+def test_run_import_playbook():
+ """ Test import_playbook which is not allowed with ad-hoc command"""
+ import_playbook = 'import_playbook'
+ adhoc_cli = AdHocCLI(args=['/bin/ansible', '-m', import_playbook, 'localhost'])
+ adhoc_cli.parse()
+ with pytest.raises(AnsibleOptionsError) as exec_info:
+ adhoc_cli.run()
+ assert context.CLIARGS['module_name'] == import_playbook
+ assert "'%s' is not a valid action for ad-hoc commands" % import_playbook == str(exec_info.value)
+
+
+def test_run_no_extra_vars():
+ adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-e'])
+ with pytest.raises(SystemExit) as exec_info:
+ adhoc_cli.parse()
+ assert exec_info.value.code == 2
+
+
+def test_ansible_version(capsys, mocker):
+ adhoc_cli = AdHocCLI(args=['/bin/ansible', '--version'])
+ with pytest.raises(SystemExit):
+ adhoc_cli.run()
+ version = capsys.readouterr()
+ try:
+ version_lines = version.out.splitlines()
+ except AttributeError:
+ # Python 2.6 does not return a named tuple, so get the first item
+ version_lines = version[0].splitlines()
+
+ assert len(version_lines) == 6, 'Incorrect number of lines in "ansible --version" output'
+ assert re.match('ansible [0-9.a-z]+$', version_lines[0]), 'Incorrect ansible version line in "ansible --version" output'
+ assert re.match(' config file = .*$', version_lines[1]), 'Incorrect config file line in "ansible --version" output'
+ assert re.match(' configured module search path = .*$', version_lines[2]), 'Incorrect module search path in "ansible --version" output'
+ assert re.match(' ansible python module location = .*$', version_lines[3]), 'Incorrect python module location in "ansible --version" output'
+ assert re.match(' executable location = .*$', version_lines[4]), 'Incorrect executable location in "ansible --version" output'
+ assert re.match(' python version = .*$', version_lines[5]), 'Incorrect python version in "ansible --version" output'
diff --git a/test/units/cli/test_cli.py b/test/units/cli/test_cli.py
new file mode 100644
index 00000000..6dcd9e35
--- /dev/null
+++ b/test/units/cli/test_cli.py
@@ -0,0 +1,381 @@
+# (c) 2017, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from units.mock.loader import DictDataLoader
+
+from ansible.release import __version__
+from ansible.parsing import vault
+from ansible import cli
+
+
+class TestCliVersion(unittest.TestCase):
+
+ def test_version_info(self):
+ version_info = cli.CLI.version_info()
+ self.assertEqual(version_info['string'], __version__)
+
+ def test_version_info_gitinfo(self):
+ version_info = cli.CLI.version_info(gitinfo=True)
+ self.assertIn('python version', version_info['string'])
+
+
+class TestCliBuildVaultIds(unittest.TestCase):
+ def setUp(self):
+ self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
+ self.mock_isatty = self.tty_patcher.start()
+
+ def tearDown(self):
+ self.tty_patcher.stop()
+
+ def test(self):
+ res = cli.CLI.build_vault_ids(['foo@bar'])
+ self.assertEqual(res, ['foo@bar'])
+
+ def test_create_new_password_no_vault_id(self):
+ res = cli.CLI.build_vault_ids([], create_new_password=True)
+ self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+ def test_create_new_password_no_vault_id_no_auto_prompt(self):
+ res = cli.CLI.build_vault_ids([], auto_prompt=False, create_new_password=True)
+ self.assertEqual(res, [])
+
+ def test_no_vault_id_no_auto_prompt(self):
+ # simulate 'ansible-playbook site.yml' without --ask-vault-pass, should not prompt
+ res = cli.CLI.build_vault_ids([], auto_prompt=False)
+ self.assertEqual(res, [])
+
+ def test_no_vault_ids_auto_prompt(self):
+ # create_new_password=False
+ # simulate 'ansible-vault edit encrypted.yml'
+ res = cli.CLI.build_vault_ids([], auto_prompt=True)
+ self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+ def test_no_vault_ids_auto_prompt_ask_vault_pass(self):
+ # create_new_password=False
+ # simulate 'ansible-vault edit --ask-vault-pass encrypted.yml'
+ res = cli.CLI.build_vault_ids([], auto_prompt=True, ask_vault_pass=True)
+ self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+ def test_create_new_password_auto_prompt(self):
+ # simulate 'ansible-vault encrypt somefile.yml'
+ res = cli.CLI.build_vault_ids([], auto_prompt=True, create_new_password=True)
+ self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+ def test_create_new_password_no_vault_id_ask_vault_pass(self):
+ res = cli.CLI.build_vault_ids([], ask_vault_pass=True,
+ create_new_password=True)
+ self.assertEqual(res, ['default@prompt_ask_vault_pass'])
+
+ def test_create_new_password_with_vault_ids(self):
+ res = cli.CLI.build_vault_ids(['foo@bar'], create_new_password=True)
+ self.assertEqual(res, ['foo@bar'])
+
+ def test_create_new_password_no_vault_ids_password_files(self):
+ res = cli.CLI.build_vault_ids([], vault_password_files=['some-password-file'],
+ create_new_password=True)
+ self.assertEqual(res, ['default@some-password-file'])
+
+ def test_everything(self):
+ res = cli.CLI.build_vault_ids(['blip@prompt', 'baz@prompt_ask_vault_pass',
+ 'some-password-file', 'qux@another-password-file'],
+ vault_password_files=['yet-another-password-file',
+ 'one-more-password-file'],
+ ask_vault_pass=True,
+ create_new_password=True,
+ auto_prompt=False)
+
+ self.assertEqual(set(res), set(['blip@prompt', 'baz@prompt_ask_vault_pass',
+ 'default@prompt_ask_vault_pass',
+ 'some-password-file', 'qux@another-password-file',
+ 'default@yet-another-password-file',
+ 'default@one-more-password-file']))
+
+
+class TestCliSetupVaultSecrets(unittest.TestCase):
+ def setUp(self):
+ self.fake_loader = DictDataLoader({})
+ self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True)
+ self.mock_isatty = self.tty_patcher.start()
+
+ self.display_v_patcher = patch('ansible.cli.display.verbosity', return_value=6)
+ self.mock_display_v = self.display_v_patcher.start()
+ cli.display.verbosity = 5
+
+ def tearDown(self):
+ self.tty_patcher.stop()
+ self.display_v_patcher.stop()
+ cli.display.verbosity = 0
+
+ def test(self):
+ res = cli.CLI.setup_vault_secrets(None, None, auto_prompt=False)
+ self.assertIsInstance(res, list)
+
+ @patch('ansible.cli.get_file_vault_secret')
+ def test_password_file(self, mock_file_secret):
+ filename = '/dev/null/secret'
+ mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
+ vault_id='file1',
+ filename=filename)
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['secret1@%s' % filename, 'secret2'],
+ vault_password_files=[filename])
+ self.assertIsInstance(res, list)
+ matches = vault.match_secrets(res, ['secret1'])
+ self.assertIn('secret1', [x[0] for x in matches])
+ match = matches[0][1]
+ self.assertEqual(match.bytes, b'file1_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='prompt1')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['prompt1@prompt'],
+ ask_vault_pass=True,
+ auto_prompt=False)
+
+ self.assertIsInstance(res, list)
+ matches = vault.match_secrets(res, ['prompt1'])
+ self.assertIn('prompt1', [x[0] for x in matches])
+ match = matches[0][1]
+ self.assertEqual(match.bytes, b'prompt1_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_no_tty(self, mock_prompt_secret):
+ self.mock_isatty.return_value = False
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='prompt1',
+ name='bytes_should_be_prompt1_password',
+ spec=vault.PromptVaultSecret)
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['prompt1@prompt'],
+ ask_vault_pass=True,
+ auto_prompt=False)
+
+ self.assertIsInstance(res, list)
+ self.assertEqual(len(res), 2)
+ matches = vault.match_secrets(res, ['prompt1'])
+ self.assertIn('prompt1', [x[0] for x in matches])
+ self.assertEqual(len(matches), 1)
+
+ @patch('ansible.cli.get_file_vault_secret')
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_no_tty_and_password_file(self, mock_prompt_secret, mock_file_secret):
+ self.mock_isatty.return_value = False
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='prompt1')
+ filename = '/dev/null/secret'
+ mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
+ vault_id='file1',
+ filename=filename)
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['prompt1@prompt', 'file1@/dev/null/secret'],
+ ask_vault_pass=True)
+
+ self.assertIsInstance(res, list)
+ matches = vault.match_secrets(res, ['file1'])
+ self.assertIn('file1', [x[0] for x in matches])
+ self.assertNotIn('prompt1', [x[0] for x in matches])
+ match = matches[0][1]
+ self.assertEqual(match.bytes, b'file1_password')
+
+ def _assert_ids(self, vault_id_names, res, password=b'prompt1_password'):
+ self.assertIsInstance(res, list)
+ len_ids = len(vault_id_names)
+ matches = vault.match_secrets(res, vault_id_names)
+ self.assertEqual(len(res), len_ids, 'len(res):%s does not match len_ids:%s' % (len(res), len_ids))
+ self.assertEqual(len(matches), len_ids)
+ for index, prompt in enumerate(vault_id_names):
+ self.assertIn(prompt, [x[0] for x in matches])
+ # simple mock, same password/prompt for each mock_prompt_secret
+ self.assertEqual(matches[index][1].bytes, password)
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_multiple_prompts(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='prompt1')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['prompt1@prompt',
+ 'prompt2@prompt'],
+ ask_vault_pass=False)
+
+ vault_id_names = ['prompt1', 'prompt2']
+ self._assert_ids(vault_id_names, res)
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_multiple_prompts_and_ask_vault_pass(self, mock_prompt_secret):
+ self.mock_isatty.return_value = False
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='prompt1')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['prompt1@prompt',
+ 'prompt2@prompt',
+ 'prompt3@prompt_ask_vault_pass'],
+ ask_vault_pass=True)
+
+ # We provide some vault-ids and secrets, so auto_prompt shouldn't get triggered;
+ # ask_vault_pass=True still adds the extra 'default' prompt secret below
+ vault_id_names = ['prompt1', 'prompt2', 'prompt3', 'default']
+ self._assert_ids(vault_id_names, res)
+
+ @patch('ansible.cli.C')
+ @patch('ansible.cli.get_file_vault_secret')
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_default_file_vault(self, mock_prompt_secret,
+ mock_file_secret,
+ mock_config):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='default')
+ mock_file_secret.return_value = MagicMock(bytes=b'file1_password',
+ vault_id='default')
+ mock_config.DEFAULT_VAULT_PASSWORD_FILE = '/dev/null/faux/vault_password_file'
+ mock_config.DEFAULT_VAULT_IDENTITY = 'default'
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=[],
+ create_new_password=False,
+ ask_vault_pass=False)
+
+ self.assertIsInstance(res, list)
+ matches = vault.match_secrets(res, ['default'])
+ # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE takes precedence over prompts
+ # for the same vault-id ('default') regardless of CLI order, since order didn't matter in 2.3
+
+ self.assertEqual(matches[0][1].bytes, b'file1_password')
+ self.assertEqual(len(matches), 1)
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=[],
+ create_new_password=False,
+ ask_vault_pass=True,
+ auto_prompt=True)
+
+ self.assertIsInstance(res, list)
+ matches = vault.match_secrets(res, ['default'])
+ self.assertEqual(matches[0][1].bytes, b'file1_password')
+ self.assertEqual(matches[1][1].bytes, b'prompt1_password')
+ self.assertEqual(len(matches), 2)
+
+ @patch('ansible.cli.get_file_vault_secret')
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_default_file_vault_identity_list(self, mock_prompt_secret,
+ mock_file_secret):
+ default_vault_ids = ['some_prompt@prompt',
+ 'some_file@/dev/null/secret']
+
+ mock_prompt_secret.return_value = MagicMock(bytes=b'some_prompt_password',
+ vault_id='some_prompt')
+
+ filename = '/dev/null/secret'
+ mock_file_secret.return_value = MagicMock(bytes=b'some_file_password',
+ vault_id='some_file',
+ filename=filename)
+
+ vault_ids = default_vault_ids
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=vault_ids,
+ create_new_password=False,
+ ask_vault_pass=True)
+
+ self.assertIsInstance(res, list)
+ matches = vault.match_secrets(res, ['some_file'])
+ # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE takes precedence over prompts
+ # for the same vault-id ('default') regardless of CLI order, since order didn't matter in 2.3
+ self.assertEqual(matches[0][1].bytes, b'some_file_password')
+ matches = vault.match_secrets(res, ['some_prompt'])
+ self.assertEqual(matches[0][1].bytes, b'some_prompt_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_just_ask_vault_pass(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='default')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=[],
+ create_new_password=False,
+ ask_vault_pass=True)
+
+ self.assertIsInstance(res, list)
+ match = vault.match_secrets(res, ['default'])[0][1]
+ self.assertEqual(match.bytes, b'prompt1_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_new_password_ask_vault_pass(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='default')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=[],
+ create_new_password=True,
+ ask_vault_pass=True)
+
+ self.assertIsInstance(res, list)
+ match = vault.match_secrets(res, ['default'])[0][1]
+ self.assertEqual(match.bytes, b'prompt1_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_new_password_vault_id_prompt(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='some_vault_id')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['some_vault_id@prompt'],
+ create_new_password=True,
+ ask_vault_pass=False)
+
+ self.assertIsInstance(res, list)
+ match = vault.match_secrets(res, ['some_vault_id'])[0][1]
+ self.assertEqual(match.bytes, b'prompt1_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_new_password_vault_id_prompt_ask_vault_pass(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='default')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['some_vault_id@prompt_ask_vault_pass'],
+ create_new_password=True,
+ ask_vault_pass=False)
+
+ self.assertIsInstance(res, list)
+ match = vault.match_secrets(res, ['some_vault_id'])[0][1]
+ self.assertEqual(match.bytes, b'prompt1_password')
+
+ @patch('ansible.cli.PromptVaultSecret')
+ def test_prompt_new_password_vault_id_prompt_ask_vault_pass_ask_vault_pass(self, mock_prompt_secret):
+ mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password',
+ vault_id='default')
+
+ res = cli.CLI.setup_vault_secrets(loader=self.fake_loader,
+ vault_ids=['some_vault_id@prompt_ask_vault_pass'],
+ create_new_password=True,
+ ask_vault_pass=True)
+
+ self.assertIsInstance(res, list)
+ match = vault.match_secrets(res, ['some_vault_id'])[0][1]
+ self.assertEqual(match.bytes, b'prompt1_password')
diff --git a/test/units/cli/test_console.py b/test/units/cli/test_console.py
new file mode 100644
index 00000000..3acc4faa
--- /dev/null
+++ b/test/units/cli/test_console.py
@@ -0,0 +1,51 @@
+# (c) 2016, Thilo Uttendorfer <tlo@sengaya.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch
+
+from ansible.cli.console import ConsoleCLI
+
+
+class TestConsoleCLI(unittest.TestCase):
+ def test_parse(self):
+ cli = ConsoleCLI(['ansible test'])
+ cli.parse()
+ self.assertTrue(cli.parser is not None)
+
+ def test_module_args(self):
+ cli = ConsoleCLI(['ansible test'])
+ cli.parse()
+ res = cli.module_args('copy')
+ self.assertTrue(cli.parser is not None)
+ self.assertIn('src', res)
+ self.assertIn('backup', res)
+ self.assertIsInstance(res, list)
+
+ @patch('ansible.utils.display.Display.display')
+ def test_helpdefault(self, mock_display):
+ cli = ConsoleCLI(['ansible test'])
+ cli.parse()
+ cli.modules = set(['copy'])
+ cli.helpdefault('copy')
+ self.assertTrue(cli.parser is not None)
+ self.assertTrue(len(mock_display.call_args_list) > 0,
+ "display.display should have been called but was not")
diff --git a/test/units/cli/test_data/collection_skeleton/README.md b/test/units/cli/test_data/collection_skeleton/README.md
new file mode 100644
index 00000000..4cfd8afe
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/README.md
@@ -0,0 +1 @@
+A readme \ No newline at end of file
diff --git a/test/units/cli/test_data/collection_skeleton/docs/My Collection.md b/test/units/cli/test_data/collection_skeleton/docs/My Collection.md
new file mode 100644
index 00000000..6fa917f2
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/docs/My Collection.md
@@ -0,0 +1 @@
+Welcome to my test collection doc for {{ namespace }}. \ No newline at end of file
diff --git a/test/units/cli/test_data/collection_skeleton/galaxy.yml.j2 b/test/units/cli/test_data/collection_skeleton/galaxy.yml.j2
new file mode 100644
index 00000000..b1da267a
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/galaxy.yml.j2
@@ -0,0 +1,7 @@
+namespace: '{{ namespace }}'
+name: '{{ collection_name }}'
+version: 0.1.0
+readme: README.md
+authors:
+- Ansible Cow <acow@bovineuniversity.edu>
+- Tu Cow <tucow@bovineuniversity.edu>
diff --git a/test/units/cli/test_data/collection_skeleton/playbooks/main.yml b/test/units/cli/test_data/collection_skeleton/playbooks/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/playbooks/main.yml
diff --git a/test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2
new file mode 100644
index 00000000..b4e33641
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/playbooks/templates/subfolder/test.conf.j2
@@ -0,0 +1,2 @@
+[defaults]
+test_key = {{ test_variable }}
diff --git a/test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2
new file mode 100644
index 00000000..b4e33641
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/playbooks/templates/test.conf.j2
@@ -0,0 +1,2 @@
+[defaults]
+test_key = {{ test_variable }}
diff --git a/test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/plugins/action/.git_keep
diff --git a/test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/plugins/filter/.git_keep
diff --git a/test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/plugins/inventory/.git_keep
diff --git a/test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/plugins/lookup/.git_keep
diff --git a/test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/plugins/module_utils/.git_keep
diff --git a/test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep b/test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/plugins/modules/.git_keep
diff --git a/test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2 b/test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2
new file mode 100644
index 00000000..77adf2ef
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/roles/common/tasks/main.yml.j2
@@ -0,0 +1,3 @@
+- name: test collection skeleton
+ debug:
+ msg: "Namespace: {{ namespace }}" \ No newline at end of file
diff --git a/test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2
new file mode 100644
index 00000000..b4e33641
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/roles/common/templates/subfolder/test.conf.j2
@@ -0,0 +1,2 @@
+[defaults]
+test_key = {{ test_variable }}
diff --git a/test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2 b/test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2
new file mode 100644
index 00000000..b4e33641
--- /dev/null
+++ b/test/units/cli/test_data/collection_skeleton/roles/common/templates/test.conf.j2
@@ -0,0 +1,2 @@
+[defaults]
+test_key = {{ test_variable }}
diff --git a/test/units/cli/test_data/role_skeleton/.travis.yml b/test/units/cli/test_data/role_skeleton/.travis.yml
new file mode 100644
index 00000000..49e7e1c5
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/.travis.yml
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+ apt:
+ packages:
+ - python-pip
+
+install:
+ # Install ansible
+ - pip install ansible
+
+ # Check ansible version
+ - ansible --version
+
+ # Create ansible.cfg with correct roles_path
+ - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+ # Basic role syntax check
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/
diff --git a/test/units/cli/test_data/role_skeleton/README.md b/test/units/cli/test_data/role_skeleton/README.md
new file mode 100644
index 00000000..225dd44b
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details with regard to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/test/units/cli/test_data/role_skeleton/defaults/main.yml.j2 b/test/units/cli/test_data/role_skeleton/defaults/main.yml.j2
new file mode 100644
index 00000000..3818e64c
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/defaults/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# defaults file for {{ role_name }}
diff --git a/test/units/cli/test_data/role_skeleton/files/.git_keep b/test/units/cli/test_data/role_skeleton/files/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/files/.git_keep
diff --git a/test/units/cli/test_data/role_skeleton/handlers/main.yml.j2 b/test/units/cli/test_data/role_skeleton/handlers/main.yml.j2
new file mode 100644
index 00000000..3f4c4967
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/handlers/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# handlers file for {{ role_name }}
diff --git a/test/units/cli/test_data/role_skeleton/inventory b/test/units/cli/test_data/role_skeleton/inventory
new file mode 100644
index 00000000..2fbb50c4
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/inventory
@@ -0,0 +1 @@
+localhost
diff --git a/test/units/cli/test_data/role_skeleton/meta/main.yml.j2 b/test/units/cli/test_data/role_skeleton/meta/main.yml.j2
new file mode 100644
index 00000000..2fc53cbe
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/meta/main.yml.j2
@@ -0,0 +1,62 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{ description }}
+ company: {{ company }}
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+
+ # Some suggested licenses:
+ # - BSD (default)
+ # - MIT
+ # - GPLv2
+ # - GPLv3
+ # - Apache
+ # - CC-BY
+ license: {{ license }}
+
+ min_ansible_version: {{ min_ansible_version }}
+
+ # Optionally specify the branch Galaxy will use when accessing the GitHub
+ # repo for this role. During role install, if no tags are available,
+ # Galaxy will use this branch. During import Galaxy will access files on
+ # this branch. If travis integration is configured, only notification for this
+ # branch will be accepted. Otherwise, in all cases, the repo's default branch
+ # (usually master) will be used.
+ #github_branch:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is
+ # a keyword that describes and categorizes the role.
+ # Users find roles by searching for tags. Be sure to
+ # remove the '[]' above if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of
+ # alphanumeric characters. Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line.
+ # Be sure to remove the '[]' above if you add dependencies
+ # to this list.
+{%- for dependency in dependencies %}
+ #- {{ dependency }}
+{%- endfor %}
diff --git a/test/units/cli/test_data/role_skeleton/tasks/main.yml.j2 b/test/units/cli/test_data/role_skeleton/tasks/main.yml.j2
new file mode 100644
index 00000000..a9880650
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/tasks/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# tasks file for {{ role_name }}
diff --git a/test/units/cli/test_data/role_skeleton/templates/.git_keep b/test/units/cli/test_data/role_skeleton/templates/.git_keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/templates/.git_keep
diff --git a/test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2 b/test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2
new file mode 100644
index 00000000..b4e33641
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/templates/subfolder/test.conf.j2
@@ -0,0 +1,2 @@
+[defaults]
+test_key = {{ test_variable }}
diff --git a/test/units/cli/test_data/role_skeleton/templates/test.conf.j2 b/test/units/cli/test_data/role_skeleton/templates/test.conf.j2
new file mode 100644
index 00000000..b4e33641
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/templates/test.conf.j2
@@ -0,0 +1,2 @@
+[defaults]
+test_key = {{ test_variable }}
diff --git a/test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2 b/test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2
new file mode 100644
index 00000000..143d6302
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/templates_extra/templates.txt.j2
@@ -0,0 +1 @@
+{{ role_name }}
diff --git a/test/units/cli/test_data/role_skeleton/tests/test.yml.j2 b/test/units/cli/test_data/role_skeleton/tests/test.yml.j2
new file mode 100644
index 00000000..0c40f95a
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/tests/test.yml.j2
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - {{ role_name }}
diff --git a/test/units/cli/test_data/role_skeleton/vars/main.yml.j2 b/test/units/cli/test_data/role_skeleton/vars/main.yml.j2
new file mode 100644
index 00000000..092d511a
--- /dev/null
+++ b/test/units/cli/test_data/role_skeleton/vars/main.yml.j2
@@ -0,0 +1,2 @@
+---
+# vars file for {{ role_name }}
diff --git a/test/units/cli/test_doc.py b/test/units/cli/test_doc.py
new file mode 100644
index 00000000..d93b5aa1
--- /dev/null
+++ b/test/units/cli/test_doc.py
@@ -0,0 +1,35 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.cli.doc import DocCLI
+
+
+TTY_IFY_DATA = {
+ # No substitutions
+ 'no-op': 'no-op',
+ 'no-op Z(test)': 'no-op Z(test)',
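+ # Z() is not a recognized formatting macro, so tty_ify passes it through unchanged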
+ # Simple cases of all substitutions
+ 'I(italic)': "`italic'",
+ 'B(bold)': '*bold*',
+ 'M(ansible.builtin.module)': '[ansible.builtin.module]',
+ 'U(https://docs.ansible.com)': 'https://docs.ansible.com',
+ 'L(the user guide,https://docs.ansible.com/user-guide.html)': 'the user guide <https://docs.ansible.com/user-guide.html>',
+ 'R(the user guide,user-guide)': 'the user guide',
+ 'C(/usr/bin/file)': "`/usr/bin/file'",
+ 'HORIZONTALLINE': '\n{0}\n'.format('-' * 13),
+ # Multiple substitutions
+ 'The M(ansible.builtin.yum) module B(MUST) be given the C(package) parameter. See the R(looping docs,using-loops) for more info':
+ "The [ansible.builtin.yum] module *MUST* be given the `package' parameter. See the looping docs for more info",
+ # Problem cases
+ 'IBM(International Business Machines)': 'IBM(International Business Machines)',
+ 'L(the user guide, https://docs.ansible.com/)': 'the user guide <https://docs.ansible.com/>',
+ 'R(the user guide, user-guide)': 'the user guide',
+}
+
+
+@pytest.mark.parametrize('text, expected', sorted(TTY_IFY_DATA.items()))
+def test_ttyify(text, expected):
+ assert DocCLI.tty_ify(text) == expected
diff --git a/test/units/cli/test_galaxy.py b/test/units/cli/test_galaxy.py
new file mode 100644
index 00000000..c6c09159
--- /dev/null
+++ b/test/units/cli/test_galaxy.py
@@ -0,0 +1,1341 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ansible
+import json
+import os
+import pytest
+import shutil
+import stat
+import tarfile
+import tempfile
+import yaml
+
+import ansible.constants as C
+from ansible import context
+from ansible.cli.galaxy import GalaxyCLI
+from ansible.galaxy.api import GalaxyAPI
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils import context_objects as co
+from ansible.utils.display import Display
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+
+@pytest.fixture(autouse=True)
+def reset_cli_args():
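+ # GlobalCLIArgs caches the parsed command line process-wide; clear it before and
+ # after each test so one test's arguments cannot leak into the next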
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
+
+
+class TestGalaxy(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ '''Create prerequisites for installing a role; setUpClass runs ONCE, whereas setUp runs before every test method.'''
+ # class data for easy viewing: role_dir, role_tar, role_name, role_req, role_path
+
+ cls.temp_dir = tempfile.mkdtemp(prefix='ansible-test_galaxy-')
+ os.chdir(cls.temp_dir)
+
+ if os.path.exists("./delete_me"):
+ shutil.rmtree("./delete_me")
+
+ # creating framework for a role
+ gc = GalaxyCLI(args=["ansible-galaxy", "init", "--offline", "delete_me"])
+ gc.run()
+ cls.role_dir = "./delete_me"
+ cls.role_name = "delete_me"
+
+ # making a temp dir for role installation
+ cls.role_path = os.path.join(tempfile.mkdtemp(), "roles")
+ if not os.path.isdir(cls.role_path):
+ os.makedirs(cls.role_path)
+
+ # creating a tar file name for class data
+ cls.role_tar = './delete_me.tar.gz'
+ cls.makeTar(cls.role_tar, cls.role_dir)
+
+ # creating a temp file with installation requirements
+ cls.role_req = './delete_me_requirements.yml'
+ fd = open(cls.role_req, "w")
+ fd.write("- 'src': '%s'\n 'name': '%s'\n 'path': '%s'" % (cls.role_tar, cls.role_name, cls.role_path))
+ fd.close()
+
+ @classmethod
+ def makeTar(cls, output_file, source_dir):
+ ''' used for making a tarfile from a role directory '''
+ # adding directory into a tar file
+ # not using a context manager: tarfile objects have no __exit__ prior to Python 2.7
+ tar = tarfile.open(output_file, "w:gz")
+ try:
+ tar.add(source_dir, arcname=os.path.basename(source_dir))
+ finally: # ensure the tarfile obj is closed even if add() fails
+ tar.close()
+
+ @classmethod
+ def tearDownClass(cls):
+ '''After tests are finished, removes everything created in setUpClass.'''
+ # deleting the temp role directory
+ if os.path.exists(cls.role_dir):
+ shutil.rmtree(cls.role_dir)
+ if os.path.exists(cls.role_req):
+ os.remove(cls.role_req)
+ if os.path.exists(cls.role_tar):
+ os.remove(cls.role_tar)
+ if os.path.isdir(cls.role_path):
+ shutil.rmtree(cls.role_path)
+
+ os.chdir('/')
+ shutil.rmtree(cls.temp_dir)
+
+ def setUp(self):
+ # Reset the stored command line args
+ co.GlobalCLIArgs._Singleton__instance = None
+ self.default_args = ['ansible-galaxy']
+
+ def tearDown(self):
+ # Reset the stored command line args
+ co.GlobalCLIArgs._Singleton__instance = None
+
+ def test_init(self):
+ galaxy_cli = GalaxyCLI(args=self.default_args)
+ self.assertTrue(isinstance(galaxy_cli, GalaxyCLI))
+
+ def test_display_min(self):
+ gc = GalaxyCLI(args=self.default_args)
+ role_info = {'name': 'some_role_name'}
+ display_result = gc._display_role_info(role_info)
+ self.assertTrue(display_result.find('some_role_name') > -1)
+
+ def test_display_galaxy_info(self):
+ gc = GalaxyCLI(args=self.default_args)
+ galaxy_info = {}
+ role_info = {'name': 'some_role_name',
+ 'galaxy_info': galaxy_info}
+ display_result = gc._display_role_info(role_info)
+ if display_result.find('\n\tgalaxy_info:') == -1:
+ self.fail('Expected galaxy_info to be indented once')
+
+ def test_run(self):
+ ''' verifies that the GalaxyCLI object's api is created and that execute() is called. '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "install", "--ignore-errors", "imaginary_role"])
+ gc.parse()
+ with patch.object(ansible.cli.CLI, "run", return_value=None) as mock_run:
+ gc.run()
+ # testing
+ self.assertIsInstance(gc.galaxy, ansible.galaxy.Galaxy)
+ self.assertEqual(mock_run.call_count, 1)
+ self.assertTrue(isinstance(gc.api, ansible.galaxy.api.GalaxyAPI))
+
+ def test_execute_remove(self):
+ # installing role
+ gc = GalaxyCLI(args=["ansible-galaxy", "install", "-p", self.role_path, "-r", self.role_req, '--force'])
+ gc.run()
+
+ # location where the role was installed
+ role_file = os.path.join(self.role_path, self.role_name)
+
+ # removing role
+ # Have to reset the arguments in the context object manually since we're doing the
+ # equivalent of running the command line program twice
+ co.GlobalCLIArgs._Singleton__instance = None
+ gc = GalaxyCLI(args=["ansible-galaxy", "remove", role_file, self.role_name])
+ gc.run()
+
+ # testing role was removed
+ removed_role = not os.path.exists(role_file)
+ self.assertTrue(removed_role)
+
+ def test_exit_without_ignore_without_flag(self):
+ ''' tests that GalaxyCLI exits with the error specified if the --ignore-errors flag is not used '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name"])
+ with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display:
+ # testing that error expected is raised
+ self.assertRaises(AnsibleError, gc.run)
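+ # note: Mock creates called_once_with() on the fly, so the check below is
+ # always truthy; it mainly documents the message we expect display to receive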
+ self.assertTrue(mocked_display.called_once_with("- downloading role 'fake_role_name', owned by "))
+
+ def test_exit_without_ignore_with_flag(self):
+ ''' tests that GalaxyCLI exits without the error specified if the --ignore-errors flag is used '''
+ # testing with --ignore-errors flag
+ gc = GalaxyCLI(args=["ansible-galaxy", "install", "--server=None", "fake_role_name", "--ignore-errors"])
+ with patch.object(ansible.utils.display.Display, "display", return_value=None) as mocked_display:
+ gc.run()
+ self.assertTrue(mocked_display.called_once_with("- downloading role 'fake_role_name', owned by "))
+
+ def test_parse_no_action(self):
+ ''' testing the options parser when no action is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", ""])
+ self.assertRaises(SystemExit, gc.parse)
+
+ def test_parse_invalid_action(self):
+ ''' testing the options parser when an invalid action is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "NOT_ACTION"])
+ self.assertRaises(SystemExit, gc.parse)
+
+ def test_parse_delete(self):
+ ''' testing the options parser when the action 'delete' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "delete", "foo", "bar"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['verbosity'], 0)
+
+ def test_parse_import(self):
+ ''' testing the options parser when the action 'import' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "import", "foo", "bar"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['wait'], True)
+ self.assertEqual(context.CLIARGS['reference'], None)
+ self.assertEqual(context.CLIARGS['check_status'], False)
+ self.assertEqual(context.CLIARGS['verbosity'], 0)
+
+ def test_parse_info(self):
+ ''' testing the options parser when the action 'info' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "info", "foo", "bar"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['offline'], False)
+
+ def test_parse_init(self):
+ ''' testing the options parser when the action 'init' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "init", "foo"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['offline'], False)
+ self.assertEqual(context.CLIARGS['force'], False)
+
+ def test_parse_install(self):
+ ''' testing the options parser when the action 'install' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "install"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['ignore_errors'], False)
+ self.assertEqual(context.CLIARGS['no_deps'], False)
+ self.assertEqual(context.CLIARGS['requirements'], None)
+ self.assertEqual(context.CLIARGS['force'], False)
+
+ def test_parse_list(self):
+ ''' testing the options parser when the action 'list' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "list"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['verbosity'], 0)
+
+ def test_parse_remove(self):
+ ''' testing the options parser when the action 'remove' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "remove", "foo"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['verbosity'], 0)
+
+ def test_parse_search(self):
+ ''' testing the options parser when the action 'search' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "search"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['platforms'], None)
+ self.assertEqual(context.CLIARGS['galaxy_tags'], None)
+ self.assertEqual(context.CLIARGS['author'], None)
+
+ def test_parse_setup(self):
+ ''' testing the options parser when the action 'setup' is given '''
+ gc = GalaxyCLI(args=["ansible-galaxy", "setup", "source", "github_user", "github_repo", "secret"])
+ gc.parse()
+ self.assertEqual(context.CLIARGS['verbosity'], 0)
+ self.assertEqual(context.CLIARGS['remove_id'], None)
+ self.assertEqual(context.CLIARGS['setup_list'], False)
+
+
+class ValidRoleTests(object):
+
+ expected_role_dirs = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
+
+ @classmethod
+ def setUpRole(cls, role_name, galaxy_args=None, skeleton_path=None, use_explicit_type=False):
+ if galaxy_args is None:
+ galaxy_args = []
+
+ if skeleton_path is not None:
+ cls.role_skeleton_path = skeleton_path
+ galaxy_args += ['--role-skeleton', skeleton_path]
+
+ # Make temp directory for testing
+ cls.test_dir = tempfile.mkdtemp()
+ if not os.path.isdir(cls.test_dir):
+ os.makedirs(cls.test_dir)
+
+ cls.role_dir = os.path.join(cls.test_dir, role_name)
+ cls.role_name = role_name
+
+ # create role using default skeleton
+ args = ['ansible-galaxy']
+ if use_explicit_type:
+ args += ['role']
+ args += ['init', '-c', '--offline'] + galaxy_args + ['--init-path', cls.test_dir, cls.role_name]
+
+ gc = GalaxyCLI(args=args)
+ gc.run()
+ cls.gc = gc
+
+ if skeleton_path is None:
+ cls.role_skeleton_path = gc.galaxy.default_role_skeleton_path
+
+ @classmethod
+ def tearDownClass(cls):
+ if os.path.isdir(cls.test_dir):
+ shutil.rmtree(cls.test_dir)
+
+ def test_metadata(self):
+ with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
+ metadata = yaml.safe_load(mf)
+ self.assertIn('galaxy_info', metadata, msg='unable to find galaxy_info in metadata')
+ self.assertIn('dependencies', metadata, msg='unable to find dependencies in metadata')
+
+ def test_readme(self):
+ readme_path = os.path.join(self.role_dir, 'README.md')
+ self.assertTrue(os.path.exists(readme_path), msg='Readme doesn\'t exist')
+
+ def test_main_ymls(self):
+ need_main_ymls = set(self.expected_role_dirs) - set(['meta', 'tests', 'files', 'templates'])
+ for d in need_main_ymls:
+ main_yml = os.path.join(self.role_dir, d, 'main.yml')
+ self.assertTrue(os.path.exists(main_yml))
+ expected_string = "---\n# {0} file for {1}".format(d, self.role_name)
+ with open(main_yml, 'r') as f:
+ self.assertEqual(expected_string, f.read().strip())
+
+ def test_role_dirs(self):
+ for d in self.expected_role_dirs:
+ self.assertTrue(os.path.isdir(os.path.join(self.role_dir, d)), msg="Expected role subdirectory {0} doesn't exist".format(d))
+
+ def test_travis_yml(self):
+ with open(os.path.join(self.role_dir, '.travis.yml'), 'r') as f:
+ contents = f.read()
+
+ with open(os.path.join(self.role_skeleton_path, '.travis.yml'), 'r') as f:
+ expected_contents = f.read()
+
+ self.assertEqual(expected_contents, contents, msg='.travis.yml does not match expected')
+
+ def test_readme_contents(self):
+ with open(os.path.join(self.role_dir, 'README.md'), 'r') as readme:
+ contents = readme.read()
+
+ with open(os.path.join(self.role_skeleton_path, 'README.md'), 'r') as f:
+ expected_contents = f.read()
+
+ self.assertEqual(expected_contents, contents, msg='README.md does not match expected')
+
+ def test_test_yml(self):
+ with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
+ test_playbook = yaml.safe_load(f)
+ print(test_playbook)
+ self.assertEqual(len(test_playbook), 1)
+ self.assertEqual(test_playbook[0]['hosts'], 'localhost')
+ self.assertEqual(test_playbook[0]['remote_user'], 'root')
+ self.assertListEqual(test_playbook[0]['roles'], [self.role_name], msg='The list of roles included in the test play doesn\'t match')
+
+
+class TestGalaxyInitDefault(unittest.TestCase, ValidRoleTests):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.setUpRole(role_name='delete_me')
+
+ def test_metadata_contents(self):
+ with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
+ metadata = yaml.safe_load(mf)
+ self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')
+
+
+class TestGalaxyInitAPB(unittest.TestCase, ValidRoleTests):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.setUpRole('delete_me_apb', galaxy_args=['--type=apb'])
+
+ def test_metadata_apb_tag(self):
+ with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
+ metadata = yaml.safe_load(mf)
+ self.assertIn('apb', metadata.get('galaxy_info', dict()).get('galaxy_tags', []), msg='apb tag not set in role metadata')
+
+ def test_metadata_contents(self):
+ with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
+ metadata = yaml.safe_load(mf)
+ self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')
+
+ def test_apb_yml(self):
+ self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'apb.yml')), msg='apb.yml was not created')
+
+ def test_test_yml(self):
+ with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
+ test_playbook = yaml.safe_load(f)
+ print(test_playbook)
+ self.assertEqual(len(test_playbook), 1)
+ self.assertEqual(test_playbook[0]['hosts'], 'localhost')
+ self.assertFalse(test_playbook[0]['gather_facts'])
+ self.assertEqual(test_playbook[0]['connection'], 'local')
+ self.assertIsNone(test_playbook[0]['tasks'], msg='We\'re expecting an unset list of tasks in test.yml')
+
+
+class TestGalaxyInitContainer(unittest.TestCase, ValidRoleTests):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.setUpRole('delete_me_container', galaxy_args=['--type=container'])
+
+ def test_metadata_container_tag(self):
+ with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
+ metadata = yaml.safe_load(mf)
+ self.assertIn('container', metadata.get('galaxy_info', dict()).get('galaxy_tags', []), msg='container tag not set in role metadata')
+
+ def test_metadata_contents(self):
+ with open(os.path.join(self.role_dir, 'meta', 'main.yml'), 'r') as mf:
+ metadata = yaml.safe_load(mf)
+ self.assertEqual(metadata.get('galaxy_info', dict()).get('author'), 'your name', msg='author was not set properly in metadata')
+
+ def test_meta_container_yml(self):
+ self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'meta', 'container.yml')), msg='container.yml was not created')
+
+ def test_test_yml(self):
+ with open(os.path.join(self.role_dir, 'tests', 'test.yml'), 'r') as f:
+ test_playbook = yaml.safe_load(f)
+ print(test_playbook)
+ self.assertEqual(len(test_playbook), 1)
+ self.assertEqual(test_playbook[0]['hosts'], 'localhost')
+ self.assertFalse(test_playbook[0]['gather_facts'])
+ self.assertEqual(test_playbook[0]['connection'], 'local')
+ self.assertIsNone(test_playbook[0]['tasks'], msg='We\'re expecting an unset list of tasks in test.yml')
+
+
+class TestGalaxyInitSkeleton(unittest.TestCase, ValidRoleTests):
+
+ @classmethod
+ def setUpClass(cls):
+ role_skeleton_path = os.path.join(os.path.split(__file__)[0], 'test_data', 'role_skeleton')
+ cls.setUpRole('delete_me_skeleton', skeleton_path=role_skeleton_path, use_explicit_type=True)
+
+ def test_empty_files_dir(self):
+ files_dir = os.path.join(self.role_dir, 'files')
+ self.assertTrue(os.path.isdir(files_dir))
+ self.assertListEqual(os.listdir(files_dir), [], msg='we expect the files directory to be empty, is ignore working?')
+
+ def test_template_ignore_jinja(self):
+ test_conf_j2 = os.path.join(self.role_dir, 'templates', 'test.conf.j2')
+ self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?")
+ with open(test_conf_j2, 'r') as f:
+ contents = f.read()
+ expected_contents = '[defaults]\ntest_key = {{ test_variable }}'
+ self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?")
+
+ def test_template_ignore_jinja_subfolder(self):
+ test_conf_j2 = os.path.join(self.role_dir, 'templates', 'subfolder', 'test.conf.j2')
+ self.assertTrue(os.path.exists(test_conf_j2), msg="The test.conf.j2 template doesn't seem to exist, is it being rendered as test.conf?")
+ with open(test_conf_j2, 'r') as f:
+ contents = f.read()
+ expected_contents = '[defaults]\ntest_key = {{ test_variable }}'
+ self.assertEqual(expected_contents, contents.strip(), msg="test.conf.j2 doesn't contain what it should, is it being rendered?")
+
+ def test_template_ignore_similar_folder(self):
+ self.assertTrue(os.path.exists(os.path.join(self.role_dir, 'templates_extra', 'templates.txt')))
+
+ def test_skeleton_option(self):
+ self.assertEqual(self.role_skeleton_path, context.CLIARGS['role_skeleton'], msg='Skeleton path was not parsed properly from the command line')
+
+
+@pytest.mark.parametrize('cli_args, expected', [
+ (['ansible-galaxy', 'collection', 'init', 'abc.def'], 0),
+ (['ansible-galaxy', 'collection', 'init', 'abc.def', '-vvv'], 3),
+ (['ansible-galaxy', '-vv', 'collection', 'init', 'abc.def'], 2),
+ # Due to our manual parsing, we want to verify that a -v set in the sub parser takes precedence.
+ # This behaviour is deprecated, and these tests should be removed when the code that handles it is removed.
+ (['ansible-galaxy', '-vv', 'collection', 'init', 'abc.def', '-v'], 1),
+ (['ansible-galaxy', '-vv', 'collection', 'init', 'abc.def', '-vvvv'], 4),
+ (['ansible-galaxy', '-vvv', 'init', 'name'], 3),
+ (['ansible-galaxy', '-vvvvv', 'init', '-v', 'name'], 1),
+])
+def test_verbosity_arguments(cli_args, expected, monkeypatch):
+ # Mock out the functions so we don't actually execute anything
+ for func_name in [f for f in dir(GalaxyCLI) if f.startswith("execute_")]:
+ monkeypatch.setattr(GalaxyCLI, func_name, MagicMock())
+
+ cli = GalaxyCLI(args=cli_args)
+ cli.run()
+
+ assert context.CLIARGS['verbosity'] == expected
+
+
+@pytest.fixture()
+def collection_skeleton(request, tmp_path_factory):
+ name, skeleton_path = request.param
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'init', '-c']
+
+ if skeleton_path is not None:
+ galaxy_args += ['--collection-skeleton', skeleton_path]
+
+ test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections'))
+ galaxy_args += ['--init-path', test_dir, name]
+
+ GalaxyCLI(args=galaxy_args).run()
+ namespace_name, collection_name = name.split('.', 1)
+ collection_dir = os.path.join(test_dir, namespace_name, collection_name)
+
+ return collection_dir
+
+
+@pytest.mark.parametrize('collection_skeleton', [
+ ('ansible_test.my_collection', None),
+], indirect=True)
+def test_collection_default(collection_skeleton):
+ meta_path = os.path.join(collection_skeleton, 'galaxy.yml')
+
+ with open(meta_path, 'r') as galaxy_meta:
+ metadata = yaml.safe_load(galaxy_meta)
+
+ assert metadata['namespace'] == 'ansible_test'
+ assert metadata['name'] == 'my_collection'
+ assert metadata['authors'] == ['your name <example@domain.com>']
+ assert metadata['readme'] == 'README.md'
+ assert metadata['version'] == '1.0.0'
+ assert metadata['description'] == 'your collection description'
+ assert metadata['license'] == ['GPL-2.0-or-later']
+ assert metadata['tags'] == []
+ assert metadata['dependencies'] == {}
+ assert metadata['documentation'] == 'http://docs.example.com'
+ assert metadata['repository'] == 'http://example.com/repository'
+ assert metadata['homepage'] == 'http://example.com'
+ assert metadata['issues'] == 'http://example.com/issue/tracker'
+
+ for d in ['docs', 'plugins', 'roles']:
+ assert os.path.isdir(os.path.join(collection_skeleton, d)), \
+ "Expected collection subdirectory {0} doesn't exist".format(d)
+
+
+@pytest.mark.parametrize('collection_skeleton', [
+ ('ansible_test.delete_me_skeleton', os.path.join(os.path.split(__file__)[0], 'test_data', 'collection_skeleton')),
+], indirect=True)
+def test_collection_skeleton(collection_skeleton):
+ meta_path = os.path.join(collection_skeleton, 'galaxy.yml')
+
+ with open(meta_path, 'r') as galaxy_meta:
+ metadata = yaml.safe_load(galaxy_meta)
+
+ assert metadata['namespace'] == 'ansible_test'
+ assert metadata['name'] == 'delete_me_skeleton'
+ assert metadata['authors'] == ['Ansible Cow <acow@bovineuniversity.edu>', 'Tu Cow <tucow@bovineuniversity.edu>']
+ assert metadata['version'] == '0.1.0'
+ assert metadata['readme'] == 'README.md'
+ assert len(metadata) == 5
+
+ assert os.path.exists(os.path.join(collection_skeleton, 'README.md'))
+
+ # Test empty directories exist and are empty
+ for empty_dir in ['plugins/action', 'plugins/filter', 'plugins/inventory', 'plugins/lookup',
+ 'plugins/module_utils', 'plugins/modules']:
+
+ assert os.listdir(os.path.join(collection_skeleton, empty_dir)) == []
+
+ # Test files that don't end with .j2 were not templated
+ doc_file = os.path.join(collection_skeleton, 'docs', 'My Collection.md')
+ with open(doc_file, 'r') as f:
+ doc_contents = f.read()
+ assert doc_contents.strip() == 'Welcome to my test collection doc for {{ namespace }}.'
+
+ # Test files that end with .j2 but are in the templates directory were not templated
+ for template_dir in ['playbooks/templates', 'playbooks/templates/subfolder',
+ 'roles/common/templates', 'roles/common/templates/subfolder']:
+ test_conf_j2 = os.path.join(collection_skeleton, template_dir, 'test.conf.j2')
+ assert os.path.exists(test_conf_j2)
+
+ with open(test_conf_j2, 'r') as f:
+ contents = f.read()
+ expected_contents = '[defaults]\ntest_key = {{ test_variable }}'
+
+ assert expected_contents == contents.strip()
+
+
+@pytest.fixture()
+def collection_artifact(collection_skeleton, tmp_path_factory):
+ ''' Creates a collection artifact tarball that is ready to be published and installed '''
+ output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Output'))
+
+ # Create a file with +x in the collection so we can test the permissions
+ execute_path = os.path.join(collection_skeleton, 'runme.sh')
+ with open(execute_path, mode='wb') as fd:
+ fd.write(b"echo hi")
+
+ # S_ISUID should not be present on extraction.
+ os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_ISUID | stat.S_IEXEC)
+
+ # Because we call GalaxyCLI in collection_skeleton we need to reset the singleton back to
+ # None so it uses the new args; the original args are restored once the fixture is done.
+ orig_cli_args = co.GlobalCLIArgs._Singleton__instance
+ try:
+ co.GlobalCLIArgs._Singleton__instance = None
+ galaxy_args = ['ansible-galaxy', 'collection', 'build', collection_skeleton, '--output-path', output_dir]
+ gc = GalaxyCLI(args=galaxy_args)
+ gc.run()
+
+ yield output_dir
+ finally:
+ co.GlobalCLIArgs._Singleton__instance = orig_cli_args
+
+
+def test_invalid_skeleton_path():
+ expected = "- the skeleton path '/fake/path' does not exist, cannot init collection"
+
+ gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', 'my.collection', '--collection-skeleton',
+ '/fake/path'])
+ with pytest.raises(AnsibleError, match=expected):
+ gc.run()
+
+
+@pytest.mark.parametrize("name", [
+ "",
+ "invalid",
+ "hypen-ns.collection",
+ "ns.hyphen-collection",
+ "ns.collection.weird",
+])
+def test_invalid_collection_name_init(name):
+ expected = "Invalid collection name '%s', name must be in the format <namespace>.<collection>" % name
+
+ gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'init', name])
+ with pytest.raises(AnsibleError, match=expected):
+ gc.run()
+
+
+@pytest.mark.parametrize("name, expected", [
+ ("", ""),
+ ("invalid", "invalid"),
+ ("invalid:1.0.0", "invalid"),
+ ("hypen-ns.collection", "hypen-ns.collection"),
+ ("ns.hyphen-collection", "ns.hyphen-collection"),
+ ("ns.collection.weird", "ns.collection.weird"),
+])
+def test_invalid_collection_name_install(name, expected, tmp_path_factory):
+ install_path = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections'))
+ expected = "Invalid collection name '%s', name must be in the format <namespace>.<collection>" % expected
+
+ gc = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', name, '-p', os.path.join(install_path, 'install')])
+ with pytest.raises(AnsibleError, match=expected):
+ gc.run()
+
+
+@pytest.mark.parametrize('collection_skeleton', [
+ ('ansible_test.build_collection', None),
+], indirect=True)
+def test_collection_build(collection_artifact):
+ tar_path = os.path.join(collection_artifact, 'ansible_test-build_collection-1.0.0.tar.gz')
+ assert tarfile.is_tarfile(tar_path)
+
+ with tarfile.open(tar_path, mode='r') as tar:
+ tar_members = tar.getmembers()
+
+ valid_files = ['MANIFEST.json', 'FILES.json', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md',
+ 'runme.sh']
+ assert len(tar_members) == len(valid_files)
+
+ # Verify the uid and gid is 0 and the correct perms are set
+ for member in tar_members:
+ assert member.name in valid_files
+
+ assert member.gid == 0
+ assert member.gname == ''
+ assert member.uid == 0
+ assert member.uname == ''
+ if member.isdir() or member.name == 'runme.sh':
+ assert member.mode == 0o0755
+ else:
+ assert member.mode == 0o0644
+
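+ # the test relies on MANIFEST.json being the first archive member and FILES.json
+ # the second, which is the order the build is expected to produce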
+ manifest_file = tar.extractfile(tar_members[0])
+ try:
+ manifest = json.loads(to_text(manifest_file.read()))
+ finally:
+ manifest_file.close()
+
+ coll_info = manifest['collection_info']
+ file_manifest = manifest['file_manifest_file']
+ assert manifest['format'] == 1
+ assert len(manifest.keys()) == 3
+
+ assert coll_info['namespace'] == 'ansible_test'
+ assert coll_info['name'] == 'build_collection'
+ assert coll_info['version'] == '1.0.0'
+ assert coll_info['authors'] == ['your name <example@domain.com>']
+ assert coll_info['readme'] == 'README.md'
+ assert coll_info['tags'] == []
+ assert coll_info['description'] == 'your collection description'
+ assert coll_info['license'] == ['GPL-2.0-or-later']
+ assert coll_info['license_file'] is None
+ assert coll_info['dependencies'] == {}
+ assert coll_info['repository'] == 'http://example.com/repository'
+ assert coll_info['documentation'] == 'http://docs.example.com'
+ assert coll_info['homepage'] == 'http://example.com'
+ assert coll_info['issues'] == 'http://example.com/issue/tracker'
+ assert len(coll_info.keys()) == 14
+
+ assert file_manifest['name'] == 'FILES.json'
+ assert file_manifest['ftype'] == 'file'
+ assert file_manifest['chksum_type'] == 'sha256'
+ assert file_manifest['chksum_sha256'] is not None # Order of keys makes it hard to verify the checksum
+ assert file_manifest['format'] == 1
+ assert len(file_manifest.keys()) == 5
+
+ files_file = tar.extractfile(tar_members[1])
+ try:
+ files = json.loads(to_text(files_file.read()))
+ finally:
+ files_file.close()
+
+ assert len(files['files']) == 7
+ assert files['format'] == 1
+ assert len(files.keys()) == 2
+
+ valid_files_entries = ['.', 'roles', 'docs', 'plugins', 'plugins/README.md', 'README.md', 'runme.sh']
+ for file_entry in files['files']:
+ assert file_entry['name'] in valid_files_entries
+ assert file_entry['format'] == 1
+
+ if file_entry['name'] in ['plugins/README.md', 'runme.sh']:
+ assert file_entry['ftype'] == 'file'
+ assert file_entry['chksum_type'] == 'sha256'
+ # Can't test the actual checksum: plugins/README.md contains an html link that
+ # changes with the version, and the contents of runme.sh don't matter
+ assert file_entry['chksum_sha256'] is not None
+ elif file_entry['name'] == 'README.md':
+ assert file_entry['ftype'] == 'file'
+ assert file_entry['chksum_type'] == 'sha256'
+ assert file_entry['chksum_sha256'] == '6d8b5f9b5d53d346a8cd7638a0ec26e75e8d9773d952162779a49d25da6ef4f5'
+ else:
+ assert file_entry['ftype'] == 'dir'
+ assert file_entry['chksum_type'] is None
+ assert file_entry['chksum_sha256'] is None
+
+ assert len(file_entry.keys()) == 5
+
+
+@pytest.fixture()
+def collection_install(reset_cli_args, tmp_path_factory, monkeypatch):
+ mock_install = MagicMock()
+ monkeypatch.setattr(ansible.cli.galaxy, 'install_collections', mock_install)
+
+ mock_warning = MagicMock()
+ monkeypatch.setattr(ansible.utils.display.Display, 'warning', mock_warning)
+
+ output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Output'))
+ yield mock_install, mock_warning, output_dir
+
+
+def test_collection_install_with_names(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1',
+ '--collections-path', output_dir]
+ GalaxyCLI(args=galaxy_args).run()
+
+ collection_path = os.path.join(output_dir, 'ansible_collections')
+ assert os.path.isdir(collection_path)
+
+ assert mock_warning.call_count == 1
+ assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \
+ in mock_warning.call_args[0][0]
+
+ assert mock_install.call_count == 1
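+ # positional args handed to the mocked install_collections, by index:
+ # 0=collections, 1=output path, 2=galaxy APIs, 3=validate_certs,
+ # 4=ignore_errors, 5=no_deps, 6=force, 7=force_deps (see the flag tests below)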
+ assert mock_install.call_args[0][0] == [('namespace.collection', '*', None, None),
+ ('namespace2.collection', '1.0.1', None, None)]
+ assert mock_install.call_args[0][1] == collection_path
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+
+def test_collection_install_with_requirements_file(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ requirements_file = os.path.join(output_dir, 'requirements.yml')
+ with open(requirements_file, 'wb') as req_obj:
+ req_obj.write(b'''---
+collections:
+- namespace.coll
+- name: namespace2.coll
+ version: '>2.0.1'
+''')
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
+ '--collections-path', output_dir]
+ GalaxyCLI(args=galaxy_args).run()
+
+ collection_path = os.path.join(output_dir, 'ansible_collections')
+ assert os.path.isdir(collection_path)
+
+ assert mock_warning.call_count == 1
+ assert "The specified collections path '%s' is not part of the configured Ansible collections path" % output_dir \
+ in mock_warning.call_args[0][0]
+
+ assert mock_install.call_count == 1
+ assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None),
+ ('namespace2.coll', '>2.0.1', None, None)]
+ assert mock_install.call_args[0][1] == collection_path
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+
+def test_collection_install_with_relative_path(collection_install, monkeypatch):
+ mock_install = collection_install[0]
+
+ mock_req = MagicMock()
+ mock_req.return_value = {'collections': [('namespace.coll', '*', None, None)], 'roles': []}
+ monkeypatch.setattr(ansible.cli.galaxy.GalaxyCLI, '_parse_requirements_file', mock_req)
+
+ monkeypatch.setattr(os, 'makedirs', MagicMock())
+
+ requirements_file = './requirements.yml'
+ collections_path = './ansible_collections'
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
+ '--collections-path', collections_path]
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_count == 1
+ assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None)]
+ assert mock_install.call_args[0][1] == os.path.abspath(collections_path)
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+ assert mock_req.call_count == 1
+ assert mock_req.call_args[0][0] == os.path.abspath(requirements_file)
+
+
+def test_collection_install_with_unexpanded_path(collection_install, monkeypatch):
+ mock_install = collection_install[0]
+
+ mock_req = MagicMock()
+ mock_req.return_value = {'collections': [('namespace.coll', '*', None, None)], 'roles': []}
+ monkeypatch.setattr(ansible.cli.galaxy.GalaxyCLI, '_parse_requirements_file', mock_req)
+
+ monkeypatch.setattr(os, 'makedirs', MagicMock())
+
+ requirements_file = '~/requirements.yml'
+ collections_path = '~/ansible_collections'
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', '--requirements-file', requirements_file,
+ '--collections-path', collections_path]
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_count == 1
+ assert mock_install.call_args[0][0] == [('namespace.coll', '*', None, None)]
+ assert mock_install.call_args[0][1] == os.path.expanduser(os.path.expandvars(collections_path))
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+ assert mock_req.call_count == 1
+ assert mock_req.call_args[0][0] == os.path.expanduser(os.path.expandvars(requirements_file))
+
+
+def test_collection_install_in_collection_dir(collection_install, monkeypatch):
+ mock_install, mock_warning, output_dir = collection_install
+
+ collections_path = C.COLLECTIONS_PATHS[0]
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1',
+ '--collections-path', collections_path]
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_warning.call_count == 0
+
+ assert mock_install.call_count == 1
+ assert mock_install.call_args[0][0] == [('namespace.collection', '*', None, None),
+ ('namespace2.collection', '1.0.1', None, None)]
+ assert mock_install.call_args[0][1] == os.path.join(collections_path, 'ansible_collections')
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+
+def test_collection_install_with_url(collection_install):
+ mock_install, dummy, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'https://foo/bar/foo-bar-v1.0.0.tar.gz',
+ '--collections-path', output_dir]
+ GalaxyCLI(args=galaxy_args).run()
+
+ collection_path = os.path.join(output_dir, 'ansible_collections')
+ assert os.path.isdir(collection_path)
+
+ assert mock_install.call_count == 1
+ assert mock_install.call_args[0][0] == [('https://foo/bar/foo-bar-v1.0.0.tar.gz', '*', None, None)]
+ assert mock_install.call_args[0][1] == collection_path
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+
+def test_collection_install_name_and_requirements_fail(collection_install):
+ test_path = collection_install[2]
+ expected = 'The positional collection_name arg and --requirements-file are mutually exclusive.'
+
+ with pytest.raises(AnsibleError, match=expected):
+ GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path',
+ test_path, '--requirements-file', test_path]).run()
+
+
+def test_collection_install_no_name_and_requirements_fail(collection_install):
+ test_path = collection_install[2]
+ expected = 'You must specify a collection name or a requirements file.'
+
+ with pytest.raises(AnsibleError, match=expected):
+ GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', '--collections-path', test_path]).run()
+
+
+def test_collection_install_path_with_ansible_collections(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ collection_path = os.path.join(output_dir, 'ansible_collections')
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', 'namespace2.collection:1.0.1',
+ '--collections-path', collection_path]
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert os.path.isdir(collection_path)
+
+ assert mock_warning.call_count == 1
+ assert "The specified collections path '%s' is not part of the configured Ansible collections path" \
+ % collection_path in mock_warning.call_args[0][0]
+
+ assert mock_install.call_count == 1
+ assert mock_install.call_args[0][0] == [('namespace.collection', '*', None, None),
+ ('namespace2.collection', '1.0.1', None, None)]
+ assert mock_install.call_args[0][1] == collection_path
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+ assert mock_install.call_args[0][3] is True
+ assert mock_install.call_args[0][4] is False
+ assert mock_install.call_args[0][5] is False
+ assert mock_install.call_args[0][6] is False
+ assert mock_install.call_args[0][7] is False
+
+
+def test_collection_install_ignore_certs(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
+ '--ignore-certs']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_args[0][3] is False
+
+
+def test_collection_install_force(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
+ '--force']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_args[0][6] is True
+
+
+def test_collection_install_force_deps(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
+ '--force-with-deps']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_args[0][7] is True
+
+
+def test_collection_install_no_deps(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
+ '--no-deps']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_args[0][5] is True
+
+
+def test_collection_install_ignore(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
+ '--ignore-errors']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_install.call_args[0][4] is True
+
+
+def test_collection_install_custom_server(collection_install):
+ mock_install, mock_warning, output_dir = collection_install
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'install', 'namespace.collection', '--collections-path', output_dir,
+ '--server', 'https://galaxy-dev.ansible.com']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert len(mock_install.call_args[0][2]) == 1
+ assert mock_install.call_args[0][2][0].api_server == 'https://galaxy-dev.ansible.com'
+ assert mock_install.call_args[0][2][0].validate_certs is True
+
+
+@pytest.fixture()
+def requirements_file(request, tmp_path_factory):
+ content = request.param
+
+ test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Requirements'))
+ requirements_file = os.path.join(test_dir, 'requirements.yml')
+
+ if content:
+ with open(requirements_file, 'wb') as req_obj:
+ req_obj.write(to_bytes(content))
+
+ yield requirements_file
+
+
+@pytest.fixture()
+def requirements_cli(monkeypatch):
+ monkeypatch.setattr(GalaxyCLI, 'execute_install', MagicMock())
+ cli = GalaxyCLI(args=['ansible-galaxy', 'install'])
+ cli.run()
+ return cli
+
+
+@pytest.mark.parametrize('requirements_file', [None], indirect=True)
+def test_parse_requirements_file_that_doesnt_exist(requirements_cli, requirements_file):
+ expected = "The requirements file '%s' does not exist." % to_native(requirements_file)
+ with pytest.raises(AnsibleError, match=expected):
+ requirements_cli._parse_requirements_file(requirements_file)
+
+
+@pytest.mark.parametrize('requirements_file', ['not a valid yml file: hi: world'], indirect=True)
+def test_parse_requirements_file_that_isnt_yaml(requirements_cli, requirements_file):
+ expected = "Failed to parse the requirements yml at '%s' with the following error" % to_native(requirements_file)
+ with pytest.raises(AnsibleError, match=expected):
+ requirements_cli._parse_requirements_file(requirements_file)
+
+
+@pytest.mark.parametrize('requirements_file', [('''
+# Older role based requirements.yml
+- galaxy.role
+- anotherrole
+''')], indirect=True)
+def test_parse_requirements_in_older_format_illegal(requirements_cli, requirements_file):
+ expected = "Expecting requirements file to be a dict with the key 'collections' that contains a list of " \
+ "collections to install"
+
+ with pytest.raises(AnsibleError, match=expected):
+ requirements_cli._parse_requirements_file(requirements_file, allow_old_format=False)
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- version: 1.0.0
+'''], indirect=True)
+def test_parse_requirements_without_mandatory_name_key(requirements_cli, requirements_file):
+ expected = "Collections requirement entry should contain the key name."
+ with pytest.raises(AnsibleError, match=expected):
+ requirements_cli._parse_requirements_file(requirements_file)
+
+
+@pytest.mark.parametrize('requirements_file', [('''
+collections:
+- namespace.collection1
+- namespace.collection2
+'''), ('''
+collections:
+- name: namespace.collection1
+- name: namespace.collection2
+''')], indirect=True)
+def test_parse_requirements(requirements_cli, requirements_file):
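+    # Each parsed collection requirement is a tuple of
+    # (name, version spec, source GalaxyAPI or None, type or None) -- the
+    # shape asserted throughout these tests.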
+ expected = {
+ 'roles': [],
+ 'collections': [('namespace.collection1', '*', None, None), ('namespace.collection2', '*', None, None)]
+ }
+ actual = requirements_cli._parse_requirements_file(requirements_file)
+
+ assert actual == expected
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- name: namespace.collection1
+ version: ">=1.0.0,<=2.0.0"
+ source: https://galaxy-dev.ansible.com
+- namespace.collection2'''], indirect=True)
+def test_parse_requirements_with_extra_info(requirements_cli, requirements_file):
+ actual = requirements_cli._parse_requirements_file(requirements_file)
+
+ assert len(actual['roles']) == 0
+ assert len(actual['collections']) == 2
+ assert actual['collections'][0][0] == 'namespace.collection1'
+ assert actual['collections'][0][1] == '>=1.0.0,<=2.0.0'
+ assert actual['collections'][0][2].api_server == 'https://galaxy-dev.ansible.com'
+ assert actual['collections'][0][2].name == 'explicit_requirement_namespace.collection1'
+ assert actual['collections'][0][2].token is None
+ assert actual['collections'][0][2].username is None
+ assert actual['collections'][0][2].password is None
+ assert actual['collections'][0][2].validate_certs is True
+
+ assert actual['collections'][1] == ('namespace.collection2', '*', None, None)
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+roles:
+- username.role_name
+- src: username2.role_name2
+- src: ssh://github.com/user/repo
+ scm: git
+
+collections:
+- namespace.collection2
+'''], indirect=True)
+def test_parse_requirements_with_roles_and_collections(requirements_cli, requirements_file):
+ actual = requirements_cli._parse_requirements_file(requirements_file)
+
+ assert len(actual['roles']) == 3
+ assert actual['roles'][0].name == 'username.role_name'
+ assert actual['roles'][1].name == 'username2.role_name2'
+ assert actual['roles'][2].name == 'repo'
+ assert actual['roles'][2].src == 'ssh://github.com/user/repo'
+
+ assert len(actual['collections']) == 1
+ assert actual['collections'][0] == ('namespace.collection2', '*', None, None)
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- name: namespace.collection
+- name: namespace2.collection2
+ source: https://galaxy-dev.ansible.com/
+- name: namespace3.collection3
+ source: server
+'''], indirect=True)
+def test_parse_requirements_with_collection_source(requirements_cli, requirements_file):
+ galaxy_api = GalaxyAPI(requirements_cli.api, 'server', 'https://config-server')
+ requirements_cli.api_servers.append(galaxy_api)
+
+ actual = requirements_cli._parse_requirements_file(requirements_file)
+
+ assert actual['roles'] == []
+ assert len(actual['collections']) == 3
+ assert actual['collections'][0] == ('namespace.collection', '*', None, None)
+
+ assert actual['collections'][1][0] == 'namespace2.collection2'
+ assert actual['collections'][1][1] == '*'
+ assert actual['collections'][1][2].api_server == 'https://galaxy-dev.ansible.com/'
+ assert actual['collections'][1][2].name == 'explicit_requirement_namespace2.collection2'
+ assert actual['collections'][1][2].token is None
+
+ assert actual['collections'][2] == ('namespace3.collection3', '*', galaxy_api, None)
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+- username.included_role
+- src: https://github.com/user/repo
+'''], indirect=True)
+def test_parse_requirements_roles_with_include(requirements_cli, requirements_file):
+ reqs = [
+ 'ansible.role',
+ {'include': requirements_file},
+ ]
+ parent_requirements = os.path.join(os.path.dirname(requirements_file), 'parent.yaml')
+ with open(to_bytes(parent_requirements), 'wb') as req_fd:
+ req_fd.write(to_bytes(yaml.safe_dump(reqs)))
+
+ actual = requirements_cli._parse_requirements_file(parent_requirements)
+
+ assert len(actual['roles']) == 3
+ assert actual['collections'] == []
+ assert actual['roles'][0].name == 'ansible.role'
+ assert actual['roles'][1].name == 'username.included_role'
+ assert actual['roles'][2].name == 'repo'
+ assert actual['roles'][2].src == 'https://github.com/user/repo'
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+- username.role
+- include: missing.yml
+'''], indirect=True)
+def test_parse_requirements_roles_with_include_missing(requirements_cli, requirements_file):
+ expected = "Failed to find include requirements file 'missing.yml' in '%s'" % to_native(requirements_file)
+
+ with pytest.raises(AnsibleError, match=expected):
+ requirements_cli._parse_requirements_file(requirements_file)
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- namespace.name
+roles:
+- namespace.name
+'''], indirect=True)
+def test_install_implicit_role_with_collections(requirements_file, monkeypatch):
+ mock_collection_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
+ mock_role_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ cli = GalaxyCLI(args=['ansible-galaxy', 'install', '-r', requirements_file])
+ cli.run()
+
+ assert mock_collection_install.call_count == 1
+ assert mock_collection_install.call_args[0][0] == [('namespace.name', '*', None, None)]
+ assert mock_collection_install.call_args[0][1] == cli._get_default_collection_path()
+
+ assert mock_role_install.call_count == 1
+ assert len(mock_role_install.call_args[0][0]) == 1
+ assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name'
+
+ found = False
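+    # mock_calls entries are (name, args, kwargs) triples, so [1][0] is the
+    # first positional argument, i.e. the displayed message.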
+ for mock_call in mock_display.mock_calls:
+ if 'contains collections which will be ignored' in mock_call[1][0]:
+ found = True
+ break
+ assert not found
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- namespace.name
+roles:
+- namespace.name
+'''], indirect=True)
+def test_install_explicit_role_with_collections(requirements_file, monkeypatch):
+ mock_collection_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
+ mock_role_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_display)
+
+ cli = GalaxyCLI(args=['ansible-galaxy', 'role', 'install', '-r', requirements_file])
+ cli.run()
+
+ assert mock_collection_install.call_count == 0
+
+ assert mock_role_install.call_count == 1
+ assert len(mock_role_install.call_args[0][0]) == 1
+ assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name'
+
+ found = False
+ for mock_call in mock_display.mock_calls:
+ if 'contains collections which will be ignored' in mock_call[1][0]:
+ found = True
+ break
+ assert found
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- namespace.name
+roles:
+- namespace.name
+'''], indirect=True)
+def test_install_role_with_collections_and_path(requirements_file, monkeypatch):
+ mock_collection_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
+ mock_role_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'warning', mock_display)
+
+ cli = GalaxyCLI(args=['ansible-galaxy', 'install', '-p', 'path', '-r', requirements_file])
+ cli.run()
+
+ assert mock_collection_install.call_count == 0
+
+ assert mock_role_install.call_count == 1
+ assert len(mock_role_install.call_args[0][0]) == 1
+ assert str(mock_role_install.call_args[0][0][0]) == 'namespace.name'
+
+ found = False
+ for mock_call in mock_display.mock_calls:
+ if 'contains collections which will be ignored' in mock_call[1][0]:
+ found = True
+ break
+ assert found
+
+
+@pytest.mark.parametrize('requirements_file', ['''
+collections:
+- namespace.name
+roles:
+- namespace.name
+'''], indirect=True)
+def test_install_collection_with_roles(requirements_file, monkeypatch):
+ mock_collection_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_collection', mock_collection_install)
+ mock_role_install = MagicMock()
+ monkeypatch.setattr(GalaxyCLI, '_execute_install_role', mock_role_install)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_display)
+
+ cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', '-r', requirements_file])
+ cli.run()
+
+ assert mock_collection_install.call_count == 1
+ assert mock_collection_install.call_args[0][0] == [('namespace.name', '*', None, None)]
+ assert mock_collection_install.call_args[0][1] == cli._get_default_collection_path()
+
+ assert mock_role_install.call_count == 0
+
+ found = False
+ for mock_call in mock_display.mock_calls:
+ if 'contains roles which will be ignored' in mock_call[1][0]:
+ found = True
+ break
+ assert found
diff --git a/test/units/cli/test_playbook.py b/test/units/cli/test_playbook.py
new file mode 100644
index 00000000..f25e54df
--- /dev/null
+++ b/test/units/cli/test_playbook.py
@@ -0,0 +1,46 @@
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.mock.loader import DictDataLoader
+
+from ansible import context
+from ansible.inventory.manager import InventoryManager
+from ansible.vars.manager import VariableManager
+
+from ansible.cli.playbook import PlaybookCLI
+
+
+class TestPlaybookCLI(unittest.TestCase):
+ def test_flush_cache(self):
+ cli = PlaybookCLI(args=["ansible-playbook", "--flush-cache", "foobar.yml"])
+ cli.parse()
+ self.assertTrue(context.CLIARGS['flush_cache'])
+
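+        # Build a minimal inventory and variable manager, seed a host fact,
+        # then verify that _flush_cache drops it.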
+ variable_manager = VariableManager()
+ fake_loader = DictDataLoader({'foobar.yml': ""})
+ inventory = InventoryManager(loader=fake_loader, sources='testhost,')
+
+ variable_manager.set_host_facts('testhost', {'canary': True})
+ self.assertTrue('testhost' in variable_manager._fact_cache)
+
+ cli._flush_cache(inventory, variable_manager)
+ self.assertFalse('testhost' in variable_manager._fact_cache)
diff --git a/test/units/cli/test_vault.py b/test/units/cli/test_vault.py
new file mode 100644
index 00000000..9f2ba685
--- /dev/null
+++ b/test/units/cli/test_vault.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pytest
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+from units.mock.vault_helper import TextVaultSecret
+
+from ansible import context, errors
+from ansible.cli.vault import VaultCLI
+from ansible.module_utils._text import to_text
+from ansible.utils import context_objects as co
+
+
+# TODO: make these tests assert something, likely by verifying
+# mock calls
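+# e.g. (hypothetical sketch) something along the lines of:
+#     mock_vault_editor.return_value.encrypt_file.assert_called_once()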
+
+
+@pytest.fixture(autouse=True)
+def reset_cli_args():
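+    # GlobalCLIArgs caches parsed CLI arguments in a singleton; clear it
+    # before and after each test so every test parses its own argv.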
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
+
+
+class TestVaultCli(unittest.TestCase):
+ def setUp(self):
+ self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=False)
+ self.mock_isatty = self.tty_patcher.start()
+
+ def tearDown(self):
+ self.tty_patcher.stop()
+
+ def test_parse_empty(self):
+ cli = VaultCLI(['vaultcli'])
+ self.assertRaises(SystemExit,
+ cli.parse)
+
+    # FIXME: something weird seems to be afoot when parsing actions:
+    # cli = VaultCLI(args=['view', '/dev/null/foo', 'mysecret3'])
+    # will skip '/dev/null/foo'. Something in cli.CLI.set_action()?
+    # Maybe self.args gets modified in a loop?
+ def test_parse_view_file(self):
+ cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo'])
+ cli.parse()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ def test_view_missing_file_no_secret(self, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = []
+ cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo'])
+ cli.parse()
+ self.assertRaisesRegexp(errors.AnsibleOptionsError,
+ "A vault password is required to use Ansible's Vault",
+ cli.run)
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ def test_encrypt_missing_file_no_secret(self, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = []
+ cli = VaultCLI(args=['ansible-vault', 'encrypt', '/dev/null/foo'])
+ cli.parse()
+ self.assertRaisesRegexp(errors.AnsibleOptionsError,
+ "A vault password is required to use Ansible's Vault",
+ cli.run)
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_encrypt(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'encrypt', '/dev/null/foo'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_encrypt_string(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'encrypt_string',
+ 'some string to encrypt'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ @patch('ansible.cli.vault.display.prompt', return_value='a_prompt')
+ def test_encrypt_string_prompt(self, mock_display, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault',
+ 'encrypt_string',
+ '--prompt',
+ 'some string to encrypt'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ @patch('ansible.cli.vault.sys.stdin.read', return_value='This is data from stdin')
+ def test_encrypt_string_stdin(self, mock_stdin_read, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault',
+ 'encrypt_string',
+ '--stdin-name',
+ 'the_var_from_stdin',
+ '-'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_encrypt_string_names(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'encrypt_string',
+ '--name', 'foo1',
+ '--name', 'foo2',
+ 'some string to encrypt'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_encrypt_string_more_args_than_names(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'encrypt_string',
+ '--name', 'foo1',
+ 'some string to encrypt',
+ 'other strings',
+ 'a few more string args'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_create(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'create', '/dev/null/foo'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_edit(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'edit', '/dev/null/foo'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_decrypt(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'decrypt', '/dev/null/foo'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_view(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo'])
+ cli.parse()
+ cli.run()
+
+ @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets')
+ @patch('ansible.cli.vault.VaultEditor')
+ def test_rekey(self, mock_vault_editor, mock_setup_vault_secrets):
+ mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))]
+ cli = VaultCLI(args=['ansible-vault', 'rekey', '/dev/null/foo'])
+ cli.parse()
+ cli.run()
+
+
+@pytest.mark.parametrize('cli_args, expected', [
+ (['ansible-vault', 'view', 'vault.txt'], 0),
+ (['ansible-vault', 'view', 'vault.txt', '-vvv'], 3),
+ (['ansible-vault', '-vv', 'view', 'vault.txt'], 2),
+    # Due to our manual parsing we want to verify that -v set in the sub parser takes precedence. This behaviour is
+    # deprecated, and these tests should be removed when the code that handles it is removed.
+ (['ansible-vault', '-vv', 'view', 'vault.txt', '-v'], 1),
+ (['ansible-vault', '-vv', 'view', 'vault.txt', '-vvvv'], 4),
+])
+def test_verbosity_arguments(cli_args, expected, tmp_path_factory, monkeypatch):
+ # Add a password file so we don't get a prompt in the test
+ test_dir = to_text(tmp_path_factory.mktemp('test-ansible-vault'))
+ pass_file = os.path.join(test_dir, 'pass.txt')
+ with open(pass_file, 'w') as pass_fd:
+ pass_fd.write('password')
+
+ cli_args.extend(['--vault-id', pass_file])
+
+ # Mock out the functions so we don't actually execute anything
+ for func_name in [f for f in dir(VaultCLI) if f.startswith("execute_")]:
+ monkeypatch.setattr(VaultCLI, func_name, MagicMock())
+
+ cli = VaultCLI(args=cli_args)
+ cli.run()
+
+ assert context.CLIARGS['verbosity'] == expected
diff --git a/test/units/compat/__init__.py b/test/units/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/compat/__init__.py
diff --git a/test/units/compat/builtins.py b/test/units/compat/builtins.py
new file mode 100644
index 00000000..f60ee678
--- /dev/null
+++ b/test/units/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
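+
+# Typical use (see e.g. test_errors.py): patch the interpreter's builtin open:
+#     with patch('{0}.open'.format(BUILTINS), mock_open(read_data='...')):
+#         ...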
diff --git a/test/units/compat/mock.py b/test/units/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/test/units/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Note: we could use the PyPI mock library on Python 3.x as well as on
+# Python 2.x; it is the same as the Python 3 stdlib unittest.mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+            # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't end its last line with a newline; remove the
+            # newline that the list comprehension above naively appended
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines`
+        methods of the file handle to return. This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
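+
+# A minimal sketch (with a hypothetical file name) of using the backported
+# helper with binary read_data:
+#     m = mock_open(read_data=b'line1\nline2\n')
+#     with patch('{0}.open'.format(BUILTINS), m):  # BUILTINS from units.compat.builtins
+#         with open('data.bin', 'rb') as f:
+#             assert f.readline() == b'line1\n'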
diff --git a/test/units/compat/unittest.py b/test/units/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/test/units/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittest's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/test/units/config/manager/__init__.py b/test/units/config/manager/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/config/manager/__init__.py
diff --git a/test/units/config/manager/test_find_ini_config_file.py b/test/units/config/manager/test_find_ini_config_file.py
new file mode 100644
index 00000000..df411388
--- /dev/null
+++ b/test/units/config/manager/test_find_ini_config_file.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import stat
+
+import pytest
+
+from ansible.config.manager import find_ini_config_file
+from ansible.module_utils._text import to_text
+
+real_exists = os.path.exists
+real_isdir = os.path.isdir
+
+working_dir = os.path.dirname(__file__)
+cfg_in_cwd = os.path.join(working_dir, 'ansible.cfg')
+
+cfg_dir = os.path.join(working_dir, 'data')
+cfg_file = os.path.join(cfg_dir, 'ansible.cfg')
+alt_cfg_file = os.path.join(cfg_dir, 'test.cfg')
+cfg_in_homedir = os.path.expanduser('~/.ansible.cfg')
+
+
+@pytest.fixture
+def setup_env(request):
+ cur_config = os.environ.get('ANSIBLE_CONFIG', None)
+ cfg_path = request.param[0]
+
+    if cfg_path is None:
+        # run without ANSIBLE_CONFIG, even if the caller's environment set it
+        os.environ.pop('ANSIBLE_CONFIG', None)
+    else:
+        os.environ['ANSIBLE_CONFIG'] = cfg_path
+
+ yield
+
+    if cur_config is None:
+        os.environ.pop('ANSIBLE_CONFIG', None)
+    else:
+        os.environ['ANSIBLE_CONFIG'] = cur_config
+
+
+@pytest.fixture
+def setup_existing_files(request, monkeypatch):
+ def _os_path_exists(path):
+ if to_text(path) in (request.param[0]):
+ return True
+ else:
+ return False
+
+ def _os_access(path, access):
+ if to_text(path) in (request.param[0]):
+ return True
+ else:
+ return False
+
+ # Enable user and system dirs so that we know cwd takes precedence
+ monkeypatch.setattr("os.path.exists", _os_path_exists)
+ monkeypatch.setattr("os.access", _os_access)
+ monkeypatch.setattr("os.getcwd", lambda: os.path.dirname(cfg_dir))
+ monkeypatch.setattr("os.path.isdir", lambda path: True if to_text(path) == cfg_dir else real_isdir(path))
+
+
+class TestFindIniFile:
+ # This tells us to run twice, once with a file specified and once with a directory
+ @pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_dir], cfg_file)), indirect=['setup_env'])
+ # This just passes the list of files that exist to the fixture
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, alt_cfg_file, cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_env_has_cfg_file(self, setup_env, setup_existing_files, expected):
+ """ANSIBLE_CONFIG is specified, use it"""
+ warnings = set()
+ assert find_ini_config_file(warnings) == expected
+ assert warnings == set()
+
+ @pytest.mark.parametrize('setup_env', ([alt_cfg_file], [cfg_dir]), indirect=['setup_env'])
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd)]],
+ indirect=['setup_existing_files'])
+ def test_env_has_no_cfg_file(self, setup_env, setup_existing_files):
+ """ANSIBLE_CONFIG is specified but the file does not exist"""
+
+ warnings = set()
+ # since the cfg file specified by ANSIBLE_CONFIG doesn't exist, the one at cwd that does
+ # exist should be returned
+ assert find_ini_config_file(warnings) == cfg_in_cwd
+ assert warnings == set()
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # All config files are present
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_ini_in_cwd(self, setup_env, setup_existing_files):
+ """ANSIBLE_CONFIG not specified. Use the cwd cfg"""
+ warnings = set()
+ assert find_ini_config_file(warnings) == cfg_in_cwd
+ assert warnings == set()
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # No config in cwd
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_ini_in_homedir(self, setup_env, setup_existing_files):
+ """First config found is in the homedir"""
+ warnings = set()
+ assert find_ini_config_file(warnings) == cfg_in_homedir
+ assert warnings == set()
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # No config in cwd
+ @pytest.mark.parametrize('setup_existing_files', [[('/etc/ansible/ansible.cfg', cfg_file, alt_cfg_file)]], indirect=['setup_existing_files'])
+ def test_ini_in_systemdir(self, setup_env, setup_existing_files):
+ """First config found is the system config"""
+ warnings = set()
+ assert find_ini_config_file(warnings) == '/etc/ansible/ansible.cfg'
+ assert warnings == set()
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # No config in cwd
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_cwd_does_not_exist(self, setup_env, setup_existing_files, monkeypatch):
+ """Smoketest current working directory doesn't exist"""
+ def _os_stat(path):
+ raise OSError('%s does not exist' % path)
+ monkeypatch.setattr('os.stat', _os_stat)
+
+ warnings = set()
+ assert find_ini_config_file(warnings) == cfg_in_homedir
+ assert warnings == set()
+
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # No config in cwd
+ @pytest.mark.parametrize('setup_existing_files', [[list()]], indirect=['setup_existing_files'])
+ def test_no_config(self, setup_env, setup_existing_files):
+ """No config present, no config found"""
+ warnings = set()
+ assert find_ini_config_file(warnings) is None
+ assert warnings == set()
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # All config files are present except in cwd
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_no_cwd_cfg_no_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch):
+ """If the cwd is writable but there is no config file there, move on with no warning"""
+ real_stat = os.stat
+
+ def _os_stat(path):
+ if path == working_dir:
+ from posix import stat_result
+ stat_info = list(real_stat(path))
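+                # flip on the world-writable bit (S_IWOTH) so the cwd looks unsafe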
+ stat_info[stat.ST_MODE] |= stat.S_IWOTH
+ return stat_result(stat_info)
+ else:
+ return real_stat(path)
+
+ monkeypatch.setattr('os.stat', _os_stat)
+
+ warnings = set()
+ assert find_ini_config_file(warnings) == cfg_in_homedir
+ assert len(warnings) == 0
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # All config files are present
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_cwd_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch):
+ """If the cwd is writable, warn and skip it """
+ real_stat = os.stat
+
+ def _os_stat(path):
+ if path == working_dir:
+ from posix import stat_result
+ stat_info = list(real_stat(path))
+ stat_info[stat.ST_MODE] |= stat.S_IWOTH
+ return stat_result(stat_info)
+ else:
+ return real_stat(path)
+
+ monkeypatch.setattr('os.stat', _os_stat)
+
+ warnings = set()
+ assert find_ini_config_file(warnings) == cfg_in_homedir
+ assert len(warnings) == 1
+ warning = warnings.pop()
+ assert u'Ansible is being run in a world writable directory' in warning
+ assert u'ignoring it as an ansible.cfg source' in warning
+
+    # ANSIBLE_CONFIG is specified
+ @pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_in_cwd], cfg_in_cwd)), indirect=['setup_env'])
+ # All config files are present
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_no_warning_on_writable_if_env_used(self, setup_env, setup_existing_files, monkeypatch, expected):
+ """If the cwd is writable but ANSIBLE_CONFIG was used, no warning should be issued"""
+ real_stat = os.stat
+
+ def _os_stat(path):
+ if path == working_dir:
+ from posix import stat_result
+ stat_info = list(real_stat(path))
+ stat_info[stat.ST_MODE] |= stat.S_IWOTH
+ return stat_result(stat_info)
+ else:
+ return real_stat(path)
+
+ monkeypatch.setattr('os.stat', _os_stat)
+
+ warnings = set()
+ assert find_ini_config_file(warnings) == expected
+ assert warnings == set()
+
+ # ANSIBLE_CONFIG not specified
+ @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
+ # All config files are present
+ @pytest.mark.parametrize('setup_existing_files',
+ [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
+ indirect=['setup_existing_files'])
+ def test_cwd_warning_on_writable_no_warning_set(self, setup_env, setup_existing_files, monkeypatch):
+ """Smoketest that the function succeeds even though no warning set was passed in"""
+ real_stat = os.stat
+
+ def _os_stat(path):
+ if path == working_dir:
+ from posix import stat_result
+ stat_info = list(real_stat(path))
+ stat_info[stat.ST_MODE] |= stat.S_IWOTH
+ return stat_result(stat_info)
+ else:
+ return real_stat(path)
+
+ monkeypatch.setattr('os.stat', _os_stat)
+
+ assert find_ini_config_file() == cfg_in_homedir
diff --git a/test/units/config/test.cfg b/test/units/config/test.cfg
new file mode 100644
index 00000000..57958d87
--- /dev/null
+++ b/test/units/config/test.cfg
@@ -0,0 +1,4 @@
+[defaults]
+inikey=fromini
+matterless=lessfromini
+mattermore=morefromini
diff --git a/test/units/config/test.yml b/test/units/config/test.yml
new file mode 100644
index 00000000..384a055b
--- /dev/null
+++ b/test/units/config/test.yml
@@ -0,0 +1,55 @@
+# mock config definitions with differing use cases
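+# The &entry / &entry_multi anchors define base entries; the <<: *entry merge
+# keys below reuse them, overriding only the fields that differ.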
+config_entry: &entry
+ name: test config
+ default: DEFAULT
+ description:
+  - This does nothing, it's for testing
+ env:
+ - name: ENVVAR
+ ini:
+ - section: defaults
+ key: inikey
+ type: string
+config_entry_multi: &entry_multi
+ name: has more than one entry per config source
+ default: DEFAULT
+ description:
+  - This does nothing, it's for testing
+ env:
+ - name: MATTERLESS
+ - name: MATTERMORE
+ ini:
+ - section: defaults
+ key: matterless
+ - section: defaults
+ key: mattermore
+ type: string
+config_entry_bool:
+ <<: *entry
+ type: bool
+ default: False
+config_entry_list:
+ <<: *entry
+ type: list
+ default: [DEFAULT]
+config_entry_deprecated:
+ <<: *entry
+ deprecated: &dep
+ why: 'cause i wanna'
+ version: 9.2
+ alternative: 'none whatso ever'
+config_entry_multi_deprecated:
+ <<: *entry_multi
+ deprecated: *dep
+config_entry_multi_deprecated_source:
+ <<: *entry_multi
+ env:
+ - name: MATTERLESS
+ deprecated: *dep
+ - name: MATTERMORE
+ ini:
+ - section: defaults
+ key: matterless
+ deprecated: *dep
+ - section: defaults
+ key: mattermore
diff --git a/test/units/config/test2.cfg b/test/units/config/test2.cfg
new file mode 100644
index 00000000..da2d77b0
--- /dev/null
+++ b/test/units/config/test2.cfg
@@ -0,0 +1,4 @@
+[defaults]
+inikey=fromini2
+matterless=lessfromini2
+mattermore=morefromini2
diff --git a/test/units/config/test_data.py b/test/units/config/test_data.py
new file mode 100644
index 00000000..da043e7b
--- /dev/null
+++ b/test/units/config/test_data.py
@@ -0,0 +1,41 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.config.data import ConfigData
+from ansible.config.manager import Setting
+
+
+mykey = Setting('mykey', 'myvalue', 'test', 'string')
+mykey2 = Setting('mykey2', 'myvalue2', ['test', 'test2'], 'list')
+mykey3 = Setting('mykey3', 'myvalue3', 11111111111, 'integer')
+
+
+class TestConfigData(unittest.TestCase):
+
+ def setUp(self):
+ self.cdata = ConfigData()
+
+ def tearDown(self):
+ self.cdata = None
+
+ def test_update_setting(self):
+ for setting in [mykey, mykey2, mykey3]:
+ self.cdata.update_setting(setting)
+ self.assertEqual(setting, self.cdata._global_settings.get(setting.name))
+
+ def test_update_setting_with_plugin(self):
+ pass
+
+ def test_get_setting(self):
+ self.cdata._global_settings = {'mykey': mykey}
+ self.assertEqual(mykey, self.cdata.get_setting('mykey'))
+
+ def test_get_settings(self):
+ all_settings = {'mykey': mykey, 'mykey2': mykey2}
+ self.cdata._global_settings = all_settings
+
+ for setting in self.cdata.get_settings():
+ self.assertEqual(all_settings[setting.name], setting)
diff --git a/test/units/config/test_manager.py b/test/units/config/test_manager.py
new file mode 100644
index 00000000..d103e5e6
--- /dev/null
+++ b/test/units/config/test_manager.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import pytest
+
+from ansible.config.manager import ConfigManager, Setting, ensure_type, resolve_path, get_config_type
+from ansible.errors import AnsibleOptionsError, AnsibleError
+from ansible.module_utils.six import integer_types, string_types
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+
+curdir = os.path.dirname(__file__)
+cfg_file = os.path.join(curdir, 'test.cfg')
+cfg_file2 = os.path.join(curdir, 'test2.cfg')
+
+expected_ini = {'CONFIG_FILE': Setting(name='CONFIG_FILE', value=cfg_file, origin='', type='string'),
+ 'config_entry': Setting(name='config_entry', value=u'fromini', origin=cfg_file, type='string'),
+ 'config_entry_bool': Setting(name='config_entry_bool', value=False, origin=cfg_file, type='bool'),
+ 'config_entry_list': Setting(name='config_entry_list', value=['fromini'], origin=cfg_file, type='list'),
+ 'config_entry_deprecated': Setting(name='config_entry_deprecated', value=u'fromini', origin=cfg_file, type='string'),
+ 'config_entry_multi': Setting(name='config_entry_multi', value=u'morefromini', origin=cfg_file, type='string'),
+ 'config_entry_multi_deprecated': Setting(name='config_entry_multi_deprecated', value=u'morefromini', origin=cfg_file, type='string'),
+ 'config_entry_multi_deprecated_source': Setting(name='config_entry_multi_deprecated_source', value=u'morefromini',
+ origin=cfg_file, type='string')}
+
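+# (input value, declared config type, expected Python type) cases fed to
+# ensure_type() below.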
+ensure_test_data = [
+ ('a,b', 'list', list),
+ (['a', 'b'], 'list', list),
+ ('y', 'bool', bool),
+ ('yes', 'bool', bool),
+ ('on', 'bool', bool),
+ ('1', 'bool', bool),
+ ('true', 'bool', bool),
+ ('t', 'bool', bool),
+ (1, 'bool', bool),
+ (1.0, 'bool', bool),
+ (True, 'bool', bool),
+ ('n', 'bool', bool),
+ ('no', 'bool', bool),
+ ('off', 'bool', bool),
+ ('0', 'bool', bool),
+ ('false', 'bool', bool),
+ ('f', 'bool', bool),
+ (0, 'bool', bool),
+ (0.0, 'bool', bool),
+ (False, 'bool', bool),
+ ('10', 'int', integer_types),
+ (20, 'int', integer_types),
+ ('0.10', 'float', float),
+ (0.2, 'float', float),
+ ('/tmp/test.yml', 'pathspec', list),
+ ('/tmp/test.yml,/home/test2.yml', 'pathlist', list),
+ ('a', 'str', string_types),
+ ('a', 'string', string_types),
+ ('Café', 'string', string_types),
+ ('', 'string', string_types),
+ ('None', 'none', type(None))
+]
+
+
+class TestConfigManager:
+ @classmethod
+ def setup_class(cls):
+ cls.manager = ConfigManager(cfg_file, os.path.join(curdir, 'test.yml'))
+
+ @classmethod
+ def teardown_class(cls):
+ cls.manager = None
+
+ def test_initial_load(self):
+ assert self.manager.data._global_settings == expected_ini
+
+ @pytest.mark.parametrize("value, expected_type, python_type", ensure_test_data)
+ def test_ensure_type(self, value, expected_type, python_type):
+ assert isinstance(ensure_type(value, expected_type), python_type)
+
+ def test_resolve_path(self):
+ assert os.path.join(curdir, 'test.yml') == resolve_path('./test.yml', cfg_file)
+
+ def test_resolve_path_cwd(self):
+ assert os.path.join(os.getcwd(), 'test.yml') == resolve_path('{{CWD}}/test.yml')
+ assert os.path.join(os.getcwd(), 'test.yml') == resolve_path('./test.yml')
+
+ def test_value_and_origin_from_ini(self):
+ assert self.manager.get_config_value_and_origin('config_entry') == ('fromini', cfg_file)
+
+ def test_value_from_ini(self):
+ assert self.manager.get_config_value('config_entry') == 'fromini'
+
+ def test_value_and_origin_from_alt_ini(self):
+ assert self.manager.get_config_value_and_origin('config_entry', cfile=cfg_file2) == ('fromini2', cfg_file2)
+
+ def test_value_from_alt_ini(self):
+ assert self.manager.get_config_value('config_entry', cfile=cfg_file2) == 'fromini2'
+
+ def test_config_types(self):
+ assert get_config_type('/tmp/ansible.ini') == 'ini'
+ assert get_config_type('/tmp/ansible.cfg') == 'ini'
+ assert get_config_type('/tmp/ansible.yaml') == 'yaml'
+ assert get_config_type('/tmp/ansible.yml') == 'yaml'
+
+ def test_config_types_negative(self):
+ with pytest.raises(AnsibleOptionsError) as exec_info:
+ get_config_type('/tmp/ansible.txt')
+ assert "Unsupported configuration file extension for" in str(exec_info.value)
+
+ def test_read_config_yaml_file(self):
+ assert isinstance(self.manager._read_config_yaml_file(os.path.join(curdir, 'test.yml')), dict)
+
+ def test_read_config_yaml_file_negative(self):
+ with pytest.raises(AnsibleError) as exec_info:
+ self.manager._read_config_yaml_file(os.path.join(curdir, 'test_non_existent.yml'))
+
+ assert "Missing base YAML definition file (bad install?)" in str(exec_info.value)
+
+ def test_entry_as_vault_var(self):
+ class MockVault:
+
+ def decrypt(self, value):
+ return value
+
+ vault_var = AnsibleVaultEncryptedUnicode(b"vault text")
+ vault_var.vault = MockVault()
+
+ actual_value, actual_origin = self.manager._loop_entries({'name': vault_var}, [{'name': 'name'}])
+ assert actual_value == "vault text"
+ assert actual_origin == "name"
+
+ @pytest.mark.parametrize("value_type", ("str", "string", None))
+ def test_ensure_type_with_vaulted_str(self, value_type):
+ class MockVault:
+ def decrypt(self, value):
+ return value
+
+ vault_var = AnsibleVaultEncryptedUnicode(b"vault text")
+ vault_var.vault = MockVault()
+
+ actual_value = ensure_type(vault_var, value_type)
+ assert actual_value == "vault text"
diff --git a/test/units/errors/__init__.py b/test/units/errors/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/errors/__init__.py
diff --git a/test/units/errors/test_errors.py b/test/units/errors/test_errors.py
new file mode 100644
index 00000000..ab5c19cd
--- /dev/null
+++ b/test/units/errors/test_errors.py
@@ -0,0 +1,121 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from units.compat import unittest
+from units.compat.builtins import BUILTINS
+from units.compat.mock import mock_open, patch
+from ansible.errors import AnsibleError
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+
+
+class TestErrors(unittest.TestCase):
+
+ def setUp(self):
+ self.message = 'This is the error message'
+ self.unicode_message = 'This is an error with \xf0\x9f\x98\xa8 in it'
+
+ self.obj = AnsibleBaseYAMLObject()
+
+ def test_basic_error(self):
+ e = AnsibleError(self.message)
+ self.assertEqual(e.message, self.message)
+ self.assertEqual(e.__repr__(), self.message)
+
+ def test_basic_unicode_error(self):
+ e = AnsibleError(self.unicode_message)
+ self.assertEqual(e.message, self.unicode_message)
+ self.assertEqual(e.__repr__(), self.unicode_message)
+
+ @patch.object(AnsibleError, '_get_error_lines_from_file')
+ def test_error_with_kv(self, mock_method):
+ ''' This tests a task with both YAML and k=v syntax
+
+ - lineinfile: line=foo path=bar
+ line: foo
+
+ An accurate error message and position indicator are expected.
+
+ _get_error_lines_from_file() returns (target_line, prev_line)
+ '''
+
+ self.obj.ansible_pos = ('foo.yml', 2, 1)
+
+ mock_method.return_value = [' line: foo\n', '- lineinfile: line=foo path=bar\n']
+
+ e = AnsibleError(self.message, self.obj)
+ self.assertEqual(
+ e.message,
+ ("This is the error message\n\nThe error appears to be in 'foo.yml': line 1, column 19, but may\nbe elsewhere in the "
+ "file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n- lineinfile: line=foo path=bar\n"
+ " ^ here\n\n"
+ "There appears to be both 'k=v' shorthand syntax and YAML in this task. Only one syntax may be used.\n")
+ )
+
+ @patch.object(AnsibleError, '_get_error_lines_from_file')
+ def test_error_with_object(self, mock_method):
+ self.obj.ansible_pos = ('foo.yml', 1, 1)
+
+ mock_method.return_value = ('this is line 1\n', '')
+ e = AnsibleError(self.message, self.obj)
+
+ self.assertEqual(
+ e.message,
+ ("This is the error message\n\nThe error appears to be in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the "
+ "exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
+ )
+
+ def test_get_error_lines_from_file(self):
+ m = mock_open()
+ m.return_value.readlines.return_value = ['this is line 1\n']
+
+ with patch('{0}.open'.format(BUILTINS), m):
+ # this line will be found in the file
+ self.obj.ansible_pos = ('foo.yml', 1, 1)
+ e = AnsibleError(self.message, self.obj)
+ self.assertEqual(
+ e.message,
+ ("This is the error message\n\nThe error appears to be in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on "
+ "the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
+ )
+
+ # this line will not be found, as it is out of the index range
+ self.obj.ansible_pos = ('foo.yml', 2, 1)
+ e = AnsibleError(self.message, self.obj)
+ self.assertEqual(
+ e.message,
+ ("This is the error message\n\nThe error appears to be in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on "
+ "the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")
+ )
+
+ m = mock_open()
+ m.return_value.readlines.return_value = ['this line has unicode \xf0\x9f\x98\xa8 in it!\n']
+
+ with patch('{0}.open'.format(BUILTINS), m):
+ # this line will be found in the file
+ self.obj.ansible_pos = ('foo.yml', 1, 1)
+ e = AnsibleError(self.unicode_message, self.obj)
+ self.assertEqual(
+ e.message,
+ ("This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to be in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the "
+ "file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ "
+ "here\n")
+ )
diff --git a/test/units/executor/__init__.py b/test/units/executor/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/executor/__init__.py
diff --git a/test/units/executor/module_common/test_modify_module.py b/test/units/executor/module_common/test_modify_module.py
new file mode 100644
index 00000000..dceef763
--- /dev/null
+++ b/test/units/executor/module_common/test_modify_module.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.executor.module_common import modify_module
+from ansible.module_utils.six import PY2
+
+from test_module_common import templar
+
+
+FAKE_OLD_MODULE = b'''#!/usr/bin/python
+import sys
+print('{"result": "%s"}' % sys.executable)
+'''
+
+
+@pytest.fixture
+def fake_old_module_open(mocker):
+ m = mocker.mock_open(read_data=FAKE_OLD_MODULE)
+ if PY2:
+ mocker.patch('__builtin__.open', m)
+ else:
+ mocker.patch('builtins.open', m)
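+
+# The fixture above uses pytest-mock's mocker.mock_open/mocker.patch to
+# replace the built-in open(), so modify_module() reads FAKE_OLD_MODULE
+# instead of touching the filesystem; the PY2/PY3 branch is needed because
+# the open builtin lives in __builtin__ on Python 2 and builtins on Python 3.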
+
+# this test no longer makes sense, since a Python module will always either have interpreter discovery run or
+# an explicit interpreter passed (so we'll never default to the module shebang)
+# def test_shebang(fake_old_module_open, templar):
+# (data, style, shebang) = modify_module('fake_module', 'fake_path', {}, templar)
+# assert shebang == '#!/usr/bin/python'
+
+
+def test_shebang_task_vars(fake_old_module_open, templar):
+ task_vars = {
+ 'ansible_python_interpreter': '/usr/bin/python3'
+ }
+
+ (data, style, shebang) = modify_module('fake_module', 'fake_path', {}, templar, task_vars=task_vars)
+ assert shebang == '#!/usr/bin/python3'
diff --git a/test/units/executor/module_common/test_module_common.py b/test/units/executor/module_common/test_module_common.py
new file mode 100644
index 00000000..04bae85d
--- /dev/null
+++ b/test/units/executor/module_common/test_module_common.py
@@ -0,0 +1,197 @@
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os.path
+
+import pytest
+
+import ansible.errors
+
+from ansible.executor import module_common as amc
+from ansible.executor.interpreter_discovery import InterpreterDiscoveryRequiredError
+from ansible.module_utils.six import PY2
+
+
+class TestStripComments:
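+ # Judging by the cases below, _strip_comments appears to drop whole-line
+ # comments (including the shebang) and blank or whitespace-only lines,
+ # while leaving '#' characters inside string literals untouched.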
+ def test_no_changes(self):
+ no_comments = u"""def some_code():
+ return False"""
+ assert amc._strip_comments(no_comments) == no_comments
+
+ def test_all_comments(self):
+ all_comments = u"""# This is a test
+ # Being as it is
+ # To be
+ """
+ assert amc._strip_comments(all_comments) == u""
+
+ def test_all_whitespace(self):
+ # Note: Do not remove the spaces on the blank lines below. They're
+ # test data to show that the lines get removed despite having spaces
+ # on them
+ all_whitespace = u"""
+
+
+
+\t\t\r\n
+ """ # nopep8
+ assert amc._strip_comments(all_whitespace) == u""
+
+ def test_somewhat_normal(self):
+ mixed = u"""#!/usr/bin/python
+
+# here we go
+def test(arg):
+ # this is a thing
+ thing = '# test'
+ return thing
+# End
+"""
+ mixed_results = u"""def test(arg):
+ thing = '# test'
+ return thing"""
+ assert amc._strip_comments(mixed) == mixed_results
+
+
+class TestSlurp:
+ def test_slurp_nonexistent(self, mocker):
+ mocker.patch('os.path.exists', side_effect=lambda x: False)
+ with pytest.raises(ansible.errors.AnsibleError):
+ amc._slurp('no_file')
+
+ def test_slurp_file(self, mocker):
+ mocker.patch('os.path.exists', side_effect=lambda x: True)
+ m = mocker.mock_open(read_data='This is a test')
+ if PY2:
+ mocker.patch('__builtin__.open', m)
+ else:
+ mocker.patch('builtins.open', m)
+ assert amc._slurp('some_file') == 'This is a test'
+
+ def test_slurp_file_with_newlines(self, mocker):
+ mocker.patch('os.path.exists', side_effect=lambda x: True)
+ m = mocker.mock_open(read_data='#!/usr/bin/python\ndef test(args):\nprint("hi")\n')
+ if PY2:
+ mocker.patch('__builtin__.open', m)
+ else:
+ mocker.patch('builtins.open', m)
+ assert amc._slurp('some_file') == '#!/usr/bin/python\ndef test(args):\nprint("hi")\n'
+
+
+@pytest.fixture
+def templar():
+ class FakeTemplar:
+ def template(self, template_string, *args, **kwargs):
+ return template_string
+
+ return FakeTemplar()
+
+
+class TestGetShebang:
+ """Note: We may want to change the API of this function in the future. It isn't a great API"""
+ def test_no_interpreter_set(self, templar):
+ # normally this would return /usr/bin/python, but so long as we're defaulting to auto python discovery, we'll get
+ # an InterpreterDiscoveryRequiredError here instead
+ with pytest.raises(InterpreterDiscoveryRequiredError):
+ amc._get_shebang(u'/usr/bin/python', {}, templar)
+
+ def test_non_python_interpreter(self, templar):
+ assert amc._get_shebang(u'/usr/bin/ruby', {}, templar) == (None, u'/usr/bin/ruby')
+
+ def test_interpreter_set_in_task_vars(self, templar):
+ assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/pypy'}, templar) == \
+ (u'#!/usr/bin/pypy', u'/usr/bin/pypy')
+
+ def test_non_python_interpreter_in_task_vars(self, templar):
+ assert amc._get_shebang(u'/usr/bin/ruby', {u'ansible_ruby_interpreter': u'/usr/local/bin/ruby'}, templar) == \
+ (u'#!/usr/local/bin/ruby', u'/usr/local/bin/ruby')
+
+ def test_with_args(self, templar):
+ assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/python3'}, templar, args=('-tt', '-OO')) == \
+ (u'#!/usr/bin/python3 -tt -OO', u'/usr/bin/python3')
+
+ def test_python_via_env(self, templar):
+ assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/env python'}, templar) == \
+ (u'#!/usr/bin/env python', u'/usr/bin/env python')
+
+
+class TestDetectionRegexes:
+ ANSIBLE_MODULE_UTIL_STRINGS = (
+ # Absolute collection imports
+ b'import ansible_collections.my_ns.my_col.plugins.module_utils.my_util',
+ b'from ansible_collections.my_ns.my_col.plugins.module_utils import my_util',
+ b'from ansible_collections.my_ns.my_col.plugins.module_utils.my_util import my_func',
+ # Absolute core imports
+ b'import ansible.module_utils.basic',
+ b'from ansible.module_utils import basic',
+ b'from ansible.module_utils.basic import AnsibleModule',
+ # Relative imports
+ b'from ..module_utils import basic',
+ b'from .. module_utils import basic',
+ b'from ....module_utils import basic',
+ b'from ..module_utils.basic import AnsibleModule',
+ )
+ NOT_ANSIBLE_MODULE_UTIL_STRINGS = (
+ b'from ansible import release',
+ b'from ..release import __version__',
+ b'from .. import release',
+ b'from ansible.modules.system import ping',
+ b'from ansible_collecitons.my_ns.my_col.plugins.modules import function',
+ )
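+ # None of the strings above reference a module_utils package (note that the
+ # last entry also misspells 'ansible_collections'), so the new-style module
+ # detection regex must not match any of them.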
+
+ OFFSET = os.path.dirname(os.path.dirname(amc.__file__))
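+ # OFFSET above: amc.__file__ lives at .../ansible/executor/module_common.py,
+ # so two dirname() calls should yield the ansible package root used to build
+ # the test paths below.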
+ CORE_PATHS = (
+ ('%s/modules/from_role.py' % OFFSET, 'ansible/modules/from_role'),
+ ('%s/modules/system/ping.py' % OFFSET, 'ansible/modules/system/ping'),
+ ('%s/modules/cloud/amazon/s3.py' % OFFSET, 'ansible/modules/cloud/amazon/s3'),
+ )
+
+ COLLECTION_PATHS = (
+ ('/root/ansible_collections/ns/col/plugins/modules/ping.py',
+ 'ansible_collections/ns/col/plugins/modules/ping'),
+ ('/root/ansible_collections/ns/col/plugins/modules/subdir/ping.py',
+ 'ansible_collections/ns/col/plugins/modules/subdir/ping'),
+ )
+
+ @pytest.mark.parametrize('testcase', ANSIBLE_MODULE_UTIL_STRINGS)
+ def test_detect_new_style_python_module_re(self, testcase):
+ assert amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase)
+
+ @pytest.mark.parametrize('testcase', NOT_ANSIBLE_MODULE_UTIL_STRINGS)
+ def test_no_detect_new_style_python_module_re(self, testcase):
+ assert not amc.NEW_STYLE_PYTHON_MODULE_RE.search(testcase)
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('testcase, result', CORE_PATHS) # pylint: disable=undefined-variable
+ def test_detect_core_library_path_re(self, testcase, result):
+ assert amc.CORE_LIBRARY_PATH_RE.search(testcase).group('path') == result
+
+ @pytest.mark.parametrize('testcase', (p[0] for p in COLLECTION_PATHS)) # pylint: disable=undefined-variable
+ def test_no_detect_core_library_path_re(self, testcase):
+ assert not amc.CORE_LIBRARY_PATH_RE.search(testcase)
+
+ @pytest.mark.parametrize('testcase, result', COLLECTION_PATHS) # pylint: disable=undefined-variable
+ def test_detect_collection_path_re(self, testcase, result):
+ assert amc.COLLECTION_PATH_RE.search(testcase).group('path') == result
+
+ @pytest.mark.parametrize('testcase', (p[0] for p in CORE_PATHS)) # pylint: disable=undefined-variable
+ def test_no_detect_collection_path_re(self, testcase):
+ assert not amc.COLLECTION_PATH_RE.search(testcase)
diff --git a/test/units/executor/module_common/test_recursive_finder.py b/test/units/executor/module_common/test_recursive_finder.py
new file mode 100644
index 00000000..c72973f8
--- /dev/null
+++ b/test/units/executor/module_common/test_recursive_finder.py
@@ -0,0 +1,127 @@
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pytest
+import zipfile
+
+from collections import namedtuple
+from io import BytesIO
+
+import ansible.errors
+
+from ansible.executor.module_common import recursive_finder
+from ansible.module_utils.six import PY2
+
+
+# These are the modules that are brought in by module_utils/basic.py. This may need to be updated
+# when basic.py gains new imports.
+# We will remove these when we modify AnsiBallZ to store its args in a separate file instead of in
+# basic.py.
+
+MODULE_UTILS_BASIC_FILES = frozenset(('ansible/__init__.py',
+ 'ansible/module_utils/__init__.py',
+ 'ansible/module_utils/_text.py',
+ 'ansible/module_utils/basic.py',
+ 'ansible/module_utils/six/__init__.py',
+ 'ansible/module_utils/common/_collections_compat.py',
+ 'ansible/module_utils/common/_json_compat.py',
+ 'ansible/module_utils/common/collections.py',
+ 'ansible/module_utils/common/parameters.py',
+ 'ansible/module_utils/common/warnings.py',
+ 'ansible/module_utils/parsing/convert_bool.py',
+ 'ansible/module_utils/common/__init__.py',
+ 'ansible/module_utils/common/file.py',
+ 'ansible/module_utils/common/process.py',
+ 'ansible/module_utils/common/sys_info.py',
+ 'ansible/module_utils/common/text/__init__.py',
+ 'ansible/module_utils/common/text/converters.py',
+ 'ansible/module_utils/common/text/formatters.py',
+ 'ansible/module_utils/common/validation.py',
+ 'ansible/module_utils/common/_utils.py',
+ 'ansible/module_utils/compat/__init__.py',
+ 'ansible/module_utils/compat/_selectors2.py',
+ 'ansible/module_utils/compat/selectors.py',
+ 'ansible/module_utils/distro/__init__.py',
+ 'ansible/module_utils/distro/_distro.py',
+ 'ansible/module_utils/parsing/__init__.py',
+ 'ansible/module_utils/pycompat24.py',
+ ))
+
+ONLY_BASIC_FILE = frozenset(('ansible/module_utils/basic.py',))
+
+ANSIBLE_LIB = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'lib', 'ansible')
+
+
+@pytest.fixture
+def finder_containers():
+ FinderContainers = namedtuple('FinderContainers', ['zf'])
+
+ zipoutput = BytesIO()
+ zf = zipfile.ZipFile(zipoutput, mode='w', compression=zipfile.ZIP_STORED)
+
+ return FinderContainers(zf)
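+
+# The fixture above hands recursive_finder() an in-memory ZipFile (backed by
+# BytesIO) wrapped in a namedtuple, so each test can unpack it with
+# *finder_containers and then inspect which files were embedded via namelist().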
+
+
+class TestRecursiveFinder(object):
+ def test_no_module_utils(self, finder_containers):
+ name = 'ping'
+ data = b'#!/usr/bin/python\nreturn \'{\"changed\": false}\''
+ recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers)
+ assert frozenset(finder_containers.zf.namelist()) == MODULE_UTILS_BASIC_FILES
+
+ def test_module_utils_with_syntax_error(self, finder_containers):
+ name = 'fake_module'
+ data = b'#!/usr/bin/python\ndef something(:\n pass\n'
+ with pytest.raises(ansible.errors.AnsibleError) as exec_info:
+ recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'fake_module.py'), data, *finder_containers)
+ assert 'Unable to import fake_module due to invalid syntax' in str(exec_info.value)
+
+ def test_module_utils_with_indentation_error(self, finder_containers):
+ name = 'fake_module'
+ data = b'#!/usr/bin/python\n def something():\n pass\n'
+ with pytest.raises(ansible.errors.AnsibleError) as exec_info:
+ recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'fake_module.py'), data, *finder_containers)
+ assert 'Unable to import fake_module due to unexpected indent' in str(exec_info.value)
+
+ #
+ # Test importing six with many permutations because it is not a normal module
+ #
+ def test_from_import_six(self, finder_containers):
+ name = 'ping'
+ data = b'#!/usr/bin/python\nfrom ansible.module_utils import six'
+ recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers)
+ assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py', )).union(MODULE_UTILS_BASIC_FILES)
+
+ def test_import_six(self, finder_containers):
+ name = 'ping'
+ data = b'#!/usr/bin/python\nimport ansible.module_utils.six'
+ recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers)
+ assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py', )).union(MODULE_UTILS_BASIC_FILES)
+
+ def test_import_six_from_many_submodules(self, finder_containers):
+ name = 'ping'
+ data = b'#!/usr/bin/python\nfrom ansible.module_utils.six.moves.urllib.parse import urlparse'
+ recursive_finder(name, os.path.join(ANSIBLE_LIB, 'modules', 'system', 'ping.py'), data, *finder_containers)
+ assert frozenset(finder_containers.zf.namelist()) == frozenset(('ansible/module_utils/six/__init__.py',)).union(MODULE_UTILS_BASIC_FILES)
diff --git a/test/units/executor/test_interpreter_discovery.py b/test/units/executor/test_interpreter_discovery.py
new file mode 100644
index 00000000..10f97d63
--- /dev/null
+++ b/test/units/executor/test_interpreter_discovery.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+# (c) 2019, Jordan Borean <jborean@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import MagicMock
+
+from ansible.executor.interpreter_discovery import discover_interpreter
+from ansible.module_utils._text import to_text
+
+mock_ubuntu_platform_res = to_text(
+ r'{"osrelease_content": "NAME=\"Ubuntu\"\nVERSION=\"16.04.5 LTS (Xenial Xerus)\"\nID=ubuntu\nID_LIKE=debian\n'
+ r'PRETTY_NAME=\"Ubuntu 16.04.5 LTS\"\nVERSION_ID=\"16.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\n'
+ r'SUPPORT_URL=\"http://help.ubuntu.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"\n'
+ r'VERSION_CODENAME=xenial\nUBUNTU_CODENAME=xenial\n", "platform_dist_result": ["Ubuntu", "16.04", "xenial"]}'
+)
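+
+# The tests below mock the two remote probes that discover_interpreter() is
+# expected to run via _low_level_execute_command: a bootstrap script that
+# prints the platform and any found interpreters between the FOUND/ENDFOUND
+# markers, followed by a JSON payload describing the OS release, from which
+# the distro's preferred Python (/usr/bin/python3 on Ubuntu 16.04) is derived.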
+
+
+def test_discovery_interpreter_linux_auto_legacy():
+ res1 = u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3.5\n/usr/bin/python3\nENDFOUND'
+
+ mock_action = MagicMock()
+ mock_action._low_level_execute_command.side_effect = [{'stdout': res1}, {'stdout': mock_ubuntu_platform_res}]
+
+ actual = discover_interpreter(mock_action, 'python', 'auto_legacy', {'inventory_hostname': u'host-fóöbär'})
+
+ assert actual == u'/usr/bin/python'
+ assert len(mock_action.method_calls) == 3
+ assert mock_action.method_calls[2][0] == '_discovery_deprecation_warnings.append'
+ assert u'Distribution Ubuntu 16.04 on host host-fóöbär should use /usr/bin/python3, but is using /usr/bin/python' \
+ u' for backward compatibility' in mock_action.method_calls[2][1][0]['msg']
+ assert mock_action.method_calls[2][1][0]['version'] == '2.12'
+
+
+def test_discovery_interpreter_linux_auto_legacy_silent():
+ res1 = u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3.5\n/usr/bin/python3\nENDFOUND'
+
+ mock_action = MagicMock()
+ mock_action._low_level_execute_command.side_effect = [{'stdout': res1}, {'stdout': mock_ubuntu_platform_res}]
+
+ actual = discover_interpreter(mock_action, 'python', 'auto_legacy_silent', {'inventory_hostname': u'host-fóöbär'})
+
+ assert actual == u'/usr/bin/python'
+ assert len(mock_action.method_calls) == 2
+
+
+def test_discovery_interpreter_linux_auto():
+ res1 = u'PLATFORM\nLinux\nFOUND\n/usr/bin/python\n/usr/bin/python3.5\n/usr/bin/python3\nENDFOUND'
+
+ mock_action = MagicMock()
+ mock_action._low_level_execute_command.side_effect = [{'stdout': res1}, {'stdout': mock_ubuntu_platform_res}]
+
+ actual = discover_interpreter(mock_action, 'python', 'auto', {'inventory_hostname': u'host-fóöbär'})
+
+ assert actual == u'/usr/bin/python3'
+ assert len(mock_action.method_calls) == 2
+
+
+def test_discovery_interpreter_non_linux():
+ mock_action = MagicMock()
+ mock_action._low_level_execute_command.return_value = \
+ {'stdout': u'PLATFORM\nDarwin\nFOUND\n/usr/bin/python\nENDFOUND'}
+
+ actual = discover_interpreter(mock_action, 'python', 'auto_legacy', {'inventory_hostname': u'host-fóöbär'})
+
+ assert actual == u'/usr/bin/python'
+ assert len(mock_action.method_calls) == 2
+ assert mock_action.method_calls[1][0] == '_discovery_warnings.append'
+ assert u'Platform darwin on host host-fóöbär is using the discovered Python interpreter at /usr/bin/python, ' \
+ u'but future installation of another Python interpreter could change the meaning of that path' \
+ in mock_action.method_calls[1][1][0]
+
+
+def test_no_interpreters_found():
+ mock_action = MagicMock()
+ mock_action._low_level_execute_command.return_value = {'stdout': u'PLATFORM\nWindows\nFOUND\nENDFOUND'}
+
+ actual = discover_interpreter(mock_action, 'python', 'auto_legacy', {'inventory_hostname': u'host-fóöbär'})
+
+ assert actual == u'/usr/bin/python'
+ assert len(mock_action.method_calls) == 2
+ assert mock_action.method_calls[1][0] == '_discovery_warnings.append'
+ assert u'No python interpreters found for host host-fóöbär (tried' \
+ in mock_action.method_calls[1][1][0]
diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py
new file mode 100644
index 00000000..4ccfd69a
--- /dev/null
+++ b/test/units/executor/test_play_iterator.py
@@ -0,0 +1,458 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible.executor.play_iterator import HostState, PlayIterator
+from ansible.playbook import Playbook
+from ansible.playbook.play_context import PlayContext
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+
+class TestPlayIterator(unittest.TestCase):
+
+ def test_host_state(self):
+ hs = HostState(blocks=[x for x in range(0, 10)])
+ hs.tasks_child_state = HostState(blocks=[0])
+ hs.rescue_child_state = HostState(blocks=[1])
+ hs.always_child_state = HostState(blocks=[2])
+ hs.__repr__()
+ hs.run_state = 100
+ hs.__repr__()
+ hs.fail_state = 15
+ hs.__repr__()
+
+ for i in range(0, 10):
+ hs.cur_block = i
+ self.assertEqual(hs.get_current_block(), i)
+
+ new_hs = hs.copy()
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_play_iterator(self):
+ fake_loader = DictDataLoader({
+ "test_play.yml": """
+ - hosts: all
+ gather_facts: false
+ roles:
+ - test_role
+ pre_tasks:
+ - debug: msg="this is a pre_task"
+ tasks:
+ - debug: msg="this is a regular task"
+ - block:
+ - debug: msg="this is a block task"
+ - block:
+ - debug: msg="this is a sub-block in a block"
+ rescue:
+ - debug: msg="this is a rescue task"
+ - block:
+ - debug: msg="this is a sub-block in a rescue"
+ always:
+ - debug: msg="this is an always task"
+ - block:
+ - debug: msg="this is a sub-block in an always"
+ post_tasks:
+ - debug: msg="this is a post_task"
+ """,
+ '/etc/ansible/roles/test_role/tasks/main.yml': """
+ - name: role task
+ debug: msg="this is a role task"
+ - block:
+ - name: role block task
+ debug: msg="inside block in role"
+ always:
+ - name: role always task
+ debug: msg="always task in block in role"
+ - include: foo.yml
+ - name: role task after include
+ debug: msg="after include in role"
+ - block:
+ - name: starting role nested block 1
+ debug:
+ - block:
+ - name: role nested block 1 task 1
+ debug:
+ - name: role nested block 1 task 2
+ debug:
+ - name: role nested block 1 task 3
+ debug:
+ - name: end of role nested block 1
+ debug:
+ - name: starting role nested block 2
+ debug:
+ - block:
+ - name: role nested block 2 task 1
+ debug:
+ - name: role nested block 2 task 2
+ debug:
+ - name: role nested block 2 task 3
+ debug:
+ - name: end of role nested block 2
+ debug:
+ """,
+ '/etc/ansible/roles/test_role/tasks/foo.yml': """
+ - name: role included task
+ debug: msg="this is task in an include from a role"
+ """
+ })
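+
+ # The assertions below walk the iterator through the expected task order:
+ # the pre_task, an implicit 'meta: flush_handlers', the role's tasks and
+ # blocks, the play's own tasks with their block/rescue/always sections,
+ # and finally the post_task bracketed by two more implicit flush_handlers
+ # metas.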
+
+ mock_var_manager = MagicMock()
+ mock_var_manager._fact_cache = dict()
+ mock_var_manager.get_vars.return_value = dict()
+
+ p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
+
+ hosts = []
+ for i in range(0, 10):
+ host = MagicMock()
+ host.name = host.get_name.return_value = 'host%02d' % i
+ hosts.append(host)
+
+ mock_var_manager._fact_cache['host00'] = dict()
+
+ inventory = MagicMock()
+ inventory.get_hosts.return_value = hosts
+ inventory.filter_hosts.return_value = hosts
+
+ play_context = PlayContext(play=p._entries[0])
+
+ itr = PlayIterator(
+ inventory=inventory,
+ play=p._entries[0],
+ play_context=play_context,
+ variable_manager=mock_var_manager,
+ all_vars=dict(),
+ )
+
+ # pre task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ # role task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.name, "role task")
+ self.assertIsNotNone(task._role)
+ # role block task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role block task")
+ self.assertIsNotNone(task._role)
+ # role block always task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role always task")
+ self.assertIsNotNone(task._role)
+ # role include task
+ # (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ # self.assertIsNotNone(task)
+ # self.assertEqual(task.action, 'debug')
+ # self.assertEqual(task.name, "role included task")
+ # self.assertIsNotNone(task._role)
+ # role task after include
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role task after include")
+ self.assertIsNotNone(task._role)
+ # role nested block tasks
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "starting role nested block 1")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role nested block 1 task 1")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role nested block 1 task 2")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role nested block 1 task 3")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "end of role nested block 1")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "starting role nested block 2")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role nested block 2 task 1")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role nested block 2 task 2")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "role nested block 2 task 3")
+ self.assertIsNotNone(task._role)
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.name, "end of role nested block 2")
+ self.assertIsNotNone(task._role)
+ # regular play task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertIsNone(task._role)
+ # block task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg="this is a block task"))
+ # sub-block task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
+ # mark the host failed
+ itr.mark_host_failed(hosts[0])
+ # block rescue task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg="this is a rescue task"))
+ # sub-block rescue task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
+ # block always task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg="this is an always task"))
+ # sub-block always task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ # post task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ # end of iteration
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNone(task)
+
+ # host 0 shouldn't be in the failed hosts, as the error
+ # was handled by a rescue block
+ failed_hosts = itr.get_failed_hosts()
+ self.assertNotIn(hosts[0], failed_hosts)
+
+ def test_play_iterator_nested_blocks(self):
+ fake_loader = DictDataLoader({
+ "test_play.yml": """
+ - hosts: all
+ gather_facts: false
+ tasks:
+ - block:
+ - block:
+ - block:
+ - block:
+ - block:
+ - debug: msg="this is the first task"
+ - ping:
+ rescue:
+ - block:
+ - block:
+ - block:
+ - block:
+ - debug: msg="this is the rescue task"
+ always:
+ - block:
+ - block:
+ - block:
+ - block:
+ - debug: msg="this is the always task"
+ """,
+ })
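+
+ # Even with the blocks nested five levels deep, the iterator is expected
+ # to reach the single debug task, route the forced failure into the
+ # deeply nested rescue section, and still run the always section.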
+
+ mock_var_manager = MagicMock()
+ mock_var_manager._fact_cache = dict()
+ mock_var_manager.get_vars.return_value = dict()
+
+ p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
+
+ hosts = []
+ for i in range(0, 10):
+ host = MagicMock()
+ host.name = host.get_name.return_value = 'host%02d' % i
+ hosts.append(host)
+
+ inventory = MagicMock()
+ inventory.get_hosts.return_value = hosts
+ inventory.filter_hosts.return_value = hosts
+
+ play_context = PlayContext(play=p._entries[0])
+
+ itr = PlayIterator(
+ inventory=inventory,
+ play=p._entries[0],
+ play_context=play_context,
+ variable_manager=mock_var_manager,
+ all_vars=dict(),
+ )
+
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
+ # get the first task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg='this is the first task'))
+ # fail the host
+ itr.mark_host_failed(hosts[0])
+ # get the rescue task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg='this is the rescue task'))
+ # get the always task
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'debug')
+ self.assertEqual(task.args, dict(msg='this is the always task'))
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
+ # implicit meta: flush_handlers
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNotNone(task)
+ self.assertEqual(task.action, 'meta')
+ self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
+ # end of iteration
+ (host_state, task) = itr.get_next_task_for_host(hosts[0])
+ self.assertIsNone(task)
+
+ def test_play_iterator_add_tasks(self):
+ fake_loader = DictDataLoader({
+ 'test_play.yml': """
+ - hosts: all
+ gather_facts: no
+ tasks:
+ - debug: msg="dummy task"
+ """,
+ })
+
+ mock_var_manager = MagicMock()
+ mock_var_manager._fact_cache = dict()
+ mock_var_manager.get_vars.return_value = dict()
+
+ p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
+
+ hosts = []
+ for i in range(0, 10):
+ host = MagicMock()
+ host.name = host.get_name.return_value = 'host%02d' % i
+ hosts.append(host)
+
+ inventory = MagicMock()
+ inventory.get_hosts.return_value = hosts
+ inventory.filter_hosts.return_value = hosts
+
+ play_context = PlayContext(play=p._entries[0])
+
+ itr = PlayIterator(
+ inventory=inventory,
+ play=p._entries[0],
+ play_context=play_context,
+ variable_manager=mock_var_manager,
+ all_vars=dict(),
+ )
+
+ # test the high-level add_tasks() method
+ s = HostState(blocks=[0, 1, 2])
+ itr._insert_tasks_into_state = MagicMock(return_value=s)
+ itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()])
+ self.assertEqual(itr._host_states[hosts[0].name], s)
+
+ # now actually test the lower-level method that does the work
+ itr = PlayIterator(
+ inventory=inventory,
+ play=p._entries[0],
+ play_context=play_context,
+ variable_manager=mock_var_manager,
+ all_vars=dict(),
+ )
+
+ # iterate past first task
+ _, task = itr.get_next_task_for_host(hosts[0])
+ while task and task.action != 'debug':
+ _, task = itr.get_next_task_for_host(hosts[0])
+
+ if task is None:
+ raise Exception("iterated past end of play while looking for place to insert tasks")
+
+ # get the current host state and copy it so we can mutate it
+ s = itr.get_host_state(hosts[0])
+ s_copy = s.copy()
+
+ # assert that with an empty task list, or in a failed state, the state is simply returned as-is
+ res_state = itr._insert_tasks_into_state(s_copy, task_list=[])
+ self.assertEqual(res_state, s_copy)
+
+ s_copy.fail_state = itr.FAILED_TASKS
+ res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
+ self.assertEqual(res_state, s_copy)
+
+ # but if we've failed and are iterating the rescue portion of a block,
+ # the new tasks are inserted into that block's rescue list
+ mock_task = MagicMock()
+ s_copy.run_state = itr.ITERATING_RESCUE
+ res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
+ self.assertEqual(res_state, s_copy)
+ self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
+ itr._host_states[hosts[0].name] = res_state
+ (next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
+ self.assertEqual(next_task, mock_task)
+ itr._host_states[hosts[0].name] = s
+
+ # test a regular insertion
+ s_copy = s.copy()
+ res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
diff --git a/test/units/executor/test_playbook_executor.py b/test/units/executor/test_playbook_executor.py
new file mode 100644
index 00000000..529eda36
--- /dev/null
+++ b/test/units/executor/test_playbook_executor.py
@@ -0,0 +1,148 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import MagicMock
+
+from ansible.executor.playbook_executor import PlaybookExecutor
+from ansible.playbook import Playbook
+from ansible.template import Templar
+from ansible.utils import context_objects as co
+
+from units.mock.loader import DictDataLoader
+
+
+class TestPlaybookExecutor(unittest.TestCase):
+
+ def setUp(self):
+ # Reset command line args for every test
+ co.GlobalCLIArgs._Singleton__instance = None
+
+ def tearDown(self):
+ # And cleanup after ourselves too
+ co.GlobalCLIArgs._Singleton__instance = None
+
+ def test_get_serialized_batches(self):
+ fake_loader = DictDataLoader({
+ 'no_serial.yml': '''
+ - hosts: all
+ gather_facts: no
+ tasks:
+ - debug: var=inventory_hostname
+ ''',
+ 'serial_int.yml': '''
+ - hosts: all
+ gather_facts: no
+ serial: 2
+ tasks:
+ - debug: var=inventory_hostname
+ ''',
+ 'serial_pct.yml': '''
+ - hosts: all
+ gather_facts: no
+ serial: 20%
+ tasks:
+ - debug: var=inventory_hostname
+ ''',
+ 'serial_list.yml': '''
+ - hosts: all
+ gather_facts: no
+ serial: [1, 2, 3]
+ tasks:
+ - debug: var=inventory_hostname
+ ''',
+ 'serial_list_mixed.yml': '''
+ - hosts: all
+ gather_facts: no
+ serial: [1, "20%", -1]
+ tasks:
+ - debug: var=inventory_hostname
+ ''',
+ })
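+
+ # The expectations below pin down the batching rules: no serial yields one
+ # batch with every host; an integer serial splits hosts into fixed-size
+ # chunks; a percentage is converted to a host count (20% of 10 hosts = 2,
+ # with a minimum batch size of 1); and a list of serials is consumed in
+ # order, repeating its last entry until the hosts run out, with -1 meaning
+ # "all remaining hosts".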
+
+ mock_inventory = MagicMock()
+ mock_var_manager = MagicMock()
+
+ templar = Templar(loader=fake_loader)
+
+ pbe = PlaybookExecutor(
+ playbooks=['no_serial.yml', 'serial_int.yml', 'serial_pct.yml', 'serial_list.yml', 'serial_list_mixed.yml'],
+ inventory=mock_inventory,
+ variable_manager=mock_var_manager,
+ loader=fake_loader,
+ passwords=[],
+ )
+
+ playbook = Playbook.load(pbe._playbooks[0], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']
+ self.assertEqual(pbe._get_serialized_batches(play), [['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']])
+
+ playbook = Playbook.load(pbe._playbooks[1], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']
+ self.assertEqual(
+ pbe._get_serialized_batches(play),
+ [['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'], ['host6', 'host7'], ['host8', 'host9']]
+ )
+
+ playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']
+ self.assertEqual(
+ pbe._get_serialized_batches(play),
+ [['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'], ['host6', 'host7'], ['host8', 'host9']]
+ )
+
+ playbook = Playbook.load(pbe._playbooks[3], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']
+ self.assertEqual(
+ pbe._get_serialized_batches(play),
+ [['host0'], ['host1', 'host2'], ['host3', 'host4', 'host5'], ['host6', 'host7', 'host8'], ['host9']]
+ )
+
+ playbook = Playbook.load(pbe._playbooks[4], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']
+ self.assertEqual(pbe._get_serialized_batches(play), [['host0'], ['host1', 'host2'], ['host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9']])
+
+ # Test when serial percent is under 1.0
+ playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2']
+ self.assertEqual(pbe._get_serialized_batches(play), [['host0'], ['host1'], ['host2']])
+
+ # Test when there is a remainder for serial as a percent
+ playbook = Playbook.load(pbe._playbooks[2], variable_manager=mock_var_manager, loader=fake_loader)
+ play = playbook.get_plays()[0]
+ play.post_validate(templar)
+ mock_inventory.get_hosts.return_value = ['host0', 'host1', 'host2', 'host3', 'host4', 'host5', 'host6', 'host7', 'host8', 'host9', 'host10']
+ self.assertEqual(
+ pbe._get_serialized_batches(play),
+ [['host0', 'host1'], ['host2', 'host3'], ['host4', 'host5'], ['host6', 'host7'], ['host8', 'host9'], ['host10']]
+ )
diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py
new file mode 100644
index 00000000..7d9d711f
--- /dev/null
+++ b/test/units/executor/test_task_executor.py
@@ -0,0 +1,656 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import mock
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+from ansible.errors import AnsibleError
+from ansible.executor.task_executor import TaskExecutor, remove_omit
+from ansible.plugins.loader import action_loader, lookup_loader
+from ansible.parsing.yaml.objects import AnsibleUnicode
+from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes
+from ansible.module_utils.six import text_type
+
+from units.mock.loader import DictDataLoader
+
+
+class TestTaskExecutor(unittest.TestCase):
+
+ def test_task_executor_init(self):
+ fake_loader = DictDataLoader({})
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+ mock_play_context = MagicMock()
+ mock_shared_loader = MagicMock()
+ new_stdin = None
+ job_vars = dict()
+ mock_queue = MagicMock()
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=mock_shared_loader,
+ final_q=mock_queue,
+ )
+
+ def test_task_executor_run(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task._role._role_path = '/path/to/role/foo'
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = MagicMock()
+ mock_queue = MagicMock()
+
+ new_stdin = None
+ job_vars = dict()
+
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=mock_shared_loader,
+ final_q=mock_queue,
+ )
+
+ te._get_loop_items = MagicMock(return_value=None)
+ te._execute = MagicMock(return_value=dict())
+ res = te.run()
+
+ te._get_loop_items = MagicMock(return_value=[])
+ res = te.run()
+
+ te._get_loop_items = MagicMock(return_value=['a', 'b', 'c'])
+ te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
+ res = te.run()
+
+ te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
+ res = te.run()
+ self.assertIn("failed", res)
+
+ def test_task_executor_run_clean_res(self):
+ te = TaskExecutor(None, MagicMock(), None, None, None, None, None, None)
+ te._get_loop_items = MagicMock(return_value=[1])
+ te._run_loop = MagicMock(
+ return_value=[
+ {
+ 'unsafe_bytes': AnsibleUnsafeBytes(b'{{ $bar }}'),
+ 'unsafe_text': AnsibleUnsafeText(u'{{ $bar }}'),
+ 'bytes': b'bytes',
+ 'text': u'text',
+ 'int': 1,
+ }
+ ]
+ )
+ res = te.run()
+ data = res['results'][0]
+ self.assertIsInstance(data['unsafe_bytes'], AnsibleUnsafeText)
+ self.assertIsInstance(data['unsafe_text'], AnsibleUnsafeText)
+ self.assertIsInstance(data['bytes'], text_type)
+ self.assertIsInstance(data['text'], text_type)
+ self.assertIsInstance(data['int'], int)
+
+ def test_task_executor_get_loop_items(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task.loop_with = 'items'
+ mock_task.loop = ['a', 'b', 'c']
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = MagicMock()
+ mock_shared_loader.lookup_loader = lookup_loader
+
+ new_stdin = None
+ job_vars = dict()
+ mock_queue = MagicMock()
+
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=mock_shared_loader,
+ final_q=mock_queue,
+ )
+
+ items = te._get_loop_items()
+ self.assertEqual(items, ['a', 'b', 'c'])
+
+ def test_task_executor_run_loop(self):
+ items = ['a', 'b', 'c']
+
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ def _copy(exclude_parent=False, exclude_tasks=False):
+ new_item = MagicMock()
+ return new_item
+
+ mock_task = MagicMock()
+ mock_task.copy.side_effect = _copy
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = MagicMock()
+ mock_queue = MagicMock()
+
+ new_stdin = None
+ job_vars = dict()
+
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=mock_shared_loader,
+ final_q=mock_queue,
+ )
+
+ def _execute(variables):
+ return dict(item=variables.get('item'))
+
+ te._squash_items = MagicMock(return_value=items)
+ te._execute = MagicMock(side_effect=_execute)
+
+ res = te._run_loop(items)
+ self.assertEqual(len(res), 3)
+
+ def test_task_executor_squash_items(self):
+ items = ['a', 'b', 'c']
+
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ loop_var = 'item'
+
+ def _evaluate_conditional(templar, variables):
+ item = variables.get(loop_var)
+ if item == 'b':
+ return False
+ return True
+
+ mock_task = MagicMock()
+ mock_task.evaluate_conditional.side_effect = _evaluate_conditional
+
+ mock_play_context = MagicMock()
+
+ mock_shared_loader = None
+ mock_queue = MagicMock()
+
+ new_stdin = None
+ job_vars = dict(pkg_mgr='yum')
+
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=mock_shared_loader,
+ final_q=mock_queue,
+ )
+
+ # No replacement
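+ # (squashing is skipped when the action is not a squashable package
+ # module, when the squashed argument does not template the loop variable,
+ # or when the action name itself templates to something unknown)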
+ mock_task.action = 'yum'
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, ['a', 'b', 'c'])
+ self.assertIsInstance(mock_task.args, MagicMock)
+
+ mock_task.action = 'foo'
+ mock_task.args = {'name': '{{item}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, ['a', 'b', 'c'])
+ self.assertEqual(mock_task.args, {'name': '{{item}}'})
+
+ mock_task.action = 'yum'
+ mock_task.args = {'name': 'static'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, ['a', 'b', 'c'])
+ self.assertEqual(mock_task.args, {'name': 'static'})
+
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{pkg_mgr}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, ['a', 'b', 'c'])
+ self.assertEqual(mock_task.args, {'name': '{{pkg_mgr}}'})
+
+ mock_task.action = '{{unknown}}'
+ mock_task.args = {'name': '{{item}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, ['a', 'b', 'c'])
+ self.assertEqual(mock_task.args, {'name': '{{item}}'})
+
+ # Could do something like this to recover from bad deps in a package
+ job_vars = dict(pkg_mgr='yum', packages=['a', 'b'])
+ items = ['absent', 'latest']
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{ packages }}', 'state': '{{ item }}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{ packages }}', 'state': '{{ item }}'})
+
+ # Maybe we should raise an error in this case. The user would have to specify:
+ # - yum: name="{{ packages[item] }}"
+ # with_items:
+ # - ['a', 'b']
+ # - ['foo', 'bar']
+ # you can't use a list as a dict key so that would probably throw
+ # an error later. If so, we can throw it now instead.
+ # Squashing in this case would not be intuitive as the user is being
+ # explicit in using each list entry as a key.
+ job_vars = dict(pkg_mgr='yum', packages={"a": "foo", "b": "bar", "foo": "baz", "bar": "quux"})
+ items = [['a', 'b'], ['foo', 'bar']]
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{ packages[item] }}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'})
+
+ # Replaces
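+ # (when the action is a squashable module and its name argument templates
+ # the loop variable, the items whose conditional passes are collapsed
+ # into a single list; 'b' is filtered out by the mocked conditional above)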
+ items = ['a', 'b', 'c']
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{item}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, [['a', 'c']])
+ self.assertEqual(mock_task.args, {'name': ['a', 'c']})
+
+ mock_task.action = '{{pkg_mgr}}'
+ mock_task.args = {'name': '{{item}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ self.assertEqual(new_items, [['a', 'c']])
+ self.assertEqual(mock_task.args, {'name': ['a', 'c']})
+
+ # New loop_var
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{a_loop_var_item}}'}
+ mock_task.loop_control = {'loop_var': 'a_loop_var_item'}
+ loop_var = 'a_loop_var_item'
+ new_items = te._squash_items(items=items, loop_var='a_loop_var_item', variables=job_vars)
+ self.assertEqual(new_items, [['a', 'c']])
+ self.assertEqual(mock_task.args, {'name': ['a', 'c']})
+ loop_var = 'item'
+
+ #
+ # These are presently not optimized but could be in the future.
+ # The expected output, if they were optimized, is given as a comment.
+ # Please move these to a different section once they are optimized.
+ #
+
+ # Squashing lists
+ job_vars = dict(pkg_mgr='yum')
+ items = [['a', 'b'], ['foo', 'bar']]
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{ item }}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ # self.assertEqual(new_items, [['a', 'b', 'foo', 'bar']])
+ # self.assertEqual(mock_task.args, {'name': ['a', 'b', 'foo', 'bar']})
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{ item }}'})
+
+ # Retrieving from a dict
+ items = ['a', 'b', 'foo']
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{ packages[item] }}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ # self.assertEqual(new_items, [['foo', 'baz']])
+ # self.assertEqual(mock_task.args, {'name': ['foo', 'baz']})
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{ packages[item] }}'})
+
+ # Another way to retrieve from a dict
+ job_vars = dict(pkg_mgr='yum')
+ items = [{'package': 'foo'}, {'package': 'bar'}]
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{ item["package"] }}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ # self.assertEqual(new_items, [['foo', 'bar']])
+ # self.assertEqual(mock_task.args, {'name': ['foo', 'bar']})
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{ item["package"] }}'})
+
+ items = [
+ dict(name='a', state='present'),
+ dict(name='b', state='present'),
+ dict(name='c', state='present'),
+ ]
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{item.name}}', 'state': '{{item.state}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ # self.assertEqual(new_items, [dict(name=['a', 'b', 'c'], state='present')])
+ # self.assertEqual(mock_task.args, {'name': ['a', 'b', 'c'], 'state': 'present'})
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
+
+ items = [
+ dict(name='a', state='present'),
+ dict(name='b', state='present'),
+ dict(name='c', state='absent'),
+ ]
+ mock_task.action = 'yum'
+ mock_task.args = {'name': '{{item.name}}', 'state': '{{item.state}}'}
+ new_items = te._squash_items(items=items, loop_var='item', variables=job_vars)
+ # self.assertEqual(new_items, [dict(name=['a', 'b'], state='present'),
+ # dict(name='c', state='absent')])
+ # self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
+ self.assertEqual(new_items, items)
+ self.assertEqual(mock_task.args, {'name': '{{item.name}}', 'state': '{{item.state}}'})
+
+ def test_task_executor_get_action_handler(self):
+ te = TaskExecutor(
+ host=MagicMock(),
+ task=MagicMock(),
+ job_vars={},
+ play_context=MagicMock(),
+ new_stdin=None,
+ loader=DictDataLoader({}),
+ shared_loader_obj=MagicMock(),
+ final_q=MagicMock(),
+ )
+
+ action_loader = te._shared_loader_obj.action_loader
+ action_loader.has_plugin.return_value = True
+ action_loader.get.return_value = mock.sentinel.handler
+
+ mock_connection = MagicMock()
+ mock_templar = MagicMock()
+ action = 'namespace.prefix_suffix'
+ te._task.action = action
+
+ handler = te._get_action_handler(mock_connection, mock_templar)
+
+ self.assertIs(mock.sentinel.handler, handler)
+
+ action_loader.has_plugin.assert_called_once_with(
+ action, collection_list=te._task.collections)
+
+ action_loader.get.assert_called_once_with(
+ te._task.action, task=te._task, connection=mock_connection,
+ play_context=te._play_context, loader=te._loader,
+ templar=mock_templar, shared_loader_obj=te._shared_loader_obj,
+ collection_list=te._task.collections)
+
+ def test_task_executor_get_handler_prefix(self):
+ te = TaskExecutor(
+ host=MagicMock(),
+ task=MagicMock(),
+ job_vars={},
+ play_context=MagicMock(),
+ new_stdin=None,
+ loader=DictDataLoader({}),
+ shared_loader_obj=MagicMock(),
+ final_q=MagicMock(),
+ )
+
+ action_loader = te._shared_loader_obj.action_loader
+ action_loader.has_plugin.side_effect = [False, True]
+ action_loader.get.return_value = mock.sentinel.handler
+ action_loader.__contains__.return_value = True
+
+ mock_connection = MagicMock()
+ mock_templar = MagicMock()
+ action = 'namespace.netconf_suffix'
+ module_prefix = action.split('_')[0]
+ te._task.action = action
+
+ handler = te._get_action_handler(mock_connection, mock_templar)
+
+ self.assertIs(mock.sentinel.handler, handler)
+ action_loader.has_plugin.assert_has_calls([mock.call(action, collection_list=te._task.collections),
+ mock.call(module_prefix, collection_list=te._task.collections)])
+
+ action_loader.get.assert_called_once_with(
+ module_prefix, task=te._task, connection=mock_connection,
+ play_context=te._play_context, loader=te._loader,
+ templar=mock_templar, shared_loader_obj=te._shared_loader_obj,
+ collection_list=te._task.collections)
+
+ def test_task_executor_get_handler_normal(self):
+ te = TaskExecutor(
+ host=MagicMock(),
+ task=MagicMock(),
+ job_vars={},
+ play_context=MagicMock(),
+ new_stdin=None,
+ loader=DictDataLoader({}),
+ shared_loader_obj=MagicMock(),
+ final_q=MagicMock(),
+ )
+
+ action_loader = te._shared_loader_obj.action_loader
+ action_loader.has_plugin.return_value = False
+ action_loader.get.return_value = mock.sentinel.handler
+ action_loader.__contains__.return_value = False
+
+ mock_connection = MagicMock()
+ mock_templar = MagicMock()
+ action = 'namespace.prefix_suffix'
+ module_prefix = action.split('_')[0]
+ te._task.action = action
+ handler = te._get_action_handler(mock_connection, mock_templar)
+
+ self.assertIs(mock.sentinel.handler, handler)
+
+ action_loader.has_plugin.assert_has_calls([mock.call(action, collection_list=te._task.collections),
+ mock.call(module_prefix, collection_list=te._task.collections)])
+
+ action_loader.get.assert_called_once_with(
+ 'ansible.legacy.normal', task=te._task, connection=mock_connection,
+ play_context=te._play_context, loader=te._loader,
+ templar=mock_templar, shared_loader_obj=te._shared_loader_obj,
+ collection_list=None)
+
+ def test_task_executor_execute(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task.args = dict()
+ mock_task.retries = 0
+ mock_task.delay = -1
+ mock_task.register = 'foo'
+ mock_task.until = None
+ mock_task.changed_when = None
+ mock_task.failed_when = None
+ mock_task.post_validate.return_value = None
+ # mock_task.async_val cannot be left unset, because on Python 3
+ # comparing MagicMock() > 0 raises a TypeError. There are two reasons
+ # for using the value 1 here: on Python 2 comparing MagicMock() > 0
+ # returns True, and if 0 is specified the test fails. ;)
+ mock_task.async_val = 1
+ mock_task.poll = 0
+
+ mock_play_context = MagicMock()
+ mock_play_context.post_validate.return_value = None
+ mock_play_context.update_vars.return_value = None
+
+ mock_connection = MagicMock()
+ mock_connection.set_host_overrides.return_value = None
+ mock_connection._connect.return_value = None
+
+ mock_action = MagicMock()
+ mock_queue = MagicMock()
+
+ shared_loader = None
+ new_stdin = None
+ job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
+
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=shared_loader,
+ final_q=mock_queue,
+ )
+
+ te._get_connection = MagicMock(return_value=mock_connection)
+ te._get_action_handler = MagicMock(return_value=mock_action)
+
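+ # Drive _execute() through several branches in turn: a plain run, a
+ # changed_when evaluation, a failed_when evaluation, a false
+ # conditional, and finally an include action with raw params.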
+ mock_action.run.return_value = dict(ansible_facts=dict())
+ res = te._execute()
+
+ mock_task.changed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
+ res = te._execute()
+
+ mock_task.changed_when = None
+ mock_task.failed_when = MagicMock(return_value=AnsibleUnicode("1 == 1"))
+ res = te._execute()
+
+ mock_task.failed_when = None
+ mock_task.evaluate_conditional.return_value = False
+ res = te._execute()
+
+ mock_task.evaluate_conditional.return_value = True
+ mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
+ mock_task.action = 'include'
+ res = te._execute()
+
+ def test_task_executor_poll_async_result(self):
+ fake_loader = DictDataLoader({})
+
+ mock_host = MagicMock()
+
+ mock_task = MagicMock()
+ mock_task.async_val = 0.1
+ mock_task.poll = 0.05
+
+ mock_play_context = MagicMock()
+
+ mock_connection = MagicMock()
+
+ mock_action = MagicMock()
+ mock_queue = MagicMock()
+
+ shared_loader = MagicMock()
+ shared_loader.action_loader = action_loader
+
+ new_stdin = None
+ job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
+
+ te = TaskExecutor(
+ host=mock_host,
+ task=mock_task,
+ job_vars=job_vars,
+ play_context=mock_play_context,
+ new_stdin=new_stdin,
+ loader=fake_loader,
+ shared_loader_obj=shared_loader,
+ final_q=mock_queue,
+ )
+
+ te._connection = MagicMock()
+
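+ # Stand-in for action_loader.get: returns an async_status-style action
+ # whose run() result is missing the keys _poll_async_result expects.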
+ def _get(*args, **kwargs):
+ mock_action = MagicMock()
+ mock_action.run.return_value = dict(stdout='')
+ return mock_action
+
+ # testing with some bad values in the result passed to poll async,
+ # and with a bad value returned from the mock action
+ with patch.object(action_loader, 'get', _get):
+ mock_templar = MagicMock()
+ res = te._poll_async_result(result=dict(), templar=mock_templar)
+ self.assertIn('failed', res)
+ res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
+ self.assertIn('failed', res)
+
+ def _get(*args, **kwargs):
+ mock_action = MagicMock()
+ mock_action.run.return_value = dict(finished=1)
+ return mock_action
+
+ # now testing with good values
+ with patch.object(action_loader, 'get', _get):
+ mock_templar = MagicMock()
+ res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
+ self.assertEqual(res, dict(finished=1))
+
+ def test_recursive_remove_omit(self):
+ omit_token = 'POPCORN'
+
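+ # remove_omit should strip dictionary entries whose value equals the
+ # omit token, at any depth, while leaving list items untouched.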
+ data = {
+ 'foo': 'bar',
+ 'baz': 1,
+ 'qux': ['one', 'two', 'three'],
+ 'subdict': {
+ 'remove': 'POPCORN',
+ 'keep': 'not_popcorn',
+ 'subsubdict': {
+ 'remove': 'POPCORN',
+ 'keep': 'not_popcorn',
+ },
+ 'a_list': ['POPCORN'],
+ },
+ 'a_list': ['POPCORN'],
+ 'list_of_lists': [
+ ['some', 'thing'],
+ ],
+ 'list_of_dicts': [
+ {
+ 'remove': 'POPCORN',
+ }
+ ],
+ }
+
+ expected = {
+ 'foo': 'bar',
+ 'baz': 1,
+ 'qux': ['one', 'two', 'three'],
+ 'subdict': {
+ 'keep': 'not_popcorn',
+ 'subsubdict': {
+ 'keep': 'not_popcorn',
+ },
+ 'a_list': ['POPCORN'],
+ },
+ 'a_list': ['POPCORN'],
+ 'list_of_lists': [
+ ['some', 'thing'],
+ ],
+ 'list_of_dicts': [{}],
+ }
+
+ self.assertEqual(remove_omit(data, omit_token), expected)
diff --git a/test/units/executor/test_task_queue_manager_callbacks.py b/test/units/executor/test_task_queue_manager_callbacks.py
new file mode 100644
index 00000000..6c0ceee0
--- /dev/null
+++ b/test/units/executor/test_task_queue_manager_callbacks.py
@@ -0,0 +1,121 @@
+# (c) 2016, Steve Kuznetsov <skuznets@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+
+from units.compat import unittest
+from units.compat.mock import MagicMock
+
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.playbook import Playbook
+from ansible.plugins.callback import CallbackBase
+from ansible.utils import context_objects as co
+
+__metaclass__ = type
+
+
+class TestTaskQueueManagerCallbacks(unittest.TestCase):
+ def setUp(self):
+ inventory = MagicMock()
+ variable_manager = MagicMock()
+ loader = MagicMock()
+ passwords = []
+
+ # Reset the stored command line args
+ co.GlobalCLIArgs._Singleton__instance = None
+ self._tqm = TaskQueueManager(inventory, variable_manager, loader, passwords)
+ self._playbook = Playbook(loader)
+
+ # we use a MagicMock to register the result of the call we
+ # expect to `v2_playbook_on_start`. We don't mock out the
+ # method since we're testing code that uses `inspect` to
+ # look at that method's argspec and we want to ensure this
+ # test is easy to reason about.
+ self._register = MagicMock()
+
+ def tearDown(self):
+ # Reset the stored command line args
+ co.GlobalCLIArgs._Singleton__instance = None
+
+ def test_task_queue_manager_callbacks_v2_playbook_on_start(self):
+ """
+ Assert that no exceptions are raised when sending a Playbook
+ start callback to a current callback module plugin.
+ """
+ register = self._register
+
+ class CallbackModule(CallbackBase):
+ """
+ This is a callback module with the current
+ method signature for `v2_playbook_on_start`.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'current_module'
+
+ def v2_playbook_on_start(self, playbook):
+ register(self, playbook)
+
+ callback_module = CallbackModule()
+ self._tqm._callback_plugins.append(callback_module)
+ self._tqm.send_callback('v2_playbook_on_start', self._playbook)
+ register.assert_called_once_with(callback_module, self._playbook)
+
+ def test_task_queue_manager_callbacks_v2_playbook_on_start_wrapped(self):
+ """
+ Assert that no exceptions are raised when sending a Playbook
+ start callback to a wrapped current callback module plugin.
+ """
+ register = self._register
+
+ def wrap_callback(func):
+ """
+ This wrapper changes the exposed argument
+ names for a method from the original names
+ to (*args, **kwargs). It is used to validate
+ that wrappers which change parameter names
+ do not break the TQM callback system.
+
+ :param func: function to decorate
+ :return: decorated function
+ """
+
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ class WrappedCallbackModule(CallbackBase):
+ """
+ This is a callback module with the current
+ method signature for `v2_playbook_on_start`
+ wrapped in order to change the signature.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'current_module'
+
+ @wrap_callback
+ def v2_playbook_on_start(self, playbook):
+ register(self, playbook)
+
+ callback_module = WrappedCallbackModule()
+ self._tqm._callback_plugins.append(callback_module)
+ self._tqm.send_callback('v2_playbook_on_start', self._playbook)
+ register.assert_called_once_with(callback_module, self._playbook)
diff --git a/test/units/executor/test_task_result.py b/test/units/executor/test_task_result.py
new file mode 100644
index 00000000..3ce210de
--- /dev/null
+++ b/test/units/executor/test_task_result.py
@@ -0,0 +1,171 @@
+# (c) 2016, James Cammarata <jimi@sngx.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible.executor.task_result import TaskResult
+
+
+class TestTaskResult(unittest.TestCase):
+ def test_task_result_basic(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # test loading a result with a dict
+ tr = TaskResult(mock_host, mock_task, dict())
+
+ # test loading a result with a JSON string
+ with patch('ansible.parsing.dataloader.DataLoader.load') as p:
+ tr = TaskResult(mock_host, mock_task, '{}')
+
+ def test_task_result_is_changed(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # test with no changed in result
+ tr = TaskResult(mock_host, mock_task, dict())
+ self.assertFalse(tr.is_changed())
+
+ # test with changed in the result
+ tr = TaskResult(mock_host, mock_task, dict(changed=True))
+ self.assertTrue(tr.is_changed())
+
+ # test with multiple results but none changed
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
+ self.assertFalse(tr.is_changed())
+
+ # test with multiple results and one changed
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
+ self.assertTrue(tr.is_changed())
+
+ def test_task_result_is_skipped(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # test with no skipped in result
+ tr = TaskResult(mock_host, mock_task, dict())
+ self.assertFalse(tr.is_skipped())
+
+ # test with skipped in the result
+ tr = TaskResult(mock_host, mock_task, dict(skipped=True))
+ self.assertTrue(tr.is_skipped())
+
+ # test with multiple results but none skipped
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
+ self.assertFalse(tr.is_skipped())
+
+ # test with multiple results and only one skipped (a task only counts
+ # as skipped when all of its results are skipped)
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
+ self.assertFalse(tr.is_skipped())
+
+ # test with multiple results and all skipped
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
+ self.assertTrue(tr.is_skipped())
+
+ # test with multiple squashed results (list of strings)
+ # first with the main result having skipped=False
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
+ self.assertFalse(tr.is_skipped())
+ # then with the main result having skipped=True
+ tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
+ self.assertTrue(tr.is_skipped())
+
+ def test_task_result_is_unreachable(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # test with no unreachable in result
+ tr = TaskResult(mock_host, mock_task, dict())
+ self.assertFalse(tr.is_unreachable())
+
+ # test with unreachable in the result
+ tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
+ self.assertTrue(tr.is_unreachable())
+
+ # test with multiple results but none unreachable
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
+ self.assertFalse(tr.is_unreachable())
+
+ # test with multiple results and one unreachable
+ mock_task.loop = 'foo'
+ tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
+ self.assertTrue(tr.is_unreachable())
+
+ def test_task_result_is_failed(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # test with no failed in result
+ tr = TaskResult(mock_host, mock_task, dict())
+ self.assertFalse(tr.is_failed())
+
+ # test failed result with rc values (should not matter)
+ tr = TaskResult(mock_host, mock_task, dict(rc=0))
+ self.assertFalse(tr.is_failed())
+ tr = TaskResult(mock_host, mock_task, dict(rc=1))
+ self.assertFalse(tr.is_failed())
+
+ # test with failed in result
+ tr = TaskResult(mock_host, mock_task, dict(failed=True))
+ self.assertTrue(tr.is_failed())
+
+ # test with failed_when in result
+ tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
+ self.assertTrue(tr.is_failed())
+
+ def test_task_result_no_log(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # no_log should remove secrets
+ tr = TaskResult(mock_host, mock_task, dict(_ansible_no_log=True, secret='DONTSHOWME'))
+ clean = tr.clean_copy()
+ self.assertTrue('secret' not in clean._result)
+
+ def test_task_result_no_log_preserve(self):
+ mock_host = MagicMock()
+ mock_task = MagicMock()
+
+ # no_log should not remove preserved keys
+ tr = TaskResult(
+ mock_host,
+ mock_task,
+ dict(
+ _ansible_no_log=True,
+ retries=5,
+ attempts=5,
+ changed=False,
+ foo='bar',
+ )
+ )
+ clean = tr.clean_copy()
+ self.assertTrue('retries' in clean._result)
+ self.assertTrue('attempts' in clean._result)
+ self.assertTrue('changed' in clean._result)
+ self.assertTrue('foo' not in clean._result)
diff --git a/test/units/galaxy/__init__.py b/test/units/galaxy/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/galaxy/__init__.py
diff --git a/test/units/galaxy/test_api.py b/test/units/galaxy/test_api.py
new file mode 100644
index 00000000..f333a64b
--- /dev/null
+++ b/test/units/galaxy/test_api.py
@@ -0,0 +1,912 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import pytest
+import tarfile
+import tempfile
+import time
+
+from io import BytesIO, StringIO
+from units.compat.mock import MagicMock
+
+from ansible import context
+from ansible.errors import AnsibleError
+from ansible.galaxy import api as galaxy_api
+from ansible.galaxy.api import CollectionVersionMetadata, GalaxyAPI, GalaxyError
+from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.six.moves.urllib import error as urllib_error
+from ansible.utils import context_objects as co
+from ansible.utils.display import Display
+
+
+@pytest.fixture(autouse=True)
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ # Required to initialise the GalaxyAPI object
+ context.CLIARGS._store = {'ignore_certs': False}
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
+
+
+@pytest.fixture()
+def collection_artifact(tmp_path_factory):
+ ''' Creates a collection artifact tarball that is ready to be published '''
+ output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Output'))
+
+ tar_path = os.path.join(output_dir, 'namespace-collection-v1.0.0.tar.gz')
+ with tarfile.open(tar_path, 'w:gz') as tfile:
+ b_io = BytesIO(b"\x00\x01\x02\x03")
+ tar_info = tarfile.TarInfo('test')
+ tar_info.size = 4
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ yield tar_path
+
+
+def get_test_galaxy_api(url, version, token_ins=None, token_value=None):
+ token_value = token_value or "my token"
+ token_ins = token_ins or GalaxyToken(token_value)
+ api = GalaxyAPI(None, "test", url)
+ # Warning: this doesn't exercise g_connect() because _available_api_versions is set here. That
+ # means URLs for v2 servers have to append '/api/' themselves in the input data.
+ api._available_api_versions = {version: '%s' % version}
+ api.token = token_ins
+
+ return api
+
+
+def test_api_no_auth():
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
+ actual = {}
+ api._add_auth_token(actual, "")
+ assert actual == {}
+
+
+def test_api_no_auth_but_required():
+ expected = "No access token or username set. A token can be set with --api-key or at "
+ with pytest.raises(AnsibleError, match=expected):
+ GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")._add_auth_token({}, "", required=True)
+
+
+def test_api_token_auth():
+ token = GalaxyToken(token=u"my_token")
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+ actual = {}
+ api._add_auth_token(actual, "", required=True)
+ assert actual == {'Authorization': 'Token my_token'}
+
+
+def test_api_token_auth_with_token_type(monkeypatch):
+ token = KeycloakToken(auth_url='https://api.test/')
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my_token'
+ monkeypatch.setattr(token, 'get', mock_token_get)
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+ actual = {}
+ api._add_auth_token(actual, "", token_type="Bearer", required=True)
+ assert actual == {'Authorization': 'Bearer my_token'}
+
+
+def test_api_token_auth_with_v3_url(monkeypatch):
+ token = KeycloakToken(auth_url='https://api.test/')
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my_token'
+ monkeypatch.setattr(token, 'get', mock_token_get)
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+ actual = {}
+ api._add_auth_token(actual, "https://galaxy.ansible.com/api/v3/resource/name", required=True)
+ assert actual == {'Authorization': 'Bearer my_token'}
+
+
+def test_api_token_auth_with_v2_url():
+ token = GalaxyToken(token=u"my_token")
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+ actual = {}
+ # Add 'v3' to a random part of the URL; only the full 'v2' URI path segment should be recognized.
+ api._add_auth_token(actual, "https://galaxy.ansible.com/api/v2/resourcev3/name", required=True)
+ assert actual == {'Authorization': 'Token my_token'}
+
+
+def test_api_basic_auth_password():
+ token = BasicAuthToken(username=u"user", password=u"pass")
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+ actual = {}
+ api._add_auth_token(actual, "", required=True)
+ assert actual == {'Authorization': 'Basic dXNlcjpwYXNz'}
+
+
+def test_api_basic_auth_no_password():
+ token = BasicAuthToken(username=u"user")
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+ actual = {}
+ api._add_auth_token(actual, "", required=True)
+ assert actual == {'Authorization': 'Basic dXNlcjo='}
+
+
+def test_api_dont_override_auth_header():
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
+ actual = {'Authorization': 'Custom token'}
+ api._add_auth_token(actual, "", required=True)
+ assert actual == {'Authorization': 'Custom token'}
+
+
+def test_initialise_galaxy(monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(u'{"available_versions":{"v1":"v1/"}}'),
+ StringIO(u'{"token":"my token"}'),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
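+ # The mocked server only advertises v1; the assertions below expect
+ # the client to treat v2 as implicitly available as well.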
+
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
+ actual = api.authenticate("github_token")
+
+ assert len(api.available_api_versions) == 2
+ assert api.available_api_versions['v1'] == u'v1/'
+ assert api.available_api_versions['v2'] == u'v2/'
+ assert actual == {u'token': u'my token'}
+ assert mock_open.call_count == 2
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
+ assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
+ assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
+ assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
+ assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
+
+
+def test_initialise_galaxy_with_auth(monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(u'{"available_versions":{"v1":"v1/"}}'),
+ StringIO(u'{"token":"my token"}'),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
+ actual = api.authenticate("github_token")
+
+ assert len(api.available_api_versions) == 2
+ assert api.available_api_versions['v1'] == u'v1/'
+ assert api.available_api_versions['v2'] == u'v2/'
+ assert actual == {u'token': u'my token'}
+ assert mock_open.call_count == 2
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
+ assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
+ assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
+ assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
+ assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
+
+
+def test_initialise_automation_hub(monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(u'{"available_versions":{"v2": "v2/", "v3":"v3/"}}'),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+ token = KeycloakToken(auth_url='https://api.test/')
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my_token'
+ monkeypatch.setattr(token, 'get', mock_token_get)
+
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
+
+ assert len(api.available_api_versions) == 2
+ assert api.available_api_versions['v2'] == u'v2/'
+ assert api.available_api_versions['v3'] == u'v3/'
+
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
+ assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
+ assert mock_open.mock_calls[0][2]['headers'] == {'Authorization': 'Bearer my_token'}
+
+
+def test_initialise_unknown(monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ urllib_error.HTTPError('https://galaxy.ansible.com/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
+ urllib_error.HTTPError('https://galaxy.ansible.com/api/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
+
+ expected = "Error when finding available api versions from test (%s) (HTTP Code: 500, Message: msg)" \
+ % api.api_server
+ with pytest.raises(AnsibleError, match=re.escape(expected)):
+ api.authenticate("github_token")
+
+
+def test_get_available_api_versions(monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(u'{"available_versions":{"v1":"v1/","v2":"v2/"}}'),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
+ actual = api.available_api_versions
+ assert len(actual) == 2
+ assert actual['v1'] == u'v1/'
+ assert actual['v2'] == u'v2/'
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
+ assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
+
+
+def test_publish_collection_missing_file():
+ fake_path = u'/fake/ÅÑŚÌβÅÈ/path'
+ expected = to_native("The collection path specified '%s' does not exist." % fake_path)
+
+ api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
+ with pytest.raises(AnsibleError, match=expected):
+ api.publish_collection(fake_path)
+
+
+def test_publish_collection_not_a_tarball():
+ expected = "The collection path specified '{0}' is not a tarball, use 'ansible-galaxy collection build' to " \
+ "create a proper release artifact."
+
+ api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
+ with tempfile.NamedTemporaryFile(prefix=u'ÅÑŚÌβÅÈ') as temp_file:
+ temp_file.write(b"\x00")
+ temp_file.flush()
+ with pytest.raises(AnsibleError, match=expected.format(to_native(temp_file.name))):
+ api.publish_collection(temp_file.name)
+
+
+def test_publish_collection_unsupported_version():
+ expected = "Galaxy action publish_collection requires API versions 'v2, v3' but only 'v1' are available on test " \
+ "https://galaxy.ansible.com/api/"
+
+ api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v1")
+ with pytest.raises(AnsibleError, match=expected):
+ api.publish_collection("path")
+
+
+@pytest.mark.parametrize('api_version, collection_url', [
+ ('v2', 'collections'),
+ ('v3', 'artifacts/collections'),
+])
+def test_publish_collection(api_version, collection_url, collection_artifact, monkeypatch):
+ api = get_test_galaxy_api("https://galaxy.ansible.com/api/", api_version)
+
+ mock_call = MagicMock()
+ mock_call.return_value = {'task': 'http://task.url/'}
+ monkeypatch.setattr(api, '_call_galaxy', mock_call)
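+ # publish_collection should POST a multipart/form-data payload to the
+ # version-specific collections endpoint and return the task URL.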
+
+ actual = api.publish_collection(collection_artifact)
+ assert actual == 'http://task.url/'
+ assert mock_call.call_count == 1
+ assert mock_call.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/%s/%s/' % (api_version, collection_url)
+ assert mock_call.mock_calls[0][2]['headers']['Content-length'] == len(mock_call.mock_calls[0][2]['args'])
+ assert mock_call.mock_calls[0][2]['headers']['Content-type'].startswith(
+ 'multipart/form-data; boundary=')
+ assert mock_call.mock_calls[0][2]['args'].startswith(b'--')
+ assert mock_call.mock_calls[0][2]['method'] == 'POST'
+ assert mock_call.mock_calls[0][2]['auth_required'] is True
+
+
+@pytest.mark.parametrize('api_version, collection_url, response, expected', [
+ ('v2', 'collections', {},
+ 'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
+ ('v2', 'collections', {
+ 'message': u'Galaxy error messäge',
+ 'code': 'GWE002',
+ }, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Galaxy error messäge Code: GWE002)'),
+ ('v3', 'artifact/collections', {},
+ 'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
+ ('v3', 'artifact/collections', {
+ 'errors': [
+ {
+ 'code': 'conflict.collection_exists',
+ 'detail': 'Collection "mynamespace-mycollection-4.1.1" already exists.',
+ 'title': 'Conflict.',
+ 'status': '400',
+ },
+ {
+ 'code': 'quantum_improbability',
+ 'title': u'Rändom(?) quantum improbability.',
+ 'source': {'parameter': 'the_arrow_of_time'},
+ 'meta': {'remediation': 'Try again before'},
+ },
+ ],
+ }, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Collection '
+ u'"mynamespace-mycollection-4.1.1" already exists. Code: conflict.collection_exists), (HTTP Code: 500, '
+ u'Message: Rändom(?) quantum improbability. Code: quantum_improbability)')
+])
+def test_publish_failure(api_version, collection_url, response, expected, collection_artifact, monkeypatch):
+ api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version)
+
+ expected_url = '%s/api/%s/%s' % (api.api_server, api_version, collection_url)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = urllib_error.HTTPError(expected_url, 500, 'msg', {},
+ StringIO(to_text(json.dumps(response))))
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ with pytest.raises(GalaxyError, match=re.escape(to_native(expected % api.api_server))):
+ api.publish_collection(collection_artifact)
+
+
+@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
+ ('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
+ '1234',
+ 'https://galaxy.server.com/api/v2/collection-imports/1234/'),
+ ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
+ '1234',
+ 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
+])
+def test_wait_import_task(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
+ api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.return_value = StringIO(u'{"state":"success","finished_at":"time"}')
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ api.wait_import_task(import_uri)
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == full_import_uri
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
+
+
+@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
+ ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
+ '1234',
+ 'https://galaxy.server.com/api/v2/collection-imports/1234/'),
+ ('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
+ '1234',
+ 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
+])
+def test_wait_import_task_multiple_requests(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
+ api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(u'{"state":"test"}'),
+ StringIO(u'{"state":"success","finished_at":"time"}'),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ mock_vvv = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_vvv)
+
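+ # patch time.sleep so the retry loop does not actually wait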
+ monkeypatch.setattr(time, 'sleep', MagicMock())
+
+ api.wait_import_task(import_uri)
+
+ assert mock_open.call_count == 2
+ assert mock_open.mock_calls[0][1][0] == full_import_uri
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+ assert mock_open.mock_calls[1][1][0] == full_import_uri
+ assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
+
+ assert mock_vvv.call_count == 1
+ assert mock_vvv.mock_calls[0][1][0] == \
+ 'Galaxy import process has a status of test, wait 2 seconds before trying again'
+
+
+@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri,', [
+ ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
+ '1234',
+ 'https://galaxy.server.com/api/v2/collection-imports/1234/'),
+ ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
+ '1234',
+ 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
+])
+def test_wait_import_task_with_failure(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
+ api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(to_text(json.dumps({
+ 'finished_at': 'some_time',
+ 'state': 'failed',
+ 'error': {
+ 'code': 'GW001',
+ 'description': u'Becäuse I said so!',
+
+ },
+ 'messages': [
+ {
+ 'level': 'error',
+ 'message': u'Somé error',
+ },
+ {
+ 'level': 'warning',
+ 'message': u'Some wärning',
+ },
+ {
+ 'level': 'info',
+ 'message': u'Somé info',
+ },
+ ],
+ }))),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ mock_vvv = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_vvv)
+
+ mock_warn = MagicMock()
+ monkeypatch.setattr(Display, 'warning', mock_warn)
+
+ mock_err = MagicMock()
+ monkeypatch.setattr(Display, 'error', mock_err)
+
+ expected = to_native(u'Galaxy import process failed: Becäuse I said so! (Code: GW001)')
+ with pytest.raises(AnsibleError, match=re.escape(expected)):
+ api.wait_import_task(import_uri)
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == full_import_uri
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
+
+ assert mock_vvv.call_count == 1
+ assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'
+
+ assert mock_warn.call_count == 1
+ assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'
+
+ assert mock_err.call_count == 1
+ assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
+
+
+@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
+ ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my_token'),
+ '1234',
+ 'https://galaxy.server.com/api/v2/collection-imports/1234/'),
+ ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
+ '1234',
+ 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
+])
+def test_wait_import_task_with_failure_no_error(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
+ api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(to_text(json.dumps({
+ 'finished_at': 'some_time',
+ 'state': 'failed',
+ 'error': {},
+ 'messages': [
+ {
+ 'level': 'error',
+ 'message': u'Somé error',
+ },
+ {
+ 'level': 'warning',
+ 'message': u'Some wärning',
+ },
+ {
+ 'level': 'info',
+ 'message': u'Somé info',
+ },
+ ],
+ }))),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ mock_vvv = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_vvv)
+
+ mock_warn = MagicMock()
+ monkeypatch.setattr(Display, 'warning', mock_warn)
+
+ mock_err = MagicMock()
+ monkeypatch.setattr(Display, 'error', mock_err)
+
+ expected = 'Galaxy import process failed: Unknown error, see %s for more details \\(Code: UNKNOWN\\)' % full_import_uri
+ with pytest.raises(AnsibleError, match=expected):
+ api.wait_import_task(import_uri)
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == full_import_uri
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
+
+ assert mock_vvv.call_count == 1
+ assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'
+
+ assert mock_warn.call_count == 1
+ assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'
+
+ assert mock_err.call_count == 1
+ assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
+
+
+@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
+ ('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
+ '1234',
+ 'https://galaxy.server.com/api/v2/collection-imports/1234/'),
+ ('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
+ '1234',
+ 'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
+])
+def test_wait_import_task_timeout(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
+ api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ def return_response(*args, **kwargs):
+ return StringIO(u'{"state":"waiting"}')
+
+ mock_open = MagicMock()
+ mock_open.side_effect = return_response
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ mock_vvv = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_vvv)
+
+ monkeypatch.setattr(time, 'sleep', MagicMock())
+
+ expected = "Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % full_import_uri
+ with pytest.raises(AnsibleError, match=expected):
+ api.wait_import_task(import_uri, 1)
+
+ assert mock_open.call_count > 1
+ assert mock_open.mock_calls[0][1][0] == full_import_uri
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+ assert mock_open.mock_calls[1][1][0] == full_import_uri
+ assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
+
+ # expected_wait_msg = 'Galaxy import process has a status of waiting, wait {0} seconds before trying again'
+ assert mock_vvv.call_count > 9 # 1st is opening Galaxy token file.
+
+ # FIXME:
+ # assert mock_vvv.mock_calls[1][1][0] == expected_wait_msg.format(2)
+ # assert mock_vvv.mock_calls[2][1][0] == expected_wait_msg.format(3)
+ # assert mock_vvv.mock_calls[3][1][0] == expected_wait_msg.format(4)
+ # assert mock_vvv.mock_calls[4][1][0] == expected_wait_msg.format(6)
+ # assert mock_vvv.mock_calls[5][1][0] == expected_wait_msg.format(10)
+ # assert mock_vvv.mock_calls[6][1][0] == expected_wait_msg.format(15)
+ # assert mock_vvv.mock_calls[7][1][0] == expected_wait_msg.format(22)
+ # assert mock_vvv.mock_calls[8][1][0] == expected_wait_msg.format(30)
+
+
+@pytest.mark.parametrize('api_version, token_type, version, token_ins', [
+ ('v2', None, 'v2.1.13', None),
+ ('v3', 'Bearer', 'v1.0.0', KeycloakToken(auth_url='https://api.test/api/automation-hub/')),
+])
+def test_get_collection_version_metadata_no_version(api_version, token_type, version, token_ins, monkeypatch):
+ api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(to_text(json.dumps({
+ 'download_url': 'https://downloadme.com',
+ 'artifact': {
+ 'sha256': 'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f',
+ },
+ 'namespace': {
+ 'name': 'namespace',
+ },
+ 'collection': {
+ 'name': 'collection',
+ },
+ 'version': version,
+ 'metadata': {
+ 'dependencies': {},
+ }
+ }))),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ actual = api.get_collection_version_metadata('namespace', 'collection', version)
+
+ assert isinstance(actual, CollectionVersionMetadata)
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.download_url == u'https://downloadme.com'
+ assert actual.artifact_sha256 == u'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f'
+ assert actual.version == version
+ assert actual.dependencies == {}
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s/' \
+ % (api.api_server, api_version, version)
+
+ # v2 calls don't need auth, so there is no Authorization header or token_type
+ if token_type:
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+
+
+@pytest.mark.parametrize('api_version, token_type, token_ins, response', [
+ ('v2', None, None, {
+ 'count': 2,
+ 'next': None,
+ 'previous': None,
+ 'results': [
+ {
+ 'version': '1.0.0',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
+ },
+ {
+ 'version': '1.0.1',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
+ },
+ ],
+ }),
+ # TODO: Verify this once Automation Hub is actually out
+ ('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), {
+ 'count': 2,
+ 'next': None,
+ 'previous': None,
+ 'data': [
+ {
+ 'version': '1.0.0',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
+ },
+ {
+ 'version': '1.0.1',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
+ },
+ ],
+ }),
+])
+def test_get_collection_versions(api_version, token_type, token_ins, response, monkeypatch):
+ api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [
+ StringIO(to_text(json.dumps(response))),
+ ]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
+
+ actual = api.get_collection_versions('namespace', 'collection')
+ assert actual == [u'1.0.0', u'1.0.1']
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
+ 'versions/' % api_version
+ if token_ins:
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+
+
+@pytest.mark.parametrize('api_version, token_type, token_ins, responses', [
+ ('v2', None, None, [
+ {
+ 'count': 6,
+ 'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2',
+ 'previous': None,
+ 'results': [
+ {
+ 'version': '1.0.0',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
+ },
+ {
+ 'version': '1.0.1',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
+ },
+ ],
+ },
+ {
+ 'count': 6,
+ 'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=3',
+ 'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions',
+ 'results': [
+ {
+ 'version': '1.0.2',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.2',
+ },
+ {
+ 'version': '1.0.3',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.3',
+ },
+ ],
+ },
+ {
+ 'count': 6,
+ 'next': None,
+ 'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2',
+ 'results': [
+ {
+ 'version': '1.0.4',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.4',
+ },
+ {
+ 'version': '1.0.5',
+ 'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.5',
+ },
+ ],
+ },
+ ]),
+ ('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), [
+ {
+ 'count': 6,
+ 'links': {
+ 'next': '/api/v3/collections/namespace/collection/versions/?page=2',
+ 'previous': None,
+ },
+ 'data': [
+ {
+ 'version': '1.0.0',
+ 'href': '/api/v3/collections/namespace/collection/versions/1.0.0',
+ },
+ {
+ 'version': '1.0.1',
+ 'href': '/api/v3/collections/namespace/collection/versions/1.0.1',
+ },
+ ],
+ },
+ {
+ 'count': 6,
+ 'links': {
+ 'next': '/api/v3/collections/namespace/collection/versions/?page=3',
+ 'previous': '/api/v3/collections/namespace/collection/versions',
+ },
+ 'data': [
+ {
+ 'version': '1.0.2',
+ 'href': '/api/v3/collections/namespace/collection/versions/1.0.2',
+ },
+ {
+ 'version': '1.0.3',
+ 'href': '/api/v3/collections/namespace/collection/versions/1.0.3',
+ },
+ ],
+ },
+ {
+ 'count': 6,
+ 'links': {
+ 'next': None,
+ 'previous': '/api/v3/collections/namespace/collection/versions/?page=2',
+ },
+ 'data': [
+ {
+ 'version': '1.0.4',
+ 'href': '/api/v3/collections/namespace/collection/versions/1.0.4',
+ },
+ {
+ 'version': '1.0.5',
+ 'href': '/api/v3/collections/namespace/collection/versions/1.0.5',
+ },
+ ],
+ },
+ ]),
+])
+def test_get_collection_versions_pagination(api_version, token_type, token_ins, responses, monkeypatch):
+ api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)
+
+ if token_ins:
+ mock_token_get = MagicMock()
+ mock_token_get.return_value = 'my token'
+ monkeypatch.setattr(token_ins, 'get', mock_token_get)
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
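+ # The client should follow the 'next' links across all three pages; for
+ # v3 the relative hrefs are resolved against the API server address.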
+
+ actual = api.get_collection_versions('namespace', 'collection')
+ assert actual == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']
+
+ assert mock_open.call_count == 3
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
+ 'versions/' % api_version
+ assert mock_open.mock_calls[1][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
+ 'versions/?page=2' % api_version
+ assert mock_open.mock_calls[2][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
+ 'versions/?page=3' % api_version
+
+ if token_type:
+ assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
+ assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
+ assert mock_open.mock_calls[2][2]['headers']['Authorization'] == '%s my token' % token_type
+
+
+@pytest.mark.parametrize('responses', [
+ [
+ {
+ 'count': 2,
+ 'results': [{'name': '3.5.1', }, {'name': '3.5.2'}],
+ 'next_link': None,
+ 'next': None,
+ 'previous_link': None,
+ 'previous': None
+ },
+ ],
+ [
+ {
+ 'count': 2,
+ 'results': [{'name': '3.5.1'}],
+ 'next_link': '/api/v1/roles/432/versions/?page=2&page_size=50',
+ 'next': '/roles/432/versions/?page=2&page_size=50',
+ 'previous_link': None,
+ 'previous': None
+ },
+ {
+ 'count': 2,
+ 'results': [{'name': '3.5.2'}],
+ 'next_link': None,
+ 'next': None,
+ 'previous_link': '/api/v1/roles/432/versions/?&page_size=50',
+ 'previous': '/roles/432/versions/?page_size=50',
+ },
+ ]
+])
+def test_get_role_versions_pagination(monkeypatch, responses):
+ api = get_test_galaxy_api('https://galaxy.com/api/', 'v1')
+
+ mock_open = MagicMock()
+ mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
+ monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
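+ # fetch_role_related should follow 'next_link' until it is None and
+ # concatenate the 'results' from each page.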
+
+ actual = api.fetch_role_related('versions', 432)
+ assert actual == [{'name': '3.5.1'}, {'name': '3.5.2'}]
+
+ assert mock_open.call_count == len(responses)
+
+ assert mock_open.mock_calls[0][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page_size=50'
+ if len(responses) == 2:
+ assert mock_open.mock_calls[1][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page=2&page_size=50'
diff --git a/test/units/galaxy/test_collection.py b/test/units/galaxy/test_collection.py
new file mode 100644
index 00000000..fda6fe69
--- /dev/null
+++ b/test/units/galaxy/test_collection.py
@@ -0,0 +1,1326 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import pytest
+import re
+import tarfile
+import uuid
+
+from hashlib import sha256
+from io import BytesIO
+from units.compat.mock import MagicMock, mock_open, patch
+
+from ansible import context
+from ansible.cli.galaxy import GalaxyCLI
+from ansible.errors import AnsibleError
+from ansible.galaxy import api, collection, token
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six.moves import builtins
+from ansible.utils import context_objects as co
+from ansible.utils.display import Display
+from ansible.utils.hashing import secure_hash_s
+
+
+@pytest.fixture(autouse=True)
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
+
+
+@pytest.fixture()
+def collection_input(tmp_path_factory):
+ ''' Creates a collection skeleton directory for build tests '''
+ test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Input'))
+ namespace = 'ansible_namespace'
+ collection = 'collection'
+ skeleton = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
+
+ galaxy_args = ['ansible-galaxy', 'collection', 'init', '%s.%s' % (namespace, collection),
+ '-c', '--init-path', test_dir, '--collection-skeleton', skeleton]
+ GalaxyCLI(args=galaxy_args).run()
+ collection_dir = os.path.join(test_dir, namespace, collection)
+ output_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Output'))
+
+ return collection_dir, output_dir
+
+
+@pytest.fixture()
+def collection_artifact(monkeypatch, tmp_path_factory):
+ ''' Creates a temp collection artifact and mocked open_url instance for publishing tests '''
+ mock_open = MagicMock()
+ monkeypatch.setattr(collection, 'open_url', mock_open)
+
+ mock_uuid = MagicMock()
+ mock_uuid.return_value.hex = 'uuid'
+ monkeypatch.setattr(uuid, 'uuid4', mock_uuid)
+
+ tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections')
+ input_file = to_text(tmp_path / 'collection.tar.gz')
+
+ with tarfile.open(input_file, 'w:gz') as tfile:
+ b_io = BytesIO(b"\x00\x01\x02\x03")
+ tar_info = tarfile.TarInfo('test')
+ tar_info.size = 4
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ return input_file, mock_open
+
+
+@pytest.fixture()
+def galaxy_yml(request, tmp_path_factory):
+ b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections'))
+ b_galaxy_yml = os.path.join(b_test_dir, b'galaxy.yml')
+ with open(b_galaxy_yml, 'wb') as galaxy_obj:
+ galaxy_obj.write(to_bytes(request.param))
+
+ yield b_galaxy_yml
+
+
+@pytest.fixture()
+def tmp_tarfile(tmp_path_factory, manifest_info):
+ ''' Creates a temporary tar file for _extract_tar_file tests '''
+ filename = u'ÅÑŚÌβÅÈ'
+ temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename)))
+ tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename))
+ data = os.urandom(8)
+
+ with tarfile.open(tar_file, 'w:gz') as tfile:
+ b_io = BytesIO(data)
+ tar_info = tarfile.TarInfo(filename)
+ tar_info.size = len(data)
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ b_data = to_bytes(json.dumps(manifest_info, indent=True), errors='surrogate_or_strict')
+ b_io = BytesIO(b_data)
+ tar_info = tarfile.TarInfo('MANIFEST.json')
+ tar_info.size = len(b_data)
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ sha256_hash = sha256()
+ sha256_hash.update(data)
+
+ with tarfile.open(tar_file, 'r') as tfile:
+ yield temp_dir, tfile, filename, sha256_hash.hexdigest()
+
+
+@pytest.fixture()
+def galaxy_server():
+ context.CLIARGS._store = {'ignore_certs': False}
+ galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com',
+ token=token.GalaxyToken(token='key'))
+ return galaxy_api
+
+
+@pytest.fixture()
+def manifest_template():
+ def get_manifest_info(namespace='ansible_namespace', name='collection', version='0.1.0'):
+ return {
+ "collection_info": {
+ "namespace": namespace,
+ "name": name,
+ "version": version,
+ "authors": [
+ "shertel"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "test",
+ "collection"
+ ],
+ "description": "Test",
+ "license": [
+ "MIT"
+ ],
+ "license_file": None,
+ "dependencies": {},
+ "repository": "https://github.com/{0}/{1}".format(namespace, name),
+ "documentation": None,
+ "homepage": None,
+ "issues": None
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "files_manifest_checksum",
+ "format": 1
+ },
+ "format": 1
+ }
+
+ return get_manifest_info
+
+
+@pytest.fixture()
+def manifest_info(manifest_template):
+ return manifest_template()
+
+
+@pytest.fixture()
+def files_manifest_info():
+ return {
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": None,
+ "chksum_sha256": None,
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "individual_file_checksum",
+ "format": 1
+ }
+ ],
+ "format": 1}
+
+
+@pytest.fixture()
+def manifest(manifest_info):
+ b_data = to_bytes(json.dumps(manifest_info))
+
+ with patch.object(builtins, 'open', mock_open(read_data=b_data)) as m:
+ with open('MANIFEST.json', mode='rb') as fake_file:
+ yield fake_file, sha256(b_data).hexdigest()
+
+
+@pytest.fixture()
+def mock_collection(galaxy_server):
+ def create_mock_collection(namespace='ansible_namespace', name='collection', version='0.1.0', local=True, local_installed=True):
+ b_path = None
+ force = False
+
+ if local:
+ mock_collection = collection.CollectionRequirement(namespace, name, b_path, galaxy_server, [version], version, force, skip=local_installed)
+ else:
+ download_url = 'https://galaxy.ansible.com/download/{0}-{1}-{2}.tar.gz'.format(namespace, name, version)
+ digest = '19415a6a6df831df61cffde4a09d1d89ac8d8ca5c0586e85bea0b106d6dff29a'
+ dependencies = {}
+ metadata = api.CollectionVersionMetadata(namespace, name, version, download_url, digest, dependencies)
+ mock_collection = collection.CollectionRequirement(namespace, name, b_path, galaxy_server, [version], version, force, metadata=metadata)
+
+ return mock_collection
+ return create_mock_collection
+
+
+def test_build_collection_no_galaxy_yaml():
+ fake_path = u'/fake/ÅÑŚÌβÅÈ/path'
+ expected = to_native("The collection galaxy.yml path '%s/galaxy.yml' does not exist." % fake_path)
+
+ with pytest.raises(AnsibleError, match=expected):
+ collection.build_collection(fake_path, 'output', False)
+
+
+def test_build_existing_output_file(collection_input):
+ input_dir, output_dir = collection_input
+
+ existing_output_dir = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
+ os.makedirs(existing_output_dir)
+
+ expected = "The output collection artifact '%s' already exists, but is a directory - aborting" \
+ % to_native(existing_output_dir)
+ with pytest.raises(AnsibleError, match=expected):
+ collection.build_collection(input_dir, output_dir, False)
+
+
+def test_build_existing_output_without_force(collection_input):
+ input_dir, output_dir = collection_input
+
+ existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
+ with open(existing_output, 'w+') as out_file:
+ out_file.write("random garbage")
+ out_file.flush()
+
+ expected = "The file '%s' already exists. You can use --force to re-create the collection artifact." \
+ % to_native(existing_output)
+ with pytest.raises(AnsibleError, match=expected):
+ collection.build_collection(input_dir, output_dir, False)
+
+
+def test_build_existing_output_with_force(collection_input):
+ input_dir, output_dir = collection_input
+
+ existing_output = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
+ with open(existing_output, 'w+') as out_file:
+ out_file.write("random garbage")
+ out_file.flush()
+
+ collection.build_collection(input_dir, output_dir, True)
+
+ # Verify the file was replaced with an actual tar file
+ assert tarfile.is_tarfile(existing_output)
+
+
+@pytest.mark.parametrize('galaxy_yml', [b'namespace: value: broken'], indirect=True)
+def test_invalid_yaml_galaxy_file(galaxy_yml):
+ expected = to_native(b"Failed to parse the galaxy.yml at '%s' with the following error:" % galaxy_yml)
+
+ with pytest.raises(AnsibleError, match=expected):
+ collection._get_galaxy_yml(galaxy_yml)
+
+
+@pytest.mark.parametrize('galaxy_yml', [b'namespace: test_namespace'], indirect=True)
+def test_missing_required_galaxy_key(galaxy_yml):
+ expected = "The collection galaxy.yml at '%s' is missing the following mandatory keys: authors, name, " \
+ "readme, version" % to_native(galaxy_yml)
+
+ with pytest.raises(AnsibleError, match=expected):
+ collection._get_galaxy_yml(galaxy_yml)
+
+
+@pytest.mark.parametrize('galaxy_yml', [b"""
+namespace: namespace
+name: collection
+authors: Jordan
+version: 0.1.0
+readme: README.md
+invalid: value"""], indirect=True)
+def test_warning_extra_keys(galaxy_yml, monkeypatch):
+ display_mock = MagicMock()
+ monkeypatch.setattr(Display, 'warning', display_mock)
+
+ collection._get_galaxy_yml(galaxy_yml)
+
+ assert display_mock.call_count == 1
+ assert display_mock.call_args[0][0] == "Found unknown keys in collection galaxy.yml at '%s': invalid"\
+ % to_text(galaxy_yml)
+
+
+@pytest.mark.parametrize('galaxy_yml', [b"""
+namespace: namespace
+name: collection
+authors: Jordan
+version: 0.1.0
+readme: README.md"""], indirect=True)
+def test_defaults_galaxy_yml(galaxy_yml):
+ actual = collection._get_galaxy_yml(galaxy_yml)
+
+ assert actual['namespace'] == 'namespace'
+ assert actual['name'] == 'collection'
+ assert actual['authors'] == ['Jordan']
+ assert actual['version'] == '0.1.0'
+ assert actual['readme'] == 'README.md'
+ assert actual['description'] is None
+ assert actual['repository'] is None
+ assert actual['documentation'] is None
+ assert actual['homepage'] is None
+ assert actual['issues'] is None
+ assert actual['tags'] == []
+ assert actual['dependencies'] == {}
+ assert actual['license_ids'] == []
+
+
+@pytest.mark.parametrize('galaxy_yml', [(b"""
+namespace: namespace
+name: collection
+authors: Jordan
+version: 0.1.0
+readme: README.md
+license: MIT"""), (b"""
+namespace: namespace
+name: collection
+authors: Jordan
+version: 0.1.0
+readme: README.md
+license:
+- MIT""")], indirect=True)
+def test_galaxy_yml_list_value(galaxy_yml):
+ actual = collection._get_galaxy_yml(galaxy_yml)
+ assert actual['license_ids'] == ['MIT']
+
+
+def test_build_ignore_files_and_folders(collection_input, monkeypatch):
+ input_dir = collection_input[0]
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_display)
+
+ git_folder = os.path.join(input_dir, '.git')
+ retry_file = os.path.join(input_dir, 'ansible.retry')
+
+ tests_folder = os.path.join(input_dir, 'tests', 'output')
+ tests_output_file = os.path.join(tests_folder, 'result.txt')
+
+ os.makedirs(git_folder)
+ os.makedirs(tests_folder)
+
+ with open(retry_file, 'w+') as ignore_file:
+ ignore_file.write('random')
+ ignore_file.flush()
+
+ with open(tests_output_file, 'w+') as tests_file:
+ tests_file.write('random')
+ tests_file.flush()
+
+ actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
+
+ assert actual['format'] == 1
+ for manifest_entry in actual['files']:
+ assert manifest_entry['name'] not in ['.git', 'ansible.retry', 'galaxy.yml', 'tests/output', 'tests/output/result.txt']
+
+ expected_msgs = [
+ "Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir),
+ "Skipping '%s' for collection build" % to_text(retry_file),
+ "Skipping '%s' for collection build" % to_text(git_folder),
+ "Skipping '%s' for collection build" % to_text(tests_folder),
+ ]
+ assert mock_display.call_count == 4
+ assert mock_display.mock_calls[0][1][0] in expected_msgs
+ assert mock_display.mock_calls[1][1][0] in expected_msgs
+ assert mock_display.mock_calls[2][1][0] in expected_msgs
+ assert mock_display.mock_calls[3][1][0] in expected_msgs
+
+
+def test_build_ignore_older_release_in_root(collection_input, monkeypatch):
+ input_dir = collection_input[0]
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_display)
+
+ # This is expected to be ignored because it is in the root collection dir.
+ release_file = os.path.join(input_dir, 'namespace-collection-0.0.0.tar.gz')
+
+ # This is not expected to be ignored because it is not in the root collection dir.
+ fake_release_file = os.path.join(input_dir, 'plugins', 'namespace-collection-0.0.0.tar.gz')
+
+ for filename in [release_file, fake_release_file]:
+ with open(filename, 'w+') as file_obj:
+ file_obj.write('random')
+ file_obj.flush()
+
+ actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
+ assert actual['format'] == 1
+
+ plugin_release_found = False
+ for manifest_entry in actual['files']:
+ assert manifest_entry['name'] != 'namespace-collection-0.0.0.tar.gz'
+ if manifest_entry['name'] == 'plugins/namespace-collection-0.0.0.tar.gz':
+ plugin_release_found = True
+
+ assert plugin_release_found
+
+ expected_msgs = [
+ "Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir),
+ "Skipping '%s' for collection build" % to_text(release_file)
+ ]
+ assert mock_display.call_count == 2
+ assert mock_display.mock_calls[0][1][0] in expected_msgs
+ assert mock_display.mock_calls[1][1][0] in expected_msgs
+
+
+def test_build_ignore_patterns(collection_input, monkeypatch):
+ input_dir = collection_input[0]
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'vvv', mock_display)
+
+ actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection',
+ ['*.md', 'plugins/action', 'playbooks/*.j2'])
+ assert actual['format'] == 1
+
+ expected_missing = [
+ 'README.md',
+ 'docs/My Collection.md',
+ 'plugins/action',
+ 'playbooks/templates/test.conf.j2',
+ 'playbooks/templates/subfolder/test.conf.j2',
+ ]
+
+    # Files or dirs that nearly match an ignore pattern but do not; make sure they are still present
+ expected_present = [
+ 'docs',
+ 'roles/common/templates/test.conf.j2',
+ 'roles/common/templates/subfolder/test.conf.j2',
+ ]
+
+ actual_files = [e['name'] for e in actual['files']]
+ for m in expected_missing:
+ assert m not in actual_files
+
+ for p in expected_present:
+ assert p in actual_files
+
+ expected_msgs = [
+ "Skipping '%s/galaxy.yml' for collection build" % to_text(input_dir),
+ "Skipping '%s/README.md' for collection build" % to_text(input_dir),
+ "Skipping '%s/docs/My Collection.md' for collection build" % to_text(input_dir),
+ "Skipping '%s/plugins/action' for collection build" % to_text(input_dir),
+ "Skipping '%s/playbooks/templates/test.conf.j2' for collection build" % to_text(input_dir),
+ "Skipping '%s/playbooks/templates/subfolder/test.conf.j2' for collection build" % to_text(input_dir),
+ ]
+ assert mock_display.call_count == len(expected_msgs)
+ assert mock_display.mock_calls[0][1][0] in expected_msgs
+ assert mock_display.mock_calls[1][1][0] in expected_msgs
+ assert mock_display.mock_calls[2][1][0] in expected_msgs
+ assert mock_display.mock_calls[3][1][0] in expected_msgs
+ assert mock_display.mock_calls[4][1][0] in expected_msgs
+ assert mock_display.mock_calls[5][1][0] in expected_msgs
+
+
+def test_build_ignore_symlink_target_outside_collection(collection_input, monkeypatch):
+ input_dir, outside_dir = collection_input
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'warning', mock_display)
+
+ link_path = os.path.join(input_dir, 'plugins', 'connection')
+ os.symlink(outside_dir, link_path)
+
+ actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
+ for manifest_entry in actual['files']:
+ assert manifest_entry['name'] != 'plugins/connection'
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == "Skipping '%s' as it is a symbolic link to a directory outside " \
+ "the collection" % to_text(link_path)
+
+
+def test_build_copy_symlink_target_inside_collection(collection_input):
+ input_dir = collection_input[0]
+
+ os.makedirs(os.path.join(input_dir, 'playbooks', 'roles'))
+ roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked')
+
+ roles_target = os.path.join(input_dir, 'roles', 'linked')
+ roles_target_tasks = os.path.join(roles_target, 'tasks')
+ os.makedirs(roles_target_tasks)
+ with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main:
+ tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:")
+ tasks_main.flush()
+
+ os.symlink(roles_target, roles_link)
+
+ actual = collection._build_files_manifest(to_bytes(input_dir), 'namespace', 'collection', [])
+
+ linked_entries = [e for e in actual['files'] if e['name'].startswith('playbooks/roles/linked')]
+ assert len(linked_entries) == 1
+ assert linked_entries[0]['name'] == 'playbooks/roles/linked'
+ assert linked_entries[0]['ftype'] == 'dir'
+
+
+def test_build_with_symlink_inside_collection(collection_input):
+ input_dir, output_dir = collection_input
+
+ os.makedirs(os.path.join(input_dir, 'playbooks', 'roles'))
+ roles_link = os.path.join(input_dir, 'playbooks', 'roles', 'linked')
+ file_link = os.path.join(input_dir, 'docs', 'README.md')
+
+ roles_target = os.path.join(input_dir, 'roles', 'linked')
+ roles_target_tasks = os.path.join(roles_target, 'tasks')
+ os.makedirs(roles_target_tasks)
+ with open(os.path.join(roles_target_tasks, 'main.yml'), 'w+') as tasks_main:
+ tasks_main.write("---\n- hosts: localhost\n tasks:\n - ping:")
+ tasks_main.flush()
+
+ os.symlink(roles_target, roles_link)
+ os.symlink(os.path.join(input_dir, 'README.md'), file_link)
+
+ collection.build_collection(input_dir, output_dir, False)
+
+ output_artifact = os.path.join(output_dir, 'ansible_namespace-collection-0.1.0.tar.gz')
+ assert tarfile.is_tarfile(output_artifact)
+
+ with tarfile.open(output_artifact, mode='r') as actual:
+ members = actual.getmembers()
+
+ linked_folder = next(m for m in members if m.path == 'playbooks/roles/linked')
+ assert linked_folder.type == tarfile.SYMTYPE
+ assert linked_folder.linkname == '../../roles/linked'
+
+ linked_file = next(m for m in members if m.path == 'docs/README.md')
+ assert linked_file.type == tarfile.SYMTYPE
+ assert linked_file.linkname == '../README.md'
+
+ linked_file_obj = actual.extractfile(linked_file.name)
+ actual_file = secure_hash_s(linked_file_obj.read())
+ linked_file_obj.close()
+
+ assert actual_file == '63444bfc766154e1bc7557ef6280de20d03fcd81'
+
+
+def test_publish_no_wait(galaxy_server, collection_artifact, monkeypatch):
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ artifact_path, mock_open = collection_artifact
+ fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234'
+
+ mock_publish = MagicMock()
+ mock_publish.return_value = fake_import_uri
+ monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish)
+
+ collection.publish_collection(artifact_path, galaxy_server, False, 0)
+
+ assert mock_publish.call_count == 1
+ assert mock_publish.mock_calls[0][1][0] == artifact_path
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == \
+ "Collection has been pushed to the Galaxy server %s %s, not waiting until import has completed due to " \
+ "--no-wait being set. Import task results can be found at %s" % (galaxy_server.name, galaxy_server.api_server,
+ fake_import_uri)
+
+
+def test_publish_with_wait(galaxy_server, collection_artifact, monkeypatch):
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ artifact_path, mock_open = collection_artifact
+ fake_import_uri = 'https://galaxy.server.com/api/v2/import/1234'
+
+ mock_publish = MagicMock()
+ mock_publish.return_value = fake_import_uri
+ monkeypatch.setattr(galaxy_server, 'publish_collection', mock_publish)
+
+ mock_wait = MagicMock()
+ monkeypatch.setattr(galaxy_server, 'wait_import_task', mock_wait)
+
+ collection.publish_collection(artifact_path, galaxy_server, True, 0)
+
+ assert mock_publish.call_count == 1
+ assert mock_publish.mock_calls[0][1][0] == artifact_path
+
+ assert mock_wait.call_count == 1
+ assert mock_wait.mock_calls[0][1][0] == '1234'
+
+ assert mock_display.mock_calls[0][1][0] == "Collection has been published to the Galaxy server test_server %s" \
+ % galaxy_server.api_server
+
+
+def test_find_existing_collections(tmp_path_factory, monkeypatch):
+ test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections'))
+ collection1 = os.path.join(test_dir, 'namespace1', 'collection1')
+ collection2 = os.path.join(test_dir, 'namespace2', 'collection2')
+ fake_collection1 = os.path.join(test_dir, 'namespace3', 'collection3')
+ fake_collection2 = os.path.join(test_dir, 'namespace4')
+ os.makedirs(collection1)
+ os.makedirs(collection2)
+ os.makedirs(os.path.split(fake_collection1)[0])
+
+ open(fake_collection1, 'wb+').close()
+ open(fake_collection2, 'wb+').close()
+
+ collection1_manifest = json.dumps({
+ 'collection_info': {
+ 'namespace': 'namespace1',
+ 'name': 'collection1',
+ 'version': '1.2.3',
+ 'authors': ['Jordan Borean'],
+ 'readme': 'README.md',
+ 'dependencies': {},
+ },
+ 'format': 1,
+ })
+ with open(os.path.join(collection1, 'MANIFEST.json'), 'wb') as manifest_obj:
+ manifest_obj.write(to_bytes(collection1_manifest))
+
+ mock_warning = MagicMock()
+ monkeypatch.setattr(Display, 'warning', mock_warning)
+
+ actual = collection.find_existing_collections(test_dir)
+
+ assert len(actual) == 2
+ for actual_collection in actual:
+ assert actual_collection.skip is True
+
+ if str(actual_collection) == 'namespace1.collection1':
+ assert actual_collection.namespace == 'namespace1'
+ assert actual_collection.name == 'collection1'
+ assert actual_collection.b_path == to_bytes(collection1)
+ assert actual_collection.api is None
+ assert actual_collection.versions == set(['1.2.3'])
+ assert actual_collection.latest_version == '1.2.3'
+ assert actual_collection.dependencies == {}
+ else:
+ assert actual_collection.namespace == 'namespace2'
+ assert actual_collection.name == 'collection2'
+ assert actual_collection.b_path == to_bytes(collection2)
+ assert actual_collection.api is None
+ assert actual_collection.versions == set(['*'])
+ assert actual_collection.latest_version == '*'
+ assert actual_collection.dependencies == {}
+
+ assert mock_warning.call_count == 1
+ assert mock_warning.mock_calls[0][1][0] == "Collection at '%s' does not have a MANIFEST.json file, cannot " \
+ "detect version." % to_text(collection2)
+
+
+def test_download_file(tmp_path_factory, monkeypatch):
+ temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections'))
+
+ data = b"\x00\x01\x02\x03"
+ sha256_hash = sha256()
+ sha256_hash.update(data)
+
+ mock_open = MagicMock()
+ mock_open.return_value = BytesIO(data)
+ monkeypatch.setattr(collection, 'open_url', mock_open)
+
+ expected = os.path.join(temp_dir, b'file')
+ actual = collection._download_file('http://google.com/file', temp_dir, sha256_hash.hexdigest(), True)
+
+ assert actual.startswith(expected)
+ assert os.path.isfile(actual)
+ with open(actual, 'rb') as file_obj:
+ assert file_obj.read() == data
+
+ assert mock_open.call_count == 1
+ assert mock_open.mock_calls[0][1][0] == 'http://google.com/file'
+
+
+def test_download_file_hash_mismatch(tmp_path_factory, monkeypatch):
+ temp_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections'))
+
+ data = b"\x00\x01\x02\x03"
+
+ mock_open = MagicMock()
+ mock_open.return_value = BytesIO(data)
+ monkeypatch.setattr(collection, 'open_url', mock_open)
+
+ expected = "Mismatch artifact hash with downloaded file"
+ with pytest.raises(AnsibleError, match=expected):
+ collection._download_file('http://google.com/file', temp_dir, 'bad', True)
+
+
+def test_extract_tar_file_invalid_hash(tmp_tarfile):
+ temp_dir, tfile, filename, dummy = tmp_tarfile
+
+ expected = "Checksum mismatch for '%s' inside collection at '%s'" % (to_native(filename), to_native(tfile.name))
+ with pytest.raises(AnsibleError, match=expected):
+ collection._extract_tar_file(tfile, filename, temp_dir, temp_dir, "fakehash")
+
+
+def test_extract_tar_file_missing_member(tmp_tarfile):
+ temp_dir, tfile, dummy, dummy = tmp_tarfile
+
+ expected = "Collection tar at '%s' does not contain the expected file 'missing'." % to_native(tfile.name)
+ with pytest.raises(AnsibleError, match=expected):
+ collection._extract_tar_file(tfile, 'missing', temp_dir, temp_dir)
+
+
+def test_extract_tar_file_missing_parent_dir(tmp_tarfile):
+ temp_dir, tfile, filename, checksum = tmp_tarfile
+ output_dir = os.path.join(temp_dir, b'output')
+ output_file = os.path.join(output_dir, to_bytes(filename))
+
+ collection._extract_tar_file(tfile, filename, output_dir, temp_dir, checksum)
+    assert os.path.isfile(output_file)
+
+
+def test_extract_tar_file_outside_dir(tmp_path_factory):
+ filename = u'ÅÑŚÌβÅÈ'
+ temp_dir = to_bytes(tmp_path_factory.mktemp('test-%s Collections' % to_native(filename)))
+ tar_file = os.path.join(temp_dir, to_bytes('%s.tar.gz' % filename))
+ data = os.urandom(8)
+
+ tar_filename = '../%s.sh' % filename
+ with tarfile.open(tar_file, 'w:gz') as tfile:
+ b_io = BytesIO(data)
+ tar_info = tarfile.TarInfo(tar_filename)
+ tar_info.size = len(data)
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ expected = re.escape("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
+ % to_native(tar_filename))
+ with tarfile.open(tar_file, 'r') as tfile:
+ with pytest.raises(AnsibleError, match=expected):
+ collection._extract_tar_file(tfile, tar_filename, os.path.join(temp_dir, to_bytes(filename)), temp_dir)
+
+
+def test_require_one_of_collections_requirements_with_both():
+ cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', 'namespace.collection', '-r', 'requirements.yml'])
+
+ with pytest.raises(AnsibleError) as req_err:
+ cli._require_one_of_collections_requirements(('namespace.collection',), 'requirements.yml')
+
+ with pytest.raises(AnsibleError) as cli_err:
+ cli.run()
+
+ assert req_err.value.message == cli_err.value.message == 'The positional collection_name arg and --requirements-file are mutually exclusive.'
+
+
+def test_require_one_of_collections_requirements_with_neither():
+ cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify'])
+
+ with pytest.raises(AnsibleError) as req_err:
+ cli._require_one_of_collections_requirements((), '')
+
+ with pytest.raises(AnsibleError) as cli_err:
+ cli.run()
+
+ assert req_err.value.message == cli_err.value.message == 'You must specify a collection name or a requirements file.'
+
+
+def test_require_one_of_collections_requirements_with_collections():
+ cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', 'namespace1.collection1', 'namespace2.collection1:1.0.0'])
+ collections = ('namespace1.collection1', 'namespace2.collection1:1.0.0',)
+
+ requirements = cli._require_one_of_collections_requirements(collections, '')['collections']
+
+ assert requirements == [('namespace1.collection1', '*', None, None), ('namespace2.collection1', '1.0.0', None, None)]
+
+
+@patch('ansible.cli.galaxy.GalaxyCLI._parse_requirements_file')
+def test_require_one_of_collections_requirements_with_requirements(mock_parse_requirements_file, galaxy_server):
+ cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'verify', '-r', 'requirements.yml', 'namespace.collection'])
+ mock_parse_requirements_file.return_value = {'collections': [('namespace.collection', '1.0.5', galaxy_server)]}
+ requirements = cli._require_one_of_collections_requirements((), 'requirements.yml')['collections']
+
+ assert mock_parse_requirements_file.call_count == 1
+ assert requirements == [('namespace.collection', '1.0.5', galaxy_server)]
+
+
+@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify', spec=True)
+def test_call_GalaxyCLI(execute_verify):
+ galaxy_args = ['ansible-galaxy', 'collection', 'verify', 'namespace.collection']
+
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert execute_verify.call_count == 1
+
+
+@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify')
+def test_call_GalaxyCLI_with_implicit_role(execute_verify):
+ galaxy_args = ['ansible-galaxy', 'verify', 'namespace.implicit_role']
+
+ with pytest.raises(SystemExit):
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert not execute_verify.called
+
+
+@patch('ansible.cli.galaxy.GalaxyCLI.execute_verify')
+def test_call_GalaxyCLI_with_role(execute_verify):
+ galaxy_args = ['ansible-galaxy', 'role', 'verify', 'namespace.role']
+
+ with pytest.raises(SystemExit):
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert not execute_verify.called
+
+
+@patch('ansible.cli.galaxy.verify_collections', spec=True)
+def test_execute_verify_with_defaults(mock_verify_collections):
+ galaxy_args = ['ansible-galaxy', 'collection', 'verify', 'namespace.collection:1.0.4']
+ GalaxyCLI(args=galaxy_args).run()
+
+ assert mock_verify_collections.call_count == 1
+
+ requirements, search_paths, galaxy_apis, validate, ignore_errors = mock_verify_collections.call_args[0]
+
+ assert requirements == [('namespace.collection', '1.0.4', None, None)]
+ for install_path in search_paths:
+ assert install_path.endswith('ansible_collections')
+ assert galaxy_apis[0].api_server == 'https://galaxy.ansible.com'
+ assert validate is True
+ assert ignore_errors is False
+
+
+@patch('ansible.cli.galaxy.verify_collections', spec=True)
+def test_execute_verify(mock_verify_collections):
+ GalaxyCLI(args=[
+ 'ansible-galaxy', 'collection', 'verify', 'namespace.collection:1.0.4', '--ignore-certs',
+ '-p', '~/.ansible', '--ignore-errors', '--server', 'http://galaxy-dev.com',
+ ]).run()
+
+ assert mock_verify_collections.call_count == 1
+
+ requirements, search_paths, galaxy_apis, validate, ignore_errors = mock_verify_collections.call_args[0]
+
+ assert requirements == [('namespace.collection', '1.0.4', None, None)]
+ for install_path in search_paths:
+ assert install_path.endswith('ansible_collections')
+ assert galaxy_apis[0].api_server == 'http://galaxy-dev.com'
+ assert validate is False
+ assert ignore_errors is True
+
+
+def test_verify_file_hash_deleted_file(manifest_info):
+ data = to_bytes(json.dumps(manifest_info))
+ digest = sha256(data).hexdigest()
+
+ namespace = manifest_info['collection_info']['namespace']
+ name = manifest_info['collection_info']['name']
+ version = manifest_info['collection_info']['version']
+ server = 'http://galaxy.ansible.com'
+
+ error_queue = []
+
+ with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
+ with patch.object(collection.os.path, 'isfile', MagicMock(return_value=False)) as mock_isfile:
+ collection_req = collection.CollectionRequirement(namespace, name, './', server, [version], version, False)
+ collection_req._verify_file_hash(b'path/', 'file', digest, error_queue)
+
+            assert mock_isfile.call_count == 1
+
+ assert len(error_queue) == 1
+ assert error_queue[0].installed is None
+ assert error_queue[0].expected == digest
+
+
+def test_verify_file_hash_matching_hash(manifest_info):
+
+ data = to_bytes(json.dumps(manifest_info))
+ digest = sha256(data).hexdigest()
+
+ namespace = manifest_info['collection_info']['namespace']
+ name = manifest_info['collection_info']['name']
+ version = manifest_info['collection_info']['version']
+ server = 'http://galaxy.ansible.com'
+
+ error_queue = []
+
+ with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
+ with patch.object(collection.os.path, 'isfile', MagicMock(return_value=True)) as mock_isfile:
+ collection_req = collection.CollectionRequirement(namespace, name, './', server, [version], version, False)
+ collection_req._verify_file_hash(b'path/', 'file', digest, error_queue)
+
+            assert mock_isfile.call_count == 1
+
+ assert error_queue == []
+
+
+def test_verify_file_hash_mismatching_hash(manifest_info):
+
+ data = to_bytes(json.dumps(manifest_info))
+ digest = sha256(data).hexdigest()
+ different_digest = 'not_{0}'.format(digest)
+
+ namespace = manifest_info['collection_info']['namespace']
+ name = manifest_info['collection_info']['name']
+ version = manifest_info['collection_info']['version']
+ server = 'http://galaxy.ansible.com'
+
+ error_queue = []
+
+ with patch.object(builtins, 'open', mock_open(read_data=data)) as m:
+ with patch.object(collection.os.path, 'isfile', MagicMock(return_value=True)) as mock_isfile:
+ collection_req = collection.CollectionRequirement(namespace, name, './', server, [version], version, False)
+ collection_req._verify_file_hash(b'path/', 'file', different_digest, error_queue)
+
+            assert mock_isfile.call_count == 1
+
+ assert len(error_queue) == 1
+ assert error_queue[0].installed == digest
+ assert error_queue[0].expected == different_digest
+
+
+def test_consume_file(manifest):
+
+ manifest_file, checksum = manifest
+ assert checksum == collection._consume_file(manifest_file)
+
+
+def test_consume_file_and_write_contents(manifest, manifest_info):
+
+ manifest_file, checksum = manifest
+
+ write_to = BytesIO()
+ actual_hash = collection._consume_file(manifest_file, write_to)
+
+ write_to.seek(0)
+ assert to_bytes(json.dumps(manifest_info)) == write_to.read()
+ assert actual_hash == checksum
+
+
+def test_get_tar_file_member(tmp_tarfile):
+
+ temp_dir, tfile, filename, checksum = tmp_tarfile
+
+ with collection._get_tar_file_member(tfile, filename) as (tar_file_member, tar_file_obj):
+ assert isinstance(tar_file_member, tarfile.TarInfo)
+ assert isinstance(tar_file_obj, tarfile.ExFileObject)
+
+
+def test_get_nonexistent_tar_file_member(tmp_tarfile):
+ temp_dir, tfile, filename, checksum = tmp_tarfile
+
+ file_does_not_exist = filename + 'nonexistent'
+
+ with pytest.raises(AnsibleError) as err:
+ collection._get_tar_file_member(tfile, file_does_not_exist)
+
+ assert to_text(err.value.message) == "Collection tar at '%s' does not contain the expected file '%s'." % (to_text(tfile.name), file_does_not_exist)
+
+
+def test_get_tar_file_hash(tmp_tarfile):
+ temp_dir, tfile, filename, checksum = tmp_tarfile
+
+ assert checksum == collection._get_tar_file_hash(tfile.name, filename)
+
+
+def test_get_json_from_tar_file(tmp_tarfile):
+ temp_dir, tfile, filename, checksum = tmp_tarfile
+
+ assert 'MANIFEST.json' in tfile.getnames()
+
+ data = collection._get_json_from_tar_file(tfile.name, 'MANIFEST.json')
+
+ assert isinstance(data, dict)
+
+
+def test_verify_collection_not_installed(mock_collection):
+
+ local_collection = mock_collection(local_installed=False)
+ remote_collection = mock_collection(local=False)
+
+ with patch.object(collection.display, 'display') as mocked_display:
+ local_collection.verify(remote_collection, './', './')
+
+ assert mocked_display.called
+ assert mocked_display.call_args[0][0] == "'%s.%s' has not been installed, nothing to verify" % (local_collection.namespace, local_collection.name)
+
+
+def test_verify_successful_debug_info(monkeypatch, mock_collection):
+ local_collection = mock_collection()
+ remote_collection = mock_collection(local=False)
+
+ monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock())
+ monkeypatch.setattr(collection.CollectionRequirement, '_verify_file_hash', MagicMock())
+ monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock())
+
+ with patch.object(collection.display, 'vvv') as mock_display:
+ local_collection.verify(remote_collection, './', './')
+
+ namespace = local_collection.namespace
+ name = local_collection.name
+ version = local_collection.latest_version
+
+ assert mock_display.call_count == 4
+ assert mock_display.call_args_list[0][0][0] == "Verifying '%s.%s:%s'." % (namespace, name, version)
+ assert mock_display.call_args_list[1][0][0] == "Installed collection found at './%s/%s'" % (namespace, name)
+ located = "Remote collection found at 'https://galaxy.ansible.com/download/%s-%s-%s.tar.gz'" % (namespace, name, version)
+ assert mock_display.call_args_list[2][0][0] == located
+ verified = "Successfully verified that checksums for '%s.%s:%s' match the remote collection" % (namespace, name, version)
+ assert mock_display.call_args_list[3][0][0] == verified
+
+
+def test_verify_different_versions(mock_collection):
+
+ local_collection = mock_collection(version='0.1.0')
+ remote_collection = mock_collection(local=False, version='3.0.0')
+
+ with patch.object(collection.display, 'display') as mock_display:
+ local_collection.verify(remote_collection, './', './')
+
+ namespace = local_collection.namespace
+ name = local_collection.name
+ installed_version = local_collection.latest_version
+ compared_version = remote_collection.latest_version
+
+ msg = "%s.%s has the version '%s' but is being compared to '%s'" % (namespace, name, installed_version, compared_version)
+
+ assert mock_display.call_count == 1
+ assert mock_display.call_args[0][0] == msg
+
+
+@patch.object(builtins, 'open', mock_open())
+def test_verify_modified_manifest(monkeypatch, mock_collection, manifest_info):
+ local_collection = mock_collection()
+ remote_collection = mock_collection(local=False)
+
+ monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum']))
+ monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=['manifest_checksum_modified', 'files_manifest_checksum']))
+ monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, {'files': []}]))
+ monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True))
+
+ with patch.object(collection.display, 'display') as mock_display:
+ with patch.object(collection.display, 'vvv') as mock_debug:
+ local_collection.verify(remote_collection, './', './')
+
+ namespace = local_collection.namespace
+ name = local_collection.name
+
+ assert mock_display.call_count == 3
+ assert mock_display.call_args_list[0][0][0] == 'Collection %s.%s contains modified content in the following files:' % (namespace, name)
+ assert mock_display.call_args_list[1][0][0] == '%s.%s' % (namespace, name)
+ assert mock_display.call_args_list[2][0][0] == ' MANIFEST.json'
+
+ # The -vvv output should show details (the checksums do not match)
+ assert mock_debug.call_count == 5
+ assert mock_debug.call_args_list[-1][0][0] == ' Expected: manifest_checksum\n Found: manifest_checksum_modified'
+
+
+@patch.object(builtins, 'open', mock_open())
+def test_verify_modified_files_manifest(monkeypatch, mock_collection, manifest_info):
+ local_collection = mock_collection()
+ remote_collection = mock_collection(local=False)
+
+ monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum']))
+ monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=['manifest_checksum', 'files_manifest_checksum_modified']))
+ monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, {'files': []}]))
+ monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True))
+
+ with patch.object(collection.display, 'display') as mock_display:
+ with patch.object(collection.display, 'vvv') as mock_debug:
+ local_collection.verify(remote_collection, './', './')
+
+ namespace = local_collection.namespace
+ name = local_collection.name
+
+ assert mock_display.call_count == 3
+ assert mock_display.call_args_list[0][0][0] == 'Collection %s.%s contains modified content in the following files:' % (namespace, name)
+ assert mock_display.call_args_list[1][0][0] == '%s.%s' % (namespace, name)
+ assert mock_display.call_args_list[2][0][0] == ' FILES.json'
+
+ # The -vvv output should show details (the checksums do not match)
+ assert mock_debug.call_count == 5
+ assert mock_debug.call_args_list[-1][0][0] == ' Expected: files_manifest_checksum\n Found: files_manifest_checksum_modified'
+
+
+@patch.object(builtins, 'open', mock_open())
+def test_verify_modified_files(monkeypatch, mock_collection, manifest_info, files_manifest_info):
+
+ local_collection = mock_collection()
+ remote_collection = mock_collection(local=False)
+
+ monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum']))
+ fakehashes = ['manifest_checksum', 'files_manifest_checksum', 'individual_file_checksum_modified']
+ monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=fakehashes))
+ monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, files_manifest_info]))
+ monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True))
+
+ with patch.object(collection.display, 'display') as mock_display:
+ with patch.object(collection.display, 'vvv') as mock_debug:
+ local_collection.verify(remote_collection, './', './')
+
+ namespace = local_collection.namespace
+ name = local_collection.name
+
+ assert mock_display.call_count == 3
+ assert mock_display.call_args_list[0][0][0] == 'Collection %s.%s contains modified content in the following files:' % (namespace, name)
+ assert mock_display.call_args_list[1][0][0] == '%s.%s' % (namespace, name)
+ assert mock_display.call_args_list[2][0][0] == ' README.md'
+
+ # The -vvv output should show details (the checksums do not match)
+ assert mock_debug.call_count == 5
+ assert mock_debug.call_args_list[-1][0][0] == ' Expected: individual_file_checksum\n Found: individual_file_checksum_modified'
+
+
+@patch.object(builtins, 'open', mock_open())
+def test_verify_identical(monkeypatch, mock_collection, manifest_info, files_manifest_info):
+
+ local_collection = mock_collection()
+ remote_collection = mock_collection(local=False)
+
+ monkeypatch.setattr(collection, '_get_tar_file_hash', MagicMock(side_effect=['manifest_checksum']))
+ monkeypatch.setattr(collection, '_consume_file', MagicMock(side_effect=['manifest_checksum', 'files_manifest_checksum', 'individual_file_checksum']))
+ monkeypatch.setattr(collection, '_get_json_from_tar_file', MagicMock(side_effect=[manifest_info, files_manifest_info]))
+ monkeypatch.setattr(collection.os.path, 'isfile', MagicMock(return_value=True))
+
+ with patch.object(collection.display, 'display') as mock_display:
+ with patch.object(collection.display, 'vvv') as mock_debug:
+ local_collection.verify(remote_collection, './', './')
+
+ # Successful verification is quiet
+ assert mock_display.call_count == 0
+
+    # The -vvv output should end with the successful verification message
+ namespace = local_collection.namespace
+ name = local_collection.name
+ version = local_collection.latest_version
+ success_msg = "Successfully verified that checksums for '%s.%s:%s' match the remote collection" % (namespace, name, version)
+
+ assert mock_debug.call_count == 4
+ assert mock_debug.call_args_list[-1][0][0] == success_msg
+
+
+@patch.object(os.path, 'isdir', return_value=True)
+def test_verify_collections_no_version(mock_isdir, mock_collection, monkeypatch):
+ namespace = 'ansible_namespace'
+ name = 'collection'
+ version = '*' # Occurs if MANIFEST.json does not exist
+
+ local_collection = mock_collection(namespace=namespace, name=name, version=version)
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=local_collection))
+
+ collections = [('%s.%s' % (namespace, name), version, None)]
+
+ with pytest.raises(AnsibleError) as err:
+ collection.verify_collections(collections, './', local_collection.api, False, False)
+
+ err_msg = 'Collection %s.%s does not appear to have a MANIFEST.json. ' % (namespace, name)
+ err_msg += 'A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy.'
+ assert err.value.message == err_msg
+
+
+@patch.object(collection.CollectionRequirement, 'verify')
+def test_verify_collections_not_installed(mock_verify, mock_collection, monkeypatch):
+ namespace = 'ansible_namespace'
+ name = 'collection'
+ version = '1.0.0'
+
+ local_collection = mock_collection(local_installed=False)
+
+ found_remote = MagicMock(return_value=mock_collection(local=False))
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_name', found_remote)
+
+ collections = [('%s.%s' % (namespace, name), version, None, None)]
+ search_path = './'
+ validate_certs = False
+ ignore_errors = False
+ apis = [local_collection.api]
+
+ with patch.object(collection, '_download_file') as mock_download_file:
+ with pytest.raises(AnsibleError) as err:
+ collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors)
+
+ assert err.value.message == "Collection %s.%s is not installed in any of the collection paths." % (namespace, name)
+
+
+@patch.object(collection.CollectionRequirement, 'verify')
+def test_verify_collections_not_installed_ignore_errors(mock_verify, mock_collection, monkeypatch):
+ namespace = 'ansible_namespace'
+ name = 'collection'
+ version = '1.0.0'
+
+ local_collection = mock_collection(local_installed=False)
+
+ found_remote = MagicMock(return_value=mock_collection(local=False))
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_name', found_remote)
+
+ collections = [('%s.%s' % (namespace, name), version, None)]
+ search_path = './'
+ validate_certs = False
+ ignore_errors = True
+ apis = [local_collection.api]
+
+ with patch.object(collection, '_download_file') as mock_download_file:
+ with patch.object(Display, 'warning') as mock_warning:
+ collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors)
+
+ skip_message = "Failed to verify collection %s.%s but skipping due to --ignore-errors being set." % (namespace, name)
+ original_err = "Error: Collection %s.%s is not installed in any of the collection paths." % (namespace, name)
+
+ assert mock_warning.called
+ assert mock_warning.call_args[0][0] == skip_message + " " + original_err
+
+
+@patch.object(os.path, 'isdir', return_value=True)
+@patch.object(collection.CollectionRequirement, 'verify')
+def test_verify_collections_no_remote(mock_verify, mock_isdir, mock_collection, monkeypatch):
+ namespace = 'ansible_namespace'
+ name = 'collection'
+ version = '1.0.0'
+
+ monkeypatch.setattr(os.path, 'isfile', MagicMock(side_effect=[False, True]))
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=mock_collection()))
+
+ collections = [('%s.%s' % (namespace, name), version, None)]
+ search_path = './'
+ validate_certs = False
+ ignore_errors = False
+ apis = []
+
+ with pytest.raises(AnsibleError) as err:
+ collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors)
+
+ assert err.value.message == "Failed to find remote collection %s.%s:%s on any of the galaxy servers" % (namespace, name, version)
+
+
+@patch.object(os.path, 'isdir', return_value=True)
+@patch.object(collection.CollectionRequirement, 'verify')
+def test_verify_collections_no_remote_ignore_errors(mock_verify, mock_isdir, mock_collection, monkeypatch):
+ namespace = 'ansible_namespace'
+ name = 'collection'
+ version = '1.0.0'
+
+ monkeypatch.setattr(os.path, 'isfile', MagicMock(side_effect=[False, True]))
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=mock_collection()))
+
+ collections = [('%s.%s' % (namespace, name), version, None)]
+ search_path = './'
+ validate_certs = False
+ ignore_errors = True
+ apis = []
+
+ with patch.object(Display, 'warning') as mock_warning:
+ collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors)
+
+ skip_message = "Failed to verify collection %s.%s but skipping due to --ignore-errors being set." % (namespace, name)
+ original_err = "Error: Failed to find remote collection %s.%s:%s on any of the galaxy servers" % (namespace, name, version)
+
+ assert mock_warning.called
+ assert mock_warning.call_args[0][0] == skip_message + " " + original_err
+
+
+def test_verify_collections_tarfile(monkeypatch):
+
+ monkeypatch.setattr(os.path, 'isfile', MagicMock(return_value=True))
+
+ invalid_format = 'ansible_namespace-collection-0.1.0.tar.gz'
+ collections = [(invalid_format, '*', None)]
+
+ with pytest.raises(AnsibleError) as err:
+ collection.verify_collections(collections, './', [], False, False)
+
+ msg = "'%s' is not a valid collection name. The format namespace.name is expected." % invalid_format
+ assert err.value.message == msg
+
+
+def test_verify_collections_path(monkeypatch):
+
+ monkeypatch.setattr(os.path, 'isfile', MagicMock(return_value=False))
+
+ invalid_format = 'collections/collection_namespace/collection_name'
+ collections = [(invalid_format, '*', None)]
+
+ with pytest.raises(AnsibleError) as err:
+ collection.verify_collections(collections, './', [], False, False)
+
+ msg = "'%s' is not a valid collection name. The format namespace.name is expected." % invalid_format
+ assert err.value.message == msg
+
+
+def test_verify_collections_url(monkeypatch):
+
+ monkeypatch.setattr(os.path, 'isfile', MagicMock(return_value=False))
+
+ invalid_format = 'https://galaxy.ansible.com/download/ansible_namespace-collection-0.1.0.tar.gz'
+ collections = [(invalid_format, '*', None)]
+
+ with pytest.raises(AnsibleError) as err:
+ collection.verify_collections(collections, './', [], False, False)
+
+ msg = "'%s' is not a valid collection name. The format namespace.name is expected." % invalid_format
+ assert err.value.message == msg
+
+
+@patch.object(os.path, 'isdir', return_value=True)
+@patch.object(collection.CollectionRequirement, 'verify')
+def test_verify_collections_name(mock_verify, mock_isdir, mock_collection, monkeypatch):
+ local_collection = mock_collection()
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_path', MagicMock(return_value=local_collection))
+
+ monkeypatch.setattr(os.path, 'isfile', MagicMock(side_effect=[False, True, False]))
+
+ located_remote_from_name = MagicMock(return_value=mock_collection(local=False))
+ monkeypatch.setattr(collection.CollectionRequirement, 'from_name', located_remote_from_name)
+
+ with patch.object(collection, '_download_file') as mock_download_file:
+
+ collections = [('%s.%s' % (local_collection.namespace, local_collection.name), '%s' % local_collection.latest_version, None)]
+ search_path = './'
+ validate_certs = False
+ ignore_errors = False
+ apis = [local_collection.api]
+
+ collection.verify_collections(collections, search_path, apis, validate_certs, ignore_errors)
+
+ assert mock_download_file.call_count == 1
+ assert located_remote_from_name.call_count == 1
diff --git a/test/units/galaxy/test_collection_install.py b/test/units/galaxy/test_collection_install.py
new file mode 100644
index 00000000..629a3564
--- /dev/null
+++ b/test/units/galaxy/test_collection_install.py
@@ -0,0 +1,816 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import json
+import os
+import pytest
+import re
+import shutil
+import stat
+import tarfile
+import yaml
+
+from io import BytesIO, StringIO
+from units.compat.mock import MagicMock
+
+import ansible.module_utils.six.moves.urllib.error as urllib_error
+
+from ansible import context
+from ansible.cli.galaxy import GalaxyCLI
+from ansible.errors import AnsibleError
+from ansible.galaxy import collection, api
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils import context_objects as co
+from ansible.utils.display import Display
+
+
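+# Runs an 'ansible-galaxy collection ...' command while preserving the global
+# CLI args singleton around the call.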
+def call_galaxy_cli(args):
+ orig = co.GlobalCLIArgs._Singleton__instance
+ co.GlobalCLIArgs._Singleton__instance = None
+ try:
+ GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
+ finally:
+ co.GlobalCLIArgs._Singleton__instance = orig
+
+
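+# Builds the JSON body a Galaxy server would return for a single collection artifact.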
+def artifact_json(namespace, name, version, dependencies, server):
+ json_str = json.dumps({
+ 'artifact': {
+ 'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
+ 'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
+ 'size': 1234,
+ },
+ 'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
+ 'metadata': {
+ 'namespace': namespace,
+ 'name': name,
+ 'dependencies': dependencies,
+ },
+ 'version': version
+ })
+ return to_text(json_str)
+
+
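+# Builds the JSON body for a collection's version listing, shaped for the v2
+# or v3 API depending on available_api_versions.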
+def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
+ results = []
+ available_api_versions = available_api_versions or {}
+ api_version = 'v2'
+ if 'v3' in available_api_versions:
+ api_version = 'v3'
+ for version in versions:
+ results.append({
+ 'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
+ 'version': version,
+ })
+
+ if api_version == 'v2':
+ json_str = json.dumps({
+ 'count': len(versions),
+ 'next': None,
+ 'previous': None,
+ 'results': results
+ })
+
+ if api_version == 'v3':
+ response = {'meta': {'count': len(versions)},
+ 'data': results,
+ 'links': {'first': None,
+ 'last': None,
+ 'next': None,
+ 'previous': None},
+ }
+ json_str = json.dumps(response)
+ return to_text(json_str)
+
+
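+# Builds an error response body; v2 returns at most a single error object
+# while v3 wraps a list of errors under the 'errors' key.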
+def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
+ errors_to_return = errors_to_return or []
+ available_api_versions = available_api_versions or {}
+
+ response = {}
+
+ api_version = 'v2'
+ if 'v3' in available_api_versions:
+ api_version = 'v3'
+
+ if api_version == 'v2':
+ assert len(errors_to_return) <= 1
+ if errors_to_return:
+ response = errors_to_return[0]
+
+ if api_version == 'v3':
+ response['errors'] = errors_to_return
+
+ json_str = json.dumps(response)
+ return to_text(json_str)
+
+
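+# Ensures every test starts and ends with a clean global CLI args singleton.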
+@pytest.fixture(autouse=True)
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
+
+
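+# Builds a real collection with 'ansible-galaxy collection init' and 'build',
+# optionally injecting dependencies into galaxy.yml; returns the source path
+# and tarball path as bytes.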
+@pytest.fixture()
+def collection_artifact(request, tmp_path_factory):
+ test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Input'))
+ namespace = 'ansible_namespace'
+ collection = 'collection'
+
+ skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
+ collection_path = os.path.join(test_dir, namespace, collection)
+
+ call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
+ '--collection-skeleton', skeleton_path])
+ dependencies = getattr(request, 'param', None)
+ if dependencies:
+ galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
+ with open(galaxy_yml, 'rb+') as galaxy_obj:
+ existing_yaml = yaml.safe_load(galaxy_obj)
+ existing_yaml['dependencies'] = dependencies
+
+ galaxy_obj.seek(0)
+ galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
+ galaxy_obj.truncate()
+
+ # Create a file with +x in the collection so we can test the permissions
+ execute_path = os.path.join(collection_path, 'runme.sh')
+ with open(execute_path, mode='wb') as fd:
+ fd.write(b"echo hi")
+ os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
+
+ call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
+
+ collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
+ return to_bytes(collection_path), to_bytes(collection_tar)
+
+
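+# GalaxyAPI client pointed at the public Galaxy server; the tests below
+# monkeypatch its network-facing methods.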
+@pytest.fixture()
+def galaxy_server():
+ context.CLIARGS._store = {'ignore_certs': False}
+ galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
+ return galaxy_api
+
+
+def test_build_requirement_from_path(collection_artifact):
+ actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
+
+ assert actual.namespace == u'ansible_namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path == collection_artifact[0]
+ assert actual.api is None
+ assert actual.skip is True
+ assert actual.versions == set([u'*'])
+ assert actual.latest_version == u'*'
+ assert actual.dependencies == {}
+
+
+@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
+def test_build_requirement_from_path_with_manifest(version, collection_artifact):
+ manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
+ manifest_value = json.dumps({
+ 'collection_info': {
+ 'namespace': 'namespace',
+ 'name': 'name',
+ 'version': version,
+ 'dependencies': {
+ 'ansible_namespace.collection': '*'
+ }
+ }
+ })
+ with open(manifest_path, 'wb') as manifest_obj:
+ manifest_obj.write(to_bytes(manifest_value))
+
+ actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
+
+ # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'name'
+ assert actual.b_path == collection_artifact[0]
+ assert actual.api is None
+ assert actual.skip is True
+ assert actual.versions == set([to_text(version)])
+ assert actual.latest_version == to_text(version)
+ assert actual.dependencies == {'ansible_namespace.collection': '*'}
+
+
+def test_build_requirement_from_path_invalid_manifest(collection_artifact):
+ manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
+ with open(manifest_path, 'wb') as manifest_obj:
+ manifest_obj.write(b"not json")
+
+ expected = "Collection file at '%s' does not contain a valid json string." % to_native(manifest_path)
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement.from_path(collection_artifact[0], True)
+
+
+def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
+ manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
+ manifest_value = json.dumps({
+ 'collection_info': {
+ 'namespace': 'namespace',
+ 'name': 'name',
+ 'version': '',
+ 'dependencies': {}
+ }
+ })
+ with open(manifest_path, 'wb') as manifest_obj:
+ manifest_obj.write(to_bytes(manifest_value))
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
+
+ # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'name'
+ assert actual.b_path == collection_artifact[0]
+ assert actual.api is None
+ assert actual.skip is True
+ assert actual.versions == set(['*'])
+ assert actual.latest_version == u'*'
+ assert actual.dependencies == {}
+
+ assert mock_display.call_count == 1
+
+ actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
+ expected_warn = "Collection at '%s' does not have a valid version set, falling back to '*'. Found version: ''" \
+ % to_text(collection_artifact[0])
+ assert expected_warn in actual_warn
+
+
+def test_build_requirement_from_tar(collection_artifact):
+ actual = collection.CollectionRequirement.from_tar(collection_artifact[1], True, True)
+
+ assert actual.namespace == u'ansible_namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path == collection_artifact[1]
+ assert actual.api is None
+ assert actual.skip is False
+ assert actual.versions == set([u'0.1.0'])
+ assert actual.latest_version == u'0.1.0'
+ assert actual.dependencies == {}
+
+
+def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
+ test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Input'))
+ test_file = os.path.join(test_dir, b'fake.tar.gz')
+ with open(test_file, 'wb') as test_obj:
+ test_obj.write(b"\x00\x01\x02\x03")
+
+ expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement.from_tar(test_file, True, True)
+
+
+def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
+ test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Input'))
+
+ json_data = to_bytes(json.dumps(
+ {
+ 'files': [],
+ 'format': 1,
+ }
+ ))
+
+ tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
+ with tarfile.open(tar_path, 'w:gz') as tfile:
+ b_io = BytesIO(json_data)
+ tar_info = tarfile.TarInfo('FILES.json')
+ tar_info.size = len(json_data)
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement.from_tar(tar_path, True, True)
+
+
+def test_build_requirement_from_tar_no_files(tmp_path_factory):
+ test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Input'))
+
+ json_data = to_bytes(json.dumps(
+ {
+ 'collection_info': {},
+ }
+ ))
+
+ tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
+ with tarfile.open(tar_path, 'w:gz') as tfile:
+ b_io = BytesIO(json_data)
+ tar_info = tarfile.TarInfo('MANIFEST.json')
+ tar_info.size = len(json_data)
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ expected = "Collection at '%s' does not contain the required file FILES.json." % to_native(tar_path)
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement.from_tar(tar_path, True, True)
+
+
+def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
+ test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Collections Input'))
+
+ json_data = b"not a json"
+
+ tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
+ with tarfile.open(tar_path, 'w:gz') as tfile:
+ b_io = BytesIO(json_data)
+ tar_info = tarfile.TarInfo('MANIFEST.json')
+ tar_info.size = len(json_data)
+ tar_info.mode = 0o0644
+ tfile.addfile(tarinfo=tar_info, fileobj=b_io)
+
+ expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement.from_tar(tar_path, True, True)
+
+
+def test_build_requirement_from_name(galaxy_server, monkeypatch):
+ mock_get_versions = MagicMock()
+ mock_get_versions.return_value = ['2.1.9', '2.1.10']
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'2.1.9', u'2.1.10'])
+ assert actual.latest_version == u'2.1.10'
+ assert actual.dependencies == {}
+
+ assert mock_get_versions.call_count == 1
+ assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+
+def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch):
+ mock_get_versions = MagicMock()
+ mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'1.0.1', u'2.0.1'])
+ assert actual.latest_version == u'2.0.1'
+ assert actual.dependencies == {}
+
+ assert mock_get_versions.call_count == 1
+ assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+
+def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch):
+ mock_get_info = MagicMock()
+ mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
+ {})
+ monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.1-beta.1', True,
+ True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'2.0.1-beta.1'])
+ assert actual.latest_version == u'2.0.1-beta.1'
+ assert actual.dependencies == {}
+
+ assert mock_get_info.call_count == 1
+ assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
+
+
+def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch):
+ mock_get_versions = MagicMock()
+ mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+ broken_server = copy.copy(galaxy_server)
+ broken_server.api_server = 'https://broken.com/'
+ mock_404 = MagicMock()
+ mock_404.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
+ StringIO()), "custom msg")
+ monkeypatch.setattr(broken_server, 'get_collection_versions', mock_404)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [broken_server, galaxy_server],
+ '>1.0.1', False, True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ # assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'1.0.2', u'1.0.3'])
+ assert actual.latest_version == u'1.0.3'
+ assert actual.dependencies == {}
+
+ assert mock_404.call_count == 1
+ assert mock_404.mock_calls[0][1] == ('namespace', 'collection')
+
+ assert mock_get_versions.call_count == 1
+ assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+
+def test_build_requirement_from_name_missing(galaxy_server, monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
+ StringIO()), "")
+
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
+
+ expected = "Failed to find collection namespace.collection:*"
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False,
+ True)
+
+
+def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch):
+ mock_open = MagicMock()
+ mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
+ StringIO()), "error")
+
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
+
+ expected = "error (HTTP Code: 401, Message: msg)"
+ with pytest.raises(api.GalaxyError, match=re.escape(expected)):
+ collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False)
+
+
+def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch):
+ mock_get_info = MagicMock()
+ mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
+ {})
+ monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.0', True,
+ True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'2.0.0'])
+ assert actual.latest_version == u'2.0.0'
+ assert actual.dependencies == {}
+
+ assert mock_get_info.call_count == 1
+ assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
+
+
+def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch):
+ mock_get_versions = MagicMock()
+ mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+ mock_get_info = MagicMock()
+ mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
+ {})
+ monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '>=2.0.1,<2.0.2',
+ True, True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'2.0.1'])
+ assert actual.latest_version == u'2.0.1'
+ assert actual.dependencies == {}
+
+ assert mock_get_versions.call_count == 1
+ assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+ assert mock_get_info.call_count == 1
+ assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
+
+
+def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch):
+ mock_get_versions = MagicMock()
+ mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
+ monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
+
+ actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '!=2.0.2',
+ True, True)
+
+ assert actual.namespace == u'namespace'
+ assert actual.name == u'collection'
+ assert actual.b_path is None
+ assert actual.api == galaxy_server
+ assert actual.skip is False
+ assert actual.versions == set([u'2.0.0', u'2.0.1', u'2.0.3', u'2.0.4', u'2.0.5'])
+ assert actual.latest_version == u'2.0.5'
+ assert actual.dependencies == {}
+
+ assert mock_get_versions.call_count == 1
+ assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
+
+
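+# Each case: available versions, requirement string, versions expected to survive the filter, and the expected latest.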
+@pytest.mark.parametrize('versions, requirement, expected_filter, expected_latest', [
+ [['1.0.0', '1.0.1'], '*', ['1.0.0', '1.0.1'], '1.0.1'],
+ [['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<1.1.0', ['1.0.5'], '1.0.5'],
+ [['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<=1.0.5', ['1.0.5'], '1.0.5'],
+ [['1.0.0', '1.0.5', '1.1.0'], '>=1.1.0', ['1.1.0'], '1.1.0'],
+ [['1.0.0', '1.0.5', '1.1.0'], '!=1.1.0', ['1.0.0', '1.0.5'], '1.0.5'],
+ [['1.0.0', '1.0.5', '1.1.0'], '==1.0.5', ['1.0.5'], '1.0.5'],
+ [['1.0.0', '1.0.5', '1.1.0'], '1.0.5', ['1.0.5'], '1.0.5'],
+ [['1.0.0', '2.0.0', '3.0.0'], '>=2', ['2.0.0', '3.0.0'], '3.0.0'],
+])
+def test_add_collection_requirements(versions, requirement, expected_filter, expected_latest):
+ req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', versions, requirement,
+ False)
+ assert req.versions == set(expected_filter)
+ assert req.latest_version == expected_latest
+
+
+def test_add_collection_requirement_to_unknown_installed_version(monkeypatch):
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
+ skip=True)
+
+ req.add_requirement('parent.collection', '1.0.0')
+ assert req.latest_version == '*'
+
+ assert mock_display.call_count == 1
+
+ actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
+ assert "Failed to validate the collection requirement 'namespace.name:1.0.0' for parent.collection" in actual_warn
+
+
+def test_add_collection_wildcard_requirement_to_unknown_installed_version():
+ req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
+ skip=True)
+ req.add_requirement(str(req), '*')
+
+ assert req.versions == set('*')
+ assert req.latest_version == '*'
+
+
+def test_add_collection_requirement_with_conflict(galaxy_server):
+ expected = "Cannot meet requirement ==1.0.2 for dependency namespace.name from source '%s'. Available versions " \
+ "before last requirement added: 1.0.0, 1.0.1\n" \
+ "Requirements from:\n" \
+ "\tbase - 'namespace.name:==1.0.2'" % galaxy_server.api_server
+ with pytest.raises(AnsibleError, match=expected):
+ collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '==1.0.2',
+ False)
+
+
+def test_add_requirement_to_existing_collection_with_conflict(galaxy_server):
+ req = collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '*', False)
+
+ expected = "Cannot meet dependency requirement 'namespace.name:1.0.2' for collection namespace.collection2 from " \
+ "source '%s'. Available versions before last requirement added: 1.0.0, 1.0.1\n" \
+ "Requirements from:\n" \
+ "\tbase - 'namespace.name:*'\n" \
+ "\tnamespace.collection2 - 'namespace.name:1.0.2'" % galaxy_server.api_server
+ with pytest.raises(AnsibleError, match=re.escape(expected)):
+ req.add_requirement('namespace.collection2', '1.0.2')
+
+
+def test_add_requirement_to_installed_collection_with_conflict():
+ source = 'https://galaxy.ansible.com'
+ req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
+ skip=True)
+
+ expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
+ "Use --force to overwrite"
+ with pytest.raises(AnsibleError, match=re.escape(expected)):
+ req.add_requirement(None, '1.0.2')
+
+
+def test_add_requirement_to_installed_collection_with_conflict_as_dep():
+ source = 'https://galaxy.ansible.com'
+ req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
+ skip=True)
+
+ expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
+ "Use --force-with-deps to overwrite"
+ with pytest.raises(AnsibleError, match=re.escape(expected)):
+ req.add_requirement('namespace.collection2', '1.0.2')
+
+
+def test_install_skipped_collection(monkeypatch):
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ req = collection.CollectionRequirement('namespace', 'name', None, 'source', ['1.0.0'], '*', False, skip=True)
+ req.install(None, None)
+
+ assert mock_display.call_count == 1
+ assert mock_display.mock_calls[0][1][0] == "Skipping 'namespace.name' as it is already installed"
+
+
+def test_install_collection(collection_artifact, monkeypatch):
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ collection_tar = collection_artifact[1]
+ output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
+ collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
+ os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
+
+ temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
+ os.makedirs(temp_path)
+
+ req = collection.CollectionRequirement.from_tar(collection_tar, True, True)
+ req.install(to_text(output_path), temp_path)
+
+ # Ensure the temp directory is empty, nothing is left behind
+ assert os.listdir(temp_path) == []
+
+ actual_files = os.listdir(collection_path)
+ actual_files.sort()
+ assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
+ b'runme.sh']
+
+ assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
+ assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
+ assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
+
+ assert mock_display.call_count == 2
+ assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
+ % to_text(collection_path)
+ assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection (0.1.0) was installed successfully"
+
+
+def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
+ collection_tar = collection_artifact[1]
+ output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
+ collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ mock_download = MagicMock()
+ mock_download.return_value = collection_tar
+ monkeypatch.setattr(collection, '_download_file', mock_download)
+
+ monkeypatch.setattr(galaxy_server, '_available_api_versions', {'v2': 'v2/'})
+ temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
+ os.makedirs(temp_path)
+
+ meta = api.CollectionVersionMetadata('ansible_namespace', 'collection', '0.1.0', 'https://downloadme.com',
+ 'myhash', {})
+ req = collection.CollectionRequirement('ansible_namespace', 'collection', None, galaxy_server,
+ ['0.1.0'], '*', False, metadata=meta)
+ req.install(to_text(output_path), temp_path)
+
+ # Ensure the temp directory is empty, nothing is left behind
+ assert os.listdir(temp_path) == []
+
+ actual_files = os.listdir(collection_path)
+ actual_files.sort()
+ assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
+ b'runme.sh']
+
+ assert mock_display.call_count == 2
+ assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
+ % to_text(collection_path)
+ assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection (0.1.0) was installed successfully"
+
+ assert mock_download.call_count == 1
+ assert mock_download.mock_calls[0][1][0] == 'https://downloadme.com'
+ assert mock_download.mock_calls[0][1][1] == temp_path
+ assert mock_download.mock_calls[0][1][2] == 'myhash'
+ assert mock_download.mock_calls[0][1][3] is True
+
+
+def test_install_collections_from_tar(collection_artifact, monkeypatch):
+ collection_path, collection_tar = collection_artifact
+ temp_path = os.path.split(collection_tar)[0]
+ shutil.rmtree(collection_path)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
+ [u'https://galaxy.ansible.com'], True, False, False, False, False)
+
+ assert os.path.isdir(collection_path)
+
+ actual_files = os.listdir(collection_path)
+ actual_files.sort()
+ assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
+ b'runme.sh']
+
+ with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
+ actual_manifest = json.loads(to_text(manifest_obj.read()))
+
+ assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
+ assert actual_manifest['collection_info']['name'] == 'collection'
+ assert actual_manifest['collection_info']['version'] == '0.1.0'
+
+ # Filter out the progress cursor display calls.
+ display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
+ assert len(display_msgs) == 4
+ assert display_msgs[0] == "Process install dependency map"
+ assert display_msgs[1] == "Starting collection install process"
+ assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
+
+
+def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
+ collection_path, collection_tar = collection_artifact
+ temp_path = os.path.split(collection_tar)[0]
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ # collection_path is intentionally left in place, so the original build skeleton is detected as an existing install and we expect a skip
+ collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
+ [u'https://galaxy.ansible.com'], True, False, False, False, False)
+
+ assert os.path.isdir(collection_path)
+
+ actual_files = os.listdir(collection_path)
+ actual_files.sort()
+ assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
+
+ # Filter out the progress cursor display calls.
+ display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
+ assert len(display_msgs) == 3
+
+ assert display_msgs[0] == "Process install dependency map"
+ assert display_msgs[1] == "Starting collection install process"
+ assert display_msgs[2] == "Skipping 'ansible_namespace.collection' as it is already installed"
+
+ for msg in display_msgs:
+ assert 'WARNING' not in msg
+
+
+def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
+ collection_path, collection_tar = collection_artifact
+ temp_path = os.path.split(collection_tar)[0]
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ for file in [b'MANIFEST.json', b'galaxy.yml']:
+ b_path = os.path.join(collection_path, file)
+ if os.path.isfile(b_path):
+ os.unlink(b_path)
+
+ collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
+ [u'https://galaxy.ansible.com'], True, False, False, False, False)
+
+ display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
+
+ assert 'WARNING' in display_msgs[0]
+
+
+# Make sure we don't get stuck in a recursive dependency loop
+@pytest.mark.parametrize('collection_artifact', [
+ {'ansible_namespace.collection': '>=0.0.1'},
+], indirect=True)
+def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
+ collection_path, collection_tar = collection_artifact
+ temp_path = os.path.split(collection_tar)[0]
+ shutil.rmtree(collection_path)
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, 'display', mock_display)
+
+ collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
+ [u'https://galaxy.ansible.com'], True, False, False, False, False)
+
+ assert os.path.isdir(collection_path)
+
+ actual_files = os.listdir(collection_path)
+ actual_files.sort()
+ assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
+ b'runme.sh']
+
+ with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
+ actual_manifest = json.loads(to_text(manifest_obj.read()))
+
+ assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
+ assert actual_manifest['collection_info']['name'] == 'collection'
+ assert actual_manifest['collection_info']['version'] == '0.1.0'
+
+ # Filter out the progress cursor display calls.
+ display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
+ assert len(display_msgs) == 4
+ assert display_msgs[0] == "Process install dependency map"
+ assert display_msgs[1] == "Starting collection install process"
+ assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
+ assert display_msgs[3] == "ansible_namespace.collection (0.1.0) was installed successfully"
diff --git a/test/units/galaxy/test_token.py b/test/units/galaxy/test_token.py
new file mode 100644
index 00000000..94449e28
--- /dev/null
+++ b/test/units/galaxy/test_token.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pytest
+
+import ansible.constants as C
+from ansible.galaxy.token import GalaxyToken, NoTokenSentinel
+from ansible.module_utils._text import to_bytes, to_text
+
+
+@pytest.fixture()
+def b_token_file(request, tmp_path_factory):
+ b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβÅÈ Token'))
+ b_token_path = os.path.join(b_test_dir, b"token.yml")
+
+ token = getattr(request, 'param', None)
+ if token:
+ with open(b_token_path, 'wb') as token_fd:
+ token_fd.write(b"token: %s" % to_bytes(token))
+
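+ # Point the global galaxy token path at the temp file for the duration of the test, restoring it afterwards.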
+ orig_token_path = C.GALAXY_TOKEN_PATH
+ C.GALAXY_TOKEN_PATH = to_text(b_token_path)
+ try:
+ yield b_token_path
+ finally:
+ C.GALAXY_TOKEN_PATH = orig_token_path
+
+
+def test_token_explicit(b_token_file):
+ assert GalaxyToken(token="explicit").get() == "explicit"
+
+
+@pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
+def test_token_explicit_override_file(b_token_file):
+ assert GalaxyToken(token="explicit").get() == "explicit"
+
+
+@pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
+def test_token_from_file(b_token_file):
+ assert GalaxyToken().get() == "file"
+
+
+def test_token_from_file_missing(b_token_file):
+ assert GalaxyToken().get() is None
+
+
+@pytest.mark.parametrize('b_token_file', ['file'], indirect=True)
+def test_token_none(b_token_file):
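+ # NoTokenSentinel explicitly disables token use, even when a token file exists.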
+ assert GalaxyToken(token=NoTokenSentinel).get() is None
diff --git a/test/units/galaxy/test_user_agent.py b/test/units/galaxy/test_user_agent.py
new file mode 100644
index 00000000..da0103f3
--- /dev/null
+++ b/test/units/galaxy/test_user_agent.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import platform
+
+from ansible.galaxy import user_agent
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+
+
+def test_user_agent():
+ res = user_agent.user_agent()
+ assert res.startswith('ansible-galaxy/%s' % ansible_version)
+ assert platform.system() in res
+ assert 'python:' in res
diff --git a/test/units/inventory/test_group.py b/test/units/inventory/test_group.py
new file mode 100644
index 00000000..e8f1c0b0
--- /dev/null
+++ b/test/units/inventory/test_group.py
@@ -0,0 +1,155 @@
+# Copyright 2018 Alan Rominger <arominge@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.inventory.group import Group
+from ansible.inventory.host import Host
+from ansible.errors import AnsibleError
+
+
+class TestGroup(unittest.TestCase):
+
+ def test_depth_update(self):
+ A = Group('A')
+ B = Group('B')
+ Z = Group('Z')
+ A.add_child_group(B)
+ A.add_child_group(Z)
+ self.assertEqual(A.depth, 0)
+ self.assertEqual(Z.depth, 1)
+ self.assertEqual(B.depth, 1)
+
+ def test_depth_update_dual_branches(self):
+ alpha = Group('alpha')
+ A = Group('A')
+ alpha.add_child_group(A)
+ B = Group('B')
+ A.add_child_group(B)
+ Z = Group('Z')
+ alpha.add_child_group(Z)
+ beta = Group('beta')
+ B.add_child_group(beta)
+ Z.add_child_group(beta)
+
+ self.assertEqual(alpha.depth, 0) # apex
+ self.assertEqual(beta.depth, 3) # alpha -> A -> B -> beta
+
+ omega = Group('omega')
+ omega.add_child_group(alpha)
+
+ # verify that both paths are traversed to get the max depth value
+ self.assertEqual(B.depth, 3) # omega -> alpha -> A -> B
+ self.assertEqual(beta.depth, 4) # B -> beta
+
+ def test_depth_recursion(self):
+ A = Group('A')
+ B = Group('B')
+ A.add_child_group(B)
+ # manually wire A in as a child of B as well, creating a parent/child loop
+ A.parent_groups.append(B)
+ B.child_groups.append(A)
+ # can't update depths of groups, because of loop
+ with self.assertRaises(AnsibleError):
+ B._check_children_depth()
+
+ def test_loop_detection(self):
+ A = Group('A')
+ B = Group('B')
+ C = Group('C')
+ A.add_child_group(B)
+ B.add_child_group(C)
+ with self.assertRaises(AnsibleError):
+ C.add_child_group(A)
+
+ def test_direct_host_ordering(self):
+ """Hosts are returned in order they are added
+ """
+ group = Group('A')
+ # host names not added in alphabetical order
+ host_name_list = ['z', 'b', 'c', 'a', 'p', 'q']
+ expected_hosts = []
+ for host_name in host_name_list:
+ h = Host(host_name)
+ group.add_host(h)
+ expected_hosts.append(h)
+ assert group.get_hosts() == expected_hosts
+
+ def test_sub_group_host_ordering(self):
+ """With multiple nested groups, asserts that hosts are returned
+ in deterministic order
+ """
+ top_group = Group('A')
+ expected_hosts = []
+ for name in ['z', 'b', 'c', 'a', 'p', 'q']:
+ child = Group('group_{0}'.format(name))
+ top_group.add_child_group(child)
+ host = Host('host_{0}'.format(name))
+ child.add_host(host)
+ expected_hosts.append(host)
+ assert top_group.get_hosts() == expected_hosts
+
+ def test_populates_descendant_hosts(self):
+ A = Group('A')
+ B = Group('B')
+ C = Group('C')
+ h = Host('h')
+ C.add_host(h)
+ A.add_child_group(B) # B is child of A
+ B.add_child_group(C) # C is descendant of A
+ A.add_child_group(B) # re-adding an existing child is a no-op
+ self.assertEqual(set(h.groups), set([C, B, A]))
+ h2 = Host('h2')
+ C.add_host(h2)
+ self.assertEqual(set(h2.groups), set([C, B, A]))
+
+ def test_ancestor_example(self):
+ # see docstring for Group._walk_relationship
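+ # Layout: A, B and C are parents of D; B and D are parents of E;
+ # D and E are parents of F, so F's ancestors are all five other groups.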
+ groups = {}
+ for name in ['A', 'B', 'C', 'D', 'E', 'F']:
+ groups[name] = Group(name)
+ # first row
+ groups['A'].add_child_group(groups['D'])
+ groups['B'].add_child_group(groups['D'])
+ groups['B'].add_child_group(groups['E'])
+ groups['C'].add_child_group(groups['D'])
+ # second row
+ groups['D'].add_child_group(groups['E'])
+ groups['D'].add_child_group(groups['F'])
+ groups['E'].add_child_group(groups['F'])
+
+ self.assertEqual(
+ set(groups['F'].get_ancestors()),
+ set([
+ groups['A'], groups['B'], groups['C'], groups['D'], groups['E']
+ ])
+ )
+
+ def test_ancestors_recursive_loop_safe(self):
+ '''
+ The get_ancestors method may be referenced before circular parenting
+ checks, so the method is expected to be stable even with loops
+ '''
+ A = Group('A')
+ B = Group('B')
+ A.parent_groups.append(B)
+ B.parent_groups.append(A)
+ # finishes in finite time
+ self.assertEqual(A.get_ancestors(), set([A, B]))
diff --git a/test/units/inventory/test_host.py b/test/units/inventory/test_host.py
new file mode 100644
index 00000000..c8f47714
--- /dev/null
+++ b/test/units/inventory/test_host.py
@@ -0,0 +1,112 @@
+# Copyright 2015 Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# for __setstate__/__getstate__ tests
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pickle
+
+from units.compat import unittest
+
+from ansible.inventory.group import Group
+from ansible.inventory.host import Host
+from ansible.module_utils.six import string_types
+
+
+class TestHost(unittest.TestCase):
+ ansible_port = 22
+
+ def setUp(self):
+ self.hostA = Host('a')
+ self.hostB = Host('b')
+
+ def test_equality(self):
+ self.assertEqual(self.hostA, self.hostA)
+ self.assertNotEqual(self.hostA, self.hostB)
+ self.assertNotEqual(self.hostA, Host('a'))
+
+ def test_hashability(self):
+ # equality implies the hash values are the same
+ self.assertEqual(hash(self.hostA), hash(Host('a')))
+
+ def test_get_vars(self):
+ host_vars = self.hostA.get_vars()
+ self.assertIsInstance(host_vars, dict)
+
+ def test_repr(self):
+ host_repr = repr(self.hostA)
+ self.assertIsInstance(host_repr, string_types)
+
+ def test_add_group(self):
+ group = Group('some_group')
+ group_len = len(self.hostA.groups)
+ self.hostA.add_group(group)
+ self.assertEqual(len(self.hostA.groups), group_len + 1)
+
+ def test_get_groups(self):
+ group = Group('some_group')
+ self.hostA.add_group(group)
+ groups = self.hostA.get_groups()
+ self.assertEqual(len(groups), 1)
+ for _group in groups:
+ self.assertIsInstance(_group, Group)
+
+ def test_equals_none(self):
+ other = None
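+ # Exercise __eq__ and __ne__ in both directions; comparison with None must not raise.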
+ self.hostA == other
+ other == self.hostA
+ self.hostA != other
+ other != self.hostA
+ self.assertNotEqual(self.hostA, other)
+
+ def test_serialize(self):
+ group = Group('some_group')
+ self.hostA.add_group(group)
+ data = self.hostA.serialize()
+ self.assertIsInstance(data, dict)
+
+ def test_serialize_then_deserialize(self):
+ group = Group('some_group')
+ self.hostA.add_group(group)
+ hostA_data = self.hostA.serialize()
+
+ hostA_clone = Host()
+ hostA_clone.deserialize(hostA_data)
+ self.assertEqual(self.hostA, hostA_clone)
+
+ def test_set_state(self):
+ group = Group('some_group')
+ self.hostA.add_group(group)
+
+ pickled_hostA = pickle.dumps(self.hostA)
+
+ hostA_clone = pickle.loads(pickled_hostA)
+ self.assertEqual(self.hostA, hostA_clone)
+
+
+class TestHostWithPort(TestHost):
+ ansible_port = 8822
+
+ def setUp(self):
+ self.hostA = Host(name='a', port=self.ansible_port)
+ self.hostB = Host(name='b', port=self.ansible_port)
+
+ def test_get_vars_ansible_port(self):
+ host_vars = self.hostA.get_vars()
+ self.assertEqual(host_vars['ansible_port'], self.ansible_port)
diff --git a/test/units/inventory_test_data/group_vars/noparse/all.yml~ b/test/units/inventory_test_data/group_vars/noparse/all.yml~
new file mode 100644
index 00000000..6f52f114
--- /dev/null
+++ b/test/units/inventory_test_data/group_vars/noparse/all.yml~
@@ -0,0 +1,2 @@
+---
+YAML_FILENAME_EXTENSIONS_TEST: False
diff --git a/test/units/inventory_test_data/group_vars/noparse/file.txt b/test/units/inventory_test_data/group_vars/noparse/file.txt
new file mode 100644
index 00000000..6f52f114
--- /dev/null
+++ b/test/units/inventory_test_data/group_vars/noparse/file.txt
@@ -0,0 +1,2 @@
+---
+YAML_FILENAME_EXTENSIONS_TEST: False
diff --git a/test/units/inventory_test_data/group_vars/parse/all.yml b/test/units/inventory_test_data/group_vars/parse/all.yml
new file mode 100644
index 00000000..8687c86c
--- /dev/null
+++ b/test/units/inventory_test_data/group_vars/parse/all.yml
@@ -0,0 +1,2 @@
+---
+YAML_FILENAME_EXTENSIONS_TEST: True
diff --git a/test/units/mock/__init__.py b/test/units/mock/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/mock/__init__.py
diff --git a/test/units/mock/loader.py b/test/units/mock/loader.py
new file mode 100644
index 00000000..0ee47fbb
--- /dev/null
+++ b/test/units/mock/loader.py
@@ -0,0 +1,116 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.dataloader import DataLoader
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class DictDataLoader(DataLoader):
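+ """A DataLoader that serves file contents from an in-memory dict of
+ path -> content, so tests can fake a filesystem without touching disk."""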
+
+ def __init__(self, file_mapping=None):
+ file_mapping = {} if file_mapping is None else file_mapping
+ assert isinstance(file_mapping, dict)
+
+ super(DictDataLoader, self).__init__()
+
+ self._file_mapping = file_mapping
+ self._build_known_directories()
+ self._vault_secrets = None
+
+ def load_from_file(self, path, cache=True, unsafe=False):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return self.load(self._file_mapping[path], path)
+ return None
+
+ # TODO: the real _get_file_contents returns a byte string, so we convert the
+ # unicode/text this loader was created with to utf-8
+ def _get_file_contents(self, path):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return (to_bytes(self._file_mapping[path]), False)
+ else:
+ raise AnsibleParserError("file not found: %s" % path)
+
+ def path_exists(self, path):
+ path = to_text(path)
+ return path in self._file_mapping or path in self._known_directories
+
+ def is_file(self, path):
+ path = to_text(path)
+ return path in self._file_mapping
+
+ def is_directory(self, path):
+ path = to_text(path)
+ return path in self._known_directories
+
+ def list_directory(self, path):
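+ # Mirror os.listdir: only the immediate children of the given path are returned.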
+ ret = []
+ path = to_text(path)
+ for x in (list(self._file_mapping.keys()) + self._known_directories):
+ if x.startswith(path):
+ if os.path.dirname(x) == path:
+ ret.append(os.path.basename(x))
+ return ret
+
+ def is_executable(self, path):
+ # FIXME: figure out a way to make paths return true for this
+ return False
+
+ def _add_known_directory(self, directory):
+ if directory not in self._known_directories:
+ self._known_directories.append(directory)
+
+ def _build_known_directories(self):
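+ # Record every ancestor directory of each mapped file path so directory checks succeed.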
+ self._known_directories = []
+ for path in self._file_mapping:
+ dirname = os.path.dirname(path)
+ while dirname not in ('/', ''):
+ self._add_known_directory(dirname)
+ dirname = os.path.dirname(dirname)
+
+ def push(self, path, content):
+ rebuild_dirs = False
+ if path not in self._file_mapping:
+ rebuild_dirs = True
+
+ self._file_mapping[path] = content
+
+ if rebuild_dirs:
+ self._build_known_directories()
+
+ def pop(self, path):
+ if path in self._file_mapping:
+ del self._file_mapping[path]
+ self._build_known_directories()
+
+ def clear(self):
+ self._file_mapping = dict()
+ self._known_directories = []
+
+ def get_basedir(self):
+ return os.getcwd()
+
+ def set_vault_secrets(self, vault_secrets):
+ self._vault_secrets = vault_secrets
diff --git a/test/units/mock/path.py b/test/units/mock/path.py
new file mode 100644
index 00000000..721dc293
--- /dev/null
+++ b/test/units/mock/path.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import MagicMock
+from ansible.utils.path import unfrackpath
+
+
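+# No-op stand-in for unfrackpath: the side effect returns the path unchanged, so tests avoid real path normalization.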
+mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x)
diff --git a/test/units/mock/procenv.py b/test/units/mock/procenv.py
new file mode 100644
index 00000000..271a207e
--- /dev/null
+++ b/test/units/mock/procenv.py
@@ -0,0 +1,90 @@
+# (c) 2016, Matt Davis <mdavis@ansible.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import json
+
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from units.compat import unittest
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes
+
+
+@contextmanager
+def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
+ """
+ context manager that temporarily masks the test runner's values for stdin and argv
+ """
+ real_stdin = sys.stdin
+ real_argv = sys.argv
+
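+ # On Python 3, code under test may read raw bytes from sys.stdin.buffer, so attach a matching BytesIO.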
+ if PY3:
+ fake_stream = StringIO(stdin_data)
+ fake_stream.buffer = BytesIO(to_bytes(stdin_data))
+ else:
+ fake_stream = BytesIO(to_bytes(stdin_data))
+
+ try:
+ sys.stdin = fake_stream
+ sys.argv = argv_data
+
+ yield
+ finally:
+ sys.stdin = real_stdin
+ sys.argv = real_argv
+
+
+@contextmanager
+def swap_stdout():
+ """
+ context manager that temporarily replaces stdout for tests that need to verify output
+ """
+ old_stdout = sys.stdout
+
+ if PY3:
+ fake_stream = StringIO()
+ else:
+ fake_stream = BytesIO()
+
+ try:
+ sys.stdout = fake_stream
+
+ yield fake_stream
+ finally:
+ sys.stdout = old_stdout
+
+
+class ModuleTestCase(unittest.TestCase):
+ def setUp(self, module_args=None):
+ if module_args is None:
+ module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
+
+ args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
+
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
+ self.stdin_swap.__enter__()
+
+ def tearDown(self):
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap.__exit__(None, None, None)
diff --git a/test/units/mock/vault_helper.py b/test/units/mock/vault_helper.py
new file mode 100644
index 00000000..dcce9c78
--- /dev/null
+++ b/test/units/mock/vault_helper.py
@@ -0,0 +1,39 @@
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_bytes
+
+from ansible.parsing.vault import VaultSecret
+
+
+class TextVaultSecret(VaultSecret):
+ '''A secret piece of text, i.e. a password. Tracks text encoding.
+
+ The text may not use the default text encoding, so we keep track of the
+ encoding in order to encode it back to the same bytes.'''
+
+ def __init__(self, text, encoding=None, errors=None, _bytes=None):
+ super(TextVaultSecret, self).__init__()
+ self.text = text
+ self.encoding = encoding or 'utf-8'
+ self._bytes = _bytes
+ self.errors = errors or 'strict'
+
+ @property
+ def bytes(self):
+ '''The text encoded with encoding, unless we specifically set _bytes.'''
+ return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
diff --git a/test/units/mock/yaml_helper.py b/test/units/mock/yaml_helper.py
new file mode 100644
index 00000000..1ef17215
--- /dev/null
+++ b/test/units/mock/yaml_helper.py
@@ -0,0 +1,124 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import yaml
+
+from ansible.module_utils.six import PY3
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+
+class YamlTestUtils(object):
+ """Mixin class to combine with a unittest.TestCase subclass."""
+ def _loader(self, stream):
+ """Vault related tests will want to override this.
+
+ Vault cases should set up an AnsibleLoader that has the vault password."""
+ return AnsibleLoader(stream)
+
+ def _dump_stream(self, obj, stream, dumper=None):
+ """Dump to a py2-unicode or py3-string stream."""
+ if PY3:
+ return yaml.dump(obj, stream, Dumper=dumper)
+ else:
+ return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
+
+ def _dump_string(self, obj, dumper=None):
+ """Dump to a py2-unicode or py3-string"""
+ if PY3:
+ return yaml.dump(obj, Dumper=dumper)
+ else:
+ return yaml.dump(obj, Dumper=dumper, encoding=None)
+
+ def _dump_load_cycle(self, obj):
+ # Each pass through a dump or load revs the 'generation'
+ # obj to yaml string
+ string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
+
+ # wrap a stream/file like StringIO around that yaml
+ stream_from_object_dump = io.StringIO(string_from_object_dump)
+ loader = self._loader(stream_from_object_dump)
+ # load the yaml stream to create a new instance of the object (gen 2)
+ obj_2 = loader.get_data()
+
+ # dump the gen 2 object directly to a string
+ string_from_object_dump_2 = self._dump_string(obj_2,
+ dumper=AnsibleDumper)
+
+ # The gen 1 and gen 2 yaml strings
+ self.assertEqual(string_from_object_dump, string_from_object_dump_2)
+ # the gen 1 (orig) and gen 2 py object
+ self.assertEqual(obj, obj_2)
+
+ # again! gen 3... load strings into py objects
+ stream_3 = io.StringIO(string_from_object_dump_2)
+ loader_3 = self._loader(stream_3)
+ obj_3 = loader_3.get_data()
+
+ string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
+
+ self.assertEqual(obj, obj_3)
+ # should be transitive, but...
+ self.assertEqual(obj_2, obj_3)
+ self.assertEqual(string_from_object_dump, string_from_object_dump_3)
+
+ def _old_dump_load_cycle(self, obj):
+ '''Dump the passed-in object to yaml, load it back up, dump again, compare.'''
+ stream = io.StringIO()
+
+ yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
+ self._dump_stream(obj, stream, dumper=AnsibleDumper)
+
+ yaml_string_from_stream = stream.getvalue()
+
+ # reset stream
+ stream.seek(0)
+
+ loader = self._loader(stream)
+ # loader = AnsibleLoader(stream, vault_password=self.vault_password)
+ obj_from_stream = loader.get_data()
+
+ stream_from_string = io.StringIO(yaml_string)
+ loader2 = self._loader(stream_from_string)
+ # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
+ obj_from_string = loader2.get_data()
+
+ stream_obj_from_stream = io.StringIO()
+ stream_obj_from_string = io.StringIO()
+
+ if PY3:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
+ yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
+
+ stream_obj_from_stream.seek(0)
+ stream_obj_from_string.seek(0)
+
+ if PY3:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ assert yaml_string == yaml_string_obj_from_stream
+ assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
+ yaml_string_stream_obj_from_string)
+ assert obj == obj_from_stream
+ assert obj == obj_from_string
+ assert obj == yaml_string_obj_from_stream
+ assert obj == yaml_string_obj_from_string
+ assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ return {'obj': obj,
+ 'yaml_string': yaml_string,
+ 'yaml_string_from_stream': yaml_string_from_stream,
+ 'obj_from_stream': obj_from_stream,
+ 'obj_from_string': obj_from_string,
+ 'yaml_string_obj_from_string': yaml_string_obj_from_string}
diff --git a/test/units/module_utils/__init__.py b/test/units/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/__init__.py
diff --git a/test/units/module_utils/basic/__init__.py b/test/units/module_utils/basic/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/basic/__init__.py
diff --git a/test/units/module_utils/basic/test__log_invocation.py b/test/units/module_utils/basic/test__log_invocation.py
new file mode 100644
index 00000000..3beda8bd
--- /dev/null
+++ b/test/units/module_utils/basic/test__log_invocation.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, James Cammarata <jimi@sngx.net>
+# (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+
+ARGS = dict(foo=False, bar=[1, 2, 3], bam="bam", baz=u'baz')
+ARGUMENT_SPEC = dict(
+ foo=dict(default=True, type='bool'),
+ bar=dict(default=[], type='list'),
+ bam=dict(default="bam"),
+ baz=dict(default=u"baz"),
+ password=dict(default=True),
+ no_log=dict(default="you shouldn't see me", no_log=True),
+)
+
+
+@pytest.mark.parametrize('am, stdin', [(ARGUMENT_SPEC, ARGS)], indirect=['am', 'stdin'])
+def test_module_utils_basic__log_invocation(am, mocker):
+
+ am.log = mocker.MagicMock()
+ am._log_invocation()
+
+ # The message is generated from a dict, so its ordering is unknown;
+ # check it manually rather than with assert_called_with()
+ args = am.log.call_args[0]
+ assert len(args) == 1
+ message = args[0]
+
+ assert len(message) == \
+ len('Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD')
+
+ assert message.startswith('Invoked with ')
+ assert ' bam=bam' in message
+ assert ' bar=[1, 2, 3]' in message
+ assert ' foo=False' in message
+ assert ' baz=baz' in message
+ assert ' no_log=NOT_LOGGING_PARAMETER' in message
+ assert ' password=NOT_LOGGING_PASSWORD' in message
+
+ kwargs = am.log.call_args[1]
+ assert kwargs == \
+ dict(log_args={
+ 'foo': 'False',
+ 'bar': '[1, 2, 3]',
+ 'bam': 'bam',
+ 'baz': 'baz',
+ 'password': 'NOT_LOGGING_PASSWORD',
+ 'no_log': 'NOT_LOGGING_PARAMETER',
+ })
diff --git a/test/units/module_utils/basic/test__symbolic_mode_to_octal.py b/test/units/module_utils/basic/test__symbolic_mode_to_octal.py
new file mode 100644
index 00000000..7793b348
--- /dev/null
+++ b/test/units/module_utils/basic/test__symbolic_mode_to_octal.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright:
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016-2017 Ansible Project
+# License: GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+#
+# Info helpful for making new test cases:
+#
+# base_mode = {'dir no perms': 0o040000,
+# 'file no perms': 0o100000,
+# 'dir all perms': 0o400000 | 0o777,
+# 'file all perms': 0o100000 | 0o777}
+#
+# perm_bits = {'x': 0b001,
+# 'w': 0b010,
+# 'r': 0b100}
+#
+# role_shift = {'u': 6,
+# 'g': 3,
+# 'o': 0}
+
+DATA = ( # Going from no permissions to setting all for user, group, and/or other
+ (0o040000, u'a+rwx', 0o0777),
+ (0o040000, u'u+rwx,g+rwx,o+rwx', 0o0777),
+ (0o040000, u'o+rwx', 0o0007),
+ (0o040000, u'g+rwx', 0o0070),
+ (0o040000, u'u+rwx', 0o0700),
+
+ # Going from all permissions to none for user, group, and/or other
+ (0o040777, u'a-rwx', 0o0000),
+ (0o040777, u'u-rwx,g-rwx,o-rwx', 0o0000),
+ (0o040777, u'o-rwx', 0o0770),
+ (0o040777, u'g-rwx', 0o0707),
+ (0o040777, u'u-rwx', 0o0077),
+
+ # now using absolute assignment from None to a set of perms
+ (0o040000, u'a=rwx', 0o0777),
+ (0o040000, u'u=rwx,g=rwx,o=rwx', 0o0777),
+ (0o040000, u'o=rwx', 0o0007),
+ (0o040000, u'g=rwx', 0o0070),
+ (0o040000, u'u=rwx', 0o0700),
+
+ # X effect on files and dirs
+ (0o040000, u'a+X', 0o0111),
+ (0o100000, u'a+X', 0),
+ (0o040000, u'a=X', 0o0111),
+ (0o100000, u'a=X', 0),
+ (0o040777, u'a-X', 0o0666),
+ # Same behaviour as chmod, but is it a bug?
+ # chmod a-X statfile <== removes execute from statfile
+ (0o100777, u'a-X', 0o0666),
+
+ # Multiple permissions
+ (0o040000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0755),
+ (0o100000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0644),
+)
+
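+# The umask test below mocks os.umask to return 0o7; '+'/'-' without an explicit role must leave the umask-covered "other" bits untouched.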
+UMASK_DATA = (
+ (0o100000, '+rwx', 0o770),
+ (0o100777, '-rwx', 0o007),
+)
+
+INVALID_DATA = (
+ (0o040000, u'a=foo', "bad symbolic permission for mode: a=foo"),
+ (0o040000, u'f=rwx', "bad symbolic permission for mode: f=rwx"),
+)
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', DATA)
+def test_good_symbolic_modes(mocker, stat_info, mode_string, expected):
+ mock_stat = mocker.MagicMock()
+ mock_stat.st_mode = stat_info
+ assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', UMASK_DATA)
+def test_umask_with_symbolic_modes(mocker, stat_info, mode_string, expected):
+ mock_umask = mocker.patch('os.umask')
+ mock_umask.return_value = 0o7
+
+ mock_stat = mocker.MagicMock()
+ mock_stat.st_mode = stat_info
+
+ assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', INVALID_DATA)
+def test_invalid_symbolic_modes(mocker, stat_info, mode_string, expected):
+ mock_stat = mocker.MagicMock()
+ mock_stat.st_mode = stat_info
+ with pytest.raises(ValueError) as exc:
+ assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == 'blah'
+ assert exc.match(expected)
diff --git a/test/units/module_utils/basic/test_argument_spec.py b/test/units/module_utils/basic/test_argument_spec.py
new file mode 100644
index 00000000..6e297669
--- /dev/null
+++ b/test/units/module_utils/basic/test_argument_spec.py
@@ -0,0 +1,706 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import os
+
+import pytest
+
+from units.compat.mock import MagicMock
+from ansible.module_utils import basic
+from ansible.module_utils.api import basic_auth_argument_spec, rate_limit_argument_spec, retry_argument_spec
+from ansible.module_utils.common.warnings import get_deprecation_messages, get_warning_messages
+from ansible.module_utils.six import integer_types, string_types
+from ansible.module_utils.six.moves import builtins
+
+
+MOCK_VALIDATOR_FAIL = MagicMock(side_effect=TypeError("bad conversion"))
+# Data is argspec, argument, expected
+VALID_SPECS = (
+ # Simple type=int
+ ({'arg': {'type': 'int'}}, {'arg': 42}, 42),
+ # Simple type=int with a large value (will be of type long under Python 2)
+ ({'arg': {'type': 'int'}}, {'arg': 18765432109876543210}, 18765432109876543210),
+ # Simple type=list, elements=int
+ ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': [42, 32]}, [42, 32]),
+ # Type=int with conversion from string
+ ({'arg': {'type': 'int'}}, {'arg': '42'}, 42),
+ # Type=list elements=int with conversion from string
+ ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': ['42', '32']}, [42, 32]),
+ # Simple type=float
+ ({'arg': {'type': 'float'}}, {'arg': 42.0}, 42.0),
+ # Simple type=list, elements=float
+ ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': [42.1, 32.2]}, [42.1, 32.2]),
+ # Type=float conversion from int
+ ({'arg': {'type': 'float'}}, {'arg': 42}, 42.0),
+ # type=list, elements=float conversion from int
+ ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': [42, 32]}, [42.0, 32.0]),
+ # Type=float conversion from string
+ ({'arg': {'type': 'float'}}, {'arg': '42.0'}, 42.0),
+ # type=list, elements=float conversion from string
+ ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': ['42.1', '32.2']}, [42.1, 32.2]),
+ # Type=float conversion from string without decimal point
+ ({'arg': {'type': 'float'}}, {'arg': '42'}, 42.0),
+ # Type=list elements=float conversion from string without decimal point
+ ({'arg': {'type': 'list', 'elements': 'float'}}, {'arg': ['42', '32.2']}, [42.0, 32.2]),
+ # Simple type=bool
+ ({'arg': {'type': 'bool'}}, {'arg': True}, True),
+ # Simple type=list elements=bool
+ ({'arg': {'type': 'list', 'elements': 'bool'}}, {'arg': [True, 'true', 1, 'yes', False, 'false', 'no', 0]},
+ [True, True, True, True, False, False, False, False]),
+ # Type=bool with conversion from string
+ ({'arg': {'type': 'bool'}}, {'arg': 'yes'}, True),
+ # Type=str converts to string
+ ({'arg': {'type': 'str'}}, {'arg': 42}, '42'),
+ # Type=list elements=str simple converts to string
+ ({'arg': {'type': 'list', 'elements': 'str'}}, {'arg': ['42', '32']}, ['42', '32']),
+ # Type is implicit, converts to string
+ ({'arg': {}}, {'arg': 42}, '42'),
+ # Type=list elements=str implicit converts to string
+ ({'arg': {'type': 'list', 'elements': 'str'}}, {'arg': [42, 32]}, ['42', '32']),
+ # parameter is required
+ ({'arg': {'required': True}}, {'arg': 42}, '42'),
+)
+
+INVALID_SPECS = (
+ # Type is int; unable to convert this string
+ ({'arg': {'type': 'int'}}, {'arg': "wolf"}, "is of type {0} and we were unable to convert to int: {0} cannot be converted to an int".format(type('bad'))),
+ # Type is list elements is int; unable to convert this string
+ ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': [1, "bad"]}, "is of type {0} and we were unable to convert to int: {0} cannot be converted to "
+ "an int".format(type('int'))),
+ # Type is int; unable to convert float
+ ({'arg': {'type': 'int'}}, {'arg': 42.1}, "'float'> cannot be converted to an int"),
+ # Type is list, elements is int; unable to convert float
+ ({'arg': {'type': 'list', 'elements': 'int'}}, {'arg': [42.1, 32, 2]}, "'float'> cannot be converted to an int"),
+ # type is a callable that fails to convert
+ ({'arg': {'type': MOCK_VALIDATOR_FAIL}}, {'arg': "bad"}, "bad conversion"),
+ # type is a list, elements is callable that fails to convert
+ ({'arg': {'type': 'list', 'elements': MOCK_VALIDATOR_FAIL}}, {'arg': [1, "bad"]}, "bad conversion"),
+ # unknown parameter
+ ({'arg': {'type': 'int'}}, {'other': 'bad', '_ansible_module_name': 'ansible_unittest'},
+ 'Unsupported parameters for (ansible_unittest) module: other Supported parameters include: arg'),
+ # parameter is required
+ ({'arg': {'required': True}}, {}, 'missing required arguments: arg'),
+)
+
+BASIC_AUTH_VALID_ARGS = [
+ {'api_username': 'user1', 'api_password': 'password1', 'api_url': 'http://example.com', 'validate_certs': False},
+ {'api_username': 'user1', 'api_password': 'password1', 'api_url': 'http://example.com', 'validate_certs': True},
+]
+
+RATE_LIMIT_VALID_ARGS = [
+ {'rate': 1, 'rate_limit': 1},
+ {'rate': '1', 'rate_limit': 1},
+ {'rate': 1, 'rate_limit': '1'},
+ {'rate': '1', 'rate_limit': '1'},
+]
+
+RETRY_VALID_ARGS = [
+ {'retries': 1, 'retry_pause': 1.5},
+ {'retries': '1', 'retry_pause': '1.5'},
+ {'retries': 1, 'retry_pause': '1.5'},
+ {'retries': '1', 'retry_pause': 1.5},
+]
+
+
+@pytest.fixture
+def complex_argspec():
+ arg_spec = dict(
+ foo=dict(required=True, aliases=['dup']),
+ bar=dict(),
+ bam=dict(),
+ bing=dict(),
+ bang=dict(),
+ bong=dict(),
+ baz=dict(fallback=(basic.env_fallback, ['BAZ'])),
+ bar1=dict(type='bool'),
+ bar3=dict(type='list', elements='path'),
+ bar_str=dict(type='list', elements=str),
+ zardoz=dict(choices=['one', 'two']),
+ zardoz2=dict(type='list', choices=['one', 'two', 'three']),
+ zardoz3=dict(type='str', aliases=['zodraz'], deprecated_aliases=[dict(name='zodraz', version='9.99')]),
+ )
+ mut_ex = (('bar', 'bam'), ('bing', 'bang', 'bong'))
+ req_to = (('bam', 'baz'),)
+
+ kwargs = dict(
+ argument_spec=arg_spec,
+ mutually_exclusive=mut_ex,
+ required_together=req_to,
+ no_log=True,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+ return kwargs
+
+
+@pytest.fixture
+def options_argspec_list():
+ options_spec = dict(
+ foo=dict(required=True, aliases=['dup']),
+ bar=dict(),
+ bar1=dict(type='list', elements='str'),
+ bar2=dict(type='list', elements='int'),
+ bar3=dict(type='list', elements='float'),
+ bar4=dict(type='list', elements='path'),
+ bam=dict(),
+ baz=dict(fallback=(basic.env_fallback, ['BAZ'])),
+ bam1=dict(),
+ bam2=dict(default='test'),
+ bam3=dict(type='bool'),
+ bam4=dict(type='str'),
+ )
+
+ arg_spec = dict(
+ foobar=dict(
+ type='list',
+ elements='dict',
+ options=options_spec,
+ mutually_exclusive=[
+ ['bam', 'bam1'],
+ ],
+ required_if=[
+ ['foo', 'hello', ['bam']],
+ ['foo', 'bam2', ['bam2']]
+ ],
+ required_one_of=[
+ ['bar', 'bam']
+ ],
+ required_together=[
+ ['bam1', 'baz']
+ ],
+ required_by={
+ 'bam4': ('bam1', 'bam3'),
+ },
+ )
+ )
+
+ kwargs = dict(
+ argument_spec=arg_spec,
+ no_log=True,
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ return kwargs
+
+
+@pytest.fixture
+def options_argspec_dict(options_argspec_list):
+ # same spec as options_argspec_list, but with the options given in dict format
+ kwargs = options_argspec_list
+ kwargs['argument_spec']['foobar']['type'] = 'dict'
+ kwargs['argument_spec']['foobar']['elements'] = None
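+ # the list fixture accepts {'foobar': [{...}, {...}]} via elements='dict';
+ # this dict variant accepts a single mapping {'foobar': {...}} with the
+ # same suboptions and constraints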
+
+ return kwargs
+
+
+#
+# Tests for one aspect of arg_spec
+#
+
+@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in VALID_SPECS],
+ indirect=['stdin'])
+def test_validator_basic_types(argspec, expected, stdin):
+
+ am = basic.AnsibleModule(argspec)
+
+ if 'type' in argspec['arg']:
+ if argspec['arg']['type'] == 'int':
+ type_ = integer_types
+ else:
+ type_ = getattr(builtins, argspec['arg']['type'])
+ else:
+ type_ = str
+
+ assert isinstance(am.params['arg'], type_)
+ assert am.params['arg'] == expected
+
+
+@pytest.mark.parametrize('stdin', [{'arg': 42}, {'arg': 18765432109876543210}], indirect=['stdin'])
+def test_validator_function(mocker, stdin):
+ # Type is a callable
+ MOCK_VALIDATOR_SUCCESS = mocker.MagicMock(return_value=27)
+ argspec = {'arg': {'type': MOCK_VALIDATOR_SUCCESS}}
+ am = basic.AnsibleModule(argspec)
+
+ assert isinstance(am.params['arg'], integer_types)
+ assert am.params['arg'] == 27
+
+
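+# Illustrative sketch (not used by any test): a custom 'type' callable such as
+# MOCK_VALIDATOR_SUCCESS above receives the raw parameter value and must return
+# the converted value, raising an exception on failure.
+def _example_positive_int(value):
+ value = int(value)
+ if value < 0:
+ raise ValueError('value must be a non-negative integer')
+ return value
+
+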
+@pytest.mark.parametrize('stdin', BASIC_AUTH_VALID_ARGS, indirect=['stdin'])
+def test_validate_basic_auth_arg(mocker, stdin):
+ kwargs = dict(
+ argument_spec=basic_auth_argument_spec()
+ )
+ am = basic.AnsibleModule(**kwargs)
+ assert isinstance(am.params['api_username'], string_types)
+ assert isinstance(am.params['api_password'], string_types)
+ assert isinstance(am.params['api_url'], string_types)
+ assert isinstance(am.params['validate_certs'], bool)
+
+
+@pytest.mark.parametrize('stdin', RATE_LIMIT_VALID_ARGS, indirect=['stdin'])
+def test_validate_rate_limit_argument_spec(mocker, stdin):
+ kwargs = dict(
+ argument_spec=rate_limit_argument_spec()
+ )
+ am = basic.AnsibleModule(**kwargs)
+ assert isinstance(am.params['rate'], integer_types)
+ assert isinstance(am.params['rate_limit'], integer_types)
+
+
+@pytest.mark.parametrize('stdin', RETRY_VALID_ARGS, indirect=['stdin'])
+def test_validate_retry_argument_spec(mocker, stdin):
+ kwargs = dict(
+ argument_spec=retry_argument_spec()
+ )
+ am = basic.AnsibleModule(**kwargs)
+ assert isinstance(am.params['retries'], integer_types)
+ assert isinstance(am.params['retry_pause'], float)
+
+
+@pytest.mark.parametrize('stdin', [{'arg': '123'}, {'arg': 123}], indirect=['stdin'])
+def test_validator_string_type(mocker, stdin):
+ # Custom callable that is 'str'
+ argspec = {'arg': {'type': str}}
+ am = basic.AnsibleModule(argspec)
+
+ assert isinstance(am.params['arg'], string_types)
+ assert am.params['arg'] == '123'
+
+
+@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in INVALID_SPECS],
+ indirect=['stdin'])
+def test_validator_fail(stdin, capfd, argspec, expected):
+ with pytest.raises(SystemExit):
+ basic.AnsibleModule(argument_spec=argspec)
+
+ out, err = capfd.readouterr()
+ assert not err
+ assert expected in json.loads(out)['msg']
+ assert json.loads(out)['failed']
+
+
+class TestComplexArgSpecs:
+ """Test with a more complex arg_spec"""
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello'}, {'dup': 'hello'}], indirect=['stdin'])
+ def test_complex_required(self, stdin, complex_argspec):
+ """Test that the complex argspec works if we give it its required param as either the canonical or aliased name"""
+ am = basic.AnsibleModule(**complex_argspec)
+ assert isinstance(am.params['foo'], str)
+ assert am.params['foo'] == 'hello'
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello1', 'dup': 'hello2'}], indirect=['stdin'])
+ def test_complex_duplicate_warning(self, stdin, complex_argspec):
+ """Test that the complex argspec issues a warning if we specify an option both with its canonical name and its alias"""
+ am = basic.AnsibleModule(**complex_argspec)
+ assert isinstance(am.params['foo'], str)
+ assert 'Both option foo and its alias dup are set.' in get_warning_messages()
+ assert am.params['foo'] == 'hello2'
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'test'}], indirect=['stdin'])
+ def test_complex_type_fallback(self, mocker, stdin, complex_argspec):
+ """Test that the complex argspec works if we get a required parameter via fallback"""
+ environ = os.environ.copy()
+ environ['BAZ'] = 'test data'
+ mocker.patch('ansible.module_utils.basic.os.environ', environ)
+
+ am = basic.AnsibleModule(**complex_argspec)
+
+ assert isinstance(am.params['baz'], str)
+ assert am.params['baz'] == 'test data'
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'bad', 'bam': 'bad2', 'bing': 'a', 'bang': 'b', 'bong': 'c'}], indirect=['stdin'])
+ def test_fail_mutually_exclusive(self, capfd, stdin, complex_argspec):
+ """Fail because of mutually exclusive parameters"""
+ with pytest.raises(SystemExit):
+ am = basic.AnsibleModule(**complex_argspec)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert results['msg'] == "parameters are mutually exclusive: bar|bam, bing|bang|bong"
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'bad2'}], indirect=['stdin'])
+ def test_fail_required_together(self, capfd, stdin, complex_argspec):
+ """Fail because only one of a required_together pair of parameters was specified"""
+ with pytest.raises(SystemExit):
+ am = basic.AnsibleModule(**complex_argspec)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert results['msg'] == "parameters are required together: bam, baz"
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'hi'}], indirect=['stdin'])
+ def test_fail_required_together_and_default(self, capfd, stdin, complex_argspec):
+ """Fail because one of a required_together pair of parameters has a default and the other was not specified"""
+ complex_argspec['argument_spec']['baz'] = {'default': 42}
+ with pytest.raises(SystemExit):
+ am = basic.AnsibleModule(**complex_argspec)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert results['msg'] == "parameters are required together: bam, baz"
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello'}], indirect=['stdin'])
+ def test_fail_required_together_and_fallback(self, capfd, mocker, stdin, complex_argspec):
+ """Fail because one of a required_together pair of parameters has a fallback and the other was not specified"""
+ environ = os.environ.copy()
+ environ['BAZ'] = 'test data'
+ mocker.patch('ansible.module_utils.basic.os.environ', environ)
+
+ with pytest.raises(SystemExit):
+ am = basic.AnsibleModule(**complex_argspec)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert results['msg'] == "parameters are required together: bam, baz"
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'four', 'five']}], indirect=['stdin'])
+ def test_fail_list_with_choices(self, capfd, mocker, stdin, complex_argspec):
+ """Fail because one of the items is not in the choice"""
+ with pytest.raises(SystemExit):
+ basic.AnsibleModule(**complex_argspec)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert results['msg'] == "value of zardoz2 must be one or more of: one, two, three. Got no match for: four, five"
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'three']}], indirect=['stdin'])
+ def test_list_with_choices(self, capfd, mocker, stdin, complex_argspec):
+ """Test choices with list"""
+ am = basic.AnsibleModule(**complex_argspec)
+ assert isinstance(am.params['zardoz2'], list)
+ assert am.params['zardoz2'] == ['one', 'three']
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar3': ['~/test', 'test/']}], indirect=['stdin'])
+ def test_list_with_elements_path(self, capfd, mocker, stdin, complex_argspec):
+ """Test choices with list"""
+ am = basic.AnsibleModule(**complex_argspec)
+ assert isinstance(am.params['bar3'], list)
+ assert am.params['bar3'][0].startswith('/')
+ assert am.params['bar3'][1] == 'test/'
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zodraz': 'one'}], indirect=['stdin'])
+ def test_deprecated_alias(self, capfd, mocker, stdin, complex_argspec):
+ """Test a deprecated alias"""
+ am = basic.AnsibleModule(**complex_argspec)
+
+ assert "Alias 'zodraz' is deprecated." in get_deprecation_messages()[0]['msg']
+ assert get_deprecation_messages()[0]['version'] == '9.99'
+
+ @pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar_str': [867, '5309']}], indirect=['stdin'])
+ def test_list_with_elements_callable_str(self, capfd, mocker, stdin, complex_argspec):
+ """Test choices with list"""
+ am = basic.AnsibleModule(**complex_argspec)
+ assert isinstance(am.params['bar_str'], list)
+ assert isinstance(am.params['bar_str'][0], string_types)
+ assert isinstance(am.params['bar_str'][1], string_types)
+ assert am.params['bar_str'][0] == '867'
+ assert am.params['bar_str'][1] == '5309'
+
+
+class TestComplexOptions:
+ """Test arg spec options"""
+
+ # (Parameters, expected value of module.params['foobar'])
+ OPTIONS_PARAMS_LIST = (
+ ({'foobar': [{"foo": "hello", "bam": "good"}, {"foo": "test", "bar": "good"}]},
+ [{'foo': 'hello', 'bam': 'good', 'bam2': 'test', 'bar': None, 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None,
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None},
+ {'foo': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None,
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}]
+ ),
+ # Alias for required param
+ ({'foobar': [{"dup": "test", "bar": "good"}]},
+ [{'foo': 'test', 'dup': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None,
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}]
+ ),
+ # required_if satisfied via the default value of the required option (bam2)
+ ({'foobar': [{"foo": "bam2", "bar": "required_one_of"}]},
+ [{'bam': None, 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': 'required_one_of', 'baz': None, 'foo': 'bam2',
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}]
+ ),
+ # Check that a bool option is converted
+ ({"foobar": [{"foo": "required", "bam": "good", "bam3": "yes"}]},
+ [{'bam': 'good', 'bam1': None, 'bam2': 'test', 'bam3': True, 'bam4': None, 'bar': None, 'baz': None, 'foo': 'required',
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}]
+ ),
+ # Check required_by options
+ ({"foobar": [{"foo": "required", "bar": "good", "baz": "good", "bam4": "required_by", "bam1": "ok", "bam3": "yes"}]},
+ [{'bar': 'good', 'baz': 'good', 'bam1': 'ok', 'bam2': 'test', 'bam3': True, 'bam4': 'required_by', 'bam': None, 'foo': 'required',
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}]
+ ),
+ # Check for elements in sub-options
+ ({"foobar": [{"foo": "good", "bam": "required_one_of", "bar1": [1, "good", "yes"], "bar2": ['1', 1], "bar3":['1.3', 1.3, 1]}]},
+ [{'foo': 'good', 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': None, 'baz': None, 'bam': 'required_one_of',
+ 'bar1': ["1", "good", "yes"], 'bar2': [1, 1], 'bar3': [1.3, 1.3, 1.0], 'bar4': None}]
+ ),
+ )
+
+ # (Parameters, expected value of module.params['foobar'])
+ OPTIONS_PARAMS_DICT = (
+ ({'foobar': {"foo": "hello", "bam": "good"}},
+ {'foo': 'hello', 'bam': 'good', 'bam2': 'test', 'bar': None, 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None,
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}
+ ),
+ # Alias for required param
+ ({'foobar': {"dup": "test", "bar": "good"}},
+ {'foo': 'test', 'dup': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None, 'bam4': None,
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}
+ ),
+ # required_if satisfied via the default value of the required option (bam2)
+ ({'foobar': {"foo": "bam2", "bar": "required_one_of"}},
+ {'bam': None, 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': 'required_one_of', 'baz': None, 'foo': 'bam2',
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}
+ ),
+ # Check that a bool option is converted
+ ({"foobar": {"foo": "required", "bam": "good", "bam3": "yes"}},
+ {'bam': 'good', 'bam1': None, 'bam2': 'test', 'bam3': True, 'bam4': None, 'bar': None, 'baz': None, 'foo': 'required',
+ 'bar1': None, 'bar2': None, 'bar3': None, 'bar4': None}
+ ),
+ # Check required_by options
+ ({"foobar": {"foo": "required", "bar": "good", "baz": "good", "bam4": "required_by", "bam1": "ok", "bam3": "yes"}},
+ {'bar': 'good', 'baz': 'good', 'bam1': 'ok', 'bam2': 'test', 'bam3': True, 'bam4': 'required_by', 'bam': None,
+ 'foo': 'required', 'bar1': None, 'bar3': None, 'bar2': None, 'bar4': None}
+ ),
+ # Check for elements in sub-options
+ ({"foobar": {"foo": "good", "bam": "required_one_of", "bar1": [1, "good", "yes"],
+ "bar2": ['1', 1], "bar3": ['1.3', 1.3, 1]}},
+ {'foo': 'good', 'bam1': None, 'bam2': 'test', 'bam3': None, 'bam4': None, 'bar': None,
+ 'baz': None, 'bam': 'required_one_of',
+ 'bar1': ["1", "good", "yes"], 'bar2': [1, 1], 'bar3': [1.3, 1.3, 1.0], 'bar4': None}
+ ),
+ )
+
+ # (Parameters, failure message)
+ FAILING_PARAMS_LIST = (
+ # Missing required option
+ ({'foobar': [{}]}, 'missing required arguments: foo found in foobar'),
+ # Invalid option
+ ({'foobar': [{"foo": "hello", "bam": "good", "invalid": "bad"}]}, 'module: invalid found in foobar. Supported parameters include'),
+ # Mutually exclusive options found
+ ({'foobar': [{"foo": "test", "bam": "bad", "bam1": "bad", "baz": "req_to"}]},
+ 'parameters are mutually exclusive: bam|bam1 found in foobar'),
+ # required_if fails
+ ({'foobar': [{"foo": "hello", "bar": "bad"}]},
+ 'foo is hello but all of the following are missing: bam found in foobar'),
+ # Missing required_one_of option
+ ({'foobar': [{"foo": "test"}]},
+ 'one of the following is required: bar, bam found in foobar'),
+ # Missing required_together option
+ ({'foobar': [{"foo": "test", "bar": "required_one_of", "bam1": "bad"}]},
+ 'parameters are required together: bam1, baz found in foobar'),
+ # Missing required_by options
+ ({'foobar': [{"foo": "test", "bar": "required_one_of", "bam4": "required_by"}]},
+ "missing parameter(s) required by 'bam4': bam1, bam3"),
+ )
+
+ # (Parameters, failure message)
+ FAILING_PARAMS_DICT = (
+ # Missing required option
+ ({'foobar': {}}, 'missing required arguments: foo found in foobar'),
+ # Invalid option
+ ({'foobar': {"foo": "hello", "bam": "good", "invalid": "bad"}},
+ 'module: invalid found in foobar. Supported parameters include'),
+ # Mutually exclusive options found
+ ({'foobar': {"foo": "test", "bam": "bad", "bam1": "bad", "baz": "req_to"}},
+ 'parameters are mutually exclusive: bam|bam1 found in foobar'),
+ # required_if fails
+ ({'foobar': {"foo": "hello", "bar": "bad"}},
+ 'foo is hello but all of the following are missing: bam found in foobar'),
+ # Missing required_one_of option
+ ({'foobar': {"foo": "test"}},
+ 'one of the following is required: bar, bam found in foobar'),
+ # Missing required_together option
+ ({'foobar': {"foo": "test", "bar": "required_one_of", "bam1": "bad"}},
+ 'parameters are required together: bam1, baz found in foobar'),
+ # Missing required_by options
+ ({'foobar': {"foo": "test", "bar": "required_one_of", "bam4": "required_by"}},
+ "missing parameter(s) required by 'bam4': bam1, bam3"),
+ )
+
+ @pytest.mark.parametrize('stdin, expected', OPTIONS_PARAMS_DICT, indirect=['stdin'])
+ def test_options_type_dict(self, stdin, options_argspec_dict, expected):
+ """Test that a basic creation with required and required_if works"""
+ # should test ok, tests basic foo requirement and required_if
+ am = basic.AnsibleModule(**options_argspec_dict)
+
+ assert isinstance(am.params['foobar'], dict)
+ assert am.params['foobar'] == expected
+
+ @pytest.mark.parametrize('stdin, expected', OPTIONS_PARAMS_LIST, indirect=['stdin'])
+ def test_options_type_list(self, stdin, options_argspec_list, expected):
+ """Test that a basic creation with required and required_if works"""
+ # should test ok, tests basic foo requirement and required_if
+ am = basic.AnsibleModule(**options_argspec_list)
+
+ assert isinstance(am.params['foobar'], list)
+ assert am.params['foobar'] == expected
+
+ @pytest.mark.parametrize('stdin, expected', FAILING_PARAMS_DICT, indirect=['stdin'])
+ def test_fail_validate_options_dict(self, capfd, stdin, options_argspec_dict, expected):
+ """Fail because one of a required_together pair of parameters has a default and the other was not specified"""
+ with pytest.raises(SystemExit):
+ am = basic.AnsibleModule(**options_argspec_dict)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert expected in results['msg']
+
+ @pytest.mark.parametrize('stdin, expected', FAILING_PARAMS_LIST, indirect=['stdin'])
+ def test_fail_validate_options_list(self, capfd, stdin, options_argspec_list, expected):
+ """Fail because one of a required_together pair of parameters has a default and the other was not specified"""
+ with pytest.raises(SystemExit):
+ am = basic.AnsibleModule(**options_argspec_list)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed']
+ assert expected in results['msg']
+
+ @pytest.mark.parametrize('stdin', [{'foobar': {'foo': 'required', 'bam1': 'test', 'bar': 'case'}}], indirect=['stdin'])
+ def test_fallback_in_option(self, mocker, stdin, options_argspec_dict):
+ """Test that the complex argspec works if we get a required parameter via fallback"""
+ environ = os.environ.copy()
+ environ['BAZ'] = 'test data'
+ mocker.patch('ansible.module_utils.basic.os.environ', environ)
+
+ am = basic.AnsibleModule(**options_argspec_dict)
+
+ assert isinstance(am.params['foobar']['baz'], str)
+ assert am.params['foobar']['baz'] == 'test data'
+
+ @pytest.mark.parametrize('stdin',
+ [{'foobar': {'foo': 'required', 'bam1': 'test', 'baz': 'data', 'bar': 'case', 'bar4': '~/test'}}],
+ indirect=['stdin'])
+ def test_elements_path_in_option(self, mocker, stdin, options_argspec_dict):
+ """Test that the complex argspec works with elements path type"""
+
+ am = basic.AnsibleModule(**options_argspec_dict)
+
+ assert isinstance(am.params['foobar']['bar4'][0], str)
+ assert am.params['foobar']['bar4'][0].startswith('/')
+
+ @pytest.mark.parametrize('stdin,spec,expected', [
+ ({},
+ {'one': {'type': 'dict', 'apply_defaults': True, 'options': {'two': {'default': True, 'type': 'bool'}}}},
+ {'two': True}),
+ ({},
+ {'one': {'type': 'dict', 'options': {'two': {'default': True, 'type': 'bool'}}}},
+ None),
+ ], indirect=['stdin'])
+ def test_subspec_not_required_defaults(self, stdin, spec, expected):
+ # Check that top level not required, processed subspec defaults
+ am = basic.AnsibleModule(spec)
+ assert am.params['one'] == expected
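+ # apply_defaults=True is what materializes {'two': True} in the first case
+ # even though 'one' was never supplied; without it the whole suboption
+ # dict stays None, as the second case shows.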
+
+
+class TestLoadFileCommonArguments:
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_smoketest_load_file_common_args(self, am):
+ """With no file arguments, an empty dict is returned"""
+ am.selinux_mls_enabled = MagicMock()
+ am.selinux_mls_enabled.return_value = True
+ am.selinux_default_context = MagicMock()
+ am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3)
+
+ assert am.load_file_common_arguments(params={}) == {}
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_load_file_common_args(self, am, mocker):
+ am.selinux_mls_enabled = MagicMock()
+ am.selinux_mls_enabled.return_value = True
+ am.selinux_default_context = MagicMock()
+ am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3)
+
+ base_params = dict(
+ path='/path/to/file',
+ mode=0o600,
+ owner='root',
+ group='root',
+ seuser='_default',
+ serole='_default',
+ setype='_default',
+ selevel='_default',
+ )
+
+ extended_params = base_params.copy()
+ extended_params.update(dict(
+ follow=True,
+ foo='bar',
+ ))
+
+ final_params = base_params.copy()
+ final_params.update(dict(
+ path='/path/to/real_file',
+ secontext=['unconfined_u', 'object_r', 'default_t', 's0'],
+ attributes=None,
+ ))
+
+ # with the proper params specified, the returned dictionary should represent
+ # only those params which have something to do with the file arguments, excluding
+ # other params and updated as required with proper values which may have been
+ # massaged by the method
+ mocker.patch('os.path.islink', return_value=True)
+ mocker.patch('os.path.realpath', return_value='/path/to/real_file')
+
+ res = am.load_file_common_arguments(params=extended_params)
+
+ assert res == final_params
+
+
+@pytest.mark.parametrize("stdin", [{"arg_pass": "testing"}], indirect=["stdin"])
+def test_no_log_true(stdin, capfd):
+ """Explicitly mask an argument (no_log=True)."""
+ arg_spec = {
+ "arg_pass": {"no_log": True}
+ }
+ am = basic.AnsibleModule(arg_spec)
+ # no_log=True is picked up by both am._log_invocation and list_no_log_values
+ # (called by am._handle_no_log_values). As a result, we can check for the
+ # value in am.no_log_values.
+ assert "testing" in am.no_log_values
+
+
+@pytest.mark.parametrize("stdin", [{"arg_pass": "testing"}], indirect=["stdin"])
+def test_no_log_false(stdin, capfd):
+ """Explicitly log and display an argument (no_log=False)."""
+ arg_spec = {
+ "arg_pass": {"no_log": False}
+ }
+ am = basic.AnsibleModule(arg_spec)
+ assert "testing" not in am.no_log_values and not get_warning_messages()
+
+
+@pytest.mark.parametrize("stdin", [{"arg_pass": "testing"}], indirect=["stdin"])
+def test_no_log_none(stdin, capfd):
+ """Allow Ansible to make the decision by matching the argument name
+ against PASSWORD_MATCH."""
+ arg_spec = {
+ "arg_pass": {}
+ }
+ am = basic.AnsibleModule(arg_spec)
+ # Omitting no_log is only picked up by _log_invocation, so the value never
+ # makes it into am.no_log_values. Instead we can check for the warning
+ # emitted by am._log_invocation.
+ assert len(get_warning_messages()) > 0
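+ # The warning comes from the PASSWORD_MATCH heuristic in _log_invocation:
+ # "arg_pass" looks password-like by name alone, whereas a neutrally named
+ # argument would pass through without a warning.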
diff --git a/test/units/module_utils/basic/test_atomic_move.py b/test/units/module_utils/basic/test_atomic_move.py
new file mode 100644
index 00000000..7bd9496e
--- /dev/null
+++ b/test/units/module_utils/basic/test_atomic_move.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import errno
+import json
+from itertools import product
+
+import pytest
+
+from ansible.module_utils import basic
+
+
+@pytest.fixture
+def atomic_am(am, mocker):
+ am.selinux_enabled = mocker.MagicMock()
+ am.selinux_context = mocker.MagicMock()
+ am.selinux_default_context = mocker.MagicMock()
+ am.set_context_if_different = mocker.MagicMock()
+
+ yield am
+
+
+@pytest.fixture
+def atomic_mocks(mocker, monkeypatch):
+ environ = dict()
+ mocks = {
+ 'chmod': mocker.patch('os.chmod'),
+ 'chown': mocker.patch('os.chown'),
+ 'close': mocker.patch('os.close'),
+ 'environ': mocker.patch('os.environ', environ),
+ 'getlogin': mocker.patch('os.getlogin'),
+ 'getuid': mocker.patch('os.getuid'),
+ 'path_exists': mocker.patch('os.path.exists'),
+ 'rename': mocker.patch('os.rename'),
+ 'stat': mocker.patch('os.stat'),
+ 'umask': mocker.patch('os.umask'),
+ 'getpwuid': mocker.patch('pwd.getpwuid'),
+ 'copy2': mocker.patch('shutil.copy2'),
+ 'copyfileobj': mocker.patch('shutil.copyfileobj'),
+ 'move': mocker.patch('shutil.move'),
+ 'mkstemp': mocker.patch('tempfile.mkstemp'),
+ }
+
+ mocks['getlogin'].return_value = 'root'
+ mocks['getuid'].return_value = 0
+ mocks['getpwuid'].return_value = ('root', '', 0, 0, '', '', '')
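+ # 18 == 0o022, a typical default umask; hence DEFAULT_PERM & ~18 == 0o644
+ # in the chmod assertions of the tests below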
+ mocks['umask'].side_effect = [18, 0]
+ mocks['rename'].return_value = None
+
+ # normalize OS specific features
+ monkeypatch.delattr(os, 'chflags', raising=False)
+
+ yield mocks
+
+
+@pytest.fixture
+def fake_stat(mocker):
+ stat1 = mocker.MagicMock()
+ stat1.st_mode = 0o0644
+ stat1.st_uid = 0
+ stat1.st_gid = 0
+ stat1.st_flags = 0
+ yield stat1
+
+
+@pytest.mark.parametrize('stdin, selinux', product([{}], (True, False)), indirect=['stdin'])
+def test_new_file(atomic_am, atomic_mocks, mocker, selinux):
+ # test destination does not exist, login name = 'root', no environment, os.rename() succeeds
+ mock_context = atomic_am.selinux_default_context.return_value
+ atomic_mocks['path_exists'].return_value = False
+ atomic_am.selinux_enabled.return_value = selinux
+
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+ atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest')
+ assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/dest', basic.DEFAULT_PERM & ~18)]
+
+ if selinux:
+ assert atomic_am.selinux_default_context.call_args_list == [mocker.call('/path/to/dest')]
+ assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)]
+ else:
+ assert not atomic_am.selinux_default_context.called
+ assert not atomic_am.set_context_if_different.called
+
+
+@pytest.mark.parametrize('stdin, selinux', product([{}], (True, False)), indirect=['stdin'])
+def test_existing_file(atomic_am, atomic_mocks, fake_stat, mocker, selinux):
+ # Test destination already present
+ mock_context = atomic_am.selinux_context.return_value
+ atomic_mocks['stat'].return_value = fake_stat
+ atomic_mocks['path_exists'].return_value = True
+ atomic_am.selinux_enabled.return_value = selinux
+
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+ atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest')
+ assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/src', basic.DEFAULT_PERM & ~18)]
+
+ if selinux:
+ assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)]
+ assert atomic_am.selinux_context.call_args_list == [mocker.call('/path/to/dest')]
+ else:
+ assert not atomic_am.selinux_default_context.called
+ assert not atomic_am.set_context_if_different.called
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_no_tty_fallback(atomic_am, atomic_mocks, fake_stat, mocker):
+ """Raise OSError when using getlogin() to simulate no tty cornercase"""
+ mock_context = atomic_am.selinux_context.return_value
+ atomic_mocks['stat'].return_value = fake_stat
+ atomic_mocks['path_exists'].return_value = True
+ atomic_am.selinux_enabled.return_value = True
+ atomic_mocks['getlogin'].side_effect = OSError()
+ atomic_mocks['environ']['LOGNAME'] = 'root'
+
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+ atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest')
+ assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/src', basic.DEFAULT_PERM & ~18)]
+
+ assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)]
+ assert atomic_am.selinux_context.call_args_list == [mocker.call('/path/to/dest')]
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_existing_file_stat_failure(atomic_am, atomic_mocks, mocker):
+ """Failure to stat an existing file in order to copy permissions propogates the error (unless EPERM)"""
+ atomic_mocks['stat'].side_effect = OSError()
+ atomic_mocks['path_exists'].return_value = True
+
+ with pytest.raises(OSError):
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_existing_file_stat_perms_failure(atomic_am, atomic_mocks, mocker):
+ """Failure to stat an existing file to copy the permissions due to permissions passes fine"""
+ # and now have os.stat return EPERM, which should not fail
+ mock_context = atomic_am.selinux_context.return_value
+ atomic_mocks['stat'].side_effect = OSError(errno.EPERM, 'testing os stat with EPERM')
+ atomic_mocks['path_exists'].return_value = True
+ atomic_am.selinux_enabled.return_value = True
+
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+ atomic_mocks['rename'].assert_called_with(b'/path/to/src', b'/path/to/dest')
+ # FIXME: Should atomic_move() set a default permission value when it cannot retrieve the
+ # existing file's permissions? (Right now it's up to the calling code.)
+ # assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/src', basic.DEFAULT_PERM & ~18)]
+ assert atomic_am.set_context_if_different.call_args_list == [mocker.call('/path/to/dest', mock_context, False)]
+ assert atomic_am.selinux_context.call_args_list == [mocker.call('/path/to/dest')]
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_rename_failure(atomic_am, atomic_mocks, mocker, capfd):
+ """Test os.rename fails with EIO, causing it to bail out"""
+ atomic_mocks['path_exists'].side_effect = [False, False]
+ atomic_mocks['rename'].side_effect = OSError(errno.EIO, 'failing with EIO')
+
+ with pytest.raises(SystemExit):
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert 'Could not replace file' in results['msg']
+ assert 'failing with EIO' in results['msg']
+ assert results['failed']
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_rename_perms_fail_temp_creation_fails(atomic_am, atomic_mocks, mocker, capfd):
+ """Test os.rename fails with EPERM working but failure in mkstemp"""
+ atomic_mocks['path_exists'].return_value = False
+ atomic_mocks['close'].return_value = None
+ atomic_mocks['rename'].side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None]
+ atomic_mocks['mkstemp'].return_value = None
+ atomic_mocks['mkstemp'].side_effect = OSError()
+ atomic_am.selinux_enabled.return_value = False
+
+ with pytest.raises(SystemExit):
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert 'is not writable by the current user' in results['msg']
+ assert results['failed']
+
+
+@pytest.mark.parametrize('stdin, selinux', product([{}], (True, False)), indirect=['stdin'])
+def test_rename_perms_fail_temp_succeeds(atomic_am, atomic_mocks, fake_stat, mocker, selinux):
+ """Test os.rename raising an error but fallback to using mkstemp works"""
+ mock_context = atomic_am.selinux_default_context.return_value
+ atomic_mocks['path_exists'].return_value = False
+ atomic_mocks['rename'].side_effect = [OSError(errno.EPERM, 'failing with EPERM'), None]
+ atomic_mocks['stat'].return_value = fake_stat
+ atomic_mocks['stat'].side_effect = None
+ atomic_mocks['mkstemp'].return_value = (None, '/path/to/tempfile')
+ atomic_mocks['mkstemp'].side_effect = None
+ atomic_am.selinux_enabled.return_value = selinux
+
+ atomic_am.atomic_move('/path/to/src', '/path/to/dest')
+ assert atomic_mocks['rename'].call_args_list == [mocker.call(b'/path/to/src', b'/path/to/dest'),
+ mocker.call(b'/path/to/tempfile', b'/path/to/dest')]
+ assert atomic_mocks['chmod'].call_args_list == [mocker.call(b'/path/to/dest', basic.DEFAULT_PERM & ~18)]
+
+ if selinux:
+ assert atomic_am.selinux_default_context.call_args_list == [mocker.call('/path/to/dest')]
+ assert atomic_am.set_context_if_different.call_args_list == [mocker.call(b'/path/to/tempfile', mock_context, False),
+ mocker.call('/path/to/dest', mock_context, False)]
+ else:
+ assert not atomic_am.selinux_default_context.called
+ assert not atomic_am.set_context_if_different.called
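+
+
+# The EPERM fallback exercised above mirrors atomic_move(): when
+# os.rename(src, dest) fails with EPERM, a temp file is created near dest via
+# tempfile.mkstemp(), the source is copied into it, and the temp file is then
+# renamed over dest, hence the second rename call asserted above.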
diff --git a/test/units/module_utils/basic/test_deprecate_warn.py b/test/units/module_utils/basic/test_deprecate_warn.py
new file mode 100644
index 00000000..351cf25b
--- /dev/null
+++ b/test/units/module_utils/basic/test_deprecate_warn.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_warn(am, capfd):
+
+ am.warn('warning1')
+
+ with pytest.raises(SystemExit):
+ am.exit_json(warnings=['warning2'])
+ out, err = capfd.readouterr()
+ assert json.loads(out)['warnings'] == ['warning1', 'warning2']
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_deprecate(am, capfd):
+ am.deprecate('deprecation1')
+ am.deprecate('deprecation2', '2.3') # pylint: disable=ansible-deprecated-no-collection-name
+ am.deprecate('deprecation3', version='2.4') # pylint: disable=ansible-deprecated-no-collection-name
+ am.deprecate('deprecation4', date='2020-03-10') # pylint: disable=ansible-deprecated-no-collection-name
+ am.deprecate('deprecation5', collection_name='ansible.builtin')
+ am.deprecate('deprecation6', '2.3', collection_name='ansible.builtin')
+ am.deprecate('deprecation7', version='2.4', collection_name='ansible.builtin')
+ am.deprecate('deprecation8', date='2020-03-10', collection_name='ansible.builtin')
+
+ with pytest.raises(SystemExit):
+ am.exit_json(deprecations=['deprecation9', ('deprecation10', '2.4')])
+
+ out, err = capfd.readouterr()
+ output = json.loads(out)
+ assert ('warnings' not in output or output['warnings'] == [])
+ assert output['deprecations'] == [
+ {u'msg': u'deprecation1', u'version': None, u'collection_name': None},
+ {u'msg': u'deprecation2', u'version': '2.3', u'collection_name': None},
+ {u'msg': u'deprecation3', u'version': '2.4', u'collection_name': None},
+ {u'msg': u'deprecation4', u'date': '2020-03-10', u'collection_name': None},
+ {u'msg': u'deprecation5', u'version': None, u'collection_name': 'ansible.builtin'},
+ {u'msg': u'deprecation6', u'version': '2.3', u'collection_name': 'ansible.builtin'},
+ {u'msg': u'deprecation7', u'version': '2.4', u'collection_name': 'ansible.builtin'},
+ {u'msg': u'deprecation8', u'date': '2020-03-10', u'collection_name': 'ansible.builtin'},
+ {u'msg': u'deprecation9', u'version': None, u'collection_name': None},
+ {u'msg': u'deprecation10', u'version': '2.4', u'collection_name': None},
+ ]
+
+
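+# For reference: AnsibleModule.deprecate(msg, version=None, date=None,
+# collection_name=None) accepts a version or a date, but not both; the final
+# test in this file asserts exactly that.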
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_deprecate_without_list(am, capfd):
+ with pytest.raises(SystemExit):
+ am.exit_json(deprecations='Simple deprecation warning')
+
+ out, err = capfd.readouterr()
+ output = json.loads(out)
+ assert ('warnings' not in output or output['warnings'] == [])
+ assert output['deprecations'] == [
+ {u'msg': u'Simple deprecation warning', u'version': None, u'collection_name': None},
+ ]
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_deprecate_version_and_date_both_set(am, capfd):
+ with pytest.raises(AssertionError) as ctx:
+ am.deprecate('Simple deprecation warning', date='', version='')
+ assert ctx.value.args[0] == "implementation error -- version and date must not both be set"
diff --git a/test/units/module_utils/basic/test_dict_converters.py b/test/units/module_utils/basic/test_dict_converters.py
new file mode 100644
index 00000000..f63ed9c6
--- /dev/null
+++ b/test/units/module_utils/basic/test_dict_converters.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.mock.procenv import ModuleTestCase
+
+from ansible.module_utils.six.moves import builtins
+
+realimport = builtins.__import__
+
+
+class TestTextifyContainers(ModuleTestCase):
+ def test_module_utils_basic_json_dict_converters(self):
+ from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode
+
+ test_data = dict(
+ item1=u"Fóo",
+ item2=[u"Bár", u"Bam"],
+ item3=dict(sub1=u"Súb"),
+ item4=(u"föo", u"bär", u"©"),
+ item5=42,
+ )
+ res = json_dict_unicode_to_bytes(test_data)
+ res2 = json_dict_bytes_to_unicode(res)
+
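+ # the round trip must be lossless, e.g. u"Fóo" -> b"F\xc3\xb3o" -> u"Fóo";
+ # equality of the whole structure shows the converters recurse through
+ # dicts, lists and tuples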
+ self.assertEqual(test_data, res2)
diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py
new file mode 100644
index 00000000..240095c0
--- /dev/null
+++ b/test/units/module_utils/basic/test_exit_json.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+import pytest
+
+
+EMPTY_INVOCATION = {u'module_args': {}}
+
+
+class TestAnsibleModuleExitJson:
+ """
+ Test that various means of calling exit_json() and fail_json() return the messages they've been given
+ """
+ DATA = (
+ ({}, {'invocation': EMPTY_INVOCATION}),
+ ({'msg': 'message'}, {'msg': 'message', 'invocation': EMPTY_INVOCATION}),
+ ({'msg': 'success', 'changed': True},
+ {'msg': 'success', 'changed': True, 'invocation': EMPTY_INVOCATION}),
+ ({'msg': 'nochange', 'changed': False},
+ {'msg': 'nochange', 'changed': False, 'invocation': EMPTY_INVOCATION}),
+ )
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ # pylint: disable=undefined-variable
+ @pytest.mark.parametrize('args, expected, stdin', ((a, e, {}) for a, e in DATA), indirect=['stdin'])
+ def test_exit_json_exits(self, am, capfd, args, expected):
+ with pytest.raises(SystemExit) as ctx:
+ am.exit_json(**args)
+ assert ctx.value.code == 0
+
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+ assert return_val == expected
+
+ # Fail_json is only legal if it's called with a message
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('args, expected, stdin',
+ ((a, e, {}) for a, e in DATA if 'msg' in a), # pylint: disable=undefined-variable
+ indirect=['stdin'])
+ def test_fail_json_exits(self, am, capfd, args, expected):
+ with pytest.raises(SystemExit) as ctx:
+ am.fail_json(**args)
+ assert ctx.value.code == 1
+
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+ # Fail_json should add failed=True
+ expected['failed'] = True
+ assert return_val == expected
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_fail_json_msg_positional(self, am, capfd):
+ with pytest.raises(SystemExit) as ctx:
+ am.fail_json('This is the msg')
+ assert ctx.value.code == 1
+
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+ # Fail_json should add failed=True
+ assert return_val == {'msg': 'This is the msg', 'failed': True,
+ 'invocation': EMPTY_INVOCATION}
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_fail_json_msg_as_kwarg_after(self, am, capfd):
+ """Test that msg as a kwarg after other kwargs works"""
+ with pytest.raises(SystemExit) as ctx:
+ am.fail_json(arbitrary=42, msg='This is the msg')
+ assert ctx.value.code == 1
+
+ out, err = capfd.readouterr()
+ return_val = json.loads(out)
+ # Fail_json should add failed=True
+ assert return_val == {'msg': 'This is the msg', 'failed': True,
+ 'arbitrary': 42,
+ 'invocation': EMPTY_INVOCATION}
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_fail_json_no_msg(self, am):
+ with pytest.raises(TypeError) as ctx:
+ am.fail_json()
+
+ if sys.version_info < (3,):
+ error_msg = "fail_json() takes exactly 2 arguments (1 given)"
+ else:
+ error_msg = "fail_json() missing 1 required positional argument: 'msg'"
+
+ assert ctx.value.args[0] == error_msg
+
+
+class TestAnsibleModuleExitValuesRemoved:
+ """
+ Test that ExitJson and FailJson remove password-like values
+ """
+ OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+
+ DATA = (
+ (
+ dict(username='person', password='$ecret k3y'),
+ dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
+ not_secret='following the leader', msg='here'),
+ dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
+ not_secret='following the leader', msg='here',
+ invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
+ ),
+ (
+ dict(username='person', password='password12345'),
+ dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
+ not_secret='following the leader', msg='here'),
+ dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
+ not_secret='following the leader', msg='here',
+ invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
+ ),
+ (
+ dict(username='person', password='$ecret k3y'),
+ dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
+ not_secret='following the leader', msg='here'),
+ dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
+ not_secret='following the leader', msg='here',
+ invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
+ ),
+ )
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('am, stdin, return_val, expected',
+ (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
+ for s, r, e in DATA), # pylint: disable=undefined-variable
+ indirect=['am', 'stdin'])
+ def test_exit_json_removes_values(self, am, capfd, return_val, expected):
+ with pytest.raises(SystemExit):
+ am.exit_json(**return_val)
+ out, err = capfd.readouterr()
+
+ assert json.loads(out) == expected
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('am, stdin, return_val, expected',
+ (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
+ for s, r, e in DATA), # pylint: disable=undefined-variable
+ indirect=['am', 'stdin'])
+ def test_fail_json_removes_values(self, am, capfd, return_val, expected):
+ expected['failed'] = True
+ with pytest.raises(SystemExit):
+ am.fail_json(**return_val)
+ out, err = capfd.readouterr()
+
+ assert json.loads(out) == expected
diff --git a/test/units/module_utils/basic/test_filesystem.py b/test/units/module_utils/basic/test_filesystem.py
new file mode 100644
index 00000000..37d1c553
--- /dev/null
+++ b/test/units/module_utils/basic/test_filesystem.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.mock.procenv import ModuleTestCase
+
+from units.compat.mock import patch, MagicMock
+from ansible.module_utils.six.moves import builtins
+
+realimport = builtins.__import__
+
+
+class TestOtherFilesystem(ModuleTestCase):
+ def test_module_utils_basic_ansible_module_user_and_group(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ mock_stat = MagicMock()
+ mock_stat.st_uid = 0
+ mock_stat.st_gid = 0
+
+ with patch('os.lstat', return_value=mock_stat):
+ self.assertEqual(am.user_and_group('/path/to/file'), (0, 0))
+
+ def test_module_utils_basic_ansible_module_find_mount_point(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ def _mock_ismount(path):
+ if path == b'/':
+ return True
+ return False
+
+ with patch('os.path.ismount', side_effect=_mock_ismount):
+ self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/')
+
+ def _mock_ismount(path):
+ if path == b'/subdir/mount':
+ return True
+ if path == b'/':
+ return True
+ return False
+
+ with patch('os.path.ismount', side_effect=_mock_ismount):
+ self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount')
+
+ def test_module_utils_basic_ansible_module_set_owner_if_different(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True)
+ self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False)
+
+ am.user_and_group = MagicMock(return_value=(500, 500))
+
+ with patch('os.lchown', return_value=None) as m:
+ self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
+ m.assert_called_with(b'/path/to/file', 0, -1)
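+ # (-1 for the gid means "leave the group unchanged", per chown semantics)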
+
+ def _mock_getpwnam(*args, **kwargs):
+ mock_pw = MagicMock()
+ mock_pw.pw_uid = 0
+ return mock_pw
+
+ m.reset_mock()
+ with patch('pwd.getpwnam', side_effect=_mock_getpwnam):
+ self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True)
+ m.assert_called_with(b'/path/to/file', 0, -1)
+
+ with patch('pwd.getpwnam', side_effect=KeyError):
+ self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
+
+ m.reset_mock()
+ am.check_mode = True
+ self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
+ self.assertEqual(m.called, False)
+ am.check_mode = False
+
+ with patch('os.lchown', side_effect=OSError) as m:
+ self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
+
+ def test_module_utils_basic_ansible_module_set_group_if_different(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True)
+ self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False)
+
+ am.user_and_group = MagicMock(return_value=(500, 500))
+
+ with patch('os.lchown', return_value=None) as m:
+ self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
+ m.assert_called_with(b'/path/to/file', -1, 0)
+
+ def _mock_getgrnam(*args, **kwargs):
+ mock_gr = MagicMock()
+ mock_gr.gr_gid = 0
+ return mock_gr
+
+ m.reset_mock()
+ with patch('grp.getgrnam', side_effect=_mock_getgrnam):
+ self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True)
+ m.assert_called_with(b'/path/to/file', -1, 0)
+
+ with patch('grp.getgrnam', side_effect=KeyError):
+ self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
+
+ m.reset_mock()
+ am.check_mode = True
+ self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
+ self.assertEqual(m.called, False)
+ am.check_mode = False
+
+ with patch('os.lchown', side_effect=OSError) as m:
+ self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
diff --git a/test/units/module_utils/basic/test_get_file_attributes.py b/test/units/module_utils/basic/test_get_file_attributes.py
new file mode 100644
index 00000000..5130a5fb
--- /dev/null
+++ b/test/units/module_utils/basic/test_get_file_attributes.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright:
+# (c) 2017, Pierre-Louis Bonicoli <pierre-louis@libregerbil.fr>
+# License: GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+import pytest
+
+
+DATA = (
+ (
+ '3353595900 --------------e---- /usr/lib32',
+ {'attr_flags': 'e', 'version': '3353595900', 'attributes': ['extents']}
+ ),
+ # with e2fsprogs < 1.43, output isn't aligned
+ (
+ '78053594 -----------I--e---- /usr/lib',
+ {'attr_flags': 'Ie', 'version': '78053594', 'attributes': ['indexed', 'extents']}
+ ),
+ (
+ '15711607 -------A------e---- /tmp/test',
+ {'attr_flags': 'Ae', 'version': '15711607', 'attributes': ['noatime', 'extents']}
+ ),
+ # with e2fsprogs >= 1.43, output is aligned
+ (
+ '78053594   -----------I--e---- /usr/lib',
+ {'attr_flags': 'Ie', 'version': '78053594', 'attributes': ['indexed', 'extents']}
+ ),
+ (
+ '15711607 -------A------e---- /tmp/test',
+ {'attr_flags': 'Ae', 'version': '15711607', 'attributes': ['noatime', 'extents']}
+ ),
+)
+
+
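+# Each DATA row mimics one line of `lsattr -vd <path>` output
+# ("<version> <flag string> <path>"); get_file_attributes() maps the
+# single-letter flags to attribute names, e.g. 'A' -> 'noatime',
+# 'I' -> 'indexed', 'e' -> 'extents'.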
+@pytest.mark.parametrize('stdin, data', product(({},), DATA), indirect=['stdin'])
+def test_get_file_attributes(am, stdin, mocker, data):
+ # Test #18731
+ mocker.patch.object(AnsibleModule, 'get_bin_path', return_value='/usr/bin/lsattr')
+ mocker.patch.object(AnsibleModule, 'run_command', return_value=(0, data[0], ''))
+ result = am.get_file_attributes('/path/to/file')
+ for key, value in data[1].items():
+ assert key in result and result[key] == value
diff --git a/test/units/module_utils/basic/test_get_module_path.py b/test/units/module_utils/basic/test_get_module_path.py
new file mode 100644
index 00000000..6ff4a3bc
--- /dev/null
+++ b/test/units/module_utils/basic/test_get_module_path.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.mock.procenv import ModuleTestCase
+
+from units.compat.mock import patch
+from ansible.module_utils.six.moves import builtins
+
+realimport = builtins.__import__
+
+
+class TestGetModulePath(ModuleTestCase):
+ def test_module_utils_basic_get_module_path(self):
+ from ansible.module_utils.basic import get_module_path
+ with patch('os.path.realpath', return_value='/path/to/foo/'):
+ self.assertEqual(get_module_path(), '/path/to/foo')
diff --git a/test/units/module_utils/basic/test_heuristic_log_sanitize.py b/test/units/module_utils/basic/test_heuristic_log_sanitize.py
new file mode 100644
index 00000000..f8a0929d
--- /dev/null
+++ b/test/units/module_utils/basic/test_heuristic_log_sanitize.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.module_utils.basic import heuristic_log_sanitize
+
+
+class TestHeuristicLogSanitize(unittest.TestCase):
+ def setUp(self):
+ self.URL_SECRET = 'http://username:pas:word@foo.com/data'
+ self.SSH_SECRET = 'username:pas:word@foo.com/data'
+ self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
+ self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
+ self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
+
+ def _gen_data(self, records, per_rec, top_level, secret_text):
+ hostvars = {'hostvars': {}}
+ for i in range(1, records, 1):
+ host_facts = {
+ 'host%s' % i: {
+ 'pstack': {
+ 'running': '875.1',
+ 'symlinked': '880.0',
+ 'tars': [],
+ 'versions': ['885.0']
+ },
+ }
+ }
+ if per_rec:
+ host_facts['host%s' % i]['secret'] = secret_text
+ hostvars['hostvars'].update(host_facts)
+ if top_level:
+ hostvars['secret'] = secret_text
+ return hostvars
+
+ def test_did_not_hide_too_much(self):
+ self.assertEqual(heuristic_log_sanitize(self.clean_data), self.clean_data)
+
+ def test_hides_url_secrets(self):
+ url_output = heuristic_log_sanitize(self.url_data)
+ # Basic functionality: Successfully hid the password
+ self.assertNotIn('pas:word', url_output)
+
+ # Slightly more advanced, we hid all of the password despite the ":"
+ self.assertNotIn('pas', url_output)
+
+ # In this implementation we replace the password with 8 "*" which is
+ # also the length of our password. The url fields should be able to
+ # accurately detect where the password ends so the length should be
+ # the same:
+ self.assertEqual(len(url_output), len(self.url_data))
+
+ def test_hides_ssh_secrets(self):
+ ssh_output = heuristic_log_sanitize(self.ssh_data)
+ self.assertNotIn('pas:word', ssh_output)
+
+ # Slightly more advanced, we hid all of the password despite the ":"
+ self.assertNotIn('pas', ssh_output)
+
+ # ssh checking is harder as the heuristic is overzealous in many
+ # cases. Since the input will have at least one ":" present before
+ # the password we can tell some things about the beginning and end of
+ # the data, though:
+ self.assertTrue(ssh_output.startswith("{'"))
+ self.assertTrue(ssh_output.endswith("}"))
+ self.assertIn(":********@foo.com/data'", ssh_output)
+
+ def test_hides_parameter_secrets(self):
+ output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
+ self.assertNotIn('secret', output)
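+
+
+# heuristic_log_sanitize() keys on the ':' and '@' shapes typical of
+# user:pass@host strings; its optional frozenset argument supplies explicit
+# no_log values, which is why 'secret' is masked in the last test even though
+# no URL-like context is present.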
diff --git a/test/units/module_utils/basic/test_imports.py b/test/units/module_utils/basic/test_imports.py
new file mode 100644
index 00000000..9d8ae68d
--- /dev/null
+++ b/test/units/module_utils/basic/test_imports.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import sys
+
+from units.mock.procenv import ModuleTestCase
+
+from units.compat import unittest
+from units.compat.mock import patch
+from ansible.module_utils.six.moves import builtins
+
+realimport = builtins.__import__
+
+
+class TestImports(ModuleTestCase):
+
+ def clear_modules(self, mods):
+ for mod in mods:
+ if mod in sys.modules:
+ del sys.modules[mod]
+
+ @patch.object(builtins, '__import__')
+ def test_module_utils_basic_import_syslog(self, mock_import):
+ def _mock_import(name, *args, **kwargs):
+ if name == 'syslog':
+ raise ImportError
+ return realimport(name, *args, **kwargs)
+
+ self.clear_modules(['syslog', 'ansible.module_utils.basic'])
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertTrue(mod.module_utils.basic.HAS_SYSLOG)
+
+ self.clear_modules(['syslog', 'ansible.module_utils.basic'])
+ mock_import.side_effect = _mock_import
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertFalse(mod.module_utils.basic.HAS_SYSLOG)
+
+ @patch.object(builtins, '__import__')
+ def test_module_utils_basic_import_selinux(self, mock_import):
+ def _mock_import(name, *args, **kwargs):
+ if name == 'selinux':
+ raise ImportError
+ return realimport(name, *args, **kwargs)
+
+ try:
+ self.clear_modules(['selinux', 'ansible.module_utils.basic'])
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertTrue(mod.module_utils.basic.HAVE_SELINUX)
+ except ImportError:
+ # no selinux on test system, so skip
+ pass
+
+ self.clear_modules(['selinux', 'ansible.module_utils.basic'])
+ mock_import.side_effect = _mock_import
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertFalse(mod.module_utils.basic.HAVE_SELINUX)
+
+ @patch.object(builtins, '__import__')
+ def test_module_utils_basic_import_json(self, mock_import):
+ def _mock_import(name, *args, **kwargs):
+ if name == 'ansible.module_utils.common._json_compat':
+ raise ImportError
+ return realimport(name, *args, **kwargs)
+
+ self.clear_modules(['json', 'ansible.module_utils.basic'])
+ builtins.__import__('ansible.module_utils.basic')
+ self.clear_modules(['json', 'ansible.module_utils.basic'])
+ mock_import.side_effect = _mock_import
+ with self.assertRaises(SystemExit):
+ builtins.__import__('ansible.module_utils.basic')
+
+ # FIXME: doesn't work yet
+ # @patch.object(builtins, 'bytes')
+ # def test_module_utils_basic_bytes(self, mock_bytes):
+ # mock_bytes.side_effect = NameError()
+ # from ansible.module_utils import basic
+
+ @patch.object(builtins, '__import__')
+ @unittest.skipIf(sys.version_info[0] >= 3, "literal_eval is available in every version of Python3")
+ def test_module_utils_basic_import_literal_eval(self, mock_import):
+ def _mock_import(name, *args, **kwargs):
+ try:
+ fromlist = kwargs.get('fromlist', args[2])
+ except IndexError:
+ fromlist = []
+ if name == 'ast' and 'literal_eval' in fromlist:
+ raise ImportError
+ return realimport(name, *args, **kwargs)
+
+ mock_import.side_effect = _mock_import
+ self.clear_modules(['ast', 'ansible.module_utils.basic'])
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1")
+ self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1)
+ self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1)
+ self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1, 2, 3))
+ self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1])
+ self.assertEqual(mod.module_utils.basic.literal_eval("True"), True)
+ self.assertEqual(mod.module_utils.basic.literal_eval("False"), False)
+ self.assertEqual(mod.module_utils.basic.literal_eval("None"), None)
+ # self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1))
+ self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf")
+
+ @patch.object(builtins, '__import__')
+ def test_module_utils_basic_import_systemd_journal(self, mock_import):
+ def _mock_import(name, *args, **kwargs):
+ try:
+ fromlist = kwargs.get('fromlist', args[2])
+ except IndexError:
+ fromlist = []
+ if name == 'systemd' and 'journal' in fromlist:
+ raise ImportError
+ return realimport(name, *args, **kwargs)
+
+ self.clear_modules(['systemd', 'ansible.module_utils.basic'])
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertTrue(mod.module_utils.basic.has_journal)
+
+ self.clear_modules(['systemd', 'ansible.module_utils.basic'])
+ mock_import.side_effect = _mock_import
+ mod = builtins.__import__('ansible.module_utils.basic')
+ self.assertFalse(mod.module_utils.basic.has_journal)
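+
+# All of the tests above share one pattern: patch builtins.__import__ so a
+# single named import raises ImportError, purge cached entries from
+# sys.modules, re-import ansible.module_utils.basic, and assert the fallback
+# behaviour: HAS_SYSLOG/HAVE_SELINUX/has_journal flip to False, and a missing
+# json backend exits via SystemExit.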
diff --git a/test/units/module_utils/basic/test_log.py b/test/units/module_utils/basic/test_log.py
new file mode 100644
index 00000000..f3f764fc
--- /dev/null
+++ b/test/units/module_utils/basic/test_log.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import syslog
+from itertools import product
+
+import pytest
+
+import ansible.module_utils.basic
+from ansible.module_utils.six import PY3
+
+
+class TestAnsibleModuleLogSmokeTest:
+    DATA = [u'Text string', u'Toshio くらとみ non-ascii test']
+ DATA = DATA + [d.encode('utf-8') for d in DATA]
+ DATA += [b'non-utf8 :\xff: test']
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin']) # pylint: disable=undefined-variable
+ def test_smoketest_syslog(self, am, mocker, msg):
+ # These talk to the live daemons on the system. Need to do this to
+ # show that what we send doesn't cause an issue once it gets to the
+ # daemon. These are just smoketests to test that we don't fail.
+ mocker.patch('ansible.module_utils.basic.has_journal', False)
+
+ am.log(u'Text string')
+        am.log(u'Toshio くらとみ non-ascii test')
+
+ am.log(b'Byte string')
+        am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
+ am.log(b'non-utf8 :\xff: test')
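+        # Note: 'msg' comes from the parametrize generator above while the
+        # body logs fixed representatives of DATA; indirect=['stdin'] routes
+        # the {} entry to the 'stdin' fixture that fakes the module's input.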
+
+ @pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed')
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin']) # pylint: disable=undefined-variable
+ def test_smoketest_journal(self, am, mocker, msg):
+ # These talk to the live daemons on the system. Need to do this to
+ # show that what we send doesn't cause an issue once it gets to the
+ # daemon. These are just smoketests to test that we don't fail.
+ mocker.patch('ansible.module_utils.basic.has_journal', True)
+
+ am.log(u'Text string')
+        am.log(u'Toshio くらとみ non-ascii test')
+
+ am.log(b'Byte string')
+        am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
+ am.log(b'non-utf8 :\xff: test')
+
+
+class TestAnsibleModuleLogSyslog:
+ """Test the AnsibleModule Log Method"""
+
+    PY2_OUTPUT_DATA = [
+        (u'Text string', b'Text string'),
+        (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'.encode('utf-8')),
+        (b'Byte string', b'Byte string'),
+        (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'.encode('utf-8')),
+        (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace').encode('utf-8')),
+    ]
+
+    PY3_OUTPUT_DATA = [
+        (u'Text string', u'Text string'),
+        (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'),
+        (b'Byte string', u'Byte string'),
+        (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'),
+        (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')),
+    ]
+
+ OUTPUT_DATA = PY3_OUTPUT_DATA if PY3 else PY2_OUTPUT_DATA
+
+ @pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin'])
+ def test_no_log(self, am, mocker, no_log):
+ """Test that when no_log is set, logging does not occur"""
+ mock_syslog = mocker.patch('syslog.syslog', autospec=True)
+ mocker.patch('ansible.module_utils.basic.has_journal', False)
+ am.no_log = no_log
+ am.log('unittest no_log')
+ if no_log:
+ assert not mock_syslog.called
+ else:
+ mock_syslog.assert_called_once_with(syslog.LOG_INFO, 'unittest no_log')
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('msg, param, stdin',
+ ((m, p, {}) for m, p in OUTPUT_DATA), # pylint: disable=undefined-variable
+ indirect=['stdin'])
+ def test_output_matches(self, am, mocker, msg, param):
+ """Check that log messages are sent correctly"""
+ mocker.patch('ansible.module_utils.basic.has_journal', False)
+ mock_syslog = mocker.patch('syslog.syslog', autospec=True)
+
+ am.log(msg)
+ mock_syslog.assert_called_once_with(syslog.LOG_INFO, param)
+
+
+@pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed')
+class TestAnsibleModuleLogJournal:
+ """Test the AnsibleModule Log Method"""
+
+    OUTPUT_DATA = [
+        (u'Text string', u'Text string'),
+        (u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'),
+        (b'Byte string', u'Byte string'),
+        (u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'),
+        (b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')),
+    ]
+
+ @pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin'])
+ def test_no_log(self, am, mocker, no_log):
+ journal_send = mocker.patch('systemd.journal.send')
+ am.no_log = no_log
+ am.log('unittest no_log')
+ if no_log:
+ assert not journal_send.called
+ else:
+            assert journal_send.call_count == 1
+ # Message
+ # call_args is a 2-tuple of (arg_list, kwarg_dict)
+ assert journal_send.call_args[1]['MESSAGE'].endswith('unittest no_log'), 'Message was not sent to log'
+ # log adds this journal field
+ assert 'MODULE' in journal_send.call_args[1]
+ assert 'basic.py' in journal_send.call_args[1]['MODULE']
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ @pytest.mark.parametrize('msg, param, stdin',
+ ((m, p, {}) for m, p in OUTPUT_DATA), # pylint: disable=undefined-variable
+ indirect=['stdin'])
+ def test_output_matches(self, am, mocker, msg, param):
+ journal_send = mocker.patch('systemd.journal.send')
+ am.log(msg)
+ assert journal_send.call_count == 1, 'journal.send not called exactly once'
+ assert journal_send.call_args[1]['MESSAGE'].endswith(param)
+
+ @pytest.mark.parametrize('stdin', ({},), indirect=['stdin'])
+ def test_log_args(self, am, mocker):
+ journal_send = mocker.patch('systemd.journal.send')
+ am.log('unittest log_args', log_args=dict(TEST='log unittest'))
+        assert journal_send.call_count == 1
+ assert journal_send.call_args[1]['MESSAGE'].endswith('unittest log_args'), 'Message was not sent to log'
+
+ # log adds this journal field
+ assert 'MODULE' in journal_send.call_args[1]
+ assert 'basic.py' in journal_send.call_args[1]['MODULE']
+
+ # We added this journal field
+ assert 'TEST' in journal_send.call_args[1]
+ assert 'log unittest' in journal_send.call_args[1]['TEST']
diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py
new file mode 100644
index 00000000..c4797028
--- /dev/null
+++ b/test/units/module_utils/basic/test_no_log.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.module_utils.basic import remove_values
+from ansible.module_utils.common.parameters import _return_datastructure_name
+
+
+class TestReturnValues(unittest.TestCase):
+ dataset = (
+ ('string', frozenset(['string'])),
+ ('', frozenset()),
+ (1, frozenset(['1'])),
+ (1.0, frozenset(['1.0'])),
+ (False, frozenset()),
+ (['1', '2', '3'], frozenset(['1', '2', '3'])),
+ (('1', '2', '3'), frozenset(['1', '2', '3'])),
+ ({'one': 1, 'two': 'dos'}, frozenset(['1', 'dos'])),
+ (
+ {
+ 'one': 1,
+ 'two': 'dos',
+ 'three': [
+ 'amigos', 'musketeers', None, {
+ 'ping': 'pong',
+ 'base': (
+ 'balls', 'raquets'
+ )
+ }
+ ]
+ },
+ frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets'])
+ ),
+        (u'Toshio くらとみ', frozenset(['Toshio くらとみ'])),
+        ('Toshio くらとみ', frozenset(['Toshio くらとみ'])),
+ )
+
+ def test_return_datastructure_name(self):
+ for data, expected in self.dataset:
+ self.assertEqual(frozenset(_return_datastructure_name(data)), expected)
+
+ def test_unknown_type(self):
+ self.assertRaises(TypeError, frozenset, _return_datastructure_name(object()))
+
+
+class TestRemoveValues(unittest.TestCase):
+ OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
+ dataset_no_remove = (
+ ('string', frozenset(['nope'])),
+ (1234, frozenset(['4321'])),
+ (False, frozenset(['4321'])),
+ (1.0, frozenset(['4321'])),
+ (['string', 'strang', 'strung'], frozenset(['nope'])),
+ ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['nope'])),
+ (
+ {
+ 'one': 1,
+ 'two': 'dos',
+ 'three': [
+ 'amigos', 'musketeers', None, {
+ 'ping': 'pong', 'base': ['balls', 'raquets']
+ }
+ ]
+ },
+ frozenset(['nope'])
+ ),
+        (u'Toshio くら'.encode('utf-8'), frozenset([u'とみ'.encode('utf-8')])),
+        (u'Toshio くら', frozenset([u'とみ'])),
+ )
+ dataset_remove = (
+ ('string', frozenset(['string']), OMIT),
+ (1234, frozenset(['1234']), OMIT),
+ (1234, frozenset(['23']), OMIT),
+ (1.0, frozenset(['1.0']), OMIT),
+ (['string', 'strang', 'strung'], frozenset(['strang']), ['string', OMIT, 'strung']),
+ (['string', 'strang', 'strung'], frozenset(['strang', 'string', 'strung']), [OMIT, OMIT, OMIT]),
+ (('string', 'strang', 'strung'), frozenset(['string', 'strung']), [OMIT, 'strang', OMIT]),
+ ((1234567890, 345678, 987654321), frozenset(['1234567890']), [OMIT, 345678, 987654321]),
+ ((1234567890, 345678, 987654321), frozenset(['345678']), [OMIT, OMIT, 987654321]),
+ ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key']), {'one': 1, 'two': 'dos', 'secret': OMIT}),
+ ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}),
+ ({'one': 1, 'two': 'dos', 'secret': 'key'}, frozenset(['key', 'dos', '1']), {'one': OMIT, 'two': OMIT, 'secret': OMIT}),
+ (
+ {
+ 'one': 1,
+ 'two': 'dos',
+ 'three': [
+ 'amigos', 'musketeers', None, {
+ 'ping': 'pong', 'base': [
+ 'balls', 'raquets'
+ ]
+ }
+ ]
+ },
+ frozenset(['balls', 'base', 'pong', 'amigos']),
+ {
+ 'one': 1,
+ 'two': 'dos',
+ 'three': [
+ OMIT, 'musketeers', None, {
+ 'ping': OMIT,
+ 'base': [
+ OMIT, 'raquets'
+ ]
+ }
+ ]
+ }
+ ),
+ (
+ 'This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery',
+ frozenset(['enigma', 'mystery', 'secret']),
+ 'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'
+ ),
+        (u'Toshio くらとみ'.encode('utf-8'), frozenset([u'くらとみ'.encode('utf-8')]), u'Toshio ********'.encode('utf-8')),
+        (u'Toshio くらとみ', frozenset([u'くらとみ']), u'Toshio ********'),
+ )
+
+ def test_no_removal(self):
+ for value, no_log_strings in self.dataset_no_remove:
+ self.assertEqual(remove_values(value, no_log_strings), value)
+
+ def test_strings_to_remove(self):
+ for value, no_log_strings, expected in self.dataset_remove:
+ self.assertEqual(remove_values(value, no_log_strings), expected)
+
+ def test_unknown_type(self):
+ self.assertRaises(TypeError, remove_values, object(), frozenset())
+
+ def test_hit_recursion_limit(self):
+ """ Check that we do not hit a recursion limit"""
+ data_list = []
+ inner_list = data_list
+ for i in range(0, 10000):
+ new_list = []
+ inner_list.append(new_list)
+ inner_list = new_list
+ inner_list.append('secret')
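+        # data_list is now nested 10000 levels deep with the string 'secret'
+        # in the innermost list: [[[ ... [['secret']] ... ]]]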
+
+ # Check that this does not hit a recursion limit
+ actual_data_list = remove_values(data_list, frozenset(('secret',)))
+
+ levels = 0
+ inner_list = actual_data_list
+ while inner_list:
+ if isinstance(inner_list, list):
+ self.assertEqual(len(inner_list), 1)
+ else:
+ levels -= 1
+ break
+ inner_list = inner_list[0]
+ levels += 1
+
+ self.assertEqual(inner_list, self.OMIT)
+ self.assertEqual(levels, 10000)
diff --git a/test/units/module_utils/basic/test_platform_distribution.py b/test/units/module_utils/basic/test_platform_distribution.py
new file mode 100644
index 00000000..d7a4510c
--- /dev/null
+++ b/test/units/module_utils/basic/test_platform_distribution.py
@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017-2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from units.compat.mock import patch
+
+from ansible.module_utils.six.moves import builtins
+
+# Functions being tested
+from ansible.module_utils.basic import get_platform
+from ansible.module_utils.basic import get_all_subclasses
+from ansible.module_utils.basic import get_distribution
+from ansible.module_utils.basic import get_distribution_version
+from ansible.module_utils.basic import load_platform_subclass
+
+
+realimport = builtins.__import__
+
+
+@pytest.fixture
+def platform_linux(mocker):
+ mocker.patch('platform.system', return_value='Linux')
+
+
+#
+# get_platform tests
+#
+
+def test_get_platform():
+ with patch('platform.system', return_value='foo'):
+ assert get_platform() == 'foo'
+
+
+#
+# get_distribution tests
+#
+
+def test_get_distribution_not_linux():
+ """If it's not Linux, then it has no distribution"""
+ with patch('platform.system', return_value='Foo'):
+ assert get_distribution() is None
+
+
+@pytest.mark.usefixtures("platform_linux")
+class TestGetDistribution:
+ """Tests for get_distribution that have to find something"""
+ def test_distro_known(self):
+ with patch('ansible.module_utils.distro.id', return_value="alpine"):
+ assert get_distribution() == "Alpine"
+
+ with patch('ansible.module_utils.distro.id', return_value="arch"):
+ assert get_distribution() == "Arch"
+
+ with patch('ansible.module_utils.distro.id', return_value="centos"):
+ assert get_distribution() == "Centos"
+
+ with patch('ansible.module_utils.distro.id', return_value="clear-linux-os"):
+ assert get_distribution() == "Clear-linux-os"
+
+ with patch('ansible.module_utils.distro.id', return_value="coreos"):
+ assert get_distribution() == "Coreos"
+
+ with patch('ansible.module_utils.distro.id', return_value="debian"):
+ assert get_distribution() == "Debian"
+
+ with patch('ansible.module_utils.distro.id', return_value="flatcar"):
+ assert get_distribution() == "Flatcar"
+
+ with patch('ansible.module_utils.distro.id', return_value="linuxmint"):
+ assert get_distribution() == "Linuxmint"
+
+ with patch('ansible.module_utils.distro.id', return_value="opensuse"):
+ assert get_distribution() == "Opensuse"
+
+ with patch('ansible.module_utils.distro.id', return_value="oracle"):
+ assert get_distribution() == "Oracle"
+
+ with patch('ansible.module_utils.distro.id', return_value="raspian"):
+ assert get_distribution() == "Raspian"
+
+ with patch('ansible.module_utils.distro.id', return_value="rhel"):
+ assert get_distribution() == "Redhat"
+
+ with patch('ansible.module_utils.distro.id', return_value="ubuntu"):
+ assert get_distribution() == "Ubuntu"
+
+ with patch('ansible.module_utils.distro.id', return_value="virtuozzo"):
+ assert get_distribution() == "Virtuozzo"
+
+ with patch('ansible.module_utils.distro.id', return_value="foo"):
+ assert get_distribution() == "Foo"
+
+ def test_distro_unknown(self):
+ with patch('ansible.module_utils.distro.id', return_value=""):
+ assert get_distribution() == "OtherLinux"
+
+ def test_distro_amazon_linux_short(self):
+ with patch('ansible.module_utils.distro.id', return_value="amzn"):
+ assert get_distribution() == "Amazon"
+
+ def test_distro_amazon_linux_long(self):
+ with patch('ansible.module_utils.distro.id', return_value="amazon"):
+ assert get_distribution() == "Amazon"
+
+
+#
+# get_distribution_version tests
+#
+
+def test_get_distribution_version_not_linux():
+ """If it's not Linux, then it has no distribution"""
+ with patch('platform.system', return_value='Foo'):
+ assert get_distribution_version() is None
+
+
+@pytest.mark.usefixtures("platform_linux")
+def test_distro_found():
+ with patch('ansible.module_utils.distro.version', return_value="1"):
+ assert get_distribution_version() == "1"
+
+
+#
+# Tests for LoadPlatformSubclass
+#
+
+class TestLoadPlatformSubclass:
+ class LinuxTest:
+ pass
+
+ class Foo(LinuxTest):
+ platform = "Linux"
+ distribution = None
+
+ class Bar(LinuxTest):
+ platform = "Linux"
+ distribution = "Bar"
+
+ def test_not_linux(self):
+ # if neither match, the fallback should be the top-level class
+ with patch('platform.system', return_value="Foo"):
+ with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
+ assert isinstance(load_platform_subclass(self.LinuxTest), self.LinuxTest)
+
+ @pytest.mark.usefixtures("platform_linux")
+ def test_get_distribution_none(self):
+ # match just the platform class, not a specific distribution
+ with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
+ assert isinstance(load_platform_subclass(self.LinuxTest), self.Foo)
+
+ @pytest.mark.usefixtures("platform_linux")
+ def test_get_distribution_found(self):
+ # match both the distribution and platform class
+ with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"):
+ assert isinstance(load_platform_subclass(self.LinuxTest), self.Bar)
+
+
+#
+# Tests for get_all_subclasses
+#
+
+class TestGetAllSubclasses:
+ class Base:
+ pass
+
+ class BranchI(Base):
+ pass
+
+ class BranchII(Base):
+ pass
+
+ class BranchIA(BranchI):
+ pass
+
+ class BranchIB(BranchI):
+ pass
+
+ class BranchIIA(BranchII):
+ pass
+
+ class BranchIIB(BranchII):
+ pass
+
+ def test_bottom_level(self):
+ assert get_all_subclasses(self.BranchIIB) == []
+
+ def test_one_inheritance(self):
+ assert set(get_all_subclasses(self.BranchII)) == set([self.BranchIIA, self.BranchIIB])
+
+ def test_toplevel(self):
+ assert set(get_all_subclasses(self.Base)) == set([self.BranchI, self.BranchII,
+ self.BranchIA, self.BranchIB,
+ self.BranchIIA, self.BranchIIB])
diff --git a/test/units/module_utils/basic/test_run_command.py b/test/units/module_utils/basic/test_run_command.py
new file mode 100644
index 00000000..25f1c48e
--- /dev/null
+++ b/test/units/module_utils/basic/test_run_command.py
@@ -0,0 +1,283 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+from itertools import product
+from io import BytesIO
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import PY2
+from ansible.module_utils.compat import selectors
+
+
+class OpenBytesIO(BytesIO):
+ """BytesIO with dummy close() method
+
+ So that you can inspect the content after close() was called.
+ """
+
+ def close(self):
+ pass
+
+
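+# The fixtures below stub out the os and subprocess modules used inside
+# ansible.module_utils.basic so run_command can be exercised without touching
+# the real system: chdir('/inaccessible') raises EPERM, relative paths
+# resolve against the fake cwd '/home/foo', and Popen hands back pre-seeded
+# stdout/stderr buffers.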
+@pytest.fixture
+def mock_os(mocker):
+ def mock_os_chdir(path):
+ if path == '/inaccessible':
+ raise OSError(errno.EPERM, "Permission denied: '/inaccessible'")
+
+ def mock_os_abspath(path):
+ if path.startswith('/'):
+ return path
+ else:
+ return os.getcwd.return_value + '/' + path
+
+ os = mocker.patch('ansible.module_utils.basic.os')
+
+ os.path.expandvars.side_effect = lambda x: x
+ os.path.expanduser.side_effect = lambda x: x
+ os.environ = {'PATH': '/bin'}
+ os.getcwd.return_value = '/home/foo'
+ os.path.isdir.return_value = True
+ os.chdir.side_effect = mock_os_chdir
+ os.path.abspath.side_effect = mock_os_abspath
+
+ yield os
+
+
+class DummyFileObj():
+ def __init__(self, fileobj):
+ self.fileobj = fileobj
+
+
+class SpecialBytesIO(BytesIO):
+ def __init__(self, *args, **kwargs):
+ fh = kwargs.pop('fh', None)
+ super(SpecialBytesIO, self).__init__(*args, **kwargs)
+ self.fh = fh
+
+ def fileno(self):
+ return self.fh
+
+ # We need to do this because some of our tests create a new value for stdout and stderr
+ # The new value is able to affect the string that is returned by the subprocess stdout and
+ # stderr but by the time the test gets it, it is too late to change the SpecialBytesIO that
+ # subprocess.Popen returns for stdout and stderr. If we could figure out how to change those as
+ # well, then we wouldn't need this.
+ def __eq__(self, other):
+ if id(self) == id(other) or self.fh == other.fileno():
+ return True
+ return False
+
+
+class DummyKey:
+ def __init__(self, fileobj):
+ self.fileobj = fileobj
+
+
+@pytest.fixture
+def mock_subprocess(mocker):
+
+ class MockSelector(selectors.BaseSelector):
+ def __init__(self):
+ super(MockSelector, self).__init__()
+ self._file_objs = []
+
+ def register(self, fileobj, events, data=None):
+ self._file_objs.append(fileobj)
+
+ def unregister(self, fileobj):
+ self._file_objs.remove(fileobj)
+
+ def select(self, timeout=None):
+ ready = []
+ for file_obj in self._file_objs:
+ ready.append((DummyKey(subprocess._output[file_obj.fileno()]), selectors.EVENT_READ))
+ return ready
+
+ def get_map(self):
+ return self._file_objs
+
+ def close(self):
+ super(MockSelector, self).close()
+ self._file_objs = []
+
+ selectors.DefaultSelector = MockSelector
+
+ subprocess = mocker.patch('ansible.module_utils.basic.subprocess')
+ subprocess._output = {mocker.sentinel.stdout: SpecialBytesIO(b'', fh=mocker.sentinel.stdout),
+ mocker.sentinel.stderr: SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
+
+ cmd = mocker.MagicMock()
+ cmd.returncode = 0
+ cmd.stdin = OpenBytesIO()
+ cmd.stdout = subprocess._output[mocker.sentinel.stdout]
+ cmd.stderr = subprocess._output[mocker.sentinel.stderr]
+ subprocess.Popen.return_value = cmd
+
+ yield subprocess
+
+
+@pytest.fixture()
+def rc_am(mocker, am, mock_os, mock_subprocess):
+ am.fail_json = mocker.MagicMock(side_effect=SystemExit)
+ am._os = mock_os
+ am._subprocess = mock_subprocess
+ yield am
+
+
+class TestRunCommandArgs:
+ # Format is command as passed to run_command, command to Popen as list, command to Popen as string
+ ARGS_DATA = (
+ (['/bin/ls', 'a', 'b', 'c'], [b'/bin/ls', b'a', b'b', b'c'], b'/bin/ls a b c'),
+ ('/bin/ls a " b" "c "', [b'/bin/ls', b'a', b' b', b'c '], b'/bin/ls a " b" "c "'),
+ )
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ # pylint: disable=undefined-variable
+ @pytest.mark.parametrize('cmd, expected, shell, stdin',
+ ((arg, cmd_str if sh else cmd_lst, sh, {})
+ for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
+ indirect=['stdin'])
+ def test_args(self, cmd, expected, shell, rc_am):
+ rc_am.run_command(cmd, use_unsafe_shell=shell)
+ assert rc_am._subprocess.Popen.called
+ args, kwargs = rc_am._subprocess.Popen.call_args
+ assert args == (expected, )
+ assert kwargs['shell'] == shell
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_tuple_as_args(self, rc_am):
+ with pytest.raises(SystemExit):
+ rc_am.run_command(('ls', '/'))
+ assert rc_am.fail_json.called
+
+
+class TestRunCommandCwd:
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_cwd(self, mocker, rc_am):
+ rc_am._os.getcwd.return_value = '/old'
+ rc_am.run_command('/bin/ls', cwd='/new')
+ assert rc_am._os.chdir.mock_calls == [mocker.call(b'/new'), mocker.call('/old'), ]
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_cwd_relative_path(self, mocker, rc_am):
+ rc_am._os.getcwd.return_value = '/old'
+ rc_am.run_command('/bin/ls', cwd='sub-dir')
+ assert rc_am._os.chdir.mock_calls == [mocker.call(b'/old/sub-dir'), mocker.call('/old'), ]
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_cwd_not_a_dir(self, mocker, rc_am):
+ rc_am._os.getcwd.return_value = '/old'
+ rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
+ rc_am.run_command('/bin/ls', cwd='/not-a-dir')
+ assert rc_am._os.chdir.mock_calls == [mocker.call('/old'), ]
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_cwd_not_a_dir_noignore(self, rc_am):
+ rc_am._os.getcwd.return_value = '/old'
+ rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
+ with pytest.raises(SystemExit):
+ rc_am.run_command('/bin/ls', cwd='/not-a-dir', ignore_invalid_cwd=False)
+ assert rc_am.fail_json.called
+
+
+class TestRunCommandPrompt:
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_prompt_bad_regex(self, rc_am):
+ with pytest.raises(SystemExit):
+ rc_am.run_command('foo', prompt_regex='[pP)assword:')
+ assert rc_am.fail_json.called
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_prompt_no_match(self, mocker, rc_am):
+ rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
+ (rc, _, _) = rc_am.run_command('foo', prompt_regex='[pP]assword:')
+ assert rc == 0
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_prompt_match_wo_data(self, mocker, rc_am):
+ rc_am._subprocess._output = {mocker.sentinel.stdout:
+ SpecialBytesIO(b'Authentication required!\nEnter password: ',
+ fh=mocker.sentinel.stdout),
+ mocker.sentinel.stderr:
+ SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
+ (rc, _, _) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
+ assert rc == 257
+
+
+class TestRunCommandRc:
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_check_rc_false(self, rc_am):
+ rc_am._subprocess.Popen.return_value.returncode = 1
+ (rc, _, _) = rc_am.run_command('/bin/false', check_rc=False)
+ assert rc == 1
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_check_rc_true(self, rc_am):
+ rc_am._subprocess.Popen.return_value.returncode = 1
+ with pytest.raises(SystemExit):
+ rc_am.run_command('/bin/false', check_rc=True)
+ assert rc_am.fail_json.called
+ args, kwargs = rc_am.fail_json.call_args
+ assert kwargs['rc'] == 1
+
+
+class TestRunCommandOutput:
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_text_stdin(self, rc_am):
+ (rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
+ assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_ascii_stdout(self, mocker, rc_am):
+ rc_am._subprocess._output = {mocker.sentinel.stdout:
+ SpecialBytesIO(b'hello', fh=mocker.sentinel.stdout),
+ mocker.sentinel.stderr:
+ SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
+ (rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
+ assert rc == 0
+        # run_command returns native strings: text on py3, bytes on py2
+ assert stdout == 'hello'
+
+ @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+ def test_utf8_output(self, mocker, rc_am):
+ rc_am._subprocess._output = {mocker.sentinel.stdout:
+ SpecialBytesIO(u'Žarn§'.encode('utf-8'),
+ fh=mocker.sentinel.stdout),
+ mocker.sentinel.stderr:
+ SpecialBytesIO(u'لرئيسية'.encode('utf-8'),
+ fh=mocker.sentinel.stderr)}
+ (rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
+ assert rc == 0
+        # run_command returns native strings: text on py3, bytes on py2
+ assert stdout == to_native(u'Žarn§')
+ assert stderr == to_native(u'لرئيسية')
+
+
+@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+def test_run_command_fds(mocker, rc_am):
+ subprocess_mock = mocker.patch('ansible.module_utils.basic.subprocess')
+ subprocess_mock.Popen.side_effect = AssertionError
+
+ try:
+ rc_am.run_command('synchronize', pass_fds=(101, 42))
+ except SystemExit:
+ pass
+
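+    # On Python 2 subprocess.Popen has no pass_fds, so run_command must fall
+    # back to close_fds=False; on Python 3 a non-empty pass_fds forces
+    # close_fds=True (standard subprocess behaviour).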
+ if PY2:
+ assert subprocess_mock.Popen.call_args[1]['close_fds'] is False
+ assert 'pass_fds' not in subprocess_mock.Popen.call_args[1]
+
+ else:
+ assert subprocess_mock.Popen.call_args[1]['pass_fds'] == (101, 42)
+ assert subprocess_mock.Popen.call_args[1]['close_fds'] is True
diff --git a/test/units/module_utils/basic/test_safe_eval.py b/test/units/module_utils/basic/test_safe_eval.py
new file mode 100644
index 00000000..e8538ca9
--- /dev/null
+++ b/test/units/module_utils/basic/test_safe_eval.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# (c) 2015-2017, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from itertools import chain
+import pytest
+
+
+# Strings that should be converted into a typed value
+VALID_STRINGS = (
+ ("'a'", 'a'),
+ ("'1'", '1'),
+ ("1", 1),
+ ("True", True),
+ ("False", False),
+ ("{}", {}),
+)
+
+# Passing things that aren't strings should just return the object
+NONSTRINGS = (
+ ({'a': 1}, {'a': 1}),
+)
+
+# These strings are not basic literals. For safety they must never be
+# executed; safe_eval returns the string unchanged and, for some of them,
+# also reports an exception.
+INVALID_STRINGS = (
+ ("a=1", "a=1", SyntaxError),
+ ("a.foo()", "a.foo()", None),
+ ("import foo", "import foo", None),
+ ("__import__('foo')", "__import__('foo')", ValueError),
+)
+
+
+@pytest.mark.parametrize('code, expected, stdin',
+ ((c, e, {}) for c, e in chain(VALID_STRINGS, NONSTRINGS)),
+ indirect=['stdin'])
+def test_simple_types(am, code, expected):
+ # test some basic usage for various types
+ assert am.safe_eval(code) == expected
+
+
+@pytest.mark.parametrize('code, expected, stdin',
+ ((c, e, {}) for c, e in chain(VALID_STRINGS, NONSTRINGS)),
+ indirect=['stdin'])
+def test_simple_types_with_exceptions(am, code, expected):
+ # Test simple types with exceptions requested
+    assert am.safe_eval(code, include_exceptions=True) == (expected, None)
+
+
+@pytest.mark.parametrize('code, expected, stdin',
+ ((c, e, {}) for c, e, dummy in INVALID_STRINGS),
+ indirect=['stdin'])
+def test_invalid_strings(am, code, expected):
+ assert am.safe_eval(code) == expected
+
+
+@pytest.mark.parametrize('code, expected, exception, stdin',
+ ((c, e, ex, {}) for c, e, ex in INVALID_STRINGS),
+ indirect=['stdin'])
+def test_invalid_strings_with_exceptions(am, code, expected, exception):
+ res = am.safe_eval(code, include_exceptions=True)
+ assert res[0] == expected
+ if exception is None:
+ assert res[1] == exception
+ else:
+ assert type(res[1]) == exception
diff --git a/test/units/module_utils/basic/test_sanitize_keys.py b/test/units/module_utils/basic/test_sanitize_keys.py
new file mode 100644
index 00000000..180f8662
--- /dev/null
+++ b/test/units/module_utils/basic/test_sanitize_keys.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Red Hat
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible.module_utils.basic import sanitize_keys
+
+
+def test_sanitize_keys_non_dict_types():
+ """ Test that non-dict-like objects return the same data. """
+
+ type_exception = 'Unsupported type for key sanitization.'
+ no_log_strings = set()
+
+ assert 'string value' == sanitize_keys('string value', no_log_strings)
+
+ assert sanitize_keys(None, no_log_strings) is None
+
+ assert set(['x', 'y']) == sanitize_keys(set(['x', 'y']), no_log_strings)
+
+ assert not sanitize_keys(False, no_log_strings)
+
+
+def _run_comparison(obj):
+ no_log_strings = set(['secret', 'password'])
+
+ ret = sanitize_keys(obj, no_log_strings)
+
+ expected = [
+ None,
+ True,
+ 100,
+ "some string",
+ set([1, 2]),
+ [1, 2],
+
+ {'key1': ['value1a', 'value1b'],
+ 'some-********': 'value-for-some-password',
+ 'key2': {'key3': set(['value3a', 'value3b']),
+ 'i-have-a-********': {'********-********': 'value-for-secret-password', 'key4': 'value4'}
+ }
+ },
+
+ {'foo': [{'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER': 1}]}
+ ]
+
+ assert ret == expected
+
+
+def test_sanitize_keys_dict():
+ """ Test that santize_keys works with a dict. """
+
+ d = [
+ None,
+ True,
+ 100,
+ "some string",
+ set([1, 2]),
+ [1, 2],
+
+ {'key1': ['value1a', 'value1b'],
+ 'some-password': 'value-for-some-password',
+ 'key2': {'key3': set(['value3a', 'value3b']),
+ 'i-have-a-secret': {'secret-password': 'value-for-secret-password', 'key4': 'value4'}
+ }
+ },
+
+ {'foo': [{'secret': 1}]}
+ ]
+
+ _run_comparison(d)
+
+
+def test_sanitize_keys_with_ignores():
+ """ Test that we can actually ignore keys. """
+
+ no_log_strings = set(['secret', 'rc'])
+ ignore_keys = set(['changed', 'rc', 'status'])
+
+ value = {'changed': True,
+ 'rc': 0,
+ 'test-rc': 1,
+ 'another-secret': 2,
+ 'status': 'okie dokie'}
+
+ # We expect to change 'test-rc' but NOT 'rc'.
+ expected = {'changed': True,
+ 'rc': 0,
+ 'test-********': 1,
+ 'another-********': 2,
+ 'status': 'okie dokie'}
+
+ ret = sanitize_keys(value, no_log_strings, ignore_keys)
+ assert ret == expected
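+
+# Note: sanitize_keys masks matching *keys* only (in _run_comparison above the
+# value 'value-for-some-password' survives untouched), and keys listed in
+# ignore_keys are skipped even when they contain a no_log string, as 'rc'
+# shows here.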
diff --git a/test/units/module_utils/basic/test_selinux.py b/test/units/module_utils/basic/test_selinux.py
new file mode 100644
index 00000000..8562eb88
--- /dev/null
+++ b/test/units/module_utils/basic/test_selinux.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import errno
+import json
+
+from units.mock.procenv import ModuleTestCase, swap_stdin_and_argv
+
+from units.compat.mock import patch, MagicMock, mock_open, Mock
+from ansible.module_utils.six.moves import builtins
+
+realimport = builtins.__import__
+
+
+class TestSELinux(ModuleTestCase):
+ def test_module_utils_basic_ansible_module_selinux_mls_enabled(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ basic.HAVE_SELINUX = False
+ self.assertEqual(am.selinux_mls_enabled(), False)
+
+ basic.HAVE_SELINUX = True
+ basic.selinux = Mock()
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ with patch('selinux.is_selinux_mls_enabled', return_value=0):
+ self.assertEqual(am.selinux_mls_enabled(), False)
+ with patch('selinux.is_selinux_mls_enabled', return_value=1):
+ self.assertEqual(am.selinux_mls_enabled(), True)
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_selinux_initial_context(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ am.selinux_mls_enabled = MagicMock()
+ am.selinux_mls_enabled.return_value = False
+ self.assertEqual(am.selinux_initial_context(), [None, None, None])
+ am.selinux_mls_enabled.return_value = True
+ self.assertEqual(am.selinux_initial_context(), [None, None, None, None])
+
+ def test_module_utils_basic_ansible_module_selinux_enabled(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ # we first test the cases where the python selinux lib is
+ # not installed, which has two paths: one in which the system
+ # does have selinux installed (and the selinuxenabled command
+ # is present and returns 0 when run), or selinux is not installed
+ basic.HAVE_SELINUX = False
+ am.get_bin_path = MagicMock()
+ am.get_bin_path.return_value = '/path/to/selinuxenabled'
+ am.run_command = MagicMock()
+ am.run_command.return_value = (0, '', '')
+ self.assertRaises(SystemExit, am.selinux_enabled)
+ am.get_bin_path.return_value = None
+ self.assertEqual(am.selinux_enabled(), False)
+
+ # finally we test the case where the python selinux lib is installed,
+ # and both possibilities there (enabled vs. disabled)
+ basic.HAVE_SELINUX = True
+ basic.selinux = Mock()
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ with patch('selinux.is_selinux_enabled', return_value=0):
+ self.assertEqual(am.selinux_enabled(), False)
+ with patch('selinux.is_selinux_enabled', return_value=1):
+ self.assertEqual(am.selinux_enabled(), True)
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_selinux_default_context(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ am.selinux_initial_context = MagicMock(return_value=[None, None, None, None])
+ am.selinux_enabled = MagicMock(return_value=True)
+
+ # we first test the cases where the python selinux lib is not installed
+ basic.HAVE_SELINUX = False
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
+
+ # all following tests assume the python selinux bindings are installed
+ basic.HAVE_SELINUX = True
+
+ basic.selinux = Mock()
+
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ # next, we test with a mocked implementation of selinux.matchpathcon to simulate
+ # an actual context being found
+ with patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']):
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0'])
+
+ # we also test the case where matchpathcon returned a failure
+ with patch('selinux.matchpathcon', return_value=[-1, '']):
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
+
+ # finally, we test where an OSError occurred during matchpathcon's call
+ with patch('selinux.matchpathcon', side_effect=OSError):
+ self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None])
+
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_selinux_context(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ am.selinux_initial_context = MagicMock(return_value=[None, None, None, None])
+ am.selinux_enabled = MagicMock(return_value=True)
+
+ # we first test the cases where the python selinux lib is not installed
+ basic.HAVE_SELINUX = False
+ self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None])
+
+ # all following tests assume the python selinux bindings are installed
+ basic.HAVE_SELINUX = True
+
+ basic.selinux = Mock()
+
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate
+ # an actual context being found
+ with patch('selinux.lgetfilecon_raw', return_value=[0, 'unconfined_u:object_r:default_t:s0']):
+ self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0'])
+
+ # we also test the case where matchpathcon returned a failure
+ with patch('selinux.lgetfilecon_raw', return_value=[-1, '']):
+ self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None])
+
+ # finally, we test where an OSError occurred during matchpathcon's call
+ e = OSError()
+ e.errno = errno.ENOENT
+ with patch('selinux.lgetfilecon_raw', side_effect=e):
+ self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar')
+
+ e = OSError()
+ with patch('selinux.lgetfilecon_raw', side_effect=e):
+ self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar')
+
+ delattr(basic, 'selinux')
+
+ def test_module_utils_basic_ansible_module_is_special_selinux_path(self):
+ from ansible.module_utils import basic
+
+ args = json.dumps(dict(ANSIBLE_MODULE_ARGS={'_ansible_selinux_special_fs': "nfs,nfsd,foos",
+ '_ansible_remote_tmp': "/tmp",
+ '_ansible_keep_remote_files': False}))
+
+ with swap_stdin_and_argv(stdin_data=args):
+ basic._ANSIBLE_ARGS = None
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ def _mock_find_mount_point(path):
+ if path.startswith('/some/path'):
+ return '/some/path'
+ elif path.startswith('/weird/random/fstype'):
+ return '/weird/random/fstype'
+ return '/'
+
+ am.find_mount_point = MagicMock(side_effect=_mock_find_mount_point)
+ am.selinux_context = MagicMock(return_value=['foo_u', 'foo_r', 'foo_t', 's0'])
+
+ m = mock_open()
+ m.side_effect = OSError
+
+ with patch.object(builtins, 'open', m, create=True):
+ self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (False, None))
+
+ mount_data = [
+ '/dev/disk1 / ext4 rw,seclabel,relatime,data=ordered 0 0\n',
+ '1.1.1.1:/path/to/nfs /some/path nfs ro 0 0\n',
+ 'whatever /weird/random/fstype foos rw 0 0\n',
+ ]
+
+ # mock_open has a broken readlines() implementation apparently...
+ # this should work by default but doesn't, so we fix it
+ m = mock_open(read_data=''.join(mount_data))
+ m.return_value.readlines.return_value = mount_data
+
+ with patch.object(builtins, 'open', m, create=True):
+ self.assertEqual(am.is_special_selinux_path('/some/random/path'), (False, None))
+ self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (True, ['foo_u', 'foo_r', 'foo_t', 's0']))
+ self.assertEqual(am.is_special_selinux_path('/weird/random/fstype/path'), (True, ['foo_u', 'foo_r', 'foo_t', 's0']))
+
+ def test_module_utils_basic_ansible_module_set_context_if_different(self):
+ from ansible.module_utils import basic
+ basic._ANSIBLE_ARGS = None
+
+ am = basic.AnsibleModule(
+ argument_spec=dict(),
+ )
+
+ basic.HAVE_SELINUX = False
+
+ am.selinux_enabled = MagicMock(return_value=False)
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True), True)
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), False)
+
+ basic.HAVE_SELINUX = True
+
+ am.selinux_enabled = MagicMock(return_value=True)
+ am.selinux_context = MagicMock(return_value=['bar_u', 'bar_r', None, None])
+ am.is_special_selinux_path = MagicMock(return_value=(False, None))
+
+ basic.selinux = Mock()
+ with patch.dict('sys.modules', {'selinux': basic.selinux}):
+ with patch('selinux.lsetfilecon', return_value=0) as m:
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
+ m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0')
+ m.reset_mock()
+ am.check_mode = True
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
+ self.assertEqual(m.called, False)
+ am.check_mode = False
+
+ with patch('selinux.lsetfilecon', return_value=1) as m:
+ self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
+
+ with patch('selinux.lsetfilecon', side_effect=OSError) as m:
+ self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True)
+
+ am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0']))
+
+ with patch('selinux.lsetfilecon', return_value=0) as m:
+ self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True)
+ m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0')
+
+ delattr(basic, 'selinux')
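+
+# Common pattern above: inject Mock() as basic.selinux, register it in
+# sys.modules via patch.dict so patch('selinux.*') can resolve the name, then
+# delattr(basic, 'selinux') to leave the module pristine for the next test.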
diff --git a/test/units/module_utils/basic/test_set_cwd.py b/test/units/module_utils/basic/test_set_cwd.py
new file mode 100644
index 00000000..159236b7
--- /dev/null
+++ b/test/units/module_utils/basic/test_set_cwd.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import shutil
+import tempfile
+
+import pytest
+
+from units.compat.mock import patch, MagicMock
+from ansible.module_utils._text import to_bytes
+
+from ansible.module_utils import basic
+
+
+class TestAnsibleModuleSetCwd:
+
+ def test_set_cwd(self, monkeypatch):
+
+ '''make sure /tmp is used'''
+
+ def mock_getcwd():
+ return '/tmp'
+
+ def mock_access(path, perm):
+ return True
+
+ def mock_chdir(path):
+ pass
+
+ monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+ monkeypatch.setattr(os, 'access', mock_access)
+ monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+ with patch('time.time', return_value=42):
+ am = basic.AnsibleModule(argument_spec={})
+
+ result = am._set_cwd()
+ assert result == '/tmp'
+
+ def test_set_cwd_unreadable_use_self_tmpdir(self, monkeypatch):
+
+ '''pwd is not readable, use instance's tmpdir property'''
+
+ def mock_getcwd():
+ return '/tmp'
+
+ def mock_access(path, perm):
+ if path == '/tmp' and perm == 4:
+ return False
+ return True
+
+ def mock_expandvars(var):
+ if var == '$HOME':
+ return '/home/foobar'
+ return var
+
+ def mock_gettempdir():
+ return '/tmp/testdir'
+
+ def mock_chdir(path):
+ if path == '/tmp':
+ raise Exception()
+ return
+
+ monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+ monkeypatch.setattr(os, 'chdir', mock_chdir)
+ monkeypatch.setattr(os, 'access', mock_access)
+ monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
+ monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+ with patch('time.time', return_value=42):
+ am = basic.AnsibleModule(argument_spec={})
+
+ am._tmpdir = '/tmp2'
+ result = am._set_cwd()
+ assert result == am._tmpdir
+
+ def test_set_cwd_unreadable_use_home(self, monkeypatch):
+
+ '''cwd and instance tmpdir are unreadable, use home'''
+
+ def mock_getcwd():
+ return '/tmp'
+
+ def mock_access(path, perm):
+ if path in ['/tmp', '/tmp2'] and perm == 4:
+ return False
+ return True
+
+ def mock_expandvars(var):
+ if var == '$HOME':
+ return '/home/foobar'
+ return var
+
+ def mock_gettempdir():
+ return '/tmp/testdir'
+
+ def mock_chdir(path):
+ if path == '/tmp':
+ raise Exception()
+ return
+
+ monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+ monkeypatch.setattr(os, 'chdir', mock_chdir)
+ monkeypatch.setattr(os, 'access', mock_access)
+ monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
+ monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+ with patch('time.time', return_value=42):
+ am = basic.AnsibleModule(argument_spec={})
+
+ am._tmpdir = '/tmp2'
+ result = am._set_cwd()
+ assert result == '/home/foobar'
+
+ def test_set_cwd_unreadable_use_gettempdir(self, monkeypatch):
+
+ '''fallback to tempfile.gettempdir'''
+
+ thisdir = None
+
+ def mock_getcwd():
+ return '/tmp'
+
+ def mock_access(path, perm):
+ if path in ['/tmp', '/tmp2', '/home/foobar'] and perm == 4:
+ return False
+ return True
+
+ def mock_expandvars(var):
+ if var == '$HOME':
+ return '/home/foobar'
+ return var
+
+ def mock_gettempdir():
+ return '/tmp3'
+
+ def mock_chdir(path):
+ if path == '/tmp':
+ raise Exception()
+ thisdir = path
+
+ monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+ monkeypatch.setattr(os, 'chdir', mock_chdir)
+ monkeypatch.setattr(os, 'access', mock_access)
+ monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
+ monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+ with patch('time.time', return_value=42):
+ am = basic.AnsibleModule(argument_spec={})
+
+ am._tmpdir = '/tmp2'
+ monkeypatch.setattr(tempfile, 'gettempdir', mock_gettempdir)
+ result = am._set_cwd()
+ assert result == '/tmp3'
+
+ def test_set_cwd_unreadable_use_None(self, monkeypatch):
+
+        '''all paths are unreadable, should return None and not an exception'''
+
+ def mock_getcwd():
+ return '/tmp'
+
+ def mock_access(path, perm):
+ if path in ['/tmp', '/tmp2', '/tmp3', '/home/foobar'] and perm == 4:
+ return False
+ return True
+
+ def mock_expandvars(var):
+ if var == '$HOME':
+ return '/home/foobar'
+ return var
+
+ def mock_gettempdir():
+ return '/tmp3'
+
+ def mock_chdir(path):
+ if path == '/tmp':
+ raise Exception()
+
+ monkeypatch.setattr(os, 'getcwd', mock_getcwd)
+ monkeypatch.setattr(os, 'chdir', mock_chdir)
+ monkeypatch.setattr(os, 'access', mock_access)
+ monkeypatch.setattr(os.path, 'expandvars', mock_expandvars)
+ monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}})))
+ with patch('time.time', return_value=42):
+ am = basic.AnsibleModule(argument_spec={})
+
+ am._tmpdir = '/tmp2'
+ monkeypatch.setattr(tempfile, 'gettempdir', mock_gettempdir)
+ result = am._set_cwd()
+ assert result is None
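+
+# Taken together, these tests pin down _set_cwd's fallback order:
+# os.getcwd() -> module tmpdir -> $HOME -> tempfile.gettempdir() -> None.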
diff --git a/test/units/module_utils/basic/test_set_mode_if_different.py b/test/units/module_utils/basic/test_set_mode_if_different.py
new file mode 100644
index 00000000..93fe2467
--- /dev/null
+++ b/test/units/module_utils/basic/test_set_mode_if_different.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+
+from itertools import product
+
+try:
+ import builtins
+except ImportError:
+ import __builtin__ as builtins
+
+import pytest
+
+
+SYNONYMS_0660 = (
+ 0o660,
+ '0o660',
+ '660',
+ 'u+rw-x,g+rw-x,o-rwx',
+ 'u=rw,g=rw,o-rwx',
+)
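+# Each entry above is an equivalent spelling of mode 0660: octal int, octal
+# string, bare digit string, symbolic add/remove, and symbolic assignment.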
+
+
+@pytest.fixture
+def mock_stats(mocker):
+ mock_stat1 = mocker.MagicMock()
+ mock_stat1.st_mode = 0o444
+ mock_stat2 = mocker.MagicMock()
+ mock_stat2.st_mode = 0o660
+ yield {"before": mock_stat1, "after": mock_stat2}
+
+
+@pytest.fixture
+def am_check_mode(am):
+ am.check_mode = True
+ yield am
+ am.check_mode = False
+
+
+@pytest.fixture
+def mock_lchmod(mocker):
+ m_lchmod = mocker.patch('ansible.module_utils.basic.os.lchmod', return_value=None, create=True)
+ yield m_lchmod
+
+
+@pytest.mark.parametrize('previous_changes, check_mode, exists, stdin',
+ product((True, False), (True, False), (True, False), ({},)),
+ indirect=['stdin'])
+def test_no_mode_given_returns_previous_changes(am, mock_stats, mock_lchmod, mocker, previous_changes, check_mode, exists):
+ am.check_mode = check_mode
+ mocker.patch('os.lstat', side_effect=[mock_stats['before']])
+ m_lchmod = mocker.patch('os.lchmod', return_value=None, create=True)
+ m_path_exists = mocker.patch('os.path.exists', return_value=exists)
+
+ assert am.set_mode_if_different('/path/to/file', None, previous_changes) == previous_changes
+ assert not m_lchmod.called
+ assert not m_path_exists.called
+
+
+@pytest.mark.parametrize('mode, check_mode, stdin',
+ product(SYNONYMS_0660, (True, False), ({},)),
+ indirect=['stdin'])
+def test_mode_changed_to_0660(am, mock_stats, mocker, mode, check_mode):
+ # Note: This is for checking that all the different ways of specifying
+ # 0660 mode work. It cannot be used to check that setting a mode that is
+ # not equivalent to 0660 works.
+ am.check_mode = check_mode
+ mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after'], mock_stats['after']])
+ m_lchmod = mocker.patch('os.lchmod', return_value=None, create=True)
+ mocker.patch('os.path.exists', return_value=True)
+
+ assert am.set_mode_if_different('/path/to/file', mode, False)
+ if check_mode:
+ assert not m_lchmod.called
+ else:
+ m_lchmod.assert_called_with(b'/path/to/file', 0o660)
+
+
+@pytest.mark.parametrize('mode, check_mode, stdin',
+ product(SYNONYMS_0660, (True, False), ({},)),
+ indirect=['stdin'])
+def test_mode_unchanged_when_already_0660(am, mock_stats, mocker, mode, check_mode):
+ # Note: This is for checking that all the different ways of specifying
+ # 0660 mode work. It cannot be used to check that setting a mode that is
+ # not equivalent to 0660 works.
+ am.check_mode = check_mode
+ mocker.patch('os.lstat', side_effect=[mock_stats['after'], mock_stats['after'], mock_stats['after']])
+ m_lchmod = mocker.patch('os.lchmod', return_value=None, create=True)
+ mocker.patch('os.path.exists', return_value=True)
+
+ assert not am.set_mode_if_different('/path/to/file', mode, False)
+ assert not m_lchmod.called
+
+
+@pytest.mark.parametrize('check_mode, stdin',
+ product((True, False), ({},)),
+ indirect=['stdin'])
+def test_missing_lchmod_is_not_link(am, mock_stats, mocker, monkeypatch, check_mode):
+ """Some platforms have lchmod (*BSD) others do not (Linux)"""
+
+ am.check_mode = check_mode
+ original_hasattr = hasattr
+
+ monkeypatch.delattr(os, 'lchmod', raising=False)
+
+ mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after']])
+ mocker.patch('os.path.islink', return_value=False)
+ mocker.patch('os.path.exists', return_value=True)
+ m_chmod = mocker.patch('os.chmod', return_value=None)
+
+ assert am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False)
+ if check_mode:
+ assert not m_chmod.called
+ else:
+ m_chmod.assert_called_with(b'/path/to/file/no_lchmod', 0o660)
+
+
+@pytest.mark.parametrize('check_mode, stdin',
+ product((True, False), ({},)),
+ indirect=['stdin'])
+def test_missing_lchmod_is_link(am, mock_stats, mocker, monkeypatch, check_mode):
+ """Some platforms have lchmod (*BSD) others do not (Linux)"""
+
+ am.check_mode = check_mode
+ original_hasattr = hasattr
+
+ monkeypatch.delattr(os, 'lchmod', raising=False)
+
+ mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after']])
+ mocker.patch('os.path.islink', return_value=True)
+ mocker.patch('os.path.exists', return_value=True)
+ m_chmod = mocker.patch('os.chmod', return_value=None)
+ mocker.patch('os.stat', return_value=mock_stats['after'])
+
+ assert am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False)
+ if check_mode:
+ assert not m_chmod.called
+ else:
+ m_chmod.assert_called_with(b'/path/to/file/no_lchmod', 0o660)
+
+ mocker.resetall()
+ mocker.stopall()
+
+
+@pytest.mark.parametrize('stdin,',
+ ({},),
+ indirect=['stdin'])
+def test_missing_lchmod_is_link_in_sticky_dir(am, mock_stats, mocker):
+ """Some platforms have lchmod (*BSD) others do not (Linux)"""
+
+ am.check_mode = False
+ original_hasattr = hasattr
+
+ def _hasattr(obj, name):
+ if obj == os and name == 'lchmod':
+ return False
+ return original_hasattr(obj, name)
+
+ mock_lstat = mocker.MagicMock()
+ mock_lstat.st_mode = 0o777
+
+ mocker.patch('os.lstat', side_effect=[mock_lstat, mock_lstat])
+ mocker.patch.object(builtins, 'hasattr', side_effect=_hasattr)
+ mocker.patch('os.path.islink', return_value=True)
+ mocker.patch('os.path.exists', return_value=True)
+ m_stat = mocker.patch('os.stat', side_effect=OSError(errno.EACCES, 'Permission denied'))
+ m_chmod = mocker.patch('os.chmod', return_value=None)
+
+ # not changed: can't set mode on symbolic links
+ assert not am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False)
+ m_stat.assert_called_with(b'/path/to/file/no_lchmod')
+ m_chmod.assert_not_called()
+
+ mocker.resetall()
+ mocker.stopall()
diff --git a/test/units/module_utils/basic/test_tmpdir.py b/test/units/module_utils/basic/test_tmpdir.py
new file mode 100644
index 00000000..818cb9b1
--- /dev/null
+++ b/test/units/module_utils/basic/test_tmpdir.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import shutil
+import tempfile
+
+import pytest
+
+from units.compat.mock import patch, MagicMock
+from ansible.module_utils._text import to_bytes
+
+from ansible.module_utils import basic
+
+
+class TestAnsibleModuleTmpDir:
+
+ DATA = (
+ (
+ {
+ "_ansible_tmpdir": "/path/to/dir",
+ "_ansible_remote_tmp": "/path/tmpdir",
+ "_ansible_keep_remote_files": False,
+ },
+ True,
+ "/path/to/dir"
+ ),
+ (
+ {
+ "_ansible_tmpdir": None,
+ "_ansible_remote_tmp": "/path/tmpdir",
+ "_ansible_keep_remote_files": False
+ },
+ False,
+ "/path/tmpdir/ansible-moduletmp-42-"
+ ),
+ (
+ {
+ "_ansible_tmpdir": None,
+ "_ansible_remote_tmp": "/path/tmpdir",
+ "_ansible_keep_remote_files": False
+ },
+ True,
+ "/path/tmpdir/ansible-moduletmp-42-"
+ ),
+ (
+ {
+ "_ansible_tmpdir": None,
+ "_ansible_remote_tmp": "$HOME/.test",
+ "_ansible_keep_remote_files": False
+ },
+ False,
+ os.path.join(os.environ['HOME'], ".test/ansible-moduletmp-42-")
+ ),
+ )
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ # pylint: disable=undefined-variable
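+ # DATA rows are (args, stat_exists, expected); the generator reorders them to match the argnames below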
+ @pytest.mark.parametrize('args, expected, stat_exists', ((s, e, t) for s, t, e in DATA))
+ def test_tmpdir_property(self, monkeypatch, args, expected, stat_exists):
+ makedirs = {'called': False}
+
+ def mock_mkdtemp(prefix, dir):
+ return os.path.join(dir, prefix)
+
+ def mock_makedirs(path, mode):
+ makedirs['called'] = True
+ makedirs['path'] = path
+ makedirs['mode'] = mode
+ return
+
+ monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp)
+ monkeypatch.setattr(os.path, 'exists', lambda x: stat_exists)
+ monkeypatch.setattr(os, 'makedirs', mock_makedirs)
+ monkeypatch.setattr(shutil, 'rmtree', lambda x: None)
+ monkeypatch.setattr(basic, '_ANSIBLE_ARGS', to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args})))
+
+ with patch('time.time', return_value=42):
+ am = basic.AnsibleModule(argument_spec={})
+ actual_tmpdir = am.tmpdir
+
+ assert actual_tmpdir == expected
+
+ # verify subsequent calls always produce the same tmpdir
+ assert am.tmpdir == actual_tmpdir
+
+ if not stat_exists:
+ assert makedirs['called']
+ expected = os.path.expanduser(os.path.expandvars(am._remote_tmp))
+ assert makedirs['path'] == expected
+ assert makedirs['mode'] == 0o700
+
+ @pytest.mark.parametrize('stdin', ({"_ansible_tmpdir": None,
+ "_ansible_remote_tmp": "$HOME/.test",
+ "_ansible_keep_remote_files": True},),
+ indirect=['stdin'])
+ def test_tmpdir_makedirs_failure(self, am, monkeypatch):
+
+ mock_mkdtemp = MagicMock(return_value="/tmp/path")
+ mock_makedirs = MagicMock(side_effect=OSError("Some OS Error here"))
+
+ monkeypatch.setattr(tempfile, 'mkdtemp', mock_mkdtemp)
+ monkeypatch.setattr(os.path, 'exists', lambda x: False)
+ monkeypatch.setattr(os, 'makedirs', mock_makedirs)
+
+ actual = am.tmpdir
+ assert actual == "/tmp/path"
+ assert mock_makedirs.call_args[0] == (os.path.expanduser(os.path.expandvars("$HOME/.test")),)
+ assert mock_makedirs.call_args[1] == {"mode": 0o700}
+
+ # because makedirs failed, dir is None and mkdtemp falls back to the system tmp
+ assert mock_mkdtemp.call_args[1]['dir'] is None
+ assert mock_mkdtemp.call_args[1]['prefix'].startswith("ansible-moduletmp-")
diff --git a/test/units/module_utils/common/__init__.py b/test/units/module_utils/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/common/__init__.py
diff --git a/test/units/module_utils/common/parameters/test_handle_aliases.py b/test/units/module_utils/common/parameters/test_handle_aliases.py
new file mode 100644
index 00000000..bc88437f
--- /dev/null
+++ b/test/units/module_utils/common/parameters/test_handle_aliases.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import pytest
+
+from ansible.module_utils.common.parameters import handle_aliases
+from ansible.module_utils._text import to_native
+
+DEFAULT_LEGAL_INPUTS = [
+ '_ansible_check_mode',
+ '_ansible_debug',
+ '_ansible_diff',
+ '_ansible_keep_remote_files',
+ '_ansible_module_name',
+ '_ansible_no_log',
+ '_ansible_remote_tmp',
+ '_ansible_selinux_special_fs',
+ '_ansible_shell_executable',
+ '_ansible_socket',
+ '_ansible_string_conversion_action',
+ '_ansible_syslog_facility',
+ '_ansible_tmpdir',
+ '_ansible_verbosity',
+ '_ansible_version',
+]
+
+
+def test_handle_aliases_no_aliases():
+ argument_spec = {
+ 'name': {'type': 'str'},
+ }
+
+ params = {
+ 'name': 'foo',
+ 'path': 'bar'
+ }
+
+ expected = (
+ {},
+ DEFAULT_LEGAL_INPUTS + ['name'],
+ )
+ expected[1].sort()
+
+ result = handle_aliases(argument_spec, params)
+ result[1].sort()
+ assert expected == result
+
+
+def test_handle_aliases_basic():
+ argument_spec = {
+ 'name': {'type': 'str', 'aliases': ['surname', 'nick']},
+ }
+
+ params = {
+ 'name': 'foo',
+ 'path': 'bar',
+ 'surname': 'foo',
+ 'nick': 'foo',
+ }
+
+ expected = (
+ {'surname': 'name', 'nick': 'name'},
+ DEFAULT_LEGAL_INPUTS + ['name', 'surname', 'nick'],
+ )
+ expected[1].sort()
+
+ result = handle_aliases(argument_spec, params)
+ result[1].sort()
+ assert expected == result
+
+
+def test_handle_aliases_value_error():
+ argument_spec = {
+ 'name': {'type': 'str', 'aliases': ['surname', 'nick'], 'default': 'bob', 'required': True},
+ }
+
+ params = {
+ 'name': 'foo',
+ }
+
+ with pytest.raises(ValueError) as ve:
+ handle_aliases(argument_spec, params)
+ assert 'internal error: required and default are mutually exclusive' in to_native(ve.value)
+
+
+def test_handle_aliases_type_error():
+ argument_spec = {
+ 'name': {'type': 'str', 'aliases': 'surname'},
+ }
+
+ params = {
+ 'name': 'foo',
+ }
+
+ with pytest.raises(TypeError) as te:
+ handle_aliases(argument_spec, params)
+ assert 'internal error: aliases must be a list or tuple' in to_native(te.value)
diff --git a/test/units/module_utils/common/parameters/test_list_deprecations.py b/test/units/module_utils/common/parameters/test_list_deprecations.py
new file mode 100644
index 00000000..0a17187c
--- /dev/null
+++ b/test/units/module_utils/common/parameters/test_list_deprecations.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.parameters import list_deprecations
+
+
+@pytest.fixture
+def params():
+ return {
+ 'name': 'bob',
+ 'dest': '/etc/hosts',
+ 'state': 'present',
+ 'value': 5,
+ }
+
+
+def test_list_deprecations():
+ argument_spec = {
+ 'old': {'type': 'str', 'removed_in_version': '2.5'},
+ 'foo': {'type': 'dict', 'options': {'old': {'type': 'str', 'removed_in_version': 1.0}}},
+ 'bar': {'type': 'list', 'elements': 'dict', 'options': {'old': {'type': 'str', 'removed_in_version': '2.10'}}},
+ }
+
+ params = {
+ 'name': 'rod',
+ 'old': 'option',
+ 'foo': {'old': 'value'},
+ 'bar': [{'old': 'value'}, {}],
+ }
+ result = list_deprecations(argument_spec, params)
+ assert len(result) == 3
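+ # sort by message so the assertions below do not depend on iteration order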
+ result.sort(key=lambda entry: entry['msg'])
+ assert result[0]['msg'] == """Param 'bar["old"]' is deprecated. See the module docs for more information"""
+ assert result[0]['version'] == '2.10'
+ assert result[1]['msg'] == """Param 'foo["old"]' is deprecated. See the module docs for more information"""
+ assert result[1]['version'] == 1.0
+ assert result[2]['msg'] == "Param 'old' is deprecated. See the module docs for more information"
+ assert result[2]['version'] == '2.5'
diff --git a/test/units/module_utils/common/parameters/test_list_no_log_values.py b/test/units/module_utils/common/parameters/test_list_no_log_values.py
new file mode 100644
index 00000000..1b740555
--- /dev/null
+++ b/test/units/module_utils/common/parameters/test_list_no_log_values.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.parameters import list_no_log_values
+
+
+@pytest.fixture
+def argument_spec():
+ # Allow extra specs to be passed to the fixture, which will be added to the output
+ def _argument_spec(extra_opts=None):
+ spec = {
+ 'secret': {'type': 'str', 'no_log': True},
+ 'other_secret': {'type': 'str', 'no_log': True},
+ 'state': {'type': 'str'},
+ 'value': {'type': 'int'},
+ }
+
+ if extra_opts:
+ spec.update(extra_opts)
+
+ return spec
+
+ return _argument_spec
+
+
+@pytest.fixture
+def module_parameters():
+ # Allow extra parameters to be passed to the fixture, which will be added to the output
+ def _module_parameters(extra_params=None):
+ params = {
+ 'secret': 'under',
+ 'other_secret': 'makeshift',
+ 'state': 'present',
+ 'value': 5,
+ }
+
+ if extra_params:
+ params.update(extra_params)
+
+ return params
+
+ return _module_parameters
+
+
+def test_list_no_log_values_no_secrets(module_parameters):
+ argument_spec = {
+ 'other_secret': {'type': 'str', 'no_log': False},
+ 'state': {'type': 'str'},
+ 'value': {'type': 'int'},
+ }
+ expected = set()
+ assert expected == list_no_log_values(argument_spec, module_parameters())
+
+
+def test_list_no_log_values(argument_spec, module_parameters):
+ expected = set(('under', 'makeshift'))
+ assert expected == list_no_log_values(argument_spec(), module_parameters())
+
+
+@pytest.mark.parametrize('extra_params', [
+ {'subopt1': 1},
+ {'subopt1': 3.14159},
+ {'subopt1': ['one', 'two']},
+ {'subopt1': ('one', 'two')},
+])
+def test_list_no_log_values_invalid_suboptions(argument_spec, module_parameters, extra_params):
+ extra_opts = {
+ 'subopt1': {
+ 'type': 'dict',
+ 'options': {
+ 'sub_1_1': {},
+ }
+ }
+ }
+
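+ # the first alternative mirrors the library's own "must by a dict" (sic) error wording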
+ with pytest.raises(TypeError, match=r"(Value '.*?' in the sub parameter field '.*?' must by a dict, not '.*?')"
+ r"|(dictionary requested, could not parse JSON or key=value)"):
+ list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params))
+
+
+def test_list_no_log_values_suboptions(argument_spec, module_parameters):
+ extra_opts = {
+ 'subopt1': {
+ 'type': 'dict',
+ 'options': {
+ 'sub_1_1': {'no_log': True},
+ 'sub_1_2': {'type': 'list'},
+ }
+ }
+ }
+
+ extra_params = {
+ 'subopt1': {
+ 'sub_1_1': 'bagel',
+ 'sub_1_2': ['pebble'],
+ }
+ }
+
+ expected = set(('under', 'makeshift', 'bagel'))
+ assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params))
+
+
+def test_list_no_log_values_sub_suboptions(argument_spec, module_parameters):
+ extra_opts = {
+ 'sub_level_1': {
+ 'type': 'dict',
+ 'options': {
+ 'l1_1': {'no_log': True},
+ 'l1_2': {},
+ 'l1_3': {
+ 'type': 'dict',
+ 'options': {
+ 'l2_1': {'no_log': True},
+ 'l2_2': {},
+ }
+ }
+ }
+ }
+ }
+
+ extra_params = {
+ 'sub_level_1': {
+ 'l1_1': 'saucy',
+ 'l1_2': 'napped',
+ 'l1_3': {
+ 'l2_1': 'corporate',
+ 'l2_2': 'tinsmith',
+ }
+ }
+ }
+
+ expected = set(('under', 'makeshift', 'saucy', 'corporate'))
+ assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params))
+
+
+def test_list_no_log_values_suboptions_list(argument_spec, module_parameters):
+ extra_opts = {
+ 'subopt1': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'sub_1_1': {'no_log': True},
+ 'sub_1_2': {},
+ }
+ }
+ }
+
+ extra_params = {
+ 'subopt1': [
+ {
+ 'sub_1_1': ['playroom', 'luxury'],
+ 'sub_1_2': 'deuce',
+ },
+ {
+ 'sub_1_2': ['squishier', 'finished'],
+ }
+ ]
+ }
+
+ expected = set(('under', 'makeshift', 'playroom', 'luxury'))
+ assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params))
+
+
+def test_list_no_log_values_sub_suboptions_list(argument_spec, module_parameters):
+ extra_opts = {
+ 'subopt1': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'sub_1_1': {'no_log': True},
+ 'sub_1_2': {},
+ 'subopt2': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'sub_2_1': {'no_log': True, 'type': 'list'},
+ 'sub_2_2': {},
+ }
+ }
+ }
+ }
+ }
+
+ extra_params = {
+ 'subopt1': {
+ 'sub_1_1': ['playroom', 'luxury'],
+ 'sub_1_2': 'deuce',
+ 'subopt2': [
+ {
+ 'sub_2_1': ['basis', 'gave'],
+ 'sub_2_2': 'liquid',
+ },
+ {
+ 'sub_2_1': ['composure', 'thumping']
+ },
+ ]
+ }
+ }
+
+ expected = set(('under', 'makeshift', 'playroom', 'luxury', 'basis', 'gave', 'composure', 'thumping'))
+ assert expected == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params))
+
+
+@pytest.mark.parametrize('extra_params, expected', (
+ ({'subopt_dict': 'dict_subopt1=rekindle-scandal,dict_subopt2=subgroupavenge'}, ('rekindle-scandal',)),
+ ({'subopt_dict': 'dict_subopt1=aversion-mutable dict_subopt2=subgroupavenge'}, ('aversion-mutable',)),
+ ({'subopt_dict': ['dict_subopt1=blip-marine,dict_subopt2=subgroupavenge', 'dict_subopt1=tipping,dict_subopt2=hardening']}, ('blip-marine', 'tipping')),
+))
+def test_string_suboptions_as_string(argument_spec, module_parameters, extra_params, expected):
+ extra_opts = {
+ 'subopt_dict': {
+ 'type': 'dict',
+ 'options': {
+ 'dict_subopt1': {'no_log': True},
+ 'dict_subopt2': {},
+ },
+ },
+ }
+
+ result = set(('under', 'makeshift'))
+ result.update(expected)
+ assert result == list_no_log_values(argument_spec(extra_opts), module_parameters(extra_params))
diff --git a/test/units/module_utils/common/process/test_get_bin_path.py b/test/units/module_utils/common/process/test_get_bin_path.py
new file mode 100644
index 00000000..a337e78d
--- /dev/null
+++ b/test/units/module_utils/common/process/test_get_bin_path.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.process import get_bin_path
+
+
+def test_get_bin_path(mocker):
+ path = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
+ mocker.patch.dict('os.environ', {'PATH': path})
+ mocker.patch('os.pathsep', ':')
+
+ mocker.patch('os.path.isdir', return_value=False)
+ mocker.patch('ansible.module_utils.common.process.is_executable', return_value=True)
+
+ # pytest-mock 2.0.0 will throw when os.path.exists is messed with
+ # and then another method is patched afterwards. Likely
+ # something in the pytest-mock chain uses os.path.exists internally, and
+ # since pytest-mock prohibits context-specific patching, there's not a
+ # good solution. For now, just patch os.path.exists last.
+ mocker.patch('os.path.exists', side_effect=[False, True])
+
+ assert '/usr/local/bin/notacommand' == get_bin_path('notacommand')
+
+
+def test_get_bin_path_raise_valueerror(mocker):
+ mocker.patch.dict('os.environ', {'PATH': ''})
+
+ mocker.patch('os.path.exists', return_value=False)
+ mocker.patch('os.path.isdir', return_value=False)
+ mocker.patch('ansible.module_utils.common.process.is_executable', return_value=True)
+
+ with pytest.raises(ValueError, match='Failed to find required executable notacommand'):
+ get_bin_path('notacommand')
diff --git a/test/units/module_utils/common/test_collections.py b/test/units/module_utils/common/test_collections.py
new file mode 100644
index 00000000..95b2a402
--- /dev/null
+++ b/test/units/module_utils/common/test_collections.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018–2019, Sviatoslav Sydorenko <webknjaz@redhat.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+"""Test low-level utility functions from ``module_utils.common.collections``."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.six import Iterator
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible.module_utils.common.collections import ImmutableDict, is_iterable, is_sequence
+
+
+class SeqStub:
+ """Stub emulating a sequence type.
+
+ >>> from collections.abc import Sequence
+ >>> assert issubclass(SeqStub, Sequence)
+ >>> assert isinstance(SeqStub(), Sequence)
+ """
+
+
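+# Register SeqStub as a virtual subclass: isinstance()/issubclass() then report it as a Sequence without inheritance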
+Sequence.register(SeqStub)
+
+
+class IteratorStub(Iterator):
+ def __next__(self):
+ raise StopIteration
+
+
+class IterableStub:
+ def __iter__(self):
+ return IteratorStub()
+
+
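+# Mimics AnsibleVaultEncryptedUnicode: the __ENCRYPTED__ marker makes the collections helpers treat it as a string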
+class FakeAnsibleVaultEncryptedUnicode(Sequence):
+ __ENCRYPTED__ = True
+
+ def __init__(self, data):
+ self.data = data
+
+ def __getitem__(self, index):
+ return self.data[index]
+
+ def __len__(self):
+ return len(self.data)
+
+
+TEST_STRINGS = u'he', u'Україна', u'Česká republika'
+TEST_STRINGS = TEST_STRINGS + tuple(s.encode('utf-8') for s in TEST_STRINGS) + (FakeAnsibleVaultEncryptedUnicode(u'foo'),)
+
+TEST_ITEMS_NON_SEQUENCES = (
+ {}, object(), frozenset(),
+ 4, 0.,
+) + TEST_STRINGS
+
+TEST_ITEMS_SEQUENCES = (
+ [], (),
+ SeqStub(),
+)
+TEST_ITEMS_SEQUENCES = TEST_ITEMS_SEQUENCES + (
+ # Iterable effectively containing nested random data:
+ TEST_ITEMS_NON_SEQUENCES,
+)
+
+
+@pytest.mark.parametrize('sequence_input', TEST_ITEMS_SEQUENCES)
+def test_sequence_positive(sequence_input):
+ """Test that non-string item sequences are identified correctly."""
+ assert is_sequence(sequence_input)
+ assert is_sequence(sequence_input, include_strings=False)
+
+
+@pytest.mark.parametrize('non_sequence_input', TEST_ITEMS_NON_SEQUENCES)
+def test_sequence_negative(non_sequence_input):
+ """Test that non-sequences are identified correctly."""
+ assert not is_sequence(non_sequence_input)
+
+
+@pytest.mark.parametrize('string_input', TEST_STRINGS)
+def test_sequence_string_types_with_strings(string_input):
+ """Test that ``is_sequence`` can separate string and non-string."""
+ assert is_sequence(string_input, include_strings=True)
+
+
+@pytest.mark.parametrize('string_input', TEST_STRINGS)
+def test_sequence_string_types_without_strings(string_input):
+ """Test that ``is_sequence`` can separate string and non-string."""
+ assert not is_sequence(string_input, include_strings=False)
+
+
+@pytest.mark.parametrize(
+ 'seq',
+ ([], (), {}, set(), frozenset(), IterableStub()),
+)
+def test_iterable_positive(seq):
+ assert is_iterable(seq)
+
+
+@pytest.mark.parametrize(
+ 'seq', (IteratorStub(), object(), 5, 9.)
+)
+def test_iterable_negative(seq):
+ assert not is_iterable(seq)
+
+
+@pytest.mark.parametrize('string_input', TEST_STRINGS)
+def test_iterable_including_strings(string_input):
+ assert is_iterable(string_input, include_strings=True)
+
+
+@pytest.mark.parametrize('string_input', TEST_STRINGS)
+def test_iterable_excluding_strings(string_input):
+ assert not is_iterable(string_input, include_strings=False)
+
+
+class TestImmutableDict:
+ def test_scalar(self):
+ imdict = ImmutableDict({1: 2})
+ assert imdict[1] == 2
+
+ def test_string(self):
+ imdict = ImmutableDict({u'café': u'くらとみ'})
+ assert imdict[u'café'] == u'くらとみ'
+
+ def test_container(self):
+ imdict = ImmutableDict({(1, 2): ['1', '2']})
+ assert imdict[(1, 2)] == ['1', '2']
+
+ def test_from_tuples(self):
+ imdict = ImmutableDict((('a', 1), ('b', 2)))
+ assert frozenset(imdict.items()) == frozenset((('a', 1), ('b', 2)))
+
+ def test_from_kwargs(self):
+ imdict = ImmutableDict(a=1, b=2)
+ assert frozenset(imdict.items()) == frozenset((('a', 1), ('b', 2)))
+
+ def test_immutable(self):
+ imdict = ImmutableDict({1: 2})
+
+ expected_reason = r"^'ImmutableDict' object does not support item assignment$"
+
+ with pytest.raises(TypeError, match=expected_reason):
+ imdict[1] = 3
+
+ with pytest.raises(TypeError, match=expected_reason):
+ imdict[5] = 3
+
+ def test_hashable(self):
+ # ImmutableDict is hashable when all of its values are hashable
+ imdict = ImmutableDict({u'café': u'くらとみ'})
+ assert hash(imdict)
+
+ def test_nonhashable(self):
+ # ImmutableDict is unhashable when one of its values is unhashable
+ imdict = ImmutableDict({u'café': u'くらとみ', 1: [1, 2]})
+
+ expected_reason = r"^unhashable type: 'list'$"
+
+ with pytest.raises(TypeError, match=expected_reason):
+ hash(imdict)
+
+ def test_len(self):
+ imdict = ImmutableDict({1: 2, 'a': 'b'})
+ assert len(imdict) == 2
+
+ def test_repr(self):
+ initial_data = {1: 2, 'a': 'b'}
+ initial_data_repr = repr(initial_data)
+ imdict = ImmutableDict(initial_data)
+ actual_repr = repr(imdict)
+ expected_repr = "ImmutableDict({0})".format(initial_data_repr)
+ assert actual_repr == expected_repr
diff --git a/test/units/module_utils/common/test_dict_transformations.py b/test/units/module_utils/common/test_dict_transformations.py
new file mode 100644
index 00000000..ecb520b2
--- /dev/null
+++ b/test/units/module_utils/common/test_dict_transformations.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Will Thames <will.thames@xvt.com.au>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel, camel_dict_to_snake_dict, dict_merge
+
+EXPECTED_SNAKIFICATION = {
+ 'alllower': 'alllower',
+ 'TwoWords': 'two_words',
+ 'AllUpperAtEND': 'all_upper_at_end',
+ 'AllUpperButPLURALs': 'all_upper_but_plurals',
+ 'TargetGroupARNs': 'target_group_arns',
+ 'HTTPEndpoints': 'http_endpoints',
+ 'PLURALs': 'plurals'
+}
+
+EXPECTED_REVERSIBLE = {
+ 'TwoWords': 'two_words',
+ 'AllUpperAtEND': 'all_upper_at_e_n_d',
+ 'AllUpperButPLURALs': 'all_upper_but_p_l_u_r_a_ls',
+ 'TargetGroupARNs': 'target_group_a_r_ns',
+ 'HTTPEndpoints': 'h_t_t_p_endpoints',
+ 'PLURALs': 'p_l_u_r_a_ls'
+}
+
+
+class CamelToSnakeTestCase(unittest.TestCase):
+
+ def test_camel_to_snake(self):
+ for (k, v) in EXPECTED_SNAKIFICATION.items():
+ self.assertEqual(_camel_to_snake(k), v)
+
+ def test_reversible_camel_to_snake(self):
+ for (k, v) in EXPECTED_REVERSIBLE.items():
+ self.assertEqual(_camel_to_snake(k, reversible=True), v)
+
+
+class SnakeToCamelTestCase(unittest.TestCase):
+
+ def test_snake_to_camel_reversed(self):
+ for (k, v) in EXPECTED_REVERSIBLE.items():
+ self.assertEqual(_snake_to_camel(v, capitalize_first=True), k)
+
+
+class CamelToSnakeAndBackTestCase(unittest.TestCase):
+ def test_camel_to_snake_and_back(self):
+ for (k, v) in EXPECTED_REVERSIBLE.items():
+ self.assertEqual(_snake_to_camel(_camel_to_snake(k, reversible=True), capitalize_first=True), k)
+
+
+class CamelDictToSnakeDictTestCase(unittest.TestCase):
+ def test_ignore_list(self):
+ camel_dict = dict(Hello=dict(One='one', Two='two'), World=dict(Three='three', Four='four'))
+ snake_dict = camel_dict_to_snake_dict(camel_dict, ignore_list='World')
+ self.assertEqual(snake_dict['hello'], dict(one='one', two='two'))
+ self.assertEqual(snake_dict['world'], dict(Three='three', Four='four'))
+
+
+class DictMergeTestCase(unittest.TestCase):
+ def test_dict_merge(self):
+ base = dict(obj2=dict(), b1=True, b2=False, b3=False,
+ one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
+ l1=[1, 3], l2=[1, 2, 3], l4=[4],
+ nested=dict(n1=dict(n2=2)))
+
+ other = dict(b1=True, b2=False, b3=True, b4=True,
+ one=1, three=4, four=4, obj1=dict(key1=2),
+ l1=[2, 1], l2=[3, 2, 1], l3=[1],
+ nested=dict(n1=dict(n2=2, n3=3)))
+
+ result = dict_merge(base, other)
+
+ # scalar assertions
+ self.assertTrue('one' in result)
+ self.assertTrue('two' in result)
+ self.assertEqual(result['three'], 4)
+ self.assertEqual(result['four'], 4)
+
+ # dict assertions
+ self.assertTrue('obj1' in result)
+ self.assertTrue('key1' in result['obj1'])
+ self.assertTrue('key2' in result['obj1'])
+
+ # list assertions
+ # this line differs from the network_utils/common test of the function of the
+ # same name as this method does not merge lists
+ self.assertEqual(result['l1'], [2, 1])
+ self.assertTrue('l2' in result)
+ self.assertEqual(result['l3'], [1])
+ self.assertTrue('l4' in result)
+
+ # nested assertions
+ self.assertTrue('obj1' in result)
+ self.assertEqual(result['obj1']['key1'], 2)
+ self.assertTrue('key2' in result['obj1'])
+
+ # bool assertions
+ self.assertTrue('b1' in result)
+ self.assertTrue('b2' in result)
+ self.assertTrue(result['b3'])
+ self.assertTrue(result['b4'])
+
+
+class AzureIncidentalTestCase(unittest.TestCase):
+
+ def test_dict_merge_invalid_dict(self):
+ ''' if b is not a dict, return b '''
+ res = dict_merge({}, None)
+ self.assertEqual(res, None)
+
+ def test_merge_sub_dicts(self):
+ '''merge sub dicts '''
+ a = {'a': {'a1': 1}}
+ b = {'a': {'b1': 2}}
+ c = {'a': {'a1': 1, 'b1': 2}}
+ res = dict_merge(a, b)
+ self.assertEqual(res, c)
diff --git a/test/units/module_utils/common/test_network.py b/test/units/module_utils/common/test_network.py
new file mode 100644
index 00000000..1267d0ce
--- /dev/null
+++ b/test/units/module_utils/common/test_network.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+# (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.network import (
+ to_masklen,
+ to_netmask,
+ to_subnet,
+ to_ipv6_network,
+ is_masklen,
+ is_netmask
+)
+
+
+def test_to_masklen():
+ assert 24 == to_masklen('255.255.255.0')
+
+
+def test_to_masklen_invalid():
+ with pytest.raises(ValueError):
+ to_masklen('255')
+
+
+def test_to_netmask():
+ assert '255.0.0.0' == to_netmask(8)
+ assert '255.0.0.0' == to_netmask('8')
+
+
+def test_to_netmask_invalid():
+ with pytest.raises(ValueError):
+ to_netmask(128)
+
+
+def test_to_subnet():
+ result = to_subnet('192.168.1.1', 24)
+ assert '192.168.1.0/24' == result
+
+ result = to_subnet('192.168.1.1', 24, dotted_notation=True)
+ assert '192.168.1.0 255.255.255.0' == result
+
+
+def test_to_subnet_invalid():
+ with pytest.raises(ValueError):
+ to_subnet('foo', 'bar')
+
+
+def test_is_masklen():
+ assert is_masklen(32)
+ assert not is_masklen(33)
+ assert not is_masklen('foo')
+
+
+def test_is_netmask():
+ assert is_netmask('255.255.255.255')
+ assert not is_netmask(24)
+ assert not is_netmask('foo')
+
+
+def test_to_ipv6_network():
+ assert '2001:db8::' == to_ipv6_network('2001:db8::')
+ assert '2001:0db8:85a3::' == to_ipv6_network('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
+ assert '2001:0db8:85a3::' == to_ipv6_network('2001:0db8:85a3:0:0:8a2e:0370:7334')
diff --git a/test/units/module_utils/common/test_removed.py b/test/units/module_utils/common/test_removed.py
new file mode 100644
index 00000000..36c1c1e9
--- /dev/null
+++ b/test/units/module_utils/common/test_removed.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.removed import removed_module
+
+
+@pytest.mark.parametrize('input_data', [u'2.8', 2.8, 2, '', ])
+def test_removed_module_sys_exit(input_data):
+ """Test for removed_module function, sys.exit()."""
+
+ with pytest.raises(SystemExit) as wrapped_e:
+ removed_module(input_data)
+
+ assert wrapped_e.type == SystemExit
+ assert wrapped_e.value.code == 1
+
+
+@pytest.mark.parametrize(
+ 'input_data, expected_msg, expected_warn',
+ [
+ (
+ u'2.8',
+ u'This module has been removed. '
+ 'The module documentation for Ansible-2.7 may contain hints for porting',
+ u'',
+ ),
+ (
+ 2.8,
+ u'This module has been removed. '
+ 'The module documentation for Ansible-2.7 may contain hints for porting',
+ u'',
+ ),
+ (
+ 2,
+ u'This module has been removed. '
+ 'The module documentation for Ansible-1 may contain hints for porting',
+ u'',
+ ),
+ (
+ u'café',
+ u'This module has been removed',
+ u'"warnings": ["removed modules should specify the version they were removed in"]',
+ ),
+ (
+ 0.1,
+ u'This module has been removed. '
+ 'The module documentation for Ansible-0.0 may contain hints for porting',
+ u'',
+ ),
+ ]
+)
+def test_removed_module_msgs(input_data, expected_msg, expected_warn, capsys):
+ """Test for removed_module function, content of output messages."""
+
+ with pytest.raises(SystemExit):
+ removed_module(input_data)
+
+ captured = capsys.readouterr()
+ assert expected_msg in captured.out
+ assert expected_warn in captured.out
diff --git a/test/units/module_utils/common/test_sys_info.py b/test/units/module_utils/common/test_sys_info.py
new file mode 100644
index 00000000..cd68225d
--- /dev/null
+++ b/test/units/module_utils/common/test_sys_info.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2017-2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from units.compat.mock import patch
+
+# Functions being tested
+from ansible.module_utils.common.sys_info import get_distribution
+from ansible.module_utils.common.sys_info import get_distribution_version
+from ansible.module_utils.common.sys_info import get_platform_subclass
+
+
+
+@pytest.fixture
+def platform_linux(mocker):
+ mocker.patch('platform.system', return_value='Linux')
+
+
+#
+# get_distribution tests
+#
+
+def test_get_distribution_not_linux():
+ """If it's not Linux, then it has no distribution"""
+ with patch('platform.system', return_value='Foo'):
+ assert get_distribution() is None
+
+
+@pytest.mark.usefixtures("platform_linux")
+class TestGetDistribution:
+ """Tests for get_distribution that have to find something"""
+ def test_distro_known(self):
+ with patch('ansible.module_utils.distro.id', return_value="alpine"):
+ assert get_distribution() == "Alpine"
+
+ with patch('ansible.module_utils.distro.id', return_value="arch"):
+ assert get_distribution() == "Arch"
+
+ with patch('ansible.module_utils.distro.id', return_value="centos"):
+ assert get_distribution() == "Centos"
+
+ with patch('ansible.module_utils.distro.id', return_value="clear-linux-os"):
+ assert get_distribution() == "Clear-linux-os"
+
+ with patch('ansible.module_utils.distro.id', return_value="coreos"):
+ assert get_distribution() == "Coreos"
+
+ with patch('ansible.module_utils.distro.id', return_value="debian"):
+ assert get_distribution() == "Debian"
+
+ with patch('ansible.module_utils.distro.id', return_value="flatcar"):
+ assert get_distribution() == "Flatcar"
+
+ with patch('ansible.module_utils.distro.id', return_value="linuxmint"):
+ assert get_distribution() == "Linuxmint"
+
+ with patch('ansible.module_utils.distro.id', return_value="opensuse"):
+ assert get_distribution() == "Opensuse"
+
+ with patch('ansible.module_utils.distro.id', return_value="oracle"):
+ assert get_distribution() == "Oracle"
+
+ with patch('ansible.module_utils.distro.id', return_value="raspian"):
+ assert get_distribution() == "Raspian"
+
+ with patch('ansible.module_utils.distro.id', return_value="rhel"):
+ assert get_distribution() == "Redhat"
+
+ with patch('ansible.module_utils.distro.id', return_value="ubuntu"):
+ assert get_distribution() == "Ubuntu"
+
+ with patch('ansible.module_utils.distro.id', return_value="virtuozzo"):
+ assert get_distribution() == "Virtuozzo"
+
+ with patch('ansible.module_utils.distro.id', return_value="foo"):
+ assert get_distribution() == "Foo"
+
+ def test_distro_unknown(self):
+ with patch('ansible.module_utils.distro.id', return_value=""):
+ assert get_distribution() == "OtherLinux"
+
+ def test_distro_amazon_linux_short(self):
+ with patch('ansible.module_utils.distro.id', return_value="amzn"):
+ assert get_distribution() == "Amazon"
+
+ def test_distro_amazon_linux_long(self):
+ with patch('ansible.module_utils.distro.id', return_value="amazon"):
+ assert get_distribution() == "Amazon"
+
+
+#
+# get_distribution_version tests
+#
+
+def test_get_distribution_version_not_linux():
+ """If it's not Linux, then it has no distribution"""
+ with patch('platform.system', return_value='Foo'):
+ assert get_distribution_version() is None
+
+
+@pytest.mark.usefixtures("platform_linux")
+def test_distro_found():
+ with patch('ansible.module_utils.distro.version', return_value="1"):
+ assert get_distribution_version() == "1"
+
+
+#
+# Tests for get_platform_subclass
+#
+
+class TestGetPlatformSubclass:
+ class LinuxTest:
+ pass
+
+ class Foo(LinuxTest):
+ platform = "Linux"
+ distribution = None
+
+ class Bar(LinuxTest):
+ platform = "Linux"
+ distribution = "Bar"
+
+ def test_not_linux(self):
+ # if neither match, the fallback should be the top-level class
+ with patch('platform.system', return_value="Foo"):
+ with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
+ assert get_platform_subclass(self.LinuxTest) is self.LinuxTest
+
+ @pytest.mark.usefixtures("platform_linux")
+ def test_get_distribution_none(self):
+ # match just the platform class, not a specific distribution
+ with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
+ assert get_platform_subclass(self.LinuxTest) is self.Foo
+
+ @pytest.mark.usefixtures("platform_linux")
+ def test_get_distribution_found(self):
+ # match both the distribution and platform class
+ with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"):
+ assert get_platform_subclass(self.LinuxTest) is self.Bar
diff --git a/test/units/module_utils/common/test_utils.py b/test/units/module_utils/common/test_utils.py
new file mode 100644
index 00000000..ef952393
--- /dev/null
+++ b/test/units/module_utils/common/test_utils.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.common.sys_info import get_all_subclasses
+
+
+#
+# Tests for get_all_subclasses
+#
+
+class TestGetAllSubclasses:
+ class Base:
+ pass
+
+ class BranchI(Base):
+ pass
+
+ class BranchII(Base):
+ pass
+
+ class BranchIA(BranchI):
+ pass
+
+ class BranchIB(BranchI):
+ pass
+
+ class BranchIIA(BranchII):
+ pass
+
+ class BranchIIB(BranchII):
+ pass
+
+ def test_bottom_level(self):
+ assert get_all_subclasses(self.BranchIIB) == set()
+
+ def test_one_inheritance(self):
+ assert set(get_all_subclasses(self.BranchII)) == set([self.BranchIIA, self.BranchIIB])
+
+ def test_toplevel(self):
+ assert set(get_all_subclasses(self.Base)) == set([self.BranchI, self.BranchII,
+ self.BranchIA, self.BranchIB,
+ self.BranchIIA, self.BranchIIB])
diff --git a/test/units/module_utils/common/text/converters/test_container_to_bytes.py b/test/units/module_utils/common/text/converters/test_container_to_bytes.py
new file mode 100644
index 00000000..091545e3
--- /dev/null
+++ b/test/units/module_utils/common/text/converters/test_container_to_bytes.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.text.converters import container_to_bytes
+
+
+DEFAULT_ENCODING = 'utf-8'
+DEFAULT_ERR_HANDLER = 'surrogate_or_strict'
+
+
+@pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ ({1: 1}, {1: 1}),
+ ([1, 2], [1, 2]),
+ ((1, 2), (1, 2)),
+ (1, 1),
+ (1.1, 1.1),
+ (b'str', b'str'),
+ (u'str', b'str'),
+ ([u'str'], [b'str']),
+ ((u'str',), (b'str',)),
+ ({u'str': u'str'}, {b'str': b'str'}),
+ ]
+)
+@pytest.mark.parametrize('encoding', ['utf-8', 'latin1', 'shift_jis', 'big5', 'koi8_r'])
+@pytest.mark.parametrize('errors', ['strict', 'surrogate_or_strict', 'surrogate_then_replace'])
+def test_container_to_bytes(test_input, expected, encoding, errors):
+ """Test for passing objects to container_to_bytes()."""
+ assert container_to_bytes(test_input, encoding=encoding, errors=errors) == expected
+
+
+@pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ ({1: 1}, {1: 1}),
+ ([1, 2], [1, 2]),
+ ((1, 2), (1, 2)),
+ (1, 1),
+ (1.1, 1.1),
+ (True, True),
+ (None, None),
+ (u'str', u'str'.encode(DEFAULT_ENCODING)),
+ (u'くらとみ', u'くらとみ'.encode(DEFAULT_ENCODING)),
+ (u'café', u'café'.encode(DEFAULT_ENCODING)),
+ (b'str', u'str'.encode(DEFAULT_ENCODING)),
+ (u'str', u'str'.encode(DEFAULT_ENCODING)),
+ ([u'str'], [u'str'.encode(DEFAULT_ENCODING)]),
+ ((u'str',), (u'str'.encode(DEFAULT_ENCODING),)),
+ ({u'str': u'str'}, {u'str'.encode(DEFAULT_ENCODING): u'str'.encode(DEFAULT_ENCODING)}),
+ ]
+)
+def test_container_to_bytes_default_encoding_err(test_input, expected):
+ """
+ Test for passing objects to container_to_bytes(). Default encoding and errors
+ """
+ assert container_to_bytes(test_input, encoding=DEFAULT_ENCODING,
+ errors=DEFAULT_ERR_HANDLER) == expected
+
+
+@pytest.mark.parametrize(
+ 'test_input,encoding',
+ [
+ (u'くらとみ', 'latin1'),
+ (u'café', 'shift_jis'),
+ ]
+)
+@pytest.mark.parametrize('errors', ['surrogate_or_strict', 'strict'])
+def test_container_to_bytes_incomp_chars_and_encod(test_input, encoding, errors):
+ """
+ Test for passing incompatible characters and encodings container_to_bytes().
+ """
+ with pytest.raises(UnicodeEncodeError, match="codec can't encode"):
+ container_to_bytes(test_input, encoding=encoding, errors=errors)
+
+
+@pytest.mark.parametrize(
+ 'test_input,encoding,expected',
+ [
+ (u'くらとみ', 'latin1', b'????'),
+ (u'café', 'shift_jis', b'caf?'),
+ ]
+)
+def test_container_to_bytes_surrogate_then_replace(test_input, encoding, expected):
+ """
+ Test for container_to_bytes() with surrogate_then_replace err handler.
+ """
+ assert container_to_bytes(test_input, encoding=encoding,
+ errors='surrogate_then_replace') == expected
diff --git a/test/units/module_utils/common/text/converters/test_container_to_text.py b/test/units/module_utils/common/text/converters/test_container_to_text.py
new file mode 100644
index 00000000..39038f51
--- /dev/null
+++ b/test/units/module_utils/common/text/converters/test_container_to_text.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.text.converters import container_to_text
+
+
+DEFAULT_ENCODING = 'utf-8'
+DEFAULT_ERR_HANDLER = 'surrogate_or_strict'
+
+
+@pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ ({1: 1}, {1: 1}),
+ ([1, 2], [1, 2]),
+ ((1, 2), (1, 2)),
+ (1, 1),
+ (1.1, 1.1),
+ (b'str', u'str'),
+ (u'str', u'str'),
+ ([b'str'], [u'str']),
+ ((b'str',), (u'str',)),
+ ({b'str': b'str'}, {u'str': u'str'}),
+ ]
+)
+@pytest.mark.parametrize('encoding', ['utf-8', 'latin1', 'shift-jis', 'big5', 'koi8_r', ])
+@pytest.mark.parametrize('errors', ['strict', 'surrogate_or_strict', 'surrogate_then_replace', ])
+def test_container_to_text_different_types(test_input, expected, encoding, errors):
+ """Test for passing objects to container_to_text()."""
+ assert container_to_text(test_input, encoding=encoding, errors=errors) == expected
+
+
+@pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ ({1: 1}, {1: 1}),
+ ([1, 2], [1, 2]),
+ ((1, 2), (1, 2)),
+ (1, 1),
+ (1.1, 1.1),
+ (True, True),
+ (None, None),
+ (u'str', u'str'),
+ (u'くらとみ'.encode(DEFAULT_ENCODING), u'くらとみ'),
+ (u'café'.encode(DEFAULT_ENCODING), u'café'),
+ (u'str'.encode(DEFAULT_ENCODING), u'str'),
+ ([u'str'.encode(DEFAULT_ENCODING)], [u'str']),
+ ((u'str'.encode(DEFAULT_ENCODING),), (u'str',)),
+ ({b'str': b'str'}, {u'str': u'str'}),
+ ]
+)
+def test_container_to_text_default_encoding_and_err(test_input, expected):
+ """
+ Test for passing objects to container_to_text(). Default encoding and errors
+ """
+ assert container_to_text(test_input, encoding=DEFAULT_ENCODING,
+ errors=DEFAULT_ERR_HANDLER) == expected
+
+
+@pytest.mark.parametrize(
+ 'test_input,encoding,expected',
+ [
+ (u'й'.encode('utf-8'), 'latin1', u'Ð¹'),
+ (u'café'.encode('utf-8'), 'shift_jis', u'cafﾃｩ'),
+ ]
+)
+@pytest.mark.parametrize('errors', ['strict', 'surrogate_or_strict', 'surrogate_then_replace', ])
+def test_container_to_text_incomp_encod_chars(test_input, encoding, errors, expected):
+ """
+ Test for passing incompatible characters and encodings container_to_text().
+ """
+ assert container_to_text(test_input, encoding=encoding, errors=errors) == expected
diff --git a/test/units/module_utils/common/text/converters/test_json_encode_fallback.py b/test/units/module_utils/common/text/converters/test_json_encode_fallback.py
new file mode 100644
index 00000000..8cf33529
--- /dev/null
+++ b/test/units/module_utils/common/text/converters/test_json_encode_fallback.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from datetime import datetime
+
+from pytz import timezone as tz
+
+from ansible.module_utils.common.text.converters import _json_encode_fallback
+
+
+@pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ (set([1]), [1]),
+ (datetime(2019, 5, 14, 13, 39, 38, 569047), '2019-05-14T13:39:38.569047'),
+ (datetime(2019, 5, 14, 13, 47, 16, 923866), '2019-05-14T13:47:16.923866'),
+ (datetime(2019, 6, 15, 14, 45, tzinfo=tz('UTC')), '2019-06-15T14:45:00+00:00'),
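+ # pytz zones used directly as tzinfo expose their local-mean-time offset (+01:40 for Helsinki), not the modern UTC offset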
+ (datetime(2019, 6, 15, 14, 45, tzinfo=tz('Europe/Helsinki')), '2019-06-15T14:45:00+01:40'),
+ ]
+)
+def test_json_encode_fallback(test_input, expected):
+ """
+ Test for passing expected objects to _json_encode_fallback().
+ """
+ assert _json_encode_fallback(test_input) == expected
+
+
+@pytest.mark.parametrize(
+ 'test_input',
+ [
+ 1,
+ 1.1,
+ u'string',
+ b'string',
+ [1, 2],
+ True,
+ None,
+ {1: 1},
+ (1, 2),
+ ]
+)
+def test_json_encode_fallback_default_behavior(test_input):
+ """
+ Test for _json_encode_fallback() default behavior.
+
+ It must fail with TypeError.
+ """
+ with pytest.raises(TypeError, match='Cannot json serialize'):
+ _json_encode_fallback(test_input)
diff --git a/test/units/module_utils/common/text/converters/test_jsonify.py b/test/units/module_utils/common/text/converters/test_jsonify.py
new file mode 100644
index 00000000..a3415313
--- /dev/null
+++ b/test/units/module_utils/common/text/converters/test_jsonify.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.text.converters import jsonify
+
+
+@pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ (1, '1'),
+ (u'string', u'"string"'),
+ (u'くらとみ', u'"\\u304f\\u3089\\u3068\\u307f"'),
+ (u'café', u'"caf\\u00e9"'),
+ (b'string', u'"string"'),
+ (False, u'false'),
+ (u'string'.encode('utf-8'), u'"string"'),
+ ]
+)
+def test_jsonify(test_input, expected):
+ """Test for jsonify()."""
+ assert jsonify(test_input) == expected
diff --git a/test/units/module_utils/common/text/converters/test_to_str.py b/test/units/module_utils/common/text/converters/test_to_str.py
new file mode 100644
index 00000000..b645db6d
--- /dev/null
+++ b/test/units/module_utils/common/text/converters/test_to_str.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import itertools
+
+import pytest
+
+from ansible.module_utils.six import PY3
+
+from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
+from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText
+
+
+# Format: byte representation, text representation, encoding of byte representation
+VALID_STRINGS = (
+ (b'abcde', u'abcde', 'ascii'),
+ (b'caf\xc3\xa9', u'caf\xe9', 'utf-8'),
+ (b'caf\xe9', u'caf\xe9', 'latin-1'),
+ # u'くらとみ'
+ (b'\xe3\x81\x8f\xe3\x82\x89\xe3\x81\xa8\xe3\x81\xbf', u'\u304f\u3089\u3068\u307f', 'utf-8'),
+ (b'\x82\xad\x82\xe7\x82\xc6\x82\xdd', u'\u304f\u3089\u3068\u307f', 'shift-jis'),
+)
+
+
+@pytest.mark.parametrize('in_string, encoding, expected',
+ itertools.chain(((d[0], d[2], d[1]) for d in VALID_STRINGS),
+ ((d[1], d[2], d[1]) for d in VALID_STRINGS)))
+def test_to_text(in_string, encoding, expected):
+ """test happy path of decoding to text"""
+ assert to_text(in_string, encoding) == expected
+
+
+@pytest.mark.parametrize('in_string, encoding, expected',
+ itertools.chain(((d[0], d[2], d[0]) for d in VALID_STRINGS),
+ ((d[1], d[2], d[0]) for d in VALID_STRINGS)))
+def test_to_bytes(in_string, encoding, expected):
+ """test happy path of encoding to bytes"""
+ assert to_bytes(in_string, encoding) == expected
+
+
+@pytest.mark.parametrize('in_string, encoding, expected',
+ itertools.chain(((d[0], d[2], d[1] if PY3 else d[0]) for d in VALID_STRINGS),
+ ((d[1], d[2], d[1] if PY3 else d[0]) for d in VALID_STRINGS)))
+def test_to_native(in_string, encoding, expected):
+ """test happy path of encoding to native strings"""
+ assert to_native(in_string, encoding) == expected
+
+
+def test_to_text_unsafe():
+ assert isinstance(to_text(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeText)
+ assert to_text(AnsibleUnsafeBytes(b'foo')) == AnsibleUnsafeText(u'foo')
+
+
+def test_to_bytes_unsafe():
+ assert isinstance(to_bytes(AnsibleUnsafeText(u'foo')), AnsibleUnsafeBytes)
+ assert to_bytes(AnsibleUnsafeText(u'foo')) == AnsibleUnsafeBytes(b'foo')
diff --git a/test/units/module_utils/common/text/formatters/test_bytes_to_human.py b/test/units/module_utils/common/text/formatters/test_bytes_to_human.py
new file mode 100644
index 00000000..41475f56
--- /dev/null
+++ b/test/units/module_utils/common/text/formatters/test_bytes_to_human.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.text.formatters import bytes_to_human
+
+
+@pytest.mark.parametrize(
+ 'input_data,expected',
+ [
+ (0, u'0.00 Bytes'),
+ (0.5, u'0.50 Bytes'),
+ (0.54, u'0.54 Bytes'),
+ (1024, u'1.00 KB'),
+ (1025, u'1.00 KB'),
+ (1536, u'1.50 KB'),
+ (1790, u'1.75 KB'),
+ (1048576, u'1.00 MB'),
+ (1073741824, u'1.00 GB'),
+ (1099511627776, u'1.00 TB'),
+ (1125899906842624, u'1.00 PB'),
+ (1152921504606846976, u'1.00 EB'),
+ (1180591620717411303424, u'1.00 ZB'),
+ (1208925819614629174706176, u'1.00 YB'),
+ ]
+)
+def test_bytes_to_human(input_data, expected):
+ """Test of bytes_to_human function, only proper numbers are passed."""
+ assert bytes_to_human(input_data) == expected
+
+
+@pytest.mark.parametrize(
+ 'input_data,expected',
+ [
+ (0, u'0.00 bits'),
+ (0.5, u'0.50 bits'),
+ (0.54, u'0.54 bits'),
+ (1024, u'1.00 Kb'),
+ (1025, u'1.00 Kb'),
+ (1536, u'1.50 Kb'),
+ (1790, u'1.75 Kb'),
+ (1048576, u'1.00 Mb'),
+ (1073741824, u'1.00 Gb'),
+ (1099511627776, u'1.00 Tb'),
+ (1125899906842624, u'1.00 Pb'),
+ (1152921504606846976, u'1.00 Eb'),
+ (1180591620717411303424, u'1.00 Zb'),
+ (1208925819614629174706176, u'1.00 Yb'),
+ ]
+)
+def test_bytes_to_human_isbits(input_data, expected):
+ """Test of bytes_to_human function with isbits=True proper results."""
+ assert bytes_to_human(input_data, isbits=True) == expected
+
+
+@pytest.mark.parametrize(
+ 'input_data,unit,expected',
+ [
+ (0, u'B', u'0.00 Bytes'),
+ (0.5, u'B', u'0.50 Bytes'),
+ (0.54, u'B', u'0.54 Bytes'),
+ (1024, u'K', u'1.00 KB'),
+ (1536, u'K', u'1.50 KB'),
+ (1790, u'K', u'1.75 KB'),
+ (1048576, u'M', u'1.00 MB'),
+ (1099511627776, u'T', u'1.00 TB'),
+ (1152921504606846976, u'E', u'1.00 EB'),
+ (1180591620717411303424, u'Z', u'1.00 ZB'),
+ (1208925819614629174706176, u'Y', u'1.00 YB'),
+ (1025, u'KB', u'1025.00 Bytes'),
+ (1073741824, u'Gb', u'1073741824.00 Bytes'),
+ (1125899906842624, u'Pb', u'1125899906842624.00 Bytes'),
+ ]
+)
+def test_bytes_to_human_unit(input_data, unit, expected):
+ """Test unit argument of bytes_to_human function proper results."""
+ assert bytes_to_human(input_data, unit=unit) == expected
+
+
+@pytest.mark.parametrize(
+ 'input_data,unit,expected',
+ [
+ (0, u'B', u'0.00 bits'),
+ (0.5, u'B', u'0.50 bits'),
+ (0.54, u'B', u'0.54 bits'),
+ (1024, u'K', u'1.00 Kb'),
+ (1536, u'K', u'1.50 Kb'),
+ (1790, u'K', u'1.75 Kb'),
+ (1048576, u'M', u'1.00 Mb'),
+ (1099511627776, u'T', u'1.00 Tb'),
+ (1152921504606846976, u'E', u'1.00 Eb'),
+ (1180591620717411303424, u'Z', u'1.00 Zb'),
+ (1208925819614629174706176, u'Y', u'1.00 Yb'),
+ (1025, u'KB', u'1025.00 bits'),
+ (1073741824, u'Gb', u'1073741824.00 bits'),
+ (1125899906842624, u'Pb', u'1125899906842624.00 bits'),
+ ]
+)
+def test_bytes_to_human_unit_isbits(input_data, unit, expected):
+ """Test unit argument of bytes_to_human function with isbits=True proper results."""
+ assert bytes_to_human(input_data, isbits=True, unit=unit) == expected
+
+
+@pytest.mark.parametrize('input_data', [0j, u'1B', [1], {1: 1}, None, b'1B'])
+def test_bytes_to_human_illegal_size(input_data):
+ """Test of bytes_to_human function, illegal objects are passed as a size."""
+ e_regexp = (r'(no ordering relation is defined for complex numbers)|'
+ r'(unsupported operand type\(s\) for /)|(unorderable types)|'
+ r'(not supported between instances of)')
+ with pytest.raises(TypeError, match=e_regexp):
+ bytes_to_human(input_data)
diff --git a/test/units/module_utils/common/text/formatters/test_human_to_bytes.py b/test/units/module_utils/common/text/formatters/test_human_to_bytes.py
new file mode 100644
index 00000000..d02699a6
--- /dev/null
+++ b/test/units/module_utils/common/text/formatters/test_human_to_bytes.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# Copyright 2019, Sviatoslav Sydorenko <webknjaz@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+
+
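+# Binary (IEC) multipliers; human_to_bytes reads K, M, G, ... as successive powers of 1024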
+NUM_IN_METRIC = {
+ 'K': 2 ** 10,
+ 'M': 2 ** 20,
+ 'G': 2 ** 30,
+ 'T': 2 ** 40,
+ 'P': 2 ** 50,
+ 'E': 2 ** 60,
+ 'Z': 2 ** 70,
+ 'Y': 2 ** 80,
+}
+
+
+@pytest.mark.parametrize(
+ 'input_data,expected',
+ [
+ (0, 0),
+ (u'0B', 0),
+ (1024, NUM_IN_METRIC['K']),
+ (u'1024B', NUM_IN_METRIC['K']),
+ (u'1K', NUM_IN_METRIC['K']),
+ (u'1KB', NUM_IN_METRIC['K']),
+ (u'1M', NUM_IN_METRIC['M']),
+ (u'1MB', NUM_IN_METRIC['M']),
+ (u'1G', NUM_IN_METRIC['G']),
+ (u'1GB', NUM_IN_METRIC['G']),
+ (u'1T', NUM_IN_METRIC['T']),
+ (u'1TB', NUM_IN_METRIC['T']),
+ (u'1P', NUM_IN_METRIC['P']),
+ (u'1PB', NUM_IN_METRIC['P']),
+ (u'1E', NUM_IN_METRIC['E']),
+ (u'1EB', NUM_IN_METRIC['E']),
+ (u'1Z', NUM_IN_METRIC['Z']),
+ (u'1ZB', NUM_IN_METRIC['Z']),
+ (u'1Y', NUM_IN_METRIC['Y']),
+ (u'1YB', NUM_IN_METRIC['Y']),
+ ]
+)
+def test_human_to_bytes_number(input_data, expected):
+ """Test of human_to_bytes function, only number arg is passed."""
+ assert human_to_bytes(input_data) == expected
+
+
+@pytest.mark.parametrize(
+ 'input_data,unit',
+ [
+ (u'1024', 'B'),
+ (1, u'K'),
+ (1, u'KB'),
+ (u'1', u'M'),
+ (u'1', u'MB'),
+ (1, u'G'),
+ (1, u'GB'),
+ (1, u'T'),
+ (1, u'TB'),
+ (u'1', u'P'),
+ (u'1', u'PB'),
+ (u'1', u'E'),
+ (u'1', u'EB'),
+ (u'1', u'Z'),
+ (u'1', u'ZB'),
+ (u'1', u'Y'),
+ (u'1', u'YB'),
+ ]
+)
+def test_human_to_bytes_number_unit(input_data, unit):
+ """Test of human_to_bytes function, number and default_unit args are passed."""
+ assert human_to_bytes(input_data, default_unit=unit) == NUM_IN_METRIC.get(unit[0], 1024)
+
+
+@pytest.mark.parametrize('test_input', [u'1024s', u'1024w', ])
+def test_human_to_bytes_wrong_unit(test_input):
+ """Test of human_to_bytes function, wrong units."""
+ with pytest.raises(ValueError, match="The suffix must be one of"):
+ human_to_bytes(test_input)
+
+
+@pytest.mark.parametrize('test_input', [u'b1bbb', u'm2mmm', u'', u' ', -1])
+def test_human_to_bytes_wrong_number(test_input):
+ """Test of human_to_bytes function, number param is invalid string / number."""
+ with pytest.raises(ValueError, match="can't interpret"):
+ human_to_bytes(test_input)
+
+
+@pytest.mark.parametrize(
+ 'input_data,expected',
+ [
+ (0, 0),
+ (u'0B', 0),
+ (u'1024b', 1024),
+ (u'1024B', 1024),
+ (u'1K', NUM_IN_METRIC['K']),
+ (u'1Kb', NUM_IN_METRIC['K']),
+ (u'1M', NUM_IN_METRIC['M']),
+ (u'1Mb', NUM_IN_METRIC['M']),
+ (u'1G', NUM_IN_METRIC['G']),
+ (u'1Gb', NUM_IN_METRIC['G']),
+ (u'1T', NUM_IN_METRIC['T']),
+ (u'1Tb', NUM_IN_METRIC['T']),
+ (u'1P', NUM_IN_METRIC['P']),
+ (u'1Pb', NUM_IN_METRIC['P']),
+ (u'1E', NUM_IN_METRIC['E']),
+ (u'1Eb', NUM_IN_METRIC['E']),
+ (u'1Z', NUM_IN_METRIC['Z']),
+ (u'1Zb', NUM_IN_METRIC['Z']),
+ (u'1Y', NUM_IN_METRIC['Y']),
+ (u'1Yb', NUM_IN_METRIC['Y']),
+ ]
+)
+def test_human_to_bytes_isbits(input_data, expected):
+ """Test of human_to_bytes function, isbits = True."""
+ assert human_to_bytes(input_data, isbits=True) == expected
+
+
+@pytest.mark.parametrize(
+ 'input_data,unit',
+ [
+ (1024, 'b'),
+ (1024, 'B'),
+ (1, u'K'),
+ (1, u'Kb'),
+ (u'1', u'M'),
+ (u'1', u'Mb'),
+ (1, u'G'),
+ (1, u'Gb'),
+ (1, u'T'),
+ (1, u'Tb'),
+ (u'1', u'P'),
+ (u'1', u'Pb'),
+ (u'1', u'E'),
+ (u'1', u'Eb'),
+ (u'1', u'Z'),
+ (u'1', u'Zb'),
+ (u'1', u'Y'),
+ (u'1', u'Yb'),
+ ]
+)
+def test_human_to_bytes_isbits_default_unit(input_data, unit):
+ """Test of human_to_bytes function, isbits = True and default_unit args are passed."""
+ assert human_to_bytes(input_data, default_unit=unit, isbits=True) == NUM_IN_METRIC.get(unit[0], 1024)
+
+
+@pytest.mark.parametrize(
+ 'test_input,isbits',
+ [
+ ('1024Kb', False),
+ ('10Mb', False),
+ ('1Gb', False),
+ ('10MB', True),
+ ('2KB', True),
+ ('4GB', True),
+ ]
+)
+def test_human_to_bytes_isbits_wrong_unit(test_input, isbits):
+ """Test of human_to_bytes function, unit identifier is in an invalid format for isbits value."""
+ with pytest.raises(ValueError, match="Value is not a valid string"):
+ human_to_bytes(test_input, isbits=isbits)
+
+
+@pytest.mark.parametrize(
+ 'test_input,unit,isbits',
+ [
+ (1024, 'Kb', False),
+ ('10', 'Mb', False),
+ ('10', 'MB', True),
+ (2, 'KB', True),
+ ('4', 'GB', True),
+ ]
+)
+def test_human_to_bytes_isbits_wrong_default_unit(test_input, unit, isbits):
+ """Test of human_to_bytes function, default_unit is in an invalid format for isbits value."""
+ with pytest.raises(ValueError, match="Value is not a valid string"):
+ human_to_bytes(test_input, default_unit=unit, isbits=isbits)
diff --git a/test/units/module_utils/common/text/formatters/test_lenient_lowercase.py b/test/units/module_utils/common/text/formatters/test_lenient_lowercase.py
new file mode 100644
index 00000000..1ecc013e
--- /dev/null
+++ b/test/units/module_utils/common/text/formatters/test_lenient_lowercase.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from datetime import datetime
+
+import pytest
+
+from ansible.module_utils.common.text.formatters import lenient_lowercase
+
+
+INPUT_LIST = [
+ u'HELLO',
+ u'Ёлка',
+ u'cafÉ',
+ u'くらとみ',
+ b'HELLO',
+ 1,
+ {1: 'Dict'},
+ True,
+ [1],
+ 3.14159,
+]
+
+EXPECTED_LIST = [
+ u'hello',
+ u'ёлка',
+ u'café',
+ u'くらとみ',
+ b'hello',
+ 1,
+ {1: 'Dict'},
+ True,
+ [1],
+ 3.14159,
+]
+
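+# lenient_lowercase() is applied once at module level; the parametrized test
+# below pairs each converted element with its expected counterpart. Non-string
+# items (ints, dicts, bools, lists, floats) pass through unchanged.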
+result_list = lenient_lowercase(INPUT_LIST)
+
+
+@pytest.mark.parametrize(
+ 'input_value,expected_outcome',
+ [
+ (result_list[0], EXPECTED_LIST[0]),
+ (result_list[1], EXPECTED_LIST[1]),
+ (result_list[2], EXPECTED_LIST[2]),
+ (result_list[3], EXPECTED_LIST[3]),
+ (result_list[4], EXPECTED_LIST[4]),
+ (result_list[5], EXPECTED_LIST[5]),
+ (result_list[6], EXPECTED_LIST[6]),
+ (result_list[7], EXPECTED_LIST[7]),
+ (result_list[8], EXPECTED_LIST[8]),
+ (result_list[9], EXPECTED_LIST[9]),
+ ]
+)
+def test_lenient_lowercase(input_value, expected_outcome):
+ """Test that lenient_lowercase() proper results."""
+ assert input_value == expected_outcome
+
+
+@pytest.mark.parametrize('input_data', [1, False, 1.001, 1j, datetime.now(), ])
+def test_lenient_lowercase_illegal_data_type(input_data):
+ """Test passing objects of illegal types to lenient_lowercase()."""
+ with pytest.raises(TypeError, match='object is not iterable'):
+ lenient_lowercase(input_data)
diff --git a/test/units/module_utils/common/validation/test_check_mutually_exclusive.py b/test/units/module_utils/common/validation/test_check_mutually_exclusive.py
new file mode 100644
index 00000000..7bf90760
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_mutually_exclusive.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_mutually_exclusive
+
+
+@pytest.fixture
+def mutually_exclusive_terms():
+ return [
+ ('string1', 'string2',),
+ ('box', 'fox', 'socks'),
+ ]
+
+
+def test_check_mutually_exclusive(mutually_exclusive_terms):
+ params = {
+ 'string1': 'cat',
+ 'fox': 'hat',
+ }
+ assert check_mutually_exclusive(mutually_exclusive_terms, params) == []
+
+
+def test_check_mutually_exclusive_found(mutually_exclusive_terms):
+ params = {
+ 'string1': 'cat',
+ 'string2': 'hat',
+ 'fox': 'red',
+ 'socks': 'blue',
+ }
+ expected = "parameters are mutually exclusive: string1|string2, box|fox|socks"
+
+ with pytest.raises(TypeError) as e:
+ check_mutually_exclusive(mutually_exclusive_terms, params)
+
+ assert to_native(e.value) == expected
+
+
+def test_check_mutually_exclusive_none():
+ terms = None
+ params = {
+ 'string1': 'cat',
+ 'fox': 'hat',
+ }
+ assert check_mutually_exclusive(terms, params) == []
+
+
+def test_check_mutually_exclusive_no_params(mutually_exclusive_terms):
+ with pytest.raises(TypeError) as te:
+ check_mutually_exclusive(mutually_exclusive_terms, None)
+ assert "'NoneType' object is not iterable" in to_native(te.value)
diff --git a/test/units/module_utils/common/validation/test_check_required_arguments.py b/test/units/module_utils/common/validation/test_check_required_arguments.py
new file mode 100644
index 00000000..1dd54584
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_required_arguments.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_required_arguments
+
+
+@pytest.fixture
+def arguments_terms():
+ return {
+ 'foo': {
+ 'required': True,
+ },
+ 'bar': {
+ 'required': False,
+ },
+ 'tomato': {
+ 'irrelevant': 72,
+ },
+ }
+
+
+@pytest.fixture
+def arguments_terms_multiple():
+ return {
+ 'foo': {
+ 'required': True,
+ },
+ 'bar': {
+ 'required': True,
+ },
+ 'tomato': {
+ 'irrelevant': 72,
+ },
+ }
+
+
+def test_check_required_arguments(arguments_terms):
+ params = {
+ 'foo': 'hello',
+ 'bar': 'haha',
+ }
+ assert check_required_arguments(arguments_terms, params) == []
+
+
+def test_check_required_arguments_missing(arguments_terms):
+ params = {
+ 'apples': 'woohoo',
+ }
+ expected = "missing required arguments: foo"
+
+ with pytest.raises(TypeError) as e:
+ check_required_arguments(arguments_terms, params)
+
+ assert to_native(e.value) == expected
+
+
+def test_check_required_arguments_missing_multiple(arguments_terms_multiple):
+ params = {
+ 'apples': 'woohoo',
+ }
+ expected = "missing required arguments: bar, foo"
+
+ with pytest.raises(TypeError) as e:
+ check_required_arguments(arguments_terms_multiple, params)
+
+ assert to_native(e.value) == expected
+
+
+def test_check_required_arguments_missing_none():
+ terms = None
+ params = {
+ 'foo': 'bar',
+ 'baz': 'buzz',
+ }
+ assert check_required_arguments(terms, params) == []
+
+
+def test_check_required_arguments_no_params(arguments_terms):
+ with pytest.raises(TypeError) as te:
+ check_required_arguments(arguments_terms, None)
+ assert "'NoneType' is not iterable" in to_native(te.value)
diff --git a/test/units/module_utils/common/validation/test_check_required_together.py b/test/units/module_utils/common/validation/test_check_required_together.py
new file mode 100644
index 00000000..8a2daab1
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_required_together.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_required_together
+
+
+@pytest.fixture
+def together_terms():
+ return [
+ ['bananas', 'potatoes'],
+ ['cats', 'wolves']
+ ]
+
+
+def test_check_required_together(together_terms):
+ params = {
+ 'bananas': 'hello',
+ 'potatoes': 'this is here too',
+ 'dogs': 'haha',
+ }
+ assert check_required_together(together_terms, params) == []
+
+
+def test_check_required_together_missing(together_terms):
+ params = {
+ 'bananas': 'woohoo',
+ 'wolves': 'uh oh',
+ }
+ expected = "parameters are required together: bananas, potatoes"
+
+ with pytest.raises(TypeError) as e:
+ check_required_together(together_terms, params)
+
+ assert to_native(e.value) == expected
+
+
+def test_check_required_together_missing_none():
+ terms = None
+ params = {
+ 'foo': 'bar',
+ 'baz': 'buzz',
+ }
+ assert check_required_together(terms, params) == []
+
+
+def test_check_required_together_no_params(together_terms):
+ with pytest.raises(TypeError) as te:
+ check_required_together(together_terms, None)
+
+ assert "'NoneType' object is not iterable" in to_native(te.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_bits.py b/test/units/module_utils/common/validation/test_check_type_bits.py
new file mode 100644
index 00000000..7f6b11d3
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_bits.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_bits
+
+
+def test_check_type_bits():
+ test_cases = (
+ ('1', 1),
+ (99, 99),
+ (1.5, 2),
+ ('1.5', 2),
+ ('2b', 2),
+ ('2k', 2048),
+ ('2K', 2048),
+ ('1m', 1048576),
+ ('1M', 1048576),
+ ('1g', 1073741824),
+ ('1G', 1073741824),
+ (1073741824, 1073741824),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_bits(case[0])
+
+
+def test_check_type_bits_fail():
+ test_cases = (
+ 'foo',
+ '2KB',
+ '1MB',
+ '1GB',
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError) as e:
+ check_type_bits(case)
+ assert 'cannot be converted to a Bit value' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_bool.py b/test/units/module_utils/common/validation/test_check_type_bool.py
new file mode 100644
index 00000000..bd867dc9
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_bool.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_bool
+
+
+def test_check_type_bool():
+ test_cases = (
+ (True, True),
+ (False, False),
+ ('1', True),
+ ('on', True),
+ (1, True),
+ ('0', False),
+ (0, False),
+ ('n', False),
+ ('f', False),
+ ('false', False),
+ ('true', True),
+ ('y', True),
+ ('t', True),
+ ('yes', True),
+ ('no', False),
+ ('off', False),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_bool(case[0])
+
+
+def test_check_type_bool_fail():
+ default_test_msg = 'cannot be converted to a bool'
+ test_cases = (
+ ({'k1': 'v1'}, 'is not a valid bool'),
+ (3.14159, default_test_msg),
+ (-1, default_test_msg),
+ (-90810398401982340981023948192349081, default_test_msg),
+ (90810398401982340981023948192349081, default_test_msg),
+ )
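+ # Each case tuple is passed whole; a tuple is neither a bool, a string nor
+ # a number, so every call raises the generic 'cannot be converted' message.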
+ for case in test_cases:
+ with pytest.raises(TypeError) as e:
+ check_type_bool(case)
+ assert 'cannot be converted to a bool' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_bytes.py b/test/units/module_utils/common/validation/test_check_type_bytes.py
new file mode 100644
index 00000000..6ff62dc2
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_bytes.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_bytes
+
+
+def test_check_type_bytes():
+ test_cases = (
+ ('1', 1),
+ (99, 99),
+ (1.5, 2),
+ ('1.5', 2),
+ ('2b', 2),
+ ('2B', 2),
+ ('2k', 2048),
+ ('2K', 2048),
+ ('2KB', 2048),
+ ('1m', 1048576),
+ ('1M', 1048576),
+ ('1MB', 1048576),
+ ('1g', 1073741824),
+ ('1G', 1073741824),
+ ('1GB', 1073741824),
+ (1073741824, 1073741824),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_bytes(case[0])
+
+
+def test_check_type_bytes_fail():
+ test_cases = (
+ 'foo',
+ '2kb',
+ '2Kb',
+ '1mb',
+ '1Mb',
+ '1gb',
+ '1Gb',
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError) as e:
+ check_type_bytes(case)
+ assert 'cannot be converted to a Byte value' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_dict.py b/test/units/module_utils/common/validation/test_check_type_dict.py
new file mode 100644
index 00000000..75638c58
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_dict.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.validation import check_type_dict
+
+
+def test_check_type_dict():
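+ # check_type_dict() accepts dicts as-is and converts both 'k1=v1,k2=v2'
+ # strings and JSON object strings.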
+ test_cases = (
+ ({'k1': 'v1'}, {'k1': 'v1'}),
+ ('k1=v1,k2=v2', {'k1': 'v1', 'k2': 'v2'}),
+ ('k1=v1, k2=v2', {'k1': 'v1', 'k2': 'v2'}),
+ ('k1=v1, k2=v2, k3=v3', {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}),
+ ('{"key": "value", "list": ["one", "two"]}', {'key': 'value', 'list': ['one', 'two']})
+ )
+ for case in test_cases:
+ assert case[1] == check_type_dict(case[0])
+
+
+def test_check_type_dict_fail():
+ test_cases = (
+ 1,
+ 3.14159,
+ [1, 2],
+ 'a',
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError):
+ check_type_dict(case)
diff --git a/test/units/module_utils/common/validation/test_check_type_float.py b/test/units/module_utils/common/validation/test_check_type_float.py
new file mode 100644
index 00000000..57837fae
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_float.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_float
+
+
+def test_check_type_float():
+ test_cases = (
+ ('1.5', 1.5),
+ ('''1.5''', 1.5),
+ (u'1.5', 1.5),
+ (1002, 1002.0),
+ (1.0, 1.0),
+ (3.141592653589793, 3.141592653589793),
+ ('3.141592653589793', 3.141592653589793),
+ (b'3.141592653589793', 3.141592653589793),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_float(case[0])
+
+
+def test_check_type_float_fail():
+ test_cases = (
+ {'k1': 'v1'},
+ ['a', 'b'],
+ 'b',
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError) as e:
+ check_type_float(case)
+ assert 'cannot be converted to a float' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_int.py b/test/units/module_utils/common/validation/test_check_type_int.py
new file mode 100644
index 00000000..22cedf61
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_int.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_int
+
+
+def test_check_type_int():
+ test_cases = (
+ ('1', 1),
+ (u'1', 1),
+ (1002, 1002),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_int(case[0])
+
+
+def test_check_type_int_fail():
+ test_cases = (
+ {'k1': 'v1'},
+ (b'1', 1),
+ (3.14159, 3),
+ 'b',
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError) as e:
+ check_type_int(case)
+ assert 'cannot be converted to an int' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_jsonarg.py b/test/units/module_utils/common/validation/test_check_type_jsonarg.py
new file mode 100644
index 00000000..e78e54bb
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_jsonarg.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_jsonarg
+
+
+def test_check_type_jsonarg():
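+ # Strings and bytes are returned with surrounding whitespace stripped;
+ # lists, tuples and dicts are serialized to a JSON string.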
+ test_cases = (
+ ('a', 'a'),
+ ('a ', 'a'),
+ (b'99', b'99'),
+ (b'99 ', b'99'),
+ ({'k1': 'v1'}, '{"k1": "v1"}'),
+ ([1, 'a'], '[1, "a"]'),
+ ((1, 2, 'three'), '[1, 2, "three"]'),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_jsonarg(case[0])
+
+
+def test_check_type_jsonarg_fail():
+ test_cases = (
+ 1.5,
+ 910313498012384012341982374109384098,
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError) as e:
+ check_type_jsonarg(case)
+ assert 'cannot be converted to a json string' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_check_type_list.py b/test/units/module_utils/common/validation/test_check_type_list.py
new file mode 100644
index 00000000..3f7a9ee6
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_list.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.validation import check_type_list
+
+
+def test_check_type_list():
+ test_cases = (
+ ([1, 2], [1, 2]),
+ (1, ['1']),
+ (['a', 'b'], ['a', 'b']),
+ ('a', ['a']),
+ (3.14159, ['3.14159']),
+ ('a,b,1,2', ['a', 'b', '1', '2'])
+ )
+ for case in test_cases:
+ assert case[1] == check_type_list(case[0])
+
+
+def test_check_type_list_failure():
+ test_cases = (
+ {'k1': 'v1'},
+ )
+ for case in test_cases:
+ with pytest.raises(TypeError):
+ check_type_list(case)
diff --git a/test/units/module_utils/common/validation/test_check_type_path.py b/test/units/module_utils/common/validation/test_check_type_path.py
new file mode 100644
index 00000000..d6ff433a
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_path.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+
+import os
+from ansible.module_utils.common.validation import check_type_path
+
+
+def mock_expand(value):
+ return re.sub(r'~|\$HOME', '/home/testuser', value)
+
+
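+# Both expanduser() and expandvars() are patched so that '~' and '$HOME'
+# resolve deterministically, independent of the test environment.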
+def test_check_type_path(monkeypatch):
+ monkeypatch.setattr(os.path, 'expandvars', mock_expand)
+ monkeypatch.setattr(os.path, 'expanduser', mock_expand)
+ test_cases = (
+ ('~/foo', '/home/testuser/foo'),
+ ('$HOME/foo', '/home/testuser/foo'),
+ ('/home/jane', '/home/jane'),
+ (u'/home/jané', u'/home/jané'),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_path(case[0])
diff --git a/test/units/module_utils/common/validation/test_check_type_raw.py b/test/units/module_utils/common/validation/test_check_type_raw.py
new file mode 100644
index 00000000..988e5543
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_raw.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.common.validation import check_type_raw
+
+
+def test_check_type_raw():
+ test_cases = (
+ (1, 1),
+ ('1', '1'),
+ ('a', 'a'),
+ ({'k1': 'v1'}, {'k1': 'v1'}),
+ ([1, 2], [1, 2]),
+ (b'42', b'42'),
+ (u'42', u'42'),
+ )
+ for case in test_cases:
+ assert case[1] == check_type_raw(case[0])
diff --git a/test/units/module_utils/common/validation/test_check_type_str.py b/test/units/module_utils/common/validation/test_check_type_str.py
new file mode 100644
index 00000000..f10dad28
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_check_type_str.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.validation import check_type_str
+
+
+TEST_CASES = (
+ ('string', 'string'),
+ (100, '100'),
+ (1.5, '1.5'),
+ ({'k1': 'v1'}, "{'k1': 'v1'}"),
+ ([1, 2, 'three'], "[1, 2, 'three']"),
+ ((1, 2,), '(1, 2)'),
+)
+
+
+@pytest.mark.parametrize('value, expected', TEST_CASES)
+def test_check_type_str(value, expected):
+ assert expected == check_type_str(value)
+
+
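+# The first case is skipped: a value that is already a string never needs
+# conversion, so it cannot fail even with allow_conversion=False.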
+@pytest.mark.parametrize('value, expected', TEST_CASES[1:])
+def test_check_type_str_no_conversion(value, expected):
+ with pytest.raises(TypeError) as e:
+ check_type_str(value, allow_conversion=False)
+ assert 'is not a string and conversion is not allowed' in to_native(e.value)
diff --git a/test/units/module_utils/common/validation/test_count_terms.py b/test/units/module_utils/common/validation/test_count_terms.py
new file mode 100644
index 00000000..f41dc40d
--- /dev/null
+++ b/test/units/module_utils/common/validation/test_count_terms.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.common.validation import count_terms
+
+
+@pytest.fixture
+def params():
+ return {
+ 'name': 'bob',
+ 'dest': '/etc/hosts',
+ 'state': 'present',
+ 'value': 5,
+ }
+
+
+def test_count_terms(params):
+ check = set(('name', 'dest'))
+ assert count_terms(check, params) == 2
+
+
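+# count_terms() treats a bare string as a single term name; it is not
+# iterated character by character.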
+def test_count_terms_str_input(params):
+ check = 'name'
+ assert count_terms(check, params) == 1
+
+
+def test_count_terms_tuple_input(params):
+ check = ('name', 'dest')
+ assert count_terms(check, params) == 2
+
+
+def test_count_terms_list_input(params):
+ check = ['name', 'dest']
+ assert count_terms(check, params) == 2
diff --git a/test/units/module_utils/common/warnings/test_deprecate.py b/test/units/module_utils/common/warnings/test_deprecate.py
new file mode 100644
index 00000000..42046bfe
--- /dev/null
+++ b/test/units/module_utils/common/warnings/test_deprecate.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+import ansible.module_utils.common.warnings as warnings
+
+from ansible.module_utils.common.warnings import deprecate, get_deprecation_messages
+from ansible.module_utils.six import PY3
+
+
+@pytest.fixture
+def deprecation_messages():
+ return [
+ {'msg': 'First deprecation', 'version': None, 'collection_name': None},
+ {'msg': 'Second deprecation', 'version': None, 'collection_name': 'ansible.builtin'},
+ {'msg': 'Third deprecation', 'version': '2.14', 'collection_name': None},
+ {'msg': 'Fourth deprecation', 'version': '2.9', 'collection_name': None},
+ {'msg': 'Fifth deprecation', 'version': '2.9', 'collection_name': 'ansible.builtin'},
+ {'msg': 'Sixth deprecation', 'date': '2199-12-31', 'collection_name': None},
+ {'msg': 'Seventh deprecation', 'date': '2199-12-31', 'collection_name': 'ansible.builtin'},
+ ]
+
+
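+# Each deprecate() call appends a dict to the module-global
+# warnings._global_deprecations list, which the tests below inspect directly.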
+def test_deprecate_message_only():
+ deprecate('Deprecation message')
+ assert warnings._global_deprecations == [
+ {'msg': 'Deprecation message', 'version': None, 'collection_name': None}]
+
+
+def test_deprecate_with_collection():
+ deprecate(msg='Deprecation message', collection_name='ansible.builtin')
+ assert warnings._global_deprecations == [
+ {'msg': 'Deprecation message', 'version': None, 'collection_name': 'ansible.builtin'}]
+
+
+def test_deprecate_with_version():
+ deprecate(msg='Deprecation message', version='2.14')
+ assert warnings._global_deprecations == [
+ {'msg': 'Deprecation message', 'version': '2.14', 'collection_name': None}]
+
+
+def test_deprecate_with_version_and_collection():
+ deprecate(msg='Deprecation message', version='2.14', collection_name='ansible.builtin')
+ assert warnings._global_deprecations == [
+ {'msg': 'Deprecation message', 'version': '2.14', 'collection_name': 'ansible.builtin'}]
+
+
+def test_deprecate_with_date():
+ deprecate(msg='Deprecation message', date='2199-12-31')
+ assert warnings._global_deprecations == [
+ {'msg': 'Deprecation message', 'date': '2199-12-31', 'collection_name': None}]
+
+
+def test_deprecate_with_date_and_collection():
+ deprecate(msg='Deprecation message', date='2199-12-31', collection_name='ansible.builtin')
+ assert warnings._global_deprecations == [
+ {'msg': 'Deprecation message', 'date': '2199-12-31', 'collection_name': 'ansible.builtin'}]
+
+
+def test_multiple_deprecations(deprecation_messages):
+ for d in deprecation_messages:
+ deprecate(**d)
+
+ assert deprecation_messages == warnings._global_deprecations
+
+
+def test_get_deprecation_messages(deprecation_messages):
+ for d in deprecation_messages:
+ deprecate(**d)
+
+ accessor_deprecations = get_deprecation_messages()
+ assert isinstance(accessor_deprecations, tuple)
+ assert len(accessor_deprecations) == 7
+
+
+@pytest.mark.parametrize(
+ 'test_case',
+ (
+ 1,
+ True,
+ [1],
+ {'k1': 'v1'},
+ (1, 2),
+ 6.62607004,
+ b'bytestr' if PY3 else None,
+ None,
+ )
+)
+def test_deprecate_failure(test_case):
+ with pytest.raises(TypeError, match='deprecate requires a string not a %s' % type(test_case)):
+ deprecate(test_case)
diff --git a/test/units/module_utils/common/warnings/test_warn.py b/test/units/module_utils/common/warnings/test_warn.py
new file mode 100644
index 00000000..020b0625
--- /dev/null
+++ b/test/units/module_utils/common/warnings/test_warn.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+import ansible.module_utils.common.warnings as warnings
+
+from ansible.module_utils.common.warnings import warn, get_warning_messages
+from ansible.module_utils.six import PY3
+
+
+@pytest.fixture
+def warning_messages():
+ return [
+ 'First warning',
+ 'Second warning',
+ 'Third warning',
+ ]
+
+
+def test_warn():
+ warn('Warning message')
+ assert warnings._global_warnings == ['Warning message']
+
+
+def test_multiple_warnings(warning_messages):
+ for w in warning_messages:
+ warn(w)
+
+ assert warning_messages == warnings._global_warnings
+
+
+def test_get_warning_messages(warning_messages):
+ for w in warning_messages:
+ warn(w)
+
+ accessor_warnings = get_warning_messages()
+ assert isinstance(accessor_warnings, tuple)
+ assert len(accessor_warnings) == 3
+
+
+@pytest.mark.parametrize(
+ 'test_case',
+ (
+ 1,
+ True,
+ [1],
+ {'k1': 'v1'},
+ (1, 2),
+ 6.62607004,
+ b'bytestr' if PY3 else None,
+ None,
+ )
+)
+def test_warn_failure(test_case):
+ with pytest.raises(TypeError, match='warn requires a string not a %s' % type(test_case)):
+ warn(test_case)
diff --git a/test/units/module_utils/conftest.py b/test/units/module_utils/conftest.py
new file mode 100644
index 00000000..8bc13c4d
--- /dev/null
+++ b/test/units/module_utils/conftest.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+from io import BytesIO
+
+import pytest
+
+import ansible.module_utils.basic
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def stdin(mocker, request):
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the stdin pytest fixture')
+
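+ # AnsibleModule reads its parameters as a JSON document from stdin; on
+ # Python 3 the bytes come from sys.stdin.buffer, on Python 2 from
+ # sys.stdin, so the matching attribute is patched with a BytesIO.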
+ fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict'))
+ if PY3:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock())
+ mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin)
+ else:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin)
+
+ yield fake_stdin
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
+
+
+@pytest.fixture
+def am(stdin, request):
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ argspec = {}
+ if hasattr(request, 'param'):
+ if isinstance(request.param, dict):
+ argspec = request.param
+
+ am = ansible.module_utils.basic.AnsibleModule(
+ argument_spec=argspec,
+ )
+ am._name = 'ansible_unittest'
+
+ yield am
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
diff --git a/test/units/module_utils/facts/__init__.py b/test/units/module_utils/facts/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/facts/__init__.py
diff --git a/test/units/module_utils/facts/base.py b/test/units/module_utils/facts/base.py
new file mode 100644
index 00000000..33d3087b
--- /dev/null
+++ b/test/units/module_utils/facts/base.py
@@ -0,0 +1,65 @@
+# base unit test classes for ansible/module_utils/facts/ tests
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import Mock, patch
+
+
+class BaseFactsTest(unittest.TestCase):
+ # just a base class, not an actual test
+ __test__ = False
+
+ gather_subset = ['all']
+ valid_subsets = None
+ fact_namespace = None
+ collector_class = None
+
+ # A dict of ansible_facts. Some fact collectors depend on facts gathered by
+ # other collectors (like 'ansible_architecture' or 'ansible_system'); these
+ # can be passed via the collected_facts arg to collect().
+ collected_facts = None
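+ # Typical usage (a hypothetical subclass, for illustration only):
+ #
+ # class TestSomeFacts(BaseFactsTest):
+ # __test__ = True
+ # gather_subset = ['!all', 'some_subset']
+ # valid_subsets = ['some_subset']
+ # fact_namespace = 'ansible_some_subset'
+ # collector_class = SomeFactCollector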
+
+ def _mock_module(self):
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': self.gather_subset,
+ 'gather_timeout': 5,
+ 'filter': '*'}
+ mock_module.get_bin_path = Mock(return_value=None)
+ return mock_module
+
+ @patch('platform.system', return_value='Linux')
+ @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd')
+ def test_collect(self, mock_gfc, mock_ps):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
+ self.assertIsInstance(facts_dict, dict)
+ return facts_dict
+
+ @patch('platform.system', return_value='Linux')
+ @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd')
+ def test_collect_with_namespace(self, mock_gfc, mock_ps):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect_with_namespace(module=module,
+ collected_facts=self.collected_facts)
+ self.assertIsInstance(facts_dict, dict)
+ return facts_dict
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo
new file mode 100644
index 00000000..c3caa01c
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/aarch64-4cpu-cpuinfo
@@ -0,0 +1,40 @@
+processor : 0
+Processor : AArch64 Processor rev 4 (aarch64)
+Hardware : sun50iw1p1
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 1
+Processor : AArch64 Processor rev 4 (aarch64)
+Hardware : sun50iw1p1
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 2
+Processor : AArch64 Processor rev 4 (aarch64)
+Hardware : sun50iw1p1
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 3
+Processor : AArch64 Processor rev 4 (aarch64)
+Hardware : sun50iw1p1
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo
new file mode 100644
index 00000000..38fd06e7
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/arm64-4cpu-cpuinfo
@@ -0,0 +1,32 @@
+processor : 0
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 1
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 2
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 3
+BogoMIPS : 48.00
+Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+CPU implementer : 0x41
+CPU architecture: 8
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo
new file mode 100644
index 00000000..84ee16c9
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo
@@ -0,0 +1,12 @@
+processor : 0
+model name : ARMv6-compatible processor rev 7 (v6l)
+BogoMIPS : 697.95
+Features : half thumb fastmult vfp edsp java tls
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xb76
+CPU revision : 7
+Hardware : BCM2835
+Revision : 0010
+Serial : 000000004a0abca9
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo
new file mode 100644
index 00000000..d4b4d3b6
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo
@@ -0,0 +1,75 @@
+processor : 0
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 12.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xc07
+CPU revision : 3
+processor : 1
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 12.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xc07
+CPU revision : 3
+processor : 2
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 12.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xc07
+CPU revision : 3
+processor : 3
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 12.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xc07
+CPU revision : 3
+processor : 4
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 120.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x2
+CPU part : 0xc0f
+CPU revision : 3
+processor : 5
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 120.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x2
+CPU part : 0xc0f
+CPU revision : 3
+processor : 6
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 120.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x2
+CPU part : 0xc0f
+CPU revision : 3
+processor : 7
+model name : ARMv7 Processor rev 3 (v7l)
+BogoMIPS : 120.00
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x2
+CPU part : 0xc0f
+CPU revision : 3
+Hardware : ODROID-XU4
+Revision : 0100
+Serial : 0000000000000000
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo
new file mode 100644
index 00000000..f36075c2
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo
@@ -0,0 +1,39 @@
+processor : 0
+model name : ARMv7 Processor rev 4 (v7l)
+BogoMIPS : 38.40
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 1
+model name : ARMv7 Processor rev 4 (v7l)
+BogoMIPS : 38.40
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 2
+model name : ARMv7 Processor rev 4 (v7l)
+BogoMIPS : 38.40
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+processor : 3
+model name : ARMv7 Processor rev 4 (v7l)
+BogoMIPS : 38.40
+Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4 idiva idivt vfpd32 lpae evtstrm crc32
+CPU implementer : 0x41
+CPU architecture: 7
+CPU variant : 0x0
+CPU part : 0xd03
+CPU revision : 4
+Hardware : BCM2835
+Revision : a02082
+Serial : 000000007881bb80
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo
new file mode 100644
index 00000000..1309c585
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo
@@ -0,0 +1,44 @@
+processor : 0
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 1
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 2
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 3
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 4
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 5
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 6
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+processor : 7
+cpu : POWER7 (architected), altivec supported
+clock : 3550.000000MHz
+revision : 2.1 (pvr 003f 0201)
+
+timebase : 512000000
+platform : pSeries
+model : IBM,8231-E2B
+machine : CHRP IBM,8231-E2B \ No newline at end of file
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo
new file mode 100644
index 00000000..4cbd5ac0
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo
@@ -0,0 +1,125 @@
+processor : 0
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 1
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 2
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 3
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 4
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 5
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 6
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 7
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 8
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 9
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 10
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 11
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 12
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 13
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 14
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 15
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 16
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 17
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 18
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 19
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 20
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 21
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 22
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+processor : 23
+cpu : POWER8 (architected), altivec supported
+clock : 3425.000000MHz
+revision : 2.1 (pvr 004b 0201)
+
+timebase : 512000000
+platform : pSeries
+model : IBM,8247-21L
+machine : CHRP IBM,8247-21L
+
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu b/test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu
new file mode 100644
index 00000000..8c29faa4
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu
@@ -0,0 +1,61 @@
+cpu : UltraSparc T5 (Niagara5)
+fpu : UltraSparc T5 integrated FPU
+pmu : niagara5
+prom : OBP 4.38.12 2018/03/28 14:54
+type : sun4v
+ncpus probed : 24
+ncpus active : 24
+D$ parity tl1 : 0
+I$ parity tl1 : 0
+cpucaps : flush,stbar,swap,muldiv,v9,blkinit,n2,mul32,div32,v8plus,popc,vis,vis2,ASIBlkInit,fmaf,vis3,hpc,ima,pause,cbcond,aes,des,kasumi,camellia,md5,sha1,sha256,sha512,mpmul,montmul,montsqr,crc32c
+Cpu0ClkTck : 00000000d6924470
+Cpu1ClkTck : 00000000d6924470
+Cpu2ClkTck : 00000000d6924470
+Cpu3ClkTck : 00000000d6924470
+Cpu4ClkTck : 00000000d6924470
+Cpu5ClkTck : 00000000d6924470
+Cpu6ClkTck : 00000000d6924470
+Cpu7ClkTck : 00000000d6924470
+Cpu8ClkTck : 00000000d6924470
+Cpu9ClkTck : 00000000d6924470
+Cpu10ClkTck : 00000000d6924470
+Cpu11ClkTck : 00000000d6924470
+Cpu12ClkTck : 00000000d6924470
+Cpu13ClkTck : 00000000d6924470
+Cpu14ClkTck : 00000000d6924470
+Cpu15ClkTck : 00000000d6924470
+Cpu16ClkTck : 00000000d6924470
+Cpu17ClkTck : 00000000d6924470
+Cpu18ClkTck : 00000000d6924470
+Cpu19ClkTck : 00000000d6924470
+Cpu20ClkTck : 00000000d6924470
+Cpu21ClkTck : 00000000d6924470
+Cpu22ClkTck : 00000000d6924470
+Cpu23ClkTck : 00000000d6924470
+MMU Type : Hypervisor (sun4v)
+MMU PGSZs : 8K,64K,4MB,256MB
+State:
+CPU0: online
+CPU1: online
+CPU2: online
+CPU3: online
+CPU4: online
+CPU5: online
+CPU6: online
+CPU7: online
+CPU8: online
+CPU9: online
+CPU10: online
+CPU11: online
+CPU12: online
+CPU13: online
+CPU14: online
+CPU15: online
+CPU16: online
+CPU17: online
+CPU18: online
+CPU19: online
+CPU20: online
+CPU21: online
+CPU22: online
+CPU23: online
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo
new file mode 100644
index 00000000..1d233f8d
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-2cpu-cpuinfo
@@ -0,0 +1,52 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 62
+model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+stepping : 4
+microcode : 0x1
+cpu MHz : 2799.998
+cache size : 16384 KB
+physical id : 0
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm pti fsgsbase tsc_adjust smep erms xsaveopt arat
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf
+bogomips : 5602.32
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 62
+model name : Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+stepping : 4
+microcode : 0x1
+cpu MHz : 2799.998
+cache size : 16384 KB
+physical id : 1
+siblings : 1
+core id : 0
+cpu cores : 1
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm pti fsgsbase tsc_adjust smep erms xsaveopt arat
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf
+bogomips : 5602.32
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management:
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo
new file mode 100644
index 00000000..fcc396db
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-4cpu-cpuinfo
@@ -0,0 +1,100 @@
+processor : 0
+vendor_id : AuthenticAMD
+cpu family : 15
+model : 65
+model name : Dual-Core AMD Opteron(tm) Processor 2216
+stepping : 2
+cpu MHz : 1000.000
+cache size : 1024 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 2
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 1
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall
+bogomips : 1994.60
+TLB size : 1024 4K pages
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management: ts fid vid ttp tm stc
+processor : 1
+vendor_id : AuthenticAMD
+cpu family : 15
+model : 65
+model name : Dual-Core AMD Opteron(tm) Processor 2216
+stepping : 2
+cpu MHz : 1000.000
+cache size : 1024 KB
+physical id : 0
+siblings : 2
+core id : 1
+cpu cores : 2
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 1
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall
+bogomips : 1994.60
+TLB size : 1024 4K pages
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management: ts fid vid ttp tm stc
+processor : 2
+vendor_id : AuthenticAMD
+cpu family : 15
+model : 65
+model name : Dual-Core AMD Opteron(tm) Processor 2216
+stepping : 2
+cpu MHz : 1000.000
+cache size : 1024 KB
+physical id : 1
+siblings : 2
+core id : 0
+cpu cores : 2
+apicid : 2
+initial apicid : 2
+fpu : yes
+fpu_exception : yes
+cpuid level : 1
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall
+bogomips : 1994.60
+TLB size : 1024 4K pages
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management: ts fid vid ttp tm stc
+processor : 3
+vendor_id : AuthenticAMD
+cpu family : 15
+model : 65
+model name : Dual-Core AMD Opteron(tm) Processor 2216
+stepping : 2
+cpu MHz : 1000.000
+cache size : 1024 KB
+physical id : 1
+siblings : 2
+core id : 1
+cpu cores : 2
+apicid : 3
+initial apicid : 3
+fpu : yes
+fpu_exception : yes
+cpuid level : 1
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt rdtscp lm 3dnowext 3dnow art rep_good nopl extd_apicid pni cx16 lahf_lm cmp_legacy svm extapic cr8_legacy retpoline_amd vmmcall
+bogomips : 1994.60
+TLB size : 1024 4K pages
+clflush size : 64
+cache_alignment : 64
+address sizes : 40 bits physical, 48 bits virtual
+power management: ts fid vid ttp tm stc
diff --git a/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo
new file mode 100644
index 00000000..63abea2c
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/cpuinfo/x86_64-8cpu-cpuinfo
@@ -0,0 +1,216 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 2703.625
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 0
+cpu cores : 4
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5388.06
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 3398.565
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 0
+cpu cores : 4
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5393.53
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 2
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 3390.325
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 1
+cpu cores : 4
+apicid : 2
+initial apicid : 2
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5391.63
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 3
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 3262.774
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 1
+cpu cores : 4
+apicid : 3
+initial apicid : 3
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5392.08
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 4
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 2905.169
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 2
+cpu cores : 4
+apicid : 4
+initial apicid : 4
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5391.97
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 5
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 1834.826
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 2
+cpu cores : 4
+apicid : 5
+initial apicid : 5
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5392.11
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 6
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 2781.573
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 3
+cpu cores : 4
+apicid : 6
+initial apicid : 6
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5391.98
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
+processor : 7
+vendor_id : GenuineIntel
+cpu family : 6
+model : 60
+model name : Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz
+stepping : 3
+microcode : 0x20
+cpu MHz : 3593.353
+cache size : 6144 KB
+physical id : 0
+siblings : 8
+core id : 3
+cpu cores : 4
+apicid : 7
+initial apicid : 7
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt dtherm ida arat pln pts
+bugs :
+bogomips : 5392.07
+clflush size : 64
+cache_alignment : 64
+address sizes : 39 bits physical, 48 bits virtual
+power management:
+
diff --git a/test/units/module_utils/facts/fixtures/distribution_files/ClearLinux b/test/units/module_utils/facts/fixtures/distribution_files/ClearLinux
new file mode 100644
index 00000000..a5442de4
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/distribution_files/ClearLinux
@@ -0,0 +1,10 @@
+NAME="Clear Linux OS"
+VERSION=1
+ID=clear-linux-os
+ID_LIKE=clear-linux-os
+VERSION_ID=28120
+PRETTY_NAME="Clear Linux OS"
+ANSI_COLOR="1;35"
+HOME_URL="https://clearlinux.org"
+SUPPORT_URL="https://clearlinux.org"
+BUG_REPORT_URL="mailto:dev@lists.clearlinux.org"
diff --git a/test/units/module_utils/facts/fixtures/distribution_files/CoreOS b/test/units/module_utils/facts/fixtures/distribution_files/CoreOS
new file mode 100644
index 00000000..806ce306
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/distribution_files/CoreOS
@@ -0,0 +1,10 @@
+NAME="Container Linux by CoreOS"
+ID=coreos
+VERSION=1911.5.0
+VERSION_ID=1911.5.0
+BUILD_ID=2018-12-15-2317
+PRETTY_NAME="Container Linux by CoreOS 1911.5.0 (Rhyolite)"
+ANSI_COLOR="38;5;75"
+HOME_URL="https://coreos.com/"
+BUG_REPORT_URL="https://issues.coreos.com"
+COREOS_BOARD="amd64-usr"
diff --git a/test/units/module_utils/facts/fixtures/distribution_files/LinuxMint b/test/units/module_utils/facts/fixtures/distribution_files/LinuxMint
new file mode 100644
index 00000000..850f6b78
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/distribution_files/LinuxMint
@@ -0,0 +1,12 @@
+NAME="Linux Mint"
+VERSION="19.1 (Tessa)"
+ID=linuxmint
+ID_LIKE=ubuntu
+PRETTY_NAME="Linux Mint 19.1"
+VERSION_ID="19.1"
+HOME_URL="https://www.linuxmint.com/"
+SUPPORT_URL="https://forums.ubuntu.com/"
+BUG_REPORT_URL="http://linuxmint-troubleshooting-guide.readthedocs.io/en/latest/"
+PRIVACY_POLICY_URL="https://www.linuxmint.com/"
+VERSION_CODENAME=tessa
+UBUNTU_CODENAME=bionic
diff --git a/test/units/module_utils/facts/fixtures/distribution_files/Slackware b/test/units/module_utils/facts/fixtures/distribution_files/Slackware
new file mode 100644
index 00000000..1147d297
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/distribution_files/Slackware
@@ -0,0 +1 @@
+Slackware 14.1
diff --git a/test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent b/test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent
new file mode 100644
index 00000000..62c046c8
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/distribution_files/SlackwareCurrent
@@ -0,0 +1 @@
+Slackware 14.2+
diff --git a/test/units/module_utils/facts/fixtures/findmount_output.txt b/test/units/module_utils/facts/fixtures/findmount_output.txt
new file mode 100644
index 00000000..299a2627
--- /dev/null
+++ b/test/units/module_utils/facts/fixtures/findmount_output.txt
@@ -0,0 +1,40 @@
+/sys sysfs sysfs rw,nosuid,nodev,noexec,relatime,seclabel
+/proc proc proc rw,nosuid,nodev,noexec,relatime
+/dev devtmpfs devtmpfs rw,nosuid,seclabel,size=8044400k,nr_inodes=2011100,mode=755
+/sys/kernel/security securityfs securityfs rw,nosuid,nodev,noexec,relatime
+/dev/shm tmpfs tmpfs rw,nosuid,nodev,seclabel
+/dev/pts devpts devpts rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000
+/run tmpfs tmpfs rw,nosuid,nodev,seclabel,mode=755
+/sys/fs/cgroup tmpfs tmpfs ro,nosuid,nodev,noexec,seclabel,mode=755
+/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,na
+/sys/fs/pstore pstore pstore rw,nosuid,nodev,noexec,relatime,seclabel
+/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices
+/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer
+/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory
+/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids
+/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio
+/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset
+/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct
+/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb
+/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event
+/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio
+/sys/kernel/config configfs configfs rw,relatime
+/ /dev/mapper/fedora_dhcp129--186-root ext4 rw,relatime,seclabel,data=ordered
+/sys/fs/selinux selinuxfs selinuxfs rw,relatime
+/proc/sys/fs/binfmt_misc systemd-1 autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct
+/sys/kernel/debug debugfs debugfs rw,relatime,seclabel
+/dev/hugepages hugetlbfs hugetlbfs rw,relatime,seclabel
+/tmp tmpfs tmpfs rw,seclabel
+/dev/mqueue mqueue mqueue rw,relatime,seclabel
+/var/lib/machines /dev/loop0 btrfs rw,relatime,seclabel,space_cache,subvolid=5,subvol=/
+/boot /dev/sda1 ext4 rw,relatime,seclabel,data=ordered
+/home /dev/mapper/fedora_dhcp129--186-home ext4 rw,relatime,seclabel,data=ordered
+/run/user/1000 tmpfs tmpfs rw,nosuid,nodev,relatime,seclabel,size=1611044k,mode=700,uid=1000,gid=1000
+/run/user/1000/gvfs gvfsd-fuse fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
+/sys/fs/fuse/connections fusectl fusectl rw,relatime
+/not/a/real/bind_mount /dev/sdz4[/some/other/path] ext4 rw,relatime,seclabel,data=ordered
+/home/adrian/sshfs-grimlock grimlock.g.a: fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
+/home/adrian/sshfs-grimlock-single-quote grimlock.g.a:test_path/path_with'single_quotes
+ fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
+/home/adrian/sshfs-grimlock-single-quote-2 grimlock.g.a:path_with'single_quotes fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
+/home/adrian/fotos grimlock.g.a:/mnt/data/foto's fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
diff --git a/test/units/module_utils/facts/hardware/__init__.py b/test/units/module_utils/facts/hardware/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/facts/hardware/__init__.py
diff --git a/test/units/module_utils/facts/hardware/linux_data.py b/test/units/module_utils/facts/hardware/linux_data.py
new file mode 100644
index 00000000..8e056769
--- /dev/null
+++ b/test/units/module_utils/facts/hardware/linux_data.py
@@ -0,0 +1,585 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
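+# Canned output for the lsblk-based UUID lookup, presumably captured from
+# something like 'lsblk --list --noheadings --paths --output NAME,UUID'.
+# Note that /dev/loop9 appears twice with different UUIDs, exercising how
+# the parser handles repeated device names.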
+LSBLK_OUTPUT = b"""
+/dev/sda
+/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
+/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
+/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
+/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
+/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
+/dev/sr0
+/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
+/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
+/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
+/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
+/dev/mapper/docker-253:1-1050967-pool
+/dev/loop2
+/dev/mapper/docker-253:1-1050967-pool
+"""
+
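+# Variant of the output above that adds a mapper device whose name contains
+# spaces; test_lsblk_uuid_dev_with_space_in_name below relies on it.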
+LSBLK_OUTPUT_2 = b"""
+/dev/sda
+/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
+/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
+/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
+/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
+/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
+/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
+/dev/sr0
+/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
+"""
+
+LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
+
+UDEVADM_UUID = 'N/A'
+
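+# Property-style output as produced by something like
+# 'udevadm info --query property --name /dev/vda1'; _udevadm_uuid is
+# expected to pick ID_FS_UUID out of it (see test_udevadm_uuid below).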
+UDEVADM_OUTPUT = """
+UDEV_LOG=3
+DEVPATH=/devices/pci0000:00/0000:00:07.0/virtio2/block/vda/vda1
+MAJOR=252
+MINOR=1
+DEVNAME=/dev/vda1
+DEVTYPE=partition
+SUBSYSTEM=block
+MPATH_SBIN_PATH=/sbin
+ID_PATH=pci-0000:00:07.0-virtio-pci-virtio2
+ID_PART_TABLE_TYPE=dos
+ID_FS_UUID=57b1a3e7-9019-4747-9809-7ec52bba9179
+ID_FS_UUID_ENC=57b1a3e7-9019-4747-9809-7ec52bba9179
+ID_FS_VERSION=1.0
+ID_FS_TYPE=ext4
+ID_FS_USAGE=filesystem
+LVM_SBIN_PATH=/sbin
+DEVLINKS=/dev/block/252:1 /dev/disk/by-path/pci-0000:00:07.0-virtio-pci-virtio2-part1 /dev/disk/by-uuid/57b1a3e7-9019-4747-9809-7ec52bba9179
+"""
+
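+# Raw /etc/mtab-style text; MTAB_ENTRIES below is its whitespace-split
+# counterpart (plus two synthetic entries used by the mount-facts test).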
+MTAB = """
+sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
+devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
+tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
+cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
+pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
+cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
+cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
+cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
+cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
+cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
+cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
+cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
+cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
+cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
+cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
+configfs /sys/kernel/config configfs rw,relatime 0 0
+/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
+selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
+debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
+tmpfs /tmp tmpfs rw,seclabel 0 0
+mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
+/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
+/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
+/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
+tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
+gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
+grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+"""
+
+MTAB_ENTRIES = [
+ [
+ 'sysfs',
+ '/sys',
+ 'sysfs',
+ 'rw,seclabel,nosuid,nodev,noexec,relatime',
+ '0',
+ '0'
+ ],
+ ['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
+ [
+ 'devtmpfs',
+ '/dev',
+ 'devtmpfs',
+ 'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
+ '0',
+ '0'
+ ],
+ [
+ 'securityfs',
+ '/sys/kernel/security',
+ 'securityfs',
+ 'rw,nosuid,nodev,noexec,relatime',
+ '0',
+ '0'
+ ],
+ ['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
+ [
+ 'devpts',
+ '/dev/pts',
+ 'devpts',
+ 'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
+ '0',
+ '0'
+ ],
+ ['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
+ [
+ 'tmpfs',
+ '/sys/fs/cgroup',
+ 'tmpfs',
+ 'ro,seclabel,nosuid,nodev,noexec,mode=755',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/systemd',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
+ '0',
+ '0'
+ ],
+ [
+ 'pstore',
+ '/sys/fs/pstore',
+ 'pstore',
+ 'rw,seclabel,nosuid,nodev,noexec,relatime',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/devices',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,devices',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/freezer',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,freezer',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/memory',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,memory',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/pids',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,pids',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/blkio',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,blkio',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/cpuset',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,cpuset',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/cpu,cpuacct',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/hugetlb',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,hugetlb',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/perf_event',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,perf_event',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/net_cls,net_prio',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
+ '0',
+ '0'
+ ],
+ ['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
+ [
+ '/dev/mapper/fedora_dhcp129--186-root',
+ '/',
+ 'ext4',
+ 'rw,seclabel,relatime,data=ordered',
+ '0',
+ '0'
+ ],
+ ['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
+ [
+ 'systemd-1',
+ '/proc/sys/fs/binfmt_misc',
+ 'autofs',
+ 'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
+ '0',
+ '0'
+ ],
+ ['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
+ [
+ 'hugetlbfs',
+ '/dev/hugepages',
+ 'hugetlbfs',
+ 'rw,seclabel,relatime',
+ '0',
+ '0'
+ ],
+ ['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
+ ['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
+ [
+ '/dev/loop0',
+ '/var/lib/machines',
+ 'btrfs',
+ 'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
+ '0',
+ '0'
+ ],
+ ['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+ # A 'none' fstype
+ ['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+ # let's assume this is a bind mount
+ ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+ [
+ '/dev/mapper/fedora_dhcp129--186-home',
+ '/home',
+ 'ext4',
+ 'rw,seclabel,relatime,data=ordered',
+ '0',
+ '0'
+ ],
+ [
+ 'tmpfs',
+ '/run/user/1000',
+ 'tmpfs',
+ 'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
+ '0',
+ '0'
+ ],
+ [
+ 'gvfsd-fuse',
+ '/run/user/1000/gvfs',
+ 'fuse.gvfsd-fuse',
+ 'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
+ '0',
+ '0'
+ ],
+ ['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
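+
+# A minimal sketch (not itself part of the fixtures) of the relationship
+# between MTAB and MTAB_ENTRIES, assuming plain whitespace splitting is
+# what the parser does:
+#
+#   parsed = [line.split() for line in MTAB.splitlines() if line.strip()]
+#
+# len(parsed) == 38 (matching test_get_mtab_entries below); MTAB_ENTRIES
+# additionally carries two synthetic rows (/dev/sdz3 and /dev/sdz4).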
+
+STATVFS_INFO = {'/': {'block_available': 10192323,
+ 'block_size': 4096,
+ 'block_total': 12868728,
+ 'block_used': 2676405,
+ 'inode_available': 3061699,
+ 'inode_total': 3276800,
+ 'inode_used': 215101,
+ 'size_available': 41747755008,
+ 'size_total': 52710309888},
+ '/not/a/real/bind_mount': {},
+ '/home': {'block_available': 1001578731,
+ 'block_size': 4096,
+ 'block_total': 105871006,
+ 'block_used': 5713133,
+ 'inode_available': 26860880,
+ 'inode_total': 26902528,
+ 'inode_used': 41648,
+ 'size_available': 410246647808,
+ 'size_total': 433647640576},
+ '/var/lib/machines': {'block_available': 10192316,
+ 'block_size': 4096,
+ 'block_total': 12868728,
+ 'block_used': 2676412,
+ 'inode_available': 3061699,
+ 'inode_total': 3276800,
+ 'inode_used': 215101,
+ 'size_available': 41747726336,
+ 'size_total': 52710309888},
+ '/boot': {'block_available': 187585,
+ 'block_size': 4096,
+ 'block_total': 249830,
+ 'block_used': 62245,
+ 'inode_available': 65096,
+ 'inode_total': 65536,
+ 'inode_used': 440,
+ 'size_available': 768348160,
+ 'size_total': 1023303680}
+ }
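+# The empty dict for '/not/a/real/bind_mount' presumably models a mount
+# point for which no statvfs data could be gathered.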
+
+# BIND_MOUNTS corresponds to the synthetic '/dev/sdz4' row in MTAB_ENTRIES:
+# ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0']
+
+BIND_MOUNTS = ['/not/a/real/bind_mount']
+
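+# Each scenario couples raw /proc/cpuinfo fixture lines with the mocked
+# environment ('architecture', 'nproc_out', 'sched_getaffinity') and the
+# fact values get_cpu_facts() is expected to derive ('expected_result').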
+CPU_INFO_TEST_SCENARIOS = [
+ {
+ 'architecture': 'armv6l',
+ 'nproc_out': 1,
+ 'sched_getaffinity': set([0]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv6-rev7-1cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': ['0', 'ARMv6-compatible processor rev 7 (v6l)'],
+ 'processor_cores': 1,
+ 'processor_count': 1,
+ 'processor_nproc': 1,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 1},
+ },
+ {
+ 'architecture': 'armv7l',
+ 'nproc_out': 4,
+ 'sched_getaffinity': set([0, 1, 2, 3]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv7-rev4-4cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': [
+ '0', 'ARMv7 Processor rev 4 (v7l)',
+ '1', 'ARMv7 Processor rev 4 (v7l)',
+ '2', 'ARMv7 Processor rev 4 (v7l)',
+ '3', 'ARMv7 Processor rev 4 (v7l)',
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 4,
+ 'processor_nproc': 4,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 4},
+ },
+ {
+ 'architecture': 'aarch64',
+ 'nproc_out': 4,
+ 'sched_getaffinity': set([0, 1, 2, 3]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/aarch64-4cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': [
+ '0', 'AArch64 Processor rev 4 (aarch64)',
+ '1', 'AArch64 Processor rev 4 (aarch64)',
+ '2', 'AArch64 Processor rev 4 (aarch64)',
+ '3', 'AArch64 Processor rev 4 (aarch64)',
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 4,
+ 'processor_nproc': 4,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 4},
+ },
+ {
+ 'architecture': 'x86_64',
+ 'nproc_out': 4,
+ 'sched_getaffinity': set([0, 1, 2, 3]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-4cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': [
+ '0', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
+ '1', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
+ '2', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
+ '3', 'AuthenticAMD', 'Dual-Core AMD Opteron(tm) Processor 2216',
+ ],
+ 'processor_cores': 2,
+ 'processor_count': 2,
+ 'processor_nproc': 4,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 4},
+ },
+ {
+ 'architecture': 'x86_64',
+ 'nproc_out': 4,
+ 'sched_getaffinity': set([0, 1, 2, 3]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-8cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': [
+ '0', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '1', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '2', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '3', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '4', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '5', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '6', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ '7', 'GenuineIntel', 'Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz',
+ ],
+ 'processor_cores': 4,
+ 'processor_count': 1,
+ 'processor_nproc': 4,
+ 'processor_threads_per_core': 2,
+ 'processor_vcpus': 8},
+ },
+ {
+ 'architecture': 'arm64',
+ 'nproc_out': 4,
+ 'sched_getaffinity': set([0, 1, 2, 3]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/arm64-4cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': ['0', '1', '2', '3'],
+ 'processor_cores': 1,
+ 'processor_count': 4,
+ 'processor_nproc': 4,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 4},
+ },
+ {
+ 'architecture': 'armv7l',
+ 'nproc_out': 8,
+ 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/armv7-rev3-8cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': [
+ '0', 'ARMv7 Processor rev 3 (v7l)',
+ '1', 'ARMv7 Processor rev 3 (v7l)',
+ '2', 'ARMv7 Processor rev 3 (v7l)',
+ '3', 'ARMv7 Processor rev 3 (v7l)',
+ '4', 'ARMv7 Processor rev 3 (v7l)',
+ '5', 'ARMv7 Processor rev 3 (v7l)',
+ '6', 'ARMv7 Processor rev 3 (v7l)',
+ '7', 'ARMv7 Processor rev 3 (v7l)',
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 8,
+ 'processor_nproc': 8,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 8},
+ },
+ {
+ 'architecture': 'x86_64',
+ 'nproc_out': 2,
+ 'sched_getaffinity': set([0, 1]),
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/x86_64-2cpu-cpuinfo')).readlines(),
+ 'expected_result': {
+ 'processor': [
+ '0', 'GenuineIntel', 'Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz',
+ '1', 'GenuineIntel', 'Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz',
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 2,
+ 'processor_nproc': 2,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 2},
+ },
+ {
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/ppc64-power7-rhel7-8cpu-cpuinfo')).readlines(),
+ 'architecture': 'ppc64',
+ 'nproc_out': 8,
+ 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'expected_result': {
+ 'processor': [
+ '0', 'POWER7 (architected), altivec supported',
+ '1', 'POWER7 (architected), altivec supported',
+ '2', 'POWER7 (architected), altivec supported',
+ '3', 'POWER7 (architected), altivec supported',
+ '4', 'POWER7 (architected), altivec supported',
+ '5', 'POWER7 (architected), altivec supported',
+ '6', 'POWER7 (architected), altivec supported',
+ '7', 'POWER7 (architected), altivec supported'
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 8,
+ 'processor_nproc': 8,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 8
+ },
+ },
+ {
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/ppc64le-power8-24cpu-cpuinfo')).readlines(),
+ 'architecture': 'ppc64le',
+ 'nproc_out': 24,
+ 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
+ 'expected_result': {
+ 'processor': [
+ '0', 'POWER8 (architected), altivec supported',
+ '1', 'POWER8 (architected), altivec supported',
+ '2', 'POWER8 (architected), altivec supported',
+ '3', 'POWER8 (architected), altivec supported',
+ '4', 'POWER8 (architected), altivec supported',
+ '5', 'POWER8 (architected), altivec supported',
+ '6', 'POWER8 (architected), altivec supported',
+ '7', 'POWER8 (architected), altivec supported',
+ '8', 'POWER8 (architected), altivec supported',
+ '9', 'POWER8 (architected), altivec supported',
+ '10', 'POWER8 (architected), altivec supported',
+ '11', 'POWER8 (architected), altivec supported',
+ '12', 'POWER8 (architected), altivec supported',
+ '13', 'POWER8 (architected), altivec supported',
+ '14', 'POWER8 (architected), altivec supported',
+ '15', 'POWER8 (architected), altivec supported',
+ '16', 'POWER8 (architected), altivec supported',
+ '17', 'POWER8 (architected), altivec supported',
+ '18', 'POWER8 (architected), altivec supported',
+ '19', 'POWER8 (architected), altivec supported',
+ '20', 'POWER8 (architected), altivec supported',
+ '21', 'POWER8 (architected), altivec supported',
+ '22', 'POWER8 (architected), altivec supported',
+ '23', 'POWER8 (architected), altivec supported',
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 24,
+ 'processor_nproc': 24,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 24
+ },
+ },
+ {
+ 'cpuinfo': open(os.path.join(os.path.dirname(__file__), '../fixtures/cpuinfo/sparc-t5-debian-ldom-24vcpu')).readlines(),
+ 'architecture': 'sparc64',
+ 'nproc_out': 24,
+ 'sched_getaffinity': set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]),
+ 'expected_result': {
+ 'processor': [
+ 'UltraSparc T5 (Niagara5)',
+ ],
+ 'processor_cores': 1,
+ 'processor_count': 24,
+ 'processor_nproc': 24,
+ 'processor_threads_per_core': 1,
+ 'processor_vcpus': 24
+ },
+ },
+]
diff --git a/test/units/module_utils/facts/hardware/test_linux.py b/test/units/module_utils/facts/hardware/test_linux.py
new file mode 100644
index 00000000..6e77683a
--- /dev/null
+++ b/test/units/module_utils/facts/hardware/test_linux.py
@@ -0,0 +1,175 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from units.compat import unittest
+from units.compat.mock import Mock, patch
+
+from ansible.module_utils.facts import timeout
+
+from ansible.module_utils.facts.hardware import linux
+
+from . linux_data import LSBLK_OUTPUT, LSBLK_OUTPUT_2, LSBLK_UUIDS, MTAB, MTAB_ENTRIES, BIND_MOUNTS, STATVFS_INFO, UDEVADM_UUID, UDEVADM_OUTPUT
+
+with open(os.path.join(os.path.dirname(__file__), '../fixtures/findmount_output.txt')) as f:
+ FINDMNT_OUTPUT = f.read()
+
+GET_MOUNT_SIZE = {}
+
+
+def mock_get_mount_size(mountpoint):
+ return STATVFS_INFO.get(mountpoint, {})
+
+
+class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
+
+ # FIXME: mock.patch instead
+ def setUp(self):
+ timeout.GATHER_TIMEOUT = 10
+
+ def tearDown(self):
+ timeout.GATHER_TIMEOUT = None
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
+ @patch('ansible.module_utils.facts.hardware.linux.get_mount_size', side_effect=mock_get_mount_size)
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID)
+ def test_get_mount_facts(self,
+ mock_get_mount_size,
+ mock_lsblk_uuid,
+ mock_find_bind_mounts,
+ mock_mtab_entries,
+ mock_udevadm_uuid):
+ module = Mock()
+ # Build a LinuxHardware-ish instance without loading facts on init
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+
+ # get_mount_facts() returns a dict of mount facts
+ mount_facts = lh.get_mount_facts()
+ self.assertIsInstance(mount_facts, dict)
+ self.assertIn('mounts', mount_facts)
+ self.assertIsInstance(mount_facts['mounts'], list)
+ self.assertIsInstance(mount_facts['mounts'][0], dict)
+
+ home_expected = {'block_available': 1001578731,
+ 'block_size': 4096,
+ 'block_total': 105871006,
+ 'block_used': 5713133,
+ 'device': '/dev/mapper/fedora_dhcp129--186-home',
+ 'fstype': 'ext4',
+ 'inode_available': 26860880,
+ 'inode_total': 26902528,
+ 'inode_used': 41648,
+ 'mount': '/home',
+ 'options': 'rw,seclabel,relatime,data=ordered',
+ 'size_available': 410246647808,
+ 'size_total': 433647640576,
+ 'uuid': 'N/A'}
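+ # 'uuid' presumably falls back to the mocked _udevadm_uuid (UDEVADM_UUID,
+ # i.e. 'N/A') because the /home device is absent from LSBLK_UUIDS.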
+ home_info = [x for x in mount_facts['mounts'] if x['mount'] == '/home'][0]
+
+ self.maxDiff = 4096
+ self.assertDictEqual(home_info, home_expected)
+
+ @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
+ def test_get_mtab_entries(self, mock_get_file_content):
+
+ module = Mock()
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ mtab_entries = lh._mtab_entries()
+ self.assertIsInstance(mtab_entries, list)
+ self.assertIsInstance(mtab_entries[0], list)
+ self.assertEqual(len(mtab_entries), 38)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
+ def test_find_bind_mounts(self, mock_run_findmnt):
+ module = Mock()
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ bind_mounts = lh._find_bind_mounts()
+
+ # If bind_mounts becomes another seq type, feel free to change
+ self.assertIsInstance(bind_mounts, set)
+ self.assertEqual(len(bind_mounts), 1)
+ self.assertIn('/not/a/real/bind_mount', bind_mounts)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
+ def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
+ module = Mock()
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ bind_mounts = lh._find_bind_mounts()
+
+ self.assertIsInstance(bind_mounts, set)
+ self.assertEqual(len(bind_mounts), 0)
+
+ def test_find_bind_mounts_no_findmnts(self):
+ module = Mock()
+ module.get_bin_path = Mock(return_value=None)
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ bind_mounts = lh._find_bind_mounts()
+
+ self.assertIsInstance(bind_mounts, set)
+ self.assertEqual(len(bind_mounts), 0)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
+ def test_lsblk_uuid(self, mock_run_lsblk):
+ module = Mock()
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertIn(b'/dev/loop9', lsblk_uuids)
+ self.assertIn(b'/dev/sda1', lsblk_uuids)
+ self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
+ def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
+ module = Mock()
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertEqual(len(lsblk_uuids), 0)
+
+ def test_lsblk_uuid_no_lsblk(self):
+ module = Mock()
+ module.get_bin_path = Mock(return_value=None)
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertEqual(len(lsblk_uuids), 0)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
+ def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
+ module = Mock()
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertIn(b'/dev/loop0', lsblk_uuids)
+ self.assertIn(b'/dev/sda1', lsblk_uuids)
+ self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
+ self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
+
+ def test_udevadm_uuid(self):
+ module = Mock()
+ module.run_command = Mock(return_value=(0, UDEVADM_OUTPUT, '')) # (rc, out, err)
+ lh = linux.LinuxHardware(module=module, load_on_init=False)
+ udevadm_uuid = lh._udevadm_uuid('mock_device')
+
+ self.assertEqual(udevadm_uuid, '57b1a3e7-9019-4747-9809-7ec52bba9179')
diff --git a/test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py b/test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py
new file mode 100644
index 00000000..aea8694e
--- /dev/null
+++ b/test/units/module_utils/facts/hardware/test_linux_get_cpu_info.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.facts.hardware import linux
+
+from . linux_data import CPU_INFO_TEST_SCENARIOS
+
+
+def test_get_cpu_info(mocker):
+ module = mocker.Mock()
+ inst = linux.LinuxHardware(module)
+
+ mocker.patch('os.path.exists', return_value=False)
+ mocker.patch('os.access', return_value=True)
+ for test in CPU_INFO_TEST_SCENARIOS:
+ mocker.patch('ansible.module_utils.facts.hardware.linux.get_file_lines', side_effect=[[], test['cpuinfo']])
+ mocker.patch('os.sched_getaffinity', create=True, return_value=test['sched_getaffinity'])
+ module.run_command.return_value = (0, test['nproc_out'], '')
+ collected_facts = {'ansible_architecture': test['architecture']}
+
+ assert test['expected_result'] == inst.get_cpu_facts(collected_facts=collected_facts)
+
+
+def test_get_cpu_info_nproc(mocker):
+ module = mocker.Mock()
+ inst = linux.LinuxHardware(module)
+
+ mocker.patch('os.path.exists', return_value=False)
+ mocker.patch('os.access', return_value=True)
+ for test in CPU_INFO_TEST_SCENARIOS:
+ mocker.patch('ansible.module_utils.facts.hardware.linux.get_file_lines', side_effect=[[], test['cpuinfo']])
+ mocker.patch('os.sched_getaffinity', create=True, side_effect=AttributeError)
+ mocker.patch('ansible.module_utils.facts.hardware.linux.get_bin_path', return_value='/usr/bin/nproc')
+ module.run_command.return_value = (0, test['nproc_out'], '')
+ collected_facts = {'ansible_architecture': test['architecture']}
+
+ assert test['expected_result'] == inst.get_cpu_facts(collected_facts=collected_facts)
+
+
+def test_get_cpu_info_missing_arch(mocker):
+ module = mocker.Mock()
+ inst = linux.LinuxHardware(module)
+
+ # ARM and Power will report incorrect processor count if architecture is not available
+ mocker.patch('os.path.exists', return_value=False)
+ mocker.patch('os.access', return_value=True)
+ for test in CPU_INFO_TEST_SCENARIOS:
+ mocker.patch('ansible.module_utils.facts.hardware.linux.get_file_lines', side_effect=[[], test['cpuinfo']])
+ mocker.patch('os.sched_getaffinity', create=True, return_value=test['sched_getaffinity'])
+
+ module.run_command.return_value = (0, test['nproc_out'], '')
+
+ test_result = inst.get_cpu_facts()
+
+ if test['architecture'].startswith(('armv', 'aarch', 'ppc')):
+ assert test['expected_result'] != test_result
+ else:
+ assert test['expected_result'] == test_result
diff --git a/test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py b/test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py
new file mode 100644
index 00000000..e14a2da8
--- /dev/null
+++ b/test/units/module_utils/facts/hardware/test_sunos_get_uptime_facts.py
@@ -0,0 +1,20 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+from ansible.module_utils.facts.hardware import sunos
+
+
+def test_sunos_get_uptime_facts(mocker):
+ kstat_output = '\nunix:0:system_misc:boot_time\t1548249689\n'
+
+ module_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule')
+ module = module_mock()
+ module.run_command.return_value = (0, kstat_output, '')
+
+ inst = sunos.SunOSHardware(module)
+
+ mocker.patch('time.time', return_value=1567052602.5089788)
+ expected = int(time.time()) - 1548249689
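+ # i.e. int(1567052602.5089788) - 1548249689 == 18802913 seconds of uptime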
+ result = inst.get_uptime_facts()
+ assert expected == result['uptime_seconds']
diff --git a/test/units/module_utils/facts/network/__init__.py b/test/units/module_utils/facts/network/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/facts/network/__init__.py
diff --git a/test/units/module_utils/facts/network/test_fc_wwn.py b/test/units/module_utils/facts/network/test_fc_wwn.py
new file mode 100644
index 00000000..b98ae378
--- /dev/null
+++ b/test/units/module_utils/facts/network/test_fc_wwn.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.facts.network import fc_wwn
+from units.compat.mock import Mock
+
+
+# AIX lsdev
+LSDEV_OUTPUT = """
+fcs0 Defined 00-00 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03)
+fcs1 Available 04-00 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03)
+"""
+
+# truncated lscfg output (Device Specific fields between Z0 and ZC omitted)
+LSCFG_OUTPUT = """
+ fcs1 U78CB.001.WZS00ZS-P1-C9-T1 8Gb PCI Express Dual Port FC Adapter (df1000f114108a03)
+
+ Part Number.................00E0806
+ Serial Number...............1C4090830F
+ Manufacturer................001C
+ EC Level.................... D77161
+ Customer Card ID Number.....577D
+ FRU Number..................00E0806
+ Device Specific.(ZM)........3
+ Network Address.............10000090FA551508
+ ROS Level and ID............027820B7
+ Device Specific.(Z0)........31004549
+ Device Specific.(ZC)........00000000
+ Hardware Location Code......U78CB.001.WZS00ZS-P1-C9-T1
+"""
+
+# Solaris
+FCINFO_OUTPUT = """
+HBA Port WWN: 10000090fa1658de
+ Port Mode: Initiator
+ Port ID: 30100
+ OS Device Name: /dev/cfg/c13
+ Manufacturer: Emulex
+ Model: LPe12002-S
+ Firmware Version: LPe12002-S 2.01a12
+ FCode/BIOS Version: Boot:5.03a0 Fcode:3.01a1
+ Serial Number: 4925381+13090001ER
+ Driver Name: emlxs
+ Driver Version: 3.3.00.1 (2018.01.05.16.30)
+ Type: N-port
+ State: online
+ Supported Speeds: 2Gb 4Gb 8Gb
+ Current Speed: 8Gb
+ Node WWN: 20000090fa1658de
+ NPIV Not Supported
+"""
+
+
+def mock_get_bin_path(cmd, required=False):
+ result = None
+ if cmd == 'lsdev':
+ result = '/usr/sbin/lsdev'
+ elif cmd == 'lscfg':
+ result = '/usr/sbin/lscfg'
+ elif cmd == 'fcinfo':
+ result = '/usr/sbin/fcinfo'
+ return result
+
+
+def mock_run_command(cmd):
+ rc = 0
+ if 'lsdev' in cmd:
+ result = LSDEV_OUTPUT
+ elif 'lscfg' in cmd:
+ result = LSCFG_OUTPUT
+ elif 'fcinfo' in cmd:
+ result = FCINFO_OUTPUT
+ else:
+ rc = 1
+ result = 'Error'
+ return (rc, result, '')
+
+
+def test_get_fc_wwn_info(mocker):
+ module = Mock()
+ inst = fc_wwn.FcWwnInitiatorFactCollector()
+
+ mocker.patch.object(module, 'get_bin_path', side_effect=mock_get_bin_path)
+ mocker.patch.object(module, 'run_command', side_effect=mock_run_command)
+
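+ # The platform key selects the collector's code path ('aix6' presumably
+ # drives the lsdev/lscfg branch, 'sunos5' the fcinfo branch); the values
+ # are the WWNs expected from the fixture outputs above.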
+ d = {'aix6': ['10000090FA551508'], 'sunos5': ['10000090fa1658de']}
+ for key, value in d.items():
+ mocker.patch('sys.platform', key)
+ wwn_expected = {"fibre_channel_wwn": value}
+ assert wwn_expected == inst.collect(module=module)
diff --git a/test/units/module_utils/facts/network/test_generic_bsd.py b/test/units/module_utils/facts/network/test_generic_bsd.py
new file mode 100644
index 00000000..afb698c5
--- /dev/null
+++ b/test/units/module_utils/facts/network/test_generic_bsd.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import Mock
+from units.compat import unittest
+
+from ansible.module_utils.facts.network import generic_bsd
+
+
+def get_bin_path(command):
+ if command == 'ifconfig':
+ return 'fake/ifconfig'
+ elif command == 'route':
+ return 'fake/route'
+ return None
+
+
+netbsd_ifconfig_a_out_7_1 = r'''
+lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33624
+ inet 127.0.0.1 netmask 0xff000000
+ inet6 ::1 prefixlen 128
+ inet6 fe80::1%lo0 prefixlen 64 scopeid 0x1
+re0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> mtu 1500
+ capabilities=3f80<TSO4,IP4CSUM_Rx,IP4CSUM_Tx,TCP4CSUM_Rx,TCP4CSUM_Tx>
+ capabilities=3f80<UDP4CSUM_Rx,UDP4CSUM_Tx>
+ enabled=0
+ ec_capabilities=3<VLAN_MTU,VLAN_HWTAGGING>
+ ec_enabled=0
+ address: 52:54:00:63:55:af
+ media: Ethernet autoselect (100baseTX full-duplex)
+ status: active
+ inet 192.168.122.205 netmask 0xffffff00 broadcast 192.168.122.255
+ inet6 fe80::5054:ff:fe63:55af%re0 prefixlen 64 scopeid 0x2
+'''
+
+netbsd_ifconfig_a_out_post_7_1 = r'''
+lo0: flags=0x8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33624
+ inet 127.0.0.1/8 flags 0x0
+ inet6 ::1/128 flags 0x20<NODAD>
+ inet6 fe80::1%lo0/64 flags 0x0 scopeid 0x1
+re0: flags=0x8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> mtu 1500
+ capabilities=3f80<TSO4,IP4CSUM_Rx,IP4CSUM_Tx,TCP4CSUM_Rx,TCP4CSUM_Tx>
+ capabilities=3f80<UDP4CSUM_Rx,UDP4CSUM_Tx>
+ enabled=0
+ ec_capabilities=3<VLAN_MTU,VLAN_HWTAGGING>
+ ec_enabled=0
+ address: 52:54:00:63:55:af
+ media: Ethernet autoselect (100baseTX full-duplex)
+ status: active
+ inet 192.168.122.205/24 broadcast 192.168.122.255 flags 0x0
+ inet6 fe80::5054:ff:fe63:55af%re0/64 flags 0x0 scopeid 0x2
+'''
+
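+# Both ifconfig captures above are expected to normalize to this one fact
+# dict; test_netbsd_ifconfig_old_and_new asserts that the old and new
+# formats parse identically.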
+NETBSD_EXPECTED = {'all_ipv4_addresses': ['192.168.122.205'],
+ 'all_ipv6_addresses': ['fe80::5054:ff:fe63:55af%re0'],
+ 'default_ipv4': {},
+ 'default_ipv6': {},
+ 'interfaces': ['lo0', 're0'],
+ 'lo0': {'device': 'lo0',
+ 'flags': ['UP', 'LOOPBACK', 'RUNNING', 'MULTICAST'],
+ 'ipv4': [{'address': '127.0.0.1',
+ 'broadcast': '127.255.255.255',
+ 'netmask': '255.0.0.0',
+ 'network': '127.0.0.0'}],
+ 'ipv6': [{'address': '::1', 'prefix': '128'},
+ {'address': 'fe80::1%lo0', 'prefix': '64', 'scope': '0x1'}],
+ 'macaddress': 'unknown',
+ 'mtu': '33624',
+ 'type': 'loopback'},
+ 're0': {'device': 're0',
+ 'flags': ['UP', 'BROADCAST', 'RUNNING', 'SIMPLEX', 'MULTICAST'],
+ 'ipv4': [{'address': '192.168.122.205',
+ 'broadcast': '192.168.122.255',
+ 'netmask': '255.255.255.0',
+ 'network': '192.168.122.0'}],
+ 'ipv6': [{'address': 'fe80::5054:ff:fe63:55af%re0',
+ 'prefix': '64',
+ 'scope': '0x2'}],
+ 'macaddress': 'unknown',
+ 'media': 'Ethernet',
+ 'media_options': [],
+ 'media_select': 'autoselect',
+ 'media_type': '100baseTX',
+ 'mtu': '1500',
+ 'status': 'active',
+ 'type': 'ether'}}
+
+
+def run_command_old_ifconfig(command):
+ if command == 'fake/route':
+ return 0, 'Foo', ''
+ if command == ['fake/ifconfig', '-a']:
+ return 0, netbsd_ifconfig_a_out_7_1, ''
+ return 1, '', ''
+
+
+def run_command_post_7_1_ifconfig(command):
+ if command == 'fake/route':
+ return 0, 'Foo', ''
+ if command == ['fake/ifconfig', '-a']:
+ return 0, netbsd_ifconfig_a_out_post_7_1, ''
+ return 1, '', ''
+
+
+class TestGenericBsdNetworkNetBSD(unittest.TestCase):
+ gather_subset = ['all']
+
+ def setUp(self):
+ self.maxDiff = None
+ self.longMessage = True
+
+ # TODO: extract module run_command/get_bin_path usage to methods I can mock without mocking all of run_command
+ def test(self):
+ module = self._mock_module()
+ module.get_bin_path.side_effect = get_bin_path
+ module.run_command.side_effect = run_command_old_ifconfig
+
+ bsd_net = generic_bsd.GenericBsdIfconfigNetwork(module)
+
+ res = bsd_net.populate()
+ self.assertDictEqual(res, NETBSD_EXPECTED)
+
+ def test_ifconfig_post_7_1(self):
+ module = self._mock_module()
+ module.get_bin_path.side_effect = get_bin_path
+ module.run_command.side_effect = run_command_post_7_1_ifconfig
+
+ bsd_net = generic_bsd.GenericBsdIfconfigNetwork(module)
+
+ res = bsd_net.populate()
+ self.assertDictEqual(res, NETBSD_EXPECTED)
+
+ def test_netbsd_ifconfig_old_and_new(self):
+ module_new = self._mock_module()
+ module_new.get_bin_path.side_effect = get_bin_path
+ module_new.run_command.side_effect = run_command_post_7_1_ifconfig
+
+ bsd_net_new = generic_bsd.GenericBsdIfconfigNetwork(module_new)
+ res_new = bsd_net_new.populate()
+
+ module_old = self._mock_module()
+ module_old.get_bin_path.side_effect = get_bin_path
+ module_old.run_command.side_effect = run_command_old_ifconfig
+
+ bsd_net_old = generic_bsd.GenericBsdIfconfigNetwork(module_old)
+ res_old = bsd_net_old.populate()
+
+ self.assertDictEqual(res_old, res_new)
+ self.assertDictEqual(res_old, NETBSD_EXPECTED)
+ self.assertDictEqual(res_new, NETBSD_EXPECTED)
+
+ def _mock_module(self):
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': self.gather_subset,
+ 'gather_timeout': 5,
+ 'filter': '*'}
+ mock_module.get_bin_path = Mock(return_value=None)
+ return mock_module
diff --git a/test/units/module_utils/facts/network/test_iscsi_get_initiator.py b/test/units/module_utils/facts/network/test_iscsi_get_initiator.py
new file mode 100644
index 00000000..2048ba2a
--- /dev/null
+++ b/test/units/module_utils/facts/network/test_iscsi_get_initiator.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.facts.network import iscsi
+from units.compat.mock import Mock
+
+
+# AIX # lsattr -E -l iscsi0
+LSATTR_OUTPUT = """
+disc_filename /etc/iscsi/targets Configuration file False
+disc_policy file Discovery Policy True
+initiator_name iqn.localhost.hostid.7f000002 iSCSI Initiator Name True
+isns_srvnames auto iSNS Servers IP Addresses True
+isns_srvports iSNS Servers Port Numbers True
+max_targets 16 Maximum Targets Allowed True
+num_cmd_elems 200 Maximum number of commands to queue to driver True
+"""
+
+# HP-UX # iscsiutil -l
+ISCSIUTIL_OUTPUT = """
+Initiator Name : iqn.2001-04.com.hp.stor:svcio
+Initiator Alias :
+Authentication Method : None
+CHAP Method : CHAP_UNI
+Initiator CHAP Name :
+CHAP Secret :
+NAS Hostname :
+NAS Secret :
+Radius Server Hostname :
+Header Digest : None,CRC32C (default)
+Data Digest : None,CRC32C (default)
+SLP Scope list for iSLPD :
+"""
+
+
+def test_get_iscsi_info(mocker):
+ module = Mock()
+ inst = iscsi.IscsiInitiatorNetworkCollector()
+
+ mocker.patch('sys.platform', 'aix6')
+ mocker.patch('ansible.module_utils.facts.network.iscsi.get_bin_path', return_value='/usr/sbin/lsattr')
+ mocker.patch.object(module, 'run_command', return_value=(0, LSATTR_OUTPUT, ''))
+ aix_iscsi_expected = {"iscsi_iqn": "iqn.localhost.hostid.7f000002"}
+ assert aix_iscsi_expected == inst.collect(module=module)
+
+ mocker.patch('sys.platform', 'hp-ux')
+ mocker.patch('ansible.module_utils.facts.network.iscsi.get_bin_path', return_value='/opt/iscsi/bin/iscsiutil')
+ mocker.patch.object(module, 'run_command', return_value=(0, ISCSIUTIL_OUTPUT, ''))
+ hpux_iscsi_expected = {"iscsi_iqn": " iqn.2001-04.com.hp.stor:svcio"}
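+ # Note the leading space in the expected IQN: the parser presumably keeps
+ # everything after the first ':' of the 'Initiator Name' line unstripped.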
+ assert hpux_iscsi_expected == inst.collect(module=module)
diff --git a/test/units/module_utils/facts/other/__init__.py b/test/units/module_utils/facts/other/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/facts/other/__init__.py
diff --git a/test/units/module_utils/facts/other/test_facter.py b/test/units/module_utils/facts/other/test_facter.py
new file mode 100644
index 00000000..7466338e
--- /dev/null
+++ b/test/units/module_utils/facts/other/test_facter.py
@@ -0,0 +1,228 @@
+# unit tests for the ansible.module_utils.facts.other.facter fact collector
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import Mock, patch
+
+from .. base import BaseFactsTest
+
+from ansible.module_utils.facts.other.facter import FacterFactCollector
+
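+# Canned facter output (captured on a Fedora 25 host), presumably gathered
+# via something like 'facter --json'; it drives the collector without a
+# real facter binary being present.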
+facter_json_output = '''
+{
+ "operatingsystemmajrelease": "25",
+ "hardwareisa": "x86_64",
+ "kernel": "Linux",
+ "path": "/home/testuser/src/ansible/bin:/home/testuser/perl5/bin:/home/testuser/perl5/bin:/home/testuser/bin:/home/testuser/.local/bin:/home/testuser/pythons/bin:/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/testuser/.cabal/bin:/home/testuser/gopath/bin:/home/testuser/.rvm/bin",
+ "memorysize": "15.36 GB",
+ "memoryfree": "4.88 GB",
+ "swapsize": "7.70 GB",
+ "swapfree": "6.75 GB",
+ "swapsize_mb": "7880.00",
+ "swapfree_mb": "6911.41",
+ "memorysize_mb": "15732.95",
+ "memoryfree_mb": "4997.68",
+ "lsbmajdistrelease": "25",
+ "macaddress": "02:42:ea:15:d8:84",
+ "id": "testuser",
+ "domain": "example.com",
+ "augeasversion": "1.7.0",
+ "os": {
+ "name": "Fedora",
+ "family": "RedHat",
+ "release": {
+ "major": "25",
+ "full": "25"
+ },
+ "lsb": {
+ "distcodename": "TwentyFive",
+ "distid": "Fedora",
+ "distdescription": "Fedora release 25 (Twenty Five)",
+ "release": ":core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch",
+ "distrelease": "25",
+ "majdistrelease": "25"
+ }
+ },
+ "processors": {
+ "models": [
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz"
+ ],
+ "count": 8,
+ "physicalcount": 1
+ },
+ "architecture": "x86_64",
+ "hardwaremodel": "x86_64",
+ "operatingsystem": "Fedora",
+ "processor0": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor1": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor2": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor3": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor4": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor5": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor6": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processor7": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "processorcount": 8,
+ "uptime_seconds": 1558090,
+ "fqdn": "myhostname.example.com",
+ "rubyversion": "2.3.3",
+ "gid": "testuser",
+ "physicalprocessorcount": 1,
+ "netmask": "255.255.0.0",
+ "uniqueid": "a8c01301",
+ "uptime_days": 18,
+ "interfaces": "docker0,em1,lo,vethf20ff12,virbr0,virbr1,virbr0_nic,virbr1_nic,wlp4s0",
+ "ipaddress_docker0": "172.17.0.1",
+ "macaddress_docker0": "02:42:ea:15:d8:84",
+ "netmask_docker0": "255.255.0.0",
+ "mtu_docker0": 1500,
+ "macaddress_em1": "3c:97:0e:e9:28:8e",
+ "mtu_em1": 1500,
+ "ipaddress_lo": "127.0.0.1",
+ "netmask_lo": "255.0.0.0",
+ "mtu_lo": 65536,
+ "macaddress_vethf20ff12": "ae:6e:2b:1e:a1:31",
+ "mtu_vethf20ff12": 1500,
+ "ipaddress_virbr0": "192.168.137.1",
+ "macaddress_virbr0": "52:54:00:ce:82:5e",
+ "netmask_virbr0": "255.255.255.0",
+ "mtu_virbr0": 1500,
+ "ipaddress_virbr1": "192.168.121.1",
+ "macaddress_virbr1": "52:54:00:b4:68:a9",
+ "netmask_virbr1": "255.255.255.0",
+ "mtu_virbr1": 1500,
+ "macaddress_virbr0_nic": "52:54:00:ce:82:5e",
+ "mtu_virbr0_nic": 1500,
+ "macaddress_virbr1_nic": "52:54:00:b4:68:a9",
+ "mtu_virbr1_nic": 1500,
+ "ipaddress_wlp4s0": "192.168.1.19",
+ "macaddress_wlp4s0": "5c:51:4f:e6:a8:e3",
+ "netmask_wlp4s0": "255.255.255.0",
+ "mtu_wlp4s0": 1500,
+ "virtual": "physical",
+ "is_virtual": false,
+ "partitions": {
+ "sda2": {
+ "size": "499091456"
+ },
+ "sda1": {
+ "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
+ "size": "1024000",
+ "mount": "/boot"
+ }
+ },
+ "lsbdistcodename": "TwentyFive",
+ "lsbrelease": ":core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch", # noqa
+ "filesystems": "btrfs,ext2,ext3,ext4,xfs",
+ "system_uptime": {
+ "seconds": 1558090,
+ "hours": 432,
+ "days": 18,
+ "uptime": "18 days"
+ },
+ "ipaddress": "172.17.0.1",
+ "timezone": "EDT",
+ "ps": "ps -ef",
+ "rubyplatform": "x86_64-linux",
+ "rubysitedir": "/usr/local/share/ruby/site_ruby",
+ "uptime": "18 days",
+ "lsbdistrelease": "25",
+ "operatingsystemrelease": "25",
+ "facterversion": "2.4.3",
+ "kernelrelease": "4.9.14-200.fc25.x86_64",
+ "lsbdistdescription": "Fedora release 25 (Twenty Five)",
+ "network_docker0": "172.17.0.0",
+ "network_lo": "127.0.0.0",
+ "network_virbr0": "192.168.137.0",
+ "network_virbr1": "192.168.121.0",
+ "network_wlp4s0": "192.168.1.0",
+ "lsbdistid": "Fedora",
+ "selinux": true,
+ "selinux_enforced": false,
+ "selinux_policyversion": "30",
+ "selinux_current_mode": "permissive",
+ "selinux_config_mode": "permissive",
+ "selinux_config_policy": "targeted",
+ "hostname": "myhostname",
+ "osfamily": "RedHat",
+ "kernelmajversion": "4.9",
+ "blockdevice_sr0_size": 1073741312,
+ "blockdevice_sr0_vendor": "MATSHITA",
+ "blockdevice_sr0_model": "DVD-RAM UJ8E2",
+ "blockdevice_sda_size": 256060514304,
+ "blockdevice_sda_vendor": "ATA",
+ "blockdevice_sda_model": "SAMSUNG MZ7TD256",
+ "blockdevices": "sda,sr0",
+ "uptime_hours": 432,
+ "kernelversion": "4.9.14"
+}
+'''
+
+
+class TestFacterCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'facter']
+ valid_subsets = ['facter']
+ fact_namespace = 'ansible_facter'
+ collector_class = FacterFactCollector
+
+ def _mock_module(self):
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': self.gather_subset,
+ 'gather_timeout': 10,
+ 'filter': '*'}
+ mock_module.get_bin_path = Mock(return_value='/not/actually/facter')
+ mock_module.run_command = Mock(return_value=(0, facter_json_output, ''))
+ return mock_module
+
+ @patch('ansible.module_utils.facts.other.facter.FacterFactCollector.get_facter_output')
+ def test_bogus_json(self, mock_get_facter_output):
+ module = self._mock_module()
+
+ # bogus json
+ mock_get_facter_output.return_value = '{'
+
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict, {})
+
+ @patch('ansible.module_utils.facts.other.facter.FacterFactCollector.run_facter')
+ def test_facter_non_zero_return_code(self, mock_run_facter):
+ module = self._mock_module()
+
+ # valid json, but a non-zero return code from facter
+ mock_run_facter.return_value = (1, '{}', '')
+
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+
+ # This assumes the correct behavior is to return no 'facter' entry at all
+ self.assertNotIn('facter', facts_dict)
+ self.assertEqual(facts_dict, {})
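
The two failure-mode tests above pin down the collector's contract: unparseable facter output and a non-zero facter exit status must both yield an empty facts dict rather than raise. A minimal sketch of a collect() honoring that contract follows; it reuses the run_facter/get_facter_output names from the patch targets above, but the method bodies and the facter flags are assumptions for illustration, not the actual ansible implementation.

    import json

    class DefensiveFacterCollectorSketch:
        """Sketch only -- the real class is
        ansible.module_utils.facts.other.facter.FacterFactCollector."""

        def run_facter(self, module, facter_path):
            # Returns (rc, stdout, stderr); the exact facter flags are an
            # assumption here, not taken from the ansible source.
            return module.run_command([facter_path, '--json'])

        def get_facter_output(self, module):
            facter_path = module.get_bin_path('facter')
            if facter_path is None:
                return None
            rc, out, err = self.run_facter(module, facter_path)
            if rc != 0:
                # test_facter_non_zero_return_code: failure means no output
                return None
            return out

        def collect(self, module=None, collected_facts=None):
            facts_dict = {}
            if module is None:
                return facts_dict
            facter_output = self.get_facter_output(module)
            if facter_output is None:
                return facts_dict
            try:
                facter_dict = json.loads(facter_output)
            except ValueError:
                # test_bogus_json: malformed JSON ('{') yields an empty dict
                return facts_dict
            facts_dict['facter'] = facter_dict
            return facts_dict

Run against the mocks above, this sketch reproduces both expectations: rc=1 and the bogus '{' payload each leave facts_dict empty, with no 'facter' key.
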
diff --git a/test/units/module_utils/facts/other/test_ohai.py b/test/units/module_utils/facts/other/test_ohai.py
new file mode 100644
index 00000000..42a72d97
--- /dev/null
+++ b/test/units/module_utils/facts/other/test_ohai.py
@@ -0,0 +1,6768 @@
+# unit tests for ansible ohai fact collector
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import Mock, patch
+
+from ..base import BaseFactsTest
+
+from ansible.module_utils.facts.other.ohai import OhaiFactCollector
+
+ohai_json_output = r'''
+{
+ "kernel": {
+ "name": "Linux",
+ "release": "4.9.14-200.fc25.x86_64",
+ "version": "#1 SMP Mon Mar 13 19:26:40 UTC 2017",
+ "machine": "x86_64",
+ "processor": "x86_64",
+ "os": "GNU/Linux",
+ "modules": {
+ "binfmt_misc": {
+ "size": "20480",
+ "refcount": "1"
+ },
+ "veth": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "xfs": {
+ "size": "1200128",
+ "refcount": "1"
+ },
+ "xt_addrtype": {
+ "size": "16384",
+ "refcount": "2"
+ },
+ "br_netfilter": {
+ "size": "24576",
+ "refcount": "0"
+ },
+ "dm_thin_pool": {
+ "size": "65536",
+ "refcount": "2"
+ },
+ "dm_persistent_data": {
+ "size": "69632",
+ "refcount": "1"
+ },
+ "dm_bio_prison": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "libcrc32c": {
+ "size": "16384",
+ "refcount": "2"
+ },
+ "rfcomm": {
+ "size": "77824",
+ "refcount": "14",
+ "version": "1.11"
+ },
+ "fuse": {
+ "size": "102400",
+ "refcount": "3"
+ },
+ "ccm": {
+ "size": "20480",
+ "refcount": "2"
+ },
+ "xt_CHECKSUM": {
+ "size": "16384",
+ "refcount": "2"
+ },
+ "iptable_mangle": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "ipt_MASQUERADE": {
+ "size": "16384",
+ "refcount": "7"
+ },
+ "nf_nat_masquerade_ipv4": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "iptable_nat": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "nf_nat_ipv4": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "nf_nat": {
+ "size": "28672",
+ "refcount": "2"
+ },
+ "nf_conntrack_ipv4": {
+ "size": "16384",
+ "refcount": "4"
+ },
+ "nf_defrag_ipv4": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "xt_conntrack": {
+ "size": "16384",
+ "refcount": "3"
+ },
+ "nf_conntrack": {
+ "size": "106496",
+ "refcount": "5"
+ },
+ "ip6t_REJECT": {
+ "size": "16384",
+ "refcount": "2"
+ },
+ "nf_reject_ipv6": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "tun": {
+ "size": "28672",
+ "refcount": "4"
+ },
+ "bridge": {
+ "size": "135168",
+ "refcount": "1",
+ "version": "2.3"
+ },
+ "stp": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "llc": {
+ "size": "16384",
+ "refcount": "2"
+ },
+ "ebtable_filter": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "ebtables": {
+ "size": "36864",
+ "refcount": "1"
+ },
+ "ip6table_filter": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "ip6_tables": {
+ "size": "28672",
+ "refcount": "1"
+ },
+ "cmac": {
+ "size": "16384",
+ "refcount": "3"
+ },
+ "uhid": {
+ "size": "20480",
+ "refcount": "2"
+ },
+ "bnep": {
+ "size": "20480",
+ "refcount": "2",
+ "version": "1.3"
+ },
+ "btrfs": {
+ "size": "1056768",
+ "refcount": "1"
+ },
+ "xor": {
+ "size": "24576",
+ "refcount": "1"
+ },
+ "raid6_pq": {
+ "size": "106496",
+ "refcount": "1"
+ },
+ "loop": {
+ "size": "28672",
+ "refcount": "6"
+ },
+ "arc4": {
+ "size": "16384",
+ "refcount": "2"
+ },
+ "snd_hda_codec_hdmi": {
+ "size": "45056",
+ "refcount": "1"
+ },
+ "intel_rapl": {
+ "size": "20480",
+ "refcount": "0"
+ },
+ "x86_pkg_temp_thermal": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "intel_powerclamp": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "coretemp": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "kvm_intel": {
+ "size": "192512",
+ "refcount": "0"
+ },
+ "kvm": {
+ "size": "585728",
+ "refcount": "1"
+ },
+ "irqbypass": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "crct10dif_pclmul": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "crc32_pclmul": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "iTCO_wdt": {
+ "size": "16384",
+ "refcount": "0",
+ "version": "1.11"
+ },
+ "ghash_clmulni_intel": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "mei_wdt": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "iTCO_vendor_support": {
+ "size": "16384",
+ "refcount": "1",
+ "version": "1.04"
+ },
+ "iwlmvm": {
+ "size": "364544",
+ "refcount": "0"
+ },
+ "intel_cstate": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "uvcvideo": {
+ "size": "90112",
+ "refcount": "0",
+ "version": "1.1.1"
+ },
+ "videobuf2_vmalloc": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "intel_uncore": {
+ "size": "118784",
+ "refcount": "0"
+ },
+ "videobuf2_memops": {
+ "size": "16384",
+ "refcount": "1"
+ },
+ "videobuf2_v4l2": {
+ "size": "24576",
+ "refcount": "1"
+ },
+ "videobuf2_core": {
+ "size": "40960",
+ "refcount": "2"
+ },
+ "intel_rapl_perf": {
+ "size": "16384",
+ "refcount": "0"
+ },
+ "mac80211": {
+ "size": "749568",
+ "refcount": "1"
+ },
+ "videodev": {
+ "size": "172032",
+ "refcount": "3"
+ },
+ "snd_usb_audio": {
+ "size": "180224",
+ "refcount": "3"
+ },
+ "e1000e": {
+ "size": "249856",
+ "refcount": "0",
+ "version": "3.2.6-k"
+ }
+ }
+ },
+ "os": "linux",
+ "os_version": "4.9.14-200.fc25.x86_64",
+ "lsb": {
+ "id": "Fedora",
+ "description": "Fedora release 25 (Twenty Five)",
+ "release": "25",
+ "codename": "TwentyFive"
+ },
+ "platform": "fedora",
+ "platform_version": "25",
+ "platform_family": "fedora",
+ "packages": {
+ "ansible": {
+ "epoch": "0",
+ "version": "2.2.1.0",
+ "release": "1.fc25",
+ "installdate": "1486050042",
+ "arch": "noarch"
+ },
+ "python3": {
+ "epoch": "0",
+ "version": "3.5.3",
+ "release": "3.fc25",
+ "installdate": "1490025957",
+ "arch": "x86_64"
+ },
+ "kernel": {
+ "epoch": "0",
+ "version": "4.9.6",
+ "release": "200.fc25",
+ "installdate": "1486047522",
+ "arch": "x86_64"
+ },
+ "glibc": {
+ "epoch": "0",
+ "version": "2.24",
+ "release": "4.fc25",
+ "installdate": "1483402427",
+ "arch": "x86_64"
+ }
+ },
+ "chef_packages": {
+ ohai": {
+ "version": "13.0.0",
+ "ohai_root": "/home/some_user/.gem/ruby/gems/ohai-13.0.0/lib/ohai"
+ }
+ },
+ "dmi": {
+ "dmidecode_version": "3.0"
+ },
+ "uptime_seconds": 2509008,
+ "uptime": "29 days 00 hours 56 minutes 48 seconds",
+ "idletime_seconds": 19455087,
+ "idletime": "225 days 04 hours 11 minutes 27 seconds",
+ "memory": {
+ "swap": {
+ "cached": "262436kB",
+ "total": "8069116kB",
+ "free": "5154396kB"
+ },
+ "hugepages": {
+ "total": "0",
+ "free": "0",
+ "reserved": "0",
+ "surplus": "0"
+ },
+ "total": "16110540kB",
+ "free": "3825844kB",
+ "buffers": "377240kB",
+ "cached": "3710084kB",
+ "active": "8104320kB",
+ "inactive": "3192920kB",
+ "dirty": "812kB",
+ "writeback": "0kB",
+ "anon_pages": "7124992kB",
+ "mapped": "580700kB",
+ "slab": "622848kB",
+ "slab_reclaimable": "307300kB",
+ "slab_unreclaim": "315548kB",
+ "page_tables": "157572kB",
+ "nfs_unstable": "0kB",
+ "bounce": "0kB",
+ "commit_limit": "16124384kB",
+ "committed_as": "31345068kB",
+ "vmalloc_total": "34359738367kB",
+ "vmalloc_used": "0kB",
+ "vmalloc_chunk": "0kB",
+ "hugepage_size": "2048kB"
+ },
+ "filesystem": {
+ "by_device": {
+ "devtmpfs": {
+ "kb_size": "8044124",
+ "kb_used": "0",
+ "kb_available": "8044124",
+ "percent_used": "0%",
+ "total_inodes": "2011031",
+ "inodes_used": "629",
+ "inodes_available": "2010402",
+ "inodes_percent_used": "1%",
+ "fs_type": "devtmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "seclabel",
+ "size=8044124k",
+ "nr_inodes=2011031",
+ "mode=755"
+ ],
+ "mounts": [
+ "/dev"
+ ]
+ },
+ "tmpfs": {
+ "kb_size": "1611052",
+ "kb_used": "72",
+ "kb_available": "1610980",
+ "percent_used": "1%",
+ "total_inodes": "2013817",
+ "inodes_used": "36",
+ "inodes_available": "2013781",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700",
+ "uid=1000",
+ "gid=1000"
+ ],
+ "mounts": [
+ "/dev/shm",
+ "/run",
+ "/sys/fs/cgroup",
+ "/tmp",
+ "/run/user/0",
+ "/run/user/1000"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-root": {
+ "kb_size": "51475068",
+ "kb_used": "42551284",
+ "kb_available": "6285960",
+ "percent_used": "88%",
+ "total_inodes": "3276800",
+ "inodes_used": "532908",
+ "inodes_available": "2743892",
+ "inodes_percent_used": "17%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "12312331-3449-4a6c-8179-a1feb2bca6ce",
+ "mounts": [
+ "/",
+ "/var/lib/docker/devicemapper"
+ ]
+ },
+ "/dev/sda1": {
+ "kb_size": "487652",
+ "kb_used": "126628",
+ "kb_available": "331328",
+ "percent_used": "28%",
+ "total_inodes": "128016",
+ "inodes_used": "405",
+ "inodes_available": "127611",
+ "inodes_percent_used": "1%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "12312311-ef40-4691-a3b6-438c3f9bc1c0",
+ "mounts": [
+ "/boot"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-home": {
+ "kb_size": "185948124",
+ "kb_used": "105904724",
+ "kb_available": "70574680",
+ "percent_used": "61%",
+ "total_inodes": "11821056",
+ "inodes_used": "1266687",
+ "inodes_available": "10554369",
+ "inodes_percent_used": "11%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
+ "mounts": [
+ "/home"
+ ]
+ },
+ "/dev/loop0": {
+ "kb_size": "512000",
+ "kb_used": "16672",
+ "kb_available": "429056",
+ "percent_used": "4%",
+ "fs_type": "btrfs",
+ "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
+ "mounts": [
+ "/var/lib/machines"
+ ]
+ },
+ "sysfs": {
+ "fs_type": "sysfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/sys"
+ ]
+ },
+ "proc": {
+ "fs_type": "proc",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "mounts": [
+ "/proc"
+ ]
+ },
+ "securityfs": {
+ "fs_type": "securityfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/kernel/security"
+ ]
+ },
+ "devpts": {
+ "fs_type": "devpts",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "noexec",
+ "relatime",
+ "seclabel",
+ "gid=5",
+ "mode=620",
+ "ptmxmode=000"
+ ],
+ "mounts": [
+ "/dev/pts"
+ ]
+ },
+ "cgroup": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "net_cls",
+ "net_prio"
+ ],
+ "mounts": [
+ "/sys/fs/cgroup/systemd",
+ "/sys/fs/cgroup/devices",
+ "/sys/fs/cgroup/cpuset",
+ "/sys/fs/cgroup/perf_event",
+ "/sys/fs/cgroup/hugetlb",
+ "/sys/fs/cgroup/cpu,cpuacct",
+ "/sys/fs/cgroup/blkio",
+ "/sys/fs/cgroup/freezer",
+ "/sys/fs/cgroup/memory",
+ "/sys/fs/cgroup/pids",
+ "/sys/fs/cgroup/net_cls,net_prio"
+ ]
+ },
+ "pstore": {
+ "fs_type": "pstore",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/sys/fs/pstore"
+ ]
+ },
+ "configfs": {
+ "fs_type": "configfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/kernel/config"
+ ]
+ },
+ "selinuxfs": {
+ "fs_type": "selinuxfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/fs/selinux"
+ ]
+ },
+ "debugfs": {
+ "fs_type": "debugfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/sys/kernel/debug"
+ ]
+ },
+ "hugetlbfs": {
+ "fs_type": "hugetlbfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/dev/hugepages"
+ ]
+ },
+ "mqueue": {
+ "fs_type": "mqueue",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/dev/mqueue"
+ ]
+ },
+ "systemd-1": {
+ "fs_type": "autofs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "fd=40",
+ "pgrp=1",
+ "timeout=0",
+ "minproto=5",
+ "maxproto=5",
+ "direct",
+ "pipe_ino=17610"
+ ],
+ "mounts": [
+ "/proc/sys/fs/binfmt_misc"
+ ]
+ },
+ "/var/lib/machines.raw": {
+ "fs_type": "btrfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "space_cache",
+ "subvolid=5",
+ "subvol=/"
+ ],
+ "mounts": [
+ "/var/lib/machines"
+ ]
+ },
+ "fusectl": {
+ "fs_type": "fusectl",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/fs/fuse/connections"
+ ]
+ },
+ "gvfsd-fuse": {
+ "fs_type": "fuse.gvfsd-fuse",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "user_id=1000",
+ "group_id=1000"
+ ],
+ "mounts": [
+ "/run/user/1000/gvfs"
+ ]
+ },
+ "binfmt_misc": {
+ "fs_type": "binfmt_misc",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/proc/sys/fs/binfmt_misc"
+ ]
+ },
+ "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
+ "fs_type": "xfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "nouuid",
+ "attr2",
+ "inode64",
+ "logbsize=64k",
+ "sunit=128",
+ "swidth=128",
+ "noquota"
+ ],
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
+ "mounts": [
+ "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
+ ]
+ },
+ "shm": {
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "size=65536k"
+ ],
+ "mounts": [
+ "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm"
+ ]
+ },
+ "nsfs": {
+ "fs_type": "nsfs",
+ "mount_options": [
+ "rw"
+ ],
+ "mounts": [
+ "/run/docker/netns/1ce89fd79f3d"
+ ]
+ },
+ "tracefs": {
+ "fs_type": "tracefs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/kernel/debug/tracing"
+ ]
+ },
+ "/dev/loop1": {
+ "fs_type": "xfs",
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
+ "mounts": [
+
+ ]
+ },
+ "/dev/mapper/docker-253:1-1180487-pool": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/sr0": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/loop2": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/sda": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/sda2": {
+ "fs_type": "LVM2_member",
+ "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK",
+ "mounts": [
+
+ ]
+ },
+ "/dev/mapper/fedora_host--186-swap": {
+ "fs_type": "swap",
+ "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d",
+ "mounts": [
+
+ ]
+ }
+ },
+ "by_mountpoint": {
+ "/dev": {
+ "kb_size": "8044124",
+ "kb_used": "0",
+ "kb_available": "8044124",
+ "percent_used": "0%",
+ "total_inodes": "2011031",
+ "inodes_used": "629",
+ "inodes_available": "2010402",
+ "inodes_percent_used": "1%",
+ "fs_type": "devtmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "seclabel",
+ "size=8044124k",
+ "nr_inodes=2011031",
+ "mode=755"
+ ],
+ "devices": [
+ "devtmpfs"
+ ]
+ },
+ "/dev/shm": {
+ "kb_size": "8055268",
+ "kb_used": "96036",
+ "kb_available": "7959232",
+ "percent_used": "2%",
+ "total_inodes": "2013817",
+ "inodes_used": "217",
+ "inodes_available": "2013600",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/run": {
+ "kb_size": "8055268",
+ "kb_used": "2280",
+ "kb_available": "8052988",
+ "percent_used": "1%",
+ "total_inodes": "2013817",
+ "inodes_used": "1070",
+ "inodes_available": "2012747",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel",
+ "mode=755"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/sys/fs/cgroup": {
+ "kb_size": "8055268",
+ "kb_used": "0",
+ "kb_available": "8055268",
+ "percent_used": "0%",
+ "total_inodes": "2013817",
+ "inodes_used": "16",
+ "inodes_available": "2013801",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "ro",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "seclabel",
+ "mode=755"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/": {
+ "kb_size": "51475068",
+ "kb_used": "42551284",
+ "kb_available": "6285960",
+ "percent_used": "88%",
+ "total_inodes": "3276800",
+ "inodes_used": "532908",
+ "inodes_available": "2743892",
+ "inodes_percent_used": "17%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
+ "devices": [
+ "/dev/mapper/fedora_host--186-root"
+ ]
+ },
+ "/tmp": {
+ "kb_size": "8055268",
+ "kb_used": "848396",
+ "kb_available": "7206872",
+ "percent_used": "11%",
+ "total_inodes": "2013817",
+ "inodes_used": "1353",
+ "inodes_available": "2012464",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/boot": {
+ "kb_size": "487652",
+ "kb_used": "126628",
+ "kb_available": "331328",
+ "percent_used": "28%",
+ "total_inodes": "128016",
+ "inodes_used": "405",
+ "inodes_available": "127611",
+ "inodes_percent_used": "1%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
+ "devices": [
+ "/dev/sda1"
+ ]
+ },
+ "/home": {
+ "kb_size": "185948124",
+ "kb_used": "105904724",
+ "kb_available": "70574680",
+ "percent_used": "61%",
+ "total_inodes": "11821056",
+ "inodes_used": "1266687",
+ "inodes_available": "10554369",
+ "inodes_percent_used": "11%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
+ "devices": [
+ "/dev/mapper/fedora_host--186-home"
+ ]
+ },
+ "/var/lib/machines": {
+ "kb_size": "512000",
+ "kb_used": "16672",
+ "kb_available": "429056",
+ "percent_used": "4%",
+ "fs_type": "btrfs",
+ "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
+ "devices": [
+ "/dev/loop0",
+ "/var/lib/machines.raw"
+ ],
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "space_cache",
+ "subvolid=5",
+ "subvol=/"
+ ]
+ },
+ "/run/user/0": {
+ "kb_size": "1611052",
+ "kb_used": "0",
+ "kb_available": "1611052",
+ "percent_used": "0%",
+ "total_inodes": "2013817",
+ "inodes_used": "7",
+ "inodes_available": "2013810",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/run/user/1000": {
+ "kb_size": "1611052",
+ "kb_used": "72",
+ "kb_available": "1610980",
+ "percent_used": "1%",
+ "total_inodes": "2013817",
+ "inodes_used": "36",
+ "inodes_available": "2013781",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700",
+ "uid=1000",
+ "gid=1000"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/sys": {
+ "fs_type": "sysfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "sysfs"
+ ]
+ },
+ "/proc": {
+ "fs_type": "proc",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "devices": [
+ "proc"
+ ]
+ },
+ "/sys/kernel/security": {
+ "fs_type": "securityfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "devices": [
+ "securityfs"
+ ]
+ },
+ "/dev/pts": {
+ "fs_type": "devpts",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "noexec",
+ "relatime",
+ "seclabel",
+ "gid=5",
+ "mode=620",
+ "ptmxmode=000"
+ ],
+ "devices": [
+ "devpts"
+ ]
+ },
+ "/sys/fs/cgroup/systemd": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "xattr",
+ "release_agent=/usr/lib/systemd/systemd-cgroups-agent",
+ "name=systemd"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/pstore": {
+ "fs_type": "pstore",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "pstore"
+ ]
+ },
+ "/sys/fs/cgroup/devices": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "devices"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/cpuset": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpuset"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/perf_event": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "perf_event"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/hugetlb": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "hugetlb"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/cpu,cpuacct": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpu",
+ "cpuacct"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/blkio": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "blkio"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/freezer": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "freezer"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/memory": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "memory"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/pids": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "pids"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/net_cls,net_prio": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "net_cls",
+ "net_prio"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/kernel/config": {
+ "fs_type": "configfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "configfs"
+ ]
+ },
+ "/sys/fs/selinux": {
+ "fs_type": "selinuxfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "selinuxfs"
+ ]
+ },
+ "/sys/kernel/debug": {
+ "fs_type": "debugfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "debugfs"
+ ]
+ },
+ "/dev/hugepages": {
+ "fs_type": "hugetlbfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "hugetlbfs"
+ ]
+ },
+ "/dev/mqueue": {
+ "fs_type": "mqueue",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "mqueue"
+ ]
+ },
+ "/proc/sys/fs/binfmt_misc": {
+ "fs_type": "binfmt_misc",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "systemd-1",
+ "binfmt_misc"
+ ]
+ },
+ "/sys/fs/fuse/connections": {
+ "fs_type": "fusectl",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "fusectl"
+ ]
+ },
+ "/run/user/1000/gvfs": {
+ "fs_type": "fuse.gvfsd-fuse",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "user_id=1000",
+ "group_id=1000"
+ ],
+ "devices": [
+ "gvfsd-fuse"
+ ]
+ },
+ "/var/lib/docker/devicemapper": {
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
+ "devices": [
+ "/dev/mapper/fedora_host--186-root"
+ ]
+ },
+ "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
+ "fs_type": "xfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "nouuid",
+ "attr2",
+ "inode64",
+ "logbsize=64k",
+ "sunit=128",
+ "swidth=128",
+ "noquota"
+ ],
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
+ "devices": [
+ "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
+ ]
+ },
+ "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "size=65536k"
+ ],
+ "devices": [
+ "shm"
+ ]
+ },
+ "/run/docker/netns/1ce89fd79f3d": {
+ "fs_type": "nsfs",
+ "mount_options": [
+ "rw"
+ ],
+ "devices": [
+ "nsfs"
+ ]
+ },
+ "/sys/kernel/debug/tracing": {
+ "fs_type": "tracefs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "tracefs"
+ ]
+ }
+ },
+ "by_pair": {
+ "devtmpfs,/dev": {
+ "device": "devtmpfs",
+ "kb_size": "8044124",
+ "kb_used": "0",
+ "kb_available": "8044124",
+ "percent_used": "0%",
+ "mount": "/dev",
+ "total_inodes": "2011031",
+ "inodes_used": "629",
+ "inodes_available": "2010402",
+ "inodes_percent_used": "1%",
+ "fs_type": "devtmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "seclabel",
+ "size=8044124k",
+ "nr_inodes=2011031",
+ "mode=755"
+ ]
+ },
+ "tmpfs,/dev/shm": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "96036",
+ "kb_available": "7959232",
+ "percent_used": "2%",
+ "mount": "/dev/shm",
+ "total_inodes": "2013817",
+ "inodes_used": "217",
+ "inodes_available": "2013600",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ]
+ },
+ "tmpfs,/run": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "2280",
+ "kb_available": "8052988",
+ "percent_used": "1%",
+ "mount": "/run",
+ "total_inodes": "2013817",
+ "inodes_used": "1070",
+ "inodes_available": "2012747",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel",
+ "mode=755"
+ ]
+ },
+ "tmpfs,/sys/fs/cgroup": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "0",
+ "kb_available": "8055268",
+ "percent_used": "0%",
+ "mount": "/sys/fs/cgroup",
+ "total_inodes": "2013817",
+ "inodes_used": "16",
+ "inodes_available": "2013801",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "ro",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "seclabel",
+ "mode=755"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-root,/": {
+ "device": "/dev/mapper/fedora_host--186-root",
+ "kb_size": "51475068",
+ "kb_used": "42551284",
+ "kb_available": "6285960",
+ "percent_used": "88%",
+ "mount": "/",
+ "total_inodes": "3276800",
+ "inodes_used": "532908",
+ "inodes_available": "2743892",
+ "inodes_percent_used": "17%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
+ },
+ "tmpfs,/tmp": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "848396",
+ "kb_available": "7206872",
+ "percent_used": "11%",
+ "mount": "/tmp",
+ "total_inodes": "2013817",
+ "inodes_used": "1353",
+ "inodes_available": "2012464",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ]
+ },
+ "/dev/sda1,/boot": {
+ "device": "/dev/sda1",
+ "kb_size": "487652",
+ "kb_used": "126628",
+ "kb_available": "331328",
+ "percent_used": "28%",
+ "mount": "/boot",
+ "total_inodes": "128016",
+ "inodes_used": "405",
+ "inodes_available": "127611",
+ "inodes_percent_used": "1%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0"
+ },
+ "/dev/mapper/fedora_host--186-home,/home": {
+ "device": "/dev/mapper/fedora_host--186-home",
+ "kb_size": "185948124",
+ "kb_used": "105904724",
+ "kb_available": "70574680",
+ "percent_used": "61%",
+ "mount": "/home",
+ "total_inodes": "11821056",
+ "inodes_used": "1266687",
+ "inodes_available": "10554369",
+ "inodes_percent_used": "11%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d"
+ },
+ "/dev/loop0,/var/lib/machines": {
+ "device": "/dev/loop0",
+ "kb_size": "512000",
+ "kb_used": "16672",
+ "kb_available": "429056",
+ "percent_used": "4%",
+ "mount": "/var/lib/machines",
+ "fs_type": "btrfs",
+ "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390"
+ },
+ "tmpfs,/run/user/0": {
+ "device": "tmpfs",
+ "kb_size": "1611052",
+ "kb_used": "0",
+ "kb_available": "1611052",
+ "percent_used": "0%",
+ "mount": "/run/user/0",
+ "total_inodes": "2013817",
+ "inodes_used": "7",
+ "inodes_available": "2013810",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700"
+ ]
+ },
+ "tmpfs,/run/user/1000": {
+ "device": "tmpfs",
+ "kb_size": "1611052",
+ "kb_used": "72",
+ "kb_available": "1610980",
+ "percent_used": "1%",
+ "mount": "/run/user/1000",
+ "total_inodes": "2013817",
+ "inodes_used": "36",
+ "inodes_available": "2013781",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700",
+ "uid=1000",
+ "gid=1000"
+ ]
+ },
+ "sysfs,/sys": {
+ "device": "sysfs",
+ "mount": "/sys",
+ "fs_type": "sysfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "proc,/proc": {
+ "device": "proc",
+ "mount": "/proc",
+ "fs_type": "proc",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ]
+ },
+ "securityfs,/sys/kernel/security": {
+ "device": "securityfs",
+ "mount": "/sys/kernel/security",
+ "fs_type": "securityfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ]
+ },
+ "devpts,/dev/pts": {
+ "device": "devpts",
+ "mount": "/dev/pts",
+ "fs_type": "devpts",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "noexec",
+ "relatime",
+ "seclabel",
+ "gid=5",
+ "mode=620",
+ "ptmxmode=000"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/systemd": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/systemd",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "xattr",
+ "release_agent=/usr/lib/systemd/systemd-cgroups-agent",
+ "name=systemd"
+ ]
+ },
+ "pstore,/sys/fs/pstore": {
+ "device": "pstore",
+ "mount": "/sys/fs/pstore",
+ "fs_type": "pstore",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/devices": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/devices",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "devices"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/cpuset": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/cpuset",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpuset"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/perf_event": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/perf_event",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "perf_event"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/hugetlb": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/hugetlb",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "hugetlb"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/cpu,cpuacct": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/cpu,cpuacct",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpu",
+ "cpuacct"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/blkio": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/blkio",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "blkio"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/freezer": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/freezer",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "freezer"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/memory": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/memory",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "memory"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/pids": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/pids",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "pids"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/net_cls,net_prio": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/net_cls,net_prio",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "net_cls",
+ "net_prio"
+ ]
+ },
+ "configfs,/sys/kernel/config": {
+ "device": "configfs",
+ "mount": "/sys/kernel/config",
+ "fs_type": "configfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "selinuxfs,/sys/fs/selinux": {
+ "device": "selinuxfs",
+ "mount": "/sys/fs/selinux",
+ "fs_type": "selinuxfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "debugfs,/sys/kernel/debug": {
+ "device": "debugfs",
+ "mount": "/sys/kernel/debug",
+ "fs_type": "debugfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "hugetlbfs,/dev/hugepages": {
+ "device": "hugetlbfs",
+ "mount": "/dev/hugepages",
+ "fs_type": "hugetlbfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "mqueue,/dev/mqueue": {
+ "device": "mqueue",
+ "mount": "/dev/mqueue",
+ "fs_type": "mqueue",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "systemd-1,/proc/sys/fs/binfmt_misc": {
+ "device": "systemd-1",
+ "mount": "/proc/sys/fs/binfmt_misc",
+ "fs_type": "autofs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "fd=40",
+ "pgrp=1",
+ "timeout=0",
+ "minproto=5",
+ "maxproto=5",
+ "direct",
+ "pipe_ino=17610"
+ ]
+ },
+ "/var/lib/machines.raw,/var/lib/machines": {
+ "device": "/var/lib/machines.raw",
+ "mount": "/var/lib/machines",
+ "fs_type": "btrfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "space_cache",
+ "subvolid=5",
+ "subvol=/"
+ ]
+ },
+ "fusectl,/sys/fs/fuse/connections": {
+ "device": "fusectl",
+ "mount": "/sys/fs/fuse/connections",
+ "fs_type": "fusectl",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "gvfsd-fuse,/run/user/1000/gvfs": {
+ "device": "gvfsd-fuse",
+ "mount": "/run/user/1000/gvfs",
+ "fs_type": "fuse.gvfsd-fuse",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "user_id=1000",
+ "group_id=1000"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-root,/var/lib/docker/devicemapper": {
+ "device": "/dev/mapper/fedora_host--186-root",
+ "mount": "/var/lib/docker/devicemapper",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
+ },
+ "binfmt_misc,/proc/sys/fs/binfmt_misc": {
+ "device": "binfmt_misc",
+ "mount": "/proc/sys/fs/binfmt_misc",
+ "fs_type": "binfmt_misc",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8,/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
+ "device": "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
+ "mount": "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
+ "fs_type": "xfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "nouuid",
+ "attr2",
+ "inode64",
+ "logbsize=64k",
+ "sunit=128",
+ "swidth=128",
+ "noquota"
+ ],
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
+ },
+ "shm,/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
+ "device": "shm",
+ "mount": "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "size=65536k"
+ ]
+ },
+ "nsfs,/run/docker/netns/1ce89fd79f3d": {
+ "device": "nsfs",
+ "mount": "/run/docker/netns/1ce89fd79f3d",
+ "fs_type": "nsfs",
+ "mount_options": [
+ "rw"
+ ]
+ },
+ "tracefs,/sys/kernel/debug/tracing": {
+ "device": "tracefs",
+ "mount": "/sys/kernel/debug/tracing",
+ "fs_type": "tracefs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "/dev/loop1,": {
+ "device": "/dev/loop1",
+ "fs_type": "xfs",
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
+ },
+ "/dev/mapper/docker-253:1-1180487-pool,": {
+ "device": "/dev/mapper/docker-253:1-1180487-pool"
+ },
+ "/dev/sr0,": {
+ "device": "/dev/sr0"
+ },
+ "/dev/loop2,": {
+ "device": "/dev/loop2"
+ },
+ "/dev/sda,": {
+ "device": "/dev/sda"
+ },
+ "/dev/sda2,": {
+ "device": "/dev/sda2",
+ "fs_type": "LVM2_member",
+ "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK"
+ },
+ "/dev/mapper/fedora_host--186-swap,": {
+ "device": "/dev/mapper/fedora_host--186-swap",
+ "fs_type": "swap",
+ "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d"
+ }
+ }
+ },
+ "filesystem2": {
+ "by_device": {
+ "devtmpfs": {
+ "kb_size": "8044124",
+ "kb_used": "0",
+ "kb_available": "8044124",
+ "percent_used": "0%",
+ "total_inodes": "2011031",
+ "inodes_used": "629",
+ "inodes_available": "2010402",
+ "inodes_percent_used": "1%",
+ "fs_type": "devtmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "seclabel",
+ "size=8044124k",
+ "nr_inodes=2011031",
+ "mode=755"
+ ],
+ "mounts": [
+ "/dev"
+ ]
+ },
+ "tmpfs": {
+ "kb_size": "1611052",
+ "kb_used": "72",
+ "kb_available": "1610980",
+ "percent_used": "1%",
+ "total_inodes": "2013817",
+ "inodes_used": "36",
+ "inodes_available": "2013781",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700",
+ "uid=1000",
+ "gid=1000"
+ ],
+ "mounts": [
+ "/dev/shm",
+ "/run",
+ "/sys/fs/cgroup",
+ "/tmp",
+ "/run/user/0",
+ "/run/user/1000"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-root": {
+ "kb_size": "51475068",
+ "kb_used": "42551284",
+ "kb_available": "6285960",
+ "percent_used": "88%",
+ "total_inodes": "3276800",
+ "inodes_used": "532908",
+ "inodes_available": "2743892",
+ "inodes_percent_used": "17%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
+ "mounts": [
+ "/",
+ "/var/lib/docker/devicemapper"
+ ]
+ },
+ "/dev/sda1": {
+ "kb_size": "487652",
+ "kb_used": "126628",
+ "kb_available": "331328",
+ "percent_used": "28%",
+ "total_inodes": "128016",
+ "inodes_used": "405",
+ "inodes_available": "127611",
+ "inodes_percent_used": "1%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
+ "mounts": [
+ "/boot"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-home": {
+ "kb_size": "185948124",
+ "kb_used": "105904724",
+ "kb_available": "70574680",
+ "percent_used": "61%",
+ "total_inodes": "11821056",
+ "inodes_used": "1266687",
+ "inodes_available": "10554369",
+ "inodes_percent_used": "11%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
+ "mounts": [
+ "/home"
+ ]
+ },
+ "/dev/loop0": {
+ "kb_size": "512000",
+ "kb_used": "16672",
+ "kb_available": "429056",
+ "percent_used": "4%",
+ "fs_type": "btrfs",
+ "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
+ "mounts": [
+ "/var/lib/machines"
+ ]
+ },
+ "sysfs": {
+ "fs_type": "sysfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/sys"
+ ]
+ },
+ "proc": {
+ "fs_type": "proc",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "mounts": [
+ "/proc"
+ ]
+ },
+ "securityfs": {
+ "fs_type": "securityfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/kernel/security"
+ ]
+ },
+ "devpts": {
+ "fs_type": "devpts",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "noexec",
+ "relatime",
+ "seclabel",
+ "gid=5",
+ "mode=620",
+ "ptmxmode=000"
+ ],
+ "mounts": [
+ "/dev/pts"
+ ]
+ },
+ "cgroup": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "net_cls",
+ "net_prio"
+ ],
+ "mounts": [
+ "/sys/fs/cgroup/systemd",
+ "/sys/fs/cgroup/devices",
+ "/sys/fs/cgroup/cpuset",
+ "/sys/fs/cgroup/perf_event",
+ "/sys/fs/cgroup/hugetlb",
+ "/sys/fs/cgroup/cpu,cpuacct",
+ "/sys/fs/cgroup/blkio",
+ "/sys/fs/cgroup/freezer",
+ "/sys/fs/cgroup/memory",
+ "/sys/fs/cgroup/pids",
+ "/sys/fs/cgroup/net_cls,net_prio"
+ ]
+ },
+ "pstore": {
+ "fs_type": "pstore",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/sys/fs/pstore"
+ ]
+ },
+ "configfs": {
+ "fs_type": "configfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/kernel/config"
+ ]
+ },
+ "selinuxfs": {
+ "fs_type": "selinuxfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/fs/selinux"
+ ]
+ },
+ "debugfs": {
+ "fs_type": "debugfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/sys/kernel/debug"
+ ]
+ },
+ "hugetlbfs": {
+ "fs_type": "hugetlbfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/dev/hugepages"
+ ]
+ },
+ "mqueue": {
+ "fs_type": "mqueue",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "mounts": [
+ "/dev/mqueue"
+ ]
+ },
+ "systemd-1": {
+ "fs_type": "autofs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "fd=40",
+ "pgrp=1",
+ "timeout=0",
+ "minproto=5",
+ "maxproto=5",
+ "direct",
+ "pipe_ino=17610"
+ ],
+ "mounts": [
+ "/proc/sys/fs/binfmt_misc"
+ ]
+ },
+ "/var/lib/machines.raw": {
+ "fs_type": "btrfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "space_cache",
+ "subvolid=5",
+ "subvol=/"
+ ],
+ "mounts": [
+ "/var/lib/machines"
+ ]
+ },
+ "fusectl": {
+ "fs_type": "fusectl",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/fs/fuse/connections"
+ ]
+ },
+ "gvfsd-fuse": {
+ "fs_type": "fuse.gvfsd-fuse",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "user_id=1000",
+ "group_id=1000"
+ ],
+ "mounts": [
+ "/run/user/1000/gvfs"
+ ]
+ },
+ "binfmt_misc": {
+ "fs_type": "binfmt_misc",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/proc/sys/fs/binfmt_misc"
+ ]
+ },
+ "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
+ "fs_type": "xfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "nouuid",
+ "attr2",
+ "inode64",
+ "logbsize=64k",
+ "sunit=128",
+ "swidth=128",
+ "noquota"
+ ],
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
+ "mounts": [
+ "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
+ ]
+ },
+ "shm": {
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "size=65536k"
+ ],
+ "mounts": [
+ "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm"
+ ]
+ },
+ "nsfs": {
+ "fs_type": "nsfs",
+ "mount_options": [
+ "rw"
+ ],
+ "mounts": [
+ "/run/docker/netns/1ce89fd79f3d"
+ ]
+ },
+ "tracefs": {
+ "fs_type": "tracefs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "mounts": [
+ "/sys/kernel/debug/tracing"
+ ]
+ },
+ "/dev/loop1": {
+ "fs_type": "xfs",
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
+ "mounts": [
+
+ ]
+ },
+ "/dev/mapper/docker-253:1-1180487-pool": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/sr0": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/loop2": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/sda": {
+ "mounts": [
+
+ ]
+ },
+ "/dev/sda2": {
+ "fs_type": "LVM2_member",
+ "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK",
+ "mounts": [
+
+ ]
+ },
+ "/dev/mapper/fedora_host--186-swap": {
+ "fs_type": "swap",
+ "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d",
+ "mounts": [
+
+ ]
+ }
+ },
+ "by_mountpoint": {
+ "/dev": {
+ "kb_size": "8044124",
+ "kb_used": "0",
+ "kb_available": "8044124",
+ "percent_used": "0%",
+ "total_inodes": "2011031",
+ "inodes_used": "629",
+ "inodes_available": "2010402",
+ "inodes_percent_used": "1%",
+ "fs_type": "devtmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "seclabel",
+ "size=8044124k",
+ "nr_inodes=2011031",
+ "mode=755"
+ ],
+ "devices": [
+ "devtmpfs"
+ ]
+ },
+ "/dev/shm": {
+ "kb_size": "8055268",
+ "kb_used": "96036",
+ "kb_available": "7959232",
+ "percent_used": "2%",
+ "total_inodes": "2013817",
+ "inodes_used": "217",
+ "inodes_available": "2013600",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/run": {
+ "kb_size": "8055268",
+ "kb_used": "2280",
+ "kb_available": "8052988",
+ "percent_used": "1%",
+ "total_inodes": "2013817",
+ "inodes_used": "1070",
+ "inodes_available": "2012747",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel",
+ "mode=755"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/sys/fs/cgroup": {
+ "kb_size": "8055268",
+ "kb_used": "0",
+ "kb_available": "8055268",
+ "percent_used": "0%",
+ "total_inodes": "2013817",
+ "inodes_used": "16",
+ "inodes_available": "2013801",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "ro",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "seclabel",
+ "mode=755"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/": {
+ "kb_size": "51475068",
+ "kb_used": "42551284",
+ "kb_available": "6285960",
+ "percent_used": "88%",
+ "total_inodes": "3276800",
+ "inodes_used": "532908",
+ "inodes_available": "2743892",
+ "inodes_percent_used": "17%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
+ "devices": [
+ "/dev/mapper/fedora_host--186-root"
+ ]
+ },
+ "/tmp": {
+ "kb_size": "8055268",
+ "kb_used": "848396",
+ "kb_available": "7206872",
+ "percent_used": "11%",
+ "total_inodes": "2013817",
+ "inodes_used": "1353",
+ "inodes_available": "2012464",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/boot": {
+ "kb_size": "487652",
+ "kb_used": "126628",
+ "kb_available": "331328",
+ "percent_used": "28%",
+ "total_inodes": "128016",
+ "inodes_used": "405",
+ "inodes_available": "127611",
+ "inodes_percent_used": "1%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0",
+ "devices": [
+ "/dev/sda1"
+ ]
+ },
+ "/home": {
+ "kb_size": "185948124",
+ "kb_used": "105904724",
+ "kb_available": "70574680",
+ "percent_used": "61%",
+ "total_inodes": "11821056",
+ "inodes_used": "1266687",
+ "inodes_available": "10554369",
+ "inodes_percent_used": "11%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d",
+ "devices": [
+ "/dev/mapper/fedora_host--186-home"
+ ]
+ },
+ "/var/lib/machines": {
+ "kb_size": "512000",
+ "kb_used": "16672",
+ "kb_available": "429056",
+ "percent_used": "4%",
+ "fs_type": "btrfs",
+ "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390",
+ "devices": [
+ "/dev/loop0",
+ "/var/lib/machines.raw"
+ ],
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "space_cache",
+ "subvolid=5",
+ "subvol=/"
+ ]
+ },
+ "/run/user/0": {
+ "kb_size": "1611052",
+ "kb_used": "0",
+ "kb_available": "1611052",
+ "percent_used": "0%",
+ "total_inodes": "2013817",
+ "inodes_used": "7",
+ "inodes_available": "2013810",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/run/user/1000": {
+ "kb_size": "1611052",
+ "kb_used": "72",
+ "kb_available": "1610980",
+ "percent_used": "1%",
+ "total_inodes": "2013817",
+ "inodes_used": "36",
+ "inodes_available": "2013781",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700",
+ "uid=1000",
+ "gid=1000"
+ ],
+ "devices": [
+ "tmpfs"
+ ]
+ },
+ "/sys": {
+ "fs_type": "sysfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "sysfs"
+ ]
+ },
+ "/proc": {
+ "fs_type": "proc",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "devices": [
+ "proc"
+ ]
+ },
+ "/sys/kernel/security": {
+ "fs_type": "securityfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ],
+ "devices": [
+ "securityfs"
+ ]
+ },
+ "/dev/pts": {
+ "fs_type": "devpts",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "noexec",
+ "relatime",
+ "seclabel",
+ "gid=5",
+ "mode=620",
+ "ptmxmode=000"
+ ],
+ "devices": [
+ "devpts"
+ ]
+ },
+ "/sys/fs/cgroup/systemd": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "xattr",
+ "release_agent=/usr/lib/systemd/systemd-cgroups-agent",
+ "name=systemd"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/pstore": {
+ "fs_type": "pstore",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "pstore"
+ ]
+ },
+ "/sys/fs/cgroup/devices": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "devices"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/cpuset": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpuset"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/perf_event": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "perf_event"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/hugetlb": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "hugetlb"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/cpu,cpuacct": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpu",
+ "cpuacct"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/blkio": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "blkio"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/freezer": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "freezer"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/memory": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "memory"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/pids": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "pids"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/fs/cgroup/net_cls,net_prio": {
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "net_cls",
+ "net_prio"
+ ],
+ "devices": [
+ "cgroup"
+ ]
+ },
+ "/sys/kernel/config": {
+ "fs_type": "configfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "configfs"
+ ]
+ },
+ "/sys/fs/selinux": {
+ "fs_type": "selinuxfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "selinuxfs"
+ ]
+ },
+ "/sys/kernel/debug": {
+ "fs_type": "debugfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "debugfs"
+ ]
+ },
+ "/dev/hugepages": {
+ "fs_type": "hugetlbfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "hugetlbfs"
+ ]
+ },
+ "/dev/mqueue": {
+ "fs_type": "mqueue",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ],
+ "devices": [
+ "mqueue"
+ ]
+ },
+ "/proc/sys/fs/binfmt_misc": {
+ "fs_type": "binfmt_misc",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "systemd-1",
+ "binfmt_misc"
+ ]
+ },
+ "/sys/fs/fuse/connections": {
+ "fs_type": "fusectl",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "fusectl"
+ ]
+ },
+ "/run/user/1000/gvfs": {
+ "fs_type": "fuse.gvfsd-fuse",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "user_id=1000",
+ "group_id=1000"
+ ],
+ "devices": [
+ "gvfsd-fuse"
+ ]
+ },
+ "/var/lib/docker/devicemapper": {
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce",
+ "devices": [
+ "/dev/mapper/fedora_host--186-root"
+ ]
+ },
+ "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
+ "fs_type": "xfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "nouuid",
+ "attr2",
+ "inode64",
+ "logbsize=64k",
+ "sunit=128",
+ "swidth=128",
+ "noquota"
+ ],
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123",
+ "devices": [
+ "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8"
+ ]
+ },
+ "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "size=65536k"
+ ],
+ "devices": [
+ "shm"
+ ]
+ },
+ "/run/docker/netns/1ce89fd79f3d": {
+ "fs_type": "nsfs",
+ "mount_options": [
+ "rw"
+ ],
+ "devices": [
+ "nsfs"
+ ]
+ },
+ "/sys/kernel/debug/tracing": {
+ "fs_type": "tracefs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ],
+ "devices": [
+ "tracefs"
+ ]
+ }
+ },
+ "by_pair": {
+ "devtmpfs,/dev": {
+ "device": "devtmpfs",
+ "kb_size": "8044124",
+ "kb_used": "0",
+ "kb_available": "8044124",
+ "percent_used": "0%",
+ "mount": "/dev",
+ "total_inodes": "2011031",
+ "inodes_used": "629",
+ "inodes_available": "2010402",
+ "inodes_percent_used": "1%",
+ "fs_type": "devtmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "seclabel",
+ "size=8044124k",
+ "nr_inodes=2011031",
+ "mode=755"
+ ]
+ },
+ "tmpfs,/dev/shm": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "96036",
+ "kb_available": "7959232",
+ "percent_used": "2%",
+ "mount": "/dev/shm",
+ "total_inodes": "2013817",
+ "inodes_used": "217",
+ "inodes_available": "2013600",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ]
+ },
+ "tmpfs,/run": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "2280",
+ "kb_available": "8052988",
+ "percent_used": "1%",
+ "mount": "/run",
+ "total_inodes": "2013817",
+ "inodes_used": "1070",
+ "inodes_available": "2012747",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel",
+ "mode=755"
+ ]
+ },
+ "tmpfs,/sys/fs/cgroup": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "0",
+ "kb_available": "8055268",
+ "percent_used": "0%",
+ "mount": "/sys/fs/cgroup",
+ "total_inodes": "2013817",
+ "inodes_used": "16",
+ "inodes_available": "2013801",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "ro",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "seclabel",
+ "mode=755"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-root,/": {
+ "device": "/dev/mapper/fedora_host--186-root",
+ "kb_size": "51475068",
+ "kb_used": "42551284",
+ "kb_available": "6285960",
+ "percent_used": "88%",
+ "mount": "/",
+ "total_inodes": "3276800",
+ "inodes_used": "532908",
+ "inodes_available": "2743892",
+ "inodes_percent_used": "17%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
+ },
+ "tmpfs,/tmp": {
+ "device": "tmpfs",
+ "kb_size": "8055268",
+ "kb_used": "848396",
+ "kb_available": "7206872",
+ "percent_used": "11%",
+ "mount": "/tmp",
+ "total_inodes": "2013817",
+ "inodes_used": "1353",
+ "inodes_available": "2012464",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "seclabel"
+ ]
+ },
+ "/dev/sda1,/boot": {
+ "device": "/dev/sda1",
+ "kb_size": "487652",
+ "kb_used": "126628",
+ "kb_available": "331328",
+ "percent_used": "28%",
+ "mount": "/boot",
+ "total_inodes": "128016",
+ "inodes_used": "405",
+ "inodes_available": "127611",
+ "inodes_percent_used": "1%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "32caaec3-ef40-4691-a3b6-438c3f9bc1c0"
+ },
+ "/dev/mapper/fedora_host--186-home,/home": {
+ "device": "/dev/mapper/fedora_host--186-home",
+ "kb_size": "185948124",
+ "kb_used": "105904724",
+ "kb_available": "70574680",
+ "percent_used": "61%",
+ "mount": "/home",
+ "total_inodes": "11821056",
+ "inodes_used": "1266687",
+ "inodes_available": "10554369",
+ "inodes_percent_used": "11%",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d"
+ },
+ "/dev/loop0,/var/lib/machines": {
+ "device": "/dev/loop0",
+ "kb_size": "512000",
+ "kb_used": "16672",
+ "kb_available": "429056",
+ "percent_used": "4%",
+ "mount": "/var/lib/machines",
+ "fs_type": "btrfs",
+ "uuid": "0f031512-ab15-497d-9abd-3a512b4a9390"
+ },
+ "tmpfs,/run/user/0": {
+ "device": "tmpfs",
+ "kb_size": "1611052",
+ "kb_used": "0",
+ "kb_available": "1611052",
+ "percent_used": "0%",
+ "mount": "/run/user/0",
+ "total_inodes": "2013817",
+ "inodes_used": "7",
+ "inodes_available": "2013810",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700"
+ ]
+ },
+ "tmpfs,/run/user/1000": {
+ "device": "tmpfs",
+ "kb_size": "1611052",
+ "kb_used": "72",
+ "kb_available": "1610980",
+ "percent_used": "1%",
+ "mount": "/run/user/1000",
+ "total_inodes": "2013817",
+ "inodes_used": "36",
+ "inodes_available": "2013781",
+ "inodes_percent_used": "1%",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "seclabel",
+ "size=1611052k",
+ "mode=700",
+ "uid=1000",
+ "gid=1000"
+ ]
+ },
+ "sysfs,/sys": {
+ "device": "sysfs",
+ "mount": "/sys",
+ "fs_type": "sysfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "proc,/proc": {
+ "device": "proc",
+ "mount": "/proc",
+ "fs_type": "proc",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ]
+ },
+ "securityfs,/sys/kernel/security": {
+ "device": "securityfs",
+ "mount": "/sys/kernel/security",
+ "fs_type": "securityfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime"
+ ]
+ },
+ "devpts,/dev/pts": {
+ "device": "devpts",
+ "mount": "/dev/pts",
+ "fs_type": "devpts",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "noexec",
+ "relatime",
+ "seclabel",
+ "gid=5",
+ "mode=620",
+ "ptmxmode=000"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/systemd": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/systemd",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "xattr",
+ "release_agent=/usr/lib/systemd/systemd-cgroups-agent",
+ "name=systemd"
+ ]
+ },
+ "pstore,/sys/fs/pstore": {
+ "device": "pstore",
+ "mount": "/sys/fs/pstore",
+ "fs_type": "pstore",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/devices": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/devices",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "devices"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/cpuset": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/cpuset",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpuset"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/perf_event": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/perf_event",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "perf_event"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/hugetlb": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/hugetlb",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "hugetlb"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/cpu,cpuacct": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/cpu,cpuacct",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "cpu",
+ "cpuacct"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/blkio": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/blkio",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "blkio"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/freezer": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/freezer",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "freezer"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/memory": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/memory",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "memory"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/pids": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/pids",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "pids"
+ ]
+ },
+ "cgroup,/sys/fs/cgroup/net_cls,net_prio": {
+ "device": "cgroup",
+ "mount": "/sys/fs/cgroup/net_cls,net_prio",
+ "fs_type": "cgroup",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "net_cls",
+ "net_prio"
+ ]
+ },
+ "configfs,/sys/kernel/config": {
+ "device": "configfs",
+ "mount": "/sys/kernel/config",
+ "fs_type": "configfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "selinuxfs,/sys/fs/selinux": {
+ "device": "selinuxfs",
+ "mount": "/sys/fs/selinux",
+ "fs_type": "selinuxfs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "debugfs,/sys/kernel/debug": {
+ "device": "debugfs",
+ "mount": "/sys/kernel/debug",
+ "fs_type": "debugfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "hugetlbfs,/dev/hugepages": {
+ "device": "hugetlbfs",
+ "mount": "/dev/hugepages",
+ "fs_type": "hugetlbfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "mqueue,/dev/mqueue": {
+ "device": "mqueue",
+ "mount": "/dev/mqueue",
+ "fs_type": "mqueue",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel"
+ ]
+ },
+ "systemd-1,/proc/sys/fs/binfmt_misc": {
+ "device": "systemd-1",
+ "mount": "/proc/sys/fs/binfmt_misc",
+ "fs_type": "autofs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "fd=40",
+ "pgrp=1",
+ "timeout=0",
+ "minproto=5",
+ "maxproto=5",
+ "direct",
+ "pipe_ino=17610"
+ ]
+ },
+ "/var/lib/machines.raw,/var/lib/machines": {
+ "device": "/var/lib/machines.raw",
+ "mount": "/var/lib/machines",
+ "fs_type": "btrfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "space_cache",
+ "subvolid=5",
+ "subvol=/"
+ ]
+ },
+ "fusectl,/sys/fs/fuse/connections": {
+ "device": "fusectl",
+ "mount": "/sys/fs/fuse/connections",
+ "fs_type": "fusectl",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "gvfsd-fuse,/run/user/1000/gvfs": {
+ "device": "gvfsd-fuse",
+ "mount": "/run/user/1000/gvfs",
+ "fs_type": "fuse.gvfsd-fuse",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "relatime",
+ "user_id=1000",
+ "group_id=1000"
+ ]
+ },
+ "/dev/mapper/fedora_host--186-root,/var/lib/docker/devicemapper": {
+ "device": "/dev/mapper/fedora_host--186-root",
+ "mount": "/var/lib/docker/devicemapper",
+ "fs_type": "ext4",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "seclabel",
+ "data=ordered"
+ ],
+ "uuid": "d34cf5e3-3449-4a6c-8179-a1feb2bca6ce"
+ },
+ "binfmt_misc,/proc/sys/fs/binfmt_misc": {
+ "device": "binfmt_misc",
+ "mount": "/proc/sys/fs/binfmt_misc",
+ "fs_type": "binfmt_misc",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8,/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8": {
+ "device": "/dev/mapper/docker-253:1-1180487-0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
+ "mount": "/var/lib/docker/devicemapper/mnt/0868fce108cd2524a4823aad8d665cca018ead39550ca088c440ab05deec13f8",
+ "fs_type": "xfs",
+ "mount_options": [
+ "rw",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "nouuid",
+ "attr2",
+ "inode64",
+ "logbsize=64k",
+ "sunit=128",
+ "swidth=128",
+ "noquota"
+ ],
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
+ },
+ "shm,/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm": {
+ "device": "shm",
+ "mount": "/var/lib/docker/containers/426e513ed508a451e3f70440eed040761f81529e4bc4240e7522d331f3f3bc12/shm",
+ "fs_type": "tmpfs",
+ "mount_options": [
+ "rw",
+ "nosuid",
+ "nodev",
+ "noexec",
+ "relatime",
+ "context=\"system_u:object_r:container_file_t:s0:c523",
+ "c681\"",
+ "size=65536k"
+ ]
+ },
+ "nsfs,/run/docker/netns/1ce89fd79f3d": {
+ "device": "nsfs",
+ "mount": "/run/docker/netns/1ce89fd79f3d",
+ "fs_type": "nsfs",
+ "mount_options": [
+ "rw"
+ ]
+ },
+ "tracefs,/sys/kernel/debug/tracing": {
+ "device": "tracefs",
+ "mount": "/sys/kernel/debug/tracing",
+ "fs_type": "tracefs",
+ "mount_options": [
+ "rw",
+ "relatime"
+ ]
+ },
+ "/dev/loop1,": {
+ "device": "/dev/loop1",
+ "fs_type": "xfs",
+ "uuid": "00e2aa25-20d8-4ad7-b3a5-c501f2f4c123"
+ },
+ "/dev/mapper/docker-253:1-1180487-pool,": {
+ "device": "/dev/mapper/docker-253:1-1180487-pool"
+ },
+ "/dev/sr0,": {
+ "device": "/dev/sr0"
+ },
+ "/dev/loop2,": {
+ "device": "/dev/loop2"
+ },
+ "/dev/sda,": {
+ "device": "/dev/sda"
+ },
+ "/dev/sda2,": {
+ "device": "/dev/sda2",
+ "fs_type": "LVM2_member",
+ "uuid": "66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK"
+ },
+ "/dev/mapper/fedora_host--186-swap,": {
+ "device": "/dev/mapper/fedora_host--186-swap",
+ "fs_type": "swap",
+ "uuid": "eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d"
+ }
+ }
+ },
+ "virtualization": {
+ "systems": {
+ "kvm": "host"
+ },
+ "system": "kvm",
+ "role": "host",
+ "libvirt_version": "2.2.0",
+ "uri": "qemu:///system",
+ "capabilities": {
+
+ },
+ "nodeinfo": {
+ "cores": 4,
+ "cpus": 8,
+ "memory": 16110540,
+ "mhz": 2832,
+ "model": "x86_64",
+ "nodes": 1,
+ "sockets": 1,
+ "threads": 2
+ },
+ "domains": {
+
+ },
+ "networks": {
+ "vagrant-libvirt": {
+ "bridge_name": "virbr1",
+ "uuid": "877ddb27-b39c-427e-a7bf-1aa829389eeb"
+ },
+ "default": {
+ "bridge_name": "virbr0",
+ "uuid": "750d2567-23a8-470d-8a2b-71cd651e30d1"
+ }
+ },
+ "storage": {
+ "virt-images": {
+ "autostart": true,
+ "uuid": "d8a189fa-f98c-462f-9ea4-204eb77a96a1",
+ "allocation": 106412863488,
+ "available": 83998015488,
+ "capacity": 190410878976,
+ "state": 2,
+ "volumes": {
+ "rhel-atomic-host-standard-2014-7-1.qcow2": {
+ "key": "/home/some_user/virt-images/rhel-atomic-host-standard-2014-7-1.qcow2",
+ "name": "rhel-atomic-host-standard-2014-7-1.qcow2",
+ "path": "/home/some_user/virt-images/rhel-atomic-host-standard-2014-7-1.qcow2",
+ "allocation": 1087115264,
+ "capacity": 8589934592,
+ "type": 0
+ },
+ "atomic-beta-instance-7.qcow2": {
+ "key": "/home/some_user/virt-images/atomic-beta-instance-7.qcow2",
+ "name": "atomic-beta-instance-7.qcow2",
+ "path": "/home/some_user/virt-images/atomic-beta-instance-7.qcow2",
+ "allocation": 200704,
+ "capacity": 8589934592,
+ "type": 0
+ },
+ "os1-atomic-meta-data": {
+ "key": "/home/some_user/virt-images/os1-atomic-meta-data",
+ "name": "os1-atomic-meta-data",
+ "path": "/home/some_user/virt-images/os1-atomic-meta-data",
+ "allocation": 4096,
+ "capacity": 49,
+ "type": 0
+ },
+ "atomic-user-data": {
+ "key": "/home/some_user/virt-images/atomic-user-data",
+ "name": "atomic-user-data",
+ "path": "/home/some_user/virt-images/atomic-user-data",
+ "allocation": 4096,
+ "capacity": 512,
+ "type": 0
+ },
+ "qemu-snap.txt": {
+ "key": "/home/some_user/virt-images/qemu-snap.txt",
+ "name": "qemu-snap.txt",
+ "path": "/home/some_user/virt-images/qemu-snap.txt",
+ "allocation": 4096,
+ "capacity": 111,
+ "type": 0
+ },
+ "atomic-beta-instance-5.qcow2": {
+ "key": "/home/some_user/virt-images/atomic-beta-instance-5.qcow2",
+ "name": "atomic-beta-instance-5.qcow2",
+ "path": "/home/some_user/virt-images/atomic-beta-instance-5.qcow2",
+ "allocation": 339091456,
+ "capacity": 8589934592,
+ "type": 0
+ },
+ "meta-data": {
+ "key": "/home/some_user/virt-images/meta-data",
+ "name": "meta-data",
+ "path": "/home/some_user/virt-images/meta-data",
+ "allocation": 4096,
+ "capacity": 49,
+ "type": 0
+ },
+ "atomic-beta-instance-8.qcow2": {
+ "key": "/home/some_user/virt-images/atomic-beta-instance-8.qcow2",
+ "name": "atomic-beta-instance-8.qcow2",
+ "path": "/home/some_user/virt-images/atomic-beta-instance-8.qcow2",
+ "allocation": 322576384,
+ "capacity": 8589934592,
+ "type": 0
+ },
+ "user-data": {
+ "key": "/home/some_user/virt-images/user-data",
+ "name": "user-data",
+ "path": "/home/some_user/virt-images/user-data",
+ "allocation": 4096,
+ "capacity": 512,
+ "type": 0
+ },
+ "rhel-6-2015-10-16.qcow2": {
+ "key": "/home/some_user/virt-images/rhel-6-2015-10-16.qcow2",
+ "name": "rhel-6-2015-10-16.qcow2",
+ "path": "/home/some_user/virt-images/rhel-6-2015-10-16.qcow2",
+ "allocation": 7209422848,
+ "capacity": 17179869184,
+ "type": 0
+ },
+ "atomic_demo_notes.txt": {
+ "key": "/home/some_user/virt-images/atomic_demo_notes.txt",
+ "name": "atomic_demo_notes.txt",
+ "path": "/home/some_user/virt-images/atomic_demo_notes.txt",
+ "allocation": 4096,
+ "capacity": 354,
+ "type": 0
+ },
+ "packer-windows-2012-R2-standard": {
+ "key": "/home/some_user/virt-images/packer-windows-2012-R2-standard",
+ "name": "packer-windows-2012-R2-standard",
+ "path": "/home/some_user/virt-images/packer-windows-2012-R2-standard",
+ "allocation": 16761495552,
+ "capacity": 64424509440,
+ "type": 0
+ },
+ "atomic3-cidata.iso": {
+ "key": "/home/some_user/virt-images/atomic3-cidata.iso",
+ "name": "atomic3-cidata.iso",
+ "path": "/home/some_user/virt-images/atomic3-cidata.iso",
+ "allocation": 376832,
+ "capacity": 374784,
+ "type": 0
+ },
+ ".atomic_demo_notes.txt.swp": {
+ "key": "/home/some_user/virt-images/.atomic_demo_notes.txt.swp",
+ "name": ".atomic_demo_notes.txt.swp",
+ "path": "/home/some_user/virt-images/.atomic_demo_notes.txt.swp",
+ "allocation": 12288,
+ "capacity": 12288,
+ "type": 0
+ },
+ "rhel7-2015-10-13.qcow2": {
+ "key": "/home/some_user/virt-images/rhel7-2015-10-13.qcow2",
+ "name": "rhel7-2015-10-13.qcow2",
+ "path": "/home/some_user/virt-images/rhel7-2015-10-13.qcow2",
+ "allocation": 4679413760,
+ "capacity": 12884901888,
+ "type": 0
+ }
+ }
+ },
+ "default": {
+ "autostart": true,
+ "uuid": "c8d9d160-efc0-4207-81c2-e79d6628f7e1",
+ "allocation": 43745488896,
+ "available": 8964980736,
+ "capacity": 52710469632,
+ "state": 2,
+ "volumes": {
+ "s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img": {
+ "key": "/var/lib/libvirt/images/s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img",
+ "name": "s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img",
+ "path": "/var/lib/libvirt/images/s3than-VAGRANTSLASH-trusty64_vagrant_box_image_0.0.1.img",
+ "allocation": 1258622976,
+ "capacity": 42949672960,
+ "type": 0
+ },
+ "centos-7.0_vagrant_box_image.img": {
+ "key": "/var/lib/libvirt/images/centos-7.0_vagrant_box_image.img",
+ "name": "centos-7.0_vagrant_box_image.img",
+ "path": "/var/lib/libvirt/images/centos-7.0_vagrant_box_image.img",
+ "allocation": 1649414144,
+ "capacity": 42949672960,
+ "type": 0
+ },
+ "baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img": {
+ "key": "/var/lib/libvirt/images/baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img",
+ "name": "baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img",
+ "path": "/var/lib/libvirt/images/baremettle-VAGRANTSLASH-centos-5.10_vagrant_box_image_1.0.0.img",
+ "allocation": 810422272,
+ "capacity": 42949672960,
+ "type": 0
+ },
+ "centos-6_vagrant_box_image.img": {
+ "key": "/var/lib/libvirt/images/centos-6_vagrant_box_image.img",
+ "name": "centos-6_vagrant_box_image.img",
+ "path": "/var/lib/libvirt/images/centos-6_vagrant_box_image.img",
+ "allocation": 1423642624,
+ "capacity": 42949672960,
+ "type": 0
+ },
+ "centos5-ansible_default.img": {
+ "key": "/var/lib/libvirt/images/centos5-ansible_default.img",
+ "name": "centos5-ansible_default.img",
+ "path": "/var/lib/libvirt/images/centos5-ansible_default.img",
+ "allocation": 8986624,
+ "capacity": 42949672960,
+ "type": 0
+ },
+ "ubuntu_default.img": {
+ "key": "/var/lib/libvirt/images/ubuntu_default.img",
+ "name": "ubuntu_default.img",
+ "path": "/var/lib/libvirt/images/ubuntu_default.img",
+ "allocation": 3446833152,
+ "capacity": 42949672960,
+ "type": 0
+ }
+ }
+ },
+ "boot-scratch": {
+ "autostart": true,
+ "uuid": "e5ef4360-b889-4843-84fb-366e8fb30f20",
+ "allocation": 43745488896,
+ "available": 8964980736,
+ "capacity": 52710469632,
+ "state": 2,
+ "volumes": {
+
+ }
+ }
+ }
+ },
+ "network": {
+ "interfaces": {
+ "lo": {
+ "mtu": "65536",
+ "flags": [
+ "LOOPBACK",
+ "UP",
+ "LOWER_UP"
+ ],
+ "encapsulation": "Loopback",
+ "addresses": {
+ "127.0.0.1": {
+ "family": "inet",
+ "prefixlen": "8",
+ "netmask": "255.0.0.0",
+ "scope": "Node",
+ "ip_scope": "LOOPBACK"
+ },
+ "::1": {
+ "family": "inet6",
+ "prefixlen": "128",
+ "scope": "Node",
+ "tags": [
+
+ ],
+ "ip_scope": "LINK LOCAL LOOPBACK"
+ }
+ },
+ "state": "unknown"
+ },
+ "em1": {
+ "type": "em",
+ "number": "1",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "3C:97:0E:E9:28:8E": {
+ "family": "lladdr"
+ }
+ },
+ "state": "down",
+ "link_speed": 0,
+ "duplex": "Unknown! (255)",
+ "port": "Twisted Pair",
+ "transceiver": "internal",
+ "auto_negotiation": "on",
+ "mdi_x": "Unknown (auto)",
+ "ring_params": {
+ "max_rx": 4096,
+ "max_rx_mini": 0,
+ "max_rx_jumbo": 0,
+ "max_tx": 4096,
+ "current_rx": 256,
+ "current_rx_mini": 0,
+ "current_rx_jumbo": 0,
+ "current_tx": 256
+ }
+ },
+ "wlp4s0": {
+ "type": "wlp4s",
+ "number": "0",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP",
+ "LOWER_UP"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "5C:51:4F:E6:A8:E3": {
+ "family": "lladdr"
+ },
+ "192.168.1.19": {
+ "family": "inet",
+ "prefixlen": "24",
+ "netmask": "255.255.255.0",
+ "broadcast": "192.168.1.255",
+ "scope": "Global",
+ "ip_scope": "RFC1918 PRIVATE"
+ },
+ "fe80::5e51:4fff:fee6:a8e3": {
+ "family": "inet6",
+ "prefixlen": "64",
+ "scope": "Link",
+ "tags": [
+
+ ],
+ "ip_scope": "LINK LOCAL UNICAST"
+ }
+ },
+ "state": "up",
+ "arp": {
+ "192.168.1.33": "00:11:d9:39:3e:e0",
+ "192.168.1.20": "ac:3a:7a:a7:49:e8",
+ "192.168.1.17": "00:09:b0:d0:64:19",
+ "192.168.1.22": "ac:bc:32:82:30:bb",
+ "192.168.1.15": "00:11:32:2e:10:d5",
+ "192.168.1.1": "84:1b:5e:03:50:b2",
+ "192.168.1.34": "00:11:d9:5f:e8:e6",
+ "192.168.1.16": "dc:a5:f4:ac:22:3a",
+ "192.168.1.21": "74:c2:46:73:28:d8",
+ "192.168.1.27": "00:17:88:09:3c:bb",
+ "192.168.1.24": "08:62:66:90:a2:b8"
+ },
+ "routes": [
+ {
+ "destination": "default",
+ "family": "inet",
+ "via": "192.168.1.1",
+ "metric": "600",
+ "proto": "static"
+ },
+ {
+ "destination": "66.187.232.64",
+ "family": "inet",
+ "via": "192.168.1.1",
+ "metric": "600",
+ "proto": "static"
+ },
+ {
+ "destination": "192.168.1.0/24",
+ "family": "inet",
+ "scope": "link",
+ "metric": "600",
+ "proto": "kernel",
+ "src": "192.168.1.19"
+ },
+ {
+ "destination": "192.168.1.1",
+ "family": "inet",
+ "scope": "link",
+ "metric": "600",
+ "proto": "static"
+ },
+ {
+ "destination": "fe80::/64",
+ "family": "inet6",
+ "metric": "256",
+ "proto": "kernel"
+ }
+ ],
+ "ring_params": {
+ "max_rx": 0,
+ "max_rx_mini": 0,
+ "max_rx_jumbo": 0,
+ "max_tx": 0,
+ "current_rx": 0,
+ "current_rx_mini": 0,
+ "current_rx_jumbo": 0,
+ "current_tx": 0
+ }
+ },
+ "virbr1": {
+ "type": "virbr",
+ "number": "1",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "52:54:00:B4:68:A9": {
+ "family": "lladdr"
+ },
+ "192.168.121.1": {
+ "family": "inet",
+ "prefixlen": "24",
+ "netmask": "255.255.255.0",
+ "broadcast": "192.168.121.255",
+ "scope": "Global",
+ "ip_scope": "RFC1918 PRIVATE"
+ }
+ },
+ "state": "1",
+ "routes": [
+ {
+ "destination": "192.168.121.0/24",
+ "family": "inet",
+ "scope": "link",
+ "proto": "kernel",
+ "src": "192.168.121.1"
+ }
+ ],
+ "ring_params": {
+
+ }
+ },
+ "virbr1-nic": {
+ "type": "virbr",
+ "number": "1-nic",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "52:54:00:B4:68:A9": {
+ "family": "lladdr"
+ }
+ },
+ "state": "disabled",
+ "link_speed": 10,
+ "duplex": "Full",
+ "port": "Twisted Pair",
+ "transceiver": "internal",
+ "auto_negotiation": "off",
+ "mdi_x": "Unknown",
+ "ring_params": {
+
+ }
+ },
+ "virbr0": {
+ "type": "virbr",
+ "number": "0",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "52:54:00:CE:82:5E": {
+ "family": "lladdr"
+ },
+ "192.168.137.1": {
+ "family": "inet",
+ "prefixlen": "24",
+ "netmask": "255.255.255.0",
+ "broadcast": "192.168.137.255",
+ "scope": "Global",
+ "ip_scope": "RFC1918 PRIVATE"
+ }
+ },
+ "state": "1",
+ "routes": [
+ {
+ "destination": "192.168.137.0/24",
+ "family": "inet",
+ "scope": "link",
+ "proto": "kernel",
+ "src": "192.168.137.1"
+ }
+ ],
+ "ring_params": {
+
+ }
+ },
+ "virbr0-nic": {
+ "type": "virbr",
+ "number": "0-nic",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "52:54:00:CE:82:5E": {
+ "family": "lladdr"
+ }
+ },
+ "state": "disabled",
+ "link_speed": 10,
+ "duplex": "Full",
+ "port": "Twisted Pair",
+ "transceiver": "internal",
+ "auto_negotiation": "off",
+ "mdi_x": "Unknown",
+ "ring_params": {
+
+ }
+ },
+ "docker0": {
+ "type": "docker",
+ "number": "0",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP",
+ "LOWER_UP"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "02:42:EA:15:D8:84": {
+ "family": "lladdr"
+ },
+ "172.17.0.1": {
+ "family": "inet",
+ "prefixlen": "16",
+ "netmask": "255.255.0.0",
+ "scope": "Global",
+ "ip_scope": "RFC1918 PRIVATE"
+ },
+ "fe80::42:eaff:fe15:d884": {
+ "family": "inet6",
+ "prefixlen": "64",
+ "scope": "Link",
+ "tags": [
+
+ ],
+ "ip_scope": "LINK LOCAL UNICAST"
+ }
+ },
+ "state": "0",
+ "arp": {
+ "172.17.0.2": "02:42:ac:11:00:02",
+ "172.17.0.4": "02:42:ac:11:00:04",
+ "172.17.0.3": "02:42:ac:11:00:03"
+ },
+ "routes": [
+ {
+ "destination": "172.17.0.0/16",
+ "family": "inet",
+ "scope": "link",
+ "proto": "kernel",
+ "src": "172.17.0.1"
+ },
+ {
+ "destination": "fe80::/64",
+ "family": "inet6",
+ "metric": "256",
+ "proto": "kernel"
+ }
+ ],
+ "ring_params": {
+
+ }
+ },
+ "vethf20ff12": {
+ "type": "vethf20ff1",
+ "number": "2",
+ "mtu": "1500",
+ "flags": [
+ "BROADCAST",
+ "MULTICAST",
+ "UP",
+ "LOWER_UP"
+ ],
+ "encapsulation": "Ethernet",
+ "addresses": {
+ "AE:6E:2B:1E:A1:31": {
+ "family": "lladdr"
+ },
+ "fe80::ac6e:2bff:fe1e:a131": {
+ "family": "inet6",
+ "prefixlen": "64",
+ "scope": "Link",
+ "tags": [
+
+ ],
+ "ip_scope": "LINK LOCAL UNICAST"
+ }
+ },
+ "state": "forwarding",
+ "routes": [
+ {
+ "destination": "fe80::/64",
+ "family": "inet6",
+ "metric": "256",
+ "proto": "kernel"
+ }
+ ],
+ "link_speed": 10000,
+ "duplex": "Full",
+ "port": "Twisted Pair",
+ "transceiver": "internal",
+ "auto_negotiation": "off",
+ "mdi_x": "Unknown",
+ "ring_params": {
+
+ }
+ },
+ "tun0": {
+ "type": "tun",
+ "number": "0",
+ "mtu": "1360",
+ "flags": [
+ "MULTICAST",
+ "NOARP",
+ "UP",
+ "LOWER_UP"
+ ],
+ "addresses": {
+ "10.10.120.68": {
+ "family": "inet",
+ "prefixlen": "21",
+ "netmask": "255.255.248.0",
+ "broadcast": "10.10.127.255",
+ "scope": "Global",
+ "ip_scope": "RFC1918 PRIVATE"
+ },
+ "fe80::365e:885c:31ca:7670": {
+ "family": "inet6",
+ "prefixlen": "64",
+ "scope": "Link",
+ "tags": [
+ "flags",
+ "800"
+ ],
+ "ip_scope": "LINK LOCAL UNICAST"
+ }
+ },
+ "state": "unknown",
+ "routes": [
+ {
+ "destination": "10.0.0.0/8",
+ "family": "inet",
+ "via": "10.10.120.1",
+ "metric": "50",
+ "proto": "static"
+ },
+ {
+ "destination": "10.10.120.0/21",
+ "family": "inet",
+ "scope": "link",
+ "metric": "50",
+ "proto": "kernel",
+ "src": "10.10.120.68"
+ },
+ {
+ "destination": "fe80::/64",
+ "family": "inet6",
+ "metric": "256",
+ "proto": "kernel"
+ }
+ ]
+ }
+ },
+ "default_interface": "wlp4s0",
+ "default_gateway": "192.168.1.1"
+ },
+ "counters": {
+ "network": {
+ "interfaces": {
+ "lo": {
+ "tx": {
+ "queuelen": "1",
+ "bytes": "202568405",
+ "packets": "1845473",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "202568405",
+ "packets": "1845473",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "em1": {
+ "tx": {
+ "queuelen": "1000",
+ "bytes": "673898037",
+ "packets": "1631282",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "1536186718",
+ "packets": "1994394",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "wlp4s0": {
+ "tx": {
+ "queuelen": "1000",
+ "bytes": "3927670539",
+ "packets": "15146886",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "12367173401",
+ "packets": "23981258",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "virbr1": {
+ "tx": {
+ "queuelen": "1000",
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "virbr1-nic": {
+ "tx": {
+ "queuelen": "1000",
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "virbr0": {
+ "tx": {
+ "queuelen": "1000",
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "virbr0-nic": {
+ "tx": {
+ "queuelen": "1000",
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "0",
+ "packets": "0",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ },
+ "docker0": {
+ "rx": {
+ "bytes": "2471313",
+ "packets": "36915",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ },
+ "tx": {
+ "bytes": "413371670",
+ "packets": "127713",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ }
+ },
+ "vethf20ff12": {
+ "rx": {
+ "bytes": "34391",
+ "packets": "450",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ },
+ "tx": {
+ "bytes": "17919115",
+ "packets": "108069",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ }
+ },
+ "tun0": {
+ "tx": {
+ "queuelen": "100",
+ "bytes": "22343462",
+ "packets": "253442",
+ "errors": "0",
+ "drop": "0",
+ "carrier": "0",
+ "collisions": "0"
+ },
+ "rx": {
+ "bytes": "115160002",
+ "packets": "197529",
+ "errors": "0",
+ "drop": "0",
+ "overrun": "0"
+ }
+ }
+ }
+ }
+ },
+ "ipaddress": "192.168.1.19",
+ "macaddress": "5C:51:4F:E6:A8:E3",
+ "ip6address": "fe80::42:eaff:fe15:d884",
+ "cpu": {
+ "0": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "3238.714",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "0",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "1": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "3137.200",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "0",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "2": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "3077.050",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "1",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "3": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "2759.655",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "1",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "4": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "3419.000",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "2",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "5": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "2752.569",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "2",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "6": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "2953.619",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "3",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "7": {
+ "vendor_id": "GenuineIntel",
+ "family": "6",
+ "model": "60",
+ "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz",
+ "stepping": "3",
+ "mhz": "2927.087",
+ "cache_size": "6144 KB",
+ "physical_id": "0",
+ "core_id": "3",
+ "cores": "4",
+ "flags": [
+ "fpu",
+ "vme",
+ "de",
+ "pse",
+ "tsc",
+ "msr",
+ "pae",
+ "mce",
+ "cx8",
+ "apic",
+ "sep",
+ "mtrr",
+ "pge",
+ "mca",
+ "cmov",
+ "pat",
+ "pse36",
+ "clflush",
+ "dts",
+ "acpi",
+ "mmx",
+ "fxsr",
+ "sse",
+ "sse2",
+ "ss",
+ "ht",
+ "tm",
+ "pbe",
+ "syscall",
+ "nx",
+ "pdpe1gb",
+ "rdtscp",
+ "lm",
+ "constant_tsc",
+ "arch_perfmon",
+ "pebs",
+ "bts",
+ "rep_good",
+ "nopl",
+ "xtopology",
+ "nonstop_tsc",
+ "aperfmperf",
+ "eagerfpu",
+ "pni",
+ "pclmulqdq",
+ "dtes64",
+ "monitor",
+ "ds_cpl",
+ "vmx",
+ "smx",
+ "est",
+ "tm2",
+ "ssse3",
+ "sdbg",
+ "fma",
+ "cx16",
+ "xtpr",
+ "pdcm",
+ "pcid",
+ "sse4_1",
+ "sse4_2",
+ "x2apic",
+ "movbe",
+ "popcnt",
+ "tsc_deadline_timer",
+ "aes",
+ "xsave",
+ "avx",
+ "f16c",
+ "rdrand",
+ "lahf_lm",
+ "abm",
+ "epb",
+ "tpr_shadow",
+ "vnmi",
+ "flexpriority",
+ "ept",
+ "vpid",
+ "fsgsbase",
+ "tsc_adjust",
+ "bmi1",
+ "avx2",
+ "smep",
+ "bmi2",
+ "erms",
+ "invpcid",
+ "xsaveopt",
+ "dtherm",
+ "ida",
+ "arat",
+ "pln",
+ "pts"
+ ]
+ },
+ "total": 8,
+ "real": 1,
+ "cores": 4
+ },
+ "etc": {
+ "passwd": {
+ "root": {
+ "dir": "/root",
+ "gid": 0,
+ "uid": 0,
+ "shell": "/bin/bash",
+ "gecos": "root"
+ },
+ "bin": {
+ "dir": "/bin",
+ "gid": 1,
+ "uid": 1,
+ "shell": "/sbin/nologin",
+ "gecos": "bin"
+ },
+ "daemon": {
+ "dir": "/sbin",
+ "gid": 2,
+ "uid": 2,
+ "shell": "/sbin/nologin",
+ "gecos": "daemon"
+ },
+ "adm": {
+ "dir": "/var/adm",
+ "gid": 4,
+ "uid": 3,
+ "shell": "/sbin/nologin",
+ "gecos": "adm"
+ },
+ "lp": {
+ "dir": "/var/spool/lpd",
+ "gid": 7,
+ "uid": 4,
+ "shell": "/sbin/nologin",
+ "gecos": "lp"
+ },
+ "sync": {
+ "dir": "/sbin",
+ "gid": 0,
+ "uid": 5,
+ "shell": "/bin/sync",
+ "gecos": "sync"
+ },
+ "shutdown": {
+ "dir": "/sbin",
+ "gid": 0,
+ "uid": 6,
+ "shell": "/sbin/shutdown",
+ "gecos": "shutdown"
+ },
+ "halt": {
+ "dir": "/sbin",
+ "gid": 0,
+ "uid": 7,
+ "shell": "/sbin/halt",
+ "gecos": "halt"
+ },
+ "mail": {
+ "dir": "/var/spool/mail",
+ "gid": 12,
+ "uid": 8,
+ "shell": "/sbin/nologin",
+ "gecos": "mail"
+ },
+ "operator": {
+ "dir": "/root",
+ "gid": 0,
+ "uid": 11,
+ "shell": "/sbin/nologin",
+ "gecos": "operator"
+ },
+ "games": {
+ "dir": "/usr/games",
+ "gid": 100,
+ "uid": 12,
+ "shell": "/sbin/nologin",
+ "gecos": "games"
+ },
+ "ftp": {
+ "dir": "/var/ftp",
+ "gid": 50,
+ "uid": 14,
+ "shell": "/sbin/nologin",
+ "gecos": "FTP User"
+ },
+ "nobody": {
+ "dir": "/",
+ "gid": 99,
+ "uid": 99,
+ "shell": "/sbin/nologin",
+ "gecos": "Nobody"
+ },
+ "avahi-autoipd": {
+ "dir": "/var/lib/avahi-autoipd",
+ "gid": 170,
+ "uid": 170,
+ "shell": "/sbin/nologin",
+ "gecos": "Avahi IPv4LL Stack"
+ },
+ "dbus": {
+ "dir": "/",
+ "gid": 81,
+ "uid": 81,
+ "shell": "/sbin/nologin",
+ "gecos": "System message bus"
+ },
+ "polkitd": {
+ "dir": "/",
+ "gid": 999,
+ "uid": 999,
+ "shell": "/sbin/nologin",
+ "gecos": "User for polkitd"
+ },
+ "abrt": {
+ "dir": "/etc/abrt",
+ "gid": 173,
+ "uid": 173,
+ "shell": "/sbin/nologin",
+ "gecos": ""
+ },
+ "usbmuxd": {
+ "dir": "/",
+ "gid": 113,
+ "uid": 113,
+ "shell": "/sbin/nologin",
+ "gecos": "usbmuxd user"
+ },
+ "colord": {
+ "dir": "/var/lib/colord",
+ "gid": 998,
+ "uid": 998,
+ "shell": "/sbin/nologin",
+ "gecos": "User for colord"
+ },
+ "geoclue": {
+ "dir": "/var/lib/geoclue",
+ "gid": 997,
+ "uid": 997,
+ "shell": "/sbin/nologin",
+ "gecos": "User for geoclue"
+ },
+ "rpc": {
+ "dir": "/var/lib/rpcbind",
+ "gid": 32,
+ "uid": 32,
+ "shell": "/sbin/nologin",
+ "gecos": "Rpcbind Daemon"
+ },
+ "rpcuser": {
+ "dir": "/var/lib/nfs",
+ "gid": 29,
+ "uid": 29,
+ "shell": "/sbin/nologin",
+ "gecos": "RPC Service User"
+ },
+ "nfsnobody": {
+ "dir": "/var/lib/nfs",
+ "gid": 65534,
+ "uid": 65534,
+ "shell": "/sbin/nologin",
+ "gecos": "Anonymous NFS User"
+ },
+ "qemu": {
+ "dir": "/",
+ "gid": 107,
+ "uid": 107,
+ "shell": "/sbin/nologin",
+ "gecos": "qemu user"
+ },
+ "rtkit": {
+ "dir": "/proc",
+ "gid": 172,
+ "uid": 172,
+ "shell": "/sbin/nologin",
+ "gecos": "RealtimeKit"
+ },
+ "radvd": {
+ "dir": "/",
+ "gid": 75,
+ "uid": 75,
+ "shell": "/sbin/nologin",
+ "gecos": "radvd user"
+ },
+ "tss": {
+ "dir": "/dev/null",
+ "gid": 59,
+ "uid": 59,
+ "shell": "/sbin/nologin",
+ "gecos": "Account used by the trousers package to sandbox the tcsd daemon"
+ },
+ "unbound": {
+ "dir": "/etc/unbound",
+ "gid": 995,
+ "uid": 996,
+ "shell": "/sbin/nologin",
+ "gecos": "Unbound DNS resolver"
+ },
+ "openvpn": {
+ "dir": "/etc/openvpn",
+ "gid": 994,
+ "uid": 995,
+ "shell": "/sbin/nologin",
+ "gecos": "OpenVPN"
+ },
+ "saslauth": {
+ "dir": "/run/saslauthd",
+ "gid": 76,
+ "uid": 994,
+ "shell": "/sbin/nologin",
+ "gecos": "\"Saslauthd user\""
+ },
+ "avahi": {
+ "dir": "/var/run/avahi-daemon",
+ "gid": 70,
+ "uid": 70,
+ "shell": "/sbin/nologin",
+ "gecos": "Avahi mDNS/DNS-SD Stack"
+ },
+ "pulse": {
+ "dir": "/var/run/pulse",
+ "gid": 992,
+ "uid": 993,
+ "shell": "/sbin/nologin",
+ "gecos": "PulseAudio System Daemon"
+ },
+ "gdm": {
+ "dir": "/var/lib/gdm",
+ "gid": 42,
+ "uid": 42,
+ "shell": "/sbin/nologin",
+ "gecos": ""
+ },
+ "gnome-initial-setup": {
+ "dir": "/run/gnome-initial-setup/",
+ "gid": 990,
+ "uid": 992,
+ "shell": "/sbin/nologin",
+ "gecos": ""
+ },
+ "nm-openconnect": {
+ "dir": "/",
+ "gid": 989,
+ "uid": 991,
+ "shell": "/sbin/nologin",
+ "gecos": "NetworkManager user for OpenConnect"
+ },
+ "sshd": {
+ "dir": "/var/empty/sshd",
+ "gid": 74,
+ "uid": 74,
+ "shell": "/sbin/nologin",
+ "gecos": "Privilege-separated SSH"
+ },
+ "chrony": {
+ "dir": "/var/lib/chrony",
+ "gid": 988,
+ "uid": 990,
+ "shell": "/sbin/nologin",
+ "gecos": ""
+ },
+ "tcpdump": {
+ "dir": "/",
+ "gid": 72,
+ "uid": 72,
+ "shell": "/sbin/nologin",
+ "gecos": ""
+ },
+ "some_user": {
+ "dir": "/home/some_user",
+ "gid": 1000,
+ "uid": 1000,
+ "shell": "/bin/bash",
+ "gecos": "some_user"
+ },
+ "systemd-journal-gateway": {
+ "dir": "/var/log/journal",
+ "gid": 191,
+ "uid": 191,
+ "shell": "/sbin/nologin",
+ "gecos": "Journal Gateway"
+ },
+ "postgres": {
+ "dir": "/var/lib/pgsql",
+ "gid": 26,
+ "uid": 26,
+ "shell": "/bin/bash",
+ "gecos": "PostgreSQL Server"
+ },
+ "dockerroot": {
+ "dir": "/var/lib/docker",
+ "gid": 977,
+ "uid": 984,
+ "shell": "/sbin/nologin",
+ "gecos": "Docker User"
+ },
+ "apache": {
+ "dir": "/usr/share/httpd",
+ "gid": 48,
+ "uid": 48,
+ "shell": "/sbin/nologin",
+ "gecos": "Apache"
+ },
+ "systemd-network": {
+ "dir": "/",
+ "gid": 974,
+ "uid": 982,
+ "shell": "/sbin/nologin",
+ "gecos": "systemd Network Management"
+ },
+ "systemd-resolve": {
+ "dir": "/",
+ "gid": 973,
+ "uid": 981,
+ "shell": "/sbin/nologin",
+ "gecos": "systemd Resolver"
+ },
+ "systemd-bus-proxy": {
+ "dir": "/",
+ "gid": 972,
+ "uid": 980,
+ "shell": "/sbin/nologin",
+ "gecos": "systemd Bus Proxy"
+ },
+ "systemd-journal-remote": {
+ "dir": "//var/log/journal/remote",
+ "gid": 970,
+ "uid": 979,
+ "shell": "/sbin/nologin",
+ "gecos": "Journal Remote"
+ },
+ "systemd-journal-upload": {
+ "dir": "//var/log/journal/upload",
+ "gid": 969,
+ "uid": 978,
+ "shell": "/sbin/nologin",
+ "gecos": "Journal Upload"
+ },
+ "setroubleshoot": {
+ "dir": "/var/lib/setroubleshoot",
+ "gid": 967,
+ "uid": 977,
+ "shell": "/sbin/nologin",
+ "gecos": ""
+ },
+ "oprofile": {
+ "dir": "/var/lib/oprofile",
+ "gid": 16,
+ "uid": 16,
+ "shell": "/sbin/nologin",
+ "gecos": "Special user account to be used by OProfile"
+ }
+ },
+ "group": {
+ "root": {
+ "gid": 0,
+ "members": [
+
+ ]
+ },
+ "bin": {
+ "gid": 1,
+ "members": [
+
+ ]
+ },
+ "daemon": {
+ "gid": 2,
+ "members": [
+
+ ]
+ },
+ "sys": {
+ "gid": 3,
+ "members": [
+
+ ]
+ },
+ "adm": {
+ "gid": 4,
+ "members": [
+ "logcheck"
+ ]
+ },
+ "tty": {
+ "gid": 5,
+ "members": [
+
+ ]
+ },
+ "disk": {
+ "gid": 6,
+ "members": [
+
+ ]
+ },
+ "lp": {
+ "gid": 7,
+ "members": [
+
+ ]
+ },
+ "mem": {
+ "gid": 8,
+ "members": [
+
+ ]
+ },
+ "kmem": {
+ "gid": 9,
+ "members": [
+
+ ]
+ },
+ "wheel": {
+ "gid": 10,
+ "members": [
+
+ ]
+ },
+ "cdrom": {
+ "gid": 11,
+ "members": [
+
+ ]
+ },
+ "mail": {
+ "gid": 12,
+ "members": [
+
+ ]
+ },
+ "man": {
+ "gid": 15,
+ "members": [
+
+ ]
+ },
+ "dialout": {
+ "gid": 18,
+ "members": [
+ "lirc"
+ ]
+ },
+ "floppy": {
+ "gid": 19,
+ "members": [
+
+ ]
+ },
+ "games": {
+ "gid": 20,
+ "members": [
+
+ ]
+ },
+ "tape": {
+ "gid": 30,
+ "members": [
+
+ ]
+ },
+ "video": {
+ "gid": 39,
+ "members": [
+
+ ]
+ },
+ "ftp": {
+ "gid": 50,
+ "members": [
+
+ ]
+ },
+ "lock": {
+ "gid": 54,
+ "members": [
+ "lirc"
+ ]
+ },
+ "audio": {
+ "gid": 63,
+ "members": [
+
+ ]
+ },
+ "nobody": {
+ "gid": 99,
+ "members": [
+
+ ]
+ },
+ "users": {
+ "gid": 100,
+ "members": [
+
+ ]
+ },
+ "utmp": {
+ "gid": 22,
+ "members": [
+
+ ]
+ },
+ "utempter": {
+ "gid": 35,
+ "members": [
+
+ ]
+ },
+ "avahi-autoipd": {
+ "gid": 170,
+ "members": [
+
+ ]
+ },
+ "systemd-journal": {
+ "gid": 190,
+ "members": [
+
+ ]
+ },
+ "dbus": {
+ "gid": 81,
+ "members": [
+
+ ]
+ },
+ "polkitd": {
+ "gid": 999,
+ "members": [
+
+ ]
+ },
+ "abrt": {
+ "gid": 173,
+ "members": [
+
+ ]
+ },
+ "dip": {
+ "gid": 40,
+ "members": [
+
+ ]
+ },
+ "usbmuxd": {
+ "gid": 113,
+ "members": [
+
+ ]
+ },
+ "colord": {
+ "gid": 998,
+ "members": [
+
+ ]
+ },
+ "geoclue": {
+ "gid": 997,
+ "members": [
+
+ ]
+ },
+ "ssh_keys": {
+ "gid": 996,
+ "members": [
+
+ ]
+ },
+ "rpc": {
+ "gid": 32,
+ "members": [
+
+ ]
+ },
+ "rpcuser": {
+ "gid": 29,
+ "members": [
+
+ ]
+ },
+ "nfsnobody": {
+ "gid": 65534,
+ "members": [
+
+ ]
+ },
+ "kvm": {
+ "gid": 36,
+ "members": [
+ "qemu"
+ ]
+ },
+ "qemu": {
+ "gid": 107,
+ "members": [
+
+ ]
+ },
+ "rtkit": {
+ "gid": 172,
+ "members": [
+
+ ]
+ },
+ "radvd": {
+ "gid": 75,
+ "members": [
+
+ ]
+ },
+ "tss": {
+ "gid": 59,
+ "members": [
+
+ ]
+ },
+ "unbound": {
+ "gid": 995,
+ "members": [
+
+ ]
+ },
+ "openvpn": {
+ "gid": 994,
+ "members": [
+
+ ]
+ },
+ "saslauth": {
+ "gid": 76,
+ "members": [
+
+ ]
+ },
+ "avahi": {
+ "gid": 70,
+ "members": [
+
+ ]
+ },
+ "brlapi": {
+ "gid": 993,
+ "members": [
+
+ ]
+ },
+ "pulse": {
+ "gid": 992,
+ "members": [
+
+ ]
+ },
+ "pulse-access": {
+ "gid": 991,
+ "members": [
+
+ ]
+ },
+ "gdm": {
+ "gid": 42,
+ "members": [
+
+ ]
+ },
+ "gnome-initial-setup": {
+ "gid": 990,
+ "members": [
+
+ ]
+ },
+ "nm-openconnect": {
+ "gid": 989,
+ "members": [
+
+ ]
+ },
+ "sshd": {
+ "gid": 74,
+ "members": [
+
+ ]
+ },
+ "slocate": {
+ "gid": 21,
+ "members": [
+
+ ]
+ },
+ "chrony": {
+ "gid": 988,
+ "members": [
+
+ ]
+ },
+ "tcpdump": {
+ "gid": 72,
+ "members": [
+
+ ]
+ },
+ "some_user": {
+ "gid": 1000,
+ "members": [
+ "some_user"
+ ]
+ },
+ "docker": {
+ "gid": 986,
+ "members": [
+ "some_user"
+ ]
+ }
+ },
+ "c": {
+ "gcc": {
+ "target": "x86_64-redhat-linux",
+ "configured_with": "../configure --enable-bootstrap --enable-languages=c,c++,objc,obj-c++,fortran,ada,go,lto --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info --with-bugurl=http://bugzilla.redhat.com/bugzilla --enable-shared --enable-threads=posix --enable-checking=release --enable-multilib --with-system-zlib --enable-__cxa_atexit --disable-libunwind-exceptions --enable-gnu-unique-object --enable-linker-build-id --with-linker-hash-style=gnu --enable-plugin --enable-initfini-array --disable-libgcj --with-isl --enable-libmpx --enable-gnu-indirect-function --with-tune=generic --with-arch_32=i686 --build=x86_64-redhat-linux",
+ "thread_model": "posix",
+ "description": "gcc version 6.3.1 20161221 (Red Hat 6.3.1-1) (GCC) ",
+ "version": "6.3.1"
+ },
+ "glibc": {
+ "version": "2.24",
+ "description": "GNU C Library (GNU libc) stable release version 2.24, by Roland McGrath et al."
+ }
+ },
+ "lua": {
+ "version": "5.3.4"
+ },
+ "ruby": {
+ "platform": "x86_64-linux",
+ "version": "2.3.3",
+ "release_date": "2016-11-21",
+ "target": "x86_64-redhat-linux-gnu",
+ "target_cpu": "x86_64",
+ "target_vendor": "redhat",
+ "target_os": "linux",
+ "host": "x86_64-redhat-linux-gnu",
+ "host_cpu": "x86_64",
+ "host_os": "linux-gnu",
+ "host_vendor": "redhat",
+ "bin_dir": "/usr/bin",
+ "ruby_bin": "/usr/bin/ruby",
+ "gems_dir": "/home/some_user/.gem/ruby",
+ "gem_bin": "/usr/bin/gem"
+ }
+ },
+ "command": {
+ "ps": "ps -ef"
+ },
+ "root_group": "root",
+ "fips": {
+ "kernel": {
+ "enabled": false
+ }
+ },
+ "hostname": "myhostname",
+ "machinename": "myhostname",
+ "fqdn": "myhostname",
+ "domain": null,
+ "machine_id": "1234567abcede123456123456123456a",
+ "privateaddress": "192.168.1.100",
+ "keys": {
+ "ssh": {
+
+ }
+ },
+ "time": {
+ "timezone": "EDT"
+ },
+ "sessions": {
+ "by_session": {
+ "1918": {
+ "session": "1918",
+ "uid": "1000",
+ "user": "some_user",
+ "seat": null
+ },
+ "5": {
+ "session": "5",
+ "uid": "1000",
+ "user": "some_user",
+ "seat": "seat0"
+ },
+ "3": {
+ "session": "3",
+ "uid": "0",
+ "user": "root",
+ "seat": "seat0"
+ }
+ },
+ "by_user": {
+ "some_user": [
+ {
+ "session": "1918",
+ "uid": "1000",
+ "user": "some_user",
+ "seat": null
+ },
+ {
+ "session": "5",
+ "uid": "1000",
+ "user": "some_user",
+ "seat": "seat0"
+ }
+ ],
+ "root": [
+ {
+ "session": "3",
+ "uid": "0",
+ "user": "root",
+ "seat": "seat0"
+ }
+ ]
+ }
+ },
+ "hostnamectl": {
+ "static_hostname": "myhostname",
+ "icon_name": "computer-laptop",
+ "chassis": "laptop",
+        "machine_id": "12345123451234512345123451242323",
+ "boot_id": "3d5d5512341234123412341234123423",
+ "operating_system": "Fedora 25 (Workstation Edition)",
+ "cpe_os_name": "cpe",
+ "kernel": "Linux 4.9.14-200.fc25.x86_64",
+ "architecture": "x86-64"
+ },
+ "block_device": {
+ "dm-1": {
+ "size": "104857600",
+ "removable": "0",
+ "rotational": "0",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "loop1": {
+ "size": "209715200",
+ "removable": "0",
+ "rotational": "1",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "sr0": {
+ "size": "2097151",
+ "removable": "1",
+ "model": "DVD-RAM UJ8E2",
+ "rev": "SB01",
+ "state": "running",
+ "timeout": "30",
+ "vendor": "MATSHITA",
+ "queue_depth": "1",
+ "rotational": "1",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "dm-2": {
+ "size": "378093568",
+ "removable": "0",
+ "rotational": "0",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "loop2": {
+ "size": "4194304",
+ "removable": "0",
+ "rotational": "1",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "dm-0": {
+ "size": "16138240",
+ "removable": "0",
+ "rotational": "0",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "loop0": {
+ "size": "1024000",
+ "removable": "0",
+ "rotational": "1",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "sda": {
+ "size": "500118192",
+ "removable": "0",
+ "model": "SAMSUNG MZ7TD256",
+ "rev": "2L5Q",
+ "state": "running",
+ "timeout": "30",
+ "vendor": "ATA",
+ "queue_depth": "31",
+ "rotational": "0",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "dm-5": {
+ "size": "20971520",
+ "removable": "0",
+ "rotational": "1",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ },
+ "dm-3": {
+ "size": "209715200",
+ "removable": "0",
+ "rotational": "1",
+ "physical_block_size": "512",
+ "logical_block_size": "512"
+ }
+ },
+ "sysconf": {
+ "LINK_MAX": 65000,
+ "_POSIX_LINK_MAX": 65000,
+ "MAX_CANON": 255,
+ "_POSIX_MAX_CANON": 255,
+ "MAX_INPUT": 255,
+ "_POSIX_MAX_INPUT": 255,
+ "NAME_MAX": 255,
+ "_POSIX_NAME_MAX": 255,
+ "PATH_MAX": 4096,
+ "_POSIX_PATH_MAX": 4096,
+ "PIPE_BUF": 4096,
+ "_POSIX_PIPE_BUF": 4096,
+ "SOCK_MAXBUF": null,
+ "_POSIX_ASYNC_IO": null,
+ "_POSIX_CHOWN_RESTRICTED": 1,
+ "_POSIX_NO_TRUNC": 1,
+ "_POSIX_PRIO_IO": null,
+ "_POSIX_SYNC_IO": null,
+ "_POSIX_VDISABLE": 0,
+ "ARG_MAX": 2097152,
+ "ATEXIT_MAX": 2147483647,
+ "CHAR_BIT": 8,
+ "CHAR_MAX": 127,
+ "CHAR_MIN": -128,
+ "CHILD_MAX": 62844,
+ "CLK_TCK": 100,
+ "INT_MAX": 2147483647,
+ "INT_MIN": -2147483648,
+ "IOV_MAX": 1024,
+ "LOGNAME_MAX": 256,
+ "LONG_BIT": 64,
+ "MB_LEN_MAX": 16,
+ "NGROUPS_MAX": 65536,
+ "NL_ARGMAX": 4096,
+ "NL_LANGMAX": 2048,
+ "NL_MSGMAX": 2147483647,
+ "NL_NMAX": 2147483647,
+ "NL_SETMAX": 2147483647,
+ "NL_TEXTMAX": 2147483647,
+ "NSS_BUFLEN_GROUP": 1024,
+ "NSS_BUFLEN_PASSWD": 1024,
+ "NZERO": 20,
+ "OPEN_MAX": 1024,
+ "PAGESIZE": 4096,
+ "PAGE_SIZE": 4096,
+ "PASS_MAX": 8192,
+ "PTHREAD_DESTRUCTOR_ITERATIONS": 4,
+ "PTHREAD_KEYS_MAX": 1024,
+ "PTHREAD_STACK_MIN": 16384,
+ "PTHREAD_THREADS_MAX": null,
+ "SCHAR_MAX": 127,
+ "SCHAR_MIN": -128,
+ "SHRT_MAX": 32767,
+ "SHRT_MIN": -32768,
+ "SSIZE_MAX": 32767,
+ "TTY_NAME_MAX": 32,
+ "TZNAME_MAX": 6,
+ "UCHAR_MAX": 255,
+ "UINT_MAX": 4294967295,
+ "UIO_MAXIOV": 1024,
+ "ULONG_MAX": 18446744073709551615,
+ "USHRT_MAX": 65535,
+ "WORD_BIT": 32,
+ "_AVPHYS_PAGES": 955772,
+ "_NPROCESSORS_CONF": 8,
+ "_NPROCESSORS_ONLN": 8,
+ "_PHYS_PAGES": 4027635,
+ "_POSIX_ARG_MAX": 2097152,
+ "_POSIX_ASYNCHRONOUS_IO": 200809,
+ "_POSIX_CHILD_MAX": 62844,
+ "_POSIX_FSYNC": 200809,
+ "_POSIX_JOB_CONTROL": 1,
+ "_POSIX_MAPPED_FILES": 200809,
+ "_POSIX_MEMLOCK": 200809,
+ "_POSIX_MEMLOCK_RANGE": 200809,
+ "_POSIX_MEMORY_PROTECTION": 200809,
+ "_POSIX_MESSAGE_PASSING": 200809,
+ "_POSIX_NGROUPS_MAX": 65536,
+ "_POSIX_OPEN_MAX": 1024,
+ "_POSIX_PII": null,
+ "_POSIX_PII_INTERNET": null,
+ "_POSIX_PII_INTERNET_DGRAM": null,
+ "_POSIX_PII_INTERNET_STREAM": null,
+ "_POSIX_PII_OSI": null,
+ "_POSIX_PII_OSI_CLTS": null,
+ "_POSIX_PII_OSI_COTS": null,
+ "_POSIX_PII_OSI_M": null,
+ "_POSIX_PII_SOCKET": null,
+ "_POSIX_PII_XTI": null,
+ "_POSIX_POLL": null,
+ "_POSIX_PRIORITIZED_IO": 200809,
+ "_POSIX_PRIORITY_SCHEDULING": 200809,
+ "_POSIX_REALTIME_SIGNALS": 200809,
+ "_POSIX_SAVED_IDS": 1,
+ "_POSIX_SELECT": null,
+ "_POSIX_SEMAPHORES": 200809,
+ "_POSIX_SHARED_MEMORY_OBJECTS": 200809,
+ "_POSIX_SSIZE_MAX": 32767,
+ "_POSIX_STREAM_MAX": 16,
+ "_POSIX_SYNCHRONIZED_IO": 200809,
+ "_POSIX_THREADS": 200809,
+ "_POSIX_THREAD_ATTR_STACKADDR": 200809,
+ "_POSIX_THREAD_ATTR_STACKSIZE": 200809,
+ "_POSIX_THREAD_PRIORITY_SCHEDULING": 200809,
+ "_POSIX_THREAD_PRIO_INHERIT": 200809,
+ "_POSIX_THREAD_PRIO_PROTECT": 200809,
+ "_POSIX_THREAD_ROBUST_PRIO_INHERIT": null,
+ "_POSIX_THREAD_ROBUST_PRIO_PROTECT": null,
+ "_POSIX_THREAD_PROCESS_SHARED": 200809,
+ "_POSIX_THREAD_SAFE_FUNCTIONS": 200809,
+ "_POSIX_TIMERS": 200809,
+ "TIMER_MAX": null,
+ "_POSIX_TZNAME_MAX": 6,
+ "_POSIX_VERSION": 200809,
+ "_T_IOV_MAX": null,
+ "_XOPEN_CRYPT": 1,
+ "_XOPEN_ENH_I18N": 1,
+ "_XOPEN_LEGACY": 1,
+ "_XOPEN_REALTIME": 1,
+ "_XOPEN_REALTIME_THREADS": 1,
+ "_XOPEN_SHM": 1,
+ "_XOPEN_UNIX": 1,
+ "_XOPEN_VERSION": 700,
+ "_XOPEN_XCU_VERSION": 4,
+ "_XOPEN_XPG2": 1,
+ "_XOPEN_XPG3": 1,
+ "_XOPEN_XPG4": 1,
+ "BC_BASE_MAX": 99,
+ "BC_DIM_MAX": 2048,
+ "BC_SCALE_MAX": 99,
+ "BC_STRING_MAX": 1000,
+ "CHARCLASS_NAME_MAX": 2048,
+ "COLL_WEIGHTS_MAX": 255,
+ "EQUIV_CLASS_MAX": null,
+ "EXPR_NEST_MAX": 32,
+ "LINE_MAX": 2048,
+ "POSIX2_BC_BASE_MAX": 99,
+ "POSIX2_BC_DIM_MAX": 2048,
+ "POSIX2_BC_SCALE_MAX": 99,
+ "POSIX2_BC_STRING_MAX": 1000,
+ "POSIX2_CHAR_TERM": 200809,
+ "POSIX2_COLL_WEIGHTS_MAX": 255,
+ "POSIX2_C_BIND": 200809,
+ "POSIX2_C_DEV": 200809,
+ "POSIX2_C_VERSION": 200809,
+ "POSIX2_EXPR_NEST_MAX": 32,
+ "POSIX2_FORT_DEV": null,
+ "POSIX2_FORT_RUN": null,
+ "_POSIX2_LINE_MAX": 2048,
+ "POSIX2_LINE_MAX": 2048,
+ "POSIX2_LOCALEDEF": 200809,
+ "POSIX2_RE_DUP_MAX": 32767,
+ "POSIX2_SW_DEV": 200809,
+ "POSIX2_UPE": null,
+ "POSIX2_VERSION": 200809,
+ "RE_DUP_MAX": 32767,
+ "PATH": "/usr/bin",
+ "CS_PATH": "/usr/bin",
+ "LFS_CFLAGS": null,
+ "LFS_LDFLAGS": null,
+ "LFS_LIBS": null,
+ "LFS_LINTFLAGS": null,
+ "LFS64_CFLAGS": "-D_LARGEFILE64_SOURCE",
+ "LFS64_LDFLAGS": null,
+ "LFS64_LIBS": null,
+ "LFS64_LINTFLAGS": "-D_LARGEFILE64_SOURCE",
+ "_XBS5_WIDTH_RESTRICTED_ENVS": "XBS5_LP64_OFF64",
+ "XBS5_WIDTH_RESTRICTED_ENVS": "XBS5_LP64_OFF64",
+ "_XBS5_ILP32_OFF32": null,
+ "XBS5_ILP32_OFF32_CFLAGS": null,
+ "XBS5_ILP32_OFF32_LDFLAGS": null,
+ "XBS5_ILP32_OFF32_LIBS": null,
+ "XBS5_ILP32_OFF32_LINTFLAGS": null,
+ "_XBS5_ILP32_OFFBIG": null,
+ "XBS5_ILP32_OFFBIG_CFLAGS": null,
+ "XBS5_ILP32_OFFBIG_LDFLAGS": null,
+ "XBS5_ILP32_OFFBIG_LIBS": null,
+ "XBS5_ILP32_OFFBIG_LINTFLAGS": null,
+ "_XBS5_LP64_OFF64": 1,
+ "XBS5_LP64_OFF64_CFLAGS": "-m64",
+ "XBS5_LP64_OFF64_LDFLAGS": "-m64",
+ "XBS5_LP64_OFF64_LIBS": null,
+ "XBS5_LP64_OFF64_LINTFLAGS": null,
+ "_XBS5_LPBIG_OFFBIG": null,
+ "XBS5_LPBIG_OFFBIG_CFLAGS": null,
+ "XBS5_LPBIG_OFFBIG_LDFLAGS": null,
+ "XBS5_LPBIG_OFFBIG_LIBS": null,
+ "XBS5_LPBIG_OFFBIG_LINTFLAGS": null,
+ "_POSIX_V6_ILP32_OFF32": null,
+ "POSIX_V6_ILP32_OFF32_CFLAGS": null,
+ "POSIX_V6_ILP32_OFF32_LDFLAGS": null,
+ "POSIX_V6_ILP32_OFF32_LIBS": null,
+ "POSIX_V6_ILP32_OFF32_LINTFLAGS": null,
+ "_POSIX_V6_WIDTH_RESTRICTED_ENVS": "POSIX_V6_LP64_OFF64",
+ "POSIX_V6_WIDTH_RESTRICTED_ENVS": "POSIX_V6_LP64_OFF64",
+ "_POSIX_V6_ILP32_OFFBIG": null,
+ "POSIX_V6_ILP32_OFFBIG_CFLAGS": null,
+ "POSIX_V6_ILP32_OFFBIG_LDFLAGS": null,
+ "POSIX_V6_ILP32_OFFBIG_LIBS": null,
+ "POSIX_V6_ILP32_OFFBIG_LINTFLAGS": null,
+ "_POSIX_V6_LP64_OFF64": 1,
+ "POSIX_V6_LP64_OFF64_CFLAGS": "-m64",
+ "POSIX_V6_LP64_OFF64_LDFLAGS": "-m64",
+ "POSIX_V6_LP64_OFF64_LIBS": null,
+ "POSIX_V6_LP64_OFF64_LINTFLAGS": null,
+ "_POSIX_V6_LPBIG_OFFBIG": null,
+ "POSIX_V6_LPBIG_OFFBIG_CFLAGS": null,
+ "POSIX_V6_LPBIG_OFFBIG_LDFLAGS": null,
+ "POSIX_V6_LPBIG_OFFBIG_LIBS": null,
+ "POSIX_V6_LPBIG_OFFBIG_LINTFLAGS": null,
+ "_POSIX_V7_ILP32_OFF32": null,
+ "POSIX_V7_ILP32_OFF32_CFLAGS": null,
+ "POSIX_V7_ILP32_OFF32_LDFLAGS": null,
+ "POSIX_V7_ILP32_OFF32_LIBS": null,
+ "POSIX_V7_ILP32_OFF32_LINTFLAGS": null,
+ "_POSIX_V7_WIDTH_RESTRICTED_ENVS": "POSIX_V7_LP64_OFF64",
+ "POSIX_V7_WIDTH_RESTRICTED_ENVS": "POSIX_V7_LP64_OFF64",
+ "_POSIX_V7_ILP32_OFFBIG": null,
+ "POSIX_V7_ILP32_OFFBIG_CFLAGS": null,
+ "POSIX_V7_ILP32_OFFBIG_LDFLAGS": null,
+ "POSIX_V7_ILP32_OFFBIG_LIBS": null,
+ "POSIX_V7_ILP32_OFFBIG_LINTFLAGS": null,
+ "_POSIX_V7_LP64_OFF64": 1,
+ "POSIX_V7_LP64_OFF64_CFLAGS": "-m64",
+ "POSIX_V7_LP64_OFF64_LDFLAGS": "-m64",
+ "POSIX_V7_LP64_OFF64_LIBS": null,
+ "POSIX_V7_LP64_OFF64_LINTFLAGS": null,
+ "_POSIX_V7_LPBIG_OFFBIG": null,
+ "POSIX_V7_LPBIG_OFFBIG_CFLAGS": null,
+ "POSIX_V7_LPBIG_OFFBIG_LDFLAGS": null,
+ "POSIX_V7_LPBIG_OFFBIG_LIBS": null,
+ "POSIX_V7_LPBIG_OFFBIG_LINTFLAGS": null,
+ "_POSIX_ADVISORY_INFO": 200809,
+ "_POSIX_BARRIERS": 200809,
+ "_POSIX_BASE": null,
+ "_POSIX_C_LANG_SUPPORT": null,
+ "_POSIX_C_LANG_SUPPORT_R": null,
+ "_POSIX_CLOCK_SELECTION": 200809,
+ "_POSIX_CPUTIME": 200809,
+ "_POSIX_THREAD_CPUTIME": 200809,
+ "_POSIX_DEVICE_SPECIFIC": null,
+ "_POSIX_DEVICE_SPECIFIC_R": null,
+ "_POSIX_FD_MGMT": null,
+ "_POSIX_FIFO": null,
+ "_POSIX_PIPE": null,
+ "_POSIX_FILE_ATTRIBUTES": null,
+ "_POSIX_FILE_LOCKING": null,
+ "_POSIX_FILE_SYSTEM": null,
+ "_POSIX_MONOTONIC_CLOCK": 200809,
+ "_POSIX_MULTI_PROCESS": null,
+ "_POSIX_SINGLE_PROCESS": null,
+ "_POSIX_NETWORKING": null,
+ "_POSIX_READER_WRITER_LOCKS": 200809,
+ "_POSIX_SPIN_LOCKS": 200809,
+ "_POSIX_REGEXP": 1,
+ "_REGEX_VERSION": null,
+ "_POSIX_SHELL": 1,
+ "_POSIX_SIGNALS": null,
+ "_POSIX_SPAWN": 200809,
+ "_POSIX_SPORADIC_SERVER": null,
+ "_POSIX_THREAD_SPORADIC_SERVER": null,
+ "_POSIX_SYSTEM_DATABASE": null,
+ "_POSIX_SYSTEM_DATABASE_R": null,
+ "_POSIX_TIMEOUTS": 200809,
+ "_POSIX_TYPED_MEMORY_OBJECTS": null,
+ "_POSIX_USER_GROUPS": null,
+ "_POSIX_USER_GROUPS_R": null,
+ "POSIX2_PBS": null,
+ "POSIX2_PBS_ACCOUNTING": null,
+ "POSIX2_PBS_LOCATE": null,
+ "POSIX2_PBS_TRACK": null,
+ "POSIX2_PBS_MESSAGE": null,
+ "SYMLOOP_MAX": null,
+ "STREAM_MAX": 16,
+ "AIO_LISTIO_MAX": null,
+ "AIO_MAX": null,
+ "AIO_PRIO_DELTA_MAX": 20,
+ "DELAYTIMER_MAX": 2147483647,
+ "HOST_NAME_MAX": 64,
+ "LOGIN_NAME_MAX": 256,
+ "MQ_OPEN_MAX": null,
+ "MQ_PRIO_MAX": 32768,
+ "_POSIX_DEVICE_IO": null,
+ "_POSIX_TRACE": null,
+ "_POSIX_TRACE_EVENT_FILTER": null,
+ "_POSIX_TRACE_INHERIT": null,
+ "_POSIX_TRACE_LOG": null,
+ "RTSIG_MAX": 32,
+ "SEM_NSEMS_MAX": null,
+ "SEM_VALUE_MAX": 2147483647,
+ "SIGQUEUE_MAX": 62844,
+ "FILESIZEBITS": 64,
+ "POSIX_ALLOC_SIZE_MIN": 4096,
+ "POSIX_REC_INCR_XFER_SIZE": null,
+ "POSIX_REC_MAX_XFER_SIZE": null,
+ "POSIX_REC_MIN_XFER_SIZE": 4096,
+ "POSIX_REC_XFER_ALIGN": 4096,
+ "SYMLINK_MAX": null,
+ "GNU_LIBC_VERSION": "glibc 2.24",
+ "GNU_LIBPTHREAD_VERSION": "NPTL 2.24",
+ "POSIX2_SYMLINKS": 1,
+ "LEVEL1_ICACHE_SIZE": 32768,
+ "LEVEL1_ICACHE_ASSOC": 8,
+ "LEVEL1_ICACHE_LINESIZE": 64,
+ "LEVEL1_DCACHE_SIZE": 32768,
+ "LEVEL1_DCACHE_ASSOC": 8,
+ "LEVEL1_DCACHE_LINESIZE": 64,
+ "LEVEL2_CACHE_SIZE": 262144,
+ "LEVEL2_CACHE_ASSOC": 8,
+ "LEVEL2_CACHE_LINESIZE": 64,
+ "LEVEL3_CACHE_SIZE": 6291456,
+ "LEVEL3_CACHE_ASSOC": 12,
+ "LEVEL3_CACHE_LINESIZE": 64,
+ "LEVEL4_CACHE_SIZE": 0,
+ "LEVEL4_CACHE_ASSOC": 0,
+ "LEVEL4_CACHE_LINESIZE": 0,
+ "IPV6": 200809,
+ "RAW_SOCKETS": 200809,
+ "_POSIX_IPV6": 200809,
+ "_POSIX_RAW_SOCKETS": 200809
+ },
+ "init_package": "systemd",
+ "shells": [
+ "/bin/sh",
+ "/bin/bash",
+ "/sbin/nologin",
+ "/usr/bin/sh",
+ "/usr/bin/bash",
+ "/usr/sbin/nologin",
+ "/usr/bin/zsh",
+ "/bin/zsh"
+ ],
+ "ohai_time": 1492535225.41052,
+ "cloud_v2": null,
+ "cloud": null
+}
+''' # noqa
+
+
+class TestOhaiCollector(BaseFactsTest):
+ __test__ = True
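+ # '!all' disables the default collectors; only the ohai subset is gathered.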
+ gather_subset = ['!all', 'ohai']
+ valid_subsets = ['ohai']
+ fact_namespace = 'ansible_ohai'
+ collector_class = OhaiFactCollector
+
+ def _mock_module(self):
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': self.gather_subset,
+ 'gather_timeout': 10,
+ 'filter': '*'}
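+ # Deliberately fake binary path and canned ohai output, so no real command runs.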
+ mock_module.get_bin_path = Mock(return_value='/not/actually/ohai')
+ mock_module.run_command = Mock(return_value=(0, ohai_json_output, ''))
+ return mock_module
+
+ @patch('ansible.module_utils.facts.other.ohai.OhaiFactCollector.get_ohai_output')
+ def test_bogus_json(self, mock_get_ohai_output):
+ module = self._mock_module()
+
+ # bogus json
+ mock_get_ohai_output.return_value = '{'
+
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict, {})
+
+ @patch('ansible.module_utils.facts.other.ohai.OhaiFactCollector.run_ohai')
+ def test_ohai_non_zero_return_code(self, mock_run_ohai):
+ module = self._mock_module()
+
+ # valid JSON, but a non-zero return code
+ mock_run_ohai.return_value = (1, '{}', '')
+
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+
+ # A failed ohai run should contribute no 'ohai' facts at all
+ self.assertNotIn('ohai', facts_dict)
+ self.assertEqual(facts_dict, {})
diff --git a/test/units/module_utils/facts/system/__init__.py b/test/units/module_utils/facts/system/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/facts/system/__init__.py
diff --git a/test/units/module_utils/facts/system/distribution/__init__.py b/test/units/module_utils/facts/system/distribution/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/__init__.py
diff --git a/test/units/module_utils/facts/system/distribution/conftest.py b/test/units/module_utils/facts/system/distribution/conftest.py
new file mode 100644
index 00000000..d27b97f0
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/conftest.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import pytest
+
+from units.compat.mock import Mock
+
+
+@pytest.fixture
+def mock_module():
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': ['all'],
+ 'gather_timeout': 5,
+ 'filter': '*'}
+ mock_module.get_bin_path = Mock(return_value=None)
+ return mock_module
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json
new file mode 100644
index 00000000..88d9ad8d
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_na.json
@@ -0,0 +1,24 @@
+{
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "arch",
+ "name": "Arch Linux",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n",
+ "/etc/arch-release": ""
+ },
+ "name": "Arch Linux NA",
+ "result": {
+ "distribution_release": "NA",
+ "distribution": "Archlinux",
+ "distribution_major_version": "NA",
+ "os_family": "Archlinux",
+ "distribution_version": "NA"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json
new file mode 100644
index 00000000..a24bb3af
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/arch_linux_no_arch-release_na.json
@@ -0,0 +1,23 @@
+{
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "arch",
+ "name": "Arch Linux",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nID_LIKE=archlinux\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\n\n"
+ },
+ "name": "Arch Linux no arch-release NA",
+ "result": {
+ "distribution_release": "NA",
+ "distribution": "Archlinux",
+ "distribution_major_version": "NA",
+ "os_family": "Archlinux",
+ "distribution_version": "NA"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json b/test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json
new file mode 100644
index 00000000..8f356367
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/archlinux_rolling.json
@@ -0,0 +1,31 @@
+{
+ "name": "Archlinux rolling",
+ "distro": {
+ "codename": "n/a",
+ "id": "arch",
+ "name": "Arch",
+ "version": "rolling",
+ "version_best": "rolling",
+ "lsb_release_info": {
+ "lsb_version": "1.4",
+ "distributor_id": "Arch",
+ "description": "Arch Linux",
+ "release": "rolling",
+ "codename": "n/a"
+ },
+ "os_release_info": {}
+ },
+ "input": {
+ "/etc/arch-release": "Arch Linux release\n",
+ "/etc/lsb-release": "LSB_VERSION=1.4\nDISTRIB_ID=Arch\nDISTRIB_RELEASE=rolling\nDISTRIB_DESCRIPTION=\"Arch Linux\"\n",
+ "/usr/lib/os-release": "NAME=\"Arch Linux\"\nPRETTY_NAME=\"Arch Linux\"\nID=arch\nBUILD_ID=rolling\nANSI_COLOR=\"0;36\"\nHOME_URL=\"https://www.archlinux.org/\"\nDOCUMENTATION_URL=\"https://wiki.archlinux.org/\"\nSUPPORT_URL=\"https://bbs.archlinux.org/\"\nBUG_REPORT_URL=\"https://bugs.archlinux.org/\"\nLOGO=archlinux\n"
+ },
+ "platform.dist": ["arch", "rolling", "n/a"],
+ "result": {
+ "distribution": "Archlinux",
+ "distribution_version": "rolling",
+ "distribution_release": "n/a",
+ "distribution_major_version": "rolling",
+ "os_family": "Archlinux"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json b/test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json
new file mode 100644
index 00000000..c99a0739
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/centos_6.7.json
@@ -0,0 +1,31 @@
+{
+ "name": "CentOS 6.7",
+ "platform.dist": ["centos", "6.7", "Final"],
+ "distro": {
+ "codename": "Final",
+ "id": "centos",
+ "name": "CentOS Linux",
+ "version": "6.7",
+ "version_best": "6.7",
+ "os_release_info": {},
+ "lsb_release_info": {
+ "release": "6.7",
+ "codename": "Final",
+ "distributor_id": "CentOS",
+ "lsb_version": ":base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch",
+ "description": "CentOS release 6.7 (Final)"
+ }
+ },
+ "input": {
+ "/etc/redhat-release": "CentOS release 6.7 (Final)\n",
+ "/etc/lsb-release": "LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch\n",
+ "/etc/system-release": "CentOS release 6.7 (Final)\n"
+ },
+ "result": {
+ "distribution_release": "Final",
+ "distribution": "CentOS",
+ "distribution_major_version": "6",
+ "os_family": "RedHat",
+ "distribution_version": "6.7"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json
new file mode 100644
index 00000000..1a99a86f
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_26580.json
@@ -0,0 +1,24 @@
+{
+ "platform.dist": ["Clear Linux OS", "26580", "clear-linux-os"],
+ "distro": {
+ "codename": "",
+ "id": "clear-linux-os",
+ "name": "Clear Linux OS",
+ "version": "26580",
+ "version_best": "26580",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=26580\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\"",
+ "/usr/lib/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=26580\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\""
+ },
+ "name": "ClearLinux 26580",
+ "result": {
+ "distribution_release": "clear-linux-os",
+ "distribution": "Clear Linux OS",
+ "distribution_major_version": "26580",
+ "os_family": "ClearLinux",
+ "distribution_version": "26580"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json
new file mode 100644
index 00000000..30b76688
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/clearlinux_28120.json
@@ -0,0 +1,24 @@
+{
+ "platform.dist": ["Clear Linux OS", "28120", "clear-linux-os"],
+ "distro": {
+ "codename": "",
+ "id": "clear-linux-os",
+ "name": "Clear Linux OS",
+ "version": "28120",
+ "version_best": "28120",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=28120\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\"",
+ "/usr/lib/os-release": "NAME=\"Clear Linux OS\"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=28120\nPRETTY_NAME=\"Clear Linux OS\"\nANSI_COLOR=\"1;35\"\nHOME_URL=\"https://clearlinux.org\"\nSUPPORT_URL=\"https://clearlinux.org\"\nBUG_REPORT_URL=\"mailto:dev@lists.clearlinux.org\"\nPRIVACY_POLICY_URL=\"http://www.intel.com/privacy\""
+ },
+ "name": "ClearLinux 28120",
+ "result": {
+ "distribution_release": "clear-linux-os",
+ "distribution": "Clear Linux OS",
+ "distribution_major_version": "28120",
+ "os_family": "ClearLinux",
+ "distribution_version": "28120"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json b/test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json
new file mode 100644
index 00000000..af43704c
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/core_os_1911.5.0.json
@@ -0,0 +1,23 @@
+{
+ "name": "Core OS",
+ "input": {
+ "/usr/lib/os-release": "NAME=\"Container Linux by CoreOS\"\nID=coreos\nVERSION=1911.5.0\nVERSION_ID=1911.5.0\nBUILD_ID=2018-12-15-2317\nPRETTY_NAME=\"Container Linux by CoreOS 1911.5.0 (Rhyolite)\"\nANSI_COLOR=\"38;5;75\"\nHOME_URL=\"https://coreos.com/\"\nBUG_REPORT_URL=\"https://issues.coreos.com\"\nCOREOS_BOARD=\"amd64-usr\"",
+ "/etc/lsb-release": "DISTRIB_ID=CoreOS\nDISTRIB_RELEASE=1911.5.0\nDISTRIB_CODENAME=\"Rhyolite\"\nDISTRIB_DESCRIPTION=\"CoreOS 1911.5.0 (Rhyolite)\""
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "Rhyolite",
+ "id": "coreos",
+ "name": "CoreOS",
+ "version": "1911.5.0",
+ "version_best": "1911.5.0",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "platform.release": "",
+ "result": {
+ "distribution": "Coreos",
+ "distribution_major_version": "1911",
+ "distribution_version": "1911.5.0"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json b/test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json
new file mode 100644
index 00000000..ccd06d99
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/core_os_976.0.0.json
@@ -0,0 +1,23 @@
+{
+ "name": "Core OS",
+ "input": {
+ "/etc/os-release": "NAME=CoreOS\nID=coreos\nVERSION=976.0.0\nVERSION_ID=976.0.0\nBUILD_ID=2016-03-03-2324\nPRETTY_NAME=\"CoreOS 976.0.0 (Coeur Rouge)\"\nANSI_COLOR=\"1;32\"\nHOME_URL=\"https://coreos.com/\"\nBUG_REPORT_URL=\"https://github.com/coreos/bugs/issues\"",
+ "/etc/lsb-release": "DISTRIB_ID=CoreOS\nDISTRIB_RELEASE=976.0.0\nDISTRIB_CODENAME=\"Coeur Rouge\"\nDISTRIB_DESCRIPTION=\"CoreOS 976.0.0 (Coeur Rouge)\""
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "Coeur Rouge",
+ "id": "coreos",
+ "name": "CoreOS",
+ "version": "976.0.0",
+ "version_best": "976.0.0",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "platform.release": "",
+ "result": {
+ "distribution": "CoreOS",
+ "distribution_major_version": "976",
+ "distribution_version": "976.0.0"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json
new file mode 100644
index 00000000..ad9c3f79
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_2.5.4.json
@@ -0,0 +1,23 @@
+{
+ "name": "Cumulus Linux 2.5.4",
+ "input": {
+ "/etc/os-release": "NAME=\"Cumulus Linux\"\nVERSION_ID=2.5.4\nVERSION=\"2.5.4-6dc6e80-201510091936-build\"\nPRETTY_NAME=\"Cumulus Linux\"\nID=cumulus-linux\nID_LIKE=debian\nCPE_NAME=cpe:/o:cumulusnetworks:cumulus_linux:2.5.4-6dc6e80-201510091936-build\nHOME_URL=\"http://www.cumulusnetworks.com/\"\nSUPPORT_URL=\"http://support.cumulusnetworks.com/\""
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "cumulus-linux",
+ "name": "Cumulus Linux",
+ "version": "2.5.4",
+ "version_best": "2.5.4",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Cumulus Linux",
+ "distribution_major_version": "2",
+ "distribution_release": "2.5.4-6dc6e80-201510091936-build",
+ "os_family": "Debian",
+ "distribution_version": "2.5.4"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json
new file mode 100644
index 00000000..ec44af13
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/cumulus_linux_3.7.3.json
@@ -0,0 +1,23 @@
+{
+ "name": "Cumulus Linux 3.7.3",
+ "input": {
+ "/etc/os-release": "NAME=\"Cumulus Linux\"\nVERSION_ID=3.7.3\nVERSION=\"Cumulus Linux 3.7.3\"\nPRETTY_NAME=\"Cumulus Linux\"\nID=cumulus-linux\nID_LIKE=debian\nCPE_NAME=cpe:/o:cumulusnetworks:cumulus_linux:3.7.3\nHOME_URL=\"http://www.cumulusnetworks.com/\"\nSUPPORT_URL=\"http://support.cumulusnetworks.com/\""
+ },
+ "platform.dist": ["debian", "8.11", ""],
+ "distro": {
+ "codename": "",
+ "id": "cumulus-linux",
+ "name": "Cumulus Linux",
+ "version": "3.7.3",
+ "version_best": "3.7.3",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Cumulus Linux",
+ "distribution_major_version": "3",
+ "distribution_release": "Cumulus Linux 3.7.3",
+ "os_family": "Debian",
+ "distribution_version": "3.7.3"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/debian_10.json b/test/units/module_utils/facts/system/distribution/fixtures/debian_10.json
new file mode 100644
index 00000000..20544c1e
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/debian_10.json
@@ -0,0 +1,40 @@
+{
+ "name": "Debian 10",
+ "distro": {
+ "codename": "buster",
+ "id": "debian",
+ "name": "Debian GNU/Linux",
+ "version": "10",
+ "version_best": "10",
+ "lsb_release_info": {
+ "distributor_id": "Debian",
+ "description": "Debian GNU/Linux 10 (buster)",
+ "release": "10",
+ "codename": "buster"
+ },
+ "os_release_info": {
+ "pretty_name": "Debian GNU/Linux 10 (buster)",
+ "name": "Debian GNU/Linux",
+ "version_id": "10",
+ "version": "10 (buster)",
+ "version_codename": "buster",
+ "id": "debian",
+ "home_url": "https://www.debian.org/",
+ "support_url": "https://www.debian.org/support",
+ "bug_report_url": "https://bugs.debian.org/",
+ "codename": "buster"
+ }
+ },
+ "input": {
+ "/etc/os-release": "PRETTY_NAME=\"Debian GNU/Linux 10 (buster)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"10\"\nVERSION=\"10 (buster)\"\nVERSION_CODENAME=buster\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"\n",
+ "/usr/lib/os-release": "PRETTY_NAME=\"Debian GNU/Linux 10 (buster)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"10\"\nVERSION=\"10 (buster)\"\nVERSION_CODENAME=buster\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"\n"
+ },
+ "platform.dist": ["debian", "10", "buster"],
+ "result": {
+ "distribution": "Debian",
+ "distribution_version": "10",
+ "distribution_release": "buster",
+ "distribution_major_version": "10",
+ "os_family": "Debian"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json b/test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json
new file mode 100644
index 00000000..894c9424
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/debian_7.9.json
@@ -0,0 +1,39 @@
+{
+ "name": "Debian 7.9",
+ "input": {
+ "/etc/os-release": "PRETTY_NAME=\"Debian GNU/Linux 7 (wheezy)\"\nNAME=\"Debian GNU/Linux\"\nVERSION_ID=\"7\"\nVERSION=\"7 (wheezy)\"\nID=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"http://www.debian.org/\"\nSUPPORT_URL=\"http://www.debian.org/support/\"\nBUG_REPORT_URL=\"http://bugs.debian.org/\""
+ },
+ "platform.dist": ["debian", "7.9", ""],
+ "distro": {
+ "codename": "wheezy",
+ "id": "debian",
+ "name": "Debian GNU/Linux",
+ "version": "7",
+ "version_best": "7.9",
+ "os_release_info": {
+ "name": "Debian GNU/Linux",
+ "ansi_color": "1;31",
+ "support_url": "http://www.debian.org/support/",
+ "version_id": "7",
+ "bug_report_url": "http://bugs.debian.org/",
+ "pretty_name": "Debian GNU/Linux 7 (wheezy)",
+ "version": "7 (wheezy)",
+ "codename": "wheezy",
+ "home_url": "http://www.debian.org/",
+ "id": "debian"
+ },
+ "lsb_release_info": {
+ "release": "7.9",
+ "codename": "wheezy",
+ "distributor_id": "Debian",
+ "description": "Debian GNU/Linux 7.9 (wheezy)"
+ }
+ },
+ "result": {
+ "distribution": "Debian",
+ "distribution_major_version": "7",
+ "distribution_release": "wheezy",
+ "os_family": "Debian",
+ "distribution_version": "7.9"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json b/test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json
new file mode 100644
index 00000000..23388303
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/debian_stretch_sid.json
@@ -0,0 +1,36 @@
+{
+ "name": "Debian stretch/sid",
+ "input": {
+ "/etc/os-release": "PRETTY_NAME=\"Debian GNU/Linux stretch/sid\"\nNAME=\"Debian GNU/Linux\"\nID=debian\nHOME_URL=\"https://www.debian.org/\"\nSUPPORT_URL=\"https://www.debian.org/support\"\nBUG_REPORT_URL=\"https://bugs.debian.org/\"",
+ "/etc/debian_version": "stretch/sid\n"
+ },
+ "platform.dist": ["debian", "stretch/sid", ""],
+ "distro": {
+ "codename": "stretch",
+ "id": "debian",
+ "name": "Debian GNU/Linux",
+ "version": "9",
+ "version_best": "9.8",
+ "lsb_release_info": {
+ "release": "unstable",
+ "codename": "sid",
+ "distributor_id": "Debian",
+ "description": "Debian GNU/Linux stretch/sid"
+ },
+ "os_release_info": {
+ "name": "Debian GNU/Linux",
+ "support_url": "https://www.debian.org/support",
+ "bug_report_url": "https://bugs.debian.org/",
+ "pretty_name": "Debian GNU/Linux stretch/sid",
+ "home_url": "https://www.debian.org/",
+ "id": "debian"
+ }
+ },
+ "result": {
+ "distribution": "Debian",
+ "distribution_major_version": "9",
+ "distribution_release": "stretch",
+ "os_family": "Debian",
+ "distribution_version": "9.8"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/devuan.json b/test/units/module_utils/facts/system/distribution/fixtures/devuan.json
new file mode 100644
index 00000000..d02fc2e4
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/devuan.json
@@ -0,0 +1,23 @@
+{
+ "name": "Devuan",
+ "input": {
+ "/etc/os-release": "PRETTY_NAME=\"Devuan GNU/Linux ascii\"\nNAME=\"Devuan GNU/Linux\"\nID=devuan\nHOME_URL=\"https://www.devuan.org/\"\nSUPPORT_URL=\"https://devuan.org/os/community\"\nBUG_REPORT_URL=\"https://bugs.devuan.org/\""
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "devuan",
+ "name": "Devuan GNU/Linux",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Devuan",
+ "distribution_major_version": "NA",
+ "distribution_release": "ascii",
+ "os_family": "Debian",
+ "distribution_version": "NA"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json b/test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json
new file mode 100644
index 00000000..cec68d42
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/fedora_22.json
@@ -0,0 +1,25 @@
+{
+ "name": "Fedora 22",
+ "platform.dist": ["fedora", "22", "Twenty Two"],
+ "distro": {
+ "codename": "Twenty Two",
+ "id": "fedora",
+ "name": "Fedora",
+ "version": "22",
+ "version_best": "22",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/redhat-release": "Fedora release 22 (Twenty Two)\n",
+ "/etc/os-release": "NAME=Fedora\nVERSION=\"22 (Twenty Two)\"\nID=fedora\nVERSION_ID=22\nPRETTY_NAME=\"Fedora 22 (Twenty Two)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:22\"\nHOME_URL=\"https://fedoraproject.org/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=22\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=22\nPRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\n",
+ "/etc/system-release": "Fedora release 22 (Twenty Two)\n"
+ },
+ "result": {
+ "distribution_release": "Twenty Two",
+ "distribution": "Fedora",
+ "distribution_major_version": "22",
+ "os_family": "RedHat",
+ "distribution_version": "22"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json b/test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json
new file mode 100644
index 00000000..70b5bc39
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/fedora_25.json
@@ -0,0 +1,25 @@
+{
+ "platform.dist": ["fedora", "25", "Rawhide"],
+ "distro": {
+ "codename": "Rawhide",
+ "id": "fedora",
+ "name": "Fedora",
+ "version": "25",
+ "version_best": "25",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/redhat-release": "Fedora release 25 (Rawhide)\n",
+ "/etc/os-release": "NAME=Fedora\nVERSION=\"25 (Workstation Edition)\"\nID=fedora\nVERSION_ID=25\nPRETTY_NAME=\"Fedora 25 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:25\"\nHOME_URL=\"https://fedoraproject.org/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=rawhide\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=rawhide\nPRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy\nVARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n",
+ "/etc/system-release": "Fedora release 25 (Rawhide)\n"
+ },
+ "name": "Fedora 25",
+ "result": {
+ "distribution_release": "Rawhide",
+ "distribution": "Fedora",
+ "distribution_major_version": "25",
+ "os_family": "RedHat",
+ "distribution_version": "25"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json b/test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json
new file mode 100644
index 00000000..e6d905e9
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/fedora_31.json
@@ -0,0 +1,55 @@
+{
+ "name": "Fedora 31",
+ "distro": {
+ "codename": "",
+ "id": "fedora",
+ "name": "Fedora",
+ "version": "31",
+ "version_best": "31",
+ "lsb_release_info": {
+ "lsb_version": ":core-4.1-amd64:core-4.1-noarch",
+ "distributor_id": "Fedora",
+ "description": "Fedora release 31 (Thirty One)",
+ "release": "31",
+ "codename": "ThirtyOne"
+ },
+ "os_release_info": {
+ "name": "Fedora",
+ "version": "31 (Workstation Edition)",
+ "id": "fedora",
+ "version_id": "31",
+ "version_codename": "",
+ "platform_id": "platform:f31",
+ "pretty_name": "Fedora 31 (Workstation Edition)",
+ "ansi_color": "0;34",
+ "logo": "fedora-logo-icon",
+ "cpe_name": "cpe:/o:fedoraproject:fedora:31",
+ "home_url": "https://fedoraproject.org/",
+ "documentation_url": "https://docs.fedoraproject.org/en-US/fedora/f31/system-administrators-guide/",
+ "support_url": "https://fedoraproject.org/wiki/Communicating_and_getting_help",
+ "bug_report_url": "https://bugzilla.redhat.com/",
+ "redhat_bugzilla_product": "Fedora",
+ "redhat_bugzilla_product_version": "31",
+ "redhat_support_product": "Fedora",
+ "redhat_support_product_version": "31",
+ "privacy_policy_url": "https://fedoraproject.org/wiki/Legal:PrivacyPolicy",
+ "variant": "Workstation Edition",
+ "variant_id": "workstation",
+ "codename": ""
+ }
+ },
+ "input": {
+ "/etc/redhat-release": "Fedora release 31 (Thirty One)\n",
+ "/etc/system-release": "Fedora release 31 (Thirty One)\n",
+ "/etc/os-release": "NAME=Fedora\nVERSION=\"31 (Workstation Edition)\"\nID=fedora\nVERSION_ID=31\nVERSION_CODENAME=\"\"\nPLATFORM_ID=\"platform:f31\"\nPRETTY_NAME=\"Fedora 31 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nLOGO=fedora-logo-icon\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:31\"\nHOME_URL=\"https://fedoraproject.org/\"\nDOCUMENTATION_URL=\"https://docs.fedoraproject.org/en-US/fedora/f31/system-administrators-guide/\"\nSUPPORT_URL=\"https://fedoraproject.org/wiki/Communicating_and_getting_help\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=31\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=31\nPRIVACY_POLICY_URL=\"https://fedoraproject.org/wiki/Legal:PrivacyPolicy\"\nVARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n",
+ "/usr/lib/os-release": "NAME=Fedora\nVERSION=\"31 (Workstation Edition)\"\nID=fedora\nVERSION_ID=31\nVERSION_CODENAME=\"\"\nPLATFORM_ID=\"platform:f31\"\nPRETTY_NAME=\"Fedora 31 (Workstation Edition)\"\nANSI_COLOR=\"0;34\"\nLOGO=fedora-logo-icon\nCPE_NAME=\"cpe:/o:fedoraproject:fedora:31\"\nHOME_URL=\"https://fedoraproject.org/\"\nDOCUMENTATION_URL=\"https://docs.fedoraproject.org/en-US/fedora/f31/system-administrators-guide/\"\nSUPPORT_URL=\"https://fedoraproject.org/wiki/Communicating_and_getting_help\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\nREDHAT_BUGZILLA_PRODUCT=\"Fedora\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=31\nREDHAT_SUPPORT_PRODUCT=\"Fedora\"\nREDHAT_SUPPORT_PRODUCT_VERSION=31\nPRIVACY_POLICY_URL=\"https://fedoraproject.org/wiki/Legal:PrivacyPolicy\"\nVARIANT=\"Workstation Edition\"\nVARIANT_ID=workstation\n"
+ },
+ "platform.dist": ["fedora", "31", ""],
+ "result": {
+ "distribution": "Fedora",
+ "distribution_version": "31",
+ "distribution_release": "",
+ "distribution_major_version": "31",
+ "os_family": "RedHat"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json b/test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json
new file mode 100644
index 00000000..618b2259
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/flatcar_2492.0.0.json
@@ -0,0 +1,24 @@
+{
+ "name": "Flatcar Container Linux",
+ "input": {
+ "/usr/lib/os-release": "NAME=\"Flatcar Container Linux by Kinvolk\"\nID=flatcar\nID_LIKE=coreos\nVERSION=2492.0.0\nVERSION_ID=2492.0.0\nBUILD_ID=2020-04-28-2210\nPRETTY_NAME=\"Flatcar Container Linux by Kinvolk 2492.0.0 (Rhyolite)\"\nANSI_COLOR=\"38;5;75\"\nHOME_URL=\"https://flatcar-linux.org/\"\nBUG_REPORT_URL=\"https://issues.flatcar-linux.org\"",
+ "/etc/lsb-release": "DISTRIB_ID=\"Flatcar Container Linux by Kinvolk\"\nDISTRIB_RELEASE=2492.0.0\nDISTRIB_CODENAME=\"Rhyolite\"\nDISTRIB_DESCRIPTION=\"Flatcar Container Linux by Kinvolk 2492.0.0 (Rhyolite)\""
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "Rhyolite",
+ "id": "flatcar",
+ "id_like": "coreos",
+ "name": "Flatcar",
+ "version": "2492.0.0",
+ "version_best": "2492.0.0",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "platform.release": "",
+ "result": {
+ "distribution": "Flatcar",
+ "distribution_major_version": "2492",
+ "distribution_version": "2492.0.0"
+ }
+}
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json b/test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json
new file mode 100644
index 00000000..096b66ff
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/kali_2019.1.json
@@ -0,0 +1,25 @@
+{
+ "name": "Kali 2019.1",
+ "input": {
+ "/etc/os-release": "PRETTY_NAME=\"Kali GNU/Linux Rolling\"\nNAME=\"Kali GNU/Linux\"\nID=kali\nVERSION=\"2019.1\"\nVERSION_ID=\"2019.1\"\nID_LIKE=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"https://www.kali.org/\"\nSUPPORT_URL=\"https://forums.kali.org/\"\nBUG_REPORT_URL=\"https://bugs.kali.org/\"\n",
+ "/etc/lsb-release": "DISTRIB_ID=Kali\nDISTRIB_RELEASE=kali-rolling\nDISTRIB_CODENAME=kali-rolling\nDISTRIB_DESCRIPTION=\"Kali GNU/Linux Rolling\"\n",
+ "/usr/lib/os-release": "PRETTY_NAME=\"Kali GNU/Linux Rolling\"\nNAME=\"Kali GNU/Linux\"\nID=kali\nVERSION=\"2019.1\"\nVERSION_ID=\"2019.1\"\nID_LIKE=debian\nANSI_COLOR=\"1;31\"\nHOME_URL=\"https://www.kali.org/\"\nSUPPORT_URL=\"https://forums.kali.org/\"\nBUG_REPORT_URL=\"https://bugs.kali.org/\"\n"
+ },
+ "platform.dist": ["kali", "2019.1", ""],
+ "distro": {
+ "codename": "kali-rolling",
+ "id": "kali",
+ "name": "Kali GNU/Linux Rolling",
+ "version": "2019.1",
+ "version_best": "2019.1",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Kali",
+ "distribution_version": "2019.1",
+ "distribution_release": "kali-rolling",
+ "distribution_major_version": "2019",
+ "os_family": "Debian"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json b/test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json
new file mode 100644
index 00000000..5ff59c72
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/kde_neon_16.04.json
@@ -0,0 +1,42 @@
+{
+ "platform.dist": ["neon", "16.04", "xenial"],
+ "distro": {
+ "codename": "xenial",
+ "id": "neon",
+ "name": "KDE neon",
+ "version": "16.04",
+ "version_best": "16.04",
+ "os_release_info": {
+ "support_url": "http://help.ubuntu.com/",
+ "version_codename": "xenial",
+ "pretty_name": "Ubuntu 16.04.6 LTS",
+ "home_url": "http://www.ubuntu.com/",
+ "bug_report_url": "http://bugs.launchpad.net/ubuntu/",
+ "version": "16.04.6 LTS (Xenial Xerus)",
+ "version_id": "16.04",
+ "id": "ubuntu",
+ "ubuntu_codename": "xenial",
+ "codename": "xenial",
+ "name": "Ubuntu",
+ "id_like": "debian"
+ },
+ "lsb_release_info": {
+ "description": "Ubuntu 16.04.6 LTS",
+ "release": "16.04",
+ "distributor_id": "Ubuntu",
+ "codename": "xenial"
+ }
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"KDE neon\"\nVERSION=\"5.8\"\nID=neon\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"KDE neon User Edition 5.8\"\nVERSION_ID=\"16.04\"\nHOME_URL=\"http://neon.kde.org/\"\nSUPPORT_URL=\"http://neon.kde.org/\"\nBUG_REPORT_URL=\"http://bugs.kde.org/\"\nVERSION_CODENAME=xenial\nUBUNTU_CODENAME=xenial\n",
+ "/etc/lsb-release": "DISTRIB_ID=neon\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"KDE neon User Edition 5.8\"\n"
+ },
+ "name": "KDE neon 16.04",
+ "result": {
+ "distribution_release": "xenial",
+ "distribution": "KDE neon",
+ "distribution_major_version": "16",
+ "os_family": "Debian",
+ "distribution_version": "16.04"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json
new file mode 100644
index 00000000..74e628e1
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_18.2.json
@@ -0,0 +1,25 @@
+{
+ "platform.dist": ["linuxmint", "18.2", "sonya"],
+ "input": {
+ "/etc/os-release": "NAME=\"Linux Mint\"\nVERSION=\"18.2 (Sonya)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 18.2\"\nVERSION_ID=\"18.2\"\nHOME_URL=\"http://www.linuxmint.com/\"\nSUPPORT_URL=\"http://forums.linuxmint.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/linuxmint/\"\nVERSION_CODENAME=sonya\nUBUNTU_CODENAME=xenial\n",
+ "/usr/lib/os-release": "NAME=\"Linux Mint\"\nVERSION=\"18.2 (Sonya)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 18.2\"\nVERSION_ID=\"18.2\"\nHOME_URL=\"http://www.linuxmint.com/\"\nSUPPORT_URL=\"http://forums.linuxmint.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/linuxmint/\"\nVERSION_CODENAME=sonya\nUBUNTU_CODENAME=xenial\n",
+ "/etc/lsb-release": "DISTRIB_ID=LinuxMint\nDISTRIB_RELEASE=18.2\nDISTRIB_CODENAME=sonya\nDISTRIB_DESCRIPTION=\"Linux Mint 18.2 Sonya\"\n"
+ },
+ "result": {
+ "distribution_release": "sonya",
+ "distribution": "Linux Mint",
+ "distribution_major_version": "18",
+ "os_family": "Debian",
+ "distribution_version": "18.2"
+ },
+ "name": "Linux Mint 18.2",
+ "distro": {
+ "codename": "sonya",
+ "version": "18.2",
+ "id": "linuxmint",
+ "version_best": "18.2",
+ "name": "Linux Mint",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json
new file mode 100644
index 00000000..7712856a
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/linux_mint_19.1.json
@@ -0,0 +1,24 @@
+{
+ "platform.dist": ["linuxmint", "19.1", "tessa"],
+ "input": {
+ "/usr/lib/os-release": "NAME=\"Linux Mint\"\nVERSION=\"19.1 (Tessa)\"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME=\"Linux Mint 19.1\"\nVERSION_ID=\"19.1\"\nHOME_URL=\"https://www.linuxmint.com/\"\nSUPPORT_URL=\"https://forums.ubuntu.com/\"\nBUG_REPORT_URL=\"http: //linuxmint-troubleshooting-guide.readthedocs.io/en/latest/\"\nPRIVACY_POLICY_URL=\"https://www.linuxmint.com/\"\nVERSION_CODENAME=tessa\nUBUNTU_CODENAME=bionic\n",
+ "/etc/lsb-release": "DISTRIB_ID=LinuxMint\nDISTRIB_RELEASE=19.1\nDISTRIB_CODENAME=tessa\nDISTRIB_DESCRIPTION=\"Linux Mint 19.1 Tessa\"\n"
+ },
+ "result": {
+ "distribution_release": "tessa",
+ "distribution": "Linux Mint",
+ "distribution_major_version": "19",
+ "os_family": "Debian",
+ "distribution_version": "19.1"
+ },
+ "name": "Linux Mint 19.1",
+ "distro": {
+ "codename": "tessa",
+ "version": "19.1",
+ "id": "linuxmint",
+ "version_best": "19.1",
+ "name": "Linux Mint",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json
new file mode 100644
index 00000000..bdc942ba
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_3.json
@@ -0,0 +1,25 @@
+{
+ "name": "Nexenta 3",
+ "uname_v": "NexentaOS_134f",
+ "result": {
+ "distribution_release": "Open Storage Appliance v3.1.6",
+ "distribution": "Nexenta",
+ "os_family": "Solaris",
+ "distribution_version": "3.1.6"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "platform.release:": "",
+ "input": {
+ "/etc/release": " Open Storage Appliance v3.1.6\n Copyright (c) 2014 Nexenta Systems, Inc. All Rights Reserved.\n Copyright (c) 2011 Oracle. All Rights Reserved.\n Use is subject to license terms.\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json
new file mode 100644
index 00000000..d24e9bc2
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/nexenta_4.json
@@ -0,0 +1,24 @@
+{
+ "name": "Nexenta 4",
+ "uname_v": "NexentaOS_4:cd604cd066",
+ "result": {
+ "distribution_release": "Open Storage Appliance 4.0.3-FP2",
+ "distribution": "Nexenta",
+ "os_family": "Solaris",
+ "distribution_version": "4.0.3-FP2"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " Open Storage Appliance 4.0.3-FP2\n Copyright (c) 2014 Nexenta Systems, Inc. All Rights Reserved.\n Copyright (c) 2010 Oracle. All Rights Reserved.\n Use is subject to license terms.\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/omnios.json b/test/units/module_utils/facts/system/distribution/fixtures/omnios.json
new file mode 100644
index 00000000..8bb2b445
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/omnios.json
@@ -0,0 +1,24 @@
+{
+ "name": "OmniOS",
+ "uname_v": "omnios-10b9c79",
+ "result": {
+ "distribution_release": "OmniOS v11 r151012",
+ "distribution": "OmniOS",
+ "os_family": "Solaris",
+ "distribution_version": "r151012"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " OmniOS v11 r151012\n Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.\n Use is subject to license terms.\n\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json b/test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json
new file mode 100644
index 00000000..83103864
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/openeuler_20.03.json
@@ -0,0 +1,28 @@
+{
+ "platform.dist": [
+ "openeuler",
+ "20.03",
+ "LTS"
+ ],
+ "input": {
+ "/etc/os-release": "NAME=\"openEuler\"\nVERSION=\"20.03 (LTS)\"\nID=\"openEuler\"\nVERSION_ID=\"20.03\"\nPRETTY_NAME=\"openEuler 20.03 (LTS)\"\nANSI_COLOR=\"0;31\"\n\n",
+ "/etc/system-release": "openEuler release 20.03 (LTS)\n"
+ },
+ "result": {
+ "distribution_release": "LTS",
+ "distribution": "openEuler",
+ "distribution_major_version": "20",
+ "os_family": "RedHat",
+ "distribution_version": "20.03"
+ },
+ "name": "openEuler 20.03",
+ "distro": {
+ "codename": "LTS",
+ "version": "20.03",
+ "id": "openeuler",
+ "version_best": "20.03",
+ "name": "openEuler",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/openindiana.json b/test/units/module_utils/facts/system/distribution/fixtures/openindiana.json
new file mode 100644
index 00000000..a055bb07
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/openindiana.json
@@ -0,0 +1,24 @@
+{
+ "name": "OpenIndiana",
+ "uname_v": "oi_151a9",
+ "result": {
+ "distribution_release": "OpenIndiana Development oi_151.1.9 X86 (powered by illumos)",
+ "distribution": "OpenIndiana",
+ "os_family": "Solaris",
+ "distribution_version": "oi_151a9"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " OpenIndiana Development oi_151.1.9 X86 (powered by illumos)\n Copyright 2011 Oracle and/or its affiliates. All rights reserved.\n Use is subject to license terms.\n Assembled 17 January 2014\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json
new file mode 100644
index 00000000..76d3a338
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_13.2.json
@@ -0,0 +1,24 @@
+{
+ "name": "openSUSE 13.2",
+ "input": {
+ "/etc/SuSE-release": "openSUSE 13.2 (x86_64)\nVERSION = 13.2\nCODENAME = Harlequin\n# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead",
+ "/etc/os-release": "NAME=openSUSE\nVERSION=\"13.2 (Harlequin)\"\nVERSION_ID=\"13.2\"\nPRETTY_NAME=\"openSUSE 13.2 (Harlequin) (x86_64)\"\nID=opensuse\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:opensuse:13.2\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://opensuse.org/\"\nID_LIKE=\"suse\""
+ },
+ "platform.dist": ["SuSE", "13.2", "x86_64"],
+ "distro": {
+ "codename": "",
+ "id": "opensuse-harlequin",
+ "name": "openSUSE Harlequin",
+ "version": "13.2",
+ "version_best": "13.2",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "openSUSE",
+ "distribution_major_version": "13",
+ "distribution_release": "2",
+ "os_family": "Suse",
+ "distribution_version": "13.2"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json
new file mode 100644
index 00000000..54f1265c
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.0.json
@@ -0,0 +1,23 @@
+{
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "opensuse-leap",
+ "name": "openSUSE Leap",
+ "version": "15.0",
+ "version_best": "15.0",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"openSUSE Leap\"\n# VERSION=\"15.0\"\nID=opensuse-leap\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.0\"\nPRETTY_NAME=\"openSUSE Leap 15.0\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.0\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
+ },
+ "name": "openSUSE Leap 15.0",
+ "result": {
+ "distribution_release": "0",
+ "distribution": "openSUSE Leap",
+ "distribution_major_version": "15",
+ "os_family": "Suse",
+ "distribution_version": "15.0"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json
new file mode 100644
index 00000000..d029423b
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_15.1.json
@@ -0,0 +1,36 @@
+{
+ "name": "openSUSE Leap 15.1",
+ "distro": {
+ "codename": "",
+ "id": "opensuse-leap",
+ "name": "openSUSE Leap",
+ "version": "15.1",
+ "version_best": "15.1",
+ "lsb_release_info": {},
+ "os_release_info": {
+ "name": "openSUSE Leap",
+ "version": "15.1",
+ "codename": "",
+ "id": "opensuse-leap",
+ "id_like": "suse opensuse",
+ "version_id": "15.1",
+ "pretty_name": "openSUSE Leap 15.1",
+ "ansi_color": "0;32",
+ "cpe_name": "cpe:/o:opensuse:leap:15.1",
+ "bug_report_url": "https://bugs.opensuse.org",
+ "home_url": "https://www.opensuse.org/"
+ }
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"openSUSE Leap\"\nVERSION=\"15.1\"\nID=\"opensuse-leap\"\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.1\"\nPRETTY_NAME=\"openSUSE Leap 15.1\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.1\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n",
+ "/usr/lib/os-release": "NAME=\"openSUSE Leap\"\nVERSION=\"15.1\"\nID=\"opensuse-leap\"\nID_LIKE=\"suse opensuse\"\nVERSION_ID=\"15.1\"\nPRETTY_NAME=\"openSUSE Leap 15.1\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:leap:15.1\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
+ },
+ "platform.dist": ["opensuse-leap", "15.1", ""],
+ "result": {
+ "distribution": "openSUSE Leap",
+ "distribution_version": "15.1",
+ "distribution_release": "1",
+ "distribution_major_version": "15",
+ "os_family": "Suse"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json
new file mode 100644
index 00000000..2142932e
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_leap_42.1.json
@@ -0,0 +1,24 @@
+{
+ "name": "openSUSE Leap 42.1",
+ "input": {
+ "/etc/os-release": "NAME=\"openSUSE Leap\"\nVERSION=\"42.1\"\nVERSION_ID=\"42.1\"\nPRETTY_NAME=\"openSUSE Leap 42.1 (x86_64)\"\nID=opensuse\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:opensuse:42.1\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://opensuse.org/\"\nID_LIKE=\"suse\"",
+ "/etc/SuSE-release": "openSUSE 42.1 (x86_64)\nVERSION = 42.1\nCODENAME = Malachite\n# /etc/SuSE-release is deprecated and will be removed in the future, use /etc/os-release instead"
+ },
+ "platform.dist": ["SuSE", "42.1", "x86_64"],
+ "distro": {
+ "codename": "",
+ "id": "opensuse-leap",
+ "name": "openSUSE Leap",
+ "version": "42.1",
+ "version_best": "42.1",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "openSUSE Leap",
+ "distribution_major_version": "42",
+ "distribution_release": "1",
+ "os_family": "Suse",
+ "distribution_version": "42.1"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json
new file mode 100644
index 00000000..db1a26ca
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/opensuse_tumbleweed_20160917.json
@@ -0,0 +1,23 @@
+{
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "opensuse-tumbleweed",
+ "name": "openSUSE Tumbleweed",
+ "version": "20160917",
+ "version_best": "20160917",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"openSUSE Tumbleweed\"\n# VERSION=\"20160917\"\nID=opensuse\nID_LIKE=\"suse\"\nVERSION_ID=\"20160917\"\nPRETTY_NAME=\"openSUSE Tumbleweed\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:opensuse:tumbleweed:20160917\"\nBUG_REPORT_URL=\"https://bugs.opensuse.org\"\nHOME_URL=\"https://www.opensuse.org/\"\n"
+ },
+ "name": "openSUSE Tumbleweed 20160917",
+ "result": {
+ "distribution_release": "",
+ "distribution": "openSUSE Tumbleweed",
+ "distribution_major_version": "20160917",
+ "os_family": "Suse",
+ "distribution_version": "20160917"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json b/test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json
new file mode 100644
index 00000000..d3184ef6
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/pop_os_20.04.json
@@ -0,0 +1,29 @@
+{
+ "name": "Pop!_OS 20.04",
+ "distro": {
+ "codename": "focal",
+ "id": "pop",
+ "name": "Pop!_OS",
+ "version": "20.04",
+ "version_best": "20.04",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Pop!_OS\"\nVERSION=\"20.04\"\nID=pop\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"Pop!_OS 20.04\"\nVERSION_ID=\"20.04\"\nHOME_URL=\"https://system76.com/pop\"\nSUPPORT_URL=\"http://support.system76.com\"\nBUG_REPORT_URL=\"https://github.com/pop-os/pop/issues\"\nPRIVACY_POLICY_URL=\"https://system76.com/privacy\"\nVERSION_CODENAME=focal\nUBUNTU_CODENAME=focal\nLOGO=distributor-logo-pop-os\n",
+ "/etc/lsb-release": "DISTRIB_ID=Pop\nDISTRIB_RELEASE=20.04\nDISTRIB_CODENAME=focal\nDISTRIB_DESCRIPTION=\"Pop!_OS 20.04\"\n",
+ "/usr/lib/os-release": "NAME=\"Pop!_OS\"\nVERSION=\"20.04\"\nID=pop\nID_LIKE=\"ubuntu debian\"\nPRETTY_NAME=\"Pop!_OS 20.04\"\nVERSION_ID=\"20.04\"\nHOME_URL=\"https://system76.com/pop\"\nSUPPORT_URL=\"http://support.system76.com\"\nBUG_REPORT_URL=\"https://github.com/pop-os/pop/issues\"\nPRIVACY_POLICY_URL=\"https://system76.com/privacy\"\nVERSION_CODENAME=focal\nUBUNTU_CODENAME=focal\nLOGO=distributor-logo-pop-os\n"
+ },
+ "platform.dist": [
+ "pop",
+ "20.04",
+ "focal"
+ ],
+ "result": {
+ "distribution": "Pop!_OS",
+ "distribution_version": "20.04",
+ "distribution_release": "focal",
+ "distribution_major_version": "20",
+ "os_family": "Debian"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json b/test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json
new file mode 100644
index 00000000..27a77d0a
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/redhat_6.7.json
@@ -0,0 +1,25 @@
+{
+ "name": "RedHat 6.7",
+ "platform.dist": ["redhat", "6.7", "Santiago"],
+ "distro": {
+ "codename": "Santiago",
+ "id": "rhel",
+ "name": "RedHat Enterprise Linux",
+ "version": "6.7",
+ "version_best": "6.7",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/redhat-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n",
+ "/etc/lsb-release": "LSB_VERSION=base-4.0-amd64:base-4.0-noarch:core-4.0-amd64:core-4.0-noarch:graphics-4.0-amd64:graphics-4.0-noarch:printing-4.0-amd64:printing-4.0-noarch\n",
+ "/etc/system-release": "Red Hat Enterprise Linux Server release 6.7 (Santiago)\n"
+ },
+ "result": {
+ "distribution_release": "Santiago",
+ "distribution": "RedHat",
+ "distribution_major_version": "6",
+ "os_family": "RedHat",
+ "distribution_version": "6.7"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json
new file mode 100644
index 00000000..3900f82a
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.2.json
@@ -0,0 +1,25 @@
+{
+ "name": "RedHat 7.2",
+ "platform.dist": ["redhat", "7.2", "Maipo"],
+ "distro": {
+ "codename": "Maipo",
+ "id": "rhel",
+ "name": "RedHat Enterprise Linux",
+ "version": "7.2",
+ "version_best": "7.2",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n",
+ "/etc/os-release": "NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.2 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVERSION_ID=\"7.2\"\nPRETTY_NAME=\"Red Hat Enterprise Linux Server 7.2 (Maipo)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.2:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=7.2\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7.2\"\n",
+ "/etc/system-release": "Red Hat Enterprise Linux Server release 7.2 (Maipo)\n"
+ },
+ "result": {
+ "distribution_release": "Maipo",
+ "distribution": "RedHat",
+ "distribution_major_version": "7",
+ "os_family": "RedHat",
+ "distribution_version": "7.2"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json
new file mode 100644
index 00000000..b240efce
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/redhat_7.7.json
@@ -0,0 +1,43 @@
+{
+ "name": "RedHat 7.7",
+ "distro": {
+ "codename": "Maipo",
+ "id": "rhel",
+ "name": "Red Hat Enterprise Linux Server",
+ "version": "7.7",
+ "version_best": "7.7",
+ "lsb_release_info": {},
+ "os_release_info": {
+ "name": "Red Hat Enterprise Linux Server",
+ "version": "7.7 (Maipo)",
+ "id": "rhel",
+ "id_like": "fedora",
+ "variant": "Server",
+ "variant_id": "server",
+ "version_id": "7.7",
+ "pretty_name": "Red Hat Enterprise Linux Server 7.7 (Maipo)",
+ "ansi_color": "0;31",
+ "cpe_name": "cpe:/o:redhat:enterprise_linux:7.7:GA:server",
+ "home_url": "https://www.redhat.com/",
+ "bug_report_url": "https://bugzilla.redhat.com/",
+ "redhat_bugzilla_product": "Red Hat Enterprise Linux 7",
+ "redhat_bugzilla_product_version": "7.7",
+ "redhat_support_product": "Red Hat Enterprise Linux",
+ "redhat_support_product_version": "7.7",
+ "codename": "Maipo"
+ }
+ },
+ "input": {
+ "/etc/redhat-release": "Red Hat Enterprise Linux Server release 7.7 (Maipo)\n",
+ "/etc/system-release": "Red Hat Enterprise Linux Server release 7.7 (Maipo)\n",
+ "/etc/os-release": "NAME=\"Red Hat Enterprise Linux Server\"\nVERSION=\"7.7 (Maipo)\"\nID=\"rhel\"\nID_LIKE=\"fedora\"\nVARIANT=\"Server\"\nVARIANT_ID=\"server\"\nVERSION_ID=\"7.7\"\nPRETTY_NAME=\"Red Hat Enterprise Linux Server 7.7 (Maipo)\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:redhat:enterprise_linux:7.7:GA:server\"\nHOME_URL=\"https://www.redhat.com/\"\nBUG_REPORT_URL=\"https://bugzilla.redhat.com/\"\n\nREDHAT_BUGZILLA_PRODUCT=\"Red Hat Enterprise Linux 7\"\nREDHAT_BUGZILLA_PRODUCT_VERSION=7.7\nREDHAT_SUPPORT_PRODUCT=\"Red Hat Enterprise Linux\"\nREDHAT_SUPPORT_PRODUCT_VERSION=\"7.7\"\n"
+ },
+ "platform.dist": ["rhel", "7.7", "Maipo"],
+ "result": {
+ "distribution": "RedHat",
+ "distribution_version": "7.7",
+ "distribution_release": "Maipo",
+ "distribution_major_version": "7",
+ "os_family": "RedHat"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json
new file mode 100644
index 00000000..be71f1cb
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.3.json
@@ -0,0 +1,23 @@
+{
+ "name": "SLES 11.3",
+ "input": {
+ "/etc/SuSE-release": "SUSE Linux Enterprise Server 11 (x86_64)\nVERSION = 11\nPATCHLEVEL = 3"
+ },
+ "platform.dist": ["SuSE", "11", "x86_64"],
+ "distro": {
+ "codename": "",
+ "id": "sles",
+ "name": "SUSE Linux Enterprise Server",
+ "version": "11",
+ "version_best": "11",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "SLES",
+ "distribution_major_version": "11",
+ "distribution_release": "3",
+ "os_family": "Suse",
+ "distribution_version": "11.3"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json
new file mode 100644
index 00000000..3e4012a1
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_11.4.json
@@ -0,0 +1,24 @@
+{
+ "name": "SLES 11.4",
+ "input": {
+ "/etc/SuSE-release": "\nSUSE Linux Enterprise Server 11 (x86_64)\nVERSION = 11\nPATCHLEVEL = 4",
+ "/etc/os-release": "NAME=\"SLES\"\nVERSION=\"11.4\"\nVERSION_ID=\"11.4\"\nPRETTY_NAME=\"SUSE Linux Enterprise Server 11 SP4\"\nID=\"sles\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:suse:sles:11:4\""
+ },
+ "platform.dist": ["SuSE", "11", "x86_64"],
+ "distro": {
+ "codename": "",
+ "id": "sles",
+ "name": "SUSE Linux Enterprise Server",
+ "version": "11.4",
+ "version_best": "11.4",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "SLES",
+ "distribution_major_version": "11",
+ "distribution_release": "4",
+ "os_family": "Suse",
+ "distribution_version": "11.4"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json
new file mode 100644
index 00000000..e84bbe5c
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp0.json
@@ -0,0 +1,24 @@
+{
+ "name": "SLES 12 SP0",
+ "input": {
+ "/etc/SuSE-release": "\nSUSE Linux Enterprise Server 12 (x86_64)\nVERSION = 12\nPATCHLEVEL = 0\n# This file is deprecated and will be removed in a future service pack or release.\n# Please check /etc/os-release for details about this release.",
+ "/etc/os-release": "NAME=\"SLES\"\nVERSION=\"12\"\nVERSION_ID=\"12\"\nPRETTY_NAME=\"SUSE Linux Enterprise Server 12\"\nID=\"sles\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:suse:sles:12\""
+ },
+ "platform.dist": ["SuSE", "12", "x86_64"],
+ "distro": {
+ "codename": "",
+ "id": "sles",
+ "name": "SUSE Linux Enterprise Server",
+ "version": "12",
+ "version_best": "12",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "SLES",
+ "distribution_major_version": "12",
+ "distribution_release": "0",
+ "os_family": "Suse",
+ "distribution_version": "12"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json
new file mode 100644
index 00000000..c78d53d8
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/sles_12_sp1.json
@@ -0,0 +1,24 @@
+{
+ "name": "SLES 12 SP1",
+ "input": {
+ "/etc/SuSE-release": "\nSUSE Linux Enterprise Server 12 (x86_64)\nVERSION = 12\nPATCHLEVEL = 0\n# This file is deprecated and will be removed in a future service pack or release.\n# Please check /etc/os-release for details about this release.",
+ "/etc/os-release": "NAME=\"SLES\"\nVERSION=\"12-SP1\"\nVERSION_ID=\"12.1\"\nPRETTY_NAME=\"SUSE Linux Enterprise Server 12 SP1\"\nID=\"sles\"\nANSI_COLOR=\"0;32\"\nCPE_NAME=\"cpe:/o:suse:sles:12:sp1\""
+ },
+ "platform.dist": ["SuSE", "12", "x86_64"],
+ "distro": {
+ "codename": "",
+ "id": "sles",
+ "name": "SUSE Linux Enterprise Server",
+ "version": "12.1",
+ "version_best": "12.1",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "SLES",
+ "distribution_major_version": "12",
+ "distribution_release": "1",
+ "os_family": "Suse",
+ "distribution_version": "12.1"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json b/test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json
new file mode 100644
index 00000000..ae01a106
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/smartos_global_zone.json
@@ -0,0 +1,24 @@
+{
+ "name": "SmartOS Global Zone",
+ "uname_v": "joyent_20160330T234717Z",
+ "result": {
+ "distribution_release": "SmartOS 20160330T234717Z x86_64",
+ "distribution": "SmartOS",
+ "os_family": "Solaris",
+ "distribution_version": "joyent_20160330T234717Z"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " SmartOS 20160330T234717Z x86_64\n Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n Copyright 2010-2012 Joyent, Inc. All Rights Reserved.\n Use is subject to license terms.\n\n Built with the following components:\n\n[\n { \"repo\": \"smartos-live\", \"branch\": \"release-20160331\", \"rev\": \"a77c410f2afe6dc9853a915733caec3609cc50f1\", \"commit_date\": \"1459340323\", \"url\": \"git@github.com:joyent/smartos-live.git\" }\n , { \"repo\": \"illumos-joyent\", \"branch\": \"release-20160331\", \"rev\": \"ab664c06caf06e9ce7586bff956e7709df1e702e\", \"commit_date\": \"1459362533\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-joyent\" }\n , { \"repo\": \"illumos-extra\", \"branch\": \"release-20160331\", \"rev\": \"cc723855bceace3df7860b607c9e3827d47e0ff4\", \"commit_date\": \"1458153188\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-extra\" }\n , { \"repo\": \"kvm\", \"branch\": \"release-20160331\", \"rev\": \"a8befd521c7e673749c64f118585814009fe4b73\", \"commit_date\": \"1450081968\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm\" }\n , { \"repo\": \"kvm-cmd\", \"branch\": \"release-20160331\", \"rev\": \"c1a197c8e4582c68739ab08f7e3198b2392c9820\", \"commit_date\": \"1454723558\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/illumos-kvm-cmd\" }\n , { \"repo\": \"mdata-client\", \"branch\": \"release-20160331\", \"rev\": \"58158c44603a3316928975deccc5d10864832770\", \"commit_date\": \"1429917227\", \"url\": \"/root/data/jenkins/workspace/smartos/MG/build/mdata-client\" }\n]\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json b/test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json
new file mode 100644
index 00000000..8f20113e
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/smartos_zone.json
@@ -0,0 +1,25 @@
+{
+ "name": "SmartOS Zone",
+ "uname_v": "joyent_20160330T234717Z",
+ "result": {
+ "distribution_release": "SmartOS x86_64",
+ "distribution": "SmartOS",
+ "os_family": "Solaris",
+ "distribution_version": "14.3.0"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " SmartOS x86_64\n Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.\n Copyright 2010-2013 Joyent, Inc. All Rights Reserved.\n Use is subject to license terms.\n See joyent_20141002T182809Z for assembly date and time.\n",
+ "/etc/product": "Name: Joyent Instance\nImage: base64 14.3.0\nDocumentation: http://wiki.joyent.com/jpc2/Base+Instance\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json b/test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json
new file mode 100644
index 00000000..f3436b84
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/smgl_na.json
@@ -0,0 +1,23 @@
+{
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "smgl",
+ "name": "Source Mage GNU/Linux",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/sourcemage-release": "Source Mage GNU/Linux x86_64-pc-linux-gnu\nInstalled from tarball using chroot image (Grimoire 0.61-rc) on Thu May 17 17:31:37 UTC 2012\n"
+ },
+ "name": "SMGL NA",
+ "result": {
+ "distribution_release": "NA",
+ "distribution": "SMGL",
+ "distribution_major_version": "NA",
+ "os_family": "SMGL",
+ "distribution_version": "NA"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json
new file mode 100644
index 00000000..de1dbdc8
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_10.json
@@ -0,0 +1,25 @@
+{
+ "name": "Solaris 10",
+ "uname_r": "5.10",
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " Oracle Solaris 10 1/13 s10x_u11wos_24a X86\n Copyright (c) 1983, 2013, Oracle and/or its affiliates. All rights reserved.\n Assembled 17 January 2013\n"
+ },
+ "platform.system": "SunOS",
+ "result": {
+ "distribution_release": "Oracle Solaris 10 1/13 s10x_u11wos_24a X86",
+ "distribution": "Solaris",
+ "os_family": "Solaris",
+ "distribution_major_version": "10",
+ "distribution_version": "10"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json
new file mode 100644
index 00000000..056abe46
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.3.json
@@ -0,0 +1,25 @@
+{
+ "name": "Solaris 11.3",
+ "uname_r": "5.11",
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " Oracle Solaris 11.3 X86\n Copyright (c) 1983, 2018, Oracle and/or its affiliates. All rights reserved.\n Assembled 09 May 2018\n"
+ },
+ "platform.system": "SunOS",
+ "result": {
+ "distribution_release": "Oracle Solaris 11.3 X86",
+ "distribution": "Solaris",
+ "os_family": "Solaris",
+ "distribution_major_version": "11",
+ "distribution_version": "11.3"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json
new file mode 100644
index 00000000..462d5508
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.4.json
@@ -0,0 +1,35 @@
+{
+ "name": "Solaris 11.4",
+ "uname_r": "5.11",
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {
+ "support_url": "https://support.oracle.com/",
+ "name": "Oracle Solaris",
+ "pretty_name": "Oracle Solaris 11.4",
+ "version": "11.4",
+ "id": "solaris",
+ "version_id": "11.4",
+ "build_id": "11.4.0.0.1.15.0",
+ "home_url": "https://www.oracle.com/solaris/",
+ "cpe_name": "cpe:/o:oracle:solaris:11:4"
+ },
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " Oracle Solaris 11.4 SPARC\n Copyright (c) 1983, 2018, Oracle and/or its affiliates. All rights reserved.\n Assembled 14 September 2018\n"
+ },
+ "platform.system": "SunOS",
+ "result": {
+ "distribution_release": "Oracle Solaris 11.4 SPARC",
+ "distribution": "Solaris",
+ "os_family": "Solaris",
+ "distribution_major_version": "11",
+ "distribution_version": "11.4"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json
new file mode 100644
index 00000000..749b8bcd
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/solaris_11.json
@@ -0,0 +1,26 @@
+{
+ "name": "Solaris 11",
+ "uname_v": "11.0",
+ "uname_r": "5.11",
+ "result": {
+ "distribution_release": "Oracle Solaris 11 11/11 X86",
+ "distribution": "Solaris",
+ "os_family": "Solaris",
+ "distribution_major_version": "11",
+ "distribution_version": "11"
+ },
+ "platform.dist": ["", "", ""],
+ "distro": {
+ "codename": "",
+ "id": "",
+ "name": "",
+ "version": "",
+ "version_best": "",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/release": " Oracle Solaris 11 11/11 X86\n Copyright (c) 1983, 2011, Oracle and/or its affiliates. All rights reserved.\n Assembled 18 October 2011\n"
+ },
+ "platform.system": "SunOS"
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json b/test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json
new file mode 100644
index 00000000..7cb9c12b
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/steamos_2.0.json
@@ -0,0 +1,40 @@
+{
+ "name": "SteamOS 2.0",
+ "input": {
+ "/etc/os-release": "PRETTY_NAME=\"SteamOS GNU/Linux 2.0 (brewmaster)\"\nNAME=\"SteamOS GNU/Linux\"\nVERSION_ID=\"2\"\nVERSION=\"2 (brewmaster)\"\nID=steamos\nID_LIKE=debian\nHOME_URL=\"http://www.steampowered.com/\"\nSUPPORT_URL=\"http://support.steampowered.com/\"\nBUG_REPORT_URL=\"http://support.steampowered.com/\"",
+ "/etc/lsb-release": "DISTRIB_ID=SteamOS\nDISTRIB_RELEASE=2.0\nDISTRIB_CODENAME=brewmaster\nDISTRIB_DESCRIPTION=\"SteamOS 2.0\""
+ },
+ "platform.dist": ["Steamos", "2.0", "brewmaster"],
+ "distro": {
+ "codename": "brewmaster",
+ "id": "steamos",
+ "name": "SteamOS GNU/Linux",
+ "version": "2.0",
+ "version_best": "2.0",
+ "os_release_info": {
+ "bug_report_url": "http://support.steampowered.com/",
+ "id_like": "debian",
+ "version_id": "2",
+ "pretty_name": "SteamOS GNU/Linux 2.0 (brewmaster)",
+ "version": "2 (brewmaster)",
+ "home_url": "http://www.steampowered.com/",
+ "name": "SteamOS GNU/Linux",
+ "support_url": "http://support.steampowered.com/",
+ "codename": "brewmaster",
+ "id": "steamos"
+ },
+ "lsb_release_info": {
+ "codename": "brewmaster",
+ "description": "SteamOS 2.0",
+ "distributor_id": "SteamOS",
+ "release": "2.0"
+ }
+ },
+ "result": {
+ "distribution": "SteamOS",
+ "distribution_major_version": "2",
+ "distribution_release": "brewmaster",
+ "os_family": "Debian",
+ "distribution_version": "2.0"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json
new file mode 100644
index 00000000..38a6040f
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_10.04_guess.json
@@ -0,0 +1,23 @@
+{
+ "name": "Ubuntu 10.04 guess",
+ "input": {
+ "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=10.04\nDISTRIB_CODENAME=lucid\nDISTRIB_DESCRIPTION=\"Ubuntu 10.04.4 LTS"
+ },
+ "platform.dist": ["Ubuntu", "10.04", "lucid"],
+ "distro": {
+ "codename": "lucid",
+ "id": "ubuntu",
+ "name": "Ubuntu",
+ "version": "10.04",
+ "version_best": "10.04.1",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Ubuntu",
+ "distribution_major_version": "10",
+ "distribution_release": "lucid",
+ "os_family": "Debian",
+ "distribution_version": "10.04"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json
new file mode 100644
index 00000000..01203b5b
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_12.04.json
@@ -0,0 +1,24 @@
+{
+ "name": "Ubuntu 12.04",
+ "input": {
+ "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=12.04\nDISTRIB_CODENAME=precise\nDISTRIB_DESCRIPTION=\"Ubuntu 12.04.5 LTS\"",
+ "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"12.04.5 LTS, Precise Pangolin\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu precise (12.04.5 LTS)\"\nVERSION_ID=\"12.04\""
+ },
+ "platform.dist": ["Ubuntu", "12.04", "precise"],
+ "distro": {
+ "codename": "precise",
+ "id": "ubuntu",
+ "name": "Ubuntu",
+ "version": "12.04",
+ "version_best": "12.04.5",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Ubuntu",
+ "distribution_major_version": "12",
+ "distribution_release": "precise",
+ "os_family": "Debian",
+ "distribution_version": "12.04"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json
new file mode 100644
index 00000000..5d5af0ae
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_14.04.json
@@ -0,0 +1,24 @@
+{
+ "name": "Ubuntu 14.04",
+ "input": {
+ "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=14.04\nDISTRIB_CODENAME=trusty\nDISTRIB_DESCRIPTION=\"Ubuntu 14.04.4 LTS\"",
+ "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"14.04.4 LTS, Trusty Tahr\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 14.04.4 LTS\"\nVERSION_ID=\"14.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\nSUPPORT_URL=\"http://help.ubuntu.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\""
+ },
+ "platform.dist": ["Ubuntu", "14.04", "trusty"],
+ "distro": {
+ "codename": "trusty",
+ "id": "ubuntu",
+ "name": "Ubuntu",
+ "version": "14.04",
+ "version_best": "14.04.4",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "result": {
+ "distribution": "Ubuntu",
+ "distribution_major_version": "14",
+ "distribution_release": "trusty",
+ "os_family": "Debian",
+ "distribution_version": "14.04"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json
new file mode 100644
index 00000000..f8f50a9d
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_16.04.json
@@ -0,0 +1,24 @@
+{
+ "platform.dist": ["Ubuntu", "16.04", "xenial"],
+ "distro": {
+ "codename": "xenial",
+ "id": "ubuntu",
+ "name": "Ubuntu",
+ "version": "16.04",
+ "version_best": "16.04.6",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"16.04 LTS (Xenial Xerus)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 16.04 LTS\"\nVERSION_ID=\"16.04\"\nHOME_URL=\"http://www.ubuntu.com/\"\nSUPPORT_URL=\"http://help.ubuntu.com/\"\nBUG_REPORT_URL=\"http://bugs.launchpad.net/ubuntu/\"\nUBUNTU_CODENAME=xenial\n",
+ "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=16.04\nDISTRIB_CODENAME=xenial\nDISTRIB_DESCRIPTION=\"Ubuntu 16.04 LTS\"\n"
+ },
+ "name": "Ubuntu 16.04",
+ "result": {
+ "distribution_release": "xenial",
+ "distribution": "Ubuntu",
+ "distribution_major_version": "16",
+ "os_family": "Debian",
+ "distribution_version": "16.04"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json
new file mode 100644
index 00000000..12d15b53
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/ubuntu_18.04.json
@@ -0,0 +1,39 @@
+{
+ "name": "Ubuntu 18.04",
+ "distro": {
+ "codename": "bionic",
+ "id": "ubuntu",
+ "name": "Ubuntu",
+ "version": "18.04",
+ "version_best": "18.04.3",
+ "lsb_release_info": {},
+ "os_release_info": {
+ "name": "Ubuntu",
+ "version": "18.04.3 LTS (Bionic Beaver)",
+ "id": "ubuntu",
+ "id_like": "debian",
+ "pretty_name": "Ubuntu 18.04.3 LTS",
+ "version_id": "18.04",
+ "home_url": "https://www.ubuntu.com/",
+ "support_url": "https://help.ubuntu.com/",
+ "bug_report_url": "https://bugs.launchpad.net/ubuntu/",
+ "privacy_policy_url": "https://www.ubuntu.com/legal/terms-and-policies/privacy-policy",
+ "version_codename": "bionic",
+ "ubuntu_codename": "bionic",
+ "codename": "bionic"
+ }
+ },
+ "input": {
+ "/etc/os-release": "NAME=\"Ubuntu\"\nVERSION=\"18.04.3 LTS (Bionic Beaver)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 18.04.3 LTS\"\nVERSION_ID=\"18.04\"\nHOME_URL=\"https://www.ubuntu.com/\"\nSUPPORT_URL=\"https://help.ubuntu.com/\"\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\nVERSION_CODENAME=bionic\nUBUNTU_CODENAME=bionic\n",
+ "/etc/lsb-release": "DISTRIB_ID=Ubuntu\nDISTRIB_RELEASE=18.04\nDISTRIB_CODENAME=bionic\nDISTRIB_DESCRIPTION=\"Ubuntu 18.04.3 LTS\"\n",
+ "/usr/lib/os-release": "NAME=\"Ubuntu\"\nVERSION=\"18.04.3 LTS (Bionic Beaver)\"\nID=ubuntu\nID_LIKE=debian\nPRETTY_NAME=\"Ubuntu 18.04.3 LTS\"\nVERSION_ID=\"18.04\"\nHOME_URL=\"https://www.ubuntu.com/\"\nSUPPORT_URL=\"https://help.ubuntu.com/\"\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\nVERSION_CODENAME=bionic\nUBUNTU_CODENAME=bionic\n"
+ },
+ "platform.dist": ["ubuntu", "18.04", "bionic"],
+ "result": {
+ "distribution": "Ubuntu",
+ "distribution_version": "18.04",
+ "distribution_release": "bionic",
+ "distribution_major_version": "18",
+ "os_family": "Debian"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json b/test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json
new file mode 100644
index 00000000..d9c2f474
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/fixtures/virtuozzo_7.3.json
@@ -0,0 +1,25 @@
+{
+ "name": "Virtuozzo 7.3",
+ "platform.dist": ["redhat", "7.3", ""],
+ "distro": {
+ "codename": "",
+ "id": "virtuozzo",
+ "name": "Virtuozzo Linux",
+ "version": "7.3",
+ "version_best": "7.3",
+ "os_release_info": {},
+ "lsb_release_info": {}
+ },
+ "input": {
+ "/etc/redhat-release": "Virtuozzo Linux release 7.3\n",
+ "/etc/os-release": "NAME=\"Virtuozzo\"\nVERSION=\"7.0.3\"\nID=\"virtuozzo\"\nID_LIKE=\"rhel fedora\"\nVERSION_ID=\"7\"\nPRETTY_NAME=\"Virtuozzo release 7.0.3\"\nANSI_COLOR=\"0;31\"\nCPE_NAME=\"cpe:/o:virtuozzoproject:vz:7\"\nHOME_URL=\"http://www.virtuozzo.com\"\nBUG_REPORT_URL=\"https://bugs.openvz.org/\"\n",
+ "/etc/system-release": "Virtuozzo release 7.0.3 (640)\n"
+ },
+ "result": {
+ "distribution_release": "NA",
+ "distribution": "Virtuozzo",
+ "distribution_major_version": "7",
+ "os_family": "RedHat",
+ "distribution_version": "7.3"
+ }
+} \ No newline at end of file
diff --git a/test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py b/test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py
new file mode 100644
index 00000000..ab465eae
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/test_distribution_sles4sap.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.facts.system.distribution import DistributionFiles
+
+
+@pytest.mark.parametrize('realpath', ('SUSE_SLES_SAP.prod', 'SLES_SAP.prod'))
+def test_distribution_sles4sap_suse_sles_sap(mock_module, mocker, realpath):
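+ # islink/realpath are patched so the product symlink the SUSE parser
+ # inspects appears to resolve to an SLES_SAP .prod file under
+ # /etc/products.d/, which should make the parser report SLES_SAP.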
+ mocker.patch('os.path.islink', return_value=True)
+ mocker.patch('os.path.realpath', return_value='/etc/products.d/' + realpath)
+
+ test_input = {
+ 'name': 'SUSE',
+ 'path': '',
+ 'data': 'suse',
+ 'collected_facts': None,
+ }
+
+ test_result = (
+ True,
+ {
+ 'distribution': 'SLES_SAP',
+ }
+ )
+
+ distribution = DistributionFiles(module=mock_module())
+ assert test_result == distribution.parse_distribution_file_SUSE(**test_input)
diff --git a/test/units/module_utils/facts/system/distribution/test_distribution_version.py b/test/units/module_utils/facts/system/distribution/test_distribution_version.py
new file mode 100644
index 00000000..091ec8ad
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/test_distribution_version.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import glob
+import json
+import os
+import pytest
+from itertools import product
+
+from ansible.module_utils.six.moves import builtins
+
+# the module we are actually testing (sort of)
+from ansible.module_utils.facts.system.distribution import DistributionFactCollector
+
+# to generate the testcase data, you can use the script gen_distribution_version_testcase.py in hacking/tests
+TESTSETS = []
+
+for datafile in glob.glob(os.path.join(os.path.dirname(__file__), 'fixtures/*.json')):
+ with open(datafile) as f:
+ TESTSETS.append(json.loads(f.read()))
+
+
+@pytest.mark.parametrize("stdin, testcase", product([{}], TESTSETS), ids=lambda x: x.get('name'), indirect=['stdin'])
+def test_distribution_version(am, mocker, testcase):
+ """tests the distribution parsing code of the Facts class
+
+ testsets have
+ * a name (for output/debugging only)
+ * input files that are faked
+ * those should be complete and also include "irrelevant" files that might be mistaken as coming from other distributions
+ * all files that are not listed here are assumed to not exist at all
+ * the output of ansible.module_utils.distro.linux_distribution() [called platform.dist() for historical reasons]
+ * results for the ansible variables distribution* and os_family
+
+ """
+
+ # prepare some mock functions to get the testdata in
+ def mock_get_file_content(fname, default=None, strip=True):
+ """give fake content if it exists, otherwise pretend the file is empty"""
+ data = default
+ if fname in testcase['input']:
+ # for debugging
+ print('faked %s for %s' % (fname, testcase['name']))
+ data = testcase['input'][fname]
+ if strip and data is not None:
+ data = data.strip()
+ return data
+
+ def mock_get_uname(am, flags):
+ if '-v' in flags:
+ return testcase.get('uname_v', None)
+ elif '-r' in flags:
+ return testcase.get('uname_r', None)
+ else:
+ return None
+
+ def mock_file_exists(fname, allow_empty=False):
+ if fname not in testcase['input']:
+ return False
+
+ if allow_empty:
+ return True
+ return bool(len(testcase['input'][fname]))
+
+ def mock_platform_system():
+ return testcase.get('platform.system', 'Linux')
+
+ def mock_platform_release():
+ return testcase.get('platform.release', '')
+
+ def mock_platform_version():
+ return testcase.get('platform.version', '')
+
+ def mock_distro_name():
+ return testcase['distro']['name']
+
+ def mock_distro_id():
+ return testcase['distro']['id']
+
+ def mock_distro_version(best=False):
+ if best:
+ return testcase['distro']['version_best']
+ return testcase['distro']['version']
+
+ def mock_distro_codename():
+ return testcase['distro']['codename']
+
+ def mock_distro_os_release_info():
+ return testcase['distro']['os_release_info']
+
+ def mock_distro_lsb_release_info():
+ return testcase['distro']['lsb_release_info']
+
+ def mock_open(filename, mode='r'):
+ if filename in testcase['input']:
+ file_object = mocker.mock_open(read_data=testcase['input'][filename]).return_value
+ file_object.__iter__.return_value = testcase['input'][filename].splitlines(True)
+ else:
+ file_object = real_open(filename, mode)
+ return file_object
+
+ def mock_os_path_is_file(filename):
+ if filename in testcase['input']:
+ return True
+ return False
+
+ mocker.patch('ansible.module_utils.facts.system.distribution.get_file_content', mock_get_file_content)
+ mocker.patch('ansible.module_utils.facts.system.distribution.get_uname', mock_get_uname)
+ mocker.patch('ansible.module_utils.facts.system.distribution._file_exists', mock_file_exists)
+ mocker.patch('ansible.module_utils.distro.name', mock_distro_name)
+ mocker.patch('ansible.module_utils.distro.id', mock_distro_id)
+ mocker.patch('ansible.module_utils.distro.version', mock_distro_version)
+ mocker.patch('ansible.module_utils.distro.codename', mock_distro_codename)
+ mocker.patch(
+ 'ansible.module_utils.common.sys_info.distro.os_release_info',
+ mock_distro_os_release_info)
+ mocker.patch(
+ 'ansible.module_utils.common.sys_info.distro.lsb_release_info',
+ mock_distro_lsb_release_info)
+ mocker.patch('os.path.isfile', mock_os_path_is_file)
+ mocker.patch('platform.system', mock_platform_system)
+ mocker.patch('platform.release', mock_platform_release)
+ mocker.patch('platform.version', mock_platform_version)
+
+ real_open = builtins.open
+ mocker.patch.object(builtins, 'open', new=mock_open)
+
+ # run Facts()
+ distro_collector = DistributionFactCollector()
+ generated_facts = distro_collector.collect(am)
+
+ # compare with the expected output
+
+ # testcase['result'] has a list of variables and values it expects Facts() to set
+ for key, val in testcase['result'].items():
+ assert key in generated_facts
+ msg = 'Comparing value of %s on %s, should: %s, is: %s' %\
+ (key, testcase['name'], val, generated_facts[key])
+ assert generated_facts[key] == val, msg
diff --git a/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py
new file mode 100644
index 00000000..c0957566
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import pytest
+
+from ansible.module_utils.facts.system.distribution import DistributionFiles
+
+
+@pytest.fixture
+def test_input():
+ return {
+ 'name': 'Clearlinux',
+ 'path': '/usr/lib/os-release',
+ 'collected_facts': None,
+ }
+
+
+def test_parse_distribution_file_clear_linux(mock_module, test_input):
+ test_input['data'] = open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files/ClearLinux')).read()
+
+ result = (
+ True,
+ {
+ 'distribution': 'Clear Linux OS',
+ 'distribution_major_version': '28120',
+ 'distribution_release': 'clear-linux-os',
+ 'distribution_version': '28120'
+ }
+ )
+
+ distribution = DistributionFiles(module=mock_module())
+ assert result == distribution.parse_distribution_file_ClearLinux(**test_input)
+
+
+@pytest.mark.parametrize('distro_file', ('CoreOS', 'LinuxMint'))
+def test_parse_distribution_file_clear_linux_no_match(mock_module, distro_file, test_input):
+ """
+ Test against data from Linux Mint and CoreOS to ensure we do not get a reported
+ match from parse_distribution_file_ClearLinux()
+ """
+ test_input['data'] = open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files', distro_file)).read()
+
+ result = (False, {})
+
+ distribution = DistributionFiles(module=mock_module())
+ assert result == distribution.parse_distribution_file_ClearLinux(**test_input)
diff --git a/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py
new file mode 100644
index 00000000..53fd4ea1
--- /dev/null
+++ b/test/units/module_utils/facts/system/distribution/test_parse_distribution_file_Slackware.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import pytest
+
+from ansible.module_utils.facts.system.distribution import DistributionFiles
+
+
+@pytest.mark.parametrize(
+ ('distro_file', 'expected_version'),
+ (
+ ('Slackware', '14.1'),
+ ('SlackwareCurrent', '14.2+'),
+ )
+)
+def test_parse_distribution_file_slackware(mock_module, distro_file, expected_version):
+ test_input = {
+ 'name': 'Slackware',
+ 'data': open(os.path.join(os.path.dirname(__file__), '../../fixtures/distribution_files', distro_file)).read(),
+ 'path': '/etc/os-release',
+ 'collected_facts': None,
+ }
+
+ result = (
+ True,
+ {
+ 'distribution': 'Slackware',
+ 'distribution_version': expected_version
+ }
+ )
+ distribution = DistributionFiles(module=mock_module())
+ assert result == distribution.parse_distribution_file_Slackware(**test_input)
diff --git a/test/units/module_utils/facts/system/test_cmdline.py b/test/units/module_utils/facts/system/test_cmdline.py
new file mode 100644
index 00000000..59cfd118
--- /dev/null
+++ b/test/units/module_utils/facts/system/test_cmdline.py
@@ -0,0 +1,67 @@
+# unit tests for ansible system cmdline fact collectors
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector
+
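+# Each (cmdline, expected) pair below maps a raw /proc/cmdline string to the
+# parsed facts dict: bare tokens such as 'rhgb' become True, 'key=value'
+# tokens become strings, and repeated keys such as 'rd.lvm.lv' are collected
+# into a list.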
+test_data = [
+ (
+ "crashkernel=auto rd.lvm.lv=fedora_test-elementary-os/root rd.lvm.lv=fedora_test-elementary-os/swap rhgb quiet",
+ {
+ 'crashkernel': 'auto',
+ 'quiet': True,
+ 'rd.lvm.lv': [
+ 'fedora_test-elementary-os/root',
+ 'fedora_test-elementary-os/swap',
+ ],
+ 'rhgb': True
+ }
+ ),
+ (
+ "root=/dev/mapper/vg_ssd-root ro rd.lvm.lv=fedora_xenon/root rd.lvm.lv=fedora_xenon/swap rhgb quiet "
+ "resume=/dev/mapper/fedora_xenon-swap crashkernel=128M zswap.enabled=1",
+ {
+ 'crashkernel': '128M',
+ 'quiet': True,
+ 'rd.lvm.lv': [
+ 'fedora_xenon/root',
+ 'fedora_xenon/swap'
+ ],
+ 'resume': '/dev/mapper/fedora_xenon-swap',
+ 'rhgb': True,
+ 'ro': True,
+ 'root': '/dev/mapper/vg_ssd-root',
+ 'zswap.enabled': '1'
+ }
+ ),
+ (
+ "rhgb",
+ {
+ "rhgb": True
+ }
+ ),
+ (
+ "root=/dev/mapper/vg_ssd-root",
+ {
+ 'root': '/dev/mapper/vg_ssd-root',
+ }
+ ),
+ (
+ "",
+ {},
+ )
+]
+
+test_ids = ['lvm_1', 'lvm_2', 'single_without_equal_sign', 'single_with_equal_sign', 'blank_cmdline']
+
+
+@pytest.mark.parametrize("cmdline, cmdline_dict", test_data, ids=test_ids)
+def test_cmd_line_factor(cmdline, cmdline_dict):
+ cmdline_facter = CmdLineFactCollector()
+ parsed_cmdline = cmdline_facter._parse_proc_cmdline_facts(data=cmdline)
+ assert parsed_cmdline == cmdline_dict
diff --git a/test/units/module_utils/facts/system/test_lsb.py b/test/units/module_utils/facts/system/test_lsb.py
new file mode 100644
index 00000000..e2ed2ec0
--- /dev/null
+++ b/test/units/module_utils/facts/system/test_lsb.py
@@ -0,0 +1,108 @@
+# unit tests for ansible system lsb fact collectors
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import Mock, patch
+
+from .. base import BaseFactsTest
+
+from ansible.module_utils.facts.system.lsb import LSBFactCollector
+
+
+lsb_release_a_fedora_output = '''
+LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
+Distributor ID: Fedora
+Description: Fedora release 25 (Twenty Five)
+Release: 25
+Codename: TwentyFive
+''' # noqa
+
+etc_lsb_release_ubuntu14 = '''DISTRIB_ID=Ubuntu
+DISTRIB_RELEASE=14.04
+DISTRIB_CODENAME=trusty
+DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
+'''
+etc_lsb_release_no_decimal = '''DISTRIB_ID=AwesomeOS
+DISTRIB_RELEASE=11
+DISTRIB_CODENAME=stonehenge
+DISTRIB_DESCRIPTION="AwesomeÖS 11"
+'''
+
+
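+# The tests below exercise both collection paths: parsing `lsb_release -a`
+# output when the binary is found, and falling back to /etc/lsb-release
+# when get_bin_path() returns None.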
+class TestLSBFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'lsb']
+ valid_subsets = ['lsb']
+ fact_namespace = 'ansible_lsb'
+ collector_class = LSBFactCollector
+
+ def _mock_module(self):
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': self.gather_subset,
+ 'gather_timeout': 10,
+ 'filter': '*'}
+ mock_module.get_bin_path = Mock(return_value='/usr/bin/lsb_release')
+ mock_module.run_command = Mock(return_value=(0, lsb_release_a_fedora_output, ''))
+ return mock_module
+
+ def test_lsb_release_bin(self):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['lsb']['release'], '25')
+ self.assertEqual(facts_dict['lsb']['id'], 'Fedora')
+ self.assertEqual(facts_dict['lsb']['description'], 'Fedora release 25 (Twenty Five)')
+ self.assertEqual(facts_dict['lsb']['codename'], 'TwentyFive')
+ self.assertEqual(facts_dict['lsb']['major_release'], '25')
+
+ def test_etc_lsb_release(self):
+ module = self._mock_module()
+ module.get_bin_path = Mock(return_value=None)
+ with patch('ansible.module_utils.facts.system.lsb.os.path.exists',
+ return_value=True):
+ with patch('ansible.module_utils.facts.system.lsb.get_file_lines',
+ return_value=etc_lsb_release_ubuntu14.splitlines()):
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['lsb']['release'], '14.04')
+ self.assertEqual(facts_dict['lsb']['id'], 'Ubuntu')
+ self.assertEqual(facts_dict['lsb']['description'], 'Ubuntu 14.04.3 LTS')
+ self.assertEqual(facts_dict['lsb']['codename'], 'trusty')
+
+ def test_etc_lsb_release_no_decimal_release(self):
+ module = self._mock_module()
+ module.get_bin_path = Mock(return_value=None)
+ with patch('ansible.module_utils.facts.system.lsb.os.path.exists',
+ return_value=True):
+ with patch('ansible.module_utils.facts.system.lsb.get_file_lines',
+ return_value=etc_lsb_release_no_decimal.splitlines()):
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['lsb']['release'], '11')
+ self.assertEqual(facts_dict['lsb']['id'], 'AwesomeOS')
+ self.assertEqual(facts_dict['lsb']['description'], 'AwesomeÖS 11')
+ self.assertEqual(facts_dict['lsb']['codename'], 'stonehenge')
diff --git a/test/units/module_utils/facts/test_ansible_collector.py b/test/units/module_utils/facts/test_ansible_collector.py
new file mode 100644
index 00000000..6c236486
--- /dev/null
+++ b/test/units/module_utils/facts/test_ansible_collector.py
@@ -0,0 +1,504 @@
+# -*- coding: utf-8 -*-
+#
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# for testing
+from units.compat import unittest
+from units.compat.mock import Mock, patch
+
+from ansible.module_utils.facts import collector
+from ansible.module_utils.facts import ansible_collector
+from ansible.module_utils.facts import namespace
+
+from ansible.module_utils.facts.other.facter import FacterFactCollector
+from ansible.module_utils.facts.other.ohai import OhaiFactCollector
+
+from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
+from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
+from ansible.module_utils.facts.system.date_time import DateTimeFactCollector
+from ansible.module_utils.facts.system.env import EnvFactCollector
+from ansible.module_utils.facts.system.distribution import DistributionFactCollector
+from ansible.module_utils.facts.system.dns import DnsFactCollector
+from ansible.module_utils.facts.system.fips import FipsFactCollector
+from ansible.module_utils.facts.system.local import LocalFactCollector
+from ansible.module_utils.facts.system.lsb import LSBFactCollector
+from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector, OpenBSDPkgMgrFactCollector
+from ansible.module_utils.facts.system.platform import PlatformFactCollector
+from ansible.module_utils.facts.system.python import PythonFactCollector
+from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils.facts.system.user import UserFactCollector
+
+# from ansible.module_utils.facts.hardware.base import HardwareCollector
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.virtual.base import VirtualCollector
+
+ALL_COLLECTOR_CLASSES = \
+ [PlatformFactCollector,
+ DistributionFactCollector,
+ SelinuxFactCollector,
+ ApparmorFactCollector,
+ SystemCapabilitiesFactCollector,
+ FipsFactCollector,
+ PkgMgrFactCollector,
+ OpenBSDPkgMgrFactCollector,
+ ServiceMgrFactCollector,
+ LSBFactCollector,
+ DateTimeFactCollector,
+ UserFactCollector,
+ LocalFactCollector,
+ EnvFactCollector,
+ DnsFactCollector,
+ PythonFactCollector,
+ # FIXME: re-enable when Hardware() doesn't munge self.facts
+ # HardwareCollector
+ NetworkCollector,
+ VirtualCollector,
+ OhaiFactCollector,
+ FacterFactCollector]
+
+
+def mock_module(gather_subset=None,
+ filter=None):
+ if gather_subset is None:
+ gather_subset = ['all', '!facter', '!ohai']
+ if filter is None:
+ filter = '*'
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': gather_subset,
+ 'gather_timeout': 5,
+ 'filter': filter}
+ mock_module.get_bin_path = Mock(return_value=None)
+ return mock_module
+
+
+def _collectors(module,
+ all_collector_classes=None,
+ minimal_gather_subset=None):
+ gather_subset = module.params.get('gather_subset')
+ if all_collector_classes is None:
+ all_collector_classes = ALL_COLLECTOR_CLASSES
+ if minimal_gather_subset is None:
+ minimal_gather_subset = frozenset([])
+
+ collector_classes = \
+ collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset)
+
+ collectors = []
+ for collector_class in collector_classes:
+ collector_obj = collector_class()
+ collectors.append(collector_obj)
+
+ # Add a collector that knows what gather_subset we used so it can provide a fact
+ collector_meta_data_collector = \
+ ansible_collector.CollectorMetaDataCollector(gather_subset=gather_subset,
+ module_setup=True)
+ collectors.append(collector_meta_data_collector)
+
+ return collectors
+
+
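+# Namespace used throughout: facts are collected under 'ansible_facts' and
+# each fact name gains the 'ansible_' prefix (so e.g. 'env' ends up exposed
+# as 'ansible_env').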
+ns = namespace.PrefixFactNamespace('ansible_facts', 'ansible_')
+
+
+# FIXME: this is brute force, but hopefully enough to get some refactoring to make facts testable
+class TestInPlace(unittest.TestCase):
+ def _mock_module(self, gather_subset=None):
+ return mock_module(gather_subset=gather_subset)
+
+ def _collectors(self, module,
+ all_collector_classes=None,
+ minimal_gather_subset=None):
+ return _collectors(module=module,
+ all_collector_classes=all_collector_classes,
+ minimal_gather_subset=minimal_gather_subset)
+
+ def test(self):
+ gather_subset = ['all']
+ mock_module = self._mock_module(gather_subset=gather_subset)
+ all_collector_classes = [EnvFactCollector]
+ collectors = self._collectors(mock_module,
+ all_collector_classes=all_collector_classes)
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=collectors,
+ namespace=ns)
+
+ res = fact_collector.collect(module=mock_module)
+ self.assertIsInstance(res, dict)
+ self.assertIn('env', res)
+ self.assertIn('gather_subset', res)
+ self.assertEqual(res['gather_subset'], ['all'])
+
+ def test1(self):
+ gather_subset = ['all']
+ mock_module = self._mock_module(gather_subset=gather_subset)
+ collectors = self._collectors(mock_module)
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=collectors,
+ namespace=ns)
+
+ res = fact_collector.collect(module=mock_module)
+ self.assertIsInstance(res, dict)
+ # just assert it's not almost empty
+ # with run_command and get_file_content mock, many facts are empty, like network
+ self.assertGreater(len(res), 20)
+
+ def test_empty_all_collector_classes(self):
+ mock_module = self._mock_module()
+ all_collector_classes = []
+
+ collectors = self._collectors(mock_module,
+ all_collector_classes=all_collector_classes)
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=collectors,
+ namespace=ns)
+
+ res = fact_collector.collect()
+ self.assertIsInstance(res, dict)
+ # with no collectors, assert the result is almost empty
+ self.assertLess(len(res), 3)
+
+# def test_facts_class(self):
+# mock_module = self._mock_module()
+# Facts(mock_module)
+
+# def test_facts_class_load_on_init_false(self):
+# mock_module = self._mock_module()
+# Facts(mock_module, load_on_init=False)
+# # FIXME: assert something
+
+
+class TestCollectedFacts(unittest.TestCase):
+ gather_subset = ['all', '!facter', '!ohai']
+ min_fact_count = 30
+ max_fact_count = 1000
+
+ # TODO: add ansible_cmdline, ansible_*_pubkey* back when TempFactCollector goes away
+ expected_facts = ['date_time',
+ 'user_id', 'distribution',
+ 'gather_subset', 'module_setup',
+ 'env']
+ not_expected_facts = ['facter', 'ohai']
+
+ collected_facts = {}
+
+ def _mock_module(self, gather_subset=None):
+ return mock_module(gather_subset=self.gather_subset)
+
+ @patch('platform.system', return_value='Linux')
+ @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd')
+ def setUp(self, mock_gfc, mock_ps):
+ mock_module = self._mock_module()
+ collectors = self._collectors(mock_module)
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=collectors,
+ namespace=ns)
+ self.facts = fact_collector.collect(module=mock_module,
+ collected_facts=self.collected_facts)
+
+ def _collectors(self, module,
+ all_collector_classes=None,
+ minimal_gather_subset=None):
+ return _collectors(module=module,
+ all_collector_classes=all_collector_classes,
+ minimal_gather_subset=minimal_gather_subset)
+
+ def test_basics(self):
+ self._assert_basics(self.facts)
+
+ def test_expected_facts(self):
+ self._assert_expected_facts(self.facts)
+
+ def test_not_expected_facts(self):
+ self._assert_not_expected_facts(self.facts)
+
+ def _assert_basics(self, facts):
+ self.assertIsInstance(facts, dict)
+ # just assert it's not almost empty
+ self.assertGreaterEqual(len(facts), self.min_fact_count)
+ # and that it's not a huge number of keys
+ self.assertLess(len(facts), self.max_fact_count)
+
+ # everything starts with ansible_ namespace
+ def _assert_ansible_namespace(self, facts):
+
+ # FIXME: kluge for non-namespace fact
+ facts.pop('module_setup', None)
+ facts.pop('gather_subset', None)
+
+ for fact_key in facts:
+ self.assertTrue(fact_key.startswith('ansible_'),
+ 'The fact name "%s" does not start with "ansible_"' % fact_key)
+
+ def _assert_expected_facts(self, facts):
+
+ facts_keys = sorted(facts.keys())
+ for expected_fact in self.expected_facts:
+ self.assertIn(expected_fact, facts_keys)
+
+ def _assert_not_expected_facts(self, facts):
+
+ facts_keys = sorted(facts.keys())
+ for not_expected_fact in self.not_expected_facts:
+ self.assertNotIn(not_expected_fact, facts_keys)
+
+
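+# Toy collectors for the dependency/filter tests below:
+# ProvidesOtherFactCollector publishes 'needed_fact',
+# RequiresOtherFactCollector consumes it via collected_facts to build
+# 'compound_fact', and ConCatFactCollector joins all values collected so far
+# into 'concat_fact'.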
+class ProvidesOtherFactCollector(collector.BaseFactCollector):
+ name = 'provides_something'
+ _fact_ids = set(['needed_fact'])
+
+ def collect(self, module=None, collected_facts=None):
+ return {'needed_fact': 'THE_NEEDED_FACT_VALUE'}
+
+
+class RequiresOtherFactCollector(collector.BaseFactCollector):
+ name = 'requires_something'
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ fact_dict = {}
+ fact_dict['needed_fact'] = collected_facts['needed_fact']
+ fact_dict['compound_fact'] = "compound-%s" % collected_facts['needed_fact']
+ return fact_dict
+
+
+class ConCatFactCollector(collector.BaseFactCollector):
+ name = 'concat_collected'
+
+ def collect(self, module=None, collected_facts=None):
+ collected_facts = collected_facts or {}
+ fact_dict = {}
+ con_cat_list = []
+ for key, value in collected_facts.items():
+ con_cat_list.append(value)
+
+ fact_dict['concat_fact'] = '-'.join(con_cat_list)
+ return fact_dict
+
+
+class TestCollectorDepsWithFilter(unittest.TestCase):
+ gather_subset = ['all', '!facter', '!ohai']
+
+ def _mock_module(self, gather_subset=None, filter=None):
+ return mock_module(gather_subset=self.gather_subset,
+ filter=filter)
+
+ def setUp(self):
+ self.mock_module = self._mock_module()
+ self.collectors = self._collectors(self.mock_module)
+
+ def _collectors(self, module,
+ all_collector_classes=None,
+ minimal_gather_subset=None):
+ return [ProvidesOtherFactCollector(),
+ RequiresOtherFactCollector()]
+
+ def test_no_filter(self):
+ _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'])
+
+ facts_dict = self._collect(_mock_module)
+
+ expected = {'needed_fact': 'THE_NEEDED_FACT_VALUE',
+ 'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'}
+
+ self.assertEqual(expected, facts_dict)
+
+ def test_with_filter_on_compound_fact(self):
+ _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
+ filter='compound_fact')
+
+ facts_dict = self._collect(_mock_module)
+
+ expected = {'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'}
+
+ self.assertEqual(expected, facts_dict)
+
+ def test_with_filter_on_needed_fact(self):
+ _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
+ filter='needed_fact')
+
+ facts_dict = self._collect(_mock_module)
+
+ expected = {'needed_fact': 'THE_NEEDED_FACT_VALUE'}
+
+ self.assertEqual(expected, facts_dict)
+
+ def test_with_filter_on_compound_gather_compound(self):
+ _mock_module = mock_module(gather_subset=['!all', '!any', 'compound_fact'],
+ filter='compound_fact')
+
+ facts_dict = self._collect(_mock_module)
+
+ expected = {'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'}
+
+ self.assertEqual(expected, facts_dict)
+
+ def test_with_filter_no_match(self):
+ _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
+ filter='ansible_this_doesnt_exist')
+
+ facts_dict = self._collect(_mock_module)
+
+ expected = {}
+ self.assertEqual(expected, facts_dict)
+
+ def test_concat_collector(self):
+ _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'])
+
+ _collectors = self._collectors(_mock_module)
+ _collectors.append(ConCatFactCollector())
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=_collectors,
+ namespace=ns,
+ filter_spec=_mock_module.params['filter'])
+
+ collected_facts = {}
+ facts_dict = fact_collector.collect(module=_mock_module,
+ collected_facts=collected_facts)
+ self.assertIn('concat_fact', facts_dict)
+ self.assertTrue('THE_NEEDED_FACT_VALUE' in facts_dict['concat_fact'])
+
+ def test_concat_collector_with_filter_on_concat(self):
+ _mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
+ filter='concat_fact')
+
+ _collectors = self._collectors(_mock_module)
+ _collectors.append(ConCatFactCollector())
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=_collectors,
+ namespace=ns,
+ filter_spec=_mock_module.params['filter'])
+
+ collected_facts = {}
+ facts_dict = fact_collector.collect(module=_mock_module,
+ collected_facts=collected_facts)
+ self.assertIn('concat_fact', facts_dict)
+ self.assertTrue('THE_NEEDED_FACT_VALUE' in facts_dict['concat_fact'])
+ self.assertTrue('compound' in facts_dict['concat_fact'])
+
+ def _collect(self, _mock_module, collected_facts=None):
+ _collectors = self._collectors(_mock_module)
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=_collectors,
+ namespace=ns,
+ filter_spec=_mock_module.params['filter'])
+ facts_dict = fact_collector.collect(module=_mock_module,
+ collected_facts=collected_facts)
+ return facts_dict
+
+
+class ExceptionThrowingCollector(collector.BaseFactCollector):
+ def collect(self, module=None, collected_facts=None):
+ raise Exception('A collector failed')
+
+
+class TestExceptionCollectedFacts(TestCollectedFacts):
+
+ def _collectors(self, module,
+ all_collector_classes=None,
+ minimal_gather_subset=None):
+ collectors = _collectors(module=module,
+ all_collector_classes=all_collector_classes,
+ minimal_gather_subset=minimal_gather_subset)
+
+ c = [ExceptionThrowingCollector()] + collectors
+ return c
+
+
+class TestOnlyExceptionCollector(TestCollectedFacts):
+ expected_facts = []
+ min_fact_count = 0
+
+ def _collectors(self, module,
+ all_collector_classes=None,
+ minimal_gather_subset=None):
+ return [ExceptionThrowingCollector()]
+
+
+class TestMinimalCollectedFacts(TestCollectedFacts):
+ gather_subset = ['!all']
+ min_fact_count = 1
+ max_fact_count = 10
+ expected_facts = ['gather_subset',
+ 'module_setup']
+ not_expected_facts = ['lsb']
+
+
+class TestFacterCollectedFacts(TestCollectedFacts):
+ gather_subset = ['!all', 'facter']
+ min_fact_count = 1
+ max_fact_count = 10
+ expected_facts = ['gather_subset',
+ 'module_setup']
+ not_expected_facts = ['lsb']
+
+
+class TestOhaiCollectedFacts(TestCollectedFacts):
+ gather_subset = ['!all', 'ohai']
+ min_fact_count = 1
+ max_fact_count = 10
+ expected_facts = ['gather_subset',
+ 'module_setup']
+ not_expected_facts = ['lsb']
+
+
+class TestPkgMgrFacts(TestCollectedFacts):
+ gather_subset = ['pkg_mgr']
+ min_fact_count = 1
+ max_fact_count = 20
+ expected_facts = ['gather_subset',
+ 'module_setup',
+ 'pkg_mgr']
+ collected_facts = {
+ "ansible_distribution": "Fedora",
+ "ansible_distribution_major_version": "28",
+ "ansible_os_family": "RedHat"
+ }
+
+
+class TestOpenBSDPkgMgrFacts(TestPkgMgrFacts):
+ def test_is_openbsd_pkg(self):
+ self.assertIn('pkg_mgr', self.facts)
+ self.assertEqual(self.facts['pkg_mgr'], 'openbsd_pkg')
+
+ def setUp(self):
+ self.patcher = patch('platform.system')
+ mock_platform = self.patcher.start()
+ mock_platform.return_value = 'OpenBSD'
+
+ mock_module = self._mock_module()
+ collectors = self._collectors(mock_module)
+
+ fact_collector = \
+ ansible_collector.AnsibleFactCollector(collectors=collectors,
+ namespace=ns)
+ self.facts = fact_collector.collect(module=mock_module)
+
+ def tearDown(self):
+ self.patcher.stop()
diff --git a/test/units/module_utils/facts/test_collector.py b/test/units/module_utils/facts/test_collector.py
new file mode 100644
index 00000000..9eab89f7
--- /dev/null
+++ b/test/units/module_utils/facts/test_collector.py
@@ -0,0 +1,563 @@
+# This file is part of Ansible
+# -*- coding: utf-8 -*-
+#
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+import pprint
+
+# for testing
+from units.compat import unittest
+
+from ansible.module_utils.facts import collector
+
+from ansible.module_utils.facts import default_collectors
+
+
+class TestFindCollectorsForPlatform(unittest.TestCase):
+ def test(self):
+ compat_platforms = [{'system': 'Generic'}]
+ res = collector.find_collectors_for_platform(default_collectors.collectors,
+ compat_platforms)
+ for coll_class in res:
+ self.assertIn(coll_class._platform, ('Generic',))
+
+ def test_linux(self):
+ compat_platforms = [{'system': 'Linux'}]
+ res = collector.find_collectors_for_platform(default_collectors.collectors,
+ compat_platforms)
+ for coll_class in res:
+ self.assertIn(coll_class._platform, ('Linux',))
+
+ def test_linux_or_generic(self):
+ compat_platforms = [{'system': 'Generic'}, {'system': 'Linux'}]
+ res = collector.find_collectors_for_platform(default_collectors.collectors,
+ compat_platforms)
+ for coll_class in res:
+ self.assertIn(coll_class._platform, ('Generic', 'Linux'))
+
+
+class TestSelectCollectorNames(unittest.TestCase):
+
+ def _assert_equal_detail(self, obj1, obj2, msg=None):
+ msg = msg or 'objects are not equal\n%s\n\n!=\n\n%s' % (pprint.pformat(obj1), pprint.pformat(obj2))
+ return self.assertEqual(obj1, obj2, msg)
+
+ def test(self):
+ collector_names = ['distribution', 'all_ipv4_addresses',
+ 'local', 'pkg_mgr']
+ all_fact_subsets = self._all_fact_subsets()
+ res = collector.select_collector_classes(collector_names,
+ all_fact_subsets)
+
+ expected = [default_collectors.DistributionFactCollector,
+ default_collectors.PkgMgrFactCollector]
+
+ self._assert_equal_detail(res, expected)
+
+ def test_default_collectors(self):
+ platform_info = {'system': 'Generic'}
+ compat_platforms = [platform_info]
+ collectors_for_platform = collector.find_collectors_for_platform(default_collectors.collectors,
+ compat_platforms)
+
+ all_fact_subsets, aliases_map = collector.build_fact_id_to_collector_map(collectors_for_platform)
+
+ all_valid_subsets = frozenset(all_fact_subsets.keys())
+ collector_names = collector.get_collector_names(valid_subsets=all_valid_subsets,
+ aliases_map=aliases_map,
+ platform_info=platform_info)
+ complete_collector_names = collector._solve_deps(collector_names, all_fact_subsets)
+
+ dep_map = collector.build_dep_data(complete_collector_names, all_fact_subsets)
+
+ ordered_deps = collector.tsort(dep_map)
+ ordered_collector_names = [x[0] for x in ordered_deps]
+
+ res = collector.select_collector_classes(ordered_collector_names,
+ all_fact_subsets)
+
+ self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) >
+ res.index(default_collectors.DistributionFactCollector),
+ res)
+ self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) >
+ res.index(default_collectors.PlatformFactCollector),
+ res)
+
+ def _all_fact_subsets(self, data=None):
+ all_fact_subsets = defaultdict(list)
+ _data = {'pkg_mgr': [default_collectors.PkgMgrFactCollector],
+ 'distribution': [default_collectors.DistributionFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector]}
+ data = data or _data
+ for key, value in data.items():
+ all_fact_subsets[key] = value
+ return all_fact_subsets
+
+
+class TestGetCollectorNames(unittest.TestCase):
+ def test_none(self):
+ res = collector.get_collector_names()
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set([]))
+
+ def test_empty_sets(self):
+ res = collector.get_collector_names(valid_subsets=frozenset([]),
+ minimal_gather_subset=frozenset([]),
+ gather_subset=[])
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set([]))
+
+ def test_empty_valid_and_min_with_all_gather_subset(self):
+ res = collector.get_collector_names(valid_subsets=frozenset([]),
+ minimal_gather_subset=frozenset([]),
+ gather_subset=['all'])
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set([]))
+
+ def test_one_valid_with_all_gather_subset(self):
+ valid_subsets = frozenset(['my_fact'])
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=frozenset([]),
+ gather_subset=['all'])
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set(['my_fact']))
+
+ def _compare_res(self, gather_subset1, gather_subset2,
+ valid_subsets=None, min_subset=None):
+
+ valid_subsets = valid_subsets or frozenset()
+ minimal_gather_subset = min_subset or frozenset()
+
+ res1 = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset1)
+
+ res2 = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset2)
+
+ return res1, res2
+
+ def test_not_all_other_order(self):
+ valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['min_fact'])
+
+ res1, res2 = self._compare_res(['!all', 'whatever'],
+ ['whatever', '!all'],
+ valid_subsets=valid_subsets,
+ min_subset=minimal_gather_subset)
+ self.assertEqual(res1, res2)
+ self.assertEqual(res1, set(['min_fact', 'whatever']))
+
+ def test_not_all_other_order_min(self):
+ valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['min_fact'])
+
+ res1, res2 = self._compare_res(['!min_fact', 'whatever'],
+ ['whatever', '!min_fact'],
+ valid_subsets=valid_subsets,
+ min_subset=minimal_gather_subset)
+ self.assertEqual(res1, res2)
+ self.assertEqual(res1, set(['whatever']))
+
+ def test_one_minimal_with_all_gather_subset(self):
+ my_fact = 'my_fact'
+ valid_subsets = frozenset([my_fact])
+ minimal_gather_subset = valid_subsets
+
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['all'])
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set(['my_fact']))
+
+ def test_with_all_gather_subset(self):
+ valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['my_fact'])
+
+ # with 'all', every valid subset is returned (the minimal subset included)
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['all'])
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set(['my_fact', 'something_else', 'whatever']))
+
+ def test_one_minimal_with_not_all_gather_subset(self):
+ valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['my_fact'])
+
+ # even with '!all', the minimal_gather_subset should be returned
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['!all'])
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set(['my_fact']))
+
+ def test_gather_subset_excludes(self):
+ valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['min_fact', 'min_another'])
+
+ # minimal_gather_subset facts are returned unless explicitly excluded
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['!min_fact', '!whatever'])
+ self.assertIsInstance(res, set)
+ # min_another is in minimal_gather_subset, so always returned
+ self.assertEqual(res, set(['min_another']))
+
+ def test_gather_subset_excludes_ordering(self):
+ valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['my_fact'])
+
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['!all', 'whatever'])
+ self.assertIsInstance(res, set)
+ # excludes are higher precedence than includes, so '!all' excludes
+ # everything first; explicit includes and the minimal_gather_subset
+ # are then added back, so '!all', 'whatever' == minimal + 'whatever'
+ self.assertEqual(res, set(['my_fact', 'whatever']))
+
+ def test_gather_subset_excludes_min(self):
+ valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['min_fact'])
+
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['whatever', '!min'])
+ self.assertIsInstance(res, set)
+ # '!min' excludes the whole minimal_gather_subset, so only the
+ # explicitly included 'whatever' remains
+ self.assertEqual(res, set(['whatever']))
+
+ def test_gather_subset_excludes_min_and_all(self):
+ valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
+ minimal_gather_subset = frozenset(['min_fact'])
+
+ res = collector.get_collector_names(valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['whatever', '!all', '!min'])
+ self.assertIsInstance(res, set)
+ # '!all' and '!min' together exclude everything, including the
+ # minimal_gather_subset, leaving only the explicit 'whatever'
+ self.assertEqual(res, set(['whatever']))
+
+ def test_invalid_gather_subset(self):
+ valid_subsets = frozenset(['my_fact', 'something_else'])
+ minimal_gather_subset = frozenset(['my_fact'])
+
+ self.assertRaisesRegexp(TypeError,
+ r'Bad subset .* given to Ansible.*allowed\:.*all,.*my_fact.*',
+ collector.get_collector_names,
+ valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=['my_fact', 'not_a_valid_gather_subset'])
+
+
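+ # Hedged, illustrative sketch of the include/exclude semantics exercised
+ # above; it only loosely mirrors collector.get_collector_names(). Assumed
+ # rules: excludes beat includes, '!all' drops everything not explicitly
+ # named, '!min' drops the minimal subset, and minimal facts are otherwise
+ # always kept.
+ def _sketch_get_collector_names(gather_subset, valid_subsets, minimal):
+     includes = set(s for s in gather_subset if not s.startswith('!'))
+     excludes = set(s[1:] for s in gather_subset if s.startswith('!'))
+     if 'min' in excludes:
+         excludes |= set(minimal)
+     chosen = set(valid_subsets) if 'all' in includes else set(includes)
+     if 'all' in excludes:
+         chosen = includes - excludes
+     return (chosen - excludes) | (set(minimal) - excludes)
+
+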
+class TestFindUnresolvedRequires(unittest.TestCase):
+ def test(self):
+ names = ['network', 'virtual', 'env']
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector]}
+ res = collector.find_unresolved_requires(names, all_fact_subsets)
+ # pprint.pprint(res)
+
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set(['platform', 'distribution']))
+
+ def test_resolved(self):
+ names = ['network', 'virtual', 'env', 'platform', 'distribution']
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'distribution': [default_collectors.DistributionFactCollector],
+ 'platform': [default_collectors.PlatformFactCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector]}
+ res = collector.find_unresolved_requires(names, all_fact_subsets)
+ # pprint.pprint(res)
+
+ self.assertIsInstance(res, set)
+ self.assertEqual(res, set())
+
+
+class TestBuildDepData(unittest.TestCase):
+ def test(self):
+ names = ['network', 'virtual', 'env']
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector]}
+ res = collector.build_dep_data(names, all_fact_subsets)
+
+ # pprint.pprint(dict(res))
+ self.assertIsInstance(res, defaultdict)
+ self.assertEqual(dict(res),
+ {'network': set(['platform', 'distribution']),
+ 'virtual': set(),
+ 'env': set()})
+
+
+class TestSolveDeps(unittest.TestCase):
+ def test_no_solution(self):
+ unresolved = set(['required_thing1', 'required_thing2'])
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector]}
+
+ self.assertRaises(collector.CollectorNotFoundError,
+ collector._solve_deps,
+ unresolved,
+ all_fact_subsets)
+
+ def test(self):
+ unresolved = set(['env', 'network'])
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector],
+ 'platform': [default_collectors.PlatformFactCollector],
+ 'distribution': [default_collectors.DistributionFactCollector]}
+ res = collector._solve_deps(unresolved, all_fact_subsets)
+
+ self.assertIsInstance(res, set)
+ for goal in unresolved:
+ self.assertIn(goal, res)
+
+
+class TestResolveRequires(unittest.TestCase):
+ def test_no_resolution(self):
+ unresolved = ['required_thing1', 'required_thing2']
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector]}
+ self.assertRaisesRegexp(collector.UnresolvedFactDep,
+ 'unresolved fact dep.*required_thing2',
+ collector.resolve_requires,
+ unresolved, all_fact_subsets)
+
+ def test(self):
+ unresolved = ['env', 'network']
+ all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
+ 'network': [default_collectors.LinuxNetworkCollector],
+ 'virtual': [default_collectors.LinuxVirtualCollector]}
+ res = collector.resolve_requires(unresolved, all_fact_subsets)
+ for goal in unresolved:
+ self.assertIn(goal, res)
+
+ def test_exception(self):
+ unresolved = ['required_thing1']
+ all_fact_subsets = {}
+ try:
+ collector.resolve_requires(unresolved, all_fact_subsets)
+ except collector.UnresolvedFactDep as exc:
+ self.assertIn(unresolved[0], '%s' % exc)
+ else:
+ self.fail('resolve_requires() should have raised UnresolvedFactDep')
+
+
+class TestTsort(unittest.TestCase):
+ def test(self):
+ dep_map = {'network': set(['distribution', 'platform']),
+ 'virtual': set(),
+ 'platform': set(['what_platform_wants']),
+ 'what_platform_wants': set(),
+ 'network_stuff': set(['network'])}
+
+ res = collector.tsort(dep_map)
+ # pprint.pprint(res)
+
+ self.assertIsInstance(res, list)
+ names = [x[0] for x in res]
+ self.assertTrue(names.index('network_stuff') > names.index('network'))
+ self.assertTrue(names.index('platform') > names.index('what_platform_wants'))
+ self.assertTrue(names.index('network') > names.index('platform'))
+
+ def test_cycles(self):
+ dep_map = {'leaf1': set(),
+ 'leaf2': set(),
+ 'node1': set(['node2']),
+ 'node2': set(['node3']),
+ 'node3': set(['node1'])}
+
+ self.assertRaises(collector.CycleFoundInFactDeps,
+ collector.tsort,
+ dep_map)
+
+ def test_just_nodes(self):
+ dep_map = {'leaf1': set(),
+ 'leaf4': set(),
+ 'leaf3': set(),
+ 'leaf2': set()}
+
+ res = collector.tsort(dep_map)
+ self.assertIsInstance(res, list)
+ names = [x[0] for x in res]
+ # not a lot to assert here, any order of the
+ # results is valid
+ self.assertEqual(set(names), set(dep_map.keys()))
+
+ def test_self_deps(self):
+ dep_map = {'node1': set(['node1']),
+ 'node2': set(['node2'])}
+ self.assertRaises(collector.CycleFoundInFactDeps,
+ collector.tsort,
+ dep_map)
+
+ def test_unsolvable(self):
+ dep_map = {'leaf1': set(),
+ 'node2': set(['leaf2'])}
+
+ res = collector.tsort(dep_map)
+ self.assertIsInstance(res, list)
+ names = [x[0] for x in res]
+ self.assertEqual(set(names), set(dep_map.keys()))
+
+ def test_chain(self):
+ dep_map = {'leaf1': set(['leaf2']),
+ 'leaf2': set(['leaf3']),
+ 'leaf3': set(['leaf4']),
+ 'leaf4': set(),
+ 'leaf5': set(['leaf1'])}
+ res = collector.tsort(dep_map)
+ self.assertIsInstance(res, list)
+ names = [x[0] for x in res]
+ self.assertEqual(set(names), set(dep_map.keys()))
+
+ def test_multi_pass(self):
+ dep_map = {'leaf1': set(),
+ 'leaf2': set(['leaf3', 'leaf1', 'leaf4', 'leaf5']),
+ 'leaf3': set(['leaf4', 'leaf1']),
+ 'leaf4': set(['leaf1']),
+ 'leaf5': set(['leaf1'])}
+ res = collector.tsort(dep_map)
+ self.assertIsInstance(res, list)
+ names = [x[0] for x in res]
+ self.assertEqual(set(names), set(dep_map.keys()))
+ self.assertTrue(names.index('leaf1') < names.index('leaf2'))
+ for leaf in ('leaf2', 'leaf3', 'leaf4', 'leaf5'):
+ self.assertTrue(names.index('leaf1') < names.index(leaf))
+
+
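+ # Hedged sketch of a Kahn-style topological sort over the dep_map format
+ # used above ({name: set of required names}); the real collector.tsort may
+ # differ in details, but the tests only assert relative ordering, cycle
+ # detection, and that deps on names that are not nodes are tolerated.
+ def _sketch_tsort(dep_map):
+     ordered = []
+     remaining = dict((node, set(deps)) for node, deps in dep_map.items())
+     while remaining:
+         # a node is ready once none of its deps is a still-unsorted node
+         ready = [node for node, deps in remaining.items()
+                  if not deps & set(remaining)]
+         if not ready:
+             raise collector.CycleFoundInFactDeps('cycle in %s' % remaining)
+         for node in sorted(ready):
+             ordered.append((node, dep_map[node]))
+             del remaining[node]
+     return ordered
+
+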
+class TestCollectorClassesFromGatherSubset(unittest.TestCase):
+ maxDiff = None
+
+ def _classes(self,
+ all_collector_classes=None,
+ valid_subsets=None,
+ minimal_gather_subset=None,
+ gather_subset=None,
+ gather_timeout=None,
+ platform_info=None):
+ platform_info = platform_info or {'system': 'Linux'}
+ return collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes,
+ valid_subsets=valid_subsets,
+ minimal_gather_subset=minimal_gather_subset,
+ gather_subset=gather_subset,
+ gather_timeout=gather_timeout,
+ platform_info=platform_info)
+
+ def test_no_args(self):
+ res = self._classes()
+ self.assertIsInstance(res, list)
+ self.assertEqual(res, [])
+
+ def test_not_all(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=['!all'])
+ self.assertIsInstance(res, list)
+ self.assertEqual(res, [])
+
+ def test_all(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=['all'])
+ self.assertIsInstance(res, list)
+
+ def test_hardware(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=['hardware'])
+ self.assertIsInstance(res, list)
+ self.assertIn(default_collectors.PlatformFactCollector, res)
+ self.assertIn(default_collectors.LinuxHardwareCollector, res)
+
+ self.assertTrue(res.index(default_collectors.LinuxHardwareCollector) >
+ res.index(default_collectors.PlatformFactCollector))
+
+ def test_network(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=['network'])
+ self.assertIsInstance(res, list)
+ self.assertIn(default_collectors.DistributionFactCollector, res)
+ self.assertIn(default_collectors.PlatformFactCollector, res)
+ self.assertIn(default_collectors.LinuxNetworkCollector, res)
+
+ self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) >
+ res.index(default_collectors.PlatformFactCollector))
+ self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) >
+ res.index(default_collectors.DistributionFactCollector))
+
+ # self.assertEqual(set(res, [default_collectors.DistributionFactCollector,
+ # default_collectors.PlatformFactCollector,
+ # default_collectors.LinuxNetworkCollector])
+
+ def test_env(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=['env'])
+ self.assertIsInstance(res, list)
+ self.assertEqual(res, [default_collectors.EnvFactCollector])
+
+ def test_facter(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=set(['env', 'facter']))
+ self.assertIsInstance(res, list)
+ self.assertEqual(set(res),
+ set([default_collectors.EnvFactCollector,
+ default_collectors.FacterFactCollector]))
+
+ def test_facter_ohai(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=set(['env', 'facter', 'ohai']))
+ self.assertIsInstance(res, list)
+ self.assertEqual(set(res),
+ set([default_collectors.EnvFactCollector,
+ default_collectors.FacterFactCollector,
+ default_collectors.OhaiFactCollector]))
+
+ def test_just_facter(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=set(['facter']))
+ self.assertIsInstance(res, list)
+ self.assertEqual(set(res),
+ set([default_collectors.FacterFactCollector]))
+
+ def test_collector_specified_multiple_times(self):
+ res = self._classes(all_collector_classes=default_collectors.collectors,
+ gather_subset=['platform', 'all', 'machine'])
+ self.assertIsInstance(res, list)
+ self.assertIn(default_collectors.PlatformFactCollector,
+ res)
+
+ def test_unknown_collector(self):
+ # something claims 'unknown_collector' is a valid gather_subset, but there is
+ # no FactCollector mapped to 'unknown_collector'
+ self.assertRaisesRegexp(TypeError,
+ r'Bad subset.*unknown_collector.*given to Ansible.*allowed\:.*all,.*env.*',
+ self._classes,
+ all_collector_classes=default_collectors.collectors,
+ gather_subset=['env', 'unknown_collector'])
diff --git a/test/units/module_utils/facts/test_collectors.py b/test/units/module_utils/facts/test_collectors.py
new file mode 100644
index 00000000..d9fe79bf
--- /dev/null
+++ b/test/units/module_utils/facts/test_collectors.py
@@ -0,0 +1,430 @@
+# unit tests for ansible fact collectors
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import Mock, patch
+
+from .base import BaseFactsTest
+
+from ansible.module_utils.facts import collector
+
+from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
+from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
+from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector
+from ansible.module_utils.facts.system.distribution import DistributionFactCollector
+from ansible.module_utils.facts.system.dns import DnsFactCollector
+from ansible.module_utils.facts.system.env import EnvFactCollector
+from ansible.module_utils.facts.system.fips import FipsFactCollector
+from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector, OpenBSDPkgMgrFactCollector
+from ansible.module_utils.facts.system.platform import PlatformFactCollector
+from ansible.module_utils.facts.system.python import PythonFactCollector
+from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
+from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
+from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
+from ansible.module_utils.facts.system.user import UserFactCollector
+
+from ansible.module_utils.facts.virtual.base import VirtualCollector
+from ansible.module_utils.facts.network.base import NetworkCollector
+from ansible.module_utils.facts.hardware.base import HardwareCollector
+
+
+class CollectorException(Exception):
+ pass
+
+
+class ExceptionThrowingCollector(collector.BaseFactCollector):
+ name = 'exc_throwing'
+
+ def __init__(self, collectors=None, namespace=None, exception=None):
+ super(ExceptionThrowingCollector, self).__init__(collectors, namespace)
+ self._exception = exception or CollectorException('collection failed')
+
+ def collect(self, module=None, collected_facts=None):
+ raise self._exception
+
+
+class TestExceptionThrowingCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['exc_throwing']
+ valid_subsets = ['exc_throwing']
+ collector_class = ExceptionThrowingCollector
+
+ def test_collect(self):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ self.assertRaises(CollectorException,
+ fact_collector.collect,
+ module=module,
+ collected_facts=self.collected_facts)
+
+ def test_collect_with_namespace(self):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ self.assertRaises(CollectorException,
+ fact_collector.collect_with_namespace,
+ module=module,
+ collected_facts=self.collected_facts)
+
+
+class TestApparmorFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'apparmor']
+ valid_subsets = ['apparmor']
+ fact_namespace = 'ansible_apparmor'
+ collector_class = ApparmorFactCollector
+
+ def test_collect(self):
+ facts_dict = super(TestApparmorFacts, self).test_collect()
+ self.assertIn('status', facts_dict['apparmor'])
+
+
+class TestCapsFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'caps']
+ valid_subsets = ['caps']
+ fact_namespace = 'ansible_system_capabilities'
+ collector_class = SystemCapabilitiesFactCollector
+
+ def _mock_module(self):
+ mock_module = Mock()
+ mock_module.params = {'gather_subset': self.gather_subset,
+ 'gather_timeout': 10,
+ 'filter': '*'}
+ mock_module.get_bin_path = Mock(return_value='/usr/sbin/capsh')
+ mock_module.run_command = Mock(return_value=(0, 'Current: =ep', ''))
+ return mock_module
+
+
+class TestCmdLineFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'cmdline']
+ valid_subsets = ['cmdline']
+ fact_namespace = 'ansible_cmdline'
+ collector_class = CmdLineFactCollector
+
+ def test_parse_proc_cmdline_uefi(self):
+ uefi_cmdline = r'initrd=\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd root=UUID=50973b75-4a66-4bf0-9764-2b7614489e64 ro quiet'
+ expected = {'initrd': r'\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd',
+ 'root': 'UUID=50973b75-4a66-4bf0-9764-2b7614489e64',
+ 'quiet': True,
+ 'ro': True}
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector._parse_proc_cmdline(uefi_cmdline)
+
+ self.assertDictEqual(facts_dict, expected)
+
+ def test_parse_proc_cmdline_fedora(self):
+ cmdline_fedora = r'BOOT_IMAGE=/vmlinuz-4.10.16-200.fc25.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.luks.uuid=luks-c80b7537-358b-4a07-b88c-c59ef187479b rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8' # noqa
+
+ expected = {'BOOT_IMAGE': '/vmlinuz-4.10.16-200.fc25.x86_64',
+ 'LANG': 'en_US.UTF-8',
+ 'quiet': True,
+ 'rd.luks.uuid': 'luks-c80b7537-358b-4a07-b88c-c59ef187479b',
+ 'rd.lvm.lv': 'fedora/swap',
+ 'rhgb': True,
+ 'ro': True,
+ 'root': '/dev/mapper/fedora-root'}
+
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector._parse_proc_cmdline(cmdline_fedora)
+
+ self.assertDictEqual(facts_dict, expected)
+
+ def test_parse_proc_cmdline_dup_console(self):
+ example = r'BOOT_IMAGE=/boot/vmlinuz-4.4.0-72-generic root=UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90 ro console=tty1 console=ttyS0'
+
+ # FIXME: Two 'console' keywords? Using a dict for the fact value here loses info. Currently the 'last' one wins
+ expected = {'BOOT_IMAGE': '/boot/vmlinuz-4.4.0-72-generic',
+ 'root': 'UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90',
+ 'ro': True,
+ 'console': 'ttyS0'}
+
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector._parse_proc_cmdline(example)
+
+ # note: only the last 'console' value survives the dict-based parse (see FIXME above)
+ self.assertDictEqual(facts_dict, expected)
+
+
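+ # Hedged sketch (not the collector's actual behavior): one way to keep the
+ # duplicate 'console' entries flagged in the FIXME above would be to
+ # accumulate repeated keys into a list instead of overwriting:
+ def _sketch_parse_cmdline_keep_dups(cmdline):
+     facts = {}
+     for piece in cmdline.split():
+         key, sep, value = piece.partition('=')
+         value = value if sep else True
+         if key in facts:
+             prior = facts[key]
+             facts[key] = prior + [value] if isinstance(prior, list) else [prior, value]
+         else:
+             facts[key] = value
+     return facts
+
+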
+class TestDistributionFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'distribution']
+ valid_subsets = ['distribution']
+ fact_namespace = 'ansible_distribution'
+ collector_class = DistributionFactCollector
+
+
+class TestDnsFacts(BaseFactsTest):
+
+ __test__ = True
+ gather_subset = ['!all', 'dns']
+ valid_subsets = ['dns']
+ fact_namespace = 'ansible_dns'
+ collector_class = DnsFactCollector
+
+
+class TestEnvFacts(BaseFactsTest):
+
+ __test__ = True
+ gather_subset = ['!all', 'env']
+ valid_subsets = ['env']
+ fact_namespace = 'ansible_env'
+ collector_class = EnvFactCollector
+
+ def test_collect(self):
+ facts_dict = super(TestEnvFacts, self).test_collect()
+ self.assertIn('HOME', facts_dict['env'])
+
+
+class TestFipsFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'fips']
+ valid_subsets = ['fips']
+ fact_namespace = 'ansible_fips'
+ collector_class = FipsFactCollector
+
+
+class TestHardwareCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'hardware']
+ valid_subsets = ['hardware']
+ fact_namespace = 'ansible_hardware'
+ collector_class = HardwareCollector
+ collected_facts = {'ansible_architecture': 'x86_64'}
+
+
+class TestNetworkCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'network']
+ valid_subsets = ['network']
+ fact_namespace = 'ansible_network'
+ collector_class = NetworkCollector
+
+
+class TestPkgMgrFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'pkg_mgr']
+ valid_subsets = ['pkg_mgr']
+ fact_namespace = 'ansible_pkgmgr'
+ collector_class = PkgMgrFactCollector
+ collected_facts = {
+ "ansible_distribution": "Fedora",
+ "ansible_distribution_major_version": "28",
+ "ansible_os_family": "RedHat"
+ }
+
+ def test_collect(self):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertIn('pkg_mgr', facts_dict)
+
+
+def _sanitize_os_path_apt_get(path):
+ # side_effect for os.path.exists(): pretend only /usr/bin/apt-get exists
+ return path == '/usr/bin/apt-get'
+
+
+class TestPkgMgrFactsAptFedora(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'pkg_mgr']
+ valid_subsets = ['pkg_mgr']
+ fact_namespace = 'ansible_pkgmgr'
+ collector_class = PkgMgrFactCollector
+ collected_facts = {
+ "ansible_distribution": "Fedora",
+ "ansible_distribution_major_version": "28",
+ "ansible_os_family": "RedHat",
+ "ansible_pkg_mgr": "apt"
+ }
+
+ @patch('ansible.module_utils.facts.system.pkg_mgr.os.path.exists', side_effect=_sanitize_os_path_apt_get)
+ def test_collect(self, mock_os_path_exists):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertIn('pkg_mgr', facts_dict)
+
+
+class TestOpenBSDPkgMgrFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'pkg_mgr']
+ valid_subsets = ['pkg_mgr']
+ fact_namespace = 'ansible_pkgmgr'
+ collector_class = OpenBSDPkgMgrFactCollector
+
+ def test_collect(self):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertIn('pkg_mgr', facts_dict)
+ self.assertEqual(facts_dict['pkg_mgr'], 'openbsd_pkg')
+
+
+class TestPlatformFactCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'platform']
+ valid_subsets = ['platform']
+ fact_namespace = 'ansible_platform'
+ collector_class = PlatformFactCollector
+
+
+class TestPythonFactCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'python']
+ valid_subsets = ['python']
+ fact_namespace = 'ansible_python'
+ collector_class = PythonFactCollector
+
+
+class TestSelinuxFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'selinux']
+ valid_subsets = ['selinux']
+ fact_namespace = 'ansible_selinux'
+ collector_class = SelinuxFactCollector
+
+ def test_no_selinux(self):
+ with patch('ansible.module_utils.facts.system.selinux.HAVE_SELINUX', False):
+ module = self._mock_module()
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['selinux']['status'], 'Missing selinux Python library')
+ return facts_dict
+
+
+class TestServiceMgrFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'service_mgr']
+ valid_subsets = ['service_mgr']
+ fact_namespace = 'ansible_service_mgr'
+ collector_class = ServiceMgrFactCollector
+
+ # TODO: dedupe some of this test code
+
+ @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+ def test_no_proc1(self, mock_gfc):
+ # no /proc/1/comm, ps returns non-zero
+ # should fall back to 'service'
+ module = self._mock_module()
+ module.run_command = Mock(return_value=(1, '', 'wat'))
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['service_mgr'], 'service')
+
+ @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+ def test_no_proc1_ps_random_init(self, mock_gfc):
+ # no /proc/1/comm, ps returns '/sbin/sys11', which we don't know
+ # should end up returning 'sys11'
+ module = self._mock_module()
+ module.run_command = Mock(return_value=(0, '/sbin/sys11', ''))
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['service_mgr'], 'sys11')
+
+ @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+ def test_clowncar(self, mock_gfc):
+ # no /proc/1/comm, ps fails, distro and system are unknown ('clowncar')
+ # should fall back to 'service'
+ module = self._mock_module()
+ module.run_command = Mock(return_value=(1, '', ''))
+ collected_facts = {'distribution': 'clowncar',
+ 'system': 'ClownCarOS'}
+ fact_collector = self.collector_class()
+ facts_dict = fact_collector.collect(module=module,
+ collected_facts=collected_facts)
+ self.assertIsInstance(facts_dict, dict)
+ self.assertEqual(facts_dict['service_mgr'], 'service')
+
+ # TODO: reenable these tests when we can mock more easily
+
+# @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+# def test_sunos_fallback(self, mock_gfc):
+# # no /proc/1/comm, ps fails, 'system' is SunOS
+# # should end up return 'smf'?
+# module = self._mock_module()
+# # FIXME: the result here is a kluge to at least cover more of service_mgr.collect
+# # TODO: remove
+# # FIXME: have to force a pid for results here to get into any of the system/distro checks
+# module.run_command = Mock(return_value=(1, ' 37 ', ''))
+# collected_facts = {'system': 'SunOS'}
+# fact_collector = self.collector_class(module=module)
+# facts_dict = fact_collector.collect(collected_facts=collected_facts)
+# print('facts_dict: %s' % facts_dict)
+# self.assertIsInstance(facts_dict, dict)
+# self.assertEqual(facts_dict['service_mgr'], 'smf')
+
+# @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+# def test_aix_fallback(self, mock_gfc):
+# # no /proc/1/comm, ps fails, 'system' is SunOS
+# # should end up return 'smf'?
+# module = self._mock_module()
+# module.run_command = Mock(return_value=(1, '', ''))
+# collected_facts = {'system': 'AIX'}
+# fact_collector = self.collector_class(module=module)
+# facts_dict = fact_collector.collect(collected_facts=collected_facts)
+# print('facts_dict: %s' % facts_dict)
+# self.assertIsInstance(facts_dict, dict)
+# self.assertEqual(facts_dict['service_mgr'], 'src')
+
+# @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
+# def test_linux_fallback(self, mock_gfc):
+# # no /proc/1/comm, ps fails, 'system' is SunOS
+# # should end up return 'smf'?
+# module = self._mock_module()
+# module.run_command = Mock(return_value=(1, ' 37 ', ''))
+# collected_facts = {'system': 'Linux'}
+# fact_collector = self.collector_class(module=module)
+# facts_dict = fact_collector.collect(collected_facts=collected_facts)
+# print('facts_dict: %s' % facts_dict)
+# self.assertIsInstance(facts_dict, dict)
+# self.assertEqual(facts_dict['service_mgr'], 'sdfadf')
+
+
+class TestSshPubKeyFactCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'ssh_pub_keys']
+ valid_subsets = ['ssh_pub_keys']
+ fact_namespace = 'ansible_ssh_pub_keys'
+ collector_class = SshPubKeyFactCollector
+
+
+class TestUserFactCollector(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'user']
+ valid_subsets = ['user']
+ fact_namespace = 'ansible_user'
+ collector_class = UserFactCollector
+
+
+class TestVirtualFacts(BaseFactsTest):
+ __test__ = True
+ gather_subset = ['!all', 'virtual']
+ valid_subsets = ['virtual']
+ fact_namespace = 'ansible_virtual'
+ collector_class = VirtualCollector
diff --git a/test/units/module_utils/facts/test_date_time.py b/test/units/module_utils/facts/test_date_time.py
new file mode 100644
index 00000000..7c92e521
--- /dev/null
+++ b/test/units/module_utils/facts/test_date_time.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+import datetime
+import string
+import time
+
+from ansible.module_utils.facts.system import date_time
+
+EPOCH_TS = 1594449296.123456
+DT = datetime.datetime(2020, 7, 11, 12, 34, 56, 124356)
+DT_UTC = datetime.datetime(2020, 7, 11, 2, 34, 56, 124356)
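+# DT is the mocked local time and DT_UTC the mocked UTC time for the same
+# instant, i.e. the fake host sits at UTC+10; the iso8601* facts below
+# therefore carry 02:34:56 while 'time' shows 12:34:56.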
+
+
+@pytest.fixture
+def fake_now(monkeypatch):
+ """
+ Patch `datetime.datetime.fromtimestamp()`, `datetime.datetime.utcfromtimestamp()`,
+ and `time.time()` to return deterministic values.
+ """
+
+ class FakeNow:
+ @classmethod
+ def fromtimestamp(cls, timestamp):
+ return DT
+
+ @classmethod
+ def utcfromtimestamp(cls, timestamp):
+ return DT_UTC
+
+ def _time():
+ return EPOCH_TS
+
+ monkeypatch.setattr(date_time.datetime, 'datetime', FakeNow)
+ monkeypatch.setattr(time, 'time', _time)
+
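+# Note: datetime.datetime is a C-implemented type whose attributes cannot
+# be patched one by one, which is presumably why fake_now swaps in a whole
+# FakeNow class on the module object instead of patching fromtimestamp().
+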
+
+@pytest.fixture
+def fake_date_facts(fake_now):
+ """Return a predictable instance of collected date_time facts."""
+
+ collector = date_time.DateTimeFactCollector()
+ data = collector.collect()
+
+ return data
+
+
+@pytest.mark.parametrize(
+ ('fact_name', 'fact_value'),
+ (
+ ('year', '2020'),
+ ('month', '07'),
+ ('weekday', 'Saturday'),
+ ('weekday_number', '6'),
+ ('weeknumber', '27'),
+ ('day', '11'),
+ ('hour', '12'),
+ ('minute', '34'),
+ ('second', '56'),
+ ('date', '2020-07-11'),
+ ('time', '12:34:56'),
+ ('iso8601_basic', '20200711T123456124356'),
+ ('iso8601_basic_short', '20200711T123456'),
+ ('iso8601_micro', '2020-07-11T02:34:56.124356Z'),
+ ('iso8601', '2020-07-11T02:34:56Z'),
+ ),
+)
+def test_date_time_facts(fake_date_facts, fact_name, fact_value):
+ assert fake_date_facts['date_time'][fact_name] == fact_value
+
+
+def test_date_time_epoch(fake_date_facts):
+ """Test that format of returned epoch value is correct"""
+
+ assert fake_date_facts['date_time']['epoch'].isdigit()
+ assert len(fake_date_facts['date_time']['epoch']) == 10 # This length will not change any time soon
+
+
+def test_date_time_tz(fake_date_facts):
+ """
+ Test that the returned value for timezone consists of only uppercase
+ letters and is the expected length.
+ """
+
+ assert fake_date_facts['date_time']['tz'].isupper()
+ assert 2 <= len(fake_date_facts['date_time']['tz']) <= 5
+ assert not set(fake_date_facts['date_time']['tz']).difference(set(string.ascii_uppercase))
+
+
+def test_date_time_tz_offset(fake_date_facts):
+ """
+ Test that the timezone offset begins with a `+` or `-` and ends with a
+ series of integers.
+ """
+
+ assert fake_date_facts['date_time']['tz_offset'][0] in ['-', '+']
+ assert fake_date_facts['date_time']['tz_offset'][1:].isdigit()
+ assert len(fake_date_facts['date_time']['tz_offset']) == 5
diff --git a/test/units/module_utils/facts/test_facts.py b/test/units/module_utils/facts/test_facts.py
new file mode 100644
index 00000000..5e2de808
--- /dev/null
+++ b/test/units/module_utils/facts/test_facts.py
@@ -0,0 +1,644 @@
+# This file is part of Ansible
+# -*- coding: utf-8 -*-
+#
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import pytest
+
+# for testing
+from units.compat import unittest
+from units.compat.mock import Mock, patch
+
+from ansible.module_utils import facts
+from ansible.module_utils.facts import hardware
+from ansible.module_utils.facts import network
+from ansible.module_utils.facts import virtual
+
+
+class BaseTestFactsPlatform(unittest.TestCase):
+ """Verify that the automagic in Hardware.__new__ selects the right subclass."""
+ platform_id = 'Generic'
+ fact_class = hardware.base.Hardware
+ collector_class = None
+
+ @patch('platform.system')
+ def test_new(self, mock_platform):
+ if not self.fact_class:
+ pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
+ mock_platform.return_value = self.platform_id
+ inst = self.fact_class(module=Mock(), load_on_init=False)
+ self.assertIsInstance(inst, self.fact_class)
+ self.assertEqual(inst.platform, self.platform_id)
+
+ def test_subclass(self):
+ if not self.fact_class:
+ pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
+ # 'Generic' will try to map to platform.system() that we are not mocking here
+ if self.platform_id == 'Generic':
+ return
+ inst = self.fact_class(module=Mock(), load_on_init=False)
+ self.assertIsInstance(inst, self.fact_class)
+ self.assertEqual(inst.platform, self.platform_id)
+
+ def test_collector(self):
+ if not self.collector_class:
+ pytest.skip('This test class needs to be updated to specify collector_class')
+ inst = self.collector_class()
+ self.assertIsInstance(inst, self.collector_class)
+ self.assertEqual(inst._platform, self.platform_id)
+
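+# The platform subclasses below reuse the three tests above by overriding
+# only the class attributes; a test is skipped when a platform lacks a
+# fact_class or collector_class (e.g. DragonFly has no Hardware fact_class).
+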
+
+class TestLinuxFactsPlatform(BaseTestFactsPlatform):
+ platform_id = 'Linux'
+ fact_class = hardware.linux.LinuxHardware
+ collector_class = hardware.linux.LinuxHardwareCollector
+
+
+class TestHurdFactsPlatform(BaseTestFactsPlatform):
+ platform_id = 'GNU'
+ fact_class = hardware.hurd.HurdHardware
+ collector_class = hardware.hurd.HurdHardwareCollector
+
+
+class TestSunOSHardware(BaseTestFactsPlatform):
+ platform_id = 'SunOS'
+ fact_class = hardware.sunos.SunOSHardware
+ collector_class = hardware.sunos.SunOSHardwareCollector
+
+
+class TestOpenBSDHardware(BaseTestFactsPlatform):
+ platform_id = 'OpenBSD'
+ fact_class = hardware.openbsd.OpenBSDHardware
+ collector_class = hardware.openbsd.OpenBSDHardwareCollector
+
+
+class TestFreeBSDHardware(BaseTestFactsPlatform):
+ platform_id = 'FreeBSD'
+ fact_class = hardware.freebsd.FreeBSDHardware
+ collector_class = hardware.freebsd.FreeBSDHardwareCollector
+
+
+class TestDragonFlyHardware(BaseTestFactsPlatform):
+ platform_id = 'DragonFly'
+ fact_class = None
+ collector_class = hardware.dragonfly.DragonFlyHardwareCollector
+
+
+class TestNetBSDHardware(BaseTestFactsPlatform):
+ platform_id = 'NetBSD'
+ fact_class = hardware.netbsd.NetBSDHardware
+ collector_class = hardware.netbsd.NetBSDHardwareCollector
+
+
+class TestAIXHardware(BaseTestFactsPlatform):
+ platform_id = 'AIX'
+ fact_class = hardware.aix.AIXHardware
+ collector_class = hardware.aix.AIXHardwareCollector
+
+
+class TestHPUXHardware(BaseTestFactsPlatform):
+ platform_id = 'HP-UX'
+ fact_class = hardware.hpux.HPUXHardware
+ collector_class = hardware.hpux.HPUXHardwareCollector
+
+
+class TestDarwinHardware(BaseTestFactsPlatform):
+ platform_id = 'Darwin'
+ fact_class = hardware.darwin.DarwinHardware
+ collector_class = hardware.darwin.DarwinHardwareCollector
+
+
+class TestGenericNetwork(BaseTestFactsPlatform):
+ platform_id = 'Generic'
+ fact_class = network.base.Network
+
+
+class TestHurdPfinetNetwork(BaseTestFactsPlatform):
+ platform_id = 'GNU'
+ fact_class = network.hurd.HurdPfinetNetwork
+ collector_class = network.hurd.HurdNetworkCollector
+
+
+class TestLinuxNetwork(BaseTestFactsPlatform):
+ platform_id = 'Linux'
+ fact_class = network.linux.LinuxNetwork
+ collector_class = network.linux.LinuxNetworkCollector
+
+
+class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform):
+ platform_id = 'Generic_BSD_Ifconfig'
+ fact_class = network.generic_bsd.GenericBsdIfconfigNetwork
+ collector_class = None
+
+
+class TestHPUXNetwork(BaseTestFactsPlatform):
+ platform_id = 'HP-UX'
+ fact_class = network.hpux.HPUXNetwork
+ collector_class = network.hpux.HPUXNetworkCollector
+
+
+class TestDarwinNetwork(BaseTestFactsPlatform):
+ platform_id = 'Darwin'
+ fact_class = network.darwin.DarwinNetwork
+ collector_class = network.darwin.DarwinNetworkCollector
+
+
+class TestFreeBSDNetwork(BaseTestFactsPlatform):
+ platform_id = 'FreeBSD'
+ fact_class = network.freebsd.FreeBSDNetwork
+ collector_class = network.freebsd.FreeBSDNetworkCollector
+
+
+class TestDragonFlyNetwork(BaseTestFactsPlatform):
+ platform_id = 'DragonFly'
+ fact_class = network.dragonfly.DragonFlyNetwork
+ collector_class = network.dragonfly.DragonFlyNetworkCollector
+
+
+class TestAIXNetwork(BaseTestFactsPlatform):
+ platform_id = 'AIX'
+ fact_class = network.aix.AIXNetwork
+ collector_class = network.aix.AIXNetworkCollector
+
+
+class TestNetBSDNetwork(BaseTestFactsPlatform):
+ platform_id = 'NetBSD'
+ fact_class = network.netbsd.NetBSDNetwork
+ collector_class = network.netbsd.NetBSDNetworkCollector
+
+
+class TestOpenBSDNetwork(BaseTestFactsPlatform):
+ platform_id = 'OpenBSD'
+ fact_class = network.openbsd.OpenBSDNetwork
+ collector_class = network.openbsd.OpenBSDNetworkCollector
+
+
+class TestSunOSNetwork(BaseTestFactsPlatform):
+ platform_id = 'SunOS'
+ fact_class = network.sunos.SunOSNetwork
+ collector_class = network.sunos.SunOSNetworkCollector
+
+
+class TestLinuxVirtual(BaseTestFactsPlatform):
+ platform_id = 'Linux'
+ fact_class = virtual.linux.LinuxVirtual
+ collector_class = virtual.linux.LinuxVirtualCollector
+
+
+class TestFreeBSDVirtual(BaseTestFactsPlatform):
+ platform_id = 'FreeBSD'
+ fact_class = virtual.freebsd.FreeBSDVirtual
+ collector_class = virtual.freebsd.FreeBSDVirtualCollector
+
+
+class TestNetBSDVirtual(BaseTestFactsPlatform):
+ platform_id = 'NetBSD'
+ fact_class = virtual.netbsd.NetBSDVirtual
+ collector_class = virtual.netbsd.NetBSDVirtualCollector
+
+
+class TestOpenBSDVirtual(BaseTestFactsPlatform):
+ platform_id = 'OpenBSD'
+ fact_class = virtual.openbsd.OpenBSDVirtual
+ collector_class = virtual.openbsd.OpenBSDVirtualCollector
+
+
+class TestHPUXVirtual(BaseTestFactsPlatform):
+ platform_id = 'HP-UX'
+ fact_class = virtual.hpux.HPUXVirtual
+ collector_class = virtual.hpux.HPUXVirtualCollector
+
+
+class TestSunOSVirtual(BaseTestFactsPlatform):
+ platform_id = 'SunOS'
+ fact_class = virtual.sunos.SunOSVirtual
+ collector_class = virtual.sunos.SunOSVirtualCollector
+
+
+LSBLK_OUTPUT = b"""
+/dev/sda
+/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
+/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
+/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
+/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
+/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
+/dev/sr0
+/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
+/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
+/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
+/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
+/dev/mapper/docker-253:1-1050967-pool
+/dev/loop2
+/dev/mapper/docker-253:1-1050967-pool
+"""
+
+LSBLK_OUTPUT_2 = b"""
+/dev/sda
+/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
+/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
+/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
+/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
+/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
+/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
+/dev/sr0
+/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
+"""
+
+LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
+
+UDEVADM_UUID = 'N/A'
+
+MTAB = """
+sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
+proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
+securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
+devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
+tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
+cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
+pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
+cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
+cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
+cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
+cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
+cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
+cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
+cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
+cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
+cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
+cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
+configfs /sys/kernel/config configfs rw,relatime 0 0
+/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
+selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
+systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
+debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
+hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
+tmpfs /tmp tmpfs rw,seclabel 0 0
+mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
+/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
+/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
+/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
+tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
+gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
+grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
+"""
+
+MTAB_ENTRIES = [
+ [
+ 'sysfs',
+ '/sys',
+ 'sysfs',
+ 'rw,seclabel,nosuid,nodev,noexec,relatime',
+ '0',
+ '0'
+ ],
+ ['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
+ [
+ 'devtmpfs',
+ '/dev',
+ 'devtmpfs',
+ 'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
+ '0',
+ '0'
+ ],
+ [
+ 'securityfs',
+ '/sys/kernel/security',
+ 'securityfs',
+ 'rw,nosuid,nodev,noexec,relatime',
+ '0',
+ '0'
+ ],
+ ['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
+ [
+ 'devpts',
+ '/dev/pts',
+ 'devpts',
+ 'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
+ '0',
+ '0'
+ ],
+ ['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
+ [
+ 'tmpfs',
+ '/sys/fs/cgroup',
+ 'tmpfs',
+ 'ro,seclabel,nosuid,nodev,noexec,mode=755',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/systemd',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
+ '0',
+ '0'
+ ],
+ [
+ 'pstore',
+ '/sys/fs/pstore',
+ 'pstore',
+ 'rw,seclabel,nosuid,nodev,noexec,relatime',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/devices',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,devices',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/freezer',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,freezer',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/memory',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,memory',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/pids',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,pids',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/blkio',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,blkio',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/cpuset',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,cpuset',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/cpu,cpuacct',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/hugetlb',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,hugetlb',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/perf_event',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,perf_event',
+ '0',
+ '0'
+ ],
+ [
+ 'cgroup',
+ '/sys/fs/cgroup/net_cls,net_prio',
+ 'cgroup',
+ 'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
+ '0',
+ '0'
+ ],
+ ['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
+ [
+ '/dev/mapper/fedora_dhcp129--186-root',
+ '/',
+ 'ext4',
+ 'rw,seclabel,relatime,data=ordered',
+ '0',
+ '0'
+ ],
+ ['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
+ [
+ 'systemd-1',
+ '/proc/sys/fs/binfmt_misc',
+ 'autofs',
+ 'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
+ '0',
+ '0'
+ ],
+ ['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
+ [
+ 'hugetlbfs',
+ '/dev/hugepages',
+ 'hugetlbfs',
+ 'rw,seclabel,relatime',
+ '0',
+ '0'
+ ],
+ ['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
+ ['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
+ [
+ '/dev/loop0',
+ '/var/lib/machines',
+ 'btrfs',
+ 'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
+ '0',
+ '0'
+ ],
+ ['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+ # A 'none' fstype
+ ['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+    # let's assume this is a bind mount
+ ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
+ [
+ '/dev/mapper/fedora_dhcp129--186-home',
+ '/home',
+ 'ext4',
+ 'rw,seclabel,relatime,data=ordered',
+ '0',
+ '0'
+ ],
+ [
+ 'tmpfs',
+ '/run/user/1000',
+ 'tmpfs',
+ 'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
+ '0',
+ '0'
+ ],
+ [
+ 'gvfsd-fuse',
+ '/run/user/1000/gvfs',
+ 'fuse.gvfsd-fuse',
+ 'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
+ '0',
+ '0'
+ ],
+ ['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0'],
+ # Mount path with space in the name
+    # The space is encoded as \040 since the fields in /etc/mtab are space-delimited
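+    # (so a mountpoint named '/mnt/foo bar' appears in /etc/mtab as '/mnt/foo\040bar')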
+ ['/dev/sdz9', r'/mnt/foo\040bar', 'ext4', 'rw,relatime', '0', '0'],
+]
+
+BIND_MOUNTS = ['/not/a/real/bind_mount']
+
+with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f:
+ FINDMNT_OUTPUT = f.read()
+
+
+class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
+
+ # FIXME: mock.patch instead
+ def setUp(self):
+        # The @timeout decorator tracebacks if GATHER_TIMEOUT is None (the default until get_all_facts sets it via a global)
+ facts.GATHER_TIMEOUT = 10
+
+ def tearDown(self):
+ facts.GATHER_TIMEOUT = None
+
+    # The Hardware subclasses freak out if instantiated directly, so
+    # mock platform.system and instantiate Hardware() so we get a LinuxHardware()
+    # we can test.
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID)
+ def test_get_mount_facts(self,
+ mock_lsblk_uuid,
+ mock_find_bind_mounts,
+ mock_mtab_entries,
+ mock_udevadm_uuid):
+ module = Mock()
+        # Constructs a LinuxHardware without collecting facts on init
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+
+        # get_mount_facts() returns the mount facts dict directly
+ mount_facts = lh.get_mount_facts()
+ self.assertIsInstance(mount_facts, dict)
+ self.assertIn('mounts', mount_facts)
+ self.assertIsInstance(mount_facts['mounts'], list)
+ self.assertIsInstance(mount_facts['mounts'][0], dict)
+
+ # Find mounts with space in the mountpoint path
+ mounts_with_space = [x for x in mount_facts['mounts'] if ' ' in x['mount']]
+ self.assertEqual(len(mounts_with_space), 1)
+ self.assertEqual(mounts_with_space[0]['mount'], '/mnt/foo bar')
+
+ @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
+ def test_get_mtab_entries(self, mock_get_file_content):
+
+ module = Mock()
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ mtab_entries = lh._mtab_entries()
+ self.assertIsInstance(mtab_entries, list)
+ self.assertIsInstance(mtab_entries[0], list)
+ self.assertEqual(len(mtab_entries), 38)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
+ def test_find_bind_mounts(self, mock_run_findmnt):
+ module = Mock()
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ bind_mounts = lh._find_bind_mounts()
+
+ # If bind_mounts becomes another seq type, feel free to change
+ self.assertIsInstance(bind_mounts, set)
+ self.assertEqual(len(bind_mounts), 1)
+ self.assertIn('/not/a/real/bind_mount', bind_mounts)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
+ def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
+ module = Mock()
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ bind_mounts = lh._find_bind_mounts()
+
+ self.assertIsInstance(bind_mounts, set)
+ self.assertEqual(len(bind_mounts), 0)
+
+ def test_find_bind_mounts_no_findmnts(self):
+ module = Mock()
+ module.get_bin_path = Mock(return_value=None)
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ bind_mounts = lh._find_bind_mounts()
+
+ self.assertIsInstance(bind_mounts, set)
+ self.assertEqual(len(bind_mounts), 0)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
+ def test_lsblk_uuid(self, mock_run_lsblk):
+ module = Mock()
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertIn(b'/dev/loop9', lsblk_uuids)
+ self.assertIn(b'/dev/sda1', lsblk_uuids)
+ self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
+ def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
+ module = Mock()
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertEqual(len(lsblk_uuids), 0)
+
+ def test_lsblk_uuid_no_lsblk(self):
+ module = Mock()
+ module.get_bin_path = Mock(return_value=None)
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertEqual(len(lsblk_uuids), 0)
+
+ @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
+ def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
+ module = Mock()
+ lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
+ lsblk_uuids = lh._lsblk_uuid()
+ self.assertIsInstance(lsblk_uuids, dict)
+ self.assertIn(b'/dev/loop0', lsblk_uuids)
+ self.assertIn(b'/dev/sda1', lsblk_uuids)
+ self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
+ self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
diff --git a/test/units/module_utils/facts/test_timeout.py b/test/units/module_utils/facts/test_timeout.py
new file mode 100644
index 00000000..2adbc4a6
--- /dev/null
+++ b/test/units/module_utils/facts/test_timeout.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+import pytest
+
+from ansible.module_utils.facts import timeout
+
+
+@pytest.fixture
+def set_gather_timeout_higher():
+ default_timeout = timeout.GATHER_TIMEOUT
+ timeout.GATHER_TIMEOUT = 5
+ yield
+ timeout.GATHER_TIMEOUT = default_timeout
+
+
+@pytest.fixture
+def set_gather_timeout_lower():
+ default_timeout = timeout.GATHER_TIMEOUT
+ timeout.GATHER_TIMEOUT = 2
+ yield
+ timeout.GATHER_TIMEOUT = default_timeout
+
+
+@timeout.timeout
+def sleep_amount_implicit(amount):
+ # implicit refers to the lack of argument to the decorator
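+    # (a bare @timeout.timeout falls back to timeout.GATHER_TIMEOUT when set,
+    # else timeout.DEFAULT_GATHER_TIMEOUT; the fixtures above rely on that)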
+ time.sleep(amount)
+ return 'Succeeded after {0} sec'.format(amount)
+
+
+@timeout.timeout(timeout.DEFAULT_GATHER_TIMEOUT + 5)
+def sleep_amount_explicit_higher(amount):
+ # explicit refers to the argument to the decorator
+ time.sleep(amount)
+ return 'Succeeded after {0} sec'.format(amount)
+
+
+@timeout.timeout(2)
+def sleep_amount_explicit_lower(amount):
+ # explicit refers to the argument to the decorator
+ time.sleep(amount)
+ return 'Succeeded after {0} sec'.format(amount)
+
+
+#
+# Tests for how the timeout decorator is specified
+#
+
+def test_defaults_still_within_bounds():
+ # If the default changes outside of these bounds, some of the tests will
+ # no longer test the right thing. Need to review and update the timeouts
+ # in the other tests if this fails
+ assert timeout.DEFAULT_GATHER_TIMEOUT >= 4
+
+
+def test_implicit_file_default_succeeds():
+ # amount checked must be less than DEFAULT_GATHER_TIMEOUT
+ assert sleep_amount_implicit(1) == 'Succeeded after 1 sec'
+
+
+def test_implicit_file_default_timesout(monkeypatch):
+ monkeypatch.setattr(timeout, 'DEFAULT_GATHER_TIMEOUT', 1)
+ # sleep_time is greater than the default
+ sleep_time = timeout.DEFAULT_GATHER_TIMEOUT + 1
+ with pytest.raises(timeout.TimeoutError):
+ assert sleep_amount_implicit(sleep_time) == '(Not expected to succeed)'
+
+
+def test_implicit_file_overridden_succeeds(set_gather_timeout_higher):
+ # Set sleep_time greater than the default timeout and less than our new timeout
+ sleep_time = 3
+ assert sleep_amount_implicit(sleep_time) == 'Succeeded after {0} sec'.format(sleep_time)
+
+
+def test_implicit_file_overridden_timesout(set_gather_timeout_lower):
+ # Set sleep_time greater than our new timeout but less than the default
+ sleep_time = 3
+ with pytest.raises(timeout.TimeoutError):
+        assert sleep_amount_implicit(sleep_time) == '(Not expected to succeed)'
+
+
+def test_explicit_succeeds(monkeypatch):
+ monkeypatch.setattr(timeout, 'DEFAULT_GATHER_TIMEOUT', 1)
+ # Set sleep_time greater than the default timeout and less than our new timeout
+ sleep_time = 2
+ assert sleep_amount_explicit_higher(sleep_time) == 'Succeeded after {0} sec'.format(sleep_time)
+
+
+def test_explicit_timeout():
+    # Set sleep_time greater than the explicit 2 second timeout but less than the default
+ sleep_time = 3
+ with pytest.raises(timeout.TimeoutError):
+ assert sleep_amount_explicit_lower(sleep_time) == '(Not expected to succeed)'
+
+
+#
+# Test that exception handling works
+#
+
+@timeout.timeout(1)
+def function_times_out():
+ time.sleep(2)
+
+
+# This is just about the same test as function_times_out, but it uses a separate
+# process, which is where we normally have our timeouts. It's more of an
+# integration test than a unit test.
+@timeout.timeout(1)
+def function_times_out_in_run_command(am):
+ am.run_command([sys.executable, '-c', 'import time ; time.sleep(2)'])
+
+
+@timeout.timeout(1)
+def function_other_timeout():
+ raise TimeoutError('Vanilla Timeout')
+
+
+@timeout.timeout(1)
+def function_raises():
+ 1 / 0
+
+
+@timeout.timeout(1)
+def function_catches_all_exceptions():
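+    # The timeout fires in the caller (the decorator waits on a worker), so this
+    # except clause never sees the TimeoutError and cannot swallow it; see
+    # test_exception_not_caught_by_called_code below.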
+ try:
+ time.sleep(10)
+ except BaseException:
+ raise RuntimeError('We should not have gotten here')
+
+
+def test_timeout_raises_timeout():
+ with pytest.raises(timeout.TimeoutError):
+ assert function_times_out() == '(Not expected to succeed)'
+
+
+@pytest.mark.parametrize('stdin', ({},), indirect=['stdin'])
+def test_timeout_raises_timeout_integration_test(am):
+ with pytest.raises(timeout.TimeoutError):
+ assert function_times_out_in_run_command(am) == '(Not expected to succeed)'
+
+
+def test_timeout_raises_other_exception():
+ with pytest.raises(ZeroDivisionError):
+ assert function_raises() == '(Not expected to succeed)'
+
+
+def test_exception_not_caught_by_called_code():
+ with pytest.raises(timeout.TimeoutError):
+ assert function_catches_all_exceptions() == '(Not expected to succeed)'
diff --git a/test/units/module_utils/facts/test_utils.py b/test/units/module_utils/facts/test_utils.py
new file mode 100644
index 00000000..28cb5d31
--- /dev/null
+++ b/test/units/module_utils/facts/test_utils.py
@@ -0,0 +1,39 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch
+
+from ansible.module_utils.facts import utils
+
+
+class TestGetMountSize(unittest.TestCase):
+ def test(self):
+ mount_info = utils.get_mount_size('/dev/null/not/a/real/mountpoint')
+ self.assertIsInstance(mount_info, dict)
+
+ def test_proc(self):
+ mount_info = utils.get_mount_size('/proc')
+ self.assertIsInstance(mount_info, dict)
+
+ @patch('ansible.module_utils.facts.utils.os.statvfs', side_effect=OSError('intentionally induced os error'))
+ def test_oserror_on_statvfs(self, mock_statvfs):
+ mount_info = utils.get_mount_size('/dev/null/doesnt/matter')
+ self.assertIsInstance(mount_info, dict)
+ self.assertDictEqual(mount_info, {})
diff --git a/test/units/module_utils/json_utils/__init__.py b/test/units/module_utils/json_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/json_utils/__init__.py
diff --git a/test/units/module_utils/json_utils/test_filter_non_json_lines.py b/test/units/module_utils/json_utils/test_filter_non_json_lines.py
new file mode 100644
index 00000000..b5b94999
--- /dev/null
+++ b/test/units/module_utils/json_utils/test_filter_non_json_lines.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Matt Davis <mdavis@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.module_utils.json_utils import _filter_non_json_lines
+
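+# _filter_non_json_lines() strips noise surrounding the JSON payload and returns
+# (filtered_data, warnings); leading junk is dropped silently, while trailing
+# junk additionally produces a warning, as the tests below demonstrate.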
+
+class TestAnsibleModuleExitJson(unittest.TestCase):
+ single_line_json_dict = u"""{"key": "value", "olá": "mundo"}"""
+ single_line_json_array = u"""["a","b","c"]"""
+ multi_line_json_dict = u"""{
+"key":"value"
+}"""
+ multi_line_json_array = u"""[
+"a",
+"b",
+"c"]"""
+
+ all_inputs = [
+ single_line_json_dict,
+ single_line_json_array,
+ multi_line_json_dict,
+ multi_line_json_array
+ ]
+
+ junk = [u"single line of junk", u"line 1/2 of junk\nline 2/2 of junk"]
+
+ unparsable_cases = (
+ u'No json here',
+ u'"olá": "mundo"',
+ u'{"No json": "ending"',
+ u'{"wrong": "ending"]',
+ u'["wrong": "ending"}',
+ )
+
+ def test_just_json(self):
+ for i in self.all_inputs:
+ filtered, warnings = _filter_non_json_lines(i)
+ self.assertEqual(filtered, i)
+ self.assertEqual(warnings, [])
+
+ def test_leading_junk(self):
+ for i in self.all_inputs:
+ for j in self.junk:
+ filtered, warnings = _filter_non_json_lines(j + "\n" + i)
+ self.assertEqual(filtered, i)
+ self.assertEqual(warnings, [])
+
+ def test_trailing_junk(self):
+ for i in self.all_inputs:
+ for j in self.junk:
+ filtered, warnings = _filter_non_json_lines(i + "\n" + j)
+ self.assertEqual(filtered, i)
+ self.assertEqual(warnings, [u"Module invocation had junk after the JSON data: %s" % j.strip()])
+
+ def test_leading_and_trailing_junk(self):
+ for i in self.all_inputs:
+ for j in self.junk:
+ filtered, warnings = _filter_non_json_lines("\n".join([j, i, j]))
+ self.assertEqual(filtered, i)
+ self.assertEqual(warnings, [u"Module invocation had junk after the JSON data: %s" % j.strip()])
+
+ def test_unparsable_filter_non_json_lines(self):
+ for i in self.unparsable_cases:
+ self.assertRaises(
+ ValueError,
+ _filter_non_json_lines,
+ data=i
+ )
diff --git a/test/units/module_utils/parsing/test_convert_bool.py b/test/units/module_utils/parsing/test_convert_bool.py
new file mode 100644
index 00000000..2c5f8121
--- /dev/null
+++ b/test/units/module_utils/parsing/test_convert_bool.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017 Ansible Project
+# License: GNU General Public License v3 or later (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt )
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.parsing.convert_bool import boolean
+
+
+class TestBoolean:
+ def test_bools(self):
+ assert boolean(True) is True
+ assert boolean(False) is False
+
+ def test_none(self):
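+        # strict=True accepts only recognised boolean-ish values, so None raises;
+        # strict=False coerces anything unrecognised to False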
+ with pytest.raises(TypeError):
+ assert boolean(None, strict=True) is False
+ assert boolean(None, strict=False) is False
+
+ def test_numbers(self):
+ assert boolean(1) is True
+ assert boolean(0) is False
+ assert boolean(0.0) is False
+
+# Current boolean() doesn't consider these to be true values
+# def test_other_numbers(self):
+# assert boolean(2) is True
+# assert boolean(-1) is True
+# assert boolean(0.1) is True
+
+ def test_strings(self):
+ assert boolean("true") is True
+ assert boolean("TRUE") is True
+ assert boolean("t") is True
+ assert boolean("yes") is True
+ assert boolean("y") is True
+ assert boolean("on") is True
+
+ def test_junk_values_nonstrict(self):
+ assert boolean("flibbity", strict=False) is False
+ assert boolean(42, strict=False) is False
+ assert boolean(42.0, strict=False) is False
+ assert boolean(object(), strict=False) is False
+
+ def test_junk_values_strict(self):
+ with pytest.raises(TypeError):
+ assert boolean("flibbity", strict=True) is False
+
+ with pytest.raises(TypeError):
+ assert boolean(42, strict=True) is False
+
+ with pytest.raises(TypeError):
+ assert boolean(42.0, strict=True) is False
+
+ with pytest.raises(TypeError):
+ assert boolean(object(), strict=True) is False
diff --git a/test/units/module_utils/test_api.py b/test/units/module_utils/test_api.py
new file mode 100644
index 00000000..0eaea046
--- /dev/null
+++ b/test/units/module_utils/test_api.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils.api import rate_limit, retry
+
+import pytest
+
+
+class TestRateLimit:
+
+ def test_ratelimit(self):
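+        # rate_limit(rate, rate_limit) throttles the wrapped callable to at most
+        # 'rate' calls per 'rate_limit' seconds; a single call passes straight through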
+ @rate_limit(rate=1, rate_limit=1)
+ def login_database():
+ return "success"
+ r = login_database()
+
+ assert r == 'success'
+
+
+class TestRetry:
+
+ def test_no_retry_required(self):
+ self.counter = 0
+
+ @retry(retries=4, retry_pause=2)
+ def login_database():
+ self.counter += 1
+ return 'success'
+
+ r = login_database()
+
+ assert r == 'success'
+ assert self.counter == 1
+
+ def test_catch_exception(self):
+
+ @retry(retries=1)
+ def login_database():
+ return 'success'
+
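+        # retry(retries=1) trips its retry limit straight away, so the decorator
+        # raises even though login_database itself would return 'success'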
+ with pytest.raises(Exception):
+ login_database()
diff --git a/test/units/module_utils/test_distro.py b/test/units/module_utils/test_distro.py
new file mode 100644
index 00000000..708e7bca
--- /dev/null
+++ b/test/units/module_utils/test_distro.py
@@ -0,0 +1,38 @@
+
+# (c) 2018 Adrian Likins <alikins@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# or
+# Apache License v2.0 (see http://www.apache.org/licenses/LICENSE-2.0)
+#
+# Dual licensed so any test cases could potentially be included by the upstream project
+# that module_utils/distro.py is from (https://github.com/nir0s/distro)
+
+
+# Note that nir0s/distro has many more tests in its test suite. The tests here are
+# primarily for testing the vendoring.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils import distro
+from ansible.module_utils.six import string_types
+
+
+# Generic test case with minimal assertions about specific returned values.
+class TestDistro():
+ # should run on any platform without errors, even if non-linux without any
+ # useful info to return
+ def test_info(self):
+ info = distro.info()
+ assert isinstance(info, dict), \
+            'distro.info() returned %s (%s) which is not a dict' % (info, type(info))
+
+ def test_linux_distribution(self):
+ linux_dist = distro.linux_distribution()
+ assert isinstance(linux_dist, tuple), \
+            'linux_distribution() returned %s (%s) which is not a tuple' % (linux_dist, type(linux_dist))
+
+ def test_id(self):
+ id = distro.id()
+ assert isinstance(id, string_types), 'distro.id() returned %s (%s) which is not a string' % (id, type(id))
diff --git a/test/units/module_utils/urls/__init__.py b/test/units/module_utils/urls/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/module_utils/urls/__init__.py
diff --git a/test/units/module_utils/urls/fixtures/client.key b/test/units/module_utils/urls/fixtures/client.key
new file mode 100644
index 00000000..0e90d95d
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/client.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDTyiVxrsSyZ+Qr
+iMT6sFYCqQtkLqlIWfbpTg9B6fZc793uoMzLUGq3efiZUhhxI78dQ3gNPgs1sK3W
+heFpk1n4IL8ll1MS1uJKk2vYqzZVhjgcvQpeV9gm7bt0ndPzGj5h4fh7proPntSy
+eBvMKVoqTT7tEnapRKy3anbwRPgTt7B5jEvJkPazuIc+ooMsYOHWfvj4oVsev0N2
+SsP0o6cHcsRujFMhz/JTJ1STQxacaVuyKpXacX7Eu1MJgGt/jU/QKNREcV9LdneO
+NgqY9tNv0h+9s7DfHYXm8U3POr+bdcW6Yy4791KGCaUNtiNqT1lvu/4yd4WRkXbF
+Fm5hJUUpAgMBAAECggEBAJYOac1MSK0nEvENbJM6ERa9cwa+UM6kf176IbFP9XAP
+u6zxXWjIR3RMBSmMkyjGbQhs30hypzqZPfH61aUZ8+rsOMKHnyKAAcFZBlZzqIGc
+IXGrNwd1Mf8S/Xg4ww1BkOWFV6s0jCu5G3Z/xyI2Ql4qcOVD6bMwpzclRbQjCand
+dvqyCdMD0sRDyeOIK5hBhUY60JnWbMCu6pBU+qPoRukbRieaeDLIN1clwEqIQV78
+LLnv4n9fuGozH0JdHHfyXFytCgIJvEspZUja/5R4orADhr3ZB010RLzYvs2ndE3B
+4cF9RgxspJZeJ/P+PglViZuzj37pXy+7GAcJLR9ka4kCgYEA/l01XKwkCzMgXHW4
+UPgl1+on42BsN7T9r3S5tihOjHf4ZJWkgYzisLVX+Nc1oUI3HQfM9PDJZXMMNm7J
+ZRvERcopU26wWqr6CFPblGv8oqXHqcpeta8i3xZKoPASsTW6ssuPCEajiLZbQ1rH
+H/HP+OZIVLM/WCPgA2BckTU9JnsCgYEA1SbXllXnlwGqmjitmY1Z07rUxQ3ah/fB
+iccbbg3E4onontYXIlI5zQms3u+qBdi0ZuwaDm5Y4BetOq0a3UyxAsugqVFnzTba
+1w/sFb3fw9KeQ/il4CXkbq87nzJfDmEyqHGCCYXbijHBxnq99PkqwVpaAhHHEW0m
+vWyMUvPRY6sCgYAbtUWR0cKfYbNdvwkT8OQWcBBmSWOgcdvMmBd+y0c7L/pj4pUn
+85PiEe8CUVcrOM5OIEJoUC5wGacz6r+PfwXTYGE+EGmvhr5z18aslVLQ2OQ2D7Bf
+dDOFP6VjgKNYoHS0802iZid8RfkNDj9wsGOqRlOMvnXhAQ9u7rlGrBj8LwKBgFfo
+ph99nH8eE9N5LrfWoUZ+loQS258aInsFYB26lgnsYMEpgO8JxIb4x5BGffPdVUHh
+fDmZbxQ1D5/UhvDgUVzayI8sYMg1KHpsOa0Z2zCzK8zSvu68EgNISCm3J5cRpUft
+UHlG+K19KfMG6lMfdG+8KMUTuetI/iI/o3wOzLvzAoGAIrOh30rHt8wit7ELARyx
+wPkp2ARYXrKfX3NES4c67zSAi+3dCjxRqywqTI0gLicyMlj8zEu9YE9Ix/rl8lRZ
+nQ9LZmqv7QHzhLTUCPGgZYnemvBzo7r0eW8Oag52dbcJO6FBszfWrxskm/fX25Rb
+WPxih2vdRy814dNPW25rgdw=
+-----END PRIVATE KEY-----
diff --git a/test/units/module_utils/urls/fixtures/client.pem b/test/units/module_utils/urls/fixtures/client.pem
new file mode 100644
index 00000000..c8c7b828
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/client.pem
@@ -0,0 +1,81 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 4099 (0x1003)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=US, ST=North Carolina, L=Durham, O=Ansible, CN=ansible.http.tests
+ Validity
+ Not Before: Mar 21 18:22:47 2018 GMT
+ Not After : Mar 18 18:22:47 2028 GMT
+ Subject: C=US, ST=North Carolina, O=Ansible, CN=client.ansible.http.tests
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:d3:ca:25:71:ae:c4:b2:67:e4:2b:88:c4:fa:b0:
+ 56:02:a9:0b:64:2e:a9:48:59:f6:e9:4e:0f:41:e9:
+ f6:5c:ef:dd:ee:a0:cc:cb:50:6a:b7:79:f8:99:52:
+ 18:71:23:bf:1d:43:78:0d:3e:0b:35:b0:ad:d6:85:
+ e1:69:93:59:f8:20:bf:25:97:53:12:d6:e2:4a:93:
+ 6b:d8:ab:36:55:86:38:1c:bd:0a:5e:57:d8:26:ed:
+ bb:74:9d:d3:f3:1a:3e:61:e1:f8:7b:a6:ba:0f:9e:
+ d4:b2:78:1b:cc:29:5a:2a:4d:3e:ed:12:76:a9:44:
+ ac:b7:6a:76:f0:44:f8:13:b7:b0:79:8c:4b:c9:90:
+ f6:b3:b8:87:3e:a2:83:2c:60:e1:d6:7e:f8:f8:a1:
+ 5b:1e:bf:43:76:4a:c3:f4:a3:a7:07:72:c4:6e:8c:
+ 53:21:cf:f2:53:27:54:93:43:16:9c:69:5b:b2:2a:
+ 95:da:71:7e:c4:bb:53:09:80:6b:7f:8d:4f:d0:28:
+ d4:44:71:5f:4b:76:77:8e:36:0a:98:f6:d3:6f:d2:
+ 1f:bd:b3:b0:df:1d:85:e6:f1:4d:cf:3a:bf:9b:75:
+ c5:ba:63:2e:3b:f7:52:86:09:a5:0d:b6:23:6a:4f:
+ 59:6f:bb:fe:32:77:85:91:91:76:c5:16:6e:61:25:
+ 45:29
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ AF:F3:E5:2A:EB:CF:C7:7E:A4:D6:49:92:F9:29:EE:6A:1B:68:AB:0F
+ X509v3 Authority Key Identifier:
+ keyid:13:2E:30:F0:04:EA:41:5F:B7:08:BD:34:31:D7:11:EA:56:A6:99:F0
+
+ Signature Algorithm: sha256WithRSAEncryption
+ 29:62:39:25:79:58:eb:a4:b3:0c:ea:aa:1d:2b:96:7c:6e:10:
+ ce:16:07:b7:70:7f:16:da:fd:20:e6:a2:d9:b4:88:e0:f9:84:
+ 87:f8:b0:0d:77:8b:ae:27:f5:ee:e6:4f:86:a1:2d:74:07:7c:
+ c7:5d:c2:bd:e4:70:e7:42:e4:14:ee:b9:b7:63:b8:8c:6d:21:
+ 61:56:0b:96:f6:15:ba:7a:ae:80:98:ac:57:99:79:3d:7a:a9:
+ d8:26:93:30:17:53:7c:2d:02:4b:64:49:25:65:e7:69:5a:08:
+ cf:84:94:8e:6a:42:a7:d1:4f:ba:39:4b:7c:11:67:31:f7:1b:
+ 2b:cd:79:c2:28:4d:d9:88:66:d6:7f:56:4c:4b:37:d1:3d:a8:
+ d9:4a:6b:45:1d:4d:a7:12:9f:29:77:6a:55:c1:b5:1d:0e:a5:
+ b9:4f:38:16:3c:7d:85:ae:ff:23:34:c7:2c:f6:14:0f:55:ef:
+ b8:00:89:f1:b2:8a:75:15:41:81:72:d0:43:a6:86:d1:06:e6:
+ ce:81:7e:5f:33:e6:f4:19:d6:70:00:ba:48:6e:05:fd:4c:3c:
+ c3:51:1b:bd:43:1a:24:c5:79:ea:7a:f0:85:a5:40:10:85:e9:
+ 23:09:09:80:38:9d:bc:81:5e:59:8c:5a:4d:58:56:b9:71:c2:
+ 78:cd:f3:b0
+-----BEGIN CERTIFICATE-----
+MIIDuTCCAqGgAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx
+FzAVBgNVBAgMDk5vcnRoIENhcm9saW5hMQ8wDQYDVQQHDAZEdXJoYW0xEDAOBgNV
+BAoMB0Fuc2libGUxGzAZBgNVBAMMEmFuc2libGUuaHR0cC50ZXN0czAeFw0xODAz
+MjExODIyNDdaFw0yODAzMTgxODIyNDdaMFwxCzAJBgNVBAYTAlVTMRcwFQYDVQQI
+DA5Ob3J0aCBDYXJvbGluYTEQMA4GA1UECgwHQW5zaWJsZTEiMCAGA1UEAwwZY2xp
+ZW50LmFuc2libGUuaHR0cC50ZXN0czCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBANPKJXGuxLJn5CuIxPqwVgKpC2QuqUhZ9ulOD0Hp9lzv3e6gzMtQard5
++JlSGHEjvx1DeA0+CzWwrdaF4WmTWfggvyWXUxLW4kqTa9irNlWGOBy9Cl5X2Cbt
+u3Sd0/MaPmHh+Humug+e1LJ4G8wpWipNPu0SdqlErLdqdvBE+BO3sHmMS8mQ9rO4
+hz6igyxg4dZ++PihWx6/Q3ZKw/SjpwdyxG6MUyHP8lMnVJNDFpxpW7IqldpxfsS7
+UwmAa3+NT9Ao1ERxX0t2d442Cpj202/SH72zsN8dhebxTc86v5t1xbpjLjv3UoYJ
+pQ22I2pPWW+7/jJ3hZGRdsUWbmElRSkCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglg
+hkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0O
+BBYEFK/z5Srrz8d+pNZJkvkp7mobaKsPMB8GA1UdIwQYMBaAFBMuMPAE6kFftwi9
+NDHXEepWppnwMA0GCSqGSIb3DQEBCwUAA4IBAQApYjkleVjrpLMM6qodK5Z8bhDO
+Fge3cH8W2v0g5qLZtIjg+YSH+LANd4uuJ/Xu5k+GoS10B3zHXcK95HDnQuQU7rm3
+Y7iMbSFhVguW9hW6eq6AmKxXmXk9eqnYJpMwF1N8LQJLZEklZedpWgjPhJSOakKn
+0U+6OUt8EWcx9xsrzXnCKE3ZiGbWf1ZMSzfRPajZSmtFHU2nEp8pd2pVwbUdDqW5
+TzgWPH2Frv8jNMcs9hQPVe+4AInxsop1FUGBctBDpobRBubOgX5fM+b0GdZwALpI
+bgX9TDzDURu9QxokxXnqevCFpUAQhekjCQmAOJ28gV5ZjFpNWFa5ccJ4zfOw
+-----END CERTIFICATE-----
diff --git a/test/units/module_utils/urls/fixtures/client.txt b/test/units/module_utils/urls/fixtures/client.txt
new file mode 100644
index 00000000..380330f2
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/client.txt
@@ -0,0 +1,3 @@
+client.pem and client.key were retrieved from httptester docker image:
+
+ansible/ansible@sha256:fa5def8c294fc50813af131c0b5737594d852abac9cbe7ba38e17bf1c8476f3f
diff --git a/test/units/module_utils/urls/fixtures/multipart.txt b/test/units/module_utils/urls/fixtures/multipart.txt
new file mode 100644
index 00000000..1a4a0661
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/multipart.txt
@@ -0,0 +1,166 @@
+--===============3996062709511591449==
+Content-Type: text/plain
+Content-Disposition: form-data; name="file1"; filename="fake_file1.txt"
+
+file_content_1
+--===============3996062709511591449==
+Content-Type: text/html
+Content-Disposition: form-data; name="file2"; filename="fake_file2.html"
+
+<html></html>
+--===============3996062709511591449==
+Content-Type: application/json
+Content-Disposition: form-data; name="file3"; filename="fake_file3.json"
+
+{"foo": "bar"}
+--===============3996062709511591449==
+Content-Transfer-Encoding: base64
+Content-Type: text/plain
+Content-Disposition: form-data; name="file4"; filename="client.pem"
+
+Q2VydGlmaWNhdGU6CiAgICBEYXRhOgogICAgICAgIFZlcnNpb246IDMgKDB4MikKICAgICAgICBT
+ZXJpYWwgTnVtYmVyOiA0MDk5ICgweDEwMDMpCiAgICBTaWduYXR1cmUgQWxnb3JpdGhtOiBzaGEy
+NTZXaXRoUlNBRW5jcnlwdGlvbgogICAgICAgIElzc3VlcjogQz1VUywgU1Q9Tm9ydGggQ2Fyb2xp
+bmEsIEw9RHVyaGFtLCBPPUFuc2libGUsIENOPWFuc2libGUuaHR0cC50ZXN0cwogICAgICAgIFZh
+bGlkaXR5CiAgICAgICAgICAgIE5vdCBCZWZvcmU6IE1hciAyMSAxODoyMjo0NyAyMDE4IEdNVAog
+ICAgICAgICAgICBOb3QgQWZ0ZXIgOiBNYXIgMTggMTg6MjI6NDcgMjAyOCBHTVQKICAgICAgICBT
+dWJqZWN0OiBDPVVTLCBTVD1Ob3J0aCBDYXJvbGluYSwgTz1BbnNpYmxlLCBDTj1jbGllbnQuYW5z
+aWJsZS5odHRwLnRlc3RzCiAgICAgICAgU3ViamVjdCBQdWJsaWMgS2V5IEluZm86CiAgICAgICAg
+ICAgIFB1YmxpYyBLZXkgQWxnb3JpdGhtOiByc2FFbmNyeXB0aW9uCiAgICAgICAgICAgICAgICBQ
+dWJsaWMtS2V5OiAoMjA0OCBiaXQpCiAgICAgICAgICAgICAgICBNb2R1bHVzOgogICAgICAgICAg
+ICAgICAgICAgIDAwOmQzOmNhOjI1OjcxOmFlOmM0OmIyOjY3OmU0OjJiOjg4OmM0OmZhOmIwOgog
+ICAgICAgICAgICAgICAgICAgIDU2OjAyOmE5OjBiOjY0OjJlOmE5OjQ4OjU5OmY2OmU5OjRlOjBm
+OjQxOmU5OgogICAgICAgICAgICAgICAgICAgIGY2OjVjOmVmOmRkOmVlOmEwOmNjOmNiOjUwOjZh
+OmI3Ojc5OmY4Ojk5OjUyOgogICAgICAgICAgICAgICAgICAgIDE4OjcxOjIzOmJmOjFkOjQzOjc4
+OjBkOjNlOjBiOjM1OmIwOmFkOmQ2Ojg1OgogICAgICAgICAgICAgICAgICAgIGUxOjY5OjkzOjU5
+OmY4OjIwOmJmOjI1Ojk3OjUzOjEyOmQ2OmUyOjRhOjkzOgogICAgICAgICAgICAgICAgICAgIDZi
+OmQ4OmFiOjM2OjU1Ojg2OjM4OjFjOmJkOjBhOjVlOjU3OmQ4OjI2OmVkOgogICAgICAgICAgICAg
+ICAgICAgIGJiOjc0OjlkOmQzOmYzOjFhOjNlOjYxOmUxOmY4OjdiOmE2OmJhOjBmOjllOgogICAg
+ICAgICAgICAgICAgICAgIGQ0OmIyOjc4OjFiOmNjOjI5OjVhOjJhOjRkOjNlOmVkOjEyOjc2OmE5
+OjQ0OgogICAgICAgICAgICAgICAgICAgIGFjOmI3OjZhOjc2OmYwOjQ0OmY4OjEzOmI3OmIwOjc5
+OjhjOjRiOmM5OjkwOgogICAgICAgICAgICAgICAgICAgIGY2OmIzOmI4Ojg3OjNlOmEyOjgzOjJj
+OjYwOmUxOmQ2OjdlOmY4OmY4OmExOgogICAgICAgICAgICAgICAgICAgIDViOjFlOmJmOjQzOjc2
+OjRhOmMzOmY0OmEzOmE3OjA3OjcyOmM0OjZlOjhjOgogICAgICAgICAgICAgICAgICAgIDUzOjIx
+OmNmOmYyOjUzOjI3OjU0OjkzOjQzOjE2OjljOjY5OjViOmIyOjJhOgogICAgICAgICAgICAgICAg
+ICAgIDk1OmRhOjcxOjdlOmM0OmJiOjUzOjA5OjgwOjZiOjdmOjhkOjRmOmQwOjI4OgogICAgICAg
+ICAgICAgICAgICAgIGQ0OjQ0OjcxOjVmOjRiOjc2Ojc3OjhlOjM2OjBhOjk4OmY2OmQzOjZmOmQy
+OgogICAgICAgICAgICAgICAgICAgIDFmOmJkOmIzOmIwOmRmOjFkOjg1OmU2OmYxOjRkOmNmOjNh
+OmJmOjliOjc1OgogICAgICAgICAgICAgICAgICAgIGM1OmJhOjYzOjJlOjNiOmY3OjUyOjg2OjA5
+OmE1OjBkOmI2OjIzOjZhOjRmOgogICAgICAgICAgICAgICAgICAgIDU5OjZmOmJiOmZlOjMyOjc3
+Ojg1OjkxOjkxOjc2OmM1OjE2OjZlOjYxOjI1OgogICAgICAgICAgICAgICAgICAgIDQ1OjI5CiAg
+ICAgICAgICAgICAgICBFeHBvbmVudDogNjU1MzcgKDB4MTAwMDEpCiAgICAgICAgWDUwOXYzIGV4
+dGVuc2lvbnM6CiAgICAgICAgICAgIFg1MDl2MyBCYXNpYyBDb25zdHJhaW50czogCiAgICAgICAg
+ICAgICAgICBDQTpGQUxTRQogICAgICAgICAgICBOZXRzY2FwZSBDb21tZW50OiAKICAgICAgICAg
+ICAgICAgIE9wZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlCiAgICAgICAgICAgIFg1MDl2MyBT
+dWJqZWN0IEtleSBJZGVudGlmaWVyOiAKICAgICAgICAgICAgICAgIEFGOkYzOkU1OjJBOkVCOkNG
+OkM3OjdFOkE0OkQ2OjQ5OjkyOkY5OjI5OkVFOjZBOjFCOjY4OkFCOjBGCiAgICAgICAgICAgIFg1
+MDl2MyBBdXRob3JpdHkgS2V5IElkZW50aWZpZXI6IAogICAgICAgICAgICAgICAga2V5aWQ6MTM6
+MkU6MzA6RjA6MDQ6RUE6NDE6NUY6Qjc6MDg6QkQ6MzQ6MzE6RDc6MTE6RUE6NTY6QTY6OTk6RjAK
+CiAgICBTaWduYXR1cmUgQWxnb3JpdGhtOiBzaGEyNTZXaXRoUlNBRW5jcnlwdGlvbgogICAgICAg
+ICAyOTo2MjozOToyNTo3OTo1ODplYjphNDpiMzowYzplYTphYToxZDoyYjo5Njo3Yzo2ZToxMDoK
+ICAgICAgICAgY2U6MTY6MDc6Yjc6NzA6N2Y6MTY6ZGE6ZmQ6MjA6ZTY6YTI6ZDk6YjQ6ODg6ZTA6
+Zjk6ODQ6CiAgICAgICAgIDg3OmY4OmIwOjBkOjc3OjhiOmFlOjI3OmY1OmVlOmU2OjRmOjg2OmEx
+OjJkOjc0OjA3OjdjOgogICAgICAgICBjNzo1ZDpjMjpiZDplNDo3MDplNzo0MjplNDoxNDplZTpi
+OTpiNzo2MzpiODo4Yzo2ZDoyMToKICAgICAgICAgNjE6NTY6MGI6OTY6ZjY6MTU6YmE6N2E6YWU6
+ODA6OTg6YWM6NTc6OTk6Nzk6M2Q6N2E6YTk6CiAgICAgICAgIGQ4OjI2OjkzOjMwOjE3OjUzOjdj
+OjJkOjAyOjRiOjY0OjQ5OjI1OjY1OmU3OjY5OjVhOjA4OgogICAgICAgICBjZjo4NDo5NDo4ZTo2
+YTo0MjphNzpkMTo0ZjpiYTozOTo0Yjo3YzoxMTo2NzozMTpmNzoxYjoKICAgICAgICAgMmI6Y2Q6
+Nzk6YzI6Mjg6NGQ6ZDk6ODg6NjY6ZDY6N2Y6NTY6NGM6NGI6Mzc6ZDE6M2Q6YTg6CiAgICAgICAg
+IGQ5OjRhOjZiOjQ1OjFkOjRkOmE3OjEyOjlmOjI5Ojc3OjZhOjU1OmMxOmI1OjFkOjBlOmE1Ogog
+ICAgICAgICBiOTo0ZjozODoxNjozYzo3ZDo4NTphZTpmZjoyMzozNDpjNzoyYzpmNjoxNDowZjo1
+NTplZjoKICAgICAgICAgYjg6MDA6ODk6ZjE6YjI6OGE6NzU6MTU6NDE6ODE6NzI6ZDA6NDM6YTY6
+ODY6ZDE6MDY6ZTY6CiAgICAgICAgIGNlOjgxOjdlOjVmOjMzOmU2OmY0OjE5OmQ2OjcwOjAwOmJh
+OjQ4OjZlOjA1OmZkOjRjOjNjOgogICAgICAgICBjMzo1MToxYjpiZDo0MzoxYToyNDpjNTo3OTpl
+YTo3YTpmMDo4NTphNTo0MDoxMDo4NTplOToKICAgICAgICAgMjM6MDk6MDk6ODA6Mzg6OWQ6YmM6
+ODE6NWU6NTk6OGM6NWE6NGQ6NTg6NTY6Yjk6NzE6YzI6CiAgICAgICAgIDc4OmNkOmYzOmIwCi0t
+LS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlEdVRDQ0FxR2dBd0lCQWdJQ0VBTXdEUVlKS29a
+SWh2Y05BUUVMQlFBd1pqRUxNQWtHQTFVRUJoTUNWVk14CkZ6QVZCZ05WQkFnTURrNXZjblJvSUVO
+aGNtOXNhVzVoTVE4d0RRWURWUVFIREFaRWRYSm9ZVzB4RURBT0JnTlYKQkFvTUIwRnVjMmxpYkdV
+eEd6QVpCZ05WQkFNTUVtRnVjMmxpYkdVdWFIUjBjQzUwWlhOMGN6QWVGdzB4T0RBegpNakV4T0RJ
+eU5EZGFGdzB5T0RBek1UZ3hPREl5TkRkYU1Gd3hDekFKQmdOVkJBWVRBbFZUTVJjd0ZRWURWUVFJ
+CkRBNU9iM0owYUNCRFlYSnZiR2x1WVRFUU1BNEdBMVVFQ2d3SFFXNXphV0pzWlRFaU1DQUdBMVVF
+QXd3WlkyeHAKWlc1MExtRnVjMmxpYkdVdWFIUjBjQzUwWlhOMGN6Q0NBU0l3RFFZSktvWklodmNO
+QVFFQkJRQURnZ0VQQURDQwpBUW9DZ2dFQkFOUEtKWEd1eExKbjVDdUl4UHF3VmdLcEMyUXVxVWha
+OXVsT0QwSHA5bHp2M2U2Z3pNdFFhcmQ1CitKbFNHSEVqdngxRGVBMCtDeld3cmRhRjRXbVRXZmdn
+dnlXWFV4TFc0a3FUYTlpck5sV0dPQnk5Q2w1WDJDYnQKdTNTZDAvTWFQbUhoK0h1bXVnK2UxTEo0
+Rzh3cFdpcE5QdTBTZHFsRXJMZHFkdkJFK0JPM3NIbU1TOG1ROXJPNApoejZpZ3l4ZzRkWisrUGlo
+V3g2L1EzWkt3L1NqcHdkeXhHNk1VeUhQOGxNblZKTkRGcHhwVzdJcWxkcHhmc1M3ClV3bUFhMytO
+VDlBbzFFUnhYMHQyZDQ0MkNwajIwMi9TSDcyenNOOGRoZWJ4VGM4NnY1dDF4YnBqTGp2M1VvWUoK
+cFEyMkkycFBXVys3L2pKM2haR1Jkc1VXYm1FbFJTa0NBd0VBQWFON01Ia3dDUVlEVlIwVEJBSXdB
+REFzQmdsZwpoa2dCaHZoQ0FRMEVIeFlkVDNCbGJsTlRUQ0JIWlc1bGNtRjBaV1FnUTJWeWRHbG1h
+V05oZEdVd0hRWURWUjBPCkJCWUVGSy96NVNycno4ZCtwTlpKa3ZrcDdtb2JhS3NQTUI4R0ExVWRJ
+d1FZTUJhQUZCTXVNUEFFNmtGZnR3aTkKTkRIWEVlcFdwcG53TUEwR0NTcUdTSWIzRFFFQkN3VUFB
+NElCQVFBcFlqa2xlVmpycExNTTZxb2RLNVo4YmhETwpGZ2UzY0g4VzJ2MGc1cUxadElqZytZU0gr
+TEFOZDR1dUovWHU1aytHb1MxMEIzekhYY0s5NUhEblF1UVU3cm0zClk3aU1iU0ZoVmd1VzloVzZl
+cTZBbUt4WG1YazllcW5ZSnBNd0YxTjhMUUpMWkVrbFplZHBXZ2pQaEpTT2FrS24KMFUrNk9VdDhF
+V2N4OXhzcnpYbkNLRTNaaUdiV2YxWk1TemZSUGFqWlNtdEZIVTJuRXA4cGQycFZ3YlVkRHFXNQpU
+emdXUEgyRnJ2OGpOTWNzOWhRUFZlKzRBSW54c29wMUZVR0JjdEJEcG9iUkJ1Yk9nWDVmTStiMEdk
+WndBTHBJCmJnWDlURHpEVVJ1OVF4b2t4WG5xZXZDRnBVQVFoZWtqQ1FtQU9KMjhnVjVaakZwTldG
+YTVjY0o0emZPdwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+
+--===============3996062709511591449==
+Content-Transfer-Encoding: base64
+Content-Type: application/pgp-keys
+Content-Disposition: form-data; name="file5"; filename="client.key"
+
+LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZB
+QVNDQktjd2dnU2pBZ0VBQW9JQkFRRFR5aVZ4cnNTeVorUXIKaU1UNnNGWUNxUXRrTHFsSVdmYnBU
+ZzlCNmZaYzc5M3VvTXpMVUdxM2VmaVpVaGh4STc4ZFEzZ05QZ3Mxc0szVwpoZUZwazFuNElMOGxs
+MU1TMXVKS2sydllxelpWaGpnY3ZRcGVWOWdtN2J0MG5kUHpHajVoNGZoN3Byb1BudFN5CmVCdk1L
+Vm9xVFQ3dEVuYXBSS3kzYW5id1JQZ1R0N0I1akV2SmtQYXp1SWMrb29Nc1lPSFdmdmo0b1ZzZXYw
+TjIKU3NQMG82Y0hjc1J1akZNaHovSlRKMVNUUXhhY2FWdXlLcFhhY1g3RXUxTUpnR3QvalUvUUtO
+UkVjVjlMZG5lTwpOZ3FZOXROdjBoKzlzN0RmSFlYbThVM1BPcitiZGNXNll5NDc5MUtHQ2FVTnRp
+TnFUMWx2dS80eWQ0V1JrWGJGCkZtNWhKVVVwQWdNQkFBRUNnZ0VCQUpZT2FjMU1TSzBuRXZFTmJK
+TTZFUmE5Y3dhK1VNNmtmMTc2SWJGUDlYQVAKdTZ6eFhXaklSM1JNQlNtTWt5akdiUWhzMzBoeXB6
+cVpQZkg2MWFVWjgrcnNPTUtIbnlLQUFjRlpCbFp6cUlHYwpJWEdyTndkMU1mOFMvWGc0d3cxQmtP
+V0ZWNnMwakN1NUczWi94eUkyUWw0cWNPVkQ2Yk13cHpjbFJiUWpDYW5kCmR2cXlDZE1EMHNSRHll
+T0lLNWhCaFVZNjBKbldiTUN1NnBCVStxUG9SdWtiUmllYWVETElOMWNsd0VxSVFWNzgKTExudjRu
+OWZ1R296SDBKZEhIZnlYRnl0Q2dJSnZFc3BaVWphLzVSNG9yQURocjNaQjAxMFJMell2czJuZEUz
+Qgo0Y0Y5Umd4c3BKWmVKL1ArUGdsVmladXpqMzdwWHkrN0dBY0pMUjlrYTRrQ2dZRUEvbDAxWEt3
+a0N6TWdYSFc0ClVQZ2wxK29uNDJCc043VDlyM1M1dGloT2pIZjRaSldrZ1l6aXNMVlgrTmMxb1VJ
+M0hRZk05UERKWlhNTU5tN0oKWlJ2RVJjb3BVMjZ3V3FyNkNGUGJsR3Y4b3FYSHFjcGV0YThpM3ha
+S29QQVNzVFc2c3N1UENFYWppTFpiUTFySApIL0hQK09aSVZMTS9XQ1BnQTJCY2tUVTlKbnNDZ1lF
+QTFTYlhsbFhubHdHcW1qaXRtWTFaMDdyVXhRM2FoL2ZCCmljY2JiZzNFNG9ub250WVhJbEk1elFt
+czN1K3FCZGkwWnV3YURtNVk0QmV0T3EwYTNVeXhBc3VncVZGbnpUYmEKMXcvc0ZiM2Z3OUtlUS9p
+bDRDWGticTg3bnpKZkRtRXlxSEdDQ1lYYmlqSEJ4bnE5OVBrcXdWcGFBaEhIRVcwbQp2V3lNVXZQ
+Ulk2c0NnWUFidFVXUjBjS2ZZYk5kdndrVDhPUVdjQkJtU1dPZ2Nkdk1tQmQreTBjN0wvcGo0cFVu
+Cjg1UGlFZThDVVZjck9NNU9JRUpvVUM1d0dhY3o2citQZndYVFlHRStFR212aHI1ejE4YXNsVkxR
+Mk9RMkQ3QmYKZERPRlA2VmpnS05Zb0hTMDgwMmlaaWQ4UmZrTkRqOXdzR09xUmxPTXZuWGhBUTl1
+N3JsR3JCajhMd0tCZ0ZmbwpwaDk5bkg4ZUU5TjVMcmZXb1VaK2xvUVMyNThhSW5zRllCMjZsZ25z
+WU1FcGdPOEp4SWI0eDVCR2ZmUGRWVUhoCmZEbVpieFExRDUvVWh2RGdVVnpheUk4c1lNZzFLSHBz
+T2EwWjJ6Q3pLOHpTdnU2OEVnTklTQ20zSjVjUnBVZnQKVUhsRytLMTlLZk1HNmxNZmRHKzhLTVVU
+dWV0SS9pSS9vM3dPekx2ekFvR0FJck9oMzBySHQ4d2l0N0VMQVJ5eAp3UGtwMkFSWVhyS2ZYM05F
+UzRjNjd6U0FpKzNkQ2p4UnF5d3FUSTBnTGljeU1sajh6RXU5WUU5SXgvcmw4bFJaCm5ROUxabXF2
+N1FIemhMVFVDUEdnWlluZW12QnpvN3IwZVc4T2FnNTJkYmNKTzZGQnN6ZldyeHNrbS9mWDI1UmIK
+V1B4aWgydmRSeTgxNGROUFcyNXJnZHc9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
+
+--===============3996062709511591449==
+Content-Transfer-Encoding: base64
+Content-Type: text/plain
+Content-Disposition: form-data; name="file6"; filename="client.txt"
+
+Y2xpZW50LnBlbSBhbmQgY2xpZW50LmtleSB3ZXJlIHJldHJpZXZlZCBmcm9tIGh0dHB0ZXN0ZXIg
+ZG9ja2VyIGltYWdlOgoKYW5zaWJsZS9hbnNpYmxlQHNoYTI1NjpmYTVkZWY4YzI5NGZjNTA4MTNh
+ZjEzMWMwYjU3Mzc1OTRkODUyYWJhYzljYmU3YmEzOGUxN2JmMWM4NDc2ZjNmCg==
+
+--===============3996062709511591449==
+Content-Type: text/plain
+Content-Disposition: form-data; name="form_field_1"
+
+form_value_1
+--===============3996062709511591449==
+Content-Type: application/octet-stream
+Content-Disposition: form-data; name="form_field_2"
+
+form_value_2
+--===============3996062709511591449==
+Content-Type: text/html
+Content-Disposition: form-data; name="form_field_3"
+
+<html></html>
+--===============3996062709511591449==
+Content-Type: application/json
+Content-Disposition: form-data; name="form_field_4"
+
+{"foo": "bar"}
+--===============3996062709511591449==--
diff --git a/test/units/module_utils/urls/fixtures/netrc b/test/units/module_utils/urls/fixtures/netrc
new file mode 100644
index 00000000..8f127170
--- /dev/null
+++ b/test/units/module_utils/urls/fixtures/netrc
@@ -0,0 +1,3 @@
+machine ansible.com
+login user
+password passwd
diff --git a/test/units/module_utils/urls/test_RedirectHandlerFactory.py b/test/units/module_utils/urls/test_RedirectHandlerFactory.py
new file mode 100644
index 00000000..aa3500a1
--- /dev/null
+++ b/test/units/module_utils/urls/test_RedirectHandlerFactory.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.urls import HAS_SSLCONTEXT, RedirectHandlerFactory, urllib_request, urllib_error
+from ansible.module_utils.six import StringIO
+
+import pytest
+
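+# RedirectHandlerFactory(follow_redirects, validate_certs) builds a RedirectHandler
+# class on the fly; the tests below cover each policy: 'none' rejects every
+# redirect, 'urllib2' defers to the stdlib handler, 'safe' follows redirects only
+# for safe methods such as GET, and 'all' follows everything.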
+
+@pytest.fixture
+def urllib_req():
+ req = urllib_request.Request(
+ 'https://ansible.com/'
+ )
+ return req
+
+
+@pytest.fixture
+def request_body():
+ return StringIO('TESTS')
+
+
+def test_no_redirs(urllib_req, request_body):
+ handler = RedirectHandlerFactory('none', False)
+ inst = handler()
+ with pytest.raises(urllib_error.HTTPError):
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_urllib2_redir(urllib_req, request_body, mocker):
+ redir_request_mock = mocker.patch('ansible.module_utils.urls.urllib_request.HTTPRedirectHandler.redirect_request')
+
+ handler = RedirectHandlerFactory('urllib2', False)
+ inst = handler()
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+ redir_request_mock.assert_called_once_with(inst, urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_all_redir(urllib_req, request_body, mocker):
+ req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+ handler = RedirectHandlerFactory('all', False)
+ inst = handler()
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+ req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
+
+
+def test_all_redir_post(request_body, mocker):
+ handler = RedirectHandlerFactory('all', False)
+ inst = handler()
+
+ req = urllib_request.Request(
+ 'https://ansible.com/',
+ 'POST'
+ )
+
+ req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+ inst.redirect_request(req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+ req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
+
+
+def test_redir_headers_removal(urllib_req, request_body, mocker):
+ req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+ handler = RedirectHandlerFactory('all', False)
+ inst = handler()
+
+ urllib_req.headers = {
+ 'Content-Type': 'application/json',
+ 'Content-Length': 100,
+ 'Foo': 'bar',
+ }
+
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+ req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={'Foo': 'bar'}, method='GET', origin_req_host='ansible.com',
+ unverifiable=True)
+
+
+def test_redir_url_spaces(urllib_req, request_body, mocker):
+ req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+ handler = RedirectHandlerFactory('all', False)
+ inst = handler()
+
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/foo bar')
+
+ req_mock.assert_called_once_with('https://docs.ansible.com/foo%20bar', data=None, headers={}, method='GET', origin_req_host='ansible.com',
+ unverifiable=True)
+
+
+def test_redir_safe(urllib_req, request_body, mocker):
+ req_mock = mocker.patch('ansible.module_utils.urls.RequestWithMethod')
+ handler = RedirectHandlerFactory('safe', False)
+ inst = handler()
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+ req_mock.assert_called_once_with('https://docs.ansible.com/', data=None, headers={}, method='GET', origin_req_host='ansible.com', unverifiable=True)
+
+
+def test_redir_safe_not_safe(request_body):
+ handler = RedirectHandlerFactory('safe', False)
+ inst = handler()
+
+ req = urllib_request.Request(
+ 'https://ansible.com/',
+ 'POST'
+ )
+
+ with pytest.raises(urllib_error.HTTPError):
+ inst.redirect_request(req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_redir_no_error_on_invalid(urllib_req, request_body):
+ handler = RedirectHandlerFactory('invalid', False)
+ inst = handler()
+
+ with pytest.raises(urllib_error.HTTPError):
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+
+def test_redir_validate_certs(urllib_req, request_body, mocker):
+ opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request._opener')
+ handler = RedirectHandlerFactory('all', True)
+ inst = handler()
+ inst.redirect_request(urllib_req, request_body, 301, '301 Moved Permanently', {}, 'https://docs.ansible.com/')
+
+ assert opener_mock.add_handler.call_count == int(not HAS_SSLCONTEXT)
+
+
+def test_redir_http_error_308_urllib2(urllib_req, request_body):
+ handler = RedirectHandlerFactory('urllib2', False)
+ inst = handler()
+
+ with pytest.raises(urllib_error.HTTPError):
+ inst.redirect_request(urllib_req, request_body, 308, '308 Permanent Redirect', {}, 'https://docs.ansible.com/')
diff --git a/test/units/module_utils/urls/test_Request.py b/test/units/module_utils/urls/test_Request.py
new file mode 100644
index 00000000..ebb6de56
--- /dev/null
+++ b/test/units/module_utils/urls/test_Request.py
@@ -0,0 +1,456 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import datetime
+import os
+
+from ansible.module_utils.urls import (Request, open_url, urllib_request, HAS_SSLCONTEXT, cookiejar, RequestWithMethod,
+ UnixHTTPHandler, UnixHTTPSConnection, httplib)
+from ansible.module_utils.urls import SSLValidationHandler, HTTPSClientAuthHandler, RedirectHandlerFactory
+
+import pytest
+from mock import call
+
+
+if HAS_SSLCONTEXT:
+ import ssl
+
+
+@pytest.fixture
+def urlopen_mock(mocker):
+ return mocker.patch('ansible.module_utils.urls.urllib_request.urlopen')
+
+
+@pytest.fixture
+def install_opener_mock(mocker):
+ return mocker.patch('ansible.module_utils.urls.urllib_request.install_opener')
+
+
+def test_Request_fallback(urlopen_mock, install_opener_mock, mocker):
+ cookies = cookiejar.CookieJar()
+ request = Request(
+ headers={'foo': 'bar'},
+ use_proxy=False,
+ force=True,
+ timeout=100,
+ validate_certs=False,
+ url_username='user',
+ url_password='passwd',
+ http_agent='ansible-tests',
+ force_basic_auth=True,
+ follow_redirects='all',
+ client_cert='/tmp/client.pem',
+ client_key='/tmp/client.key',
+ cookies=cookies,
+ unix_socket='/foo/bar/baz.sock',
+ ca_path='/foo/bar/baz.pem',
+ )
+ fallback_mock = mocker.spy(request, '_fallback')
+
+ r = request.open('GET', 'https://ansible.com')
+
+ calls = [
+ call(None, False), # use_proxy
+ call(None, True), # force
+ call(None, 100), # timeout
+ call(None, False), # validate_certs
+ call(None, 'user'), # url_username
+ call(None, 'passwd'), # url_password
+ call(None, 'ansible-tests'), # http_agent
+ call(None, True), # force_basic_auth
+ call(None, 'all'), # follow_redirects
+ call(None, '/tmp/client.pem'), # client_cert
+ call(None, '/tmp/client.key'), # client_key
+ call(None, cookies), # cookies
+ call(None, '/foo/bar/baz.sock'), # unix_socket
+ call(None, '/foo/bar/baz.pem'), # ca_path
+ ]
+ fallback_mock.assert_has_calls(calls)
+
+ assert fallback_mock.call_count == 14 # All but headers use fallback
+
+ args = urlopen_mock.call_args[0]
+ assert args[1] is None # data, this is handled in the Request not urlopen
+ assert args[2] == 100 # timeout
+
+ req = args[0]
+ assert req.headers == {
+ 'Authorization': b'Basic dXNlcjpwYXNzd2Q=',
+ 'Cache-control': 'no-cache',
+ 'Foo': 'bar',
+ 'User-agent': 'ansible-tests'
+ }
+ assert req.data is None
+ assert req.get_method() == 'GET'
+
+
+def test_Request_open(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'https://ansible.com/')
+ args = urlopen_mock.call_args[0]
+ assert args[1] is None # data, this is handled in the Request not urlopen
+ assert args[2] == 10 # timeout
+
+ req = args[0]
+ assert req.headers == {}
+ assert req.data is None
+ assert req.get_method() == 'GET'
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ if not HAS_SSLCONTEXT:
+ expected_handlers = (
+ SSLValidationHandler,
+ RedirectHandlerFactory(), # factory, get handler
+ )
+ else:
+ expected_handlers = (
+ RedirectHandlerFactory(), # factory, get handler
+ )
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, SSLValidationHandler) or handler.__class__.__name__ == 'RedirectHandler':
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == len(expected_handlers)
+
+
+def test_Request_open_http(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://ansible.com/')
+ args = urlopen_mock.call_args[0]
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, SSLValidationHandler):
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == 0
+
+
+def test_Request_open_unix_socket(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://ansible.com/', unix_socket='/foo/bar/baz.sock')
+ args = urlopen_mock.call_args[0]
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, UnixHTTPHandler):
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == 1
+
+
+def test_Request_open_https_unix_socket(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'https://ansible.com/', unix_socket='/foo/bar/baz.sock')
+ args = urlopen_mock.call_args[0]
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, HTTPSClientAuthHandler):
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == 1
+
+ inst = found_handlers[0]._build_https_connection('foo')
+ assert isinstance(inst, UnixHTTPSConnection)
+
+
+def test_Request_open_ftp(urlopen_mock, install_opener_mock, mocker):
+ mocker.patch('ansible.module_utils.urls.ParseResultDottedDict.as_list', side_effect=AssertionError)
+
+    # Using the ftp scheme should prevent the AssertionError side effect from firing
+ r = Request().open('GET', 'ftp://foo@ansible.com/')
+
+
+def test_Request_open_headers(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://ansible.com/', headers={'Foo': 'bar'})
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+ assert req.headers == {'Foo': 'bar'}
+
+
+def test_Request_open_username(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://ansible.com/', url_username='user')
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ expected_handlers = (
+ urllib_request.HTTPBasicAuthHandler,
+ urllib_request.HTTPDigestAuthHandler,
+ )
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, expected_handlers):
+ found_handlers.append(handler)
+ assert len(found_handlers) == 2
+ assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user', None)}
+
+
+def test_Request_open_username_in_url(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://user2@ansible.com/')
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ expected_handlers = (
+ urllib_request.HTTPBasicAuthHandler,
+ urllib_request.HTTPDigestAuthHandler,
+ )
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, expected_handlers):
+ found_handlers.append(handler)
+ assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user2', '')}
+
+
+def test_Request_open_username_force_basic(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://ansible.com/', url_username='user', url_password='passwd', force_basic_auth=True)
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ expected_handlers = (
+ urllib_request.HTTPBasicAuthHandler,
+ urllib_request.HTTPDigestAuthHandler,
+ )
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, expected_handlers):
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == 0
+
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+ assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
+
+
+def test_Request_open_auth_in_netloc(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'http://user:passwd@ansible.com/')
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+ assert req.get_full_url() == 'http://ansible.com/'
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ expected_handlers = (
+ urllib_request.HTTPBasicAuthHandler,
+ urllib_request.HTTPDigestAuthHandler,
+ )
+
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, expected_handlers):
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == 2
+
+
+def test_Request_open_netrc(urlopen_mock, install_opener_mock, monkeypatch):
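+    # Request honours the NETRC environment variable: credentials for a matching
+    # machine become a Basic auth header, while non-matching hosts and missing
+    # netrc files leave the request untouched.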
+ here = os.path.dirname(__file__)
+
+ monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc'))
+ r = Request().open('GET', 'http://ansible.com/')
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+ assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
+
+ r = Request().open('GET', 'http://foo.ansible.com/')
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+ assert 'Authorization' not in req.headers
+
+ monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc.nonexistant'))
+ r = Request().open('GET', 'http://ansible.com/')
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+ assert 'Authorization' not in req.headers
+
+
+def test_Request_open_no_proxy(urlopen_mock, install_opener_mock, mocker):
+ build_opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request.build_opener')
+
+ r = Request().open('GET', 'http://ansible.com/', use_proxy=False)
+
+ handlers = build_opener_mock.call_args[0]
+ found_handlers = []
+ for handler in handlers:
+ if isinstance(handler, urllib_request.ProxyHandler):
+ found_handlers.append(handler)
+
+ assert len(found_handlers) == 1
+
+
+@pytest.mark.skipif(not HAS_SSLCONTEXT, reason="requires SSLContext")
+def test_Request_open_no_validate_certs(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'https://ansible.com/', validate_certs=False)
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ ssl_handler = None
+ for handler in handlers:
+ if isinstance(handler, HTTPSClientAuthHandler):
+ ssl_handler = handler
+ break
+
+ assert ssl_handler is not None
+
+ inst = ssl_handler._build_https_connection('foo')
+ assert isinstance(inst, httplib.HTTPSConnection)
+
+ context = ssl_handler._context
+ assert context.protocol == ssl.PROTOCOL_SSLv23
+ if ssl.OP_NO_SSLv2:
+ assert context.options & ssl.OP_NO_SSLv2
+ assert context.options & ssl.OP_NO_SSLv3
+ assert context.verify_mode == ssl.CERT_NONE
+ assert context.check_hostname is False
+
+
+def test_Request_open_client_cert(urlopen_mock, install_opener_mock):
+ here = os.path.dirname(__file__)
+
+ client_cert = os.path.join(here, 'fixtures/client.pem')
+ client_key = os.path.join(here, 'fixtures/client.key')
+
+ r = Request().open('GET', 'https://ansible.com/', client_cert=client_cert, client_key=client_key)
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ ssl_handler = None
+ for handler in handlers:
+ if isinstance(handler, HTTPSClientAuthHandler):
+ ssl_handler = handler
+ break
+
+ assert ssl_handler is not None
+
+ assert ssl_handler.client_cert == client_cert
+ assert ssl_handler.client_key == client_key
+
+ https_connection = ssl_handler._build_https_connection('ansible.com')
+
+ assert https_connection.key_file == client_key
+ assert https_connection.cert_file == client_cert
+
+
+def test_Request_open_cookies(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'https://ansible.com/', cookies=cookiejar.CookieJar())
+
+ opener = install_opener_mock.call_args[0][0]
+ handlers = opener.handlers
+
+ cookies_handler = None
+ for handler in handlers:
+ if isinstance(handler, urllib_request.HTTPCookieProcessor):
+ cookies_handler = handler
+ break
+
+ assert cookies_handler is not None
+
+
+def test_Request_open_invalid_method(urlopen_mock, install_opener_mock):
+ r = Request().open('UNKNOWN', 'https://ansible.com/')
+
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+
+ assert req.data is None
+ assert req.get_method() == 'UNKNOWN'
+ # assert r.status == 504
+
+
+def test_Request_open_custom_method(urlopen_mock, install_opener_mock):
+ r = Request().open('DELETE', 'https://ansible.com/')
+
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+
+ assert isinstance(req, RequestWithMethod)
+
+
+def test_Request_open_user_agent(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'https://ansible.com/', http_agent='ansible-tests')
+
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+
+ assert req.headers.get('User-agent') == 'ansible-tests'
+
+
+def test_Request_open_force(urlopen_mock, install_opener_mock):
+ r = Request().open('GET', 'https://ansible.com/', force=True, last_mod_time=datetime.datetime.now())
+
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+
+ assert req.headers.get('Cache-control') == 'no-cache'
+ assert 'If-modified-since' not in req.headers
+
+
+def test_Request_open_last_mod(urlopen_mock, install_opener_mock):
+ now = datetime.datetime.now()
+ r = Request().open('GET', 'https://ansible.com/', last_mod_time=now)
+
+ args = urlopen_mock.call_args[0]
+ req = args[0]
+
+ assert req.headers.get('If-modified-since') == now.strftime('%a, %d %b %Y %H:%M:%S GMT')
+
+
+def test_Request_open_headers_not_dict(urlopen_mock, install_opener_mock):
+ with pytest.raises(ValueError):
+ Request().open('GET', 'https://ansible.com/', headers=['bob'])
+
+
+def test_Request_init_headers_not_dict(urlopen_mock, install_opener_mock):
+ with pytest.raises(ValueError):
+ Request(headers=['bob'])
+
+
+@pytest.mark.parametrize('method,kwargs', [
+ ('get', {}),
+ ('options', {}),
+ ('head', {}),
+ ('post', {'data': None}),
+ ('put', {'data': None}),
+ ('patch', {'data': None}),
+ ('delete', {}),
+])
+def test_methods(method, kwargs, mocker):
+ expected = method.upper()
+ open_mock = mocker.patch('ansible.module_utils.urls.Request.open')
+ request = Request()
+ getattr(request, method)('https://ansible.com')
+ open_mock.assert_called_once_with(expected, 'https://ansible.com', **kwargs)
+
+
+def test_open_url(urlopen_mock, install_opener_mock, mocker):
+ req_mock = mocker.patch('ansible.module_utils.urls.Request.open')
+ open_url('https://ansible.com/')
+ req_mock.assert_called_once_with('GET', 'https://ansible.com/', data=None, headers=None, use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None,
+ force_basic_auth=False, follow_redirects='urllib2',
+ client_cert=None, client_key=None, cookies=None, use_gssapi=False,
+ unix_socket=None, ca_path=None, unredirected_headers=None)
diff --git a/test/units/module_utils/urls/test_RequestWithMethod.py b/test/units/module_utils/urls/test_RequestWithMethod.py
new file mode 100644
index 00000000..05105190
--- /dev/null
+++ b/test/units/module_utils/urls/test_RequestWithMethod.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.urls import RequestWithMethod
+
+
+def test_RequestWithMethod():
+ get = RequestWithMethod('https://ansible.com/', 'GET')
+ assert get.get_method() == 'GET'
+
+ post = RequestWithMethod('https://ansible.com/', 'POST', data='foo', headers={'Bar': 'baz'})
+ assert post.get_method() == 'POST'
+ assert post.get_full_url() == 'https://ansible.com/'
+ assert post.data == 'foo'
+ assert post.headers == {'Bar': 'baz'}
+
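+ # an empty method falls back to urllib's default: GET for requests without a data payload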
+ none = RequestWithMethod('https://ansible.com/', '')
+ assert none.get_method() == 'GET'
diff --git a/test/units/module_utils/urls/test_fetch_url.py b/test/units/module_utils/urls/test_fetch_url.py
new file mode 100644
index 00000000..9cac2a35
--- /dev/null
+++ b/test/units/module_utils/urls/test_fetch_url.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import socket
+
+from ansible.module_utils.six import StringIO
+from ansible.module_utils.six.moves.http_cookiejar import Cookie
+from ansible.module_utils.six.moves.http_client import HTTPMessage
+from ansible.module_utils.urls import fetch_url, urllib_error, ConnectionError, NoSSLError, httplib
+
+import pytest
+from mock import MagicMock
+
+
+class AnsibleModuleExit(Exception):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class ExitJson(AnsibleModuleExit):
+ pass
+
+
+class FailJson(AnsibleModuleExit):
+ pass
+
+
+@pytest.fixture
+def open_url_mock(mocker):
+ return mocker.patch('ansible.module_utils.urls.open_url')
+
+
+@pytest.fixture
+def fake_ansible_module():
+ return FakeAnsibleModule()
+
+
+class FakeAnsibleModule:
+ def __init__(self):
+ self.params = {}
+ self.tmpdir = None
+
+ def exit_json(self, *args, **kwargs):
+ raise ExitJson(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ raise FailJson(*args, **kwargs)
+
+
+def test_fetch_url_no_urlparse(mocker, fake_ansible_module):
+ mocker.patch('ansible.module_utils.urls.HAS_URLPARSE', new=False)
+
+ with pytest.raises(FailJson):
+ fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+
+def test_fetch_url(open_url_mock, fake_ansible_module):
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ dummy, kwargs = open_url_mock.call_args
+
+ open_url_mock.assert_called_once_with('http://ansible.com/', client_cert=None, client_key=None, cookies=kwargs['cookies'], data=None,
+ follow_redirects='urllib2', force=False, force_basic_auth='', headers=None,
+ http_agent='ansible-httpget', last_mod_time=None, method=None, timeout=10, url_password='', url_username='',
+ use_proxy=True, validate_certs=True, use_gssapi=False, unix_socket=None, ca_path=None)
+
+
+def test_fetch_url_params(open_url_mock, fake_ansible_module):
+ fake_ansible_module.params = {
+ 'validate_certs': False,
+ 'url_username': 'user',
+ 'url_password': 'passwd',
+ 'http_agent': 'ansible-test',
+ 'force_basic_auth': True,
+ 'follow_redirects': 'all',
+ 'client_cert': 'client.pem',
+ 'client_key': 'client.key',
+ }
+
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ dummy, kwargs = open_url_mock.call_args
+
+ open_url_mock.assert_called_once_with('http://ansible.com/', client_cert='client.pem', client_key='client.key', cookies=kwargs['cookies'], data=None,
+ follow_redirects='all', force=False, force_basic_auth=True, headers=None,
+ http_agent='ansible-test', last_mod_time=None, method=None, timeout=10, url_password='passwd', url_username='user',
+ use_proxy=True, validate_certs=False, use_gssapi=False, unix_socket=None, ca_path=None)
+
+
+def test_fetch_url_cookies(mocker, fake_ansible_module):
+ def make_cookies(*args, **kwargs):
+ cookies = kwargs['cookies']
+ r = MagicMock()
+ try:
+ r.headers = HTTPMessage()
+ add_header = r.headers.add_header
+ except TypeError:
+ # PY2
+ r.headers = HTTPMessage(StringIO())
+ add_header = r.headers.addheader
+ r.info.return_value = r.headers
+ for name, value in (('Foo', 'bar'), ('Baz', 'qux')):
+ cookie = Cookie(
+ version=0,
+ name=name,
+ value=value,
+ port=None,
+ port_specified=False,
+ domain="ansible.com",
+ domain_specified=True,
+ domain_initial_dot=False,
+ path="/",
+ path_specified=True,
+ secure=False,
+ expires=None,
+ discard=False,
+ comment=None,
+ comment_url=None,
+ rest=None
+ )
+ cookies.set_cookie(cookie)
+ add_header('Set-Cookie', '%s=%s' % (name, value))
+
+ return r
+
+ mocker.patch('ansible.module_utils.urls.open_url', new=make_cookies)
+
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ assert info['cookies'] == {'Baz': 'qux', 'Foo': 'bar'}
+ # Python sorts cookies in order of most specific (i.e. longest) path first;
+ # items with the same path are reversed from response order
+ assert info['cookies_string'] == 'Baz=qux; Foo=bar'
+ # The key here contains a `-`, unlike the `uri` module, which converts it to `_`
+ # Note: this is response order, which differs from cookies_string
+ assert info['set-cookie'] == 'Foo=bar, Baz=qux'
+
+
+def test_fetch_url_nossl(open_url_mock, fake_ansible_module, mocker):
+ mocker.patch('ansible.module_utils.urls.get_distribution', return_value='notredhat')
+
+ open_url_mock.side_effect = NoSSLError
+ with pytest.raises(FailJson) as excinfo:
+ fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ assert 'python-ssl' not in excinfo.value.kwargs['msg']
+
+ mocker.patch('ansible.module_utils.urls.get_distribution', return_value='redhat')
+
+ open_url_mock.side_effect = NoSSLError
+ with pytest.raises(FailJson) as excinfo:
+ fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ assert 'python-ssl' in excinfo.value.kwargs['msg']
+ assert 'http://ansible.com/' == excinfo.value.kwargs['url']
+ assert excinfo.value.kwargs['status'] == -1
+
+
+def test_fetch_url_connectionerror(open_url_mock, fake_ansible_module):
+ open_url_mock.side_effect = ConnectionError('TESTS')
+ with pytest.raises(FailJson) as excinfo:
+ fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ assert excinfo.value.kwargs['msg'] == 'TESTS'
+ assert 'http://ansible.com/' == excinfo.value.kwargs['url']
+ assert excinfo.value.kwargs['status'] == -1
+
+ open_url_mock.side_effect = ValueError('TESTS')
+ with pytest.raises(FailJson) as excinfo:
+ fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ assert excinfo.value.kwargs['msg'] == 'TESTS'
+ assert 'http://ansible.com/' == excinfo.value.kwargs['url']
+ assert excinfo.value.kwargs['status'] == -1
+
+
+def test_fetch_url_httperror(open_url_mock, fake_ansible_module):
+ open_url_mock.side_effect = urllib_error.HTTPError(
+ 'http://ansible.com/',
+ 500,
+ 'Internal Server Error',
+ {'Content-Type': 'application/json'},
+ StringIO('TESTS')
+ )
+
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+
+ assert info == {'msg': 'HTTP Error 500: Internal Server Error', 'body': 'TESTS',
+ 'status': 500, 'url': 'http://ansible.com/', 'content-type': 'application/json'}
+
+
+def test_fetch_url_urlerror(open_url_mock, fake_ansible_module):
+ open_url_mock.side_effect = urllib_error.URLError('TESTS')
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+ assert info == {'msg': 'Request failed: <urlopen error TESTS>', 'status': -1, 'url': 'http://ansible.com/'}
+
+
+def test_fetch_url_socketerror(open_url_mock, fake_ansible_module):
+ open_url_mock.side_effect = socket.error('TESTS')
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+ assert info == {'msg': 'Connection failure: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
+
+
+def test_fetch_url_exception(open_url_mock, fake_ansible_module):
+ open_url_mock.side_effect = Exception('TESTS')
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+ exception = info.pop('exception')
+ assert info == {'msg': 'An unknown error occurred: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
+ assert "Exception: TESTS" in exception
+
+
+def test_fetch_url_badstatusline(open_url_mock, fake_ansible_module):
+ open_url_mock.side_effect = httplib.BadStatusLine('TESTS')
+ r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
+ assert info == {'msg': 'Connection failure: connection was closed before a valid response was received: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
diff --git a/test/units/module_utils/urls/test_generic_urlparse.py b/test/units/module_utils/urls/test_generic_urlparse.py
new file mode 100644
index 00000000..77537268
--- /dev/null
+++ b/test/units/module_utils/urls/test_generic_urlparse.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.urls import generic_urlparse
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+
+
+def test_generic_urlparse():
+ url = 'https://ansible.com/blog'
+ parts = urlparse(url)
+ generic_parts = generic_urlparse(parts)
+ assert generic_parts.as_list() == list(parts)
+
+ assert urlunparse(generic_parts.as_list()) == url
+
+
+def test_generic_urlparse_netloc():
+ url = 'https://ansible.com:443/blog'
+ parts = urlparse(url)
+ generic_parts = generic_urlparse(parts)
+ assert generic_parts.hostname == parts.hostname
+ assert generic_parts.hostname == 'ansible.com'
+ assert generic_parts.port == 443
+ assert urlunparse(generic_parts.as_list()) == url
+
+
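+# The *_no_netloc tests below pass a plain list, which lacks a netloc attribute, to exercise generic_urlparse's fallback parsing path.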
+def test_generic_urlparse_no_netloc():
+ url = 'https://user:passwd@ansible.com:443/blog'
+ parts = list(urlparse(url))
+ generic_parts = generic_urlparse(parts)
+ assert generic_parts.hostname == 'ansible.com'
+ assert generic_parts.port == 443
+ assert generic_parts.username == 'user'
+ assert generic_parts.password == 'passwd'
+ assert urlunparse(generic_parts.as_list()) == url
+
+
+def test_generic_urlparse_no_netloc_no_auth():
+ url = 'https://ansible.com:443/blog'
+ parts = list(urlparse(url))
+ generic_parts = generic_urlparse(parts)
+ assert generic_parts.username is None
+ assert generic_parts.password is None
+
+
+def test_generic_urlparse_no_netloc_no_host():
+ url = '/blog'
+ parts = list(urlparse(url))
+ generic_parts = generic_urlparse(parts)
+ assert generic_parts.username is None
+ assert generic_parts.password is None
+ assert generic_parts.port is None
+ assert generic_parts.hostname == ''
diff --git a/test/units/module_utils/urls/test_prepare_multipart.py b/test/units/module_utils/urls/test_prepare_multipart.py
new file mode 100644
index 00000000..e96aa454
--- /dev/null
+++ b/test/units/module_utils/urls/test_prepare_multipart.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+
+from email.message import Message
+
+import pytest
+
+from ansible.module_utils.urls import prepare_multipart
+
+
+def test_prepare_multipart():
+ fixture_boundary = b'===============3996062709511591449=='
+
+ here = os.path.dirname(__file__)
+ multipart = os.path.join(here, 'fixtures/multipart.txt')
+
+ client_cert = os.path.join(here, 'fixtures/client.pem')
+ client_key = os.path.join(here, 'fixtures/client.key')
+ client_txt = os.path.join(here, 'fixtures/client.txt')
+ fields = {
+ 'form_field_1': 'form_value_1',
+ 'form_field_2': {
+ 'content': 'form_value_2',
+ },
+ 'form_field_3': {
+ 'content': '<html></html>',
+ 'mime_type': 'text/html',
+ },
+ 'form_field_4': {
+ 'content': '{"foo": "bar"}',
+ 'mime_type': 'application/json',
+ },
+ 'file1': {
+ 'content': 'file_content_1',
+ 'filename': 'fake_file1.txt',
+ },
+ 'file2': {
+ 'content': '<html></html>',
+ 'mime_type': 'text/html',
+ 'filename': 'fake_file2.html',
+ },
+ 'file3': {
+ 'content': '{"foo": "bar"}',
+ 'mime_type': 'application/json',
+ 'filename': 'fake_file3.json',
+ },
+ 'file4': {
+ 'filename': client_cert,
+ 'mime_type': 'text/plain',
+ },
+ 'file5': {
+ 'filename': client_key,
+ },
+ 'file6': {
+ 'filename': client_txt,
+ },
+ }
+
+ content_type, b_data = prepare_multipart(fields)
+
+ headers = Message()
+ headers['Content-Type'] = content_type
+ assert headers.get_content_type() == 'multipart/form-data'
+ boundary = headers.get_boundary()
+ assert boundary is not None
+
+ with open(multipart, 'rb') as f:
+ b_expected = f.read().replace(fixture_boundary, boundary.encode())
+
+ # Depending on Python version, there may or may not be a trailing newline
+ assert b_data.rstrip(b'\r\n') == b_expected.rstrip(b'\r\n')
+
+
+def test_wrong_type():
+ pytest.raises(TypeError, prepare_multipart, 'foo')
+ pytest.raises(TypeError, prepare_multipart, {'foo': None})
+
+
+def test_empty():
+ pytest.raises(ValueError, prepare_multipart, {'foo': {}})
+
+
+def test_unknown_mime(mocker):
+ fields = {'foo': {'filename': 'foo.boom', 'content': 'foo'}}
+ mocker.patch('mimetypes.guess_type', return_value=(None, None))
+ content_type, b_data = prepare_multipart(fields)
+ assert b'Content-Type: application/octet-stream' in b_data
+
+
+def test_bad_mime(mocker):
+ fields = {'foo': {'filename': 'foo.boom', 'content': 'foo'}}
+ mocker.patch('mimetypes.guess_type', side_effect=TypeError)
+ content_type, b_data = prepare_multipart(fields)
+ assert b'Content-Type: application/octet-stream' in b_data
diff --git a/test/units/module_utils/urls/test_urls.py b/test/units/module_utils/urls/test_urls.py
new file mode 100644
index 00000000..69c1b824
--- /dev/null
+++ b/test/units/module_utils/urls/test_urls.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils import urls
+from ansible.module_utils._text import to_native
+
+import pytest
+
+
+def test_build_ssl_validation_error(mocker):
+ mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
+ mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=False)
+ mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=False)
+ with pytest.raises(urls.SSLValidationError) as excinfo:
+ urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
+
+ assert 'python >= 2.7.9' in to_native(excinfo.value)
+ assert 'the python executable used' in to_native(excinfo.value)
+ assert 'urllib3' in to_native(excinfo.value)
+ assert 'python >= 2.6' in to_native(excinfo.value)
+ assert 'validate_certs=False' in to_native(excinfo.value)
+
+ mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
+ with pytest.raises(urls.SSLValidationError) as excinfo:
+ urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
+
+ assert 'validate_certs=False' in to_native(excinfo.value)
+
+ mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
+ mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=True)
+ mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=True)
+
+ mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
+ with pytest.raises(urls.SSLValidationError) as excinfo:
+ urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
+
+ assert 'urllib3' not in to_native(excinfo.value)
+
+ with pytest.raises(urls.SSLValidationError) as excinfo:
+ urls.build_ssl_validation_error('hostname', 'port', 'paths', exc='BOOM')
+
+ assert 'BOOM' in to_native(excinfo.value)
+
+
+def test_maybe_add_ssl_handler(mocker):
+ mocker.patch.object(urls, 'HAS_SSL', new=False)
+ with pytest.raises(urls.NoSSLError):
+ urls.maybe_add_ssl_handler('https://ansible.com/', True)
+
+ mocker.patch.object(urls, 'HAS_SSL', new=True)
+ url = 'https://user:passwd@ansible.com/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler.hostname == 'ansible.com'
+ assert handler.port == 443
+
+ url = 'https://ansible.com:4433/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler.hostname == 'ansible.com'
+ assert handler.port == 4433
+
+ url = 'https://user:passwd@ansible.com:4433/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler.hostname == 'ansible.com'
+ assert handler.port == 4433
+
+ url = 'https://ansible.com/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler.hostname == 'ansible.com'
+ assert handler.port == 443
+
+ url = 'http://ansible.com/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler is None
+
+ url = 'https://[2a00:16d8:0:7::205]:4443/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler.hostname == '2a00:16d8:0:7::205'
+ assert handler.port == 4443
+
+ url = 'https://[2a00:16d8:0:7::205]/'
+ handler = urls.maybe_add_ssl_handler(url, True)
+ assert handler.hostname == '2a00:16d8:0:7::205'
+ assert handler.port == 443
+
+
+def test_basic_auth_header():
+ header = urls.basic_auth_header('user', 'passwd')
+ assert header == b'Basic dXNlcjpwYXNzd2Q='
+
+
+def test_ParseResultDottedDict():
+ url = 'https://ansible.com/blog'
+ parts = urls.urlparse(url)
+ dotted_parts = urls.ParseResultDottedDict(parts._asdict())
+ assert parts[0] == dotted_parts.scheme
+
+ assert dotted_parts.as_list() == list(parts)
+
+
+def test_unix_socket_patch_httpconnection_connect(mocker):
+ unix_conn = mocker.patch.object(urls.UnixHTTPConnection, 'connect')
+ conn = urls.httplib.HTTPConnection('ansible.com')
+ with urls.unix_socket_patch_httpconnection_connect():
+ conn.connect()
+ assert unix_conn.call_count == 1
diff --git a/test/units/modules/__init__.py b/test/units/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/modules/__init__.py
diff --git a/test/units/modules/conftest.py b/test/units/modules/conftest.py
new file mode 100644
index 00000000..a7d1e047
--- /dev/null
+++ b/test/units/modules/conftest.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+ mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
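+
+# Typical usage is indirect parametrization, as test_pip.py does below:
+# @pytest.mark.parametrize('patch_ansible_module', [{'name': 'six'}], indirect=['patch_ansible_module'])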
diff --git a/test/units/modules/test_apt.py b/test/units/modules/test_apt.py
new file mode 100644
index 00000000..3daf3c11
--- /dev/null
+++ b/test/units/modules/test_apt.py
@@ -0,0 +1,53 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import sys
+
+from units.compat import mock
+from units.compat import unittest
+
+try:
+ from ansible.modules.apt import (
+ expand_pkgspec_from_fnmatches,
+ )
+except Exception:
+ # Need some more module_utils work (porting urls.py) before we can test
+ # modules. So don't error out in this case.
+ if sys.version_info[0] >= 3:
+ pass
+
+
+class AptExpandPkgspecTestCase(unittest.TestCase):
+
+ def setUp(self):
+ FakePackage = collections.namedtuple("Package", ("name",))
+ self.fake_cache = [
+ FakePackage("apt"),
+ FakePackage("apt-utils"),
+ FakePackage("not-selected"),
+ ]
+
+ def test_trivial(self):
+ foo = ["apt"]
+ self.assertEqual(
+ expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
+
+ def test_version_wildcard(self):
+ foo = ["apt=1.0*"]
+ self.assertEqual(
+ expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
+
+ def test_pkgname_wildcard_version_wildcard(self):
+ foo = ["apt*=1.0*"]
+ m_mock = mock.Mock()
+ self.assertEqual(
+ expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
+ ['apt', 'apt-utils'])
+
+ def test_pkgname_expands(self):
+ foo = ["apt*"]
+ m_mock = mock.Mock()
+ self.assertEqual(
+ expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
+ ["apt", "apt-utils"])
diff --git a/test/units/modules/test_async_wrapper.py b/test/units/modules/test_async_wrapper.py
new file mode 100644
index 00000000..762fc2fb
--- /dev/null
+++ b/test/units/modules/test_async_wrapper.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import json
+import shutil
+import tempfile
+
+from ansible.modules import async_wrapper
+
+
+class TestAsyncWrapper:
+
+ def test_run_module(self, monkeypatch):
+
+ def mock_get_interpreter(module_path):
+ return ['/usr/bin/python']
+
+ module_result = {'rc': 0}
+ module_lines = [
+ '#!/usr/bin/python',
+ 'import sys',
+ 'sys.stderr.write("stderr stuff")',
+ "print('%s')" % json.dumps(module_result)
+ ]
+ module_data = '\n'.join(module_lines) + '\n'
+ module_data = module_data.encode('utf-8')
+
+ workdir = tempfile.mkdtemp()
+ fh, fn = tempfile.mkstemp(dir=workdir)
+
+ with open(fn, 'wb') as f:
+ f.write(module_data)
+
+ command = fn
+ jobid = 0
+ jobpath = os.path.join(os.path.dirname(command), 'job')
+
+ monkeypatch.setattr(async_wrapper, '_get_interpreter', mock_get_interpreter)
+
+ res = async_wrapper._run_module(command, jobid, jobpath)
+
+ with open(os.path.join(workdir, 'job'), 'r') as f:
+ jres = json.loads(f.read())
+
+ shutil.rmtree(workdir)
+
+ assert jres.get('rc') == 0
+ assert jres.get('stderr') == 'stderr stuff'
diff --git a/test/units/modules/test_copy.py b/test/units/modules/test_copy.py
new file mode 100644
index 00000000..20c309b6
--- /dev/null
+++ b/test/units/modules/test_copy.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright:
+# (c) 2018 Ansible Project
+# License: GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.modules.copy import AnsibleModuleError, split_pre_existing_dir
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+THREE_DIRS_DATA = (('/dir1/dir2',
+ # 0 existing dirs: error (because / should always exist)
+ None,
+ # 1 existing dir:
+ ('/', ['dir1', 'dir2']),
+ # 2 existing dirs:
+ ('/dir1', ['dir2']),
+ # 3 existing dirs:
+ ('/dir1/dir2', [])
+ ),
+ ('/dir1/dir2/',
+ # 0 existing dirs: error (because / should always exist)
+ None,
+ # 1 existing dir:
+ ('/', ['dir1', 'dir2']),
+ # 2 existing dirs:
+ ('/dir1', ['dir2']),
+ # 3 existing dirs:
+ ('/dir1/dir2', [])
+ ),
+ )
+
+
+TWO_DIRS_DATA = (('dir1/dir2',
+ # 0 existing dirs:
+ ('.', ['dir1', 'dir2']),
+ # 1 existing dir:
+ ('dir1', ['dir2']),
+ # 2 existing dirs:
+ ('dir1/dir2', []),
+ # 3 existing dirs: Same as 2 because we never get to the third
+ ),
+ ('dir1/dir2/',
+ # 0 existing dirs:
+ ('.', ['dir1', 'dir2']),
+ # 1 existing dir:
+ ('dir1', ['dir2']),
+ # 2 existing dirs:
+ ('dir1/dir2', []),
+ # 3 existing dirs: Same as 2 because we never get to the third
+ ),
+ ('/dir1',
+ # 0 existing dirs: error (because / should always exist)
+ None,
+ # 1 existing dir:
+ ('/', ['dir1']),
+ # 2 existing dirs:
+ ('/dir1', []),
+ # 3 existing dirs: Same as 2 because we never get to the third
+ ),
+ ('/dir1/',
+ # 0 existing dirs: error (because / should always exist)
+ None,
+ # 1 existing dir:
+ ('/', ['dir1']),
+ # 2 existing dirs:
+ ('/dir1', []),
+ # 3 existing dirs: Same as 2 because we never get to the third
+ ),
+ ) + THREE_DIRS_DATA
+
+
+ONE_DIR_DATA = (('dir1',
+ # 0 existing dirs:
+ ('.', ['dir1']),
+ # 1 existing dir:
+ ('dir1', []),
+ # 2 existing dirs: Same as 1 because we never get to the third
+ ),
+ ('dir1/',
+ # 0 existing dirs:
+ ('.', ['dir1']),
+ # 1 existing dir:
+ ('dir1', []),
+ # 2 existing dirs: Same as 1 because we never get to the third
+ ),
+ ) + TWO_DIRS_DATA
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[4]) for d in THREE_DIRS_DATA))
+def test_split_pre_existing_dir_three_levels_exist(directory, expected, mocker):
+ mocker.patch('os.path.exists', side_effect=[True, True, True])
+ assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[3]) for d in TWO_DIRS_DATA))
+def test_split_pre_existing_dir_two_levels_exist(directory, expected, mocker):
+ mocker.patch('os.path.exists', side_effect=[True, True, False])
+ assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[2]) for d in ONE_DIR_DATA))
+def test_split_pre_existing_dir_one_level_exists(directory, expected, mocker):
+ mocker.patch('os.path.exists', side_effect=[True, False, False])
+ assert split_pre_existing_dir(directory) == expected
+
+
+@pytest.mark.parametrize('directory', (d[0] for d in ONE_DIR_DATA if d[1] is None))
+def test_split_pre_existing_dir_root_does_not_exist(directory, mocker):
+ mocker.patch('os.path.exists', return_value=False)
+ with pytest.raises(AnsibleModuleError) as excinfo:
+ split_pre_existing_dir(directory)
+ assert excinfo.value.results['msg'].startswith("The '/' directory doesn't exist on this machine.")
+
+
+@pytest.mark.parametrize('directory, expected', ((d[0], d[1]) for d in ONE_DIR_DATA if not d[0].startswith('/')))
+def test_split_pre_existing_dir_working_dir_exists(directory, expected, mocker):
+ mocker.patch('os.path.exists', return_value=False)
+ assert split_pre_existing_dir(directory) == expected
+
+
+#
+# Info helpful for making new test cases:
+#
+# base_mode = {'dir no perms': 0o040000,
+# 'file no perms': 0o100000,
+# 'dir all perms': 0o040000 | 0o777,
+# 'file all perms': 0o100000 | 0o777}
+#
+# perm_bits = {'x': 0b001,
+# 'w': 0b010,
+# 'r': 0b100}
+#
+# role_shift = {'u': 6,
+# 'g': 3,
+# 'o': 0}
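+#
+# Worked example: u'u+rwx' grants (perm_bits['r'] | perm_bits['w'] | perm_bits['x']) << role_shift['u']
+# == 0b111 << 6 == 0o700, matching the (0o040000, u'u+rwx', 0o0700) case below.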
+
+DATA = ( # Going from no permissions to setting all for user, group, and/or other
+ (0o040000, u'a+rwx', 0o0777),
+ (0o040000, u'u+rwx,g+rwx,o+rwx', 0o0777),
+ (0o040000, u'o+rwx', 0o0007),
+ (0o040000, u'g+rwx', 0o0070),
+ (0o040000, u'u+rwx', 0o0700),
+
+ # Going from all permissions to none for user, group, and/or other
+ (0o040777, u'a-rwx', 0o0000),
+ (0o040777, u'u-rwx,g-rwx,o-rwx', 0o0000),
+ (0o040777, u'o-rwx', 0o0770),
+ (0o040777, u'g-rwx', 0o0707),
+ (0o040777, u'u-rwx', 0o0077),
+
+ # now using absolute assignment from None to a set of perms
+ (0o040000, u'a=rwx', 0o0777),
+ (0o040000, u'u=rwx,g=rwx,o=rwx', 0o0777),
+ (0o040000, u'o=rwx', 0o0007),
+ (0o040000, u'g=rwx', 0o0070),
+ (0o040000, u'u=rwx', 0o0700),
+
+ # X effect on files and dirs
+ (0o040000, u'a+X', 0o0111),
+ (0o100000, u'a+X', 0),
+ (0o040000, u'a=X', 0o0111),
+ (0o100000, u'a=X', 0),
+ (0o040777, u'a-X', 0o0666),
+ # Same as chmod but is it a bug?
+ # chmod a-X statfile <== removes execute from statfile
+ (0o100777, u'a-X', 0o0666),
+
+ # Multiple permissions
+ (0o040000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0755),
+ (0o100000, u'u=rw-x+X,g=r-x+X,o=r-x+X', 0o0644),
+)
+
+UMASK_DATA = (
+ (0o100000, '+rwx', 0o770),
+ (0o100777, '-rwx', 0o007),
+)
+
+INVALID_DATA = (
+ (0o040000, u'a=foo', "bad symbolic permission for mode: a=foo"),
+ (0o040000, u'f=rwx', "bad symbolic permission for mode: f=rwx"),
+)
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', DATA)
+def test_good_symbolic_modes(mocker, stat_info, mode_string, expected):
+ mock_stat = mocker.MagicMock()
+ mock_stat.st_mode = stat_info
+ assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', UMASK_DATA)
+def test_umask_with_symbolic_modes(mocker, stat_info, mode_string, expected):
+ mock_umask = mocker.patch('os.umask')
+ mock_umask.return_value = 0o7
+
+ mock_stat = mocker.MagicMock()
+ mock_stat.st_mode = stat_info
+
+ assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == expected
+
+
+@pytest.mark.parametrize('stat_info, mode_string, expected', INVALID_DATA)
+def test_invalid_symbolic_modes(mocker, stat_info, mode_string, expected):
+ mock_stat = mocker.MagicMock()
+ mock_stat.st_mode = stat_info
+ with pytest.raises(ValueError) as exc:
+ assert AnsibleModule._symbolic_mode_to_octal(mock_stat, mode_string) == 'blah'
+ assert exc.match(expected)
diff --git a/test/units/modules/test_iptables.py b/test/units/modules/test_iptables.py
new file mode 100644
index 00000000..25a157e5
--- /dev/null
+++ b/test/units/modules/test_iptables.py
@@ -0,0 +1,919 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.modules import iptables
+from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+def get_bin_path(*args, **kwargs):
+ return "/sbin/iptables"
+
+
+def get_iptables_version(iptables_path, module):
+ return "1.8.2"
+
+
+class TestIptables(ModuleTestCase):
+
+ def setUp(self):
+ super(TestIptables, self).setUp()
+ self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
+ self.mock_get_bin_path.start()
+ self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
+ self.mock_get_iptables_version = patch.object(iptables, 'get_iptables_version', get_iptables_version)
+ self.mock_get_iptables_version.start()
+ self.addCleanup(self.mock_get_iptables_version.stop) # ensure that the patching is 'undone'
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ iptables.main()
+
+ def test_flush_table_without_chain(self):
+ """Test flush without chain, flush the table"""
+ set_module_args({
+ 'flush': True,
+ })
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args[0][0][0], '/sbin/iptables')
+ self.assertEqual(run_command.call_args[0][0][1], '-t')
+ self.assertEqual(run_command.call_args[0][0][2], 'filter')
+ self.assertEqual(run_command.call_args[0][0][3], '-F')
+
+ def test_flush_table_check_true(self):
+ """Test flush without parameters and check == true"""
+ set_module_args({
+ 'flush': True,
+ '_ansible_check_mode': True,
+ })
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 0)
+
+# TODO ADD test flush table nat
+# TODO ADD test flush with chain
+# TODO ADD test flush with chain and table nat
+
+ def test_policy_table(self):
+ """Test change policy of a chain"""
+ set_module_args({
+ 'policy': 'ACCEPT',
+ 'chain': 'INPUT',
+ })
+ commands_results = [
+ (0, 'Chain INPUT (policy DROP)\n', ''),
+ (0, '', '')
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-L',
+ 'INPUT',
+ ])
+ self.assertEqual(run_command.call_args_list[1][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-P',
+ 'INPUT',
+ 'ACCEPT',
+ ])
+
+ def test_policy_table_no_change(self):
+ """Test don't change policy of a chain if the policy is right"""
+ set_module_args({
+ 'policy': 'ACCEPT',
+ 'chain': 'INPUT',
+ })
+ commands_results = [
+ (0, 'Chain INPUT (policy ACCEPT)\n', ''),
+ (0, '', '')
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertFalse(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-L',
+ 'INPUT',
+ ])
+
+ def test_policy_table_changed_false(self):
+ """Test flush without parameters and change == false"""
+ set_module_args({
+ 'policy': 'ACCEPT',
+ 'chain': 'INPUT',
+ '_ansible_check_mode': True,
+ })
+ commands_results = [
+ (0, 'Chain INPUT (policy DROP)\n', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-L',
+ 'INPUT',
+ ])
+
+# TODO ADD test policy without chain fail
+# TODO ADD test policy with chain don't exists
+# TODO ADD test policy with wrong choice fail
+
+ def test_insert_rule_change_false(self):
+ """Test flush without parameters"""
+ set_module_args({
+ 'chain': 'OUTPUT',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'ACCEPT',
+ 'action': 'insert',
+ '_ansible_check_mode': True,
+ })
+
+ commands_results = [
+ (1, '', ''),
+ (0, '', '')
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'OUTPUT',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'ACCEPT'
+ ])
+
+ def test_insert_rule(self):
+ """Test flush without parameters"""
+ set_module_args({
+ 'chain': 'OUTPUT',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'ACCEPT',
+ 'action': 'insert'
+ })
+
+ commands_results = [
+ (1, '', ''),
+ (0, '', '')
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'OUTPUT',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'ACCEPT'
+ ])
+ self.assertEqual(run_command.call_args_list[1][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-I',
+ 'OUTPUT',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'ACCEPT'
+ ])
+
+ def test_append_rule_check_mode(self):
+ """Test append a redirection rule in check mode"""
+ set_module_args({
+ 'chain': 'PREROUTING',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'REDIRECT',
+ 'table': 'nat',
+ 'to_destination': '5.5.5.5/32',
+ 'protocol': 'udp',
+ 'destination_port': '22',
+ 'to_ports': '8600',
+ '_ansible_check_mode': True,
+ })
+
+ commands_results = [
+ (1, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'nat',
+ '-C',
+ 'PREROUTING',
+ '-p',
+ 'udp',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'REDIRECT',
+ '--to-destination',
+ '5.5.5.5/32',
+ '--destination-port',
+ '22',
+ '--to-ports',
+ '8600'
+ ])
+
+ def test_append_rule(self):
+ """Test append a redirection rule"""
+ set_module_args({
+ 'chain': 'PREROUTING',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'REDIRECT',
+ 'table': 'nat',
+ 'to_destination': '5.5.5.5/32',
+ 'protocol': 'udp',
+ 'destination_port': '22',
+ 'to_ports': '8600'
+ })
+
+ commands_results = [
+ (1, '', ''),
+ (0, '', '')
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'nat',
+ '-C',
+ 'PREROUTING',
+ '-p',
+ 'udp',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'REDIRECT',
+ '--to-destination',
+ '5.5.5.5/32',
+ '--destination-port',
+ '22',
+ '--to-ports',
+ '8600'
+ ])
+ self.assertEqual(run_command.call_args_list[1][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'nat',
+ '-A',
+ 'PREROUTING',
+ '-p',
+ 'udp',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'REDIRECT',
+ '--to-destination',
+ '5.5.5.5/32',
+ '--destination-port',
+ '22',
+ '--to-ports',
+ '8600'
+ ])
+
+ def test_remove_rule(self):
+ """Test flush without parameters"""
+ set_module_args({
+ 'chain': 'PREROUTING',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'SNAT',
+ 'table': 'nat',
+ 'to_source': '5.5.5.5/32',
+ 'protocol': 'udp',
+ 'source_port': '22',
+ 'to_ports': '8600',
+ 'state': 'absent',
+ 'in_interface': 'eth0',
+ 'out_interface': 'eth1',
+ 'comment': 'this is a comment'
+ })
+
+ commands_results = [
+ (0, '', ''),
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'nat',
+ '-C',
+ 'PREROUTING',
+ '-p',
+ 'udp',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'SNAT',
+ '--to-source',
+ '5.5.5.5/32',
+ '-i',
+ 'eth0',
+ '-o',
+ 'eth1',
+ '--source-port',
+ '22',
+ '--to-ports',
+ '8600',
+ '-m',
+ 'comment',
+ '--comment',
+ 'this is a comment'
+ ])
+ self.assertEqual(run_command.call_args_list[1][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'nat',
+ '-D',
+ 'PREROUTING',
+ '-p',
+ 'udp',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'SNAT',
+ '--to-source',
+ '5.5.5.5/32',
+ '-i',
+ 'eth0',
+ '-o',
+ 'eth1',
+ '--source-port',
+ '22',
+ '--to-ports',
+ '8600',
+ '-m',
+ 'comment',
+ '--comment',
+ 'this is a comment'
+ ])
+
+ def test_remove_rule_check_mode(self):
+ """Test flush without parameters check mode"""
+ set_module_args({
+ 'chain': 'PREROUTING',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'SNAT',
+ 'table': 'nat',
+ 'to_source': '5.5.5.5/32',
+ 'protocol': 'udp',
+ 'source_port': '22',
+ 'to_ports': '8600',
+ 'state': 'absent',
+ 'in_interface': 'eth0',
+ 'out_interface': 'eth1',
+ 'comment': 'this is a comment',
+ '_ansible_check_mode': True,
+ })
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'nat',
+ '-C',
+ 'PREROUTING',
+ '-p',
+ 'udp',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'SNAT',
+ '--to-source',
+ '5.5.5.5/32',
+ '-i',
+ 'eth0',
+ '-o',
+ 'eth1',
+ '--source-port',
+ '22',
+ '--to-ports',
+ '8600',
+ '-m',
+ 'comment',
+ '--comment',
+ 'this is a comment'
+ ])
+
+ def test_insert_with_reject(self):
+ """ Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
+ set_module_args({
+ 'chain': 'INPUT',
+ 'protocol': 'tcp',
+ 'reject_with': 'tcp-reset',
+ 'ip_version': 'ipv4',
+ })
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'INPUT',
+ '-p',
+ 'tcp',
+ '-j',
+ 'REJECT',
+ '--reject-with',
+ 'tcp-reset',
+ ])
+
+ def test_insert_jump_reject_with_reject(self):
+ """ Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
+ set_module_args({
+ 'chain': 'INPUT',
+ 'protocol': 'tcp',
+ 'jump': 'REJECT',
+ 'reject_with': 'tcp-reset',
+ 'ip_version': 'ipv4',
+ })
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'INPUT',
+ '-p',
+ 'tcp',
+ '-j',
+ 'REJECT',
+ '--reject-with',
+ 'tcp-reset',
+ ])
+
+ def test_jump_tee_gateway_negative(self):
+ """ Missing gateway when JUMP is set to TEE """
+ set_module_args({
+ 'table': 'mangle',
+ 'chain': 'PREROUTING',
+ 'in_interface': 'eth0',
+ 'protocol': 'udp',
+ 'match': 'state',
+ 'jump': 'TEE',
+ 'ctstate': ['NEW'],
+ 'destination_port': '9521',
+ 'destination': '127.0.0.1'
+ })
+
+ with self.assertRaises(AnsibleFailJson) as e:
+ iptables.main()
+ self.assertTrue(e.exception.args[0]['failed'])
+ self.assertEqual(e.exception.args[0]['msg'], 'jump is TEE but all of the following are missing: gateway')
+
+ def test_jump_tee_gateway(self):
+ """ Using gateway when JUMP is set to TEE """
+ set_module_args({
+ 'table': 'mangle',
+ 'chain': 'PREROUTING',
+ 'in_interface': 'eth0',
+ 'protocol': 'udp',
+ 'match': 'state',
+ 'jump': 'TEE',
+ 'ctstate': ['NEW'],
+ 'destination_port': '9521',
+ 'gateway': '192.168.10.1',
+ 'destination': '127.0.0.1'
+ })
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t', 'mangle',
+ '-C', 'PREROUTING',
+ '-p', 'udp',
+ '-d', '127.0.0.1',
+ '-m', 'state',
+ '-j', 'TEE',
+ '--gateway', '192.168.10.1',
+ '-i', 'eth0',
+ '--destination-port', '9521',
+ '--state', 'NEW'
+ ])
+
+ def test_tcp_flags(self):
+ """ Test various ways of inputting tcp_flags """
+ args = [
+ {
+ 'chain': 'OUTPUT',
+ 'protocol': 'tcp',
+ 'jump': 'DROP',
+ 'tcp_flags': 'flags=ALL flags_set="ACK,RST,SYN,FIN"'
+ },
+ {
+ 'chain': 'OUTPUT',
+ 'protocol': 'tcp',
+ 'jump': 'DROP',
+ 'tcp_flags': {
+ 'flags': 'ALL',
+ 'flags_set': 'ACK,RST,SYN,FIN'
+ }
+ },
+ {
+ 'chain': 'OUTPUT',
+ 'protocol': 'tcp',
+ 'jump': 'DROP',
+ 'tcp_flags': {
+ 'flags': ['ALL'],
+ 'flags_set': ['ACK', 'RST', 'SYN', 'FIN']
+ }
+ },
+
+ ]
+
+ for item in args:
+ set_module_args(item)
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'OUTPUT',
+ '-p',
+ 'tcp',
+ '--tcp-flags',
+ 'ALL',
+ 'ACK,RST,SYN,FIN',
+ '-j',
+ 'DROP'
+ ])
+
+ def test_log_level(self):
+ """ Test various ways of log level flag """
+
+ log_levels = ['0', '1', '2', '3', '4', '5', '6', '7',
+ 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug']
+
+ for log_lvl in log_levels:
+ set_module_args({
+ 'chain': 'INPUT',
+ 'jump': 'LOG',
+ 'log_level': log_lvl,
+ 'source': '1.2.3.4/32',
+ 'log_prefix': '** DROP-this_ip **'
+ })
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t', 'filter',
+ '-C', 'INPUT',
+ '-s', '1.2.3.4/32',
+ '-j', 'LOG',
+ '--log-prefix', '** DROP-this_ip **',
+ '--log-level', log_lvl
+ ])
+
+ def test_iprange(self):
+ """ Test iprange module with its flags src_range and dst_range """
+ set_module_args({
+ 'chain': 'INPUT',
+ 'match': ['iprange'],
+ 'src_range': '192.168.1.100-192.168.1.199',
+ 'jump': 'ACCEPT'
+ })
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'INPUT',
+ '-m',
+ 'iprange',
+ '-j',
+ 'ACCEPT',
+ '--src-range',
+ '192.168.1.100-192.168.1.199',
+ ])
+
+ set_module_args({
+ 'chain': 'INPUT',
+ 'src_range': '192.168.1.100-192.168.1.199',
+ 'dst_range': '10.0.0.50-10.0.0.100',
+ 'jump': 'ACCEPT'
+ })
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'INPUT',
+ '-j',
+ 'ACCEPT',
+ '-m',
+ 'iprange',
+ '--src-range',
+ '192.168.1.100-192.168.1.199',
+ '--dst-range',
+ '10.0.0.50-10.0.0.100'
+ ])
+
+ set_module_args({
+ 'chain': 'INPUT',
+ 'dst_range': '10.0.0.50-10.0.0.100',
+ 'jump': 'ACCEPT'
+ })
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'INPUT',
+ '-j',
+ 'ACCEPT',
+ '-m',
+ 'iprange',
+ '--dst-range',
+ '10.0.0.50-10.0.0.100'
+ ])
+
+ def test_insert_rule_with_wait(self):
+ """Test flush without parameters"""
+ set_module_args({
+ 'chain': 'OUTPUT',
+ 'source': '1.2.3.4/32',
+ 'destination': '7.8.9.10/42',
+ 'jump': 'ACCEPT',
+ 'action': 'insert',
+ 'wait': '10'
+ })
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'OUTPUT',
+ '-w',
+ '10',
+ '-s',
+ '1.2.3.4/32',
+ '-d',
+ '7.8.9.10/42',
+ '-j',
+ 'ACCEPT'
+ ])
+
+ def test_comment_position_at_end(self):
+ """Test flush without parameters"""
+ set_module_args({
+ 'chain': 'INPUT',
+ 'jump': 'ACCEPT',
+ 'action': 'insert',
+ 'ctstate': ['NEW'],
+ 'comment': 'this is a comment',
+ '_ansible_check_mode': True,
+ })
+
+ commands_results = [
+ (0, '', ''),
+ ]
+
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.side_effect = commands_results
+ with self.assertRaises(AnsibleExitJson) as result:
+ iptables.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+ self.assertEqual(run_command.call_args_list[0][0][0], [
+ '/sbin/iptables',
+ '-t',
+ 'filter',
+ '-C',
+ 'INPUT',
+ '-j',
+ 'ACCEPT',
+ '-m',
+ 'conntrack',
+ '--ctstate',
+ 'NEW',
+ '-m',
+ 'comment',
+ '--comment',
+ 'this is a comment'
+ ])
+ self.assertEqual(run_command.call_args[0][0][14], 'this is a comment')
diff --git a/test/units/modules/test_known_hosts.py b/test/units/modules/test_known_hosts.py
new file mode 100644
index 00000000..3b6dfd86
--- /dev/null
+++ b/test/units/modules/test_known_hosts.py
@@ -0,0 +1,110 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+from ansible.module_utils import basic
+
+from units.compat import unittest
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.modules.known_hosts import compute_diff, sanity_check
+
+
+class KnownHostsDiffTestCase(unittest.TestCase):
+
+ def _create_file(self, content):
+ tmp_file = tempfile.NamedTemporaryFile(prefix='ansible-test-', suffix='-known_hosts', delete=False)
+ tmp_file.write(to_bytes(content))
+ tmp_file.close()
+ self.addCleanup(os.unlink, tmp_file.name)
+ return tmp_file.name
+
+ def test_no_existing_file(self):
+ path = tempfile.mktemp(prefix='ansible-test-', suffix='-known_hosts')
+ key = 'example.com ssh-rsa AAAAetc\n'
+ diff = compute_diff(path, found_line=None, replace_or_add=False, state='present', key=key)
+ self.assertEqual(diff, {
+ 'before_header': '/dev/null',
+ 'after_header': path,
+ 'before': '',
+ 'after': 'example.com ssh-rsa AAAAetc\n',
+ })
+
+ def test_key_addition(self):
+ path = self._create_file(
+ 'two.example.com ssh-rsa BBBBetc\n'
+ )
+ key = 'one.example.com ssh-rsa AAAAetc\n'
+ diff = compute_diff(path, found_line=None, replace_or_add=False, state='present', key=key)
+ self.assertEqual(diff, {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': 'two.example.com ssh-rsa BBBBetc\n',
+ 'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
+ })
+
+ def test_no_change(self):
+ path = self._create_file(
+ 'one.example.com ssh-rsa AAAAetc\n'
+ 'two.example.com ssh-rsa BBBBetc\n'
+ )
+ key = 'one.example.com ssh-rsa AAAAetc\n'
+ diff = compute_diff(path, found_line=1, replace_or_add=False, state='present', key=key)
+ self.assertEqual(diff, {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
+ 'after': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
+ })
+
+ def test_key_change(self):
+ path = self._create_file(
+ 'one.example.com ssh-rsa AAAaetc\n'
+ 'two.example.com ssh-rsa BBBBetc\n'
+ )
+ key = 'one.example.com ssh-rsa AAAAetc\n'
+ diff = compute_diff(path, found_line=1, replace_or_add=True, state='present', key=key)
+ self.assertEqual(diff, {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': 'one.example.com ssh-rsa AAAaetc\ntwo.example.com ssh-rsa BBBBetc\n',
+ 'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
+ })
+
+ def test_key_removal(self):
+ path = self._create_file(
+ 'one.example.com ssh-rsa AAAAetc\n'
+ 'two.example.com ssh-rsa BBBBetc\n'
+ )
+ key = 'one.example.com ssh-rsa AAAAetc\n'
+ diff = compute_diff(path, found_line=1, replace_or_add=False, state='absent', key=key)
+ self.assertEqual(diff, {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
+ 'after': 'two.example.com ssh-rsa BBBBetc\n',
+ })
+
+ def test_key_removal_no_change(self):
+ path = self._create_file(
+ 'two.example.com ssh-rsa BBBBetc\n'
+ )
+ key = 'one.example.com ssh-rsa AAAAetc\n'
+ diff = compute_diff(path, found_line=None, replace_or_add=False, state='absent', key=key)
+ self.assertEqual(diff, {
+ 'before_header': path,
+ 'after_header': path,
+ 'before': 'two.example.com ssh-rsa BBBBetc\n',
+ 'after': 'two.example.com ssh-rsa BBBBetc\n',
+ })
+
+ def test_sanity_check(self):
+ basic._load_params = lambda: {}
+ # Module used internally to execute ssh-keygen system executable
+ module = AnsibleModule(argument_spec={})
+ host = '10.0.0.1'
+ key = '%s ssh-rsa ASDF foo@bar' % (host,)
+ keygen = module.get_bin_path('ssh-keygen')
+ sanity_check(module, host, key, keygen)
diff --git a/test/units/modules/test_pip.py b/test/units/modules/test_pip.py
new file mode 100644
index 00000000..7f0f8b07
--- /dev/null
+++ b/test/units/modules/test_pip.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.modules import pip
+
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{'name': 'six'}], indirect=['patch_ansible_module'])
+def test_failure_when_pip_absent(mocker, capfd):
+ get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+ get_bin_path.return_value = None
+
+ with pytest.raises(SystemExit):
+ pip.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'pip needs to be installed' in results['msg']
+
+
+@pytest.mark.parametrize('patch_ansible_module, test_input, expected', [
+ [None, ['django>1.11.1', '<1.11.2', 'ipaddress', 'simpleproject<2.0.0', '>1.1.0'],
+ ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']],
+ [None, ['django>1.11.1,<1.11.2,ipaddress', 'simpleproject<2.0.0,>1.1.0'],
+ ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']],
+ [None, ['django>1.11.1', '<1.11.2', 'ipaddress,simpleproject<2.0.0,>1.1.0'],
+ ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']]])
+def test_recover_package_name(test_input, expected):
+ assert pip._recover_package_name(test_input) == expected
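+
+
+ # A minimal sketch of why the recovery is needed (inputs taken from the
+ # cases above): a comma-separated 'name' value is split into list elements
+ # before the module sees it, so one requirement can arrive as fragments:
+ #
+ #     pip._recover_package_name(['django>1.11.1', '<1.11.2'])
+ #     # -> ['django>1.11.1,<1.11.2']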
diff --git a/test/units/modules/test_systemd.py b/test/units/modules/test_systemd.py
new file mode 100644
index 00000000..52c212a0
--- /dev/null
+++ b/test/units/modules/test_systemd.py
@@ -0,0 +1,52 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.modules.systemd import parse_systemctl_show
+
+
+class ParseSystemctlShowTestCase(unittest.TestCase):
+
+ def test_simple(self):
+ lines = [
+ 'Type=simple',
+ 'Restart=no',
+ 'Requires=system.slice sysinit.target',
+ 'Description=Blah blah blah',
+ ]
+ parsed = parse_systemctl_show(lines)
+ self.assertEqual(parsed, {
+ 'Type': 'simple',
+ 'Restart': 'no',
+ 'Requires': 'system.slice sysinit.target',
+ 'Description': 'Blah blah blah',
+ })
+
+ def test_multiline_exec(self):
+ # This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
+ lines = [
+ 'Type=simple',
+ 'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
+ 'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
+ 'Description=blah',
+ ]
+ parsed = parse_systemctl_show(lines)
+ self.assertEqual(parsed, {
+ 'Type': 'simple',
+ 'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\n'
+ 'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
+ 'Description': 'blah',
+ })
+
+ def test_single_line_with_brace(self):
+ lines = [
+ 'Type=simple',
+ 'Description={ this is confusing',
+ 'Restart=no',
+ ]
+ parsed = parse_systemctl_show(lines)
+ self.assertEqual(parsed, {
+ 'Type': 'simple',
+ 'Description': '{ this is confusing',
+ 'Restart': 'no',
+ })
diff --git a/test/units/modules/test_yum.py b/test/units/modules/test_yum.py
new file mode 100644
index 00000000..e5d601a6
--- /dev/null
+++ b/test/units/modules/test_yum.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.modules.yum import YumModule
+
+
+yum_plugin_load_error = """
+Plugin "product-id" can't be imported
+Plugin "search-disabled-repos" can't be imported
+Plugin "subscription-manager" can't be imported
+Plugin "product-id" can't be imported
+Plugin "search-disabled-repos" can't be imported
+Plugin "subscription-manager" can't be imported
+"""
+
+# from https://github.com/ansible/ansible/issues/20608#issuecomment-276106505
+wrapped_output_1 = """
+Загружены модули: fastestmirror
+Loading mirror speeds from cached hostfile
+ * base: mirror.h1host.ru
+ * extras: mirror.h1host.ru
+ * updates: mirror.h1host.ru
+
+vms-agent.x86_64 0.0-9 dev
+"""
+
+# from https://github.com/ansible/ansible/issues/20608#issuecomment-276971275
+wrapped_output_2 = """
+Загружены модули: fastestmirror
+Loading mirror speeds from cached hostfile
+ * base: mirror.corbina.net
+ * extras: mirror.corbina.net
+ * updates: mirror.corbina.net
+
+empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty.x86_64
+ 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1-0
+ addons
+libtiff.x86_64 4.0.3-27.el7_3 updates
+"""
+
+# From https://github.com/ansible/ansible/issues/20608#issuecomment-276698431
+wrapped_output_3 = """
+Loaded plugins: fastestmirror, langpacks
+Loading mirror speeds from cached hostfile
+
+ceph.x86_64 1:11.2.0-0.el7 ceph
+ceph-base.x86_64 1:11.2.0-0.el7 ceph
+ceph-common.x86_64 1:11.2.0-0.el7 ceph
+ceph-mds.x86_64 1:11.2.0-0.el7 ceph
+ceph-mon.x86_64 1:11.2.0-0.el7 ceph
+ceph-osd.x86_64 1:11.2.0-0.el7 ceph
+ceph-selinux.x86_64 1:11.2.0-0.el7 ceph
+libcephfs1.x86_64 1:11.0.2-0.el7 ceph
+librados2.x86_64 1:11.2.0-0.el7 ceph
+libradosstriper1.x86_64 1:11.2.0-0.el7 ceph
+librbd1.x86_64 1:11.2.0-0.el7 ceph
+librgw2.x86_64 1:11.2.0-0.el7 ceph
+python-cephfs.x86_64 1:11.2.0-0.el7 ceph
+python-rados.x86_64 1:11.2.0-0.el7 ceph
+python-rbd.x86_64 1:11.2.0-0.el7 ceph
+"""
+
+# from https://github.com/ansible/ansible-modules-core/issues/4318#issuecomment-251416661
+wrapped_output_4 = """
+ipxe-roms-qemu.noarch 20160127-1.git6366fa7a.el7
+ rhelosp-9.0-director-puddle
+quota.x86_64 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z
+quota-nls.noarch 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z
+rdma.noarch 7.2_4.1_rc6-2.el7 rhelosp-rhel-7.2-z
+screen.x86_64 4.1.0-0.23.20120314git3c2946.el7_2
+ rhelosp-rhel-7.2-z
+sos.noarch 3.2-36.el7ost.2 rhelosp-9.0-puddle
+sssd-client.x86_64 1.13.0-40.el7_2.12 rhelosp-rhel-7.2-z
+"""
+
+
+# A 'normal-ish' yum check-update output, without any wrapped lines
+unwrapped_output_rhel7 = """
+
+Loaded plugins: etckeeper, product-id, search-disabled-repos, subscription-
+ : manager
+This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
+
+NetworkManager-openvpn.x86_64 1:1.2.6-1.el7 epel
+NetworkManager-openvpn-gnome.x86_64 1:1.2.6-1.el7 epel
+cabal-install.x86_64 1.16.1.0-2.el7 epel
+cgit.x86_64 1.1-1.el7 epel
+python34-libs.x86_64 3.4.5-3.el7 epel
+python34-test.x86_64 3.4.5-3.el7 epel
+python34-tkinter.x86_64 3.4.5-3.el7 epel
+python34-tools.x86_64 3.4.5-3.el7 epel
+qgit.x86_64 2.6-4.el7 epel
+rdiff-backup.x86_64 1.2.8-12.el7 epel
+stoken-libs.x86_64 0.91-1.el7 epel
+xlockmore.x86_64 5.49-2.el7 epel
+"""
+
+ # Some wrapped obsoletes for appending to the output above, to test updates and obsoletes together
+wrapped_output_rhel7_obsoletes_postfix = """
+Obsoleting Packages
+ddashboard.x86_64 0.2.0.1-1.el7_3 mhlavink-developerdashboard
+ developerdashboard.x86_64 0.1.12.2-1.el7_2 @mhlavink-developerdashboard
+python-bugzilla.noarch 1.2.2-3.el7_2.1 mhlavink-developerdashboard
+ python-bugzilla-develdashboardfixes.noarch
+ 1.2.2-3.el7 @mhlavink-developerdashboard
+python2-futures.noarch 3.0.5-1.el7 epel
+ python-futures.noarch 3.0.3-1.el7 @epel
+python2-pip.noarch 8.1.2-5.el7 epel
+ python-pip.noarch 7.1.0-1.el7 @epel
+python2-pyxdg.noarch 0.25-6.el7 epel
+ pyxdg.noarch 0.25-5.el7 @epel
+python2-simplejson.x86_64 3.10.0-1.el7 epel
+ python-simplejson.x86_64 3.3.3-1.el7 @epel
+Security: kernel-3.10.0-327.28.2.el7.x86_64 is an installed security update
+Security: kernel-3.10.0-327.22.2.el7.x86_64 is the currently running version
+"""
+
+longname = """
+Loaded plugins: fastestmirror, priorities, rhnplugin
+This system is receiving updates from RHN Classic or Red Hat Satellite.
+Loading mirror speeds from cached hostfile
+
+xxxxxxxxxxxxxxxxxxxxxxxxxx.noarch
+ 1.16-1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+glibc.x86_64 2.17-157.el7_3.1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"""
+
+
+unwrapped_output_rhel7_obsoletes = unwrapped_output_rhel7 + wrapped_output_rhel7_obsoletes_postfix
+unwrapped_output_rhel7_expected_new_obsoletes_pkgs = [
+ "ddashboard", "python-bugzilla", "python2-futures", "python2-pip",
+ "python2-pyxdg", "python2-simplejson"
+]
+unwrapped_output_rhel7_expected_old_obsoletes_pkgs = [
+ "developerdashboard", "python-bugzilla-develdashboardfixes",
+ "python-futures", "python-pip", "pyxdg", "python-simplejson"
+]
+unwrapped_output_rhel7_expected_updated_pkgs = [
+ "NetworkManager-openvpn", "NetworkManager-openvpn-gnome", "cabal-install",
+ "cgit", "python34-libs", "python34-test", "python34-tkinter",
+ "python34-tools", "qgit", "rdiff-backup", "stoken-libs", "xlockmore"
+]
+
+
+class TestYumUpdateCheckParse(unittest.TestCase):
+ def _assert_expected(self, expected_pkgs, result):
+
+ for expected_pkg in expected_pkgs:
+ self.assertIn(expected_pkg, result)
+ self.assertEqual(len(result), len(expected_pkgs))
+ self.assertIsInstance(result, dict)
+
+ def test_empty_output(self):
+ res, obs = YumModule.parse_check_update("")
+ expected_pkgs = []
+ self._assert_expected(expected_pkgs, res)
+
+ def test_longname(self):
+ res, obs = YumModule.parse_check_update(longname)
+ expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc']
+ self._assert_expected(expected_pkgs, res)
+
+ def test_plugin_load_error(self):
+ res, obs = YumModule.parse_check_update(yum_plugin_load_error)
+ expected_pkgs = []
+ self._assert_expected(expected_pkgs, res)
+
+ def test_wrapped_output_1(self):
+ res, obs = YumModule.parse_check_update(wrapped_output_1)
+ expected_pkgs = ["vms-agent"]
+ self._assert_expected(expected_pkgs, res)
+
+ def test_wrapped_output_2(self):
+ res, obs = YumModule.parse_check_update(wrapped_output_2)
+ expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty",
+ "libtiff"]
+
+ self._assert_expected(expected_pkgs, res)
+
+ def test_wrapped_output_3(self):
+ res, obs = YumModule.parse_check_update(wrapped_output_3)
+ expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds",
+ "ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1",
+ "librados2", "libradosstriper1", "librbd1", "librgw2",
+ "python-cephfs", "python-rados", "python-rbd"]
+ self._assert_expected(expected_pkgs, res)
+
+ def test_wrapped_output_4(self):
+ res, obs = YumModule.parse_check_update(wrapped_output_4)
+
+ expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen",
+ "sos", "sssd-client"]
+ self._assert_expected(expected_pkgs, res)
+
+ def test_wrapped_output_rhel7(self):
+ res, obs = YumModule.parse_check_update(unwrapped_output_rhel7)
+ self._assert_expected(unwrapped_output_rhel7_expected_updated_pkgs, res)
+
+ def test_wrapped_output_rhel7_obsoletes(self):
+ res, obs = YumModule.parse_check_update(unwrapped_output_rhel7_obsoletes)
+ self._assert_expected(
+ unwrapped_output_rhel7_expected_updated_pkgs + unwrapped_output_rhel7_expected_new_obsoletes_pkgs,
+ res
+ )
+ self._assert_expected(unwrapped_output_rhel7_expected_old_obsoletes_pkgs, obs)
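+
+
+ # A minimal sketch of the interface exercised above: parse_check_update()
+ # returns a pair of dicts keyed by package name, available updates first
+ # and obsoletes second:
+ #
+ #     updates, obsoletes = YumModule.parse_check_update(wrapped_output_2)
+ #     assert 'libtiff' in updates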
diff --git a/test/units/modules/utils.py b/test/units/modules/utils.py
new file mode 100644
index 00000000..6d169e36
--- /dev/null
+++ b/test/units/modules/utils.py
@@ -0,0 +1,50 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from units.compat import unittest
+from units.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+
+def set_module_args(args):
+ if '_ansible_remote_tmp' not in args:
+ args['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in args:
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+def exit_json(*args, **kwargs):
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class ModuleTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+ self.mock_module.start()
+ self.mock_sleep = patch('time.sleep')
+ self.mock_sleep.start()
+ set_module_args({})
+ self.addCleanup(self.mock_module.stop)
+ self.addCleanup(self.mock_sleep.stop)
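+
+
+ # A minimal usage sketch, assuming a hypothetical module 'my_module' with a
+ # 'some_option' argument; the iptables tests above follow this pattern:
+ #
+ #     class TestMyModule(ModuleTestCase):
+ #         def test_something(self):
+ #             set_module_args({'some_option': 'value'})
+ #             with self.assertRaises(AnsibleExitJson) as result:
+ #                 my_module.main()  # exit_json() is patched to raise
+ #             self.assertTrue(result.exception.args[0]['changed'])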
diff --git a/test/units/parsing/__init__.py b/test/units/parsing/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/parsing/__init__.py
diff --git a/test/units/parsing/fixtures/ajson.json b/test/units/parsing/fixtures/ajson.json
new file mode 100644
index 00000000..dafec0b3
--- /dev/null
+++ b/test/units/parsing/fixtures/ajson.json
@@ -0,0 +1,19 @@
+{
+ "password": {
+ "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316562356435376162633631326264383934326565333633366238\n3863373264326461623132613931346165636465346337310a326434313830316337393263616439\n64653937313463396366633861363266633465663730303633323534363331316164623237363831\n3536333561393238370a313330316263373938326162386433313336613532653538376662306435\n3339\n"
+ },
+ "bar": {
+ "baz": [
+ {
+ "password": {
+ "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316562356435376162633631326264383934326565333633366238\n3863373264326461623132613931346165636465346337310a326434313830316337393263616439\n64653937313463396366633861363266633465663730303633323534363331316164623237363831\n3536333561393238370a313330316263373938326162386433313336613532653538376662306435\n3338\n"
+ }
+ }
+ ]
+ },
+ "foo": {
+ "password": {
+ "__ansible_vault": "$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316562356435376162633631326264383934326565333633366238\n3863373264326461623132613931346165636465346337310a326434313830316337393263616439\n64653937313463396366633861363266633465663730303633323534363331316164623237363831\n3536333561393238370a313330316263373938326162386433313336613532653538376662306435\n3339\n"
+ }
+ }
+}
diff --git a/test/units/parsing/fixtures/vault.yml b/test/units/parsing/fixtures/vault.yml
new file mode 100644
index 00000000..ca33ab25
--- /dev/null
+++ b/test/units/parsing/fixtures/vault.yml
@@ -0,0 +1,6 @@
+$ANSIBLE_VAULT;1.1;AES256
+33343734386261666161626433386662623039356366656637303939306563376130623138626165
+6436333766346533353463636566313332623130383662340a393835656134633665333861393331
+37666233346464636263636530626332623035633135363732623332313534306438393366323966
+3135306561356164310a343937653834643433343734653137383339323330626437313562306630
+3035
diff --git a/test/units/parsing/test_ajson.py b/test/units/parsing/test_ajson.py
new file mode 100644
index 00000000..c38f43ea
--- /dev/null
+++ b/test/units/parsing/test_ajson.py
@@ -0,0 +1,187 @@
+# Copyright 2018, Matt Martz <matt@sivel.net>
+# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import json
+
+import pytest
+
+from datetime import date, datetime
+from pytz import timezone as tz
+
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import AnsibleUnsafeText
+
+
+def test_AnsibleJSONDecoder_vault():
+ with open(os.path.join(os.path.dirname(__file__), 'fixtures/ajson.json')) as f:
+ data = json.load(f, cls=AnsibleJSONDecoder)
+
+ assert isinstance(data['password'], AnsibleVaultEncryptedUnicode)
+ assert isinstance(data['bar']['baz'][0]['password'], AnsibleVaultEncryptedUnicode)
+ assert isinstance(data['foo']['password'], AnsibleVaultEncryptedUnicode)
+
+
+def test_encode_decode_unsafe():
+ data = {
+ 'key_value': AnsibleUnsafeText(u'{#NOTACOMMENT#}'),
+ 'list': [AnsibleUnsafeText(u'{#NOTACOMMENT#}')],
+ 'list_dict': [{'key_value': AnsibleUnsafeText(u'{#NOTACOMMENT#}')}]}
+ json_expected = (
+ '{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}, '
+ '"list": [{"__ansible_unsafe": "{#NOTACOMMENT#}"}], '
+ '"list_dict": [{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}}]}'
+ )
+ assert json.dumps(data, cls=AnsibleJSONEncoder, preprocess_unsafe=True, sort_keys=True) == json_expected
+ assert json.loads(json_expected, cls=AnsibleJSONDecoder) == data
+
+
+def vault_data():
+ """
+ Prepare AnsibleVaultEncryptedUnicode test data for AnsibleJSONEncoder.default().
+
+ Return a list of tuples (input, expected).
+ """
+
+ with open(os.path.join(os.path.dirname(__file__), 'fixtures/ajson.json')) as f:
+ data = json.load(f, cls=AnsibleJSONDecoder)
+
+ data_0 = data['password']
+ data_1 = data['bar']['baz'][0]['password']
+
+ expected_0 = (u'$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316'
+ '562356435376162633631326264383934326565333633366238\n3863'
+ '373264326461623132613931346165636465346337310a32643431383'
+ '0316337393263616439\n646539373134633963666338613632666334'
+ '65663730303633323534363331316164623237363831\n35363335613'
+ '93238370a313330316263373938326162386433313336613532653538'
+ '376662306435\n3339\n')
+
+ expected_1 = (u'$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316'
+ '562356435376162633631326264383934326565333633366238\n3863'
+ '373264326461623132613931346165636465346337310a32643431383'
+ '0316337393263616439\n646539373134633963666338613632666334'
+ '65663730303633323534363331316164623237363831\n35363335613'
+ '93238370a313330316263373938326162386433313336613532653538'
+ '376662306435\n3338\n')
+
+ return [
+ (data_0, expected_0),
+ (data_1, expected_1),
+ ]
+
+
+class TestAnsibleJSONEncoder:
+
+ """
+ Namespace for testing AnsibleJSONEncoder.
+ """
+
+ @pytest.fixture(scope='class')
+ def mapping(self, request):
+ """
+ Return an object of the Mapping mock class.
+
+ The object is used for testing the handling of Mapping objects
+ in AnsibleJSONEncoder.default().
+ Using a plain dictionary instead is not suitable because
+ it would be handled by the default encoder of the superclass (json.JSONEncoder).
+ """
+
+ class M(Mapping):
+
+ """Mock mapping class."""
+
+ def __init__(self, *args, **kwargs):
+ self.__dict__.update(*args, **kwargs)
+
+ def __getitem__(self, key):
+ return self.__dict__[key]
+
+ def __iter__(self):
+ return iter(self.__dict__)
+
+ def __len__(self):
+ return len(self.__dict__)
+
+ return M(request.param)
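+
+ # Note: a plain dict never reaches default() because json.JSONEncoder
+ # serializes dicts natively; an M instance falls through to
+ # AnsibleJSONEncoder.default(), which is what these tests target.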
+
+ @pytest.fixture
+ def ansible_json_encoder(self):
+ """Return AnsibleJSONEncoder object."""
+ return AnsibleJSONEncoder()
+
+ ###############
+ # Test methods:
+
+ @pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ (datetime(2019, 5, 14, 13, 39, 38, 569047), '2019-05-14T13:39:38.569047'),
+ (datetime(2019, 5, 14, 13, 47, 16, 923866), '2019-05-14T13:47:16.923866'),
+ (date(2019, 5, 14), '2019-05-14'),
+ (date(2020, 5, 14), '2020-05-14'),
+ (datetime(2019, 6, 15, 14, 45, tzinfo=tz('UTC')), '2019-06-15T14:45:00+00:00'),
+ (datetime(2019, 6, 15, 14, 45, tzinfo=tz('Europe/Helsinki')), '2019-06-15T14:45:00+01:40'),
+ ]
+ )
+ def test_date_datetime(self, ansible_json_encoder, test_input, expected):
+ """
+ Test for passing datetime.date or datetime.datetime objects to AnsibleJSONEncoder.default().
+ """
+ assert ansible_json_encoder.default(test_input) == expected
+
+ @pytest.mark.parametrize(
+ 'mapping,expected',
+ [
+ ({1: 1}, {1: 1}),
+ ({2: 2}, {2: 2}),
+ ({1: 2}, {1: 2}),
+ ({2: 1}, {2: 1}),
+ ], indirect=['mapping'],
+ )
+ def test_mapping(self, ansible_json_encoder, mapping, expected):
+ """
+ Test for passing Mapping object to AnsibleJSONEncoder.default().
+ """
+ assert ansible_json_encoder.default(mapping) == expected
+
+ @pytest.mark.parametrize('test_input,expected', vault_data())
+ def test_ansible_json_encoder_vault(self, ansible_json_encoder, test_input, expected):
+ """
+ Test for passing AnsibleVaultEncryptedUnicode to AnsibleJSONEncoder.default().
+ """
+ assert ansible_json_encoder.default(test_input) == {'__ansible_vault': expected}
+ assert json.dumps(test_input, cls=AnsibleJSONEncoder, preprocess_unsafe=True) == '{"__ansible_vault": "%s"}' % expected.replace('\n', '\\n')
+
+ @pytest.mark.parametrize(
+ 'test_input,expected',
+ [
+ ({1: 'first'}, {1: 'first'}),
+ ({2: 'second'}, {2: 'second'}),
+ ]
+ )
+ def test_default_encoder(self, ansible_json_encoder, test_input, expected):
+ """
+ Test for the default encoder of AnsibleJSONEncoder.default().
+
+ If objects of different classes that are not tested above were passed,
+ AnsibleJSONEncoder.default() invokes 'default()' method of json.JSONEncoder superclass.
+ """
+ assert ansible_json_encoder.default(test_input) == expected
+
+ @pytest.mark.parametrize('test_input', [1, 1.1, 'string', [1, 2], set('set'), True, None])
+ def test_default_encoder_unserializable(self, ansible_json_encoder, test_input):
+ """
+ Test for the default encoder of AnsibleJSONEncoder.default(), not serializable objects.
+
+ It must fail with TypeError 'object is not serializable'.
+ """
+ with pytest.raises(TypeError):
+ ansible_json_encoder.default(test_input)
diff --git a/test/units/parsing/test_dataloader.py b/test/units/parsing/test_dataloader.py
new file mode 100644
index 00000000..3cc8d451
--- /dev/null
+++ b/test/units/parsing/test_dataloader.py
@@ -0,0 +1,239 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from units.compat import unittest
+from units.compat.mock import patch, mock_open
+from ansible.errors import AnsibleParserError, yaml_strings, AnsibleFileNotFound
+from ansible.parsing.vault import AnsibleVaultError
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import PY3
+
+from units.mock.vault_helper import TextVaultSecret
+from ansible.parsing.dataloader import DataLoader
+
+from units.mock.path import mock_unfrackpath_noop
+
+
+class TestDataLoader(unittest.TestCase):
+
+ def setUp(self):
+ self._loader = DataLoader()
+
+ @patch('os.path.exists')
+ def test__is_role(self, p_exists):
+ p_exists.side_effect = lambda p: p == b'test_path/tasks/main.yml'
+ self.assertTrue(self._loader._is_role('test_path/tasks'))
+ self.assertTrue(self._loader._is_role('test_path/'))
+
+ @patch.object(DataLoader, '_get_file_contents')
+ def test_parse_json_from_file(self, mock_def):
+ mock_def.return_value = (b"""{"a": 1, "b": 2, "c": 3}""", True)
+ output = self._loader.load_from_file('dummy_json.txt')
+ self.assertEqual(output, dict(a=1, b=2, c=3))
+
+ @patch.object(DataLoader, '_get_file_contents')
+ def test_parse_yaml_from_file(self, mock_def):
+ mock_def.return_value = (b"""
+ a: 1
+ b: 2
+ c: 3
+ """, True)
+ output = self._loader.load_from_file('dummy_yaml.txt')
+ self.assertEqual(output, dict(a=1, b=2, c=3))
+
+ @patch.object(DataLoader, '_get_file_contents')
+ def test_parse_fail_from_file(self, mock_def):
+ mock_def.return_value = (b"""
+ TEXT:
+ ***
+ NOT VALID
+ """, True)
+ self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
+
+ @patch('ansible.errors.AnsibleError._get_error_lines_from_file')
+ @patch.object(DataLoader, '_get_file_contents')
+ def test_tab_error(self, mock_def, mock_get_error_lines):
+ mock_def.return_value = (u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""", True)
+ mock_get_error_lines.return_value = ('''\tblip: baz''', '''..foo: bar''')
+ with self.assertRaises(AnsibleParserError) as cm:
+ self._loader.load_from_file('dummy_yaml_text.txt')
+ self.assertIn(yaml_strings.YAML_COMMON_LEADING_TAB_ERROR, str(cm.exception))
+ self.assertIn('foo: bar', str(cm.exception))
+
+ @patch('ansible.parsing.dataloader.unfrackpath', mock_unfrackpath_noop)
+ @patch.object(DataLoader, '_is_role')
+ def test_path_dwim_relative(self, mock_is_role):
+ """
+ simulate a nested dynamic include:
+
+ playbook.yml:
+ - hosts: localhost
+ roles:
+ - { role: 'testrole' }
+
+ testrole/tasks/main.yml:
+ - include: "include1.yml"
+ static: no
+
+ testrole/tasks/include1.yml:
+ - include: include2.yml
+ static: no
+
+ testrole/tasks/include2.yml:
+ - debug: msg="blah"
+ """
+ mock_is_role.return_value = False
+ with patch('os.path.exists') as mock_os_path_exists:
+ mock_os_path_exists.return_value = False
+ self._loader.path_dwim_relative('/tmp/roles/testrole/tasks', 'tasks', 'included2.yml')
+
+ # Fetch first args for every call
+ # mock_os_path_exists.assert_any_call isn't used because os.path.normpath must be used in order to compare paths
+ called_args = [os.path.normpath(to_text(call[0][0])) for call in mock_os_path_exists.call_args_list]
+
+ # the 'path_dwim_relative' docstring says 'with or without explicitly named dirname subdirs':
+ self.assertIn('/tmp/roles/testrole/tasks/included2.yml', called_args)
+ self.assertIn('/tmp/roles/testrole/tasks/tasks/included2.yml', called_args)
+
+ # relative directories below are taken into account too:
+ self.assertIn('tasks/included2.yml', called_args)
+ self.assertIn('included2.yml', called_args)
+
+ def test_path_dwim_root(self):
+ self.assertEqual(self._loader.path_dwim('/'), '/')
+
+ def test_path_dwim_home(self):
+ self.assertEqual(self._loader.path_dwim('~'), os.path.expanduser('~'))
+
+ def test_path_dwim_tilde_slash(self):
+ self.assertEqual(self._loader.path_dwim('~/'), os.path.expanduser('~'))
+
+ def test_get_real_file(self):
+ self.assertEqual(self._loader.get_real_file(__file__), __file__)
+
+ def test_is_file(self):
+ self.assertTrue(self._loader.is_file(__file__))
+
+ def test_is_directory_positive(self):
+ self.assertTrue(self._loader.is_directory(os.path.dirname(__file__)))
+
+ def test_get_file_contents_none_path(self):
+ self.assertRaisesRegexp(AnsibleParserError, 'Invalid filename',
+ self._loader._get_file_contents, None)
+
+ def test_get_file_contents_non_existent_path(self):
+ self.assertRaises(AnsibleFileNotFound, self._loader._get_file_contents, '/non_existent_file')
+
+
+class TestPathDwimRelativeDataLoader(unittest.TestCase):
+
+ def setUp(self):
+ self._loader = DataLoader()
+
+ def test_all_slash(self):
+ self.assertEqual(self._loader.path_dwim_relative('/', '/', '/'), '/')
+
+ def test_path_endswith_role(self):
+ self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='/'), '/')
+
+ def test_path_endswith_role_main_yml(self):
+ self.assertIn('main.yml', self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='main.yml'))
+
+ def test_path_endswith_role_source_tilde(self):
+ self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='~/'), os.path.expanduser('~'))
+
+
+class TestPathDwimRelativeStackDataLoader(unittest.TestCase):
+
+ def setUp(self):
+ self._loader = DataLoader()
+
+ def test_none(self):
+ self.assertRaisesRegexp(AnsibleFileNotFound, 'on the Ansible Controller', self._loader.path_dwim_relative_stack, None, None, None)
+
+ def test_empty_strings(self):
+ self.assertEqual(self._loader.path_dwim_relative_stack('', '', ''), './')
+
+ def test_empty_lists(self):
+ self.assertEqual(self._loader.path_dwim_relative_stack([], '', '~/'), os.path.expanduser('~'))
+
+ def test_all_slash(self):
+ self.assertEqual(self._loader.path_dwim_relative_stack('/', '/', '/'), '/')
+
+ def test_path_endswith_role(self):
+ self.assertEqual(self._loader.path_dwim_relative_stack(paths=['foo/bar/tasks/'], dirname='/', source='/'), '/')
+
+ def test_path_endswith_role_source_tilde(self):
+ self.assertEqual(self._loader.path_dwim_relative_stack(paths=['foo/bar/tasks/'], dirname='/', source='~/'), os.path.expanduser('~'))
+
+ def test_path_endswith_role_source_main_yml(self):
+ self.assertRaises(AnsibleFileNotFound, self._loader.path_dwim_relative_stack, ['foo/bar/tasks/'], '/', 'main.yml')
+
+ def test_path_endswith_role_source_main_yml_source_in_dirname(self):
+ self.assertRaises(AnsibleFileNotFound, self._loader.path_dwim_relative_stack, 'foo/bar/tasks/', 'tasks', 'tasks/main.yml')
+
+
+class TestDataLoaderWithVault(unittest.TestCase):
+
+ def setUp(self):
+ self._loader = DataLoader()
+ vault_secrets = [('default', TextVaultSecret('ansible'))]
+ self._loader.set_vault_secrets(vault_secrets)
+ self.test_vault_data_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'vault.yml')
+
+ def tearDown(self):
+ pass
+
+ def test_get_real_file_vault(self):
+ real_file_path = self._loader.get_real_file(self.test_vault_data_path)
+ self.assertTrue(os.path.exists(real_file_path))
+
+ def test_get_real_file_vault_no_vault(self):
+ self._loader.set_vault_secrets(None)
+ self.assertRaises(AnsibleParserError, self._loader.get_real_file, self.test_vault_data_path)
+
+ def test_get_real_file_vault_wrong_password(self):
+ wrong_vault = [('default', TextVaultSecret('wrong_password'))]
+ self._loader.set_vault_secrets(wrong_vault)
+ self.assertRaises(AnsibleVaultError, self._loader.get_real_file, self.test_vault_data_path)
+
+ def test_get_real_file_not_a_path(self):
+ self.assertRaisesRegexp(AnsibleParserError, 'Invalid filename', self._loader.get_real_file, None)
+
+ @patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
+ def test_parse_from_vault_1_1_file(self):
+ vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
+33343734386261666161626433386662623039356366656637303939306563376130623138626165
+6436333766346533353463636566313332623130383662340a393835656134633665333861393331
+37666233346464636263636530626332623035633135363732623332313534306438393366323966
+3135306561356164310a343937653834643433343734653137383339323330626437313562306630
+3035
+"""
+ if PY3:
+ builtins_name = 'builtins'
+ else:
+ builtins_name = '__builtin__'
+
+ with patch(builtins_name + '.open', mock_open(read_data=vaulted_data.encode('utf-8'))):
+ output = self._loader.load_from_file('dummy_vault.txt')
+ self.assertEqual(output, dict(foo='bar'))
diff --git a/test/units/parsing/test_mod_args.py b/test/units/parsing/test_mod_args.py
new file mode 100644
index 00000000..50c3b331
--- /dev/null
+++ b/test/units/parsing/test_mod_args.py
@@ -0,0 +1,137 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+import re
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.utils.sentinel import Sentinel
+
+
+class TestModArgsDwim:
+
+ # TODO: add tests that construct ModuleArgsParser with a task reference
+ # TODO: verify the AnsibleError raised on failure knows the task
+ # and the task knows the line numbers
+
+ INVALID_MULTIPLE_ACTIONS = (
+ ({'action': 'shell echo hi', 'local_action': 'shell echo hi'}, "action and local_action are mutually exclusive"),
+ ({'action': 'shell echo hi', 'shell': 'echo hi'}, "conflicting action statements: shell, shell"),
+ ({'local_action': 'shell echo hi', 'shell': 'echo hi'}, "conflicting action statements: shell, shell"),
+ )
+
+ def _debug(self, mod, args, to):
+ print("RETURNED module = {0}".format(mod))
+ print(" args = {0}".format(args))
+ print(" to = {0}".format(to))
+
+ def test_basic_shell(self):
+ m = ModuleArgsParser(dict(shell='echo hi'))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'shell'
+ assert args == dict(
+ _raw_params='echo hi',
+ )
+ assert to is Sentinel
+
+ def test_basic_command(self):
+ m = ModuleArgsParser(dict(command='echo hi'))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'command'
+ assert args == dict(
+ _raw_params='echo hi',
+ )
+ assert to is Sentinel
+
+ def test_shell_with_modifiers(self):
+ m = ModuleArgsParser(dict(shell='/bin/foo creates=/tmp/baz removes=/tmp/bleep'))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'shell'
+ assert args == dict(
+ creates='/tmp/baz',
+ removes='/tmp/bleep',
+ _raw_params='/bin/foo',
+ )
+ assert to is Sentinel
+
+ def test_normal_usage(self):
+ m = ModuleArgsParser(dict(copy='src=a dest=b'))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'copy'
+ assert args == dict(src='a', dest='b')
+ assert to is Sentinel
+
+ def test_complex_args(self):
+ m = ModuleArgsParser(dict(copy=dict(src='a', dest='b')))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'copy'
+ assert args == dict(src='a', dest='b')
+ assert to is Sentinel
+
+ def test_action_with_complex(self):
+ m = ModuleArgsParser(dict(action=dict(module='copy', src='a', dest='b')))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'copy'
+ assert args == dict(src='a', dest='b')
+ assert to is Sentinel
+
+ def test_action_with_complex_and_complex_args(self):
+ m = ModuleArgsParser(dict(action=dict(module='copy', args=dict(src='a', dest='b'))))
+ mod, args, to = m.parse()
+ self._debug(mod, args, to)
+
+ assert mod == 'copy'
+ assert args == dict(src='a', dest='b')
+ assert to is Sentinel
+
+ def test_local_action_string(self):
+ m = ModuleArgsParser(dict(local_action='copy src=a dest=b'))
+ mod, args, delegate_to = m.parse()
+ self._debug(mod, args, delegate_to)
+
+ assert mod == 'copy'
+ assert args == dict(src='a', dest='b')
+ assert delegate_to == 'localhost'
+
+ @pytest.mark.parametrize("args_dict, msg", INVALID_MULTIPLE_ACTIONS)
+ def test_multiple_actions(self, args_dict, msg):
+ m = ModuleArgsParser(args_dict)
+ with pytest.raises(AnsibleParserError) as err:
+ m.parse()
+
+ assert err.value.args[0] == msg
+
+ def test_multiple_actions_ping_shell(self):
+ args_dict = {'ping': 'data=hi', 'shell': 'echo hi'}
+ m = ModuleArgsParser(args_dict)
+ with pytest.raises(AnsibleParserError) as err:
+ m.parse()
+
+ assert err.value.args[0].startswith("conflicting action statements: ")
+ actions = set(re.search(r'(\w+), (\w+)', err.value.args[0]).groups())
+ assert actions == set(['ping', 'shell'])
+
+ def test_bogus_action(self):
+ args_dict = {'bogusaction': {}}
+ m = ModuleArgsParser(args_dict)
+ with pytest.raises(AnsibleParserError) as err:
+ m.parse()
+
+ assert err.value.args[0].startswith("couldn't resolve module/action 'bogusaction'")
diff --git a/test/units/parsing/test_splitter.py b/test/units/parsing/test_splitter.py
new file mode 100644
index 00000000..a37de0f9
--- /dev/null
+++ b/test/units/parsing/test_splitter.py
@@ -0,0 +1,110 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.parsing.splitter import split_args, parse_kv
+
+import pytest
+
+SPLIT_DATA = (
+ (u'a',
+ [u'a'],
+ {u'_raw_params': u'a'}),
+ (u'a=b',
+ [u'a=b'],
+ {u'a': u'b'}),
+ (u'a="foo bar"',
+ [u'a="foo bar"'],
+ {u'a': u'foo bar'}),
+ (u'"foo bar baz"',
+ [u'"foo bar baz"'],
+ {u'_raw_params': '"foo bar baz"'}),
+ (u'foo bar baz',
+ [u'foo', u'bar', u'baz'],
+ {u'_raw_params': u'foo bar baz'}),
+ (u'a=b c="foo bar"',
+ [u'a=b', u'c="foo bar"'],
+ {u'a': u'b', u'c': u'foo bar'}),
+ (u'a="echo \\"hello world\\"" b=bar',
+ [u'a="echo \\"hello world\\""', u'b=bar'],
+ {u'a': u'echo "hello world"', u'b': u'bar'}),
+ (u'a="multi\nline"',
+ [u'a="multi\nline"'],
+ {u'a': u'multi\nline'}),
+ (u'a="blank\n\nline"',
+ [u'a="blank\n\nline"'],
+ {u'a': u'blank\n\nline'}),
+ (u'a="blank\n\n\nlines"',
+ [u'a="blank\n\n\nlines"'],
+ {u'a': u'blank\n\n\nlines'}),
+ (u'a="a long\nmessage\\\nabout a thing\n"',
+ [u'a="a long\nmessage\\\nabout a thing\n"'],
+ {u'a': u'a long\nmessage\\\nabout a thing\n'}),
+ (u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
+ [u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
+ {u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
+ (u'a={{jinja}}',
+ [u'a={{jinja}}'],
+ {u'a': u'{{jinja}}'}),
+ (u'a={{ jinja }}',
+ [u'a={{ jinja }}'],
+ {u'a': u'{{ jinja }}'}),
+ (u'a="{{jinja}}"',
+ [u'a="{{jinja}}"'],
+ {u'a': u'{{jinja}}'}),
+ (u'a={{ jinja }}{{jinja2}}',
+ [u'a={{ jinja }}{{jinja2}}'],
+ {u'a': u'{{ jinja }}{{jinja2}}'}),
+ (u'a="{{ jinja }}{{jinja2}}"',
+ [u'a="{{ jinja }}{{jinja2}}"'],
+ {u'a': u'{{ jinja }}{{jinja2}}'}),
+ (u'a={{jinja}} b={{jinja2}}',
+ [u'a={{jinja}}', u'b={{jinja2}}'],
+ {u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
+ (u'a="{{jinja}}\n" b="{{jinja2}}\n"',
+ [u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'],
+ {u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}),
+ (u'a="café eñyei"',
+ [u'a="café eñyei"'],
+ {u'a': u'café eñyei'}),
+ (u'a=café b=eñyei',
+ [u'a=café', u'b=eñyei'],
+ {u'a': u'café', u'b': u'eñyei'}),
+ (u'a={{ foo | some_filter(\' \', " ") }} b=bar',
+ [u'a={{ foo | some_filter(\' \', " ") }}', u'b=bar'],
+ {u'a': u'{{ foo | some_filter(\' \', " ") }}', u'b': u'bar'}),
+ (u'One\n Two\n Three\n',
+ [u'One\n ', u'Two\n ', u'Three\n'],
+ {u'_raw_params': u'One\n Two\n Three\n'}),
+)
+
+SPLIT_ARGS = ((test[0], test[1]) for test in SPLIT_DATA)
+PARSE_KV = ((test[0], test[2]) for test in SPLIT_DATA)
+
+
+@pytest.mark.parametrize("args, expected", SPLIT_ARGS)
+def test_split_args(args, expected):
+ assert split_args(args) == expected
+
+
+@pytest.mark.parametrize("args, expected", PARSE_KV)
+def test_parse_kv(args, expected):
+ assert parse_kv(args) == expected
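+
+
+ # Note: SPLIT_ARGS and PARSE_KV are generator expressions over the shared
+ # SPLIT_DATA table; pytest's parametrize() materializes each exactly once
+ # when the decorator is applied, so one table drives both tests without
+ # duplicating the cases.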
diff --git a/test/units/parsing/test_unquote.py b/test/units/parsing/test_unquote.py
new file mode 100644
index 00000000..4b4260e7
--- /dev/null
+++ b/test/units/parsing/test_unquote.py
@@ -0,0 +1,51 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.parsing.quoting import unquote
+
+import pytest
+
+UNQUOTE_DATA = (
+ (u'1', u'1'),
+ (u'\'1\'', u'1'),
+ (u'"1"', u'1'),
+ (u'"1 \'2\'"', u'1 \'2\''),
+ (u'\'1 "2"\'', u'1 "2"'),
+ (u'\'1 \'2\'\'', u'1 \'2\''),
+ (u'"1\\"', u'"1\\"'),
+ (u'\'1\\\'', u'\'1\\\''),
+ (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
+ (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
+ (u'"', u'"'),
+ (u'\'', u'\''),
+ # Not entirely sure these are good but they match the current
+ # behaviour
+ (u'"1""2"', u'1""2'),
+ (u'\'1\'\'2\'', u'1\'\'2'),
+ (u'"1" 2 "3"', u'1" 2 "3'),
+ (u'"1"\'2\'"3"', u'1"\'2\'"3'),
+)
+
+
+@pytest.mark.parametrize("quoted, expected", UNQUOTE_DATA)
+def test_unquote(quoted, expected):
+ assert unquote(quoted) == expected
diff --git a/test/units/parsing/utils/__init__.py b/test/units/parsing/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/parsing/utils/__init__.py
diff --git a/test/units/parsing/utils/test_addresses.py b/test/units/parsing/utils/test_addresses.py
new file mode 100644
index 00000000..4f7304f5
--- /dev/null
+++ b/test/units/parsing/utils/test_addresses.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible.parsing.utils.addresses import parse_address
+
+
+class TestParseAddress(unittest.TestCase):
+
+ tests = {
+ # IPv4 addresses
+ '192.0.2.3': ['192.0.2.3', None],
+ '192.0.2.3:23': ['192.0.2.3', 23],
+
+ # IPv6 addresses
+ '::': ['::', None],
+ '::1': ['::1', None],
+ '[::1]:442': ['::1', 442],
+ 'abcd:ef98:7654:3210:abcd:ef98:7654:3210': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', None],
+ '[abcd:ef98:7654:3210:abcd:ef98:7654:3210]:42': ['abcd:ef98:7654:3210:abcd:ef98:7654:3210', 42],
+ '1234:5678:9abc:def0:1234:5678:9abc:def0': ['1234:5678:9abc:def0:1234:5678:9abc:def0', None],
+ '1234::9abc:def0:1234:5678:9abc:def0': ['1234::9abc:def0:1234:5678:9abc:def0', None],
+ '1234:5678::def0:1234:5678:9abc:def0': ['1234:5678::def0:1234:5678:9abc:def0', None],
+ '1234:5678:9abc::1234:5678:9abc:def0': ['1234:5678:9abc::1234:5678:9abc:def0', None],
+ '1234:5678:9abc:def0::5678:9abc:def0': ['1234:5678:9abc:def0::5678:9abc:def0', None],
+ '1234:5678:9abc:def0:1234::9abc:def0': ['1234:5678:9abc:def0:1234::9abc:def0', None],
+ '1234:5678:9abc:def0:1234:5678::def0': ['1234:5678:9abc:def0:1234:5678::def0', None],
+ '1234:5678:9abc:def0:1234:5678::': ['1234:5678:9abc:def0:1234:5678::', None],
+ '::9abc:def0:1234:5678:9abc:def0': ['::9abc:def0:1234:5678:9abc:def0', None],
+ '0:0:0:0:0:ffff:1.2.3.4': ['0:0:0:0:0:ffff:1.2.3.4', None],
+ '0:0:0:0:0:0:1.2.3.4': ['0:0:0:0:0:0:1.2.3.4', None],
+ '::ffff:1.2.3.4': ['::ffff:1.2.3.4', None],
+ '::1.2.3.4': ['::1.2.3.4', None],
+ '1234::': ['1234::', None],
+
+ # Hostnames
+ 'some-host': ['some-host', None],
+ 'some-host:80': ['some-host', 80],
+ 'some.host.com:492': ['some.host.com', 492],
+ '[some.host.com]:493': ['some.host.com', 493],
+ 'a-b.3foo_bar.com:23': ['a-b.3foo_bar.com', 23],
+ u'fóöbär': [u'fóöbär', None],
+ u'fóöbär:32': [u'fóöbär', 32],
+ u'fóöbär.éxàmplê.com:632': [u'fóöbär.éxàmplê.com', 632],
+
+ # Various errors
+ '': [None, None],
+ 'some..host': [None, None],
+ 'some.': [None, None],
+ '[example.com]': [None, None],
+ 'some-': [None, None],
+ 'some-.foo.com': [None, None],
+ 'some.-foo.com': [None, None],
+ }
+
+ range_tests = {
+ '192.0.2.[3:10]': ['192.0.2.[3:10]', None],
+ '192.0.2.[3:10]:23': ['192.0.2.[3:10]', 23],
+ 'abcd:ef98::7654:[1:9]': ['abcd:ef98::7654:[1:9]', None],
+ '[abcd:ef98::7654:[6:32]]:2222': ['abcd:ef98::7654:[6:32]', 2222],
+ '[abcd:ef98::7654:[9ab3:fcb7]]:2222': ['abcd:ef98::7654:[9ab3:fcb7]', 2222],
+ u'fóöb[a:c]r.éxàmplê.com:632': [u'fóöb[a:c]r.éxàmplê.com', 632],
+ '[a:b]foo.com': ['[a:b]foo.com', None],
+ 'foo[a:b].com': ['foo[a:b].com', None],
+ 'foo[a:b]:42': ['foo[a:b]', 42],
+ 'foo[a-b]-.com': [None, None],
+ 'foo[a-b]:32': [None, None],
+ 'foo[x-y]': [None, None],
+ }
+
+ def test_without_ranges(self):
+ for t in self.tests:
+ test = self.tests[t]
+
+ try:
+ (host, port) = parse_address(t)
+ except Exception:
+ host = None
+ port = None
+
+ assert host == test[0]
+ assert port == test[1]
+
+ def test_with_ranges(self):
+ for t in self.range_tests:
+ test = self.range_tests[t]
+
+ try:
+ (host, port) = parse_address(t, allow_ranges=True)
+ except Exception:
+ host = None
+ port = None
+
+ assert host == test[0]
+ assert port == test[1]
diff --git a/test/units/parsing/utils/test_jsonify.py b/test/units/parsing/utils/test_jsonify.py
new file mode 100644
index 00000000..37be7824
--- /dev/null
+++ b/test/units/parsing/utils/test_jsonify.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, James Cammarata <jimi@sngx.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.parsing.utils.jsonify import jsonify
+
+
+class TestJsonify(unittest.TestCase):
+ def test_jsonify_simple(self):
+ self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}')
+
+ def test_jsonify_simple_format(self):
+ res = jsonify(dict(a=1, b=2, c=3), format=True)
+ cleaned = "".join([x.strip() for x in res.splitlines()])
+ self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}')
+
+ def test_jsonify_unicode(self):
+ self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}')
+
+ def test_jsonify_empty(self):
+ self.assertEqual(jsonify(None), '{}')
diff --git a/test/units/parsing/utils/test_yaml.py b/test/units/parsing/utils/test_yaml.py
new file mode 100644
index 00000000..27b2905a
--- /dev/null
+++ b/test/units/parsing/utils/test_yaml.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# (c) 2017, Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.utils.yaml import from_yaml
+
+
+def test_from_yaml_simple():
+ assert from_yaml(u'---\n- test: 1\n test2: "2"\n- caf\xe9: "caf\xe9"') == [{u'test': 1, u'test2': u"2"}, {u"caf\xe9": u"caf\xe9"}]
+
+
+def test_bad_yaml():
+ with pytest.raises(AnsibleParserError):
+ from_yaml(u'foo: bar: baz')
diff --git a/test/units/parsing/vault/__init__.py b/test/units/parsing/vault/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/parsing/vault/__init__.py
diff --git a/test/units/parsing/vault/test_vault.py b/test/units/parsing/vault/test_vault.py
new file mode 100644
index 00000000..a9c4fc9e
--- /dev/null
+++ b/test/units/parsing/vault/test_vault.py
@@ -0,0 +1,941 @@
+# -*- coding: utf-8 -*-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import binascii
+import io
+import os
+import tempfile
+
+from binascii import hexlify
+import pytest
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible import errors
+from ansible.module_utils import six
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.parsing import vault
+
+from units.mock.loader import DictDataLoader
+from units.mock.vault_helper import TextVaultSecret
+
+
+class TestUnhexlify(unittest.TestCase):
+ def test(self):
+ b_plain_data = b'some text to hexlify'
+ b_data = hexlify(b_plain_data)
+ res = vault._unhexlify(b_data)
+ self.assertEqual(res, b_plain_data)
+
+ def test_odd_length(self):
+ b_data = b'123456789abcdefghijklmnopqrstuvwxyz'
+
+ self.assertRaisesRegexp(vault.AnsibleVaultFormatError,
+ '.*Vault format unhexlify error.*',
+ vault._unhexlify,
+ b_data)
+
+ def test_nonhex(self):
+ b_data = b'6z36316566653264333665333637623064303639353237620a636366633565663263336335656532'
+
+ self.assertRaisesRegexp(vault.AnsibleVaultFormatError,
+ '.*Vault format unhexlify error.*Non-hexadecimal digit found',
+ vault._unhexlify,
+ b_data)
+
+
+class TestParseVaulttext(unittest.TestCase):
+ def test(self):
+ vaulttext_envelope = u'''$ANSIBLE_VAULT;1.1;AES256
+33363965326261303234626463623963633531343539616138316433353830356566396130353436
+3562643163366231316662386565383735653432386435610a306664636137376132643732393835
+63383038383730306639353234326630666539346233376330303938323639306661313032396437
+6233623062366136310a633866373936313238333730653739323461656662303864663666653563
+3138'''
+
+ b_vaulttext_envelope = to_bytes(vaulttext_envelope, errors='strict', encoding='utf-8')
+ b_vaulttext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext_envelope)
+ res = vault.parse_vaulttext(b_vaulttext)
+ self.assertIsInstance(res[0], bytes)
+ self.assertIsInstance(res[1], bytes)
+ self.assertIsInstance(res[2], bytes)
+
+ def test_non_hex(self):
+ vaulttext_envelope = u'''$ANSIBLE_VAULT;1.1;AES256
+3336396J326261303234626463623963633531343539616138316433353830356566396130353436
+3562643163366231316662386565383735653432386435610a306664636137376132643732393835
+63383038383730306639353234326630666539346233376330303938323639306661313032396437
+6233623062366136310a633866373936313238333730653739323461656662303864663666653563
+3138'''
+
+ b_vaulttext_envelope = to_bytes(vaulttext_envelope, errors='strict', encoding='utf-8')
+ b_vaulttext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext_envelope)
+ self.assertRaisesRegexp(vault.AnsibleVaultFormatError,
+ '.*Vault format unhexlify error.*Non-hexadecimal digit found',
+ vault.parse_vaulttext,
+ b_vaulttext_envelope)
+
+
+class TestVaultSecret(unittest.TestCase):
+ def test(self):
+ secret = vault.VaultSecret()
+ secret.load()
+ self.assertIsNone(secret._bytes)
+
+ def test_bytes(self):
+ some_text = u'私はガラスを食べられます。それは私を傷つけません。'
+ _bytes = to_bytes(some_text)
+ secret = vault.VaultSecret(_bytes)
+ secret.load()
+ self.assertEqual(secret.bytes, _bytes)
+
+
+class TestPromptVaultSecret(unittest.TestCase):
+ def test_empty_prompt_formats(self):
+ secret = vault.PromptVaultSecret(vault_id='test_id', prompt_formats=[])
+ secret.load()
+ self.assertIsNone(secret._bytes)
+
+ @patch('ansible.parsing.vault.display.prompt', return_value='the_password')
+ def test_prompt_formats_none(self, mock_display_prompt):
+ secret = vault.PromptVaultSecret(vault_id='test_id')
+ secret.load()
+ self.assertEqual(secret._bytes, b'the_password')
+
+ @patch('ansible.parsing.vault.display.prompt', return_value='the_password')
+ def test_custom_prompt(self, mock_display_prompt):
+ secret = vault.PromptVaultSecret(vault_id='test_id',
+ prompt_formats=['The cow flies at midnight: '])
+ secret.load()
+ self.assertEqual(secret._bytes, b'the_password')
+
+ @patch('ansible.parsing.vault.display.prompt', side_effect=EOFError)
+ def test_prompt_eoferror(self, mock_display_prompt):
+ secret = vault.PromptVaultSecret(vault_id='test_id')
+ self.assertRaisesRegexp(vault.AnsibleVaultError,
+ 'EOFError.*test_id',
+ secret.load)
+
+ @patch('ansible.parsing.vault.display.prompt', side_effect=['first_password', 'second_password'])
+ def test_prompt_passwords_dont_match(self, mock_display_prompt):
+ secret = vault.PromptVaultSecret(vault_id='test_id',
+ prompt_formats=['Vault password: ',
+ 'Confirm Vault password: '])
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'Passwords do not match',
+ secret.load)
+
+
+class TestFileVaultSecret(unittest.TestCase):
+ def setUp(self):
+ self.vault_password = "test-vault-password"
+ text_secret = TextVaultSecret(self.vault_password)
+ self.vault_secrets = [('foo', text_secret)]
+
+ def test(self):
+ secret = vault.FileVaultSecret()
+ self.assertIsNone(secret._bytes)
+ self.assertIsNone(secret._text)
+
+ def test_repr_empty(self):
+ secret = vault.FileVaultSecret()
+ self.assertEqual(repr(secret), "FileVaultSecret()")
+
+ def test_repr(self):
+ tmp_file = tempfile.NamedTemporaryFile(delete=False)
+ fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
+
+ secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+ filename = tmp_file.name
+ tmp_file.close()
+ self.assertEqual(repr(secret), "FileVaultSecret(filename='%s')" % filename)
+
+ def test_empty_bytes(self):
+ secret = vault.FileVaultSecret()
+ self.assertIsNone(secret.bytes)
+
+ def test_file(self):
+ password = 'some password'
+
+ tmp_file = tempfile.NamedTemporaryFile(delete=False)
+ tmp_file.write(to_bytes(password))
+ tmp_file.close()
+
+ fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
+
+ secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+ secret.load()
+
+ os.unlink(tmp_file.name)
+
+ self.assertEqual(secret.bytes, to_bytes(password))
+
+ def test_file_empty(self):
+
+ tmp_file = tempfile.NamedTemporaryFile(delete=False)
+ tmp_file.write(to_bytes(''))
+ tmp_file.close()
+
+ fake_loader = DictDataLoader({tmp_file.name: ''})
+
+ secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+ self.assertRaisesRegexp(vault.AnsibleVaultPasswordError,
+ 'Invalid vault password was provided from file.*%s' % tmp_file.name,
+ secret.load)
+
+ os.unlink(tmp_file.name)
+
+ def test_file_encrypted(self):
+ vault_password = "test-vault-password"
+ text_secret = TextVaultSecret(vault_password)
+ vault_secrets = [('foo', text_secret)]
+
+ password = 'some password'
+        # 'some password' encrypted with 'test-vault-password'
+
+ password_file_content = '''$ANSIBLE_VAULT;1.1;AES256
+61393863643638653437313566313632306462383837303132346434616433313438353634613762
+3334363431623364386164616163326537366333353663650a663634306232363432626162353665
+39623061353266373631636331643761306665343731376633623439313138396330346237653930
+6432643864346136640a653364386634666461306231353765636662316335613235383565306437
+3737
+'''
+
+ tmp_file = tempfile.NamedTemporaryFile(delete=False)
+ tmp_file.write(to_bytes(password_file_content))
+ tmp_file.close()
+
+ fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
+ fake_loader._vault.secrets = vault_secrets
+
+ secret = vault.FileVaultSecret(loader=fake_loader, filename=tmp_file.name)
+ secret.load()
+
+ os.unlink(tmp_file.name)
+
+ self.assertEqual(secret.bytes, to_bytes(password))
+
+ def test_file_not_a_directory(self):
+ filename = '/dev/null/foobar'
+ fake_loader = DictDataLoader({filename: 'sdfadf'})
+
+ secret = vault.FileVaultSecret(loader=fake_loader, filename=filename)
+ self.assertRaisesRegexp(errors.AnsibleError,
+ '.*Could not read vault password file.*/dev/null/foobar.*Not a directory',
+ secret.load)
+
+ def test_file_not_found(self):
+ tmp_file = tempfile.NamedTemporaryFile()
+ filename = os.path.realpath(tmp_file.name)
+ tmp_file.close()
+
+ fake_loader = DictDataLoader({filename: 'sdfadf'})
+
+ secret = vault.FileVaultSecret(loader=fake_loader, filename=filename)
+ self.assertRaisesRegexp(errors.AnsibleError,
+ '.*Could not read vault password file.*%s.*' % filename,
+ secret.load)
+
+
+class TestScriptVaultSecret(unittest.TestCase):
+ def test(self):
+ secret = vault.ScriptVaultSecret()
+ self.assertIsNone(secret._bytes)
+ self.assertIsNone(secret._text)
+
+ def _mock_popen(self, mock_popen, return_code=0, stdout=b'', stderr=b''):
+ def communicate():
+ return stdout, stderr
+ mock_popen.return_value = MagicMock(returncode=return_code)
+ mock_popen_instance = mock_popen.return_value
+ mock_popen_instance.communicate = communicate
+
+ @patch('ansible.parsing.vault.subprocess.Popen')
+ def test_read_file(self, mock_popen):
+ self._mock_popen(mock_popen, stdout=b'some_password')
+ secret = vault.ScriptVaultSecret()
+ with patch.object(secret, 'loader') as mock_loader:
+ mock_loader.is_executable = MagicMock(return_value=True)
+ secret.load()
+
+ @patch('ansible.parsing.vault.subprocess.Popen')
+ def test_read_file_empty(self, mock_popen):
+ self._mock_popen(mock_popen, stdout=b'')
+ secret = vault.ScriptVaultSecret()
+ with patch.object(secret, 'loader') as mock_loader:
+ mock_loader.is_executable = MagicMock(return_value=True)
+ self.assertRaisesRegexp(vault.AnsibleVaultPasswordError,
+ 'Invalid vault password was provided from script',
+ secret.load)
+
+ @patch('ansible.parsing.vault.subprocess.Popen')
+ def test_read_file_os_error(self, mock_popen):
+ self._mock_popen(mock_popen)
+ mock_popen.side_effect = OSError('That is not an executable')
+ secret = vault.ScriptVaultSecret()
+ with patch.object(secret, 'loader') as mock_loader:
+ mock_loader.is_executable = MagicMock(return_value=True)
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'Problem running vault password script.*',
+ secret.load)
+
+ @patch('ansible.parsing.vault.subprocess.Popen')
+ def test_read_file_not_executable(self, mock_popen):
+ self._mock_popen(mock_popen)
+ secret = vault.ScriptVaultSecret()
+ with patch.object(secret, 'loader') as mock_loader:
+ mock_loader.is_executable = MagicMock(return_value=False)
+ self.assertRaisesRegexp(vault.AnsibleVaultError,
+ 'The vault password script .* was not executable',
+ secret.load)
+
+ @patch('ansible.parsing.vault.subprocess.Popen')
+ def test_read_file_non_zero_return_code(self, mock_popen):
+ stderr = b'That did not work for a random reason'
+ rc = 37
+
+ self._mock_popen(mock_popen, return_code=rc, stderr=stderr)
+ secret = vault.ScriptVaultSecret(filename='/dev/null/some_vault_secret')
+ with patch.object(secret, 'loader') as mock_loader:
+ mock_loader.is_executable = MagicMock(return_value=True)
+ self.assertRaisesRegexp(errors.AnsibleError,
+ r'Vault password script.*returned non-zero \(%s\): %s' % (rc, stderr),
+ secret.load)
+
+
+class TestScriptIsClient(unittest.TestCase):
+ def test_randomname(self):
+ filename = 'randomname'
+ res = vault.script_is_client(filename)
+ self.assertFalse(res)
+
+ def test_something_dash_client(self):
+ filename = 'something-client'
+ res = vault.script_is_client(filename)
+ self.assertTrue(res)
+
+ def test_something_dash_client_somethingelse(self):
+ filename = 'something-client-somethingelse'
+ res = vault.script_is_client(filename)
+ self.assertFalse(res)
+
+ def test_something_dash_client_py(self):
+ filename = 'something-client.py'
+ res = vault.script_is_client(filename)
+ self.assertTrue(res)
+
+ def test_full_path_something_dash_client_py(self):
+ filename = '/foo/bar/something-client.py'
+ res = vault.script_is_client(filename)
+ self.assertTrue(res)
+
+ def test_full_path_something_dash_client(self):
+ filename = '/foo/bar/something-client'
+ res = vault.script_is_client(filename)
+ self.assertTrue(res)
+
+ def test_full_path_something_dash_client_in_dir(self):
+ filename = '/foo/bar/something-client/but/not/filename'
+ res = vault.script_is_client(filename)
+ self.assertFalse(res)
+
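+
+# Orientation sketch, ours rather than the upstream implementation: taken
+# together, the cases above pin script_is_client() down to "the basename,
+# minus any extension, ends with '-client'". The helper name is hypothetical.
+def _script_is_client_sketch(filename):
+    base = os.path.splitext(os.path.basename(filename))[0]
+    return base.endswith('-client')
+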
+
+class TestGetFileVaultSecret(unittest.TestCase):
+ def test_file(self):
+ password = 'some password'
+
+ tmp_file = tempfile.NamedTemporaryFile(delete=False)
+ tmp_file.write(to_bytes(password))
+ tmp_file.close()
+
+ fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
+
+ secret = vault.get_file_vault_secret(filename=tmp_file.name, loader=fake_loader)
+ secret.load()
+
+ os.unlink(tmp_file.name)
+
+ self.assertEqual(secret.bytes, to_bytes(password))
+
+ def test_file_not_a_directory(self):
+ filename = '/dev/null/foobar'
+ fake_loader = DictDataLoader({filename: 'sdfadf'})
+
+ self.assertRaisesRegexp(errors.AnsibleError,
+ '.*The vault password file %s was not found.*' % filename,
+ vault.get_file_vault_secret,
+ filename=filename,
+ loader=fake_loader)
+
+ def test_file_not_found(self):
+ tmp_file = tempfile.NamedTemporaryFile()
+ filename = os.path.realpath(tmp_file.name)
+ tmp_file.close()
+
+ fake_loader = DictDataLoader({filename: 'sdfadf'})
+
+ self.assertRaisesRegexp(errors.AnsibleError,
+ '.*The vault password file %s was not found.*' % filename,
+ vault.get_file_vault_secret,
+ filename=filename,
+ loader=fake_loader)
+
+
+class TestVaultIsEncrypted(unittest.TestCase):
+ def test_bytes_not_encrypted(self):
+ b_data = b"foobar"
+ self.assertFalse(vault.is_encrypted(b_data))
+
+ def test_bytes_encrypted(self):
+ b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
+ self.assertTrue(vault.is_encrypted(b_data))
+
+ def test_text_not_encrypted(self):
+ b_data = to_text(b"foobar")
+ self.assertFalse(vault.is_encrypted(b_data))
+
+ def test_text_encrypted(self):
+ b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible"))
+ self.assertTrue(vault.is_encrypted(b_data))
+
+ def test_invalid_text_not_ascii(self):
+ data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
+ self.assertFalse(vault.is_encrypted(data))
+
+ def test_invalid_bytes_not_ascii(self):
+ data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
+ b_data = to_bytes(data, encoding='utf-8')
+ self.assertFalse(vault.is_encrypted(b_data))
+
+
+class TestVaultIsEncryptedFile(unittest.TestCase):
+ def test_binary_file_handle_not_encrypted(self):
+ b_data = b"foobar"
+ b_data_fo = io.BytesIO(b_data)
+ self.assertFalse(vault.is_encrypted_file(b_data_fo))
+
+ def test_text_file_handle_not_encrypted(self):
+ data = u"foobar"
+ data_fo = io.StringIO(data)
+ self.assertFalse(vault.is_encrypted_file(data_fo))
+
+ def test_binary_file_handle_encrypted(self):
+ b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
+ b_data_fo = io.BytesIO(b_data)
+ self.assertTrue(vault.is_encrypted_file(b_data_fo))
+
+ def test_text_file_handle_encrypted(self):
+ data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % to_text(hexlify(b"ansible"))
+ data_fo = io.StringIO(data)
+ self.assertTrue(vault.is_encrypted_file(data_fo))
+
+ def test_binary_file_handle_invalid(self):
+ data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
+ b_data = to_bytes(data)
+ b_data_fo = io.BytesIO(b_data)
+ self.assertFalse(vault.is_encrypted_file(b_data_fo))
+
+ def test_text_file_handle_invalid(self):
+ data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
+ data_fo = io.StringIO(data)
+ self.assertFalse(vault.is_encrypted_file(data_fo))
+
+ def test_file_already_read_from_finds_header(self):
+ b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
+ b_data_fo = io.BytesIO(b_data)
+ b_data_fo.read(42) # Arbitrary number
+ self.assertTrue(vault.is_encrypted_file(b_data_fo))
+
+ def test_file_already_read_from_saves_file_pos(self):
+ b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
+ b_data_fo = io.BytesIO(b_data)
+ b_data_fo.read(69) # Arbitrary number
+ vault.is_encrypted_file(b_data_fo)
+ self.assertEqual(b_data_fo.tell(), 69)
+
+ def test_file_with_offset(self):
+ b_data = b"JUNK$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
+ b_data_fo = io.BytesIO(b_data)
+ self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4))
+
+ def test_file_with_count(self):
+ b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
+ vault_length = len(b_data)
+        b_data = b_data + u'ァ ア'.encode('utf-8')
+ b_data_fo = io.BytesIO(b_data)
+ self.assertTrue(vault.is_encrypted_file(b_data_fo, count=vault_length))
+
+ def test_file_with_offset_and_count(self):
+ b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
+ vault_length = len(b_data)
+        b_data = b'JUNK' + b_data + u'ァ ア'.encode('utf-8')
+ b_data_fo = io.BytesIO(b_data)
+ self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4, count=vault_length))
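+
+    # Orientation sketch, ours and not the upstream code: the offset/count
+    # cases above behave as if start_pos/count select a window of the file
+    # object, with the original file position restored afterwards (see
+    # test_file_already_read_from_saves_file_pos). The helper is hypothetical.
+    def _is_encrypted_window_sketch(self, b_data_fo, start_pos=0, count=-1):
+        saved_pos = b_data_fo.tell()
+        try:
+            b_data_fo.seek(start_pos)
+            return vault.is_encrypted(b_data_fo.read(count))
+        finally:
+            b_data_fo.seek(saved_pos)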
+
+
+@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
+ reason="Skipping cryptography tests because cryptography is not installed")
+class TestVaultCipherAes256(unittest.TestCase):
+ def setUp(self):
+ self.vault_cipher = vault.VaultAES256()
+
+ def test(self):
+ self.assertIsInstance(self.vault_cipher, vault.VaultAES256)
+
+ # TODO: tag these as slow tests
+ def test_create_key_cryptography(self):
+ b_password = b'hunter42'
+ b_salt = os.urandom(32)
+ b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_cryptography, six.binary_type)
+
+ @pytest.mark.skipif(not vault.HAS_PYCRYPTO, reason='Not testing pycrypto key as pycrypto is not installed')
+ def test_create_key_pycrypto(self):
+ b_password = b'hunter42'
+ b_salt = os.urandom(32)
+
+ b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_pycrypto, six.binary_type)
+
+ @pytest.mark.skipif(not vault.HAS_PYCRYPTO,
+ reason='Not comparing cryptography key to pycrypto key as pycrypto is not installed')
+ def test_compare_new_keys(self):
+ b_password = b'hunter42'
+ b_salt = os.urandom(32)
+ b_key_cryptography = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
+
+ b_key_pycrypto = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertEqual(b_key_cryptography, b_key_pycrypto)
+
+ def test_create_key_known_cryptography(self):
+ b_password = b'hunter42'
+
+ # A fixed salt
+ b_salt = b'q' * 32 # q is the most random letter.
+ b_key_1 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_1, six.binary_type)
+
+ # verify we get the same answer
+ # we could potentially run a few iterations of this and time it to see if it's roughly constant time
+        # and/or that it exceeds some minimal time, but that would likely cause unreliable failures, especially in CI
+ b_key_2 = self.vault_cipher._create_key_cryptography(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_2, six.binary_type)
+ self.assertEqual(b_key_1, b_key_2)
+
+ # And again with pycrypto
+ b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_3, six.binary_type)
+
+ # verify we get the same answer
+ # we could potentially run a few iterations of this and time it to see if it's roughly constant time
+        # and/or that it exceeds some minimal time, but that would likely cause unreliable failures, especially in CI
+ b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_4, six.binary_type)
+ self.assertEqual(b_key_3, b_key_4)
+ self.assertEqual(b_key_1, b_key_4)
+
+ def test_create_key_known_pycrypto(self):
+ b_password = b'hunter42'
+
+ # A fixed salt
+ b_salt = b'q' * 32 # q is the most random letter.
+ b_key_3 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_3, six.binary_type)
+
+ # verify we get the same answer
+ # we could potentially run a few iterations of this and time it to see if it's roughly constant time
+        # and/or that it exceeds some minimal time, but that would likely cause unreliable failures, especially in CI
+ b_key_4 = self.vault_cipher._create_key_pycrypto(b_password, b_salt, key_length=32, iv_length=16)
+ self.assertIsInstance(b_key_4, six.binary_type)
+ self.assertEqual(b_key_3, b_key_4)
+
+ def test_is_equal_is_equal(self):
+ self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz'))
+
+ def test_is_equal_unequal_length(self):
+ self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y'))
+
+ def test_is_equal_not_equal(self):
+ self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ'))
+
+ def test_is_equal_empty(self):
+ self.assertTrue(self.vault_cipher._is_equal(b'', b''))
+
+ def test_is_equal_non_ascii_equal(self):
+        utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
+ self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data))
+
+ def test_is_equal_non_ascii_unequal(self):
+        utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
+ utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.')
+
+ # Test for the len optimization path
+ self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2))
+ # Test for the slower, char by char comparison path
+ self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P'))
+
+ def test_is_equal_non_bytes(self):
+ """ Anything not a byte string should raise a TypeError """
+ self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish")
+ self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish")
+ self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish")
+ self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2)
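+
+    # Reference sketch, ours and not necessarily the upstream code, of the
+    # constant-time comparison the cases above exercise: reject non-bytes
+    # with TypeError, short-circuit on length, then XOR-accumulate so that
+    # equal-length inputs take the same time wherever they differ.
+    @staticmethod
+    def _is_equal_sketch(b_a, b_b):
+        if not (isinstance(b_a, six.binary_type) and isinstance(b_b, six.binary_type)):
+            raise TypeError('_is_equal can only compare two byte strings')
+        if len(b_a) != len(b_b):
+            return False
+        result = 0
+        for b_x, b_y in zip(bytearray(b_a), bytearray(b_b)):
+            result |= b_x ^ b_y
+        return result == 0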
+
+
+@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
+ reason="Skipping Pycrypto tests because pycrypto is not installed")
+class TestVaultCipherAes256PyCrypto(TestVaultCipherAes256):
+ def setUp(self):
+ self.has_cryptography = vault.HAS_CRYPTOGRAPHY
+ vault.HAS_CRYPTOGRAPHY = False
+ super(TestVaultCipherAes256PyCrypto, self).setUp()
+
+ def tearDown(self):
+ vault.HAS_CRYPTOGRAPHY = self.has_cryptography
+ super(TestVaultCipherAes256PyCrypto, self).tearDown()
+
+
+class TestMatchSecrets(unittest.TestCase):
+ def test_empty_tuple(self):
+ secrets = [tuple()]
+ vault_ids = ['vault_id_1']
+ self.assertRaises(ValueError,
+ vault.match_secrets,
+ secrets, vault_ids)
+
+ def test_empty_secrets(self):
+ matches = vault.match_secrets([], ['vault_id_1'])
+ self.assertEqual(matches, [])
+
+ def test_single_match(self):
+ secret = TextVaultSecret('password')
+ matches = vault.match_secrets([('default', secret)], ['default'])
+ self.assertEqual(matches, [('default', secret)])
+
+ def test_no_matches(self):
+ secret = TextVaultSecret('password')
+ matches = vault.match_secrets([('default', secret)], ['not_default'])
+ self.assertEqual(matches, [])
+
+ def test_multiple_matches(self):
+ secrets = [('vault_id1', TextVaultSecret('password1')),
+ ('vault_id2', TextVaultSecret('password2')),
+ ('vault_id1', TextVaultSecret('password3')),
+ ('vault_id4', TextVaultSecret('password4'))]
+ vault_ids = ['vault_id1', 'vault_id4']
+ matches = vault.match_secrets(secrets, vault_ids)
+
+ self.assertEqual(len(matches), 3)
+ expected = [('vault_id1', TextVaultSecret('password1')),
+ ('vault_id1', TextVaultSecret('password3')),
+ ('vault_id4', TextVaultSecret('password4'))]
+ self.assertEqual([x for x, y in matches],
+ [a for a, b in expected])
+
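+
+# Orientation sketch, ours rather than the upstream implementation: the cases
+# above suggest match_secrets() keeps the (vault_id, secret) tuples whose id
+# appears in vault_ids, in their original order, and that a malformed entry
+# such as an empty tuple fails tuple unpacking, hence the ValueError.
+def _match_secrets_sketch(secrets, vault_ids):
+    return [(v_id, secret) for v_id, secret in secrets if v_id in vault_ids]
+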
+
+@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
+ reason="Skipping cryptography tests because cryptography is not installed")
+class TestVaultLib(unittest.TestCase):
+ def setUp(self):
+ self.vault_password = "test-vault-password"
+ text_secret = TextVaultSecret(self.vault_password)
+ self.vault_secrets = [('default', text_secret),
+ ('test_id', text_secret)]
+ self.v = vault.VaultLib(self.vault_secrets)
+
+ def _vault_secrets(self, vault_id, secret):
+ return [(vault_id, secret)]
+
+ def _vault_secrets_from_password(self, vault_id, password):
+ return [(vault_id, TextVaultSecret(password))]
+
+ def test_encrypt(self):
+        plaintext = u'Some text to encrypt in a café'
+ b_vaulttext = self.v.encrypt(plaintext)
+
+ self.assertIsInstance(b_vaulttext, six.binary_type)
+
+ b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
+ self.assertEqual(b_vaulttext[:len(b_header)], b_header)
+
+ def test_encrypt_vault_id(self):
+        plaintext = u'Some text to encrypt in a café'
+ b_vaulttext = self.v.encrypt(plaintext, vault_id='test_id')
+
+ self.assertIsInstance(b_vaulttext, six.binary_type)
+
+ b_header = b'$ANSIBLE_VAULT;1.2;AES256;test_id\n'
+ self.assertEqual(b_vaulttext[:len(b_header)], b_header)
+
+ def test_encrypt_bytes(self):
+
+        plaintext = to_bytes(u'Some text to encrypt in a café')
+ b_vaulttext = self.v.encrypt(plaintext)
+
+ self.assertIsInstance(b_vaulttext, six.binary_type)
+
+ b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
+ self.assertEqual(b_vaulttext[:len(b_header)], b_header)
+
+ def test_encrypt_no_secret_empty_secrets(self):
+ vault_secrets = []
+ v = vault.VaultLib(vault_secrets)
+
+        plaintext = u'Some text to encrypt in a café'
+ self.assertRaisesRegexp(vault.AnsibleVaultError,
+ '.*A vault password must be specified to encrypt data.*',
+ v.encrypt,
+ plaintext)
+
+ def test_format_vaulttext_envelope(self):
+ cipher_name = "TEST"
+ b_ciphertext = b"ansible"
+ b_vaulttext = vault.format_vaulttext_envelope(b_ciphertext,
+ cipher_name,
+ version=self.v.b_version,
+ vault_id='default')
+ b_lines = b_vaulttext.split(b'\n')
+ self.assertGreater(len(b_lines), 1, msg="failed to properly add header")
+
+ b_header = b_lines[0]
+ # self.assertTrue(b_header.endswith(b';TEST'), msg="header does not end with cipher name")
+
+ b_header_parts = b_header.split(b';')
+ self.assertEqual(len(b_header_parts), 4, msg="header has the wrong number of parts")
+ self.assertEqual(b_header_parts[0], b'$ANSIBLE_VAULT', msg="header does not start with $ANSIBLE_VAULT")
+ self.assertEqual(b_header_parts[1], self.v.b_version, msg="header version is incorrect")
+ self.assertEqual(b_header_parts[2], b'TEST', msg="header does not end with cipher name")
+
+ # And just to verify, lets parse the results and compare
+ b_ciphertext2, b_version2, cipher_name2, vault_id2 = \
+ vault.parse_vaulttext_envelope(b_vaulttext)
+ self.assertEqual(b_ciphertext, b_ciphertext2)
+ self.assertEqual(self.v.b_version, b_version2)
+ self.assertEqual(cipher_name, cipher_name2)
+ self.assertEqual('default', vault_id2)
+
+ def test_parse_vaulttext_envelope(self):
+ b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
+ b_ciphertext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext)
+ b_lines = b_ciphertext.split(b'\n')
+ self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header")
+ self.assertEqual(cipher_name, u'TEST', msg="cipher name was not properly set")
+ self.assertEqual(b_version, b"9.9", msg="version was not properly set")
+
+ def test_parse_vaulttext_envelope_crlf(self):
+ b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\r\nansible"
+ b_ciphertext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext)
+ b_lines = b_ciphertext.split(b'\n')
+ self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header")
+ self.assertEqual(cipher_name, u'TEST', msg="cipher name was not properly set")
+ self.assertEqual(b_version, b"9.9", msg="version was not properly set")
+
+ def test_encrypt_decrypt_aes256(self):
+ self.v.cipher_name = u'AES256'
+ plaintext = u"foobar"
+ b_vaulttext = self.v.encrypt(plaintext)
+ b_plaintext = self.v.decrypt(b_vaulttext)
+ self.assertNotEqual(b_vaulttext, b"foobar", msg="encryption failed")
+ self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
+
+ def test_encrypt_decrypt_aes256_none_secrets(self):
+ vault_secrets = self._vault_secrets_from_password('default', 'ansible')
+ v = vault.VaultLib(vault_secrets)
+
+ plaintext = u"foobar"
+ b_vaulttext = v.encrypt(plaintext)
+
+ # VaultLib will default to empty {} if secrets is None
+ v_none = vault.VaultLib(None)
+ # so set secrets None explicitly
+ v_none.secrets = None
+ self.assertRaisesRegexp(vault.AnsibleVaultError,
+ '.*A vault password must be specified to decrypt data.*',
+ v_none.decrypt,
+ b_vaulttext)
+
+ def test_encrypt_decrypt_aes256_empty_secrets(self):
+ vault_secrets = self._vault_secrets_from_password('default', 'ansible')
+ v = vault.VaultLib(vault_secrets)
+
+ plaintext = u"foobar"
+ b_vaulttext = v.encrypt(plaintext)
+
+ vault_secrets_empty = []
+ v_none = vault.VaultLib(vault_secrets_empty)
+
+ self.assertRaisesRegexp(vault.AnsibleVaultError,
+ '.*Attempting to decrypt but no vault secrets found.*',
+ v_none.decrypt,
+ b_vaulttext)
+
+ def test_encrypt_decrypt_aes256_multiple_secrets_all_wrong(self):
+        plaintext = u'Some text to encrypt in a café'
+ b_vaulttext = self.v.encrypt(plaintext)
+
+ vault_secrets = [('default', TextVaultSecret('another-wrong-password')),
+ ('wrong-password', TextVaultSecret('wrong-password'))]
+
+ v_multi = vault.VaultLib(vault_secrets)
+ self.assertRaisesRegexp(errors.AnsibleError,
+ '.*Decryption failed.*',
+ v_multi.decrypt,
+ b_vaulttext,
+ filename='/dev/null/fake/filename')
+
+ def test_encrypt_decrypt_aes256_multiple_secrets_one_valid(self):
+        plaintext = u'Some text to encrypt in a café'
+ b_vaulttext = self.v.encrypt(plaintext)
+
+ correct_secret = TextVaultSecret(self.vault_password)
+ wrong_secret = TextVaultSecret('wrong-password')
+
+ vault_secrets = [('default', wrong_secret),
+                         ('correct_secret', correct_secret),
+ ('wrong_secret', wrong_secret)]
+
+ v_multi = vault.VaultLib(vault_secrets)
+ b_plaintext = v_multi.decrypt(b_vaulttext)
+ self.assertNotEqual(b_vaulttext, to_bytes(plaintext), msg="encryption failed")
+ self.assertEqual(b_plaintext, to_bytes(plaintext), msg="decryption failed")
+
+ def test_encrypt_decrypt_aes256_existing_vault(self):
+ self.v.cipher_name = u'AES256'
+ b_orig_plaintext = b"Setec Astronomy"
+ vaulttext = u'''$ANSIBLE_VAULT;1.1;AES256
+33363965326261303234626463623963633531343539616138316433353830356566396130353436
+3562643163366231316662386565383735653432386435610a306664636137376132643732393835
+63383038383730306639353234326630666539346233376330303938323639306661313032396437
+6233623062366136310a633866373936313238333730653739323461656662303864663666653563
+3138'''
+
+ b_plaintext = self.v.decrypt(vaulttext)
+        self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
+
+ b_vaulttext = to_bytes(vaulttext, encoding='ascii', errors='strict')
+ b_plaintext = self.v.decrypt(b_vaulttext)
+ self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
+
+ # FIXME This test isn't working quite yet.
+ @pytest.mark.skip(reason='This test is not ready yet')
+ def test_encrypt_decrypt_aes256_bad_hmac(self):
+
+ self.v.cipher_name = 'AES256'
+ # plaintext = "Setec Astronomy"
+ enc_data = '''$ANSIBLE_VAULT;1.1;AES256
+33363965326261303234626463623963633531343539616138316433353830356566396130353436
+3562643163366231316662386565383735653432386435610a306664636137376132643732393835
+63383038383730306639353234326630666539346233376330303938323639306661313032396437
+6233623062366136310a633866373936313238333730653739323461656662303864663666653563
+3138'''
+ b_data = to_bytes(enc_data, errors='strict', encoding='utf-8')
+ b_data = self.v._split_header(b_data)
+ foo = binascii.unhexlify(b_data)
+ lines = foo.splitlines()
+ # line 0 is salt, line 1 is hmac, line 2+ is ciphertext
+ b_salt = lines[0]
+ b_hmac = lines[1]
+ b_ciphertext_data = b'\n'.join(lines[2:])
+
+ b_ciphertext = binascii.unhexlify(b_ciphertext_data)
+ # b_orig_ciphertext = b_ciphertext[:]
+
+ # now muck with the text
+ # b_munged_ciphertext = b_ciphertext[:10] + b'\x00' + b_ciphertext[11:]
+ # b_munged_ciphertext = b_ciphertext
+ # assert b_orig_ciphertext != b_munged_ciphertext
+
+ b_ciphertext_data = binascii.hexlify(b_ciphertext)
+ b_payload = b'\n'.join([b_salt, b_hmac, b_ciphertext_data])
+ # reformat
+ b_invalid_ciphertext = self.v._format_output(b_payload)
+
+ # assert we throw an error
+ self.v.decrypt(b_invalid_ciphertext)
+
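+    # Orientation sketch, ours and for illustration only: the skipped test
+    # above relies on the 1.1 payload layout, where the envelope body is hex
+    # that unhexlifies to three newline-separated hex fields (salt, HMAC,
+    # ciphertext). This uses the public parse_vaulttext_envelope() rather
+    # than the private helpers the skipped test still calls.
+    def _split_vault_payload_sketch(self, b_vaulttext_envelope):
+        b_vaulttext = vault.parse_vaulttext_envelope(b_vaulttext_envelope)[0]
+        b_lines = binascii.unhexlify(b_vaulttext).splitlines()
+        # line 0 is the hex salt, line 1 the hex hmac, line 2+ hex ciphertext
+        return b_lines[0], b_lines[1], binascii.unhexlify(b'\n'.join(b_lines[2:]))
+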
+ def test_decrypt_and_get_vault_id(self):
+ b_expected_plaintext = to_bytes('foo bar\n')
+ vaulttext = '''$ANSIBLE_VAULT;1.2;AES256;ansible_devel
+65616435333934613466373335363332373764363365633035303466643439313864663837393234
+3330656363343637313962633731333237313636633534630a386264363438363362326132363239
+39363166646664346264383934393935653933316263333838386362633534326664646166663736
+6462303664383765650a356637643633366663643566353036303162386237336233393065393164
+6264'''
+
+ vault_secrets = self._vault_secrets_from_password('ansible_devel', 'ansible')
+ v = vault.VaultLib(vault_secrets)
+
+ b_vaulttext = to_bytes(vaulttext)
+
+ b_plaintext, vault_id_used, vault_secret_used = v.decrypt_and_get_vault_id(b_vaulttext)
+
+ self.assertEqual(b_expected_plaintext, b_plaintext)
+ self.assertEqual(vault_id_used, 'ansible_devel')
+ self.assertEqual(vault_secret_used.text, 'ansible')
+
+ def test_decrypt_non_default_1_2(self):
+ b_expected_plaintext = to_bytes('foo bar\n')
+ vaulttext = '''$ANSIBLE_VAULT;1.2;AES256;ansible_devel
+65616435333934613466373335363332373764363365633035303466643439313864663837393234
+3330656363343637313962633731333237313636633534630a386264363438363362326132363239
+39363166646664346264383934393935653933316263333838386362633534326664646166663736
+6462303664383765650a356637643633366663643566353036303162386237336233393065393164
+6264'''
+
+ vault_secrets = self._vault_secrets_from_password('default', 'ansible')
+ v = vault.VaultLib(vault_secrets)
+
+ b_vaulttext = to_bytes(vaulttext)
+
+ b_plaintext = v.decrypt(b_vaulttext)
+ self.assertEqual(b_expected_plaintext, b_plaintext)
+
+ b_ciphertext, b_version, cipher_name, vault_id = vault.parse_vaulttext_envelope(b_vaulttext)
+ self.assertEqual('ansible_devel', vault_id)
+ self.assertEqual(b'1.2', b_version)
+
+ def test_decrypt_decrypted(self):
+ plaintext = u"ansible"
+ self.assertRaises(errors.AnsibleError, self.v.decrypt, plaintext)
+
+ b_plaintext = b"ansible"
+ self.assertRaises(errors.AnsibleError, self.v.decrypt, b_plaintext)
+
+ def test_cipher_not_set(self):
+ plaintext = u"ansible"
+ self.v.encrypt(plaintext)
+ self.assertEqual(self.v.cipher_name, "AES256")
+
+
+@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
+ reason="Skipping Pycrypto tests because pycrypto is not installed")
+class TestVaultLibPyCrypto(TestVaultLib):
+ def setUp(self):
+ self.has_cryptography = vault.HAS_CRYPTOGRAPHY
+ vault.HAS_CRYPTOGRAPHY = False
+ super(TestVaultLibPyCrypto, self).setUp()
+
+ def tearDown(self):
+ vault.HAS_CRYPTOGRAPHY = self.has_cryptography
+ super(TestVaultLibPyCrypto, self).tearDown()
diff --git a/test/units/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py
new file mode 100644
index 00000000..8aa9b37c
--- /dev/null
+++ b/test/units/parsing/vault/test_vault_editor.py
@@ -0,0 +1,517 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# (c) 2014, James Cammarata, <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+
+import pytest
+
+from units.compat import unittest
+from units.compat.mock import patch
+
+from ansible import errors
+from ansible.parsing import vault
+from ansible.parsing.vault import VaultLib, VaultEditor, match_encrypt_secret
+
+from ansible.module_utils._text import to_bytes, to_text
+
+from units.mock.vault_helper import TextVaultSecret
+
+v11_data = """$ANSIBLE_VAULT;1.1;AES256
+62303130653266653331306264616235333735323636616539316433666463323964623162386137
+3961616263373033353631316333623566303532663065310a393036623466376263393961326530
+64336561613965383835646464623865663966323464653236343638373165343863623638316664
+3631633031323837340a396530313963373030343933616133393566366137363761373930663833
+3739"""
+
+
+@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
+ reason="Skipping cryptography tests because cryptography is not installed")
+class TestVaultEditor(unittest.TestCase):
+
+ def setUp(self):
+ self._test_dir = None
+ self.vault_password = "test-vault-password"
+ vault_secret = TextVaultSecret(self.vault_password)
+ self.vault_secrets = [('vault_secret', vault_secret),
+ ('default', vault_secret)]
+
+ @property
+ def vault_secret(self):
+ return match_encrypt_secret(self.vault_secrets)[1]
+
+ def tearDown(self):
+ if self._test_dir:
+ pass
+ # shutil.rmtree(self._test_dir)
+ self._test_dir = None
+
+ def _secrets(self, password):
+ vault_secret = TextVaultSecret(password)
+ vault_secrets = [('default', vault_secret)]
+ return vault_secrets
+
+ def test_methods_exist(self):
+ v = vault.VaultEditor(None)
+ slots = ['create_file',
+ 'decrypt_file',
+ 'edit_file',
+ 'encrypt_file',
+ 'rekey_file',
+ 'read_data',
+ 'write_data']
+ for slot in slots:
+ assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
+
+ def _create_test_dir(self):
+ suffix = '_ansible_unit_test_%s_' % (self.__class__.__name__)
+ return tempfile.mkdtemp(suffix=suffix)
+
+ def _create_file(self, test_dir, name, content=None, symlink=False):
+ file_path = os.path.join(test_dir, name)
+ opened_file = open(file_path, 'wb')
+ if content:
+ opened_file.write(content)
+ opened_file.close()
+ return file_path
+
+ def _vault_editor(self, vault_secrets=None):
+ if vault_secrets is None:
+ vault_secrets = self._secrets(self.vault_password)
+ return VaultEditor(VaultLib(vault_secrets))
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_helper_empty_target(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+
+ src_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ mock_sp_call.side_effect = self._faux_command
+ ve = self._vault_editor()
+
+ b_ciphertext = ve._edit_file_helper(src_file_path, self.vault_secret)
+
+ self.assertNotEqual(src_contents, b_ciphertext)
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_helper_call_exception(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+
+ src_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ error_txt = 'calling editor raised an exception'
+ mock_sp_call.side_effect = errors.AnsibleError(error_txt)
+
+ ve = self._vault_editor()
+
+ self.assertRaisesRegexp(errors.AnsibleError,
+ error_txt,
+ ve._edit_file_helper,
+ src_file_path,
+ self.vault_secret)
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_helper_symlink_target(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
+
+ os.symlink(src_file_path, src_file_link_path)
+
+ mock_sp_call.side_effect = self._faux_command
+ ve = self._vault_editor()
+
+ b_ciphertext = ve._edit_file_helper(src_file_link_path, self.vault_secret)
+
+ self.assertNotEqual(src_file_contents, b_ciphertext,
+ 'b_ciphertext should be encrypted and not equal to src_contents')
+
+ def _faux_editor(self, editor_args, new_src_contents=None):
+ if editor_args[0] == 'shred':
+ return
+
+ tmp_path = editor_args[-1]
+
+        # simulate the tmp file being edited
+ tmp_file = open(tmp_path, 'wb')
+ if new_src_contents:
+ tmp_file.write(new_src_contents)
+ tmp_file.close()
+
+ def _faux_command(self, tmp_path):
+ pass
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_helper_no_change(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ # editor invocation doesn't change anything
+ def faux_editor(editor_args):
+ self._faux_editor(editor_args, src_file_contents)
+
+ mock_sp_call.side_effect = faux_editor
+ ve = self._vault_editor()
+
+ ve._edit_file_helper(src_file_path, self.vault_secret, existing_data=src_file_contents)
+
+ new_target_file = open(src_file_path, 'rb')
+ new_target_file_contents = new_target_file.read()
+ self.assertEqual(src_file_contents, new_target_file_contents)
+
+ def _assert_file_is_encrypted(self, vault_editor, src_file_path, src_contents):
+ new_src_file = open(src_file_path, 'rb')
+ new_src_file_contents = new_src_file.read()
+
+ # TODO: assert that it is encrypted
+ self.assertTrue(vault.is_encrypted(new_src_file_contents))
+
+ src_file_plaintext = vault_editor.vault.decrypt(new_src_file_contents)
+
+ # the plaintext should not be encrypted
+ self.assertFalse(vault.is_encrypted(src_file_plaintext))
+
+ # and the new plaintext should match the original
+ self.assertEqual(src_file_plaintext, src_contents)
+
+ def _assert_file_is_link(self, src_file_link_path, src_file_path):
+ self.assertTrue(os.path.islink(src_file_link_path),
+ 'The dest path (%s) should be a symlink to (%s) but is not' % (src_file_link_path, src_file_path))
+
+ def test_rekey_file(self):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ ve = self._vault_editor()
+ ve.encrypt_file(src_file_path, self.vault_secret)
+
+ # FIXME: update to just set self._secrets or just a new vault secret id
+ new_password = 'password2:electricbugaloo'
+ new_vault_secret = TextVaultSecret(new_password)
+ new_vault_secrets = [('default', new_vault_secret)]
+ ve.rekey_file(src_file_path, vault.match_encrypt_secret(new_vault_secrets)[1])
+
+ # FIXME: can just update self._secrets here
+ new_ve = vault.VaultEditor(VaultLib(new_vault_secrets))
+ self._assert_file_is_encrypted(new_ve, src_file_path, src_file_contents)
+
+ def test_rekey_file_no_new_password(self):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ ve = self._vault_editor()
+ ve.encrypt_file(src_file_path, self.vault_secret)
+
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'The value for the new_password to rekey',
+ ve.rekey_file,
+ src_file_path,
+ None)
+
+ def test_rekey_file_not_encrypted(self):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ ve = self._vault_editor()
+
+ new_password = 'password2:electricbugaloo'
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'input is not vault encrypted data',
+ ve.rekey_file,
+ src_file_path, new_password)
+
+ def test_plaintext(self):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ ve = self._vault_editor()
+ ve.encrypt_file(src_file_path, self.vault_secret)
+
+ res = ve.plaintext(src_file_path)
+ self.assertEqual(src_file_contents, res)
+
+ def test_plaintext_not_encrypted(self):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ ve = self._vault_editor()
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'input is not vault encrypted data',
+ ve.plaintext,
+ src_file_path)
+
+ def test_encrypt_file(self):
+ self._test_dir = self._create_test_dir()
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ ve = self._vault_editor()
+ ve.encrypt_file(src_file_path, self.vault_secret)
+
+ self._assert_file_is_encrypted(ve, src_file_path, src_file_contents)
+
+ def test_encrypt_file_symlink(self):
+ self._test_dir = self._create_test_dir()
+
+ src_file_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)
+
+ src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
+ os.symlink(src_file_path, src_file_link_path)
+
+ ve = self._vault_editor()
+ ve.encrypt_file(src_file_link_path, self.vault_secret)
+
+ self._assert_file_is_encrypted(ve, src_file_path, src_file_contents)
+ self._assert_file_is_encrypted(ve, src_file_link_path, src_file_contents)
+
+ self._assert_file_is_link(src_file_link_path, src_file_path)
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_no_vault_id(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+ src_contents = to_bytes("some info in a file\nyup.")
+
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ new_src_contents = to_bytes("The info is different now.")
+
+ def faux_editor(editor_args):
+ self._faux_editor(editor_args, new_src_contents)
+
+ mock_sp_call.side_effect = faux_editor
+
+ ve = self._vault_editor()
+
+ ve.encrypt_file(src_file_path, self.vault_secret)
+ ve.edit_file(src_file_path)
+
+ new_src_file = open(src_file_path, 'rb')
+ new_src_file_contents = new_src_file.read()
+
+ self.assertTrue(b'$ANSIBLE_VAULT;1.1;AES256' in new_src_file_contents)
+
+ src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
+ self.assertEqual(src_file_plaintext, new_src_contents)
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_with_vault_id(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+ src_contents = to_bytes("some info in a file\nyup.")
+
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ new_src_contents = to_bytes("The info is different now.")
+
+ def faux_editor(editor_args):
+ self._faux_editor(editor_args, new_src_contents)
+
+ mock_sp_call.side_effect = faux_editor
+
+ ve = self._vault_editor()
+
+ ve.encrypt_file(src_file_path, self.vault_secret,
+ vault_id='vault_secrets')
+ ve.edit_file(src_file_path)
+
+ new_src_file = open(src_file_path, 'rb')
+ new_src_file_contents = new_src_file.read()
+
+ self.assertTrue(b'$ANSIBLE_VAULT;1.2;AES256;vault_secrets' in new_src_file_contents)
+
+ src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
+ self.assertEqual(src_file_plaintext, new_src_contents)
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_symlink(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+ src_contents = to_bytes("some info in a file\nyup.")
+
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ new_src_contents = to_bytes("The info is different now.")
+
+ def faux_editor(editor_args):
+ self._faux_editor(editor_args, new_src_contents)
+
+ mock_sp_call.side_effect = faux_editor
+
+ ve = self._vault_editor()
+
+ ve.encrypt_file(src_file_path, self.vault_secret)
+
+ src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
+
+ os.symlink(src_file_path, src_file_link_path)
+
+ ve.edit_file(src_file_link_path)
+
+ new_src_file = open(src_file_path, 'rb')
+ new_src_file_contents = new_src_file.read()
+
+ src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
+
+ self._assert_file_is_link(src_file_link_path, src_file_path)
+
+        self.assertEqual(src_file_plaintext, new_src_contents,
+                         'The decrypted plaintext of the edited file is not the expected contents.')
+
+ @patch('ansible.parsing.vault.subprocess.call')
+ def test_edit_file_not_encrypted(self, mock_sp_call):
+ self._test_dir = self._create_test_dir()
+ src_contents = to_bytes("some info in a file\nyup.")
+
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ new_src_contents = to_bytes("The info is different now.")
+
+ def faux_editor(editor_args):
+ self._faux_editor(editor_args, new_src_contents)
+
+ mock_sp_call.side_effect = faux_editor
+
+ ve = self._vault_editor()
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'input is not vault encrypted data',
+ ve.edit_file,
+ src_file_path)
+
+ def test_create_file_exists(self):
+ self._test_dir = self._create_test_dir()
+ src_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ ve = self._vault_editor()
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'please use .edit. instead',
+ ve.create_file,
+ src_file_path,
+ self.vault_secret)
+
+ def test_decrypt_file_exception(self):
+ self._test_dir = self._create_test_dir()
+ src_contents = to_bytes("some info in a file\nyup.")
+ src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)
+
+ ve = self._vault_editor()
+ self.assertRaisesRegexp(errors.AnsibleError,
+ 'input is not vault encrypted data',
+ ve.decrypt_file,
+ src_file_path)
+
+ @patch.object(vault.VaultEditor, '_editor_shell_command')
+ def test_create_file(self, mock_editor_shell_command):
+
+ def sc_side_effect(filename):
+ return ['touch', filename]
+ mock_editor_shell_command.side_effect = sc_side_effect
+
+ tmp_file = tempfile.NamedTemporaryFile()
+ os.unlink(tmp_file.name)
+
+ _secrets = self._secrets('ansible')
+ ve = self._vault_editor(_secrets)
+ ve.create_file(tmp_file.name, vault.match_encrypt_secret(_secrets)[1])
+
+ self.assertTrue(os.path.exists(tmp_file.name))
+
+ def test_decrypt_1_1(self):
+ v11_file = tempfile.NamedTemporaryFile(delete=False)
+ with v11_file as f:
+ f.write(to_bytes(v11_data))
+
+ ve = self._vault_editor(self._secrets("ansible"))
+
+ # make sure the password functions for the cipher
+ error_hit = False
+ try:
+ ve.decrypt_file(v11_file.name)
+ except errors.AnsibleError:
+ error_hit = True
+
+ # verify decrypted content
+ f = open(v11_file.name, "rb")
+ fdata = to_text(f.read())
+ f.close()
+
+ os.unlink(v11_file.name)
+
+ assert error_hit is False, "error decrypting 1.1 file"
+ assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
+
+ def test_real_path_dash(self):
+ filename = '-'
+ ve = self._vault_editor()
+
+ res = ve._real_path(filename)
+ self.assertEqual(res, '-')
+
+ def test_real_path_dev_null(self):
+ filename = '/dev/null'
+ ve = self._vault_editor()
+
+ res = ve._real_path(filename)
+ self.assertEqual(res, '/dev/null')
+
+ def test_real_path_symlink(self):
+ self._test_dir = os.path.realpath(self._create_test_dir())
+ file_path = self._create_file(self._test_dir, 'test_file', content=b'this is a test file')
+ file_link_path = os.path.join(self._test_dir, 'a_link_to_test_file')
+
+ os.symlink(file_path, file_link_path)
+
+ ve = self._vault_editor()
+
+ res = ve._real_path(file_link_path)
+ self.assertEqual(res, file_path)
+
+
+@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
+ reason="Skipping pycrypto tests because pycrypto is not installed")
+class TestVaultEditorPyCrypto(unittest.TestCase):
+ def setUp(self):
+ self.has_cryptography = vault.HAS_CRYPTOGRAPHY
+ vault.HAS_CRYPTOGRAPHY = False
+ super(TestVaultEditorPyCrypto, self).setUp()
+
+ def tearDown(self):
+ vault.HAS_CRYPTOGRAPHY = self.has_cryptography
+ super(TestVaultEditorPyCrypto, self).tearDown()
diff --git a/test/units/parsing/yaml/__init__.py b/test/units/parsing/yaml/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/parsing/yaml/__init__.py
diff --git a/test/units/parsing/yaml/test_dumper.py b/test/units/parsing/yaml/test_dumper.py
new file mode 100644
index 00000000..8129ca3a
--- /dev/null
+++ b/test/units/parsing/yaml/test_dumper.py
@@ -0,0 +1,103 @@
+# coding: utf-8
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+
+from units.compat import unittest
+from ansible.parsing import vault
+from ansible.parsing.yaml import dumper, objects
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.module_utils.six import PY2
+from ansible.utils.unsafe_proxy import AnsibleUnsafeText, AnsibleUnsafeBytes
+
+from units.mock.yaml_helper import YamlTestUtils
+from units.mock.vault_helper import TextVaultSecret
+
+
+class TestAnsibleDumper(unittest.TestCase, YamlTestUtils):
+ def setUp(self):
+ self.vault_password = "hunter42"
+ vault_secret = TextVaultSecret(self.vault_password)
+ self.vault_secrets = [('vault_secret', vault_secret)]
+ self.good_vault = vault.VaultLib(self.vault_secrets)
+ self.vault = self.good_vault
+ self.stream = self._build_stream()
+ self.dumper = dumper.AnsibleDumper
+
+ def _build_stream(self, yaml_text=None):
+ text = yaml_text or u''
+ stream = io.StringIO(text)
+ return stream
+
+ def _loader(self, stream):
+ return AnsibleLoader(stream, vault_secrets=self.vault.secrets)
+
+ def test_ansible_vault_encrypted_unicode(self):
+ plaintext = 'This is a string we are going to encrypt.'
+ avu = objects.AnsibleVaultEncryptedUnicode.from_plaintext(plaintext, vault=self.vault,
+ secret=vault.match_secrets(self.vault_secrets, ['vault_secret'])[0][1])
+
+ yaml_out = self._dump_string(avu, dumper=self.dumper)
+ stream = self._build_stream(yaml_out)
+ loader = self._loader(stream)
+
+ data_from_yaml = loader.get_single_data()
+
+ self.assertEqual(plaintext, data_from_yaml.data)
+
+ def test_bytes(self):
+        b_text = u'tréma'.encode('utf-8')
+ unsafe_object = AnsibleUnsafeBytes(b_text)
+ yaml_out = self._dump_string(unsafe_object, dumper=self.dumper)
+
+ stream = self._build_stream(yaml_out)
+ loader = self._loader(stream)
+
+ data_from_yaml = loader.get_single_data()
+
+ result = b_text
+ if PY2:
+ # https://pyyaml.org/wiki/PyYAMLDocumentation#string-conversion-python-2-only
+ # pyyaml on Python 2 can return either unicode or bytes when given byte strings.
+ # We normalize that to always return unicode on Python2 as that's right most of the
+ # time. However, this means byte strings can round trip through yaml on Python3 but
+ # not on Python2. To make this code work the same on Python2 and Python3 (we want
+ # the Python3 behaviour) we need to change the methods in Ansible to:
+ # (1) Let byte strings pass through yaml without being converted on Python2
+ # (2) Convert byte strings to text strings before being given to pyyaml (Without this,
+ # strings would end up as byte strings most of the time which would mostly be wrong)
+ # In practice, we mostly read bytes in from files and then pass that to pyyaml, for which
+ # the present behavior is correct.
+ # This is a workaround for the current behavior.
+ result = u'tr\xe9ma'
+
+ self.assertEqual(result, data_from_yaml)
+
+ def test_unicode(self):
+        u_text = u'nöel'
+ unsafe_object = AnsibleUnsafeText(u_text)
+ yaml_out = self._dump_string(unsafe_object, dumper=self.dumper)
+
+ stream = self._build_stream(yaml_out)
+ loader = self._loader(stream)
+
+ data_from_yaml = loader.get_single_data()
+
+ self.assertEqual(u_text, data_from_yaml)
diff --git a/test/units/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py
new file mode 100644
index 00000000..d6989f44
--- /dev/null
+++ b/test/units/parsing/yaml/test_loader.py
@@ -0,0 +1,436 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+
+from units.compat import unittest
+
+from ansible import errors
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils.common._collections_compat import Sequence, Set, Mapping
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing import vault
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+from units.mock.yaml_helper import YamlTestUtils
+from units.mock.vault_helper import TextVaultSecret
+
+try:
+ from _yaml import ParserError
+ from _yaml import ScannerError
+except ImportError:
+ from yaml.parser import ParserError
+ from yaml.scanner import ScannerError
+
+
+class NameStringIO(StringIO):
+ """In py2.6, StringIO doesn't let you set name because a baseclass has it
+ as readonly property"""
+ name = None
+
+ def __init__(self, *args, **kwargs):
+ super(NameStringIO, self).__init__(*args, **kwargs)
+
+
+class TestAnsibleLoaderBasic(unittest.TestCase):
+
+ def test_parse_number(self):
+ stream = StringIO(u"""
+ 1
+ """)
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, 1)
+ # No line/column info saved yet
+
+ def test_parse_string(self):
+ stream = StringIO(u"""
+ Ansible
+ """)
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, u'Ansible')
+ self.assertIsInstance(data, text_type)
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
+
+ def test_parse_utf8_string(self):
+ stream = StringIO(u"""
+                Cafè Eñyei
+ """)
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+        self.assertEqual(data, u'Cafè Eñyei')
+ self.assertIsInstance(data, text_type)
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
+
+ def test_parse_dict(self):
+ stream = StringIO(u"""
+ webster: daniel
+ oed: oxford
+ """)
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'})
+ self.assertEqual(len(data), 2)
+ self.assertIsInstance(list(data.keys())[0], text_type)
+ self.assertIsInstance(list(data.values())[0], text_type)
+
+ # Beginning of the first key
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
+
+ self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26))
+ self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22))
+
+ def test_parse_list(self):
+ stream = StringIO(u"""
+ - a
+ - b
+ """)
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, [u'a', u'b'])
+ self.assertEqual(len(data), 2)
+ self.assertIsInstance(data[0], text_type)
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
+
+ self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19))
+ self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19))
+
+ def test_parse_short_dict(self):
+ stream = StringIO(u"""{"foo": "bar"}""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, dict(foo=u'bar'))
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
+ self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9))
+
+ stream = StringIO(u"""foo: bar""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, dict(foo=u'bar'))
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
+ self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6))
+
+ def test_error_conditions(self):
+ stream = StringIO(u"""{""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ self.assertRaises(ParserError, loader.get_single_data)
+
+ def test_tab_error(self):
+ stream = StringIO(u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ self.assertRaises(ScannerError, loader.get_single_data)
+
+ def test_front_matter(self):
+ stream = StringIO(u"""---\nfoo: bar""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, dict(foo=u'bar'))
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1))
+ self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6))
+
+ # Initial indent (See: #6348)
+        stream = StringIO(u""" - foo: bar\n   baz: qux""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}])
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2))
+ self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4))
+ self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9))
+ self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9))
+
+
+class TestAnsibleLoaderVault(unittest.TestCase, YamlTestUtils):
+ def setUp(self):
+ self.vault_password = "hunter42"
+ vault_secret = TextVaultSecret(self.vault_password)
+ self.vault_secrets = [('vault_secret', vault_secret),
+ ('default', vault_secret)]
+ self.vault = vault.VaultLib(self.vault_secrets)
+
+ @property
+ def vault_secret(self):
+ return vault.match_encrypt_secret(self.vault_secrets)[1]
+
+ def test_wrong_password(self):
+ plaintext = u"Ansible"
+ bob_password = "this is a different password"
+
+ bobs_secret = TextVaultSecret(bob_password)
+ bobs_secrets = [('default', bobs_secret)]
+
+ bobs_vault = vault.VaultLib(bobs_secrets)
+
+ ciphertext = bobs_vault.encrypt(plaintext, vault.match_encrypt_secret(bobs_secrets)[1])
+
+ try:
+ self.vault.decrypt(ciphertext)
+ except Exception as e:
+ self.assertIsInstance(e, errors.AnsibleError)
+ self.assertEqual(e.message, 'Decryption failed (no vault secrets were found that could decrypt)')
+ else:
+ self.fail('decrypting with the wrong password should raise AnsibleError')
+
+ def _encrypt_plaintext(self, plaintext):
+ # Construct a yaml repr of a vault by hand
+ vaulted_var_bytes = self.vault.encrypt(plaintext, self.vault_secret)
+
+ # add yaml tag
+ vaulted_var = vaulted_var_bytes.decode()
+ lines = vaulted_var.splitlines()
+ lines2 = []
+ for line in lines:
+ lines2.append(' %s' % line)
+
+ vaulted_var = '\n'.join(lines2)
+ tagged_vaulted_var = u"""!vault |\n%s""" % vaulted_var
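+ # The result is a tagged block scalar, roughly:
+ # !vault |
+ #     $ANSIBLE_VAULT;1.1;AES256
+ #     6162633839... (hex-armored ciphertext)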
+ return tagged_vaulted_var
+
+ def _build_stream(self, yaml_text):
+ stream = NameStringIO(yaml_text)
+ stream.name = 'my.yml'
+ return stream
+
+ def _loader(self, stream):
+ return AnsibleLoader(stream, vault_secrets=self.vault.secrets)
+
+ def _load_yaml(self, yaml_text, password):
+ stream = self._build_stream(yaml_text)
+ loader = self._loader(stream)
+
+ data_from_yaml = loader.get_single_data()
+
+ return data_from_yaml
+
+ def test_dump_load_cycle(self):
+ avu = AnsibleVaultEncryptedUnicode.from_plaintext('The plaintext for test_dump_load_cycle.', self.vault, self.vault_secret)
+ self._dump_load_cycle(avu)
+
+ def test_embedded_vault_from_dump(self):
+ avu = AnsibleVaultEncryptedUnicode.from_plaintext('setec astronomy', self.vault, self.vault_secret)
+ blip = ['some string', 'another string', avu]
+ stream = NameStringIO()
+
+ self._dump_stream(blip, stream, dumper=AnsibleDumper)
+
+ stream.seek(0)
+
+ loader = self._loader(stream)
+
+ data_from_yaml = loader.get_data()
+
+ stream2 = NameStringIO(u'')
+ # verify we can dump the object again
+ self._dump_stream(data_from_yaml, stream2, dumper=AnsibleDumper)
+
+ def test_embedded_vault(self):
+ plaintext_var = u"""This is the plaintext string."""
+ tagged_vaulted_var = self._encrypt_plaintext(plaintext_var)
+ another_vaulted_var = self._encrypt_plaintext(plaintext_var)
+
+ different_var = u"""A different string that is not the same as the first one."""
+ different_vaulted_var = self._encrypt_plaintext(different_var)
+
+ yaml_text = u"""---\nwebster: daniel\noed: oxford\nthe_secret: %s\nanother_secret: %s\ndifferent_secret: %s""" % (tagged_vaulted_var,
+ another_vaulted_var,
+ different_vaulted_var)
+
+ data_from_yaml = self._load_yaml(yaml_text, self.vault_password)
+ vault_string = data_from_yaml['the_secret']
+
+ self.assertEqual(plaintext_var, data_from_yaml['the_secret'])
+
+ test_dict = {}
+ test_dict[vault_string] = 'did this work?'
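+ # Using the vaulted value as a dict key shows that it is hashable.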
+
+ self.assertEqual(vault_string.data, vault_string)
+
+ # This looks weird and useless, but the object in question has a custom __eq__
+ self.assertEqual(vault_string, vault_string)
+
+ another_vault_string = data_from_yaml['another_secret']
+ different_vault_string = data_from_yaml['different_secret']
+
+ self.assertEqual(vault_string, another_vault_string)
+ self.assertNotEqual(vault_string, different_vault_string)
+
+ # More testing of __eq__/__ne__
+ self.assertTrue('some string' != vault_string)
+ self.assertNotEqual('some string', vault_string)
+
+ # Note this is a compare of the str/unicode of these, they are different types
+ # so we want to test self == other, and other == self etc
+ self.assertEqual(plaintext_var, vault_string)
+ self.assertEqual(vault_string, plaintext_var)
+ self.assertFalse(plaintext_var != vault_string)
+ self.assertFalse(vault_string != plaintext_var)
+
+
+class TestAnsibleLoaderPlay(unittest.TestCase):
+
+ def setUp(self):
+ stream = NameStringIO(u"""
+ - hosts: localhost
+ vars:
+ number: 1
+ string: Ansible
+ utf8_string: Cafè Eñyei
+ dictionary:
+ webster: daniel
+ oed: oxford
+ list:
+ - a
+ - b
+ - 1
+ - 2
+ tasks:
+ - name: Test case
+ ping:
+ data: "{{ utf8_string }}"
+
+ - name: Test 2
+ ping:
+ data: "Cafè Eñyei"
+
+ - name: Test 3
+ command: "printf 'Cafè Eñyei\\n'"
+ """)
+ self.play_filename = '/path/to/myplay.yml'
+ stream.name = self.play_filename
+ self.loader = AnsibleLoader(stream)
+ self.data = self.loader.get_single_data()
+
+ def tearDown(self):
+ pass
+
+ def test_data_complete(self):
+ self.assertEqual(len(self.data), 1)
+ self.assertIsInstance(self.data, list)
+ self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
+
+ self.assertEqual(self.data[0][u'hosts'], u'localhost')
+
+ self.assertEqual(self.data[0][u'vars'][u'number'], 1)
+ self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible')
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei')
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'], {
+ u'webster': u'daniel',
+ u'oed': u'oxford'
+ })
+ self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2])
+
+ self.assertEqual(self.data[0][u'tasks'], [
+ {u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}},
+ {u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}},
+ {u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''},
+ ])
+
+ def walk(self, data):
+ # Make sure there's no str in the data
+ self.assertNotIsInstance(data, binary_type)
+
+ # Descend into various container types
+ if isinstance(data, text_type):
+ # strings are a sequence so we have to be explicit here
+ return
+ elif isinstance(data, (Sequence, Set)):
+ for element in data:
+ self.walk(element)
+ elif isinstance(data, Mapping):
+ for k, v in data.items():
+ self.walk(k)
+ self.walk(v)
+
+ # Scalars were all checked so we're good to go
+ return
+
+ def test_no_str_in_data(self):
+ # Checks that no strings are str type
+ self.walk(self.data)
+
+ def check_vars(self):
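+ # Helper called from test_line_numbers below; the name has no test_
+ # prefix, so the unittest runner does not pick it up on its own.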
+ # Numbers don't have line/col information yet
+ # self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21))
+
+ self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29))
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34))
+
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23))
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32))
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28))
+
+ self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23))
+ self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25))
+ self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25))
+ # Numbers don't have line/col info yet
+ # self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25))
+ # self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25))
+
+ def check_tasks(self):
+ #
+ # First Task
+ #
+ self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23))
+ self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29))
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25))
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31))
+
+ #
+ # Second Task
+ #
+ self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23))
+ self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, (self.play_filename, 20, 29))
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25))
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31))
+
+ #
+ # Third Task
+ #
+ self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23))
+ self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29))
+ self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32))
+
+ def test_line_numbers(self):
+ # Check the line/column numbers are correct
+ # Note: Remember, currently dicts begin at the start of their first entry
+ self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19))
+ self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26))
+ self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21))
+
+ self.check_vars()
+
+ self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 16, 21))
+
+ self.check_tasks()
diff --git a/test/units/parsing/yaml/test_objects.py b/test/units/parsing/yaml/test_objects.py
new file mode 100644
index 00000000..d4529eed
--- /dev/null
+++ b/test/units/parsing/yaml/test_objects.py
@@ -0,0 +1,164 @@
+# This file is part of Ansible
+# -*- coding: utf-8 -*-
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# Copyright 2016, Adrian Likins <alikins@redhat.com>
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.errors import AnsibleError
+
+from ansible.module_utils._text import to_native
+
+from ansible.parsing import vault
+from ansible.parsing.yaml.loader import AnsibleLoader
+
+# module under test
+from ansible.parsing.yaml import objects
+
+from units.mock.yaml_helper import YamlTestUtils
+from units.mock.vault_helper import TextVaultSecret
+
+
+class TestAnsibleVaultUnicodeNoVault(unittest.TestCase, YamlTestUtils):
+ def test_empty_init(self):
+ self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode)
+
+ def test_empty_string_init(self):
+ seq = ''.encode('utf8')
+ self.assert_values(seq)
+
+ def test_empty_byte_string_init(self):
+ seq = b''
+ self.assert_values(seq)
+
+ def _assert_values(self, avu, seq):
+ self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
+ self.assertTrue(avu.vault is None)
+ # AnsibleVaultEncryptedUnicode without a vault should never == any string
+ self.assertNotEqual(avu, seq)
+
+ def assert_values(self, seq):
+ avu = objects.AnsibleVaultEncryptedUnicode(seq)
+ self._assert_values(avu, seq)
+
+ def test_single_char(self):
+ seq = 'a'.encode('utf8')
+ self.assert_values(seq)
+
+ def test_string(self):
+ seq = 'some letters'
+ self.assert_values(seq)
+
+ def test_byte_string(self):
+ seq = 'some letters'.encode('utf8')
+ self.assert_values(seq)
+
+
+class TestAnsibleVaultEncryptedUnicode(unittest.TestCase, YamlTestUtils):
+ def setUp(self):
+ self.good_vault_password = "hunter42"
+ good_vault_secret = TextVaultSecret(self.good_vault_password)
+ self.good_vault_secrets = [('good_vault_password', good_vault_secret)]
+ self.good_vault = vault.VaultLib(self.good_vault_secrets)
+
+ # TODO: make this use two vault secret identities instead of two VaultSecret objects
+ self.wrong_vault_password = 'not-hunter42'
+ wrong_vault_secret = TextVaultSecret(self.wrong_vault_password)
+ self.wrong_vault_secrets = [('wrong_vault_password', wrong_vault_secret)]
+ self.wrong_vault = vault.VaultLib(self.wrong_vault_secrets)
+
+ self.vault = self.good_vault
+ self.vault_secrets = self.good_vault_secrets
+
+ def _loader(self, stream):
+ return AnsibleLoader(stream, vault_secrets=self.vault_secrets)
+
+ def test_dump_load_cycle(self):
+ aveu = self._from_plaintext('the test string for TestAnsibleVaultEncryptedUnicode.test_dump_load_cycle')
+ self._dump_load_cycle(aveu)
+
+ def assert_values(self, avu, seq):
+ self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
+
+ self.assertEqual(avu, seq)
+ self.assertTrue(avu.vault is self.vault)
+ self.assertIsInstance(avu.vault, vault.VaultLib)
+
+ def _from_plaintext(self, seq):
+ id_secret = vault.match_encrypt_secret(self.good_vault_secrets)
+ return objects.AnsibleVaultEncryptedUnicode.from_plaintext(seq, vault=self.vault, secret=id_secret[1])
+
+ def _from_ciphertext(self, ciphertext):
+ avu = objects.AnsibleVaultEncryptedUnicode(ciphertext)
+ avu.vault = self.vault
+ return avu
+
+ def test_empty_init(self):
+ self.assertRaises(TypeError, objects.AnsibleVaultEncryptedUnicode)
+
+ def test_empty_string_init_from_plaintext(self):
+ seq = ''
+ avu = self._from_plaintext(seq)
+ self.assert_values(avu, seq)
+
+ def test_empty_unicode_init_from_plaintext(self):
+ seq = u''
+ avu = self._from_plaintext(seq)
+ self.assert_values(avu, seq)
+
+ def test_string_from_plaintext(self):
+ seq = 'some letters'
+ avu = self._from_plaintext(seq)
+ self.assert_values(avu, seq)
+
+ def test_unicode_from_plaintext(self):
+ seq = u'some letters'
+ avu = self._from_plaintext(seq)
+ self.assert_values(avu, seq)
+
+ def test_unicode_from_plaintext_encode(self):
+ seq = u'some text here'
+ avu = self._from_plaintext(seq)
+ b_avu = avu.encode('utf-8', 'strict')
+ self.assertIsInstance(avu, objects.AnsibleVaultEncryptedUnicode)
+ self.assertEqual(b_avu, seq.encode('utf-8', 'strict'))
+ self.assertTrue(avu.vault is self.vault)
+ self.assertIsInstance(avu.vault, vault.VaultLib)
+
+ # TODO/FIXME: make sure a bad password fails differently from 'that's not encrypted'
+ def test_empty_string_wrong_password(self):
+ seq = ''
+ self.vault = self.wrong_vault
+ avu = self._from_plaintext(seq)
+
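+ # Equality forces decryption; with only the wrong secret loaded, the
+ # comparison should raise rather than return False.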
+ def compare(avu, seq):
+ return avu == seq
+
+ self.assertRaises(AnsibleError, compare, avu, seq)
+
+ def test_vaulted_utf8_value_37258(self):
+ seq = u"aöffü"
+ avu = self._from_plaintext(seq)
+ self.assert_values(avu, seq)
+
+ def test_str_vaulted_utf8_value_37258(self):
+ seq = u"aöffü"
+ avu = self._from_plaintext(seq)
+ self.assertEqual(str(avu), to_native(seq))
diff --git a/test/units/playbook/__init__.py b/test/units/playbook/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/playbook/__init__.py
diff --git a/test/units/playbook/role/__init__.py b/test/units/playbook/role/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/playbook/role/__init__.py
diff --git a/test/units/playbook/role/test_include_role.py b/test/units/playbook/role/test_include_role.py
new file mode 100644
index 00000000..93e222c4
--- /dev/null
+++ b/test/units/playbook/role/test_include_role.py
@@ -0,0 +1,248 @@
+# (c) 2016, Daniel Miranda <danielkza2@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch
+
+from ansible.playbook import Play
+from ansible.playbook.role_include import IncludeRole
+from ansible.playbook.task import Task
+from ansible.vars.manager import VariableManager
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+
+class TestIncludeRole(unittest.TestCase):
+
+ def setUp(self):
+
+ self.loader = DictDataLoader({
+ '/etc/ansible/roles/l1/tasks/main.yml': """
+ - shell: echo 'hello world from l1'
+ - include_role: name=l2
+ """,
+ '/etc/ansible/roles/l1/tasks/alt.yml': """
+ - shell: echo 'hello world from l1 alt'
+ - include_role: name=l2 tasks_from=alt defaults_from=alt
+ """,
+ '/etc/ansible/roles/l1/defaults/main.yml': """
+ test_variable: l1-main
+ l1_variable: l1-main
+ """,
+ '/etc/ansible/roles/l1/defaults/alt.yml': """
+ test_variable: l1-alt
+ l1_variable: l1-alt
+ """,
+ '/etc/ansible/roles/l2/tasks/main.yml': """
+ - shell: echo 'hello world from l2'
+ - include_role: name=l3
+ """,
+ '/etc/ansible/roles/l2/tasks/alt.yml': """
+ - shell: echo 'hello world from l2 alt'
+ - include_role: name=l3 tasks_from=alt defaults_from=alt
+ """,
+ '/etc/ansible/roles/l2/defaults/main.yml': """
+ test_variable: l2-main
+ l2_variable: l2-main
+ """,
+ '/etc/ansible/roles/l2/defaults/alt.yml': """
+ test_variable: l2-alt
+ l2_variable: l2-alt
+ """,
+ '/etc/ansible/roles/l3/tasks/main.yml': """
+ - shell: echo 'hello world from l3'
+ """,
+ '/etc/ansible/roles/l3/tasks/alt.yml': """
+ - shell: echo 'hello world from l3 alt'
+ """,
+ '/etc/ansible/roles/l3/defaults/main.yml': """
+ test_variable: l3-main
+ l3_variable: l3-main
+ """,
+ '/etc/ansible/roles/l3/defaults/alt.yml': """
+ test_variable: l3-alt
+ l3_variable: l3-alt
+ """
+ })
+
+ self.var_manager = VariableManager(loader=self.loader)
+
+ def tearDown(self):
+ pass
+
+ def flatten_tasks(self, tasks):
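+ # Recursively expand IncludeRole entries (via get_block_list) and
+ # nested blocks until only concrete Task objects remain.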
+ for task in tasks:
+ if isinstance(task, IncludeRole):
+ blocks, handlers = task.get_block_list(loader=self.loader)
+ for block in blocks:
+ for t in self.flatten_tasks(block.block):
+ yield t
+ elif isinstance(task, Task):
+ yield task
+ else:
+ for t in self.flatten_tasks(task.block):
+ yield t
+
+ def get_tasks_vars(self, play, tasks):
+ for task in self.flatten_tasks(tasks):
+ role = task._role
+ if not role:
+ continue
+
+ yield (role.get_name(),
+ self.var_manager.get_vars(play=play, task=task))
+
+ @patch('ansible.playbook.role.definition.unfrackpath',
+ mock_unfrackpath_noop)
+ def test_simple(self):
+
+ """Test one-level include with default tasks and variables"""
+
+ play = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ tasks=[
+ {'include_role': 'name=l3'}
+ ]
+ ), loader=self.loader, variable_manager=self.var_manager)
+
+ tasks = play.compile()
+ tested = False
+ for role, task_vars in self.get_tasks_vars(play, tasks):
+ tested = True
+ self.assertEqual(task_vars.get('l3_variable'), 'l3-main')
+ self.assertEqual(task_vars.get('test_variable'), 'l3-main')
+ self.assertTrue(tested)
+
+ @patch('ansible.playbook.role.definition.unfrackpath',
+ mock_unfrackpath_noop)
+ def test_simple_alt_files(self):
+
+ """Test one-level include with alternative tasks and variables"""
+
+ play = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ tasks=[{'include_role': 'name=l3 tasks_from=alt defaults_from=alt'}]),
+ loader=self.loader, variable_manager=self.var_manager)
+
+ tasks = play.compile()
+ tested = False
+ for role, task_vars in self.get_tasks_vars(play, tasks):
+ tested = True
+ self.assertEqual(task_vars.get('l3_variable'), 'l3-alt')
+ self.assertEqual(task_vars.get('test_variable'), 'l3-alt')
+ self.assertTrue(tested)
+
+ @patch('ansible.playbook.role.definition.unfrackpath',
+ mock_unfrackpath_noop)
+ def test_nested(self):
+
+ """
+ Test nested includes with default tasks and variables.
+
+ Variables from outer roles should be inherited, but overridden in inner
+ roles.
+ """
+
+ play = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ tasks=[
+ {'include_role': 'name=l1'}
+ ]
+ ), loader=self.loader, variable_manager=self.var_manager)
+
+ tasks = play.compile()
+ expected_roles = ['l1', 'l2', 'l3']
+ for role, task_vars in self.get_tasks_vars(play, tasks):
+ expected_roles.remove(role)
+ # Outer-most role must not have variables from inner roles yet
+ if role == 'l1':
+ self.assertEqual(task_vars.get('l1_variable'), 'l1-main')
+ self.assertEqual(task_vars.get('l2_variable'), None)
+ self.assertEqual(task_vars.get('l3_variable'), None)
+ self.assertEqual(task_vars.get('test_variable'), 'l1-main')
+ # Middle role must have variables from outer role, but not inner
+ elif role == 'l2':
+ self.assertEqual(task_vars.get('l1_variable'), 'l1-main')
+ self.assertEqual(task_vars.get('l2_variable'), 'l2-main')
+ self.assertEqual(task_vars.get('l3_variable'), None)
+ self.assertEqual(task_vars.get('test_variable'), 'l2-main')
+ # Inner role must have variables from both outer roles
+ elif role == 'l3':
+ self.assertEqual(task_vars.get('l1_variable'), 'l1-main')
+ self.assertEqual(task_vars.get('l2_variable'), 'l2-main')
+ self.assertEqual(task_vars.get('l3_variable'), 'l3-main')
+ self.assertEqual(task_vars.get('test_variable'), 'l3-main')
+ else:
+ self.fail('unexpected role: %s' % role)
+ self.assertFalse(expected_roles)
+
+ @patch('ansible.playbook.role.definition.unfrackpath',
+ mock_unfrackpath_noop)
+ def test_nested_alt_files(self):
+
+ """
+ Test nested includes with alternative tasks and variables.
+
+ Variables from outer roles should be inherited, but overridden in inner
+ roles.
+ """
+
+ play = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ tasks=[
+ {'include_role': 'name=l1 tasks_from=alt defaults_from=alt'}
+ ]
+ ), loader=self.loader, variable_manager=self.var_manager)
+
+ tasks = play.compile()
+ expected_roles = ['l1', 'l2', 'l3']
+ for role, task_vars in self.get_tasks_vars(play, tasks):
+ expected_roles.remove(role)
+ # Outer-most role must not have variables from inner roles yet
+ if role == 'l1':
+ self.assertEqual(task_vars.get('l1_variable'), 'l1-alt')
+ self.assertEqual(task_vars.get('l2_variable'), None)
+ self.assertEqual(task_vars.get('l3_variable'), None)
+ self.assertEqual(task_vars.get('test_variable'), 'l1-alt')
+ # Middle role must have variables from outer role, but not inner
+ elif role == 'l2':
+ self.assertEqual(task_vars.get('l1_variable'), 'l1-alt')
+ self.assertEqual(task_vars.get('l2_variable'), 'l2-alt')
+ self.assertEqual(task_vars.get('l3_variable'), None)
+ self.assertEqual(task_vars.get('test_variable'), 'l2-alt')
+ # Inner role must have variables from both outer roles
+ elif role == 'l3':
+ self.assertEqual(task_vars.get('l1_variable'), 'l1-alt')
+ self.assertEqual(task_vars.get('l2_variable'), 'l2-alt')
+ self.assertEqual(task_vars.get('l3_variable'), 'l3-alt')
+ self.assertEqual(task_vars.get('test_variable'), 'l3-alt')
+ else:
+ self.fail('unexpected role: %s' % role)
+ self.assertFalse(expected_roles)
diff --git a/test/units/playbook/role/test_role.py b/test/units/playbook/role/test_role.py
new file mode 100644
index 00000000..3aa30b8b
--- /dev/null
+++ b/test/units/playbook/role/test_role.py
@@ -0,0 +1,422 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.common._collections_compat import Container
+from ansible.playbook.block import Block
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+from ansible.playbook.role import Role
+from ansible.playbook.role.include import RoleInclude
+from ansible.playbook.role import hash_params
+
+
+class TestHashParams(unittest.TestCase):
+ def test(self):
+ params = {'foo': 'bar'}
+ res = hash_params(params)
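+ # hash_params() flattens role params into a hashable frozenset so they
+ # can serve as de-duplication keys for roles.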
+ self._assert_set(res)
+ self._assert_hashable(res)
+
+ def _assert_hashable(self, res):
+ a_dict = {}
+ try:
+ a_dict[res] = res
+ except TypeError as e:
+ self.fail('%s is not hashable: %s' % (res, e))
+
+ def _assert_set(self, res):
+ self.assertIsInstance(res, frozenset)
+
+ def test_dict_tuple(self):
+ params = {'foo': (1, 'bar',)}
+ res = hash_params(params)
+ self._assert_set(res)
+
+ def test_tuple(self):
+ params = (1, None, 'foo')
+ res = hash_params(params)
+ self._assert_hashable(res)
+
+ def test_tuple_dict(self):
+ params = ({'foo': 'bar'}, 37)
+ res = hash_params(params)
+ self._assert_hashable(res)
+
+ def test_list(self):
+ params = ['foo', 'bar', 1, 37, None]
+ res = hash_params(params)
+ self._assert_set(res)
+ self._assert_hashable(res)
+
+ def test_dict_with_list_value(self):
+ params = {'foo': [1, 4, 'bar']}
+ res = hash_params(params)
+ self._assert_set(res)
+ self._assert_hashable(res)
+
+ def test_empty_set(self):
+ params = set([])
+ res = hash_params(params)
+ self._assert_hashable(res)
+ self._assert_set(res)
+
+ def test_generator(self):
+ def my_generator():
+ for i in ['a', 1, None, {}]:
+ yield i
+
+ params = my_generator()
+ res = hash_params(params)
+ self._assert_hashable(res)
+
+ def test_container_but_not_iterable(self):
+ # This is a Container that is not iterable, which is unlikely but...
+ class MyContainer(Container):
+ def __init__(self, some_thing):
+ self.data = []
+ self.data.append(some_thing)
+
+ def __contains__(self, item):
+ return item in self.data
+
+ def __hash__(self):
+ return hash(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ def __call__(self):
+ return False
+
+ foo = MyContainer('foo bar')
+ params = foo
+
+ self.assertRaises(TypeError, hash_params, params)
+
+ def test_param_dict_dupe_values(self):
+ params1 = {'foo': False}
+ params2 = {'bar': False}
+
+ res1 = hash_params(params1)
+ res2 = hash_params(params2)
+
+ hash1 = hash(res1)
+ hash2 = hash(res2)
+ self.assertNotEqual(res1, res2)
+ self.assertNotEqual(hash1, hash2)
+
+ def test_param_dupe(self):
+ params1 = {
+ # 'from_files': {},
+ 'tags': [],
+ u'testvalue': False,
+ u'testvalue2': True,
+ # 'when': []
+ }
+ params2 = {
+ # 'from_files': {},
+ 'tags': [],
+ u'testvalue': True,
+ u'testvalue2': False,
+ # 'when': []
+ }
+ res1 = hash_params(params1)
+ res2 = hash_params(params2)
+
+ self.assertNotEqual(hash(res1), hash(res2))
+ self.assertNotEqual(res1, res2)
+
+ foo = {}
+ foo[res1] = 'params1'
+ foo[res2] = 'params2'
+
+ self.assertEqual(len(foo), 2)
+
+ del foo[res2]
+ self.assertEqual(len(foo), 1)
+
+ for key in foo:
+ self.assertTrue(key in foo)
+ self.assertIn(key, foo)
+
+
+class TestRole(unittest.TestCase):
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_tasks(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_tasks/tasks/main.yml": """
+ - shell: echo 'hello world'
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
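+ # Role.load() consults play.ROLE_CACHE, so the mock needs a real dict.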
+
+ i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(str(r), 'foo_tasks')
+ self.assertEqual(len(r._task_blocks), 1)
+ self.assertIsInstance(r._task_blocks[0], Block)
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_tasks_dir_vs_file(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_tasks/tasks/custom_main/foo.yml": """
+ - command: bar
+ """,
+ "/etc/ansible/roles/foo_tasks/tasks/custom_main.yml": """
+ - command: baz
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play, from_files=dict(tasks='custom_main'))
+
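+ # When both tasks/custom_main.yml and tasks/custom_main/ exist, the
+ # plain file should win over the directory.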
+ self.assertEqual(r._task_blocks[0]._ds[0]['command'], 'baz')
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_handlers(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_handlers/handlers/main.yml": """
+ - name: test handler
+ shell: echo 'hello world'
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_handlers', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(len(r._handler_blocks), 1)
+ self.assertIsInstance(r._handler_blocks[0], Block)
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_vars(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_vars/defaults/main.yml": """
+ foo: bar
+ """,
+ "/etc/ansible/roles/foo_vars/vars/main.yml": """
+ foo: bam
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(r._default_vars, dict(foo='bar'))
+ self.assertEqual(r._role_vars, dict(foo='bam'))
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_vars_dirs(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_vars/defaults/main/foo.yml": """
+ foo: bar
+ """,
+ "/etc/ansible/roles/foo_vars/vars/main/bar.yml": """
+ foo: bam
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(r._default_vars, dict(foo='bar'))
+ self.assertEqual(r._role_vars, dict(foo='bam'))
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_vars_nested_dirs(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """
+ foo: bar
+ """,
+ "/etc/ansible/roles/foo_vars/vars/main/bar/foo.yml": """
+ foo: bam
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(r._default_vars, dict(foo='bar'))
+ self.assertEqual(r._role_vars, dict(foo='bam'))
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_vars_nested_dirs_combined(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """
+ foo: bar
+ a: 1
+ """,
+ "/etc/ansible/roles/foo_vars/defaults/main/bar/foo.yml": """
+ foo: bam
+ b: 2
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(r._default_vars, dict(foo='bar', a=1, b=2))
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_vars_dir_vs_file(self):
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_vars/vars/main/foo.yml": """
+ foo: bar
+ """,
+ "/etc/ansible/roles/foo_vars/vars/main.yml": """
+ foo: bam
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(r._role_vars, dict(foo='bam'))
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_with_metadata(self):
+
+ fake_loader = DictDataLoader({
+ '/etc/ansible/roles/foo_metadata/meta/main.yml': """
+ allow_duplicates: true
+ dependencies:
+ - bar_metadata
+ galaxy_info:
+ a: 1
+ b: 2
+ c: 3
+ """,
+ '/etc/ansible/roles/bar_metadata/meta/main.yml': """
+ dependencies:
+ - baz_metadata
+ """,
+ '/etc/ansible/roles/baz_metadata/meta/main.yml': """
+ dependencies:
+ - bam_metadata
+ """,
+ '/etc/ansible/roles/bam_metadata/meta/main.yml': """
+ dependencies: []
+ """,
+ '/etc/ansible/roles/bad1_metadata/meta/main.yml': """
+ 1
+ """,
+ '/etc/ansible/roles/bad2_metadata/meta/main.yml': """
+ foo: bar
+ """,
+ '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """
+ dependencies: ['recursive2_metadata']
+ """,
+ '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """
+ dependencies: ['recursive1_metadata']
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.collections = None
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load('foo_metadata', play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ role_deps = r.get_direct_dependencies()
+
+ self.assertEqual(len(role_deps), 1)
+ self.assertEqual(type(role_deps[0]), Role)
+ self.assertEqual(len(role_deps[0].get_parents()), 1)
+ self.assertEqual(role_deps[0].get_parents()[0], r)
+ self.assertEqual(r._metadata.allow_duplicates, True)
+ self.assertEqual(r._metadata.galaxy_info, dict(a=1, b=2, c=3))
+
+ all_deps = r.get_all_dependencies()
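+ # The transitive chain is foo -> bar -> baz -> bam, returned
+ # deepest-first.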
+ self.assertEqual(len(all_deps), 3)
+ self.assertEqual(all_deps[0].get_name(), 'bam_metadata')
+ self.assertEqual(all_deps[1].get_name(), 'baz_metadata')
+ self.assertEqual(all_deps[2].get_name(), 'bar_metadata')
+
+ i = RoleInclude.load('bad1_metadata', play=mock_play, loader=fake_loader)
+ self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
+
+ i = RoleInclude.load('bad2_metadata', play=mock_play, loader=fake_loader)
+ self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
+
+ # TODO: re-enable this test once Ansible has proper role dep cycle detection
+ # that doesn't rely on stack overflows being recoverable (as they aren't in Py3.7+)
+ # see https://github.com/ansible/ansible/issues/61527
+ # i = RoleInclude.load('recursive1_metadata', play=mock_play, loader=fake_loader)
+ # self.assertRaises(AnsibleError, Role.load, i, play=mock_play)
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_load_role_complex(self):
+
+ # FIXME: add tests for the more complex uses of
+ # params and tags/when statements
+
+ fake_loader = DictDataLoader({
+ "/etc/ansible/roles/foo_complex/tasks/main.yml": """
+ - shell: echo 'hello world'
+ """,
+ })
+
+ mock_play = MagicMock()
+ mock_play.ROLE_CACHE = {}
+
+ i = RoleInclude.load(dict(role='foo_complex'), play=mock_play, loader=fake_loader)
+ r = Role.load(i, play=mock_play)
+
+ self.assertEqual(r.get_name(), "foo_complex")
diff --git a/test/units/playbook/test_attribute.py b/test/units/playbook/test_attribute.py
new file mode 100644
index 00000000..bdb37c11
--- /dev/null
+++ b/test/units/playbook/test_attribute.py
@@ -0,0 +1,57 @@
+# (c) 2015, Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.playbook.attribute import Attribute
+
+
+class TestAttribute(unittest.TestCase):
+
+ def setUp(self):
+ self.one = Attribute(priority=100)
+ self.two = Attribute(priority=0)
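+ # Attribute ordering is inverted on priority: a higher-priority
+ # attribute sorts first, so self.one (priority=100) compares as
+ # less-than self.two (priority=0).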
+
+ def test_eq(self):
+ self.assertTrue(self.one == self.one)
+ self.assertFalse(self.one == self.two)
+
+ def test_ne(self):
+ self.assertFalse(self.one != self.one)
+ self.assertTrue(self.one != self.two)
+
+ def test_lt(self):
+ self.assertFalse(self.one < self.one)
+ self.assertTrue(self.one < self.two)
+ self.assertFalse(self.two < self.one)
+
+ def test_gt(self):
+ self.assertFalse(self.one > self.one)
+ self.assertFalse(self.one > self.two)
+ self.assertTrue(self.two > self.one)
+
+ def test_le(self):
+ self.assertTrue(self.one <= self.one)
+ self.assertTrue(self.one <= self.two)
+ self.assertFalse(self.two <= self.one)
+
+ def test_ge(self):
+ self.assertTrue(self.one >= self.one)
+ self.assertFalse(self.one >= self.two)
+ self.assertTrue(self.two >= self.one)
diff --git a/test/units/playbook/test_base.py b/test/units/playbook/test_base.py
new file mode 100644
index 00000000..648200af
--- /dev/null
+++ b/test/units/playbook/test_base.py
@@ -0,0 +1,630 @@
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.playbook.attribute import FieldAttribute
+from ansible.template import Templar
+from ansible.playbook import base
+from ansible.utils.unsafe_proxy import AnsibleUnsafeBytes, AnsibleUnsafeText
+
+from units.mock.loader import DictDataLoader
+
+
+class TestBase(unittest.TestCase):
+ ClassUnderTest = base.Base
+
+ def setUp(self):
+ self.assorted_vars = {'var_2_key': 'var_2_value',
+ 'var_1_key': 'var_1_value',
+ 'a_list': ['a_list_1', 'a_list_2'],
+ 'a_dict': {'a_dict_key': 'a_dict_value'},
+ 'a_set': set(['set_1', 'set_2']),
+ 'a_int': 42,
+ 'a_float': 37.371,
+ 'a_bool': True,
+ 'a_none': None,
+ }
+ self.b = self.ClassUnderTest()
+
+ def _base_validate(self, ds):
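+ # Load the ds into a fresh instance wired to a fake parent, then
+ # post_validate it through a Templar backed by an empty DictDataLoader.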
+ bsc = self.ClassUnderTest()
+ parent = ExampleParentBaseSubClass()
+ bsc._parent = parent
+ bsc._dep_chain = [parent]
+ parent._dep_chain = None
+ bsc.load_data(ds)
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ bsc.post_validate(templar)
+ return bsc
+
+ def test(self):
+ self.assertIsInstance(self.b, base.Base)
+ self.assertIsInstance(self.b, self.ClassUnderTest)
+
+ # dump me doesnt return anything or change anything so not much to assert
+ def test_dump_me_empty(self):
+ self.b.dump_me()
+
+ def test_dump_me(self):
+ ds = {'environment': [],
+ 'vars': {'var_2_key': 'var_2_value',
+ 'var_1_key': 'var_1_value'}}
+ b = self._base_validate(ds)
+ b.dump_me()
+
+ def _assert_copy(self, orig, copy):
+ self.assertIsInstance(copy, self.ClassUnderTest)
+ self.assertIsInstance(copy, base.Base)
+ self.assertEqual(len(orig._valid_attrs), len(copy._valid_attrs))
+
+ sentinel = 'Empty DS'
+ self.assertEqual(getattr(orig, '_ds', sentinel), getattr(copy, '_ds', sentinel))
+
+ def test_copy_empty(self):
+ copy = self.b.copy()
+ self._assert_copy(self.b, copy)
+
+ def test_copy_with_vars(self):
+ ds = {'vars': self.assorted_vars}
+ b = self._base_validate(ds)
+
+ copy = b.copy()
+ self._assert_copy(b, copy)
+
+ def test_serialize(self):
+ ds = {'environment': [],
+ 'vars': self.assorted_vars
+ }
+ b = self._base_validate(ds)
+ ret = b.serialize()
+ self.assertIsInstance(ret, dict)
+
+ def test_deserialize(self):
+ data = {}
+
+ d = self.ClassUnderTest()
+ d.deserialize(data)
+ self.assertIn('run_once', d._attributes)
+ self.assertIn('check_mode', d._attributes)
+
+ data = {'no_log': False,
+ 'remote_user': None,
+ 'vars': self.assorted_vars,
+ 'environment': [],
+ 'run_once': False,
+ 'connection': None,
+ 'ignore_errors': False,
+ 'port': 22,
+ 'a_sentinel_with_an_unlikely_name': ['sure, a list']}
+
+ d = self.ClassUnderTest()
+ d.deserialize(data)
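+ # The sentinel key is not a valid attribute, so deserialize() must
+ # drop it while still restoring the known attributes.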
+ self.assertNotIn('a_sentinel_with_an_unlikely_name', d._attributes)
+ self.assertIn('run_once', d._attributes)
+ self.assertIn('check_mode', d._attributes)
+
+ def test_serialize_then_deserialize(self):
+ ds = {'environment': [],
+ 'vars': self.assorted_vars}
+ b = self._base_validate(ds)
+ copy = b.copy()
+ ret = b.serialize()
+ b.deserialize(ret)
+ c = self.ClassUnderTest()
+ c.deserialize(ret)
+ # TODO: not a great test, but coverage...
+ self.maxDiff = None
+ self.assertDictEqual(b.serialize(), copy.serialize())
+ self.assertDictEqual(c.serialize(), copy.serialize())
+
+ def test_post_validate_empty(self):
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ ret = self.b.post_validate(templar)
+ self.assertIsNone(ret)
+
+ def test_get_ds_none(self):
+ ds = self.b.get_ds()
+ self.assertIsNone(ds)
+
+ def test_load_data_ds_is_none(self):
+ self.assertRaises(AssertionError, self.b.load_data, None)
+
+ def test_load_data_invalid_attr(self):
+ ds = {'not_a_valid_attr': [],
+ 'other': None}
+
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_load_data_invalid_attr_type(self):
+ ds = {'environment': True}
+
+ # environment is supposed to be a list, but load_data() does no type
+ # checking, so the bogus value is stored as-is.
+ ret = self.b.load_data(ds)
+ self.assertEqual(True, ret._attributes['environment'])
+
+ def test_post_validate(self):
+ ds = {'environment': [],
+ 'port': 443}
+ b = self._base_validate(ds)
+ self.assertEqual(b.port, 443)
+ self.assertEqual(b.environment, [])
+
+ def test_post_validate_invalid_attr_types(self):
+ ds = {'environment': [],
+ 'port': 'some_port'}
+ b = self._base_validate(ds)
+ self.assertEqual(b.port, 'some_port')
+
+ def test_squash(self):
+ data = self.b.serialize()
+ self.b.squash()
+ squashed_data = self.b.serialize()
+ # squash() should flip the 'squashed' flag in the serialized data
+ self.assertFalse(data['squashed'])
+ self.assertTrue(squashed_data['squashed'])
+
+ def test_vars(self):
+ # vars as a dict.
+ ds = {'environment': [],
+ 'vars': {'var_2_key': 'var_2_value',
+ 'var_1_key': 'var_1_value'}}
+ b = self._base_validate(ds)
+ self.assertEqual(b.vars['var_1_key'], 'var_1_value')
+
+ def test_vars_list_of_dicts(self):
+ ds = {'environment': [],
+ 'vars': [{'var_2_key': 'var_2_value'},
+ {'var_1_key': 'var_1_value'}]
+ }
+ b = self._base_validate(ds)
+ self.assertEqual(b.vars['var_1_key'], 'var_1_value')
+
+ def test_vars_not_dict_or_list(self):
+ ds = {'environment': [],
+ 'vars': 'I am a string, not a dict or a list of dicts'}
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_vars_not_valid_identifier(self):
+ ds = {'environment': [],
+ 'vars': [{'var_2_key': 'var_2_value'},
+ {'1an-invalid identifer': 'var_1_value'}]
+ }
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_vars_is_list_but_not_of_dicts(self):
+ ds = {'environment': [],
+ 'vars': ['foo', 'bar', 'this is a string not a dict']
+ }
+ self.assertRaises(AnsibleParserError, self.b.load_data, ds)
+
+ def test_vars_is_none(self):
+ # If vars is None, we should get an empty dict back
+ ds = {'environment': [],
+ 'vars': None
+ }
+ b = self._base_validate(ds)
+ self.assertEqual(b.vars, {})
+
+ def test_validate_empty(self):
+ self.b.validate()
+ self.assertTrue(self.b._validated)
+
+ def test_getters(self):
+ # not sure why these exist, but here are tests anyway
+ loader = self.b.get_loader()
+ variable_manager = self.b.get_variable_manager()
+ self.assertEqual(loader, self.b._loader)
+ self.assertEqual(variable_manager, self.b._variable_manager)
+
+
+class TestExtendValue(unittest.TestCase):
+ # _extend_value could be a module-level function or a staticmethod, but
+ # since it's not, the tests live here.
+ def test_extend_value_list_newlist(self):
+ b = base.Base()
+ value_list = ['first', 'second']
+ new_value_list = ['new_first', 'new_second']
+ ret = b._extend_value(value_list, new_value_list)
+ self.assertEqual(value_list + new_value_list, ret)
+
+ def test_extend_value_list_newlist_prepend(self):
+ b = base.Base()
+ value_list = ['first', 'second']
+ new_value_list = ['new_first', 'new_second']
+ ret_prepend = b._extend_value(value_list, new_value_list, prepend=True)
+ self.assertEqual(new_value_list + value_list, ret_prepend)
+
+ def test_extend_value_newlist_list(self):
+ b = base.Base()
+ value_list = ['first', 'second']
+ new_value_list = ['new_first', 'new_second']
+ ret = b._extend_value(new_value_list, value_list)
+ self.assertEqual(new_value_list + value_list, ret)
+
+ def test_extend_value_newlist_list_prepend(self):
+ b = base.Base()
+ value_list = ['first', 'second']
+ new_value_list = ['new_first', 'new_second']
+ ret = b._extend_value(new_value_list, value_list, prepend=True)
+ self.assertEqual(value_list + new_value_list, ret)
+
+ def test_extend_value_string_newlist(self):
+ b = base.Base()
+ some_string = 'some string'
+ new_value_list = ['new_first', 'new_second']
+ ret = b._extend_value(some_string, new_value_list)
+ self.assertEqual([some_string] + new_value_list, ret)
+
+ def test_extend_value_string_newstring(self):
+ b = base.Base()
+ some_string = 'some string'
+ new_value_string = 'this is the new values'
+ ret = b._extend_value(some_string, new_value_string)
+ self.assertEqual([some_string, new_value_string], ret)
+
+ def test_extend_value_list_newstring(self):
+ b = base.Base()
+ value_list = ['first', 'second']
+ new_value_string = 'this is the new values'
+ ret = b._extend_value(value_list, new_value_string)
+ self.assertEqual(value_list + [new_value_string], ret)
+
+ def test_extend_value_none_none(self):
+ b = base.Base()
+ ret = b._extend_value(None, None)
+ self.assertEqual(len(ret), 0)
+ self.assertFalse(ret)
+
+ def test_extend_value_none_list(self):
+ b = base.Base()
+ ret = b._extend_value(None, ['foo'])
+ self.assertEqual(ret, ['foo'])
+
+
+class ExampleException(Exception):
+ pass
+
+
+# naming fails me...
+class ExampleParentBaseSubClass(base.Base):
+ _test_attr_parent_string = FieldAttribute(isa='string', default='A string attr for a class that may be a parent for testing')
+
+ def __init__(self):
+
+ super(ExampleParentBaseSubClass, self).__init__()
+ self._dep_chain = None
+
+ def get_dep_chain(self):
+ return self._dep_chain
+
+
+class ExampleSubClass(base.Base):
+ _test_attr_blip = FieldAttribute(isa='string', default='example sub class test_attr_blip',
+ inherit=False,
+ always_post_validate=True)
+
+ def __init__(self):
+ super(ExampleSubClass, self).__init__()
+
+ def get_dep_chain(self):
+ if self._parent:
+ return self._parent.get_dep_chain()
+ else:
+ return None
+
+
+class BaseSubClass(base.Base):
+ _name = FieldAttribute(isa='string', default='', always_post_validate=True)
+ _test_attr_bool = FieldAttribute(isa='bool', always_post_validate=True)
+ _test_attr_int = FieldAttribute(isa='int', always_post_validate=True)
+ _test_attr_float = FieldAttribute(isa='float', default=3.14159, always_post_validate=True)
+ _test_attr_list = FieldAttribute(isa='list', listof=string_types, always_post_validate=True)
+ _test_attr_list_no_listof = FieldAttribute(isa='list', always_post_validate=True)
+ _test_attr_list_required = FieldAttribute(isa='list', listof=string_types, required=True,
+ default=list, always_post_validate=True)
+ _test_attr_string = FieldAttribute(isa='string', default='the_test_attr_string_default_value')
+ _test_attr_string_required = FieldAttribute(isa='string', required=True,
+ default='the_test_attr_string_default_value')
+ _test_attr_percent = FieldAttribute(isa='percent', always_post_validate=True)
+ _test_attr_set = FieldAttribute(isa='set', default=set, always_post_validate=True)
+ _test_attr_dict = FieldAttribute(isa='dict', default=lambda: {'a_key': 'a_value'}, always_post_validate=True)
+ _test_attr_class = FieldAttribute(isa='class', class_type=ExampleSubClass)
+ _test_attr_class_post_validate = FieldAttribute(isa='class', class_type=ExampleSubClass,
+ always_post_validate=True)
+ _test_attr_unknown_isa = FieldAttribute(isa='not_a_real_isa', always_post_validate=True)
+ _test_attr_example = FieldAttribute(isa='string', default='the_default',
+ always_post_validate=True)
+ _test_attr_none = FieldAttribute(isa='string', always_post_validate=True)
+ _test_attr_preprocess = FieldAttribute(isa='string', default='the default for preprocess')
+ _test_attr_method = FieldAttribute(isa='string', default='some attr with a getter',
+ always_post_validate=True)
+ _test_attr_method_missing = FieldAttribute(isa='string', default='some attr with a missing getter',
+ always_post_validate=True)
+
+ def _get_attr_test_attr_method(self):
+ return 'foo bar'
+
+ def _validate_test_attr_example(self, attr, name, value):
+ if not isinstance(value, str):
+ raise ExampleException('_test_attr_example is not a string: %s type=%s' % (value, type(value)))
+
+ def _post_validate_test_attr_example(self, attr, value, templar):
+ after_template_value = templar.template(value)
+ return after_template_value
+
+ def _post_validate_test_attr_none(self, attr, value, templar):
+ return None
+
+ def _get_parent_attribute(self, attr, extend=False, prepend=False):
+ value = None
+ try:
+ value = self._attributes[attr]
+ if self._parent and (value is None or extend):
+ parent_value = getattr(self._parent, attr, None)
+ if extend:
+ value = self._extend_value(value, parent_value, prepend)
+ else:
+ value = parent_value
+ except KeyError:
+ pass
+
+ return value
+
+
+# terrible name, but it is a TestBase subclass for testing subclasses of Base
+class TestBaseSubClass(TestBase):
+ ClassUnderTest = BaseSubClass
+
+ def _base_validate(self, ds):
+ ds['test_attr_list_required'] = []
+ return super(TestBaseSubClass, self)._base_validate(ds)
+
+ def test_attr_bool(self):
+ ds = {'test_attr_bool': True}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_bool, True)
+
+ def test_attr_int(self):
+ MOST_RANDOM_NUMBER = 37
+ ds = {'test_attr_int': MOST_RANDOM_NUMBER}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_int, MOST_RANDOM_NUMBER)
+
+ def test_attr_int_del(self):
+ MOST_RANDOM_NUMBER = 37
+ ds = {'test_attr_int': MOST_RANDOM_NUMBER}
+ bsc = self._base_validate(ds)
+ del bsc.test_attr_int
+ self.assertNotIn('test_attr_int', bsc._attributes)
+
+ def test_attr_float(self):
+ roughly_pi = 4.0
+ ds = {'test_attr_float': roughly_pi}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_float, roughly_pi)
+
+ def test_attr_percent(self):
+ percentage = '90%'
+ percentage_float = 90.0
+ ds = {'test_attr_percent': percentage}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_percent, percentage_float)
+
+ # This method works hard and gives it its all and everything it's got. It doesn't
+ # leave anything on the field. It deserves to pass. It has earned it.
+ def test_attr_percent_110_percent(self):
+ percentage = '110.11%'
+ percentage_float = 110.11
+ ds = {'test_attr_percent': percentage}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_percent, percentage_float)
+
+ # This method is just here for the paycheck.
+ def test_attr_percent_60_no_percent_sign(self):
+ percentage = '60'
+ percentage_float = 60.0
+ ds = {'test_attr_percent': percentage}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_percent, percentage_float)
+
+ def test_attr_set(self):
+ test_set = set(['first_string_in_set', 'second_string_in_set'])
+ ds = {'test_attr_set': test_set}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_set, test_set)
+
+ def test_attr_set_string(self):
+ test_data = ['something', 'other']
+ test_value = ','.join(test_data)
+ ds = {'test_attr_set': test_value}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_set, set(test_data))
+
+ def test_attr_set_not_string_or_list(self):
+ test_value = 37.1
+ ds = {'test_attr_set': test_value}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_set, set([test_value]))
+
+ def test_attr_dict(self):
+ test_dict = {'a_different_key': 'a_different_value'}
+ ds = {'test_attr_dict': test_dict}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_dict, test_dict)
+
+ def test_attr_dict_string(self):
+ test_value = 'just_some_random_string'
+ ds = {'test_attr_dict': test_value}
+ self.assertRaisesRegexp(AnsibleParserError, 'is not a dictionary', self._base_validate, ds)
+
+ def test_attr_class(self):
+ esc = ExampleSubClass()
+ ds = {'test_attr_class': esc}
+ bsc = self._base_validate(ds)
+ self.assertIs(bsc.test_attr_class, esc)
+
+ def test_attr_class_wrong_type(self):
+ not_a_esc = ExampleSubClass
+ ds = {'test_attr_class': not_a_esc}
+ bsc = self._base_validate(ds)
+ self.assertIs(bsc.test_attr_class, not_a_esc)
+
+ def test_attr_class_post_validate(self):
+ esc = ExampleSubClass()
+ ds = {'test_attr_class_post_validate': esc}
+ bsc = self._base_validate(ds)
+ self.assertIs(bsc.test_attr_class_post_validate, esc)
+
+ def test_attr_class_post_validate_class_not_instance(self):
+ not_a_esc = ExampleSubClass
+ ds = {'test_attr_class_post_validate': not_a_esc}
+ self.assertRaisesRegexp(AnsibleParserError, 'is not a valid.*got a.*Meta.*instead',
+ self._base_validate, ds)
+
+ def test_attr_class_post_validate_wrong_class(self):
+ not_a_esc = 37
+ ds = {'test_attr_class_post_validate': not_a_esc}
+ self.assertRaisesRegexp(AnsibleParserError, 'is not a valid.*got a.*int.*instead',
+ self._base_validate, ds)
+
+ def test_attr_remote_user(self):
+ ds = {'remote_user': 'testuser'}
+ bsc = self._base_validate(ds)
+ # TODO: attempt to verify we called the parent getters etc.
+ self.assertEqual(bsc.remote_user, 'testuser')
+
+ def test_attr_example_undefined(self):
+ ds = {'test_attr_example': '{{ some_var_that_shouldnt_exist_to_test_omit }}'}
+ exc_regex_str = 'test_attr_example.*has an invalid value, which includes an undefined variable.*some_var_that_shouldnt*'
+ self.assertRaisesRegexp(AnsibleParserError, exc_regex_str,
+ self._base_validate, ds)
+
+ def test_attr_name_undefined(self):
+ ds = {'name': '{{ some_var_that_shouldnt_exist_to_test_omit }}'}
+ bsc = self._base_validate(ds)
+ # the attribute 'name' is special-cased in post_validate
+ self.assertEqual(bsc.name, '{{ some_var_that_shouldnt_exist_to_test_omit }}')
+
+ def test_subclass_validate_method(self):
+ ds = {'test_attr_list': ['string_list_item_1', 'string_list_item_2'],
+ 'test_attr_example': 'the_test_attr_example_value_string'}
+ # Not throwing an exception here is the test
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_example, 'the_test_attr_example_value_string')
+
+ def test_subclass_validate_method_invalid(self):
+ ds = {'test_attr_example': [None]}
+ self.assertRaises(ExampleException, self._base_validate, ds)
+
+ def test_attr_none(self):
+ ds = {'test_attr_none': 'foo'}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_none, None)
+
+ def test_attr_string(self):
+ the_string_value = "the new test_attr_string_value"
+ ds = {'test_attr_string': the_string_value}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_string, the_string_value)
+
+ def test_attr_string_invalid_list(self):
+ ds = {'test_attr_string': ['The new test_attr_string', 'value, however in a list']}
+ self.assertRaises(AnsibleParserError, self._base_validate, ds)
+
+ def test_attr_string_required(self):
+ the_string_value = "the new test_attr_string_required_value"
+ ds = {'test_attr_string_required': the_string_value}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_string_required, the_string_value)
+
+ def test_attr_list_invalid(self):
+ ds = {'test_attr_list': {}}
+ self.assertRaises(AnsibleParserError, self._base_validate, ds)
+
+ def test_attr_list(self):
+ string_list = ['foo', 'bar']
+ ds = {'test_attr_list': string_list}
+ bsc = self._base_validate(ds)
+ self.assertEqual(string_list, bsc._attributes['test_attr_list'])
+
+ def test_attr_list_none(self):
+ ds = {'test_attr_list': None}
+ bsc = self._base_validate(ds)
+ self.assertEqual(None, bsc._attributes['test_attr_list'])
+
+ def test_attr_list_no_listof(self):
+ test_list = ['foo', 'bar', 123]
+ ds = {'test_attr_list_no_listof': test_list}
+ bsc = self._base_validate(ds)
+ self.assertEqual(test_list, bsc._attributes['test_attr_list_no_listof'])
+
+ def test_attr_list_required(self):
+ string_list = ['foo', 'bar']
+ ds = {'test_attr_list_required': string_list}
+ bsc = self.ClassUnderTest()
+ bsc.load_data(ds)
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ bsc.post_validate(templar)
+ self.assertEqual(string_list, bsc._attributes['test_attr_list_required'])
+
+ def test_attr_list_required_empty_string(self):
+ string_list = [""]
+ ds = {'test_attr_list_required': string_list}
+ bsc = self.ClassUnderTest()
+ bsc.load_data(ds)
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+ self.assertRaisesRegexp(AnsibleParserError, 'cannot have empty values',
+ bsc.post_validate, templar)
+
+ def test_attr_unknown(self):
+ a_list = ['some string']
+ ds = {'test_attr_unknown_isa': a_list}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_unknown_isa, a_list)
+
+ def test_attr_method(self):
+ ds = {'test_attr_method': 'value from the ds'}
+ bsc = self._base_validate(ds)
+ # The value returned by the subclass's _get_attr_test_attr_method
+ self.assertEqual(bsc.test_attr_method, 'foo bar')
+
+ def test_attr_method_missing(self):
+ a_string = 'The value set from the ds'
+ ds = {'test_attr_method_missing': a_string}
+ bsc = self._base_validate(ds)
+ self.assertEqual(bsc.test_attr_method_missing, a_string)
+
+ def test_get_validated_value_string_rewrap_unsafe(self):
+ attribute = FieldAttribute(isa='string')
+ value = AnsibleUnsafeText(u'bar')
+ templar = Templar(None)
+ bsc = self.ClassUnderTest()
+ result = bsc.get_validated_value('foo', attribute, value, templar)
+ self.assertIsInstance(result, AnsibleUnsafeText)
+ self.assertEqual(result, AnsibleUnsafeText(u'bar'))
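+
+ # A sketch of what the test above relies on (same fixtures as above):
+ # get_validated_value() may coerce the value to the attribute's isa, but
+ # it re-wraps the result so the unsafe marking is not lost along the way:
+ #
+ #   attribute = FieldAttribute(isa='string')
+ #   result = bsc.get_validated_value('foo', attribute, AnsibleUnsafeText(u'bar'), Templar(None))
+ #   isinstance(result, AnsibleUnsafeText)  # True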
diff --git a/test/units/playbook/test_block.py b/test/units/playbook/test_block.py
new file mode 100644
index 00000000..48471237
--- /dev/null
+++ b/test/units/playbook/test_block.py
@@ -0,0 +1,82 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+
+
+class TestBlock(unittest.TestCase):
+
+ def test_construct_empty_block(self):
+ b = Block()
+
+ def test_construct_block_with_role(self):
+ pass
+
+ def test_load_block_simple(self):
+ ds = dict(
+ block=[],
+ rescue=[],
+ always=[],
+ # otherwise=[],
+ )
+ b = Block.load(ds)
+ self.assertEqual(b.block, [])
+ self.assertEqual(b.rescue, [])
+ self.assertEqual(b.always, [])
+ # not currently used
+ # self.assertEqual(b.otherwise, [])
+
+ def test_load_block_with_tasks(self):
+ ds = dict(
+ block=[dict(action='block')],
+ rescue=[dict(action='rescue')],
+ always=[dict(action='always')],
+ # otherwise=[dict(action='otherwise')],
+ )
+ b = Block.load(ds)
+ self.assertEqual(len(b.block), 1)
+ self.assertIsInstance(b.block[0], Task)
+ self.assertEqual(len(b.rescue), 1)
+ self.assertIsInstance(b.rescue[0], Task)
+ self.assertEqual(len(b.always), 1)
+ self.assertIsInstance(b.always[0], Task)
+ # not currently used
+ # self.assertEqual(len(b.otherwise), 1)
+ # self.assertIsInstance(b.otherwise[0], Task)
+
+ def test_load_implicit_block(self):
+ ds = [dict(action='foo')]
+ b = Block.load(ds)
+ self.assertEqual(len(b.block), 1)
+ self.assertIsInstance(b.block[0], Task)
+
+ def test_deserialize(self):
+ ds = dict(
+ block=[dict(action='block')],
+ rescue=[dict(action='rescue')],
+ always=[dict(action='always')],
+ )
+ b = Block.load(ds)
+ data = dict(parent=ds, parent_type='Block')
+ b.deserialize(data)
+ self.assertIsInstance(b._parent, Block)
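+
+# A sketch of the round trip exercised above (names as in this module):
+# serialized data carries the parent as plain data plus a 'parent_type'
+# discriminator, and deserialize() rebuilds the typed parent from it:
+#
+#   data = dict(parent=dict(block=[]), parent_type='Block')
+#   b = Block()
+#   b.deserialize(data)
+#   isinstance(b._parent, Block)  # True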
diff --git a/test/units/playbook/test_collectionsearch.py b/test/units/playbook/test_collectionsearch.py
new file mode 100644
index 00000000..be40d85e
--- /dev/null
+++ b/test/units/playbook/test_collectionsearch.py
@@ -0,0 +1,78 @@
+# (c) 2020 Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleParserError
+from ansible.playbook.play import Play
+from ansible.playbook.task import Task
+from ansible.playbook.block import Block
+from ansible.playbook.collectionsearch import CollectionSearch
+
+import pytest
+
+
+def test_collection_static_warning(capsys):
+ """Test that collection name is not templated.
+
+ Also, make sure that users see the warning message for the referenced name.
+ """
+ collection_name = "foo.{{bar}}"
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ connection='local',
+ collections=collection_name,
+ ))
+ assert collection_name in p.collections
+ std_out, std_err = capsys.readouterr()
+ assert '[WARNING]: "collections" is not templatable, but we found: %s' % collection_name in std_err
+ assert '' == std_out
+
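+# The play-level YAML equivalent of the datastructure above (a sketch; the
+# warning text is the same one asserted against std_err):
+#
+#   - hosts: foo
+#     collections:
+#       - foo.{{bar}}  # [WARNING]: "collections" is not templatable, but we found: foo.{{bar}}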
+
+def test_collection_invalid_data_play():
+ """Test that collection as a dict at the play level fails with parser error"""
+ collection_name = {'name': 'foo'}
+ with pytest.raises(AnsibleParserError):
+ Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ connection='local',
+ collections=collection_name,
+ ))
+
+
+def test_collection_invalid_data_task():
+ """Test that collection as a dict at the task level fails with parser error"""
+ collection_name = {'name': 'foo'}
+ with pytest.raises(AnsibleParserError):
+ Task.load(dict(
+ name="test task",
+ collections=collection_name,
+ ))
+
+
+def test_collection_invalid_data_block():
+ """Test that collection as a dict at the block level fails with parser error"""
+ collection_name = {'name': 'foo'}
+ with pytest.raises(AnsibleParserError):
+ Block.load(dict(
+ block=[dict(name="test task", collections=collection_name)]
+ ))
diff --git a/test/units/playbook/test_conditional.py b/test/units/playbook/test_conditional.py
new file mode 100644
index 00000000..ce351484
--- /dev/null
+++ b/test/units/playbook/test_conditional.py
@@ -0,0 +1,240 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.mock.loader import DictDataLoader
+from units.compat.mock import MagicMock
+
+from ansible.plugins.strategy import SharedPluginLoaderObj
+from ansible.template import Templar
+from ansible import errors
+
+from ansible.playbook import conditional
+
+
+class TestConditional(unittest.TestCase):
+ def setUp(self):
+ self.loader = DictDataLoader({})
+ self.cond = conditional.Conditional(loader=self.loader)
+ self.shared_loader = SharedPluginLoaderObj()
+ self.templar = Templar(loader=self.loader, variables={})
+
+ def _eval_con(self, when=None, variables=None):
+ when = when or []
+ variables = variables or {}
+ self.cond.when = when
+ ret = self.cond.evaluate_conditional(self.templar, variables)
+ return ret
+
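+ # The helper mirrors 'when:' semantics in playbooks: a list of conditionals
+ # is AND-ed, so every entry must evaluate true (see the mixed
+ # defined/undefined tests below). As a sketch:
+ #
+ #   self.cond.when = [u"True", u"False"]
+ #   self.cond.evaluate_conditional(self.templar, {})  # -> False
+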
+ def test_false(self):
+ when = [u"False"]
+ ret = self._eval_con(when, {})
+ self.assertFalse(ret)
+
+ def test_true(self):
+ when = [u"True"]
+ ret = self._eval_con(when, {})
+ self.assertTrue(ret)
+
+ def test_true_boolean(self):
+ self.cond.when = [True]
+ m = MagicMock()
+ ret = self.cond.evaluate_conditional(m, {})
+ self.assertTrue(ret)
+ self.assertFalse(m.is_template.called)
+
+ def test_false_boolean(self):
+ self.cond.when = [False]
+ m = MagicMock()
+ ret = self.cond.evaluate_conditional(m, {})
+ self.assertFalse(ret)
+ self.assertFalse(m.is_template.called)
+
+ def test_undefined(self):
+ when = [u"{{ some_undefined_thing }}"]
+ self.assertRaisesRegexp(errors.AnsibleError, "The conditional check '{{ some_undefined_thing }}' failed",
+ self._eval_con, when, {})
+
+ def test_defined(self):
+ variables = {'some_defined_thing': True}
+ when = [u"{{ some_defined_thing }}"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_dict_defined_values(self):
+ variables = {'dict_value': 1,
+ 'some_defined_dict': {'key1': 'value1',
+ 'key2': '{{ dict_value }}'}}
+
+ when = [u"some_defined_dict"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_dict_defined_values_is_defined(self):
+ variables = {'dict_value': 1,
+ 'some_defined_dict': {'key1': 'value1',
+ 'key2': '{{ dict_value }}'}}
+
+ when = [u"some_defined_dict.key1 is defined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_dict_defined_multiple_values_is_defined(self):
+ variables = {'dict_value': 1,
+ 'some_defined_dict': {'key1': 'value1',
+ 'key2': '{{ dict_value }}'}}
+
+ when = [u"some_defined_dict.key1 is defined",
+ u"some_defined_dict.key2 is not undefined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_dict_undefined_values(self):
+ variables = {'dict_value': 1,
+ 'some_defined_dict_with_undefined_values': {'key1': 'value1',
+ 'key2': '{{ dict_value }}',
+ 'key3': '{{ undefined_dict_value }}'
+ }}
+
+ when = [u"some_defined_dict_with_undefined_values is defined"]
+ self.assertRaisesRegexp(errors.AnsibleError,
+ "The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
+ self._eval_con,
+ when, variables)
+
+ def test_nested_hostvars_undefined_values(self):
+ variables = {'dict_value': 1,
+ 'hostvars': {'host1': {'key1': 'value1',
+ 'key2': '{{ dict_value }}'},
+ 'host2': '{{ dict_value }}',
+ 'host3': '{{ undefined_dict_value }}',
+ # no host4
+ },
+ 'some_dict': {'some_dict_key1': '{{ hostvars["host3"] }}'}
+ }
+
+ when = [u"some_dict.some_dict_key1 == hostvars['host3']"]
+ # self._eval_con(when, variables)
+ self.assertRaisesRegexp(errors.AnsibleError,
+ r"The conditional check 'some_dict.some_dict_key1 == hostvars\['host3'\]' failed",
+ # "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed",
+ # "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed.",
+ self._eval_con,
+ when, variables)
+
+ def test_dict_undefined_values_bare(self):
+ variables = {'dict_value': 1,
+ 'some_defined_dict_with_undefined_values': {'key1': 'value1',
+ 'key2': '{{ dict_value }}',
+ 'key3': '{{ undefined_dict_value }}'
+ }}
+
+ # raises an exception when a non-string conditional is passed to extract_defined_undefined()
+ when = [u"some_defined_dict_with_undefined_values"]
+ self.assertRaisesRegexp(errors.AnsibleError,
+ "The conditional check 'some_defined_dict_with_undefined_values' failed.",
+ self._eval_con,
+ when, variables)
+
+ def test_dict_undefined_values_is_defined(self):
+ variables = {'dict_value': 1,
+ 'some_defined_dict_with_undefined_values': {'key1': 'value1',
+ 'key2': '{{ dict_value }}',
+ 'key3': '{{ undefined_dict_value }}'
+ }}
+
+ when = [u"some_defined_dict_with_undefined_values is defined"]
+ self.assertRaisesRegexp(errors.AnsibleError,
+ "The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
+ self._eval_con,
+ when, variables)
+
+ def test_is_defined(self):
+ variables = {'some_defined_thing': True}
+ when = [u"some_defined_thing is defined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_is_undefined(self):
+ variables = {'some_defined_thing': True}
+ when = [u"some_defined_thing is undefined"]
+ ret = self._eval_con(when, variables)
+ self.assertFalse(ret)
+
+ def test_is_undefined_and_defined(self):
+ variables = {'some_defined_thing': True}
+ when = [u"some_defined_thing is undefined", u"some_defined_thing is defined"]
+ ret = self._eval_con(when, variables)
+ self.assertFalse(ret)
+
+ def test_is_undefined_and_defined_reversed(self):
+ variables = {'some_defined_thing': True}
+ when = [u"some_defined_thing is defined", u"some_defined_thing is undefined"]
+ ret = self._eval_con(when, variables)
+ self.assertFalse(ret)
+
+ def test_is_not_undefined(self):
+ variables = {'some_defined_thing': True}
+ when = [u"some_defined_thing is not undefined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_is_not_defined(self):
+ variables = {'some_defined_thing': True}
+ when = [u"some_undefined_thing is not defined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_is_hostvars_quotes_is_defined(self):
+ variables = {'hostvars': {'some_host': {}},
+ 'compare_targets_single': "hostvars['some_host']",
+ 'compare_targets_double': 'hostvars["some_host"]',
+ 'compare_targets': {'double': '{{ compare_targets_double }}',
+ 'single': "{{ compare_targets_single }}"},
+ }
+ when = [u"hostvars['some_host'] is defined",
+ u'hostvars["some_host"] is defined',
+ u"{{ compare_targets.double }} is defined",
+ u"{{ compare_targets.single }} is defined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_is_hostvars_quotes_is_defined_but_is_not_defined(self):
+ variables = {'hostvars': {'some_host': {}},
+ 'compare_targets_single': "hostvars['some_host']",
+ 'compare_targets_double': 'hostvars["some_host"]',
+ 'compare_targets': {'double': '{{ compare_targets_double }}',
+ 'single': "{{ compare_targets_single }}"},
+ }
+ when = [u"hostvars['some_host'] is defined",
+ u'hostvars["some_host"] is defined',
+ u"{{ compare_targets.triple }} is defined",
+ u"{{ compare_targets.quadruple }} is defined"]
+ self.assertRaisesRegexp(errors.AnsibleError,
+ "The conditional check '{{ compare_targets.triple }} is defined' failed",
+ self._eval_con,
+ when, variables)
+
+ def test_is_hostvars_host_is_defined(self):
+ variables = {'hostvars': {'some_host': {}, }}
+ when = [u"hostvars['some_host'] is defined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_is_hostvars_host_undefined_is_defined(self):
+ variables = {'hostvars': {'some_host': {}, }}
+ when = [u"hostvars['some_undefined_host'] is defined"]
+ ret = self._eval_con(when, variables)
+ self.assertFalse(ret)
+
+ def test_is_hostvars_host_undefined_is_undefined(self):
+ variables = {'hostvars': {'some_host': {}, }}
+ when = [u"hostvars['some_undefined_host'] is undefined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
+
+ def test_is_hostvars_host_undefined_is_not_defined(self):
+ variables = {'hostvars': {'some_host': {}, }}
+ when = [u"hostvars['some_undefined_host'] is not defined"]
+ ret = self._eval_con(when, variables)
+ self.assertTrue(ret)
diff --git a/test/units/playbook/test_helpers.py b/test/units/playbook/test_helpers.py
new file mode 100644
index 00000000..a4ed6178
--- /dev/null
+++ b/test/units/playbook/test_helpers.py
@@ -0,0 +1,405 @@
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from units.compat import unittest
+from units.compat.mock import MagicMock
+from units.mock.loader import DictDataLoader
+
+from ansible import errors
+from ansible.playbook.block import Block
+from ansible.playbook.handler import Handler
+from ansible.playbook.task import Task
+from ansible.playbook.task_include import TaskInclude
+from ansible.playbook.role.include import RoleInclude
+
+from ansible.playbook import helpers
+
+
+class MixinForMocks(object):
+ def _setup(self):
+ # This is not a very good mixin, lots of side effects
+ self.fake_loader = DictDataLoader({'include_test.yml': "",
+ 'other_include_test.yml': ""})
+ self.mock_tqm = MagicMock(name='MockTaskQueueManager')
+
+ self.mock_play = MagicMock(name='MockPlay')
+ self.mock_play._attributes = []
+ self.mock_play.collections = None
+
+ self.mock_iterator = MagicMock(name='MockIterator')
+ self.mock_iterator._play = self.mock_play
+
+ self.mock_inventory = MagicMock(name='MockInventory')
+ self.mock_inventory._hosts_cache = dict()
+
+ def _get_host(host_name):
+ return None
+
+ self.mock_inventory.get_host.side_effect = _get_host
+ # TODO: can we use a real VariableManager?
+ self.mock_variable_manager = MagicMock(name='MockVariableManager')
+ self.mock_variable_manager.get_vars.return_value = dict()
+
+ self.mock_block = MagicMock(name='MockBlock')
+
+ # On macOS, /etc is actually /private/etc, so tests that compare literal /etc paths fail
+ self.fake_role_loader = DictDataLoader({os.path.join(os.path.realpath("/etc"), "ansible/roles/bogus_role/tasks/main.yml"): """
+ - shell: echo 'hello world'
+ """})
+
+ self._test_data_path = os.path.dirname(__file__)
+ self.fake_include_loader = DictDataLoader({"/dev/null/includes/test_include.yml": """
+ - include: other_test_include.yml
+ - shell: echo 'hello world'
+ """,
+ "/dev/null/includes/static_test_include.yml": """
+ - include: other_test_include.yml
+ - shell: echo 'hello static world'
+ """,
+ "/dev/null/includes/other_test_include.yml": """
+ - debug:
+ msg: other_test_include_debug
+ """})
+
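+# DictDataLoader (units.mock.loader, imported above) maps fake paths to
+# in-memory YAML so include resolution can be tested without touching the
+# filesystem. A minimal sketch, assuming the DataLoader API it mimics:
+#
+#   loader = DictDataLoader({'/dev/null/includes/x.yml': "- debug: msg=hi"})
+#   loader.load_from_file('/dev/null/includes/x.yml')  # parsed YAML list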
+
+class TestLoadListOfTasks(unittest.TestCase, MixinForMocks):
+ def setUp(self):
+ self._setup()
+
+ def _assert_is_task_list(self, results):
+ for result in results:
+ self.assertIsInstance(result, Task)
+
+ def _assert_is_task_list_or_blocks(self, results):
+ self.assertIsInstance(results, list)
+ for result in results:
+ self.assertIsInstance(result, (Task, Block))
+
+ def test_ds_not_list(self):
+ ds = {}
+ self.assertRaises(AssertionError, helpers.load_list_of_tasks,
+ ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
+
+ def test_ds_not_dict(self):
+ ds = [[]]
+ self.assertRaises(AssertionError, helpers.load_list_of_tasks,
+ ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
+
+ def test_empty_task(self):
+ ds = [{}]
+ self.assertRaisesRegexp(errors.AnsibleParserError,
+ "no module/action detected in task",
+ helpers.load_list_of_tasks,
+ ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+
+ def test_empty_task_use_handlers(self):
+ ds = [{}]
+ self.assertRaisesRegexp(errors.AnsibleParserError,
+ "no module/action detected in task.",
+ helpers.load_list_of_tasks,
+ ds,
+ use_handlers=True,
+ play=self.mock_play,
+ variable_manager=self.mock_variable_manager,
+ loader=self.fake_loader)
+
+ def test_one_bogus_block(self):
+ ds = [{'block': None}]
+ self.assertRaisesRegexp(errors.AnsibleParserError,
+ "A malformed block was encountered",
+ helpers.load_list_of_tasks,
+ ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+
+ def test_unknown_action(self):
+ action_name = 'foo_test_unknown_action'
+ ds = [{'action': action_name}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertEqual(res[0].action, action_name)
+
+ def test_block_unknown_action(self):
+ action_name = 'foo_test_block_unknown_action'
+ ds = [{
+ 'block': [{'action': action_name}]
+ }]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self._assert_default_block(res[0])
+
+ def _assert_default_block(self, block):
+ # the expected defaults
+ self.assertIsInstance(block.block, list)
+ self.assertEqual(len(block.block), 1)
+ self.assertIsInstance(block.rescue, list)
+ self.assertEqual(len(block.rescue), 0)
+ self.assertIsInstance(block.always, list)
+ self.assertEqual(len(block.always), 0)
+
+ def test_block_unknown_action_use_handlers(self):
+ ds = [{
+ 'block': [{'action': 'foo_test_block_unknown_action'}]
+ }]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self._assert_default_block(res[0])
+
+ def test_one_bogus_block_use_handlers(self):
+ ds = [{'block': True}]
+ self.assertRaisesRegexp(errors.AnsibleParserError,
+ "A malformed block was encountered",
+ helpers.load_list_of_tasks,
+ ds, play=self.mock_play, use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+
+ def test_one_bogus_include(self):
+ ds = [{'include': 'somefile.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+ self.assertIsInstance(res, list)
+ self.assertEqual(len(res), 0)
+
+ def test_one_bogus_include_use_handlers(self):
+ ds = [{'include': 'somefile.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+ self.assertIsInstance(res, list)
+ self.assertEqual(len(res), 0)
+
+ def test_one_bogus_include_static(self):
+ ds = [{'include': 'somefile.yml',
+ 'static': 'true'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_loader)
+ self.assertIsInstance(res, list)
+ self.assertEqual(len(res), 0)
+
+ def test_one_include(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self.assertEqual(len(res), 1)
+ self._assert_is_task_list_or_blocks(res)
+
+ def test_one_parent_include(self):
+ ds = [{'include': '/dev/null/includes/test_include.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIsInstance(res[0]._parent, TaskInclude)
+
+ # TODO/FIXME: do this the non-deprecated way
+ def test_one_include_tags(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml',
+ 'tags': ['test_one_include_tags_tag1', 'and_another_tagB']
+ }]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIn('test_one_include_tags_tag1', res[0].tags)
+ self.assertIn('and_another_tagB', res[0].tags)
+
+ # TODO/FIXME: do this the non-deprecated way
+ def test_one_parent_include_tags(self):
+ ds = [{'include': '/dev/null/includes/test_include.yml',
+ # 'vars': {'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']}
+ 'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']
+ }
+ ]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIn('test_one_parent_include_tags_tag1', res[0].tags)
+ self.assertIn('and_another_tag2', res[0].tags)
+
+ # It would be useful to be able to tell what kind of deprecation we encountered and where we encountered it.
+ def test_one_include_tags_deprecated_mixed(self):
+ ds = [{'include': "/dev/null/includes/other_test_include.yml",
+ 'vars': {'tags': "['tag_on_include1', 'tag_on_include2']"},
+ 'tags': 'mixed_tag1, mixed_tag2'
+ }]
+ self.assertRaisesRegexp(errors.AnsibleParserError, 'Mixing styles',
+ helpers.load_list_of_tasks,
+ ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+
+ def test_one_include_tags_deprecated_include(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml',
+ 'vars': {'tags': ['include_tag1_deprecated', 'and_another_tagB_deprecated']}
+ }]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Block)
+ self.assertIn('include_tag1_deprecated', res[0].tags)
+ self.assertIn('and_another_tagB_deprecated', res[0].tags)
+
+ def test_one_include_use_handlers(self):
+ ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Handler)
+
+ def test_one_parent_include_use_handlers(self):
+ ds = [{'include': '/dev/null/includes/test_include.yml'}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ use_handlers=True,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Handler)
+
+ # default for Handler
+ self.assertEqual(res[0].listen, [])
+
+ # TODO/FIXME: this doesn't seem right
+ # figure out how to get the non-static errors to be raised; this seems to just ignore everything
+ def test_one_include_not_static(self):
+ ds = [{
+ 'include': '/dev/null/includes/static_test_include.yml',
+ 'static': False
+ }]
+ # a_block = Block()
+ ti_ds = {'include': '/dev/null/includes/ssdftatic_test_include.yml'}
+ a_task_include = TaskInclude()
+ ti = a_task_include.load(ti_ds)
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ block=ti,
+ variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+ self._assert_is_task_list_or_blocks(res)
+ self.assertIsInstance(res[0], Task)
+ self.assertEqual(res[0].args['_raw_params'], '/dev/null/includes/static_test_include.yml')
+
+ # TODO/FIXME: These two get stuck trying to make a mock_block into a TaskInclude
+# def test_one_include(self):
+# ds = [{'include': 'other_test_include.yml'}]
+# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+# block=self.mock_block,
+# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+# print(res)
+
+# def test_one_parent_include(self):
+# ds = [{'include': 'test_include.yml'}]
+# res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+# block=self.mock_block,
+# variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
+# print(res)
+
+ def test_one_bogus_include_role(self):
+ ds = [{'include_role': {'name': 'bogus_role'}, 'collections': []}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play,
+ block=self.mock_block,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+ self.assertEqual(len(res), 1)
+ self._assert_is_task_list_or_blocks(res)
+
+ def test_one_bogus_include_role_use_handlers(self):
+ ds = [{'include_role': {'name': 'bogus_role'}, 'collections': []}]
+ res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
+ block=self.mock_block,
+ variable_manager=self.mock_variable_manager,
+ loader=self.fake_role_loader)
+ self.assertEqual(len(res), 1)
+ self._assert_is_task_list_or_blocks(res)
+
+
+class TestLoadListOfRoles(unittest.TestCase, MixinForMocks):
+ def setUp(self):
+ self._setup()
+
+ def test_ds_not_list(self):
+ ds = {}
+ self.assertRaises(AssertionError, helpers.load_list_of_roles,
+ ds, self.mock_play)
+
+ def test_empty_role(self):
+ ds = [{}]
+ self.assertRaisesRegexp(errors.AnsibleError,
+ "role definitions must contain a role name",
+ helpers.load_list_of_roles,
+ ds, self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+
+ def test_empty_role_just_name(self):
+ ds = [{'name': 'bogus_role'}]
+ res = helpers.load_list_of_roles(ds, self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+ self.assertIsInstance(res, list)
+ for r in res:
+ self.assertIsInstance(r, RoleInclude)
+
+ def test_block_unknown_action(self):
+ # load_list_of_roles expects role entries, not blocks, so a plain role name is used here
+ ds = [{'name': 'bogus_role'}]
+ res = helpers.load_list_of_roles(ds, self.mock_play,
+ variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
+ self.assertIsInstance(res, list)
+ for r in res:
+ self.assertIsInstance(r, RoleInclude)
+
+
+class TestLoadListOfBlocks(unittest.TestCase, MixinForMocks):
+ def setUp(self):
+ self._setup()
+
+ def test_ds_not_list(self):
+ ds = {}
+ mock_play = MagicMock(name='MockPlay')
+ self.assertRaises(AssertionError, helpers.load_list_of_blocks,
+ ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)
+
+ def test_empty_block(self):
+ ds = [{}]
+ mock_play = MagicMock(name='MockPlay')
+ self.assertRaisesRegexp(errors.AnsibleParserError,
+ "no module/action detected in task",
+ helpers.load_list_of_blocks,
+ ds, mock_play,
+ parent_block=None,
+ role=None,
+ task_include=None,
+ use_handlers=False,
+ variable_manager=None,
+ loader=None)
+
+ def test_block_unknown_action(self):
+ ds = [{'action': 'foo', 'collections': []}]
+ mock_play = MagicMock(name='MockPlay')
+ res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None,
+ loader=None)
+
+ self.assertIsInstance(res, list)
+ for block in res:
+ self.assertIsInstance(block, Block)
diff --git a/test/units/playbook/test_included_file.py b/test/units/playbook/test_included_file.py
new file mode 100644
index 00000000..f143acb9
--- /dev/null
+++ b/test/units/playbook/test_included_file.py
@@ -0,0 +1,332 @@
+# (c) 2016, Adrian Likins <alikins@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import pytest
+
+from units.compat.mock import MagicMock
+from units.mock.loader import DictDataLoader
+
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+from ansible.playbook.task_include import TaskInclude
+from ansible.playbook.role_include import IncludeRole
+from ansible.executor import task_result
+
+from ansible.playbook.included_file import IncludedFile
+from ansible.errors import AnsibleParserError
+
+
+@pytest.fixture
+def mock_iterator():
+ mock_iterator = MagicMock(name='MockIterator')
+ mock_iterator._play = MagicMock(name='MockPlay')
+ return mock_iterator
+
+
+@pytest.fixture
+def mock_variable_manager():
+ # TODO: can we use a real VariableManager?
+ mock_variable_manager = MagicMock(name='MockVariableManager')
+ mock_variable_manager.get_vars.return_value = dict()
+ return mock_variable_manager
+
+
+def test_equals_ok():
+ uuid = '111-111'
+ parent = MagicMock(name='MockParent')
+ parent._uuid = uuid
+ task = MagicMock(name='MockTask')
+ task._uuid = uuid
+ task._parent = parent
+ inc_a = IncludedFile('a.yml', {}, {}, task)
+ inc_b = IncludedFile('a.yml', {}, {}, task)
+ assert inc_a == inc_b
+
+
+def test_equals_different_tasks():
+ parent = MagicMock(name='MockParent')
+ parent._uuid = '111-111'
+ task_a = MagicMock(name='MockTask')
+ task_a._uuid = '11-11'
+ task_a._parent = parent
+ task_b = MagicMock(name='MockTask')
+ task_b._uuid = '22-22'
+ task_b._parent = parent
+ inc_a = IncludedFile('a.yml', {}, {}, task_a)
+ inc_b = IncludedFile('a.yml', {}, {}, task_b)
+ assert inc_a != inc_b
+
+
+def test_equals_different_parents():
+ parent_a = MagicMock(name='MockParent')
+ parent_a._uuid = '111-111'
+ parent_b = MagicMock(name='MockParent')
+ parent_b._uuid = '222-222'
+ task_a = MagicMock(name='MockTask')
+ task_a._uuid = '11-11'
+ task_a._parent = parent_a
+ task_b = MagicMock(name='MockTask')
+ task_b._uuid = '11-11'
+ task_b._parent = parent_b
+ inc_a = IncludedFile('a.yml', {}, {}, task_a)
+ inc_b = IncludedFile('a.yml', {}, {}, task_b)
+ assert inc_a != inc_b
+
+
+def test_included_file_instantiation():
+ filename = 'somefile.yml'
+
+ inc_file = IncludedFile(filename=filename, args={}, vars={}, task=None)
+
+ assert isinstance(inc_file, IncludedFile)
+ assert inc_file._filename == filename
+ assert inc_file._args == {}
+ assert inc_file._vars == {}
+ assert inc_file._task is None
+
+
+def test_process_include_results(mock_iterator, mock_variable_manager):
+ hostname = "testhost1"
+ hostname2 = "testhost2"
+
+ parent_task_ds = {'debug': 'msg=foo'}
+ parent_task = Task.load(parent_task_ds)
+ parent_task._play = None
+
+ task_ds = {'include': 'include_test.yml'}
+ loaded_task = TaskInclude.load(task_ds, task_include=parent_task)
+
+ return_data = {'include': 'include_test.yml'}
+ # The task in the TaskResult has to be a TaskInclude so it has a .static attr
+ result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data)
+ result2 = task_result.TaskResult(host=hostname2, task=loaded_task, return_data=return_data)
+ results = [result1, result2]
+
+ fake_loader = DictDataLoader({'include_test.yml': ""})
+
+ res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
+ assert isinstance(res, list)
+ assert len(res) == 1
+ assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
+ assert res[0]._hosts == ['testhost1', 'testhost2']
+ assert res[0]._args == {}
+ assert res[0]._vars == {}
+
+
+def test_process_include_diff_files(mock_iterator, mock_variable_manager):
+ hostname = "testhost1"
+ hostname2 = "testhost2"
+
+ parent_task_ds = {'debug': 'msg=foo'}
+ parent_task = Task.load(parent_task_ds)
+ parent_task._play = None
+
+ task_ds = {'include': 'include_test.yml'}
+ loaded_task = TaskInclude.load(task_ds, task_include=parent_task)
+ loaded_task._play = None
+
+ child_task_ds = {'include': 'other_include_test.yml'}
+ loaded_child_task = TaskInclude.load(child_task_ds, task_include=loaded_task)
+ loaded_child_task._play = None
+
+ return_data = {'include': 'include_test.yml'}
+ # The task in the TaskResult has to be a TaskInclude so it has a .static attr
+ result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data)
+
+ return_data = {'include': 'other_include_test.yml'}
+ result2 = task_result.TaskResult(host=hostname2, task=loaded_child_task, return_data=return_data)
+ results = [result1, result2]
+
+ fake_loader = DictDataLoader({'include_test.yml': "",
+ 'other_include_test.yml': ""})
+
+ res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
+ assert isinstance(res, list)
+ assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
+ assert res[1]._filename == os.path.join(os.getcwd(), 'other_include_test.yml')
+
+ assert res[0]._hosts == ['testhost1']
+ assert res[1]._hosts == ['testhost2']
+
+ assert res[0]._args == {}
+ assert res[1]._args == {}
+
+ assert res[0]._vars == {}
+ assert res[1]._vars == {}
+
+
+def test_process_include_simulate_free(mock_iterator, mock_variable_manager):
+ hostname = "testhost1"
+ hostname2 = "testhost2"
+
+ parent_task_ds = {'debug': 'msg=foo'}
+ parent_task1 = Task.load(parent_task_ds)
+ parent_task2 = Task.load(parent_task_ds)
+
+ parent_task1._play = None
+ parent_task2._play = None
+
+ task_ds = {'include': 'include_test.yml'}
+ loaded_task1 = TaskInclude.load(task_ds, task_include=parent_task1)
+ loaded_task2 = TaskInclude.load(task_ds, task_include=parent_task2)
+
+ return_data = {'include': 'include_test.yml'}
+ # The task in the TaskResult has to be a TaskInclude so it has a .static attr
+ result1 = task_result.TaskResult(host=hostname, task=loaded_task1, return_data=return_data)
+ result2 = task_result.TaskResult(host=hostname2, task=loaded_task2, return_data=return_data)
+ results = [result1, result2]
+
+ fake_loader = DictDataLoader({'include_test.yml': ""})
+
+ res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
+ assert isinstance(res, list)
+ assert len(res) == 2
+ assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
+ assert res[1]._filename == os.path.join(os.getcwd(), 'include_test.yml')
+
+ assert res[0]._hosts == ['testhost1']
+ assert res[1]._hosts == ['testhost2']
+
+ assert res[0]._args == {}
+ assert res[1]._args == {}
+
+ assert res[0]._vars == {}
+ assert res[1]._vars == {}
+
+
+def test_process_include_simulate_free_block_role_tasks(mock_iterator,
+ mock_variable_manager):
+ """Test loading the same role returns different included files
+
+ In the case of free, we may end up with included files from roles that
+ have the same parent but are different tasks. Previously the comparison
+ for equality did not check if the tasks were the same and only checked
+ that the parents were the same. This lead to some tasks being run
+ incorrectly and some tasks being silient dropped."""
+
+ fake_loader = DictDataLoader({
+ 'include_test.yml': "",
+ '/etc/ansible/roles/foo_role/tasks/task1.yml': """
+ - debug: msg=task1
+ """,
+ '/etc/ansible/roles/foo_role/tasks/task2.yml': """
+ - debug: msg=task2
+ """,
+ })
+
+ hostname = "testhost1"
+ hostname2 = "testhost2"
+
+ role1_ds = {
+ 'name': 'task1 include',
+ 'include_role': {
+ 'name': 'foo_role',
+ 'tasks_from': 'task1.yml'
+ }
+ }
+ role2_ds = {
+ 'name': 'task2 include',
+ 'include_role': {
+ 'name': 'foo_role',
+ 'tasks_from': 'task2.yml'
+ }
+ }
+ parent_task_ds = {
+ 'block': [
+ role1_ds,
+ role2_ds
+ ]
+ }
+ parent_block = Block.load(parent_task_ds, loader=fake_loader)
+
+ parent_block._play = None
+
+ include_role1_ds = {
+ 'include_args': {
+ 'name': 'foo_role',
+ 'tasks_from': 'task1.yml'
+ }
+ }
+ include_role2_ds = {
+ 'include_args': {
+ 'name': 'foo_role',
+ 'tasks_from': 'task2.yml'
+ }
+ }
+
+ include_role1 = IncludeRole.load(role1_ds,
+ block=parent_block,
+ loader=fake_loader)
+ include_role2 = IncludeRole.load(role2_ds,
+ block=parent_block,
+ loader=fake_loader)
+
+ result1 = task_result.TaskResult(host=hostname,
+ task=include_role1,
+ return_data=include_role1_ds)
+ result2 = task_result.TaskResult(host=hostname2,
+ task=include_role2,
+ return_data=include_role2_ds)
+ results = [result1, result2]
+
+ res = IncludedFile.process_include_results(results,
+ mock_iterator,
+ fake_loader,
+ mock_variable_manager)
+ assert isinstance(res, list)
+ # we should get two different includes
+ assert len(res) == 2
+ assert res[0]._filename == 'foo_role'
+ assert res[1]._filename == 'foo_role'
+ # with different tasks
+ assert res[0]._task != res[1]._task
+
+ assert res[0]._hosts == ['testhost1']
+ assert res[1]._hosts == ['testhost2']
+
+ assert res[0]._args == {}
+ assert res[1]._args == {}
+
+ assert res[0]._vars == {}
+ assert res[1]._vars == {}
+
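+# An equality sketch matching test_equals_different_tasks above: IncludedFile
+# compares the originating task (by _uuid) and its parents, not just the
+# filename, which is what keeps the two role includes above distinct:
+#
+#   inc_a = IncludedFile('a.yml', {}, {}, task_a)
+#   inc_b = IncludedFile('a.yml', {}, {}, task_b)  # different task _uuid
+#   inc_a != inc_b  # True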
+
+def test_empty_raw_params():
+ parent_task_ds = {'debug': 'msg=foo'}
+ parent_task = Task.load(parent_task_ds)
+ parent_task._play = None
+
+ task_ds_list = [
+ {
+ 'include': ''
+ },
+ {
+ 'include_tasks': ''
+ },
+ {
+ 'import_tasks': ''
+ }
+ ]
+ for task_ds in task_ds_list:
+ with pytest.raises(AnsibleParserError):
+ TaskInclude.load(task_ds, task_include=parent_task)
diff --git a/test/units/playbook/test_play.py b/test/units/playbook/test_play.py
new file mode 100644
index 00000000..725c28ea
--- /dev/null
+++ b/test/units/playbook/test_play.py
@@ -0,0 +1,132 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible.errors import AnsibleParserError
+from ansible.playbook.play import Play
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+
+class TestPlay(unittest.TestCase):
+
+ def test_empty_play(self):
+ p = Play.load(dict())
+ self.assertEqual(str(p), '')
+
+ def test_basic_play(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ connection='local',
+ remote_user="root",
+ become=True,
+ become_user="testing",
+ ))
+
+ def test_play_with_user_sets_remote_user(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ user="testing",
+ gather_facts=False,
+ ))
+ self.assertEqual(p.remote_user, "testing")
+
+ def test_play_with_user_conflict(self):
+ play_data = dict(
+ name="test play",
+ hosts=['foo'],
+ user="testing",
+ remote_user="testing",
+ )
+ self.assertRaises(AnsibleParserError, Play.load, play_data)
+
+ def test_play_with_tasks(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ tasks=[dict(action='shell echo "hello world"')],
+ ))
+
+ def test_play_with_handlers(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ handlers=[dict(action='shell echo "hello world"')],
+ ))
+
+ def test_play_with_pre_tasks(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ pre_tasks=[dict(action='shell echo "hello world"')],
+ ))
+
+ def test_play_with_post_tasks(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ post_tasks=[dict(action='shell echo "hello world"')],
+ ))
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_play_with_roles(self):
+ fake_loader = DictDataLoader({
+ '/etc/ansible/roles/foo/tasks.yml': """
+ - name: role task
+ shell: echo "hello world"
+ """,
+ })
+
+ mock_var_manager = MagicMock()
+ mock_var_manager.get_vars.return_value = dict()
+
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ roles=['foo'],
+ ), loader=fake_loader, variable_manager=mock_var_manager)
+
+ blocks = p.compile()
+
+ def test_play_compile(self):
+ p = Play.load(dict(
+ name="test play",
+ hosts=['foo'],
+ gather_facts=False,
+ tasks=[dict(action='shell echo "hello world"')],
+ ))
+
+ blocks = p.compile()
+
+ # with a single block, there will still be three
+ # implicit meta flush_handler blocks inserted
+ self.assertEqual(len(blocks), 4)
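+
+ # A sketch of the expected shape (the exact ordering is an assumption):
+ # the user tasks compile into one block, and an implicit
+ # 'meta: flush_handlers' block wraps each task section, hence 3 + 1:
+ #
+ #   [flush_handlers, <tasks block>, flush_handlers, flush_handlers]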
diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py
new file mode 100644
index 00000000..0936775b
--- /dev/null
+++ b/test/units/playbook/test_play_context.py
@@ -0,0 +1,111 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible import constants as C
+from ansible import context
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.errors import AnsibleError
+from ansible.playbook.play_context import PlayContext
+from ansible.playbook.play import Play
+from ansible.plugins.loader import become_loader
+from ansible.utils import context_objects as co
+
+
+@pytest.fixture
+def parser():
+ parser = opt_help.create_base_parser('testparser')
+
+ opt_help.add_runas_options(parser)
+ opt_help.add_meta_options(parser)
+ opt_help.add_runtask_options(parser)
+ opt_help.add_vault_options(parser)
+ opt_help.add_async_options(parser)
+ opt_help.add_connect_options(parser)
+ opt_help.add_subset_options(parser)
+ opt_help.add_check_options(parser)
+ opt_help.add_inventory_options(parser)
+
+ return parser
+
+
+@pytest.fixture
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
+
+
+def test_play_context(mocker, parser, reset_cli_args):
+ options = parser.parse_args(['-vv', '--check'])
+ context._init_global_context(options)
+ play = Play.load({})
+ play_context = PlayContext(play=play)
+
+ assert play_context.remote_addr is None
+ assert play_context.remote_user is None
+ assert play_context.password == ''
+ assert play_context.private_key_file == C.DEFAULT_PRIVATE_KEY_FILE
+ assert play_context.timeout == C.DEFAULT_TIMEOUT
+ assert play_context.verbosity == 2
+ assert play_context.check_mode is True
+
+ mock_play = mocker.MagicMock()
+ mock_play.force_handlers = True
+
+ play_context = PlayContext(play=mock_play)
+ assert play_context.force_handlers is True
+
+ mock_task = mocker.MagicMock()
+ mock_task.connection = 'mocktask'
+ mock_task.remote_user = 'mocktask'
+ mock_task.port = 1234
+ mock_task.no_log = True
+ mock_task.become = True
+ mock_task.become_method = 'mocktask'
+ mock_task.become_user = 'mocktaskroot'
+ mock_task.become_pass = 'mocktaskpass'
+ mock_task._local_action = False
+ mock_task.delegate_to = None
+
+ all_vars = dict(
+ ansible_connection='mock_inventory',
+ ansible_ssh_port=4321,
+ )
+
+ mock_templar = mocker.MagicMock()
+
+ play_context = PlayContext()
+ play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
+
+ assert play_context.connection == 'mock_inventory'
+ assert play_context.remote_user == 'mocktask'
+ assert play_context.no_log is True
+
+ mock_task.no_log = False
+ play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
+ assert play_context.no_log is False
+
+
+def test_play_context_make_become_bad(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+ play_context = PlayContext()
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+
+ play_context.become = True
+ play_context.become_user = 'foo'
+ play_context.set_become_plugin(become_loader.get('bad'))
+ play_context.become_method = 'bad'
+
+ with pytest.raises(AnsibleError):
+ play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
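+
+# A sketch of the happy path for contrast (the plugin name is an assumption):
+#
+#   play_context.set_become_plugin(become_loader.get('sudo'))
+#   play_context.become_method = 'sudo'
+#   cmd = play_context.make_become_cmd(cmd="/bin/foo", executable="/bin/bash")
+#   # cmd is now the original command wrapped in a sudo invocation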
diff --git a/test/units/playbook/test_playbook.py b/test/units/playbook/test_playbook.py
new file mode 100644
index 00000000..68a9fb75
--- /dev/null
+++ b/test/units/playbook/test_playbook.py
@@ -0,0 +1,61 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.errors import AnsibleParserError
+from ansible.playbook import Playbook
+from ansible.vars.manager import VariableManager
+
+from units.mock.loader import DictDataLoader
+
+
+class TestPlaybook(unittest.TestCase):
+
+ def test_empty_playbook(self):
+ fake_loader = DictDataLoader({})
+ p = Playbook(loader=fake_loader)
+
+ def test_basic_playbook(self):
+ fake_loader = DictDataLoader({
+ "test_file.yml": """
+ - hosts: all
+ """,
+ })
+ p = Playbook.load("test_file.yml", loader=fake_loader)
+ plays = p.get_plays()
+
+ def test_bad_playbook_files(self):
+ fake_loader = DictDataLoader({
+ # represents a playbook which is not a list of plays
+ "bad_list.yml": """
+ foo: bar
+
+ """,
+ # represents a playbook where a play entry is mis-formatted
+ "bad_entry.yml": """
+ -
+ - "This should be a mapping..."
+
+ """,
+ })
+ vm = VariableManager()
+ self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
+ self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
diff --git a/test/units/playbook/test_taggable.py b/test/units/playbook/test_taggable.py
new file mode 100644
index 00000000..ab5f86b4
--- /dev/null
+++ b/test/units/playbook/test_taggable.py
@@ -0,0 +1,102 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.playbook.taggable import Taggable
+from units.mock.loader import DictDataLoader
+
+
+class TaggableTestObj(Taggable):
+
+ def __init__(self):
+ self._loader = DictDataLoader({})
+ self.tags = []
+
+
+class TestTaggable(unittest.TestCase):
+
+ def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags):
+ taggable_obj = TaggableTestObj()
+ taggable_obj.tags = tags
+
+ evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {})
+
+ self.assertEqual(test_value, evaluate)
+
+ def test_evaluate_tags_tag_in_only_tags(self):
+ self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], [])
+
+ def test_evaluate_tags_tag_in_skip_tags(self):
+ self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1'])
+
+ def test_evaluate_tags_special_always_in_object_tags(self):
+ self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], [])
+
+ def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag'])
+
+ def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always'])
+
+ def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(True, ['tag'], ['tagged'], [])
+
+ def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(False, [], ['tagged'], [])
+
+ def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(False, ['tag'], [], ['tagged'])
+
+ def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(True, [], [], ['tagged'])
+
+ def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(False, ['tag'], ['untagged'], [])
+
+ def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(True, [], ['untagged'], [])
+
+ def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self):
+ self.assert_evaluate_equal(True, ['tag'], [], ['untagged'])
+
+ def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self):
+ self.assert_evaluate_equal(False, [], [], ['untagged'])
+
+ def test_evaluate_tags_special_all_in_only_tags(self):
+ self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged'])
+
+ def test_evaluate_tags_special_all_in_skip_tags(self):
+ self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all'])
+
+ def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self):
+ self.assert_evaluate_equal(False, ['tag'], ['all'], ['all'])
+
+ def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self):
+ self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all'])
+
+ def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always'])
+
+ def test_evaluate_tags_accepts_lists(self):
+ self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], [])
+
+ def test_evaluate_tags_with_repeated_tags(self):
+ self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
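+
+ # A summary sketch of the precedence exercised above: 'always' on the
+ # object overrides only_tags; skip_tags overrides 'always' only when
+ # 'always' itself is skipped; and 'all', 'tagged' and 'untagged' act as
+ # selectors over the object's tag state, e.g.:
+ #
+ #   self.assert_evaluate_equal(True, ['always'], ['other'], [])
+ #   self.assert_evaluate_equal(False, ['always'], [], ['always'])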
diff --git a/test/units/playbook/test_task.py b/test/units/playbook/test_task.py
new file mode 100644
index 00000000..f94419a2
--- /dev/null
+++ b/test/units/playbook/test_task.py
@@ -0,0 +1,114 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import patch
+from ansible.playbook.task import Task
+from ansible.parsing.yaml import objects
+from ansible import errors
+
+
+basic_command_task = dict(
+ name='Test Task',
+ command='echo hi'
+)
+
+kv_command_task = dict(
+ action='command echo hi'
+)
+
+# See #36848
+kv_bad_args_str = '- apk: sdfs sf sdf 37'
+kv_bad_args_ds = {'apk': 'sdfs sf sdf 37'}
+
+
+class TestTask(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_construct_empty_task(self):
+ Task()
+
+ def test_construct_task_with_role(self):
+ pass
+
+ def test_construct_task_with_block(self):
+ pass
+
+ def test_construct_task_with_role_and_block(self):
+ pass
+
+ def test_load_task_simple(self):
+ t = Task.load(basic_command_task)
+ assert t is not None
+ self.assertEqual(t.name, basic_command_task['name'])
+ self.assertEqual(t.action, 'command')
+ self.assertEqual(t.args, dict(_raw_params='echo hi'))
+
+ def test_load_task_kv_form(self):
+ t = Task.load(kv_command_task)
+ self.assertEqual(t.action, 'command')
+ self.assertEqual(t.args, dict(_raw_params='echo hi'))
+
+ @patch.object(errors.AnsibleError, '_get_error_lines_from_file')
+ def test_load_task_kv_form_error_36848(self, mock_get_err_lines):
+ ds = objects.AnsibleMapping(kv_bad_args_ds)
+ ds.ansible_pos = ('test_task_faux_playbook.yml', 1, 1)
+ mock_get_err_lines.return_value = (kv_bad_args_str, '')
+
+ with self.assertRaises(errors.AnsibleParserError) as cm:
+ Task.load(ds)
+
+ self.assertIsInstance(cm.exception, errors.AnsibleParserError)
+ self.assertEqual(cm.exception._obj, ds)
+ self.assertEqual(cm.exception._obj, kv_bad_args_ds)
+ self.assertIn("The error appears to be in 'test_task_faux_playbook.yml", cm.exception.message)
+ self.assertIn(kv_bad_args_str, cm.exception.message)
+ self.assertIn('apk', cm.exception.message)
+ self.assertEqual(cm.exception.message.count('The offending line'), 1)
+ self.assertEqual(cm.exception.message.count('The error appears to be in'), 1)
+
+ def test_task_auto_name(self):
+ assert 'name' not in kv_command_task
+ Task.load(kv_command_task)
+ # self.assertEqual(t.name, 'shell echo hi')
+
+ def test_task_auto_name_with_role(self):
+ pass
+
+ def test_load_task_complex_form(self):
+ pass
+
+ def test_can_load_module_complex_form(self):
+ pass
+
+ def test_local_action_implies_delegate(self):
+ pass
+
+ def test_local_action_conflicts_with_delegate(self):
+ pass
+
+ def test_delegate_to_parses(self):
+ pass
diff --git a/test/units/plugins/__init__.py b/test/units/plugins/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/__init__.py
diff --git a/test/units/plugins/action/__init__.py b/test/units/plugins/action/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/action/__init__.py
diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py
new file mode 100644
index 00000000..12488019
--- /dev/null
+++ b/test/units/plugins/action/test_action.py
@@ -0,0 +1,683 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Florian Apolloner <florian@apolloner.eu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from ansible import constants as C
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock, mock_open
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import text_type
+from ansible.module_utils.six.moves import shlex_quote, builtins
+from ansible.module_utils._text import to_bytes
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.action import ActionBase
+from ansible.template import Templar
+from ansible.vars.clean import clean_facts
+
+from units.mock.loader import DictDataLoader
+
+
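+# Minimal fake module sources fed to mocked open() calls below; the <<...>>
+# markers mimic the replacer strings rewritten during module building.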
+python_module_replacers = br"""
+#!/usr/bin/python
+
+#ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
+#MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
+#SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
+
+test = u'Toshio \u304f\u3089\u3068\u307f'
+from ansible.module_utils.basic import *
+"""
+
+powershell_module_replacers = b"""
+WINDOWS_ARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
+# POWERSHELL_COMMON
+"""
+
+
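+# Helper: build a DerivedActionBase wired to MagicMock loaders and a MagicMock
+# connection so tests can exercise individual ActionBase helpers in isolation.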
+def _action_base():
+ fake_loader = DictDataLoader({
+ })
+ mock_module_loader = MagicMock()
+ mock_shared_loader_obj = MagicMock()
+ mock_shared_loader_obj.module_loader = mock_module_loader
+ mock_connection_loader = MagicMock()
+
+ mock_shared_loader_obj.connection_loader = mock_connection_loader
+ mock_connection = MagicMock()
+
+ play_context = MagicMock()
+
+ action_base = DerivedActionBase(task=None,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=fake_loader,
+ templar=None,
+ shared_loader_obj=mock_shared_loader_obj)
+ return action_base
+
+
+class DerivedActionBase(ActionBase):
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ # We're not testing the plugin run() method, just the helper
+ # methods ActionBase defines
+ return super(DerivedActionBase, self).run(tmp=tmp, task_vars=task_vars)
+
+
+class TestActionBase(unittest.TestCase):
+
+ def test_action_base_run(self):
+ mock_task = MagicMock()
+ mock_task.action = "foo"
+ mock_task.args = dict(a=1, b=2, c=3)
+
+ mock_connection = MagicMock()
+
+ play_context = PlayContext()
+
+ mock_task.async_val = None
+ action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
+ results = action_base.run()
+ self.assertEqual(results, dict())
+
+ mock_task.async_val = 0
+ action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
+ results = action_base.run()
+ self.assertEqual(results, {})
+
+ def test_action_base__configure_module(self):
+ fake_loader = DictDataLoader({
+ })
+
+ # create our fake task
+ mock_task = MagicMock()
+ mock_task.action = "copy"
+ mock_task.async_val = 0
+ mock_task.delegate_to = None
+
+ # create a mock connection, so we don't actually try and connect to things
+ mock_connection = MagicMock()
+
+ # create a mock shared loader object
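+        # (it resolves every name except 'badmodule', and returns a .ps1 path
+        # when the connection prefers PowerShell modules)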
+ def mock_find_plugin_with_context(name, options, collection_list=None):
+ mockctx = MagicMock()
+ if name == 'badmodule':
+ mockctx.resolved = False
+ mockctx.plugin_resolved_path = None
+ elif '.ps1' in options:
+ mockctx.resolved = True
+ mockctx.plugin_resolved_path = '/fake/path/to/%s.ps1' % name
+ else:
+ mockctx.resolved = True
+ mockctx.plugin_resolved_path = '/fake/path/to/%s' % name
+ return mockctx
+
+ mock_module_loader = MagicMock()
+ mock_module_loader.find_plugin_with_context.side_effect = mock_find_plugin_with_context
+ mock_shared_obj_loader = MagicMock()
+ mock_shared_obj_loader.module_loader = mock_module_loader
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=fake_loader,
+ templar=Templar(loader=fake_loader),
+ shared_loader_obj=mock_shared_obj_loader,
+ )
+
+ # test python module formatting
+ with patch.object(builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))):
+ with patch.object(os, 'rename'):
+ mock_task.args = dict(a=1, foo='fö〩')
+ mock_connection.module_implementation_preferences = ('',)
+ (style, shebang, data, path) = action_base._configure_module(mock_task.action, mock_task.args,
+ task_vars=dict(ansible_python_interpreter='/usr/bin/python'))
+ self.assertEqual(style, "new")
+ self.assertEqual(shebang, u"#!/usr/bin/python")
+
+ # test module not found
+ self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args, {})
+
+ # test powershell module formatting
+ with patch.object(builtins, 'open', mock_open(read_data=to_bytes(powershell_module_replacers.strip(), encoding='utf-8'))):
+ mock_task.action = 'win_copy'
+ mock_task.args = dict(b=2)
+ mock_connection.module_implementation_preferences = ('.ps1',)
+ (style, shebang, data, path) = action_base._configure_module('stat', mock_task.args, {})
+ self.assertEqual(style, "new")
+ self.assertEqual(shebang, u'#!powershell')
+
+ # test module not found
+ self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args, {})
+
+ def test_action_base__compute_environment_string(self):
+ fake_loader = DictDataLoader({
+ })
+
+ # create our fake task
+ mock_task = MagicMock()
+ mock_task.action = "copy"
+ mock_task.args = dict(a=1)
+
+ # create a mock connection, so we don't actually try and connect to things
+ def env_prefix(**args):
+ return ' '.join(['%s=%s' % (k, shlex_quote(text_type(v))) for k, v in args.items()])
+ mock_connection = MagicMock()
+ mock_connection._shell.env_prefix.side_effect = env_prefix
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # and we're using a real templar here too
+ templar = Templar(loader=fake_loader)
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=fake_loader,
+ templar=templar,
+ shared_loader_obj=None,
+ )
+
+ # test standard environment setup
+ mock_task.environment = [dict(FOO='foo'), None]
+ env_string = action_base._compute_environment_string()
+ self.assertEqual(env_string, "FOO=foo")
+
+ # test where environment is not a list
+ mock_task.environment = dict(FOO='foo')
+ env_string = action_base._compute_environment_string()
+ self.assertEqual(env_string, "FOO=foo")
+
+ # test environment with a variable in it
+ templar.available_variables = dict(the_var='bar')
+ mock_task.environment = [dict(FOO='{{the_var}}')]
+ env_string = action_base._compute_environment_string()
+ self.assertEqual(env_string, "FOO=bar")
+
+ # test with a bad environment set
+ mock_task.environment = dict(FOO='foo')
+ mock_task.environment = ['hi there']
+ self.assertRaises(AnsibleError, action_base._compute_environment_string)
+
+ def test_action_base__early_needs_tmp_path(self):
+ # create our fake task
+ mock_task = MagicMock()
+
+ # create a mock connection, so we don't actually try and connect to things
+ mock_connection = MagicMock()
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=None,
+ templar=None,
+ shared_loader_obj=None,
+ )
+
+ self.assertFalse(action_base._early_needs_tmp_path())
+
+ action_base.TRANSFERS_FILES = True
+ self.assertTrue(action_base._early_needs_tmp_path())
+
+ def test_action_base__make_tmp_path(self):
+ # create our fake task
+ mock_task = MagicMock()
+
+ def get_shell_opt(opt):
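+            # minimal stand-in for the shell plugin's get_option()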
+
+ ret = None
+ if opt == 'admin_users':
+ ret = ['root', 'toor', 'Administrator']
+ elif opt == 'remote_tmp':
+ ret = '~/.ansible/tmp'
+
+ return ret
+
+ # create a mock connection, so we don't actually try and connect to things
+ mock_connection = MagicMock()
+ mock_connection.transport = 'ssh'
+ mock_connection._shell.mkdtemp.return_value = 'mkdir command'
+ mock_connection._shell.join_path.side_effect = os.path.join
+ mock_connection._shell.get_option = get_shell_opt
+ mock_connection._shell.HOMES_RE = re.compile(r'(\'|\")?(~|\$HOME)(.*)')
+
+ # we're using a real play context here
+ play_context = PlayContext()
+ play_context.become = True
+ play_context.become_user = 'foo'
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=None,
+ templar=None,
+ shared_loader_obj=None,
+ )
+
+ action_base._low_level_execute_command = MagicMock()
+ action_base._low_level_execute_command.return_value = dict(rc=0, stdout='/some/path')
+ self.assertEqual(action_base._make_tmp_path('root'), '/some/path/')
+
+ # empty path fails
+ action_base._low_level_execute_command.return_value = dict(rc=0, stdout='')
+ self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
+
+ # authentication failure
+ action_base._low_level_execute_command.return_value = dict(rc=5, stdout='')
+ self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
+
+ # ssh error
+ action_base._low_level_execute_command.return_value = dict(rc=255, stdout='', stderr='')
+ self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
+ play_context.verbosity = 5
+ self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
+
+ # general error
+ action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='')
+ self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
+ action_base._low_level_execute_command.return_value = dict(rc=1, stdout='some stuff here', stderr='No space left on device')
+ self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
+
+ def test_action_base__remove_tmp_path(self):
+ # create our fake task
+ mock_task = MagicMock()
+
+ # create a mock connection, so we don't actually try and connect to things
+ mock_connection = MagicMock()
+ mock_connection._shell.remove.return_value = 'rm some stuff'
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=None,
+ templar=None,
+ shared_loader_obj=None,
+ )
+
+ action_base._low_level_execute_command = MagicMock()
+ # these don't really return anything or raise errors, so
+ # we're pretty much calling these for coverage right now
+ action_base._remove_tmp_path('/bad/path/dont/remove')
+ action_base._remove_tmp_path('/good/path/to/ansible-tmp-thing')
+
+ @patch('os.unlink')
+ @patch('os.fdopen')
+ @patch('tempfile.mkstemp')
+ def test_action_base__transfer_data(self, mock_mkstemp, mock_fdopen, mock_unlink):
+ # create our fake task
+ mock_task = MagicMock()
+
+ # create a mock connection, so we don't actually try and connect to things
+ mock_connection = MagicMock()
+ mock_connection.put_file.return_value = None
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=None,
+ templar=None,
+ shared_loader_obj=None,
+ )
+
+ mock_afd = MagicMock()
+ mock_afile = MagicMock()
+ mock_mkstemp.return_value = (mock_afd, mock_afile)
+
+ mock_unlink.return_value = None
+
+ mock_afo = MagicMock()
+ mock_afo.write.return_value = None
+ mock_afo.flush.return_value = None
+ mock_afo.close.return_value = None
+ mock_fdopen.return_value = mock_afo
+
+ self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some data'), '/path/to/remote/file')
+ self.assertEqual(action_base._transfer_data('/path/to/remote/file', 'some mixed data: fö〩'), '/path/to/remote/file')
+ self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='some value')), '/path/to/remote/file')
+ self.assertEqual(action_base._transfer_data('/path/to/remote/file', dict(some_key='fö〩')), '/path/to/remote/file')
+
+ mock_afo.write.side_effect = Exception()
+ self.assertRaises(AnsibleError, action_base._transfer_data, '/path/to/remote/file', '')
+
+ def test_action_base__execute_remote_stat(self):
+ # create our fake task
+ mock_task = MagicMock()
+
+ # create a mock connection, so we don't actually try and connect to things
+ mock_connection = MagicMock()
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=None,
+ templar=None,
+ shared_loader_obj=None,
+ )
+
+ action_base._execute_module = MagicMock()
+
+ # test normal case
+ action_base._execute_module.return_value = dict(stat=dict(checksum='1111111111111111111111111111111111', exists=True))
+ res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
+ self.assertEqual(res['checksum'], '1111111111111111111111111111111111')
+
+ # test does not exist
+ action_base._execute_module.return_value = dict(stat=dict(exists=False))
+ res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
+ self.assertFalse(res['exists'])
+ self.assertEqual(res['checksum'], '1')
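+        # ('1' is the sentinel checksum reported when the remote file is absent)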
+
+ # test no checksum in result from _execute_module
+ action_base._execute_module.return_value = dict(stat=dict(exists=True))
+ res = action_base._execute_remote_stat(path='/path/to/file', all_vars=dict(), follow=False)
+ self.assertTrue(res['exists'])
+ self.assertEqual(res['checksum'], '')
+
+ # test stat call failed
+ action_base._execute_module.return_value = dict(failed=True, msg="because I said so")
+ self.assertRaises(AnsibleError, action_base._execute_remote_stat, path='/path/to/file', all_vars=dict(), follow=False)
+
+ def test_action_base__execute_module(self):
+ # create our fake task
+ mock_task = MagicMock()
+ mock_task.action = 'copy'
+ mock_task.args = dict(a=1, b=2, c=3)
+
+ # create a mock connection, so we don't actually try and connect to things
+ def build_module_command(env_string, shebang, cmd, arg_path=None):
+ to_run = [env_string, cmd]
+ if arg_path:
+ to_run.append(arg_path)
+ return " ".join(to_run)
+
+ def get_option(option):
+ return {'admin_users': ['root', 'toor']}.get(option)
+
+ mock_connection = MagicMock()
+ mock_connection.build_module_command.side_effect = build_module_command
+ mock_connection.socket_path = None
+ mock_connection._shell.get_remote_filename.return_value = 'copy.py'
+ mock_connection._shell.join_path.side_effect = os.path.join
+ mock_connection._shell.tmpdir = '/var/tmp/mytempdir'
+ mock_connection._shell.get_option = get_option
+
+ # we're using a real play context here
+ play_context = PlayContext()
+
+ # our test class
+ action_base = DerivedActionBase(
+ task=mock_task,
+ connection=mock_connection,
+ play_context=play_context,
+ loader=None,
+ templar=None,
+ shared_loader_obj=None,
+ )
+
+ # fake a lot of methods as we test those elsewhere
+ action_base._configure_module = MagicMock()
+ action_base._supports_check_mode = MagicMock()
+ action_base._is_pipelining_enabled = MagicMock()
+ action_base._make_tmp_path = MagicMock()
+ action_base._transfer_data = MagicMock()
+ action_base._compute_environment_string = MagicMock()
+ action_base._low_level_execute_command = MagicMock()
+ action_base._fixup_perms2 = MagicMock()
+
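+        # _configure_module() is mocked to return (style, shebang, data, path),
+        # the same tuple shape unpacked earlier in this file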
+ action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
+ action_base._is_pipelining_enabled.return_value = False
+ action_base._compute_environment_string.return_value = ''
+ action_base._connection.has_pipelining = False
+ action_base._make_tmp_path.return_value = '/the/tmp/path'
+ action_base._low_level_execute_command.return_value = dict(stdout='{"rc": 0, "stdout": "ok"}')
+ self.assertEqual(action_base._execute_module(module_name=None, module_args=None), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+ self.assertEqual(
+ action_base._execute_module(
+ module_name='foo',
+ module_args=dict(z=9, y=8, x=7),
+ task_vars=dict(a=1)
+ ),
+ dict(
+ _ansible_parsed=True,
+ rc=0,
+ stdout="ok",
+ stdout_lines=['ok'],
+ )
+ )
+
+ # test with needing/removing a remote tmp path
+ action_base._configure_module.return_value = ('old', '#!/usr/bin/python', 'this is the module data', 'path')
+ action_base._is_pipelining_enabled.return_value = False
+ action_base._make_tmp_path.return_value = '/the/tmp/path'
+ self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+
+ action_base._configure_module.return_value = ('non_native_want_json', '#!/usr/bin/python', 'this is the module data', 'path')
+ self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+
+ play_context.become = True
+ play_context.become_user = 'foo'
+ self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+
+ # test an invalid shebang return
+ action_base._configure_module.return_value = ('new', '', 'this is the module data', 'path')
+ action_base._is_pipelining_enabled.return_value = False
+ action_base._make_tmp_path.return_value = '/the/tmp/path'
+ self.assertRaises(AnsibleError, action_base._execute_module)
+
+ # test with check mode enabled, once with support for check
+ # mode and once with support disabled to raise an error
+ play_context.check_mode = True
+ action_base._configure_module.return_value = ('new', '#!/usr/bin/python', 'this is the module data', 'path')
+ self.assertEqual(action_base._execute_module(), dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
+ action_base._supports_check_mode = False
+ self.assertRaises(AnsibleError, action_base._execute_module)
+
+ def test_action_base_sudo_only_if_user_differs(self):
+ fake_loader = MagicMock()
+ fake_loader.get_basedir.return_value = os.getcwd()
+ play_context = PlayContext()
+
+ action_base = DerivedActionBase(None, None, play_context, fake_loader, None, None)
+ action_base.get_become_option = MagicMock(return_value='root')
+ action_base._get_remote_user = MagicMock(return_value='root')
+
+ action_base._connection = MagicMock(exec_command=MagicMock(return_value=(0, '', '')))
+
+ action_base._connection._shell = shell = MagicMock(append_command=MagicMock(return_value=('JOINED CMD')))
+
+ action_base._connection.become = become = MagicMock()
+ become.build_become_command.return_value = 'foo'
+
+ action_base._low_level_execute_command('ECHO', sudoable=True)
+ become.build_become_command.assert_not_called()
+
+ action_base._get_remote_user.return_value = 'apo'
+ action_base._low_level_execute_command('ECHO', sudoable=True, executable='/bin/csh')
+ become.build_become_command.assert_called_once_with("ECHO", shell)
+
+ become.build_become_command.reset_mock()
+
+ with patch.object(C, 'BECOME_ALLOW_SAME_USER', new=True):
+ action_base._get_remote_user.return_value = 'root'
+ action_base._low_level_execute_command('ECHO SAME', sudoable=True)
+ become.build_become_command.assert_called_once_with("ECHO SAME", shell)
+
+ def test__remote_expand_user_relative_pathing(self):
+ action_base = _action_base()
+ action_base._play_context.remote_addr = 'bar'
+ action_base._low_level_execute_command = MagicMock(return_value={'stdout': b'../home/user'})
+ action_base._connection._shell.join_path.return_value = '../home/user/foo'
+ with self.assertRaises(AnsibleError) as cm:
+ action_base._remote_expand_user('~/foo')
+ self.assertEqual(
+ cm.exception.message,
+ "'bar' returned an invalid relative home directory path containing '..'"
+ )
+
+
+class TestActionBaseCleanReturnedData(unittest.TestCase):
+ def test(self):
+
+ fake_loader = DictDataLoader({
+ })
+ mock_module_loader = MagicMock()
+ mock_shared_loader_obj = MagicMock()
+ mock_shared_loader_obj.module_loader = mock_module_loader
+ connection_loader_paths = ['/tmp/asdfadf', '/usr/lib64/whatever',
+ 'dfadfasf',
+ 'foo.py',
+ '.*',
+                                    # FIXME: a path with parens breaks the regex
+ # '(.*)',
+ '/path/to/ansible/lib/ansible/plugins/connection/custom_connection.py',
+ '/path/to/ansible/lib/ansible/plugins/connection/ssh.py']
+
+ def fake_all(path_only=None):
+ for path in connection_loader_paths:
+ yield path
+
+ mock_connection_loader = MagicMock()
+ mock_connection_loader.all = fake_all
+
+ mock_shared_loader_obj.connection_loader = mock_connection_loader
+ mock_connection = MagicMock()
+ # mock_connection._shell.env_prefix.side_effect = env_prefix
+
+ # action_base = DerivedActionBase(mock_task, mock_connection, play_context, None, None, None)
+ action_base = DerivedActionBase(task=None,
+ connection=mock_connection,
+ play_context=None,
+ loader=fake_loader,
+ templar=None,
+ shared_loader_obj=mock_shared_loader_obj)
+ data = {'ansible_playbook_python': '/usr/bin/python',
+ # 'ansible_rsync_path': '/usr/bin/rsync',
+ 'ansible_python_interpreter': '/usr/bin/python',
+ 'ansible_ssh_some_var': 'whatever',
+ 'ansible_ssh_host_key_somehost': 'some key here',
+ 'some_other_var': 'foo bar'}
+ data = clean_facts(data)
+ self.assertNotIn('ansible_playbook_python', data)
+ self.assertNotIn('ansible_python_interpreter', data)
+ self.assertIn('ansible_ssh_host_key_somehost', data)
+ self.assertIn('some_other_var', data)
+
+
+class TestActionBaseParseReturnedData(unittest.TestCase):
+
+ def test_fail_no_json(self):
+ action_base = _action_base()
+ rc = 0
+ stdout = 'foo\nbar\n'
+ err = 'oopsy'
+ returned_data = {'rc': rc,
+ 'stdout': stdout,
+ 'stdout_lines': stdout.splitlines(),
+ 'stderr': err}
+ res = action_base._parse_returned_data(returned_data)
+ self.assertFalse(res['_ansible_parsed'])
+ self.assertTrue(res['failed'])
+ self.assertEqual(res['module_stderr'], err)
+
+ def test_json_empty(self):
+ action_base = _action_base()
+ rc = 0
+ stdout = '{}\n'
+ err = ''
+ returned_data = {'rc': rc,
+ 'stdout': stdout,
+ 'stdout_lines': stdout.splitlines(),
+ 'stderr': err}
+ res = action_base._parse_returned_data(returned_data)
+ del res['_ansible_parsed'] # we always have _ansible_parsed
+ self.assertEqual(len(res), 0)
+ self.assertFalse(res)
+
+ def test_json_facts(self):
+ action_base = _action_base()
+ rc = 0
+ stdout = '{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"}}\n'
+ err = ''
+
+ returned_data = {'rc': rc,
+ 'stdout': stdout,
+ 'stdout_lines': stdout.splitlines(),
+ 'stderr': err}
+ res = action_base._parse_returned_data(returned_data)
+ self.assertTrue(res['ansible_facts'])
+ self.assertIn('ansible_blip', res['ansible_facts'])
+ # TODO: Should this be an AnsibleUnsafe?
+ # self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe)
+
+ def test_json_facts_add_host(self):
+ action_base = _action_base()
+ rc = 0
+ stdout = '''{"ansible_facts": {"foo": "bar", "ansible_blip": "blip_value"},
+ "add_host": {"host_vars": {"some_key": ["whatever the add_host object is"]}
+ }
+ }\n'''
+ err = ''
+
+ returned_data = {'rc': rc,
+ 'stdout': stdout,
+ 'stdout_lines': stdout.splitlines(),
+ 'stderr': err}
+ res = action_base._parse_returned_data(returned_data)
+ self.assertTrue(res['ansible_facts'])
+ self.assertIn('ansible_blip', res['ansible_facts'])
+ self.assertIn('add_host', res)
+ # TODO: Should this be an AnsibleUnsafe?
+ # self.assertIsInstance(res['ansible_facts'], AnsibleUnsafe)
diff --git a/test/units/plugins/action/test_gather_facts.py b/test/units/plugins/action/test_gather_facts.py
new file mode 100644
index 00000000..e15edd39
--- /dev/null
+++ b/test/units/plugins/action/test_gather_facts.py
@@ -0,0 +1,87 @@
+# (c) 2016, Saran Ahluwalia <ahlusar.ahluwalia@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import MagicMock, patch
+
+from ansible import constants as C
+from ansible.plugins.action.gather_facts import ActionModule
+from ansible.playbook.task import Task
+from ansible.template import Templar
+import ansible.executor.module_common as module_common
+
+from units.mock.loader import DictDataLoader
+
+
+class TestNetworkFacts(unittest.TestCase):
+ task = MagicMock(Task)
+ play_context = MagicMock()
+ play_context.check_mode = False
+ connection = MagicMock()
+ fake_loader = DictDataLoader({
+ })
+ templar = Templar(loader=fake_loader)
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_network_gather_facts(self):
+ self.task_vars = {'ansible_network_os': 'ios'}
+ self.task.action = 'gather_facts'
+ self.task.async_val = False
+ self.task._ansible_internal_redirect_list = []
+ self.task.args = {'gather_subset': 'min'}
+ self.task.module_defaults = [{'ios_facts': {'gather_subset': 'min'}}]
+
+ plugin = ActionModule(self.task, self.connection, self.play_context, loader=None, templar=self.templar, shared_loader_obj=None)
+ plugin._execute_module = MagicMock()
+
+ res = plugin.run(task_vars=self.task_vars)
+ self.assertEqual(res['ansible_facts']['_ansible_facts_gathered'], True)
+
+ mod_args = plugin._get_module_args('ios_facts', task_vars=self.task_vars)
+ self.assertEqual(mod_args['gather_subset'], 'min')
+
+ facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.task_vars)
+ self.assertEqual(facts_modules, ['ansible.legacy.ios_facts'])
+
+ @patch.object(module_common, '_get_collection_metadata', return_value={})
+ def test_network_gather_facts_fqcn(self, mock_collection_metadata):
+ self.fqcn_task_vars = {'ansible_network_os': 'cisco.ios.ios'}
+ self.task.action = 'gather_facts'
+ self.task._ansible_internal_redirect_list = ['cisco.ios.ios_facts']
+ self.task.async_val = False
+ self.task.args = {'gather_subset': 'min'}
+ self.task.module_defaults = [{'cisco.ios.ios_facts': {'gather_subset': 'min'}}]
+
+ plugin = ActionModule(self.task, self.connection, self.play_context, loader=None, templar=self.templar, shared_loader_obj=None)
+ plugin._execute_module = MagicMock()
+
+ res = plugin.run(task_vars=self.fqcn_task_vars)
+ self.assertEqual(res['ansible_facts']['_ansible_facts_gathered'], True)
+
+ mod_args = plugin._get_module_args('cisco.ios.ios_facts', task_vars=self.fqcn_task_vars)
+ self.assertEqual(mod_args['gather_subset'], 'min')
+
+ facts_modules = C.config.get_config_value('FACTS_MODULES', variables=self.fqcn_task_vars)
+ self.assertEqual(facts_modules, ['cisco.ios.ios_facts'])
diff --git a/test/units/plugins/action/test_raw.py b/test/units/plugins/action/test_raw.py
new file mode 100644
index 00000000..a8bde6c1
--- /dev/null
+++ b/test/units/plugins/action/test_raw.py
@@ -0,0 +1,105 @@
+# (c) 2016, Saran Ahluwalia <ahlusar.ahluwalia@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleActionFail
+from units.compat import unittest
+from units.compat.mock import MagicMock, Mock
+from ansible.plugins.action.raw import ActionModule
+from ansible.playbook.task import Task
+from ansible.plugins.loader import connection_loader
+
+
+class TestCopyResultExclude(unittest.TestCase):
+
+ def setUp(self):
+ self.play_context = Mock()
+ self.play_context.shell = 'sh'
+ self.connection = connection_loader.get('local', self.play_context, os.devnull)
+
+ def tearDown(self):
+ pass
+
+    # The current behavior of the raw action with regard to executable is in question;
+    # test_raw_executable_is_not_empty_string verifies the current behavior (whether it is desired or not).
+ # Please refer to the following for context:
+ # Issue: https://github.com/ansible/ansible/issues/16054
+ # PR: https://github.com/ansible/ansible/pull/16085
+
+ def test_raw_executable_is_not_empty_string(self):
+
+ task = MagicMock(Task)
+ task.async_val = False
+
+ task.args = {'_raw_params': 'Args1'}
+ self.play_context.check_mode = False
+
+ self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+ self.mock_am._low_level_execute_command = Mock(return_value={})
+ self.mock_am.display = Mock()
+ self.mock_am._admin_users = ['root', 'toor']
+
+ self.mock_am.run()
+ self.mock_am._low_level_execute_command.assert_called_with('Args1', executable=False)
+
+ def test_raw_check_mode_is_True(self):
+
+ task = MagicMock(Task)
+ task.async_val = False
+
+ task.args = {'_raw_params': 'Args1'}
+ self.play_context.check_mode = True
+
+ try:
+ self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+ except AnsibleActionFail:
+ pass
+
+ def test_raw_test_environment_is_None(self):
+
+ task = MagicMock(Task)
+ task.async_val = False
+
+ task.args = {'_raw_params': 'Args1'}
+ task.environment = None
+ self.play_context.check_mode = False
+
+ self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+ self.mock_am._low_level_execute_command = Mock(return_value={})
+ self.mock_am.display = Mock()
+
+ self.assertEqual(task.environment, None)
+
+ def test_raw_task_vars_is_not_None(self):
+
+ task = MagicMock(Task)
+ task.async_val = False
+
+ task.args = {'_raw_params': 'Args1'}
+ task.environment = None
+ self.play_context.check_mode = False
+
+ self.mock_am = ActionModule(task, self.connection, self.play_context, loader=None, templar=None, shared_loader_obj=None)
+ self.mock_am._low_level_execute_command = Mock(return_value={})
+ self.mock_am.display = Mock()
+
+ self.mock_am.run(task_vars={'a': 'b'})
+ self.assertEqual(task.environment, None)
diff --git a/test/units/plugins/become/__init__.py b/test/units/plugins/become/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/become/__init__.py
diff --git a/test/units/plugins/become/conftest.py b/test/units/plugins/become/conftest.py
new file mode 100644
index 00000000..a04a5e2d
--- /dev/null
+++ b/test/units/plugins/become/conftest.py
@@ -0,0 +1,37 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.utils import context_objects as co
+
+
+@pytest.fixture
+def parser():
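+    # base parser carrying every CLI option group the become plugins may consult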
+ parser = opt_help.create_base_parser('testparser')
+
+ opt_help.add_runas_options(parser)
+ opt_help.add_meta_options(parser)
+ opt_help.add_runtask_options(parser)
+ opt_help.add_vault_options(parser)
+ opt_help.add_async_options(parser)
+ opt_help.add_connect_options(parser)
+ opt_help.add_subset_options(parser)
+ opt_help.add_check_options(parser)
+ opt_help.add_inventory_options(parser)
+
+ return parser
+
+
+@pytest.fixture
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
diff --git a/test/units/plugins/become/test_su.py b/test/units/plugins/become/test_su.py
new file mode 100644
index 00000000..73eb71dd
--- /dev/null
+++ b/test/units/plugins/become/test_su.py
@@ -0,0 +1,40 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import become_loader
+
+
+def test_su(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+ play_context = PlayContext()
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ su_exe = 'su'
+ su_flags = ''
+
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+ assert cmd == default_cmd
+
+ success = 'BECOME-SUCCESS-.+?'
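+    # the become wrapper echoes a unique BECOME-SUCCESS marker ahead of the real
+    # command, so the generated command line is checked with a regex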
+
+ play_context.become = True
+ play_context.become_user = 'foo'
+ play_context.become_pass = None
+ play_context.become_method = 'su'
+ play_context.set_become_plugin(become_loader.get('su'))
+ play_context.become_flags = su_flags
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+ assert (re.match("""%s %s -c '%s -c '"'"'echo %s; %s'"'"''""" % (su_exe, play_context.become_user, default_exe,
+ success, default_cmd), cmd) is not None)
diff --git a/test/units/plugins/become/test_sudo.py b/test/units/plugins/become/test_sudo.py
new file mode 100644
index 00000000..ba501296
--- /dev/null
+++ b/test/units/plugins/become/test_sudo.py
@@ -0,0 +1,45 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import become_loader
+
+
+def test_sudo(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+ play_context = PlayContext()
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ sudo_exe = 'sudo'
+ sudo_flags = '-H -s -n'
+
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+ assert cmd == default_cmd
+
+ success = 'BECOME-SUCCESS-.+?'
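+    # as in test_su, the command is matched against the echoed BECOME-SUCCESS marker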
+
+ play_context.become = True
+ play_context.become_user = 'foo'
+ play_context.set_become_plugin(become_loader.get('sudo'))
+ play_context.become_flags = sudo_flags
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+
+ assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user,
+ default_exe, success, default_cmd), cmd) is not None)
+
+ play_context.become_pass = 'testpass'
+ cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
+ assert (re.match("""%s %s -p "%s" -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''),
+ r"\[sudo via ansible, key=.+?\] password:", play_context.become_user,
+ default_exe, success, default_cmd), cmd) is not None)
diff --git a/test/units/plugins/cache/__init__.py b/test/units/plugins/cache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/cache/__init__.py
diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py
new file mode 100644
index 00000000..1f16b806
--- /dev/null
+++ b/test/units/plugins/cache/test_cache.py
@@ -0,0 +1,167 @@
+# (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest, mock
+from ansible.errors import AnsibleError
+from ansible.plugins.cache import FactCache, CachePluginAdjudicator
+from ansible.plugins.cache.base import BaseCacheModule
+from ansible.plugins.cache.memory import CacheModule as MemoryCache
+from ansible.plugins.loader import cache_loader
+
+import pytest
+
+
+class TestCachePluginAdjudicator:
+ # memory plugin cache
+ cache = CachePluginAdjudicator()
+ cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
+ cache['cache_key_2'] = {'key': 'value'}
+
+ def test___setitem__(self):
+ self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
+ assert self.cache['new_cache_key'] == {'new_key1': ['new_value1', 'new_value2']}
+
+ def test_inner___setitem__(self):
+ self.cache['new_cache_key'] = {'new_key1': ['new_value1', 'new_value2']}
+ self.cache['new_cache_key']['new_key1'][0] = 'updated_value1'
+ assert self.cache['new_cache_key'] == {'new_key1': ['updated_value1', 'new_value2']}
+
+ def test___contains__(self):
+ assert 'cache_key' in self.cache
+ assert 'not_cache_key' not in self.cache
+
+ def test_get(self):
+ assert self.cache.get('cache_key') == {'key1': 'value1', 'key2': 'value2'}
+
+ def test_get_with_default(self):
+ assert self.cache.get('foo', 'bar') == 'bar'
+
+ def test_get_without_default(self):
+ assert self.cache.get('foo') is None
+
+ def test___getitem__(self):
+ with pytest.raises(KeyError) as err:
+ self.cache['foo']
+
+ def test_pop_with_default(self):
+ assert self.cache.pop('foo', 'bar') == 'bar'
+
+ def test_pop_without_default(self):
+ with pytest.raises(KeyError) as err:
+ assert self.cache.pop('foo')
+
+ def test_pop(self):
+ v = self.cache.pop('cache_key_2')
+ assert v == {'key': 'value'}
+ assert 'cache_key_2' not in self.cache
+
+ def test_update(self):
+ self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
+ assert self.cache['cache_key']['key2'] == 'updatedvalue'
+
+
+class TestFactCache(unittest.TestCase):
+
+ def setUp(self):
+ with mock.patch('ansible.constants.CACHE_PLUGIN', 'memory'):
+ self.cache = FactCache()
+
+ def test_copy(self):
+ self.cache['avocado'] = 'fruit'
+ self.cache['daisy'] = 'flower'
+ a_copy = self.cache.copy()
+ self.assertEqual(type(a_copy), dict)
+ self.assertEqual(a_copy, dict(avocado='fruit', daisy='flower'))
+
+ def test_plugin_load_failure(self):
+ # See https://github.com/ansible/ansible/issues/18751
+ # Note no fact_connection config set, so this will fail
+ with mock.patch('ansible.constants.CACHE_PLUGIN', 'json'):
+ self.assertRaisesRegexp(AnsibleError,
+ "Unable to load the facts cache plugin.*json.*",
+ FactCache)
+
+ def test_update(self):
+ self.cache.update({'cache_key': {'key2': 'updatedvalue'}})
+ assert self.cache['cache_key']['key2'] == 'updatedvalue'
+
+ def test_update_legacy(self):
+ self.cache.update('cache_key', {'key2': 'updatedvalue'})
+ assert self.cache['cache_key']['key2'] == 'updatedvalue'
+
+ def test_update_legacy_key_exists(self):
+ self.cache['cache_key'] = {'key': 'value', 'key2': 'value2'}
+ self.cache.update('cache_key', {'key': 'updatedvalue'})
+ assert self.cache['cache_key']['key'] == 'updatedvalue'
+ assert self.cache['cache_key']['key2'] == 'value2'
+
+
+class TestAbstractClass(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_subclass_error(self):
+ class CacheModule1(BaseCacheModule):
+ pass
+ with self.assertRaises(TypeError):
+ CacheModule1() # pylint: disable=abstract-class-instantiated
+
+ class CacheModule2(BaseCacheModule):
+ def get(self, key):
+ super(CacheModule2, self).get(key)
+
+ with self.assertRaises(TypeError):
+ CacheModule2() # pylint: disable=abstract-class-instantiated
+
+ def test_subclass_success(self):
+ class CacheModule3(BaseCacheModule):
+ def get(self, key):
+ super(CacheModule3, self).get(key)
+
+ def set(self, key, value):
+ super(CacheModule3, self).set(key, value)
+
+ def keys(self):
+ super(CacheModule3, self).keys()
+
+ def contains(self, key):
+ super(CacheModule3, self).contains(key)
+
+ def delete(self, key):
+ super(CacheModule3, self).delete(key)
+
+ def flush(self):
+ super(CacheModule3, self).flush()
+
+ def copy(self):
+ super(CacheModule3, self).copy()
+
+ self.assertIsInstance(CacheModule3(), CacheModule3)
+
+ def test_memory_cachemodule(self):
+ self.assertIsInstance(MemoryCache(), MemoryCache)
+
+ def test_memory_cachemodule_with_loader(self):
+ self.assertIsInstance(cache_loader.get('memory'), MemoryCache)
diff --git a/test/units/plugins/callback/__init__.py b/test/units/plugins/callback/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/callback/__init__.py
diff --git a/test/units/plugins/callback/test_callback.py b/test/units/plugins/callback/test_callback.py
new file mode 100644
index 00000000..0c9a335c
--- /dev/null
+++ b/test/units/plugins/callback/test_callback.py
@@ -0,0 +1,412 @@
+# (c) 2012-2014, Chris Meyers <chris.meyers.fsu@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import re
+import textwrap
+import types
+
+from units.compat import unittest
+from units.compat.mock import MagicMock
+
+from ansible.plugins.callback import CallbackBase
+
+
+class TestCallback(unittest.TestCase):
+ # FIXME: This doesn't really test anything...
+ def test_init(self):
+ CallbackBase()
+
+ def test_display(self):
+ display_mock = MagicMock()
+ display_mock.verbosity = 0
+ cb = CallbackBase(display=display_mock)
+ self.assertIs(cb._display, display_mock)
+
+ def test_display_verbose(self):
+ display_mock = MagicMock()
+ display_mock.verbosity = 5
+ cb = CallbackBase(display=display_mock)
+ self.assertIs(cb._display, display_mock)
+
+ # TODO: import callback module so we can patch callback.cli/callback.C
+
+
+class TestCallbackResults(unittest.TestCase):
+
+ def test_get_item(self):
+ cb = CallbackBase()
+ results = {'item': 'some_item'}
+ res = cb._get_item(results)
+ self.assertEqual(res, 'some_item')
+
+ def test_get_item_no_log(self):
+ cb = CallbackBase()
+ results = {'item': 'some_item', '_ansible_no_log': True}
+ res = cb._get_item(results)
+ self.assertEqual(res, "(censored due to no_log)")
+
+ results = {'item': 'some_item', '_ansible_no_log': False}
+ res = cb._get_item(results)
+ self.assertEqual(res, "some_item")
+
+ def test_get_item_label(self):
+ cb = CallbackBase()
+ results = {'item': 'some_item'}
+ res = cb._get_item_label(results)
+ self.assertEqual(res, 'some_item')
+
+ def test_get_item_label_no_log(self):
+ cb = CallbackBase()
+ results = {'item': 'some_item', '_ansible_no_log': True}
+ res = cb._get_item_label(results)
+ self.assertEqual(res, "(censored due to no_log)")
+
+ results = {'item': 'some_item', '_ansible_no_log': False}
+ res = cb._get_item_label(results)
+ self.assertEqual(res, "some_item")
+
+ def test_clean_results_debug_task(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item',
+ 'invocation': 'foo --bar whatever [some_json]',
+ 'a': 'a single a in result note letter a is in invocation',
+ 'b': 'a single b in result note letter b is not in invocation',
+ 'changed': True}
+
+ cb._clean_results(result, 'debug')
+
+ # See https://github.com/ansible/ansible/issues/33723
+ self.assertTrue('a' in result)
+ self.assertTrue('b' in result)
+ self.assertFalse('invocation' in result)
+ self.assertFalse('changed' in result)
+
+ def test_clean_results_debug_task_no_invocation(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item',
+ 'a': 'a single a in result note letter a is in invocation',
+ 'b': 'a single b in result note letter b is not in invocation',
+ 'changed': True}
+
+ cb._clean_results(result, 'debug')
+ self.assertTrue('a' in result)
+ self.assertTrue('b' in result)
+ self.assertFalse('changed' in result)
+ self.assertFalse('invocation' in result)
+
+ def test_clean_results_debug_task_empty_results(self):
+ cb = CallbackBase()
+ result = {}
+ cb._clean_results(result, 'debug')
+ self.assertFalse('invocation' in result)
+ self.assertEqual(len(result), 0)
+
+ def test_clean_results(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item',
+ 'invocation': 'foo --bar whatever [some_json]',
+ 'a': 'a single a in result note letter a is in invocation',
+ 'b': 'a single b in result note letter b is not in invocation',
+ 'changed': True}
+
+ expected_result = result.copy()
+ cb._clean_results(result, 'ebug')
+ self.assertEqual(result, expected_result)
+
+
+class TestCallbackDumpResults(object):
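+    # convention: values containing SENTINEL must be stripped from the dumped
+    # JSON, while values containing LEFTIN must survive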
+ def test_internal_keys(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item',
+ '_ansible_some_var': 'SENTINEL',
+ 'testing_ansible_out': 'should_be_left_in LEFTIN',
+ 'invocation': 'foo --bar whatever [some_json]',
+ 'some_dict_key': {'a_sub_dict_for_key': 'baz'},
+ 'bad_dict_key': {'_ansible_internal_blah': 'SENTINEL'},
+ 'changed': True}
+ json_out = cb._dump_results(result)
+ assert '"_ansible_' not in json_out
+ assert 'SENTINEL' not in json_out
+ assert 'LEFTIN' in json_out
+
+ def test_exception(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item LEFTIN',
+ 'exception': ['frame1', 'SENTINEL']}
+ json_out = cb._dump_results(result)
+ assert 'SENTINEL' not in json_out
+ assert 'exception' not in json_out
+ assert 'LEFTIN' in json_out
+
+ def test_verbose(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item LEFTIN',
+ '_ansible_verbose_always': 'chicane'}
+ json_out = cb._dump_results(result)
+ assert 'SENTINEL' not in json_out
+ assert 'LEFTIN' in json_out
+
+ def test_diff(self):
+ cb = CallbackBase()
+ result = {'item': 'some_item LEFTIN',
+ 'diff': ['remove stuff', 'added LEFTIN'],
+ '_ansible_verbose_always': 'chicane'}
+ json_out = cb._dump_results(result)
+ assert 'SENTINEL' not in json_out
+ assert 'LEFTIN' in json_out
+
+ def test_mixed_keys(self):
+ cb = CallbackBase()
+ result = {3: 'pi',
+ 'tau': 6}
+ json_out = cb._dump_results(result)
+ round_trip_result = json.loads(json_out)
+ assert len(round_trip_result) == 2
+ assert '3' in round_trip_result
+ assert 'tau' in round_trip_result
+ assert round_trip_result['3'] == 'pi'
+ assert round_trip_result['tau'] == 6
+
+
+class TestCallbackDiff(unittest.TestCase):
+
+ def setUp(self):
+ self.cb = CallbackBase()
+
+ def _strip_color(self, s):
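+        # strip ANSI color escape sequences so diff output compares as plain text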
+ return re.sub('\033\\[[^m]*m', '', s)
+
+ def test_difflist(self):
+ # TODO: split into smaller tests?
+ difflist = [{'before': u'preface\nThe Before String\npostscript',
+ 'after': u'preface\nThe After String\npostscript',
+ 'before_header': u'just before',
+ 'after_header': u'just after'
+ },
+ {'before': u'preface\nThe Before String\npostscript',
+ 'after': u'preface\nThe After String\npostscript',
+ },
+ {'src_binary': 'chicane'},
+ {'dst_binary': 'chicanery'},
+ {'dst_larger': 1},
+ {'src_larger': 2},
+ {'prepared': u'what does prepared do?'},
+ {'before_header': u'just before'},
+ {'after_header': u'just after'}]
+
+ res = self.cb._get_diff(difflist)
+
+ self.assertIn(u'Before String', res)
+ self.assertIn(u'After String', res)
+ self.assertIn(u'just before', res)
+ self.assertIn(u'just after', res)
+
+ def test_simple_diff(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': 'one\ntwo\nthree\n',
+ 'after': 'one\nthree\nfour\n',
+ })),
+ textwrap.dedent('''\
+ --- before: somefile.txt
+ +++ after: generated from template somefile.j2
+ @@ -1,3 +1,3 @@
+ one
+ -two
+ three
+ +four
+
+ '''))
+
+ def test_new_file(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': '',
+ 'after': 'one\ntwo\nthree\n',
+ })),
+ textwrap.dedent('''\
+ --- before: somefile.txt
+ +++ after: generated from template somefile.j2
+ @@ -0,0 +1,3 @@
+ +one
+ +two
+ +three
+
+ '''))
+
+ def test_clear_file(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': 'one\ntwo\nthree\n',
+ 'after': '',
+ })),
+ textwrap.dedent('''\
+ --- before: somefile.txt
+ +++ after: generated from template somefile.j2
+ @@ -1,3 +0,0 @@
+ -one
+ -two
+ -three
+
+ '''))
+
+ def test_no_trailing_newline_before(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': 'one\ntwo\nthree',
+ 'after': 'one\ntwo\nthree\n',
+ })),
+ textwrap.dedent('''\
+ --- before: somefile.txt
+ +++ after: generated from template somefile.j2
+ @@ -1,3 +1,3 @@
+ one
+ two
+ -three
+ \\ No newline at end of file
+ +three
+
+ '''))
+
+ def test_no_trailing_newline_after(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': 'one\ntwo\nthree\n',
+ 'after': 'one\ntwo\nthree',
+ })),
+ textwrap.dedent('''\
+ --- before: somefile.txt
+ +++ after: generated from template somefile.j2
+ @@ -1,3 +1,3 @@
+ one
+ two
+ -three
+ +three
+ \\ No newline at end of file
+
+ '''))
+
+ def test_no_trailing_newline_both(self):
+ self.assertMultiLineEqual(
+ self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': 'one\ntwo\nthree',
+ 'after': 'one\ntwo\nthree',
+ }),
+ '')
+
+ def test_no_trailing_newline_both_with_some_changes(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before_header': 'somefile.txt',
+ 'after_header': 'generated from template somefile.j2',
+ 'before': 'one\ntwo\nthree',
+ 'after': 'one\nfive\nthree',
+ })),
+ textwrap.dedent('''\
+ --- before: somefile.txt
+ +++ after: generated from template somefile.j2
+ @@ -1,3 +1,3 @@
+ one
+ -two
+ +five
+ three
+ \\ No newline at end of file
+
+ '''))
+
+ def test_diff_dicts(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before': dict(one=1, two=2, three=3),
+ 'after': dict(one=1, three=3, four=4),
+ })),
+ textwrap.dedent('''\
+ --- before
+ +++ after
+ @@ -1,5 +1,5 @@
+ {
+ + "four": 4,
+ "one": 1,
+ - "three": 3,
+ - "two": 2
+ + "three": 3
+ }
+
+ '''))
+
+ def test_diff_before_none(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before': None,
+ 'after': 'one line\n',
+ })),
+ textwrap.dedent('''\
+ --- before
+ +++ after
+ @@ -0,0 +1 @@
+ +one line
+
+ '''))
+
+ def test_diff_after_none(self):
+ self.assertMultiLineEqual(
+ self._strip_color(self.cb._get_diff({
+ 'before': 'one line\n',
+ 'after': None,
+ })),
+ textwrap.dedent('''\
+ --- before
+ +++ after
+ @@ -1 +0,0 @@
+ -one line
+
+ '''))
+
+
+class TestCallbackOnMethods(unittest.TestCase):
+ def _find_on_methods(self, callback):
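+        # gather every attribute whose name contains '_on_' (the callback hooks)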
+ cb_dir = dir(callback)
+ method_names = [x for x in cb_dir if '_on_' in x]
+ methods = [getattr(callback, mn) for mn in method_names]
+ return methods
+
+ def test_are_methods(self):
+ cb = CallbackBase()
+ for method in self._find_on_methods(cb):
+ self.assertIsInstance(method, types.MethodType)
+
+ def test_on_any(self):
+ cb = CallbackBase()
+ cb.v2_on_any('whatever', some_keyword='blippy')
+ cb.on_any('whatever', some_keyword='blippy')
diff --git a/test/units/plugins/connection/__init__.py b/test/units/plugins/connection/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/connection/__init__.py
diff --git a/test/units/plugins/connection/test_connection.py b/test/units/plugins/connection/test_connection.py
new file mode 100644
index 00000000..17c2e085
--- /dev/null
+++ b/test/units/plugins/connection/test_connection.py
@@ -0,0 +1,169 @@
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+import sys
+import pytest
+
+from units.compat import mock
+from units.compat import unittest
+from units.compat.mock import MagicMock
+from units.compat.mock import patch
+from ansible.errors import AnsibleError
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.connection import ConnectionBase
+from ansible.plugins.loader import become_loader
+
+
+class TestConnectionBaseClass(unittest.TestCase):
+
+ def setUp(self):
+ self.play_context = PlayContext()
+ self.play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ self.in_stream = StringIO()
+
+ def tearDown(self):
+ pass
+
+ def test_subclass_error(self):
+ class ConnectionModule1(ConnectionBase):
+ pass
+ with self.assertRaises(TypeError):
+ ConnectionModule1() # pylint: disable=abstract-class-instantiated
+
+ class ConnectionModule2(ConnectionBase):
+ def get(self, key):
+ super(ConnectionModule2, self).get(key)
+
+ with self.assertRaises(TypeError):
+ ConnectionModule2() # pylint: disable=abstract-class-instantiated
+
+ def test_subclass_success(self):
+ class ConnectionModule3(ConnectionBase):
+
+ @property
+ def transport(self):
+ pass
+
+ def _connect(self):
+ pass
+
+ def exec_command(self):
+ pass
+
+ def put_file(self):
+ pass
+
+ def fetch_file(self):
+ pass
+
+ def close(self):
+ pass
+
+ self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)
+
+ def test_check_password_prompt(self):
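+        # Each byte string below simulates become output captured under a different
+        # transport/verbosity; the prompt must be detected in all of them except the
+        # last two ('nothing' and 'in_front').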
+ local = (
+ b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
+ b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
+ )
+
+ ssh_pipelining_vvvv = b'''
+debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251
+debug2: process_mux_new_session: channel 1: request tty 0, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0
+debug3: process_mux_new_session: got fds stdin 9, stdout 10, stderr 11
+debug2: client_session2_setup: id 2
+debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'
+debug2: channel 2: request exec confirm 1
+debug2: channel 2: rcvd ext data 67
+[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug2: channel 2: written 67 to efd 11
+BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq
+debug3: receive packet: type 98
+''' # noqa
+
+ ssh_nopipelining_vvvv = b'''
+debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251
+debug2: process_mux_new_session: channel 1: request tty 1, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0
+debug3: mux_client_request_session: session request sent
+debug3: send packet: type 98
+debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'
+debug2: channel 2: request exec confirm 1
+debug2: exec request accepted on channel 2
+[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug3: receive packet: type 2
+debug3: Received SSH2_MSG_IGNORE
+debug3: Received SSH2_MSG_IGNORE
+
+BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq
+debug3: receive packet: type 98
+''' # noqa
+
+ ssh_novvvv = (
+ b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
+ b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
+ )
+
+ dns_issue = (
+ b'timeout waiting for privilege escalation password prompt:\n'
+ b'sudo: sudo: unable to resolve host tcloud014\n'
+ b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
+ b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
+ )
+
+ nothing = b''
+
+ in_front = b'''
+debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo
+'''
+
+ class ConnectionFoo(ConnectionBase):
+
+ @property
+ def transport(self):
+ pass
+
+ def _connect(self):
+ pass
+
+ def exec_command(self):
+ pass
+
+ def put_file(self):
+ pass
+
+ def fetch_file(self):
+ pass
+
+ def close(self):
+ pass
+
+ c = ConnectionFoo(self.play_context, self.in_stream)
+ c.set_become_plugin(become_loader.get('sudo'))
+ c.become.prompt = '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+
+ self.assertTrue(c.check_password_prompt(local))
+ self.assertTrue(c.check_password_prompt(ssh_pipelining_vvvv))
+ self.assertTrue(c.check_password_prompt(ssh_nopipelining_vvvv))
+ self.assertTrue(c.check_password_prompt(ssh_novvvv))
+ self.assertTrue(c.check_password_prompt(dns_issue))
+ self.assertFalse(c.check_password_prompt(nothing))
+ self.assertFalse(c.check_password_prompt(in_front))
diff --git a/test/units/plugins/connection/test_local.py b/test/units/plugins/connection/test_local.py
new file mode 100644
index 00000000..e5525855
--- /dev/null
+++ b/test/units/plugins/connection/test_local.py
@@ -0,0 +1,40 @@
+#
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+import pytest
+
+from units.compat import unittest
+from ansible.plugins.connection import local
+from ansible.playbook.play_context import PlayContext
+
+
+class TestLocalConnectionClass(unittest.TestCase):
+
+ def test_local_connection_module(self):
+ play_context = PlayContext()
+ play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ in_stream = StringIO()
+
+ self.assertIsInstance(local.Connection(play_context, in_stream), local.Connection)
diff --git a/test/units/plugins/connection/test_paramiko.py b/test/units/plugins/connection/test_paramiko.py
new file mode 100644
index 00000000..e3643b14
--- /dev/null
+++ b/test/units/plugins/connection/test_paramiko.py
@@ -0,0 +1,42 @@
+#
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+import pytest
+
+from units.compat import unittest
+from ansible.plugins.connection import paramiko_ssh
+from ansible.playbook.play_context import PlayContext
+
+
+class TestParamikoConnectionClass(unittest.TestCase):
+
+ def test_paramiko_connection_module(self):
+ play_context = PlayContext()
+ play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ in_stream = StringIO()
+
+ self.assertIsInstance(
+ paramiko_ssh.Connection(play_context, in_stream),
+ paramiko_ssh.Connection)
diff --git a/test/units/plugins/connection/test_psrp.py b/test/units/plugins/connection/test_psrp.py
new file mode 100644
index 00000000..f6416751
--- /dev/null
+++ b/test/units/plugins/connection/test_psrp.py
@@ -0,0 +1,233 @@
+# -*- coding: utf-8 -*-
+# (c) 2018, Jordan Borean <jborean@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+from io import StringIO
+from units.compat.mock import MagicMock
+
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import connection_loader
+from ansible.utils.display import Display
+
+
+@pytest.fixture(autouse=True)
+def psrp_connection():
+ """Imports the psrp connection plugin with a mocked pypsrp module for testing"""
+
+ # Take a snapshot of sys.modules before we manipulate it
+ orig_modules = sys.modules.copy()
+ try:
+ fake_pypsrp = MagicMock()
+ fake_pypsrp.FEATURES = [
+ 'wsman_locale',
+ 'wsman_read_timeout',
+ 'wsman_reconnections',
+ ]
+
+ fake_wsman = MagicMock()
+ fake_wsman.AUTH_KWARGS = {
+ "certificate": ["certificate_key_pem", "certificate_pem"],
+ "credssp": ["credssp_auth_mechanism", "credssp_disable_tlsv1_2",
+ "credssp_minimum_version"],
+ "negotiate": ["negotiate_delegate", "negotiate_hostname_override",
+ "negotiate_send_cbt", "negotiate_service"],
+ "mock": ["mock_test1", "mock_test2"],
+ }
+
+ sys.modules["pypsrp"] = fake_pypsrp
+ sys.modules["pypsrp.complex_objects"] = MagicMock()
+ sys.modules["pypsrp.exceptions"] = MagicMock()
+ sys.modules["pypsrp.host"] = MagicMock()
+ sys.modules["pypsrp.powershell"] = MagicMock()
+ sys.modules["pypsrp.shell"] = MagicMock()
+ sys.modules["pypsrp.wsman"] = fake_wsman
+ sys.modules["requests.exceptions"] = MagicMock()
+
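+        # With the fakes registered in sys.modules, this import binds the plugin
+        # against the mocked pypsrp rather than the real library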
+ from ansible.plugins.connection import psrp
+
+        # Save the plugin's original import-state flags so they can be restored below
+ orig_has_psrp = psrp.HAS_PYPSRP
+ orig_psrp_imp_err = psrp.PYPSRP_IMP_ERR
+
+ yield psrp
+
+ psrp.HAS_PYPSRP = orig_has_psrp
+ psrp.PYPSRP_IMP_ERR = orig_psrp_imp_err
+ finally:
+        # Restore sys.modules to the snapshot taken above
+ sys.modules = orig_modules
+
+
+class TestConnectionPSRP(object):
+
+ OPTIONS_DATA = (
+ # default options
+ (
+ {'_extras': {}},
+ {
+ '_psrp_auth': 'negotiate',
+ '_psrp_cert_validation': True,
+ '_psrp_configuration_name': 'Microsoft.PowerShell',
+ '_psrp_connection_timeout': 30,
+ '_psrp_message_encryption': 'auto',
+ '_psrp_host': 'inventory_hostname',
+ '_psrp_conn_kwargs': {
+ 'server': 'inventory_hostname',
+ 'port': 5986,
+ 'username': None,
+ 'password': None,
+ 'ssl': True,
+ 'path': 'wsman',
+ 'auth': 'negotiate',
+ 'cert_validation': True,
+ 'connection_timeout': 30,
+ 'encryption': 'auto',
+ 'proxy': None,
+ 'no_proxy': False,
+ 'max_envelope_size': 153600,
+ 'operation_timeout': 20,
+ 'certificate_key_pem': None,
+ 'certificate_pem': None,
+ 'credssp_auth_mechanism': 'auto',
+ 'credssp_disable_tlsv1_2': False,
+ 'credssp_minimum_version': 2,
+ 'negotiate_delegate': None,
+ 'negotiate_hostname_override': None,
+ 'negotiate_send_cbt': True,
+ 'negotiate_service': 'WSMAN',
+ 'read_timeout': 30,
+ 'reconnection_backoff': 2.0,
+ 'reconnection_retries': 0,
+ },
+ '_psrp_max_envelope_size': 153600,
+ '_psrp_ignore_proxy': False,
+ '_psrp_operation_timeout': 20,
+ '_psrp_pass': None,
+ '_psrp_path': 'wsman',
+ '_psrp_port': 5986,
+ '_psrp_proxy': None,
+ '_psrp_protocol': 'https',
+ '_psrp_user': None
+ },
+ ),
+        # protocol http (ssl=False) inferred when the port is set to 5985
+ (
+ {'_extras': {}, 'ansible_port': '5985'},
+ {
+ '_psrp_port': 5985,
+ '_psrp_protocol': 'http'
+ },
+ ),
+        # protocol https (ssl=True) inferred when the port is anything other than 5985
+ (
+ {'_extras': {}, 'ansible_port': 1234},
+ {
+ '_psrp_port': 1234,
+ '_psrp_protocol': 'https'
+ },
+ ),
+ # port 5986 when ssl=True
+ (
+ {'_extras': {}, 'ansible_psrp_protocol': 'https'},
+ {
+ '_psrp_port': 5986,
+ '_psrp_protocol': 'https'
+ },
+ ),
+ # port 5985 when ssl=False
+ (
+ {'_extras': {}, 'ansible_psrp_protocol': 'http'},
+ {
+ '_psrp_port': 5985,
+ '_psrp_protocol': 'http'
+ },
+ ),
+ # psrp extras
+ (
+ {'_extras': {'ansible_psrp_mock_test1': True}},
+ {
+ '_psrp_conn_kwargs': {
+ 'server': 'inventory_hostname',
+ 'port': 5986,
+ 'username': None,
+ 'password': None,
+ 'ssl': True,
+ 'path': 'wsman',
+ 'auth': 'negotiate',
+ 'cert_validation': True,
+ 'connection_timeout': 30,
+ 'encryption': 'auto',
+ 'proxy': None,
+ 'no_proxy': False,
+ 'max_envelope_size': 153600,
+ 'operation_timeout': 20,
+ 'certificate_key_pem': None,
+ 'certificate_pem': None,
+ 'credssp_auth_mechanism': 'auto',
+ 'credssp_disable_tlsv1_2': False,
+ 'credssp_minimum_version': 2,
+ 'negotiate_delegate': None,
+ 'negotiate_hostname_override': None,
+ 'negotiate_send_cbt': True,
+ 'negotiate_service': 'WSMAN',
+ 'read_timeout': 30,
+ 'reconnection_backoff': 2.0,
+ 'reconnection_retries': 0,
+ 'mock_test1': True
+ },
+ },
+ ),
+        # cert validation mapped to False via the string 'ignore'
+ (
+ {'_extras': {}, 'ansible_psrp_cert_validation': 'ignore'},
+ {
+ '_psrp_cert_validation': False
+ },
+ ),
+ # cert validation path
+ (
+ {'_extras': {}, 'ansible_psrp_cert_trust_path': '/path/cert.pem'},
+ {
+ '_psrp_cert_validation': '/path/cert.pem'
+ },
+ ),
+ )
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ # pylint: disable=undefined-variable
+ @pytest.mark.parametrize('options, expected',
+ ((o, e) for o, e in OPTIONS_DATA))
+ def test_set_options(self, options, expected):
+ pc = PlayContext()
+ new_stdin = StringIO()
+
+ conn = connection_loader.get('psrp', pc, new_stdin)
+ conn.set_options(var_options=options)
+ conn._build_kwargs()
+
+        for attr, expected_value in expected.items():
+            actual = getattr(conn, attr)
+            assert actual == expected_value, \
+                "psrp attr '%s', actual '%s' != expected '%s'"\
+                % (attr, actual, expected_value)
+
+ def test_set_invalid_extras_options(self, monkeypatch):
+ pc = PlayContext()
+ new_stdin = StringIO()
+
+ conn = connection_loader.get('psrp', pc, new_stdin)
+ conn.set_options(var_options={'_extras': {'ansible_psrp_mock_test3': True}})
+
+ mock_display = MagicMock()
+ monkeypatch.setattr(Display, "warning", mock_display)
+ conn._build_kwargs()
+
+ assert mock_display.call_args[0][0] == \
+ 'ansible_psrp_mock_test3 is unsupported by the current psrp version installed'
diff --git a/test/units/plugins/connection/test_ssh.py b/test/units/plugins/connection/test_ssh.py
new file mode 100644
index 00000000..cfe7fcb6
--- /dev/null
+++ b/test/units/plugins/connection/test_ssh.py
@@ -0,0 +1,688 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+import pytest
+
+
+from ansible import constants as C
+from ansible.errors import AnsibleAuthenticationFailure
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock, PropertyMock
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils.compat.selectors import SelectorKey, EVENT_READ
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.connection import ssh
+from ansible.plugins.loader import connection_loader, become_loader
+
+
+class TestConnectionBaseClass(unittest.TestCase):
+
+ def test_plugins_connection_ssh_module(self):
+ play_context = PlayContext()
+ play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ in_stream = StringIO()
+
+ self.assertIsInstance(ssh.Connection(play_context, in_stream), ssh.Connection)
+
+ def test_plugins_connection_ssh_basic(self):
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = ssh.Connection(pc, new_stdin)
+
+ # connect just returns self, so assert that
+ res = conn._connect()
+ self.assertEqual(conn, res)
+
+ ssh.SSHPASS_AVAILABLE = False
+ self.assertFalse(conn._sshpass_available())
+
+ ssh.SSHPASS_AVAILABLE = True
+ self.assertTrue(conn._sshpass_available())
+
+ with patch('subprocess.Popen') as p:
+ ssh.SSHPASS_AVAILABLE = None
+ p.return_value = MagicMock()
+ self.assertTrue(conn._sshpass_available())
+
+ ssh.SSHPASS_AVAILABLE = None
+ p.return_value = None
+ p.side_effect = OSError()
+ self.assertFalse(conn._sshpass_available())
+
+ conn.close()
+ self.assertFalse(conn._connected)
+
+ def test_plugins_connection_ssh__build_command(self):
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('ssh', pc, new_stdin)
+ conn._build_command('ssh')
+
+ def test_plugins_connection_ssh_exec_command(self):
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('ssh', pc, new_stdin)
+
+ conn._build_command = MagicMock()
+ conn._build_command.return_value = 'ssh something something'
+ conn._run = MagicMock()
+ conn._run.return_value = (0, 'stdout', 'stderr')
+ conn.get_option = MagicMock()
+ conn.get_option.return_value = True
+
+ res, stdout, stderr = conn.exec_command('ssh')
+ res, stdout, stderr = conn.exec_command('ssh', 'this is some data')
+
+ def test_plugins_connection_ssh__examine_output(self):
+ pc = PlayContext()
+ new_stdin = StringIO()
+
+ conn = connection_loader.get('ssh', pc, new_stdin)
+ conn.set_become_plugin(become_loader.get('sudo'))
+
+ conn.check_password_prompt = MagicMock()
+ conn.check_become_success = MagicMock()
+ conn.check_incorrect_password = MagicMock()
+ conn.check_missing_password = MagicMock()
+
+ def _check_password_prompt(line):
+ if b'foo' in line:
+ return True
+ return False
+
+ def _check_become_success(line):
+ if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line:
+ return True
+ return False
+
+ def _check_incorrect_password(line):
+ if b'incorrect password' in line:
+ return True
+ return False
+
+ def _check_missing_password(line):
+ if b'bad password' in line:
+ return True
+ return False
+
+ conn.become.check_password_prompt = MagicMock(side_effect=_check_password_prompt)
+ conn.become.check_become_success = MagicMock(side_effect=_check_become_success)
+ conn.become.check_incorrect_password = MagicMock(side_effect=_check_incorrect_password)
+ conn.become.check_missing_password = MagicMock(side_effect=_check_missing_password)
+
+ # test examining output for prompt
+ conn._flags = dict(
+ become_prompt=False,
+ become_success=False,
+ become_error=False,
+ become_nopasswd_error=False,
+ )
+
+ pc.prompt = True
+ conn.become.prompt = True
+
+ def get_option(option):
+ if option == 'become_pass':
+ return 'password'
+ return None
+
+ conn.become.get_option = get_option
+ output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False)
+ self.assertEqual(output, b'line 1\nline 2\nline 3\n')
+ self.assertEqual(unprocessed, b'this should be the remainder')
+ self.assertTrue(conn._flags['become_prompt'])
+ self.assertFalse(conn._flags['become_success'])
+ self.assertFalse(conn._flags['become_error'])
+ self.assertFalse(conn._flags['become_nopasswd_error'])
+
+ # test examining output for become prompt
+ conn._flags = dict(
+ become_prompt=False,
+ become_success=False,
+ become_error=False,
+ become_nopasswd_error=False,
+ )
+
+ pc.prompt = False
+ conn.become.prompt = False
+ pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
+ conn.become.success = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
+ output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False)
+ self.assertEqual(output, b'line 1\nline 2\nline 3\n')
+ self.assertEqual(unprocessed, b'')
+ self.assertFalse(conn._flags['become_prompt'])
+ self.assertTrue(conn._flags['become_success'])
+ self.assertFalse(conn._flags['become_error'])
+ self.assertFalse(conn._flags['become_nopasswd_error'])
+
+ # test examining output for become failure
+ conn._flags = dict(
+ become_prompt=False,
+ become_success=False,
+ become_error=False,
+ become_nopasswd_error=False,
+ )
+
+ pc.prompt = False
+ conn.become.prompt = False
+ pc.success_key = None
+ output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True)
+ self.assertEqual(output, b'line 1\nline 2\nincorrect password\n')
+ self.assertEqual(unprocessed, b'')
+ self.assertFalse(conn._flags['become_prompt'])
+ self.assertFalse(conn._flags['become_success'])
+ self.assertTrue(conn._flags['become_error'])
+ self.assertFalse(conn._flags['become_nopasswd_error'])
+
+ # test examining output for missing password
+ conn._flags = dict(
+ become_prompt=False,
+ become_success=False,
+ become_error=False,
+ become_nopasswd_error=False,
+ )
+
+ pc.prompt = False
+ conn.become.prompt = False
+ pc.success_key = None
+ output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True)
+ self.assertEqual(output, b'line 1\nbad password\n')
+ self.assertEqual(unprocessed, b'')
+ self.assertFalse(conn._flags['become_prompt'])
+ self.assertFalse(conn._flags['become_success'])
+ self.assertFalse(conn._flags['become_error'])
+ self.assertTrue(conn._flags['become_nopasswd_error'])
+
+ @patch('time.sleep')
+ @patch('os.path.exists')
+ def test_plugins_connection_ssh_put_file(self, mock_ospe, mock_sleep):
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('ssh', pc, new_stdin)
+ conn._build_command = MagicMock()
+ conn._bare_run = MagicMock()
+
+ mock_ospe.return_value = True
+ conn._build_command.return_value = 'some command to run'
+ conn._bare_run.return_value = (0, '', '')
+ conn.host = "some_host"
+
+ C.ANSIBLE_SSH_RETRIES = 9
+
+ # Test with C.DEFAULT_SCP_IF_SSH set to smart
+ # Test when SFTP works
+ C.DEFAULT_SCP_IF_SSH = 'smart'
+ expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
+ conn.put_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
+
+ # Test when SFTP doesn't work but SCP does
+ conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
+ conn.put_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
+ conn._bare_run.side_effect = None
+
+ # test with C.DEFAULT_SCP_IF_SSH enabled
+ C.DEFAULT_SCP_IF_SSH = True
+ conn.put_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
+
+ conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
+ conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
+
+ # test with C.DEFAULT_SCP_IF_SSH disabled
+ C.DEFAULT_SCP_IF_SSH = False
+ expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
+ conn.put_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
+
+ expected_in_data = b' '.join((b'put',
+ to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
+ to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
+ conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
+ conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
+
+ # test that a non-zero rc raises an error
+ conn._bare_run.return_value = (1, 'stdout', 'some errors')
+ self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
+
+ # test that a not-found path raises an error
+ mock_ospe.return_value = False
+ conn._bare_run.return_value = (0, 'stdout', '')
+ self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
+
+ @patch('time.sleep')
+ def test_plugins_connection_ssh_fetch_file(self, mock_sleep):
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('ssh', pc, new_stdin)
+ conn._build_command = MagicMock()
+ conn._bare_run = MagicMock()
+ conn._load_name = 'ssh'
+
+ conn._build_command.return_value = 'some command to run'
+ conn._bare_run.return_value = (0, '', '')
+ conn.host = "some_host"
+
+ C.ANSIBLE_SSH_RETRIES = 9
+
+ # Test with C.DEFAULT_SCP_IF_SSH set to smart
+ # Test when SFTP works
+ C.DEFAULT_SCP_IF_SSH = 'smart'
+ expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
+ conn.set_options({})
+ conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
+
+ # Test when SFTP doesn't work but SCP does
+ conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
+ conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
+ conn._bare_run.side_effect = None
+
+ # test with C.DEFAULT_SCP_IF_SSH enabled
+ C.DEFAULT_SCP_IF_SSH = True
+ conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
+
+ conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
+ conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
+
+ # test with C.DEFAULT_SCP_IF_SSH disabled
+ C.DEFAULT_SCP_IF_SSH = False
+ expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
+ conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
+ conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
+
+ expected_in_data = b' '.join((b'get',
+ to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
+ to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
+ conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
+ conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
+
+ # test that a non-zero rc raises an error
+ conn._bare_run.return_value = (1, 'stdout', 'some errors')
+ self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file')
+
+
+class MockSelector(object):
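+    # Minimal stand-in for selectors.DefaultSelector: register()/unregister()
+    # keep a running count of watched files, so get_map() stays truthy while
+    # any file is still registered.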
+ def __init__(self):
+ self.files_watched = 0
+ self.register = MagicMock(side_effect=self._register)
+ self.unregister = MagicMock(side_effect=self._unregister)
+ self.close = MagicMock()
+ self.get_map = MagicMock(side_effect=self._get_map)
+ self.select = MagicMock()
+
+ def _register(self, *args, **kwargs):
+ self.files_watched += 1
+
+ def _unregister(self, *args, **kwargs):
+ self.files_watched -= 1
+
+ def _get_map(self, *args, **kwargs):
+ return self.files_watched
+
+
+@pytest.fixture
+def mock_run_env(request, mocker):
+ pc = PlayContext()
+ new_stdin = StringIO()
+
+ conn = connection_loader.get('ssh', pc, new_stdin)
+ conn.set_become_plugin(become_loader.get('sudo'))
+ conn._send_initial_data = MagicMock()
+ conn._examine_output = MagicMock()
+ conn._terminate_process = MagicMock()
+ conn._load_name = 'ssh'
+ conn.sshpass_pipe = [MagicMock(), MagicMock()]
+
+ request.cls.pc = pc
+ request.cls.conn = conn
+
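+    # Fake Popen result; the stdio fileno values (1000-1002) match the fds used
+    # in the SelectorKey tuples constructed by the tests below.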
+ mock_popen_res = MagicMock()
+ mock_popen_res.poll = MagicMock()
+ mock_popen_res.wait = MagicMock()
+ mock_popen_res.stdin = MagicMock()
+ mock_popen_res.stdin.fileno.return_value = 1000
+ mock_popen_res.stdout = MagicMock()
+ mock_popen_res.stdout.fileno.return_value = 1001
+ mock_popen_res.stderr = MagicMock()
+ mock_popen_res.stderr.fileno.return_value = 1002
+ mock_popen_res.returncode = 0
+ request.cls.mock_popen_res = mock_popen_res
+
+ mock_popen = mocker.patch('subprocess.Popen', return_value=mock_popen_res)
+ request.cls.mock_popen = mock_popen
+
+ request.cls.mock_selector = MockSelector()
+ mocker.patch('ansible.module_utils.compat.selectors.DefaultSelector', lambda: request.cls.mock_selector)
+
+ request.cls.mock_openpty = mocker.patch('pty.openpty')
+
+ mocker.patch('fcntl.fcntl')
+ mocker.patch('os.write')
+ mocker.patch('os.close')
+
+
+@pytest.mark.usefixtures('mock_run_env')
+class TestSSHConnectionRun(object):
+    # FIXME:
+    # These tests are little more than a smoke test. They need to be enhanced
+    # to check that the relevant functions are actually called and to give
+    # complete coverage of the code paths.
+ def test_no_escalation(self):
+ self.mock_popen_res.stdout.read.side_effect = [b"my_stdout\n", b"second_line"]
+ self.mock_popen_res.stderr.read.side_effect = [b"my_stderr"]
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ []]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
+ assert return_code == 0
+ assert b_stdout == b'my_stdout\nsecond_line'
+ assert b_stderr == b'my_stderr'
+ assert self.mock_selector.register.called is True
+ assert self.mock_selector.register.call_count == 2
+ assert self.conn._send_initial_data.called is True
+ assert self.conn._send_initial_data.call_count == 1
+ assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
+
+ def test_with_password(self):
+ # test with a password set to trigger the sshpass write
+ self.pc.password = '12345'
+ self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
+ self.mock_popen_res.stderr.read.side_effect = [b""]
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ []]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ return_code, b_stdout, b_stderr = self.conn._run(["ssh", "is", "a", "cmd"], "this is more data")
+ assert return_code == 0
+ assert b_stdout == b'some data'
+ assert b_stderr == b''
+ assert self.mock_selector.register.called is True
+ assert self.mock_selector.register.call_count == 2
+ assert self.conn._send_initial_data.called is True
+ assert self.conn._send_initial_data.call_count == 1
+ assert self.conn._send_initial_data.call_args[0][1] == 'this is more data'
+
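+    # Stand-in for _examine_output: flips the become flags that _run() polls on,
+    # consuming the chunk entirely (nothing echoed, nothing left unprocessed).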
+    def _password_with_prompt_examine_output(self, source, state, b_chunk, sudoable):
+ if state == 'awaiting_prompt':
+ self.conn._flags['become_prompt'] = True
+ elif state == 'awaiting_escalation':
+ self.conn._flags['become_success'] = True
+ return (b'', b'')
+
+ def test_password_with_prompt(self):
+ # test with password prompting enabled
+ self.pc.password = None
+ self.conn.become.prompt = b'Password:'
+ self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
+ self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"Success", b""]
+ self.mock_popen_res.stderr.read.side_effect = [b""]
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ),
+ (SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ []]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
+ assert return_code == 0
+ assert b_stdout == b''
+ assert b_stderr == b''
+ assert self.mock_selector.register.called is True
+ assert self.mock_selector.register.call_count == 2
+ assert self.conn._send_initial_data.called is True
+ assert self.conn._send_initial_data.call_count == 1
+ assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
+
+ def test_password_with_become(self):
+ # test with some become settings
+ self.pc.prompt = b'Password:'
+ self.conn.become.prompt = b'Password:'
+ self.pc.become = True
+ self.pc.success_key = 'BECOME-SUCCESS-abcdefg'
+ self.conn.become._id = 'abcdefg'
+ self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
+ self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"BECOME-SUCCESS-abcdefg", b"abc"]
+ self.mock_popen_res.stderr.read.side_effect = [b"123"]
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ []]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
+ self.mock_popen_res.stdin.flush.assert_called_once_with()
+ assert return_code == 0
+ assert b_stdout == b'abc'
+ assert b_stderr == b'123'
+ assert self.mock_selector.register.called is True
+ assert self.mock_selector.register.call_count == 2
+ assert self.conn._send_initial_data.called is True
+ assert self.conn._send_initial_data.call_count == 1
+ assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
+
+    def test_password_without_data(self):
+        # simulate no data input; the first Popen attempt (using new ptys) raises OSError
+ self.mock_popen.return_value = None
+ self.mock_popen.side_effect = [OSError(), self.mock_popen_res]
+
+ # simulate no data input
+ self.mock_openpty.return_value = (98, 99)
+ self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
+ self.mock_popen_res.stderr.read.side_effect = [b""]
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ []]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
+ assert return_code == 0
+ assert b_stdout == b'some data'
+ assert b_stderr == b''
+ assert self.mock_selector.register.called is True
+ assert self.mock_selector.register.call_count == 2
+ assert self.conn._send_initial_data.called is False
+
+
+@pytest.mark.usefixtures('mock_run_env')
+class TestSSHConnectionRetries(object):
+ def test_incorrect_password(self, monkeypatch):
+ monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+ monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 5)
+ monkeypatch.setattr('time.sleep', lambda x: None)
+
+ self.mock_popen_res.stdout.read.side_effect = [b'']
+ self.mock_popen_res.stderr.read.side_effect = [b'Permission denied, please try again.\r\n']
+ type(self.mock_popen_res).returncode = PropertyMock(side_effect=[5] * 4)
+
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [],
+ ]
+
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ self.conn._build_command = MagicMock()
+ self.conn._build_command.return_value = [b'sshpass', b'-d41', b'ssh', b'-C']
+ self.conn.get_option = MagicMock()
+ self.conn.get_option.return_value = True
+
+ exception_info = pytest.raises(AnsibleAuthenticationFailure, self.conn.exec_command, 'sshpass', 'some data')
+ assert exception_info.value.message == ('Invalid/incorrect username/password. Skipping remaining 5 retries to prevent account lockout: '
+ 'Permission denied, please try again.')
+ assert self.mock_popen.call_count == 1
+
+ def test_retry_then_success(self, monkeypatch):
+ monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+ monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
+
+ monkeypatch.setattr('time.sleep', lambda x: None)
+
+ self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
+ self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
+ type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4)
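+        # returncode is read several times per attempt: the first attempt keeps
+        # seeing 255 (a retryable ssh error), the retry sees 0 and succeeds.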
+
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ []
+ ]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ self.conn._build_command = MagicMock()
+ self.conn._build_command.return_value = 'ssh'
+ self.conn.get_option = MagicMock()
+ self.conn.get_option.return_value = True
+
+ return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data')
+ assert return_code == 0
+ assert b_stdout == b'my_stdout\nsecond_line'
+ assert b_stderr == b'my_stderr'
+
+ def test_multiple_failures(self, monkeypatch):
+ monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+ monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 9)
+
+ monkeypatch.setattr('time.sleep', lambda x: None)
+
+ self.mock_popen_res.stdout.read.side_effect = [b""] * 10
+ self.mock_popen_res.stderr.read.side_effect = [b""] * 10
+ type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30)
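+        # every attempt exits 255, so the initial try plus all 9 retries fail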
+
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [],
+ ] * 10
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ self.conn._build_command = MagicMock()
+ self.conn._build_command.return_value = 'ssh'
+ self.conn.get_option = MagicMock()
+ self.conn.get_option.return_value = True
+
+ pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data')
+ assert self.mock_popen.call_count == 10
+
+    def test_arbitrary_exceptions(self, monkeypatch):
+ monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+ monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 9)
+
+ monkeypatch.setattr('time.sleep', lambda x: None)
+
+ self.conn._build_command = MagicMock()
+ self.conn._build_command.return_value = 'ssh'
+ self.conn.get_option = MagicMock()
+ self.conn.get_option.return_value = True
+
+ self.mock_popen.side_effect = [Exception('bad')] * 10
+ pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data')
+ assert self.mock_popen.call_count == 10
+
+ def test_put_file_retries(self, monkeypatch):
+ monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+ monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
+
+ monkeypatch.setattr('time.sleep', lambda x: None)
+ monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
+
+ self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
+ self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
+ type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
+
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ []
+ ]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ self.conn._build_command = MagicMock()
+ self.conn._build_command.return_value = 'sftp'
+
+ return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file')
+ assert return_code == 0
+ assert b_stdout == b"my_stdout\nsecond_line"
+ assert b_stderr == b"my_stderr"
+ assert self.mock_popen.call_count == 2
+
+ def test_fetch_file_retries(self, monkeypatch):
+ monkeypatch.setattr(C, 'HOST_KEY_CHECKING', False)
+ monkeypatch.setattr(C, 'ANSIBLE_SSH_RETRIES', 3)
+
+ monkeypatch.setattr('time.sleep', lambda x: None)
+ monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
+
+ self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
+ self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
+ type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
+
+ self.mock_selector.select.side_effect = [
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ [],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
+ [(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
+ []
+ ]
+ self.mock_selector.get_map.side_effect = lambda: True
+
+ self.conn._build_command = MagicMock()
+ self.conn._build_command.return_value = 'sftp'
+
+ return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
+ assert return_code == 0
+ assert b_stdout == b"my_stdout\nsecond_line"
+ assert b_stderr == b"my_stderr"
+ assert self.mock_popen.call_count == 2
diff --git a/test/units/plugins/connection/test_winrm.py b/test/units/plugins/connection/test_winrm.py
new file mode 100644
index 00000000..67bfd9ae
--- /dev/null
+++ b/test/units/plugins/connection/test_winrm.py
@@ -0,0 +1,431 @@
+# -*- coding: utf-8 -*-
+# (c) 2018, Jordan Borean <jborean@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from io import StringIO
+
+from units.compat.mock import MagicMock
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils._text import to_bytes
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import connection_loader
+from ansible.plugins.connection import winrm
+
+pytest.importorskip("winrm")
+
+
+class TestConnectionWinRM(object):
+
+ OPTIONS_DATA = (
+ # default options
+ (
+ {'_extras': {}},
+ {},
+ {
+ '_kerb_managed': False,
+ '_kinit_cmd': 'kinit',
+ '_winrm_connection_timeout': None,
+ '_winrm_host': 'inventory_hostname',
+ '_winrm_kwargs': {'username': None, 'password': None},
+ '_winrm_pass': None,
+ '_winrm_path': '/wsman',
+ '_winrm_port': 5986,
+ '_winrm_scheme': 'https',
+ '_winrm_transport': ['ssl'],
+ '_winrm_user': None
+ },
+ False
+ ),
+ # http through port
+ (
+ {'_extras': {}, 'ansible_port': 5985},
+ {},
+ {
+ '_winrm_kwargs': {'username': None, 'password': None},
+ '_winrm_port': 5985,
+ '_winrm_scheme': 'http',
+ '_winrm_transport': ['plaintext'],
+ },
+ False
+ ),
+ # kerberos user with kerb present
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com'},
+ {},
+ {
+ '_kerb_managed': False,
+ '_kinit_cmd': 'kinit',
+ '_winrm_kwargs': {'username': 'user@domain.com',
+ 'password': None},
+ '_winrm_pass': None,
+ '_winrm_transport': ['kerberos', 'ssl'],
+ '_winrm_user': 'user@domain.com'
+ },
+ True
+ ),
+ # kerberos user without kerb present
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com'},
+ {},
+ {
+ '_kerb_managed': False,
+ '_kinit_cmd': 'kinit',
+ '_winrm_kwargs': {'username': 'user@domain.com',
+ 'password': None},
+ '_winrm_pass': None,
+ '_winrm_transport': ['ssl'],
+ '_winrm_user': 'user@domain.com'
+ },
+ False
+ ),
+ # kerberos user with managed ticket (implicit)
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com'},
+ {'remote_password': 'pass'},
+ {
+ '_kerb_managed': True,
+ '_kinit_cmd': 'kinit',
+ '_winrm_kwargs': {'username': 'user@domain.com',
+ 'password': 'pass'},
+ '_winrm_pass': 'pass',
+ '_winrm_transport': ['kerberos', 'ssl'],
+ '_winrm_user': 'user@domain.com'
+ },
+ True
+ ),
+ # kerb with managed ticket (explicit)
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com',
+ 'ansible_winrm_kinit_mode': 'managed'},
+ {'password': 'pass'},
+ {
+ '_kerb_managed': True,
+ },
+ True
+ ),
+        # kerb with unmanaged ticket (explicit)
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com',
+ 'ansible_winrm_kinit_mode': 'manual'},
+ {'password': 'pass'},
+ {
+ '_kerb_managed': False,
+ },
+ True
+ ),
+ # transport override (single)
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com',
+ 'ansible_winrm_transport': 'ntlm'},
+ {},
+ {
+ '_winrm_kwargs': {'username': 'user@domain.com',
+ 'password': None},
+ '_winrm_pass': None,
+ '_winrm_transport': ['ntlm'],
+ },
+ False
+ ),
+ # transport override (list)
+ (
+ {'_extras': {}, 'ansible_user': 'user@domain.com',
+ 'ansible_winrm_transport': ['ntlm', 'certificate']},
+ {},
+ {
+ '_winrm_kwargs': {'username': 'user@domain.com',
+ 'password': None},
+ '_winrm_pass': None,
+ '_winrm_transport': ['ntlm', 'certificate'],
+ },
+ False
+ ),
+ # winrm extras
+ (
+ {'_extras': {'ansible_winrm_server_cert_validation': 'ignore',
+ 'ansible_winrm_service': 'WSMAN'}},
+ {},
+ {
+ '_winrm_kwargs': {'username': None, 'password': None,
+ 'server_cert_validation': 'ignore',
+ 'service': 'WSMAN'},
+ },
+ False
+ ),
+ # direct override
+ (
+ {'_extras': {}, 'ansible_winrm_connection_timeout': 5},
+ {'connection_timeout': 10},
+ {
+ '_winrm_connection_timeout': 10,
+ },
+ False
+ ),
+ # password as ansible_password
+ (
+ {'_extras': {}, 'ansible_password': 'pass'},
+ {},
+ {
+ '_winrm_pass': 'pass',
+ '_winrm_kwargs': {'username': None, 'password': 'pass'}
+ },
+ False
+ ),
+ # password as ansible_winrm_pass
+ (
+ {'_extras': {}, 'ansible_winrm_pass': 'pass'},
+ {},
+ {
+ '_winrm_pass': 'pass',
+ '_winrm_kwargs': {'username': None, 'password': 'pass'}
+ },
+ False
+ ),
+
+ # password as ansible_winrm_password
+ (
+ {'_extras': {}, 'ansible_winrm_password': 'pass'},
+ {},
+ {
+ '_winrm_pass': 'pass',
+ '_winrm_kwargs': {'username': None, 'password': 'pass'}
+ },
+ False
+ ),
+ )
+
+ # pylint bug: https://github.com/PyCQA/pylint/issues/511
+ # pylint: disable=undefined-variable
+ @pytest.mark.parametrize('options, direct, expected, kerb',
+ ((o, d, e, k) for o, d, e, k in OPTIONS_DATA))
+ def test_set_options(self, options, direct, expected, kerb):
+ winrm.HAVE_KERBEROS = kerb
+
+ pc = PlayContext()
+ new_stdin = StringIO()
+
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options=options, direct=direct)
+ conn._build_winrm_kwargs()
+
+        for attr, expected_value in expected.items():
+            actual = getattr(conn, attr)
+            assert actual == expected_value, \
+                "winrm attr '%s', actual '%s' != expected '%s'"\
+                % (attr, actual, expected_value)
+
+
+class TestWinRMKerbAuth(object):
+
+ @pytest.mark.parametrize('options, expected', [
+ [{"_extras": {}},
+ (["kinit", "user@domain"],)],
+ [{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
+ (["kinit2", "user@domain"],)],
+ [{"_extras": {'ansible_winrm_kerberos_delegation': True}},
+ (["kinit", "-f", "user@domain"],)],
+ ])
+ def test_kinit_success_subprocess(self, monkeypatch, options, expected):
+ def mock_communicate(input=None, timeout=None):
+ return b"", b""
+
+ mock_popen = MagicMock()
+ mock_popen.return_value.communicate = mock_communicate
+ mock_popen.return_value.returncode = 0
+ monkeypatch.setattr("subprocess.Popen", mock_popen)
+
+ winrm.HAS_PEXPECT = False
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options=options)
+ conn._build_winrm_kwargs()
+
+ conn._kerb_auth("user@domain", "pass")
+ mock_calls = mock_popen.mock_calls
+ assert len(mock_calls) == 1
+ assert mock_calls[0][1] == expected
+ actual_env = mock_calls[0][2]['env']
+ assert list(actual_env.keys()) == ['KRB5CCNAME']
+ assert actual_env['KRB5CCNAME'].startswith("FILE:/")
+
+ @pytest.mark.parametrize('options, expected', [
+ [{"_extras": {}},
+ ("kinit", ["user@domain"],)],
+ [{"_extras": {}, 'ansible_winrm_kinit_cmd': 'kinit2'},
+ ("kinit2", ["user@domain"],)],
+ [{"_extras": {'ansible_winrm_kerberos_delegation': True}},
+ ("kinit", ["-f", "user@domain"],)],
+ ])
+ def test_kinit_success_pexpect(self, monkeypatch, options, expected):
+ pytest.importorskip("pexpect")
+ mock_pexpect = MagicMock()
+ mock_pexpect.return_value.exitstatus = 0
+ monkeypatch.setattr("pexpect.spawn", mock_pexpect)
+
+ winrm.HAS_PEXPECT = True
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options=options)
+ conn._build_winrm_kwargs()
+
+ conn._kerb_auth("user@domain", "pass")
+ mock_calls = mock_pexpect.mock_calls
+ assert mock_calls[0][1] == expected
+ actual_env = mock_calls[0][2]['env']
+ assert list(actual_env.keys()) == ['KRB5CCNAME']
+ assert actual_env['KRB5CCNAME'].startswith("FILE:/")
+ assert mock_calls[0][2]['echo'] is False
+ assert mock_calls[1][0] == "().expect"
+ assert mock_calls[1][1] == (".*:",)
+ assert mock_calls[2][0] == "().sendline"
+ assert mock_calls[2][1] == ("pass",)
+ assert mock_calls[3][0] == "().read"
+ assert mock_calls[4][0] == "().wait"
+
+ def test_kinit_with_missing_executable_subprocess(self, monkeypatch):
+ expected_err = "[Errno 2] No such file or directory: " \
+ "'/fake/kinit': '/fake/kinit'"
+ mock_popen = MagicMock(side_effect=OSError(expected_err))
+
+ monkeypatch.setattr("subprocess.Popen", mock_popen)
+
+ winrm.HAS_PEXPECT = False
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
+ conn.set_options(var_options=options)
+ conn._build_winrm_kwargs()
+
+ with pytest.raises(AnsibleConnectionFailure) as err:
+ conn._kerb_auth("user@domain", "pass")
+ assert str(err.value) == "Kerberos auth failure when calling " \
+ "kinit cmd '/fake/kinit': %s" % expected_err
+
+ def test_kinit_with_missing_executable_pexpect(self, monkeypatch):
+ pexpect = pytest.importorskip("pexpect")
+
+ expected_err = "The command was not found or was not " \
+ "executable: /fake/kinit"
+ mock_pexpect = \
+ MagicMock(side_effect=pexpect.ExceptionPexpect(expected_err))
+
+ monkeypatch.setattr("pexpect.spawn", mock_pexpect)
+
+ winrm.HAS_PEXPECT = True
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ options = {"_extras": {}, "ansible_winrm_kinit_cmd": "/fake/kinit"}
+ conn.set_options(var_options=options)
+ conn._build_winrm_kwargs()
+
+ with pytest.raises(AnsibleConnectionFailure) as err:
+ conn._kerb_auth("user@domain", "pass")
+ assert str(err.value) == "Kerberos auth failure when calling " \
+ "kinit cmd '/fake/kinit': %s" % expected_err
+
+ def test_kinit_error_subprocess(self, monkeypatch):
+ expected_err = "kinit: krb5_parse_name: " \
+ "Configuration file does not specify default realm"
+
+ def mock_communicate(input=None, timeout=None):
+ return b"", to_bytes(expected_err)
+
+ mock_popen = MagicMock()
+ mock_popen.return_value.communicate = mock_communicate
+ mock_popen.return_value.returncode = 1
+ monkeypatch.setattr("subprocess.Popen", mock_popen)
+
+ winrm.HAS_PEXPECT = False
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options={"_extras": {}})
+ conn._build_winrm_kwargs()
+
+ with pytest.raises(AnsibleConnectionFailure) as err:
+ conn._kerb_auth("invaliduser", "pass")
+
+ assert str(err.value) == \
+ "Kerberos auth failure for principal invaliduser with " \
+ "subprocess: %s" % (expected_err)
+
+ def test_kinit_error_pexpect(self, monkeypatch):
+ pytest.importorskip("pexpect")
+
+ expected_err = "Configuration file does not specify default realm"
+ mock_pexpect = MagicMock()
+ mock_pexpect.return_value.expect = MagicMock(side_effect=OSError)
+ mock_pexpect.return_value.read.return_value = to_bytes(expected_err)
+ mock_pexpect.return_value.exitstatus = 1
+
+ monkeypatch.setattr("pexpect.spawn", mock_pexpect)
+
+ winrm.HAS_PEXPECT = True
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options={"_extras": {}})
+ conn._build_winrm_kwargs()
+
+ with pytest.raises(AnsibleConnectionFailure) as err:
+ conn._kerb_auth("invaliduser", "pass")
+
+ assert str(err.value) == \
+ "Kerberos auth failure for principal invaliduser with " \
+ "pexpect: %s" % (expected_err)
+
+ def test_kinit_error_pass_in_output_subprocess(self, monkeypatch):
+ def mock_communicate(input=None, timeout=None):
+ return b"", b"Error with kinit\n" + input
+
+ mock_popen = MagicMock()
+ mock_popen.return_value.communicate = mock_communicate
+ mock_popen.return_value.returncode = 1
+ monkeypatch.setattr("subprocess.Popen", mock_popen)
+
+ winrm.HAS_PEXPECT = False
+ pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options={"_extras": {}})
+ conn._build_winrm_kwargs()
+
+ with pytest.raises(AnsibleConnectionFailure) as err:
+ conn._kerb_auth("username", "password")
+ assert str(err.value) == \
+ "Kerberos auth failure for principal username with subprocess: " \
+ "Error with kinit\n<redacted>"
+
+ def test_kinit_error_pass_in_output_pexpect(self, monkeypatch):
+ pytest.importorskip("pexpect")
+
+ mock_pexpect = MagicMock()
+ mock_pexpect.return_value.expect = MagicMock()
+ mock_pexpect.return_value.read.return_value = \
+ b"Error with kinit\npassword\n"
+ mock_pexpect.return_value.exitstatus = 1
+
+ monkeypatch.setattr("pexpect.spawn", mock_pexpect)
+
+ winrm.HAS_PEXPECT = True
+        pc = PlayContext()
+ new_stdin = StringIO()
+ conn = connection_loader.get('winrm', pc, new_stdin)
+ conn.set_options(var_options={"_extras": {}})
+ conn._build_winrm_kwargs()
+
+ with pytest.raises(AnsibleConnectionFailure) as err:
+ conn._kerb_auth("username", "password")
+ assert str(err.value) == \
+ "Kerberos auth failure for principal username with pexpect: " \
+ "Error with kinit\n<redacted>"
diff --git a/test/units/plugins/filter/__init__.py b/test/units/plugins/filter/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/filter/__init__.py
diff --git a/test/units/plugins/filter/test_core.py b/test/units/plugins/filter/test_core.py
new file mode 100644
index 00000000..8a626d9a
--- /dev/null
+++ b/test/units/plugins/filter/test_core.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils._text import to_native
+from ansible.plugins.filter.core import to_uuid
+from ansible.errors import AnsibleFilterError
+
+
+UUID_DEFAULT_NAMESPACE_TEST_CASES = (
+ ('example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
+ ('test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
+ ('café.example', '8a99d6b1-fb8f-5f78-af86-879768589f56'),
+)
+
+UUID_TEST_CASES = (
+ ('361E6D51-FAEC-444A-9079-341386DA8E2E', 'example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
+ ('361E6D51-FAEC-444A-9079-341386DA8E2E', 'test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
+ ('11111111-2222-3333-4444-555555555555', 'example.com', 'e776faa5-5299-55dc-9057-7a00e6be2364'),
+)
+
+
+@pytest.mark.parametrize('value, expected', UUID_DEFAULT_NAMESPACE_TEST_CASES)
+def test_to_uuid_default_namespace(value, expected):
+ assert expected == to_uuid(value)
+
+
+@pytest.mark.parametrize('namespace, value, expected', UUID_TEST_CASES)
+def test_to_uuid(namespace, value, expected):
+ assert expected == to_uuid(value, namespace=namespace)
+
+
+def test_to_uuid_invalid_namespace():
+ with pytest.raises(AnsibleFilterError) as e:
+ to_uuid('example.com', namespace='11111111-2222-3333-4444-555555555')
+ assert 'Invalid value' in to_native(e.value)
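+
+
+# For reference, the expected values above are reproducible with the standard
+# library, assuming to_uuid performs RFC 4122 UUIDv5 derivation (the version
+# nibble of the expected UUIDs suggests as much):
+#
+#     import uuid
+#     ns = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
+#     str(uuid.uuid5(ns, 'example.com'))  # 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'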
diff --git a/test/units/plugins/filter/test_mathstuff.py b/test/units/plugins/filter/test_mathstuff.py
new file mode 100644
index 00000000..a0e78d33
--- /dev/null
+++ b/test/units/plugins/filter/test_mathstuff.py
@@ -0,0 +1,176 @@
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import pytest
+
+from jinja2 import Environment
+
+import ansible.plugins.filter.mathstuff as ms
+from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+
+
+UNIQUE_DATA = (([1, 3, 4, 2], sorted([1, 2, 3, 4])),
+ ([1, 3, 2, 4, 2, 3], sorted([1, 2, 3, 4])),
+ (['a', 'b', 'c', 'd'], sorted(['a', 'b', 'c', 'd'])),
+ (['a', 'a', 'd', 'b', 'a', 'd', 'c', 'b'], sorted(['a', 'b', 'c', 'd'])),
+ )
+
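+# Each expected tuple below holds, in order, the intersection, difference,
+# symmetric difference, and union of the two datasets; only the first three
+# are asserted by the test classes in this file.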
+TWO_SETS_DATA = (([1, 2], [3, 4], ([], sorted([1, 2]), sorted([1, 2, 3, 4]), sorted([1, 2, 3, 4]))),
+ ([1, 2, 3], [5, 3, 4], ([3], sorted([1, 2]), sorted([1, 2, 5, 4]), sorted([1, 2, 3, 4, 5]))),
+ (['a', 'b', 'c'], ['d', 'c', 'e'], (['c'], sorted(['a', 'b']), sorted(['a', 'b', 'd', 'e']), sorted(['a', 'b', 'c', 'e', 'd']))),
+ )
+
+env = Environment()
+
+
+@pytest.mark.parametrize('data, expected', UNIQUE_DATA)
+class TestUnique:
+ def test_unhashable(self, data, expected):
+ assert sorted(ms.unique(env, list(data))) == expected
+
+ def test_hashable(self, data, expected):
+ assert sorted(ms.unique(env, tuple(data))) == expected
+
+
+@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA)
+class TestIntersect:
+ def test_unhashable(self, dataset1, dataset2, expected):
+ assert sorted(ms.intersect(env, list(dataset1), list(dataset2))) == expected[0]
+
+ def test_hashable(self, dataset1, dataset2, expected):
+ assert sorted(ms.intersect(env, tuple(dataset1), tuple(dataset2))) == expected[0]
+
+
+@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA)
+class TestDifference:
+ def test_unhashable(self, dataset1, dataset2, expected):
+ assert sorted(ms.difference(env, list(dataset1), list(dataset2))) == expected[1]
+
+ def test_hashable(self, dataset1, dataset2, expected):
+ assert sorted(ms.difference(env, tuple(dataset1), tuple(dataset2))) == expected[1]
+
+
+@pytest.mark.parametrize('dataset1, dataset2, expected', TWO_SETS_DATA)
+class TestSymmetricDifference:
+ def test_unhashable(self, dataset1, dataset2, expected):
+ assert sorted(ms.symmetric_difference(env, list(dataset1), list(dataset2))) == expected[2]
+
+ def test_hashable(self, dataset1, dataset2, expected):
+ assert sorted(ms.symmetric_difference(env, tuple(dataset1), tuple(dataset2))) == expected[2]
+
+
+class TestMin:
+ def test_min(self):
+ assert ms.min((1, 2)) == 1
+ assert ms.min((2, 1)) == 1
+ assert ms.min(('p', 'a', 'w', 'b', 'p')) == 'a'
+
+
+class TestMax:
+ def test_max(self):
+ assert ms.max((1, 2)) == 2
+ assert ms.max((2, 1)) == 2
+ assert ms.max(('p', 'a', 'w', 'b', 'p')) == 'w'
+
+
+class TestLogarithm:
+ def test_log_non_number(self):
+ # Message changed in python3.6
+ with pytest.raises(AnsibleFilterTypeError, match='log\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
+ ms.logarithm('a')
+ with pytest.raises(AnsibleFilterTypeError, match='log\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
+ ms.logarithm(10, base='a')
+
+ def test_log_ten(self):
+ assert ms.logarithm(10, 10) == 1.0
+ assert ms.logarithm(69, 10) * 1000 // 1 == 1838
+
+ def test_log_natural(self):
+ assert ms.logarithm(69) * 1000 // 1 == 4234
+
+ def test_log_two(self):
+ assert ms.logarithm(69, 2) * 1000 // 1 == 6108
+
+
+class TestPower:
+ def test_power_non_number(self):
+ # Message changed in python3.6
+ with pytest.raises(AnsibleFilterTypeError, match='pow\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
+ ms.power('a', 10)
+
+ with pytest.raises(AnsibleFilterTypeError, match='pow\\(\\) can only be used on numbers: (a float is required|must be real number, not str)'):
+ ms.power(10, 'a')
+
+ def test_power_squared(self):
+ assert ms.power(10, 2) == 100
+
+ def test_power_cubed(self):
+ assert ms.power(10, 3) == 1000
+
+
+class TestInversePower:
+ def test_root_non_number(self):
+ # Messages differed in python-2.6, python-2.7-3.5, and python-3.6+
+ with pytest.raises(AnsibleFilterTypeError, match="root\\(\\) can only be used on numbers:"
+ " (invalid literal for float\\(\\): a"
+ "|could not convert string to float: a"
+ "|could not convert string to float: 'a')"):
+ ms.inversepower(10, 'a')
+
+ with pytest.raises(AnsibleFilterTypeError, match="root\\(\\) can only be used on numbers: (a float is required|must be real number, not str)"):
+ ms.inversepower('a', 10)
+
+ def test_square_root(self):
+ assert ms.inversepower(100) == 10
+ assert ms.inversepower(100, 2) == 10
+
+ def test_cube_root(self):
+ assert ms.inversepower(27, 3) == 3
+
+
+class TestRekeyOnMember():
+ # (Input data structure, member to rekey on, expected return)
+ VALID_ENTRIES = (
+ ([{"proto": "eigrp", "state": "enabled"}, {"proto": "ospf", "state": "enabled"}],
+ 'proto',
+ {'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}),
+ ({'eigrp': {"proto": "eigrp", "state": "enabled"}, 'ospf': {"proto": "ospf", "state": "enabled"}},
+ 'proto',
+ {'eigrp': {'state': 'enabled', 'proto': 'eigrp'}, 'ospf': {'state': 'enabled', 'proto': 'ospf'}}),
+ )
+
+ # (Input data structure, member to rekey on, expected error message)
+ INVALID_ENTRIES = (
+ # Fail when key is not found
+ (AnsibleFilterError, [{"proto": "eigrp", "state": "enabled"}], 'invalid_key', "Key invalid_key was not found"),
+ (AnsibleFilterError, {"eigrp": {"proto": "eigrp", "state": "enabled"}}, 'invalid_key', "Key invalid_key was not found"),
+ # Fail when key is duplicated
+ (AnsibleFilterError, [{"proto": "eigrp"}, {"proto": "ospf"}, {"proto": "ospf"}],
+ 'proto', 'Key ospf is not unique, cannot correctly turn into dict'),
+ # Fail when value is not a dict
+ (AnsibleFilterTypeError, ["string"], 'proto', "List item is not a valid dict"),
+ (AnsibleFilterTypeError, [123], 'proto', "List item is not a valid dict"),
+ (AnsibleFilterTypeError, [[{'proto': 1}]], 'proto', "List item is not a valid dict"),
+ # Fail when we do not send a dict or list
+ (AnsibleFilterTypeError, "string", 'proto', "Type is not a valid list, set, or dict"),
+ (AnsibleFilterTypeError, 123, 'proto', "Type is not a valid list, set, or dict"),
+ )
+
+ @pytest.mark.parametrize("list_original, key, expected", VALID_ENTRIES)
+ def test_rekey_on_member_success(self, list_original, key, expected):
+ assert ms.rekey_on_member(list_original, key) == expected
+
+ @pytest.mark.parametrize("expected_exception_type, list_original, key, expected", INVALID_ENTRIES)
+ def test_fail_rekey_on_member(self, expected_exception_type, list_original, key, expected):
+ with pytest.raises(expected_exception_type) as err:
+ ms.rekey_on_member(list_original, key)
+
+ assert err.value.message == expected
+
+ def test_duplicate_strategy_overwrite(self):
+ list_original = ({'proto': 'eigrp', 'id': 1}, {'proto': 'ospf', 'id': 2}, {'proto': 'eigrp', 'id': 3})
+ expected = {'eigrp': {'proto': 'eigrp', 'id': 3}, 'ospf': {'proto': 'ospf', 'id': 2}}
+ assert ms.rekey_on_member(list_original, 'proto', duplicates='overwrite') == expected
diff --git a/test/units/plugins/inventory/__init__.py b/test/units/plugins/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/inventory/__init__.py
diff --git a/test/units/plugins/inventory/test_constructed.py b/test/units/plugins/inventory/test_constructed.py
new file mode 100644
index 00000000..6d521982
--- /dev/null
+++ b/test/units/plugins/inventory/test_constructed.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2019 Alan Rominger <arominge@redhat.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory.constructed import InventoryModule
+from ansible.inventory.data import InventoryData
+from ansible.template import Templar
+
+
+@pytest.fixture()
+def inventory_module():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ r.templar = Templar(None)
+ return r
+
+
+def test_group_by_value_only(inventory_module):
+ inventory_module.inventory.add_host('foohost')
+ inventory_module.inventory.set_variable('foohost', 'bar', 'my_group_name')
+ host = inventory_module.inventory.get_host('foohost')
+ keyed_groups = [
+ {
+ 'prefix': '',
+ 'separator': '',
+ 'key': 'bar'
+ }
+ ]
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=False
+ )
+ assert 'my_group_name' in inventory_module.inventory.groups
+ group = inventory_module.inventory.groups['my_group_name']
+ assert group.hosts == [host]
+
+
+def test_keyed_group_separator(inventory_module):
+ inventory_module.inventory.add_host('farm')
+ inventory_module.inventory.set_variable('farm', 'farmer', 'mcdonald')
+ inventory_module.inventory.set_variable('farm', 'barn', {'cow': 'betsy'})
+ host = inventory_module.inventory.get_host('farm')
+ keyed_groups = [
+ {
+ 'prefix': 'farmer',
+ 'separator': '_old_',
+ 'key': 'farmer'
+ },
+ {
+ 'separator': 'mmmmmmmmmm',
+ 'key': 'barn'
+ }
+ ]
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=False
+ )
+ for group_name in ('farmer_old_mcdonald', 'mmmmmmmmmmcowmmmmmmmmmmbetsy'):
+ assert group_name in inventory_module.inventory.groups
+ group = inventory_module.inventory.groups[group_name]
+ assert group.hosts == [host]
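+ # note how 'mmmmmmmmmmcowmmmmmmmmmmbetsy' is built: with no prefix the
+ # separator still leads the name, and the dict-valued var 'barn' contributes
+ # its key and value joined by the same separator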
+
+
+def test_keyed_group_empty_construction(inventory_module):
+ inventory_module.inventory.add_host('farm')
+ inventory_module.inventory.set_variable('farm', 'barn', {})
+ host = inventory_module.inventory.get_host('farm')
+ keyed_groups = [
+ {
+ 'separator': 'mmmmmmmmmm',
+ 'key': 'barn'
+ }
+ ]
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=True
+ )
+ assert host.groups == []
+
+
+def test_keyed_group_host_confusion(inventory_module):
+ inventory_module.inventory.add_host('cow')
+ inventory_module.inventory.add_group('cow')
+ host = inventory_module.inventory.get_host('cow')
+ host.vars['species'] = 'cow'
+ keyed_groups = [
+ {
+ 'separator': '',
+ 'prefix': '',
+ 'key': 'species'
+ }
+ ]
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=True
+ )
+ group = inventory_module.inventory.groups['cow']
+ # the existing group 'cow' now contains the host 'cow'
+ assert group.hosts == [host]
+
+
+def test_keyed_parent_groups(inventory_module):
+ inventory_module.inventory.add_host('web1')
+ inventory_module.inventory.add_host('web2')
+ inventory_module.inventory.set_variable('web1', 'region', 'japan')
+ inventory_module.inventory.set_variable('web2', 'region', 'japan')
+ host1 = inventory_module.inventory.get_host('web1')
+ host2 = inventory_module.inventory.get_host('web2')
+ keyed_groups = [
+ {
+ 'prefix': 'region',
+ 'key': 'region',
+ 'parent_group': 'region_list'
+ }
+ ]
+ for host in [host1, host2]:
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=False
+ )
+ assert 'region_japan' in inventory_module.inventory.groups
+ assert 'region_list' in inventory_module.inventory.groups
+ region_group = inventory_module.inventory.groups['region_japan']
+ all_regions = inventory_module.inventory.groups['region_list']
+ assert all_regions.child_groups == [region_group]
+ assert region_group.hosts == [host1, host2]
+
+
+def test_parent_group_templating(inventory_module):
+ inventory_module.inventory.add_host('cow')
+ inventory_module.inventory.set_variable('cow', 'sound', 'mmmmmmmmmm')
+ inventory_module.inventory.set_variable('cow', 'nickname', 'betsy')
+ host = inventory_module.inventory.get_host('cow')
+ keyed_groups = [
+ {
+ 'key': 'sound',
+ 'prefix': 'sound',
+ 'parent_group': '{{ nickname }}'
+ },
+ {
+ 'key': 'nickname',
+ 'prefix': '',
+ 'separator': '',
+ 'parent_group': 'nickname' # statically-named parent group, conflicting with hostvar
+ },
+ {
+ 'key': 'nickname',
+ 'separator': '',
+ 'parent_group': '{{ location | default("field") }}'
+ }
+ ]
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=True
+ )
+ # first keyed group: "betsy" is a dynamically generated parent group name
+ betsys_group = inventory_module.inventory.groups['betsy']
+ assert [child.name for child in betsys_group.child_groups] == ['sound_mmmmmmmmmm']
+ # second keyed group, "nickname" is a statically-named root group
+ nicknames_group = inventory_module.inventory.groups['nickname']
+ assert [child.name for child in nicknames_group.child_groups] == ['betsy']
+ # second keyed group actually generated the parent group of the first keyed group
+ # assert that these are, in fact, the same object
+ assert nicknames_group.child_groups[0] == betsys_group
+ # the "betsy" group ends up with two parents: "nickname" and "field"
+ locations_group = inventory_module.inventory.groups['field']
+ assert [child.name for child in locations_group.child_groups] == ['betsy']
+
+
+def test_parent_group_templating_error(inventory_module):
+ inventory_module.inventory.add_host('cow')
+ inventory_module.inventory.set_variable('cow', 'nickname', 'betsy')
+ host = inventory_module.inventory.get_host('cow')
+ keyed_groups = [
+ {
+ 'key': 'nickname',
+ 'separator': '',
+ 'parent_group': '{{ location.barn-yard }}'
+ }
+ ]
+ with pytest.raises(AnsibleParserError) as err_message:
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=True
+ )
+ assert 'Could not generate parent group' in str(err_message.value)
+ # invalid parent group did not raise an exception with strict=False
+ inventory_module._add_host_to_keyed_groups(
+ keyed_groups, host.vars, host.name, strict=False
+ )
+ # assert group was never added with invalid parent
+ assert 'betsy' not in inventory_module.inventory.groups
diff --git a/test/units/plugins/inventory/test_inventory.py b/test/units/plugins/inventory/test_inventory.py
new file mode 100644
index 00000000..66b5ec37
--- /dev/null
+++ b/test/units/plugins/inventory/test_inventory.py
@@ -0,0 +1,207 @@
+# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import string
+import textwrap
+
+from ansible import constants as C
+from units.compat import mock
+from units.compat import unittest
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+from units.mock.path import mock_unfrackpath_noop
+
+from ansible.inventory.manager import InventoryManager, split_host_pattern
+
+from units.mock.loader import DictDataLoader
+
+
+class TestInventory(unittest.TestCase):
+
+ patterns = {
+ 'a': ['a'],
+ 'a, b': ['a', 'b'],
+ 'a , b': ['a', 'b'],
+ ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
+ '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
+ '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
+ '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
+ 'foo[1:2]': ['foo[1:2]'],
+ 'a::b': ['a::b'],
+ 'a:b': ['a', 'b'],
+ ' a : b ': ['a', 'b'],
+ 'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
+ 'a,,b': ['a', 'b'],
+ 'a, ,b,,c, ,': ['a', 'b', 'c'],
+ ',': [],
+ '': [],
+ }
+
+ pattern_lists = [
+ [['a'], ['a']],
+ [['a', 'b'], ['a', 'b']],
+ [['a, b'], ['a', 'b']],
+ [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
+ ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
+ ]
+
+ # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
+ # a,b are the bounds of the subscript; x..z are the results of the subscript
+ # when applied to string.ascii_letters.
+
+ subscripts = {
+ 'a': [('a', None), list(string.ascii_letters)],
+ 'a[0]': [('a', (0, None)), ['a']],
+ 'a[1]': [('a', (1, None)), ['b']],
+ 'a[2:3]': [('a', (2, 3)), ['c', 'd']],
+ 'a[-1]': [('a', (-1, None)), ['Z']],
+ 'a[-2]': [('a', (-2, None)), ['Y']],
+ 'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
+ 'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
+ 'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
+ }
+
+ ranges_to_expand = {
+ 'a[1:2]': ['a1', 'a2'],
+ 'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
+ 'a[a:b]': ['aa', 'ab'],
+ 'a[a:i:3]': ['aa', 'ad', 'ag'],
+ 'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
+ 'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
+ 'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
+ }
+
+ def setUp(self):
+ fake_loader = DictDataLoader({})
+
+ self.i = InventoryManager(loader=fake_loader, sources=[None])
+
+ def test_split_patterns(self):
+
+ for p in self.patterns:
+ r = self.patterns[p]
+ self.assertEqual(r, split_host_pattern(p))
+
+ for p, r in self.pattern_lists:
+ self.assertEqual(r, split_host_pattern(p))
+
+ def test_ranges(self):
+
+ for s in self.subscripts:
+ r = self.subscripts[s]
+ self.assertEqual(r[0], self.i._split_subscript(s))
+ self.assertEqual(
+ r[1],
+ self.i._apply_subscript(
+ list(string.ascii_letters),
+ r[0][1]
+ )
+ )
+
+
+class TestInventoryPlugins(unittest.TestCase):
+
+ def test_empty_inventory(self):
+ inventory = self._get_inventory('')
+
+ self.assertIn('all', inventory.groups)
+ self.assertIn('ungrouped', inventory.groups)
+ self.assertFalse(inventory.groups['all'].get_hosts())
+ self.assertFalse(inventory.groups['ungrouped'].get_hosts())
+
+ def test_ini(self):
+ self._test_default_groups("""
+ host1
+ host2
+ host3
+ [servers]
+ host3
+ host4
+ host5
+ """)
+
+ def test_ini_explicit_ungrouped(self):
+ self._test_default_groups("""
+ [ungrouped]
+ host1
+ host2
+ host3
+ [servers]
+ host3
+ host4
+ host5
+ """)
+
+ def test_ini_variables_stringify(self):
+ values = ['string', 'no', 'No', 'false', 'FALSE', [], False, 0]
+
+ inventory_content = "host1 "
+ inventory_content += ' '.join(['var%s=%s' % (i, to_text(x)) for i, x in enumerate(values)])
+ inventory = self._get_inventory(inventory_content)
+
+ variables = inventory.get_host('host1').vars
+ for i in range(len(values)):
+ if isinstance(values[i], string_types):
+ self.assertIsInstance(variables['var%s' % i], string_types)
+ else:
+ self.assertIsInstance(variables['var%s' % i], type(values[i]))
+
+ @mock.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop)
+ @mock.patch('os.path.exists', lambda x: True)
+ @mock.patch('os.access', lambda x, y: True)
+ def test_yaml_inventory(self, filename="test.yaml"):
+ inventory_content = {filename: textwrap.dedent("""\
+ ---
+ all:
+ hosts:
+ test1:
+ test2:
+ """)}
+ C.INVENTORY_ENABLED = ['yaml']
+ fake_loader = DictDataLoader(inventory_content)
+ im = InventoryManager(loader=fake_loader, sources=filename)
+ self.assertTrue(im._inventory.hosts)
+ self.assertIn('test1', im._inventory.hosts)
+ self.assertIn('test2', im._inventory.hosts)
+ self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['all'].hosts)
+ self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['all'].hosts)
+ self.assertEqual(len(im._inventory.groups['all'].hosts), 2)
+ self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['ungrouped'].hosts)
+ self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['ungrouped'].hosts)
+ self.assertEqual(len(im._inventory.groups['ungrouped'].hosts), 2)
+
+ def _get_inventory(self, inventory_content):
+
+ fake_loader = DictDataLoader({__file__: inventory_content})
+
+ return InventoryManager(loader=fake_loader, sources=[__file__])
+
+ def _test_default_groups(self, inventory_content):
+ inventory = self._get_inventory(inventory_content)
+
+ self.assertIn('all', inventory.groups)
+ self.assertIn('ungrouped', inventory.groups)
+ all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
+ self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
+ ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
+ self.assertEqual(set(['host1', 'host2']), ungrouped_hosts)
+ servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
+ self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
diff --git a/test/units/plugins/inventory/test_script.py b/test/units/plugins/inventory/test_script.py
new file mode 100644
index 00000000..5f054813
--- /dev/null
+++ b/test/units/plugins/inventory/test_script.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2017 Chris Meyers <cmeyers@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.loader import PluginLoader
+from units.compat import mock
+from units.compat import unittest
+from ansible.module_utils._text import to_bytes, to_native
+
+
+class TestInventoryModule(unittest.TestCase):
+
+ def setUp(self):
+
+ class Inventory():
+ cache = dict()
+
+ class PopenResult():
+ returncode = 0
+ stdout = b""
+ stderr = b""
+
+ def communicate(self):
+ return (self.stdout, self.stderr)
+
+ self.popen_result = PopenResult()
+ self.inventory = Inventory()
+ self.loader = mock.MagicMock()
+ self.loader.load = mock.MagicMock()
+
+ inv_loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', C.DEFAULT_INVENTORY_PLUGIN_PATH, 'inventory_plugins')
+ self.inventory_module = inv_loader.get('script')
+ self.inventory_module.set_options()
+
+ def register_patch(name):
+ patcher = mock.patch(name)
+ self.addCleanup(patcher.stop)
+ return patcher.start()
+
+ self.popen = register_patch('subprocess.Popen')
+ self.popen.return_value = self.popen_result
+
+ self.BaseInventoryPlugin = register_patch('ansible.plugins.inventory.BaseInventoryPlugin')
+ self.BaseInventoryPlugin.get_cache_prefix.return_value = 'abc123'
+
+ def test_parse_subprocess_path_not_found_fail(self):
+ self.popen.side_effect = OSError("dummy text")
+
+ with pytest.raises(AnsibleError) as e:
+ self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
+ assert e.value.message == "problem running /foo/bar/foobar.py --list (dummy text)"
+
+ def test_parse_subprocess_err_code_fail(self):
+ self.popen_result.stdout = to_bytes(u"fooébar", errors='surrogate_escape')
+ self.popen_result.stderr = to_bytes(u"dummyédata")
+
+ self.popen_result.returncode = 1
+
+ with pytest.raises(AnsibleError) as e:
+ self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
+ assert e.value.message == to_native("Inventory script (/foo/bar/foobar.py) had an execution error: "
+ "dummyédata\n ")
+
+ def test_parse_utf8_fail(self):
+ self.popen_result.returncode = 0
+ self.popen_result.stderr = to_bytes("dummyédata")
+ self.loader.load.side_effect = TypeError('obj must be string')
+
+ with pytest.raises(AnsibleError) as e:
+ self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
+ assert e.value.message == to_native("failed to parse executable inventory script results from "
+ "/foo/bar/foobar.py: obj must be string\ndummyédata\n")
+
+ def test_parse_dict_fail(self):
+ self.popen_result.returncode = 0
+ self.popen_result.stderr = to_bytes("dummyédata")
+ self.loader.load.return_value = 'i am not a dict'
+
+ with pytest.raises(AnsibleError) as e:
+ self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
+ assert e.value.message == to_native("failed to parse executable inventory script results from "
+ "/foo/bar/foobar.py: needs to be a json dict\ndummyédata\n")
diff --git a/test/units/plugins/loader_fixtures/__init__.py b/test/units/plugins/loader_fixtures/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/loader_fixtures/__init__.py
diff --git a/test/units/plugins/loader_fixtures/import_fixture.py b/test/units/plugins/loader_fixtures/import_fixture.py
new file mode 100644
index 00000000..81127332
--- /dev/null
+++ b/test/units/plugins/loader_fixtures/import_fixture.py
@@ -0,0 +1,9 @@
+# Nothing to see here; this file is deliberately almost empty so that an
+# imp.load_source call has something to load without doing anything
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class test:
+ def __init__(self, *args, **kwargs):
+ pass
diff --git a/test/units/plugins/lookup/__init__.py b/test/units/plugins/lookup/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/lookup/__init__.py
diff --git a/test/units/plugins/lookup/test_env.py b/test/units/plugins/lookup/test_env.py
new file mode 100644
index 00000000..5d9713fe
--- /dev/null
+++ b/test/units/plugins/lookup/test_env.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Abhay Kadam <abhaykadam88@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.plugins.loader import lookup_loader
+
+
+@pytest.mark.parametrize('env_var,exp_value', [
+ ('foo', 'bar'),
+ ('equation', 'a=b*100')
+])
+def test_env_var_value(monkeypatch, env_var, exp_value):
+ monkeypatch.setattr('ansible.utils.py3compat.environ.get', lambda x, y: exp_value)
+
+ env_lookup = lookup_loader.get('env')
+ retval = env_lookup.run([env_var], None)
+ assert retval == [exp_value]
+
+
+@pytest.mark.parametrize('env_var,exp_value', [
+ ('simple_var', 'alpha-β-gamma'),
+ ('the_var', 'ãnˈsiβle')
+])
+def test_utf8_env_var_value(monkeypatch, env_var, exp_value):
+ monkeypatch.setattr('ansible.utils.py3compat.environ.get', lambda x, y: exp_value)
+
+ env_lookup = lookup_loader.get('env')
+ retval = env_lookup.run([env_var], None)
+ assert retval == [exp_value]
diff --git a/test/units/plugins/lookup/test_ini.py b/test/units/plugins/lookup/test_ini.py
new file mode 100644
index 00000000..adf2bac2
--- /dev/null
+++ b/test/units/plugins/lookup/test_ini.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from ansible.plugins.lookup.ini import _parse_params
+
+
+class TestINILookup(unittest.TestCase):
+
+ # Currently there isn't a new-style parameter format, so only old-style terms are covered here
+ old_style_params_data = (
+ # Simple case
+ dict(
+ term=u'keyA section=sectionA file=/path/to/file',
+ expected=[u'file=/path/to/file', u'keyA', u'section=sectionA'],
+ ),
+ dict(
+ term=u'keyB section=sectionB with space file=/path/with/embedded spaces and/file',
+ expected=[u'file=/path/with/embedded spaces and/file', u'keyB', u'section=sectionB with space'],
+ ),
+ dict(
+ term=u'keyC section=sectionC file=/path/with/equals/cn=com.ansible',
+ expected=[u'file=/path/with/equals/cn=com.ansible', u'keyC', u'section=sectionC'],
+ ),
+ dict(
+ term=u'keyD section=sectionD file=/path/with space and/equals/cn=com.ansible',
+ expected=[u'file=/path/with space and/equals/cn=com.ansible', u'keyD', u'section=sectionD'],
+ ),
+ dict(
+ term=u'keyE section=sectionE file=/path/with/unicode/くらとみ/file',
+ expected=[u'file=/path/with/unicode/くらとみ/file', u'keyE', u'section=sectionE'],
+ ),
+ dict(
+ term=u'keyF section=sectionF file=/path/with/utf 8 and spaces/くらとみ/file',
+ expected=[u'file=/path/with/utf 8 and spaces/くらとみ/file', u'keyF', u'section=sectionF'],
+ ),
+ )
+
+ def test_parse_parameters(self):
+ for testcase in self.old_style_params_data:
+ params = _parse_params(testcase['term'])
+ params.sort()
+ self.assertEqual(params, testcase['expected'])
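+
+
+# For context, each 'term' above mirrors what a play would pass to the lookup,
+# e.g. lookup('ini', 'keyA section=sectionA file=/path/to/file'); _parse_params
+# only splits the term, so the expected lists are just the sorted fragments.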
diff --git a/test/units/plugins/lookup/test_password.py b/test/units/plugins/lookup/test_password.py
new file mode 100644
index 00000000..9871f4ab
--- /dev/null
+++ b/test/units/plugins/lookup/test_password.py
@@ -0,0 +1,501 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import passlib
+ from passlib.handlers import pbkdf2
+except ImportError:
+ passlib = None
+ pbkdf2 = None
+
+import pytest
+
+from units.mock.loader import DictDataLoader
+
+from units.compat import unittest
+from units.compat.mock import mock_open, patch
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import text_type
+from ansible.module_utils.six.moves import builtins
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.loader import PluginLoader
+from ansible.plugins.lookup import password
+
+
+DEFAULT_CHARS = sorted([u'ascii_letters', u'digits', u".,:-_"])
+DEFAULT_CANDIDATE_CHARS = u'.,:-_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
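+
+# A chars spec is a list of fragments: fragments naming attributes of the
+# string module (e.g. 'ascii_letters', 'digits') expand to those character
+# sets, while anything else is taken literally, as DEFAULT_CANDIDATE_CHARS
+# shows.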
+
+# Currently there isn't a new-style parameter format, so only old-style terms are covered here
+old_style_params_data = (
+ # Simple case
+ dict(
+ term=u'/path/to/file',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+
+ # Special characters in path
+ dict(
+ term=u'/path/with/embedded spaces and/file',
+ filename=u'/path/with/embedded spaces and/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ dict(
+ term=u'/path/with/equals/cn=com.ansible',
+ filename=u'/path/with/equals/cn=com.ansible',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ dict(
+ term=u'/path/with/unicode/くらとみ/file',
+ filename=u'/path/with/unicode/くらとみ/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ # Mix several special chars
+ dict(
+ term=u'/path/with/utf 8 and spaces/くらとみ/file',
+ filename=u'/path/with/utf 8 and spaces/くらとみ/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ dict(
+ term=u'/path/with/encoding=unicode/くらとみ/file',
+ filename=u'/path/with/encoding=unicode/くらとみ/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ dict(
+ term=u'/path/with/encoding=unicode/くらとみ/and spaces file',
+ filename=u'/path/with/encoding=unicode/くらとみ/and spaces file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+
+ # Simple parameters
+ dict(
+ term=u'/path/to/file length=42',
+ filename=u'/path/to/file',
+ params=dict(length=42, encrypt=None, chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ dict(
+ term=u'/path/to/file encrypt=pbkdf2_sha256',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt='pbkdf2_sha256', chars=DEFAULT_CHARS),
+ candidate_chars=DEFAULT_CANDIDATE_CHARS,
+ ),
+ dict(
+ term=u'/path/to/file chars=abcdefghijklmnop',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abcdefghijklmnop']),
+ candidate_chars=u'abcdefghijklmnop',
+ ),
+ dict(
+ term=u'/path/to/file chars=digits,abc,def',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc', u'def'])),
+ candidate_chars=u'abcdef0123456789',
+ ),
+
+ # Including comma in chars
+ dict(
+ term=u'/path/to/file chars=abcdefghijklmnop,,digits',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'abcdefghijklmnop', u',', u'digits'])),
+ candidate_chars=u',abcdefghijklmnop0123456789',
+ ),
+ dict(
+ term=u'/path/to/file chars=,,',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u',']),
+ candidate_chars=u',',
+ ),
+
+ # Including = in chars
+ dict(
+ term=u'/path/to/file chars=digits,=,,',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'=', u','])),
+ candidate_chars=u',=0123456789',
+ ),
+ dict(
+ term=u'/path/to/file chars=digits,abc=def',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'abc=def'])),
+ candidate_chars=u'abc=def0123456789',
+ ),
+
+ # Including unicode in chars
+ dict(
+ term=u'/path/to/file chars=digits,くらとみ,,',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'digits', u'くらとみ', u','])),
+ candidate_chars=u',0123456789くらとみ',
+ ),
+ # Including only unicode in chars
+ dict(
+ term=u'/path/to/file chars=くらとみ',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'くらとみ'])),
+ candidate_chars=u'くらとみ',
+ ),
+
+ # Include ':' in path
+ dict(
+ term=u'/path/to/file_with:colon chars=ascii_letters,digits',
+ filename=u'/path/to/file_with:colon',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=sorted([u'ascii_letters', u'digits'])),
+ candidate_chars=u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
+ ),
+
+ # Including special chars in both path and chars
+ # Special characters in path
+ dict(
+ term=u'/path/with/embedded spaces and/file chars=abc=def',
+ filename=u'/path/with/embedded spaces and/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def']),
+ candidate_chars=u'abc=def',
+ ),
+ dict(
+ term=u'/path/with/equals/cn=com.ansible chars=abc=def',
+ filename=u'/path/with/equals/cn=com.ansible',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'abc=def']),
+ candidate_chars=u'abc=def',
+ ),
+ dict(
+ term=u'/path/with/unicode/くらとみ/file chars=くらとみ',
+ filename=u'/path/with/unicode/くらとみ/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']),
+ candidate_chars=u'くらとみ',
+ ),
+)
+
+
+class TestParseParameters(unittest.TestCase):
+ def test(self):
+ for testcase in old_style_params_data:
+ filename, params = password._parse_parameters(testcase['term'])
+ params['chars'].sort()
+ self.assertEqual(filename, testcase['filename'])
+ self.assertEqual(params, testcase['params'])
+
+ def test_unrecognized_value(self):
+ testcase = dict(term=u'/path/to/file chars=くらとみi sdfsdf',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']),
+ candidate_chars=u'くらとみ')
+ self.assertRaises(AnsibleError, password._parse_parameters, testcase['term'])
+
+ def test_invalid_params(self):
+ testcase = dict(term=u'/path/to/file chars=くらとみi something_invalid=123',
+ filename=u'/path/to/file',
+ params=dict(length=password.DEFAULT_LENGTH, encrypt=None, chars=[u'くらとみ']),
+ candidate_chars=u'くらとみ')
+ self.assertRaises(AnsibleError, password._parse_parameters, testcase['term'])
+
+
+class TestReadPasswordFile(unittest.TestCase):
+ def setUp(self):
+ self.os_path_exists = password.os.path.exists
+
+ def tearDown(self):
+ password.os.path.exists = self.os_path_exists
+
+ def test_no_password_file(self):
+ password.os.path.exists = lambda x: False
+ self.assertEqual(password._read_password_file(b'/nonexistent'), None)
+
+ def test_with_password_file(self):
+ password.os.path.exists = lambda x: True
+ with patch.object(builtins, 'open', mock_open(read_data=b'Testing\n')) as m:
+ self.assertEqual(password._read_password_file(b'/etc/motd'), u'Testing')
+
+
+class TestGenCandidateChars(unittest.TestCase):
+ def _assert_gen_candidate_chars(self, testcase):
+ expected_candidate_chars = testcase['candidate_chars']
+ params = testcase['params']
+ chars_spec = params['chars']
+ res = password._gen_candidate_chars(chars_spec)
+ self.assertEqual(res, expected_candidate_chars)
+
+ def test_gen_candidate_chars(self):
+ for testcase in old_style_params_data:
+ self._assert_gen_candidate_chars(testcase)
+
+
+class TestRandomPassword(unittest.TestCase):
+ def _assert_valid_chars(self, res, chars):
+ for res_char in res:
+ self.assertIn(res_char, chars)
+
+ def test_default(self):
+ res = password.random_password()
+ self.assertEqual(len(res), password.DEFAULT_LENGTH)
+ self.assertTrue(isinstance(res, text_type))
+ self._assert_valid_chars(res, DEFAULT_CANDIDATE_CHARS)
+
+ def test_zero_length(self):
+ res = password.random_password(length=0)
+ self.assertEqual(len(res), 0)
+ self.assertTrue(isinstance(res, text_type))
+ self._assert_valid_chars(res, u',')
+
+ def test_just_a_comma(self):
+ res = password.random_password(length=1, chars=u',')
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res, u',')
+
+ def test_free_will(self):
+ # A Rush and Spinal Tap reference twofer
+ res = password.random_password(length=11, chars=u'a')
+ self.assertEqual(len(res), 11)
+ self.assertEqual(res, 'aaaaaaaaaaa')
+ self._assert_valid_chars(res, u'a')
+
+ def test_unicode(self):
+ res = password.random_password(length=11, chars=u'くらとみ')
+ self._assert_valid_chars(res, u'くらとみ')
+ self.assertEqual(len(res), 11)
+
+ def test_gen_password(self):
+ for testcase in old_style_params_data:
+ params = testcase['params']
+ candidate_chars = testcase['candidate_chars']
+ params_chars_spec = password._gen_candidate_chars(params['chars'])
+ password_string = password.random_password(length=params['length'],
+ chars=params_chars_spec)
+ self.assertEqual(len(password_string),
+ params['length'],
+ msg='generated password=%s has length (%s) instead of expected length (%s)' %
+ (password_string, len(password_string), params['length']))
+
+ for char in password_string:
+ self.assertIn(char, candidate_chars,
+ msg='%s not found in %s from chars spec %s' %
+ (char, candidate_chars, params['chars']))
+
+
+class TestParseContent(unittest.TestCase):
+ def test_empty_password_file(self):
+ plaintext_password, salt = password._parse_content(u'')
+ self.assertEqual(plaintext_password, u'')
+ self.assertEqual(salt, None)
+
+ def test(self):
+ expected_content = u'12345678'
+ file_content = expected_content
+ plaintext_password, salt = password._parse_content(file_content)
+ self.assertEqual(plaintext_password, expected_content)
+ self.assertEqual(salt, None)
+
+ def test_with_salt(self):
+ expected_content = u'12345678 salt=87654321'
+ file_content = expected_content
+ plaintext_password, salt = password._parse_content(file_content)
+ self.assertEqual(plaintext_password, u'12345678')
+ self.assertEqual(salt, u'87654321')
+
+
+class TestFormatContent(unittest.TestCase):
+ def test_no_encrypt(self):
+ self.assertEqual(
+ password._format_content(password=u'hunter42',
+ salt=u'87654321',
+ encrypt=False),
+ u'hunter42 salt=87654321')
+
+ def test_no_encrypt_no_salt(self):
+ self.assertEqual(
+ password._format_content(password=u'hunter42',
+ salt=None,
+ encrypt=None),
+ u'hunter42')
+
+ def test_encrypt(self):
+ self.assertEqual(
+ password._format_content(password=u'hunter42',
+ salt=u'87654321',
+ encrypt='pbkdf2_sha256'),
+ u'hunter42 salt=87654321')
+
+ def test_encrypt_no_salt(self):
+ self.assertRaises(AssertionError, password._format_content, u'hunter42', None, 'pbkdf2_sha256')
+
+
+class TestWritePasswordFile(unittest.TestCase):
+ def setUp(self):
+ self.makedirs_safe = password.makedirs_safe
+ self.os_chmod = password.os.chmod
+ password.makedirs_safe = lambda path, mode: None
+ password.os.chmod = lambda path, mode: None
+
+ def tearDown(self):
+ password.makedirs_safe = self.makedirs_safe
+ password.os.chmod = self.os_chmod
+
+ def test_content_written(self):
+
+ with patch.object(builtins, 'open', mock_open()) as m:
+ password._write_password_file(b'/this/is/a/test/caf\xc3\xa9', u'Testing Café')
+
+ m.assert_called_once_with(b'/this/is/a/test/caf\xc3\xa9', 'wb')
+ m().write.assert_called_once_with(u'Testing Café\n'.encode('utf-8'))
+
+
+class BaseTestLookupModule(unittest.TestCase):
+ def setUp(self):
+ self.fake_loader = DictDataLoader({'/path/to/somewhere': 'sdfsdf'})
+ self.password_lookup = password.LookupModule(loader=self.fake_loader)
+ self.os_path_exists = password.os.path.exists
+ self.os_open = password.os.open
+ password.os.open = lambda path, flag: None
+ self.os_close = password.os.close
+ password.os.close = lambda fd: None
+ self.os_remove = password.os.remove
+ password.os.remove = lambda path: None
+ self.makedirs_safe = password.makedirs_safe
+ password.makedirs_safe = lambda path, mode: None
+
+ def tearDown(self):
+ password.os.path.exists = self.os_path_exists
+ password.os.open = self.os_open
+ password.os.close = self.os_close
+ password.os.remove = self.os_remove
+ password.makedirs_safe = self.makedirs_safe
+
+
+class TestLookupModuleWithoutPasslib(BaseTestLookupModule):
+ @patch.object(PluginLoader, '_get_paths')
+ @patch('ansible.plugins.lookup.password._write_password_file')
+ def test_no_encrypt(self, mock_get_paths, mock_write_file):
+ mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
+
+ results = self.password_lookup.run([u'/path/to/somewhere'], None)
+
+ # FIXME: assert something useful
+ for result in results:
+ assert len(result) == password.DEFAULT_LENGTH
+ assert isinstance(result, text_type)
+
+ @patch.object(PluginLoader, '_get_paths')
+ @patch('ansible.plugins.lookup.password._write_password_file')
+ def test_password_already_created_no_encrypt(self, mock_get_paths, mock_write_file):
+ mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
+ password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
+
+ with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
+ results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
+
+ for result in results:
+ self.assertEqual(result, u'hunter42')
+
+ @patch.object(PluginLoader, '_get_paths')
+ @patch('ansible.plugins.lookup.password._write_password_file')
+ def test_only_a(self, mock_get_paths, mock_write_file):
+ mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
+
+ results = self.password_lookup.run([u'/path/to/somewhere chars=a'], None)
+ for result in results:
+ self.assertEqual(result, u'a' * password.DEFAULT_LENGTH)
+
+ @patch('time.sleep')
+ def test_lock_been_held(self, mock_sleep):
+ # pretend the lock file is here
+ password.os.path.exists = lambda x: True
+ try:
+ with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
+ # should timeout here
+ results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
+ self.fail("Lookup didn't timeout when lock already been held")
+ except AnsibleError:
+ pass
+
+ def test_lock_not_been_held(self):
+ # pretend there is now a password file but no lock file
+ password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
+ try:
+ with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
+ # should not timeout here
+ results = self.password_lookup.run([u'/path/to/somewhere chars=anything'], None)
+ except AnsibleError:
+ self.fail('Lookup timed out even though the lock was free')
+
+ for result in results:
+ self.assertEqual(result, u'hunter42')
+
+
+@pytest.mark.skipif(passlib is None, reason='passlib must be installed to run these tests')
+class TestLookupModuleWithPasslib(BaseTestLookupModule):
+ def setUp(self):
+ super(TestLookupModuleWithPasslib, self).setUp()
+
+ # Different releases of passlib default to a different number of rounds
+ self.sha256 = passlib.registry.get_crypt_handler('pbkdf2_sha256')
+ sha256_for_tests = pbkdf2.create_pbkdf2_hash("sha256", 32, 20000)
+ passlib.registry.register_crypt_handler(sha256_for_tests, force=True)
+
+ def tearDown(self):
+ super(TestLookupModuleWithPasslib, self).tearDown()
+
+ passlib.registry.register_crypt_handler(self.sha256, force=True)
+
+ @patch.object(PluginLoader, '_get_paths')
+ @patch('ansible.plugins.lookup.password._write_password_file')
+ def test_encrypt(self, mock_get_paths, mock_write_file):
+ mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
+
+ results = self.password_lookup.run([u'/path/to/somewhere encrypt=pbkdf2_sha256'], None)
+
+ # pbkdf2 format plus hash
+ expected_password_length = 76
+
+ for result in results:
+ self.assertEqual(len(result), expected_password_length)
+ # result should have 5 parts split by '$'
+ str_parts = result.split('$', 5)
+
+ # verify the result is parseable by the passlib
+ crypt_parts = passlib.hash.pbkdf2_sha256.parsehash(result)
+
+ # verify it used the right algo type
+ self.assertEqual(str_parts[1], 'pbkdf2-sha256')
+
+ self.assertEqual(len(str_parts), 5)
+
+ # verify the string and parsehash agree on the number of rounds
+ self.assertEqual(int(str_parts[2]), crypt_parts['rounds'])
+ self.assertIsInstance(result, text_type)
+
+ @patch.object(PluginLoader, '_get_paths')
+ @patch('ansible.plugins.lookup.password._write_password_file')
+ def test_password_already_created_encrypt(self, mock_get_paths, mock_write_file):
+ mock_get_paths.return_value = ['/path/one', '/path/two', '/path/three']
+ password.os.path.exists = lambda x: x == to_bytes('/path/to/somewhere')
+
+ with patch.object(builtins, 'open', mock_open(read_data=b'hunter42 salt=87654321\n')) as m:
+ results = self.password_lookup.run([u'/path/to/somewhere chars=anything encrypt=pbkdf2_sha256'], None)
+ for result in results:
+ self.assertEqual(result, u'$pbkdf2-sha256$20000$ODc2NTQzMjE$Uikde0cv0BKaRaAXMrUQB.zvG4GmnjClwjghwIRf2gU')
diff --git a/test/units/plugins/shell/__init__.py b/test/units/plugins/shell/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/shell/__init__.py
diff --git a/test/units/plugins/shell/test_cmd.py b/test/units/plugins/shell/test_cmd.py
new file mode 100644
index 00000000..4c1a654b
--- /dev/null
+++ b/test/units/plugins/shell/test_cmd.py
@@ -0,0 +1,19 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.plugins.shell.cmd import ShellModule
+
+
+@pytest.mark.parametrize('s, expected', [
+ ['arg1', 'arg1'],
+ [None, '""'],
+ ['arg1 and 2', '^"arg1 and 2^"'],
+ ['malicious argument\\"&whoami', '^"malicious argument\\\\^"^&whoami^"'],
+ ['C:\\temp\\some ^%file% > nul', '^"C:\\temp\\some ^^^%file^% ^> nul^"']
+])
+def test_quote_args(s, expected):
+ cmd = ShellModule()
+ actual = cmd.quote(s)
+ assert actual == expected
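+
+
+# The expected values encode cmd.exe quoting rules: arguments with spaces or
+# metacharacters are wrapped in ^"...^", backslashes before a quote are
+# doubled, and characters such as & % < > are escaped with a caret.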
diff --git a/test/units/plugins/shell/test_powershell.py b/test/units/plugins/shell/test_powershell.py
new file mode 100644
index 00000000..c94baabb
--- /dev/null
+++ b/test/units/plugins/shell/test_powershell.py
@@ -0,0 +1,83 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.shell.powershell import _parse_clixml, ShellModule
+
+
+def test_parse_clixml_empty():
+ empty = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04"></Objs>'
+ expected = b''
+ actual = _parse_clixml(empty)
+ assert actual == expected
+
+
+def test_parse_clixml_with_progress():
+ progress = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
+ b'<Obj S="progress" RefId="0"><TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>' \
+ b'<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
+ b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj></Objs>'
+ expected = b''
+ actual = _parse_clixml(progress)
+ assert actual == expected
+
+
+def test_parse_clixml_single_stream():
+ single_stream = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
+ b'<S S="Error">fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_</S>' \
+ b'<S S="Error">the spelling of the name, or if a path was included._x000D__x000A_</S>' \
+ b'<S S="Error">At line:1 char:1_x000D__x000A_</S>' \
+ b'<S S="Error">+ fake cmdlet_x000D__x000A_</S><S S="Error">+ ~~~~_x000D__x000A_</S>' \
+ b'<S S="Error"> + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_</S>' \
+ b'<S S="Error"> + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_</S><S S="Error"> _x000D__x000A_</S>' \
+ b'</Objs>'
+ expected = b"fake : The term 'fake' is not recognized as the name of a cmdlet. Check \r\n" \
+ b"the spelling of the name, or if a path was included.\r\n" \
+ b"At line:1 char:1\r\n" \
+ b"+ fake cmdlet\r\n" \
+ b"+ ~~~~\r\n" \
+ b" + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException\r\n" \
+ b" + FullyQualifiedErrorId : CommandNotFoundException\r\n "
+ actual = _parse_clixml(single_stream)
+ assert actual == expected
+
+
+def test_parse_clixml_multiple_streams():
+ multiple_stream = b'#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
+ b'<S S="Error">fake : The term \'fake\' is not recognized as the name of a cmdlet. Check _x000D__x000A_</S>' \
+ b'<S S="Error">the spelling of the name, or if a path was included._x000D__x000A_</S>' \
+ b'<S S="Error">At line:1 char:1_x000D__x000A_</S>' \
+ b'<S S="Error">+ fake cmdlet_x000D__x000A_</S><S S="Error">+ ~~~~_x000D__x000A_</S>' \
+ b'<S S="Error"> + CategoryInfo : ObjectNotFound: (fake:String) [], CommandNotFoundException_x000D__x000A_</S>' \
+ b'<S S="Error"> + FullyQualifiedErrorId : CommandNotFoundException_x000D__x000A_</S><S S="Error"> _x000D__x000A_</S>' \
+ b'<S S="Info">hi info</S>' \
+ b'</Objs>'
+ expected = b"hi info"
+ actual = _parse_clixml(multiple_stream, stream="Info")
+ assert actual == expected
+
+
+def test_parse_clixml_multiple_elements():
+ multiple_elements = b'#< CLIXML\r\n#< CLIXML\r\n<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">' \
+ b'<Obj S="progress" RefId="0"><TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>' \
+ b'<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
+ b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>' \
+ b'<S S="Error">Error 1</S></Objs>' \
+ b'<Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04"><Obj S="progress" RefId="0">' \
+ b'<TN RefId="0"><T>System.Management.Automation.PSCustomObject</T><T>System.Object</T></TN><MS>' \
+ b'<I64 N="SourceId">1</I64><PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
+ b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>' \
+ b'<Obj S="progress" RefId="1"><TNRef RefId="0" /><MS><I64 N="SourceId">2</I64>' \
+ b'<PR N="Record"><AV>Preparing modules for first use.</AV><AI>0</AI><Nil />' \
+ b'<PI>-1</PI><PC>-1</PC><T>Completed</T><SR>-1</SR><SD> </SD></PR></MS></Obj>' \
+ b'<S S="Error">Error 2</S></Objs>'
+ expected = b"Error 1\r\nError 2"
+ actual = _parse_clixml(multiple_elements)
+ assert actual == expected
+
+
+def test_join_path_unc():
+ pwsh = ShellModule()
+ unc_path_parts = ['\\\\host\\share\\dir1\\\\dir2\\', '\\dir3/dir4', 'dir5', 'dir6\\']
+ expected = '\\\\host\\share\\dir1\\dir2\\dir3\\dir4\\dir5\\dir6'
+ actual = pwsh.join_path(*unc_path_parts)
+ assert actual == expected
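+
+
+# join_path collapses duplicated separators and normalises forward slashes to
+# backslashes while preserving the leading \\ of the UNC root, as the expected
+# path shows.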
diff --git a/test/units/plugins/strategy/__init__.py b/test/units/plugins/strategy/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/plugins/strategy/__init__.py
diff --git a/test/units/plugins/strategy/test_linear.py b/test/units/plugins/strategy/test_linear.py
new file mode 100644
index 00000000..74887030
--- /dev/null
+++ b/test/units/plugins/strategy/test_linear.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+
+from ansible.executor.play_iterator import PlayIterator
+from ansible.playbook import Playbook
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.strategy.linear import StrategyModule
+from ansible.executor.task_queue_manager import TaskQueueManager
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+
+class TestStrategyLinear(unittest.TestCase):
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_noop(self):
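+        # The linear strategy keeps hosts in lockstep: once host01 fails into
+        # the rescue block, each host is handed noop meta tasks while the
+        # other runs, so both advance through the play together.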
+ fake_loader = DictDataLoader({
+ "test_play.yml": """
+ - hosts: all
+ gather_facts: no
+ tasks:
+ - block:
+ - block:
+ - name: task1
+ debug: msg='task1'
+ failed_when: inventory_hostname == 'host01'
+
+ - name: task2
+ debug: msg='task2'
+
+ rescue:
+ - name: rescue1
+ debug: msg='rescue1'
+
+ - name: rescue2
+ debug: msg='rescue2'
+ """,
+ })
+
+ mock_var_manager = MagicMock()
+ mock_var_manager._fact_cache = dict()
+ mock_var_manager.get_vars.return_value = dict()
+
+ p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
+
+ inventory = MagicMock()
+ inventory.hosts = {}
+ hosts = []
+ for i in range(0, 2):
+ host = MagicMock()
+ host.name = host.get_name.return_value = 'host%02d' % i
+ hosts.append(host)
+ inventory.hosts[host.name] = host
+ inventory.get_hosts.return_value = hosts
+ inventory.filter_hosts.return_value = hosts
+
+ mock_var_manager._fact_cache['host00'] = dict()
+
+ play_context = PlayContext(play=p._entries[0])
+
+ itr = PlayIterator(
+ inventory=inventory,
+ play=p._entries[0],
+ play_context=play_context,
+ variable_manager=mock_var_manager,
+ all_vars=dict(),
+ )
+
+ tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=mock_var_manager,
+ loader=fake_loader,
+ passwords=None,
+ forks=5,
+ )
+ tqm._initialize_processes(3)
+ strategy = StrategyModule(tqm)
+ strategy._hosts_cache = [h.name for h in hosts]
+ strategy._hosts_cache_all = [h.name for h in hosts]
+
+ # implicit meta: flush_handlers
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'meta')
+ self.assertEqual(host2_task.action, 'meta')
+
+ # debug: task1, debug: task1
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'debug')
+ self.assertEqual(host2_task.action, 'debug')
+ self.assertEqual(host1_task.name, 'task1')
+ self.assertEqual(host2_task.name, 'task1')
+
+ # mark the second host failed
+ itr.mark_host_failed(hosts[1])
+
+ # debug: task2, meta: noop
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'debug')
+ self.assertEqual(host2_task.action, 'meta')
+ self.assertEqual(host1_task.name, 'task2')
+ self.assertEqual(host2_task.name, '')
+
+ # meta: noop, debug: rescue1
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'meta')
+ self.assertEqual(host2_task.action, 'debug')
+ self.assertEqual(host1_task.name, '')
+ self.assertEqual(host2_task.name, 'rescue1')
+
+ # meta: noop, debug: rescue2
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'meta')
+ self.assertEqual(host2_task.action, 'debug')
+ self.assertEqual(host1_task.name, '')
+ self.assertEqual(host2_task.name, 'rescue2')
+
+ # implicit meta: flush_handlers
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'meta')
+ self.assertEqual(host2_task.action, 'meta')
+
+ # implicit meta: flush_handlers
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNotNone(host1_task)
+ self.assertIsNotNone(host2_task)
+ self.assertEqual(host1_task.action, 'meta')
+ self.assertEqual(host2_task.action, 'meta')
+
+ # end of iteration
+ hosts_left = strategy.get_hosts_left(itr)
+ hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
+ host1_task = hosts_tasks[0][1]
+ host2_task = hosts_tasks[1][1]
+ self.assertIsNone(host1_task)
+ self.assertIsNone(host2_task)
diff --git a/test/units/plugins/strategy/test_strategy.py b/test/units/plugins/strategy/test_strategy.py
new file mode 100644
index 00000000..9a2574d2
--- /dev/null
+++ b/test/units/plugins/strategy/test_strategy.py
@@ -0,0 +1,546 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.mock.loader import DictDataLoader
+from copy import deepcopy
+import uuid
+
+from units.compat import unittest
+from units.compat.mock import patch, MagicMock
+from ansible.executor.process.worker import WorkerProcess
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.executor.task_result import TaskResult
+from ansible.inventory.host import Host
+from ansible.module_utils.six.moves import queue as Queue
+from ansible.playbook.handler import Handler
+from ansible.plugins.strategy import StrategyBase
+
+
+class TestStrategyBase(unittest.TestCase):
+
+ def test_strategy_base_init(self):
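+        # Back the TQM's final queue with a plain list so the test can control
+        # exactly what items get queued and consumed.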
+ queue_items = []
+
+ def _queue_empty(*args, **kwargs):
+ return len(queue_items) == 0
+
+ def _queue_get(*args, **kwargs):
+ if len(queue_items) == 0:
+ raise Queue.Empty
+ else:
+ return queue_items.pop()
+
+ def _queue_put(item, *args, **kwargs):
+ queue_items.append(item)
+
+ mock_queue = MagicMock()
+ mock_queue.empty.side_effect = _queue_empty
+ mock_queue.get.side_effect = _queue_get
+ mock_queue.put.side_effect = _queue_put
+
+ mock_tqm = MagicMock(TaskQueueManager)
+ mock_tqm._final_q = mock_queue
+ mock_tqm._workers = []
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base.cleanup()
+
+ def test_strategy_base_run(self):
+ queue_items = []
+
+ def _queue_empty(*args, **kwargs):
+ return len(queue_items) == 0
+
+ def _queue_get(*args, **kwargs):
+ if len(queue_items) == 0:
+ raise Queue.Empty
+ else:
+ return queue_items.pop()
+
+ def _queue_put(item, *args, **kwargs):
+ queue_items.append(item)
+
+ mock_queue = MagicMock()
+ mock_queue.empty.side_effect = _queue_empty
+ mock_queue.get.side_effect = _queue_get
+ mock_queue.put.side_effect = _queue_put
+
+ mock_tqm = MagicMock(TaskQueueManager)
+ mock_tqm._final_q = mock_queue
+ mock_tqm._stats = MagicMock()
+ mock_tqm.send_callback.return_value = None
+
+ for attr in ('RUN_OK', 'RUN_ERROR', 'RUN_FAILED_HOSTS', 'RUN_UNREACHABLE_HOSTS'):
+ setattr(mock_tqm, attr, getattr(TaskQueueManager, attr))
+
+ mock_iterator = MagicMock()
+ mock_iterator._play = MagicMock()
+ mock_iterator._play.handlers = []
+
+ mock_play_context = MagicMock()
+
+ mock_tqm._failed_hosts = dict()
+ mock_tqm._unreachable_hosts = dict()
+ mock_tqm._workers = []
+ strategy_base = StrategyBase(tqm=mock_tqm)
+
+ mock_host = MagicMock()
+ mock_host.name = 'host1'
+
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), mock_tqm.RUN_OK)
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=TaskQueueManager.RUN_ERROR), mock_tqm.RUN_ERROR)
+ mock_tqm._failed_hosts = dict(host1=True)
+ mock_iterator.get_failed_hosts.return_value = [mock_host]
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_FAILED_HOSTS)
+ mock_tqm._unreachable_hosts = dict(host1=True)
+ mock_iterator.get_failed_hosts.return_value = []
+ self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_UNREACHABLE_HOSTS)
+ strategy_base.cleanup()
+
+ def test_strategy_base_get_hosts(self):
+ queue_items = []
+
+ def _queue_empty(*args, **kwargs):
+ return len(queue_items) == 0
+
+ def _queue_get(*args, **kwargs):
+ if len(queue_items) == 0:
+ raise Queue.Empty
+ else:
+ return queue_items.pop()
+
+ def _queue_put(item, *args, **kwargs):
+ queue_items.append(item)
+
+ mock_queue = MagicMock()
+ mock_queue.empty.side_effect = _queue_empty
+ mock_queue.get.side_effect = _queue_get
+ mock_queue.put.side_effect = _queue_put
+
+ mock_hosts = []
+ for i in range(0, 5):
+ mock_host = MagicMock()
+ mock_host.name = "host%02d" % (i + 1)
+ mock_host.has_hostkey = True
+ mock_hosts.append(mock_host)
+
+ mock_hosts_names = [h.name for h in mock_hosts]
+
+ mock_inventory = MagicMock()
+ mock_inventory.get_hosts.return_value = mock_hosts
+
+ mock_tqm = MagicMock()
+ mock_tqm._final_q = mock_queue
+ mock_tqm.get_inventory.return_value = mock_inventory
+
+ mock_play = MagicMock()
+ mock_play.hosts = ["host%02d" % (i + 1) for i in range(0, 5)]
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._hosts_cache = strategy_base._hosts_cache_all = mock_hosts_names
+
+ mock_tqm._failed_hosts = []
+ mock_tqm._unreachable_hosts = []
+ self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts])
+
+ mock_tqm._failed_hosts = ["host01"]
+ self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts[1:]])
+ self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0].name])
+
+ mock_tqm._unreachable_hosts = ["host02"]
+ self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), [h.name for h in mock_hosts[2:]])
+ strategy_base.cleanup()
+
+ @patch.object(WorkerProcess, 'run')
+ def test_strategy_base_queue_task(self, mock_worker):
+        def fake_run(*args):
+            return
+
+        mock_worker.side_effect = fake_run
+
+ fake_loader = DictDataLoader()
+ mock_var_manager = MagicMock()
+ mock_host = MagicMock()
+ mock_host.get_vars.return_value = dict()
+ mock_host.has_hostkey = True
+ mock_inventory = MagicMock()
+ mock_inventory.get.return_value = mock_host
+
+ tqm = TaskQueueManager(
+ inventory=mock_inventory,
+ variable_manager=mock_var_manager,
+ loader=fake_loader,
+ passwords=None,
+ forks=3,
+ )
+ tqm._initialize_processes(3)
+ tqm.hostvars = dict()
+
+ mock_task = MagicMock()
+ mock_task._uuid = 'abcd'
+ mock_task.throttle = 0
+
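+        # With three worker processes, _queue_task should advance the worker
+        # cursor round-robin: 1, 2, then wrap back to 0.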
+ try:
+ strategy_base = StrategyBase(tqm=tqm)
+ strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 1)
+ self.assertEqual(strategy_base._pending_results, 1)
+ strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 2)
+ self.assertEqual(strategy_base._pending_results, 2)
+ strategy_base._queue_task(host=mock_host, task=mock_task, task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 0)
+ self.assertEqual(strategy_base._pending_results, 3)
+ finally:
+ tqm.cleanup()
+
+ def test_strategy_base_process_pending_results(self):
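+        # Exercise _wait_on_pending_results against a series of queued
+        # TaskResults: ok, failed, unreachable, skipped, add_host/add_group
+        # side effects, and a handler notification.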
+ mock_tqm = MagicMock()
+ mock_tqm._terminated = False
+ mock_tqm._failed_hosts = dict()
+ mock_tqm._unreachable_hosts = dict()
+ mock_tqm.send_callback.return_value = None
+
+ queue_items = []
+
+ def _queue_empty(*args, **kwargs):
+ return len(queue_items) == 0
+
+ def _queue_get(*args, **kwargs):
+ if len(queue_items) == 0:
+ raise Queue.Empty
+ else:
+ return queue_items.pop()
+
+ def _queue_put(item, *args, **kwargs):
+ queue_items.append(item)
+
+ mock_queue = MagicMock()
+ mock_queue.empty.side_effect = _queue_empty
+ mock_queue.get.side_effect = _queue_get
+ mock_queue.put.side_effect = _queue_put
+ mock_tqm._final_q = mock_queue
+
+ mock_tqm._stats = MagicMock()
+ mock_tqm._stats.increment.return_value = None
+
+ mock_play = MagicMock()
+
+ mock_host = MagicMock()
+ mock_host.name = 'test01'
+ mock_host.vars = dict()
+ mock_host.get_vars.return_value = dict()
+ mock_host.has_hostkey = True
+
+ mock_task = MagicMock()
+ mock_task._role = None
+ mock_task._parent = None
+ mock_task.ignore_errors = False
+ mock_task.ignore_unreachable = False
+ mock_task._uuid = uuid.uuid4()
+ mock_task.loop = None
+ mock_task.copy.return_value = mock_task
+
+ mock_handler_task = Handler()
+ mock_handler_task.name = 'test handler'
+ mock_handler_task.action = 'foo'
+ mock_handler_task._parent = None
+ mock_handler_task._uuid = 'xxxxxxxxxxxxx'
+
+ mock_iterator = MagicMock()
+ mock_iterator._play = mock_play
+ mock_iterator.mark_host_failed.return_value = None
+ mock_iterator.get_next_task_for_host.return_value = (None, None)
+
+ mock_handler_block = MagicMock()
+ mock_handler_block.block = [mock_handler_task]
+ mock_handler_block.rescue = []
+ mock_handler_block.always = []
+ mock_play.handlers = [mock_handler_block]
+
+ mock_group = MagicMock()
+ mock_group.add_host.return_value = None
+
+ def _get_host(host_name):
+ if host_name == 'test01':
+ return mock_host
+ return None
+
+ def _get_group(group_name):
+ if group_name in ('all', 'foo'):
+ return mock_group
+ return None
+
+ mock_inventory = MagicMock()
+ mock_inventory._hosts_cache = dict()
+ mock_inventory.hosts.return_value = mock_host
+ mock_inventory.get_host.side_effect = _get_host
+ mock_inventory.get_group.side_effect = _get_group
+ mock_inventory.clear_pattern_cache.return_value = None
+ mock_inventory.get_host_vars.return_value = {}
+ mock_inventory.hosts.get.return_value = mock_host
+
+ mock_var_mgr = MagicMock()
+ mock_var_mgr.set_host_variable.return_value = None
+ mock_var_mgr.set_host_facts.return_value = None
+ mock_var_mgr.get_vars.return_value = dict()
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._inventory = mock_inventory
+ strategy_base._variable_manager = mock_var_mgr
+ strategy_base._blocked_hosts = dict()
+
+ def _has_dead_workers():
+ return False
+
+ strategy_base._tqm.has_dead_workers.side_effect = _has_dead_workers
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 0)
+
+ task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True))
+ queue_items.append(task_result)
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+
+ mock_queued_task_cache = {
+ (mock_host.name, mock_task._uuid): {
+ 'task': mock_task,
+ 'host': mock_host,
+ 'task_vars': {},
+ 'play_context': {},
+ }
+ }
+
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+
+ task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"failed":true}')
+ queue_items.append(task_result)
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ mock_iterator.is_failed.return_value = True
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+ # self.assertIn('test01', mock_tqm._failed_hosts)
+ # del mock_tqm._failed_hosts['test01']
+ mock_iterator.is_failed.return_value = False
+
+ task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"unreachable": true}')
+ queue_items.append(task_result)
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+ self.assertIn('test01', mock_tqm._unreachable_hosts)
+ del mock_tqm._unreachable_hosts['test01']
+
+ task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"skipped": true}')
+ queue_items.append(task_result)
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0], task_result)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+
+ queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+
+ queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo'))))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+
+ queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler'])))
+ strategy_base._blocked_hosts['test01'] = True
+ strategy_base._pending_results = 1
+ strategy_base._queued_task_cache = deepcopy(mock_queued_task_cache)
+ results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
+ self.assertEqual(len(results), 1)
+ self.assertEqual(strategy_base._pending_results, 0)
+ self.assertNotIn('test01', strategy_base._blocked_hosts)
+ self.assertTrue(mock_handler_task.is_host_notified(mock_host))
+
+ # queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
+ # results = strategy_base._process_pending_results(iterator=mock_iterator)
+ # self.assertEqual(len(results), 0)
+ # self.assertEqual(strategy_base._pending_results, 1)
+
+ # queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
+ # results = strategy_base._process_pending_results(iterator=mock_iterator)
+ # self.assertEqual(len(results), 0)
+ # self.assertEqual(strategy_base._pending_results, 1)
+
+ # queue_items.append(('bad'))
+ # self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
+ strategy_base.cleanup()
+
+ def test_strategy_base_load_included_file(self):
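+        # Loading the well-formed 'test.yml' include should succeed, while the
+        # empty 'bad.yml' must come back as an empty block list.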
+ fake_loader = DictDataLoader({
+ "test.yml": """
+ - debug: msg='foo'
+ """,
+ "bad.yml": """
+ """,
+ })
+
+ queue_items = []
+
+ def _queue_empty(*args, **kwargs):
+ return len(queue_items) == 0
+
+ def _queue_get(*args, **kwargs):
+ if len(queue_items) == 0:
+ raise Queue.Empty
+ else:
+ return queue_items.pop()
+
+ def _queue_put(item, *args, **kwargs):
+ queue_items.append(item)
+
+ mock_queue = MagicMock()
+ mock_queue.empty.side_effect = _queue_empty
+ mock_queue.get.side_effect = _queue_get
+ mock_queue.put.side_effect = _queue_put
+
+ mock_tqm = MagicMock()
+ mock_tqm._final_q = mock_queue
+
+ strategy_base = StrategyBase(tqm=mock_tqm)
+ strategy_base._loader = fake_loader
+ strategy_base.cleanup()
+
+ mock_play = MagicMock()
+
+ mock_block = MagicMock()
+ mock_block._play = mock_play
+ mock_block.vars = dict()
+
+ mock_task = MagicMock()
+ mock_task._block = mock_block
+ mock_task._role = None
+ mock_task._parent = None
+
+ mock_iterator = MagicMock()
+ mock_iterator.mark_host_failed.return_value = None
+
+ mock_inc_file = MagicMock()
+ mock_inc_file._task = mock_task
+
+ mock_inc_file._filename = "test.yml"
+ res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
+
+ mock_inc_file._filename = "bad.yml"
+ res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
+ self.assertEqual(res, [])
+
+ @patch.object(WorkerProcess, 'run')
+ def test_strategy_base_run_handlers(self, mock_worker):
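+        # Run the handler machinery with a real TaskQueueManager and a
+        # pre-queued TaskResult for the notified handler task.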
+ def fake_run(*args):
+ return
+ mock_worker.side_effect = fake_run
+ mock_play_context = MagicMock()
+
+ mock_handler_task = Handler()
+ mock_handler_task.action = 'foo'
+ mock_handler_task.cached_name = False
+ mock_handler_task.name = "test handler"
+ mock_handler_task.listen = []
+ mock_handler_task._role = None
+ mock_handler_task._parent = None
+ mock_handler_task._uuid = 'xxxxxxxxxxxxxxxx'
+
+ mock_handler = MagicMock()
+ mock_handler.block = [mock_handler_task]
+ mock_handler.flag_for_host.return_value = False
+
+ mock_play = MagicMock()
+ mock_play.handlers = [mock_handler]
+
+ mock_host = MagicMock(Host)
+ mock_host.name = "test01"
+ mock_host.has_hostkey = True
+
+ mock_inventory = MagicMock()
+ mock_inventory.get_hosts.return_value = [mock_host]
+ mock_inventory.get.return_value = mock_host
+ mock_inventory.get_host.return_value = mock_host
+
+ mock_var_mgr = MagicMock()
+ mock_var_mgr.get_vars.return_value = dict()
+
+ mock_iterator = MagicMock()
+ mock_iterator._play = mock_play
+
+ fake_loader = DictDataLoader()
+
+ tqm = TaskQueueManager(
+ inventory=mock_inventory,
+ variable_manager=mock_var_mgr,
+ loader=fake_loader,
+ passwords=None,
+ forks=5,
+ )
+ tqm._initialize_processes(3)
+ tqm.hostvars = dict()
+
+ try:
+ strategy_base = StrategyBase(tqm=tqm)
+
+ strategy_base._inventory = mock_inventory
+
+ task_result = TaskResult(mock_host.name, mock_handler_task._uuid, dict(changed=False))
+ strategy_base._queued_task_cache = dict()
+ strategy_base._queued_task_cache[(mock_host.name, mock_handler_task._uuid)] = {
+ 'task': mock_handler_task,
+ 'host': mock_host,
+ 'task_vars': {},
+ 'play_context': mock_play_context
+ }
+ tqm._final_q.put(task_result)
+
+ result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
+ finally:
+ strategy_base.cleanup()
+ tqm.cleanup()
diff --git a/test/units/plugins/test_plugins.py b/test/units/plugins/test_plugins.py
new file mode 100644
index 00000000..c9d80cda
--- /dev/null
+++ b/test/units/plugins/test_plugins.py
@@ -0,0 +1,134 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from units.compat import unittest
+from units.compat.builtins import BUILTINS
+from units.compat.mock import patch, MagicMock
+from ansible.plugins.loader import PluginLoader, PluginPathContext
+
+
+class TestErrors(unittest.TestCase):
+
+ @patch.object(PluginLoader, '_get_paths')
+ def test_print_paths(self, mock_method):
+ mock_method.return_value = ['/path/one', '/path/two', '/path/three']
+ pl = PluginLoader('foo', 'foo', '', 'test_plugins')
+ paths = pl.print_paths()
+ expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three'])
+ self.assertEqual(paths, expected_paths)
+
+ def test_plugins__get_package_paths_no_package(self):
+ pl = PluginLoader('test', '', 'test', 'test_plugin')
+ self.assertEqual(pl._get_package_paths(), [])
+
+ def test_plugins__get_package_paths_with_package(self):
+        # _get_package_paths() uses __import__ to load a python package, then
+        # reads the __file__ attribute of the result to derive the library
+        # path, so we mock that chain here and patch the builtin __import__
+        # to return our mocked result
+ foo = MagicMock()
+ bar = MagicMock()
+ bam = MagicMock()
+ bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py'
+ bar.bam = bam
+ foo.return_value.bar = bar
+ pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin')
+ with patch('{0}.__import__'.format(BUILTINS), foo):
+ self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam'])
+
+ def test_plugins__get_paths(self):
+ pl = PluginLoader('test', '', 'test', 'test_plugin')
+ pl._paths = [PluginPathContext('/path/one', False),
+ PluginPathContext('/path/two', True)]
+ self.assertEqual(pl._get_paths(), ['/path/one', '/path/two'])
+
+ # NOT YET WORKING
+ # def fake_glob(path):
+ # if path == 'test/*':
+ # return ['test/foo', 'test/bar', 'test/bam']
+ # elif path == 'test/*/*'
+ # m._paths = None
+ # mock_glob = MagicMock()
+ # mock_glob.return_value = []
+ # with patch('glob.glob', mock_glob):
+ # pass
+
+ def assertPluginLoaderConfigBecomes(self, arg, expected):
+ pl = PluginLoader('test', '', arg, 'test_plugin')
+ self.assertEqual(pl.config, expected)
+
+ def test_plugin__init_config_list(self):
+ config = ['/one', '/two']
+ self.assertPluginLoaderConfigBecomes(config, config)
+
+ def test_plugin__init_config_str(self):
+ self.assertPluginLoaderConfigBecomes('test', ['test'])
+
+ def test_plugin__init_config_none(self):
+ self.assertPluginLoaderConfigBecomes(None, [])
+
+ def test__load_module_source_no_duplicate_names(self):
+ '''
+        This test simulates importing 2 plugins with the same name,
+        and validates that the import is short-circuited if a file with the
+        same name has already been imported.
+ '''
+
+ fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
+
+ pl = PluginLoader('test', '', 'test', 'test_plugin')
+ one = pl._load_module_source('import_fixture', os.path.join(fixture_path, 'import_fixture.py'))
+        # This line wouldn't even succeed if we didn't short-circuit on finding a duplicate name
+ two = pl._load_module_source('import_fixture', '/path/to/import_fixture.py')
+
+ self.assertEqual(one, two)
+
+ @patch('ansible.plugins.loader.glob')
+ @patch.object(PluginLoader, '_get_paths')
+ def test_all_no_duplicate_names(self, gp_mock, glob_mock):
+ '''
+ This test goes along with ``test__load_module_source_no_duplicate_names``
+ and ensures that we ignore duplicate imports on multiple paths
+ '''
+
+ fixture_path = os.path.join(os.path.dirname(__file__), 'loader_fixtures')
+
+ gp_mock.return_value = [
+ fixture_path,
+ '/path/to'
+ ]
+
+ glob_mock.glob.side_effect = [
+ [os.path.join(fixture_path, 'import_fixture.py')],
+ ['/path/to/import_fixture.py']
+ ]
+
+ pl = PluginLoader('test', '', 'test', 'test_plugin')
+        # ``PluginLoader.all`` returns a generator, so wrapping it in ``list()``
+        # both forces it to run and lets us take a ``len()`` of the result.
+ plugins = list(pl.all())
+ self.assertEqual(len(plugins), 1)
+
+ self.assertIn(os.path.join(fixture_path, 'import_fixture.py'), pl._module_cache)
+ self.assertNotIn('/path/to/import_fixture.py', pl._module_cache)
diff --git a/test/units/regex/test_invalid_var_names.py b/test/units/regex/test_invalid_var_names.py
new file mode 100644
index 00000000..d47e68d3
--- /dev/null
+++ b/test/units/regex/test_invalid_var_names.py
@@ -0,0 +1,27 @@
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+
+from ansible import constants as C
+
+
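+# Each case is (variable name, characters INVALID_VARIABLE_NAMES should match,
+# the sanitized name after substituting '_' for them).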
+test_cases = (('not-valid', ['-'], 'not_valid'), ('not!valid@either', ['!', '@'], 'not_valid_either'), ('1_nor_This', ['1'], '__nor_This'))
+
+
+class TestInvalidVars(unittest.TestCase):
+
+ def test_positive_matches(self):
+
+ for name, invalid, sanitized in test_cases:
+ self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(name), invalid)
+
+ def test_negative_matches(self):
+ for name in ('this_is_valid', 'Also_1_valid', 'noproblem'):
+ self.assertEqual(C.INVALID_VARIABLE_NAMES.findall(name), [])
+
+ def test_get_setting(self):
+
+ for name, invalid, sanitized in test_cases:
+ self.assertEqual(C.INVALID_VARIABLE_NAMES.sub('_', name), sanitized)
diff --git a/test/units/requirements.txt b/test/units/requirements.txt
new file mode 100644
index 00000000..153500e3
--- /dev/null
+++ b/test/units/requirements.txt
@@ -0,0 +1,6 @@
+pycrypto
+passlib
+pywinrm
+pytz
+unittest2 ; python_version < '2.7'
+pexpect
diff --git a/test/units/template/__init__.py b/test/units/template/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/template/__init__.py
diff --git a/test/units/template/test_native_concat.py b/test/units/template/test_native_concat.py
new file mode 100644
index 00000000..db85a73b
--- /dev/null
+++ b/test/units/template/test_native_concat.py
@@ -0,0 +1,28 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible import constants as C
+from ansible.errors import AnsibleUndefinedVariable
+
+# DEFAULT_JINJA2_NATIVE must be overridden before importing Templar so the
+# native Jinja2 machinery is selected within the template module
+C.DEFAULT_JINJA2_NATIVE = True
+from ansible.template import Templar
+
+from units.mock.loader import DictDataLoader
+
+
+# https://github.com/ansible/ansible/issues/52158
+def test_undefined_variable():
+ fake_loader = DictDataLoader({})
+ variables = {}
+ templar = Templar(loader=fake_loader, variables=variables)
+
+ with pytest.raises(AnsibleUndefinedVariable):
+ templar.template("{{ missing }}")
diff --git a/test/units/template/test_safe_eval.py b/test/units/template/test_safe_eval.py
new file mode 100644
index 00000000..89ff8a0e
--- /dev/null
+++ b/test/units/template/test_safe_eval.py
@@ -0,0 +1,44 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+from collections import defaultdict
+
+from units.compat import unittest
+from ansible.template.safe_eval import safe_eval
+
+
+class TestSafeEval(unittest.TestCase):
+
+ def test_safe_eval_usage(self):
+ # test safe eval calls with different possible types for the
+ # locals dictionary, to ensure we don't run into problems like
+ # ansible/ansible/issues/12206 again
+ for locals_vars in (dict(), defaultdict(dict)):
+ self.assertEqual(safe_eval('True', locals=locals_vars), True)
+ self.assertEqual(safe_eval('False', locals=locals_vars), False)
+ self.assertEqual(safe_eval('0', locals=locals_vars), 0)
+ self.assertEqual(safe_eval('[]', locals=locals_vars), [])
+ self.assertEqual(safe_eval('{}', locals=locals_vars), {})
+
+ @unittest.skipUnless(sys.version_info[:2] >= (2, 7), "Python 2.6 has no set literals")
+ def test_set_literals(self):
+ self.assertEqual(safe_eval('{0}'), set([0]))
diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py
new file mode 100644
index 00000000..dd6985ce
--- /dev/null
+++ b/test/units/template/test_templar.py
@@ -0,0 +1,446 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from jinja2.runtime import Context
+
+from units.compat import unittest
+from units.compat.mock import patch
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
+from ansible.module_utils.six import string_types
+from ansible.template import Templar, AnsibleContext, AnsibleEnvironment, AnsibleUndefined
+from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var
+from units.mock.loader import DictDataLoader
+
+
+class BaseTemplar(object):
+ def setUp(self):
+ self.test_vars = dict(
+ foo="bar",
+ bam="{{foo}}",
+ num=1,
+ var_true=True,
+ var_false=False,
+ var_dict=dict(a="b"),
+ bad_dict="{a='b'",
+ var_list=[1],
+ recursive="{{recursive}}",
+ some_var="blip",
+ some_static_var="static_blip",
+ some_keyword="{{ foo }}",
+ some_unsafe_var=wrap_var("unsafe_blip"),
+ some_static_unsafe_var=wrap_var("static_unsafe_blip"),
+ some_unsafe_keyword=wrap_var("{{ foo }}"),
+ str_with_error="{{ 'str' | from_json }}",
+ )
+ self.fake_loader = DictDataLoader({
+ "/path/to/my_file.txt": "foo\n",
+ })
+ self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
+ self._ansible_context = AnsibleContext(self.templar.environment, {}, {}, {})
+
+ def is_unsafe(self, obj):
+ return self._ansible_context._is_unsafe(obj)
+
+
+# class used for testing arbitrary objects passed to template
+class SomeClass(object):
+ foo = 'bar'
+
+ def __init__(self):
+ self.blip = 'blip'
+
+
+class SomeUnsafeClass(AnsibleUnsafe):
+ def __init__(self):
+ super(SomeUnsafeClass, self).__init__()
+ self.blip = 'unsafe blip'
+
+
+class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
+ def test_lookup_jinja_dict_key_in_static_vars(self):
+ res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
+ static_vars=['some_static_var'])
+ # self.assertEqual(res['{{ a_keyword }}'], "blip")
+ print(res)
+
+ def test_is_possibly_template_true(self):
+ tests = [
+ '{{ foo }}',
+ '{% foo %}',
+ '{# foo #}',
+ '{# {{ foo }} #}',
+ '{# {{ nothing }} {# #}',
+ '{# {{ nothing }} {# #} #}',
+ '{% raw %}{{ foo }}{% endraw %}',
+ '{{',
+ '{%',
+ '{#',
+ '{% raw',
+ ]
+ for test in tests:
+ self.assertTrue(self.templar.is_possibly_template(test))
+
+ def test_is_possibly_template_false(self):
+ tests = [
+ '{',
+ '%',
+ '#',
+ 'foo',
+ '}}',
+ '%}',
+ 'raw %}',
+ '#}',
+ ]
+ for test in tests:
+ self.assertFalse(self.templar.is_possibly_template(test))
+
+ def test_is_possible_template(self):
+ """This test ensures that a broken template still gets templated"""
+ # Purposefully invalid jinja
+ self.assertRaises(AnsibleError, self.templar.template, '{{ foo|default(False)) }}')
+
+ def test_is_template_true(self):
+ tests = [
+ '{{ foo }}',
+ '{% foo %}',
+ '{# foo #}',
+ '{# {{ foo }} #}',
+ '{# {{ nothing }} {# #}',
+ '{# {{ nothing }} {# #} #}',
+ '{% raw %}{{ foo }}{% endraw %}',
+ ]
+ for test in tests:
+ self.assertTrue(self.templar.is_template(test))
+
+ def test_is_template_false(self):
+ tests = [
+ 'foo',
+ '{{ foo',
+ '{% foo',
+ '{# foo',
+ '{{ foo %}',
+ '{{ foo #}',
+ '{% foo }}',
+ '{% foo #}',
+ '{# foo %}',
+ '{# foo }}',
+ '{{ foo {{',
+ '{% raw %}{% foo %}',
+ ]
+ for test in tests:
+ self.assertFalse(self.templar.is_template(test))
+
+ def test_is_template_raw_string(self):
+ res = self.templar.is_template('foo')
+ self.assertFalse(res)
+
+ def test_is_template_none(self):
+ res = self.templar.is_template(None)
+ self.assertFalse(res)
+
+ def test_template_convert_bare_string(self):
+ res = self.templar.template('foo', convert_bare=True)
+ self.assertEqual(res, 'bar')
+
+ def test_template_convert_bare_nested(self):
+ res = self.templar.template('bam', convert_bare=True)
+ self.assertEqual(res, 'bar')
+
+ def test_template_convert_bare_unsafe(self):
+ res = self.templar.template('some_unsafe_var', convert_bare=True)
+ self.assertEqual(res, 'unsafe_blip')
+ # self.assertIsInstance(res, AnsibleUnsafe)
+ self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
+
+ def test_template_convert_bare_filter(self):
+ res = self.templar.template('bam|capitalize', convert_bare=True)
+ self.assertEqual(res, 'Bar')
+
+ def test_template_convert_bare_filter_unsafe(self):
+ res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True)
+ self.assertEqual(res, 'Unsafe_blip')
+ # self.assertIsInstance(res, AnsibleUnsafe)
+ self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
+
+ def test_template_convert_data(self):
+ res = self.templar.template('{{foo}}', convert_data=True)
+ self.assertTrue(res)
+ self.assertEqual(res, 'bar')
+
+ @patch('ansible.template.safe_eval', side_effect=AnsibleError)
+ def test_template_convert_data_template_in_data(self, mock_safe_eval):
+ res = self.templar.template('{{bam}}', convert_data=True)
+ self.assertTrue(res)
+ self.assertEqual(res, 'bar')
+
+ def test_template_convert_data_bare(self):
+ res = self.templar.template('bam', convert_data=True)
+ self.assertTrue(res)
+ self.assertEqual(res, 'bam')
+
+ def test_template_convert_data_to_json(self):
+ res = self.templar.template('{{bam|to_json}}', convert_data=True)
+ self.assertTrue(res)
+ self.assertEqual(res, '"bar"')
+
+ def test_template_convert_data_convert_bare_data_bare(self):
+ res = self.templar.template('bam', convert_data=True, convert_bare=True)
+ self.assertTrue(res)
+ self.assertEqual(res, 'bar')
+
+ def test_template_unsafe_non_string(self):
+ unsafe_obj = AnsibleUnsafe()
+ res = self.templar.template(unsafe_obj)
+ self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
+
+ def test_template_unsafe_non_string_subclass(self):
+ unsafe_obj = SomeUnsafeClass()
+ res = self.templar.template(unsafe_obj)
+ self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
+
+ def test_weird(self):
+ data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
+ self.assertRaisesRegexp(AnsibleError,
+ 'template error while templating string',
+ self.templar.template,
+ data)
+
+ def test_template_with_error(self):
+ """Check that AnsibleError is raised, fail if an unhandled exception is raised"""
+ self.assertRaises(AnsibleError, self.templar.template, "{{ str_with_error }}")
+
+
+class TestTemplarMisc(BaseTemplar, unittest.TestCase):
+ def test_templar_simple(self):
+
+ templar = self.templar
+ # test some basic templating
+ self.assertEqual(templar.template("{{foo}}"), "bar")
+ self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
+ self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
+ self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
+ self.assertEqual(templar.template("{{bam}}"), "bar")
+ self.assertEqual(templar.template("{{num}}"), 1)
+ self.assertEqual(templar.template("{{var_true}}"), True)
+ self.assertEqual(templar.template("{{var_false}}"), False)
+ self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
+ self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
+ self.assertEqual(templar.template("{{var_list}}"), [1])
+ self.assertEqual(templar.template(1, convert_bare=True), 1)
+
+ # force errors
+ self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
+ self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
+ self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
+ self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
+ self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
+
+ # test with fail_on_undefined=False
+ self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
+
+ # test setting available_variables
+ templar.available_variables = dict(foo="bam")
+ self.assertEqual(templar.template("{{foo}}"), "bam")
+ # variables must be a dict() for available_variables setter
+ # FIXME Use assertRaises() as a context manager (added in 2.7) once we do not run tests on Python 2.6 anymore.
+ try:
+ templar.available_variables = "foo=bam"
+ except AssertionError:
+ pass
+ except Exception as e:
+ self.fail(e)
+
+ def test_templar_escape_backslashes(self):
+        # Rule of thumb: if escape_backslashes is True, you should end up with
+        # the same number of backslashes as you started with.
+ self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
+ self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
+ self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
+ self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
+ self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
+ self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
+
+ def test_template_jinja2_extensions(self):
+ fake_loader = DictDataLoader({})
+ templar = Templar(loader=fake_loader)
+
+ old_exts = C.DEFAULT_JINJA2_EXTENSIONS
+ try:
+ C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
+ self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
+ finally:
+ C.DEFAULT_JINJA2_EXTENSIONS = old_exts
+
+
+class TestTemplarLookup(BaseTemplar, unittest.TestCase):
+ def test_lookup_missing_plugin(self):
+ self.assertRaisesRegexp(AnsibleError,
+ r'lookup plugin \(not_a_real_lookup_plugin\) not found',
+ self.templar._lookup,
+ 'not_a_real_lookup_plugin',
+ 'an_arg', a_keyword_arg='a_keyword_arg_value')
+
+ def test_lookup_list(self):
+ res = self.templar._lookup('list', 'an_arg', 'another_arg')
+ self.assertEqual(res, 'an_arg,another_arg')
+
+ def test_lookup_jinja_undefined(self):
+ self.assertRaisesRegexp(AnsibleUndefinedVariable,
+ "'an_undefined_jinja_var' is undefined",
+ self.templar._lookup,
+ 'list', '{{ an_undefined_jinja_var }}')
+
+ def test_lookup_jinja_defined(self):
+ res = self.templar._lookup('list', '{{ some_var }}')
+ self.assertTrue(self.is_unsafe(res))
+ # self.assertIsInstance(res, AnsibleUnsafe)
+
+ def test_lookup_jinja_dict_string_passed(self):
+ self.assertRaisesRegexp(AnsibleError,
+ "with_dict expects a dict",
+ self.templar._lookup,
+ 'dict',
+ '{{ some_var }}')
+
+ def test_lookup_jinja_dict_list_passed(self):
+ self.assertRaisesRegexp(AnsibleError,
+ "with_dict expects a dict",
+ self.templar._lookup,
+ 'dict',
+ ['foo', 'bar'])
+
+ def test_lookup_jinja_kwargs(self):
+ res = self.templar._lookup('list', 'blip', random_keyword='12345')
+ self.assertTrue(self.is_unsafe(res))
+ # self.assertIsInstance(res, AnsibleUnsafe)
+
+ def test_lookup_jinja_list_wantlist(self):
+ res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
+ self.assertEqual(res, ["blip"])
+
+ def test_lookup_jinja_list_wantlist_undefined(self):
+ self.assertRaisesRegexp(AnsibleUndefinedVariable,
+ "'some_undefined_var' is undefined",
+ self.templar._lookup,
+ 'list',
+ '{{ some_undefined_var }}',
+ wantlist=True)
+
+ def test_lookup_jinja_list_wantlist_unsafe(self):
+ res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
+ for lookup_result in res:
+ self.assertTrue(self.is_unsafe(lookup_result))
+ # self.assertIsInstance(lookup_result, AnsibleUnsafe)
+
+        # TODO: Should this be an AnsibleUnsafe
+ # self.assertIsInstance(res, AnsibleUnsafe)
+
+ def test_lookup_jinja_dict(self):
+ res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
+ self.assertEqual(res['{{ a_keyword }}'], "blip")
+ # TODO: Should this be an AnsibleUnsafe
+ # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
+ # self.assertIsInstance(res, AnsibleUnsafe)
+
+ def test_lookup_jinja_dict_unsafe(self):
+ res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
+ self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
+ # self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
+ # TODO: Should this be an AnsibleUnsafe
+ # self.assertIsInstance(res, AnsibleUnsafe)
+
+ def test_lookup_jinja_dict_unsafe_value(self):
+ res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
+ self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
+ # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
+ # TODO: Should this be an AnsibleUnsafe
+ # self.assertIsInstance(res, AnsibleUnsafe)
+
+ def test_lookup_jinja_none(self):
+ res = self.templar._lookup('list', None)
+ self.assertIsNone(res)
+
+
+class TestAnsibleContext(BaseTemplar, unittest.TestCase):
+ def _context(self, variables=None):
+ variables = variables or {}
+
+ env = AnsibleEnvironment()
+ context = AnsibleContext(env, parent={}, name='some_context',
+ blocks={})
+
+ for key, value in variables.items():
+ context.vars[key] = value
+
+ return context
+
+ def test(self):
+ context = self._context()
+ self.assertIsInstance(context, AnsibleContext)
+ self.assertIsInstance(context, Context)
+
+ def test_resolve_unsafe(self):
+ context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
+ res = context.resolve('some_unsafe_key')
+ # self.assertIsInstance(res, AnsibleUnsafe)
+ self.assertTrue(self.is_unsafe(res),
+ 'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
+
+ def test_resolve_unsafe_list(self):
+ context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
+ res = context.resolve('some_unsafe_key')
+ # self.assertIsInstance(res[0], AnsibleUnsafe)
+ self.assertTrue(self.is_unsafe(res),
+ 'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
+
+ def test_resolve_unsafe_dict(self):
+ context = self._context(variables={'some_unsafe_key':
+ {'an_unsafe_dict': wrap_var('some unsafe string 1')}
+ })
+ res = context.resolve('some_unsafe_key')
+ self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
+ 'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
+
+ def test_resolve(self):
+ context = self._context(variables={'some_key': 'some_string'})
+ res = context.resolve('some_key')
+ self.assertEqual(res, 'some_string')
+ # self.assertNotIsInstance(res, AnsibleUnsafe)
+ self.assertFalse(self.is_unsafe(res),
+ 'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
+
+ def test_resolve_none(self):
+ context = self._context(variables={'some_key': None})
+ res = context.resolve('some_key')
+ self.assertEqual(res, None)
+ # self.assertNotIsInstance(res, AnsibleUnsafe)
+ self.assertFalse(self.is_unsafe(res),
+ 'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
+
+ def test_is_unsafe(self):
+ context = self._context()
+ self.assertFalse(context._is_unsafe(AnsibleUndefined()))
diff --git a/test/units/template/test_template_utilities.py b/test/units/template/test_template_utilities.py
new file mode 100644
index 00000000..1044895f
--- /dev/null
+++ b/test/units/template/test_template_utilities.py
@@ -0,0 +1,117 @@
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import jinja2
+from units.compat import unittest
+
+from ansible.template import AnsibleUndefined, _escape_backslashes, _count_newlines_from_end
+
+# These are internal utility functions only needed for templating. They're
+# algorithmic, so they make good candidates for unit testing on their own.
+
+
+class TestBackslashEscape(unittest.TestCase):
+
+ test_data = (
+ # Test backslashes in a filter arg are double escaped
+ dict(
+ template=u"{{ 'test2 %s' | format('\\1') }}",
+ intermediate=u"{{ 'test2 %s' | format('\\\\1') }}",
+ expectation=u"test2 \\1",
+ args=dict()
+ ),
+ # Test backslashes inside the jinja2 var itself are double
+ # escaped
+ dict(
+ template=u"Test 2\\3: {{ '\\1 %s' | format('\\2') }}",
+ intermediate=u"Test 2\\3: {{ '\\\\1 %s' | format('\\\\2') }}",
+ expectation=u"Test 2\\3: \\1 \\2",
+ args=dict()
+ ),
+ # Test backslashes outside of the jinja2 var are not double
+ # escaped
+ dict(
+ template=u"Test 2\\3: {{ 'test2 %s' | format('\\1') }}; \\done",
+ intermediate=u"Test 2\\3: {{ 'test2 %s' | format('\\\\1') }}; \\done",
+ expectation=u"Test 2\\3: test2 \\1; \\done",
+ args=dict()
+ ),
+ # Test backslashes in a variable sent to a filter are handled
+ dict(
+ template=u"{{ 'test2 %s' | format(var1) }}",
+ intermediate=u"{{ 'test2 %s' | format(var1) }}",
+ expectation=u"test2 \\1",
+ args=dict(var1=u'\\1')
+ ),
+ # Test backslashes in a variable expanded by jinja2 are double
+ # escaped
+ dict(
+ template=u"Test 2\\3: {{ var1 | format('\\2') }}",
+ intermediate=u"Test 2\\3: {{ var1 | format('\\\\2') }}",
+ expectation=u"Test 2\\3: \\1 \\2",
+ args=dict(var1=u'\\1 %s')
+ ),
+ )
+
+ def setUp(self):
+ self.env = jinja2.Environment()
+
+ def test_backslash_escaping(self):
+
+ for test in self.test_data:
+ intermediate = _escape_backslashes(test['template'], self.env)
+ self.assertEqual(intermediate, test['intermediate'])
+ template = jinja2.Template(intermediate)
+ args = test['args']
+ self.assertEqual(template.render(**args), test['expectation'])
+
+
+class TestCountNewlines(unittest.TestCase):
+
+ def test_zero_length_string(self):
+ self.assertEqual(_count_newlines_from_end(u''), 0)
+
+ def test_short_string(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick\n'), 1)
+
+ def test_one_newline(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n'), 1)
+
+ def test_multiple_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000 + u'\n\n\n'), 3)
+
+ def test_zero_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' * 1000), 0)
+
+ def test_all_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'\n' * 10), 10)
+
+ def test_mostly_newlines(self):
+ self.assertEqual(_count_newlines_from_end(u'The quick brown fox jumped over the lazy dog' + u'\n' * 1000), 1000)
+
+
+class TestAnsibleUndefined(unittest.TestCase):
+ def test_getattr(self):
+ val = AnsibleUndefined()
+
+ self.assertIs(getattr(val, 'foo'), val)
+
+ self.assertRaises(AttributeError, getattr, val, '__UNSAFE__')
diff --git a/test/units/template/test_vars.py b/test/units/template/test_vars.py
new file mode 100644
index 00000000..74e67839
--- /dev/null
+++ b/test/units/template/test_vars.py
@@ -0,0 +1,81 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from units.compat import unittest
+from units.compat.mock import MagicMock
+
+from ansible.template.vars import AnsibleJ2Vars
+
+
+class TestVars(unittest.TestCase):
+ def setUp(self):
+ self.mock_templar = MagicMock(name='mock_templar')
+
+ def test(self):
+ ajvars = AnsibleJ2Vars(None, None)
+ print(ajvars)
+
+ def test_globals_empty_2_8(self):
+ ajvars = AnsibleJ2Vars(self.mock_templar, {})
+ res28 = self._dict_jinja28(ajvars)
+ self.assertIsInstance(res28, dict)
+
+ def test_globals_empty_2_9(self):
+ ajvars = AnsibleJ2Vars(self.mock_templar, {})
+ res29 = self._dict_jinja29(ajvars)
+ self.assertIsInstance(res29, dict)
+
+ def _assert_globals(self, res):
+ self.assertIsInstance(res, dict)
+ self.assertIn('foo', res)
+ self.assertEqual(res['foo'], 'bar')
+
+ def test_globals_2_8(self):
+ ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]})
+ res28 = self._dict_jinja28(ajvars)
+ self._assert_globals(res28)
+
+ def test_globals_2_9(self):
+ ajvars = AnsibleJ2Vars(self.mock_templar, {'foo': 'bar', 'blip': [1, 2, 3]})
+ res29 = self._dict_jinja29(ajvars)
+ self._assert_globals(res29)
+
+ def _dicts(self, ajvars):
+ print(ajvars)
+ res28 = self._dict_jinja28(ajvars)
+ res29 = self._dict_jinja29(ajvars)
+ # res28_other = self._dict_jinja28(ajvars, {'other_key': 'other_value'})
+ # other = {'other_key': 'other_value'}
+ # res29_other = self._dict_jinja29(ajvars, *other)
+ print('res28: %s' % res28)
+ print('res29: %s' % res29)
+ # print('res28_other: %s' % res28_other)
+ # print('res29_other: %s' % res29_other)
+ # return (res28, res29, res28_other, res29_other)
+ # assert ajvars == res28
+ # assert ajvars == res29
+ return (res28, res29)
+
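+ # the 2.8 helper splats args into dict(); the 2.9 helper converts a single
+ # mapping, matching how the respective Jinja2 versions consume AnsibleJ2Vars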
+ def _dict_jinja28(self, *args, **kwargs):
+ return dict(*args, **kwargs)
+
+ def _dict_jinja29(self, the_vars):
+ return dict(the_vars)
diff --git a/test/units/test_constants.py b/test/units/test_constants.py
new file mode 100644
index 00000000..4cf2f7f9
--- /dev/null
+++ b/test/units/test_constants.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+# (c) 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pwd
+import os
+
+import pytest
+
+from ansible import constants
+from ansible.module_utils.six import StringIO
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils._text import to_text
+
+
+@pytest.fixture
+def cfgparser():
+ CFGDATA = StringIO("""
+[defaults]
+defaults_one = 'data_defaults_one'
+
+[level1]
+level1_one = 'data_level1_one'
+ """)
+ p = configparser.ConfigParser()
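+ # readfp() is the spelling available on both Python 2 and 3
+ # (deprecated on Python 3 in favor of read_file())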
+ p.readfp(CFGDATA)
+ return p
+
+
+@pytest.fixture
+def user():
+ user = {}
+ user['uid'] = os.geteuid()
+
+ pwd_entry = pwd.getpwuid(user['uid'])
+ user['username'] = pwd_entry.pw_name
+ user['home'] = pwd_entry.pw_dir
+
+ return user
+
+
+@pytest.fixture
+def cfg_file():
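+ # patch constants.CONFIG_FILE to a fake path, restoring the original afterwards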
+ data = '/ansible/test/cfg/path'
+ old_cfg_file = constants.CONFIG_FILE
+ constants.CONFIG_FILE = os.path.join(data, 'ansible.cfg')
+ yield data
+
+ constants.CONFIG_FILE = old_cfg_file
+
+
+@pytest.fixture
+def null_cfg_file():
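+ # simulate running with no config file at all by deleting constants.CONFIG_FILE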
+ old_cfg_file = constants.CONFIG_FILE
+ del constants.CONFIG_FILE
+ yield
+
+ constants.CONFIG_FILE = old_cfg_file
+
+
+@pytest.fixture
+def cwd():
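+ # fake os.getcwd (and os.getcwdu on Python 2) without actually changing directory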
+ data = '/ansible/test/cwd/'
+ old_cwd = os.getcwd
+ os.getcwd = lambda: data
+
+ old_cwdu = None
+ if hasattr(os, 'getcwdu'):
+ old_cwdu = os.getcwdu
+ os.getcwdu = lambda: to_text(data)
+
+ yield data
+
+ os.getcwd = old_cwd
+ if hasattr(os, 'getcwdu'):
+ os.getcwdu = old_cwdu
+
+
+class TestMkBoolean:
+ def test_bools(self):
+ assert constants.mk_boolean(True) is True
+ assert constants.mk_boolean(False) is False
+
+ def test_none(self):
+ assert constants.mk_boolean(None) is False
+
+ def test_numbers(self):
+ assert constants.mk_boolean(1) is True
+ assert constants.mk_boolean(0) is False
+ assert constants.mk_boolean(0.0) is False
+
+# Current mk_boolean doesn't consider these to be true values
+# def test_other_numbers(self):
+# assert constants.mk_boolean(2) is True
+# assert constants.mk_boolean(-1) is True
+# assert constants.mk_boolean(0.1) is True
+
+ def test_strings(self):
+ assert constants.mk_boolean("true") is True
+ assert constants.mk_boolean("TRUE") is True
+ assert constants.mk_boolean("t") is True
+ assert constants.mk_boolean("yes") is True
+ assert constants.mk_boolean("y") is True
+ assert constants.mk_boolean("on") is True
diff --git a/test/units/test_context.py b/test/units/test_context.py
new file mode 100644
index 00000000..24e2376d
--- /dev/null
+++ b/test/units/test_context.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import context
+
+
+class FakeOptions:
+ pass
+
+
+def test_set_global_context():
+ options = FakeOptions()
+ options.tags = [u'production', u'webservers']
+ options.check_mode = True
+ options.start_at_task = u'Start with くらとみ'
+
+ expected = frozenset((('tags', (u'production', u'webservers')),
+ ('check_mode', True),
+ ('start_at_task', u'Start with くらとみ')))
+
+ context._init_global_context(options)
+ assert frozenset(context.CLIARGS.items()) == expected
diff --git a/test/units/utils/__init__.py b/test/units/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/__init__.py
diff --git a/test/units/utils/collection_loader/__init__.py b/test/units/utils/collection_loader/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/collection_loader/__init__.py
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py
new file mode 100644
index 00000000..4041a338
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/ansible/builtin/plugins/modules/shouldnotload.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+raise Exception('this module should never be loaded')
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml
new file mode 100644
index 00000000..f2e2fdec
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/meta/runtime.yml
@@ -0,0 +1,4 @@
+plugin_routing:
+ modules:
+ rerouted_module:
+ redirect: ansible.builtin.ping
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py
new file mode 100644
index 00000000..9d30580f
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/action/my_action.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ..module_utils.my_util import question
+
+
+def action_code():
+ return "hello from my_action.py"
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/__init__.py
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py
new file mode 100644
index 00000000..35e1381b
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_other_util.py
@@ -0,0 +1,4 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .my_util import question
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py
new file mode 100644
index 00000000..c431c34c
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/module_utils/my_util.py
@@ -0,0 +1,6 @@
+# WARNING: Changing line numbers of code in this file will break collection tests that use tracing to check paths and line numbers.
+# Also, do not import division from __future__ as this will break detection of __future__ inheritance on Python 2.
+
+
+def question():
+ return 3 / 2
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py
new file mode 100644
index 00000000..6d697034
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+raise Exception('this should never run')
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py
new file mode 100644
index 00000000..99320a0c
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/plugins/modules/amodule.py
@@ -0,0 +1,6 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def module_code():
+ return "hello from amodule.py"
diff --git a/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections/ansible_collections/testns/testcoll/roles/some_role/.gitkeep
diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py
new file mode 100644
index 00000000..6068ac1a
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+raise Exception('this code should never execute')
diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py
new file mode 100644
index 00000000..6068ac1a
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/ansible/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+raise Exception('this code should never execute')
diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py
new file mode 100644
index 00000000..6068ac1a
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+raise Exception('this code should never execute')
diff --git a/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py
new file mode 100644
index 00000000..6068ac1a
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/collections_masked/ansible_collections/testns/testcoll/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+raise Exception('this code should never execute')
diff --git a/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/ansible/playbook_adj_other/.gitkeep
diff --git a/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/freshns/playbook_adj_other/.gitkeep
diff --git a/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/utils/collection_loader/fixtures/playbook_path/collections/ansible_collections/testns/playbook_adj_other/.gitkeep
diff --git a/test/units/utils/collection_loader/test_collection_loader.py b/test/units/utils/collection_loader/test_collection_loader.py
new file mode 100644
index 00000000..6488188c
--- /dev/null
+++ b/test/units/utils/collection_loader/test_collection_loader.py
@@ -0,0 +1,834 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pkgutil
+import pytest
+import re
+import sys
+
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils.compat.importlib import import_module
+from ansible.utils.collection_loader import AnsibleCollectionConfig, AnsibleCollectionRef
+from ansible.utils.collection_loader._collection_finder import (
+ _AnsibleCollectionFinder, _AnsibleCollectionLoader, _AnsibleCollectionNSPkgLoader, _AnsibleCollectionPkgLoader,
+ _AnsibleCollectionPkgLoaderBase, _AnsibleCollectionRootPkgLoader, _AnsiblePathHookFinder,
+ _get_collection_name_from_path, _get_collection_role_path, _get_collection_metadata, _iter_modules_impl
+)
+from ansible.utils.collection_loader._collection_config import _EventSource
+from units.compat.mock import MagicMock, NonCallableMagicMock, patch
+
+
+# fixture to ensure we always clean up the import stuff when we're done
+@pytest.fixture(autouse=True, scope='function')
+def teardown(*args, **kwargs):
+ yield
+ reset_collections_loader_state()
+
+# BEGIN STANDALONE TESTS - these exercise behaviors of the individual components without the import machinery
+
+
+def test_finder_setup():
+ # ensure scalar path is listified
+ f = _AnsibleCollectionFinder(paths='/bogus/bogus')
+ assert isinstance(f._n_collection_paths, list)
+
+ # ensure sys.path paths that have an ansible_collections dir are added to the end of the collections paths
+ with patch.object(sys, 'path', ['/bogus', default_test_collection_paths[1], '/morebogus', default_test_collection_paths[0]]):
+ f = _AnsibleCollectionFinder(paths=['/explicit', '/other'])
+ assert f._n_collection_paths == ['/explicit', '/other', default_test_collection_paths[1], default_test_collection_paths[0]]
+
+ configured_paths = ['/bogus']
+ playbook_paths = ['/playbookdir']
+ f = _AnsibleCollectionFinder(paths=configured_paths)
+ assert f._n_collection_paths == configured_paths
+ f.set_playbook_paths(playbook_paths)
+ assert f._n_collection_paths == extend_paths(playbook_paths, 'collections') + configured_paths
+
+ # ensure scalar playbook_paths gets listified
+ f.set_playbook_paths(playbook_paths[0])
+ assert f._n_collection_paths == extend_paths(playbook_paths, 'collections') + configured_paths
+
+
+def test_finder_not_interested():
+ f = get_default_finder()
+ assert f.find_module('nothanks') is None
+ assert f.find_module('nothanks.sub', path=['/bogus/dir']) is None
+
+
+def test_finder_ns():
+ # ensure we can still load ansible_collections and ansible_collections.ansible when they don't exist on disk
+ f = _AnsibleCollectionFinder(paths=['/bogus/bogus'])
+ loader = f.find_module('ansible_collections')
+ assert isinstance(loader, _AnsibleCollectionRootPkgLoader)
+
+ loader = f.find_module('ansible_collections.ansible', path=['/bogus/bogus'])
+ assert isinstance(loader, _AnsibleCollectionNSPkgLoader)
+
+ f = get_default_finder()
+ loader = f.find_module('ansible_collections')
+ assert isinstance(loader, _AnsibleCollectionRootPkgLoader)
+
+ # path is not allowed for top-level
+ with pytest.raises(ValueError):
+ f.find_module('ansible_collections', path=['whatever'])
+
+ # path is required for subpackages
+ with pytest.raises(ValueError):
+ f.find_module('ansible_collections.whatever', path=None)
+
+ paths = [os.path.join(p, 'ansible_collections/nonexistns') for p in default_test_collection_paths]
+
+ # test missing
+ loader = f.find_module('ansible_collections.nonexistns', paths)
+ assert loader is None
+
+
+# keep these up top to make sure the loader install/remove are working, since we rely on them heavily in the tests
+def test_loader_remove():
+ fake_mp = [MagicMock(), _AnsibleCollectionFinder(), MagicMock(), _AnsibleCollectionFinder()]
+ fake_ph = [MagicMock().m1, MagicMock().m2, _AnsibleCollectionFinder()._ansible_collection_path_hook, NonCallableMagicMock]
+ # must nest until 2.6 compilation is totally donezo
+ with patch.object(sys, 'meta_path', fake_mp):
+ with patch.object(sys, 'path_hooks', fake_ph):
+ _AnsibleCollectionFinder()._remove()
+ assert len(sys.meta_path) == 2
+ # no AnsibleCollectionFinders on the meta path after remove is called
+ assert all((not isinstance(mpf, _AnsibleCollectionFinder) for mpf in sys.meta_path))
+ assert len(sys.path_hooks) == 3
+ # none of the remaining path hooks should point at an AnsibleCollectionFinder
+ assert all((not isinstance(ph.__self__, _AnsibleCollectionFinder) for ph in sys.path_hooks if hasattr(ph, '__self__')))
+ assert AnsibleCollectionConfig.collection_finder is None
+
+
+def test_loader_install():
+ fake_mp = [MagicMock(), _AnsibleCollectionFinder(), MagicMock(), _AnsibleCollectionFinder()]
+ fake_ph = [MagicMock().m1, MagicMock().m2, _AnsibleCollectionFinder()._ansible_collection_path_hook, NonCallableMagicMock]
+ # must nest until 2.6 compilation is totally donezo
+ with patch.object(sys, 'meta_path', fake_mp):
+ with patch.object(sys, 'path_hooks', fake_ph):
+ f = _AnsibleCollectionFinder()
+ f._install()
+ assert len(sys.meta_path) == 3 # should have removed the existing ACFs and installed a new one
+ assert sys.meta_path[0] is f # at the front
+ # the rest of the meta_path should not be AnsibleCollectionFinders
+ assert all((not isinstance(mpf, _AnsibleCollectionFinder) for mpf in sys.meta_path[1:]))
+ assert len(sys.path_hooks) == 4 # should have removed the existing ACF path hooks and installed a new one
+ # the first path hook should be ours, make sure it's pointing at the right instance
+ assert hasattr(sys.path_hooks[0], '__self__') and sys.path_hooks[0].__self__ is f
+ # the rest of the path_hooks should not point at an AnsibleCollectionFinder
+ assert all((not isinstance(ph.__self__, _AnsibleCollectionFinder) for ph in sys.path_hooks[1:] if hasattr(ph, '__self__')))
+ assert AnsibleCollectionConfig.collection_finder is f
+ with pytest.raises(ValueError):
+ AnsibleCollectionConfig.collection_finder = f
+
+
+def test_finder_coll():
+ f = get_default_finder()
+
+ tests = [
+ {'name': 'ansible_collections.testns.testcoll', 'test_paths': [default_test_collection_paths]},
+ {'name': 'ansible_collections.ansible.builtin', 'test_paths': [['/bogus'], default_test_collection_paths]},
+ ]
+ # ensure finder works for legit paths and bogus paths
+ for test_dict in tests:
+ # splat the dict values to our locals
+ globals().update(test_dict)
+ parent_pkg = name.rpartition('.')[0]
+ for paths in test_paths:
+ paths = [os.path.join(p, parent_pkg.replace('.', '/')) for p in paths]
+ loader = f.find_module(name, path=paths)
+ assert isinstance(loader, _AnsibleCollectionPkgLoader)
+
+
+def test_root_loader_not_interested():
+ with pytest.raises(ImportError):
+ _AnsibleCollectionRootPkgLoader('not_ansible_collections_toplevel', path_list=[])
+
+ with pytest.raises(ImportError):
+ _AnsibleCollectionRootPkgLoader('ansible_collections.somens', path_list=['/bogus'])
+
+
+def test_root_loader():
+ name = 'ansible_collections'
+ # ensure this works even when ansible_collections doesn't exist on disk
+ for paths in [], default_test_collection_paths:
+ if name in sys.modules:
+ del sys.modules[name]
+ loader = _AnsibleCollectionRootPkgLoader(name, paths)
+ assert repr(loader).startswith('_AnsibleCollectionRootPkgLoader(path=')
+ module = loader.load_module(name)
+ assert module.__name__ == name
+ assert module.__path__ == [p for p in extend_paths(paths, name) if os.path.isdir(p)]
+ # even if the dir exists somewhere, this loader doesn't support get_data, so make __file__ a non-file
+ assert module.__file__ == '<ansible_synthetic_collection_package>'
+ assert module.__package__ == name
+ assert sys.modules.get(name) == module
+
+
+def test_nspkg_loader_not_interested():
+ with pytest.raises(ImportError):
+ _AnsibleCollectionNSPkgLoader('not_ansible_collections_toplevel.something', path_list=[])
+
+ with pytest.raises(ImportError):
+ _AnsibleCollectionNSPkgLoader('ansible_collections.somens.somecoll', path_list=[])
+
+
+def test_nspkg_loader_load_module():
+ # ensure the loader behaves on the toplevel and ansible packages for both legit and missing/bogus paths
+ for name in ['ansible_collections.ansible', 'ansible_collections.testns']:
+ parent_pkg = name.partition('.')[0]
+ module_to_load = name.rpartition('.')[2]
+ paths = extend_paths(default_test_collection_paths, parent_pkg)
+ existing_child_paths = [p for p in extend_paths(paths, module_to_load) if os.path.exists(p)]
+ if name in sys.modules:
+ del sys.modules[name]
+ loader = _AnsibleCollectionNSPkgLoader(name, path_list=paths)
+ assert repr(loader).startswith('_AnsibleCollectionNSPkgLoader(path=')
+ module = loader.load_module(name)
+ assert module.__name__ == name
+ assert isinstance(module.__loader__, _AnsibleCollectionNSPkgLoader)
+ assert module.__path__ == existing_child_paths
+ assert module.__package__ == name
+ assert module.__file__ == '<ansible_synthetic_collection_package>'
+ assert sys.modules.get(name) == module
+
+
+def test_collpkg_loader_not_interested():
+ with pytest.raises(ImportError):
+ _AnsibleCollectionPkgLoader('not_ansible_collections', path_list=[])
+
+ with pytest.raises(ImportError):
+ _AnsibleCollectionPkgLoader('ansible_collections.ns', path_list=['/bogus/bogus'])
+
+
+def test_collpkg_loader_load_module():
+ reset_collections_loader_state()
+ with patch('ansible.utils.collection_loader.AnsibleCollectionConfig') as p:
+ for name in ['ansible_collections.ansible.builtin', 'ansible_collections.testns.testcoll']:
+ parent_pkg = name.rpartition('.')[0]
+ module_to_load = name.rpartition('.')[2]
+ paths = extend_paths(default_test_collection_paths, parent_pkg)
+ existing_child_paths = [p for p in extend_paths(paths, module_to_load) if os.path.exists(p)]
+ is_builtin = 'ansible.builtin' in name
+ if name in sys.modules:
+ del sys.modules[name]
+ loader = _AnsibleCollectionPkgLoader(name, path_list=paths)
+ assert repr(loader).startswith('_AnsibleCollectionPkgLoader(path=')
+ module = loader.load_module(name)
+ assert module.__name__ == name
+ assert isinstance(module.__loader__, _AnsibleCollectionPkgLoader)
+ if is_builtin:
+ assert module.__path__ == []
+ else:
+ assert module.__path__ == [existing_child_paths[0]]
+
+ assert module.__package__ == name
+ if is_builtin:
+ assert module.__file__ == '<ansible_synthetic_collection_package>'
+ else:
+ assert module.__file__.endswith('__synthetic__') and os.path.isdir(os.path.dirname(module.__file__))
+ assert sys.modules.get(name) == module
+
+ assert hasattr(module, '_collection_meta') and isinstance(module._collection_meta, dict)
+
+ # FIXME: validate _collection_meta contents match what's on disk (or not)
+
+ # if the module has metadata, try loading it with busted metadata
+ if module._collection_meta:
+ _collection_finder = import_module('ansible.utils.collection_loader._collection_finder')
+ with patch.object(_collection_finder, '_meta_yml_to_dict', side_effect=Exception('bang')):
+ with pytest.raises(Exception) as ex:
+ _AnsibleCollectionPkgLoader(name, path_list=paths).load_module(name)
+ assert 'error parsing collection metadata' in str(ex.value)
+
+
+def test_coll_loader():
+ with patch('ansible.utils.collection_loader.AnsibleCollectionConfig'):
+ with pytest.raises(ValueError):
+ # not a collection
+ _AnsibleCollectionLoader('ansible_collections')
+
+ with pytest.raises(ValueError):
+ # bogus paths
+ _AnsibleCollectionLoader('ansible_collections.testns.testcoll', path_list=[])
+
+ # FIXME: more
+
+
+def test_path_hook_setup():
+ with patch.object(sys, 'path_hooks', []):
+ found_hook = None
+ pathhook_exc = None
+ try:
+ found_hook = _AnsiblePathHookFinder._get_filefinder_path_hook()
+ except Exception as phe:
+ pathhook_exc = phe
+
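+ # with no registered hooks, Python 3 raises (it requires exactly one
+ # FileFinder hook), while Python 2 simply finds nothing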
+ if PY3:
+ assert str(pathhook_exc) == 'need exactly one FileFinder import hook (found 0)'
+ else:
+ assert found_hook is None
+
+ assert repr(_AnsiblePathHookFinder(object(), '/bogus/path')) == "_AnsiblePathHookFinder(path='/bogus/path')"
+
+
+def test_path_hook_importerror():
+ # ensure that AnsiblePathHookFinder.find_module swallows ImportError from path hook delegation on Py3, eg if the delegated
+ # path hook gets passed a file on sys.path (python36.zip)
+ reset_collections_loader_state()
+ path_to_a_file = os.path.join(default_test_collection_paths[0], 'ansible_collections/testns/testcoll/plugins/action/my_action.py')
+ # it's a bug if the following pops an ImportError...
+ assert _AnsiblePathHookFinder(_AnsibleCollectionFinder(), path_to_a_file).find_module('foo.bar.my_action') is None
+
+
+def test_new_or_existing_module():
+ module_name = 'blar.test.module'
+ pkg_name = module_name.rpartition('.')[0]
+
+ # create new module case
+ nuke_module_prefix(module_name)
+ with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name, __package__=pkg_name) as new_module:
+ # the module we just created should now exist in sys.modules
+ assert sys.modules.get(module_name) is new_module
+ assert new_module.__name__ == module_name
+
+ # the module should stick since we didn't raise an exception in the contextmgr
+ assert sys.modules.get(module_name) is new_module
+
+ # reuse existing module case
+ with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name, __attr1__=42, blar='yo') as existing_module:
+ assert sys.modules.get(module_name) is new_module # should be the same module we created earlier
+ assert hasattr(existing_module, '__package__') and existing_module.__package__ == pkg_name
+ assert hasattr(existing_module, '__attr1__') and existing_module.__attr1__ == 42
+ assert hasattr(existing_module, 'blar') and existing_module.blar == 'yo'
+
+ # exception during update existing shouldn't zap existing module from sys.modules
+ with pytest.raises(ValueError) as ve:
+ with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name) as existing_module:
+ err_to_raise = ValueError('bang')
+ raise err_to_raise
+ # make sure we got our error
+ assert ve.value is err_to_raise
+ # and that the module still exists
+ assert sys.modules.get(module_name) is existing_module
+
+ # test module removal after exception during creation
+ nuke_module_prefix(module_name)
+ with pytest.raises(ValueError) as ve:
+ with _AnsibleCollectionPkgLoaderBase._new_or_existing_module(module_name) as new_module:
+ err_to_raise = ValueError('bang')
+ raise err_to_raise
+ # make sure we got our error
+ assert ve.value is err_to_raise
+ # and that the module was removed
+ assert sys.modules.get(module_name) is None
+
+
+def test_iter_modules_impl():
+ modules_trailer = 'ansible_collections/testns/testcoll/plugins'
+ modules_pkg_prefix = modules_trailer.replace('/', '.') + '.'
+ modules_path = os.path.join(default_test_collection_paths[0], modules_trailer)
+ modules = list(_iter_modules_impl([modules_path], modules_pkg_prefix))
+
+ assert modules
+ assert set([('ansible_collections.testns.testcoll.plugins.action', True),
+ ('ansible_collections.testns.testcoll.plugins.module_utils', True),
+ ('ansible_collections.testns.testcoll.plugins.modules', True)]) == set(modules)
+
+ modules_trailer = 'ansible_collections/testns/testcoll/plugins/modules'
+ modules_pkg_prefix = modules_trailer.replace('/', '.') + '.'
+ modules_path = os.path.join(default_test_collection_paths[0], modules_trailer)
+ modules = list(_iter_modules_impl([modules_path], modules_pkg_prefix))
+
+ assert modules
+ assert len(modules) == 1
+ assert modules[0][0] == 'ansible_collections.testns.testcoll.plugins.modules.amodule' # name
+ assert modules[0][1] is False # is_pkg
+
+ # FIXME: more
+
+
+# BEGIN IN-CIRCUIT TESTS - these exercise behaviors of the loader when wired up to the import machinery
+
+
+def test_import_from_collection(monkeypatch):
+ collection_root = os.path.join(os.path.dirname(__file__), 'fixtures', 'collections')
+ collection_path = os.path.join(collection_root, 'ansible_collections/testns/testcoll/plugins/module_utils/my_util.py')
+
+ # THIS IS UNSTABLE UNDER A DEBUGGER
+ # the trace we're expecting to be generated when running the code below:
+ # answer = question()
+ expected_trace_log = [
+ (collection_path, 5, 'call'),
+ (collection_path, 6, 'line'),
+ (collection_path, 6, 'return'),
+ ]
+
+ # define the collection root before any ansible code has been loaded
+ # otherwise config will have already been loaded and changing the environment will have no effect
+ monkeypatch.setenv('ANSIBLE_COLLECTIONS_PATH', collection_root)
+
+ finder = _AnsibleCollectionFinder(paths=[collection_root])
+ reset_collections_loader_state(finder)
+
+ from ansible_collections.testns.testcoll.plugins.module_utils.my_util import question
+
+ original_trace_function = sys.gettrace()
+ trace_log = []
+
+ if original_trace_function:
+ # enable tracing while preserving the existing trace function (coverage)
+ def my_trace_function(frame, event, arg):
+ trace_log.append((frame.f_code.co_filename, frame.f_lineno, event))
+
+ # the original trace function expects to have itself set as the trace function
+ sys.settrace(original_trace_function)
+ # call the original trace function
+ original_trace_function(frame, event, arg)
+ # restore our trace function
+ sys.settrace(my_trace_function)
+
+ return my_trace_function
+ else:
+ # no existing trace function, so our trace function is much simpler
+ def my_trace_function(frame, event, arg):
+ trace_log.append((frame.f_code.co_filename, frame.f_lineno, event))
+
+ return my_trace_function
+
+ sys.settrace(my_trace_function)
+
+ try:
+ # run a minimal amount of code while the trace is running
+ # adding more code here, including use of a context manager, will add more to our trace
+ answer = question()
+ finally:
+ sys.settrace(original_trace_function)
+
+ # make sure 'import ... as ...' works on builtin synthetic collections
+ # the following import is not supported (it tries to find module_utils in ansible.plugins)
+ # import ansible_collections.ansible.builtin.plugins.module_utils as c1
+ import ansible_collections.ansible.builtin.plugins.action as c2
+ import ansible_collections.ansible.builtin.plugins as c3
+ import ansible_collections.ansible.builtin as c4
+ import ansible_collections.ansible as c5
+ import ansible_collections as c6
+
+ # make sure 'import ...' works on builtin synthetic collections
+ import ansible_collections.ansible.builtin.plugins.module_utils
+
+ import ansible_collections.ansible.builtin.plugins.action
+ assert ansible_collections.ansible.builtin.plugins.action == c3.action == c2
+
+ import ansible_collections.ansible.builtin.plugins
+ assert ansible_collections.ansible.builtin.plugins == c4.plugins == c3
+
+ import ansible_collections.ansible.builtin
+ assert ansible_collections.ansible.builtin == c5.builtin == c4
+
+ import ansible_collections.ansible
+ assert ansible_collections.ansible == c6.ansible == c5
+
+ import ansible_collections
+ assert ansible_collections == c6
+
+ # make sure 'from ... import ...' works on builtin synthetic collections
+ from ansible_collections.ansible import builtin
+ from ansible_collections.ansible.builtin import plugins
+ assert builtin.plugins == plugins
+
+ from ansible_collections.ansible.builtin.plugins import action
+ from ansible_collections.ansible.builtin.plugins.action import command
+ assert action.command == command
+
+ from ansible_collections.ansible.builtin.plugins.module_utils import basic
+ from ansible_collections.ansible.builtin.plugins.module_utils.basic import AnsibleModule
+ assert basic.AnsibleModule == AnsibleModule
+
+ # make sure relative imports work from collections code
+ # these require __package__ to be set correctly
+ import ansible_collections.testns.testcoll.plugins.module_utils.my_other_util
+ import ansible_collections.testns.testcoll.plugins.action.my_action
+
+ # verify that code loaded from a collection does not inherit __future__ statements from the collection loader
+ if sys.version_info[0] == 2:
+ # if the collection code inherits the division future feature from the collection loader this will fail
+ assert answer == 1
+ else:
+ assert answer == 1.5
+
+ # verify that the filename and line number reported by the trace is correct
+ # this makes sure that collection loading preserves file paths and line numbers
+ assert trace_log == expected_trace_log
+
+
+def test_eventsource():
+ es = _EventSource()
+ # fire when empty should succeed
+ es.fire(42)
+ handler1 = MagicMock()
+ handler2 = MagicMock()
+ es += handler1
+ es.fire(99, my_kwarg='blah')
+ handler1.assert_called_with(99, my_kwarg='blah')
+ es += handler2
+ es.fire(123, foo='bar')
+ handler1.assert_called_with(123, foo='bar')
+ handler2.assert_called_with(123, foo='bar')
+ es -= handler2
+ handler1.reset_mock()
+ handler2.reset_mock()
+ es.fire(123, foo='bar')
+ handler1.assert_called_with(123, foo='bar')
+ handler2.assert_not_called()
+ es -= handler1
+ handler1.reset_mock()
+ es.fire('blah', kwarg=None)
+ handler1.assert_not_called()
+ handler2.assert_not_called()
+ es -= handler1 # should succeed silently
+ handler_bang = MagicMock(side_effect=Exception('bang'))
+ es += handler_bang
+ with pytest.raises(Exception) as ex:
+ es.fire(123)
+ assert 'bang' in str(ex.value)
+ handler_bang.assert_called_with(123)
+ with pytest.raises(ValueError):
+ es += 42
+
+
+def test_on_collection_load():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ load_handler = MagicMock()
+ AnsibleCollectionConfig.on_collection_load += load_handler
+
+ m = import_module('ansible_collections.testns.testcoll')
+ load_handler.assert_called_once_with(collection_name='testns.testcoll', collection_path=os.path.dirname(m.__file__))
+
+ _meta = _get_collection_metadata('testns.testcoll')
+ assert _meta
+ # FIXME: compare to disk
+
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ AnsibleCollectionConfig.on_collection_load += MagicMock(side_effect=Exception('bang'))
+ with pytest.raises(Exception) as ex:
+ import_module('ansible_collections.testns.testcoll')
+ assert 'bang' in str(ex.value)
+
+
+def test_default_collection_config():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+ assert AnsibleCollectionConfig.default_collection is None
+ AnsibleCollectionConfig.default_collection = 'foo.bar'
+ assert AnsibleCollectionConfig.default_collection == 'foo.bar'
+ with pytest.raises(ValueError):
+ AnsibleCollectionConfig.default_collection = 'bar.baz'
+
+
+def test_default_collection_detection():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ # we're clearly not under a collection path
+ assert _get_collection_name_from_path('/') is None
+
+ # something that looks like a collection path but isn't importable by our finder
+ assert _get_collection_name_from_path('/foo/ansible_collections/bogusns/boguscoll/bar') is None
+
+ # legit, at the top of the collection
+ live_collection_path = os.path.join(os.path.dirname(__file__), 'fixtures/collections/ansible_collections/testns/testcoll')
+ assert _get_collection_name_from_path(live_collection_path) == 'testns.testcoll'
+
+ # legit, deeper inside the collection
+ live_collection_deep_path = os.path.join(live_collection_path, 'plugins/modules')
+ assert _get_collection_name_from_path(live_collection_deep_path) == 'testns.testcoll'
+
+ # this one should be hidden by the real testns.testcoll, so should not resolve
+ masked_collection_path = os.path.join(os.path.dirname(__file__), 'fixtures/collections_masked/ansible_collections/testns/testcoll')
+ assert _get_collection_name_from_path(masked_collection_path) is None
+
+
+@pytest.mark.parametrize(
+ 'role_name,collection_list,expected_collection_name,expected_path_suffix',
+ [
+ ('some_role', ['testns.testcoll', 'ansible.bogus'], 'testns.testcoll', 'testns/testcoll/roles/some_role'),
+ ('testns.testcoll.some_role', ['ansible.bogus', 'testns.testcoll'], 'testns.testcoll', 'testns/testcoll/roles/some_role'),
+ ('testns.testcoll.some_role', [], 'testns.testcoll', 'testns/testcoll/roles/some_role'),
+ ('testns.testcoll.some_role', None, 'testns.testcoll', 'testns/testcoll/roles/some_role'),
+ ('some_role', [], None, None),
+ ('some_role', None, None, None),
+ ])
+def test_collection_role_name_location(role_name, collection_list, expected_collection_name, expected_path_suffix):
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ expected_path = None
+ if expected_path_suffix:
+ expected_path = os.path.join(os.path.dirname(__file__), 'fixtures/collections/ansible_collections', expected_path_suffix)
+
+ found = _get_collection_role_path(role_name, collection_list)
+
+ if found:
+ assert found[0] == role_name.rpartition('.')[2]
+ assert found[1] == expected_path
+ assert found[2] == expected_collection_name
+ else:
+ assert expected_collection_name is None and expected_path_suffix is None
+
+
+def test_bogus_imports():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ # ensure ImportError on known-bogus imports
+ bogus_imports = ['bogus_toplevel', 'ansible_collections.bogusns', 'ansible_collections.testns.boguscoll',
+ 'ansible_collections.testns.testcoll.bogussub', 'ansible_collections.ansible.builtin.bogussub']
+ for bogus_import in bogus_imports:
+ with pytest.raises(ImportError):
+ import_module(bogus_import)
+
+
+def test_empty_vs_no_code():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ from ansible_collections.testns import testcoll # synthetic package with no code on disk
+ from ansible_collections.testns.testcoll.plugins import module_utils # real package with empty code file
+
+ # ensure synthetic packages have no code object at all (prevent bogus coverage entries)
+ assert testcoll.__loader__.get_source(testcoll.__name__) is None
+ assert testcoll.__loader__.get_code(testcoll.__name__) is None
+
+ # ensure empty package inits do have a code object
+ assert module_utils.__loader__.get_source(module_utils.__name__) == b''
+ assert module_utils.__loader__.get_code(module_utils.__name__) is not None
+
+
+def test_finder_playbook_paths():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ import ansible_collections
+ import ansible_collections.ansible
+ import ansible_collections.testns
+
+ # ensure the package modules look like we expect
+ assert hasattr(ansible_collections, '__path__') and len(ansible_collections.__path__) > 0
+ assert hasattr(ansible_collections.ansible, '__path__') and len(ansible_collections.ansible.__path__) > 0
+ assert hasattr(ansible_collections.testns, '__path__') and len(ansible_collections.testns.__path__) > 0
+
+ # these shouldn't be visible yet, since we haven't added the playbook dir
+ with pytest.raises(ImportError):
+ import ansible_collections.ansible.playbook_adj_other
+
+ with pytest.raises(ImportError):
+ import ansible_collections.testns.playbook_adj_other
+
+ assert AnsibleCollectionConfig.playbook_paths == []
+ playbook_path_fixture_dir = os.path.join(os.path.dirname(__file__), 'fixtures/playbook_path')
+
+ # configure the playbook paths
+ AnsibleCollectionConfig.playbook_paths = [playbook_path_fixture_dir]
+
+ # playbook paths go to the front of the line
+ assert AnsibleCollectionConfig.collection_paths[0] == os.path.join(playbook_path_fixture_dir, 'collections')
+
+ # playbook paths should be updated on the existing root ansible_collections path, as well as on the 'ansible' namespace (but no others!)
+ assert ansible_collections.__path__[0] == os.path.join(playbook_path_fixture_dir, 'collections/ansible_collections')
+ assert ansible_collections.ansible.__path__[0] == os.path.join(playbook_path_fixture_dir, 'collections/ansible_collections/ansible')
+ assert all('playbook_path' not in p for p in ansible_collections.testns.__path__)
+
+ # should succeed since we fixed up the package path
+ import ansible_collections.ansible.playbook_adj_other
+ # should succeed since we didn't import freshns before hacking in the path
+ import ansible_collections.freshns.playbook_adj_other
+ # should fail since we've already imported something from this path and didn't fix up its package path
+ with pytest.raises(ImportError):
+ import ansible_collections.testns.playbook_adj_other
+
+
+def test_toplevel_iter_modules():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ modules = list(pkgutil.iter_modules(default_test_collection_paths, ''))
+ assert len(modules) == 1
+ assert modules[0][1] == 'ansible_collections'
+
+
+def test_iter_modules_namespaces():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ paths = extend_paths(default_test_collection_paths, 'ansible_collections')
+ modules = list(pkgutil.iter_modules(paths, 'ansible_collections.'))
+ assert len(modules) == 2
+ assert all(m[2] is True for m in modules)
+ assert all(isinstance(m[0], _AnsiblePathHookFinder) for m in modules)
+ assert set(['ansible_collections.testns', 'ansible_collections.ansible']) == set(m[1] for m in modules)
+
+
+def test_collection_get_data():
+ finder = get_default_finder()
+ reset_collections_loader_state(finder)
+
+ # something that's there
+ d = pkgutil.get_data('ansible_collections.testns.testcoll', 'plugins/action/my_action.py')
+ assert b'hello from my_action.py' in d
+
+ # something that's not there
+ d = pkgutil.get_data('ansible_collections.testns.testcoll', 'bogus/bogus')
+ assert d is None
+
+ with pytest.raises(ValueError):
+ plugins_pkg = import_module('ansible_collections.ansible.builtin')
+ assert not os.path.exists(os.path.dirname(plugins_pkg.__file__))
+ d = pkgutil.get_data('ansible_collections.ansible.builtin', 'plugins/connection/local.py')
+
+
+@pytest.mark.parametrize(
+ 'ref,ref_type,expected_collection,expected_subdirs,expected_resource,expected_python_pkg_name',
+ [
+ ('ns.coll.myaction', 'action', 'ns.coll', '', 'myaction', 'ansible_collections.ns.coll.plugins.action'),
+ ('ns.coll.subdir1.subdir2.myaction', 'action', 'ns.coll', 'subdir1.subdir2', 'myaction', 'ansible_collections.ns.coll.plugins.action.subdir1.subdir2'),
+ ('ns.coll.myrole', 'role', 'ns.coll', '', 'myrole', 'ansible_collections.ns.coll.roles.myrole'),
+ ('ns.coll.subdir1.subdir2.myrole', 'role', 'ns.coll', 'subdir1.subdir2', 'myrole', 'ansible_collections.ns.coll.roles.subdir1.subdir2.myrole'),
+ ])
+def test_fqcr_parsing_valid(ref, ref_type, expected_collection,
+ expected_subdirs, expected_resource, expected_python_pkg_name):
+ assert AnsibleCollectionRef.is_valid_fqcr(ref, ref_type)
+
+ r = AnsibleCollectionRef.from_fqcr(ref, ref_type)
+ assert r.collection == expected_collection
+ assert r.subdirs == expected_subdirs
+ assert r.resource == expected_resource
+ assert r.n_python_package_name == expected_python_pkg_name
+
+ r = AnsibleCollectionRef.try_parse_fqcr(ref, ref_type)
+ assert r.collection == expected_collection
+ assert r.subdirs == expected_subdirs
+ assert r.resource == expected_resource
+ assert r.n_python_package_name == expected_python_pkg_name
+
+
+@pytest.mark.parametrize(
+ 'ref,ref_type,expected_error_type,expected_error_expression',
+ [
+ ('no_dots_at_all_action', 'action', ValueError, 'is not a valid collection reference'),
+ ('no_nscoll.myaction', 'action', ValueError, 'is not a valid collection reference'),
+ ('ns.coll.myaction', 'bogus', ValueError, 'invalid collection ref_type'),
+ ])
+def test_fqcr_parsing_invalid(ref, ref_type, expected_error_type, expected_error_expression):
+ assert not AnsibleCollectionRef.is_valid_fqcr(ref, ref_type)
+
+ with pytest.raises(expected_error_type) as curerr:
+ AnsibleCollectionRef.from_fqcr(ref, ref_type)
+
+ assert re.search(expected_error_expression, str(curerr.value))
+
+ r = AnsibleCollectionRef.try_parse_fqcr(ref, ref_type)
+ assert r is None
+
+
+@pytest.mark.parametrize(
+ 'name,subdirs,resource,ref_type,python_pkg_name',
+ [
+ ('ns.coll', None, 'res', 'doc_fragments', 'ansible_collections.ns.coll.plugins.doc_fragments'),
+ ('ns.coll', 'subdir1', 'res', 'doc_fragments', 'ansible_collections.ns.coll.plugins.doc_fragments.subdir1'),
+ ('ns.coll', 'subdir1.subdir2', 'res', 'action', 'ansible_collections.ns.coll.plugins.action.subdir1.subdir2'),
+ ])
+def test_collectionref_components_valid(name, subdirs, resource, ref_type, python_pkg_name):
+ x = AnsibleCollectionRef(name, subdirs, resource, ref_type)
+
+ assert x.collection == name
+ if subdirs:
+ assert x.subdirs == subdirs
+ else:
+ assert x.subdirs == ''
+
+ assert x.resource == resource
+ assert x.ref_type == ref_type
+ assert x.n_python_package_name == python_pkg_name
+
+
+@pytest.mark.parametrize(
+ 'dirname,expected_result',
+ [
+ ('become_plugins', 'become'),
+ ('cache_plugins', 'cache'),
+ ('connection_plugins', 'connection'),
+ ('library', 'modules'),
+ ('filter_plugins', 'filter'),
+ ('bogus_plugins', ValueError),
+ (None, ValueError)
+ ]
+)
+def test_legacy_plugin_dir_to_plugin_type(dirname, expected_result):
+ if isinstance(expected_result, string_types):
+ assert AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(dirname) == expected_result
+ else:
+ with pytest.raises(expected_result):
+ AnsibleCollectionRef.legacy_plugin_dir_to_plugin_type(dirname)
+
+
+@pytest.mark.parametrize(
+ 'name,subdirs,resource,ref_type,expected_error_type,expected_error_expression',
+ [
+ ('bad_ns', '', 'resource', 'action', ValueError, 'invalid collection name'),
+ ('ns.coll.', '', 'resource', 'action', ValueError, 'invalid collection name'),
+ ('ns.coll', 'badsubdir#', 'resource', 'action', ValueError, 'invalid subdirs entry'),
+ ('ns.coll', 'badsubdir.', 'resource', 'action', ValueError, 'invalid subdirs entry'),
+ ('ns.coll', '.badsubdir', 'resource', 'action', ValueError, 'invalid subdirs entry'),
+ ('ns.coll', '', 'resource', 'bogus', ValueError, 'invalid collection ref_type'),
+ ])
+def test_collectionref_components_invalid(name, subdirs, resource, ref_type, expected_error_type, expected_error_expression):
+ with pytest.raises(expected_error_type) as curerr:
+ AnsibleCollectionRef(name, subdirs, resource, ref_type)
+
+ assert re.search(expected_error_expression, str(curerr.value))
+
+
+# BEGIN TEST SUPPORT
+
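+# two on-disk fixture trees plus a bogus entry, so nonexistent paths are always exercised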
+default_test_collection_paths = [
+ os.path.join(os.path.dirname(__file__), 'fixtures', 'collections'),
+ os.path.join(os.path.dirname(__file__), 'fixtures', 'collections_masked'),
+ '/bogus/bogussub'
+]
+
+
+def get_default_finder():
+ return _AnsibleCollectionFinder(paths=default_test_collection_paths)
+
+
+def extend_paths(path_list, suffix):
+ suffix = suffix.replace('.', '/')
+ return [os.path.join(p, suffix) for p in path_list]
+
+
+def nuke_module_prefix(prefix):
+ for module_to_nuke in [m for m in sys.modules if m.startswith(prefix)]:
+ sys.modules.pop(module_to_nuke)
+
+
+def reset_collections_loader_state(metapath_finder=None):
+ _AnsibleCollectionFinder._remove()
+
+ nuke_module_prefix('ansible_collections')
+ nuke_module_prefix('ansible.modules')
+ nuke_module_prefix('ansible.plugins')
+
+ # FIXME: better to move this someplace else that gets cleaned up automatically?
+ _AnsibleCollectionLoader._redirected_package_map = {}
+
+ AnsibleCollectionConfig._default_collection = None
+ AnsibleCollectionConfig._on_collection_load = _EventSource()
+
+ if metapath_finder:
+ metapath_finder._install()
diff --git a/test/units/utils/display/test_display.py b/test/units/utils/display/test_display.py
new file mode 100644
index 00000000..cdeb4966
--- /dev/null
+++ b/test/units/utils/display/test_display.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.utils.display import Display
+
+
+def test_display_basic_message(capsys, mocker):
+ # Disable logging
+ mocker.patch('ansible.utils.display.logger', return_value=None)
+
+ d = Display()
+ d.display(u'Some displayed message')
+ out, err = capsys.readouterr()
+ assert out == 'Some displayed message\n'
+ assert err == ''
diff --git a/test/units/utils/display/test_logger.py b/test/units/utils/display/test_logger.py
new file mode 100644
index 00000000..ed69393b
--- /dev/null
+++ b/test/units/utils/display/test_logger.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import logging
+import sys
+
+
+def test_logger():
+ '''
+ Avoid CVE-2019-14846: third-party libs will disclose secrets when
+ logging is set to DEBUG
+ '''
+
+ # clear loaded modules so the test starts unadulterated
+ for loaded in list(sys.modules.keys()):
+ if 'ansible' in loaded:
+ del sys.modules[loaded]
+
+ # force logger to exist via config
+ from ansible import constants as C
+ C.DEFAULT_LOG_PATH = '/dev/null'
+
+ # initialize logger
+ from ansible.utils.display import logger
+
+ assert logger.root.level != logging.DEBUG
diff --git a/test/units/utils/display/test_warning.py b/test/units/utils/display/test_warning.py
new file mode 100644
index 00000000..be63c348
--- /dev/null
+++ b/test/units/utils/display/test_warning.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible.utils.display import Display
+
+
+@pytest.fixture
+def warning_message():
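+ # provide the raw message and the exact formatted string Display should record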
+ warning_message = 'bad things will happen'
+ expected_warning_message = '[WARNING]: {0}\n'.format(warning_message)
+ return warning_message, expected_warning_message
+
+
+def test_warning(capsys, mocker, warning_message):
+ warning_message, expected_warning_message = warning_message
+
+ mocker.patch('ansible.utils.color.ANSIBLE_COLOR', True)
+ mocker.patch('ansible.utils.color.parsecolor', return_value=u'1;35') # value for 'bright purple'
+
+ d = Display()
+ d.warning(warning_message)
+ out, err = capsys.readouterr()
+ assert d._warns == {expected_warning_message: 1}
+ assert err == '\x1b[1;35m{0}\x1b[0m\n'.format(expected_warning_message.rstrip('\n'))
+
+
+def test_warning_no_color(capsys, mocker, warning_message):
+ warning_message, expected_warning_message = warning_message
+
+ mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False)
+
+ d = Display()
+ d.warning(warning_message)
+ out, err = capsys.readouterr()
+ assert d._warns == {expected_warning_message: 1}
+ assert err == expected_warning_message
diff --git a/test/units/utils/test_cleanup_tmp_file.py b/test/units/utils/test_cleanup_tmp_file.py
new file mode 100644
index 00000000..2a44a55b
--- /dev/null
+++ b/test/units/utils/test_cleanup_tmp_file.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+
+from ansible.utils.path import cleanup_tmp_file
+
+
+def raise_error(*args, **kwargs):
+ raise OSError
+
+
+def test_cleanup_tmp_file_file():
+ tmp_fd, tmp = tempfile.mkstemp()
+ cleanup_tmp_file(tmp)
+
+ assert not os.path.exists(tmp)
+
+
+def test_cleanup_tmp_file_dir():
+ tmp = tempfile.mkdtemp()
+ cleanup_tmp_file(tmp)
+
+ assert not os.path.exists(tmp)
+
+
+def test_cleanup_tmp_file_nonexistent():
+ assert cleanup_tmp_file('nope') is None
+
+
+def test_cleanup_tmp_file_failure(mocker):
+ tmp = tempfile.mkdtemp()
+ # cleanup_tmp_file swallows removal errors, so the patched failure must not escape
+ mocker.patch('shutil.rmtree', side_effect=raise_error)
+ cleanup_tmp_file(tmp)
+
+
+def test_cleanup_tmp_file_failure_warning(mocker, capsys):
+ tmp = tempfile.mkdtemp()
+ # with warn=True the swallowed error is reported as a warning instead of raised
+ mocker.patch('shutil.rmtree', side_effect=raise_error)
+ cleanup_tmp_file(tmp, warn=True)
diff --git a/test/units/utils/test_context_objects.py b/test/units/utils/test_context_objects.py
new file mode 100644
index 00000000..c56a41d0
--- /dev/null
+++ b/test/units/utils/test_context_objects.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import argparse
+
+import pytest
+
+from ansible.module_utils.common.collections import ImmutableDict
+from ansible.utils import context_objects as co
+
+
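+# (input, expected) pairs: lists become tuples, sets become frozensets,
+# dicts become ImmutableDicts, applied recursively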
+MAKE_IMMUTABLE_DATA = ((u'くらとみ', u'くらとみ'),
+ (42, 42),
+ ({u'café': u'くらとみ'}, ImmutableDict({u'café': u'くらとみ'})),
+ ([1, u'café', u'くらとみ'], (1, u'café', u'くらとみ')),
+ (set((1, u'café', u'くらとみ')), frozenset((1, u'café', u'くらとみ'))),
+ ({u'café': [1, set(u'ñ')]},
+ ImmutableDict({u'café': (1, frozenset(u'ñ'))})),
+ ([set((1, 2)), {u'くらとみ': 3}],
+ (frozenset((1, 2)), ImmutableDict({u'くらとみ': 3}))),
+ )
+
+
+@pytest.mark.parametrize('data, expected', MAKE_IMMUTABLE_DATA)
+def test_make_immutable(data, expected):
+ assert co._make_immutable(data) == expected
+
+
+def test_cliargs_from_dict():
+ old_dict = {'tags': [u'production', u'webservers'],
+ 'check_mode': True,
+ 'start_at_task': u'Start with くらとみ'}
+ expected = frozenset((('tags', (u'production', u'webservers')),
+ ('check_mode', True),
+ ('start_at_task', u'Start with くらとみ')))
+
+ assert frozenset(co.CLIArgs(old_dict).items()) == expected
+
+
+def test_cliargs():
+ class FakeOptions:
+ pass
+ options = FakeOptions()
+ options.tags = [u'production', u'webservers']
+ options.check_mode = True
+ options.start_at_task = u'Start with くらとみ'
+
+ expected = frozenset((('tags', (u'production', u'webservers')),
+ ('check_mode', True),
+ ('start_at_task', u'Start with くらとみ')))
+
+ assert frozenset(co.CLIArgs.from_options(options).items()) == expected
+
+
+def test_cliargs_argparse():
+ parser = argparse.ArgumentParser(description='Process some integers.')
+ parser.add_argument('integers', metavar='N', type=int, nargs='+',
+ help='an integer for the accumulator')
+ parser.add_argument('--sum', dest='accumulate', action='store_const',
+ const=sum, default=max,
+ help='sum the integers (default: find the max)')
+ args = parser.parse_args([u'--sum', u'1', u'2'])
+
+ expected = frozenset((('accumulate', sum), ('integers', (1, 2))))
+
+ assert frozenset(co.CLIArgs.from_options(args).items()) == expected
diff --git a/test/units/utils/test_encrypt.py b/test/units/utils/test_encrypt.py
new file mode 100644
index 00000000..2cbe828a
--- /dev/null
+++ b/test/units/utils/test_encrypt.py
@@ -0,0 +1,168 @@
+# (c) 2018, Matthias Fuchs <matthias.s.fuchs@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import pytest
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+from ansible.plugins.filter.core import get_encrypted_password
+from ansible.utils import encrypt
+
+
+class passlib_off(object):
+ def __init__(self):
+ self.orig = encrypt.PASSLIB_AVAILABLE
+
+ def __enter__(self):
+ encrypt.PASSLIB_AVAILABLE = False
+ return self
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ encrypt.PASSLIB_AVAILABLE = self.orig
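+
+# Usage sketch for the helper above (illustration only): force the stdlib
+# crypt code paths even when passlib is importable in the test environment:
+#
+# with passlib_off():
+# assert not encrypt.PASSLIB_AVAILABLE
+# encrypt.do_encrypt("123", "md5_crypt", salt="12345678")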
+
+
+def assert_hash(expected, secret, algorithm, **settings):
+
+ if encrypt.PASSLIB_AVAILABLE:
+ assert encrypt.passlib_or_crypt(secret, algorithm, **settings) == expected
+ assert encrypt.PasslibHash(algorithm).hash(secret, **settings) == expected
+ else:
+ assert encrypt.passlib_or_crypt(secret, algorithm, **settings) == expected
+ with pytest.raises(AnsibleError) as excinfo:
+ encrypt.PasslibHash(algorithm).hash(secret, **settings)
+ assert excinfo.value.args[0] == "passlib must be installed to hash with '%s'" % algorithm
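+
+# assert_hash above exercises both backends: with passlib available, both
+# passlib_or_crypt() and PasslibHash must produce the expected hash; without
+# it, passlib_or_crypt() falls back to crypt while PasslibHash raises
+# AnsibleError.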
+
+
+@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib')
+def test_encrypt_with_rounds_no_passlib():
+ with passlib_off():
+ assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7",
+ secret="123", algorithm="sha256_crypt", salt="12345678", rounds=5000)
+ assert_hash("$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/",
+ secret="123", algorithm="sha256_crypt", salt="12345678", rounds=10000)
+ assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.",
+ secret="123", algorithm="sha512_crypt", salt="12345678", rounds=5000)
+
+
+# If passlib is not installed, this is identical to the test_encrypt_with_rounds_no_passlib() test
+@pytest.mark.skipif(not encrypt.PASSLIB_AVAILABLE, reason='passlib must be installed to run this test')
+def test_encrypt_with_rounds():
+ assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7",
+ secret="123", algorithm="sha256_crypt", salt="12345678", rounds=5000)
+ assert_hash("$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/",
+ secret="123", algorithm="sha256_crypt", salt="12345678", rounds=10000)
+ assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.",
+ secret="123", algorithm="sha512_crypt", salt="12345678", rounds=5000)
+
+
+@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib')
+def test_encrypt_default_rounds_no_passlib():
+ with passlib_off():
+ assert_hash("$1$12345678$tRy4cXc3kmcfRZVj4iFXr/",
+ secret="123", algorithm="md5_crypt", salt="12345678")
+ assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7",
+ secret="123", algorithm="sha256_crypt", salt="12345678")
+ assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.",
+ secret="123", algorithm="sha512_crypt", salt="12345678")
+
+ assert encrypt.CryptHash("md5_crypt").hash("123")
+
+
+# If passlib is not installed, this is identical to the test_encrypt_default_rounds_no_passlib() test
+@pytest.mark.skipif(not encrypt.PASSLIB_AVAILABLE, reason='passlib must be installed to run this test')
+def test_encrypt_default_rounds():
+ assert_hash("$1$12345678$tRy4cXc3kmcfRZVj4iFXr/",
+ secret="123", algorithm="md5_crypt", salt="12345678")
+ assert_hash("$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7",
+ secret="123", algorithm="sha256_crypt", salt="12345678")
+ assert_hash("$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.",
+ secret="123", algorithm="sha512_crypt", salt="12345678")
+
+ assert encrypt.PasslibHash("md5_crypt").hash("123")
+
+
+@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib')
+def test_password_hash_filter_no_passlib():
+ with passlib_off():
+ assert not encrypt.PASSLIB_AVAILABLE
+ assert get_encrypted_password("123", "md5", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/"
+
+ with pytest.raises(AnsibleFilterError):
+ get_encrypted_password("123", "crypt16", salt="12")
+
+
+def test_password_hash_filter_passlib():
+ if not encrypt.PASSLIB_AVAILABLE:
+ pytest.skip("passlib not available")
+
+ with pytest.raises(AnsibleFilterError):
+ get_encrypted_password("123", "sha257", salt="12345678")
+
+ # Uses 5000 rounds by default for sha256 matching crypt behaviour
+ assert get_encrypted_password("123", "sha256", salt="12345678") == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7"
+ assert get_encrypted_password("123", "sha256", salt="12345678", rounds=5000) == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7"
+
+ assert (get_encrypted_password("123", "sha256", salt="12345678", rounds=10000) ==
+ "$5$rounds=10000$12345678$JBinliYMFEcBeAXKZnLjenhgEhTmJBvZn3aR8l70Oy/")
+
+ assert (get_encrypted_password("123", "sha512", salt="12345678", rounds=6000) ==
+ "$6$rounds=6000$12345678$l/fC67BdJwZrJ7qneKGP1b6PcatfBr0dI7W6JLBrsv8P1wnv/0pu4WJsWq5p6WiXgZ2gt9Aoir3MeORJxg4.Z/")
+
+ assert (get_encrypted_password("123", "sha512", salt="12345678", rounds=5000) ==
+ "$6$12345678$LcV9LQiaPekQxZ.OfkMADjFdSO2k9zfbDQrHPVcYjSLqSdjLYpsgqviYvTEP/R41yPmhH3CCeEDqVhW1VHr3L.")
+
+ assert get_encrypted_password("123", "crypt16", salt="12") == "12pELHK2ME3McUFlHxel6uMM"
+
+ # Try algorithm that uses a raw salt
+ assert get_encrypted_password("123", "pbkdf2_sha256")
+
+
+@pytest.mark.skipif(sys.platform.startswith('darwin'), reason='macOS requires passlib')
+def test_do_encrypt_no_passlib():
+ with passlib_off():
+ assert not encrypt.PASSLIB_AVAILABLE
+ assert encrypt.do_encrypt("123", "md5_crypt", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/"
+
+ with pytest.raises(AnsibleError):
+ encrypt.do_encrypt("123", "crypt16", salt="12")
+
+
+def test_do_encrypt_passlib():
+ if not encrypt.PASSLIB_AVAILABLE:
+ pytest.skip("passlib not available")
+
+ with pytest.raises(AnsibleError):
+ encrypt.do_encrypt("123", "sha257_crypt", salt="12345678")
+
+ # Uses 5000 rounds by default for sha256 matching crypt behaviour.
+ assert encrypt.do_encrypt("123", "sha256_crypt", salt="12345678") == "$5$12345678$uAZsE3BenI2G.nA8DpTl.9Dc8JiqacI53pEqRr5ppT7"
+
+ assert encrypt.do_encrypt("123", "md5_crypt", salt="12345678") == "$1$12345678$tRy4cXc3kmcfRZVj4iFXr/"
+
+ assert encrypt.do_encrypt("123", "crypt16", salt="12") == "12pELHK2ME3McUFlHxel6uMM"
+
+
+def test_random_salt():
+ res = encrypt.random_salt()
+ expected_salt_candidate_chars = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./'
+ assert len(res) == 8
+ for res_char in res:
+ assert res_char in expected_salt_candidate_chars
diff --git a/test/units/utils/test_helpers.py b/test/units/utils/test_helpers.py
new file mode 100644
index 00000000..ec37b39b
--- /dev/null
+++ b/test/units/utils/test_helpers.py
@@ -0,0 +1,34 @@
+# (c) 2015, Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible.utils.helpers import pct_to_int
+
+
+class TestHelpers(unittest.TestCase):
+
+ def test_pct_to_int(self):
+ self.assertEqual(pct_to_int(1, 100), 1)
+ self.assertEqual(pct_to_int(-1, 100), -1)
+ self.assertEqual(pct_to_int("1%", 10), 1)
+ self.assertEqual(pct_to_int("1%", 10, 0), 0)
+ self.assertEqual(pct_to_int("1", 100), 1)
+ self.assertEqual(pct_to_int("10%", 100), 10)
diff --git a/test/units/utils/test_isidentifier.py b/test/units/utils/test_isidentifier.py
new file mode 100644
index 00000000..de6de642
--- /dev/null
+++ b/test/units/utils/test_isidentifier.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.utils.vars import isidentifier
+
+
+# Originally posted at: http://stackoverflow.com/a/29586366
+
+
+@pytest.mark.parametrize(
+ "identifier", [
+ "foo", "foo1_23",
+ ]
+)
+def test_valid_identifier(identifier):
+ assert isidentifier(identifier)
+
+
+@pytest.mark.parametrize(
+ "identifier", [
+ "pass", "foo ", " foo", "1234", "1234abc", "", " ", "foo bar", "no-dashed-names-for-you",
+ ]
+)
+def test_invalid_identifier(identifier):
+ assert not isidentifier(identifier)
+
+
+def test_keywords_not_in_PY2():
+ """In Python 2 ("True", "False", "None") are not keywords. The isidentifier
+ method ensures that those are treated as keywords on both Python 2 and 3.
+ """
+ assert not isidentifier("True")
+ assert not isidentifier("False")
+ assert not isidentifier("None")
+
+
+def test_non_ascii():
+ """In Python 3 non-ascii characters are allowed as opposed to Python 2. The
+ isidentifier method ensures that those are treated as keywords on both
+ Python 2 and 3.
+ """
+ assert not isidentifier("křížek")
diff --git a/test/units/utils/test_plugin_docs.py b/test/units/utils/test_plugin_docs.py
new file mode 100644
index 00000000..ff973b1e
--- /dev/null
+++ b/test/units/utils/test_plugin_docs.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import copy
+
+import pytest
+
+from ansible.utils.plugin_docs import (
+ add_collection_to_versions_and_dates,
+)
+
+
+ADD_TESTS = [
+ (
+ # Module options
+ True,
+ False,
+ {
+ 'author': 'x',
+ 'version_added': '1.0.0',
+ 'deprecated': {
+ 'removed_in': '2.0.0',
+ },
+ 'options': {
+ 'test': {
+ 'description': '',
+ 'type': 'str',
+ 'version_added': '1.1.0',
+ 'deprecated': {
+ # should not be touched since this isn't a plugin
+ 'removed_in': '2.0.0',
+ },
+ 'env': [
+ # should not be touched since this isn't a plugin
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ },
+ ],
+ 'ini': [
+ # should not be touched since this isn't a plugin
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ },
+ ],
+ 'vars': [
+ # should not be touched since this isn't a plugin
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'removed_at_date': '2020-01-01',
+ },
+ },
+ ],
+ },
+ 'subtest': {
+ 'description': '',
+ 'type': 'dict',
+ 'deprecated': {
+ # should not be touched since this isn't a plugin
+ 'version': '2.0.0',
+ },
+ 'suboptions': {
+ 'suboption': {
+ 'description': '',
+ 'type': 'int',
+ 'version_added': '1.2.0',
+ }
+ },
+ }
+ },
+ },
+ {
+ 'author': 'x',
+ 'version_added': '1.0.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ 'removed_in': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'options': {
+ 'test': {
+ 'description': '',
+ 'type': 'str',
+ 'version_added': '1.1.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ # should not be touched since this isn't a plugin
+ 'removed_in': '2.0.0',
+ },
+ 'env': [
+ # should not be touched since this isn't a plugin
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ },
+ ],
+ 'ini': [
+ # should not be touched since this isn't a plugin
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ },
+ ],
+ 'vars': [
+ # should not be touched since this isn't a plugin
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'removed_at_date': '2020-01-01',
+ },
+ },
+ ],
+ },
+ 'subtest': {
+ 'description': '',
+ 'type': 'dict',
+ 'deprecated': {
+ # should not be touched since this isn't a plugin
+ 'version': '2.0.0',
+ },
+ 'suboptions': {
+ 'suboption': {
+ 'description': '',
+ 'type': 'int',
+ 'version_added': '1.2.0',
+ 'version_added_collection': 'foo.bar',
+ }
+ },
+ }
+ },
+ },
+ ),
+ (
+ # Module options
+ True,
+ False,
+ {
+ 'author': 'x',
+ 'deprecated': {
+ 'removed_at_date': '2020-01-01',
+ },
+ },
+ {
+ 'author': 'x',
+ 'deprecated': {
+ 'removed_at_date': '2020-01-01',
+ 'removed_from_collection': 'foo.bar',
+ },
+ },
+ ),
+ (
+ # Plugin options
+ False,
+ False,
+ {
+ 'author': 'x',
+ 'version_added': '1.0.0',
+ 'deprecated': {
+ 'removed_in': '2.0.0',
+ },
+ 'options': {
+ 'test': {
+ 'description': '',
+ 'type': 'str',
+ 'version_added': '1.1.0',
+ 'deprecated': {
+ # should not be touched since this is the wrong name
+ 'removed_in': '2.0.0',
+ },
+ 'env': [
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ },
+ ],
+ 'ini': [
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ },
+ ],
+ 'vars': [
+ {
+ 'version_added': '1.3.0',
+ 'deprecated': {
+ 'removed_at_date': '2020-01-01',
+ },
+ },
+ ],
+ },
+ 'subtest': {
+ 'description': '',
+ 'type': 'dict',
+ 'deprecated': {
+ 'version': '2.0.0',
+ },
+ 'suboptions': {
+ 'suboption': {
+ 'description': '',
+ 'type': 'int',
+ 'version_added': '1.2.0',
+ }
+ },
+ }
+ },
+ },
+ {
+ 'author': 'x',
+ 'version_added': '1.0.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ 'removed_in': '2.0.0',
+ 'removed_from_collection': 'foo.bar',
+ },
+ 'options': {
+ 'test': {
+ 'description': '',
+ 'type': 'str',
+ 'version_added': '1.1.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ # should not be touched since this is the wrong name
+ 'removed_in': '2.0.0',
+ },
+ 'env': [
+ {
+ 'version_added': '1.3.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ },
+ ],
+ 'ini': [
+ {
+ 'version_added': '1.3.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ },
+ ],
+ 'vars': [
+ {
+ 'version_added': '1.3.0',
+ 'version_added_collection': 'foo.bar',
+ 'deprecated': {
+ 'removed_at_date': '2020-01-01',
+ 'collection_name': 'foo.bar',
+ },
+ },
+ ],
+ },
+ 'subtest': {
+ 'description': '',
+ 'type': 'dict',
+ 'deprecated': {
+ 'version': '2.0.0',
+ 'collection_name': 'foo.bar',
+ },
+ 'suboptions': {
+ 'suboption': {
+ 'description': '',
+ 'type': 'int',
+ 'version_added': '1.2.0',
+ 'version_added_collection': 'foo.bar',
+ }
+ },
+ }
+ },
+ },
+ ),
+ (
+ # Return values
+ True, # this value is ignored for return value docs
+ True,
+ {
+ 'rv1': {
+ 'version_added': '1.0.0',
+ 'type': 'dict',
+ 'contains': {
+ 'srv1': {
+ 'version_added': '1.1.0',
+ },
+ 'srv2': {
+ },
+ }
+ },
+ },
+ {
+ 'rv1': {
+ 'version_added': '1.0.0',
+ 'version_added_collection': 'foo.bar',
+ 'type': 'dict',
+ 'contains': {
+ 'srv1': {
+ 'version_added': '1.1.0',
+ 'version_added_collection': 'foo.bar',
+ },
+ 'srv2': {
+ },
+ }
+ },
+ },
+ ),
+]
+
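+# Each ADD_TESTS entry is (is_module, return_docs, input, expected): for
+# module docs only top-level and option-level version_added/deprecated
+# entries are tagged with the collection; for plugin docs the env/ini/vars
+# sub-entries are tagged as well; for return value docs version_added is
+# tagged recursively through 'contains'.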
+
+@pytest.mark.parametrize('is_module,return_docs,fragment,expected_fragment', ADD_TESTS)
+def test_add(is_module, return_docs, fragment, expected_fragment):
+ fragment_copy = copy.deepcopy(fragment)
+ add_collection_to_versions_and_dates(fragment_copy, 'foo.bar', is_module, return_docs)
+ assert fragment_copy == expected_fragment
diff --git a/test/units/utils/test_shlex.py b/test/units/utils/test_shlex.py
new file mode 100644
index 00000000..e13d302d
--- /dev/null
+++ b/test/units/utils/test_shlex.py
@@ -0,0 +1,41 @@
+# (c) 2015, Marius Gedminas <marius@gedmin.as>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible.utils.shlex import shlex_split
+
+
+class TestSplit(unittest.TestCase):
+
+ def test_trivial(self):
+ self.assertEqual(shlex_split("a b c"), ["a", "b", "c"])
+
+ def test_unicode(self):
+ self.assertEqual(shlex_split(u"a b \u010D"), [u"a", u"b", u"\u010D"])
+
+ def test_quoted(self):
+ self.assertEqual(shlex_split('"a b" c'), ["a b", "c"])
+
+ def test_comments(self):
+ self.assertEqual(shlex_split('"a b" c # d', comments=True), ["a b", "c"])
+
+ def test_error(self):
+ self.assertRaises(ValueError, shlex_split, 'a "b')
diff --git a/test/units/utils/test_unsafe_proxy.py b/test/units/utils/test_unsafe_proxy.py
new file mode 100644
index 00000000..205c0c65
--- /dev/null
+++ b/test/units/utils/test_unsafe_proxy.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import PY3
+from ansible.utils.unsafe_proxy import AnsibleUnsafe, AnsibleUnsafeBytes, AnsibleUnsafeText, wrap_var
+
+
+def test_wrap_var_text():
+ assert isinstance(wrap_var(u'foo'), AnsibleUnsafeText)
+
+
+def test_wrap_var_bytes():
+ assert isinstance(wrap_var(b'foo'), AnsibleUnsafeBytes)
+
+
+def test_wrap_var_string():
+ if PY3:
+ assert isinstance(wrap_var('foo'), AnsibleUnsafeText)
+ else:
+ assert isinstance(wrap_var('foo'), AnsibleUnsafeBytes)
+
+
+def test_wrap_var_dict():
+ assert isinstance(wrap_var(dict(foo='bar')), dict)
+ assert not isinstance(wrap_var(dict(foo='bar')), AnsibleUnsafe)
+ assert isinstance(wrap_var(dict(foo=u'bar'))['foo'], AnsibleUnsafeText)
+
+
+def test_wrap_var_dict_None():
+ assert wrap_var(dict(foo=None))['foo'] is None
+ assert not isinstance(wrap_var(dict(foo=None))['foo'], AnsibleUnsafe)
+
+
+def test_wrap_var_list():
+ assert isinstance(wrap_var(['foo']), list)
+ assert not isinstance(wrap_var(['foo']), AnsibleUnsafe)
+ assert isinstance(wrap_var([u'foo'])[0], AnsibleUnsafeText)
+
+
+def test_wrap_var_list_None():
+ assert wrap_var([None])[0] is None
+ assert not isinstance(wrap_var([None])[0], AnsibleUnsafe)
+
+
+def test_wrap_var_set():
+ assert isinstance(wrap_var(set(['foo'])), set)
+ assert not isinstance(wrap_var(set(['foo'])), AnsibleUnsafe)
+ for item in wrap_var(set([u'foo'])):
+ assert isinstance(item, AnsibleUnsafeText)
+
+
+def test_wrap_var_set_None():
+ for item in wrap_var(set([None])):
+ assert item is None
+ assert not isinstance(item, AnsibleUnsafe)
+
+
+def test_wrap_var_tuple():
+ assert isinstance(wrap_var(('foo',)), tuple)
+ assert not isinstance(wrap_var(('foo',)), AnsibleUnsafe)
+ assert isinstance(wrap_var(('foo',))[0], AnsibleUnsafe)
+
+
+def test_wrap_var_tuple_None():
+ assert wrap_var((None,))[0] is None
+ assert not isinstance(wrap_var((None,))[0], AnsibleUnsafe)
+
+
+def test_wrap_var_None():
+ assert wrap_var(None) is None
+ assert not isinstance(wrap_var(None), AnsibleUnsafe)
+
+
+def test_wrap_var_unsafe_text():
+ assert isinstance(wrap_var(AnsibleUnsafeText(u'foo')), AnsibleUnsafeText)
+
+
+def test_wrap_var_unsafe_bytes():
+ assert isinstance(wrap_var(AnsibleUnsafeBytes(b'foo')), AnsibleUnsafeBytes)
+
+
+def test_wrap_var_no_ref():
+ thing = {
+ 'foo': {
+ 'bar': 'baz'
+ },
+ 'bar': ['baz', 'qux'],
+ 'baz': ('qux',),
+ 'none': None,
+ 'text': 'text',
+ }
+ wrapped_thing = wrap_var(thing)
+ assert thing is not wrapped_thing
+ assert thing['foo'] is not wrapped_thing['foo']
+ assert thing['bar'][0] is not wrapped_thing['bar'][0]
+ assert thing['baz'][0] is not wrapped_thing['baz'][0]
+ # None is a singleton, so the wrapped structure shares the same object
+ assert thing['none'] is wrapped_thing['none']
+ assert thing['text'] is not wrapped_thing['text']
+
+
+def test_AnsibleUnsafeText():
+ assert isinstance(AnsibleUnsafeText(u'foo'), AnsibleUnsafe)
+
+
+def test_AnsibleUnsafeBytes():
+ assert isinstance(AnsibleUnsafeBytes(b'foo'), AnsibleUnsafe)
diff --git a/test/units/utils/test_vars.py b/test/units/utils/test_vars.py
new file mode 100644
index 00000000..c92ce4b6
--- /dev/null
+++ b/test/units/utils/test_vars.py
@@ -0,0 +1,282 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+
+from units.compat import mock, unittest
+from ansible.errors import AnsibleError
+from ansible.utils.vars import combine_vars, merge_hash
+
+
+class TestVariableUtils(unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ combine_vars_merge_data = (
+ dict(
+ a=dict(a=1),
+ b=dict(b=2),
+ result=dict(a=1, b=2),
+ ),
+ dict(
+ a=dict(a=1, c=dict(foo='bar')),
+ b=dict(b=2, c=dict(baz='bam')),
+ result=dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
+ ),
+ dict(
+ a=defaultdict(a=1, c=defaultdict(foo='bar')),
+ b=dict(b=2, c=dict(baz='bam')),
+ result=defaultdict(a=1, b=2, c=defaultdict(foo='bar', baz='bam'))
+ ),
+ )
+ combine_vars_replace_data = (
+ dict(
+ a=dict(a=1),
+ b=dict(b=2),
+ result=dict(a=1, b=2)
+ ),
+ dict(
+ a=dict(a=1, c=dict(foo='bar')),
+ b=dict(b=2, c=dict(baz='bam')),
+ result=dict(a=1, b=2, c=dict(baz='bam'))
+ ),
+ dict(
+ a=defaultdict(a=1, c=dict(foo='bar')),
+ b=dict(b=2, c=defaultdict(baz='bam')),
+ result=defaultdict(a=1, b=2, c=defaultdict(baz='bam'))
+ ),
+ )
+
+ def test_combine_vars_improper_args(self):
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
+ with self.assertRaises(AnsibleError):
+ combine_vars([1, 2, 3], dict(a=1))
+ with self.assertRaises(AnsibleError):
+ combine_vars(dict(a=1), [1, 2, 3])
+
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
+ with self.assertRaises(AnsibleError):
+ combine_vars([1, 2, 3], dict(a=1))
+ with self.assertRaises(AnsibleError):
+ combine_vars(dict(a=1), [1, 2, 3])
+
+ def test_combine_vars_replace(self):
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
+ for test in self.combine_vars_replace_data:
+ self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
+
+ def test_combine_vars_merge(self):
+ with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
+ for test in self.combine_vars_merge_data:
+ self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
+
+ merge_hash_data = {
+ "low_prio": {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "low_value",
+ "list": ["low_value"]
+ }
+ },
+ "b": [1, 1, 2, 3]
+ },
+ "high_prio": {
+ "a": {
+ "a'": {
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["high_value"]
+ }
+ },
+ "b": [3, 4, 4, {"5": "value"}]
+ }
+ }
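+
+# The tests below cover each list-merge strategy accepted by merge_hash:
+# 'replace', 'keep', 'append', 'prepend', plus the '_rp' variants, which
+# first drop from the lower-priority list any items also present in the
+# higher-priority one (hence [1, 1, 2] rather than [1, 1, 2, 3] below).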
+
+ def test_merge_hash_simple(self):
+ for test in self.combine_vars_merge_data:
+ self.assertEqual(merge_hash(test['a'], test['b']), test['result'])
+
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["high_value"]
+ }
+ },
+ "b": high['b']
+ }
+ self.assertEqual(merge_hash(low, high), expected)
+
+ def test_merge_hash_non_recursive_and_list_replace(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = high
+ self.assertEqual(merge_hash(low, high, False, 'replace'), expected)
+
+ def test_merge_hash_non_recursive_and_list_keep(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": high['a'],
+ "b": low['b']
+ }
+ self.assertEqual(merge_hash(low, high, False, 'keep'), expected)
+
+ def test_merge_hash_non_recursive_and_list_append(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": high['a'],
+ "b": low['b'] + high['b']
+ }
+ self.assertEqual(merge_hash(low, high, False, 'append'), expected)
+
+ def test_merge_hash_non_recursive_and_list_prepend(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": high['a'],
+ "b": high['b'] + low['b']
+ }
+ self.assertEqual(merge_hash(low, high, False, 'prepend'), expected)
+
+ def test_merge_hash_non_recursive_and_list_append_rp(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": high['a'],
+ "b": [1, 1, 2] + high['b']
+ }
+ self.assertEqual(merge_hash(low, high, False, 'append_rp'), expected)
+
+ def test_merge_hash_non_recursive_and_list_prepend_rp(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": high['a'],
+ "b": high['b'] + [1, 1, 2]
+ }
+ self.assertEqual(merge_hash(low, high, False, 'prepend_rp'), expected)
+
+ def test_merge_hash_recursive_and_list_replace(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["high_value"]
+ }
+ },
+ "b": high['b']
+ }
+ self.assertEqual(merge_hash(low, high, True, 'replace'), expected)
+
+ def test_merge_hash_recursive_and_list_keep(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["low_value"]
+ }
+ },
+ "b": low['b']
+ }
+ self.assertEqual(merge_hash(low, high, True, 'keep'), expected)
+
+ def test_merge_hash_recursive_and_list_append(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["low_value", "high_value"]
+ }
+ },
+ "b": low['b'] + high['b']
+ }
+ self.assertEqual(merge_hash(low, high, True, 'append'), expected)
+
+ def test_merge_hash_recursive_and_list_prepend(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["high_value", "low_value"]
+ }
+ },
+ "b": high['b'] + low['b']
+ }
+ self.assertEqual(merge_hash(low, high, True, 'prepend'), expected)
+
+ def test_merge_hash_recursive_and_list_append_rp(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["low_value", "high_value"]
+ }
+ },
+ "b": [1, 1, 2] + high['b']
+ }
+ self.assertEqual(merge_hash(low, high, True, 'append_rp'), expected)
+
+ def test_merge_hash_recursive_and_list_prepend_rp(self):
+ low = self.merge_hash_data['low_prio']
+ high = self.merge_hash_data['high_prio']
+ expected = {
+ "a": {
+ "a'": {
+ "x": "low_value",
+ "y": "high_value",
+ "z": "high_value",
+ "list": ["high_value", "low_value"]
+ }
+ },
+ "b": high['b'] + [1, 1, 2]
+ }
+ self.assertEqual(merge_hash(low, high, True, 'prepend_rp'), expected)
diff --git a/test/units/utils/test_version.py b/test/units/utils/test_version.py
new file mode 100644
index 00000000..7d04c112
--- /dev/null
+++ b/test/units/utils/test_version.py
@@ -0,0 +1,335 @@
+# -*- coding: utf-8 -*-
+# (c) 2020 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from distutils.version import LooseVersion, StrictVersion
+
+import pytest
+
+from ansible.utils.version import _Alpha, _Numeric, SemanticVersion
+
+
+EQ = [
+ ('1.0.0', '1.0.0', True),
+ ('1.0.0', '1.0.0-beta', False),
+ ('1.0.0-beta2+build1', '1.0.0-beta.2+build.1', False),
+ ('1.0.0-beta+build', '1.0.0-beta+build', True),
+ ('1.0.0-beta+build1', '1.0.0-beta+build2', True),
+ ('1.0.0-beta+a', '1.0.0-alpha+bar', False),
+]
+
+NE = [
+ ('1.0.0', '1.0.0', False),
+ ('1.0.0', '1.0.0-beta', True),
+ ('1.0.0-beta2+build1', '1.0.0-beta.2+build.1', True),
+ ('1.0.0-beta+build', '1.0.0-beta+build', False),
+ ('1.0.0-beta+a', '1.0.0-alpha+bar', True),
+]
+
+LT = [
+ ('1.0.0', '2.0.0', True),
+ ('1.0.0-beta', '2.0.0-alpha', True),
+ ('1.0.0-alpha', '2.0.0-beta', True),
+ ('1.0.0-alpha', '1.0.0', True),
+ ('1.0.0-beta', '1.0.0-alpha3', False),
+ ('1.0.0+foo', '1.0.0-alpha', False),
+ ('1.0.0-beta.1', '1.0.0-beta.a', True),
+ ('1.0.0-beta+a', '1.0.0-alpha+bar', False),
+]
+
+GT = [
+ ('1.0.0', '2.0.0', False),
+ ('1.0.0-beta', '2.0.0-alpha', False),
+ ('1.0.0-alpha', '2.0.0-beta', False),
+ ('1.0.0-alpha', '1.0.0', False),
+ ('1.0.0-beta', '1.0.0-alpha3', True),
+ ('1.0.0+foo', '1.0.0-alpha', True),
+ ('1.0.0-beta.1', '1.0.0-beta.a', False),
+ ('1.0.0-beta+a', '1.0.0-alpha+bar', True),
+]
+
+LE = [
+ ('1.0.0', '1.0.0', True),
+ ('1.0.0', '2.0.0', True),
+ ('1.0.0-alpha', '1.0.0-beta', True),
+ ('1.0.0-beta', '1.0.0-alpha', False),
+]
+
+GE = [
+ ('1.0.0', '1.0.0', True),
+ ('1.0.0', '2.0.0', False),
+ ('1.0.0-alpha', '1.0.0-beta', False),
+ ('1.0.0-beta', '1.0.0-alpha', True),
+]
+
+VALID = [
+ "0.0.4",
+ "1.2.3",
+ "10.20.30",
+ "1.1.2-prerelease+meta",
+ "1.1.2+meta",
+ "1.1.2+meta-valid",
+ "1.0.0-alpha",
+ "1.0.0-beta",
+ "1.0.0-alpha.beta",
+ "1.0.0-alpha.beta.1",
+ "1.0.0-alpha.1",
+ "1.0.0-alpha0.valid",
+ "1.0.0-alpha.0valid",
+ "1.0.0-alpha-a.b-c-somethinglong+build.1-aef.1-its-okay",
+ "1.0.0-rc.1+build.1",
+ "2.0.0-rc.1+build.123",
+ "1.2.3-beta",
+ "10.2.3-DEV-SNAPSHOT",
+ "1.2.3-SNAPSHOT-123",
+ "1.0.0",
+ "2.0.0",
+ "1.1.7",
+ "2.0.0+build.1848",
+ "2.0.1-alpha.1227",
+ "1.0.0-alpha+beta",
+ "1.2.3----RC-SNAPSHOT.12.9.1--.12+788",
+ "1.2.3----R-S.12.9.1--.12+meta",
+ "1.2.3----RC-SNAPSHOT.12.9.1--.12",
+ "1.0.0+0.build.1-rc.10000aaa-kk-0.1",
+ "99999999999999999999999.999999999999999999.99999999999999999",
+ "1.0.0-0A.is.legal",
+]
+
+INVALID = [
+ "1",
+ "1.2",
+ "1.2.3-0123",
+ "1.2.3-0123.0123",
+ "1.1.2+.123",
+ "+invalid",
+ "-invalid",
+ "-invalid+invalid",
+ "-invalid.01",
+ "alpha",
+ "alpha.beta",
+ "alpha.beta.1",
+ "alpha.1",
+ "alpha+beta",
+ "alpha_beta",
+ "alpha.",
+ "alpha..",
+ "beta",
+ "1.0.0-alpha_beta",
+ "-alpha.",
+ "1.0.0-alpha..",
+ "1.0.0-alpha..1",
+ "1.0.0-alpha...1",
+ "1.0.0-alpha....1",
+ "1.0.0-alpha.....1",
+ "1.0.0-alpha......1",
+ "1.0.0-alpha.......1",
+ "01.1.1",
+ "1.01.1",
+ "1.1.01",
+ "1.2",
+ "1.2.3.DEV",
+ "1.2-SNAPSHOT",
+ "1.2.31.2.3----RC-SNAPSHOT.12.09.1--..12+788",
+ "1.2-RC-SNAPSHOT",
+ "-1.0.3-gamma+b7718",
+ "+justmeta",
+ "9.8.7+meta+meta",
+ "9.8.7-whatever+meta+meta",
+]
+
+PRERELEASE = [
+ ('1.0.0-alpha', True),
+ ('1.0.0-alpha.1', True),
+ ('1.0.0-0.3.7', True),
+ ('1.0.0-x.7.z.92', True),
+ ('0.1.2', False),
+ ('0.1.2+bob', False),
+ ('1.0.0', False),
+]
+
+STABLE = [
+ ('1.0.0-alpha', False),
+ ('1.0.0-alpha.1', False),
+ ('1.0.0-0.3.7', False),
+ ('1.0.0-x.7.z.92', False),
+ ('0.1.2', False),
+ ('0.1.2+bob', False),
+ ('1.0.0', True),
+ ('1.0.0+bob', True),
+]
+
+LOOSE_VERSION = [
+ (LooseVersion('1'), SemanticVersion('1.0.0')),
+ (LooseVersion('1-alpha'), SemanticVersion('1.0.0-alpha')),
+ (LooseVersion('1.0.0-alpha+build'), SemanticVersion('1.0.0-alpha+build')),
+]
+
+LOOSE_VERSION_INVALID = [
+ LooseVersion('1.a.3'),
+ LooseVersion(),
+ 'bar',
+ StrictVersion('1.2.3'),
+]
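+
+# As the tables above suggest, from_loose_version pads missing components
+# (LooseVersion('1') becomes '1.0.0') while keeping prerelease and build
+# parts; inputs that are not well-formed LooseVersions raise AttributeError
+# or ValueError (see test_from_loose_version_invalid below).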
+
+
+def test_semanticversion_none():
+ assert SemanticVersion().major is None
+
+
+@pytest.mark.parametrize('left,right,expected', EQ)
+def test_eq(left, right, expected):
+ assert (SemanticVersion(left) == SemanticVersion(right)) is expected
+
+
+@pytest.mark.parametrize('left,right,expected', NE)
+def test_ne(left, right, expected):
+ assert (SemanticVersion(left) != SemanticVersion(right)) is expected
+
+
+@pytest.mark.parametrize('left,right,expected', LT)
+def test_lt(left, right, expected):
+ assert (SemanticVersion(left) < SemanticVersion(right)) is expected
+
+
+@pytest.mark.parametrize('left,right,expected', LE)
+def test_le(left, right, expected):
+ assert (SemanticVersion(left) <= SemanticVersion(right)) is expected
+
+
+@pytest.mark.parametrize('left,right,expected', GT)
+def test_gt(left, right, expected):
+ assert (SemanticVersion(left) > SemanticVersion(right)) is expected
+
+
+@pytest.mark.parametrize('left,right,expected', GE)
+def test_ge(left, right, expected):
+ assert (SemanticVersion(left) >= SemanticVersion(right)) is expected
+
+
+@pytest.mark.parametrize('value', VALID)
+def test_valid(value):
+ SemanticVersion(value)
+
+
+@pytest.mark.parametrize('value', INVALID)
+def test_invalid(value):
+ pytest.raises(ValueError, SemanticVersion, value)
+
+
+def test_example_precedence():
+ # https://semver.org/#spec-item-11
+ sv = SemanticVersion
+ assert sv('1.0.0') < sv('2.0.0') < sv('2.1.0') < sv('2.1.1')
+ assert sv('1.0.0-alpha') < sv('1.0.0')
+ assert sv('1.0.0-alpha') < sv('1.0.0-alpha.1') < sv('1.0.0-alpha.beta')
+ assert sv('1.0.0-beta') < sv('1.0.0-beta.2') < sv('1.0.0-beta.11') < sv('1.0.0-rc.1') < sv('1.0.0')
+
+
+@pytest.mark.parametrize('value,expected', PRERELEASE)
+def test_prerelease(value, expected):
+ assert SemanticVersion(value).is_prerelease is expected
+
+
+@pytest.mark.parametrize('value,expected', STABLE)
+def test_stable(value, expected):
+ assert SemanticVersion(value).is_stable is expected
+
+
+@pytest.mark.parametrize('value,expected', LOOSE_VERSION)
+def test_from_loose_version(value, expected):
+ assert SemanticVersion.from_loose_version(value) == expected
+
+
+@pytest.mark.parametrize('value', LOOSE_VERSION_INVALID)
+def test_from_loose_version_invalid(value):
+ pytest.raises((AttributeError, ValueError), SemanticVersion.from_loose_version, value)
+
+
+def test_comparison_with_string():
+ assert SemanticVersion('1.0.0') > '0.1.0'
+
+
+def test_alpha():
+ assert _Alpha('a') == _Alpha('a')
+ assert _Alpha('a') == 'a'
+ assert _Alpha('a') != _Alpha('b')
+ assert _Alpha('a') != 1
+ assert _Alpha('a') < _Alpha('b')
+ assert _Alpha('a') < 'c'
+ assert _Alpha('a') > _Numeric(1)
+ with pytest.raises(ValueError):
+ _Alpha('a') < None
+ assert _Alpha('a') <= _Alpha('a')
+ assert _Alpha('a') <= _Alpha('b')
+ assert _Alpha('b') >= _Alpha('a')
+ assert _Alpha('b') >= _Alpha('b')
+
+ # The following 3*6 tests check that all comparison operators perform
+ # as expected. DO NOT remove any of them, or reformulate them (to remove
+ # the explicit `not`)!
+
+ assert _Alpha('a') == _Alpha('a')
+ assert not _Alpha('a') != _Alpha('a') # pylint: disable=unneeded-not
+ assert not _Alpha('a') < _Alpha('a') # pylint: disable=unneeded-not
+ assert _Alpha('a') <= _Alpha('a')
+ assert not _Alpha('a') > _Alpha('a') # pylint: disable=unneeded-not
+ assert _Alpha('a') >= _Alpha('a')
+
+ assert not _Alpha('a') == _Alpha('b') # pylint: disable=unneeded-not
+ assert _Alpha('a') != _Alpha('b')
+ assert _Alpha('a') < _Alpha('b')
+ assert _Alpha('a') <= _Alpha('b')
+ assert not _Alpha('a') > _Alpha('b') # pylint: disable=unneeded-not
+ assert not _Alpha('a') >= _Alpha('b') # pylint: disable=unneeded-not
+
+ assert not _Alpha('b') == _Alpha('a') # pylint: disable=unneeded-not
+ assert _Alpha('b') != _Alpha('a')
+ assert not _Alpha('b') < _Alpha('a') # pylint: disable=unneeded-not
+ assert not _Alpha('b') <= _Alpha('a') # pylint: disable=unneeded-not
+ assert _Alpha('b') > _Alpha('a')
+ assert _Alpha('b') >= _Alpha('a')
+
+
+def test_numeric():
+ assert _Numeric(1) == _Numeric(1)
+ assert _Numeric(1) == 1
+ assert _Numeric(1) != _Numeric(2)
+ assert _Numeric(1) != 'a'
+ assert _Numeric(1) < _Numeric(2)
+ assert _Numeric(1) < 3
+ assert _Numeric(1) < _Alpha('b')
+ with pytest.raises(ValueError):
+ _Numeric(1) < None
+ assert _Numeric(1) <= _Numeric(1)
+ assert _Numeric(1) <= _Numeric(2)
+ assert _Numeric(2) >= _Numeric(1)
+ assert _Numeric(2) >= _Numeric(2)
+
+ # The following 3*6 tests check that all comparison operators perform
+ # as expected. DO NOT remove any of them, or reformulate them (to remove
+ # the explicit `not`)!
+
+ assert _Numeric(1) == _Numeric(1)
+ assert not _Numeric(1) != _Numeric(1) # pylint: disable=unneeded-not
+ assert not _Numeric(1) < _Numeric(1) # pylint: disable=unneeded-not
+ assert _Numeric(1) <= _Numeric(1)
+ assert not _Numeric(1) > _Numeric(1) # pylint: disable=unneeded-not
+ assert _Numeric(1) >= _Numeric(1)
+
+ assert not _Numeric(1) == _Numeric(2) # pylint: disable=unneeded-not
+ assert _Numeric(1) != _Numeric(2)
+ assert _Numeric(1) < _Numeric(2)
+ assert _Numeric(1) <= _Numeric(2)
+ assert not _Numeric(1) > _Numeric(2) # pylint: disable=unneeded-not
+ assert not _Numeric(1) >= _Numeric(2) # pylint: disable=unneeded-not
+
+ assert not _Numeric(2) == _Numeric(1) # pylint: disable=unneeded-not
+ assert _Numeric(2) != _Numeric(1)
+ assert not _Numeric(2) < _Numeric(1) # pylint: disable=unneeded-not
+ assert not _Numeric(2) <= _Numeric(1) # pylint: disable=unneeded-not
+ assert _Numeric(2) > _Numeric(1)
+ assert _Numeric(2) >= _Numeric(1)
diff --git a/test/units/vars/__init__.py b/test/units/vars/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/units/vars/__init__.py
diff --git a/test/units/vars/test_module_response_deepcopy.py b/test/units/vars/test_module_response_deepcopy.py
new file mode 100644
index 00000000..78f9de0e
--- /dev/null
+++ b/test/units/vars/test_module_response_deepcopy.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.vars.clean import module_response_deepcopy
+
+import pytest
+
+
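+# module_response_deepcopy behaves like a pared-down copy.deepcopy for module
+# results: containers are copied recursively, while atomic values (and tuples
+# containing only immutables) are returned as the very same object, as the
+# tests below verify.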
+def test_module_response_deepcopy_basic():
+ x = 42
+ y = module_response_deepcopy(x)
+ assert y == x
+
+
+def test_module_response_deepcopy_atomic():
+ tests = [None, 42, 2**100, 3.14, True, False, 1j,
+ "hello", u"hello\u1234"]
+ for x in tests:
+ assert module_response_deepcopy(x) is x
+
+
+def test_module_response_deepcopy_list():
+ x = [[1, 2], 3]
+ y = module_response_deepcopy(x)
+ assert y == x
+ assert x is not y
+ assert x[0] is not y[0]
+
+
+def test_module_response_deepcopy_empty_tuple():
+ x = ()
+ y = module_response_deepcopy(x)
+ assert x is y
+
+
+@pytest.mark.skip(reason='No current support for this situation')
+def test_module_response_deepcopy_tuple():
+ x = ([1, 2], 3)
+ y = module_response_deepcopy(x)
+ assert y == x
+ assert x is not y
+ assert x[0] is not y[0]
+
+
+def test_module_response_deepcopy_tuple_of_immutables():
+ x = ((1, 2), 3)
+ y = module_response_deepcopy(x)
+ assert x is y
+
+
+def test_module_response_deepcopy_dict():
+ x = {"foo": [1, 2], "bar": 3}
+ y = module_response_deepcopy(x)
+ assert y == x
+ assert x is not y
+ assert x["foo"] is not y["foo"]
diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py
new file mode 100644
index 00000000..65a79286
--- /dev/null
+++ b/test/units/vars/test_variable_manager.py
@@ -0,0 +1,307 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from units.compat import unittest
+from units.compat.mock import MagicMock, patch
+from ansible.inventory.manager import InventoryManager
+from ansible.module_utils.six import iteritems
+from ansible.playbook.play import Play
+
+
+from units.mock.loader import DictDataLoader
+from units.mock.path import mock_unfrackpath_noop
+
+from ansible.vars.manager import VariableManager
+
+
+class TestVariableManager(unittest.TestCase):
+
+ def test_basic_manager(self):
+ fake_loader = DictDataLoader({})
+
+ mock_inventory = MagicMock()
+ v = VariableManager(loader=fake_loader, inventory=mock_inventory)
+ variables = v.get_vars(use_cache=False)
+
+ # Check var manager expected values, never check: ['omit', 'vars']
+ # FIXME: add the following ['ansible_version', 'ansible_playbook_python', 'groups']
+ for varname, value in (('playbook_dir', os.path.abspath('.')), ):
+ self.assertEqual(variables[varname], value)
+
+ def test_variable_manager_extra_vars(self):
+ fake_loader = DictDataLoader({})
+
+ extra_vars = dict(a=1, b=2, c=3)
+ mock_inventory = MagicMock()
+ v = VariableManager(loader=fake_loader, inventory=mock_inventory)
+
+ # override internal extra_vars loading
+ v._extra_vars = extra_vars
+
+ myvars = v.get_vars(use_cache=False)
+ for (key, val) in iteritems(extra_vars):
+ self.assertEqual(myvars.get(key), val)
+
+ def test_variable_manager_options_vars(self):
+ fake_loader = DictDataLoader({})
+
+ options_vars = dict(a=1, b=2, c=3)
+ mock_inventory = MagicMock()
+ v = VariableManager(loader=fake_loader, inventory=mock_inventory)
+
+ # override internal extra_vars loading (note: this sets _extra_vars, not _options_vars)
+ v._extra_vars = options_vars
+
+ myvars = v.get_vars(use_cache=False)
+ for (key, val) in iteritems(options_vars):
+ self.assertEqual(myvars.get(key), val)
+
+ def test_variable_manager_play_vars(self):
+ fake_loader = DictDataLoader({})
+
+ mock_play = MagicMock()
+ mock_play.get_vars.return_value = dict(foo="bar")
+ mock_play.get_roles.return_value = []
+ mock_play.get_vars_files.return_value = []
+
+ mock_inventory = MagicMock()
+ v = VariableManager(loader=fake_loader, inventory=mock_inventory)
+ self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar")
+
+ def test_variable_manager_play_vars_files(self):
+ fake_loader = DictDataLoader({
+ __file__: """
+ foo: bar
+ """
+ })
+
+ mock_play = MagicMock()
+ mock_play.get_vars.return_value = dict()
+ mock_play.get_roles.return_value = []
+ mock_play.get_vars_files.return_value = [__file__]
+
+ mock_inventory = MagicMock()
+ v = VariableManager(inventory=mock_inventory, loader=fake_loader)
+ self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar")
+
+ def test_variable_manager_task_vars(self):
+ # FIXME: BCS make this work
+ return
+
+ # pylint: disable=unreachable
+ fake_loader = DictDataLoader({})
+
+ mock_task = MagicMock()
+ mock_task._role = None
+ mock_task.loop = None
+ mock_task.get_vars.return_value = dict(foo="bar")
+ mock_task.get_include_params.return_value = dict()
+
+ mock_all = MagicMock()
+ mock_all.get_vars.return_value = {}
+ mock_all.get_file_vars.return_value = {}
+
+ mock_host = MagicMock()
+ mock_host.get.name.return_value = 'test01'
+ mock_host.get_vars.return_value = {}
+ mock_host.get_host_vars.return_value = {}
+
+ mock_inventory = MagicMock()
+ mock_inventory.hosts.get.return_value = mock_host
+ mock_inventory.hosts.get.name.return_value = 'test01'
+ mock_inventory.get_host.return_value = mock_host
+ mock_inventory.groups.__getitem__.return_value = mock_all
+
+ v = VariableManager(loader=fake_loader, inventory=mock_inventory)
+ self.assertEqual(v.get_vars(task=mock_task, use_cache=False).get("foo"), "bar")
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_variable_manager_precedence(self):
+ # FIXME: this needs to be redone as dataloader is not the automatic source of data anymore
+ return
+
+ # pylint: disable=unreachable
+ '''
+ Tests complex variations and combinations of get_vars() with different
+ objects to modify the context under which variables are merged.
+ '''
+ # FIXME: BCS make this work
+ # return True
+
+ mock_inventory = MagicMock()
+
+ inventory1_filedata = """
+ [group2:children]
+ group1
+
+ [group1]
+ host1 host_var=host_var_from_inventory_host1
+
+ [group1:vars]
+ group_var = group_var_from_inventory_group1
+
+ [group2:vars]
+ group_var = group_var_from_inventory_group2
+ """
+
+ fake_loader = DictDataLoader({
+ # inventory1
+ '/etc/ansible/inventory1': inventory1_filedata,
+ # role defaults_only1
+ '/etc/ansible/roles/defaults_only1/defaults/main.yml': """
+ default_var: "default_var_from_defaults_only1"
+ host_var: "host_var_from_defaults_only1"
+ group_var: "group_var_from_defaults_only1"
+ group_var_all: "group_var_all_from_defaults_only1"
+ extra_var: "extra_var_from_defaults_only1"
+ """,
+ '/etc/ansible/roles/defaults_only1/tasks/main.yml': """
+ - debug: msg="here i am"
+ """,
+
+ # role defaults_only2
+ '/etc/ansible/roles/defaults_only2/defaults/main.yml': """
+ default_var: "default_var_from_defaults_only2"
+ host_var: "host_var_from_defaults_only2"
+ group_var: "group_var_from_defaults_only2"
+ group_var_all: "group_var_all_from_defaults_only2"
+ extra_var: "extra_var_from_defaults_only2"
+ """,
+ })
+
+ inv1 = InventoryManager(loader=fake_loader, sources=['/etc/ansible/inventory1'])
+ v = VariableManager(inventory=mock_inventory, loader=fake_loader)
+
+ play1 = Play.load(dict(
+ hosts=['all'],
+ roles=['defaults_only1', 'defaults_only2'],
+ ), loader=fake_loader, variable_manager=v)
+
+ # first we assert that the defaults as viewed as a whole are the merged results
+ # of the defaults from each role, with the last role defined "winning" when
+ # there is a variable naming conflict
+ res = v.get_vars(play=play1)
+ self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
+
+ # next, we assert that when vars are viewed from the context of a task within a
+ # role, that task will see its own role defaults before any other role's
+ blocks = play1.compile()
+ task = blocks[1].block[0]
+ res = v.get_vars(play=play1, task=task)
+ self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
+
+ # next we assert the precedence of inventory variables
+ v.set_inventory(inv1)
+ h1 = inv1.get_host('host1')
+
+ res = v.get_vars(play=play1, host=h1)
+ self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
+ self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
+
+ # next we test with group_vars/ files loaded
+ fake_loader.push("/etc/ansible/group_vars/all", """
+ group_var_all: group_var_all_from_group_vars_all
+ """)
+ fake_loader.push("/etc/ansible/group_vars/group1", """
+ group_var: group_var_from_group_vars_group1
+ """)
+ fake_loader.push("/etc/ansible/group_vars/group3", """
+ # this is a dummy, which should not be used anywhere
+ group_var: group_var_from_group_vars_group3
+ """)
+ fake_loader.push("/etc/ansible/host_vars/host1", """
+ host_var: host_var_from_host_vars_host1
+ """)
+ fake_loader.push("group_vars/group1", """
+ playbook_group_var: playbook_group_var
+ """)
+ fake_loader.push("host_vars/host1", """
+ playbook_host_var: playbook_host_var
+ """)
+
+ res = v.get_vars(play=play1, host=h1)
+ # self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
+ # self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
+ # self.assertEqual(res['playbook_group_var'], 'playbook_group_var')
+ # self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
+ # self.assertEqual(res['playbook_host_var'], 'playbook_host_var')
+
+ # add in the fact cache
+ v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
+
+ res = v.get_vars(play=play1, host=h1)
+ self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')
+
+ @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
+ def test_variable_manager_role_vars_dependencies(self):
+ '''
+ Tests vars from role dependencies with duplicate dependencies.
+ '''
+ mock_inventory = MagicMock()
+
+ fake_loader = DictDataLoader({
+ # role common-role
+ '/etc/ansible/roles/common-role/tasks/main.yml': """
+ - debug: msg="{{role_var}}"
+ """,
+ # We do not need allow_duplicates: yes for this role
+ # because eliminating duplicates is done by the execution
+ # strategy, which we do not test here.
+
+ # role role1
+ '/etc/ansible/roles/role1/vars/main.yml': """
+ role_var: "role_var_from_role1"
+ """,
+ '/etc/ansible/roles/role1/meta/main.yml': """
+ dependencies:
+ - { role: common-role }
+ """,
+
+ # role role2
+ '/etc/ansible/roles/role2/vars/main.yml': """
+ role_var: "role_var_from_role2"
+ """,
+ '/etc/ansible/roles/role2/meta/main.yml': """
+ dependencies:
+ - { role: common-role }
+ """,
+ })
+
+ v = VariableManager(loader=fake_loader, inventory=mock_inventory)
+
+ play1 = Play.load(dict(
+ hosts=['all'],
+ roles=['role1', 'role2'],
+ ), loader=fake_loader, variable_manager=v)
+
+ # The task defined by common-role exists twice because role1
+ # and role2 depend on common-role. Check that the tasks see
+ # different values of role_var.
+ blocks = play1.compile()
+ task = blocks[1].block[0]
+ res = v.get_vars(play=play1, task=task)
+ self.assertEqual(res['role_var'], 'role_var_from_role1')
+
+ task = blocks[2].block[0]
+ res = v.get_vars(play=play1, task=task)
+ self.assertEqual(res['role_var'], 'role_var_from_role2')